Diffstat (limited to 'drivers/net')
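An equivalent per-file summary can be regenerated with git itself; the two revisions below are placeholders, since this page does not name the endpoints of the diff:

    git diff --stat <old-rev>..<new-rev> -- drivers/net

git's --stat output lists each changed file with its total changed-line count, as in the table below; the leading file-mode column comes from the cgit web interface rather than from git --stat.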
-rw-r--r-- drivers/net/Kconfig | 8
-rw-r--r-- drivers/net/Makefile | 3
-rw-r--r-- drivers/net/Space.c | 5
-rw-r--r-- drivers/net/amt.c | 313
-rw-r--r-- drivers/net/appletalk/Kconfig | 11
-rw-r--r-- drivers/net/appletalk/Makefile | 1
-rw-r--r-- drivers/net/appletalk/ltpc.c | 1277
-rw-r--r-- drivers/net/appletalk/ltpc.h | 74
-rw-r--r-- drivers/net/arcnet/com20020-pci.c | 3
-rw-r--r-- drivers/net/bareudp.c | 19
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 82
-rw-r--r-- drivers/net/bonding/bond_alb.c | 43
-rw-r--r-- drivers/net/bonding/bond_main.c | 585
-rw-r--r-- drivers/net/bonding/bond_netlink.c | 168
-rw-r--r-- drivers/net/bonding/bond_options.c | 141
-rw-r--r-- drivers/net/bonding/bond_procfs.c | 24
-rw-r--r-- drivers/net/bonding/bond_sysfs.c | 106
-rw-r--r-- drivers/net/bonding/bond_sysfs_slave.c | 36
-rw-r--r-- drivers/net/caif/caif_serial.c | 2
-rw-r--r-- drivers/net/caif/caif_virtio.c | 15
-rw-r--r-- drivers/net/can/Kconfig | 127
-rw-r--r-- drivers/net/can/Makefile | 4
-rw-r--r-- drivers/net/can/at91_can.c | 20
-rw-r--r-- drivers/net/can/c_can/c_can.h | 19
-rw-r--r-- drivers/net/can/c_can/c_can_ethtool.c | 17
-rw-r--r-- drivers/net/can/c_can/c_can_main.c | 41
-rw-r--r-- drivers/net/can/can327.c | 1144
-rw-r--r-- drivers/net/can/cc770/cc770.c | 9
-rw-r--r-- drivers/net/can/ctucanfd/Kconfig | 34
-rw-r--r-- drivers/net/can/ctucanfd/Makefile | 10
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd.h | 82
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd_base.c | 1458
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd_kframe.h | 77
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd_kregs.h | 349
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd_pci.c | 294
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd_platform.c | 130
-rw-r--r-- drivers/net/can/dev/Makefile | 17
-rw-r--r-- drivers/net/can/dev/bittiming.c | 213
-rw-r--r-- drivers/net/can/dev/calc_bittiming.c | 202
-rw-r--r-- drivers/net/can/dev/dev.c | 66
-rw-r--r-- drivers/net/can/dev/netlink.c | 9
-rw-r--r-- drivers/net/can/dev/rx-offload.c | 15
-rw-r--r-- drivers/net/can/dev/skb.c | 167
-rw-r--r-- drivers/net/can/flexcan/flexcan-core.c | 95
-rw-r--r-- drivers/net/can/flexcan/flexcan-ethtool.c | 8
-rw-r--r-- drivers/net/can/flexcan/flexcan.h | 24
-rw-r--r-- drivers/net/can/grcan.c | 56
-rw-r--r-- drivers/net/can/ifi_canfd/ifi_canfd.c | 23
-rw-r--r-- drivers/net/can/janz-ican3.c | 16
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 18
-rw-r--r-- drivers/net/can/led.c | 140
-rw-r--r-- drivers/net/can/m_can/Kconfig | 1
-rw-r--r-- drivers/net/can/m_can/m_can.c | 82
-rw-r--r-- drivers/net/can/m_can/m_can.h | 4
-rw-r--r-- drivers/net/can/m_can/m_can_pci.c | 48
-rw-r--r-- drivers/net/can/m_can/tcan4x5x-core.c | 4
-rw-r--r-- drivers/net/can/m_can/tcan4x5x-regmap.c | 2
-rw-r--r-- drivers/net/can/mscan/mpc5xxx_can.c | 12
-rw-r--r-- drivers/net/can/mscan/mscan.c | 9
-rw-r--r-- drivers/net/can/pch_can.c | 19
-rw-r--r-- drivers/net/can/peak_canfd/peak_canfd.c | 56
-rw-r--r-- drivers/net/can/rcar/rcar_can.c | 29
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 443
-rw-r--r-- drivers/net/can/sja1000/Kconfig | 2
-rw-r--r-- drivers/net/can/sja1000/peak_pcmcia.c | 2
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 41
-rw-r--r-- drivers/net/can/sja1000/sja1000.h | 3
-rw-r--r-- drivers/net/can/sja1000/sja1000_platform.c | 56
-rw-r--r-- drivers/net/can/sja1000/tscan1.c | 7
-rw-r--r-- drivers/net/can/slcan.c | 793
-rw-r--r-- drivers/net/can/slcan/Makefile | 7
-rw-r--r-- drivers/net/can/slcan/slcan-core.c | 939
-rw-r--r-- drivers/net/can/slcan/slcan-ethtool.c | 61
-rw-r--r-- drivers/net/can/slcan/slcan.h | 19
-rw-r--r-- drivers/net/can/softing/softing_main.c | 17
-rw-r--r-- drivers/net/can/spi/hi311x.c | 32
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 49
-rw-r--r-- drivers/net/can/spi/mcp251xfd/Kconfig | 1
-rw-r--r-- drivers/net/can/spi/mcp251xfd/Makefile | 2
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 406
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c | 6
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c | 144
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c | 153
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h | 62
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 46
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 417
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 24
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 6
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c | 2
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 110
-rw-r--r-- drivers/net/can/sun4i_can.c | 25
-rw-r--r-- drivers/net/can/ti_hecc.c | 32
-rw-r--r-- drivers/net/can/usb/Kconfig | 15
-rw-r--r-- drivers/net/can/usb/Makefile | 2
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 15
-rw-r--r-- drivers/net/can/usb/esd_usb.c (renamed from drivers/net/can/usb/esd_usb2.c) | 261
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_core.c | 50
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_core.h | 16
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_fd.c | 6
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 949
-rw-r--r-- drivers/net/can/usb/kvaser_usb/Makefile | 5
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 28
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 321
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 44
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 209
-rw-r--r-- drivers/net/can/usb/mcba_usb.c | 41
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb.c | 4
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 47
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.h | 3
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 69
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 3
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.h | 2
-rw-r--r-- drivers/net/can/usb/ucan.c | 12
-rw-r--r-- drivers/net/can/usb/usb_8dev.c | 61
-rw-r--r-- drivers/net/can/vcan.c | 22
-rw-r--r-- drivers/net/can/vxcan.c | 35
-rw-r--r-- drivers/net/can/xilinx_can.c | 110
-rw-r--r-- drivers/net/dsa/Kconfig | 34
-rw-r--r-- drivers/net/dsa/Makefile | 5
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 131
-rw-r--r-- drivers/net/dsa/b53/b53_mdio.c | 2
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 2
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 49
-rw-r--r-- drivers/net/dsa/b53/b53_serdes.c | 93
-rw-r--r-- drivers/net/dsa/b53/b53_serdes.h | 14
-rw-r--r-- drivers/net/dsa/b53/b53_spi.c | 6
-rw-r--r-- drivers/net/dsa/b53/b53_srab.c | 41
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 195
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 12
-rw-r--r-- drivers/net/dsa/dsa_loop.c | 30
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.c | 115
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.h | 7
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 2
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_ptp.c | 1
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 59
-rw-r--r-- drivers/net/dsa/lan9303_i2c.c | 8
-rw-r--r-- drivers/net/dsa/lan9303_mdio.c | 3
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 102
-rw-r--r-- drivers/net/dsa/microchip/Kconfig | 42
-rw-r--r-- drivers/net/dsa/microchip/Makefile | 11
-rw-r--r-- drivers/net/dsa/microchip/ksz8.h | 104
-rw-r--r-- drivers/net/dsa/microchip/ksz8795.c | 1053
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_reg.h | 42
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_spi.c | 142
-rw-r--r-- drivers/net/dsa/microchip/ksz8863_smi.c | 31
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 872
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.h | 61
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_i2c.c | 45
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_reg.h | 55
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_spi.c | 122
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 2840
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 454
-rw-r--r-- drivers/net/dsa/microchip/ksz_spi.c | 246
-rw-r--r-- drivers/net/dsa/microchip/lan937x.h | 23
-rw-r--r-- drivers/net/dsa/microchip/lan937x_main.c | 391
-rw-r--r-- drivers/net/dsa/microchip/lan937x_reg.h | 202
-rw-r--r-- drivers/net/dsa/mt7530.c | 499
-rw-r--r-- drivers/net/dsa/mt7530.h | 28
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 1105
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 54
-rw-r--r-- drivers/net/dsa/mv88e6xxx/devlink.c | 94
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 11
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_vtu.c | 316
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2_scratch.c | 28
-rw-r--r-- drivers/net/dsa/mv88e6xxx/hwtstamp.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 96
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 18
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port_hidden.c | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.c | 116
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.h | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/smi.c | 35
-rw-r--r-- drivers/net/dsa/ocelot/Kconfig | 1
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 1337
-rw-r--r-- drivers/net/dsa/ocelot/felix.h | 44
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 924
-rw-r--r-- drivers/net/dsa/ocelot/seville_vsc9953.c | 382
-rw-r--r-- drivers/net/dsa/qca/Kconfig | 8
-rw-r--r-- drivers/net/dsa/qca/Makefile | 2
-rw-r--r-- drivers/net/dsa/qca/ar9331.c | 84
-rw-r--r-- drivers/net/dsa/qca/qca8k-8xxx.c | 2089
-rw-r--r-- drivers/net/dsa/qca/qca8k-common.c | 1221
-rw-r--r-- drivers/net/dsa/qca/qca8k.h (renamed from drivers/net/dsa/qca8k.h) | 163
-rw-r--r-- drivers/net/dsa/qca8k.c | 2616
-rw-r--r-- drivers/net/dsa/realtek-smi-core.c | 523
-rw-r--r-- drivers/net/dsa/realtek/Kconfig | 54
-rw-r--r-- drivers/net/dsa/realtek/Makefile | 6
-rw-r--r-- drivers/net/dsa/realtek/realtek-mdio.c | 287
-rw-r--r-- drivers/net/dsa/realtek/realtek-smi.c | 570
-rw-r--r-- drivers/net/dsa/realtek/realtek.h (renamed from drivers/net/dsa/realtek-smi-core.h) | 91
-rw-r--r-- drivers/net/dsa/realtek/rtl8365mb.c (renamed from drivers/net/dsa/rtl8365mb.c) | 885
-rw-r--r-- drivers/net/dsa/realtek/rtl8366-core.c (renamed from drivers/net/dsa/rtl8366.c) | 164
-rw-r--r-- drivers/net/dsa/realtek/rtl8366rb.c (renamed from drivers/net/dsa/rtl8366rb.c) | 487
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.c | 1062
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.h | 259
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_devlink.c | 2
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_flower.c | 47
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 227
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.c | 2
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_vl.c | 16
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-platform.c | 2
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-spi.c | 18
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.c | 37
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_i2c.c | 8
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_mdio.c | 2
-rw-r--r-- drivers/net/dummy.c | 2
-rw-r--r-- drivers/net/eql.c | 7
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 6
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 40
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 6
-rw-r--r-- drivers/net/ethernet/8390/etherh.c | 12
-rw-r--r-- drivers/net/ethernet/8390/mcf8390.c | 15
-rw-r--r-- drivers/net/ethernet/Kconfig | 30
-rw-r--r-- drivers/net/ethernet/Makefile | 4
-rw-r--r-- drivers/net/ethernet/actions/owl-emac.c | 2
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 6
-rw-r--r-- drivers/net/ethernet/adi/Kconfig | 28
-rw-r--r-- drivers/net/ethernet/adi/Makefile | 6
-rw-r--r-- drivers/net/ethernet/adi/adin1110.c | 1720
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 6
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 22
-rw-r--r-- drivers/net/ethernet/alacritech/slic.h | 2
-rw-r--r-- drivers/net/ethernet/alacritech/slicoss.c | 6
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 35
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.h | 18
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 4
-rw-r--r-- drivers/net/ethernet/altera/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdma.c | 2
-rw-r--r-- drivers/net/ethernet/altera/altera_tse.h | 19
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 470
-rw-r--r-- drivers/net/ethernet/altera/altera_utils.h | 5
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 15
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/amd/Makefile | 1
-rw-r--r-- drivers/net/ethernet/amd/a2065.c | 2
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 54
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.h | 2
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c | 4
-rw-r--r-- drivers/net/ethernet/amd/atarilance.c | 10
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 30
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.h | 4
-rw-r--r-- drivers/net/ethernet/amd/declance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/lance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 1251
-rw-r--r-- drivers/net/ethernet/amd/ni65.h | 121
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 20
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 19
-rw-r--r-- drivers/net/ethernet/amd/sun3lance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 26
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 8
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 58
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 31
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 22
-rw-r--r-- drivers/net/ethernet/apple/bmac.c | 6
-rw-r--r-- drivers/net/ethernet/apple/mace.c | 17
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_macsec.c | 155
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 90
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 165
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 7
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 23
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 429
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 21
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 50
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 17
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 18
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h | 4
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 5
-rw-r--r-- drivers/net/ethernet/arc/emac_mdio.c | 5
-rw-r--r-- drivers/net/ethernet/asix/ax88796c_main.c | 10
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 19
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 12
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 21
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 14
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 13
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c | 31
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 54
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 554
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 155
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 784
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 254
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 209
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 18
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 37
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 13
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 20
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 9
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 15
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 477
-rw-r--r-- drivers/net/ethernet/cadence/macb_ptp.c | 11
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_core.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 7
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 16
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/common.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cphy.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/elmer0.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/espi.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/espi.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/gmac.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/mv88x201x.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/pm3393.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/regs.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.h | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/subr.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 28
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 31
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 59
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 17
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 26
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c | 3
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/cq_desc.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/cq_enet_desc.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_api.c | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_api.h | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.c | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.h | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 22
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_pp.c | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_pp.h | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_res.c | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_res.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/rq_enet_desc.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_cq.c | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_cq.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_enet.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_intr.c | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_intr.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_nic.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_resource.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rq.c | 16
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rq.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_rss.h | 14
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_stats.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_vic.c | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_vic.h | 19
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_wq.c | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_wq.h | 15
-rw-r--r-- drivers/net/ethernet/cisco/enic/wq_enet_desc.h | 15
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 36
-rw-r--r-- drivers/net/ethernet/davicom/Kconfig | 31
-rw-r--r-- drivers/net/ethernet/davicom/Makefile | 1
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 32
-rw-r--r-- drivers/net/ethernet/davicom/dm9051.c | 1260
-rw-r--r-- drivers/net/ethernet/davicom/dm9051.h | 162
-rw-r--r-- drivers/net/ethernet/dec/tulip/Kconfig | 15
-rw-r--r-- drivers/net/ethernet/dec/tulip/Makefile | 1
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 4
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 5591
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.h | 1017
-rw-r--r-- drivers/net/ethernet/dec/tulip/dmfe.c | 4
-rw-r--r-- drivers/net/ethernet/dec/tulip/eeprom.c | 7
-rw-r--r-- drivers/net/ethernet/dec/tulip/pnic.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 65
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r-- drivers/net/ethernet/dec/tulip/winbond-840.c | 6
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 4
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 64
-rw-r--r-- drivers/net/ethernet/dnet.c | 6
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 3
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 22
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 37
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 17
-rw-r--r-- drivers/net/ethernet/engleder/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/engleder/Makefile | 2
-rw-r--r-- drivers/net/ethernet/engleder/tsnep.h | 48
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_ethtool.c | 40
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_hw.h | 25
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 501
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_ptp.c | 28
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_rxnfc.c | 307
-rw-r--r-- drivers/net/ethernet/ethoc.c | 2
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 5
-rw-r--r-- drivers/net/ethernet/faraday/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 303
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 12
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.h | 12
-rw-r--r-- drivers/net/ethernet/fealnx.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/Kconfig | 7
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 99
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 31
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 32
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 32
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 44
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 456
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 32
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 176
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 8
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 15
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 12
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.c | 54
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 6
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.h | 6
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Makefile | 1
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 102
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 66
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 41
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 246
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 119
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 103
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 272
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 31
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 308
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.c | 16
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 31
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.h | 31
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 321
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.h | 58
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_keygen.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_keygen.h | 29
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_mac.h | 24
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 238
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.h | 57
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_muram.c | 31
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_muram.h | 32
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.h | 29
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_sp.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_sp.h | 28
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.c | 164
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.h | 54
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 506
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.h | 43
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/xgmac_mdio.c | 119
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 4
-rw-r--r-- drivers/net/ethernet/fungible/Kconfig | 28
-rw-r--r-- drivers/net/ethernet/fungible/Makefile | 7
-rw-r--r-- drivers/net/ethernet/fungible/funcore/Makefile | 5
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_dev.c | 843
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_dev.h | 150
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_hci.h | 1242
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_queue.c | 601
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_queue.h | 175
-rw-r--r-- drivers/net/ethernet/fungible/funeth/Kconfig | 17
-rw-r--r-- drivers/net/ethernet/fungible/funeth/Makefile | 10
-rw-r--r-- drivers/net/ethernet/fungible/funeth/fun_port.h | 97
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth.h | 171
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_devlink.c | 34
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_devlink.h | 13
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 1198
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_ktls.c | 155
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_ktls.h | 30
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_main.c | 2086
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_rx.c | 829
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_trace.h | 117
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_tx.c | 801
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_txrx.h | 265
-rw-r--r-- drivers/net/ethernet/google/gve/gve.h | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_adminq.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 27
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx.c | 6
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 84
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 52
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 112
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 244
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 278
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 89
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 66
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 567
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 48
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 621
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 153
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 91
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 88
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_debugfs.c | 18
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_debugfs.h | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 4
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 11
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 19
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 5
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 35
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 9
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c | 9
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h | 4
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 2
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 11
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 5
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 11
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 25
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_main.c | 72
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 15
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_rx.h | 2
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 22
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 15
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.h | 2
-rw-r--r-- drivers/net/ethernet/i825xx/ether1.c | 4
-rw-r--r-- drivers/net/ethernet/i825xx/sni_82596.c | 3
-rw-r--r-- drivers/net/ethernet/i825xx/sun3_82586.h | 4
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 7
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 12
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 306
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 24
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 932
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 69
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 7
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_param.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 28
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 81
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 28
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 18
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 60
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 92
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 159
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 16
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_devids.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 245
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 536
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 25
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 38
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 16
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 208
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 237
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 146
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 56
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_adminq.c | 15
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_common.c | 4
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 954
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_status.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 78
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 391
-rw-r--r-- drivers/net/ethernet/intel/ice/Makefile | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 113
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 395
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_arfs.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_arfs.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 47
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 524
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 27
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devids.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devlink.c | 374
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.c | 186
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 185
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 352
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flex_type.h | 46
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flow.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flow.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fltr.c | 81
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fltr.h | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fw_update.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 566
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.h | 74
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_idc.c | 54
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_idc_int.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.c | 56
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 817
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 36
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 1179
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_nvm.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_nvm.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_osdep.h | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c | 38
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_protocol_type.h | 50
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 813
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 57
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 129
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 14
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_repr.c | 121
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_repr.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 2161
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.h | 163
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 2064
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.h | 55
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 519
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.h | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_trace.h | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 98
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 28
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 1132
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 293
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 40
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 532
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.h | 52
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 211
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h | 19
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 3826
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.h | 82
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 23
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 5317
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 346
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vlan.h | 18
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vlan_mode.c | 438
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vlan_mode.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 707
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c | 103
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h | 29
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 515
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.h | 33
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 200
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 21
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igbvf/igbvf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 26
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_base.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_hw.h | 9
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_i225.c | 11
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_mac.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 242
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_phy.c | 24
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_phy.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ptp.c | 16
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_regs.h | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_param.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 41
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 38
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 33
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 275
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 133
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 223
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 20
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 60
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 86
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ipsec.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ipsec.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 50
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/mbx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 44
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.h | 1
-rw-r--r-- drivers/net/ethernet/jme.c | 11
-rw-r--r-- drivers/net/ethernet/jme.h | 2
-rw-r--r-- drivers/net/ethernet/korina.c | 11
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 11
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 18
-rw-r--r-- drivers/net/ethernet/litex/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/litex/litex_liteeth.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 53
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 368
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 35
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/Kconfig | 20
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/Makefile | 9
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 737
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 204
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 245
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 170
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 194
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 299
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c | 463
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 1181
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 357
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 367
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 506
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 199
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 334
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 284
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 310
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 538
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs.c | 1603
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs.h | 246
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c | 214
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h | 1102
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c | 889
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 31
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 75
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 237
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 342
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 42
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 49
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 51
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 188
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 531
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 71
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 86
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 105
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 184
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h | 17
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c | 2009
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h | 233
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 16
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 1671
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 156
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 174
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 470
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 59
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 94
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 90
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 336
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 111
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h | 68
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 154
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 175
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 57
-rw-r--r-- drivers/net/ethernet/marvell/prestera/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/marvell/prestera/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera.h | 80
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_acl.c | 238
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_acl.h | 50
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_ethtool.c | 32
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_ethtool.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flow.c | 69
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flow.h | 9
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flower.c | 207
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flower.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_hw.c | 551
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_hw.h | 59
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_main.c | 580
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_matchall.c | 127
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_matchall.h | 17
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_pci.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router.c | 1487
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router_hw.c | 530
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router_hw.h | 121
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_span.c | 71
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_span.h | 12
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 714
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 9
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 13
-rw-r--r-- drivers/net/ethernet/mediatek/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/mediatek/Makefile | 5
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1817
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 484
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.c | 571
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.h | 156
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 47
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 228
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 8
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_sgmii.c | 175
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 535
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed.c | 1160
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed.h | 139
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 178
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_regs.h | 332
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/catas.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/crdump.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/icm.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 60
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 179
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c | 385
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 125
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 156
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 404
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 85
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 73
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 77
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 209
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/channels.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/channels.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 191
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 188
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/htb.c | 722
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/htb.h | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 680
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 87
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 810
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/qos.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 121
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 180
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/selq.c | 266
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/selq.h | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 159
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 47
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 157
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c | 79
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c | 380
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 585
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 74
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 77
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 209
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c | 90
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 323
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 88
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 216
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 243
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 113
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 208
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 383
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 205
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 262
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 99
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 102
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c) | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 587
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1861
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c1384
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c390
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c826
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c978
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c677
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c412
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c768
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c1582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c622
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c305
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c899
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c194
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c173
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c161
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h109
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c449
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h173
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c772
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c358
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c1601
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c387
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c388
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1970
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c813
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h128
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c1072
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c737
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1569
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c812
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c879
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h10
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c46
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c11
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c17
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c12
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c441
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h26
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c728
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h337
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c571
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c70
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c44
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ets.c96
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c155
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c836
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c363
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c221
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c313
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h299
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c45
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c15
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c235
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c27
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c897
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h638
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c218
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c528
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c85
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c133
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c95
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c36
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c37
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h108
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h500
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c50
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c45
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c46
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c685
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c513
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c379
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c125
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c34
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h10
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c57
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c18
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h5
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h85
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c64
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c222
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c45
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c52
-rw-r--r--drivers/net/ethernet/mscc/Makefile11
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1390
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h31
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c21
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c117
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c13
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c418
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c67
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c489
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c458
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c115
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c143
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c85
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c22
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c12
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/neterion/Kconfig24
-rw-r--r--drivers/net/ethernet/neterion/Makefile1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c52
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c5099
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2086
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c1154
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h48
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4809
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h518
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h4636
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c2428
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h2290
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h49
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile8
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c86
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c532
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h157
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c67
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c119
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c486
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c553
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c1382
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h106
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c279
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c409
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c1539
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h128
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c198
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c145
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h226
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2311
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h112
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c466
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h219
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c506
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c112
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c174
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c42
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c12
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c13
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c13
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c8
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c36
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c164
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c327
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c129
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c37
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c72
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c90
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c238
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c11
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c8
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/rdc/r6040.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c8
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/atp.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h18
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c341
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c204
-rw-r--r--drivers/net/ethernet/renesas/ravb.h14
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c139
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c6
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c22
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/sfc/Kconfig15
-rw-r--r--drivers/net/ethernet/sfc/Makefile6
-rw-r--r--drivers/net/ethernet/sfc/ef10.c126
-rw-r--r--drivers/net/ethernet/sfc/ef100.c89
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c152
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.h9
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c526
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h13
-rw-r--r--drivers/net/ethernet/sfc/ef100_regs.h83
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c450
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h70
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c46
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.c72
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.h14
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c84
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c26
-rw-r--r--drivers/net/ethernet/sfc/efx.c112
-rw-r--r--drivers/net/ethernet/sfc/efx.h10
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c276
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h3
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c121
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h19
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c22
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c94
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c3
-rw-r--r--drivers/net/ethernet/sfc/filter.h42
-rw-r--r--drivers/net/ethernet/sfc/mae.c511
-rw-r--r--drivers/net/ethernet/sfc/mae.h56
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c65
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h15
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h8140
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol_mae.h24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h90
-rw-r--r--drivers/net/ethernet/sfc/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.h4
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/ptp.c164
-rw-r--r--drivers/net/ethernet/sfc/ptp.h1
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c39
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h6
-rw-r--r--drivers/net/ethernet/sfc/siena/Kconfig46
-rw-r--r--drivers/net/ethernet/sfc/siena/Makefile11
-rw-r--r--drivers/net/ethernet/sfc/siena/bitfield.h614
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c1337
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.h218
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c1368
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.h45
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c1408
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.h118
-rw-r--r--drivers/net/ethernet/sfc/siena/enum.h176
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c282
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c1340
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h60
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c (renamed from drivers/net/ethernet/sfc/farch.c)83
-rw-r--r--drivers/net/ethernet/sfc/siena/farch_regs.h2929
-rw-r--r--drivers/net/ethernet/sfc/siena/filter.h309
-rw-r--r--drivers/net/ethernet/sfc/siena/io.h310
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c2260
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.h386
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_mon.c531
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h17204
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port.c110
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port.h17
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port_common.c1282
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port_common.h58
-rw-r--r--drivers/net/ethernet/sfc/siena/mtd.c124
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h1715
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c530
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.h206
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h251
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2201
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h45
-rw-r--r--drivers/net/ethernet/sfc/siena/rx.c400
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c1094
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.h110
-rw-r--r--drivers/net/ethernet/sfc/siena/selftest.c807
-rw-r--r--drivers/net/ethernet/sfc/siena/selftest.h52
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c (renamed from drivers/net/ethernet/sfc/siena.c)176
-rw-r--r--drivers/net/ethernet/sfc/siena/siena_sriov.c (renamed from drivers/net/ethernet/sfc/siena_sriov.c)35
-rw-r--r--drivers/net/ethernet/sfc/siena/siena_sriov.h (renamed from drivers/net/ethernet/sfc/siena_sriov.h)9
-rw-r--r--drivers/net/ethernet/sfc/siena/sriov.h83
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c392
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.h40
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.c448
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.h39
-rw-r--r--drivers/net/ethernet/sfc/siena/vfdi.h252
-rw-r--r--drivers/net/ethernet/sfc/siena/workarounds.h28
-rw-r--r--drivers/net/ethernet/sfc/sriov.c10
-rw-r--r--drivers/net/ethernet/sfc/tc.c680
-rw-r--r--drivers/net/ethernet/sfc/tc.h121
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.c228
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.h29
-rw-r--r--drivers/net/ethernet/sfc/tx.c18
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c40
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h3
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c6
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/smsc/epic100.c12
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c20
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h11
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c8
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c10
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c90
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c406
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1078
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c6
-rw-r--r--drivers/net/ethernet/sun/cassini.c37
-rw-r--r--drivers/net/ethernet/sun/cassini.h2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c9
-rw-r--r--drivers/net/ethernet/sun/niu.c51
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c11
-rw-r--r--drivers/net/ethernet/sun/sunhme.c708
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c7
-rw-r--r--drivers/net/ethernet/sunplus/Kconfig32
-rw-r--r--drivers/net/ethernet/sunplus/Makefile6
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_define.h270
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_desc.c228
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_desc.h19
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c565
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.c273
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.h13
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mac.c274
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mac.h18
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c131
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.h12
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.c92
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.h12
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_register.h86
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c6
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h3
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c10
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c66
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c336
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h7
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c193
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h8
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c7
-rw-r--r--drivers/net/ethernet/ti/cpmac.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c63
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c66
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c239
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h10
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpts.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c43
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c260
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c7
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c8
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c65
-rw-r--r--drivers/net/ethernet/vertexcom/Kconfig2
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c16
-rw-r--r--drivers/net/ethernet/via/via-rhine.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c11
-rw-r--r--drivers/net/ethernet/via/via-velocity.h3
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig45
-rw-r--r--drivers/net/ethernet/wangxun/Makefile7
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h24
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c170
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h50
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h24
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c166
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h57
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c10
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c8
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h185
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c92
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h133
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c946
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c95
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/Kconfig4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c106
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c3
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/fjes/fjes_ethtool.c6
-rw-r--r--drivers/net/fjes/fjes_main.c1157
-rw-r--r--drivers/net/geneve.c128
-rw-r--r--drivers/net/gtp.c568
-rw-r--r--drivers/net/hamradio/6pack.c17
-rw-r--r--drivers/net/hamradio/Kconfig34
-rw-r--r--drivers/net/hamradio/Makefile1
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/dmascc.c1449
-rw-r--r--drivers/net/hamradio/hdlcdrv.c4
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hamradio/yam.c6
-rw-r--r--drivers/net/hippi/rrunner.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h77
-rw-r--r--drivers/net/hyperv/netvsc.c168
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c101
-rw-r--r--drivers/net/hyperv/netvsc_drv.c182
-rw-r--r--drivers/net/hyperv/rndis_filter.c14
-rw-r--r--drivers/net/ieee802154/Kconfig7
-rw-r--r--drivers/net/ieee802154/adf7242.c7
-rw-r--r--drivers/net/ieee802154/at86rf230.c176
-rw-r--r--drivers/net/ieee802154/atusb.c223
-rw-r--r--drivers/net/ieee802154/ca8210.c192
-rw-r--r--drivers/net/ieee802154/cc2520.c5
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c4
-rw-r--r--drivers/net/ieee802154/mcr20a.c9
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/Makefile12
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c (renamed from drivers/net/ipa/ipa_data-v3.1.c)14
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-v3.5.1.c)26
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c (renamed from drivers/net/ipa/ipa_data-v4.11.c)12
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-v4.2.c)12
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c (renamed from drivers/net/ipa/ipa_data-v4.5.c)12
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c (renamed from drivers/net/ipa/ipa_data-v4.9.c)12
-rw-r--r--drivers/net/ipa/gsi.c352
-rw-r--r--drivers/net/ipa/gsi.h49
-rw-r--r--drivers/net/ipa/gsi_private.h38
-rw-r--r--drivers/net/ipa/gsi_reg.h212
-rw-r--r--drivers/net/ipa/gsi_trans.c433
-rw-r--r--drivers/net/ipa/gsi_trans.h56
-rw-r--r--drivers/net/ipa/ipa.h6
-rw-r--r--drivers/net/ipa/ipa_cmd.c93
-rw-r--r--drivers/net/ipa/ipa_cmd.h13
-rw-r--r--drivers/net/ipa/ipa_data.h72
-rw-r--r--drivers/net/ipa/ipa_endpoint.c858
-rw-r--r--drivers/net/ipa/ipa_endpoint.h116
-rw-r--r--drivers/net/ipa/ipa_interrupt.c53
-rw-r--r--drivers/net/ipa/ipa_interrupt.h2
-rw-r--r--drivers/net/ipa/ipa_main.c290
-rw-r--r--drivers/net/ipa/ipa_mem.c20
-rw-r--r--drivers/net/ipa/ipa_modem.c15
-rw-r--r--drivers/net/ipa/ipa_modem.h2
-rw-r--r--drivers/net/ipa/ipa_power.c232
-rw-r--r--drivers/net/ipa/ipa_power.h9
-rw-r--r--drivers/net/ipa/ipa_qmi.c12
-rw-r--r--drivers/net/ipa/ipa_qmi.h2
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c10
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h41
-rw-r--r--drivers/net/ipa/ipa_reg.c97
-rw-r--r--drivers/net/ipa/ipa_reg.h1121
-rw-r--r--drivers/net/ipa/ipa_resource.c65
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_smp2p.h2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c71
-rw-r--r--drivers/net/ipa/ipa_sysfs.h3
-rw-r--r--drivers/net/ipa/ipa_table.c31
-rw-r--r--drivers/net/ipa/ipa_table.h5
-rw-r--r--drivers/net/ipa/ipa_uc.c16
-rw-r--r--drivers/net/ipa/ipa_uc.h2
-rw-r--r--drivers/net/ipa/ipa_version.h30
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c446
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c456
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c512
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c456
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c533
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c509
-rw-r--r--drivers/net/ipvlan/ipvlan.h10
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c14
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c28
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/loopback.c8
-rw-r--r--drivers/net/macsec.c300
-rw-r--r--drivers/net/macvlan.c69
-rw-r--r--drivers/net/macvtap.c10
-rw-r--r--drivers/net/mctp/Kconfig12
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c1080
-rw-r--r--drivers/net/mctp/mctp-serial.c11
-rw-r--r--drivers/net/mdio/fwnode_mdio.c63
-rw-r--r--drivers/net/mdio/mdio-aspeed.c139
-rw-r--r--drivers/net/mdio/mdio-i2c.c310
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c6
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c191
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c2
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c20
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c9
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c9
-rw-r--r--drivers/net/mdio/mdio-mux.c4
-rw-r--r--drivers/net/mdio/mdio-xgene.c3
-rw-r--r--drivers/net/mdio/of_mdio.c1
-rw-r--r--drivers/net/mhi_net.c2
-rw-r--r--drivers/net/net_failover.c6
-rw-r--r--drivers/net/netconsole.c12
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/bpf.c8
-rw-r--r--drivers/net/netdevsim/bus.c28
-rw-r--r--drivers/net/netdevsim/dev.c267
-rw-r--r--drivers/net/netdevsim/fib.c116
-rw-r--r--drivers/net/netdevsim/hwstats.c486
-rw-r--r--drivers/net/netdevsim/ipsec.c2
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/netdevsim/netdevsim.h28
-rw-r--r--drivers/net/ntb_netdev.c8
-rw-r--r--drivers/net/pcs/Kconfig18
-rw-r--r--drivers/net/pcs/Makefile2
-rw-r--r--drivers/net/pcs/pcs-altera-tse.c175
-rw-r--r--drivers/net/pcs/pcs-lynx.c80
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c531
-rw-r--r--drivers/net/pcs/pcs-xpcs.c217
-rw-r--r--drivers/net/pcs/pcs-xpcs.h1
-rw-r--r--drivers/net/phy/Kconfig21
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin.c42
-rw-r--r--drivers/net/phy/adin1100.c297
-rw-r--r--drivers/net/phy/aquantia_main.c160
-rw-r--r--drivers/net/phy/at803x.c237
-rw-r--r--drivers/net/phy/ax88796b.c6
-rw-r--r--drivers/net/phy/bcm-phy-lib.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.h19
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c944
-rw-r--r--drivers/net/phy/bcm87xx.c36
-rw-r--r--drivers/net/phy/broadcom.c107
-rw-r--r--drivers/net/phy/dp83640.c19
-rw-r--r--drivers/net/phy/dp83822.c18
-rw-r--r--drivers/net/phy/dp83867.c92
-rw-r--r--drivers/net/phy/dp83td510.c258
-rw-r--r--drivers/net/phy/fixed_phy.c7
-rw-r--r--drivers/net/phy/marvell-88x2222.c5
-rw-r--r--drivers/net/phy/marvell.c133
-rw-r--r--drivers/net/phy/marvell10g.c135
-rw-r--r--drivers/net/phy/mdio_bus.c7
-rw-r--r--drivers/net/phy/mediatek-ge.c3
-rw-r--r--drivers/net/phy/meson-gxl.c23
-rw-r--r--drivers/net/phy/micrel.c1573
-rw-r--r--drivers/net/phy/microchip.c10
-rw-r--r--drivers/net/phy/microchip_t1.c463
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c114
-rw-r--r--drivers/net/phy/mscc/mscc_main.c5
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c2
-rw-r--r--drivers/net/phy/mxl-gpy.c162
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c2
-rw-r--r--drivers/net/phy/nxp-tja11xx.c94
-rw-r--r--drivers/net/phy/phy-c45.c295
-rw-r--r--drivers/net/phy/phy-core.c99
-rw-r--r--drivers/net/phy/phy.c76
-rw-r--r--drivers/net/phy/phy_device.c98
-rw-r--r--drivers/net/phy/phylink.c696
-rw-r--r--drivers/net/phy/realtek.c44
-rw-r--r--drivers/net/phy/sfp-bus.c177
-rw-r--r--drivers/net/phy/sfp.c484
-rw-r--r--drivers/net/phy/sfp.h11
-rw-r--r--drivers/net/phy/smsc.c108
-rw-r--r--drivers/net/phy/spi_ks8995.c73
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/ppp_async.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/pse-pd/Kconfig23
-rw-r--r--drivers/net/pse-pd/Makefile6
-rw-r--r--drivers/net/pse-pd/pse_core.c314
-rw-r--r--drivers/net/pse-pd/pse_regulator.c147
-rw-r--r--drivers/net/rionet.c10
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/slip/slip.c8
-rw-r--r--drivers/net/sungem_phy.c7
-rw-r--r--drivers/net/tap.c58
-rw-r--r--drivers/net/team/team.c60
-rw-r--r--drivers/net/thunderbolt.c64
-rw-r--r--drivers/net/tun.c174
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/aqc111.c13
-rw-r--r--drivers/net/usb/asix.h19
-rw-r--r--drivers/net/usb/asix_common.c126
-rw-r--r--drivers/net/usb/asix_devices.c244
-rw-r--r--drivers/net/usb/ax88179_178a.c505
-rw-r--r--drivers/net/usb/catc.c52
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/cdc_ether.c22
-rw-r--r--drivers/net/usb/cdc_mbim.c6
-rw-r--r--drivers/net/usb/cdc_ncm.c45
-rw-r--r--drivers/net/usb/cdc_subset.c10
-rw-r--r--drivers/net/usb/gl620a.c2
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/ipheth.c6
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c17
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c13
-rw-r--r--drivers/net/usb/r8152.c133
-rw-r--r--drivers/net/usb/rndis_host.c49
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/smsc95xx.c415
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/sr9800.h2
-rw-r--r--drivers/net/usb/usbnet.c63
-rw-r--r--drivers/net/usb/zaurus.c12
-rw-r--r--drivers/net/veth.c223
-rw-r--r--drivers/net/virtio_net.c837
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h80
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c300
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c157
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h24
-rw-r--r--drivers/net/vrf.c38
-rw-r--r--drivers/net/vxlan/Makefile7
-rw-r--r--drivers/net/vxlan/vxlan_core.c (renamed from drivers/net/vxlan.c)576
-rw-r--r--drivers/net/vxlan/vxlan_multicast.c272
-rw-r--r--drivers/net/vxlan/vxlan_private.h162
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c1005
-rw-r--r--drivers/net/wan/Kconfig75
-rw-r--r--drivers/net/wan/Makefile5
-rw-r--r--drivers/net/wan/cosa.c2052
-rw-r--r--drivers/net/wan/cosa.h104
-rw-r--r--drivers/net/wan/farsync.h2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c2
-rw-r--r--drivers/net/wan/hd64572.c3
-rw-r--r--drivers/net/wan/hostess_sv11.c336
-rw-r--r--drivers/net/wan/ixp4xx_hss.c41
-rw-r--r--drivers/net/wan/lapbether.c5
-rw-r--r--drivers/net/wan/lmc/Makefile18
-rw-r--r--drivers/net/wan/lmc/lmc.h33
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c65
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h52
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h255
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2008
-rw-r--r--drivers/net/wan/lmc/lmc_media.c1206
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c106
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h18
-rw-r--r--drivers/net/wan/lmc/lmc_var.h468
-rw-r--r--drivers/net/wan/sealevel.c352
-rw-r--r--drivers/net/wan/slic_ds26522.c3
-rw-r--r--drivers/net/wan/z85230.c1641
-rw-r--r--drivers/net/wan/z85230.h407
-rw-r--r--drivers/net/wireguard/allowedips.c9
-rw-r--r--drivers/net/wireguard/device.c44
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireguard/noise.c45
-rw-r--r--drivers/net/wireguard/peer.c3
-rw-r--r--drivers/net/wireguard/queueing.c3
-rw-r--r--drivers/net/wireguard/receive.c9
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c22
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c4
-rw-r--r--drivers/net/wireguard/socket.c5
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/Makefile4
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c83
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c153
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h296
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c347
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c99
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h31
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c290
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c41
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c573
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c768
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h215
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c937
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h185
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c88
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h43
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c28
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h33
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c368
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c62
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c21
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h40
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c473
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h143
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c350
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h72
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1199
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c305
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h17
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c1027
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h28
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c813
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h54
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c411
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c361
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h41
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c78
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c41
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c1462
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h669
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c804
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.h45
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c23
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c73
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c43
-rw-r--r--drivers/net/wireless/ath/carl9170/Makefile5
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c76
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h2
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c6
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/spectral_common.h4
-rw-r--r--drivers/net/wireless/ath/trace.h7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.c125
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.h84
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h83
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c247
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c159
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c69
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h14
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c32
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c11
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c9
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c89
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h21
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c113
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c74
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c45
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c147
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c512
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c63
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h9
-rw-r--r--drivers/net/wireless/cisco/airo.c42
-rw-r--r--drivers/net/wireless/intel/Makefile1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c74
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c127
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h13
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c26
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c37
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c243
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c331
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c183
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c715
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c248
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c410
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c106
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c769
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c304
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c351
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h21
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c16
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c24
-rw-r--r--drivers/net/wireless/intersil/orinoco/airport.c1
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c6
-rw-r--r--drivers/net/wireless/intersil/p54/main.c17
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c7
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1449
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h5
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c15
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h6
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c10
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Makefile13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c46
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ethtool.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.h18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c41
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c38
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h14
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c69
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c255
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c143
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h217
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c319
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c319
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c109
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c92
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h199
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h331
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c942
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c819
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h284
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c421
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c542
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c190
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c442
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c1582
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h336
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c266
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c2130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h151
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c705
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h176
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h942
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1243
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c115
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c144
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c1005
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h336
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c324
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c965
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h163
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c181
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c226
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h71
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c386
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c255
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c113
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c139
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c7
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c268
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h21
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c228
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h18
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c23
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h16
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c54
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c23
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c32
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h20
-rw-r--r--drivers/net/wireless/purelifi/Kconfig17
-rw-r--r--drivers/net/wireless/purelifi/Makefile2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Kconfig14
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Makefile3
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.c98
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.h70
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/firmware.c276
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/intf.h52
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c754
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h184
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c891
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.h198
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c1802
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/ray_cs.c28
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c15
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c7
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c288
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c55
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c378
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h19
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c23
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c191
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h35
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c20
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c434
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h98
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c40
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c67
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c28
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c53
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig22
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile23
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c131
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h26
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c235
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h64
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1930
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c1313
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h1181
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c261
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c1564
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1171
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c1521
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h186
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c332
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c1638
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h538
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c1419
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h161
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c112
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h2234
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c515
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c382
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c183
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c2744
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c1501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c87
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c94
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c3146
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h88
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c4082
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h29
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c781
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c36704
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.h36
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c92
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c262
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h110
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h30
-rw-r--r--drivers/net/wireless/rndis_wlan.c30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c11
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c49
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c11
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c2
-rw-r--r--drivers/net/wireless/silabs/Kconfig18
-rw-r--r--drivers/net/wireless/silabs/Makefile3
-rw-r--r--drivers/net/wireless/silabs/wfx/Kconfig13
-rw-r--r--drivers/net/wireless/silabs/wfx/Makefile25
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.c324
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.h34
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h36
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c273
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c284
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.c93
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.h17
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.c569
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.h66
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.c331
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.h19
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.c388
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.h15
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_cmd.h553
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_general.h252
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_mib.h346
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.c391
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.h17
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c494
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.h61
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.c307
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.h48
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.c332
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.h78
-rw-r--r--drivers/net/wireless/silabs/wfx/key.c227
-rw-r--r--drivers/net/wireless/silabs/wfx/key.h19
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c497
-rw-r--r--drivers/net/wireless/silabs/wfx/main.h41
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.c298
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.h44
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.c147
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.h22
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c817
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h71
-rw-r--r--drivers/net/wireless/silabs/wfx/traces.h496
-rw-r--r--drivers/net/wireless/silabs/wfx/wfx.h167
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c10
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c19
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c55
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c12
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c22
-rw-r--r--drivers/net/wireless/ti/wl1251/io.c20
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c17
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c15
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c18
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c22
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c52
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c297
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c12
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c18
-rw-r--r--drivers/net/wireless/virt_wifi.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c8
-rw-r--r--drivers/net/wireless/zydas/zd1201.c3
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/wwan/Kconfig16
-rw-r--r--drivers/net/wwan/Makefile1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.c5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c54
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h7
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c21
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h134
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c742
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h142
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c19
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c46
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c1
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c5
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/t7xx/Makefile20
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c281
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.h180
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.c1281
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.h179
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c1339
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h127
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c574
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h206
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c1243
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h116
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c683
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h78
-rw-r--r--drivers/net/wwan/t7xx/t7xx_mhccif.c122
-rw-r--r--drivers/net/wwan/t7xx/t7xx_mhccif.h37
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c727
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h88
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c423
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h55
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c761
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h120
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c262
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.h31
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h135
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c273
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c509
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h98
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c176
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h350
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c550
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h135
-rw-r--r--drivers/net/wwan/wwan_core.c37
-rw-r--r--drivers/net/wwan/wwan_hwsim.c30
-rw-r--r--drivers/net/xen-netback/common.h14
-rw-r--r--drivers/net/xen-netback/interface.c24
-rw-r--r--drivers/net/xen-netback/netback.c17
-rw-r--r--drivers/net/xen-netback/rx.c3
-rw-r--r--drivers/net/xen-netback/xenbus.c17
-rw-r--r--drivers/net/xen-netfront.c233
2806 files changed, 350200 insertions, 126884 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b2a4f998c180..9e63b8c43f3e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,6 +76,7 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
+ depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
select CRYPTO
@@ -85,8 +86,6 @@ config WIREGUARD
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
@@ -94,6 +93,7 @@ config WIREGUARD
select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
select CRYPTO_POLY1305_MIPS if MIPS
+ select CRYPTO_CHACHA_S390 if S390
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
@@ -499,6 +499,10 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/pse-pd/Kconfig"
+
+source "drivers/net/can/Kconfig"
+
source "drivers/net/mctp/Kconfig"
source "drivers/net/mdio/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 50b23e71065f..6ce076462dbf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET) += loopback.o
obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += pse-pd/
obj-y += mdio/
obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
@@ -31,7 +32,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_VXLAN) += vxlan/
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 49e67c9fb5a4..83214e2e70ab 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -68,7 +68,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
- strlcpy(s[i].name, name, IFNAMSIZ);
+ strscpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
@@ -222,9 +222,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_CS89x0_ISA
{cs89x0_probe, 0},
#endif
-#ifdef CONFIG_NI65
- {ni65_probe, 0},
-#endif
{NULL, 0},
};
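
[Note on the Space.c hunk above] The strlcpy() to strscpy() change is part of the tree-wide conversion away from strlcpy(): strlcpy() returns strlen(src), so it must walk the entire source string even when the destination is smaller, and that return value is easy to misuse. strscpy() copies at most size - 1 bytes, always NUL-terminates, and reports truncation as -E2BIG. A minimal userspace sketch of that contract follows; it is illustrative only, not the kernel implementation (which lives in lib/string.c and is word-at-a-time optimized).

/* Userspace sketch of the strscpy() contract. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static long sketch_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (size == 0)
		return -E2BIG;

	for (i = 0; i < size - 1 && src[i] != '\0'; i++)
		dst[i] = src[i];
	dst[i] = '\0';		/* always NUL-terminated */

	/* -E2BIG signals truncation; strlcpy() would return strlen(src) */
	return src[i] == '\0' ? (long)i : -E2BIG;
}

int main(void)
{
	char name[8];		/* IFNAMSIZ-like small buffer */
	long ret = sketch_strscpy(name, "verylongname0", sizeof(name));

	printf("copied \"%s\", ret=%ld\n", name, ret);	/* truncated, ret=-E2BIG */
	return 0;
}
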
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index f1a36d7e2151..2d20be6ffb7e 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -51,13 +51,14 @@ static char *status_str[] = {
};
static char *type_str[] = {
+ "", /* Type 0 is not defined */
"AMT_MSG_DISCOVERY",
"AMT_MSG_ADVERTISEMENT",
"AMT_MSG_REQUEST",
"AMT_MSG_MEMBERSHIP_QUERY",
"AMT_MSG_MEMBERSHIP_UPDATE",
"AMT_MSG_MULTICAST_DATA",
- "AMT_MSG_TEARDOWM",
+ "AMT_MSG_TEARDOWN",
};
static char *action_str[] = {
@@ -448,7 +449,7 @@ out:
dev_put(amt->dev);
}
-/* Non-existant group is created as INCLUDE {empty}:
+/* Non-existent group is created as INCLUDE {empty}:
*
* RFC 3376 - 5.1. Action on Change of Interface State
*
@@ -562,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
ihv3->nsrcs = 0;
ihv3->resv = 0;
ihv3->suppress = false;
- ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
ihv3->csum = 0;
csum = &ihv3->csum;
csum_start = (void *)ihv3;
@@ -576,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
return skb;
}
-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
{
if (validate && amt->status >= status)
return;
netdev_dbg(amt->dev, "Update GW status %s -> %s",
status_str[amt->status], status_str[status]);
- amt->status = status;
+ WRITE_ONCE(amt->status, status);
}
static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
@@ -599,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
tunnel->status = status;
}
-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
-{
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, status, validate);
- spin_unlock_bh(&amt->lock);
-}
-
static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
enum amt_status status, bool validate)
{
@@ -699,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt)
if (unlikely(net_xmit_eval(err)))
amt->dev->stats.tx_errors++;
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
- spin_unlock_bh(&amt->lock);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
out:
rcu_read_unlock();
}
@@ -899,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
}
#endif
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
+ struct sk_buff *skb)
+{
+ int index;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events >= AMT_MAX_EVENTS) {
+ spin_unlock_bh(&amt->lock);
+ return 1;
+ }
+
+ index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
+ amt->events[index].event = event;
+ amt->events[index].skb = skb;
+ amt->nr_events++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ queue_work(amt_wq, &amt->event_wq);
+ spin_unlock_bh(&amt->lock);
+
+ return 0;
+}
+
static void amt_secret_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
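
[Note on amt_queue_event() above] The helper is the core of this rework: a fixed-size ring of pending events protected by amt->lock, with producers appending at (event_idx + nr_events) % AMT_MAX_EVENTS and the event work draining from event_idx (the consumer side is beyond this excerpt). A rough userspace model of the ring arithmetic follows; the names, the size, and the pthread mutex standing in for the spinlock and queue_work() kick are all illustrative, not the driver's.

/* Userspace model of the bounded event ring used by amt_queue_event(). */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

#define MAX_EVENTS 16		/* stands in for AMT_MAX_EVENTS */

static struct {
	pthread_mutex_t lock;
	int events[MAX_EVENTS];
	int event_idx;		/* next slot to drain */
	int nr_events;		/* queued but not yet handled */
} ring = { .lock = PTHREAD_MUTEX_INITIALIZER };

static bool queue_event(int event)	/* true on overflow, as in the driver */
{
	pthread_mutex_lock(&ring.lock);
	if (ring.nr_events >= MAX_EVENTS) {
		pthread_mutex_unlock(&ring.lock);
		return true;	/* caller must retry later */
	}
	ring.events[(ring.event_idx + ring.nr_events) % MAX_EVENTS] = event;
	ring.nr_events++;
	pthread_mutex_unlock(&ring.lock);
	return false;
}

static bool dequeue_event(int *event)
{
	pthread_mutex_lock(&ring.lock);
	if (!ring.nr_events) {
		pthread_mutex_unlock(&ring.lock);
		return false;
	}
	*event = ring.events[ring.event_idx];
	ring.event_idx = (ring.event_idx + 1) % MAX_EVENTS;
	ring.nr_events--;
	pthread_mutex_unlock(&ring.lock);
	return true;
}

int main(void)
{
	int ev;

	queue_event(1);
	queue_event(2);
	while (dequeue_event(&ev))
		printf("handled event %d\n", ev);
	return 0;
}
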
@@ -912,56 +925,72 @@ static void amt_secret_work(struct work_struct *work)
msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}
-static void amt_discovery_work(struct work_struct *work)
+static void amt_event_send_discovery(struct amt_dev *amt)
{
- struct amt_dev *amt = container_of(to_delayed_work(work),
- struct amt_dev,
- discovery_wq);
-
- spin_lock_bh(&amt->lock);
if (amt->status > AMT_STATUS_SENT_DISCOVERY)
goto out;
get_random_bytes(&amt->nonce, sizeof(__be32));
- spin_unlock_bh(&amt->lock);
amt_send_discovery(amt);
- spin_lock_bh(&amt->lock);
out:
mod_delayed_work(amt_wq, &amt->discovery_wq,
msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
- spin_unlock_bh(&amt->lock);
}
-static void amt_req_work(struct work_struct *work)
+static void amt_discovery_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
struct amt_dev,
- req_wq);
+ discovery_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_event_send_request(struct amt_dev *amt)
+{
u32 exp;
- spin_lock_bh(&amt->lock);
if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
goto out;
- if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) {
+ if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
netdev_dbg(amt->dev, "Gateway is not ready");
amt->qi = AMT_INIT_REQ_TIMEOUT;
- amt->ready4 = false;
- amt->ready6 = false;
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
amt->remote_ip = 0;
- __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt_update_gw_status(amt, AMT_STATUS_INIT, false);
amt->req_cnt = 0;
+ amt->nonce = 0;
+ goto out;
+ }
+
+ if (!amt->req_cnt) {
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
+ get_random_bytes(&amt->nonce, sizeof(__be32));
}
- spin_unlock_bh(&amt->lock);
amt_send_request(amt, false);
amt_send_request(amt, true);
amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
- spin_lock_bh(&amt->lock);
+ amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
- spin_unlock_bh(&amt->lock);
+}
+
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
+ mod_delayed_work(amt_wq, &amt->req_wq,
+ msecs_to_jiffies(100));
}
static bool amt_send_membership_update(struct amt_dev *amt,
@@ -1217,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
/* Gateway only passes IGMP/MLD packets */
if (!report)
goto free;
- if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+ if ((!v6 && !READ_ONCE(amt->ready4)) ||
+ (v6 && !READ_ONCE(amt->ready6)))
goto free;
if (amt_send_membership_update(amt, skb, v6))
goto free;
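
[Note on the ready4/ready6 changes] The flags move from updates under amt->lock to plain WRITE_ONCE() stores, with lockless readers such as the xmit path above annotated with READ_ONCE(). These annotations ask the compiler for exactly one untorn load or store and prevent fusing or re-loading; they are not memory barriers. A userspace sketch of the idiom follows; the macros below only approximate include/asm-generic/rwonce.h, minus its type checking.

/* Sketch of the READ_ONCE()/WRITE_ONCE() idiom: force a single,
 * untorn access through a volatile-qualified pointer. */
#include <stdbool.h>
#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static bool ready4;	/* written by the RX path, read by xmit */

static void rx_saw_query(void)
{
	WRITE_ONCE(ready4, true);
}

static bool xmit_may_send(void)
{
	/* Without READ_ONCE() the compiler may cache or re-load
	 * the flag across the surrounding code. */
	return READ_ONCE(ready4);
}

int main(void)
{
	printf("before: %d\n", xmit_may_send());
	rx_saw_query();
	printf("after:  %d\n", xmit_may_send());
	return 0;
}
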
@@ -1370,11 +1400,11 @@ static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
int i;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -1455,11 +1485,11 @@ static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
int i, j;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -2218,8 +2248,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
struct amt_header_advertisement *amta;
int hdr_size;
- hdr_size = sizeof(*amta) - sizeof(struct amt_header);
-
+ hdr_size = sizeof(*amta) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
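
[Note on the header-validation hunks] This hunk and several that follow share one shape: compute the full encapsulation header size including the outer UDP header, pskb_may_pull() that many bytes, and only then cast and dereference. pskb_may_pull() guarantees the first len bytes are present in the skb's linear area, pulling from fragments if needed, so the subsequent header access cannot over-read. The same discipline, sketched over a plain buffer; proto_hdr and parse_hdr are illustrative stand-ins, not driver structures.

/* Generic bounds-check-before-cast pattern that pskb_may_pull()
 * enforces for skbs: never dereference a header you have not
 * verified to be fully present. */
#include <stdint.h>
#include <stdio.h>

struct proto_hdr {		/* illustrative wire header */
	uint8_t  version;
	uint8_t  type;
	uint16_t nonce;
} __attribute__((packed));

static const struct proto_hdr *parse_hdr(const uint8_t *buf, size_t len)
{
	if (len < sizeof(struct proto_hdr))	/* the pskb_may_pull() step */
		return NULL;
	return (const struct proto_hdr *)buf;
}

int main(void)
{
	uint8_t pkt[4] = { 0, 2, 0xab, 0xcd };
	const struct proto_hdr *hdr = parse_hdr(pkt, sizeof(pkt));

	if (hdr)
		printf("type=%u\n", hdr->type);

	/* A 3-byte buffer is rejected instead of being over-read. */
	if (!parse_hdr(pkt, 3))
		printf("short packet dropped\n");
	return 0;
}
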
@@ -2234,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
ipv4_is_zeronet(amta->ip4))
return true;
+ if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
+ amt->nonce != amta->nonce)
+ return true;
+
amt->remote_ip = amta->ip4;
netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
mod_delayed_work(amt_wq, &amt->req_wq, 0);
@@ -2249,19 +2282,30 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
struct ethhdr *eth;
struct iphdr *iph;
+ if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
+ return true;
+
+ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
if (amtmd->reserved || amtmd->version)
return true;
- hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
return true;
+
skb_reset_network_header(skb);
skb_push(skb, sizeof(*eth));
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = eth_hdr(skb);
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
iph = ip_hdr(skb);
+
if (iph->version == 4) {
if (!ipv4_is_multicast(iph->daddr))
return true;
@@ -2272,6 +2316,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
} else if (iph->version == 6) {
struct ipv6hdr *ip6h;
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return true;
+
ip6h = ipv6_hdr(skb);
if (!ipv6_addr_is_multicast(&ip6h->daddr))
return true;
@@ -2304,8 +2351,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
struct iphdr *iph;
int hdr_size, len;
- hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
-
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
@@ -2313,54 +2359,66 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
if (amtmq->reserved || amtmq->version)
return true;
- hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
+ if (amtmq->nonce != amt->nonce)
+ return true;
+
+ hdr_size -= sizeof(*eth);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
return true;
+
oeth = eth_hdr(skb);
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
skb_reset_network_header(skb);
eth = eth_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
iph = ip_hdr(skb);
if (iph->version == 4) {
- if (!ipv4_is_multicast(iph->daddr))
+ if (READ_ONCE(amt->ready4))
return true;
+
if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
sizeof(*ihv3)))
return true;
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+
ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready4 = true;
+ WRITE_ONCE(amt->ready4, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = ihv3->qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IP);
eth->h_proto = htons(ETH_P_IP);
ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
} else if (iph->version == 6) {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld2_query *mld2q;
+ struct ipv6hdr *ip6h;
- if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ if (READ_ONCE(amt->ready6))
return true;
+
if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
sizeof(*mld2q)))
return true;
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+
mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready6 = true;
+ WRITE_ONCE(amt->ready6, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = mld2q->mld2q_qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IPV6);
eth->h_proto = htons(ETH_P_IPV6);
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
@@ -2373,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ local_bh_disable();
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
amt->dev->stats.rx_dropped++;
}
+ local_bh_enable();
return false;
}
@@ -2387,23 +2447,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
{
struct amt_header_membership_update *amtmu;
struct amt_tunnel_list *tunnel;
- struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
- int len;
+ int len, hdr_size;
iph = ip_hdr(skb);
- udph = udp_hdr(skb);
- if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
- false, false))
+ hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
return true;
- amtmu = (struct amt_header_membership_update *)skb->data;
+ amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
if (amtmu->reserved || amtmu->version)
return true;
- skb_pull(skb, sizeof(*amtmu));
+ if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
+ return true;
+
skb_reset_network_header(skb);
list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
@@ -2421,9 +2481,12 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
}
}
- return false;
+ return true;
report:
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
iph = ip_hdr(skb);
if (iph->version == 4) {
if (ip_mc_check_igmp(skb)) {
@@ -2470,7 +2533,7 @@ report:
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
true);
dev_sw_netstats_rx_add(amt->dev, len);
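
Note the semantic change at the end of the tunnel lookup above: when no tunnel matches the update's source, the handler now returns true rather than false, so the caller treats the packet as an error and drops it instead of counting it as handled. The report path also gains a pskb_may_pull() check before re-reading the inner IP header, the same guard pattern sketched earlier.
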
@@ -2616,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
if (tunnel->ip4 == iph->saddr)
goto send;
+ spin_lock_bh(&amt->lock);
if (amt->nr_tunnels >= amt->max_tunnels) {
+ spin_unlock_bh(&amt->lock);
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
return true;
}
@@ -2624,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
tunnel = kzalloc(sizeof(*tunnel) +
(sizeof(struct hlist_head) * amt->hash_buckets),
GFP_ATOMIC);
- if (!tunnel)
+ if (!tunnel) {
+ spin_unlock_bh(&amt->lock);
return true;
+ }
tunnel->source_port = udph->source;
tunnel->ip4 = iph->saddr;
@@ -2638,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
- spin_lock_bh(&amt->lock);
list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
tunnel->key = amt->key;
- amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
amt->nr_tunnels++;
mod_delayed_work(amt_wq, &tunnel->gc_wq,
msecs_to_jiffies(amt_gmi(amt)));
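
The locking rework in amt_request_handler() widens the critical section: previously the nr_tunnels capacity test ran outside amt->lock and only the list insertion was protected, so two concurrent requests could both pass the test and overshoot max_tunnels. Now the lock is taken before the test, released on each early-exit path, and the allocation uses GFP_ATOMIC because sleeping is forbidden under a BH-disabling spinlock. A self-contained sketch of the pattern (types and names are illustrative, not from amt.c):

	#include <linux/rculist.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct example_entry {
		struct list_head list;
	};

	struct example_dev {
		spinlock_t lock;
		struct list_head entries;
		int nr, max;
	};

	static int example_add(struct example_dev *d)
	{
		struct example_entry *e;

		spin_lock_bh(&d->lock);
		if (d->nr >= d->max) {
			spin_unlock_bh(&d->lock);
			return -ENOSPC;	/* capacity test under the same lock */
		}
		e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* no sleeping while locked */
		if (!e) {
			spin_unlock_bh(&d->lock);
			return -ENOMEM;
		}
		list_add_tail_rcu(&e->list, &d->entries);
		d->nr++;
		spin_unlock_bh(&d->lock);
		return 0;
	}
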
@@ -2666,6 +2732,38 @@ send:
return false;
}
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
+{
+ int type = amt_parse_type(skb);
+ int err = 1;
+
+ if (type == -1)
+ goto drop;
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ err = amt_advertisement_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ err = amt_membership_query_handler(amt, skb);
+ if (!err)
+ return;
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+}
+
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2677,6 +2775,7 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
amt = rcu_dereference_sk_user_data(sk);
if (!amt) {
err = true;
+ kfree_skb(skb);
goto out;
}
@@ -2696,8 +2795,11 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- if (amt_advertisement_handler(amt, skb))
- amt->dev->stats.rx_dropped++;
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
+ goto drop;
+ }
goto out;
case AMT_MSG_MULTICAST_DATA:
if (iph->saddr != amt->remote_ip) {
@@ -2716,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- err = amt_membership_query_handler(amt, skb);
- if (err)
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
goto drop;
- else
- goto out;
+ }
+ goto out;
default:
err = true;
netdev_dbg(amt->dev, "Invalid type of Gateway\n");
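
With these hunks, amt_rcv() no longer runs the advertisement and membership-query handlers directly from the UDP encap path (softirq context); it parks the skb on a small per-device event array and lets the work item added below process it. The producer, amt_queue_event(), is not part of this excerpt; a plausible sketch that matches the consumer below, hypothetical in its details:

	/* Hypothetical producer pairing with amt_event_work(); the real
	 * amt_queue_event() may differ.
	 */
	static int example_queue_event(struct amt_dev *amt, enum amt_event event,
				       struct sk_buff *skb)
	{
		int index;

		spin_lock_bh(&amt->lock);
		if (amt->nr_events >= AMT_MAX_EVENTS) {
			spin_unlock_bh(&amt->lock);
			return 1;	/* queue full: caller drops the skb */
		}
		index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
		amt->events[index].event = event;
		amt->events[index].skb = skb;
		amt->nr_events++;
		spin_unlock_bh(&amt->lock);

		queue_work(amt_wq, &amt->event_wq);
		return 0;
	}
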
@@ -2758,6 +2861,45 @@ out:
return 0;
}
+static void amt_event_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
+ struct sk_buff *skb;
+ u8 event;
+ int i;
+
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events == 0) {
+ spin_unlock_bh(&amt->lock);
+ return;
+ }
+ event = amt->events[amt->event_idx].event;
+ skb = amt->events[amt->event_idx].skb;
+ amt->events[amt->event_idx].event = AMT_EVENT_NONE;
+ amt->events[amt->event_idx].skb = NULL;
+ amt->nr_events--;
+ amt->event_idx++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ spin_unlock_bh(&amt->lock);
+
+ switch (event) {
+ case AMT_EVENT_RECEIVE:
+ amt_gw_rcv(amt, skb);
+ break;
+ case AMT_EVENT_SEND_DISCOVERY:
+ amt_event_send_discovery(amt);
+ break;
+ case AMT_EVENT_SEND_REQUEST:
+ amt_event_send_request(amt);
+ break;
+ default:
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
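
The consumer drains at most AMT_MAX_EVENTS entries per invocation and drops amt->lock before dispatching each one, so event handlers may sleep or take the lock themselves without deadlocking against producers; event_idx advancing modulo AMT_MAX_EVENTS makes events[] a bounded ring buffer.
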
+
static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2782,7 +2924,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
break;
case AMT_MSG_REQUEST:
case AMT_MSG_MEMBERSHIP_UPDATE:
- if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
mod_delayed_work(amt_wq, &amt->req_wq, 0);
break;
default:
@@ -2845,6 +2987,8 @@ static int amt_dev_open(struct net_device *dev)
amt->ready4 = false;
amt->ready6 = false;
+ amt->event_idx = 0;
+ amt->nr_events = 0;
err = amt_socket_create(amt);
if (err)
@@ -2852,6 +2996,7 @@ static int amt_dev_open(struct net_device *dev)
amt->req_cnt = 0;
amt->remote_ip = 0;
+ amt->nonce = 0;
get_random_bytes(&amt->key, sizeof(siphash_key_t));
amt->status = AMT_STATUS_INIT;
@@ -2870,6 +3015,8 @@ static int amt_dev_stop(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
struct amt_tunnel_list *tunnel, *tmp;
struct socket *sock;
+ struct sk_buff *skb;
+ int i;
cancel_delayed_work_sync(&amt->req_wq);
cancel_delayed_work_sync(&amt->discovery_wq);
@@ -2882,6 +3029,14 @@ static int amt_dev_stop(struct net_device *dev)
if (sock)
udp_tunnel_sock_release(sock);
+ cancel_work_sync(&amt->event_wq);
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ skb = amt->events[i].skb;
+ kfree_skb(skb);
+ amt->events[i].event = AMT_EVENT_NONE;
+ amt->events[i].skb = NULL;
+ }
+
amt->ready4 = false;
amt->ready6 = false;
amt->req_cnt = 0;
@@ -3073,7 +3228,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
goto err;
}
if (amt->mode == AMT_MODE_RELAY) {
- amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
amt->qri = 10;
dev->needed_headroom = amt->stream_dev->needed_headroom +
AMT_RELAY_HLEN;
@@ -3124,8 +3279,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_WORK(&amt->event_wq, amt_event_work);
INIT_LIST_HEAD(&amt->tunnel_list);
-
return 0;
err:
dev_put(amt->stream_dev);
@@ -3258,7 +3413,7 @@ static int __init amt_init(void)
if (err < 0)
goto unregister_notifier;
- amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+ amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
if (!amt_wq) {
err = -ENOMEM;
goto rtnl_unregister;
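
A max_active of 0 in alloc_workqueue() selects the default concurrency limit (WQ_DFL_ACTIVE) instead of capping the queue at a single in-flight work item as the old value of 1 did, which matters now that every AMT device queues its own event work. Illustrative call, not tied to amt.c:

	wq = alloc_workqueue("example", WQ_UNBOUND, 0);	/* 0 = default max_active */
	if (!wq)
		return -ENOMEM;
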
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 90b9f1d6eda9..b38ed52b82bc 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -39,17 +39,6 @@ config DEV_APPLETALK
connect to the AppleTalk network, say Y.
-config LTPC
- tristate "Apple/Farallon LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
- help
- This allows you to use the AppleTalk PC card to connect to LocalTalk
- networks. The card is also known as the Farallon PhoneNet PC card.
- If you are in doubt, this card is the one with the 65C02 chip on it.
- You also need version 1.3.3 or later of the netatalk package.
- This driver is experimental, which means that it may not work.
- See the file <file:Documentation/networking/device_drivers/appletalk/ltpc.rst>.
-
config COPS
tristate "COPS LocalTalk PC support"
depends on DEV_APPLETALK && ISA
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
index 903da3303f41..6db2943ce5d6 100644
--- a/drivers/net/appletalk/Makefile
+++ b/drivers/net/appletalk/Makefile
@@ -5,4 +5,3 @@
obj-$(CONFIG_IPDDP) += ipddp.o
obj-$(CONFIG_COPS) += cops.o
-obj-$(CONFIG_LTPC) += ltpc.o
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
deleted file mode 100644
index 388d7b3bd4c2..000000000000
--- a/drivers/net/appletalk/ltpc.c
+++ /dev/null
@@ -1,1277 +0,0 @@
-/*** ltpc.c -- a driver for the LocalTalk PC card.
- *
- * Copyright (c) 1995,1996 Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This is ALPHA code at best. It may not work for you. It may
- * damage your equipment. It may damage your relations with other
- * users of your network. Use it at your own risk!
- *
- * Based in part on:
- * skeleton.c by Donald Becker
- * dummy.c by Nick Holloway and Alan Cox
- * loopback.c by Ross Biro, Fred van Kampen, Donald Becker
- * the netatalk source code (UMICH)
- * lots of work on the card...
- *
- * I do not have access to the (proprietary) SDK that goes with the card.
- * If you do, I don't want to know about it, and you can probably write
- * a better driver yourself anyway. This does mean that the pieces that
- * talk to the card are guesswork on my part, so use at your own risk!
- *
- * This is my first try at writing Linux networking code, and is also
- * guesswork. Again, use at your own risk! (Although on this part, I'd
- * welcome suggestions)
- *
- * This is a loadable kernel module which seems to work at my site
- * consisting of a 1.2.13 linux box running netatalk 1.3.3, and with
- * the kernel support from 1.3.3b2 including patches routing.patch
- * and ddp.disappears.from.chooser. In order to run it, you will need
- * to patch ddp.c and aarp.c in the kernel, but only a little...
- *
- * I'm fairly confident that while this is arguably badly written, the
- * problems that people experience will be "higher level", that is, with
- * complications in the netatalk code. The driver itself doesn't do
- * anything terribly complicated -- it pretends to be an ether device
- * as far as netatalk is concerned, strips the DDP data out of the ether
- * frame and builds a LLAP packet to send out the card. In the other
- * direction, it receives LLAP frames from the card and builds a fake
- * ether packet that it then tosses up to the networking code. You can
- * argue (correctly) that this is an ugly way to do things, but it
- * requires a minimal amount of fooling with the code in ddp.c and aarp.c.
- *
- * The card will do a lot more than is used here -- I *think* it has the
- * layers up through ATP. Even if you knew how that part works (which I
- * don't) it would be a big job to carve up the kernel ddp code to insert
- * things at a higher level, and probably a bad idea...
- *
- * There are a number of other cards that do LocalTalk on the PC. If
- * nobody finds any insurmountable (at the netatalk level) problems
- * here, this driver should encourage people to put some work into the
- * other cards (some of which I gather are still commercially available)
- * and also to put hooks for LocalTalk into the official ddp code.
- *
- * I welcome comments and suggestions. This is my first try at Linux
- * networking stuff, and there are probably lots of things that I did
- * suboptimally.
- *
- ***/
-
-/***
- *
- * $Log: ltpc.c,v $
- * Revision 1.1.2.1 2000/03/01 05:35:07 jgarzik
- * at and tr cleanup
- *
- * Revision 1.8 1997/01/28 05:44:54 bradford
- * Clean up for non-module a little.
- * Hacked about a bit to clean things up - Alan Cox
- * Probably broken it from the original 1.8
- *
-
- * 1998/11/09: David Huggins-Daines <dhd@debian.org>
- * Cleaned up the initialization code to use the standard autoirq methods,
- and to probe for things in the standard order of i/o, irq, dma. This
- removes the "reset the reset" hack, because I couldn't figure out an
- easy way to get the card to trigger an interrupt after it.
- * Added support for passing configuration parameters on the kernel command
- line and through insmod
- * Changed the device name from "ltalk0" to "lt0", both to conform with the
- other localtalk driver, and to clear up the inconsistency between the
- module and the non-module versions of the driver :-)
- * Added a bunch of comments (I was going to make some enums for the state
- codes and the register offsets, but I'm still not sure exactly what their
- semantics are)
- * Don't poll anymore in interrupt-driven mode
- * It seems to work as a module now (as of 2.1.127), but I don't think
- I'm responsible for that...
-
- *
- * Revision 1.7 1996/12/12 03:42:33 bradford
- * DMA alloc cribbed from 3c505.c.
- *
- * Revision 1.6 1996/12/12 03:18:58 bradford
- * Added virt_to_bus; works in 2.1.13.
- *
- * Revision 1.5 1996/12/12 03:13:22 root
- * xmitQel initialization -- think through better though.
- *
- * Revision 1.4 1996/06/18 14:55:55 root
- * Change names to ltpc. Tabs. Took a shot at dma alloc,
- * although more needs to be done eventually.
- *
- * Revision 1.3 1996/05/22 14:59:39 root
- * Change dev->open, dev->close to track dummy.c in 1.99.(around 7)
- *
- * Revision 1.2 1996/05/22 14:58:24 root
- * Change tabs mostly.
- *
- * Revision 1.1 1996/04/23 04:45:09 root
- * Initial revision
- *
- * Revision 0.16 1996/03/05 15:59:56 root
- * Change ARPHRD_LOCALTLK definition to the "real" one.
- *
- * Revision 0.15 1996/03/05 06:28:30 root
- * Changes for kernel 1.3.70. Still need a few patches to kernel, but
- * it's getting closer.
- *
- * Revision 0.14 1996/02/25 17:38:32 root
- * More cleanups. Removed query to card on get_stats.
- *
- * Revision 0.13 1996/02/21 16:27:40 root
- * Refix debug_print_skb. Fix mac.raw gotcha that appeared in 1.3.65.
- * Clean up receive code a little.
- *
- * Revision 0.12 1996/02/19 16:34:53 root
- * Fix debug_print_skb. Kludge outgoing snet to 0 when using startup
- * range. Change debug to mask: 1 for verbose, 2 for higher level stuff
- * including packet printing, 4 for lower level (card i/o) stuff.
- *
- * Revision 0.11 1996/02/12 15:53:38 root
- * Added router sends (requires new aarp.c patch)
- *
- * Revision 0.10 1996/02/11 00:19:35 root
- * Change source LTALK_LOGGING debug switch to insmod ... debug=2.
- *
- * Revision 0.9 1996/02/10 23:59:35 root
- * Fixed those fixes for 1.2 -- DANGER! The at.h that comes with netatalk
- * has a *different* definition of struct sockaddr_at than the Linux kernel
- * does. This is an "insidious and invidious" bug...
- * (Actually the preceding comment is false -- it's the atalk.h in the
- * ancient atalk-0.06 that's the problem)
- *
- * Revision 0.8 1996/02/10 19:09:00 root
- * Merge 1.3 changes. Tested OK under 1.3.60.
- *
- * Revision 0.7 1996/02/10 17:56:56 root
- * Added debug=1 parameter on insmod for debugging prints. Tried
- * to fix timer unload on rmmod, but I don't think that's the problem.
- *
- * Revision 0.6 1995/12/31 19:01:09 root
- * Clean up rmmod, irq comments per feedback from Corin Anderson (Thanks Corey!)
- * Clean up initial probing -- sometimes the card wakes up latched in reset.
- *
- * Revision 0.5 1995/12/22 06:03:44 root
- * Added comments in front and cleaned up a bit.
- * This version sent out to people.
- *
- * Revision 0.4 1995/12/18 03:46:44 root
- * Return shortDDP to longDDP fake to 0/0. Added command structs.
- *
- ***/
-
-/* ltpc jumpers are:
-*
-* Interrupts -- set at most one. If none are set, the driver uses
-* polled mode. Because the card was developed in the XT era, the
-* original documentation refers to IRQ2. Since you'll be running
-* this on an AT (or later) class machine, that really means IRQ9.
-*
-* SW1 IRQ 4
-* SW2 IRQ 3
-* SW3 IRQ 9 (2 in original card documentation only applies to XT)
-*
-*
-* DMA -- choose DMA 1 or 3, and set both corresponding switches.
-*
-* SW4 DMA 3
-* SW5 DMA 1
-* SW6 DMA 3
-* SW7 DMA 1
-*
-*
-* I/O address -- choose one.
-*
-* SW8 220 / 240
-*/
-
-/* To have some stuff logged, do
-* insmod ltpc.o debug=1
-*
-* For a whole bunch of stuff, use higher numbers.
-*
-* The default is 0, i.e. no messages except for the probe results.
-*/
-
-/* insmod-tweakable variables */
-static int debug;
-#define DEBUG_VERBOSE 1
-#define DEBUG_UPPER 2
-#define DEBUG_LOWER 4
-
-static int io;
-static int irq;
-static int dma;
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_ltalk.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/atalk.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <net/Space.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-
-/* our stuff */
-#include "ltpc.h"
-
-static DEFINE_SPINLOCK(txqueue_lock);
-static DEFINE_SPINLOCK(mbox_lock);
-
-/* function prototypes */
-static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen);
-static int sendup_buffer (struct net_device *dev);
-
-/* Dma Memory related stuff, cribbed directly from 3c505.c */
-
-static unsigned long dma_mem_alloc(int size)
-{
- int order = get_order(size);
-
- return __get_dma_pages(GFP_KERNEL, order);
-}
-
-/* DMA data buffer, DMA command buffer */
-static unsigned char *ltdmabuf;
-static unsigned char *ltdmacbuf;
-
-/* private struct, holds our appletalk address */
-
-struct ltpc_private
-{
- struct atalk_addr my_addr;
-};
-
-/* transmit queue element struct */
-
-struct xmitQel {
- struct xmitQel *next;
- /* command buffer */
- unsigned char *cbuf;
- short cbuflen;
- /* data buffer */
- unsigned char *dbuf;
- short dbuflen;
- unsigned char QWrite; /* read or write data */
- unsigned char mailbox;
-};
-
-/* the transmit queue itself */
-
-static struct xmitQel *xmQhd, *xmQtl;
-
-static void enQ(struct xmitQel *qel)
-{
- unsigned long flags;
- qel->next = NULL;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if (xmQtl) {
- xmQtl->next = qel;
- } else {
- xmQhd = qel;
- }
- xmQtl = qel;
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- if (debug & DEBUG_LOWER)
- printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
-}
-
-static struct xmitQel *deQ(void)
-{
- unsigned long flags;
- int i;
- struct xmitQel *qel=NULL;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if (xmQhd) {
- qel = xmQhd;
- xmQhd = qel->next;
- if(!xmQhd) xmQtl = NULL;
- }
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- if ((debug & DEBUG_LOWER) && qel) {
- int n;
- printk(KERN_DEBUG "ltpc: dequeued command ");
- n = qel->cbuflen;
- if (n>100) n=100;
- for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
- printk("\n");
- }
-
- return qel;
-}
-
-/* and... the queue elements we'll be using */
-static struct xmitQel qels[16];
-
-/* and their corresponding mailboxes */
-static unsigned char mailbox[16];
-static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-
-static int wait_timeout(struct net_device *dev, int c)
-{
- /* returns true if it stayed c */
- /* this uses base+6, but it's ok */
- int i;
-
- /* twenty second or so total */
-
- for(i=0;i<200000;i++) {
- if ( c != inb_p(dev->base_addr+6) ) return 0;
- udelay(100);
- }
- return 1; /* timed out */
-}
-
-/* get the first free mailbox */
-
-static int getmbox(void)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&mbox_lock, flags);
- for(i=1;i<16;i++) if(!mboxinuse[i]) {
- mboxinuse[i]=1;
- spin_unlock_irqrestore(&mbox_lock, flags);
- return i;
- }
- spin_unlock_irqrestore(&mbox_lock, flags);
- return 0;
-}
-
-/* read a command from the card */
-static void handlefc(struct net_device *dev)
-{
- /* called *only* from idle, non-reentrant */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmacbuf));
- set_dma_count(dma,50);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
-}
-
-/* read data from the card */
-static void handlefd(struct net_device *dev)
-{
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
- sendup_buffer(dev);
-}
-
-static void handlewrite(struct net_device *dev)
-{
- /* called *only* from idle, non-reentrant */
- /* on entry, 0xfb and ltdmabuf holds data */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_WRITE);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfb) ) {
- flags=claim_dma_lock();
- printk("timed out in handlewrite, dma res %d\n",
- get_dma_residue(dev->dma) );
- release_dma_lock(flags);
- }
-}
-
-static void handleread(struct net_device *dev)
-{
- /* on entry, 0xfb */
- /* on exit, ltdmabuf holds data */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
- if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
-}
-
-static void handlecommand(struct net_device *dev)
-{
- /* on entry, 0xfa and ltdmacbuf holds command */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_WRITE);
- set_dma_addr(dma,virt_to_bus(ltdmacbuf));
- set_dma_count(dma,50);
- enable_dma(dma);
- release_dma_lock(flags);
- inb_p(base+3);
- inb_p(base+2);
- if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
-}
-
-/* ready made command for getting the result from the card */
-static unsigned char rescbuf[2] = {LT_GETRESULT,0};
-static unsigned char resdbuf[2];
-
-static int QInIdle;
-
-/* idle expects to be called with the IRQ line high -- either because of
- * an interrupt, or because the line is tri-stated
- */
-
-static void idle(struct net_device *dev)
-{
- unsigned long flags;
- int state;
- /* FIXME This is initialized to shut the warning up, but I need to
- * think this through again.
- */
- struct xmitQel *q = NULL;
- int oops;
- int i;
- int base = dev->base_addr;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if(QInIdle) {
- spin_unlock_irqrestore(&txqueue_lock, flags);
- return;
- }
- QInIdle = 1;
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- /* this tri-states the IRQ line */
- (void) inb_p(base+6);
-
- oops = 100;
-
-loop:
- if (0>oops--) {
- printk("idle: looped too many times\n");
- goto done;
- }
-
- state = inb_p(base+6);
- if (state != inb_p(base+6)) goto loop;
-
- switch(state) {
- case 0xfc:
- /* incoming command */
- if (debug & DEBUG_LOWER) printk("idle: fc\n");
- handlefc(dev);
- break;
- case 0xfd:
- /* incoming data */
- if(debug & DEBUG_LOWER) printk("idle: fd\n");
- handlefd(dev);
- break;
- case 0xf9:
- /* result ready */
- if (debug & DEBUG_LOWER) printk("idle: f9\n");
- if(!mboxinuse[0]) {
- mboxinuse[0] = 1;
- qels[0].cbuf = rescbuf;
- qels[0].cbuflen = 2;
- qels[0].dbuf = resdbuf;
- qels[0].dbuflen = 2;
- qels[0].QWrite = 0;
- qels[0].mailbox = 0;
- enQ(&qels[0]);
- }
- inb_p(dev->base_addr+1);
- inb_p(dev->base_addr+0);
- if( wait_timeout(dev,0xf9) )
- printk("timed out idle f9\n");
- break;
- case 0xf8:
- /* ?? */
- if (xmQhd) {
- inb_p(dev->base_addr+1);
- inb_p(dev->base_addr+0);
- if(wait_timeout(dev,0xf8) )
- printk("timed out idle f8\n");
- } else {
- goto done;
- }
- break;
- case 0xfa:
- /* waiting for command */
- if(debug & DEBUG_LOWER) printk("idle: fa\n");
- if (xmQhd) {
- q=deQ();
- memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
- ltdmacbuf[1] = q->mailbox;
- if (debug>1) {
- int n;
- printk("ltpc: sent command ");
- n = q->cbuflen;
- if (n>100) n=100;
- for(i=0;i<n;i++)
- printk("%02x ",ltdmacbuf[i]);
- printk("\n");
- }
-
- handlecommand(dev);
-
- if (0xfa == inb_p(base + 6)) {
- /* we timed out, so return */
- goto done;
- }
- } else {
- /* we don't seem to have a command */
- if (!mboxinuse[0]) {
- mboxinuse[0] = 1;
- qels[0].cbuf = rescbuf;
- qels[0].cbuflen = 2;
- qels[0].dbuf = resdbuf;
- qels[0].dbuflen = 2;
- qels[0].QWrite = 0;
- qels[0].mailbox = 0;
- enQ(&qels[0]);
- } else {
- printk("trouble: response command already queued\n");
- goto done;
- }
- }
- break;
- case 0xfb:
- /* data transfer ready */
- if(debug & DEBUG_LOWER) printk("idle: fb\n");
- if(q->QWrite) {
- memcpy(ltdmabuf,q->dbuf,q->dbuflen);
- handlewrite(dev);
- } else {
- handleread(dev);
- /* non-zero mailbox numbers are for
- commands, 0 is for GETRESULT
- requests */
- if(q->mailbox) {
- memcpy(q->dbuf,ltdmabuf,q->dbuflen);
- } else {
- /* this was a result */
- mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
- mboxinuse[0]=0;
- }
- }
- break;
- }
- goto loop;
-
-done:
- QInIdle=0;
-
- /* now set the interrupts back as appropriate */
- /* the first read takes it out of tri-state (but still high) */
- /* the second resets it */
- /* note that after this point, any read of base+6 will
- trigger an interrupt */
-
- if (dev->irq) {
- inb_p(base+7);
- inb_p(base+7);
- }
-}
-
-
-static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen)
-{
-
- int i = getmbox();
- int ret;
-
- if(i) {
- qels[i].cbuf = cbuf;
- qels[i].cbuflen = cbuflen;
- qels[i].dbuf = dbuf;
- qels[i].dbuflen = dbuflen;
- qels[i].QWrite = 1;
- qels[i].mailbox = i; /* this should be initted rather */
- enQ(&qels[i]);
- idle(dev);
- ret = mailbox[i];
- mboxinuse[i]=0;
- return ret;
- }
- printk("ltpc: could not allocate mbox\n");
- return -1;
-}
-
-static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen)
-{
-
- int i = getmbox();
- int ret;
-
- if(i) {
- qels[i].cbuf = cbuf;
- qels[i].cbuflen = cbuflen;
- qels[i].dbuf = dbuf;
- qels[i].dbuflen = dbuflen;
- qels[i].QWrite = 0;
- qels[i].mailbox = i; /* this should be initted rather */
- enQ(&qels[i]);
- idle(dev);
- ret = mailbox[i];
- mboxinuse[i]=0;
- return ret;
- }
- printk("ltpc: could not allocate mbox\n");
- return -1;
-}
-
-/* end of idle handlers -- what should be seen is do_read, do_write */
-
-static struct timer_list ltpc_timer;
-static struct net_device *ltpc_timer_dev;
-
-static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
-
-static int read_30 ( struct net_device *dev)
-{
- lt_command c;
- c.getflags.command = LT_GETFLAGS;
- return do_read(dev, &c, sizeof(c.getflags),&c,0);
-}
-
-static int set_30 (struct net_device *dev,int x)
-{
- lt_command c;
- c.setflags.command = LT_SETFLAGS;
- c.setflags.flags = x;
- return do_write(dev, &c, sizeof(c.setflags),&c,0);
-}
-
-/* LLAP to DDP translation */
-
-static int sendup_buffer (struct net_device *dev)
-{
- /* on entry, command is in ltdmacbuf, data in ltdmabuf */
- /* called from idle, non-reentrant */
-
- int dnode, snode, llaptype, len;
- int sklen;
- struct sk_buff *skb;
- struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
-
- if (ltc->command != LT_RCVLAP) {
- printk("unknown command 0x%02x from ltpc card\n",ltc->command);
- return -1;
- }
- dnode = ltc->dnode;
- snode = ltc->snode;
- llaptype = ltc->laptype;
- len = ltc->length;
-
- sklen = len;
- if (llaptype == 1)
- sklen += 8; /* correct for short ddp */
- if(sklen > 800) {
- printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
- dev->name,sklen);
- return -1;
- }
-
- if ( (llaptype==0) || (llaptype>2) ) {
- printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
- return -1;
- }
-
-
- skb = dev_alloc_skb(3+sklen);
- if (skb == NULL)
- {
- printk("%s: dropping packet due to memory squeeze.\n",
- dev->name);
- return -1;
- }
- skb->dev = dev;
-
- if (sklen > len)
- skb_reserve(skb,8);
- skb_put(skb,len+3);
- skb->protocol = htons(ETH_P_LOCALTALK);
- /* add LLAP header */
- skb->data[0] = dnode;
- skb->data[1] = snode;
- skb->data[2] = llaptype;
- skb_reset_mac_header(skb); /* save pointer to llap header */
- skb_pull(skb,3);
-
- /* copy ddp(s,e)hdr + contents */
- skb_copy_to_linear_data(skb, ltdmabuf, len);
-
- skb_reset_transport_header(skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* toss it onwards */
- netif_rx(skb);
- return 0;
-}
-
-/* the handler for the board interrupt */
-
-static irqreturn_t
-ltpc_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
-
- if (dev==NULL) {
- printk("ltpc_interrupt: unknown device.\n");
- return IRQ_NONE;
- }
-
- inb_p(dev->base_addr+6); /* disable further interrupts from board */
-
- idle(dev); /* handle whatever is coming in */
-
- /* idle re-enables interrupts from board */
-
- return IRQ_HANDLED;
-}
-
-/***
- *
- * The ioctls that the driver responds to are:
- *
- * SIOCSIFADDR -- do probe using the passed node hint.
- * SIOCGIFADDR -- return net, node.
- *
- * some of this stuff should be done elsewhere.
- *
- ***/
-
-static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
- /* we'll keep the localtalk node address in dev->pa_addr */
- struct ltpc_private *ltpc_priv = netdev_priv(dev);
- struct atalk_addr *aa = &ltpc_priv->my_addr;
- struct lt_init c;
- int ltflags;
-
- if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");
-
- switch(cmd) {
- case SIOCSIFADDR:
-
- aa->s_net = sa->sat_addr.s_net;
-
- /* this does the probe and returns the node addr */
- c.command = LT_INIT;
- c.hint = sa->sat_addr.s_node;
-
- aa->s_node = do_read(dev,&c,sizeof(c),&c,0);
-
- /* get all llap frames raw */
- ltflags = read_30(dev);
- ltflags |= LT_FLAG_ALLLAP;
- set_30 (dev,ltflags);
-
- dev->broadcast[0] = 0xFF;
- dev->addr_len=1;
- dev_addr_set(dev, &aa->s_node);
-
- return 0;
-
- case SIOCGIFADDR:
-
- sa->sat_addr.s_net = aa->s_net;
- sa->sat_addr.s_node = aa->s_node;
-
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static void set_multicast_list(struct net_device *dev)
-{
- /* This needs to be present to keep netatalk happy. */
- /* Actually netatalk needs fixing! */
-}
-
-static int ltpc_poll_counter;
-
-static void ltpc_poll(struct timer_list *unused)
-{
- del_timer(&ltpc_timer);
-
- if(debug & DEBUG_VERBOSE) {
- if (!ltpc_poll_counter) {
- ltpc_poll_counter = 50;
- printk("ltpc poll is alive\n");
- }
- ltpc_poll_counter--;
- }
-
- /* poll 20 times per second */
- idle(ltpc_timer_dev);
- ltpc_timer.expires = jiffies + HZ/20;
- add_timer(&ltpc_timer);
-}
-
-/* DDP to LLAP translation */
-
-static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- /* in kernel 1.3.xx, on entry skb->data points to ddp header,
- * and skb->len is the length of the ddp data + ddp header
- */
- int i;
- struct lt_sendlap cbuf;
- unsigned char *hdr;
-
- cbuf.command = LT_SENDLAP;
- cbuf.dnode = skb->data[0];
- cbuf.laptype = skb->data[2];
- skb_pull(skb,3); /* skip past LLAP header */
- cbuf.length = skb->len; /* this is host order */
- skb_reset_transport_header(skb);
-
- if(debug & DEBUG_UPPER) {
- printk("command ");
- for(i=0;i<6;i++)
- printk("%02x ",((unsigned char *)&cbuf)[i]);
- printk("\n");
- }
-
- hdr = skb_transport_header(skb);
- do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
-
- if(debug & DEBUG_UPPER) {
- printk("sent %d ddp bytes\n",skb->len);
- for (i = 0; i < skb->len; i++)
- printk("%02x ", hdr[i]);
- printk("\n");
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* initialization stuff */
-
-static int __init ltpc_probe_dma(int base, int dma)
-{
- int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
- unsigned long timeout;
- unsigned long f;
-
- if (want & 1) {
- if (request_dma(1,"ltpc")) {
- want &= ~1;
- } else {
- f=claim_dma_lock();
- disable_dma(1);
- clear_dma_ff(1);
- set_dma_mode(1,DMA_MODE_WRITE);
- set_dma_addr(1,virt_to_bus(ltdmabuf));
- set_dma_count(1,sizeof(struct lt_mem));
- enable_dma(1);
- release_dma_lock(f);
- }
- }
- if (want & 2) {
- if (request_dma(3,"ltpc")) {
- want &= ~2;
- } else {
- f=claim_dma_lock();
- disable_dma(3);
- clear_dma_ff(3);
- set_dma_mode(3,DMA_MODE_WRITE);
- set_dma_addr(3,virt_to_bus(ltdmabuf));
- set_dma_count(3,sizeof(struct lt_mem));
- enable_dma(3);
- release_dma_lock(f);
- }
- }
- /* set up request */
-
- /* FIXME -- do timings better! */
-
- ltdmabuf[0] = LT_READMEM;
- ltdmabuf[1] = 1; /* mailbox */
- ltdmabuf[2] = 0; ltdmabuf[3] = 0; /* address */
- ltdmabuf[4] = 0; ltdmabuf[5] = 1; /* read 0x0100 bytes */
- ltdmabuf[6] = 0; /* dunno if this is necessary */
-
- inb_p(io+1);
- inb_p(io+0);
- timeout = jiffies+100*HZ/100;
- while(time_before(jiffies, timeout)) {
- if ( 0xfa == inb_p(io+6) ) break;
- }
-
- inb_p(io+3);
- inb_p(io+2);
- while(time_before(jiffies, timeout)) {
- if ( 0xfb == inb_p(io+6) ) break;
- }
-
- /* release the other dma channel (if we opened both of them) */
-
- if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
- want &= ~2;
- free_dma(3);
- }
-
- if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
- want &= ~1;
- free_dma(1);
- }
-
- if (!want)
- return 0;
-
- return (want & 2) ? 3 : 1;
-}
-
-static const struct net_device_ops ltpc_netdev = {
- .ndo_start_xmit = ltpc_xmit,
- .ndo_do_ioctl = ltpc_ioctl,
- .ndo_set_rx_mode = set_multicast_list,
-};
-
-static struct net_device * __init ltpc_probe(void)
-{
- struct net_device *dev;
- int err = -ENOMEM;
- int x=0,y=0;
- int autoirq;
- unsigned long f;
- unsigned long timeout;
-
- dev = alloc_ltalkdev(sizeof(struct ltpc_private));
- if (!dev)
- goto out;
-
- /* probe for the I/O port address */
-
- if (io != 0x240 && request_region(0x220,8,"ltpc")) {
- x = inb_p(0x220+6);
- if ( (x!=0xff) && (x>=0xf0) ) {
- io = 0x220;
- goto got_port;
- }
- release_region(0x220,8);
- }
- if (io != 0x220 && request_region(0x240,8,"ltpc")) {
- y = inb_p(0x240+6);
- if ( (y!=0xff) && (y>=0xf0) ){
- io = 0x240;
- goto got_port;
- }
- release_region(0x240,8);
- }
-
- /* give up in despair */
- printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
- err = -ENODEV;
- goto out1;
-
- got_port:
- /* probe for the IRQ line */
- if (irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- /* reset the interrupt line */
- inb_p(io+7);
- inb_p(io+7);
- /* trigger an interrupt (I hope) */
- inb_p(io+6);
- mdelay(2);
- autoirq = probe_irq_off(irq_mask);
-
- if (autoirq == 0) {
- printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
- } else {
- irq = autoirq;
- }
- }
-
- /* allocate a DMA buffer */
- ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
- if (!ltdmabuf) {
- printk(KERN_ERR "ltpc: mem alloc failed\n");
- err = -ENOMEM;
- goto out2;
- }
-
- ltdmacbuf = &ltdmabuf[800];
-
- if(debug & DEBUG_VERBOSE) {
- printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
- }
-
- /* reset the card */
-
- inb_p(io+1);
- inb_p(io+3);
-
- msleep(20);
-
- inb_p(io+0);
- inb_p(io+2);
- inb_p(io+7); /* clear reset */
- inb_p(io+4);
- inb_p(io+5);
- inb_p(io+5); /* enable dma */
- inb_p(io+6); /* tri-state interrupt line */
-
- ssleep(1);
-
- /* now, figure out which dma channel we're using, unless it's
- already been specified */
- /* well, 0 is a legal DMA channel, but the LTPC card doesn't
- use it... */
- dma = ltpc_probe_dma(io, dma);
- if (!dma) { /* no dma channel */
- printk(KERN_ERR "No DMA channel found on ltpc card.\n");
- err = -ENODEV;
- goto out3;
- }
-
- /* print out friendly message */
- if(irq)
- printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
- else
- printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
-
- dev->netdev_ops = &ltpc_netdev;
- dev->base_addr = io;
- dev->irq = irq;
- dev->dma = dma;
-
- /* the card will want to send a result at this point */
- /* (I think... leaving out this part makes the kernel crash,
- so I put it back in...) */
-
- f=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,0x100);
- enable_dma(dma);
- release_dma_lock(f);
-
- (void) inb_p(io+3);
- (void) inb_p(io+2);
- timeout = jiffies+100*HZ/100;
-
- while(time_before(jiffies, timeout)) {
- if( 0xf9 == inb_p(io+6))
- break;
- schedule();
- }
-
- if(debug & DEBUG_VERBOSE) {
- printk("setting up timer and irq\n");
- }
-
- /* grab it and don't let go :-) */
- if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
- {
- (void) inb_p(io+7); /* enable interrupts from board */
- (void) inb_p(io+7); /* and reset irq line */
- } else {
- if( irq )
- printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
- dev->irq = 0;
- /* polled mode -- 20 times per second */
- /* this is really, really slow... should it poll more often? */
- ltpc_timer_dev = dev;
- timer_setup(&ltpc_timer, ltpc_poll, 0);
-
- ltpc_timer.expires = jiffies + HZ/20;
- add_timer(&ltpc_timer);
- }
- err = register_netdev(dev);
- if (err)
- goto out4;
-
- return NULL;
-out4:
- del_timer_sync(&ltpc_timer);
- if (dev->irq)
- free_irq(dev->irq, dev);
-out3:
- free_pages((unsigned long)ltdmabuf, get_order(1000));
-out2:
- release_region(io, 8);
-out1:
- free_netdev(dev);
-out:
- return ERR_PTR(err);
-}
-
-#ifndef MODULE
-/* handles "ltpc=io,irq,dma" kernel command lines */
-static int __init ltpc_setup(char *str)
-{
- int ints[5];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- if (ints[0] == 0) {
- if (str && !strncmp(str, "auto", 4)) {
- /* do nothing :-) */
- }
- else {
- /* usage message */
- printk (KERN_ERR
- "ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
- return 0;
- }
- } else {
- io = ints[1];
- if (ints[0] > 1) {
- irq = ints[2];
- }
- if (ints[0] > 2) {
- dma = ints[3];
- }
- /* ignore any other parameters */
- }
- return 1;
-}
-
-__setup("ltpc=", ltpc_setup);
-#endif
-
-static struct net_device *dev_ltpc;
-
-MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(dma, int, dma, 0);
-
-
-static int __init ltpc_module_init(void)
-{
- if(io == 0)
- printk(KERN_NOTICE
- "ltpc: Autoprobing is not recommended for modules\n");
-
- dev_ltpc = ltpc_probe();
- return PTR_ERR_OR_ZERO(dev_ltpc);
-}
-module_init(ltpc_module_init);
-
-static void __exit ltpc_cleanup(void)
-{
-
- if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
- unregister_netdev(dev_ltpc);
-
- del_timer_sync(&ltpc_timer);
-
- if(debug & DEBUG_VERBOSE) printk("freeing irq\n");
-
- if (dev_ltpc->irq)
- free_irq(dev_ltpc->irq, dev_ltpc);
-
- if(debug & DEBUG_VERBOSE) printk("freeing dma\n");
-
- if (dev_ltpc->dma)
- free_dma(dev_ltpc->dma);
-
- if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");
-
- if (dev_ltpc->base_addr)
- release_region(dev_ltpc->base_addr,8);
-
- free_netdev(dev_ltpc);
-
- if(debug & DEBUG_VERBOSE) printk("free_pages\n");
-
- free_pages( (unsigned long) ltdmabuf, get_order(1000));
-
- if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
-}
-
-module_exit(ltpc_cleanup);
diff --git a/drivers/net/appletalk/ltpc.h b/drivers/net/appletalk/ltpc.h
deleted file mode 100644
index 58cf945732a4..000000000000
--- a/drivers/net/appletalk/ltpc.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*** ltpc.h
- *
- *
- ***/
-
-#define LT_GETRESULT 0x00
-#define LT_WRITEMEM 0x01
-#define LT_READMEM 0x02
-#define LT_GETFLAGS 0x04
-#define LT_SETFLAGS 0x05
-#define LT_INIT 0x10
-#define LT_SENDLAP 0x13
-#define LT_RCVLAP 0x14
-
-/* the flag that we care about */
-#define LT_FLAG_ALLLAP 0x04
-
-struct lt_getresult {
- unsigned char command;
- unsigned char mailbox;
-};
-
-struct lt_mem {
- unsigned char command;
- unsigned char mailbox;
- unsigned short addr; /* host order */
- unsigned short length; /* host order */
-};
-
-struct lt_setflags {
- unsigned char command;
- unsigned char mailbox;
- unsigned char flags;
-};
-
-struct lt_getflags {
- unsigned char command;
- unsigned char mailbox;
-};
-
-struct lt_init {
- unsigned char command;
- unsigned char mailbox;
- unsigned char hint;
-};
-
-struct lt_sendlap {
- unsigned char command;
- unsigned char mailbox;
- unsigned char dnode;
- unsigned char laptype;
- unsigned short length; /* host order */
-};
-
-struct lt_rcvlap {
- unsigned char command;
- unsigned char dnode;
- unsigned char snode;
- unsigned char laptype;
- unsigned short length; /* host order */
-};
-
-union lt_command {
- struct lt_getresult getresult;
- struct lt_mem mem;
- struct lt_setflags setflags;
- struct lt_getflags getflags;
- struct lt_init init;
- struct lt_sendlap sendlap;
- struct lt_rcvlap rcvlap;
-};
-typedef union lt_command lt_command;
-
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6382e1937cca..c580acb8b1d3 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+ return -EINVAL;
+
priv->ci = ci;
mm = &ci->misc_map;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index ba587e5fc24f..683203f87ae2 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -148,14 +148,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
err = IP6_ECN_decapsulate(oiph, skb);
if (unlikely(err)) {
if (log_ecn_error) {
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
@@ -221,11 +221,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
- udp_conf.family = AF_INET6;
-#else
- udp_conf.family = AF_INET;
-#endif
+
+ if (ipv6_mod_enabled())
+ udp_conf.family = AF_INET6;
+ else
+ udp_conf.family = AF_INET;
+
udp_conf.local_udp_port = port;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -448,7 +449,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -478,7 +479,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
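
The bareudp conversion swaps compile-time IS_ENABLED(CONFIG_IPV6) tests for ipv6_mod_enabled(), which is additionally false at runtime when the kernel was booted with ipv6.disable=1, so socket creation falls back to AF_INET rather than failing on an AF_INET6 socket the stack cannot provide. A minimal sketch (the declaring header lives with the IPv6 stub interface in recent trees; treat that as an assumption):

	/* Picks the address family the running kernel can actually serve. */
	static int example_pick_family(void)
	{
		return ipv6_mod_enabled() ? AF_INET6 : AF_INET;
	}
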
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6006c2e8fa2b..e58a1e0cadd2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -84,11 +84,13 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -225,7 +227,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -1021,8 +1023,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
-
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
@@ -1779,6 +1781,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
@@ -1994,42 +1997,30 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
@@ -2227,7 +2218,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
- ad_clear_agg(temp_aggregator);
+ if (temp_aggregator->num_of_ports == 0)
+ ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
@@ -2278,6 +2270,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2312,9 +2326,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
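
agg_select_timer becomes an atomic_t because it is decremented by the periodic state-machine work while being re-armed from other contexts without bond->mode_lock; the cmpxchg loop in bond_agg_timer_advance() implements decrement-only-if-nonzero, so a concurrent re-arm can never be raced below zero. The same idiom in isolation (illustrative only):

	#include <linux/atomic.h>

	/* Decrement *v unless it is already 0; return true when this
	 * call is the one that took it to 0 (the timer just expired).
	 */
	static bool example_timer_advance(atomic_t *v)
	{
		int val, nval;

		do {
			val = atomic_read(v);
			if (!val)
				return false;	/* unarmed or already expired */
			nval = val - 1;
		} while (atomic_cmpxchg(v, val, nval) != val);

		return nval == 0;
	}
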
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 533e476988f2..b9dbad3a8af8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/ndisc.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
@@ -652,6 +653,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
@@ -664,6 +666,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
@@ -1269,6 +1280,27 @@ unwind:
return res;
}
+/* determine if the packet is NA or NS */
+static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
+{
+ struct ipv6hdr *ip6hdr;
+ struct icmp6hdr *hdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
+ return true;
+
+ ip6hdr = ipv6_hdr(skb);
+ if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
+ return false;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
+ return true;
+
+ hdr = icmp6_hdr(skb);
+ return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
+ hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
+}
+
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
@@ -1280,12 +1312,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
return res;
if (rlb_enabled) {
- bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
+ bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
@@ -1348,8 +1380,11 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
/* Do not TX balance any multicast or broadcast */
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
- case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
+ if (alb_determine_nd(skb, bond))
+ break;
+ fallthrough;
+ case htons(ETH_P_IP):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
tx_slave = tlb_choose_channel(bond,
@@ -1432,10 +1467,12 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
break;
}
- if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ if (alb_determine_nd(skb, bond)) {
do_tx_balance = false;
break;
}
+
+ /* The IPv6 header is pulled by alb_determine_nd */
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
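
alb_determine_nd() deliberately errs on the side of caution: if the packet is too short to pull the IPv6 or ICMPv6 header it returns true, and the callers then skip TX balancing for that frame. Neighbour Solicitation/Advertisement must not be spread across slaves because peers learn the link-layer address from those frames; hashing them over multiple ports could poison neighbour caches, much as the DAD probes mentioned in the comment above would see false duplicates.
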
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07fc603c2fa7..e84c49bf4d0c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -88,6 +88,7 @@
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
+#include <net/ip6_route.h>
#include "bonding_priv.h"
@@ -864,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -889,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -900,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -1025,12 +1025,38 @@ out:
}
+/**
+ * bond_choose_primary_or_current - select the primary or high priority slave
+ * @bond: our bonding struct
+ *
+ * - Check if there is a primary link. If the primary link was set and is up,
+ * go on and do link reselection.
+ *
+ * - If primary link is not set or down, find the highest priority link.
+ * If the highest priority link is not current slave, set it as primary
+ * link and do link reselection.
+ */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
+ struct slave *slave, *hprio = NULL;
+ struct list_head *iter;
if (!prim || prim->link != BOND_LINK_UP) {
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP) {
+ hprio = hprio ?: slave;
+ if (slave->prio > hprio->prio)
+ hprio = slave;
+ }
+ }
+
+ if (hprio && hprio != curr) {
+ prim = hprio;
+ goto link_reselect;
+ }
+
if (!curr || curr->link != BOND_LINK_UP)
return NULL;
return curr;
@@ -1041,6 +1067,7 @@ static struct slave *bond_choose_primary_or_current(struct bonding *bond)
return prim;
}
+link_reselect:
if (!curr || curr->link != BOND_LINK_UP)
return prim;
@@ -1418,8 +1445,8 @@ static void bond_compute_features(struct bonding *bond)
struct list_head *iter;
struct slave *slave;
unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int gso_max_size = GSO_MAX_SIZE;
- u16 gso_max_segs = GSO_MAX_SEGS;
+ unsigned int tso_max_size = TSO_MAX_SIZE;
+ u16 tso_max_segs = TSO_MAX_SEGS;
if (!bond_has_slaves(bond))
goto done;
@@ -1448,8 +1475,8 @@ static void bond_compute_features(struct bonding *bond)
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
- gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
- gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
+ tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
+ tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
}
bond_dev->hard_header_len = max_hard_header_len;
@@ -1462,8 +1489,8 @@ done:
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->mpls_features = mpls_features;
- netif_set_gso_max_segs(bond_dev, gso_max_segs);
- netif_set_gso_max_size(bond_dev, gso_max_size);
+ netif_set_tso_max_segs(bond_dev, tso_max_segs);
+ netif_set_tso_max_size(bond_dev, tso_max_size);
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
@@ -1973,6 +2000,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
+ new_slave->last_tx = new_slave->last_rx;
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2051,7 +2080,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2136,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2379,10 +2406,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
@@ -2418,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -2794,31 +2821,15 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return ret;
}
-/* We go to the (large) trouble of VLAN tagging ARP frames because
- * switches in VLAN mode (especially if ports are configured as
- * "native" to a VLAN) might not pass non-tagged frames.
- */
-static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
- __be32 src_ip, struct bond_vlan_tag *tags)
+static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
+ struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct bond_vlan_tag *outer_tag = tags;
- struct net_device *slave_dev = slave->dev;
struct net_device *bond_dev = slave->bond->dev;
-
- slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
- arp_op, &dest_ip, &src_ip);
-
- skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
- NULL, slave_dev->dev_addr, NULL);
-
- if (!skb) {
- net_err_ratelimited("ARP packet allocation failed\n");
- return;
- }
+ struct net_device *slave_dev = slave->dev;
+ struct bond_vlan_tag *outer_tag = tags;
if (!tags || tags->vlan_proto == VLAN_N_VID)
- goto xmit;
+ return true;
tags++;
@@ -2835,7 +2846,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
+ return false;
}
tags++;
@@ -2848,8 +2859,35 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
outer_tag->vlan_id);
}
-xmit:
- arp_xmit(skb);
+ return true;
+}
+
+/* We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
+
+ skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+ NULL, slave_dev->dev_addr, NULL);
+
+ if (!skb) {
+ net_err_ratelimited("ARP packet allocation failed\n");
+ return;
+ }
+
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
+ arp_xmit(skb);
+ }
}
/* Validate the device path between the @start_dev and the @end_dev.
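bond_handle_vlan() factors the tag handling out of bond_arp_send() so the NS path below can reuse it: inner tags are written into the frame payload, while the outermost tag rides in the skb metadata. A simplified model with an explicit tag count instead of the VLAN_N_VID terminator; push_vlan_chain() is an illustrative name:

        static struct sk_buff *push_vlan_chain(struct sk_buff *skb,
                                               struct bond_vlan_tag *tags,
                                               int ntags)
        {
                int i;

                /* tags[0] is the outer tag; write tags[1..] into the frame */
                for (i = 1; i < ntags; i++) {
                        if (!tags[i].vlan_id)
                                continue;       /* untagged hop */
                        skb = vlan_insert_tag_set_proto(skb, tags[i].vlan_proto,
                                                        tags[i].vlan_id);
                        if (!skb)
                                return NULL;    /* helper consumed the skb */
                }
                /* outer tag goes into skb->vlan_tci via the accel helper */
                if (tags[0].vlan_id)
                        __vlan_hwaccel_put_tag(skb, tags[0].vlan_proto,
                                               tags[0].vlan_id);
                return skb;
        }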
@@ -2966,30 +3006,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
unsigned int alen;
- if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond) && is_arp) ||
- !slave_do_arp_validate_only(bond))
- slave->last_rx = jiffies;
- return RX_HANDLER_ANOTHER;
- } else if (!is_arp) {
- return RX_HANDLER_ANOTHER;
- }
-
alen = arp_hdr_len(bond->dev);
- slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
- __func__, skb->dev->name);
-
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
if (!arp)
@@ -3050,8 +3077,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
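dev_trans_start() reads the qdisc's notion of the last transmit, which LLTX drivers and stacked devices do not reliably update; the new per-slave last_tx is instead bumped explicitly on every probe the bond itself sends. A plausible reading of the helper pair (the real definitions live in include/net/bonding.h):

        static inline unsigned long slave_last_tx(struct slave *slave)
        {
                return slave->last_tx;
        }

        static inline void slave_update_last_tx(struct slave *slave)
        {
                slave->last_tx = jiffies;
        }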
@@ -3060,6 +3086,227 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct in6_addr mcaddr;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
+ daddr, saddr);
+
+ skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
+ if (!skb) {
+ net_err_ratelimited("NS packet allocation failed\n");
+ return;
+ }
+
+ addrconf_addr_solict_mult(daddr, &mcaddr);
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
+ ndisc_send_skb(skb, &mcaddr, saddr);
+ }
+}
+
+static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct bond_vlan_tag *tags;
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct flowi6 fl6;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
+ __func__, &targets[i]);
+ tags = NULL;
+
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+ fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ /* there's no route to target - try to send an NS
+ * probe to generate any traffic (arp_validate=0)
+ */
+ if (bond->params.arp_validate)
+ pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+ continue;
+ }
+
+ /* bond device itself */
+ if (dst->dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ tags = bond_verify_device_path(bond->dev, dst->dev, 0);
+ rcu_read_unlock();
+
+ if (!IS_ERR_OR_NULL(tags))
+ goto found;
+
+ /* Not our device - skip */
+ slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
+ &targets[i], dst->dev ? dst->dev->name : "NULL");
+
+ dst_release(dst);
+ continue;
+
+found:
+ if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
+ bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
+ dst_release(dst);
+ kfree(tags);
+ }
+}
+
+static int bond_confirm_addr6(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct in6_addr *addr = (struct in6_addr *)priv->data;
+
+ return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
+}
+
+static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+{
+ struct netdev_nested_priv priv = {
+ .data = addr,
+ };
+ int ret = false;
+
+ if (bond_confirm_addr6(bond->dev, &priv))
+ return true;
+
+ rcu_read_lock();
+ if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
+ struct in6_addr *saddr, struct in6_addr *daddr)
+{
+ int i;
+
+ /* Ignore NAs whose:
+ * 1. source address is the unspecified address, or
+ * 2. dest address is neither the all-nodes multicast address nor
+ * an address that exists on the bond interface.
+ */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+ __func__, saddr, daddr);
+ return;
+ }
+
+ i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
+ if (i == -1) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
+ __func__, saddr);
+ return;
+ }
+ slave->last_rx = jiffies;
+ slave->target_last_arp_rx[i] = jiffies;
+}
+
+static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+ struct slave *curr_active_slave, *curr_arp_slave;
+ struct icmp6hdr *hdr = icmp6_hdr(skb);
+ struct in6_addr *saddr, *daddr;
+
+ if (skb->pkt_type == PACKET_OTHERHOST ||
+ skb->pkt_type == PACKET_LOOPBACK ||
+ hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+ goto out;
+
+ saddr = &ipv6_hdr(skb)->saddr;
+ daddr = &ipv6_hdr(skb)->daddr;
+
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ saddr, daddr);
+
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+ /* We 'trust' the received NA enough to validate it under the same
+ * conditions as ARP; see bond_arp_rcv().
+ */
+ if (bond_is_active_slave(slave))
+ bond_validate_na(bond, slave, saddr, daddr);
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
+ bond_validate_na(bond, slave, saddr, daddr);
+ else if (curr_arp_slave &&
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+ bond_validate_na(bond, slave, saddr, daddr);
+
+out:
+ return RX_HANDLER_ANOTHER;
+}
+#endif
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
+#endif
+ bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
+
+ /* Use arp validate logic for both ARP and NS */
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+#if IS_ENABLED(CONFIG_IPV6)
+ (slave_do_arp_validate_only(bond) && is_ipv6) ||
+#endif
+ !slave_do_arp_validate_only(bond))
+ slave->last_rx = jiffies;
+ return RX_HANDLER_ANOTHER;
+ } else if (is_arp) {
+ return bond_arp_rcv(skb, bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (is_ipv6) {
+ return bond_na_rcv(skb, bond, slave);
+#endif
+ } else {
+ return RX_HANDLER_ANOTHER;
+ }
+}
+
+static void bond_send_validate(struct bonding *bond, struct slave *slave)
+{
+ bond_arp_send_all(bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ bond_ns_send_all(bond, slave);
+#endif
+}
+
/* function to verify if we're in the arp_interval timeslice, returns true if
* (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
* arp_interval/2) . the arp_interval/2 is needed for really fast networks.
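A sketch matching the interval comment above, assuming delta is arp_interval already converted to jiffies (the real bond_time_in_interval() takes the bond and derives delta itself):

        static bool in_arp_interval(unsigned long last_act,
                                    unsigned long delta, int mod)
        {
                return time_in_range(jiffies,
                                     last_act - delta,
                                     last_act + mod * delta + delta / 2);
        }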
@@ -3101,12 +3348,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3131,7 +3378,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
@@ -3155,7 +3402,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* to be unstable during low/no traffic periods
*/
if (bond_slave_is_up(slave))
- bond_arp_send_all(bond, slave);
+ bond_send_validate(bond, slave);
}
rcu_read_unlock();
@@ -3197,7 +3444,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3248,9 +3495,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
@@ -3267,8 +3514,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3277,10 +3524,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -3369,7 +3616,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
curr_active_slave->dev->name);
if (curr_active_slave) {
- bond_arp_send_all(bond, curr_active_slave);
+ bond_send_validate(bond, curr_active_slave);
return should_notify_rtnl;
}
@@ -3421,7 +3668,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_arp_send_all(bond, new_slave);
+ bond_send_validate(bond, new_slave);
new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
@@ -3477,9 +3724,11 @@ re_arm:
if (!rtnl_trylock())
return;
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
@@ -3820,14 +4069,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
return true;
}
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
+
/* discard lowest hash bit to deal with the common even ports pattern */
- return hash >> 1;
+ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+ return hash >> 1;
+
+ return hash;
}
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
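The xmit_policy parameter exists because the even-ports bias only matters for hashes that mix L4 ports: TCP/UDP port numbers are frequently even, which skews the lowest bit of the folded hash, so layer3+4 and encap3+4 discard it. Pure L3 policies keep the full hash, since shifting would only halve their hash space. A compact model of the fold:

        static u32 fold_flow_hash(u32 hash, bool ports_in_hash)
        {
                hash ^= (hash >> 16);
                hash ^= (hash >> 8);
                return ports_in_hash ? hash >> 1 : hash;
        }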
@@ -3857,7 +4111,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
/**
@@ -3874,8 +4128,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
- return __bond_xmit_hash(bond, skb, skb->head, skb->protocol,
- skb->mac_header, skb->network_header,
+ return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+ skb_mac_offset(skb), skb_network_offset(skb),
skb_headlen(skb));
}
@@ -3928,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -3957,7 +4217,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -3965,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
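Moving the rr_tx_counter allocation from bond_init() to bond_open() means a bond created in another mode never pays for it, and a later mode change to round-robin still works because the check runs on every open. The shape of the pattern, as a sketch:

        static int lazy_alloc_rr_counter(struct bonding *bond)
        {
                if (BOND_MODE(bond) != BOND_MODE_ROUNDROBIN ||
                    bond->rr_tx_counter)
                        return 0;       /* wrong mode or already allocated */

                bond->rr_tx_counter = alloc_percpu(u32);
                return bond->rr_tx_counter ? 0 : -ENOMEM;
        }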
@@ -3976,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -3983,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
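With the IFF_UP guards earlier in this patch, a slave only holds synced addresses while the bond is up, so bond_close() can flush unconditionally. A plausible body for bond_hw_addr_flush() consistent with these hunks (the actual change to that helper is outside this excerpt):

        static void example_hw_addr_flush(struct net_device *bond_dev,
                                          struct net_device *slave_dev)
        {
                struct bonding *bond = netdev_priv(bond_dev);

                dev_uc_unsync(slave_dev, bond_dev);
                dev_mc_unsync(slave_dev, bond_dev);

                /* undo the dev_mc_add() done in bond_open()/bond_enslave() */
                if (BOND_MODE(bond) == BOND_MODE_8023AD)
                        dev_mc_del(slave_dev, lacpdu_mcast_addr);
        }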
@@ -4133,9 +4410,7 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
fallthrough;
case SIOCGHWTSTAMP:
- rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
- rcu_read_unlock();
if (!real_dev)
return -EOPNOTSUPP;
@@ -4531,7 +4806,7 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
switch (packets_per_slave) {
case 0:
- slave_id = prandom_u32();
+ slave_id = get_random_u32();
break;
case 1:
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
@@ -4884,25 +5159,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
struct list_head *iter;
+ bool xmit_suc = false;
+ bool skb_used = false;
bond_for_each_slave_rcu(bond, slave, iter) {
- if (bond_is_last_slave(bond, slave))
- break;
- if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *skb2;
+
+ if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
+ continue;
+ if (bond_is_last_slave(bond, slave)) {
+ skb2 = skb;
+ skb_used = true;
+ } else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
bond_dev->name, __func__);
continue;
}
- bond_dev_queue_xmit(bond, skb2, slave->dev);
}
+
+ if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
+ xmit_suc = true;
}
- if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- return bond_dev_queue_xmit(bond, skb, slave->dev);
- return bond_tx_drop(bond_dev, skb);
+ if (!skb_used)
+ dev_kfree_skb_any(skb);
+
+ if (xmit_suc)
+ return NETDEV_TX_OK;
+
+ dev_core_stats_tx_dropped_inc(bond_dev);
+ return NET_XMIT_DROP;
}
/*------------------------- Device initialization ---------------------------*/
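The rewrite keeps the old clone-for-all-but-last optimization (N-1 clones, the original skb for the final up slave) while closing two holes: a last slave that is down no longer silently eats the frame, and the return code now reflects whether any transmit succeeded. The core pattern in isolation; error paths trimmed and the caller owns skb when n == 0:

        static void xmit_to_all(struct sk_buff *skb,
                                struct net_device **devs, int n)
        {
                int i;

                for (i = 0; i < n; i++) {
                        struct sk_buff *nskb = (i == n - 1) ?
                                skb : skb_clone(skb, GFP_ATOMIC);

                        if (nskb)
                                dev_queue_xmit(nskb);
                }
        }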
@@ -5002,7 +5291,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
@@ -5040,7 +5329,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
@@ -5080,8 +5369,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
@@ -5355,7 +5650,7 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
@@ -5367,18 +5662,23 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
const struct ethtool_ops *ops;
struct net_device *real_dev;
struct phy_device *phydev;
+ int ret = 0;
rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
+ dev_hold(real_dev);
rcu_read_unlock();
+
if (real_dev) {
ops = real_dev->ethtool_ops;
phydev = real_dev->phydev;
if (phy_has_tsinfo(phydev)) {
- return phy_ts_info(phydev, info);
+ ret = phy_ts_info(phydev, info);
+ goto out;
} else if (ops->get_ts_info) {
- return ops->get_ts_info(real_dev, info);
+ ret = ops->get_ts_info(real_dev, info);
+ goto out;
}
}
@@ -5386,7 +5686,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
- return 0;
+out:
+ dev_put(real_dev);
+ return ret;
}
static const struct ethtool_ops bond_ethtool_ops = {
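The dev_hold() before rcu_read_unlock() is what makes it safe to keep using real_dev afterwards: the reference is taken while RCU still guarantees the pointer is live, and both dev_hold() and dev_put() accept NULL, so the error path needs no extra check. The generic pattern, where ptr stands for any RCU-managed device slot:

        rcu_read_lock();
        real_dev = rcu_dereference(ptr);
        dev_hold(real_dev);             /* NULL-safe */
        rcu_read_unlock();

        /* ... real_dev may now be used outside the RCU section ... */

        dev_put(real_dev);              /* NULL-safe */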
@@ -5928,6 +6230,9 @@ static int bond_check_params(struct bond_params *params)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+#if IS_ENABLED(CONFIG_IPV6)
+ memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
+#endif
return 0;
}
@@ -5944,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
-
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
@@ -5984,45 +6280,33 @@ int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
struct bonding *bond;
- struct alb_bond_info *bond_info;
- int res;
+ int res = -ENOMEM;
rtnl_lock();
bond_dev = alloc_netdev_mq(sizeof(struct bonding),
name ? name : "bond%d", NET_NAME_UNKNOWN,
bond_setup, tx_queues);
- if (!bond_dev) {
- pr_err("%s: eek! can't alloc netdev!\n", name);
- rtnl_unlock();
- return -ENOMEM;
- }
+ if (!bond_dev)
+ goto out;
- /*
- * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
- * It is set to 0 by default which is wrong.
- */
bond = netdev_priv(bond_dev);
- bond_info = &(BOND_ALB_INFO(bond));
- bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
-
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
- rtnl_unlock();
-
- return res;
+ goto out;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
+out:
rtnl_unlock();
- return 0;
+ return res;
}
static int __net_init bond_net_init(struct net *net)
@@ -6038,27 +6322,38 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
- struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
+ struct bond_net *bn;
+ struct net *net;
LIST_HEAD(list);
- bond_destroy_sysfs(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn);
+ }
/* Kill off any bonds created after unregistering bond rtnl ops */
rtnl_lock();
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct bonding *bond, *tmp_bond;
+
+ bn = net_generic(net, bond_net_id);
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, &list);
+ }
unregister_netdevice_many(&list);
rtnl_unlock();
- bond_destroy_proc_dir(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_proc_dir(bn);
+ }
}
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
- .exit = bond_net_exit,
+ .exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
};
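Switching from .exit to .exit_batch means one rtnl_lock()/unlock and one unregister_netdevice_many() for the whole list of dying namespaces, rather than a lock round-trip per namespace; the sysfs and proc teardown stay outside the locked section. Skeleton of the idiom, where collect_devs() is a hypothetical helper that queues devices with unregister_netdevice_queue():

        static void __net_exit example_exit_batch(struct list_head *net_list)
        {
                struct net *net;
                LIST_HEAD(kill);

                rtnl_lock();
                list_for_each_entry(net, net_list, exit_list)
                        collect_devs(net, &kill);
                unregister_netdevice_many(&kill);
                rtnl_unlock();
        }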
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1007bf6d385d..c2d080fc4fc4 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -14,6 +14,7 @@
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/bonding.h>
+#include <net/ipv6.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
@@ -26,6 +27,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
+ nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
0;
}
@@ -52,6 +54,9 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
+ if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
+ goto nla_put_failure;
+
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
const struct port *ad_port;
@@ -111,10 +116,12 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
+ [IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -149,7 +156,18 @@ static int bond_slave_changelink(struct net_device *bond_dev,
snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
slave_dev->name, queue_id);
bond_opt_initstr(&newval, queue_id_str);
- err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval,
+ data[IFLA_BOND_SLAVE_QUEUE_ID], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_SLAVE_PRIO]) {
+ int prio = nla_get_s32(data[IFLA_BOND_SLAVE_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, prio);
+ err = __bond_opt_set(bond, BOND_OPT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_PRIO], extack);
if (err)
return err;
}
@@ -173,7 +191,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
bond_opt_initval(&newval, mode);
- err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MODE, &newval,
+ data[IFLA_BOND_MODE], extack);
if (err)
return err;
}
@@ -190,7 +209,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
active_slave = slave_dev->name;
}
bond_opt_initstr(&newval, active_slave);
- err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval,
+ data[IFLA_BOND_ACTIVE_SLAVE], extack);
if (err)
return err;
}
@@ -198,7 +218,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
bond_opt_initval(&newval, miimon);
- err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval,
+ data[IFLA_BOND_MIIMON], extack);
if (err)
return err;
}
@@ -206,7 +227,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
bond_opt_initval(&newval, updelay);
- err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval,
+ data[IFLA_BOND_UPDELAY], extack);
if (err)
return err;
}
@@ -214,7 +236,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
bond_opt_initval(&newval, downdelay);
- err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval,
+ data[IFLA_BOND_DOWNDELAY], extack);
if (err)
return err;
}
@@ -222,7 +245,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]);
bond_opt_initval(&newval, delay);
- err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval,
+ data[IFLA_BOND_PEER_NOTIF_DELAY], extack);
if (err)
return err;
}
@@ -230,7 +254,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
+ data[IFLA_BOND_USE_CARRIER], extack);
if (err)
return err;
}
@@ -238,12 +263,14 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
+ "ARP monitoring cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_interval);
- err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval,
+ data[IFLA_BOND_ARP_INTERVAL], extack);
if (err)
return err;
}
@@ -262,7 +289,9 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
- &newval);
+ &newval,
+ data[IFLA_BOND_ARP_IP_TARGET],
+ extack);
if (err)
break;
i++;
@@ -272,16 +301,49 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_BOND_NS_IP6_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_ns_ip6_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) {
+ struct in6_addr addr6;
+
+ if (nla_len(attr) < sizeof(addr6)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address");
+ return -EINVAL;
+ }
+
+ addr6 = nla_get_in6_addr(attr);
+
+ bond_opt_initextra(&newval, &addr6, sizeof(addr6));
+ err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
+ &newval,
+ data[IFLA_BOND_NS_IP6_TARGET],
+ extack);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n");
+ if (err)
+ return err;
+ }
+#endif
if (data[IFLA_BOND_ARP_VALIDATE]) {
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_VALIDATE],
+ "ARP validation cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_validate);
- err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval,
+ data[IFLA_BOND_ARP_VALIDATE], extack);
if (err)
return err;
}
@@ -290,7 +352,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
bond_opt_initval(&newval, arp_all_targets);
- err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval,
+ data[IFLA_BOND_ARP_ALL_TARGETS], extack);
if (err)
return err;
}
@@ -304,7 +367,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
primary = dev->name;
bond_opt_initstr(&newval, primary);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval,
+ data[IFLA_BOND_PRIMARY], extack);
if (err)
return err;
}
@@ -313,7 +377,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
bond_opt_initval(&newval, primary_reselect);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval,
+ data[IFLA_BOND_PRIMARY_RESELECT], extack);
if (err)
return err;
}
@@ -322,7 +387,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
bond_opt_initval(&newval, fail_over_mac);
- err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval,
+ data[IFLA_BOND_FAIL_OVER_MAC], extack);
if (err)
return err;
}
@@ -331,7 +397,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
bond_opt_initval(&newval, xmit_hash_policy);
- err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval,
+ data[IFLA_BOND_XMIT_HASH_POLICY], extack);
if (err)
return err;
}
@@ -340,7 +407,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
bond_opt_initval(&newval, resend_igmp);
- err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval,
+ data[IFLA_BOND_RESEND_IGMP], extack);
if (err)
return err;
}
@@ -349,7 +417,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
bond_opt_initval(&newval, num_peer_notif);
- err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval,
+ data[IFLA_BOND_NUM_PEER_NOTIF], extack);
if (err)
return err;
}
@@ -358,7 +427,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
bond_opt_initval(&newval, all_slaves_active);
- err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval,
+ data[IFLA_BOND_ALL_SLAVES_ACTIVE], extack);
if (err)
return err;
}
@@ -367,7 +437,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
bond_opt_initval(&newval, min_links);
- err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval,
+ data[IFLA_BOND_MIN_LINKS], extack);
if (err)
return err;
}
@@ -376,7 +447,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
bond_opt_initval(&newval, lp_interval);
- err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval,
+ data[IFLA_BOND_LP_INTERVAL], extack);
if (err)
return err;
}
@@ -385,7 +457,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
bond_opt_initval(&newval, packets_per_slave);
- err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval,
+ data[IFLA_BOND_PACKETS_PER_SLAVE], extack);
if (err)
return err;
}
@@ -394,7 +467,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]);
bond_opt_initval(&newval, lacp_active);
- err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval,
+ data[IFLA_BOND_AD_LACP_ACTIVE], extack);
if (err)
return err;
}
@@ -404,7 +478,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
bond_opt_initval(&newval, lacp_rate);
- err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval,
+ data[IFLA_BOND_AD_LACP_RATE], extack);
if (err)
return err;
}
@@ -413,7 +488,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_SELECT]);
bond_opt_initval(&newval, ad_select);
- err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval,
+ data[IFLA_BOND_AD_SELECT], extack);
if (err)
return err;
}
@@ -422,7 +498,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
bond_opt_initval(&newval, actor_sys_prio);
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYS_PRIO], extack);
if (err)
return err;
}
@@ -431,7 +508,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
bond_opt_initval(&newval, port_key);
- err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval,
+ data[IFLA_BOND_AD_USER_PORT_KEY], extack);
if (err)
return err;
}
@@ -441,7 +519,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval,
nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYSTEM], extack);
if (err)
return err;
}
@@ -449,7 +528,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
bond_opt_initval(&newval, dynamic_lb);
- err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval,
+ data[IFLA_BOND_TLB_DYNAMIC_LB], extack);
if (err)
return err;
}
@@ -458,7 +538,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
bond_opt_initval(&newval, missed_max);
- err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval,
+ data[IFLA_BOND_MISSED_MAX], extack);
if (err)
return err;
}
@@ -526,6 +607,9 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
0;
}
@@ -603,6 +687,26 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.arp_all_targets))
goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (!ipv6_addr_any(&bond->params.ns_targets[i])) {
+ if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i]))
+ goto nla_put_failure;
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+#endif
+
primary = rtnl_dereference(bond->primary_slave);
if (primary &&
nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 2e8484a91a0e..3498db1c1b3c 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -34,10 +34,14 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_reselect_set(struct bonding *bond,
@@ -295,6 +299,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
+ [BOND_OPT_NS_TARGETS] = {
+ .id = BOND_OPT_NS_TARGETS,
+ .name = "ns_ip6_target",
+ .desc = "NS targets in ffff:ffff::ffff:ffff form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_ns_ip6_targets_set
+ },
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
@@ -356,6 +367,16 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_miimon_set
},
+ [BOND_OPT_PRIO] = {
+ .id = BOND_OPT_PRIO,
+ .name = "prio",
+ .desc = "Link priority for failover re-selection",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_prio_set
+ },
[BOND_OPT_PRIMARY] = {
.id = BOND_OPT_PRIMARY,
.name = "primary",
@@ -623,27 +644,35 @@ static int bond_opt_check_deps(struct bonding *bond,
}
static void bond_opt_dep_print(struct bonding *bond,
- const struct bond_option *opt)
+ const struct bond_option *opt,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
- if (test_bit(params->mode, &opt->unsuppmodes))
+ if (test_bit(params->mode, &opt->unsuppmodes)) {
netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
opt->name, modeval->string, modeval->value);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "option not supported in mode");
+ }
}
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
- int error, const struct bond_opt_value *val)
+ int error, const struct bond_opt_value *val,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
case -EINVAL:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr, "invalid option value");
if (val) {
if (val->string) {
/* sometimes RAWVAL opts may have new lines */
@@ -665,13 +694,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
opt->name, minval ? minval->value : 0, maxval->value);
break;
case -EACCES:
- bond_opt_dep_print(bond, opt);
+ bond_opt_dep_print(bond, opt, bad_attr, extack);
break;
case -ENOTEMPTY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond device has slaves");
netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
opt->name);
break;
case -EBUSY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond is up");
netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
opt->name);
break;
@@ -682,6 +715,8 @@ static void bond_opt_error_interpret(struct bonding *bond,
*p = '\0';
netdev_err(bond->dev, "option %s: interface %s does not exist!\n",
opt->name, val->string);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "interface does not exist");
}
break;
default:
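Each branch now reports twice on purpose: netdev_err() keeps the historical kernel-log message, and NL_SET_ERR_MSG_ATTR() attaches the same diagnosis plus the offending attribute to the netlink ack, where iproute2 can print it. Minimal usage shape, with bad_value() as a hypothetical predicate:

        static int check_opt(struct net_device *dev, struct nlattr *attr,
                             struct netlink_ext_ack *extack)
        {
                if (bad_value(attr)) {
                        netdev_err(dev, "invalid option value\n");
                        NL_SET_ERR_MSG_ATTR(extack, attr,
                                            "invalid option value");
                        return -EINVAL;
                }
                return 0;
        }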
@@ -694,13 +729,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
* @bond: target bond device
* @option: option to set
* @val: value to set it to
+ * @bad_attr: netlink attribute that caused the error
+ * @extack: extended netlink error structure, used when an error message
+ * needs to be returned to the caller via netlink
*
* This function is used to change the bond's option value, it can be
* used for both enabling/changing an option and for disabling it. RTNL lock
* must be obtained before calling this function.
*/
int __bond_opt_set(struct bonding *bond,
- unsigned int option, struct bond_opt_value *val)
+ unsigned int option, struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack)
{
const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
@@ -722,7 +761,7 @@ int __bond_opt_set(struct bonding *bond,
ret = opt->set(bond, retval);
out:
if (ret)
- bond_opt_error_interpret(bond, opt, ret, val);
+ bond_opt_error_interpret(bond, opt, ret, val, bad_attr, extack);
return ret;
}
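After the signature change every caller picks one of two shapes: netlink paths forward the offending attribute and extack, while sysfs and module-parameter paths pass NULL for both, which simply mutes the extack side. For example, mirroring the miimon hunk above:

        /* netlink: errors reach both dmesg and the netlink ack */
        err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval,
                             data[IFLA_BOND_MIIMON], extack);

        /* sysfs/module param: kernel log only */
        err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval, NULL, NULL);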
@@ -744,7 +783,7 @@ int __bond_opt_set_notify(struct bonding *bond,
ASSERT_RTNL();
- ret = __bond_opt_set(bond, option, val);
+ ret = __bond_opt_set(bond, option, val, NULL, NULL);
if (!ret && (bond->dev->reg_state == NETREG_REGISTERED))
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
@@ -1052,7 +1091,7 @@ static int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -1184,6 +1223,71 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+ struct in6_addr *target,
+ unsigned long last_rx)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = *target;
+ }
+}
+
+void bond_option_ns_ip6_targets_clear(struct bonding *bond)
+{
+ struct in6_addr addr_any = in6addr_any;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+ _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0);
+}
+
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct in6_addr *target = (struct in6_addr *)newval->extra;
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct in6_addr addr_any = in6addr_any;
+ int index;
+
+ if (!bond_is_ip6_target_ok(target)) {
+ netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n",
+ target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip6(targets, target) != -1) { /* dup */
+ netdev_err(bond->dev, "NS target %pI6c is already present\n",
+ target);
+ return -EINVAL;
+ }
+
+ index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */
+ if (index == -1) {
+ netdev_err(bond->dev, "NS target table is full!\n");
+ return -EINVAL;
+ }
+
+ netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target);
+
+ _bond_options_ns_ip6_target_set(bond, index, target, jiffies);
+
+ return 0;
+}
+#else
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ return -EPERM;
+}
+#endif
+
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1214,6 +1318,27 @@ static int bond_option_missed_max_set(struct bonding *bond,
return 0;
}
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(newval->slave_dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+ slave->prio = newval->value;
+
+ if (rtnl_dereference(bond->primary_slave))
+ slave_warn(bond->dev, slave->dev,
+ "prio updated, but will not affect failover re-selection as primary slave have been set\n");
+ else
+ bond_select_active_slave(bond);
+
+ return 0;
+}
+
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 2ec11af5f0cc..43be458422b3 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -11,7 +11,7 @@
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
@@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
@@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
@@ -129,6 +129,21 @@ static void bond_info_show_master(struct seq_file *seq)
printed = 1;
}
seq_printf(seq, "\n");
+
+#if IS_ENABLED(CONFIG_IPV6)
+ printed = 0;
+ seq_printf(seq, "NS IPv6 target/s (xx::xx form):");
+
+ for (i = 0; (i < BOND_MAX_NS_TARGETS); i++) {
+ if (ipv6_addr_any(&bond->params.ns_targets[i]))
+ break;
+ if (printed)
+ seq_printf(seq, ",");
+ seq_printf(seq, " %pI6c", &bond->params.ns_targets[i]);
+ printed = 1;
+ }
+ seq_printf(seq, "\n");
+#endif
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -175,7 +190,7 @@ static void bond_info_show_master(struct seq_file *seq)
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
@@ -307,7 +322,6 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
}
/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 9b5a5df23d21..8996bd0a194a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -47,10 +47,10 @@ static ssize_t bonding_show_bonds(struct class *cls,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", bond->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
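sysfs_emit_at() is the bounds-checked replacement for sprintf() into a sysfs buffer: it verifies that buf is the page-aligned buffer sysfs handed in and that the offset stays inside PAGE_SIZE, warning and returning 0 instead of overflowing. The accumulation loops in this file all reduce to this shape:

        static ssize_t show_names(char *buf, const char *const *names, int n)
        {
                ssize_t res = 0;
                int i;

                for (i = 0; i < n; i++)
                        res += sysfs_emit_at(buf, res, "%s ", names[i]);
                return res;
        }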
@@ -178,10 +178,10 @@ static ssize_t bonding_show_slaves(struct device *d,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", slave->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
rtnl_unlock();
@@ -203,7 +203,7 @@ static ssize_t bonding_show_mode(struct device *d,
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
+ return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
@@ -217,7 +217,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
- return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
@@ -233,7 +233,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
- return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
@@ -248,7 +248,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
- return sprintf(buf, "%s %d\n",
+ return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
@@ -265,7 +265,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
- return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
@@ -277,7 +277,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.arp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
@@ -292,8 +292,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
- res += sprintf(buf + res, "%pI4 ",
- &bond->params.arp_targets[i]);
+ res += sysfs_emit_at(buf, res, "%pI4 ",
+ &bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -310,7 +310,7 @@ static ssize_t bonding_show_missed_max(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.missed_max);
+ return sysfs_emit(buf, "%u\n", bond->params.missed_max);
}
static DEVICE_ATTR(arp_missed_max, 0644,
bonding_show_missed_max, bonding_sysfs_store_option);
@@ -322,7 +322,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
@@ -333,7 +333,7 @@ static ssize_t bonding_show_updelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
@@ -345,8 +345,8 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n",
- bond->params.peer_notif_delay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
@@ -361,7 +361,7 @@ static ssize_t bonding_show_lacp_active(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
@@ -375,7 +375,7 @@ static ssize_t bonding_show_lacp_rate(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
@@ -386,7 +386,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.min_links);
+ return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
@@ -400,7 +400,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
- return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
@@ -412,7 +412,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.num_peer_notif);
+ return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
@@ -426,7 +426,7 @@ static ssize_t bonding_show_miimon(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
@@ -443,7 +443,7 @@ static ssize_t bonding_show_primary(struct device *d,
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
- count = sprintf(buf, "%s\n", primary->dev->name);
+ count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
@@ -462,8 +462,8 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
- return sprintf(buf, "%s %d\n",
- val->string, bond->params.primary_reselect);
+ return sysfs_emit(buf, "%s %d\n",
+ val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
@@ -475,7 +475,7 @@ static ssize_t bonding_show_carrier(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -493,7 +493,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
- count = sprintf(buf, "%s\n", slave_dev->name);
+ count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -509,7 +509,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
- return sprintf(buf, "%s\n", active ? "up" : "down");
+ return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
@@ -524,9 +524,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.aggregator_id);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -545,9 +545,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.ports);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.ports);
}
return count;
@@ -566,9 +566,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.actor_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -587,9 +587,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.partner_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -609,7 +609,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
- count = sprintf(buf, "%pM\n", ad_info.partner_system);
+ count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
@@ -634,11 +634,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ res += sysfs_emit_at(buf, res, "%s:%d ",
+ slave->dev->name, slave->queue_id);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -658,7 +658,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+ return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
@@ -670,7 +670,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.resend_igmp);
+ return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -682,7 +682,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.lp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
@@ -693,7 +693,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
+ return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
@@ -705,7 +705,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
+ return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
@@ -717,7 +717,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
@@ -731,7 +731,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+ return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
@@ -746,7 +746,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 6a6cdd0bb258..313866f2c0e4 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -15,43 +15,37 @@ struct slave_attribute {
ssize_t (*show)(struct slave *, char *);
};
-#define SLAVE_ATTR(_name, _mode, _show) \
-const struct slave_attribute slave_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
-};
#define SLAVE_ATTR_RO(_name) \
- SLAVE_ATTR(_name, 0444, _name##_show)
+const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
- return sprintf(buf, "backup\n");
+ return sysfs_emit(buf, "backup\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+ return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->link_failure_count);
+ return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%*phC\n",
+ return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
@@ -59,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", slave->queue_id);
}
static SLAVE_ATTR_RO(queue_id);
@@ -70,11 +64,11 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
- return sprintf(buf, "%d\n",
- agg->aggregator_identifier);
+ return sysfs_emit(buf, "%d\n",
+ agg->aggregator_identifier);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
@@ -85,11 +79,11 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
@@ -100,11 +94,11 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
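The removed SLAVE_ATTR() macro duplicated what the generic __ATTR_RO() helper in <linux/sysfs.h> already does. Its expansion is roughly the following (the real helper additionally wraps the mode in VERIFY_OCTAL_PERMISSIONS()):

/* Approximate expansion of __ATTR_RO(_name): a 0444 attribute bound
 * to the matching _name##_show() callback.
 */
#define __ATTR_RO(_name) {						\
	.attr = { .name = __stringify(_name), .mode = 0444 },		\
	.show = _name##_show,						\
}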
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 2a7af611d43a..688075859ae4 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -196,7 +196,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb_reset_mac_header(skb);
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
+ ret = netif_rx(skb);
if (!ret) {
ser->dev->stats.rx_packets++;
ser->dev->stats.rx_bytes += count;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 91230894692d..0b0f234b0b50 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -714,20 +714,29 @@ static int cfv_probe(struct virtio_device *vdev)
/* Initialize NAPI poll context data */
vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
cfv->ctx.head = USHRT_MAX;
- netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
+ netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll,
+ CFV_DEFAULT_QUOTA);
tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
/* register Netdev */
- err = register_netdev(netdev);
+ err = register_netdevice(netdev);
if (err) {
+ rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
+ virtio_device_ready(vdev);
+
+ rtnl_unlock();
+
debugfs_init(cfv);
return 0;
@@ -754,7 +763,7 @@ static void cfv_remove(struct virtio_device *vdev)
debugfs_remove_recursive(cfv->debugfs);
vringh_kiov_cleanup(&cfv->ctx.riov);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->vringh_config->del_vrhs(cfv->vdev);
cfv->vr_rx = NULL;
vdev->config->del_vqs(cfv->vdev);
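The caif_virtio hunk closes a startup race: with plain register_netdev(), userspace could call ndo_open() before virtio_device_ready() had marked the device usable. Since ndo_open() also runs under the RTNL lock, holding it across both steps serializes them. A condensed sketch of the resulting ordering, with error paths trimmed:

/* Sketch of the probe-time ordering this patch establishes. */
rtnl_lock();
err = register_netdevice(netdev);	/* RTNL-locked register_netdev() */
if (err) {
	rtnl_unlock();
	return err;
}
virtio_device_ready(vdev);		/* safe: ndo_open() cannot have run */
rtnl_unlock();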
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index fff259247d52..3048ad77edb3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,5 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "CAN Device Drivers"
+
+menuconfig CAN_DEV
+ tristate "CAN Device Drivers"
+ default y
+ depends on CAN
+ help
+ Controller Area Network (CAN) is a serial communications protocol
+ running at up to 1Mbit/s in its original release (now known as
+ Classical CAN) and at up to 8Mbit/s with the more recent CAN with
+ Flexible Data-Rate (CAN-FD). The CAN bus was originally designed
+ mainly for automotive use, but is now widely used in marine
+ (NMEA2000), industrial, and medical applications. More information
+ on the CAN network protocol family PF_CAN is contained in
+ <Documentation/networking/can.rst>.
+
+ This section contains all the CAN(-FD) device drivers including the
+ virtual ones. If you own such devices or plan to use the virtual CAN
+ interfaces to develop applications, say Y here.
+
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
@@ -28,35 +49,22 @@ config CAN_VXCAN
This driver can also be built as a module. If so, the module
will be called vxcan.
-config CAN_SLCAN
- tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on TTY
+config CAN_NETLINK
+ bool "CAN device drivers with Netlink support"
+ default y
help
- CAN driver for several 'low cost' CAN interfaces that are attached
- via serial lines or via USB-to-serial adapters using the LAWICEL
- ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
-
- As only the sending and receiving of CAN frames is implemented, this
- driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+ Enables the common framework for CAN device drivers. This is the
+ standard library and provides features for the Netlink interface such
+ as bit-timing validation, support for CAN error states, device
+ restart, and others.
- Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the linux-can project, see
- https://github.com/linux-can/can-utils for details.
+ The additional features selected by this option will be added to the
+ can-dev module.
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
+ This is required by all platform and hardware CAN drivers. If you
+ plan to use such devices or if unsure, say Y.
-config CAN_DEV
- tristate "Platform CAN drivers with Netlink support"
- default y
- help
- Enables the common framework for platform CAN drivers with Netlink
- support. This is the standard library for CAN drivers.
- If unsure, say Y.
-
-if CAN_DEV
+if CAN_NETLINK
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
@@ -69,24 +77,14 @@ config CAN_CALC_BITTIMING
source clock frequencies. Disabling saves some space, but then the
bit-timing parameters must be specified directly using the Netlink
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
- If unsure, say Y.
-config CAN_LEDS
- bool "Enable LED triggers for Netlink based drivers"
- depends on LEDS_CLASS
- # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
- # everything that this driver is doing. This is marked as broken
- # because it uses stuff that is intended to be changed or removed.
- # Please consider switching to the netdev trigger and confirm it
- # fulfills your needs instead of fixing this driver.
- depends on BROKEN
- select LEDS_TRIGGERS
- help
- This option adds two LED triggers for packet receive and transmit
- events on each supported CAN device.
+ The additional features selected by this option will be added to the
+ can-dev module.
+
+ If unsure, say Y.
- Say Y here if you are working on a system with led-class supported
- LEDs and you want to use them as canbus activity indicators.
+config CAN_RX_OFFLOAD
+ bool
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
@@ -95,10 +93,29 @@ config CAN_AT91
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
+config CAN_CAN327
+ tristate "Serial / USB serial ELM327 based OBD-II Interfaces (can327)"
+ depends on TTY
+ select CAN_RX_OFFLOAD
+ help
+ CAN driver for several 'low cost' OBD-II interfaces based on the
+ ELM327 OBD-II interpreter chip.
+
+ This is a best-effort driver - the ELM327 interface was never
+ designed to be used as a standalone CAN interface. However, it can
+ still be used for simple request-response protocols (such as OBD II),
+ and to monitor broadcast messages on a bus (such as in a vehicle).
+
+ Please refer to the documentation for information on how to use it:
+ Documentation/networking/device_drivers/can/can327.rst
+
+ If this driver is built as a module, it will be called can327.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Freescale FlexCAN.
@@ -135,6 +152,26 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express HS v2
Kvaser Mini PCI Express 2xHS v2
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on TTY
+ help
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils package of the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
+
+ The slcan driver supports up to 10 CAN netdevices by default which
+ can be changed by the 'maxdev=xx' module option. This driver can
+ also be built as a module. If so, the module will be called slcan.
+
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -148,6 +185,7 @@ config CAN_SUN4I
config CAN_TI_HECC
depends on ARM
tristate "TI High End CAN Controller"
+ select CAN_RX_OFFLOAD
help
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
@@ -170,6 +208,7 @@ config PCH_CAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/ctucanfd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
@@ -180,7 +219,7 @@ source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
source "drivers/net/can/usb/Kconfig"
-endif
+endif #CAN_NETLINK
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
@@ -190,4 +229,4 @@ config CAN_DEBUG_DEVICES
a problem with CAN support and want to see more of what is going
on.
-endmenu
+endif #CAN_DEV
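As the new CAN_DEV help text notes, the virtual interfaces are enough to develop PF_CAN applications against. A minimal, self-contained userspace sketch (it assumes an existing vcan0 interface, e.g. created with "ip link add dev vcan0 type vcan"; error handling is omitted for brevity):

/* Minimal PF_CAN receiver; "vcan0" is assumed to exist. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr = { 0 };
	struct can_frame frame;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	strncpy(ifr.ifr_name, "vcan0", IFNAMSIZ - 1);
	ioctl(s, SIOCGIFINDEX, &ifr);		/* resolve name -> ifindex */
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	if (read(s, &frame, sizeof(frame)) == sizeof(frame))
		printf("ID 0x%03x, len %u\n", frame.can_id, frame.len);
	return 0;
}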
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1e660afcb61b..61c75ce9d500 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
-obj-$(CONFIG_CAN_SLCAN) += slcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += rcar/
@@ -14,8 +14,10 @@ obj-y += usb/
obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
+obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a00655ccda02..199cb200f2bd 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -23,7 +24,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
@@ -452,7 +452,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
mb = get_tx_next_mb(priv);
@@ -618,8 +618,6 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
/**
@@ -854,7 +852,6 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
mb - get_mb_tx_first(priv),
NULL);
dev->stats.tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
}
}
@@ -1101,8 +1098,6 @@ static int at91_open(struct net_device *dev)
goto out_close;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
/* start chip and queuing */
at91_chip_start(dev);
napi_enable(&priv->napi);
@@ -1133,8 +1128,6 @@ static int at91_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1160,6 +1153,10 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops at91_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static ssize_t mb0_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1301,6 +1298,7 @@ static int at91_can_probe(struct platform_device *pdev)
}
dev->netdev_ops = &at91_netdev_ops;
+ dev->ethtool_ops = &at91_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -1317,7 +1315,7 @@ static int at91_can_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
- netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+ netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
@@ -1331,8 +1329,6 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_free;
}
- devm_can_led_init(dev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->reg_base, dev->irq);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index bd2f6dc01194..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -223,7 +223,7 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
-void c_can_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops c_can_ethtool_ops;
static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
{
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+ /* This is not a FIFO. C/D_CAN transmits the pending buffers
+ * in priority order: the lowest buffer number wins.
+ */
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
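The new c_can_get_tx_free() logic is the functional core of this fix: D_CAN can use the whole ring as a FIFO, while C_CAN must stop filling once head wraps below tail, because lower-numbered message objects always transmit first and could otherwise overtake already-queued frames. A small userspace model of the two cases, assuming a 16-object ring for illustration (the real obj_num comes from the device):

/* Model of c_can_get_tx_free(); OBJ_NUM of 16 is illustrative only. */
#include <assert.h>
#include <stdint.h>

#define OBJ_NUM 16u

static uint8_t tx_free(int is_d_can, uint8_t head, uint8_t tail)
{
	uint8_t h = head & (OBJ_NUM - 1), t = tail & (OBJ_NUM - 1);

	if (is_d_can)			/* true FIFO semantics */
		return OBJ_NUM - (uint8_t)(head - tail);
	if (h < t)			/* head has wrapped past tail */
		return 0;		/* wait until the ring drains */
	return OBJ_NUM - h;		/* only the objects above head */
}

int main(void)
{
	assert(tx_free(1, 20, 10) == 6);  /* D_CAN: 10 in flight, 6 free */
	assert(tx_free(0, 20, 10) == 0);  /* C_CAN: head (4) < tail (10) */
	assert(tx_free(0, 10, 10) == 6);  /* C_CAN: objects 10..15 free */
	return 0;
}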
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 6655146294fc..e41167eda673 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -11,14 +11,6 @@
#include "c_can.h"
-static void c_can_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
-{
- struct c_can_priv *priv = netdev_priv(netdev);
- strscpy(info->driver, "c_can", sizeof(info->driver));
- strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
-}
-
static void c_can_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -32,12 +24,7 @@ static void c_can_get_ringparam(struct net_device *netdev,
ring->tx_pending = priv->msg_obj_tx_num;
}
-static const struct ethtool_ops c_can_ethtool_ops = {
- .get_drvinfo = c_can_get_drvinfo,
+const struct ethtool_ops c_can_ethtool_ops = {
.get_ringparam = c_can_get_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void c_can_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &c_can_ethtool_ops;
-}
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index faa217f26771..c63f7fc1e691 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -40,7 +40,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "c_can.h"
@@ -430,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -438,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -458,7 +457,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct c_can_tx_ring *tx_ring = &priv->tx;
u32 idx, obj, cmd = IF_COMM_TX;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (c_can_tx_busy(priv, tx_ring))
@@ -466,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -749,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -759,11 +758,9 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_bytes += bytes;
stats->tx_packets += pkts;
- can_led_event(dev, CAN_LED_EVENT_TX);
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
@@ -906,9 +903,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
quota -= n;
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -957,15 +951,14 @@ static int c_can_handle_state_change(struct net_device *dev,
switch (error_type) {
case C_CAN_NO_ERROR:
- /* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -975,7 +968,7 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
case C_CAN_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (rx_err_passive)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
@@ -1182,8 +1175,6 @@ static int c_can_open(struct net_device *dev)
if (err)
goto exit_start_fail;
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
napi_enable(&priv->napi);
/* enable status change, error and module interrupts */
c_can_irq_control(priv, true);
@@ -1214,8 +1205,6 @@ static int c_can_close(struct net_device *dev)
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1246,7 +1235,8 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
priv->tx.tail = 0;
priv->tx.obj_num = msg_obj_tx_num;
- netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
+ netif_napi_add_weight(dev, &priv->napi, c_can_poll,
+ priv->msg_obj_rx_num);
priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
@@ -1364,8 +1354,6 @@ static const struct net_device_ops c_can_netdev_ops = {
int register_c_can_dev(struct net_device *dev)
{
- int err;
-
/* Deactivate pins to prevent DRA7 DCAN IP from being
* stuck in transition when module is disabled.
* Pins are activated in c_can_start() and deactivated
@@ -1375,12 +1363,9 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
- c_can_set_ethtool_ops(dev);
+ dev->ethtool_ops = &c_can_ethtool_ops;
- err = register_candev(dev);
- if (!err)
- devm_can_led_init(dev);
- return err;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
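The CAN_ERR_CNT flag added in the state-change handler tells userspace that data[6]/data[7] of the error frame carry the current TX/RX error counters, including in the error-active case. A hedged sketch of the receive side:

/* Sketch: consuming the counters from an error frame read off a
 * CAN_RAW socket (frame->can_id has CAN_ERR_FLAG set).
 */
#include <stdio.h>
#include <linux/can.h>
#include <linux/can/error.h>

static void report_err_counters(const struct can_frame *frame)
{
	if (frame->can_id & CAN_ERR_CNT)	/* counters are valid */
		printf("txerr=%u rxerr=%u\n",
		       frame->data[6], frame->data[7]);
}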
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
new file mode 100644
index 000000000000..094197780776
--- /dev/null
+++ b/drivers/net/can/can327.c
@@ -0,0 +1,1144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ELM327 based CAN interface driver (tty line discipline)
+ *
+ * This driver started as a derivative of linux/drivers/net/can/slcan.c
+ * and my thanks go to the original authors for their inspiration.
+ *
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/workqueue.h>
+
+#include <uapi/linux/tty.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+
+#define CAN327_NAPI_WEIGHT 4
+
+#define CAN327_SIZE_TXBUF 32
+#define CAN327_SIZE_RXBUF 1024
+
+#define CAN327_CAN_CONFIG_SEND_SFF 0x8000
+#define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000
+#define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000
+#define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000
+
+#define CAN327_DUMMY_CHAR 'y'
+#define CAN327_DUMMY_STRING "y"
+#define CAN327_READY_CHAR '>'
+
+/* Bits in elm->cmds_todo */
+enum can327_tx_do {
+ CAN327_TX_DO_CAN_DATA = 0,
+ CAN327_TX_DO_CANID_11BIT,
+ CAN327_TX_DO_CANID_29BIT_LOW,
+ CAN327_TX_DO_CANID_29BIT_HIGH,
+ CAN327_TX_DO_CAN_CONFIG_PART2,
+ CAN327_TX_DO_CAN_CONFIG,
+ CAN327_TX_DO_RESPONSES,
+ CAN327_TX_DO_SILENT_MONITOR,
+ CAN327_TX_DO_INIT,
+};
+
+struct can327 {
+ /* This must be the first member when using alloc_candev() */
+ struct can_priv can;
+
+ struct can_rx_offload offload;
+
+ /* TTY buffers */
+ u8 txbuf[CAN327_SIZE_TXBUF];
+ u8 rxbuf[CAN327_SIZE_RXBUF];
+
+ /* Per-channel lock */
+ spinlock_t lock;
+
+ /* TTY and netdev devices that we're bridging */
+ struct tty_struct *tty;
+ struct net_device *dev;
+
+ /* TTY buffer accounting */
+ struct work_struct tx_work; /* Flushes TTY TX buffer */
+ u8 *txhead; /* Next TX byte */
+ size_t txleft; /* Bytes left to TX */
+ int rxfill; /* Bytes already RX'd in buffer */
+
+ /* State machine */
+ enum {
+ CAN327_STATE_NOTINIT = 0,
+ CAN327_STATE_GETDUMMYCHAR,
+ CAN327_STATE_GETPROMPT,
+ CAN327_STATE_RECEIVING,
+ } state;
+
+ /* Things we have yet to send */
+ char **next_init_cmd;
+ unsigned long cmds_todo;
+
+ /* The CAN frame and config the ELM327 is sending/using,
+ * or will send/use after finishing all cmds_todo
+ */
+ struct can_frame can_frame_to_send;
+ u16 can_config;
+ u8 can_bitrate_divisor;
+
+ /* Parser state */
+ bool drop_next_line;
+
+ /* Stop the channel on UART side hardware failure, e.g. stray
+ * characters or neverending lines. This may be caused by bad
+ * UART wiring, a bad ELM327, a bad UART bridge...
+ * Once this is true, nothing will be sent to the TTY.
+ */
+ bool uart_side_failure;
+};
+
+static inline void can327_uart_side_failure(struct can327 *elm);
+
+static void can327_send(struct can327 *elm, const void *buf, size_t len)
+{
+ int written;
+
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->uart_side_failure)
+ return;
+
+ memcpy(elm->txbuf, buf, len);
+
+ /* The order of the next two lines is *very* important.
+ * When we are sending a small amount of data,
+ * the transfer may complete inside the ops->write()
+ * routine, because it runs with interrupts enabled.
+ * In that case we will never get a WRITE_WAKEUP event,
+ * unless we requested it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ written = elm->tty->ops->write(elm->tty, elm->txbuf, len);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+ return;
+ }
+
+ elm->txleft = len - written;
+ elm->txhead = elm->txbuf + written;
+}
+
+/* Take the ELM327 out of almost any state and back into command mode.
+ * We send CAN327_DUMMY_CHAR which will either abort any running
+ * operation, or be echoed back to us in case we're already in command
+ * mode.
+ */
+static void can327_kick_into_cmd_mode(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->state != CAN327_STATE_GETDUMMYCHAR &&
+ elm->state != CAN327_STATE_GETPROMPT) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+
+ elm->state = CAN327_STATE_GETDUMMYCHAR;
+ }
+}
+
+/* Schedule a CAN frame and necessary config changes to be sent to the TTY. */
+static void can327_send_frame(struct can327 *elm, struct can_frame *frame)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Schedule any necessary changes in ELM327's CAN configuration */
+ if (elm->can_frame_to_send.can_id != frame->can_id) {
+ /* Set the new CAN ID for transmission. */
+ if ((frame->can_id ^ elm->can_frame_to_send.can_id)
+ & CAN_EFF_FLAG) {
+ elm->can_config =
+ (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) |
+ CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF |
+ elm->can_bitrate_divisor;
+
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+ }
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo);
+ } else {
+ set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_LOW,
+ &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH,
+ &elm->cmds_todo);
+ }
+ }
+
+ /* Schedule the CAN frame itself. */
+ elm->can_frame_to_send = *frame;
+ set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+/* ELM327 initialisation sequence.
+ * The line length is limited by the buffer in can327_handle_prompt().
+ */
+static char *can327_init_script[] = {
+ "AT WS\r", /* v1.0: Warm Start */
+ "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */
+ "AT M0\r", /* v1.0: Memory Off */
+ "AT AL\r", /* v1.0: Allow Long messages */
+ "AT BI\r", /* v1.0: Bypass Initialisation */
+ "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */
+ "AT CFC0\r", /* v1.0: CAN Flow Control Off */
+ "AT CF 000\r", /* v1.0: Reset CAN ID Filter */
+ "AT CM 000\r", /* v1.0: Reset CAN ID Mask */
+ "AT E1\r", /* v1.0: Echo On */
+ "AT H1\r", /* v1.0: Headers On */
+ "AT L0\r", /* v1.0: Linefeeds Off */
+ "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */
+ "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */
+ "AT AT0\r", /* v1.2: Adaptive Timing Off */
+ "AT D1\r", /* v1.3: Print DLC On */
+ "AT S1\r", /* v1.3: Spaces On */
+ "AT TP B\r", /* v1.0: Try Protocol B */
+ NULL
+};
+
+static void can327_init_device(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ elm->state = CAN327_STATE_NOTINIT;
+ elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */
+ elm->rxfill = 0;
+ elm->drop_next_line = 0;
+
+ /* We can only set the bitrate as a fraction of 500000.
+ * The bitrates listed in can327_bitrate_const will
+ * limit the user to the right values.
+ */
+ elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate;
+ elm->can_config =
+ CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor;
+
+ /* Configure ELM327 and then start monitoring */
+ elm->next_init_cmd = &can327_init_script[0];
+ set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (!netif_running(elm->dev))
+ return;
+
+ /* Queue for NAPI pickup.
+ * rx-offload will update stats and LEDs for us.
+ */
+ if (can_rx_offload_queue_tail(&elm->offload, skb))
+ elm->dev->stats.rx_fifo_errors++;
+
+ /* Wake NAPI */
+ can_rx_offload_irq_finish(&elm->offload);
+}
+
+/* Called when we're out of ideas and just want it all to end. */
+static inline void can327_uart_side_failure(struct can327 *elm)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ elm->uart_side_failure = true;
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ elm->can.can_stats.bus_off++;
+ netif_stop_queue(elm->dev);
+ elm->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(elm->dev);
+
+ netdev_err(elm->dev,
+ "ELM327 misbehaved. Blocking further communication.\n");
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ return;
+
+ frame->can_id |= CAN_ERR_BUSOFF;
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Compares a byte buffer (non-NUL terminated) to the payload part of
+ * a string, and returns true iff the buffer (content *and* length) is
+ * exactly that string, without the terminating NUL byte.
+ *
+ * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9
+ * and !memcmp(buf, "BUS ERROR", 9).
+ *
+ * The reason to use strings is so we can easily include them in the C
+ * code, and to avoid hardcoding lengths.
+ */
+static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes,
+ const char *reference)
+{
+ size_t ref_len = strlen(reference);
+
+ return (nbytes == ref_len) && !memcmp(buf, reference, ref_len);
+}
+
+static void can327_parse_error(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ /* It's okay to return here:
+ * The outer parsing loop will drop this UART buffer.
+ */
+ return;
+
+ /* Filter possible error messages based on length of RX'd line */
+ if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) {
+ netdev_err(elm->dev,
+ "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n");
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) {
+ /* This will only happen if the last data line was complete.
+ * Otherwise, can327_parse_frame() will heuristically
+ * emit this kind of error frame instead.
+ */
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) {
+ frame->can_id |= CAN_ERR_BUSERROR;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_OVERLOAD;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_TX;
+ } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) {
+ /* ERR is followed by two digits, hence line length 5 */
+ netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n",
+ elm->rxbuf[3], elm->rxbuf[4]);
+ frame->can_id |= CAN_ERR_CRTL;
+ } else {
+ /* Something else has happened.
+ * Maybe garbage on the UART line.
+ * Emit a generic error frame.
+ */
+ }
+
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Parse CAN frames coming as ASCII from ELM327.
+ * They can be of various formats:
+ *
+ * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL
+ * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL
+ *
+ * where D = DLC, PL = payload byte
+ *
+ * Instead of a payload, RTR indicates a remote request.
+ *
+ * We will use the spaces and line length to guess the format.
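+ *
+ * Example (hypothetical EFF line): "12 34 56 78 2 AB CD " (note the
+ * trailing space) decodes to can_id 0x12345678 | CAN_EFF_FLAG,
+ * len 2, data {0xAB, 0xCD}.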
+ */
+static int can327_parse_frame(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+ int hexlen;
+ int datastart;
+ int i;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_skb(elm->dev, &frame);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Find first non-hex and non-space character:
+ * - In the simplest case, there is none.
+ * - For RTR frames, 'R' is the first non-hex character.
+ * - An error message may replace the end of the data line.
+ */
+ for (hexlen = 0; hexlen <= len; hexlen++) {
+ if (hex_to_bin(elm->rxbuf[hexlen]) < 0 &&
+ elm->rxbuf[hexlen] != ' ') {
+ break;
+ }
+ }
+
+ /* Sanity check whether the line is really a clean hexdump,
+ * or terminated by an error message, or contains garbage.
+ */
+ if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) &&
+ !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] &&
+ ' ' != elm->rxbuf[hexlen]) {
+ /* The line is likely garbled anyway, so bail.
+ * The main code will restart listening.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* Use spaces in CAN ID to distinguish 29 or 11 bit address length.
+ * No out-of-bounds access:
+ * We use the fact that we can always read from elm->rxbuf.
+ */
+ if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' &&
+ elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' &&
+ elm->rxbuf[13] == ' ') {
+ frame->can_id = CAN_EFF_FLAG;
+ datastart = 14;
+ } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') {
+ datastart = 6;
+ } else {
+ /* This is not a well-formatted data line.
+ * Assume it's an error message.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ if (hexlen < datastart) {
+ /* The line is too short to be a valid frame hex dump.
+ * Something interrupted the hex dump or it is invalid.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* From here on all chars up to buf[hexlen] are hex or spaces,
+ * at well-defined offsets.
+ */
+
+ /* Read CAN data length */
+ frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0);
+
+ /* Read CAN ID */
+ if (frame->can_id & CAN_EFF_FLAG) {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) |
+ (hex_to_bin(elm->rxbuf[1]) << 24) |
+ (hex_to_bin(elm->rxbuf[3]) << 20) |
+ (hex_to_bin(elm->rxbuf[4]) << 16) |
+ (hex_to_bin(elm->rxbuf[6]) << 12) |
+ (hex_to_bin(elm->rxbuf[7]) << 8) |
+ (hex_to_bin(elm->rxbuf[9]) << 4) |
+ (hex_to_bin(elm->rxbuf[10]) << 0);
+ } else {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) |
+ (hex_to_bin(elm->rxbuf[1]) << 4) |
+ (hex_to_bin(elm->rxbuf[2]) << 0);
+ }
+
+ /* Check for RTR frame */
+ if (elm->rxfill >= hexlen + 3 &&
+ !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) {
+ frame->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Is the line long enough to hold the advertised payload?
+ * Note: RTR frames have a DLC, but no actual payload.
+ */
+ if (!(frame->can_id & CAN_RTR_FLAG) &&
+ (hexlen < frame->len * 3 + datastart)) {
+ /* Incomplete frame.
+ * Probably the ELM327's RS232 TX buffer was full.
+ * Emit an error frame and exit.
+ */
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->len = CAN_ERR_DLC;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ can327_feed_frame_to_netdev(elm, skb);
+
+ /* Signal failure to parse.
+ * The line will be re-parsed as an error line, which will fail.
+ * However, this will correctly drop the state machine back into
+ * command mode.
+ */
+ return -ENODATA;
+ }
+
+ /* Parse the data nibbles. */
+ for (i = 0; i < frame->len; i++) {
+ frame->data[i] =
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) |
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1]));
+ }
+
+ /* Feed the frame to the network layer. */
+ can327_feed_frame_to_netdev(elm, skb);
+
+ return 0;
+}
+
+static void can327_parse_line(struct can327 *elm, size_t len)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Skip empty lines */
+ if (!len)
+ return;
+
+ /* Skip echo lines */
+ if (elm->drop_next_line) {
+ elm->drop_next_line = 0;
+ return;
+ } else if (!memcmp(elm->rxbuf, "AT", 2)) {
+ return;
+ }
+
+ /* Regular parsing */
+ if (elm->state == CAN327_STATE_RECEIVING &&
+ can327_parse_frame(elm, len)) {
+ /* Parse an error line. */
+ can327_parse_error(elm, len);
+
+ /* Start afresh. */
+ can327_kick_into_cmd_mode(elm);
+ }
+}
+
+static void can327_handle_prompt(struct can327 *elm)
+{
+ struct can_frame *frame = &elm->can_frame_to_send;
+ /* Size this buffer for the largest ELM327 line we may generate,
+ * which is currently an 8 byte CAN frame's payload hexdump.
+ * Items in can327_init_script must fit here, too!
+ */
+ char local_txbuf[sizeof("0102030405060708\r")];
+
+ lockdep_assert_held(&elm->lock);
+
+ if (!elm->cmds_todo) {
+ /* Enter CAN monitor mode */
+ can327_send(elm, "ATMA\r", 5);
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+
+ return;
+ }
+
+ /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */
+ if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf), "%s",
+ *elm->next_init_cmd);
+
+ elm->next_init_cmd++;
+ if (!(*elm->next_init_cmd)) {
+ clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ /* Init finished. */
+ }
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCSM%i\r",
+ !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATR%i\r",
+ !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPC\r");
+ set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPB%04X\r",
+ elm->can_config);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCP%02X\r",
+ (frame->can_id & CAN_EFF_MASK) >> 24);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%06X\r",
+ frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%03X\r",
+ frame->can_id & CAN_SFF_MASK);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) {
+ if (frame->can_id & CAN_RTR_FLAG) {
+ /* Send an RTR frame. Their DLC is fixed.
+ * Some chips don't send them at all.
+ */
+ snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r");
+ } else {
+ /* Send a regular CAN data frame */
+ int i;
+
+ for (i = 0; i < frame->len; i++) {
+ snprintf(&local_txbuf[2 * i],
+ sizeof(local_txbuf), "%02X",
+ frame->data[i]);
+ }
+
+ snprintf(&local_txbuf[2 * i], sizeof(local_txbuf),
+ "\r");
+ }
+
+ elm->drop_next_line = 1;
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+ }
+
+ can327_send(elm, local_txbuf, strlen(local_txbuf));
+}
+
+static bool can327_is_ready_char(char c)
+{
+ /* Bits 0xc0 are sometimes set (randomly), hence the mask.
+ * Probably bad hardware.
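+ * Masked, the prompt '>' (0x3e) still matches when received as
+ * 0x7e, 0xbe or 0xfe.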
+ */
+ return (c & 0x3f) == CAN327_READY_CHAR;
+}
+
+static void can327_drop_bytes(struct can327 *elm, size_t i)
+{
+ lockdep_assert_held(&elm->lock);
+
+ memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i);
+ elm->rxfill -= i;
+}
+
+static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx)
+{
+ size_t len, pos;
+
+ lockdep_assert_held(&elm->lock);
+
+ switch (elm->state) {
+ case CAN327_STATE_NOTINIT:
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_GETDUMMYCHAR:
+ /* Wait for 'y' or '>' */
+ for (pos = 0; pos < elm->rxfill; pos++) {
+ if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) {
+ can327_send(elm, "\r", 1);
+ elm->state = CAN327_STATE_GETPROMPT;
+ pos++;
+ break;
+ } else if (can327_is_ready_char(elm->rxbuf[pos])) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ pos++;
+ break;
+ }
+ }
+
+ can327_drop_bytes(elm, pos);
+ break;
+
+ case CAN327_STATE_GETPROMPT:
+ /* Wait for '>' */
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1]))
+ can327_handle_prompt(elm);
+
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_RECEIVING:
+ /* Find <CR> delimiting feedback lines. */
+ len = first_new_char_idx;
+ while (len < elm->rxfill && elm->rxbuf[len] != '\r')
+ len++;
+
+ if (len == CAN327_SIZE_RXBUF) {
+ /* Assume the buffer ran full with garbage.
+ * Did we even connect at the right baud rate?
+ */
+ netdev_err(elm->dev,
+ "RX buffer overflow. Faulty ELM327 or UART?\n");
+ can327_uart_side_failure(elm);
+ } else if (len == elm->rxfill) {
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) {
+ /* The ELM327's AT ST response timeout ran out,
+ * so we got a prompt.
+ * Clear RX buffer and restart listening.
+ */
+ elm->rxfill = 0;
+
+ can327_handle_prompt(elm);
+ }
+
+ /* No <CR> found - we haven't received a full line yet.
+ * Wait for more data.
+ */
+ } else {
+ /* We have a full line to parse. */
+ can327_parse_line(elm, len);
+
+ /* Remove parsed data from RX buffer. */
+ can327_drop_bytes(elm, len + 1);
+
+ /* More data to parse? */
+ if (elm->rxfill)
+ can327_parse_rxbuf(elm, 0);
+ }
+ }
+}
+
+static int can327_netdev_open(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&elm->lock);
+
+ if (!elm->tty) {
+ spin_unlock_bh(&elm->lock);
+ return -ENODEV;
+ }
+
+ if (elm->uart_side_failure)
+ netdev_warn(elm->dev,
+ "Reopening netdev after a UART side fault has been detected.\n");
+
+ /* Clear TTY buffers */
+ elm->rxfill = 0;
+ elm->txleft = 0;
+
+ /* open_candev() checks for elm->can.bittiming.bitrate != 0 */
+ err = open_candev(dev);
+ if (err) {
+ spin_unlock_bh(&elm->lock);
+ return err;
+ }
+
+ can327_init_device(elm);
+ spin_unlock_bh(&elm->lock);
+
+ err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT);
+ if (err) {
+ close_candev(dev);
+ return err;
+ }
+
+ can_rx_offload_enable(&elm->offload);
+
+ elm->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int can327_netdev_close(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+
+ /* Interrupt whatever the ELM327 is doing right now */
+ spin_lock_bh(&elm->lock);
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ spin_unlock_bh(&elm->lock);
+
+ netif_stop_queue(dev);
+
+ /* Give UART one final chance to flush. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ flush_work(&elm->tx_work);
+
+ can_rx_offload_disable(&elm->offload);
+ elm->can.state = CAN_STATE_STOPPED;
+ can_rx_offload_del(&elm->offload);
+ close_candev(dev);
+
+ return 0;
+}
+
+/* Send a can_frame to a TTY. */
+static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* We shouldn't get here after a hardware fault:
+ * can_bus_off() calls netif_carrier_off()
+ */
+ if (elm->uart_side_failure) {
+ WARN_ON_ONCE(elm->uart_side_failure);
+ goto out;
+ }
+
+ netif_stop_queue(dev);
+
+ /* BHs are already disabled, so no spin_lock_bh().
+ * See Documentation/networking/netdevices.rst
+ */
+ spin_lock(&elm->lock);
+ can327_send_frame(elm, frame);
+ spin_unlock(&elm->lock);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 0 : frame->len;
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops can327_netdev_ops = {
+ .ndo_open = can327_netdev_open,
+ .ndo_stop = can327_netdev_close,
+ .ndo_start_xmit = can327_netdev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops can327_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static bool can327_is_valid_rx_char(u8 c)
+{
+ static const bool lut_char_is_valid['z'] = {
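+ /* Initializers after a designator fill consecutive slots,
+ * so ['0'] = true, true, ... covers '0' through '4'.
+ */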
+ ['\r'] = true,
+ [' '] = true,
+ ['.'] = true,
+ ['0'] = true, true, true, true, true,
+ ['5'] = true, true, true, true, true,
+ ['<'] = true,
+ [CAN327_READY_CHAR] = true,
+ ['?'] = true,
+ ['A'] = true, true, true, true, true, true, true,
+ ['H'] = true, true, true, true, true, true, true,
+ ['O'] = true, true, true, true, true, true, true,
+ ['V'] = true, true, true, true, true,
+ ['a'] = true,
+ ['b'] = true,
+ ['v'] = true,
+ [CAN327_DUMMY_CHAR] = true,
+ };
+ BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z');
+
+ return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]);
+}
+
+/* Handle incoming ELM327 ASCII data.
+ * This will not be re-entered while running, but other ldisc
+ * functions may be called in parallel.
+ */
+static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ size_t first_new_char_idx;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ /* Store old rxfill, so can327_parse_rxbuf() will have
+ * the option of skipping already checked characters.
+ */
+ first_new_char_idx = elm->rxfill;
+
+ while (count-- && elm->rxfill < CAN327_SIZE_RXBUF) {
+ if (fp && *fp++) {
+ netdev_err(elm->dev,
+ "Error in received character stream. Check your wiring.");
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ /* Ignore NUL characters, which the PIC microcontroller may
+ * inadvertently insert due to a known hardware bug.
+ * See ELM327 documentation, which refers to a Microchip PIC
+ * bug description.
+ */
+ if (*cp) {
+ /* Check for stray characters on the UART line.
+ * Likely caused by bad hardware.
+ */
+ if (!can327_is_valid_rx_char(*cp)) {
+ netdev_err(elm->dev,
+ "Received illegal character %02x.\n",
+ *cp);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->rxbuf[elm->rxfill++] = *cp;
+ }
+
+ cp++;
+ }
+
+ if (count >= 0) {
+ netdev_err(elm->dev,
+ "Receive buffer overflowed. Bad chip or wiring? count = %i",
+ count);
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ can327_parse_rxbuf(elm, first_new_char_idx);
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Write out remaining transmit buffer.
+ * Scheduled when TTY is writable.
+ */
+static void can327_ldisc_tx_worker(struct work_struct *work)
+{
+ struct can327 *elm = container_of(work, struct can327, tx_work);
+ ssize_t written;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ if (elm->txleft) {
+ written = elm->tty->ops->write(elm->tty, elm->txhead,
+ elm->txleft);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->txleft -= written;
+ elm->txhead += written;
+ }
+
+ if (!elm->txleft)
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Called by the driver when there's room for more data. */
+static void can327_ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ schedule_work(&elm->tx_work);
+}
+
+/* The ELM327 can only handle bitrates that are integer divisors of 500 kHz,
+ * or 7/8 of such a rate. Divisors range from 1 to 64; the table below lists
+ * the plain divisor rates (it can be regenerated with the sketch that
+ * follows it). Support for the 7/8 rates is currently not implemented.
+ */
+static const u32 can327_bitrate_const[] = {
+ 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771,
+ 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204,
+ 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195,
+ 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151,
+ 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000,
+ 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411,
+ 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555,
+ 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000
+};
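Each entry is 500 kHz divided by one of the divisors 64 down to 1, truncated; a throwaway sketch (not driver code) that regenerates the table for checking:

#include <stdio.h>

int main(void)
{
	/* Divisors 64..1 of 500 kHz; integer division truncates, matching
	 * the rounded-down values in can327_bitrate_const[].
	 */
	for (int div = 64; div >= 1; div--)
		printf("%d%s", 500000 / div, div > 1 ? ", " : "\n");
	return 0;
}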
+
+static int can327_ldisc_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct can327 *elm;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(struct can327), 0);
+ if (!dev)
+ return -ENFILE;
+ elm = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ spin_lock_init(&elm->lock);
+ INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker);
+
+ /* Configure CAN metadata */
+ elm->can.bitrate_const = can327_bitrate_const;
+ elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const);
+ elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ elm->dev = dev;
+ dev->netdev_ops = &can327_netdev_ops;
+ dev->ethtool_ops = &can327_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ elm->tty = tty;
+ tty->disc_data = elm;
+
+ /* Let 'er rip */
+ err = register_candev(elm->dev);
+ if (err) {
+ free_candev(elm->dev);
+ return err;
+ }
+
+ netdev_info(elm->dev, "can327 on %s.\n", tty->name);
+
+ return 0;
+}
+
+/* Close down a can327 channel.
+ * This means flushing out any pending queues, and then returning.
+ * This call is serialized against other ldisc functions:
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this function for a hangup event.
+ */
+static void can327_ldisc_close(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to.
+ * Our .ndo_stop() also flushes the TTY write wakeup handler,
+ * so we can safely set elm->tty = NULL after this.
+ */
+ unregister_candev(elm->dev);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&elm->lock);
+ tty->disc_data = NULL;
+ elm->tty = NULL;
+ spin_unlock_bh(&elm->lock);
+
+ netdev_info(elm->dev, "can327 off %s.\n", tty->name);
+
+ free_candev(elm->dev);
+}
+
+static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1;
+ if (copy_to_user((void __user *)arg, elm->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops can327_ldisc = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .num = N_CAN327,
+ .receive_buf = can327_ldisc_rx,
+ .write_wakeup = can327_ldisc_tx_wakeup,
+ .open = can327_ldisc_open,
+ .close = can327_ldisc_close,
+ .ioctl = can327_ldisc_ioctl,
+};
+
+static int __init can327_init(void)
+{
+ int status;
+
+ status = tty_register_ldisc(&can327_ldisc);
+ if (status)
+ pr_err("Can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit can327_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&can327_ldisc);
+}
+
+module_init(can327_init);
+module_exit(can327_exit);
+
+MODULE_ALIAS_LDISC(N_CAN327);
+MODULE_DESCRIPTION("ELM327 based CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Staudt <max@enpas.org>");
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index bb7224cfc6ab..30909f3aab57 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -428,7 +429,7 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cc770_priv *priv = netdev_priv(dev);
unsigned int mo = obj2msgobj(CC770_OBJ_TX);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -512,6 +513,7 @@ static int cc770_err(struct net_device *dev, u8 status)
/* Use extended functions of the CC770 */
if (priv->control_normal_mode & CTRL_EAF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = cc770_read_reg(priv, tx_error_counter);
cf->data[7] = cc770_read_reg(priv, rx_error_counter);
}
@@ -835,6 +837,10 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops cc770_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_cc770dev(struct net_device *dev)
{
struct cc770_priv *priv = netdev_priv(dev);
@@ -845,6 +851,7 @@ int register_cc770dev(struct net_device *dev)
return err;
dev->netdev_ops = &cc770_netdev_ops;
+ dev->ethtool_ops = &cc770_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
new file mode 100644
index 000000000000..6e2073351a8f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -0,0 +1,34 @@
+config CAN_CTUCANFD
+ tristate "CTU CAN-FD IP core" if COMPILE_TEST
+ help
+ This driver adds support for the CTU CAN FD open-source IP core.
+ More documentation and the core sources are available at the
+ project page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core).
+ An integration of the core into a Xilinx Zynq system as a platform
+ driver is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top).
+ An implementation on an Intel FPGA-based PCI Express board is
+ available from https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd and
+ one for an Intel SoC from https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd.
+ A guidepost to the CTU FEE CAN bus projects is at
+ https://canbus.pages.fel.cvut.cz/ .
+
+config CAN_CTUCANFD_PCI
+ tristate "CTU CAN-FD IP core PCI/PCIe driver"
+ depends on PCI
+ select CAN_CTUCANFD
+ help
+ This driver adds PCI/PCIe support for the CTU CAN-FD IP core.
+ The project providing the FPGA design for the Intel EP4CGX15 based
+ DB4CGX15 PCIe board, with a transceiver riser shield designed by
+ PiKRON.com, is available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd .
+
+config CAN_CTUCANFD_PLATFORM
+ tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
+ depends on HAS_IOMEM && (OF || COMPILE_TEST)
+ select CAN_CTUCANFD
+ help
+ The core has been tested together with an OpenCores SJA1000
+ modified to tolerate CAN FD frames, on MicroZed Zynq based
+ MZ_APO education kits designed by Petr Porazil from the company
+ PiKRON.com. The FPGA design is at
+ https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+ The kit is described at the Computer Architectures course pages:
+ https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start .
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile
new file mode 100644
index 000000000000..8078f1f2c30f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the CTU CAN-FD IP module drivers
+#
+
+obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o
+ctucanfd-y := ctucanfd_base.o
+
+obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o
+obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o
diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h
new file mode 100644
index 000000000000..0e9904f6a05d
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#ifndef __CTUCANFD__
+#define __CTUCANFD__
+
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+#include <linux/list.h>
+
+enum ctu_can_fd_can_registers;
+
+struct ctucan_priv {
+ struct can_priv can; /* must be first member! */
+
+ void __iomem *mem_base;
+ u32 (*read_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg);
+ void (*write_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val);
+
+ unsigned int txb_head;
+ unsigned int txb_tail;
+ u32 txb_prio;
+ unsigned int ntxbufs;
+ spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct clk *can_clk;
+
+ int irq_flags;
+ unsigned long drv_flags;
+
+ u32 rxfrm_first_word;
+
+ struct list_head peers_on_pdev;
+};
+
+/**
+ * ctucan_probe_common - Device type independent registration call
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * @dev: Handle to the generic device structure
+ * @addr: Base address of CTU CAN FD core address
+ * @irq: Interrupt number
+ * @ntxbufs: Number of implemented Tx buffers
+ * @can_clk_rate: Clock rate; if 0, the clock rate is taken from the device node
+ * @pm_enable_call: Whether pm_runtime_enable should be called
+ * @set_drvdata_fnc: Function to set network driver data for physical device
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ctucan_probe_common(struct device *dev, void __iomem *addr,
+ int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate,
+ int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev,
+ struct net_device *ndev));
+
+int ctucan_suspend(struct device *dev) __maybe_unused;
+int ctucan_resume(struct device *dev) __maybe_unused;
+
+#endif /*__CTUCANFD__*/
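As a usage sketch, a bus-specific front end is expected to map the core's registers, obtain the IRQ, and then delegate to ctucan_probe_common(). A hypothetical platform-style caller (names and buffer count are illustrative, not part of this patch):

#include <linux/platform_device.h>
#include "ctucanfd.h"

/* Hypothetical platform glue, for illustration only. */
static void demo_set_drvdata(struct device *dev, struct net_device *ndev)
{
	platform_set_drvdata(to_platform_device(dev), ndev);
}

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *addr = devm_platform_ioremap_resource(pdev, 0);
	int irq = platform_get_irq(pdev, 0);

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	if (irq < 0)
		return irq;

	/* 4 TXT buffers, clock rate taken from DT (0), enable runtime PM */
	return ctucan_probe_common(&pdev->dev, addr, irq, 4, 0, 1,
				   demo_set_drvdata);
}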
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
new file mode 100644
index 000000000000..64c349fd4600
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/error.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+#include "ctucanfd_kregs.h"
+#include "ctucanfd_kframe.h"
+
+#ifdef DEBUG
+#define ctucan_netdev_dbg(ndev, args...) \
+ netdev_dbg(ndev, args)
+#else
+#define ctucan_netdev_dbg(...) do { } while (0)
+#endif
+
+#define CTUCANFD_ID 0xCAFD
+
+/* TX buffer rotation:
+ * - when a buffer transitions to empty state, rotate order and priorities
+ * - if more buffers seem to transition at the same time, rotate by the number of buffers
+ * - it may be assumed that buffers transition to empty state in FIFO order (because we manage
+ * priorities that way)
+ * - at frame filling, do not rotate anything, just increment buffer modulo counter
+ */
+
+#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1
+
+#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \
+ [st] = #st
+
+enum ctucan_txtb_status {
+ TXT_NOT_EXIST = 0x0,
+ TXT_RDY = 0x1,
+ TXT_TRAN = 0x2,
+ TXT_ABTP = 0x3,
+ TXT_TOK = 0x4,
+ TXT_ERR = 0x6,
+ TXT_ABT = 0x7,
+ TXT_ETY = 0x8,
+};
+
+enum ctucan_txtb_command {
+ TXT_CMD_SET_EMPTY = 0x01,
+ TXT_CMD_SET_READY = 0x02,
+ TXT_CMD_SET_ABORT = 0x04
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 190,
+ .tseg2_min = 1,
+ .tseg2_max = 63,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 8,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 94,
+ .tseg2_min = 1,
+ .tseg2_max = 31,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 2,
+ .brp_inc = 1,
+};
+
+static const char * const ctucan_state_strings[CAN_STATE_MAX] = {
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING)
+};
+
+static void ctucan_write32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32(val, priv->mem_base + reg);
+}
+
+static void ctucan_write32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32be(val, priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32(priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32be(priv->mem_base + reg);
+}
+
+static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val)
+{
+ priv->write_reg(priv, reg, val);
+}
+
+static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg)
+{
+ return priv->read_reg(priv, reg);
+}
+
+static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base,
+ u32 offset, u32 val)
+{
+ priv->write_reg(priv, buf_base + offset, val);
+}
+
+#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS)))
+#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE)))
+
+/**
+ * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
+ * @state: CAN controller state code
+ *
+ * Return: Pointer to string representation of the error state
+ */
+static const char *ctucan_state_to_str(enum can_state state)
+{
+ const char *txt = NULL;
+
+ if (state >= 0 && state < CAN_STATE_MAX)
+ txt = ctucan_state_strings[state];
+ return txt ? txt : "UNKNOWN";
+}
+
+/**
+ * ctucan_reset() - Issues software reset request to CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
+ */
+static int ctucan_reset(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int i = 100;
+
+ ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST);
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+
+ do {
+ u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
+ ctucan_read32(priv, CTUCANFD_DEVICE_ID));
+
+ if (device_id == CTUCANFD_ID)
+ return 0;
+ if (!i--) {
+ netdev_warn(ndev, "device did not leave reset\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(100, 200);
+ } while (1);
+}
+
+/**
+ * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ * @bt: Pointer to Bit timing structure
+ * @nominal: True - Nominal bit timing, False - Data bit timing
+ *
+ * Return: 0 - OK, -%EPERM if controller is enabled
+ */
+static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int max_ph1_len = 31;
+ u32 btr = 0;
+ u32 prop_seg = bt->prop_seg;
+ u32 phase_seg1 = bt->phase_seg1;
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ if (nominal)
+ max_ph1_len = 63;
+
+ /* The timing calculation functions have only constraints on tseg1, which is prop_seg +
+ * phase1_seg combined. tseg1 is then split in half and stored into prog_seg and phase_seg1.
+ * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the
+ * values here.
+ */
+ if (phase_seg1 > max_ph1_len) {
+ prop_seg += phase_seg1 - max_ph1_len;
+ phase_seg1 = max_ph1_len;
+ bt->prop_seg = prop_seg;
+ bt->phase_seg1 = phase_seg1;
+ }
+
+ if (nominal) {
+ btr = FIELD_PREP(REG_BTR_PROP, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_BRP, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR, btr);
+ } else {
+ btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR_FD, btr);
+ }
+
+ return 0;
+}
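To make the redistribution above concrete, a standalone sketch with hypothetical input values, using max_ph1_len = 63 as for nominal timing:

#include <stdio.h>

int main(void)
{
	unsigned int prop_seg = 40, phase_seg1 = 70;	/* example tseg1 split */
	const unsigned int max_ph1_len = 63;

	/* Same move as in ctucan_set_btr(): PH1 overflow goes into PROP. */
	if (phase_seg1 > max_ph1_len) {
		prop_seg += phase_seg1 - max_ph1_len;
		phase_seg1 = max_ph1_len;
	}
	printf("prop_seg=%u phase_seg1=%u\n", prop_seg, phase_seg1);
	/* prints: prop_seg=47 phase_seg1=63 -- tseg1 stays at 110 */
	return 0;
}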
+
+/**
+ * ctucan_set_bittiming() - CAN set nominal bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ /* Note that bt may be modified here */
+ return ctucan_set_btr(ndev, bt, true);
+}
+
+/**
+ * ctucan_set_data_bittiming() - CAN set data bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_data_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ /* Note that dbt may be modified here */
+ return ctucan_set_btr(ndev, dbt, false);
+}
+
+/**
+ * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM if controller is enabled
+ */
+static int ctucan_set_secondary_sample_point(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ int ssp_offset = 0;
+ u32 ssp_cfg = 0; /* No SSP by default */
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ /* Use SSP for bit-rates above 1 Mbits/s */
+ if (dbt->bitrate > 1000000) {
+ /* Calculate SSP in minimal time quanta */
+ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate;
+
+ if (ssp_offset > 127) {
+ netdev_warn(ndev, "SSP offset saturated to 127\n");
+ ssp_offset = 127;
+ }
+
+ ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ }
+
+ ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
+
+ return 0;
+}
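A worked instance of the offset computation above, with hypothetical figures (100 MHz core clock, 87.5 % sample point stored as 875, 2 Mbit/s data bitrate):

	ssp_offset = (freq / 1000) * sample_point / bitrate
	           = (100000000 / 1000) * 875 / 2000000
	           = 100000 * 875 / 2000000
	           = 43 clock cycles (minimal time quanta), below the 127 cap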
+
+/**
+ * ctucan_set_mode() - Sets the CTU CAN FD's mode
+ * @priv: Pointer to private data
+ * @mode: Pointer to controller modes to be set
+ */
+static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode)
+{
+ u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ?
+ (mode_reg | REG_MODE_ILBP) :
+ (mode_reg & ~REG_MODE_ILBP);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ?
+ (mode_reg | REG_MODE_BMM) :
+ (mode_reg & ~REG_MODE_BMM);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD) ?
+ (mode_reg | REG_MODE_FDE) :
+ (mode_reg & ~REG_MODE_FDE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ?
+ (mode_reg | REG_MODE_ACF) :
+ (mode_reg & ~REG_MODE_ACF);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ?
+ (mode_reg | REG_MODE_NISOFD) :
+ (mode_reg & ~REG_MODE_NISOFD);
+
+ /* One shot mode supported indirectly via Retransmit limit */
+ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF);
+ mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ?
+ (mode_reg | REG_MODE_RTRLE) :
+ (mode_reg & ~REG_MODE_RTRLE);
+
+ /* Some bits fixed:
+ * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
+ */
+ mode_reg &= ~REG_MODE_TSTM;
+
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+}
+
+/**
+ * ctucan_chip_start() - This routine starts the driver
+ * @ndev: Pointer to net_device structure
+ *
+ * The routine expects that the chip is in reset state. It sets up initial
+ * Tx buffers for FIFO priorities, sets bittiming, enables interrupts,
+ * switches core to operational mode and changes controller
+ * state to %CAN_STATE_STOPPED.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_chip_start(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 int_ena, int_msk;
+ u32 mode_reg;
+ int err;
+ struct can_ctrlmode mode;
+
+ priv->txb_prio = 0x01234567;
+ priv->txb_head = 0;
+ priv->txb_tail = 0;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);
+
+ /* Configure bit-rates and ssp */
+ err = ctucan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_data_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_secondary_sample_point(ndev);
+ if (err < 0)
+ return err;
+
+ /* Configure modes */
+ mode.flags = priv->can.ctrlmode;
+ mode.mask = 0xFFFFFFFF;
+ ctucan_set_mode(priv, &mode);
+
+ /* Configure interrupts */
+ int_ena = REG_INT_STAT_RBNEI |
+ REG_INT_STAT_TXBHCI |
+ REG_INT_STAT_EWLI |
+ REG_INT_STAT_FCSI;
+
+ /* Bus error reporting -> Allow Error/Arb.lost interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ int_ena |= REG_INT_STAT_ALI |
+ REG_INT_STAT_BEI;
+ }
+
+ int_msk = ~int_ena; /* Mask all disabled interrupts */
+
+ /* It's after reset, so there is no need to clear anything */
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
+ ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
+
+ /* Controller enters ERROR_ACTIVE on initial FCSI */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Enable the controller */
+ mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+ mode_reg |= REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+
+ return 0;
+}
+
+/**
+ * ctucan_do_set_mode() - Sets mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the driver's state and applies the requested mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ return ret;
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ctucan_get_tx_status() - Gets status of TXT buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: Status of TXT buffer
+ */
+static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
+{
+ u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS);
+ enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
+
+ return status;
+}
+
+/**
+ * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be
+ * inserted to TXT Buffer
+ */
+static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
+{
+ enum ctucan_txtb_status buf_status;
+
+ buf_status = ctucan_get_tx_status(priv, buf);
+ if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP)
+ return false;
+
+ return true;
+}
+
+/**
+ * ctucan_insert_frame() - Inserts frame to TXT buffer
+ * @priv: Pointer to private data
+ * @cf: Pointer to CAN frame to be inserted
+ * @buf: TXT Buffer index to which frame is inserted (0-based)
+ * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
+ *
+ * Return: True - Frame inserted successfully
+ * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
+ */
+static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
+ bool isfdf)
+{
+ u32 buf_base;
+ u32 ffw = 0;
+ u32 idw = 0;
+ unsigned int i;
+
+ if (buf >= priv->ntxbufs)
+ return false;
+
+ if (!ctucan_is_txt_buf_writable(priv, buf))
+ return false;
+
+ if (cf->len > CANFD_MAX_DLEN)
+ return false;
+
+ /* Prepare Frame format */
+ if (cf->can_id & CAN_RTR_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_IDE;
+
+ if (isfdf) {
+ ffw |= REG_FRAME_FORMAT_W_FDF;
+ if (cf->flags & CANFD_BRS)
+ ffw |= REG_FRAME_FORMAT_W_BRS;
+ }
+
+ ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len));
+
+ /* Prepare identifier */
+ if (cf->can_id & CAN_EFF_FLAG)
+ idw = cf->can_id & CAN_EFF_MASK;
+ else
+ idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK);
+
+ /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */
+ buf_base = (buf + 1) * 0x100;
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw);
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw);
+
+ /* Write Data payload */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i += 4) {
+ u32 data = le32_to_cpu(*(__le32 *)(cf->data + i));
+
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ctucan_give_txtb_cmd() - Applies command on TXT buffer
+ * @priv: Pointer to private data
+ * @cmd: Command to give
+ * @buf: Buffer index (0-based)
+ */
+static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf)
+{
+ u32 tx_cmd = cmd;
+
+ tx_cmd |= 1 << (buf + 8);
+ ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd);
+}
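For example, readying TXT buffer index 2 (using the enum values above) writes:

	tx_cmd = TXT_CMD_SET_READY | (1 << (2 + 8))
	       = 0x02 | 0x400
	       = 0x00000402	/* command code in bits 0..7, one-hot buffer select from bit 8 */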
+
+/**
+ * ctucan_start_xmit() - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
+ * populates its fields to start the transmission.
+ *
+ * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
+ * negative return values reserved for error cases
+ */
+static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txtb_id;
+ bool ok;
+ unsigned long flags;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG!, no TXB free when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ txtb_id = priv->txb_head % priv->ntxbufs;
+ ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
+ ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
+
+ if (!ok) {
+ netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
+ kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ can_put_echo_skb(skb, ndev, txtb_id, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
+ priv->txb_head++;
+
+ /* Check if all TX buffers are full */
+ if (!CTU_CAN_FD_TXTNF(priv))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ctucan_read_rx_frame() - Reads frame from RX FIFO
+ * @priv: Pointer to CTU CAN FD's private data
+ * @cf: Pointer to CAN frame struct
+ * @ffw: Previously read frame format word
+ *
+ * Note: Frame format word must be read separately and provided in 'ffw'.
+ */
+static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
+{
+ u32 idw;
+ unsigned int i;
+ unsigned int wc;
+ unsigned int len;
+
+ idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
+ cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (idw >> 18) & CAN_SFF_MASK;
+
+ /* BRS, ESI, RTR Flags */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
+ cf->flags |= CANFD_BRS;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
+ cf->flags |= CANFD_ESI;
+ } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;
+
+ /* DLC */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
+ len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
+ } else {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ len = wc << 2;
+ else
+ len = 8;
+ }
+ cf->len = len;
+ if (unlikely(len > wc * 4))
+ len = wc * 4;
+
+ /* Timestamp - Read and throw away */
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+
+ /* Data */
+ for (i = 0; i < len; i += 4) {
+ u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ *(__le32 *)(cf->data + i) = cpu_to_le32(data);
+ }
+ while (unlikely(i < wc * 4)) {
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ i += 4;
+ }
+}
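Assuming RWCNT counts all stored words of the frame except FRAME_FORMAT_W (which the `- 3` for the identifier plus two timestamp words suggests), a classic frame with 8 data bytes works out as:

	RWCNT = 1 (identifier) + 2 (timestamp) + 2 (data words) = 5
	wc    = RWCNT - 3 = 2
	=> up to wc * 4 = 8 data bytes are read from the FIFO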
+
+/**
+ * ctucan_rx() - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (NAPI poll) to process Rx frames. It does minimal
+ * processing and invokes "netif_receive_skb" to complete further processing.
+ * Return: 1 when a frame has been passed to the network layer, 0 when the first frame word has
+ * been read but the system is temporarily out of free SKBs, leaving the allocation to be
+ * resolved later, -%EAGAIN in case of an empty Rx FIFO.
+ */
+static int ctucan_rx(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 ffw;
+
+ if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
+ ffw = priv->rxfrm_first_word;
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ } else {
+ ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ }
+
+ if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
+ return -EAGAIN;
+
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ priv->rxfrm_first_word = ffw;
+ set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ return 0;
+ }
+
+ ctucan_read_rx_frame(priv, cf, ffw);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * ctucan_read_fault_state() - Reads the CTU CAN FD's fault confinement state.
+ * @priv: Pointer to private data
+ *
+ * Return: Fault confinement state of the controller
+ */
+static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
+{
+ u32 fs;
+ u32 rec_tec;
+ u32 ewl;
+
+ fs = ctucan_read32(priv, CTUCANFD_EWL);
+ rec_tec = ctucan_read32(priv, CTUCANFD_REC);
+ ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);
+
+ if (FIELD_GET(REG_EWL_ERA, fs)) {
+ if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
+ ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
+ return CAN_STATE_ERROR_ACTIVE;
+ else
+ return CAN_STATE_ERROR_WARNING;
+ } else if (FIELD_GET(REG_EWL_ERP, fs)) {
+ return CAN_STATE_ERROR_PASSIVE;
+ } else if (FIELD_GET(REG_EWL_BOF, fs)) {
+ return CAN_STATE_BUS_OFF;
+ }
+
+ WARN(true, "Invalid error state");
+ return CAN_STATE_ERROR_PASSIVE;
+}
+
+/**
+ * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
+ * @priv: Pointer to private data
+ * @bec: Pointer to Error counter structure
+ */
+static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
+{
+ u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);
+
+ bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
+ bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
+}
+
+/**
+ * ctucan_err_interrupt() - Error frame ISR
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt and it will check the type of error and forward the error
+ * frame to upper layers.
+ */
+static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state;
+ struct can_berr_counter bec;
+ u32 err_capt_alc;
+ int dologerr = net_ratelimit();
+
+ ctucan_get_rec_tec(priv, &bec);
+ state = ctucan_read_fault_state(priv);
+ err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
+
+ if (dologerr)
+ netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
+ __func__, isr, bec.rxerr, bec.txerr,
+ FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* EWLI: error warning limit condition met
+ * FCSI: fault confinement state changed
+ * ALI: arbitration lost (just informative)
+ * BEI: bus error interrupt
+ */
+ if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
+ netdev_info(ndev, "state changes from %s to %s\n",
+ ctucan_state_to_str(priv->can.state),
+ ctucan_state_to_str(state));
+
+ if (priv->can.state == state)
+ netdev_warn(ndev,
+ "current and previous state is the same! (missed interrupt?)\n");
+
+ priv->can.state = state;
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = (bec.rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] |= (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ default:
+ netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+ state, ctucan_state_to_str(state));
+ break;
+ }
+ }
+
+ /* Check for Arbitration Lost interrupt */
+ if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ if (dologerr)
+ netdev_info(ndev, "arbitration lost\n");
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for Bus Error interrupt */
+ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
+ netdev_info(ndev, "bus error\n");
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
+ * @napi: NAPI structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the rx part. It processes packets up to the maximum quota value.
+ *
+ * Return: Number of packets received
+ */
+static int ctucan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 status;
+ u32 framecnt;
+ int res = 1;
+
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ while (framecnt && work_done < quota && res > 0) {
+ res = ctucan_rx(ndev);
+ work_done++;
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ }
+
+ /* Check for RX FIFO Overflow */
+ status = ctucan_read32(priv, CTUCANFD_STATUS);
+ if (FIELD_GET(REG_STATUS_DOR, status)) {
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_info(ndev, "rx_poll: rx fifo overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+ netif_rx(skb);
+ }
+
+ /* Clear Data Overrun */
+ ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
+ }
+
+ if (!framecnt && res != 0) {
+ if (napi_complete_done(napi, work_done)) {
+ /* Clear and enable RBNEI. It is level-triggered, so
+ * there is no race condition.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
+ }
+ }
+
+ return work_done;
+}
+
+/**
+ * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers
+ * @ndev: net_device pointer
+ */
+static void ctucan_rotate_txb_prio(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 prio = priv->txb_prio;
+
+ prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF);
+ ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio);
+ priv->txb_prio = prio;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio);
+}
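A standalone sketch of this rotation (hypothetical values: the driver's reset priority word 0x01234567 and an assumed ntxbufs of 4):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t prio = 0x01234567;	/* reset value from ctucan_chip_start() */
	const unsigned int ntxbufs = 4;	/* assumed buffer count */

	/* Same rotation as ctucan_rotate_txb_prio(): shift the per-buffer
	 * priority nibbles up by one position and wrap the top one around.
	 */
	prio = (prio << 4) | ((prio >> ((ntxbufs - 1) * 4)) & 0xF);
	printf("0x%08x\n", prio);	/* prints 0x12345674 */
	return 0;
}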
+
+/**
+ * ctucan_tx_interrupt() - Tx done Isr
+ * @ndev: net_device pointer
+ */
+static void ctucan_tx_interrupt(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ bool first = true;
+ bool some_buffers_processed;
+ unsigned long flags;
+ enum ctucan_txtb_status txtb_status;
+ u32 txtb_id;
+
+ /* read tx_status
+ * if txb[n].finished (bit 2)
+ * if ok -> echo
+ * if error / aborted -> ?? (find how to handle oneshot mode)
+ * txb_tail++
+ */
+ do {
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ some_buffers_processed = false;
+ while ((int)(priv->txb_head - priv->txb_tail) > 0) {
+ txtb_id = priv->txb_tail % priv->ntxbufs;
+ txtb_status = ctucan_get_tx_status(priv, txtb_id);
+
+ ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
+
+ switch (txtb_status) {
+ case TXT_TOK:
+ ctucan_netdev_dbg(ndev, "TXT_OK\n");
+ stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_packets++;
+ break;
+ case TXT_ERR:
+ /* This indicates that the retransmit limit has been reached. Obviously
+ * we should not echo the frame, but also not indicate any kind of
+ * error. If desired, it was already reported (possibly multiple
+ * times) on each arbitration lost.
+ */
+ netdev_warn(ndev, "TXB in Error state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ case TXT_ABT:
+ /* Same as for TXT_ERR, only with different cause. We *could*
+ * re-queue the frame, but multiqueue/abort is not supported yet
+ * anyway.
+ */
+ netdev_warn(ndev, "TXB in Aborted state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ default:
+ /* Bug only if the first buffer is not finished, otherwise it is
+ * pretty much expected.
+ */
+ if (first) {
+ netdev_err(ndev,
+ "BUG: TXB#%u not in a finished state (0x%x)!\n",
+ txtb_id, txtb_status);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ /* do not clear nor wake */
+ return;
+ }
+ goto clear;
+ }
+ priv->txb_tail++;
+ first = false;
+ some_buffers_processed = true;
+ /* Adjust priorities *before* marking the buffer as empty. */
+ ctucan_rotate_txb_prio(ndev);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
+ }
+clear:
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* If no buffers were processed this time, we cannot clear - that would introduce
+ * a race condition.
+ */
+ if (some_buffers_processed) {
+ /* Clear the interrupt again. We do not want to receive the interrupt
+ * again for a buffer already handled. If it were the last finished one,
+ * that would log a spurious interrupt.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
+ }
+ } while (some_buffers_processed);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* Check if at least one TX buffer is free */
+ if (CTU_CAN_FD_TXTNF(priv))
+ netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+/**
+ * ctucan_interrupt() - CAN Isr
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the CTU CAN FD ISR. It checks for the type of interrupt
+ * and invokes the corresponding ISR.
+ *
+ * Return:
+ * IRQ_NONE - if no interrupt was pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 isr, icr;
+ u32 imask;
+ int irq_loops;
+
+ for (irq_loops = 0; irq_loops < 10000; irq_loops++) {
+ /* Get the interrupt status */
+ isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
+
+ if (!isr)
+ return irq_loops ? IRQ_HANDLED : IRQ_NONE;
+
+ /* Receive Buffer Not Empty Interrupt */
+ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
+ ctucan_netdev_dbg(ndev, "RXBNEI\n");
+ /* Mask RBNEI first, then clear the interrupt and schedule NAPI. Even if
+ * another IRQ fires, RBNEI will always read 0 (masked).
+ */
+ icr = REG_INT_STAT_RBNEI;
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TXT Buffer HW Command Interrupt */
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ ctucan_netdev_dbg(ndev, "TXBHCI\n");
+ /* Cleared inside */
+ ctucan_tx_interrupt(ndev);
+ }
+
+ /* Error interrupts */
+ if (FIELD_GET(REG_INT_STAT_EWLI, isr) ||
+ FIELD_GET(REG_INT_STAT_FCSI, isr) ||
+ FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI);
+
+ ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ ctucan_err_interrupt(ndev, isr);
+ }
+ /* Ignore RI, TI, LFI, RFI, BSI */
+ }
+
+ netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr);
+
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ int i;
+
+ netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
+ priv->txb_head, priv->txb_tail);
+ for (i = 0; i < priv->ntxbufs; i++) {
+ u32 status = ctucan_get_tx_status(priv, i);
+
+ netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
+ }
+ }
+
+ imask = 0xffffffff;
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ctucan_chip_stop() - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables the
+ * interrupts and the controller.
+ */
+static void ctucan_chip_stop(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 mask = 0xffffffff;
+ u32 mode;
+
+ /* Disable interrupts and disable CAN */
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask);
+ mode = ctucan_read32(priv, CTUCANFD_MODE);
+ mode &= ~REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * ctucan_open() - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_open(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_reset;
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_warn(ndev, "open_candev failed!\n");
+ goto err_open;
+ }
+
+ ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err_irq;
+ }
+
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ goto err_chip_start;
+ }
+
+ netdev_info(ndev, "ctu_can_fd device registered\n");
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_chip_start:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ close_candev(ndev);
+err_open:
+err_reset:
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+/**
+ * ctucan_close() - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int ctucan_close(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ ctucan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+/**
+ * ctucan_get_berr_counter() - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ctucan_get_rec_tec(priv, bec);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+static const struct net_device_ops ctucan_netdev_ops = {
+ .ndo_open = ctucan_open,
+ .ndo_stop = ctucan_close,
+ .ndo_start_xmit = ctucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ctucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+int ctucan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_suspend);
+
+int ctucan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_resume);
+
+int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate, int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev))
+{
+ struct ctucan_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->tx_lock);
+ INIT_LIST_HEAD(&priv->peers_on_pdev);
+ priv->ntxbufs = ntxbufs;
+ priv->dev = dev;
+ priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
+ priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.do_set_mode = ctucan_do_set_mode;
+
+ /* Needed for timing adjustment to be performed as soon as possible */
+ priv->can.do_set_bittiming = ctucan_set_bittiming;
+ priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+
+ priv->can.do_get_berr_counter = ctucan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
+ | CAN_CTRLMODE_LISTENONLY
+ | CAN_CTRLMODE_FD
+ | CAN_CTRLMODE_PRESUME_ACK
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_FD_NON_ISO
+ | CAN_CTRLMODE_ONE_SHOT;
+ priv->mem_base = addr;
+
+ /* Get IRQ for the device */
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ if (set_drvdata_fnc)
+ set_drvdata_fnc(dev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &ctucan_netdev_ops;
+ ndev->ethtool_ops = &ctucan_ethtool_ops;
+
+ /* Getting the can_clk info */
+ if (!can_clk_rate) {
+ priv->can_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ can_clk_rate = clk_get_rate(priv->can_clk);
+ }
+
+ priv->write_reg = ctucan_write32_le;
+ priv->read_reg = ctucan_read32_le;
+
+ if (pm_enable_call)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ goto err_pmdisable;
+ }
+
+ /* Check for big-endianness and set the corresponding I/O accessors */
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ priv->write_reg = ctucan_write32_be;
+ priv->read_reg = ctucan_read32_be;
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ netdev_err(ndev, "CTU_CAN_FD signature not found\n");
+ ret = -ENODEV;
+ goto err_deviceoff;
+ }
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_deviceoff;
+
+ priv->can.clock.freq = can_clk_rate;
+
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "fail to register failed (err=%d)\n", ret);
+ goto err_deviceoff;
+ }
+
+ pm_runtime_put(dev);
+
+ netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
+ priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);
+
+ return 0;
+
+err_deviceoff:
+ pm_runtime_put(priv->dev);
+err_pmdisable:
+ if (pm_enable_call)
+ pm_runtime_disable(dev);
+err_free:
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ return ret;
+}
+EXPORT_SYMBOL(ctucan_probe_common);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>");
+MODULE_DESCRIPTION("CTU CAN FD interface");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
new file mode 100644
index 000000000000..3491299eaac2
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+
+#include <linux/bits.h>
+
+/* CAN_Frame_format memory map */
+enum ctu_can_fd_can_frame_format {
+ CTUCANFD_FRAME_FORMAT_W = 0x0,
+ CTUCANFD_IDENTIFIER_W = 0x4,
+ CTUCANFD_TIMESTAMP_L_W = 0x8,
+ CTUCANFD_TIMESTAMP_U_W = 0xc,
+ CTUCANFD_DATA_1_4_W = 0x10,
+ CTUCANFD_DATA_5_8_W = 0x14,
+ CTUCANFD_DATA_61_64_W = 0x4c,
+};
+
+/* CAN_FD_Frame_format memory region */
+
+/* FRAME_FORMAT_W registers */
+#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
+#define REG_FRAME_FORMAT_W_RTR BIT(5)
+#define REG_FRAME_FORMAT_W_IDE BIT(6)
+#define REG_FRAME_FORMAT_W_FDF BIT(7)
+#define REG_FRAME_FORMAT_W_BRS BIT(9)
+#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
+#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
+
+/* IDENTIFIER_W registers */
+#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
+#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
+
+/* TIMESTAMP_L_W registers */
+#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
+
+/* TIMESTAMP_U_W registers */
+#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
+
+/* DATA_1_4_W registers */
+#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
+#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
+#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
+#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
+
+/* DATA_5_8_W registers */
+#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
+#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
+#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
+#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
+
+/* DATA_61_64_W registers */
+#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
+#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
+#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
+#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
+
+#endif
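
The fields above are plain GENMASK()/BIT() masks, so they pair with the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. A minimal sketch of packing a TX frame-format word from a struct canfd_frame — illustrative only, not the driver's actual TX helper:

	u32 ffw = FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cfd->len));

	if (cfd->can_id & CAN_EFF_FLAG)
		ffw |= REG_FRAME_FORMAT_W_IDE;	/* extended 29-bit identifier */
	if (cfd->flags & CANFD_FDF) {
		ffw |= REG_FRAME_FORMAT_W_FDF;	/* CAN FD frame format */
		if (cfd->flags & CANFD_BRS)
			ffw |= REG_FRAME_FORMAT_W_BRS;	/* bit-rate switch */
	}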
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
new file mode 100644
index 000000000000..0c181ab51bf8
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+
+#include <linux/bits.h>
+
+/* CAN_Registers memory map */
+enum ctu_can_fd_can_registers {
+ CTUCANFD_DEVICE_ID = 0x0,
+ CTUCANFD_VERSION = 0x2,
+ CTUCANFD_MODE = 0x4,
+ CTUCANFD_SETTINGS = 0x6,
+ CTUCANFD_STATUS = 0x8,
+ CTUCANFD_COMMAND = 0xc,
+ CTUCANFD_INT_STAT = 0x10,
+ CTUCANFD_INT_ENA_SET = 0x14,
+ CTUCANFD_INT_ENA_CLR = 0x18,
+ CTUCANFD_INT_MASK_SET = 0x1c,
+ CTUCANFD_INT_MASK_CLR = 0x20,
+ CTUCANFD_BTR = 0x24,
+ CTUCANFD_BTR_FD = 0x28,
+ CTUCANFD_EWL = 0x2c,
+ CTUCANFD_ERP = 0x2d,
+ CTUCANFD_FAULT_STATE = 0x2e,
+ CTUCANFD_REC = 0x30,
+ CTUCANFD_TEC = 0x32,
+ CTUCANFD_ERR_NORM = 0x34,
+ CTUCANFD_ERR_FD = 0x36,
+ CTUCANFD_CTR_PRES = 0x38,
+ CTUCANFD_FILTER_A_MASK = 0x3c,
+ CTUCANFD_FILTER_A_VAL = 0x40,
+ CTUCANFD_FILTER_B_MASK = 0x44,
+ CTUCANFD_FILTER_B_VAL = 0x48,
+ CTUCANFD_FILTER_C_MASK = 0x4c,
+ CTUCANFD_FILTER_C_VAL = 0x50,
+ CTUCANFD_FILTER_RAN_LOW = 0x54,
+ CTUCANFD_FILTER_RAN_HIGH = 0x58,
+ CTUCANFD_FILTER_CONTROL = 0x5c,
+ CTUCANFD_FILTER_STATUS = 0x5e,
+ CTUCANFD_RX_MEM_INFO = 0x60,
+ CTUCANFD_RX_POINTERS = 0x64,
+ CTUCANFD_RX_STATUS = 0x68,
+ CTUCANFD_RX_SETTINGS = 0x6a,
+ CTUCANFD_RX_DATA = 0x6c,
+ CTUCANFD_TX_STATUS = 0x70,
+ CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TXTB_INFO = 0x76,
+ CTUCANFD_TX_PRIORITY = 0x78,
+ CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_RETR_CTR = 0x7d,
+ CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TS_INFO = 0x7f,
+ CTUCANFD_TRV_DELAY = 0x80,
+ CTUCANFD_SSP_CFG = 0x82,
+ CTUCANFD_RX_FR_CTR = 0x84,
+ CTUCANFD_TX_FR_CTR = 0x88,
+ CTUCANFD_DEBUG_REGISTER = 0x8c,
+ CTUCANFD_YOLO_REG = 0x90,
+ CTUCANFD_TIMESTAMP_LOW = 0x94,
+ CTUCANFD_TIMESTAMP_HIGH = 0x98,
+ CTUCANFD_TXTB1_DATA_1 = 0x100,
+ CTUCANFD_TXTB1_DATA_2 = 0x104,
+ CTUCANFD_TXTB1_DATA_20 = 0x14c,
+ CTUCANFD_TXTB2_DATA_1 = 0x200,
+ CTUCANFD_TXTB2_DATA_2 = 0x204,
+ CTUCANFD_TXTB2_DATA_20 = 0x24c,
+ CTUCANFD_TXTB3_DATA_1 = 0x300,
+ CTUCANFD_TXTB3_DATA_2 = 0x304,
+ CTUCANFD_TXTB3_DATA_20 = 0x34c,
+ CTUCANFD_TXTB4_DATA_1 = 0x400,
+ CTUCANFD_TXTB4_DATA_2 = 0x404,
+ CTUCANFD_TXTB4_DATA_20 = 0x44c,
+};
+
+/* Control_registers memory region */
+
+/* DEVICE_ID VERSION registers */
+#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0)
+#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16)
+#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24)
+
+/* MODE SETTINGS registers */
+#define REG_MODE_RST BIT(0)
+#define REG_MODE_BMM BIT(1)
+#define REG_MODE_STM BIT(2)
+#define REG_MODE_AFM BIT(3)
+#define REG_MODE_FDE BIT(4)
+#define REG_MODE_TTTM BIT(5)
+#define REG_MODE_ROM BIT(6)
+#define REG_MODE_ACF BIT(7)
+#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RXBAM BIT(9)
+#define REG_MODE_SAM BIT(11)
+#define REG_MODE_RTRLE BIT(16)
+#define REG_MODE_RTRTH GENMASK(20, 17)
+#define REG_MODE_ILBP BIT(21)
+#define REG_MODE_ENA BIT(22)
+#define REG_MODE_NISOFD BIT(23)
+#define REG_MODE_PEX BIT(24)
+#define REG_MODE_TBFBO BIT(25)
+#define REG_MODE_FDRF BIT(26)
+
+/* STATUS registers */
+#define REG_STATUS_RXNE BIT(0)
+#define REG_STATUS_DOR BIT(1)
+#define REG_STATUS_TXNF BIT(2)
+#define REG_STATUS_EFT BIT(3)
+#define REG_STATUS_RXS BIT(4)
+#define REG_STATUS_TXS BIT(5)
+#define REG_STATUS_EWL BIT(6)
+#define REG_STATUS_IDLE BIT(7)
+#define REG_STATUS_PEXS BIT(8)
+#define REG_STATUS_STCNT BIT(16)
+
+/* COMMAND registers */
+#define REG_COMMAND_RXRPMV BIT(1)
+#define REG_COMMAND_RRB BIT(2)
+#define REG_COMMAND_CDO BIT(3)
+#define REG_COMMAND_ERCRST BIT(4)
+#define REG_COMMAND_RXFCRST BIT(5)
+#define REG_COMMAND_TXFCRST BIT(6)
+#define REG_COMMAND_CPEXS BIT(7)
+
+/* INT_STAT registers */
+#define REG_INT_STAT_RXI BIT(0)
+#define REG_INT_STAT_TXI BIT(1)
+#define REG_INT_STAT_EWLI BIT(2)
+#define REG_INT_STAT_DOI BIT(3)
+#define REG_INT_STAT_FCSI BIT(4)
+#define REG_INT_STAT_ALI BIT(5)
+#define REG_INT_STAT_BEI BIT(6)
+#define REG_INT_STAT_OFI BIT(7)
+#define REG_INT_STAT_RXFI BIT(8)
+#define REG_INT_STAT_BSI BIT(9)
+#define REG_INT_STAT_RBNEI BIT(10)
+#define REG_INT_STAT_TXBHCI BIT(11)
+
+/* INT_ENA_SET registers */
+#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0)
+
+/* INT_ENA_CLR registers */
+#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0)
+
+/* INT_MASK_SET registers */
+#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0)
+
+/* INT_MASK_CLR registers */
+#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0)
+
+/* BTR registers */
+#define REG_BTR_PROP GENMASK(6, 0)
+#define REG_BTR_PH1 GENMASK(12, 7)
+#define REG_BTR_PH2 GENMASK(18, 13)
+#define REG_BTR_BRP GENMASK(26, 19)
+#define REG_BTR_SJW GENMASK(31, 27)
+
+/* BTR_FD registers */
+#define REG_BTR_FD_PROP_FD GENMASK(5, 0)
+#define REG_BTR_FD_PH1_FD GENMASK(11, 7)
+#define REG_BTR_FD_PH2_FD GENMASK(17, 13)
+#define REG_BTR_FD_BRP_FD GENMASK(26, 19)
+#define REG_BTR_FD_SJW_FD GENMASK(31, 27)
+
+/* EWL ERP FAULT_STATE registers */
+#define REG_EWL_EW_LIMIT GENMASK(7, 0)
+#define REG_EWL_ERP_LIMIT GENMASK(15, 8)
+#define REG_EWL_ERA BIT(16)
+#define REG_EWL_ERP BIT(17)
+#define REG_EWL_BOF BIT(18)
+
+/* REC TEC registers */
+#define REG_REC_REC_VAL GENMASK(8, 0)
+#define REG_REC_TEC_VAL GENMASK(24, 16)
+
+/* ERR_NORM ERR_FD registers */
+#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0)
+#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16)
+
+/* CTR_PRES registers */
+#define REG_CTR_PRES_CTPV GENMASK(8, 0)
+#define REG_CTR_PRES_PTX BIT(9)
+#define REG_CTR_PRES_PRX BIT(10)
+#define REG_CTR_PRES_ENORM BIT(11)
+#define REG_CTR_PRES_EFD BIT(12)
+
+/* FILTER_A_MASK registers */
+#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0)
+
+/* FILTER_A_VAL registers */
+#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0)
+
+/* FILTER_B_MASK registers */
+#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0)
+
+/* FILTER_B_VAL registers */
+#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0)
+
+/* FILTER_C_MASK registers */
+#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0)
+
+/* FILTER_C_VAL registers */
+#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_LOW registers */
+#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_HIGH registers */
+#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0)
+
+/* FILTER_CONTROL FILTER_STATUS registers */
+#define REG_FILTER_CONTROL_FANB BIT(0)
+#define REG_FILTER_CONTROL_FANE BIT(1)
+#define REG_FILTER_CONTROL_FAFB BIT(2)
+#define REG_FILTER_CONTROL_FAFE BIT(3)
+#define REG_FILTER_CONTROL_FBNB BIT(4)
+#define REG_FILTER_CONTROL_FBNE BIT(5)
+#define REG_FILTER_CONTROL_FBFB BIT(6)
+#define REG_FILTER_CONTROL_FBFE BIT(7)
+#define REG_FILTER_CONTROL_FCNB BIT(8)
+#define REG_FILTER_CONTROL_FCNE BIT(9)
+#define REG_FILTER_CONTROL_FCFB BIT(10)
+#define REG_FILTER_CONTROL_FCFE BIT(11)
+#define REG_FILTER_CONTROL_FRNB BIT(12)
+#define REG_FILTER_CONTROL_FRNE BIT(13)
+#define REG_FILTER_CONTROL_FRFB BIT(14)
+#define REG_FILTER_CONTROL_FRFE BIT(15)
+#define REG_FILTER_CONTROL_SFA BIT(16)
+#define REG_FILTER_CONTROL_SFB BIT(17)
+#define REG_FILTER_CONTROL_SFC BIT(18)
+#define REG_FILTER_CONTROL_SFR BIT(19)
+
+/* RX_MEM_INFO registers */
+#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0)
+#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16)
+
+/* RX_POINTERS registers */
+#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0)
+#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16)
+
+/* RX_STATUS RX_SETTINGS registers */
+#define REG_RX_STATUS_RXE BIT(0)
+#define REG_RX_STATUS_RXF BIT(1)
+#define REG_RX_STATUS_RXMOF BIT(2)
+#define REG_RX_STATUS_RXFRC GENMASK(14, 4)
+#define REG_RX_STATUS_RTSOP BIT(16)
+
+/* RX_DATA registers */
+#define REG_RX_DATA_RX_DATA GENMASK(31, 0)
+
+/* TX_STATUS registers */
+#define REG_TX_STATUS_TX1S GENMASK(3, 0)
+#define REG_TX_STATUS_TX2S GENMASK(7, 4)
+#define REG_TX_STATUS_TX3S GENMASK(11, 8)
+#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+#define REG_TX_STATUS_TX5S GENMASK(19, 16)
+#define REG_TX_STATUS_TX6S GENMASK(23, 20)
+#define REG_TX_STATUS_TX7S GENMASK(27, 24)
+#define REG_TX_STATUS_TX8S GENMASK(31, 28)
+
+/* TX_COMMAND TXTB_INFO registers */
+#define REG_TX_COMMAND_TXCE BIT(0)
+#define REG_TX_COMMAND_TXCR BIT(1)
+#define REG_TX_COMMAND_TXCA BIT(2)
+#define REG_TX_COMMAND_TXB1 BIT(8)
+#define REG_TX_COMMAND_TXB2 BIT(9)
+#define REG_TX_COMMAND_TXB3 BIT(10)
+#define REG_TX_COMMAND_TXB4 BIT(11)
+#define REG_TX_COMMAND_TXB5 BIT(12)
+#define REG_TX_COMMAND_TXB6 BIT(13)
+#define REG_TX_COMMAND_TXB7 BIT(14)
+#define REG_TX_COMMAND_TXB8 BIT(15)
+#define REG_TX_COMMAND_TXT_BUFFER_COUNT GENMASK(19, 16)
+
+/* TX_PRIORITY registers */
+#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
+#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
+#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
+#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+#define REG_TX_PRIORITY_TXT5P GENMASK(18, 16)
+#define REG_TX_PRIORITY_TXT6P GENMASK(22, 20)
+#define REG_TX_PRIORITY_TXT7P GENMASK(26, 24)
+#define REG_TX_PRIORITY_TXT8P GENMASK(30, 28)
+
+/* ERR_CAPT RETR_CTR ALC TS_INFO registers */
+#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
+#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_RETR_CTR_VAL GENMASK(11, 8)
+#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
+#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+#define REG_ERR_CAPT_TS_BITS GENMASK(29, 24)
+
+/* TRV_DELAY SSP_CFG registers */
+#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
+#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16)
+#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24)
+
+/* RX_FR_CTR registers */
+#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0)
+
+/* TX_FR_CTR registers */
+#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0)
+
+/* DEBUG_REGISTER registers */
+#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0)
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3)
+#define REG_DEBUG_REGISTER_PC_ARB BIT(6)
+#define REG_DEBUG_REGISTER_PC_CON BIT(7)
+#define REG_DEBUG_REGISTER_PC_DAT BIT(8)
+#define REG_DEBUG_REGISTER_PC_STC BIT(9)
+#define REG_DEBUG_REGISTER_PC_CRC BIT(10)
+#define REG_DEBUG_REGISTER_PC_CRCD BIT(11)
+#define REG_DEBUG_REGISTER_PC_ACK BIT(12)
+#define REG_DEBUG_REGISTER_PC_ACKD BIT(13)
+#define REG_DEBUG_REGISTER_PC_EOF BIT(14)
+#define REG_DEBUG_REGISTER_PC_INT BIT(15)
+#define REG_DEBUG_REGISTER_PC_SUSP BIT(16)
+#define REG_DEBUG_REGISTER_PC_OVR BIT(17)
+#define REG_DEBUG_REGISTER_PC_SOF BIT(18)
+
+/* YOLO_REG registers */
+#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0)
+
+/* TIMESTAMP_LOW registers */
+#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0)
+
+/* TIMESTAMP_HIGH registers */
+#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0)
+
+#endif
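
The control-register fields follow the same convention. As one concrete case, a nominal bit-timing write could look like the sketch below, where bt is a filled-in struct can_bittiming and ctucan_write32() stands in for the driver's own register accessor:

	u32 btr = FIELD_PREP(REG_BTR_PROP, bt->prop_seg) |
		  FIELD_PREP(REG_BTR_PH1, bt->phase_seg1) |
		  FIELD_PREP(REG_BTR_PH2, bt->phase_seg2) |
		  FIELD_PREP(REG_BTR_BRP, bt->brp) |
		  FIELD_PREP(REG_BTR_SJW, bt->sjw);

	ctucan_write32(priv, CTUCANFD_BTR, btr);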
diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c
new file mode 100644
index 000000000000..8f2956a8ae43
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ctucanfd.h"
+
+#ifndef PCI_DEVICE_DATA
+#define PCI_DEVICE_DATA(vend, dev, data) \
+.vendor = PCI_VENDOR_ID_##vend, \
+.device = PCI_DEVICE_ID_##vend##_##dev, \
+.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+.driver_data = (kernel_ulong_t)(data)
+#endif
+
+#ifndef PCI_VENDOR_ID_TEDIA
+#define PCI_VENDOR_ID_TEDIA 0x1760
+#endif
+
+#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21
+#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00
+#endif
+
+#define CTUCAN_BAR0_CTUCAN_ID 0x0000
+#define CTUCAN_BAR0_CRA_BASE 0x4000
+#define CYCLONE_IV_CRA_A2P_IE (0x0050)
+
+#define CTUCAN_WITHOUT_CTUCAN_ID 0
+#define CTUCAN_WITH_CTUCAN_ID 1
+
+struct ctucan_pci_board_data {
+ void __iomem *bar0_base;
+ void __iomem *cra_base;
+ void __iomem *bar1_base;
+ struct list_head ndev_list_head;
+ int use_msi;
+};
+
+static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev)
+{
+ return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev);
+}
+
+static void ctucan_pci_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ list_add(&priv->peers_on_pdev, &bdata->ndev_list_head);
+ priv->irq_flags = IRQF_SHARED;
+}
+
+/**
+ * ctucan_pci_probe - PCI registration call
+ * @pdev: Handle to the pci device structure
+ * @ent: Pointer to the entry from ctucan_pci_tbl
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long driver_data = ent->driver_data;
+ struct ctucan_pci_board_data *bdata;
+ void __iomem *addr;
+ void __iomem *cra_addr;
+ void __iomem *bar0_base;
+ u32 cra_a2p_ie;
+ u32 ctucan_id = 0;
+ int ret;
+ unsigned int ntxbufs;
+ unsigned int num_cores = 1;
+ unsigned int core_i = 0;
+ int irq;
+ int msi_ok = 0;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device FAILED\n");
+ goto err;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "pci_request_regions FAILED\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ msi_ok = 1;
+ }
+
+ dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 0),
+ (long long)pci_resource_len(pdev, 0));
+
+ dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 1),
+ (long long)pci_resource_len(pdev, 1));
+
+ addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!addr) {
+ dev_err(dev, "PCI BAR 1 cannot be mapped\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Cyclone IV PCI Express Control Registers Area */
+ bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!bar0_base) {
+ dev_err(dev, "PCI BAR 0 cannot be mapped\n");
+ ret = -EIO;
+ goto err_pci_iounmap_bar1;
+ }
+
+ if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
+ cra_addr = bar0_base;
+ num_cores = 2;
+ } else {
+ cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
+ ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
+ dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
+ num_cores = ctucan_id & 0xf;
+ }
+
+ irq = pdev->irq;
+
+ ntxbufs = 4;
+
+ bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
+ if (!bdata) {
+ ret = -ENOMEM;
+ goto err_pci_iounmap_bar0;
+ }
+
+ INIT_LIST_HEAD(&bdata->ndev_list_head);
+ bdata->bar0_base = bar0_base;
+ bdata->cra_base = cra_addr;
+ bdata->bar1_base = addr;
+ bdata->use_msi = msi_ok;
+
+ pci_set_drvdata(pdev, bdata);
+
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0)
+ goto err_free_board;
+
+ core_i++;
+
+ while (core_i < num_cores) {
+ addr += 0x4000;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0) {
+ dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+ core_i);
+ break;
+ }
+ core_i++;
+ }
+
+ /* enable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+ cra_a2p_ie |= 1;
+ iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+ return 0;
+
+err_free_board:
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+err_pci_iounmap_bar0:
+ pci_iounmap(pdev, bar0_base);
+err_pci_iounmap_bar1:
+ pci_iounmap(pdev, addr);
+err_release_regions:
+ if (msi_ok) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+/**
+ * ctucan_pci_remove - Unregister the device and release its resources
+ * @pdev: Handle to the pci device structure
+ *
+ * This function frees all the resources allocated to the device.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ctucan_priv *priv = NULL;
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ dev_dbg(&pdev->dev, "ctucan_remove\n");
+
+ if (!bdata) {
+ dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+ return;
+ }
+
+ /* disable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ if (bdata->cra_base)
+ iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
+ while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+ peers_on_pdev)) != NULL) {
+ ndev = priv->can.dev;
+
+ unregister_candev(ndev);
+
+ netif_napi_del(&priv->napi);
+
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ }
+
+ pci_iounmap(pdev, bdata->bar1_base);
+
+ if (bdata->use_msi) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_iounmap(pdev, bdata->bar0_base);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+ {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+ CTUCAN_WITH_CTUCAN_ID)},
+ {},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ctucan_pci_tbl,
+ .probe = ctucan_pci_probe,
+ .remove = ctucan_pci_remove,
+ .driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..f83684f006ea
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+
+ platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ int ret;
+ unsigned int ntxbufs;
+ int irq;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err;
+ }
+
+ /* The number of TX buffers might change in future HW revisions. If
+ * so, it will be passed as a property via the device tree (see the
+ * sketch after this function).
+ */
+ ntxbufs = 4;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+ 1, ctucan_platform_set_drvdata);
+
+ if (ret < 0)
+ platform_set_drvdata(pdev, NULL);
+
+err:
+ return ret;
+}
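
As the comment in the probe function notes, the TXT buffer count may eventually come from the device tree. A sketch of such probing, assuming a hypothetical "ctu,ntxbufs" property (not a defined binding), keeping the default of 4 when the property is absent:

	u32 ntxbufs = 4;

	/* "ctu,ntxbufs" is a hypothetical property, illustration only;
	 * on error the default value above is left untouched
	 */
	of_property_read_u32(dev->of_node, "ctu,ntxbufs", &ntxbufs);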
+
+/**
+ * ctucan_platform_remove - Unregister the device and release its resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int ctucan_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "ctucan_remove\n");
+
+ unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume);
+
+/* Match table for OF platform binding */
+static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,ctucanfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_platform_probe,
+ .remove = ctucan_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &ctucan_platform_pm_ops,
+ .of_match_table = ctucan_of_match,
+ },
+};
+
+module_platform_driver(ctucanfd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek");
+MODULE_DESCRIPTION("CTU CAN FD for platform");
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 3e2e207861fc..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += bittiming.o
-can-dev-y += dev.o
-can-dev-y += length.o
-can-dev-y += netlink.o
-can-dev-y += rx-offload.o
-can-dev-y += skb.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-$(CONFIG_CAN_LEDS) += led.o
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += dev.o
+can-dev-$(CONFIG_CAN_NETLINK) += length.o
+can-dev-$(CONFIG_CAN_NETLINK) += netlink.o
+can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index d5fca3bfaf9a..7ae80763c960 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -4,214 +4,17 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/units.h>
#include <linux/can/dev.h>
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * KILO /* BPS */)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * KILO /* BPS */)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-
-void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
- const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
-
-{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
- return;
-
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
-
- /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
- * delay compensation" (TDC) is only applicable if data BRP is
- * one or two.
- */
- if (dbt->brp == 1 || dbt->brp == 2) {
- /* Sample point in clock periods */
- u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- if (sample_point_in_tc < tdc_const->tdco_min)
- return;
- tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
- }
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
- struct can_priv *priv = netdev_priv(dev);
+ const struct can_priv *priv = netdev_priv(dev);
unsigned int tseg1, alltseg;
u64 brp64;
@@ -244,25 +47,21 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* Checks the validity of predefined bitrate settings */
static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
{
- struct can_priv *priv = netdev_priv(dev);
unsigned int i;
for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
- break;
+ return 0;
}
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
+ return -EINVAL;
}
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
new file mode 100644
index 000000000000..d3caa040614d
--- /dev/null
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/units.h>
+#include <linux/can/dev.h>
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ const unsigned int sample_point_nominal, const unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500 * KILO /* BPS */)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+{
+ if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ return;
+
+ *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC) is only applicable if data BRP is
+ * one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ }
+}
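
A worked example of the search above, assuming clock.freq = 40 MHz, a requested bitrate of 500 kbit/s, and controller limits that admit the result: the CiA default sample point for 500 kbit/s is 875 per mille; brp = 1 gives 40 MHz / (1 * 500 kHz) = 80 time quanta per bit, so tseg1 + tseg2 = 79 after the sync segment. Choosing tseg2 = 80 * 125 / 1000 = 10 yields sample_point = 1000 * (80 - 10) / 80 = 875 and bitrate = 40 MHz / (1 * 80) = 500 kbit/s, both exact, so the loop terminates early. The code then splits tseg1 = 69 into prop_seg = 34 and phase_seg1 = 35, keeps phase_seg2 = 10, and reports tq = 10^9 * brp / 40 MHz = 25 ns.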
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c192f25f9695..c1956b1e9faf 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,7 +4,6 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
@@ -14,16 +13,9 @@
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
-#include <linux/can/led.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
{
@@ -154,7 +146,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx_ni(skb);
+ netif_rx(skb);
restart:
netdev_dbg(dev, "restarted\n");
@@ -330,6 +322,56 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
+/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
+ * supporting hardware timestamps
+ */
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_ON;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(can_eth_ioctl_hwts);
+
+/* generic implementation of ethtool_ops::get_ts_info for CAN devices
+ * supporting hardware timestamps
+ */
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
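
A sketch of how a driver with hardware TX/RX timestamping might wire these generic helpers in (the my_* names are placeholders, not an existing driver):

	static const struct net_device_ops my_netdev_ops = {
		.ndo_open	= my_open,
		.ndo_stop	= my_stop,
		.ndo_start_xmit	= my_start_xmit,
		.ndo_eth_ioctl	= can_eth_ioctl_hwts,
	};

	static const struct ethtool_ops my_ethtool_ops = {
		.get_ts_info	= can_ethtool_op_get_ts_info_hwts,
	};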
+
/* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
@@ -512,11 +554,9 @@ static __init int can_dev_init(void)
{
int err;
- can_led_notifier_init();
-
err = can_netlink_register();
if (!err)
- pr_info(MOD_DESC "\n");
+ pr_info("CAN device driver interface\n");
return err;
}
@@ -525,8 +565,6 @@ module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
can_netlink_unregister();
-
- can_led_notifier_exit();
}
module_exit(can_dev_exit);
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 7633d98e3912..8efa22d9f214 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -176,7 +176,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->bittiming_const && !priv->do_set_bittiming)
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
return -EOPNOTSUPP;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
@@ -278,7 +279,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
+ !priv->data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
@@ -509,7 +511,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate &&
+ if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
+ priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming)) ||
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 7f80d8e1e750..81ebf0562c89 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -70,8 +70,6 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
napi_reschedule(&offload->napi);
}
- can_led_event(offload->dev, CAN_LED_EVENT_RX);
-
return work_done;
}
@@ -221,7 +219,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
@@ -240,7 +238,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
@@ -249,14 +247,14 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
@@ -331,13 +329,14 @@ static int can_rx_offload_init_queue(struct net_device *dev,
{
offload->dev = dev;
- /* Limit queue len to 4x the weight (rounted to next power of two) */
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
__skb_queue_head_init(&offload->skb_irq_queue);
- netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+ netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
+ weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
__func__, offload->skb_queue_len_max);
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 61660248c69e..241ec636e91f 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -5,6 +5,13 @@
*/
#include <linux/can/dev.h>
+#include <linux/module.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
/* Local echo of CAN messages
*
@@ -64,6 +71,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* save frame_len to reuse it when transmission is completed */
can_skb_prv(skb)->frame_len = frame_len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
skb_tx_timestamp(skb);
/* save this skb for tx interrupt echo handling */
@@ -80,8 +90,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
- unsigned int *frame_len_ptr)
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
@@ -97,13 +107,12 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
+ *len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
@@ -133,7 +142,7 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
@@ -177,6 +186,20 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -190,16 +213,8 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
@@ -221,23 +236,51 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
}
skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
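
A minimal rx-path sketch for the new CAN XL allocator; dev, payload and payload_len are assumptions of the example. Note that alloc_canxl_skb() already presets cxl->flags (CANXL_XLF) and cxl->len:

	struct canxl_frame *cxl;
	struct sk_buff *skb;

	skb = alloc_canxl_skb(dev, &cxl, payload_len);
	if (!skb)
		return;		/* out of memory or invalid data_len */

	cxl->prio = 0x242;	/* 11-bit priority field */
	memcpy(cxl->data, payload, payload_len);
	netif_rx(skb);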
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -252,3 +295,75 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
+ }
+
+ return true;
+}
+
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
+ goto inval_skb;
+ }
+
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);
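
A typical use of this check is to guard the top of ndo_start_xmit, as in the sketch below (my_start_xmit is a placeholder); the flexcan hunk further down calls the can_dev_dropped_skb() wrapper instead, which additionally drops frames while the controller is in listen-only mode:

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;	/* skb was freed and counted */

		/* hand the validated frame to the hardware here */
		return NETDEV_TX_OK;
	}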
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 0bff1884d5cc..9bdadd716f4e 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -14,7 +14,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/imx/sci.h>
@@ -296,44 +295,45 @@ static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -341,23 +341,23 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
@@ -365,8 +365,8 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -722,11 +722,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = __flexcan_get_berr_counter(dev, bec);
@@ -744,7 +742,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16);
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -844,7 +842,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -891,7 +889,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -943,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -976,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
@@ -1082,7 +1080,6 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
can_rx_offload_get_echo_skb(&priv->offload, 0,
reg_ctrl << 16, NULL);
stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
@@ -1699,11 +1696,9 @@ static int flexcan_open(struct net_device *dev)
return -EINVAL;
}
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = open_candev(dev);
if (err)
@@ -1741,8 +1736,6 @@ static int flexcan_open(struct net_device *dev)
flexcan_chip_interrupts_enable(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -1788,8 +1781,6 @@ static int flexcan_close(struct net_device *dev)
pm_runtime_put(priv->dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -2094,20 +2085,20 @@ static int flexcan_probe(struct platform_device *pdev)
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!((devtype_data->quirks &
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) ==
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) {
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) {
dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
return -EINVAL;
}
if ((devtype_data->quirks &
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) {
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) {
dev_err(&pdev->dev,
"Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n",
devtype_data->quirks);
@@ -2122,7 +2113,7 @@ static int flexcan_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &flexcan_netdev_ops;
- flexcan_set_ethtool_ops(dev);
+ dev->ethtool_ops = &flexcan_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -2186,13 +2177,11 @@ static int flexcan_probe(struct platform_device *pdev)
err = flexcan_setup_stop_mode(pdev);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "setup stop mode failed\n");
+ dev_err_probe(&pdev->dev, err, "setup stop mode failed\n");
goto failed_setup_stop_mode;
}
of_can_transceiver(dev);
- devm_can_led_init(dev);
return 0;
diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c
index 3ae535577700..50e86b2da532 100644
--- a/drivers/net/can/flexcan/flexcan-ethtool.c
+++ b/drivers/net/can/flexcan/flexcan-ethtool.c
@@ -100,15 +100,11 @@ static int flexcan_get_sset_count(struct net_device *netdev, int sset)
}
}
-static const struct ethtool_ops flexcan_ethtool_ops = {
+const struct ethtool_ops flexcan_ethtool_ops = {
.get_ringparam = flexcan_get_ringparam,
.get_strings = flexcan_get_strings,
.get_priv_flags = flexcan_get_priv_flags,
.set_priv_flags = flexcan_set_priv_flags,
.get_sset_count = flexcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void flexcan_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &flexcan_ethtool_ops;
-}
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index fccdff8b1f0f..025c3417031f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -21,7 +21,7 @@
* Below is some version info we got:
*    SOC     Version  IP-Version  Glitch- [TR]WRN_INT IRQ Err     Memory err RTR rece-   FD Mode    MB
*                                 Filter? connected?      Passive detection  ption in MB Supported?
- * MCF5441X FlexCAN2  ?           no      yes             no      no         yes         no         16
+ * MCF5441X FlexCAN2  ?           no      yes             no      no         no          no         16
*    MX25    FlexCAN2 03.00.00.00 no      no              no      no         no          no         64
*    MX28    FlexCAN2 03.00.04.00 yes     yes             no      no         no          no         64
*    MX35    FlexCAN2 03.00.00.00 no      no              no      no         no          no         64
@@ -63,11 +63,11 @@
/* Setup 16 mailboxes */
#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
/* Device supports RX via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX BIT(14)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14)
/* Device supports RTR reception via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR BIT(15)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_FIFO BIT(16)
+#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -114,14 +114,14 @@ struct flexcan_priv {
void (*write)(u32 val, void __iomem *addr);
};
-void flexcan_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops flexcan_ethtool_ops;
static inline bool
flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX;
}
static inline bool
@@ -129,10 +129,10 @@ flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return (quirks & (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR);
+ return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR);
}
static inline bool
@@ -140,7 +140,7 @@ flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_FIFO;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO;
}
static inline bool
@@ -149,7 +149,7 @@ flexcan_active_rx_rtr(const struct flexcan_priv *priv)
const u32 quirks = priv->devtype_data.quirks;
if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
- if (quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)
+ if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)
return true;
} else {
/* RX-FIFO is always RTR capable */
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index d0c5a7a60daf..4bedcc3eea0d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
@@ -241,13 +242,14 @@ struct grcan_device_config {
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
struct grcan_priv {
struct can_priv can; /* must be the first member */
struct net_device *dev;
+ struct device *ofdev_dev;
struct napi_struct napi;
struct grcan_registers __iomem *regs; /* ioremap'ed registers */
@@ -670,6 +672,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
/* There are no others at this point */
break;
}
+ cf.can_id |= CAN_ERR_CNT;
cf.data[6] = txerr;
cf.data[7] = rxerr;
priv->can.state = state;
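CAN_ERR_CNT, newly set in most drivers of this series, is an error-frame class flag from include/uapi/linux/can/error.h telling userspace that the error counters in the frame payload are valid (value quoted from mainline):

#define CAN_ERR_CNT 0x00000200U /* TX error counter in data[6], RX error counter in data[7] */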
@@ -921,7 +924,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
struct grcan_priv *priv = netdev_priv(dev);
struct grcan_dma *dma = &priv->dma;
- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
dma->base_handle);
memset(dma, 0, sizeof(*dma));
}
@@ -946,7 +949,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
- dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
dma->base_size,
&dma->base_handle,
GFP_KERNEL);
@@ -1102,8 +1105,10 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
+ spin_unlock_irqrestore(&priv->lock, flags);
del_timer_sync(&priv->hang_timer);
del_timer_sync(&priv->rr_timer);
+ spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
grcan_stop_hardware(dev);
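The unlock/lock pair wrapped around the timer teardown above avoids a self-deadlock: del_timer_sync() waits for a running timer handler to finish, and grcan's timer handlers take priv->lock themselves. A minimal sketch of the hazard, with the callback body assumed for illustration:

static void hang_timer_fn(struct timer_list *t)
{
	struct grcan_priv *priv = from_timer(priv, t, hang_timer);
	unsigned long flags;

	/* blocks for as long as grcan_close() holds the lock ... */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... while grcan_close(), still holding the lock, would sit
	 * in del_timer_sync() waiting for this handler to return
	 */
	spin_unlock_irqrestore(&priv->lock, flags);
}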
@@ -1122,7 +1127,7 @@ static int grcan_close(struct net_device *dev)
return 0;
}
-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -1130,7 +1135,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
spin_lock_irqsave(&priv->lock, flags);
- work_done = catch_up_echo_skb(dev, budget, true);
+ work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@@ -1144,8 +1149,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@@ -1227,19 +1230,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
- int tx_work_done, rx_work_done;
- int rx_budget = budget / 2;
- int tx_budget = budget - rx_budget;
+ int work_done;
- /* Half of the budget for receiving messages */
- rx_work_done = grcan_receive(dev, rx_budget);
+ work_done = grcan_receive(dev, budget);
- /* Half of the budget for transmitting messages as that can trigger echo
- * frames being received
- */
- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+ grcan_transmit_catch_up(dev);
- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@@ -1256,7 +1253,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&priv->lock, flags);
}
- return rx_work_done + tx_work_done;
+ return work_done;
}
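The rework above drops the artificial RX/TX budget split and follows the standard NAPI contract: only RX consumes budget, TX completions are reaped unbudgeted, and napi_complete() runs only when the poll did not use up its budget. Condensed to the pattern (do_rx()/reap_tx() are hypothetical stand-ins):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	work_done = do_rx(napi, budget);	/* bounded by the budget */
	reap_tx(napi);				/* TX completion, unbudgeted */

	if (work_done < budget)
		napi_complete(napi);		/* quiet: re-arm interrupts */

	return work_done;
}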
/* Work tx bug by waiting while for the risky situation to clear. If that fails,
@@ -1348,7 +1345,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
unsigned long flags;
u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
/* Trying to transmit in silent mode will generate error interrupts, but
@@ -1565,6 +1562,10 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops grcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
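Hooking .get_ts_info up to the stock ethtool_op_get_ts_info() is the minimal way to advertise software timestamping, and the same two-liner recurs in nearly every driver below. Abridged from the mainline ethtool core:

int ethtool_op_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock */
	return 0;
}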
static int grcan_setup_netdev(struct platform_device *ofdev,
void __iomem *base,
int irq, u32 ambafreq, bool txbug)
@@ -1581,12 +1582,14 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
dev->irq = irq;
dev->flags |= IFF_ECHO;
dev->netdev_ops = &grcan_netdev_ops;
+ dev->ethtool_ops = &grcan_ethtool_ops;
dev->sysfs_groups[0] = &sysfs_grcan_group;
priv = netdev_priv(dev);
memcpy(&priv->config, &grcan_module_config,
sizeof(struct grcan_device_config));
priv->dev = dev;
+ priv->ofdev_dev = &ofdev->dev;
priv->regs = base;
priv->can.bittiming_const = &grcan_bittiming_const;
priv->can.do_set_bittiming = grcan_set_bittiming;
@@ -1613,7 +1616,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0);
}
- netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+ netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
SET_NETDEV_DEV(dev, &ofdev->dev);
dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
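netif_napi_add() lost its weight parameter in this cycle: the plain call now implies the default NAPI_POLL_WEIGHT of 64, and only drivers that genuinely want a non-default weight, like grcan with GRCAN_NAPI_WEIGHT here, spell it out via netif_napi_add_weight(). In short:

netif_napi_add(dev, &priv->napi, my_poll);		/* default weight, 64 */
netif_napi_add_weight(dev, &priv->napi, my_poll, 8);	/* explicit weight */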
@@ -1639,6 +1642,7 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device_node *sysid_parent;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1647,10 +1651,14 @@ static int grcan_probe(struct platform_device *ofdev)
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
- err = of_property_read_u32(np, "systemid", &sysid);
- if (!err && ((sysid & GRLIB_VERSION_MASK)
- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
- txbug = false;
+ sysid_parent = of_find_node_by_path("/ambapp0");
+ if (sysid_parent) {
+ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
+ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+ of_node_put(sysid_parent);
+ }
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {
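The systemid is now read from the /ambapp0 bus node rather than the GRCAN node itself. Note the refcounting that comes with the new lookup: of_find_node_by_path() returns its node with an elevated reference count, so every successful lookup must be paired with of_node_put(), as done above. The general pattern:

struct device_node *np;

np = of_find_node_by_path("/ambapp0");
if (np) {
	/* ... of_property_read_u32(np, "systemid", &sysid) ... */
	of_node_put(np);	/* drop the reference the lookup took */
}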
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index b0a3473f211d..07eaf724a572 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -345,9 +346,6 @@ static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota)
rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
}
- if (pkts)
- can_led_event(ndev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -495,7 +493,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -504,7 +502,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
@@ -626,7 +624,6 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
if (isr & tx_irq_mask)
@@ -830,7 +827,6 @@ static int ifi_canfd_open(struct net_device *ndev)
ifi_canfd_start(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -853,8 +849,6 @@ static int ifi_canfd_close(struct net_device *ndev)
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -866,7 +860,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
u32 txst, txid, txdlc;
int i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
/* Check if the TX buffer is full */
@@ -932,6 +926,10 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ifi_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int ifi_canfd_plat_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -969,12 +967,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
ndev->irq = irq;
ndev->flags |= IFF_ECHO; /* we support local echo */
ndev->netdev_ops = &ifi_canfd_netdev_ops;
+ ndev->ethtool_ops = &ifi_canfd_ethtool_ops;
priv = netdev_priv(ndev);
priv->ndev = ndev;
priv->base = addr;
- netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
@@ -1004,8 +1003,6 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
goto err_reg;
}
- devm_can_led_init(ndev);
-
dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n",
priv->base, ndev->irq, priv->can.clock.freq);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 808c105cf8f7..0732a5092141 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
@@ -1127,7 +1128,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* bus error interrupt */
if (isrc == CEVTIND_BEI) {
mod->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
switch (ecc & ECC_MASK) {
case ECC_BIT:
@@ -1153,7 +1154,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
state == CAN_STATE_ERROR_PASSIVE)) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (state == CAN_STATE_ERROR_WARNING) {
mod->can.can_stats.error_warning++;
cf->data[1] = (txerr > rxerr) ?
@@ -1277,6 +1278,8 @@ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
if (!skb)
return;
+ skb_tx_timestamp(skb);
+
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
}
@@ -1690,7 +1693,7 @@ static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
void __iomem *desc_addr;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
spin_lock_irqsave(&mod->lock, flags);
@@ -1752,6 +1755,10 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ican3_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/*
* Low-level CAN Device
*/
@@ -1910,7 +1917,7 @@ static int ican3_probe(struct platform_device *pdev)
mod = netdev_priv(ndev);
mod->ndev = ndev;
mod->num = pdata->modno;
- netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
skb_queue_head_init(&mod->echoq);
spin_lock_init(&mod->lock);
init_completion(&mod->termination_comp);
@@ -1923,6 +1930,7 @@ static int ican3_probe(struct platform_device *pdev)
mod->free_page = DPM_FREE_START;
ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->ethtool_ops = &ican3_ethtool_ops;
ndev->flags |= IFF_ECHO;
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 017f2d36ffc3..bcad11709bc9 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
@@ -328,12 +329,9 @@ MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
u32 res;
- int ret;
-
- ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
- res, res & msk, 0, 10);
- return ret;
+ return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
}
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
@@ -774,7 +772,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
int nwords;
u8 count;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
@@ -919,10 +917,15 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
int i;
@@ -939,6 +942,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can = netdev_priv(netdev);
netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
+ netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
@@ -1306,7 +1310,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
can->kv_pcie->freq_to_ticks_div));
- cf->can_id |= CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
deleted file mode 100644
index db14897f8e16..000000000000
--- a/drivers/net/can/led.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be>
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/can/dev.h>
-
-#include <linux/can/led.h>
-
-static unsigned long led_delay = 50;
-module_param(led_delay, ulong, 0644);
-MODULE_PARM_DESC(led_delay,
- "blink delay time for activity leds (msecs, default: 50).");
-
-/* Trigger a LED event in response to a CAN device event */
-void can_led_event(struct net_device *netdev, enum can_led_event event)
-{
- struct can_priv *priv = netdev_priv(netdev);
-
- switch (event) {
- case CAN_LED_EVENT_OPEN:
- led_trigger_event(priv->tx_led_trig, LED_FULL);
- led_trigger_event(priv->rx_led_trig, LED_FULL);
- led_trigger_event(priv->rxtx_led_trig, LED_FULL);
- break;
- case CAN_LED_EVENT_STOP:
- led_trigger_event(priv->tx_led_trig, LED_OFF);
- led_trigger_event(priv->rx_led_trig, LED_OFF);
- led_trigger_event(priv->rxtx_led_trig, LED_OFF);
- break;
- case CAN_LED_EVENT_TX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->tx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- case CAN_LED_EVENT_RX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->rx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- }
-}
-EXPORT_SYMBOL_GPL(can_led_event);
-
-static void can_led_release(struct device *gendev, void *res)
-{
- struct can_priv *priv = netdev_priv(to_net_dev(gendev));
-
- led_trigger_unregister_simple(priv->tx_led_trig);
- led_trigger_unregister_simple(priv->rx_led_trig);
- led_trigger_unregister_simple(priv->rxtx_led_trig);
-}
-
-/* Register CAN LED triggers for a CAN device
- *
- * This is normally called from a driver's probe function
- */
-void devm_can_led_init(struct net_device *netdev)
-{
- struct can_priv *priv = netdev_priv(netdev);
- void *res;
-
- res = devres_alloc(can_led_release, 0, GFP_KERNEL);
- if (!res) {
- netdev_err(netdev, "cannot register LED triggers\n");
- return;
- }
-
- snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name),
- "%s-tx", netdev->name);
- snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
- "%s-rx", netdev->name);
- snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
- "%s-rxtx", netdev->name);
-
- led_trigger_register_simple(priv->tx_led_trig_name,
- &priv->tx_led_trig);
- led_trigger_register_simple(priv->rx_led_trig_name,
- &priv->rx_led_trig);
- led_trigger_register_simple(priv->rxtx_led_trig_name,
- &priv->rxtx_led_trig);
-
- devres_add(&netdev->dev, res);
-}
-EXPORT_SYMBOL_GPL(devm_can_led_init);
-
-/* NETDEV rename notifier to rename the associated led triggers too */
-static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct can_priv *priv = safe_candev_priv(netdev);
- char name[CAN_LED_NAME_SZ];
-
- if (!priv)
- return NOTIFY_DONE;
-
- if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig)
- return NOTIFY_DONE;
-
- if (msg == NETDEV_CHANGENAME) {
- snprintf(name, sizeof(name), "%s-tx", netdev->name);
- led_trigger_rename_static(name, priv->tx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rx", netdev->name);
- led_trigger_rename_static(name, priv->rx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rxtx", netdev->name);
- led_trigger_rename_static(name, priv->rxtx_led_trig);
- }
-
- return NOTIFY_DONE;
-}
-
-/* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
- .notifier_call = can_led_notifier,
-};
-
-int __init can_led_notifier_init(void)
-{
- return register_netdevice_notifier(&can_netdev_notifier);
-}
-
-void __exit can_led_notifier_exit(void)
-{
- unregister_netdevice_notifier(&can_netdev_notifier);
-}
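With this deletion the CAN-specific LED trigger layer is gone entirely; per the upstream rationale, the generic netdev LED trigger provides the same blink-on-activity functionality and more, so the drivers below only lose their can_led_event()/devm_can_led_init() calls and gain no replacement code.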
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 45ad1b3f0cd0..fc2afab36279 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_M_CAN
tristate "Bosch M_CAN support"
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Bosch M_CAN controller framework.
This is common support for devices that embed the Bosch M_CAN IP.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 5b47cd867783..00d11e95fd98 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -9,6 +9,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -77,9 +78,6 @@ enum m_can_reg {
M_CAN_TXEFA = 0xf8,
};
-/* napi related */
-#define M_CAN_NAPI_WEIGHT 64
-
/* message ram configuration data length */
#define MRAM_CFG_LEN 8
@@ -336,6 +334,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
@@ -346,6 +347,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
@@ -458,7 +462,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
struct net_device_stats *stats = &cdev->net->stats;
int err;
- err = can_rx_offload_queue_sorted(&cdev->offload, skb,
+ err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -526,7 +530,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
/* acknowledge rx fifo 0 */
m_can_write(cdev, M_CAN_RXF0A, fgi);
- timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp);
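The << 16 is the substantive fix here (and in the TX-event hunk below): M_CAN's hardware timestamp counter is only 16 bits wide, while the rx-offload layer orders frames using wrap-safe 32-bit arithmetic, so the counter is scaled up to span the full 32-bit range. Sketch of the ordering idiom this assumes (illustrative helper, not the literal rx-offload code):

/* wrap-safe "a is newer than b" for a free-running 32-bit counter */
static inline bool ts_newer_than(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}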
@@ -562,9 +566,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
rxfs = m_can_read(cdev, M_CAN_RXF0S);
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -741,7 +742,7 @@ static int m_can_handle_state_change(struct net_device *dev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -750,7 +751,7 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -945,7 +946,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
- work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
+ work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
@@ -1030,7 +1031,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
}
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
- timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
/* ack txe element */
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
@@ -1084,8 +1085,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_tx_update_stats(cdev, 0, timestamp);
-
- can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
}
} else {
@@ -1094,7 +1093,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
- can_led_event(dev, CAN_LED_EVENT_TX);
if (netif_queue_stopped(dev) &&
!m_can_tx_fifo_full(cdev))
netif_wake_queue(dev);
@@ -1351,10 +1349,12 @@ static void m_can_chip_config(struct net_device *dev)
/* set bittiming params */
m_can_set_bittiming(dev);
- /* enable internal timestamp generation, with a prescalar of 16. The
- * prescalar is applied to the nominal bit timing
+ /* enable internal timestamp generation, with a prescaler of 16. The
+ * prescaler is applied to the nominal bit timing
*/
- m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+ m_can_write(cdev, M_CAN_TSCC,
+ FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+ FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
m_can_config_endisable(cdev, false);
@@ -1467,8 +1467,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
}
if (!cdev->is_peripheral)
- netif_napi_add(dev, &cdev->napi,
- m_can_poll, M_CAN_NAPI_WEIGHT);
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
@@ -1489,34 +1488,22 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_30X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
@@ -1571,7 +1558,6 @@ static int m_can_close(struct net_device *dev)
can_rx_offload_disable(&cdev->offload);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
phy_power_off(cdev->transceiver);
@@ -1631,8 +1617,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
if (err)
goto out_fail;
- can_put_echo_skb(skb, dev, 0, 0);
-
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~CCCR_CMR_MASK;
@@ -1649,6 +1633,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
m_can_write(cdev, M_CAN_CCCR, cccr);
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+
+ can_put_echo_skb(skb, dev, 0, 0);
+
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
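Reordering note for the hunk above: can_put_echo_skb() hands the skb over to the echo-skb machinery and it may be freed at that point, so the call has to sit after the last dereference of skb/cfd (the CAN FD checks feeding CCCR) but still before the TXBAR write that requests transmission, so that the echo entry exists by the time the TX-complete interrupt can fire.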
@@ -1734,7 +1721,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (cdev->is_peripheral) {
@@ -1814,8 +1801,6 @@ static int m_can_open(struct net_device *dev)
/* start the m_can controller */
m_can_start(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
if (!cdev->is_peripheral)
napi_enable(&cdev->napi);
@@ -1844,10 +1829,15 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops m_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
@@ -1987,7 +1977,7 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
- M_CAN_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
if (ret)
goto clk_disable;
}
@@ -2003,8 +1993,6 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto rx_offload_del;
}
- devm_can_led_init(cdev->net);
-
of_can_transceiver(cdev->net);
dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 2c5d40997168..4c0267f9f297 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -7,7 +7,6 @@
#define _CAN_M_CAN_H_
#include <linux/can/core.h>
-#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -85,9 +84,6 @@ struct m_can_classdev {
struct sk_buff *tx_skb;
struct phy *transceiver;
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
-
struct m_can_ops *ops;
int version;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index b56a54d6c5a9..8f184a852a0a 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,14 +18,9 @@
#define M_CAN_PCI_MMIO_BAR 0
+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
-struct m_can_pci_config {
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
- unsigned int clock_freq;
-};
-
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 128,
- .sjw_max = 128,
- .brp_min = 1,
- .brp_max = 512,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 16,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
- .bit_timing = &m_can_bittiming_const_ehl,
- .data_timing = &m_can_data_bittiming_const_ehl,
- .clock_freq = 200000000,
-};
-
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
- const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
- cfg = (const struct m_can_pci_config *)id->driver_data;
-
priv = cdev_to_priv(mcan_class);
priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->bit_timing = cfg->bit_timing;
- mcan_class->data_timing = cfg->data_timing;
- mcan_class->can.clock.freq = cfg->clock_freq;
+ mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
- { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 04687b15b250..41645a24384c 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -388,7 +388,7 @@ out_power:
return ret;
}
-static int tcan4x5x_can_remove(struct spi_device *spi)
+static void tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
@@ -397,8 +397,6 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
tcan4x5x_power_enable(priv->power, 0);
m_can_class_free_dev(priv->cdev.net);
-
- return 0;
}
static const struct of_device_id tcan4x5x_of_match[] = {
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
index ca80dbaf7a3f..26e212b8ca7a 100644
--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -12,7 +12,7 @@
#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
-#define TCAN4X5X_MAX_REGISTER 0x8ffc
+#define TCAN4X5X_MAX_REGISTER 0x87fc
static int tcan4x5x_regmap_gather_write(void *context,
const void *reg, size_t reg_len,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index de4ddf79ba9b..b0ed798ae70f 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <linux/clk.h>
@@ -61,7 +63,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
@@ -320,14 +322,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
&mscan_clksrc);
if (!priv->can.clock.freq) {
dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
- goto exit_free_mscan;
+ goto exit_put_clock;
}
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_free_mscan;
+ goto exit_put_clock;
}
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -335,7 +337,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return 0;
-exit_free_mscan:
+exit_put_clock:
+ if (data->put_clock)
+ data->put_clock(ofdev);
free_candev(dev);
exit_dispose_irq:
irq_dispose_mapping(irq);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 5b5802fac772..a6829cdc0e81 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -191,7 +191,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -616,6 +616,10 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mscan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
@@ -676,10 +680,11 @@ struct net_device *alloc_mscandev(void)
priv = netdev_priv(dev);
dev->netdev_ops = &mscan_netdev_ops;
+ dev->ethtool_ops = &mscan_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
- netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
+ netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 888bef03de09..2a44b2803e55 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -489,6 +490,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
if (!skb)
return;
+ errc = ioread32(&priv->regs->errc);
if (status & PCH_BUS_OFF) {
pch_can_set_tx_all(priv, 0);
pch_can_set_rx_all(priv, 0);
@@ -496,9 +498,12 @@ static void pch_can_error(struct net_device *ndev, u32 status)
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
+ } else {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = errc & PCH_TEC;
+ cf->data[7] = (errc & PCH_REC) >> 8;
}
- errc = ioread32(&priv->regs->errc);
/* Warning interrupt. */
if (status & PCH_EWARN) {
state = CAN_STATE_ERROR_WARNING;
@@ -556,9 +561,6 @@ static void pch_can_error(struct net_device *ndev, u32 status)
break;
}
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
-
priv->can.state = state;
netif_receive_skb(skb);
}
@@ -880,7 +882,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
int i;
u32 id2;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
tx_obj_no = priv->tx_obj;
@@ -937,6 +939,10 @@ static const struct net_device_ops pch_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops pch_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void pch_can_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
@@ -1187,9 +1193,10 @@ static int pch_can_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &pch_can_netdev_ops;
+ ndev->ethtool_ops = &pch_can_ethtool_ops;
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
- netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
+ netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
rc = pci_enable_msi(priv->dev);
if (rc) {
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index b2dea360813d..31c9c127e24b 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -7,6 +7,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/ethtool.h>
#include "peak_canfd_user.h"
@@ -373,7 +374,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -386,7 +387,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -430,7 +431,7 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
return -ENOMEM;
}
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
cf->data[6] = priv->bec.txerr;
@@ -650,7 +651,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
int room_left;
u8 len;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
@@ -742,13 +743,59 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_canfd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static int peak_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops peak_canfd_ethtool_ops = {
+ .get_ts_info = peak_get_ts_info,
+};
+
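For completeness, the shape of a userspace request the new ndo_eth_ioctl accepts: RX hardware timestamping on this hardware is always on, so the only configuration that succeeds is TX off plus the all-frames RX filter. A hedged usage sketch (helper name and error handling are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_rx_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type	= HWTSTAMP_TX_OFF,
		.rx_filter	= HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}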
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
int echo_skb_max)
{
@@ -789,6 +836,7 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
ndev->flags |= IFF_ECHO;
ndev->netdev_ops = &peak_canfd_netdev_ops;
+ ndev->ethtool_ops = &peak_canfd_ethtool_ops;
ndev->dev_id = index;
return ndev;
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 33e37395379d..cc43c9c5e38c 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -10,9 +10,9 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
-#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -233,11 +233,8 @@ static void rcar_can_error(struct net_device *ndev)
if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
txerr = readb(&priv->regs->tecr);
rxerr = readb(&priv->regs->recr);
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_CRTL;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
}
if (eifr & RCAR_CAN_EIFR_BEIF) {
int rx_errors = 0, tx_errors = 0;
@@ -337,6 +334,10 @@ static void rcar_can_error(struct net_device *ndev)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
if (eifr & RCAR_CAN_EIFR_ORIF) {
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
@@ -389,7 +390,6 @@ static void rcar_can_tx_done(struct net_device *ndev)
/* Clear interrupt */
isr = readb(&priv->regs->isr);
writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
@@ -531,7 +531,6 @@ static int rcar_can_open(struct net_device *ndev)
ndev->irq, err);
goto out_close;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
rcar_can_start(ndev);
netif_start_queue(ndev);
return 0;
@@ -581,7 +580,6 @@ static int rcar_can_close(struct net_device *ndev)
clk_disable_unprepare(priv->can_clk);
clk_disable_unprepare(priv->clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -592,7 +590,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
u32 data, i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -633,6 +631,10 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
@@ -666,8 +668,6 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
}
stats->rx_packets++;
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
-
netif_receive_skb(skb);
}
@@ -790,6 +790,7 @@ static int rcar_can_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->ethtool_ops = &rcar_can_ethtool_ops;
ndev->irq = irq;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
@@ -803,8 +804,8 @@ static int rcar_can_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
- RCAR_CAN_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed, error %d\n",
@@ -812,8 +813,6 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail_candev;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b7dc1c32875f..b306cf554634 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -27,9 +27,9 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
-#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -44,6 +44,7 @@
enum rcanfd_chip_id {
RENESAS_RCAR_GEN3 = 0,
RENESAS_RZG2L,
+ RENESAS_R8A779A0,
};
/* Global register bits */
@@ -79,27 +80,33 @@ enum rcanfd_chip_id {
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF1 BIT(17)
-#define RCANFD_GERFL_EEF0 BIT(16)
+#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
+#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
-#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
- RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
- (gpriv->fdmode ?\
- RCANFD_GERFL_CMPOF : 0)))
+#define RCANFD_GERFL_ERR(gpriv, x) \
+ ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
+ RCANFD_GERFL_MES | \
+ ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
/* AFL Rx rules registers */
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8))
-#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff)
+#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+ (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
+ (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+
+#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+ (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
+ reg_v3u(gpriv, 0x1ff, 0xff))
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -116,9 +123,15 @@ enum rcanfd_chip_id {
#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
-#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
-#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NTSEG2(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+
+#define RCANFD_NCFG_NTSEG1(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+
+#define RCANFD_NCFG_NSJW(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
@@ -180,11 +193,18 @@ enum rcanfd_chip_id {
/* RSCFDnCFDCmDCFG */
#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
-#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
+
+#define RCANFD_DCFG_DTSEG2(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+
+#define RCANFD_DCFG_DTSEG1(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
+#define RCANFD_FDCFG_CLOE BIT(30)
+#define RCANFD_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -219,10 +239,10 @@ enum rcanfd_chip_id {
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
-#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -282,33 +302,31 @@ enum rcanfd_chip_id {
#define RCANFD_GTSC (0x0094)
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG0 (0x009c)
-/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
-#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
+#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
-#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
-#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40)
/* Common FIFO Control registers */
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
-#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFCC(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
-#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFSTS(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
-#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFPCTR(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -387,22 +405,23 @@ enum rcanfd_chip_id {
#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
-#define RCANFD_C_RFOFFSET (0x0e00)
-#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
-#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
- (0x10 * (x)))
-#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
- (0x10 * (x)) + (0x04 * (df)))
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) \
+ (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df)))
/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
#define RCANFD_C_CFOFFSET (0x0e80)
-#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
- (0x10 * (idx)))
-#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
- (0x30 * (ch)) + (0x10 * (idx)))
-#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
- (0x30 * (ch)) + (0x10 * (idx)) + \
- (0x04 * (df)))
+
+#define RCANFD_C_CFID(ch, idx) \
+ (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFPTR(ch, idx) \
+ (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFDF(ch, idx, df) \
+ (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
@@ -415,6 +434,12 @@ enum rcanfd_chip_id {
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+/* R-Car V3U Classical and CAN FD mode specific register map */
+#define RCANFD_V3U_CFDCFG (0x1314)
+#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m)))
+
+#define RCANFD_V3U_GAFL_OFFSET (0x1800)
+
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
@@ -434,26 +459,28 @@ enum rcanfd_chip_id {
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET (0x3000)
-#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
-#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
- (0x80 * (x)))
-#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
- (0x80 * (x)))
-#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
- (0x80 * (x)) + (0x04 * (df)))
+#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
+#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
+#define RCANFD_F_RFDF(gpriv, x, df) \
+ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET (0x3400)
-#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
- (0x80 * (idx)))
-#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
- (0x180 * (ch)) + (0x80 * (idx)) + \
- (0x04 * (df)))
+#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400)
+
+#define RCANFD_F_CFID(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFPTR(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFDF(gpriv, ch, idx, df) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
@@ -470,7 +497,7 @@ enum rcanfd_chip_id {
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
-#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
+#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
#define RCANFD_CHANNELS_MASK (BIT(RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
@@ -521,6 +548,7 @@ struct rcar_canfd_global {
struct reset_control *rstc1;
struct reset_control *rstc2;
enum rcanfd_chip_id chip_id;
+ u32 max_channels;
};
/* CAN FD mode nominal rate constants */
@@ -563,6 +591,17 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
};
/* Helper functions */
+static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+{
+ return gpriv->chip_id == RENESAS_R8A779A0;
+}
+
+static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
+ u32 v3u, u32 not_v3u)
+{
+ return is_v3u(gpriv) ? v3u : not_v3u;
+}
+
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
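
The is_v3u()/reg_v3u() helpers concentrate every R-Car V3U register-layout difference into a single selector, so each bitfield macro can carry both encodings. A worked example with the RCANFD_DCFG_DTSEG1() macro defined earlier in this patch (the value 14 is arbitrary):

    /* RCANFD_DCFG_DTSEG1(gpriv, 14) evaluates to:
     *   V3U:     (14 & 0x1f) << 8  == 0x00000e00
     *   others:  (14 & 0x0f) << 16 == 0x000e0000
     */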
@@ -628,6 +667,25 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+{
+ if (is_v3u(gpriv)) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+ RCANFD_FDCFG_FDOE);
+ else
+ rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+ RCANFD_FDCFG_CLOE);
+ } else {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
+}
+
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
u32 sts, ch;
@@ -660,15 +718,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
+ rcar_canfd_set_mode(gpriv);
/* Transition all Channels to reset mode */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
@@ -709,7 +762,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
/* Channel configuration settings */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
RCANFD_CCTR_ERRD);
rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
@@ -729,20 +782,22 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
start = 0; /* Channel 0 always starts from 0th rule */
} else {
/* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
- start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
+ start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
}
/* Enable write access to entry */
page = RCANFD_GAFL_PAGENUM(start);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
- (RCANFD_GAFLECTR_AFLPN(page) |
+ (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
- RCANFD_GAFLCFG_SETRNC(ch, num_rules));
- if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
+ RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
+ if (is_v3u(gpriv))
+ offset = RCANFD_V3U_GAFL_OFFSET;
+ else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
offset = RCANFD_C_GAFL_OFFSET;
@@ -754,8 +809,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Any data length accepted */
rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
- RCANFD_GAFLP1_GAFLFDP(ridx));
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
rcar_canfd_clear_bit(gpriv->base,
@@ -779,7 +834,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
- rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg);
}
static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
@@ -801,15 +856,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
else
cfpls = 0; /* b000 - Max 8 bytes payload */
- cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
- RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) |
RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
- rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg);
if (gpriv->fdmode)
/* Clear FD mode specific control/status register */
rcar_canfd_write(gpriv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0);
}
static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
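
The cfg word built above thus packs the same logical Common FIFO setup at different bit positions per SoC. Taking cfm = 1 and cfdc = 2 as example values (the actual values are computed just before this hunk):

    /* others: (1 << 16) | (2 << 8)    CFM at bit 16, CFDC at bit 8  */
    /* V3U:    (1 << 8)  | (2 << 21)   CFM at bit 8,  CFDC at bit 21 */

CFIM, CFPLS and CFTXIE keep the same positions on both layouts.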
@@ -880,30 +935,26 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
- netdev_dbg(ndev, "Ch0: ECC Error flag\n");
- stats->tx_dropped++;
- }
- if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
- netdev_dbg(ndev, "Ch1: ECC Error flag\n");
+ if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
if (gerfl & RCANFD_GERFL_MES) {
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (sts & RCANFD_CFSTS_CFMLT) {
netdev_dbg(ndev, "Tx Message Lost flag\n");
stats->tx_dropped++;
rcar_canfd_write(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFMLT);
}
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (sts & RCANFD_RFSTS_RFMLT) {
netdev_dbg(ndev, "Rx Message Lost flag\n");
stats->rx_dropped++;
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFMLT);
}
}
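
RCANFD_GERFL_EEF(ch) replaces the fixed EEF0/EEF1 tests so the loop scales to eight channels. Its definition lands in a hunk not shown here; judging from the old bits (EEF0 = BIT(16), EEF1 = BIT(17)) it is presumably:

    #define RCANFD_GERFL_EEF(ch)    BIT(16 + (ch))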
@@ -997,7 +1048,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = txerr;
@@ -1007,7 +1058,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error passive interrupt\n");
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
cf->data[6] = txerr;
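
CAN_ERR_CNT is the uapi flag (new in this kernel cycle) announcing that data[6]/data[7] of an error frame carry the TX/RX error counters. A minimal user-space fragment reading them, assuming a CAN_RAW socket s already subscribed to error frames via CAN_RAW_ERR_FILTER:

    struct can_frame cf;

    if (recv(s, &cf, sizeof(cf), 0) == sizeof(cf) &&
        (cf.can_id & CAN_ERR_FLAG) && (cf.can_id & CAN_ERR_CNT))
        printf("txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);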
@@ -1038,6 +1089,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
static void rcar_canfd_tx_done(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct net_device_stats *stats = &ndev->stats;
u32 sts;
unsigned long flags;
@@ -1053,7 +1105,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
unsent = RCANFD_CFSTS_CFMC(sts);
/* Wake producer only when there is room */
@@ -1069,9 +1121,8 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
} while (1);
/* Clear interrupt */
- rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFTXIF);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch)
@@ -1091,7 +1142,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_global_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1101,15 +1152,17 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
{
struct rcar_canfd_channel *priv = gpriv->ch[ch];
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- u32 sts;
+ u32 sts, cc;
/* Handle Rx interrupts */
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
- if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
+ cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF &&
+ cc & RCANFD_RFCC_RFIE)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
- RCANFD_RFCC(ridx),
+ RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
__napi_schedule(&priv->napi);
}
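
Note the extra RFCC read: RFIE doubles as a "NAPI scheduled" marker. It is cleared here before __napi_schedule() and only set again in rcar_canfd_rx_poll() after napi_complete_done(), so testing it keeps a stale RFIF status from re-arming NAPI for a FIFO whose interrupt is deliberately disabled.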
@@ -1121,7 +1174,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_i
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_global_receive(gpriv, ch);
return IRQ_HANDLED;
@@ -1135,7 +1188,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Global error interrupts still indicate a condition specific
* to a channel. RxFIFO interrupt is a global interrupt.
*/
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_handle_global_err(gpriv, ch);
rcar_canfd_handle_global_receive(gpriv, ch);
}
@@ -1181,18 +1234,16 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
/* Handle Tx interrupts */
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (likely(sts & RCANFD_CFSTS_CFTXIF))
rcar_canfd_tx_done(ndev);
}
static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
{
- struct rcar_canfd_global *gpriv = dev_id;
- u32 ch;
+ struct rcar_canfd_channel *priv = dev_id;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
- rcar_canfd_handle_channel_tx(gpriv, ch);
+ rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel);
return IRQ_HANDLED;
}
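
With the channel's own priv handed over as the dev_id cookie, the Tx (and, below, error) handlers no longer sweep channels_mask on every interrupt; the matching devm_request_irq() calls in rcar_canfd_channel_probe() further down are switched from gpriv to priv accordingly.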
@@ -1220,11 +1271,9 @@ static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 c
static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
{
- struct rcar_canfd_global *gpriv = dev_id;
- u32 ch;
+ struct rcar_canfd_channel *priv = dev_id;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
- rcar_canfd_handle_channel_err(gpriv, ch);
+ rcar_canfd_handle_channel_err(priv->gpriv, priv->channel);
return IRQ_HANDLED;
}
@@ -1235,7 +1284,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
u32 ch;
/* Common FIFO is a per channel resource */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_handle_channel_err(gpriv, ch);
rcar_canfd_handle_channel_tx(gpriv, ch);
}
@@ -1246,6 +1295,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
static void rcar_canfd_set_bittiming(struct net_device *dev)
{
struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
@@ -1260,8 +1310,8 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
/* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
@@ -1273,16 +1323,28 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+ cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ if (is_v3u(gpriv))
+ rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
+ else
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+ if (is_v3u(gpriv)) {
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
+ RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) |
+ RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+ } else {
+ cfg = (RCANFD_CFG_TSEG1(tseg1) |
+ RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) |
+ RCANFD_CFG_TSEG2(tseg2));
+ }
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev,
@@ -1294,6 +1356,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
static int rcar_canfd_start(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err = -EOPNOTSUPP;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1315,9 +1378,9 @@ static int rcar_canfd_start(struct net_device *ndev)
}
/* Enable Common & Rx FIFO */
- rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -1351,7 +1414,6 @@ static int rcar_canfd_open(struct net_device *ndev)
if (err)
goto out_close;
netif_start_queue(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
return 0;
out_close:
napi_disable(&priv->napi);
@@ -1365,6 +1427,7 @@ out_clock:
static void rcar_canfd_stop(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1382,9 +1445,9 @@ static void rcar_canfd_stop(struct net_device *ndev)
rcar_canfd_disable_channel_interrupts(priv);
/* Disable Common & Rx FIFO */
- rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
/* Set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
@@ -1400,7 +1463,6 @@ static int rcar_canfd_close(struct net_device *ndev)
napi_disable(&priv->napi);
clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -1408,12 +1470,13 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 sts = 0, id, dlc;
unsigned long flags;
u32 ch = priv->channel;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) {
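
can_dev_dropped_skb() wraps can_dropped_invalid_skb() and additionally refuses to transmit while the interface is in listen-only mode; simplified from include/linux/can/skb.h:

    struct can_priv *priv = netdev_priv(dev);

    if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
        kfree_skb(skb);
        return true;    /* dropped */
    }
    return can_dropped_invalid_skb(dev, skb);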
@@ -1428,11 +1491,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
rcar_canfd_write(priv->base,
- RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
- RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc);
if (can_is_canfd_skb(skb)) {
/* CAN FD frame format */
@@ -1445,10 +1508,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
rcar_canfd_write(priv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts);
rcar_canfd_put_data(priv, cf,
- RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0));
} else {
rcar_canfd_write(priv->base,
RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
@@ -1471,7 +1534,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
* pointer for the Common FIFO
*/
rcar_canfd_write(priv->base,
- RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+ RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
@@ -1480,18 +1543,21 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
u32 sts = 0, id, dlc;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
- dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
- sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
- if (sts & RCANFD_RFFDSTS_RFFDF)
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx));
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ sts & RCANFD_RFFDSTS_RFFDF)
skb = alloc_canfd_skb(priv->ndev, &cf);
else
skb = alloc_can_skb(priv->ndev,
@@ -1529,12 +1595,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFBRS)
cf->flags |= CANFD_BRS;
- rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
}
} else {
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
+ else if (is_v3u(gpriv))
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
}
@@ -1542,9 +1610,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
/* Write 0xff to RFPC to increment the CPU-side
* pointer of the Rx FIFO
*/
- rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
-
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
@@ -1556,13 +1622,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
{
struct rcar_canfd_channel *priv =
container_of(napi, struct rcar_canfd_channel, napi);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int num_pkts;
u32 sts;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
for (num_pkts = 0; num_pkts < quota; num_pkts++) {
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
/* Check FIFO empty condition */
if (sts & RCANFD_RFSTS_RFEMP)
break;
@@ -1571,7 +1638,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* Clear interrupt bit */
if (sts & RCANFD_RFSTS_RFIF)
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFIF);
}
@@ -1579,7 +1646,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
if (num_pkts < quota) {
if (napi_complete_done(napi, num_pkts)) {
/* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
}
}
@@ -1622,6 +1689,10 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
u32 fcan_freq)
{
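
ethtool_op_get_ts_info() is the stock helper that advertises software-only timestamping; its effect is roughly:

    info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                            SOF_TIMESTAMPING_RX_SOFTWARE |
                            SOF_TIMESTAMPING_SOFTWARE;
    info->phc_index = -1;   /* no PTP hardware clock */
    return 0;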
@@ -1638,10 +1709,12 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv = netdev_priv(ndev);
ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->ethtool_ops = &rcar_canfd_ethtool_ops;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
priv->base = gpriv->base;
priv->channel = ch;
+ priv->gpriv = gpriv;
priv->can.clock.freq = fcan_freq;
dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
@@ -1670,7 +1743,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
}
err = devm_request_irq(&pdev->dev, err_irq,
rcar_canfd_channel_err_interrupt, 0,
- irq_name, gpriv);
+ irq_name, priv);
if (err) {
dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
err_irq, err);
@@ -1684,7 +1757,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
}
err = devm_request_irq(&pdev->dev, tx_irq,
rcar_canfd_channel_tx_interrupt, 0,
- irq_name, gpriv);
+ irq_name, priv);
if (err) {
dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
tx_irq, err);
@@ -1710,20 +1783,18 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.do_set_mode = rcar_canfd_do_set_mode;
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
- priv->gpriv = gpriv;
SET_NETDEV_DEV(ndev, &pdev->dev);
- netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
- RCANFD_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
+ RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev,
"register_candev() failed, error %d\n", err);
goto fail_candev;
}
- spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
- gpriv->ch[priv->channel] = priv;
dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
return 0;
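
Two things happen here besides the conversion to netif_napi_add_weight() (the weight argument was being removed from plain netif_napi_add() at the time): tx_lock initialization and the gpriv->ch[] assignment are pulled ahead of register_candev(), so an ndo_open racing with registration can no longer observe them unset, and devm_can_led_init() disappears along with the rest of the CAN LED framework.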
@@ -1756,21 +1827,25 @@ static int rcar_canfd_probe(struct platform_device *pdev)
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
enum rcanfd_chip_id chip_id;
+ int max_channels;
+ char name[9] = "channelX";
+ int i;
chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ max_channels = chip_id == RENESAS_R8A779A0 ? 8 : 2;
if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(0); /* Channel 0 */
-
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(1); /* Channel 1 */
+ for (i = 0; i < max_channels; ++i) {
+ name[7] = '0' + i;
+ of_child = of_get_child_by_name(pdev->dev.of_node, name);
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(i);
+ of_node_put(of_child);
+ }
- if (chip_id == RENESAS_RCAR_GEN3) {
+ if (chip_id != RENESAS_RZG2L) {
ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
if (ch_irq < 0) {
/* For backward compatibility get irq by index */
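
The open-coded channel0/channel1 lookups become a loop over up to max_channels child nodes, which unlike the old code also drops each reference with of_node_put(). The chip_id test flips from "== RENESAS_RCAR_GEN3" to "!= RENESAS_RZG2L" so that the newly added V3U shares the Gen3 interrupt layout instead of the RZ/G2L one.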
@@ -1798,14 +1873,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Global controller context */
gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
- if (!gpriv) {
- err = -ENOMEM;
- goto fail_dev;
- }
+ if (!gpriv)
+ return -ENOMEM;
+
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
gpriv->chip_id = chip_id;
+ gpriv->max_channels = max_channels;
if (gpriv->chip_id == RENESAS_RZG2L) {
gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
@@ -1821,12 +1896,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Peripheral clock */
gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(gpriv->clkp)) {
- err = PTR_ERR(gpriv->clkp);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->clkp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ "cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
@@ -1834,12 +1906,10 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
- if (IS_ERR(gpriv->can_clk)) {
- err = PTR_ERR(gpriv->can_clk);
- dev_err(&pdev->dev,
- "cannot get canfd clock, error %d\n", err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->can_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ "cannot get canfd clock\n");
+
gpriv->fcan = RCANFD_CANFDCLK;
} else {
@@ -1847,7 +1917,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
+ if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id != RENESAS_RZG2L)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
@@ -1859,7 +1929,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->base = addr;
/* Request IRQ that's common for both channels */
- if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
+ if (gpriv->chip_id != RENESAS_RZG2L) {
err = devm_request_irq(&pdev->dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
@@ -1925,7 +1995,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_controller(gpriv);
/* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
/* Configure Channel's Rx fifo */
rcar_canfd_configure_rx(gpriv, ch);
@@ -1951,7 +2021,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_mode;
}
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
if (err)
goto fail_channel;
@@ -1963,7 +2033,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
return 0;
fail_channel:
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
rcar_canfd_disable_global_interrupts(gpriv);
@@ -1984,7 +2054,7 @@ static int rcar_canfd_remove(struct platform_device *pdev)
rcar_canfd_reset_controller(gpriv);
rcar_canfd_disable_global_interrupts(gpriv);
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
@@ -2014,6 +2084,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
{ .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
+ { .compatible = "renesas,r8a779a0-canfd", .data = (void *)RENESAS_R8A779A0 },
{ }
};
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 110071b26921..4b2f9cb17fc3 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -107,7 +107,7 @@ config CAN_TSCAN1
depends on ISA
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ https://www.embeddedts.com/products/TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 131a084c3535..ebd5941c3f53 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -478,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 966316479485..aac5956e4a53 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -52,6 +52,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -60,7 +61,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "sja1000.h"
@@ -184,8 +184,9 @@ static void chipset_init(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
- /* set clock divider and output control register */
- priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG))
+ /* set clock divider and output control register */
+ priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
/* set acceptance filter (accept all) */
priv->write_reg(priv, SJA1000_ACCC0, 0x00);
@@ -210,7 +211,8 @@ static void sja1000_start(struct net_device *dev)
set_reset_mode(dev);
/* Initialize chip if uninitialized at this stage */
- if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG ||
+ priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
chipset_init(dev);
/* Clear error counters and error code capture */
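
SJA1000_QUIRK_NO_CDR_REG (added in sja1000.h below) covers integrations such as RZ/N1 that implement no clock-divider register: chipset_init() skips the CDR write, and thanks to the short-circuiting || above the CDR read in sja1000_start() is never issued when the quirk is set, leaving the chip to be treated as already initialized.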
@@ -289,7 +291,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
u8 cmd_reg_val = 0x00;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -383,8 +385,6 @@ static void sja1000_rx(struct net_device *dev)
sja1000_write_cmdreg(priv, CMD_RRB);
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -405,9 +405,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
-
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -429,6 +426,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
@@ -531,7 +533,6 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
stats->tx_packets++;
}
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & IRQ_RI) {
/* receive interrupt */
@@ -587,8 +588,6 @@ static int sja1000_open(struct net_device *dev)
/* init and start chip */
sja1000_start(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -606,8 +605,6 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -658,25 +655,23 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops sja1000_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_sja1000dev(struct net_device *dev)
{
- int ret;
-
if (!sja1000_probe_chip(dev))
return -ENODEV;
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &sja1000_netdev_ops;
+ dev->ethtool_ops = &sja1000_ethtool_ops;
set_reset_mode(dev);
chipset_init(dev);
- ret = register_candev(dev);
-
- if (!ret)
- devm_can_led_init(dev);
-
- return ret;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 9d46398f8154..7f736f1df547 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -145,7 +145,8 @@
/*
* Flags for sja1000priv.flags
*/
-#define SJA1000_CUSTOM_IRQ_HANDLER 0x1
+#define SJA1000_CUSTOM_IRQ_HANDLER BIT(0)
+#define SJA1000_QUIRK_NO_CDR_REG BIT(1)
/*
* SJA1000 private data structure
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index f9ec7bd8dfac..6779d5357069 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -31,7 +32,7 @@ MODULE_LICENSE("GPL v2");
struct sja1000_of_data {
size_t priv_sz;
- int (*init)(struct sja1000_priv *priv, struct device_node *of);
+ void (*init)(struct sja1000_priv *priv, struct device_node *of);
};
struct technologic_priv {
@@ -94,15 +95,18 @@ static void sp_technologic_write_reg16(const struct sja1000_priv *priv,
spin_unlock_irqrestore(&tp->io_lock, flags);
}
-static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
+static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
{
struct technologic_priv *tp = priv->priv;
priv->read_reg = sp_technologic_read_reg16;
priv->write_reg = sp_technologic_write_reg16;
spin_lock_init(&tp->io_lock);
+}
- return 0;
+static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ priv->flags = SJA1000_QUIRK_NO_CDR_REG;
}
static void sp_populate(struct sja1000_priv *priv,
@@ -155,11 +159,13 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->write_reg = sp_write_reg8;
}
- err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ if (!priv->can.clock.freq) {
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ }
err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
if (!err)
@@ -194,8 +200,13 @@ static struct sja1000_of_data technologic_data = {
.init = sp_technologic_init,
};
+static struct sja1000_of_data renesas_data = {
+ .init = sp_rzn1_init,
+};
+
static const struct of_device_id sp_of_table[] = {
{ .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, },
{ .compatible = "technologic,sja1000", .data = &technologic_data, },
{ /* sentinel */ },
};
@@ -210,9 +221,9 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq = NULL;
struct sja1000_platform_data *pdata;
struct device_node *of = pdev->dev.of_node;
- const struct of_device_id *of_id;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
+ struct clk *clk;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -237,17 +248,20 @@ static int sp_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
+
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "CAN clk operation failed");
} else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
return -ENODEV;
}
- of_id = of_match_device(sp_of_table, &pdev->dev);
- if (of_id && of_id->data) {
- of_data = of_id->data;
+ of_data = device_get_match_data(&pdev->dev);
+ if (of_data)
priv_sz = of_data->priv_sz;
- }
dev = alloc_sja1000dev(priv_sz);
if (!dev)
@@ -267,13 +281,19 @@ static int sp_probe(struct platform_device *pdev)
priv->reg_base = addr;
if (of) {
- sp_populate_of(priv, of);
-
- if (of_data && of_data->init) {
- err = of_data->init(priv, of);
- if (err)
+ if (clk) {
+ priv->can.clock.freq = clk_get_rate(clk) / 2;
+ if (!priv->can.clock.freq) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Zero CAN clk rate");
goto exit_free;
+ }
}
+
+ sp_populate_of(priv, of);
+
+ if (of_data && of_data->init)
+ of_data->init(priv, of);
} else {
sp_populate(priv, pdata, res_mem->flags);
}
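
devm_clk_get_optional_enabled() both acquires and enables the clock, and returns NULL rather than an error when the node specifies none, which is why the "if (clk)" further down can still fall back to the nxp,external-clock-frequency property or the SP_CAN_CLOCK default. The divide-by-two applied to its rate mirrors the DT path, the SJA1000 core clock being half the external clock.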
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 3dbba8d61afb..f3862bed3d40 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -5,10 +5,9 @@
* Copyright 2010 Andre B. Oliveira
*/
-/*
- * References:
- * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
- * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+/* References:
+ * - Getting started with TS-CAN1, Technologic Systems, Feb 2022
+ * https://docs.embeddedts.com/TS-CAN1
*/
#include <linux/init.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
deleted file mode 100644
index d4c7ce998a34..000000000000
--- a/drivers/net/can/slcan.c
+++ /dev/null
@@ -1,793 +0,0 @@
-/*
- * slcan.c - serial line CAN interface driver (using tty line discipline)
- *
- * This file is derived from linux/drivers/net/slip/slip.c
- *
- * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
- * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
- * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see http://www.gnu.org/licenses/gpl.html
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/skb.h>
-#include <linux/can/can-ml.h>
-
-MODULE_ALIAS_LDISC(N_SLCAN);
-MODULE_DESCRIPTION("serial line CAN interface");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
-
-#define SLCAN_MAGIC 0x53CA
-
-static int maxdev = 10; /* MAX number of SLCAN channels;
- This can be overridden with
- insmod slcan.ko maxdev=nnn */
-module_param(maxdev, int, 0);
-MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
-
-/* maximum rx buffer len: extended CAN frame with timestamp */
-#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
-
-#define SLC_CMD_LEN 1
-#define SLC_SFF_ID_LEN 3
-#define SLC_EFF_ID_LEN 8
-
-struct slcan {
- int magic;
-
- /* Various fields. */
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
- spinlock_t lock;
- struct work_struct tx_work; /* Flushes transmit buffer */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char rbuff[SLC_MTU]; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next XMIT byte */
- int xleft; /* bytes left in XMIT queue */
-
- unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ERROR 1 /* Parity, etc. error */
-};
-
-static struct net_device **slcan_devs;
-
- /************************************************************************
- * SLCAN ENCAPSULATION FORMAT *
- ************************************************************************/
-
-/*
- * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
- * frame format) a data length code (len) which can be from 0 to 8
- * and up to <len> data bytes as payload.
- * Additionally a CAN frame may become a remote transmission frame if the
- * RTR-bit is set. This causes another ECU to send a CAN frame with the
- * given can_id.
- *
- * The SLCAN ASCII representation of these different frame types is:
- * <type> <id> <dlc> <data>*
- *
- * Extended frames (29 bit) are defined by capital characters in the type.
- * RTR frames are defined as 'r' types - normal frames have 't' type:
- * t => 11 bit data frame
- * r => 11 bit RTR frame
- * T => 29 bit data frame
- * R => 29 bit RTR frame
- *
- * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
- * The <dlc> is a one byte ASCII number ('0' - '8')
- * The <data> section has as many ASCII Hex bytes as defined by the <dlc>
- *
- * Examples:
- *
- * t1230 : can_id 0x123, len 0, no data
- * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
- * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
- * r1230 : can_id 0x123, len 0, no data, remote transmission request
- *
- */
-
- /************************************************************************
- * STANDARD SLCAN DECAPSULATION *
- ************************************************************************/
-
-/* Send one completely decapsulated can_frame to the network layer */
-static void slc_bump(struct slcan *sl)
-{
- struct sk_buff *skb;
- struct can_frame cf;
- int i, tmp;
- u32 tmpid;
- char *cmd = sl->rbuff;
-
- memset(&cf, 0, sizeof(cf));
-
- switch (*cmd) {
- case 'r':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 't':
- /* store dlc ASCII value and terminate SFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
- break;
- case 'R':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 'T':
- cf.can_id |= CAN_EFF_FLAG;
- /* store dlc ASCII value and terminate EFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
- break;
- default:
- return;
- }
-
- if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
- return;
-
- cf.can_id |= tmpid;
-
- /* get len from sanitized ASCII value */
- if (cf.len >= '0' && cf.len < '9')
- cf.len -= '0';
- else
- return;
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf.can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf.len; i++) {
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] = (tmp << 4);
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] |= tmp;
- }
- }
-
- skb = dev_alloc_skb(sizeof(struct can_frame) +
- sizeof(struct can_skb_priv));
- if (!skb)
- return;
-
- skb->dev = sl->dev;
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = sl->dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb_put_data(skb, &cf, sizeof(struct can_frame));
-
- sl->dev->stats.rx_packets++;
- if (!(cf.can_id & CAN_RTR_FLAG))
- sl->dev->stats.rx_bytes += cf.len;
-
- netif_rx_ni(skb);
-}
-
-/* parse tty input stream */
-static void slcan_unesc(struct slcan *sl, unsigned char s)
-{
- if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- (sl->rcount > 4)) {
- slc_bump(sl);
- }
- sl->rcount = 0;
- } else {
- if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < SLC_MTU) {
- sl->rbuff[sl->rcount++] = s;
- return;
- } else {
- sl->dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
- }
- }
-}
-
- /************************************************************************
- * STANDARD SLCAN ENCAPSULATION *
- ************************************************************************/
-
-/* Encapsulate one can_frame and stuff into a TTY queue. */
-static void slc_encaps(struct slcan *sl, struct can_frame *cf)
-{
- int actual, i;
- unsigned char *pos;
- unsigned char *endpos;
- canid_t id = cf->can_id;
-
- pos = sl->xbuff;
-
- if (cf->can_id & CAN_RTR_FLAG)
- *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
- else
- *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
-
- /* determine number of chars for the CAN-identifier */
- if (cf->can_id & CAN_EFF_FLAG) {
- id &= CAN_EFF_MASK;
- endpos = pos + SLC_EFF_ID_LEN;
- } else {
- *pos |= 0x20; /* convert R/T to lower case for SFF */
- id &= CAN_SFF_MASK;
- endpos = pos + SLC_SFF_ID_LEN;
- }
-
- /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
- pos++;
- while (endpos >= pos) {
- *endpos-- = hex_asc_upper[id & 0xf];
- id >>= 4;
- }
-
- pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
-
- *pos++ = cf->len + '0';
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf->len; i++)
- pos = hex_byte_pack_upper(pos, cf->data[i]);
-
- sl->dev->stats.tx_bytes += cf->len;
- }
-
- *pos++ = '\r';
-
- /* Order of next two lines is *very* important.
- * When we are sending a little amount of data,
- * the transfer may be completed inside the ops->write()
- * routine, because it's running with interrupts enabled.
- * In this case we *never* got WRITE_WAKEUP event,
- * if we did not request it before write operation.
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
- sl->xleft = (pos - sl->xbuff) - actual;
- sl->xhead = sl->xbuff + actual;
-}
-
-/* Write out any remaining transmit buffer. Scheduled when tty is writable */
-static void slcan_transmit(struct work_struct *work)
-{
- struct slcan *sl = container_of(work, struct slcan, tx_work);
- int actual;
-
- spin_lock_bh(&sl->lock);
- /* First make sure we're connected. */
- if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
- spin_unlock_bh(&sl->lock);
- return;
- }
-
- if (sl->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet */
- sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- spin_unlock_bh(&sl->lock);
- netif_wake_queue(sl->dev);
- return;
- }
-
- actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
- sl->xleft -= actual;
- sl->xhead += actual;
- spin_unlock_bh(&sl->lock);
-}
-
-/*
- * Called by the driver when there's room for more data.
- * Schedule the transmit.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
-{
- struct slcan *sl;
-
- rcu_read_lock();
- sl = rcu_dereference(tty->disc_data);
- if (sl)
- schedule_work(&sl->tx_work);
- rcu_read_unlock();
-}
-
-/* Send a can_frame to a TTY queue. */
-static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (skb->len != CAN_MTU)
- goto out;
-
- spin_lock(&sl->lock);
- if (!netif_running(dev)) {
- spin_unlock(&sl->lock);
- printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
- goto out;
- }
- if (sl->tty == NULL) {
- spin_unlock(&sl->lock);
- goto out;
- }
-
- netif_stop_queue(sl->dev);
- slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
- spin_unlock(&sl->lock);
-
-out:
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-/******************************************
- * Routines looking at netdevice side.
- ******************************************/
-
-/* Netdevice UP -> DOWN routine */
-static int slc_close(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- /* TTY discipline is running. */
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- }
- netif_stop_queue(dev);
- sl->rcount = 0;
- sl->xleft = 0;
- spin_unlock_bh(&sl->lock);
-
- return 0;
-}
-
-/* Netdevice DOWN -> UP routine */
-static int slc_open(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (sl->tty == NULL)
- return -ENODEV;
-
- sl->flags &= (1 << SLF_INUSE);
- netif_start_queue(dev);
- return 0;
-}
-
-/* Hook the destructor so we can free slcan devs at the right point in time */
-static void slc_free_netdev(struct net_device *dev)
-{
- int i = dev->base_addr;
-
- slcan_devs[i] = NULL;
-}
-
-static int slcan_change_mtu(struct net_device *dev, int new_mtu)
-{
- return -EINVAL;
-}
-
-static const struct net_device_ops slc_netdev_ops = {
- .ndo_open = slc_open,
- .ndo_stop = slc_close,
- .ndo_start_xmit = slc_xmit,
- .ndo_change_mtu = slcan_change_mtu,
-};
-
-static void slc_setup(struct net_device *dev)
-{
- dev->netdev_ops = &slc_netdev_ops;
- dev->needs_free_netdev = true;
- dev->priv_destructor = slc_free_netdev;
-
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- dev->mtu = CAN_MTU;
- dev->type = ARPHRD_CAN;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-/******************************************
- Routines looking at TTY side.
- ******************************************/
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of SLCAN data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing. This will not
- * be re-entered while running but other ldisc functions may be called
- * in parallel
- */
-
-static void slcan_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp,
- int count)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return;
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp && *fp++) {
- if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->dev->stats.rx_errors++;
- cp++;
- continue;
- }
- slcan_unesc(sl, *cp++);
- }
-}
-
-/************************************
- * slcan_open helper routines.
- ************************************/
-
-/* Collect hanged up channels */
-static void slc_sync(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- sl = netdev_priv(dev);
- if (sl->tty)
- continue;
- if (dev->flags & IFF_UP)
- dev_close(dev);
- }
-}
-
-/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(void)
-{
- int i;
- char name[IFNAMSIZ];
- struct net_device *dev = NULL;
- struct can_ml_priv *can_ml;
- struct slcan *sl;
- int size;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- }
-
- /* Sorry, too many, all slots in use */
- if (i >= maxdev)
- return NULL;
-
- sprintf(name, "slcan%d", i);
- size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
- dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
- if (!dev)
- return NULL;
-
- dev->base_addr = i;
- sl = netdev_priv(dev);
- can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
- can_set_ml_priv(dev, can_ml);
-
- /* Initialize channel control data */
- sl->magic = SLCAN_MAGIC;
- sl->dev = dev;
- spin_lock_init(&sl->lock);
- INIT_WORK(&sl->tx_work, slcan_transmit);
- slcan_devs[i] = dev;
-
- return sl;
-}
-
-/*
- * Open the high-level part of the SLCAN channel.
- * This function is called by the TTY module when the
- * SLCAN line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free SLCAN channel...
- *
- * Called in process context serialized from other ldisc calls.
- */
-
-static int slcan_open(struct tty_struct *tty)
-{
- struct slcan *sl;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (tty->ops->write == NULL)
- return -EOPNOTSUPP;
-
- /* RTnetlink lock is misused here to serialize concurrent
- opens of slcan channels. There are better ways, but it is
- the simplest one.
- */
- rtnl_lock();
-
- /* Collect hanged up channels. */
- slc_sync();
-
- sl = tty->disc_data;
-
- err = -EEXIST;
- /* First make sure we're not already connected. */
- if (sl && sl->magic == SLCAN_MAGIC)
- goto err_exit;
-
- /* OK. Find a free SLCAN channel to use. */
- err = -ENFILE;
- sl = slc_alloc();
- if (sl == NULL)
- goto err_exit;
-
- sl->tty = tty;
- tty->disc_data = sl;
-
- if (!test_bit(SLF_INUSE, &sl->flags)) {
- /* Perform the low-level SLCAN initialization. */
- sl->rcount = 0;
- sl->xleft = 0;
-
- set_bit(SLF_INUSE, &sl->flags);
-
- err = register_netdevice(sl->dev);
- if (err)
- goto err_free_chan;
- }
-
- /* Done. We have linked the TTY line to a channel. */
- rtnl_unlock();
- tty->receive_room = 65536; /* We don't flow control */
-
- /* TTY layer expects 0 on success */
- return 0;
-
-err_free_chan:
- sl->tty = NULL;
- tty->disc_data = NULL;
- clear_bit(SLF_INUSE, &sl->flags);
- slc_free_netdev(sl->dev);
- /* do not call free_netdev before rtnl_unlock */
- rtnl_unlock();
- free_netdev(sl->dev);
- return err;
-
-err_exit:
- rtnl_unlock();
-
- /* Count references from TTY module */
- return err;
-}
-
-/*
- * Close down a SLCAN channel.
- * This means flushing out any pending queues, and then returning. This
- * call is serialized against other ldisc functions.
- *
- * We also use this method for a hangup event.
- */
-
-static void slcan_close(struct tty_struct *tty)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
- return;
-
- spin_lock_bh(&sl->lock);
- rcu_assign_pointer(tty->disc_data, NULL);
- sl->tty = NULL;
- spin_unlock_bh(&sl->lock);
-
- synchronize_rcu();
- flush_work(&sl->tx_work);
-
- /* Flush network side */
- unregister_netdev(sl->dev);
- /* This will complete via sl_free_netdev */
-}
-
-static void slcan_hangup(struct tty_struct *tty)
-{
- slcan_close(tty);
-}
-
-/* Perform I/O control on an active SLCAN channel. */
-static int slcan_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
- unsigned int tmp;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- tmp = strlen(sl->dev->name) + 1;
- if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
- return -EFAULT;
- return 0;
-
- case SIOCSIFHWADDR:
- return -EINVAL;
-
- default:
- return tty_mode_ioctl(tty, cmd, arg);
- }
-}
-
-static struct tty_ldisc_ops slc_ldisc = {
- .owner = THIS_MODULE,
- .num = N_SLCAN,
- .name = "slcan",
- .open = slcan_open,
- .close = slcan_close,
- .hangup = slcan_hangup,
- .ioctl = slcan_ioctl,
- .receive_buf = slcan_receive_buf,
- .write_wakeup = slcan_write_wakeup,
-};
-
-static int __init slcan_init(void)
-{
- int status;
-
- if (maxdev < 4)
- maxdev = 4; /* Sanity */
-
- pr_info("slcan: serial line CAN interface driver\n");
- pr_info("slcan: %d dynamic interface channels.\n", maxdev);
-
- slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
- if (!slcan_devs)
- return -ENOMEM;
-
- /* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(&slc_ldisc);
- if (status) {
- printk(KERN_ERR "slcan: can't register line discipline\n");
- kfree(slcan_devs);
- }
- return status;
-}
-
-static void __exit slcan_exit(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
- unsigned long timeout = jiffies + HZ;
- int busy = 0;
-
- if (slcan_devs == NULL)
- return;
-
- /* First of all: check for active disciplines and hangup them.
- */
- do {
- if (busy)
- msleep_interruptible(100);
-
- busy = 0;
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- sl = netdev_priv(dev);
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- busy++;
- tty_hangup(sl->tty);
- }
- spin_unlock_bh(&sl->lock);
- }
- } while (busy && time_before(jiffies, timeout));
-
- /* FIXME: hangup is async so we should wait when doing this second
- phase */
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- slcan_devs[i] = NULL;
-
- sl = netdev_priv(dev);
- if (sl->tty) {
- printk(KERN_ERR "%s: tty discipline still running\n",
- dev->name);
- }
-
- unregister_netdev(dev);
- }
-
- kfree(slcan_devs);
- slcan_devs = NULL;
-
- tty_unregister_ldisc(&slc_ldisc);
-}
-
-module_init(slcan_init);
-module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/Makefile b/drivers/net/can/slcan/Makefile
new file mode 100644
index 000000000000..8a88e484ee21
--- /dev/null
+++ b/drivers/net/can/slcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
+
+slcan-objs :=
+slcan-objs += slcan-core.o
+slcan-objs += slcan-ethtool.o
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
new file mode 100644
index 000000000000..fbb34139daa1
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -0,0 +1,939 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip/slip.c and got
+ * inspiration from linux/drivers/net/can/can327.c for the rework made
+ * on the line discipline code.
+ *
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+#include "slcan.h"
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1)
+
+#define SLCAN_CMD_LEN 1
+#define SLCAN_SFF_ID_LEN 3
+#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_STATE_LEN 1
+#define SLCAN_STATE_BE_RXCNT_LEN 3
+#define SLCAN_STATE_BE_TXCNT_LEN 3
+#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+struct slcan {
+ struct can_priv can;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ /* These are the frame buffers. */
+ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next XMIT byte */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_ERROR 0 /* Parity, etc. error */
+#define SLF_XCMD 1 /* Command transmission */
+ unsigned long cmd_flags; /* Command flags */
+#define CF_ERR_RST 0 /* Reset errors on open */
+ wait_queue_head_t xcmd_wait; /* Wait queue for command transmission */
+};
+
+static const u32 slcan_bitrate_const[] = {
+ 10000, 20000, 50000, 100000, 125000,
+ 250000, 500000, 800000, 1000000
+};
+
+bool slcan_err_rst_on_open(struct net_device *ndev)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ return !!test_bit(CF_ERR_RST, &sl->cmd_flags);
+}
+
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (on)
+ set_bit(CF_ERR_RST, &sl->cmd_flags);
+ else
+ clear_bit(CF_ERR_RST, &sl->cmd_flags);
+
+ return 0;
+}
+
+/*************************************************************************
+ * SLCAN ENCAPSULATION FORMAT *
+ *************************************************************************/
+
+/* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format), a data length code (len) which can be from 0 to 8,
+ * and up to <len> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8')
+ * The <data> section has as many ASCII Hex bytes as defined by the <dlc>
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, len 0, no data
+ * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, len 0, no data, remote transmission request
+ *
+ */
+
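As a concrete illustration of the mapping above, here is a minimal standalone sketch of the SFF encoding; slcan_demo_encode_sff() is a hypothetical helper written for this example and is not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Encode an 11-bit SLCAN data frame: <type><id><dlc><data>,
 * e.g. can_id 0x456, data 11 22 33 -> "t4563112233".
 */
static void slcan_demo_encode_sff(uint16_t can_id, const uint8_t *data,
				  unsigned int len, char *out)
{
	unsigned int i;

	out += sprintf(out, "t%03X%u", can_id & 0x7ff, len);
	for (i = 0; i < len; i++)
		out += sprintf(out, "%02X", data[i]);
	*out++ = '\r';	/* CR terminates the PDU */
	*out = '\0';
}

int main(void)
{
	const uint8_t data[] = { 0x11, 0x22, 0x33 };
	char buf[32];

	slcan_demo_encode_sff(0x456, data, sizeof(data), buf);
	printf("%s\n", buf);
	return 0;
}

Run against the comment's example values, this reproduces "t4563112233" followed by the terminating CR.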
+/*************************************************************************
+ * STANDARD SLCAN DECAPSULATION *
+ *************************************************************************/
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slcan_bump_frame(struct slcan *sl)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int i, tmp;
+ u32 tmpid;
+ char *cmd = sl->rbuff;
+
+ skb = alloc_can_skb(sl->dev, &cf);
+ if (unlikely(!skb)) {
+ sl->dev->stats.rx_dropped++;
+ return;
+ }
+
+ switch (*cmd) {
+ case 'r':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 't':
+ /* store dlc ASCII value and terminate SFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1;
+ break;
+ case 'R':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 'T':
+ cf->can_id |= CAN_EFF_FLAG;
+ /* store dlc ASCII value and terminate EFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1;
+ break;
+ default:
+ goto decode_failed;
+ }
+
+ if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid))
+ goto decode_failed;
+
+ cf->can_id |= tmpid;
+
+ /* get len from sanitized ASCII value */
+ if (cf->len >= '0' && cf->len < '9')
+ cf->len -= '0';
+ else
+ goto decode_failed;
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++) {
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] = (tmp << 4);
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] |= tmp;
+ }
+ }
+
+ sl->dev->stats.rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ sl->dev->stats.rx_bytes += cf->len;
+
+ netif_rx(skb);
+ return;
+
+decode_failed:
+ sl->dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+}
+
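Conversely, a self-contained decode of the same example frame, mirroring the steps slcan_bump_frame() performs with kstrtou32() and hex_to_bin() (illustrative only, not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Decode "t4563112233" by hand: 3 hex digits of CAN ID, one DLC digit,
 * then DLC hex byte pairs.
 */
int main(void)
{
	const char *frame = "t4563112233";
	char idbuf[4] = { frame[1], frame[2], frame[3], 0 };
	unsigned long id = strtoul(idbuf, NULL, 16);
	int len = frame[4] - '0';
	int i;

	printf("can_id=0x%03lx len=%d data=", id, len);
	for (i = 0; i < len; i++) {
		char byte[3] = { frame[5 + 2 * i], frame[6 + 2 * i], 0 };

		printf("%02lx ", strtoul(byte, NULL, 16));
	}
	printf("\n");
	return 0;
}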
+/* A change state frame must contain state info and receive and transmit
+ * error counters.
+ *
+ * Examples:
+ *
+ * sb256256 : state bus-off: rx counter 256, tx counter 256
+ * sa057033 : state active, rx counter 57, tx counter 33
+ */
+static void slcan_bump_state(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ u32 rxerr, txerr;
+ enum can_state state, rx_state, tx_state;
+
+ switch (cmd[1]) {
+ case 'a':
+ state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case 'w':
+ state = CAN_STATE_ERROR_WARNING;
+ break;
+ case 'p':
+ state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case 'b':
+ state = CAN_STATE_BUS_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ return;
+
+ cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
+ cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0;
+ if (kstrtou32(cmd, 10, &txerr))
+ return;
+
+ *cmd = 0;
+ cmd -= SLCAN_STATE_BE_RXCNT_LEN;
+ if (kstrtou32(cmd, 10, &rxerr))
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (state == CAN_STATE_BUS_OFF) {
+ can_bus_off(dev);
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (skb)
+ netif_rx(skb);
+}
+
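The counter extraction in slcan_bump_state() walks through two fixed-width decimal fields; the same arithmetic in a standalone sketch (the frame string matches the "sa057033" example above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a state frame like "sa057033": 's', one state character, a
 * 3-digit rx error counter, then a 3-digit tx error counter.
 */
int main(void)
{
	const char *frame = "sa057033";
	char buf[4] = { 0 };
	unsigned long rxerr, txerr;

	memcpy(buf, frame + 2, 3);	/* "057" */
	rxerr = strtoul(buf, NULL, 10);
	memcpy(buf, frame + 5, 3);	/* "033" */
	txerr = strtoul(buf, NULL, 10);

	printf("state '%c': rxerr=%lu txerr=%lu\n", frame[1], rxerr, txerr);
	return 0;
}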
+/* An error frame can contain more than one type of error.
+ *
+ * Examples:
+ *
+ * e1a : len 1, errors: ACK error
+ * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error
+ */
+static void slcan_bump_err(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ bool rx_errors = false, tx_errors = false, rx_over_errors = false;
+ int i, len;
+
+ /* get len from sanitized ASCII value */
+ len = cmd[1];
+ if (len >= '0' && len < '9')
+ len -= '0';
+ else
+ return;
+
+ if ((len + SLCAN_CMD_LEN + 1) > sl->rcount)
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ cmd += SLCAN_CMD_LEN + 1;
+ for (i = 0; i < len; i++, cmd++) {
+ switch (*cmd) {
+ case 'a':
+ netdev_dbg(dev, "ACK error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+
+ break;
+ case 'b':
+ netdev_dbg(dev, "Bit0 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ break;
+ case 'B':
+ netdev_dbg(dev, "Bit1 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ break;
+ case 'c':
+ netdev_dbg(dev, "CRC error\n");
+ rx_errors = true;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+
+ break;
+ case 'f':
+ netdev_dbg(dev, "Form Error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ break;
+ case 'o':
+ netdev_dbg(dev, "Rx overrun error\n");
+ rx_over_errors = true;
+ rx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ break;
+ case 'O':
+ netdev_dbg(dev, "Tx overrun error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW;
+ }
+
+ break;
+ case 's':
+ netdev_dbg(dev, "Stuff error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ break;
+ default:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return;
+ }
+ }
+
+ if (rx_errors)
+ dev->stats.rx_errors++;
+
+ if (rx_over_errors)
+ dev->stats.rx_over_errors++;
+
+ if (tx_errors)
+ dev->stats.tx_errors++;
+
+ if (skb)
+ netif_rx(skb);
+}
+
+static void slcan_bump(struct slcan *sl)
+{
+ switch (sl->rbuff[0]) {
+ case 'r':
+ fallthrough;
+ case 't':
+ fallthrough;
+ case 'R':
+ fallthrough;
+ case 'T':
+ return slcan_bump_frame(sl);
+ case 'e':
+ return slcan_bump_err(sl);
+ case 's':
+ return slcan_bump_state(sl);
+ default:
+ return;
+ }
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+ if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the PDU */
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+ sl->rcount > 4)
+ slcan_bump(sl);
+
+ sl->rcount = 0;
+ } else {
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < SLCAN_MTU) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ }
+
+ sl->dev->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+}
+
+/*************************************************************************
+ * STANDARD SLCAN ENCAPSULATION *
+ *************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slcan_encaps(struct slcan *sl, struct can_frame *cf)
+{
+ int actual, i;
+ unsigned char *pos;
+ unsigned char *endpos;
+ canid_t id = cf->can_id;
+
+ pos = sl->xbuff;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
+ else
+ *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
+
+ /* determine number of chars for the CAN-identifier */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ endpos = pos + SLCAN_EFF_ID_LEN;
+ } else {
+ *pos |= 0x20; /* convert R/T to lower case for SFF */
+ id &= CAN_SFF_MASK;
+ endpos = pos + SLCAN_SFF_ID_LEN;
+ }
+
+ /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
+ pos++;
+ while (endpos >= pos) {
+ *endpos-- = hex_asc_upper[id & 0xf];
+ id >>= 4;
+ }
+
+ pos += (cf->can_id & CAN_EFF_FLAG) ?
+ SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN;
+
+ *pos++ = cf->len + '0';
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++)
+ pos = hex_byte_pack_upper(pos, cf->data[i]);
+
+ sl->dev->stats.tx_bytes += cf->len;
+ }
+
+ *pos++ = '\r';
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a small amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it's running with interrupts enabled.
+ * In this case we would *never* get a WRITE_WAKEUP event,
+ * if we did not request it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
+ sl->xleft = (pos - sl->xbuff) - actual;
+ sl->xhead = sl->xbuff + actual;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
+{
+ struct slcan *sl = container_of(work, struct slcan, tx_work);
+ int actual;
+
+ spin_lock_bh(&sl->lock);
+ /* First make sure we're connected. */
+ if (unlikely(!netif_running(sl->dev)) &&
+ likely(!test_bit(SLF_XCMD, &sl->flags))) {
+ spin_unlock_bh(&sl->lock);
+ return;
+ }
+
+ if (sl->xleft <= 0) {
+ if (unlikely(test_bit(SLF_XCMD, &sl->flags))) {
+ clear_bit(SLF_XCMD, &sl->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ wake_up(&sl->xcmd_wait);
+ return;
+ }
+
+ /* Now the serial buffer is almost free and we can start
+ * transmission of another packet
+ */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+ spin_unlock_bh(&sl->lock);
+}
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ schedule_work(&sl->tx_work);
+}
+
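The xbuff/xhead/xleft bookkeeping shared by slcan_encaps(), slcan_transmit() and slcan_write_wakeup() can be modeled in isolation; a toy userspace sketch, assuming a write routine that accepts at most four bytes per call (all names here are illustrative):

#include <stdio.h>
#include <string.h>

/* Toy model of the partial-write pattern: write() may accept fewer
 * bytes than offered; the remainder is flushed on "wakeup".
 */
static char xbuff[32], *xhead;
static int xleft;

static int toy_tty_write(const char *buf, int len)
{
	int actual = len > 4 ? 4 : len;	/* pretend only 4 bytes fit */

	printf("wrote: %.*s\n", actual, buf);
	return actual;
}

static void toy_send(const char *frame)
{
	int len = strlen(frame);
	int actual;

	memcpy(xbuff, frame, len);
	actual = toy_tty_write(xbuff, len);	/* may be a partial write */
	xleft = len - actual;
	xhead = xbuff + actual;
}

static void toy_write_wakeup(void)	/* tty has room again */
{
	int actual;

	while (xleft > 0) {
		actual = toy_tty_write(xhead, xleft);
		xleft -= actual;
		xhead += actual;
	}
}

int main(void)
{
	toy_send("t4563112233\r");
	toy_write_wakeup();
	return 0;
}

The driver performs one write per wakeup from a workqueue rather than looping, letting the TTY layer signal when more room is available.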
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ netdev_warn(dev, "xmit: iface is down\n");
+ goto out;
+ }
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ goto out;
+ }
+
+ netif_stop_queue(sl->dev);
+ slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */
+ spin_unlock(&sl->lock);
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
+static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd)
+{
+ int ret, actual, n;
+
+ spin_lock(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ return -ENODEV;
+ }
+
+ n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, n);
+ sl->xleft = n - actual;
+ sl->xhead = sl->xbuff + actual;
+ set_bit(SLF_XCMD, &sl->flags);
+ spin_unlock(&sl->lock);
+ ret = wait_event_interruptible_timeout(sl->xcmd_wait,
+ !test_bit(SLF_XCMD, &sl->flags),
+ HZ);
+ clear_bit(SLF_XCMD, &sl->flags);
+ if (ret == -ERESTARTSYS)
+ return ret;
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
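slcan_transmit_cmd() above folds the three possible results of wait_event_interruptible_timeout() into an error code; the same mapping as a compact, self-contained sketch (-512 stands in for the kernel-internal -ERESTARTSYS):

#include <errno.h>
#include <stdio.h>

static int demo_map_wait_result(long ret)
{
	if (ret < 0)		/* interrupted by a signal */
		return (int)ret;
	if (ret == 0)		/* condition not met within the timeout */
		return -ETIMEDOUT;
	return 0;		/* condition met with jiffies to spare */
}

int main(void)
{
	printf("%d %d %d\n", demo_map_wait_result(-512),
	       demo_map_wait_result(0), demo_map_wait_result(3));
	return 0;
}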
+/* Netdevice UP -> DOWN routine */
+static int slcan_netdev_close(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ int err;
+
+ if (sl->can.bittiming.bitrate &&
+ sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ err = slcan_transmit_cmd(sl, "C\r");
+ if (err)
+ netdev_warn(dev,
+ "failed to send close command 'C\\r'\n");
+ }
+
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ flush_work(&sl->tx_work);
+
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ close_candev(dev);
+ sl->can.state = CAN_STATE_STOPPED;
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNSET;
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slcan_netdev_open(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ unsigned char cmd[SLCAN_MTU];
+ int err, s;
+
+ /* The baud rate is not set with the command
+ * `ip link set <iface> type can bitrate <baud>' and therefore
+ * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing
+ * open_candev() to fail. So let's set it to a fake value.
+ */
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN;
+
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) {
+ if (sl->can.bittiming.bitrate == slcan_bitrate_const[s])
+ break;
+ }
+
+ /* The CAN framework has already validated the bitrate value,
+ * so we can avoid checking if `s' has been properly set.
+ */
+ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s);
+ err = slcan_transmit_cmd(sl, cmd);
+ if (err) {
+ netdev_err(dev,
+ "failed to send bitrate command 'C\\rS%d\\r'\n",
+ s);
+ goto cmd_transmit_failed;
+ }
+
+ if (test_bit(CF_ERR_RST, &sl->cmd_flags)) {
+ err = slcan_transmit_cmd(sl, "F\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send error command 'F\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+
+ if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ err = slcan_transmit_cmd(sl, "L\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send listen-only command 'L\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ } else {
+ err = slcan_transmit_cmd(sl, "O\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send open command 'O\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+ }
+
+ sl->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+ return 0;
+
+cmd_transmit_failed:
+ close_candev(dev);
+ return err;
+}
+
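The 's' index sent in the "S%d" command is simply the position of the configured bitrate in slcan_bitrate_const; a standalone illustration of the lookup (table values copied from the array above):

#include <stdio.h>

static const unsigned int bitrates[] = {
	10000, 20000, 50000, 100000, 125000,
	250000, 500000, 800000, 1000000
};

int main(void)
{
	unsigned int bitrate = 500000;
	unsigned int s;

	for (s = 0; s < sizeof(bitrates) / sizeof(bitrates[0]); s++)
		if (bitrates[s] == bitrate)
			break;

	/* 500000 -> index 6, so the adapter receives "C\rS6\r" */
	printf("C\\rS%u\\r\n", s);
	return 0;
}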
+static const struct net_device_ops slcan_netdev_ops = {
+ .ndo_open = slcan_netdev_open,
+ .ndo_stop = slcan_netdev_close,
+ .ndo_start_xmit = slcan_netdev_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+/******************************************
+ * Routines looking at TTY side.
+ ******************************************/
+
+/* Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to the CAN networking layer for further processing. This
+ * will not be re-entered while running, but other ldisc functions may
+ * be called in parallel.
+ */
+static void slcan_receive_buf(struct tty_struct *tty,
+ const unsigned char *cp, const char *fp,
+ int count)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ if (!netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+ sl->dev->stats.rx_errors++;
+ cp++;
+ continue;
+ }
+ slcan_unesc(sl, *cp++);
+ }
+}
+
+/* Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for.
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+static int slcan_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct slcan *sl;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(*sl), 1);
+ if (!dev)
+ return -ENFILE;
+
+ sl = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slcan_transmit);
+ init_waitqueue_head(&sl->xcmd_wait);
+
+ /* Configure CAN metadata */
+ sl->can.bitrate_const = slcan_bitrate_const;
+ sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
+ sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ sl->dev = dev;
+ dev->netdev_ops = &slcan_netdev_ops;
+ dev->ethtool_ops = &slcan_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ sl->tty = tty;
+ tty->disc_data = sl;
+
+ err = register_candev(dev);
+ if (err) {
+ free_candev(dev);
+ pr_err("can't register candev\n");
+ return err;
+ }
+
+ netdev_info(dev, "slcan on %s.\n", tty->name);
+ /* TTY layer expects 0 on success */
+ return 0;
+}
+
+/* Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this method for a hangup event.
+ */
+static void slcan_close(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to.
+ * Our .ndo_stop() also flushes the TTY write wakeup handler,
+ * so we can safely set sl->tty = NULL after this.
+ */
+ unregister_candev(sl->dev);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&sl->lock);
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ spin_unlock_bh(&sl->lock);
+
+ netdev_info(sl->dev, "slcan off %s.\n", tty->name);
+ free_candev(sl->dev);
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops slcan_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_SLCAN,
+ .name = KBUILD_MODNAME,
+ .open = slcan_open,
+ .close = slcan_close,
+ .ioctl = slcan_ioctl,
+ .receive_buf = slcan_receive_buf,
+ .write_wakeup = slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+ int status;
+
+ pr_info("serial line CAN interface driver\n");
+
+ /* Fill in our line protocol discipline, and register it */
+ status = tty_register_ldisc(&slcan_ldisc);
+ if (status)
+ pr_err("can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit slcan_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&slcan_ldisc);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c
new file mode 100644
index 000000000000..f598c653fbfa
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-ethtool.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "slcan.h"
+
+static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0)
+ "err-rst-on-open",
+};
+
+static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, slcan_priv_flags_strings,
+ sizeof(slcan_priv_flags_strings));
+ }
+}
+
+static u32 slcan_get_priv_flags(struct net_device *ndev)
+{
+ u32 flags = 0;
+
+ if (slcan_err_rst_on_open(ndev))
+ flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN;
+
+ return flags;
+}
+
+static int slcan_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ bool err_rst_on_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN);
+
+ return slcan_enable_err_rst_on_open(ndev, err_rst_on_open);
+}
+
+static int slcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(slcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+const struct ethtool_ops slcan_ethtool_ops = {
+ .get_strings = slcan_get_strings,
+ .get_priv_flags = slcan_get_priv_flags,
+ .set_priv_flags = slcan_set_priv_flags,
+ .get_sset_count = slcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
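The new private flag can be driven from userspace via `ethtool --set-priv-flags can0 err-rst-on-open on`, or directly with the SIOCETHTOOL ioctl; a minimal sketch, assuming the interface is named can0:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_SPFLAGS, .data = 1 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed ifname */
	ifr.ifr_data = (void *)&ev;

	/* bit 0 corresponds to the "err-rst-on-open" string above */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}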
diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h
new file mode 100644
index 000000000000..85cedf856db3
--- /dev/null
+++ b/drivers/net/can/slcan/slcan.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * slcan.h - serial line CAN interface driver
+ *
+ * Copyright (C) Laurence Culhane <loz@holmes.demon.co.uk>
+ * Copyright (C) Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * Copyright (C) Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#ifndef _SLCAN_H
+#define _SLCAN_H
+
+bool slcan_err_rst_on_open(struct net_device *ndev);
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on);
+
+extern const struct ethtool_ops slcan_ethtool_ops;
+
+#endif /* _SLCAN_H */
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index d74e895bddf7..c72f505d29fe 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -5,6 +5,7 @@
* - Kurt Van Dijck, EIA Electronics
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/io.h>
@@ -59,7 +60,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
uint8_t buf[DPRAM_TX_SIZE];
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
spin_lock(&card->spin);
@@ -392,13 +393,10 @@ static int softing_netdev_open(struct net_device *ndev)
static int softing_netdev_stop(struct net_device *ndev)
{
- int ret;
-
netif_stop_queue(ndev);
/* softing cycle does close_candev() */
- ret = softing_startstop(ndev, 0);
- return ret;
+ return softing_startstop(ndev, 0);
}
static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
@@ -614,8 +612,12 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops softing_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const softing_btr_const = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -652,6 +654,7 @@ static struct net_device *softing_netdev_create(struct softing *card,
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &softing_netdev_ops;
+ netdev->ethtool_ops = &softing_ethtool_ops;
priv->can.do_set_mode = softing_candev_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
@@ -849,7 +852,7 @@ platform_resource_failed:
static struct platform_driver softing_driver = {
.driver = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
.remove = softing_pdev_remove,
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index cfcc14fe3e42..e1b8533a602e 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -16,11 +16,11 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -354,9 +354,7 @@ static void hi3110_hw_rx(struct spi_device *spi)
}
priv->net->stats.rx_packets++;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void hi3110_hw_sleep(struct spi_device *spi)
@@ -375,7 +373,7 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
@@ -567,8 +565,6 @@ static int hi3110_stop(struct net_device *net)
mutex_unlock(&priv->hi3110_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -672,12 +668,10 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx_ni(skb);
+ netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
can_bus_off(net);
@@ -686,6 +680,10 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
hi3110_hw_sleep(spi);
break;
}
+ } else {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
}
@@ -718,14 +716,13 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
if (priv->tx_busy && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL);
- can_led_event(net, CAN_LED_EVENT_TX);
priv->tx_busy = false;
netif_wake_queue(net);
}
@@ -783,7 +780,6 @@ static int hi3110_open(struct net_device *net)
if (ret)
goto out_free_wq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
@@ -807,6 +803,10 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_start_xmit = hi3110_hard_start_xmit,
};
+static const struct ethtool_ops hi3110_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id hi3110_of_match[] = {
{
.compatible = "holt,hi3110",
@@ -861,6 +861,7 @@ static int hi3110_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &hi3110_netdev_ops;
+ net->ethtool_ops = &hi3110_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
@@ -931,7 +932,6 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
netdev_info(net, "%x successfully initialized.\n", priv->model);
return 0;
@@ -948,7 +948,7 @@ static int hi3110_can_probe(struct spi_device *spi)
return dev_err_probe(dev, ret, "Probe failed\n");
}
-static int hi3110_can_remove(struct spi_device *spi)
+static void hi3110_can_remove(struct spi_device *spi)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -960,8 +960,6 @@ static int hi3110_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused hi3110_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 025e07cb7439..79c4bab5f724 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,11 +22,11 @@
#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -738,9 +738,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
}
priv->net->stats.rx_packets++;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -791,7 +789,7 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
@@ -973,8 +971,6 @@ static int mcp251x_stop(struct net_device *net)
mutex_unlock(&priv->mcp_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -987,7 +983,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
if (skb) {
frame->can_id |= can_id;
frame->data[1] = data1;
- netif_rx_ni(skb);
+ netif_rx(skb);
} else {
netdev_err(net, "cannot allocate error skb\n");
}
@@ -1074,9 +1070,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1086,6 +1079,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1096,6 +1101,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
@@ -1177,7 +1185,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
break;
if (intf & CANINTF_TX) {
- can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_busy) {
net->stats.tx_packets++;
net->stats.tx_bytes += can_get_echo_skb(net, 0,
@@ -1232,8 +1239,6 @@ static int mcp251x_open(struct net_device *net)
if (ret)
goto out_free_irq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
-
netif_wake_queue(net);
mutex_unlock(&priv->mcp_lock);
@@ -1256,6 +1261,10 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mcp251x_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id mcp251x_of_match[] = {
{
.compatible = "microchip,mcp2510",
@@ -1321,6 +1330,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &mcp251x_netdev_ops;
+ net->ethtool_ops = &mcp251x_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
@@ -1403,15 +1413,16 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
-
ret = mcp251x_gpio_setup(priv);
if (ret)
- goto error_probe;
+ goto out_unregister_candev;
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
+out_unregister_candev:
+ unregister_candev(net);
+
error_probe:
destroy_workqueue(priv->wq);
priv->wq = NULL;
@@ -1427,7 +1438,7 @@ out_free:
return ret;
}
-static int mcp251x_can_remove(struct spi_device *spi)
+static void mcp251x_can_remove(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1442,8 +1453,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused mcp251x_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index dd0fc0a54be1..877e4356010d 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -2,6 +2,7 @@
config CAN_MCP251XFD
tristate "Microchip MCP251xFD SPI CAN controllers"
+ select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
help
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index a83d685d64e0..94d7de954294 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -6,6 +6,8 @@ mcp251xfd-objs :=
mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-ethtool.o
+mcp251xfd-objs += mcp251xfd-ram.o
mcp251xfd-objs += mcp251xfd-regmap.o
mcp251xfd-objs += mcp251xfd-ring.o
mcp251xfd-objs += mcp251xfd-rx.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
index 2f9a623d381d..0d96097a2547 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
@@ -78,7 +78,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* FIFO 1 - TX */
+ /* TX FIFO */
val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
tx_ring->obj_num - 1) |
MCP251XFD_REG_FIFOCON_TXEN |
@@ -99,7 +99,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
err = regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
+ MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr),
val);
if (err)
return err;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index b5986df6eca0..68df6d4641b5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -12,6 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -37,6 +38,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
.model = MCP251XFD_MODEL_MCP2518FD,
};
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251863,
+};
+
/* Autodetect model, start with CRC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
@@ -75,6 +82,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251863:
+ return "MCP251863";
case MCP251XFD_MODEL_MCP251XFD:
return "MCP251xFD";
}
@@ -112,6 +121,22 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
return "<unknown>";
}
+static const char *
+mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference)
+{
+ switch (~osc & osc_reference &
+ (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) {
+ case MCP251XFD_REG_OSC_PLLRDY:
+ return "PLL";
+ case MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator";
+ case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator/PLL";
+ }
+
+ return "<unknown>";
+}
+
static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
{
if (!priv->reg_vdd)
@@ -178,6 +203,11 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
return 0;
}
+static inline bool mcp251xfd_reg_invalid(u32 reg)
+{
+ return reg == 0x0 || reg == 0xffffffff;
+}
+
static inline int
mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
{
@@ -197,34 +227,55 @@ static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
const u8 mode_req, bool nowait)
{
- u32 con, con_reqop;
+ u32 con = 0, con_reqop, osc = 0;
+ u8 mode;
int err;
con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
- if (err)
+ if (err == -EBADMSG) {
+ netdev_err(priv->ndev,
+ "Failed to set Requested Operation Mode.\n");
+
+ return -ENODEV;
+ } else if (err) {
return err;
+ }
if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
return 0;
err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ !mcp251xfd_reg_invalid(con) &&
FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
con) == mode_req,
MCP251XFD_POLL_SLEEP_US,
MCP251XFD_POLL_TIMEOUT_US);
- if (err) {
- u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ if (err != -ETIMEDOUT && err != -EBADMSG)
+ return err;
+
+ /* Ignore return value.
+ * Print below error messages, even if this fails.
+ */
+ regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (mcp251xfd_reg_invalid(con)) {
netdev_err(priv->ndev,
- "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode_req), mode_req,
- mcp251xfd_get_mode_str(mode), mode);
- return err;
+ "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n",
+ con, osc);
+
+ return -ENODEV;
}
- return 0;
+ mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode,
+ con, osc);
+
+ return -ETIMEDOUT;
}
static inline int
@@ -241,27 +292,58 @@ mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
-static inline bool mcp251xfd_osc_invalid(u32 reg)
+static int
+mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv,
+ u32 osc_reference, u32 osc_mask)
{
- return reg == 0x0 || reg == 0xffffffff;
+ u32 osc;
+ int err;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ !mcp251xfd_reg_invalid(osc) &&
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (err != -ETIMEDOUT)
+ return err;
+
+ if (mcp251xfd_reg_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to read Oscillator Configuration Register (osc=0x%08x).\n",
+ osc);
+ return -ENODEV;
+ }
+
+ netdev_err(priv->ndev,
+ "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n",
+ mcp251xfd_get_osc_str(osc, osc_reference),
+ osc, osc_reference, osc_mask);
+
+ return -ETIMEDOUT;
}
-static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv)
{
u32 osc, osc_reference, osc_mask;
int err;
- /* Set Power On Defaults for "Clock Output Divisor" and remove
- * "Oscillator Disable" bit.
+ /* For normal sleep on MCP2517FD and MCP2518FD, clearing
+ * "Oscillator Disable" will wake the chip. For low power mode
+ * on MCP2518FD, asserting the chip select will wake the
+ * chip. Writing to the Oscillator register will wake it in
+ * both cases.
*/
osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* We cannot check for the PLL ready bit (either set or
+ * unset), as the PLL might be enabled. This can happen if
+ * the system reboots while the mcp251xfd stays powered.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY;
- osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY;
- /* Note:
- *
- * If the controller is in Sleep Mode the following write only
+ /* If the controller is in Sleep Mode the following write only
* removes the "Oscillator Disable" bit and powers it up. All
* other bits are unaffected.
*/
@@ -269,24 +351,31 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* Wait for "Oscillator Ready" bit */
- err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
- (osc & osc_mask) == osc_reference,
- MCP251XFD_OSC_STAB_SLEEP_US,
- MCP251XFD_OSC_STAB_TIMEOUT_US);
- if (mcp251xfd_osc_invalid(osc)) {
- netdev_err(priv->ndev,
- "Failed to detect %s (osc=0x%08x).\n",
- mcp251xfd_get_model_str(priv), osc);
- return -ENODEV;
- } else if (err == -ETIMEDOUT) {
- netdev_err(priv->ndev,
- "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
- osc, osc_reference);
- return -ETIMEDOUT;
+ /* Sometimes the PLL is stuck enabled, the controller never
+ * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our
+ * caller takes care of retry.
+ */
+ return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+}
+
+static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv)
+{
+ if (priv->pll_enable) {
+ u32 osc;
+ int err;
+
+ /* Turn off PLL */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ netdev_err(priv->ndev,
+ "Failed to disable PLL.\n");
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow;
}
- return err;
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
@@ -294,10 +383,10 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
const __be16 cmd = mcp251xfd_cmd_reset();
int err;
- /* The Set Mode and SPI Reset command only seems to works if
- * the controller is not in Sleep Mode.
+ /* The Set Mode and SPI Reset command only works if the
+ * controller is not in Sleep Mode.
*/
- err = mcp251xfd_chip_clock_enable(priv);
+ err = mcp251xfd_chip_wake(priv);
if (err)
return err;
@@ -311,34 +400,29 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
- u32 osc, osc_reference;
+ u32 osc_reference, osc_mask;
u8 mode;
int err;
- err = mcp251xfd_chip_get_mode(priv, &mode);
- if (err)
- return err;
-
- if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
- netdev_info(priv->ndev,
- "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode), mode);
- return -ETIMEDOUT;
- }
-
+ /* Check for reset defaults of OSC reg.
+ * This will take care of stabilization period.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
- /* check reset defaults of OSC reg */
- err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ err = mcp251xfd_chip_get_mode(priv, &mode);
if (err)
return err;
- if (osc != osc_reference) {
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
netdev_info(priv->ndev,
- "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
- osc, osc_reference);
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
return -ETIMEDOUT;
}
@@ -374,7 +458,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
- u32 osc;
+ u32 osc, osc_reference, osc_mask;
int err;
/* Activate Low Power Mode on Oscillator Disable. This only
@@ -384,10 +468,29 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
osc = MCP251XFD_REG_OSC_LPMEN |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ if (priv->pll_enable) {
+ osc |= MCP251XFD_REG_OSC_PLLEN;
+ osc_reference |= MCP251XFD_REG_OSC_PLLRDY;
+ }
+
err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
if (err)
return err;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast;
+
+ return 0;
+}
+
+static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
+{
/* Set Time Base Counter Prescaler to 1.
*
* This means an overflow of the 32 bit Time Base Counter
@@ -628,14 +731,14 @@ static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
-static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
- const enum can_state state)
+static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
{
priv->can.state = state;
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
- return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ mcp251xfd_chip_sleep(priv);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
@@ -650,6 +753,10 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
+ err = mcp251xfd_chip_timestamp_init(priv);
+ if (err)
+ goto out_chip_stop;
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -662,7 +769,9 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
- mcp251xfd_ring_init(priv);
+ err = mcp251xfd_ring_init(priv);
+ if (err)
+ goto out_chip_stop;
err = mcp251xfd_chip_fifo_init(priv);
if (err)
@@ -816,7 +925,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -921,7 +1030,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
return 0;
mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -990,11 +1099,12 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
if (err)
return err;
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1159,7 +1269,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
* - for mcp2518fd: offset not 0 or 1
*/
if (chip_tx_tail != tx_tail ||
- !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) ||
+ mcp251xfd_is_251863(priv))))) {
netdev_err(priv->ndev,
"ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
@@ -1284,6 +1395,20 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
return 0;
}
+static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
+ size_t len;
+
+ if (priv->rx_ring_num == 1)
+ len = sizeof(priv->regs_status.intf);
+ else
+ len = sizeof(priv->regs_status);
+
+ return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status, len / val_bytes);
+}
+
#define mcp251xfd_handle(priv, irq, ...) \
({ \
struct mcp251xfd_priv *_priv = (priv); \
@@ -1300,7 +1425,6 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
- const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -1312,21 +1436,28 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
if (!rx_pending)
break;
+ /* Assume the 1st RX-FIFO is pending. If other FIFOs
+ * are pending, the main IRQ handler will take care
+ * of them.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
err = mcp251xfd_handle(priv, rxif);
if (err)
goto out_fail;
handled = IRQ_HANDLED;
- } while (1);
+
+ /* We don't know which RX-FIFO is pending, so handle
+ * only the 1st RX-FIFO. Leave the loop here if we have
+ * more than 1 RX-FIFO, to avoid starvation.
+ */
+ } while (priv->rx_ring_num == 1);
do {
u32 intf_pending, intf_pending_clearable;
bool set_normal_mode = false;
- err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
- &priv->regs_status,
- sizeof(priv->regs_status) /
- val_bytes);
+ err = mcp251xfd_read_regs_status(priv);
if (err)
goto out_fail;
@@ -1478,6 +1609,7 @@ static int mcp251xfd_open(struct net_device *ndev)
goto out_transceiver_disable;
mcp251xfd_timestamp_init(priv);
+ clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
@@ -1498,6 +1630,7 @@ static int mcp251xfd_open(struct net_device *ndev)
free_irq(spi->irq, priv);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
mcp251xfd_timestamp_stop(priv);
out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
@@ -1517,6 +1650,9 @@ static int mcp251xfd_stop(struct net_device *ndev)
struct mcp251xfd_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ hrtimer_cancel(&priv->rx_irq_timer);
+ hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
@@ -1535,6 +1671,7 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_change_mtu = can_change_mtu,
};
@@ -1555,8 +1692,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
u32 osc;
int err;
- /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
- * autodetect the model.
+ /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863,
+ * so use it to autodetect the model.
*/
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
MCP251XFD_REG_OSC_LPMEN,
@@ -1568,12 +1705,20 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
if (err)
return err;
- if (osc & MCP251XFD_REG_OSC_LPMEN)
- devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
- else
+ if (osc & MCP251XFD_REG_OSC_LPMEN) {
+ /* We cannot distinguish between MCP2518FD and
+ * MCP251863. If firmware specifies MCP251863, keep
+ * it, otherwise set to MCP2518FD.
+ */
+ if (mcp251xfd_is_251863(priv))
+ devtype_data = &mcp251xfd_devtype_data_mcp251863;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ } else {
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+ }
- if (!mcp251xfd_is_251X(priv) &&
+ if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
"Detected %s, but firmware specifies a %s. Fixing up.\n",
@@ -1621,8 +1766,9 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
- u32 *dev_id, u32 *effective_speed_hz)
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ u32 *effective_speed_hz_slow,
+ u32 *effective_speed_hz_fast)
{
struct mcp251xfd_map_buf_nocrc *buf_rx;
struct mcp251xfd_map_buf_nocrc *buf_tx;
@@ -1641,23 +1787,27 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
xfer[0].tx_buf = buf_tx;
xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
- xfer[1].len = sizeof(dev_id);
+ xfer[1].len = sizeof(*dev_id);
+ xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+
err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
if (err)
goto out_kfree_buf_tx;
- *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
- *effective_speed_hz = xfer->effective_speed_hz;
+ *dev_id = get_unaligned_le32(buf_rx->data);
+ *effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ *effective_speed_hz_fast = xfer[1].effective_speed_hz;
out_kfree_buf_tx:
kfree(buf_tx);
out_kfree_buf_rx:
kfree(buf_rx);
- return 0;
+ return err;
}
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
@@ -1666,34 +1816,45 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
- u32 dev_id, effective_speed_hz;
+ u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast;
+ unsigned long clk_rate;
int err;
err = mcp251xfd_register_get_dev_id(priv, &dev_id,
- &effective_speed_hz);
+ &effective_speed_hz_slow,
+ &effective_speed_hz_fast);
if (err)
return err;
+ clk_rate = clk_get_rate(priv->clk);
+
netdev_info(priv->ndev,
- "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n",
mcp251xfd_get_model_str(priv),
FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
priv->rx_int ? '+' : '-',
+ priv->pll_enable ? '+' : '-',
MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
MCP251XFD_QUIRK_ACTIVE(CRC_REG),
MCP251XFD_QUIRK_ACTIVE(CRC_RX),
MCP251XFD_QUIRK_ACTIVE(CRC_TX),
MCP251XFD_QUIRK_ACTIVE(ECC),
MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ clk_rate / 1000000,
+ clk_rate % 1000000 / 1000 / 10,
priv->can.clock.freq / 1000000,
priv->can.clock.freq % 1000000 / 1000 / 10,
priv->spi_max_speed_hz_orig / 1000000,
priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
- priv->spi->max_speed_hz / 1000000,
- priv->spi->max_speed_hz % 1000000 / 1000 / 10,
- effective_speed_hz / 1000000,
- effective_speed_hz % 1000000 / 1000 / 10);
+ priv->spi_max_speed_hz_slow / 1000000,
+ priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10,
+ effective_speed_hz_slow / 1000000,
+ effective_speed_hz_slow % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_fast / 1000000,
+ priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10,
+ effective_speed_hz_fast / 1000000,
+ effective_speed_hz_fast % 1000000 / 1000 / 10);
return 0;
}
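The slow/fast split logged above leans on a standard SPI-core feature: each struct spi_transfer may carry its own speed_hz override, exactly as mcp251xfd_register_get_dev_id() does for its command and data phases. The following is an illustrative sketch of that pattern only (a hypothetical helper, not part of the patch):

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Hypothetical helper: clock the command phase at the conservative
 * oscillator-derived rate and the payload phase at the PLL-backed
 * rate. Both transfers share one chip select via spi_sync_transfer().
 */
static int example_read_two_speeds(struct spi_device *spi,
				   const void *cmd, size_t cmd_len,
				   void *val, size_t val_len,
				   u32 hz_slow, u32 hz_fast)
{
	struct spi_transfer xfer[2] = {
		{
			.tx_buf = cmd,
			.len = cmd_len,
			.speed_hz = hz_slow,
		}, {
			.rx_buf = val,
			.len = val_len,
			.speed_hz = hz_fast,
		},
	};

	return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}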
@@ -1719,19 +1880,27 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
if (err == -ENODEV)
goto out_runtime_disable;
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_sleep;
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ mcp251xfd_ethtool_init(priv);
err = register_candev(ndev);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_done(priv);
if (err)
@@ -1741,7 +1910,7 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
* disable the clocks and vdd. If CONFIG_PM is not enabled,
* the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ err = mcp251xfd_chip_sleep(priv);
if (err)
goto out_unregister_candev;
@@ -1751,8 +1920,8 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
out_unregister_candev:
unregister_candev(ndev);
- out_chip_set_mode_sleep:
- mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
out_runtime_put_noidle:
@@ -1768,10 +1937,10 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- pm_runtime_get_sync(ndev->dev.parent);
- pm_runtime_put_noidle(ndev->dev.parent);
- mcp251xfd_clks_and_vdd_disable(priv);
- pm_runtime_disable(ndev->dev.parent);
+ if (pm_runtime_enabled(ndev->dev.parent))
+ pm_runtime_disable(ndev->dev.parent);
+ else
+ mcp251xfd_clks_and_vdd_disable(priv);
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -1782,6 +1951,9 @@ static const struct of_device_id mcp251xfd_of_match[] = {
.compatible = "microchip,mcp2518fd",
.data = &mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .compatible = "microchip,mcp251863",
+ .data = &mcp251xfd_devtype_data_mcp251863,
+ }, {
.compatible = "microchip,mcp251xfd",
.data = &mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -1798,6 +1970,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = {
.name = "mcp2518fd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .name = "mcp251863",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863,
+ }, {
.name = "mcp251xfd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -1814,6 +1989,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
+ bool pll_enable = false;
u32 freq = 0;
int err;
@@ -1864,12 +2040,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
return -ERANGE;
}
- if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
- dev_err(&spi->dev,
- "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
- freq);
- return -ERANGE;
- }
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER)
+ pll_enable = true;
ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
MCP251XFD_TX_OBJ_NUM_MAX);
@@ -1885,6 +2057,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv = netdev_priv(ndev);
spi_set_drvdata(spi, priv);
priv->can.clock.freq = freq;
+ if (pll_enable)
+ priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER;
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
@@ -1893,10 +2067,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC;
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
priv->clk = clk;
+ priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
@@ -1934,7 +2110,16 @@ static int mcp251xfd_probe(struct spi_device *spi)
*
*/
priv->spi_max_speed_hz_orig = spi->max_speed_hz;
- spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ priv->spi_max_speed_hz_slow = min(spi->max_speed_hz,
+ freq / 2 / 1000 * 850);
+ if (priv->pll_enable)
+ priv->spi_max_speed_hz_fast = min(spi->max_speed_hz,
+ freq *
+ MCP251XFD_OSC_PLL_MULTIPLIER /
+ 2 / 1000 * 850);
+ else
+ priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow;
+ spi->max_speed_hz = priv->spi_max_speed_hz_slow;
spi->bits_per_word = 8;
spi->rt = true;
err = spi_setup(spi);
@@ -1951,8 +2136,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
goto out_free_candev;
err = mcp251xfd_register(priv);
- if (err)
+ if (err) {
+ dev_err_probe(&spi->dev, err, "Failed to detect %s.\n",
+ mcp251xfd_get_model_str(priv));
goto out_can_rx_offload_del;
+ }
return 0;
@@ -1966,7 +2154,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
return err;
}
-static int mcp251xfd_remove(struct spi_device *spi)
+static void mcp251xfd_remove(struct spi_device *spi)
{
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
@@ -1975,8 +2163,6 @@ static int mcp251xfd_remove(struct spi_device *spi)
mcp251xfd_unregister(priv);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
-
- return 0;
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index ffae8fdd3af0..004eaf96262b 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -207,10 +207,10 @@ static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv,
.val = tx->base,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
- .val = 0,
+ .val = tx->nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
- .val = MCP251XFD_TX_FIFO,
+ .val = tx->fifo_nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
.val = tx->obj_num,
@@ -253,7 +253,7 @@ void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) *
sizeof(struct mcp251xfd_dump_object_reg);
- /* TEF ring, RX ring, TX rings */
+ /* TEF ring, RX rings, TX ring */
rings_num = 1 + priv->rx_ring_num + 1;
obj_num += rings_num;
file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX *
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
new file mode 100644
index 000000000000..3585f02575df
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static void
+mcp251xfd_ring_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ ring->rx_max_pending = layout.max_rx;
+ ring->tx_max_pending = layout.max_tx;
+
+ ring->rx_pending = priv->rx_obj_num;
+ ring->tx_pending = priv->tx->obj_num;
+}
+
+static int
+mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode);
+ if ((layout.cur_rx != priv->rx_obj_num ||
+ layout.cur_tx != priv->tx->obj_num) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->tx->obj_num = layout.cur_tx;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 rx_max_frames, tx_max_frames;
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (priv->rx_obj_num_coalesce_irq == 0)
+ rx_max_frames = 1;
+ else
+ rx_max_frames = priv->rx_obj_num_coalesce_irq;
+
+ ec->rx_max_coalesced_frames_irq = rx_max_frames;
+ ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq;
+
+ if (priv->tx_obj_num_coalesce_irq == 0)
+ tx_max_frames = 1;
+ else
+ tx_max_frames = priv->tx_obj_num_coalesce_irq;
+
+ ec->tx_max_coalesced_frames_irq = tx_max_frames;
+ ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode);
+
+ if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq ||
+ ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq ||
+ layout.tx_coalesce != priv->tx_obj_num_coalesce_irq ||
+ ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static const struct ethtool_ops mcp251xfd_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ringparam = mcp251xfd_ring_get_ringparam,
+ .set_ringparam = mcp251xfd_ring_set_ringparam,
+ .get_coalesce = mcp251xfd_ring_get_coalesce,
+ .set_coalesce = mcp251xfd_ring_set_coalesce,
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
+{
+ struct can_ram_layout layout;
+
+ priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);
+ priv->rx_obj_num = layout.default_rx;
+ priv->tx->obj_num = layout.default_tx;
+
+ priv->rx_obj_num_coalesce_irq = 0;
+ priv->tx_obj_num_coalesce_irq = 0;
+ priv->rx_coalesce_usecs_irq = 0;
+ priv->tx_coalesce_usecs_irq = 0;
+}
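For reference, the IRQ coalescing parameters this new file exposes are driven from user space through the standard SIOCETHTOOL ioctl (equivalently, `ethtool -C can0 rx-frames-irq 8 rx-usecs-irq 500`). A self-contained user-space example follows; the interface name "can0" is an assumption and error handling is trimmed:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr = { 0 };
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* read current values */
		return 1;

	/* Batch up to 8 CAN frames per IRQ, or flush after 500 us.
	 * Per the convention quoted above, "usecs = 0, max_frames = 1"
	 * would disable coalescing again.
	 */
	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_max_coalesced_frames_irq = 8;
	ec.rx_coalesce_usecs_irq = 500;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	printf("coalescing configured\n");
	close(fd);
	return 0;
}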
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
new file mode 100644
index 000000000000..9e8e82cdba46
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd-ram.h"
+
+static inline u8 can_ram_clamp(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ u8 val)
+{
+ u8 max;
+
+ max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth);
+ return clamp(val, obj->min, max);
+}
+
+static u8
+can_ram_rounddown_pow_of_two(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ const u8 coalesce, u8 val)
+{
+ u8 fifo_num = obj->fifo_num;
+ u8 ret = 0, i;
+
+ val = can_ram_clamp(config, obj, val);
+
+ if (coalesce) {
+ /* Use 1st FIFO for coalescing, if requested.
+ *
+ * Either use complete FIFO (and FIFO Full IRQ) for
+ * coalescing or only half of FIFO (FIFO Half Full
+ * IRQ) and use remaining half for normal objects.
+ */
+ ret = min_t(u8, coalesce * 2, config->fifo_depth);
+ val -= ret;
+ fifo_num--;
+ }
+
+ for (i = 0; i < fifo_num && val; i++) {
+ u8 n;
+
+ n = min_t(u8, rounddown_pow_of_two(val),
+ config->fifo_depth);
+
+ /* skip small FIFOs */
+ if (n < obj->fifo_depth_min)
+ return ret;
+
+ ret += n;
+ val -= n;
+ }
+
+ return ret;
+}
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode)
+{
+ u8 num_rx, num_tx;
+ u16 ram_free;
+
+ /* default CAN */
+
+ num_tx = config->tx.def[fd_mode];
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * num_tx;
+
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->default_tx = num_tx;
+
+ /* MAX CAN */
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * config->tx.min;
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ ram_free = config->size;
+ ram_free -= config->rx.size[fd_mode] * config->rx.min;
+ num_tx = ram_free / config->tx.size[fd_mode];
+
+ layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* cur CAN */
+
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->rx_coalesce_usecs_irq == 0 &&
+ ec->rx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_rx / 2, config->fifo_depth);
+ num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq,
+ (u32)config->rx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce);
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx,
+ num_rx_coalesce, num_rx);
+ }
+
+ ram_free = config->size - config->rx.size[fd_mode] * num_rx;
+ num_tx = ram_free / config->tx.size[fd_mode];
+ num_tx = min_t(u8, ring->tx_pending, num_tx);
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->tx_coalesce_usecs_irq == 0 &&
+ ec->tx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_tx / 2, config->fifo_depth);
+ num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq,
+ (u32)config->tx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce);
+
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx,
+ num_tx_coalesce, num_tx);
+ }
+
+ layout->cur_rx = num_rx;
+ layout->cur_tx = num_tx;
+ layout->rx_coalesce = num_rx_coalesce;
+ layout->tx_coalesce = num_tx_coalesce;
+ } else {
+ layout->cur_rx = layout->default_rx;
+ layout->cur_tx = layout->default_tx;
+ layout->rx_coalesce = 0;
+ layout->tx_coalesce = 0;
+ }
+}
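To see what this packing does with a concrete request, here is a standalone user-space re-implementation of the rounddown-pow-of-two split. It is a sketch only; the constants mirror the RX values added to mcp251xfd.h further down, not the driver's actual can_ram_config:

#include <stdint.h>
#include <stdio.h>

#define FIFO_DEPTH	32u	/* mirrors MCP251XFD_FIFO_DEPTH */
#define FIFO_NUM	3u	/* mirrors MCP251XFD_FIFO_RX_NUM */
#define DEPTH_MIN	4u	/* mirrors MCP251XFD_RX_FIFO_DEPTH_MIN */

static uint32_t rounddown_pow2(uint32_t v)
{
	uint32_t r = 1;

	while (r * 2 <= v)
		r *= 2;
	return r;
}

int main(void)
{
	uint32_t val = 50, ret = 0;	/* request 50 RX objects */

	for (uint32_t i = 0; i < FIFO_NUM && val; i++) {
		uint32_t n = rounddown_pow2(val);

		if (n > FIFO_DEPTH)
			n = FIFO_DEPTH;
		if (n < DEPTH_MIN)	/* skip small FIFOs */
			break;
		printf("FIFO %u: %u objects\n", i, n);
		ret += n;
		val -= n;
	}
	printf("total: %u of 50 requested\n", ret);
	return 0;
}

Asking for 50 objects yields a 32-deep and a 16-deep FIFO; the 2 leftover objects fall below the minimum depth and are dropped, for a total of 48.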
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
new file mode 100644
index 000000000000..7558c1510cbf
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2021, 2022 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_RAM_H
+#define _MCP251XFD_RAM_H
+
+#include <linux/ethtool.h>
+
+#define CAN_RAM_NUM_MAX (-1)
+
+enum can_ram_mode {
+ CAN_RAM_MODE_CAN,
+ CAN_RAM_MODE_CANFD,
+ __CAN_RAM_MODE_MAX
+};
+
+struct can_ram_obj_config {
+ u8 size[__CAN_RAM_MODE_MAX];
+
+ u8 def[__CAN_RAM_MODE_MAX];
+ u8 min;
+ u8 max;
+
+ u8 fifo_num;
+ u8 fifo_depth_min;
+ u8 fifo_depth_coalesce_min;
+};
+
+struct can_ram_config {
+ const struct can_ram_obj_config rx;
+ const struct can_ram_obj_config tx;
+
+ u16 size;
+ u8 fifo_depth;
+};
+
+struct can_ram_layout {
+ u8 default_rx;
+ u8 default_tx;
+
+ u8 max_rx;
+ u8 max_tx;
+
+ u8 cur_rx;
+ u8 cur_tx;
+
+ u8 rx_coalesce;
+ u8 tx_coalesce;
+};
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode);
+
+#endif
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 7b120c716228..92b7bc7f14b9 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -2,8 +2,8 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020 Pengutronix,
-// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
//
#include "mcp251xfd.h"
@@ -47,22 +47,32 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
-static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+static inline bool
+mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
+ unsigned int reg)
{
+ struct mcp251xfd_rx_ring *ring;
+ int n;
+
switch (reg) {
case MCP251XFD_REG_INT:
case MCP251XFD_REG_TEFCON:
- case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_FLTCON(0):
case MCP251XFD_REG_ECCSTAT:
case MCP251XFD_REG_CRC:
return false;
case MCP251XFD_REG_CON:
- case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
return true;
default:
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr))
+ return false;
+ if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr))
+ return true;
+ }
+
WARN(1, "Status of reg 0x%04x unknown.\n", reg);
}
@@ -92,7 +102,7 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- if (mcp251xfd_update_bits_read_reg(reg)) {
+ if (mcp251xfd_update_bits_read_reg(priv, reg)) {
struct spi_transfer xfer[2] = { };
struct spi_message msg;
@@ -324,19 +334,21 @@ mcp251xfd_regmap_crc_read(void *context,
* register. It increments once per SYS clock tick,
* which is 20 or 40 MHz.
*
- * Observation shows that if the lowest byte (which is
- * transferred first on the SPI bus) of that register
- * is 0x00 or 0x80 the calculated CRC doesn't always
- * match the transferred one.
+ * Observation on the mcp2518fd shows that if the
+ * lowest byte (which is transferred first on the SPI
+ * bus) of that register is 0x00 or 0x80 the
+ * calculated CRC doesn't always match the transferred
+ * one. On the mcp2517fd this problem is not limited
+ * to the first byte being 0x00 or 0x80.
*
* If the highest bit in the lowest byte is flipped
* the transferred CRC matches the calculated one. We
- * assume for now the CRC calculation in the chip
- * works on wrong data and the transferred data is
- * correct.
+ * assume for now the CRC operates on the correct
+ * data.
*/
if (reg == MCP251XFD_REG_TBC &&
- (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+ ((buf_rx->data[0] & 0xf8) == 0x0 ||
+ (buf_rx->data[0] & 0xf8) == 0x80)) {
/* Flip highest bit in lowest byte of le32 */
buf_rx->data[0] ^= 0x80;
@@ -346,10 +358,8 @@ mcp251xfd_regmap_crc_read(void *context,
val_len);
if (!err) {
/* If CRC is now correct, assume
- * transferred data was OK, flip bit
- * back to original value.
+ * flipped data is OK.
*/
- buf_rx->data[0] ^= 0x80;
goto out;
}
}
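The widened TBC quirk handling above can be condensed into a standalone sketch. Note the CRC routine below is a stand-in for illustration, not the chip's actual CRC-16, and the real driver falls back to its generic retry loop rather than restoring the byte on failure:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in checksum; the driver really uses mcp251xfd_crc16_compute2()
 * over the SPI command and data.
 */
static uint16_t crc16_stand_in(const uint8_t *data, size_t len)
{
	uint16_t crc = 0xffff;

	while (len--)
		crc = (uint16_t)((crc << 1) ^ *data++);
	return crc;
}

/* Mirror of the retry above: for TBC reads whose first byte lies in
 * 0x00..0x07 or 0x80..0x87, flip bit 7 of that byte and re-check the
 * CRC; a match means the flipped data is assumed correct.
 */
static bool tbc_crc_check(uint8_t data[4], uint16_t crc_received)
{
	if (crc16_stand_in(data, 4) == crc_received)
		return true;

	if ((data[0] & 0xf8) == 0x00 || (data[0] & 0xf8) == 0x80) {
		data[0] ^= 0x80;
		if (crc16_stand_in(data, 4) == crc_received)
			return true;
		data[0] ^= 0x80;	/* restore on failure */
	}

	return false;
}

int main(void)
{
	uint8_t tbc[4] = { 0x80, 0x12, 0x34, 0x56 };
	/* CRC as the chip would compute it over the bit-flipped variant */
	uint8_t flipped[4] = { 0x00, 0x12, 0x34, 0x56 };

	return tbc_crc_check(tbc, crc16_stand_in(flipped, 4)) ? 0 : 1;
}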
@@ -368,7 +378,7 @@ mcp251xfd_regmap_crc_read(void *context,
* to the caller. It will take care of both cases.
*
*/
- if (reg == MCP251XFD_REG_OSC) {
+ if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) {
err = 0;
goto out;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 92f9e9b01289..bf3f0f150199 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -15,6 +15,7 @@
#include <asm/unaligned.h>
#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
@@ -53,6 +54,72 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
}
static void
+mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
+{
+ struct mcp251xfd_tef_ring *tef_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* TEF- and TX-FIFO have same number of objects */
+ *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
+ addr, val, val);
+ tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
+ tef_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
+ &tef_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
+ xfer = &tef_ring->uinc_xfer[i];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
+ if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
+ val = MCP251XFD_REG_TEFCON_UINC |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &tef_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+}
+
+static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_tx_ring *ring,
struct mcp251xfd_tx_obj *tx_obj,
@@ -88,84 +155,68 @@ mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
ARRAY_SIZE(tx_obj->xfer));
}
-void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+static void
+mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
- struct mcp251xfd_tef_ring *tef_ring;
struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
struct mcp251xfd_tx_obj *tx_obj;
- struct spi_transfer *xfer;
u32 val;
u16 addr;
u8 len;
- int i, j;
-
- netdev_reset_queue(priv->ndev);
-
- /* TEF */
- tef_ring = priv->tef;
- tef_ring->head = 0;
- tef_ring->tail = 0;
-
- /* FIFO increment TEF tail pointer */
- addr = MCP251XFD_REG_TEFCON;
- val = MCP251XFD_REG_TEFCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- xfer = &tef_ring->uinc_xfer[j];
- xfer->tx_buf = &tef_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an active
- * chip select after the complete SPI message. This causes the
- * controller to interpret the next register access as
- * data. Set "cs_change" of the last transfer to "0" to
- * properly deactivate the chip select at the end of the
- * message.
- */
- xfer->cs_change = 0;
+ int i;
- /* TX */
tx_ring = priv->tx;
tx_ring->head = 0;
tx_ring->tail = 0;
- tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
+ tx_ring->base = *base;
+ tx_ring->nr = 0;
+ tx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
+ *fifo_nr += 1;
/* FIFO request to send */
- addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
+ addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
addr, val, val);
mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+}
+
+static void
+mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_rx_ring *rx_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i, j;
- /* RX */
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
rx_ring->head = 0;
rx_ring->tail = 0;
+ rx_ring->base = *base;
rx_ring->nr = i;
- rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
+ rx_ring->fifo_nr = *fifo_nr;
- if (!prev_rx_ring)
- rx_ring->base =
- mcp251xfd_get_tx_obj_addr(tx_ring,
- tx_ring->obj_num);
- else
- rx_ring->base = prev_rx_ring->base +
- prev_rx_ring->obj_size *
- prev_rx_ring->obj_num;
+ *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
+ *fifo_nr += 1;
- prev_rx_ring = rx_ring;
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
+ addr, val, val);
+ rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
+ rx_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
+ &rx_ring->irq_enable_xfer, 1);
/* FIFO increment RX tail pointer */
- addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
addr, val, val);
@@ -187,9 +238,149 @@ void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
* the chip select at the end of the message.
*/
xfer->cs_change = 0;
+
+ /* Use 1st RX-FIFO for IRQ coalescing. If enabled
+ * (rx_coalesce_usecs_irq or rx_max_coalesced_frames_irq
+ * is activated), use the last transfer to disable:
+ *
+ * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
+ *
+ * and enable:
+ *
+ * - TFHRFHIE (Receive FIFO Half Full Interrupt)
+ * - or -
+ * - TFERFFIE (Receive FIFO Full Interrupt)
+ *
+ * depending on rx_max_coalesced_frames_irq.
+ *
+ * The RXOVIE (Overflow Interrupt) is always enabled.
+ */
+ if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
+ priv->rx_obj_num_coalesce_irq)) {
+ val = MCP251XFD_REG_FIFOCON_UINC |
+ MCP251XFD_REG_FIFOCON_RXOVIE;
+
+ if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
+ val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
+ else if (priv->rx_obj_num_coalesce_irq)
+ val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &rx_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
}
}
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u16 base = 0, ram_used;
+ u8 fifo_nr = 1;
+ int i;
+
+ netdev_reset_queue(priv->ndev);
+
+ mcp251xfd_ring_init_tef(priv, &base);
+ mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
+ mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);
+
+ /* mcp251xfd_handle_rxif() will iterate over all RX rings.
+ * Rings with their corresponding bit set in
+ * priv->regs_status.rxif are read out.
+ *
+ * If the chip is configured for only 1 RX-FIFO, and if there
+ * is an RX interrupt pending (RXIF in INT register is set),
+ * it must be the 1st RX-FIFO.
+ *
+ * We mark the RXIF of the 1st FIFO as pending here, so that
+ * we can skip the read of the RXIF register in
+ * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
+ *
+ * If we use more than 1 RX-FIFO, this value gets overwritten
+ * in mcp251xfd_read_regs_status(), so set it unconditionally
+ * here.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
+
+ if (priv->tx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx_obj_num_coalesce_irq *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
+ priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+ }
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
+ priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
+
+ if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
+ continue;
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2u*%u bytes = %4u bytes\n",
+ mcp251xfd_get_rx_obj_addr(rx_ring,
+ priv->rx_obj_num_coalesce_irq),
+ rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
+ rx_ring->obj_size,
+ (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
+ rx_ring->obj_size);
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_num * rx_ring->obj_size);
+ }
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ priv->tx->fifo_nr,
+ mcp251xfd_get_tx_obj_addr(priv->tx, 0),
+ priv->tx->obj_num, priv->tx->obj_size,
+ priv->tx->obj_num * priv->tx->obj_size);
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %4d bytes\n",
+ MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
+
+ ram_used = base - MCP251XFD_RAM_START;
+ if (ram_used > MCP251XFD_RAM_SIZE) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ ram_used, MCP251XFD_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
int i;
@@ -200,40 +391,103 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
}
}
+static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ rx_irq_timer);
+ struct mcp251xfd_rx_ring *ring = priv->rx[0];
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ tx_irq_timer);
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+const struct can_ram_config mcp251xfd_ram_config = {
+ .rx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
+ .min = MCP251XFD_RX_OBJ_NUM_MIN,
+ .max = MCP251XFD_RX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
+ .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
+ .fifo_num = MCP251XFD_FIFO_RX_NUM,
+ .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .tx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_canfd),
+ .min = MCP251XFD_TX_OBJ_NUM_MIN,
+ .max = MCP251XFD_TX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
+ .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
+ .fifo_num = MCP251XFD_FIFO_TX_NUM,
+ .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .size = MCP251XFD_RAM_SIZE,
+ .fifo_depth = MCP251XFD_FIFO_DEPTH,
+};
+
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
- struct mcp251xfd_tx_ring *tx_ring;
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_rx_ring *rx_ring;
- int tef_obj_size, tx_obj_size, rx_obj_size;
- int tx_obj_num;
- int ram_free, i;
+ u8 tx_obj_size, rx_obj_size;
+ u8 rem, i;
- tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
- if (mcp251xfd_is_fd_mode(priv)) {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ priv->rx_obj_num = layout.default_rx;
+ tx_ring->obj_num = layout.default_tx;
+ }
+
+ if (fd_mode) {
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
} else {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
- tx_ring = priv->tx;
- tx_ring->obj_num = tx_obj_num;
tx_ring->obj_size = tx_obj_size;
- ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
- (tef_obj_size + tx_obj_size);
-
- for (i = 0;
- i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
- i++) {
- int rx_obj_num;
+ rem = priv->rx_obj_num;
+ for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
+ u8 rx_obj_num;
- rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
- MCP251XFD_RX_OBJ_NUM_MAX);
+ if (i == 0 && priv->rx_obj_num_coalesce_irq)
+ rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
+ MCP251XFD_FIFO_DEPTH);
+ else
+ rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
+ MCP251XFD_FIFO_DEPTH);
+ rem -= rx_obj_num;
rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
GFP_KERNEL);
@@ -241,29 +495,18 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
mcp251xfd_ring_free(priv);
return -ENOMEM;
}
+
rx_ring->obj_num = rx_obj_num;
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
-
- ram_free -= rx_ring->obj_num * rx_ring->obj_size;
}
priv->rx_ring_num = i;
- netdev_dbg(priv->ndev,
- "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
- tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
- tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+ hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- netdev_dbg(priv->ndev,
- "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
- i, rx_ring->obj_num, rx_ring->obj_size,
- rx_ring->obj_size * rx_ring->obj_num);
- }
-
- netdev_dbg(priv->ndev,
- "FIFO setup: free: %d bytes\n",
- ram_free);
+ hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index 63f2526464b3..ced8d9c81f8c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -19,7 +19,7 @@
static inline int
mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head)
+ u8 *rx_head, bool *fifo_empty)
{
u32 fifo_sta;
int err;
@@ -30,6 +30,7 @@ mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
return err;
*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+ *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
return 0;
}
@@ -84,10 +85,12 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
{
u32 new_head;
u8 chip_rx_head;
+ bool fifo_empty;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
+ &fifo_empty);
+ if (err || fifo_empty)
return err;
/* chip_rx_head is the next RX-Object filled by the HW.
@@ -170,7 +173,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
}
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
stats->rx_fifo_errors++;
@@ -251,10 +254,23 @@ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
int err, n;
mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ /* - if RX IRQ coalescing is active, always handle ring 0
+ * - otherwise, only handle rings whose RX IRQ is pending
+ */
+ if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
+ !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
+ continue;
+
err = mcp251xfd_handle_rxif_ring(priv, ring);
if (err)
return err;
}
+ if (priv->rx_coalesce_usecs_irq)
+ hrtimer_start(&priv->rx_irq_timer,
+ ns_to_ktime(priv->rx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
return 0;
}
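The gating condition at the top of the loop above packs two rules into one negated expression; the following standalone demo spells out the same predicate. The three RX rings on FIFOs 1..3 are an assumed configuration, matching how mcp251xfd_ring_init() numbers the FIFOs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as the `continue` above: ring 0 is always handled
 * when RX IRQ coalescing is active; every other ring is handled only
 * if its RXIF bit is pending.
 */
static bool ring_handled(unsigned int nr, unsigned int fifo_nr,
			 bool coalesce_active, uint32_t rxif)
{
	if ((nr > 0 || !coalesce_active) && !(rxif & (1u << fifo_nr)))
		return false;
	return true;
}

int main(void)
{
	const uint32_t rxif = 1u << 2;	/* only FIFO 2 pending */

	for (unsigned int nr = 0; nr < 3; nr++)
		printf("ring %u (FIFO %u): %s\n", nr, nr + 1,
		       ring_handled(nr, nr + 1, true, rxif) ?
		       "handled" : "skipped");
	return 0;
}

With coalescing active and only FIFO 2 pending, ring 0 is handled unconditionally, ring 1 is handled because its RXIF bit is set, and ring 2 is skipped.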
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index 406166005b99..237617b0c125 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -256,5 +256,11 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
netif_wake_queue(priv->ndev);
}
+ if (priv->tx_coalesce_usecs_irq)
+ hrtimer_start(&priv->tx_irq_timer,
+ ns_to_ktime(priv->tx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index ffb6c36b7d9b..160528d3cc26 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -172,7 +172,7 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
u8 tx_head;
int err;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (mcp251xfd_tx_busy(priv, tx_ring))
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index f551c900803e..2b0309fedfac 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,8 +2,8 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019 Pengutronix,
- * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
@@ -367,25 +367,6 @@
#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
-/* number of TX FIFO objects, depending on CAN mode
- *
- * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
- * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
- */
-#define MCP251XFD_RX_OBJ_NUM_MAX 32
-#define MCP251XFD_TX_OBJ_NUM_CAN 8
-#define MCP251XFD_TX_OBJ_NUM_CANFD 4
-
-#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
-#else
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
-#endif
-
-#define MCP251XFD_NAPI_WEIGHT 32
-#define MCP251XFD_TX_FIFO 1
-#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
-
/* SPI commands */
#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
@@ -406,12 +387,38 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
#define MCP251XFD_POLL_SLEEP_US (10)
#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+
+/* Misc */
+#define MCP251XFD_NAPI_WEIGHT 32
#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
#define MCP251XFD_READ_CRC_RETRIES_MAX 3
#define MCP251XFD_ECC_CNT_MAX 2
#define MCP251XFD_SANITIZE_SPI 1
#define MCP251XFD_SANITIZE_CAN 1
+/* FIFO and Ring */
+#define MCP251XFD_FIFO_TEF_NUM 1U
+#define MCP251XFD_FIFO_RX_NUM 3U
+#define MCP251XFD_FIFO_TX_NUM 1U
+
+#define MCP251XFD_FIFO_DEPTH 32U
+
+#define MCP251XFD_RX_OBJ_NUM_MIN 16U
+#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH)
+#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U
+#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U
+
+#define MCP251XFD_TX_OBJ_NUM_MIN 2U
+#define MCP251XFD_TX_OBJ_NUM_MAX 16U
+#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U
+#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U
+#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U
+#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U
+
+static_assert(MCP251XFD_FIFO_TEF_NUM == 1U);
+static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM);
+static_assert(MCP251XFD_FIFO_RX_NUM <= 4U);
+
/* Silence TX MAB overflow warnings */
#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
/* Use CRC to access registers */
@@ -434,7 +441,7 @@ struct mcp251xfd_hw_tef_obj {
/* The tx_obj_raw version is used in spi async, i.e. without
* regmap. We have to take care of endianness ourselves.
*/
-struct mcp251xfd_hw_tx_obj_raw {
+struct __packed mcp251xfd_hw_tx_obj_raw {
__le32 id;
__le32 flags;
u8 data[sizeof_field(struct canfd_frame, data)];
@@ -512,7 +519,12 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
};
@@ -521,6 +533,8 @@ struct mcp251xfd_tx_ring {
unsigned int tail;
u16 base;
+ u8 nr;
+ u8 fifo_nr;
u8 obj_num;
u8 obj_size;
@@ -538,8 +552,13 @@ struct mcp251xfd_rx_ring {
u8 obj_num;
u8 obj_size;
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
- struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH];
struct mcp251xfd_hw_rx_obj_canfd obj[];
};
@@ -561,12 +580,14 @@ struct mcp251xfd_ecc {
struct mcp251xfd_regs_status {
u32 intf;
+ u32 rxif;
};
enum mcp251xfd_model {
MCP251XFD_MODEL_MCP2517FD = 0x2517,
MCP251XFD_MODEL_MCP2518FD = 0x2518,
- MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+ MCP251XFD_MODEL_MCP251863 = 0x251863,
+ MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */
};
struct mcp251xfd_devtype_data {
@@ -574,6 +595,13 @@ struct mcp251xfd_devtype_data {
u32 quirks;
};
+enum mcp251xfd_flags {
+ MCP251XFD_FLAGS_DOWN,
+ MCP251XFD_FLAGS_FD_MODE,
+
+ __MCP251XFD_FLAGS_SIZE__
+};
+
struct mcp251xfd_priv {
struct can_priv can;
struct can_rx_offload offload;
@@ -592,12 +620,24 @@ struct mcp251xfd_priv {
struct spi_device *spi;
u32 spi_max_speed_hz_orig;
+ u32 spi_max_speed_hz_fast;
+ u32 spi_max_speed_hz_slow;
+
+ struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM];
+ struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
- struct mcp251xfd_tef_ring tef[1];
- struct mcp251xfd_tx_ring tx[1];
- struct mcp251xfd_rx_ring *rx[1];
+ DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
+ u8 rx_obj_num;
+ u8 rx_obj_num_coalesce_irq;
+ u8 tx_obj_num_coalesce_irq;
+
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_coalesce_usecs_irq;
+ struct hrtimer rx_irq_timer;
+ struct hrtimer tx_irq_timer;
struct mcp251xfd_ecc ecc;
struct mcp251xfd_regs_status regs_status;
@@ -608,6 +648,7 @@ struct mcp251xfd_priv {
struct gpio_desc *rx_int;
struct clk *clk;
+ bool pll_enable;
struct regulator *reg_vdd;
struct regulator *reg_xceiver;
@@ -619,12 +660,13 @@ struct mcp251xfd_priv {
static inline bool \
mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
{ \
- return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \
}
-MCP251XFD_IS(2517);
-MCP251XFD_IS(2518);
-MCP251XFD_IS(251X);
+MCP251XFD_IS(2517FD);
+MCP251XFD_IS(2518FD);
+MCP251XFD_IS(251863);
+MCP251XFD_IS(251XFD);
static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
{
@@ -776,7 +818,7 @@ mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
int err;
err = regmap_read(priv->map_reg,
- MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
+ MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
&fifo_sta);
if (err)
return err;
@@ -878,8 +920,10 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv);
int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
-void mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
+extern const struct can_ram_config mcp251xfd_ram_config;
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 25d6d81ab4f4..2b78f9197681 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -51,9 +51,9 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -429,7 +429,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
canid_t id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -516,8 +516,6 @@ static void sun4i_can_rx(struct net_device *dev)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
@@ -538,11 +536,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
rxerr = (errc >> 16) & 0xFF;
txerr = errc & 0xFF;
- if (skb) {
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
if (isrc & SUN4I_INT_DATA_OR) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -573,6 +566,11 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (skb && state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & SUN4I_INT_BUS_ERR) {
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
@@ -664,7 +662,6 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if ((isrc & SUN4I_INT_RBUF_VLD) &&
!(isrc & SUN4I_INT_DATA_OR)) {
@@ -729,7 +726,6 @@ static int sun4ican_open(struct net_device *dev)
goto exit_can_start;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
netif_start_queue(dev);
return 0;
@@ -756,7 +752,6 @@ static int sun4ican_close(struct net_device *dev)
free_irq(dev->irq, dev);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -767,6 +762,10 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_start_xmit = sun4ican_start_xmit,
};
+static const struct ethtool_ops sun4ican_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct sun4ican_quirks sun4ican_quirks_a10 = {
.has_reset = false,
};
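This minimal ethtool_ops wiring, repeated below for ti_hecc, ems_usb and esd_usb, advertises software timestamping only. The generic helper it points at does roughly the following (sketch of ethtool_op_get_ts_info() from net/ethtool/common.c):

	int ethtool_op_get_ts_info(struct net_device *dev,
				   struct ethtool_ts_info *info)
	{
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}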
@@ -857,6 +856,7 @@ static int sun4ican_probe(struct platform_device *pdev)
}
dev->netdev_ops = &sun4ican_netdev_ops;
+ dev->ethtool_ops = &sun4ican_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -883,7 +883,6 @@ static int sun4ican_probe(struct platform_device *pdev)
DRV_NAME, err);
goto exit_free;
}
- devm_can_led_init(dev);
dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n",
priv->base, dev->irq);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index ff31b993ab17..27700f72eac2 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI HECC (CAN) device driver
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
@@ -23,6 +14,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
@@ -34,7 +26,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#define DRV_NAME "ti_hecc"
@@ -479,7 +470,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
mbxno = get_tx_head_mb(priv);
@@ -633,7 +624,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
@@ -663,12 +654,13 @@ static void ti_hecc_change_state(struct net_device *ndev,
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = hecc_read(priv, HECC_CANTEC);
cf->data[7] = hecc_read(priv, HECC_CANREC);
}
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
@@ -759,7 +751,6 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
can_rx_offload_get_echo_skb(&priv->offload,
mbxno, stamp, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
}
@@ -814,8 +805,6 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
-
ti_hecc_start(ndev);
can_rx_offload_enable(&priv->offload);
netif_start_queue(ndev);
@@ -834,8 +823,6 @@ static int ti_hecc_close(struct net_device *ndev)
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -846,6 +833,10 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ti_hecc_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id ti_hecc_dt_ids[] = {
{
.compatible = "ti,am3517-hecc",
@@ -923,6 +914,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &ti_hecc_netdev_ops;
+ ndev->ethtool_ops = &ti_hecc_ethtool_ops;
priv->clk = clk_get(&pdev->dev, "hecc_ck");
if (IS_ERR(priv->clk)) {
@@ -954,8 +946,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_offload;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32)ndev->irq);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index f959215c9d53..1218f9642f33 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -14,11 +14,18 @@ config CAN_EMS_USB
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_ESD_USB2
- tristate "ESD USB/2 CAN/USB interface"
+config CAN_ESD_USB
+ tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver supports the CAN-USB/2 interface
- from esd electronic system design gmbh (http://www.esd.eu).
+ This driver adds support for several CAN/USB interfaces
+ from esd electronics gmbh (https://www.esd.eu).
+
+ The driver supports the following devices:
+ - esd CAN-USB/2
+ - esd CAN-USB/Micro
+
+ To compile this driver as a module, choose M here: the module
+ will be called esd_usb.
config CAN_ETAS_ES58X
tristate "ETAS ES58X CAN/USB interfaces"
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 748cf31a0d53..1ea16be5743b 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
-obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 7bedceffdfa3..050c0b49938a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -194,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
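Tagging the inner union __packed as well matters because the packed outer struct places the union at byte offset 11, below the union's natural 4 byte alignment; marking the union itself lowers its required alignment (and silences clang's -Wunaligned-access). A userspace sketch of the layout, with illustrative field names rather than the driver's:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct __attribute__((packed)) msg {
		uint8_t  type;
		uint8_t  length;
		uint8_t  msgid;
		uint32_t ts_sec;
		uint32_t ts_nsec;
		union __attribute__((packed)) {
			uint8_t  generic[8];
			uint32_t word;	/* unaligned: offset 11 */
		} u;
	};

	static_assert(offsetof(struct msg, u) == 11, "union starts unaligned");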
@@ -746,7 +747,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -819,7 +820,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
- dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
@@ -880,8 +880,12 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ems_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const ems_usb_bittiming_const = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -991,6 +995,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
+ netdev->ethtool_ops = &ems_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -1075,7 +1080,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ems_usb_driver = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.probe = ems_usb_probe,
.disconnect = ems_usb_disconnect,
.id_table = ems_usb_table,
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb.c
index 286daaaea0b8..81b88e9e5bdc 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
+ * CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro
*
- * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+ * Copyright (C) 2022 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -14,20 +16,24 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
+MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
+MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");
-/* Define these values to match your devices */
+/* USB vendor and product ID */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010
#define USB_CANUSBM_PRODUCT_ID 0x0011
+/* CAN controller clock frequencies */
#define ESD_USB2_CAN_CLOCK 60000000
#define ESD_USBM_CAN_CLOCK 36000000
-#define ESD_USB2_MAX_NETS 2
-/* USB2 commands */
+/* Maximum number of CAN nets */
+#define ESD_USB_MAX_NETS 2
+
+/* USB commands */
#define CMD_VERSION 1 /* also used for VERSION_REPLY */
#define CMD_CAN_RX 2 /* device to host only */
#define CMD_CAN_TX 3 /* also used for TX_DONE */
@@ -43,13 +49,15 @@ MODULE_LICENSE("GPL v2");
#define ESD_EVENT 0x40000000
#define ESD_IDMASK 0x1fffffff
-/* esd CAN event ids used by this driver */
-#define ESD_EV_CAN_ERROR_EXT 2
+/* esd CAN event ids */
+#define ESD_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
/* baudrate message flags */
-#define ESD_USB2_UBR 0x80000000
-#define ESD_USB2_LOM 0x40000000
-#define ESD_USB2_NO_BAUDRATE 0x7fffffff
+#define ESD_USB_UBR 0x80000000
+#define ESD_USB_LOM 0x40000000
+#define ESD_USB_NO_BAUDRATE 0x7fffffff
+
+/* bit timing CAN-USB/2 */
#define ESD_USB2_TSEG1_MIN 1
#define ESD_USB2_TSEG1_MAX 16
#define ESD_USB2_TSEG1_SHIFT 16
@@ -68,7 +76,7 @@ MODULE_LICENSE("GPL v2");
#define ESD_ID_ENABLE 0x80
#define ESD_MAX_ID_SEGMENT 64
-/* SJA1000 ECC register (emulated by usb2 firmware) */
+/* SJA1000 ECC register (emulated by usb firmware) */
#define SJA1000_ECC_SEG 0x1F
#define SJA1000_ECC_DIR 0x20
#define SJA1000_ECC_ERR 0x06
@@ -158,7 +166,7 @@ struct set_baudrate_msg {
};
/* Main message type used between library and application */
-struct __attribute__ ((packed)) esd_usb2_msg {
+struct __packed esd_usb_msg {
union {
struct header_msg hdr;
struct version_msg version;
@@ -171,23 +179,23 @@ struct __attribute__ ((packed)) esd_usb2_msg {
} msg;
};
-static struct usb_device_id esd_usb2_table[] = {
+static struct usb_device_id esd_usb_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
{}
};
-MODULE_DEVICE_TABLE(usb, esd_usb2_table);
+MODULE_DEVICE_TABLE(usb, esd_usb_table);
-struct esd_usb2_net_priv;
+struct esd_usb_net_priv;
struct esd_tx_urb_context {
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
u32 echo_index;
};
-struct esd_usb2 {
+struct esd_usb {
struct usb_device *udev;
- struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
+ struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS];
struct usb_anchor rx_submitted;
@@ -198,22 +206,22 @@ struct esd_usb2 {
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
-struct esd_usb2_net_priv {
+struct esd_usb_net_priv {
struct can_priv can; /* must be the first member */
atomic_t active_tx_jobs;
struct usb_anchor tx_submitted;
struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
- struct esd_usb2 *usb2;
+ struct esd_usb *usb;
struct net_device *netdev;
int index;
u8 old_state;
struct can_berr_counter bec;
};
-static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -258,7 +266,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
priv->can.can_stats.bus_error++;
stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR |
+ CAN_ERR_CNT;
switch (ecc & SJA1000_ECC_MASK) {
case SJA1000_ECC_BIT:
@@ -296,8 +305,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
}
}
-static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -311,7 +320,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
id = le32_to_cpu(msg->msg.rx.id);
if (id & ESD_EVENT) {
- esd_usb2_rx_event(priv, msg);
+ esd_usb_rx_event(priv, msg);
} else {
skb = alloc_can_skb(priv->netdev, &cf);
if (skb == NULL) {
@@ -338,12 +347,10 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
netif_rx(skb);
}
-
- return;
}
-static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
+ struct esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct net_device *netdev = priv->netdev;
@@ -370,9 +377,9 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
netif_wake_queue(netdev);
}
-static void esd_usb2_read_bulk_callback(struct urb *urb)
+static void esd_usb_read_bulk_callback(struct urb *urb)
{
- struct esd_usb2 *dev = urb->context;
+ struct esd_usb *dev = urb->context;
int retval;
int pos = 0;
int i;
@@ -394,9 +401,9 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
}
while (pos < urb->actual_length) {
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
- msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
+ msg = (struct esd_usb_msg *)(urb->transfer_buffer + pos);
switch (msg->msg.hdr.cmd) {
case CMD_CAN_RX:
@@ -405,7 +412,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
}
- esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
+ esd_usb_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
break;
case CMD_CAN_TX:
@@ -414,8 +421,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;
}
- esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
- msg);
+ esd_usb_tx_done_msg(dev->nets[msg->msg.txdone.net],
+ msg);
break;
}
@@ -430,7 +437,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
urb->transfer_buffer, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval == -ENODEV) {
@@ -442,19 +449,15 @@ resubmit_urb:
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
}
-
- return;
}
-/*
- * callback for bulk IN urb
- */
-static void esd_usb2_write_bulk_callback(struct urb *urb)
+/* callback for bulk IN urb */
+static void esd_usb_write_bulk_callback(struct urb *urb)
{
struct esd_tx_urb_context *context = urb->context;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
struct net_device *netdev;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(struct esd_usb_msg);
WARN_ON(!context);
@@ -478,7 +481,7 @@ static ssize_t firmware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 12) & 0xf,
@@ -491,7 +494,7 @@ static ssize_t hardware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 28) & 0xf,
@@ -504,13 +507,13 @@ static ssize_t nets_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d", dev->net_count);
}
static DEVICE_ATTR_RO(nets);
-static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
+static int esd_usb_send_msg(struct esd_usb *dev, struct esd_usb_msg *msg)
{
int actual_length;
@@ -522,8 +525,8 @@ static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
1000);
}
-static int esd_usb2_wait_msg(struct esd_usb2 *dev,
- struct esd_usb2_msg *msg)
+static int esd_usb_wait_msg(struct esd_usb *dev,
+ struct esd_usb_msg *msg)
{
int actual_length;
@@ -535,7 +538,7 @@ static int esd_usb2_wait_msg(struct esd_usb2 *dev,
1000);
}
-static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
{
int i, err = 0;
@@ -568,7 +571,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->rx_submitted);
@@ -606,14 +609,12 @@ freeurb:
return 0;
}
-/*
- * Start interface
- */
-static int esd_usb2_start(struct esd_usb2_net_priv *priv)
+/* Start interface */
+static int esd_usb_start(struct esd_usb_net_priv *priv)
{
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb *dev = priv->usb;
struct net_device *netdev = priv->netdev;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
int err, i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -622,8 +623,7 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
goto out;
}
- /*
- * Enable all IDs
+ /* Enable all IDs
* The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
* Each bit represents one 11 bit CAN identifier. A set bit
* enables reception of the corresponding CAN identifier. A cleared
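As the comment lays out, the filter is a flat bitmap: 64 little-endian 32 bit words cover all 2048 standard 11 bit identifiers, one bit per ID. A hypothetical helper (not part of the driver) showing where a given identifier lands:

	static void esd_filter_enable_id(__le32 mask[], u16 id)
	{
		/* word id / 32, bit id % 32 of the 2048 bit bitmap */
		mask[id / 32] |= cpu_to_le32(BIT(id % 32));
	}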
@@ -644,11 +644,11 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
/* enable 29bit extended IDs */
msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err)
goto out;
- err = esd_usb2_setup_rx_urbs(dev);
+ err = esd_usb_setup_rx_urbs(dev);
if (err)
goto out;
@@ -664,9 +664,9 @@ out:
return err;
}
-static void unlink_all_urbs(struct esd_usb2 *dev)
+static void unlink_all_urbs(struct esd_usb *dev)
{
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
@@ -687,9 +687,9 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
}
}
-static int esd_usb2_open(struct net_device *netdev)
+static int esd_usb_open(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
int err;
/* common open */
@@ -698,7 +698,7 @@ static int esd_usb2_open(struct net_device *netdev)
return err;
/* finally start device */
- err = esd_usb2_start(priv);
+ err = esd_usb_start(priv);
if (err) {
netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
@@ -710,22 +710,22 @@ static int esd_usb2_open(struct net_device *netdev)
return 0;
}
-static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
+static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb *dev = priv->usb;
struct esd_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
struct urb *urb;
u8 *buf;
int i, err;
int ret = NETDEV_TX_OK;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(struct esd_usb_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -745,7 +745,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
goto nobufmem;
}
- msg = (struct esd_usb2_msg *)buf;
+ msg = (struct esd_usb_msg *)buf;
msg->msg.hdr.len = 3; /* minimal length */
msg->msg.hdr.cmd = CMD_CAN_TX;
@@ -771,9 +771,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
}
}
- /*
- * This may never happen.
- */
+ /* This may never happen */
if (!context) {
netdev_warn(netdev, "couldn't find free context\n");
ret = NETDEV_TX_BUSY;
@@ -788,7 +786,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
msg->msg.hdr.len << 2,
- esd_usb2_write_bulk_callback, context);
+ esd_usb_write_bulk_callback, context);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -821,8 +819,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
netif_trans_update(netdev);
- /*
- * Release our reference to this URB, the USB core will eventually free
+ /* Release our reference to this URB, the USB core will eventually free
* it entirely.
*/
usb_free_urb(urb);
@@ -839,24 +836,24 @@ nourbmem:
return ret;
}
-static int esd_usb2_close(struct net_device *netdev)
+static int esd_usb_close(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2_msg *msg;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_msg *msg;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- /* Disable all IDs (see esd_usb2_start()) */
+ /* Disable all IDs (see esd_usb_start()) */
msg->msg.hdr.cmd = CMD_IDADD;
msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
msg->msg.filter.net = priv->index;
msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
msg->msg.filter.mask[i] = 0;
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */
@@ -864,8 +861,8 @@ static int esd_usb2_close(struct net_device *netdev)
msg->msg.hdr.cmd = CMD_SETBAUD;
msg->msg.setbaud.net = priv->index;
msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ msg->msg.setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending setbaud message failed\n");
priv->can.state = CAN_STATE_STOPPED;
@@ -879,13 +876,17 @@ static int esd_usb2_close(struct net_device *netdev)
return 0;
}
-static const struct net_device_ops esd_usb2_netdev_ops = {
- .ndo_open = esd_usb2_open,
- .ndo_stop = esd_usb2_close,
- .ndo_start_xmit = esd_usb2_start_xmit,
+static const struct net_device_ops esd_usb_netdev_ops = {
+ .ndo_open = esd_usb_open,
+ .ndo_stop = esd_usb_close,
+ .ndo_start_xmit = esd_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops esd_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const esd_usb2_bittiming_const = {
.name = "esd_usb2",
.tseg1_min = ESD_USB2_TSEG1_MIN,
@@ -900,20 +901,20 @@ static const struct can_bittiming_const esd_usb2_bittiming_const = {
static int esd_usb2_set_bittiming(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct esd_usb2_msg *msg;
+ struct esd_usb_msg *msg;
int err;
u32 canbtr;
int sjw_shift;
- canbtr = ESD_USB2_UBR;
+ canbtr = ESD_USB_UBR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- canbtr |= ESD_USB2_LOM;
+ canbtr |= ESD_USB_LOM;
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
- if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+ if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID)
sjw_shift = ESD_USBM_SJW_SHIFT;
else
@@ -941,16 +942,16 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
netdev_info(netdev, "setting BTR=%#x\n", canbtr);
- err = esd_usb2_send_msg(priv->usb2, msg);
+ err = esd_usb_send_msg(priv->usb, msg);
kfree(msg);
return err;
}
-static int esd_usb2_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
+static int esd_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
bec->txerr = priv->bec.txerr;
bec->rxerr = priv->bec.rxerr;
@@ -958,7 +959,7 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev,
return 0;
}
-static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
+static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
@@ -972,11 +973,11 @@ static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
return 0;
}
-static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
+static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int err = 0;
int i;
@@ -995,7 +996,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
- priv->usb2 = dev;
+ priv->usb = dev;
priv->netdev = netdev;
priv->index = index;
@@ -1013,12 +1014,13 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->can.bittiming_const = &esd_usb2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming;
- priv->can.do_set_mode = esd_usb2_set_mode;
- priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
+ priv->can.do_set_mode = esd_usb_set_mode;
+ priv->can.do_get_berr_counter = esd_usb_get_berr_counter;
netdev->flags |= IFF_ECHO; /* we support local echo */
- netdev->netdev_ops = &esd_usb2_netdev_ops;
+ netdev->netdev_ops = &esd_usb_netdev_ops;
+ netdev->ethtool_ops = &esd_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
netdev->dev_id = index;
@@ -1038,17 +1040,16 @@ done:
return err;
}
-/*
- * probe function for new USB2 devices
+/* probe function for new USB devices
*
* check version information and number of available
* CAN interfaces
*/
-static int esd_usb2_probe(struct usb_interface *intf,
+static int esd_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct esd_usb2 *dev;
- struct esd_usb2_msg *msg;
+ struct esd_usb *dev;
+ struct esd_usb_msg *msg;
int i, err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1076,13 +1077,13 @@ static int esd_usb2_probe(struct usb_interface *intf,
msg->msg.version.flags = 0;
msg->msg.version.drv_version = 0;
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "sending version message failed\n");
goto free_msg;
}
- err = esd_usb2_wait_msg(dev, msg);
+ err = esd_usb_wait_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "no version message answer\n");
goto free_msg;
@@ -1105,7 +1106,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
/* do per device probing */
for (i = 0; i < dev->net_count; i++)
- esd_usb2_probe_one_net(intf, i);
+ esd_usb_probe_one_net(intf, i);
free_msg:
kfree(msg);
@@ -1115,12 +1116,10 @@ done:
return err;
}
-/*
- * called by the usb core when the device is removed from the system
- */
-static void esd_usb2_disconnect(struct usb_interface *intf)
+/* called by the usb core when the device is removed from the system */
+static void esd_usb_disconnect(struct usb_interface *intf)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
int i;
@@ -1144,11 +1143,11 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
}
/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver esd_usb2_driver = {
- .name = "esd_usb2",
- .probe = esd_usb2_probe,
- .disconnect = esd_usb2_disconnect,
- .id_table = esd_usb2_table,
+static struct usb_driver esd_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = esd_usb_probe,
+ .disconnect = esd_usb_disconnect,
+ .id_table = esd_usb_table,
};
-module_usb_driver(esd_usb2_driver);
+module_usb_driver(esd_usb_driver);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 2ed2370a3166..25f863b4f5f0 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -10,6 +10,7 @@
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -18,14 +19,11 @@
#include "es58x_core.h"
-#define DRV_VERSION "1.00"
MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL v2");
-#define ES58X_MODULE_NAME "etas_es58x"
#define ES58X_VENDOR_ID 0x108C
#define ES581_4_PRODUCT_ID 0x0159
#define ES582_1_PRODUCT_ID 0x0168
@@ -59,11 +57,11 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
#define es58x_print_hex_dump(buf, len) \
print_hex_dump(KERN_DEBUG, \
- ES58X_MODULE_NAME " " __stringify(buf) ": ", \
+ KBUILD_MODNAME " " __stringify(buf) ": ", \
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
#define es58x_print_hex_dump_debug(buf, len) \
- print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\
+ print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
/* The last two bytes of an ES58X command is a CRC16. The first two
@@ -1462,10 +1460,6 @@ static void es58x_read_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_read_bulk_callback, es58x_dev);
-
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret == -ENODEV) {
for (i = 0; i < es58x_dev->num_can_ch; i++)
@@ -1599,7 +1593,8 @@ static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev)
return NULL;
usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- buf, tx_buf_len, NULL, NULL);
+ buf, tx_buf_len, es58x_write_bulk_callback,
+ NULL);
return urb;
}
@@ -1632,9 +1627,7 @@ static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb,
int ret;
es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length);
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_write_bulk_callback, netdev);
+ urb->context = netdev;
usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -1707,7 +1700,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
{
const struct device *dev = es58x_dev->dev;
const struct es58x_parameters *param = es58x_dev->param;
- size_t rx_buf_len = es58x_dev->rx_max_packet_size;
+ u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe);
struct urb *urb;
u8 *buf;
int i;
@@ -1739,7 +1732,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_err(dev, "%s: Could not setup any rx URBs\n", __func__);
return ret;
}
- dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n",
+ dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
return ret;
@@ -1787,7 +1780,7 @@ static int es58x_open(struct net_device *netdev)
struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
int ret;
- if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ if (!es58x_dev->opened_channel_cnt) {
ret = es58x_alloc_rx_urbs(es58x_dev);
if (ret)
return ret;
@@ -1805,12 +1798,13 @@ static int es58x_open(struct net_device *netdev)
if (ret)
goto free_urbs;
+ es58x_dev->opened_channel_cnt++;
netif_start_queue(netdev);
return ret;
free_urbs:
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
netdev_err(netdev, "%s: Could not open the network device: %pe\n",
__func__, ERR_PTR(ret));
@@ -1845,7 +1839,8 @@ static int es58x_stop(struct net_device *netdev)
es58x_flush_pending_tx_msg(netdev);
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_dev->opened_channel_cnt--;
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
return 0;
@@ -1918,7 +1913,7 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
unsigned int frame_len;
int ret;
- if (can_dropped_invalid_skb(netdev, skb)) {
+ if (can_dev_dropped_skb(netdev, skb)) {
if (priv->tx_urb)
goto xmit_commit;
return NETDEV_TX_OK;
@@ -1979,7 +1974,12 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
- .ndo_start_xmit = es58x_start_xmit
+ .ndo_start_xmit = es58x_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops es58x_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
/**
@@ -2086,6 +2086,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
netdev->netdev_ops = &es58x_netdev_ops;
+ netdev->ethtool_ops = &es58x_ethtool_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
netdev->dev_port = channel_idx;
@@ -2179,9 +2180,8 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep_in, *ep_out;
int ret;
- dev_info(dev,
- "Starting %s %s (Serial Number %s) driver version %s\n",
- udev->manufacturer, udev->product, udev->serial, DRV_VERSION);
+ dev_info(dev, "Starting %s %s (Serial Number %s)\n",
+ udev->manufacturer, udev->product, udev->serial);
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
@@ -2215,14 +2215,12 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
init_usb_anchor(&es58x_dev->tx_urbs_idle);
init_usb_anchor(&es58x_dev->tx_urbs_busy);
atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
- atomic_set(&es58x_dev->opened_channel_cnt, 0);
usb_set_intfdata(intf, es58x_dev);
es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
ep_in->bEndpointAddress);
es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev,
ep_out->bEndpointAddress);
- es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
return es58x_dev;
}
@@ -2280,7 +2278,7 @@ static void es58x_disconnect(struct usb_interface *intf)
}
static struct usb_driver es58x_driver = {
- .name = ES58X_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = es58x_probe,
.disconnect = es58x_disconnect,
.id_table = es58x_id_table
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..640fe0a1df63 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -222,7 +222,7 @@ union es58x_urb_cmd {
u8 cmd_type;
u8 cmd_id;
} __packed;
- u8 raw_cmd[0];
+ DECLARE_FLEX_ARRAY(u8, raw_cmd);
};
/**
@@ -373,8 +373,6 @@ struct es58x_operators {
* queue wake/stop logic should prevent this URB from getting
* empty. Please refer to es58x_get_tx_urb() for more details.
* @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- * and es58x_stop()).
* @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
* was called.
* @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -382,8 +380,11 @@ struct es58x_operators {
* @timestamps: a temporary buffer to store the time stamps before
* feeding them to es58x_can_get_echo_skb(). Can only be used
* in RX branches.
- * @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_close()) guarantee that the network
+ * stack big kernel lock (a.k.a. rtnl_mutex) is held.
* @rx_cmd_buf_len: Length of @rx_cmd_buf.
* @rx_cmd_buf: The device might split the URB commands in an
* arbitrary amount of pieces. This buffer is used to concatenate
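The @opened_channel_cnt paragraph above justifies dropping the atomic further down: ndo_open() and ndo_stop() always run under rtnl_lock(), so a plain counter cannot race. A simplified sketch of the pattern with the assumption made explicit (the ASSERT_RTNL() is illustrative; the driver relies on it implicitly):

	static int example_open(struct net_device *netdev)
	{
		struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
		int ret = 0;

		ASSERT_RTNL();	/* ndo_open() holds the RTNL mutex */
		if (!es58x_dev->opened_channel_cnt)
			ret = es58x_alloc_rx_urbs(es58x_dev);
		if (!ret)
			es58x_dev->opened_channel_cnt++;
		return ret;
	}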
@@ -399,22 +400,21 @@ struct es58x_device {
const struct es58x_parameters *param;
const struct es58x_operators *ops;
- int rx_pipe;
- int tx_pipe;
+ unsigned int rx_pipe;
+ unsigned int tx_pipe;
struct usb_anchor rx_urbs;
struct usb_anchor tx_urbs_busy;
struct usb_anchor tx_urbs_idle;
atomic_t tx_urbs_idle_cnt;
- atomic_t opened_channel_cnt;
u64 ktime_req_ns;
s64 realtime_diff_ns;
u64 timestamps[ES58X_ECHO_BULK_MAX];
- u16 rx_max_packet_size;
u8 num_can_ch;
+ u8 opened_channel_cnt;
u16 rx_cmd_buf_len;
union es58x_urb_cmd rx_cmd_buf;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index ec87126e1a7d..c97ffa71fd75 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -69,7 +69,8 @@ static int es58x_fd_echo_msg(struct net_device *netdev,
int i, num_element;
u32 rcv_packet_idx;
- const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8);
+ const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1,
+ BITS_PER_TYPE(echo_msg->packet_idx));
num_element = es58x_msg_num_element(es58x_dev->dev,
es58x_fd_urb_cmd->echo_msg,
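Worked out with the actual types — the on-wire echo index is a u8, the mask a u32 — the new expression still yields GENMASK(31, 8) = 0xffffff00, but now adapts automatically if either type changes:

	/* BITS_PER_TYPE(u32) - 1 = 31, BITS_PER_TYPE(u8) = 8 */
	const u32 mask = GENMASK(BITS_PER_TYPE(u32) - 1, BITS_PER_TYPE(u8));
	/* mask == 0xffffff00: the bits the 8 bit on-wire index cannot carry */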
@@ -172,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
const struct es58x_fd_rx_event_msg *rx_event_msg;
int ret;
+ rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
if (ret)
return ret;
- rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
-
return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
rx_event_msg->event_code,
get_unaligned_le64(&rx_event_msg->timestamp));
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b487e3fe770a..9c2c25fde3d1 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -9,26 +9,45 @@
* Many thanks to all socketcan devs!
*/
+#include <linux/bitfield.h>
+#include <linux/clocksource.h>
#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/timecounter.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GS_USB_1_VENDOR_ID 0x1d50
+#define USB_GS_USB_1_PRODUCT_ID 0x606f
-#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
+#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f
+
+#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
+#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+
+#define GS_USB_ENDPOINT_IN 1
+#define GS_USB_ENDPOINT_OUT 2
+
+/* The 32 bit timestamp timer runs at 1 MHz (1 µs tick). A worker accounts
+ * for timer overflow (which occurs after ~71 minutes)
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
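The static_assert pins down the margin numerically: a 32 bit counter at 1 MHz wraps after 2^32 µs ≈ 4295 s (the "~71 minutes" above), so the worker must fire at least once per half period, ≈ 2147 s. With the constants folded by hand:

	/* CYCLECOUNTER_MASK(32) / 10^6 = 4294 s full period, 2147 s half */
	static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC < 2147);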
/* Device specific constants */
enum gs_usb_breq {
@@ -40,6 +59,13 @@ enum gs_usb_breq {
GS_USB_BREQ_DEVICE_CONFIG,
GS_USB_BREQ_TIMESTAMP,
GS_USB_BREQ_IDENTIFY,
+ GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_SET_USER_ID,
+ GS_USB_BREQ_DATA_BITTIMING,
+ GS_USB_BREQ_BT_CONST_EXT,
+ GS_USB_BREQ_SET_TERMINATION,
+ GS_USB_BREQ_GET_TERMINATION,
};
enum gs_can_mode {
@@ -63,6 +89,14 @@ enum gs_can_identify_mode {
GS_CAN_IDENTIFY_ON
};
+enum gs_can_termination_state {
+ GS_CAN_TERMINATION_STATE_OFF = 0,
+ GS_CAN_TERMINATION_STATE_ON
+};
+
+#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define GS_USB_TERMINATION_ENABLED 120
+
/* data types passed between host and device */
/* The firmware on the original USB2CAN by Geschwister Schneider
@@ -87,11 +121,19 @@ struct gs_device_config {
__le32 hw_version;
} __packed;
-#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
-#define GS_CAN_MODE_LOOP_BACK BIT(1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
+/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
+/* GS_CAN_FEATURE_USER_ID BIT(6) */
+#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_MODE_FD BIT(8)
+/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
+/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
+/* GS_CAN_FEATURE_TERMINATION BIT(11) */
struct gs_device_mode {
__le32 mode;
@@ -116,12 +158,30 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
-#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
-#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
-#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+struct gs_device_termination_state {
+ __le32 state;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_USER_ID BIT(6)
+#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_FEATURE_FD BIT(8)
+#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
+#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
+#define GS_CAN_FEATURE_TERMINATION BIT(11)
+#define GS_CAN_FEATURE_MASK GENMASK(11, 0)
+
+/* internal quirks - keep in GS_CAN_FEATURE space for now */
+
+/* CANtact Pro original firmware:
+ * BREQ DATA_BITTIMING overlaps with GET_USER_ID
+ */
+#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)
struct gs_device_bt_const {
__le32 feature;
@@ -136,7 +196,60 @@ struct gs_device_bt_const {
__le32 brp_inc;
} __packed;
-#define GS_CAN_FLAG_OVERFLOW 1
+struct gs_device_bt_const_extended {
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
+
+ __le32 dtseg1_min;
+ __le32 dtseg1_max;
+ __le32 dtseg2_min;
+ __le32 dtseg2_max;
+ __le32 dsjw_max;
+ __le32 dbrp_min;
+ __le32 dbrp_max;
+ __le32 dbrp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW BIT(0)
+#define GS_CAN_FLAG_FD BIT(1)
+#define GS_CAN_FLAG_BRS BIT(2)
+#define GS_CAN_FLAG_ESI BIT(3)
+
+struct classic_can {
+ u8 data[8];
+} __packed;
+
+struct classic_can_ts {
+ u8 data[8];
+ __le32 timestamp_us;
+} __packed;
+
+struct classic_can_quirk {
+ u8 data[8];
+ u8 quirk;
+} __packed;
+
+struct canfd {
+ u8 data[64];
+} __packed;
+
+struct canfd_ts {
+ u8 data[64];
+ __le32 timestamp_us;
+} __packed;
+
+struct canfd_quirk {
+ u8 data[64];
+ u8 quirk;
+} __packed;
struct gs_host_frame {
u32 echo_id;
@@ -147,7 +260,14 @@ struct gs_host_frame {
u8 flags;
u8 reserved;
- u8 data[8];
+ union {
+ DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
+ DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
+ DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
+ DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
+ };
} __packed;
/* The GS USB devices make use of the same flags and masks as in
* linux/can.h and linux/can/error.h, and no additional mapping is necessary.
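A bare flexible array member may not be the sole member of a struct or union in C, which is why the payload variants above go through DECLARE_FLEX_ARRAY(): it wraps the array in an anonymous struct alongside an empty companion member. Roughly, one union arm expands to:

	union {
		struct {
			struct { } __empty_classic_can;
			struct classic_can classic_can[];
		};
		/* ... the other five variants expand the same way ... */
	};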
@@ -158,9 +278,9 @@ struct gs_host_frame {
/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
#define GS_MAX_RX_URBS 30
/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 2 interfaces. The future may vary.
+ * Current hardware only supports 3 interfaces; future hardware may vary.
*/
-#define GS_MAX_INTF 2
+#define GS_MAX_INTF 3
struct gs_tx_context {
struct gs_can *dev;
@@ -176,9 +296,18 @@ struct gs_can {
struct usb_device *udev;
struct usb_interface *iface;
- struct can_bittiming_const bt_const;
+ struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ /* time counter for hardware timestamps */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t tc_lock; /* spinlock to guard access to tc->cycle_last */
+ struct delayed_work timestamp;
+
+ u32 feature;
+ unsigned int hf_size_tx;
+
/* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -191,8 +320,9 @@ struct gs_can {
struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
- atomic_t active_channels;
struct usb_device *udev;
+ unsigned int hf_size_rx;
+ u8 active_channels;
};
/* 'allocate' a tx context.
@@ -242,31 +372,109 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *dev)
{
- struct gs_device_mode *dm;
- struct usb_interface *intf = gsdev->iface;
+ struct gs_device_mode dm = {
+ .mode = GS_CAN_MODE_RESET,
+ };
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+}
+
+static inline int gs_usb_get_timestamp(const struct gs_can *dev,
+ u32 *timestamp_p)
+{
+ __le32 timestamp;
int rc;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_TIMESTAMP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &timestamp, sizeof(timestamp),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
- dm->mode = GS_CAN_MODE_RESET;
+ *timestamp_p = le32_to_cpu(timestamp);
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ return 0;
+}
- kfree(dm);
+static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+{
+ struct gs_can *dev = container_of(cc, struct gs_can, cc);
+ u32 timestamp = 0;
+ int err;
+
+ lockdep_assert_held(&dev->tc_lock);
+
+ /* drop lock for synchronous USB transfer */
+ spin_unlock_bh(&dev->tc_lock);
+ err = gs_usb_get_timestamp(dev, &timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ if (err)
+ netdev_err(dev->netdev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
- return rc;
+static void gs_usb_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gs_can *dev;
+
+ dev = container_of(delayed_work, struct gs_can, timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_read(&dev->tc);
+ spin_unlock_bh(&dev->tc_lock);
+
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_skb_set_timestamp(struct gs_can *dev,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ spin_lock_bh(&dev->tc_lock);
+ ns = timecounter_cyc2time(&dev->tc, timestamp);
+ spin_unlock_bh(&dev->tc_lock);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void gs_usb_timestamp_init(struct gs_can *dev)
+{
+ struct cyclecounter *cc = &dev->cc;
+
+ cc->read = gs_usb_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
+ cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
+
+ spin_lock_init(&dev->tc_lock);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns());
+ spin_unlock_bh(&dev->tc_lock);
+
+ INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work);
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_timestamp_stop(struct gs_can *dev)
+{
+ cancel_delayed_work_sync(&dev->timestamp);
}
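For the 1 MHz timer the conversion factors chosen above are exact: bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ) = bits_per(1000) = 10 gives shift = 22, and mult = (NSEC_PER_SEC << 22) / 10^6 = 1000 << 22 = 4194304000, which still fits in u32. timecounter_cyc2time() then computes ns = (cycles * mult) >> shift = cycles * 1000, i.e. exactly 1000 ns per microsecond tick. Hand-folded sanity check:

	static_assert((1000ULL << 22) <= U32_MAX);	  /* mult fits */
	static_assert((((1000ULL << 22) * 7) >> 22) == 7 * 1000); /* exact */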
static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
@@ -294,6 +502,24 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
+static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ u32 timestamp;
+
+ if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP))
+ return;
+
+ if (hf->flags & GS_CAN_FLAG_FD)
+ timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
+ else
+ timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);
+
+ gs_usb_skb_set_timestamp(dev, skb, timestamp);
+
+ return;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
@@ -304,6 +530,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
struct gs_host_frame *hf = urb->transfer_buffer;
struct gs_tx_context *txc;
struct can_frame *cf;
+ struct canfd_frame *cfd;
struct sk_buff *skb;
BUG_ON(!usbcan);
@@ -332,18 +559,35 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
if (hf->echo_id == -1) { /* normal rx */
- skb = alloc_can_skb(dev->netdev, &cf);
- if (!skb)
- return;
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ skb = alloc_canfd_skb(dev->netdev, &cfd);
+ if (!skb)
+ return;
+
+ cfd->can_id = le32_to_cpu(hf->can_id);
+ cfd->len = can_fd_dlc2len(hf->can_dlc);
+ if (hf->flags & GS_CAN_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (hf->flags & GS_CAN_FLAG_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, hf->canfd->data, cfd->len);
+ } else {
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = le32_to_cpu(hf->can_id);
+ can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- cf->can_id = le32_to_cpu(hf->can_id);
+ memcpy(cf->data, hf->classic_can->data, 8);
- can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->data, 8);
+ /* ERROR frames tell us information about the controller */
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+ }
- /* ERROR frames tell us information about the controller */
- if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
- gs_update_state(dev, cf);
+ gs_usb_set_timestamp(dev, skb, hf);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -367,6 +611,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
+ skb = dev->can.echo_skb[hf->echo_id];
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
NULL);
@@ -392,14 +639,10 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb,
- usbcan->udev,
- usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
- hf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- usbcan
- );
+ usb_fill_bulk_urb(urb, usbcan->udev,
+ usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN),
+ hf, dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, usbcan);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -417,38 +660,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
- struct usb_interface *intf = dev->iface;
- int rc;
- struct gs_device_bittiming *dbt;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
/* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BITTIMING,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dbt,
- sizeof(*dbt),
- 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
- rc);
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
+}
- return (rc > 0) ? 0 : rc;
+static int gs_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
+ u8 request = GS_USB_BREQ_DATA_BITTIMING;
+
+ if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
+ request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
+
+ /* request data bit timings */
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static void gs_usb_xmit_callback(struct urb *urb)
@@ -459,11 +708,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
-
- usb_free_coherent(urb->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
}
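The deleted usb_free_coherent() becomes unnecessary with the allocation change in gs_can_start_xmit() below: the buffer is now plain kmalloc() memory and the URB carries URB_FREE_BUFFER, so the USB core kfree()s it when the last URB reference is dropped. The pattern reduced to a hypothetical wrapper, not driver code:

	static struct urb *example_alloc_tx_urb(struct usb_device *udev,
						unsigned int pipe, size_t size,
						usb_complete_t complete,
						void *ctx)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
		void *buf;

		if (!urb)
			return NULL;

		buf = kmalloc(size, GFP_ATOMIC);
		if (!buf) {
			usb_free_urb(urb);
			return NULL;
		}

		usb_fill_bulk_urb(urb, udev, pipe, buf, size, complete, ctx);
		/* the USB core kfree()s buf together with the URB */
		urb->transfer_flags |= URB_FREE_BUFFER;

		return urb;
	}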
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -474,11 +718,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
struct urb *urb;
struct gs_host_frame *hf;
struct can_frame *cf;
+ struct canfd_frame *cfd;
int rc;
unsigned int idx;
struct gs_tx_context *txc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* find an empty context to keep track of transmission */
@@ -491,8 +736,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
- &urb->transfer_dma);
+ hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
goto nomem_hf;
@@ -510,21 +754,33 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf->flags = 0;
hf->reserved = 0;
- cf = (struct can_frame *)skb->data;
+ if (can_is_canfd_skb(skb)) {
+ cfd = (struct canfd_frame *)skb->data;
- hf->can_id = cpu_to_le32(cf->can_id);
- hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+ hf->can_id = cpu_to_le32(cfd->can_id);
+ hf->can_dlc = can_fd_len2dlc(cfd->len);
+ hf->flags |= GS_CAN_FLAG_FD;
+ if (cfd->flags & CANFD_BRS)
+ hf->flags |= GS_CAN_FLAG_BRS;
+ if (cfd->flags & CANFD_ESI)
+ hf->flags |= GS_CAN_FLAG_ESI;
- memcpy(hf->data, cf->data, cf->len);
+ memcpy(hf->canfd->data, cfd->data, cfd->len);
+ } else {
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cpu_to_le32(cf->can_id);
+ hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+ memcpy(hf->classic_can->data, cf->data, cf->len);
+ }
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
- hf,
- sizeof(*hf),
- gs_usb_xmit_callback,
- txc);
+ usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
+ hf, dev->hf_size_tx,
+ gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0);
@@ -539,10 +795,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -562,10 +814,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
+ kfree(hf);
nomem_hf:
usb_free_urb(urb);
@@ -580,16 +829,34 @@ static int gs_can_open(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- int rc, i;
- struct gs_device_mode *dm;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_START),
+ };
+ struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
+ int rc, i;
rc = open_candev(netdev);
if (rc)
return rc;
- if (atomic_add_return(1, &parent->active_channels) == 1) {
+ ctrlmode = dev->can.ctrlmode;
+ if (ctrlmode & CAN_CTRLMODE_FD) {
+ flags |= GS_CAN_MODE_FD;
+
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, classic_can, 1);
+ }
+
+ if (!parent->active_channels) {
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
@@ -600,10 +867,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* alloc rx buffer */
- buf = usb_alloc_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- GFP_KERNEL,
- &urb->transfer_dma);
+ buf = kmalloc(dev->parent->hf_size_rx,
+ GFP_KERNEL);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -615,12 +880,11 @@ static int gs_can_open(struct net_device *netdev)
usb_fill_bulk_urb(urb,
dev->udev,
usb_rcvbulkpipe(dev->udev,
- GSUSB_ENDPOINT_IN),
+ GS_USB_ENDPOINT_IN),
buf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- parent);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, parent);
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -630,8 +894,7 @@ static int gs_can_open(struct net_device *netdev)
netif_device_detach(dev->netdev);
netdev_err(netdev,
- "usb_submit failed (err=%d)\n",
- rc);
+ "usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
usb_free_urb(urb);
@@ -645,13 +908,7 @@ static int gs_can_open(struct net_device *netdev)
}
}
- dm = kmalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
-
/* flags */
- ctrlmode = dev->can.ctrlmode;
-
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -666,30 +923,31 @@ static int gs_can_open(struct net_device *netdev)
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+	/* if the hardware supports timestamps, enable them */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+
+	/* start timestamp polling */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_init(dev);
+
/* finally start device */
- dm->mode = cpu_to_le32(GS_CAN_MODE_START);
- dm->flags = cpu_to_le32(flags);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
-
- if (rc < 0) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm.flags = cpu_to_le32(flags);
+ rc = usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+ if (rc) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
- kfree(dm);
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
- kfree(dm);
-
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
+ parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -704,9 +962,15 @@ static int gs_can_close(struct net_device *netdev)
netif_stop_queue(netdev);
+	/* stop timestamp polling */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+
/* Stop polling */
- if (atomic_dec_and_test(&parent->active_channels))
+ parent->active_channels--;
+	if (!parent->active_channels)
		usb_kill_anchored_urbs(&parent->rx_submitted);
/* Stop sending URBs */
usb_kill_anchored_urbs(&dev->tx_submitted);
@@ -729,58 +993,57 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
+static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_eth_ioctl_hwts(netdev, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
.ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = gs_can_eth_ioctl,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode *imode;
- int rc;
-
- imode = kmalloc(sizeof(*imode), GFP_KERNEL);
-
- if (!imode)
- return -ENOMEM;
+ struct gs_identify_mode imode;
if (do_identify)
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
-
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface),
- 0),
- GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- imode,
- sizeof(*imode),
- 100);
-
- kfree(imode);
-
- return (rc > 0) ? 0 : rc;
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &imode, sizeof(imode), 100,
+ GFP_KERNEL);
}
/* blink LEDs to find this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -789,8 +1052,67 @@ static int gs_usb_set_phys_id(struct net_device *dev,
return rc;
}
+static int gs_usb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+
+	/* report whether the device supports HW timestamps */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_ethtool_op_get_ts_info_hwts(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
+ .get_ts_info = gs_usb_get_ts_info,
+};
+
+static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+ int rc;
+
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_GET_TERMINATION,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
+ *term = GS_USB_TERMINATION_ENABLED;
+ else
+ *term = GS_USB_TERMINATION_DISABLED;
+
+ return 0;
+}
+
+static int gs_usb_set_termination(struct net_device *netdev, u16 term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+
+ if (term == GS_USB_TERMINATION_ENABLED)
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
+ else
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_SET_TERMINATION,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+}
+
+static const u16 gs_usb_termination_const[] = {
+ GS_USB_TERMINATION_DISABLED,
+ GS_USB_TERMINATION_ENABLED
};
static struct gs_can *gs_make_candev(unsigned int channel,
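With termination_const and do_set_termination registered above, the on-board termination resistor becomes switchable from userspace, e.g. ip link set dev can0 type can termination 120 (assuming GS_USB_TERMINATION_ENABLED maps to the conventional 120 ohm value); the CAN netlink core rejects any value not listed in gs_usb_termination_const.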
@@ -800,29 +1122,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct gs_can *dev;
struct net_device *netdev;
int rc;
- struct gs_device_bt_const *bt_const;
+ struct gs_device_bt_const_extended bt_const_extended;
+ struct gs_device_bt_const bt_const;
u32 feature;
- bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
- if (!bt_const)
- return ERR_PTR(-ENOMEM);
-
/* fetch bit timing constants */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel,
- 0,
- bt_const,
- sizeof(*bt_const),
- 1000);
-
- if (rc < 0) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const, sizeof(bt_const), 1000,
+ GFP_KERNEL);
+
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const);
+ "Couldn't get bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
return ERR_PTR(rc);
}
@@ -830,26 +1144,26 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Couldn't allocate candev\n");
- kfree(bt_const);
return ERR_PTR(-ENOMEM);
}
dev = netdev_priv(netdev);
netdev->netdev_ops = &gs_usb_netdev_ops;
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
/* dev setup */
- strcpy(dev->bt_const.name, "gs_usb");
- dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
- dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
- dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
- dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
- dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
- dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
- dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
- dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ strcpy(dev->bt_const.name, KBUILD_MODNAME);
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@@ -866,13 +1180,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
- feature = le32_to_cpu(bt_const->feature);
+ feature = le32_to_cpu(bt_const.feature);
+ dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -885,22 +1200,104 @@ static struct gs_can *gs_make_candev(unsigned int channel,
if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- SET_NETDEV_DEV(netdev, &intf->dev);
+ if (feature & GS_CAN_FEATURE_FD) {
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+		/* The data bit timing will be overwritten if
+ * GS_CAN_FEATURE_BT_CONST_EXT is set.
+ */
+ dev->can.data_bittiming_const = &dev->bt_const;
+ dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ }
+
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
+ rc = gs_usb_get_termination(netdev, &dev->can.termination);
+ if (rc) {
+ dev->feature &= ~GS_CAN_FEATURE_TERMINATION;
+
+ dev_info(&intf->dev,
+ "Disabling termination support for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ } else {
+ dev->can.termination_const = gs_usb_termination_const;
+ dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
+ dev->can.do_set_termination = gs_usb_set_termination;
+ }
+ }
+
+ /* The CANtact Pro from LinkLayer Labs is based on the
+ * LPC54616 µC, which is affected by the NXP LPC USB transfer
+ * erratum. However, the current firmware (version 2) doesn't
+ * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
+ * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to workaround
+	 * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to work around
+ *
+ * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
+	 * CANtact Pro firmware uses a request value that is already
+ * used by the candleLight firmware for a different purpose
+ * (GS_USB_BREQ_GET_USER_ID). Set the feature
+	 * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to work around this
+ * issue.
+ */
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
+ dev->udev->manufacturer && dev->udev->product &&
+ !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
+ !strcmp(dev->udev->product, "CANtact Pro") &&
+ (le32_to_cpu(dconf->sw_version) <= 2))
+ dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
+ GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
+
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
+
+	/* fetch the extended bit timing constants if the device has both
+	 * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
+ */
+ if (feature & GS_CAN_FEATURE_FD &&
+ feature & GS_CAN_FEATURE_BT_CONST_EXT) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const_extended,
+ sizeof(bt_const_extended),
+ 1000, GFP_KERNEL);
+ if (rc) {
+ dev_err(&intf->dev,
+ "Couldn't get extended bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
+ }
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
+ strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
+
+ dev->can.data_bittiming_const = &dev->data_bt_const;
+ }
- kfree(bt_const);
+ SET_NETDEV_DEV(netdev, &intf->dev);
rc = register_candev(dev->netdev);
if (rc) {
- free_candev(dev->netdev);
- dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
- return ERR_PTR(rc);
+ dev_err(&intf->dev,
+ "Couldn't register candev for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
return dev;
+
+ out_free_candev:
+ free_candev(dev->netdev);
+ return ERR_PTR(rc);
}
static void gs_destroy_candev(struct gs_can *dev)
@@ -913,84 +1310,64 @@ static void gs_destroy_candev(struct gs_can *dev)
static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct gs_host_frame *hf;
struct gs_usb *dev;
- int rc = -ENOMEM;
+ struct gs_host_config hconf = {
+ .byte_order = cpu_to_le32(0x0000beef),
+ };
+ struct gs_device_config dconf;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = cpu_to_le32(0x0000beef);
+ int rc;
/* send host config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_HOST_FORMAT,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
- 1000);
-
- kfree(hconf);
-
- if (rc < 0) {
- dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
- rc);
+ rc = usb_control_msg_send(udev, 0,
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &hconf, sizeof(hconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_DEVICE_CONFIG,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
- 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(udev, 0,
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &dconf, sizeof(dconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
		dev_err(&intf->dev, "Couldn't get device config (err=%d)\n",
rc);
- kfree(dconf);
return rc;
}
- icount = dconf->icount + 1;
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
- kfree(dconf);
return -EINVAL;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- kfree(dconf);
+ if (!dev)
return -ENOMEM;
- }
init_usb_anchor(&dev->rx_submitted);
- atomic_set(&dev->active_channels, 0);
-
usb_set_intfdata(intf, dev);
- dev->udev = interface_to_usbdev(intf);
+ dev->udev = udev;
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf, dconf);
+ unsigned int hf_size_rx = 0;
+
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@@ -1001,22 +1378,36 @@ static int gs_usb_probe(struct usb_interface *intf,
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dconf);
kfree(dev);
return rc;
}
dev->canch[i]->parent = dev;
- }
- kfree(dconf);
+		/* set the RX packet size based on FD mode and whether
+		 * hardware timestamps are supported.
+ */
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, canfd_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, classic_can_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, classic_can, 1);
+ }
+ dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx);
+ }
return 0;
}
static void gs_usb_disconnect(struct usb_interface *intf)
{
- unsigned i;
struct gs_usb *dev = usb_get_intfdata(intf);
+ unsigned int i;
+
usb_set_intfdata(intf, NULL);
if (!dev) {
@@ -1033,20 +1424,24 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
- USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID,
+ USB_GS_USB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
+ USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
+ USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
- .probe = gs_usb_probe,
+ .name = KBUILD_MODNAME,
+ .probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
- .id_table = gs_usb_table,
+ .id_table = gs_usb_table,
};
module_usb_driver(gs_usb_driver);
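The hf_size_tx/hf_size_rx calculations above work because struct gs_host_frame ends in a union of flexible-array members, so struct_size() yields the fixed header plus exactly one payload variant. A minimal sketch, assuming the union-of-flexible-arrays layout used by the driver header:

	struct gs_host_frame_demo {
		u32 echo_id;
		__le32 can_id;
		u8 can_dlc;
		u8 channel;
		u8 flags;
		u8 reserved;
		union {
			DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
			DECLARE_FLEX_ARRAY(struct canfd, canfd);
			/* ..._ts and ..._quirk variants elided */
		};
	};

	/*
	 * struct_size(hf, canfd, 1) evaluates to
	 * offsetof(struct gs_host_frame_demo, canfd) + sizeof(struct canfd),
	 * i.e. the header plus one payload element, so every URB buffer is
	 * sized for exactly the largest frame the channel can carry.
	 */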
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
index cf260044f0b9..b20d951a0790 100644
--- a/drivers/net/can/usb/kvaser_usb/Makefile
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -1,3 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_kvaser_usb_hydra.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 3a49257f9fa6..f6c0938027ec 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -35,9 +35,11 @@
#define KVASER_USB_RX_BUFFER_SIZE 3072
#define KVASER_USB_MAX_NET_DEVICES 5
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
+#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -65,12 +67,7 @@ struct kvaser_usb_dev_card_data_hydra {
struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
- union {
- struct {
- enum kvaser_usb_leaf_family family;
- } leaf;
- struct kvaser_usb_dev_card_data_hydra hydra;
- };
+ struct kvaser_usb_dev_card_data_hydra hydra;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -83,7 +80,7 @@ struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
- const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_driver_info *driver_info;
const struct kvaser_usb_dev_cfg *cfg;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
@@ -165,6 +162,12 @@ struct kvaser_usb_dev_ops {
u16 transid);
};
+struct kvaser_usb_driver_info {
+ u32 quirks;
+ enum kvaser_usb_leaf_family family;
+ const struct kvaser_usb_dev_ops *ops;
+};
+
struct kvaser_usb_dev_cfg {
const struct can_clock clock;
const unsigned int timestamp_freq;
@@ -175,6 +178,8 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
@@ -184,4 +189,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
int len);
int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
#endif /* KVASER_USB_H */
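The kvaser_usb_driver_info plumbing below uses a standard USB core idiom: usb_device_id.driver_info is a kernel_ulong_t, wide enough to hold a pointer, so each ID table entry can carry a const per-device descriptor that probe() casts back. A condensed sketch of the round trip, with a hypothetical product ID:

	static const struct kvaser_usb_driver_info demo_info = {
		.quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS,
		.family = KVASER_LEAF,
		.ops = &kvaser_usb_leaf_dev_ops,
	};

	static const struct usb_device_id demo_table[] = {
		{ USB_DEVICE(KVASER_VENDOR_ID, 0x1234 /* hypothetical */),
		  .driver_info = (kernel_ulong_t)&demo_info },
		{ }
	};

	static int demo_probe(struct usb_interface *intf,
			      const struct usb_device_id *id)
	{
		const struct kvaser_usb_driver_info *info =
			(const struct kvaser_usb_driver_info *)id->driver_info;

		if (!info)	/* entry without a descriptor: unsupported */
			return -ENODEV;

		/* bind info->ops and info->quirks to the device here */
		return 0;
	}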
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index c4b4d3d0a387..802e27c0eced 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/kernel.h>
@@ -61,8 +62,6 @@
#define USB_USBCAN_R_V2_PRODUCT_ID 294
#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295
#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
-#define USB_LEAF_PRODUCT_ID_END \
- USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
/* Kvaser USBCan-II devices product ids */
#define USB_USBCAN_REVB_PRODUCT_ID 2
@@ -89,128 +88,163 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
#define USB_HYBRID_CANLIN_PRODUCT_ID 277
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
-#define USB_HYDRA_PRODUCT_ID_END \
- USB_HYBRID_PRO_CANLIN_PRODUCT_ID
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
- (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_LEAF_PRODUCT_ID_END);
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+ .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
+ .ops = &kvaser_usb_hydra_dev_ops,
+};
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE,
+ .family = KVASER_USBCAN,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+ .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ /* Leaf M32C USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+ /* Leaf i.MX28 USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
/* Minihydra USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
{
- int actual_len; /* Not used */
-
return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
- cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+ cmd, len, NULL, KVASER_USB_TIMEOUT);
}
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
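Dropping the unused actual_len variable here is safe: usb_bulk_msg() checks its actual_length pointer for NULL before writing through it, so callers that only care about overall success can pass NULL.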
@@ -287,6 +321,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
struct kvaser_usb *dev = urb->context;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
unsigned int i;
@@ -303,8 +338,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
- urb->actual_length);
+ ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev,
@@ -398,6 +433,7 @@ static int kvaser_usb_open(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
err = open_candev(netdev);
@@ -408,11 +444,11 @@ static int kvaser_usb_open(struct net_device *netdev)
if (err)
goto error;
- err = dev->ops->dev_set_opt_mode(priv);
+ err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
- err = dev->ops->dev_start_chip(priv);
+ err = ops->dev_start_chip(priv);
if (err) {
netdev_warn(netdev, "Cannot start device, error %d\n", err);
goto error;
@@ -441,7 +477,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -469,22 +505,23 @@ static int kvaser_usb_close(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
netif_stop_queue(netdev);
- err = dev->ops->dev_flush_queue(priv);
+ err = ops->dev_flush_queue(priv);
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n",
err);
}
- err = dev->ops->dev_stop_chip(priv);
+ err = ops->dev_stop_chip(priv);
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
@@ -523,6 +560,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
struct net_device_stats *stats = &netdev->stats;
struct kvaser_usb_tx_urb_context *context = NULL;
struct urb *urb;
@@ -532,7 +570,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
unsigned int i;
unsigned long flags;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -565,8 +603,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len,
- context->echo_index);
+ buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index);
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
@@ -629,6 +666,22 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
int i;
@@ -650,15 +703,16 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
}
}
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
- const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
{
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
+ const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+ const struct kvaser_usb_dev_ops *ops = driver_info->ops;
int err;
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, channel);
if (err)
return err;
}
@@ -675,6 +729,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
@@ -687,26 +742,31 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
- priv->can.do_set_mode = dev->ops->dev_set_mode;
- if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ priv->can.do_set_bittiming = ops->dev_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
- priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
- if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming =
- dev->ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
+ } else {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
+ }
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
@@ -731,29 +791,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
struct kvaser_usb *dev;
int err;
int i;
+ const struct kvaser_usb_driver_info *driver_info;
+ const struct kvaser_usb_dev_ops *ops;
+
+ driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+ if (!driver_info)
+ return -ENODEV;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- if (kvaser_is_leaf(id)) {
- dev->card_data.leaf.family = KVASER_LEAF;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_usbcan(id)) {
- dev->card_data.leaf.family = KVASER_USBCAN;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_hydra(id)) {
- dev->ops = &kvaser_usb_hydra_dev_ops;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) is not a supported Kvaser USB device\n",
- id->idProduct);
- return -ENODEV;
- }
-
dev->intf = intf;
+ dev->driver_info = driver_info;
+ ops = driver_info->ops;
- err = dev->ops->dev_setup_endpoints(dev);
+ err = ops->dev_setup_endpoints(dev);
if (err) {
		dev_err(&intf->dev, "Cannot get usb endpoint(s)\n");
return err;
@@ -767,22 +820,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
- err = dev->ops->dev_init_card(dev);
+ err = ops->dev_init_card(dev);
if (err) {
dev_err(&intf->dev,
"Failed to initialize card, error %d\n", err);
return err;
}
- err = dev->ops->dev_get_software_info(dev);
+ err = ops->dev_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_software_details) {
- err = dev->ops->dev_get_software_details(dev);
+ if (ops->dev_get_software_details) {
+ err = ops->dev_get_software_details(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software details, error %d\n", err);
@@ -800,14 +853,14 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
- err = dev->ops->dev_get_card_info(dev);
+ err = ops->dev_get_card_info(dev);
if (err) {
dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_capabilities) {
- err = dev->ops->dev_get_capabilities(dev);
+ if (ops->dev_get_capabilities) {
+ err = ops->dev_get_capabilities(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get capabilities, error %d\n", err);
@@ -817,7 +870,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
}
for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(dev, id, i);
+ err = kvaser_usb_init_one(dev, i);
if (err) {
kvaser_usb_remove_interfaces(dev);
return err;
@@ -840,7 +893,7 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
}
static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
+ .name = KBUILD_MODNAME,
.probe = kvaser_usb_probe,
.disconnect = kvaser_usb_disconnect,
.id_table = kvaser_usb_table,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index a26823c5b62a..66f672ea631b 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -375,7 +375,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.brp_inc = 1,
};
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
.name = "kvaser_usb_flex",
.tseg1_min = 4,
.tseg1_max = 16,
@@ -534,7 +534,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -573,7 +573,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
@@ -694,7 +694,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -735,7 +735,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
int err;
int i;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -917,8 +917,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
netif_rx(skb);
}
@@ -1069,8 +1072,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
shhwtstamps->hwtstamp = hwtstamp;
cf->can_id |= CAN_ERR_BUSERROR;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
netif_rx(skb);
@@ -1388,7 +1394,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
u32 kcan_id;
u32 kcan_header;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1462,7 +1468,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1527,7 +1533,7 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
int sjw = bt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1561,7 +1567,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
int sjw = dbt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1705,7 +1711,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1845,7 +1851,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -EINVAL;
}
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1869,7 +1875,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
priv->channel);
@@ -1887,7 +1893,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
* see comment in kvaser_usb_hydra_update_state()
@@ -1910,7 +1916,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
@@ -2052,7 +2058,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index c805b999c543..19958037720f 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -101,16 +101,6 @@
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
@@ -320,6 +310,38 @@ struct kvaser_cmd {
} u;
} __packed;
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -343,50 +365,107 @@ struct kvaser_usb_err_summary {
};
};
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 16,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+ .name = "kvaser_usb_leaf",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 2,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.clock = {
.freq = 8 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *cmd_len,
@@ -404,7 +483,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
@@ -492,6 +571,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -524,16 +606,23 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
- switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
- case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
- break;
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for 16MHz
+ * clock, regardless of the actual clock
+ */
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ } else {
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+ break;
+ }
}
}
@@ -550,7 +639,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
if (err)
return err;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
@@ -558,7 +647,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
- dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
break;
}
@@ -597,7 +686,7 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
- (dev->card_data.leaf.family == KVASER_USBCAN &&
+ (dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
@@ -730,7 +819,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
@@ -809,7 +898,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -836,8 +925,11 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
break;
}
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+ }
netif_rx(skb);
}
@@ -999,7 +1091,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->card_data.leaf.family == KVASER_LEAF &&
+ (dev->driver_info->family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
@@ -1015,7 +1107,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
break;
@@ -1030,7 +1122,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -1113,6 +1205,9 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1128,14 +1223,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
break;
case CMD_LEAF_LOG_MESSAGE:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- if (dev->card_data.leaf.family == KVASER_LEAF)
+ if (dev->driver_info->family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1147,12 +1242,12 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->card_data.leaf.family != KVASER_USBCAN)
+ if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
@@ -1225,7 +1320,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
@@ -1243,7 +1338,7 @@ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
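
The switch from init_completion() to reinit_completion() follows the usual rule for reusable completions. A minimal sketch of the intended split (function names are illustrative):

#include <linux/completion.h>

/* init_completion() initialises the wait queue and must run exactly
 * once, when the private data is set up; reinit_completion() only
 * resets the done counter, so it is the safe way to re-arm the
 * completion before every new CMD_START_CHIP/CMD_STOP_CHIP cycle.
 */
static void example_setup(struct completion *comp)
{
	init_completion(comp);		/* once, at allocation time */
}

static void example_rearm(struct completion *comp)
{
	reinit_completion(comp);	/* before each command that waits */
}
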
@@ -1331,9 +1426,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
switch (mode) {
case CAN_MODE_START:
+ kvaser_usb_unlink_tx_urbs(priv);
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 77bddff86252..218b098b261d 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -10,7 +10,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -33,10 +33,6 @@
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
-/* MCBA endpoint numbers */
-#define MCBA_USB_EP_IN 1
-#define MCBA_USB_EP_OUT 1
-
/* Microchip command id */
#define MBCA_CMD_RECEIVE_MESSAGE 0xE3
#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
@@ -83,6 +79,8 @@ struct mcba_priv {
atomic_t free_ctx_cnt;
void *rxbuf[MCBA_MAX_RX_URBS];
dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ int rx_pipe;
+ int tx_pipe;
};
/* CAN frame */
@@ -234,8 +232,6 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
NULL);
-
- can_led_event(netdev, CAN_LED_EVENT_TX);
}
if (urb->status)
@@ -268,10 +264,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv,
memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
- usb_fill_bulk_urb(urb, priv->udev,
- usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
- MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
- ctx);
+ usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE,
+ mcba_usb_write_bulk_callback, ctx);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
@@ -317,7 +311,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
.cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV
};
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
ctx = mcba_usb_get_free_ctx(priv, cf);
@@ -364,7 +358,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
xmit_failed:
can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
mcba_usb_free_ctx(ctx);
- dev_kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
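
can_dev_dropped_skb() is the new common TX-path gate used throughout this series. A sketch of what it adds on top of the old can_dropped_invalid_skb() check (paraphrased; see the can/dev headers for the authoritative version):

#include <linux/can/dev.h>

/* Paraphrased sketch: frames queued while the interface is in
 * listen-only mode are dropped up front, before the validity check
 * that can_dropped_invalid_skb() already performed.
 */
static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct can_priv *priv = netdev_priv(dev);

	if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		netdev_info_once(dev, "interface in listen only mode, dropping skb\n");
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return true;
	}

	return can_dropped_invalid_skb(dev, skb);
}
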
@@ -457,7 +450,6 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
}
stats->rx_packets++;
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);
}
@@ -608,7 +600,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT),
+ priv->rx_pipe,
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
@@ -653,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+ priv->rx_pipe,
buf, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -705,7 +697,6 @@ static int mcba_usb_open(struct net_device *netdev)
priv->can_speed_check = true;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
netif_start_queue(netdev);
return 0;
@@ -737,7 +728,6 @@ static int mcba_usb_close(struct net_device *netdev)
mcba_urb_unlink(priv);
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -769,6 +759,10 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_start_xmit = mcba_usb_start_xmit,
};
+static const struct ethtool_ops mcba_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Microchip CANBUS has hardcoded bittiming values by default.
* This function sends a request via USB to change the speed and align the
* bittiming values for presentation purposes only
@@ -807,6 +801,13 @@ static int mcba_usb_probe(struct usb_interface *intf,
struct mcba_priv *priv;
int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+
+ err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL);
+ if (err) {
+ dev_err(&intf->dev, "Can't find endpoints\n");
+ return err;
+ }
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
if (!netdev) {
@@ -840,6 +841,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
priv->can.do_set_bittiming = mcba_net_set_bittiming;
netdev->netdev_ops = &mcba_netdev_ops;
+ netdev->ethtool_ops = &mcba_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -852,7 +854,8 @@ static int mcba_usb_probe(struct usb_interface *intf,
goto cleanup_free_candev;
}
- devm_can_led_init(netdev);
+ priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
+ priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
/* Start USB dev only if we have successfully registered CAN device */
err = mcba_usb_start(priv);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 17dc178f555b..687dd542f7f6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -506,6 +506,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
/* Supply TX/RX error counters in case of
* controller error.
*/
+ cf->can_id = CAN_ERR_CNT;
cf->data[6] = mc->pdev->bec.txerr;
cf->data[7] = mc->pdev->bec.rxerr;
}
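
CAN_ERR_CNT is the uapi flag (linux/can/error.h) that tells userspace data[6]/data[7] of an error frame carry the TX/RX error counters. A minimal reader sketch, assuming s is a bound CAN_RAW socket with error frames enabled:

#include <stdio.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/error.h>

/* Print the counters only when the sender flagged them as valid; the
 * drivers above now skip both the flag and the data bytes once the
 * controller is bus-off, where the counters are meaningless.
 */
static void dump_err_counters(int s)
{
	struct can_frame cf;

	if (read(s, &cf, sizeof(cf)) != (ssize_t)sizeof(cf))
		return;

	if ((cf.can_id & CAN_ERR_FLAG) && (cf.can_id & CAN_ERR_CNT))
		printf("txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);
}
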
@@ -533,7 +534,7 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
{
struct pcan_usb *pdev = mc->pdev;
- /* acccording to the content of the packet */
+ /* according to the content of the packet */
switch (ir) {
case PCAN_USB_ERR_CNT_DEC:
case PCAN_USB_ERR_CNT_INC:
@@ -964,6 +965,7 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index b850ff8fe4bd..1d996d3320fe 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -57,7 +57,7 @@ MODULE_DEVICE_TABLE(usb, peak_usb_table);
* dump memory
*/
#define DUMP_WIDTH 16
-void pcan_dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(const char *prompt, const void *p, int l)
{
pr_info("%s dumping %s (%d bytes):\n",
PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -351,7 +351,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
int i, err;
size_t size = dev->adapter->tx_buffer_size;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
@@ -775,13 +775,54 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
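
The handler above accepts exactly one configuration: TX timestamping off, all-RX filter. A minimal userspace sketch exercising both directions of the ioctl; s is any open socket, and the interface name passed by the caller is an assumption:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Request the only hwtstamp configuration the driver accepts, then
 * read it back; anything other than TX_OFF + FILTER_ALL is rejected
 * with -ERANGE by the handler above.
 */
static int hwtstamp_roundtrip(int s, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(s, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	return ioctl(s, SIOCGHWTSTAMP, &ifr);	/* refills cfg */
}
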
+
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
/*
* create one device which is attached to CAN controller #ctrl_idx of the
* usb adapter.
@@ -921,7 +962,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f60af573a2e0..f6bdd8b3f290 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -132,7 +132,7 @@ struct peak_usb_device {
struct peak_usb_device *next_siblings;
};
-void pcan_dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(const char *prompt, const void *p, int l);
/* common timestamp management */
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
@@ -145,5 +145,6 @@ int peak_usb_netif_rx(struct sk_buff *skb,
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 65487ec33566..2ea1500df393 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -33,6 +33,10 @@
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
+/* struct pcan_ufd_fw_info::type */
+#define PCAN_USBFD_TYPE_STD 1
+#define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */
+
/* read some version info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
@@ -44,6 +48,13 @@ struct __packed pcan_ufd_fw_info {
__le32 dev_id[2]; /* "device id" per CAN */
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
+
+ /* extended data when type == PCAN_USBFD_TYPE_EXT */
+ u8 cmd_out_ep; /* ep for cmd */
+ u8 cmd_in_ep; /* ep for replies */
+ u8 data_out_ep[2]; /* ep for CANx TX */
+ u8 data_in_ep; /* ep for CAN RX */
+ u8 dummy[3];
};
/* handle device specific info used by the netdevices */
@@ -171,6 +182,9 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
/* send PCAN-USB Pro FD commands synchronously */
static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
@@ -200,7 +214,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
- PCAN_USBPRO_EP_CMDOUT),
+ fw_info->cmd_out_ep),
packet_ptr, packet_len,
NULL, PCAN_UFD_CMD_TIMEOUT_MS);
if (err) {
@@ -426,6 +440,9 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
struct urb *urb, u8 *buf)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
u8 *pc = buf;
/* build the entire cmds list in the provided buffer, to go back into
@@ -439,7 +456,7 @@ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
/* complete the URB */
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep),
buf, pc - buf,
pcan_usb_pro_restart_complete, dev);
@@ -839,6 +856,15 @@ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
return 0;
}
+/* probe function for all PCAN-USB FD family usb interfaces */
+static int pcan_usb_fd_probe(struct usb_interface *intf)
+{
+ struct usb_host_interface *iface_desc = &intf->altsetting[0];
+
+ /* CAN interface is always interface #0 */
+ return iface_desc->desc.bInterfaceNumber;
+}
+
/* stop interface (last chance before set bus off) */
static int pcan_usb_fd_stop(struct peak_usb_device *dev)
{
@@ -860,6 +886,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
{
struct pcan_usb_fd_device *pdev =
container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info;
int i, err = -ENOMEM;
/* do this for 1st channel only */
@@ -878,10 +905,12 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* number of ts msgs to ignore before taking one into account */
pdev->usb_if->cm_ignore_count = 5;
+ fw_info = &pdev->usb_if->fw_info;
+
err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
PCAN_USBPRO_INFO_FW,
- &pdev->usb_if->fw_info,
- sizeof(pdev->usb_if->fw_info));
+ fw_info,
+ sizeof(*fw_info));
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -895,14 +924,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
*/
dev_info(dev->netdev->dev.parent,
"PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
- dev->adapter->name, pdev->usb_if->fw_info.hw_version,
- pdev->usb_if->fw_info.fw_version[0],
- pdev->usb_if->fw_info.fw_version[1],
- pdev->usb_if->fw_info.fw_version[2],
+ dev->adapter->name, fw_info->hw_version,
+ fw_info->fw_version[0],
+ fw_info->fw_version[1],
+ fw_info->fw_version[2],
dev->adapter->ctrl_count);
/* check for ability to switch between ISO/non-ISO modes */
- if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+ if (fw_info->fw_version[0] >= 2) {
/* firmware >= 2.x supports ISO/non-ISO switching */
dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
} else {
@@ -910,6 +939,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
+ /* If the vendor response is of type 2, then it contains the EP numbers
+ * to use for the command pipes. If not, then the default EPs are used.
+ */
+ if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
+ fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
+ }
+
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
if (err) {
@@ -930,12 +967,23 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* do a copy of the ctrlmode[_supported] too */
dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
+
+ fw_info = &pdev->usb_if->fw_info;
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
dev->device_number =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ /* If the vendor response is of type 2, then it contains the EP numbers
+ * to use for the data pipes. If not, then the statically defined EPs
+ * are used (see peak_usb_create_dev()).
+ */
+ if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ dev->ep_msg_in = fw_info->data_in_ep;
+ dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
+ }
+
/* set clock domain */
for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
@@ -1032,6 +1080,7 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/* describes the PCAN-USB FD adapter */
@@ -1091,7 +1140,7 @@ const struct peak_usb_adapter pcan_usb_fd = {
.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
/* device callbacks */
- .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .intf_probe = pcan_usb_fd_probe,
.dev_init = pcan_usb_fd_init,
.dev_exit = pcan_usb_fd_exit,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index ebe087f258e3..5d8f6a40bb2c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -439,7 +439,7 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->serial_num);
+ *device_id = le32_to_cpu(pdn->dev_num);
return err;
}
@@ -1022,6 +1022,7 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 5d4cf14eb9d9..a34e0fc021c9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -112,7 +112,7 @@ struct __packed pcan_usb_pro_devid {
u8 data_type;
u8 channel;
__le16 dummy;
- __le32 serial_num;
+ __le32 dev_num;
};
#define PCAN_USBPRO_LED_DEVICE 0x00
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index c7c41d1fd038..67c2ff407d06 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -28,6 +28,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -1119,7 +1120,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
/* check skb */
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* allocate a context and slow down tx path, if fifo state is low */
@@ -1233,6 +1234,10 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Request to set bittiming
*
* This function generates an USB set bittiming message and transmits
@@ -1392,7 +1397,7 @@ static int ucan_probe(struct usb_interface *intf,
* Stage 3 for the final driver initialisation.
*/
- /* Prepare Memory for control transferes */
+ /* Prepare memory for control transfers */
ctl_msg_buffer = devm_kzalloc(&udev->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
@@ -1512,6 +1517,7 @@ static int ucan_probe(struct usb_interface *intf,
spin_lock_init(&up->context_lock);
spin_lock_init(&up->echo_skb_lock);
netdev->netdev_ops = &ucan_netdev_ops;
+ netdev->ethtool_ops = &ucan_ethtool_ops;
usb_set_intfdata(intf, up);
SET_NETDEV_DEV(netdev, &intf->dev);
@@ -1526,7 +1532,7 @@ static int ucan_probe(struct usb_interface *intf,
ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
sizeof(union ucan_ctl_payload));
if (ret > 0) {
- /* copy string while ensuring zero terminiation */
+ /* copy string while ensuring zero termination */
strncpy(firmware_str, up->ctl_msg_buffer->raw,
sizeof(union ucan_ctl_payload));
firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 431af1ec1e3c..8a5596ce4e46 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -12,6 +12,7 @@
* who were very cooperative and answered my questions.
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -21,7 +22,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
/* driver constants */
#define MAX_RX_URBS 20
@@ -439,9 +439,11 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
if (rx_errors)
stats->rx_errors++;
-
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (priv->can.state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
@@ -480,8 +482,6 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
stats->rx_packets++;
netif_rx(skb);
-
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
} else {
netdev_warn(priv->netdev, "frame type %d unknown",
msg->type);
@@ -582,8 +582,6 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL);
- can_led_event(netdev, CAN_LED_EVENT_TX);
-
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -604,7 +602,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
int i, err;
size_t size = sizeof(struct usb_8dev_tx_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -663,9 +661,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
atomic_inc(&priv->active_tx_urbs);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err))
- goto failed;
- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index, NULL);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
+ stats->tx_dropped++;
+ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
/* Slow down tx path */
netif_stop_queue(netdev);
@@ -684,19 +693,6 @@ nofreecontext:
return NETDEV_TX_BUSY;
-failed:
- can_free_echo_skb(netdev, context->echo_index, NULL);
-
- usb_unanchor_urb(urb);
- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
-
- atomic_dec(&priv->active_tx_urbs);
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
-
nomembuf:
usb_free_urb(urb);
@@ -809,8 +805,6 @@ static int usb_8dev_open(struct net_device *netdev)
if (err)
return err;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
-
/* finally start device */
err = usb_8dev_start(priv);
if (err) {
@@ -867,8 +861,6 @@ static int usb_8dev_close(struct net_device *netdev)
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
-
return err;
}
@@ -879,8 +871,12 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops usb_8dev_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const usb_8dev_bittiming_const = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -936,6 +932,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
CAN_CTRLMODE_CC_LEN8_DLC;
netdev->netdev_ops = &usb_8dev_netdev_ops;
+ netdev->ethtool_ops = &usb_8dev_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -976,8 +973,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
(version>>8) & 0xff, version & 0xff);
}
- devm_can_led_init(netdev);
-
return 0;
cleanup_unregister_candev:
@@ -1008,7 +1003,7 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
}
static struct usb_driver usb_8dev_driver = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.probe = usb_8dev_probe,
.disconnect = usb_8dev_disconnect,
.id_table = usb_8dev_table,
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index c42f18845b02..285635c23443 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -40,6 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -70,35 +71,36 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += can_skb_get_data_len(skb);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
- int loop, len;
+ unsigned int len;
+ int loop;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
stats->tx_packets++;
stats->tx_bytes += len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
+ skb_tx_timestamp(skb);
+
if (!echo) {
/* no echo handling available inside this driver */
if (loop) {
@@ -134,7 +136,8 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
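
can_is_canxl_dev_mtu() admits the variable CAN XL MTU range on top of the two fixed classic/FD values. A sketch of the check, per the CAN XL definitions in the uapi headers:

#include <linux/can.h>

/* CAN XL payloads run from 1 to 2048 bytes, so any MTU between
 * CANXL_MIN_MTU and CANXL_MAX_MTU (header plus payload) is valid.
 */
static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
{
	return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
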
@@ -146,6 +149,10 @@ static const struct net_device_ops vcan_netdev_ops = {
.ndo_change_mtu = vcan_change_mtu,
};
+static const struct ethtool_ops vcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
@@ -161,6 +168,7 @@ static void vcan_setup(struct net_device *dev)
dev->flags |= IFF_ECHO;
dev->netdev_ops = &vcan_netdev_ops;
+ dev->ethtool_ops = &vcan_ethtool_ops;
dev->needs_free_netdev = true;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 47ccc15a3486..26a472d2ea58 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -9,6 +9,7 @@
* Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -33,28 +34,34 @@ struct vxcan_priv {
struct net_device __rcu *peer;
};
-static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
- u8 len;
+ struct sk_buff *skb;
+ unsigned int len;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (unlikely(!peer)) {
- kfree_skb(skb);
+ kfree_skb(oskb);
dev->stats.tx_dropped++;
goto out_unlock;
}
- skb = can_create_echo_skb(skb);
- if (!skb)
+ skb_tx_timestamp(oskb);
+
+ skb = skb_clone(oskb, GFP_ATOMIC);
+ if (skb) {
+ consume_skb(oskb);
+ } else {
+ kfree_skb(oskb);
goto out_unlock;
+ }
/* reset CAN GW hop counter */
skb->csum_start = 0;
@@ -62,8 +69,8 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
- if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ len = can_skb_get_data_len(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
peerstats = &peer->stats;
@@ -124,7 +131,8 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
@@ -139,6 +147,10 @@ static const struct net_device_ops vxcan_netdev_ops = {
.ndo_change_mtu = vxcan_change_mtu,
};
+static const struct ethtool_ops vxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vxcan_setup(struct net_device *dev)
{
struct can_ml_priv *can_ml;
@@ -148,8 +160,9 @@ static void vxcan_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
- dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
+ dev->ethtool_ops = &vxcan_ethtool_ops;
dev->needs_free_netdev = true;
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 1674b561c9a2..43c812ea1de0 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
*
- * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2012 - 2022 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
@@ -9,8 +9,10 @@
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -26,7 +28,6 @@
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/pm_runtime.h>
#define DRIVER_NAME "xilinx_can"
@@ -51,7 +52,7 @@ enum xcan_reg {
/* only on CAN FD cores */
XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
- * Prescalar
+ * Prescaler
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
@@ -87,6 +88,8 @@ enum xcan_reg {
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
+#define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
@@ -100,6 +103,7 @@ enum xcan_reg {
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
@@ -133,6 +137,7 @@ enum xcan_reg {
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
@@ -239,7 +244,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -259,24 +264,44 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
+/* Transmission Delay Compensation constants for CANFD 1.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 32,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+/* Transmission Delay Compensation constants for CANFD 2.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd2 = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 64,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -406,7 +431,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
return -EPERM;
}
- /* Setting Baud Rate prescalar value in BRPR Register */
+ /* Setting Baud Rate prescaler value in BRPR Register */
btr0 = (bt->brp - 1);
/* Setting Time Segment 1 in BTR Register */
@@ -423,8 +448,16 @@ static int xcan_set_bittiming(struct net_device *ndev)
if (priv->devtype.cantype == XAXI_CANFD ||
priv->devtype.cantype == XAXI_CANFD_2_0) {
- /* Setting Baud Rate prescalar value in F_BRPR Register */
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
+ if (can_tdc_is_enabled(&priv->can)) {
+ if (priv->devtype.cantype == XAXI_CANFD)
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ else
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ }
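
FIELD_PREP() shifts a value into the field described by a GENMASK() constant, which keeps the two TDCO layouts readable. A small illustration (register values are examples only):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* With XCAN_BRPR_TDCO_MASK = GENMASK(12, 8), FIELD_PREP() places TDCO
 * at bit 8; FIELD_GET() is the inverse, used by xcan_get_auto_tdcv()
 * further down to pull TDCV out of the status register.
 */
static u32 example_brpr(u32 brp, u32 tdco)
{
	return (brp - 1) |			/* prescaler field */
	       FIELD_PREP(GENMASK(12, 8), tdco) |
	       BIT(16);				/* TDC enable */
}
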
/* Setting Time Segment 1 in BTR Register */
btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
@@ -710,7 +743,7 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
@@ -933,6 +966,7 @@ static void xcan_set_error_state(struct net_device *ndev,
can_change_state(ndev, cf, tx_state, rx_state);
if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
@@ -1209,16 +1243,15 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
XCAN_IXR_RXNEMP_MASK);
}
- if (work_done) {
- can_led_event(ndev, CAN_LED_EVENT_RX);
+ if (work_done)
xcan_update_error_state_after_rxtx(ndev);
- }
if (work_done < quota) {
- napi_complete_done(napi, work_done);
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= xcan_rx_int_mask(priv);
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ if (napi_complete_done(napi, work_done)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= xcan_rx_int_mask(priv);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
}
return work_done;
}
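
Gating the interrupt re-enable on the return value of napi_complete_done() is the standard poll-loop tail. A sketch of the generic pattern; enable_rx_irqs() stands in for the IER write above:

#include <linux/netdevice.h>

static void enable_rx_irqs(void);	/* placeholder for the IER write */

/* napi_complete_done() returns false when NAPI stays scheduled, e.g.
 * under busy polling; re-enabling the device interrupts in that case
 * could let the IRQ handler race with the still-running poll loop.
 */
static int example_poll_tail(struct napi_struct *napi, int work_done,
			     int budget)
{
	if (work_done < budget && napi_complete_done(napi, work_done))
		enable_rx_irqs();

	return work_done;
}
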
@@ -1297,7 +1330,6 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
spin_unlock_irqrestore(&priv->tx_lock, flags);
- can_led_event(ndev, CAN_LED_EVENT_TX);
xcan_update_error_state_after_rxtx(ndev);
}
@@ -1419,7 +1451,6 @@ static int xcan_open(struct net_device *ndev)
goto err_candev;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -1451,7 +1482,6 @@ static int xcan_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
pm_runtime_put(priv->dev);
return 0;
@@ -1488,6 +1518,22 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
+/**
+ * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
+ * @ndev: Pointer to net_device structure
+ * @tdcv: Pointer to TDCV value
+ *
+ * Return: 0 on success
+ */
+static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ return 0;
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1495,6 +1541,10 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops xcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/**
* xcan_suspend - Suspend method for the driver
* @dev: Address of the device structure
@@ -1740,17 +1790,24 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
- if (devtype->cantype == XAXI_CANFD)
+ if (devtype->cantype == XAXI_CANFD) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
+ priv->can.tdc_const = &xcan_tdc_const_canfd;
+ }
- if (devtype->cantype == XAXI_CANFD_2_0)
+ if (devtype->cantype == XAXI_CANFD_2_0) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
+ priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ }
if (devtype->cantype == XAXI_CANFD ||
- devtype->cantype == XAXI_CANFD_2_0)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ devtype->cantype == XAXI_CANFD_2_0) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_TDC_AUTO;
+ priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ }
priv->reg_base = addr;
priv->tx_max = tx_max;
@@ -1769,6 +1826,7 @@ static int xcan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &xcan_netdev_ops;
+ ndev->ethtool_ops = &xcan_ethtool_ops;
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
@@ -1803,7 +1861,7 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(priv->can_clk);
- netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+ netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
ret = register_candev(ndev);
if (ret) {
@@ -1811,8 +1869,6 @@ static int xcan_probe(struct platform_device *pdev)
goto err_disableclks;
}
- devm_can_led_init(ndev);
-
pm_runtime_put(&pdev->dev);
if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7b1457a6e327..07507b4820d7 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT753x and MT7621 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select MEDIATEK_GE_PHY
help
This enables support for the MediaTek MT7530, MT7531, and MT7621
Ethernet switch chips.
@@ -59,37 +60,29 @@ source "drivers/net/dsa/sja1105/Kconfig"
source "drivers/net/dsa/xrs700x/Kconfig"
-config NET_DSA_QCA8K
- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- select NET_DSA_TAG_QCA
- select REGMAP
- help
- This enables support for the Qualcomm Atheros QCA8K Ethernet
- switch chips.
+source "drivers/net/dsa/realtek/Kconfig"
-config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI Ethernet switch family support"
- select NET_DSA_TAG_RTL4_A
- select NET_DSA_TAG_RTL8_4
- select FIXED_PHY
- select IRQ_DOMAIN
- select REALTEK_PHY
- select REGMAP
+config NET_DSA_RZN1_A5PSW
+ tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
+ depends on OF && ARCH_RZN1
+ select NET_DSA_TAG_RZN1_A5PSW
+ select PCS_RZN1_MIIC
help
- This enables support for the Realtek SMI-based switch
- chips, currently only RTL8366RB.
+ This driver supports the A5PSW switch, which is embedded in the Renesas
+ RZ/N1 SoC.
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
help
- This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
config NET_DSA_SMSC_LAN9303_I2C
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
depends on I2C
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_SMSC_LAN9303
select REGMAP_I2C
help
@@ -97,10 +90,11 @@ config NET_DSA_SMSC_LAN9303_I2C
for I2C managed mode.
config NET_DSA_SMSC_LAN9303_MDIO
- tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
+ depends on VLAN_8021Q || VLAN_8021Q=n
help
- Enable access functions if the SMSC/Microchip LAN9303 is configured
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8da1569a34e6..16eb879e0cb4 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -8,9 +8,7 @@ endif
obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
+obj-$(CONFIG_NET_DSA_RZN1_A5PSW) += rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
@@ -23,5 +21,6 @@ obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
+obj-y += realtek/
obj-y += sja1105/
obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3867f3d4545f..59cdfc51ce06 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -972,7 +972,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
@@ -1309,87 +1309,70 @@ void b53_port_event(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_port_event);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct b53_device *dev = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (dev->ops->serdes_phylink_validate)
- dev->ops->serdes_phylink_validate(dev, port, mask, state);
+ /* Internal ports need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* These switches appear to support MII and RevMII too, but beyond
+ * this, the code gives very few clues. FIXME: We probably need more
+ * interface modes here.
+ *
+ * According to b53_srab_mux_init(), ports 3..5 can support:
+ * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+ * However, the interface mode read from the MUX configuration is
+ * not passed back to DSA, so phylink uses NA.
+ * DT can specify RGMII for ports 0, 1.
+ * For MDIO, port 8 can be RGMII_TXID.
+ */
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
- /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
- * support Gigabit, including Half duplex.
+ /* 5325/5365 are not capable of gigabit speeds, everything else is.
+ * Note: the original code also excluded Gigabit for MII, RevMII
+ * and 802.3z modes. MII and RevMII are not able to work above 100M,
+ * so will be excluded by the generic validator implementation.
+ * However, the exclusion of Gigabit for 802.3z just seems wrong.
*/
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !(is5325(dev) || is5365(dev))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
+ if (!(is5325(dev) || is5365(dev)))
+ config->mac_capabilities |= MAC_1000;
- if (!phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ /* Get the implementation specific capabilities */
+ if (dev->ops->phylink_get_caps)
+ dev->ops->phylink_get_caps(dev, port, config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- phylink_helper_basex_speed(state);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
}
-EXPORT_SYMBOL(b53_phylink_validate);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- int ret = -EOPNOTSUPP;
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_link_state)
- ret = dev->ops->serdes_link_state(dev, port, state);
+ if (!dev->ops->phylink_mac_select_pcs)
+ return NULL;
- return ret;
+ return dev->ops->phylink_mac_select_pcs(dev, port, interface);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct b53_device *dev = ds->priv;
-
- if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
- return;
-
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_config)
- dev->ops->serdes_config(dev, port, mode, state);
}
EXPORT_SYMBOL(b53_phylink_mac_config);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct b53_device *dev = ds->priv;
-
- if (dev->ops->serdes_an_restart)
- dev->ops->serdes_an_restart(dev, port);
-}
-EXPORT_SYMBOL(b53_phylink_mac_an_restart);
-
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -1620,12 +1603,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
- return -ENOSPC;
-
*idx = find_first_bit(free_bins, dev->num_arl_bins);
-
- return -ENOENT;
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
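
The rewritten tail relies on find_first_bit() returning the bitmap size when no bit is set. A compact sketch of that contract:

#include <linux/bitmap.h>

/* find_first_bit(map, size) == size means "no free bin": report
 * -ENOSPC. Otherwise *idx is a valid free bin and -ENOENT tells the
 * caller the MAC was not found but can be inserted there.
 */
static int example_pick_bin(unsigned long *free_bins, unsigned int nbins,
			    unsigned int *idx)
{
	*idx = find_first_bit(free_bins, nbins);

	return *idx >= nbins ? -ENOSPC : -ENOENT;
}
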
static int b53_arl_op(struct b53_device *dev, int op, int port,
@@ -1704,7 +1683,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
}
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1724,7 +1704,8 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_add);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1825,7 +1806,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_dump);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1845,7 +1827,8 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_add);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1861,7 +1844,7 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_del);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
@@ -2102,7 +2085,8 @@ out:
EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 reg, loc;
@@ -2186,7 +2170,7 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
- ret = phy_init_eee(phy, 0);
+ ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2259,10 +2243,9 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
- .phylink_validate = b53_phylink_validate,
- .phylink_mac_link_state = b53_phylink_mac_link_state,
+ .phylink_get_caps = b53_phylink_get_caps,
+ .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
.phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_an_restart = b53_phylink_mac_an_restart,
.phylink_mac_link_down = b53_phylink_mac_link_down,
.phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a7aeb3c132c9..6ddc03b58b28 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -356,8 +356,6 @@ static void b53_mdio_remove(struct mdio_device *mdiodev)
return;
b53_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..e968322dfbf0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,8 +316,6 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index b41dc8ac2ca8..795cbffd5c2b 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -21,7 +21,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <net/dsa.h>
@@ -29,7 +29,6 @@
struct b53_device;
struct net_device;
-struct phylink_link_state;
struct b53_io_ops {
int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
@@ -46,19 +45,15 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phylink_get_caps)(struct b53_device *dev, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
u8 (*serdes_map_lane)(struct b53_device *dev, int port);
- int (*serdes_link_state)(struct b53_device *dev, int port,
- struct phylink_link_state *state);
- void (*serdes_config)(struct b53_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- void (*serdes_an_restart)(struct b53_device *dev, int port);
void (*serdes_link_set)(struct b53_device *dev, int port,
unsigned int mode, phy_interface_t interface,
bool link_up);
- void (*serdes_phylink_validate)(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
};
#define B53_INVALID_LANE 0xff
@@ -86,8 +81,15 @@ enum {
BCM7278_DEVICE_ID = 0x7278,
};
+struct b53_pcs {
+ struct phylink_pcs pcs;
+ struct b53_device *dev;
+ u8 lane;
+};
+
#define B53_N_PORTS 9
#define B53_N_PORTS_25 6
+#define B53_N_PCS 2
struct b53_port {
u16 vlan_ctl_mask;
@@ -144,6 +146,8 @@ struct b53_device {
bool vlan_enabled;
unsigned int num_ports;
struct b53_port *ports;
+
+ struct b53_pcs pcs[B53_N_PCS];
};
#define b53_for_each_port(dev, i) \
@@ -325,7 +329,7 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
- bool *tx_fwd_offload);
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
@@ -337,15 +341,9 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port);
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -363,17 +361,22 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 5ae3d9783b68..0690210770ff 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -17,6 +17,11 @@
#include "b53_serdes.h"
#include "b53_regs.h"
+static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct b53_pcs, pcs);
+}
+
static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
u16 value)
{
@@ -60,51 +65,47 @@ static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
return b53_serdes_read_blk(dev, offset, block);
}
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state)
+static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
reg |= FIBER_MODE_1000X;
else
reg &= ~FIBER_MODE_1000X;
b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK, reg);
+
+ return 0;
}
-EXPORT_SYMBOL(b53_serdes_config);
-void b53_serdes_an_restart(struct b53_device *dev, int port)
+static void b53_serdes_an_restart(struct phylink_pcs *pcs)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK);
reg |= BMCR_ANRESTART;
b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK, reg);
}
-EXPORT_SYMBOL(b53_serdes_an_restart);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 dig, bmsr;
- if (lane == B53_INVALID_LANE)
- return 1;
-
dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
SERDES_DIGITAL_BLK);
bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
@@ -133,10 +134,7 @@ int b53_serdes_link_state(struct b53_device *dev, int port,
state->pause |= MLO_PAUSE_RX;
if (dig & PAUSE_RESOLUTION_TX_SIDE)
state->pause |= MLO_PAUSE_TX;
-
- return 0;
}
-EXPORT_SYMBOL(b53_serdes_link_state);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up)
@@ -158,9 +156,14 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
}
EXPORT_SYMBOL(b53_serdes_link_set);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static const struct phylink_pcs_ops b53_pcs_ops = {
+ .pcs_get_state = b53_serdes_get_state,
+ .pcs_config = b53_serdes_config,
+ .pcs_an_restart = b53_serdes_an_restart,
+};
+
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
{
u8 lane = b53_serdes_map_lane(dev, port);
@@ -169,20 +172,47 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port,
switch (lane) {
case 0:
- phylink_set(supported, 2500baseX_Full);
+ /* It appears lane 0 supports 2500base-X and 1000base-X */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
fallthrough;
case 1:
- phylink_set(supported, 1000baseX_Full);
+ /* It appears lane 1 only supports 1000base-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000FD;
break;
default:
break;
}
}
-EXPORT_SYMBOL(b53_serdes_phylink_validate);
+EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
+
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE || lane >= B53_N_PCS ||
+ !dev->pcs[lane].dev)
+ return NULL;
+
+ if (!phy_interface_mode_is_8023z(interface) &&
+ interface != PHY_INTERFACE_MODE_SGMII)
+ return NULL;
+
+ return &dev->pcs[lane].pcs;
+}
+EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs);
int b53_serdes_init(struct b53_device *dev, int port)
{
u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_pcs *pcs;
u16 id0, msb, lsb;
if (lane == B53_INVALID_LANE)
@@ -205,6 +235,11 @@ int b53_serdes_init(struct b53_device *dev, int port)
(id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
(u32)msb << 16 | lsb);
+ pcs = &dev->pcs[lane];
+ pcs->dev = dev;
+ pcs->lane = lane;
+ pcs->pcs.ops = &b53_pcs_ops;
+
return 0;
}
EXPORT_SYMBOL(b53_serdes_init);
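The PCS conversion above uses the standard embedded-object pattern: the driver wraps struct phylink_pcs, hands phylink the embedded member, and recovers its wrapper with container_of() in each callback. A sketch with hypothetical names:

struct my_switch;			/* hypothetical driver type */

struct my_pcs {
	struct phylink_pcs pcs;		/* embedded, never a pointer */
	struct my_switch *dev;
	u8 lane;
};

static struct my_pcs *to_my_pcs(struct phylink_pcs *pcs)
{
	/* phylink passes back &my_pcs->pcs; walk back to the wrapper */
	return container_of(pcs, struct my_pcs, pcs);
}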
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
index 55d280fe38e4..ef81f5da5f81 100644
--- a/drivers/net/dsa/b53/b53_serdes.h
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -107,17 +107,13 @@ static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
return dev->ops->serdes_map_lane(dev, port);
}
-int b53_serdes_get_link(struct b53_device *dev, int port);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state);
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state);
-void b53_serdes_an_restart(struct b53_device *dev, int port);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config);
#if IS_ENABLED(CONFIG_B53_SERDES)
int b53_serdes_init(struct b53_device *dev, int port);
#else
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 2b88f03e5252..308f15d3832e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -314,16 +314,12 @@ static int b53_spi_probe(struct spi_device *spi)
return 0;
}
-static int b53_spi_remove(struct spi_device *spi)
+static void b53_spi_remove(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
if (dev)
b53_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 4591bb1c05d2..bcb44034404d 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -443,6 +443,39 @@ static void b53_srab_irq_disable(struct b53_device *dev, int port)
}
}
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ switch (p->mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ /* If p->mode indicates SGMII mode, that essentially means we
+	 * are using a serdes. Ask the serdes for its capabilities.
+ */
+ b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* If we support RGMII, support all RGMII modes, since
+ * that dictates the PHY delay settings.
+ */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+
+ default:
+	/* Some other mode (e.g. MII, GMII, etc.) */
+ __set_bit(p->mode, config->supported_interfaces);
+ break;
+ }
+}
+
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
@@ -456,13 +489,11 @@ static const struct b53_io_ops b53_srab_ops = {
.write64 = b53_srab_write64,
.irq_enable = b53_srab_irq_enable,
.irq_disable = b53_srab_irq_disable,
+ .phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
+ .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs,
.serdes_map_lane = b53_srab_serdes_map_lane,
- .serdes_link_state = b53_serdes_link_state,
- .serdes_config = b53_serdes_config,
- .serdes_an_restart = b53_serdes_an_restart,
.serdes_link_set = b53_serdes_link_set,
- .serdes_phylink_validate = b53_serdes_phylink_validate,
#endif
};
@@ -636,8 +667,6 @@ static int b53_srab_remove(struct platform_device *pdev)
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 33499fcd8848..cde253d27bd0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -94,6 +94,24 @@ static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -141,7 +159,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
- u32 reg, offset;
+ u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -167,21 +185,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM4908_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
- else
- reg &= ~GMII_SPEED_UP_2G;
- core_writel(priv, reg, offset);
-
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
@@ -621,7 +624,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
@@ -681,8 +684,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
+ if (err && dn) {
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
+ }
return err;
}
@@ -690,6 +695,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
@@ -709,49 +715,25 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
PHY_BRCM_IDDQ_SUSPEND;
}
-static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
+ unsigned long *interfaces = config->supported_interfaces;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_MOCA) {
- linkmode_zero(supported);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID))
- dev_err(ds->dev,
- "Unsupported interface: %d for port %d\n",
- state->interface, port);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ if (priv->int_phy_mask & BIT(port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ } else if (priv->moca_port == port) {
+ __set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ phy_interface_set_rgmii(interfaces);
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
@@ -830,17 +812,13 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ if (priv->wol_ports_mask & BIT(port))
+ return;
- reg = core_readl(priv, offset);
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
- }
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
@@ -854,51 +832,56 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ u32 reg_rgmii_ctrl = 0;
+ u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl = 0;
- u32 reg, offset;
+ offset = bcm_sf2_port_override_offset(priv, port);
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- if (interface == PHY_INTERFACE_MODE_RGMII ||
- interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- interface == PHY_INTERFACE_MODE_MII ||
- interface == PHY_INTERFACE_MODE_REVMII) {
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
- reg = reg_readl(priv, reg_rgmii_ctrl);
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
- if (tx_pause)
- reg |= TX_PAUSE_EN;
- if (rx_pause)
- reg |= RX_PAUSE_EN;
-
- reg_writel(priv, reg, reg_rgmii_ctrl);
- }
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
- reg = SW_OVERRIDE | LINK_STS;
- switch (speed) {
- case SPEED_1000:
- reg |= SPDSTS_1000 << SPEED_SHIFT;
- break;
- case SPEED_100:
- reg |= SPDSTS_100 << SPEED_SHIFT;
- break;
- }
+ if (tx_pause)
+ reg |= TX_PAUSE_EN;
+ if (rx_pause)
+ reg |= RX_PAUSE_EN;
- if (duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ reg_writel(priv, reg, reg_rgmii_ctrl);
+ }
- core_writel(priv, reg, offset);
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
}
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
if (mode == MLO_AN_PHY && phydev)
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
@@ -1000,7 +983,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -1024,7 +1007,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
@@ -1218,7 +1201,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phylink_validate = bcm_sf2_sw_validate,
+ .phylink_get_caps = bcm_sf2_sw_get_caps,
.phylink_mac_config = bcm_sf2_sw_mac_config,
.phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
.phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
@@ -1568,8 +1551,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
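The move from devm_mdiobus_alloc() to mdiobus_alloc() above makes the bus lifetime explicit: the allocation must now be paired with mdiobus_free() on both the registration error path and the unregister path. The shape, sketched with a hypothetical bus:

static int example_mdio_setup(struct mii_bus **out)
{
	struct mii_bus *bus = mdiobus_alloc();
	int err;

	if (!bus)
		return -ENOMEM;

	/* minimal init only; a real bus also sets id, parent, read/write */
	bus->name = "example";

	err = mdiobus_register(bus);
	if (err) {
		mdiobus_free(bus);	/* error path must free explicitly */
		return err;
	}

	*out = bus;
	return 0;	/* teardown order: mdiobus_unregister(), then mdiobus_free() */
}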
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index a7e2fcf2df2c..c4010b7bf089 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
int port, u32 location)
{
- struct cfp_rule *rule = NULL;
+ struct cfp_rule *rule;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
if (rule->port == port && rule->fs.location == location)
- break;
+ return rule;
}
- return rule;
+ return NULL;
}
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1296,7 +1296,7 @@ void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
"CFP%03d_%sCntr",
i, bcm_sf2_cfp_stats[j].name);
iter = (i - 1) * s + j;
- strlcpy(data + iter * ETH_GSTRING_LEN,
+ strscpy(data + iter * ETH_GSTRING_LEN,
buf, ETH_GSTRING_LEN);
}
}
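The bcm_sf2_cfp_rule_find() rewrite above sidesteps a classic list_for_each_entry() pitfall: when the loop runs to completion, the iterator points at a bogus container computed from the list head, never NULL, so returning it after a break-based search can hand back garbage. The safe shape, with a hypothetical entry type:

struct rule {
	struct list_head next;
	u32 location;
};

static struct rule *rule_find(struct list_head *rules, u32 location)
{
	struct rule *r;

	list_for_each_entry(r, rules, next)
		if (r->location == location)
			return r;	/* hit: return from inside the loop */

	return NULL;	/* loop completed: r is NOT NULL here, so be explicit */
}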
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 33daaf10c488..5b139f2206b6 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -168,7 +168,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
__func__, port, bridge.dev->name);
@@ -350,8 +351,6 @@ static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(ds);
dev_put(ps->netdev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
@@ -377,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = {
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+static void dsa_loop_phydevs_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (!IS_ERR(phydevs[i])) {
+ fixed_phy_unregister(phydevs[i]);
+ phy_device_free(phydevs[i]);
+ }
+}
+
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
@@ -384,23 +394,23 @@ static int __init dsa_loop_init(void)
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
unsigned int i;
+ int ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
- return mdio_driver_register(&dsa_loop_drv);
+ ret = mdio_driver_register(&dsa_loop_drv);
+ if (ret)
+ dsa_loop_phydevs_unregister();
+
+ return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
- unsigned int i;
-
mdio_driver_unregister(&dsa_loop_drv);
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i]))
- fixed_phy_unregister(phydevs[i]);
+ dsa_loop_phydevs_unregister();
}
module_exit(dsa_loop_exit);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 726f267cb228..951f7935c872 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -128,6 +128,16 @@ static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
hellcreek_write(hellcreek, val, HR_PSEL);
}
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
@@ -288,7 +298,7 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
counter->name, ETH_GSTRING_LEN);
}
}
@@ -675,7 +685,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
@@ -827,7 +838,8 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek,
}
static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -872,7 +884,8 @@ out:
}
static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -1534,6 +1547,45 @@ out:
return ret;
}
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
@@ -1717,7 +1769,10 @@ static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
- /* Then select port */
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
@@ -1769,7 +1824,10 @@ static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
hellcreek_port->current_schedule = NULL;
}
- /* Then select port */
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
@@ -1806,22 +1864,43 @@ static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
return true;
}
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
struct hellcreek *hellcreek = ds->priv;
- if (type != TC_SETUP_QDISC_TAPRIO)
- return -EOPNOTSUPP;
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
- if (!hellcreek_validate_schedule(hellcreek, taprio))
- return -EOPNOTSUPP;
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
- if (taprio->enable)
- return hellcreek_port_set_schedule(ds, port, taprio);
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
- return hellcreek_port_del_schedule(ds, port);
+ return hellcreek_port_del_schedule(ds, port);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
@@ -1891,11 +1970,8 @@ static int hellcreek_probe(struct platform_device *pdev)
if (!port->counter_values)
return -ENOMEM;
- port->vlan_dev_bitmap =
- devm_kcalloc(dev,
- BITS_TO_LONGS(VLAN_N_VID),
- sizeof(unsigned long),
- GFP_KERNEL);
+ port->vlan_dev_bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!port->vlan_dev_bitmap)
return -ENOMEM;
@@ -1996,7 +2072,6 @@ static int hellcreek_remove(struct platform_device *pdev)
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
- platform_set_drvdata(pdev, NULL);
return 0;
}
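A worked instance of the HR_PTPRTCCFG value composed in hellcreek_setup_maxsdu() above, assuming a queueMaxSDU of 800 bytes for one traffic class and the standard header sizes (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4):

	max_sdu = 800 + 18 - 4 = 814
	val     = (814 & GENMASK(10, 0)) << 0 = 814

The reset path programs HELLCREEK_DEFAULT_MAX_SDU (1536) through the same mask and shift.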
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9e303b8ab13c..4a678f7d61ae 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -37,6 +37,7 @@
#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
/* Register definitions */
#define HR_MODID_C (0 * 2)
@@ -72,6 +73,12 @@
#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
#define HR_CSEL (0x8d * 2)
#define HR_CSEL_SHIFT 0
#define HR_CSEL_MASK GENMASK(7, 0)
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index b3bc948d6145..ffd06cf8c44f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -331,7 +331,7 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek,
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 2572c6087bb5..b28baab6d56a 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -300,6 +300,7 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
const char *label, *state;
int ret = -EINVAL;
+ of_node_get(hellcreek->dev->of_node);
leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
if (!leds) {
dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d55784d19fa4..438e46af03e9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -10,6 +10,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
@@ -21,6 +22,10 @@
*/
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
@@ -31,6 +36,7 @@
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
@@ -850,15 +856,12 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
- if (!chip->reset_gpio) {
- dev_dbg(chip->dev,
- "hint: maybe failed due to missing reset GPIO\n");
- }
return ret;
}
- if ((reg >> 16) != LAN9303_CHIP_ID) {
- dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
@@ -874,7 +877,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
- dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
@@ -1083,28 +1086,35 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct lan9303 *chip = ds->priv;
@@ -1181,7 +1191,8 @@ static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
}
static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1193,8 +1204,8 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1238,7 +1249,8 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
int err;
@@ -1253,7 +1265,8 @@ static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1310,7 +1323,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
@@ -1338,6 +1351,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
+ u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
@@ -1348,6 +1362,19 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
lan9303_handle_reset(chip);
+ /* First read to the device. This is a dummy read to ensure MDIO
+ * access is in 32-bit sync.
+ */
+ ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 8ca4713310fa..7d746cd9ca1b 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -65,18 +65,14 @@ static int lan9303_i2c_probe(struct i2c_client *client,
return 0;
}
-static int lan9303_i2c_remove(struct i2c_client *client)
+static void lan9303_i2c_remove(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return 0;
+ return;
lan9303_remove(&sw_dev->chip);
-
- i2c_set_clientdata(client, NULL);
-
- return 0;
}
static void lan9303_i2c_shutdown(struct i2c_client *client)
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index bbb7032409ba..4f33369a2de5 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,8 +138,6 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
@@ -158,6 +156,7 @@ static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 46ed953e787e..05ecaa007ab1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -213,6 +213,7 @@
#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
/* Ethernet Switch Fetch DMA Port Control Register */
@@ -239,6 +240,15 @@
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
struct gswip_hw_info {
int max_ports;
int cpu_port;
@@ -498,8 +508,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
+ int err;
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
@@ -512,7 +523,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@@ -858,10 +873,6 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
@@ -878,6 +889,8 @@ static int gswip_setup(struct dsa_switch *ds)
return err;
}
+ ds->mtu_enforcement_ingress = true;
+
gswip_port_enable(ds, cpu_port, NULL);
ds->configure_vlan_while_not_filtering = false;
@@ -1147,7 +1160,8 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
@@ -1346,7 +1360,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int max_ports = priv->hw_info->max_ports;
int fid = -1;
int i;
int err;
@@ -1354,7 +1368,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
if (!bridge)
return -EINVAL;
- for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
fid = priv->vlans[i].fid;
break;
@@ -1384,13 +1398,15 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
}
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, true);
}
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, false);
}
@@ -1410,8 +1426,9 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
return err;
}
@@ -1441,6 +1458,39 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for the special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+ int cpu_port = priv->hw_info->cpu_port;
+
+ /* The CPU port always carries the maximum MTU of the user ports, so
+ * use it to set the switch frame size, including the 8-byte special
+ * header.
+ */
+ if (port == cpu_port) {
+ new_mtu += 8;
+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
+ }
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(port));
+ else
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+ GSWIP_MAC_CTRL_2p(port));
+
+ return 0;
+}
+
static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -1632,9 +1682,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
- /* Configure the RMII clock as output: */
- miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1786,6 +1833,8 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
@@ -1810,6 +1859,8 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
@@ -1938,11 +1989,9 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset)) {
- if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(gphy_fw->reset);
- }
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
return gswip_gphy_fw_load(priv, gphy_fw);
}
@@ -2019,8 +2068,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
- if (err)
+ if (err) {
+ of_node_put(gphy_fw_np);
goto remove_gphy;
+ }
i++;
}
@@ -2145,8 +2196,10 @@ disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
- if (mdio_np)
+ if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2170,13 +2223,12 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
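A worked instance of the gswip_port_change_mtu() arithmetic above, assuming the default MTU of 1500 (ETH_DATA_LEN) on the CPU port:

	new_mtu        = 1500 + 8 = 1508       (8-byte special header)
	GSWIP_MAC_FLEN = 18 + 1508 + 4 = 1530  (VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN)
	MLEN           stays enabled, since 1508 != ETH_DATA_LEN

and the largest user-port MTU gswip_port_max_mtu() reports is 2400 - 18 - 4 = 2378.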
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c9e2a8989556..06b1efdb5e7d 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,49 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NET_DSA_MICROCHIP_KSZ_COMMON
- select NET_DSA_TAG_KSZ
- tristate
-
-menuconfig NET_DSA_MICROCHIP_KSZ9477
- tristate "Microchip KSZ9477 series switch support"
+menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
help
- This driver adds support for Microchip KSZ9477 switch chips.
+ This driver adds support for the Microchip KSZ9477 series and
+ KSZ8795/KSZ88x3 switch chips.
config NET_DSA_MICROCHIP_KSZ9477_I2C
- tristate "KSZ9477 series I2C connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && I2C
+ tristate "KSZ series I2C connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
select REGMAP_I2C
help
Select to enable support for registering switches configured through I2C.
-config NET_DSA_MICROCHIP_KSZ9477_SPI
- tristate "KSZ9477 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
+config NET_DSA_MICROCHIP_KSZ_SPI
+ tristate "KSZ series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
-menuconfig NET_DSA_MICROCHIP_KSZ8795
- tristate "Microchip KSZ8795 series switch support"
- depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
- help
- This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips.
-
-config NET_DSA_MICROCHIP_KSZ8795_SPI
- tristate "KSZ8795 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
- select REGMAP_SPI
- help
- This driver accesses KSZ8795 chip through SPI.
-
- It is required to use the KSZ8795 switch driver as the only access
- is through SPI.
-
config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON
select MDIO_BITBANG
help
Select to enable support for registering switches configured through
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 2a03b21a3386..28873559efc2 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
+ksz_switch-objs := ksz_common.o
+ksz_switch-objs += ksz9477.o
+ksz_switch-objs += ksz8795.o
+ksz_switch-objs += lan937x_main.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 9d611895d3cf..8582b4b67d98 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -7,63 +7,55 @@
#ifndef __KSZ8XXX_H
#define __KSZ8XXX_H
-#include <linux/kernel.h>
-enum ksz_regs {
- REG_IND_CTRL_0,
- REG_IND_DATA_8,
- REG_IND_DATA_CHECK,
- REG_IND_DATA_HI,
- REG_IND_DATA_LO,
- REG_IND_MIB_CHECK,
- P_FORCE_CTRL,
- P_LINK_STATUS,
- P_LOCAL_CTRL,
- P_NEG_RESTART_CTRL,
- P_REMOTE_STATUS,
- P_SPEED_STATUS,
- S_TAIL_TAG_CTRL,
-};
+#include <linux/types.h>
+#include <net/dsa.h>
+#include "ksz_common.h"
-enum ksz_masks {
- PORT_802_1P_REMAPPING,
- SW_TAIL_TAG_ENABLE,
- MIB_COUNTER_OVERFLOW,
- MIB_COUNTER_VALID,
- VLAN_TABLE_FID,
- VLAN_TABLE_MEMBERSHIP,
- VLAN_TABLE_VALID,
- STATIC_MAC_TABLE_VALID,
- STATIC_MAC_TABLE_USE_FID,
- STATIC_MAC_TABLE_FID,
- STATIC_MAC_TABLE_OVERRIDE,
- STATIC_MAC_TABLE_FWD_PORTS,
- DYNAMIC_MAC_TABLE_ENTRIES_H,
- DYNAMIC_MAC_TABLE_MAC_EMPTY,
- DYNAMIC_MAC_TABLE_NOT_READY,
- DYNAMIC_MAC_TABLE_ENTRIES,
- DYNAMIC_MAC_TABLE_FID,
- DYNAMIC_MAC_TABLE_SRC_PORT,
- DYNAMIC_MAC_TABLE_TIMESTAMP,
-};
-
-enum ksz_shifts {
- VLAN_TABLE_MEMBERSHIP_S,
- VLAN_TABLE,
- STATIC_MAC_FWD_PORTS,
- STATIC_MAC_FID,
- DYNAMIC_MAC_ENTRIES_H,
- DYNAMIC_MAC_ENTRIES,
- DYNAMIC_MAC_FID,
- DYNAMIC_MAC_TIMESTAMP,
- DYNAMIC_MAC_SRC_PORT,
-};
-
-struct ksz8 {
- const u8 *regs;
- const u32 *masks;
- const u8 *shifts;
- void *priv;
-};
+int ksz8_setup(struct dsa_switch *ds);
+u32 ksz8_get_port_addr(int port, int offset);
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
+int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz8_port_init_cnt(struct ksz_device *dev, int port);
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz8_get_stp_reg(void);
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void ksz8_config_cpu_port(struct dsa_switch *ds);
+int ksz8_enable_stp_addr(struct ksz_device *dev);
+int ksz8_reset_switch(struct ksz_device *dev);
+int ksz8_switch_detect(struct ksz_device *dev);
+int ksz8_switch_init(struct ksz_device *dev);
+void ksz8_switch_exit(struct ksz_device *dev);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 991b9c6b6ce7..bd3b133e7085 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -26,190 +26,6 @@
#include "ksz8795_reg.h"
#include "ksz8.h"
-static const u8 ksz8795_regs[] = {
- [REG_IND_CTRL_0] = 0x6E,
- [REG_IND_DATA_8] = 0x70,
- [REG_IND_DATA_CHECK] = 0x72,
- [REG_IND_DATA_HI] = 0x71,
- [REG_IND_DATA_LO] = 0x75,
- [REG_IND_MIB_CHECK] = 0x74,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x07,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x08,
- [P_SPEED_STATUS] = 0x09,
- [S_TAIL_TAG_CTRL] = 0x0C,
-};
-
-static const u32 ksz8795_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(7),
- [SW_TAIL_TAG_ENABLE] = BIT(1),
- [MIB_COUNTER_OVERFLOW] = BIT(6),
- [MIB_COUNTER_VALID] = BIT(5),
- [VLAN_TABLE_FID] = GENMASK(6, 0),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
- [VLAN_TABLE_VALID] = BIT(12),
- [STATIC_MAC_TABLE_VALID] = BIT(21),
- [STATIC_MAC_TABLE_USE_FID] = BIT(23),
- [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
-};
-
-static const u8 ksz8795_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 7,
- [VLAN_TABLE] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 24,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 29,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 27,
- [DYNAMIC_MAC_SRC_PORT] = 24,
-};
-
-static const u8 ksz8863_regs[] = {
- [REG_IND_CTRL_0] = 0x79,
- [REG_IND_DATA_8] = 0x7B,
- [REG_IND_DATA_CHECK] = 0x7B,
- [REG_IND_DATA_HI] = 0x7C,
- [REG_IND_DATA_LO] = 0x80,
- [REG_IND_MIB_CHECK] = 0x80,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x0C,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x0E,
- [P_SPEED_STATUS] = 0x0F,
- [S_TAIL_TAG_CTRL] = 0x03,
-};
-
-static const u32 ksz8863_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(3),
- [SW_TAIL_TAG_ENABLE] = BIT(6),
- [MIB_COUNTER_OVERFLOW] = BIT(7),
- [MIB_COUNTER_VALID] = BIT(6),
- [VLAN_TABLE_FID] = GENMASK(15, 12),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
- [VLAN_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_USE_FID] = BIT(21),
- [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
-};
-
-static u8 ksz8863_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 22,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 24,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 24,
- [DYNAMIC_MAC_SRC_PORT] = 20,
-};
-
-struct mib_names {
- char string[ETH_GSTRING_LEN];
-};
-
-static const struct mib_names ksz87xx_mib_names[] = {
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "rx_1523_2000" },
- { "rx_2001" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_total" },
- { "tx_total" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static const struct mib_names ksz88xx_mib_names[] = {
- { "rx" },
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "tx" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static bool ksz_is_ksz88x3(struct ksz_device *dev)
-{
- return dev->chip_id == 0x8830;
-}
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -222,7 +38,27 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
-static int ksz8_reset_switch(struct ksz_device *dev)
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ if (!ret)
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
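ksz8_ind_write8() serializes an indirect register write under alu_mutex: the data byte is staged in REG_IND_BYTE, and writing the IND_ACC_TABLE() selector to REG_IND_CTRL_0 triggers the access. A minimal usage sketch (the call mirrors the errata write added further down in this patch; the wrapper name is illustrative only):

    /* Clear the indirect EEE next-page register, as
     * ksz8_handle_global_errata() does for errata DS80000687C.
     */
    static int ksz8_clear_eee_glob2(struct ksz_device *dev)
    {
    	return ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
    }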
+int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
/* reset switch */
@@ -273,20 +109,19 @@ static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
true);
}
-static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
- ctrl_addr = addr + dev->reg_mib_cnt * port;
+ ctrl_addr = addr + dev->info->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
@@ -312,18 +147,17 @@ static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
- addr -= dev->reg_mib_cnt;
+ addr -= dev->info->reg_mib_cnt;
ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -365,14 +199,15 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
u32 *last = (u32 *)dropped;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u32 cur;
- addr -= dev->reg_mib_cnt;
+ regs = dev->info->regs;
+
+ addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
@@ -394,8 +229,8 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
}
}
-static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
if (ksz_is_ksz88x3(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -403,7 +238,7 @@ static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
}
-static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
if (ksz_is_ksz88x3(dev))
return;
@@ -418,7 +253,7 @@ static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
-static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
+void ksz8_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
@@ -433,31 +268,30 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
mib->cnt_ptr = 0;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ regs = dev->info->regs;
+
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
@@ -468,10 +302,11 @@ static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ regs = dev->info->regs;
+
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
@@ -482,13 +317,12 @@ static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
- struct ksz8 *ksz8 = dev->priv;
int timeout = 100;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
do {
ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
@@ -509,22 +343,20 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
return 0;
}
-static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
- u8 *mac_addr, u8 *fid, u8 *src_port,
- u8 *timestamp, u16 *entries)
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u8 data;
int rc;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
@@ -574,17 +406,16 @@ static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
return rc;
}
-static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
data_hi = data >> 32;
@@ -613,17 +444,16 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
return -ENXIO;
}
-static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
data_lo = ((u32)alu->mac[2] << 24) |
((u32)alu->mac[3] << 16) |
@@ -649,12 +479,11 @@ static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
u8 *member, u8 *valid)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*fid = vlan & masks[VLAN_TABLE_FID];
*member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
@@ -665,12 +494,11 @@ static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
u16 *vlan)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*vlan = fid;
*vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
@@ -680,12 +508,11 @@ static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
u64 data;
int i;
- shifts = ksz8->shifts;
+ shifts = dev->info->shifts;
ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= 4;
@@ -725,21 +552,32 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, link;
- const u8 *regs = ksz8->regs;
int processed = true;
+ const u16 *regs;
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
if (restart & PORT_PHY_LOOPBACK)
data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
@@ -769,7 +607,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
data = BMSR_100FULL |
BMSR_100HALF |
BMSR_10FULL |
@@ -790,7 +631,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data = KSZ8795_ID_LO;
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= ADVERTISE_PAUSE_CAP;
@@ -804,7 +648,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= ADVERTISE_10HALF;
break;
case MII_LPA:
- ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= LPA_PAUSE_CAP;
@@ -820,8 +667,14 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= LPA_LPACK;
break;
case PHY_REG_LINK_MD:
- ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
- ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
if (val1 & PORT_START_CABLE_DIAG)
data |= PHY_START_CABLE_DIAG;
@@ -836,7 +689,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
break;
case PHY_REG_PHY_CTRL:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
if (link & PORT_MDIX_STATUS)
data |= KSZ886X_CTRL_MDIX_STAT;
break;
@@ -846,14 +702,18 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
}
if (processed)
*val = data;
+
+ return 0;
}
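
Converting ksz8_r_phy() from void to int lets every ksz_pread8() failure propagate instead of being silently dropped, so callers can forward the errno to the MDIO layer. An illustrative (non-upstream) caller sketch:

    static int example_phy_read(struct ksz_device *dev, u16 addr, u16 reg)
    {
    	u16 val;
    	int ret;

    	ret = ksz8_r_phy(dev, addr, reg, &val);
    	if (ret)
    		return ret;	/* e.g. -EIO from the underlying bus */

    	return val;
    }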
-static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, data;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u8 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
@@ -861,15 +721,26 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
/* Do not support PHY reset function. */
if (val & BMCR_RESET)
break;
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
data = speed;
if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
- if (data != speed)
- ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+
+ if (data != speed) {
+ ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
if ((val & BMCR_ANENABLE))
@@ -895,9 +766,17 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
data = restart;
if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
@@ -927,11 +806,19 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
- if (data != restart)
- ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
+
+ if (data != restart) {
+ ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
+ data);
+ if (ret)
+ return ret;
+ }
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
@@ -948,8 +835,12 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_10BT_FD;
if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
break;
case PHY_REG_LINK_MD:
if (val & PHY_START_CABLE_DIAG)
@@ -958,44 +849,11 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
default:
break;
}
-}
-
-static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- struct ksz_device *dev = ds->priv;
-
- /* ksz88x3 uses the same tag schema as KSZ9893 */
- return ksz_is_ksz88x3(dev) ?
- DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
-}
-
-static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
-{
- /* Silicon Errata Sheet (DS80000830A):
- * Port 1 does not work with LinkMD Cable-Testing.
- * Port 1 does not respond to received PAUSE control frames.
- */
- if (!port)
- return MICREL_KSZ8_P1_ERRATA;
return 0;
}
-static void ksz8_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- struct ksz_device *dev = ds->priv;
- int i;
-
- for (i = 0; i < dev->mib_cnt; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN,
- dev->mib_names[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
@@ -1005,65 +863,30 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
}
-static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
-
- p = &dev->ports[port];
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
-}
-
-static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
struct ksz_port *p;
+ const u16 *regs;
+
+ regs = dev->info->regs;
- if ((uint)port < dev->port_cnt) {
+ if ((uint)port < dev->info->port_cnt) {
first = port;
cnt = port + 1;
} else {
/* Flush all ports. */
first = 0;
- cnt = dev->port_cnt;
+ cnt = dev->info->port_cnt;
}
for (index = first; index < cnt; index++) {
p = &dev->ports[index];
if (!p->on)
continue;
- ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
+ ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL,
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
@@ -1072,15 +895,113 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
if (!p->on)
continue;
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
}
}
-static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
- struct netlink_ext_ack *extack)
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 member;
+ struct alu_struct alu;
+
+ do {
+ alu.is_static = false;
+ ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
+ &timestamp, &entries);
+ if (!ret && (member & BIT(port))) {
+ ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (ret)
+ break;
+ }
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
+
+ return ret;
+}
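
ksz8_fdb_dump() walks the dynamic MAC table and invokes the generic DSA dump callback for each entry learned on the given port; a non-zero return from the callback aborts the walk. A hedged consumer sketch (callback type as declared in include/net/dsa.h; the counting logic is purely illustrative):

    static int count_fdb_entries(const unsigned char *addr, u16 vid,
    			     bool is_static, void *data)
    {
    	unsigned int *count = data;

    	(*count)++;
    	return 0;	/* non-zero would stop the dump early */
    }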
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ struct alu_struct alu;
+ int index;
+ int empty = 0;
+
+ alu.port_forward = 0;
+ for (index = 0; index < dev->info->num_statics; index++) {
+ if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
+ break;
+ /* Remember the first empty entry. */
+ } else if (!empty) {
+ empty = index + 1;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics && !empty)
+ return -ENOSPC;
+
+ /* add entry */
+ if (index == dev->info->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, mdb->addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (mdb->vid) {
+ alu.is_use_fid = true;
+
+ /* Need a way to map VID to FID. */
+ alu.fid = mdb->vid;
+ }
+ ksz8_w_sta_mac_table(dev, index, &alu);
+ return 0;
+}
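
ksz8_mdb_add() reuses a matching static MAC entry when the group and FID already exist, otherwise takes the first free slot noted during the scan, and returns -ENOSPC once the table is full. A hedged call sketch (struct fields per include/net/switchdev.h; ksz8 ignores the db argument, and the port number here is arbitrary):

    static int example_mdb_add(struct ksz_device *dev)
    {
    	/* 224.0.0.1 maps to MAC 01:00:5e:00:00:01 */
    	struct switchdev_obj_port_mdb mdb = {
    		.addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
    		.vid = 0,	/* vid 0: no FID lookup */
    	};
    	struct dsa_db db = { .type = DSA_DB_BRIDGE };

    	return ksz8_mdb_add(dev, 1, &mdb, db);
    }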
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ struct alu_struct alu;
+ int index;
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
+ break;
+ }
+ }
+
+	/* no matching entry found */
+ if (index == dev->info->num_statics)
+ goto exit;
+
+ /* clear port */
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+exit:
+ return 0;
+}
+
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+{
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
@@ -1105,12 +1026,11 @@ static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
}
}
-static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;
@@ -1131,7 +1051,7 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
* Remove Tag flag to be changed, unless there are no
* other VLANs currently configured.
*/
- for (vid = 1; vid < dev->num_vlans; ++vid) {
+ for (vid = 1; vid < dev->info->num_vlans; ++vid) {
/* Skip the VID we are going to add or reconfigure */
if (vid == vlan->vid)
continue;
@@ -1178,10 +1098,9 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
u16 data, pvid;
u8 fid, member, valid;
@@ -1211,12 +1130,10 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (ingress) {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
@@ -1235,10 +1152,9 @@ static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
u8 data;
if (mirror->ingress) {
@@ -1259,7 +1175,6 @@ static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
- u8 data8;
if (!p->interface && dev->compat_interface) {
dev_warn(dev->dev,
@@ -1268,50 +1183,15 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
port);
p->interface = dev->compat_interface;
}
-
- /* Configure MII interface for proper network communication. */
- ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
- data8 &= ~PORT_INTERFACE_TYPE;
- data8 &= ~PORT_GMII_1GPS_MODE;
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- data8 |= PORT_INTERFACE_RMII;
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_GMII;
- p->phydev.speed = SPEED_1000;
- break;
- default:
- data8 &= ~PORT_RGMII_ID_IN_ENABLE;
- data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IN_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_OUT_ENABLE;
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_RGMII;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
- p->phydev.duplex = 1;
}
-static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
u8 member;
- masks = ksz8->masks;
+ masks = dev->info->masks;
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
@@ -1341,17 +1221,17 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz8_cfg_port_member(dev, port, member);
}
-static void ksz8_config_cpu_port(struct dsa_switch *ds)
+void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
struct ksz_port *p;
const u32 *masks;
+ const u16 *regs;
u8 remote;
int i;
- masks = ksz8->masks;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
/* Switch marks the maximum frame with extra byte as oversize. */
ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
@@ -1365,13 +1245,12 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
if (i == dev->phy_port_cnt)
break;
p->on = 1;
- p->phy = 1;
}
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
@@ -1379,34 +1258,55 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
continue;
if (!ksz_is_ksz88x3(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
- if (remote & PORT_FIBER_MODE)
+ if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
}
if (p->fiber)
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- true);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, true);
else
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- false);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, false);
}
}
-static int ksz8_setup(struct dsa_switch *ds)
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* KSZ87xx Errata DS80000687C.
+ * Module 2: Link drops with some EEE link partners.
+ * An issue with the EEE next page exchange between the
+ * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+ * the link dropping.
+ */
+ if (dev->info->ksz87xx_eee_link_erratum)
+ ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+ return ret;
+}
+
+int ksz8_enable_stp_addr(struct ksz_device *dev)
+{
struct alu_struct alu;
- int i, ret = 0;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->info->cpu_ports;
+
+ ksz8_w_sta_mac_table(dev, 0, &alu);
- ret = ksz8_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
+ return 0;
+}
+
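ksz8_enable_stp_addr() pins the bridge group address into static entry 0 so that BPDUs always reach the CPU port(s); the override flag lets them through even when a port is blocking. For reference (background, not part of the patch):

    /* eth_stp_addr, declared in <linux/etherdevice.h>, is the reserved
     * IEEE 802.1D bridge group address 01:80:c2:00:00:00.
     */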
+int ksz8_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
@@ -1425,10 +1325,6 @@ static int ksz8_setup(struct dsa_switch *ds)
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
- ksz8_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
-
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
@@ -1436,52 +1332,16 @@ static int ksz8_setup(struct dsa_switch *ds)
if (!ksz_is_ksz88x3(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
- for (i = 0; i < (dev->num_vlans / 4); i++)
+ for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- /* Setup STP address for STP operation. */
- memset(&alu, 0, sizeof(alu));
- ether_addr_copy(alu.mac, eth_stp_addr);
- alu.is_static = true;
- alu.is_override = true;
- alu.port_forward = dev->host_mask;
-
- ksz8_w_sta_mac_table(dev, 0, &alu);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
+ return ksz8_handle_global_errata(ds);
}
-static void ksz8_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct ksz_device *dev = ds->priv;
-
- if (port == dev->cpu_port) {
- if (state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
- } else {
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
- }
-
- /* Allow all the expected bits */
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
+ config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
* "Port 1 does not respond to received flow control PAUSE frames"
@@ -1489,232 +1349,23 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
* switches.
*/
if (!ksz_is_ksz88x3(dev) || port)
- phylink_set(mask, Pause);
+ config->mac_capabilities |= MAC_SYM_PAUSE;
/* Asym pause is not supported on KSZ8863 and KSZ8873 */
if (!ksz_is_ksz88x3(dev))
- phylink_set(mask, Asym_Pause);
-
- /* 10M and 100M are only supported */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities |= MAC_ASYM_PAUSE;
}
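
The phylink_validate hook is thus replaced by static capability reporting: 10/100 always, symmetric pause except on the errata-affected KSZ88x3 port 1, and asymmetric pause only on non-KSZ88x3 parts. As a sketch, the effective mask on a KSZ8795 port (MAC_* flags from include/linux/phylink.h):

    /* Errata DS80000830A does not apply, so both pause directions
     * are advertised.
     */
    config->mac_capabilities = MAC_10 | MAC_100 |
    			       MAC_SYM_PAUSE | MAC_ASYM_PAUSE;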
-static const struct dsa_switch_ops ksz8_switch_ops = {
- .get_tag_protocol = ksz8_get_tag_protocol,
- .get_phy_flags = ksz8_sw_get_phy_flags,
- .setup = ksz8_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .phylink_validate = ksz8_validate,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz8_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz8_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz8_port_vlan_filtering,
- .port_vlan_add = ksz8_port_vlan_add,
- .port_vlan_del = ksz8_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz8_port_mirror_add,
- .port_mirror_del = ksz8_port_mirror_del,
-};
-
-static u32 ksz8_get_port_addr(int port, int offset)
+u32 ksz8_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz8_switch_detect(struct ksz_device *dev)
-{
- u8 id1, id2;
- u16 id16;
- int ret;
-
- /* read chip id */
- ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
- if (ret)
- return ret;
-
- id1 = id16 >> 8;
- id2 = id16 & SW_CHIP_ID_M;
-
- switch (id1) {
- case KSZ87_FAMILY_ID:
- if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
- return -ENODEV;
-
- if (id2 == CHIP_ID_95) {
- u8 val;
-
- id2 = 0x95;
- ksz_read8(dev, REG_PORT_STATUS_0, &val);
- if (val & PORT_FIBER_MODE)
- id2 = 0x65;
- } else if (id2 == CHIP_ID_94) {
- id2 = 0x94;
- }
- break;
- case KSZ88_FAMILY_ID:
- if (id2 != CHIP_ID_63)
- return -ENODEV;
- break;
- default:
- dev_err(dev->dev, "invalid family id: %d\n", id1);
- return -ENODEV;
- }
- id16 &= ~0xff;
- id16 |= id2;
- dev->chip_id = id16;
-
- return 0;
-}
-
-struct ksz_chip_data {
- u16 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
-};
-
-static const struct ksz_chip_data ksz8_switch_chips[] = {
- {
- .chip_id = 0x8795,
- .dev_name = "KSZ8795",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- },
- {
- /*
- * WARNING
- * =======
- * KSZ8794 is similar to KSZ8795, except the port map
- * contains a gap between external and CPU ports, the
- * port map is NOT continuous. The per-port register
- * map is shifted accordingly too, i.e. registers at
- * offset 0x40 are NOT used on KSZ8794 and they ARE
- * used on KSZ8795 for external port 3.
- * external cpu
- * KSZ8794 0,1,2 4
- * KSZ8795 0,1,2,3 4
- * KSZ8765 0,1,2,3 4
- */
- .chip_id = 0x8794,
- .dev_name = "KSZ8794",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 4, /* total cpu and user ports */
- },
- {
- .chip_id = 0x8765,
- .dev_name = "KSZ8765",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- },
- {
- .chip_id = 0x8830,
- .dev_name = "KSZ8863/KSZ8873",
- .num_vlans = 16,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x4, /* can be configured as cpu port */
- .port_cnt = 3,
- },
-};
-
-static int ksz8_switch_init(struct ksz_device *dev)
+int ksz8_switch_init(struct ksz_device *dev)
{
- struct ksz8 *ksz8 = dev->priv;
- int i;
-
- dev->ds->ops = &ksz8_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz8_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = fls(chip->cpu_ports);
- dev->cpu_port = fls(chip->cpu_ports) - 1;
- dev->phy_port_cnt = dev->port_cnt - 1;
- dev->cpu_ports = chip->cpu_ports;
- dev->host_mask = chip->cpu_ports;
- dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
- chip->cpu_ports;
- break;
- }
- }
-
- /* no switch found */
- if (!dev->cpu_ports)
- return -ENODEV;
-
- if (ksz_is_ksz88x3(dev)) {
- ksz8->regs = ksz8863_regs;
- ksz8->masks = ksz8863_masks;
- ksz8->shifts = ksz8863_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names);
- dev->mib_names = ksz88xx_mib_names;
- } else {
- ksz8->regs = ksz8795_regs;
- ksz8->masks = ksz8795_masks;
- ksz8->shifts = ksz8795_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names);
- dev->mib_names = ksz87xx_mib_names;
- }
-
- dev->reg_mib_cnt = MIB_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (dev->mib_cnt + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
-
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
+ dev->cpu_port = fls(dev->info->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->info->port_cnt - 1;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
@@ -1729,37 +1380,11 @@ static int ksz8_switch_init(struct ksz_device *dev)
return 0;
}
-static void ksz8_switch_exit(struct ksz_device *dev)
+void ksz8_switch_exit(struct ksz_device *dev)
{
ksz8_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz8_dev_ops = {
- .get_port_addr = ksz8_get_port_addr,
- .cfg_port_member = ksz8_cfg_port_member,
- .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
- .port_setup = ksz8_port_setup,
- .r_phy = ksz8_r_phy,
- .w_phy = ksz8_w_phy,
- .r_dyn_mac_table = ksz8_r_dyn_mac_table,
- .r_sta_mac_table = ksz8_r_sta_mac_table,
- .w_sta_mac_table = ksz8_w_sta_mac_table,
- .r_mib_cnt = ksz8_r_mib_cnt,
- .r_mib_pkt = ksz8_r_mib_pkt,
- .freeze_mib = ksz8_freeze_mib,
- .port_init_cnt = ksz8_port_init_cnt,
- .shutdown = ksz8_reset_switch,
- .detect = ksz8_switch_detect,
- .init = ksz8_switch_init,
- .exit = ksz8_switch_exit,
-};
-
-int ksz8_switch_register(struct ksz_device *dev)
-{
- return ksz_switch_register(dev, &ksz8_dev_ops);
-}
-EXPORT_SYMBOL(ksz8_switch_register);
-
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 6b40bc25f7ff..77487d611824 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -14,22 +14,8 @@
#define KS_PRIO_M 0x3
#define KS_PRIO_S 2
-#define REG_CHIP_ID0 0x00
-
-#define KSZ87_FAMILY_ID 0x87
-#define KSZ88_FAMILY_ID 0x88
-
-#define REG_CHIP_ID1 0x01
-
-#define SW_CHIP_ID_M 0xF0
-#define SW_CHIP_ID_S 4
#define SW_REVISION_M 0x0E
#define SW_REVISION_S 1
-#define SW_START 0x01
-
-#define CHIP_ID_94 0x60
-#define CHIP_ID_95 0x90
-#define CHIP_ID_63 0x30
#define KSZ8863_REG_SW_RESET 0x43
@@ -57,7 +43,6 @@
#define REG_SW_CTRL_2 0x04
#define UNICAST_VLAN_BOUNDARY BIT(7)
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -77,13 +62,9 @@
#define SW_FLOW_CTRL BIT(5)
#define SW_10_MBIT BIT(4)
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_CTRL_5 0x07
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_CTRL_6 0x08
#define SW_MIB_COUNTER_FLUSH BIT(7)
@@ -160,9 +141,6 @@
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
#define PORT_BACK_PRESSURE BIT(3)
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
#define REG_PORT_1_CTRL_3 0x13
#define REG_PORT_2_CTRL_3 0x23
@@ -192,15 +170,7 @@
#define REG_PORT_5_CTRL_6 0x56
#define PORT_MII_INTERNAL_CLOCK BIT(7)
-#define PORT_GMII_1GPS_MODE BIT(6)
-#define PORT_RGMII_ID_IN_ENABLE BIT(4)
-#define PORT_RGMII_ID_OUT_ENABLE BIT(3)
#define PORT_GMII_MAC_MODE BIT(2)
-#define PORT_INTERFACE_TYPE 0x3
-#define PORT_INTERFACE_MII 0
-#define PORT_INTERFACE_RMII 1
-#define PORT_INTERFACE_GMII 2
-#define PORT_INTERFACE_RGMII 3
#define REG_PORT_1_CTRL_7 0x17
#define REG_PORT_2_CTRL_7 0x27
@@ -220,8 +190,6 @@
#define REG_PORT_4_STATUS_0 0x48
/* For KSZ8765. */
-#define PORT_FIBER_MODE BIT(7)
-
#define PORT_REMOTE_ASYM_PAUSE BIT(5)
#define PORT_REMOTE_SYM_PAUSE BIT(4)
#define PORT_REMOTE_100BTX_FD BIT(3)
@@ -325,7 +293,6 @@
#define REG_PORT_CTRL_5 0x05
-#define REG_PORT_STATUS_0 0x08
#define REG_PORT_STATUS_1 0x09
#define REG_PORT_LINK_MD_CTRL 0x0A
#define REG_PORT_LINK_MD_RESULT 0x0B
@@ -791,7 +758,6 @@
#define P_TAG_CTRL REG_PORT_CTRL_0
#define P_MIRROR_CTRL REG_PORT_CTRL_1
#define P_802_1P_CTRL REG_PORT_CTRL_2
-#define P_STP_CTRL REG_PORT_CTRL_2
#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
@@ -812,11 +778,9 @@
#define IND_ACC_TABLE(table) ((table) << 8)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
+/* Indirect EEE register offsets used by the KSZ87xx errata workaround */
+#define REG_IND_EEE_GLOB2_LO 0x34
+#define REG_IND_EEE_GLOB2_HI 0x35
/**
* MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
deleted file mode 100644
index 866767b70d65..000000000000
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Microchip KSZ8795 series register access through SPI
- *
- * Copyright (C) 2017 Microchip Technology Inc.
- * Tristram Ha <Tristram.Ha@microchip.com>
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz8.h"
-#include "ksz_common.h"
-
-#define KSZ8795_SPI_ADDR_SHIFT 12
-#define KSZ8795_SPI_ADDR_ALIGN 3
-#define KSZ8795_SPI_TURNAROUND_SHIFT 1
-
-#define KSZ8863_SPI_ADDR_SHIFT 8
-#define KSZ8863_SPI_ADDR_ALIGN 8
-#define KSZ8863_SPI_TURNAROUND_SHIFT 0
-
-KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
- KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
-
-KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
- KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
-
-static int ksz8795_spi_probe(struct spi_device *spi)
-{
- const struct regmap_config *regmap_config;
- struct device *ddev = &spi->dev;
- struct regmap_config rc;
- struct ksz_device *dev;
- struct ksz8 *ksz8;
- int i, ret = 0;
-
- ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = spi;
-
- dev = ksz_switch_alloc(&spi->dev, ksz8);
- if (!dev)
- return -ENOMEM;
-
- regmap_config = device_get_match_data(ddev);
- if (!regmap_config)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- rc = regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz8_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz8795_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
-}
-
-static void ksz8795_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (!dev)
- return;
-
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
-
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz8795_dt_ids[] = {
- { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config },
- { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
-
-static struct spi_driver ksz8795_spi_driver = {
- .driver = {
- .name = "ksz8795-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz8795_dt_ids),
- },
- .probe = ksz8795_spi_probe,
- .remove = ksz8795_spi_remove,
- .shutdown = ksz8795_spi_shutdown,
-};
-
-module_spi_driver(ksz8795_spi_driver);
-
-MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5883fa7edda2..ddb40838181e 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -26,11 +26,9 @@ static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
struct mdio_device *mdev;
u8 reg = *(u8 *)reg_buf;
u8 *val = val_buf;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < val_len; i++) {
@@ -55,13 +53,11 @@ static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
- struct ksz8 *ksz8;
int i, ret = 0;
u32 reg;
u8 *val;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
val = (u8 *)(data + 4);
reg = *(u32 *)data;
@@ -142,17 +138,10 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
{
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int ret;
int i;
- ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = mdiodev;
-
- dev = ksz_switch_alloc(&mdiodev->dev, ksz8);
+ dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
if (!dev)
return -ENOMEM;
@@ -174,7 +163,7 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
if (mdiodev->dev.platform_data)
dev->pdata = mdiodev->dev.platform_data;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -191,8 +180,6 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
@@ -206,8 +193,14 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
}
static const struct of_device_id ksz8863_dt_ids[] = {
- { .compatible = "microchip,ksz8863" },
- { .compatible = "microchip,ksz8873" },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 353b5f981740..a6a0321a8931 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -11,58 +11,13 @@
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz9477_reg.h"
#include "ksz_common.h"
-
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define NEW_XMII BIT(1)
-#define IS_9893 BIT(2)
-
-static const struct {
- int index;
- char string[ETH_GSTRING_LEN];
-} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
- { 0x00, "rx_hi" },
- { 0x01, "rx_undersize" },
- { 0x02, "rx_fragments" },
- { 0x03, "rx_oversize" },
- { 0x04, "rx_jabbers" },
- { 0x05, "rx_symbol_err" },
- { 0x06, "rx_crc_err" },
- { 0x07, "rx_align_err" },
- { 0x08, "rx_mac_ctrl" },
- { 0x09, "rx_pause" },
- { 0x0A, "rx_bcast" },
- { 0x0B, "rx_mcast" },
- { 0x0C, "rx_ucast" },
- { 0x0D, "rx_64_or_less" },
- { 0x0E, "rx_65_127" },
- { 0x0F, "rx_128_255" },
- { 0x10, "rx_256_511" },
- { 0x11, "rx_512_1023" },
- { 0x12, "rx_1024_1522" },
- { 0x13, "rx_1523_2000" },
- { 0x14, "rx_2001" },
- { 0x15, "tx_hi" },
- { 0x16, "tx_late_col" },
- { 0x17, "tx_pause" },
- { 0x18, "tx_bcast" },
- { 0x19, "tx_mcast" },
- { 0x1A, "tx_ucast" },
- { 0x1B, "tx_deferred" },
- { 0x1C, "tx_total_col" },
- { 0x1D, "tx_exc_col" },
- { 0x1E, "tx_single_col" },
- { 0x1F, "tx_mult_col" },
- { 0x80, "rx_total" },
- { 0x81, "tx_total" },
- { 0x82, "rx_discards" },
- { 0x83, "tx_discards" },
-};
+#include "ksz9477.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
@@ -88,6 +43,28 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size, max_frame = 0;
+ int i;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ /* Cache the per-port MTU setting */
+ dev->ports[port].max_frame = frame_size;
+
+ for (i = 0; i < dev->info->port_cnt; i++)
+ max_frame = max(max_frame, dev->ports[i].max_frame);
+
+ return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
+ REG_SW_MTU_MASK, max_frame);
+}
+
+int ksz9477_max_mtu(struct ksz_device *dev, int port)
+{
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
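
ksz9477_change_mtu() converts the L3 MTU into an on-wire frame budget and programs the switch-wide limit from the largest per-port value. A quick worked check, assuming the usual header sizes from if_vlan.h and if_ether.h:

    /* frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN
     *            = 1500 + 18 + 4 = 1522 bytes for the default MTU;
     * REG_SW_MTU__2 then holds the maximum frame_size over all ports.
     */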
+
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -193,7 +170,7 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
-static int ksz9477_reset_switch(struct ksz_device *dev)
+int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
u32 data32;
@@ -216,21 +193,22 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], REG_SW_MAC_CTRL_2,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ return 0;
- if (dev->synclko_125)
- ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ);
+ data8 = SW_ENABLE_REFCLKO;
+ if (dev->synclko_disable)
+ data8 = 0;
+ else if (dev->synclko_125)
+ data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
return 0;
}
-static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt)
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
struct ksz_port *p = &dev->ports[port];
unsigned int val;
@@ -257,14 +235,14 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
*cnt += data;
}
-static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
- addr = ksz9477_mib_names[addr].index;
+ addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
-static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
struct ksz_port *p = &dev->ports[port];
@@ -278,7 +256,7 @@ static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -289,27 +267,22 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
mutex_unlock(&mib->cnt_mutex);
-
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
-static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
{
- enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
- struct ksz_device *dev = ds->priv;
-
- if (dev->features & IS_9893)
- proto = DSA_TAG_PROTO_KSZ9893;
- return proto;
+	/* KSZ8563R does not have extended registers, but the BMSR_ESTATEN and
+ * BMSR_ERCAP bits are set.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}
-static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be setup in the device tree, but this function is
@@ -317,7 +290,7 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used. For SGMII PHY the supporting code will be added later.
*/
- if (addr >= dev->phy_port_cnt) {
+ if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
@@ -350,114 +323,58 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
break;
}
} else {
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
- }
-
- return val;
-}
-
-static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
- u16 val)
-{
- struct ksz_device *dev = ds->priv;
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
- /* No real PHY after this. */
- if (addr >= dev->phy_port_cnt)
- return 0;
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
+ }
- /* No gigabit support. Do not write to this register. */
- if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return 0;
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ *data = val;
return 0;
}
-static void ksz9477_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
+ /* No real PHY after this. */
+ if (!dev->info->internal_phy[addr])
+ return 0;
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
- ETH_GSTRING_LEN);
- }
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
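/*
 * Sketch of the internal-PHY register mapping used by ksz9477_r_phy() and
 * ksz9477_w_phy() above: standard MII register N sits at port offset
 * 0x100 + (N << 1), i.e. each 16-bit PHY register occupies two bytes of
 * the per-port address space. Derived purely from that expression.
 */
#include <stdio.h>

int main(void)
{
	/* BMCR=0, BMSR=1, PHYSID1=2, PHYSID2=3 in the standard MII layout */
	for (int reg = 0; reg < 4; reg++)
		printf("MII reg %d -> port offset 0x%03x\n",
		       reg, 0x100 + (reg << 1));
	return 0;
}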
-static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
- u8 member)
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}
-static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
-}
-
-static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
+ const u16 *regs = dev->info->regs;
u8 data;
regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
- if (port < dev->port_cnt) {
+ if (port < dev->info->port_cnt) {
/* flush individual port */
- ksz_pread8(dev, port, P_STP_CTRL, &data);
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
if (!(data & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, port, P_STP_CTRL,
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL],
data | PORT_LEARN_DISABLE);
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
} else {
/* flush all */
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
}
}
-static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -471,11 +388,10 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
int err;
@@ -508,10 +424,9 @@ static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
u16 pvid;
@@ -542,10 +457,9 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -599,10 +513,9 @@ exit:
return ret;
}
-static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -689,10 +602,9 @@ static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
alu->mac[5] = alu_table[3] & 0xFF;
}
-static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
int ret = 0;
u32 ksz_data;
u32 alu_table[4];
@@ -719,6 +631,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
@@ -741,26 +656,30 @@ exit:
return ret;
}
-static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
u32 mac_hi, mac_lo;
int err = 0;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -788,7 +707,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics) {
+ if (index == dev->info->num_statics) {
err = -ENOSPC;
goto exit;
}
@@ -804,7 +723,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -816,26 +735,30 @@ exit:
return err;
}
-static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
int ret = 0;
u32 mac_hi, mac_lo;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -861,7 +784,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->info->num_statics)
goto exit;
/* clear port */
@@ -877,7 +800,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -891,19 +814,36 @@ exit:
return ret;
}
-static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
+ u8 data;
+ int p;
+
+ /* Limit the switch to one sniffer port: check whether any port is
+ * already set for sniffing and, if so, instruct the user to remove
+ * the previous entry and exit.
+ */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ /* Skip the current sniffing port */
+ if (p == mirror->to_local_port)
+ continue;
+
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if (data & PORT_MIRROR_SNIFFER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+ }
if (ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
/* configure mirror port */
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, true);
@@ -913,160 +853,47 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
+ bool in_use = false;
u8 data;
+ int p;
if (mirror->ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
-
- if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, false);
-}
-
-static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
-{
- bool gbit;
-
- if (dev->features & NEW_XMII)
- gbit = !(data & PORT_MII_NOT_1GBIT);
- else
- gbit = !!(data & PORT_MII_1000MBIT_S1);
- return gbit;
-}
-
-static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data)
-{
- if (dev->features & NEW_XMII) {
- if (gbit)
- *data &= ~PORT_MII_NOT_1GBIT;
- else
- *data |= PORT_MII_NOT_1GBIT;
- } else {
- if (gbit)
- *data |= PORT_MII_1000MBIT_S1;
- else
- *data &= ~PORT_MII_1000MBIT_S1;
- }
-}
-static int ksz9477_get_xmii(struct ksz_device *dev, u8 data)
-{
- int mode;
+ /* Check whether any port still refers to the sniffer port */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
- if (dev->features & NEW_XMII) {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL:
- mode = 0;
- break;
- case PORT_RMII_SEL:
- mode = 1;
- break;
- case PORT_GMII_SEL:
- mode = 2;
- break;
- default:
- mode = 3;
- }
- } else {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL_S1:
- mode = 0;
- break;
- case PORT_RMII_SEL_S1:
- mode = 1;
- break;
- case PORT_GMII_SEL_S1:
- mode = 2;
+ if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
+ in_use = true;
break;
- default:
- mode = 3;
}
}
- return mode;
-}
-
-static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data)
-{
- u8 xmii;
- if (dev->features & NEW_XMII) {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL;
- break;
- case 1:
- xmii = PORT_RMII_SEL;
- break;
- case 2:
- xmii = PORT_GMII_SEL;
- break;
- default:
- xmii = PORT_RGMII_SEL;
- break;
- }
- } else {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL_S1;
- break;
- case 1:
- xmii = PORT_RMII_SEL_S1;
- break;
- case 2:
- xmii = PORT_GMII_SEL_S1;
- break;
- default:
- xmii = PORT_RGMII_SEL_S1;
- break;
- }
- }
- *data &= ~PORT_MII_SEL_M;
- *data |= xmii;
+ /* stop sniffing if no other mirroring rules remain */
+ if (!in_use)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
}
static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
phy_interface_t interface;
bool gbit;
- int mode;
- u8 data8;
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
return PHY_INTERFACE_MODE_NA;
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- gbit = ksz9477_get_gbit(dev, data8);
- mode = ksz9477_get_xmii(dev, data8);
- switch (mode) {
- case 2:
- interface = PHY_INTERFACE_MODE_GMII;
- if (gbit)
- break;
- fallthrough;
- case 0:
- interface = PHY_INTERFACE_MODE_MII;
- break;
- case 1:
- interface = PHY_INTERFACE_MODE_RMII;
- break;
- default:
- interface = PHY_INTERFACE_MODE_RGMII;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_TXID;
- if (data8 & PORT_RGMII_ID_IG_ENABLE) {
- interface = PHY_INTERFACE_MODE_RGMII_RXID;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_ID;
- }
- break;
- }
+
+ gbit = ksz_get_gbit(dev, port);
+
+ interface = ksz_get_xmii(dev, port, gbit);
+
return interface;
}
@@ -1105,7 +932,7 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
/* Energy Efficient Ethernet (EEE) feature select must
* be manually disabled (except on KSZ8565 which is 100Mbit)
*/
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
/* Register settings are required to meet data sheet
@@ -1126,12 +953,46 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}
-static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (dev->info->gbit_capable[port])
+ config->mac_capabilities |= MAC_1000FD;
+}
+
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u8 value;
+ u8 data;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ if (ret < 0)
+ return ret;
+
+ data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+
+ return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+}
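/*
 * User-space sketch of the ageing-time split above: the seconds value is
 * an 11-bit quantity whose low byte goes to REG_SW_LUE_CTRL_3
 * (SW_AGE_PERIOD_7_0_M) and whose bits 10:8 land in the SW_AGE_CNT_M
 * field (bits 5:3) of REG_SW_LUE_CTRL_0. GENMASK/FIELD_GET/FIELD_PREP are
 * re-derived locally so the sketch compiles outside the kernel.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_GET(m, v)		(((v) & (m)) >> __builtin_ctz(m))
#define FIELD_PREP(m, v)	(((v) << __builtin_ctz(m)) & (m))

#define SW_AGE_PERIOD_7_0_M	GENMASK(7, 0)
#define SW_AGE_PERIOD_10_8_M	GENMASK(10, 8)
#define SW_AGE_CNT_M		GENMASK(5, 3)

int main(void)
{
	unsigned int secs = 300;	/* 5 minute ageing time */
	uint8_t lue3 = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
	uint8_t hi = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
	/* the other LUE_CTRL_0 bits are preserved by read-modify-write */
	uint8_t lue0_field = FIELD_PREP(SW_AGE_CNT_M, hi);

	printf("secs=%u -> LUE_CTRL_3=0x%02x, LUE_CTRL_0[5:3]=0x%02x\n",
	       secs, lue3, lue0_field);	/* 0x2c and 0x08 for 300 s */
	return 0;
}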
+
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
- u8 data8, member;
u16 data16;
+ u8 member;
/* enable tail tag for host port */
if (cpu_port)
@@ -1158,57 +1019,19 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
- if (port < dev->phy_port_cnt) {
+ if (dev->info->internal_phy[port]) {
/* do not force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
false);
- if (dev->phy_errata_9477)
+ if (dev->info->phy_errata_9477)
ksz9477_phy_errata_setup(dev, port);
} else {
/* force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
true);
-
- /* configure MAC to 1G & RGMII mode */
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- ksz9477_set_xmii(dev, 0, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- ksz9477_set_xmii(dev, 1, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- ksz9477_set_xmii(dev, 2, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- p->phydev.speed = SPEED_1000;
- break;
- default:
- ksz9477_set_xmii(dev, 3, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- data8 &= ~PORT_RGMII_ID_IG_ENABLE;
- data8 &= ~PORT_RGMII_ID_EG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_EG_ENABLE;
- /* On KSZ9893, disable RGMII in-band status support */
- if (dev->features & IS_9893)
- data8 &= ~PORT_MII_MAC_MODE;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
- p->phydev.duplex = 1;
}
if (cpu_port)
@@ -1219,18 +1042,19 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
-static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) &&
+ (dev->info->cpu_ports & (1 << i))) {
phy_interface_t interface;
const char *prev_msg;
const char *prev_mode;
@@ -1270,44 +1094,54 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->on = 1;
}
}
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
- p = &dev->ports[i];
-
- ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- p->on = 1;
- if (i < dev->phy_port_cnt)
- p->phy = 1;
- if (dev->chip_id == 0x00947700 && i == 6) {
- p->sgmii = 1;
-
- /* SGMII PHY detection code is not implemented yet. */
- p->phy = 0;
- }
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
}
}
-static int ksz9477_setup(struct dsa_switch *ds)
+int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
- struct ksz_device *dev = ds->priv;
- int ret = 0;
+ const u32 *masks;
+ u32 data;
+ int ret;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ masks = dev->info->masks;
- ret = ksz9477_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
+ /* Enable Reserved multicast table */
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
+
+ /* Set the Override bit to forward BPDU packets to the CPU */
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
+ ALU_V_OVERRIDE | BIT(dev->cpu_port));
+ if (ret < 0)
+ return ret;
+
+ data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
return ret;
}
+ return 0;
+}
+
+int ksz9477_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
true);
@@ -1315,12 +1149,14 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* Does not work correctly with tail tagging. */
ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+ /* Enable the REG_SW_MTU__2 register by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
- ksz9477_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+ /* Now we can configure the default MTU value */
+ ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ return ret;
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
@@ -1328,55 +1164,21 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
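/*
 * The default MTU programmed above is the classic maximum VLAN-tagged
 * frame: VLAN_ETH_FRAME_LEN (1518 = 1500 bytes payload + 14-byte header
 * + 4-byte VLAN tag) plus ETH_FCS_LEN (4), i.e. 1522 bytes on the wire.
 * A quick stand-alone check of that arithmetic:
 */
#include <stdio.h>

#define ETH_HLEN		14
#define VLAN_HLEN		4
#define ETH_DATA_LEN		1500
#define ETH_FCS_LEN		4
#define VLAN_ETH_FRAME_LEN	(ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN)

int main(void)
{
	printf("REG_SW_MTU__2 default = %d\n",
	       VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);	/* prints 1522 */
	return 0;
}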
-static const struct dsa_switch_ops ksz9477_switch_ops = {
- .get_tag_protocol = ksz9477_get_tag_protocol,
- .setup = ksz9477_setup,
- .phy_read = ksz9477_phy_read16,
- .phy_write = ksz9477_phy_write16,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz9477_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz9477_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_add = ksz9477_port_vlan_add,
- .port_vlan_del = ksz9477_port_vlan_del,
- .port_fdb_dump = ksz9477_port_fdb_dump,
- .port_fdb_add = ksz9477_port_fdb_add,
- .port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_add = ksz9477_port_mdb_add,
- .port_mdb_del = ksz9477_port_mdb_del,
- .port_mirror_add = ksz9477_port_mirror_add,
- .port_mirror_del = ksz9477_port_mirror_del,
-};
-
-static u32 ksz9477_get_port_addr(int port, int offset)
+u32 ksz9477_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz9477_switch_detect(struct ksz_device *dev)
+int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
- u8 id_hi;
- u8 id_lo;
- u32 id32;
int ret;
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
/* turn off SPI DO Edge select */
ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
if (ret)
@@ -1387,204 +1189,14 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
if (ret)
return ret;
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
- ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
- if (ret)
- return ret;
-
- /* Number of ports can be reduced depending on chip. */
- dev->phy_port_cnt = 5;
-
- /* Default capability is gigabit capable. */
- dev->features = GBIT_SUPPORT;
-
- dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
- id_hi = (u8)(id32 >> 16);
- id_lo = (u8)(id32 >> 8);
- if ((id_lo & 0xf) == 3) {
- /* Chip is from KSZ9893 design. */
- dev_info(dev->dev, "Found KSZ9893\n");
- dev->features |= IS_9893;
-
- /* Chip does not support gigabit. */
- if (data8 & SW_QW_ABLE)
- dev->features &= ~GBIT_SUPPORT;
- dev->phy_port_cnt = 2;
- } else {
- dev_info(dev->dev, "Found KSZ9477 or compatible\n");
- /* Chip uses new XMII register definitions. */
- dev->features |= NEW_XMII;
-
- /* Chip does not support gigabit. */
- if (!(data8 & SW_GIGABIT_ABLE))
- dev->features &= ~GBIT_SUPPORT;
- }
-
- /* Change chip id to known ones so it can be matched against them. */
- id32 = (id_hi << 16) | (id_lo << 8);
-
- dev->chip_id = id32;
-
- return 0;
-}
-
-struct ksz_chip_data {
- u32 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
- bool phy_errata_9477;
-};
-
-static const struct ksz_chip_data ksz9477_switch_chips[] = {
- {
- .chip_id = 0x00947700,
- .dev_name = "KSZ9477",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989700,
- .dev_name = "KSZ9897",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989300,
- .dev_name = "KSZ9893",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x07, /* can be configured as cpu port */
- .port_cnt = 3, /* total port count */
- },
- {
- .chip_id = 0x00956700,
- .dev_name = "KSZ9567",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
-};
-
-static int ksz9477_switch_init(struct ksz_device *dev)
-{
- int i;
-
- dev->ds->ops = &ksz9477_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
- dev->cpu_ports = chip->cpu_ports;
- dev->phy_errata_9477 = chip->phy_errata_9477;
-
- break;
- }
- }
-
- /* no switch found */
- if (!dev->port_cnt)
- return -ENODEV;
-
- dev->port_mask = (1 << dev->port_cnt) - 1;
-
- dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
- dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (TOTAL_SWITCH_COUNTER_NUM + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
-
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
-
return 0;
}
-static void ksz9477_switch_exit(struct ksz_device *dev)
+void ksz9477_switch_exit(struct ksz_device *dev)
{
ksz9477_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz9477_dev_ops = {
- .get_port_addr = ksz9477_get_port_addr,
- .cfg_port_member = ksz9477_cfg_port_member,
- .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
- .port_setup = ksz9477_port_setup,
- .r_mib_cnt = ksz9477_r_mib_cnt,
- .r_mib_pkt = ksz9477_r_mib_pkt,
- .freeze_mib = ksz9477_freeze_mib,
- .port_init_cnt = ksz9477_port_init_cnt,
- .shutdown = ksz9477_reset_switch,
- .detect = ksz9477_switch_detect,
- .init = ksz9477_switch_init,
- .exit = ksz9477_switch_exit,
-};
-
-int ksz9477_switch_register(struct ksz_device *dev)
-{
- int ret, i;
- struct phy_device *phydev;
-
- ret = ksz_switch_register(dev, &ksz9477_dev_ops);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->phy_port_cnt; ++i) {
- if (!dsa_is_user_port(dev->ds, i))
- continue;
-
- phydev = dsa_to_port(dev->ds, i)->slave->phydev;
-
- /* The MAC actually cannot run in 1000 half-duplex mode. */
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* PHY does not support gigabit. */
- if (!(dev->features & GBIT_SUPPORT))
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
- }
- return ret;
-}
-EXPORT_SYMBOL(ksz9477_switch_register);
-
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
new file mode 100644
index 000000000000..00862c4cfb7f
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 series header file
+ *
+ * Copyright (C) 2017-2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_H
+#define __KSZ9477_H
+
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz9477_setup(struct dsa_switch *ds);
+u32 ksz9477_get_port_addr(int port, int offset);
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port);
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_get_stp_reg(void);
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz9477_max_mtu(struct ksz_device *dev, int port);
+void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_enable_stp_addr(struct ksz_device *dev);
+int ksz9477_reset_switch(struct ksz_device *dev);
+int ksz9477_dsa_init(struct ksz_device *dev);
+int ksz9477_switch_init(struct ksz_device *dev);
+void ksz9477_switch_exit(struct ksz_device *dev);
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index f3afb8b8c4cc..3763930dc6fc 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -41,7 +41,7 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
- ret = ksz9477_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -52,16 +52,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int ksz9477_i2c_remove(struct i2c_client *i2c)
+static void ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (dev)
ksz_switch_remove(dev);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
@@ -71,8 +67,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
@@ -87,11 +83,34 @@ static const struct i2c_device_id ksz9477_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz9567" },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
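/*
 * Illustrative fragment (not the driver's actual probe path) of how the
 * .data pointers above are typically consumed: a common probe helper can
 * fetch the matched ksz_chip_data via of_device_get_match_data() and park
 * it in dev->info, so per-chip tables (port_cnt, masks, shifts, ...) need
 * no runtime detection. Assumes the types from ksz_common.h in this
 * series and <linux/of_device.h>.
 */
static int example_bind_chip_info(struct device *of_dev,
				  struct ksz_device *dev)
{
	const struct ksz_chip_data *info = of_device_get_match_data(of_dev);

	if (!info)
		return -EINVAL;	/* unsupported or missing compatible */
	dev->info = info;
	return 0;
}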
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 16939f29faa5..53c68d286dd3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -25,7 +25,6 @@
#define REG_CHIP_ID2__1 0x0002
-#define CHIP_ID_63 0x63
#define CHIP_ID_66 0x66
#define CHIP_ID_67 0x67
#define CHIP_ID_77 0x77
@@ -166,7 +165,6 @@
#define SW_DOUBLE_TAG BIT(7)
#define SW_RESET BIT(1)
-#define SW_START BIT(0)
#define REG_SW_MAC_ADDR_0 0x0302
#define REG_SW_MAC_ADDR_1 0x0303
@@ -176,6 +174,7 @@
#define REG_SW_MAC_ADDR_5 0x0307
#define REG_SW_MTU__2 0x0308
+#define REG_SW_MTU_MASK GENMASK(13, 0)
#define REG_SW_ISP_TPID__2 0x030A
@@ -190,8 +189,9 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
+#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -226,6 +226,7 @@
#define SW_PRIO_LOWEST_DA_SA 3
#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
#define REG_SW_LUE_INT_STATUS 0x0314
#define REG_SW_LUE_INT_ENABLE 0x0315
@@ -265,7 +266,6 @@
#define REG_SW_MAC_CTRL_1 0x0331
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -276,13 +276,9 @@
#define REG_SW_MAC_CTRL_2 0x0332
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_MAC_CTRL_3 0x0333
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_MAC_CTRL_4 0x0334
#define SW_PASS_PAUSE BIT(3)
@@ -425,12 +421,9 @@
#define REG_SW_ALU_STAT_CTRL__4 0x041C
-#define ALU_STAT_INDEX_M (BIT(4) - 1)
-#define ALU_STAT_INDEX_S 16
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
#define ALU_RESV_MCAST_ADDR BIT(1)
-#define ALU_STAT_READ BIT(0)
#define REG_SW_ALU_VAL_A 0x0420
@@ -1184,35 +1177,11 @@
#define PORT_LINK_STATUS_FAIL BIT(0)
/* 3 - xMII */
-#define REG_PORT_XMII_CTRL_0 0x0300
-
#define PORT_SGMII_SEL BIT(7)
-#define PORT_MII_FULL_DUPLEX BIT(6)
-#define PORT_MII_100MBIT BIT(4)
#define PORT_GRXC_ENABLE BIT(0)
-#define REG_PORT_XMII_CTRL_1 0x0301
-
#define PORT_RMII_CLK_SEL BIT(7)
-/* S1 */
-#define PORT_MII_1000MBIT_S1 BIT(6)
-/* S2 */
-#define PORT_MII_NOT_1GBIT BIT(6)
#define PORT_MII_SEL_EDGE BIT(5)
-#define PORT_RGMII_ID_IG_ENABLE BIT(4)
-#define PORT_RGMII_ID_EG_ENABLE BIT(3)
-#define PORT_MII_MAC_MODE BIT(2)
-#define PORT_MII_SEL_M 0x3
-/* S1 */
-#define PORT_MII_SEL_S1 0x0
-#define PORT_RMII_SEL_S1 0x1
-#define PORT_GMII_SEL_S1 0x2
-#define PORT_RGMII_SEL_S1 0x3
-/* S2 */
-#define PORT_RGMII_SEL 0x0
-#define PORT_RMII_SEL 0x1
-#define PORT_GMII_SEL 0x2
-#define PORT_MII_SEL 0x3
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1268,8 +1237,6 @@
/* 5 - MIB Counters */
#define REG_PORT_MIB_CTRL_STAT__4 0x0500
-#define MIB_COUNTER_OVERFLOW BIT(31)
-#define MIB_COUNTER_VALID BIT(30)
#define MIB_COUNTER_READ BIT(25)
#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
@@ -1585,10 +1552,6 @@
#define REG_PORT_LUE_MSTP_STATE 0x0B04
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
-
/* C - PTP */
#define REG_PTP_PORT_RX_DELAY__2 0x0C00
@@ -1632,11 +1595,7 @@
#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
-#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE
#define P_PHY_CTRL REG_PORT_PHY_CTRL
-#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL
-#define P_LINK_STATUS REG_PORT_PHY_STATUS
-#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL
#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
@@ -1656,10 +1615,6 @@
#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
+#define KSZ9477_MAX_FRAME_SIZE 9000
#endif /* KSZ9477_REGS_H */
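/*
 * KSZ9477_MAX_FRAME_SIZE above is the largest on-wire frame the chip
 * handles (9000 bytes). The L3 MTU the driver can advertise is presumably
 * that figure minus the VLAN Ethernet header and FCS overhead; a quick
 * stand-alone sanity calculation:
 */
#include <stdio.h>

#define KSZ9477_MAX_FRAME_SIZE	9000
#define VLAN_ETH_HLEN		18	/* 14-byte header + 4-byte VLAN tag */
#define ETH_FCS_LEN		4

int main(void)
{
	printf("max mtu = %d\n",
	       KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN);
	return 0;	/* prints 8978 */
}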
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
deleted file mode 100644
index e3cb0e6c9f6f..000000000000
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip KSZ9477 series register access through SPI
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_common.h"
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_ALIGN 3
-#define SPI_TURNAROUND_SHIFT 5
-
-KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
- SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
-
-static int ksz9477_spi_probe(struct spi_device *spi)
-{
- struct regmap_config rc;
- struct ksz_device *dev;
- int i, ret;
-
- dev = ksz_switch_alloc(&spi->dev, spi);
- if (!dev)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- rc = ksz9477_regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz9477_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz9477_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
-}
-
-static void ksz9477_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz8563" },
- { .compatible = "microchip,ksz9567" },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
-
-static struct spi_driver ksz9477_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
- },
- .probe = ksz9477_spi_probe,
- .remove = ksz9477_spi_remove,
- .shutdown = ksz9477_spi_shutdown,
-};
-
-module_spi_driver(ksz9477_spi_driver);
-
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 55dbda04ea62..d612181b3226 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -14,19 +14,1597 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz8.h"
+#include "ksz9477.h"
+#include "lan937x.h"
+
+#define MIB_COUNTER_NUM 0x20
+
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
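/*
 * The field order of ksz_stats_raw mirrors the ksz9477 MIB counter layout
 * below, which lets a per-port counter snapshot be viewed through this
 * struct. A hedged sketch of the kind of translation the r_mib_stat64
 * hook performs; the driver's ksz_r_mib_stats64() is authoritative and
 * this is only an illustration of the mapping idea.
 */
static void example_stats64(const struct ksz_stats_raw *raw,
			    struct rtnl_link_stats64 *stats)
{
	stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
	stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
	stats->rx_bytes   = raw->rx_total;
	stats->tx_bytes   = raw->tx_total;
	stats->rx_dropped = raw->rx_discards;
	stats->tx_dropped = raw->tx_discards;
	stats->multicast  = raw->rx_mcast;
	stats->collisions = raw->tx_total_col;
}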
+
+static const struct ksz_mib_names ksz88xx_mib_names[] = {
+ { 0x00, "rx" },
+ { 0x01, "rx_hi" },
+ { 0x02, "rx_undersize" },
+ { 0x03, "rx_fragments" },
+ { 0x04, "rx_oversize" },
+ { 0x05, "rx_jabbers" },
+ { 0x06, "rx_symbol_err" },
+ { 0x07, "rx_crc_err" },
+ { 0x08, "rx_align_err" },
+ { 0x09, "rx_mac_ctrl" },
+ { 0x0a, "rx_pause" },
+ { 0x0b, "rx_bcast" },
+ { 0x0c, "rx_mcast" },
+ { 0x0d, "rx_ucast" },
+ { 0x0e, "rx_64_or_less" },
+ { 0x0f, "rx_65_127" },
+ { 0x10, "rx_128_255" },
+ { 0x11, "rx_256_511" },
+ { 0x12, "rx_512_1023" },
+ { 0x13, "rx_1024_1522" },
+ { 0x14, "tx" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1a, "tx_ucast" },
+ { 0x1b, "tx_deferred" },
+ { 0x1c, "tx_total_col" },
+ { 0x1d, "tx_exc_col" },
+ { 0x1e, "tx_single_col" },
+ { 0x1f, "tx_mult_col" },
+ { 0x100, "rx_discards" },
+ { 0x101, "tx_discards" },
+};
+
+static const struct ksz_mib_names ksz9477_mib_names[] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
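/*
 * With the names table hung off dev->info, the ethtool strings callback
 * can stay chip-agnostic. A sketch of the iteration, assuming
 * dev->info->mib_cnt bounds the table and the usual kernel headers for
 * u8, memcpy and ETH_GSTRING_LEN (<linux/ethtool.h>):
 */
static void example_get_strings(struct ksz_device *dev, u8 *buf)
{
	int i;

	for (i = 0; i < dev->info->mib_cnt; i++)
		memcpy(buf + i * ETH_GSTRING_LEN,
		       dev->info->mib_names[i].string, ETH_GSTRING_LEN);
}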
+
+static const struct ksz_dev_ops ksz8_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+};
+
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .setup = ksz9477_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
+ .r_phy = ksz9477_r_phy,
+ .w_phy = ksz9477_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = ksz9477_get_caps,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = ksz9477_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .config_cpu_port = ksz9477_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+static const struct ksz_dev_ops lan937x_dev_ops = {
+ .setup = lan937x_setup,
+ .teardown = lan937x_teardown,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
+ .r_phy = lan937x_r_phy,
+ .w_phy = lan937x_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = lan937x_phylink_get_caps,
+ .setup_rgmii_delay = lan937x_setup_rgmii_delay,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = lan937x_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .config_cpu_port = lan937x_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = lan937x_reset_switch,
+ .init = lan937x_switch_init,
+ .exit = lan937x_switch_exit,
+};
+
+static const u16 ksz8795_regs[] = {
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+ [P_XMII_CTRL_0] = 0x06,
+ [P_XMII_CTRL_1] = 0x56,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(5),
+};
+
+static const u8 ksz8795_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 0,
+ [P_MII_10MBIT] = 1,
+ [P_MII_FULL_DUPLEX] = 0,
+ [P_MII_HALF_DUPLEX] = 1,
+};
+
+static const u8 ksz8795_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 3,
+ [P_GMII_SEL] = 2,
+ [P_RMII_SEL] = 1,
+ [P_MII_SEL] = 0,
+ [P_GMII_1GBIT] = 1,
+ [P_GMII_NOT_1GBIT] = 0,
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
+static const u16 ksz8863_regs[] = {
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static const u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 24,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz9477_regs[] = {
+ [P_STP_CTRL] = 0x0B04,
+ [S_START_CTRL] = 0x0300,
+ [S_BROADCAST_CTRL] = 0x0332,
+ [S_MULTICAST_CTRL] = 0x0331,
+ [P_XMII_CTRL_0] = 0x0300,
+ [P_XMII_CTRL_1] = 0x0301,
+};
+
+static const u32 ksz9477_masks[] = {
+ [ALU_STAT_WRITE] = 0,
+ [ALU_STAT_READ] = 1,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 ksz9477_shifts[] = {
+ [ALU_STAT_INDEX] = 16,
+};
+
+static const u8 ksz9477_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 1,
+ [P_MII_10MBIT] = 0,
+ [P_MII_FULL_DUPLEX] = 1,
+ [P_MII_HALF_DUPLEX] = 0,
+};
+
+static const u8 ksz9477_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 0,
+ [P_RMII_SEL] = 1,
+ [P_GMII_SEL] = 2,
+ [P_MII_SEL] = 3,
+ [P_GMII_1GBIT] = 0,
+ [P_GMII_NOT_1GBIT] = 1,
+};
+
+static const u32 lan937x_masks[] = {
+ [ALU_STAT_WRITE] = 1,
+ [ALU_STAT_READ] = 2,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 lan937x_shifts[] = {
+ [ALU_STAT_INDEX] = 8,
+};
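/*
 * Why masks[] and shifts[] are indirected per chip: the same logical
 * operation encodes differently on KSZ9477 and LAN937x. Composing the
 * ALU static-table read command the way the ksz9477 code above does:
 *
 *	data = (index << shifts[ALU_STAT_INDEX]) | masks[ALU_STAT_READ] |
 *	       ALU_STAT_START;
 *
 * yields, for index 5: KSZ9477 -> (5 << 16) | 1 | BIT(7), while
 * LAN937x -> (5 << 8) | 2 | BIT(7). One code path covers both register
 * layouts.
 */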
+
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
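/*
 * Sketch of how an access table such as ksz8563_register_set is wired
 * into a regmap: pointing the regmap_config at it makes regmap reject
 * reads and writes outside the listed ranges, catching stray register
 * accesses on this chip early. Illustrative fragment only; the actual
 * regmap_config lives in the SPI/I2C bus glue.
 */
static const struct regmap_config example_ksz8563_rc = {
	.reg_bits = 16,
	.val_bits = 8,
	.rd_table = &ksz8563_register_set,	/* readable ranges */
	.wr_table = &ksz8563_register_set,	/* writable ranges */
};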
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7203),
+ regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6122, 0x6127),
+ regmap_reg_range(0x612a, 0x612b),
+ regmap_reg_range(0x6136, 0x6139),
+ regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
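
A pattern worth noting across all three tables: global registers sit below 0x1000 and each port occupies its own 0x1000-sized window (the first port at 0x1xxx, the second at 0x2xxx, and so on). A hypothetical helper making that stride explicit; the name and constant are illustrative, not driver symbols:

	/* Sketch: map a per-port offset into the flat address space the
	 * tables above describe, assuming the 0x1000-per-port stride they
	 * exhibit (ports numbered from 0).
	 */
	#define KSZ_PORT_REG_STRIDE	0x1000

	static u32 ksz_port_reg(int port, u32 offset)
	{
		return (port + 1) * KSZ_PORT_REG_STRIDE + offset;
	}

For example, ksz_port_reg(2, 0x013) yields 0x3013, which lands in the "port 3" window of the tables above.
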
+
+const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
+ [KSZ8795] = {
+ .chip_id = KSZ8795_CHIP_ID,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8794] = {
+ /* WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except that the port map
+ * contains a gap between the external and CPU ports, i.e.
+ * the port map is NOT continuous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8794_CHIP_ID,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, false, false},
+ },
+
+ [KSZ8765] = {
+ .chip_id = KSZ8765_CHIP_ID,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8830] = {
+ .chip_id = KSZ8830_CHIP_ID,
+ .dev_name = "KSZ8863/KSZ8873",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .ops = &ksz8_dev_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8863_regs,
+ .masks = ksz8863_masks,
+ .shifts = ksz8863_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
+ [KSZ9477] = {
+ .chip_id = KSZ9477_CHIP_ID,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, false},
+ .supports_rmii = {false, false, false, false,
+ false, true, false},
+ .supports_rgmii = {false, false, false, false,
+ false, true, false},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
+ },
+
+ [KSZ9897] = {
+ .chip_id = KSZ9897_CHIP_ID,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [KSZ9893] = {
+ .chip_id = KSZ9893_CHIP_ID,
+ .dev_name = "KSZ9893",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ },
+
+ [KSZ9567] = {
+ .chip_id = KSZ9567_CHIP_ID,
+ .dev_name = "KSZ9567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [LAN9370] = {
+ .chip_id = LAN9370_CHIP_ID,
+ .dev_name = "LAN9370",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [LAN9371] = {
+ .chip_id = LAN9371_CHIP_ID,
+ .dev_name = "LAN9371",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true, true},
+ .supports_rmii = {false, false, false, false, true, true},
+ .supports_rgmii = {false, false, false, false, true, true},
+ .internal_phy = {true, true, true, true, false, false},
+ },
+
+ [LAN9372] = {
+ .chip_id = LAN9372_CHIP_ID,
+ .dev_name = "LAN9372",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+
+ [LAN9373] = {
+ .chip_id = LAN9373_CHIP_ID,
+ .dev_name = "LAN9373",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x38, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, false,
+ false, false, true, true},
+ },
+
+ [LAN9374] = {
+ .chip_id = LAN9374_CHIP_ID,
+ .dev_name = "LAN9374",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+};
+EXPORT_SYMBOL_GPL(ksz_switch_chips);
+
+static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz_switch_chips[i];
+
+ if (chip->chip_id == prod_num)
+ return chip;
+ }
+
+ return NULL;
+}
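
ksz_lookup_info() is the probe-time bridge from silicon to the table above: the driver reads the chip ID out of the hardware, then resolves it here. A hedged sketch of the expected call pattern; ksz_read_chip_id() is a placeholder for the family-specific ID read, not a real symbol:

	static int ksz_probe_sketch(struct ksz_device *dev)
	{
		const struct ksz_chip_data *info;
		unsigned int id;

		id = ksz_read_chip_id(dev);	/* hypothetical helper */
		info = ksz_lookup_info(id);
		if (!info)
			return -ENODEV;		/* unknown silicon */

		dev->info = info;
		dev->chip_id = info->chip_id;
		return 0;
	}
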
+
+static int ksz_check_device_id(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *dt_chip_data;
+
+ dt_chip_data = of_device_get_match_data(dev->dev);
+
+ /* Check for Device Tree and Chip ID */
+ if (dt_chip_data->chip_id != dev->chip_id) {
+ dev_err(dev->dev,
+ "Device tree specifies chip %s but found %s, please fix it!\n",
+ dt_chip_data->dev_name, dev->info->dev_name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ksz_device *dev = ds->priv;
+
+ config->legacy_pre_march2020 = false;
+
+ if (dev->info->supports_mii[port])
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+
+ if (dev->info->supports_rmii[port])
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+
+ if (dev->info->supports_rgmii[port])
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ if (dev->info->internal_phy[port]) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
+}
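
For reference, phy_interface_set_rgmii() used above is shorthand for marking all four RGMII delay variants (per include/linux/phy.h); spelled out it is equivalent to:

	static inline void rgmii_caps_sketch(unsigned long *interfaces)
	{
		__set_bit(PHY_INTERFACE_MODE_RGMII, interfaces);
		__set_bit(PHY_INTERFACE_MODE_RGMII_ID, interfaces);
		__set_bit(PHY_INTERFACE_MODE_RGMII_RXID, interfaces);
		__set_bit(PHY_INTERFACE_MODE_RGMII_TXID, interfaces);
	}
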
+
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+ /* The HW counters include the FCS in their byte counts, which
+ * the rtnl_link_stats64 interface does not expect
+ */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+}
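
The FCS adjustment above is simple arithmetic: every counted frame carries a 4-byte CRC that the reported byte totals must exclude. A worked example with made-up numbers:

	/* Worked example of the FCS correction above (ETH_FCS_LEN == 4) */
	static u64 fcs_adjust_sketch(void)
	{
		u64 rx_total = 100000;	/* wire bytes, FCS included */
		u64 rx_packets = 1000;

		return rx_total - rx_packets * ETH_FCS_LEN;	/* 96000 */
	}
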
+
+static void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&mib->stats64_lock);
+}
-void ksz_update_port_member(struct ksz_device *dev, int port)
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < dev->info->mib_cnt; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN,
+ dev->info->mib_names[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
u8 port_member = 0, cpu_port;
const struct dsa_port *dp;
- int i;
+ int i, j;
if (!dsa_is_user_port(ds, port))
return;
@@ -45,19 +1623,411 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
continue;
if (!dsa_port_bridge_same(dp, other_dp))
continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
+ continue;
- if (other_p->stp_state == BR_STATE_FORWARDING &&
- p->stp_state == BR_STATE_FORWARDING) {
+ if (p->stp_state == BR_STATE_FORWARDING) {
val |= BIT(port);
port_member |= BIT(i);
}
+ /* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+ third_dp = dsa_to_port(ds, j);
+ if (dsa_port_bridge_same(other_dp, third_dp))
+ val |= BIT(j);
+ }
+
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
-EXPORT_SYMBOL_GPL(ksz_update_port_member);
+
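Concretely, the double loop above produces symmetric membership masks. A worked example, assuming a KSZ8563-style 3-port layout with the CPU port at index 2 and user ports 0 and 1 in the same bridge:

	/* Example outcome (values are illustrative):
	 * ports 0 and 1 bridged, both forwarding ->
	 *   cfg_port_member(dev, 0, BIT(1) | BIT(2));	// peer + CPU
	 *   cfg_port_member(dev, 1, BIT(0) | BIT(2));
	 * port 1 blocked or not forwarding ->
	 *   cfg_port_member(dev, 0, BIT(2));		// CPU only
	 *   cfg_port_member(dev, 1, BIT(2));
	 */
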
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
+
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ ds->slave_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+}
+
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret) {
+ of_node_put(mdio_np);
+ return ret;
+ }
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (pirq->irq_num < 0)
+ return pirq->irq_num;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
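
Together, ksz_girq_setup() and ksz_pirq_setup() build a two-level chain: the chip's single interrupt line fans out to per-port virtual IRQs, which fan out again to per-function sources. A hedged sketch of how a consumer picks up one of those leaf IRQs, reusing PORT_SRC_PHY_INT as ksz_irq_phy_setup() does above; the function and handler names are illustrative:

	static int ksz_request_phy_irq_sketch(struct ksz_device *dev, int p,
					      irq_handler_t thread_fn)
	{
		int irq = irq_find_mapping(dev->ports[p].pirq.domain,
					   PORT_SRC_PHY_INT);

		if (irq <= 0)
			return -ENXIO;

		return request_threaded_irq(irq, NULL, thread_fn,
					    IRQF_ONESHOT,
					    "ksz-phy-sketch", dev);
	}
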
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+ struct ksz_port *p;
+ const u16 *regs;
+ int ret;
+
+ regs = dev->info->regs;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+ dev->info->num_vlans, GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = dev->dev_ops->reset(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ /* Set broadcast storm protection to a 10% rate */
+ regmap_update_bits(dev->regmap[1], regs[S_BROADCAST_CTRL],
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ dev->dev_ops->config_cpu_port(ds);
+
+ dev->dev_ops->enable_stp_addr(dev);
+
+ regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
+ MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
+
+ ksz_init_mib_timer(dev);
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ if (dev->dev_ops->setup) {
+ ret = dev->dev_ops->setup(ds);
+ if (ret)
+ return ret;
+ }
+
+ /* Start with learning disabled on standalone user ports, and enabled
+ * on the CPU port. In the absence of finer-grained mechanisms,
+ * learning on the CPU port will avoid flooding bridge local addresses
+ * on the network in some cases.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto out_girq;
+ }
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ goto out_pirq;
+ }
+
+ /* start switch */
+ regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
+ SW_START, SW_START);
+
+ return 0;
+
+out_pirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+out_girq:
+ if (dev->irq > 0)
+ ksz_irq_free(&dev->girq);
+
+ return ret;
+}
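
The broadcast storm write in ksz_setup() is a straightforward percentage scale. Assuming the constants defined elsewhere in this driver (BROADCAST_STORM_VALUE = 9969, i.e. the 100 Mbit frame rate scaled to the chip's polling window, and BROADCAST_STORM_PROT_RATE = 10 for 10%), the programmed value works out as:

	/* Worked example, under the assumptions stated above:
	 *
	 *   (9969 * 10) / 100 = 996 frames per interval
	 *
	 * which is the value masked into regs[S_BROADCAST_CTRL] above.
	 */
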
+
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
+}
static void port_r_cnt(struct ksz_device *dev, int port)
{
@@ -65,17 +2035,17 @@ static void port_r_cnt(struct ksz_device *dev, int port)
u64 *dropped;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
@@ -91,7 +2061,7 @@ static void ksz_mib_read_work(struct work_struct *work)
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;
@@ -106,10 +2076,14 @@ static void ksz_mib_read_work(struct work_struct *work)
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
}
port_r_cnt(dev, i);
p->read = false;
+
+ if (dev->dev_ops->r_mib_stat64)
+ dev->dev_ops->r_mib_stat64(dev, i);
+
mutex_unlock(&mib->cnt_mutex);
}
@@ -122,34 +2096,59 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- for (i = 0; i < dev->port_cnt; i++)
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ struct ksz_port_mib *mib = &dev->ports[i].mib;
+
dev->dev_ops->port_init_cnt(dev, i);
+
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
+ }
}
-EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
- dev->dev_ops->r_phy(dev, addr, reg, &val);
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
return val;
}
-EXPORT_SYMBOL_GPL(ksz_phy_read16);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
+ int ret;
- dev->dev_ops->w_phy(dev, addr, reg, val);
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_phy_write16);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
+static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID) {
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+ }
+
+ return 0;
+}
+
+static void ksz_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
@@ -160,20 +2159,19 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
-EXPORT_SYMBOL_GPL(ksz_mac_link_down);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
if (sset != ETH_SS_STATS)
return 0;
- return dev->mib_cnt;
+ return dev->info->mib_cnt;
}
-EXPORT_SYMBOL_GPL(ksz_sset_count);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
struct ksz_device *dev = ds->priv;
@@ -184,16 +2182,16 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
/* Only read dropped counters if no link. */
if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
port_r_cnt(dev, port);
- memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
+ memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
-EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
@@ -201,133 +2199,93 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
*/
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-void ksz_port_fast_age(struct dsa_switch *ds, int port)
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data)
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct ksz_device *dev = ds->priv;
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 member;
- struct alu_struct alu;
- do {
- alu.is_static = false;
- ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
- &member, &timestamp,
- &entries);
- if (!ret && (member & BIT(port))) {
- ret = cb(alu.mac, alu.fid, alu.is_static, data);
- if (ret)
- break;
- }
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
- return ret;
+ return dev->dev_ops->set_ageing_time(dev, msecs);
}
-EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int empty = 0;
- alu.port_forward = 0;
- for (index = 0; index < dev->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- /* Remember the first empty entry. */
- } else if (!empty) {
- empty = index + 1;
- }
- }
+ if (!dev->dev_ops->fdb_add)
+ return -EOPNOTSUPP;
- /* no available entry */
- if (index == dev->num_statics && !empty)
- return -ENOSPC;
+ return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
+}
- /* add entry */
- if (index == dev->num_statics) {
- index = empty - 1;
- memset(&alu, 0, sizeof(alu));
- memcpy(alu.mac, mdb->addr, ETH_ALEN);
- alu.is_static = true;
- }
- alu.port_forward |= BIT(port);
- if (mdb->vid) {
- alu.is_use_fid = true;
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
- /* Need a way to map VID to FID. */
- alu.fid = mdb->vid;
- }
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ if (!dev->dev_ops->fdb_del)
+ return -EOPNOTSUPP;
- return 0;
+ return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- for (index = 0; index < dev->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- }
- }
+ if (!dev->dev_ops->fdb_dump)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_dump(dev, port, cb, data);
+}
+
+static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
- /* no available entry */
- if (index == dev->num_statics)
- goto exit;
+ if (!dev->dev_ops->mdb_add)
+ return -EOPNOTSUPP;
- /* clear port */
- alu.port_forward &= ~BIT(port);
- if (!alu.port_forward)
- alu.is_static = false;
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ return dev->dev_ops->mdb_add(dev, port, mdb, db);
+}
-exit:
- return 0;
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+static int ksz_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
@@ -343,7 +2301,537 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_enable_port);
+
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ const u16 *regs;
+ u8 data;
+
+ regs = dev->info->regs;
+
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ p = &dev->ports[port];
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
+
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
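
The switch() above reduces to a small truth table over the three control bits; spelling it out, with the learning column shown for p->learning == true:

	/* STP state -> P_STP_CTRL bits set above (p->learning == true):
	 *
	 *   DISABLED    LEARN_DISABLE
	 *   LISTENING   RX_ENABLE | LEARN_DISABLE
	 *   LEARNING    RX_ENABLE
	 *   FORWARDING  TX_ENABLE | RX_ENABLE
	 *   BLOCKING    LEARN_DISABLE
	 */
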
+
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ksz_device *dev = ds->priv;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ8795;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9893_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9893;
+
+ if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
+ dev->chip_id == KSZ9897_CHIP_ID ||
+ dev->chip_id == KSZ9567_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9477;
+
+ if (is_lan937x(dev))
+ proto = DSA_TAG_PROTO_LAN937X_VALUE;
+
+ return proto;
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_filtering)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
+}
+
+static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_del(dev, port, vlan);
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mirror_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->mirror_del)
+ dev->dev_ops->mirror_del(dev, port, mirror);
+}
+
+static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->change_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->change_mtu(dev, port, mtu);
+}
+
+static int ksz_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->max_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->max_mtu(dev, port);
+}
+
+static void ksz_set_xmii(struct ksz_device *dev, int port,
+ phy_interface_t interface)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ struct ksz_port *p = &dev->ports[port];
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~(P_MII_SEL_M | P_RGMII_ID_IG_ENABLE |
+ P_RGMII_ID_EG_ENABLE);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= bitval[P_MII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= bitval[P_RMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= bitval[P_GMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ data8 |= bitval[P_RGMII_SEL];
+ /* On KSZ9893 and KSZ8563, disable RGMII in-band status support */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ data8 &= ~P_MII_MAC_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(interface), port);
+ return;
+ }
+
+ if (p->rgmii_tx_val)
+ data8 |= P_RGMII_ID_EG_ENABLE;
+
+ if (p->rgmii_rx_val)
+ data8 |= P_RGMII_ID_IG_ENABLE;
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ phy_interface_t interface;
+ u8 data8;
+ u8 val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_MII_SEL_M, data8);
+
+ if (val == bitval[P_MII_SEL]) {
+ if (gbit)
+ interface = PHY_INTERFACE_MODE_GMII;
+ else
+ interface = PHY_INTERFACE_MODE_MII;
+ } else if (val == bitval[P_RMII_SEL]) {
+ interface = PHY_INTERFACE_MODE_RMII;
+ } else {
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ if (data8 & P_RGMII_ID_IG_ENABLE) {
+ interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_ID;
+ }
+ }
+
+ return interface;
+}
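/* A standalone sketch (not driver code) of the RGMII decode above,
 * assuming only that P_RGMII_ID_EG_ENABLE is the egress (TX) delay bit
 * and P_RGMII_ID_IG_ENABLE the ingress (RX) delay bit; it restates the
 * nested tests as a compilable truth table.
 */
#include <stdio.h>

#define ID_EG (1u << 3) /* P_RGMII_ID_EG_ENABLE */
#define ID_IG (1u << 4) /* P_RGMII_ID_IG_ENABLE */

static const char *rgmii_mode(unsigned int data8)
{
	if ((data8 & ID_IG) && (data8 & ID_EG))
		return "rgmii-id";   /* both internal delays enabled */
	if (data8 & ID_IG)
		return "rgmii-rxid";
	if (data8 & ID_EG)
		return "rgmii-txid";
	return "rgmii";              /* no internal delays */
}

int main(void)
{
	unsigned int v;

	for (v = 0; v <= (ID_IG | ID_EG); v += ID_EG)
		printf("ctrl1 bits 0x%02x -> %s\n", v, rgmii_mode(v));
	return 0;
}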
+
+static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ksz_is_ksz88x3(dev))
+ return;
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(dev->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ ksz_set_xmii(dev, port, state->interface);
+
+ if (dev->dev_ops->phylink_mac_config)
+ dev->dev_ops->phylink_mac_config(dev, port, mode, state);
+
+ if (dev->dev_ops->setup_rgmii_delay)
+ dev->dev_ops->setup_rgmii_delay(dev, port);
+}
+
+bool ksz_get_gbit(struct ksz_device *dev, int port)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ bool gbit = false;
+ u8 data8;
+ bool val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_GMII_1GBIT_M, data8);
+
+ if (val == bitval[P_GMII_1GBIT])
+ gbit = true;
+
+ return gbit;
+}
+
+static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~P_GMII_1GBIT_M;
+
+ if (gbit)
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_1GBIT]);
+ else
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_NOT_1GBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8);
+
+ data8 &= ~P_MII_100MBIT_M;
+
+ if (speed == SPEED_100)
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_100MBIT]);
+ else
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_10MBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8);
+}
+
+static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed)
+{
+ if (speed == SPEED_1000)
+ ksz_set_gbit(dev, port, true);
+ else
+ ksz_set_gbit(dev, port, false);
+
+ if (speed == SPEED_100 || speed == SPEED_10)
+ ksz_set_100_10mbit(dev, port, speed);
+}
+
+static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ u8 mask;
+ u8 val;
+
+ mask = P_MII_DUPLEX_M | masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL];
+
+ if (duplex == DUPLEX_FULL)
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_FULL_DUPLEX]);
+ else
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_HALF_DUPLEX]);
+
+ if (tx_pause)
+ val |= masks[P_MII_TX_FLOW_CTRL];
+
+ if (rx_pause)
+ val |= masks[P_MII_RX_FLOW_CTRL];
+
+ ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
+}
+
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct ksz_port *p;
+
+ p = &dev->ports[port];
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ p->phydev.speed = speed;
+
+ ksz_port_set_xmii_speed(dev, port, speed);
+
+ ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->phylink_mac_link_up)
+ dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
+ phydev, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2, id4;
+ u16 id16;
+ u32 id32;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
+ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
+
+ switch (id1) {
+ case KSZ87_FAMILY_ID:
+ if (id2 == KSZ87_CHIP_ID_95) {
+ u8 val;
+
+ dev->chip_id = KSZ8795_CHIP_ID;
+
+ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
+ if (val & KSZ8_PORT_FIBER_MODE)
+ dev->chip_id = KSZ8765_CHIP_ID;
+ } else if (id2 == KSZ87_CHIP_ID_94) {
+ dev->chip_id = KSZ8794_CHIP_ID;
+ } else {
+ return -ENODEV;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 == KSZ88_CHIP_ID_63)
+ dev->chip_id = KSZ8830_CHIP_ID;
+ else
+ return -ENODEV;
+ break;
+ default:
+ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
+ id32 &= ~0xFF;
+
+ switch (id32) {
+ case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ dev->chip_id = id32;
+ break;
+ case KSZ9893_CHIP_ID:
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
+ if (ret)
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected %x)\n", id32);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
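/* A standalone sketch (not driver code) of the ID decode above: the
 * 16-bit word splits into family (bits 15:8) and chip (bits 7:4)
 * fields, while the 32-bit word carries a revision in bits 7:4 that is
 * masked off before matching. The values below are example readbacks.
 */
#include <stdio.h>

int main(void)
{
	unsigned int id16 = 0x8795;                /* example KSZ8795 readback */
	unsigned int family = (id16 >> 8) & 0xff;  /* SW_FAMILY_ID_M -> 0x87 */
	unsigned int chip = (id16 >> 4) & 0xf;     /* SW_CHIP_ID_M -> 0x9 */
	unsigned int id32 = 0x00947710;            /* example 32-bit readback */
	unsigned int rev = (id32 >> 4) & 0xf;      /* SW_REV_ID_M -> 1 */

	id32 &= ~0xffu;                            /* -> 0x00947700, KSZ9477 */
	printf("family 0x%02x chip 0x%x rev %u id 0x%08x\n",
	       family, chip, rev, id32);
	return 0;
}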
+
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .get_phy_flags = ksz_get_phy_flags,
+ .setup = ksz_setup,
+ .teardown = ksz_teardown,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .phylink_get_caps = ksz_phylink_get_caps,
+ .phylink_mac_config = ksz_phylink_mac_config,
+ .phylink_mac_link_up = ksz_phylink_mac_link_up,
+ .phylink_mac_link_down = ksz_mac_link_down,
+ .port_enable = ksz_enable_port,
+ .set_ageing_time = ksz_set_ageing_time,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .get_pause_stats = ksz_get_pause_stats,
+ .port_change_mtu = ksz_change_mtu,
+ .port_max_mtu = ksz_max_mtu,
+};
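/* The ops table above is shared by every supported chip; per-chip
 * behavior is reached through dev->dev_ops. A compilable sketch of
 * that dispatch pattern (generic C, hypothetical hook name), matching
 * the thin wrappers such as ksz_port_vlan_add() earlier in this file:
 */
#include <errno.h>

struct chip_ops {
	int (*vlan_add)(void *dev, int port); /* hypothetical hook */
};

static int wrap_vlan_add(const struct chip_ops *ops, void *dev, int port)
{
	if (!ops->vlan_add)
		return -EOPNOTSUPP; /* chip does not implement the hook */
	return ops->vlan_add(dev, port);
}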
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
@@ -356,6 +2844,7 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
+ ds->ops = &ksz_switch_ops;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
@@ -371,13 +2860,51 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops)
+static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
+ struct device_node *port_dn)
{
+ phy_interface_t phy_mode = dev->ports[port_num].interface;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1) {
+ dev_warn(dev->dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port_num);
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ dev->ports[port_num].rgmii_rx_val = rx_delay;
+ dev->ports[port_num].rgmii_tx_val = tx_delay;
+}
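/* A worked example of the fallback above (standalone C, not driver
 * code): with phy-mode = "rgmii-id" and neither *-internal-delay-ps
 * property present, both directions default to 2000 ps; plain "rgmii"
 * would leave both at 0.
 */
#include <stdio.h>

enum { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

int main(void)
{
	int phy_mode = RGMII_ID;          /* from the "phy-mode" property */
	int rx_delay = -1, tx_delay = -1; /* delay properties absent */

	if (rx_delay == -1 && tx_delay == -1) {
		if (phy_mode == RGMII_RXID || phy_mode == RGMII_ID)
			rx_delay = 2000;
		if (phy_mode == RGMII_TXID || phy_mode == RGMII_ID)
			tx_delay = 2000;
	}
	if (rx_delay < 0)
		rx_delay = 0;
	if (tx_delay < 0)
		tx_delay = 0;

	printf("rx %d ps, tx %d ps\n", rx_delay, tx_delay); /* 2000, 2000 */
	return 0;
}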
+
+int ksz_switch_register(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *info;
struct device_node *port, *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
+ int i;
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
@@ -399,19 +2926,57 @@ int ksz_switch_register(struct ksz_device *dev,
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- dev->dev_ops = ops;
+ ret = ksz_switch_detect(dev);
+ if (ret)
+ return ret;
- if (dev->dev_ops->detect(dev))
- return -EINVAL;
+ info = ksz_lookup_info(dev->chip_id);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ dev->info = info;
+
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->info->dev_name, dev->chip_rev);
+
+ ret = ksz_check_device_id(dev);
+ if (ret)
+ return ret;
+
+ dev->dev_ops = dev->info->ops;
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->info->port_cnt * sizeof(struct ksz_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) * (dev->info->mib_cnt + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
+ }
+
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->info->port_cnt;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
- for (port_num = 0; port_num < dev->port_cnt; ++port_num)
+ for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node, &interface);
@@ -420,20 +2985,31 @@ int ksz_switch_register(struct ksz_device *dev,
ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
- if (ports)
+ if (ports) {
for_each_available_child_of_node(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
of_node_put(port);
+ of_node_put(ports);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
+
+ ksz_parse_rgmii_delay(dev, port_num, port);
}
+ of_node_put(ports);
+ }
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
+ dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-disable");
+ if (dev->synclko_125 && dev->synclko_disable) {
+ dev_err(dev->dev, "inconsistent synclko settings\n");
+ return -EINVAL;
+ }
}
ret = dsa_register_switch(dev->ds);
@@ -443,12 +3019,12 @@ int ksz_switch_register(struct ksz_device *dev,
}
- /* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
+ /* Read MIB counters every 5 seconds to avoid overflow. */
+ dev->mib_read_interval = msecs_to_jiffies(5000);
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ksz_switch_register);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index df8ae59c8525..9cfa179575ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -13,6 +13,11 @@
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
+#include <linux/irq.h>
+
+#define KSZ_MAX_NUM_PORTS 8
+
+struct ksz_device;
struct vlan_table {
u32 table[3];
@@ -22,29 +27,86 @@ struct ksz_port_mib {
struct mutex cnt_mutex; /* structure access */
u8 cnt_ptr;
u64 *counters;
+ struct rtnl_link_stats64 stats64;
+ struct ethtool_pause_stats pause_stats;
+ struct spinlock stats64_lock;
+};
+
+struct ksz_mib_names {
+ int index;
+ char string[ETH_GSTRING_LEN];
+};
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+ u8 port_nirqs;
+ const struct ksz_dev_ops *ops;
+ bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
+ const struct ksz_mib_names *mib_names;
+ int mib_cnt;
+ u8 reg_mib_cnt;
+ const u16 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ const u8 *xmii_ctrl0;
+ const u8 *xmii_ctrl1;
+ int stp_ctrl_reg;
+ int broadcast_ctrl_reg;
+ int multicast_ctrl_reg;
+ int start_ctrl_reg;
+ bool supports_mii[KSZ_MAX_NUM_PORTS];
+ bool supports_rmii[KSZ_MAX_NUM_PORTS];
+ bool supports_rgmii[KSZ_MAX_NUM_PORTS];
+ bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
};
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
struct ksz_port_mib mib;
phy_interface_t interface;
+ u16 max_frame;
+ u32 rgmii_tx_val;
+ u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ struct ksz_irq pirq;
+ u8 num;
};
struct ksz_device {
struct dsa_switch *ds;
struct ksz_platform_data *pdata;
- const char *name;
+ const struct ksz_chip_data *info;
struct mutex dev_mutex; /* device access */
struct mutex regmap_mutex; /* regmap access */
@@ -56,25 +118,18 @@ struct ksz_device {
struct regmap *regmap[3];
void *priv;
+ int irq;
struct gpio_desc *reset_gpio; /* Optional reset GPIO */
/* chip specific data */
u32 chip_id;
- int num_vlans;
- int num_alus;
- int num_statics;
+ u8 chip_rev;
int cpu_port; /* port connected to CPU */
- int cpu_ports; /* port bitmap can be cpu port */
int phy_port_cnt;
- int port_cnt;
- u8 reg_mib_cnt;
- int mib_cnt;
- const struct mib_names *mib_names;
phy_interface_t compat_interface;
- u32 regs_size;
- bool phy_errata_9477;
bool synclko_125;
+ bool synclko_disable;
struct vlan_table *vlan_cache;
@@ -83,10 +138,124 @@ struct ksz_device {
unsigned long mib_read_interval;
u16 mirror_rx;
u16 mirror_tx;
- u32 features; /* chip specific features */
- u32 overrides; /* chip functions set by user */
- u16 host_mask;
u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
+};
+
+/* List of supported models */
+enum ksz_model {
+ KSZ8563,
+ KSZ8795,
+ KSZ8794,
+ KSZ8765,
+ KSZ8830,
+ KSZ9477,
+ KSZ9896,
+ KSZ9897,
+ KSZ9893,
+ KSZ9567,
+ LAN9370,
+ LAN9371,
+ LAN9372,
+ LAN9373,
+ LAN9374,
+};
+
+enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
+
+enum ksz_regs {
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+ P_STP_CTRL,
+ S_START_CTRL,
+ S_BROADCAST_CTRL,
+ S_MULTICAST_CTRL,
+ P_XMII_CTRL_0,
+ P_XMII_CTRL_1,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+ ALU_STAT_WRITE,
+ ALU_STAT_READ,
+ P_MII_TX_FLOW_CTRL,
+ P_MII_RX_FLOW_CTRL,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+ ALU_STAT_INDEX,
+};
+
+enum ksz_xmii_ctrl0 {
+ P_MII_100MBIT,
+ P_MII_10MBIT,
+ P_MII_FULL_DUPLEX,
+ P_MII_HALF_DUPLEX,
+};
+
+enum ksz_xmii_ctrl1 {
+ P_RGMII_SEL,
+ P_RMII_SEL,
+ P_GMII_SEL,
+ P_MII_SEL,
+ P_GMII_1GBIT,
+ P_GMII_NOT_1GBIT,
};
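/* The enums above act as indices into per-chip tables held in
 * ksz_chip_data (regs, masks, shifts, xmii_ctrl0/1), so common code
 * such as ksz_set_xmii() never hard-codes one chip's register layout.
 * A sketch with hypothetical offsets and field encodings (real chips
 * differ, which is the reason for the indirection):
 */
static const unsigned short example_regs[] = {
	[P_XMII_CTRL_0] = 0x0300, /* hypothetical per-port offset */
	[P_XMII_CTRL_1] = 0x0301, /* hypothetical per-port offset */
};

static const unsigned char example_xmii_ctrl1[] = {
	[P_RGMII_SEL] = 0x0, /* hypothetical encodings of the */
	[P_RMII_SEL]  = 0x1, /* 2-bit interface-select field */
	[P_GMII_SEL]  = 0x2,
	[P_MII_SEL]   = 0x3,
};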
struct alu_struct {
@@ -109,63 +278,77 @@ struct alu_struct {
};
struct ksz_dev_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
+ int (*vlan_filtering)(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+ int (*vlan_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*mirror_add)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*fdb_add)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_del)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_dump)(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*mdb_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*mdb_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ void (*get_caps)(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+ int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*max_mtu)(struct ksz_device *dev, int port);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
+ void (*phylink_mac_config)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+ void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ void (*config_cpu_port)(struct dsa_switch *ds);
+ int (*enable_stp_addr)(struct ksz_device *dev);
+ int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
+int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
-int ksz8_switch_register(struct ksz_device *dev);
-int ksz9477_switch_register(struct ksz_device *dev);
-
-void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
-
-/* Common DSA access functions */
-
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge);
-void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+bool ksz_get_gbit(struct ksz_device *dev, int port);
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit);
+extern const struct ksz_chip_data ksz_switch_chips[];
/* Common register access functions */
@@ -174,6 +357,10 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[0], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -183,6 +370,10 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[1], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -192,6 +383,10 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[2], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -202,7 +397,10 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret)
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
*val = (u64)value[0] << 32 | value[1];
return ret;
@@ -210,17 +408,38 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- return regmap_write(dev->regmap[0], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[0], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- return regmap_write(dev->regmap[1], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[1], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- return regmap_write(dev->regmap[2], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[2], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
@@ -235,40 +454,50 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
return regmap_bulk_write(dev->regmap[2], reg, val, 2);
}
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
+{
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
{
- ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
{
- ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
{
- ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
{
- ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
{
- ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
+static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
+ u8 mask, u8 val)
{
- ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ regmap_update_bits(dev->regmap[0],
+ dev->dev_ops->get_port_addr(port, offset),
+ mask, val);
}
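/* ksz_prmw8() relies on regmap_update_bits() being a locked
 * read-modify-write in which only bits set in 'mask' may change. The
 * core operation, as a one-line standalone sketch:
 */
static unsigned char rmw8(unsigned char old, unsigned char mask,
			  unsigned char val)
{
	return (old & ~mask) | (val & mask); /* untouched bits survive */
}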
static inline void ksz_regmap_lock(void *__mtx)
@@ -283,6 +512,79 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8830_CHIP_ID;
+}
+
+static inline bool is_lan937x(struct ksz_device *dev)
+{
+ return dev->chip_id == LAN9370_CHIP_ID ||
+ dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID ||
+ dev->chip_id == LAN9373_CHIP_ID ||
+ dev->chip_id == LAN9374_CHIP_ID;
+}
+
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+/* Switch ID Defines */
+#define REG_CHIP_ID0 0x00
+
+#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
+
+#define KSZ8_PORT_STATUS_0 0x08
+#define KSZ8_PORT_FIBER_MODE BIT(7)
+
+#define SW_CHIP_ID_M GENMASK(7, 4)
+#define KSZ87_CHIP_ID_94 0x6
+#define KSZ87_CHIP_ID_95 0x9
+#define KSZ88_CHIP_ID_63 0x3
+
+#define SW_REV_ID_M GENMASK(7, 4)
+
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+
+/* The driver sets switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames * 67 ms / 100 */
+#define BROADCAST_STORM_VALUE 9969
+
+#define BROADCAST_STORM_RATE_HI 0x07
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
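/* The arithmetic behind the constants above, as a standalone sketch:
 * 148,800 is the minimum-size frame rate at 100 Mbit/s, the chip
 * measures over a 67 ms interval, and (assuming the driver scales the
 * count by BROADCAST_STORM_PROT_RATE, as its setup code does) the
 * programmed threshold is that count taken at 10%.
 */
#include <stdio.h>

int main(void)
{
	unsigned int per_interval = 148800 * 67 / 1000;   /* 9969 frames */
	unsigned int threshold = per_interval * 10 / 100; /* 996 frames */

	printf("%u frames per 67 ms interval, threshold %u\n",
	       per_interval, threshold);
	return 0;
}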
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+
+#define SW_START 0x01
+
+/* xMII configuration */
+#define P_MII_DUPLEX_M BIT(6)
+#define P_MII_100MBIT_M BIT(4)
+
+#define P_GMII_1GBIT_M BIT(6)
+#define P_RGMII_ID_IG_ENABLE BIT(4)
+#define P_RGMII_ID_EG_ENABLE BIT(3)
+#define P_MII_MAC_MODE BIT(2)
+#define P_MII_SEL_M 0x3
+
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
new file mode 100644
index 000000000000..1b6ab891b986
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip ksz series register access through SPI
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_common.h"
+
+#define KSZ8795_SPI_ADDR_SHIFT 12
+#define KSZ8795_SPI_ADDR_ALIGN 3
+#define KSZ8795_SPI_TURNAROUND_SHIFT 1
+
+#define KSZ8863_SPI_ADDR_SHIFT 8
+#define KSZ8863_SPI_ADDR_ALIGN 8
+#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+
+#define KSZ9477_SPI_ADDR_SHIFT 24
+#define KSZ9477_SPI_ADDR_ALIGN 3
+#define KSZ9477_SPI_TURNAROUND_SHIFT 5
+
+KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
+ KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
+ KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
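/* A sketch of the SPI command framing these tables describe, assuming
 * the opcode sits above the register address and the whole prefix is
 * left-shifted by the turnaround bits (KSZ9477 values shown; the
 * register offset is an arbitrary example):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long op = 3;    /* KSZ_SPI_OP_RD */
	unsigned int addr_shift = 24; /* KSZ9477_SPI_ADDR_SHIFT */
	unsigned int turnaround = 5;  /* KSZ9477_SPI_TURNAROUND_SHIFT */
	unsigned int reg = 0x0300;    /* example register */
	unsigned long long cmd = ((op << addr_shift) | reg) << turnaround;

	printf("read command word: 0x%08llx\n", cmd); /* 0x60006000 */
	return 0;
}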
+
+static int ksz_spi_probe(struct spi_device *spi)
+{
+ const struct regmap_config *regmap_config;
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &spi->dev;
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ int i, ret = 0;
+
+ dev = ksz_switch_alloc(&spi->dev, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ if (chip->chip_id == KSZ8830_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8795_CHIP_ID ||
+ chip->chip_id == KSZ8794_CHIP_ID ||
+ chip->chip_id == KSZ8765_CHIP_ID)
+ regmap_config = ksz8795_regmap_config;
+ else
+ regmap_config = ksz9477_regmap_config;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
+ rc = regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&spi->dev,
+ "Failed to initialize regmap%i: %d\n",
+ regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ /* setup spi */
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ dev->irq = spi->irq;
+
+ ret = ksz_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static void ksz_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static void ksz_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id ksz_dt_ids[] = {
+ {
+ .compatible = "microchip,ksz8765",
+ .data = &ksz_switch_chips[KSZ8765]
+ },
+ {
+ .compatible = "microchip,ksz8794",
+ .data = &ksz_switch_chips[KSZ8794]
+ },
+ {
+ .compatible = "microchip,ksz8795",
+ .data = &ksz_switch_chips[KSZ8795]
+ },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9370",
+ .data = &ksz_switch_chips[LAN9370]
+ },
+ {
+ .compatible = "microchip,lan9371",
+ .data = &ksz_switch_chips[LAN9371]
+ },
+ {
+ .compatible = "microchip,lan9372",
+ .data = &ksz_switch_chips[LAN9372]
+ },
+ {
+ .compatible = "microchip,lan9373",
+ .data = &ksz_switch_chips[LAN9373]
+ },
+ {
+ .compatible = "microchip,lan9374",
+ .data = &ksz_switch_chips[LAN9374]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
+
+static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8873" },
+ { "ksz9477" },
+ { "ksz9896" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { "lan9370" },
+ { "lan9371" },
+ { "lan9372" },
+ { "lan9373" },
+ { "lan9374" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+
+static struct spi_driver ksz_spi_driver = {
+ .driver = {
+ .name = "ksz-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = ksz_dt_ids,
+ },
+ .id_table = ksz_spi_ids,
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+ .shutdown = ksz_spi_shutdown,
+};
+
+module_spi_driver(ksz_spi_driver);
+
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9896");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
+MODULE_ALIAS("spi:lan937x");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
new file mode 100644
index 000000000000..8e9e66d6728d
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip lan937x dev ops headers
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+
+#ifndef __LAN937X_CFG_H
+#define __LAN937X_CFG_H
+
+int lan937x_reset_switch(struct ksz_device *dev);
+int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void lan937x_config_cpu_port(struct dsa_switch *ds);
+int lan937x_switch_init(struct ksz_device *dev);
+void lan937x_switch_exit(struct ksz_device *dev);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
new file mode 100644
index 000000000000..7e4f307a0387
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip LAN937X switch driver main logic
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/math.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "lan937x_reg.h"
+#include "ksz_common.h"
+#include "lan937x.h"
+
+static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
+ u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+{
+ u16 data16;
+ int ret;
+
+ /* Enable Phy access through SPI */
+ ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
+ if (ret < 0)
+ return ret;
+
+ /* Allow SPI access */
+ data16 |= VPHY_SPI_INDIRECT_ENABLE;
+
+ return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+}
+
+static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
+{
+ u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
+ u16 temp;
+
+ /* get register address based on the logical port */
+ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
+
+ return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
+}
+
+static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
+ u16 val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port */
+ if (!dev->info->internal_phy[addr])
+ return -EOPNOTSUPP;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write the data to be written to the VPHY reg */
+ ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
+ if (ret < 0)
+ return ret;
+
+ /* Write the Write En and Busy bit */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
+ (VPHY_IND_WRITE | VPHY_IND_BUSY));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to write phy register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
+ u16 *val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port, return 0xffff for non-existent phy */
+ if (!dev->info->internal_phy[addr])
+ return 0xffff;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write Read and Busy bit to start the transaction */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to read phy register\n");
+ return ret;
+ }
+
+ /* Read the VPHY register which has the PHY data */
+ return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
+}
+
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ return lan937x_internal_phy_read(dev, addr, reg, data);
+}
+
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ return lan937x_internal_phy_write(dev, addr, reg, val);
+}
+
+int lan937x_reset_switch(struct ksz_device *dev)
+{
+ u32 data32;
+ int ret;
+
+ /* reset switch */
+ ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto Aging */
+ ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable interrupts */
+ ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
+ if (ret < 0)
+ return ret;
+
+ return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+}
+
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
+ u8 member;
+
+ /* enable tag tail for host port */
+ if (cpu_port)
+ lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_TAIL_TAG_ENABLE, true);
+
+ /* set back pressure for half duplex */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
+ true);
+
+ /* enable 802.1p priority */
+ lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (!dev->info->internal_phy[port])
+ lan937x_port_cfg(dev, port, regs[P_XMII_CTRL_0],
+ masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL],
+ true);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ dev->dev_ops->cfg_port_member(dev, port, member);
+}
+
+void lan937x_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dev->info->cpu_ports & (1 << dp->index)) {
+ dev->cpu_port = dp->index;
+
+ /* enable cpu port */
+ lan937x_port_setup(dev, dp->index, true);
+ }
+ }
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
+ }
+}
+
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+{
+ struct dsa_switch *ds = dev->ds;
+ int ret;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += LAN937X_TAG_LEN;
+
+ if (new_mtu >= FR_MIN_SIZE)
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, true);
+ else
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, false);
+ if (ret < 0) {
+ dev_err(ds->dev, "failed to enable jumbo\n");
+ return ret;
+ }
+
+ /* Write the frame size in PORT_MAX_FR_SIZE register */
+ ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+
+ return 0;
+}
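/* A worked example of the frame-size math above (VLAN_ETH_HLEN = 18,
 * ETH_FCS_LEN = 4, LAN937X_TAG_LEN = 2): a standard 1500-byte MTU
 * yields a 1522-byte frame limit on a user port and 1524 on the CPU
 * port, so both already reach the FR_MIN_SIZE (1522) jumbo threshold.
 */
#include <stdio.h>

int main(void)
{
	int mtu = 1500, is_cpu_port = 1;
	int frame = mtu + 18 + 4; /* VLAN-tagged header + FCS */

	if (is_cpu_port)
		frame += 2;       /* tail tag */
	printf("frame %d bytes, jumbo %s\n", frame,
	       frame >= 1522 ? "enabled" : "disabled");
	return 0;
}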
+
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u32 value;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
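/* A worked example of the age-period split above: FIELD_GET() with
 * GENMASK(7, 0) and GENMASK(19, 8) slices the seconds value into a low
 * byte and a high 12-bit part for the two registers. 300 s is the
 * bridge layer's default ageing time.
 */
#include <stdio.h>

int main(void)
{
	unsigned int msecs = 300000;
	unsigned int secs = msecs / 1000;      /* 300 */
	unsigned int lo = secs & 0xff;         /* 0x2c -> REG_SW_AGE_PERIOD__1 */
	unsigned int hi = (secs >> 8) & 0xfff; /* 0x001 -> REG_SW_AGE_PERIOD__2 */

	printf("lo 0x%02x, hi 0x%03x\n", lo, hi);
	return 0;
}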
+
+static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
+ u16 reg, u8 val)
+{
+ u16 data16;
+
+ ksz_pread16(dev, port, reg, &data16);
+
+ /* Update tune Adjust */
+ data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
+ ksz_pwrite16(dev, port, reg, data16);
+
+ /* write DLL reset to take effect */
+ data16 |= PORT_DLL_RESET;
+ ksz_pwrite16(dev, port, reg, data16);
+}
+
+static void lan937x_set_rgmii_tx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ /* Apply different codes based on the ports as per characterization
+ * results
+ */
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_TX_DELAY_2NS :
+ RGMII_2_TX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_5, val);
+}
+
+static void lan937x_set_rgmii_rx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_RX_DELAY_2NS :
+ RGMII_2_RX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_4, val);
+}
+
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_100FD;
+
+ if (dev->info->supports_rgmii[port]) {
+ /* RGMII-capable ports */
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10 | MAC_1000FD;
+ }
+}
+
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (p->rgmii_tx_val) {
+ lan937x_set_rgmii_tx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii tx delay for the port %d\n",
+ port);
+ }
+
+ if (p->rgmii_rx_val) {
+ lan937x_set_rgmii_rx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii rx delay for the port %d\n",
+ port);
+ }
+}
+
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
+int lan937x_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ /* enable Indirect Access from SPI to the VPHY registers */
+ ret = lan937x_enable_spi_indirect_access(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable spi indirect access");
+ return ret;
+ }
+
+ /* VLAN awareness is a global setting, so mixed per-port
+ * vlan filtering is not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable aggressive back off for half duplex & UNH mode */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
+ (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
+ true);
+
+ /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
+ * packets when 16 or more collisions occur
+ */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+
+ /* enable global MIB counter freeze function */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
+ /* disable CLK125 & CLK25, 1: disable, 0: enable */
+ lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+
+ return 0;
+}
+
+void lan937x_teardown(struct dsa_switch *ds)
+{
+
+}
+
+void lan937x_switch_exit(struct ksz_device *dev)
+{
+ lan937x_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
new file mode 100644
index 000000000000..5bc16a4c4441
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X switch register definitions
+ * Copyright (C) 2019-2021 Microchip Technology Inc.
+ */
+#ifndef __LAN937X_REG_H
+#define __LAN937X_REG_H
+
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
+/* 0 - Operation */
+#define REG_GLOBAL_CTRL_0 0x0007
+
+#define SW_PHY_REG_BLOCK BIT(7)
+#define SW_FAST_MODE BIT(3)
+#define SW_FAST_MODE_OVERRIDE BIT(2)
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+#define OVER_TEMP_INT BIT(28)
+#define HSR_INT BIT(27)
+#define PIO_INT BIT(26)
+#define POR_READY_INT BIT(25)
+
+#define SWITCH_INT_MASK \
+ (LUE_INT | TRIG_TS_INT | APB_TIMEOUT_INT | OVER_TEMP_INT | HSR_INT | \
+ PIO_INT | POR_READY_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_CLK125_ENB BIT(1)
+#define SW_CLK25_ENB BIT(0)
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_OVER_TEMP_ENABLE BIT(2)
+#define SW_RESET BIT(1)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_S 3
+#define SW_RESV_MCAST_ENABLE BIT(2)
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_SRC_ADDR_FILTER BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+#define SW_SHORT_IFG BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+/* 4 - LUE */
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define REG_SW_ALU_VAL_B 0x0424
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP 0xFF
+
+/* 7 - VPhy */
+#define REG_VPHY_IND_ADDR__2 0x075C
+#define REG_VPHY_IND_DATA__2 0x0760
+
+#define REG_VPHY_IND_CTRL__2 0x0768
+
+#define VPHY_IND_WRITE BIT(1)
+#define VPHY_IND_BUSY BIT(0)
+
+#define REG_VPHY_SPECIAL_CTRL__2 0x077C
+#define VPHY_SMI_INDIRECT_ENABLE BIT(15)
+#define VPHY_SW_LOOPBACK BIT(14)
+#define VPHY_MDIO_INTERNAL_ENABLE BIT(13)
+#define VPHY_SPI_INDIRECT_ENABLE BIT(12)
+#define VPHY_PORT_MODE_M 0x3
+#define VPHY_PORT_MODE_S 8
+#define VPHY_MODE_RGMII 0
+#define VPHY_MODE_MII_PHY 1
+#define VPHY_MODE_SGMII 2
+#define VPHY_MODE_RMII_PHY 3
+#define VPHY_SW_COLLISION_TEST BIT(7)
+#define VPHY_SPEED_DUPLEX_STAT_M 0x7
+#define VPHY_SPEED_DUPLEX_STAT_S 2
+#define VPHY_SPEED_1000 BIT(4)
+#define VPHY_SPEED_100 BIT(3)
+#define VPHY_FULL_DUPLEX BIT(2)
+
+/* Port Registers */
+
+/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_MAC_REMOTE_LOOPBACK BIT(6)
+#define PORT_K2L_INSERT_ENABLE BIT(5)
+#define PORT_K2L_DEBUG_ENABLE BIT(4)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+/* 1 - Phy */
+#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+
+/* 3 - xMII */
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define PORT_MII_SEL_EDGE BIT(5)
+
+#define REG_PORT_XMII_CTRL_4 0x0304
+#define REG_PORT_XMII_CTRL_5 0x0306
+
+#define PORT_DLL_RESET BIT(15)
+#define PORT_TUNE_ADJ GENMASK(13, 7)
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+#define PORT_CHECK_LENGTH BIT(2)
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_PACKET BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define PORT_MAX_FR_SIZE 0x404
+#define FR_MIN_SIZE 1522
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+
+/* The port number as per the datasheet */
+#define RGMII_2_PORT_NUM 5
+#define RGMII_1_PORT_NUM 6
+
+#define LAN937X_RGMII_2_PORT (RGMII_2_PORT_NUM - 1)
+#define LAN937X_RGMII_1_PORT (RGMII_1_PORT_NUM - 1)
+
+#define RGMII_1_TX_DELAY_2NS 2
+#define RGMII_2_TX_DELAY_2NS 0
+#define RGMII_1_RX_DELAY_2NS 0x1B
+#define RGMII_2_RX_DELAY_2NS 0x14
+
+#define LAN937X_TAG_LEN 2
+
+#endif
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b82512e5b33b..e74c6b406172 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -24,6 +24,11 @@
#include "mt7530.h"
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0x00, "TxDrop"),
@@ -501,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -587,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -1033,6 +1041,7 @@ static int
mt7530_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1041,7 +1050,11 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
* restore the port matrix if the port is the member of a certain
* bridge.
*/
- priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ if (dsa_port_is_user(dp)) {
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
+ }
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
@@ -1186,10 +1199,12 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- u32 port_bitmap = BIT(MT7530_CPU_PORT);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1266,9 +1281,12 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
* the CPU port get out of VLAN filtering mode.
*/
if (all_user_ports_removed) {
- mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
PCR_MATRIX(dsa_user_ports(priv->ds)));
- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+ mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
| PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
@@ -1306,6 +1324,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1334,8 +1353,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
*/
if (priv->ports[port].enable)
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(MT7530_CPU_PORT)));
- priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ PCR_MATRIX(BIT(cpu_dp->index)));
+ priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
@@ -1349,7 +1368,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1365,7 +1385,8 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1416,7 +1437,8 @@ err:
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1442,7 +1464,8 @@ mt7530_port_mdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1498,6 +1521,9 @@ static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1505,7 +1531,7 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
* for becoming a VLAN-aware port.
*/
mt7530_port_set_vlan_aware(ds, port);
- mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ mt7530_port_set_vlan_aware(ds, cpu_dp->index);
} else {
mt7530_port_set_vlan_unaware(ds, port);
}
@@ -1517,11 +1543,11 @@ static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
{
+ struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
u8 new_members;
u32 val;
- new_members = entry->old_members | BIT(entry->port) |
- BIT(MT7530_CPU_PORT);
+ new_members = entry->old_members | BIT(entry->port);
/* Validate the entry with independent learning, create egress tag per
* VLAN and joining the port as one of the port members.
@@ -1532,22 +1558,20 @@ mt7530_hw_vlan_add(struct mt7530_priv *priv,
/* Decide whether adding tag or not for those outgoing packets from the
* port inside the VLAN.
- */
- val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
- MT7530_VLAN_EGRESS_TAG;
- mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(entry->port),
- ETAG_CTRL_P(entry->port, val));
-
- /* CPU port is always taken as a tagged port for serving more than one
+ * CPU port is always taken as a tagged port for serving more than one
* VLANs across and also being applied with egress type stack mode for
* that VLAN tags would be appended after hardware special tag used as
* DSA tag.
*/
+ if (dsa_port_is_cpu(dp))
+ val = MT7530_VLAN_EGRESS_STACK;
+ else if (entry->untagged)
+ val = MT7530_VLAN_EGRESS_UNTAG;
+ else
+ val = MT7530_VLAN_EGRESS_TAG;
mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
- ETAG_CTRL_P(MT7530_CPU_PORT,
- MT7530_VLAN_EGRESS_STACK));
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
}
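
The egress tag attribute is now selected once per member port instead of being written twice: CPU ports always get stacked tagging, user ports get tag or untag depending on the VLAN entry. A compilable sketch of just that selection, with placeholder enum values:

#include <stdbool.h>
#include <stdio.h>

enum egress { EG_UNTAG, EG_TAG, EG_STACK };     /* illustrative values */

static enum egress egress_mode(bool is_cpu_port, bool untagged)
{
	if (is_cpu_port)
		return EG_STACK;        /* keep the VLAN tag after the DSA tag */
	return untagged ? EG_UNTAG : EG_TAG;
}

int main(void)
{
	printf("cpu=%d user-untagged=%d user-tagged=%d\n",
	       egress_mode(true, false), egress_mode(false, true),
	       egress_mode(false, false));      /* 2 0 1 */
	return 0;
}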
static void
@@ -1566,11 +1590,7 @@ mt7530_hw_vlan_del(struct mt7530_priv *priv,
return;
}
- /* If certain member apart from CPU port is still alive in the VLAN,
- * the entry would be kept valid. Otherwise, the entry is got to be
- * disabled.
- */
- if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+ if (new_members) {
val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
@@ -1709,7 +1729,7 @@ static int mt753x_mirror_port_set(unsigned int id, u32 val)
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
int monitor_port;
@@ -2074,7 +2094,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
if (priv->irq)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
@@ -2088,11 +2108,12 @@ static int
mt7530_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
+ struct device_node *dn = NULL;
struct device_node *phy_node;
struct device_node *mac_np;
struct mt7530_dummy_poll p;
phy_interface_t interface;
- struct device_node *dn;
+ struct dsa_port *cpu_dp;
u32 id, val;
int ret, i;
@@ -2100,7 +2121,19 @@ mt7530_setup(struct dsa_switch *ds)
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
- dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ dn = cpu_dp->master->dev.of_node->parent;
+ /* It doesn't matter which CPU port is found first,
+ * their masters should share the same parent OF node
+ */
+ break;
+ }
+
+ if (!dn) {
+ dev_err(ds->dev, "parent OF node of DSA master not found");
+ return -EINVAL;
+ }
+
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
@@ -2224,6 +2257,7 @@ mt7530_setup(struct dsa_switch *ds)
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
+ of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);
@@ -2261,6 +2295,7 @@ mt7531_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
struct mt7530_dummy_poll p;
+ struct dsa_port *cpu_dp;
u32 val, id;
int ret, i;
@@ -2294,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ /* all MACs must be forced link-down before sw reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -2333,8 +2374,11 @@ mt7531_setup(struct dsa_switch *ds)
CORE_PLL_GROUP4, val);
/* BPDU to CPU port */
- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
- BIT(MT7530_CPU_PORT));
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(cpu_dp->index));
+ break;
+ }
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
MT753X_BPDU_CPU_ONLY);
@@ -2384,35 +2428,30 @@ mt7531_setup(struct dsa_switch *ds)
return 0;
}
-static bool
-mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
-
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
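
In the get_caps model, a port reports the full set of interface modes it can support once, as a bitmap, instead of validating each requested state. An illustrative standalone version of the same shape; the mode IDs and port roles are invented for the example, not the kernel's PHY_INTERFACE_MODE_* values.

#include <stdio.h>

enum { MODE_MII, MODE_GMII, MODE_RGMII, MODE_SGMII };

static void set_mode(unsigned long *bm, int mode) { *bm |= 1UL << mode; }
static int  has_mode(unsigned long bm, int mode)  { return !!(bm & (1UL << mode)); }

static void get_caps(int port, unsigned long *supported)
{
	if (port >= 0 && port <= 4) {           /* internal PHYs: GMII only */
		set_mode(supported, MODE_GMII);
	} else if (port == 5) {                 /* flexible port */
		set_mode(supported, MODE_MII);
		set_mode(supported, MODE_GMII);
		set_mode(supported, MODE_RGMII);
	} else if (port == 6) {                 /* CPU port */
		set_mode(supported, MODE_RGMII);
	}
}

int main(void)
{
	unsigned long bm = 0;

	get_caps(5, &bm);
	printf("port 5: rgmii=%d sgmii=%d\n",
	       has_mode(bm, MODE_RGMII), has_mode(bm, MODE_SGMII)); /* 1 0 */
	return 0;
}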
static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
@@ -2420,42 +2459,35 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
-static bool
-mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port))
- return phy_interface_mode_is_rgmii(state->interface);
+ if (mt7531_is_rgmii_port(priv, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
fallthrough;
+
case 6: /* 1st cpu port supports sgmii/8023z only */
- if (state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface))
- return false;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
-}
-
-static bool
-mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->phy_mode_supported(ds, port, state);
}
static int
@@ -2528,30 +2560,11 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
return 0;
}
-static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
- unsigned long *supported)
+static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
{
- /* Port5 supports ethier RGMII or SGMII.
- * Port6 supports SGMII only.
- */
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
- phylink_set(supported, 2500baseX_Full);
- phylink_set(supported, 2500baseT_Full);
- }
-}
-
-static void
-mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
-{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
unsigned int val;
/* For adjusting speed and duplex of SGMII force mode. */
@@ -2577,6 +2590,9 @@ mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
/* MT7531 SGMII 1G force mode can only work in full duplex mode,
* no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
+ *
+ * The speed check is unnecessary as the MAC capabilities apply
+ * this restriction. --rmk
*/
if ((speed == SPEED_10 || speed == SPEED_100) &&
duplex != DUPLEX_FULL)
@@ -2652,9 +2668,10 @@ static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
return 0;
}
-static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+static void mt7531_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 val;
/* Only restart AN when AN is enabled */
@@ -2691,9 +2708,6 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
return mt7531_sgmii_setup_mode_force(priv, port, interface);
default:
return -EINVAL;
@@ -2711,6 +2725,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return priv->info->mac_port_config(ds, port, mode, state->interface);
}
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &priv->pcs[port].pcs;
+
+ default:
+ return NULL;
+ }
+}
+
static void
mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
@@ -2718,9 +2750,6 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct mt7530_priv *priv = ds->priv;
u32 mcr_cur, mcr_new;
- if (!mt753x_phy_mode_supported(ds, port, state))
- goto unsupported;
-
switch (port) {
case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
@@ -2754,13 +2783,6 @@ unsupported:
return;
}
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
@@ -2775,17 +2797,6 @@ unsupported:
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void
-mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_an_restart)
- return;
-
- priv->info->mac_pcs_an_restart(ds, port);
-}
-
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -2795,16 +2806,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_link_up)
- return;
-
- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+ if (pcs->ops->pcs_link_up)
+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
}
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -2817,8 +2825,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
-
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
/* MT753x MAC works in 1G full duplex mode for all up-clocked
@@ -2846,7 +2852,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr |= PMCR_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) {
+ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_EEE1G;
@@ -2880,8 +2886,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
@@ -2898,81 +2902,54 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
+ interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
return 0;
}
-static void
-mt7530_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
-}
-
-static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
- mt7531_sgmii_validate(priv, port, supported);
-}
+ /* This switch only supports full-duplex at 1Gbps */
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
-static void
-mt753x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct mt7530_priv *priv = ds->priv;
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !mt753x_phy_mode_supported(ds, port, state)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port))
+ config->mac_capabilities |= MAC_2500FD;
- if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
- !phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, Autoneg);
- }
-
- /* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
- phylink_set(mask, 1000baseT_Full);
-
- priv->info->mac_port_validate(ds, port, mask);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ priv->info->mac_port_get_caps(ds, port, config);
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* Autonegotiation is not supported in TRGMII or 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ return 0;
}
-static int
-mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
- if (port < 0 || port >= MT7530_NUM_PORTS)
- return -EINVAL;
-
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
@@ -2999,8 +2976,6 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static int
@@ -3012,6 +2987,7 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE);
if (state->interface == PHY_INTERFACE_MODE_SGMII &&
(status & MT7531_SGMII_AN_ENABLE)) {
val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
@@ -3042,33 +3018,89 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
-static int
-mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void
+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ unsigned int val;
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- return mt7531_sgmii_pcs_get_state_an(priv, port, state);
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(val & MT7531_SGMII_LINK_STATUS);
+ if (!state->link)
+ return;
- return -EOPNOTSUPP;
+ state->an_complete = state->link;
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+
+ state->duplex = DUPLEX_FULL;
+ state->pause = MLO_PAUSE_NONE;
}
-static int
-mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ mt7531_sgmii_pcs_get_state_an(priv, port, state);
+ return;
+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) ||
+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ mt7531_sgmii_pcs_get_state_inband(priv, port, state);
+ return;
+ }
- return priv->info->mac_port_get_state(ds, port, state);
+ state->link = false;
}
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
+
+static const struct phylink_pcs_ops mt7531_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7531_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7531_pcs_an_restart,
+ .pcs_link_up = mt7531_pcs_link_up,
+};
+
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int ret = priv->info->sw_setup(ds);
+ int i, ret;
+
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ if (mt753x_is_mac_port(i))
+ priv->pcs[i].pcs.poll = 1;
+ }

+ ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3139,10 +3171,9 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
- .phylink_validate = mt753x_phylink_validate,
- .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
.phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
@@ -3152,39 +3183,34 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
static const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
+ .pcs_ops = &mt7531_pcs_ops,
.sw_setup = mt7531_setup,
.phy_read = mt7531_ind_phy_read,
.phy_write = mt7531_ind_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
- .phy_mode_supported = mt7531_phy_mode_supported,
- .mac_port_validate = mt7531_mac_port_validate,
- .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
- .mac_pcs_an_restart = mt7531_sgmii_restart_an,
- .mac_pcs_link_up = mt7531_sgmii_link_up_force,
},
};
@@ -3241,9 +3267,8 @@ mt7530_probe(struct mdio_device *mdiodev)
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
!priv->info->phy_read || !priv->info->phy_write ||
- !priv->info->phy_mode_supported ||
- !priv->info->mac_port_validate ||
- !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ !priv->info->mac_port_get_caps ||
+ !priv->info->mac_port_config)
return -EINVAL;
priv->id = priv->info->id;
@@ -3306,8 +3331,6 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mt7530_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 91508e2feef9..e8d966435350 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -8,7 +8,6 @@
#define MT7530_NUM_PORTS 7
#define MT7530_NUM_PHYS 5
-#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
@@ -374,6 +373,7 @@ enum mt7530_vlan_port_acc_frm {
#define MT7531_SGMII_LINK_STATUS BIT(18)
#define MT7531_SGMII_AN_ENABLE BIT(12)
#define MT7531_SGMII_AN_RESTART BIT(9)
+#define MT7531_SGMII_AN_COMPLETE BIT(21)
/* Register for SGMII PCS_SPPED_ABILITY */
#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
@@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigned int p5_interface)
struct mt7530_priv;
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
@@ -752,36 +758,27 @@ struct mt7530_priv;
* port
* @mac_port_validate: Holding the way to set an additional validation type for a
* certain MAC port
- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
- * MAC port
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a
- * certain MAC port
- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs
- * of the certain MAC port
*/
struct mt753x_info {
enum mt753x_id id;
+ const struct phylink_pcs_ops *pcs_ops;
+
int (*sw_setup)(struct dsa_switch *ds);
int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
- bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_get_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
int (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -823,6 +820,7 @@ struct mt7530_priv {
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect register access among processes */
struct mutex reg_mutex;
int irq;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..fdda62d6eb16 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
@@ -294,8 +297,6 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
return;
dsa_unregister_switch(ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 58ca684d73f7..2479be3a1e35 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -86,12 +86,16 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- /* There's no bus specific operation to wait for a mask */
- for (i = 0; i < 16; i++) {
+ /* There's no bus-specific operation to wait for a mask. Even
+ * if the initial poll takes longer than 50ms, always do at
+ * least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
@@ -99,7 +103,10 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
if ((data & mask) == val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout while waiting for switch\n");
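
The rewritten loop bounds the poll by a 50ms deadline but always allows at least one retry, so a scheduling stall during the first read cannot produce a spurious timeout. A userspace approximation of the same policy against CLOCK_MONOTONIC; the "register read" is faked and becomes ready on the third poll.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool read_ready(void)            /* stand-in for the register read */
{
	static int calls;
	return ++calls >= 3;
}

static bool before(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec < b->tv_sec ||
	       (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
}

int main(void)
{
	struct timespec now, deadline;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &now);
	deadline = now;
	deadline.tv_nsec += 50 * 1000000L;      /* 50ms budget */
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	/* Same shape as the kernel loop: poll while the deadline has not
	 * passed, and unconditionally permit the first two iterations.
	 */
	for (i = 0; before(&now, &deadline) || i < 2; i++) {
		if (read_ready()) {
			printf("ready after %d polls\n", i + 1);
			return 0;
		}
		clock_gettime(CLOCK_MONOTONIC, &now);
	}

	printf("timed out\n");
	return 1;
}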
@@ -442,9 +449,6 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
- if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
- mode = chip->info->ops->port_max_speed_mode(port);
-
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -563,133 +567,276 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static const u8 mv88e6185_phy_interface_modes[] = {
+ [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
+};
+
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ u8 cmode = chip->ports[port].cmode;
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+
+ if (mv88e6xxx_phy_is_internal(chip->ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
}
}
-static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- /* FIXME: if the port is in 1000Base-X mode, then it only supports
- * 1000M FD speeds. In this case, CMODE will indicate 5.
+ u8 cmode = chip->ports[port].cmode;
+
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
+static const u8 mv88e6xxx_phy_interface_modes[] = {
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ /* higher interface modes are not needed here, since ports supporting
+ * them are writable, and so the supported interfaces are filled in the
+ * corresponding .phylink_get_caps() implementation below
*/
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+};
- mv88e6065_phylink_validate(chip, port, mask, state);
+static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
+{
+ if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
+ mv88e6xxx_phy_interface_modes[cmode])
+ __set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
+ else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
+ phy_interface_set_rgmii(supported);
}
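
The cmode-to-interface translation is table-driven with a bounds check, and RGMII is handled outside the table because it expands to several delay variants. A self-contained sketch of that translation shape; the cmode numbers and interface IDs are illustrative.

#include <stdio.h>

enum { IF_NONE, IF_MII, IF_GMII, IF_SGMII, IF_RGMII };

#define CMODE_RGMII 0x7                         /* made-up encoding */

static const unsigned char cmode_to_if[] = {
	[0x1] = IF_MII,
	[0x2] = IF_GMII,
	[0x9] = IF_SGMII,                       /* RGMII deliberately absent */
};

static int translate_cmode(unsigned int cmode)
{
	if (cmode < sizeof(cmode_to_if) / sizeof(cmode_to_if[0]) &&
	    cmode_to_if[cmode])
		return cmode_to_if[cmode];
	if (cmode == CMODE_RGMII)               /* special-cased, as above */
		return IF_RGMII;
	return IF_NONE;
}

int main(void)
{
	printf("%d %d %d\n", translate_cmode(0x2), translate_cmode(0x7),
	       translate_cmode(0x5));           /* GMII, RGMII, none */
	return 0;
}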
-static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 5)
- phylink_set(mask, 2500baseX_Full);
+ unsigned long *supported = config->supported_interfaces;
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
-static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ u16 reg, val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ /* If PHY_DETECT is zero, then we are not in auto-media mode */
+ if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
+ return 0xf;
+
+ val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ if (err)
+ return err;
+
+ /* Restore PHY_DETECT value */
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
+
+ return val & MV88E6XXX_PORT_STS_CMODE_MASK;
}
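
To peek at port 4's underlying serdes cmode while auto-media mode hides it, the helper saves the status register, clears PHY_DETECT, reads the now-exposed cmode, then restores the saved value. The same save/modify/read/restore sequence, reduced to a plain variable standing in for the register (bit positions are illustrative):

#include <stdio.h>

#define PHY_DETECT (1u << 12)                   /* illustrative bit layout */
#define CMODE_MASK 0xfu

static unsigned int port_sts = PHY_DETECT | 0x9;    /* fake hardware reg */

static unsigned int reg_read(void)      { return port_sts; }
static void reg_write(unsigned int v)   { port_sts = v; }

int main(void)
{
	unsigned int saved = reg_read();
	unsigned int cmode;

	if (!(saved & PHY_DETECT)) {
		printf("not in auto-media mode\n");
		return 0;
	}

	reg_write(saved & ~PHY_DETECT);         /* expose the raw cmode */
	cmode = reg_read() & CMODE_MASK;
	reg_write(saved);                       /* put PHY_DETECT back */

	printf("serdes cmode = 0x%x\n", cmode); /* 0x9 */
	return 0;
}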
-static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
+ unsigned long *supported = config->supported_interfaces;
+ int err, cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+ goto unlock;
+
+ cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+unlock:
+ mv88e6xxx_reg_unlock(chip);
}
+}
+
+static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on port 5 */
+ if (port == 5) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities |= MAC_2500FD;
+ }
}
-static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ /* No ethtool bits for 200Mbps */
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on ports 9 and 10 */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
}
+}
+
+static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ mv88e6390_phylink_get_caps(chip, port, config);
- mv88e6390_phylink_validate(chip, port, mask, state);
+ /* For the 6x90X, ports 2-7 can be in automedia mode.
+ * (Note that 6x90 doesn't support RXAUI or XAUI).
+ *
+ * Port 2 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 3-4 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * Port 5 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 6-7 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * For now, be permissive (as the old code was) and allow 1000BASE-X
+ * on ports 2..7.
+ */
+ if (port >= 2 && port <= 7)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* The C_Mode field can also be programmed for 10G speeds */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI, supported);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
+
+ config->mac_capabilities |= MAC_10000FD;
+ }
}
-static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
+ unsigned long *supported = config->supported_interfaces;
bool is_6191x =
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
- if (((port == 0 || port == 9) && !is_6191x) || port == 10) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- }
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* The C_Mode field can be programmed for ports 0, 9 and 10 */
+ if (port == 0 || port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ /* 6191X supports >1G modes only on port 10 */
+ if (!is_6191x || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ /* FIXME: USXGMII is not supported yet */
+ /* __set_bit(PHY_INTERFACE_MODE_USXGMII, supported); */
+
+ config->mac_capabilities |= MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+ }
+ }
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
}
-static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mv88e6xxx_chip *chip = ds->priv;
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set_port_modes(mask);
-
- if (chip->info->ops->phylink_validate)
- chip->info->ops->phylink_validate(chip, port, mask, state);
+ chip->info->ops->phylink_get_caps(chip, port, config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ /* Internal ports need GMII for PHYLIB */
+ if (mv88e6xxx_phy_is_internal(ds, port))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
}
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
@@ -989,7 +1136,7 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mv88e6xxx_atu_vtu_stats_strings[i],
ETH_GSTRING_LEN);
}
@@ -1283,8 +1430,15 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
pvlan = 0;
- /* Frames from user ports can egress any local DSA links and CPU ports,
- * as well as any local member of their bridge group.
+ /* Frames from standalone user ports can only egress on the
+ * upstream port.
+ */
+ if (!dsa_port_bridge_dev_get(dp))
+ return BIT(dsa_switch_upstream_port(ds));
+
+ /* Frames from bridged user ports can egress any local DSA
+ * links and CPU ports, as well as any local member of their
+ * bridge group.
*/
dsa_switch_for_each_port(other_dp, ds)
if (other_dp->type == DSA_PORT_TYPE_CPU ||
@@ -1476,15 +1630,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
- if (dp && dp->lag_dev) {
+ if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
- * the LAG ID as the port number.
+ * the LAG ID (one-based) as the port number
+ * (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
- port = dsa_lag_id(dst, dp->lag_dev);
+ port = dsa_port_lag_id_get(dp) - 1;
}
}
@@ -1517,24 +1672,31 @@ static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
return 0;
}
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 fid)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (dsa_to_port(ds, port)->lag_dev)
+ if (dsa_to_port(chip->ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
- return;
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+ err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
+ dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+ port, err);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1616,21 +1778,11 @@ static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- int i, err;
- u16 fid;
-
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
- /* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, &fid);
- if (err)
- return err;
-
- set_bit(fid, fid_bitmap);
- }
-
- /* Set every FID bit used by the VLAN entries */
+ /* Every FID has an associated VID, so walking the VTU
+ * will discover the full set of FIDs in use.
+ */
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
}
@@ -1643,10 +1795,7 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
if (err)
return err;
- /* The reset value 0x000 is used to indicate that multiple address
- * databases are not needed. Return the next positive available.
- */
- *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
+ *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
@@ -1654,6 +1803,187 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
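
Since every in-use FID is now guaranteed to be reachable through the VTU walk, FID 0 no longer needs to be reserved and allocation reduces to a first-zero-bit search over the busy bitmap. A toy version using a single 64-bit word as the bitmap:

#include <stdio.h>

#define N_FID 64

static int alloc_fid(unsigned long long used)
{
	int fid;

	for (fid = 0; fid < N_FID; fid++)
		if (!(used & (1ULL << fid)))
			return fid;             /* first zero bit, 0 included */
	return -1;                              /* -ENOSPC in the driver */
}

int main(void)
{
	/* FIDs 0, 1 and 3 already mapped in the VTU */
	unsigned long long used = (1ULL << 0) | (1ULL << 1) | (1ULL << 3);

	printf("new fid = %d\n", alloc_fid(used));      /* prints 2 */
	return 0;
}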
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ if (!chip->info->ops->stu_loadpurge)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->stu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_stu_entry stu = {
+ .valid = true,
+ .sid = 0
+ };
+
+ if (!mv88e6xxx_has_stu(chip))
+ return 0;
+
+ /* Make sure that SID 0 is always valid. This is used by VTU
+ * entries that do not make use of the STU, e.g. when creating
+ * a VLAN upper on a port that is also part of a VLAN
+ * filtering bridge.
+ */
+ return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+ struct mv88e6xxx_mst *mst;
+
+ __set_bit(0, busy);
+
+ list_for_each_entry(mst, &chip->msts, node)
+ __set_bit(mst->stu.sid, busy);
+
+ *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+ return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ struct mv88e6xxx_mst *mst, *tmp;
+ int err;
+
+ if (!sid)
+ return 0;
+
+ list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+ if (mst->stu.sid != sid)
+ continue;
+
+ if (!refcount_dec_and_test(&mst->refcnt))
+ return 0;
+
+ mst->stu.valid = false;
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err) {
+ refcount_set(&mst->refcnt, 1);
+ return err;
+ }
+
+ list_del(&mst->node);
+ kfree(mst);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+ u16 msti, u8 *sid)
+{
+ struct mv88e6xxx_mst *mst;
+ int err, i;
+
+ if (!mv88e6xxx_has_stu(chip)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!msti) {
+ *sid = 0;
+ return 0;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == br && mst->msti == msti) {
+ refcount_inc(&mst->refcnt);
+ *sid = mst->stu.sid;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_sid_get(chip, sid);
+ if (err)
+ goto err;
+
+ mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+ if (!mst) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&mst->node);
+ refcount_set(&mst->refcnt, 1);
+ mst->br = br;
+ mst->msti = msti;
+ mst->stu.valid = true;
+ mst->stu.sid = *sid;
+
+ /* The bridge starts out all ports in the disabled state. But
+ * a STU state of disabled means to go by the port-global
+ * state. So we set all user port's initial state to blocking,
+ * to match the bridge's behavior.
+ */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+ MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+ MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err)
+ goto err_free;
+
+ list_add_tail(&mst->node, &chip->msts);
+ return 0;
+
+err_free:
+ kfree(mst);
+err:
+ return err;
+}
+
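
mst_get()/mst_put() maintain a refcounted cache of (bridge, msti) pairs so VLANs mapped to the same MSTI of the same bridge share one STU entry, torn down only on the last put. A reduced userspace model of the lookup-or-create/put pattern, with plain ints as keys and no locking:

#include <stdio.h>
#include <stdlib.h>

struct mst {
	struct mst *next;
	int br, msti, sid, refcnt;
};

static struct mst *msts;

static int mst_get(int br, int msti, int *sid)
{
	static int next_sid = 1;                /* SID 0 stays reserved */
	struct mst *m;

	for (m = msts; m; m = m->next)
		if (m->br == br && m->msti == msti) {
			m->refcnt++;            /* existing entry: share it */
			*sid = m->sid;
			return 0;
		}

	m = calloc(1, sizeof(*m));
	if (!m)
		return -1;
	m->br = br;
	m->msti = msti;
	m->sid = next_sid++;
	m->refcnt = 1;
	m->next = msts;
	msts = m;
	*sid = m->sid;
	return 0;
}

static void mst_put(int sid)
{
	struct mst **p, *m;

	for (p = &msts; (m = *p); p = &m->next)
		if (m->sid == sid) {
			if (--m->refcnt == 0) { /* last user: drop entry */
				*p = m->next;
				free(m);
			}
			return;
		}
}

int main(void)
{
	int a = 0, b = 0;

	mst_get(1, 100, &a);
	mst_get(1, 100, &b);                    /* same key: refcount bumped */
	printf("a=%d b=%d\n", a, b);            /* same SID twice */
	mst_put(a);
	mst_put(b);                             /* frees the entry */
	return 0;
}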
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_mst *mst;
+ u8 state;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == dsa_port_bridge_dev_get(dp) &&
+ mst->msti == st->msti) {
+ if (mst->stu.state[port] == state)
+ return 0;
+
+ mst->stu.state[port] = state;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
@@ -2138,6 +2468,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
+ if (vid == MV88E6XXX_VID_STANDALONE)
+ vlan.policy = true;
+
err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
@@ -2270,6 +2603,12 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ if (!vlan.valid) {
+ err = mv88e6xxx_mst_put(chip, vlan.sid);
+ if (err)
+ return err;
+ }
+
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
@@ -2284,6 +2623,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -2308,8 +2654,75 @@ unlock:
return err;
}
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+ struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ u8 old_sid, new_sid;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ old_sid = vlan.sid;
+
+ err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+ if (err)
+ goto unlock;
+
+ if (new_sid != old_sid) {
+ vlan.sid = new_sid;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err) {
+ mv88e6xxx_mst_put(chip, new_sid);
+ goto unlock;
+ }
+ }
+
+ err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2323,7 +2736,8 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2469,7 +2883,8 @@ static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2480,6 +2895,10 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
if (err)
goto unlock;
+ err = mv88e6xxx_port_set_map_da(chip, port, true);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
@@ -2514,6 +2933,12 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+ err = mv88e6xxx_port_set_map_da(chip, port, false);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore map-DA: %pe\n",
+ port, ERR_PTR(err));
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
dev_err(ds->dev,
@@ -2525,7 +2950,8 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct dsa_bridge bridge)
+ int port, struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2857,27 +3283,58 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
+ struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ int tx_amp, speed;
int err;
u16 reg;
chip->ports[port].chip = chip;
chip->ports[port].port = port;
+ dp = dsa_to_port(ds, port);
+
/* MAC Forcing register: don't force link, speed, duplex or flow control
* state to any particular values on physical ports, but force the CPU
* port and all DSA ports to their maximum bandwidth and full duplex.
*/
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ struct phylink_config pl_config = {};
+ unsigned long caps;
+
+ mv88e6xxx_get_caps(ds, port, &pl_config);
+
+ caps = pl_config.mac_capabilities;
+
+ if (chip->info->ops->port_max_speed_mode)
+ mode = chip->info->ops->port_max_speed_mode(port);
+ else
+ mode = PHY_INTERFACE_MODE_NA;
+
+ if (caps & MAC_10000FD)
+ speed = SPEED_10000;
+ else if (caps & MAC_5000FD)
+ speed = SPEED_5000;
+ else if (caps & MAC_2500FD)
+ speed = SPEED_2500;
+ else if (caps & MAC_1000)
+ speed = SPEED_1000;
+ else if (caps & MAC_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
- SPEED_MAX, DUPLEX_FULL,
- PAUSE_OFF,
- PHY_INTERFACE_MODE_NA);
- else
+ speed, DUPLEX_FULL,
+ PAUSE_OFF, mode);
+ } else {
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
PAUSE_ON,
PHY_INTERFACE_MODE_NA);
+ }
if (err)
return err;
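
Instead of a SPEED_MAX placeholder, the forced speed for CPU and DSA ports is now derived from the reported MAC capability mask, highest supported speed first. The selection logic in isolation, with made-up capability flags:

#include <stdio.h>

#define CAP_100     (1u << 0)                   /* illustrative flag values */
#define CAP_1000FD  (1u << 1)
#define CAP_2500FD  (1u << 2)
#define CAP_10000FD (1u << 3)

static int max_speed(unsigned int caps)
{
	if (caps & CAP_10000FD)
		return 10000;
	if (caps & CAP_2500FD)
		return 2500;
	if (caps & CAP_1000FD)
		return 1000;
	if (caps & CAP_100)
		return 100;
	return 10;                              /* lowest common fallback */
}

int main(void)
{
	printf("%d\n", max_speed(CAP_100 | CAP_1000FD));        /* 1000 */
	return 0;
}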
@@ -2911,12 +3368,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
/* Port Control 2: don't force a good FCS, set the MTU size to
- * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
- * untagged frames on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't send a
- * copy of all transmitted/received frames on this port to the CPU.
+ * 10222 bytes, disable 802.1q tags checking, don't discard
+ * tagged or untagged frames on this port, skip destination
+ * address lookup on user ports, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
*/
- err = mv88e6xxx_port_set_map_da(chip, port);
+ err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
if (err)
return err;
@@ -2924,8 +3382,44 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* On chips that support it, set all downstream DSA ports'
+ * VLAN policy to TRAP. In combination with loading
+ * MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
+ * provides a better isolation barrier between standalone
+ * ports, as the ATU is bypassed on any intermediate switches
+ * between the incoming port and the CPU.
+ */
+ if (dsa_is_downstream_port(ds, port) &&
+ chip->info->ops->port_set_policy) {
+ err = chip->info->ops->port_set_policy(chip, port,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_ACTION_TRAP);
+ if (err)
+ return err;
+ }
+
+ /* User ports start out in standalone mode and 802.1Q is
+ * therefore disabled. On DSA ports, all valid VIDs are always
+ * loaded in the VTU - therefore, enable 802.1Q in order to take
+ * advantage of VLAN policy on chips that support it.
+ */
err = mv88e6xxx_port_set_8021q_mode(chip, port,
- MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
+ dsa_is_user_port(ds, port) ?
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
+ if (err)
+ return err;
+
+ /* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
+ * virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID. This will be used as the private PVID for
+ * unbridged ports. Shared (DSA and CPU) ports must also be
+ * members of this VID, in order to trap all frames assigned to
+ * it to the CPU.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
if (err)
return err;
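
How MV88E6XXX_VID_STANDALONE becomes a policy entry is not visible in this hunk. The sketch below shows one plausible way to load it with the VID-policy bit set, using the vtu entry layout this series adds; the helper name and the chip-level loadpurge wrapper are assumptions, not verbatim driver code.

/* Hypothetical sketch: load the standalone VID as a policy entry so
 * that DSA ports configured with MV88E6XXX_POLICY_ACTION_TRAP punt
 * any frame classified to this VID straight to the CPU.
 */
static int mv88e6xxx_load_standalone_vid(struct mv88e6xxx_chip *chip)
{
	struct mv88e6xxx_vtu_entry vlan = {
		.vid = MV88E6XXX_VID_STANDALONE,
		.valid = true,
		.policy = true, /* sets MV88E6352_G1_VTU_FID_VID_POLICY */
	};

	return mv88e6xxx_vtu_loadpurge(chip, &vlan);
}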
@@ -2938,7 +3432,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* relying on their port default FID.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
@@ -3011,6 +3505,22 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -3209,6 +3719,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+ * VTU, thereby also flushing the STU).
+ */
+ err = mv88e6xxx_stu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
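
mv88e6xxx_stu_setup() itself lies outside this hunk. A minimal sketch consistent with the STU API introduced below - assuming a chip-level loadpurge wrapper exists - might look like this:

/* Sketch, not verbatim: seed the STU so that SID 0 - the default for
 * every VTU entry - leaves all ports in the forwarding state.
 */
static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
{
	struct mv88e6xxx_stu_entry stu = {
		.valid = true,
		.sid = 0,
	};

	if (!mv88e6xxx_has_stu(chip))
		return 0;

	memset(stu.state, MV88E6XXX_PORT_CTL0_STATE_FORWARDING,
	       sizeof(stu.state));

	return mv88e6xxx_stu_loadpurge(chip, &stu);
}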
@@ -3399,7 +3916,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return err;
}
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
@@ -3424,14 +3941,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
- return err;
+ goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
+ goto out;
}
if (external)
@@ -3440,21 +3957,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
list_add(&mdio_bus->list, &chip->mdios);
return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
+ mdiobus_free(bus);
}
}
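
With devm out of the picture, allocation and freeing must now pair explicitly on every path. A condensed sketch of the lifecycle this hunk establishes (names hypothetical, bus setup elided):

/* Sketch of the non-devm mdiobus lifecycle this hunk moves to:
 * every mdiobus_alloc_size() must be balanced by mdiobus_free(),
 * both when registration fails and after mdiobus_unregister().
 */
struct my_priv { int dummy; }; /* hypothetical per-bus private data */

static int example_mdio_register(struct device_node *np,
				 struct mii_bus **out)
{
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc_size(sizeof(struct my_priv));
	if (!bus)
		return -ENOMEM;

	/* ... fill in bus->name, bus->read, bus->write, bus->parent ... */

	err = of_mdiobus_register(bus, np);
	if (err) {
		mdiobus_free(bus); /* never registered: free directly */
		return err;
	}

	*out = bus;
	return 0;
}

static void example_mdio_unregister(struct mii_bus *bus)
{
	mdiobus_unregister(bus);
	mdiobus_free(bus);
}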
@@ -3470,6 +3992,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
if (err)
return err;
@@ -3577,7 +4100,9 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3611,7 +4136,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3627,6 +4152,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3657,7 +4183,9 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3694,7 +4222,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3735,7 +4265,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -3783,6 +4313,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -3799,7 +4331,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3839,9 +4371,11 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3875,9 +4409,11 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3919,7 +4455,9 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3965,6 +4503,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -3974,7 +4514,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -4016,7 +4556,9 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -4062,6 +4604,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4073,8 +4617,9 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -4113,7 +4658,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4160,6 +4705,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4175,7 +4722,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -4221,6 +4768,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4236,7 +4785,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -4280,6 +4829,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4296,7 +4847,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -4342,6 +4893,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4353,10 +4906,11 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
@@ -4396,7 +4950,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
- .phylink_validate = mv88e6065_phylink_validate,
+ .phylink_get_caps = mv88e6250_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -4441,6 +4995,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4458,7 +5014,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -4502,7 +5058,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -4544,7 +5100,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -4592,6 +5148,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4610,7 +5168,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -4652,7 +5210,9 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -4694,9 +5254,11 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -4742,6 +5304,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4759,7 +5323,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .phylink_validate = mv88e6352_phylink_validate,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -4806,6 +5371,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4824,7 +5391,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -4871,6 +5438,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
@@ -4888,7 +5457,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
@@ -4939,6 +5508,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6393x_serdes_power,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
@@ -4952,7 +5523,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6393x_phylink_validate,
+ .phylink_get_caps = mv88e6393x_phylink_get_caps,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -4965,6 +5536,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5007,6 +5579,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5030,6 +5603,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5074,6 +5648,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5097,6 +5672,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5121,6 +5697,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5144,6 +5721,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5168,6 +5746,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5191,6 +5770,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5215,6 +5795,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5260,6 +5841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5283,6 +5865,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5305,6 +5888,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5327,6 +5911,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5349,6 +5934,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5399,6 +5985,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5444,6 +6031,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5517,6 +6105,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5541,6 +6130,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5564,6 +6154,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5588,6 +6179,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5612,6 +6204,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5636,6 +6229,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5659,6 +6253,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5714,6 +6309,32 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
return 0;
}
+static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip,
+ struct mdio_device *mdiodev)
+{
+ int err;
+
+ /* dual_chip takes precedence over single/multi-chip modes */
+ if (chip->info->dual_chip)
+ return -EINVAL;
+
+ /* If the MDIO address is 16 - the first port address of switches
+ * such as the mv88e6*41 - the device may be configured in single
+ * chip addressing mode. Set up SMI access for single chip
+ * addressing mode and attempt to detect the switch model; if
+ * detection fails, the device is not configured in single chip
+ * addressing mode.
+ */
+ if (mdiodev->addr != 16)
+ return -EINVAL;
+
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_detect(chip);
+}
+
static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
{
struct mv88e6xxx_chip *chip;
@@ -5727,6 +6348,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
mutex_init(&chip->reg_lock);
INIT_LIST_HEAD(&chip->mdios);
idr_init(&chip->policies);
+ INIT_LIST_HEAD(&chip->msts);
return chip;
}
@@ -5740,11 +6362,12 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->tag_protocol;
}
-static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
+static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct mv88e6xxx_chip *chip = ds->priv;
enum dsa_tag_protocol old_protocol;
+ struct dsa_port *cpu_dp;
int err;
switch (proto) {
@@ -5769,17 +6392,31 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
chip->tag_protocol = proto;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_setup_port_mode(chip, port);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ if (err) {
+ mv88e6xxx_reg_unlock(chip);
+ goto unwind;
+ }
+ }
mv88e6xxx_reg_unlock(chip);
- if (err)
- chip->tag_protocol = old_protocol;
+ return 0;
+
+unwind:
+ chip->tag_protocol = old_protocol;
+
+ mv88e6xxx_reg_lock(chip);
+ dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
+ mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
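
The unwind in mv88e6xxx_change_tag_protocol() above is a reusable pattern; a generic sketch of its shape, with apply_one() as a hypothetical stand-in for the per-port operation:

/* Apply a new setting to each CPU port in order and, on failure,
 * restore the old setting on the ports already touched, walking
 * back from the point of failure.
 */
static int apply_one(struct dsa_switch *ds, int port, int val); /* hypothetical */

static int apply_to_cpu_ports(struct dsa_switch *ds, int new, int old)
{
	struct dsa_port *cpu_dp;
	int err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = apply_one(ds, cpu_dp->index, new);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
		apply_one(ds, cpu_dp->index, old);
	return err;
}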
@@ -5793,7 +6430,8 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5807,7 +6445,8 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
enum mv88e6xxx_egress_direction direction = ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
@@ -5881,7 +6520,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED))
return -EINVAL;
ops = chip->info->ops;
@@ -5939,6 +6578,13 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_LOCKED) {
+ bool locked = !!(flags.val & BR_PORT_LOCKED);
+
+ err = mv88e6xxx_port_set_lock(chip, port, locked);
+ if (err)
+ goto out;
+ }
out:
mv88e6xxx_reg_unlock(chip);
@@ -5946,32 +6592,40 @@ out:
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
- if (!mv88e6xxx_has_lag(chip))
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
+ }
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
- if (members > 8)
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
return false;
+ }
/* We could potentially relax this to include active
* backup in the future.
*/
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
@@ -5980,20 +6634,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
-static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
@@ -6038,8 +6693,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
- struct net_device *lag;
struct dsa_port *dp;
+ struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
@@ -6048,8 +6703,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
- list_for_each_entry(dp, &ds->dst->ports, list) {
- if (!dp->lag_dev || dp->ds != ds)
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
@@ -6062,7 +6717,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
- lag = dsa_lag_dev(ds->dst, id);
+ lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
@@ -6098,7 +6753,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
- struct net_device *lag)
+ struct dsa_lag lag)
{
int err;
@@ -6122,16 +6777,18 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based */
+ id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
@@ -6154,7 +6811,7 @@ err_unlock:
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
@@ -6179,13 +6836,14 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -6202,7 +6860,7 @@ unlock:
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag)
+ int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
@@ -6221,7 +6879,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.teardown = mv88e6xxx_teardown,
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
- .phylink_validate = mv88e6xxx_validate,
+ .phylink_get_caps = mv88e6xxx_get_caps,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
.phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
@@ -6249,15 +6907,18 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_mst_state_set = mv88e6xxx_port_mst_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_add = mv88e6xxx_port_mdb_add,
- .port_mdb_del = mv88e6xxx_port_mdb_del,
+ .vlan_msti_set = mv88e6xxx_vlan_msti_set,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+ .port_mdb_add = mv88e6xxx_port_mdb_add,
+ .port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
.port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
@@ -6382,10 +7043,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
- err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
- if (err)
- goto out;
-
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(chip->reset)) {
err = PTR_ERR(chip->reset);
@@ -6394,9 +7051,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (chip->reset)
usleep_range(1000, 2000);
- err = mv88e6xxx_detect(chip);
- if (err)
- goto out;
+ /* Detect if the device is configured in single chip addressing mode,
+ * otherwise continue with address-specific SMI init/detection.
+ */
+ err = mv88e6xxx_single_chip_detect(chip, mdiodev);
+ if (err) {
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto out;
+ }
if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
chip->tag_protocol = DSA_TAG_PROTO_EDSA;
@@ -6518,8 +7185,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8271b8aa7b71..e693154cf803 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -20,6 +20,7 @@
#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_N_SID 64
#define MV88E6XXX_FID_STANDALONE 0
#define MV88E6XXX_FID_BRIDGED 1
@@ -130,6 +131,7 @@ struct mv88e6xxx_info {
unsigned int num_internal_phys;
unsigned int num_gpio;
unsigned int max_vid;
+ unsigned int max_sid;
unsigned int port_base_addr;
unsigned int phy_base_addr;
unsigned int global1_addr;
@@ -179,7 +181,14 @@ struct mv88e6xxx_vtu_entry {
u16 fid;
u8 sid;
bool valid;
+ bool policy;
u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS]; /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+ u8 sid;
+ bool valid;
u8 state[DSA_MAX_PORTS];
};
@@ -278,6 +287,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_STU,
MV88E6XXX_REGION_PVT,
_MV88E6XXX_REGION_MAX,
@@ -287,6 +297,16 @@ struct mv88e6xxx_region_priv {
enum mv88e6xxx_region_id id;
};
+struct mv88e6xxx_mst {
+ struct list_head node;
+
+ refcount_t refcnt;
+ struct net_device *br;
+ u16 msti;
+
+ struct mv88e6xxx_stu_entry stu;
+};
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
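
The struct mv88e6xxx_mst above is managed by get/put helpers elsewhere in the series; a hedged sketch of the refcounting scheme, with the helper name and details assumed rather than taken from the source:

/* Hypothetical sketch of the MST bookkeeping implied by the struct
 * above: one refcounted entry per (bridge, msti) pair, the SID being
 * released back to the pool when the last user drops its reference.
 */
static void example_mst_put(struct mv88e6xxx_chip *chip,
			    struct mv88e6xxx_mst *mst)
{
	if (!refcount_dec_and_test(&mst->refcnt))
		return;

	/* Last user gone: invalidate the STU entry and free the node */
	mst->stu.valid = false;
	chip->info->ops->stu_loadpurge(chip, &mst->stu);

	list_del(&mst->node);
	kfree(mst);
}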
@@ -387,11 +407,15 @@ struct mv88e6xxx_chip {
/* devlink regions */
struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+ /* Bridge MST to SID mappings */
+ struct list_head msts;
};
struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+ int (*init)(struct mv88e6xxx_chip *chip);
};
struct mv88e6xxx_mdio_bus {
@@ -464,14 +488,13 @@ struct mv88e6xxx_ops {
int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
int pause);
-#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
#define DUPLEX_UNFORCED -2
/* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
- * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ * Use SPEED_UNFORCED for normal detection.
*
* Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
* or DUPLEX_UNFORCED for normal duplex detection.
@@ -586,6 +609,10 @@ struct mv88e6xxx_ops {
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -596,6 +623,12 @@ struct mv88e6xxx_ops {
int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+ /* Spanning Tree Unit operations */
+ int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+ int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+
/* GPIO operations */
const struct mv88e6xxx_gpio_ops *gpio_ops;
@@ -609,9 +642,8 @@ struct mv88e6xxx_ops {
const struct mv88e6xxx_ptp_ops *ptp_ops;
/* Phylink */
- void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state);
+ void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config);
/* Max Frame Size */
int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
@@ -695,6 +727,13 @@ struct mv88e6xxx_hw_stat {
int type;
};
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid > 0 &&
+ chip->info->ops->stu_loadpurge &&
+ chip->info->ops->stu_getnext;
+}
+
static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
{
return chip->info->pvt;
@@ -725,6 +764,11 @@ static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
return chip->info->max_vid;
}
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 381068395c63..1266eabee086 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -503,6 +503,85 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
return 0;
}
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @vid: Global1/6: Valid bit.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst pack the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+ u16 sid;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_stu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_stu_entry stu;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_stu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ stu.sid = mv88e6xxx_max_sid(chip);
+ stu.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ if (err)
+ break;
+
+ if (!stu.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
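
A snapshot of the "stu" region above (e.g. obtained with devlink region dump) is just an array of the packed entry struct. A small userspace sketch for decoding such a dump; the valid-bit position is an assumption based on the driver's VTU VID register layout:

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of struct mv88e6xxx_devlink_stu_entry */
struct stu_entry {
	uint16_t sid;
	uint16_t vid;
	uint16_t data[3];
	uint16_t resvd;
};

static void print_stu(const struct stu_entry *tab, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		/* Assumption: bit 12 of the VID register is the valid
		 * bit; zeroed trailing entries fail this check.
		 */
		if (!(tab[i].vid & 0x1000))
			continue;

		printf("sid %2u: data %04x %04x %04x\n",
		       tab[i].sid & 0x3f, tab[i].data[0],
		       tab[i].data[1], tab[i].data[2]);
	}
}

int main(void)
{
	struct stu_entry tab[64];
	size_t n = fread(tab, sizeof(tab[0]), 64, stdin);

	print_stu(tab, n);
	return 0;
}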
@@ -605,6 +684,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+ .name = "stu",
+ .snapshot = mv88e6xxx_region_stu_snapshot,
+ .destructor = kfree,
+};
+
static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
@@ -640,6 +725,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_STU] = {
+ .ops = &mv88e6xxx_region_stu_ops,
+ .cond = mv88e6xxx_has_stu,
+ /* calculated at runtime */
+ },
[MV88E6XXX_REGION_PVT] = {
.ops = &mv88e6xxx_region_pvt_ops,
.size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
@@ -706,6 +796,10 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
+ case MV88E6XXX_REGION_STU:
+ size = (mv88e6xxx_max_sid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_stu_entry);
+ break;
}
region = dsa_devlink_region_create(ds, ops, 1, size);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 4f3dbb015f77..65958b2a0d3a 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -46,6 +46,7 @@
/* Offset 0x02: VTU FID Register */
#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_VID_POLICY 0x1000
#define MV88E6352_G1_VTU_FID_MASK 0x0fff
/* Offset 0x03: VTU SID Register */
@@ -347,6 +348,16 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index ae12c981923e..38e18f5811bf 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -27,7 +27,7 @@ static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
return err;
entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
-
+ entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
return 0;
}
@@ -36,13 +36,15 @@ static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
{
u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
+ if (entry->policy)
+ val |= MV88E6352_G1_VTU_FID_VID_POLICY;
+
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
-static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
{
u16 val;
int err;
@@ -51,15 +53,14 @@ static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
+ *sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
-static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
{
- u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
+ u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
@@ -88,7 +89,7 @@ static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
/* Offset 0x06: VTU VID Register */
static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool *valid, u16 *vid)
{
u16 val;
int err;
@@ -97,25 +98,28 @@ static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->vid = val & 0xfff;
+ if (vid) {
+ *vid = val & 0xfff;
- if (val & MV88E6390_G1_VTU_VID_PAGE)
- entry->vid |= 0x1000;
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
+ *vid |= 0x1000;
+ }
- entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+ if (valid)
+ *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool valid, u16 vid)
{
- u16 val = entry->vid & 0xfff;
+ u16 val = vid & 0xfff;
- if (entry->vid & 0x1000)
+ if (vid & 0x1000)
val |= MV88E6390_G1_VTU_VID_PAGE;
- if (entry->valid)
+ if (valid)
val |= MV88E6XXX_G1_VTU_VID_VALID;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
@@ -144,7 +148,7 @@ static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3];
int err;
@@ -157,36 +161,20 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
- entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
- }
-
- return 0;
-}
-
-static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 regs[3];
- int err;
- int i;
-
- err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
- if (err)
- return err;
-
- /* Extract PortState data */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int state_offset = (i % 4) * 4 + 2;
+ if (member)
+ member[i] = (regs[i / 4] >> member_offset) & 0x3;
- entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ if (state)
+ state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
return 0;
}
static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3] = { 0 };
int i;
@@ -196,8 +184,11 @@ static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
- regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
- regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+ if (member)
+ regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+ if (state)
+ regs[i / 4] |= (state[i] & 0x3) << state_offset;
}
/* Write all 3 VTU/STU Data registers */
@@ -265,48 +256,6 @@ static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
/* VLAN Translation Unit Operations */
-static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
- if (err)
- return err;
-
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
-}
-
-static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *vtu)
-{
- struct mv88e6xxx_vtu_entry stu;
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
- if (err)
- return err;
-
- stu.sid = vtu->sid - 1;
-
- err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
- if (err)
- return err;
-
- if (stu.sid != vtu->sid || !stu.valid)
- return -EINVAL;
-
- return 0;
-}
-
int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -324,7 +273,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
* write the VID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
if (err)
return err;
}
@@ -333,7 +282,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+ return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
}
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
@@ -347,11 +296,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
if (err)
return err;
@@ -381,7 +326,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
if (err)
return err;
@@ -389,12 +334,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -417,16 +357,11 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -444,12 +379,12 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
if (err)
return err;
@@ -476,27 +411,21 @@ int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write MemberTag and PortState data */
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ /* Write MemberTag data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
@@ -514,41 +443,113 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write PortState data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- /* Write MemberTag data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+	/* To get the next higher active SID, the STU GetNext operation can be
+	 * started again without setting the SID registers, since they already
+	 * contain the last SID.
+ *
+ * To save a few hardware accesses and abstract this to the caller,
+ * write the SID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
- /* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+ return 0;
}
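
/* An illustrative walk over all valid STU entries using the GetNext
 * primitive above; this is an editor's sketch, not part of the patch.
 * Seeding entry.sid with the highest SID and entry.valid = false makes
 * the first call wrap around to the lowest active SID; iteration stops
 * once the hardware reports an invalid entry. mv88e6xxx_max_sid() is a
 * hypothetical helper standing in for the chip's SID limit.
 */
static int example_stu_walk(struct mv88e6xxx_chip *chip)
{
	struct mv88e6xxx_stu_entry entry = {
		.sid = mv88e6xxx_max_sid(chip),
		.valid = false,
	};
	int err;

	do {
		err = mv88e6xxx_g1_stu_getnext(chip, &entry);
		if (err)
			return err;

		if (entry.valid) {
			/* process entry.sid / entry.state here */
		}
	} while (entry.valid);

	return 0;
}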
-int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
{
int err;
@@ -556,16 +557,59 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
if (err)
return err;
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+/* VTU Violation Management */
+
static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
- struct mv88e6xxx_vtu_entry entry;
+ u16 val, vid;
int spid;
int err;
- u16 val;
mv88e6xxx_reg_lock(chip);
@@ -577,7 +621,7 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
if (err)
goto out;
@@ -585,13 +629,13 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
- entry.vid, spid);
+ vid, spid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
- entry.vid, spid);
+ vid, spid);
chip->ports[spid].vtu_miss_violation++;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index f3e27573a386..7536b8b0ad01 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -298,7 +298,9 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
-#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
@@ -370,6 +372,7 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index eda710062933..a9d6e40321a2 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -289,3 +289,31 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
}
+
+/**
+ * mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
+ * @chip: chip private data
+ * @port: port number to check for serdes
+ *
+ * Indicates whether the port may have a serdes attached according to the
+ * pin strapping. Returns a negative error number, 0 if the port is not
+ * configured to have a serdes, and 1 if the port is configured to have a
+ * serdes attached.
+ */
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 config3, p;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
+ &config3);
+ if (err)
+ return err;
+
+ if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
+ p = 5;
+ else
+ p = 4;
+
+ return port == p;
+}
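
/* A sketch (not part of the patch) of how callers consume the tri-state
 * result: propagate negative errors, treat 0 as "no serdes strapped",
 * and only fall through on 1. The serdes ethtool hooks changed in this
 * series use the "if (err <= 0) return err;" shape for exactly this
 * reason. example_serdes_op() is a hypothetical name.
 */
static int example_serdes_op(struct mv88e6xxx_chip *chip, int port)
{
	int err;

	err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
	if (err <= 0)		/* error, or port has no serdes */
		return err;

	/* port is strapped for a serdes; do the real work here */
	return 0;
}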
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 389f8a6ec0ab..331b4ca089ff 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -301,7 +301,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
shwt->hwtstamp = ns_to_ktime(ns);
status &= ~MV88E6XXX_PTP_TS_VALID;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ab41619a809b..5c4195c635b0 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -294,28 +294,10 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
return 0;
}
-/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex)
-{
- if (speed == SPEED_MAX)
- speed = 200;
-
- if (speed > 200)
- return -EOPNOTSUPP;
-
- /* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
- duplex);
-}
-
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
@@ -327,9 +309,6 @@ int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 100;
-
if (speed > 100)
return -EOPNOTSUPP;
@@ -341,9 +320,6 @@ int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 5 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -369,9 +345,6 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed > 1000)
return -EOPNOTSUPP;
@@ -386,9 +359,6 @@ int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -414,9 +384,6 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -445,9 +412,6 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
u16 reg, ctrl;
int err;
- if (speed == SPEED_MAX)
- speed = (port > 0 && port < 9) ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -550,6 +514,15 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
mode = PHY_INTERFACE_MODE_1000BASEX;
switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -610,6 +583,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
chip->ports[port].cmode = cmode;
lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
if (lane < 0)
return lane;
@@ -665,6 +640,19 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
@@ -1234,6 +1222,35 @@ int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
return err;
}
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
+ if (locked)
+ reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+ if (locked)
+ reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
+}
+
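/* A minimal sketch (hypothetical name, shape borrowed from the
 * bridge-flags offload path) of how the new helper is expected to be
 * driven: BR_PORT_LOCKED toggles the SA filtering mode and the
 * locked-port association vector bit together.
 */
static int example_bridge_flags(struct mv88e6xxx_chip *chip, int port,
				struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_PORT_LOCKED)
		return mv88e6xxx_port_set_lock(chip, port,
					       !!(flags.val & BR_PORT_LOCKED));

	return 0;
}
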
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
@@ -1278,7 +1295,7 @@ int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
}
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
{
u16 reg;
int err;
@@ -1287,7 +1304,10 @@ int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ if (map)
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03382b66f800..cb04243f37c1 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,6 +42,11 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_MII_PHY 0x0001
+#define MV88E6XXX_PORT_STS_CMODE_MII 0x0002
+#define MV88E6XXX_PORT_STS_CMODE_GMII 0x0003
+#define MV88E6XXX_PORT_STS_CMODE_RMII_PHY 0x0004
+#define MV88E6XXX_PORT_STS_CMODE_RMII 0x0005
#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
@@ -142,7 +147,11 @@
/* Offset 0x04: Port Control Register */
#define MV88E6XXX_PORT_CTL0 0x04
#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
-#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
@@ -333,8 +342,6 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex);
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
@@ -365,6 +372,9 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked);
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode);
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
@@ -425,7 +435,7 @@ int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged);
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
index b49d05f0e117..7a9f9ff6dedf 100644
--- a/drivers/net/dsa/mv88e6xxx/port_hidden.c
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
- return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
- MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+ return mv88e6xxx_port_wait_bit(chip,
+ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2b05ead515cd..d94150d8f3f4 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 ctrl, u16 status, u16 lpa,
+ u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
+ state->link = false;
+
+ /* If the BMSR reports that the link had failed, report this to
+ * phylink.
+ */
+ if (!(bmsr & BMSR_LSTATUS))
+ return 0;
+
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
		/* The Speed and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
- * still get here on link up. But we want to set an_complete
- * only if AN was enabled, thus we look at BMCR_ANENABLE.
- * (According to 802.3-2008 section 22.2.4.2.10, we should be
- * able to get this same value from BMSR_ANEGCAPABLE, but tests
- * show that these Marvell PHYs don't conform to this part of
-		 * the specification - BMSR_ANEGCAPABLE is simply always 1.)
+ * still get here on link up.
*/
- state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -272,14 +275,6 @@ int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
-{
- if (mv88e6xxx_serdes_get_lane(chip, port) >= 0)
- return true;
-
- return false;
-}
-
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -293,20 +288,24 @@ static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6352_port_has_serdes(chip, port))
- return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+ int err;
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data)
{
struct mv88e6352_serdes_hw_stat *stat;
- int i;
+ int err, i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
@@ -348,11 +347,12 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
+ int i, err;
u64 value;
- int i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
@@ -419,8 +419,13 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
return 32 * sizeof(u16);
}
@@ -432,7 +437,8 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
int err;
int i;
- if (!mv88e6352_port_has_serdes(chip, port))
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return;
for (i = 0 ; i < 32; i++) {
@@ -915,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &ctrl);
+ MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -939,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1310,6 +1316,44 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+	/* The index of each microvolt value is the corresponding register value */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
+{
+ bool found = false;
+ u16 ctrl, reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
+ if (err)
+ return err;
+
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
+}
+
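/* Illustrative caller (not part of the patch): request a 504 mV
 * peak-to-peak amplitude. Only the eight exact microvolt values in
 * mv88e6352_serdes_p2p_to_reg are accepted; 504000 is index 5 in the
 * table and so maps to register value 5, while anything not in the
 * table yields -EINVAL.
 */
static int example_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port)
{
	return mv88e6352_serdes_set_tx_amplitude(chip, port, 504000);
}
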
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
bool on)
{
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 8dd8ed225b45..29bb4e91e2f6 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -27,6 +27,8 @@
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -176,6 +178,9 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 282fe08db050..a990271b7482 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -55,11 +55,15 @@ static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
int dev, int reg, int bit, int val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- for (i = 0; i < 16; i++) {
+ /* Even if the initial poll takes longer than 50ms, always do
+ * at least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
if (err)
return err;
@@ -67,7 +71,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (!!(data & BIT(bit)) == !!val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
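
/* The same polling policy in isolation (an editor's sketch with a
 * hypothetical helper, not in the patch): bound the wait by wall-clock
 * time rather than a fixed iteration count, but guarantee a minimum of
 * two reads so a scheduling hiccup before the first read cannot cause
 * a spurious -ETIMEDOUT. The first two attempts spin; later ones sleep.
 */
static int example_poll_ready(bool (*ready)(void *ctx), void *ctx)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(50);
	int i;

	for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
		if (ready(ctx))
			return 0;

		if (i < 2)
			cpu_relax();
		else
			usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}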
@@ -104,11 +111,6 @@ static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
@@ -132,11 +134,6 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
if (err)
@@ -155,9 +152,20 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
MV88E6XXX_SMI_CMD, 15, 0);
}
+static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
+{
+ /* Ensure that the chip starts out in the ready state. As both
+ * reads and writes always ensure this on return, they can
+ * safely depend on the chip not being busy on entry.
+ */
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
.read = mv88e6xxx_smi_indirect_read,
.write = mv88e6xxx_smi_indirect_write,
+ .init = mv88e6xxx_smi_indirect_init,
};
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
@@ -175,5 +183,8 @@ int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
chip->bus = bus;
chip->sw_addr = sw_addr;
+ if (chip->smi_ops->init)
+ return chip->smi_ops->init(chip);
+
return 0;
}
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 220b0b027b55..08db9cf76818 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -6,6 +6,7 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 9957772201d5..dd3a18cc89dd 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -25,32 +25,65 @@
#include <net/dsa.h>
#include "felix.h"
-static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int key_length, upstream, err;
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
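
/* A sketch of how the classifier above feeds the FDB handlers (the full
 * driver's felix_fdb_add() follows this shape; treat the exact ocelot
 * call signature here as an assumption of this sketch, not as the
 * patch's code):
 */
static int example_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid,
			   struct dsa_db db)
{
	struct net_device *bridge_dev = felix_classify_db(db);
	struct ocelot *ocelot = ds->priv;

	if (IS_ERR(bridge_dev))
		return PTR_ERR(bridge_dev);

	/* standalone and LAG ports (NULL bridge_dev) land in VID 0 */
	return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}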
- /* We don't need to install the rxvlan into the other ports' filtering
- * tables, because we're just pushing the rxvlan when sending towards
- * the CPU
- */
- if (!pvid)
- return 0;
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int key_length, err;
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
- upstream = dsa_upstream_port(ds, port);
outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
GFP_KERNEL);
if (!outer_tagging_rule)
return -ENOMEM;
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = cookie;
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -71,20 +104,36 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
-static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
{
- struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int upstream, err;
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
- /* tag_8021q.c assumes we are implementing this via port VLAN
- * membership, which we aren't. So we don't need to add any VCAP filter
- * for the CPU port.
- */
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port
+ */
+static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
+ u16 vid)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ unsigned long cpu_ports = dsa_cpu_ports(ds);
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int err;
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
@@ -96,14 +145,14 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return -ENOMEM;
}
- upstream = dsa_upstream_port(ds, port);
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
- untagging_rule->ingress_port_mask = BIT(upstream);
+ untagging_rule->ingress_port_mask = cpu_ports;
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = cookie;
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -120,11 +169,13 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+
redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = BIT(upstream);
+ redirect_rule->ingress_port_mask = cpu_ports;
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = cookie;
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -142,273 +193,320 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return 0;
}
-static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- u16 flags)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct ocelot *ocelot = ds->priv;
-
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- return 0;
-}
-
-static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
-{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
-
- block_vcap_es0 = &ocelot->block[VCAP_ES0];
-
- outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
- /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
- * installing outer tagging ES0 rules where they weren't needed.
- * But in rxvlan_del, the API doesn't give us the "flags" anymore,
- * so that forces us to be slightly sloppy here, and just assume that
- * if we didn't find an outer_tagging_rule it means that there was
- * none in the first place, i.e. rxvlan_del is called on a non-pvid
- * port. This is most probably true though.
- */
- if (!outer_tagging_rule)
- return 0;
-
- return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
-}
-
-static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
int err;
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- port, false);
+ cookie, false);
if (!untagging_rule)
- return 0;
+ return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
return err;
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- port, false);
+ cookie, false);
if (!redirect_rule)
- return 0;
+ return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
-static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_is_user_port(ds, port))
+ return 0;
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
+ if (err)
+ goto add_tx_failed;
return 0;
+
+add_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
}
-/* Alternatively to using the NPI functionality, that same hardware MAC
- * connected internally to the enetc or fman DSA master can be configured to
- * use the software-defined tag_8021q frame format. As far as the hardware is
- * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
- * module are now disconnected from it, but can still be accessed through
- * register-based MMIO.
- */
-static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
+static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
- mutex_lock(&ocelot->fwd_domain_lock);
+ struct dsa_port *cpu_dp;
+ int err;
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
- ocelot->npi = -1;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
- /* Overwrite PGID_CPU with the non-tagging port */
- ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
+ err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
+ if (err)
+ goto del_tx_failed;
+
+ return 0;
- ocelot_apply_bridge_fwd_mask(ocelot, true);
+del_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
- mutex_unlock(&ocelot->fwd_domain_lock);
+ return err;
}
-static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
+static int felix_trap_get_cpu_port(struct dsa_switch *ds,
+ const struct ocelot_vcap_filter *trap)
{
- mutex_lock(&ocelot->fwd_domain_lock);
-
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ struct dsa_port *dp;
+ int first_port;
- /* Restore PGID_CPU */
- ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
- PGID_CPU);
+ if (WARN_ON(!trap->ingress_port_mask))
+ return -1;
- ocelot_apply_bridge_fwd_mask(ocelot, true);
+ first_port = __ffs(trap->ingress_port_mask);
+ dp = dsa_to_port(ds, first_port);
- mutex_unlock(&ocelot->fwd_domain_lock);
+ return dp->cpu_dp->index;
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
*/
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool cpu_copy_ena;
+ int err;
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
- }
+	/* We are sure that the CPU port was found, otherwise
+	 * dsa_tree_setup_default_cpu() would have failed earlier.
+ */
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
+
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
}
- }
- if (cpu < 0) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return -EINVAL;
- }
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
}
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ return 0;
+}
+
+/* The CPU port module is connected to the Node Processor Interface (NPI). This
+ * is the mode through which frames can be injected from and extracted to an
+ * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
+ * running Linux, and this forms a DSA setup together with the enetc or fman
+ * DSA master.
+ */
+static void felix_npi_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->npi = port;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
+ QSYS_EXT_CPU_CFG);
+
+ /* NPI port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->npi_xtr_prefix);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->npi_inj_prefix);
+
+ /* Disable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+}
+
+static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
+{
+ /* Restore hardware defaults */
+ int unused_port = ocelot->num_phys_ports + 2;
+
+ ocelot->npi = -1;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
+ QSYS_EXT_CPU_CFG);
+
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+
+ /* Enable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+}
+
+static int felix_tag_npi_setup(struct dsa_switch *ds)
+{
+ struct dsa_port *dp, *first_cpu_dp = NULL;
+ struct ocelot *ocelot = ds->priv;
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
+ dev_err(ds->dev, "Multiple NPI ports not supported\n");
+ return -EINVAL;
+ }
+
+ first_cpu_dp = dp->cpu_dp;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
- */
- ocelot_drain_cpu_queue(ocelot, 0);
+ if (!first_cpu_dp)
+ return -EINVAL;
+
+ felix_npi_port_init(ocelot, first_cpu_dp->index);
return 0;
}
-static int felix_teardown_mmio_filtering(struct felix *felix)
+static void felix_tag_npi_teardown(struct dsa_switch *ds)
{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
+ struct ocelot *ocelot = ds->priv;
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+}
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
+static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
+ return BIT(ocelot->num_phys_ports);
+}
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
}
-static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
+/* Alternatively to using the NPI functionality, that same hardware MAC
+ * connected internally to the enetc or fman DSA master can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
+static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
+ .setup = felix_tag_npi_setup,
+ .teardown = felix_tag_npi_teardown,
+ .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
+};
+
+static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- unsigned long cpu_flood;
- int port, err;
+ struct dsa_port *dp;
+ int err;
- felix_8021q_cpu_port_init(ocelot, cpu);
+ err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
+ if (err)
+ return err;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
+ dp->cpu_dp->index);
+ dsa_switch_for_each_available_port(dp, ds)
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
@@ -421,202 +519,194 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
- }
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
- /* In tag_8021q mode, the CPU port module is unused, except for PTP
- * frames. So we want to disable flooding of any kind to the CPU port
- * module, since packets going there will end in a black hole.
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
*/
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
-
- err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
- if (err)
- return err;
-
- err = felix_setup_mmio_filtering(felix);
- if (err)
- goto out_tag_8021q_unregister;
+ ocelot_drain_cpu_queue(ocelot, 0);
return 0;
-
-out_tag_8021q_unregister:
- dsa_tag_8021q_unregister(ds);
- return err;
}
-static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
+static void felix_tag_8021q_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
-
- err = felix_teardown_mmio_filtering(felix);
- if (err)
- dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
- err);
-
- dsa_tag_8021q_unregister(ds);
-
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ struct dsa_port *dp;
+ dsa_switch_for_each_available_port(dp, ds)
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
- }
+ dp->index);
- felix_8021q_cpu_port_deinit(ocelot, cpu);
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_tag_8021q_unregister(ds);
}
-/* The CPU port module is connected to the Node Processor Interface (NPI). This
- * is the mode through which frames can be injected from and extracted to an
- * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
- * running Linux, and this forms a DSA setup together with the enetc or fman
- * DSA master.
- */
-static void felix_npi_port_init(struct ocelot *ocelot, int port)
+static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
{
- ocelot->npi = port;
+ return dsa_cpu_ports(ds);
+}
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
- QSYS_EXT_CPU_CFG);
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
- /* NPI port Injection/Extraction configuration */
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
- ocelot->npi_xtr_prefix);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
- ocelot->npi_inj_prefix);
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
- /* Disable transmission of pause frames */
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+ return felix_update_trapping_destinations(ds, true);
}
-static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
-{
- /* Restore hardware defaults */
- int unused_port = ocelot->num_phys_ports + 2;
+static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
+ .setup = felix_tag_8021q_setup,
+ .teardown = felix_tag_8021q_teardown,
+ .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
+};
- ocelot->npi = -1;
+static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
+ bool uc, bool mc, bool bc)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long val;
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
- QSYS_EXT_CPU_CFG);
+ val = uc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
- OCELOT_TAG_PREFIX_DISABLED);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
- OCELOT_TAG_PREFIX_DISABLED);
+ val = mc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
- /* Enable transmission of pause frames */
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+ val = bc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
}
-static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
+static void
+felix_migrate_host_flood(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
- unsigned long cpu_flood;
-
- felix_npi_port_init(ocelot, cpu);
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
- /* Include the CPU port module (and indirectly, the NPI port)
- * in the forwarding mask for unknown unicast - the hardware
- * default value for ANA_FLOODING_FLD_UNICAST excludes
- * BIT(ocelot->num_phys_ports), and so does ocelot_init,
- * since Ocelot relies on whitelisting MAC addresses towards
- * PGID_CPU.
- * We do this because DSA does not yet perform RX filtering,
- * and the NPI port does not perform source address learning,
- * so traffic sent to Linux is effectively unknown from the
- * switch's perspective.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+ if (old_proto_ops) {
+ mask = old_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, false, false, false);
+ }
- return 0;
+ mask = proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
}
-static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
+static int felix_migrate_mdbs(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
+ unsigned long from, to;
+
+ if (!old_proto_ops)
+ return 0;
- felix_npi_port_deinit(ocelot, cpu);
+ from = old_proto_ops->get_host_fwd_mask(ds);
+ to = proto_ops->get_host_fwd_mask(ds);
+
+ return ocelot_migrate_mdbs(ocelot, from, to);
}
-static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
- enum dsa_tag_protocol proto)
+/* Configure the shared hardware resources for a transition between
+ * @old_proto_ops and @proto_ops.
+ * Manual migration is needed because as far as DSA is concerned, no change of
+ * the CPU port is taking place here, just of the tagging protocol.
+ */
+static int
+felix_tag_proto_setup_shared(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
{
+ bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
int err;
- switch (proto) {
- case DSA_TAG_PROTO_SEVILLE:
- case DSA_TAG_PROTO_OCELOT:
- err = felix_setup_tag_npi(ds, cpu);
- break;
- case DSA_TAG_PROTO_OCELOT_8021Q:
- err = felix_setup_tag_8021q(ds, cpu);
- break;
- default:
- err = -EPROTONOSUPPORT;
- }
+ err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
+ if (err)
+ return err;
- return err;
-}
+ felix_update_trapping_destinations(ds, using_tag_8021q);
-static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
- enum dsa_tag_protocol proto)
-{
- switch (proto) {
- case DSA_TAG_PROTO_SEVILLE:
- case DSA_TAG_PROTO_OCELOT:
- felix_teardown_tag_npi(ds, cpu);
- break;
- case DSA_TAG_PROTO_OCELOT_8021Q:
- felix_teardown_tag_8021q(ds, cpu);
- break;
- default:
- break;
- }
+ felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
+
+ return 0;
}
/* This always leaves the switch in a consistent state, because although the
* tag_8021q setup can fail, the NPI setup can't. So either the change is made,
* or the restoration is guaranteed to work.
*/
-static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
+static int felix_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
+ const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- enum dsa_tag_protocol old_proto = felix->tag_proto;
int err;
- if (proto != DSA_TAG_PROTO_SEVILLE &&
- proto != DSA_TAG_PROTO_OCELOT &&
- proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ switch (proto) {
+ case DSA_TAG_PROTO_SEVILLE:
+ case DSA_TAG_PROTO_OCELOT:
+ proto_ops = &felix_tag_npi_proto_ops;
+ break;
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ proto_ops = &felix_tag_8021q_proto_ops;
+ break;
+ default:
return -EPROTONOSUPPORT;
+ }
- felix_del_tag_protocol(ds, cpu, old_proto);
+ old_proto_ops = felix->tag_proto_ops;
- err = felix_set_tag_protocol(ds, cpu, proto);
- if (err) {
- felix_set_tag_protocol(ds, cpu, old_proto);
- return err;
- }
+ if (proto_ops == old_proto_ops)
+ return 0;
+
+ err = proto_ops->setup(ds);
+ if (err)
+ goto setup_failed;
+ err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
+ if (err)
+ goto setup_shared_failed;
+
+ if (old_proto_ops)
+ old_proto_ops->teardown(ds);
+
+ felix->tag_proto_ops = proto_ops;
felix->tag_proto = proto;
return 0;
+
+setup_shared_failed:
+ proto_ops->teardown(ds);
+setup_failed:
+ return err;
}
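Stripped of driver specifics, the sequencing above is a make-before-break pattern: the new protocol is fully set up before the old one is torn down, and each failure path unwinds only what was already done. A hypothetical skeleton, with stand-in setup()/teardown() callbacks purely to illustrate the unwind order:

struct proto_ops {
	int (*setup)(void);
	void (*teardown)(void);
};

/* stands in for felix_tag_proto_setup_shared() */
static int setup_shared(const struct proto_ops *new_ops,
			const struct proto_ops *old_ops)
{
	return 0;
}

static int change_proto(const struct proto_ops *new_ops,
			const struct proto_ops *old_ops)
{
	int err;

	err = new_ops->setup();
	if (err)
		return err;		/* old protocol still fully set up */

	err = setup_shared(new_ops, old_ops);
	if (err) {
		new_ops->teardown();	/* undo only what we just did */
		return err;		/* old protocol untouched */
	}

	if (old_ops)
		old_ops->teardown();	/* commit: retire the old one last */

	return 0;
}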
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
@@ -629,6 +719,38 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
return felix->tag_proto;
}
+static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
+ bool uc, bool mc)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
+
+ if (uc)
+ felix->host_flood_uc_mask |= BIT(port);
+ else
+ felix->host_flood_uc_mask &= ~BIT(port);
+
+ if (mc)
+ felix->host_flood_mc_mask |= BIT(port);
+ else
+ felix->host_flood_mc_mask &= ~BIT(port);
+
+ mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
+}
+
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -659,35 +781,111 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port,
}
static int felix_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
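felix_classify_db() is referenced above but is not part of this hunk; judging by its use, it presumably resolves the DSA database to the owning bridge, roughly along these lines (a sketch, not necessarily the exact driver code — NULL meaning a standalone port, an ERR_PTR meaning an unsupported database type):

static struct net_device *felix_classify_db(struct dsa_db db)
{
	switch (db.type) {
	case DSA_DB_PORT:
	case DSA_DB_LAG:
		return NULL;		/* standalone: no owning bridge */
	case DSA_DB_BRIDGE:
		return db.bridge.dev;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}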
static int felix_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -713,19 +911,22 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
ocelot_port_bridge_flags(ocelot, port, val);
return 0;
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, bridge.dev);
-
- return 0;
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
@@ -737,22 +938,36 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -822,6 +1037,21 @@ static int felix_vlan_del(struct dsa_switch *ds, int port,
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
+static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ __set_bit(ocelot->ports[port]->phy_mode,
+ config->supported_interfaces);
+}
+
static void felix_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -833,16 +1063,18 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
felix->info->phylink_validate(ocelot, port, supported, state);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
+static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct phylink_pcs *pcs = NULL;
if (felix->pcs && felix->pcs[port])
- phylink_set_pcs(dp->pl, felix->pcs[port]);
+ pcs = felix->pcs[port];
+
+ return pcs;
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -873,6 +1105,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
felix->info->port_sched_speed_set(ocelot, port, speed);
}
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
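As an example of the restriction enforced here: assuming ocelot->npi is 4 and a user port comes up under a DSA master whose CPU port resolves to 5, felix_cpu_port_for_master() disagrees with the NPI port and the open fails with -EINVAL. In other words, while an NPI-based tagging protocol is active, all user ports must share the single DSA master behind the NPI port.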
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -894,6 +1147,55 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
}
}
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -924,11 +1226,29 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
+ [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
+ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
+ [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
+ [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
+ [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
+};
+
+static int felix_validate_phy_mode(struct felix *felix, int port,
+ phy_interface_t phy_mode)
+{
+ u32 modes = felix->info->port_modes[port];
+
+ if (felix_phy_match_table[phy_mode] & modes)
+ return 0;
+ return -EOPNOTSUPP;
+}
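For example, vsc9959_port_modes[] further down declares ports 4 and 5 as OCELOT_PORT_MODE_INTERNAL only, so felix_validate_phy_mode(felix, 4, PHY_INTERFACE_MODE_SGMII) evaluates OCELOT_PORT_MODE_SGMII & OCELOT_PORT_MODE_INTERNAL == 0 and returns -EOPNOTSUPP — the same per-port restriction the removed vsc9959_prevalidate_phy_mode() used to hardcode.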
+
static int felix_parse_ports_node(struct felix *felix,
struct device_node *ports_node,
phy_interface_t *port_phy_modes)
{
- struct ocelot *ocelot = &felix->ocelot;
struct device *dev = felix->ocelot.dev;
struct device_node *child;
@@ -955,7 +1275,7 @@ static int felix_parse_ports_node(struct felix *felix,
return -ENODEV;
}
- err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
dev_err(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
@@ -992,11 +1312,55 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
return err;
}
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
- struct resource res;
+ struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -1007,7 +1371,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
- ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->vcap_pol.base = felix->info->vcap_pol_base;
@@ -1031,20 +1394,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
}
for (i = 0; i < TARGET_MAX; i++) {
- struct regmap *target;
-
- if (!felix->info->target_io_res[i].name)
- continue;
-
- memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map device memory space\n");
+ "Failed to map device memory space: %pe\n",
+ target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1061,7 +1415,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
- struct regmap *target;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -1073,16 +1426,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map memory space for port %d\n",
- port);
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1090,6 +1438,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->target = target;
+ ocelot_port->index = port;
ocelot->ports[port] = ocelot_port;
}
@@ -1192,7 +1541,8 @@ static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
@@ -1211,45 +1561,34 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_init_port(ocelot, port);
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
- felix_port_qos_map_init(ocelot, port);
+ felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- /* The initial tag protocol is NPI which always returns 0, so
- * there's no real point in checking for errors.
- */
- felix_set_tag_protocol(ds, port, felix->tag_proto);
- break;
- }
+	/* The initial tag protocol is NPI, which won't fail during initial
+	 * setup, so there's no real point in checking for errors.
+ */
+ felix_change_tag_protocol(ds, felix->tag_proto);
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1265,22 +1604,13 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_del_tag_protocol(ds, port, felix->tag_proto);
- break;
- }
+ if (felix->tag_proto_ops)
+ felix->tag_proto_ops->teardown(ds);
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
@@ -1302,14 +1632,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
-static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
- int err, grp = 0;
+ int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
@@ -1317,9 +1656,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
if (!felix->info->quirk_no_xtr_irq)
return false;
- if (ptp_type == PTP_CLASS_NONE)
- return false;
-
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1349,8 +1685,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
}
out:
- if (err < 0)
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
+ }
return true;
}
@@ -1370,7 +1710,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
- if (felix_check_xtr_pkt(ocelot, type)) {
+ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
@@ -1413,9 +1753,18 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
ocelot_port_set_maxlen(ocelot, port, new_mtu);
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
+ felix->info->tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+
return 0;
}
@@ -1430,8 +1779,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
@@ -1469,6 +1827,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1618,6 +1994,44 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
@@ -1625,18 +2039,28 @@ const struct dsa_switch_ops felix_switch_ops = {
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
+ .phylink_get_caps = felix_phylink_get_caps,
.phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
@@ -1658,6 +2082,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
@@ -1678,6 +2104,13 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
+ .port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9395ac119d33..c9c29999c336 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,17 +7,28 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_INTERNAL BIT(0)
+#define OCELOT_PORT_MODE_SGMII BIT(1)
+#define OCELOT_PORT_MODE_QSGMII BIT(2)
+#define OCELOT_PORT_MODE_2500BASEX BIT(3)
+#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+
/* Platform-specific information */
struct felix_info {
- const struct resource *target_io_res;
- const struct resource *port_io_res;
- const struct resource *imdio_res;
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
+ const u32 *port_modes;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
@@ -44,14 +55,27 @@ struct felix_info {
void (*phylink_validate)(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state);
- int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
- struct regmap *(*init_regmap)(struct ocelot *ocelot,
- struct resource *res);
+};
+
+/* Methods for initializing the hardware resources specific to a tagging
+ * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs,
+ * for "ocelot-8021q").
+ * It is important that the resources configured here do not have side effects
+ * for the other tagging protocols. If they do, their configuration needs
+ * to go to felix_tag_proto_setup_shared() instead.
+ */
+struct felix_tag_proto_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
+ unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -64,9 +88,11 @@ struct felix {
struct mii_bus *imdio;
struct phylink_pcs **pcs;
resource_size_t switch_base;
- resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
+ const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
+ unsigned long host_flood_uc_mask;
+ unsigned long host_flood_mc_mask;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index bf8d38239e7e..26a35ae322d1 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -16,14 +16,33 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
+#include <linux/time.h>
#include "felix.h"
+#define VSC9959_NUM_PORTS 6
+
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
#define VSC9959_IMDIO_PCI_BAR 0
+#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII | \
+ OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_2500BASEX | \
+ OCELOT_PORT_MODE_USXGMII)
+
+static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
@@ -256,27 +275,102 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
- REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -298,7 +392,6 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -384,100 +477,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
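Note that DEFINE_RES_MEM_NAMED() takes a start and a size, not a start and an end, and fills in the IORESOURCE_MEM flags itself. Roughly what the first entry above expands to (a sketch of the linux/ioport.h macro):

static const struct resource sys_res = {
	.start = 0x0010000,
	.end   = 0x0010000 + 0x0010000 - 1,	/* 0x001ffff, as before */
	.name  = "sys",
	.flags = IORESOURCE_MEM,
};

This is why felix_init_structs() no longer needs to patch res.flags by hand before handing the resource to the regmap constructor.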
-static const struct resource vsc9959_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static const struct resource vsc9959_imdio_res = {
- .start = 0x8030,
- .end = 0x8040,
- .name = "imdio",
-};
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
@@ -529,99 +565,8 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -944,15 +889,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Pause);
@@ -963,6 +901,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 1000baseT_Half);
phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
state->interface == PHY_INTERFACE_MODE_2500BASEX ||
@@ -975,27 +914,6 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
linkmode_and(state->advertising, state->advertising, mask);
}
-static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 4 && port != 5)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* Not supported on internal to-CPU ports */
- if (port == 4 || port == 5)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
@@ -1028,9 +946,11 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
@@ -1046,10 +966,11 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return -ENOMEM;
}
- memcpy(&res, felix->info->imdio_res, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->imdio_base;
- res.end += felix->imdio_base;
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
@@ -1061,7 +982,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -1081,6 +1002,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
@@ -1132,11 +1054,285 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
+}
+
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot for which egress scheduling is blocked, but we need to still keep 33 ns
+ * available for one packet to be transmitted, otherwise the port tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+	/* Gate always open */
+	if (gate_len_ns == U64_MAX)
+		return U64_MAX;
+
+	/* Gate too short to fit even the 33 ns minimum: nothing is left for
+	 * the guard band, and the unsigned subtraction below must not wrap.
+	 */
+	if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS)
+		return 0;
+
+	return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
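Worked example, assuming a 10000 ns gate: remaining = (10000 - 33) * 1000 = 9967000 ps, i.e. 9.967 us stay usable for the guard band after reserving the minimal 33 ns transmission window.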
+
+/* Extract shortest continuous gate open intervals in ns for each traffic class
+ * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+ * considered U64_MAX. If the gate is always closed, it is considered 0.
+ */
+static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
+ u64 min_gate_len[OCELOT_NUM_TC])
+{
+ struct tc_taprio_sched_entry *entry;
+ u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
+ int tc, i, n;
+
+ /* Initialize arrays */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ min_gate_len[tc] = U64_MAX;
+ gate_len[tc] = 0;
+ }
+
+ /* If we don't have taprio, consider all gates as permanently open */
+ if (!taprio)
+ return;
+
+ n = taprio->num_entries;
+
+ /* Walk through the gate list twice to determine the length
+ * of consecutively open gates for a traffic class, including
+ * open gates that wrap around. We are just interested in the
+ * minimum window size, and this doesn't change what the
+ * minimum is (if the gate never closes, min_gate_len will
+ * remain U64_MAX).
+ */
+ for (i = 0; i < 2 * n; i++) {
+ entry = &taprio->entries[i % n];
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ if (entry->gate_mask & BIT(tc)) {
+ gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
+ } else {
+ /* Gate closes now, record a potential new
+ * minimum and reinitialize length
+ */
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
+ min_gate_len[tc] = gate_len[tc];
+ gate_len[tc] = 0;
+ }
+ }
+ }
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
+}
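To see why the list is walked twice, assume a hypothetical tc0 schedule of three entries: open for 400 ns, closed for 200 ns, open for 300 ns. In a single linear pass the trailing 300 ns interval never sees a closing edge, so it could never be recorded correctly on its own; the second pass lets it merge with the leading 400 ns interval into the 700 ns wrapped window that the cyclic schedule actually provides.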
+
+/* ocelot_write_rix is a macro that concatenates QSYS_QMAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers.
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
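The register name must be a literal token because of the token pasting inside the regmap accessors. A simplified sketch of how ocelot_write_rix() presumably expands (based on the ocelot register helpers):

#define ocelot_write_rix(ocelot, val, reg, ri) \
	__ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))

Passing the register through a variable or an array index would break the reg##_RSZ concatenation, hence the switch statements above.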
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
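The conversion adds the L2 overhead that QSYS_QMAXSDU_CFG_* counts but tc-taprio's per-tc max SDU does not: with ETH_HLEN = 14, 2 * VLAN_HLEN = 8 and ETH_FCS_LEN = 4, a requested max SDU of 1000 octets programs 1026 into the register.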
+
+/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
+ * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
+ * values (the default value is 1518). Also, for traffic class windows smaller
+ * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame
+ * dropping, such that these won't hang the port, as they will never be sent.
+ */
+static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct tc_taprio_qopt_offload *taprio;
+ u64 min_gate_len[OCELOT_NUM_TC];
+ int speed, picos_per_byte;
+ u64 needed_bit_time_ps;
+ u32 val, maxlen;
+ u8 tas_speed;
+ int tc;
+
+ lockdep_assert_held(&ocelot->tas_lock);
+
+ taprio = ocelot_port->taprio;
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
+
+ switch (tas_speed) {
+ case OCELOT_SPEED_10:
+ speed = SPEED_10;
+ break;
+ case OCELOT_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case OCELOT_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case OCELOT_SPEED_2500:
+ speed = SPEED_2500;
+ break;
+ default:
+ return;
+ }
+
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
+	/* MAXLEN_CFG accounts automatically for VLAN, i.e. tagged frames may
+	 * exceed it, so we need to include the VLAN tags manually in the bit
+	 * time calculation, plus the preamble and SFD.
+	 */
+ maxlen = val + 2 * VLAN_HLEN;
+ /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ * 4 octets FCS, 12 octets IFG.
+ */
+	needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
+
+ dev_dbg(ocelot->dev,
+ "port %d: max frame size %d needs %llu ps at speed %d\n",
+ port, maxlen, needed_bit_time_ps, speed);
+
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
+ u64 remaining_gate_len_ps;
+ u32 max_sdu;
+
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
+ /* Setting QMAXSDU_CFG to 0 disables oversized frame
+ * dropping.
+ */
+ max_sdu = requested_max_sdu;
+ dev_dbg(ocelot->dev,
+				"port %d tc %d min gate len %llu, sending all frames\n",
+ port, tc, min_gate_len[tc]);
+ } else {
+ /* If traffic class doesn't support a full MTU sized
+ * frame, make sure to enable oversize frame dropping
+ * for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
+ */
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
+ /* A TC gate may be completely closed, which is a
+ * special case where all packets are oversized.
+			 * Any limit smaller than 64 octets accomplishes this.
+ */
+ if (!max_sdu)
+ max_sdu = 1;
+ /* Take L1 overhead into account, but just don't allow
+ * max_sdu to go negative or to 0. Here we use 20
+ * because QSYS_MAXSDU_CFG_* already counts the 4 FCS
+ * octets as part of packet size.
+ */
+ if (max_sdu > 20)
+ max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
+ dev_info(ocelot->dev,
+				 "port %d tc %d min gate length %llu ns not enough for max frame size %d at %d Mbps, dropping frames over %d octets including FCS\n",
+ port, tc, min_gate_len[tc], maxlen, speed,
+ max_sdu);
+ }
+
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
+ }
+
+ ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
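Plugging in numbers, assuming a 1 Gbps link and the default DEV_MAC_MAXLEN_CFG of 1518: picos_per_byte = (1000000 * 8) / 1000 = 8000 ps, maxlen = 1518 + 8 = 1526, and needed_bit_time_ps = (1526 + 24) * 8000 = 12400000 ps. Any traffic class whose shortest open gate leaves less than roughly 12.4 us after the 33 ns reservation therefore gets oversize dropping enabled.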
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
u32 speed)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 tas_speed;
switch (speed) {
@@ -1157,10 +1353,17 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->tas_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
+
+ if (ocelot_port->taprio)
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
}
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1203,26 +1406,36 @@ static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
struct tc_taprio_qopt_offload *taprio)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct timespec64 base_ts;
int ret, i;
u32 val;
+ mutex_lock(&ocelot->tas_lock);
+
if (!taprio->enable) {
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
+ taprio_offload_free(ocelot_port->taprio);
+ ocelot_port->taprio = NULL;
+
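+ /* Recompute the guard bands so that the per-TC oversize
+ * limits derived from the now-removed schedule are lifted.
+ */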
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
return 0;
}
if (taprio->cycle_time > NSEC_PER_SEC ||
- taprio->cycle_time_extension >= NSEC_PER_SEC)
- return -EINVAL;
+ taprio->cycle_time_extension >= NSEC_PER_SEC) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
- return -ERANGE;
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
+ ret = -ERANGE;
+ goto err;
+ }
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
@@ -1243,8 +1456,10 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
* config is pending, we need to reset the TAS module
*/
val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
- return -EBUSY;
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err;
+ }
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_ENABLE |
@@ -1277,10 +1492,67 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
+ if (ret)
+ goto err;
+
+ ocelot_port->taprio = taprio_offload_get(taprio);
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+err:
+ mutex_unlock(&ocelot->tas_lock);
return ret;
}
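+/* Invoked after the PTP clock has been adjusted: the programmed base time of
+ * an active schedule may now lie in the past, so reprogram each port's TAS
+ * with a base time recomputed against the new PHC time (disable the shaper,
+ * write the base time, trigger a config change, re-enable).
+ */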
+static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+{
+ struct tc_taprio_qopt_offload *taprio;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+ int port;
+ u32 val;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ taprio = ocelot_port->taprio;
+ if (!taprio)
+ continue;
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Disable time-aware shaper */
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_rmw(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Re-enable time-aware shaper */
+ ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+ }
+ mutex_unlock(&ocelot->tas_lock);
+}
+
static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *cbs_qopt)
{
@@ -1327,6 +1599,21 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
return 0;
}
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1334,6 +1621,8 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
@@ -1365,7 +1654,15 @@ struct felix_stream {
u32 ssid;
};
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
struct list_head list;
refcount_t refcount;
u32 index;
@@ -1380,13 +1677,6 @@ struct felix_stream_filter {
u32 maxsdu;
};
-struct felix_stream_filter_counters {
- u32 match;
- u32 not_pass_gate;
- u32 not_pass_sdu;
- u32 red;
-};
-
struct felix_stream_gate {
u32 index;
u8 enable;
@@ -1890,25 +2180,6 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
}
}
-static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
- struct felix_stream_filter_counters *counters)
-{
- ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
- SYS_STAT_CFG_STAT_VIEW_M,
- SYS_STAT_CFG);
-
- counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
- counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
- counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
- counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
-
- /* Clear the PSFP counter. */
- ocelot_write(ocelot,
- SYS_STAT_CFG_STAT_VIEW(index) |
- SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
- SYS_STAT_CFG);
-}
-
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
struct flow_cls_offload *f)
{
@@ -1933,11 +2204,17 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
return ret;
}
+ mutex_lock(&psfp->lock);
+
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_GATE:
size = struct_size(sgi, entries, a->gate.num_entries);
sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
vsc9959_psfp_parse_gate(a, sgi);
ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
if (ret) {
@@ -1970,6 +2247,7 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
sfi.maxsdu = a->police.mtu;
break;
default:
+ mutex_unlock(&psfp->lock);
return -EOPNOTSUPP;
}
}
@@ -2039,6 +2317,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
goto err;
}
+ mutex_unlock(&psfp->lock);
+
return 0;
err:
@@ -2048,6 +2328,8 @@ err:
if (sfi.fm_valid)
ocelot_vcap_policer_del(ocelot, sfi.fmid);
+ mutex_unlock(&psfp->lock);
+
return ret;
}
@@ -2055,18 +2337,22 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
struct flow_cls_offload *f)
{
struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
struct felix_stream_filter *sfi;
- struct ocelot_psfp_list *psfp;
- psfp = &ocelot->psfp;
+ mutex_lock(&psfp->lock);
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
- if (!stream)
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
- if (!sfi)
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
if (sfi->sg_valid)
vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
@@ -2092,27 +2378,83 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
stream_entry->ports);
}
+ mutex_unlock(&psfp->lock);
+
return 0;
}
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
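+ /* The hardware stream filter counters are 32-bit and sit behind the
+ * shared STAT_VIEW window: select the filter, read the counters out,
+ * clear them in hardware (clear shot), and accumulate into the 64-bit
+ * software counters so that they never wrap.
+ */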
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
struct flow_cls_offload *f,
struct flow_stats *stats)
{
- struct felix_stream_filter_counters counters;
- struct ocelot_psfp_list *psfp;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ struct felix_stream_filter *sfi;
struct felix_stream *stream;
- psfp = &ocelot->psfp;
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
if (!stream)
return -ENOMEM;
- vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
- stats->pkts = counters.match;
- stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
- counters.red;
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
return 0;
}
@@ -2124,6 +2466,7 @@ static void vsc9959_psfp_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&psfp->stream_list);
INIT_LIST_HEAD(&psfp->sfi_list);
INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
}
/* When using cut-through forwarding and the egress port runs at a higher data
@@ -2139,7 +2482,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
- int port, other_port;
+ int tc, port, other_port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -2165,7 +2508,8 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
if (ocelot->npi >= 0)
mask |= BIT(ocelot->npi);
else
- mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot);
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
}
/* Calculate the minimum link speed, among the ports that are
@@ -2182,19 +2526,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
min_speed = other_ocelot_port->speed;
}
- /* Enable cut-through forwarding for all traffic classes. */
- if (ocelot_port->speed == min_speed)
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode.
+ */
+ if (ocelot_port->speed == min_speed) {
val = GENMASK(7, 0);
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
set:
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
if (tmp == val)
continue;
dev_dbg(ocelot->dev,
- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
port, mask, ocelot_port->speed, min_speed,
- val ? "enabling" : "disabling");
+ val ? "enabling" : "disabling", val);
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
@@ -2212,34 +2564,35 @@ static const struct ocelot_ops vsc9959_ops = {
.psfp_filter_del = vsc9959_psfp_filter_del,
.psfp_stats_get = vsc9959_psfp_stats_get,
.cut_through_fwd = vsc9959_cut_through_fwd,
+ .tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
};
static const struct felix_info felix_info_vsc9959 = {
- .target_io_res = vsc9959_target_io_res,
- .port_io_res = vsc9959_port_io_res,
- .imdio_res = &vsc9959_imdio_res,
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
.vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
.vcap_pol_base2 = 0,
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
- .num_ports = 6,
+ .num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
.phylink_validate = vsc9959_phylink_validate,
- .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
+ .port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
- .init_regmap = ocelot_regmap_init,
+ .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -2291,7 +2644,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
pci_set_master(pdev);
@@ -2322,7 +2674,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
@@ -2352,8 +2704,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 8c1c9da61602..7af33b2c685d 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -14,11 +14,30 @@
#include <linux/iopoll.h>
#include "felix.h"
+#define VSC9953_NUM_PORTS 10
+
#define VSC9953_VCAP_POLICER_BASE 11
#define VSC9953_VCAP_POLICER_MAX 31
#define VSC9953_VCAP_POLICER_BASE2 120
#define VSC9953_VCAP_POLICER_MAX2 161
+#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
REG(ANA_VLANMASK, 0x00b504),
@@ -251,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -293,7 +383,6 @@ static const u32 vsc9953_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
@@ -369,110 +458,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the device's base address */
-static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9953_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
- {
- .start = 0x0160000,
- .end = 0x016ffff,
- .name = "port6",
- },
- {
- .start = 0x0170000,
- .end = 0x017ffff,
- .name = "port7",
- },
- {
- .start = 0x0180000,
- .end = 0x018ffff,
- .name = "port8",
- },
- {
- .start = 0x0190000,
- .end = 0x019ffff,
- .name = "port9",
- },
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
@@ -524,100 +543,8 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
@@ -917,15 +844,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Pause);
@@ -935,6 +855,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
phylink_set(mask, 2500baseT_Full);
@@ -945,25 +866,6 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
linkmode_and(state->advertising, state->advertising, mask);
}
-static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 8 && port != 9)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- /* Not supported on internal to-CPU ports */
- if (port == 8 || port == 9)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
@@ -1029,7 +931,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
}
/* Needed in order to initialize the bus mutex lock */
- rc = of_mdiobus_register(bus, NULL);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1083,30 +985,30 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
- .target_io_res = vsc9953_target_io_res,
- .port_io_res = vsc9953_port_io_res,
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
.stats_layout = vsc9953_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
.vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
- .num_ports = 10,
+ .num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
- .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
- .init_regmap = ocelot_regmap_init,
+ .port_modes = vsc9953_port_modes,
};
static int seville_probe(struct platform_device *pdev)
@@ -1181,8 +1083,6 @@ static int seville_remove(struct platform_device *pdev)
kfree(felix->ds);
kfree(felix);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index 13b7e679b8b5..ba339747362c 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -7,3 +7,11 @@ config NET_DSA_AR9331
help
This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
switch.
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ help
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
index 274022319066..701f1d199e93 100644
--- a/drivers/net/dsa/qca/Makefile
+++ b/drivers/net/dsa/qca/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+qca8k-y += qca8k-common.o qca8k-8xxx.o
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index da0d7e68643a..e7b98b864fa1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -231,6 +231,7 @@ struct ar9331_sw_port {
int idx;
struct delayed_work mib_read;
struct rtnl_link_stats64 stats;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats_lock;
};
@@ -378,7 +379,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@@ -499,52 +500,27 @@ static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_AR9331;
}
-static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
switch (port) {
case 0:
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
- state->interface, port);
}
static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
@@ -629,6 +605,7 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void ar9331_read_stats(struct ar9331_sw_port *port)
{
struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct ethtool_pause_stats *pstats = &port->pause_stats;
struct rtnl_link_stats64 *stats = &port->stats;
struct ar9331_sw_stats_raw raw;
int ret;
@@ -669,6 +646,9 @@ static void ar9331_read_stats(struct ar9331_sw_port *port)
stats->multicast += raw.rxmulti;
stats->collisions += raw.txcollision;
+ pstats->tx_pause_frames += raw.txpause;
+ pstats->rx_pause_frames += raw.rxpause;
+
spin_unlock(&port->stats_lock);
}
@@ -693,15 +673,27 @@ static void ar9331_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
- .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_get_caps = ar9331_sw_phylink_get_caps,
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
+ .get_pause_stats = ar9331_get_pause_stats,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -843,7 +835,7 @@ static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_write(sbus, p, r, val);
+ return __mdiobus_write(sbus, p, r, val);
}
static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
@@ -854,7 +846,7 @@ static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_read(sbus, p, r);
+ return __mdiobus_read(sbus, p, r);
}
static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
@@ -874,6 +866,8 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
return 0;
}
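+ /* A single switch register access spans several MDIO transfers done
+ * with the unlocked __ar9331_mdio helpers, so hold the bus lock across
+ * the whole sequence; the nested annotation avoids false lockdep
+ * reports when this runs from within another MDIO transaction.
+ */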
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
+
ret = __ar9331_mdio_read(sbus, reg);
if (ret < 0)
goto error;
@@ -885,9 +879,13 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
*(u32 *)val_buf |= ret << 16;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+
return ret;
}
@@ -897,12 +895,15 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
struct mii_bus *sbus = priv->sbus;
int ret;
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
if (reg == AR9331_SW_REG_PAGE) {
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
0, val);
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
}
@@ -922,10 +923,14 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+
return ret;
}
@@ -1091,12 +1096,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
new file mode 100644
index 000000000000..c5c3b4e92f28
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -0,0 +1,2089 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/gpio/consumer.h>
+#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
+
+#include "qca8k.h"
+
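+/* The switch registers are 32 bits wide but are accessed through 16-bit MDIO
+ * transfers. Split a register address into a 10-bit page, a 3-bit field that
+ * is OR-ed into the PHY address (0x10 | r2) and a register offset r1 whose
+ * bit 0 is kept clear, so that r1 and r1 + 1 address the lo and hi words.
+ */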
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+ regaddr >>= 1;
+ *r1 = regaddr & 0x1e;
+
+ regaddr >>= 5;
+ *r2 = regaddr & 0x7;
+
+ regaddr >>= 3;
+ *page = regaddr & 0x3ff;
+}
+
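+/* 32-bit register writes are split into lo and hi 16-bit MDIO writes. Cache
+ * the last value written to each half, so that consecutive accesses which
+ * change only one half can skip the redundant bus transaction.
+ */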
+static int
+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+{
+ u16 *cached_lo = &priv->mdio_cache.lo;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (lo == *cached_lo)
+ return 0;
+
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit lo register\n");
+
+ *cached_lo = lo;
+ return 0;
+}
+
+static int
+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+{
+ u16 *cached_hi = &priv->mdio_cache.hi;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (hi == *cached_hi)
+ return 0;
+
+ ret = bus->write(bus, phy_id, regnum, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit hi register\n");
+
+ *cached_hi = hi;
+ return 0;
+}
+
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret >= 0) {
+ *val = ret;
+ ret = bus->read(bus, phy_id, regnum + 1);
+ *val |= ret << 16;
+ }
+
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit register\n");
+ *val = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
+{
+ u16 lo, hi;
+ int ret;
+
+ lo = val & 0xffff;
+ hi = (u16)(val >> 16);
+
+ ret = qca8k_set_lo(priv, phy_id, regnum, lo);
+ if (ret >= 0)
+ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
+}
+
+static int
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
+{
+ u16 *cached_page = &priv->mdio_cache.page;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (page == *cached_page)
+ return 0;
+
+ ret = bus->write(bus, 0x18, 0, page);
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to set qca8k page\n");
+ return ret;
+ }
+
+ *cached_page = page;
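+ /* Give the switch time to latch the new page before the next
+ * access; the required delay is presumably empirical.
+ */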
+ usleep_range(1000, 2000);
+ return 0;
+}
+
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 command;
+ u8 len, cmd;
+ int i;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ command = get_unaligned_le32(&mgmt_ethhdr->command);
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
+
+ /* Make sure the seq matches the requested packet */
+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
+ mgmt_eth_data->ack = true;
+
+ if (cmd == MDIO_READ) {
+ u32 *val = mgmt_eth_data->data;
+
+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
+
+ /* Get the remaining 12 bytes of data.
+ * The read/write function will extract the requested data.
+ */
+ if (len > QCA_HDR_MGMT_DATA1_LEN) {
+ __le32 *data2 = (__le32 *)skb->data;
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ *val = get_unaligned_le32(data2);
+ val++;
+ data2++;
+ }
+ }
+ }
+
+ complete(&mgmt_eth_data->rw_done);
+}
+
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+ int priority, unsigned int len)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ unsigned int real_len;
+ struct sk_buff *skb;
+ __le32 *data2;
+ u32 command;
+ u16 hdr;
+ int i;
+
+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+ if (!skb)
+ return NULL;
+
+ /* The max value for the len reg is 15 (0xf), but the switch actually
+ * returns 16 bytes. The steps are:
+ * 0: nothing
+ * 1-4: the first 4 bytes
+ * 5-6: the first 12 bytes
+ * 7-15: all 16 bytes
+ */
+ if (len == 16)
+ real_len = 15;
+ else
+ real_len = len;
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->len);
+
+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+ hdr |= QCA_HDR_XMIT_FROM_CPU;
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+ put_unaligned_le32(command, &mgmt_ethhdr->command);
+
+ if (cmd == MDIO_WRITE)
+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
+
+ mgmt_ethhdr->hdr = htons(hdr);
+
+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ put_unaligned_le32(*val, data2);
+ data2++;
+ val++;
+ }
+ }
+
+ return skb;
+}
+
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 seq;
+
+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+ put_unaligned_le32(seq, &mgmt_ethhdr->seq);
+}
+
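+/* Read a switch register via the management Ethernet protocol: transmit an
+ * MDIO_READ management frame to the DSA master and sleep on rw_done until
+ * qca8k_rw_reg_ack_handler() sees a reply with a matching sequence number,
+ * or the timeout expires.
+ */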
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ *val = mgmt_eth_data->data[0];
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val &= ~mask;
+ val |= write_val;
+
+ return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+
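+ /* Prefer the in-band management Ethernet protocol; fall back to the
+ * MDIO path below when it is unavailable or fails.
+ */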
+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
+ return 0;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+
+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
+ return 0;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ if (ret < 0)
+ goto exit;
+
+ val &= ~mask;
+ val |= write_val;
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static struct regmap_config qca8k_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .reg_read = qca8k_regmap_read,
+ .reg_write = qca8k_regmap_write,
+ .reg_update_bits = qca8k_regmap_update_bits,
+ .rd_table = &qca8k_readable_table,
+ .disable_locking = true, /* Locking is handled by qca8k read/write */
+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+};
+
+static int
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ struct sk_buff *read_skb, u32 *val)
+{
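+ /* dev_queue_xmit() consumes the skb, so each busy-wait iteration
+ * must transmit a fresh copy of the preallocated read skb.
+ */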
+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ bool ack;
+ int ret;
+
+ if (!skb)
+ return -ENOMEM;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the copy pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ *val = mgmt_eth_data->data[0];
+
+ return 0;
+}
+
+static int
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ int regnum, u16 data)
+{
+ struct sk_buff *write_skb, *clear_skb, *read_skb;
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ u32 write_val, clear_val = 0, val;
+ struct net_device *mgmt_master;
+ int ret, ret1;
+ bool ack;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ if (read) {
+ write_val |= QCA8K_MDIO_MASTER_READ;
+ } else {
+ write_val |= QCA8K_MDIO_MASTER_WRITE;
+ write_val |= QCA8K_MDIO_MASTER_DATA(data);
+ }
+
+ /* Preallocate all the needed skbs before taking the lock */
+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+ if (!write_skb)
+ return -ENOMEM;
+
+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!clear_skb) {
+ ret = -ENOMEM;
+ goto err_clear_skb;
+ }
+
+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!read_skb) {
+ ret = -ENOMEM;
+ goto err_read_skb;
+ }
+
+ /* Actually start the request:
+ * 1. Send the mdio master packet
+ * 2. Busy-wait for the mdio master command to complete
+ * 3. Get the data if we are reading
+ * 4. Reset the mdio master (even on error)
+ */
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ mgmt_master = priv->mgmt_master;
+ if (!mgmt_master) {
+ mutex_unlock(&mgmt_eth_data->mutex);
+ ret = -EINVAL;
+ goto err_mgmt_master;
+ }
+
+ read_skb->dev = mgmt_master;
+ clear_skb->dev = mgmt_master;
+ write_skb->dev = mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the write pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(write_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+ !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ mgmt_eth_data, read_skb, &val);
+
+ if (ret < 0 && ret1 < 0) {
+ ret = ret1;
+ goto exit;
+ }
+
+ if (read) {
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the read pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(read_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+ } else {
+ kfree_skb(read_skb);
+ }
+exit:
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the clear pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(clear_skb);
+
+ wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ return ret;
+
+ /* Error handling before lock */
+err_mgmt_master:
+ kfree_skb(read_skb);
+err_read_skb:
+ kfree_skb(clear_skb);
+err_clear_skb:
+ kfree_skb(write_skb);
+
+ return ret;
+}
+
+static u32
+qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
+
+static int
+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
+{
+ u16 r1, r2, page;
+ u32 val;
+ int ret, ret1;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ bus, 0x10 | r2, r1, &val);
+
+ /* Check if qca8k_read has failed for a different reason
+ * before returning -ETIMEDOUT
+ */
+ if (ret < 0 && ret1 < 0)
+ return ret1;
+
+ return ret;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+ QCA8K_MDIO_MASTER_DATA(data);
+
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+
+exit:
+ /* Even if the busy wait times out, try to clear MASTER_EN */
+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+
+exit:
+ /* Even if the busy wait times out, try to clear MASTER_EN */
+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (ret >= 0)
+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
+
+ return ret;
+}
+
+static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ int ret;
+
+ /* Use the mdio Ethernet path when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+ if (!ret)
+ return 0;
+
+ return qca8k_mdio_write(priv, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ int ret;
+
+ /* Use the mdio Ethernet path when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+ if (ret >= 0)
+ return ret;
+
+ ret = qca8k_mdio_read(priv, phy, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
+}
+
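+/* The legacy mapping addresses the slave mii bus by switch port number
+ * instead of by PHY address, so e.g. a read of port 3 is translated by
+ * qca8k_port_to_phy() into a read of the internal PHY at MDIO
+ * address 2.
+ */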
+static int
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
+{
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
+}
+
+static int
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
+{
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
+}
+
+static int
+qca8k_mdio_register(struct qca8k_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
+ struct mii_bus *bus;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->priv = (void *)priv;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+ ds->slave_mii_bus = bus;
+
+ /* Check if the devicetree declares the port:phy mapping */
+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+ return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ }
+
+ /* If a mapping can't be found, the legacy mapping is used
+ * via the qca8k_port_to_phy function
+ */
+ bus->name = "qca8k-legacy slave mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+ return devm_mdiobus_register(priv->dev, bus);
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+ struct device_node *ports, *port;
+ phy_interface_t mode;
+ int err;
+
+ ports = of_get_child_by_name(priv->dev->of_node, "ports");
+ if (!ports)
+ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
+
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err) {
+ of_node_put(port);
+ of_node_put(ports);
+ return err;
+ }
+
+ if (!dsa_is_user_port(priv->ds, reg))
+ continue;
+
+ of_get_phy_mode(port, &mode);
+
+ if (of_property_read_bool(port, "phy-handle") &&
+ mode != PHY_INTERFACE_MODE_INTERNAL)
+ external_mdio_mask |= BIT(reg);
+ else
+ internal_mdio_mask |= BIT(reg);
+ }
+
+ of_node_put(ports);
+ if (!external_mdio_mask && !internal_mdio_mask) {
+ dev_err(priv->dev, "no PHYs are defined.\n");
+ return -EINVAL;
+ }
+
+ /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
+ * the MDIO_MASTER register, also _disconnects_ the external MDC
+ * passthrough to the internal PHYs. It's not possible to use both
+ * configurations at the same time!
+ *
+ * Because this came up during the review process:
+ * If the external mdio-bus driver were capable of magically
+ * disabling QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the
+ * qca8k's accessors for the time being, it would be possible to
+ * pull this off.
+ */
+ if (!!external_mdio_mask && !!internal_mdio_mask) {
+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+ return -EINVAL;
+ }
+
+ if (external_mdio_mask) {
+ /* Make sure to disable the internal mdio bus in case
+ * a dt-overlay or driver reload changed the configuration
+ */
+
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
+ }
+
+ return qca8k_mdio_register(priv);
+}
+
+static int
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
+{
+ u32 mask = 0;
+ int ret = 0;
+
+ /* SoC specific settings for ipq8064.
+ * If more devices require this, consider adding
+ * a dedicated binding.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
+
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Find the connected cpu port. Valid ports are 0 and 6 */
+ if (dsa_is_cpu_port(ds, 0))
+ return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ const struct qca8k_match_data *data = priv->info;
+ struct device_node *node = priv->dev->of_node;
+ u32 val = 0;
+ int ret;
+
+ /* The QCA8327 requires the correct package mode to be set.
+ * Its bigger brother, the QCA8328, has the 172 pin layout.
+ * This should be applied by default, but we set it just to make sure.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ /* Set the correct package of 148 pin for QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
+
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
+ }
+
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
+
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
+
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
+
+ /* We have 2 CPU ports. Check them both */
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Skip all ports that are not one of the 2 CPU ports */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+ /* The delay can be declared in 3 different ways:
+ * - phy mode set to rgmii with the standard internal-delay
+ * binding declared
+ * - phy mode set to rgmii-id
+ * - phy mode set to rgmii-txid/rgmii-rxid
+ * The parse logic sets a delay different than 0 only when one
+ * of these 3 ways is used. In every other case the delay is
+ * not enabled. With ID or TX/RXID the delay is enabled and set
+ * to the default, recommended value.
+ */
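+ /* Example (hypothetical DT fragment): a CPU port node with
+ *	phy-mode = "rgmii";
+ *	tx-internal-delay-ps = <2000>;
+ * was parsed into rgmii_tx_delay = 2 and is programmed here
+ * as a 2 ns delay via QCA8K_PORT_PAD_RGMII_TX_DELAY(2).
+ */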
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
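+/* Only the SGMII/1000base-X capable ports 0 and 6 have a PCS; for
+ * every other port/interface combination phylink gets no PCS back
+ * and operates the MAC directly.
+ */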
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct phylink_pcs *pcs = NULL;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ switch (port) {
+ case 0:
+ pcs = &priv->pcs_port_0.pcs;
+ break;
+
+ case 6:
+ pcs = &priv->pcs_port_6.pcs;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return pcs;
+}
+
+static void
+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port_index;
+ u32 reg;
+
+ switch (port) {
+ case 0: /* 1st CPU port */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_SGMII)
+ return;
+
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Internal PHY, nothing to do */
+ return;
+ case 6: /* 2nd CPU port / external PHY */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_1000BASEX)
+ return;
+
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+ default:
+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+ return;
+ }
+
+ if (port != 6 && phylink_autoneg_inband(mode)) {
+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+ __func__);
+ return;
+ }
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* The QCA8337 requires the rgmii rx delay to be set for all ports.
+ * This is enabled through PORT5_PAD_CTRL for all ports,
+ * rather than individual port registers.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337)
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ /* Enable SGMII on the port */
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+ break;
+ default:
+ dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+}
+
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0: /* 1st CPU port */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Internal PHY */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 6: /* 2nd CPU port / external PHY */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ config->legacy_pre_march2020 = false;
+}
+
+static void
+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+}
+
+static void
+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phydev,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+
+ if (phylink_autoneg_inband(mode)) {
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ reg = QCA8K_PORT_STATUS_SPEED_10;
+ break;
+ case SPEED_100:
+ reg = QCA8K_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_1000:
+ reg = QCA8K_PORT_STATUS_SPEED_1000;
+ break;
+ default:
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+ if (rx_pause || dsa_is_cpu_port(ds, port))
+ reg |= QCA8K_PORT_STATUS_RXFLOW;
+
+ if (tx_pause || dsa_is_cpu_port(ds, port))
+ reg |= QCA8K_PORT_STATUS_TXFLOW;
+ }
+
+ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
+}
+
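+/* The PCS callbacks receive a bare phylink_pcs; recover the wrapping
+ * qca8k_pcs (and from it the driver private data and port number)
+ * with the usual container_of() pattern.
+ */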
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct qca8k_pcs, pcs);
+}
+
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int port = pcs_to_qca8k_pcs(pcs)->port;
+ u32 reg;
+ int ret;
+
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int cpu_port_index, ret, port;
+ u32 reg, val;
+
+ port = pcs_to_qca8k_pcs(pcs)->port;
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Enable/disable SerDes auto-negotiation as necessary */
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return ret;
+ if (phylink_autoneg_inband(mode))
+ val &= ~QCA8K_PWS_SERDES_AEN_DIS;
+ else
+ val |= QCA8K_PWS_SERDES_AEN_DIS;
+ qca8k_write(priv, QCA8K_REG_PWS, val);
+
+ /* Configure the SGMII parameters */
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
+
+ if (dsa_is_cpu_port(priv->ds, port)) {
+ /* CPU port, we're talking to the CPU MAC, be a PHY */
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_PHY;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_MAC;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+ }
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* The original code reports port instability when SGMII is used
+ * without the delay set, as SGMII also requires it. Apply the
+ * advised values here or take them from DT.
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* For qca8327/qca8328/qca8334/qca8338 the sgmii interface is
+ * unique, and the falling edge is configured by writing the
+ * PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ return 0;
+}
+
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+ .pcs_get_state = qca8k_pcs_get_state,
+ .pcs_config = qca8k_pcs_config,
+ .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+ int port)
+{
+ qpcs->pcs.ops = &qca8k_pcs_ops;
+
+ /* We don't have interrupts for link changes, so we need to poll */
+ qpcs->pcs.poll = true;
+ qpcs->priv = priv;
+ qpcs->port = port;
+}
+
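+/* Handler for the MIB counters autocast over Ethernet. The switch
+ * replies with one packet per port: the tagger header carries the
+ * source port and the first 3 counters, the remaining counters follow
+ * in the payload, 32 or 64 bit wide depending on the MIB entry size.
+ */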
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ struct mib_ethhdr *mib_ethhdr;
+ __le32 *data2;
+ u8 port;
+ int i;
+
+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+ mib_eth_data = &priv->mib_eth_data;
+
+ /* The switch autocasts MIB counters for every port. Ignore the
+ * other packets and parse only the requested one.
+ */
+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+ if (port != mib_eth_data->req_port)
+ goto exit;
+
+ data2 = (__le32 *)skb->data;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+
+ /* The first 3 mib counters are present in the skb head */
+ if (i < 3) {
+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
+ continue;
+ }
+
+ /* Some mib counters are 64 bit wide */
+ if (mib->size == 2)
+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+ else
+ mib_eth_data->data[i] = get_unaligned_le32(data2);
+
+ data2 += mib->size;
+ }
+
+exit:
+ /* Complete on receiving all the mib packets */
+ if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+ complete(&mib_eth_data->rw_done);
+}
+
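+/* Request the MIB counters for one port via autocast: arm a refcount
+ * with one slot per port, trigger QCA8K_MIB_CAST and wait for the
+ * handler above to complete once every port's reply has been seen.
+ */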
+static int
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ mib_eth_data = &priv->mib_eth_data;
+
+ mutex_lock(&mib_eth_data->mutex);
+
+ reinit_completion(&mib_eth_data->rw_done);
+
+ mib_eth_data->req_port = dp->index;
+ mib_eth_data->data = data;
+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Send mib autocast request */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+ QCA8K_MIB_BUSY);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ if (ret)
+ goto exit;
+
+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
+
+exit:
+ mutex_unlock(&mib_eth_data->mutex);
+
+ return ret;
+}
+
+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Communicate the switch revision to the internal PHY driver.
+ * Based on the switch revision, different values need to be
+ * set in the dbg and mmd regs on the PHY.
+ * The first 2 bits are used to communicate the switch revision
+ * to the PHY driver.
+ */
+ if (port > 0 && port < 6)
+ return priv->switch_revision;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_QCA;
+}
+
+static void
+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
+ bool operational)
+{
+ struct dsa_port *dp = master->dsa_ptr;
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+ if (dp->index != 0)
+ return;
+
+ mutex_lock(&priv->mgmt_eth_data.mutex);
+ mutex_lock(&priv->mib_eth_data.mutex);
+
+ priv->mgmt_master = operational ? (struct net_device *)master : NULL;
+
+ mutex_unlock(&priv->mib_eth_data.mutex);
+ mutex_unlock(&priv->mgmt_eth_data.mutex);
+}
+
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct qca_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_QCA:
+ tagger_data = ds->tagger_data;
+
+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int cpu_port, ret, i;
+ u32 mask;
+
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
+ }
+
+ /* Parse the CPU port config to be later used in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
+ if (ret)
+ return ret;
+
+ qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+ /* Make sure MAC06 is disabled */
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
+ /* Enable CPU Port */
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
+
+ /* Initial setup of all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
+
+ /* Forward all unknown frames to the CPU port for Linux processing.
+ * Notice that in a multi-cpu config only one port should be set
+ * for igmp, unknown, multicast and broadcast packets
+ */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
+
+ /* Setup connection between CPU port & user ports
+ * Configure specific switch configuration for ports
+ */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+ if (ret)
+ return ret;
+
+ /* Enable ARP Auto-learning by default */
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+ /* Port 5 of the qca8337 has some problems under flood conditions. The
+ * original legacy driver had some specific buffer and priority settings
+ * for the different ports suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load conditions.
+ * This problem is limited to the qca8337; other qca8k switches are not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
+ switch (i) {
+ /* The 2 CPU ports and port 5 require a different
+ * priority than any other port.
+ */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+ }
+ }
+
+ /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+ /* Setup our port MTUs to match power on defaults */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ dev_warn(priv->dev, "failed setting MTU settings");
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+ /* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
+ return 0;
+}
+
+static const struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+ .get_strings = qca8k_get_strings,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
+ .get_mac_eee = qca8k_get_mac_eee,
+ .set_mac_eee = qca8k_set_mac_eee,
+ .port_enable = qca8k_port_enable,
+ .port_disable = qca8k_port_disable,
+ .port_change_mtu = qca8k_port_change_mtu,
+ .port_max_mtu = qca8k_port_max_mtu,
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
+ .port_mirror_add = qca8k_port_mirror_add,
+ .port_mirror_del = qca8k_port_mirror_del,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
+ .phylink_get_caps = qca8k_phylink_get_caps,
+ .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
+ .phylink_mac_config = qca8k_phylink_mac_config,
+ .phylink_mac_link_down = qca8k_phylink_mac_link_down,
+ .phylink_mac_link_up = qca8k_phylink_mac_link_up,
+ .get_phy_flags = qca8k_get_phy_flags,
+ .port_lag_join = qca8k_port_lag_join,
+ .port_lag_leave = qca8k_port_lag_leave,
+ .master_state_change = qca8k_master_change,
+ .connect_tag_protocol = qca8k_connect_tag_protocol,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv;
+ int ret;
+
+ /* allocate the private data struct so that we can probe the switch's
+ * ID register
+ */
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
+
+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+ GPIOD_ASIS);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ if (priv->reset_gpio) {
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ /* The active low duration must be greater than 10 ms
+ * and checkpatch.pl wants 20 ms.
+ */
+ msleep(20);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ }
+
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "regmap initialization failed");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->mdio_cache.page = 0xffff;
+ priv->mdio_cache.lo = 0xffff;
+ priv->mdio_cache.hi = 0xffff;
+
+ /* Check the detected switch id */
+ ret = qca8k_read_switch_id(priv);
+ if (ret)
+ return ret;
+
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ mutex_init(&priv->mgmt_eth_data.mutex);
+ init_completion(&priv->mgmt_eth_data.rw_done);
+
+ mutex_init(&priv->mib_eth_data.mutex);
+ init_completion(&priv->mib_eth_data.rw_done);
+
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
+ priv->ds->priv = priv;
+ priv->ds->ops = &qca8k_switch_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+ int port;
+
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
+ continue;
+
+ qca8k_port_set_status(priv, port, enable);
+ }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+ qca8k_set_pm(priv, 0);
+
+ return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+ qca8k_set_pm(priv, 1);
+
+ return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+ qca8k_suspend, qca8k_resume);
+
+static const struct qca8k_info_ops qca8xxx_ops = {
+ .autocast_mib = qca8k_get_ethtool_stats_eth,
+ .read_eth = qca8k_read_eth,
+ .write_eth = qca8k_write_eth,
+};
+
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca8328 = {
+ .id = QCA8K_ID_QCA8327,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca833x = {
+ .id = QCA8K_ID_QCA8337,
+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct of_device_id qca8k_of_match[] = {
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
+ { .compatible = "qca,qca8334", .data = &qca833x },
+ { .compatible = "qca,qca8337", .data = &qca833x },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+ .probe = qca8k_sw_probe,
+ .remove = qca8k_sw_remove,
+ .shutdown = qca8k_sw_shutdown,
+ .mdiodrv.driver = {
+ .name = "qca8k",
+ .of_match_table = qca8k_of_match,
+ .pm = &qca8k_pm_ops,
+ },
+};
+
+mdio_module_driver(qca8kmdio_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644
index 000000000000..fb45b598847b
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -0,0 +1,1221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
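+/* Thin regmap wrappers shared by the qca8k code so the common helpers
+ * don't have to care how the registers are actually reached.
+ */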
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+
+ if (priv->mgmt_master && priv->info->ops->read_eth &&
+ !priv->info->ops->read_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+ u32 tmp;
+
+ if (priv->mgmt_master && priv->info->ops->write_eth &&
+ !priv->info->ops->write_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ tmp = val[i];
+
+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[3];
+ int ret;
+
+ /* load the ARL table into an array */
+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ if (ret)
+ return ret;
+
+ /* vid - 83:72 */
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+ const u8 *mac, u8 aging)
+{
+ u32 reg[3] = { 0 };
+
+ /* vid - 83:72 */
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+}
+
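+/* Drive the ATU (address table) through its busy-bit handshake: write
+ * the command with QCA8K_ATU_FUNC_BUSY set, poll until the hardware
+ * clears the bit, then (for loads) re-read the register to detect a
+ * table-full condition.
+ */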
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+ int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+ int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exists. Delete it first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist. Why delete? */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* The only port in the rule is this port. Don't re-insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+ enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
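+/* VTU entries are updated with a read-modify-load sequence: fetch the
+ * current entry with QCA8K_VLAN_READ, adjust this port's egress mode
+ * bits in VTU_FUNC0, then commit with QCA8K_VLAN_LOAD.
+ */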
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+ bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ /* Port 0 and 6 have no internal PHY */
+ if (port > 0 && port < 6)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+ if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+ priv->info->ops->autocast_mib(ds, port, data) > 0)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *eee)
+{
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int port_mask, cpu_port;
+ int i, ret;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+
+ /* Add all other ports to this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port, i;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Remove this port from the portvlan mask of the other ports
+ * in the bridge
+ */
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
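+/* Worked example: msecs = 300000 gives secs = 300 and val = 300 / 7
+ * = 42, so the effective ageing time programmed into AGE_TIME is
+ * 42 * 7 = 294 seconds, rounded down from the requested value.
+ */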
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* AGE_TIME reg is set in 7s steps */
+ val = secs / 7;
+
+ /* Handle the case of val rounding down to 0, which would
+ * disable learning
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_enabled_map |= BIT(port);
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ /* We only have a general MTU setting.
+ * DSA always sets the CPU port's MTU to the largest MTU of the slave
+ * ports.
+ * Setting MTU just for the CPU port is sufficient to correctly set a
+ * value for every port.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or
+ * the switch panics.
+ * Turn off both cpu ports before applying the new value to prevent
+ * this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
+ /* Include L2 header / FCS length */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
+}
+
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return error otherwise.
+ * When no mirror port is set, the value is set to 0xF
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
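A hedged userspace sketch of the monitor-port decode above. The field placement (MIRROR_PORT_SHIFT/MASK) is a hypothetical stand-in; only the 0xF "no mirror port configured" sentinel follows the comment in qca8k_port_mirror_add():

#include <stdio.h>
#include <stdint.h>

#define MIRROR_PORT_SHIFT 4				/* hypothetical placement */
#define MIRROR_PORT_MASK  (0xFu << MIRROR_PORT_SHIFT)

int main(void)
{
	uint32_t val = MIRROR_PORT_MASK;	/* field all-ones: unset */
	unsigned int monitor = (val & MIRROR_PORT_MASK) >> MIRROR_PORT_SHIFT;

	if (monitor == 0xF)
		printf("no mirror port configured\n");
	else
		printf("mirroring to port %u\n", monitor);
	return 0;
}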
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+ /* No port set to send packet to mirror port. Disable mirror port */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+ }
+
+ return;
+
+err:
+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
+ return false;
+ }
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
+ return false;
+ }
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
+ return false;
+ }
+
+ return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct net_device *lag_dev = lag.dev;
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ unsigned int i;
+ u32 hash = 0;
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the only configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ /* The hash mode is global. Make sure the same hash mode
+ * is set for all 4 possible LAGs.
+ * If this is the only LAG we can set whatever hash
+ * mode we want.
+ * To change the hash mode, all LAGs must be removed
+ * and re-created with the new mode.
+ */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag_dev, "Error: Mismatched hash mode across LAGs is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
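The fallthrough composition of the hash bits can be exercised in isolation. In this runnable sketch the QCA8K_TRUNK_HASH_* bit positions are hypothetical stand-ins; only the L2 / L2+L3 layering matches the switch statement above:

#include <stdio.h>

#define TRUNK_HASH_SA_EN  (1u << 0)	/* hypothetical bit positions */
#define TRUNK_HASH_DA_EN  (1u << 1)
#define TRUNK_HASH_SIP_EN (1u << 2)
#define TRUNK_HASH_DIP_EN (1u << 3)

enum lag_hash { HASH_L2, HASH_L23 };

static unsigned int lag_hash_bits(enum lag_hash type)
{
	unsigned int hash = 0;

	switch (type) {
	case HASH_L23:
		hash |= TRUNK_HASH_SIP_EN | TRUNK_HASH_DIP_EN;
		/* fallthrough: L2+L3 also hashes the MAC addresses */
	case HASH_L2:
		hash |= TRUNK_HASH_SA_EN | TRUNK_HASH_DA_EN;
		break;
	}
	return hash;
}

int main(void)
{
	printf("L2  -> 0x%x\n", lag_hash_bits(HASH_L2));		/* 0x3 */
	printf("L23 -> 0x%x\n", lag_hash_bits(HASH_L23));	/* 0xf */
	return 0;
}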
+
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct dsa_lag lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update the port members. With an empty portmap, disable the trunk */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+ if (ret)
+ return ret;
+
+ /* Search for an empty member when adding, or for the port when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+ /* If the member is flagged as disabled, assume it is
+ * empty
+ */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+ /* If the member is flagged as enabled, assume it is
+ * already in use
+ */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
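A runnable sketch of the member-bitmap edit performed above. The 8-bit-per-trunk stride and the 7-bit member mask are assumptions made for illustration only; the shift/mask/set-bit sequence mirrors the driver code:

#include <stdio.h>
#include <stdint.h>

#define TRUNK_SHIFT(id)	((id) * 8)	/* assumed stride */
#define MEMBER_MASK	0x7fu		/* assumed 7-port bitmap */

int main(void)
{
	uint32_t ctrl0 = 0;
	int id = 1, port = 3;
	uint32_t val;

	/* add port 3 to trunk 1: read-modify-write of the member field */
	val = (ctrl0 >> TRUNK_SHIFT(id)) & MEMBER_MASK;
	val |= 1u << port;
	ctrl0 = (ctrl0 & ~(MEMBER_MASK << TRUNK_SHIFT(id))) |
		(val << TRUNK_SHIFT(id));
	printf("ctrl0 = 0x%08x\n", ctrl0);	/* bit 11 set: 0x00000800 */
	return 0;
}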
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ u32 val;
+ u8 id;
+ int ret;
+
+ if (!priv->info)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != priv->info->id) {
+ dev_err(priv->dev,
+ "Switch id detected %x but expected %x",
+ id, priv->info->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index ab4a417b25a9..0b7a5cb12321 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -11,6 +11,11 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
+#include <linux/dsa/tag_qca.h>
+
+#define QCA8K_ETHERNET_MDIO_PRIORITY 7
+#define QCA8K_ETHERNET_PHY_PRIORITY 6
+#define QCA8K_ETHERNET_TIMEOUT 5
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -63,7 +68,7 @@
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
#define QCA8K_REG_MIB 0x034
-#define QCA8K_MIB_FLUSH BIT(24)
+#define QCA8K_MIB_FUNC GENMASK(26, 24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
#define QCA8K_MDIO_MASTER_CTRL 0x3c
@@ -313,14 +318,26 @@ enum qca8k_vlan_cmd {
QCA8K_VLAN_READ = 6,
};
-struct ar8xxx_port_status {
- int enabled;
+enum qca8k_mib_cmd {
+ QCA8K_MIB_FLUSH = 1,
+ QCA8K_MIB_FLUSH_PORT = 2,
+ QCA8K_MIB_CAST = 3,
+};
+
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+ /* TODO: remove these extra ops when we can support regmap bulk read/write */
+ int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+ int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
};
struct qca8k_match_data {
u8 id;
bool reduced_package;
u8 mib_count;
+ const struct qca8k_info_ops *ops;
};
enum {
@@ -328,6 +345,22 @@ enum {
QCA8K_CPU_PORT6,
};
+struct qca8k_mgmt_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Enforce one mdio read/write at a time */
+ bool ack;
+ u32 seq;
+ u32 data[4];
+};
+
+struct qca8k_mib_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Process one command at a time */
+ refcount_t port_parsed; /* Counter to track parsed ports */
+ u8 req_port;
+ u64 *data; /* pointer to ethtool data */
+};
+
struct qca8k_ports_config {
bool sgmii_rx_clk_falling_edge;
bool sgmii_tx_clk_falling_edge;
@@ -336,23 +369,49 @@ struct qca8k_ports_config {
u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
+struct qca8k_mdio_cache {
+/* The 32bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * mdio writes
+ */
+ u16 page;
+/* lo and hi can also be cached. Per the documentation we can skip one
+ * extra mdio write if lo or hi didn't change.
+ */
+ u16 lo;
+ u16 hi;
+};
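A minimal runnable sketch of the caching idea documented in the struct comments: only touch the bus when the requested page differs from the cached one. The 0xffff "invalid" initial value mirrors the old static qca8k_current_page in the deleted qca8k.c further below:

#include <stdio.h>
#include <stdint.h>

struct mdio_cache { uint16_t page; };

static int bus_writes;

static void set_page(struct mdio_cache *c, uint16_t page)
{
	if (page == c->page)
		return;		/* cache hit: skip the mdio write */
	bus_writes++;		/* cache miss: pay for one bus access */
	c->page = page;
}

int main(void)
{
	struct mdio_cache c = { .page = 0xffff }; /* force first write */

	set_page(&c, 8);
	set_page(&c, 8);	/* skipped */
	set_page(&c, 9);
	printf("bus writes: %d\n", bus_writes);	/* 2 */
	return 0;
}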
+
+struct qca8k_pcs {
+ struct phylink_pcs pcs;
+ struct qca8k_priv *priv;
+ int port;
+};
+
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
u8 mirror_rx;
u8 mirror_tx;
u8 lag_hash_mode;
- bool legacy_phy_port_mapping;
+ /* Each bit corresponds to a port. This switch can support a max of 7 ports.
+ * Bit set: port enabled. Bit clear: port disabled.
+ */
+ u8 port_enabled_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
- struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
+ struct qca8k_mgmt_eth_data mgmt_eth_data;
+ struct qca8k_mib_eth_data mib_eth_data;
+ struct qca8k_mdio_cache mdio_cache;
+ struct qca8k_pcs pcs_port_0;
+ struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
};
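The port_enabled_map bookkeeping consumed by qca8k_port_change_mtu() above is plain bitmap manipulation; a runnable sketch:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint8_t port_enabled_map = 0;

	port_enabled_map |= BIT(0);	/* CPU port 0 enabled */
	port_enabled_map |= BIT(6);	/* CPU port 6 enabled */
	port_enabled_map &= ~BIT(6);	/* CPU port 6 disabled again */

	printf("port 0 enabled: %d\n", !!(port_enabled_map & BIT(0)));
	printf("port 6 enabled: %d\n", !!(port_enabled_map & BIT(6)));
	return 0;
}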
struct qca8k_mib_desc {
@@ -368,4 +427,94 @@ struct qca8k_fdb {
u8 mac[6];
};
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
#endif /* __QCA8K_H */
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
deleted file mode 100644
index 039694518788..000000000000
--- a/drivers/net/dsa/qca8k.c
+++ /dev/null
@@ -1,2616 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2016 John Crispin <john@phrozen.org>
- */
-
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/netdevice.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include <net/dsa.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#include <linux/if_bridge.h>
-#include <linux/mdio.h>
-#include <linux/phylink.h>
-#include <linux/gpio/consumer.h>
-#include <linux/etherdevice.h>
-
-#include "qca8k.h"
-
-#define MIB_DESC(_s, _o, _n) \
- { \
- .size = (_s), \
- .offset = (_o), \
- .name = (_n), \
- }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
- MIB_DESC(1, 0x00, "RxBroad"),
- MIB_DESC(1, 0x04, "RxPause"),
- MIB_DESC(1, 0x08, "RxMulti"),
- MIB_DESC(1, 0x0c, "RxFcsErr"),
- MIB_DESC(1, 0x10, "RxAlignErr"),
- MIB_DESC(1, 0x14, "RxRunt"),
- MIB_DESC(1, 0x18, "RxFragment"),
- MIB_DESC(1, 0x1c, "Rx64Byte"),
- MIB_DESC(1, 0x20, "Rx128Byte"),
- MIB_DESC(1, 0x24, "Rx256Byte"),
- MIB_DESC(1, 0x28, "Rx512Byte"),
- MIB_DESC(1, 0x2c, "Rx1024Byte"),
- MIB_DESC(1, 0x30, "Rx1518Byte"),
- MIB_DESC(1, 0x34, "RxMaxByte"),
- MIB_DESC(1, 0x38, "RxTooLong"),
- MIB_DESC(2, 0x3c, "RxGoodByte"),
- MIB_DESC(2, 0x44, "RxBadByte"),
- MIB_DESC(1, 0x4c, "RxOverFlow"),
- MIB_DESC(1, 0x50, "Filtered"),
- MIB_DESC(1, 0x54, "TxBroad"),
- MIB_DESC(1, 0x58, "TxPause"),
- MIB_DESC(1, 0x5c, "TxMulti"),
- MIB_DESC(1, 0x60, "TxUnderRun"),
- MIB_DESC(1, 0x64, "Tx64Byte"),
- MIB_DESC(1, 0x68, "Tx128Byte"),
- MIB_DESC(1, 0x6c, "Tx256Byte"),
- MIB_DESC(1, 0x70, "Tx512Byte"),
- MIB_DESC(1, 0x74, "Tx1024Byte"),
- MIB_DESC(1, 0x78, "Tx1518Byte"),
- MIB_DESC(1, 0x7c, "TxMaxByte"),
- MIB_DESC(1, 0x80, "TxOverSize"),
- MIB_DESC(2, 0x84, "TxByte"),
- MIB_DESC(1, 0x8c, "TxCollision"),
- MIB_DESC(1, 0x90, "TxAbortCol"),
- MIB_DESC(1, 0x94, "TxMultiCol"),
- MIB_DESC(1, 0x98, "TxSingleCol"),
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
- MIB_DESC(1, 0xa8, "RXUnicast"),
- MIB_DESC(1, 0xac, "TXUnicast"),
-};
-
-/* The 32bit switch registers are accessed indirectly. To achieve this we need
- * to set the page of the register. Track the last page that was set to reduce
- * mdio writes
- */
-static u16 qca8k_current_page = 0xffff;
-
-static void
-qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
-{
- regaddr >>= 1;
- *r1 = regaddr & 0x1e;
-
- regaddr >>= 5;
- *r2 = regaddr & 0x7;
-
- regaddr >>= 3;
- *page = regaddr & 0x3ff;
-}
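A runnable userspace replica of qca8k_split_addr() above, showing how a 32-bit register offset decomposes into the MDIO page and the two low address parts:

#include <stdio.h>
#include <stdint.h>

static void split_addr(uint32_t regaddr, uint16_t *r1, uint16_t *r2,
		       uint16_t *page)
{
	regaddr >>= 1;
	*r1 = regaddr & 0x1e;

	regaddr >>= 5;
	*r2 = regaddr & 0x7;

	regaddr >>= 3;
	*page = regaddr & 0x3ff;
}

int main(void)
{
	uint16_t r1, r2, page;

	split_addr(0x1000, &r1, &r2, &page);	/* MIB - Port0 base */
	printf("reg 0x1000 -> page 0x%x, r2 0x%x, r1 0x%x\n", page, r2, r1);
	return 0;
}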
-
-static int
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
-{
- int ret;
-
- ret = bus->read(bus, phy_id, regnum);
- if (ret >= 0) {
- *val = ret;
- ret = bus->read(bus, phy_id, regnum + 1);
- *val |= ret << 16;
- }
-
- if (ret < 0) {
- dev_err_ratelimited(&bus->dev,
- "failed to read qca8k 32bit register\n");
- *val = 0;
- return ret;
- }
-
- return 0;
-}
-
-static void
-qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
-{
- u16 lo, hi;
- int ret;
-
- lo = val & 0xffff;
- hi = (u16)(val >> 16);
-
- ret = bus->write(bus, phy_id, regnum, lo);
- if (ret >= 0)
- ret = bus->write(bus, phy_id, regnum + 1, hi);
- if (ret < 0)
- dev_err_ratelimited(&bus->dev,
- "failed to write qca8k 32bit register\n");
-}
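The lo/hi composition used by qca8k_mii_read32()/qca8k_mii_write32() is simply a 32-bit value split across two consecutive 16-bit MDIO words; a runnable sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x12345678;
	uint16_t lo = val & 0xffff;		/* first mdio word (regnum) */
	uint16_t hi = (uint16_t)(val >> 16);	/* second word (regnum + 1) */

	printf("lo=0x%04x hi=0x%04x back=0x%08x\n",
	       (unsigned)lo, (unsigned)hi,
	       (uint32_t)lo | ((uint32_t)hi << 16));
	return 0;
}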
-
-static int
-qca8k_set_page(struct mii_bus *bus, u16 page)
-{
- int ret;
-
- if (page == qca8k_current_page)
- return 0;
-
- ret = bus->write(bus, 0x18, 0, page);
- if (ret < 0) {
- dev_err_ratelimited(&bus->dev,
- "failed to set qca8k page\n");
- return ret;
- }
-
- qca8k_current_page = page;
- usleep_range(1000, 2000);
- return 0;
-}
-
-static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
-{
- return regmap_read(priv->regmap, reg, val);
-}
-
-static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return regmap_write(priv->regmap, reg, val);
-}
-
-static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
- return regmap_update_bits(priv->regmap, reg, mask, write_val);
-}
-
-static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- int ret;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
-
-exit:
- mutex_unlock(&bus->mdio_lock);
- return ret;
-}
-
-static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- int ret;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret < 0)
- goto exit;
-
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
-exit:
- mutex_unlock(&bus->mdio_lock);
- return ret;
-}
-
-static int
-qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- u32 val;
- int ret;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
- if (ret < 0)
- goto exit;
-
- val &= ~mask;
- val |= write_val;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
-exit:
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
-static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
- regmap_reg_range(0x0200, 0x0270), /* Parser control */
- regmap_reg_range(0x0400, 0x0454), /* ACL */
- regmap_reg_range(0x0600, 0x0718), /* Lookup */
- regmap_reg_range(0x0800, 0x0b70), /* QM */
- regmap_reg_range(0x0c00, 0x0c80), /* PKT */
- regmap_reg_range(0x0e00, 0x0e98), /* L3 */
- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
-
-};
-
-static const struct regmap_access_table qca8k_readable_table = {
- .yes_ranges = qca8k_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
-
-static struct regmap_config qca8k_regmap_config = {
- .reg_bits = 16,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x16ac, /* end MIB - Port6 range */
- .reg_read = qca8k_regmap_read,
- .reg_write = qca8k_regmap_write,
- .reg_update_bits = qca8k_regmap_update_bits,
- .rd_table = &qca8k_readable_table,
- .disable_locking = true, /* Locking is handled by qca8k read/write */
- .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
-};
-
-static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
-{
- u32 val;
-
- return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
-}
-
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
- u32 reg[4], val;
- int i, ret;
-
- /* load the ARL table into an array */
- for (i = 0; i < 4; i++) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
- if (ret < 0)
- return ret;
-
- reg[i] = val;
- }
-
- /* vid - 83:72 */
- fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
- /* aging - 67:64 */
- fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
- /* portmask - 54:48 */
- fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
- /* mac - 47:0 */
- fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
- fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
- fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
- fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
- fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
- fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
-
- return 0;
-}
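A runnable sketch of the ARL bit slicing documented in the comments above, assuming the QCA8K_ATU_* field macros match the quoted bit ranges: entry bits 83:72 (vid) land in bits 19:8 of reg[2], and bits 67:64 (aging) in bits 3:0:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg2 = 0;
	unsigned int vid = 100, aging = 0xF;

	reg2 |= (vid & 0xFFFu) << 8;	/* vid   - entry bits 83:72 */
	reg2 |= aging & 0xFu;		/* aging - entry bits 67:64 */

	printf("reg[2] = 0x%08x\n", reg2);	/* 0x0000640f */
	printf("vid=%u aging=%u\n", (reg2 >> 8) & 0xFFFu, reg2 & 0xFu);
	return 0;
}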
-
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
- u8 aging)
-{
- u32 reg[3] = { 0 };
- int i;
-
- /* vid - 83:72 */
- reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
- /* aging - 67:64 */
- reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
- /* portmask - 54:48 */
- reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
- /* mac - 47:0 */
- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
-
- /* load the array into the ARL table */
- for (i = 0; i < 3; i++)
- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
-}
-
-static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
-{
- u32 reg;
- int ret;
-
- /* Set the command and FDB index */
- reg = QCA8K_ATU_FUNC_BUSY;
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
- }
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_FDB_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_ATU_FUNC_FULL)
- return -1;
- }
-
- return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
- int ret;
-
- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret < 0)
- return ret;
-
- return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
- u16 vid, u8 aging)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
-{
- struct qca8k_fdb fdb = { 0 };
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-
- qca8k_fdb_write(priv, vid, 0, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_fdb_read(priv, &fdb);
- if (ret < 0)
- goto exit;
-
- /* Rule exists. Delete it first */
- if (!fdb.aging) {
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- if (ret)
- goto exit;
- }
-
- /* Add port to fdb portmask */
- fdb.port_mask |= port_mask;
-
- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
-{
- struct qca8k_fdb fdb = { 0 };
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-
- qca8k_fdb_write(priv, vid, 0, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
- if (ret < 0)
- goto exit;
-
- /* Rule doesn't exist. Why delete? */
- if (!fdb.aging) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- if (ret)
- goto exit;
-
- /* The only port in the rule is this port. Don't re-insert */
- if (fdb.port_mask == port_mask)
- goto exit;
-
- /* Remove port from port mask */
- fdb.port_mask &= ~port_mask;
-
- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
- u32 reg;
- int ret;
-
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
- reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_VLAN_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_VTU_FUNC1_FULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
- u32 reg;
- int ret;
-
- /* We do the right thing with VLAN 0 and treat it as untagged while
- * preserving the tag on egress.
- */
- if (vid == 0)
- return 0;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
- else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
-
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
- u32 reg, mask;
- int ret, i;
- bool del;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
-
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
-
- if ((reg & mask) != mask) {
- del = false;
- break;
- }
- }
-
- if (del) {
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
- } else {
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
- }
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
-
- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- /* Port 0 and 6 have no internal PHY */
- if (port > 0 && port < 6)
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
- regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
- else
- regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
-static u32
-qca8k_port_to_phy(int port)
-{
- /* From Andrew Lunn:
- * Port 0 has no internal phy.
- * Port 1 has an internal PHY at MDIO address 0.
- * Port 2 has an internal PHY at MDIO address 1.
- * ...
- * Port 5 has an internal PHY at MDIO address 4.
- * Port 6 has no internal PHY.
- */
-
- return port - 1;
-}
-
-static int
-qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
-{
- u16 r1, r2, page;
- u32 val;
- int ret, ret1;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- bus, 0x10 | r2, r1, &val);
-
- /* Check if qca8k_read has failed for a different reason
- * before returning -ETIMEDOUT
- */
- if (ret < 0 && ret1 < 0)
- return ret1;
-
- return ret;
-}
-
-static int
-qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
-{
- u16 r1, r2, page;
- u32 val;
- int ret;
-
- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
- return -EINVAL;
-
- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
- QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
- QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
- QCA8K_MDIO_MASTER_DATA(data);
-
- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret)
- goto exit;
-
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_BUSY);
-
-exit:
- /* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
-
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
-static int
-qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
-{
- u16 r1, r2, page;
- u32 val;
- int ret;
-
- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
- return -EINVAL;
-
- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
- QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
- QCA8K_MDIO_MASTER_REG_ADDR(regnum);
-
- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret)
- goto exit;
-
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
-
-exit:
- /* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
-
- mutex_unlock(&bus->mdio_lock);
-
- if (ret >= 0)
- ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
-
- return ret;
-}
-
-static int
-qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
-{
- struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
-
- return qca8k_mdio_write(bus, phy, regnum, data);
-}
-
-static int
-qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
-{
- struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
-
- return qca8k_mdio_read(bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
-{
- struct qca8k_priv *priv = ds->priv;
-
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- return qca8k_mdio_write(priv->bus, port, regnum, data);
-}
-
-static int
-qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- ret = qca8k_mdio_read(priv->bus, port, regnum);
-
- if (ret < 0)
- return 0xffff;
-
- return ret;
-}
-
-static int
-qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
-{
- struct dsa_switch *ds = priv->ds;
- struct mii_bus *bus;
-
- bus = devm_mdiobus_alloc(ds->dev);
-
- if (!bus)
- return -ENOMEM;
-
- bus->priv = (void *)priv;
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
- ds->index);
-
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
-
- ds->slave_mii_bus = bus;
-
- return devm_of_mdiobus_register(priv->dev, bus, mdio);
-}
-
-static int
-qca8k_setup_mdio_bus(struct qca8k_priv *priv)
-{
- u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port, *mdio;
- phy_interface_t mode;
- int err;
-
- ports = of_get_child_by_name(priv->dev->of_node, "ports");
- if (!ports)
- ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
-
- if (!ports)
- return -EINVAL;
-
- for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", &reg);
- if (err) {
- of_node_put(port);
- of_node_put(ports);
- return err;
- }
-
- if (!dsa_is_user_port(priv->ds, reg))
- continue;
-
- of_get_phy_mode(port, &mode);
-
- if (of_property_read_bool(port, "phy-handle") &&
- mode != PHY_INTERFACE_MODE_INTERNAL)
- external_mdio_mask |= BIT(reg);
- else
- internal_mdio_mask |= BIT(reg);
- }
-
- of_node_put(ports);
- if (!external_mdio_mask && !internal_mdio_mask) {
- dev_err(priv->dev, "no PHYs are defined.\n");
- return -EINVAL;
- }
-
- /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
- * the MDIO_MASTER register also _disconnects_ the external MDC
- * passthrough to the internal PHYs. It's not possible to use both
- * configurations at the same time!
- *
- * Because this came up during the review process:
- * If the external mdio-bus driver is capable of magically disabling
- * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
- * accessors for the time being, it would be possible to pull this
- * off.
- */
- if (!!external_mdio_mask && !!internal_mdio_mask) {
- dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
- return -EINVAL;
- }
-
- if (external_mdio_mask) {
- /* Make sure to disable the internal mdio bus in case
- * a dt-overlay and driver reload changed the configuration
- */
-
- return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
- }
-
- /* Check if the devicetree declare the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
- err = qca8k_mdio_register(priv, mdio);
- if (err)
- of_node_put(mdio);
-
- return err;
- }
-
- /* If a mapping can't be found, the legacy mapping is used,
- * based on the qca8k_port_to_phy function
- */
- priv->legacy_phy_port_mapping = true;
- priv->ops.phy_read = qca8k_phy_read;
- priv->ops.phy_write = qca8k_phy_write;
-
- return 0;
-}
-
-static int
-qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
-{
- u32 mask = 0;
- int ret = 0;
-
- /* SoC specific settings for ipq8064.
- * If more devices require this, consider adding
- * a dedicated binding.
- */
- if (of_machine_is_compatible("qcom,ipq8064"))
- mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
-
- /* SoC specific settings for ipq8065 */
- if (of_machine_is_compatible("qcom,ipq8065"))
- mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
-
- if (mask) {
- ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
- QCA8K_MAC_PWR_RGMII0_1_8V |
- QCA8K_MAC_PWR_RGMII1_1_8V,
- mask);
- }
-
- return ret;
-}
-
-static int qca8k_find_cpu_port(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = ds->priv;
-
- /* Find the connected CPU port. Valid ports are 0 and 6 */
- if (dsa_is_cpu_port(ds, 0))
- return 0;
-
- dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
-
- if (dsa_is_cpu_port(ds, 6))
- return 6;
-
- return -EINVAL;
-}
-
-static int
-qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
-{
- struct device_node *node = priv->dev->of_node;
- const struct qca8k_match_data *data;
- u32 val = 0;
- int ret;
-
- /* The QCA8327 requires the correct package mode to be set.
- * Its bigger brother, the QCA8328, has the 172 pin layout.
- * This should be applied by default, but we set it just to make sure.
- */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- data = of_device_get_match_data(priv->dev);
-
- /* Set the correct package of 148 pin for QCA8327 */
- if (data->reduced_package)
- val |= QCA8327_PWS_PACKAGE148_EN;
-
- ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
- val);
- if (ret)
- return ret;
- }
-
- if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
- val |= QCA8K_PWS_POWER_ON_SEL;
-
- if (of_property_read_bool(node, "qca,led-open-drain")) {
- if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
- dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
- return -EINVAL;
- }
-
- val |= QCA8K_PWS_LED_OPEN_EN_CSR;
- }
-
- return qca8k_rmw(priv, QCA8K_REG_PWS,
- QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
- val);
-}
-
-static int
-qca8k_parse_port_config(struct qca8k_priv *priv)
-{
- int port, cpu_port_index = -1, ret;
- struct device_node *port_dn;
- phy_interface_t mode;
- struct dsa_port *dp;
- u32 delay;
-
- /* We have 2 CPU ports. Check them */
- for (port = 0; port < QCA8K_NUM_PORTS; port++) {
- /* Skip every other port */
- if (port != 0 && port != 6)
- continue;
-
- dp = dsa_to_port(priv->ds, port);
- port_dn = dp->dn;
- cpu_port_index++;
-
- if (!of_device_is_available(port_dn))
- continue;
-
- ret = of_get_phy_mode(port_dn, &mode);
- if (ret)
- continue;
-
- switch (mode) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_SGMII:
- delay = 0;
-
- if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
- /* Switch regs accept values in ns, convert ps to ns */
- delay = delay / 1000;
- else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
- mode == PHY_INTERFACE_MODE_RGMII_TXID)
- delay = 1;
-
- if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- delay = 3;
- }
-
- priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
-
- delay = 0;
-
- if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
- /* Switch regs accept values in ns, convert ps to ns */
- delay = delay / 1000;
- else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
- mode == PHY_INTERFACE_MODE_RGMII_RXID)
- delay = 2;
-
- if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- delay = 3;
- }
-
- priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
-
- /* Skip sgmii parsing for rgmii* mode */
- if (mode == PHY_INTERFACE_MODE_RGMII ||
- mode == PHY_INTERFACE_MODE_RGMII_ID ||
- mode == PHY_INTERFACE_MODE_RGMII_TXID ||
- mode == PHY_INTERFACE_MODE_RGMII_RXID)
- break;
-
- if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
- priv->ports_config.sgmii_tx_clk_falling_edge = true;
-
- if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
- priv->ports_config.sgmii_rx_clk_falling_edge = true;
-
- if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
- priv->ports_config.sgmii_enable_pll = true;
-
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
- priv->ports_config.sgmii_enable_pll = false;
- }
-
- if (priv->switch_revision < 2)
- dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
- }
-
- break;
- default:
- continue;
- }
- }
-
- return 0;
-}
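The delay normalization done in the loop above (DT values in picoseconds, register field in nanoseconds, capped at 3) can be shown as a runnable sketch; the 3 ns cap follows the FIELD_FIT() error path:

#include <stdio.h>

static unsigned int normalize_delay_ps(unsigned int ps)
{
	unsigned int ns = ps / 1000;	/* switch regs accept values in ns */

	if (ns > 3) {			/* delay field only fits 0..3 ns */
		fprintf(stderr, "delay limited to 3ns, clamping\n");
		ns = 3;
	}
	return ns;
}

int main(void)
{
	printf("2000 ps -> %u ns\n", normalize_delay_ps(2000));
	printf("5000 ps -> %u ns\n", normalize_delay_ps(5000));
	return 0;
}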
-
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, ret, i;
- u32 mask;
-
- cpu_port = qca8k_find_cpu_port(ds);
- if (cpu_port < 0) {
- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
- return cpu_port;
- }
-
- /* Parse CPU port config to be later used in phy_link mac_config */
- ret = qca8k_parse_port_config(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mdio_bus(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_of_pws_reg(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mac_pwr_sel(priv);
- if (ret)
- return ret;
-
- /* Make sure MAC06 is disabled */
- ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
- if (ret) {
- dev_err(priv->dev, "failed disabling MAC06 exchange");
- return ret;
- }
-
- /* Enable CPU Port */
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
- }
-
- /* Enable MIB counters */
- ret = qca8k_mib_init(priv);
- if (ret)
- dev_warn(priv->dev, "mib init failed");
-
- /* Initial setup of all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
- if (ret)
- return ret;
-
- /* Enable QCA header mode on all cpu ports */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
- FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
- }
-
- /* Disable MAC by default on all user ports */
- if (dsa_is_user_port(ds, i))
- qca8k_port_set_status(priv, i, 0);
- }
-
- /* Forward all unknown frames to the CPU port for Linux processing.
- * Notice that in a multi-cpu config only one port should be set
- * for igmp, unknown, multicast and broadcast packets
- */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
- if (ret)
- return ret;
-
- /* Setup connection between CPU port & user ports
- * Configure specific switch configuration for ports
- */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* CPU port gets connected to all user ports of the switch */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
- if (ret)
- return ret;
- }
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(cpu_port));
- if (ret)
- return ret;
-
- /* Enable ARP Auto-learning by default */
- ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
- /* For port based vlans to work we need to set the
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- QCA8K_EGREES_VLAN_PORT_MASK(i),
- QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
- }
-
- /* Port 5 of the qca8337 has some problems under flood conditions. The
- * original legacy driver had specific buffer and priority settings
- * for the different ports, suggested by the QCA switch team. Add these
- * missing settings to improve switch stability under load.
- * This problem is limited to the qca8337; other qca8k switches are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- switch (i) {
- /* The 2 CPU ports and port 5 require different
- * priorities than the other ports.
- */
- case 0:
- case 5:
- case 6:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
- break;
- default:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
- }
- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
-
- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN;
- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN,
- mask);
- }
-
- /* Set the initial MTU for every port.
- * We only have a general MTU setting, so track
- * every port and set the max across all ports.
- * Set the per-port MTU to 1500, as the MTU change function
- * will add the overhead; if it were set to 1518 the
- * overhead would be applied again and we would end up with
- * an MTU of 1536 instead of 1518.
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
- }
-
- /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
- mask);
- }
-
- /* Setup our port MTUs to match power on defaults */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
- if (ret)
- dev_warn(priv->dev, "failed setting MTU settings");
-
- /* Flush the FDB table */
- qca8k_fdb_flush(priv);
-
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
- /* Set the min and max ageing values supported */
- ds->ageing_time_min = 7000;
- ds->ageing_time_max = 458745000;
-
- /* Set max number of LAGs supported */
- ds->num_lag_ids = QCA8K_NUM_LAGS;
-
- return 0;
-}
-
-static void
-qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
- u32 reg)
-{
- u32 delay, val = 0;
- int ret;
-
- /* The delay can be declared in 3 different ways:
- * the phy mode set to rgmii with the standard internal-delay
- * binding, or an rgmii-id or rgmii-tx/rx phy mode.
- * The parse logic sets a delay different from 0 only when one
- * of the 3 ways is used. In all other cases the delay is
- * not enabled. With ID or TX/RXID the delay is enabled and set
- * to the default and recommended value.
- */
- if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
- delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
-
- val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
- }
-
- if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
- delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
-
- val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
- }
-
- /* Set RGMII delay based on the selected values */
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
- val);
- if (ret)
- dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
- cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
-}
-
-static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- int cpu_port_index, ret;
- u32 reg, val;
-
- switch (port) {
- case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- return;
-
- reg = QCA8K_REG_PORT0_PAD_CTRL;
- cpu_port_index = QCA8K_CPU_PORT0;
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- /* Internal PHY, nothing to do */
- return;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- return;
-
- reg = QCA8K_REG_PORT6_PAD_CTRL;
- cpu_port_index = QCA8K_CPU_PORT6;
- break;
- default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
- return;
- }
-
- if (port != 6 && phylink_autoneg_inband(mode)) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
-
- /* Configure rgmii delay */
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- /* The QCA8337 requires the rgmii rx delay to be set for all ports.
- * This is enabled through PORT5_PAD_CTRL for all ports,
- * rather than individual port registers.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337)
- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
- break;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- /* Enable SGMII on the port */
- qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-
- /* Enable/disable SerDes auto-negotiation as necessary */
- ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
- if (ret)
- return;
- if (phylink_autoneg_inband(mode))
- val &= ~QCA8K_PWS_SERDES_AEN_DIS;
- else
- val |= QCA8K_PWS_SERDES_AEN_DIS;
- qca8k_write(priv, QCA8K_REG_PWS, val);
-
- /* Configure the SGMII parameters */
- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
- if (ret)
- return;
-
- val |= QCA8K_SGMII_EN_SD;
-
- if (priv->ports_config.sgmii_enable_pll)
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX;
-
- if (dsa_is_cpu_port(ds, port)) {
- /* CPU port, we're talking to the CPU MAC, be a PHY */
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_PHY;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_MAC;
- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_BASEX;
- }
-
- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
- /* The original code reports port instability, as SGMII also
- * requires the delay to be set. Apply the advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- /* For qca8327/qca8328/qca8334/qca8338 the sgmii setting is unique and
- * the falling edge is set by writing to the PORT0 PAD reg
- */
- if (priv->switch_id == QCA8K_ID_QCA8327 ||
- priv->switch_id == QCA8K_ID_QCA8337)
- reg = QCA8K_REG_PORT0_PAD_CTRL;
-
- val = 0;
-
- /* SGMII Clock phase configuration */
- if (priv->ports_config.sgmii_rx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
-
- if (priv->ports_config.sgmii_tx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
-
- if (val)
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
- val);
-
- break;
- default:
- dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-}
-
-static void
-qca8k_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (port) {
- case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- goto unsupported;
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- /* Internal PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- goto unsupported;
- break;
- default:
-unsupported:
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- phylink_set(mask, 1000baseX_Full);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int
-qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
- int ret;
-
- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
- if (ret < 0)
- return ret;
-
- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
- state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
- DUPLEX_HALF;
-
- switch (reg & QCA8K_PORT_STATUS_SPEED) {
- case QCA8K_PORT_STATUS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case QCA8K_PORT_STATUS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case QCA8K_PORT_STATUS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- break;
- }
-
- state->pause = MLO_PAUSE_NONE;
- if (reg & QCA8K_PORT_STATUS_RXFLOW)
- state->pause |= MLO_PAUSE_RX;
- if (reg & QCA8K_PORT_STATUS_TXFLOW)
- state->pause |= MLO_PAUSE_TX;
-
- return 1;
-}
-
-static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- struct qca8k_priv *priv = ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
-}
-
-static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
- int speed, int duplex, bool tx_pause, bool rx_pause)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
-
- if (phylink_autoneg_inband(mode)) {
- reg = QCA8K_PORT_STATUS_LINK_AUTO;
- } else {
- switch (speed) {
- case SPEED_10:
- reg = QCA8K_PORT_STATUS_SPEED_10;
- break;
- case SPEED_100:
- reg = QCA8K_PORT_STATUS_SPEED_100;
- break;
- case SPEED_1000:
- reg = QCA8K_PORT_STATUS_SPEED_1000;
- break;
- default:
- reg = QCA8K_PORT_STATUS_LINK_AUTO;
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- reg |= QCA8K_PORT_STATUS_DUPLEX;
-
- if (rx_pause || dsa_is_cpu_port(ds, port))
- reg |= QCA8K_PORT_STATUS_RXFLOW;
-
- if (tx_pause || dsa_is_cpu_port(ds, port))
- reg |= QCA8K_PORT_STATUS_TXFLOW;
- }
-
- reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
-}
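-
-/* Illustration (not part of the original driver; a restatement of the
- * branches above): a forced 1000 Mbit/s full-duplex link with both pause
- * directions enabled writes QCA8K_PORT_STATUS_SPEED_1000 | DUPLEX |
- * RXFLOW | TXFLOW, plus the always-set TXMAC | RXMAC enables; an
- * in-band (SGMII) link instead writes only LINK_AUTO and lets the
- * hardware resolve speed, duplex and pause on its own.
- */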
-
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
-{
- const struct qca8k_match_data *match_data;
- struct qca8k_priv *priv = ds->priv;
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- match_data = of_device_get_match_data(priv->dev);
-
- for (i = 0; i < match_data->mib_count; i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
-}
-
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- const struct qca8k_match_data *match_data;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
- int ret;
-
- match_data = of_device_get_match_data(priv->dev);
-
- for (i = 0; i < match_data->mib_count; i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
- ret = qca8k_read(priv, reg, &val);
- if (ret < 0)
- continue;
-
- if (mib->size == 2) {
- ret = qca8k_read(priv, reg + 4, &hi);
- if (ret < 0)
- continue;
- }
-
- data[i] = val;
- if (mib->size == 2)
- data[i] |= (u64)hi << 32;
- }
-}
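-
-/* A minimal sketch (values hypothetical, not from the driver) of how
- * the two 32-bit halves read above combine into one 64-bit counter:
- */
-static __maybe_unused u64 qca8k_mib_combine_example(void)
-{
-	u32 lo = 0x89abcdef;	/* register at mib->offset */
-	u32 hi = 0x00000012;	/* register at mib->offset + 4, size == 2 only */
-
-	return ((u64)hi << 32) | lo;	/* 0x0000001289abcdef */
-}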
-
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- const struct qca8k_match_data *match_data;
- struct qca8k_priv *priv = ds->priv;
-
- if (sset != ETH_SS_STATS)
- return 0;
-
- match_data = of_device_get_match_data(priv->dev);
-
- return match_data->mib_count;
-}
-
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
- u32 reg;
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
- if (ret < 0)
- goto exit;
-
- if (eee->eee_enabled)
- reg |= lpi_en;
- else
- reg &= ~lpi_en;
- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
- break;
- case BR_STATE_LISTENING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
- break;
- case BR_STATE_LEARNING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
- break;
- }
-
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
-}
-
-static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = regmap_set_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
-
- /* Add all other ports to this port's portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
-}
-
-static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Remove this port from the portvlan mask of the other ports
- * in the bridge
- */
- regmap_clear_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
-}
-
-static void
-qca8k_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
-{
- struct qca8k_priv *priv = ds->priv;
- unsigned int secs = msecs / 1000;
- u32 val;
-
- /* The AGE_TIME reg is set in 7 s steps */
- val = secs / 7;
-
- /* A value of 0 would disable ageing and learning entirely;
- * clamp to 1 instead.
- */
- if (!val)
- val = 1;
-
- return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
- QCA8K_ATU_AGE_TIME(val));
-}
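-
-/* Worked example (inputs illustrative, not from the driver): msecs =
- * 300000 gives 300 s / 7 = 42 register units, so entries really age
- * out after 42 * 7 = 294 s; anything below 7000 ms computes to 0 and
- * is clamped to 1, since writing 0 would stop ageing altogether.
- */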
-
-static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
-
- if (dsa_is_user_port(ds, port))
- phy_support_asym_pause(phy);
-
- return 0;
-}
-
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
-}
-
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
-
- priv->port_mtu[port] = new_mtu;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
-
- /* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
-}
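-
-/* Example (illustrative): with all ports at the default 1500-byte MTU
- * the register is written with 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4)
- * = 1518 bytes; raising a single port to 9000 raises the shared
- * frame-size limit to 9018 bytes for the whole switch.
- */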
-
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
- return QCA8K_MAX_MTU;
-}
-
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
- u16 port_mask, u16 vid)
-{
- /* Set the vid to the port vlan id if no vid is set */
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_add(priv, addr, port_mask, vid,
- QCA8K_ATU_STATUS_STATIC);
-}
-
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_del(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- struct qca8k_fdb _fdb = { 0 };
- int cnt = QCA8K_NUM_FDB_RECORDS;
- bool is_static;
- int ret = 0;
-
- mutex_lock(&priv->reg_mutex);
- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
- if (!_fdb.aging)
- break;
- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
- ret = cb(_fdb.mac, _fdb.vid, is_static, data);
- if (ret)
- break;
- }
- mutex_unlock(&priv->reg_mutex);
-
- return 0;
-}
-
-static int
-qca8k_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct qca8k_priv *priv = ds->priv;
- const u8 *addr = mdb->addr;
- u16 vid = mdb->vid;
-
- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct qca8k_priv *priv = ds->priv;
- const u8 *addr = mdb->addr;
- u16 vid = mdb->vid;
-
- return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
-{
- struct qca8k_priv *priv = ds->priv;
- int monitor_port, ret;
- u32 reg, val;
-
- /* Check for an existing entry */
- if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
- return -EEXIST;
-
- ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
- if (ret)
- return ret;
-
- /* QCA83xx can have only one port set to mirror mode.
- * Check that the correct port is requested and return error otherwise.
- * When no mirror port is set, the value is 0xF.
- */
- monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
- return -EEXIST;
-
- /* Set the monitor port */
- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
- mirror->to_local_port);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (ret)
- return ret;
-
- if (ingress) {
- reg = QCA8K_PORT_LOOKUP_CTRL(port);
- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
- } else {
- reg = QCA8K_REG_PORT_HOL_CTRL1(port);
- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
- }
-
- ret = regmap_update_bits(priv->regmap, reg, val, val);
- if (ret)
- return ret;
-
- /* Track mirror port for tx and rx to decide when the
- * mirror port has to be disabled.
- */
- if (ingress)
- priv->mirror_rx |= BIT(port);
- else
- priv->mirror_tx |= BIT(port);
-
- return 0;
-}
-
-static void
-qca8k_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg, val;
- int ret;
-
- if (mirror->ingress) {
- reg = QCA8K_PORT_LOOKUP_CTRL(port);
- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
- } else {
- reg = QCA8K_REG_PORT_HOL_CTRL1(port);
- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
- }
-
- ret = regmap_clear_bits(priv->regmap, reg, val);
- if (ret)
- goto err;
-
- if (mirror->ingress)
- priv->mirror_rx &= ~BIT(port);
- else
- priv->mirror_tx &= ~BIT(port);
-
- /* No port is sending packets to the mirror port any more. Disable the mirror port */
- if (!priv->mirror_rx && !priv->mirror_tx) {
- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (ret)
- goto err;
- }
-
- return;
-err:
- dev_err(priv->dev, "Failed to del mirror port from %d", port);
-}
-
-static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- if (vlan_filtering) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
- if (ret) {
- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
- return ret;
- }
-
- if (pvid) {
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- QCA8K_EGREES_VLAN_PORT_MASK(port),
- QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_del(priv, port, vlan->vid);
- if (ret)
- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
-
- return ret;
-}
-
-static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
-
- /* Communicate the switch revision to the internal phy driver.
- * Based on the switch revision, different values need to be
- * set in the dbg and mmd regs of the phy.
- * The first 2 bits are used to carry the switch revision
- * to the phy driver.
- */
- if (port > 0 && port < 6)
- return priv->switch_revision;
-
- return 0;
-}
-
-static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_QCA;
-}
-
-static bool
-qca8k_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
-{
- struct dsa_port *dp;
- int id, members = 0;
-
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
- return false;
-
- dsa_lag_foreach_port(dp, ds->dst, lag)
- /* Includes the port joining the LAG */
- members++;
-
- if (members > QCA8K_NUM_PORTS_FOR_LAG)
- return false;
-
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
- return false;
-
- if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
- return false;
-
- return true;
-}
-
-static int
-qca8k_lag_setup_hash(struct dsa_switch *ds,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
-{
- struct qca8k_priv *priv = ds->priv;
- bool unique_lag = true;
- u32 hash = 0;
- int i, id;
-
- id = dsa_lag_id(ds->dst, lag);
-
- switch (info->hash_type) {
- case NETDEV_LAG_HASH_L23:
- hash |= QCA8K_TRUNK_HASH_SIP_EN;
- hash |= QCA8K_TRUNK_HASH_DIP_EN;
- fallthrough;
- case NETDEV_LAG_HASH_L2:
- hash |= QCA8K_TRUNK_HASH_SA_EN;
- hash |= QCA8K_TRUNK_HASH_DA_EN;
- break;
- default: /* We should NEVER reach this */
- return -EOPNOTSUPP;
- }
-
- /* Check if we are the only configured LAG */
- dsa_lags_foreach_id(i, ds->dst)
- if (i != id && dsa_lag_dev(ds->dst, i)) {
- unique_lag = false;
- break;
- }
-
- /* The hash mode is global, so the same hash mode must be
- * used for all 4 possible LAGs.
- * If we are the only LAG we can set whatever hash mode
- * we want.
- * To change the hash mode, all LAGs must be removed and
- * the mode set again with the last one.
- */
- if (unique_lag) {
- priv->lag_hash_mode = hash;
- } else if (priv->lag_hash_mode != hash) {
- netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
- return -EOPNOTSUPP;
- }
-
- return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
- QCA8K_TRUNK_HASH_MASK, hash);
-}
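-
-/* Example (restating the switch statement above): NETDEV_LAG_HASH_L23
- * programs SIP_EN | DIP_EN | SA_EN | DA_EN, distributing flows over
- * both the MAC and IPv4 address pairs, while NETDEV_LAG_HASH_L2 uses
- * SA_EN | DA_EN only.
- */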
-
-static int
-qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
- struct net_device *lag, bool delete)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret, id, i;
- u32 val;
-
- id = dsa_lag_id(ds->dst, lag);
-
- /* Read current port member */
- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
- if (ret)
- return ret;
-
- /* Shift val to the correct trunk */
- val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
- val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
- if (delete)
- val &= ~BIT(port);
- else
- val |= BIT(port);
-
- /* Update the port members. With an empty portmap, disable the trunk */
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
- QCA8K_REG_GOL_TRUNK_MEMBER(id) |
- QCA8K_REG_GOL_TRUNK_EN(id),
- !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
- val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
-
- /* Search for an empty member slot when adding, or for the port's slot when deleting */
- for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
- if (ret)
- return ret;
-
- val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
-
- if (delete) {
- /* If the member is not flagged as enabled, assume this
- * slot is empty
- */
- if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
- continue;
-
- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
- if (val != port)
- continue;
- } else {
- /* If the member is already flagged as enabled, this
- * slot is already in use
- */
- if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
- continue;
- }
-
- /* We have found the member to add/remove */
- break;
- }
-
- /* Set port in the correct port mask or disable port if in delete mode */
- return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
- !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
- port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
-}
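-
-/* Example walk-through (hypothetical): deleting port 3 from trunk 1
- * first clears BIT(3) in the trunk's member mask, then scans the four
- * member slots for the enabled entry carrying port 3 and disables that
- * slot; adding instead picks the first slot not yet flagged enabled.
- */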
-
-static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
-{
- int ret;
-
- if (!qca8k_lag_can_offload(ds, lag, info))
- return -EOPNOTSUPP;
-
- ret = qca8k_lag_setup_hash(ds, lag, info);
- if (ret)
- return ret;
-
- return qca8k_lag_refresh_portmap(ds, port, lag, false);
-}
-
-static int
-qca8k_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
-{
- return qca8k_lag_refresh_portmap(ds, port, lag, true);
-}
-
-static const struct dsa_switch_ops qca8k_switch_ops = {
- .get_tag_protocol = qca8k_get_tag_protocol,
- .setup = qca8k_setup,
- .get_strings = qca8k_get_strings,
- .get_ethtool_stats = qca8k_get_ethtool_stats,
- .get_sset_count = qca8k_get_sset_count,
- .set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
- .set_mac_eee = qca8k_set_mac_eee,
- .port_enable = qca8k_port_enable,
- .port_disable = qca8k_port_disable,
- .port_change_mtu = qca8k_port_change_mtu,
- .port_max_mtu = qca8k_port_max_mtu,
- .port_stp_state_set = qca8k_port_stp_state_set,
- .port_bridge_join = qca8k_port_bridge_join,
- .port_bridge_leave = qca8k_port_bridge_leave,
- .port_fast_age = qca8k_port_fast_age,
- .port_fdb_add = qca8k_port_fdb_add,
- .port_fdb_del = qca8k_port_fdb_del,
- .port_fdb_dump = qca8k_port_fdb_dump,
- .port_mdb_add = qca8k_port_mdb_add,
- .port_mdb_del = qca8k_port_mdb_del,
- .port_mirror_add = qca8k_port_mirror_add,
- .port_mirror_del = qca8k_port_mirror_del,
- .port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_add = qca8k_port_vlan_add,
- .port_vlan_del = qca8k_port_vlan_del,
- .phylink_validate = qca8k_phylink_validate,
- .phylink_mac_link_state = qca8k_phylink_mac_link_state,
- .phylink_mac_config = qca8k_phylink_mac_config,
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
- .get_phy_flags = qca8k_get_phy_flags,
- .port_lag_join = qca8k_port_lag_join,
- .port_lag_leave = qca8k_port_lag_leave,
-};
-
-static int qca8k_read_switch_id(struct qca8k_priv *priv)
-{
- const struct qca8k_match_data *data;
- u32 val;
- u8 id;
- int ret;
-
- /* Get the switch ID from the compatible string */
- data = of_device_get_match_data(priv->dev);
- if (!data)
- return -ENODEV;
-
- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
- if (ret < 0)
- return -ENODEV;
-
- id = QCA8K_MASK_CTRL_DEVICE_ID(val);
- if (id != data->id) {
- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
- return -ENODEV;
- }
-
- priv->switch_id = id;
-
- /* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
-
- return 0;
-}
-
-static int
-qca8k_sw_probe(struct mdio_device *mdiodev)
-{
- struct qca8k_priv *priv;
- int ret;
-
- /* Allocate the private data struct so that we can probe the switch's
- * ID register
- */
- priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
-
- priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
- GPIOD_ASIS);
- if (IS_ERR(priv->reset_gpio))
- return PTR_ERR(priv->reset_gpio);
-
- if (priv->reset_gpio) {
- gpiod_set_value_cansleep(priv->reset_gpio, 1);
- /* The active low duration must be greater than 10 ms
- * and checkpatch.pl wants 20 ms.
- */
- msleep(20);
- gpiod_set_value_cansleep(priv->reset_gpio, 0);
- }
-
- /* Start by setting up the register mapping */
- priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
- &qca8k_regmap_config);
- if (IS_ERR(priv->regmap)) {
- dev_err(priv->dev, "regmap initialization failed");
- return PTR_ERR(priv->regmap);
- }
-
- /* Check the detected switch id */
- ret = qca8k_read_switch_id(priv);
- if (ret)
- return ret;
-
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
- mutex_init(&priv->reg_mutex);
- dev_set_drvdata(&mdiodev->dev, priv);
-
- return dsa_register_switch(priv->ds);
-}
-
-static void
-qca8k_sw_remove(struct mdio_device *mdiodev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
- int i;
-
- if (!priv)
- return;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
-
- dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
-
- if (!priv)
- return;
-
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static void
-qca8k_set_pm(struct qca8k_priv *priv, int enable)
-{
- int i;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (!priv->port_sts[i].enabled)
- continue;
-
- qca8k_port_set_status(priv, i, enable);
- }
-}
-
-static int qca8k_suspend(struct device *dev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(dev);
-
- qca8k_set_pm(priv, 0);
-
- return dsa_switch_suspend(priv->ds);
-}
-
-static int qca8k_resume(struct device *dev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(dev);
-
- qca8k_set_pm(priv, 1);
-
- return dsa_switch_resume(priv->ds);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
- qca8k_suspend, qca8k_resume);
-
-static const struct qca8k_match_data qca8327 = {
- .id = QCA8K_ID_QCA8327,
- .reduced_package = true,
- .mib_count = QCA8K_QCA832X_MIB_COUNT,
-};
-
-static const struct qca8k_match_data qca8328 = {
- .id = QCA8K_ID_QCA8327,
- .mib_count = QCA8K_QCA832X_MIB_COUNT,
-};
-
-static const struct qca8k_match_data qca833x = {
- .id = QCA8K_ID_QCA8337,
- .mib_count = QCA8K_QCA833X_MIB_COUNT,
-};
-
-static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8327", .data = &qca8327 },
- { .compatible = "qca,qca8328", .data = &qca8328 },
- { .compatible = "qca,qca8334", .data = &qca833x },
- { .compatible = "qca,qca8337", .data = &qca833x },
- { /* sentinel */ },
-};
-
-static struct mdio_driver qca8kmdio_driver = {
- .probe = qca8k_sw_probe,
- .remove = qca8k_sw_remove,
- .shutdown = qca8k_sw_shutdown,
- .mdiodrv.driver = {
- .name = "qca8k",
- .of_match_table = qca8k_of_match,
- .pm = &qca8k_pm_ops,
- },
-};
-
-mdio_module_driver(qca8kmdio_driver);
-
-MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
-MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
deleted file mode 100644
index aae46ada8d83..000000000000
--- a/drivers/net/dsa/realtek-smi-core.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Realtek Simple Management Interface (SMI) driver
- * It can be discussed how "simple" this interface is.
- *
- * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
- * but the protocol is not MDIO at all. Instead it is a Realtek
- * peculiarity that needs to bit-bang the lines in a special way to
- * communicate with the switch.
- *
- * ASICs we intend to support with this driver:
- *
- * RTL8366 - The original version, apparently
- * RTL8369 - Similar enough to have the same datasheet as RTL8366
- * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
- * different register layout from the other two
- * RTL8366S - Is this "RTL8366 super"?
- * RTL8367 - Has an OpenWRT driver as well
- * RTL8368S - Seems to be an alternative name for RTL8366RB
- * RTL8370 - Also uses SMI
- *
- * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
- * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
- * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
- * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/bitops.h>
-#include <linux/if_bridge.h>
-
-#include "realtek-smi-core.h"
-
-#define REALTEK_SMI_ACK_RETRY_COUNT 5
-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
-
-static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
-{
- ndelay(smi->clk_delay);
-}
-
-static void realtek_smi_start(struct realtek_smi *smi)
-{
- /* Set GPIO pins to output mode, with initial state:
- * SCK = 0, SDA = 1
- */
- gpiod_direction_output(smi->mdc, 0);
- gpiod_direction_output(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
-
- /* CLK 1: 0 -> 1, 1 -> 0 */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
-
- /* CLK 2: */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
-}
-
-static void realtek_smi_stop(struct realtek_smi *smi)
-{
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Add an extra clock pulse */
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Set GPIO pins to input mode */
- gpiod_direction_input(smi->mdio);
- gpiod_direction_input(smi->mdc);
-}
-
-static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
-{
- for (; len > 0; len--) {
- realtek_smi_clk_delay(smi);
-
- /* Prepare data */
- gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- }
-}
-
-static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
-{
- gpiod_direction_input(smi->mdio);
-
- for (*data = 0; len > 0; len--) {
- u32 u;
-
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- u = !!gpiod_get_value(smi->mdio);
- gpiod_set_value(smi->mdc, 0);
-
- *data |= (u << (len - 1));
- }
-
- gpiod_direction_output(smi->mdio, 0);
-}
-
-static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
-{
- int retry_cnt;
-
- retry_cnt = 0;
- do {
- u32 ack;
-
- realtek_smi_read_bits(smi, 1, &ack);
- if (ack == 0)
- break;
-
- if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
- dev_err(smi->dev, "ACK timeout\n");
- return -ETIMEDOUT;
- }
- } while (1);
-
- return 0;
-}
-
-static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return realtek_smi_wait_for_ack(smi);
-}
-
-static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return 0;
-}
-
-static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x00, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x01, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
-{
- unsigned long flags;
- u8 lo = 0;
- u8 hi = 0;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send READ command */
- ret = realtek_smi_write_byte(smi, smi->cmd_read);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Read DATA[7:0] */
- realtek_smi_read_byte0(smi, &lo);
- /* Read DATA[15:8] */
- realtek_smi_read_byte1(smi, &hi);
-
- *data = ((u32)lo) | (((u32)hi) << 8);
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-static int realtek_smi_write_reg(struct realtek_smi *smi,
- u32 addr, u32 data, bool ack)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send WRITE command */
- ret = realtek_smi_write_byte(smi, smi->cmd_write);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Write DATA[7:0] */
- ret = realtek_smi_write_byte(smi, data & 0xff);
- if (ret)
- goto out;
-
- /* Write DATA[15:8] */
- if (ack)
- ret = realtek_smi_write_byte(smi, data >> 8);
- else
- ret = realtek_smi_write_byte_noack(smi, data >> 8);
- if (ret)
- goto out;
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-/* There is a single case where we need to use this accessor, and that
- * is when issuing a soft reset. Since the device resets as soon as we
- * write that bit, no ACK will come back, for natural reasons.
- */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data)
-{
- return realtek_smi_write_reg(smi, addr, data, false);
-}
-EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
-
-/* Regmap accessors */
-
-static int realtek_smi_write(void *ctx, u32 reg, u32 val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_write_reg(smi, reg, val, true);
-}
-
-static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_read_reg(smi, reg, val);
-}
-
-static const struct regmap_config realtek_smi_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_read(smi, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_write(smi, addr, regnum, val);
-}
-
-int realtek_smi_setup_mdio(struct realtek_smi *smi)
-{
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(smi->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- smi->slave_mii_bus->priv = smi;
- smi->slave_mii_bus->name = "SMI slave MII";
- smi->slave_mii_bus->read = realtek_smi_mdio_read;
- smi->slave_mii_bus->write = realtek_smi_mdio_write;
- snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- smi->ds->index);
- smi->slave_mii_bus->dev.of_node = mdio_np;
- smi->slave_mii_bus->parent = smi->dev;
- smi->ds->slave_mii_bus = smi->slave_mii_bus;
-
- ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
- if (ret) {
- dev_err(smi->dev, "unable to register MDIO bus %s\n",
- smi->slave_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
-{
- const struct realtek_smi_variant *var;
- struct device *dev = &pdev->dev;
- struct realtek_smi *smi;
- struct device_node *np;
- int ret;
-
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
- if (!smi)
- return -ENOMEM;
- smi->chip_data = (void *)smi + sizeof(*smi);
- smi->map = devm_regmap_init(dev, NULL, smi,
- &realtek_smi_mdio_regmap_config);
- if (IS_ERR(smi->map)) {
- ret = PTR_ERR(smi->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- smi->dev = dev;
- smi->clk_delay = var->clk_delay;
- smi->cmd_read = var->cmd_read;
- smi->cmd_write = var->cmd_write;
- smi->ops = var->ops;
-
- dev_set_drvdata(dev, smi);
- spin_lock_init(&smi->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- /* Assert then deassert RESET */
- smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(smi->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(smi->reset);
- }
- msleep(REALTEK_SMI_HW_STOP_DELAY);
- gpiod_set_value(smi->reset, 0);
- msleep(REALTEK_SMI_HW_START_DELAY);
- dev_info(dev, "deasserted RESET\n");
-
- /* Fetch MDIO pins */
- smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdc))
- return PTR_ERR(smi->mdc);
- smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdio))
- return PTR_ERR(smi->mdio);
-
- smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = smi->ops->detect(smi);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
- if (!smi->ds)
- return -ENOMEM;
-
- smi->ds->dev = dev;
- smi->ds->num_ports = smi->num_ports;
- smi->ds->priv = smi;
-
- smi->ds->ops = var->ds_ops;
- ret = dsa_register_switch(smi->ds);
- if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
- return ret;
- }
- return 0;
-}
-
-static int realtek_smi_remove(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return 0;
-
- dsa_unregister_switch(smi->ds);
- if (smi->slave_mii_bus)
- of_node_put(smi->slave_mii_bus->dev.of_node);
- gpiod_set_value(smi->reset, 1);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void realtek_smi_shutdown(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return;
-
- dsa_switch_shutdown(smi->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct of_device_id realtek_smi_of_match[] = {
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = of_match_ptr(realtek_smi_of_match),
- },
- .probe = realtek_smi_probe,
- .remove = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
new file mode 100644
index 000000000000..060165a85fb7
--- /dev/null
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_REALTEK
+ tristate "Realtek Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ help
+ Select to enable support for Realtek Ethernet switch chips.
+
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
+config NET_DSA_REALTEK_MDIO
+ tristate "Realtek MDIO interface driver"
+ depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
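+	# The "X || !X" construct below keeps this interface driver from
+	# being built-in (y) while a subdriver it calls is a module (m).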
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for registering switches configured
+ through MDIO.
+
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI interface driver"
+ depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for registering switches connected
+ through SMI.
+
+config NET_DSA_REALTEK_RTL8365MB
+ tristate "Realtek RTL8365MB switch subdriver"
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL8_4
+ help
+ Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
+
+config NET_DSA_REALTEK_RTL8366RB
+ tristate "Realtek RTL8366RB switch subdriver"
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL4_A
+ help
+ Select to enable support for Realtek RTL8366RB.
+
+endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
new file mode 100644
index 000000000000..0aab57252a7c
--- /dev/null
+++ b/drivers/net/dsa/realtek/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+rtl8366-objs := rtl8366-core.o rtl8366rb.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
new file mode 100644
index 000000000000..3e54fac5f902
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek MDIO interface driver
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+
+/* Read/write via mdiobus */
+#define REALTEK_MDIO_CTRL0_REG 31
+#define REALTEK_MDIO_START_REG 29
+#define REALTEK_MDIO_CTRL1_REG 21
+#define REALTEK_MDIO_ADDRESS_REG 23
+#define REALTEK_MDIO_DATA_WRITE_REG 24
+#define REALTEK_MDIO_DATA_READ_REG 25
+
+#define REALTEK_MDIO_START_OP 0xFFFF
+#define REALTEK_MDIO_ADDR_OP 0x000E
+#define REALTEK_MDIO_READ_OP 0x0001
+#define REALTEK_MDIO_WRITE_OP 0x0003
+
+static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
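+
+/* Both accessors above use the same indirect protocol (a sketch derived
+ * from the code, not from a datasheet):
+ *
+ *   1. CTRL0_REG      <- ADDR_OP             enter the address phase
+ *   2. ADDRESS_REG    <- target register
+ *   3. DATA_WRITE_REG <- value               (writes only)
+ *   4. CTRL1_REG      <- WRITE_OP / READ_OP  trigger the operation
+ *   5. value          <- DATA_READ_REG       (reads only)
+ */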
+
+static void realtek_mdio_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_mdio_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_mdio_lock,
+ .unlock = realtek_mdio_unlock,
+};
+
+static const struct regmap_config realtek_mdio_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
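+
+/* Why two regmaps: an operation that must perform several register
+ * accesses atomically takes priv->map_lock itself and then goes through
+ * priv->map_nolock, so the regmap core does not try to re-take the same
+ * mutex. A hypothetical caller (names illustrative):
+ *
+ *	mutex_lock(&priv->map_lock);
+ *	regmap_read(priv->map_nolock, reg, &val);
+ *	regmap_write(priv->map_nolock, reg, val | SOME_FLAG);
+ *	mutex_unlock(&priv->map_lock);
+ */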
+
+static int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv;
+ struct device *dev = &mdiodev->dev;
+ const struct realtek_variant *var;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv) + var->chip_data_sz,
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_mdio_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_mdio_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->mdio_addr = mdiodev->addr;
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->write_reg_noack = realtek_mdio_write;
+
+ np = dev->of_node;
+
+ dev_set_drvdata(dev, priv);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = var->ds_ops_mdio;
+
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+}
+
+static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id realtek_mdio_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
+
+static struct mdio_driver realtek_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "realtek-mdio",
+ .of_match_table = of_match_ptr(realtek_mdio_of_match),
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+mdio_module_driver(realtek_mdio_driver);
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
new file mode 100644
index 000000000000..1b447d96b9c4
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+
+static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
+{
+ ndelay(priv->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_priv *priv)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(priv->mdc, 0);
+ gpiod_direction_output(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 2: */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_priv *priv)
+{
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Add an extra clock pulse */
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(priv->mdio);
+ gpiod_direction_input(priv->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(priv);
+
+ /* Prepare data */
+ gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ }
+}
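+
+/* Bit order above is MSB first: writing data = 0xA5 with len = 8
+ * drives the MDIO line 1, 0, 1, 0, 0, 1, 0, 1, one bit per MDC pulse.
+ */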
+
+static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
+{
+ gpiod_direction_input(priv->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ u = !!gpiod_get_value(priv->mdio);
+ gpiod_set_value(priv->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(priv->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(priv, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(priv->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return realtek_smi_wait_for_ack(priv);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(priv, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(priv, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
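+
+/* The complete read transaction as bit-banged above (a sketch derived
+ * from the code, not from a datasheet); each written byte is followed
+ * by an ACK check, and the host acks the two data bytes with 0 and 1:
+ *
+ *   START | cmd_read | ADDR[7:0] | ADDR[15:8] | DATA[7:0] | DATA[15:8] | STOP
+ */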
+
+static int realtek_smi_write_reg(struct realtek_priv *priv,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(priv, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(priv, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(priv, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+/* There is a single case where we need to use this accessor, and that
+ * is when issuing a soft reset. Since the device resets as soon as we
+ * write that bit, no ACK will come back, for natural reasons.
+ */
+static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
+{
+ return realtek_smi_write_reg(ctx, reg, val, false);
+}
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_write_reg(priv, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_read_reg(priv, reg, val);
+}
+
+static void realtek_smi_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_smi_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
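+/* Two regmap views of the same register space: the default config
+ * wraps every access in map_lock, while the nolock config leaves
+ * locking to the caller, so that multi-register sequences (such as
+ * the rtl8365mb indirect PHY access) can hold map_lock across the
+ * whole sequence.
+ */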
+static const struct regmap_config realtek_smi_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_smi_lock,
+ .unlock = realtek_smi_unlock,
+};
+
+static const struct regmap_config realtek_smi_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
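+/* Register an MDIO bus for the switch's internal PHYs, described by
+ * a child node compatible with "realtek,smi-mdio". Accesses are
+ * tunnelled through the subdriver's phy_read/phy_write operations.
+ */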
+static int realtek_smi_setup_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!priv->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "SMI slave MII";
+ priv->slave_mii_bus->read = realtek_smi_mdio_read;
+ priv->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ ds->index);
+ priv->slave_mii_bus->dev.of_node = mdio_np;
+ priv->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+
+ ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ priv->slave_mii_bus->id);
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_priv *priv;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ np = dev->of_node;
+
+ priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
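+ /* The variant's private data lives in the tail of the allocation */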
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_smi_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_smi_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->setup_interface = realtek_smi_setup_mdio;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
+
+ dev_set_drvdata(dev, priv);
+ spin_lock_init(&priv->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
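+ /* Pulse the optional RESET line: assert it, give the switch time
+ * to stop, then deassert and wait for it to come back up before
+ * any register access.
+ */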
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ /* Fetch MDIO pins */
+ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdc))
+ return PTR_ERR(priv->mdc);
+ priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+
+ priv->ds->ops = var->ds_ops_smi;
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "unable to register switch\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+ if (priv->slave_mii_bus)
+ of_node_put(priv->slave_mii_bus->dev.of_node);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ return 0;
+}
+
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek.h
index 5bfa53e2480a..4fa7c6ba874a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -5,15 +5,18 @@
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*/
-#ifndef _REALTEK_SMI_H
-#define _REALTEK_SMI_H
+#ifndef _REALTEK_H
+#define _REALTEK_H
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
-struct realtek_smi_ops;
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
+struct realtek_ops;
struct dentry;
struct inode;
struct file;
@@ -25,7 +28,7 @@ struct rtl8366_mib_counter {
const char *name;
};
-/**
+/*
* struct rtl8366_vlan_mc - Virtual LAN member configuration
*/
struct rtl8366_vlan_mc {
@@ -43,13 +46,17 @@ struct rtl8366_vlan_4k {
u8 fid;
};
-struct realtek_smi {
+struct realtek_priv {
struct device *dev;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
struct regmap *map;
+ struct regmap *map_nolock;
+ struct mutex map_lock;
struct mii_bus *slave_mii_bus;
+ struct mii_bus *bus;
+ int mdio_addr;
unsigned int clk_delay;
u8 cmd_read;
@@ -65,7 +72,9 @@ struct realtek_smi {
unsigned int num_mib_counters;
struct rtl8366_mib_counter *mib_counters;
- const struct realtek_smi_ops *ops;
+ const struct realtek_ops *ops;
+ int (*setup_interface)(struct dsa_switch *ds);
+ int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
int vlan4k_enabled;
@@ -74,61 +83,57 @@ struct realtek_smi {
void *chip_data; /* Per-chip extra variant data */
};
-/**
- * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+/*
+ * struct realtek_ops - vtable for the per-SMI-chiptype operations
* @detect: detects the chiptype
*/
-struct realtek_smi_ops {
- int (*detect)(struct realtek_smi *smi);
- int (*reset_chip)(struct realtek_smi *smi);
- int (*setup)(struct realtek_smi *smi);
- void (*cleanup)(struct realtek_smi *smi);
- int (*get_mib_counter)(struct realtek_smi *smi,
+struct realtek_ops {
+ int (*detect)(struct realtek_priv *priv);
+ int (*reset_chip)(struct realtek_priv *priv);
+ int (*setup)(struct realtek_priv *priv);
+ void (*cleanup)(struct realtek_priv *priv);
+ int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue);
- int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*get_vlan_mc)(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc);
- int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*set_vlan_mc)(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc);
- int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ int (*get_vlan_4k)(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k);
- int (*set_vlan_4k)(struct realtek_smi *smi,
+ int (*set_vlan_4k)(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k);
- int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
- int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
- bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
- int (*enable_vlan)(struct realtek_smi *smi, bool enable);
- int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
- int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
- int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
- int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ int (*get_mc_index)(struct realtek_priv *priv, int port, int *val);
+ int (*set_mc_index)(struct realtek_priv *priv, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_priv *priv, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_priv *priv, bool enable);
+ int (*enable_vlan4k)(struct realtek_priv *priv, bool enable);
+ int (*enable_port)(struct realtek_priv *priv, int port, bool enable);
+ int (*phy_read)(struct realtek_priv *priv, int phy, int regnum);
+ int (*phy_write)(struct realtek_priv *priv, int phy, int regnum,
u16 val);
};
-struct realtek_smi_variant {
- const struct dsa_switch_ops *ds_ops;
- const struct realtek_smi_ops *ops;
+struct realtek_variant {
+ const struct dsa_switch_ops *ds_ops_smi;
+ const struct dsa_switch_ops *ds_ops_mdio;
+ const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
size_t chip_data_sz;
};
-/* SMI core calls */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data);
-int realtek_smi_setup_mdio(struct realtek_smi *smi);
-
/* RTL8366 library helpers */
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
-int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable);
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable);
+int rtl8366_reset_vlan(struct realtek_priv *priv);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -139,7 +144,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-extern const struct realtek_smi_variant rtl8366rb_variant;
-extern const struct realtek_smi_variant rtl8365mb_variant;
+extern const struct realtek_variant rtl8366rb_variant;
+extern const struct realtek_variant rtl8365mb_variant;
-#endif /* _REALTEK_SMI_H */
+#endif /* _REALTEK_H */
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 3b729544798b..da31d8b839ac 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -99,18 +99,15 @@
#include <linux/regmap.h>
#include <linux/if_bridge.h>
-#include "realtek-smi-core.h"
-
-/* Chip-specific data and limits */
-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
-#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
-#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+#include "realtek.h"
/* Family-specific data and limits */
-#define RTL8365MB_PHYADDRMAX 7
-#define RTL8365MB_NUM_PHYREGS 32
-#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
-#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+#define RTL8365MB_PHYADDRMAX 7
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS 11
+#define RTL8365MB_MAX_NUM_EXTINTS 3
+#define RTL8365MB_LEARN_LIMIT_MAX 2112
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
@@ -191,7 +188,7 @@
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
-/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+/* External interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
@@ -207,39 +204,44 @@
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
-/* EXT port interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
- ((_extport) >> 1) * (0x13C3 - 0x1305))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
- (0xF << (((_extport) % 2)))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
- (((_extport) % 2) * 4)
-
-/* EXT port RGMII TX/RX delay configuration registers 1~2 */
-#define RTL8365MB_EXT_RGMXF_REG1 0x1307
-#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
-#define RTL8365MB_EXT_RGMXF_REG(_extport) \
- (RTL8365MB_EXT_RGMXF_REG1 + \
- (((_extport) >> 1) * (0x13C5 - 0x1307)))
+/* External interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
+ ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
+ (0xF << (((_extint) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
+ (((_extint) % 2) * 4)
+
+/* External interface RGMII TX/RX delay configuration registers 0~2 */
+#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
+#define RTL8365MB_EXT_RGMXF_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
+ (_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
+ (_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
+ 0x0)
#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
-/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
#define RTL8365MB_PORT_SPEED_10M 0
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
-/* EXT port force configuration registers 0~2 */
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
- ((_extport) & 0x1) + \
- ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+/* External interface force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
+ (_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
+ 0x0)
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
@@ -463,6 +465,95 @@ static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
{ 0x1D32, 0x0002 },
};
+enum rtl8365mb_phy_interface_mode {
+ RTL8365MB_PHY_INTERFACE_MODE_INVAL = 0,
+ RTL8365MB_PHY_INTERFACE_MODE_INTERNAL = BIT(0),
+ RTL8365MB_PHY_INTERFACE_MODE_MII = BIT(1),
+ RTL8365MB_PHY_INTERFACE_MODE_TMII = BIT(2),
+ RTL8365MB_PHY_INTERFACE_MODE_RMII = BIT(3),
+ RTL8365MB_PHY_INTERFACE_MODE_RGMII = BIT(4),
+ RTL8365MB_PHY_INTERFACE_MODE_SGMII = BIT(5),
+ RTL8365MB_PHY_INTERFACE_MODE_HSGMII = BIT(6),
+};
+
+/**
+ * struct rtl8365mb_extint - external interface info
+ * @port: the port with an external interface
+ * @id: the external interface ID, which is either 0, 1, or 2
+ * @supported_interfaces: a bitmask of supported PHY interface modes
+ *
+ * Represents a mapping: port -> { id, supported_interfaces }. To be embedded
+ * in &struct rtl8365mb_chip_info for every port with an external interface.
+ */
+struct rtl8365mb_extint {
+ int port;
+ int id;
+ unsigned int supported_interfaces;
+};
+
+/**
+ * struct rtl8365mb_chip_info - static chip-specific info
+ * @name: human-readable chip name
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @extints: available external interfaces
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * These data are specific to a given chip in the family of switches supported
+ * by this driver. When adding support for another chip in the family, a new
+ * chip info should be added to the rtl8365mb_chip_infos array.
+ */
+struct rtl8365mb_chip_info {
+ const char *name;
+ u32 chip_id;
+ u32 chip_ver;
+ const struct rtl8365mb_extint extints[RTL8365MB_MAX_NUM_EXTINTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+/* Chip info for each supported switch in the family */
+#define PHY_INTF(_mode) (RTL8365MB_PHY_INTERFACE_MODE_ ## _mode)
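+/* Each extints entry below is { port, id, supported interface modes } */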
+static const struct rtl8365mb_chip_info rtl8365mb_chip_infos[] = {
+ {
+ .name = "RTL8365MB-VC",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0040,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367S",
+ .chip_id = 0x6367,
+ .chip_ver = 0x00A0,
+ .extints = {
+ { 6, 1, PHY_INTF(SGMII) | PHY_INTF(HSGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367RB-VB",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0020,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+};
+
enum rtl8365mb_stp_state {
RTL8365MB_STP_STATE_DISABLED = 0,
RTL8365MB_STP_STATE_BLOCKING = 1,
@@ -516,7 +607,7 @@ struct rtl8365mb_cpu {
/**
* struct rtl8365mb_port - private per-port data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @index: DSA port index, same as dsa_port::index
* @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
* access via rtl8365mb_get_stats64
@@ -524,7 +615,7 @@ struct rtl8365mb_cpu {
* @mib_work: delayed work for polling MIB counters
*/
struct rtl8365mb_port {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
unsigned int index;
struct rtnl_link_stats64 stats;
spinlock_t stats_lock;
@@ -532,45 +623,35 @@ struct rtl8365mb_port {
};
/**
- * struct rtl8365mb - private chip-specific driver data
- * @smi: pointer to parent realtek_smi data
+ * struct rtl8365mb - driver private data
+ * @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
- * @chip_id: chip identifier
- * @chip_ver: chip silicon revision
- * @port_mask: mask of all ports
- * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @chip_info: chip-specific info about the attached switch
* @cpu: CPU tagging and CPU port configuration for this chip
* @mib_lock: prevent concurrent reads of MIB counters
* @ports: per-port data
- * @jam_table: chip-specific initialization jam table
- * @jam_size: size of the chip's jam table
*
* Private data for this driver.
*/
struct rtl8365mb {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
int irq;
- u32 chip_id;
- u32 chip_ver;
- u32 port_mask;
- u32 learn_limit_max;
+ const struct rtl8365mb_chip_info *chip_info;
struct rtl8365mb_cpu cpu;
struct mutex mib_lock;
struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
- const struct rtl8365mb_jam_tbl_entry *jam_table;
- size_t jam_size;
};
-static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
{
u32 val;
- return regmap_read_poll_timeout(smi->map,
+ return regmap_read_poll_timeout(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
val, !val, 10, 100);
}
-static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
u32 ocp_addr)
{
u32 val;
@@ -579,7 +660,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
/* Set OCP prefix */
val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
ret = regmap_update_bits(
- smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
if (ret)
@@ -592,89 +673,101 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
ocp_addr >> 1);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
ocp_addr >> 6);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
- val);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 *data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
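+ /* Hold map_lock across the whole indirect access sequence; the
+ * individual register accesses below go through the nolock map.
+ */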
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Execute read operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
/* Get PHY register data */
- ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
- &val);
+ ret = regmap_read(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
if (ret)
- return ret;
+ goto out;
*data = val & 0xFFFF;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
-static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Set PHY register data */
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
- data);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
if (ret)
- return ret;
+ goto out;
/* Execute write operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
+
+out:
+ mutex_unlock(&priv->map_lock);
- return 0;
+ return ret;
}
-static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 ocp_addr;
u16 val;
@@ -688,21 +781,21 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
phy, regnum, ocp_addr, val);
return val;
}
-static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 ocp_addr;
@@ -716,46 +809,84 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
phy, regnum, ocp_addr, val);
return 0;
}
+static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8365mb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
+}
+
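+/* Find the external interface, if any, behind a given switch port */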
+static const struct rtl8365mb_extint *
+rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ for (i = 0; i < RTL8365MB_MAX_NUM_EXTINTS; i++) {
+ const struct rtl8365mb_extint *extint =
+ &mb->chip_info->extints[i];
+
+ if (!extint->supported_interfaces)
+ continue;
+
+ if (extint->port == port)
+ return extint;
+ }
+
+ return NULL;
+}
+
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
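+ /* The tag protocol follows the configured tag position: rtl8_4
+ * places the tag after the source address, rtl8_4t places it right
+ * before the CRC ("trailer" format).
+ */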
+ if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
+ return DSA_TAG_PROTO_RTL8_4T;
+
return DSA_TAG_PROTO_RTL8_4;
}
-static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
- int ext_port;
u32 val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
- dp = dsa_to_port(smi->ds, port);
+ dp = dsa_to_port(priv->ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -786,8 +917,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val == 0 || val == 2)
tx_delay = val / 2;
else
- dev_warn(smi->dev,
- "EXT port TX delay must be 0 or 2 ns\n");
+ dev_warn(priv->dev,
+ "RGMII TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
@@ -796,12 +927,12 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val <= 7)
rx_delay = val;
else
- dev_warn(smi->dev,
- "EXT port RX delay must be 0 to 2.1 ns\n");
+ dev_warn(priv->dev,
+ "RGMII RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
- smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
@@ -810,36 +941,33 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
return ret;
ret = regmap_update_bits(
- smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(extint->id),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
- ext_port));
+ extint->id));
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
u32 r_tx_pause;
u32 r_rx_pause;
u32 r_duplex;
u32 r_speed;
u32 r_link;
- int ext_port;
int val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
if (link) {
/* Force the link up with the desired configuration */
@@ -854,7 +982,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (speed == SPEED_10) {
r_speed = RTL8365MB_PORT_SPEED_10M;
} else {
- dev_err(smi->dev, "unsupported port speed %s\n",
+ dev_err(priv->dev, "unsupported port speed %s\n",
phy_speed_to_str(speed));
return -EINVAL;
}
@@ -864,7 +992,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (duplex == DUPLEX_HALF) {
r_duplex = 0;
} else {
- dev_err(smi->dev, "unsupported duplex %s\n",
+ dev_err(priv->dev, "unsupported duplex %s\n",
phy_duplex_to_str(duplex));
return -EINVAL;
}
@@ -886,8 +1014,8 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
- ret = regmap_write(smi->map,
- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ ret = regmap_write(priv->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extint->id),
val);
if (ret)
return ret;
@@ -895,82 +1023,54 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
return 0;
}
-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
- phy_interface_t interface)
+static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- if (dsa_is_user_port(ds, port) &&
- (interface == PHY_INTERFACE_MODE_NA ||
- interface == PHY_INTERFACE_MODE_INTERNAL ||
- interface == PHY_INTERFACE_MODE_GMII))
- /* Internal PHY */
- return true;
- else if (dsa_is_cpu_port(ds, port) &&
- phy_interface_mode_is_rgmii(interface))
- /* Extension MAC */
- return true;
-
- return false;
-}
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(ds->priv, port);
-static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct realtek_smi *smi = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
- */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- linkmode_zero(supported);
+ if (!extint) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ /* GMII is the default interface mode for phylib, so
+ * we have to support it for ports with integrated PHY.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
return;
}
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+ /* Populate according to the modes supported by _this driver_,
+ * not necessarily the modes supported by the hardware, some of
+ * which remain unimplemented.
+ */
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (extint->supported_interfaces & RTL8365MB_PHY_INTERFACE_MODE_RGMII)
+ phy_interface_set_rgmii(config->supported_interfaces);
}
static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
port);
return;
}
if (phy_interface_mode_is_rgmii(state->interface)) {
- ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to configure RGMII mode on port %d: %d\n",
port, ret);
return;
@@ -985,20 +1085,20 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
cancel_delayed_work_sync(&p->mib_work);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
false, false);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to reset forced mode on port %d: %d\n",
port, ret);
@@ -1013,21 +1113,21 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
schedule_delayed_work(&p->mib_work, 0);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
duplex, tx_pause,
rx_pause);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to force mode on port %d: %d\n", port,
ret);
@@ -1038,7 +1138,7 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
enum rtl8365mb_stp_state val;
int msti = 0;
@@ -1057,36 +1157,34 @@ static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
val = RTL8365MB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "invalid STP state: %u\n", state);
+ dev_err(priv->dev, "invalid STP state: %u\n", state);
return;
}
- regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
}
-static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
- struct rtl8365mb *mb = smi->chip_data;
-
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
- return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
- enable ? mb->learn_limit_max : 0);
+ return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? RTL8365MB_LEARN_LIMIT_MAX : 0);
}
-static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
u32 mask)
{
- return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+ return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
}
-static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
u32 offset, u32 length, u64 *mibvalue)
{
u64 tmpvalue = 0;
@@ -1098,13 +1196,13 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
* and then poll the control register before reading the value from some
* counter registers.
*/
- ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
RTL8365MB_MIB_ADDRESS(port, offset));
if (ret)
return ret;
/* Poll for completion */
- ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
!(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
10, 100);
if (ret)
@@ -1126,7 +1224,7 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
/* Read the MIB counter 16 bits at a time */
for (i = 0; i < length; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
if (ret)
return ret;
@@ -1142,21 +1240,21 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &data[i]);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read port %d counters: %d\n", port,
ret);
break;
@@ -1190,15 +1288,15 @@ static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&phy_stats->SymbolErrorDuringCarrier);
mutex_unlock(&mb->mib_lock);
}
@@ -1226,12 +1324,12 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
};
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
@@ -1241,7 +1339,7 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &cnt[i]);
if (ret)
break;
@@ -1291,20 +1389,20 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&ctrl_stats->UnsupportedOpcodesReceived);
mutex_unlock(&mb->mib_lock);
}
-static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
@@ -1323,7 +1421,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
};
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtnl_link_stats64 *stats;
int ret;
int i;
@@ -1338,7 +1436,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
c->length, &cnt[i]);
if (ret)
break;
@@ -1388,9 +1486,9 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
struct rtl8365mb_port,
mib_work);
- struct realtek_smi *smi = p->smi;
+ struct realtek_priv *priv = p->priv;
- rtl8365mb_stats_update(smi, p->index);
+ rtl8365mb_stats_update(priv, p->index);
schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
}
@@ -1398,11 +1496,11 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
spin_lock(&p->stats_lock);
@@ -1410,9 +1508,9 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
-static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1420,10 +1518,10 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
*/
mutex_init(&mb->mib_lock);
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1436,45 +1534,42 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
}
}
-static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
}
}
-static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
u32 *val)
{
int ret;
- ret = regmap_read(smi->map, reg, val);
+ ret = regmap_read(priv->map, reg, val);
if (ret)
return ret;
- return regmap_write(smi->map, reg, *val);
+ return regmap_write(priv->map, reg, *val);
}
static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
unsigned long line_changes = 0;
- struct rtl8365mb *mb;
u32 stat;
int line;
int ret;
- mb = smi->chip_data;
-
- ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
goto out_error;
@@ -1485,27 +1580,27 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
u32 val;
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
if (ret)
goto out_error;
linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
if (ret)
goto out_error;
linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
- line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ line_changes = linkup_ind | linkdown_ind;
}
if (!line_changes)
goto out_none;
- for_each_set_bit(line, &line_changes, smi->num_ports) {
- int child_irq = irq_find_mapping(smi->irqdomain, line);
+ for_each_set_bit(line, &line_changes, priv->num_ports) {
+ int child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
@@ -1513,7 +1608,7 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
return IRQ_HANDLED;
out_error:
- dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+ dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
out_none:
return IRQ_NONE;
@@ -1548,27 +1643,27 @@ static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
{
- return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
RTL8365MB_INTR_LINK_CHANGE_MASK,
FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
enable ? 1 : 0));
}
-static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+static int rtl8365mb_irq_enable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, true);
+ return rtl8365mb_set_irq_enable(priv, true);
}
-static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+static int rtl8365mb_irq_disable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, false);
+ return rtl8365mb_set_irq_enable(priv, false);
}
-static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+static int rtl8365mb_irq_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct device_node *intc;
u32 irq_trig;
int virq;
@@ -1577,9 +1672,9 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
int ret;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
@@ -1587,24 +1682,24 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
- dev_err(smi->dev, "failed to get parent irq: %d\n",
+ dev_err(priv->dev, "failed to get parent irq: %d\n",
irq);
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
- &rtl8365mb_irqdomain_ops, smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to add irq domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_create_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_create_mapping(priv->irqdomain, i);
if (!virq) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to create irq domain mapping\n");
ret = -EINVAL;
goto out_remove_irqdomain;
@@ -1625,40 +1720,40 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
val = RTL8365MB_INTR_POLARITY_LOW;
break;
default:
- dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ dev_err(priv->dev, "unsupported irq trigger type %u\n",
irq_trig);
ret = -EINVAL;
goto out_remove_irqdomain;
}
- ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
RTL8365MB_INTR_POLARITY_MASK,
FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
if (ret)
goto out_remove_irqdomain;
/* Disable the interrupt in case the chip has it enabled on reset */
- ret = rtl8365mb_irq_disable(smi);
+ ret = rtl8365mb_irq_disable(priv);
if (ret)
goto out_remove_irqdomain;
/* Clear the interrupt status register */
- ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
RTL8365MB_INTR_ALL_MASK);
if (ret)
goto out_remove_irqdomain;
ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
- "rtl8365mb", smi);
+ "rtl8365mb", priv);
if (ret) {
- dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ dev_err(priv->dev, "failed to request irq: %d\n", ret);
goto out_remove_irqdomain;
}
/* Store the irq so that we know to free it during teardown */
mb->irq = irq;
- ret = rtl8365mb_irq_enable(smi);
+ ret = rtl8365mb_irq_enable(priv);
if (ret)
goto out_free_irq;
@@ -1667,17 +1762,17 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
return 0;
out_free_irq:
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
out_remove_irqdomain:
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
out_put_node:
of_node_put(intc);
@@ -1685,36 +1780,36 @@ out_put_node:
return ret;
}
-static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int virq;
int i;
if (mb->irq) {
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
}
- if (smi->irqdomain) {
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ if (priv->irqdomain) {
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
}
}
-static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+static int rtl8365mb_cpu_config(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtl8365mb_cpu *cpu = &mb->cpu;
u32 val;
int ret;
- ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
RTL8365MB_CPU_PORT_MASK_MASK,
FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
cpu->mask));
@@ -1726,27 +1821,61 @@ static int rtl8365mb_cpu_config(struct realtek_smi *smi)
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
- cpu->trap_port >> 3);
- ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ cpu->trap_port >> 3 & 0x1);
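+ /* The trap port index is split across two fields: bits 2:0 go in
+ * TRAP_PORT and bit 3 in TRAP_PORT_EXT, hence the masking above.
+ */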
+ ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_switch_init(struct realtek_smi *smi)
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_RTL8_4:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
+ break;
+ case DSA_TAG_PROTO_RTL8_4T:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
+ break;
+ /* The switch also supports a 4-byte format, similar to rtl4a but
+ * with the same 0x04 8-bit version field and probably 8-bit source
+ * and destination port fields. There is no public documentation for
+ * it. It is not supported yet and probably never will be.
+ */
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return rtl8365mb_cpu_config(priv);
+}
+
+static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
+ const struct rtl8365mb_chip_info *ci;
int ret;
int i;
+ ci = mb->chip_info;
+
/* Do any chip-specific init jam before getting to the common stuff */
- if (mb->jam_table) {
- for (i = 0; i < mb->jam_size; i++) {
- ret = regmap_write(smi->map, mb->jam_table[i].reg,
- mb->jam_table[i].val);
+ if (ci->jam_table) {
+ for (i = 0; i < ci->jam_size; i++) {
+ ret = regmap_write(priv->map, ci->jam_table[i].reg,
+ ci->jam_table[i].val);
if (ret)
return ret;
}
@@ -1754,7 +1883,7 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
/* Common init jam */
for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
- ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
rtl8365mb_init_jam_common[i].val);
if (ret)
return ret;
@@ -1763,75 +1892,80 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
return 0;
}
-static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+static int rtl8365mb_reset_chip(struct realtek_priv *priv)
{
u32 val;
- realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
- FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
- 1));
+ priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
/* Realtek documentation says the chip needs 1 second to reset. Sleep
* for 100 ms before accessing any registers to prevent ACK timeouts.
*/
msleep(100);
- return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
!(val & RTL8365MB_CHIP_RESET_HW_MASK),
20000, 1e6);
}
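regmap_read_poll_timeout() above re-reads the reset register every 20 ms and gives up after one second. A user-space approximation of that loop, under the assumption that reset_bit_set() models the register read:

#include <stdbool.h>
#include <unistd.h>

#define POLL_US    20000     /* sleep between reads, as in the call above */
#define TIMEOUT_US 1000000   /* overall deadline: 1e6 us */

static int poll_reset_done(bool (*reset_bit_set)(void))
{
	unsigned int waited = 0;

	while (reset_bit_set()) {  /* done when the HW reset bit clears */
		if (waited >= TIMEOUT_US)
			return -1; /* the kernel helper returns -ETIMEDOUT */
		usleep(POLL_US);
		waited += POLL_US;
	}
	return 0;
}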
static int rtl8365mb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct dsa_port *cpu_dp;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
- ret = rtl8365mb_reset_chip(smi);
+ ret = rtl8365mb_reset_chip(priv);
if (ret) {
- dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ dev_err(priv->dev, "failed to reset chip: %d\n", ret);
goto out_error;
}
/* Configure switch to vendor-defined initial state */
- ret = rtl8365mb_switch_init(smi);
+ ret = rtl8365mb_switch_init(priv);
if (ret) {
- dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
goto out_error;
}
/* Set up cascading IRQs */
- ret = rtl8365mb_irq_setup(smi);
+ ret = rtl8365mb_irq_setup(priv);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- ret = rtl8365mb_cpu_config(smi);
+ dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ cpu->mask |= BIT(cpu_dp->index);
+
+ if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
+ cpu->trap_port = cpu_dp->index;
+ }
+ cpu->enable = cpu->mask > 0;
+ ret = rtl8365mb_cpu_config(priv);
if (ret)
goto out_teardown_irq;
/* Configure ports */
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
- /* Set up per-port private data */
- p->smi = smi;
- p->index = i;
-
/* Forward only to the CPU */
- ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
if (ret)
goto out_teardown_irq;
/* Disable learning */
- ret = rtl8365mb_port_set_learning(smi, i, false);
+ ret = rtl8365mb_port_set_learning(priv, i, false);
if (ret)
goto out_teardown_irq;
@@ -1839,29 +1973,35 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+
+ /* Set up per-port private data */
+ p->priv = priv;
+ p->index = i;
}
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
RTL8365MB_CFG0_MAX_LEN_MASK,
FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
if (ret)
goto out_teardown_irq;
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_err(smi->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
}
/* Start statistics counter polling */
- rtl8365mb_stats_setup(smi);
+ rtl8365mb_stats_setup(priv);
return 0;
out_teardown_irq:
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_irq_teardown(priv);
out_error:
return ret;
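The setup path above replaces the single hard-coded CPU port with a mask built by iterating the switch's CPU ports, and the first one found becomes the trap port. A condensed sketch of that accumulation (a single CPU port at index 6 and the sentinel value 11 are assumptions standing in for the real port tree and RTL8365MB_MAX_NUM_PORTS):

#include <stdio.h>

int main(void)
{
	int cpu_ports[] = { 6 };  /* assumed: one CPU port at index 6 */
	int trap_port = 11;       /* sentinel meaning "not yet chosen" */
	unsigned int mask = 0;
	size_t i;

	for (i = 0; i < sizeof(cpu_ports) / sizeof(cpu_ports[0]); i++) {
		mask |= 1u << cpu_ports[i];
		if (trap_port == 11)
			trap_port = cpu_ports[i]; /* first CPU port traps */
	}
	printf("mask=0x%x trap_port=%d enable=%d\n",
	       mask, trap_port, mask > 0);
	return 0;
}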
@@ -1869,10 +2009,10 @@ out_error:
static void rtl8365mb_teardown(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
- rtl8365mb_stats_teardown(smi);
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_stats_teardown(priv);
+ rtl8365mb_irq_teardown(priv);
}
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
@@ -1902,64 +2042,80 @@ static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
return 0;
}
-static int rtl8365mb_detect(struct realtek_smi *smi)
+static int rtl8365mb_detect(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
u32 chip_id;
u32 chip_ver;
int ret;
+ int i;
- ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
- dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ dev_err(priv->dev, "failed to read chip id and version: %d\n",
ret);
return ret;
}
- switch (chip_id) {
- case RTL8365MB_CHIP_ID_8365MB_VC:
- dev_info(smi->dev,
- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
- chip_ver);
-
- smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
- smi->num_ports = smi->cpu_port + 1;
-
- mb->smi = smi;
- mb->chip_id = chip_id;
- mb->chip_ver = chip_ver;
- mb->port_mask = BIT(smi->num_ports) - 1;
- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
- mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
- mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
-
- mb->cpu.enable = 1;
- mb->cpu.mask = BIT(smi->cpu_port);
- mb->cpu.trap_port = smi->cpu_port;
- mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
- mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
- mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
- mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_chip_infos); i++) {
+ const struct rtl8365mb_chip_info *ci = &rtl8365mb_chip_infos[i];
- break;
- default:
- dev_err(smi->dev,
- "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
- chip_id, chip_ver);
+ if (ci->chip_id == chip_id && ci->chip_ver == chip_ver) {
+ mb->chip_info = ci;
+ break;
+ }
+ }
+
+ if (!mb->chip_info) {
+ dev_err(priv->dev,
+ "unrecognized switch (id=0x%04x, ver=0x%04x)", chip_id,
+ chip_ver);
return -ENODEV;
}
+ dev_info(priv->dev, "found an %s switch\n", mb->chip_info->name);
+
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
+ mb->priv = priv;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
return 0;
}
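Detection is now table-driven: instead of a switch statement on the chip ID, the (id, ver) pair is matched against rtl8365mb_chip_infos, and a miss returns -ENODEV. A minimal sketch of the lookup, with illustrative descriptor fields and values:

#include <stddef.h>

struct chip_info {
	const char *name;
	unsigned int chip_id;
	unsigned int chip_ver;
};

static const struct chip_info chips[] = {
	{ "RTL8365MB-VC", 0x6367, 0x0040 },  /* example values only */
};

static const struct chip_info *find_chip(unsigned int id, unsigned int ver)
{
	size_t i;

	for (i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
		if (chips[i].chip_id == id && chips[i].chip_ver == ver)
			return &chips[i];
	return NULL;  /* caller logs the unknown chip and bails out */
}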
-static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
- .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
.phylink_mac_config = rtl8365mb_phylink_mac_config,
.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .phy_read = rtl8365mb_dsa_phy_read,
+ .phy_write = rtl8365mb_dsa_phy_write,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -1970,18 +2126,23 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_stats64 = rtl8365mb_get_stats64,
};
-static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
.phy_write = rtl8365mb_phy_write,
};
-const struct realtek_smi_variant rtl8365mb_variant = {
- .ds_ops = &rtl8365mb_switch_ops,
- .ops = &rtl8365mb_smi_ops,
+const struct realtek_variant rtl8365mb_variant = {
+ .ds_ops_smi = &rtl8365mb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
+MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
+MODULE_LICENSE("GPL");
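With the SMI/MDIO split, the variant now carries two DSA ops tables, and the bus-specific front-end installs whichever matches its transport; only the MDIO table exposes the phy_read/phy_write DSA hooks. A simplified stand-in for that selection (types here are illustrative, not the driver's):

#include <stdbool.h>

struct ops_table { const char *label; };

struct variant {
	const struct ops_table *ds_ops_smi;
	const struct ops_table *ds_ops_mdio;
};

static const struct ops_table *
pick_ops(const struct variant *var, bool is_mdio)
{
	/* MDIO front-ends need the DSA-level PHY accessors; SMI
	 * front-ends register a separate user MII bus instead.
	 */
	return is_mdio ? var->ds_ops_mdio : var->ds_ops_smi;
}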
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/realtek/rtl8366-core.c
index bdb8d8d34880..dc5f75be3017 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -11,18 +11,18 @@
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
{
int ret;
int i;
*used = 0;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
int index = 0;
- ret = smi->ops->get_mc_index(smi, i, &index);
+ ret = priv->ops->get_mc_index(priv, i, &index);
if (ret)
return ret;
@@ -38,13 +38,13 @@ EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
- * @smi: the Realtek SMI device instance
+ * @priv: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: the pointer will be assigned to a pointer to a valid member config
* if successful
* @return: index of a new member config or negative error number
*/
-static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
@@ -52,10 +52,10 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
int i;
/* Try to find an existing member config entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -65,19 +65,19 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
}
/* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret) {
- dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -86,30 +86,30 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
int used;
- ret = rtl8366_mc_is_used(smi, i, &used);
+ ret = rtl8366_mc_is_used(priv, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
@@ -117,23 +117,23 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
- dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ dev_err(priv->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
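rtl8366_obtain_mc() above is a three-pass allocation over the small member-config table: reuse an entry that already holds the VID, else claim an empty slot, else recycle a slot no port currently points at. The same policy condensed into plain C over an in-memory array (is_used stands in for rtl8366_mc_is_used()):

#include <stddef.h>

struct mc { unsigned int vid, member; };

static int obtain_mc(struct mc *tbl, size_t n, unsigned int vid,
		     int (*is_used)(size_t idx))
{
	size_t i;

	for (i = 0; i < n; i++)           /* 1: existing entry for VID */
		if (tbl[i].vid == vid)
			return (int)i;
	for (i = 0; i < n; i++)           /* 2: claim an empty entry */
		if (tbl[i].vid == 0 && tbl[i].member == 0) {
			tbl[i].vid = vid;
			return (int)i;
		}
	for (i = 0; i < n; i++)           /* 3: recycle an unused entry */
		if (!is_used(i)) {
			tbl[i].vid = vid;
			return (int)i;
		}
	return -1;                        /* -ENOSPC in the driver */
}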
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
@@ -141,31 +141,31 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag);
/* Update the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlan4k.member |= member;
vlan4k.untag |= untag;
vlan4k.fid = fid;
- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ ret = priv->ops->set_vlan_4k(priv, &vlan4k);
if (ret)
return ret;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
@@ -176,12 +176,12 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
vlanmc.fid = fid;
/* Commit updates to the MC entry */
- ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
if (ret)
- dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
mc, vid);
else
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
@@ -189,37 +189,37 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
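Note the asymmetry in rtl8366_set_vlan(): the 4K table entry accumulates members with |=, while the member config is overwritten with the merged result. A tiny arithmetic example of the accumulation (the port numbers and masks are made up):

#include <stdio.h>

int main(void)
{
	unsigned int member4k = 0x21, untag4k = 0x01; /* existing 4K entry */
	unsigned int member = 0x04, untag = 0x04;     /* port 2 joins untagged */

	member4k |= member;  /* ports accumulate in the 4K table */
	untag4k  |= untag;

	printf("member: 0x%02x untag: 0x%02x\n", member4k, untag4k);
	return 0;
}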
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
- ret = smi->ops->set_mc_index(smi, port, mc);
+ ret = priv->ops->set_mc_index(priv, port, mc);
if (ret) {
- dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
}
- dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
port, vid, mc);
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
int ret;
@@ -229,52 +229,52 @@ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
*/
if (enable) {
/* Make sure VLAN is ON */
- ret = smi->ops->enable_vlan(smi, true);
+ ret = priv->ops->enable_vlan(priv, true);
if (ret)
return ret;
- smi->vlan_enabled = true;
+ priv->vlan_enabled = true;
}
- ret = smi->ops->enable_vlan4k(smi, enable);
+ ret = priv->ops->enable_vlan4k(priv, enable);
if (ret)
return ret;
- smi->vlan4k_enabled = enable;
+ priv->vlan4k_enabled = enable;
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
int ret;
- ret = smi->ops->enable_vlan(smi, enable);
+ ret = priv->ops->enable_vlan(priv, enable);
if (ret)
return ret;
- smi->vlan_enabled = enable;
+ priv->vlan_enabled = enable;
/* If we turn VLAN off, make sure that we turn off
* 4k VLAN as well, if that happened to be on.
*/
if (!enable) {
- smi->vlan4k_enabled = false;
- ret = smi->ops->enable_vlan4k(smi, false);
+ priv->vlan4k_enabled = false;
+ ret = priv->ops->enable_vlan4k(priv, false);
}
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
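The two helpers above maintain one invariant: the 4K table can only be enabled while VLAN is enabled, and disabling VLAN drags the 4K table down with it. A minimal sketch of that state machine, assuming a plain struct in place of the priv bookkeeping:

#include <stdbool.h>

struct vlan_state { bool vlan, vlan4k; };

static void set_vlan4k(struct vlan_state *s, bool enable)
{
	if (enable)
		s->vlan = true;    /* 4K requires VLAN on */
	s->vlan4k = enable;
}

static void set_vlan(struct vlan_state *s, bool enable)
{
	s->vlan = enable;
	if (!enable)
		s->vlan4k = false; /* VLAN off forces 4K off */
}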
-int rtl8366_reset_vlan(struct realtek_smi *smi)
+int rtl8366_reset_vlan(struct realtek_priv *priv)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int i;
- rtl8366_enable_vlan(smi, false);
- rtl8366_enable_vlan4k(smi, false);
+ rtl8366_enable_vlan(priv, false);
+ rtl8366_enable_vlan4k(priv, false);
/* Clear the 16 VLAN member configurations */
vlanmc.vid = 0;
@@ -282,8 +282,8 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
vlanmc.member = 0;
vlanmc.untag = 0;
vlanmc.fid = 0;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
}
@@ -298,12 +298,12 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 member = 0;
u32 untag = 0;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
+ if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
return -EINVAL;
}
@@ -312,13 +312,13 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
- ret = rtl8366_enable_vlan4k(smi, true);
+ ret = rtl8366_enable_vlan4k(priv, true);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
return ret;
}
- dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
@@ -327,18 +327,18 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (untagged)
untag |= BIT(port);
- ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
if (ret) {
- dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
return ret;
}
if (!pvid)
return 0;
- ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ ret = rtl8366_set_pvid(priv, port, vlan->vid);
if (ret) {
- dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
port, vlan->vid);
return ret;
}
@@ -350,15 +350,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret, i;
- dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
+ dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
@@ -376,9 +376,9 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
vlanmc.priority = 0;
vlanmc.fid = 0;
}
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to remove VLAN %04x\n",
vlan->vid);
return ret;
@@ -394,15 +394,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366_mib_counter *mib;
int i;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
- mib = &smi->mib_counters[i];
+ for (i = 0; i < priv->num_mib_counters; i++) {
+ mib = &priv->mib_counters[i];
strncpy(data + i * ETH_GSTRING_LEN,
mib->name, ETH_GSTRING_LEN);
}
@@ -411,35 +411,35 @@ EXPORT_SYMBOL_GPL(rtl8366_get_strings);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- return smi->num_mib_counters;
+ return priv->num_mib_counters;
}
EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int i;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
+ for (i = 0; i < priv->num_mib_counters; i++) {
struct rtl8366_mib_counter *mib;
u64 mibvalue = 0;
- mib = &smi->mib_counters[i];
- ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ mib = &priv->mib_counters[i];
+ ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
if (ret) {
- dev_err(smi->dev, "error reading MIB counter %s\n",
+ dev_err(priv->dev, "error reading MIB counter %s\n",
mib->name);
}
data[i] = mibvalue;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index ecc19bd5115f..25f88022b9e4 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,7 +21,7 @@
#include <linux/of_irq.h>
#include <linux/regmap.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -396,7 +396,7 @@ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 70, 2, "IfOutBroadcastPkts" },
};
-static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue)
@@ -412,12 +412,12 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Write the accessed counter's address first;
 * the ASIC will then prepare the 64-bit counter for retrieval
 */
- ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ ret = regmap_write(priv->map, addr, 0); /* Write whatever */
if (ret)
return ret;
/* Read MIB control register */
- ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
if (ret)
return -EIO;
@@ -430,7 +430,7 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Read each individual MIB 16 bits at the time */
*mibvalue = 0;
for (i = mib->length; i > 0; i--) {
- ret = regmap_read(smi->map, addr + (i - 1), &val);
+ ret = regmap_read(priv->map, addr + (i - 1), &val);
if (ret)
return ret;
*mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
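The loop above assembles a counter that may span up to four 16-bit registers, reading from the highest address downwards and shifting the running value left by 16 bits at each step. The same logic as a self-contained helper:

#include <stdint.h>

static uint64_t read_mib(const uint16_t *regs, int len)
{
	uint64_t v = 0;
	int i;

	for (i = len; i > 0; i--)
		v = (v << 16) | regs[i - 1];  /* regs[len-1] is the high word */
	return v;
}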
@@ -455,38 +455,38 @@ static u32 rtl8366rb_get_irqmask(struct irq_data *d)
static void rtl8366rb_mask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d), 0);
if (ret)
- dev_err(smi->dev, "could not mask IRQ\n");
+ dev_err(priv->dev, "could not mask IRQ\n");
}
static void rtl8366rb_unmask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d),
rtl8366rb_get_irqmask(d));
if (ret)
- dev_err(smi->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "could not unmask IRQ\n");
}
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
u32 stat;
int ret;
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&stat);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
stat &= RTL8366RB_INTERRUPT_VALID;
@@ -502,7 +502,7 @@ static irqreturn_t rtl8366rb_irq(int irq, void *data)
*/
if (line < 12 && line > 5)
line -= 5;
- child_irq = irq_find_mapping(smi->irqdomain, line);
+ child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
@@ -538,7 +538,7 @@ static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
{
struct device_node *intc;
unsigned long irq_trig;
@@ -547,24 +547,24 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
u32 val;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* RTL8366RB IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
- dev_err(smi->dev, "failed to get parent IRQ\n");
+ dev_err(priv->dev, "failed to get parent IRQ\n");
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&val);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
goto out_put_node;
}
@@ -573,48 +573,48 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
- dev_info(smi->dev, "active high/rising IRQ\n");
+ dev_info(priv->dev, "active high/rising IRQ\n");
val = 0;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
- dev_info(smi->dev, "active low/falling IRQ\n");
+ dev_info(priv->dev, "active low/falling IRQ\n");
val = RTL8366RB_INTERRUPT_POLARITY;
break;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_INTERRUPT_POLARITY,
val);
if (ret) {
- dev_err(smi->dev, "could not configure IRQ polarity\n");
+ dev_err(priv->dev, "could not configure IRQ polarity\n");
goto out_put_node;
}
- ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
rtl8366rb_irq, IRQF_ONESHOT,
- "RTL8366RB", smi);
+ "RTL8366RB", priv);
if (ret) {
- dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to create IRQ domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++)
- irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+ for (i = 0; i < priv->num_ports; i++)
+ irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
out_put_node:
of_node_put(intc);
return ret;
}
-static int rtl8366rb_set_addr(struct realtek_smi *smi)
+static int rtl8366rb_set_addr(struct realtek_priv *priv)
{
u8 addr[ETH_ALEN];
u16 val;
@@ -622,18 +622,18 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
eth_random_addr(addr);
- dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
val = addr[0] << 8 | addr[1];
- ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
if (ret)
return ret;
val = addr[2] << 8 | addr[3];
- ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
if (ret)
return ret;
val = addr[4] << 8 | addr[5];
- ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
if (ret)
return ret;
@@ -765,7 +765,7 @@ static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
/* Function that jams the tables in the proper registers */
static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
- int jam_size, struct realtek_smi *smi,
+ int jam_size, struct realtek_priv *priv,
bool write_dbg)
{
u32 val;
@@ -774,24 +774,24 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
for (i = 0; i < jam_size; i++) {
if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_PHY_ACCESS_BUSY_REG,
&val);
if (ret)
return ret;
if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
+ ret = regmap_write(priv->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
}
}
if (write_dbg)
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ dev_dbg(priv->dev, "jam %04x into register %04x\n",
jam_table[i].val,
jam_table[i].reg);
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
jam_table[i].reg,
jam_table[i].val);
if (ret)
@@ -802,7 +802,7 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
static int rtl8366rb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
@@ -812,11 +812,11 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
int ret;
int i;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
- dev_err(smi->dev, "unable to read chip id\n");
+ dev_err(priv->dev, "unable to read chip id\n");
return ret;
}
@@ -824,18 +824,18 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
case RTL8366RB_CHIP_ID_8366:
break;
default:
- dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
return -ENODEV;
}
- ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
&chip_ver);
if (ret) {
- dev_err(smi->dev, "unable to read chip version\n");
+ dev_err(priv->dev, "unable to read chip version\n");
return ret;
}
- dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ dev_info(priv->dev, "RTL%04x ver %u chip found\n",
chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
/* Do the init dance using the right jam table */
@@ -872,20 +872,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
if (ret)
return ret;
/* Isolate all user ports so they can only send packets to themselves and the CPU port */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
}
/* CPU port can send packets to all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
@@ -893,26 +893,26 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
- ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
if (ret)
return ret;
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
if (ret)
return ret;
/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
- ret = regmap_write(smi->map, 0x0c, 0x240);
+ ret = regmap_write(priv->map, 0x0c, 0x240);
if (ret)
return ret;
- ret = regmap_write(smi->map, 0x0d, 0x240);
+ ret = regmap_write(priv->map, 0x0d, 0x240);
if (ret)
return ret;
/* Set some random MAC address */
- ret = rtl8366rb_set_addr(smi);
+ ret = rtl8366rb_set_addr(priv);
if (ret)
return ret;
@@ -921,21 +921,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register,
* the custom tag is turned off.
*/
- ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
0xFFFF,
- BIT(smi->cpu_port));
+ BIT(priv->cpu_port));
if (ret)
return ret;
/* Make sure we default-enable the fixed CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
- BIT(smi->cpu_port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
+ BIT(priv->cpu_port),
0);
if (ret)
return ret;
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
@@ -945,13 +945,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
rb->max_mtu[i] = 1532;
/* Disable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
+ ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -962,30 +962,30 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* connected to something exotic such as fiber, then this might
* be worth experimenting with.
*/
- ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
RTL8366RB_PMC0_P4_IOMODE_MASK,
0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
if (ret)
return ret;
/* Accept all packets by default, we enable filtering on-demand */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
0);
if (ret)
return ret;
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
0);
if (ret)
return ret;
/* Don't drop packets whose DA has not been learned */
- ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
if (ret)
return ret;
/* Set blinking, TODO: make this configurable */
- ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
if (ret)
@@ -996,15 +996,15 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* behaviour (no individual config) but we can set up each
* LED separately.
*/
- if (smi->leds_disabled) {
+ if (priv->leds_disabled) {
/* Turn everything off */
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
0);
@@ -1014,7 +1014,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
val = RTL8366RB_LED_FORCE;
}
for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_CTRL_REG,
0xf << (i * 4),
val << (i * 4));
@@ -1022,18 +1022,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_reset_vlan(smi);
+ ret = rtl8366_reset_vlan(priv);
if (ret)
return ret;
- ret = rtl8366rb_setup_cascaded_irq(smi);
+ ret = rtl8366rb_setup_cascaded_irq(priv);
if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_info(smi->dev, "could not set up MDIO bus\n");
- return -ENODEV;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
}
return 0;
@@ -1052,35 +1054,35 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
- ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
+ dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
return;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
if (ret) {
- dev_err(smi->dev, "failed to set PAACR on CPU port\n");
+ dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
/* Enable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret) {
- dev_err(smi->dev, "failed to enable the CPU port\n");
+ dev_err(priv->dev, "failed to enable the CPU port\n");
return;
}
}
@@ -1089,107 +1091,108 @@ static void
rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
/* Disable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to disable the CPU port\n");
+ dev_err(priv->dev, "failed to disable the CPU port\n");
return;
}
}
-static void rb8366rb_set_port_led(struct realtek_smi *smi,
+static void rb8366rb_set_port_led(struct realtek_priv *priv,
int port, bool enable)
{
u16 val = enable ? 0x3f : 0;
int ret;
- if (smi->leds_disabled)
+ if (priv->leds_disabled)
return;
switch (port) {
case 0:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F, val);
break;
case 1:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F << RTL8366RB_LED_1_OFFSET,
val << RTL8366RB_LED_1_OFFSET);
break;
case 2:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F, val);
break;
case 3:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F << RTL8366RB_LED_3_OFFSET,
val << RTL8366RB_LED_3_OFFSET);
break;
case 4:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
enable ? RTL8366RB_P4_RGMII_LED : 0);
break;
default:
- dev_err(smi->dev, "no LED for port %d\n", port);
+ dev_err(priv->dev, "no LED for port %d\n", port);
return;
}
if (ret)
- dev_err(smi->dev, "error updating LED on port %d\n", port);
+ dev_err(priv->dev, "error updating LED on port %d\n", port);
}
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "enable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret)
return ret;
- rb8366rb_set_port_led(smi, port, true);
+ rb8366rb_set_port_led(priv, port, true);
return 0;
}
static void
rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "disable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret)
return;
- rb8366rb_set_port_led(smi, port, false);
+ rb8366rb_set_port_led(priv, port, false);
}
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1202,17 +1205,17 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)),
RTL8366RB_PORT_ISO_PORTS(BIT(port)));
if (ret)
- dev_err(smi->dev, "failed to join port %d\n", port);
+ dev_err(priv->dev, "failed to join port %d\n", port);
port_bitmap |= BIT(i);
}
/* Set the bits for the ports we can access */
- return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap),
RTL8366RB_PORT_ISO_PORTS(port_bitmap));
}
@@ -1221,7 +1224,7 @@ static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1234,28 +1237,30 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
if (ret)
- dev_err(smi->dev, "failed to leave port %d\n", port);
+ dev_err(priv->dev, "failed to leave port %d\n", port);
port_bitmap |= BIT(i);
}
/* Clear the bits for the ports we cannot access, leave ourselves */
- regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
}
/**
* rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
- * @smi: SMI state container
+ * @priv: SMI state container
* @port: the port to drop untagged and C-tagged frames on
* @drop: whether to drop or pass untagged and C-tagged frames
+ *
+ * Return: zero for success, a negative number on error.
*/
-static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
{
- return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
}
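rtl8366rb_drop_untagged() toggles a single per-port bit while leaving the rest of the ingress-control register untouched; that is the read-modify-write contract of regmap_update_bits(). The core of that contract in one line (a user-space restatement, not the regmap implementation):

#include <stdint.h>

/* Only bits set in mask are replaced; everything else is preserved.
 * The caller above passes mask = DROP(port) and val = DROP(port) or 0.
 */
static uint16_t update_bits(uint16_t reg, uint16_t mask, uint16_t val)
{
	return (reg & ~mask) | (val & mask);
}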
@@ -1264,17 +1269,17 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
vlan_filtering ? "enable" : "disable");
/* If the port is not in the member set, the frame will be dropped */
- ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
BIT(port), vlan_filtering ? BIT(port) : 0);
if (ret)
return ret;
@@ -1284,9 +1289,9 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
* filtering on a port, we need to accept any frames.
*/
if (vlan_filtering)
- ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
else
- ret = rtl8366rb_drop_untagged(smi, port, false);
+ ret = rtl8366rb_drop_untagged(priv, port, false);
return ret;
}
@@ -1308,11 +1313,11 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
BIT(port),
(flags.val & BR_LEARNING) ? 0 : BIT(port));
if (ret)
@@ -1325,7 +1330,7 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
static void
rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 val;
int i;
@@ -1344,13 +1349,13 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
val = RTL8366RB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "unknown bridge state requested\n");
+ dev_err(priv->dev, "unknown bridge state requested\n");
return;
}
/* Set the same status for the port on all the FIDs */
for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
- regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
RTL8366RB_STP_STATE_MASK(port),
RTL8366RB_STP_STATE(port, val));
}
@@ -1359,26 +1364,26 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static void
rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* This will age out any learned L2 entries */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), BIT(port));
/* Restore the normal state of things */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), 0);
}
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
unsigned int max_mtu;
u32 len;
int i;
/* Cache the per-port MTU setting */
- rb = smi->chip_data;
+ rb = priv->chip_data;
rb->max_mtu[port] = new_mtu;
/* Roof out the MTU for the entire switch to the greatest
@@ -1406,7 +1411,7 @@ static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
len);
}
@@ -1419,7 +1424,7 @@ static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
return 15996;
}
-static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1432,19 +1437,19 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return -EINVAL;
/* write VID */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
vid & RTL8366RB_VLAN_VID_MASK);
if (ret)
return ret;
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_READ_CTRL);
if (ret)
return ret;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_TABLE_READ_BASE + i,
&data[i]);
if (ret)
@@ -1460,7 +1465,7 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return 0;
}
-static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1480,7 +1485,7 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
data[i]);
if (ret)
@@ -1488,13 +1493,13 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
}
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_WRITE_CTRL);
return ret;
}
-static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1507,7 +1512,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return -EINVAL;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
&data[i]);
if (ret)
@@ -1525,7 +1530,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1549,7 +1554,7 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
data[i]);
if (ret)
@@ -1559,15 +1564,15 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
{
u32 data;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
&data);
if (ret)
return ret;
@@ -1578,22 +1583,22 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
return 0;
}
-static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
pvid_enabled = !!index;
- if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
- RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
- (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
if (ret)
return ret;
@@ -1604,17 +1609,17 @@ static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
- ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+ if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
}
-static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
{
unsigned int max = RTL8366RB_NUM_VLANS - 1;
- if (smi->vlan4k_enabled)
+ if (priv->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
if (vlan > max)
@@ -1623,23 +1628,23 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
return true;
}
-static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map,
+ dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
}
-static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
}
-static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 val;
u32 reg;
@@ -1648,32 +1653,40 @@ static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- ret = regmap_write(smi->map, reg, 0);
+ ret = regmap_write(priv->map_nolock, reg, 0);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
- return ret;
+ goto out;
}
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG,
+ &val);
if (ret)
- return ret;
+ goto out;
- dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ ret = val;
+
+ dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
- return val;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
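The new map_lock/map_nolock pair exists because a PHY read is a three-step indirect sequence (select read mode, write the address, fetch the data register) that must not interleave with a concurrent access; the mutex covers the whole sequence while the regmap's own per-call locking is bypassed. A user-space analogue of that shape, with step() standing in for the three register operations:

#include <pthread.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static int phy_access(int (*step)(int which))
{
	int ret;

	pthread_mutex_lock(&map_lock);
	ret = step(0);         /* 1: select read (or write) mode */
	if (!ret)
		ret = step(1); /* 2: write the PHY register address */
	if (!ret)
		ret = step(2); /* 3: transfer the data word */
	pthread_mutex_unlock(&map_lock);
	return ret;
}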
-static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 reg;
@@ -1682,34 +1695,50 @@ static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
- ret = regmap_write(smi->map, reg, val);
+ ret = regmap_write(priv->map_nolock, reg, val);
if (ret)
- return ret;
+ goto out;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
+}
+
+static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8366rb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
}
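The accessors above serialize the chip's three-step indirect PHY protocol (select read or write mode, address the PHY, transfer the data) by holding priv->map_lock around operations on the lock-free priv->map_nolock regmap. A minimal sketch of the same pattern, where CTRL_REG and DATA_REG are hypothetical placeholders rather than real RTL8366RB registers:

	/* Sketch only: locked indirect access mirroring rtl8366rb_phy_read().
	 * CTRL_REG and DATA_REG are illustrative placeholders.
	 */
	static int example_indirect_read(struct realtek_priv *priv, u32 addr,
					 u32 *val)
	{
		int ret;

		mutex_lock(&priv->map_lock);

		ret = regmap_write(priv->map_nolock, CTRL_REG, addr);
		if (ret)
			goto out;

		ret = regmap_read(priv->map_nolock, DATA_REG, val);
	out:
		mutex_unlock(&priv->map_lock);

		return ret;
	}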
-static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
u32 val;
int ret;
- realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
- RTL8366RB_CHIP_CTRL_RESET_HW);
+ priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
do {
usleep_range(20000, 25000);
- ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
if (ret)
return ret;
@@ -1718,21 +1747,21 @@ static int rtl8366rb_reset_chip(struct realtek_smi *smi)
} while (--timeout);
if (!timeout) {
- dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ dev_err(priv->dev, "timeout waiting for the switch to reset\n");
return -EIO;
}
return 0;
}
-static int rtl8366rb_detect(struct realtek_smi *smi)
+static int rtl8366rb_detect(struct realtek_priv *priv)
{
- struct device *dev = smi->dev;
+ struct device *dev = priv->dev;
int ret;
u32 val;
/* Detect device */
- ret = regmap_read(smi->map, 0x5c, &val);
+ ret = regmap_read(priv->map, 0x5c, &val);
if (ret) {
dev_err(dev, "can't get chip ID (%d)\n", ret);
return ret;
@@ -1745,11 +1774,11 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
return -ENODEV;
case 0x5937:
dev_info(dev, "found an RTL8366RB switch\n");
- smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
- smi->num_ports = RTL8366RB_NUM_PORTS;
- smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
- smi->mib_counters = rtl8366rb_mib_counters;
- smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ priv->num_ports = RTL8366RB_NUM_PORTS;
+ priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ priv->mib_counters = rtl8366rb_mib_counters;
+ priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
break;
default:
dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
@@ -1757,16 +1786,41 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
break;
}
- ret = rtl8366rb_reset_chip(smi);
+ ret = rtl8366rb_reset_chip(priv);
if (ret)
return ret;
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
+};
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
+ .phy_read = rtl8366rb_dsa_phy_read,
+ .phy_write = rtl8366rb_dsa_phy_write,
.phylink_mac_link_up = rtl8366rb_mac_link_up,
.phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
@@ -1787,7 +1841,7 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
.set_vlan_mc = rtl8366rb_set_vlan_mc,
@@ -1803,12 +1857,17 @@ static const struct realtek_smi_ops rtl8366rb_smi_ops = {
.phy_write = rtl8366rb_phy_write,
};
-const struct realtek_smi_variant rtl8366rb_variant = {
- .ds_ops = &rtl8366rb_switch_ops,
- .ops = &rtl8366rb_smi_ops,
+const struct realtek_variant rtl8366rb_variant = {
+ .ds_ops_smi = &rtl8366rb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
new file mode 100644
index 000000000000..ed413d555bec
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+
+#include "rzn1_a5psw.h"
+
+struct a5psw_stats {
+ u16 offset;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define STAT_DESC(_offset) { \
+ .offset = A5PSW_##_offset, \
+ .name = __stringify(_offset), \
+}
+
+static const struct a5psw_stats a5psw_stats[] = {
+ STAT_DESC(aFramesTransmittedOK),
+ STAT_DESC(aFramesReceivedOK),
+ STAT_DESC(aFrameCheckSequenceErrors),
+ STAT_DESC(aAlignmentErrors),
+ STAT_DESC(aOctetsTransmittedOK),
+ STAT_DESC(aOctetsReceivedOK),
+ STAT_DESC(aTxPAUSEMACCtrlFrames),
+ STAT_DESC(aRxPAUSEMACCtrlFrames),
+ STAT_DESC(ifInErrors),
+ STAT_DESC(ifOutErrors),
+ STAT_DESC(ifInUcastPkts),
+ STAT_DESC(ifInMulticastPkts),
+ STAT_DESC(ifInBroadcastPkts),
+ STAT_DESC(ifOutDiscards),
+ STAT_DESC(ifOutUcastPkts),
+ STAT_DESC(ifOutMulticastPkts),
+ STAT_DESC(ifOutBroadcastPkts),
+ STAT_DESC(etherStatsDropEvents),
+ STAT_DESC(etherStatsOctets),
+ STAT_DESC(etherStatsPkts),
+ STAT_DESC(etherStatsUndersizePkts),
+ STAT_DESC(etherStatsOversizePkts),
+ STAT_DESC(etherStatsPkts64Octets),
+ STAT_DESC(etherStatsPkts65to127Octets),
+ STAT_DESC(etherStatsPkts128to255Octets),
+ STAT_DESC(etherStatsPkts256to511Octets),
+ STAT_DESC(etherStatsPkts512to1023Octets),
+ STAT_DESC(etherStatsPkts1024to1518Octets),
+ STAT_DESC(etherStatsPkts1519toXOctets),
+ STAT_DESC(etherStatsJabbers),
+ STAT_DESC(etherStatsFragments),
+ STAT_DESC(VLANReceived),
+ STAT_DESC(VLANTransmitted),
+ STAT_DESC(aDeferred),
+ STAT_DESC(aMultipleCollisions),
+ STAT_DESC(aSingleCollisions),
+ STAT_DESC(aLateCollisions),
+ STAT_DESC(aExcessiveCollisions),
+ STAT_DESC(aCarrierSenseErrors),
+};
+
+static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
+{
+ writel(value, a5psw->base + offset);
+}
+
+static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
+{
+ return readl(a5psw->base + offset);
+}
+
+static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&a5psw->reg_lock);
+
+ reg = a5psw_reg_readl(a5psw, offset);
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, offset, reg);
+
+ spin_unlock(&a5psw->reg_lock);
+}
+
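A usage sketch (a hypothetical call, not taken from this patch): enabling both directions on port 2 through the spinlock-protected helper passes the same bits as mask and value, while passing the mask with a zero value clears them, which is how a5psw_port_enable_set() below drives the bits both ways.

	/* Hypothetical example: set the TX/RX enable bits for port 2. */
	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(2),
		      A5PSW_PORT_ENA_TX_RX(2));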
+static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RZN1_A5PSW;
+}
+
+static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
+ bool enable)
+{
+ u32 rx_match = 0;
+
+ if (enable)
+ rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
+
+ a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
+ A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
+}
+
+static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+{
+ /* Enable "management forward" pattern matching, this will forward
+ * packets from this port only towards the management port and thus
+ * isolate the port.
+ */
+ a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+}
+
+static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 port_ena = 0;
+
+ if (enable)
+ port_ena |= A5PSW_PORT_ENA_TX_RX(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
+ port_ena);
+}
+
+static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
+{
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
+ !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret)
+ dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
+
+ return ret;
+}
+
+static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
+{
+ u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+ a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ mutex_unlock(&a5psw->lk_lock);
+}
+
+static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
+ bool authorize)
+{
+ u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
+
+ if (authorize)
+ reg |= A5PSW_AUTH_PORT_AUTHORIZED;
+ else
+ reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
+
+ a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
+}
+
+static void a5psw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, false);
+ a5psw_port_enable_set(a5psw, port, false);
+}
+
+static int a5psw_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, true);
+ a5psw_port_enable_set(a5psw, port, true);
+
+ return 0;
+}
+
+static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
+ a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
+
+ return 0;
+}
+
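With ETH_HLEN = 14, ETH_FCS_LEN = 4 and A5PSW_EXTRA_MTU_LEN = 16 (the 8-byte switch tag plus room for two VLAN tags, per the header below), a requested MTU of 1500 programs A5PSW_FRM_LENGTH to 1500 + 14 + 16 + 4 = 1534.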
+static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return A5PSW_MAX_MTU;
+}
+
+static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *intf = config->supported_interfaces;
+
+ config->mac_capabilities = MAC_1000FD;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* GMII is used internally and GMAC2 is connected to the switch
+ * using 1000Mbps Full-Duplex mode only (cf. the ethernet manual)
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, intf);
+ } else {
+ config->mac_capabilities |= MAC_100 | MAC_10;
+ phy_interface_set_rgmii(intf);
+ __set_bit(PHY_INTERFACE_MODE_RMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_MII, intf);
+ }
+}
+
+static struct phylink_pcs *
+a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct a5psw *a5psw = ds->priv;
+
+ if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
+ return a5psw->pcs[port];
+
+ return NULL;
+}
+
+static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 cmd_cfg;
+
+ cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
+ cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
+ A5PSW_CMD_CFG_TX_CRC_APPEND;
+ struct a5psw *a5psw = ds->priv;
+
+ if (speed == SPEED_1000)
+ cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
+
+ if (duplex == DUPLEX_HALF)
+ cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
+
+ cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
+
+ if (!rx_pause)
+ cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
+
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned long rate;
+ u64 max, tmp;
+ u32 agetime;
+
+ rate = clk_get_rate(a5psw->clk);
+ max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
+ rate) * 1000;
+ if (msecs > max)
+ return -EINVAL;
+
+ tmp = div_u64(rate, MSEC_PER_SEC);
+ agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
+
+ return 0;
+}
+
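The register appears to count in units of 1024 * A5PSW_TABLE_ENTRIES clock cycles. A worked example under an assumed 200 MHz switch clock (the rate is illustrative, not from this patch): tmp = 200000 ticks per millisecond, so a 300000 ms (five minute) ageing time programs agetime = 300000 * 200000 / (1024 * 8192) = 7152, well under the 24-bit A5PSW_LK_AGETIME_MASK.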
+static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ bool set)
+{
+ u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
+ A5PSW_MCAST_DEF_MASK};
+ int i;
+
+ if (set)
+ a5psw->bridged_ports |= BIT(port);
+ else
+ a5psw->bridged_ports &= ~BIT(port);
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++)
+ a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+}
+
+static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ /* We only support 1 bridge device */
+ if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding offload supported for a single bridge");
+ return -EOPNOTSUPP;
+ }
+
+ a5psw->br_dev = bridge.dev;
+ a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_mgmtfwd_set(a5psw, port, false);
+
+ return 0;
+}
+
+static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_flooding_set_resolution(a5psw, port, false);
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+
+ /* No more ports bridged */
+ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+ a5psw->br_dev = NULL;
+}
+
+static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+ struct a5psw *a5psw = ds->priv;
+ u32 reg = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_LISTENING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ break;
+ case BR_STATE_LEARNING:
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ break;
+ }
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_fdb_flush(a5psw, port);
+}
+
+static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
+ u16 *entry)
+{
+ u32 ctrl;
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
+
+ ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
+ ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ if (ret)
+ return ret;
+
+ *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
+
+ return 0;
+}
+
+static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool inc_learncount = false;
+ int ret = 0;
+ u16 entry;
+ u32 reg;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+ lk_data.entry.port_mask = BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ /* Set the value to be written in the lookup table */
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ if (!lk_data.entry.valid) {
+ inc_learncount = true;
+ /* port_mask set to 0x1f when entry is not valid, clear it */
+ lk_data.entry.port_mask = 0;
+ lk_data.entry.prio = 0;
+ }
+
+ lk_data.entry.port_mask |= BIT(port);
+ lk_data.entry.is_static = 1;
+ lk_data.entry.valid = 1;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ if (inc_learncount) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
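The A5PSW_LK_LEARNCOUNT register is mode-driven: writes with MODE_INC or MODE_DEC nudge the hardware's learned-entry counter by one (as done here and in a5psw_port_fdb_del() below), while MODE_SET loads an absolute value, which a5psw_setup() uses to reset the counter to zero.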
+static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool clear = false;
+ u16 entry;
+ u32 reg;
+ int ret;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+
+ /* Our hardware does not associate any VID with the FDB entries, which
+ * means that if two entries were added for the same MAC but different
+ * VIDs, deleting the first one would also delete the second one. Since
+ * there is unfortunately nothing we can do about that, do not return
+ * an error...
+ */
+ if (!lk_data.entry.valid)
+ goto lk_unlock;
+
+ lk_data.entry.port_mask &= ~BIT(port);
+ /* If there is no more port in the mask, clear the entry */
+ if (lk_data.entry.port_mask == 0)
+ clear = true;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = entry;
+ if (clear)
+ reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
+ else
+ reg |= A5PSW_LK_ADDR_CTRL_WRITE;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ /* Decrement LEARNCOUNT */
+ if (clear) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data;
+ int i = 0, ret = 0;
+ u32 reg;
+
+ mutex_lock(&a5psw->lk_lock);
+
+ for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
+ reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto out_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ /* If entry is not valid or does not contain the port, skip */
+ if (!lk_data.entry.valid ||
+ !(lk_data.entry.port_mask & BIT(port)))
+ continue;
+
+ lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
+
+ ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
+{
+ u32 reg_lo, reg_hi;
+
+ reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
+ /* A5PSW_STATS_HIWORD is latched on stat read */
+ reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
+
+ return ((u64)reg_hi << 32) | reg_lo;
+}
+
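The read order matters: as the comment notes, reading the per-port low word latches the upper 32 bits into A5PSW_STATS_HIWORD, so the pair forms a consistent 64-bit snapshot only when read low-then-high. A hypothetical caller:

	/* Hypothetical example: fetch the 64-bit TX-OK counter of port 0. */
	u64 tx_ok = a5psw_read_stat(a5psw, A5PSW_aFramesTransmittedOK, 0);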
+static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
+ memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
+}
+
+static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(a5psw_stats);
+}
+
+static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
+ mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
+ mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
+ mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
+ mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
+ mac_stats->AlignmentErrors = RD(aAlignmentErrors);
+ mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
+ mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
+ mac_stats->LateCollisions = RD(aLateCollisions);
+ mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
+ mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
+ mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
+ mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
+ mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
+ mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
+ mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
+ mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
+ mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
+ mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
+#undef RD
+}
+
+static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, A5PSW_MAX_MTU },
+ {}
+};
+
+static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
+ rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
+ rmon_stats->fragments = RD(etherStatsFragments);
+ rmon_stats->jabbers = RD(etherStatsJabbers);
+ rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
+ rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
+ rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
+ rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
+ rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
+ rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
+ rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
+#undef RD
+
+ *ranges = a5psw_rmon_ranges;
+}
+
+static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+ u64 stat;
+
+ stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesTransmitted = stat;
+ stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesReceived = stat;
+}
+
+static int a5psw_setup(struct dsa_switch *ds)
+{
+ struct a5psw *a5psw = ds->priv;
+ int port, vlan, ret;
+ struct dsa_port *dp;
+ u32 reg;
+
+ /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dp->index != A5PSW_CPU_PORT) {
+ dev_err(a5psw->dev, "Invalid CPU port\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Configure management port */
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+
+ /* Set pattern 0 to forward all frames to the mgmt port */
+ a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
+ A5PSW_PATTERN_CTRL_MGMTFWD);
+
+ /* Enable port tagging */
+ reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
+ reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
+
+ /* Enable normal switch operation */
+ reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
+ A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
+ A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
+ a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
+ !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret) {
+ dev_err(a5psw->dev, "Failed to clear lookup table\n");
+ return ret;
+ }
+
+ /* Reset learn count to 0 */
+ reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+
+ /* Clear VLAN resource table */
+ reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
+ for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
+
+ /* Reset all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ port = dp->index;
+
+ /* Reset the port */
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
+ A5PSW_CMD_CFG_SW_RESET);
+
+ /* Enable only CPU port */
+ a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
+
+ if (dsa_port_is_unused(dp))
+ continue;
+
+ /* Enable egress flooding for CPU port */
+ if (dsa_port_is_cpu(dp))
+ a5psw_flooding_set_resolution(a5psw, port, true);
+
+ /* Enable management forward only for user ports */
+ if (dsa_port_is_user(dp))
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops a5psw_switch_ops = {
+ .get_tag_protocol = a5psw_get_tag_protocol,
+ .setup = a5psw_setup,
+ .port_disable = a5psw_port_disable,
+ .port_enable = a5psw_port_enable,
+ .phylink_get_caps = a5psw_phylink_get_caps,
+ .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .phylink_mac_link_down = a5psw_phylink_mac_link_down,
+ .phylink_mac_link_up = a5psw_phylink_mac_link_up,
+ .port_change_mtu = a5psw_port_change_mtu,
+ .port_max_mtu = a5psw_port_max_mtu,
+ .get_sset_count = a5psw_get_sset_count,
+ .get_strings = a5psw_get_strings,
+ .get_ethtool_stats = a5psw_get_ethtool_stats,
+ .get_eth_mac_stats = a5psw_get_eth_mac_stats,
+ .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
+ .get_rmon_stats = a5psw_get_rmon_stats,
+ .set_ageing_time = a5psw_set_ageing_time,
+ .port_bridge_join = a5psw_port_bridge_join,
+ .port_bridge_leave = a5psw_port_bridge_leave,
+ .port_stp_state_set = a5psw_port_stp_state_set,
+ .port_fast_age = a5psw_port_fast_age,
+ .port_fdb_add = a5psw_port_fdb_add,
+ .port_fdb_del = a5psw_port_fdb_del,
+ .port_fdb_dump = a5psw_port_fdb_dump,
+};
+
+static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
+{
+ u32 status;
+ int err;
+
+ err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
+ !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
+ 1000 * USEC_PER_MSEC);
+ if (err)
+ dev_err(a5psw->dev, "MDIO command timeout\n");
+
+ return err;
+}
+
+static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd, status;
+ int ret;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = A5PSW_MDIO_COMMAND_READ;
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+
+ ret = a5psw_mdio_wait_busy(a5psw);
+ if (ret)
+ return ret;
+
+ ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
+
+ status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
+ if (status & A5PSW_MDIO_CFG_STATUS_READERR)
+ return -EIO;
+
+ return ret;
+}
+
+static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
+ u16 phy_data)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
+
+ return a5psw_mdio_wait_busy(a5psw);
+}
+
+static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
+{
+ unsigned long rate;
+ unsigned long div;
+ u32 cfgstatus;
+
+ rate = clk_get_rate(a5psw->hclk);
+ div = ((rate / mdio_freq) / 2);
+ if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
+ div < A5PSW_MDIO_CLK_DIV_MIN) {
+ dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
+ return -ERANGE;
+ }
+
+ cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
+
+ return 0;
+}
+
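As a worked example (the clock rate is an assumption, not from this patch): with a 125 MHz hclk and the default 2.5 MHz MDIO frequency, div = (125000000 / 2500000) / 2 = 25, which sits between A5PSW_MDIO_CLK_DIV_MIN (5) and the 511 maximum of the 9-bit CLKDIV field.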
+static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
+{
+ struct device *dev = a5psw->dev;
+ struct mii_bus *bus;
+ u32 mdio_freq;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
+ mdio_freq = A5PSW_MDIO_DEF_FREQ;
+
+ ret = a5psw_mdio_config(a5psw, mdio_freq);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "a5psw_mdio";
+ bus->read = a5psw_mdio_read;
+ bus->write = a5psw_mdio_write;
+ bus->priv = a5psw;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ a5psw->mii_bus = bus;
+
+ return devm_of_mdiobus_register(dev, bus, node);
+}
+
+static void a5psw_pcs_free(struct a5psw *a5psw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
+ if (a5psw->pcs[i])
+ miic_destroy(a5psw->pcs[i]);
+ }
+}
+
+static int a5psw_pcs_get(struct a5psw *a5psw)
+{
+ struct device_node *ports, *port, *pcs_node;
+ struct phylink_pcs *pcs;
+ int ret;
+ u32 reg;
+
+ ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ pcs_node = of_parse_phandle(port, "pcs-handle", 0);
+ if (!pcs_node)
+ continue;
+
+ if (of_property_read_u32(port, "reg", &reg)) {
+ ret = -EINVAL;
+ goto free_pcs;
+ }
+
+ if (reg >= ARRAY_SIZE(a5psw->pcs)) {
+ ret = -ENODEV;
+ goto free_pcs;
+ }
+
+ pcs = miic_create(a5psw->dev, pcs_node);
+ if (IS_ERR(pcs)) {
+ dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
+ reg);
+ ret = PTR_ERR(pcs);
+ goto free_pcs;
+ }
+
+ a5psw->pcs[reg] = pcs;
+ of_node_put(pcs_node);
+ }
+ of_node_put(ports);
+
+ return 0;
+
+free_pcs:
+ of_node_put(pcs_node);
+ of_node_put(port);
+ of_node_put(ports);
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio;
+ struct dsa_switch *ds;
+ struct a5psw *a5psw;
+ int ret;
+
+ a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
+ if (!a5psw)
+ return -ENOMEM;
+
+ a5psw->dev = dev;
+ mutex_init(&a5psw->lk_lock);
+ spin_lock_init(&a5psw->reg_lock);
+ a5psw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(a5psw->base))
+ return PTR_ERR(a5psw->base);
+
+ ret = a5psw_pcs_get(a5psw);
+ if (ret)
+ return ret;
+
+ a5psw->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(a5psw->hclk)) {
+ dev_err(dev, "failed get hclk clock\n");
+ ret = PTR_ERR(a5psw->hclk);
+ goto free_pcs;
+ }
+
+ a5psw->clk = devm_clk_get(dev, "clk");
+ if (IS_ERR(a5psw->clk)) {
+ dev_err(dev, "failed get clk_switch clock\n");
+ ret = PTR_ERR(a5psw->clk);
+ goto free_pcs;
+ }
+
+ ret = clk_prepare_enable(a5psw->clk);
+ if (ret)
+ goto free_pcs;
+
+ ret = clk_prepare_enable(a5psw->hclk);
+ if (ret)
+ goto clk_disable;
+
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ ret = a5psw_probe_mdio(a5psw, mdio);
+ if (ret) {
+ of_node_put(mdio);
+ dev_err(dev, "Failed to register MDIO: %d\n", ret);
+ goto hclk_disable;
+ }
+ }
+
+ of_node_put(mdio);
+
+ ds = &a5psw->ds;
+ ds->dev = dev;
+ ds->num_ports = A5PSW_PORTS_NUM;
+ ds->ops = &a5psw_switch_ops;
+ ds->priv = a5psw;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err(dev, "Failed to register DSA switch: %d\n", ret);
+ goto hclk_disable;
+ }
+
+ return 0;
+
+hclk_disable:
+ clk_disable_unprepare(a5psw->hclk);
+clk_disable:
+ clk_disable_unprepare(a5psw->clk);
+free_pcs:
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_remove(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return 0;
+
+ dsa_unregister_switch(&a5psw->ds);
+ a5psw_pcs_free(a5psw);
+ clk_disable_unprepare(a5psw->hclk);
+ clk_disable_unprepare(a5psw->clk);
+
+ return 0;
+}
+
+static void a5psw_shutdown(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_switch_shutdown(&a5psw->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id a5psw_of_mtable[] = {
+ { .compatible = "renesas,rzn1-a5psw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
+
+static struct platform_driver a5psw_driver = {
+ .driver = {
+ .name = "rzn1_a5psw",
+ .of_match_table = of_match_ptr(a5psw_of_mtable),
+ },
+ .probe = a5psw_probe,
+ .remove = a5psw_remove,
+ .shutdown = a5psw_shutdown,
+};
+module_platform_driver(a5psw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
new file mode 100644
index 000000000000..c67abd49c013
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <net/dsa.h>
+
+#define A5PSW_REVISION 0x0
+#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
+
+#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_RX_SHIFT 16
+#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ BIT(port))
+#define A5PSW_UCAST_DEF_MASK 0xC
+
+#define A5PSW_VLAN_VERIFY 0x10
+#define A5PSW_VLAN_VERI_SHIFT 0
+#define A5PSW_VLAN_DISC_SHIFT 16
+
+#define A5PSW_BCAST_DEF_MASK 0x14
+#define A5PSW_MCAST_DEF_MASK 0x18
+
+#define A5PSW_INPUT_LEARN 0x1C
+#define A5PSW_INPUT_LEARN_DIS(p) BIT((p) + 16)
+#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
+
+#define A5PSW_MGMT_CFG 0x20
+#define A5PSW_MGMT_CFG_DISCARD BIT(7)
+
+#define A5PSW_MODE_CFG 0x24
+#define A5PSW_MODE_STATS_RESET BIT(31)
+
+#define A5PSW_VLAN_IN_MODE 0x28
+#define A5PSW_VLAN_IN_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_IN_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_IN_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_IN_MODE_SINGLE_PASSTHROUGH 0x0
+#define A5PSW_VLAN_IN_MODE_SINGLE_REPLACE 0x1
+#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
+
+#define A5PSW_VLAN_OUT_MODE 0x2C
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2))
+#define A5PSW_VLAN_OUT_MODE_DIS 0x0
+#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
+#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
+#define A5PSW_VLAN_OUT_MODE_TRANSPARENT 0x3
+
+#define A5PSW_VLAN_IN_MODE_ENA 0x30
+#define A5PSW_VLAN_TAG_ID 0x34
+
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
+#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
+
+#define A5PSW_VLAN_RES(entry) (0x280 + 4 * (entry))
+#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
+#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
+#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
+#define A5PSW_VLAN_RES_ID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
+
+#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
+#define A5PSW_RXMATCH_CONFIG_PATTERN(p) BIT(p)
+
+#define A5PSW_PATTERN_CTRL(p) (0x3eb0 + 4 * (p))
+#define A5PSW_PATTERN_CTRL_MGMTFWD BIT(1)
+
+#define A5PSW_LK_CTRL 0x400
+#define A5PSW_LK_ADDR_CTRL_BLOCKING BIT(0)
+#define A5PSW_LK_ADDR_CTRL_LEARNING BIT(1)
+#define A5PSW_LK_ADDR_CTRL_AGEING BIT(2)
+#define A5PSW_LK_ADDR_CTRL_ALLOW_MIGR BIT(3)
+#define A5PSW_LK_ADDR_CTRL_CLEAR_TABLE BIT(6)
+
+#define A5PSW_LK_ADDR_CTRL 0x408
+#define A5PSW_LK_ADDR_CTRL_BUSY BIT(31)
+#define A5PSW_LK_ADDR_CTRL_DELETE_PORT BIT(30)
+#define A5PSW_LK_ADDR_CTRL_CLEAR BIT(29)
+#define A5PSW_LK_ADDR_CTRL_LOOKUP BIT(28)
+#define A5PSW_LK_ADDR_CTRL_WAIT BIT(27)
+#define A5PSW_LK_ADDR_CTRL_READ BIT(26)
+#define A5PSW_LK_ADDR_CTRL_WRITE BIT(25)
+#define A5PSW_LK_ADDR_CTRL_ADDRESS GENMASK(12, 0)
+
+#define A5PSW_LK_DATA_LO 0x40C
+#define A5PSW_LK_DATA_HI 0x410
+#define A5PSW_LK_DATA_HI_VALID BIT(16)
+#define A5PSW_LK_DATA_HI_PORT BIT(16)
+
+#define A5PSW_LK_LEARNCOUNT 0x418
+#define A5PSW_LK_LEARNCOUNT_COUNT GENMASK(13, 0)
+#define A5PSW_LK_LEARNCOUNT_MODE GENMASK(31, 30)
+#define A5PSW_LK_LEARNCOUNT_MODE_SET 0x0
+#define A5PSW_LK_LEARNCOUNT_MODE_INC 0x1
+#define A5PSW_LK_LEARNCOUNT_MODE_DEC 0x2
+
+#define A5PSW_MGMT_TAG_CFG 0x480
+#define A5PSW_MGMT_TAG_CFG_TAGFIELD GENMASK(31, 16)
+#define A5PSW_MGMT_TAG_CFG_ALL_FRAMES BIT(1)
+#define A5PSW_MGMT_TAG_CFG_ENABLE BIT(0)
+
+#define A5PSW_LK_AGETIME 0x41C
+#define A5PSW_LK_AGETIME_MASK GENMASK(23, 0)
+
+#define A5PSW_MDIO_CFG_STATUS 0x700
+#define A5PSW_MDIO_CFG_STATUS_CLKDIV GENMASK(15, 7)
+#define A5PSW_MDIO_CFG_STATUS_READERR BIT(1)
+#define A5PSW_MDIO_CFG_STATUS_BUSY BIT(0)
+
+#define A5PSW_MDIO_COMMAND 0x704
+/* Register is named TRAININIT in the datasheet and should be set when reading */
+#define A5PSW_MDIO_COMMAND_READ BIT(15)
+#define A5PSW_MDIO_COMMAND_PHY_ADDR GENMASK(9, 5)
+#define A5PSW_MDIO_COMMAND_REG_ADDR GENMASK(4, 0)
+
+#define A5PSW_MDIO_DATA 0x708
+#define A5PSW_MDIO_DATA_MASK GENMASK(15, 0)
+
+#define A5PSW_CMD_CFG(port) (0x808 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_CMD_CFG_CNTL_FRM_ENA BIT(23)
+#define A5PSW_CMD_CFG_SW_RESET BIT(13)
+#define A5PSW_CMD_CFG_TX_CRC_APPEND BIT(11)
+#define A5PSW_CMD_CFG_HD_ENA BIT(10)
+#define A5PSW_CMD_CFG_PAUSE_IGNORE BIT(8)
+#define A5PSW_CMD_CFG_CRC_FWD BIT(6)
+#define A5PSW_CMD_CFG_ETH_SPEED BIT(3)
+#define A5PSW_CMD_CFG_RX_ENA BIT(1)
+#define A5PSW_CMD_CFG_TX_ENA BIT(0)
+
+#define A5PSW_FRM_LENGTH(port) (0x814 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_FRM_LENGTH_MASK GENMASK(13, 0)
+
+#define A5PSW_STATUS(port) (0x840 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_STATS_HIWORD 0x900
+
+/* Stats */
+#define A5PSW_aFramesTransmittedOK 0x868
+#define A5PSW_aFramesReceivedOK 0x86C
+#define A5PSW_aFrameCheckSequenceErrors 0x870
+#define A5PSW_aAlignmentErrors 0x874
+#define A5PSW_aOctetsTransmittedOK 0x878
+#define A5PSW_aOctetsReceivedOK 0x87C
+#define A5PSW_aTxPAUSEMACCtrlFrames 0x880
+#define A5PSW_aRxPAUSEMACCtrlFrames 0x884
+/* If */
+#define A5PSW_ifInErrors 0x888
+#define A5PSW_ifOutErrors 0x88C
+#define A5PSW_ifInUcastPkts 0x890
+#define A5PSW_ifInMulticastPkts 0x894
+#define A5PSW_ifInBroadcastPkts 0x898
+#define A5PSW_ifOutDiscards 0x89C
+#define A5PSW_ifOutUcastPkts 0x8A0
+#define A5PSW_ifOutMulticastPkts 0x8A4
+#define A5PSW_ifOutBroadcastPkts 0x8A8
+/* Ether */
+#define A5PSW_etherStatsDropEvents 0x8AC
+#define A5PSW_etherStatsOctets 0x8B0
+#define A5PSW_etherStatsPkts 0x8B4
+#define A5PSW_etherStatsUndersizePkts 0x8B8
+#define A5PSW_etherStatsOversizePkts 0x8BC
+#define A5PSW_etherStatsPkts64Octets 0x8C0
+#define A5PSW_etherStatsPkts65to127Octets 0x8C4
+#define A5PSW_etherStatsPkts128to255Octets 0x8C8
+#define A5PSW_etherStatsPkts256to511Octets 0x8CC
+#define A5PSW_etherStatsPkts512to1023Octets 0x8D0
+#define A5PSW_etherStatsPkts1024to1518Octets 0x8D4
+#define A5PSW_etherStatsPkts1519toXOctets 0x8D8
+#define A5PSW_etherStatsJabbers 0x8DC
+#define A5PSW_etherStatsFragments 0x8E0
+
+#define A5PSW_VLANReceived 0x8E8
+#define A5PSW_VLANTransmitted 0x8EC
+
+#define A5PSW_aDeferred 0x910
+#define A5PSW_aMultipleCollisions 0x914
+#define A5PSW_aSingleCollisions 0x918
+#define A5PSW_aLateCollisions 0x91C
+#define A5PSW_aExcessiveCollisions 0x920
+#define A5PSW_aCarrierSenseErrors 0x924
+
+#define A5PSW_VLAN_TAG(prio, id) (((prio) << 12) | (id))
+#define A5PSW_PORTS_NUM 5
+#define A5PSW_CPU_PORT (A5PSW_PORTS_NUM - 1)
+#define A5PSW_MDIO_DEF_FREQ 2500000
+#define A5PSW_MDIO_TIMEOUT 100
+#define A5PSW_JUMBO_LEN (10 * SZ_1K)
+#define A5PSW_MDIO_CLK_DIV_MIN 5
+#define A5PSW_TAG_LEN 8
+#define A5PSW_VLAN_COUNT 32
+
+/* Ensure enough space for 2 VLAN tags */
+#define A5PSW_EXTRA_MTU_LEN (A5PSW_TAG_LEN + 8)
+#define A5PSW_MAX_MTU (A5PSW_JUMBO_LEN - A5PSW_EXTRA_MTU_LEN)
+
+#define A5PSW_PATTERN_MGMTFWD 0
+
+#define A5PSW_LK_BUSY_USEC_POLL 10
+#define A5PSW_CTRL_TIMEOUT 1000
+#define A5PSW_TABLE_ENTRIES 8192
+
+struct fdb_entry {
+ u8 mac[ETH_ALEN];
+ u16 valid:1;
+ u16 is_static:1;
+ u16 prio:3;
+ u16 port_mask:5;
+ u16 reserved:6;
+} __packed;
+
+union lk_data {
+ struct {
+ u32 lo;
+ u32 hi;
+ };
+ struct fdb_entry entry;
+};
+
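Assuming the little-endian layout the driver runs under, the overlay places mac[0..3] of the packed fdb_entry in the lo word and mac[4..5] plus the valid/is_static/prio/port_mask flags in the hi word (valid lands at bit 16, matching A5PSW_LK_DATA_HI_VALID). This is why the FDB helpers in rzn1_a5psw.c re-read and rewrite only A5PSW_LK_DATA_HI when toggling flags or the port mask.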
+/**
+ * struct a5psw - switch struct
+ * @base: Base address of the switch
+ * @hclk: hclk_switch clock
+ * @clk: clk_switch clock
+ * @dev: Device associated to the switch
+ * @mii_bus: MDIO bus struct
+ * @mdio_freq: MDIO bus frequency requested
+ * @pcs: Array of PCS connected to the switch ports (not for the CPU)
+ * @ds: DSA switch struct
+ * @stats_lock: lock to access statistics (shared HI counter)
+ * @lk_lock: Lock for the lookup table
+ * @reg_lock: Lock for register read-modify-write operation
+ * @bridged_ports: Mask of ports that are bridged and should be flooded
+ * @br_dev: Bridge net device
+ */
+struct a5psw {
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ struct phylink_pcs *pcs[A5PSW_PORTS_NUM - 1];
+ struct dsa_switch ds;
+ struct mutex lk_lock;
+ spinlock_t reg_lock;
+ u32 bridged_ports;
+ struct net_device *br_dev;
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 7dcdd784aea4..fad5afe3819c 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -300,6 +300,46 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
@@ -321,12 +361,9 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- rc = -EOPNOTSUPP;
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
goto out;
- }
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b513713be610..412666111b0c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -393,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
- /* This selects between Independent VLAN Learning (IVL) and
- * Shared VLAN Learning (SVL)
- */
- .shared_learn = true,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
@@ -1358,37 +1356,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-/* The SJA1105 MAC programming model is through the static config (the xMII
- * Mode table cannot be dynamically reconfigured), and we have to program
- * that early (earlier than PHYLINK calls us, anyway).
- * So just error out in case the connected PHY attempts to change the initial
- * system interface MII protocol from what is defined in the DT, at least for
- * now.
- */
-static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
- phy_interface_t interface)
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
{
- return priv->phy_mode[port] != interface;
-}
-
-static void sja1105_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs;
-
- if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
- phy_modes(state->interface));
- return;
- }
-
- xpcs = priv->xpcs[port];
+ struct dw_xpcs *xpcs = priv->xpcs[port];
if (xpcs)
- phylink_set_pcs(dp->pl, &xpcs->pcs);
+ return &xpcs->pcs;
+
+ return NULL;
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -1412,48 +1389,53 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
sja1105_inhibit_tx(priv, BIT(port), false);
}
-static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- /* Construct a new mask which exhaustively contains all link features
- * supported by the MAC, and then apply that (logical AND) to what will
- * be sent to the PHY for "marketing".
- */
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct sja1105_private *priv = ds->priv;
struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
*/
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- linkmode_zero(supported);
- return;
+ config->legacy_pre_march2020 = false;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
}
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
- phylink_set(mask, Autoneg);
- phylink_set(mask, MII);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT1_Full);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
- phylink_set(mask, 1000baseT_Full);
- if (priv->info->supports_2500basex[port]) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ config->mac_capabilities |= MAC_1000FD;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
}
static int
@@ -1819,25 +1801,52 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
}
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
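Both helpers above implement FDB isolation for VID-less entries: a DSA_DB_PORT entry is steered into the port's standalone tag_8021q VID and a DSA_DB_BRIDGE entry into the per-bridge VID, so identical MAC addresses learned in different bridges (or on standalone ports) no longer collide in the shared lookup table.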
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1874,7 +1883,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dp))
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -1885,7 +1894,15 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
static void sja1105_fast_age(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
int i;
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
@@ -1913,7 +1930,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid);
+ rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
@@ -1924,15 +1941,17 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
}
/* Common function for unicast and broadcast flood configuration.
@@ -2075,7 +2094,8 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
int rc;
@@ -2083,7 +2103,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge);
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
@@ -2097,7 +2117,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge);
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
sja1105_bridge_member(ds, port, bridge, false);
}
@@ -2232,14 +2252,13 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
* change it through the dynamic interface later.
*/
for (i = 0; i < ds->num_ports; i++) {
- u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
-
speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
mac[i].speed);
mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
if (priv->xpcs[i])
- bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
+ bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
+ MDIO_MMD_VEND2, MDIO_CTRL1);
}
/* No PTP operations can run right now */
@@ -2311,7 +2330,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
else
mode = MLO_AN_PHY;
- rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode, NULL);
if (rc < 0)
goto out;
@@ -2357,7 +2376,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
- struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -2395,28 +2413,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
- /* VLAN filtering => independent VLAN learning.
- * No VLAN filtering (or best effort) => shared VLAN learning.
- *
- * In shared VLAN learning mode, untagged traffic still gets
- * pvid-tagged, and the FDB table gets populated with entries
- * containing the "real" (pvid or from VLAN tag) VLAN ID.
- * However the switch performs a masked L2 lookup in the FDB,
- * effectively only looking up a frame's DMAC (and not VID) for the
- * forwarding decision.
- *
- * This is extremely convenient for us, because in modes with
- * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
- * each front panel port. This is good for identification but breaks
- * learning badly - the VID of the learnt FDB entry is unique, aka
- * no frames coming from any other port are going to have it. So
- * for forwarding purposes, this is as though learning was broken
- * (all frames get flooded).
- */
- table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
- l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
-
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
@@ -2525,7 +2521,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
*/
if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
- "Range 1024-3071 reserved for dsa_8021q operation");
+ "Range 3072-4095 reserved for dsa_8021q operation");
return -EBUSY;
}
@@ -2850,7 +2846,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
ingress, true);
@@ -3102,6 +3098,7 @@ static int sja1105_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
+ ds->fdb_isolation = true;
/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
ds->max_num_bridges = 7;
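Three VBID bits encode 2^3 = 8 values; with 0 reserved, 2^3 - 1 = 7 offloadable bridges remain, hence the limit.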
@@ -3152,8 +3149,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.set_ageing_time = sja1105_set_ageing_time,
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
- .phylink_validate = sja1105_phylink_validate,
- .phylink_mac_config = sja1105_mac_config,
+ .phylink_get_caps = sja1105_phylink_get_caps,
+ .phylink_mac_select_pcs = sja1105_mac_select_pcs,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
@@ -3346,18 +3343,14 @@ static int sja1105_probe(struct spi_device *spi)
return dsa_register_switch(priv->ds);
}
-static int sja1105_remove(struct spi_device *spi)
+static void sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
- return 0;
+ return;
dsa_unregister_switch(priv->ds);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
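spi_driver::remove now returns void: the SPI core only ever printed a warning on a non-zero return and carried on unbinding, so the old int return gave drivers a false sense of error handling. The conversion also drops the spi_set_drvdata(spi, NULL) call, since the driver core clears drvdata once the device is unbound. A minimal sketch of the resulting shape (example_priv is hypothetical):

	static void example_spi_remove(struct spi_device *spi)
	{
		struct example_priv *priv = spi_get_drvdata(spi);

		if (!priv)
			return;

		dsa_unregister_switch(priv->ds);
		/* no return value; no need to clear drvdata either */
	}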
static void sja1105_shutdown(struct spi_device *spi)
@@ -3387,12 +3380,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
+ .id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
.shutdown = sja1105_shutdown,
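The new spi_device_id table fixes module autoloading for DT-probed devices: the SPI core reports uevents as MODALIAS=spi:<name> with the vendor prefix stripped, so a driver that only carries an of_match_table is never matched by modprobe. A minimal sketch of the pairing, using a hypothetical "vendor,foo" device:

	static const struct of_device_id foo_dt_ids[] = {
		{ .compatible = "vendor,foo" },
		{ }
	};
	MODULE_DEVICE_TABLE(of, foo_dt_ids);

	/* Matched against MODALIAS=spi:foo at module load time. */
	static const struct spi_device_id foo_spi_ids[] = {
		{ "foo" },
		{ }
	};
	MODULE_DEVICE_TABLE(spi, foo_spi_ids);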
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index be3068a935af..30fb2cc40164 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -399,7 +399,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (ptp_data->extts_enabled)
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f5dca6a9b0f9..b7e95d60a6e4 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
return false;
}
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
static int sja1105_init_virtual_links(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
@@ -394,8 +407,9 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
+ /* FIXME */
struct dsa_port *dp = dsa_to_port(priv->ds, port);
- u16 vid = dsa_tag_8021q_rx_vid(dp);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index fe4b154a0a57..bd4206e8f9af 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -121,8 +121,6 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
vsc73xx_remove(&vsc_platform->vsc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 645398901e05..85b9a0f51dd8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -159,18 +159,14 @@ static int vsc73xx_spi_probe(struct spi_device *spi)
return vsc73xx_probe(&vsc_spi->vsc);
}
-static int vsc73xx_spi_remove(struct spi_device *spi)
+static void vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
- return 0;
+ return;
vsc73xx_remove(&vsc_spi->vsc);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
@@ -207,10 +203,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+ { "vsc7385" },
+ { "vsc7388" },
+ { "vsc7395" },
+ { "vsc7398" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
.shutdown = vsc73xx_spi_shutdown,
+ .id_table = vsc73xx_spi_ids,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 0730352cdd57..fa622639d640 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -109,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -138,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
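The _irqsave variants matter on 32-bit hosts, where u64_stats_sync is a real seqcount: if an interrupt on the same CPU read the statistics while the writer holds the sequence, the reader would retry forever. Disabling interrupts around the update closes that window; on 64-bit builds both forms compile away. A minimal sketch of the matching reader side (not from this patch):

	static void example_read_stats(struct xrs700x_port *p,
				       struct rtnl_link_stats64 *out)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			*out = p->stats64;
		} while (u64_stats_fetch_retry(&p->syncp, start));
	}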
@@ -442,34 +443,27 @@ static void xrs700x_teardown(struct dsa_switch *ds)
cancel_delayed_work_sync(&priv->mib_work);
}
-static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0:
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
break;
+
case 1:
case 2:
case 3:
- phylink_set(mask, 1000baseT_Full);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
break;
+
default:
- linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
+ break;
}
-
- phylink_set_port_modes(mask);
-
- /* The switch only supports full duplex. */
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
}
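phylink_validate callbacks, which pruned an ethtool link-mode bitmask by hand, are replaced across DSA by phylink_get_caps: the driver declares which PHY interface modes and MAC capabilities each port supports, and phylink derives the legal link modes itself. A minimal sketch for a hypothetical MAC with one RGMII gigabit port:

	static void example_phylink_get_caps(struct dsa_switch *ds, int port,
					     struct phylink_config *config)
	{
		phy_interface_set_rgmii(config->supported_interfaces);
		config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
	}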
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
@@ -541,7 +535,8 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
@@ -703,7 +698,7 @@ static const struct dsa_switch_ops xrs700x_ops = {
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
- .phylink_validate = xrs700x_phylink_validate,
+ .phylink_get_caps = xrs700x_phylink_get_caps,
.phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 6deae388a0d6..54065cdedd35 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -105,18 +105,14 @@ static int xrs700x_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int xrs700x_i2c_remove(struct i2c_client *i2c)
+static void xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
- return 0;
+ return;
xrs700x_switch_remove(priv);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 127a677d1f39..5f7d344b5d73 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -140,8 +140,6 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
return;
xrs700x_switch_remove(priv);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index f82ad7419508..aa0fc00faecb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -102,7 +102,7 @@ static const struct net_device_ops dummy_netdev_ops = {
static void dummy_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static const struct ethtool_ops dummy_ethtool_ops = {
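The strlcpy() to strscpy() conversions recur throughout this series. strlcpy() returns strlen(src), so it must traverse the whole (possibly unterminated) source even when truncating; strscpy() returns the number of characters copied, or -E2BIG on truncation, and always NUL-terminates a non-empty destination. A minimal sketch of the difference (not from the patch):

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "a-very-long-driver-name", sizeof(buf));
	if (n == -E2BIG)
		pr_warn("truncated to \"%s\"\n", buf);	/* "a-very-" */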
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 1111d1f33865..ca3e4700a813 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
list_del(&slave->list);
queue->num_slaves--;
slave->dev->flags &= ~IFF_SLAVE;
- dev_put_track(slave->dev, &slave->dev_tracker);
+ netdev_put(slave->dev, &slave->dev_tracker);
kfree(slave);
}
@@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
- dev_hold_track(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
+ netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
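dev_hold_track()/dev_put_track() were renamed to netdev_hold()/netdev_put(); the behaviour is unchanged. The tracker cookie lets CONFIG_NET_DEV_REFCNT_TRACKER builds attribute a leaked netdev reference to the exact holder instead of just reporting a bad refcount at unregister time. The usual pattern, sketched for a hypothetical slave structure:

	struct example_slave {
		struct net_device *dev;
		netdevice_tracker dev_tracker;	/* one tracker per held ref */
	};

	static void example_take(struct example_slave *s, struct net_device *dev)
	{
		s->dev = dev;
		netdev_hold(dev, &s->dev_tracker, GFP_ATOMIC);
	}

	static void example_drop(struct example_slave *s)
	{
		netdev_put(s->dev, &s->dev_tracker);
		s->dev = NULL;
	}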
@@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if ((master_dev->flags & IFF_UP) == IFF_UP) {
/* slave is not a master & not already a slave: */
if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
equalizer_t *eql = netdev_priv(master_dev);
int ret;
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->dev = slave_dev;
s->priority = srq.priority;
s->priority_bps = srq.priority;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 846fa3af4504..fb68339e1511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1135,7 +1135,7 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1d124b0f65e7..d2f4358cc550 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1527,7 +1527,7 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4673bc1604e7..82f94b1635bf 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -480,7 +480,7 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ccf07667aa5e..082388bb6169 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2959,13 +2959,13 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strlcpy(info->bus_info, dev_name(vp->gendev),
+ strscpy(info->bus_info, dev_name(vp->gendev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 481f1df3106c..aaaff3ba43ef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -138,11 +138,6 @@ MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
-#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
-#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
-#undef NETIF_F_TSO
-#endif
-
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
@@ -974,12 +969,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if (tp->card_state == Sleeping) {
- strlcpy(info->fw_version, "Sleep image",
+ strscpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strlcpy(info->fw_version, "Unknown runtime",
+ strscpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
@@ -989,8 +984,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
@@ -2261,9 +2256,28 @@ out:
return mode;
}
+#if MAX_SKB_FRAGS > 32
+
+#include <net/vxlan.h>
+
+static netdev_features_t typhoon_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags > 32 && skb_is_gso(skb))
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+#endif
+
static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
+#if MAX_SKB_FRAGS > 32
+ .ndo_features_check = typhoon_features_check,
+#endif
.ndo_start_xmit = typhoon_start_tx,
.ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
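Rather than compiling TSO out whenever MAX_SKB_FRAGS exceeds the adapter's 32-entry scatter-gather limit (the #warning block deleted above), typhoon now keeps TSO enabled and clears the GSO feature bits per packet from ndo_features_check; the core then falls back to software segmentation only for the rare skb that is too fragmented. The core of the technique, as a standalone sketch:

	static netdev_features_t example_features_check(struct sk_buff *skb,
							struct net_device *dev,
							netdev_features_t features)
	{
		/* Too many fragments for the hardware SG list: let the
		 * stack segment this one in software.
		 */
		if (skb_is_gso(skb) && skb_shinfo(skb)->nr_frags > 32)
			features &= ~NETIF_F_GSO_MASK;

		return features;
	}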
@@ -2278,6 +2292,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2409,8 +2424,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
@@ -2448,7 +2464,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &typhoon_netdev_ops;
- netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &typhoon_ethtool_ops;
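netdev->dev_addr is const these days, so drivers may no longer store through it directly; the address is assembled in a local buffer (aligned here so the __be32 store is legal) and committed with eth_hw_addr_set(). A minimal sketch, with lo16/hi32 standing in for whatever the device reports:

	static void example_set_hw_addr(struct net_device *dev,
					u16 lo16, u32 hi32)
	{
		u8 addr[ETH_ALEN] __aligned(4);

		*(__be16 *)&addr[0] = htons(lo16);
		*(__be32 *)&addr[2] = htonl(hi32);
		eth_hw_addr_set(dev, addr);	/* the only sanctioned write */
	}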
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1f8acbba5b6b..af603256b724 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -579,9 +579,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 ax_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..05d39ecb97ff 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -555,9 +555,9 @@ static int __init etherm_addr(char *addr)
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index e320cccba61a..8a7918d33419 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -405,15 +405,13 @@ static int mcf8390_init(struct net_device *dev)
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct resource *mem, *irq;
+ struct resource *mem;
resource_size_t msize;
- int ret;
+ int ret, irq;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
- dev_err(&pdev->dev, "no IRQ specified?\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return -ENXIO;
- }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL) {
@@ -433,7 +431,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- dev->irq = irq->start;
+ dev->irq = irq;
dev->base_addr = mem->start;
ret = mcf8390_init(dev);
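platform_get_irq() replaces digging the IRQ out of an IORESOURCE_IRQ resource; it also handles interrupt-controller domain mapping and prints its own error message, so callers are left with a bare errno check. The resulting idiom, sketched:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* error already logged by the core */
	dev->irq = irq;

(mcf8390 returns -ENXIO instead of irq, which works but discards the original errno.)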
@@ -452,8 +450,7 @@ static int mcf8390_remove(struct platform_device *pdev)
unregister_netdev(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index db3ec4768159..1917da784191 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -78,13 +77,14 @@ source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -121,14 +121,16 @@ config LANTIQ_XRX200
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
+source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -140,10 +142,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -163,6 +165,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -170,14 +173,15 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/sunplus/Kconfig"
source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8a87c1083d1d..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
@@ -41,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
@@ -89,12 +91,14 @@ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 1cfdd01b4c2e..cd4d71b83c33 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1576,7 +1576,7 @@ static int owl_emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
netdev->netdev_ops = &owl_emac_netdev_ops;
netdev->ethtool_ops = &owl_emac_ethtool_ops;
- netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll);
ret = devm_register_netdev(dev, netdev);
if (ret) {
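netif_napi_add() lost its weight parameter and now always registers with the default poll budget of 64; the handful of drivers that deliberately use a different weight (starfire below, typhoon above) were moved to netif_napi_add_weight(). Both forms, sketched with a hypothetical example_poll callback:

	/* default weight (64): */
	netif_napi_add(netdev, &priv->napi, example_poll);

	/* explicit non-default weight kept through the _weight variant: */
	netif_napi_add_weight(netdev, &priv->napi, example_poll, 16);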
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c6982f7caf9b..857361c74f5d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -772,7 +772,7 @@ static int starfire_init_one(struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &ethtool_ops;
- netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
+ netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);
if (mtu)
dev->mtu = mtu;
@@ -1844,8 +1844,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..da3bdd302502
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Analog Devices device configuration
+#
+
+config NET_VENDOR_ADI
+ bool "Analog Devices devices"
+ default y
+ depends on SPI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ADI devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ADI
+
+config ADIN1110
+ tristate "Analog Devices ADIN1110 MAC-PHY"
+ depends on SPI && NET_SWITCHDEV
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices ADIN1110
+ Low Power 10BASE-T1L Ethernet MAC-PHY.
+
+endif # NET_VENDOR_ADI
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..d0383d94303c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Makefile for the Analog Devices network device drivers.
+#
+
+obj-$(CONFIG_ADIN1110) += adin1110.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
new file mode 100644
index 000000000000..606c97610808
--- /dev/null
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -0,0 +1,1720 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY
+ * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/crc8.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <net/switchdev.h>
+
+#include <asm/unaligned.h>
+
+#define ADIN1110_PHY_ID 0x1
+
+#define ADIN1110_RESET 0x03
+#define ADIN1110_SWRESET BIT(0)
+
+#define ADIN1110_CONFIG1 0x04
+#define ADIN1110_CONFIG1_SYNC BIT(15)
+
+#define ADIN1110_CONFIG2 0x06
+#define ADIN2111_P2_FWD_UNK2HOST BIT(12)
+#define ADIN2111_PORT_CUT_THRU_EN BIT(11)
+#define ADIN1110_CRC_APPEND BIT(5)
+#define ADIN1110_FWD_UNK2HOST BIT(2)
+
+#define ADIN1110_STATUS0 0x08
+
+#define ADIN1110_STATUS1 0x09
+#define ADIN2111_P2_RX_RDY BIT(17)
+#define ADIN1110_SPI_ERR BIT(10)
+#define ADIN1110_RX_RDY BIT(4)
+
+#define ADIN1110_IMASK1 0x0D
+#define ADIN2111_RX_RDY_IRQ BIT(17)
+#define ADIN1110_SPI_ERR_IRQ BIT(10)
+#define ADIN1110_RX_RDY_IRQ BIT(4)
+#define ADIN1110_TX_RDY_IRQ BIT(3)
+
+#define ADIN1110_MDIOACC 0x20
+#define ADIN1110_MDIO_TRDONE BIT(31)
+#define ADIN1110_MDIO_ST GENMASK(29, 28)
+#define ADIN1110_MDIO_OP GENMASK(27, 26)
+#define ADIN1110_MDIO_PRTAD GENMASK(25, 21)
+#define ADIN1110_MDIO_DEVAD GENMASK(20, 16)
+#define ADIN1110_MDIO_DATA GENMASK(15, 0)
+
+#define ADIN1110_TX_FSIZE 0x30
+#define ADIN1110_TX 0x31
+#define ADIN1110_TX_SPACE 0x32
+
+#define ADIN1110_MAC_ADDR_FILTER_UPR 0x50
+#define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31)
+#define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30)
+#define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17)
+#define ADIN1110_MAC_ADDR_TO_HOST BIT(16)
+
+#define ADIN1110_MAC_ADDR_FILTER_LWR 0x51
+
+#define ADIN1110_MAC_ADDR_MASK_UPR 0x70
+#define ADIN1110_MAC_ADDR_MASK_LWR 0x71
+
+#define ADIN1110_RX_FSIZE 0x90
+#define ADIN1110_RX 0x91
+
+#define ADIN2111_RX_P2_FSIZE 0xC0
+#define ADIN2111_RX_P2 0xC1
+
+#define ADIN1110_CLEAR_STATUS0 0xFFF
+
+/* MDIO_OP codes */
+#define ADIN1110_MDIO_OP_WR 0x1
+#define ADIN1110_MDIO_OP_RD 0x3
+
+#define ADIN1110_CD BIT(7)
+#define ADIN1110_WRITE BIT(5)
+
+#define ADIN1110_MAX_BUFF 2048
+#define ADIN1110_MAX_FRAMES_READ 64
+#define ADIN1110_WR_HEADER_LEN 2
+#define ADIN1110_FRAME_HEADER_LEN 2
+#define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2
+#define ADIN1110_RD_HEADER_LEN 3
+#define ADIN1110_REG_LEN 4
+#define ADIN1110_FEC_LEN 4
+
+#define ADIN1110_PHY_ID_VAL 0x0283BC91
+#define ADIN2111_PHY_ID_VAL 0x0283BCA1
+
+#define ADIN_MAC_MAX_PORTS 2
+#define ADIN_MAC_MAX_ADDR_SLOTS 16
+
+#define ADIN_MAC_MULTICAST_ADDR_SLOT 0
+#define ADIN_MAC_BROADCAST_ADDR_SLOT 1
+#define ADIN_MAC_P1_ADDR_SLOT 2
+#define ADIN_MAC_P2_ADDR_SLOT 3
+#define ADIN_MAC_FDB_ADDR_SLOT 4
+
+DECLARE_CRC8_TABLE(adin1110_crc_table);
+
+enum adin1110_chips_id {
+ ADIN1110_MAC = 0,
+ ADIN2111_MAC,
+};
+
+struct adin1110_cfg {
+ enum adin1110_chips_id id;
+ char name[MDIO_NAME_SIZE];
+ u32 phy_ids[PHY_MAX_ADDR];
+ u32 ports_nr;
+ u32 phy_id_val;
+};
+
+struct adin1110_port_priv {
+ struct adin1110_priv *priv;
+ struct net_device *netdev;
+ struct net_device *bridge;
+ struct phy_device *phydev;
+ struct work_struct tx_work;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct work_struct rx_mode_work;
+ u32 flags;
+ struct sk_buff_head txq;
+ u32 nr;
+ u32 state;
+ struct adin1110_cfg *cfg;
+};
+
+struct adin1110_priv {
+ struct mutex lock; /* protect spi */
+ spinlock_t state_lock; /* protect RX mode */
+ struct mii_bus *mii_bus;
+ struct spi_device *spidev;
+ bool append_crc;
+ struct adin1110_cfg *cfg;
+ u32 tx_space;
+ u32 irq_mask;
+ bool forwarding;
+ int irq;
+ struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS];
+ char mii_bus_name[MII_BUS_ID_SIZE];
+ u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned;
+};
+
+struct adin1110_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct adin1110_port_priv *port_priv;
+ unsigned long event;
+};
+
+static struct adin1110_cfg adin1110_cfgs[] = {
+ {
+ .id = ADIN1110_MAC,
+ .name = "adin1110",
+ .phy_ids = {1},
+ .ports_nr = 1,
+ .phy_id_val = ADIN1110_PHY_ID_VAL,
+ },
+ {
+ .id = ADIN2111_MAC,
+ .name = "adin2111",
+ .phy_ids = {1, 2},
+ .ports_nr = 2,
+ .phy_id_val = ADIN2111_PHY_ID_VAL,
+ },
+};
+
+static u8 adin1110_crc_data(u8 *data, u32 len)
+{
+ return crc8(adin1110_crc_table, data, len, 0);
+}
+
+static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+{
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+ struct spi_transfer t[2] = {0};
+ int ret;
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+ priv->data[2] = 0x00;
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ priv->data[3] = 0x00;
+ header_len++;
+ }
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+ t[1].rx_buf = &priv->data[header_len];
+ t[1].len = read_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret)
+ return ret;
+
+ if (priv->append_crc) {
+ u8 recv_crc;
+ u8 crc;
+
+ crc = adin1110_crc_data(&priv->data[header_len],
+ ADIN1110_REG_LEN);
+ recv_crc = priv->data[header_len + ADIN1110_REG_LEN];
+
+ if (crc != recv_crc) {
+ dev_err_ratelimited(&priv->spidev->dev, "CRC error.");
+ return -EBADMSG;
+ }
+ }
+
+ *val = get_unaligned_be32(&priv->data[header_len]);
+
+ return ret;
+}
+
+static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val)
+{
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ u32 write_len = ADIN1110_REG_LEN;
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], header_len);
+ header_len++;
+ }
+
+ put_unaligned_be32(val, &priv->data[header_len]);
+ if (priv->append_crc) {
+ priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len],
+ write_len);
+ write_len++;
+ }
+
+ return spi_write(priv->spidev, &priv->data[0], header_len + write_len);
+}
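Every adin1110 SPI transaction opens with a two-byte control header: bit 7 of the first byte (ADIN1110_CD) flags a register access, bit 5 (ADIN1110_WRITE) selects a write, and the remaining bits carry the 13-bit register address, optionally followed by a CRC8 of the header. A worked sketch (not part of the driver) for a write to ADIN1110_TX (0x31):

	/* byte 0: CD | WRITE | addr[12:8] = 0x80 | 0x20 | 0x00 = 0xA0
	 * byte 1: addr[7:0]               = 0x31
	 * byte 2: CRC8 of bytes 0-1, only when append_crc is set
	 */
	static void example_build_header(u8 hdr[2], u16 reg, bool write)
	{
		hdr[0] = ADIN1110_CD | (write ? ADIN1110_WRITE : 0) |
			 FIELD_GET(GENMASK(12, 8), reg);
		hdr[1] = FIELD_GET(GENMASK(7, 0), reg);
	}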
+
+static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg,
+ unsigned long mask, unsigned long val)
+{
+ u32 write_val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, reg, &write_val);
+ if (ret < 0)
+ return ret;
+
+ set_mask_bits(&write_val, mask, val);
+
+ return adin1110_write_reg(priv, reg, write_val);
+}
+
+static int adin1110_round_len(int len)
+{
+ /* can read/write only multiples of 4 bytes of payload */
+ len = ALIGN(len, 4);
+
+ /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. */
+ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF)
+ return -EINVAL;
+
+ return len;
+}
+
+static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ struct spi_transfer t[2] = {0};
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+ int round_len;
+ u16 reg;
+ int ret;
+
+ if (!port_priv->nr) {
+ reg = ADIN1110_RX;
+ ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size);
+ } else {
+ reg = ADIN2111_RX_P2;
+ ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE,
+ &frame_size);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* The read frame size includes the extra 2 bytes
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+ return ret;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+ return ret;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len);
+ if (!rxb)
+ return -ENOMEM;
+
+ memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ t[1].rx_buf = &rxb->data[0];
+ t[1].len = round_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+ skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+ rxb->offload_fwd_mark = 1;
+
+ netif_rx(rxb);
+
+ port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN;
+ port_priv->rx_packets++;
+
+ return 0;
+}
+
+static int adin1110_write_fifo(struct adin1110_port_priv *port_priv,
+ struct sk_buff *txb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ __be16 frame_header;
+ int padding = 0;
+ int padded_len;
+ int round_len;
+ int ret;
+
+ /* Pad the frame to the 64-byte minimum Ethernet length (including
+ * the FEC); neither the MAC nor the PHY will otherwise add the
+ * required padding.
+ * The FEC is appended by the MAC internally.
+ */
+ if (txb->len + ADIN1110_FEC_LEN < 64)
+ padding = 64 - (txb->len + ADIN1110_FEC_LEN);
+
+ padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN;
+
+ round_len = adin1110_round_len(padded_len);
+ if (round_len < 0)
+ return round_len;
+
+ ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len);
+ if (ret < 0)
+ return ret;
+
+ memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE;
+ priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX);
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ /* encode the destination port number in the 2-byte frame header */
+ frame_header = cpu_to_be16(port_priv->nr);
+ memcpy(&priv->data[header_len], &frame_header,
+ ADIN1110_FRAME_HEADER_LEN);
+
+ memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN],
+ txb->data, txb->len);
+
+ ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len);
+ if (ret < 0)
+ return ret;
+
+ port_priv->tx_bytes += txb->len;
+ port_priv->tx_packets++;
+
+ return 0;
+}
+
+static int adin1110_read_mdio_acc(struct adin1110_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return 0;
+
+ return val;
+}
+
+static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+
+ /* write the clause 22 read command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ /* The ADIN1110_MDIO_TRDONE bit of the ADIN1110_MDIOACC
+ * register is set when the read transaction completes.
+ * The ADIN1110_MDIO_DATA bitfield of ADIN1110_MDIOACC
+ * then contains the requested register value.
+ */
+ ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ if (ret < 0)
+ return ret;
+
+ return (val & ADIN1110_MDIO_DATA);
+}
+
+static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 reg_val)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+ val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val);
+
+ /* write the clause 22 write command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+}
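Both MDIO paths funnel through the MDIOACC register: the host writes the encoded clause-22 command over SPI, then polls until the hardware sets TRDONE. readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us) hides the polling loop; a simplified sketch of what it does here (the real macro in <linux/iopoll.h> uses ktime and re-checks the condition once after timing out):

	static int example_poll_trdone(struct adin1110_priv *priv)
	{
		unsigned long timeout = jiffies + usecs_to_jiffies(30000);
		u32 val;

		for (;;) {
			val = adin1110_read_mdio_acc(priv);
			if (val & ADIN1110_MDIO_TRDONE)
				return 0;
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			usleep_range(10000, 15000);
		}
	}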
+
+/* The ADIN1110 MAC-PHY contains one ADIN1100 PHY;
+ * the ADIN2111 MAC-PHY contains two ADIN1100 PHYs.
+ * Registering a new MDIO bus lets the PHY abstraction layer
+ * discover the encapsulated PHYs and probe the ADIN1100 driver.
+ */
+static int adin1110_register_mdiobus(struct adin1110_priv *priv,
+ struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u",
+ priv->cfg->name, priv->spidev->chip_select);
+
+ mii_bus->name = priv->mii_bus_name;
+ mii_bus->read = adin1110_mdio_read;
+ mii_bus->write = adin1110_mdio_write;
+ mii_bus->priv = priv;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
+ mii_bus->probe_capabilities = MDIOBUS_C22;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mii_bus);
+ if (ret)
+ return ret;
+
+ priv->mii_bus = mii_bus;
+
+ return 0;
+}
+
+static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv,
+ u32 status)
+{
+ if (!netif_oper_up(port_priv->netdev))
+ return false;
+
+ if (!port_priv->nr)
+ return !!(status & ADIN1110_RX_RDY);
+ else
+ return !!(status & ADIN2111_P2_RX_RDY);
+}
+
+static void adin1110_read_frames(struct adin1110_port_priv *port_priv,
+ unsigned int budget)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 status1;
+ int ret;
+
+ while (budget) {
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ return;
+
+ if (!adin1110_port_rx_ready(port_priv, status1))
+ break;
+
+ ret = adin1110_read_fifo(port_priv);
+ if (ret < 0)
+ return;
+
+ budget--;
+ }
+}
+
+static void adin1110_wake_queues(struct adin1110_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++)
+ netif_wake_queue(priv->ports[i]->netdev);
+}
+
+static irqreturn_t adin1110_irq(int irq, void *p)
+{
+ struct adin1110_priv *priv = p;
+ u32 status1;
+ u32 val;
+ int ret;
+ int i;
+
+ mutex_lock(&priv->lock);
+
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ goto out;
+
+ if (priv->append_crc && (status1 & ADIN1110_SPI_ERR))
+ dev_warn_ratelimited(&priv->spidev->dev,
+ "SPI CRC error on write.\n");
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0)
+ goto out;
+
+ /* TX FIFO space is expressed in half-words */
+ priv->tx_space = 2 * val;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (adin1110_port_rx_ready(priv->ports[i], status1))
+ adin1110_read_frames(priv->ports[i],
+ ADIN1110_MAX_FRAMES_READ);
+ }
+
+ /* clear IRQ sources */
+ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0);
+ adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (priv->tx_space > 0 && ret >= 0)
+ adin1110_wake_queues(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* The ADIN1110 can filter up to 16 MAC addresses; mac_nr selects the slot used */
+static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv,
+ int mac_nr, const u8 *addr,
+ u8 *mask, u32 port_rules)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 offset = mac_nr * 2;
+ u32 port_rules_mask;
+ int ret;
+ u32 val;
+
+ if (!port_priv->nr)
+ port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (port_rules & port_rules_mask)
+ port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ port_rules_mask |= GENMASK(15, 0);
+ val = port_rules | get_unaligned_be16(&addr[0]);
+ ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset,
+ port_rules_mask, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&addr[2]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_FILTER_LWR + offset, val);
+ if (ret < 0)
+ return ret;
+
+ /* Only the first two MAC address slots support masking. */
+ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) {
+ val = get_unaligned_be16(&mask[0]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_UPR + offset,
+ val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&mask[2]);
+ return adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_LWR + offset,
+ val);
+ }
+
+ return 0;
+}
+
+static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr)
+{
+ u32 offset = mac_nr * 2;
+ int ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ /* only the first two MAC address slots are maskable */
+ if (mac_nr <= 1) {
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0);
+ }
+
+ return ret;
+}
+
+static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv,
+ bool fw_to_host,
+ bool fw_to_other_port)
+{
+ u32 port_rules = 0;
+
+ if (!port_priv->nr)
+ port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (fw_to_host)
+ port_rules |= ADIN1110_MAC_ADDR_TO_HOST;
+
+ if (fw_to_other_port && port_priv->priv->forwarding)
+ port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ return port_rules;
+}
+
+static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_multicast)
+{
+ u8 mask[ETH_ALEN] = {0};
+ u8 mac[ETH_ALEN] = {0};
+ u32 port_rules = 0;
+
+ mask[0] = BIT(0);
+ mac[0] = BIT(0);
+
+ if (accept_multicast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mac,
+ mask, port_rules);
+}
+
+static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_broadcast)
+{
+ u32 port_rules = 0;
+ u8 mask[ETH_ALEN];
+
+ memset(mask, 0xFF, ETH_ALEN);
+
+ if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mask,
+ mask, port_rules);
+}
+
+static int adin1110_set_mac_address(struct net_device *netdev,
+ const unsigned char *dev_addr)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ u32 mac_slot;
+
+ if (!is_valid_ether_addr(dev_addr))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, dev_addr);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ port_rules = adin1110_port_rules(port_priv, true, false);
+
+ return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr,
+ mask, port_rules);
+}
+
+static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_mac_address(netdev, sa->sa_data);
+}
+
+static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_do_ioctl(netdev, rq, cmd);
+}
+
+static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv,
+ bool promisc)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+
+ if (port_priv->state != BR_STATE_FORWARDING)
+ promisc = false;
+
+ if (!port_priv->nr)
+ mask = ADIN1110_FWD_UNK2HOST;
+ else
+ mask = ADIN2111_P2_FWD_UNK2HOST;
+
+ return adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ mask, promisc ? mask : 0);
+}
+
+static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv)
+{
+ int ret;
+
+ ret = adin1110_set_promisc_mode(port_priv,
+ !!(port_priv->flags & IFF_PROMISC));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_ALLMULTI));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_broadcasts_filter(port_priv,
+ ADIN_MAC_BROADCAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_BROADCAST));
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1,
+ ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC);
+}
+
+static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv)
+{
+ int i;
+
+ if (priv->cfg->id != ADIN2111_MAC)
+ return false;
+
+ /* Can't enable forwarding if ports do not belong to the same bridge */
+ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge)
+ return false;
+
+ /* Can't enable forwarding if there is a port
+ * that has been blocked by STP.
+ */
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (priv->ports[i]->state != BR_STATE_FORWARDING)
+ return false;
+ }
+
+ return true;
+}
+
+static void adin1110_rx_mode_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+
+ port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+ adin1110_setup_rx_mode(port_priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void adin1110_set_rx_mode(struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ spin_lock(&priv->state_lock);
+
+ port_priv->flags = dev->flags;
+ schedule_work(&port_priv->rx_mode_work);
+
+ spin_unlock(&priv->state_lock);
+}
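The detour through a work item is required because ndo_set_rx_mode is invoked by the core in atomic context, while reprogramming the filters needs sleeping SPI transfers. Roughly what the calling side in net/core/dev.c looks like (sketch):

	netif_addr_lock_bh(dev);		/* spinlock, BH disabled */
	dev->netdev_ops->ndo_set_rx_mode(dev);	/* must not sleep */
	netif_addr_unlock_bh(dev);

Hence the handler above only snapshots dev->flags under the state spinlock and defers the real register writes to process context.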
+
+static int adin1110_net_open(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /* Configure MAC to compute and append the FCS itself. */
+ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND);
+ if (ret < 0)
+ goto out;
+
+ val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ;
+ if (priv->cfg->id == ADIN2111_MAC)
+ val |= ADIN2111_RX_RDY_IRQ;
+
+ priv->irq_mask = val;
+ ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret);
+ goto out;
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret);
+ goto out;
+ }
+
+ priv->tx_space = 2 * val;
+
+ port_priv->state = BR_STATE_FORWARDING;
+ ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr);
+ if (ret < 0) {
+ netdev_err(net_dev, "Could not set MAC address: %pM, %d\n",
+ net_dev->dev_addr, ret);
+ goto out;
+ }
+
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC,
+ ADIN1110_CONFIG1_SYNC);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+
+ phy_start(port_priv->phydev);
+
+ netif_start_queue(net_dev);
+
+ return 0;
+}
+
+static int adin1110_net_stop(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+ int ret;
+
+ mask = !port_priv->nr ? ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ;
+
+ /* Disable RX RDY IRQs */
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ netif_stop_queue(port_priv->netdev);
+ flush_work(&port_priv->tx_work);
+ phy_stop(port_priv->phydev);
+
+ return 0;
+}
+
+static void adin1110_tx_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+ struct sk_buff *txb;
+ int ret;
+
+ port_priv = container_of(work, struct adin1110_port_priv, tx_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+
+ while ((txb = skb_dequeue(&port_priv->txq))) {
+ ret = adin1110_write_fifo(port_priv, txb);
+ if (ret < 0)
+ dev_err_ratelimited(&priv->spidev->dev,
+ "Frame write error: %d\n", ret);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ netdev_tx_t netdev_ret = NETDEV_TX_OK;
+ u32 tx_space_needed;
+
+ tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN;
+ if (tx_space_needed > priv->tx_space) {
+ netif_stop_queue(dev);
+ netdev_ret = NETDEV_TX_BUSY;
+ } else {
+ priv->tx_space -= tx_space_needed;
+ skb_queue_tail(&port_priv->txq, skb);
+ }
+
+ schedule_work(&port_priv->tx_work);
+
+ return netdev_ret;
+}
+
+static void adin1110_ndo_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ storage->rx_packets = port_priv->rx_packets;
+ storage->tx_packets = port_priv->tx_packets;
+
+ storage->rx_bytes = port_priv->rx_bytes;
+ storage->tx_bytes = port_priv->tx_bytes;
+}
+
+static int adin1110_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN);
+ memcpy(ppid->id, priv->mii_bus_name, ppid->id_len);
+
+ return 0;
+}
+
+static int adin1110_ndo_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->nr);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct net_device_ops adin1110_netdev_ops = {
+ .ndo_open = adin1110_net_open,
+ .ndo_stop = adin1110_net_stop,
+ .ndo_eth_ioctl = adin1110_ioctl,
+ .ndo_start_xmit = adin1110_start_xmit,
+ .ndo_set_mac_address = adin1110_ndo_set_mac_address,
+ .ndo_set_rx_mode = adin1110_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = adin1110_ndo_get_stats64,
+ .ndo_get_port_parent_id = adin1110_port_get_port_parent_id,
+ .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name,
+};
+
+static void adin1110_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, "ADIN1110", sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static const struct ethtool_ops adin1110_ethtool_ops = {
+ .get_drvinfo = adin1110_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void adin1110_adjust_link(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev->link)
+ phy_print_status(phydev);
+}
+
+/* The PHY ID is mirrored in the MAC register map as well;
+ * reading it back verifies that the SPI connection works.
+ */
+static int adin1110_check_spi(struct adin1110_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != priv->cfg->phy_id_val) {
+ dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n",
+ priv->cfg->phy_id_val, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
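+/* Enable or disable hardware port-to-port forwarding. On disable,
+ * every learned (FDB) address slot is cleared, and each port's RX mode
+ * is refreshed to match the new state.
+ */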
+static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable)
+{
+ int ret;
+ int i;
+
+ priv->forwarding = enable;
+
+ if (!priv->forwarding) {
+ for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) {
+ ret = adin1110_clear_mac_address(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Forwarding is optimised when MAC runs in Cut Through mode. */
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ ADIN2111_PORT_CUT_THRU_EN,
+ priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = adin1110_setup_rx_mode(priv->ports[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
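+/* Bridge join/leave handling: hardware forwarding is offloaded only
+ * when adin1110_can_offload_forwarding() agrees (presumably once both
+ * ports sit under the same bridge), and the port takes over the
+ * bridge's MAC address so locally destined frames are still delivered.
+ */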
+static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = bridge;
+
+ if (adin1110_can_offload_forwarding(priv)) {
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, true);
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr);
+}
+
+static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = NULL;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, false);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
+static int adin1110_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ int ret = 0;
+
+ if (!adin1110_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = adin1110_port_bridge_join(port_priv, info->upper_dev);
+ else
+ ret = adin1110_port_bridge_leave(port_priv, info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block adin1110_netdevice_nb = {
+ .notifier_call = adin1110_netdevice_event,
+};
+
+static void adin1110_disconnect_phy(void *data)
+{
+ phy_disconnect(data);
+}
+
+static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->state = BR_STATE_FORWARDING;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_mac_address(port_priv->netdev,
+ port_priv->netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+
+ if (adin1110_can_offload_forwarding(priv))
+ ret = adin1110_hw_forwarding(priv, true);
+ else
+ ret = adin1110_setup_rx_mode(port_priv);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
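+/* 01:80:C2:00:00:00 is the IEEE 802.1D reserved group address used for
+ * STP BPDUs; in the blocking state only these frames reach the CPU.
+ */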
+static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv)
+{
+ u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_slot;
+ int ret;
+
+ port_priv->state = BR_STATE_BLOCKING;
+
+ mutex_lock(&priv->lock);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ ret = adin1110_clear_mac_address(priv, mac_slot);
+ if (ret < 0)
+ goto out;
+
+ ret = adin1110_hw_forwarding(priv, false);
+ if (ret < 0)
+ goto out;
+
+ /* Allow only BPDUs to be passed to the CPU */
+ memset(mask, 0xFF, ETH_ALEN);
+ port_rules = adin1110_port_rules(port_priv, true, false);
+ ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ mask, port_rules);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/* The ADIN1110/2111 has no native STP support.
+ * Listen for bridge core state changes and allow
+ * either all frames or only BPDUs to pass.
+ */
+static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv,
+ u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return adin1110_port_set_forwarding_state(port_priv);
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return adin1110_port_set_blocking_state(port_priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adin1110_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return adin1110_port_attr_stp_state_set(port_priv,
+ attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin1110_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(netdev, ptr,
+ adin1110_port_dev_check,
+ adin1110_port_attr_set);
+
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block adin1110_switchdev_blocking_notifier = {
+ .notifier_call = adin1110_switchdev_blocking_event,
+};
+
+static void adin1110_fdb_offload_notify(struct net_device *netdev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ netdev, &info.info, NULL);
+}
+
+static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ struct adin1110_port_priv *other_port;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_nr;
+ u32 val;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (!priv->forwarding)
+ return 0;
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ /* Find free FDB slot on device. */
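+ /* Each filter slot spans an UPR/LWR register pair, hence the stride
+ * of two; a slot whose UPR word reads zero is taken as free.
+ */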
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+ if (!val)
+ break;
+ }
+
+ if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS)
+ return -ENOMEM;
+
+ other_port = priv->ports[!port_priv->nr];
+ port_rules = adin1110_port_rules(port_priv, false, true);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ mask, port_rules);
+}
+
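+/* Read back one filter slot: the UPR register holds the upper 16 bits
+ * of the MAC address and LWR the lower 32, mirroring the layout used
+ * when the filters are written.
+ */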
+static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr)
+{
+ u32 val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be16(val, addr);
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be32(val, addr + 2);
+
+ return 0;
+}
+
+static int adin1110_fdb_del(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 addr[ETH_ALEN];
+ int mac_nr;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_mac(priv, mac_nr, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(addr, fdb->addr)) {
+ ret = adin1110_clear_mac_address(priv, mac_nr);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void adin1110_switchdev_event_work(struct work_struct *work)
+{
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct adin1110_port_priv *port_priv;
+ int ret;
+
+ switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
+ port_priv = switchdev_work->port_priv;
+
+ mutex_lock(&port_priv->priv->lock);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info);
+ if (!ret)
+ adin1110_fdb_offload_notify(port_priv->netdev,
+ &switchdev_work->fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ adin1110_fdb_del(port_priv, &switchdev_work->fdb_info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&port_priv->priv->lock);
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port_priv->netdev);
+}
+
+/* called under rcu_read_lock() */
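+/* FDB notifications arrive in atomic context, while register access
+ * goes over SPI and may sleep: copy what is needed with GFP_ATOMIC,
+ * hold the netdev and defer the actual update to system_long_wq.
+ */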
+static int adin1110_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!adin1110_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
+ switchdev_work->port_priv = port_priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(netdev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block adin1110_switchdev_notifier = {
+ .notifier_call = adin1110_switchdev_event,
+};
+
+static void adin1110_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+}
+
+static int adin1110_setup_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&adin1110_netdevice_nb);
+ if (ret < 0)
+ return ret;
+
+ ret = register_switchdev_notifier(&adin1110_switchdev_notifier);
+ if (ret < 0)
+ goto err_netdev;
+
+ ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ if (ret < 0)
+ goto err_sdev;
+
+ return 0;
+
+err_sdev:
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+
+err_netdev:
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+
+ return ret;
+}
+
+static int adin1110_probe_netdevs(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct adin1110_port_priv *port_priv;
+ struct net_device *netdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ netdev = devm_alloc_etherdev(dev, sizeof(*port_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ port_priv = netdev_priv(netdev);
+ port_priv->netdev = netdev;
+ port_priv->priv = priv;
+ port_priv->cfg = priv->cfg;
+ port_priv->nr = i;
+ priv->ports[i] = port_priv;
+ SET_NETDEV_DEV(netdev, dev);
+
+ ret = device_get_ethdev_address(dev, netdev);
+ if (ret < 0)
+ return ret;
+
+ netdev->irq = priv->spidev->irq;
+ INIT_WORK(&port_priv->tx_work, adin1110_tx_work);
+ INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work);
+ skb_queue_head_init(&port_priv->txq);
+
+ netif_carrier_off(netdev);
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->netdev_ops = &adin1110_netdev_ops;
+ netdev->ethtool_ops = &adin1110_ethtool_ops;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not find PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ port_priv->phydev = phy_connect(netdev,
+ phydev_name(port_priv->phydev),
+ adin1110_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy,
+ port_priv->phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* ADIN1110 INT_N pin will be used to signal the host */
+ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL,
+ adin1110_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = devm_register_netdev(dev, priv->ports[i]->netdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register network device.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adin1110_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct adin1110_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spidev = spi;
+ priv->cfg = &adin1110_cfgs[dev_id->driver_data];
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+
+ mutex_init(&priv->lock);
+ spin_lock_init(&priv->state_lock);
+
+ /* use of CRC on control and data transactions is pin dependent */
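+ /* 0x7 is the conventional CRC-8 polynomial x^8 + x^2 + x + 1;
+ * crc8_populate_msb() precomputes its MSB-first lookup table.
+ */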
+ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc");
+ if (priv->append_crc)
+ crc8_populate_msb(adin1110_crc_table, 0x7);
+
+ ret = adin1110_check_spi(priv);
+ if (ret < 0) {
+ dev_err(dev, "Probe SPI Read check failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_register_mdiobus(priv, dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not register MDIO bus %d\n", ret);
+ return ret;
+ }
+
+ return adin1110_probe_netdevs(priv);
+}
+
+static const struct of_device_id adin1110_match_table[] = {
+ { .compatible = "adi,adin1110" },
+ { .compatible = "adi,adin2111" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adin1110_match_table);
+
+static const struct spi_device_id adin1110_spi_id[] = {
+ { .name = "adin1110", .driver_data = ADIN1110_MAC },
+ { .name = "adin2111", .driver_data = ADIN2111_MAC },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adin1110_spi_id);
+
+static struct spi_driver adin1110_driver = {
+ .driver = {
+ .name = "adin1110",
+ .of_match_table = adin1110_match_table,
+ },
+ .probe = adin1110_probe,
+ .id_table = adin1110_spi_id,
+};
+
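+/* Register the netdevice and switchdev notifiers before the SPI
+ * driver: devices can probe (and register their net devices) during
+ * spi_register_driver() itself, so the notifiers must already be in
+ * place by then; adin1110_exit() tears both down again.
+ */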
+static int __init adin1110_driver_init(void)
+{
+ int ret;
+
+ ret = adin1110_setup_notifiers();
+ if (ret < 0)
+ return ret;
+
+ ret = spi_register_driver(&adin1110_driver);
+ if (ret < 0) {
+ adin1110_unregister_notifiers();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit adin1110_exit(void)
+{
+ adin1110_unregister_notifiers();
+ spi_unregister_driver(&adin1110_driver);
+}
+module_init(adin1110_driver_init);
+module_exit(adin1110_exit);
+
+MODULE_DESCRIPTION("ADIN1110 Network driver");
+MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 447dc64a17e5..e104fb02817d 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1112,9 +1112,9 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strlcpy(info->driver, dev_driver_string(greth->dev),
+ strscpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
@@ -1507,7 +1507,7 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* setup NAPI */
- netif_napi_add(dev, &greth->napi, greth_poll, 64);
+ netif_napi_add(dev, &greth->napi, greth_poll);
return 0;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 537e6a85e18d..5fab589b3ddf 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -1106,7 +1106,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mif_ctrl);
writel(0, &rxmac->space_avail);
- /* Initialize the the mif_ctrl register
+ /* Initialize the mif_ctrl register
* bit 3: Receive code error. One or more nibbles were signaled as
* errors during the reception of the packet. Clear this
* bit in Gigabit, set it in 100Mbit. This was derived
@@ -2413,11 +2413,13 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
kfree(tx_ring->tcb_ring);
}
+#define MAX_TX_DESC_PER_PKT 24
+
/* nic_send_packet - NIC specific send handler for version B silicon. */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
u32 i;
- struct tx_desc desc[24];
+ struct tx_desc desc[MAX_TX_DESC_PER_PKT];
u32 frag = 0;
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
@@ -2432,9 +2434,6 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* more than 5 fragments.
*/
- /* nr_frags should be no more than 18. */
- BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
-
memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
for (i = 0; i < nr_frags; i++) {
@@ -2953,8 +2952,8 @@ static void et131x_get_drvinfo(struct net_device *netdev,
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -3762,6 +3761,13 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
+ /* This driver does not support TSO, so it is very
+ * unlikely that this condition is true.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
+ if (skb_linearize(skb))
+ goto drop_err;
+ }
/* stop the queue if it's getting full */
if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
@@ -3963,7 +3969,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
et131x_init_send(adapter);
- netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, et131x_poll);
eth_hw_addr_set(netdev, adapter->addr);
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 3add305d34b4..4eecbdfff3ff 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -265,8 +265,6 @@
#define SLIC_NUM_STAT_DESC_ARRAYS 4
#define SLIC_INVALID_STAT_DESC_IDX 0xffffffff
-#define SLIC_NAPI_WEIGHT 64
-
#define SLIC_UPR_LSTAT 0
#define SLIC_UPR_CONFIG 1
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 1fc9a1cd3ef8..a30d0f172986 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1531,8 +1531,8 @@ static void slic_get_drvinfo(struct net_device *dev,
{
struct slic_device *sdev = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops slic_ethtool_ops = {
@@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 849de4564709..a94c62956eed 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -106,9 +106,9 @@ static void emac_update_speed(struct net_device *dev)
/* set EMAC SPEED, depend on PHY */
reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
- reg_val &= ~(0x1 << 8);
+ reg_val &= ~EMAC_MAC_SUPP_100M;
if (db->speed == SPEED_100)
- reg_val |= 1 << 8;
+ reg_val |= EMAC_MAC_SUPP_100M;
writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
}
@@ -264,7 +264,7 @@ static void emac_dma_done_callback(void *arg)
/* re enable interrupt */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0x01 << 8);
+ reg_val |= EMAC_INT_CTL_RX_EN;
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -331,8 +331,8 @@ prepare_err:
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static u32 emac_get_msglevel(struct net_device *dev)
@@ -429,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* initial EMAC */
/* flush RX FIFO */
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val |= 0x8;
+ reg_val |= EMAC_RX_CTL_FLUSH_FIFO;
writel(reg_val, db->membase + EMAC_RX_CTL_REG);
udelay(1);
@@ -441,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* set MII clock */
reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
- reg_val &= (~(0xf << 2));
- reg_val |= (0xD << 2);
+ reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK;
+ reg_val |= EMAC_MAC_MCFG_MII_CLKD_72;
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
/* clear RX counter */
@@ -506,7 +506,7 @@ static void emac_init_device(struct net_device *dev)
/* enable RX/TX0/RX Hlevel interrup */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
spin_unlock_irqrestore(&db->lock, flags);
@@ -637,7 +637,9 @@ static void emac_rx(struct net_device *dev)
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
/* had one stuck? */
@@ -669,7 +671,9 @@ static void emac_rx(struct net_device *dev)
writel(reg_val | EMAC_CTL_RX_EN,
db->membase + EMAC_CTL_REG);
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -783,20 +787,20 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
}
/* Transmit Interrupt check */
- if (int_status & (0x01 | 0x02))
+ if (int_status & EMAC_INT_STA_TX_COMPLETE)
emac_tx_done(dev, db, int_status);
- if (int_status & (0x04 | 0x08))
+ if (int_status & EMAC_INT_STA_TX_ABRT)
netdev_info(dev, " ab : %x\n", int_status);
/* Re-enable interrupt mask */
if (db->emacrx_completed_flag == 1) {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
} else {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
@@ -1068,6 +1072,7 @@ out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_dispose_mapping:
irq_dispose_mapping(ndev->irq);
+ dma_release_channel(db->rx_chan);
out_iounmap:
iounmap(db->membase);
out:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
index 38c72d9ec600..90bd9ad77607 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.h
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -38,6 +38,7 @@
#define EMAC_RX_CTL_REG (0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3)
#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
@@ -61,7 +62,21 @@
#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
#define EMAC_RX_FBC_REG (0x50)
#define EMAC_INT_CTL_REG (0x54)
+#define EMAC_INT_CTL_RX_EN (1 << 8)
+#define EMAC_INT_CTL_TX0_EN (1)
+#define EMAC_INT_CTL_TX1_EN (1 << 1)
+#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN)
+#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2)
+#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3)
+#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN)
#define EMAC_INT_STA_REG (0x58)
+#define EMAC_INT_STA_TX0_COMPLETE (0x1)
+#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1)
+#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE)
+#define EMAC_INT_STA_TX0_ABRT (0x1 << 2)
+#define EMAC_INT_STA_TX1_ABRT (0x1 << 3)
+#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT)
+#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8)
#define EMAC_MAC_CTL0_REG (0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3)
@@ -87,8 +102,11 @@
#define EMAC_MAC_CLRT_RM (0x0f)
#define EMAC_MAC_MAXF_REG (0x70)
#define EMAC_MAC_SUPP_REG (0x74)
+#define EMAC_MAC_SUPP_100M (0x1 << 8)
#define EMAC_MAC_TEST_REG (0x78)
#define EMAC_MAC_MCFG_REG (0x7c)
+#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2)
+#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2)
#define EMAC_MAC_A0_REG (0x98)
#define EMAC_MAC_A1_REG (0x9c)
#define EMAC_MAC_A2_REG (0xa0)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 22fe98555b24..d7762da8b2c0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2691,12 +2691,12 @@ static void ace_get_drvinfo(struct net_device *dev,
{
struct ace_private *ap = netdev_priv(dev);
- strlcpy(info->driver, "acenic", sizeof(info->driver));
+ strscpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strscpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 914e56b91467..dd7fd41ccde5 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -3,6 +3,8 @@ config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
depends on HAS_DMA
select PHYLIB
+ select PHYLINK
+ select PCS_ALTERA_TSE
help
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index db97170da8c7..7f247ccbe6ba 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -513,7 +513,7 @@ static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- /* if DMA is busy, wait for current transactino to finish */
+ /* if DMA is busy, wait for current transaction to finish */
while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
& SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index f17acfb579a0..db5eed06e92d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
@@ -109,17 +110,6 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
-/* SGMII PCS register addresses
- */
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
-#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_1 0x13
-#define SGMII_PCS_IF_MODE 0x14
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
-#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
-
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -423,6 +413,9 @@ struct altera_tse_private {
void __iomem *tx_dma_csr;
void __iomem *tx_dma_desc;
+ /* SGMII PCS address space */
+ void __iomem *pcs_base;
+
/* Rx buffers queue */
struct tse_buffer *rx_ring;
u32 rx_cons;
@@ -480,6 +473,10 @@ struct altera_tse_private {
u32 msg_enable;
struct altera_dmaops *dmaops;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
};
/* Function prototypes
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 4299f1301149..81313c85833e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -199,9 +199,9 @@ static int tse_reglen(struct net_device *dev)
static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *regbuf)
{
- int i;
struct altera_tse_private *priv = netdev_priv(dev);
u32 *buf = regbuf;
+ int i;
/* Set version to a known value, so ethtool knows
* how to do any special formatting of this data.
@@ -221,6 +221,22 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
+static int tse_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static int tse_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
@@ -231,8 +247,9 @@ static const struct ethtool_ops tse_ethtool_ops = {
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = tse_ethtool_get_link_ksettings,
+ .set_link_ksettings = tse_ethtool_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 993b2fb42961..7633b227b2ca 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/pcs-altera-tse.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
@@ -72,7 +73,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
*/
#define ALTERA_RXDMABUFFER_SIZE 2048
-/* Allow network stack to resume queueing packets after we've
+/* Allow network stack to resume queuing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
@@ -86,27 +87,6 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
-/* PCS Register read/write functions
- */
-static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
-{
- return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
-}
-
-static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
- u16 value)
-{
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
-}
-
-/* Check PCS scratch memory */
-static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
-{
- sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
- return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
-}
-
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -141,10 +121,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
struct device_node *mdio_node = NULL;
- struct mii_bus *mdio = NULL;
struct device_node *child_node = NULL;
+ struct mii_bus *mdio = NULL;
+ int ret;
for_each_child_of_node(priv->device->of_node, child_node) {
if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
@@ -163,7 +143,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +161,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +171,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
@@ -232,8 +216,8 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
static void tse_free_rx_buffer(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct sk_buff *skb = rxbuffer->skb;
dma_addr_t dma_addr = rxbuffer->dma_addr;
+ struct sk_buff *skb = rxbuffer->skb;
if (skb != NULL) {
if (dma_addr)
@@ -354,6 +338,7 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
u16 vid;
+
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
!__vlan_get_tag(skb, &vid)) {
eth_hdr = (struct ethhdr *)skb->data;
@@ -367,10 +352,10 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static int tse_rx(struct altera_tse_private *priv, int limit)
{
- unsigned int count = 0;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
unsigned int next_entry;
+ unsigned int count = 0;
struct sk_buff *skb;
- unsigned int entry = priv->rx_cons % priv->rx_ring_size;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
@@ -390,7 +375,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
- /* DMA trasfer from TSE starts with 2 aditional bytes for
+ /* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
@@ -444,10 +429,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
static int tse_tx_complete(struct altera_tse_private *priv)
{
unsigned int txsize = priv->tx_ring_size;
- u32 ready;
- unsigned int entry;
struct tse_buffer *tx_buff;
+ unsigned int entry;
int txcomplete = 0;
+ u32 ready;
spin_lock(&priv->tx_lock);
@@ -493,8 +478,8 @@ static int tse_poll(struct napi_struct *napi, int budget)
{
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
- int rxcomplete = 0;
unsigned long int flags;
+ int rxcomplete = 0;
tse_tx_complete(priv);
@@ -557,13 +542,13 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
unsigned int txsize = priv->tx_ring_size;
- unsigned int entry;
- struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int nopaged_len = skb_headlen(skb);
+ struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
+ unsigned int entry;
spin_lock_bh(&priv->tx_lock);
@@ -615,117 +600,6 @@ out:
return ret;
}
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void altera_tse_adjust_link(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- /* only change config if there is a link */
- spin_lock(&priv->mac_cfg_lock);
- if (phydev->link) {
- /* Read old config */
- u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
-
- /* Check duplex */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- cfg_reg |= MAC_CMDCFG_HD_ENA;
- else
- cfg_reg &= ~MAC_CMDCFG_HD_ENA;
-
- netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
- dev->name, phydev->duplex);
-
- priv->oldduplex = phydev->duplex;
- }
-
- /* Check speed */
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- cfg_reg |= MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 100:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 10:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg |= MAC_CMDCFG_ENA_10;
- break;
- default:
- if (netif_msg_link(priv))
- netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
- priv->oldspeed = phydev->speed;
- }
- iowrite32(cfg_reg, &priv->mac_dev->command_config);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
-
- spin_unlock(&priv->mac_cfg_lock);
-}
-static struct phy_device *connect_local_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = NULL;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-
- if (priv->phy_addr != POLL_PHY) {
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
-
- netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
-
- phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
- priv->phy_iface);
- if (IS_ERR(phydev)) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
-
- } else {
- int ret;
- phydev = phy_find_first(priv->mdio);
- if (phydev == NULL) {
- netdev_err(dev, "No PHY found\n");
- return phydev;
- }
-
- ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
- priv->phy_iface);
- if (ret != 0) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
- }
- return phydev;
-}
-
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
@@ -764,91 +638,6 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
return 0;
}
-/* Initialize driver's PHY state, and attach to the PHY
- */
-static int init_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *phynode;
- bool fixed_link = false;
- int rc = 0;
-
- /* Avoid init phy in case of no phy present */
- if (!priv->phy_iface)
- return 0;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
- phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
-
- if (!phynode) {
- /* check if a fixed-link is defined in device-tree */
- if (of_phy_is_fixed_link(priv->device->of_node)) {
- rc = of_phy_register_fixed_link(priv->device->of_node);
- if (rc < 0) {
- netdev_err(dev, "cannot register fixed PHY\n");
- return rc;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phynode = of_node_get(priv->device->of_node);
- fixed_link = true;
-
- netdev_dbg(dev, "fixed-link detected\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link,
- 0, priv->phy_iface);
- } else {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev, "No phy-handle nor local mdio specified\n");
- return -ENODEV;
- }
- phydev = connect_local_phy(dev);
- }
- } else {
- netdev_dbg(dev, "phy-handle found\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link, 0, priv->phy_iface);
- }
- of_node_put(phynode);
-
- if (!phydev) {
- netdev_err(dev, "Could not find the PHY\n");
- if (fixed_link)
- of_phy_deregister_fixed_link(priv->device->of_node);
- return -ENODEV;
- }
-
- /* Stop Advertising 1000BASE Capability if interface is not GMII
- */
- if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
- (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phy_set_max_speed(phydev, SPEED_100);
-
- /* Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well. If a fixed-link is used the phy_id is always 0.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if ((phydev->phy_id == 0) && !fixed_link) {
- netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
- phy_disconnect(phydev);
- return -ENODEV;
- }
-
- netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
- phydev->mdio.addr, phydev->phy_id, phydev->link);
-
- return 0;
-}
-
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
@@ -1008,8 +797,8 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int i;
struct netdev_hw_addr *ha;
+ int i;
/* clear the hash filter */
for (i = 0; i < 64; i++)
@@ -1044,7 +833,7 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
@@ -1064,7 +853,7 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode(struct net_device *dev)
{
@@ -1083,74 +872,14 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Initialise (if necessary) the SGMII PCS component
- */
-static int init_sgmii_pcs(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- int n;
- unsigned int tmp_reg = 0;
-
- if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
- return 0; /* Nothing to do, not in SGMII mode */
-
- /* The TSE SGMII PCS block looks a little like a PHY, it is
- * mapped into the zeroth MDIO space of the MAC and it has
- * ID registers like a PHY would. Sadly this is often
- * configured to zeroes, so don't be surprised if it does
- * show 0x00000000.
- */
-
- if (sgmii_pcs_scratch_test(priv, 0x0000) &&
- sgmii_pcs_scratch_test(priv, 0xffff) &&
- sgmii_pcs_scratch_test(priv, 0xa5a5) &&
- sgmii_pcs_scratch_test(priv, 0x5a5a)) {
- netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
- sgmii_pcs_read(priv, MII_PHYSID1),
- sgmii_pcs_read(priv, MII_PHYSID2));
- } else {
- netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
- return -ENOMEM;
- }
-
- /* Starting on page 5-29 of the MegaCore Function User Guide
- * Set SGMII Link timer to 1.6ms
- */
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
-
- /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
- sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
-
- /* Enable Autonegotiation */
- tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
- tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
-
- /* Reset PCS block */
- tmp_reg |= BMCR_RESET;
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
- for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
- if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
- netdev_info(dev, "SGMII PCS block initialised OK\n");
- return 0;
- }
- udelay(1);
- }
-
- /* We failed to reset the block, return a timeout */
- netdev_err(dev, "SGMII PCS block reset failed.\n");
- return -ETIMEDOUT;
-}
-
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned long flags;
int ret = 0;
int i;
- unsigned long int flags;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv);
@@ -1167,14 +896,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "TSE revision %x\n", priv->revision);
spin_lock(&priv->mac_cfg_lock);
- /* no-op if MAC not operating in SGMII mode*/
- ret = init_sgmii_pcs(dev);
- if (ret) {
- netdev_err(dev,
- "Cannot init the SGMII PCS (error: %d)\n", ret);
- spin_unlock(&priv->mac_cfg_lock);
- goto phy_error;
- }
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
@@ -1232,8 +953,12 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (dev->phydev)
- phy_start(dev->phydev);
+ ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
+ if (ret) {
+ netdev_err(dev, "could not connect phylink (%d)\n", ret);
+ goto tx_request_irq_error;
+ }
+ phylink_start(priv->phylink);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1261,13 +986,10 @@ phy_error:
static int tse_shutdown(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
unsigned long int flags;
+ int ret;
- /* Stop the PHY */
- if (dev->phydev)
- phy_stop(dev->phydev);
-
+ phylink_stop(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1313,11 +1035,79 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static void alt_tse_mac_an_restart(struct phylink_config *config)
+{
+}
+
+static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->mac_cfg_lock);
+ reset_mac(priv);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static void alt_tse_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void alt_tse_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+ ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= MAC_CMDCFG_HD_ENA;
+
+ if (speed == SPEED_1000)
+ ctrl |= MAC_CMDCFG_ETH_SPEED;
+ else if (speed == SPEED_10)
+ ctrl |= MAC_CMDCFG_ENA_10;
+
+ spin_lock(&priv->mac_cfg_lock);
+ csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX)
+ return priv->pcs;
+ else
+ return NULL;
+}
+
+static const struct phylink_mac_ops alt_tse_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_an_restart = alt_tse_mac_an_restart,
+ .mac_config = alt_tse_mac_config,
+ .mac_link_down = alt_tse_mac_link_down,
+ .mac_link_up = alt_tse_mac_link_up,
+ .mac_select_pcs = alt_tse_select_pcs,
+};
+
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
- struct resource *region;
struct device *device = &pdev->dev;
+ struct resource *region;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
@@ -1346,13 +1136,15 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- struct net_device *ndev;
- int ret = -ENODEV;
+ const struct of_device_id *of_id = NULL;
+ struct altera_tse_private *priv;
struct resource *control_port;
struct resource *dma_res;
- struct altera_tse_private *priv;
+ struct resource *pcs_res;
+ struct net_device *ndev;
void __iomem *descmap;
- const struct of_device_id *of_id = NULL;
+ int pcs_reg_width = 2;
+ int ret = -ENODEV;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1463,6 +1255,17 @@ static int altera_tse_probe(struct platform_device *pdev)
if (ret)
goto err_free_netdev;
+ /* SGMII PCS address space. The location varies with how the IP is
+ * integrated: a dedicated "pcs" resource may describe it but, if not,
+ * fall back to the mdio_phy0 registers inside the MAC's address space.
+ */
+ ret = request_and_map(pdev, "pcs", &pcs_res,
+ &priv->pcs_base);
+ if (ret) {
+ priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
+ pcs_reg_width = 4;
+ }
/* Rx IRQ */
priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
@@ -1562,7 +1365,7 @@ static int altera_tse_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
- netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, tse_poll);
spin_lock_init(&priv->mac_cfg_lock);
spin_lock_init(&priv->tx_lock);
@@ -1586,11 +1389,32 @@ static int altera_tse_probe(struct platform_device *pdev)
(unsigned long) control_port->start, priv->rx_irq,
priv->tx_irq);
- ret = init_phy(ndev);
- if (ret != 0) {
- netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ priv->pcs = alt_tse_pcs_create(ndev, priv->pcs_base, pcs_reg_width);
+
+ priv->phylink_config.dev = &ndev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(priv->device->of_node),
+ priv->phy_iface, &alt_tse_phylink_ops);
+ if (IS_ERR(priv->phylink)) {
+ dev_err(&pdev->dev, "failed to create phylink\n");
+ ret = PTR_ERR(priv->phylink);
goto err_init_phy;
}
+
return 0;
err_init_phy:
@@ -1610,16 +1434,10 @@ static int altera_tse_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
-
- if (of_phy_is_fixed_link(priv->device->of_node))
- of_phy_deregister_fixed_link(priv->device->of_node);
- }
-
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index b7d772f2dcbb..3c2e32fb7389 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -3,11 +3,12 @@
* Copyright (C) 2014 Altera Corporation. All rights reserved
*/
-#include <linux/kernel.h>
-
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 39242c5a1729..98d6386b7f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -462,8 +462,8 @@ static void ena_get_drvinfo(struct net_device *dev,
{
struct ena_adapter *adapter = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 53080fd143dc..d350eeec8bad 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -31,8 +31,6 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
-#define ENA_NAPI_BUDGET 64
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
@@ -1400,10 +1398,9 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
struct sk_buff *skb;
if (!first_frag)
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
+ skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
else
- skb = build_skb(first_frag, ENA_PAGE_SIZE);
+ skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
@@ -2268,10 +2265,8 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
- netif_napi_add(adapter->netdev,
- &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- ENA_NAPI_BUDGET);
+ netif_napi_add(adapter->netdev, &napi->napi,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
if (!ENA_IS_XDP_INDEX(adapter, i)) {
napi->rx_ring = &adapter->rx_ring[i];
@@ -3169,7 +3164,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strlcpy(host_info->kernel_ver_str, utsname()->version,
+ strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 899c8a2a34b6..ab42f75b9413 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -130,16 +130,6 @@ config PCMCIA_NMCLAN
To compile this driver as a module, choose M here: the module will be
called nmclan_cs. If unsure, say N.
-config NI65
- tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM && !PPC32
- select NETDEV_LEGACY_INIT
- help
- If you have a network (Ethernet) card of this type, say Y here.
-
- To compile this driver as a module, choose M here. The module
- will be called ni65.
-
config SUN3LANCE
tristate "Sun3/Sun3x on-board LANCE support"
depends on (SUN3 || SUN3X)
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 0d2f478b49a5..42742afe9115 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_LANCE) += lance.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
-obj-$(CONFIG_NI65) += ni65.o
obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3a351d3396bf..68983b717145 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -695,7 +695,7 @@ static int a2065_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct lance_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct lance_regs));
release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9421afb950f7..ea6cfc2095e1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -43,7 +43,7 @@ Revision History:
3.0.4 12/09/2003
1. Added set_mac_address routine for bonding driver support.
2. Tested the driver for bonding support
- 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth
+ 3. Bug fix: Fixed mismatch in actual receive buffer length and length
indicated to the h/w.
4. Modified amd8111e_rx() routine to receive all the received packets
in the first interrupt.
@@ -185,24 +185,23 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option) {
-
- default:
- case SPEED_AUTONEG: /* advertise all values */
- tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- break;
- case SPEED10_HALF:
- tmp |= ADVERTISE_10HALF;
- break;
- case SPEED10_FULL:
- tmp |= ADVERTISE_10FULL;
- break;
- case SPEED100_HALF:
- tmp |= ADVERTISE_100HALF;
- break;
- case SPEED100_FULL:
- tmp |= ADVERTISE_100FULL;
- break;
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
}
if(advert != tmp)
@@ -237,7 +236,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff != NULL) {
+ if (rx_skbuff) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -1084,7 +1083,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if (unlikely(dev == NULL))
+ if (unlikely(!dev))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1109,7 +1108,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
if (napi_schedule_prep(&lp->napi)) {
- /* Disable receive interupts */
+ /* Disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
@@ -1364,10 +1363,10 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1554,7 +1553,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
- /* Adapter is already stoped/suspended/interrupt-disabled */
+ /* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
@@ -1828,11 +1827,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
dev->min_mtu = AMD8111E_MIN_MTU;
dev->max_mtu = AMD8111E_MAX_MTU;
- netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+ netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
-#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-#endif
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
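amd8111e polls with a weight of 32 rather than the default, so the tree-wide netif_napi_add() signature change gives it netif_napi_add_weight() instead; drivers that used NAPI_POLL_WEIGHT (or 64) simply drop the argument. A sketch of the rule, with example_ names as placeholders:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
        /* ... process up to budget packets, then complete ... */
        napi_complete_done(napi, 0);
        return 0;
}

static void example_register_default(struct net_device *dev,
                                     struct napi_struct *napi)
{
        /* default weight (NAPI_POLL_WEIGHT == 64) */
        netif_napi_add(dev, napi, example_poll);
}

static void example_register_light(struct net_device *dev,
                                   struct napi_struct *napi)
{
        /* keep a deliberately non-default weight */
        netif_napi_add_weight(dev, napi, example_poll, 32);
}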
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 37da79da5f5e..9d570adb295b 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -600,7 +600,7 @@ typedef enum {
#define CSTATE 1
#define SSTATE 2
-/* Assume contoller gets data 10 times the maximum processing time */
+/* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
/* amd8111e descriptor flag definitions */
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4ea7b9f3c424..38153e633231 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,7 @@ static int ariadne_rx(struct net_device *dev)
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
@@ -731,7 +731,7 @@ static int ariadne_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct Am79C960));
release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 27869164c6e6..3222c48ce6ae 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -581,15 +581,15 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* Get the ethernet address */
switch( lp->cardtype ) {
- case OLD_RIEBL:
+ case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
- case NEW_RIEBL:
+ case NEW_RIEBL:
lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
eth_hw_addr_set(dev, addr);
break;
- case PAM_CARD:
+ case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
addr[i] =
@@ -854,7 +854,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
int csr0, boguscnt = 10;
int handled = 0;
- if (dev == NULL) {
+ if (!dev) {
DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
return IRQ_NONE;
}
@@ -995,7 +995,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index c6f003975621..c5cec4e79489 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -650,7 +650,7 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -786,7 +786,7 @@ static int au1000_rx(struct net_device *dev)
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
continue;
}
@@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev)
pr_cont("\n");
}
}
- prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE;
aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
wmb(); /* drain writebuffer */
@@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
ps->tx_packets++;
ps->tx_bytes += ptxd->len;
- ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE;
wmb(); /* drain writebuffer */
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
@@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0);
+ aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
@@ -1199,7 +1199,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL) {
+ if (!aup->mii_bus) {
dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
@@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev)
for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
- pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
+ pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i;
pDB++;
}
aup->pDBfree = pDBfree;
@@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->rx_db_inuse[i] = pDB;
}
@@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
}
@@ -1284,7 +1284,7 @@ static int au1000_probe(struct platform_device *pdev)
return 0;
err_out:
- if (aup->mii_bus != NULL)
+ if (aup->mii_bus)
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
@@ -1310,7 +1310,7 @@ err_remap2:
iounmap(aup->mac);
err_remap1:
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:
@@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev)
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
iounmap(aup->macdma);
iounmap(aup->mac);
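The au1000 changes retire two old assumptions: that a CPU virtual address fits in a u32, and that virt_to_bus() on a coherent allocation yields the device address. dma_alloc_coherent() already returns both sides of the mapping, so the driver now keeps void *vaddr plus dma_addr_t dma_addr, derives each per-buffer pair from the same byte offset, and truncates with lower_32_bits() only at the point where a 32-bit descriptor word is written. A sketch of that pattern (struct and names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

struct example_db {
        void *vaddr;            /* CPU view of the buffer */
        dma_addr_t dma_addr;    /* device view of the buffer */
};

static void example_slice_pool(struct example_db *db, void *pool_vaddr,
                               dma_addr_t pool_dma, unsigned int index,
                               unsigned int buf_size)
{
        db->vaddr = pool_vaddr + index * buf_size;
        db->dma_addr = pool_dma + index * buf_size;
}

static u32 example_desc_word(const struct example_db *db, u32 flags)
{
        /* descriptor fields are 32 bits wide; truncate explicitly */
        return lower_32_bits(db->dma_addr) | flags;
}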
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index e3a3ed29db61..2489c2f4fd8a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,8 +106,8 @@ struct au1000_private {
struct mac_reg *mac; /* mac registers */
u32 *enable; /* address of MAC Enable Register */
void __iomem *macdma; /* base of MAC DMA port */
- u32 vaddr; /* virtual address of rx/tx buffers */
- dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+ void *vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..ec8df05e7bf6 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
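netdev->dev_addr is no longer directly writable by drivers, so declance now gathers the station address into a stack buffer and commits it through eth_hw_addr_set(), which copies it and keeps the core's address tracking consistent. The 4-byte ESAR stride is from the hunk above; the rest of this sketch is illustrative:

#include <linux/etherdevice.h>

static void example_set_mac_from_esar(struct net_device *dev,
                                      const u8 *esar)
{
        u8 addr[ETH_ALEN];
        int i;

        /* the address ROM exposes one address byte per 32-bit word */
        for (i = 0; i < ETH_ALEN; i++)
                addr[i] = esar[i * 4];
        eth_hw_addr_set(dev, addr);
}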
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 462016666752..fb8686214a32 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -880,7 +880,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
rx_buff = skb->data;
else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
- if (rx_buff == NULL)
+ if (!rx_buff)
lp->rx_ring[i].base = 0;
else
lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
@@ -1186,7 +1186,7 @@ lance_rx(struct net_device *dev)
else
{
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
+ if (!skb)
{
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
for (i=0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
deleted file mode 100644
index 8ba579b89b75..000000000000
--- a/drivers/net/ethernet/amd/ni65.c
+++ /dev/null
@@ -1,1251 +0,0 @@
-/*
- * ni6510 (am7990 'lance' chip) driver for Linux-net-3
- * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
- * copyrights (c) 1994,1995,1996 by M.Hipp
- *
- * This driver can handle the old ni6510 board and the newer ni6510
- * EtherBlaster. (probably it also works with every full NE2100
- * compatible card)
- *
- * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers the Linux-kernel.
- *
- * comments/bugs/suggestions can be sent to:
- * Michael Hipp
- * email: hippm@informatik.uni-tuebingen.de
- *
- * sources:
- * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
- * and from the original drivers by D.Becker
- *
- * known problems:
- * - on some PCI boards (including my own) the card/board/ISA-bridge has
- * problems with bus master DMA. This results in lotsa overruns.
- * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
- * the XMT and RCV_VIA_SKB option .. this reduces driver performance.
- * Or just play with your BIOS options to optimize ISA-DMA access.
- * Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE
- * defines -> please report me your experience then
- * - Harald reported for ASUS SP3G mainboards, that you should use
- * the 'optimal settings' from the user's manual on page 3-12!
- *
- * credits:
- * thanx to Jason Sullivan for sending me a ni6510 card!
- * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
- *
- * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
- * average: FTP -> 8384421 bytes received in 8.5 seconds
- * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
- * peak: FTP -> 8384421 bytes received in 7.5 seconds
- * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
- */
-
-/*
- * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
- * 96.Sept.29: virt_to_bus stuff added for new memory modell
- * 96.April.29: Added Harald Koenig's Patches (MH)
- * 96.April.13: enhanced error handling .. more tests (MH)
- * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
- * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
- * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
- * hopefully no more 16MB limit
- *
- * 95.Nov.18: multicast tweaked (AC).
- *
- * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
- *
- * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "ni65.h"
-
-/*
- * the current setting allows an acceptable performance
- * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
- * the header of this file
- * 'invert' the defines for max. performance. This may cause DMA problems
- * on some boards (e.g on my ASUS SP3G)
- */
-#undef XMT_VIA_SKB
-#undef RCV_VIA_SKB
-#define RCV_PARANOIA_CHECK
-
-#define MID_PERFORMANCE
-
-#if defined( LOW_PERFORMANCE )
- static int isa0=7,isa1=7,csr80=0x0c10;
-#elif defined( MID_PERFORMANCE )
- static int isa0=5,isa1=5,csr80=0x2810;
-#else /* high performance */
- static int isa0=4,isa1=4,csr80=0x0017;
-#endif
-
-/*
- * a few card/vendor specific defines
- */
-#define NI65_ID0 0x00
-#define NI65_ID1 0x55
-#define NI65_EB_ID0 0x52
-#define NI65_EB_ID1 0x44
-#define NE2100_ID0 0x57
-#define NE2100_ID1 0x57
-
-#define PORT p->cmdr_addr
-
-/*
- * buffer configuration
- */
-#if 1
-#define RMDNUM 16
-#define RMDNUMMASK 0x80000000
-#else
-#define RMDNUM 8
-#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
-#endif
-
-#if 0
-#define TMDNUM 1
-#define TMDNUMMASK 0x00000000
-#else
-#define TMDNUM 4
-#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
-#endif
-
-/* slightly oversized */
-#define R_BUF_SIZE 1544
-#define T_BUF_SIZE 1544
-
-/*
- * lance register defines
- */
-#define L_DATAREG 0x00
-#define L_ADDRREG 0x02
-#define L_RESET 0x04
-#define L_CONFIG 0x05
-#define L_BUSIF 0x06
-
-/*
- * to access the lance/am7990-regs, you have to write
- * reg-number into L_ADDRREG, then you can access it using L_DATAREG
- */
-#define CSR0 0x00
-#define CSR1 0x01
-#define CSR2 0x02
-#define CSR3 0x03
-
-#define INIT_RING_BEFORE_START 0x1
-#define FULL_RESET_ON_ERROR 0x2
-
-#if 0
-#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
- outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
-#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
- inw(PORT+L_DATAREG))
-#if 0
-#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
-#else
-#define writedatareg(val) { writereg(val,CSR0); }
-#endif
-#else
-#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
-#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
-#define writedatareg(val) { writereg(val,CSR0); }
-#endif
-
-static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
-
-static struct card {
- unsigned char id0,id1;
- short id_offset;
- short total_size;
- short cmd_offset;
- short addr_offset;
- unsigned char *vendor_id;
- char *cardname;
- unsigned long config;
-} cards[] = {
- {
- .id0 = NI65_ID0,
- .id1 = NI65_ID1,
- .id_offset = 0x0e,
- .total_size = 0x10,
- .cmd_offset = 0x0,
- .addr_offset = 0x8,
- .vendor_id = ni_vendor,
- .cardname = "ni6510",
- .config = 0x1,
- },
- {
- .id0 = NI65_EB_ID0,
- .id1 = NI65_EB_ID1,
- .id_offset = 0x0e,
- .total_size = 0x18,
- .cmd_offset = 0x10,
- .addr_offset = 0x0,
- .vendor_id = ni_vendor,
- .cardname = "ni6510 EtherBlaster",
- .config = 0x2,
- },
- {
- .id0 = NE2100_ID0,
- .id1 = NE2100_ID1,
- .id_offset = 0x0e,
- .total_size = 0x18,
- .cmd_offset = 0x10,
- .addr_offset = 0x0,
- .vendor_id = NULL,
- .cardname = "generic NE2100",
- .config = 0x0,
- },
-};
-#define NUM_CARDS 3
-
-struct priv
-{
- struct rmd rmdhead[RMDNUM];
- struct tmd tmdhead[TMDNUM];
- struct init_block ib;
- int rmdnum;
- int tmdnum,tmdlast;
-#ifdef RCV_VIA_SKB
- struct sk_buff *recv_skb[RMDNUM];
-#else
- void *recvbounce[RMDNUM];
-#endif
-#ifdef XMT_VIA_SKB
- struct sk_buff *tmd_skb[TMDNUM];
-#endif
- void *tmdbounce[TMDNUM];
- int tmdbouncenum;
- int lock,xmit_queued;
-
- void *self;
- int cmdr_addr;
- int cardno;
- int features;
- spinlock_t ring_lock;
-};
-
-static int ni65_probe1(struct net_device *dev,int);
-static irqreturn_t ni65_interrupt(int irq, void * dev_id);
-static void ni65_recv_intr(struct net_device *dev,int);
-static void ni65_xmit_intr(struct net_device *dev,int);
-static int ni65_open(struct net_device *dev);
-static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,const unsigned char*,int,int);
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
-static int ni65_close(struct net_device *dev);
-static int ni65_alloc_buffer(struct net_device *dev);
-static void ni65_free_buffer(struct priv *p);
-static void set_multicast_list(struct net_device *dev);
-
-static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
-static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
-
-static int debuglevel = 1;
-
-/*
- * set 'performance' registers .. we must STOP lance for that
- */
-static void ni65_set_performance(struct priv *p)
-{
- writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
-
- if( !(cards[p->cardno].config & 0x02) )
- return;
-
- outw(80,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) != 80)
- return;
-
- writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
- outw(0,PORT+L_ADDRREG);
- outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
- outw(1,PORT+L_ADDRREG);
- outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
-
- outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
-}
-
-/*
- * open interface (up)
- */
-static int ni65_open(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- int irqval = request_irq(dev->irq, ni65_interrupt,0,
- cards[p->cardno].cardname,dev);
- if (irqval) {
- printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
- dev->name,dev->irq, irqval);
- return -EAGAIN;
- }
-
- if(ni65_lance_reinit(dev))
- {
- netif_start_queue(dev);
- return 0;
- }
- else
- {
- free_irq(dev->irq,dev);
- return -EAGAIN;
- }
-}
-
-/*
- * close interface (down)
- */
-static int ni65_close(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
-
-#ifdef XMT_VIA_SKB
- {
- int i;
- for(i=0;i<TMDNUM;i++)
- {
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
- }
- }
-#endif
- free_irq(dev->irq,dev);
- return 0;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- disable_dma(dev->dma);
- free_dma(dev->dma);
- release_region(dev->base_addr, cards[p->cardno].total_size);
- ni65_free_buffer(p);
-}
-
-/* set: io,irq,dma or set it when calling insmod */
-static int irq;
-static int io;
-static int dma;
-
-/*
- * Probe The Card (not the lance-chip)
- */
-struct net_device * __init ni65_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(0);
- static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
- const int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- irq = dev->irq;
- dma = dev->dma;
- } else {
- dev->base_addr = io;
- }
-
- if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
- err = ni65_probe1(dev, dev->base_addr);
- } else if (dev->base_addr > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni65_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
-
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni65_netdev_ops = {
- .ndo_open = ni65_open,
- .ndo_stop = ni65_close,
- .ndo_start_xmit = ni65_send_packet,
- .ndo_tx_timeout = ni65_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * this is the real card probe ..
- */
-static int __init ni65_probe1(struct net_device *dev,int ioaddr)
-{
- int i,j;
- struct priv *p;
- u8 addr[ETH_ALEN];
- unsigned long flags;
-
- dev->irq = irq;
- dev->dma = dma;
-
- for(i=0;i<NUM_CARDS;i++) {
- if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
- continue;
- if(cards[i].id_offset >= 0) {
- if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
- inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
- release_region(ioaddr, cards[i].total_size);
- continue;
- }
- }
- if(cards[i].vendor_id) {
- for(j=0;j<3;j++)
- if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
- release_region(ioaddr, cards[i].total_size);
- }
- break;
- }
- if(i == NUM_CARDS)
- return -ENODEV;
-
- for(j=0;j<6;j++)
- addr[j] = inb(ioaddr+cards[i].addr_offset+j);
- eth_hw_addr_set(dev, addr);
-
- if( (j=ni65_alloc_buffer(dev)) < 0) {
- release_region(ioaddr, cards[i].total_size);
- return j;
- }
- p = dev->ml_priv;
- p->cmdr_addr = ioaddr + cards[i].cmd_offset;
- p->cardno = i;
- spin_lock_init(&p->ring_lock);
-
- printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (j=readreg(CSR0)) != 0x4) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- outw(88,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) == 88) {
- unsigned long v;
- v = inw(PORT+L_DATAREG);
- v <<= 16;
- outw(89,PORT+L_ADDRREG);
- v |= inw(PORT+L_DATAREG);
- printk("Version %#08lx, ",v);
- p->features = INIT_RING_BEFORE_START;
- }
- else {
- printk("ancient LANCE, ");
- p->features = 0x0;
- }
-
- if(test_bit(0,&cards[i].config)) {
- dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
- dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
- printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
- }
- else {
- if(dev->dma == 0) {
- /* 'stuck test' from lance.c */
- unsigned long dma_channels =
- ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
- | (inb(DMA2_STAT_REG) & 0xf0);
- for(i=1;i<5;i++) {
- int dma = dmatab[i];
- if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
- continue;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- set_dma_mode(dma,DMA_MODE_CASCADE);
- enable_dma(dma);
- release_dma_lock(flags);
-
- ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
-
- flags=claim_dma_lock();
- disable_dma(dma);
- free_dma(dma);
- release_dma_lock(flags);
-
- if(readreg(CSR0) & CSR0_IDON)
- break;
- }
- if(i == 5) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
- dev->dma = dmatab[i];
- printk("DMA %d (autodetected), ",dev->dma);
- }
- else
- printk("DMA %d (assigned), ",dev->dma);
-
- if(dev->irq < 2)
- {
- unsigned long irq_mask;
-
- ni65_init_lance(p,dev->dev_addr,0,0);
- irq_mask = probe_irq_on();
- writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
- msleep(20);
- dev->irq = probe_irq_off(irq_mask);
- if(!dev->irq)
- {
- printk("Failed to detect IRQ line!\n");
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
- printk("IRQ %d (autodetected).\n",dev->irq);
- }
- else
- printk("IRQ %d (assigned).\n",dev->irq);
- }
-
- if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
- {
- printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- dev->base_addr = ioaddr;
- dev->netdev_ops = &ni65_netdev_ops;
- dev->watchdog_timeo = HZ/2;
-
- return 0; /* everything is OK */
-}
-
-/*
- * set lance register and trigger init
- */
-static void ni65_init_lance(struct priv *p,const unsigned char *daddr,int filter,int mode)
-{
- int i;
- u32 pib;
-
- writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
-
- for(i=0;i<6;i++)
- p->ib.eaddr[i] = daddr[i];
-
- for(i=0;i<8;i++)
- p->ib.filter[i] = filter;
- p->ib.mode = mode;
-
- p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
- p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
- writereg(0,CSR3); /* busmaster/no word-swap */
- pib = (u32) isa_virt_to_bus(&p->ib);
- writereg(pib & 0xffff,CSR1);
- writereg(pib >> 16,CSR2);
-
- writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
-
- for(i=0;i<32;i++)
- {
- mdelay(4);
- if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
- break; /* init ok ? */
- }
-}
-
-/*
- * allocate memory area and check the 16MB border
- */
-static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
-{
- struct sk_buff *skb=NULL;
- unsigned char *ptr;
- void *ret;
-
- if(type) {
- ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
- if(!skb) {
- printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
- return NULL;
- }
- skb_reserve(skb,2+16);
- skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
- ptr = skb->data;
- }
- else {
- ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
- if(!ret)
- return NULL;
- }
- if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
- printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
- if(type)
- kfree_skb(skb);
- else
- kfree(ptr);
- return NULL;
- }
- return ret;
-}
-
-/*
- * allocate all memory structures .. send/recv buffers etc ...
- */
-static int ni65_alloc_buffer(struct net_device *dev)
-{
- unsigned char *ptr;
- struct priv *p;
- int i;
-
- /*
- * we need 8-aligned memory ..
- */
- ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
- if(!ptr)
- return -ENOMEM;
-
- p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
- memset((char *)p, 0, sizeof(struct priv));
- p->self = ptr;
-
- for(i=0;i<TMDNUM;i++)
- {
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = NULL;
-#endif
- p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
- if(!p->tmdbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
- if(!p->recv_skb[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#else
- p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
- if(!p->recvbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#endif
- }
-
- return 0; /* everything is OK */
-}
-
-/*
- * free buffers and private struct
- */
-static void ni65_free_buffer(struct priv *p)
-{
- int i;
-
- if(!p)
- return;
-
- for(i=0;i<TMDNUM;i++) {
- kfree(p->tmdbounce[i]);
-#ifdef XMT_VIA_SKB
- dev_kfree_skb(p->tmd_skb[i]);
-#endif
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- dev_kfree_skb(p->recv_skb[i]);
-#else
- kfree(p->recvbounce[i]);
-#endif
- }
- kfree(p->self);
-}
-
-
-/*
- * stop and (re)start lance .. e.g after an error
- */
-static void ni65_stop_start(struct net_device *dev,struct priv *p)
-{
- int csr0 = CSR0_INEA;
-
- writedatareg(CSR0_STOP);
-
- if(debuglevel > 1)
- printk(KERN_DEBUG "ni65_stop_start\n");
-
- if(p->features & INIT_RING_BEFORE_START) {
- int i;
-#ifdef XMT_VIA_SKB
- struct sk_buff *skb_save[TMDNUM];
-#endif
- unsigned long buffer[TMDNUM];
- short blen[TMDNUM];
-
- if(p->xmit_queued) {
- while(1) {
- if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
- break;
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- break;
- }
- }
-
- for(i=0;i<TMDNUM;i++) {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- skb_save[i] = p->tmd_skb[i];
-#endif
- buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
- blen[i] = tmdp->blen;
- tmdp->u.s.status = 0x0;
- }
-
- for(i=0;i<RMDNUM;i++) {
- struct rmd *rmdp = p->rmdhead + i;
- rmdp->u.s.status = RCV_OWN;
- }
- p->tmdnum = p->xmit_queued = 0;
- writedatareg(CSR0_STRT | csr0);
-
- for(i=0;i<TMDNUM;i++) {
- int num = (i + p->tmdlast) & (TMDNUM-1);
- p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
- p->tmdhead[i].blen = blen[num];
- if(p->tmdhead[i].u.s.status & XMIT_OWN) {
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
- p->xmit_queued = 1;
- writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
- }
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = skb_save[num];
-#endif
- }
- p->rmdnum = p->tmdlast = 0;
- if(!p->lock)
- if (p->tmdnum || !p->xmit_queued)
- netif_wake_queue(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- }
- else
- writedatareg(CSR0_STRT | csr0);
-}
-
-/*
- * init lance (write init-values .. init-buffers) (open-helper)
- */
-static int ni65_lance_reinit(struct net_device *dev)
-{
- int i;
- struct priv *p = dev->ml_priv;
- unsigned long flags;
-
- p->lock = 0;
- p->xmit_queued = 0;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
- set_dma_mode(dev->dma,DMA_MODE_CASCADE);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (i=readreg(CSR0) ) != 0x4)
- {
- printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
- cards[p->cardno].cardname,(int) i);
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0;
- }
-
- p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
- for(i=0;i<TMDNUM;i++)
- {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
-#endif
- tmdp->u.buffer = 0x0;
- tmdp->u.s.status = XMIT_START | XMIT_END;
- tmdp->blen = tmdp->status2 = 0;
- }
-
- for(i=0;i<RMDNUM;i++)
- {
- struct rmd *rmdp = p->rmdhead + i;
-#ifdef RCV_VIA_SKB
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
-#else
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
-#endif
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN;
- }
-
- if(dev->flags & IFF_PROMISC)
- ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
- ni65_init_lance(p,dev->dev_addr,0xff,0x0);
- else
- ni65_init_lance(p,dev->dev_addr,0x00,0x00);
-
- /*
- * ni65_set_lance_mem() sets L_ADDRREG to CSR0
- * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
- */
-
- if(inw(PORT+L_DATAREG) & CSR0_IDON) {
- ni65_set_performance(p);
- /* init OK: start lance , enable interrupts */
- writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
- return 1; /* ->OK */
- }
- printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0; /* ->Error */
-}
-
-/*
- * interrupt handler
- */
-static irqreturn_t ni65_interrupt(int irq, void * dev_id)
-{
- int csr0 = 0;
- struct net_device *dev = dev_id;
- struct priv *p;
- int bcnt = 32;
-
- p = dev->ml_priv;
-
- spin_lock(&p->ring_lock);
-
- while(--bcnt) {
- csr0 = inw(PORT+L_DATAREG);
-
-#if 0
- writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
-#else
- writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
-#endif
-
- if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
- break;
-
- if(csr0 & CSR0_RINT) /* RECV-int? */
- ni65_recv_intr(dev,csr0);
- if(csr0 & CSR0_TINT) /* XMIT-int? */
- ni65_xmit_intr(dev,csr0);
-
- if(csr0 & CSR0_ERR)
- {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
- if(csr0 & CSR0_BABL)
- dev->stats.tx_errors++;
- if(csr0 & CSR0_MISS) {
- int i;
- for(i=0;i<RMDNUM;i++)
- printk("%02x ",p->rmdhead[i].u.s.status);
- printk("\n");
- dev->stats.rx_errors++;
- }
- if(csr0 & CSR0_MERR) {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
- ni65_stop_start(dev,p);
- }
- }
- }
-
-#ifdef RCV_PARANOIA_CHECK
-{
- int j;
- for(j=0;j<RMDNUM;j++)
- {
- int i, num2;
- for(i=RMDNUM-1;i>0;i--) {
- num2 = (p->rmdnum + i) & (RMDNUM-1);
- if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
- break;
- }
-
- if(i) {
- int k, num1;
- for(k=0;k<RMDNUM;k++) {
- num1 = (p->rmdnum + k) & (RMDNUM-1);
- if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
- break;
- }
- if(!k)
- break;
-
- if(debuglevel > 0)
- {
- char buf[256],*buf1;
- buf1 = buf;
- for(k=0;k<RMDNUM;k++) {
- sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
- buf1 += 3;
- }
- *buf1 = 0;
- printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
- }
-
- p->rmdnum = num1;
- ni65_recv_intr(dev,csr0);
- if((p->rmdhead[num2].u.s.status & RCV_OWN))
- break; /* ok, we are 'in sync' again */
- }
- else
- break;
- }
-}
-#endif
-
- if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
- printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
- ni65_stop_start(dev,p);
- }
- else
- writedatareg(CSR0_INEA);
-
- spin_unlock(&p->ring_lock);
- return IRQ_HANDLED;
-}
-
-/*
- * We have received an Xmit-Interrupt ..
- * send a new packet if necessary
- */
-static void ni65_xmit_intr(struct net_device *dev,int csr0)
-{
- struct priv *p = dev->ml_priv;
-
- while(p->xmit_queued)
- {
- struct tmd *tmdp = p->tmdhead + p->tmdlast;
- int tmdstat = tmdp->u.s.status;
-
- if(tmdstat & XMIT_OWN)
- break;
-
- if(tmdstat & XMIT_ERR)
- {
-#if 0
- if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
- printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
-#endif
- /* checking some errors */
- if(tmdp->status2 & XMIT_RTRY)
- dev->stats.tx_aborted_errors++;
- if(tmdp->status2 & XMIT_LCAR)
- dev->stats.tx_carrier_errors++;
- if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
- /* this stops the xmitter */
- dev->stats.tx_fifo_errors++;
- if(debuglevel > 0)
- printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
- if(p->features & INIT_RING_BEFORE_START) {
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
- ni65_stop_start(dev,p);
- break; /* no more Xmit processing .. */
- }
- else
- ni65_stop_start(dev,p);
- }
- if(debuglevel > 2)
- printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
- if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
- dev->stats.tx_errors++;
- tmdp->status2 = 0;
- }
- else {
- dev->stats.tx_bytes -= (short)(tmdp->blen);
- dev->stats.tx_packets++;
- }
-
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[p->tmdlast]) {
- dev_consume_skb_irq(p->tmd_skb[p->tmdlast]);
- p->tmd_skb[p->tmdlast] = NULL;
- }
-#endif
-
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- p->xmit_queued = 0;
- }
- netif_wake_queue(dev);
-}
-
-/*
- * We have received a packet
- */
-static void ni65_recv_intr(struct net_device *dev,int csr0)
-{
- struct rmd *rmdp;
- int rmdstat,len;
- int cnt=0;
- struct priv *p = dev->ml_priv;
-
- rmdp = p->rmdhead + p->rmdnum;
- while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
- {
- cnt++;
- if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
- {
- if(!(rmdstat & RCV_ERR)) {
- if(rmdstat & RCV_START)
- {
- dev->stats.rx_length_errors++;
- printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
- }
- }
- else {
- if(debuglevel > 2)
- printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
- dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
- if(rmdstat & RCV_FRAM)
- dev->stats.rx_frame_errors++;
- if(rmdstat & RCV_OFLO)
- dev->stats.rx_over_errors++;
- if(rmdstat & RCV_CRC)
- dev->stats.rx_crc_errors++;
- if(rmdstat & RCV_BUF_ERR)
- dev->stats.rx_fifo_errors++;
- }
- if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
- dev->stats.rx_errors++;
- }
- else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
- {
-#ifdef RCV_VIA_SKB
- struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
- if (skb)
- skb_reserve(skb,16);
-#else
- struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
-#endif
- if(skb)
- {
- skb_reserve(skb,2);
-#ifdef RCV_VIA_SKB
- if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
- }
- else {
- struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
- skb_put(skb,R_BUF_SIZE);
- p->recv_skb[p->rmdnum] = skb;
- rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- skb = skb1;
- skb_trim(skb,len);
- }
-#else
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
-#endif
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- }
- else
- {
- printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
- dev->stats.rx_dropped++;
- }
- }
- else {
- printk(KERN_INFO "%s: received runt packet\n",dev->name);
- dev->stats.rx_errors++;
- }
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN; /* change owner */
- p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
- rmdp = p->rmdhead + p->rmdnum;
- }
-}
-
-/*
- * kick xmitter ..
- */
-
-static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
-{
- int i;
- struct priv *p = dev->ml_priv;
-
- printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
- for(i=0;i<TMDNUM;i++)
- printk("%02x ",p->tmdhead[i].u.s.status);
- printk("\n");
- ni65_lance_reinit(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-/*
- * Send a packet
- */
-
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- if (test_and_set_bit(0, (void*)&p->lock)) {
- printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
- return NETDEV_TX_BUSY;
- }
-
- {
- short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- struct tmd *tmdp;
- unsigned long flags;
-
-#ifdef XMT_VIA_SKB
- if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
-#endif
-
- skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
- skb->len > T_BUF_SIZE ? T_BUF_SIZE :
- skb->len);
- if (len > skb->len)
- memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
- dev_kfree_skb (skb);
-
- spin_lock_irqsave(&p->ring_lock, flags);
- tmdp = p->tmdhead + p->tmdnum;
- tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
- p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
-
-#ifdef XMT_VIA_SKB
- }
- else {
- spin_lock_irqsave(&p->ring_lock, flags);
-
- tmdp = p->tmdhead + p->tmdnum;
- tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- p->tmd_skb[p->tmdnum] = skb;
- }
-#endif
- tmdp->blen = -len;
-
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
- writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
-
- p->xmit_queued = 1;
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
-
- if(p->tmdnum != p->tmdlast)
- netif_wake_queue(dev);
-
- p->lock = 0;
-
- spin_unlock_irqrestore(&p->ring_lock, flags);
- }
-
- return NETDEV_TX_OK;
-}
-
-static void set_multicast_list(struct net_device *dev)
-{
- if(!ni65_lance_reinit(dev))
- printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
- netif_wake_queue(dev);
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni65;
-
-module_param_hw(irq, int, irq, 0);
-module_param_hw(io, int, ioport, 0);
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
-MODULE_PARM_DESC(io, "ni6510 I/O base address");
-MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
-
-static int __init ni65_init_module(void)
-{
- dev_ni65 = ni65_probe(-1);
- return PTR_ERR_OR_ZERO(dev_ni65);
-}
-module_init(ni65_init_module);
-
-static void __exit ni65_cleanup_module(void)
-{
- unregister_netdev(dev_ni65);
- cleanup_card(dev_ni65);
- free_netdev(dev_ni65);
-}
-module_exit(ni65_cleanup_module);
-#endif /* MODULE */
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
deleted file mode 100644
index e6217e35edf0..000000000000
--- a/drivers/net/ethernet/amd/ni65.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* am7990 (lance) definitions
- *
- * This is an extension to the Linux operating system, and is covered by
- * same GNU General Public License that covers that work.
- *
- * Michael Hipp
- * email: mhipp@student.uni-tuebingen.de
- *
- * sources: (mail me or ask archie if you need them)
- * crynwr-packet-driver
- */
-
-/*
- * Control and Status Register 0 (CSR0) bit definitions
- * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write)
- *
- */
-
-#define CSR0_ERR 0x8000 /* Error summary (R) */
-#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
-#define CSR0_CERR 0x2000 /* Collision Error (RC) */
-#define CSR0_MISS 0x1000 /* Missed packet (RC) */
-#define CSR0_MERR 0x0800 /* Memory Error (RC) */
-#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
-#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
-#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
-#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
-#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
-#define CSR0_RXON 0x0020 /* Receiver on (R) */
-#define CSR0_TXON 0x0010 /* Transmitter on (R) */
-#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
-#define CSR0_STOP 0x0004 /* Stop (RS) */
-#define CSR0_STRT 0x0002 /* Start (RS) */
-#define CSR0_INIT 0x0001 /* Initialize (RS) */
-
-#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
-/*
- * Initialization Block Mode operation Bit Definitions.
- */
-
-#define M_PROM 0x8000 /* Promiscuous Mode */
-#define M_INTL 0x0040 /* Internal Loopback */
-#define M_DRTY 0x0020 /* Disable Retry */
-#define M_COLL 0x0010 /* Force Collision */
-#define M_DTCR 0x0008 /* Disable Transmit CRC) */
-#define M_LOOP 0x0004 /* Loopback */
-#define M_DTX 0x0002 /* Disable the Transmitter */
-#define M_DRX 0x0001 /* Disable the Receiver */
-
-
-/*
- * Receive message descriptor bit definitions.
- */
-
-#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
-#define RCV_ERR 0x40 /* Error Summary */
-#define RCV_FRAM 0x20 /* Framing Error */
-#define RCV_OFLO 0x10 /* Overflow Error */
-#define RCV_CRC 0x08 /* CRC Error */
-#define RCV_BUF_ERR 0x04 /* Buffer Error */
-#define RCV_START 0x02 /* Start of Packet */
-#define RCV_END 0x01 /* End of Packet */
-
-
-/*
- * Transmit message descriptor bit definitions.
- */
-
-#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
-#define XMIT_ERR 0x40 /* Error Summary */
-#define XMIT_RETRY 0x10 /* more the 1 retry needed to Xmit */
-#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
-#define XMIT_DEF 0x04 /* Deferred */
-#define XMIT_START 0x02 /* Start of Packet */
-#define XMIT_END 0x01 /* End of Packet */
-
-/*
- * transmit status (2) (valid if XMIT_ERR == 1)
- */
-
-#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
-#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
-#define XMIT_LCAR 0x0800 /* Loss of Carrier */
-#define XMIT_LCOL 0x1000 /* Late collision */
-#define XMIT_RESERV 0x2000 /* Reserved */
-#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
-#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
-
-struct init_block {
- unsigned short mode;
- unsigned char eaddr[6];
- unsigned char filter[8];
- /* bit 29-31: number of rmd's (power of 2) */
- u32 rrp; /* receive ring pointer (align 8) */
- /* bit 29-31: number of tmd's (power of 2) */
- u32 trp; /* transmit ring pointer (align 8) */
-};
-
-struct rmd { /* Receive Message Descriptor */
- union {
- volatile u32 buffer;
- struct {
- volatile unsigned char dummy[3];
- volatile unsigned char status;
- } s;
- } u;
- volatile short blen;
- volatile unsigned short mlen;
-};
-
-struct tmd {
- union {
- volatile u32 buffer;
- struct {
- volatile unsigned char dummy[3];
- volatile unsigned char status;
- } s;
- } u;
- volatile unsigned short blen;
- volatile unsigned short status2;
-};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 30ee5329bd7c..823a329a921f 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -485,10 +485,10 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
data = inb(ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -512,10 +512,10 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -567,13 +567,13 @@ static int mace_init(mace_private *lp, unsigned int ioaddr,
* Or just set ASEL in PHYCC below!
*/
switch (if_port) {
- case 1:
+ case 1:
mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
break;
- case 2:
+ case 2:
mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
break;
- default:
+ default:
mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
/* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
and the MACE device will automatically select the operating media
@@ -815,7 +815,7 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -918,7 +918,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
- if (dev == NULL) {
+ if (!dev) {
pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
irq);
return IRQ_NONE;
@@ -1102,7 +1102,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index c20c369c7eb8..72db9f9e7bee 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -488,7 +488,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_tx_ring == NULL)
+ if (!new_tx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -547,7 +547,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_rx_ring == NULL)
+ if (!new_rx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -797,9 +797,9 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (lp->pci_dev)
- strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ strscpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
@@ -1249,7 +1249,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
return;
}
@@ -1881,7 +1881,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* napi.weight is used in both the napi and non-napi cases */
lp->napi.weight = lp->rx_ring_size / 2;
- netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
+ netif_napi_add_weight(dev, &lp->napi, pcnet32_poll,
+ lp->rx_ring_size / 2);
if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
@@ -2017,7 +2018,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
&lp->tx_ring_dma_addr, GFP_KERNEL);
- if (lp->tx_ring == NULL) {
+ if (!lp->tx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2025,7 +2026,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
&lp->rx_ring_dma_addr, GFP_KERNEL);
- if (lp->rx_ring == NULL) {
+ if (!lp->rx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2364,7 +2365,7 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
+ if (!rx_skbuff) {
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 007bd7787291..246f34c43765 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -341,7 +341,7 @@ static int __init lance_probe( struct net_device *dev)
/* XXX - leak? */
MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
- if (MEM == NULL) {
+ if (!MEM) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -796,7 +796,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 22d609563af8..68ca1225eedc 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -530,7 +530,7 @@ static void lance_rx_dvma(struct net_device *dev)
len = (rd->mblength & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -700,7 +700,7 @@ static void lance_rx_pio(struct net_device *dev)
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1276,7 +1276,7 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strscpy(info->driver, "sunlance", sizeof(info->driver));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 895d35639129..c68ace804e37 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -230,7 +230,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
netif_dbg(pdata, drv, netdev,
- "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ "cap=%d, en=%#x, mbc=%d, delay=%d\n",
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
/* Check PFC for supported number of traffic classes */
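The format-string change is the standard fix for the deprecated %hhu/%hhx specifiers: varargs promote char and short arguments to int anyway, so the length modifiers buy nothing and plain %d/%#x print the identical value. A minimal sketch (names are illustrative):

#include <linux/netdevice.h>

static void example_log_pfc(struct net_device *netdev, u8 cap, u8 en)
{
        /* u8 arguments are promoted to int at the call site */
        netdev_dbg(netdev, "cap=%d, en=%#x\n", cap, en);
}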
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 492ac383f16d..7b666106feee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;
+ /* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
}
}
@@ -950,14 +952,14 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
+ xgbe_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
+ xgbe_all_poll);
napi_enable(&pdata->napi);
}
@@ -1671,12 +1673,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
@@ -2550,6 +2550,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2579,8 +2587,10 @@ skip_data:
if (!last || context_next)
goto read_again;
- if (!skb)
+ if (!skb || error) {
+ dev_kfree_skb(skb);
goto next_packet;
+ }
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
@@ -2772,7 +2782,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
for (i = 0; i < skb->len; i += 32) {
unsigned int len = min(skb->len - i, 32U);
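The buf2_len guard added above works because descriptor lengths are unsigned: when inconsistent descriptors would make the second-buffer length negative, the subtraction wraps to a huge value, which necessarily exceeds the DMA buffer size. A minimal userspace sketch of the same detection idea (values and names are illustrative, not the driver's):

#include <stdio.h>

/* Unsigned subtraction never goes negative: it wraps to a huge value,
 * so any wrapped length necessarily exceeds the real buffer size.
 */
static int length_underflowed(unsigned int len, unsigned int dma_len)
{
        return len > dma_len;
}

int main(void)
{
        unsigned int total = 1400, consumed = 1500; /* inconsistent descriptors */
        unsigned int buf2_len = total - consumed;   /* wraps near UINT_MAX */

        printf("buf2_len=%u underflow=%d\n",
               buf2_len, length_underflowed(buf2_len, 2048));
        return 0;
}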
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6ceb1cdf6eba..6e83ff59172a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -402,8 +402,8 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index efdcf484a510..f409d7bd1f1e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -285,6 +285,9 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Yellow Carp devices do not need cdr workaround */
pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -425,6 +428,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
@@ -480,6 +486,7 @@ static struct xgbe_version_data xgbe_v2a = {
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
+ .enable_rrc = 1,
};
static struct xgbe_version_data xgbe_v2b = {
@@ -495,6 +502,7 @@ static struct xgbe_version_data xgbe_v2b = {
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
+ .enable_rrc = 1,
};
static const struct pci_device_id xgbe_pci_table[] = {
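The new enable_rrc flag follows this file's existing pattern: hardware differences are recorded once in the per-version data, and call sites test the flag rather than re-checking device IDs (see the xgbe_phy_link_status change below). A hedged sketch of the pattern, with illustrative names and threshold:

#include <stdio.h>

struct version_quirks {
        unsigned int an_cdr_workaround;
        unsigned int enable_rrc;
};

static const struct version_quirks v2a = { .an_cdr_workaround = 1, .enable_rrc = 1 };
static const struct version_quirks yellow_carp; /* zero-init: both quirks off */

/* Call sites test the capability flag, never a device ID. */
static void maybe_receiver_reset(const struct version_quirks *q,
                                 unsigned int *rrc_count)
{
        if (q->enable_rrc && (*rrc_count)++ > 3 /* illustrative threshold */)
                *rrc_count = 0; /* ... issue the receiver reset cycle ... */
}

int main(void)
{
        unsigned int count = 5;

        maybe_receiver_reset(&yellow_carp, &count); /* no-op: rrc disabled */
        maybe_receiver_reset(&v2a, &count);         /* exceeds threshold: resets */
        printf("count=%u\n", count);
        return 0;
}

Adding a new quirk then touches one struct field per hardware variant instead of every call site.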
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 2156600641b6..4064c3e3dd49 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -239,6 +239,7 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
+#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@@ -284,6 +285,8 @@ struct xgbe_sfp_eeprom {
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
struct xgbe_sfp_ascii {
union {
char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -834,7 +837,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
- max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
+ max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
+ else
+ max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
@@ -1151,7 +1158,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
}
/* Determine the type of SFP */
- if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1167,9 +1177,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
- else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
- xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
- phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
@@ -1979,6 +1986,10 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
{
+ /* The PLL_CTRL feature is needed only for fixed PHY modes (non-autoneg) */
+ if (pdata->phy.autoneg != AUTONEG_DISABLE)
+ return;
+
XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
XGBE_PMA_PLL_CTRL_MASK,
enable ? XGBE_PMA_PLL_CTRL_ENABLE
@@ -1989,7 +2000,7 @@ static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
}
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
- unsigned int cmd, unsigned int sub_cmd)
+ enum xgbe_mb_cmd cmd, enum xgbe_mb_subcmd sub_cmd)
{
unsigned int s0 = 0;
unsigned int wait;
@@ -2029,14 +2040,16 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
xgbe_phy_rx_reset(pdata);
reenable_pll:
- /* Enable PLL re-initialization */
- xgbe_phy_pll_ctrl(pdata, true);
+ /* Enable PLL re-initialization; not needed for the PHY Power Off and RRC cmds */
+ if (cmd != XGBE_MB_CMD_POWER_OFF &&
+ cmd != XGBE_MB_CMD_RRC)
+ xgbe_phy_pll_ctrl(pdata, true);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
{
/* Receiver Reset Cycle */
- xgbe_phy_perform_ratechange(pdata, 5, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_RRC, XGBE_MB_SUBCMD_NONE);
netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
}
@@ -2046,7 +2059,7 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
struct xgbe_phy_data *phy_data = pdata->phy_data;
/* Power off */
- xgbe_phy_perform_ratechange(pdata, 0, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_POWER_OFF, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_UNKNOWN;
@@ -2061,14 +2074,17 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
/* 10G/SFI */
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
- xgbe_phy_perform_ratechange(pdata, 3, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
} else {
if (phy_data->sfp_cable_len <= 1)
- xgbe_phy_perform_ratechange(pdata, 3, 1);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_1M);
else if (phy_data->sfp_cable_len <= 3)
- xgbe_phy_perform_ratechange(pdata, 3, 2);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_3M);
else
- xgbe_phy_perform_ratechange(pdata, 3, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_OTHER);
}
phy_data->cur_mode = XGBE_MODE_SFI;
@@ -2083,7 +2099,7 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/X */
- xgbe_phy_perform_ratechange(pdata, 1, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
phy_data->cur_mode = XGBE_MODE_X;
@@ -2097,7 +2113,7 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/SGMII */
- xgbe_phy_perform_ratechange(pdata, 1, 2);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_SGMII);
phy_data->cur_mode = XGBE_MODE_SGMII_1000;
@@ -2111,7 +2127,7 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 100M/SGMII */
- xgbe_phy_perform_ratechange(pdata, 1, 1);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_100MBITS);
phy_data->cur_mode = XGBE_MODE_SGMII_100;
@@ -2125,7 +2141,7 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 10G/KR */
- xgbe_phy_perform_ratechange(pdata, 4, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -2139,7 +2155,7 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 2.5G/KX */
- xgbe_phy_perform_ratechange(pdata, 2, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_2_5G, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KX_2500;
@@ -2153,7 +2169,7 @@ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/KX */
- xgbe_phy_perform_ratechange(pdata, 1, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
phy_data->cur_mode = XGBE_MODE_KX_1000;
@@ -2640,7 +2656,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
}
/* No link, attempt a receiver reset cycle */
- if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ if (pdata->vdata->enable_rrc && phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
xgbe_phy_rrc(pdata);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4ebd2410185a..4d790a89fe77 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ phy_irqnum = platform_irq_count(pdev) - 1;
dma_irqnum = 1;
dma_irqend = phy_irqnum;
} else {
@@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
phy_memnum = 0;
phy_irqnum = 0;
dma_irqnum = 1;
- dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+ dma_irqend = platform_irq_count(pdev);
}
/* Obtain the mmio areas for the device */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 607a2c90513b..71f24cb47935 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -151,7 +151,8 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+#define XGBE_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
* - Maximum number of SKB frags
@@ -416,7 +417,7 @@ struct xgbe_rx_ring_data {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
@@ -610,6 +611,31 @@ enum xgbe_mdio_mode {
XGBE_MDIO_MODE_CL45,
};
+enum xgbe_mb_cmd {
+ XGBE_MB_CMD_POWER_OFF = 0,
+ XGBE_MB_CMD_SET_1G,
+ XGBE_MB_CMD_SET_2_5G,
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_CMD_RRC
+};
+
+enum xgbe_mb_subcmd {
+ XGBE_MB_SUBCMD_NONE = 0,
+
+ /* 10GbE SFP subcommands */
+ XGBE_MB_SUBCMD_ACTIVE = 0,
+ XGBE_MB_SUBCMD_PASSIVE_1M,
+ XGBE_MB_SUBCMD_PASSIVE_3M,
+ XGBE_MB_SUBCMD_PASSIVE_OTHER,
+
+ /* 1GbE Mode subcommands */
+ XGBE_MB_SUBCMD_10MBITS = 0,
+ XGBE_MB_SUBCMD_100MBITS,
+ XGBE_MB_SUBCMD_1G_SGMII,
+ XGBE_MB_SUBCMD_1G_KX
+};
+
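Note that the subcommand enumerators above intentionally overlap (XGBE_MB_SUBCMD_ACTIVE and XGBE_MB_SUBCMD_10MBITS are both 0); this is safe because a subcommand is only decoded relative to the command it accompanies. A small sketch of that pairing, with an illustrative encoding that is not the firmware's actual register layout:

#include <stdio.h>

enum mb_cmd    { MB_CMD_SET_1G = 1, MB_CMD_SET_10G_SFI = 3 };
enum mb_subcmd { MB_SUBCMD_ACTIVE = 0, MB_SUBCMD_1G_SGMII = 2 };

/* The (cmd, subcmd) pair is the message; overlapping subcmd values from
 * different groups can never be confused because the cmd selects which
 * group the receiver decodes.
 */
static unsigned int mb_encode(enum mb_cmd cmd, enum mb_subcmd sub)
{
        return ((unsigned int)sub << 16) | (unsigned int)cmd;
}

int main(void)
{
        printf("%#x\n", mb_encode(MB_CMD_SET_1G, MB_SUBCMD_1G_SGMII));
        return 0;
}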
struct xgbe_phy {
struct ethtool_link_ksettings lks;
@@ -1012,6 +1038,7 @@ struct xgbe_version_data {
unsigned int tx_desc_prefetch;
unsigned int rx_desc_prefetch;
unsigned int an_cdr_workaround;
+ unsigned int enable_rrc;
};
struct xgbe_prv_data {
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index d022b6db9e06..379d19d18dbe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -672,7 +672,7 @@ static int xge_probe(struct platform_device *pdev)
if (ret)
goto err;
- netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &pdata->napi, xge_napi);
ret = register_netdev(ndev);
if (ret) {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index ff2d099aab21..390671640388 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
buf_pool->rx_skb[skb_index] = NULL;
datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+ /* strip off CRC as HW isn't doing this */
+ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+ if (!nv)
+ datalen -= 4;
+
skb_put(skb, datalen);
prefetch(skb->data - NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, ndev);
@@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
}
}
- nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv) {
- /* strip off CRC as HW isn't doing this */
- datalen -= 4;
+ if (!nv)
goto skip_jumbo;
- }
slots = page_pool->slots - 1;
head = page_pool->head;
@@ -1002,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
- if (ret)
+ if (ret) {
+ xgene_enet_napi_disable(pdata);
return ret;
+ }
if (ndev->phydev) {
phy_start(ndev->phydev);
@@ -1975,14 +1979,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
napi = &pdata->rx_ring[i]->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
for (i = 0; i < pdata->cq_cnt; i++) {
napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
}
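The xgene_enet_open change above restores the standard error-unwind rule: undo, in reverse order, every step completed before the failure, here disabling NAPI when IRQ registration fails. A runnable sketch of the goto-ladder idiom this corresponds to (the helpers are stand-ins, not the driver's functions):

#include <stdio.h>

static void napi_enable_all(void)  { puts("napi on"); }
static void napi_disable_all(void) { puts("napi off"); }
static int  register_irqs(void)    { return -1; /* simulate failure */ }

/* Undo, in reverse order, everything done before the failing step. */
static int example_open(void)
{
        int err;

        napi_enable_all();

        err = register_irqs();
        if (err)
                goto err_napi;

        return 0;

err_napi:
        napi_disable_all();     /* reverse of napi_enable_all() */
        return err;
}

int main(void) { return example_open() ? 1 : 0; }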
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9a650d1c1bdd..334de0d93c89 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -25,7 +25,6 @@
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -1237,6 +1236,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct bmac_data *bp;
const unsigned char *prop_addr;
unsigned char addr[6];
+ u8 macaddr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1284,7 +1284,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ eth_hw_addr_set(dev, macaddr);
/* Enable chip without interrupts for now */
bmac_enable_and_reset_chip(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 4b80e3a52a19..d0a771b65e88 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -20,7 +20,6 @@
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/macio.h>
@@ -90,7 +89,7 @@ static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
@@ -112,6 +111,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct net_device *dev;
struct mace_data *mp;
const unsigned char *addr;
+ u8 macaddr[ETH_ALEN];
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
@@ -167,8 +167,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) {
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
}
+ eth_hw_addr_set(dev, macaddr);
mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
in_8(&mp->mace->chipid_lo);
@@ -369,11 +370,12 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -385,7 +387,10 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ out_8(&mb->padr, macaddr[i] = p[i]);
+
+ eth_hw_addr_set(dev, macaddr);
+
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
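Both Apple drivers stop writing dev->dev_addr directly, since it is const for drivers now: the address is assembled in a local buffer and installed with a single eth_hw_addr_set() call. A hedged userspace sketch of the shape of that change (eth_hw_addr_set is stubbed here; in the kernel it is the sanctioned setter and participates in address-change bookkeeping):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct net_device { unsigned char dev_addr[ETH_ALEN]; };

/* Stub of the kernel helper: the only sanctioned way to set dev_addr. */
static void eth_hw_addr_set(struct net_device *dev, const unsigned char *mac)
{
        memcpy(dev->dev_addr, mac, ETH_ALEN);
}

int main(void)
{
        static const unsigned char rom[ETH_ALEN] = { 0x00, 0xa0, 1, 2, 3, 4 };
        struct net_device dev;
        unsigned char mac[ETH_ALEN];
        int i;

        for (i = 0; i < ETH_ALEN; i++)  /* build in a local buffer ... */
                mac[i] = rom[i];
        eth_hw_addr_set(&dev, mac);     /* ... then install in one call */

        printf("%02x:%02x\n", dev.dev_addr[0], dev.dev_addr[1]);
        return 0;
}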
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99..7e9c74b141ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,7 @@
#define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
@@ -64,8 +65,6 @@
*/
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
-#define AQ_CFG_NAPI_WEIGHT 64U
-
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a418238f6309..a08f221e30d4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
@@ -229,7 +238,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
"%u.%u.%u", firmware_version >> 24,
(firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
- strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 02058fe79f52..7eb5851eb95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -292,9 +292,6 @@ static int aq_mdo_dev_open(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
@@ -306,9 +303,6 @@ static int aq_mdo_dev_stop(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
@@ -466,9 +460,6 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
@@ -492,9 +483,6 @@ static int aq_mdo_upd_secy(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
@@ -543,9 +531,6 @@ static int aq_mdo_del_secy(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
@@ -585,6 +570,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ memzero_explicit(&key_rec, sizeof(key_rec));
return ret;
}
@@ -601,9 +587,6 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
@@ -631,9 +614,6 @@ static int aq_mdo_upd_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
@@ -681,9 +661,6 @@ static int aq_mdo_del_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -780,9 +757,6 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
@@ -809,9 +783,6 @@ static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
@@ -876,9 +847,6 @@ static int aq_mdo_del_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
@@ -932,6 +900,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+ memzero_explicit(&sa_key_record, sizeof(sa_key_record));
return ret;
}
@@ -948,9 +917,6 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
@@ -978,9 +944,6 @@ static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
@@ -1029,9 +992,6 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -1044,9 +1004,6 @@ static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
@@ -1073,9 +1030,6 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
@@ -1106,9 +1060,6 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
@@ -1147,9 +1098,6 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
@@ -1196,9 +1144,6 @@ static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
@@ -1451,26 +1396,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)
egress_sa_threshold_expired);
}
+#define AQ_LOCKED_MDO_DEF(mdo) \
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \
+{ \
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev); \
+ int ret; \
+ mutex_lock(&nic->macsec_mutex); \
+ ret = aq_mdo_##mdo(ctx); \
+ mutex_unlock(&nic->macsec_mutex); \
+ return ret; \
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
+
const struct macsec_ops aq_macsec_ops = {
- .mdo_dev_open = aq_mdo_dev_open,
- .mdo_dev_stop = aq_mdo_dev_stop,
- .mdo_add_secy = aq_mdo_add_secy,
- .mdo_upd_secy = aq_mdo_upd_secy,
- .mdo_del_secy = aq_mdo_del_secy,
- .mdo_add_rxsc = aq_mdo_add_rxsc,
- .mdo_upd_rxsc = aq_mdo_upd_rxsc,
- .mdo_del_rxsc = aq_mdo_del_rxsc,
- .mdo_add_rxsa = aq_mdo_add_rxsa,
- .mdo_upd_rxsa = aq_mdo_upd_rxsa,
- .mdo_del_rxsa = aq_mdo_del_rxsa,
- .mdo_add_txsa = aq_mdo_add_txsa,
- .mdo_upd_txsa = aq_mdo_upd_txsa,
- .mdo_del_txsa = aq_mdo_del_txsa,
- .mdo_get_dev_stats = aq_mdo_get_dev_stats,
- .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
- .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
- .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
- .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+ .mdo_dev_open = aq_locked_mdo_dev_open,
+ .mdo_dev_stop = aq_locked_mdo_dev_stop,
+ .mdo_add_secy = aq_locked_mdo_add_secy,
+ .mdo_upd_secy = aq_locked_mdo_upd_secy,
+ .mdo_del_secy = aq_locked_mdo_del_secy,
+ .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+ .mdo_add_txsa = aq_locked_mdo_add_txsa,
+ .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+ .mdo_del_txsa = aq_locked_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
};
int aq_macsec_init(struct aq_nic_s *nic)
@@ -1492,6 +1468,7 @@ int aq_macsec_init(struct aq_nic_s *nic)
nic->ndev->features |= NETIF_F_HW_MACSEC;
nic->ndev->macsec_ops = &aq_macsec_ops;
+ mutex_init(&nic->macsec_mutex);
return 0;
}
@@ -1515,7 +1492,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
if (!nic->macsec_cfg)
return 0;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
if (nic->aq_fw_ops->send_macsec_req) {
struct macsec_cfg_request cfg = { 0 };
@@ -1564,7 +1541,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
ret = aq_apply_macsec_cfg(nic);
unlock:
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
return ret;
}
@@ -1576,9 +1553,9 @@ void aq_macsec_work(struct aq_nic_s *nic)
if (!netif_carrier_ok(nic->ndev))
return;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
aq_check_txsa_expiration(nic);
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
}
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
@@ -1589,21 +1566,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->rxsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
{
+ int cnt;
+
if (!nic->macsec_cfg)
return 0;
- return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_lock(&nic->macsec_mutex);
+ cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_unlock(&nic->macsec_mutex);
+
+ return cnt;
}
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
@@ -1614,12 +1600,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->txsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
@@ -1691,6 +1680,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
if (!cfg)
return data;
+ mutex_lock(&nic->macsec_mutex);
+
aq_macsec_update_stats(nic);
common_stats = &cfg->stats;
@@ -1773,5 +1764,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
data += i;
+ mutex_unlock(&nic->macsec_mutex);
+
return data;
}
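AQ_LOCKED_MDO_DEF above generates one thin wrapper per MACsec op, so all nineteen entry points take nic->macsec_mutex without hand-writing the lock/unlock pairs. A minimal sketch of the same token-pasting technique outside the kernel (a pthread mutex stands in for the nic mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_add(int x) { return x + 1; }
static int do_del(int x) { return x - 1; }

/* Generate a locked_<name>() wrapper around each do_<name>(). */
#define LOCKED_DEF(name)                        \
static int locked_##name(int x)                 \
{                                               \
        int ret;                                \
        pthread_mutex_lock(&lock);              \
        ret = do_##name(x);                     \
        pthread_mutex_unlock(&lock);            \
        return ret;                             \
}

LOCKED_DEF(add)
LOCKED_DEF(del)

int main(void)
{
        printf("%d %d\n", locked_add(1), locked_del(1));
        return 0;
}

The unlocked aq_mdo_* functions stay as-is, which keeps the locking policy in exactly one place.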
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e65ce7199dac..8a0af371e7dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,22 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
+#include "aq_vec.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
static const struct net_device_ops aq_ndev_ops;
@@ -89,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev)
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
@@ -126,9 +128,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
int err;
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
+
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
@@ -204,6 +216,25 @@ err_exit:
return err;
}
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ features & NETIF_F_LRO) {
+ netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -410,6 +441,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
mqprio->qopt.prio_tc_map);
}
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
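aq_xdp_setup follows the common XDP attach sequence: quiesce the device only when program presence changes, swap the program pointer atomically with xchg(), release the old program, and keep a static key in step so hot paths pay for extra locking only while some program is loaded. A hedged sketch of that bookkeeping, with userspace stand-ins for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) prog_ptr; /* stands in for aq_nic->xdp_prog */
static int key_users;           /* stands in for the static key count */

static void xdp_setup(int *prog)
{
        int *old = atomic_exchange(&prog_ptr, prog);    /* xchg() */

        /* Keep the "any prog loaded" key in step with 0 <-> non-0. */
        if (!old && prog)
                key_users++;    /* static_branch_inc() */
        else if (old && !prog)
                key_users--;    /* static_branch_dec() */
}

int main(void)
{
        int prog;

        xdp_setup(&prog);       /* attach: key goes 0 -> 1 */
        xdp_setup(&prog);       /* replace: key unchanged */
        xdp_setup(NULL);        /* detach: key goes 1 -> 0 */
        printf("key_users=%d\n", key_users);
        return 0;
}

Replacing one program with another therefore never cycles the interface and never perturbs the key.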
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -418,10 +499,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_fix_features = aq_ndev_fix_features,
.ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
};
static int __init aq_ndev_init_module(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce73..99870865f66d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,6 +12,8 @@
#include "aq_common.h"
#include "aq_nic.h"
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 33f1a1377588..06508eebb585 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -486,8 +484,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -517,8 +515,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
@@ -569,6 +567,103 @@ err_exit:
return err;
}
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
+
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
@@ -697,6 +792,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
goto exit;
mapping_error:
@@ -725,6 +821,44 @@ exit:
return ret;
}
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
@@ -878,7 +1012,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -928,11 +1061,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1246,7 +1379,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1264,9 +1396,8 @@ int aq_nic_stop(struct aq_nic_s *self)
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3d..ad33f8586532 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
#define AQ_NIC_H
#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
#include "aq_common.h"
#include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
struct net_device *ndev;
unsigned int aq_vecs;
unsigned int packet_filter;
@@ -154,6 +157,8 @@ struct aq_nic_s {
struct mutex fwreq_mutex;
#if IS_ENABLED(CONFIG_MACSEC)
struct aq_macsec_cfg *macsec_cfg;
+ /* mutex to protect data in macsec_cfg */
+ struct mutex macsec_mutex;
#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
@@ -177,6 +182,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 797a95142d1f..8647125d60ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_suspend_common(struct device *dev, bool deep)
+static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
if (netif_running(nic->ndev))
aq_nic_stop(nic);
- if (deep) {
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- aq_nic_set_power(nic);
- }
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
rtnl_unlock();
return 0;
}
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, bool deep)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (deep) {
- /* Reinitialize Nic/Vecs objects */
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- }
-
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -444,22 +437,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 06de19f63287..80b44043e6c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1217,8 +1217,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
- netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
aq_ptp->idx_vector = idx_vec;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 77e76c9efd32..25129e723b57 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
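aq_get_rxpages_xdp takes an extra reference on every page backing the buffer (the head page and each fragment) before the packet leaves the ring; aq_get_rxpages() later treats a refcount above one as "shared, do not reuse yet". A small sketch of that refcount protocol (plain counters stand in for page_ref_count/page_ref_inc):

#include <stdio.h>

struct page { int refcount; };

/* Producer: take a reference per consumer before publishing. */
static void share_page(struct page *p) { p->refcount++; }

/* Ring refill: refcount == 1 means we are the only user and may reuse. */
static int can_reuse(const struct page *p) { return p->refcount == 1; }

int main(void)
{
        struct page pg = { .refcount = 1 };

        share_page(&pg);                        /* packet handed to XDP */
        printf("reuse=%d\n", can_reuse(&pg));   /* 0: must flip/realloc */
        pg.refcount--;                          /* consumer released it */
        printf("reuse=%d\n", can_reuse(&pg));   /* 1: ring may reuse */
        return 0;
}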
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@ -58,21 +81,26 @@ err_exit:
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* A refcount of one means the ring is the only user and may reuse the page */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
+
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop && buff->skb)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
- buff->skb = NULL;
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -339,14 +389,161 @@ static void aq_rx_checksum(struct aq_ring_s *self,
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
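aq_xdp_xmit above selects a TX ring from the current CPU modulo the vector count, and aq_nic_xmit_xdpf serializes on that ring's queue lock; ndo_xdp_xmit must report how many frames were actually queued, so busy frames are counted as drops. A reduced sketch of that contract (no locking shown; the ring state is illustrative):

#include <stdio.h>

#define NVECS 4

/* Per-CPU ring choice keeps unrelated CPUs off the same lock most of
 * the time; the xmit hook returns the number of frames it sent.
 */
static int xdp_xmit(int cpu, int num_frames, const int ring_full[NVECS])
{
        int ring = cpu % NVECS;
        int i, drop = 0;

        for (i = 0; i < num_frames; i++)
                if (ring_full[ring])    /* NETDEV_TX_BUSY */
                        drop++;

        return num_frames - drop;
}

int main(void)
{
        int full[NVECS] = { 0, 1, 0, 0 };

        printf("sent=%d\n", xdp_xmit(5, 8, full));      /* CPU 5 -> ring 1 */
        return 0;
}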
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ goto pass;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* Abort: multi-buffer packet, but the XDP program only handles single buffers */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+pass:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
+
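aq_xdp_run_prog above is the usual verdict dispatcher: XDP_PASS builds an skb for the stack, XDP_TX bounces the frame to a local TX ring, XDP_REDIRECT hands it to another device, and XDP_DROP/XDP_ABORTED only update counters (ABORTED also traces the exception). A skeleton of that switch reduced to its control flow:

#include <stdio.h>

enum verdict { V_ABORTED, V_DROP, V_PASS, V_TX, V_REDIRECT };

/* Reduced control flow of an XDP verdict dispatcher. */
static const char *handle_verdict(enum verdict act)
{
        switch (act) {
        case V_PASS:
                return "build an skb and deliver it up the stack";
        case V_TX:
                return "convert to an xdp_frame, transmit on a local ring";
        case V_REDIRECT:
                return "hand off via xdp_do_redirect()";
        case V_DROP:
                return "count the drop and recycle the buffer";
        default:        /* V_ABORTED and unknown verdicts */
                return "count, trace the exception, recycle";
        }
}

int main(void)
{
        printf("%s\n", handle_verdict(V_TX));
        return 0;
}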
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
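
aq_add_rx_fragment() attaches each non-EOP descriptor as a page fragment in the xdp_buff's shared info, accumulates xdp_frags_size, and marks the buffer multi-buffer via xdp_buff_set_frags_flag(); returning true tells the caller the chain overflowed MAX_SKB_FRAGS and must be dropped. A hedged sketch of how a consumer walks such a buffer:

        /* Sketch: iterating the fragments of a multi-buffer xdp_buff. */
        if (xdp_buff_has_frags(xdp)) {
                struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
                unsigned int i;

                for (i = 0; i < sinfo->nr_frags; i++) {
                        skb_frag_t *frag = &sinfo->frags[i];

                        /* skb_frag_page(frag), skb_frag_off(frag) and
                         * skb_frag_size(frag) describe each fragment. */
                }
        }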
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -364,12 +561,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -377,18 +579,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@ -446,16 +647,15 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
@@ -469,7 +669,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@ -510,6 +710,149 @@ err_exit:
return err;
}
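
The reworked RSC loop above now bounds the descriptor-chain walk: besides the existing next-index range check, exceeding MAX_SKB_FRAGS fragments aborts the clean pass. Both the link indices and the EOP flags live in device-written ring state, so the walk cannot trust them to terminate. The general shape of the defense (not the driver's exact error handling):

        /* Sketch: never let device-written link fields drive an unbounded walk. */
        unsigned int hops = 0;

        do {
                if (++hops > MAX_SKB_FRAGS || buff_->next >= ring->size)
                        goto bail;      /* drop or resynchronize, don't spin */
                buff_ = &ring->buff_ring[buff_->next];
        } while (!buff_->is_eop);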
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to 0 queue */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
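
The dispatcher selects the XDP or skb clean path through a static branch, so the per-packet cost is a patched jump rather than a load and test; the key is presumably flipped on XDP program attach and detach elsewhere in the driver. A sketch of the assumed key lifecycle (the definition site is an assumption, not shown in this hunk):

        /* Sketch: static-key dispatch, assuming the driver defines
         * DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key) elsewhere. */
        static_branch_inc(&aq_xdp_locking_key);         /* XDP program attached */
        /* ... */
        static_branch_dec(&aq_xdp_locking_key);         /* XDP program detached */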
+
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@ -529,7 +872,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@ -543,9 +885,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@ -600,6 +942,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
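
The new xdp_* counters follow the driver's existing u64_stats discipline: writers bracket updates with u64_stats_update_begin()/end() and readers spin on the fetch/retry pair, giving tear-free 64-bit counters on 32-bit SMP without a per-packet lock. The pairing in miniature:

        /* Sketch: u64_stats writer/reader pairing. */
        unsigned int start;
        u64 val;

        u64_stats_update_begin(&stats->syncp);          /* writer (rx path) */
        stats->xdp_pass++;
        u64_stats_update_end(&stats->syncp);

        do {                                            /* reader (stats dump) */
                start = u64_stats_fetch_begin_irq(&stats->syncp);
                val = stats->xdp_pass;
        } while (u64_stats_fetch_retry_irq(&stats->syncp, start));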
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 93659e58f1ce..0a6c34438c1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -11,6 +11,10 @@
#define AQ_RING_H
#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
struct page;
struct aq_nic_cfg_s;
@@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s {
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
};
/* TxC */
struct {
@@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s {
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct aq_ring_stats_tx_s {
@@ -132,10 +143,15 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
- unsigned int page_order;
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct aq_ring_param_s {
@@ -175,6 +191,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
@@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
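
AQ_XDP_HEADROOM and AQ_XDP_TAILROOM pin down the buffer geometry the XDP path depends on: headroom in front of the packet for the program and xdp_frame metadata, tailroom behind it for struct skb_shared_info, which is what allows a buffer to become an skb or xdp_frame without copying. The frame_sz fed to xdp_init_buff() in __aq_ring_xdp_clean() is presumably the sum of the three regions:

        /* Sketch of the assumed per-buffer layout:
         *
         * hard_start
         * | page_offset (>= AQ_XDP_HEADROOM) | data (frame_max) | tail_size |
         *                                                     (shared_info)
         */
        int frame_sz = ring->page_offset + ring->frame_max + ring->tail_size;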
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f4774cf051c9..f5db1c44e9b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
*/
#include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
struct aq_vec_s {
const struct aq_hw_ops *aq_hw_ops;
@@ -43,8 +38,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -124,8 +119,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->tx_rings = 0;
self->rx_rings = 0;
- netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
err_exit:
return self;
@@ -153,9 +147,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+ if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
idx_ring, aq_nic_cfg);
if (!ring) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
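
Each RX ring now registers an xdp_rxq_info before the ring itself is allocated, declares MEM_TYPE_PAGE_SHARED (driver-recycled page fragments) as its memory model, and unregisters on every failure path as well as in aq_vec_ring_free() below; XDP_REDIRECT relies on this registration to know how to return buffers to their source. The assumed lifecycle, reduced to its shape:

        /* Sketch: per-RX-queue xdp_rxq_info lifecycle. */
        err = xdp_rxq_info_reg(&ring->xdp_rxq, ndev, queue_idx, napi_id);
        if (err)
                goto fail;
        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                         MEM_TYPE_PAGE_SHARED, NULL);
        if (err) {
                xdp_rxq_info_unreg(&ring->xdp_rxq);
                goto fail;
        }
        /* ... and on teardown: */
        xdp_rxq_info_unreg(&ring->xdp_rxq);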
@@ -182,8 +190,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +232,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +256,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +276,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,11 +305,13 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- if (i < self->rx_rings)
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
}
self->tx_rings = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a2..78fac609b71d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
#define AQ_VEC_H
#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
struct aq_hw_s;
struct aq_hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb79499..9dfd68f0fda9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..54e70f07b573 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
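
hw_head_ is read back from the device, so it is now validated against the ring size before it becomes the software head pointer; an out-of-range value from a misbehaving or hostile device would otherwise index past buff_ring on the next clean cycle. As a general pattern:

        /* Sketch: treat device-supplied indices as untrusted input. */
        if (hw_head >= ring->size)
                return -ENXIO;          /* refuse rather than index out of bounds */
        ring->hw_head = hw_head;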
@@ -969,15 +976,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 36c7cf05630a..431924959520 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
u16 table_index)
{
u16 packed_record[18];
+ int ret;
if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
return -EINVAL;
@@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
packed_record[16] = rec->key_len & 0x3;
- return set_raw_ingress_record(hw, packed_record, 18, 2,
- ROWOFFSET_INGRESSSAKEYRECORD +
- table_index);
+ ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,
ret = set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index);
if (unlikely(ret))
- return ret;
+ goto clear_key;
ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index -
32);
- if (unlikely(ret))
- return ret;
- return 0;
+clear_key:
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
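
Both SA-key setters now wipe the stack copy of the key on every exit path (note how the egress variant converts an early return into a goto so the wipe is unconditional). A plain memset() on a local that is about to go out of scope may be elided by the compiler; memzero_explicit() carries a barrier that keeps the wipe in place. The pattern, with a hypothetical helper standing in for the real work:

        /* Sketch: scrubbing key material from a stack buffer on all paths. */
        static int handle_key_example(const u8 *key, size_t len)
        {
                u8 tmp[32];
                int ret;

                ret = use_key_copy(tmp, key, len);      /* hypothetical helper */
                memzero_explicit(tmp, sizeof(tmp));     /* survives optimization */
                return ret;
        }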
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
index b6119dcc3bb9..c2fda80fe1cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -158,7 +158,7 @@ struct aq_mss_egress_class_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted field will assume the
* SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! 0: don't care and no LLC header exist.
@@ -422,7 +422,7 @@ struct aq_mss_ingress_preclass_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted field will assume the
* SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! Mask is per-byte.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index c642c3d3e600..ba0646b3b122 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -91,7 +91,7 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
@@ -981,7 +981,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
dev_info(dev, "connected to %s phy with id 0x%x\n",
phydev->drv->name, phydev->phy_id);
- netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, arc_emac_poll,
+ ARC_EMAC_NAPI_WEIGHT);
err = register_netdev(ndev);
if (err) {
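
The netif_napi_add() conversions scattered through this series track a core API change: the weight argument was dropped from netif_napi_add(), which now always uses the default NAPI_POLL_WEIGHT of 64; netif_napi_add_weight() keeps an explicit weight for the few drivers that need a non-default one; and netif_napi_add_tx() replaces netif_tx_napi_add() for TX-completion pollers. The three shapes, as assumed here:

        /* Sketch: post-conversion NAPI registration variants. */
        netif_napi_add(ndev, &priv->napi, my_poll);             /* default weight */
        netif_napi_add_weight(ndev, &priv->napi, my_poll, 16);  /* explicit weight */
        netif_napi_add_tx(ndev, &priv->tx_napi, my_tx_poll);    /* TX NAPI */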
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 9acf589b1178..87f40c2ba904 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
{
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
+ const char *name = "Synopsys MII Bus";
struct mii_bus *bus;
int error;
@@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus";
+ bus->name = name;
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
@@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
- "cannot register MDIO bus %s\n", bus->name);
+ "cannot register MDIO bus %s\n", name);
}
return 0;
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e7a9f9863258..8b7cdf015a16 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -293,7 +293,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
skb_put(skb, padlen);
/* EOP header */
- memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+ skb_put_data(skb, &info.eop, TX_EOP_SIZE);
skb_unlink(skb, q);
@@ -381,7 +381,7 @@ static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
return 1;
}
-static int
+static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
@@ -433,7 +433,7 @@ ax88796c_skb_return(struct ax88796c_device *ax_local,
netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
- status = netif_rx_ni(skb);
+ status = netif_rx(skb);
if (status != NET_RX_SUCCESS && net_ratelimit())
netif_info(ax_local, rx_err, ndev,
"netif_rx status %d\n", status);
@@ -1102,7 +1102,7 @@ err:
return ret;
}
-static int ax88796c_remove(struct spi_device *spi)
+static void ax88796c_remove(struct spi_device *spi)
{
struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
struct net_device *ndev = ax_local->ndev;
@@ -1112,8 +1112,6 @@ static int ax88796c_remove(struct spi_device *spi)
netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
dev_driver_string(&spi->dev),
dev_name(&spi->dev));
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index ec167af0e3b2..cc932b3cf873 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -451,8 +451,8 @@ static void ag71xx_get_drvinfo(struct net_device *ndev,
{
struct ag71xx *ag = netdev_priv(ndev);
- strlcpy(info->driver, "ag71xx", sizeof(info->driver));
- strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ strscpy(info->driver, "ag71xx", sizeof(info->driver));
+ strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
sizeof(info->bus_info));
}
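
The strlcpy() to strscpy() sweep is mechanical but not cosmetic: strlcpy() returns strlen(src) and therefore always reads the whole source, even past the destination size, while strscpy() stops at the buffer bound, guarantees NUL termination, and reports truncation. Return-value convention in brief:

        /* Sketch: strscpy() reports what strlcpy() could not. */
        char dst[16];
        ssize_t n = strscpy(dst, src, sizeof(dst));

        if (n == -E2BIG)
                ;       /* truncated, but dst is still NUL-terminated */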
@@ -786,7 +786,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
struct ag71xx_ring *ring = &ag->tx_ring;
int sent = 0, bytes_compl = 0, n = 0;
@@ -825,7 +825,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
if (!skb)
continue;
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
ring->buf[i].tx.skb = NULL;
bytes_compl += ring->buf[i].tx.len;
@@ -946,7 +946,7 @@ static unsigned int ag71xx_max_frame_len(unsigned int mtu)
return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
-static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
u32 t;
@@ -970,7 +970,7 @@ static void ag71xx_fast_reset(struct ag71xx *ag)
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- ag71xx_tx_packets(ag, true);
+ ag71xx_tx_packets(ag, true, 0);
reset_control_assert(ag->mac_reset);
usleep_range(10, 20);
@@ -1657,7 +1657,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
@@ -1703,7 +1703,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
int tx_done, rx_done;
u32 status;
- tx_done = ag71xx_tx_packets(ag, false);
+ tx_done = ag71xx_tx_packets(ag, false, limit);
netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
rx_done = ag71xx_rx_packets(ag, limit);
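
The ag71xx changes thread the NAPI budget into TX reclaim so completed skbs can go through napi_consume_skb(), and build RX skbs with napi_build_skb(); both lean on per-CPU NAPI caches. Passing budget == 0, as the flush and reset paths do, makes napi_consume_skb() fall back to a context-safe free, since those callers may not be in softirq context.

        /* Sketch: budget-aware skb handling in a poll routine. */
        skb = napi_build_skb(buf, buf_size);    /* head from the per-CPU cache */
        /* ... */
        napi_consume_skb(skb, budget);          /* budget == 0: safe fallback */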
@@ -1922,7 +1922,8 @@ static int ag71xx_probe(struct platform_device *pdev)
return err;
}
- netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
+ AG71XX_NAPI_WEIGHT);
err = clk_prepare_enable(ag->clk_eth);
if (err) {
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ad3fc72e74e..d30d11872719 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -752,7 +752,7 @@ static int alx_alloc_napis(struct alx_priv *alx)
goto err_out;
np->alx = alx;
- netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ netif_napi_add(alx->dev, &np->napi, alx_poll);
alx->qnapi[i] = np;
}
@@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ mutex_lock(&alx->mtx);
alx_reinit(alx);
+ mutex_unlock(&alx->mtx);
+ }
return 0;
}
@@ -1909,11 +1912,14 @@ static int alx_suspend(struct device *dev)
if (!netif_running(alx->dev))
return 0;
+
+ rtnl_lock();
netif_device_detach(alx->dev);
mutex_lock(&alx->mtx);
__alx_stop(alx);
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return 0;
}
@@ -1924,6 +1930,7 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ rtnl_lock();
mutex_lock(&alx->mtx);
alx_reset_phy(hw);
@@ -1940,6 +1947,7 @@ static int alx_resume(struct device *dev)
unlock:
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index e2eb7b8c63a0..0bce122c68f1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -220,8 +220,8 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..40c781695d58 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
@@ -1051,7 +1051,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
@@ -2072,7 +2072,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -2107,7 +2107,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2132,7 +2132,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2219,7 +2219,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2731,10 +2732,10 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_set_threaded(netdev, true);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
- atl1c_clean_rx, 64);
+ atl1c_clean_rx);
for (i = 0; i < adapter->tx_queue_count; ++i)
- netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
- atl1c_clean_tx, 64);
+ netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2849,7 +2850,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
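
The recurring skb_transport_offset(skb) + tcp_hdrlen(skb) expression across the Atheros TSO paths (atl1c here, atl1e and atl1 below) is replaced by the skb_tcp_all_headers() helper, with skb_inner_tcp_all_headers() covering encapsulated traffic in the bnx2x hunks further down. The helper simply names the MAC + IP + TCP header total that TSO setup keeps recomputing; its assumed expansion:

        /* Sketch of what the helper is assumed to compute: */
        static inline int all_tcp_headers_len(const struct sk_buff *skb)
        {
                return skb_transport_offset(skb) + tcp_hdrlen(skb);
        }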
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 0cbde352d1ba..68f1832a198d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -306,9 +306,9 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 56e5f440e666..5db0f3495a32 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1609,8 +1609,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
@@ -1645,7 +1644,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
@@ -1713,7 +1712,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
@@ -2354,7 +2354,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
- netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean);
timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
@@ -2395,7 +2395,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
- netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+ netif_set_tso_max_size(netdev, MAX_TSO_SEG_SIZE);
err = register_netdev(netdev);
if (err) {
netdev_err(netdev, "register netdevice failed\n");
@@ -2482,7 +2482,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 6a969969d221..c8444bcdf527 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2115,7 +2115,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
ntohs(iph->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
@@ -2206,7 +2206,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
@@ -2367,8 +2367,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2978,7 +2977,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl1_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
netdev->ethtool_ops = &atl1_ethtool_ops;
adapter->bd_number = cards_found;
@@ -3341,8 +3340,8 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bbc4d7b08a49..1b487c071cb6 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1980,9 +1980,9 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 56e0fb07aec7..55dfdb34e37b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -53,8 +53,8 @@ config B44_PCI
config BCM4908_ENET
tristate "Broadcom BCM4908 internal mac support"
- depends on ARCH_BCM4908 || COMPILE_TEST
- default y if ARCH_BCM4908
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ default y if ARCH_BCMBCA
help
This driver supports Ethernet controller integrated into Broadcom
BCM4908 family SoCs.
@@ -77,7 +77,7 @@ config BCMGENET
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
select DIMLIB
- select BROADCOM_PHY if ARCH_BCM2835
+ select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL)
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e5857e88c207..7f876721596c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1790,13 +1790,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
- strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
- strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
@@ -2375,7 +2375,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
- netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ netif_napi_add(dev, &bp->napi, b44_poll);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 4a2622b05ee1..a737b1913cf9 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -507,7 +507,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
return 0;
}
-static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
@@ -561,8 +561,6 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
if (++ring->write_idx == ring->length - 1)
ring->write_idx = 0;
- enet->netdev->stats.tx_bytes += skb->len;
- enet->netdev->stats.tx_packets++;
return NETDEV_TX_OK;
}
@@ -635,6 +633,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot *slot;
struct device *dev = enet->dev;
+ unsigned int bytes = 0;
int handled = 0;
while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
@@ -645,12 +644,17 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
dev_kfree_skb(slot->skb);
- if (++tx_ring->read_idx == tx_ring->length)
- tx_ring->read_idx = 0;
handled++;
+ bytes += slot->len;
+
+ if (++tx_ring->read_idx == tx_ring->length)
+ tx_ring->read_idx = 0;
}
+ enet->netdev->stats.tx_packets += handled;
+ enet->netdev->stats.tx_bytes += bytes;
+
if (handled < weight) {
napi_complete_done(napi, handled);
bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
@@ -716,24 +720,29 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
err = of_get_ethdev_address(dev->of_node, netdev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma_free;
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
netdev->min_mtu = ETH_ZLEN;
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
- netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
- netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
err = register_netdev(netdev);
- if (err) {
- bcm4908_enet_dma_free(enet);
- return err;
- }
+ if (err)
+ goto err_dma_free;
platform_set_drvdata(pdev, enet);
return 0;
+
+err_dma_free:
+ bcm4908_enet_dma_free(enet);
+
+ return err;
}
static int bcm4908_enet_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b04e423c446a..d91fdb0c2649 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -388,7 +388,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_buf_size, DMA_FROM_DEVICE);
priv->rx_buf[desc_idx] = NULL;
- skb = build_skb(buf, priv->rx_frag_size);
+ skb = napi_build_skb(buf, priv->rx_frag_size);
if (unlikely(!skb)) {
skb_free_frag(buf);
dev->stats.rx_dropped++;
@@ -423,7 +423,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/*
* try to or force reclaim of transmitted buffers
*/
-static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
struct bcm_enet_priv *priv;
unsigned int bytes;
@@ -468,7 +468,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
dev->stats.tx_errors++;
bytes += skb->len;
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, budget);
released++;
}
@@ -499,7 +499,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0, budget);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
@@ -1211,7 +1211,7 @@ static int bcm_enet_stop(struct net_device *dev)
bcm_enet_disable_mac(priv);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -1321,8 +1321,8 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -1716,17 +1716,17 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_irq, *res_irq_rx, *res_irq_tx;
+ int irq, irq_rx, irq_tx;
struct mii_bus *bus;
int i, ret;
if (!bcm_enet_shared_base[0])
return -EPROBE_DEFER;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_irq || !res_irq_rx || !res_irq_tx)
+ irq = platform_get_irq(pdev, 0);
+ irq_rx = platform_get_irq(pdev, 1);
+ irq_tx = platform_get_irq(pdev, 2);
+ if (irq < 0 || irq_rx < 0 || irq_tx < 0)
return -ENODEV;
dev = alloc_etherdev(sizeof(*priv));
@@ -1748,9 +1748,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
goto out;
}
- dev->irq = priv->irq = res_irq->start;
- priv->irq_rx = res_irq_rx->start;
- priv->irq_tx = res_irq_tx->start;
+ dev->irq = priv->irq = irq;
+ priv->irq_rx = irq_rx;
+ priv->irq_tx = irq_tx;
priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
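
bcm63xx_enet stops fishing interrupts out of IORESOURCE_IRQ resources and calls platform_get_irq() instead: on DT systems the interrupt may not exist as a pre-populated resource at all, and platform_get_irq() returns the mapped virq or a negative errno, including -EPROBE_DEFER, which a resource lookup could never convey (the driver here still collapses all failures to -ENODEV). A common shape for the conversion:

        /* Sketch: the assumed replacement pattern. */
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;     /* propagates -EPROBE_DEFER and friends */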
@@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enet_ops;
- netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+ netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enet_ethtool_ops;
/* MTU range: 46 - 2028 */
@@ -1935,7 +1935,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enet_driver = {
+static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
.remove = bcm_enet_remove,
.driver = {
@@ -2362,7 +2362,7 @@ static int bcm_enetsw_stop(struct net_device *dev)
bcm_enet_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -2714,7 +2714,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
- netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+ netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -2756,7 +2756,7 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enetsw_driver = {
+static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
.remove = bcm_enetsw_remove,
.driver = {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 60dde29974bf..425d6ccd5413 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -308,8 +308,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -1517,7 +1517,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* Initialize SW view of the ring */
spin_lock_init(&ring->lock);
ring->priv = priv;
- netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
ring->index = index;
ring->size = size;
ring->clean_index = 0;
@@ -1991,6 +1991,9 @@ static int bcm_sysport_open(struct net_device *dev)
goto out_clk_disable;
}
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
+
/* Reset house keeping link status */
priv->old_duplex = -1;
priv->old_link = -1;
@@ -2180,13 +2183,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
return -EOPNOTSUPP;
- /* All filters are already in use, we cannot match more rules */
- if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
- RXCHK_BRCM_TAG_MAX)
- return -ENOSPC;
-
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
if (index >= RXCHK_BRCM_TAG_MAX)
+ /* All filters are already in use, we cannot match more rules */
return -ENOSPC;
/* Location is the classification ID, and index is the position
@@ -2568,7 +2567,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
dev->netdev_ops = &bcm_sysport_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2585,8 +2584,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 16b73bb9acc7..5af16e5f9ad0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -484,7 +484,7 @@ struct bcm_rsb {
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
-#define SP_LT_NUM_HW_RX_DESC_WORDS 256
+#define SP_LT_NUM_HW_RX_DESC_WORDS 512
/* Internal linked-list RAM size */
#define SP_NUM_TX_DESC 1536
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 086739e4f40a..9b83d5361699 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
np = of_get_child_by_name(core->dev.of_node, "mdio");
err = of_mdiobus_register(mii_bus, np);
+ of_node_put(np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
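
of_get_child_by_name() returns the node with its refcount elevated, and the caller must drop that reference with of_node_put() once it is done with the node; the new call releases it immediately after the registration attempt, on success and failure alike.

        /* Sketch: balancing the of_get_child_by_name() reference. */
        np = of_get_child_by_name(parent, "mdio");
        err = of_mdiobus_register(mii_bus, np);
        of_node_put(np);        /* drop our reference regardless of err */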
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index e6f48786949c..02bd3cf9a260 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
- kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {
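
Dropping the kfree() fixes a double free: bgmac is allocated by bgmac_alloc() as the private area of the net_device (alloc_etherdev(sizeof(*bgmac))), so the free_netdev() performed inside bgmac_enet_remove() already releases it. The assumed ownership:

        /* Sketch: bgmac lives inside the net_device it drives. */
        net_dev = alloc_etherdev(sizeof(*bgmac));
        bgmac = netdev_priv(net_dev);
        /* ... */
        free_netdev(net_dev);   /* frees bgmac too; no separate kfree() */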
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..b4381cd41979 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 7b525c65bacb..5fb3af5670ec 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
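
In bgmac_dma_tx_add() the byte-queue-limits accounting moves ahead of the ring->end publication: once ring->end advances (and the barrier and doorbell follow), the completion path can reclaim the frame and call netdev_completed_queue(), so the sent bytes must already be on the books or BQL's sent/completed counters drift. The invariant in outline:

        /* Sketch: BQL ordering between xmit and completion. */
        netdev_sent_queue(net_dev, skb->len);   /* account first ... */
        ring->end += nr_frags + 1;              /* ... then expose to completion */
        /* later, from the completion path: */
        netdev_completed_queue(net_dev, pkts_compl, bytes_compl);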
@@ -1367,7 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
@@ -1395,8 +1395,8 @@ static void bgmac_get_ethtool_stats(struct net_device *dev,
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
@@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);
err = bgmac_phy_connect(bgmac);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 110088e662ea..e05ac92c0650 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -364,8 +364,6 @@
#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040
#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080
-#define BGMAC_WEIGHT 64
-
#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e20aafeb4ca9..fec57f1982c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -176,12 +176,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
- "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ "Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
- "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ "Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@@ -7042,9 +7042,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting\n");
+ "dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
@@ -8522,7 +8522,7 @@ bnx2_init_napi(struct bnx2 *bp)
else
poll = bnx2_poll_msix;
- netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
bnapi->bp = bp;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a19dd6797070..dd5945c4bfec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1271,7 +1271,7 @@ struct bnx2x_fw_stats_data {
struct per_port_stats port;
struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[];
};
/* Public slow path states */
@@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-int bnx2x_init_firmware(struct bnx2x *bp);
-void bnx2x_release_firmware(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8d36ebbf08e1..16c490692f42 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -44,8 +44,7 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -55,8 +54,7 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -150,7 +148,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(buf, bp->fw_ver, buf_len);
+ strscpy(buf, bp->fw_ver, buf_len);
snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
@@ -789,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
@@ -2364,24 +2363,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* build my FW version dword */
- u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
- (bp->fw_rev << 16) + (bp->fw_eng << 24);
+ u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+ u32 loaded_fw;
/* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
- DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
- loaded_fw, my_fw);
+ loaded_fw_major = loaded_fw & 0xff;
+ loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+ loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+ loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+ DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+ loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
/* abort nic load if version mismatch */
- if (my_fw != loaded_fw) {
+ if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+ loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+ loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+ loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
if (print_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ BNX2X_ERR("loaded FW incompatible. Aborting\n");
else
- BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
- loaded_fw, my_fw);
+ BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
return -EBUSY;
}
}
@@ -3415,12 +3420,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
@@ -3528,15 +3530,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
@@ -3562,12 +3562,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
/* set FW indication according to inner or outer protocols if tunneled */
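/* The hunks above fold open-coded header arithmetic into the
 * skb_tcp_all_headers() helpers; a sketch of the equivalence being
 * relied on (the helpers live in <linux/tcp.h> in kernels of this era):
 */
#include <linux/tcp.h>

static int my_tso_header_len(const struct sk_buff *skb, bool encap)
{
	/* open-coded equivalents:
	 *   skb_transport_offset(skb) + tcp_hdrlen(skb)
	 *   skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb)
	 */
	return encap ? skb_inner_tcp_all_headers(skb)
		     : skb_tcp_all_headers(skb);
}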
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 0e319ac7799f..bda3ccc28eca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,7 +1112,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
int ext_dev_info_offset;
u32 mbi;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -1126,7 +1126,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
(mbi & 0xff000000) >> 24,
(mbi & 0x00ff0000) >> 16,
(mbi & 0x0000ff00) >> 8);
- strlcpy(info->fw_version, version,
+ strscpy(info->fw_version, version,
sizeof(info->fw_version));
}
}
@@ -1135,7 +1135,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
strlcat(info->fw_version, version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4e85e7dbc2be..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6178,7 +6178,8 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+ num & 0xFFFF);
*len -= ret;
return 0;
}
@@ -6193,7 +6194,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+ (num >> 8) & 0xFF, num & 0xFF);
*len -= ret;
return 0;
}
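/* A sketch of the format-specifier fix above: %hx/%hhx are deprecated
 * in the kernel since varargs promote to int anyway, so plain %x with
 * explicit masking is preferred and makes the truncation visible.
 */
#include <linux/kernel.h>

static void my_format_ver(u32 num, char *str, size_t len)
{
	/* was: scnprintf(str, len, "%hx.%hx", num >> 16, num); */
	scnprintf(str, len, "%x.%x", (num >> 16) & 0xffff, num & 0xffff);
}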
@@ -13842,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 774c1f1a57c3..51b1690fd045 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
@@ -3382,7 +3385,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
@@ -12316,15 +12319,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bnx2x_read_fwinfo(bp);
- if (IS_PF(bp)) {
- rc = bnx2x_init_firmware(bp);
-
- if (rc) {
- bnx2x_free_mem_bp(bp);
- return rc;
- }
- }
-
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
@@ -12337,7 +12331,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_prev_unload(bp);
if (rc) {
- bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
return rc;
}
@@ -13406,7 +13399,7 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -13506,7 +13499,7 @@ request_firmware_exit:
return rc;
}
-void bnx2x_release_firmware(struct bnx2x *bp)
+static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
@@ -14023,7 +14016,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_freemem:
- bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
init_one_exit:
@@ -14161,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
@@ -14269,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5caa75b41b73..4e9215bce4ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2212,7 +2212,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2381,7 +2381,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2493,7 +2493,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2529,7 +2529,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -6218,7 +6218,7 @@
#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
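/* Note on the SCPAD_PARITY change above: shifting a signed 1 into bit
 * 31 overflows a signed int, which is undefined behaviour in C, so
 * bit-31 masks take an unsigned constant.
 */
#define MY_BIT31_OK (0x1U << 31)	/* unsigned shift: well-defined */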
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 2dac704dc346..02a4e557e176 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -518,7 +518,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c9129b9ba446..0657a0f5170f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -380,7 +380,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4f94136a011a..c78b6e9dea2c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
+#include <linux/align.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -233,6 +234,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -369,7 +371,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
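/* A sketch of the drop-counter change above:
 * dev_core_stats_tx_dropped_inc() bumps a per-cpu counter, replacing
 * the contended atomic_long_inc() on dev->tx_dropped. The function
 * name below is hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_drop_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);
	dev_core_stats_tx_dropped_inc(dev);	/* per-cpu, no shared atomic */
	return NETDEV_TX_OK;
}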
@@ -533,12 +535,9 @@ normal_tx:
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -645,7 +644,7 @@ tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -660,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -669,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -689,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
+ skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -699,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
@@ -737,7 +736,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
- *mapping += bp->rx_dma_offset;
return page;
}
@@ -779,6 +777,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (!page)
return -ENOMEM;
+ mapping += bp->rx_dma_offset;
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
@@ -839,33 +838,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+ if (!page)
+ return -ENOMEM;
+
+ } else {
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
}
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
}
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -961,6 +968,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int len = offset_and_len & 0xffff;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+ bp->rx_dma_offset);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bp->rx_dma_offset);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
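/* bnxt_rx_multi_page_skb() above relies on skb_mark_for_recycle():
 * instead of detaching the page from its page_pool before building
 * the skb, the skb is flagged so page_pool pages return to the pool
 * when it is freed. A reduced sketch of the pattern:
 */
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

static struct sk_buff *my_build_rx_skb(struct page *page,
				       unsigned int headroom,
				       unsigned int len)
{
	struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

	if (!skb)
		return NULL;
	skb_mark_for_recycle(skb);	/* pages recycle to their pool on free */
	skb_reserve(skb, headroom);
	__skb_put(skb, len);
	return skb;
}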
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 cons, void *data, u8 *data_ptr,
@@ -983,7 +1023,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -994,6 +1033,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
+ skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1037,22 +1077,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct skb_shared_info *shinfo,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ u32 i, total_frag_len = 0;
bool p5_tpa = false;
- u32 i;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
u16 cons, frag_len;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1068,8 +1110,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
+ skb_frag_off_set(frag, cons_rx_buf->offset);
+ skb_frag_size_set(frag, frag_len);
+ __skb_frag_set_page(frag, cons_rx_buf->page);
+ shinfo->nr_frags = i + 1;
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1080,16 +1124,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
+ if (xdp && page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
- struct skb_shared_info *shinfo;
unsigned int nr_frags;
- shinfo = skb_shinfo(skb);
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
- dev_kfree_skb(skb);
-
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been
@@ -1097,23 +1139,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
- return NULL;
+ return 0;
}
dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- DMA_FROM_DEVICE,
+ bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- skb->data_len += frag_len;
- skb->len += frag_len;
- skb->truesize += PAGE_SIZE;
-
+ total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 total_frag_len = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+ agg_bufs, tpa, NULL);
+ if (!total_frag_len) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb->data_len += total_frag_len;
+ skb->len += total_frag_len;
+ skb->truesize += PAGE_SIZE * agg_bufs;
return skb;
}
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 total_frag_len = 0;
+
+ if (!xdp_buff_has_frags(xdp))
+ shinfo->nr_frags = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+ idx, agg_bufs, tpa, xdp);
+ if (total_frag_len) {
+ xdp_buff_set_frags_flag(xdp);
+ shinfo->nr_frags = agg_bufs;
+ shinfo->xdp_frags_size = total_frag_len;
+ }
+ return total_frag_len;
+}
+
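/* bnxt_rx_agg_pages_xdp() above is part of multi-buffer XDP support:
 * aggregation pages become fragments in the xdp_buff's shared info
 * and the frags flag tells programs (and skb conversion) that the
 * buffer is fragmented. A sketch of attaching one fragment, using the
 * frag helpers of this kernel era:
 */
#include <net/xdp.h>

static void my_xdp_add_frag(struct xdp_buff *xdp, struct page *page,
			    u32 offset, u32 len)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];

	skb_frag_off_set(frag, offset);
	skb_frag_size_set(frag, len);
	__skb_frag_set_page(frag, page);
	sinfo->xdp_frags_size += len;
	xdp_buff_set_frags_flag(xdp);
}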
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
@@ -1643,7 +1724,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1728,8 +1809,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
+ bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 flags, misc;
void *data;
int rc = 0;
@@ -1838,18 +1921,39 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
- if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
- rc = 1;
- goto next_rx;
+ if (bnxt_xdp_attached(bp, rxr)) {
+ bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+ if (agg_bufs) {
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+ if (!frag_len) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+ xdp_active = true;
+ }
+
+ if (xdp_active) {
+ if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
- if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
- agg_bufs, false);
+ if (agg_bufs) {
+ if (!xdp_active)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
@@ -1871,11 +1975,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ if (!skb) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
}
}
@@ -1922,7 +2037,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
- RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
+ RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts;
@@ -2079,6 +2194,16 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
(BNXT_EVENT_RING_TYPE(data2) == \
ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
+
+#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
+
+#define BNXT_PHC_BITS 48
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -2258,6 +2383,24 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_event_error_report(bp, data1, data2);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 ns;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(bp);
+ ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+ BNXT_PHC_BITS) | ptp->current_time);
+ bnxt_ptp_rtc_timecounter_init(ptp, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+ break;
+ }
+ goto async_event_process_exit;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
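/* The PHC_UPDATE handler above rebuilds a 64-bit RTC time: the event
 * carries the bits above BNXT_PHC_BITS (48), which are combined with
 * the driver's cached lower 48 bits. A worked sketch:
 */
#include <linux/bits.h>
#include <linux/types.h>

#define MY_PHC_BITS 48

static u64 my_phc_full_time(u64 msb_from_event, u64 lower_bits)
{
	/* e.g. msb 0x2, lower 0x123456789abc -> 0x0002123456789abc */
	return (msb_from_event << MY_PHC_BITS) |
	       (lower_bits & GENMASK_ULL(MY_PHC_BITS - 1, 0));
}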
@@ -2463,10 +2606,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- if (bnapi->events & BNXT_AGG_EVENT)
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
+ if (bnapi->events & BNXT_AGG_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ }
bnapi->events = 0;
}
@@ -2678,6 +2824,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
struct bnxt_cp_ring_info *cpr2;
+ /* No more budget for RX work */
+ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ break;
+
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
@@ -2843,14 +2993,23 @@ skip_rx_buf_free:
if (!page)
continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- rx_agg_buf->page = NULL;
- __clear_bit(i, rxr->rx_agg_bmap);
+ page_pool_recycle_direct(rxr->page_pool, page);
+ } else {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
+ __free_page(page);
+ }
}
skip_rx_agg_free:
@@ -3224,6 +3383,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
+ spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -3714,7 +3874,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
if (bp->vnic_info[i].rss_hash_key) {
if (i == 0)
- prandom_bytes(vnic->rss_hash_key,
+ get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
else
memcpy(vnic->rss_hash_key,
@@ -3763,7 +3923,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
- rx_space = rx_size + NET_SKB_PAD +
+ rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3804,9 +3964,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
- rx_space = rx_size + NET_SKB_PAD +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ rx_space = BNXT_PAGE_MODE_BUF_SIZE;
+ rx_size = BNXT_MAX_PAGE_MODE_MTU;
+ } else {
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
}
bp->rx_buf_use_size = rx_size;
@@ -3847,14 +4013,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
- if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
- return -EOPNOTSUPP;
- bp->dev->max_mtu =
- min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
- bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->rx_skb_func = bnxt_rx_multi_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ } else {
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ }
bp->rx_dir = DMA_BIDIRECTIONAL;
- bp->rx_skb_func = bnxt_rx_page_skb;
/* Disable LRO or GRO_HW */
netdev_update_features(bp->dev);
} else {
@@ -4304,7 +4477,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
}
}
if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
+ bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
@@ -4323,9 +4496,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
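/* A sketch of the bitmap API conversion above: bitmap_zalloc() takes
 * a size in bits and pairs with bitmap_free(), replacing the
 * open-coded kcalloc(BITS_TO_LONGS(n), sizeof(long), ...) pattern.
 * The macro name below is hypothetical.
 */
#include <linux/bitmap.h>
#include <linux/slab.h>

#define MY_MAX_FLTR 128

static unsigned long *my_alloc_fltr_bmap(void)
{
	return bitmap_zalloc(MY_MAX_FLTR, GFP_KERNEL);
}

static void my_free_fltr_bmap(unsigned long *bmap)
{
	bitmap_free(bmap);	/* NULL-safe, like kfree() */
}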
@@ -4747,8 +4918,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return rc;
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
req->mask = cpu_to_le32(vnic->rx_mask);
return hwrm_req_send_silent(bp, req);
}
@@ -5194,12 +5367,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
- req->enables =
- cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
- VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
+ if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ }
/* thresholds not implemented in firmware yet */
req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
@@ -7414,6 +7590,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ bool phc_cfg;
u8 flags;
int rc;
@@ -7456,7 +7633,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto exit;
}
- rc = bnxt_ptp_init(bp);
+ phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp, phc_cfg);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -7476,7 +7654,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext;
+ u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -7514,11 +7692,17 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ flags_ext2 = le32_to_cpu(resp->flags_ext2);
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
BNXT_FW_MAJ(bp) > 217)
@@ -7601,7 +7785,7 @@ hwrm_dbg_qcaps_exit:
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
@@ -7787,6 +7971,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return 0;
}
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8639,6 +8836,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -8648,7 +8848,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -8659,6 +8859,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9165,16 +9366,16 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
+ bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
}
}
@@ -9248,7 +9449,7 @@ void bnxt_tx_enable(struct bnxt *bp)
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
}
@@ -9278,7 +9479,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
void bnxt_report_link(struct bnxt *bp)
{
- if (bp->link_info.link_up) {
+ if (BNXT_LINK_IS_UP(bp)) {
const char *signal = "";
const char *flow_ctrl;
const char *duplex;
@@ -9364,7 +9565,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
- bp->phy_flags = resp->flags;
+ bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
@@ -9414,7 +9615,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_output *resp;
struct hwrm_port_phy_qcfg_input *req;
- u8 link_up = link_info->link_up;
+ u8 link_state = link_info->link_state;
bool support_changed = false;
int rc;
@@ -9515,14 +9716,14 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
- link_info->link_up = 1;
+ link_info->link_state = BNXT_LINK_STATE_UP;
else
- link_info->link_up = 0;
- if (link_up != link_info->link_up)
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
+ if (link_state != link_info->link_state)
bnxt_report_link(bp);
} else {
- /* alwasy link down if not require to update link state */
- link_info->link_up = 0;
+ /* always link down if not required to update link state */
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
}
hwrm_req_drop(bp, req);
@@ -9722,7 +9923,18 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return rc;
req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
- return hwrm_req_send(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ /* Device is not obliged to bring the link down in certain scenarios, even
+ * when forced. Setting the state unknown is consistent with
+ * driver startup and will force link state to be reported
+ * during subsequent open based on PORT_PHY_QCFG.
+ */
+ bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
}
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
@@ -9771,17 +9983,12 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int rc;
if (!BNXT_NEW_RM(bp))
- return 0; /* no resource reservations required */
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- if (rc)
- netdev_err(bp->dev, "resc_qcaps failed\n");
+ return; /* no resource reservations required */
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
@@ -9794,6 +10001,20 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
+}
+
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ bnxt_clear_reservations(bp, fw_reset);
return rc;
}
@@ -9848,10 +10069,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
- if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+ test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
- else if (bp->fw_health && !bp->fw_health->status_reliable)
- bnxt_try_map_fw_health_reg(bp);
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10153,7 +10375,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!bp->link_info.link_up)
+ if (!BNXT_LINK_IS_UP(bp))
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -10275,6 +10497,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (irq_re_init)
udp_tunnel_nic_reset_ntf(bp->dev);
+ if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+ if (!static_key_enabled(&bnxt_xdp_locking_key))
+ static_branch_enable(&bnxt_xdp_locking_key);
+ } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+ static_branch_disable(&bnxt_xdp_locking_key);
+ }
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
@@ -10288,6 +10516,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
open_err_irq:
@@ -10330,13 +10560,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
goto half_open_err;
}
- rc = bnxt_alloc_mem(bp, false);
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10344,7 +10576,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
dev_close(bp->dev);
return rc;
}
@@ -10354,9 +10586,10 @@ half_open_err:
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10430,7 +10663,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
@@ -10772,7 +11005,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
@@ -10849,9 +11082,10 @@ skip_uc:
!bnxt_promisc_ok(bp))
vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
@@ -10908,7 +11142,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_CHIP_P5)
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -10953,7 +11187,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
@@ -11380,7 +11614,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && bp->stats_coal_ticks) {
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -12081,11 +12315,6 @@ int bnxt_fw_init_one(struct bnxt *bp)
if (rc)
return rc;
- /* In case fw capabilities have changed, destroy the unneeded
- * reporters and create newly capable ones.
- */
- bnxt_dl_fw_reporters_destroy(bp, false);
- bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -12674,8 +12903,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(fltr, head, hash) {
if (bnxt_fltr_match(fltr, new_fltr)) {
+ rc = fltr->sw_id;
rcu_read_unlock();
- rc = 0;
goto err_free;
}
}
@@ -12914,7 +13143,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&bp->fw_reset_task);
bp->sp_event = 0;
- bnxt_dl_fw_reporters_destroy(bp, true);
+ bnxt_dl_fw_reporters_destroy(bp);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -13164,10 +13393,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- bp->dev->features |= NETIF_F_NTUPLE;
- }
+
+ bnxt_set_dflt_rfs(bp);
+
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
@@ -13407,7 +13635,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
- mutex_init(&bp->sriov_lock);
#endif
if (BNXT_SUPPORTS_TPA(bp)) {
bp->gro_func = bnxt_gro_func_5730x;
@@ -13695,7 +13922,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0, off;
+ int retry = 0;
+ int err = 0;
+ int off;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -13723,11 +13952,36 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ bnxt_inv_fw_health_reg(bp);
+ bnxt_try_map_fw_health_reg(bp);
+
+ /* In some PCIe AER scenarios, firmware may take up to
+ * 10 seconds to become ready in the worst case.
+ */
+ do {
+ err = bnxt_try_recover_fw(bp);
+ if (!err)
+ break;
+ retry++;
+ } while (retry < BNXT_FW_SLOT_RESET_RETRY);
+
+ if (err) {
+ dev_err(&pdev->dev, "Firmware not ready\n");
+ goto reset_exit;
+ }
+
err = bnxt_hwrm_func_reset(bp);
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ err = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, err);
}
+reset_exit:
+ bnxt_clear_reservations(bp, true);
rtnl_unlock();
return result;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 440dfeb4948b..d5fa43cfe524 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -591,9 +591,12 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+#define BNXT_PAGE_MODE_BUF_SIZE \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+ (BNXT_PAGE_MODE_BUF_SIZE - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
#define BNXT_MIN_PKT_SIZE 52
@@ -698,13 +701,12 @@ struct bnxt_sw_tx_bd {
};
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
u8 is_gso;
u8 is_push;
u8 action;
- union {
- unsigned short nr_frags;
- u16 rx_prod;
- };
+ unsigned short nr_frags;
+ u16 rx_prod;
};
struct bnxt_sw_rx_bd {
@@ -800,6 +802,8 @@ struct bnxt_tx_ring_info {
u32 dev_state;
struct bnxt_ring_struct tx_ring_struct;
+ /* Synchronize simultaneous xdp_xmit on same ring */
+ spinlock_t xdp_tx_lock;
};
#define BNXT_LEGACY_COAL_CMPL_PARAMS \
@@ -1175,7 +1179,11 @@ struct bnxt_link_info {
#define BNXT_PHY_STATE_ENABLED 0
#define BNXT_PHY_STATE_DISABLED 1
- u8 link_up;
+ u8 link_state;
+#define BNXT_LINK_STATE_UNKNOWN 0
+#define BNXT_LINK_STATE_DOWN 1
+#define BNXT_LINK_STATE_UP 2
+#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
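/* The link_up boolean above becomes a tri-state. Starting from
 * UNKNOWN at probe and after a forced shutdown guarantees the next
 * PORT_PHY_QCFG result is reported as a change, where a boolean
 * initialised to 0 (down) could swallow the first transition. A tiny
 * sketch of the reporting rule:
 */
#include <linux/types.h>

enum my_link_state { MY_LINK_UNKNOWN, MY_LINK_DOWN, MY_LINK_UP };

static bool my_should_report(enum my_link_state old, enum my_link_state cur)
{
	return old != cur;	/* UNKNOWN -> DOWN reports too */
}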
@@ -1613,6 +1621,7 @@ struct bnxt_fw_health {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
+#define BNXT_FW_SLOT_RESET_RETRY 4
enum board_idx {
BCM57301,
@@ -1810,6 +1819,7 @@ struct bnxt {
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
#define BNXT_CHIP_SR2(bp) \
((bp)->chip_num == CHIP_NUM_58818)
@@ -1921,6 +1931,7 @@ struct bnxt {
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1957,6 +1968,8 @@ struct bnxt {
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
+ #define BNXT_FW_CAP_PTP_RTC 0x00400000
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS 0x00800000
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
@@ -2066,12 +2079,6 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
-
- /* lock to protect VF-rep creation/cleanup via
- * multiple paths such as ->sriov_configure() and
- * devlink ->eswitch_mode_set()
- */
- struct mutex sriov_lock;
#endif
#if BITS_PER_LONG == 32
@@ -2098,8 +2105,8 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
- /* copied from flags in hwrm_port_phy_qcaps_output */
- u8 phy_flags;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ u32 phy_flags;
#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
@@ -2108,6 +2115,8 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2122,8 +2131,10 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
+ u8 ptp_all_rx_tstamp;
/* devlink interface and vf-rep structs */
struct devlink *dl;
@@ -2305,6 +2316,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 217ff597cdf2..caab3d626a2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -627,7 +627,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+ (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4da31b1b84f9..8a6f788f6294 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -20,6 +20,7 @@
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -45,7 +46,7 @@ bnxt_dl_flash_update(struct devlink *dl,
}
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
- rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0);
+ rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
@@ -241,37 +242,37 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.recover = bnxt_fw_recover,
};
-void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+static struct devlink_health_reporter *
+__bnxt_dl_reporter_create(struct bnxt *bp,
+ const struct devlink_health_reporter_ops *ops)
{
- struct bnxt_fw_health *health = bp->fw_health;
-
- if (!health || health->fw_reporter)
- return;
+ struct devlink_health_reporter *reporter;
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ if (IS_ERR(reporter)) {
+ netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
+ ops->name, PTR_ERR(reporter));
+ return NULL;
}
+
+ return reporter;
}
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (!health)
- return;
+ if (fw_health && !fw_health->fw_reporter)
+ fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
+}
- if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
- return;
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (health->fw_reporter) {
- devlink_health_reporter_destroy(health->fw_reporter);
- health->fw_reporter = NULL;
+ if (fw_health && fw_health->fw_reporter) {
+ devlink_health_reporter_destroy(fw_health->fw_reporter);
+ fw_health->fw_reporter = NULL;
}
}
@@ -367,6 +368,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
}
}
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
@@ -374,8 +385,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
struct hwrm_fw_livepatch_query_input *query_req;
struct hwrm_fw_livepatch_output *patch_resp;
struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
u32 installed = 0;
- u16 flags;
u8 target;
int rc;
@@ -394,7 +406,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
hwrm_req_drop(bp, query_req);
return rc;
}
- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
patch_resp = hwrm_req_hold(bp, patch_req);
@@ -407,12 +418,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
flags = le16_to_cpu(query_resp->status_flags);
- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
continue;
- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
- !strncmp(query_resp->active_ver, query_resp->install_ver,
- sizeof(query_resp->active_ver)))
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
continue;
+ }
+
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
patch_req->fw_target = target;
rc = hwrm_req_send(bp, patch_req);
@@ -424,8 +443,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
if (!rc && !installed) {
- NL_SET_ERR_MSG_MOD(extack, "No live patches found");
- rc = -ENOENT;
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
hwrm_req_drop(bp, query_req);
hwrm_req_drop(bp, patch_req);
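The four BNXT_LIVEPATCH_* states defined earlier in this patch are just the two
status bits taken together, which is what drives the opcode selection above
(INSTALLED gets ACTIVATE, REMOVED gets DEACTIVATE). A minimal decode helper,
hypothetical and not part of the patch, makes the state lattice explicit:

	/* Sketch only: name the state encoded by the two status bits. */
	static const char *bnxt_livepatch_state_str(u16 flags)
	{
		switch (BNXT_LIVEPATCH_STATE(flags)) {
		case BNXT_LIVEPATCH_NOT_INSTALLED:	/* neither bit set */
			return "not installed";
		case BNXT_LIVEPATCH_INSTALLED:		/* INSTALL bit only */
			return "installed";
		case BNXT_LIVEPATCH_REMOVED:		/* ACTIVE bit only */
			return "removed";
		case BNXT_LIVEPATCH_ACTIVATED:		/* both bits */
			return "activated";
		}
		return "unknown";
	}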
@@ -587,6 +611,64 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
return rc;
}
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ bool rc = false;
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
+
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
+
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto done;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto done;
+ }
+
+ rc = true;
+
+done:
+ kfree(buf);
+ return rc;
+}
+
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
+}
+
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
+
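The .selftest_check()/.selftest_run() pair added here hooks bnxt into the
devlink selftest netlink API; with a recent enough iproute2 it is driven by
something like "devlink dev selftests run <dev> id flash" (the exact CLI
spelling depends on the iproute2 version). The flash test itself is a
non-destructive round trip: bnxt_nvm_test() reads the VPD directory entry and
writes the same bytes back, so a pass demonstrates that NVM reads and writes
both work.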
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -599,6 +681,8 @@ static const struct devlink_ops bnxt_dl_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = bnxt_dl_reload_down,
.reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -956,9 +1040,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
- if (rc)
- return rc;
+ if (BNXT_CHIP_P5(bp)) {
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ }
return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
@@ -1221,6 +1307,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index a715458abc30..b8105065367b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -75,7 +75,7 @@ void bnxt_devlink_health_fw_report(struct bnxt *bp);
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 003330e8cd58..8cad15c458b3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -22,9 +23,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
@@ -32,6 +35,13 @@
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
+#define BNXT_NVM_ERR_MSG(dev, extack, msg) \
+ do { \
+ if (extack) \
+ NL_SET_ERR_MSG_MOD(extack, msg); \
+ netdev_err(dev, "%s\n", msg); \
+ } while (0)
+
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
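The BNXT_NVM_ERR_MSG() helper above reports through both channels at once; a
hedged usage sketch (hypothetical call site, not taken verbatim from this
patch):

	/* Sketch only: report to the netlink extack (when present) and the log. */
	if (fw->size > item_len) {
		BNXT_NVM_ERR_MSG(dev, extack, "PKG insufficient update area in nvram");
		return -EFBIG;
	}

Callers without a netlink context, such as the legacy ethtool flash path later
in this patch, pass a NULL extack and still get the netdev_err() logging.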
@@ -152,7 +162,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
@@ -802,9 +812,11 @@ static void bnxt_get_ringparam(struct net_device *dev,
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
} else {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
ering->rx_jumbo_max_pending = 0;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
@@ -1359,9 +1371,9 @@ static void bnxt_get_drvinfo(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
@@ -1658,15 +1670,19 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
fw_speeds = link_info->support_pam4_speeds;
BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+ }
if (link_info->support_auto_speeds ||
link_info->support_pam4_auto_speeds)
@@ -1897,7 +1913,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
- set_pause = true;
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ set_pause = true;
} else {
u8 phy_type = link_info->phy_type;
@@ -1969,6 +1986,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
fec->active_fec |= ETHTOOL_FEC_LLRS;
break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
}
return 0;
}
@@ -2086,7 +2106,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_PHY_CFG_ABLE(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -2097,9 +2117,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
}
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -2128,7 +2146,7 @@ static u32 bnxt_get_link(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
/* TODO: handle MF, VF, driver close case */
- return bp->link_info.link_up;
+ return BNXT_LINK_IS_UP(bp);
}
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
@@ -2158,14 +2176,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
- u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
- u32 dir_item_len, const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
@@ -2489,12 +2507,65 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
+#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
+#define MSG_INVALID_PKG "PKG install error : Invalid package"
+#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
+#define MSG_INVALID_DEV "PKG install error : Invalid device"
+#define MSG_INTERNAL_ERR "PKG install error : Internal error"
+#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
+#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
+#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
+#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
+
+static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
+ struct netlink_ext_ack *extack)
+{
+ switch (result) {
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
+ return -EINVAL;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
+ return -ENOPKG;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
+ return -EPERM;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
+ return -EOPNOTSUPP;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
+ return -EIO;
+ }
+}
+
#define BNXT_PKG_DMA_SIZE 0x40000
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
struct hwrm_nvm_install_update_input *install;
struct hwrm_nvm_install_update_output *resp;
@@ -2505,6 +2576,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
u8 *kmem = NULL;
u32 modify_len;
u32 item_len;
+ u8 cmd_err;
u16 index;
int rc;
@@ -2556,12 +2628,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_EXT_NONE,
&index, &item_len, NULL);
if (rc) {
- netdev_err(dev, "PKG update area not created in nvram\n");
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
break;
}
if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
- (unsigned long)fw->size);
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
rc = -EFBIG;
break;
}
@@ -2588,6 +2659,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
}
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
if (defrag_attempted) {
/* We have tried to defragment already in the previous
@@ -2596,15 +2669,24 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
break;
}
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (cmd_err) {
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
+ rc = -EALREADY;
+ break;
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
install->flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+ if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
/* FW has cleared NVM area, driver will create
* UPDATE directory and try the flash again
*/
@@ -2614,11 +2696,12 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
0, 0, item_len, NULL, 0);
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ if (!rc)
+ break;
}
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ fallthrough;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
}
} while (defrag_attempted && !rc);
@@ -2629,7 +2712,7 @@ pkg_abort:
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- rc = -ENOPKG;
+ rc = nvm_update_err_to_stderr(dev, resp->result, extack);
}
if (rc == -EACCES)
bnxt_print_admin_err(bp);
@@ -2637,7 +2720,7 @@ pkg_abort:
}
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
const struct firmware *fw;
int rc;
@@ -2649,7 +2732,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev, const char *file
return rc;
}
- rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
+ rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
release_firmware(fw);
@@ -2667,7 +2750,7 @@ static int bnxt_flash_device(struct net_device *dev,
if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
flash->region > 0xffff)
return bnxt_flash_package_from_file(dev, flash->data,
- flash->region);
+ flash->region, NULL);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -2753,8 +2836,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
@@ -2788,9 +2871,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
@@ -3320,7 +3403,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
@@ -3454,7 +3537,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -3467,7 +3550,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_bd(bp, txr, map, pkt_size);
+ bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
/* Sync BD data before updating doorbell */
wmb();
@@ -3548,9 +3631,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
+ bnxt_ulp_stop(bp);
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ bnxt_ulp_start(bp, rc);
return;
+ }
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3560,6 +3646,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -3585,7 +3672,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -3730,6 +3818,9 @@ static int bnxt_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -3785,7 +3876,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strscpy(str, fw_str, ETH_GSTRING_LEN);
strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
if (test_info->offline_mask & (1 << i))
strncat(str, " (offline)",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 6aa44840f13a..a8ecef8ab82c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -54,9 +54,18 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type);
+ u32 install_type, struct netlink_ext_ack *extack);
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index ea86c54247c7..b753032a1047 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2021 Broadcom Inc.
+ * Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -311,6 +311,8 @@ struct cmd_nums {
#define HWRM_CFA_TFLIB 0x125UL
#define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
#define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -369,6 +371,14 @@ struct cmd_nums {
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
#define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
#define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -390,6 +400,10 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
#define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -532,8 +546,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 63
-#define HWRM_VERSION_STR "1.10.2.63"
+#define HWRM_VERSION_RSVD 95
+#define HWRM_VERSION_STR "1.10.2.95"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -757,10 +771,13 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1112,34 +1129,37 @@ struct hwrm_async_event_cmpl_echo_request {
__le32 event_data1;
};
-/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_master {
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
__le16 type;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
__le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
};
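Packed fields throughout bnxt_hsi.h follow this _MASK/_SFT convention; for
example, extracting the new 16-bit PHC time MSB from event_data1 (a sketch,
with "cmpl" standing in for a received completion, not driver code):

	/* Sketch only: pull a packed field out with its _MASK/_SFT pair. */
	u32 data1 = le32_to_cpu(cmpl->event_data1);
	u16 phc_time_msb = (data1 &
			    ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>
			   ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT;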
/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
@@ -1246,7 +1266,8 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1330,6 +1351,32 @@ struct hwrm_async_event_cmpl_error_report_nvm {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
};
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1563,32 +1610,38 @@ struct hwrm_func_qcaps_output {
__le16 max_sp_tx_rings;
__le16 max_msix_vfs;
__le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1597,7 +1650,23 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
__le16 max_key_ctxs_alloc;
- u8 unused_1[7];
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ u8 unused_1;
u8 valid;
};
@@ -1761,11 +1830,17 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 alloc_tx_key_ctxs;
__le16 alloc_rx_key_ctxs;
- u8 unused_3[5];
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_3;
u8 valid;
};
-/* hwrm_func_cfg_input (size:896b/112B) */
+/* hwrm_func_cfg_input (size:960b/120B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1945,7 +2020,13 @@ struct hwrm_func_cfg_input {
__le16 host_mtu;
__le16 num_tx_key_ctxs;
__le16 num_rx_key_ctxs;
- u8 unused_0[4];
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 unused_0[7];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2455,7 +2536,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 rkc_entry_size;
__le32 tkc_max_entries;
__le32 rkc_max_entries;
- u8 rsvd[7];
+ u8 rsvd1[7];
u8 valid;
};
@@ -3164,7 +3245,7 @@ struct hwrm_func_ptp_pin_cfg_output {
u8 valid;
};
-/* hwrm_func_ptp_cfg_input (size:320b/40B) */
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
struct hwrm_func_ptp_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3178,6 +3259,7 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
u8 ptp_pps_event;
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
@@ -3204,6 +3286,7 @@ struct hwrm_func_ptp_cfg_input {
__le32 ptp_freq_adj_ext_up;
__le32 ptp_freq_adj_ext_phase_lower;
__le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
};
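The new ptp_set_time field pairs with the PTP_SET_TIME enables bit; programming
an absolute PHC time might look like this (a sketch assuming the enables field
is the __le16 used elsewhere for this command, with "req" and "ns" being
hypothetical names):

	/* Sketch only: request an absolute PHC time, in nanoseconds. */
	req->enables |= cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
	req->ptp_set_time = cpu_to_le64(ns);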
/* hwrm_func_ptp_cfg_output (size:128b/16B) */
@@ -3243,6 +3326,335 @@ struct hwrm_func_ptp_ts_query_output {
u8 valid;
};
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+};
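The page_size_pbl_level byte packs the PBL indirection level into the low
nibble and the backing page size into the high nibble (note the "(val << 4)"
encodings above); composing it is a plain OR (sketch, "req" hypothetical):

	/* Sketch only: 4K backing pages behind a one-level PBL. */
	req->page_size_pbl_level = FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K |
				   FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1;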
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 rsvd[2];
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 rsvd2;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd3[3];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -3741,7 +4153,7 @@ struct hwrm_port_phy_qcfg_output {
u8 valid;
};
-/* hwrm_port_mac_cfg_input (size:384b/48B) */
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3763,6 +4175,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -3807,7 +4221,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- __le32 ptp_adj_phase;
+ u8 unused_1[4];
+ __le64 ptp_adj_phase;
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -3850,6 +4265,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4339,7 +4755,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
@@ -4399,7 +4816,7 @@ struct hwrm_port_phy_qcaps_output {
__le16 flags2;
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- u8 unused_0[1];
+ u8 internal_port_cnt;
u8 valid;
};
@@ -6042,6 +6459,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6056,7 +6474,12 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 unused0[5];
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ u8 unused0[4];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -6089,25 +6512,31 @@ struct hwrm_vnic_qcaps_output {
__le16 mru;
u8 unused_0[2];
__le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -6221,12 +6650,17 @@ struct hwrm_vnic_rss_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
__le16 vnic_id;
u8 ring_table_pair_index;
u8 hash_mode_flags;
@@ -6241,11 +6675,11 @@ struct hwrm_vnic_rss_cfg_input {
u8 flags;
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
- u8 rss_hash_function;
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
u8 unused_1[4];
};
@@ -6390,7 +6824,9 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -7574,12 +8010,17 @@ struct hwrm_cfa_flow_info_input {
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
u8 unused_0[6];
__le64 ext_flow_handle;
};
@@ -7668,7 +8109,8 @@ struct hwrm_cfa_flow_stats_output {
__le64 byte_7;
__le64 byte_8;
__le64 byte_9;
- u8 unused_0[7];
+ __le16 flow_hits;
+ u8 unused_0[5];
u8 valid;
};
@@ -7894,10 +8336,12 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
u8 unused_0[3];
u8 valid;
};
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8233,6 +8677,56 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER 0x0UL
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_LAST STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1216b/152B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+};
+
/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
__le16 req_type;
@@ -8909,6 +9403,50 @@ struct hwrm_dbg_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -9372,8 +9910,35 @@ struct hwrm_nvm_install_update_output {
__le16 resp_len;
__le64 installed_items;
u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
u8 problem_item;
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
@@ -9390,11 +9955,12 @@ struct hwrm_nvm_install_update_output {
/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
u8 unused_0[7];
};
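Note: the hwrm_nvm_install_update_output result byte above grows from a lone SUCCESS value into a full failure taxonomy. A consumer can key diagnostics off it; a minimal, non-exhaustive sketch (helper name hypothetical, assumes the bnxt_hsi.h definitions above are in scope):

#include <linux/types.h>

static const char *nvm_result_str(u8 result)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS:
		return "success";
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
		return "package signature check failed";
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
		return "package not built for this device";
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		return "target item is locked";
	default:
		return "unrecognized result code";
	}
}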
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 566c9487ef55..132442f16fe6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -476,7 +476,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
- if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) {
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
req_type);
goto exit;
@@ -644,17 +645,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
- usleep_range(1, 5);
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i), req_type,
+ hwrm_total_timeout(i) + j, req_type,
le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
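Note: the rewritten wait loop trades the old fixed usleep_range(1, 5) poll for a two-phase scheme: busy-wait in 1 us steps for the first 10 us so fast firmware completions stay fast, then sleep in ~20-30 us slices until the budget (raised to 50 ms in the bnxt_hwrm.h hunk below) is spent. The same pattern in isolation — a sketch, with check_done() standing in for the dma_rmb() plus valid-byte test:

#include <linux/delay.h>
#include <linux/types.h>

static bool poll_with_backoff(bool (*check_done)(void *), void *arg,
			      unsigned int budget_usec)
{
	unsigned int waited = 0;

	while (waited < budget_usec) {
		if (check_done(arg))
			return true;
		if (waited < 10) {
			udelay(1);		/* phase 1: busy-wait */
			waited++;
		} else {
			usleep_range(20, 30);	/* phase 2: sleep */
			waited += 20;
		}
	}
	return false;				/* timed out */
}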
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index d52bd2d63aec..c98032e38188 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}
-#define HWRM_VALID_BIT_DELAY_USEC 150
+#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 48520967746f..2132ce63193c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -19,6 +19,20 @@
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
+static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ req->ptp_set_time = cpu_to_le64(time);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
@@ -48,6 +62,9 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_cfg_settime(ptp->bp, ns);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_init(&ptp->tc, &ptp->cc, ns);
spin_unlock_bh(&ptp->ptp_lock);
@@ -59,14 +76,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 high_before, high_now, low;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return -EIO;
+ high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
ptp_read_system_prets(sts);
- *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ if (high_now != high_before) {
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ }
+ *ns = ((u64)high_now << 32) | low;
+
return 0;
}
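Note: the reworked bnxt_refclk_read() is the classic high-low-high read of a 64-bit counter exposed as two 32-bit registers: if the high word changed while the low word was sampled, the low word is re-read so both halves come from the same wrap period, and the ptp_read_system_prets()/postts() snapshots bracket only the low-word read so the system timestamps pair with the counter sample actually used. A generic sketch of the idiom (read_lo()/read_hi() stand in for the readl() calls):

#include <linux/types.h>

/* Assumes the high word increments at most once per read sequence,
 * which holds for a nanosecond counter.
 */
static u64 read_split_counter64(u32 (*read_lo)(void), u32 (*read_hi)(void))
{
	u32 hi_before = read_hi();
	u32 lo = read_lo();
	u32 hi_now = read_hi();

	if (hi_now != hi_before)	/* low word wrapped mid-read */
		lo = read_lo();		/* re-pair lo with hi_now */

	return ((u64)hi_now << 32) | lo;
}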
@@ -131,11 +157,47 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
return 0;
}
+/* Caller holds ptp_lock */
+void bnxt_ptp_update_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+}
+
+static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
+{
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE);
+ req->ptp_adj_phase = cpu_to_le64(delta);
+
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc) {
+ netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(ptp->bp);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+
+ return rc;
+}
+
static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_adjphc(ptp, delta);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_adjtime(&ptp->tc, delta);
spin_unlock_bh(&ptp->ptp_lock);
@@ -242,6 +304,40 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct hwrm_port_mac_cfg_input *req;
+
+ if (!ptp || !ptp->tstamp_filters)
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ goto out;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
+ (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
+ ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
+ netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
+ }
+
+ req->flags = cpu_to_le32(ptp->tstamp_filters);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+ if (!hwrm_req_send(bp, req)) {
+ bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
+ return;
+ }
+ ptp->tstamp_filters = 0;
+out:
+ bp->ptp_all_rx_tstamp = 0;
+ netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+}
+
void bnxt_ptp_reapply_pps(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -329,7 +425,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct bnxt *bp = ptp->bp;
- u8 pin_id;
+ int pin_id;
int rc;
switch (rq->type) {
@@ -337,6 +433,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
/* Configure an External PPS IN */
pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
if (!on)
break;
rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
@@ -350,6 +448,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
/* Configure a Periodic PPS OUT */
pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
if (!on)
break;
@@ -378,27 +478,45 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- struct hwrm_port_mac_cfg_input *req;
u32 flags = 0;
- int rc;
+ int rc = 0;
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ break;
+ }
- if (ptp->rx_filter)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
- else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
if (ptp->tx_tstamp_en)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
- req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- return hwrm_req_send(bp, req);
+ ptp->tstamp_filters = flags;
+
+ if (netif_running(bp->dev)) {
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
+ if (!rc && !ptp->tstamp_filters)
+ rc = -EIO;
+ }
+
+ return rc;
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -429,6 +547,12 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
+ case HWTSTAMP_FILTER_ALL:
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) {
+ ptp->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ return -EOPNOTSUPP;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -714,7 +838,70 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
}
-int bnxt_ptp_init(struct bnxt *bp)
+static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp->ptp_clock) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ if (init_tc)
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
+{
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ /* For RTC, cycle_last must be in sync with the timecounter value. */
+ ptp->tc.cycle_last = ns & ptp->cc.mask;
+}
+
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+{
+ struct timespec64 tsp;
+ u64 ns;
+ int rc;
+
+ if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ return -ENODEV;
+
+ if (!phc_cfg) {
+ ktime_get_real_ts64(&tsp);
+ ns = timespec64_to_ns(&tsp);
+ rc = bnxt_ptp_cfg_settime(bp, ns);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ if (rc)
+ return rc;
+ }
+ spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
+ spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+
+ return 0;
+}
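Note on the cycle_last fixup in bnxt_ptp_rtc_timecounter_init(): with cc.mult == 1 and cc.shift == 0 the PHC counts nanoseconds directly, so in RTC mode the counter value *is* the wall time; seeding tc.cycle_last with the same ns keeps the first timecounter_read() delta at zero instead of double-counting the counter. A sketch of the arithmetic (a plain restatement of the timecounter math, not driver code):

#include <linux/types.h>

/* timecounter_read() returns nsec + ((cycle_now - cycle_last) & mask),
 * scaled by mult/shift (here 1 and 0).
 */
static u64 first_rtc_read(u64 ns, u64 cycle_now, u64 mask)
{
	u64 cycle_last = ns & mask;	/* as seeded above */
	u64 delta = (cycle_now - cycle_last) & mask;

	return ns + delta;		/* == cycle_now, extended past mask */
}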
+
+static void bnxt_ptp_free(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
+}
+
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -729,23 +916,19 @@ int bnxt_ptp_init(struct bnxt *bp)
if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
return 0;
- if (ptp->ptp_clock) {
- ptp_clock_unregister(ptp->ptp_clock);
- ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
- }
+ bnxt_ptp_free(bp);
+
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = bnxt_cc_read;
- ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
- timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ if (rc)
+ goto out;
+ } else {
+ bnxt_ptp_timecounter_init(bp, true);
+ }
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
@@ -757,8 +940,8 @@ int bnxt_ptp_init(struct bnxt *bp)
int err = PTR_ERR(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- bnxt_unmap_ptp_regs(bp);
- return err;
+ rc = err;
+ goto out;
}
if (bp->flags & BNXT_FLAG_CHIP_P5) {
spin_lock_bh(&ptp->ptp_lock);
@@ -768,6 +951,11 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp_schedule_worker(ptp->ptp_clock, 0);
}
return 0;
+
+out:
+ bnxt_ptp_free(bp);
+ bnxt_unmap_ptp_regs(bp);
+ return rc;
}
void bnxt_ptp_clear(struct bnxt *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 7c528e1f8713..4ce0a14c1e23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -31,7 +31,7 @@ struct pps_pin {
u8 state;
};
-#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
+#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
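Note: the TSIO_PIN_VALID() change pairs with the u8-to-int conversion of pin_id in bnxt_ptp_enable() above: ptp_find_pin() returns a negative errno when no pin matches, and storing that in a u8 turned -ENXIO into a large positive index that sailed past the old upper-bound-only check. The defensive shape, reduced to a sketch (MAX_PINS and find_pin() are hypothetical stand-ins):

#include <linux/errno.h>

#define MAX_PINS	4	/* stands in for BNXT_MAX_TSIO_PINS */
#define PIN_VALID(pin)	((pin) >= 0 && (pin) < MAX_PINS)

int find_pin(int index);	/* like ptp_find_pin(): index or -ENXIO */

static int cfg_pin(int index)
{
	int pin = find_pin(index);	/* int, not u8: keep the sign */

	if (!PIN_VALID(pin))
		return -EOPNOTSUPP;
	/* ... program the pin ... */
	return 0;
}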
@@ -113,6 +113,7 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
int rx_filter;
+ u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
@@ -131,12 +132,16 @@ do { \
#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_clear(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 1d177fed44a6..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -307,7 +307,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ if (min_tx_rate > pf_link_speed) {
netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
min_tx_rate, vf_id);
return -EINVAL;
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
@@ -823,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc)
+ if (rc) {
+ bnxt_ulp_sriov_cfg(bp, 0);
goto err_out2;
+ }
return 0;
@@ -832,6 +834,9 @@ err_out2:
/* Free the resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+ /* Restore the max resources */
+ bnxt_hwrm_func_qcaps(bp);
+
err_out1:
bnxt_free_vf_resources(bp);
@@ -846,7 +851,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
/* synchronize VF and VF-rep create and destroy */
- mutex_lock(&bp->sriov_lock);
+ devl_lock(bp->dl);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
@@ -859,7 +864,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
- mutex_unlock(&bp->sriov_lock);
+ devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fde0c3e8ac57..2e54bf4fc7a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -25,7 +25,7 @@
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_ulp_ops *ulp_ops, void *handle)
{
struct net_device *dev = edev->net;
@@ -62,7 +62,7 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
return 0;
}
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -115,7 +115,7 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct net_device *dev = edev->net;
@@ -179,7 +179,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return avail_msix;
}
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -233,7 +233,7 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -447,7 +447,7 @@ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
rcu_read_unlock();
}
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id,
unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 54d59f681b86..42b50abc3e91 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -77,15 +77,15 @@ struct bnxt_en_dev {
};
struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+ int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int,
struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+ int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int);
+ int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int,
struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+ int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int);
+ int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int,
struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+ int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int,
unsigned long *, u16);
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 8eb28e088582..fcc65890820a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -222,7 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
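Note: the strlcpy() to strscpy() conversions here and in the genet, tg3, and bna hunks below are mechanical because no call site uses the return value. The helpers differ in exactly that: strlcpy() returns strlen(src) and so always walks the whole source, while strscpy() stops at the destination size and returns -E2BIG on truncation. A sketch of truncation handling that strlcpy() could not express directly:

#include <linux/string.h>
#include <linux/printk.h>

static void copy_name(char *dst, size_t dstsz, const char *src)
{
	if (strscpy(dst, src, dstsz) < 0)	/* -E2BIG: truncated */
		pr_debug("name truncated: %s\n", dst);
}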
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
@@ -559,44 +559,34 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
- int rc = 0;
- mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
bnxt_vf_reps_destroy(bp);
- break;
+ return 0;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
- rc = -ENOTSUPP;
- goto done;
+ return -ENOTSUPP;
}
if (pci_num_vf(bp->pdev) == 0) {
netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- rc = -EPERM;
- goto done;
+ return -EPERM;
}
- rc = bnxt_vf_reps_create(bp);
- break;
+ return bnxt_vf_reps_create(bp);
default:
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
-done:
- mutex_unlock(&bp->sriov_lock);
- return rc;
}
#endif
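Note: bnxt_dl_eswitch_mode_set() can drop its private locking because devlink callbacks are now invoked with the devlink instance lock held; paths outside devlink, like bnxt_sriov_disable() above, take devl_lock() explicitly in place of the removed sriov_lock mutex. The shape of the pattern, as a sketch (not the driver's code):

#include <net/devlink.h>

/* Non-devlink path: serialize against devlink callbacks explicitly. */
static void teardown_vf_reps(struct devlink *dl)
{
	devl_lock(dl);
	/* ... destroy VF representors ... */
	devl_unlock(dl);
}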
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 52fad0fdeacf..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -20,38 +20,95 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len)
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo;
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
+ int num_frags = 0;
u32 flags;
u16 prod;
+ int i;
+ if (xdp && xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ num_frags = sinfo->nr_frags;
+ }
+
+ /* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->nr_frags = num_frags;
+ if (xdp)
+ tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
- TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ flags = (len << TX_BD_LEN_SHIFT) |
+ ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ /* now let us fill up the frags into the next buffers */
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+ struct pci_dev *pdev = bp->pdev;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ /* fill this frag's tx buffer and descriptor */
+ frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf->page = skb_frag_page(frag);
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+ frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+ return NULL;
+
+ dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+ /* Sync TX BD */
+ wmb();
prod = NEXT_TX(prod);
txr->tx_prod = prod;
+
return tx_buf;
}
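Note: bnxt_xmit_bd() now emits one BD per fragment of a multi-buffer xdp_buff: the first BD carries the full BD count ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) and PACKET_END moves to the last BD. The frag-walk boilerplate it relies on, reduced to a sketch:

#include <linux/skbuff.h>
#include <net/xdp.h>

static unsigned int xdp_frag_bytes(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	unsigned int bytes = 0;
	int i;

	if (!xdp_buff_has_frags(xdp))
		return 0;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++)
		bytes += skb_frag_size(&sinfo->frags[i]);

	return bytes;
}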
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+ dma_addr_t mapping, u32 len, u16 rx_prod,
+ struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
+
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -61,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -76,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i;
+ int i, j, frags;
for (i = 0; i < nr_pkts; i++) {
tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -94,6 +151,13 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
} else if (tx_buf->action == XDP_TX) {
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
+
+ frags = tx_buf->nr_frags;
+ for (j = 0; j < frags; j++) {
+ tx_cons = NEXT_TX(tx_cons);
+ tx_buf = &txr->tx_buf_ring[tx_cons];
+ page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+ }
}
tx_cons = NEXT_TX(tx_cons);
}
@@ -101,22 +165,71 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
}
}
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+ return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp)
+{
+ struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+
+ pdev = bp->pdev;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *shinfo;
+ int i;
+
+ if (!xdp || !xdp_buff_has_frags(xdp))
+ return;
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+ page_pool_recycle_direct(rxr->page_pool, page);
+ }
+ shinfo->nr_frags = 0;
+}
+
/* returns the following:
* true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+ struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
- struct xdp_buff xdp;
dma_addr_t mapping;
+ u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
@@ -126,16 +239,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
txr = rxr->bnapi->tx_ring;
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
- xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -148,26 +255,38 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event &= ~BNXT_RX_EVENT;
*len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
+ if (orig_data != xdp.data)
offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
- }
+
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
- if (tx_avail < 1) {
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ *event = 0;
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+
+ tx_needed += sinfo->nr_frags;
+ *event = BNXT_AGG_EVENT;
+ }
+
+ if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- *event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+
+ *event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ NEXT_RX(rxr->rx_prod), &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -175,6 +294,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
@@ -182,6 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -201,6 +323,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -227,11 +350,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
+ if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+ return -EINVAL;
+
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_lock(&txr->xdp_tx_lock);
+
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
- if (!txr || !bnxt_tx_avail(bp, txr) ||
- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+ if (!bnxt_tx_avail(bp, txr))
break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
@@ -250,6 +378,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_unlock(&txr->xdp_tx_lock);
+
return nxmit;
}
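Note: XDP_REDIRECT can drive bnxt_xdp_xmit() from any CPU, so when the driver has fewer XDP TX rings than CPUs a ring can be shared and its producer index needs a lock; the static key keeps the common one-ring-per-CPU case lock-free (the site that enables the key is not in this hunk). The idiom in isolation, as a sketch:

#include <linux/jump_label.h>
#include <linux/spinlock.h>

DEFINE_STATIC_KEY_FALSE(xmit_locking_key);	/* enabled when rings < CPUs */

static void ring_xmit(spinlock_t *ring_lock /* , frames ... */)
{
	if (static_branch_unlikely(&xmit_locking_key))
		spin_lock(ring_lock);

	/* ... post frames and ring the doorbell ... */

	if (static_branch_unlikely(&xmit_locking_key))
		spin_unlock(ring_lock);
}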
@@ -260,8 +391,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
int tx_xdp = 0, rc, tc;
struct bpf_prog *old;
- if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
- netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ if (prog && !prog->aux->xdp_has_frags &&
+ bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
@@ -269,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
@@ -327,3 +461,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
return rc;
}
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ struct page_pool *pool, struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!skb)
+ return NULL;
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ PAGE_SIZE * sinfo->nr_frags,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+ return skb;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05..505911ae095d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,15 +10,29 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len);
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff xdp, struct page *page, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ u8 num_frags, struct page_pool *pool,
+ struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f7f10cfb3476..2198e35d9e18 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
+ id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
@@ -669,7 +669,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
- kfree(id_tbl->table);
+ bitmap_free(id_tbl->table);
id_tbl->table = NULL;
}
@@ -4105,8 +4105,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
atomic_set(&cp->csk_tbl[i].ref_count, 0);
- port_id = prandom_u32();
- port_id %= CNIC_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN, port_id)) {
cnic_cm_free_mem(dev);
@@ -4165,7 +4164,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- seed = prandom_u32();
+ seed = get_random_u32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
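Note: three idiom updates in the cnic hunks: bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(size), sizeof(long))/kfree() pair, prandom_u32() % range becomes prandom_u32_max(range), and a seed that only needs randomness (not reproducibility) uses get_random_u32(). The bitmap pairing in miniature, as a sketch:

#include <linux/bitmap.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static unsigned long *ids;

static int ids_init(unsigned int size)
{
	ids = bitmap_zalloc(size, GFP_KERNEL);	/* zeroed, bit-sized */
	return ids ? 0 : -ENOMEM;
}

static void ids_exit(void)
{
	bitmap_free(ids);	/* NULL-safe, pairs with bitmap_zalloc() */
	ids = NULL;
}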
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 226f4403cfed..25c450606985 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1146,7 +1146,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1368,7 +1368,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
+ ret = phy_init_eee(dev->phydev, false);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
@@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
return skb;
}
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+ __skb_pull(skb, sizeof(struct status_64));
+}
+
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+ bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -2287,8 +2294,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
- skb->csum = (__force __wsum)ntohs(rx_csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (rx_csum) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
@@ -2662,8 +2671,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
}
/* Initialize a RDMA ring */
@@ -2699,8 +2707,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
bcmgenet_init_rx_coalesce(ring);
/* Initialize Rx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
@@ -3990,6 +3997,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -4020,10 +4031,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
- err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
- dev->name, priv);
- if (!err)
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (priv->wol_irq > 0) {
+ err = devm_request_irq(&pdev->dev, priv->wol_irq,
+ bcmgenet_wol_isr, 0, dev->name, priv);
+ if (!err)
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
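Note: the two genet probe fixes belong together: platform_get_irq_optional() reports a missing IRQ as a negative errno, so -EPROBE_DEFER must be bubbled up rather than silently treated as "no WoL", and devm_request_irq() must only run for a real (positive) IRQ number. The idiom as a standalone sketch (function name hypothetical):

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static int probe_optional_wol_irq(struct platform_device *pdev,
				  irq_handler_t handler, void *priv)
{
	int irq = platform_get_irq_optional(pdev, 2);

	if (irq == -EPROBE_DEFER)
		return irq;		/* provider not ready: retry probe */
	if (irq <= 0)
		return 0;		/* genuinely absent: not an error */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), priv);
}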
/* Set the needed headroom to account for any possible
* features enabling/disabling at runtime
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index e31a5a397f11..f55d9d9c01a8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -40,6 +40,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+
+ if (!device_can_wakeup(kdev)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c888ddee1fc4..7ded559842e8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -393,6 +393,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..f02facb60fd1 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
@@ -2205,7 +2203,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
dev->min_mtu = 0;
dev->max_mtu = ENET_PACKET_SIZE;
- netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
+ netif_napi_add_weight(dev, &sc->napi, sbmac_poll, 16);
dev->irq = UNIT_INT(idx);
@@ -2536,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev)
int err;
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- BUG_ON(!res);
+ if (!res) {
+ printk(KERN_ERR "%s: failed to get resource\n",
+ dev_name(&pldev->dev));
+ err = -EINVAL;
+ goto out_out;
+ }
sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c28f8cc00d1c..4179a12fc881 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7380,9 +7380,9 @@ static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -7944,7 +7944,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
@@ -12302,9 +12302,9 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f1d2c4cd5da2..d6d90f9722a7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1881,7 +1881,6 @@ poll_exit:
return rcvd;
}
-#define BNAD_NAPI_POLL_QUOTA 64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
@@ -1892,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ bnad_napi_poll_rx);
}
}
@@ -2824,8 +2823,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
@@ -2873,8 +2871,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
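Note: skb_tcp_all_headers() folds a sum that TSO paths recompute constantly; the tg3 and bna conversions above are one-for-one. For reference, the helper is equivalent to the open-coded expression it replaces:

#include <linux/tcp.h>

/* Same value as skb_tcp_all_headers(skb): the offset, from the start
 * of the packet headers, of the first byte past the TCP header.
 */
static inline int all_headers_open_coded(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}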
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 8aca768571b2..df10edff5603 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -114,7 +114,7 @@ static const char *bnad_net_stats_strings[] = {
"mac_tx_deferral",
"mac_tx_excessive_deferral",
"mac_tx_single_collision",
- "mac_tx_muliple_collision",
+ "mac_tx_multiple_collision",
"mac_tx_late_collision",
"mac_tx_excessive_collision",
"mac_tx_total_collision",
@@ -283,7 +283,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -291,12 +291,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
sizeof(drvinfo->bus_info));
}
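The strlcpy() to strscpy() conversions are behavior-preserving for these fixed-size ethtool fields, but strscpy() is the safer primitive: it always NUL-terminates and returns the number of bytes copied, or -E2BIG on truncation, whereas strlcpy() returns strlen(src) and so must read the entire, possibly unterminated, source string. A short usage sketch:

```c
#include <linux/errno.h>
#include <linux/string.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t n = strscpy(dst, src, dst_size);

        if (n == -E2BIG) {
                /* truncated: dst holds dst_size - 1 bytes plus NUL */
        }
        /* on success, n is the copied length excluding the NUL */
}
```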
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9ddbee7de72b..9c410f93a103 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
+#include <linux/phy/phy.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -716,14 +717,15 @@
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_MIIONRGMII 0x00000200
+#define MACB_CAPS_NEED_TSUCLK 0x00000400
+#define MACB_CAPS_PCS 0x01000000
+#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -1203,11 +1205,15 @@ struct macb_queue {
unsigned int RBQP;
unsigned int RBQPH;
+ /* Lock to protect tx_head and tx_tail */
+ spinlock_t tx_ptr_lock;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
struct macb_tx_skb *tx_skb;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+ bool txubr_pending;
+ struct napi_struct napi_tx;
dma_addr_t rx_ring_dma;
dma_addr_t rx_buffers_dma;
@@ -1216,7 +1222,7 @@ struct macb_queue {
struct macb_dma_desc *rx_ring;
struct sk_buff **rx_skbuff;
void *rx_buffers;
- struct napi_struct napi;
+ struct napi_struct napi_rx;
struct queue_stats stats;
#ifdef CONFIG_MACB_USE_HWSTAMP
@@ -1291,6 +1297,9 @@ struct macb {
u32 wol;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
+
+ struct phy *sgmii_phy; /* for ZynqMP SGMII mode */
+
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a363da928e8b..4f63f1ba3161 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -34,7 +34,11 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -335,11 +339,9 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -389,11 +391,9 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
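pm_runtime_resume_and_get() wraps exactly the pattern being deleted here and in the at91ether hunk below: it resumes the device and takes a usage reference, and on failure drops that reference itself, so callers no longer need the pm_runtime_put_noidle() cleanup. Its semantics are roughly:

```c
#include <linux/pm_runtime.h>

/* Approximate open-coded equivalent of pm_runtime_resume_and_get(). */
static int my_resume_and_get(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev); /* takes a usage count, resumes */

        if (ret < 0) {
                pm_runtime_put_noidle(dev); /* drop the count on failure */
                return ret;
        }
        return 0;
}
```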
@@ -806,6 +806,7 @@ static int macb_mii_probe(struct net_device *dev)
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
+ bp->phylink_config.mac_managed_pm = true;
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
bp->phylink_config.poll_fixed_state = true;
@@ -961,7 +962,7 @@ static int macb_halt_tx(struct macb *bp)
return -ETIMEDOUT;
}
-static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
if (tx_skb->mapping) {
if (tx_skb->mapped_as_page)
@@ -974,7 +975,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
if (tx_skb->skb) {
- dev_kfree_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
}
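macb_tx_unmap() now takes the NAPI budget so that completed skbs can be released with napi_consume_skb(), which batches frees through the per-CPU NAPI skb cache when called from softirq context. A budget of 0, as passed from the error-task and DMA-unwind paths later in this patch, makes it fall back to dev_consume_skb_any(), which is safe in any context. A sketch of the two call sites:

```c
#include <linux/skbuff.h>

/* From NAPI poll: pass the poll budget, enabling batched freeing. */
static void free_from_napi(struct sk_buff *skb, int budget)
{
        napi_consume_skb(skb, budget);
}

/* From process or IRQ context (error/cleanup paths): budget 0 makes
 * napi_consume_skb() behave like dev_consume_skb_any(). */
static void free_from_anywhere(struct sk_buff *skb)
{
        napi_consume_skb(skb, 0);
}
```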
@@ -1027,12 +1028,13 @@ static void macb_tx_error_task(struct work_struct *work)
(unsigned int)(queue - bp->queues),
queue->tx_tail, queue->tx_head);
- /* Prevent the queue IRQ handlers from running: each of them may call
- * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+ /* Prevent the queue NAPI TX poll from running, as it calls
+ * macb_tx_complete(), which in turn may call netif_wake_subqueue().
* As explained below, we have to halt the transmission before updating
* TBQP registers so we call netif_tx_stop_all_queues() to notify the
* network engine about the macb/gem being halted.
*/
+ napi_disable(&queue->napi_tx);
spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
@@ -1060,7 +1062,7 @@ static void macb_tx_error_task(struct work_struct *work)
if (ctrl & MACB_BIT(TX_USED)) {
/* skb is set for the last buffer of the frame */
while (!skb) {
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
tail++;
tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
@@ -1090,7 +1092,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
/* Set end of TX queue */
@@ -1120,27 +1122,50 @@ static void macb_tx_error_task(struct work_struct *work)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
spin_unlock_irqrestore(&bp->lock, flags);
+ napi_enable(&queue->napi_tx);
}
-static void macb_tx_interrupt(struct macb_queue *queue)
+static bool ptp_one_step_sync(struct sk_buff *skb)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
- struct macb *bp = queue->bp;
- u16 queue_index = queue - bp->queues;
+ struct ptp_header *hdr;
+ unsigned int ptp_class;
+ u8 msgtype;
+
+ /* No need to parse packet if PTP TS is not involved */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ goto not_oss;
+
+ /* Identify and return whether PTP one step sync is being processed */
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ goto not_oss;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ goto not_oss;
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
+ goto not_oss;
- netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ return true;
+not_oss:
+ return false;
+}
+
+static int macb_tx_complete(struct macb_queue *queue, int budget)
+{
+ struct macb *bp = queue->bp;
+ u16 queue_index = queue - bp->queues;
+ unsigned int tail;
+ unsigned int head;
+ int packets = 0;
+
+ spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
- for (tail = queue->tx_tail; tail != head; tail++) {
+ for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
struct macb_dma_desc *desc;
@@ -1166,8 +1191,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP) &&
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !ptp_one_step_sync(skb) &&
gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
@@ -1181,10 +1206,11 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ packets++;
}
/* Now we can safely release resources */
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, budget);
/* skb is set only for the last buffer of the frame.
* WARNING: at this point skb has been freed by
@@ -1200,6 +1226,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
+ spin_unlock(&queue->tx_ptr_lock);
+
+ return packets;
}
static void gem_rx_refill(struct macb_queue *queue)
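ptp_one_step_sync(), added above, keeps one-step SYNC frames away from the two-step timestamping and FCS-offload paths, since for those frames the hardware itself inserts the timestamp and recomputes the checksum. The classification helpers it uses come from <linux/ptp_classify.h>; a minimal sketch of that flow, with a hypothetical wrapper name:

```c
#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* True if skb is a PTP SYNC event message with the twoStepFlag clear,
 * i.e. a candidate for one-step timestamping. */
static bool is_one_step_sync(struct sk_buff *skb)
{
        unsigned int type = ptp_classify_raw(skb); /* BPF-based classifier */
        struct ptp_header *hdr;

        if (type == PTP_CLASS_NONE)
                return false;

        hdr = ptp_parse_header(skb, type);         /* NULL if malformed */
        if (!hdr || (hdr->flag_field[0] & PTP_FLAG_TWOSTEP))
                return false;

        return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
}
```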
@@ -1217,7 +1246,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1256,6 +1284,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1556,31 +1585,51 @@ static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
return received;
}
-static int macb_poll(struct napi_struct *napi, int budget)
+static bool macb_rx_pending(struct macb_queue *queue)
{
- struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
struct macb *bp = queue->bp;
- int work_done;
- u32 status;
+ unsigned int entry;
+ struct macb_dma_desc *desc;
- status = macb_readl(bp, RSR);
- macb_writel(bp, RSR, status);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
- netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ return (desc->addr & MACB_BIT(RX_USED)) != 0;
+}
+
+static int macb_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
+ struct macb *bp = queue->bp;
+ int work_done;
work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
- status = macb_readl(bp, RSR);
- if (status) {
+ netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_rx_pending(queue)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- napi_reschedule(napi);
- } else {
- queue_writel(queue, IER, bp->rx_intr_mask);
+ netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
+ napi_schedule(napi);
}
}
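The rewritten RX poll follows the canonical pattern for this class of completion interrupt: re-enable the interrupt only once napi_complete_done() agrees polling is finished, then re-check the ring and reschedule if packets arrived while the interrupt was masked. The generic shape, with hypothetical helper names standing in for the register accesses:

```c
/* Race-free NAPI completion, as used by macb_rx_poll() above. */
static int poll_pattern(struct napi_struct *napi, int budget)
{
        int work_done = do_rx_work(napi, budget);       /* hypothetical worker */

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                enable_rx_irq();                        /* hypothetical */
                if (ring_has_pending_packets()) {       /* re-check after enable */
                        disable_rx_irq();
                        napi_schedule(napi);            /* run poll again */
                }
        }
        return work_done;
}
```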
@@ -1589,6 +1638,90 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void macb_tx_restart(struct macb_queue *queue)
+{
+ struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
+
+ spin_lock(&queue->tx_ptr_lock);
+
+ if (queue->tx_head == queue->tx_tail)
+ goto out_tx_ptr_unlock;
+
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
+
+ if (tbqp == head_idx)
+ goto out_tx_ptr_unlock;
+
+ spin_lock_irq(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock_irq(&bp->lock);
+
+out_tx_ptr_unlock:
+ spin_unlock(&queue->tx_ptr_lock);
+}
+
+static bool macb_tx_complete_pending(struct macb_queue *queue)
+{
+ bool retval = false;
+
+ spin_lock(&queue->tx_ptr_lock);
+ if (queue->tx_head != queue->tx_tail) {
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
+ retval = true;
+ }
+ spin_unlock(&queue->tx_ptr_lock);
+ return retval;
+}
+
+static int macb_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
+ struct macb *bp = queue->bp;
+ int work_done;
+
+ work_done = macb_tx_complete(queue, budget);
+
+ rmb(); // ensure txubr_pending is up to date
+ if (queue->txubr_pending) {
+ queue->txubr_pending = false;
+ netdev_vdbg(bp->dev, "poll: tx restart\n");
+ macb_tx_restart(queue);
+ }
+
+ netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, MACB_BIT(TCOMP));
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were sent while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_tx_complete_pending(queue)) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
+ napi_schedule(napi);
+ }
+ }
+
+ return work_done;
+}
+
static void macb_hresp_error_task(struct tasklet_struct *t)
{
struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
@@ -1628,21 +1761,6 @@ static void macb_hresp_error_task(struct tasklet_struct *t)
netif_tx_start_all_queues(dev);
}
-static void macb_tx_restart(struct macb_queue *queue)
-{
- unsigned int head = queue->tx_head;
- unsigned int tail = queue->tx_tail;
- struct macb *bp = queue->bp;
-
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TXUBR));
-
- if (head == tail)
- return;
-
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-}
-
static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1739,9 +1857,27 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&queue->napi)) {
+ if (napi_schedule_prep(&queue->napi_rx)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&queue->napi);
+ __napi_schedule(&queue->napi_rx);
+ }
+ }
+
+ if (status & (MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR))) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR));
+
+ if (status & MACB_BIT(TXUBR)) {
+ queue->txubr_pending = true;
+ wmb(); // ensure softirq can see update
+ }
+
+ if (napi_schedule_prep(&queue->napi_tx)) {
+ netdev_vdbg(bp->dev, "scheduling TX softirq\n");
+ __napi_schedule(&queue->napi_tx);
}
}
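The interrupt handler communicates the TXUBR condition to the TX poller through queue->txubr_pending, with a wmb()/rmb() pair ordering the flag update against the NAPI schedule: the handler publishes the flag before __napi_schedule(), and macb_tx_poll() reads it back after the barrier. (The smp_ variants would often suffice; the patch uses the mandatory forms.) The generic store/load pairing looks like:

```c
#include <asm/barrier.h>

static bool flag;

static void producer(void)      /* e.g. the IRQ handler */
{
        flag = true;
        wmb();                  /* publish flag before waking the consumer */
        /* ...kick the softirq (napi_schedule)... */
}

static void consumer(void)      /* e.g. the NAPI poller */
{
        rmb();                  /* observe the latest value of flag */
        if (flag) {
                flag = false;
                /* ...handle the condition... */
        }
}
```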
@@ -1755,12 +1891,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
break;
}
- if (status & MACB_BIT(TCOMP))
- macb_tx_interrupt(queue);
-
- if (status & MACB_BIT(TXUBR))
- macb_tx_restart(queue);
-
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1966,7 +2096,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ !ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -1993,7 +2124,7 @@ dma_error:
for (i = queue->tx_head; i != tx_head; i++) {
tx_skb = macb_tx_skb(queue, i);
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
return 0;
@@ -2064,7 +2195,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
return 0;
if (padlen <= 0) {
@@ -2115,7 +2246,6 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue = &bp->queues[queue_index];
- unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
bool is_lso;
@@ -2139,7 +2269,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
@@ -2172,16 +2302,16 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_irqsave(&bp->lock, flags);
+ spin_lock_bh(&queue->tx_ptr_lock);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) < desc_cnt) {
netif_stop_subqueue(dev, queue_index);
- spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
queue->tx_head, queue->tx_tail);
- return NETDEV_TX_BUSY;
+ ret = NETDEV_TX_BUSY;
+ goto unlock;
}
/* Map socket buffer for DMA transfer */
@@ -2194,13 +2324,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
skb_tx_timestamp(skb);
+ spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock_irq(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_irqrestore(&bp->lock, flags);
+ spin_unlock_bh(&queue->tx_ptr_lock);
return ret;
}
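The locking split in start_xmit is worth noting: the per-queue tx_ptr_lock is taken with spin_lock_bh(), since it only contends with the TX NAPI poller running in softirq context, while the IRQ-safe bp->lock is held just around the TSTART register write it shares with interrupt-time paths. A sketch of the resulting nesting, with illustrative names:

```c
#include <linux/spinlock.h>

/* xmit path: process context vs. softirq, so the _bh variant suffices */
static void xmit_path(spinlock_t *tx_ptr_lock, spinlock_t *hw_lock)
{
        spin_lock_bh(tx_ptr_lock);      /* protects tx_head/tx_tail */
        /* ...queue descriptors... */
        spin_lock_irq(hw_lock);         /* protects hardware registers */
        /* ...write NCR.TSTART... */
        spin_unlock_irq(hw_lock);
        spin_unlock_bh(tx_ptr_lock);
}

/* NAPI poll path: already in softirq, a plain spin_lock is enough */
static void poll_path(spinlock_t *tx_ptr_lock)
{
        spin_lock(tx_ptr_lock);
        /* ...advance tx_tail... */
        spin_unlock(tx_ptr_lock);
}
```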
@@ -2720,9 +2852,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2734,15 +2866,21 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_enable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
macb_init_hw(bp);
- err = macb_phylink_connect(bp);
+ err = phy_power_on(bp->sgmii_phy);
if (err)
goto reset_hw;
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto phy_off;
+
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
@@ -2750,10 +2888,15 @@ static int macb_open(struct net_device *dev)
return 0;
+phy_off:
+ phy_power_off(bp->sgmii_phy);
+
reset_hw:
macb_reset_hw(bp);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
macb_free_consistent(bp);
pm_exit:
pm_runtime_put_sync(&bp->pdev->dev);
@@ -2769,12 +2912,16 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
+ phy_power_off(bp->sgmii_phy);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -3337,7 +3484,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
spin_lock_irqsave(&bp->rx_fs_lock, flags);
@@ -3390,8 +3538,8 @@ static int gem_del_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc),
- htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
gem_writel_n(bp, SCRT2, fs->location, 0);
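The psrc/pdst fields are __be16 (network byte order), so converting them for printing calls for be16_to_cpu() (big-endian to host), not htons() (host to big-endian). On little-endian machines both happen to byte-swap, so the printed value was already correct; the change makes the types right and silences sparse endianness warnings. For example:

```c
#include <asm/byteorder.h>
#include <linux/printk.h>
#include <linux/types.h>

static void print_port(__be16 wire_port) /* value as seen on the wire */
{
        u16 host_port = be16_to_cpu(wire_port); /* correct direction and types */

        /* htons(wire_port) would swap the same bytes on little-endian,
         * but its prototype expects a host-order u16, so sparse warns. */
        pr_info("port %u\n", host_port);
}
```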
@@ -3830,7 +3978,9 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
- netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
+ spin_lock_init(&queue->tx_ptr_lock);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -4100,11 +4250,9 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
+ if (ret < 0)
return ret;
- }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4455,6 +4603,61 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->sgmii_phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+ }
+
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->sgmii_phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+
+err_out_phy_exit:
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
static const struct macb_usrio_config sama7g5_usrio = {
.mii = 0,
.rmii = 1,
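init_reset_optional() above leans on two conveniences. devm_phy_optional_get() returns NULL, not an error, when the device tree describes no PHY, and the generic PHY calls (phy_init(), phy_power_on(), phy_exit()) all treat a NULL phy as a successful no-op, so the non-SGMII case needs no special-casing. dev_err_probe() logs the failure and returns the error in one step, demoting -EPROBE_DEFER to a debug message. A condensed sketch with a hypothetical wrapper:

```c
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int get_optional_phy(struct platform_device *pdev, struct phy **out)
{
        struct phy *p = devm_phy_optional_get(&pdev->dev, NULL);

        if (IS_ERR(p))          /* a real error, including probe deferral */
                return dev_err_probe(&pdev->dev, PTR_ERR(p),
                                     "failed to get SGMII PHY\n");
        *out = p;               /* may be NULL: phy_init(NULL) etc. are no-ops */
        return phy_init(p);     /* returns 0 when p is NULL */
}
```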
@@ -4481,8 +4684,8 @@ static const struct macb_config at91sam9260_config = {
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &macb_default_usrio,
@@ -4513,8 +4716,8 @@ static const struct macb_config sama5d29_config = {
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4546,11 +4749,11 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = macb_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4564,6 +4767,17 @@ static const struct macb_config zynq_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
MACB_CAPS_MIIONRGMII,
@@ -4582,8 +4796,17 @@ static const struct macb_config sama7g5_emac_config = {
.usrio = &sama7g5_usrio,
};
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
static const struct of_device_id macb_dt_ids[] = {
- { .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
{ .compatible = "cdns,np4-macb", .data = &np4_config },
@@ -4597,11 +4820,15 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
- { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
- { .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
+ { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
+ { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
+ { .compatible = "xlnx,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4609,8 +4836,8 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4712,7 +4939,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
@@ -4726,8 +4953,8 @@ static int macb_probe(struct platform_device *pdev)
/* MTU range: 68 - 1500 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
- if (bp->caps & MACB_CAPS_JUMBO)
- dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
dev->max_mtu = ETH_DATA_LEN;
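With the JML register read replaced by the per-config jumbo_max_len, the advertised maximum MTU for the 10240-byte configs above works out to 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222; configs without a jumbo length keep the standard 1500. As a worked example:

```c
#include <linux/if_ether.h>     /* ETH_HLEN = 14, ETH_FCS_LEN = 4 */

/* e.g. jumbo_max_len = 10240 for the zynqmp/mpfs/versal configs */
static unsigned int max_mtu_for(unsigned int jumbo_max_len)
{
        return jumbo_max_len - ETH_HLEN - ETH_FCS_LEN; /* 10240 -> 10222 */
}
```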
@@ -4767,7 +4994,7 @@ static int macb_probe(struct platform_device *pdev)
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_exit;
netif_carrier_off(dev);
@@ -4792,6 +5019,9 @@ err_out_unregister_mdio:
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
+err_out_phy_exit:
+ phy_exit(bp->sgmii_phy);
+
err_out_free_netdev:
free_netdev(dev);
@@ -4813,6 +5043,7 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
@@ -4893,12 +5124,15 @@ static int __maybe_unused macb_suspend(struct device *dev)
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_disable(&queue->napi);
+ ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -4972,8 +5206,10 @@ static int __maybe_unused macb_resume(struct device *dev)
}
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_enable(&queue->napi);
+ ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
if (netdev->hw_features & NETIF_F_NTUPLE)
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
@@ -4986,6 +5222,9 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
phylink_start(bp->phylink);
rtnl_unlock();
@@ -5003,7 +5242,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
if (!(device_may_wakeup(dev)))
macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
- else
+ else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
@@ -5019,8 +5258,10 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
+ clk_prepare_enable(bp->tsu_clk);
+ } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
+ clk_prepare_enable(bp->tsu_clk);
}
- clk_prepare_enable(bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index fb6b27f46b15..e6cb20aaa76a 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -434,7 +434,7 @@ int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
return 0;
}
-static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
u32 reg_val;
@@ -444,8 +444,6 @@ static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
else
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
-
- return 0;
}
int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -468,10 +466,11 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_OFF:
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (gem_ptp_set_one_step_sync(bp, 1) != 0)
- return -ERANGE;
- fallthrough;
+ gem_ptp_set_one_step_sync(bp, 1);
+ tx_bd_control = TSTAMP_ALL_FRAMES;
+ break;
case HWTSTAMP_TX_ON:
+ gem_ptp_set_one_step_sync(bp, 0);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
default:
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 457cb7121000..f4f87dfa9687 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
+ * This function implements the reception process.
* Also it runs the TX completion thread
*/
static int xgmac_poll(struct napi_struct *napi, int budget)
@@ -1792,7 +1792,7 @@ static int xgmac_probe(struct platform_device *pdev)
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
- netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ netif_napi_add(ndev, &priv->napi, xgmac_poll);
ret = register_netdev(ndev);
if (ret)
goto err_reg;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 3f1c189646f4..a0fd32476225 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -87,8 +87,8 @@
*/
#define CN23XX_SLI_PKT_IN_JABBER 0x29170
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
index d33dd8f4226f..e956109415cd 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -36,8 +36,8 @@
#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..882b2be06ea0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -851,7 +851,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
napi = &droq->napi;
dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
(u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index ba28aa444e5a..d312bd594935 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,11 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct lio_trusted_vf_ctx {
- struct completion complete;
- int status;
-};
-
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
@@ -3563,7 +3558,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_LRO;
}
- netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 568f211d91cc..ac196883f07e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -2094,7 +2094,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_GRO
| NETIF_F_LRO;
- netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
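netif_set_gso_max_size() was split into separate TSO and GSO limits: drivers now express the hardware ceiling with netif_set_tso_max_size() (and netif_set_tso_max_segs()), while the GSO knobs remain user-tunable within that ceiling. A hedged usage sketch, with an illustrative limit:

```c
#include <linux/netdevice.h>

#define MY_HW_TSO_MAX   (16 * 1024)     /* hypothetical device limit */

static void set_offload_limits(struct net_device *dev)
{
        /* Device-imposed ceiling on a single TSO super-packet; the stack's
         * gso_max_size may be lowered by user space but not raised above it. */
        netif_set_tso_max_size(dev, MY_HW_TSO_MAX);
}
```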
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 103591dcea1c..edde0b8fa49c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1342,7 +1342,7 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1396,8 +1396,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
p = netdev_priv(netdev);
- netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
- OCTEON_MGMT_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
p->netdev = netdev;
p->dev = &pdev->dev;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index f2f1ce81fd9c..0ec65ec634df 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -59,7 +59,7 @@ struct nicpf {
/* MSI-X */
u8 num_vec;
- bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS];
char irq_name[NIC_PF_MSIX_VECTORS][20];
};
@@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
u64 intr;
u8 vf;
- if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
+ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0])
mbx = 0;
else
mbx = 1;
@@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic)
for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq])
- free_irq(pci_irq_vector(nic->pdev, irq), nic);
- nic->irq_allocated[irq] = false;
+ free_irq(nic->irq_allocated[irq], nic);
+ nic->irq_allocated[irq] = 0;
}
}
static int nic_register_interrupts(struct nicpf *nic)
{
- int i, ret;
+ int i, ret, irq;
nic->num_vec = pci_msix_vec_count(nic->pdev);
/* Enable MSI-X */
@@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic)
sprintf(nic->irq_name[i],
"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(pci_irq_vector(nic->pdev, i),
- nic_mbx_intr_handler, 0,
+ irq = pci_irq_vector(nic->pdev, i);
+ ret = request_irq(irq, nic_mbx_intr_handler, 0,
nic->irq_name[i], nic);
if (ret)
goto fail;
- nic->irq_allocated[i] = true;
+ nic->irq_allocated[i] = irq;
}
/* Enable mailbox interrupt */
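irq_allocated changes from a bool to the Linux IRQ number returned by pci_irq_vector(). This lets both the mailbox handler and nic_free_all_interrupts() use the stored number directly instead of calling pci_irq_vector() again, which is only valid while the MSI-X vectors are still allocated; 0 doubles as "not requested". The pattern, with error handling for a negative pci_irq_vector() return elided:

```c
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Request vector i once, remembering the mapped IRQ number in *slot. */
static int grab_vector(struct pci_dev *pdev, int i, unsigned int *slot,
                       irq_handler_t handler, void *data, const char *name)
{
        int irq = pci_irq_vector(pdev, i); /* valid only while vectors exist */
        int ret = request_irq(irq, handler, 0, name, data);

        if (ret)
                return ret;
        *slot = irq;    /* later: free_irq(*slot, data); *slot = 0; */
        return 0;
}
```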
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5a9fad61e9ea..e5c71f907852 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -191,8 +191,8 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a04aa206fddc..98f3dc460ca7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1472,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
}
cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
- netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
napi_enable(&cq_poll->napi);
nic->napi[qidx] = cq_poll;
}
@@ -2024,9 +2023,6 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
u8 mode;
struct xcast_addr_list *mc;
- if (!vf_work)
- return;
-
/* Save message data locally to prevent them from
* being overwritten by next ndo_set_rx_mode call().
*/
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 4367edbdd579..06397cc8bb36 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1261,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
@@ -1382,7 +1382,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 574a32f23f96..2f6484dc186a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1409,7 +1409,8 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
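acpi_fetch_acpi_dev() is the modern replacement for acpi_bus_get_device(): it returns the struct acpi_device * for a handle directly, NULL when none is attached, instead of an int status plus an output parameter. A sketch:

```c
#include <acpi/acpi_bus.h>

static struct acpi_device *lookup(acpi_handle handle)
{
        /* Old form: int err = acpi_bus_get_device(handle, &adev); */
        return acpi_fetch_acpi_dev(handle); /* NULL if no device attached */
}
```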
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 0321be77366c..e56eff701395 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: common.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
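The remaining cxgb hunks convert the verbose license boilerplate to SPDX identifiers. Per the kernel's license-rules documentation, the tag goes on line 1, in a C comment for headers and in C++ style for .c files, which is why the two forms alternate below:

```c
/* First line of a header (.h): */
/* SPDX-License-Identifier: GPL-2.0-only */

/* First line of a C source file (.c): */
// SPDX-License-Identifier: GPL-2.0-only
```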
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index bf43da6c6a63..12639b688ddc 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cphy.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index 5249686afe71..a30fb407115d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cpl5_cmd.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -635,4 +626,3 @@ struct cpl_mss_change {
};
#endif /* _CXGB_CPL5_CMD_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index f4054d2553ea..d2286adf09fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,8 +429,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -1053,7 +1053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
- netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, t1_poll);
netdev->ethtool_ops = &t1_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index 81526ad36339..0427e894c277 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: elmer0.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -154,4 +145,3 @@ enum {
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 3e182eee799e..ef70569435be 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: espi.c *
@@ -7,16 +8,6 @@
* Ethernet SPI functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 162de5259df9..f588e9f3b37a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: espi.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index 5913eaf442b5..96077da1ed5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: gmac.h *
@@ -7,16 +8,6 @@
* Generic MAC functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index 7ddb301bcba0..556c8ad68fa8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: mv88x201x.c *
@@ -7,16 +8,6 @@
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 0bb37e4680c7..cbfa03d5663a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: pm3393.c *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index 964ce59ee169..f751e680cf7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: regs.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 12e76fd0ae91..861edff5ed89 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: sge.c *
@@ -7,16 +8,6 @@
* DMA engine. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index 716705b96f26..f7e6f64040ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: sge.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 007c591b8bf5..367a9e4581d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: subr.c *
@@ -7,16 +8,6 @@
* Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index 7f79cc7ceb75..4c883170683b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: suni1x10gexp_regs.h *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -1639,4 +1630,3 @@
#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002
#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63521312cb90..9b84c8d8d309 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -609,8 +609,7 @@ static void init_napi(struct adapter *adap)
struct sge_qset *qs = &adap->sge.qs[i];
if (qs->adap)
- netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
- 64);
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
}
/*
@@ -1302,6 +1301,7 @@ static int cxgb_up(struct adapter *adap)
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
+ quiesce_rx(adap);
free_irq_resources(adap);
err = ret;
goto out;
@@ -1627,8 +1627,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
@@ -3349,6 +3349,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (!adapter->registered_device_map) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -ENODEV;
goto out_free_dev;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index da41eee2f25c..a06003bfa04b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
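
The qdesc rework above narrows each lock's scope: uld_mutex is dropped before the mqprio mutex is taken, so each error label unlocks exactly the lock held at that point. The resulting control flow, reduced to a sketch (names follow the patch):

	mutex_lock(&uld_mutex);
	/* collect ULD TXQ/RXQ/FLQ/CIQ descriptors; errors: goto out_unlock_uld */
	mutex_unlock(&uld_mutex);

	if (!padap->tc_mqprio)
		goto out;
	mutex_lock(&padap->tc_mqprio->mqprio_mutex);
	/* collect ETHOFLD descriptors; errors: goto out_unlock_mqprio */
out_unlock_mqprio:
	mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
	return rc;
out_unlock_uld:
	mutex_unlock(&uld_mutex);
	goto out;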
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4a872f328fea..7d5204834ee2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -85,7 +85,7 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
if (err) {
dev_err(adap->pdev_dev,
- "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
dcb_ver_array[dcb->dcb_version], app.selector,
app.protocol, -err);
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 7d49fd4edc9e..14e0d989c3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3429,18 +3429,18 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
unsigned long *t;
struct adapter *adap = filp->private_data;
- t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ t = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!t)
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kfree(t);
+ bitmap_free(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kfree(t);
+ bitmap_free(t);
return count;
}
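
bitmap_zalloc()/bitmap_free() are drop-in replacements for the open-coded kcalloc(BITS_TO_LONGS(nbits), sizeof(long), ...)/kfree() pairs converted here: the allocation layout is identical, but the helpers state the intent and keep the bit count, rather than a count of longs, at the call site. A minimal sketch:

	unsigned long *map;

	map = bitmap_zalloc(nbits, GFP_KERNEL); /* zeroed storage for nbits bits */
	if (!map)
		return -ENOMEM;
	/* ... bitmap_parse_user(), bitmap_copy(), set_bit(), ... */
	bitmap_free(map);                       /* matching release helper */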
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 6c790af92170..8477a93cee6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -199,8 +199,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = netdev2adap(dev);
u32 exprom_vers;
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
@@ -2227,7 +2227,7 @@ void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
if (eth_filter_info) {
for (i = 0; i < adap->params.nports; i++) {
kvfree(eth_filter_info[i].loc_array);
- kfree(eth_filter_info[i].bmap);
+ bitmap_free(eth_filter_info[i].bmap);
}
kfree(eth_filter_info);
}
@@ -2270,9 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
goto free_eth_finfo;
}
- eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
- sizeof(unsigned long),
- GFP_KERNEL);
+ eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
goto free_eth_finfo;
@@ -2284,7 +2282,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
free_eth_finfo:
while (i-- > 0) {
- kfree(eth_filter->port[i].bmap);
+ bitmap_free(eth_filter->port[i].bmap);
kvfree(eth_filter->port[i].loc_array);
}
kfree(eth_filter_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0c78c0db8937..9cbce1faab26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3903,8 +3903,8 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -5047,28 +5047,24 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the various egress queue bitmaps
 * i.e. starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
}
- bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -5417,10 +5413,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5854,8 +5850,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5870,7 +5865,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -6189,10 +6184,10 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 28fd2de9e4cf..1672d3afe5be 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -8,6 +8,46 @@
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
+static int cxgb4_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -48,11 +88,10 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
flow_action_for_each(i, entry, actions) {
switch (entry->id) {
case FLOW_ACTION_POLICE:
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ ret = cxgb4_policer_validate(actions, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert bytes per second to bits per second */
if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
NL_SET_ERR_MSG_MOD(extack,
@@ -150,11 +189,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
flow_action_for_each(i, entry, &cls->rule->action)
if (entry->id == FLOW_ACTION_POLICE)
break;
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert from bytes per second to Kbps */
p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
p.u.params.channel = pi->tx_chan;
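
cxgb4_policer_validate() centralizes the conform/exceed checks every FLOW_ACTION_POLICE offload needs (exceed must drop; conform must pipe or accept; no peakrate/avrate/overhead; no packet-per-second rates), so the callers above are left with only the rate math. That conversion, spelled out as a sketch:

	/* flow_action reports bytes per second; the TX scheduler takes Kbps */
	u64 rate_kbps = div_u64(entry->police.rate_bytes_ps * 8, 1000);

	/* the egress cap check compares bits per second directly */
	if (entry->police.rate_bytes_ps * 8 > max_link_rate)
		return -EOPNOTSUPP; /* sketch; the driver pairs the error with an extack message */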
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f889f404305c..46809e2d94ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1531,7 +1531,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
@@ -4467,7 +4467,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
if (ret)
goto err;
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &iq->napi, napi_rx_handler);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
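
netif_napi_add() dropped its weight parameter; the core now supplies NAPI_POLL_WEIGHT (64) itself, which is exactly what every call site in this series passed. A driver that genuinely needs a non-default weight uses the _weight variant instead:

	netif_napi_add(dev, &iq->napi, napi_rx_handler);            /* default weight (64) */
	netif_napi_add_weight(dev, &iq->napi, napi_rx_handler, 16); /* explicit weight */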
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e7b4e3ed056c..8d719f82854a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
goto out;
na = ret;
- memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+ memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
strim(p->sn);
- memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
- strim((char *)p->na);
+ memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+ strim(p->na);
out:
vfree(vpd);
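
The min_t(int, ...) -> min_t(unsigned int, ...) changes close a signedness hole: the VPD field lengths are unsigned, and forcing them through int would let a huge (corrupted) length compare as negative and win the min(), then blow up when used as a copy size. A standalone demonstration of the failure mode, with a hypothetical corrupted length:

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int id_len = 0x80000010u;              /* hypothetical corrupted length */
	size_t n_bad  = (size_t)min_t(int, id_len, 16); /* cast to int: negative, so it "wins" */
	size_t n_good = min_t(unsigned int, id_len, 16);

	printf("min_t(int, ...)          -> %zu\n", n_bad);  /* enormous copy size */
	printf("min_t(unsigned int, ...) -> %zu\n", n_good); /* 16, properly bounded */
	return 0;
}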
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 7de3800437c9..63b2bd084130 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev)
*/
err = t4vf_update_port_info(pi);
if (err < 0)
- return err;
+ goto err_unwind;
/*
* Note that this interface is up and start everything up ...
@@ -1553,8 +1553,8 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
@@ -2859,7 +2859,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* address stored on the adapter
* @adapter: The adapter
*
- * Find the the port mask for the VF based on the index of mac
+ * Find the port mask for the VF based on the index of mac
* address stored in the adapter. If no mac address is stored on
* the adapter for the VF, use the port mask received from the
* firmware.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 43b2ceb6aa32..2d0cf76fb3c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2336,7 +2336,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
if (ret)
goto err;
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler);
rspq->cur_desc = rspq->desc;
rspq->cidx = 0;
rspq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d546993bda09..1c52592d3b65 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -877,7 +877,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
- * separately with the Padding Boundary in SGE_CONTROL and and Packing
+ * separately with the Padding Boundary in SGE_CONTROL and Packing
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
* SGE_CONTROL in order to determine how ingress packet data will be
* laid out in Packed Buffer Mode. Unfortunately, older versions of
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 59683f79959c..da9973b711f4 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
tx_info->ip_family = AF_INET;
@@ -1012,7 +1012,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1907,7 +1907,7 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
skb_tx_timestamp(nskb);
@@ -1932,20 +1932,26 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
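
Two tree-wide conversions meet in this file: skb_transport_offset(skb) + tcp_hdrlen(skb) now has the named helper skb_tcp_all_headers(skb), and tls_ctx->netdev has become RCU-managed so that tls_device_down can clear it while transmits are in flight. The read pattern in the xmit path, as a sketch (BHs are already disabled there, so rcu_dereference_bh() needs no extra locking):

	struct net_device *tls_netdev;

	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* NULL means tls_device_down is racing with us; keep processing the
	 * skb rather than dropping it -- only a genuine mismatch bails out.
	 */
	if (unlikely(tls_netdev && tls_netdev != dev))
		goto out;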
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 9e2378013642..41714203ace8 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+ size_t len, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 4af5561cbfc5..c2e7037c7ba1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1063,14 +1063,13 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
-static void inet_inherit_port(struct inet_hashinfo *hash_info,
- struct sock *lsk, struct sock *newsk)
+static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
local_bh_disable();
__inet_inherit_port(lsk, newsk);
@@ -1236,11 +1235,11 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
- inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ inet_inherit_port(lsk, newsk);
csk_set_flag(csk, CSK_CONN_INLINE);
bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
@@ -1384,7 +1383,7 @@ static void chtls_pass_accept_request(struct sock *sk,
#endif
}
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
@@ -1392,7 +1391,7 @@ static void chtls_pass_accept_request(struct sock *sk,
th_ecn = tcph->ece && tcph->cwr;
if (th_ecn) {
ect = !INET_ECN_is_not_ect(ip_dsfield);
- ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
inet_rsk(oreq)->ecn_ok = 1;
}
@@ -1467,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = prandom_u32();
+ inet_sk(sk)->inet_id = get_random_u16();
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
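
The READ_ONCE() annotations on the tcp_window_scaling and tcp_ecn sysctls mark loads that race with writers on /proc/sys: the annotation guarantees a single untorn load, stops the compiler from refetching the value mid-function, and documents the intentional data race for KCSAN. Sketch:

	/* one untorn load of a sysctl that a writer may change concurrently */
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))
		inet_rsk(oreq)->wscale_ok = 1;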
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index c320cc8ca68d..a4256087ac82 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -919,8 +919,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
if (csk_mem_free(cdev, sk)) {
- current_timeo = (prandom_u32() % (HZ / 5)) + 2;
- vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ current_timeo = prandom_u32_max(HZ / 5) + 2;
+ vm_wait = prandom_u32_max(HZ / 5) + 2;
}
add_wait_queue(sk_sleep(sk), &wait);
@@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied)
}
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
@@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1616,7 +1616,7 @@ skip_copy:
* Peek at data in a socket's receive buffer.
*/
static int peekmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags)
+ size_t len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 peek_seq, offset;
@@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
long timeo;
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
peek_seq = tp->copied_seq;
do {
@@ -1737,7 +1737,7 @@ found_ok_skb:
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
@@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
if (unlikely(flags & MSG_OOB))
- return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
- addr_len);
+ return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);
if (unlikely(flags & MSG_PEEK))
- return peekmsg(sk, msg, len, nonblock, flags);
+ return peekmsg(sk, msg, len, flags);
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
if (is_tls_rx(csk))
- return chtls_pt_recvmsg(sk, msg, len, nonblock,
- flags, addr_len);
+ return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
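
The recvmsg() prototypes lose their separate nonblock argument across this series; non-blocking intent now travels exclusively in flags as MSG_DONTWAIT, so every former use of nonblock becomes the same expression:

	/* sock_rcvtimeo() yields 0 (poll once) when MSG_DONTWAIT is set */
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	sk_busy_loop(sk, flags & MSG_DONTWAIT); /* same derivation for the busy-poll nonblock flag */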
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 9098b3eed4da..1e55b12fee51 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -193,7 +193,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
{
struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index d04a6c163445..da8d10475a08 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/ip6_route.h>
@@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi,
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
- tos, 0);
+ tos & ~INET_ECN_MASK, 0);
if (IS_ERR(rt))
return NULL;
n = dst_neigh_lookup(&rt->dst, &peer_ip);
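
Masking the ToS value with ~INET_ECN_MASK before the route lookup keeps the two ECN bits (which TCP may set on a SYN) from influencing route selection; only the DSCP portion of the field is routing-relevant. Sketch:

	/* INET_ECN_MASK is 0x3: the low two ECN bits of the ToS byte */
	u8 route_tos = tos & ~INET_ECN_MASK; /* DSCP kept, ECN cleared */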
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 4a97aa8e1387..06a0c00af99c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -985,7 +985,7 @@ release_irq:
if (result == DETECTED_NONE) {
pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
- result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
+ result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
}
break;
case A_CNF_MEDIA_10B_2:
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 21ba6e893072..8627ab19d470 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -689,7 +689,7 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
@@ -812,7 +812,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index d6dd1b4edf6e..462c5435a206 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index ac37cacc6136..d25426470a29 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 52aaf1bb5205..a0964b629ffc 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index 3bdc74fba1e3..e3b700c28bc4 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
index 6b9f9255af28..e01790fb0415 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.h
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -1,20 +1,5 @@
-/**
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef __ENIC_API_H__
#define __ENIC_API_H__
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index f8d2a6a34282..2cbae7c6cc3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/pci.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index f5bb058b3f96..698d0cb02064 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_DEV_H_
#define _ENIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6c11f9d62526..08b7cc0a1809 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/ethtool.h>
@@ -146,10 +131,10 @@ static void enic_get_drvinfo(struct net_device *netdev,
if (err == -ENOMEM)
return;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4db6889b79ba..29500d32e362 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
- err = irq_set_affinity_hint(enic->msix_entry[i].vector,
- enic->msix[i].affinity_mask);
+ err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
if (err)
- netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
err);
}
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
int i;
for (i = 0; i < enic->intr_count; i++)
- irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+ irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}
static int enic_udp_tunnel_set_port(struct net_device *netdev,
@@ -680,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
}
@@ -2634,16 +2633,17 @@ static int enic_dev_init(struct enic *enic)
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
- netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ netif_napi_add(netdev, &enic->napi[0], enic_poll);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
netif_napi_add(netdev, &enic->napi[i],
- enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+ enic_poll_msix_rq);
}
for (i = 0; i < enic->wq_count; i++)
- netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
- enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev,
+ &enic->napi[enic_cq_wq(enic, i)],
+ enic_poll_msix_wq);
break;
}
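
irq_set_affinity_hint() both publishes the hint under /proc/irq/*/affinity_hint and force-applies the mask as the IRQ's affinity; irq_update_affinity_hint() only publishes the hint, leaving actual placement to userspace tools such as irqbalance, which is what the enic conversion above wants. Sketch of the pairing:

	/* publish a placement hint without overriding user-configured affinity */
	err = irq_update_affinity_hint(vector, affinity_mask);
	if (err)
		netdev_warn(netdev, "irq_update_affinity_hint failed, err %d\n", err);

	/* on teardown, clear the published hint */
	irq_update_affinity_hint(vector, NULL);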
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 80f46dbd5117..4720a952725d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h
index a09ff392c1c6..20a2687713ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.h
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_PP_H_
#define _ENIC_PP_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 40b20817ddd5..1c48aebdbab0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 81f98a8b60e9..b8ee42d297aa 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_RES_H_
diff --git a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
index e6dd30988d6f..0ab5fd6b8d46 100644
--- a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _RQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 519323460f26..27c885e91552 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 4e6aa65857f7..eed5bf59e5d2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_CQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 45015931b335..12a83fa1302d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 714fc1ed79e3..6273794b923b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index fcc4a3ccdd94..db56d778877a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEVCMD_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 7d6fbb5635a4..5acc236069de 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_ENIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 23604e3d4455..25319f072a04 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h
index 2b1636392294..33a72aa10b26 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_INTR_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 84ff8ca17fcb..04fee45b5d39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_NIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index 4e45f88ac1d4..b4776e334d63 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RESOURCE_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index a3e7b003ada1..5ae80551f17c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
@@ -216,4 +203,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0413103ebe94..0bc595abc03b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index 881fa18542b3..4dcf0e61cb13 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RSS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 74c81ed6fdab..2dd04322d760 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_STATS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 24ef8cd40545..20fcb20b42ed 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2010 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 057776908828..b51c1c52f8bf 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2010 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_VIC_H_
#define _VNIC_VIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index eb75891974df..29c7900349b2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 01209613d57d..75c526911074 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_WQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
index c7021e3a631f..425e46a804ee 100644
--- a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _WQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index c78b99a497df..fdf10318758b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -68,7 +68,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define DEFAULT_GMAC_RXQ_ORDER 9
#define DEFAULT_GMAC_TXQ_ORDER 8
#define DEFAULT_RX_BUF_ORDER 11
-#define DEFAULT_NAPI_WEIGHT 64
#define TX_MAX_FRAGS 16
#define TX_QUEUE_NUM 1 /* max: 6 */
#define RX_MAX_ALLOC_ORDER 2
@@ -1920,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1932,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1946,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2032,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2051,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2066,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
@@ -2363,11 +2362,13 @@ static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
static int gemini_ethernet_port_probe(struct platform_device *pdev)
{
char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct device_node *np = pdev->dev.of_node;
struct gemini_ethernet_port *port;
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
struct device *parent;
+ u8 mac[ETH_ALEN];
unsigned int id;
int irq;
int ret;
@@ -2470,8 +2471,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll,
- DEFAULT_NAPI_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
+
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(port->mac_addr, mac, ETH_ALEN);
+ }
if (is_valid_ether_addr((void *)port->mac_addr)) {
eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
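
For reference, a minimal sketch of the seqcount-protected reader pattern that the stats hunks above convert to the _irq variants; the names below are hypothetical and not part of this patch. On 32-bit machines the _irq variants disable local interrupts so a stats writer running from (soft)irq context cannot stall the retry loop; on 64-bit they compile away.

    #include <linux/u64_stats_sync.h>

    struct demo_stats {
            struct u64_stats_sync syncp;
            u64 rx_packets;
            u64 rx_bytes;
    };

    /* snapshot both counters consistently against a concurrent writer */
    static void demo_fetch(struct demo_stats *s, u64 *pkts, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    *pkts = s->rx_packets;
                    *bytes = s->rx_bytes;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }
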
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 7af86b6d4150..02e0caff98e3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -3,6 +3,19 @@
# Davicom device configuration
#
+config NET_VENDOR_DAVICOM
+ bool "Davicom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Davicom devices. If you say Y, you will be asked
+ for your specific card in the following selections.
+
+if NET_VENDOR_DAVICOM
+
config DM9000
tristate "DM9000 support"
depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
@@ -22,3 +35,21 @@ config DM9000_FORCE_SIMPLE_PHY_POLL
bit to determine if the link is up or down instead of the more
costly MII PHY reads. Note, this will not work if the chip is
operating with an external PHY.
+
+config DM9051
+ tristate "DM9051 SPI support"
+ depends on SPI
+ select CRC32
+ select MDIO
+ select PHYLIB
+ select REGMAP_SPI
+ help
+ Support for DM9051 SPI chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9051.
+
+	  The host's SPI master must access the DM9051 in SPI mode 0
+	  (CPOL=0, CPHA=0) on the SPI bus.
+
+endif # NET_VENDOR_DAVICOM
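
The mode-0 note in the help text matters when wiring the chip up. Most systems describe it in devicetree (the new driver below matches "davicom,dm9051"), but as an illustrative sketch, a legacy board file could register it as follows; the bus number and clock rate here are assumptions, not values from this patch.

    #include <linux/init.h>
    #include <linux/spi/spi.h>

    static struct spi_board_info dm9051_board_info __initdata = {
            .modalias       = "dm9051",
            .bus_num        = 0,                    /* assumed controller */
            .chip_select    = 0,
            .max_speed_hz   = 10 * 1000 * 1000,     /* assumed wiring limit */
            .mode           = SPI_MODE_0,           /* CPOL=0, CPHA=0 */
    };

    static int __init demo_board_init(void)
    {
            return spi_register_board_info(&dm9051_board_info, 1);
    }
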
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
index 173c87d21076..225f85bc1f53 100644
--- a/drivers/net/ethernet/davicom/Makefile
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_DM9051) += dm9051.o
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0985ab216566..b21e56de6167 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -28,8 +28,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -540,8 +539,8 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
struct board_info *dm = to_dm9000_board(dev);
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
@@ -1012,7 +1011,7 @@ static void dm9000_send_packet(struct net_device *dev,
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int
+static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
@@ -1421,8 +1420,7 @@ dm9000_probe(struct platform_device *pdev)
int iosize;
int i;
u32 id_val;
- int reset_gpios;
- enum of_gpio_flags flags;
+ struct gpio_desc *reset_gpio;
struct regulator *power;
bool inv_mac_addr = false;
u8 addr[ETH_ALEN];
@@ -1442,20 +1440,24 @@ dm9000_probe(struct platform_device *pdev)
dev_dbg(dev, "regulator enabled\n");
}
- reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
- &flags);
- if (gpio_is_valid(reset_gpios)) {
- ret = devm_gpio_request_one(dev, reset_gpios, flags,
- "dm9000_reset");
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(reset_gpio);
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio: %d\n", ret);
+ goto out_regulator_disable;
+ }
+
+ if (reset_gpio) {
+ ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
if (ret) {
- dev_err(dev, "failed to request reset gpio %d: %d\n",
- reset_gpios, ret);
+ dev_err(dev, "failed to set reset gpio name: %d\n",
+ ret);
goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
msleep(2);
- gpio_set_value(reset_gpios, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
/* Needs 3ms to read eeprom when PWRST is deasserted */
msleep(4);
}
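
A condensed sketch of the descriptor-based reset sequence the dm9000 hunk above adopts (demo_hw_reset is a hypothetical helper). Unlike the legacy gpio_* calls being removed, gpiod values are logical: an active-low annotation in the devicetree is applied by the GPIO core, so the driver asserts and deasserts reset without knowing the wire polarity.

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/gpio/consumer.h>

    static int demo_hw_reset(struct device *dev)
    {
            struct gpio_desc *reset;

            /* request the line already asserted; polarity comes from DT */
            reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);
            if (!reset)
                    return 0;               /* no reset line described */

            msleep(2);                      /* PWRST# low period, min 1 ms */
            gpiod_set_value_cansleep(reset, 0);     /* deassert reset */
            msleep(4);                      /* EEPROM readout needs ~3 ms */
            return 0;
    }
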
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
new file mode 100644
index 000000000000..a523ddda7609
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -0,0 +1,1260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "dm9051.h"
+
+#define DRVNAME_9051 "dm9051"
+
+/**
+ * struct rx_ctl_mach - rx activities record
+ * @status_err_counter: rx status error counter
+ * @large_err_counter: rx get large packet length error counter
+ * @rx_err_counter: receive packet error counter
+ * @tx_err_counter: transmit packet error counter
+ * @fifo_rst_counter: reset operation counter
+ *
+ * Keeps track of the driver's operation statistics
+ */
+struct rx_ctl_mach {
+ u16 status_err_counter;
+ u16 large_err_counter;
+ u16 rx_err_counter;
+ u16 tx_err_counter;
+ u16 fifo_rst_counter;
+};
+
+/**
+ * struct dm9051_rxctrl - dm9051 driver rx control
+ * @hash_table: Multicast hash-table data
+ * @rcr_all: KS_RXCR1 register setting
+ *
+ * The settings needed to control receive filtering, such as the
+ * multicast hash filter and the receive control register value
+ */
+struct dm9051_rxctrl {
+ u16 hash_table[4];
+ u8 rcr_all;
+};
+
+/**
+ * struct dm9051_rxhdr - rx packet data header
+ * @headbyte: lead byte; a value of 0x01 indicates a valid packet
+ * @status: status bits for the received packet
+ * @rxlen: packet length
+ *
+ * Each Rx packet enters the FIFO memory starting with these four
+ * header bytes, followed by the Ethernet packet data and an
+ * appended 4-byte CRC.
+ * Both the header and the CRC are used only for checking and are
+ * finally dropped by this driver
+ */
+struct dm9051_rxhdr {
+ u8 headbyte;
+ u8 status;
+ __le16 rxlen;
+};
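
As an illustration only (not driver code), a header read into a raw buffer could be validated as follows; the 0x01 ready byte, the error mask and the 1536-byte limit correspond to DM9051_PKT_RDY, RSR_ERR_BITS and DM9051_PKT_MAX defined in dm9051.h later in this patch, and rxlen includes the trailing 4-byte CRC.

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static bool demo_rxhdr_ok(const u8 *buf, unsigned int *rxlen)
    {
            if (buf[0] != 0x01)             /* DM9051_PKT_RDY */
                    return false;
            if (buf[1] & 0xbf)              /* any RSR_ERR_BITS set */
                    return false;
            *rxlen = get_unaligned_le16(buf + 2);
            return *rxlen <= 1536;          /* DM9051_PKT_MAX */
    }
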
+
+/**
+ * struct board_info - maintain the saved data
+ * @spidev: spi device structure
+ * @ndev: net device structure
+ * @mdiobus: mii bus structure
+ * @phydev: phy device structure
+ * @txq: tx queue structure
+ * @regmap_dm: regmap for register read/write
+ * @regmap_dmbulk: extra regmap for bulk read/write
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @tx_work: Work queue for tx packets
+ * @pause: ethtool pause parameter structure
+ * @spi_lockm: between threads lock structure
+ * @reg_mutex: regmap access lock structure
+ * @bc: rx control statistics structure
+ * @rxhdr: rx header structure
+ * @rctl: rx control setting structure
+ * @msg_enable: message level value
+ * @imr_all: to store operating imr value for register DM9051_IMR
+ * @lcr_all: to store operating lcr value for register DM9051_LMCR
+ *
+ * The saved driver state, kept up to date for later use
+ */
+struct board_info {
+ u32 msg_enable;
+ struct spi_device *spidev;
+ struct net_device *ndev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+ struct sk_buff_head txq;
+ struct regmap *regmap_dm;
+ struct regmap *regmap_dmbulk;
+ struct work_struct rxctrl_work;
+ struct work_struct tx_work;
+ struct ethtool_pauseparam pause;
+ struct mutex spi_lockm;
+ struct mutex reg_mutex;
+ struct rx_ctl_mach bc;
+ struct dm9051_rxhdr rxhdr;
+ struct dm9051_rxctrl rctl;
+ u8 imr_all;
+ u8 lcr_all;
+};
+
+static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, reg, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(db->regmap_dm, reg, mask, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* skb buffer exhausted, just discard the received data
+ */
+static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rb;
+ int ret;
+
+	/* no skb buffer available,
+	 * so read and discard one byte at a time via regmap_read;
+	 * neither reg nor &rb is auto-incremented
+ */
+ do {
+ ret = regmap_read(db->regmap_dm, reg, &rb);
+ if (ret < 0) {
+ netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n",
+ __func__, ret, reg);
+ break;
+ }
+ } while (--count);
+
+ return ret;
+}
+
+static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_write(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_read(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* waiting for tx-end rather than tx-req
+ * turns out to be faster
+ */
+static int dm9051_nsr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval,
+ mval & (NSR_TX2END | NSR_TX1END), 1, 20);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout in checking for tx end\n");
+ return ret;
+}
+
+static int dm9051_epcr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval,
+ !(mval & EPCR_ERRE), 100, 10000);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "eeprom/phy in processing get timeout\n");
+ return ret;
+}
+
+static int dm9051_irq_flag(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int irq_type = irq_get_trigger_type(spi->irq);
+
+ if (irq_type)
+ return irq_type;
+
+ return IRQF_TRIGGER_LOW;
+}
+
+static unsigned int dm9051_intcr_value(struct board_info *db)
+{
+ return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ?
+ INTCR_POL_LOW : INTCR_POL_HIGH;
+}
+
+static int dm9051_set_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_set_reg(db, DM9051_FCR, fcr);
+}
+
+static int dm9051_set_recv(struct board_info *db)
+{
+ int ret;
+
+ ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table));
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */
+}
+
+static int dm9051_core_reset(struct board_info *db)
+{
+ int ret;
+
+ db->bc.fifo_rst_counter++;
+
+ ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db));
+}
+
+static int dm9051_update_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr);
+}
+
+static int dm9051_disable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */
+}
+
+static int dm9051_enable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */
+}
+
+static int dm9051_stop_mrcmd(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */
+}
+
+static int dm9051_clear_interrupt(struct board_info *db)
+{
+ return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT);
+}
+
+static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2);
+}
+
+static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+	/* val is 4 bytes wide; clear it to zero since the following
+	 * regmap_bulk_read only fills the lower 2 bytes
+ */
+ *val = 0;
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2);
+}
+
+static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
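
For illustration, the EPAR/EPCR indirect window implemented above could be used to poll the PHY's link bit as sketched below; demo_phy_link_up is an assumed helper for demonstration, not part of the driver, which instead lets phylib poll through the mdiobus wrappers that follow.

    #include <linux/mii.h>

    static bool demo_phy_link_up(struct board_info *db)
    {
            unsigned int bmsr;

            /* dm9051_phyread() serializes access via the regmap lock */
            if (dm9051_phyread(db, MII_BMSR, &bmsr))
                    return false;

            return bmsr & BMSR_LSTATUS;     /* latched link status */
    }
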
+
+static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct board_info *db = bus->priv;
+ unsigned int val = 0xffff;
+ int ret;
+
+ if (addr == DM9051_PHY_ADDR) {
+ ret = dm9051_phyread(db, regnum, &val);
+ if (ret)
+ return ret;
+ }
+
+ return val;
+}
+
+static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct board_info *db = bus->priv;
+
+ if (addr == DM9051_PHY_ADDR)
+ return dm9051_phywrite(db, regnum, val);
+
+ return -ENODEV;
+}
+
+static void dm9051_reg_lock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_lock(&db->reg_mutex);
+}
+
+static void dm9051_reg_unlock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_unlock(&db->reg_mutex);
+}
+
+static struct regmap_config regconfigdm = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+};
+
+static struct regmap_config regconfigdmbulk = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
+{
+	/* create two regmap instances, splitting read/write and
+	 * bulk_read/bulk_write into separate regmaps to avoid
+	 * conflicting regmap accesses
+ */
+ regconfigdm.lock_arg = db;
+ db->regmap_dm = devm_regmap_init_spi(db->spidev, &regconfigdm);
+ if (IS_ERR(db->regmap_dm))
+ return PTR_ERR(db->regmap_dm);
+
+ regconfigdmbulk.lock_arg = db;
+ db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk);
+ if (IS_ERR(db->regmap_dmbulk))
+ return PTR_ERR(db->regmap_dmbulk);
+
+ return 0;
+}
+
+static int dm9051_map_chipid(struct board_info *db)
+{
+ struct device *dev = &db->spidev->dev;
+ unsigned short wid;
+ u8 buff[6];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff));
+ if (ret < 0)
+ return ret;
+
+ wid = get_unaligned_le16(buff + 2);
+ if (wid != DM9051_ID) {
+ dev_err(dev, "chipid error as %04x !\n", wid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "chip %04x found\n", wid);
+ return 0;
+}
+
+/* Read the DM9051_PAR registers, which hold the mac address loaded from EEPROM at power-on
+ */
+static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr));
+ if (ret < 0)
+ return ret;
+
+ if (!is_valid_ether_addr(addr)) {
+ eth_hw_addr_random(ndev);
+
+ ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&db->spidev->dev, "Use random MAC address\n");
+ return 0;
+ }
+
+ eth_hw_addr_set(ndev, addr);
+ return 0;
+}
+
+/* ethtool-ops
+ */
+static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRVNAME_9051, sizeof(info->driver));
+}
+
+static void dm9051_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->msg_enable = value;
+}
+
+static u32 dm9051_get_msglevel(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ return db->msg_enable;
+}
+
+static int dm9051_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9051_get_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int dm9051_set_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
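
The (len | offset) & 1 guard used by both EEPROM paths above enforces the chip's 16-bit access granularity in one test: OR-ing the two values leaves bit 0 set if either is odd. A more verbose equivalent, for clarity only:

    /* reject requests that do not start on a word boundary or do not
     * cover a whole number of 16-bit words
     */
    if (offset % 2 != 0 || len % 2 != 0)
            return -EINVAL;
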
+
+static void dm9051_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ *pause = db->pause;
+}
+
+static int dm9051_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->pause = *pause;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ return dm9051_update_fcr(db);
+
+ phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+ phy_start_aneg(db->phydev);
+ return 0;
+}
+
+static const struct ethtool_ops dm9051_ethtool_ops = {
+ .get_drvinfo = dm9051_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = dm9051_get_msglevel,
+ .set_msglevel = dm9051_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dm9051_get_eeprom_len,
+ .get_eeprom = dm9051_get_eeprom,
+ .set_eeprom = dm9051_set_eeprom,
+ .get_pauseparam = dm9051_get_pauseparam,
+ .set_pauseparam = dm9051_set_pauseparam,
+};
+
+static int dm9051_all_start(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power on of the internal phy
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, 0);
+ if (ret)
+ return ret;
+
+	/* dm9051 chip registers cannot be accessed within 1 ms
+	 * after GPR power on, so a 1 ms delay is essential
+ */
+ msleep(1);
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ return dm9051_enable_interrupt(db);
+}
+
+static int dm9051_all_stop(struct board_info *db)
+{
+ int ret;
+
+	/* GPR power off of the internal phy;
+	 * the internal phy can still be accessed afterwards
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF);
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE);
+}
+
+/* fifo reset when an rx error is found
+ */
+static int dm9051_all_restart(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ret;
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_enable_interrupt(db);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n",
+ db->bc.status_err_counter + db->bc.large_err_counter,
+ db->bc.fifo_rst_counter);
+
+ ret = dm9051_set_recv(db);
+ if (ret)
+ return ret;
+
+ return dm9051_set_fcr(db);
+}
+
+/* read packets from the fifo memory
+ * return value,
+ * > 0 - number of packets read, caller can repeat the rx operation
+ * 0 - no error, caller needs to stop further rx operation
+ * < 0 - read data error, caller should abort the rx operation
+ */
+static int dm9051_loop_rx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rxbyte;
+ int ret, rxlen;
+ struct sk_buff *skb;
+ u8 *rdptr;
+ int scanrr = 0;
+
+ do {
+ ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2);
+ if (ret)
+ return ret;
+
+ if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY)
+ break; /* exhaust-empty */
+
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE);
+ if (ret)
+ return ret;
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ rxlen = le16_to_cpu(db->rxhdr.rxlen);
+ if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) {
+ netdev_dbg(ndev, "rxhdr-byte (%02x)\n",
+ db->rxhdr.headbyte);
+
+ if (db->rxhdr.status & RSR_ERR_BITS) {
+ db->bc.status_err_counter++;
+ netdev_dbg(ndev, "check rxstatus-error (%02x)\n",
+ db->rxhdr.status);
+ } else {
+ db->bc.large_err_counter++;
+ netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n",
+ rxlen, DM9051_PKT_MAX);
+ }
+ return dm9051_all_restart(db);
+ }
+
+ skb = dev_alloc_skb(rxlen);
+ if (!skb) {
+ ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen);
+ if (ret)
+ return ret;
+ return scanrr;
+ }
+
+ rdptr = skb_put(skb, rxlen - 4);
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen);
+ if (ret) {
+ db->bc.rx_err_counter++;
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ skb->protocol = eth_type_trans(skb, db->ndev);
+ if (db->ndev->features & NETIF_F_RXCSUM)
+ skb_checksum_none_assert(skb);
+ netif_rx(skb);
+ db->ndev->stats.rx_bytes += rxlen;
+ db->ndev->stats.rx_packets++;
+ scanrr++;
+ } while (!ret);
+
+ return scanrr;
+}
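
A hypothetical caller honoring the return-value contract documented above; the threaded interrupt handler further down does exactly this, interleaving a tx pass between rx passes.

    static int demo_drain_rx(struct board_info *db)
    {
            int ret;

            do {
                    ret = dm9051_loop_rx(db);  /* > 0: packets read, poll again */
            } while (ret > 0);

            return ret;     /* 0: fifo empty, < 0: error, abort rx */
    }
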
+
+/* transmit a packet,
+ * return value,
+ * 0 - success
+ * < 0 - error, e.g. -ETIMEDOUT when waiting for tx end times out
+ */
+static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len)
+{
+ int ret;
+
+ ret = dm9051_nsr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len);
+ if (ret)
+ return ret;
+
+ ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2);
+ if (ret < 0)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ);
+}
+
+static int dm9051_loop_tx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ntx = 0;
+ int ret;
+
+ while (!skb_queue_empty(&db->txq)) {
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&db->txq);
+ if (skb) {
+ ntx++;
+ ret = dm9051_single_tx(db, skb->data, skb->len);
+ len = skb->len;
+ dev_kfree_skb(skb);
+ if (ret < 0) {
+ db->bc.tx_err_counter++;
+ return 0;
+ }
+ ndev->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ }
+
+ if (netif_queue_stopped(ndev) &&
+ (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER))
+ netif_wake_queue(ndev);
+ }
+
+ return ntx;
+}
+
+static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw)
+{
+ struct board_info *db = pw;
+ int result, result_tx;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_disable_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ result = dm9051_clear_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ do {
+ result = dm9051_loop_rx(db); /* threaded irq rx */
+ if (result < 0)
+ goto out_unlock;
+ result_tx = dm9051_loop_tx(db); /* more tx better performance */
+ if (result_tx < 0)
+ goto out_unlock;
+ } while (result > 0);
+
+ dm9051_enable_interrupt(db);
+
+	/* exit with the mutex unlocked on rx or tx error
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+
+ return IRQ_HANDLED;
+}
+
+static void dm9051_tx_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, tx_work);
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_loop_tx(db);
+ if (result < 0)
+ netdev_err(db->ndev, "transmit packet error\n");
+
+ mutex_unlock(&db->spi_lockm);
+}
+
+static void dm9051_rxctl_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, rxctrl_work);
+ struct net_device *ndev = db->ndev;
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (result < 0)
+ goto out_unlock;
+
+ dm9051_set_recv(db);
+
+	/* unlock the mutex and return if a regmap access fails
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+}
+
+/* Open network device
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device
+ */
+static int dm9051_open(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->imr_all = IMR_PAR | IMR_PRM;
+ db->lcr_all = LMCR_MODE1;
+ db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table));
+
+ ndev->irq = spi->irq; /* by dts */
+ ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq,
+ dm9051_irq_flag(db) | IRQF_ONESHOT,
+ ndev->name, db);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get irq\n");
+ return ret;
+ }
+
+ phy_support_sym_pause(db->phydev);
+ phy_start(db->phydev);
+
+ /* flow control parameters init */
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ db->pause.autoneg = AUTONEG_DISABLE;
+
+ if (db->phydev->autoneg)
+ db->pause.autoneg = AUTONEG_ENABLE;
+
+ ret = dm9051_all_start(db);
+ if (ret) {
+ phy_stop(db->phydev);
+ free_irq(spi->irq, db);
+ return ret;
+ }
+
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+/* Close network device
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state while it is not being used
+ */
+static int dm9051_stop(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = dm9051_all_stop(db);
+ if (ret)
+ return ret;
+
+ flush_work(&db->tx_work);
+ flush_work(&db->rxctrl_work);
+
+ phy_stop(db->phydev);
+
+ free_irq(db->spidev->irq, db);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&db->txq);
+
+ return 0;
+}
+
+/* event: queue the tx worker to send the packet
+ */
+static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ skb_queue_tail(&db->txq, skb);
+ if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER)
+ netif_stop_queue(ndev); /* enforce limit queue size */
+
+ schedule_work(&db->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* event: queue the rx-control worker to update the rx mode
+ */
+static void dm9051_set_rx_mode(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct dm9051_rxctrl rxctrl;
+ struct netdev_hw_addr *ha;
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ u32 hash_val;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ /* rx control */
+ if (ndev->flags & IFF_PROMISC) {
+ rcr |= RCR_PRMSC;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ rcr |= RCR_ALL;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_ALLMULTI, rcr= %02x\n", rcr);
+ }
+
+ rxctrl.rcr_all = rcr;
+
+ /* broadcast address */
+ rxctrl.hash_table[0] = 0;
+ rxctrl.hash_table[1] = 0;
+ rxctrl.hash_table[2] = 0;
+ rxctrl.hash_table[3] = 0x8000;
+
+ /* the multicast address in Hash Table : 64 bits */
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0);
+ rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);
+ }
+
+ /* schedule work to do the actual set of the data if needed */
+
+ if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) {
+ memcpy(&db->rctl, &rxctrl, sizeof(rxctrl));
+ schedule_work(&db->rxctrl_work);
+ }
+}
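
To make the hash mapping above concrete: the low 6 bits of the little-endian CRC-32 of the MAC address select one of 64 filter bits, stored as four 16-bit words; the hash_table[3] = 0x8000 assignment pre-sets bit 63, the slot the broadcast address falls into per the comment above. A sketch with a hypothetical helper:

    #include <linux/bits.h>
    #include <linux/crc32.h>
    #include <linux/etherdevice.h>

    static void demo_hash_slot(const u8 *addr, unsigned int *word,
                               unsigned int *bit)
    {
            u32 hash_val = ether_crc_le(ETH_ALEN, addr) & GENMASK(5, 0);

            *word = hash_val / 16;  /* index into hash_table[4] */
            *bit = hash_val % 16;   /* bit within that 16-bit word */
    }
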
+
+/* event: write into the mac registers and eeprom directly
+ */
+static int dm9051_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(ndev, p);
+ return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+}
+
+static const struct net_device_ops dm9051_netdev_ops = {
+ .ndo_open = dm9051_open,
+ .ndo_stop = dm9051_stop,
+ .ndo_start_xmit = dm9051_start_xmit,
+ .ndo_set_rx_mode = dm9051_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = dm9051_set_mac_address,
+};
+
+static void dm9051_operation_clear(struct board_info *db)
+{
+ db->bc.status_err_counter = 0;
+ db->bc.large_err_counter = 0;
+ db->bc.rx_err_counter = 0;
+ db->bc.tx_err_counter = 0;
+ db->bc.fifo_rst_counter = 0;
+}
+
+static int dm9051_mdio_register(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!db->mdiobus)
+ return -ENOMEM;
+
+ db->mdiobus->priv = db;
+ db->mdiobus->read = dm9051_mdio_read;
+ db->mdiobus->write = dm9051_mdio_write;
+ db->mdiobus->name = "dm9051-mdiobus";
+ db->mdiobus->phy_mask = (u32)~BIT(1);
+ db->mdiobus->parent = &spi->dev;
+ snprintf(db->mdiobus->id, MII_BUS_ID_SIZE,
+ "dm9051-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, db->mdiobus);
+ if (ret)
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+
+ return ret;
+}
+
+static void dm9051_handle_link_change(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_print_status(db->phydev);
+
+	/* only write pause settings to the mac. since the mac and phy are
+	 * integrated, link state, speed and duplex are already in sync
+ */
+ if (db->phydev->link) {
+ if (db->phydev->pause) {
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ }
+ dm9051_update_fcr(db);
+ }
+}
+
+/* connect to the phy in poll mode
+ */
+static int dm9051_phy_connect(struct board_info *db)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ db->mdiobus->id, DM9051_PHY_ADDR);
+
+ db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(db->phydev))
+ return PTR_ERR_OR_ZERO(db->phydev);
+ return 0;
+}
+
+static int dm9051_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev;
+ struct board_info *db;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct board_info));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
+
+ db = netdev_priv(ndev);
+
+ db->msg_enable = 0;
+ db->spidev = spi;
+ db->ndev = ndev;
+
+ ndev->netdev_ops = &dm9051_netdev_ops;
+ ndev->ethtool_ops = &dm9051_ethtool_ops;
+
+ mutex_init(&db->spi_lockm);
+ mutex_init(&db->reg_mutex);
+
+ INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay);
+ INIT_WORK(&db->tx_work, dm9051_tx_delay);
+
+ ret = dm9051_map_init(spi, db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_chipid(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_etherdev_par(ndev, db);
+ if (ret < 0)
+ return ret;
+
+ ret = dm9051_mdio_register(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_phy_connect(db);
+ if (ret)
+ return ret;
+
+ dm9051_operation_clear(db);
+ skb_queue_head_init(&db->txq);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ phy_disconnect(db->phydev);
+ return dev_err_probe(dev, ret, "device register failed");
+ }
+
+ return 0;
+}
+
+static void dm9051_drv_remove(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_disconnect(db->phydev);
+}
+
+static const struct of_device_id dm9051_match_table[] = {
+ { .compatible = "davicom,dm9051" },
+ {}
+};
+
+static const struct spi_device_id dm9051_id_table[] = {
+ { "dm9051", 0 },
+ {}
+};
+
+static struct spi_driver dm9051_driver = {
+ .driver = {
+ .name = DRVNAME_9051,
+ .of_match_table = dm9051_match_table,
+ },
+ .probe = dm9051_probe,
+ .remove = dm9051_drv_remove,
+ .id_table = dm9051_id_table,
+};
+module_spi_driver(dm9051_driver);
+
+MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>");
+MODULE_DESCRIPTION("Davicom DM9051 network SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/davicom/dm9051.h b/drivers/net/ethernet/davicom/dm9051.h
new file mode 100644
index 000000000000..fef3120edd7c
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _DM9051_H_
+#define _DM9051_H_
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#define DM9051_ID 0x9051
+
+#define DM9051_NCR 0x00
+#define DM9051_NSR 0x01
+#define DM9051_TCR 0x02
+#define DM9051_RCR 0x05
+#define DM9051_BPTR 0x08
+#define DM9051_FCR 0x0A
+#define DM9051_EPCR 0x0B
+#define DM9051_EPAR 0x0C
+#define DM9051_EPDRL 0x0D
+#define DM9051_EPDRH 0x0E
+#define DM9051_PAR 0x10
+#define DM9051_MAR 0x16
+#define DM9051_GPCR 0x1E
+#define DM9051_GPR 0x1F
+
+#define DM9051_VIDL 0x28
+#define DM9051_VIDH 0x29
+#define DM9051_PIDL 0x2A
+#define DM9051_PIDH 0x2B
+#define DM9051_SMCR 0x2F
+#define DM9051_ATCR 0x30
+#define DM9051_SPIBCR 0x38
+#define DM9051_INTCR 0x39
+#define DM9051_PPCR 0x3D
+
+#define DM9051_MPCR 0x55
+#define DM9051_LMCR 0x57
+#define DM9051_MBNDRY 0x5E
+
+#define DM9051_MRRL 0x74
+#define DM9051_MRRH 0x75
+#define DM9051_MWRL 0x7A
+#define DM9051_MWRH 0x7B
+#define DM9051_TXPLL 0x7C
+#define DM9051_TXPLH 0x7D
+#define DM9051_ISR 0x7E
+#define DM9051_IMR 0x7F
+
+#define DM_SPI_MRCMDX 0x70
+#define DM_SPI_MRCMD 0x72
+#define DM_SPI_MWCMD 0x78
+
+#define DM_SPI_WR 0x80
+
+/* dm9051 Ethernet controller registers bits
+ */
+/* 0x00 */
+#define NCR_WAKEEN BIT(6)
+#define NCR_FDX BIT(3)
+#define NCR_RST BIT(0)
+/* 0x01 */
+#define NSR_SPEED BIT(7)
+#define NSR_LINKST BIT(6)
+#define NSR_WAKEST BIT(5)
+#define NSR_TX2END BIT(3)
+#define NSR_TX1END BIT(2)
+/* 0x02 */
+#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */
+#define TCR_TXREQ BIT(0)
+/* 0x05 */
+#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */
+#define RCR_DIS_LONG BIT(5)
+#define RCR_DIS_CRC BIT(4)
+#define RCR_ALL BIT(3)
+#define RCR_PRMSC BIT(1)
+#define RCR_RXEN BIT(0)
+#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC)
+/* 0x06 */
+#define RSR_RF BIT(7)
+#define RSR_MF BIT(6)
+#define RSR_LCS BIT(5)
+#define RSR_RWTO BIT(4)
+#define RSR_PLE BIT(3)
+#define RSR_AE BIT(2)
+#define RSR_CE BIT(1)
+#define RSR_FOE BIT(0)
+#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \
+ RSR_AE | RSR_CE | RSR_FOE)
+/* 0x0A */
+#define FCR_TXPEN BIT(5)
+#define FCR_BKPM BIT(3)
+#define FCR_FLCE BIT(0)
+#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE)
+/* 0x0B */
+#define EPCR_WEP BIT(4)
+#define EPCR_EPOS BIT(3)
+#define EPCR_ERPRR BIT(2)
+#define EPCR_ERPRW BIT(1)
+#define EPCR_ERRE BIT(0)
+/* 0x1E */
+#define GPCR_GEP_CNTL BIT(0)
+/* 0x1F */
+#define GPR_PHY_OFF BIT(0)
+/* 0x30 */
+#define ATCR_AUTO_TX BIT(7)
+/* 0x39 */
+#define INTCR_POL_LOW (1 << 0)
+#define INTCR_POL_HIGH (0 << 0)
+/* 0x3D */
+/* Pause Packet Control Register - default = 1 */
+#define PPCR_PAUSE_COUNT 0x08
+/* 0x55 */
+#define MPCR_RSTTX BIT(1)
+#define MPCR_RSTRX BIT(0)
+/* 0x57 */
+/* LEDMode Control Register - LEDMode1 */
+/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */
+#define LMCR_NEWMOD BIT(7)
+#define LMCR_TYPED1 BIT(1)
+#define LMCR_TYPED0 BIT(0)
+#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0)
+/* 0x5E */
+#define MBNDRY_BYTE BIT(7)
+/* 0xFE */
+#define ISR_MBS BIT(7)
+#define ISR_LNKCHG BIT(5)
+#define ISR_ROOS BIT(3)
+#define ISR_ROS BIT(2)
+#define ISR_PTS BIT(1)
+#define ISR_PRS BIT(0)
+#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \
+ ISR_PTS | ISR_PRS)
+#define ISR_STOP_MRCMD (ISR_MBS)
+/* 0xFF */
+#define IMR_PAR BIT(7)
+#define IMR_LNKCHGI BIT(5)
+#define IMR_PTM BIT(1)
+#define IMR_PRM BIT(0)
+
+/* Const
+ */
+#define DM9051_PHY_ADDR 1 /* PHY id */
+#define DM9051_PHY 0x40 /* PHY address 0x01 */
+#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9051_PKT_MAX 1536 /* Received packet max size */
+#define DM9051_TX_QUE_HI_WATER 50
+#define DM9051_TX_QUE_LO_WATER 25
+#define DM_EEPROM_MAGIC 0x9051
+
+#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr)
+
+static inline struct board_info *to_dm9051_board(struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+#endif /* _DM9051_H_ */
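
As a quick illustration of how the register map above composes (a minimal standalone sketch, not part of the patch: the macro values are mirrored by hand from dm9051.h so it builds outside the kernel tree, and main() is purely hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Subset of the dm9051.h bit definitions above, with BIT() expanded
 * by hand so this sketch compiles as ordinary userspace C. */
#define RCR_DIS_LONG (1u << 5)	/* discard over-length frames */
#define RCR_DIS_CRC  (1u << 4)	/* discard bad-CRC frames */
#define RCR_RXEN     (1u << 0)	/* receiver enable */
#define ISR_PTS      (1u << 1)	/* packet transmitted status */
#define ISR_PRS      (1u << 0)	/* packet received status */

int main(void)
{
	/* Compose an RX configuration byte the way the driver builds
	 * RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC), plus enable. */
	uint8_t rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	/* Hypothetical interrupt status read back from DM9051_ISR. */
	uint8_t isr = ISR_PRS;

	printf("RCR=0x%02x rx-done=%d tx-done=%d\n",
	       rcr, !!(isr & ISR_PRS), !!(isr & ISR_PTS));
	return 0;
}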
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 79dc336ce709..078a12f07e96 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -104,21 +104,6 @@ config TULIP_DM910X
def_bool y
depends on TULIP && SPARC
-config DE4X5
- tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
- depends on (PCI || EISA)
- depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
- select CRC32
- help
- This is support for the DIGITAL series of PCI/EISA Ethernet cards.
- These include the DE425, DE434, DE435, DE450 and DE500 models. If
- you have a network card of this type, say Y. More specific
- information is contained in
- <file:Documentation/networking/device_drivers/ethernet/dec/de4x5.rst>.
-
- To compile this driver as a module, choose M here. The module will
- be called de4x5.
-
config WINBOND_840
tristate "Winbond W89c840 Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
index 8aab37564d5d..d4f1d21d29a0 100644
--- a/drivers/net/ethernet/dec/tulip/Makefile
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_DM9102) += dmfe.o
obj-$(CONFIG_WINBOND_840) += winbond-840.o
obj-$(CONFIG_DE2104X) += de2104x.o
obj-$(CONFIG_TULIP) += tulip.o
-obj-$(CONFIG_DE4X5) += de4x5.o
obj-$(CONFIG_ULI526X) += uli526x.o
# Declare multi-part drivers.
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d51b3d24a0c8..cd3dc4b89518 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1606,8 +1606,8 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
static int de_get_regs_len(struct net_device *dev)
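
For context on the strlcpy() -> strscpy() conversion in the hunk above, a hedged sketch of the semantic difference (in-kernel API; the helper and its caller here are hypothetical): strscpy() still NUL-terminates and never overflows the destination, but it returns -E2BIG on truncation instead of strlen(src), so truncation becomes detectable without re-measuring the source.

#include <linux/printk.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only. */
static void copy_ifname(char *dst, size_t len, const char *src)
{
	/* strlcpy() returned strlen(src), which callers often misused
	 * as a copied-bytes count; strscpy() returns the bytes copied
	 * or -E2BIG when src did not fit. */
	if (strscpy(dst, src, len) < 0)
		pr_warn("interface name truncated\n");
}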
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
deleted file mode 100644
index 71730ef4cd57..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ /dev/null
@@ -1,5591 +0,0 @@
-/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
- ethernet driver for Linux.
-
- Copyright 1994, 1995 Digital Equipment Corporation.
-
- Testing resources for this driver have been made available
- in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
-
- The author may be reached at davies@maniac.ultranet.com.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2 of the License, or (at your
- option) any later version.
-
- THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Originally, this driver was written for the Digital Equipment
- Corporation series of EtherWORKS ethernet cards:
-
- DE425 TP/COAX EISA
- DE434 TP PCI
- DE435 TP/COAX/AUI PCI
- DE450 TP/COAX/AUI PCI
- DE500 10/100 PCI Fasternet
-
- but it will now attempt to support all cards which conform to the
- Digital Semiconductor SROM Specification. The driver currently
- recognises the following chips:
-
- DC21040 (no SROM)
- DC21041[A]
- DC21140[A]
- DC21142
- DC21143
-
- So far the driver is known to work with the following cards:
-
- KINGSTON
- Linksys
- ZNYX342
- SMC8432
- SMC9332 (w/new SROM)
- ZNYX31[45]
- ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
-
- The driver has been tested on a relatively busy network using the DE425,
- DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
- 16M of data to a DECstation 5000/200 as follows:
-
- TCP UDP
- TX RX TX RX
- DE425 1030k 997k 1170k 1128k
- DE434 1063k 995k 1170k 1125k
- DE435 1063k 995k 1170k 1125k
- DE500 1063k 998k 1170k 1125k in 10Mb/s mode
-
- All values are typical (in kBytes/sec) from a sample of 4 for each
- measurement. Their error is +/-20k on a quiet (private) network and also
- depends on what load the CPU has.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'ewrk3.c' and in turn from
- Donald Becker's 'lance.c' should be obvious. With the module autoload of
- every usable DECchip board, I pinched Donald's 'next_module' field to
- link my modules together.
-
- Up to 15 EISA cards can be supported under this driver, limited primarily
- by the available IRQ lines. I have checked different configurations of
- multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
- problem yet (provided you have at least depca.c v0.38) ...
-
- PCI support has been added to allow the driver to work with the DE434,
- DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
- to the differences in the EISA and PCI CSR address offsets from the base
- address.
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). Loadable module support under PCI and EISA has been
- achieved by letting the driver autoprobe as if it were compiled into the
- kernel. Do make sure you're not sharing interrupts with anything that
- cannot accommodate interrupt sharing!
-
- To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) for fixed autoprobes (not recommended), edit the source code near
- line 5594 to reflect the I/O address you're using, or assign these when
- loading by:
-
- insmod de4x5 io=0xghh where g = bus number
- hh = device number
-
- NB: autoprobing for modules is now supported by default. You may just
- use:
-
- insmod de4x5
-
- to load all available boards. For a specific board, still use
- the 'io=?' above.
- 3) compile de4x5.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you want to add a new card, goto 5. Otherwise, recompile a
- kernel with the de4x5 configuration turned off and reboot.
- 5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- To unload a module, turn off the associated interface(s)
- 'ifconfig eth?? down' then 'rmmod de4x5'.
-
- Automedia detection is included so that in principle you can disconnect
- from, e.g. TP, reconnect to BNC and things will still work (after a
- pause whilst the driver figures out where its media went). My tests
- using ping showed that it appears to work....
-
- By default, the driver will now autodetect any DECchip based card.
- Should you have a need to restrict the driver to DIGITAL only cards, you
- can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
-
- I've changed the timing routines to use the kernel timer and scheduling
- functions so that the hangs and other assorted problems that occurred
- while autosensing the media should be gone. A bonus for the DC21040
- auto media sense algorithm is that it can now use one that is more in
- line with the rest (the DC21040 chip doesn't have a hardware timer).
- The downside is the 1 'jiffies' (10ms) resolution.
-
- IEEE 802.3u MII interface code has been added in anticipation that some
- products may use it in the future.
-
- The SMC9332 card has a non-compliant SROM which needs fixing - I have
- patched this driver to detect it because the SROM format used complies
- with a previous DEC-STD format.
-
- I have removed the buffer copies needed for receive on Intels. I cannot
- remove them for Alphas since the Tulip hardware only does longword
- aligned DMA transfers and the Alphas get alignment traps with non
- longword aligned data copies (which makes them really slow). No comment.
-
- I have added SROM decoding routines to make this driver work with any
- card that supports the Digital Semiconductor SROM spec. This will help
- all cards running the dc2114x series chips in particular. Cards using
- the dc2104x chips should run correctly with the basic driver. I'm in
- debt to <mjacob@feral.com> for the testing and feedback that helped get
- this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
- (with the latest SROM complying with the SROM spec V3: their first was
- broken), ZNYX342 and LinkSys. ZNYX314 (dual 21041 MAC) and ZNYX 315
- (quad 21041 MAC) cards also appear to work despite their incorrectly
- wired IRQs.
-
- I have added a temporary fix for interrupt problems when some SCSI cards
- share the same interrupt as the DECchip based cards. The problem occurs
- because the SCSI card wants to grab the interrupt as a fast interrupt
- (runs the service routine with interrupts turned off) vs. this card
- which really needs to run the service routine with interrupts turned on.
- This driver will now add the interrupt service routine as a fast
- interrupt if it is bounced from the slow interrupt. THIS IS NOT A
- RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
- until people sort out their compatibility issues and the kernel
- interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
- INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
- run on the same interrupt. PCMCIA/CardBus is another can of worms...
-
- Finally, I think I have really fixed the module loading problem with
- more than one DECchip based card. As a side effect, I don't mess with
- the device structure any more which means that if more than 1 card in
- 2.0.x is installed (4 in 2.1.x), the user will have to edit
- linux/drivers/net/Space.c to make room for them. Hence, module loading
- is the preferred way to use this driver, since it doesn't have this
- limitation.
-
- Where SROM media detection is used and full duplex is specified in the
- SROM, the feature is ignored unless lp->params.fdx is set at compile
- time OR during a module load (insmod de4x5 args='eth??:fdx' [see
- below]). This is because there is no way to automatically detect full
- duplex links except through autonegotiation. When I include the
- autonegotiation feature in the SROM autoconf code, this detection will
- occur automatically for that case.
-
- Command line arguments are now allowed, similar to passing arguments
- through LILO. This will allow a per adapter board set up of full duplex
- and media. The only lexical constraints are: the board name (dev->name)
- appears in the list before its parameters. The list of parameters ends
- either at the end of the parameter list or with another board name. The
- following parameters are allowed:
-
- fdx for full duplex
- autosense to set the media/speed; with the following
- sub-parameters:
- TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
-
- Case sensitivity is important for the sub-parameters. They *must* be
- upper case. Examples:
-
- insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-
- For a compiled in driver, at or above line 548, place e.g.
- #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-
- Yes, I know full duplex isn't permissible on BNC or AUI; they're just
- examples. By default, full duplex is turned off and AUTO is the default
- autosense setting. In reality, I expect only the full duplex option to
- be used. Note the use of single quotes in the two examples above and the
- lack of commas to separate items. ALSO, you must get the requested media
- correct in relation to what the adapter SROM says it has. There's no way
- to determine this in advance other than by trial and error and common
- sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
-
- Changed the bus probing. EISA used to be done first, followed by PCI.
- Most people probably don't even know what a de425 is today and the EISA
- probe has messed up some SCSI cards in the past, so now PCI is always
- probed first followed by EISA if a) the architecture allows EISA and
- either b) there have been no PCI cards detected or c) an EISA probe is
- forced by the user. To force a probe include "force_eisa" in your
- insmod "args" line; for built-in kernels either change the driver to do
- this automatically or include #define DE4X5_FORCE_EISA on or before
- line 1040 in the driver.
-
- TO DO:
- ------
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 17-Nov-94 Initial writing. ALPHA code release.
- 0.2 13-Jan-95 Added PCI support for DE435's.
- 0.21 19-Jan-95 Added auto media detection.
- 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
- Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- Add request/release_region code.
- Add loadable modules support for PCI.
- Clean up loadable modules support.
- 0.23 28-Feb-95 Added DC21041 and DC21140 support.
- Fix missed frame counter value and initialisation.
- Fixed EISA probe.
- 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
- Change TX_BUFFS_AVAIL macro.
- Change media autodetection to allow manual setting.
- Completed DE500 (DC21140) support.
- 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
- 0.242 10-May-95 Minor changes.
- 0.30 12-Jun-95 Timer fix for DC21140.
- Portability changes.
- Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
- Add DE500 semi automatic autosense.
- Add Link Fail interrupt TP failure detection.
- Add timer based link change detection.
- Plugged a memory leak in de4x5_queue_pkt().
- 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
- 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
- suggestion by <heiko@colossus.escape.de>.
- 0.33 8-Aug-95 Add shared interrupt support (not released yet).
- 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
- Fix de4x5_interrupt().
- Fix dc21140_autoconf() mess.
- No shared interrupt support.
- 0.332 11-Sep-95 Added MII management interface routines.
- 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
- Add kernel timer code (h/w is too flaky).
- Add MII based PHY autosense.
- Add new multicasting code.
- Add new autosense algorithms for media/mode
- selection using kernel scheduling/timing.
- Re-formatted.
- Made changes suggested by <jeff@router.patch.net>:
- Change driver to detect all DECchip based cards
- with DEC_ONLY restriction a special case.
- Changed driver to autoprobe as a module. No irq
- checking is done now - assume BIOS is good!
- Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
- 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
- only <niles@axp745gsfc.nasa.gov>
- Fix for multiple PCI cards reported by <jos@xos.nl>
- Duh, put the IRQF_SHARED flag into request_interrupt().
- Fix SMC ethernet address in enet_det[].
- Print chip name instead of "UNKNOWN" during boot.
- 0.42 26-Apr-96 Fix MII write TA bit error.
- Fix bug in dc21040 and dc21041 autosense code.
- Remove buffer copies on receive for Intels.
- Change sk_buff handling during media disconnects to
- eliminate DUP packets.
- Add dynamic TX thresholding.
- Change all chips to use perfect multicast filtering.
- Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 21-Jun-96 Fix unconnected media TX retry bug.
- Add Accton to the list of broken cards.
- Fix TX under-run bug for non DC21140 chips.
- Fix boot command probe bug in alloc_device() as
- reported by <koen.gadeyne@barco.com> and
- <orava@nether.tky.hut.fi>.
- Add cache locks to prevent a race condition as
- reported by <csd@microplex.com> and
- <baba@beckman.uiuc.edu>.
- Upgraded alloc_device() code.
- 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
- with <csd@microplex.com>
- 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
- Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
- and <michael@compurex.com>.
- 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
- with a loopback packet.
- 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
- by <bhat@mundook.cs.mu.OZ.AU>
- 0.45 8-Dec-96 Include endian functions for PPC use, from work
- by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
- 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
- suggestion from <mjacob@feral.com>.
- 0.5 30-Jan-97 Added SROM decoding functions.
- Updated debug flags.
- Fix sleep/wakeup calls for PCI cards, bug reported
- by <cross@gweep.lkg.dec.com>.
- Added multi-MAC, one SROM feature from discussion
- with <mjacob@feral.com>.
- Added full module autoprobe capability.
- Added attempt to use an SMC9332 with broken SROM.
- Added fix for ZYNX multi-mac cards that didn't
- get their IRQs wired correctly.
- 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
- <paubert@iram.es>
- Fix init_connection() to remove extra device reset.
- Fix MAC/PHY reset ordering in dc21140m_autoconf().
- Fix initialisation problem with lp->timeout in
- typeX_infoblock() from <paubert@iram.es>.
- Fix MII PHY reset problem from work done by
- <paubert@iram.es>.
- 0.52 26-Apr-97 Some changes may not credit the right people -
- a disk crash meant I lost some mail.
- Change RX interrupt routine to drop rather than
- defer packets to avoid hang reported by
- <g.thomas@opengroup.org>.
- Fix srom_exec() to return for COMPACT and type 1
- infoblocks.
- Added DC21142 and DC21143 functions.
- Added byte counters from <phil@tazenda.demon.co.uk>
- Added IRQF_DISABLED temporary fix from
- <mjacob@feral.com>.
- 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
- module load: bug reported by
- <Piete.Brooks@cl.cam.ac.uk>
- Fix multi-MAC, one SROM, to work with 2114x chips:
- bug reported by <cmetz@inner.net>.
- Make above search independent of BIOS device scan
- direction.
- Completed DC2114[23] autosense functions.
- 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
- <robin@intercore.com>
- Fix type1_infoblock() bug introduced in 0.53, from
- problem reports by
- <parmee@postecss.ncrfran.france.ncr.com> and
- <jo@ice.dillingen.baynet.de>.
- Added argument list to set up each board from either
- a module's command line or a compiled in #define.
- Added generic MII PHY functionality to deal with
- newer PHY chips.
- Fix the mess in 2.1.67.
- 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
- <redhat@cococo.net>.
- Fix bug in pci_probe() for 64 bit systems reported
- by <belliott@accessone.com>.
- 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
- 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
- 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
- 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
- **Incompatible with 2.0.x from here.**
- 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
- from <lma@varesearch.com>
- Add TP, AUI and BNC cases to 21140m_autoconf() for
- case where a 21140 under SROM control uses, e.g. AUI
- from problem report by <delchini@lpnp09.in2p3.fr>
- Add MII parallel detection to 2114x_autoconf() for
- case where no autonegotiation partner exists from
- problem report by <mlapsley@ndirect.co.uk>.
- Add ability to force connection type directly even
- when using SROM control from problem report by
- <earl@exis.net>.
- Updated the PCI interface to conform with the latest
- version. I hope nothing is broken...
- Add TX done interrupt modification from suggestion
- by <Austin.Donnelly@cl.cam.ac.uk>.
- Fix is_anc_capable() bug reported by
- <Austin.Donnelly@cl.cam.ac.uk>.
- Fix type[13]_infoblock() bug: during MII search, PHY
- lp->rst not run because lp->ibn not initialised -
- from report & fix by <paubert@iram.es>.
- Fix probe bug with EISA & PCI cards present from
- report by <eirik@netcom.com>.
- 0.541 24-Aug-98 Fix compiler problems associated with i386-string
- ops from multiple bug reports and temporary fix
- from <paubert@iram.es>.
- Fix pci_probe() to correctly emulate the old
- pcibios_find_class() function.
- Add an_exception() for old ZNYX346 and fix compile
- warning on PPC & SPARC, from <ecd@skynet.be>.
- Fix lastPCI to correctly work with compiled in
- kernels and modules from bug report by
- <Zlatko.Calusic@CARNet.hr> et al.
- 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
- when media is unconnected.
- Change dev->interrupt to lp->interrupt to ensure
- alignment for Alpha's and avoid their unaligned
- access traps. This flag is merely for log messages:
- should do something more definitive though...
- 0.543 30-Dec-98 Add SMP spin locking.
- 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
- a 21143 by <mmporter@home.com>.
- Change PCI/EISA bus probing order.
- 0.545 28-Nov-99 Further Moto SROM bug fix from
- <mporter@eng.mcd.mot.com>
- Remove double checking for DEBUG_RX in de4x5_dbg_rx()
- from report by <geert@linux-m68k.org>
- 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
- was causing a page fault when initializing the
- variable 'pb', on a non de4x5 PCI device, in this
- case a PCI bridge (DEC chip 21152). The value of
- 'pb' is now only initialized if a de4x5 chip is
- present.
- <france@handhelds.org>
- 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
- 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
- generic DMA APIs. Fixed DE425 support on Alpha.
- <maz@wild-wind.fr.eu.org>
- =========================================================================
-*/
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/eisa.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/dma-mapping.h>
-#include <linux/moduleparam.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif /* CONFIG_PPC_PMAC */
-
-#include "de4x5.h"
-
-static const char version[] =
- KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
-
-#define c_char const char
-
-/*
-** MII Information
-*/
-struct phy_table {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time - 802.3u is confusing here */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
-};
-
-struct mii_phy {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
- int addr; /* MII address for the PHY */
- u_char *gep; /* Start of GEP sequence block in SROM */
- u_char *rst; /* Start of reset sequence in SROM */
- u_int mc; /* Media Capabilities */
- u_int ana; /* NWay Advertisement */
- u_int fdx; /* Full DupleX capabilities for each media */
- u_int ttm; /* Transmit Threshold Mode for each media */
- u_int mci; /* 21142 MII Connector Interrupt info */
-};
-
-#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
-
-struct sia_phy {
- u_char mc; /* Media Code */
- u_char ext; /* csr13-15 valid when set */
- int csr13; /* SIA Connectivity Register */
- int csr14; /* SIA TX/RX Register */
- int csr15; /* SIA General Register */
- int gepc; /* SIA GEP Control Information */
- int gep; /* SIA GEP Data */
-};
-
-/*
-** Define the known universe of PHY devices that can be
-** recognised by this driver.
-*/
-static struct phy_table phy_info[] = {
- {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
- {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
- {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
- {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
- {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
-};
-
-/*
-** These GENERIC values assume that the PHY devices follow 802.3u and
-** allow parallel detection to set the link partner ability register.
-** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
-*/
-#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
-#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
-#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
-
-/*
-** Define special SROM detection cases
-*/
-static c_char enet_det[][ETH_ALEN] = {
- {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
-};
-
-#define SMC 1
-#define ACCTON 2
-
-/*
-** SROM Repair definitions. If a broken SROM is detected a card may
-** use this information to help figure out what to do. This is a
-** "stab in the dark" and so far for SMC9332's only.
-*/
-static c_char srom_repair_info[][100] = {
- {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
- 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
- 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
- 0x00,0x18,}
-};
-
-
-#ifdef DE4X5_DEBUG
-static int de4x5_debug = DE4X5_DEBUG;
-#else
-/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
-static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
-#endif
-
-/*
-** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.:
-** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-**
-** For a compiled in driver, place e.g.
-** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-** here
-*/
-#ifdef DE4X5_PARM
-static char *args = DE4X5_PARM;
-#else
-static char *args;
-#endif
-
-struct parameters {
- bool fdx;
- int autosense;
-};
-
-#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
-
-#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Ethernet Info
-*/
-#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
-#define IEEE802_3_SZ 1518 /* Packet + CRC */
-#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
-#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
-#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
-#define PKT_HDR_LEN 14 /* Addresses and data length info */
-#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
-#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
-
-
-/*
-** EISA bus defines
-*/
-#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
-
-#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
-
-#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
-#define DE4X5_NAME_LENGTH 8
-
-static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
-
-/*
-** Ethernet PROM defines for DC21040
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** PCI Bus defines
-*/
-#define PCI_MAX_BUS_NUM 8
-#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
-#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
-#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
-#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
-#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
-#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
-
-#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
-#define DE4X5_CACHE_ALIGN CAL_16LONG
-#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
-/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
-#define DESC_ALIGN
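/*
** Added illustration (not in the original source): these "n - 1"
** masks are applied as (addr + DE4X5_ALIGN) & ~DE4X5_ALIGN to round
** an address up to the boundary, e.g. with DE4X5_ALIGN32 (mask 0x1f)
** the address 0x1004 rounds up to 0x1020.
*/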
-
-#ifndef DEC_ONLY /* See README.de4x5 for using this */
-static int dec_only;
-#else
-static int dec_only = 1;
-#endif
-
-/*
-** DE4X5 IRQ ENABLE/DISABLE
-*/
-#define ENABLE_IRQs { \
- imr |= lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Disable the IRQs */\
-}
-
-#define UNMASK_IRQs {\
- imr |= lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
-}
-
-#define MASK_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Mask the IRQs */\
-}
-
-/*
-** DE4X5 START/STOP
-*/
-#define START_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr |= OMR_ST | OMR_SR;\
- outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr &= ~(OMR_ST|OMR_SR);\
- outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
-}
-
-/*
-** DE4X5 SIA RESET
-*/
-#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
-
-/*
-** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
-*/
-#define DE4X5_AUTOSENSE_MS 250
-
-/*
-** SROM Structure
-*/
-struct de4x5_srom {
- char sub_vendor_id[2];
- char sub_system_id[2];
- char reserved[12];
- char id_block_crc;
- char reserved2;
- char version;
- char num_controllers;
- char ieee_addr[6];
- char info[100];
- short chksum;
-};
-#define SUB_VENDOR_ID 0x500a
-
-/*
-** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
-** and have sizes of both a power of 2 and a multiple of 4.
-** A size of 256 bytes for each buffer could be chosen because over 90% of
-** all packets in our network are <256 bytes long and 64 longword alignment
-** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
-** descriptors are needed for machines with an ALPHA CPU.
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 32 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
- /* Multiple of 4 for DC21040 */
- /* Allows 512 byte alignment */
-struct de4x5_desc {
- volatile __le32 status;
- __le32 des1;
- __le32 buf;
- __le32 next;
- DESC_ALIGN
-};
-
-/*
-** The DE4X5 private structure
-*/
-#define DE4X5_PKT_STAT_SZ 16
-#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DE4X5_PKT_STAT_SZ */
-
-struct pkt_stats {
- u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
- u_int unicast;
- u_int multicast;
- u_int broadcast;
- u_int excessive_collisions;
- u_int tx_underruns;
- u_int excessive_underruns;
- u_int rx_runt_frames;
- u_int rx_collision;
- u_int rx_dribble;
- u_int rx_overflow;
-};
-
-struct de4x5_private {
- char adapter_name[80]; /* Adapter name */
- u_long interrupt; /* Aligned ISR flag */
- struct de4x5_desc *rx_ring; /* RX descriptor ring */
- struct de4x5_desc *tx_ring; /* TX descriptor ring */
- struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
- struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
- int rx_new, rx_old; /* RX descriptor ring pointers */
- int tx_new, tx_old; /* TX descriptor ring pointers */
- char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
- char frame[64]; /* Min sized packet for loopback*/
- spinlock_t lock; /* Adapter specific spinlock */
- struct net_device_stats stats; /* Public stats */
- struct pkt_stats pktStats; /* Private stats counters */
- char rxRingSize;
- char txRingSize;
- int bus; /* EISA or PCI */
- int bus_num; /* PCI Bus number */
- int device; /* Device number on PCI bus */
- int state; /* Adapter OPENED or CLOSED */
- int chipset; /* DC21040, DC21041 or DC21140 */
- s32 irq_mask; /* Interrupt Mask (Enable) bits */
- s32 irq_en; /* Summary interrupt bits */
- int media; /* Media (eg TP), mode (eg 100B)*/
- int c_media; /* Remember the last media conn */
- bool fdx; /* media full duplex flag */
- int linkOK; /* Link is OK */
- int autosense; /* Allow/disallow autosensing */
- bool tx_enable; /* Enable descriptor polling */
- int setup_f; /* Setup frame filtering type */
- int local_state; /* State within a 'media' state */
- struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
- struct sia_phy sia; /* SIA PHY Information */
- int active; /* Index to active PHY device */
- int mii_cnt; /* Number of attached PHY's */
- int timeout; /* Scheduling counter */
- struct timer_list timer; /* Timer info for kernel */
- int tmp; /* Temporary global per card */
- struct {
- u_long lock; /* Lock the cache accesses */
- s32 csr0; /* Saved Bus Mode Register */
- s32 csr6; /* Saved Operating Mode Reg. */
- s32 csr7; /* Saved IRQ Mask Register */
- s32 gep; /* Saved General Purpose Reg. */
- s32 gepc; /* Control info for GEP */
- s32 csr13; /* Saved SIA Connectivity Reg. */
- s32 csr14; /* Saved SIA TX/RX Register */
- s32 csr15; /* Saved SIA General Register */
- int save_cnt; /* Flag if state already saved */
- struct sk_buff_head queue; /* Save the (re-ordered) skb's */
- } cache;
- struct de4x5_srom srom; /* A copy of the SROM */
- int cfrv; /* Card CFRV copy */
- int rx_ovf; /* Check for 'RX overflow' tag */
- bool useSROM; /* For non-DEC card use SROM */
- bool useMII; /* Infoblock using the MII */
- int asBitValid; /* Autosense bits in GEP? */
- int asPolarity; /* 0 => asserted high */
- int asBit; /* Autosense bit number in GEP */
- int defMedium; /* SROM default medium */
- int tcount; /* Last infoblock number */
- int infoblock_init; /* Initialised this infoblock? */
- int infoleaf_offset; /* SROM infoleaf for controller */
- s32 infoblock_csr6; /* csr6 value in SROM infoblock */
- int infoblock_media; /* infoblock media */
- int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
- u_char *rst; /* Pointer to Type 5 reset info */
- u_char ibn; /* Infoblock number */
- struct parameters params; /* Command line/ #defined params */
- struct device *gendev; /* Generic device */
- dma_addr_t dma_rings; /* DMA handle for rings */
- int dma_size; /* Size of the DMA area */
- char *rx_bufs; /* rx bufs on alpha, sparc, ... */
-};
-
-/*
-** To get around certain poxy cards that don't provide an SROM
-** for the second and more DECchip, I have to key off the first
-** chip's address. I'll assume there's not a bad SROM iff:
-**
-** o the chipset is the same
-** o the bus number is the same and > 0
-** o the sum of all the returned hw address bytes is 0 or 0x5fa
-**
-** Also have to save the irq for those cards whose hardware designers
-** can't follow the PCI to PCI Bridge Architecture spec.
-*/
-static struct {
- int chipset;
- int bus;
- int irq;
- u_char addr[ETH_ALEN];
-} last = {0,};
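/*
** Added note: 0x5fa is 6 * 0xff, so the address-sum test above
** treats an all-zero or all-0xff hardware address as the bogus
** read-back expected when the extra chip has no SROM of its own.
*/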
-
-/*
-** The transmit ring full condition is described by the tx_old and tx_new
-** pointers by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingSize-lp->tx_new-1:\
- lp->tx_old -lp->tx_new-1)
-
-#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
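/*
** Worked example (added for clarity): with txRingSize = 32 and
** tx_old = tx_new = 5 the ring is empty and TX_BUFFS_AVAIL yields
** 5 + 32 - 5 - 1 = 31 free slots; with tx_old = 6, tx_new = 5
** (tx_old = tx_new + 1) it yields 6 - 5 - 1 = 0, i.e. a full ring.
*/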
-
-/*
-** Public Functions
-*/
-static int de4x5_open(struct net_device *dev);
-static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
-static int de4x5_close(struct net_device *dev);
-static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
-static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
-static void set_multicast_list(struct net_device *dev);
-static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd);
-
-/*
-** Private functions
-*/
-static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
-static int de4x5_init(struct net_device *dev);
-static int de4x5_sw_reset(struct net_device *dev);
-static int de4x5_rx(struct net_device *dev);
-static int de4x5_tx(struct net_device *dev);
-static void de4x5_ast(struct timer_list *t);
-static int de4x5_txur(struct net_device *dev);
-static int de4x5_rx_ovfc(struct net_device *dev);
-
-static int autoconf_media(struct net_device *dev);
-static void create_packet(struct net_device *dev, char *frame, int len);
-static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
-static int dc21040_autoconf(struct net_device *dev);
-static int dc21041_autoconf(struct net_device *dev);
-static int dc21140m_autoconf(struct net_device *dev);
-static int dc2114x_autoconf(struct net_device *dev);
-static int srom_autoconf(struct net_device *dev);
-static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
-static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
-static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
-static int test_for_100Mb(struct net_device *dev, int msec);
-static int wait_for_link(struct net_device *dev);
-static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
-static int is_spd_100(struct net_device *dev);
-static int is_100_up(struct net_device *dev);
-static int is_10_up(struct net_device *dev);
-static int is_anc_capable(struct net_device *dev);
-static int ping_media(struct net_device *dev, int msec);
-static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
-static void de4x5_free_rx_buffs(struct net_device *dev);
-static void de4x5_free_tx_buffs(struct net_device *dev);
-static void de4x5_save_skbs(struct net_device *dev);
-static void de4x5_rst_desc_ring(struct net_device *dev);
-static void de4x5_cache_state(struct net_device *dev, int flag);
-static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
-static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
-static struct sk_buff *de4x5_get_cache(struct net_device *dev);
-static void de4x5_setup_intr(struct net_device *dev);
-static void de4x5_init_connection(struct net_device *dev);
-static int de4x5_reset_phy(struct net_device *dev);
-static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
-static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
-static int test_tp(struct net_device *dev, s32 msec);
-static int EISA_signature(char *name, struct device *device);
-static void PCI_signature(char *name, struct de4x5_private *lp);
-static void DevicePresent(struct net_device *dev, u_long iobase);
-static void enet_addr_rst(u_long aprom_addr);
-static int de4x5_bad_srom(struct de4x5_private *lp);
-static short srom_rd(u_long address, u_char offset);
-static void srom_latch(u_int command, u_long address);
-static void srom_command(u_int command, u_long address);
-static void srom_address(u_int command, u_long address, u_char offset);
-static short srom_data(u_int command, u_long address);
-/*static void srom_busy(u_int command, u_long address);*/
-static void sendto_srom(u_int command, u_long addr);
-static int getfrom_srom(u_long addr);
-static int srom_map_media(struct net_device *dev);
-static int srom_infoleaf_info(struct net_device *dev);
-static void srom_init(struct net_device *dev);
-static void srom_exec(struct net_device *dev, u_char *p);
-static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
-static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
-static int mii_rdata(u_long ioaddr);
-static void mii_wdata(int data, int len, u_long ioaddr);
-static void mii_ta(u_long rw, u_long ioaddr);
-static int mii_swap(int data, int len);
-static void mii_address(u_char addr, u_long ioaddr);
-static void sendto_mii(u32 command, int data, u_long ioaddr);
-static int getfrom_mii(u32 command, u_long ioaddr);
-static int mii_get_oui(u_char phyaddr, u_long ioaddr);
-static int mii_get_phy(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int get_hw_addr(struct net_device *dev);
-static void srom_repair(struct net_device *dev, int card);
-static int test_bad_enet(struct net_device *dev, int status);
-static int an_exception(struct de4x5_private *lp);
-static char *build_setup_frame(struct net_device *dev, int mode);
-static void disable_ast(struct net_device *dev);
-static long de4x5_switch_mac_port(struct net_device *dev);
-static int gep_rd(struct net_device *dev);
-static void gep_wr(s32 data, struct net_device *dev);
-static void yawn(struct net_device *dev, int state);
-static void de4x5_parse_params(struct net_device *dev);
-static void de4x5_dbg_open(struct net_device *dev);
-static void de4x5_dbg_mii(struct net_device *dev, int k);
-static void de4x5_dbg_media(struct net_device *dev);
-static void de4x5_dbg_srom(struct de4x5_srom *p);
-static void de4x5_dbg_rx(struct sk_buff *skb, int len);
-static int dc21041_infoleaf(struct net_device *dev);
-static int dc21140_infoleaf(struct net_device *dev);
-static int dc21142_infoleaf(struct net_device *dev);
-static int dc21143_infoleaf(struct net_device *dev);
-static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
-
-/*
-** Note now that module autoprobing is allowed under EISA and PCI. The
-** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
-** to "do the right thing".
-*/
-
-static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
-
-module_param_hw(io, int, ioport, 0);
-module_param(de4x5_debug, int, 0);
-module_param(dec_only, int, 0);
-module_param(args, charp, 0);
-
-MODULE_PARM_DESC(io, "de4x5 I/O base address");
-MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
-MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
-MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
-MODULE_LICENSE("GPL");
-
-/*
-** List the SROM infoleaf functions and chipsets
-*/
-struct InfoLeaf {
- int chipset;
- int (*fn)(struct net_device *);
-};
-static struct InfoLeaf infoleaf_array[] = {
- {DC21041, dc21041_infoleaf},
- {DC21140, dc21140_infoleaf},
- {DC21142, dc21142_infoleaf},
- {DC21143, dc21143_infoleaf}
-};
-#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
-
-/*
-** List the SROM info block functions
-*/
-static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
- type0_infoblock,
- type1_infoblock,
- type2_infoblock,
- type3_infoblock,
- type4_infoblock,
- type5_infoblock,
- compact_infoblock
-};
-
-#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
-
-/*
-** Miscellaneous defines...
-*/
-#define RESET_DE4X5 {\
- int i;\
- i=inl(DE4X5_BMR);\
- mdelay(1);\
- outl(i | BMR_SWR, DE4X5_BMR);\
- mdelay(1);\
- outl(i, DE4X5_BMR);\
- mdelay(1);\
- for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
- mdelay(1);\
-}
-
-#define PHY_HARD_RESET {\
- outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
- mdelay(1); /* Assert for 1ms */\
- outl(0x00, DE4X5_GEP);\
- mdelay(2); /* Wait for 2ms */\
-}
-
-static const struct net_device_ops de4x5_netdev_ops = {
- .ndo_open = de4x5_open,
- .ndo_stop = de4x5_close,
- .ndo_start_xmit = de4x5_queue_pkt,
- .ndo_get_stats = de4x5_get_stats,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_siocdevprivate = de4x5_siocdevprivate,
- .ndo_set_mac_address= eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static int
-de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
-{
- char name[DE4X5_NAME_LENGTH + 1];
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *pdev = NULL;
- int i, status=0;
-
- dev_set_drvdata(gendev, dev);
-
- /* Ensure we're not sleeping */
- if (lp->bus == EISA) {
- outb(WAKEUP, PCI_CFPM);
- } else {
- pdev = to_pci_dev (gendev);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- }
- mdelay(10);
-
- RESET_DE4X5;
-
- if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
- return -ENXIO; /* Hardware could not reset */
- }
-
- /*
- ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
- */
- lp->useSROM = false;
- if (lp->bus == PCI) {
- PCI_signature(name, lp);
- } else {
- EISA_signature(name, gendev);
- }
-
- if (*name == '\0') { /* Not found a board signature */
- return -ENXIO;
- }
-
- dev->base_addr = iobase;
- printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
-
- status = get_hw_addr(dev);
- printk(", h/w address %pM\n", dev->dev_addr);
-
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- } else {
- skb_queue_head_init(&lp->cache.queue);
- lp->cache.gepc = GEP_INIT;
- lp->asBit = GEP_SLNK;
- lp->asPolarity = GEP_SLNK;
- lp->asBitValid = ~0;
- lp->timeout = -1;
- lp->gendev = gendev;
- spin_lock_init(&lp->lock);
- timer_setup(&lp->timer, de4x5_ast, 0);
- de4x5_parse_params(dev);
-
- /*
- ** Choose correct autosensing in case someone messed up
- */
- lp->autosense = lp->params.autosense;
- if (lp->chipset != DC21140) {
- if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
- lp->params.autosense = TP;
- }
- if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
- lp->params.autosense = BNC;
- }
- }
- lp->fdx = lp->params.fdx;
- sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
-
- lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
-#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
- lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
-#endif
- lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
- &lp->dma_rings, GFP_ATOMIC);
- if (lp->rx_ring == NULL) {
- return -ENOMEM;
- }
-
- lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-
- /*
- ** Set up the RX descriptor ring (Intels)
- ** Allocate contiguous receive buffers, long word aligned (Alphas)
- */
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf = 0;
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
-#else
- {
- dma_addr_t dma_rx_bufs;
-
- dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
- * sizeof(struct de4x5_desc);
- dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
- lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
- + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf =
- cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
- }
-#endif
-
- barrier();
-
- lp->rxRingSize = NUM_RX_DESC;
- lp->txRingSize = NUM_TX_DESC;
-
- /* Write the end of list marker to the descriptor lists */
- lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
- lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
-
- /* Tell the adapter where the TX/RX rings are located. */
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- /* Initialise the IRQ mask and Enable/Disable */
- lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
- lp->irq_en = IMR_NIM | IMR_AIM;
-
- /* Create a loopback packet frame for later media probing */
- create_packet(dev, lp->frame, sizeof(lp->frame));
-
- /* Check if the RX overflow bug needs testing for */
- i = lp->cfrv & 0x000000fe;
- if ((lp->chipset == DC21140) && (i == 0x20)) {
- lp->rx_ovf = 1;
- }
-
- /* Initialise the SROM pointers if possible */
- if (lp->useSROM) {
- lp->state = INITIALISED;
- if (srom_infoleaf_info(dev)) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return -ENXIO;
- }
- srom_init(dev);
- }
-
- lp->state = CLOSED;
-
- /*
- ** Check for an MII interface
- */
- if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
- mii_get_phy(dev);
- }
-
- printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
- ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
- }
-
- if (de4x5_debug & DEBUG_VERSION) {
- printk(version);
- }
-
- /* The DE4X5-specific entries in the device structure. */
- SET_NETDEV_DEV(dev, gendev);
- dev->netdev_ops = &de4x5_netdev_ops;
- dev->mem_start = 0;
-
- /* Fill in the generic fields of the device structure. */
- if ((status = register_netdev (dev))) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return status;
- }
-
- /* Let the adapter sleep to save power */
- yawn(dev, SLEEP);
-
- return status;
-}
-
-
-static int
-de4x5_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- s32 omr;
-
- /* Allocate the RX buffers */
- for (i=0; i<lp->rxRingSize; i++) {
- if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
- de4x5_free_rx_buffs(dev);
- return -EAGAIN;
- }
- }
-
- /*
- ** Wake up the adapter
- */
- yawn(dev, WAKEUP);
-
- /*
- ** Re-initialize the DE4X5...
- */
- status = de4x5_init(dev);
- spin_lock_init(&lp->lock);
- lp->state = OPEN;
- de4x5_dbg_open(dev);
-
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("\n Cannot get IRQ- reconfigure your hardware.\n");
- disable_ast(dev);
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
- yawn(dev, SLEEP);
- lp->state = CLOSED;
- return -EAGAIN;
- } else {
- printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
- printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
- }
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- netif_trans_update(dev); /* prevent tx timeout */
-
- START_DE4X5;
-
- de4x5_setup_intr(dev);
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
- printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
- printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
- printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
- printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
- printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
- printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
- printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
- }
-
- return status;
-}
-
-/*
-** Initialize the DE4X5 operating conditions. NB: a chip problem with the
-** DC21140 requires using perfect filtering mode for that chip. Since I can't
-** see why I'd want > 14 multicast addresses, I have changed all chips to use
-** the perfect filtering mode. Keep the DMA burst length at 8: there seems
-** to be data corruption problems if it is larger (UDP errors seen from a
-** ttcp source).
-*/
-static int
-de4x5_init(struct net_device *dev)
-{
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- de4x5_sw_reset(dev);
-
- /* Autoconfigure the connected port */
- autoconf_media(dev);
-
- return 0;
-}
-
-static int
-de4x5_sw_reset(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 bmr, omr;
-
- /* Select the MII or SRL port now and RESET the MAC */
- if (!lp->useSROM) {
- if (lp->phy[lp->active].id != 0) {
- lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
- } else {
- lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
- }
- de4x5_switch_mac_port(dev);
- }
-
- /*
- ** Set the programmable burst length to 8 longwords for all the DC21140
- ** Fasternet chips and 4 longwords for all others: DMA errors result
- ** without these values. Cache align 16 long.
- */
- bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
- bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
- outl(bmr, DE4X5_BMR);
-
- omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
- if (lp->chipset == DC21140) {
- omr |= (OMR_SDP | OMR_SB);
- }
- lp->setup_f = PERFECT;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
-
- /* Build the setup frame depending on filtering mode */
- SetMulticastFilter(dev);
-
- load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
- outl(omr|OMR_ST, DE4X5_OMR);
-
- /* Poll for setup frame completion (adapter interrupts are disabled now) */
-
- for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
- mdelay(1);
- if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
- }
- outl(omr, DE4X5_OMR); /* Stop everything! */
-
- if (j == 0) {
- printk("%s: Setup frame timed out, status %08x\n", dev->name,
- inl(DE4X5_STS));
- status = -EIO;
- }
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- lp->tx_old = lp->tx_new;
-
- return status;
-}
-
-/*
-** Writes a socket buffer address to the next available transmit descriptor.
-*/
-static netdev_tx_t
-de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- netif_stop_queue(dev);
- if (!lp->tx_enable) /* Cannot send for now */
- goto tx_err;
-
- /*
- ** Clean out the TX ring asynchronously to interrupts - sometimes
- ** interrupts are lost when descriptor status updates lag behind the
- ** irq assertion, especially on a busy PCI bus.
- */
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_tx(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Test if cache is already locked - requeue skb if so */
- if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- goto tx_err;
-
- /* Transmit descriptor ring full or stale skb */
- if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
- if (lp->interrupt) {
- de4x5_putb_cache(dev, skb); /* Requeue the buffer */
- } else {
- de4x5_put_cache(dev, skb);
- }
- if (de4x5_debug & DEBUG_TX) {
- printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
- }
- } else if (skb->len > 0) {
- /* If we already have stuff queued locally, use that first */
- if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
- de4x5_put_cache(dev, skb);
- skb = de4x5_get_cache(dev);
- }
-
- while (skb && !netif_queue_stopped(dev) &&
- (u_long) lp->tx_skb[lp->tx_new] <= 1) {
- spin_lock_irqsave(&lp->lock, flags);
- netif_stop_queue(dev);
- load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
- lp->stats.tx_bytes += skb->len;
- outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
-
- if (TX_BUFFS_AVAIL) {
- netif_start_queue(dev); /* Another pkt may be queued */
- }
- skb = de4x5_get_cache(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- if (skb) de4x5_putb_cache(dev, skb);
- }
-
- lp->cache.lock = 0;
-
- return NETDEV_TX_OK;
-tx_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
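-
-/*
-** Editorial note (not in the original source): lp->cache.queue above is a
-** local holding area for skbs that arrive while the descriptor ring is
-** full or while the interrupt handler owns the ring. de4x5_interrupt()
-** drains it back through de4x5_queue_pkt() once descriptors free up, and
-** the cache.lock bit keeps the two paths from interleaving.
-*/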
-
-/*
-** The DE4X5 interrupt handler.
-**
-** I/O reads and writes through intermediate PCI bridges are never 'posted',
-** so that the asserted interrupt always has some real data to work with -
-** if these I/O accesses are ever changed to memory accesses, ensure the
-** STS write is read immediately to complete the transaction if the adapter
-** is not on bus 0. Lost interrupts can still occur when the PCI bus load
-** is high and descriptor status bits cannot be set before the associated
-** interrupt is asserted and this routine entered.
-*/
-static irqreturn_t
-de4x5_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct de4x5_private *lp;
- s32 imr, omr, sts, limit;
- u_long iobase;
- unsigned int handled = 0;
-
- lp = netdev_priv(dev);
- spin_lock(&lp->lock);
- iobase = dev->base_addr;
-
- DISABLE_IRQs; /* Ensure non re-entrancy */
-
- if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
- printk("%s: Re-entering the interrupt handler.\n", dev->name);
-
- synchronize_irq(dev->irq);
-
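- /* Bound the work done per interrupt: at most 8 passes over the status bits */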
- for (limit=0; limit<8; limit++) {
- sts = inl(DE4X5_STS); /* Read IRQ status */
- outl(sts, DE4X5_STS); /* Reset the board interrupts */
-
- if (!(sts & lp->irq_mask)) break;/* All done */
- handled = 1;
-
- if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
- de4x5_rx(dev);
-
- if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
- de4x5_tx(dev);
-
- if (sts & STS_LNF) { /* TP Link has failed */
- lp->irq_mask &= ~IMR_LFM;
- }
-
- if (sts & STS_UNF) { /* Transmit underrun */
- de4x5_txur(dev);
- }
-
- if (sts & STS_SE) { /* Bus Error */
- STOP_DE4X5;
- printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
- dev->name, sts);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
- }
- }
-
- /* Load the TX ring with any locally stored packets */
- if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
- while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
- de4x5_queue_pkt(de4x5_get_cache(dev), dev);
- }
- lp->cache.lock = 0;
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- ENABLE_IRQs;
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static int
-de4x5_rx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
- entry=lp->rx_new) {
- status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-
- if (lp->rx_ovf) {
- if (inl(DE4X5_MFC) & MFC_FOCM) {
- de4x5_rx_ovfc(dev);
- break;
- }
- }
-
- if (status & RD_FS) { /* Remember the start of frame */
- lp->rx_old = entry;
- }
-
- if (status & RD_LS) { /* Valid frame status */
- if (lp->tx_enable) lp->linkOK++;
- if (status & RD_ES) { /* There was an error. */
- lp->stats.rx_errors++; /* Update the error stats. */
- if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
- if (status & RD_CE) lp->stats.rx_crc_errors++;
- if (status & RD_OF) lp->stats.rx_fifo_errors++;
- if (status & RD_TL) lp->stats.rx_length_errors++;
- if (status & RD_RF) lp->pktStats.rx_runt_frames++;
- if (status & RD_CS) lp->pktStats.rx_collision++;
- if (status & RD_DB) lp->pktStats.rx_dribble++;
- if (status & RD_OF) lp->pktStats.rx_overflow++;
- } else { /* A valid frame received */
- struct sk_buff *skb;
- short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
- >> 16) - 4;
-
- if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
- printk("%s: Insufficient memory; nuking packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- } else {
- de4x5_dbg_rx(skb, pkt_len);
-
- /* Push up the protocol stack */
- skb->protocol=eth_type_trans(skb,dev);
- de4x5_local_stats(dev, skb->data, pkt_len);
- netif_rx(skb);
-
- /* Update stats */
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
- }
- }
-
- /* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
- lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
- barrier();
- }
- lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
- barrier();
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- return 0;
-}
-
-static inline void
-de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
-{
- dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
- le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
- DMA_TO_DEVICE);
- if ((u_long) lp->tx_skb[entry] > 1)
- dev_kfree_skb_irq(lp->tx_skb[entry]);
- lp->tx_skb[entry] = NULL;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-*/
-static int
-de4x5_tx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
- if (status < 0) { /* Buffer not sent yet */
- break;
- } else if (status != 0x7fffffff) { /* Not setup frame */
- if (status & TD_ES) { /* An error happened */
- lp->stats.tx_errors++;
- if (status & TD_NC) lp->stats.tx_carrier_errors++;
- if (status & TD_LC) lp->stats.tx_window_errors++;
- if (status & TD_UF) lp->stats.tx_fifo_errors++;
- if (status & TD_EC) lp->pktStats.excessive_collisions++;
- if (status & TD_DE) lp->stats.tx_aborted_errors++;
-
- if (TX_PKT_PENDING) {
- outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
- }
- } else { /* Packet sent */
- lp->stats.tx_packets++;
- if (lp->tx_enable) lp->linkOK++;
- }
- /* Update the collision counter */
- lp->stats.collisions += ((status & TD_EC) ? 16 :
- ((status & TD_CC) >> 3));
-
- /* Free the buffer. */
- if (lp->tx_skb[entry] != NULL)
- de4x5_free_tx_buff(lp, entry);
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
- }
-
- /* Any resources available? */
- if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
- if (lp->interrupt)
- netif_wake_queue(dev);
- else
- netif_start_queue(dev);
- }
-
- return 0;
-}
-
-static void
-de4x5_ast(struct timer_list *t)
-{
- struct de4x5_private *lp = from_timer(lp, t, timer);
- struct net_device *dev = dev_get_drvdata(lp->gendev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int dt;
-
- if (lp->useSROM)
- next_tick = srom_autoconf(dev);
- else if (lp->chipset == DC21140)
- next_tick = dc21140m_autoconf(dev);
- else if (lp->chipset == DC21041)
- next_tick = dc21041_autoconf(dev);
- else if (lp->chipset == DC21040)
- next_tick = dc21040_autoconf(dev);
- lp->linkOK = 0;
-
- dt = (next_tick * HZ) / 1000;
-
- if (!dt)
- dt = 1;
-
- mod_timer(&lp->timer, jiffies + dt);
-}
-
-static int
-de4x5_txur(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
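- /*
- ** On a transmit underrun, bump the TX threshold (OMR_TR) one step and
- ** fall back to store-and-forward (OMR_SF) once the field saturates.
- */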
- omr = inl(DE4X5_OMR);
- if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
- omr &= ~(OMR_ST|OMR_SR);
- outl(omr, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_TS);
- if ((omr & OMR_TR) < OMR_TR) {
- omr += 0x4000;
- } else {
- omr |= OMR_SF;
- }
- outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
- }
-
- return 0;
-}
-
-static int
-de4x5_rx_ovfc(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- outl(omr & ~OMR_SR, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_RS);
-
- for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
- lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- outl(omr, DE4X5_OMR);
-
- return 0;
-}
-
-static int
-de4x5_close(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, omr;
-
- disable_ast(dev);
-
- netif_stop_queue(dev);
-
- if (de4x5_debug & DEBUG_CLOSE) {
- printk("%s: Shutting down ethercard, status was %8.8x.\n",
- dev->name, inl(DE4X5_STS));
- }
-
- /*
- ** We stop the DE4X5 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
- STOP_DE4X5;
-
- /* Free the associated irq */
- free_irq(dev->irq, dev);
- lp->state = CLOSED;
-
- /* Free any socket buffers */
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
-
- /* Put the adapter to sleep to save power */
- yawn(dev, SLEEP);
-
- return 0;
-}
-
-static struct net_device_stats *
-de4x5_get_stats(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-
- return &lp->stats;
-}
-
-static void
-de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
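- /* Bucket the packet length into DE4X5_PKT_BIN_SZ-wide histogram bins */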
- for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
- if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DE4X5_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf, dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
- }
-}
-
-/*
-** Removes the TD_IC flag from previous descriptor to improve TX performance.
-** If the flag is changed on a descriptor that is being read by the hardware,
-** I assume PCI transaction ordering will mean you are either successful or
-** just miss asserting the change to the hardware. Anyway you're messing with
-** a descriptor you don't own, but this shouldn't kill the chip provided
-** the descriptor register is read only to the hardware.
-*/
-static void
-load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
- dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
-
- lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
- lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
- lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
- lp->tx_skb[lp->tx_new] = skb;
- lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
- barrier();
-
- lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
- barrier();
-}
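-
-/*
-** Editorial sketch (not part of the original driver): the store ordering
-** in load_packet() is the classic descriptor hand-off - fill in every
-** descriptor field first, then publish the descriptor by setting the OWN
-** bit last, with barrier() keeping the compiler from reordering the
-** stores. The helper below merely restates that pattern; its name is
-** illustrative.
-*/
-static inline void de4x5_publish_desc(struct de4x5_desc *d, u32 buf_dma, u32 flags)
-{
- d->buf = cpu_to_le32(buf_dma); /* 1: payload fields first */
- d->des1 |= cpu_to_le32(flags);
- barrier(); /* 2: fields visible before OWN */
- d->status = cpu_to_le32(T_OWN); /* 3: hand the descriptor to the chip */
- barrier();
-}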
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- /* First, double check that the adapter is open */
- if (lp->state == OPEN) {
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- u32 omr;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PR;
- outl(omr, DE4X5_OMR);
- } else {
- SetMulticastFilter(dev);
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_trans_update(dev); /* prevent tx timeout */
- }
- }
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Little endian crc one liner from Matt Thomas, DEC.
-*/
-static void
-SetMulticastFilter(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i, bit, byte;
- u16 hashcode;
- u32 omr, crc;
- char *pa;
- unsigned char *addrs;
-
- omr = inl(DE4X5_OMR);
- omr &= ~(OMR_PR | OMR_PM);
- pa = build_setup_frame(dev, ALL); /* Build the basic frame */
-
- if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
- omr |= OMR_PM; /* Pass all multicasts */
- } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-
- byte <<= 1; /* calc offset into setup frame */
- if (byte & 0x02) {
- byte -= 1;
- }
- lp->setup_frame[byte] |= bit;
- }
- } else { /* Perfect filtering */
- netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- for (i=0; i<ETH_ALEN; i++) {
- *(pa + (i&1)) = *addrs++;
- if (i & 0x01) pa += 4;
- }
- }
- }
- outl(omr, DE4X5_OMR);
-}
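-
-/*
-** Editorial sketch (not part of the original driver): how one multicast
-** address lands in the 512-bit hash filter built above. Only the 9 least
-** significant bits of the little endian CRC are used; bits [3-8] select a
-** byte and bits [0-2] a bit within it. The final adjustment maps that
-** linear offset onto the setup frame, which (by assumption here) carries
-** filter data only in the low 16 bits of each 32-bit word.
-*/
-static void example_hash_set(u8 *setup_frame, const u8 *mc_addr)
-{
- u32 crc = ether_crc_le(ETH_ALEN, mc_addr);
- u16 hashcode = crc & DE4X5_HASH_BITS; /* 9 LSBs of the CRC */
- int byte = hashcode >> 3; /* bits [3-8] -> byte in filter */
- u8 bit = 1 << (hashcode & 0x07); /* bits [0-2] -> bit in byte */
-
- byte <<= 1; /* offset into the setup frame... */
- if (byte & 0x02) /* ...skipping the unused high halves */
- byte -= 1;
- setup_frame[byte] |= bit;
-}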
-
-#ifdef CONFIG_EISA
-
-static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-
-static int de4x5_eisa_probe(struct device *gendev)
-{
- struct eisa_device *edev;
- u_long iobase;
- u_char irq, regval;
- u_short vendor;
- u32 cfid;
- int status, device;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- edev = to_eisa_device (gendev);
- iobase = edev->base_addr;
-
- if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
- return -EBUSY;
-
- if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
- DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
- status = -EBUSY;
- goto release_reg_1;
- }
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- status = -ENOMEM;
- goto release_reg_2;
- }
- lp = netdev_priv(dev);
-
- cfid = (u32) inl(PCI_CFID);
- lp->cfrv = (u_short) inl(PCI_CFRV);
- device = (cfid >> 8) & 0x00ffff00;
- vendor = (u_short) cfid;
-
- /* Read the EISA Configuration Registers */
- regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
-#ifdef CONFIG_ALPHA
- /* Looks like the Jensen firmware (rev 2.2) doesn't really
- * care about the EISA configuration, and thus doesn't
- * configure the PLX bridge properly. Oh well... Simply mimic
- * the EISA config file to sort it out. */
-
- /* EISA REG1: Assert DecChip 21040 HW Reset */
- outb (ER1_IAM | 1, EISA_REG1);
- mdelay (1);
-
- /* EISA REG1: Deassert DecChip 21040 HW Reset */
- outb (ER1_IAM, EISA_REG1);
- mdelay (1);
-
- /* EISA REG3: R/W Burst Transfer Enable */
- outb (ER3_BWE | ER3_BRE, EISA_REG3);
-
- /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
- outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
-#endif
- irq = de4x5_irq[(regval >> 1) & 0x03];
-
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
- lp->bus = EISA;
-
- /* Write the PCI Configuration Registers */
- outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
- outl(0x00006000, PCI_CFLT);
- outl(iobase, PCI_CBIO);
-
- DevicePresent(dev, EISA_APROM);
-
- dev->irq = irq;
-
- if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
- return 0;
- }
-
- free_netdev (dev);
- release_reg_2:
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_reg_1:
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return status;
-}
-
-static int de4x5_eisa_remove(struct device *device)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = dev_get_drvdata(device);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return 0;
-}
-
-static const struct eisa_device_id de4x5_eisa_ids[] = {
- { "DEC4250", 0 }, /* 0 is the board name index... */
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
-
-static struct eisa_driver de4x5_eisa_driver = {
- .id_table = de4x5_eisa_ids,
- .driver = {
- .name = "de4x5",
- .probe = de4x5_eisa_probe,
- .remove = de4x5_eisa_remove,
- }
-};
-#endif
-
-#ifdef CONFIG_PCI
-
-/*
-** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple
-** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
-** For single port cards this is a time waster...
-*/
-static void
-srom_search(struct net_device *dev, struct pci_dev *pdev)
-{
- u_char pb;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int i, j;
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *this_dev;
-
- list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
- vendor = this_dev->vendor;
- device = this_dev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
-
- /* Get the chip configuration revision register */
- pb = this_dev->bus->number;
-
- /* Set the device number information */
- lp->device = PCI_SLOT(this_dev->devfn);
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
- ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(this_dev, 0);
-
- /* Fetch the IRQ to be used */
- irq = this_dev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-
- /* Check if I/O accesses are enabled */
- pci_read_config_word(this_dev, PCI_COMMAND, &status);
- if (!(status & PCI_COMMAND_IO)) continue;
-
- /* Search for a valid SROM attached to this DECchip */
- DevicePresent(dev, DE4X5_APROM);
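- /*
- ** Sum the SROM station address bytes: a sum of 0 (all zeroes) or
- ** 6*0xff (all ones) means no valid address sits behind this chip.
- */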
- for (j=0, i=0; i<ETH_ALEN; i++) {
- j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
- }
- if (j != 0 && j != 6 * 0xff) {
- last.chipset = device;
- last.bus = pb;
- last.irq = irq;
- for (i=0; i<ETH_ALEN; i++) {
- last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
- }
- return;
- }
- }
-}
-
-/*
-** PCI bus I/O device probe
-** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
-** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
-** enabled by the user first in the setup utility. Hence we just check for
-** enabled features and silently ignore the card if they're not.
-**
-** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
-** bit. Here, check for I/O accesses and then set BM. If you put the card in
-** a non BM slot, you're on your own (and complain to the PC vendor that your
-** PC doesn't conform to the PCI standard)!
-**
-** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
-** kernels use the V0.535[n] drivers.
-*/
-
-static int de4x5_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u_char pb, pbus = 0, dev_num, dnum = 0, timer;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int error;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- dev_num = PCI_SLOT(pdev->devfn);
- pb = pdev->bus->number;
-
- if (io) { /* probe a single PCI device */
- pbus = (u_short)(io >> 8);
- dnum = (u_short)(io & 0xff);
- if ((pbus != pb) || (dnum != dev_num))
- return -ENODEV;
- }
-
- vendor = pdev->vendor;
- device = pdev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
- return -ENODEV;
-
- /* Ok, the device seems to be for us. */
- if ((error = pci_enable_device (pdev)))
- return error;
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- error = -ENOMEM;
- goto disable_dev;
- }
-
- lp = netdev_priv(dev);
- lp->bus = PCI;
- lp->bus_num = 0;
-
- /* Search for an SROM on this bus */
- if (lp->bus_num != pb) {
- lp->bus_num = pb;
- srom_search(dev, pdev);
- }
-
- /* Get the chip configuration revision register */
- lp->cfrv = pdev->revision;
-
- /* Set the device number information */
- lp->device = dev_num;
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(pdev, 0);
-
- /* Fetch the IRQ to be used */
- irq = pdev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check if I/O accesses and Bus Mastering are enabled */
- pci_read_config_word(pdev, PCI_COMMAND, &status);
-#ifdef __powerpc__
- if (!(status & PCI_COMMAND_IO)) {
- status |= PCI_COMMAND_IO;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
-#endif /* __powerpc__ */
- if (!(status & PCI_COMMAND_IO)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- if (!(status & PCI_COMMAND_MASTER)) {
- status |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
- if (!(status & PCI_COMMAND_MASTER)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check the latency timer for values >= 0x60 */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
- if (timer < 0x60) {
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
- }
-
- DevicePresent(dev, DE4X5_APROM);
-
- if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
- error = -EBUSY;
- goto free_dev;
- }
-
- dev->irq = irq;
-
- if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
- goto release;
- }
-
- return 0;
-
- release:
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- free_dev:
- free_netdev (dev);
- disable_dev:
- pci_disable_device (pdev);
- return error;
-}
-
-static void de4x5_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = pci_get_drvdata(pdev);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- pci_disable_device (pdev);
-}
-
-static const struct pci_device_id de4x5_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { },
-};
-
-static struct pci_driver de4x5_pci_driver = {
- .name = "de4x5",
- .id_table = de4x5_pci_tbl,
- .probe = de4x5_pci_probe,
- .remove = de4x5_pci_remove,
-};
-
-#endif
-
-/*
-** Auto configure the media here rather than setting the port at compile
-** time. This routine is called by de4x5_init() and when a loss of media is
-** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been
-** sneaky and changed the port on us.
-*/
-static int
-autoconf_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- disable_ast(dev);
-
- lp->c_media = AUTO; /* Bogus last media */
- inl(DE4X5_MFC); /* Zero the lost frames counter */
- lp->media = INIT;
- lp->tcount = 0;
-
- de4x5_ast(&lp->timer);
-
- return lp->media;
-}
-
-/*
-** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
-** from BNC as the port has a jumper to set thick or thin wire. When set for
-** BNC, the BNC port will indicate activity if it's not terminated correctly.
-** The only way to test for that is to place a loopback packet onto the
-** network and watch for errors. Since we're messing with the interrupt mask
-** register, disable the board interrupts and do not allow any more packets to
-** be queued to the hardware. Re-enable everything only when the media is
-** found.
-** I may have to "age out" locally queued packets so that the higher layer
-** timeouts don't effectively duplicate packets on the network.
-*/
-static int
-dc21040_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- s32 imr;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev);
- if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
- lp->media = TP;
- } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
- lp->media = BNC_AUI;
- } else if (lp->autosense == EXT_SIA) {
- lp->media = EXT_SIA;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21040_autoconf(dev);
- break;
-
- case TP:
- next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
- TP_SUSPECT, test_tp);
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
- break;
-
- case BNC:
- case AUI:
- case BNC_AUI:
- next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
- BNC_AUI_SUSPECT, ping_media);
- break;
-
- case BNC_AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
- break;
-
- case EXT_SIA:
- next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
- NC, EXT_SIA_SUSPECT, ping_media);
- break;
-
- case EXT_SIA_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
- break;
-
- case NC:
- /* default to TP for all */
- reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
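-/*
-** Editorial note (not in the original source): with autosense == AUTO,
-** dc21040_autoconf() above walks a simple media state machine, re-entering
-** itself from the autosense timer:
-**
-** INIT -> TP -> BNC_AUI -> EXT_SIA -> NC -> INIT -> ...
-**
-** An established medium drops into its *_SUSPECT state when the link goes
-** quiet and returns once de4x5_suspect_state() sees traffic again.
-*/
-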
-static int
-dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
- int next_state, int suspect_state,
- int (*fn)(struct net_device *, int))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 0:
- reset_init_sia(dev, csr13, csr14, csr15);
- lp->local_state++;
- next_tick = 500;
- break;
-
- case 1:
- if (!lp->tx_enable) {
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else {
- if (linkBad && (lp->autosense == AUTO)) {
- lp->local_state = 0;
- lp->media = next_state;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = suspect_state;
- next_tick = 3000;
- }
- break;
- }
-
- return next_tick;
-}
-
-static int
-de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
- int (*fn)(struct net_device *, int),
- int (*asfn)(struct net_device *))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 1:
- if (lp->linkOK) {
- lp->media = prev_state;
- } else {
- lp->local_state++;
- next_tick = asfn(dev);
- }
- break;
-
- case 2:
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else if (!linkBad) {
- lp->local_state--;
- lp->media = prev_state;
- } else {
- lp->media = INIT;
- lp->tcount++;
- }
- }
-
- return next_tick;
-}
-
-/*
-** Autoconfigure the media when using the DC21041. AUI needs to be tested
-** before BNC, because the BNC port will indicate activity if it's not
-** terminated correctly. The only way to test for that is to place a loopback
-** packet onto the network and watch for errors. Since we're messing with
-** the interrupt mask register, disable the board interrupts and do not allow
-** any more packets to be queued to the hardware. Re-enable everything only
-** when the media is found.
-*/
-static int
-dc21041_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, irqs, irq_mask, imr, omr;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
- lp->media = TP; /* On chip auto negotiation is broken */
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21041_autoconf(dev);
- break;
-
- case TP_NW:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts & STS_LNP) {
- lp->media = ANS;
- } else {
- lp->media = AUI;
- }
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case ANS:
- if (!lp->tx_enable) {
- irqs = STS_LNP;
- irq_mask = IMR_LPM;
- sts = test_ans(dev, irqs, irq_mask, 3000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- lp->media = TP;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = ANS_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case ANS_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
- break;
-
- case TP:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- if (inl(DE4X5_SISR) & SISR_NRA) {
- lp->media = AUI; /* Non selected port activity */
- } else {
- lp->media = BNC;
- }
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = TP_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->media = NC;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
- break;
-
- case NC:
- omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** Some autonegotiation chips are broken in that they do not return the
-** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
-** register, except at the first power up negotiation.
-*/
-static int
-dc21140m_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int ana, anlpa, cap, cr, slnk, sr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- u_long imr, omr, iobase = dev->base_addr;
-
- switch(lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->useSROM) {
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- return next_tick;
- }
- srom_exec(dev, lp->phy[lp->active].gep);
- if (lp->infoblock_media == ANS) {
- ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- }
- } else {
- lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
- SET_10Mb;
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if ((lp->autosense == AUTO) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- } else if (lp->autosense == AUTO) {
- lp->media = SPD_DET;
- } else if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = NC;
- }
- }
- lp->local_state = 0;
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case 1:
- if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc21140m_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (lp->timeout < 0) {
- lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
- (~gep_rd(dev) & GEP_LNP));
- SET_100Mb_PDET;
- }
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- next_tick = slnk & ~TIMER_CB;
- } else {
- if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
- lp->media = _10Mb;
- } else {
- lp->media = NC;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case _100Mb: /* Set 100Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case BNC:
- case AUI:
- case _10Mb: /* Set 10Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case NC:
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** This routine may be merged into dc21140m_autoconf() sometime as I'm
-** changing how I figure out the media - but trying to keep it backwards
-** compatible with the de500-xa and de500-aa.
-** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
-** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
-** This routine just has to figure out whether 10Mb/s or 100Mb/s is
-** active.
-** When autonegotiation is working, the ANS part searches the SROM for
-** the highest common speed (TP) link that both can run and if that can
-** be full duplex. That infoblock is executed and then the link speed set.
-**
-** Only _10Mb and _100Mb are tested here.
-*/
-static int
-dc2114x_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if (lp->params.autosense & ~AUTO) {
- srom_map_media(dev); /* Fixed media requested */
- if (lp->media != lp->params.autosense) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- lp->media = INIT;
- }
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = SPD_DET;
- if ((lp->infoblock_media == ANS) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- }
- }
- lp->local_state = 0;
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
- if (sr < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc2114x_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->tcount++;
- lp->media = INIT;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- if (lp->media == _100Mb) {
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- lp->media = SPD_DET;
- return slnk & ~TIMER_CB;
- }
- } else {
- if (wait_for_link(dev) < 0) {
- lp->media = SPD_DET;
- return PDET_LINK_WAIT;
- }
- }
- if (lp->media == ANS) { /* Do MII parallel detection */
- if (is_spd_100(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = _10Mb;
- }
- next_tick = dc2114x_autoconf(dev);
- } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
- (((lp->media == _10Mb) || (lp->media == TP) ||
- (lp->media == BNC) || (lp->media == AUI)) &&
- is_10_up(dev))) {
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->tcount++;
- lp->media = INIT;
- }
- break;
-
- case _10Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case _100Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- default:
- lp->tcount++;
- printk("Huh?: media:%02x\n", lp->media);
- lp->media = INIT;
- break;
- }
-
- return next_tick;
-}
-
-static int
-srom_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return lp->infoleaf_fn(dev);
-}
-
-/*
-** This mapping keeps the original media codes and FDX flag unchanged.
-** While it isn't strictly necessary, it helps me for the moment...
-** The early return avoids a media state / SROM media space clash.
-*/
-static int
-srom_map_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- lp->fdx = false;
- if (lp->infoblock_media == lp->media)
- return 0;
-
- switch(lp->infoblock_media) {
- case SROM_10BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_10BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
- lp->media = _10Mb;
- } else {
- lp->media = TP;
- }
- break;
-
- case SROM_10BASE2:
- lp->media = BNC;
- break;
-
- case SROM_10BASE5:
- lp->media = AUI;
- break;
-
- case SROM_100BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case SROM_100BASET4:
- lp->media = _100Mb;
- break;
-
- case SROM_100BASEFF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASEF:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case ANS:
- lp->media = ANS;
- lp->fdx = lp->params.fdx;
- break;
-
- default:
- printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
- lp->infoblock_media);
- return -1;
- }
-
- return 0;
-}
-
-static void
-de4x5_init_connection(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media; /* Stop scrolling media messages */
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_rst_desc_ring(dev);
- de4x5_setup_intr(dev);
- lp->tx_enable = true;
- spin_unlock_irqrestore(&lp->lock, flags);
- outl(POLL_DEMAND, DE4X5_TPD);
-
- netif_wake_queue(dev);
-}
-
-/*
-** General PHY reset function. Some MII devices don't reset correctly
-** since their MII address pins can float at voltages that are dependent
-** on the signal pin use. Do a double reset to ensure a reset.
-*/
-static int
-de4x5_reset_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = 0;
-
- if ((lp->useSROM) || (lp->phy[lp->active].id)) {
- if (lp->timeout < 0) {
- if (lp->useSROM) {
- if (lp->phy[lp->active].rst) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].rst);
- } else if (lp->rst) { /* Type 5 infoblock reset */
- srom_exec(dev, lp->rst);
- srom_exec(dev, lp->rst);
- }
- } else {
- PHY_HARD_RESET;
- }
- if (lp->useMII) {
- mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- }
- if (lp->useMII) {
- next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
- }
- } else if (lp->chipset == DC21140) {
- PHY_HARD_RESET;
- }
-
- return next_tick;
-}
-
-static int
-test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, csr12;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
- reset_init_sia(dev, csr13, csr14, csr15);
- }
-
- /* set up the interrupt mask */
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
-
- /* clear csr12 NRA and SRA bits */
- if ((lp->chipset == DC21041) || lp->useSROM) {
- csr12 = inl(DE4X5_SISR);
- outl(csr12, DE4X5_SISR);
- }
- }
-
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
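-
-/*
-** Editorial note (not in the original source): test_media() and the other
-** test_* helpers share one polling convention. A negative return (TIMER_CB
-** evidently occupies the sign bit) means "not finished - run the autosense
-** timer again in (return & ~TIMER_CB) ms"; anything else is the final
-** sampled status. Callers therefore follow this pattern:
-**
-** sts = test_media(dev, irqs, irq_mask, csr13, csr14, csr15, 2400);
-** if (sts < 0)
-** next_tick = sts & ~TIMER_CB; /* poll again after this delay */
-** else
-** ... /* act on the sampled status bits */
-*/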
-
-static int
-test_tp(struct net_device *dev, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
-
- if (sisr && --lp->timeout) {
- sisr = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** Samples the 100Mb Link State Signal. The sample interval is important
-** because too fast a rate can give erroneous results and confuse the
-** speed sense algorithm.
-*/
-#define SAMPLE_INTERVAL 500 /* ms */
-#define SAMPLE_DELAY 2000 /* ms */
-static int
-test_for_100Mb(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
-
- if (lp->timeout < 0) {
- if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
- if (msec > SAMPLE_DELAY) {
- lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
- gep = SAMPLE_DELAY | TIMER_CB;
- return gep;
- } else {
- lp->timeout = msec/SAMPLE_INTERVAL;
- }
- }
-
- if (lp->phy[lp->active].id || lp->useSROM) {
- gep = is_100_up(dev) | is_spd_100(dev);
- } else {
- gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
- }
- if (!(gep & ret) && --lp->timeout) {
- gep = SAMPLE_INTERVAL | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return gep;
-}
-
-static int
-wait_for_link(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->timeout < 0) {
- lp->timeout = 1;
- }
-
- if (lp->timeout--) {
- return TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return 0;
-}
-
-/*
-** Poll an MII register until the bits selected by <mask> are all set
-** (pol == true) or all clear (pol == false), or until the timer expires.
-*/
-static int
-test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int test;
- u_long iobase = dev->base_addr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
- test = (reg ^ (pol ? ~0 : 0)) & mask;
-
- if (test && --lp->timeout) {
- reg = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return reg;
-}
-
-static int
-is_spd_100(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int spd;
-
- if (lp->useMII) {
- spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
- spd = ~(spd ^ lp->phy[lp->active].spd.value);
- spd &= lp->phy[lp->active].spd.mask;
- } else if (!lp->useSROM) { /* de500-xa */
- spd = ((~gep_rd(dev)) & GEP_SLNK);
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-
- return spd;
-}
-
-static int
-is_100_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_SLNK;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_10_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_LNP;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return ((lp->chipset & ~0x00ff) == DC2114x) ?
- (~inl(DE4X5_SISR)&SISR_LS10):
- 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_anc_capable(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
- } else {
- return 0;
- }
-}
-
-/*
-** Send a packet onto the media and watch for send errors that indicate the
-** media is bad or unconnected.
-*/
-static int
-ping_media(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
-
- lp->tmp = lp->tx_new; /* Remember the ring position */
- load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD);
- }
-
- sisr = inl(DE4X5_SISR);
-
- if ((!(sisr & SISR_NCR)) &&
- ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
- (--lp->timeout)) {
- sisr = 100 | TIMER_CB;
- } else {
- if ((!(sisr & SISR_NCR)) &&
- !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
- lp->timeout) {
- sisr = 0;
- } else {
- sisr = 1;
- }
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** This function does two things: on Intel-like platforms it allocates a
-** fresh buffer to replace the one about to be passed up; on Alphas (and
-** other DE4X5_DO_MEMCPY platforms) it allocates a buffer into which the
-** packet is copied.
-*/
-static struct sk_buff *
-de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct sk_buff *p;
-
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- struct sk_buff *ret;
- u_long i=0, tmp;
-
- p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
- if (!p) return NULL;
-
- tmp = virt_to_bus(p->data);
- i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
- skb_reserve(p, i);
- lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
-
- ret = lp->rx_skb[index];
- lp->rx_skb[index] = p;
-
- if ((u_long) ret > 1) {
- skb_put(ret, len);
- }
-
- return ret;
-
-#else
- if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
-
- p = netdev_alloc_skb(dev, len + 2);
- if (!p) return NULL;
-
- skb_reserve(p, 2); /* Align */
- if (index < lp->rx_old) { /* Wrapped buffer */
- short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
- skb_put_data(p, lp->rx_bufs, len - tlen);
- } else { /* Linear buffer */
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
- }
-
- return p;
-#endif
-}
-
-static void
-de4x5_free_rx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->rxRingSize; i++) {
- if ((u_long) lp->rx_skb[i] > 1) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- lp->rx_ring[i].status = 0;
- lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
- }
-}
-
-static void
-de4x5_free_tx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->txRingSize; i++) {
- if (lp->tx_skb[i])
- de4x5_free_tx_buff(lp, i);
- lp->tx_ring[i].status = 0;
- }
-
- /* Unload the locally queued packets */
- __skb_queue_purge(&lp->cache.queue);
-}
-
-/*
-** When a user pulls a connection, the DECchip can end up in a
-** 'running - waiting for end of transmission' state. This means that we
-** have to perform a chip soft reset to ensure that we can synchronize
-** the hardware and software and make any media probes using a loopback
-** packet meaningful.
-*/
-static void
-de4x5_save_skbs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- if (!lp->cache.save_cnt) {
- STOP_DE4X5;
- de4x5_tx(dev); /* Flush any sent skb's */
- de4x5_free_tx_buffs(dev);
- de4x5_cache_state(dev, DE4X5_SAVE_STATE);
- de4x5_sw_reset(dev);
- de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
- lp->cache.save_cnt++;
- START_DE4X5;
- }
-}
-
-static void
-de4x5_rst_desc_ring(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i;
- s32 omr;
-
- if (lp->cache.save_cnt) {
- STOP_DE4X5;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
- lp->cache.save_cnt--;
- START_DE4X5;
- }
-}
-
-static void
-de4x5_cache_state(struct net_device *dev, int flag)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- switch(flag) {
- case DE4X5_SAVE_STATE:
- lp->cache.csr0 = inl(DE4X5_BMR);
- lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
- lp->cache.csr7 = inl(DE4X5_IMR);
- break;
-
- case DE4X5_RESTORE_STATE:
- outl(lp->cache.csr0, DE4X5_BMR);
- outl(lp->cache.csr6, DE4X5_OMR);
- outl(lp->cache.csr7, DE4X5_IMR);
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
- lp->cache.csr15);
- }
- break;
- }
-}
-
-static void
-de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_tail(&lp->cache.queue, skb);
-}
-
-static void
-de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_head(&lp->cache.queue, skb);
-}
-
-static struct sk_buff *
-de4x5_get_cache(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return __skb_dequeue(&lp->cache.queue);
-}
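
/*
** [Sketch] The three cache helpers above implement a simple deferred-TX
** queue: put appends at the tail, putb pushes a packet back at the head
** (e.g. when it could not be loaded after all), and get pops from the
** head. A minimal standalone model of the same discipline using a
** circular list with a sentinel (a hypothetical pkt type, not the
** kernel's sk_buff API):
*/
#include <stdio.h>

struct pkt { struct pkt *next, *prev; int id; };
struct pktq { struct pkt head; };                 /* circular list sentinel */

static void pktq_init(struct pktq *q)
{
    q->head.next = q->head.prev = &q->head;
}

static void pktq_put(struct pktq *q, struct pkt *p)   /* tail: normal enqueue */
{
    p->prev = q->head.prev; p->next = &q->head;
    p->prev->next = p; q->head.prev = p;
}

static void pktq_putb(struct pktq *q, struct pkt *p)  /* head: push back */
{
    p->next = q->head.next; p->prev = &q->head;
    p->next->prev = p; q->head.next = p;
}

static struct pkt *pktq_get(struct pktq *q)           /* head: dequeue */
{
    struct pkt *p = q->head.next;

    if (p == &q->head) return NULL;                   /* empty queue */
    q->head.next = p->next; p->next->prev = &q->head;
    return p;
}

int main(void)
{
    struct pktq q;
    struct pkt a = { .id = 1 }, b = { .id = 2 };

    pktq_init(&q);
    pktq_put(&q, &a);                      /* queue: 1 */
    pktq_putb(&q, &b);                     /* queue: 2 1 - b jumps the queue */
    printf("%d %d\n", pktq_get(&q)->id, pktq_get(&q)->id);   /* prints: 2 1 */
    return 0;
}
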
-
-/*
-** Check the Auto Negotiation State. Return OK when a link pass interrupt
-** is received and the auto-negotiation status is NWAY OK.
-*/
-static int
-test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, ans;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
- }
-
- ans = inl(DE4X5_SISR) & SISR_ANS;
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
-
-static void
-de4x5_setup_intr(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, sts;
-
- if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
- imr = 0;
- UNMASK_IRQs;
- sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
- outl(sts, DE4X5_STS);
- ENABLE_IRQs;
- }
-}
-
-/*
-**
-*/
-static void
-reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- RESET_SIA;
- if (lp->useSROM) {
- if (lp->ibn == 3) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].gep);
- outl(1, DE4X5_SICR);
- return;
- } else {
- csr15 = lp->cache.csr15;
- csr14 = lp->cache.csr14;
- csr13 = lp->cache.csr13;
- outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
- outl(csr15 | lp->cache.gep, DE4X5_SIGR);
- }
- } else {
- outl(csr15, DE4X5_SIGR);
- }
- outl(csr14, DE4X5_STRR);
- outl(csr13, DE4X5_SICR);
-
- mdelay(10);
-}
-
-/*
-** Create a loopback ethernet packet
-*/
-static void
-create_packet(struct net_device *dev, char *frame, int len)
-{
- int i;
- char *buf = frame;
-
- for (i=0; i<ETH_ALEN; i++) { /* Destination address (this host) */
- *buf++ = dev->dev_addr[i];
- }
- for (i=0; i<ETH_ALEN; i++) { /* Source address (this host) */
- *buf++ = dev->dev_addr[i];
- }
-
- *buf++ = 0; /* Packet length (2 bytes) */
- *buf++ = 1;
-}
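
/*
** [Sketch] The loopback frame built above is addressed to the card
** itself: destination and source are both dev_addr, followed by a
** big-endian 802.3 length field of 0x0001. A standalone sketch of the
** 14-byte header layout (hypothetical example address):
*/
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void build_loopback_hdr(unsigned char *frame, const unsigned char *hwaddr)
{
    memcpy(frame, hwaddr, ETH_ALEN);                /* destination = self */
    memcpy(frame + ETH_ALEN, hwaddr, ETH_ALEN);     /* source = self */
    frame[12] = 0x00;                               /* 802.3 length, high byte */
    frame[13] = 0x01;                               /* 802.3 length, low byte */
}

int main(void)
{
    unsigned char hwaddr[ETH_ALEN] = { 0x00, 0x00, 0xf8, 0x12, 0x34, 0x56 };
    unsigned char frame[14];
    int i;

    build_loopback_hdr(frame, hwaddr);
    for (i = 0; i < 14; i++)
        printf("%02x%c", frame[i], i == 13 ? '\n' : ' ');
    return 0;
}
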
-
-/*
-** Look for a particular board name in the EISA configuration space
-*/
-static int
-EISA_signature(char *name, struct device *device)
-{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
- struct eisa_device *edev;
-
- *name = '\0';
- edev = to_eisa_device (device);
- i = edev->id.driver_data;
-
- if (i >= 0 && i < siglen) {
- strcpy (name, de4x5_signatures[i]);
- status = 1;
- }
-
- return status; /* return the device name string */
-}
-
-/*
-** Look for a particular board name in the PCI configuration space
-*/
-static void
-PCI_signature(char *name, struct de4x5_private *lp)
-{
- int i, siglen = ARRAY_SIZE(de4x5_signatures);
-
- if (lp->chipset == DC21040) {
- strcpy(name, "DE434/5");
- return;
- } else { /* Search for a DEC name in the SROM */
- int tmp = *((char *)&lp->srom + 19) * 3;
- strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
- }
- name[8] = '\0';
- for (i=0; i<siglen; i++) {
- if (strstr(name,de4x5_signatures[i])!=NULL) break;
- }
- if (i == siglen) {
- if (dec_only) {
- *name = '\0';
- } else { /* Use chip name to avoid confusion */
- strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
- ((lp->chipset == DC21041) ? "DC21041" :
- ((lp->chipset == DC21140) ? "DC21140" :
- ((lp->chipset == DC21142) ? "DC21142" :
- ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
- )))))));
- }
- if (lp->chipset != DC21041) {
- lp->useSROM = true; /* card is not recognisably DEC */
- }
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- lp->useSROM = true;
- }
-}
-
-/*
-** Reset the Ethernet PROM pointer to the start of the Ethernet address on
-** the DC21040, else read the SROM for the other chips.
-** The SROM may not be present in a multi-MAC card, so first read the
-** MAC address and check for a bad address. If there is a bad one then exit
-** immediately with the prior SROM contents intact (the h/w address will
-** be fixed up later).
-*/
-static void
-DevicePresent(struct net_device *dev, u_long aprom_addr)
-{
- int i, j=0;
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->chipset == DC21040) {
- if (lp->bus == EISA) {
- enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
- } else {
- outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
- }
- } else { /* Read new srom */
- u_short tmp;
- __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
- for (i=0; i<(ETH_ALEN>>1); i++) {
- tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
- j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
- *p = cpu_to_le16(tmp);
- }
- if (j == 0 || j == 3 * 0xffff) {
- /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
- return;
- }
-
- p = (__le16 *)&lp->srom;
- for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
- tmp = srom_rd(aprom_addr, i);
- *p++ = cpu_to_le16(tmp);
- }
- de4x5_dbg_srom(&lp->srom);
- }
-}
-
-/*
-** Since the write on the Enet PROM register doesn't seem to reset the PROM
-** pointer correctly (at least on my DE425 EISA card), this routine should do
-** it...from depca.c.
-*/
-static void
-enet_addr_rst(u_long aprom_addr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- } dev;
- short sigLength=0;
- s8 data;
- int i, j;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
- data = inb(aprom_addr);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j=1;
- } else {
- j=0;
- }
- }
- }
-}
-
-/*
-** For the bad status case with no SROM, add one to the previous
-** address. However, the increment must carry backwards in case one or
-** more of the bytes is 0xff. Only the last 3 bytes need be checked,
-** since the first three are invariant - they are assigned to an
-** organisation.
-*/
-static int
-get_hw_addr(struct net_device *dev)
-{
- u_long iobase = dev->base_addr;
- int broken, i, k, tmp, status = 0;
- u_short j,chksum;
- struct de4x5_private *lp = netdev_priv(dev);
- u8 addr[ETH_ALEN];
-
- broken = de4x5_bad_srom(lp);
-
- for (i=0,k=0,j=0;j<3;j++) {
- k <<= 1;
- if (k > 0xffff) k-=0xffff;
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_char) tmp;
- addr[i++] = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_short) (tmp << 8);
- addr[i++] = (u_char) tmp;
- } else if (!broken) {
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- } else if ((broken == SMC) || (broken == ACCTON)) {
- addr[i] = *((u_char *)&lp->srom + i); i++;
- addr[i] = *((u_char *)&lp->srom + i); i++;
- }
- } else {
- k += (u_char) (tmp = inb(EISA_APROM));
- addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- addr[i++] = (u_char) tmp;
- }
-
- if (k > 0xffff) k-=0xffff;
- }
- if (k == 0xffff) k=0;
-
- eth_hw_addr_set(dev, addr);
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum |= (u_short) (tmp << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
- } else {
- chksum = (u_char) inb(EISA_APROM);
- chksum |= (u_short) (inb(EISA_APROM) << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
-
- /* If possible, try to fix a broken card - SMC only so far */
- srom_repair(dev, broken);
-
-#ifdef CONFIG_PPC_PMAC
- /*
- ** If the address starts with 00 a0, we have to bit-reverse
- ** each byte of the address.
- */
- if ( machine_is(powermac) &&
- (dev->dev_addr[0] == 0) &&
- (dev->dev_addr[1] == 0xa0) )
- {
- for (i = 0; i < ETH_ALEN; ++i)
- {
- int x = dev->dev_addr[i];
- x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
- x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
- }
- eth_hw_addr_set(dev, addr);
- }
-#endif /* CONFIG_PPC_PMAC */
-
- /* Test for a bad enet address */
- status = test_bad_enet(dev, status);
-
- return status;
-}
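
/*
** [Sketch] The running value k above is a 16-bit rotate-and-add
** checksum over the three address words, with an end-around carry and
** 0xffff folded to zero at the end. A standalone sketch of the same
** arithmetic (hypothetical example address):
*/
#include <stdio.h>

static unsigned int enet_chksum(const unsigned char addr[6])
{
    unsigned int k = 0;
    int j;

    for (j = 0; j < 3; j++) {
        k <<= 1;                            /* rotate left ... */
        if (k > 0xffff) k -= 0xffff;        /* ... with end-around carry */
        k += addr[2 * j];                   /* low byte of the word */
        k += addr[2 * j + 1] << 8;          /* high byte of the word */
        if (k > 0xffff) k -= 0xffff;
    }
    if (k == 0xffff) k = 0;                 /* fold the all-ones result */

    return k;
}

int main(void)
{
    unsigned char addr[6] = { 0x00, 0x00, 0xf8, 0x12, 0x34, 0x56 };

    printf("checksum: %04x\n", enet_chksum(addr));
    return 0;
}
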
-
-/*
-** Test for known Ethernet vendor prefixes in the first 32 bytes of the
-** SROM - their presence marks a broken (SMC/Accton style) SROM.
-*/
-static int
-de4x5_bad_srom(struct de4x5_private *lp)
-{
- int i, status = 0;
-
- for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
- if (!memcmp(&lp->srom, &enet_det[i], 3) &&
- !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
- if (i == 0) {
- status = SMC;
- } else if (i == 1) {
- status = ACCTON;
- }
- break;
- }
- }
-
- return status;
-}
-
-static void
-srom_repair(struct net_device *dev, int card)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- switch(card) {
- case SMC:
- memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
- memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
- memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
- lp->useSROM = true;
- break;
- }
-}
-
-/*
-** Assume that the IRQs do not follow the PCI spec - this seems
-** to be true so far (2 for 2).
-*/
-static int
-test_bad_enet(struct net_device *dev, int status)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, tmp;
-
- for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
- if ((tmp == 0) || (tmp == 0x5fa)) {
- if ((lp->chipset == last.chipset) &&
- (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- eth_addr_inc(last.addr);
- eth_hw_addr_set(dev, last.addr);
-
- if (!an_exception(lp)) {
- dev->irq = last.irq;
- }
-
- status = 0;
- }
- } else if (!status) {
- last.chipset = lp->chipset;
- last.bus = lp->bus_num;
- last.irq = dev->irq;
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
- }
-
- return status;
-}
-
-/*
-** List of board exceptions with correctly wired IRQs
-*/
-static int
-an_exception(struct de4x5_private *lp)
-{
- if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
- (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
- return -1;
- }
-
- return 0;
-}
-
-/*
-** SROM Read
-*/
-static short
-srom_rd(u_long addr, u_char offset)
-{
- sendto_srom(SROM_RD | SROM_SR, addr);
-
- srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
- srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
- srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-
- return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
-}
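
/*
** [Sketch] The SROM is a MicroWire (93C46-style) serial EEPROM, and
** srom_rd() above bit-bangs a read: select the chip, clock in a start
** bit plus the read opcode, clock in a 6-bit address MSB-first, then
** clock out 16 data bits MSB-first. A standalone sketch of that bit
** sequence against a hypothetical bit-level interface (clock_out and
** clock_in stand in for the sendto_srom/getfrom_srom pulses):
*/
#include <stdio.h>

static unsigned short fake_rom[64] = { [0] = 0xbeef };  /* pretend EEPROM */
static unsigned int shift_in, shift_out, bits_left;

static void clock_out(int bit)          /* host -> EEPROM, one clock pulse */
{
    shift_in = (shift_in << 1) | (bit & 1);
}

static int clock_in(void)               /* EEPROM -> host, one clock pulse */
{
    return (shift_out >> --bits_left) & 1;
}

static unsigned short microwire_read(unsigned char offset)
{
    unsigned short word = 0;
    int i;

    shift_in = 0;
    clock_out(1); clock_out(1); clock_out(0);   /* start bit + READ opcode */
    for (i = 5; i >= 0; i--)                    /* 6-bit address, MSB first */
        clock_out((offset >> i) & 1);

    shift_out = fake_rom[shift_in & 0x3f];      /* EEPROM latches the address */
    bits_left = 16;
    for (i = 0; i < 16; i++)                    /* 16 data bits, MSB first */
        word = (word << 1) | clock_in();

    return word;
}

int main(void)
{
    printf("word 0: %04x\n", microwire_read(0));   /* prints: beef */
    return 0;
}
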
-
-static void
-srom_latch(u_int command, u_long addr)
-{
- sendto_srom(command, addr);
- sendto_srom(command | DT_CLK, addr);
- sendto_srom(command, addr);
-}
-
-static void
-srom_command(u_int command, u_long addr)
-{
- srom_latch(command, addr);
- srom_latch(command, addr);
- srom_latch((command & 0x0000ff00) | DT_CS, addr);
-}
-
-static void
-srom_address(u_int command, u_long addr, u_char offset)
-{
- int i, a;
-
- a = offset << 2;
- for (i=0; i<6; i++, a <<= 1) {
- srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
- }
- udelay(1);
-
- i = (getfrom_srom(addr) >> 3) & 0x01;
-}
-
-static short
-srom_data(u_int command, u_long addr)
-{
- int i;
- short word = 0;
- s32 tmp;
-
- for (i=0; i<16; i++) {
- sendto_srom(command | DT_CLK, addr);
- tmp = getfrom_srom(addr);
- sendto_srom(command, addr);
-
- word = (word << 1) | ((tmp >> 3) & 0x01);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-
- return word;
-}
-
-/*
-static void
-srom_busy(u_int command, u_long addr)
-{
- sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
- while (!((getfrom_srom(addr) >> 3) & 0x01)) {
- mdelay(1);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-}
-*/
-
-static void
-sendto_srom(u_int command, u_long addr)
-{
- outl(command, addr);
- udelay(1);
-}
-
-static int
-getfrom_srom(u_long addr)
-{
- s32 tmp;
-
- tmp = inl(addr);
- udelay(1);
-
- return tmp;
-}
-
-static int
-srom_infoleaf_info(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, count;
- u_char *p;
-
- /* Find the infoleaf decoder function that matches this chipset */
- for (i=0; i<INFOLEAF_SIZE; i++) {
- if (lp->chipset == infoleaf_array[i].chipset) break;
- }
- if (i == INFOLEAF_SIZE) {
- lp->useSROM = false;
- printk("%s: Cannot find correct chipset for SROM decoding!\n",
- dev->name);
- return -ENXIO;
- }
-
- lp->infoleaf_fn = infoleaf_array[i].fn;
-
- /* Find the information offset that this function should use */
- count = *((u_char *)&lp->srom + 19);
- p = (u_char *)&lp->srom + 26;
-
- if (count > 1) {
- for (i=count; i; --i, p+=3) {
- if (lp->device == *p) break;
- }
- if (i == 0) {
- lp->useSROM = false;
- printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
- dev->name, lp->device);
- return -ENXIO;
- }
- }
-
- lp->infoleaf_offset = get_unaligned_le16(p + 1);
-
- return 0;
-}
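
/*
** [Sketch] The infoleaf lookup above assumes this SROM layout: byte 19
** holds the controller count and the per-controller table starts at
** byte 26, three bytes per entry (a device ID byte, then a
** little-endian 16-bit infoleaf offset). A standalone sketch with
** hypothetical SROM contents:
*/
#include <stdio.h>
#include <string.h>

static int find_infoleaf_offset(const unsigned char *srom, unsigned char device)
{
    int count = srom[19];
    const unsigned char *p = srom + 26;
    int i;

    if (count > 1) {
        for (i = count; i; --i, p += 3)
            if (device == *p)
                break;
        if (i == 0)
            return -1;                       /* device not in the table */
    }

    return p[1] | (p[2] << 8);               /* little-endian leaf offset */
}

int main(void)
{
    unsigned char srom[128];

    memset(srom, 0, sizeof(srom));
    srom[19] = 2;                            /* two controllers */
    srom[26] = 0x19; srom[27] = 0x40; srom[28] = 0x00;  /* dev 0x19 @ 0x0040 */
    srom[29] = 0x09; srom[30] = 0x60; srom[31] = 0x00;  /* dev 0x09 @ 0x0060 */

    printf("leaf offset: 0x%04x\n", find_infoleaf_offset(srom, 0x09));
    return 0;
}
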
-
-/*
-** This routine loads any type 1 or 3 MII info into the mii device
-** struct and executes any type 5 code to reset PHY devices for this
-** controller.
-** The info for the MII devices will be valid since the index used
-** will follow the discovery process from MII address 1-31 then 0.
-*/
-static void
-srom_init(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- u_char count;
-
- p+=2;
- if (lp->chipset == DC21140) {
- lp->cache.gepc = (*p++ | GEP_CTRL);
- gep_wr(lp->cache.gepc, dev);
- }
-
- /* Block count */
- count = *p++;
-
- /* Jump the infoblocks to find types */
- for (;count; --count) {
- if (*p < 128) {
- p += COMPACT_LEN;
- } else if (*(p+1) == 5) {
- type5_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 4) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 3) {
- type3_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 2) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 1) {
- type1_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else {
- p += ((*p & BLOCK_LEN) + 1);
- }
- }
-}
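
/*
** [Sketch] The walk above relies on the infoblock length encoding: a
** first byte below 128 marks a fixed-size compact block, otherwise
** bit 7 is set, the low 7 bits (BLOCK_LEN) give the block length, and
** the second byte carries the type. A standalone walker over a
** hypothetical block stream:
*/
#include <stdio.h>

#define COMPACT_LEN 4
#define BLOCK_LEN   0x7f

static void walk_infoblocks(const unsigned char *p, int count)
{
    while (count--) {
        if (*p < 128) {                      /* compact block, fixed size */
            printf("compact block\n");
            p += COMPACT_LEN;
        } else {                             /* extended block: len | 0x80 */
            printf("type %d block, %d bytes\n", p[1], (*p & BLOCK_LEN) + 1);
            p += (*p & BLOCK_LEN) + 1;
        }
    }
}

int main(void)
{
    /* one compact block, then a 5-byte type 5 block, then a 9-byte type 1 */
    unsigned char blocks[] = {
        0x00, 0x00, 0x00, 0x00,
        0x84, 5, 0, 0, 0,
        0x88, 1, 0, 0, 0, 0, 0, 0, 0,
    };

    walk_infoblocks(blocks, 3);
    return 0;
}
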
-
-/*
-** A generic routine that writes GEP control, data and reset information
-** to the GEP register (21140) or csr15 GEP portion (2114[23]).
-*/
-static void
-srom_exec(struct net_device *dev, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char count = (p ? *p++ : 0);
- u_short *w = (u_short *)p;
-
- if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
-
- if (lp->chipset != DC21140) RESET_SIA;
-
- while (count--) {
- gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
- *p++ : get_unaligned_le16(w++)), dev);
- mdelay(2); /* 2ms per action */
- }
-
- if (lp->chipset != DC21140) {
- outl(lp->cache.csr14, DE4X5_STRR);
- outl(lp->cache.csr13, DE4X5_SICR);
- }
-}
-
-/*
-** Basically this function is a NOP since it will never be called,
-** unless I implement the DC21041 SROM functions. There's no need
-** since the existing code will be satisfactory for all boards.
-*/
-static int
-dc21041_infoleaf(struct net_device *dev)
-{
- return DE4X5_AUTOSENSE_MS;
-}
-
-static int
-dc21140_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* GEP control */
- lp->cache.gepc = (*p++ | GEP_CTRL);
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21142_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21143_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-/*
-** The compact infoblock is only designed for DC21140[A] chips, so
-** we'll reuse the dc21140m_autoconf function. Non MII media only.
-*/
-static int
-compact_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+COMPACT_LEN) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
- } else {
- return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = COMPACT;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- lp->infoblock_media = (*p++) & COMPACT_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/*
-** This block describes non MII media for the DC21140[A] only.
-*/
-static int
-type0_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 0;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- p+=2;
- lp->infoblock_media = (*p++) & BLOCK0_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/* These functions are under construction! */
-
-static int
-type1_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 1;
- lp->active = *p++;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p);
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 1;
- lp->active = *p;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-static int
-type2_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 2;
- lp->active = 0;
- p += 2;
- lp->infoblock_media = (*p) & MEDIA_CODE;
-
- if ((*p++) & EXT_FIELD) {
- lp->cache.csr13 = get_unaligned_le16(p); p += 2;
- lp->cache.csr14 = get_unaligned_le16(p); p += 2;
- lp->cache.csr15 = get_unaligned_le16(p); p += 2;
- } else {
- lp->cache.csr13 = CSR13;
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- }
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
- lp->infoblock_csr6 = OMR_SIA;
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type3_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 3;
- lp->active = *p++;
- if (MOTO_SROM_BUG) lp->active = 0;
- /* if (MOTO_SROM_BUG) statement indicates lp->active could
- * be 8 (i.e. the size of array lp->phy) */
- if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
- return -EINVAL;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].mci = *p;
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 3;
- lp->active = *p;
- if (MOTO_SROM_BUG) lp->active = 0;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type4_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 4;
- lp->active = 0;
- p+=2;
- lp->infoblock_media = (*p++) & MEDIA_CODE;
- lp->cache.csr13 = CSR13; /* Hard coded defaults */
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-/*
-** This block type provides information for resetting external devices
-** (chips) through the General Purpose Register.
-*/
-static int
-type5_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- /* Must be initializing to run this code */
- if ((lp->state == INITIALISED) || (lp->media == INIT)) {
- p+=2;
- lp->rst = p;
- srom_exec(dev, lp->rst);
- }
-
- return DE4X5_AUTOSENSE_MS;
-}
-
-/*
-** MII Read/Write
-*/
-
-static int
-mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to read */
- mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
-
- return mii_rdata(ioaddr); /* Read data */
-}
-
-static void
-mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to write */
- mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
- data = mii_swap(data, 16); /* Swap data bit ordering */
- mii_wdata(data, 16, ioaddr); /* Write data */
-}
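
/*
** [Sketch] The helpers above bit-bang an IEEE 802.3 clause 22
** management frame: at least 32 preamble ones, a 01 start, a 2-bit
** opcode (10 = read, 01 = write), 5-bit PHY and register addresses, a
** 2-bit turnaround, then 16 data bits. A standalone sketch that just
** packs the 32-bit frame following the preamble:
*/
#include <stdio.h>

static unsigned int mdio_frame(int read, unsigned int phy,
                               unsigned int reg, unsigned int data)
{
    unsigned int f = 0;

    f = (f << 2) | 0x1;                 /* ST: 01 */
    f = (f << 2) | (read ? 0x2 : 0x1);  /* OP: 10 = read, 01 = write */
    f = (f << 5) | (phy & 0x1f);        /* PHY address */
    f = (f << 5) | (reg & 0x1f);        /* register address */
    f = (f << 2) | 0x2;                 /* TA: 10, driven only on writes */
    f = (f << 16) | (data & 0xffff);    /* 16 data bits */

    return f;
}

int main(void)
{
    /* write 0x8000 (reset) to register 0 of the PHY at address 1 */
    printf("frame: %08x\n", mdio_frame(0, 1, 0, 0x8000));
    return 0;
}
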
-
-static int
-mii_rdata(u_long ioaddr)
-{
- int i;
- s32 tmp = 0;
-
- for (i=0; i<16; i++) {
- tmp <<= 1;
- tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
- }
-
- return tmp;
-}
-
-static void
-mii_wdata(int data, int len, u_long ioaddr)
-{
- int i;
-
- for (i=0; i<len; i++) {
- sendto_mii(MII_MWR | MII_WR, data, ioaddr);
- data >>= 1;
- }
-}
-
-static void
-mii_address(u_char addr, u_long ioaddr)
-{
- int i;
-
- addr = mii_swap(addr, 5);
- for (i=0; i<5; i++) {
- sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
- addr >>= 1;
- }
-}
-
-static void
-mii_ta(u_long rw, u_long ioaddr)
-{
- if (rw == MII_STWR) {
- sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
- sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
- } else {
- getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
- }
-}
-
-static int
-mii_swap(int data, int len)
-{
- int i, tmp = 0;
-
- for (i=0; i<len; i++) {
- tmp <<= 1;
- tmp |= (data & 1);
- data >>= 1;
- }
-
- return tmp;
-}
-
-static void
-sendto_mii(u32 command, int data, u_long ioaddr)
-{
- u32 j;
-
- j = (data & 1) << 17;
- outl(command | j, ioaddr);
- udelay(1);
- outl(command | MII_MDC | j, ioaddr);
- udelay(1);
-}
-
-static int
-getfrom_mii(u32 command, u_long ioaddr)
-{
- outl(command, ioaddr);
- udelay(1);
- outl(command | MII_MDC, ioaddr);
- udelay(1);
-
- return (inl(ioaddr) >> 19) & 1;
-}
-
-/*
-** Here are 3 ways to calculate the OUI from the ID registers.
-*/
-static int
-mii_get_oui(u_char phyaddr, u_long ioaddr)
-{
-/*
- union {
- u_short reg;
- u_char breg[2];
- } a;
- int i, r2, r3, ret=0;*/
- int r2;
-
- /* Read r2 and r3 */
- r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- mii_rd(MII_ID1, phyaddr, ioaddr);
- /* SEEQ and Cypress way * /
- / * Shuffle r2 and r3 * /
- a.reg=0;
- r3 = ((r3>>10)|(r2<<6))&0x0ff;
- r2 = ((r2>>2)&0x3fff);
-
- / * Bit reverse r3 * /
- for (i=0;i<8;i++) {
- ret<<=1;
- ret |= (r3&1);
- r3>>=1;
- }
-
- / * Bit reverse r2 * /
- for (i=0;i<16;i++) {
- a.reg<<=1;
- a.reg |= (r2&1);
- r2>>=1;
- }
-
- / * Swap r2 bytes * /
- i=a.breg[0];
- a.breg[0]=a.breg[1];
- a.breg[1]=i;
-
- return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
-/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
- return r2; /* (I did it) My way */
-}
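
/*
** [Sketch] The second scheme commented out above (the "NATIONAL and
** BROADCOM way") treats ID register 2 as OUI bits 3:18 and the top six
** bits of ID register 3 as bits 19:24, so the two concatenate into a
** 22-bit vendor value. A standalone sketch with hypothetical register
** values:
*/
#include <stdio.h>

static unsigned int oui_from_ids(unsigned int r2, unsigned int r3)
{
    return (r2 << 6) | (r3 >> 10);      /* 16 + 6 = 22 significant bits */
}

int main(void)
{
    unsigned int r2 = 0x2000, r3 = 0x5c00;   /* hypothetical MII_ID0/MII_ID1 */

    printf("vendor: %06x\n", oui_from_ids(r2, r3));
    return 0;
}
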
-
-/*
-** The SROM spec forces us to search addresses [1-31 0]. Bummer.
-*/
-static int
-mii_get_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, k, n, limit=ARRAY_SIZE(phy_info);
- int id;
-
- lp->active = 0;
- lp->useMII = true;
-
- /* Search the MII address space for possible PHY devices */
- for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
- lp->phy[lp->active].addr = i;
- if (i==0) n++; /* Count cycles */
- while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
- id = mii_get_oui(i, DE4X5_MII);
- if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
- for (j=0; j<limit; j++) { /* Search PHY table */
- if (id != phy_info[j].id) continue; /* ID match? */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- memcpy((char *)&lp->phy[k],
- (char *)&phy_info[j], sizeof(struct phy_table));
- lp->phy[k].addr = i;
- lp->mii_cnt++;
- lp->active++;
- } else {
- goto purgatory; /* Stop the search */
- }
- break;
- }
- if ((j == limit) && (i < DE4X5_MAX_MII)) {
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- lp->phy[k].addr = i;
- lp->phy[k].id = id;
- lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
- lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
- lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
- lp->mii_cnt++;
- lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
- j = de4x5_debug;
- de4x5_debug |= DEBUG_MII;
- de4x5_dbg_mii(dev, k);
- de4x5_debug = j;
- printk("\n");
- } else {
- goto purgatory;
- }
- }
- }
- purgatory:
- lp->active = 0;
- if (lp->phy[0].id) { /* Reset the PHY devices */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
- mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
- while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-
- de4x5_dbg_mii(dev, k);
- }
- }
- if (!lp->mii_cnt) lp->useMII = false;
-
- return lp->mii_cnt;
-}
-
-static char *
-build_setup_frame(struct net_device *dev, int mode)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
- char *pa = lp->setup_frame;
-
- /* Initialise the setup frame */
- if (mode == ALL) {
- memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
- }
-
- if (lp->setup_f == HASH_PERF) {
- for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
- *(pa + i) = dev->dev_addr[i]; /* Host address */
- if (i & 0x01) pa += 2;
- }
- *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
- } else {
- for (i=0; i<ETH_ALEN; i++) { /* Host address */
- *(pa + (i&1)) = dev->dev_addr[i];
- if (i & 0x01) pa += 4;
- }
- for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
- *(pa + (i&1)) = (char) 0xff;
- if (i & 0x01) pa += 4;
- }
- }
-
- return pa; /* Points to the next entry */
-}
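
/*
** [Sketch] In the perfect-filtering branch above, each 6-byte address
** occupies three 32-bit longwords of the setup frame, with the address
** bytes in the low 16 bits of each longword (byte offsets 0,1 then 4,5
** then 8,9). A standalone sketch of one such entry (hypothetical
** address):
*/
#include <stdio.h>

#define ETH_ALEN 6

static void write_perfect_entry(unsigned char *entry, const unsigned char *addr)
{
    int i;

    for (i = 0; i < ETH_ALEN; i++)
        entry[(i >> 1) * 4 + (i & 1)] = addr[i];   /* low half of each longword */
}

int main(void)
{
    unsigned char entry[12] = { 0 };
    unsigned char addr[ETH_ALEN] = { 0x00, 0x00, 0xf8, 0x12, 0x34, 0x56 };
    int i;

    write_perfect_entry(entry, addr);
    for (i = 0; i < 12; i++)
        printf("%02x%c", entry[i], i == 11 ? '\n' : ' ');
    return 0;
}
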
-
-static void
-disable_ast(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- del_timer_sync(&lp->timer);
-}
-
-static long
-de4x5_switch_mac_port(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- STOP_DE4X5;
-
- /* Assert the OMR_PS bit in CSR6 */
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
- OMR_FDX));
- omr |= lp->infoblock_csr6;
- if (omr & OMR_PS) omr |= OMR_HBD;
- outl(omr, DE4X5_OMR);
-
- /* Soft Reset */
- RESET_DE4X5;
-
- /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else if ((lp->chipset & ~0x0ff) == DC2114x) {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
- }
-
- /* Restore CSR6 */
- outl(omr, DE4X5_OMR);
-
- /* Reset CSR8 */
- inl(DE4X5_MFC);
-
- return omr;
-}
-
-static void
-gep_wr(s32 data, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- outl(data, DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
- }
-}
-
-static int
-gep_rd(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- return inl(DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return inl(DE4X5_SIGR) & 0x000fffff;
- }
-
- return 0;
-}
-
-static void
-yawn(struct net_device *dev, int state)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
-
- if(lp->bus == EISA) {
- switch(state) {
- case WAKEUP:
- outb(WAKEUP, PCI_CFPM);
- mdelay(10);
- break;
-
- case SNOOZE:
- outb(SNOOZE, PCI_CFPM);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- outb(SLEEP, PCI_CFPM);
- break;
- }
- } else {
- struct pci_dev *pdev = to_pci_dev (lp->gendev);
- switch(state) {
- case WAKEUP:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- mdelay(10);
- break;
-
- case SNOOZE:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
- break;
- }
- }
-}
-
-static void
-de4x5_parse_params(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- char *p, *q, t;
-
- lp->params.fdx = false;
- lp->params.autosense = AUTO;
-
- if (args == NULL) return;
-
- if ((p = strstr(args, dev->name))) {
- if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
- t = *q;
- *q = '\0';
-
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
-
- if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP_NW")) {
- lp->params.autosense = TP_NW;
- } else if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "BNC")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "AUI")) {
- lp->params.autosense = AUI;
- } else if (strstr(p, "10Mb")) {
- lp->params.autosense = _10Mb;
- } else if (strstr(p, "100Mb")) {
- lp->params.autosense = _100Mb;
- } else if (strstr(p, "AUTO")) {
- lp->params.autosense = AUTO;
- }
- }
- *q = t;
- }
-}
-
-static void
-de4x5_dbg_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
- printk("\tphysical address: %pM\n", dev->dev_addr);
- printk("Descriptor head addresses:\n");
- printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
- printk("Descriptor addresses:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
- printk("Descriptor buffers:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size:\nRX: %d\nTX: %d\n",
- (short)lp->rxRingSize,
- (short)lp->txRingSize);
- }
-}
-
-static void
-de4x5_dbg_mii(struct net_device *dev, int k)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (de4x5_debug & DEBUG_MII) {
- printk("\nMII device address: %d\n", lp->phy[k].addr);
- printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
- printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
- printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
- }
- printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
- printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
- } else {
- printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
- }
- }
-}
-
-static void
-de4x5_dbg_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->media != lp->c_media) {
- if (de4x5_debug & DEBUG_MEDIA) {
- printk("%s: media is %s%s\n", dev->name,
- (lp->media == NC ? "unconnected, link down or incompatible connection" :
- (lp->media == TP ? "TP" :
- (lp->media == ANS ? "TP/Nway" :
- (lp->media == BNC ? "BNC" :
- (lp->media == AUI ? "AUI" :
- (lp->media == BNC_AUI ? "BNC/AUI" :
- (lp->media == EXT_SIA ? "EXT SIA" :
- (lp->media == _100Mb ? "100Mb/s" :
- (lp->media == _10Mb ? "10Mb/s" :
- "???"
- ))))))))), (lp->fdx?" full duplex.":"."));
- }
- lp->c_media = lp->media;
- }
-}
-
-static void
-de4x5_dbg_srom(struct de4x5_srom *p)
-{
- int i;
-
- if (de4x5_debug & DEBUG_SROM) {
- printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
- printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
- printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
- printk("SROM version: %02x\n", (u_char)(p->version));
- printk("# controllers: %02x\n", (u_char)(p->num_controllers));
-
- printk("Hardware Address: %pM\n", p->ieee_addr);
- printk("CRC checksum: %04x\n", (u_short)(p->chksum));
- for (i=0; i<64; i++) {
- printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
- }
- }
-}
-
-static void
-de4x5_dbg_rx(struct sk_buff *skb, int len)
-{
- int i, j;
-
- if (de4x5_debug & DEBUG_RX) {
- printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
- skb->data, &skb->data[6],
- (u_char)skb->data[12],
- (u_char)skb->data[13],
- len);
- for (j=0; len>0;j+=16, len-=16) {
- printk(" %03x: ",j);
- for (i=0; i<16 && i<len; i++) {
- printk("%02x ",(u_char)skb->data[i+j]);
- }
- printk("\n");
- }
- }
-}
-
-/*
-** Perform IOCTL call functions here. Some are privileged operations and
-** the CAP_NET_ADMIN capability is checked in those cases. In the normal
-** course of events this function is only used for my testing.
-*/
-static int
-de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 omr;
- union {
- u8 addr[144];
- u16 sval[72];
- u32 lval[36];
- } tmp;
- u_long flags = 0;
-
- if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
- return -EOPNOTSUPP;
-
- switch(ioc->cmd) {
- case DE4X5_GET_HWADDR: /* Get the hardware address */
- ioc->len = ETH_ALEN;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
- case DE4X5_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
- if (netif_queue_stopped(dev))
- return -EBUSY;
- netif_stop_queue(dev);
- eth_hw_addr_set(dev, tmp.addr);
- build_setup_frame(dev, PHYS_ADDR_ONLY);
- /* Set up the descriptor and give ownership to the card */
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_wake_queue(dev); /* Unlock the TX ring */
- break;
-
- case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PM;
- outl(omr, DE4X5_OMR);
- break;
-
- case DE4X5_GET_STATS: /* Get the driver statistics */
- {
- struct pkt_stats statbuf;
- ioc->len = sizeof(statbuf);
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(&statbuf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
- break;
- }
- case DE4X5_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DE4X5_GET_OMR: /* Get the OMR Register contents */
- tmp.addr[0] = inl(DE4X5_OMR);
- if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
- break;
-
- case DE4X5_SET_OMR: /* Set the OMR Register contents */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
- outl(tmp.addr[0], DE4X5_OMR);
- break;
-
- case DE4X5_GET_REG: /* Get the DE4X5 Registers */
- j = 0;
- tmp.lval[0] = inl(DE4X5_STS); j+=4;
- tmp.lval[1] = inl(DE4X5_BMR); j+=4;
- tmp.lval[2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[3] = inl(DE4X5_OMR); j+=4;
- tmp.lval[4] = inl(DE4X5_SISR); j+=4;
- tmp.lval[5] = inl(DE4X5_SICR); j+=4;
- tmp.lval[6] = inl(DE4X5_STRR); j+=4;
- tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.lval, ioc->len))
- return -EFAULT;
- break;
-
-#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
-/*
- case DE4X5_DUMP:
- j = 0;
- tmp.addr[j++] = dev->irq;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[j++] = dev->dev_addr[i];
- }
- tmp.addr[j++] = lp->rxRingSize;
- tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
- tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-
- for (i=0;i<lp->rxRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
- }
- for (i=0;i<lp->txRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
- }
-
- tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[j>>2] = lp->chipset; j+=4;
- if (lp->chipset == DC21140) {
- tmp.lval[j>>2] = gep_rd(dev); j+=4;
- } else {
- tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
- }
- tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- tmp.lval[j>>2] = lp->active; j+=4;
- tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- } else {
- tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- }
-
- tmp.addr[j++] = lp->txRingSize;
- tmp.addr[j++] = netif_queue_stopped(dev);
-
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
-*/
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
-
-static int __init de4x5_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_PCI
- err = pci_register_driver(&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- err |= eisa_driver_register (&de4x5_eisa_driver);
-#endif
-
- return err;
-}
-
-static void __exit de4x5_module_exit (void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver (&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&de4x5_eisa_driver);
-#endif
-}
-
-module_init (de4x5_module_init);
-module_exit (de4x5_module_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
deleted file mode 100644
index 1bfdc9b117f6..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.h
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** DC21040 CSR<1..15> Register Address Map
-*/
-#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
-#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
-#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
-#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
-#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
-#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
-#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
-#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
-#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
-#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
-#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
-#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
-#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
-#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
-#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
-#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
-#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
-#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
-#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
-#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
-#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
-
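/*
** [Sketch] The CSR offsets above are 8 bytes apart in I/O space, and
** the `<< lp->bus` shift doubles that stride on the bus type encoded
** as 1, so one table serves both register spacings. A standalone
** sketch with a hypothetical iobase:
*/
#include <stdio.h>

static unsigned long csr_addr(unsigned long iobase, int bus, int csr)
{
    return iobase + ((csr * 0x08UL) << bus);   /* CSR<n> at iobase + n*8 << bus */
}

int main(void)
{
    unsigned long iobase = 0xec00;             /* hypothetical base address */

    printf("CSR6 (bus=0): %#lx\n", csr_addr(iobase, 0, 6));  /* iobase + 0x30 */
    printf("CSR6 (bus=1): %#lx\n", csr_addr(iobase, 1, 6));  /* iobase + 0x60 */
    return 0;
}
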
-/*
-** EISA Register Address Map
-*/
-#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase+0x0c84 /* EISA Control Register */
-#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
-#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
-#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
-#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
-#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
-
-/*
-** PCI/EISA Configuration Registers Address Map
-*/
-#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
-#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
-#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
-#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
-#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
-#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
-#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
-#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
-#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
-#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
-#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
-
-/*
-** EISA Configuration Register 0 bit definitions
-*/
-#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
-#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
-#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
-#define ER0_ISTS 0x10 /* Interrupt Status (X) */
-#define ER0_LI 0x08 /* Latch Interrupts */
-#define ER0_INTL 0x06 /* INTerrupt Level */
-#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
-
-/*
-** EISA Configuration Register 1 bit definitions
-*/
-#define ER1_IAM 0xe0 /* ISA Address Mode */
-#define ER1_IAE 0x10 /* ISA Addressing Enable */
-#define ER1_UPIN 0x0f /* User Pins */
-
-/*
-** EISA Configuration Register 2 bit definitions
-*/
-#define ER2_BRS 0xc0 /* Boot ROM Size */
-#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
-
-/*
-** EISA Configuration Register 3 bit definitions
-*/
-#define ER3_BWE 0x40 /* Burst Write Enable */
-#define ER3_BRE 0x04 /* Burst Read Enable */
-#define ER3_LSR 0x02 /* Local Software Reset */
-
-/*
-** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
-** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
-** the configuration revision register step number.
-*/
-#define CFID_DID 0xff00 /* Device ID */
-#define CFID_VID 0x00ff /* Vendor ID */
-#define DC21040_DID 0x0200 /* Unique Device ID # */
-#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
-#define DC21041_DID 0x1400 /* Unique Device ID # */
-#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
-#define DC21140_DID 0x0900 /* Unique Device ID # */
-#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
-#define DC2114x_DID 0x1900 /* Unique Device ID # */
-#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
-
-/*
-** Chipset defines
-*/
-#define DC21040 DC21040_DID
-#define DC21041 DC21041_DID
-#define DC21140 DC21140_DID
-#define DC2114x DC2114x_DID
-#define DC21142 (DC2114x_DID | 0x0010)
-#define DC21143 (DC2114x_DID | 0x0030)
-#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
-
-#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
-#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
-#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
-#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
-#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
-#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
-
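/*
** [Sketch] The device IDs above are kept left-shifted by 8 so that a
** step value read from the configuration revision register can be
** folded into the low bits, which is how DC21142 (0x..10) and DC21143
** (0x..30) are told apart under the shared DC2114x ID. A standalone
** sketch, assuming the step lives in bits 4:7 of a hypothetical CFRV
** value:
*/
#include <stdio.h>

#define DC2114x_DID 0x1900
#define DC21142     (DC2114x_DID | 0x0010)
#define DC21143     (DC2114x_DID | 0x0030)

static unsigned int chipset_of(unsigned int did, unsigned int cfrv)
{
    return did | (cfrv & 0x00f0);          /* fold the CFRV step into the ID */
}

int main(void)
{
    unsigned int cfrv = 0x02000030;        /* hypothetical revision register */
    unsigned int chip = chipset_of(DC2114x_DID, cfrv);

    printf("%s\n", chip == DC21143 ? "DC21143" :
                   chip == DC21142 ? "DC21142" : "DC2114x");
    return 0;
}
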
-/*
-** PCI Configuration Command/Status Register (PCI_CFCS)
-*/
-#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
-#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
-#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
-#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
-#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
-#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
-#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
-#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
-#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
-#define CFCS_MO 0x00000004 /* Master Operation (C) */
-#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
-#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
-
-/*
-** PCI Configuration Revision Register (PCI_CFRV)
-*/
-#define CFRV_BC 0xff000000 /* Base Class */
-#define CFRV_SC 0x00ff0000 /* Subclass */
-#define CFRV_RN 0x000000f0 /* Revision Number */
-#define CFRV_SN 0x0000000f /* Step Number */
-#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
-#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
-#define STEP_NUMBER 0x00000020 /* Increments for future chips */
-#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
-#define CFRV_MASK 0xffff0000 /* Register mask */
-
-/*
-** PCI Configuration Latency Timer Register (PCI_CFLT)
-*/
-#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
-
-/*
-** PCI Configuration Base I/O Address Register (PCI_CBIO)
-*/
-#define CBIO_MASK -128 /* Base I/O Address Mask */
-#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
-
-/*
-** PCI Configuration Card Information Structure Register (PCI_CCIS)
-*/
-#define CCIS_ROMI 0xf0000000 /* ROM Image */
-#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
-#define CCIS_ASI 0x00000007 /* Address Space Indicator */
-
-/*
-** PCI Configuration Subsystem ID Register (PCI_SSID)
-*/
-#define SSID_SSID 0xffff0000 /* Subsystem ID */
-#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
-
-/*
-** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
-*/
-#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
-#define CBER_ROME 0x00000001 /* ROM Enable */
-
-/*
-** PCI Configuration Interrupt Register (PCI_CFIT)
-*/
-#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
-#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
-#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
-#define CFIT_IRQL 0x000000ff /* Interrupt Line */
-
-/*
-** PCI Configuration Power Management Area Register (PCI_CFPM)
-*/
-#define SLEEP 0x80 /* Power Saving Sleep Mode */
-#define SNOOZE 0x40 /* Power Saving Snooze Mode */
-#define WAKEUP 0x00 /* Power Saving Wakeup */
-
-#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
-#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
-
-/*
-** DC21040 Bus Mode Register (DE4X5_BMR)
-*/
-#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
-#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
-#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
-#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
-#define BMR_CAL 0x0000c000 /* Cache Alignment */
-#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
-#define BMR_BLE 0x00000080 /* Big/Little Endian */
-#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
-#define BMR_BAR 0x00000002 /* Bus ARbitration */
-#define BMR_SWR 0x00000001 /* Software Reset */
-
- /* Timings here are for 10BASE-T/AUI only*/
-#define TAP_NOPOLL 0x00000000 /* No automatic polling */
-#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
-#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
-#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
-#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
-#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
-#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
-#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
-
-#define CAL_NOUSE 0x00000000 /* Not used */
-#define CAL_8LONG 0x00004000 /* 8-longword alignment */
-#define CAL_16LONG 0x00008000 /* 16-longword alignment */
-#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
-
-#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
-#define PBL_1 0x00000100 /* 1 longword DMA burst length */
-#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
-#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
-#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
-#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
-#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
-
-#define DSL_0 0x00000000 /* 0 longword / descriptor */
-#define DSL_1 0x00000004 /* 1 longword / descriptor */
-#define DSL_2 0x00000008 /* 2 longwords / descriptor */
-#define DSL_4 0x00000010 /* 4 longwords / descriptor */
-#define DSL_8 0x00000020 /* 8 longwords / descriptor */
-#define DSL_16 0x00000040 /* 16 longwords / descriptor */
-#define DSL_32 0x00000080 /* 32 longwords / descriptor */
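The TAP, CAL, PBL and DSL fields above are OR'ed together into a single
bus-mode (CSR0) write. A minimal sketch of composing such a value, shown
for illustration only using the outl()/DE4X5_BMR convention of the macros
later in this file (not code taken from the driver):

	u32 bmr;

	/* Hypothetical bus-mode setup: no automatic TX polling,
	** 16-longword cache alignment, 32-longword DMA bursts and
	** contiguous descriptors (DSL_0).
	*/
	bmr = TAP_NOPOLL | CAL_16LONG | PBL_32 | DSL_0;
	outl(bmr, DE4X5_BMR);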
-
-/*
-** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
-*/
-#define TPD 0x00000001 /* Transmit Poll Demand */
-
-/*
-** DC21040 Receive Poll Demand Register (DE4X5_RPD)
-*/
-#define RPD 0x00000001 /* Receive Poll Demand */
-
-/*
-** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
-*/
-#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
-
-/*
-** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
-*/
-#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
-
-/*
-** Status Register (DE4X5_STS)
-*/
-#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
-#define STS_BE 0x03800000 /* Bus Error Bits */
-#define STS_TS 0x00700000 /* Transmit Process State */
-#define STS_RS 0x000e0000 /* Receive Process State */
-#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define STS_ER 0x00004000 /* Early Receive */
-#define STS_FBE 0x00002000 /* Fatal Bus Error */
-#define STS_SE 0x00002000 /* System Error */
-#define STS_LNF 0x00001000 /* Link Fail */
-#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
-#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
-#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define STS_AT 0x00000400 /* AUI/TP Pin */
-#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
-#define STS_RPS 0x00000100 /* Receive Process Stopped */
-#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define STS_RI 0x00000040 /* Receive Interrupt */
-#define STS_UNF 0x00000020 /* Transmit Underflow */
-#define STS_LNP 0x00000010 /* Link Pass */
-#define STS_ANC 0x00000010 /* Autonegotiation Complete */
-#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
-#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define STS_TPS 0x00000002 /* Transmit Process Stopped */
-#define STS_TI 0x00000001 /* Transmit Interrupt */
-
-#define EB_PAR 0x00000000 /* Parity Error */
-#define EB_MA 0x00800000 /* Master Abort */
-#define EB_TA 0x01000000 /* Target Abort */
-#define EB_RES0 0x01800000 /* Reserved */
-#define EB_RES1 0x02000000 /* Reserved */
-
-#define TS_STOP 0x00000000 /* Stopped */
-#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
-#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
-#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
-#define TS_RES 0x00400000 /* Reserved */
-#define TS_SPKT 0x00500000 /* Setup Packet */
-#define TS_SUSP 0x00600000 /* Suspended */
-#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
-
-#define RS_STOP 0x00000000 /* Stopped */
-#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
-#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
-#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
-#define RS_SUSP 0x00080000 /* Suspended */
-#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
-#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
-#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
-
-#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
-
-/*
-** Operation Mode Register (DE4X5_OMR)
-*/
-#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
-#define OMR_RA 0x40000000 /* Receive All */
-#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
-#define OMR_SCR 0x01000000 /* Scrambler Mode */
-#define OMR_PCS 0x00800000 /* PCS Function */
-#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
-#define OMR_SF 0x00200000 /* Store and Forward */
-#define OMR_HBD 0x00080000 /* HeartBeat Disable */
-#define OMR_PS 0x00040000 /* Port Select */
-#define OMR_CA 0x00020000 /* Capture Effect Enable */
-#define OMR_BP 0x00010000 /* Back Pressure */
-#define OMR_TR 0x0000c000 /* Threshold Control Bits */
-#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
-#define OMR_FC 0x00001000 /* Force Collision Mode */
-#define OMR_OM 0x00000c00 /* Operating Mode */
-#define OMR_FDX 0x00000200 /* Full Duplex Mode */
-#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
-#define OMR_PM 0x00000080 /* Pass All Multicast */
-#define OMR_PR 0x00000040 /* Promiscuous Mode */
-#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
-#define OMR_IF 0x00000010 /* Inverse Filtering */
-#define OMR_PB 0x00000008 /* Pass Bad Frames */
-#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
-#define OMR_SR 0x00000002 /* Start/Stop Receive */
-#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
-
-#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
-#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
-#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
-#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
-
-#define OMR_DEF (OMR_SDP)
-#define OMR_SIA (OMR_SDP | OMR_TTM)
-#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
-#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
-#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
-
-/*
-** DC21040 Interrupt Mask Register (DE4X5_IMR)
-*/
-#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
-#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
-#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
-#define IMR_ERM 0x00004000 /* Early Receive Mask */
-#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
-#define IMR_SEM 0x00002000 /* System Error Mask */
-#define IMR_LFM 0x00001000 /* Link Fail Mask */
-#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
-#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
-#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
-#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
-#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
-#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
-#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
-#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
-#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
-#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
-#define IMR_LPM 0x00000010 /* Link Pass */
-#define IMR_TJM 0x00000008 /* Transmit Jabber Time-Out Mask */
-#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
-#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
-#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
-
-/*
-** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
-*/
-#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
-#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
-#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
-#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
-#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
-
-/*
-** DC21040 Ethernet Address PROM (DE4X5_APROM)
-*/
-#define APROM_DN 0x80000000 /* Data Not Valid */
-#define APROM_DT 0x000000ff /* Address Byte */
-
-/*
-** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
-*/
-#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define BROM_RD 0x00004000 /* Read from Boot ROM */
-#define BROM_WR 0x00002000 /* Write to Boot ROM */
-#define BROM_BR 0x00001000 /* Select Boot ROM when set */
-#define BROM_SR 0x00000800 /* Select Serial ROM when set */
-#define BROM_REG 0x00000400 /* External Register Select */
-#define BROM_DT 0x000000ff /* Data Byte */
-
-/*
-** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
-*/
-#define MII_MDI 0x00080000 /* MII Management Data In */
-#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
-#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
-#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
-#define MII_MDT 0x00020000 /* MII Management Data Out */
-#define MII_MDC 0x00010000 /* MII Management Clock */
-#define MII_RD 0x00004000 /* Read from MII */
-#define MII_WR 0x00002000 /* Write to MII */
-#define MII_SEL 0x00000800 /* Select MII when RESET */
-
-#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define SROM_RD 0x00004000 /* Read from Boot ROM */
-#define SROM_WR 0x00002000 /* Write to Boot ROM */
-#define SROM_BR 0x00001000 /* Select Boot ROM when set */
-#define SROM_SR 0x00000800 /* Select Serial ROM when set */
-#define SROM_REG 0x00000400 /* External Register Select */
-#define SROM_DT 0x000000ff /* Data Byte */
-
-#define DT_OUT 0x00000008 /* Serial Data Out */
-#define DT_IN 0x00000004 /* Serial Data In */
-#define DT_CLK 0x00000002 /* Serial ROM Clock */
-#define DT_CS 0x00000001 /* Serial ROM Chip Select */
-
-#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
-#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
-#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
-#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
-
-#define MII_CR 0x00 /* MII Management Control Register */
-#define MII_SR 0x01 /* MII Management Status Register */
-#define MII_ID0 0x02 /* PHY Identifier Register 0 */
-#define MII_ID1 0x03 /* PHY Identifier Register 1 */
-#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
-#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
-#define MII_ANE 0x06 /* Auto Negotiation Expansion */
-#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
-
-#define DE4X5_MAX_MII 32 /* Maximum number of MII PHY devices */
-
-/*
-** MII Management Control Register
-*/
-#define MII_CR_RST 0x8000 /* RESET the PHY chip */
-#define MII_CR_LPBK 0x4000 /* Loopback enable */
-#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
-#define MII_CR_10 0x0000 /* Set 10Mb/s */
-#define MII_CR_100 0x2000 /* Set 100Mb/s */
-#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
-#define MII_CR_PD 0x0800 /* Power Down */
-#define MII_CR_ISOL 0x0400 /* Isolate Mode */
-#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
-#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
-#define MII_CR_CTE 0x0080 /* Collision Test Enable */
-
-/*
-** MII Management Status Register
-*/
-#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
-#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
-#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
-#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
-#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
-#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete */
-#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
-#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
-#define MII_SR_LKS 0x0004 /* Link Status */
-#define MII_SR_JABD 0x0002 /* Jabber Detect */
-#define MII_SR_XC 0x0001 /* Extended Capabilities */
-
-/*
-** MII Management Auto Negotiation Advertisement Register
-*/
-#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** MII Management Auto Negotiation Remote End Register
-*/
-#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
-#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
-#define MII_ANLPA_RF 0x2000 /* Remote Fault */
-#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** SROM Media Definitions (ABG SROM Section)
-*/
-#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
-#define MEDIA_MII 0x0040 /* MII Present on the adapter */
-#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
-#define MEDIA_AUI 0x0004 /* AUI Media present */
-#define MEDIA_TP 0x0002 /* TP Media present */
-#define MEDIA_BNC 0x0001 /* BNC Media present */
-
-/*
-** SROM Definitions (Digital Semiconductor Format)
-*/
-#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
-#define SROM_SSID 0x0002 /* Sub-system ID offset */
-#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
-#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
-#define SROM_IDCRC 0x0010 /* ID Block CRC offset */
-#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
-#define SROM_SFV 0x0012 /* SROM Format Version offset */
-#define SROM_CCNT 0x0013 /* Controller Count offset */
-#define SROM_HWADD 0x0014 /* Hardware Address offset */
-#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset */
-#define SROM_CRC 0x007e /* SROM CRC offset */
-
-/*
-** SROM Media Connection Definitions
-*/
-#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
-#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
-#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
-#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
-#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
-#define SROM_100BT4 0x0006 /* 100BASE-T4 */
-#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
-#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
-#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
-#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
-#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
-#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
-#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
-#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
-#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
-#define SROM_PAO 0x8800 /* Powerup Autosense Only */
-#define SROM_NSMI 0xffff /* No Selected Media Information */
-
-/*
-** SROM Media Definitions
-*/
-#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
-#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
-#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
-#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
-#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
-#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
-
-#define BLOCK_LEN 0x7f /* Extended blocks length mask */
-#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
-#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
-
-/*
-** SROM Compact Format Block Masks
-*/
-#define COMPACT_FI 0x80 /* Format Indicator */
-#define COMPACT_LEN 0x04 /* Length */
-#define COMPACT_MC 0x3f /* Media Code */
-
-/*
-** SROM Extended Format Block Type 0 Masks
-*/
-#define BLOCK0_FI 0x80 /* Format Indicator */
-#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
-#define BLOCK0_MC 0x3f /* Media Code */
-
-/*
-** DC21040 Full Duplex Register (DE4X5_FDR)
-*/
-#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
-
-/*
-** DC21041 General Purpose Timer Register (DE4X5_GPT)
-*/
-#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
-#define GPT_VAL 0x0000ffff /* Timer Value */
-
-/*
-** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
-*/
-/* Valid ONLY for DE500 hardware */
-#define GEP_LNP 0x00000080 /* Link Pass (input) */
-#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
-#define GEP_SDET 0x00000020 /* Signal Detect (input) */
-#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
-#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
-#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
-#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
-#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
-#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
-#define GEP_CTRL 0x00000100 /* GEP control bit */
-
-/*
-** SIA Register Defaults
-*/
-#define CSR13 0x00000001
-#define CSR14 0x0003ff7f /* Autonegotiation disabled */
-#define CSR15 0x00000008
-
-/*
-** SIA Status Register (DE4X5_SISR)
-*/
-#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
-#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
-#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
-#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
-#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
-#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
-#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel. */
-#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
-#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
-#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
-#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
-#define SISR_DAO 0x00000080 /* PLL All One */
-#define SISR_DAZ 0x00000040 /* PLL All Zero */
-#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
-#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
-#define SISR_APS 0x00000008 /* Auto Polarity State */
-#define SISR_LKF 0x00000004 /* Link Fail Status */
-#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
-#define SISR_NCR 0x00000002 /* Network Connection Error */
-#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
-#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
-#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
-
-#define ANS_NDIS 0x00000000 /* Nway disable */
-#define ANS_TDIS 0x00001000 /* Transmit Disable */
-#define ANS_ADET 0x00002000 /* Ability Detect */
-#define ANS_ACK 0x00003000 /* Acknowledge */
-#define ANS_CACK 0x00004000 /* Complete Acknowledge */
-#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
-#define ANS_LCHK 0x00006000 /* Link Check */
-
-#define SISR_RST 0x00000301 /* CSR12 reset */
-#define SISR_ANR 0x00001301 /* Autonegotiation restart */
-
-/*
-** SIA Connectivity Register (DE4X5_SICR)
-*/
-#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
-#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
-#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
-#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
-#define SICR_IE 0x00001000 /* Input Enable */
-#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
-#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
-#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs */
-#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - APLL Sigs */
-#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test */
-#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
-#define SICR_ASE 0x00000080 /* APLL Start Enable */
-#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
-#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
-#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
-#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
-#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
-#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
-#define SICR_SRL 0x00000001 /* SIA Reset */
-#define SIA_RESET 0x00000000 /* SIA Reset Value */
-
-/*
-** SIA Transmit and Receive Register (DE4X5_STRR)
-*/
-#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
-#define STRR_SPP 0x00004000 /* Set Polarity Plus */
-#define STRR_APE 0x00002000 /* Auto Polarity Enable */
-#define STRR_LTE 0x00001000 /* Link Test Enable */
-#define STRR_SQE 0x00000800 /* Signal Quality Enable */
-#define STRR_CLD 0x00000400 /* Collision Detect Enable */
-#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
-#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
-#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
-#define STRR_HDE 0x00000040 /* Half Duplex Enable */
-#define STRR_CPEN 0x00000030 /* Compensation Enable */
-#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
-#define STRR_DREN 0x00000004 /* Driver Enable */
-#define STRR_LBK 0x00000002 /* Loopback Enable */
-#define STRR_ECEN 0x00000001 /* Encoder Enable */
-#define STRR_RESET 0xffffffff /* Reset value for STRR */
-
-/*
-** SIA General Register (DE4X5_SIGR)
-*/
-#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
-#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
-#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
-#define SIGR_CWE 0x08000000 /* Control Write Enable */
-#define SIGR_RME 0x04000000 /* Receive Match Enable */
-#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
-#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
-#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
-#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
-#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
-#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
-#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
-#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
-#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
-#define SIGR_FRL 0x00002000 /* Force Receiver Low */
-#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
-#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
-#define SIGR_FLF 0x00000400 /* Force Link Fail */
-#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
-#define SIGR_TSCK 0x00000100 /* Test Clock */
-#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
-#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
-#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
-#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
-#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
-#define SIGR_JCK 0x00000004 /* Jabber Clock */
-#define SIGR_HUJ 0x00000002 /* Host Unjab */
-#define SIGR_JBD 0x00000001 /* Jabber Disable */
-#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
-
-/*
-** Receive Descriptor Bit Summary
-*/
-#define R_OWN 0x80000000 /* Own Bit */
-#define RD_FF 0x40000000 /* Filtering Fail */
-#define RD_FL 0x3fff0000 /* Frame Length */
-#define RD_ES 0x00008000 /* Error Summary */
-#define RD_LE 0x00004000 /* Length Error */
-#define RD_DT 0x00003000 /* Data Type */
-#define RD_RF 0x00000800 /* Runt Frame */
-#define RD_MF 0x00000400 /* Multicast Frame */
-#define RD_FS 0x00000200 /* First Descriptor */
-#define RD_LS 0x00000100 /* Last Descriptor */
-#define RD_TL 0x00000080 /* Frame Too Long */
-#define RD_CS 0x00000040 /* Collision Seen */
-#define RD_FT 0x00000020 /* Frame Type */
-#define RD_RJ 0x00000010 /* Receive Watchdog */
-#define RD_RE 0x00000008 /* Report on MII Error */
-#define RD_DB 0x00000004 /* Dribbling Bit */
-#define RD_CE 0x00000002 /* CRC Error */
-#define RD_OF 0x00000001 /* Overflow */
-
-#define RD_RER 0x02000000 /* Receive End Of Ring */
-#define RD_RCH 0x01000000 /* Second Address Chained */
-#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
-#define RD_RBS1 0x000007ff /* Buffer 1 Size */
-
-/*
-** Transmit Descriptor Bit Summary
-*/
-#define T_OWN 0x80000000 /* Own Bit */
-#define TD_ES 0x00008000 /* Error Summary */
-#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
-#define TD_LO 0x00000800 /* Loss Of Carrier */
-#define TD_NC 0x00000400 /* No Carrier */
-#define TD_LC 0x00000200 /* Late Collision */
-#define TD_EC 0x00000100 /* Excessive Collisions */
-#define TD_HF 0x00000080 /* Heartbeat Fail */
-#define TD_CC 0x00000078 /* Collision Counter */
-#define TD_LF 0x00000004 /* Link Fail */
-#define TD_UF 0x00000002 /* Underflow Error */
-#define TD_DE 0x00000001 /* Deferred */
-
-#define TD_IC 0x80000000 /* Interrupt On Completion */
-#define TD_LS 0x40000000 /* Last Segment */
-#define TD_FS 0x20000000 /* First Segment */
-#define TD_FT1 0x10000000 /* Filtering Type */
-#define TD_SET 0x08000000 /* Setup Packet */
-#define TD_AC 0x04000000 /* Add CRC Disable */
-#define TD_TER 0x02000000 /* Transmit End Of Ring */
-#define TD_TCH 0x01000000 /* Second Address Chained */
-#define TD_DPD 0x00800000 /* Disabled Padding */
-#define TD_FT0 0x00400000 /* Filtering Type */
-#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
-#define TD_TBS1 0x000007ff /* Buffer 1 Size */
-
-#define PERFECT_F 0x00000000
-#define HASH_F TD_FT0
-#define INVERSE_F TD_FT1
-#define HASH_O_F (TD_FT1 | TD_FT0)
-
-/*
-** Media / mode state machine definitions
-** User selectable:
-*/
-#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
-#define TP_NW 0x0002 /* 10Base-T with Nway */
-#define BNC 0x0004 /* Thinwire */
-#define AUI 0x0008 /* Thickwire */
-#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
-#define _10Mb 0x0040 /* 10Mb/s Ethernet */
-#define _100Mb 0x0080 /* 100Mb/s Ethernet */
-#define AUTO 0x4000 /* Auto sense the media or speed */
-
-/*
-** Internal states
-*/
-#define NC 0x0000 /* No Connection */
-#define ANS 0x0020 /* Intermediate AutoNegotiation State */
-#define SPD_DET 0x0100 /* Parallel speed detection */
-#define INIT 0x0200 /* Initial state */
-#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
-#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
-#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
-#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
-#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
-#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
-#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
-#define MII 0x1000 /* MII on the 21143 */
-
-#define TIMER_CB 0x80000000 /* Timer callback detection */
-
-/*
-** DE4X5 DEBUG Options
-*/
-#define DEBUG_NONE 0x0000 /* No DEBUG messages */
-#define DEBUG_VERSION 0x0001 /* Print version message */
-#define DEBUG_MEDIA 0x0002 /* Print media messages */
-#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
-#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
-#define DEBUG_SROM 0x0010 /* Print SROM messages */
-#define DEBUG_MII 0x0020 /* Print MII messages */
-#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
-#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
-#define DEBUG_PCICFG 0x0100
-#define DEBUG_ALL 0x01ff
-
-/*
-** Miscellaneous
-*/
-#define PCI 0
-#define EISA 1
-
-#define DE4X5_HASH_TABLE_LEN 512 /* Bits */
-#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */
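These two values implement the imperfect multicast filter: each address is
hashed to a 9-bit index that selects one bit of the 512-bit table. A minimal
sketch of the index calculation, assuming the little-endian Ethernet CRC
helper from <linux/crc32.h> (the function name below is hypothetical; the
actual setup-frame code in de4x5.c may differ in detail):

	#include <linux/crc32.h>	/* ether_crc_le() */
	#include <linux/if_ether.h>	/* ETH_ALEN */

	/* Map a multicast address to a bit index in the 512-bit hash
	** table: the 9 least significant bits of the little-endian
	** CRC-32 of the address.
	*/
	static u16 de4x5_hash_index(const u8 *addr)
	{
		return ether_crc_le(ETH_ALEN, addr) & DE4X5_HASH_BITS;
	}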
-
-#define SETUP_FRAME_LEN 192 /* Bytes */
-#define IMPERF_PA_OFFSET 156 /* Bytes */
-
-#define POLL_DEMAND 1
-
-#define LOST_MEDIA_THRESHOLD 3
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define DE4X5_STRLEN 8
-
-#define DE4X5_INIT 0 /* Initialisation time */
-#define DE4X5_RUN 1 /* Run time */
-
-#define DE4X5_SAVE_STATE 0
-#define DE4X5_RESTORE_STATE 1
-
-/*
-** Address Filtering Modes
-*/
-#define PERFECT 0 /* 16 perfect physical addresses */
-#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
-#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
-#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
-
-#define ALL 0 /* Clear out all the setup frame */
-#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
-
-/*
-** Adapter state
-*/
-#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
-#define CLOSED 1 /* Ready for opening */
-#define OPEN 2 /* Running */
-
-/*
-** Various wait times
-*/
-#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
-#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
-
-/*
-** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
-** the vendors seem split 50-50 on how to calculate the OUI register values
-** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
-*/
-#define NATIONAL_TX 0x2000
-#define BROADCOM_T4 0x03e0
-#define SEEQ_T4 0x0016
-#define CYPRESS_T4 0x0014
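Since only the raw register 2 value is compared, the vendor check can be
sketched as below. This is an illustration reusing the mii_rd() helper that
the macros which follow already call; the switch body is an assumption, not
the driver's actual logic:

	/* Hypothetical: classify the PHY by its raw MII register 2 value. */
	int id2 = mii_rd(MII_ID0, lp->phy[lp->active].addr, DE4X5_MII);

	switch (id2) {
	case NATIONAL_TX:	/* gets a reg 0x18 fixup in SET_100Mb */
	case BROADCOM_T4:
	case SEEQ_T4:
	case CYPRESS_T4:
		/* known 100Mb/s-capable PHY */
		break;
	default:
		/* unknown vendor: treat as a generic MII PHY */
		break;
	}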
-
-/*
-** Speed Selection stuff
-*/
-#define SET_10Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-#define SET_100Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- int fdx=0;\
- if (lp->phy[lp->active].id == NATIONAL_TX) {\
- mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
- 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
- if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- if (fdx) omr |= OMR_FDX;\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/* FIX ME so I don't jam 10Mb networks */
-#define SET_100Mb_PDET {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
- lp->cache.gep = (GEP_FDXD | GEP_MODE);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct de4x5_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
-#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
-/* 0x03 and 0x04 were used before and are now obsolete. Don't use them. */
-#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
-#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
-#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
-#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
-#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
-#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
-#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
-#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
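A hypothetical user-space sketch of issuing one of these commands, assuming
the driver dispatches them through SIOCDEVPRIVATE (the <linux/sockios.h>
include above suggests as much, but the dispatch code lives in de4x5.c and
is not shown here). sock_fd stands for an already open AF_INET socket and
the interface name is a placeholder:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>

	struct de4x5_ioctl ioc;
	struct ifreq ifr;
	unsigned char hwaddr[6];

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ioc.cmd = DE4X5_GET_HWADDR;	/* ask for the station address */
	ioc.len = sizeof(hwaddr);
	ioc.data = hwaddr;
	ifr.ifr_data = (char *)&ioc;
	if (ioctl(sock_fd, SIOCDEVPRIVATE, &ifr) == 0) {
		/* hwaddr now holds the 6-byte station address */
	}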
-
-#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83f1727d1423..3188ba7b450f 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1074,8 +1074,8 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int dmfe_ethtool_set_wol(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index ba0a69b363f8..d5657ff15e3c 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -117,8 +117,8 @@ static void tulip_build_fake_mediatable(struct tulip_private *tp)
0x00, 0x06 /* ttm bit map */
};
- tp->mtable = kmalloc(sizeof(struct mediatable) +
- sizeof(struct medialeaf), GFP_KERNEL);
+ tp->mtable = devm_kmalloc(&tp->pdev->dev, sizeof(struct mediatable) +
+ sizeof(struct medialeaf), GFP_KERNEL);
if (tp->mtable == NULL)
return; /* Horrible, impossible failure. */
@@ -224,7 +224,8 @@ subsequent_board:
return;
}
- mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL);
+ mtable = devm_kmalloc(&tp->pdev->dev, struct_size(mtable, mleaf, count),
+ GFP_KERNEL);
if (mtable == NULL)
return; /* Horrible, impossible failure. */
last_mediatable = tp->mtable = mtable;
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 3fb39e32e1b4..653bde48ef44 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -21,7 +21,7 @@ void pnic_do_nway(struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
u32 phy_reg = ioread32(ioaddr + 0xB8);
- u32 new_csr6 = tp->csr6 & ~0x40C40200;
+ u32 new_csr6;
if (phy_reg & 0x78000000) { /* Ignore baseT4 */
if (phy_reg & 0x20000000) dev->if_port = 5;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 79df5a72877b..ecfad43df45a 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -858,8 +858,8 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1389,7 +1389,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* And back to business
*/
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) {
pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
return i;
@@ -1398,7 +1398,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
/* alloc_etherdev ensures aligned and zeroed private structures */
- dev = alloc_etherdev (sizeof (*tp));
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
if (!dev)
return -ENOMEM;
@@ -1408,18 +1408,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_name(pdev),
(unsigned long long)pci_resource_len (pdev, 0),
(unsigned long long)pci_resource_start (pdev, 0));
- goto err_out_free_netdev;
+ return -ENODEV;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, DRV_NAME))
- goto err_out_free_netdev;
+ if (pci_request_regions(pdev, DRV_NAME))
+ return -ENODEV;
- ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+ ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
if (!ioaddr)
- goto err_out_free_res;
+ return -ENODEV;
/*
* initialize private data structure 'tp'
@@ -1428,12 +1428,12 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = dma_alloc_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma, GFP_KERNEL);
+ tp->rx_ring = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
- goto err_out_mtable;
+ return -ENODEV;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
@@ -1689,12 +1689,13 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &tulip_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
- netif_napi_add(dev, &tp->napi, tulip_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16);
#endif
dev->ethtool_ops = &ops;
- if (register_netdev(dev))
- goto err_out_free_ring;
+ i = register_netdev(dev);
+ if (i)
+ return i;
pci_set_drvdata(pdev, dev);
@@ -1769,23 +1770,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tulip_set_power_state (tp, 0, 1);
return 0;
-
-err_out_free_ring:
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
-
-err_out_mtable:
- kfree (tp->mtable);
- pci_iounmap(pdev, ioaddr);
-
-err_out_free_res:
- pci_release_regions (pdev);
-
-err_out_free_netdev:
- free_netdev (dev);
- return -ENODEV;
}
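All of the deleted error labels are made redundant by the managed-resource
conversion above: once pcim_enable_device() has put the device into managed
mode, the region request, the pcim_iomap() mapping, the dmam_alloc_coherent()
descriptor rings and the devm_alloc_etherdev() netdev are all released
automatically when probe fails or the device is unbound, so the error paths
can simply return.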
@@ -1885,24 +1869,11 @@ static int __maybe_unused tulip_resume(struct device *dev_d)
static void tulip_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct tulip_private *tp;
if (!dev)
return;
- tp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
- kfree (tp->mtable);
- pci_iounmap(pdev, tp->base_addr);
- free_netdev (dev);
- pci_release_regions (pdev);
- pci_disable_device(pdev);
-
- /* pci_power_off (pdev, -1); */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 77d9058431e3..ff080ab0f116 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -971,8 +971,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 86b1d23eba83..37fba39c0056 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -474,8 +474,6 @@ err_out_netdev:
No extra delay is needed with 33MHz PCI, but future 66MHz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) ioread32(ee_addr)
@@ -1376,8 +1374,8 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a301f7e6a440..2c67a857a42f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1235,8 +1235,8 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "dl2k", sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c710dc17be90..43def191f26f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -340,7 +340,7 @@ enum wake_event_bits {
struct netdev_desc {
__le32 next_desc;
__le32 status;
- struct desc_frag { __le32 addr, length; } frag[1];
+ struct desc_frag { __le32 addr, length; } frag;
};
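The descriptor only ever carries a single fragment in this driver, so the
one-element array becomes a plain member and every frag[0] access below
becomes frag; the descriptor layout seen by the hardware is unchanged, and
the rewrite presumably also keeps array-bounds checkers happy (the changelog
is not shown here).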
/* Bits in netdev_desc.status */
@@ -980,8 +980,8 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
- le32_to_cpu(np->tx_ring[i].frag[0].length));
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
}
printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
ioread32(np->base + TxListPtr),
@@ -1027,7 +1027,7 @@ static void init_ring(struct net_device *dev)
np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
- np->rx_ring[i].frag[0].length = 0;
+ np->rx_ring[i].frag.length = 0;
np->rx_skbuff[i] = NULL;
}
@@ -1039,16 +1039,16 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag[0].addr = cpu_to_le32(
+ np->rx_ring[i].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag[0].addr)) {
+ np->rx_ring[i].frag.addr)) {
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
break;
}
- np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1097,12 +1097,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+ txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
skb->data, skb->len, DMA_TO_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag[0].addr))
+ txdesc->frag.addr))
goto drop_frame;
- txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
np->cur_tx++;
@@ -1151,7 +1151,7 @@ reset_tx (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
np->tx_skbuff[i] = NULL;
@@ -1271,12 +1271,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
} else {
@@ -1290,12 +1290,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
}
@@ -1372,16 +1372,16 @@ static void rx_poll(struct tasklet_struct *t)
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
dma_sync_single_for_cpu(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
dma_sync_single_for_device(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
@@ -1427,18 +1427,18 @@ static void refill_rx (struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag[0].addr = cpu_to_le32(
+ np->rx_ring[entry].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag[0].addr)) {
+ np->rx_ring[entry].frag.addr)) {
dev_kfree_skb_irq(skb);
np->rx_skbuff[entry] = NULL;
break;
}
}
/* Perhaps we need not reset this field. */
- np->rx_ring[entry].frag[0].length =
+ np->rx_ring[entry].frag.length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
cnt++;
@@ -1644,8 +1644,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
@@ -1870,14 +1870,14 @@ static int netdev_close(struct net_device *dev)
(int)(np->tx_ring_dma));
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
- i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
- np->tx_ring[i].frag[0].length);
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)(np->rx_ring_dma));
for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
- np->rx_ring[i].frag[0].length);
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
}
}
#endif /* __i386__ debugging only */
@@ -1892,19 +1892,19 @@ static int netdev_close(struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->rx_ring[i].frag[0].addr),
+ le32_to_cpu(np->rx_ring[i].frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
- np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 92462ed87bc4..08184f20f510 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,8 +725,8 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, "0", sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -788,7 +788,7 @@ static int dnet_probe(struct platform_device *pdev)
}
dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ netif_napi_add(dev, &bp->napi, dnet_poll);
dev->ethtool_ops = &dnet_ethtool_ops;
dev->base_addr = (unsigned long)bp->regs;
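This is the same NAPI API change seen in the tulip hunk above and the benet
hunk below: netif_napi_add() no longer takes a weight argument and assumes
the default NAPI_POLL_WEIGHT of 64, so dnet simply drops its explicit 64,
while tulip's non-default weight of 16 has to move to the new
netif_napi_add_weight().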
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8689d4a51fe5..61fe9625bed1 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -101,8 +101,7 @@
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
-#define BE_NAPI_WEIGHT 64
-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define MAX_NUM_POST_ERX_DB 255u
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 528eb0f223b1..08ec84cd21c0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1878,9 +1878,9 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
sizeof(adapter->fw_ver));
- strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
sizeof(adapter->fw_on_flash));
}
err:
@@ -2287,7 +2287,7 @@ err:
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data)
+ u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
- if (!status) {
+ if (!status && len > 0) {
struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ memcpy(data, resp->page_data + off, len);
}
err:
mutex_unlock(&adapter->mcc_lock);
@@ -2373,7 +2373,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@@ -2440,11 +2440,11 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
- strlcpy(adapter->phy.vendor_name, page_data +
+ strscpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
- strlcpy(adapter->phy.vendor_pn,
+ strscpy(adapter->phy.vendor_pn,
page_data + SFP_VENDOR_PN_OFFSET,
SFP_VENDOR_NAME_LEN - 1);
}
@@ -2473,7 +2473,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db1f3b908582..e2085c68c0ee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data);
+ u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index dfa784339781..77edc3d9b505 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -220,15 +220,15 @@ static void be_get_drvinfo(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
- strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
@@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev,
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
+ u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- data);
- if (status)
- goto err;
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
+
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
- status = be_cmd_read_port_transceiver_data(adapter,
- TR_PAGE_A2,
- data +
- PAGE_DATA_LEN);
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
if (status)
goto err;
}
- if (eeprom->offset)
- memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}
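The rework above replaces a read-everything-then-memcpy scheme with two bounded reads: bytes below PAGE_DATA_LEN (256, the SFF-8472 lower page) come from page A0 at their native offset, and the remainder comes from page A2 with offsets rebased by PAGE_DATA_LEN. A hedged sketch of the same windowing arithmetic; read_page() is a hypothetical stand-in for be_cmd_read_port_transceiver_data():

/* Illustrative only: mirrors the A0/A2 window split in
 * be_get_module_eeprom().
 */
static int read_eeprom_window(u32 offset, u32 len, u8 *data,
			      int (*read_page)(u8 page, u32 off,
					       u32 len, u8 *buf))
{
	u32 begin = offset;
	u32 end = offset + len;
	int status;

	if (begin < 256) {		/* lower page (A0), native offsets */
		u32 a0_end = end < 256 ? end : 256;

		status = read_page(0xA0, begin, a0_end - begin, data);
		if (status)
			return status;
		data += 256 - begin;
		begin = 256;
	}
	if (end > 256)			/* upper page (A2), rebased by 256 */
		return read_page(0xA2, begin - 256, end - begin, data);
	return 0;
}

/* e.g. offset=200, len=100 reads 56 bytes from A0 at offset 200 and the
 * remaining 44 bytes from A2 at offset 0.
 */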
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ad67b4216079..a92a74761546 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -737,9 +737,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -2982,8 +2982,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
}
return 0;
}
@@ -3178,7 +3177,7 @@ static irqreturn_t be_intx(int irq, void *dev)
}
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
- /* Return IRQ_HANDLED only for the the first spurious intr
+ /* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
* this irq as a bad one!
*/
@@ -3491,7 +3490,7 @@ static int be_msix_register(struct be_adapter *adapter)
if (status)
goto err_msix;
- irq_set_affinity_hint(vec, eqo->affinity_mask);
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -3552,7 +3551,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* MSIx */
for_all_evt_queues(adapter, eqo, i) {
vec = be_msix_vec_get(adapter, eqo);
- irq_set_affinity_hint(vec, NULL);
+ irq_update_affinity_hint(vec, NULL);
free_irq(vec, eqo);
}
@@ -5204,7 +5203,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
+ netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
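The netif_napi_add() calls throughout this series lose their weight argument because the kernel API dropped it; the default NAPI_POLL_WEIGHT (64) is now implied, and only drivers with a genuinely non-default weight spell it out via netif_napi_add_weight(). Both forms as they appear later in this series:

/* Default weight (64) is implied: */
netif_napi_add(adapter->netdev, &eqo->napi, be_poll);

/* A non-default weight is made explicit: */
netif_napi_add_weight(ndev, &priv->napi, nps_enet_poll,
		      NPS_ENET_NAPI_POLL_WEIGHT);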
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
index f4e2b1102d8f..3df6bf476ae7 100644
--- a/drivers/net/ethernet/engleder/Kconfig
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -21,6 +21,7 @@ config TSNEP
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
+ select PAGE_POOL
help
Support for the Engleder TSN endpoint Ethernet MAC IP Core.
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index cce2191cb889..b6e3b16623de 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_TSNEP) += tsnep.o
tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
- $(tsnep-y)
+ tsnep_rxnfc.o $(tsnep-y)
tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 23bbece6b7de..09a723b827c7 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -21,8 +21,6 @@
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
-#define TSNEP_QUEUES 1
-
struct tsnep_gcl {
void __iomem *addr;
@@ -39,6 +37,24 @@ struct tsnep_gcl {
bool change;
};
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
struct tsnep_tx_entry {
struct tsnep_tx_desc *desc;
struct tsnep_tx_desc_wb *desc_wb;
@@ -55,6 +71,7 @@ struct tsnep_tx_entry {
struct tsnep_tx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -79,14 +96,15 @@ struct tsnep_rx_entry {
u32 properties;
- struct sk_buff *skb;
+ struct page *page;
size_t len;
- DEFINE_DMA_UNMAP_ADDR(dma);
+ dma_addr_t dma;
};
struct tsnep_rx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -95,6 +113,7 @@ struct tsnep_rx {
int read;
u32 owner_counter;
int increment_owner_counter;
+ struct page_pool *page_pool;
u32 packets;
u32 bytes;
@@ -104,12 +123,14 @@ struct tsnep_rx {
struct tsnep_queue {
struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 9];
struct tsnep_tx *tx;
struct tsnep_rx *rx;
struct napi_struct napi;
+ int irq;
u32 irq_mask;
};
@@ -125,7 +146,6 @@ struct tsnep_adapter {
struct platform_device *pdev;
struct device *dmadev;
void __iomem *addr;
- int irq;
bool gate_control;
/* gate control lock */
@@ -140,6 +160,12 @@ struct tsnep_adapter {
/* ptp clock lock */
spinlock_t ptp_lock;
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
int num_tx_queues;
struct tsnep_tx tx[TSNEP_MAX_QUEUES];
int num_rx_queues;
@@ -160,6 +186,18 @@ void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
int tsnep_ethtool_get_test_count(void);
void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index e6760dc68ddd..a713a126b227 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -250,6 +250,44 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int tsnep_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int tsnep_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -287,6 +325,8 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_strings = tsnep_ethtool_get_strings,
.get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
.get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
index 71cc8577d640..315dada75323 100644
--- a/drivers/net/ethernet/engleder/tsnep_hw.h
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -34,6 +34,7 @@
#define ECM_INT_LINK 0x00000020
#define ECM_INT_TX_0 0x00000100
#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
#define ECM_INT_ALL 0x7FFFFFFF
#define ECM_INT_DISABLE 0x80000000
@@ -43,6 +44,10 @@
#define ECM_RESET_CHANNEL 0x00000100
#define ECM_RESET_TXRX 0x00010000
+/* counter */
+#define ECM_COUNTER_LOW 0x0028
+#define ECM_COUNTER_HIGH 0x002C
+
/* control and status */
#define ECM_STATUS 0x0080
#define ECM_LINK_MODE_OFF 0x01000000
@@ -88,8 +93,7 @@
/* tsnep register */
#define TSNEP_INFO 0x0100
-#define TSNEP_INFO_RX_ASSIGN 0x00010000
-#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_INFO_TX_TIME 0x00010000
#define TSNEP_CONTROL 0x0108
#define TSNEP_CONTROL_TX_RESET 0x00000001
#define TSNEP_CONTROL_TX_ENABLE 0x00000002
@@ -118,10 +122,6 @@
#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
-#define TSNEP_RX_ASSIGN 0x01A0
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
#define TSNEP_MAC_ADDRESS_LOW 0x0800
#define TSNEP_MAC_ADDRESS_HIGH 0x0804
#define TSNEP_RX_FILTER 0x0806
@@ -148,6 +148,14 @@
#define TSNEP_GCL_A 0x2000
#define TSNEP_GCL_B 0x2800
#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
/* tsnep gate control list operation */
struct tsnep_gcl_operation {
@@ -190,7 +198,8 @@ struct tsnep_tx_desc {
/* tsnep TX descriptor writeback */
struct tsnep_tx_desc_wb {
__le32 properties;
- __le32 reserved1[3];
+ __le32 reserved1;
+ __le64 counter;
__le64 timestamp;
__le32 dma_delay;
__le32 reserved2;
@@ -221,7 +230,7 @@ struct tsnep_rx_desc_wb {
/* tsnep RX inline meta */
struct tsnep_rx_inline {
- __le64 reserved;
+ __le64 counter;
__le64 timestamp;
};
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 904f3304727e..48fb391951dd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -27,10 +27,10 @@
#include <linux/phy.h>
#include <linux/iopoll.h>
-#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
- TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
-#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
-#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
@@ -60,22 +60,29 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
/* handle link interrupt */
- if ((active & ECM_INT_LINK) != 0) {
- if (adapter->netdev->phydev)
- phy_mac_interrupt(adapter->netdev->phydev);
- }
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- if (adapter->netdev) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
- }
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
}
return IRQ_HANDLED;
}
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(&queue->napi);
+
+ return IRQ_HANDLED;
+}
+
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
struct tsnep_adapter *adapter = bus->priv;
@@ -124,30 +131,51 @@ static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
return 0;
}
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
static void tsnep_phy_link_status_change(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
- u32 mode;
- if (phydev->link) {
- switch (phydev->speed) {
- case SPEED_100:
- mode = ECM_LINK_MODE_100;
- break;
- case SPEED_1000:
- mode = ECM_LINK_MODE_1000;
- break;
- default:
- mode = ECM_LINK_MODE_OFF;
- break;
- }
- iowrite32(mode, adapter->addr + ECM_STATUS);
- }
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
phy_print_status(netdev->phydev);
}
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int retval;
+
+ retval = phy_loopback(adapter->phydev, enable);
+
+ /* PHY link state change is not signaled if loopback is enabled, it
+ * would delay a working loopback anyway, let's ensure that loopback
+ * is working immediately by setting link mode directly
+ */
+ if (!retval && enable)
+ tsnep_set_link_mode(adapter);
+
+ return retval;
+}
+
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
@@ -241,14 +269,14 @@ alloc_failed:
return retval;
}
-static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
{
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
if (entry->skb) {
- entry->properties =
- skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
@@ -313,6 +341,7 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
struct tsnep_tx_entry *entry;
unsigned int len;
dma_addr_t dma;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -335,19 +364,22 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
dma_unmap_addr_set(entry, dma, dma);
entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
}
- return 0;
+ return map_len;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
- entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+ entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
if (i == 0)
@@ -360,9 +392,12 @@ static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
+ map_len += entry->len;
entry->len = 0;
}
}
+
+ return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
@@ -371,6 +406,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
+ int length;
int i;
int retval;
@@ -394,8 +430,8 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry->skb = skb;
retval = tsnep_tx_map(skb, tx, count);
- if (retval != 0) {
- tsnep_tx_unmap(tx, count);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -407,12 +443,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ length = retval;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
for (i = 0; i < count; i++)
- tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
i == (count - 1));
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
@@ -428,9 +465,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_queue(tx->adapter->netdev);
}
- tx->packets++;
- tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
-
spin_unlock_irqrestore(&tx->lock, flags);
return NETDEV_TX_OK;
@@ -442,6 +476,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
int budget = 128;
struct tsnep_tx_entry *entry;
int count;
+ int length;
spin_lock_irqsave(&tx->lock, flags);
@@ -464,14 +499,21 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, count);
+ length = tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
- u64 timestamp =
- __le64_to_cpu(entry->desc_wb->timestamp);
+ u64 timestamp;
+
+ if (skb_shinfo(entry->skb)->tx_flags &
+ SKBTX_HW_TSTAMP_USE_CYCLES)
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->counter);
+ else
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
memset(&hwtstamps, 0, sizeof(hwtstamps));
hwtstamps.hwtstamp = ns_to_ktime(timestamp);
@@ -484,6 +526,9 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
budget--;
} while (likely(budget));
@@ -498,7 +543,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
}
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_tx *tx)
+ int queue_index, struct tsnep_tx *tx)
{
dma_addr_t dma;
int retval;
@@ -506,6 +551,7 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(tx, 0, sizeof(*tx));
tx->adapter = adapter;
tx->addr = addr;
+ tx->queue_index = queue_index;
retval = tsnep_tx_ring_init(tx);
if (retval)
@@ -541,14 +587,15 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
- if (dma_unmap_addr(entry, dma))
- dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
- dma_unmap_len(entry, len),
- DMA_FROM_DEVICE);
- if (entry->skb)
- dev_kfree_skb(entry->skb);
+ if (entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ entry->page = NULL;
}
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
memset(rx->entry, 0, sizeof(rx->entry));
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
@@ -561,31 +608,19 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
}
}
-static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
- struct tsnep_rx_entry *entry)
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
{
- struct device *dmadev = rx->adapter->dmadev;
- struct sk_buff *skb;
- dma_addr_t dma;
-
- skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
- GFP_ATOMIC | GFP_DMA);
- if (!skb)
- return -ENOMEM;
-
- skb_reserve(skb, RX_SKB_RESERVE);
+ struct page *page;
- dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dmadev, dma)) {
- dev_kfree_skb(skb);
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- }
- entry->skb = skb;
- entry->len = RX_SKB_LENGTH;
- dma_unmap_addr_set(entry, dma, dma);
- entry->desc->rx = __cpu_to_le64(dma);
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
return 0;
}
@@ -594,6 +629,7 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
struct tsnep_rx_entry *next_entry;
int i, j;
int retval;
@@ -615,12 +651,28 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
}
}
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_SKB_PAD;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (retval)
goto failed;
}
@@ -636,7 +688,7 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
- /* RX_SKB_LENGTH is a multiple of 4 */
+ /* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (index == rx->increment_owner_counter) {
@@ -659,19 +711,52 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
entry->desc->properties = __cpu_to_le32(entry->properties);
}
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_SKB_PAD);
+
+ skb_shinfo(skb)->tx_flags |=
+ SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
int done = 0;
+ enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
+ struct page *page;
struct sk_buff *skb;
- size_t len;
- dma_addr_t dma;
int length;
bool enable = false;
int retval;
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+
while (likely(done < budget)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
@@ -684,42 +769,34 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
*/
dma_rmb();
- skb = entry->skb;
- len = dma_unmap_len(entry, len);
- dma = dma_unmap_addr(entry, dma);
+ prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
+ length, dma_dir);
+ page = entry->page;
/* forward skb only if allocation is successful, otherwise
- * skb is reused and frame dropped
+ * page is reused and frame dropped
*/
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (!retval) {
- dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
-
- length = __le32_to_cpu(entry->desc_wb->properties) &
- TSNEP_DESC_LENGTH_MASK;
- skb_put(skb, length - ETH_FCS_LEN);
- if (rx->adapter->hwtstamp_config.rx_filter ==
- HWTSTAMP_FILTER_ALL) {
- struct skb_shared_hwtstamps *hwtstamps =
- skb_hwtstamps(skb);
- struct tsnep_rx_inline *rx_inline =
- (struct tsnep_rx_inline *)skb->data;
- u64 timestamp =
- __le64_to_cpu(rx_inline->timestamp);
-
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(timestamp);
- }
- skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
- skb->protocol = eth_type_trans(skb,
- rx->adapter->netdev);
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ page_pool_release_page(rx->page_pool, page);
+
+ rx->packets++;
+ rx->bytes += length -
+ TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
- rx->packets++;
- rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
- if (skb->pkt_type == PACKET_MULTICAST)
- rx->multicast++;
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
- napi_gro_receive(napi, skb);
+ rx->dropped++;
+ }
done++;
} else {
rx->dropped++;
@@ -745,7 +822,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
}
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_rx *rx)
+ int queue_index, struct tsnep_rx *rx)
{
dma_addr_t dma;
int i;
@@ -754,6 +831,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(rx, 0, sizeof(*rx));
rx->adapter = adapter;
rx->addr = addr;
+ rx->queue_index = queue_index;
retval = tsnep_rx_ring_init(rx);
if (retval)
@@ -814,6 +892,56 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
return min(done, budget - 1);
}
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ sprintf(queue->name, "%s-txrx-%d", name,
+ queue->rx->queue_index);
+ else if (queue->tx)
+ sprintf(queue->name, "%s-tx-%d", name,
+ queue->tx->queue_index);
+ else
+ sprintf(queue->name, "%s-rx-%d", name,
+ queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -823,15 +951,11 @@ static int tsnep_netdev_open(struct net_device *netdev)
int rx_queue_index = 0;
int retval;
- retval = tsnep_phy_open(adapter);
- if (retval)
- return retval;
-
for (i = 0; i < adapter->num_queues; i++) {
adapter->queue[i].adapter = adapter;
if (adapter->queue[i].tx) {
addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
- retval = tsnep_tx_open(adapter, addr,
+ retval = tsnep_tx_open(adapter, addr, tx_queue_index,
adapter->queue[i].tx);
if (retval)
goto failed;
@@ -840,11 +964,20 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (adapter->queue[i].rx) {
addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
retval = tsnep_rx_open(adapter, addr,
+ rx_queue_index,
adapter->queue[i].rx);
if (retval)
goto failed;
rx_queue_index++;
}
+
+ retval = tsnep_request_irq(&adapter->queue[i], i == 0);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n",
+ adapter->queue[i].irq);
+ goto failed;
+ }
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
@@ -856,9 +989,14 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (retval)
goto failed;
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
for (i = 0; i < adapter->num_queues; i++) {
netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
- tsnep_poll, 64);
+ tsnep_poll);
napi_enable(&adapter->queue[i].napi);
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -866,14 +1004,18 @@ static int tsnep_netdev_open(struct net_device *netdev)
return 0;
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
failed:
for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
return retval;
}
@@ -882,20 +1024,23 @@ static int tsnep_netdev_close(struct net_device *netdev)
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i;
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
for (i = 0; i < adapter->num_queues; i++) {
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
napi_disable(&adapter->queue[i].napi);
netif_napi_del(&adapter->queue[i].napi);
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
-
return 0;
}
@@ -1010,15 +1155,47 @@ static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
+static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
+ u64 timestamp;
+
+ if (cycles)
+ timestamp = __le64_to_cpu(rx_inline->counter);
+ else
+ timestamp = __le64_to_cpu(rx_inline->timestamp);
+
+ return ns_to_ktime(timestamp);
+}
+
static const struct net_device_ops tsnep_netdev_ops = {
.ndo_open = tsnep_netdev_open,
.ndo_stop = tsnep_netdev_close,
.ndo_start_xmit = tsnep_netdev_xmit_frame,
.ndo_eth_ioctl = tsnep_netdev_ioctl,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
-
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
+ .ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
};
@@ -1091,8 +1268,7 @@ static int tsnep_mdio_init(struct tsnep_adapter *adapter)
retval = of_mdiobus_register(adapter->mdiobus, np);
out:
- if (np)
- of_node_put(np);
+ of_node_put(np);
return retval;
}
@@ -1119,6 +1295,52 @@ static int tsnep_phy_init(struct tsnep_adapter *adapter)
return 0;
}
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = irq_mask;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ }
+
+ return 0;
+}
+
static int tsnep_probe(struct platform_device *pdev)
{
struct tsnep_adapter *adapter;
@@ -1127,6 +1349,7 @@ static int tsnep_probe(struct platform_device *pdev)
u32 type;
int revision;
int version;
+ int queue_count;
int retval;
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
@@ -1148,41 +1371,39 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
adapter->addr = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
- adapter->irq = platform_get_irq(pdev, 0);
netdev->mem_start = io->start;
netdev->mem_end = io->end;
- netdev->irq = adapter->irq;
type = ioread32(adapter->addr + ECM_TYPE);
revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
adapter->gate_control = type & ECM_GATE_CONTROL;
-
- adapter->num_tx_queues = TSNEP_QUEUES;
- adapter->num_rx_queues = TSNEP_QUEUES;
- adapter->num_queues = TSNEP_QUEUES;
- adapter->queue[0].tx = &adapter->tx[0];
- adapter->queue[0].rx = &adapter->rx[0];
- adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
tsnep_disable_irq(adapter, ECM_INT_ALL);
- retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
- 0, TSNEP, adapter);
- if (retval != 0) {
- dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
- adapter->irq);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
return retval;
}
- tsnep_enable_irq(adapter, ECM_INT_LINK);
retval = tsnep_mac_init(adapter);
if (retval)
- goto mac_init_failed;
+ return retval;
retval = tsnep_mdio_init(adapter);
if (retval)
@@ -1200,10 +1421,14 @@ static int tsnep_probe(struct platform_device *pdev)
if (retval)
goto tc_init_failed;
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
netdev->netdev_ops = &tsnep_netdev_ops;
netdev->ethtool_ops = &tsnep_ethtool_ops;
netdev->features = NETIF_F_SG;
- netdev->hw_features = netdev->features;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -1220,6 +1445,8 @@ static int tsnep_probe(struct platform_device *pdev)
return 0;
register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
tsnep_tc_cleanup(adapter);
tc_init_failed:
tsnep_ptp_cleanup(adapter);
@@ -1228,8 +1455,6 @@ phy_init_failed:
if (adapter->mdiobus)
mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
-mac_init_failed:
- tsnep_disable_irq(adapter, ECM_INT_ALL);
return retval;
}
@@ -1239,6 +1464,8 @@ static int tsnep_remove(struct platform_device *pdev)
unregister_netdev(adapter->netdev);
+ tsnep_rxnfc_cleanup(adapter);
+
tsnep_tc_cleanup(adapter);
tsnep_ptp_cleanup(adapter);
@@ -1260,7 +1487,7 @@ MODULE_DEVICE_TABLE(of, tsnep_of_match);
static struct platform_driver tsnep_driver = {
.driver = {
.name = TSNEP,
- .of_match_table = of_match_ptr(tsnep_of_match),
+ .of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
.remove = tsnep_remove,
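The RX path conversion above moves tsnep from per-skb allocations to page_pool-backed pages that napi_build_skb() turns into skbs in place. For that to be legal, the usable frame length must leave room for headroom at the front of the page and a struct skb_shared_info at the tail, which is exactly what TSNEP_MAX_RX_BUF_SIZE encodes: PAGE_SIZE - TSNEP_HEADROOM - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). A minimal sketch of the layout, not taken from the patch:

/* Hedged sketch: builds an skb in place over a page_pool page,
 * assuming the frame was DMA'd at offset TSNEP_SKB_PAD and
 * length <= TSNEP_MAX_RX_BUF_SIZE, so the page layout is
 *   | headroom | frame data | skb_shared_info |
 */
static struct sk_buff *build_rx_skb(struct page *page, int length)
{
	struct sk_buff *skb = napi_build_skb(page_address(page), PAGE_SIZE);

	if (!skb)
		return NULL;
	skb_reserve(skb, TSNEP_SKB_PAD);	/* skip the DMA headroom */
	__skb_put(skb, length);			/* fits by construction */
	return skb;
}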
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
index eaad453d487e..54fbf0126815 100644
--- a/drivers/net/ethernet/engleder/tsnep_ptp.c
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -175,6 +175,33 @@ static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
return 0;
}
+static int tsnep_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 counter;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_COUNTER_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ } while (high != high_before);
+ counter = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(counter);
+
+ return 0;
+}
+
int tsnep_ptp_init(struct tsnep_adapter *adapter)
{
int retval = 0;
@@ -192,6 +219,7 @@ int tsnep_ptp_init(struct tsnep_adapter *adapter)
adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+ adapter->ptp_clock_info.getcyclesx64 = tsnep_ptp_getcyclesx64;
spin_lock_init(&adapter->ptp_lock);
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 000000000000..9ac2a0cf3833
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
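The new tsnep_rxnfc.c accepts exactly one rule shape: ETHER_FLOW with a fully-masked EtherType and a queue action. From userspace that corresponds to something like `ethtool -N eth0 flow-type ether proto 0x88f7 action 1` (syntax per the ethtool manual; the device name is illustrative). A hedged sketch of the equivalent ioctl payload, field names per include/uapi/linux/ethtool.h:

#include <linux/ethtool.h>
#include <arpa/inet.h>
#include <string.h>

/* Fill the ETHTOOL_SRXCLSRLINS payload for an EtherType steering rule;
 * submission via the SIOCETHTOOL ioctl (ifr_data = &nfc) is omitted.
 */
static void fill_ether_type_rule(struct ethtool_rxnfc *nfc,
				 unsigned short ether_type, int queue)
{
	memset(nfc, 0, sizeof(*nfc));
	nfc->cmd = ETHTOOL_SRXCLSRLINS;
	nfc->fs.flow_type = ETHER_FLOW;
	nfc->fs.ring_cookie = queue;		/* steer to this RX queue */
	nfc->fs.location = RX_CLS_LOC_ANY;	/* let the driver pick a slot */
	nfc->fs.h_u.ether_spec.h_proto = htons(ether_type);
	nfc->fs.m_u.ether_spec.h_proto = 0xffff;	/* full mask required */
}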
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 437c5acfe222..95cbad198b4b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1224,7 +1224,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
- netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ethoc_poll);
spin_lock_init(&priv->lock);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 323340826dab..f1eb660aaee2 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -608,13 +608,12 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* Get IRQ number */
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
err = -ENODEV;
goto out_netdev;
}
- netif_napi_add(ndev, &priv->napi, nps_enet_poll,
- NPS_ENET_NAPI_POLL_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, nps_enet_poll,
+ NPS_ENET_NAPI_POLL_WEIGHT);
/* Register the driver. Should be the last thing in probe */
err = register_netdev(ndev);
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 3d1e9a302148..c699bd6bcbb9 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_FARADAY
bool "Faraday devices"
default y
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,24 +19,22 @@ if NET_VENDOR_FARADAY
config FTMAC100
tristate "Faraday FTMAC100 10/100 Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select MII
help
This driver supports the FTMAC100 10/100 Ethernet controller
- from Faraday. It is used on Faraday A320, Andes AG101 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A320 and some other ARM SoCs.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
- from Faraday. It is used on Faraday A369, Andes AG102 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A369 and some other ARM SoCs.
endif # NET_VENDOR_FARADAY
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 691605c15265..a03879a27b04 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
return 0;
}
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- bool tx_pause, rx_pause;
- int new_speed;
-
- /* We store "no link" as speed 0 */
- if (!phydev->link)
- new_speed = 0;
- else
- new_speed = phydev->speed;
-
- /* Grab pause settings from PHY if configured to do so */
- if (priv->aneg_pause) {
- rx_pause = tx_pause = phydev->pause;
- if (phydev->asym_pause)
- tx_pause = !rx_pause;
- } else {
- rx_pause = priv->rx_pause;
- tx_pause = priv->tx_pause;
- }
-
- /* Link hasn't changed, do nothing */
- if (phydev->speed == priv->cur_speed &&
- phydev->duplex == priv->cur_duplex &&
- rx_pause == priv->rx_pause &&
- tx_pause == priv->tx_pause)
- return;
-
- /* Print status if we have a link or we had one and just lost it,
- * don't print otherwise.
- */
- if (new_speed || priv->cur_speed)
- phy_print_status(phydev);
-
- priv->cur_speed = new_speed;
- priv->cur_duplex = phydev->duplex;
- priv->rx_pause = rx_pause;
- priv->tx_pause = tx_pause;
-
- /* Link is down, do nothing else */
- if (!new_speed)
- return;
-
- /* Disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Reset the adapter asynchronously */
- schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->dev);
- struct device_node *np = pdev->dev.of_node;
- struct phy_device *phydev;
- phy_interface_t phy_intf;
- int err;
-
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
-
- phydev = phy_find_first(priv->mii_bus);
- if (!phydev) {
- netdev_info(netdev, "%s: no PHY found\n", netdev->name);
- return -ENODEV;
- }
-
- phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, phy_intf);
-
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
- return PTR_ERR(phydev);
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phydev);
-
- /* Display what we found */
- phy_attached_info(phydev);
-
- return 0;
-}
-
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -1174,8 +1063,8 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static void
@@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
return err;
}
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
{
- struct ftgmac100 *priv = container_of(work, struct ftgmac100,
- reset_task);
struct net_device *netdev = priv->netdev;
int err;
@@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+
+ ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
+
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
+ return;
+
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
+
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
+
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
+
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
+ * order consistent to prevent deadlock.
+ */
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+
+ ftgmac100_reset(priv);
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+
+ phydev = phy_find_first(priv->mii_bus);
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, phydev_name(phydev),
+ &ftgmac100_adjust_link, phy_intf);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phydev);
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1491,7 +1506,7 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
/* Initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Grab our interrupt */
err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
@@ -1686,10 +1701,14 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
if (!netdev->phydev)
return;
phy_disconnect(netdev->phydev);
+ if (of_phy_is_fixed_link(priv->dev->of_node))
+ of_phy_deregister_fixed_link(priv->dev->of_node);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1749,6 +1768,19 @@ cleanup_clk:
return rc;
}
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+ struct device_node *child_np = of_get_child_by_name(np, name);
+ bool ret = false;
+
+ if (child_np) {
+ ret = true;
+ of_node_put(child_np);
+ }
+
+ return ret;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1820,11 +1852,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
- }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1844,6 +1871,26 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+ } else if (np && of_phy_is_fixed_link(np)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(np);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register fixed PHY\n");
+ goto err_phy_connect;
+ }
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
+ of_phy_deregister_fixed_link(np);
+ err = -EINVAL;
+ goto err_phy_connect;
+ }
+
+ /* Display what we found */
+ phy_attached_info(phy);
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
@@ -1873,7 +1920,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* Display what we found */
phy_attached_info(phy);
- } else if (np && !of_get_child_by_name(np, "mdio")) {
+ } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that describe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available
@@ -1896,6 +1943,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */
@@ -1913,6 +1965,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8a341e2d5833..d95d78230828 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -807,8 +807,8 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
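strscpy() is the preferred replacement for strlcpy(): it still NUL-terminates, but it returns -E2BIG on truncation instead of the source length, and it only reads as much of the source as can fit, where strlcpy() always ran strlen() over the whole source. A hedged illustration:

static void demo_copy(void)
{
	char buf[8];
	ssize_t n;

	/* At most 7 bytes are copied, plus the terminating NUL. */
	n = strscpy(buf, "ftmac100-mdio", sizeof(buf));
	if (n == -E2BIG)
		pr_warn("name truncated to \"%s\"\n", buf);	/* "ftmac10" */
}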
static int ftmac100_get_link_ksettings(struct net_device *netdev,
@@ -1075,6 +1075,11 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
+ netdev->max_mtu = MAX_PKT_SIZE;
+
+ err = platform_get_ethdev_address(&pdev->dev, netdev);
+ if (err == -EPROBE_DEFER)
+ goto defer_get_mac;
platform_set_drvdata(pdev, netdev);
@@ -1086,7 +1091,7 @@ static int ftmac100_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1137,6 +1142,7 @@ err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
+defer_get_mac:
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index fe986f1673fc..8af32f9070f4 100644
--- a/drivers/net/ethernet/faraday/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -122,9 +122,9 @@
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* TXBUF_BADR */
+ __le32 txdes0;
+ __le32 txdes1;
+ __le32 txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
@@ -143,9 +143,9 @@ struct ftmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* RXBUF_BADR */
+ __le32 rxdes0;
+ __le32 rxdes1;
+ __le32 rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
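Retyping the descriptor words as __le32 documents that the hardware consumes little-endian values and lets sparse flag any access that bypasses the byte-order helpers. A hedged sketch of an accessor under the new types (the DEMO_* bit is hypothetical):

#include <linux/bits.h>

#define DEMO_RXDES0_RX_ERR	BIT(18)	/* hypothetical error bit */

static bool demo_rx_has_error(const struct ftmac100_rxdes *rxdes)
{
	/* le32_to_cpu() is a no-op on little-endian CPUs but keeps
	 * the access correct (and sparse-clean) on big-endian ones.
	 */
	return le32_to_cpu(rxdes->rxdes0) & DEMO_RXDES0_RX_ERR;
}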
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b3939a5f7b03..ed18450fd2cc 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1809,8 +1809,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index e04e1c5cb013..ce866ae3df03 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE || COMPILE_TEST
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,15 +23,16 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select PAGE_POOL
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index c78883c3a2c8..fc68a32ce2f7 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,32 +1,7 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -222,12 +197,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
struct dpaa_percpu_priv *percpu_priv;
const u8 *mac_addr;
int i, err;
@@ -241,10 +219,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
}
net_dev->netdev_ops = dpaa_ops;
- mac_addr = priv->mac_dev->addr;
+ mac_addr = mac_dev->addr;
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
+ net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
+ net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
net_dev->min_mtu = ETH_MIN_MTU;
net_dev->max_mtu = dpaa_get_max_mtu();
@@ -271,7 +249,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
- err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ err = mac_dev->change_addr(mac_dev->fman_mac,
(const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
@@ -286,6 +264,9 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+ mac_dev->net_dev = net_dev;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
@@ -313,10 +294,9 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+ mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
error = fman_port_disable(mac_dev->port[i]);
@@ -851,10 +831,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
+ /* Set different thresholds based on the configured MAC speed.
+ * This may become suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
+ * callback.
*/
if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
cs_th = DPAA_CS_THRESHOLD_10G;
@@ -883,6 +863,31 @@ out_error:
return err;
}
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = mac_dev->net_dev;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
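The reworked comment in dpaa_eth_cgr_init() above asks MAC drivers to invoke this hook whenever the negotiated speed changes; a hedged sketch of such a call site (demo_adjust_link() and demo_get_mac_dev() are hypothetical, update_speed is the field wired up in dpaa_netdev_init()):

static void demo_adjust_link(struct net_device *net_dev)
{
	struct mac_device *mac_dev = demo_get_mac_dev(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;

	/* Re-scale the CGR congestion threshold to the new speed. */
	if (mac_dev->update_speed)
		mac_dev->update_speed(mac_dev, phy_dev->speed);
}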
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
@@ -2911,6 +2916,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
static int dpaa_phy_init(struct net_device *net_dev)
{
@@ -2918,6 +2924,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
+ u32 phy_vendor;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -2930,9 +2937,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
/* Unless the PHY is capable of rate adaptation */
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
/* remove any features not supported by the controller */
ethtool_convert_legacy_u32_to_link_mode(mask,
mac_dev->if_support);
@@ -2967,11 +2976,12 @@ static int dpaa_open(struct net_device *net_dev)
goto mac_start_failed;
}
- err = priv->mac_dev->start(mac_dev);
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
+ phy_start(priv->mac_dev->phy_dev);
netif_tx_start_all_queues(net_dev);
@@ -3173,8 +3183,7 @@ static int dpaa_napi_add(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_add(net_dev, &percpu_priv->np.napi,
- dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
}
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index daf894a97050..35b8cea7f886 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -1,31 +1,6 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#ifndef __DPAA_H
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index ee62d25cac81..4fee74c024bd 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 409c1dc39430..889f89df9930 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -1,32 +1,6 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 763d2c7b5fb1..769e936a263c 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -106,9 +80,9 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
@@ -489,11 +463,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
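Both of_get_parent() and of_parse_phandle() return nodes with an elevated refcount, and the hunk above drops each reference as soon as the next lookup no longer needs it. The general pattern, as a hedged sketch (the property name is illustrative):

	struct device_node *parent, *target = NULL;

	parent = of_get_parent(dev->of_node);
	if (parent) {
		target = of_parse_phandle(parent, "demo-handle", 0);
		of_node_put(parent);		/* done with the parent */
	}
	if (target) {
		/* ... look up the device behind target ... */
		of_node_put(target);		/* balance of_parse_phandle() */
	}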
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d21ba70ef4a3..8d029addddad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -18,6 +18,7 @@
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
+#include <net/tso.h>
#include "dpaa2-eth.h"
@@ -34,6 +35,75 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
@@ -695,7 +765,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
struct ptp_tstamp origin_timestamp;
- struct dpni_single_step_cfg cfg;
u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
struct dpaa2_fas *fas;
@@ -749,17 +818,48 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
htonl(origin_timestamp.sec_lsb);
*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
- cfg.en = 1;
- cfg.ch_update = udp;
- cfg.offset = offset1;
- cfg.peer_delay = 0;
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
- if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
- &cfg))
- WARN_ONCE(1, "Failed to set single step register");
}
}
+static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
+
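dpaa2_eth_sgt_get()/dpaa2_eth_sgt_recycle() put a small per-CPU free list in front of the page-frag allocator; both run on the same CPU in softirq context, so no locking is needed. A hedged, generic sketch of the pattern (DEMO_* names are hypothetical):

#define DEMO_CACHE_SIZE	16	/* hypothetical cache depth */

struct demo_buf_cache {
	int count;
	void *buf[DEMO_CACHE_SIZE];
};

static void *demo_buf_get(struct demo_buf_cache __percpu *cache,
			  unsigned int size)
{
	struct demo_buf_cache *c = this_cpu_ptr(cache);

	/* Hit the frag allocator only when the cache is empty. */
	if (c->count == 0)
		return napi_alloc_frag(size);
	return c->buf[--c->count];
}

static void demo_buf_recycle(struct demo_buf_cache __percpu *cache, void *buf)
{
	struct demo_buf_cache *c = this_cpu_ptr(cache);

	if (c->count >= DEMO_CACHE_SIZE)
		skb_free_frag(buf);	/* cache full, return the frag */
	else
		c->buf[c->count++] = buf;
}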
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
@@ -805,12 +905,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -846,6 +945,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
err = -ENOMEM;
goto dma_map_single_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
@@ -855,7 +955,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
return 0;
dma_map_single_failed:
- skb_free_frag(sgt_buf);
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
sgt_buf_alloc_failed:
dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
@@ -875,7 +975,6 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
struct dpaa2_eth_swa *swa;
dma_addr_t addr, sgt_addr;
@@ -884,18 +983,10 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
int err;
/* Prepare the HW SGT structure */
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
-
- if (sgt_cache->count == 0)
- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
- GFP_ATOMIC);
- else
- sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf))
return -ENOMEM;
-
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
@@ -923,6 +1014,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
goto sgt_map_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, sgt_addr);
@@ -934,10 +1026,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
sgt_map_failed:
dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(sgt_buf);
- else
- sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
return err;
}
@@ -978,6 +1067,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
@@ -1005,9 +1095,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
u32 fd_len = dpaa2_fd_get_len(fd);
-
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ void *tso_hdr;
+ int i;
fd_addr = dpaa2_fd_get_addr(fd);
buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
@@ -1039,6 +1130,29 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ /* Unmap and free the header */
+ tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(tso_hdr);
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
} else {
skb = swa->single.skb;
@@ -1067,55 +1181,195 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (skb->cb[0] == TX_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- __le64 *ts = dpaa2_get_ts(buffer_start, true);
- u64 ns;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
- mutex_unlock(&priv->onestep_tstamp_lock);
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
}
/* Free SGT buffer allocated on tx */
- if (fd_format != dpaa2_fd_single) {
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
- if (swa->type == DPAA2_ETH_SWA_SG) {
- skb_free_frag(buffer_start);
- } else {
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(buffer_start);
- else
- sgt_cache->buf[sgt_cache->count++] = buffer_start;
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb, only the last one needs to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
}
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
+ }
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
}
- /* Move on with skb release */
- napi_consume_skb(skb, in_napi);
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+
+ return err;
}
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_fd fd;
- struct rtnl_link_stats64 *percpu_stats;
+ int total_enqueued = 0, retries = 0, enqueued;
struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
struct dpaa2_eth_fq *fq;
struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
u16 queue_mapping;
- unsigned int needed_headroom;
- u32 fd_len;
+ void *swa = NULL;
u8 prio = 0;
int err, i;
- void *swa;
+ u32 fd_len;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fd = (this_cpu_ptr(priv->fd))->array;
needed_headroom = dpaa2_eth_needed_headroom(skb);
@@ -1130,20 +1384,28 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
}
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
- if (skb_is_nonlinear(skb)) {
- err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else if (skb_headroom(skb) < needed_headroom) {
- err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else {
- err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
}
if (unlikely(err)) {
@@ -1151,11 +1413,12 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
goto err_build_fd;
}
- if (skb->cb[0])
- dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
/* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
/* TxConf FQ selection relies on queue id from the stack.
* In case of a forwarded frame from another DPNI interface, we choose
@@ -1175,27 +1438,32 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
queue_mapping %= dpaa2_eth_queue_count(priv);
}
fq = &priv->fq[queue_mapping];
-
- fd_len = dpaa2_fd_get_len(&fd);
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueues might race with
* the Tx confirmation callback for this frame
*/
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
- if (err != -EBUSY)
- break;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
}
- percpu_extras->tx_portal_busy += i;
+ percpu_extras->tx_portal_busy += retries;
+
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, fq, fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- percpu_stats->tx_packets++;
+ percpu_stats->tx_packets += total_enqueued;
percpu_stats->tx_bytes += fd_len;
}
@@ -1392,8 +1660,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
@@ -1523,7 +1791,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
count = sgt_cache->count;
for (i = 0; i < count; i++)
- kfree(sgt_cache->buf[i]);
+ skb_free_frag(sgt_cache->buf[i]);
sgt_cache->count = 0;
}
}
@@ -1811,8 +2079,10 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (dpaa2_eth_is_type_phy(priv))
+ if (dpaa2_eth_is_type_phy(priv)) {
+ dpaa2_mac_start(priv->mac);
phylink_start(priv->mac->phylink);
+ }
return 0;
@@ -1887,6 +2157,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
if (dpaa2_eth_is_type_phy(priv)) {
phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
@@ -2207,6 +2478,9 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -4100,6 +4374,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
return err;
}
+ dpaa2_eth_detect_features(priv);
+
/* Capabilities listing */
supported |= IFF_LIVE_ADDR_CHANGE;
@@ -4115,7 +4391,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC;
+ NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
if (priv->dpni_attrs.vlan_filter_entries)
@@ -4246,7 +4523,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
}
irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
NULL, dpni_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
@@ -4273,7 +4550,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
return 0;
free_irq:
- devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
fsl_mc_free_irqs(ls_dev);
@@ -4288,8 +4565,7 @@ static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
- netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
}
}
@@ -4338,7 +4614,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4397,6 +4673,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
@@ -4484,6 +4767,8 @@ err_poll_thread:
err_alloc_rings:
err_csum:
err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
free_percpu(priv->percpu_extras);
@@ -4523,12 +4808,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+
+ unregister_netdev(net_dev);
rtnl_lock();
dpaa2_eth_disconnect_mac(priv);
rtnl_unlock();
- unregister_netdev(net_dev);
-
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
dpaa2_eth_dl_free(priv);
@@ -4539,6 +4824,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_free_irqs(ls_dev);
dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
@@ -4547,6 +4833,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_free_dpbp(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index e54e70ebdd05..447718483ef4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -122,6 +122,7 @@ enum dpaa2_eth_swa_type {
DPAA2_ETH_SWA_SINGLE,
DPAA2_ETH_SWA_SG,
DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_SW_TSO,
};
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -142,6 +143,12 @@ struct dpaa2_eth_swa {
int dma_size;
struct xdp_frame *xdpf;
} xdp;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
};
};
@@ -354,6 +361,8 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Linear skbs sent as a S/G FD due to insufficient headroom */
@@ -493,8 +502,15 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 200
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -510,12 +526,15 @@ struct dpaa2_eth_priv {
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
-
+ unsigned long features;
struct dpni_attr dpni_attrs;
u16 dpni_ver_major;
u16 dpni_ver_minor;
u16 tx_data_offset;
-
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid;
@@ -577,6 +596,8 @@ struct dpaa2_eth_priv {
struct devlink_port devlink_port;
u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
};
struct dpaa2_eth_devlink_priv {
@@ -655,6 +676,13 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
#define DPNI_PAUSE_VER_MAJOR 7
#define DPNI_PAUSE_VER_MINOR 13
#define dpaa2_eth_has_pause_support(priv) \
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 3fdbf87dccb1..eea7d7a07c00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -44,6 +44,8 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] tx converted sg frames",
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 623d113b6581..49ff85633783 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -11,6 +12,28 @@
#define phylink_to_dpaa2_mac(config) \
container_of((config), struct dpaa2_mac, phylink_config)
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
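dpaa2_mac_cmp_ver() follows the strcmp() convention (negative/zero/positive), comparing the major version first and the minor only on a tie, which is what makes >= gates like the one in dpaa2_mac_detect_features() correct across major bumps. A hedged sanity check:

	/* Lexicographic (major, minor): 5.1 is newer than 4.8 even
	 * though 1 < 8, and older than 5.2.
	 */
	struct dpaa2_mac m = { .ver_major = 5, .ver_minor = 1 };

	WARN_ON(dpaa2_mac_cmp_ver(&m, 4, 8) < 0);
	WARN_ON(dpaa2_mac_cmp_ver(&m, 5, 2) >= 0);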
static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
{
*if_mode = PHY_INTERFACE_MODE_NA;
@@ -38,6 +61,29 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
@@ -62,9 +108,6 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
return ERR_PTR(-EPROBE_DEFER);
}
- if (!parent)
- return NULL;
-
fwnode_for_each_child_node(parent, child) {
err = -EINVAL;
if (is_acpi_device_node(child))
@@ -100,6 +143,14 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+
+ return mac->pcs;
+}
+
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -117,6 +168,19 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
if (err)
netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
__func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* This happens only if we support changing the protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
}
static void dpaa2_mac_link_up(struct phylink_config *config,
@@ -172,6 +236,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -226,10 +291,66 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
}
}
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+ /* We support the current interface mode and, if we have a PCS,
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* If we have access to the SerDes phy/lane, ask the SerDes
+ * driver what interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
+
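These two helpers pair SerDes lane power management with the phylink state machine; the dpaa2-eth hunks earlier in this diff call them bracketing phylink_start()/phylink_stop(). The resulting open/stop ordering, as a hedged sketch:

	/* ndo_open: power the lane before phylink starts managing it */
	dpaa2_mac_start(mac);
	phylink_start(mac->phylink);

	/* ndo_stop: quiesce phylink first, then power the lane down */
	phylink_stop(mac->phylink);
	dpaa2_mac_stop(mac);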
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
struct phylink *phylink;
int err;
@@ -246,6 +367,20 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return -EINVAL;
mac->if_mode = err;
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
/* The MAC does not have the capability to add RGMII delays so
* error out if the interface mode requests them and there is no PHY
* to act upon them
@@ -274,25 +409,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
MAC_10000FD;
- /* We support the current interface mode, and if we have a PCS
- * similar interface modes that do not require the PLLs to be
- * reconfigured.
- */
- __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
- if (mac->pcs) {
- switch (mac->if_mode) {
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- mac->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- mac->phylink_config.supported_interfaces);
- break;
-
- default:
- break;
- }
- }
+ dpaa2_mac_set_supported_interfaces(mac);
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
@@ -303,9 +420,6 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
}
mac->phylink = phylink;
- if (mac->pcs)
- phylink_set_pcs(mac->phylink, mac->pcs);
-
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
@@ -330,6 +444,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
}
int dpaa2_mac_open(struct dpaa2_mac *mac)
@@ -353,6 +469,14 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
@@ -425,7 +549,7 @@ void dpaa2_mac_get_strings(u8 *data)
int i;
for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 1331a8477fe4..a58cab188a99 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,8 @@ struct dpaa2_mac {
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -24,6 +26,8 @@ struct dpaa2_mac {
enum dpmac_link_type if_link_type;
struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
@@ -43,4 +47,8 @@ void dpaa2_mac_get_strings(u8 *data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa87bb8..c8cb541572ff 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -129,7 +129,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct fsl_mc_device_irq *irq;
struct ptp_qoriq *ptp_qoriq;
struct device_node *node;
void __iomem *base;
@@ -168,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
base = of_iomap(node, 0);
if (!base) {
err = -ENOMEM;
- goto err_close;
+ goto err_put;
}
err = fsl_mc_allocate_irqs(mc_dev);
@@ -177,8 +176,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_unmap;
}
- irq = mc_dev->irqs[0];
- ptp_qoriq->irq = irq->msi_desc->irq;
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
err = request_threaded_irq(ptp_qoriq->irq, NULL,
dpaa2_ptp_irq_handler_thread,
@@ -212,6 +210,8 @@ err_free_mc_irq:
fsl_mc_free_irqs(mc_dev);
err_unmap:
iounmap(base);
+err_put:
+ of_node_put(node);
err_close:
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
err_free_mcp:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..cacd454ac696 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 9b5512b4f15d..2b5909fa93cf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -703,8 +703,10 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
dpaa2_switch_enable_ctrl_if_napi(ethsw);
- if (dpaa2_switch_port_is_type_phy(port_priv))
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ dpaa2_mac_start(port_priv->mac);
phylink_start(port_priv->mac->phylink);
+ }
return 0;
}
@@ -717,6 +719,7 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
if (dpaa2_switch_port_is_type_phy(port_priv)) {
phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
} else {
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1554,8 +1557,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
@@ -1581,7 +1583,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
return 0;
free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
+ devm_free_irq(dev, irq->virq, dev);
free_irq:
fsl_mc_free_irqs(sw_dev);
return err;
@@ -3371,9 +3373,8 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
* different queues for each switch ports.
*/
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
- netif_napi_add(ethsw->ports[0]->netdev,
- &ethsw->fq[i].napi, dpaa2_switch_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index a24b20f76938..e9ac2ecef3be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -19,11 +19,15 @@
#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
/* Macros for accessing command fields smaller than 1 byte */
#define DPMAC_MASK(field) \
GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -70,4 +74,12 @@ struct dpmac_rsp_get_counter {
__le64 counter;
};
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
index d5997b654562..f440a4c3b70c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -181,3 +181,57 @@ int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
return 0;
}
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
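
dpaa2_mac_detect_features(), called from the dpaa2_mac_open() hunk above, is not shown in this chunk; the version just read here is what it keys off. A hedged sketch of how such a gate could look, where the 4.8 threshold and the dpmac_ver_gte() helper are assumptions for illustration (DPAA2_MAC_FEATURE_PROTOCOL_CHANGE is the flag this patch tests in dpaa2_mac_connect()):

	/* Sketch: gate feature flags on the reported DPMAC API version. */
	static bool dpmac_ver_gte(struct dpaa2_mac *mac, u16 major, u16 minor)
	{
		return mac->ver_major > major ||
		       (mac->ver_major == major && mac->ver_minor >= minor);
	}

	static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
	{
		mac->features = 0;

		/* Assumed cutoff; the real threshold lives in the driver. */
		if (dpmac_ver_gte(mac, 4, 8))
			mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
	}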
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol in which to reconfigure the DPMAC.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
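
The natural caller for this command is the MAC's mac_config path: when phylink switches interface modes, the lane can be retrained through the generic PHY API and the firmware then told about the new protocol. A sketch under those assumptions (example_if_mode_to_eth_if() is a hypothetical phy_interface_t to dpmac_eth_if mapping helper):

	/* Sketch: propagate a phylink interface-mode change to the firmware. */
	static void example_mac_config(struct dpaa2_mac *mac, u16 dpmac_token,
				       phy_interface_t if_mode)
	{
		if (!(mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE))
			return;

		/* Retrain the SerDes lane first, then update the firmware. */
		if (mac->serdes_phy)
			phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET,
					 if_mode);

		dpmac_set_protocol(mac->mc_io, 0, dpmac_token,
				   example_if_mode_to_eth_if(if_mode));
	}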
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 8f7ceb731282..17488819ef68 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -205,4 +205,9 @@ enum dpmac_counter_id {
int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
enum dpmac_counter_id id, u64 *value);
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 9f80bdfeedec..828f538097af 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -98,7 +98,7 @@
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
-#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
/* Macros for accessing command fields smaller than 1 byte */
#define DPNI_MASK(field) \
@@ -658,12 +658,16 @@ struct dpni_cmd_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_rsp_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_cmd_enable_vlan_filter {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6afada99fb6..6c3b36f20fb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -2136,6 +2136,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
PTP_CH_UPDATE) ? 1 : 0;
ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 7de0562bbf59..6fffd519aa00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1074,12 +1074,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
* @peer_delay: For peer-to-peer transparent clocks add this value to the
* correction field in addition to the transient time update.
* The value expresses nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ * is used to directly update the register contents; the user has to
+ * create an address mapping for it.
*/
struct dpni_single_step_cfg {
u8 en;
u8 ch_update;
u16 offset;
u32 peer_delay;
+ u32 ptp_onestep_reg_base;
};
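
Since the firmware only reports the physical base of the 1588 SINGLE_STEP register, a driver wanting the direct-update path has to create the mapping itself, roughly as below (a sketch; error handling trimmed, and the wrapper name is illustrative):

	/* Sketch: map the SINGLE_STEP register for direct per-packet writes. */
	static void __iomem *example_map_onestep_reg(struct fsl_mc_io *mc_io,
						     u16 token)
	{
		struct dpni_single_step_cfg cfg;

		if (dpni_get_single_step_cfg(mc_io, 0, token, &cfg))
			return NULL;

		/* The firmware hands back a physical address; map one register. */
		return ioremap(cfg.ptp_onestep_reg_base, sizeof(u32));
	}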
int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..e0e8dfd13793 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index d6930a797c6c..f8c06c3f9464 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -172,7 +172,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
- tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
+ tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
@@ -792,9 +793,9 @@ static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
+ int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
struct net_device *ndev = tx_ring->ndev;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
bool do_twostep_tstamp;
@@ -821,6 +822,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
&tstamp);
do_twostep_tstamp = true;
}
+
+ if (tx_swbd->qbv_en &&
+ txbd->wb.status & ENETC_TXBD_STATS_WIN)
+ tx_win_drop++;
}
if (tx_swbd->is_xdp_tx)
@@ -873,6 +878,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
tx_ring->stats.packets += tx_frm_cnt;
tx_ring->stats.bytes += tx_byte_cnt;
+ tx_ring->stats.win_drop += tx_win_drop;
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
@@ -2084,7 +2090,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
else
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ /* Also prepare the consumer index in case page allocation never
+ * succeeds. In that case, hardware will never advance producer index
+ * to match consumer index, and will drop all frames.
+ */
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
@@ -2110,13 +2121,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@@ -2149,13 +2161,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_clear_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_clear_rxbdr(hw, priv->rx_ring[i]);
udelay(1);
}
@@ -2163,13 +2176,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
@@ -2257,13 +2270,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
@@ -2426,10 +2440,11 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
u8 num_tc;
@@ -2446,7 +2461,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ enetc_set_bdr_prio(hw, tx_ring->index, 0);
}
return 0;
@@ -2465,7 +2480,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ enetc_set_bdr_prio(hw, tx_ring->index, i);
}
/* Reset the number of netdev queues based on the TC count */
@@ -2480,25 +2495,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2552,6 +2548,7 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned long packets = 0, bytes = 0;
+ unsigned long tx_dropped = 0;
int i;
for (i = 0; i < priv->num_rx_rings; i++) {
@@ -2567,10 +2564,12 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
for (i = 0; i < priv->num_tx_rings; i++) {
packets += priv->tx_ring[i]->stats.packets;
bytes += priv->tx_ring[i]->stats.bytes;
+ tx_dropped += priv->tx_ring[i]->stats.win_drop;
}
stats->tx_packets = packets;
stats->tx_bytes = bytes;
+ stats->tx_dropped = tx_dropped;
return stats;
}
@@ -2591,52 +2590,29 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2648,11 +2624,6 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
@@ -2799,8 +2770,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
v->rx_dim_en = true;
}
INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
v->count_tx_rings = v_tx_rings;
for (j = 0; j < v_tx_rings; j++) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb39e406b7fc..161930a65f61 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -18,6 +18,8 @@
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+#define ENETC_CBD_DATA_MEM_ALIGN 64
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -34,6 +36,7 @@ struct enetc_tx_swbd {
u8 is_eof:1;
u8 is_xdp_tx:1;
u8 is_xdp_redirect:1;
+ u8 qbv_en:1;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
@@ -70,6 +73,7 @@ struct enetc_ring_stats {
unsigned int xdp_redirect_sg;
unsigned int recycles;
unsigned int recycle_failures;
+ unsigned int win_drop;
};
struct enetc_xdp_data {
@@ -389,11 +393,9 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -415,7 +417,47 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
+ struct enetc_cbd *cbd,
+ int size, dma_addr_t *dma,
+ void **data_align)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ dma_addr_t dma_align;
+ void *data;
+
+ data = dma_alloc_coherent(ring->dma_dev,
+ size + ENETC_CBD_DATA_MEM_ALIGN,
+ dma, GFP_KERNEL);
+ if (!data) {
+ dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
+ return NULL;
+ }
+
+ dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
+ *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);
+
+ cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+ cbd->length = cpu_to_le16(size);
+
+ return data;
+}
+
+static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
+ void *data, dma_addr_t *dma)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+
+ dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
+ data, *dma);
+}
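
The two helpers fold the allocate/align/address-fill/free boilerplate that each control BD command used to open-code; the enetc_cbdr.c hunks below convert the existing callers. The round-trip pattern they are built for, as a sketch:

	/* Sketch: typical control BD round trip with the new helpers. */
	static int example_send_cbd(struct enetc_si *si, const void *payload,
				    int size)
	{
		struct enetc_cbd cbd = {.cmd = 0};
		void *tmp, *tmp_align;
		dma_addr_t dma;
		int err;

		tmp = enetc_cbd_alloc_data_mem(si, &cbd, size, &dma, &tmp_align);
		if (!tmp)
			return -ENOMEM;

		memcpy(tmp_align, payload, size);	/* fill the aligned window */
		err = enetc_send_cmd(si, &cbd);		/* addr/length already set */

		enetc_cbd_free_data_mem(si, size, tmp, &dma);
		return err;
	}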
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
@@ -425,22 +467,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -481,6 +525,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
}
#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
@@ -500,4 +545,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 073e56dcca4e..af68dc46a795 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -166,70 +166,55 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
-#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
+ dma_addr_t dma;
int err;
/* fill up the "set" descriptor */
cbd.cmd = 0;
cbd.cls = 4;
cbd.index = cpu_to_le16(index);
- cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
+ &dma, &tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RFSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
memcpy(tmp_align, rfse, sizeof(*rfse));
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
-
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- tmp, dma);
+ enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
return err;
}
-#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
+ dma_addr_t dma;
int err, i;
- if (count < RSSE_ALIGN)
+ if (count < ENETC_CBD_DATA_MEM_ALIGN)
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
+ &dma, (void *)&tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RSSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
if (!read)
for (i = 0; i < count; i++)
@@ -238,10 +223,6 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* fill up the descriptor */
cbd.cmd = read ? 2 : 1;
cbd.cls = 3;
- cbd.length = cpu_to_le16(count);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
@@ -251,7 +232,7 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
+ enetc_cbd_free_data_mem(si, count, tmp, &dma);
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index fa5b4f885b17..c8369e3752b0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -125,68 +125,68 @@ static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
- { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
- { ENETC_PM0_RALN, "MAC rx alignment errors" },
- { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
- { ENETC_PM0_RFRM, "MAC rx valid frames" },
- { ENETC_PM0_RFCS, "MAC rx fcs errors" },
- { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
- { ENETC_PM0_RERR, "MAC rx frame errors" },
- { ENETC_PM0_RUCA, "MAC rx unicast frames" },
- { ENETC_PM0_RMCA, "MAC rx multicast frames" },
- { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
- { ENETC_PM0_RDRP, "MAC rx dropped packets" },
- { ENETC_PM0_RPKT, "MAC rx packets" },
- { ENETC_PM0_RUND, "MAC rx undersized packets" },
- { ENETC_PM0_R64, "MAC rx 64 byte packets" },
- { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
- { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
- { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
- { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
- { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
- { ENETC_PM0_ROVR, "MAC rx oversized packets" },
- { ENETC_PM0_RJBR, "MAC rx jabber packets" },
- { ENETC_PM0_RFRG, "MAC rx fragment packets" },
- { ENETC_PM0_RCNP, "MAC rx control packets" },
- { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
- { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
- { ENETC_PM0_TOCT, "MAC tx octets" },
- { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
- { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
- { ENETC_PM0_TFRM, "MAC tx frames" },
- { ENETC_PM0_TFCS, "MAC tx fcs errors" },
- { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frame errors" },
- { ENETC_PM0_TUCA, "MAC tx unicast frames" },
- { ENETC_PM0_TMCA, "MAC tx multicast frames" },
- { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
- { ENETC_PM0_TPKT, "MAC tx packets" },
- { ENETC_PM0_TUND, "MAC tx undersized packets" },
- { ENETC_PM0_T64, "MAC tx 64 byte packets" },
- { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
- { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
- { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
- { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
- { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
- { ENETC_PM0_TCNP, "MAC tx control packets" },
- { ENETC_PM0_TDFR, "MAC tx deferred packets" },
- { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
- { ENETC_PM0_TSCOL, "MAC tx single collisions" },
- { ENETC_PM0_TLCOL, "MAC tx late collisions" },
- { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
- { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
- { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
- { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
- { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
- { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
- { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
- { ENETC_PFDMSAPR, "SI pruning discarded frames" },
- { ENETC_PICDR(0), "ICM DR0 discarded frames" },
- { ENETC_PICDR(1), "ICM DR1 discarded frames" },
- { ENETC_PICDR(2), "ICM DR2 discarded frames" },
- { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -204,6 +204,7 @@ static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
"Tx ring %2d frames",
"Tx ring %2d XDP frames",
"Tx ring %2d XDP drops",
+ "Tx window drop %2d frames",
};
static int enetc_get_sset_count(struct net_device *ndev, int sset)
@@ -235,7 +236,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -257,7 +258,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strlcpy(p, enetc_port_counters[i].name,
+ strscpy(p, enetc_port_counters[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -279,6 +280,7 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->tx_ring[i]->stats.packets;
data[o++] = priv->tx_ring[i]->stats.xdp_tx;
data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+ data[o++] = priv->tx_ring[i]->stats.win_drop;
}
for (i = 0; i < priv->num_rx_rings; i++) {
@@ -299,6 +301,113 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+
+ *ranges = enetc_rmon_ranges;
+}
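
The hist[]/hist_tx[] slots must pair up index-for-index with the non-terminator entries of enetc_rmon_ranges above; a build-time check along these lines (an illustrative addition, not part of the patch) would keep the two tables honest:

	/* Sketch: 7 buckets plus the empty terminator entry. */
	static_assert(ARRAY_SIZE(enetc_rmon_ranges) == 7 + 1);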
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_mac_stats(hw, 0, mac_stats);
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+}
+
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -674,7 +783,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
@@ -761,6 +873,10 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1514e6a4a3ff..18ca1f42b1f7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -276,58 +276,60 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFMCAPR 0x1b38
#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
-/* MAC counters */
-#define ENETC_PM0_REOCT 0x8100
-#define ENETC_PM0_RALN 0x8110
-#define ENETC_PM0_RXPF 0x8118
-#define ENETC_PM0_RFRM 0x8120
-#define ENETC_PM0_RFCS 0x8128
-#define ENETC_PM0_RVLAN 0x8130
-#define ENETC_PM0_RERR 0x8138
-#define ENETC_PM0_RUCA 0x8140
-#define ENETC_PM0_RMCA 0x8148
-#define ENETC_PM0_RBCA 0x8150
-#define ENETC_PM0_RDRP 0x8158
-#define ENETC_PM0_RPKT 0x8160
-#define ENETC_PM0_RUND 0x8168
-#define ENETC_PM0_R64 0x8170
-#define ENETC_PM0_R127 0x8178
-#define ENETC_PM0_R255 0x8180
-#define ENETC_PM0_R511 0x8188
-#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1522 0x8198
-#define ENETC_PM0_R1523X 0x81A0
-#define ENETC_PM0_ROVR 0x81A8
-#define ENETC_PM0_RJBR 0x81B0
-#define ENETC_PM0_RFRG 0x81B8
-#define ENETC_PM0_RCNP 0x81C0
-#define ENETC_PM0_RDRNTP 0x81C8
-#define ENETC_PM0_TEOCT 0x8200
-#define ENETC_PM0_TOCT 0x8208
-#define ENETC_PM0_TCRSE 0x8210
-#define ENETC_PM0_TXPF 0x8218
-#define ENETC_PM0_TFRM 0x8220
-#define ENETC_PM0_TFCS 0x8228
-#define ENETC_PM0_TVLAN 0x8230
-#define ENETC_PM0_TERR 0x8238
-#define ENETC_PM0_TUCA 0x8240
-#define ENETC_PM0_TMCA 0x8248
-#define ENETC_PM0_TBCA 0x8250
-#define ENETC_PM0_TPKT 0x8260
-#define ENETC_PM0_TUND 0x8268
-#define ENETC_PM0_T64 0x8270
-#define ENETC_PM0_T127 0x8278
-#define ENETC_PM0_T255 0x8280
-#define ENETC_PM0_T511 0x8288
-#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1522 0x8298
-#define ENETC_PM0_T1523X 0x82A0
-#define ENETC_PM0_TCNP 0x82C0
-#define ENETC_PM0_TDFR 0x82D0
-#define ENETC_PM0_TMCOL 0x82D8
-#define ENETC_PM0_TSCOL 0x82E0
-#define ENETC_PM0_TLCOL 0x82E8
-#define ENETC_PM0_TECOL 0x82F0
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
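
Each MAC's counter block is 0x1000 bytes apart, so the macro argument simply selects the instance: ENETC_PM_TFRM(0) expands to 0x8220 (eMAC) and ENETC_PM_TFRM(1) to 0x9220 (pMAC). A sketch of a read parameterized this way:

	/* Sketch: one accessor serves both MAC instances. */
	static u32 example_tx_frames(struct enetc_hw *hw, bool pmac)
	{
		/* ENETC_PM_TFRM(0) == 0x8220, ENETC_PM_TFRM(1) == 0x9220 */
		return enetc_port_rd(hw, ENETC_PM_TFRM(pmac ? 1 : 0));
	}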
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
@@ -543,6 +545,7 @@ enum enetc_txbd_flags {
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
};
+#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
@@ -881,7 +884,7 @@ struct sgcl_data {
u32 bth;
u32 ct;
u32 cte;
- struct sgce sgcl[0];
+ struct sgce sgcl[];
};
#define ENETC_CBDR_FMI_MR BIT(0)
@@ -942,13 +945,13 @@ static inline u32 enetc_usecs_to_cycles(u32 usecs)
}
/* port time gating control register */
-#define ENETC_QBV_PTGCR_OFFSET 0x11a00
-#define ENETC_QBV_TGE BIT(31)
-#define ENETC_QBV_TGPE BIT(30)
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
/* Port time gating capability register */
-#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
-#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 70e6d97b380f..1c8f5cc6dec4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -147,7 +147,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* return all Fs if nothing was there */
if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index 15f37c5b8dc1..dafb26f81f95 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -69,7 +69,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
err_pci_enable:
@@ -88,7 +88,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
mdiobus_unregister(bus);
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ed16a5ac9ad0..bdf94335ee99 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -516,15 +516,34 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
int tc;
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
for (tc = 0; tc < 8; tc++)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
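
The + VLAN_ETH_HLEN adjustment converts taprio's queueMaxSDU (a payload-only figure) into the frame-size limit the PTCMSDUR registers expect: a 1500-byte max SDU becomes 1500 + 18 = 1518 bytes of frame. A sketch of driving it from a taprio offload request (the wrapper name is illustrative):

	/* Sketch: apply per-TC queueMaxSDU; unset TCs fall back to
	 * ENETC_MAC_MAXFRM_SIZE inside enetc_set_ptcmsdur().
	 */
	static void example_apply_max_sdu(struct enetc_hw *hw,
					  struct tc_taprio_qopt_offload *offload)
	{
		/* e.g. max_sdu[tc] == 1500 -> PTCMSDUR = 1500 + 18 */
		enetc_set_ptcmsdur(hw, offload->max_sdu);
	}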
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_reset_ptcmsdur(hw);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -709,6 +728,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -722,7 +748,30 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
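
enetc_qos_query_caps() lands in enetc_qos.c outside this chunk; under TC_QUERY_CAPS the core passes a struct tc_query_caps_base whose caps pointer is cast per qdisc type. A hedged sketch of the taprio branch, assuming the driver only advertises per-TC max-SDU support:

	/* Sketch: advertise taprio capabilities via TC_QUERY_CAPS. */
	int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
	{
		struct tc_query_caps_base *base = type_data;

		switch (base->type) {
		case TC_SETUP_QDISC_TAPRIO: {
			struct tc_taprio_caps *caps = base->caps;

			caps->supports_queue_max_sdu = true;
			return 0;
		}
		default:
			return -EOPNOTSUPP;
		}
	}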
static const struct net_device_ops enetc_ndev_ops = {
@@ -739,7 +788,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};
@@ -777,9 +826,6 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->priv_flags |= IFF_UNICAST_FLT;
- if (si->hw_features & ENETC_SI_F_QBV)
- priv->active_offloads |= ENETC_F_QBV;
-
if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
priv->active_offloads |= ENETC_F_QCI;
ndev->features |= NETIF_F_HW_TC;
@@ -934,18 +980,21 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
+static struct phylink_pcs *
+enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
static void enetc_pl_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- struct enetc_ndev_priv *priv;
enetc_mac_config(&pf->si->hw, state->interface);
-
- priv = netdev_priv(pf->si->ndev);
- if (pf->pcs)
- phylink_set_pcs(priv->phylink, pf->pcs);
}
static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
@@ -990,7 +1039,8 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
int idx;
priv = netdev_priv(pf->si->ndev);
- if (priv->active_offloads & ENETC_F_QBV)
+
+ if (pf->si->hw_features & ENETC_SI_F_QBV)
enetc_sched_speed_set(priv, speed);
if (!phylink_autoneg_inband(mode) &&
@@ -1062,6 +1112,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = enetc_pl_mac_select_pcs,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
@@ -1103,8 +1154,7 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
{
- if (priv->phylink)
- phylink_destroy(priv->phylink);
+ phylink_destroy(priv->phylink);
}
/* Initialize the entire shared memory for the flow steering entries
@@ -1271,16 +1321,20 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_msix;
}
- if (!of_get_phy_mode(node, &pf->if_mode)) {
- err = enetc_mdiobus_create(pf, node);
- if (err)
- goto err_mdiobus_create;
-
- err = enetc_phylink_create(priv, node);
- if (err)
- goto err_phylink_create;
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to read PHY mode\n");
+ goto err_phy_mode;
}
+ err = enetc_mdiobus_create(pf, node);
+ if (err)
+ goto err_mdiobus_create;
+
+ err = enetc_phylink_create(priv, node);
+ if (err)
+ goto err_phylink_create;
+
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
@@ -1292,6 +1346,7 @@ err_reg_netdev:
err_phylink_create:
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
+err_phy_mode:
enetc_free_msix(priv);
err_config_si:
err_alloc_msix:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 3555c12edb45..a842e1999122 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -11,14 +11,14 @@
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
- return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
- & ENETC_QBV_MAX_GCL_LEN_MASK;
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
}
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,36 +39,38 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
static int enetc_setup_taprio(struct net_device *ndev,
struct tc_taprio_qopt_offload *admin_conf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
- struct gce *gce;
dma_addr_t dma;
+ struct gce *gce;
u16 data_size;
u16 gcl_len;
+ void *tmp;
u32 tge;
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
return -EINVAL;
gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ tge = enetc_rd(hw, ENETC_PTGCR);
if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
+
+ priv->active_offloads &= ~ENETC_F_QBV;
+
return 0;
}
@@ -82,8 +84,9 @@ static int enetc_setup_taprio(struct net_device *ndev,
gcl_config = &cbd.gcl_conf;
data_size = struct_size(gcl_data, entry, gcl_len);
- gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!gcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&gcl_data);
+ if (!tmp)
return -ENOMEM;
gce = (struct gce *)(gcl_data + 1);
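The gcl_data buffer above now comes from enetc_cbd_alloc_data_mem(), whose body is outside this excerpt. A minimal sketch of what such a helper pair plausibly does, assuming a plain DMA-coherent allocation (the names, the struct device parameter, and the lack of alignment handling are simplifications, not the upstream signature):

static void *cbd_alloc_data_mem_sketch(struct device *dev,
				       struct enetc_cbd *cbd, int size,
				       dma_addr_t *dma)
{
	void *buf;

	/* Coherent memory replaces the per-call-site kzalloc() +
	 * dma_map_single() + dma_unmap_single() sequences deleted below.
	 */
	buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Publish buffer address and length through the control BD */
	cbd->addr[0] = cpu_to_le32(lower_32_bits(*dma));
	cbd->addr[1] = cpu_to_le32(upper_32_bits(*dma));
	cbd->length = cpu_to_le16(size);

	return buf;
}

static void cbd_free_data_mem_sketch(struct device *dev, int size,
				     void *buf, dma_addr_t dma)
{
	dma_free_coherent(dev, size, buf, dma);
}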
@@ -107,41 +110,33 @@ static int enetc_setup_taprio(struct net_device *ndev,
temp_gce->period = cpu_to_le32(temp_entry->interval);
}
- cbd.length = cpu_to_le16(data_size);
cbd.status_flags = 0;
- dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
- data_size, DMA_TO_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(gcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
- kfree(gcl_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
- return err;
+ if (err)
+ return err;
+
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
}
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int err;
int i;
@@ -151,16 +146,14 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
return -EBUSY;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? i : 0);
err = enetc_setup_taprio(ndev, taprio);
if (err)
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? 0 : i);
return err;
@@ -182,7 +175,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -203,15 +196,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -228,13 +221,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -243,7 +236,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interference_size is maxSizedFrame.
*
@@ -263,8 +256,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -284,10 +277,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -297,6 +290,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -307,17 +301,12 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
if (tc < 0 || tc >= priv->num_tx_rings)
return -EINVAL;
- /* Do not support TXSTART and TX CSUM offload simutaniously */
- if (ndev->features & NETIF_F_CSUM_MASK)
- return -EBUSY;
-
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -450,6 +439,7 @@ static struct actions_fwd enetc_act_fwd[] = {
};
static struct enetc_psfp epsfp = {
+ .dev_bitmap = 0,
.psfp_sfi_bitmap = NULL,
};
@@ -463,8 +453,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_cbd cbd = {.cmd = 0};
struct streamid_data *si_data;
struct streamid_conf *si_conf;
- u16 data_size;
dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
int port;
int err;
@@ -485,21 +476,11 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct streamid_data);
- si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!si_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&si_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
- dma = dma_map_single(&priv->si->pdev->dev, si_data,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto out;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
eth_broadcast_addr(si_data->dmac);
si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
@@ -520,11 +501,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
goto out;
/* Enable the entry overwrite again in case the space was flushed by hardware */
- memset(&cbd, 0, sizeof(cbd));
-
- cbd.index = cpu_to_le16((u16)sid->index);
- cbd.cmd = 0;
- cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
cbd.status_flags = 0;
si_conf->en = 0x80;
@@ -537,11 +513,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
memset(si_data, 0, data_size);
- cbd.length = cpu_to_le16(data_size);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
/* VIDM default to be 1.
* VID Match. If set (b1) then the VID must match, otherwise
* any VID is considered a match. VIDM setting is only used
@@ -561,10 +532,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
out:
- if (!dma_mapping_error(&priv->si->pdev->dev, dma))
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
-
- kfree(si_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -635,6 +603,7 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
struct sfi_counter_data *data_buf;
dma_addr_t dma;
u16 data_size;
+ void *tmp;
int err;
cbd.index = cpu_to_le16((u16)index);
@@ -643,21 +612,11 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct sfi_counter_data);
- data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
-
- dma = dma_map_single(&priv->si->pdev->dev, data_buf,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto exit;
- }
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
- cbd.length = cpu_to_le16(data_size);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&data_buf);
+ if (!tmp)
+ return -ENOMEM;
err = enetc_send_cmd(priv->si, &cbd);
if (err)
@@ -684,7 +643,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
data_buf->flow_meter_dropl;
exit:
- kfree(data_buf);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
return err;
}
@@ -726,6 +686,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
dma_addr_t dma;
u16 data_size;
int err, i;
+ void *tmp;
u64 now;
cbd.index = cpu_to_le16(sgi->index);
@@ -772,24 +733,10 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
-
- sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!sgcl_data)
- return -ENOMEM;
-
- cbd.length = cpu_to_le16(data_size);
-
- dma = dma_map_single(&priv->si->pdev->dev,
- sgcl_data, data_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(sgcl_data);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&sgcl_data);
+ if (!tmp)
return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
sgce = &sgcl_data->sgcl[0];
@@ -844,8 +791,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
exit:
- kfree(sgcl_data);
-
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -1074,6 +1020,46 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
return NULL;
}
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
struct flow_cls_offload *f)
{
@@ -1230,11 +1216,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
- if (entryp->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- err = -EOPNOTSUPP;
+ err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
+ if (err)
goto free_sfi;
- }
+
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
@@ -1529,6 +1514,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
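enetc_set_psfp() gives feature toggling one balanced enable/disable path. An illustrative caller, assuming it is reached from the PF's ndo_set_features handler and keyed off NETIF_F_HW_TC (that wiring is not shown in this excerpt):

static int enetc_pf_set_features_sketch(struct net_device *ndev,
					netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	int err;

	if (changed & NETIF_F_HW_TC) {
		/* Enable PSFP when NETIF_F_HW_TC is being turned on */
		err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
		if (err)
			return err;
	}

	return 0;
}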
@@ -1590,3 +1598,23 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
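enetc_qos_query_caps() is only reachable through the ndo_setup_tc dispatcher, which is not part of this hunk. A sketch of how the TC_QUERY_CAPS case would sit beside the existing qdisc offloads (case list abbreviated and assumed):

static int enetc_setup_tc_sketch(struct net_device *ndev,
				 enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_QUERY_CAPS:
		return enetc_qos_query_caps(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return enetc_setup_tc_taprio(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}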
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..dfcaac302e24 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..33f84a30e167 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,8 +16,12 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
+#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
@@ -343,8 +347,11 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
+
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -498,6 +505,9 @@ struct bufdesc_ex {
/* i.MX8MQ SoC integration mixes the wakeup interrupt signal into the "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -511,6 +521,12 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
+struct fec_enet_priv_txrx_info {
+ int offset;
+ struct page *page;
+ struct sk_buff *skb;
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -526,7 +542,14 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
};
struct fec_stop_mode_gpr {
@@ -579,6 +602,7 @@ struct fec_enet_private {
struct device_node *phy_node;
bool rgmii_txc_dly;
bool rgmii_rxc_dly;
+ bool rpm_active;
int link;
int full_duplex;
int speed;
@@ -608,6 +632,7 @@ struct fec_enet_private {
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
unsigned int rx_align;
@@ -634,6 +659,8 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
+ struct imx_sc_ipc *ipc_handle;
+
u64 ethtool_stats[];
};
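The reworked FEC_ENET_RX_FRSIZE is easiest to read with concrete numbers. On a typical 64-bit build with 4 KiB pages — an assumption, since both terms are configuration dependent — XDP_PACKET_HEADROOM is 256 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) works out to 320, giving:

	FEC_ENET_RX_FRSIZE = 4096 - 256 - 320 = 3520 bytes
	FEC_ENET_RX_FRPPG  = 4096 / 3520      = 1

so each page_pool page backs exactly one receive frame, with headroom reserved for XDP and tailroom for the shared info that build_skb() expects at the end of the buffer.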
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 796133de527e..f623c12eaf95 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -66,6 +66,8 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
#include <asm/cacheflush.h>
@@ -111,7 +113,8 @@ static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -155,6 +158,13 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_DELAYED_CLKS_SUPPORT,
};
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -188,6 +198,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx8qm-fec",
.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
}, {
+ .name = "s32v234-fec",
+ .driver_data = (kernel_ulong_t)&fec_s32v234_info,
+ }, {
/* sentinel */
}
};
@@ -203,6 +216,7 @@ enum imx_fec_type {
IMX6UL_FEC,
IMX8MQ_FEC,
IMX8QM_FEC,
+ S32V234_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -215,6 +229,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -409,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = FEC_ENET_RX_FRSIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
@@ -656,7 +713,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
bdp->cbd_datlen = cpu_to_fec16(size);
@@ -691,7 +748,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
@@ -718,7 +775,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
@@ -1167,6 +1224,34 @@ fec_restart(struct net_device *ndev)
}
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
+}
+
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -1182,6 +1267,8 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
BIT(stop_gpr->bit), 0);
} else if (pdata && pdata->sleep_mode_enable) {
pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
}
}
@@ -1407,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev)
fec_enet_tx_queue(ndev, i);
}
-static int
+static int __maybe_unused
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1427,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
return 0;
}
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1453,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
return true;
}
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
+{
+ struct page *new_page;
+ dma_addr_t phys_addr;
+
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ WARN_ON(!new_page);
+ rxq->rx_skb_info[index].page = new_page;
+
+ rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -1465,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb_new = NULL;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
@@ -1474,8 +1576,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
bool vlan_packet_rcvd = false;
u16 vlan_tag;
int index = 0;
- bool is_copybreak;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct page *page;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1527,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- skb = rxq->rx_skbuff[index];
+ page = rxq->rx_skb_info[index].page;
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+ fec_enet_update_cbd(rxq, bdp, index);
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
- need_swap);
- if (!is_copybreak) {
- skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (unlikely(!skb_new)) {
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- }
-
- prefetch(skb->data - NET_IP_ALIGN);
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
skb_put(skb, pkt_len - 4);
+ skb_mark_for_recycle(skb);
data = skb->data;
- if (!is_copybreak && need_swap)
+ if (need_swap)
swap_buffer(data, pkt_len);
#if !defined(CONFIG_M5272)
@@ -1606,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
- if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
- rxq->rx_skbuff[index] = skb_new;
- fec_enet_new_rxbdp(ndev, bdp, skb_new);
- }
-
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
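Condensing the new receive model above into a standalone sketch: the DMA page is wrapped in an skb without copying, and the skb is tagged so the page returns to the pool on free. The NULL check is an addition for robustness, not part of the hunk:

static struct sk_buff *rx_build_skb_sketch(struct page_pool *pool,
					   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Wrap the page in place; no memcpy, unlike the old copybreak path */
	skb = build_skb(page_address(page), PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(pool, page);
		return NULL;
	}

	skb_reserve(skb, FEC_ENET_XDP_HEADROOM);	/* skip XDP headroom */
	skb_put(skb, len);				/* len excludes the FCS */
	skb_mark_for_recycle(skb);	/* return the page to the pool on free */

	return skb;
}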
@@ -2104,13 +2190,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2294,9 +2380,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name,
+ strscpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2346,6 +2432,31 @@ static u32 fec_enet_register_offset[] = {
IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
+/* for i.MX6ul */
+static u32 fec_enet_register_offset_6ul[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
+ FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
+ FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
#else
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
@@ -2370,7 +2481,24 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+ u32 *reg_list;
+ u32 reg_cnt;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ reg_list = fec_enet_register_offset;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+ } else {
+ reg_list = fec_enet_register_offset_6ul;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
+ }
+#else
+ /* coldfire */
+ static u32 *reg_list = fec_enet_register_offset;
+ static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+#endif
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2379,8 +2507,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
- for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i];
+ for (i = 0; i < reg_cnt; i++) {
+ off = reg_list[i];
if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
!(fep->quirks & FEC_QUIRK_HAS_FRREG))
@@ -2797,7 +2925,7 @@ static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
int ret = 0;
if (enable) {
- ret = phy_init_eee(ndev->phydev, 0);
+ ret = phy_init_eee(ndev->phydev, false);
if (ret)
return ret;
@@ -2959,26 +3087,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->bd.base;
- for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
- }
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3068,24 +3189,31 @@ static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
goto err_alloc;
- if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
- dev_kfree_skb(skb);
- goto err_alloc;
- }
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skbuff[i] = skb;
+ rxq->rx_skb_info[i].page = page;
+ rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3210,6 +3338,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3251,6 +3382,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3559,14 +3693,14 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
- netif_set_gso_max_segs(ndev, FEC_MAX_TSO_SEGS);
+ netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
@@ -3731,7 +3865,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
- return ret;
+ goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
@@ -3817,6 +3951,10 @@ fec_probe(struct platform_device *pdev)
!of_property_read_bool(np, "fsl,err006687-workaround-present"))
fep->quirks |= FEC_QUIRK_ERR006687;
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@ -3866,17 +4004,21 @@ fec_probe(struct platform_device *pdev)
fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
/* enet_out is optional, depends on board */
- fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
- if (IS_ERR(fep->clk_enet_out))
- fep->clk_enet_out = NULL;
+ fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out)) {
+ ret = PTR_ERR(fep->clk_enet_out);
+ goto failed_clk;
+ }
fep->ptp_clk_on = false;
mutex_init(&fep->ptp_clk_mutex);
/* clk_ref is optional, depends on board */
- fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
- if (IS_ERR(fep->clk_ref))
- fep->clk_ref = NULL;
+ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref)) {
+ ret = PTR_ERR(fep->clk_ref);
+ goto failed_clk;
+ }
fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
/* clk_2x_txclk is optional, depends on board */
@@ -4010,6 +4152,7 @@ failed_rgmii_delay:
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
failed_stop_mode:
+failed_ipc_init:
failed_phy:
dev_id--;
failed_ioremap:
@@ -4054,6 +4197,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
rtnl_lock();
if (netif_running(ndev)) {
@@ -4078,6 +4222,15 @@ static int __maybe_unused fec_suspend(struct device *dev)
}
/* It's safe to disable clocks since interrupts are masked */
fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
}
rtnl_unlock();
@@ -4108,6 +4261,9 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..a7f4c3c29f3e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,7 +29,9 @@
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
@@ -99,13 +101,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -893,13 +895,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
* If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
@@ -920,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index b5497e308302..95f778cce98c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -15,6 +15,7 @@
#include <linux/phy.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
@@ -99,8 +100,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index af99017a5453..cffd9ad499dd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -101,7 +101,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
u32 val, tempval;
struct timespec64 ts;
u64 ns;
- val = 0;
if (fep->pps_enable == enable)
return 0;
@@ -136,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Adding the remaining nanoseconds
* to the current timer value brings it to the next full second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -583,7 +578,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
int ret;
fep->ptp_caps.owner = THIS_MODULE;
- strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
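The PPS hunk above replaces a hand-rolled FEC_ATIME_CTRL capture sequence with the cyclecounter's own read hook. Isolated as a sketch, the cycles-to-1588-timestamp step it relies on is:

static u64 fec_ptp_now_ns_sketch(struct fec_enet_private *fep)
{
	/* Raw free-running counter value via the registered cyclecounter */
	u64 cycles = fep->cc.read(&fep->cc);

	/* Scale cycles into a PTP nanosecond timestamp */
	return timecounter_cyc2time(&fep->tc, cycles);
}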
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 8f0db61cb1f6..9d85fb136e34 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f2ede1360f03..2ea575a46675 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1950a8936bc0..6617932fd3fd 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_dtsec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -327,7 +301,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Number of individual addresses in registers for this station */
@@ -840,73 +814,45 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->maximum_frame = new_val;
-
- return 0;
-}
-
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
-
- return 0;
-}
-
-static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) &
- ~TCTRL_GTS, &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) &
- ~RCTRL_GRS, &regs->rctrl);
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}
-static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_stop(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
/* Graceful stop - Assert the graceful Rx stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
- if (dtsec->fm_rev_info.major == 2) {
- /* Workaround for dTSEC Errata A002 */
- usleep_range(100, 200);
- } else {
- /* Workaround for dTSEC Errata A004839 */
- usleep_range(10, 50);
- }
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
}
/* Graceful stop - Assert the graceful Tx stop bit */
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2) {
- /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
- pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
- } else {
- tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
- iowrite32be(tmp, &regs->tctrl);
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
- /* Workaround for dTSEC Errata A0012, A0014 */
- usleep_range(10, 50);
- }
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
}
}
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+static int dtsec_enable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -916,58 +862,42 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
/* Enable */
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp |= MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= MACCFG1_TX_EN;
-
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
iowrite32be(tmp, &regs->maccfg1);
/* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+static void dtsec_disable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
/* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp &= ~MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~MACCFG1_TX_EN;
-
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
iowrite32be(tmp, &regs->maccfg1);
-
- return 0;
}
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
- u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
@@ -989,26 +919,20 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
if (en)
@@ -1017,25 +941,18 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
-
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
* Station address has to be swapped (big endian to little endian) */
@@ -1043,12 +960,13 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_add
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct eth_hash_entry *hash_entry;
@@ -1114,7 +1032,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -1133,7 +1051,7 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
@@ -1158,7 +1076,8 @@ int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct list_head *pos;
@@ -1229,7 +1148,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -1258,21 +1177,15 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg2);
@@ -1293,12 +1206,12 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
tmp &= ~DTSEC_ECNTRL_R100M;
iowrite32be(tmp, &regs->ecntrl);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_restart_autoneg(struct fman_mac *dtsec)
+static int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
u16 tmp_reg16;
@@ -1316,20 +1229,31 @@ int dtsec_restart_autoneg(struct fman_mac *dtsec)
return 0;
}
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+static void adjust_link_dtsec(struct mac_device *mac_dev)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
- *mac_version = ioread32be(&regs->tsec_id);
+ return;
+ }
- return 0;
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable)
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
@@ -1382,7 +1306,7 @@ int dtsec_set_exception(struct fman_mac *dtsec,
return 0;
}
-int dtsec_init(struct fman_mac *dtsec)
+static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
@@ -1476,7 +1400,7 @@ int dtsec_init(struct fman_mac *dtsec)
return 0;
}
-int dtsec_free(struct fman_mac *dtsec)
+static int dtsec_free(struct fman_mac *dtsec)
{
free_init_resources(dtsec);
@@ -1487,13 +1411,11 @@ int dtsec_free(struct fman_mac *dtsec)
return 0;
}
-struct fman_mac *dtsec_config(struct fman_mac_params *params)
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *dtsec;
struct dtsec_cfg *dtsec_drv_param;
- void __iomem *base_addr;
-
- base_addr = params->base_addr;
/* allocate memory for the dTSEC data structure */
dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
@@ -1510,10 +1432,10 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = base_addr;
- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
dtsec->max_speed = params->max_speed;
- dtsec->phy_if = params->phy_if;
+ dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
DTSEC_IMASK_RXCEN |
@@ -1530,34 +1452,87 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
- dtsec->dev_id = params->dev_id;
+ dtsec->dev_id = mac_dev;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
- if (!params->internal_phy_node) {
+ /* Save FMan revision */
+ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+ return dtsec;
+
+err_dtsec:
+ kfree(dtsec);
+ return NULL;
+}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_dtsec;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node) {
pr_err("TBI PHY node is not available\n");
- goto err_dtsec_drv_param;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
+ dtsec->tbiphy = of_phy_find_device(phy_node);
if (!dtsec->tbiphy) {
pr_err("of_phy_find_device (TBI PHY) failed\n");
- goto err_dtsec_drv_param;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
-
put_device(&dtsec->tbiphy->mdio.dev);
- /* Save FMan revision */
- fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
- return dtsec;
+ /* For the 1G MAC, disable the MIB counters overflow interrupt by default */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
-err_dtsec_drv_param:
- kfree(dtsec_drv_param);
-err_dtsec:
- kfree(dtsec);
- return NULL;
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
}
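
Every dtsec path above follows the same shape: callers no longer sample RCTRL_GRS and TCTRL_GTS to build an enum comm_mode before quiescing the MAC, because graceful_stop()/graceful_start() now halt and resume both directions unconditionally. A minimal sketch of the resulting pattern, assuming the helpers keep their GRS/GTS semantics; the reconfigure body is illustrative, not from this diff:

	/* Sketch only: the call shape shared by dtsec_accept_rx_pause_frames(),
	 * dtsec_modify_mac_address() and dtsec_adjust_link() after this change.
	 */
	static int dtsec_reconfigure_sketch(struct fman_mac *dtsec)
	{
		graceful_stop(dtsec);	/* assert GRS/GTS: quiesce RX and TX */

		/* ... reprogram maccfg1/maccfg2/ecntrl as needed ... */

		graceful_start(dtsec);	/* deassert GRS/GTS: resume RX and TX */
		return 0;
	}
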
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 68512c3bd6e5..8c72d280c51a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __DTSEC_H
@@ -35,27 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *dtsec_config(struct fman_mac_params *params);
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
-int dtsec_adjust_link(struct fman_mac *dtsec,
- u16 speed);
-int dtsec_restart_autoneg(struct fman_mac *dtsec);
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_init(struct fman_mac *dtsec);
-int dtsec_free(struct fman_mac *dtsec);
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable);
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __DTSEC_H */
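
The header now exports a single dtsec_initialization() in place of the fifteen-odd per-operation functions, and the memac and tgec headers below get the same shape. One plausible consumer is an of_device_id match table keyed on the MAC compatible, so probe can pick the right init function; a sketch under that assumption, since the table itself does not appear in this diff:

	typedef int (*mac_init_fn)(struct mac_device *mac_dev,
				   struct device_node *mac_node,
				   struct fman_mac_params *params);

	/* Hypothetical dispatch table; the compatible strings follow the FMan
	 * binding, but the .data wiring here is an assumption.
	 */
	static const struct of_device_id mac_match_sketch[] = {
		{ .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
		{ .compatible = "fsl,fman-xgec",  .data = tgec_initialization },
		{ .compatible = "fsl,fman-memac", .data = memac_initialization },
		{}
	};
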
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index e1bdfed16134..e73f6ef3c6ee 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
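
fman_keygen.c above, and the muram, port, sp and mac files below, carry the identical mechanical change: roughly 27 lines of dual BSD/GPL boilerplate collapse into one SPDX tag. Note the comment style flips with the file type, exactly as in the hunks of this series:

	// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
	/* ^ the form used on line 1 of the .c sources in this series */

	/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
	/* ^ the form used on line 1 of the headers */
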
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
index c4640de3f4cb..2cb0df453074 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.h
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __KEYGEN_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 19f327efdaff..65887a3160d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -41,6 +41,7 @@
#include <linux/if_ether.h>
struct fman_mac;
+struct mac_device;
/* Ethernet Address */
typedef u8 enet_addr_t[ETH_ALEN];
@@ -75,16 +76,6 @@ typedef u8 enet_addr_t[ETH_ALEN];
#define ETH_HASH_ENTRY_OBJ(ptr) \
hlist_entry_safe(ptr, struct eth_hash_entry, node)
-/* Enumeration (bit flags) of communication modes (Transmit,
- * receive or both).
- */
-enum comm_mode {
- COMM_MODE_NONE = 0, /* No transmit/receive communication */
- COMM_MODE_RX = 1, /* Only receive communication */
- COMM_MODE_TX = 2, /* Only transmit communication */
- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
-};
-
/* FM MAC Exceptions */
enum fman_mac_exceptions {
FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
@@ -168,30 +159,23 @@ struct eth_hash_entry {
struct list_head node;
};
-typedef void (fman_mac_exception_cb)(void *dev_id,
- enum fman_mac_exceptions exceptions);
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
/* FMan MAC config input */
struct fman_mac_params {
- /* Base of memory mapped FM MAC registers */
- void __iomem *base_addr;
- /* MAC address of device; First octet is sent first */
- enet_addr_t addr;
/* MAC ID; numbering of dTSEC and 1G-mEMAC:
* 0 - FM_MAX_NUM_OF_1G_MACS;
* numbering of 10G-MAC (TGEC) and 10G-mEMAC:
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* PHY interface */
- phy_interface_t phy_if;
/* Note that the speed should indicate the maximum rate that
* this MAC should support rather than the actual speed;
*/
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
* SGMII/QSGMII interface with 1000BaseX auto-negotiation between MAC
@@ -200,8 +184,6 @@ struct fman_mac_params {
* synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
*/
bool basex_if;
- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
- struct device_node *internal_phy_node;
};
struct eth_hash_t {
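
The callback typedef above is the interesting part of this header change: dev_id is now a typed struct mac_device * rather than a void * cookie, so handlers drop their cast. A sketch of a conforming handler, assuming mac_device keeps the dev member used elsewhere in this series; the body is illustrative:

	static void mac_exception_sketch(struct mac_device *mac_dev,
					 enum fman_mac_exceptions ex)
	{
		/* No "struct mac_device *mac_dev = dev_id;" cast anymore */
		dev_warn(mac_dev->dev, "FMan MAC exception %d\n", ex);
	}
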
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 2216b7f51d26..32d26cf17843 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_memac.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/io.h>
@@ -337,7 +311,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Pointer to driver's global address hash table */
@@ -712,7 +686,7 @@ static bool is_init_done(struct memac_cfg *memac_drv_params)
return false;
}
-int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_enable(struct fman_mac *memac)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -721,36 +695,26 @@ int memac_enable(struct fman_mac *memac, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
-
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+static void memac_disable(struct fman_mac *memac)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
-
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -769,7 +733,7 @@ int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
return 0;
}
-int memac_adjust_link(struct fman_mac *memac, u16 speed)
+static int memac_adjust_link(struct fman_mac *memac, u16 speed)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -809,39 +773,26 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
return 0;
}
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->max_frame_length = new_val;
-
- return 0;
-}
-
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->reset_on_init = enable;
-
- return 0;
-}
-
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
+static void adjust_link_memac(struct mac_device *mac_dev)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- memac->memac_drv_param->fixed_link = fixed_link;
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
- return 0;
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time)
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -878,7 +829,7 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
return 0;
}
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -897,7 +848,8 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
@@ -907,7 +859,8 @@ int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_add
return 0;
}
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry;
@@ -940,7 +893,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_allmulti(struct fman_mac *memac, bool enable)
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
@@ -963,12 +916,13 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
-int memac_set_tstamp(struct fman_mac *memac, bool enable)
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
{
return 0; /* Always enabled. */
}
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -1001,8 +955,8 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable)
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
{
u32 bit_mask = 0;
@@ -1024,13 +978,13 @@ int memac_set_exception(struct fman_mac *memac,
return 0;
}
-int memac_init(struct fman_mac *memac)
+static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
u8 i;
enet_addr_t eth_addr;
bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link;
+ struct fixed_phy_status *fixed_link = NULL;
int err;
u32 reg32 = 0;
@@ -1141,7 +1095,7 @@ int memac_init(struct fman_mac *memac)
return 0;
}
-int memac_free(struct fman_mac *memac)
+static int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
@@ -1154,13 +1108,12 @@ int memac_free(struct fman_mac *memac)
return 0;
}
-struct fman_mac *memac_config(struct fman_mac_params *params)
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *memac;
struct memac_cfg *memac_drv_param;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the m_emac data structure */
memac = kzalloc(sizeof(*memac), GFP_KERNEL);
if (!memac)
@@ -1178,38 +1131,121 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
set_dflts(memac_drv_param);
- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- memac->regs = base_addr;
+ memac->regs = mac_dev->vaddr;
memac->max_speed = params->max_speed;
- memac->phy_if = params->phy_if;
+ memac->phy_if = mac_dev->phy_if;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
- memac->dev_id = params->dev_id;
+ memac->dev_id = mac_dev;
memac->fm = params->fm;
memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
+ return memac;
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct device_node *phy_node;
+ struct fixed_phy_status *fixed_link = NULL;
+ struct fman_mac *memac;
+
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_memac;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+
+ if (params->max_speed == SPEED_10000)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- if (!params->internal_phy_node) {
+ phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
+ if (!phy_node) {
pr_err("PCS PHY node is not available\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ memac->pcsphy = of_phy_find_device(phy_node);
if (!memac->pcsphy) {
pr_err("of_phy_find_device (PCS PHY) failed\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
}
- return memac;
+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_fm_mac_free;
+
+ fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
+ if (!fixed_link) {
+ err = -ENOMEM;
+ goto _return_fm_mac_free;
+ }
+
+ mac_dev->phy_node = of_node_get(mac_node);
+ phy = of_phy_find_device(mac_dev->phy_node);
+ if (!phy) {
+ err = -EINVAL;
+ of_node_put(mac_dev->phy_node);
+ goto _return_fixed_link_free;
+ }
+
+ fixed_link->link = phy->link;
+ fixed_link->speed = phy->speed;
+ fixed_link->duplex = phy->duplex;
+ fixed_link->pause = phy->pause;
+ fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
+ memac->memac_drv_param->fixed_link = fixed_link;
+ }
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fixed_link_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fixed_link_free:
+ kfree(fixed_link);
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+_return:
+ return err;
}
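
memac_initialization() now absorbs what memac_cfg_max_frame_len(), memac_cfg_reset_on_init() and memac_cfg_fixed_link() used to expose, including the fixed-link probing. Condensed to its core, the fixed-link half is the standard OF idiom; a sketch with the error unwinding trimmed, helper name hypothetical, OF calls as used in the hunk above:

	static int memac_fixed_link_sketch(struct mac_device *mac_dev,
					   struct device_node *mac_node,
					   struct fixed_phy_status *out)
	{
		struct phy_device *phy;
		int err;

		if (mac_dev->phy_node || !of_phy_is_fixed_link(mac_node))
			return 0;	/* nothing to do */

		err = of_phy_register_fixed_link(mac_node);
		if (err)
			return err;

		mac_dev->phy_node = of_node_get(mac_node);
		phy = of_phy_find_device(mac_dev->phy_node);
		if (!phy)
			return -EINVAL;

		out->link = phy->link;		/* snapshot the fixed state */
		out->speed = phy->speed;
		out->duplex = phy->duplex;
		put_device(&phy->mdio.dev);	/* drop of_phy_find_device()'s ref */
		return 0;
	}
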
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 3820f7a22983..5a3a14f9684f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
@@ -38,26 +11,10 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
-struct fman_mac *memac_config(struct fman_mac_params *params);
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
-int memac_adjust_link(struct fman_mac *memac, u16 speed);
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link);
-int memac_enable(struct fman_mac *memac, enum comm_mode mode);
-int memac_disable(struct fman_mac *memac, enum comm_mode mode);
-int memac_init(struct fman_mac *memac);
-int memac_free(struct fman_mac *memac);
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time);
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable);
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_set_allmulti(struct fman_mac *memac, bool enable);
-int memac_set_tstamp(struct fman_mac *memac, bool enable);
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 7ad317e622bc..f557d68e5b76 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#include "fman_muram.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 453bf849eee1..3643af61bae2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
+
#ifndef __FM_MURAM_EXT
#define __FM_MURAM_EXT
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 4c9d05c45c03..ab90fe2bee5e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 82f12661a46d..4917fe8f0617 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_PORT_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index 248f5bcca468..0fac60aa5283 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fman_sp.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
index 820b7f63088f..a62dd21c81f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.h
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -1,32 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_SP_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 311c1906e044..5a4be54ad459 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_tgec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -206,7 +180,7 @@ struct fman_mac {
/* MAC address of device; */
u64 addr;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* pointer to driver's global address hash table */
@@ -419,7 +393,7 @@ static bool is_init_done(struct tgec_cfg *cfg)
return false;
}
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_enable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -428,34 +402,25 @@ int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+static void tgec_disable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(tgec->cfg));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -473,18 +438,9 @@ int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
return 0;
}
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
-{
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
- tgec->cfg->max_frame_length = new_val;
-
- return 0;
-}
-
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct tgec_regs __iomem *regs = tgec->regs;
@@ -496,7 +452,7 @@ int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
return 0;
}
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -514,7 +470,8 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
@@ -525,7 +482,8 @@ int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_add
return 0;
}
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
@@ -562,7 +520,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
@@ -585,7 +543,7 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -605,7 +563,8 @@ int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -642,20 +601,15 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+static void tgec_adjust_link(struct mac_device *mac_dev)
{
- struct tgec_regs __iomem *regs = tgec->regs;
-
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
- *mac_version = ioread32be(&regs->tgec_id);
-
- return 0;
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
}
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable)
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
@@ -681,7 +635,7 @@ int tgec_set_exception(struct fman_mac *tgec,
return 0;
}
-int tgec_init(struct fman_mac *tgec)
+static int tgec_init(struct fman_mac *tgec)
{
struct tgec_cfg *cfg;
enet_addr_t eth_addr;
@@ -764,7 +718,7 @@ int tgec_init(struct fman_mac *tgec)
return 0;
}
-int tgec_free(struct fman_mac *tgec)
+static int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
@@ -774,13 +728,12 @@ int tgec_free(struct fman_mac *tgec)
return 0;
}
-struct fman_mac *tgec_config(struct fman_mac_params *params)
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *tgec;
struct tgec_cfg *cfg;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the TGEC data structure */
tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
if (!tgec)
@@ -798,8 +751,8 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = base_addr;
- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
@@ -819,7 +772,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
- tgec->dev_id = params->dev_id;
+ tgec->dev_id = mac_dev;
tgec->fm = params->fm;
/* Save FMan revision */
@@ -827,3 +780,52 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
return tgec;
}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = tgec_adjust_link;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
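
Common to all three MACs in this series: enable()/disable() lose the enum comm_mode argument, and disable() becomes void, warning instead of returning an error nobody checked. Callers therefore reduce to the following shape; function names are hypothetical:

	static int mac_open_sketch(struct mac_device *mac_dev)
	{
		return mac_dev->enable(mac_dev->fman_mac);	/* RX and TX together */
	}

	static void mac_stop_sketch(struct mac_device *mac_dev)
	{
		mac_dev->disable(mac_dev->fman_mac);	/* void: cannot fail */
	}
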
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index b28b20b26148..768b8d165e05 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __TGEC_H
@@ -35,23 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *tgec_config(struct fman_mac_params *params);
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_init(struct fman_mac *tgec);
-int tgec_free(struct fman_mac *tgec);
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable);
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 39ae965cd4f6..13e67f2864be 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -54,20 +28,12 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
u8 cell_index;
struct fman *fman;
- struct device_node *internal_phy_node;
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
- struct fixed_phy_status *fixed_link;
u16 speed;
- u16 max_speed;
-
- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};
struct mac_address {
@@ -75,222 +41,21 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
{
- struct mac_device *mac_dev;
- struct mac_priv_s *priv;
-
- mac_dev = handle;
- priv = mac_dev->priv;
-
if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
}
- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
__func__, ex);
}
-static int set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!params->base_addr)
- return -ENOMEM;
-
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
- params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
- params->exception_cb = mac_exception;
- params->event_cb = mac_exception;
- params->dev_id = mac_dev;
- params->internal_phy_node = priv->internal_phy_node;
-
- return 0;
-}
-
-static int tgec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = tgec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 10G MAC, disable Tx ECC exception */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_10G_TX_ECC_ER, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- tgec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int dtsec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = dtsec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- dtsec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int memac_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- if (priv->max_speed == SPEED_10000)
- params.phy_if = PHY_INTERFACE_MODE_XGMII;
-
- mac_dev->fman_mac = memac_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan MEMAC\n");
-
- goto _return;
-
-_return_fm_mac_free:
- memac_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int start(struct mac_device *mac_dev)
-{
- int err;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
- if (!err && phy_dev)
- phy_start(phy_dev);
-
- return err;
-}
-
-static int stop(struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
-
- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-}
-
-static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
{
struct mac_priv_s *priv;
struct mac_address *old_addr, *tmp;
@@ -424,109 +189,6 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
}
EXPORT_SYMBOL(fman_get_pause_cfg);
-static void adjust_link_void(struct mac_device *mac_dev)
-{
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void setup_dtsec(struct mac_device *mac_dev)
-{
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
- mac_dev->set_allmulti = dtsec_set_allmulti;
- mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
-}
-
-static void setup_tgec(struct mac_device *mac_dev)
-{
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
- mac_dev->set_allmulti = tgec_set_allmulti;
- mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
-}
-
-static void setup_memac(struct mac_device *mac_dev)
-{
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
- mac_dev->set_allmulti = memac_set_allmulti;
- mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
-}
-
#define DTSEC_SUPPORTED \
(SUPPORTED_10baseT_Half \
| SUPPORTED_10baseT_Full \
@@ -577,7 +239,7 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
- pdev->dev.parent = priv->dev;
+ pdev->dev.parent = mac_dev->dev;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
@@ -601,9 +263,9 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec" },
- { .compatible = "fsl,fman-xgec" },
- { .compatible = "fsl,fman-memac" },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
MODULE_DEVICE_TABLE(of, mac_match);
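
The .data members added to mac_match above let mac_probe() pick the per-MAC init routine with a single of_device_get_match_data() call, replacing the chain of of_device_is_compatible() checks removed further down. A minimal userspace sketch of the same dispatch pattern, with simplified stand-in types rather than the kernel's struct of_device_id:

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for struct of_device_id and the init callbacks. */
typedef int (*init_fn)(void);

static int dtsec_init_stub(void) { puts("dTSEC init"); return 0; }
static int tgec_init_stub(void)  { puts("XGEC init");  return 0; }

struct match_id {
	const char *compatible;
	init_fn data;			/* per-variant callback, like .data above */
};

static const struct match_id match[] = {
	{ "fsl,fman-dtsec", dtsec_init_stub },
	{ "fsl,fman-xgec",  tgec_init_stub  },
	{ NULL, NULL }
};

/* Analogous to of_device_get_match_data(): resolve the callback once. */
static init_fn get_match_data(const char *compat)
{
	for (const struct match_id *m = match; m->compatible; m++)
		if (!strcmp(m->compatible, compat))
			return m->data;
	return NULL;
}

int main(void)
{
	init_fn init = get_match_data("fsl,fman-xgec");

	return init ? init() : -1;
}
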
@@ -611,50 +273,32 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
struct platform_device *of_dev;
- struct resource res;
struct mac_priv_s *priv;
+ struct fman_mac_params params;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!mac_dev)
+ return -ENOMEM;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!priv)
+ return -ENOMEM;
/* Save private information */
mac_dev->priv = priv;
- priv->dev = dev;
-
- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
- setup_dtsec(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "tbi-handle", 0);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
- setup_tgec(mac_dev);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
- setup_memac(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "pcsphy-handle", 0);
- } else {
- dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
- mac_node);
- err = -EINVAL;
- goto _return;
- }
+ mac_dev->dev = dev;
INIT_LIST_HEAD(&priv->mc_addr_list);
@@ -663,8 +307,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -694,42 +337,34 @@ static int mac_probe(struct platform_device *_of_dev)
of_node_put(dev_node);
/* Get the address of the memory mapped registers */
- err = of_address_to_resource(mac_node, 0, &res);
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
- goto _return_of_get_parent;
+ mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
+ if (!mac_dev->res) {
+ dev_err(dev, "could not get registers\n");
+ return -EINVAL;
}
- mac_dev->res = __devm_request_region(dev,
- fman_get_mem_region(priv->fman),
- res.start, resource_size(&res),
- "mac");
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
- goto _return_of_get_parent;
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
+ mac_dev->res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ return err;
}
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!priv->vaddr) {
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
+ if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- err = -EIO;
- goto _return_of_get_parent;
+ return -EIO;
}
- if (!of_device_is_available(mac_node)) {
- err = -ENODEV;
- goto _return_of_get_parent;
- }
+ if (!of_device_is_available(mac_node))
+ return -ENODEV;
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
priv->cell_index = (u8)val;
@@ -743,15 +378,13 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = nph;
- goto _return_of_get_parent;
+ return nph;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -760,8 +393,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_node_put;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -793,7 +425,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->phy_if = phy_if;
priv->speed = phy2speed[mac_dev->phy_if];
- priv->max_speed = priv->speed;
+ params.max_speed = priv->speed;
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
@@ -801,7 +433,7 @@ static int mac_probe(struct platform_device *_of_dev)
SUPPORTED_100baseT_Half);
/* Gigabit support (no half-duplex) */
- if (priv->max_speed == 1000)
+ if (params.max_speed == 1000)
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
@@ -810,42 +442,18 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the rest of the PHY information */
mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
-
- err = of_phy_register_fixed_link(mac_node);
- if (err)
- goto _return_of_get_parent;
-
- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
- GFP_KERNEL);
- if (!priv->fixed_link) {
- err = -ENOMEM;
- goto _return_of_get_parent;
- }
-
- mac_dev->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
- of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
- }
- priv->fixed_link->link = phy->link;
- priv->fixed_link->speed = phy->speed;
- priv->fixed_link->duplex = phy->duplex;
- priv->fixed_link->pause = phy->pause;
- priv->fixed_link->asym_pause = phy->asym_pause;
+ params.basex_if = false;
+ params.mac_id = priv->cell_index;
+ params.fm = (void *)priv->fman;
+ params.exception_cb = mac_exception;
+ params.event_cb = mac_exception;
- put_device(&phy->mdio.dev);
- }
-
- err = mac_dev->init(mac_dev);
+ err = init(mac_dev, mac_node, &params);
if (err < 0) {
dev_err(dev, "mac_dev->init() = %d\n", err);
of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
+ return err;
}
/* pause frame autonegotiation enabled */
@@ -872,22 +480,28 @@ static int mac_probe(struct platform_device *_of_dev)
priv->eth_dev = NULL;
}
- goto _return;
+ return err;
_return_of_node_put:
of_node_put(dev_node);
-_return_of_get_parent:
- kfree(priv->fixed_link);
-_return:
return err;
}
+static int mac_remove(struct platform_device *pdev)
+{
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ return 0;
+}
+
static struct platform_driver mac_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mac_match,
},
.probe = mac_probe,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index daa285a9b8b2..13b69ca5f00c 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MAC_H
@@ -45,6 +19,8 @@ struct fman_mac;
struct mac_priv_s;
struct mac_device {
+ void __iomem *vaddr;
+ struct device *dev;
struct resource *res;
u8 addr[ETH_ALEN];
struct fman_port *port[2];
@@ -52,6 +28,7 @@ struct mac_device {
struct phy_device *phy_dev;
phy_interface_t phy_if;
struct device_node *phy_node;
+ struct net_device *net_dev;
bool autoneg_pause;
bool rx_pause_req;
@@ -61,9 +38,8 @@ struct mac_device {
bool promisc;
bool allmulti;
- int (*init)(struct mac_device *mac_dev);
- int (*start)(struct mac_device *mac_dev);
- int (*stop)(struct mac_device *mac_dev);
+ int (*enable)(struct fman_mac *mac_dev);
+ void (*disable)(struct fman_mac *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
@@ -81,6 +57,8 @@ struct mac_device {
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*update_speed)(struct mac_device *mac_dev, int speed);
+
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
};
@@ -97,5 +75,6 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index bacf25318f87..8844a9a04fcf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -791,7 +791,7 @@ static int fs_enet_close(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int fs_get_regs_len(struct net_device *dev)
@@ -883,9 +883,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-extern int fs_mii_connect(struct net_device *dev);
-extern void fs_mii_disconnect(struct net_device *dev);
-
/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
@@ -1020,7 +1017,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
INIT_WORK(&fep->timeout_work, fs_timeout_work);
- netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
+ netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi,
+ fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 5ff2634bee2f..cb419aef8d1b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -201,7 +201,7 @@ void fs_enet_platform_cleanup(void);
/* access macros */
#if defined(CONFIG_CPM1)
-/* for a a CPM1 __raw_xxx's are sufficient */
+/* for a CPM1 __raw_xxx's are sufficient */
#define __cbd_out32(addr, x) __raw_writel(x, addr)
#define __cbd_out16(addr, x) __raw_writew(x, addr)
#define __cbd_in32(addr) __raw_readl(addr)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c210d0f..61f4b6e50d29 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 152f4d83765a..d37d7a19a759 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *);
+ int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
@@ -136,7 +136,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
}
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->dev.of_node);
+ clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 206b7a35eaf5..b2def295523a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1944,6 +1944,7 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
+ skb_tx_timestamp(skb);
netdev_tx_sent_queue(txq, bytes_sent);
gfar_wmb();
@@ -3232,9 +3233,9 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx_sq, 2);
+ gfar_poll_rx_sq);
+ netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index ca5e14f908fe..68b59d3202e3 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -52,9 +52,6 @@ struct ethtool_rx_list {
unsigned int count;
};
-/* The maximum number of packets to be handled in one call of gfar_poll */
-#define GFAR_DEV_WEIGHT 64
-
/* Length for FCB */
#define GMAC_FCB_LEN 8
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index ff756265d58f..b2b0d3c26fcc 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -163,7 +163,7 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}
/* Return the length of the register structure */
@@ -1457,6 +1457,7 @@ static int gfar_get_ts_info(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
@@ -1464,6 +1465,7 @@ static int gfar_get_ts_info(struct net_device *dev,
ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
}
@@ -1473,7 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 823221c912ab..7a4cb4f07c32 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3712,7 +3712,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
- netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll);
dev->mtu = 1500;
dev->max_mtu = 1518;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 69b2b98b1525..601beb93d3b3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -337,8 +337,8 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 5b8b9bcf41a2..d7d39a58cd80 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/acpi_mdio.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mdio.h>
@@ -36,9 +37,10 @@ struct tgec_mdio_controller {
} __packed;
#define MDIO_STAT_ENC BIT(6)
-#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_CLKDIV(x) (((x) & 0x1ff) << 7)
#define MDIO_STAT_BSY BIT(0)
#define MDIO_STAT_RD_ER BIT(1)
+#define MDIO_STAT_PRE_DIS BIT(5)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS BIT(10)
@@ -50,7 +52,10 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
+ struct clk *enet_clk;
+ u32 mdc_freq;
bool is_little_endian;
+ bool has_a009885;
bool has_a011043;
};
@@ -186,10 +191,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ unsigned long flags;
uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
- uint16_t value;
int ret;
bool endian = priv->is_little_endian;
@@ -221,26 +226,80 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
/* Initiate the read */
xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
- return ret;
+ goto irq_restore;
/* Return all Fs if nothing was there */
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
!priv->has_a011043) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
- return 0xffff;
+ ret = 0xffff;
+ } else {
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
- value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
- dev_dbg(&bus->dev, "read %04x\n", value);
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
- return value;
+ return ret;
+}
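
The A-009885 workaround above masks local interrupts because the data register must be read back within 16 MDC cycles of MDIO_STAT_BSY clearing. To see how tight that window is, a back-of-the-envelope calculation, assuming an illustrative 2.5 MHz MDC (the real rate depends on the divider):

#include <stdio.h>

int main(void)
{
	double mdc_hz = 2.5e6;		/* assumed MDC frequency */
	double window_us = 16.0 / mdc_hz * 1e6;

	/* 16 MDC cycles at 2.5 MHz is only 6.4 us, so a single interrupt
	 * taken between the BSY poll and the data read can blow the window.
	 */
	printf("read-back window: %.1f us\n", window_us);
	return 0;
}
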
+
+static int xgmac_mdio_set_mdc_freq(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat, div;
+
+ if (device_property_read_u32(dev, "clock-frequency", &priv->mdc_freq))
+ return 0;
+
+ priv->enet_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->enet_clk)) {
+ dev_err(dev, "Input clock unknown, not changing MDC frequency");
+ return PTR_ERR(priv->enet_clk);
+ }
+
+ div = ((clk_get_rate(priv->enet_clk) / priv->mdc_freq) - 1) / 2;
+ if (div < 5 || div > 0x1ff) {
+ dev_err(dev, "Requested MDC frequency is out of range, ignoring");
+ return -EINVAL;
+ }
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat &= ~MDIO_STAT_CLKDIV(0x1ff);
+ mdio_stat |= MDIO_STAT_CLKDIV(div);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+ return 0;
+}
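
xgmac_mdio_set_mdc_freq() derives the divider as ((rate / freq) - 1) / 2 and rejects values outside 5..0x1ff. A worked example under an assumed 500 MHz input clock (the rate here is illustrative, not a property of any particular SoC):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 500000000;	/* assumed enet clock, Hz */
	unsigned long freq = 2500000;	/* requested MDC, Hz */
	unsigned long div = ((rate / freq) - 1) / 2;	/* -> 99 */

	if (div < 5 || div > 0x1ff) {
		puts("requested MDC frequency out of range");
		return 1;
	}
	/* Inverting the driver's formula: actual MDC ~= rate / (2 * div + 1). */
	printf("div=%lu, actual MDC ~= %lu Hz\n", div, rate / (2 * div + 1));
	return 0;
}
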
+
+static void xgmac_mdio_set_suppress_preamble(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat;
+
+ if (!device_property_read_bool(dev, "suppress-preamble"))
+ return;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat |= MDIO_STAT_PRE_DIS;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
}
static int xgmac_mdio_probe(struct platform_device *pdev)
@@ -262,7 +321,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return -EINVAL;
}
- bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct mdio_fsl_priv));
if (!bus)
return -ENOMEM;
@@ -273,13 +332,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
bus->probe_capabilities = MDIOBUS_C22_C45;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
- /* Set the PHY base address */
priv = bus->priv;
- priv->mdio_base = ioremap(res->start, resource_size(res));
- if (!priv->mdio_base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ priv->mdio_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!priv->mdio_base)
+ return -ENOMEM;
/* For both ACPI and DT cases, endianness of MDIO controller
* needs to be specified using "little-endian" property.
@@ -287,10 +344,18 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
+ priv->has_a009885 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a009885");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
- fwnode = pdev->dev.fwnode;
+ xgmac_mdio_set_suppress_preamble(bus);
+
+ ret = xgmac_mdio_set_mdc_freq(bus);
+ if (ret)
+ return ret;
+
+ fwnode = dev_fwnode(&pdev->dev);
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
else if (is_acpi_node(fwnode))
@@ -299,31 +364,12 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
ret = -EINVAL;
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return ret;
}
platform_set_drvdata(pdev, bus);
return 0;
-
-err_registration:
- iounmap(priv->mdio_base);
-
-err_ioremap:
- mdiobus_free(bus);
-
- return ret;
-}
-
-static int xgmac_mdio_remove(struct platform_device *pdev)
-{
- struct mii_bus *bus = platform_get_drvdata(pdev);
-
- mdiobus_unregister(bus);
- iounmap(bus->priv);
- mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id xgmac_mdio_match[] = {
@@ -350,7 +396,6 @@ static struct platform_driver xgmac_mdio_driver = {
.acpi_match_table = xgmac_acpi_match,
},
.probe = xgmac_mdio_probe,
- .remove = xgmac_mdio_remove,
};
module_platform_driver(xgmac_mdio_driver);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index b0d733e9a7c6..4859493471db 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1046,8 +1046,8 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/fungible/Kconfig b/drivers/net/ethernet/fungible/Kconfig
new file mode 100644
index 000000000000..1ecedecc0f6c
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible network driver configuration
+#
+
+config NET_VENDOR_FUNGIBLE
+ bool "Fungible devices"
+ default y
+ help
+ If you have a Fungible network device, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Fungible cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_FUNGIBLE
+
+config FUN_CORE
+ tristate
+ select SBITMAP
+ help
+ A service module offering basic common services to Fungible
+ device drivers.
+
+source "drivers/net/ethernet/fungible/funeth/Kconfig"
+
+endif # NET_VENDOR_FUNGIBLE
diff --git a/drivers/net/ethernet/fungible/Makefile b/drivers/net/ethernet/fungible/Makefile
new file mode 100644
index 000000000000..df759f1585a1
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+#
+# Makefile for the Fungible network device drivers.
+#
+
+obj-$(CONFIG_FUN_CORE) += funcore/
+obj-$(CONFIG_FUN_ETH) += funeth/
diff --git a/drivers/net/ethernet/fungible/funcore/Makefile b/drivers/net/ethernet/fungible/funcore/Makefile
new file mode 100644
index 000000000000..bc16b264b53e
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+obj-$(CONFIG_FUN_CORE) += funcore.o
+
+funcore-y := fun_dev.o fun_queue.o
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
new file mode 100644
index 000000000000..fb5120d90f26
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+
+#include "fun_queue.h"
+#include "fun_dev.h"
+
+#define FUN_ADMIN_CMD_TO_MS 3000
+
+enum {
+ AQA_ASQS_SHIFT = 0,
+ AQA_ACQS_SHIFT = 16,
+ AQA_MIN_QUEUE_SIZE = 2,
+ AQA_MAX_QUEUE_SIZE = 4096
+};
+
+/* context for admin commands */
+struct fun_cmd_ctx {
+ fun_admin_callback_t cb; /* callback to invoke on completion */
+ void *cb_data; /* user data provided to callback */
+ int cpu; /* CPU where the cmd's tag was allocated */
+};
+
+/* Context for synchronous admin commands. */
+struct fun_sync_cmd_ctx {
+ struct completion compl;
+ u8 *rsp_buf; /* caller provided response buffer */
+ unsigned int rsp_len; /* response buffer size */
+ u8 rsp_status; /* command response status */
+};
+
+/* Wait for the CSTS.RDY bit to match @enabled. */
+static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
+{
+ unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+ unsigned long deadline;
+
+ deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms */
+
+ for (;;) {
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+
+ if ((csts & NVME_CSTS_RDY) == bit)
+ return 0;
+
+ if (time_is_before_jiffies(deadline))
+ break;
+
+ msleep(100);
+ }
+
+ dev_err(fdev->dev,
+ "Timed out waiting for device to indicate RDY %u; aborting %s\n",
+ enabled, enabled ? "initialization" : "reset");
+ return -ETIMEDOUT;
+}
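
fun_wait_ready() converts the NVMe-style CAP.TO field to a jiffies deadline via ((cap_to + 1) * HZ / 2), i.e. CAP.TO counts in 500 ms units. A quick arithmetic check of that conversion:

#include <stdio.h>

int main(void)
{
	unsigned int cap_to = 3;	/* example CAP.TO value */

	/* (cap_to + 1) * HZ / 2 jiffies == (cap_to + 1) * 500 ms */
	printf("CAP.TO=%u -> ready timeout %u ms\n",
	       cap_to, (cap_to + 1) * 500);	/* 2000 ms */
	return 0;
}
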
+
+/* Check CSTS and return an error if it is unreadable or has unexpected
+ * RDY value.
+ */
+static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+ u32 actual_rdy = csts & NVME_CSTS_RDY;
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+ if (actual_rdy != expected_rdy) {
+ dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Check that CSTS RDY has the expected value. Then write a new value to the CC
+ * register and wait for CSTS RDY to match the new CC ENABLE state.
+ */
+static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
+{
+ int rc = fun_check_csts_rdy(fdev, initial_rdy);
+
+ if (rc)
+ return rc;
+ writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
+ return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
+}
+
+static int fun_disable_ctrl(struct fun_dev *fdev)
+{
+ fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
+ return fun_update_cc_enable(fdev, 1);
+}
+
+static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
+ u32 admin_sqesz_log2)
+{
+ fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
+ (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
+ ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
+ NVME_CC_ENABLE;
+
+ return fun_update_cc_enable(fdev, 0);
+}
+
+static int fun_map_bars(struct fun_dev *fdev, const char *name)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ int err;
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't get PCI memory resources, err %d\n", err);
+ return err;
+ }
+
+ fdev->bar = pci_ioremap_bar(pdev, 0);
+ if (!fdev->bar) {
+ dev_err(&pdev->dev, "Couldn't map BAR 0\n");
+ pci_release_mem_regions(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void fun_unmap_bars(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ if (fdev->bar) {
+ iounmap(fdev->bar);
+ fdev->bar = NULL;
+ pci_release_mem_regions(pdev);
+ }
+}
+
+static int fun_set_dma_masks(struct device *dev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err)
+ dev_err(dev, "DMA mask configuration failed, err %d\n", err);
+ return err;
+}
+
+static irqreturn_t fun_admin_irq(int irq, void *data)
+{
+ struct fun_queue *funq = data;
+
+ return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
+ void *entry, const struct fun_cqe_info *info)
+{
+ const struct fun_admin_rsp_common *rsp_common = entry;
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_cmd_ctx *cmd_ctx;
+ int cpu;
+ u16 cid;
+
+ if (info->sqhd == cpu_to_be16(0xffff)) {
+ dev_dbg(fdev->dev, "adminq event");
+ if (fdev->adminq_cb)
+ fdev->adminq_cb(fdev, entry);
+ return;
+ }
+
+ cid = be16_to_cpu(rsp_common->cid);
+ dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
+ rsp_common->op, rsp_common->ret);
+
+ cmd_ctx = &fdev->cmd_ctx[cid];
+ if (cmd_ctx->cpu < 0) {
+ dev_err(fdev->dev,
+ "admin CQE with CID=%u, op=%u does not match a pending command\n",
+ cid, rsp_common->op);
+ return;
+ }
+
+ if (cmd_ctx->cb)
+ cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));
+
+ cpu = cmd_ctx->cpu;
+ cmd_ctx->cpu = -1;
+ sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
+}
+
+static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
+{
+ unsigned int i;
+
+ fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
+ if (!fdev->cmd_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < ntags; i++)
+ fdev->cmd_ctx[i].cpu = -1;
+
+ return 0;
+}
+
+/* Allocate and enable an admin queue and assign it the first IRQ vector. */
+static int fun_enable_admin_queue(struct fun_dev *fdev,
+ const struct fun_dev_params *areq)
+{
+ struct fun_queue_alloc_req qreq = {
+ .cqe_size_log2 = areq->cqe_size_log2,
+ .sqe_size_log2 = areq->sqe_size_log2,
+ .cq_depth = areq->cq_depth,
+ .sq_depth = areq->sq_depth,
+ .rq_depth = areq->rq_depth,
+ };
+ unsigned int ntags = areq->sq_depth - 1;
+ struct fun_queue *funq;
+ int rc;
+
+ if (fdev->admin_q)
+ return -EEXIST;
+
+ if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
+ areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->cq_depth > AQA_MAX_QUEUE_SIZE)
+ return -EINVAL;
+
+ fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
+ if (!fdev->admin_q)
+ return -ENOMEM;
+
+ rc = fun_init_cmd_ctx(fdev, ntags);
+ if (rc)
+ goto free_q;
+
+ rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
+ GFP_KERNEL, dev_to_node(fdev->dev));
+ if (rc)
+ goto free_cmd_ctx;
+
+ funq = fdev->admin_q;
+ funq->cq_vector = 0;
+ rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
+ if (rc)
+ goto free_sbq;
+
+ fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
+ fdev->adminq_cb = areq->event_cb;
+
+ writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
+ (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
+ fdev->bar + NVME_REG_AQA);
+
+ writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
+ writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);
+
+ rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
+ if (rc)
+ goto free_irq;
+
+ if (areq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto disable_ctrl;
+
+ funq_rq_post(funq);
+ }
+
+ return 0;
+
+disable_ctrl:
+ fun_disable_ctrl(fdev);
+free_irq:
+ fun_free_irq(funq);
+free_sbq:
+ sbitmap_queue_free(&fdev->admin_sbq);
+free_cmd_ctx:
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+free_q:
+ fun_free_queue(fdev->admin_q);
+ fdev->admin_q = NULL;
+ return rc;
+}
+
+static void fun_disable_admin_queue(struct fun_dev *fdev)
+{
+ struct fun_queue *admq = fdev->admin_q;
+
+ if (!admq)
+ return;
+
+ fun_disable_ctrl(fdev);
+
+ fun_free_irq(admq);
+ __fun_process_cq(admq, 0);
+
+ sbitmap_queue_free(&fdev->admin_sbq);
+
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+
+ fun_free_queue(admq);
+ fdev->admin_q = NULL;
+}
+
+/* Return %true if the admin queue has stopped servicing commands as can be
+ * detected through registers. This isn't exhaustive and may provide false
+ * negatives.
+ */
+static bool fun_adminq_stopped(struct fun_dev *fdev)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
+}
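
The predicate in fun_adminq_stopped() folds two checks into one comparison: masking CSTS with (CFS | RDY) and comparing against RDY reports "stopped" whenever the fatal-status bit is set or the ready bit is clear. A tiny truth-table demo of the same expression, with local stand-ins for the NVMe bit definitions:

#include <stdio.h>
#include <stdint.h>

#define CSTS_RDY	0x1u	/* controller ready */
#define CSTS_CFS	0x2u	/* controller fatal status */

static int stopped(uint32_t csts)
{
	return (csts & (CSTS_CFS | CSTS_RDY)) != CSTS_RDY;
}

int main(void)
{
	/* Only RDY set with CFS clear counts as "still serving commands". */
	printf("RDY only: %d\n", stopped(CSTS_RDY));		/* 0 */
	printf("RDY+CFS : %d\n", stopped(CSTS_RDY | CSTS_CFS));	/* 1 */
	printf("neither : %d\n", stopped(0));			/* 1 */
	return 0;
}
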
+
+static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
+{
+ struct sbitmap_queue *sbq = &fdev->admin_sbq;
+ struct sbq_wait_state *ws = &sbq->ws[0];
+ DEFINE_SBQ_WAIT(wait);
+ int tag;
+
+ for (;;) {
+ sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
+ if (fdev->suppress_cmds) {
+ tag = -ESHUTDOWN;
+ break;
+ }
+ tag = sbitmap_queue_get(sbq, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ sbitmap_finish_wait(sbq, ws, &wait);
+ return tag;
+}
+
+/* Submit an asynchronous admin command. Caller is responsible for implementing
+ * any waiting or timeout. Upon command completion the callback @cb is called.
+ */
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok)
+{
+ struct fun_queue *funq = fdev->admin_q;
+ unsigned int cmdsize = cmd->len8 * 8;
+ struct fun_cmd_ctx *cmd_ctx;
+ int tag, cpu, rc = 0;
+
+ if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
+ return -EMSGSIZE;
+
+ tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
+ if (tag < 0) {
+ if (!wait_ok)
+ return -EAGAIN;
+ tag = fun_wait_for_tag(fdev, &cpu);
+ if (tag < 0)
+ return tag;
+ }
+
+ cmd->cid = cpu_to_be16(tag);
+
+ cmd_ctx = &fdev->cmd_ctx[tag];
+ cmd_ctx->cb = cb;
+ cmd_ctx->cb_data = cb_data;
+
+ spin_lock(&funq->sq_lock);
+
+ if (unlikely(fdev->suppress_cmds)) {
+ rc = -ESHUTDOWN;
+ sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
+ } else {
+ cmd_ctx->cpu = cpu;
+ memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);
+
+ dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
+ cmd);
+
+ if (++funq->sq_tail == funq->sq_depth)
+ funq->sq_tail = 0;
+ writel(funq->sq_tail, funq->sq_db);
+ }
+ spin_unlock(&funq->sq_lock);
+ return rc;
+}
+
+/* Abandon a pending admin command by clearing the issuer's callback data.
+ * Failure indicates that the command either has already completed or its
+ * completion is racing with this call.
+ */
+static bool fun_abandon_admin_cmd(struct fun_dev *fd,
+ const struct fun_admin_req_common *cmd,
+ void *cb_data)
+{
+ u16 cid = be16_to_cpu(cmd->cid);
+ struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];
+
+ return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
+}
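
fun_abandon_admin_cmd() and the xchg() in fun_complete_admin_cmd() form an ownership handoff: the timeout and completion paths race to swap cb_data away from the context pointer, the winner owns the context, and the loser sees NULL and backs off. A hedged userspace sketch of the same idea, with C11 atomics standing in for the kernel's cmpxchg()/xchg():

#include <stdatomic.h>
#include <stdio.h>

struct ctx { int dummy; };

static _Atomic(struct ctx *) cb_data;

/* Completion path: take ownership, like xchg(&cmd_ctx->cb_data, NULL). */
static struct ctx *complete_take(void)
{
	return atomic_exchange(&cb_data, NULL);
}

/* Timeout path: cancel only if we still own the context, like cmpxchg(). */
static int abandon(struct ctx *mine)
{
	struct ctx *expected = mine;

	return atomic_compare_exchange_strong(&cb_data, &expected, NULL);
}

int main(void)
{
	struct ctx c;

	atomic_store(&cb_data, &c);
	if (abandon(&c))
		puts("canceller won; completion will see NULL and back off");
	if (!complete_take())
		puts("completion lost the race and does not touch the ctx");
	return 0;
}
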
+
+/* Stop submission of new admin commands and wake up any processes waiting for
+ * tags. Already submitted commands are left to complete or time out.
+ */
+static void fun_admin_stop(struct fun_dev *fdev)
+{
+ spin_lock(&fdev->admin_q->sq_lock);
+ fdev->suppress_cmds = true;
+ spin_unlock(&fdev->admin_q->sq_lock);
+ sbitmap_queue_wake_all(&fdev->admin_sbq);
+}
+
+/* The callback for synchronous execution of admin commands. It copies the
+ * command response to the caller's buffer and signals completion.
+ */
+static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
+{
+ const struct fun_admin_rsp_common *rsp_common = rsp;
+ struct fun_sync_cmd_ctx *ctx = cb_data;
+
+ if (!ctx)
+ return; /* command issuer timed out and left */
+ if (ctx->rsp_buf) {
+ unsigned int rsp_len = rsp_common->len8 * 8;
+
+ if (unlikely(rsp_len > ctx->rsp_len)) {
+ dev_err(fd->dev,
+ "response for op %u is %uB > response buffer %uB\n",
+ rsp_common->op, rsp_len, ctx->rsp_len);
+ rsp_len = ctx->rsp_len;
+ }
+ memcpy(ctx->rsp_buf, rsp, rsp_len);
+ }
+ ctx->rsp_status = rsp_common->ret;
+ complete(&ctx->compl);
+}
+
+/* Submit a synchronous admin command. */
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout)
+{
+ struct fun_sync_cmd_ctx ctx = {
+ .compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
+ .rsp_buf = rsp,
+ .rsp_len = rspsize,
+ };
+ unsigned int cmdlen = cmd->len8 * 8;
+ unsigned long jiffies_left;
+ int ret;
+
+ ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
+ true);
+ if (ret)
+ return ret;
+
+ if (!timeout)
+ timeout = FUN_ADMIN_CMD_TO_MS;
+
+ jiffies_left = wait_for_completion_timeout(&ctx.compl,
+ msecs_to_jiffies(timeout));
+ if (!jiffies_left) {
+ /* The command timed out. Attempt to cancel it so we can return.
+ * But if the command is in the process of completing we'll
+ * wait for it.
+ */
+ if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
+ dev_err(fdev->dev, "admin command timed out: %*ph\n",
+ cmdlen, cmd);
+ fun_admin_stop(fdev);
+ /* see if the timeout was due to a queue failure */
+ if (fun_adminq_stopped(fdev))
+ dev_err(fdev->dev,
+ "device does not accept admin commands\n");
+
+ return -ETIMEDOUT;
+ }
+ wait_for_completion(&ctx.compl);
+ }
+
+ if (ctx.rsp_status) {
+ dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
+ ctx.rsp_status, cmdlen, cmd);
+ }
+
+ return -ctx.rsp_status;
+}
+EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);
+
+/* Return the number of device resources of the requested type. */
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
+{
+ union {
+ struct fun_admin_res_count_req req;
+ struct fun_admin_res_count_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
+ cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
+ 0, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
+}
+EXPORT_SYMBOL_GPL(fun_get_res_count);
+
+/* Request that the instance of resource @res with the given id be deleted. */
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id)
+{
+ struct fun_admin_generic_destroy_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
+ .destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
+ flags, id)
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_res_destroy);
+
+/* Bind two entities of the given types and IDs. */
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1)
+{
+ struct {
+ struct fun_admin_bind_req req;
+ struct fun_admin_bind_entry entry[2];
+ } cmd = {
+ .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ sizeof(cmd)),
+ .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
+ .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_bind);
+
+static int fun_get_dev_limits(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ unsigned int cq_count, sq_count, num_dbs;
+ int rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
+ if (rc < 0)
+ return rc;
+ cq_count = rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
+ if (rc < 0)
+ return rc;
+ sq_count = rc;
+
+ /* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
+ * device must provide additional queues.
+ */
+ if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
+ return -EINVAL;
+
+ /* Calculate the max QID based on SQ/CQ/doorbell counts.
+ * SQ/CQ doorbells alternate.
+ */
+ num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
+ (2 + NVME_CAP_STRIDE(fdev->cap_reg));
+ fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
+ fdev->kern_end_qid = fdev->max_qid + 1;
+ return 0;
+}
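
fun_get_dev_limits() sizes the queue-ID space from three limits: the device's CQ and SQ counts and how many doorbells fit in BAR 0 past NVME_REG_DBS, where each doorbell occupies 4 << CAP.DSTRD bytes and SQ/CQ doorbells alternate. A worked example with assumed numbers (the BAR size and queue counts are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long bar_len = 0x8000;	/* assumed BAR 0 size */
	unsigned long reg_dbs = 0x1000;	/* doorbell area offset (NVME_REG_DBS) */
	unsigned int stride = 0;	/* CAP.DSTRD: doorbell is 4 << 0 bytes */
	unsigned int cq_count = 512, sq_count = 512;

	unsigned int num_dbs = (bar_len - reg_dbs) >> (2 + stride);
	unsigned int pairs = num_dbs / 2;	/* SQ/CQ doorbells alternate */
	unsigned int qmin = cq_count < sq_count ? cq_count : sq_count;
	unsigned int max_qid = (qmin < pairs ? qmin : pairs) - 1;

	printf("num_dbs=%u, max_qid=%u\n", num_dbs, max_qid);	/* 7168, 511 */
	return 0;
}
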
+
+/* Allocate all MSI-X vectors available on a function and at least @min_vecs. */
+static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
+{
+ int vecs, num_msix = pci_msix_vec_count(pdev);
+
+ if (num_msix < 0)
+ return num_msix;
+ if (min_vecs > num_msix)
+ return -ERANGE;
+
+ vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
+ if (vecs > 0) {
+ dev_info(&pdev->dev,
+ "Allocated %d IRQ vectors of %d requested\n",
+ vecs, num_msix);
+ } else {
+ dev_err(&pdev->dev,
+ "Unable to allocate at least %u IRQ vectors\n",
+ min_vecs);
+ }
+ return vecs;
+}
+
+/* Allocate and initialize the IRQ manager state. */
+static int fun_alloc_irq_mgr(struct fun_dev *fdev)
+{
+ fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
+ if (!fdev->irq_map)
+ return -ENOMEM;
+
+ spin_lock_init(&fdev->irqmgr_lock);
+	/* mark IRQ 0 allocated; it is used by the admin queue */
+ __set_bit(0, fdev->irq_map);
+ fdev->irqs_avail = fdev->num_irqs - 1;
+ return 0;
+}
+
+/* Reserve @nirqs of the currently available IRQs and return their indices. */
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
+{
+ unsigned int b, n = 0;
+ int err = -ENOSPC;
+
+ if (!nirqs)
+ return 0;
+
+ spin_lock(&fdev->irqmgr_lock);
+ if (nirqs > fdev->irqs_avail)
+ goto unlock;
+
+ for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
+ __set_bit(b, fdev->irq_map);
+ irq_indices[n++] = b;
+ if (n >= nirqs)
+ break;
+ }
+
+ WARN_ON(n < nirqs);
+ fdev->irqs_avail -= n;
+ err = n;
+unlock:
+ spin_unlock(&fdev->irqmgr_lock);
+ return err;
+}
+EXPORT_SYMBOL(fun_reserve_irqs);
+
+/* Release @nirqs previously allocated IRQs with the supplied indices. */
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices)
+{
+ unsigned int i;
+
+ spin_lock(&fdev->irqmgr_lock);
+ for (i = 0; i < nirqs; i++)
+ __clear_bit(irq_indices[i], fdev->irq_map);
+ fdev->irqs_avail += nirqs;
+ spin_unlock(&fdev->irqmgr_lock);
+}
+EXPORT_SYMBOL(fun_release_irqs);
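+
+/* Illustrative pairing of the reserve/release calls above (a sketch;
+ * MAX_QS and nqs are hypothetical):
+ *
+ *	u16 idx[MAX_QS];
+ *	int n = fun_reserve_irqs(fdev, nqs, idx);
+ *
+ *	if (n < 0)
+ *		return n;
+ *	...
+ *	fun_release_irqs(fdev, nqs, idx);
+ */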
+
+static void fun_serv_handler(struct work_struct *work)
+{
+ struct fun_dev *fd = container_of(work, struct fun_dev, service_task);
+
+ if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ return;
+ if (fd->serv_cb)
+ fd->serv_cb(fd);
+}
+
+void fun_serv_stop(struct fun_dev *fd)
+{
+ set_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ cancel_work_sync(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_stop);
+
+void fun_serv_restart(struct fun_dev *fd)
+{
+ clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ if (fd->service_flags)
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_restart);
+
+void fun_serv_sched(struct fun_dev *fd)
+{
+ if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_sched);
+
+/* Check and try to get the device into a proper state for initialization,
+ * i.e., CSTS.RDY = CC.EN = 0.
+ */
+static int sanitize_dev(struct fun_dev *fdev)
+{
+ int rc;
+
+ fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
+ fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);
+
+ /* First get RDY to agree with the current EN. Give RDY the opportunity
+ * to complete a potential recent EN change.
+ */
+ rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
+ if (rc)
+ return rc;
+
+ /* Next, reset the device if EN is currently 1. */
+ if (fdev->cc_reg & NVME_CC_ENABLE)
+ rc = fun_disable_ctrl(fdev);
+
+ return rc;
+}
+
+/* Undo the device initialization of fun_dev_enable(). */
+void fun_dev_disable(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
+ fdev->fw_handle);
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ }
+
+ fun_disable_admin_queue(fdev);
+
+ bitmap_free(fdev->irq_map);
+ pci_free_irq_vectors(pdev);
+
+ pci_clear_master(pdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+
+ fun_unmap_bars(fdev);
+}
+EXPORT_SYMBOL(fun_dev_disable);
+
+/* Perform basic initialization of a device, including
+ * - PCI config space setup and BAR0 mapping
+ * - interrupt management initialization
+ * - 1 admin queue setup
+ * - determination of some device limits, such as number of queues.
+ */
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name)
+{
+ int rc;
+
+ fdev->dev = &pdev->dev;
+ rc = fun_map_bars(fdev, name);
+ if (rc)
+ return rc;
+
+ rc = fun_set_dma_masks(fdev->dev);
+ if (rc)
+ goto unmap;
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
+ goto unmap;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ rc = sanitize_dev(fdev);
+ if (rc)
+ goto disable_dev;
+
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
+ fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
+ fdev->dbs = fdev->bar + NVME_REG_DBS;
+
+ INIT_WORK(&fdev->service_task, fun_serv_handler);
+ fdev->service_flags = FUN_SERV_DISABLED;
+ fdev->serv_cb = areq->serv_cb;
+
+ rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
+ if (rc < 0)
+ goto disable_dev;
+ fdev->num_irqs = rc;
+
+ rc = fun_alloc_irq_mgr(fdev);
+ if (rc)
+ goto free_irqs;
+
+ pci_set_master(pdev);
+ rc = fun_enable_admin_queue(fdev, areq);
+ if (rc)
+ goto free_irq_mgr;
+
+ rc = fun_get_dev_limits(fdev);
+ if (rc < 0)
+ goto disable_admin;
+
+ pci_save_state(pdev);
+ pci_set_drvdata(pdev, fdev);
+ pcie_print_link_status(pdev);
+ dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
+ fdev->q_depth, fdev->db_stride, fdev->max_qid,
+ fdev->kern_end_qid);
+ return 0;
+
+disable_admin:
+ fun_disable_admin_queue(fdev);
+free_irq_mgr:
+ pci_clear_master(pdev);
+ bitmap_free(fdev->irq_map);
+free_irqs:
+ pci_free_irq_vectors(pdev);
+disable_dev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+unmap:
+ fun_unmap_bars(fdev);
+ return rc;
+}
+EXPORT_SYMBOL(fun_dev_enable);
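+
+/* Illustrative probe skeleton built on fun_dev_enable() (a sketch; the
+ * mydev/mydrv names and ADMIN_* constants are hypothetical):
+ *
+ *	struct fun_dev_params areq = {
+ *		.cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
+ *		.sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
+ *		.cq_depth = ADMIN_CQ_DEPTH,
+ *		.sq_depth = ADMIN_SQ_DEPTH,
+ *		.min_msix = 2,
+ *	};
+ *
+ *	rc = fun_dev_enable(&mydev->fdev, pdev, &areq, "mydrv");
+ *
+ * with a matching fun_dev_disable(&mydev->fdev) in the remove path.
+ */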
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Core services driver for Fungible devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.h b/drivers/net/ethernet/fungible/funcore/fun_dev.h
new file mode 100644
index 000000000000..9e8c17ce8887
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNDEV_H
+#define _FUNDEV_H
+
+#include <linux/sbitmap.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+#include "fun_hci.h"
+
+struct pci_dev;
+struct fun_dev;
+struct fun_queue;
+struct fun_cmd_ctx;
+struct fun_queue_alloc_req;
+
+/* doorbell fields */
+enum {
+ FUN_DB_QIDX_S = 0,
+ FUN_DB_INTCOAL_ENTRIES_S = 16,
+ FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
+ FUN_DB_INTCOAL_USEC_S = 23,
+ FUN_DB_INTCOAL_USEC_M = 0x7f,
+ FUN_DB_IRQ_S = 30,
+ FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
+ FUN_DB_IRQ_ARM_S = 31,
+ FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
+};
+
+/* Callback for asynchronous admin commands.
+ * Invoked on reception of the command's response.
+ */
+typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
+ void *cb_data);
+
+/* Callback for events/notifications received by an admin queue. */
+typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);
+
+/* Callback for pending work handled by the service task. */
+typedef void (*fun_serv_cb)(struct fun_dev *fd);
+
+/* service task flags */
+enum {
+ FUN_SERV_DISABLED, /* service task is disabled */
+ FUN_SERV_FIRST_AVAIL
+};
+
+/* Driver state associated with a PCI function. */
+struct fun_dev {
+ struct device *dev;
+
+ void __iomem *bar; /* start of BAR0 mapping */
+ u32 __iomem *dbs; /* start of doorbells in BAR0 mapping */
+
+ /* admin queue */
+ struct fun_queue *admin_q;
+ struct sbitmap_queue admin_sbq;
+ struct fun_cmd_ctx *cmd_ctx;
+ fun_admin_event_cb adminq_cb;
+ bool suppress_cmds; /* if set don't write commands to SQ */
+
+ /* address increment between consecutive doorbells, in 4B units */
+ unsigned int db_stride;
+
+ /* SW versions of device registers */
+ u32 cc_reg; /* CC register */
+ u64 cap_reg; /* CAPability register */
+
+ unsigned int q_depth; /* max queue depth supported by device */
+ unsigned int max_qid; /* = #queues - 1, separately for SQs and CQs */
+ unsigned int kern_end_qid; /* last qid in the kernel range + 1 */
+
+ unsigned int fw_handle;
+
+ /* IRQ manager */
+ unsigned int num_irqs;
+ unsigned int irqs_avail;
+ spinlock_t irqmgr_lock;
+ unsigned long *irq_map;
+
+ /* The service task handles work that needs a process context */
+ struct work_struct service_task;
+ unsigned long service_flags;
+ fun_serv_cb serv_cb;
+};
+
+struct fun_dev_params {
+ u8 cqe_size_log2; /* admin q CQE size */
+ u8 sqe_size_log2; /* admin q SQE size */
+
+ /* admin q depths */
+ u16 cq_depth;
+ u16 sq_depth;
+ u16 rq_depth;
+
+ u16 min_msix; /* min vectors needed by requesting driver */
+
+ fun_admin_event_cb event_cb;
+ fun_serv_cb serv_cb;
+};
+
+/* Return the BAR address of a doorbell. */
+static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
+ unsigned int db_index)
+{
+ return &fdev->dbs[db_index * fdev->db_stride];
+}
+
+/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate;
+ * SQs have even DB indices.
+ */
+static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
+ unsigned int sqid)
+{
+ return fun_db_addr(fdev, sqid * 2);
+}
+
+static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
+ unsigned int cqid)
+{
+ return fun_db_addr(fdev, cqid * 2 + 1);
+}
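+
+/* Doorbell layout implied by the helpers above: SQ i uses DB index 2 * i
+ * and CQ i uses DB index 2 * i + 1, each scaled by db_stride 4B words.
+ */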
+
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id);
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1);
+
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok);
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout);
+
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name);
+void fun_dev_disable(struct fun_dev *fdev);
+
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+
+void fun_serv_stop(struct fun_dev *fd);
+void fun_serv_restart(struct fun_dev *fd);
+void fun_serv_sched(struct fun_dev *fd);
+
+#endif /* _FUNDEV_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
new file mode 100644
index 000000000000..f21819670106
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -0,0 +1,1242 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUN_HCI_H
+#define __FUN_HCI_H
+
+enum {
+ FUN_HCI_ID_INVALID = 0xffffffff,
+};
+
+enum fun_admin_op {
+ FUN_ADMIN_OP_BIND = 0x1,
+ FUN_ADMIN_OP_EPCQ = 0x11,
+ FUN_ADMIN_OP_EPSQ = 0x12,
+ FUN_ADMIN_OP_PORT = 0x13,
+ FUN_ADMIN_OP_ETH = 0x14,
+ FUN_ADMIN_OP_VI = 0x15,
+ FUN_ADMIN_OP_SWUPGRADE = 0x1f,
+ FUN_ADMIN_OP_RSS = 0x21,
+ FUN_ADMIN_OP_ADI = 0x25,
+ FUN_ADMIN_OP_KTLS = 0x26,
+};
+
+enum {
+ FUN_REQ_COMMON_FLAG_RSP = 0x1,
+ FUN_REQ_COMMON_FLAG_HEAD_WB = 0x2,
+ FUN_REQ_COMMON_FLAG_INT = 0x4,
+ FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF = 0x8,
+};
+
+struct fun_admin_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+#define FUN_ADMIN_REQ_COMMON_INIT(_op, _len8, _flags, _suboff8, _cid) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len8), .flags = cpu_to_be16(_flags), \
+ .suboff8 = (_suboff8), .cid = cpu_to_be16(_cid), \
+ }
+
+#define FUN_ADMIN_REQ_COMMON_INIT2(_op, _len) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len) / 8, \
+ }
+
+struct fun_admin_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_admin_write48_req {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_WRITE48_REQ_KEY_S 56U
+#define FUN_ADMIN_WRITE48_REQ_KEY_M 0xff
+#define FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_KEY_S)
+
+#define FUN_ADMIN_WRITE48_REQ_DATA_S 0U
+#define FUN_ADMIN_WRITE48_REQ_DATA_M 0xffffffffffff
+#define FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_DATA_S)
+
+#define FUN_ADMIN_WRITE48_REQ_INIT(key, data) \
+ (struct fun_admin_write48_req) { \
+ .key_to_data = cpu_to_be64( \
+ FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(key) | \
+ FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(data)), \
+ }
+
+struct fun_admin_write48_rsp {
+ __be64 key_to_data;
+};
+
+struct fun_admin_read48_req {
+ __be64 key_pack;
+};
+
+#define FUN_ADMIN_READ48_REQ_KEY_S 56U
+#define FUN_ADMIN_READ48_REQ_KEY_M 0xff
+#define FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_READ48_REQ_KEY_S)
+
+#define FUN_ADMIN_READ48_REQ_INIT(key) \
+ (struct fun_admin_read48_req) { \
+ .key_pack = \
+ cpu_to_be64(FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(key)), \
+ }
+
+struct fun_admin_read48_rsp {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_READ48_RSP_KEY_S 56U
+#define FUN_ADMIN_READ48_RSP_KEY_M 0xff
+#define FUN_ADMIN_READ48_RSP_KEY_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_KEY_S) & \
+ FUN_ADMIN_READ48_RSP_KEY_M)
+
+#define FUN_ADMIN_READ48_RSP_RET_S 48U
+#define FUN_ADMIN_READ48_RSP_RET_M 0xff
+#define FUN_ADMIN_READ48_RSP_RET_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_RET_S) & \
+ FUN_ADMIN_READ48_RSP_RET_M)
+
+#define FUN_ADMIN_READ48_RSP_DATA_S 0U
+#define FUN_ADMIN_READ48_RSP_DATA_M 0xffffffffffff
+#define FUN_ADMIN_READ48_RSP_DATA_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_DATA_S) & \
+ FUN_ADMIN_READ48_RSP_DATA_M)
+
+enum fun_admin_bind_type {
+ FUN_ADMIN_BIND_TYPE_EPCQ = 0x1,
+ FUN_ADMIN_BIND_TYPE_EPSQ = 0x2,
+ FUN_ADMIN_BIND_TYPE_PORT = 0x3,
+ FUN_ADMIN_BIND_TYPE_RSS = 0x4,
+ FUN_ADMIN_BIND_TYPE_VI = 0x5,
+ FUN_ADMIN_BIND_TYPE_ETH = 0x6,
+};
+
+struct fun_admin_bind_entry {
+ __u8 type;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+#define FUN_ADMIN_BIND_ENTRY_INIT(_type, _id) \
+ (struct fun_admin_bind_entry) { \
+ .type = (_type), .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_bind_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_bind_entry entry[];
+};
+
+struct fun_admin_bind_rsp {
+ struct fun_admin_rsp_common bind_rsp_common;
+};
+
+struct fun_admin_simple_subop {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 data;
+};
+
+#define FUN_ADMIN_SIMPLE_SUBOP_INIT(_subop, _flags, _data) \
+ (struct fun_admin_simple_subop) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .data = cpu_to_be32(_data), \
+ }
+
+enum fun_admin_subop {
+ FUN_ADMIN_SUBOP_CREATE = 0x10,
+ FUN_ADMIN_SUBOP_DESTROY = 0x11,
+ FUN_ADMIN_SUBOP_MODIFY = 0x12,
+ FUN_ADMIN_SUBOP_RES_COUNT = 0x14,
+ FUN_ADMIN_SUBOP_READ = 0x15,
+ FUN_ADMIN_SUBOP_WRITE = 0x16,
+ FUN_ADMIN_SUBOP_NOTIFY = 0x17,
+};
+
+enum {
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR = 0x1,
+};
+
+struct fun_admin_generic_destroy_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop destroy;
+};
+
+struct fun_admin_generic_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+struct fun_admin_res_count_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop count;
+};
+
+struct fun_admin_res_count_rsp {
+ struct fun_admin_rsp_common common;
+ struct fun_admin_simple_subop count;
+};
+
+enum {
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_EPCQ = 0x2,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_ENTRY_WR_TPH = 0x4,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_SL_WR_TPH = 0x8,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_NOARM = 0x200,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_DROP_ON_OVERFLOW = 0x400,
+};
+
+struct fun_admin_epcq_req {
+ struct fun_admin_req_common common;
+ union epcq_req_subop {
+ struct fun_admin_epcq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epsqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address;
+
+ __be16 tailroom; /* per packet tailroom in bytes */
+ __u8 headroom; /* per packet headroom in 2B units */
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __be16 tph_cpuid;
+ __u8 rsvd3[6];
+ } create;
+
+ struct fun_admin_epcq_modify_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be16 headroom; /* headroom in bytes */
+ __u8 rsvd1[6];
+ } modify;
+ } u;
+};
+
+#define FUN_ADMIN_EPCQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epsqid, _entry_size_log2, _nentries, _address, \
+ _tailroom, _headroom, _intcoal_kbytes, _intcoal_holdoff_nentries, \
+ _intcoal_holdoff_usecs, _intid, _scan_start_id, _scan_end_id, \
+ _tph_cpuid) \
+ (struct fun_admin_epcq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epsqid = cpu_to_be32(_epsqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .tailroom = cpu_to_be16(_tailroom), .headroom = _headroom, \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ }
+
+#define FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(_subop, _flags, _id, _headroom) \
+ (struct fun_admin_epcq_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .headroom = cpu_to_be16(_headroom), \
+ }
+
+enum {
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_EPSQ = 0x2,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_ENTRY_RD_TPH = 0x4,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_GL_RD_TPH = 0x8,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS = 0x10,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS_TPH = 0x20,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_EPCQ = 0x40,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_NO_CMPL = 0x200,
+};
+
+struct fun_admin_epsq_req {
+ struct fun_admin_req_common common;
+
+ union epsq_req_subop {
+ struct fun_admin_epsq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epcqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address; /* DMA address of epsq */
+
+ __u8 rsvd2[3];
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __u8 rsvd3[4];
+ __be16 tph_cpuid;
+ __u8 buf_size_log2; /* log2 of RQ buffer size */
+ __u8 head_wb_size_log2; /* log2 of head write back size */
+
+ __be64 head_wb_address; /* DMA address for head writeback */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_EPSQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epcqid, _entry_size_log2, _nentries, _address, \
+ _intcoal_kbytes, _intcoal_holdoff_nentries, _intcoal_holdoff_usecs, \
+ _intid, _scan_start_id, _scan_end_id, _tph_cpuid, _buf_size_log2, \
+ _head_wb_size_log2, _head_wb_address) \
+ (struct fun_admin_epsq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epcqid = cpu_to_be32(_epcqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ .buf_size_log2 = _buf_size_log2, \
+ .head_wb_size_log2 = _head_wb_size_log2, \
+ .head_wb_address = cpu_to_be64(_head_wb_address), \
+ }
+
+enum {
+ FUN_PORT_CAP_OFFLOADS = 0x1,
+ FUN_PORT_CAP_STATS = 0x2,
+ FUN_PORT_CAP_LOOPBACK = 0x4,
+ FUN_PORT_CAP_VPORT = 0x8,
+ FUN_PORT_CAP_TX_PAUSE = 0x10,
+ FUN_PORT_CAP_RX_PAUSE = 0x20,
+ FUN_PORT_CAP_AUTONEG = 0x40,
+ FUN_PORT_CAP_RSS = 0x80,
+ FUN_PORT_CAP_VLAN_OFFLOADS = 0x100,
+ FUN_PORT_CAP_ENCAP_OFFLOADS = 0x200,
+ FUN_PORT_CAP_1000_X = 0x1000,
+ FUN_PORT_CAP_10G_R = 0x2000,
+ FUN_PORT_CAP_40G_R4 = 0x4000,
+ FUN_PORT_CAP_25G_R = 0x8000,
+ FUN_PORT_CAP_50G_R2 = 0x10000,
+ FUN_PORT_CAP_50G_R = 0x20000,
+ FUN_PORT_CAP_100G_R4 = 0x40000,
+ FUN_PORT_CAP_100G_R2 = 0x80000,
+ FUN_PORT_CAP_200G_R4 = 0x100000,
+ FUN_PORT_CAP_FEC_NONE = 0x10000000,
+ FUN_PORT_CAP_FEC_FC = 0x20000000,
+ FUN_PORT_CAP_FEC_RS = 0x40000000,
+};
+
+enum fun_port_brkout_mode {
+ FUN_PORT_BRKMODE_NA = 0x0,
+ FUN_PORT_BRKMODE_NONE = 0x1,
+ FUN_PORT_BRKMODE_2X = 0x2,
+ FUN_PORT_BRKMODE_4X = 0x3,
+};
+
+enum {
+ FUN_PORT_SPEED_AUTO = 0x0,
+ FUN_PORT_SPEED_10M = 0x1,
+ FUN_PORT_SPEED_100M = 0x2,
+ FUN_PORT_SPEED_1G = 0x4,
+ FUN_PORT_SPEED_10G = 0x8,
+ FUN_PORT_SPEED_25G = 0x10,
+ FUN_PORT_SPEED_40G = 0x20,
+ FUN_PORT_SPEED_50G = 0x40,
+ FUN_PORT_SPEED_100G = 0x80,
+ FUN_PORT_SPEED_200G = 0x100,
+};
+
+enum fun_port_duplex_mode {
+ FUN_PORT_FULL_DUPLEX = 0x0,
+ FUN_PORT_HALF_DUPLEX = 0x1,
+};
+
+enum {
+ FUN_PORT_FEC_NA = 0x0,
+ FUN_PORT_FEC_OFF = 0x1,
+ FUN_PORT_FEC_RS = 0x2,
+ FUN_PORT_FEC_FC = 0x4,
+ FUN_PORT_FEC_AUTO = 0x8,
+};
+
+enum fun_port_link_status {
+ FUN_PORT_LINK_UP = 0x0,
+ FUN_PORT_LINK_UP_WITH_ERR = 0x1,
+ FUN_PORT_LINK_DOWN = 0x2,
+};
+
+enum fun_port_led_type {
+ FUN_PORT_LED_OFF = 0x0,
+ FUN_PORT_LED_AMBER = 0x1,
+ FUN_PORT_LED_GREEN = 0x2,
+ FUN_PORT_LED_BEACON_ON = 0x3,
+ FUN_PORT_LED_BEACON_OFF = 0x4,
+};
+
+enum {
+ FUN_PORT_FLAG_MAC_DOWN = 0x1,
+ FUN_PORT_FLAG_MAC_UP = 0x2,
+ FUN_PORT_FLAG_NH_DOWN = 0x4,
+ FUN_PORT_FLAG_NH_UP = 0x8,
+};
+
+enum {
+ FUN_PORT_FLAG_ENABLE_NOTIFY = 0x1,
+};
+
+enum fun_port_lane_attr {
+ FUN_PORT_LANE_1 = 0x1,
+ FUN_PORT_LANE_2 = 0x2,
+ FUN_PORT_LANE_4 = 0x4,
+ FUN_PORT_LANE_SPEED_10G = 0x100,
+ FUN_PORT_LANE_SPEED_25G = 0x200,
+ FUN_PORT_LANE_SPEED_50G = 0x400,
+ FUN_PORT_LANE_SPLIT = 0x8000,
+};
+
+enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_XCVR_READ = 0x23,
+ FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
+};
+
+enum fun_admin_port_key {
+ FUN_ADMIN_PORT_KEY_ILLEGAL = 0x0,
+ FUN_ADMIN_PORT_KEY_MTU = 0x1,
+ FUN_ADMIN_PORT_KEY_FEC = 0x2,
+ FUN_ADMIN_PORT_KEY_SPEED = 0x3,
+ FUN_ADMIN_PORT_KEY_DEBOUNCE = 0x4,
+ FUN_ADMIN_PORT_KEY_DUPLEX = 0x5,
+ FUN_ADMIN_PORT_KEY_MACADDR = 0x6,
+ FUN_ADMIN_PORT_KEY_LINKMODE = 0x7,
+ FUN_ADMIN_PORT_KEY_BREAKOUT = 0x8,
+ FUN_ADMIN_PORT_KEY_ENABLE = 0x9,
+ FUN_ADMIN_PORT_KEY_DISABLE = 0xa,
+ FUN_ADMIN_PORT_KEY_ERR_DISABLE = 0xb,
+ FUN_ADMIN_PORT_KEY_CAPABILITIES = 0xc,
+ FUN_ADMIN_PORT_KEY_LP_CAPABILITIES = 0xd,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW = 0xe,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH = 0xf,
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS = 0x10,
+ FUN_ADMIN_PORT_KEY_LED = 0x11,
+ FUN_ADMIN_PORT_KEY_ADVERT = 0x12,
+};
+
+struct fun_subop_imm {
+	__u8 subop; /* see fun_data_op enum */
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 len;
+
+ __u8 data[];
+};
+
+enum fun_subop_sgl_flags {
+ FUN_SUBOP_SGL_USE_OFF8 = 0x1,
+ FUN_SUBOP_FLAG_FREE_BUF = 0x2,
+ FUN_SUBOP_FLAG_IS_REFBUF = 0x4,
+ FUN_SUBOP_SGL_FLAG_LOCAL = 0x8,
+};
+
+enum fun_data_op {
+ FUN_DATAOP_INVALID = 0x0,
+ FUN_DATAOP_SL = 0x1, /* scatter */
+ FUN_DATAOP_GL = 0x2, /* gather */
+ FUN_DATAOP_SGL = 0x3, /* scatter-gather */
+ FUN_DATAOP_IMM = 0x4, /* immediate data */
+ FUN_DATAOP_RQBUF = 0x8, /* rq buffer */
+};
+
+struct fun_dataop_gl {
+ __u8 subop;
+ __u8 flags;
+ __be16 sgl_off;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+static inline void fun_dataop_gl_init(struct fun_dataop_gl *s, u8 flags,
+ u16 sgl_off, u32 sgl_len, u64 sgl_data)
+{
+ s->subop = FUN_DATAOP_GL;
+ s->flags = flags;
+ s->sgl_off = cpu_to_be16(sgl_off);
+ s->sgl_len = cpu_to_be32(sgl_len);
+ s->sgl_data = cpu_to_be64(sgl_data);
+}
+
+struct fun_dataop_imm {
+ __u8 subop;
+ __u8 flags;
+ __be16 rsvd0;
+ __be32 sgl_len;
+};
+
+struct fun_subop_sgl {
+ __u8 subop;
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+#define FUN_SUBOP_SGL_INIT(_subop, _flags, _nsgl, _sgl_len, _sgl_data) \
+ (struct fun_subop_sgl) { \
+ .subop = (_subop), .flags = (_flags), .nsgl = (_nsgl), \
+ .sgl_len = cpu_to_be32(_sgl_len), \
+ .sgl_data = cpu_to_be64(_sgl_data), \
+ }
+
+struct fun_dataop_rqbuf {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 cid;
+ __be32 bufoff;
+};
+
+struct fun_dataop_hdr {
+ __u8 nsgl;
+ __u8 flags;
+ __u8 ngather;
+ __u8 nscatter;
+ __be32 total_len;
+
+ struct fun_dataop_imm imm[];
+};
+
+#define FUN_DATAOP_HDR_INIT(_nsgl, _flags, _ngather, _nscatter, _total_len) \
+ (struct fun_dataop_hdr) { \
+ .nsgl = _nsgl, .flags = _flags, .ngather = _ngather, \
+ .nscatter = _nscatter, .total_len = cpu_to_be32(_total_len), \
+ }
+
+enum fun_port_inetaddr_event_type {
+ FUN_PORT_INETADDR_ADD = 0x1,
+ FUN_PORT_INETADDR_DEL = 0x2,
+};
+
+enum fun_port_inetaddr_addr_family {
+ FUN_PORT_INETADDR_IPV4 = 0x1,
+ FUN_PORT_INETADDR_IPV6 = 0x2,
+};
+
+struct fun_admin_port_req {
+ struct fun_admin_req_common common;
+
+ union port_req_subop {
+ struct fun_admin_port_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_port_write_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_req write48[];
+ } write;
+ struct fun_admin_port_read_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_req read48[];
+ } read;
+ struct fun_admin_port_xcvr_read_req {
+ u8 subop;
+ u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+ } xcvr_read;
+ struct fun_admin_port_inetaddr_event_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __u8 event_type;
+ __u8 addr_family;
+ __be32 id;
+
+ __u8 addr[];
+ } inetaddr_event;
+ } u;
+};
+
+#define FUN_ADMIN_PORT_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_WRITE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_write_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_READ_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_read_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(_flags, _id, _bank, _page, \
+ _offset, _length, _dev_addr) \
+ ((struct fun_admin_port_xcvr_read_req) { \
+ .subop = FUN_ADMIN_PORT_SUBOP_XCVR_READ, \
+ .flags = cpu_to_be16(_flags), .id = cpu_to_be32(_id), \
+ .bank = (_bank), .page = (_page), .offset = (_offset), \
+ .length = (_length), .dev_addr = (_dev_addr), \
+ })
+
+struct fun_admin_port_rsp {
+ struct fun_admin_rsp_common common;
+
+ union port_rsp_subop {
+ struct fun_admin_port_create_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be16 lport;
+ __u8 rsvd1[6];
+ } create;
+ struct fun_admin_port_write_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_rsp write48[];
+ } write;
+ struct fun_admin_port_read_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_rsp read48[];
+ } read;
+ struct fun_admin_port_inetaddr_event_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+ } inetaddr_event;
+ } u;
+};
+
+struct fun_admin_port_xcvr_read_rsp {
+ struct fun_admin_rsp_common common;
+
+ u8 subop;
+ u8 rsvd0[3];
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+
+ u8 data[128];
+};
+
+enum fun_xcvr_type {
+ FUN_XCVR_BASET = 0x0,
+ FUN_XCVR_CU = 0x1,
+ FUN_XCVR_SMF = 0x2,
+ FUN_XCVR_MMF = 0x3,
+ FUN_XCVR_AOC = 0x4,
+ FUN_XCVR_SFPP = 0x10, /* SFP+ or later */
+ FUN_XCVR_QSFPP = 0x11, /* QSFP+ or later */
+ FUN_XCVR_QSFPDD = 0x12, /* QSFP-DD */
+};
+
+struct fun_admin_port_notif {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 id;
+ __be32 speed; /* in 10 Mbps units */
+
+ __u8 link_state;
+ __u8 missed_events;
+ __u8 link_down_reason;
+ __u8 xcvr_type;
+ __u8 flow_ctrl;
+ __u8 fec;
+ __u8 active_lanes;
+ __u8 rsvd1;
+
+ __be64 advertising;
+
+ __be64 lp_advertising;
+};
+
+enum fun_eth_rss_const {
+ FUN_ETH_RSS_MAX_KEY_SIZE = 0x28,
+ FUN_ETH_RSS_MAX_INDIR_ENT = 0x40,
+};
+
+enum fun_eth_hash_alg {
+ FUN_ETH_RSS_ALG_INVALID = 0x0,
+ FUN_ETH_RSS_ALG_TOEPLITZ = 0x1,
+ FUN_ETH_RSS_ALG_CRC32 = 0x2,
+};
+
+struct fun_admin_rss_req {
+ struct fun_admin_req_common common;
+
+ union rss_req_subop {
+ struct fun_admin_rss_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 viid; /* VI flow id */
+
+ __be64 metadata[1];
+
+ __u8 alg;
+ __u8 keylen;
+ __u8 indir_nent;
+ __u8 rsvd2;
+ __be16 key_off;
+ __be16 indir_off;
+
+ struct fun_dataop_hdr dataop;
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_RSS_CREATE_REQ_INIT(_subop, _flags, _id, _viid, _alg, \
+ _keylen, _indir_nent, _key_off, \
+ _indir_off) \
+ (struct fun_admin_rss_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .viid = cpu_to_be32(_viid), \
+ .alg = _alg, .keylen = _keylen, .indir_nent = _indir_nent, \
+ .key_off = cpu_to_be16(_key_off), \
+ .indir_off = cpu_to_be16(_indir_off), \
+ }
+
+struct fun_admin_vi_req {
+ struct fun_admin_req_common common;
+
+ union vi_req_subop {
+ struct fun_admin_vi_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_VI_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_vi_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+struct fun_admin_eth_req {
+ struct fun_admin_req_common common;
+
+ union eth_req_subop {
+ struct fun_admin_eth_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_ETH_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_eth_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+enum {
+ FUN_ADMIN_SWU_UPGRADE_FLAG_INIT = 0x10,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_COMPLETE = 0x20,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_DOWNGRADE = 0x40,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ACTIVE_IMAGE = 0x80,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ASYNC = 0x1,
+};
+
+enum fun_admin_swu_subop {
+ FUN_ADMIN_SWU_SUBOP_GET_VERSION = 0x20,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE = 0x21,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE_DATA = 0x22,
+ FUN_ADMIN_SWU_SUBOP_GET_ALL_VERSIONS = 0x23,
+};
+
+struct fun_admin_swu_req {
+ struct fun_admin_req_common common;
+
+ union swu_req_subop {
+ struct fun_admin_swu_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 rsvd1;
+
+ __be64 image_size; /* upgrade image length */
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset; /* offset of data in this command */
+ __be32 size; /* total size of data in this command */
+ } upgrade_data;
+ } u;
+
+ struct fun_subop_sgl sgl[]; /* in, out buffers through sgl */
+};
+
+#define FUN_ADMIN_SWU_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_swu_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_REQ_INIT(_subop, _flags, _id, _fourcc, \
+ _image_size) \
+ (struct fun_admin_swu_upgrade_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .fourcc = cpu_to_be32(_fourcc), \
+ .image_size = cpu_to_be64(_image_size), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_DATA_REQ_INIT(_subop, _flags, _id, _offset, \
+ _size) \
+ (struct fun_admin_swu_upgrade_data_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .offset = cpu_to_be32(_offset), \
+ .size = cpu_to_be32(_size), \
+ }
+
+struct fun_admin_swu_rsp {
+ struct fun_admin_rsp_common common;
+
+ union swu_rsp_subop {
+ struct fun_admin_swu_create_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 status;
+
+ __be32 progress;
+ __be32 unused;
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset;
+ __be32 size;
+ } upgrade_data;
+ } u;
+};
+
+enum fun_ktls_version {
+ FUN_KTLS_TLSV2 = 0x20,
+ FUN_KTLS_TLSV3 = 0x30,
+};
+
+enum fun_ktls_cipher {
+ FUN_KTLS_CIPHER_AES_GCM_128 = 0x33,
+ FUN_KTLS_CIPHER_AES_GCM_256 = 0x34,
+ FUN_KTLS_CIPHER_AES_CCM_128 = 0x35,
+ FUN_KTLS_CIPHER_CHACHA20_POLY1305 = 0x36,
+};
+
+enum fun_ktls_modify_flags {
+ FUN_KTLS_MODIFY_REMOVE = 0x1,
+};
+
+struct fun_admin_ktls_create_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+#define FUN_ADMIN_KTLS_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_ktls_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_ktls_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+struct fun_admin_ktls_modify_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be64 tlsid;
+
+ __be32 tcp_seq;
+ __u8 version;
+ __u8 cipher;
+ __u8 rsvd1[2];
+
+ __u8 record_seq[8];
+
+ __u8 key[32];
+
+ __u8 iv[16];
+
+ __u8 salt[8];
+};
+
+#define FUN_ADMIN_KTLS_MODIFY_REQ_INIT(_subop, _flags, _id, _tlsid, _tcp_seq, \
+ _version, _cipher) \
+ (struct fun_admin_ktls_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .tlsid = cpu_to_be64(_tlsid), \
+ .tcp_seq = cpu_to_be32(_tcp_seq), .version = _version, \
+ .cipher = _cipher, \
+ }
+
+struct fun_admin_ktls_modify_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be64 tlsid;
+};
+
+struct fun_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+struct fun_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_cqe_info {
+ __be16 sqhd;
+ __be16 sqid;
+ __be16 cid;
+ __be16 sf_p;
+};
+
+enum fun_eprq_def {
+ FUN_EPRQ_PKT_ALIGN = 0x80,
+};
+
+struct fun_eprq_rqbuf {
+ __be64 bufaddr;
+};
+
+#define FUN_EPRQ_RQBUF_INIT(_bufaddr) \
+ (struct fun_eprq_rqbuf) { \
+ .bufaddr = cpu_to_be64(_bufaddr), \
+ }
+
+enum fun_eth_op {
+ FUN_ETH_OP_TX = 0x1,
+ FUN_ETH_OP_RX = 0x2,
+};
+
+enum {
+ FUN_ETH_OFFLOAD_EN = 0x8000,
+ FUN_ETH_OUTER_EN = 0x4000,
+ FUN_ETH_INNER_LSO = 0x2000,
+ FUN_ETH_INNER_TSO = 0x1000,
+ FUN_ETH_OUTER_IPV6 = 0x800,
+ FUN_ETH_OUTER_UDP = 0x400,
+ FUN_ETH_INNER_IPV6 = 0x200,
+ FUN_ETH_INNER_UDP = 0x100,
+ FUN_ETH_UPDATE_OUTER_L3_LEN = 0x80,
+ FUN_ETH_UPDATE_OUTER_L3_CKSUM = 0x40,
+ FUN_ETH_UPDATE_OUTER_L4_LEN = 0x20,
+ FUN_ETH_UPDATE_OUTER_L4_CKSUM = 0x10,
+ FUN_ETH_UPDATE_INNER_L3_LEN = 0x8,
+ FUN_ETH_UPDATE_INNER_L3_CKSUM = 0x4,
+ FUN_ETH_UPDATE_INNER_L4_LEN = 0x2,
+ FUN_ETH_UPDATE_INNER_L4_CKSUM = 0x1,
+};
+
+struct fun_eth_offload {
+ __be16 flags; /* combination of above flags */
+ __be16 mss; /* TSO max seg size */
+ __be16 tcp_doff_flags; /* TCP data offset + flags 16b word */
+ __be16 vlan;
+
+ __be16 inner_l3_off; /* Inner L3 header offset */
+ __be16 inner_l4_off; /* Inner L4 header offset */
+ __be16 outer_l3_off; /* Outer L3 header offset */
+ __be16 outer_l4_off; /* Outer L4 header offset */
+};
+
+static inline void fun_eth_offload_init(struct fun_eth_offload *s, u16 flags,
+ u16 mss, __be16 tcp_doff_flags,
+ __be16 vlan, u16 inner_l3_off,
+ u16 inner_l4_off, u16 outer_l3_off,
+ u16 outer_l4_off)
+{
+ s->flags = cpu_to_be16(flags);
+ s->mss = cpu_to_be16(mss);
+ s->tcp_doff_flags = tcp_doff_flags;
+ s->vlan = vlan;
+ s->inner_l3_off = cpu_to_be16(inner_l3_off);
+ s->inner_l4_off = cpu_to_be16(inner_l4_off);
+ s->outer_l3_off = cpu_to_be16(outer_l3_off);
+ s->outer_l4_off = cpu_to_be16(outer_l4_off);
+}
+
+struct fun_eth_tls {
+ __be64 tlsid;
+};
+
+enum {
+ FUN_ETH_TX_TLS = 0x8000,
+};
+
+struct fun_eth_tx_req {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 repr_idn;
+ __be16 encap_proto;
+
+ struct fun_eth_offload offload;
+
+ struct fun_dataop_hdr dataop;
+};
+
+struct fun_eth_rx_cv {
+ __be16 il4_prot_to_l2_type;
+};
+
+#define FUN_ETH_RX_CV_IL4_PROT_S 13U
+#define FUN_ETH_RX_CV_IL4_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_IL3_PROT_S 11U
+#define FUN_ETH_RX_CV_IL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_OL4_PROT_S 8U
+#define FUN_ETH_RX_CV_OL4_PROT_M 0x7
+
+#define FUN_ETH_RX_CV_ENCAP_TYPE_S 6U
+#define FUN_ETH_RX_CV_ENCAP_TYPE_M 0x3
+
+#define FUN_ETH_RX_CV_OL3_PROT_S 4U
+#define FUN_ETH_RX_CV_OL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_VLAN_TYPE_S 3U
+#define FUN_ETH_RX_CV_VLAN_TYPE_M 0x1
+
+#define FUN_ETH_RX_CV_L2_TYPE_S 2U
+#define FUN_ETH_RX_CV_L2_TYPE_M 0x1
+
+enum fun_rx_cv {
+ FUN_RX_CV_NONE = 0x0,
+ FUN_RX_CV_IP = 0x2,
+ FUN_RX_CV_IP6 = 0x3,
+ FUN_RX_CV_TCP = 0x2,
+ FUN_RX_CV_UDP = 0x3,
+ FUN_RX_CV_VXLAN = 0x2,
+ FUN_RX_CV_MPLS = 0x3,
+};
+
+struct fun_eth_cqe {
+ __u8 op;
+ __u8 len8;
+ __u8 nsgl;
+ __u8 repr_idn;
+ __be32 pkt_len;
+
+ __be64 timestamp;
+
+ __be16 pkt_cv;
+ __be16 rsvd0;
+ __be32 hash;
+
+ __be16 encap_proto;
+ __be16 vlan;
+ __be32 rsvd1;
+
+ __be32 buf_offset;
+ __be16 headroom;
+ __be16 csum;
+};
+
+enum fun_admin_adi_attr {
+ FUN_ADMIN_ADI_ATTR_MACADDR = 0x1,
+ FUN_ADMIN_ADI_ATTR_VLAN = 0x2,
+ FUN_ADMIN_ADI_ATTR_RATE = 0x3,
+};
+
+struct fun_adi_param {
+ union adi_param {
+ struct fun_adi_mac {
+ __be64 addr;
+ } mac;
+ struct fun_adi_vlan {
+ __be32 rsvd;
+ __be16 eth_type;
+ __be16 tci;
+ } vlan;
+ struct fun_adi_rate {
+ __be32 rsvd;
+ __be32 tx_mbps;
+ } rate;
+ } u;
+};
+
+#define FUN_ADI_MAC_INIT(_addr) \
+ (struct fun_adi_mac) { \
+ .addr = cpu_to_be64(_addr), \
+ }
+
+#define FUN_ADI_VLAN_INIT(_eth_type, _tci) \
+ (struct fun_adi_vlan) { \
+ .eth_type = cpu_to_be16(_eth_type), .tci = cpu_to_be16(_tci), \
+ }
+
+#define FUN_ADI_RATE_INIT(_tx_mbps) \
+ (struct fun_adi_rate) { \
+ .tx_mbps = cpu_to_be32(_tx_mbps), \
+ }
+
+struct fun_admin_adi_req {
+ struct fun_admin_req_common common;
+
+ union adi_req_subop {
+ struct fun_admin_adi_write_req {
+ __u8 subop;
+ __u8 attribute;
+ __be16 rsvd;
+ __be32 id;
+
+ struct fun_adi_param param;
+ } write;
+ } u;
+};
+
+#define FUN_ADMIN_ADI_WRITE_REQ_INIT(_subop, _attribute, _id) \
+ (struct fun_admin_adi_write_req) { \
+ .subop = (_subop), .attribute = (_attribute), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#endif /* __FUN_HCI_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
new file mode 100644
index 000000000000..8ab9f68434f5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "fun_dev.h"
+#include "fun_queue.h"
+
+/* Allocate memory for a queue. This includes the memory for the HW descriptor
+ * ring, an optional 64b HW write-back area, and an optional SW state ring.
+ * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring,
+ * and the VA of the write-back area.
+ */
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va)
+{
+ int dev_node = dev_to_node(dma_dev);
+ size_t dma_sz;
+ void *va;
+
+ if (numa_node == NUMA_NO_NODE)
+ numa_node = dev_node;
+
+ /* Place optional write-back area at end of descriptor ring. */
+ dma_sz = hw_desc_sz * depth;
+ if (wb)
+ dma_sz += sizeof(u64);
+
+ set_dev_node(dma_dev, numa_node);
+ va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL);
+ set_dev_node(dma_dev, dev_node);
+ if (!va)
+ return NULL;
+
+ if (sw_desc_sz) {
+ *sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL,
+ numa_node);
+ if (!*sw_va) {
+ dma_free_coherent(dma_dev, dma_sz, va, *dma_addr);
+ return NULL;
+ }
+ }
+
+ if (wb)
+ *wb_va = va + dma_sz - sizeof(u64);
+ return va;
+}
+EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);
+
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va)
+{
+ if (hw_va) {
+ size_t sz = depth * hw_desc_sz;
+
+ if (wb)
+ sz += sizeof(u64);
+ dma_free_coherent(dma_dev, sz, hw_va, dma_addr);
+ }
+ kvfree(sw_va);
+}
+EXPORT_SYMBOL_GPL(fun_free_ring_mem);
+
+/* Prepare and issue an admin command to create an SQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device it is
+ * returned in *sqidp.
+ */
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epsq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ dma_addr_t wb_addr;
+ u32 hw_qid;
+ int rc;
+
+ if (sq_depth > fdev->q_depth)
+ return -EINVAL;
+ if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
+ sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));
+
+ wb_addr = dma_addr + (sq_depth << sqe_size_log2);
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ sqid, cqid, sqe_size_log2,
+ sq_depth - 1, dma_addr, 0,
+ coal_nentries, coal_usec,
+ irq_num, scan_start_id,
+ scan_end_id, 0,
+ rq_buf_size_log2,
+ ilog2(sizeof(u64)), wb_addr);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_sq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *sqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_sq_create);
+
+/* Prepare and issue an admin command to create a CQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device it is
+ * returned in *cqidp.
+ */
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
+ u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epcq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ u32 hw_qid;
+ int rc;
+
+ if (cq_depth > fdev->q_depth)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ cqid, rqid, cqe_size_log2,
+ cq_depth - 1, dma_addr, tailroom,
+ headroom / 2, 0, coal_nentries,
+ coal_usec, irq_num,
+ scan_start_id, scan_end_id, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_cq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *cqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_cq_create);
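+
+/* Illustrative ordering for creating a CQ/SQ pair by hand (a sketch; the
+ * ids, depths, sizes, DMA addresses and irq are hypothetical). The CQ is
+ * created first, as fun_create_queue() below also does:
+ *
+ *	rc = fun_cq_create(fdev, 0, cqid, FUN_HCI_ID_INVALID,
+ *			   ilog2(CQE_SIZE), depth, cq_dma, 0, 0, 0, 0,
+ *			   irq, 0, 0, &cqid, &cq_db);
+ *	if (!rc)
+ *		rc = fun_sq_create(fdev, 0, sqid, cqid, ilog2(SQE_SIZE),
+ *				   depth, sq_dma, 0, 0, irq, 0, 0, 0,
+ *				   &sqid, &sq_db);
+ */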
+
+static bool fun_sq_is_head_wb(const struct fun_queue *funq)
+{
+ return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS;
+}
+
+static void fun_clean_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_rq_info *rqinfo;
+ unsigned int i;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ if (rqinfo->page) {
+ dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ }
+ }
+}
+
+static int fun_fill_rq(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+ int i, node = dev_to_node(dev);
+ struct fun_rq_info *rqinfo;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (unlikely(!rqinfo->page))
+ return -ENOMEM;
+
+ rqinfo->dma = dma_map_page(dev, rqinfo->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, rqinfo->dma))) {
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ return -ENOMEM;
+ }
+
+ funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma);
+ }
+
+ funq->rq_tail = funq->rq_depth - 1;
+ return 0;
+}
+
+static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset)
+{
+ if (buf_offset <= funq->rq_buf_offset) {
+ struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ struct device *dev = funq->fdev->dev;
+
+ dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ funq->num_rqe_to_fill++;
+ if (++funq->rq_buf_idx == funq->rq_depth)
+ funq->rq_buf_idx = 0;
+ }
+ funq->rq_buf_offset = buf_offset;
+}
+
+/* Given a command response with data scattered across >= 1 RQ buffers, return
+ * a pointer to a contiguous buffer containing all the data. If the data is in
+ * one RQ buffer the start address within that buffer is returned, otherwise a
+ * new buffer is allocated and the data is gathered into it.
+ */
+static void *fun_data_from_rq(struct fun_queue *funq,
+ const struct fun_rsp_common *rsp, bool *need_free)
+{
+ u32 bufoff, total_len, remaining, fragsize, dataoff;
+ struct device *dma_dev = funq->fdev->dev;
+ const struct fun_dataop_rqbuf *databuf;
+ const struct fun_dataop_hdr *dataop;
+ const struct fun_rq_info *rqinfo;
+ void *data;
+
+ dataop = (void *)rsp + rsp->suboff8 * 8;
+ total_len = be32_to_cpu(dataop->total_len);
+
+ if (likely(dataop->nsgl == 1)) {
+ databuf = (struct fun_dataop_rqbuf *)dataop->imm;
+ bufoff = be32_to_cpu(databuf->bufoff);
+ fun_rq_update_pos(funq, bufoff);
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff,
+ total_len, DMA_FROM_DEVICE);
+ *need_free = false;
+ return page_address(rqinfo->page) + bufoff;
+ }
+
+ /* For scattered completions gather the fragments into one buffer. */
+
+ data = kmalloc(total_len, GFP_ATOMIC);
+ /* NULL is OK here. In case of failure we still need to consume the data
+ * for proper buffer accounting but indicate an error in the response.
+ */
+ if (likely(data))
+ *need_free = true;
+
+ dataoff = 0;
+ for (remaining = total_len; remaining; remaining -= fragsize) {
+ fun_rq_update_pos(funq, 0);
+ fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
+ if (data) {
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize,
+ DMA_FROM_DEVICE);
+ memcpy(data + dataoff, page_address(rqinfo->page),
+ fragsize);
+ dataoff += fragsize;
+ }
+ }
+ return data;
+}
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ const struct fun_cqe_info *info;
+ struct fun_rsp_common *rsp;
+ unsigned int new_cqes;
+ u16 sf_p, flags;
+ bool need_free;
+ void *cqe;
+
+ if (!max)
+ max = funq->cq_depth - 1;
+
+ for (new_cqes = 0; new_cqes < max; new_cqes++) {
+ cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
+ info = funq_cqe_info(funq, cqe);
+ sf_p = be16_to_cpu(info->sf_p);
+
+ if ((sf_p & 1) != funq->cq_phase)
+ break;
+
+ /* ensure the phase tag is read before other CQE fields */
+ dma_rmb();
+
+ if (++funq->cq_head == funq->cq_depth) {
+ funq->cq_head = 0;
+ funq->cq_phase = !funq->cq_phase;
+ }
+
+ rsp = cqe;
+ flags = be16_to_cpu(rsp->flags);
+
+ need_free = false;
+ if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) {
+ rsp = fun_data_from_rq(funq, rsp, &need_free);
+ if (!rsp) {
+ rsp = cqe;
+ rsp->len8 = 1;
+ if (rsp->ret == 0)
+ rsp->ret = ENOMEM;
+ }
+ }
+
+ if (funq->cq_cb)
+ funq->cq_cb(funq, funq->cb_data, rsp, info);
+ if (need_free)
+ kfree(rsp);
+ }
+
+ dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n",
+ funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);
+ return new_cqes;
+}
+
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ unsigned int processed;
+ u32 db;
+
+ processed = __fun_process_cq(funq, max);
+
+ if (funq->num_rqe_to_fill) {
+ funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) %
+ funq->rq_depth;
+ funq->num_rqe_to_fill = 0;
+ writel(funq->rq_tail, funq->rq_db);
+ }
+
+ db = funq->cq_head | FUN_DB_IRQ_ARM_F;
+ writel(db, funq->cq_db);
+ return processed;
+}
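+
+/* Illustrative IRQ handler built on fun_process_cq() (a sketch; the handler
+ * name is hypothetical, see fun_request_irq() below). Passing max = 0
+ * processes up to cq_depth - 1 CQEs:
+ *
+ *	static irqreturn_t my_irq_handler(int irq, void *data)
+ *	{
+ *		struct fun_queue *funq = data;
+ *
+ *		fun_process_cq(funq, 0);
+ *		return IRQ_HANDLED;
+ *	}
+ */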
+
+static int fun_alloc_sqes(struct fun_queue *funq)
+{
+ funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,
+ 1 << funq->sqe_size_log2, 0,
+ fun_sq_is_head_wb(funq),
+ NUMA_NO_NODE, &funq->sq_dma_addr,
+ NULL, &funq->sq_head);
+ return funq->sq_cmds ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_cqes(struct fun_queue *funq)
+{
+ funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
+ 1 << funq->cqe_size_log2, 0, false,
+ NUMA_NO_NODE, &funq->cq_dma_addr, NULL,
+ NULL);
+ return funq->cqes ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_rqes(struct fun_queue *funq)
+{
+ funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
+ sizeof(*funq->rqes),
+ sizeof(*funq->rq_info), false,
+ NUMA_NO_NODE, &funq->rq_dma_addr,
+ (void **)&funq->rq_info, NULL);
+ return funq->rqes ? 0 : -ENOMEM;
+}
+
+/* Free a queue's structures. */
+void fun_free_queue(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+
+ fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,
+ funq->cqes, funq->cq_dma_addr, NULL);
+ fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,
+ fun_sq_is_head_wb(funq), funq->sq_cmds,
+ funq->sq_dma_addr, NULL);
+
+ if (funq->rqes) {
+ fun_clean_rq(funq);
+ fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
+ false, funq->rqes, funq->rq_dma_addr,
+ funq->rq_info);
+ }
+
+ kfree(funq);
+}
+
+/* Allocate and initialize a funq's structures. */
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req)
+{
+ struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL);
+
+ if (!funq)
+ return NULL;
+
+ funq->fdev = fdev;
+ spin_lock_init(&funq->sq_lock);
+
+ funq->qid = qid;
+
+ /* Initial CQ/SQ/RQ ids */
+ if (req->rq_depth) {
+ funq->cqid = 2 * qid;
+ if (funq->qid) {
+			/* I/O Q: use rqid = cqid, sqid = rqid + 1 */
+ funq->rqid = funq->cqid;
+ funq->sqid = funq->rqid + 1;
+ } else {
+ /* Admin Q: sqid is always 0, use ID 1 for RQ */
+ funq->sqid = 0;
+ funq->rqid = 1;
+ }
+ } else {
+ funq->cqid = qid;
+ funq->sqid = qid;
+ }
+
+ funq->cq_flags = req->cq_flags;
+ funq->sq_flags = req->sq_flags;
+
+ funq->cqe_size_log2 = req->cqe_size_log2;
+ funq->sqe_size_log2 = req->sqe_size_log2;
+
+ funq->cq_depth = req->cq_depth;
+ funq->sq_depth = req->sq_depth;
+
+ funq->cq_intcoal_nentries = req->cq_intcoal_nentries;
+ funq->cq_intcoal_usec = req->cq_intcoal_usec;
+
+ funq->sq_intcoal_nentries = req->sq_intcoal_nentries;
+ funq->sq_intcoal_usec = req->sq_intcoal_usec;
+
+ if (fun_alloc_cqes(funq))
+ goto free_funq;
+
+ funq->cq_phase = 1;
+
+ if (fun_alloc_sqes(funq))
+ goto free_funq;
+
+ if (req->rq_depth) {
+ funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;
+ funq->rq_depth = req->rq_depth;
+ funq->rq_buf_offset = -1;
+
+ if (fun_alloc_rqes(funq) || fun_fill_rq(funq))
+ goto free_funq;
+ }
+
+ funq->cq_vector = -1;
+ funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info);
+
+	/* SQ/CQ 0 are implicitly created; assign their doorbells now.
+ * Other queues are assigned doorbells at their explicit creation.
+ */
+ if (funq->sqid == 0)
+ funq->sq_db = fun_sq_db_addr(fdev, 0);
+ if (funq->cqid == 0)
+ funq->cq_db = fun_cq_db_addr(fdev, 0);
+
+ return funq;
+
+free_funq:
+ fun_free_queue(funq);
+ return NULL;
+}
+
+/* Create a funq's CQ on the device. */
+static int fun_create_cq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ unsigned int rqid;
+ int rc;
+
+ rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
+ funq->rqid : FUN_HCI_ID_INVALID;
+ rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
+ funq->cqe_size_log2, funq->cq_depth,
+ funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
+ funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
+ &funq->cqid, &funq->cq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
+
+ return rc;
+}
+
+/* Create a funq's SQ on the device. */
+static int fun_create_sq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
+ funq->sqe_size_log2, funq->sq_depth,
+ funq->sq_dma_addr, funq->sq_intcoal_nentries,
+ funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
+ 0, &funq->sqid, &funq->sq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
+
+ return rc;
+}
+
+/* Create a funq's RQ on the device. */
+int fun_create_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,
+ funq->rq_depth, funq->rq_dma_addr, 0, 0,
+ funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid,
+ &funq->rq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid);
+
+ return rc;
+}
+
+static unsigned int funq_irq(struct fun_queue *funq)
+{
+ return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector);
+}
+
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data)
+{
+ int rc;
+
+ if (funq->cq_vector < 0)
+ return -EINVAL;
+
+ funq->irq_handler = handler;
+ funq->irq_data = data;
+
+ snprintf(funq->irqname, sizeof(funq->irqname),
+ funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);
+
+ rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data);
+ if (rc)
+ funq->irq_handler = NULL;
+
+ return rc;
+}
+
+/* Create all component queues of a funq on the device. */
+int fun_create_queue(struct fun_queue *funq)
+{
+ int rc;
+
+ rc = fun_create_cq(funq);
+ if (rc)
+ return rc;
+
+ if (funq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto release_cq;
+ }
+
+ rc = fun_create_sq(funq);
+ if (rc)
+ goto release_rq;
+
+ return 0;
+
+release_rq:
+ fun_destroy_sq(funq->fdev, funq->rqid);
+release_cq:
+ fun_destroy_cq(funq->fdev, funq->cqid);
+ return rc;
+}
+
+void fun_free_irq(struct fun_queue *funq)
+{
+ if (funq->irq_handler) {
+ unsigned int vector = funq_irq(funq);
+
+ free_irq(vector, funq->irq_data);
+ funq->irq_handler = NULL;
+ funq->irq_data = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
new file mode 100644
index 000000000000..7fb53d0ae8b0
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_QUEUE_H
+#define _FUN_QUEUE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct device;
+struct fun_dev;
+struct fun_queue;
+struct fun_cqe_info;
+struct fun_rsp_common;
+
+typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
+ const struct fun_cqe_info *info);
+
+struct fun_rq_info {
+ dma_addr_t dma;
+ struct page *page;
+};
+
+/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
+struct fun_queue {
+ struct fun_dev *fdev;
+ spinlock_t sq_lock;
+
+ dma_addr_t cq_dma_addr;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t rq_dma_addr;
+
+ u32 __iomem *cq_db;
+ u32 __iomem *sq_db;
+ u32 __iomem *rq_db;
+
+ void *cqes;
+ void *sq_cmds;
+ struct fun_eprq_rqbuf *rqes;
+ struct fun_rq_info *rq_info;
+
+ u32 cqid;
+ u32 sqid;
+ u32 rqid;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u16 cq_head;
+ u16 sq_tail;
+ u16 rq_tail;
+
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cqe_info_offset;
+
+ u16 rq_buf_idx;
+ int rq_buf_offset;
+ u16 num_rqe_to_fill;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ /* SQ head writeback */
+ u16 sq_comp;
+
+ volatile __be64 *sq_head;
+
+ cq_callback_t cq_cb;
+ void *cb_data;
+
+ irq_handler_t irq_handler;
+ void *irq_data;
+ s16 cq_vector;
+ u8 cq_phase;
+
+ /* I/O q index */
+ u16 qid;
+
+ char irqname[24];
+};
+
+static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
+{
+ return funq->sq_cmds + (pos << funq->sqe_size_log2);
+}
+
+static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
+{
+ if (++tail == funq->sq_depth)
+ tail = 0;
+ funq->sq_tail = tail;
+ writel(tail, funq->sq_db);
+}
+
+static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
+ void *cqe)
+{
+ return cqe + funq->cqe_info_offset;
+}
+
+static inline void funq_rq_post(struct fun_queue *funq)
+{
+ writel(funq->rq_tail, funq->rq_db);
+}
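+
+/* Illustrative SQE submission (assumed caller code): write one command
+ * at the current tail, then advance the tail and ring the SQ doorbell,
+ * all under sq_lock:
+ *
+ * spin_lock(&funq->sq_lock);
+ * memcpy(fun_sqe_at(funq, funq->sq_tail), cmd,
+ *        1 << funq->sqe_size_log2);
+ * funq_sq_post_tail(funq, funq->sq_tail);
+ * spin_unlock(&funq->sq_lock);
+ */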
+
+struct fun_queue_alloc_req {
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+};
+
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id,
+ u32 *cqidp, u32 __iomem **dbp);
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_size, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va);
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);
+
+#define fun_destroy_sq(fdev, sqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
+#define fun_destroy_cq(fdev, cqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))
+
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req);
+void fun_free_queue(struct fun_queue *funq);
+
+static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
+ void *cb_data)
+{
+ funq->cq_cb = cb;
+ funq->cb_data = cb_data;
+}
+
+int fun_create_rq(struct fun_queue *funq);
+int fun_create_queue(struct fun_queue *funq);
+
+void fun_free_irq(struct fun_queue *funq);
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data);
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
+
+#endif /* _FUN_QUEUE_H */
diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig
new file mode 100644
index 000000000000..c72ad9386400
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible Ethernet driver configuration
+#
+
+config FUN_ETH
+ tristate "Fungible Ethernet device driver"
+ depends on PCI && PCI_MSI
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
+ select NET_DEVLINK
+ select FUN_CORE
+ help
+ This driver supports the Ethernet functionality of Fungible adapters.
+ It works with both physical and virtual functions.
+
+ To compile this driver as a module, choose M here. The module
+ will be called funeth.
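+
+# Example .config fragment (illustrative only): building funeth as a
+# module with kTLS Tx offload available requires something like
+#   CONFIG_TLS=m
+#   CONFIG_TLS_DEVICE=y
+#   CONFIG_FUN_ETH=m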
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
new file mode 100644
index 000000000000..646d69595b4f
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+
+obj-$(CONFIG_FUN_ETH) += funeth.o
+
+funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \
+ funeth_ethtool.o
+
+funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o
diff --git a/drivers/net/ethernet/fungible/funeth/fun_port.h b/drivers/net/ethernet/fungible/funeth/fun_port.h
new file mode 100644
index 000000000000..0f9da44e3786
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/fun_port.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_PORT_H
+#define _FUN_PORT_H
+
+enum port_mac_rx_stats {
+ PORT_MAC_RX_etherStatsOctets = 0x0,
+ PORT_MAC_RX_OctetsReceivedOK = 0x1,
+ PORT_MAC_RX_aAlignmentErrors = 0x2,
+ PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3,
+ PORT_MAC_RX_aFrameTooLongErrors = 0x4,
+ PORT_MAC_RX_aInRangeLengthErrors = 0x5,
+ PORT_MAC_RX_aFramesReceivedOK = 0x6,
+ PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7,
+ PORT_MAC_RX_VLANReceivedOK = 0x8,
+ PORT_MAC_RX_ifInErrors = 0x9,
+ PORT_MAC_RX_ifInUcastPkts = 0xa,
+ PORT_MAC_RX_ifInMulticastPkts = 0xb,
+ PORT_MAC_RX_ifInBroadcastPkts = 0xc,
+ PORT_MAC_RX_etherStatsDropEvents = 0xd,
+ PORT_MAC_RX_etherStatsPkts = 0xe,
+ PORT_MAC_RX_etherStatsUndersizePkts = 0xf,
+ PORT_MAC_RX_etherStatsPkts64Octets = 0x10,
+ PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11,
+ PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12,
+ PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13,
+ PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14,
+ PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15,
+ PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16,
+ PORT_MAC_RX_etherStatsOversizePkts = 0x17,
+ PORT_MAC_RX_etherStatsJabbers = 0x18,
+ PORT_MAC_RX_etherStatsFragments = 0x19,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29,
+ PORT_MAC_RX_MACControlFramesReceived = 0x2a,
+ PORT_MAC_RX_STATS_MAX = 0x2b,
+};
+
+enum port_mac_tx_stats {
+ PORT_MAC_TX_etherStatsOctets = 0x0,
+ PORT_MAC_TX_OctetsTransmittedOK = 0x1,
+ PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2,
+ PORT_MAC_TX_aFramesTransmittedOK = 0x3,
+ PORT_MAC_TX_VLANTransmittedOK = 0x4,
+ PORT_MAC_TX_ifOutErrors = 0x5,
+ PORT_MAC_TX_ifOutUcastPkts = 0x6,
+ PORT_MAC_TX_ifOutMulticastPkts = 0x7,
+ PORT_MAC_TX_ifOutBroadcastPkts = 0x8,
+ PORT_MAC_TX_etherStatsPkts64Octets = 0x9,
+ PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa,
+ PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb,
+ PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc,
+ PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd,
+ PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe,
+ PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f,
+ PORT_MAC_TX_MACControlFramesTransmitted = 0x20,
+ PORT_MAC_TX_etherStatsPkts = 0x21,
+ PORT_MAC_TX_STATS_MAX = 0x22,
+};
+
+enum port_mac_fec_stats {
+ PORT_MAC_FEC_Correctable = 0x0,
+ PORT_MAC_FEC_Uncorrectable = 0x1,
+ PORT_MAC_FEC_STATS_MAX = 0x2,
+};
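+
+/* The device returns all three stat groups back-to-back in one __be64
+ * array (Rx first, then Tx, then FEC); see the port stats DMA area in
+ * struct funeth_priv. An illustrative lookup, matching the TX_STAT()
+ * and FEC_STAT() macros in funeth_ethtool.c:
+ *
+ * tx_ok = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX +
+ *                               PORT_MAC_TX_aFramesTransmittedOK]);
+ */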
+
+#endif /* _FUN_PORT_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h
new file mode 100644
index 000000000000..1250e10d21db
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_H
+#define _FUNETH_H
+
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/net_tstamp.h>
+#include <linux/mutex.h>
+#include <linux/seqlock.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+#include "fun_dev.h"
+
+#define ADMIN_SQE_SIZE SZ_128
+#define ADMIN_CQE_SIZE SZ_64
+#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+#define FUN_MAX_MTU 9024
+
+#define SQ_DEPTH 512U
+#define CQ_DEPTH 1024U
+#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))
+
+#define CQ_INTCOAL_USEC 10
+#define CQ_INTCOAL_NPKT 16
+#define SQ_INTCOAL_USEC 10
+#define SQ_INTCOAL_NPKT 16
+
+#define INVALID_LPORT 0xffff
+
+#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE)
+
+struct fun_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ __be16 vlan_proto;
+ u8 qos;
+ u8 spoofchk:1;
+ u8 trusted:1;
+ unsigned int max_rate;
+};
+
+/* "subclass" of fun_dev for Ethernet functions */
+struct fun_ethdev {
+ struct fun_dev fdev;
+
+ /* the function's network ports */
+ struct net_device **netdevs;
+ unsigned int num_ports;
+
+ /* configuration for the function's virtual ports */
+ unsigned int num_vports;
+ struct fun_vport_info *vport_info;
+
+ struct mutex state_mutex; /* nests inside RTNL if both taken */
+
+ unsigned int nsqs_per_port;
+};
+
+static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p)
+{
+ return container_of(p, struct fun_ethdev, fdev);
+}
+
+struct fun_qset {
+ struct funeth_rxq **rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq **xdpqs;
+ unsigned int nrxqs;
+ unsigned int ntxqs;
+ unsigned int nxdpqs;
+ unsigned int rxq_start;
+ unsigned int txq_start;
+ unsigned int xdpq_start;
+ unsigned int cq_depth;
+ unsigned int rq_depth;
+ unsigned int sq_depth;
+ int state;
+};
+
+/* Per netdevice driver state, i.e., netdev_priv. */
+struct funeth_priv {
+ struct fun_dev *fdev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ struct funeth_rxq * __rcu *rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq * __rcu *xdpqs;
+
+ struct xarray irqs;
+ unsigned int num_tx_irqs;
+ unsigned int num_rx_irqs;
+ unsigned int rx_irq_ofst;
+
+ unsigned int lane_attrs;
+ u16 lport;
+
+ /* link settings */
+ u64 port_caps;
+ u64 advertising;
+ u64 lp_advertising;
+ unsigned int link_speed;
+ u8 xcvr_type;
+ u8 active_fc;
+ u8 active_fec;
+ u8 link_down_reason;
+ seqcount_t link_seq;
+
+ u32 msg_enable;
+
+ unsigned int num_xdpqs;
+
+ /* ethtool, etc. config parameters */
+ unsigned int sq_depth;
+ unsigned int rq_depth;
+ unsigned int cq_depth;
+ unsigned int cq_irq_db;
+ u8 tx_coal_usec;
+ u8 tx_coal_count;
+ u8 rx_coal_usec;
+ u8 rx_coal_count;
+
+ struct hwtstamp_config hwtstamp_cfg;
+
+ /* cumulative queue stats from earlier queue instances */
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+
+ /* RSS */
+ unsigned int rss_hw_id;
+ enum fun_eth_hash_alg hash_algo;
+ u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE];
+ unsigned int indir_table_nentries;
+ u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT];
+ dma_addr_t rss_dma_addr;
+ void *rss_cfg;
+
+ /* DMA area for port stats */
+ dma_addr_t stats_dma_addr;
+ __be64 *stats;
+
+ struct bpf_prog *xdp_prog;
+
+ struct devlink_port dl_port;
+
+ /* kTLS state */
+ unsigned int ktls_id;
+ atomic64_t tx_tls_add;
+ atomic64_t tx_tls_del;
+ atomic64_t tx_tls_resync;
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev);
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data);
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data);
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid);
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack);
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx);
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx);
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op);
+
+#endif /* _FUNETH_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
new file mode 100644
index 000000000000..d50c222948b4
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+
+static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, KBUILD_MODNAME);
+}
+
+static const struct devlink_ops fun_dl_ops = {
+ .info_get = fun_dl_info_get,
+};
+
+struct devlink *fun_devlink_alloc(struct device *dev)
+{
+ return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
+}
+
+void fun_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+void fun_devlink_register(struct devlink *devlink)
+{
+ devlink_register(devlink);
+}
+
+void fun_devlink_unregister(struct devlink *devlink)
+{
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.h b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
new file mode 100644
index 000000000000..e40464d57ff4
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUNETH_DEVLINK_H
+#define __FUNETH_DEVLINK_H
+
+#include <net/devlink.h>
+
+struct devlink *fun_devlink_alloc(struct device *dev);
+void fun_devlink_free(struct devlink *devlink);
+void fun_devlink_register(struct devlink *devlink);
+void fun_devlink_unregister(struct devlink *devlink);
+
+#endif /* __FUNETH_DEVLINK_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
new file mode 100644
index 000000000000..31aa185f4d17
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/netdevice.h>
+#include <linux/nvme.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include "funeth.h"
+#include "fun_port.h"
+#include "funeth_txrx.h"
+
+/* Min queue depth. The smallest power of 2 that supports jumbo frames with
+ * 4K pages is 8. We require it for all queue types, though some could work
+ * with fewer entries.
+ */
+#define FUNETH_MIN_QDEPTH 8
+
+static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_tx_octets_total",
+ "mac_tx_frames_total",
+ "mac_tx_vlan_frames_ok",
+ "mac_tx_unicast_frames",
+ "mac_tx_multicast_frames",
+ "mac_tx_broadcast_frames",
+ "mac_tx_errors",
+ "mac_tx_CBFCPAUSE0",
+ "mac_tx_CBFCPAUSE1",
+ "mac_tx_CBFCPAUSE2",
+ "mac_tx_CBFCPAUSE3",
+ "mac_tx_CBFCPAUSE4",
+ "mac_tx_CBFCPAUSE5",
+ "mac_tx_CBFCPAUSE6",
+ "mac_tx_CBFCPAUSE7",
+ "mac_tx_CBFCPAUSE8",
+ "mac_tx_CBFCPAUSE9",
+ "mac_tx_CBFCPAUSE10",
+ "mac_tx_CBFCPAUSE11",
+ "mac_tx_CBFCPAUSE12",
+ "mac_tx_CBFCPAUSE13",
+ "mac_tx_CBFCPAUSE14",
+ "mac_tx_CBFCPAUSE15",
+};
+
+static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_rx_octets_total",
+ "mac_rx_frames_total",
+ "mac_rx_VLAN_frames_ok",
+ "mac_rx_unicast_frames",
+ "mac_rx_multicast_frames",
+ "mac_rx_broadcast_frames",
+ "mac_rx_drop_events",
+ "mac_rx_errors",
+ "mac_rx_alignment_errors",
+ "mac_rx_CBFCPAUSE0",
+ "mac_rx_CBFCPAUSE1",
+ "mac_rx_CBFCPAUSE2",
+ "mac_rx_CBFCPAUSE3",
+ "mac_rx_CBFCPAUSE4",
+ "mac_rx_CBFCPAUSE5",
+ "mac_rx_CBFCPAUSE6",
+ "mac_rx_CBFCPAUSE7",
+ "mac_rx_CBFCPAUSE8",
+ "mac_rx_CBFCPAUSE9",
+ "mac_rx_CBFCPAUSE10",
+ "mac_rx_CBFCPAUSE11",
+ "mac_rx_CBFCPAUSE12",
+ "mac_rx_CBFCPAUSE13",
+ "mac_rx_CBFCPAUSE14",
+ "mac_rx_CBFCPAUSE15",
+};
+
+static const char * const txq_stat_names[] = {
+ "tx_pkts",
+ "tx_bytes",
+ "tx_cso",
+ "tx_tso",
+ "tx_encapsulated_tso",
+ "tx_uso",
+ "tx_more",
+ "tx_queue_stops",
+ "tx_queue_restarts",
+ "tx_mapping_errors",
+ "tx_tls_encrypted_packets",
+ "tx_tls_encrypted_bytes",
+ "tx_tls_ooo",
+ "tx_tls_drop_no_sync_data",
+};
+
+static const char * const xdpq_stat_names[] = {
+ "tx_xdp_pkts",
+ "tx_xdp_bytes",
+ "tx_xdp_full",
+ "tx_xdp_mapping_errors",
+};
+
+static const char * const rxq_stat_names[] = {
+ "rx_pkts",
+ "rx_bytes",
+ "rx_cso",
+ "gro_pkts",
+ "gro_merged",
+ "rx_xdp_tx",
+ "rx_xdp_redir",
+ "rx_xdp_drops",
+ "rx_buffers",
+ "rx_page_allocs",
+ "rx_drops",
+ "rx_budget_exhausted",
+ "rx_mapping_errors",
+};
+
+static const char * const tls_stat_names[] = {
+ "tx_tls_ctx",
+ "tx_tls_del",
+ "tx_tls_resync",
+};
+
+static void fun_link_modes_to_ethtool(u64 modes,
+ unsigned long *ethtool_modes_map)
+{
+#define ADD_LINK_MODE(mode) \
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)
+
+ if (modes & FUN_PORT_CAP_AUTONEG)
+ ADD_LINK_MODE(Autoneg);
+ if (modes & FUN_PORT_CAP_1000_X)
+ ADD_LINK_MODE(1000baseX_Full);
+ if (modes & FUN_PORT_CAP_10G_R) {
+ ADD_LINK_MODE(10000baseCR_Full);
+ ADD_LINK_MODE(10000baseSR_Full);
+ ADD_LINK_MODE(10000baseLR_Full);
+ ADD_LINK_MODE(10000baseER_Full);
+ }
+ if (modes & FUN_PORT_CAP_25G_R) {
+ ADD_LINK_MODE(25000baseCR_Full);
+ ADD_LINK_MODE(25000baseSR_Full);
+ }
+ if (modes & FUN_PORT_CAP_40G_R4) {
+ ADD_LINK_MODE(40000baseCR4_Full);
+ ADD_LINK_MODE(40000baseSR4_Full);
+ ADD_LINK_MODE(40000baseLR4_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R2) {
+ ADD_LINK_MODE(50000baseCR2_Full);
+ ADD_LINK_MODE(50000baseSR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R) {
+ ADD_LINK_MODE(50000baseCR_Full);
+ ADD_LINK_MODE(50000baseSR_Full);
+ ADD_LINK_MODE(50000baseLR_ER_FR_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R4) {
+ ADD_LINK_MODE(100000baseCR4_Full);
+ ADD_LINK_MODE(100000baseSR4_Full);
+ ADD_LINK_MODE(100000baseLR4_ER4_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R2) {
+ ADD_LINK_MODE(100000baseCR2_Full);
+ ADD_LINK_MODE(100000baseSR2_Full);
+ ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_FEC_NONE)
+ ADD_LINK_MODE(FEC_NONE);
+ if (modes & FUN_PORT_CAP_FEC_FC)
+ ADD_LINK_MODE(FEC_BASER);
+ if (modes & FUN_PORT_CAP_FEC_RS)
+ ADD_LINK_MODE(FEC_RS);
+ if (modes & FUN_PORT_CAP_RX_PAUSE)
+ ADD_LINK_MODE(Pause);
+
+#undef ADD_LINK_MODE
+}
+
+static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
+{
+ bool rx_pause, tx_pause;
+
+ rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE;
+ tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE;
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+}
+
+static unsigned int fun_port_type(unsigned int xcvr)
+{
+ if (!xcvr)
+ return PORT_NONE;
+
+ switch (xcvr & 7) {
+ case FUN_XCVR_BASET:
+ return PORT_TP;
+ case FUN_XCVR_CU:
+ return PORT_DA;
+ default:
+ return PORT_FIBRE;
+ }
+}
+
+static int fun_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int seq, speed, xcvr;
+ u64 lp_advertising;
+ bool link_up;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
+
+ /* Link settings change asynchronously, take a consistent snapshot */
+ do {
+ seq = read_seqcount_begin(&fp->link_seq);
+ link_up = netif_carrier_ok(netdev);
+ speed = fp->link_speed;
+ xcvr = fp->xcvr_type;
+ lp_advertising = fp->lp_advertising;
+ } while (read_seqcount_retry(&fp->link_seq, seq));
+
+ if (link_up) {
+ ks->base.speed = speed;
+ ks->base.duplex = DUPLEX_FULL;
+ fun_link_modes_to_ethtool(lp_advertising,
+ ks->link_modes.lp_advertising);
+ } else {
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ks->base.port = fun_port_type(xcvr);
+
+ fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
+ if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
+
+ fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
+ set_asym_pause(fp->advertising, ks);
+ return 0;
+}
+
+static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
+{
+ u64 modes = 0;
+
+#define HAS_MODE(mode) \
+ ethtool_link_ksettings_test_link_mode(ks, advertising, mode)
+
+ if (HAS_MODE(1000baseX_Full))
+ modes |= FUN_PORT_CAP_1000_X;
+ if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
+ HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
+ modes |= FUN_PORT_CAP_10G_R;
+ if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
+ modes |= FUN_PORT_CAP_25G_R;
+ if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
+ HAS_MODE(40000baseLR4_Full))
+ modes |= FUN_PORT_CAP_40G_R4;
+ if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
+ modes |= FUN_PORT_CAP_50G_R2;
+ if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
+ HAS_MODE(50000baseLR_ER_FR_Full))
+ modes |= FUN_PORT_CAP_50G_R;
+ if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
+ HAS_MODE(100000baseLR4_ER4_Full))
+ modes |= FUN_PORT_CAP_100G_R4;
+ if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
+ HAS_MODE(100000baseLR2_ER2_FR2_Full))
+ modes |= FUN_PORT_CAP_100G_R2;
+
+ return modes;
+#undef HAS_MODE
+}
+
+static u64 fun_speed_to_link_mode(unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_100000:
+ return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
+ case SPEED_50000:
+ return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
+ case SPEED_40000:
+ return FUN_PORT_CAP_40G_R4;
+ case SPEED_25000:
+ return FUN_PORT_CAP_25G_R;
+ case SPEED_10000:
+ return FUN_PORT_CAP_10G_R;
+ case SPEED_1000:
+ return FUN_PORT_CAP_1000_X;
+ default:
+ return 0;
+ }
+}
+
+static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
+{
+ int err;
+
+ if (new_advert == fp->advertising)
+ return 0;
+
+ err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert);
+ if (!err)
+ fp->advertising = new_advert;
+ return err;
+}
+
+#define FUN_PORT_CAP_FEC_MASK \
+ (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)
+
+static int fun_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ /* eswitch ports don't support mode changes */
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+
+ if (ks->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (ks->base.autoneg == AUTONEG_ENABLE &&
+ !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+
+ if (ks->base.autoneg == AUTONEG_ENABLE) {
+ if (linkmode_empty(ks->link_modes.advertising))
+ return -EINVAL;
+
+ fun_link_modes_to_ethtool(fp->port_caps, supported);
+ if (!linkmode_subset(ks->link_modes.advertising, supported))
+ return -EINVAL;
+
+ new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
+ } else {
+ new_advert = fun_speed_to_link_mode(ks->base.speed);
+ new_advert &= fp->port_caps;
+ if (!new_advert)
+ return -EINVAL;
+ }
+ new_advert |= fp->advertising &
+ (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static void fun_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ u8 active_pause = fp->active_fc;
+
+ pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
+ pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
+ pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ /* Forcing PAUSE settings with AN enabled is unsupported. */
+ if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+ if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+ if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
+ return -EINVAL;
+ if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
+ return -EINVAL;
+
+ new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
+ if (pause->tx_pause)
+ new_advert |= FUN_PORT_CAP_TX_PAUSE;
+ if (pause->rx_pause)
+ new_advert |= FUN_PORT_CAP_RX_PAUSE;
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static int fun_restart_an(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
+ FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int beacon;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
+ return -EOPNOTSUPP;
+
+ beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
+ FUN_PORT_LED_BEACON_OFF;
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
+}
+
+static void fun_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
+}
+
+static u32 fun_get_msglevel(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->msg_enable;
+}
+
+static void fun_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ fp->msg_enable = value;
+}
+
+static int fun_get_regs_len(struct net_device *dev)
+{
+ return NVME_REG_ACQ + sizeof(u64);
+}
+
+static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ void __iomem *bar = fp->fdev->bar;
+
+ regs->version = 0;
+ *(u64 *)(buf + NVME_REG_CAP) = readq(bar + NVME_REG_CAP);
+ *(u32 *)(buf + NVME_REG_VS) = readl(bar + NVME_REG_VS);
+ *(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
+ *(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
+ *(u32 *)(buf + NVME_REG_CC) = readl(bar + NVME_REG_CC);
+ *(u32 *)(buf + NVME_REG_CSTS) = readl(bar + NVME_REG_CSTS);
+ *(u32 *)(buf + NVME_REG_AQA) = readl(bar + NVME_REG_AQA);
+ *(u64 *)(buf + NVME_REG_ASQ) = readq(bar + NVME_REG_ASQ);
+ *(u64 *)(buf + NVME_REG_ACQ) = readq(bar + NVME_REG_ACQ);
+}
+
+static int fun_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = fp->rx_coal_usec;
+ coal->rx_max_coalesced_frames = fp->rx_coal_count;
+ coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
+ coal->tx_coalesce_usecs = fp->tx_coal_usec;
+ coal->tx_max_coalesced_frames = fp->tx_coal_count;
+ return 0;
+}
+
+static int fun_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_rxq **rxqs;
+ unsigned int i, db_val;
+
+ if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
+ coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
+ return -EINVAL;
+
+ /* coalescing more than one frame requires a nonzero timer */
+ if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
+ (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
+ return -EINVAL;
+
+ fp->rx_coal_usec = coal->rx_coalesce_usecs;
+ fp->rx_coal_count = coal->rx_max_coalesced_frames;
+ fp->tx_coal_usec = coal->tx_coalesce_usecs;
+ fp->tx_coal_count = coal->tx_max_coalesced_frames;
+
+ db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+ WRITE_ONCE(fp->cq_irq_db, db_val);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return 0;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->irq_db_val, db_val);
+
+ db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);
+
+ return 0;
+}
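+
+/* Example (illustrative): "ethtool -C eth0 rx-usecs 10 rx-frames 16"
+ * lands here; per the checks above, both values must fit in the
+ * doorbell fields and rx-frames > 1 needs a nonzero rx-usecs timer.
+ */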
+
+static void fun_get_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ chan->max_rx = netdev->num_rx_queues;
+ chan->rx_count = netdev->real_num_rx_queues;
+
+ chan->max_tx = netdev->num_tx_queues;
+ chan->tx_count = netdev->real_num_tx_queues;
+}
+
+static int fun_set_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ if (!chan->tx_count || !chan->rx_count)
+ return -EINVAL;
+
+ if (chan->tx_count == netdev->real_num_tx_queues &&
+ chan->rx_count == netdev->real_num_rx_queues)
+ return 0;
+
+ if (netif_running(netdev))
+ return fun_change_num_queues(netdev, chan->tx_count,
+ chan->rx_count);
+
+ fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
+ return 0;
+}
+
+static void fun_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int max_depth = fp->fdev->q_depth;
+
+ /* We size CQs to be twice the RQ depth so max RQ depth is half the
+ * max queue depth.
+ */
+ ring->rx_max_pending = max_depth / 2;
+ ring->tx_max_pending = max_depth;
+
+ ring->rx_pending = fp->rq_depth;
+ ring->tx_pending = fp->sq_depth;
+
+ kring->rx_buf_len = PAGE_SIZE;
+ kring->cqe_size = FUNETH_CQE_SIZE;
+}
+
+static int fun_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ /* queue depths must be powers-of-2 */
+ if (!is_power_of_2(ring->rx_pending) ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
+ ring->tx_pending < FUNETH_MIN_QDEPTH)
+ return -EINVAL;
+
+ if (fp->sq_depth == ring->tx_pending &&
+ fp->rq_depth == ring->rx_pending)
+ return 0;
+
+ if (netif_running(netdev)) {
+ struct fun_qset req = {
+ .cq_depth = 2 * ring->rx_pending,
+ .rq_depth = ring->rx_pending,
+ .sq_depth = ring->tx_pending
+ };
+
+ rc = fun_replace_queues(netdev, &req, extack);
+ if (rc)
+ return rc;
+ }
+
+ fp->sq_depth = ring->tx_pending;
+ fp->rq_depth = ring->rx_pending;
+ fp->cq_depth = 2 * fp->rq_depth;
+ return 0;
+}
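+
+/* Example (illustrative): "ethtool -G eth0 rx 512 tx 1024" passes the
+ * checks above (powers of 2, >= FUNETH_MIN_QDEPTH), whereas "rx 500"
+ * fails with -EINVAL because 500 is not a power of 2.
+ */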
+
+static int fun_get_sset_count(struct net_device *dev, int sset)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ int n;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
+ (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
+ (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
+ ARRAY_SIZE(tls_stat_names);
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ n += ARRAY_SIZE(mac_tx_stat_names) +
+ ARRAY_SIZE(mac_rx_stat_names);
+ }
+ return n;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int i, j;
+ u8 *p = data;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
+ p += sizeof(mac_tx_stat_names);
+ memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
+ p += sizeof(mac_rx_stat_names);
+ }
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, "%s", txq_stat_names[j]);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]",
+ xdpq_stat_names[j], i);
+ }
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, "%s", xdpq_stat_names[j]);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, "%s", rxq_stat_names[j]);
+
+ for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
+ ethtool_sprintf(&p, "%s", tls_stat_names[j]);
+ break;
+ default:
+ break;
+ }
+}
+
+static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
+{
+#define TX_STAT(s) \
+ *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+
+ TX_STAT(etherStatsOctets);
+ TX_STAT(etherStatsPkts);
+ TX_STAT(VLANTransmittedOK);
+ TX_STAT(ifOutUcastPkts);
+ TX_STAT(ifOutMulticastPkts);
+ TX_STAT(ifOutBroadcastPkts);
+ TX_STAT(ifOutErrors);
+ TX_STAT(CBFCPAUSEFramesTransmitted_0);
+ TX_STAT(CBFCPAUSEFramesTransmitted_1);
+ TX_STAT(CBFCPAUSEFramesTransmitted_2);
+ TX_STAT(CBFCPAUSEFramesTransmitted_3);
+ TX_STAT(CBFCPAUSEFramesTransmitted_4);
+ TX_STAT(CBFCPAUSEFramesTransmitted_5);
+ TX_STAT(CBFCPAUSEFramesTransmitted_6);
+ TX_STAT(CBFCPAUSEFramesTransmitted_7);
+ TX_STAT(CBFCPAUSEFramesTransmitted_8);
+ TX_STAT(CBFCPAUSEFramesTransmitted_9);
+ TX_STAT(CBFCPAUSEFramesTransmitted_10);
+ TX_STAT(CBFCPAUSEFramesTransmitted_11);
+ TX_STAT(CBFCPAUSEFramesTransmitted_12);
+ TX_STAT(CBFCPAUSEFramesTransmitted_13);
+ TX_STAT(CBFCPAUSEFramesTransmitted_14);
+ TX_STAT(CBFCPAUSEFramesTransmitted_15);
+
+#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])
+
+ RX_STAT(etherStatsOctets);
+ RX_STAT(etherStatsPkts);
+ RX_STAT(VLANReceivedOK);
+ RX_STAT(ifInUcastPkts);
+ RX_STAT(ifInMulticastPkts);
+ RX_STAT(ifInBroadcastPkts);
+ RX_STAT(etherStatsDropEvents);
+ RX_STAT(ifInErrors);
+ RX_STAT(aAlignmentErrors);
+ RX_STAT(CBFCPAUSEFramesReceived_0);
+ RX_STAT(CBFCPAUSEFramesReceived_1);
+ RX_STAT(CBFCPAUSEFramesReceived_2);
+ RX_STAT(CBFCPAUSEFramesReceived_3);
+ RX_STAT(CBFCPAUSEFramesReceived_4);
+ RX_STAT(CBFCPAUSEFramesReceived_5);
+ RX_STAT(CBFCPAUSEFramesReceived_6);
+ RX_STAT(CBFCPAUSEFramesReceived_7);
+ RX_STAT(CBFCPAUSEFramesReceived_8);
+ RX_STAT(CBFCPAUSEFramesReceived_9);
+ RX_STAT(CBFCPAUSEFramesReceived_10);
+ RX_STAT(CBFCPAUSEFramesReceived_11);
+ RX_STAT(CBFCPAUSEFramesReceived_12);
+ RX_STAT(CBFCPAUSEFramesReceived_13);
+ RX_STAT(CBFCPAUSEFramesReceived_14);
+ RX_STAT(CBFCPAUSEFramesReceived_15);
+
+ return data;
+
+#undef TX_STAT
+#undef RX_STAT
+}
+
+static void fun_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq_stats txs;
+ struct funeth_rxq_stats rxs;
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+ u64 *totals, *tot;
+
+ if (fp->port_caps & FUN_PORT_CAP_STATS)
+ data = get_mac_stats(fp, data);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return;
+
+#define ADD_STAT(cnt) do { \
+ *data = (cnt); *tot++ += *data++; \
+} while (0)
+
+ /* Tx queues */
+ totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_cso);
+ ADD_STAT(txs.tx_tso);
+ ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_uso);
+ ADD_STAT(txs.tx_more);
+ ADD_STAT(txs.tx_nstops);
+ ADD_STAT(txs.tx_nrestarts);
+ ADD_STAT(txs.tx_map_err);
+ ADD_STAT(txs.tx_tls_pkts);
+ ADD_STAT(txs.tx_tls_bytes);
+ ADD_STAT(txs.tx_tls_fallback);
+ ADD_STAT(txs.tx_tls_drops);
+ }
+ data += ARRAY_SIZE(txq_stat_names);
+
+ /* XDP Tx queues */
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_xdp_full);
+ ADD_STAT(txs.tx_map_err);
+ }
+ data += ARRAY_SIZE(xdpq_stat_names);
+
+ /* Rx queues */
+ totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+
+ ADD_STAT(rxs.rx_pkts);
+ ADD_STAT(rxs.rx_bytes);
+ ADD_STAT(rxs.rx_cso);
+ ADD_STAT(rxs.gro_pkts);
+ ADD_STAT(rxs.gro_merged);
+ ADD_STAT(rxs.xdp_tx);
+ ADD_STAT(rxs.xdp_redir);
+ ADD_STAT(rxs.xdp_drops);
+ ADD_STAT(rxs.rx_bufs);
+ ADD_STAT(rxs.rx_page_alloc);
+ ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
+ ADD_STAT(rxs.rx_budget);
+ ADD_STAT(rxs.rx_map_err);
+ }
+ data += ARRAY_SIZE(rxq_stat_names);
+#undef ADD_STAT
+
+ *data++ = atomic64_read(&fp->tx_tls_add);
+ *data++ = atomic64_read(&fp->tx_tls_del);
+ *data++ = atomic64_read(&fp->tx_tls_resync);
+}
+
+#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
+#define TX_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+#define FEC_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
+ PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])
+
+static void fun_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
+ stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
+}
+
+static void fun_get_802_3_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
+ stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
+ stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
+ stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
+ stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
+ stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
+ stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
+}
+
+static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
+ stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
+}
+
+static void fun_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ static const struct ethtool_rmon_hist_range rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 32767 },
+ {}
+ };
+
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
+ stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
+ stats->fragments = RX_STAT(fp, etherStatsFragments);
+ stats->jabbers = RX_STAT(fp, etherStatsJabbers);
+
+ stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ *ranges = rmon_ranges;
+}
+
+static void fun_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
+ stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
+}
+
+#undef RX_STAT
+#undef TX_STAT
+#undef FEC_STAT
+
+static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = netdev->real_num_rx_queues;
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ return 0;
+}
+
+static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->indir_table_nentries;
+}
+
+static u32 fun_get_rxfh_key_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return sizeof(fp->rss_key);
+}
+
+static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, fp->indir_table,
+ sizeof(u32) * fp->indir_table_nentries);
+
+ if (key)
+ memcpy(key, fp->rss_key, sizeof(fp->rss_key));
+
+ if (hfunc)
+ *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ const u32 *rss_indir = indir ? indir : fp->indir_table;
+ const u8 *rss_key = key ? key : fp->rss_key;
+ enum fun_eth_hash_alg algo;
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ algo = fp->hash_algo;
+ else if (hfunc == ETH_RSS_HASH_CRC32)
+ algo = FUN_ETH_RSS_ALG_CRC32;
+ else if (hfunc == ETH_RSS_HASH_TOP)
+ algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ else
+ return -EINVAL;
+
+ /* If the port is up, try to reconfigure RSS on the device and keep the
+ * new settings only if that succeeds. If the port is down, just record
+ * the settings; they are applied the next time the port is brought up.
+ */
+ if (netif_running(netdev)) {
+ int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
+ FUN_ADMIN_SUBOP_MODIFY);
+ if (rc)
+ return rc;
+ }
+
+ fp->hash_algo = algo;
+ if (key)
+ memcpy(fp->rss_key, key, sizeof(fp->rss_key));
+ if (indir)
+ memcpy(fp->indir_table, indir,
+ sizeof(u32) * fp->indir_table_nentries);
+ return 0;
+}
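+
+/* Example (illustrative): "ethtool -X eth0 equal 8 hfunc crc32" arrives
+ * here with an indirection table spread over the first 8 Rx queues and
+ * hfunc == ETH_RSS_HASH_CRC32, mapped above to FUN_ETH_RSS_ALG_CRC32.
+ */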
+
+static int fun_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static unsigned int to_ethtool_fec(unsigned int fun_fec)
+{
+ unsigned int fec = 0;
+
+ if (fun_fec == FUN_PORT_FEC_NA)
+ fec |= ETHTOOL_FEC_NONE;
+ if (fun_fec & FUN_PORT_FEC_OFF)
+ fec |= ETHTOOL_FEC_OFF;
+ if (fun_fec & FUN_PORT_FEC_RS)
+ fec |= ETHTOOL_FEC_RS;
+ if (fun_fec & FUN_PORT_FEC_FC)
+ fec |= ETHTOOL_FEC_BASER;
+ if (fun_fec & FUN_PORT_FEC_AUTO)
+ fec |= ETHTOOL_FEC_AUTO;
+ return fec;
+}
+
+static int fun_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_data;
+ int rc;
+
+ rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
+ if (rc)
+ return rc;
+
+ fec->active_fec = to_ethtool_fec(fec_data & 0xff);
+ fec->fec = to_ethtool_fec(fec_data >> 8);
+ return 0;
+}
+
+static int fun_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_mode;
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_mode = FUN_PORT_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_OFF:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_BASER:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_FC;
+ break;
+ case ETHTOOL_FEC_RS:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_RS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
+}
+
+static int fun_get_port_module_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *req,
+ struct netlink_ext_ack *extack)
+{
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_xcvr_read_rsp rsp;
+ } cmd;
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified port is virtual, only physical ports have modules");
+ return -EOPNOTSUPP;
+ }
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.xcvr_read =
+ FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
+ req->bank, req->page,
+ req->offset, req->length,
+ req->i2c_address);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ memcpy(req->data, cmd.rsp.data, req->length);
+ return req->length;
+}
+
+static const struct ethtool_ops fun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link_ksettings = fun_get_link_ksettings,
+ .set_link_ksettings = fun_set_link_ksettings,
+ .set_phys_id = fun_set_phys_id,
+ .get_drvinfo = fun_get_drvinfo,
+ .get_msglevel = fun_get_msglevel,
+ .set_msglevel = fun_set_msglevel,
+ .get_regs_len = fun_get_regs_len,
+ .get_regs = fun_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fun_get_coalesce,
+ .set_coalesce = fun_set_coalesce,
+ .get_ts_info = fun_get_ts_info,
+ .get_ringparam = fun_get_ringparam,
+ .set_ringparam = fun_set_ringparam,
+ .get_sset_count = fun_get_sset_count,
+ .get_strings = fun_get_strings,
+ .get_ethtool_stats = fun_get_ethtool_stats,
+ .get_rxnfc = fun_get_rxnfc,
+ .set_rxnfc = fun_set_rxnfc,
+ .get_rxfh_indir_size = fun_get_rxfh_indir_size,
+ .get_rxfh_key_size = fun_get_rxfh_key_size,
+ .get_rxfh = fun_get_rxfh,
+ .set_rxfh = fun_set_rxfh,
+ .get_channels = fun_get_channels,
+ .set_channels = fun_set_channels,
+ .get_fecparam = fun_get_fecparam,
+ .set_fecparam = fun_set_fecparam,
+ .get_pauseparam = fun_get_pauseparam,
+ .set_pauseparam = fun_set_pauseparam,
+ .nway_reset = fun_restart_an,
+ .get_pause_stats = fun_get_pause_stats,
+ .get_fec_stats = fun_get_fec_stats,
+ .get_eth_mac_stats = fun_get_802_3_stats,
+ .get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
+ .get_rmon_stats = fun_get_rmon_stats,
+ .get_module_eeprom_by_page = fun_get_port_module_page,
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &fun_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.c b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
new file mode 100644
index 000000000000..f871def70d70
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_ktls.h"
+
+static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
+{
+ struct fun_admin_ktls_create_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_CREATE,
+ .id = cpu_to_be32(id),
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_MODIFY,
+ .id = cpu_to_be32(fp->ktls_id),
+ .tcp_seq = cpu_to_be32(start_offload_tcp_sn),
+ };
+ struct fun_admin_ktls_modify_rsp rsp;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ if (crypto_info->version == TLS_1_2_VERSION)
+ req.version = FUN_KTLS_TLSV2;
+ else
+ return -EOPNOTSUPP;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;
+
+ req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
+ memcpy(req.key, c->key, sizeof(c->key));
+ memcpy(req.iv, c->iv, sizeof(c->iv));
+ memcpy(req.salt, c->salt, sizeof(c->salt));
+ memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
+ sizeof(rsp), 0);
+ memzero_explicit(&req, sizeof(req));
+ if (rc)
+ return rc;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+ tx_ctx->tlsid = rsp.tlsid;
+ tx_ctx->next_seq = start_offload_tcp_sn;
+ atomic64_inc(&fp->tx_tls_add);
+ return 0;
+}
+
+static void fun_ktls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return;
+
+ tx_ctx = __tls_driver_ctx(tls_ctx, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+
+ fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ atomic64_inc(&fp->tx_tls_del);
+}
+
+static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, key));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = 0;
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+ req.tcp_seq = cpu_to_be32(seq);
+ req.version = 0;
+ req.cipher = 0;
+ memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));
+
+ atomic64_inc(&fp->tx_tls_resync);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ if (!rc)
+ tx_ctx->next_seq = seq;
+ return rc;
+}
+
+static const struct tlsdev_ops fun_ktls_ops = {
+ .tls_dev_add = fun_ktls_add,
+ .tls_dev_del = fun_ktls_del,
+ .tls_dev_resync = fun_ktls_resync,
+};
+
+int fun_ktls_init(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_admin_ktls_create(fp, netdev->dev_port);
+ if (rc)
+ return rc;
+
+ fp->ktls_id = netdev->dev_port;
+ netdev->tlsdev_ops = &fun_ktls_ops;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ return 0;
+}
+
+void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+ if (fp->ktls_id == FUN_HCI_ID_INVALID)
+ return;
+
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.h b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
new file mode 100644
index 000000000000..9d6f2141a959
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_KTLS_H
+#define _FUN_KTLS_H
+
+#include <net/tls.h>
+
+struct funeth_priv;
+
+struct fun_ktls_tx_ctx {
+ __be64 tlsid;
+ u32 next_seq;
+};
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+int fun_ktls_init(struct net_device *netdev);
+void fun_ktls_cleanup(struct funeth_priv *fp);
+
+#else
+
+static inline int fun_ktls_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+}
+#endif
+
+#endif /* _FUN_KTLS_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
new file mode 100644
index 000000000000..095f51c4d9d9
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -0,0 +1,2086 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf.h>
+#include <linux/crash_dump.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/idr.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+#include "funeth_ktls.h"
+#include "fun_port.h"
+#include "fun_queue.h"
+#include "funeth_txrx.h"
+
+#define ADMIN_SQ_DEPTH 32
+#define ADMIN_CQ_DEPTH 64
+#define ADMIN_RQ_DEPTH 16
+
+/* Default number of Tx/Rx queues. */
+#define FUN_DFLT_QUEUES 16U
+
+enum {
+ FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL,
+ FUN_SERV_DEL_PORTS,
+};
+
+static const struct pci_device_id funeth_id_table[] = {
+ { PCI_VDEVICE(FUNGIBLE, 0x0101) },
+ { PCI_VDEVICE(FUNGIBLE, 0x0181) },
+ { 0, }
+};
+
+/* Issue a port write admin command with @n key/value pairs. */
+static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, const u64 *data)
+{
+ unsigned int cmd_size, i;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) +
+ n * sizeof(struct fun_admin_write48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.write =
+ FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.write.write48[i] =
+ FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]);
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+}
+
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
+{
+ return fun_port_write_cmds(fp, 1, &key, &data);
+}
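+
+/* Illustrative multi-key write (hypothetical caller): program two port
+ * attributes with a single admin command:
+ *
+ * static const int keys[2] = { FUN_ADMIN_PORT_KEY_ADVERT,
+ *                              FUN_ADMIN_PORT_KEY_LED };
+ * u64 vals[2] = { new_advert, FUN_PORT_LED_BEACON_OFF };
+ *
+ * err = fun_port_write_cmds(fp, 2, keys, vals);
+ */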
+
+/* Issue a port read admin command with @n key/value pairs. */
+static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, u64 *data)
+{
+ const struct fun_admin_read48_rsp *r48rsp;
+ unsigned int cmd_size, i;
+ int rc;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) +
+ n * sizeof(struct fun_admin_read48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.read =
+ FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+ if (rc)
+ return rc;
+
+ for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) {
+ data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data);
+ dev_dbg(fp->fdev->dev,
+ "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld",
+ fp->lport, r48rsp->key_to_data, keys[i], data[i],
+ FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data));
+ }
+ return 0;
+}
+
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
+{
+ return fun_port_read_cmds(fp, 1, &key, data);
+}
+
+static void fun_report_link(struct net_device *netdev)
+{
+ if (netif_carrier_ok(netdev)) {
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ const char *fec = "", *pause = "";
+ int speed = fp->link_speed;
+ char unit = 'M';
+
+ if (fp->link_speed >= SPEED_1000) {
+ speed /= 1000;
+ unit = 'G';
+ }
+
+ if (fp->active_fec & FUN_PORT_FEC_RS)
+ fec = ", RS-FEC";
+ else if (fp->active_fec & FUN_PORT_FEC_FC)
+ fec = ", BASER-FEC";
+
+ if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK)
+ pause = ", Tx/Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE)
+ pause = ", Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE)
+ pause = ", Tx PAUSE";
+
+ netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n",
+ speed, unit, pause, fec);
+ } else {
+ netdev_info(netdev, "Link down\n");
+ }
+}
+
+static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
+ unsigned int adi_id, const struct fun_adi_param *param)
+{
+ struct fun_admin_adi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI,
+ sizeof(req)),
+ .u.write.subop = FUN_ADMIN_SUBOP_WRITE,
+ .u.write.attribute = attr,
+ .u.write.id = cpu_to_be32(adi_id),
+ .u.write.param = *param
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+
+/* Configure RSS for the given port. @op determines whether a new RSS context
+ * is to be created or whether an existing one should be reconfigured. The
+ * remaining parameters specify the hashing algorithm, key, and indirection
+ * table.
+ *
+ * This initiates packet delivery to the Rx queues set in the indirection
+ * table.
+ */
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int table_len = fp->indir_table_nentries;
+ unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+ union {
+ struct {
+ struct fun_admin_rss_req req;
+ struct fun_dataop_gl gl;
+ };
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ __be32 *indir_tab;
+ u16 flags;
+ int rc;
+
+ if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
+ return -EINVAL;
+
+ flags = op == FUN_ADMIN_SUBOP_CREATE ?
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
+ sizeof(cmd));
+ cmd.req.u.create =
+ FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
+ dev->dev_port, algo,
+ FUN_ETH_RSS_MAX_KEY_SIZE,
+ table_len, 0,
+ FUN_ETH_RSS_MAX_KEY_SIZE);
+ cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
+
+ /* write the key and indirection table into the RSS DMA area */
+ memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
+ indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
+ for (rc = 0; rc < table_len; rc++)
+ *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
+ fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
+ return rc;
+}
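+
+/* Illustrative use: when the data path comes up, fun_up() creates the initial
+ * context with the defaults chosen by fun_init_rss(), roughly
+ *
+ *	fun_config_rss(dev, FUN_ETH_RSS_ALG_TOEPLITZ, fp->rss_key,
+ *		       fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
+ */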
+
+/* Destroy the HW RSS context associated with the given port. This also stops
+ * all packet delivery to our Rx queues.
+ */
+static void fun_destroy_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ }
+}
+
+static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
+
+ cpumask_copy(&p->affinity_mask, mask);
+}
+
+static void fun_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
+/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
+ * and add it to the IRQ XArray.
+ */
+static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
+ int node, unsigned int xa_idx_offset)
+{
+ struct fun_irq *irq;
+ int cpu, res;
+
+ cpu = cpumask_local_spread(idx, node);
+ node = cpu_to_mem(cpu);
+
+ irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
+ if (res != 1)
+ goto free_irq;
+
+ res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
+ if (res)
+ goto release_irq;
+
+ irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
+ cpumask_set_cpu(cpu, &irq->affinity_mask);
+ irq->aff_notify.notify = fun_irq_aff_notify;
+ irq->aff_notify.release = fun_irq_aff_release;
+ irq->state = FUN_IRQ_INIT;
+ return irq;
+
+release_irq:
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+free_irq:
+ kfree(irq);
+ return ERR_PTR(res);
+}
+
+static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
+{
+ netif_napi_del(&irq->napi);
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+ kfree(irq);
+}
+
+/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
+static void fun_prune_queue_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int nreleased = 0;
+ struct fun_irq *irq;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, irq) {
+ if (irq->txq || irq->rxq) /* skip those in use */
+ continue;
+
+ xa_erase(&fp->irqs, idx);
+ fun_free_qirq(fp, irq);
+ nreleased++;
+ if (idx < fp->rx_irq_ofst)
+ fp->num_tx_irqs--;
+ else
+ fp->num_rx_irqs--;
+ }
+ netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
+}
+
+/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
+ * and @nrx. IRQs are added incrementally to those we already have.
+ * We hold on to allocated IRQs until garbage collection of unused IRQs is
+ * separately requested.
+ */
+static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int node = dev_to_node(&fp->pdev->dev);
+ struct fun_irq *irq;
+ unsigned int i;
+
+ for (i = fp->num_tx_irqs; i < ntx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, 0);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_tx_irqs++;
+ netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll);
+ }
+
+ for (i = fp->num_rx_irqs; i < nrx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_rx_irqs++;
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll);
+ }
+
+ netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
+ ntx, nrx);
+ return 0;
+}
+
+static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && txqs[i]; i++)
+ txqs[i] = funeth_txq_free(txqs[i], state);
+}
+
+static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
+ unsigned int nqs, unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i),
+ state, &txqs[i]);
+ if (err) {
+ free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && rxqs[i]; i++)
+ rxqs[i] = funeth_rxq_free(rxqs[i], state);
+}
+
+static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
+ unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
+ unsigned int start, int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_rxq_create(dev, i, ncqe, nrqe,
+ xa_load(&fp->irqs, i + fp->rx_irq_ofst),
+ state, &rxqs[i]);
+ if (err) {
+ free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && xdpqs[i]; i++)
+ xdpqs[i] = funeth_txq_free(xdpqs[i], state);
+
+ if (state == FUN_QSTATE_DESTROYED)
+ kfree(xdpqs);
+}
+
+static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
+ unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_txq **xdpqs;
+ unsigned int i;
+ int err;
+
+ xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL);
+ if (!xdpqs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]);
+ if (err) {
+ free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return ERR_PTR(err);
+ }
+ }
+ return xdpqs;
+}
+
+static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs = qset->xdpqs;
+ struct funeth_rxq **rxqs = qset->rxqs;
+
+ /* qset may not specify any queues to operate on. In that case the
+ * currently installed queues are implied.
+ */
+ if (!rxqs) {
+ rxqs = rtnl_dereference(fp->rxqs);
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ qset->txqs = fp->txqs;
+ qset->nrxqs = netdev->real_num_rx_queues;
+ qset->ntxqs = netdev->real_num_tx_queues;
+ qset->nxdpqs = fp->num_xdpqs;
+ }
+ if (!rxqs)
+ return;
+
+ if (rxqs == rtnl_dereference(fp->rxqs)) {
+ rcu_assign_pointer(fp->rxqs, NULL);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ fp->txqs = NULL;
+ }
+
+ free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state);
+ free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state);
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state);
+ if (qset->state == FUN_QSTATE_DESTROYED)
+ kfree(rxqs);
+
+ /* Tell the caller which queues were operated on. */
+ qset->rxqs = rxqs;
+ qset->xdpqs = xdpqs;
+}
+
+static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_txq **xdpqs = NULL, **txqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs);
+ if (err)
+ return err;
+
+ rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL);
+ if (!rxqs)
+ return -ENOMEM;
+
+ if (qset->nxdpqs) {
+ xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,
+ qset->xdpq_start, qset->state);
+ if (IS_ERR(xdpqs)) {
+ err = PTR_ERR(xdpqs);
+ goto free_qvec;
+ }
+ }
+
+ txqs = (struct funeth_txq **)&rxqs[qset->nrxqs];
+ err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,
+ qset->txq_start, qset->state);
+ if (err)
+ goto free_xdpqs;
+
+ err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,
+ qset->rq_depth, qset->rxq_start, qset->state);
+ if (err)
+ goto free_txqs;
+
+ qset->rxqs = rxqs;
+ qset->txqs = txqs;
+ qset->xdpqs = xdpqs;
+ return 0;
+
+free_txqs:
+ free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED);
+free_xdpqs:
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED);
+free_qvec:
+ kfree(rxqs);
+ return err;
+}
+
+/* Take queues to the next level. Presently this means creating them on the
+ * device.
+ */
+static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ for (i = 0; i < qset->nrxqs; i++) {
+ err = fun_rxq_create_dev(qset->rxqs[i],
+ xa_load(&fp->irqs,
+ i + fp->rx_irq_ofst));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->ntxqs; i++) {
+ err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->nxdpqs; i++) {
+ err = fun_txq_create_dev(qset->xdpqs[i], NULL);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int fun_port_create(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ } cmd;
+ int rc;
+
+ if (fp->lport != INVALID_LPORT)
+ return 0;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0,
+ netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+
+ if (!rc)
+ fp->lport = be16_to_cpu(cmd.rsp.u.create.lport);
+ return rc;
+}
+
+static int fun_port_destroy(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (fp->lport == INVALID_LPORT)
+ return 0;
+
+ fp->lport = INVALID_LPORT;
+ return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0,
+ netdev->dev_port);
+}
+
+static int fun_eth_create(struct funeth_priv *fp)
+{
+ union {
+ struct fun_admin_eth_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH,
+ sizeof(cmd.req));
+ cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT(
+ FUN_ADMIN_SUBOP_CREATE,
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR,
+ 0, fp->netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.id);
+}
+
+static int fun_vi_create(struct funeth_priv *fp)
+{
+ struct fun_admin_vi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI,
+ sizeof(req)),
+ .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE,
+ 0,
+ fp->netdev->dev_port,
+ fp->netdev->dev_port)
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+/* Helper to create an ETH flow and bind an SQ to it.
+ * Returns the ETH id (>= 0) on success or a negative error.
+ */
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
+{
+ int rc, ethid;
+
+ ethid = fun_eth_create(fp);
+ if (ethid >= 0) {
+ rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
+ FUN_ADMIN_BIND_TYPE_ETH, ethid);
+ if (rc) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid);
+ ethid = rc;
+ }
+ }
+ return ethid;
+}
+
+static irqreturn_t fun_queue_irq_handler(int irq, void *data)
+{
+ struct fun_irq *p = data;
+
+ if (p->rxq) {
+ prefetch(p->rxq->next_cqe_info);
+ p->rxq->irq_cnt++;
+ }
+ napi_schedule_irqoff(&p->napi);
+ return IRQ_HANDLED;
+}
+
+static int fun_enable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned long idx, last;
+ unsigned int qidx;
+ struct fun_irq *p;
+ const char *qtype;
+ int err;
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->txq) {
+ qtype = "tx";
+ qidx = p->txq->qidx;
+ } else if (p->rxq) {
+ qtype = "rx";
+ qidx = p->rxq->qidx;
+ } else {
+ continue;
+ }
+
+ if (p->state != FUN_IRQ_INIT)
+ continue;
+
+ snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name,
+ qtype, qidx);
+ err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p);
+ if (err) {
+ netdev_err(dev, "Failed to allocate IRQ %u, err %d\n",
+ p->irq, err);
+ goto unroll;
+ }
+ p->state = FUN_IRQ_REQUESTED;
+ }
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->state != FUN_IRQ_REQUESTED)
+ continue;
+ irq_set_affinity_notifier(p->irq, &p->aff_notify);
+ irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
+ napi_enable(&p->napi);
+ p->state = FUN_IRQ_ENABLED;
+ }
+
+ return 0;
+
+unroll:
+ last = idx - 1;
+ xa_for_each_range(&fp->irqs, idx, p, 0, last)
+ if (p->state == FUN_IRQ_REQUESTED) {
+ free_irq(p->irq, p);
+ p->state = FUN_IRQ_INIT;
+ }
+
+ return err;
+}
+
+static void fun_disable_one_irq(struct fun_irq *irq)
+{
+ napi_disable(&irq->napi);
+ irq_set_affinity_notifier(irq->irq, NULL);
+ irq_update_affinity_hint(irq->irq, NULL);
+ free_irq(irq->irq, irq);
+ irq->state = FUN_IRQ_INIT;
+}
+
+static void fun_disable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_irq *p;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, p)
+ if (p->state == FUN_IRQ_ENABLED)
+ fun_disable_one_irq(p);
+}
+
+static void fun_down(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ /* If we don't have queues the data path is already down.
+ * Note netif_running(dev) may be true.
+ */
+ if (!rcu_access_pointer(fp->rxqs))
+ return;
+
+ /* It is also down if the queues aren't on the device. */
+ if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) {
+ netif_info(fp, ifdown, dev,
+ "Tearing down data path on device\n");
+ fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ fun_destroy_rss(fp);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+ fun_disable_irqs(dev);
+ }
+
+ fun_free_rings(dev, qset);
+}
+
+static int fun_up(struct net_device *dev, struct fun_qset *qset)
+{
+ static const int port_keys[] = {
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH,
+ FUN_ADMIN_PORT_KEY_ENABLE
+ };
+
+ struct funeth_priv *fp = netdev_priv(dev);
+ u64 vals[] = {
+ lower_32_bits(fp->stats_dma_addr),
+ upper_32_bits(fp->stats_dma_addr),
+ FUN_PORT_FLAG_ENABLE_NOTIFY
+ };
+ int err;
+
+ netif_info(fp, ifup, dev, "Setting up data path on device\n");
+
+ if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) {
+ err = fun_advance_ring_state(dev, qset);
+ if (err)
+ return err;
+ }
+
+ err = fun_vi_create(fp);
+ if (err)
+ goto free_queues;
+
+ fp->txqs = qset->txqs;
+ rcu_assign_pointer(fp->rxqs, qset->rxqs);
+ rcu_assign_pointer(fp->xdpqs, qset->xdpqs);
+
+ err = fun_enable_irqs(dev);
+ if (err)
+ goto destroy_vi;
+
+ if (fp->rss_cfg) {
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
+ } else {
+ /* The non-RSS case has only 1 queue. */
+ err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port,
+ FUN_ADMIN_BIND_TYPE_EPCQ,
+ qset->rxqs[0]->hw_cqid);
+ }
+ if (err)
+ goto disable_irqs;
+
+ err = fun_port_write_cmds(fp, 3, port_keys, vals);
+ if (err)
+ goto free_rss;
+
+ netif_tx_start_all_queues(dev);
+ return 0;
+
+free_rss:
+ fun_destroy_rss(fp);
+disable_irqs:
+ fun_disable_irqs(dev);
+destroy_vi:
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+free_queues:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int funeth_open(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_qset qset = {
+ .nrxqs = netdev->real_num_rx_queues,
+ .ntxqs = netdev->real_num_tx_queues,
+ .nxdpqs = fp->num_xdpqs,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL,
+ };
+ int rc;
+
+ rc = fun_alloc_rings(netdev, &qset);
+ if (rc)
+ return rc;
+
+ rc = fun_up(netdev, &qset);
+ if (rc) {
+ qset.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(netdev, &qset);
+ }
+
+ return rc;
+}
+
+static int funeth_close(struct net_device *netdev)
+{
+ struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED };
+
+ fun_down(netdev, &qset);
+ return 0;
+}
+
+static void fun_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+
+ stats->tx_packets = fp->tx_packets;
+ stats->tx_bytes = fp->tx_bytes;
+ stats->tx_dropped = fp->tx_dropped;
+
+ stats->rx_packets = fp->rx_packets;
+ stats->rx_bytes = fp->rx_bytes;
+ stats->rx_dropped = fp->rx_dropped;
+
+ rcu_read_lock();
+ rxqs = rcu_dereference(fp->rxqs);
+ if (!rxqs)
+ goto unlock;
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ stats->tx_dropped += txs.tx_map_err;
+ }
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ struct funeth_rxq_stats rxs;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+ stats->rx_packets += rxs.rx_pkts;
+ stats->rx_bytes += rxs.rx_bytes;
+ stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops;
+ }
+
+ xdpqs = rcu_dereference(fp->xdpqs);
+ if (!xdpqs)
+ goto unlock;
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ }
+unlock:
+ rcu_read_unlock();
+}
+
+static int fun_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
+ if (!rc)
+ netdev->mtu = new_mtu;
+ return rc;
+}
+
+static int fun_set_macaddr(struct net_device *netdev, void *addr)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int rc;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(saddr->sa_data));
+ if (!rc)
+ eth_hw_addr_set(netdev, saddr->sa_data);
+ return rc;
+}
+
+static int fun_get_port_attributes(struct net_device *netdev)
+{
+ static const int keys[] = {
+ FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES,
+ FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU
+ };
+ static const int phys_keys[] = {
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS,
+ };
+
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 data[ARRAY_SIZE(keys)];
+ u8 mac[ETH_ALEN];
+ int i, rc;
+
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(keys); i++) {
+ switch (keys[i]) {
+ case FUN_ADMIN_PORT_KEY_MACADDR:
+ u64_to_ether_addr(data[i], mac);
+ if (is_zero_ether_addr(mac)) {
+ eth_hw_addr_random(netdev);
+ } else if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(netdev, mac);
+ } else {
+ netdev_err(netdev,
+ "device provided a bad MAC address %pM\n",
+ mac);
+ return -EINVAL;
+ }
+ break;
+
+ case FUN_ADMIN_PORT_KEY_CAPABILITIES:
+ fp->port_caps = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_ADVERT:
+ fp->advertising = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_MTU:
+ netdev->mtu = data[i];
+ break;
+ }
+ }
+
+ if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) {
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys,
+ data);
+ if (rc)
+ return rc;
+
+ fp->lane_attrs = data[0];
+ }
+
+ if (netdev->addr_assign_type == NET_ADDR_RANDOM)
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(netdev->dev_addr));
+ return 0;
+}
+
+static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
+ sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
+}
+
+static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* no TX HW timestamps */
+ cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ fp->hwtstamp_cfg = cfg;
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return fun_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return fun_hwtstamp_get(dev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Prepare the queues for XDP. */
+static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i, nqs = num_online_cpus();
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);
+ if (IS_ERR(xdpqs))
+ return PTR_ERR(xdpqs);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ err = fun_rxq_set_bpf(rxqs[i], prog);
+ if (err)
+ goto out;
+ }
+
+ fp->num_xdpqs = nqs;
+ rcu_assign_pointer(fp->xdpqs, xdpqs);
+ return 0;
+out:
+ while (i--)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+
+ free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED);
+ return err;
+}
+
+/* Set the queues for non-XDP operation. */
+static void fun_end_xdp(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i;
+
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ /* at this point both Rx and Tx XDP processing has ended */
+
+ free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED);
+ fp->num_xdpqs = 0;
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+}
+
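+/* An XDP packet must fit in a single page: one page less the XDP headroom,
+ * the Ethernet/VLAN headers, and the Rx tailroom.
+ */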
+#define XDP_MAX_MTU \
+ (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
+
+static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct bpf_prog *old_prog, *prog = xdp->prog;
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ /* XDP uses at most one buffer */
+ if (prog && dev->mtu > XDP_MAX_MTU) {
+ netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu);
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Device MTU too large for XDP");
+ return -EINVAL;
+ }
+
+ if (!netif_running(dev)) {
+ fp->num_xdpqs = prog ? num_online_cpus() : 0;
+ } else if (prog && !fp->xdp_prog) {
+ err = fun_enter_xdp(dev, prog);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Failed to set queues for XDP.");
+ return err;
+ }
+ } else if (!prog && fp->xdp_prog) {
+ fun_end_xdp(dev);
+ } else {
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->xdp_prog, prog);
+ }
+
+ dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
+ old_prog = xchg(&fp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return fun_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct devlink_port *fun_get_devlink_port(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ return &fp->dl_port;
+}
+
+static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
+{
+ if (ed->num_vports)
+ return -EINVAL;
+
+ ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL);
+ if (!ed->vport_info)
+ return -ENOMEM;
+ ed->num_vports = n;
+ return 0;
+}
+
+static void fun_free_vports(struct fun_ethdev *ed)
+{
+ kvfree(ed->vport_info);
+ ed->vport_info = NULL;
+ ed->num_vports = 0;
+}
+
+static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
+ unsigned int vport)
+{
+ if (!ed->vport_info || vport >= ed->num_vports)
+ return NULL;
+
+ return ed->vport_info + vport;
+}
+
+static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param mac_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (is_multicast_ether_addr(mac))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac));
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1,
+ &mac_param);
+ if (!rc)
+ ether_addr_copy(vi->mac, mac);
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param vlan_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) &&
+ vlan_proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto),
+ ((u16)qos << VLAN_PRIO_SHIFT) | vlan);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param);
+ if (!rc) {
+ vi->vlan = vlan;
+ vi->qos = qos;
+ vi->vlan_proto = vlan_proto;
+ }
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param rate_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (min_tx_rate)
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
+ if (!rc)
+ vi->max_rate = max_tx_rate;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
+ const struct fun_vport_info *vi;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ memset(ivi, 0, sizeof(*ivi));
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, vi->mac);
+ ivi->vlan = vi->vlan;
+ ivi->qos = vi->qos;
+ ivi->vlan_proto = vi->vlan_proto;
+ ivi->max_tx_rate = vi->max_rate;
+ ivi->spoofchk = vi->spoofchk;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return vi ? 0 : -EINVAL;
+}
+
+static void fun_uninit(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ fun_prune_queue_irqs(dev);
+ xa_destroy(&fp->irqs);
+}
+
+static const struct net_device_ops fun_netdev_ops = {
+ .ndo_open = funeth_open,
+ .ndo_stop = funeth_close,
+ .ndo_start_xmit = fun_start_xmit,
+ .ndo_get_stats64 = fun_get_stats64,
+ .ndo_change_mtu = fun_change_mtu,
+ .ndo_set_mac_address = fun_set_macaddr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = fun_ioctl,
+ .ndo_uninit = fun_uninit,
+ .ndo_bpf = fun_xdp,
+ .ndo_xdp_xmit = fun_xdp_xmit_frames,
+ .ndo_set_vf_mac = fun_set_vf_mac,
+ .ndo_set_vf_vlan = fun_set_vf_vlan,
+ .ndo_set_vf_rate = fun_set_vf_rate,
+ .ndo_get_vf_config = fun_get_vf_config,
+ .ndo_get_devlink_port = fun_get_devlink_port,
+};
+
+#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
+ GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
+
+static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
+{
+ unsigned int i;
+
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
+}
+
+/* Reset the RSS indirection table to equal distribution across the current
+ * number of Rx queues. Called at init time and whenever the number of Rx
+ * queues changes subsequently. Note that this may also resize the indirection
+ * table.
+ */
+static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ if (!fp->rss_cfg)
+ return;
+
+ /* Set the table size to the max possible that allows an equal number
+ * of occurrences of each CQ.
+ */
+ fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
+ fun_dflt_rss_indir(fp, nrx);
+}
+
+/* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
+ * update the LUT to an equal distribution among nrx queues. If @only_if_needed
+ * is set the LUT is left unchanged if it already does not reference any queues
+ * >= nrx.
+ */
+static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
+ bool only_if_needed)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
+ unsigned int i, oldsz;
+ int err;
+
+ if (!fp->rss_cfg)
+ return 0;
+
+ if (only_if_needed) {
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ if (fp->indir_table[i] >= nrx)
+ break;
+
+ if (i >= fp->indir_table_nentries)
+ return 0;
+ }
+
+ memcpy(old_lut, fp->indir_table, sizeof(old_lut));
+ oldsz = fp->indir_table_nentries;
+ fun_reset_rss_indir(dev, nrx);
+
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
+ if (!err)
+ return 0;
+
+ memcpy(fp->indir_table, old_lut, sizeof(old_lut));
+ fp->indir_table_nentries = oldsz;
+ return err;
+}
+
+/* Allocate the DMA area for the RSS configuration commands to the device, and
+ * initialize the hash, hash key, indirection table size and its entries to
+ * their defaults. The indirection table defaults to equal distribution across
+ * the Rx queues.
+ */
+static int fun_init_rss(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
+
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
+ return 0;
+
+ fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
+ &fp->rss_dma_addr, GFP_KERNEL);
+ if (!fp->rss_cfg)
+ return -ENOMEM;
+
+ fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
+ fun_reset_rss_indir(dev, dev->real_num_rx_queues);
+ return 0;
+}
+
+static void fun_free_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_cfg) {
+ dma_free_coherent(&fp->pdev->dev,
+ sizeof(fp->rss_key) + sizeof(fp->indir_table),
+ fp->rss_cfg, fp->rss_dma_addr);
+ fp->rss_cfg = NULL;
+ }
+}
+
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx)
+{
+ netif_set_real_num_tx_queues(netdev, ntx);
+ if (nrx != netdev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(netdev, nrx);
+ fun_reset_rss_indir(netdev, nrx);
+ }
+}
+
+static int fun_init_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return 0;
+
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+
+ fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ &fp->stats_dma_addr, GFP_KERNEL);
+ if (!fp->stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void fun_free_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (fp->stats) {
+		nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+			 PORT_MAC_FEC_STATS_MAX;
+ dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ fp->stats, fp->stats_dma_addr);
+ fp->stats = NULL;
+ }
+}
+
+static int fun_dl_port_register(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct devlink *dl = priv_to_devlink(fp->fdev);
+ struct devlink_port_attrs attrs = {};
+ unsigned int idx;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ idx = fp->lport;
+ } else {
+ idx = netdev->dev_port;
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.lanes = fp->lane_attrs & 7;
+ if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
+ attrs.split = 1;
+ attrs.phys.port_number = fp->lport & ~3;
+ attrs.phys.split_subport_number = fp->lport & 3;
+ } else {
+ attrs.phys.port_number = fp->lport;
+ }
+ }
+
+ devlink_port_attrs_set(&fp->dl_port, &attrs);
+
+ return devlink_port_register(dl, &fp->dl_port, idx);
+}
+
+/* Determine the max Tx/Rx queues for a port. */
+static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
+ unsigned int *nrx)
+{
+ int neth;
+
+ if (ed->num_ports > 1 || is_kdump_kernel()) {
+ *ntx = 1;
+ *nrx = 1;
+ return 0;
+ }
+
+ neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
+ if (neth < 0)
+ return neth;
+
+ /* We determine the max number of queues based on the CPU
+ * cores, device interrupts and queues, RSS size, and device Tx flows.
+ *
+	 * - At least 1 Rx and 1 Tx queue.
+ * - At most 1 Rx/Tx queue per core.
+ * - Each Rx/Tx queue needs 1 SQ.
+ */
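+	/* E.g., 16 SQs per port and 8 online CPUs allow at most
+	 * min(16 - 1, 8) = 8 Tx and 8 Rx queues, subject to the ETH flow
+	 * count and RSS table size caps applied below.
+	 */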
+ *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
+ *nrx = *ntx;
+ if (*ntx > neth)
+ *ntx = neth;
+ if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
+ *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
+ return 0;
+}
+
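+/* Choose initial Tx/Rx queue counts: up to FUN_DFLT_QUEUES each, with the
+ * smaller side served first so the total fits in the port's @nsqs SQs.
+ * E.g., nsqs = 10 with 16 queues available each way yields
+ * ntx = min(16, 10 / 2) = 5 and nrx = min(16, 10 - 5) = 5.
+ */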
+static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
+{
+ unsigned int ntx, nrx;
+
+ ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
+ nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
+ if (ntx <= nrx) {
+ ntx = min(ntx, nsqs / 2);
+ nrx = min(nrx, nsqs - ntx);
+ } else {
+ nrx = min(nrx, nsqs / 2);
+ ntx = min(ntx, nsqs - nrx);
+ }
+
+ netif_set_real_num_tx_queues(dev, ntx);
+ netif_set_real_num_rx_queues(dev, nrx);
+}
+
+/* Replace the existing Rx/Tx/XDP queues with equal number of queues with
+ * different settings, e.g. depth. This is a disruptive replacement that
+ * temporarily shuts down the data path and should be limited to changes that
+ * can't be applied to live queues. The old queues are always discarded.
+ */
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack)
+{
+ struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
+ struct funeth_priv *fp = netdev_priv(dev);
+ int err;
+
+ newqs->nrxqs = dev->real_num_rx_queues;
+ newqs->ntxqs = dev->real_num_tx_queues;
+ newqs->nxdpqs = fp->num_xdpqs;
+ newqs->state = FUN_QSTATE_INIT_SW;
+ err = fun_alloc_rings(dev, newqs);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to allocate memory for new queues, keeping current settings");
+ return err;
+ }
+
+ fun_down(dev, &oldqs);
+
+ err = fun_up(dev, newqs);
+ if (!err)
+ return 0;
+
+	/* The new queues couldn't be installed. We do not retry with the old
+	 * queues: to the device they are identical to the new ones and would
+	 * fail the same way.
+	 */
+ newqs->state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, newqs);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
+ return err;
+}
+
+/* Change the number of Rx/Tx queues of a device while it is up. This is done
+ * by incrementally adding/removing queues to meet the new requirements while
+ * handling ongoing traffic. Queues with index below min(old, new) are kept;
+ * only the tail past that boundary (rxq_start/txq_start) is created or torn
+ * down.
+ */
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ unsigned int keep_tx = min(dev->real_num_tx_queues, ntx);
+ unsigned int keep_rx = min(dev->real_num_rx_queues, nrx);
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_qset oldqs = {
+ .rxqs = rtnl_dereference(fp->rxqs),
+ .txqs = fp->txqs,
+ .nrxqs = dev->real_num_rx_queues,
+ .ntxqs = dev->real_num_tx_queues,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .state = FUN_QSTATE_DESTROYED
+ };
+ struct fun_qset newqs = {
+ .nrxqs = nrx,
+ .ntxqs = ntx,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL
+ };
+ int i, err;
+
+ err = fun_alloc_rings(dev, &newqs);
+ if (err)
+ goto free_irqs;
+
+ err = fun_enable_irqs(dev); /* of any newly added queues */
+ if (err)
+ goto free_rings;
+
+ /* copy the queues we are keeping to the new set */
+ memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs));
+ memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs));
+
+ if (nrx < dev->real_num_rx_queues) {
+ err = fun_rss_set_qnum(dev, nrx, true);
+ if (err)
+ goto disable_tx_irqs;
+
+ for (i = nrx; i < dev->real_num_rx_queues; i++)
+ fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi,
+ struct fun_irq, napi));
+
+ netif_set_real_num_rx_queues(dev, nrx);
+ }
+
+ if (ntx < dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ rcu_assign_pointer(fp->rxqs, newqs.rxqs);
+ fp->txqs = newqs.txqs;
+ synchronize_net();
+
+ if (ntx > dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ if (nrx > dev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(dev, nrx);
+ fun_rss_set_qnum(dev, nrx, false);
+ }
+
+ /* disable interrupts of any excess Tx queues */
+ for (i = keep_tx; i < oldqs.ntxqs; i++)
+ fun_disable_one_irq(oldqs.txqs[i]->irq);
+
+ fun_free_rings(dev, &oldqs);
+ fun_prune_queue_irqs(dev);
+ return 0;
+
+disable_tx_irqs:
+ for (i = oldqs.ntxqs; i < ntx; i++)
+ fun_disable_one_irq(newqs.txqs[i]->irq);
+free_rings:
+ newqs.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, &newqs);
+free_irqs:
+ fun_prune_queue_irqs(dev);
+ return err;
+}
+
+static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
+{
+ struct fun_dev *fdev = &ed->fdev;
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+ unsigned int ntx, nrx;
+ int rc;
+
+ rc = fun_max_qs(ed, &ntx, &nrx);
+ if (rc)
+ return rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx);
+ if (!netdev) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ netdev->dev_port = portid;
+ fun_queue_defaults(netdev, ed->nsqs_per_port);
+
+ fp = netdev_priv(netdev);
+ fp->fdev = fdev;
+ fp->pdev = to_pci_dev(fdev->dev);
+ fp->netdev = netdev;
+ xa_init(&fp->irqs);
+ fp->rx_irq_ofst = ntx;
+ seqcount_init(&fp->link_seq);
+
+ fp->lport = INVALID_LPORT;
+ rc = fun_port_create(netdev);
+ if (rc)
+ goto free_netdev;
+
+ /* bind port to admin CQ for async events */
+ rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid,
+ FUN_ADMIN_BIND_TYPE_EPCQ, 0);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_get_port_attributes(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_rss(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_stats_area(fp);
+ if (rc)
+ goto free_rss;
+
+ SET_NETDEV_DEV(netdev, fdev->dev);
+ netdev->netdev_ops = &fun_netdev_ops;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
+ if (fp->port_caps & FUN_PORT_CAP_OFFLOADS)
+ netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS;
+ if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS)
+ netdev->hw_features |= GSO_ENCAP_FLAGS;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+ netdev->mpls_features = netdev->vlan_features;
+ netdev->hw_enc_features = netdev->hw_features;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = FUN_MAX_MTU;
+
+ fun_set_ethtool_ops(netdev);
+
+ /* configurable parameters */
+ fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);
+ fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);
+ fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
+ fp->rx_coal_usec = CQ_INTCOAL_USEC;
+ fp->rx_coal_count = CQ_INTCOAL_NPKT;
+ fp->tx_coal_usec = SQ_INTCOAL_USEC;
+ fp->tx_coal_count = SQ_INTCOAL_NPKT;
+ fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+
+ rc = fun_dl_port_register(netdev);
+ if (rc)
+ goto free_stats;
+
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+ fun_ktls_init(netdev); /* optional, failure OK */
+
+ netif_carrier_off(netdev);
+ ed->netdevs[portid] = netdev;
+ rc = register_netdev(netdev);
+ if (rc)
+ goto unreg_devlink;
+
+ devlink_port_type_eth_set(&fp->dl_port, netdev);
+
+ return 0;
+
+unreg_devlink:
+ ed->netdevs[portid] = NULL;
+ fun_ktls_cleanup(fp);
+ devlink_port_unregister(&fp->dl_port);
+free_stats:
+ fun_free_stats_area(fp);
+free_rss:
+ fun_free_rss(fp);
+destroy_port:
+ fun_port_destroy(netdev);
+free_netdev:
+ free_netdev(netdev);
+done:
+ dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc);
+ return rc;
+}
+
+static void fun_destroy_netdev(struct net_device *netdev)
+{
+ struct funeth_priv *fp;
+
+ fp = netdev_priv(netdev);
+ devlink_port_type_clear(&fp->dl_port);
+ unregister_netdev(netdev);
+ devlink_port_unregister(&fp->dl_port);
+ fun_ktls_cleanup(fp);
+ fun_free_stats_area(fp);
+ fun_free_rss(fp);
+ fun_port_destroy(netdev);
+ free_netdev(netdev);
+}
+
+static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
+{
+ struct fun_dev *fd = &ed->fdev;
+ int i, rc;
+
+ /* The admin queue takes 1 IRQ and 2 SQs. */
+ ed->nsqs_per_port = min(fd->num_irqs - 1,
+ fd->kern_end_qid - 2) / nports;
+ if (ed->nsqs_per_port < 2) {
+ dev_err(fd->dev, "Too few SQs for %u ports", nports);
+ return -EINVAL;
+ }
+
+ ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);
+ if (!ed->netdevs)
+ return -ENOMEM;
+
+ ed->num_ports = nports;
+ for (i = 0; i < nports; i++) {
+ rc = fun_create_netdev(ed, i);
+ if (rc)
+ goto free_netdevs;
+ }
+
+ return 0;
+
+free_netdevs:
+ while (i)
+ fun_destroy_netdev(ed->netdevs[--i]);
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+ return rc;
+}
+
+static void fun_destroy_ports(struct fun_ethdev *ed)
+{
+ unsigned int i;
+
+ for (i = 0; i < ed->num_ports; i++)
+ fun_destroy_netdev(ed->netdevs[i]);
+
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+}
+
+static void fun_update_link_state(const struct fun_ethdev *ed,
+ const struct fun_admin_port_notif *notif)
+{
+ unsigned int port_idx = be16_to_cpu(notif->id);
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+
+ if (port_idx >= ed->num_ports)
+ return;
+
+ netdev = ed->netdevs[port_idx];
+ fp = netdev_priv(netdev);
+
+ write_seqcount_begin(&fp->link_seq);
+ fp->link_speed = be32_to_cpu(notif->speed) * 10; /* 10 Mbps->Mbps */
+ fp->active_fc = notif->flow_ctrl;
+ fp->active_fec = notif->fec;
+ fp->xcvr_type = notif->xcvr_type;
+ fp->link_down_reason = notif->link_down_reason;
+ fp->lp_advertising = be64_to_cpu(notif->lp_advertising);
+
+ if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN)
+ netif_carrier_off(netdev);
+ if (notif->link_state & FUN_PORT_FLAG_MAC_UP)
+ netif_carrier_on(netdev);
+
+ write_seqcount_end(&fp->link_seq);
+ fun_report_link(netdev);
+}
+
+/* handler for async events delivered through the admin CQ */
+static void fun_event_cb(struct fun_dev *fdev, void *entry)
+{
+ u8 op = ((struct fun_admin_rsp_common *)entry)->op;
+
+ if (op == FUN_ADMIN_OP_PORT) {
+ const struct fun_admin_port_notif *rsp = entry;
+
+ if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) {
+ fun_update_link_state(to_fun_ethdev(fdev), rsp);
+ } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) {
+ const struct fun_admin_res_count_rsp *r = entry;
+
+ if (r->count.data)
+ set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags);
+ else
+ set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags);
+ fun_serv_sched(fdev);
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u subop %u",
+ op, rsp->subop);
+ }
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u", op);
+ }
+}
+
+/* handler for pending work managed by the service task */
+static void fun_service_cb(struct fun_dev *fdev)
+{
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags))
+ fun_destroy_ports(ed);
+
+ if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags))
+ return;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc < 0 || rc == ed->num_ports)
+ return;
+
+ if (ed->num_ports)
+ fun_destroy_ports(ed);
+ if (rc)
+ fun_create_ports(ed, rc);
+}
+
+static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (nvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ mutex_lock(&ed->state_mutex);
+ fun_free_vports(ed);
+ mutex_unlock(&ed->state_mutex);
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ rc = pci_enable_sriov(pdev, nvfs);
+ if (rc)
+ return rc;
+
+ mutex_lock(&ed->state_mutex);
+ rc = fun_init_vports(ed, nvfs);
+ mutex_unlock(&ed->state_mutex);
+ if (rc) {
+ pci_disable_sriov(pdev);
+ return rc;
+ }
+
+ return nvfs;
+}
+
+static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct fun_dev_params aqreq = {
+ .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
+ .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
+ .cq_depth = ADMIN_CQ_DEPTH,
+ .sq_depth = ADMIN_SQ_DEPTH,
+ .rq_depth = ADMIN_RQ_DEPTH,
+ .min_msix = 2, /* 1 Rx + 1 Tx */
+ .event_cb = fun_event_cb,
+ .serv_cb = fun_service_cb,
+ };
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+ struct fun_dev *fdev;
+ int rc;
+
+ devlink = fun_devlink_alloc(&pdev->dev);
+ if (!devlink) {
+ dev_err(&pdev->dev, "devlink alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ed = devlink_priv(devlink);
+ mutex_init(&ed->state_mutex);
+
+ fdev = &ed->fdev;
+ rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME);
+ if (rc)
+ goto free_devlink;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc > 0)
+ rc = fun_create_ports(ed, rc);
+ if (rc < 0)
+ goto disable_dev;
+
+ fun_serv_restart(fdev);
+ fun_devlink_register(devlink);
+ return 0;
+
+disable_dev:
+ fun_dev_disable(fdev);
+free_devlink:
+ mutex_destroy(&ed->state_mutex);
+ fun_devlink_free(devlink);
+ return rc;
+}
+
+static void funeth_remove(struct pci_dev *pdev)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+
+ ed = to_fun_ethdev(fdev);
+ devlink = priv_to_devlink(ed);
+ fun_devlink_unregister(devlink);
+
+#ifdef CONFIG_PCI_IOV
+ funeth_sriov_configure(pdev, 0);
+#endif
+
+ fun_serv_stop(fdev);
+ fun_destroy_ports(ed);
+ fun_dev_disable(fdev);
+ mutex_destroy(&ed->state_mutex);
+
+ fun_devlink_free(devlink);
+}
+
+static struct pci_driver funeth_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = funeth_id_table,
+ .probe = funeth_probe,
+ .remove = funeth_remove,
+ .shutdown = funeth_remove,
+ .sriov_configure = funeth_sriov_configure,
+};
+
+module_pci_driver(funeth_driver);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Fungible Ethernet Network Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEVICE_TABLE(pci, funeth_id_table);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
new file mode 100644
index 000000000000..29a6c2ede43a
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf_trace.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include "funeth_txrx.h"
+#include "funeth.h"
+#include "fun_queue.h"
+
+#define CREATE_TRACE_POINTS
+#include "funeth_trace.h"
+
+/* Given the device's max supported MTU and pages of at least 4KB, a packet
+ * can be scattered into at most 4 buffers.
+ */
+#define RX_MAX_FRAGS 4
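+/* For instance, a 9000-byte frame plus head- and tail-room spans three 4KB
+ * pages; the limit of 4 covers the device's maximum MTU.
+ */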
+
+/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
+#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
+ * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
+ * occupying the buffer.
+ */
+#define EXTRA_PAGE_REFS 1000000
+#define MIN_PAGE_REFS 1000
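+
+/* A page is exclusively ours again once page_ref_count() equals the refs we
+ * hold; cache_get() below relies on this test to decide whether a cached
+ * buffer can be reused.
+ */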
+
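+/* Flags accumulated in q->xdp_flush during a NAPI poll (set in fun_run_xdp()
+ * below) so XDP Tx doorbells and redirects are flushed once per poll rather
+ * than once per packet.
+ */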
+enum {
+ FUN_XDP_FLUSH_REDIR = 1,
+ FUN_XDP_FLUSH_TX = 2,
+};
+
+/* See if a page is running low on refs we are holding and if so take more. */
+static void refresh_refs(struct funeth_rxbuf *buf)
+{
+ if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
+ buf->pg_refs += EXTRA_PAGE_REFS;
+ page_ref_add(buf->page, EXTRA_PAGE_REFS);
+ }
+}
+
+/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
+ * page is worth retaining and there's room for it. Otherwise the page is
+ * unmapped and our references released.
+ */
+static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
+{
+ struct funeth_rx_cache *c = &q->cache;
+
+ if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
+ c->bufs[c->prod_cnt & c->mask] = *buf;
+ c->prod_cnt++;
+ } else {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ }
+}
+
+/* Get a page from the Rx buffer cache. We only consider the next available
+ * page and return it if we own all its references.
+ */
+static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ struct funeth_rx_cache *c = &q->cache;
+ struct funeth_rxbuf *buf;
+
+ if (c->prod_cnt == c->cons_cnt)
+ return false; /* empty cache */
+
+ buf = &c->bufs[c->cons_cnt & c->mask];
+ if (page_ref_count(buf->page) == buf->pg_refs) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ *rb = *buf;
+ buf->page = NULL;
+ refresh_refs(rb);
+ c->cons_cnt++;
+ return true;
+ }
+
+ /* Page can't be reused. If the cache is full drop this page. */
+ if (c->prod_cnt - c->cons_cnt > c->mask) {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ buf->page = NULL;
+ c->cons_cnt++;
+ }
+ return false;
+}
+
+/* Allocate and DMA-map a page for receive. */
+static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
+ int node, gfp_t gfp)
+{
+ struct page *p;
+
+ if (cache_get(q, rb))
+ return 0;
+
+ p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
+ FUN_QSTAT_INC(q, rx_map_err);
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ FUN_QSTAT_INC(q, rx_page_alloc);
+
+ rb->page = p;
+ rb->pg_refs = 1;
+ refresh_refs(rb);
+ rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
+ return 0;
+}
+
+static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ if (rb->page) {
+ dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __page_frag_cache_drain(rb->page, rb->pg_refs);
+ rb->page = NULL;
+ }
+}
+
+/* Run the XDP program assigned to an Rx queue.
+ * Return %NULL if the buffer is consumed, or the virtual address of the packet
+ * to turn into an skb.
+ */
+static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
+ int ref_ok, struct funeth_txq *xdp_q)
+{
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 act;
+
+ /* VA includes the headroom, frag size includes headroom + tailroom */
+ xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
+ &q->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
+ (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ /* remove headroom, which may not be FUN_XDP_HEADROOM now */
+ skb_frag_size_set(frags, xdp.data_end - xdp.data);
+ skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
+ goto pass;
+ case XDP_TX:
+ if (unlikely(!ref_ok))
+ goto pass;
+
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_tx);
+ q->xdp_flush |= FUN_XDP_FLUSH_TX;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(!ref_ok))
+ goto pass;
+ if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_redir);
+ q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(q->netdev, xdp_prog, act);
+xdp_error:
+ q->cur_buf->pg_refs++; /* return frags' page reference */
+ FUN_QSTAT_INC(q, xdp_err);
+ break;
+ case XDP_DROP:
+ q->cur_buf->pg_refs++;
+ FUN_QSTAT_INC(q, xdp_drops);
+ break;
+ }
+ return NULL;
+
+pass:
+ return xdp.data;
+}
+
+/* A CQE contains a fixed completion structure along with optional metadata and
+ * even packet data. Given the start address of a CQE return the start of the
+ * contained fixed structure, which lies at the end.
+ */
+static const void *cqe_to_info(const void *cqe)
+{
+ return cqe + FUNETH_CQE_INFO_OFFSET;
+}
+
+/* The inverse of cqe_to_info(). */
+static const void *info_to_cqe(const void *cqe_info)
+{
+ return cqe_info - FUNETH_CQE_INFO_OFFSET;
+}
+
+/* Return the type of hash provided by the device based on the L3 and L4
+ * protocols it parsed for the packet.
+ */
+static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
+{
+ static const enum pkt_hash_types htype_map[] = {
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
+ };
+ u16 key;
+
+ /* Build the key from the TCP/UDP and IP/IPv6 bits */
+ key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
+ ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);
+
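+	/* Illustration (exact bit values come from the FUN_ETH_RX_CV_*
+	 * definitions): a packet parsed as TCP over IP sets both the L4
+	 * and L3 bits above, giving key 3 and hence PKT_HASH_TYPE_L4.
+	 */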
+ return htype_map[key];
+}
+
+/* Each received packet can be scattered across several Rx buffers or can
+ * share a buffer with previously received packets depending on the buffer
+ * and packet sizes and the room available in the most recently used buffer.
+ *
+ * The rules are:
+ * - If the buffer at the head of an RQ has not been used it gets (part of) the
+ * next incoming packet.
+ * - Otherwise, if the packet fully fits in the buffer's remaining space the
+ * packet is written there.
+ * - Otherwise, the packet goes into the next Rx buffer.
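+ *   E.g., with 4KB pages two MTU-sized packets can typically share a buffer.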
+ *
+ * This function returns the Rx buffer for a packet (or a fragment thereof)
+ * of the given length. If the returned buffer isn't @buf, it either recycles
+ * or frees @buf before advancing the queue to the next buffer.
+ *
+ * If called repeatedly with the remaining length of a packet it will walk
+ * through all the buffers containing the packet.
+ */
+static struct funeth_rxbuf *
+get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
+{
+ if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
+ return buf; /* @buf holds (part of) the packet */
+
+	/* The packet occupies part of the next buffer. Move there after
+	 * replenishing the current buffer slot, either with the spare page or
+	 * by reusing the slot's existing page. Note that if a spare page isn't
+	 * available and the current packet occupies @buf, it is a multi-frag
+	 * packet that will be dropped, leaving @buf available for reuse.
+	 */
+ if ((page_ref_count(buf->page) == buf->pg_refs &&
+ buf->node == numa_mem_id()) || !q->spare_buf.page) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ refresh_refs(buf);
+ } else {
+ cache_offer(q, buf);
+ *buf = q->spare_buf;
+ q->spare_buf.page = NULL;
+ q->rqes[q->rq_cons & q->rq_mask] =
+ FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
+ }
+ q->buf_offset = 0;
+ q->rq_cons++;
+ return &q->bufs[q->rq_cons & q->rq_mask];
+}
+
+/* Gather the page fragments making up the first Rx packet on @q. Its total
+ * length @tot_len includes optional head- and tail-rooms.
+ *
+ * Return 0 if the device retains ownership of at least some of the pages.
+ * In this case the caller may only copy the packet.
+ *
+ * A non-zero return value gives the caller permission to use references to the
+ * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
+ * one of the pages is PF_MEMALLOC.
+ *
+ * Regardless of outcome the caller is granted a reference to each of the pages.
+ */
+static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
+ skb_frag_t *frags)
+{
+ struct funeth_rxbuf *buf = q->cur_buf;
+ unsigned int frag_len;
+ int ref_ok = 1;
+
+ for (;;) {
+ buf = get_buf(q, buf, tot_len);
+
+		/* We always keep the RQ full of buffers, so before we can give
+		 * one of our pages to the stack we require that we can obtain
+		 * a replacement page. If we can't, the packet will either be
+		 * copied or dropped, so we retain ownership of the page and
+		 * can reuse it.
+		 */
+ if (!q->spare_buf.page &&
+ funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
+ GFP_ATOMIC | __GFP_MEMALLOC))
+ ref_ok = 0;
+
+ frag_len = min_t(unsigned int, tot_len,
+ PAGE_SIZE - q->buf_offset);
+ dma_sync_single_for_cpu(q->dma_dev,
+ buf->dma_addr + q->buf_offset,
+ frag_len, DMA_FROM_DEVICE);
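+		/* buf->node is -1 for PF_MEMALLOC pages (see
+		 * funeth_alloc_page), so OR-ing it into a non-zero ref_ok
+		 * turns ref_ok negative to flag such pages to the caller.
+		 */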
+ buf->pg_refs--;
+ if (ref_ok)
+ ref_ok |= buf->node;
+
+ __skb_frag_set_page(frags, buf->page);
+ skb_frag_off_set(frags, q->buf_offset);
+ skb_frag_size_set(frags++, frag_len);
+
+ tot_len -= frag_len;
+ if (!tot_len)
+ break;
+
+ q->buf_offset = PAGE_SIZE;
+ }
+ q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
+ q->cur_buf = buf;
+ return ref_ok;
+}
+
+static bool rx_hwtstamp_enabled(const struct net_device *dev)
+{
+ const struct funeth_priv *d = netdev_priv(dev);
+
+ return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
+/* Advance the CQ pointers and phase tag to the next CQE. */
+static void advance_cq(struct funeth_rxq *q)
+{
+ if (unlikely(q->cq_head == q->cq_mask)) {
+ q->cq_head = 0;
+ q->phase ^= 1;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+ } else {
+ q->cq_head++;
+ q->next_cqe_info += FUNETH_CQE_SIZE;
+ }
+ prefetch(q->next_cqe_info);
+}
+
+/* Process the packet represented by the head CQE of @q. Gather the packet's
+ * fragments, run it through the optional XDP program, and if needed construct
+ * an skb and pass it to the stack.
+ */
+static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
+{
+ const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
+ unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
+ struct net_device *ndev = q->netdev;
+ skb_frag_t frags[RX_MAX_FRAGS];
+ struct skb_shared_info *si;
+ unsigned int headroom;
+ gro_result_t gro_res;
+ struct sk_buff *skb;
+ int ref_ok;
+ void *va;
+ u16 cv;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_pkts++;
+ q->stats.rx_bytes += pkt_len;
+ u64_stats_update_end(&q->syncp);
+
+ advance_cq(q);
+
+ /* account for head- and tail-room, present only for 1-buffer packets */
+ tot_len = pkt_len;
+ headroom = be16_to_cpu(rxreq->headroom);
+ if (likely(headroom))
+ tot_len += FUN_RX_TAILROOM + headroom;
+
+ ref_ok = fun_gather_pkt(q, tot_len, frags);
+ va = skb_frag_address(frags);
+ if (xdp_q && headroom == FUN_XDP_HEADROOM) {
+ va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
+ if (!va)
+ return;
+ headroom = 0; /* XDP_PASS trims it */
+ }
+ if (unlikely(!ref_ok))
+ goto no_mem;
+
+ if (likely(headroom)) {
+ /* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
+ prefetch(va + headroom);
+ skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
+ if (unlikely(!skb))
+ goto no_mem;
+
+ skb_reserve(skb, headroom);
+ __skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ } else {
+ prefetch(va);
+ skb = napi_get_frags(q->napi);
+ if (unlikely(!skb))
+ goto no_mem;
+
+ if (ref_ok < 0)
+ skb->pfmemalloc = 1;
+
+ si = skb_shinfo(skb);
+ si->nr_frags = rxreq->nsgl;
+ for (i = 0; i < si->nr_frags; i++)
+ si->frags[i] = frags[i];
+
+ skb->len = pkt_len;
+ skb->data_len = pkt_len;
+ skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
+ }
+
+ skb_record_rx_queue(skb, q->qidx);
+ cv = be16_to_cpu(rxreq->pkt_cv);
+ if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
+ skb_set_hash(skb, be32_to_cpu(rxreq->hash),
+ cqe_to_pkt_hash_type(cv));
+ if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
+ FUN_QSTAT_INC(q, rx_cso);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
+ }
+ if (unlikely(rx_hwtstamp_enabled(q->netdev)))
+ skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);
+
+ trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);
+
+ gro_res = skb->data_len ? napi_gro_frags(q->napi) :
+ napi_gro_receive(q->napi, skb);
+ if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
+ FUN_QSTAT_INC(q, gro_merged);
+ else if (gro_res == GRO_HELD)
+ FUN_QSTAT_INC(q, gro_pkts);
+ return;
+
+no_mem:
+ FUN_QSTAT_INC(q, rx_mem_drops);
+
+ /* Release the references we've been granted for the frag pages.
+ * We return the ref of the last frag and free the rest.
+ */
+ q->cur_buf->pg_refs++;
+ for (i = 0; i < rxreq->nsgl - 1; i++)
+ __free_page(skb_frag_page(frags + i));
+}
+
+/* Return 0 if the phase tag of the CQE at the CQ's head matches the expected
+ * phase, indicating the CQE is new. The device flips the phase bit it writes
+ * each time it wraps around the CQ, so a matching tag lets SW recognize fresh
+ * CQEs without a separate head pointer.
+ */
+static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
+{
+ u16 sf_p = be16_to_cpu(ci->sf_p);
+
+ return (sf_p & 1) ^ phase;
+}
+
+/* Walk through a CQ identifying and processing fresh CQEs up to the given
+ * budget. Return the remaining budget.
+ */
+static int fun_process_cqes(struct funeth_rxq *q, int budget)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct funeth_txq **xdpqs, *xdp_q = NULL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (xdpqs)
+ xdp_q = xdpqs[smp_processor_id()];
+
+ while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
+ /* access other descriptor fields after the phase check */
+ dma_rmb();
+
+ fun_handle_cqe_pkt(q, xdp_q);
+ budget--;
+ }
+
+ if (unlikely(q->xdp_flush)) {
+ if (q->xdp_flush & FUN_XDP_FLUSH_TX)
+ fun_txq_wr_db(xdp_q);
+ if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
+ xdp_do_flush();
+ q->xdp_flush = 0;
+ }
+
+ return budget;
+}
+
+/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
+ * doorbells as needed.
+ */
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_rxq *q = irq->rxq;
+ int work_done = budget - fun_process_cqes(q, budget);
+ u32 cq_db_val = q->cq_head;
+
+ if (unlikely(work_done >= budget))
+ FUN_QSTAT_INC(q, rx_budget);
+ else if (napi_complete_done(napi, work_done))
+ cq_db_val |= q->irq_db_val;
+
+ /* check whether to post new Rx buffers */
+ if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
+ u64_stats_update_end(&q->syncp);
+ q->rq_cons_db = q->rq_cons;
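+		/* the RQ doorbell takes the index of the last posted buffer */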
+ writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
+ }
+
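+	/* Writing cq_head acknowledges the processed CQEs; if NAPI completed,
+	 * the OR-ed bits in cq_db_val also re-arm the interrupt.
+	 */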
+ writel(cq_db_val, q->cq_db);
+ return work_done;
+}
+
+/* Free the Rx buffers of an Rx queue. */
+static void fun_rxq_free_bufs(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++)
+ funeth_free_page(q, b);
+
+ funeth_free_page(q, &q->spare_buf);
+ q->cur_buf = NULL;
+}
+
+/* Initially provision an Rx queue with Rx buffers. */
+static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++) {
+ if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
+ fun_rxq_free_bufs(q);
+ return -ENOMEM;
+ }
+ q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
+ }
+ q->cur_buf = q->bufs;
+ return 0;
+}
+
+/* Initialize a used-buffer cache of the given depth (a power of 2). */
+static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
+ int node)
+{
+ c->mask = depth - 1;
+ c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
+ return c->bufs ? 0 : -ENOMEM;
+}
+
+/* Deallocate an Rx queue's used-buffer cache and its contents. */
+static void fun_rxq_free_cache(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->cache.bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->cache.mask; i++, b++)
+ funeth_free_page(q, b);
+
+ kvfree(q->cache.bufs);
+ q->cache.bufs = NULL;
+}
+
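+/* Install or remove an Rx queue's XDP program. If the XDP and non-XDP packet
+ * headrooms differ, the queue's headroom is first changed on the device.
+ */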
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_admin_epcq_req cmd;
+ u16 headroom;
+ int err;
+
+ headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+ if (headroom != q->headroom) {
+ cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd));
+ cmd.u.modify =
+ FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
+ 0, q->hw_cqid, headroom);
+ err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
+ 0);
+ if (err)
+ return err;
+ q->headroom = headroom;
+ }
+
+ WRITE_ONCE(q->xdp_prog, prog);
+ return 0;
+}
+
+/* Create an Rx queue, allocating the host memory it needs. */
+static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ncqe,
+ unsigned int nrqe,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_rxq *q;
+ int err = -ENOMEM;
+ int numa_node;
+
+ numa_node = fun_irq_node(irq);
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->qidx = qidx;
+ q->netdev = dev;
+ q->cq_mask = ncqe - 1;
+ q->rq_mask = nrqe - 1;
+ q->numa_node = numa_node;
+ q->rq_db_thres = nrqe / 4;
+ u64_stats_init(&q->syncp);
+ q->dma_dev = &fp->pdev->dev;
+
+ q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
+ sizeof(*q->bufs), false, numa_node,
+ &q->rq_dma_addr, (void **)&q->bufs, NULL);
+ if (!q->rqes)
+ goto free_q;
+
+ q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
+ false, numa_node, &q->cq_dma_addr, NULL,
+ NULL);
+ if (!q->cqes)
+ goto free_rqes;
+
+ err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
+ if (err)
+ goto free_cqes;
+
+ err = fun_rxq_alloc_bufs(q, numa_node);
+ if (err)
+ goto free_cache;
+
+ q->stats.rx_bufs = q->rq_mask;
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_cache:
+ fun_rxq_free_cache(q);
+free_cqes:
+ dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
+ q->cq_dma_addr);
+free_rqes:
+ fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
+ q->rq_dma_addr, q->bufs);
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
+ return ERR_PTR(err);
+}
+
+static void fun_rxq_free_sw(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_rxq_free_cache(q);
+ fun_rxq_free_bufs(q);
+ fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
+ q->rqes, q->rq_dma_addr, q->bufs);
+ dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
+ q->cqes, q->cq_dma_addr);
+
+	/* Before freeing the queue, transfer its key counters to the netdev
+	 * totals.
+	 */
+ fp->rx_packets += q->stats.rx_pkts;
+ fp->rx_bytes += q->stats.rx_bytes;
+ fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;
+
+ kfree(q);
+}
+
+/* Create an Rx queue's resources on the device. */
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int ncqe = q->cq_mask + 1;
+ unsigned int nrqe = q->rq_mask + 1;
+ int err;
+
+ err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
+ irq->napi.napi_id);
+ if (err)
+ goto out;
+
+ err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ goto xdp_unreg;
+
+ q->phase = 1;
+ q->irq_cnt = 0;
+ q->cq_head = 0;
+ q->rq_cons = 0;
+ q->rq_cons_db = 0;
+ q->buf_offset = 0;
+ q->napi = &irq->napi;
+ q->irq_db_val = fp->cq_irq_db;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+
+ q->xdp_prog = fp->xdp_prog;
+ q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+
+ err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
+ FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
+ 0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
+ &q->hw_sqid, &q->rq_db);
+ if (err)
+ goto xdp_unreg;
+
+ err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
+ q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
+ q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
+ irq->irq_idx, 0, fp->fdev->kern_end_qid,
+ &q->hw_cqid, &q->cq_db);
+ if (err)
+ goto free_rq;
+
+ irq->rxq = q;
+ writel(q->rq_mask, q->rq_db);
+ q->init_state = FUN_QSTATE_INIT_FULL;
+
+ netif_info(fp, ifup, q->netdev,
+ "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
+ q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
+ q->numa_node, q->headroom);
+ return 0;
+
+free_rq:
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+xdp_unreg:
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+out:
+ netdev_err(q->netdev,
+ "Failed to create Rx queue %u on device, error %d\n",
+ q->qidx, err);
+ return err;
+}
+
+static void fun_rxq_free_dev(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_irq *irq;
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ irq = container_of(q->napi, struct fun_irq, napi);
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
+ q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);
+
+ irq->rxq = NULL;
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+ fun_destroy_cq(fp->fdev, q->hw_cqid);
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance an Rx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp)
+{
+ struct funeth_rxq *q = *qp;
+ int err;
+
+ if (!q) {
+ q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+ }
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_rxq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_rxq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Rx queue resources until it reaches the target state. */
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_rxq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_rxq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
new file mode 100644
index 000000000000..9e58dfec19d5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM funeth
+
+#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUNETH_H
+
+#include <linux/tracepoint.h>
+
+#include "funeth_txrx.h"
+
+TRACE_EVENT(funeth_tx,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 len,
+ u32 sqe_idx,
+ u32 ngle),
+
+ TP_ARGS(txq, len, sqe_idx, ngle),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, len)
+ __field(u32, sqe_idx)
+ __field(u32, ngle)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->len = len;
+ __entry->sqe_idx = sqe_idx;
+ __entry->ngle = ngle;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->len, __entry->ngle)
+);
+
+TRACE_EVENT(funeth_tx_free,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 sqe_idx,
+ u32 num_sqes,
+ u32 hw_head),
+
+ TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, sqe_idx)
+ __field(u32, num_sqes)
+ __field(u32, hw_head)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->sqe_idx = sqe_idx;
+ __entry->num_sqes = num_sqes;
+ __entry->hw_head = hw_head;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->num_sqes, __entry->hw_head)
+);
+
+TRACE_EVENT(funeth_rx,
+
+ TP_PROTO(const struct funeth_rxq *rxq,
+ u32 num_rqes,
+ u32 pkt_len,
+ u32 hash,
+ u32 cls_vec),
+
+ TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, cq_head)
+ __field(u32, num_rqes)
+ __field(u32, len)
+ __field(u32, hash)
+ __field(u32, cls_vec)
+ __string(devname, rxq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = rxq->qidx;
+ __entry->cq_head = rxq->cq_head;
+ __entry->num_rqes = num_rqes;
+ __entry->len = pkt_len;
+ __entry->hash = hash;
+ __entry->cls_vec = cls_vec;
+ __assign_str(devname, rxq->netdev->name);
+ ),
+
+ TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
+ __get_str(devname), __entry->qidx, __entry->cq_head,
+ __entry->num_rqes, __entry->len, __entry->hash,
+ __entry->cls_vec)
+);
+
+#endif /* _TRACE_FUNETH_H */
+
+/* Below must be outside protection. */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE funeth_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
new file mode 100644
index 000000000000..706d81e39a54
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/ip.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <uapi/linux/udp.h>
+#include "funeth.h"
+#include "funeth_ktls.h"
+#include "funeth_txrx.h"
+#include "funeth_trace.h"
+#include "fun_queue.h"
+
+#define FUN_XDP_CLEAN_THRES 32
+#define FUN_XDP_CLEAN_BATCH 16
+
+/* DMA-map a packet and return the (length, DMA_address) pairs for its
+ * segments. If a mapping error occurs -ENOMEM is returned. The mapping
+ * covers one address/length pair for the linear data plus one pair per
+ * fragment of the optional skb_shared_info.
+ */
+static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
+ void *data, unsigned int data_len,
+ dma_addr_t *addr, unsigned int *len)
+{
+ const skb_frag_t *fp, *end;
+
+ *len = data_len;
+ *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ return -ENOMEM;
+
+ if (!si)
+ return 0;
+
+ for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
+ *++len = skb_frag_size(fp);
+ *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+/* Return the address just past the end of a Tx queue's descriptor ring.
+ * It exploits the fact that the HW writeback area is just after the end
+ * of the descriptor ring.
+ */
+static void *txq_end(const struct funeth_txq *q)
+{
+ return (void *)q->hw_wb;
+}
+
+/* Return the amount of space within a Tx ring from the given address to the
+ * end.
+ */
+static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
+{
+ return txq_end(q) - p;
+}
+
+/* Return the number of Tx descriptors occupied by a Tx request. */
+static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
+{
+ return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
+}
+
+/* Write a gather list to the Tx descriptor at @req from @ngle address/length
+ * pairs.
+ */
+static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
+ struct fun_eth_tx_req *req,
+ const dma_addr_t *addrs,
+ const unsigned int *lens,
+ unsigned int ngle)
+{
+ struct fun_dataop_gl *gle;
+ unsigned int i;
+
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
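+	/* The gather list hit the end of the ring; write any remaining
+	 * entries from the start of the descriptor memory.
+	 */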
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ return gle;
+}
+
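+/* Return the 16-bit TCP header word holding the data offset and flag bits,
+ * in network byte order, for use in the device's LSO header edits.
+ */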
+static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
+{
+ return *(__be16 *)&tcp_flag_word(th);
+}
+
+static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int *tls_len)
+{
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct fun_ktls_tx_ctx *tls_ctx;
+ u32 datalen, seq;
+
+ datalen = skb->len - skb_tcp_all_headers(skb);
+ if (!datalen)
+ return skb;
+
+ if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
+ seq = ntohl(tcp_hdr(skb)->seq);
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+
+ if (likely(tls_ctx->next_seq == seq)) {
+ *tls_len = datalen;
+ return skb;
+ }
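+		/* The TCP sequence has jumped ahead of the offload state; if
+		 * the gap looks like a genuine forward jump rather than a
+		 * wrap, ask the stack to resync the offload context.
+		 */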
+ if (seq - tls_ctx->next_seq < U32_MAX / 4) {
+ tls_offload_tx_resync_request(skb->sk, seq,
+ tls_ctx->next_seq);
+ }
+ }
+
+ FUN_QSTAT_INC(q, tx_tls_fallback);
+ skb = tls_encrypt_skb(skb);
+ if (!skb)
+ FUN_QSTAT_INC(q, tx_tls_drops);
+
+ return skb;
+#else
+ return NULL;
+#endif
+}
+
+/* Write as many descriptors as needed for the supplied skb starting at the
+ * current producer location. The caller has made certain enough descriptors
+ * are available.
+ *
+ * Returns the number of descriptors written, 0 on error.
+ */
+static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int tls_len)
+{
+ unsigned int extra_bytes = 0, extra_pkts = 0;
+ unsigned int idx = q->prod_cnt & q->mask;
+ const struct skb_shared_info *shinfo;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t addrs[MAX_SKB_FRAGS + 1];
+ struct fun_eth_tx_req *req;
+ struct fun_dataop_gl *gle;
+ const struct tcphdr *th;
+ unsigned int l4_hlen;
+ unsigned int ngle;
+ u16 flags;
+
+ shinfo = skb_shinfo(skb);
+ if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
+ skb_headlen(skb), addrs, lens))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return 0;
+ }
+
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = 0;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+
+ if (likely(shinfo->gso_size)) {
+ if (skb->encapsulation) {
+ u16 ol4_ofst;
+
+ flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_OUTER_L3_LEN;
+ if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
+ FUN_ETH_OUTER_UDP;
+ if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
+ ol4_ofst = skb_transport_offset(skb);
+ } else {
+ ol4_ofst = skb_inner_network_offset(skb);
+ }
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_OUTER_IPV6;
+
+ if (skb->inner_network_header) {
+ if (inner_ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ else
+ flags |= FUN_ETH_INNER_IPV6 |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ }
+ th = inner_tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_inner_network_offset(skb),
+ skb_inner_transport_offset(skb),
+ skb_network_offset(skb), ol4_ofst);
+ FUN_QSTAT_INC(q, tx_encap_tso);
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+ flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L4_LEN |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_INNER_IPV6;
+
+ l4_hlen = sizeof(struct udphdr);
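+			/* Pack the UDP header length into the TCP doff
+			 * position: 8 << 10 == (8 / 4) << 12.
+			 */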
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ cpu_to_be16(l4_hlen << 10), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_uso);
+ } else {
+ /* HW considers one set of headers as inner */
+ flags = FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ if (shinfo->gso_type & SKB_GSO_TCPV6)
+ flags |= FUN_ETH_INNER_IPV6;
+ else
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ th = tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_tso);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_cso += shinfo->gso_segs;
+ u64_stats_update_end(&q->syncp);
+
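+		/* Each additional GSO segment carries its own copy of the
+		 * headers through L4; count those bytes in the Tx statistics.
+		 */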
+ extra_pkts = shinfo->gso_segs - 1;
+ extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
+ l4_hlen) * extra_pkts;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ flags |= FUN_ETH_INNER_UDP;
+ fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
+ skb_checksum_start_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_cso);
+ } else {
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ }
+
+ ngle = shinfo->nr_frags + 1;
+ req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
+
+ gle = fun_write_gl(q, req, addrs, lens, ngle);
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
+ struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
+ struct fun_ktls_tx_ctx *tls_ctx;
+
+ req->len8 += FUNETH_TLS_SZ / 8;
+ req->flags = cpu_to_be16(FUN_ETH_TX_TLS);
+
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ tls->tlsid = tls_ctx->tlsid;
+ tls_ctx->next_seq += tls_len;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_tls_bytes += tls_len;
+ q->stats.tx_tls_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += skb->len + extra_bytes;
+ q->stats.tx_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+
+ q->info[idx].skb = skb;
+
+ trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
+ return tx_req_ndesc(req);
+}
+
+/* Return the number of available descriptors of a Tx queue.
+ * HW assumes head==tail means the ring is empty, so we need to keep one
+ * descriptor unused.
+ */
+static unsigned int fun_txq_avail(const struct funeth_txq *q)
+{
+ return q->mask - q->prod_cnt + q->cons_cnt;
+}
+
+/* Stop a queue if it can't handle another worst-case packet. */
+static void fun_tx_check_stop(struct funeth_txq *q)
+{
+ if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
+ return;
+
+ netif_tx_stop_queue(q->ndq);
+
+ /* NAPI reclaim is freeing packets in parallel with us and we may race.
+ * We have stopped the queue but check again after synchronizing with
+ * reclaim.
+ */
+ smp_mb();
+ if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
+ FUN_QSTAT_INC(q, tx_nstops);
+ else
+ netif_tx_start_queue(q->ndq);
+}
+
+/* Return true if a queue has enough space to restart. The current condition
+ * is that at least a quarter of the queue's descriptors are free.
+ */
+static bool fun_txq_may_restart(struct funeth_txq *q)
+{
+ return fun_txq_avail(q) >= q->mask / 4;
+}
+
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int qid = skb_get_queue_mapping(skb);
+ struct funeth_txq *q = fp->txqs[qid];
+ unsigned int tls_len = 0;
+ unsigned int ndesc;
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
+ tls_is_sk_tx_device_offloaded(skb->sk)) {
+ skb = fun_tls_tx(skb, q, &tls_len);
+ if (unlikely(!skb))
+ goto dropped;
+ }
+
+ ndesc = write_pkt_desc(skb, q, tls_len);
+ if (unlikely(!ndesc)) {
+ dev_kfree_skb_any(skb);
+ goto dropped;
+ }
+
+ q->prod_cnt += ndesc;
+ fun_tx_check_stop(q);
+
+ skb_tx_timestamp(skb);
+
+ if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
+ fun_txq_wr_db(q);
+ else
+ FUN_QSTAT_INC(q, tx_more);
+
+ return NETDEV_TX_OK;
+
+dropped:
+	/* A dropped packet may be the last one in an xmit_more train;
+	 * ring the doorbell just in case.
+ */
+ if (!netdev_xmit_more())
+ fun_txq_wr_db(q);
+ return NETDEV_TX_OK;
+}
+
+/* Return a Tx queue's HW head index written back to host memory. */
+static u16 txq_hw_head(const struct funeth_txq *q)
+{
+ return (u16)be64_to_cpu(*q->hw_wb);
+}
+
+/* Unmap the Tx packet starting at the given descriptor index and
+ * return the number of Tx descriptors it occupied.
+ */
+static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
+{
+ const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
+ unsigned int ngle = req->dataop.ngather;
+ struct fun_dataop_gl *gle;
+
+ if (ngle) {
+ gle = (struct fun_dataop_gl *)req->dataop.imm;
+ dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
+
+ for (gle++; --ngle && txq_to_end(q, gle); gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+
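+		/* Any remaining gather entries wrapped to the ring start. */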
+ for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+ }
+
+ return tx_req_ndesc(req);
+}
+
+/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
+ * queue if we freed enough descriptors.
+ *
+ * Return true if we exhausted the budget while there is more work to be done.
+ */
+static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
+{
+ unsigned int npkts = 0, nbytes = 0, ndesc = 0;
+ unsigned int head, limit, reclaim_idx;
+
+ /* budget may be 0, e.g., netpoll */
+ limit = budget ? budget : UINT_MAX;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
+		/* The HW head is continually updated; ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
+ struct sk_buff *skb = q->info[reclaim_idx].skb;
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
+
+ nbytes += skb->len;
+ napi_consume_skb(skb, budget);
+ ndesc += pkt_desc;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ npkts++;
+ } while (reclaim_idx != head && npkts < limit);
+ }
+
+ q->cons_cnt += ndesc;
+ netdev_tx_completed_queue(q->ndq, npkts, nbytes);
+ smp_mb(); /* pairs with the one in fun_tx_check_stop() */
+
+ if (unlikely(netif_tx_queue_stopped(q->ndq) &&
+ fun_txq_may_restart(q))) {
+ netif_tx_wake_queue(q->ndq);
+ FUN_QSTAT_INC(q, tx_nrestarts);
+ }
+
+ return reclaim_idx != head;
+}
+
+/* The NAPI handler for Tx queues. */
+int fun_txq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_txq *q = irq->txq;
+ unsigned int db_val;
+
+ if (fun_txq_reclaim(q, budget))
+ return budget; /* exhausted budget */
+
+ napi_complete(napi); /* exhausted pending work */
+ db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
+ writel(db_val, q->db);
+ return 0;
+}
+
+/* Reclaim up to @budget completed Tx packets from a Tx XDP queue. */
+static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
+{
+ unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
+		/* The HW head is continually updated; ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
+
+ xdp_return_frame(q->info[reclaim_idx].xdpf);
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
+
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ ndesc += pkt_desc;
+ npkts++;
+ } while (reclaim_idx != head && npkts < budget);
+ }
+
+ q->cons_cnt += ndesc;
+ return npkts;
+}
+
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
+{
+ unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
+ const struct skb_shared_info *si = NULL;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma[MAX_SKB_FRAGS + 1];
+ struct fun_eth_tx_req *req;
+
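+	/* XDP Tx queues have no NAPI reclaim; lazily clean a small batch of
+	 * completed descriptors once free space runs low.
+	 */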
+ if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
+ fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ si = xdp_get_shared_info_from_frame(xdpf);
+ tot_len = xdp_get_frame_len(xdpf);
+ nfrags += si->nr_frags;
+ ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
+ sizeof(struct fun_dataop_gl)),
+ FUNETH_SQE_SIZE);
+ }
+
+ if (unlikely(fun_txq_avail(q) < ndesc)) {
+ FUN_QSTAT_INC(q, tx_xdp_full);
+ return false;
+ }
+
+ if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
+ lens))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return false;
+ }
+
+ idx = q->prod_cnt & q->mask;
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = 0;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);
+
+ fun_write_gl(q, req, dma, lens, nfrags);
+
+ q->info[idx].xdpf = xdpf;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += tot_len;
+ q->stats.tx_pkts++;
+ u64_stats_update_end(&q->syncp);
+
+ trace_funeth_tx(q, tot_len, idx, nfrags);
+ q->prod_cnt += ndesc;
+
+ return true;
+}
+
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q, **xdpqs;
+ int i, q_idx;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (unlikely(!xdpqs))
+ return -ENETDOWN;
+
+ q_idx = smp_processor_id();
+ if (unlikely(q_idx >= fp->num_xdpqs))
+ return -ENXIO;
+
+ for (q = xdpqs[q_idx], i = 0; i < n; i++)
+ if (!fun_xdp_tx(q, frames[i]))
+ break;
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ fun_txq_wr_db(q);
+ return i;
+}
+
+/* Purge a Tx queue of any queued packets. Should be called once HW access
+ * to the packets has been revoked, e.g., after the queue has been disabled.
+ */
+static void fun_txq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ q->cons_cnt += fun_unmap_pkt(q, idx);
+ dev_kfree_skb_any(q->info[idx].skb);
+ }
+ netdev_tx_reset_queue(q->ndq);
+}
+
+static void fun_xdpq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ q->cons_cnt += fun_unmap_pkt(q, idx);
+ xdp_return_frame(q->info[idx].xdpf);
+ }
+}
+
+/* Create a Tx queue, allocating all the host resources needed. */
+static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ndesc,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q;
+ int numa_node;
+
+ if (irq)
+ numa_node = fun_irq_node(irq); /* skb Tx queue */
+ else
+ numa_node = cpu_to_node(qidx); /* XDP Tx queue */
+
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->dma_dev = &fp->pdev->dev;
+ q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
+ sizeof(*q->info), true, numa_node,
+ &q->dma_addr, (void **)&q->info,
+ &q->hw_wb);
+ if (!q->desc)
+ goto free_q;
+
+ q->netdev = dev;
+ q->mask = ndesc - 1;
+ q->qidx = qidx;
+ q->numa_node = numa_node;
+ u64_stats_init(&q->syncp);
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Can't allocate memory for %s queue %u\n",
+ irq ? "Tx" : "XDP", qidx);
+ return NULL;
+}
+
+static void fun_txq_free_sw(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
+ q->desc, q->dma_addr, q->info);
+
+ fp->tx_packets += q->stats.tx_pkts;
+ fp->tx_bytes += q->stats.tx_bytes;
+ fp->tx_dropped += q->stats.tx_map_err;
+
+ kfree(q);
+}
+
+/* Allocate the device portion of a Tx queue. */
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int irq_idx, ndesc = q->mask + 1;
+ int err;
+
+ q->irq = irq;
+ *q->hw_wb = 0;
+ q->prod_cnt = 0;
+ q->cons_cnt = 0;
+ irq_idx = irq ? irq->irq_idx : 0;
+
+ err = fun_sq_create(fp->fdev,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
+ FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
+ q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
+ irq_idx, 0, fp->fdev->kern_end_qid, 0,
+ &q->hw_qid, &q->db);
+ if (err)
+ goto out;
+
+ err = fun_create_and_bind_tx(fp, q->hw_qid);
+ if (err < 0)
+ goto free_devq;
+ q->ethid = err;
+
+ if (irq) {
+ irq->txq = q;
+ q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
+ q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
+ fp->tx_coal_count);
+ writel(q->irq_db_val, q->db);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_FULL;
+ netif_info(fp, ifup, q->netdev,
+ "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
+ irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
+ q->ethid, q->numa_node);
+ return 0;
+
+free_devq:
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+out:
+ netdev_err(q->netdev,
+ "Failed to create %s queue %u on device, error %d\n",
+ irq ? "Tx" : "XDP", q->qidx, err);
+ return err;
+}
+
+static void fun_txq_free_dev(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
+ q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
+ q->irq ? q->irq->irq_idx : 0, q->ethid);
+
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);
+
+ if (q->irq) {
+ q->irq->txq = NULL;
+ fun_txq_purge(q);
+ } else {
+ fun_xdpq_purge(q);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance a Tx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp)
+{
+ struct funeth_txq *q = *qp;
+ int err;
+
+ if (!q)
+ q = fun_txq_create_sw(dev, qidx, ndesc, irq);
+ if (!q)
+ return -ENOMEM;
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_txq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_txq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Tx queue resources until it reaches the target state.
+ * The queue must already be disconnected from the stack.
+ */
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_txq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_txq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
new file mode 100644
index 000000000000..671f51135c26
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_TXRX_H
+#define _FUNETH_TXRX_H
+
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+/* Tx descriptor size */
+#define FUNETH_SQE_SIZE 64U
+
+/* Size of device headers per Tx packet */
+#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req))
+
+/* Number of gather list entries per Tx descriptor */
+#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl))
+
+/* Max gather list size in bytes for an sk_buff. */
+#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls)
+#else
+# define FUNETH_TLS_SZ 0
+#endif
+
+/* Max number of Tx descriptors for an sk_buff using a gather list. */
+#define FUNETH_MAX_GL_DESC \
+ DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \
+ FUNETH_SQE_SIZE)
+
+/* Max number of Tx descriptors for any packet. */
+#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC
+
+/* Rx CQ descriptor size. */
+#define FUNETH_CQE_SIZE 64U
+
+/* Offset of cqe_info within a CQE. */
+#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the
+ * interrupt with the supplied time delay and packet count moderation settings.
+ */
+#define FUN_IRQ_CQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
+/* As above for SQ doorbells. */
+#define FUN_IRQ_SQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | \
+ ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
+/* Per packet tailroom. Present only for 1-frag packets. */
+#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM to
+ * accommodate two packets per buffer for 4K pages and 1500B MTUs.
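+ * (Illustrative arithmetic for a typical 64-bit build: 192B of headroom plus
+ * a 1514B frame plus ~320B of shared_info tailroom is about 2KB, so two
+ * packets fit in a 4KB page, while 256B of headroom would push the pair
+ * past 4KB.)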
+ */
+#define FUN_XDP_HEADROOM 192
+
+/* Initialization state of a queue. */
+enum {
+ FUN_QSTATE_DESTROYED, /* what queue? */
+ FUN_QSTATE_INIT_SW, /* exists in SW, not on the device */
+ FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */
+};
+
+/* Initialization state of an interrupt. */
+enum {
+ FUN_IRQ_INIT, /* initialized and in the XArray but inactive */
+ FUN_IRQ_REQUESTED, /* request_irq() done */
+ FUN_IRQ_ENABLED, /* processing enabled */
+ FUN_IRQ_DISABLED, /* processing disabled */
+};
+
+struct bpf_prog;
+
+struct funeth_txq_stats { /* per Tx queue SW counters */
+ u64 tx_pkts; /* # of Tx packets */
+ u64 tx_bytes; /* total bytes of Tx packets */
+ u64 tx_cso; /* # of packets with checksum offload */
+ u64 tx_tso; /* # of non-encapsulated TSO super-packets */
+ u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_uso; /* # of non-encapsulated UDP LSO super-packets */
+ u64 tx_more; /* # of DBs elided due to xmit_more */
+ u64 tx_nstops; /* # of times the queue has stopped */
+ u64 tx_nrestarts; /* # of times the queue has restarted */
+ u64 tx_map_err; /* # of packets dropped due to DMA mapping errors */
+ u64 tx_xdp_full; /* # of XDP packets that could not be enqueued */
+ u64 tx_tls_pkts; /* # of Tx TLS packets offloaded to HW */
+ u64 tx_tls_bytes; /* Tx bytes of HW-handled TLS payload */
+ u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */
+ u64 tx_tls_drops; /* attempted Tx TLS offloads dropped */
+};
+
+struct funeth_tx_info { /* per Tx descriptor state */
+ union {
+ struct sk_buff *skb; /* associated packet (sk_buff path) */
+ struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
+ };
+};
+
+struct funeth_txq {
+ /* RO cacheline of frequently accessed data */
+ u32 mask; /* queue depth - 1 */
+ u32 hw_qid; /* device ID of the queue */
+ void *desc; /* base address of descriptor ring */
+ struct funeth_tx_info *info;
+ struct device *dma_dev; /* device for DMA mappings */
+ volatile __be64 *hw_wb; /* HW write-back location */
+ u32 __iomem *db; /* SQ doorbell register address */
+ struct netdev_queue *ndq;
+ dma_addr_t dma_addr; /* DMA address of descriptor ring */
+ /* producer R/W cacheline */
+ u16 qidx; /* queue index within net_device */
+ u16 ethid;
+ u32 prod_cnt; /* producer counter */
+ struct funeth_txq_stats stats;
+ /* shared R/W cacheline, primarily accessed by consumer */
+ u32 irq_db_val; /* value written to IRQ doorbell */
+ u32 cons_cnt; /* consumer (cleanup) counter */
+ struct net_device *netdev;
+ struct fun_irq *irq;
+ int numa_node;
+ u8 init_state; /* queue initialization state */
+ struct u64_stats_sync syncp;
+};
+
+struct funeth_rxq_stats { /* per Rx queue SW counters */
+ u64 rx_pkts; /* # of received packets, including SW drops */
+ u64 rx_bytes; /* total size of received packets */
+ u64 rx_cso; /* # of packets with checksum offload */
+ u64 rx_bufs; /* total # of Rx buffers provided to device */
+ u64 gro_pkts; /* # of GRO superpackets */
+ u64 gro_merged; /* # of pkts merged into existing GRO superpackets */
+ u64 rx_page_alloc; /* # of page allocations for Rx buffers */
+ u64 rx_budget; /* NAPI iterations that exhausted their budget */
+ u64 rx_mem_drops; /* # of packets dropped due to memory shortage */
+ u64 rx_map_err; /* # of page DMA mapping errors */
+ u64 xdp_drops; /* XDP_DROPped packets */
+ u64 xdp_tx; /* successful XDP transmits */
+ u64 xdp_redir; /* successful XDP redirects */
+ u64 xdp_err; /* packets dropped due to XDP errors */
+};
+
+struct funeth_rxbuf { /* per Rx buffer state */
+ struct page *page; /* associated page */
+ dma_addr_t dma_addr; /* DMA address of page start */
+ int pg_refs; /* page refs held by driver */
+ int node; /* page node, or -1 if it is PF_MEMALLOC */
+};
+
+struct funeth_rx_cache { /* cache of DMA-mapped previously used buffers */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ unsigned int prod_cnt; /* producer counter */
+ unsigned int cons_cnt; /* consumer counter */
+ unsigned int mask; /* depth - 1 */
+};
+
+/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
+struct funeth_rxq {
+ struct net_device *netdev;
+ struct napi_struct *napi;
+ struct device *dma_dev; /* device for DMA mappings */
+ void *cqes; /* base of CQ descriptor ring */
+ const void *next_cqe_info; /* fun_cqe_info of next CQE */
+ u32 __iomem *cq_db; /* CQ doorbell register address */
+ unsigned int cq_head; /* CQ head index */
+ unsigned int cq_mask; /* CQ depth - 1 */
+ u16 phase; /* CQ phase tag */
+ u16 qidx; /* queue index within net_device */
+ unsigned int irq_db_val; /* IRQ info for CQ doorbell */
+ struct fun_eprq_rqbuf *rqes; /* base of RQ descriptor ring */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ struct funeth_rxbuf *cur_buf; /* currently active buffer */
+ u32 __iomem *rq_db; /* RQ doorbell register address */
+ unsigned int rq_cons; /* RQ consumer counter */
+ unsigned int rq_mask; /* RQ depth - 1 */
+ unsigned int buf_offset; /* offset of next pkt in head buffer */
+ u8 xdp_flush; /* XDP flush types needed at NAPI end */
+ u8 init_state; /* queue initialization state */
+ u16 headroom; /* per packet headroom */
+ unsigned int rq_cons_db; /* value of rq_cons at last RQ db */
+ unsigned int rq_db_thres; /* # of new buffers needed to write RQ db */
+ struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */
+ struct funeth_rx_cache cache; /* used buffer cache */
+ struct bpf_prog *xdp_prog; /* optional XDP BPF program */
+ struct funeth_rxq_stats stats;
+ dma_addr_t cq_dma_addr; /* DMA address of CQE ring */
+ dma_addr_t rq_dma_addr; /* DMA address of RQE ring */
+ u16 irq_cnt;
+ u32 hw_cqid; /* device ID of the queue's CQ */
+ u32 hw_sqid; /* device ID of the queue's SQ */
+ int numa_node;
+ struct u64_stats_sync syncp;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+#define FUN_QSTAT_INC(q, counter) \
+ do { \
+ u64_stats_update_begin(&(q)->syncp); \
+ (q)->stats.counter++; \
+ u64_stats_update_end(&(q)->syncp); \
+ } while (0)
+
+#define FUN_QSTAT_READ(q, seq, stats_copy) \
+ do { \
+ seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
+ stats_copy = (q)->stats; \
+ } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
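+/* Usage sketch (local names illustrative):
+ *	unsigned int seq;
+ *	struct funeth_txq_stats snap;
+ *	FUN_QSTAT_READ(txq, seq, snap);	 now snap holds a consistent copy
+ */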
+
+#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
+
+struct fun_irq {
+ struct napi_struct napi;
+ struct funeth_txq *txq;
+ struct funeth_rxq *rxq;
+ u8 state;
+ u16 irq_idx; /* index of MSI-X interrupt */
+ int irq; /* Linux IRQ vector */
+ cpumask_t affinity_mask; /* IRQ affinity */
+ struct irq_affinity_notify aff_notify;
+ char name[FUN_INT_NAME_LEN];
+} ____cacheline_internodealigned_in_smp;
+
+/* Return the start address of the idx-th Tx descriptor. */
+static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
+ unsigned int idx)
+{
+ return q->desc + idx * FUNETH_SQE_SIZE;
+}
+
+static inline void fun_txq_wr_db(const struct funeth_txq *q)
+{
+ unsigned int tail = q->prod_cnt & q->mask;
+
+ writel(tail, q->db);
+}
+
+static inline int fun_irq_node(const struct fun_irq *p)
+{
+ return cpu_to_mem(cpumask_first(&p->affinity_mask));
+}
+
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
+int fun_txq_napi_poll(struct napi_struct *napi, int budget);
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp);
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp);
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);
+
+#endif /* _FUNETH_TXRX_H */
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 5f5d4f7aa813..160735484465 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -843,7 +843,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction);
+ enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 2ad7f57f7e5b..f7621ab672b9 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
*/
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
- u32 tail, head;
+ int tail, head;
int i;
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 50b384910c83..7b9a2d9d9624 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index f7f65c4bf993..d3e3ac242bfc 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -526,8 +526,7 @@ static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
}
static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
@@ -766,9 +765,9 @@ static void gve_free_rings(struct gve_priv *priv)
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(GFP_KERNEL);
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -811,7 +810,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
- gve_qpl_dma_dir(priv, id));
+ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
@@ -857,8 +856,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
int i, j;
int err;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return 0;
priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
@@ -901,8 +899,7 @@ static void gve_free_qpls(struct gve_priv *priv)
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
@@ -1276,9 +1273,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
do {
- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9ddcc497f48e..021bbf308d68 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
dma_addr_t dma;
int err;
- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
+ GFP_ATOMIC);
if (err)
return err;
@@ -438,7 +439,7 @@ static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
if (frag_size > rx->packet_buffer_size) {
packet_size_error = true;
netdev_warn(priv->dev,
- "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.",
+ "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
rx->packet_buffer_size, be16_to_cpu(desc->len));
}
page_info = &rx->data.page_info[idx];
@@ -608,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
*work_done = work_cnt;
+ skb_record_rx_queue(skb, rx->q_num);
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
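A minimal sketch (not part of the patch) of why gve_alloc_page() grows a gfp_t argument: setup paths run in process context and may sleep, while the NAPI refill path runs in softirq context and must not. The caller names here are hypothetical; the helper signature matches the hunks above.

static int gve_setup_alloc_sketch(struct gve_priv *priv, struct device *dev,
				  struct page **page, dma_addr_t *dma)
{
	/* process context: a sleeping allocation is fine */
	return gve_alloc_page(priv, dev, page, dma, DMA_FROM_DEVICE,
			      GFP_KERNEL);
}

static int gve_refill_alloc_sketch(struct gve_priv *priv, struct device *dev,
				   struct page **page, dma_addr_t *dma)
{
	/* softirq context: must not sleep */
	return gve_alloc_page(priv, dev, page, dma, DMA_FROM_DEVICE,
			      GFP_ATOMIC);
}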
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index beb8bb079023..2e6461b0ea8b 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index ec394d991668..588d64819ed5 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -386,7 +386,7 @@ static int gve_prep_tso(struct sk_buff *skb)
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
- header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
@@ -598,9 +598,9 @@ static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
*/
static bool gve_can_send_tso(const struct sk_buff *skb)
{
- const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
int cur_seg_size;
@@ -795,7 +795,7 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
/* No outstanding miss completion but packet allocated
* implies packet receives a re-injection completion
- * without a a prior miss completion. Return without
+ * without a prior miss completion. Return without
* completing the packet.
*/
net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
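A rough equivalent (not part of the patch) of the skb_tcp_all_headers() helper these hunks adopt; it folds the previously open-coded sum into a single call:

static inline int tcp_all_headers_sketch(const struct sk_buff *skb)
{
	/* L2 + L3 header bytes up to TCP, plus the TCP header itself */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}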
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84ef494bd60..50c3f5d6611f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -830,8 +830,8 @@ static int hip04_set_coalesce(struct net_device *netdev,
static void hip04_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
@@ -990,7 +990,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
- netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll);
hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
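A minimal sketch (not part of the patch) of the strlcpy()-to-strscpy() conversion above: strscpy() never reads past the destination size, always NUL-terminates, and returns -E2BIG on truncation instead of strlen(src).

static void fill_drvinfo_sketch(struct ethtool_drvinfo *drvinfo)
{
	/* DRV_NAME is the macro already used by hip04_eth.c above */
	if (strscpy(drvinfo->driver, DRV_NAME,
		    sizeof(drvinfo->driver)) < 0)
		pr_debug("driver name truncated\n");	/* -E2BIG */
}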
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index a6c18b6527f9..93846bace028 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -852,7 +852,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->netdev_ops = &hisi_femac_netdev_ops;
ndev->ethtool_ops = &hisi_femac_ethtools_ops;
- netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, hisi_femac_poll,
+ FEMAC_POLL_WEIGHT);
hisi_femac_port_init(priv);
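A minimal sketch (not part of the patch) of the two netif_napi_add() forms used above: the plain call now applies the default weight (NAPI_POLL_WEIGHT, 64) internally, and only drivers that need a custom weight, like hisi_femac, call netif_napi_add_weight().

static void napi_add_sketch(struct net_device *ndev, struct napi_struct *napi,
			    int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(ndev, napi, poll);	/* default weight of 64 */
	/* netif_napi_add_weight(ndev, napi, poll, FEMAC_POLL_WEIGHT); */
}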
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index d7e62eca050f..ffcf797dfa90 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1243,7 +1243,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto out_phy_node;
- netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll);
if (HAS_CAP_TSO(priv->hw_cap)) {
ret = hix5hd2_init_sg_desc_queue(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 00fafc0f8512..430eccea8e5e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
- if (ret)
+ if (ret) {
+ put_device(&hdev->cls_dev);
return ret;
+ }
__module_get(THIS_MODULE);
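A minimal sketch (not part of the patch) of the error pattern the hnae_ae_register() hunk adopts: once device_register() has been called, the struct device refcount is live even on failure, so the error path must drop it with put_device(), which invokes the release callback, rather than freeing the memory directly.

static int device_register_sketch(struct device *dev)
{
	int ret;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);	/* drops the ref; do not kfree() */
		return ret;
	}
	return 0;
}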
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 7edf8569514c..928d934cb21a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
device_for_each_child_node(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
- if (!mac_cb)
+ if (!mac_cb) {
+ fwnode_handle_put(child);
return -ENOMEM;
+ }
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
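A minimal sketch (not part of the patch) of the reference rule behind the hns_mac_init() hunks: device_for_each_child_node() holds a reference on the current child and drops it when the loop advances, so every early return inside the loop body must call fwnode_handle_put() first.

static int walk_children_sketch(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_read_u32(child, "reg", &reg)) {
			fwnode_handle_put(child);	/* drop the loop's ref */
			return -EINVAL;
		}
	}
	return 0;
}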
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22a463e15678..7cf10d1e2b31 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -31,8 +31,6 @@
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
- (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
@@ -94,7 +92,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -108,7 +106,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
}
desc->tx.ip_offset = ip_offset;
@@ -1782,7 +1780,7 @@ static int hns_nic_set_features(struct net_device *netdev,
priv->ops.fill_desc = fill_tso_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* The chip only supports 7*4096 */
- netif_set_gso_max_size(netdev, 7 * 4096);
+ netif_set_tso_max_size(netdev, 7 * 4096);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
@@ -2111,8 +2109,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
hns_nic_tx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2124,8 +2121,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
hns_nic_rx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
@@ -2168,7 +2164,7 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
priv->ops.fill_desc = fill_tso_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* This chip only supports 7*4096 */
- netif_set_gso_max_size(netdev, 7 * 4096);
+ netif_set_tso_max_size(netdev, 7 * 4096);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
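A minimal sketch (not part of the patch) of the rename above: netif_set_gso_max_size() set the administrative GSO limit, while netif_set_tso_max_size() records the device's hardware TSO capability, from which the core derives the advertised limits; a chip limit like 7*4096 belongs in the TSO maximum.

static void cap_tso_sketch(struct net_device *netdev)
{
	/* hardware limit of the HNS chip, per the comment above */
	netif_set_tso_max_size(netdev, 7 * 4096);
}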
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index d7a27c244d48..54faf0f2d1d8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -887,8 +887,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
p[21] = net_stats->rx_compressed;
p[22] = net_stats->tx_compressed;
- p[23] = netdev->rx_dropped.counter;
- p[24] = netdev->tx_dropped.counter;
+ p[23] = 0; /* was netdev->rx_dropped.counter */
+ p[24] = 0; /* was netdev->tx_dropped.counter */
p[25] = priv->tx_timeout_count;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index b668df6193be..abcd7877f7d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is uninitializing */
HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
+ HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
@@ -92,8 +93,8 @@ struct hclge_ring_chain_param {
struct hclge_basic_info {
u8 hw_tc_map;
u8 rsv;
- u16 mbx_api_version;
- u32 pf_caps;
+ __le16 mbx_api_version;
+ __le32 pf_caps;
};
struct hclgevf_mbx_resp_status {
@@ -134,11 +135,20 @@ struct hclge_vf_to_pf_msg {
};
struct hclge_pf_to_vf_msg {
- u16 code;
- u16 vf_mbx_msg_code;
- u16 vf_mbx_msg_subcode;
- u16 resp_status;
- u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ __le16 code;
+ union {
+ /* used for mbx response */
+ struct {
+ __le16 vf_mbx_msg_code;
+ __le16 vf_mbx_msg_subcode;
+ __le16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ };
+ /* used for general mbx */
+ struct {
+ u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ };
};
struct hclge_mbx_vf_to_pf_cmd {
@@ -148,7 +158,7 @@ struct hclge_mbx_vf_to_pf_cmd {
u8 rsv1[1];
u8 msg_len;
u8 rsv2;
- u16 match_id;
+ __le16 match_id;
struct hclge_vf_to_pf_msg msg;
};
@@ -159,7 +169,7 @@ struct hclge_mbx_pf_to_vf_cmd {
u8 rsv[3];
u8 msg_len;
u8 rsv1;
- u16 match_id;
+ __le16 match_id;
struct hclge_pf_to_vf_msg msg;
};
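A minimal sketch (not part of the patch) of what the __le16/__le32 annotations above require: explicit byte-order conversion at every producer and consumer, which sparse can then verify. The helper name is hypothetical; the layout is hclge_basic_info from the hunk above.

static void parse_basic_info_sketch(const struct hclge_basic_info *info,
				    u16 *api_version, u32 *caps)
{
	*api_version = le16_to_cpu(info->mbx_api_version);
	*caps = le32_to_cpu(info->pf_caps);
}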
@@ -169,6 +179,49 @@ struct hclge_vf_rst_cmd {
u8 rsv[22];
};
+#pragma pack(1)
+struct hclge_mbx_link_status {
+ __le16 link_status;
+ __le32 speed;
+ __le16 duplex;
+ u8 flag;
+};
+
+struct hclge_mbx_link_mode {
+ __le16 idx;
+ __le64 link_mode;
+};
+
+struct hclge_mbx_port_base_vlan {
+ __le16 state;
+ __le16 vlan_proto;
+ __le16 qos;
+ __le16 vlan_tag;
+};
+
+struct hclge_mbx_vf_queue_info {
+ __le16 num_tqps;
+ __le16 rss_size;
+ __le16 rx_buf_len;
+};
+
+struct hclge_mbx_vf_queue_depth {
+ __le16 num_tx_desc;
+ __le16 num_rx_desc;
+};
+
+struct hclge_mbx_vlan_filter {
+ u8 is_kill;
+ __le16 vlan_id;
+ __le16 proto;
+};
+
+struct hclge_mbx_mtu_info {
+ __le32 mtu;
+};
+
+#pragma pack()
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
@@ -177,9 +230,20 @@ struct hclgevf_mbx_arq_ring {
u32 head;
u32 tail;
atomic_t count;
- u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+ __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+};
+
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
};
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 9298fbecb31a..0179fc288f5f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -96,13 +96,16 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+ HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
};
-#define hnae3_dev_fd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
-#define hnae3_dev_gro_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
@@ -155,6 +158,15 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -167,6 +179,7 @@ struct hnae3_handle;
struct hnae3_queue {
void __iomem *io_base;
+ void __iomem *mem_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
@@ -182,6 +195,7 @@ struct hns3_mac_stats {
/* hnae3 loop mode */
enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
@@ -218,6 +232,8 @@ enum hnae3_fec_mode {
HNAE3_FEC_AUTO = 0,
HNAE3_FEC_BASER,
HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
HNAE3_FEC_USER_DEF,
};
@@ -265,6 +281,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TC_SCH_INFO,
HNAE3_DBG_CMD_QOS_PAUSE_CFG,
HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
HNAE3_DBG_CMD_QOS_BUF_CFG,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
@@ -303,6 +320,11 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -536,6 +558,8 @@ struct hnae3_ae_dev {
* Get 1588 rx hwstamp
* get_ts_info
* Get phc info
+ * clean_vf_config
+ * Clean residual VF info after SR-IOV is disabled
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -553,14 +577,17 @@ struct hnae3_ae_ops {
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex);
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
- u8 duplex);
+ u8 duplex, u8 lane_num);
void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode);
int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
@@ -729,6 +756,9 @@ struct hnae3_ae_ops {
struct ethtool_ts_info *info);
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
+ void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
};
struct hnae3_dcb_ops {
@@ -737,6 +767,8 @@ struct hnae3_dcb_ops {
int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
/* DCBX configuration */
u8 (*getdcbx)(struct hnae3_handle *);
@@ -761,10 +793,13 @@ struct hnae3_tc_info {
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
u16 tqp_count[HNAE3_MAX_TC];
u16 tqp_offset[HNAE3_MAX_TC];
+ u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
};
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -775,6 +810,9 @@ struct hnae3_knic_private_info {
u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
@@ -806,6 +844,7 @@ struct hnae3_roce_private_info {
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
@@ -841,6 +880,7 @@ struct hnae3_handle {
struct dentry *hnae3_dbgfs;
/* protects concurrent contention between debugfs commands */
struct mutex dbgfs_lock;
+ char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c15ca710dabb..f671a63cecde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -52,9 +52,9 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
{
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
}
@@ -91,6 +91,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
@@ -149,6 +150,11 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -160,6 +166,8 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
static void
@@ -218,8 +226,10 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= ae_dev->pdev->revision;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
hclge_comm_parse_capability(ae_dev, is_pf, resp);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 876650eddac4..b1f9383b418f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -20,6 +20,7 @@
#define HCLGE_COMM_PHY_IMP_EN_B 2
#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
#define hclge_comm_dev_phy_imp_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
@@ -102,6 +103,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
@@ -338,6 +340,11 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+ HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index aa1d7a6ff4ca..946d166a452d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -106,7 +106,7 @@ int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc);
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
- u32 *indir, __le16 rss_ind_tbl_size);
+ u32 *indir, u16 rss_ind_tbl_size);
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 0c60f41fca8a..f3c9395d8351 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
- "failed to get tqp stat, ret = %d, tx = %u.\n",
+ "failed to get tqp stat, ret = %d, rx = %u.\n",
ret, i);
return ret;
}
@@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
- "failed to get tqp stat, ret = %d, rx = %u.\n",
+ "failed to get tqp stat, ret = %d, tx = %u.\n",
ret, i);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index d2ec4c573bf8..3b6dbf158b98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -56,6 +56,32 @@ static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_delapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
@@ -83,6 +109,8 @@ static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
.ieee_setets = hns3_dcbnl_ieee_setets,
.ieee_getpfc = hns3_dcbnl_ieee_getpfc,
.ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
.getdcbx = hns3_dcbnl_getdcbx,
.setdcbx = hns3_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index f726a5b70f9e..66feb23f7b7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -106,6 +106,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.init = hns3_dbg_common_file_init,
},
{
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
@@ -395,6 +402,12 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support modify vlan filter state",
.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
}
};
@@ -562,12 +575,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
for (i = 0; i < ring_num; i++) {
j = 0;
- sprintf(result[j++], "%8u", i);
- sprintf(result[j++], "%9u", ring->tx_copybreak);
- sprintf(result[j++], "%3u", tx_spare->len);
- sprintf(result[j++], "%3u", tx_spare->next_to_use);
- sprintf(result[j++], "%3u", tx_spare->next_to_clean);
- sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u", ring->tx_copybreak);
+ sprintf(result[j++], "%u", tx_spare->len);
+ sprintf(result[j++], "%u", tx_spare->next_to_use);
+ sprintf(result[j++], "%u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%u", tx_spare->last_to_clean);
sprintf(result[j++], "%pad", &tx_spare->dma);
hns3_dbg_fill_content(content, sizeof(content),
tx_spare_info_items,
@@ -598,35 +611,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
+ sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%9u", ring->rx_copybreak);
+ sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
@@ -700,36 +713,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG));
- sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
@@ -848,15 +861,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%5d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
@@ -930,19 +943,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%6d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
sprintf(result[j++], "%#x",
le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%10u",
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
+ sprintf(result[j++], "%u",
le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
}
static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
@@ -1227,7 +1240,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
return ret;
mutex_lock(&handle->dbgfs_lock);
- save_buf = &hns3_dbg_cmd[index].buf;
+ save_buf = &handle->dbgfs_buf[index];
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
@@ -1332,6 +1345,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
int ret;
u32 i;
+ handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
+ ARRAY_SIZE(hns3_dbg_cmd),
+ sizeof(*handle->dbgfs_buf),
+ GFP_KERNEL);
+ if (!handle->dbgfs_buf)
+ return -ENOMEM;
+
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1380,9 +1400,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (hns3_dbg_cmd[i].buf) {
- kvfree(hns3_dbg_cmd[i].buf);
- hns3_dbg_cmd[i].buf = NULL;
+ if (handle->dbgfs_buf[i]) {
+ kvfree(handle->dbgfs_buf[i]);
+ handle->dbgfs_buf[i] = NULL;
}
mutex_destroy(&handle->dbgfs_lock);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 83aa1450ab9f..97578eabb7d8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -49,7 +49,6 @@ struct hns3_dbg_cmd_info {
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
u32 buf_len;
- char *buf;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index babc5d7a3b52..4cb2421e71a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1028,46 +1028,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
+ u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
struct hns3_tx_spare *tx_spare;
struct page *page;
- u32 alloc_size;
dma_addr_t dma;
int order;
- alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
if (!alloc_size)
return;
order = get_order(alloc_size);
+ if (order >= MAX_ORDER) {
+ if (net_ratelimit())
+ dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer: page order exceeds max order\n");
+ return;
+ }
+
tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
GFP_KERNEL);
if (!tx_spare) {
/* The driver still works without the tx spare buffer */
dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
- return;
+ goto devm_kzalloc_error;
}
page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
GFP_KERNEL, order);
if (!page) {
dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
- devm_kfree(ring_to_dev(ring), tx_spare);
- return;
+ goto alloc_pages_error;
}
dma = dma_map_page(ring_to_dev(ring), page, 0,
PAGE_SIZE << order, DMA_TO_DEVICE);
if (dma_mapping_error(ring_to_dev(ring), dma)) {
dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
- put_page(page);
- devm_kfree(ring_to_dev(ring), tx_spare);
- return;
+ goto dma_mapping_error;
}
tx_spare->dma = dma;
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ return;
+
+dma_mapping_error:
+ put_page(page);
+alloc_pages_error:
+ devm_kfree(ring_to_dev(ring), tx_spare);
+devm_kzalloc_error:
+ ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
}
/* Use hns3_tx_spare_space() to make sure there is enough buffer
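A minimal sketch (not part of the patch) of the goto-unwind idiom the hns3_init_tx_spare_buffer() rewrite adopts: a single exit ladder in reverse allocation order, so each failure frees exactly the steps that had succeeded.

static int unwind_sketch(void **pa, void **pb)
{
	*pa = kzalloc(16, GFP_KERNEL);
	if (!*pa)
		return -ENOMEM;
	*pb = kzalloc(16, GFP_KERNEL);
	if (!*pb)
		goto err_free_a;
	return 0;

err_free_a:
	kfree(*pa);
	return -ENOMEM;
}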
@@ -1828,9 +1838,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
if (!skb->encapsulation)
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_tcp_all_headers(skb);
- return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
}
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
@@ -2028,9 +2038,73 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
+{
+#define HNS3_BYTES_PER_64BIT 8
+
+ struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
+ int offset = 0;
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating doorbell
+ */
+ dma_wmb();
+
+ do {
+ int idx = (ring->next_to_use - num + ring->desc_num) %
+ ring->desc_num;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_push++;
+ u64_stats_update_end(&ring->syncp);
+ memcpy(&desc[offset], &ring->desc[idx],
+ sizeof(struct hns3_desc));
+ offset++;
+ } while (--num);
+
+ __iowrite64_copy(ring->tqp->mem_base, desc,
+ (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
+ HNS3_BYTES_PER_64BIT);
+
+ io_stop_wc();
+}
+
+static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
+{
+#define HNS3_MEM_DOORBELL_OFFSET 64
+
+ __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating doorbell
+ */
+ dma_wmb();
+
+ __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
+ &bd_num, 1);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_mem_doorbell += ring->pending_buf;
+ u64_stats_update_end(&ring->syncp);
+
+ io_stop_wc();
+}
+
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
bool doorbell)
{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ /* when tx push is enabled, packets using no more than
+ * HNS3_MAX_PUSH_BD_NUM BDs can be pushed directly.
+ */
+ if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ hns3_tx_push_bd(ring, num);
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ return;
+ }
+
ring->pending_buf += num;
if (!doorbell) {
@@ -2038,11 +2112,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- if (!ring->pending_buf)
- return;
+ if (ring->tqp->mem_base)
+ hns3_tx_mem_doorbell(ring);
+ else
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
- writel(ring->pending_buf,
- ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
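A minimal sketch (not part of the patch) of the write-combining doorbell path added above: descriptors are copied into the WC-mapped BAR with __iowrite64_copy() (the count is in 64-bit words), and io_stop_wc() then forces the write-combining buffer out to the device promptly (a DGH barrier on arm64, a no-op elsewhere).

static void wc_push_sketch(void __iomem *wc_base, const void *desc,
			   size_t bytes)
{
	dma_wmb();			/* ring memory visible to device first */
	__iowrite64_copy(wc_base, desc, bytes / 8);
	io_stop_wc();			/* flush the WC buffer without delay */
}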
@@ -2732,6 +2807,9 @@ static void hns3_dump_queue_stats(struct net_device *ndev,
"seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
+ tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
}
static void hns3_dump_queue_reg(struct net_device *ndev,
@@ -2885,6 +2963,48 @@ static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -2910,6 +3030,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
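A worked example (not part of the patch) of the DSCP math in hns3_get_skb_dscp(): DSCP is the upper six bits of the IPv4 TOS / IPv6 traffic-class byte, hence the shift by HNS3_DSCP_SHIFT (2); ndo_select_queue then maps the result through kinfo.dscp_prio[].

static u8 dsfield_to_dscp_sketch(u8 dsfield)
{
	return dsfield >> 2;	/* e.g. EF: 0xb8 >> 2 == 46 */
}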
@@ -2982,6 +3103,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
}
+/**
+ * hns3_clean_vf_config
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs allocated
+ *
+ * Clean residual vf config after disable sriov
+ **/
+static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (ae_dev->ops->clean_vf_config)
+ ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
+}
+
/* hns3_remove - Device removal routine
* @pdev: PCI device information struct
*/
@@ -3020,7 +3156,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
else
return num_vfs;
} else if (!pci_vfs_assigned(pdev)) {
+ int num_vfs_pre = pci_num_vf(pdev);
+
pci_disable_sriov(pdev);
+ hns3_clean_vf_config(pdev, num_vfs_pre);
} else {
dev_warn(&pdev->dev,
"Unable to free VFs because some are assigned to VMs.\n");
@@ -3175,12 +3314,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (hnae3_ae_dev_gro_supported(ae_dev))
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->features |= NETIF_F_NTUPLE;
- }
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4554,7 +4692,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
- hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ hns3_nic_common_poll);
}
return 0;
@@ -5063,10 +5201,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode;
}
- /* only device version above V3(include V3), GL can switch CQ/EQ
- * period mode.
- */
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ if (hnae3_ae_dev_cq_supported(ae_dev)) {
u32 new_mode;
u64 reg;
@@ -5094,6 +5229,9 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
@@ -5104,6 +5242,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
}
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+ struct hns3_nic_priv *priv = handle->priv;
+
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -5221,7 +5366,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_reg_netdev_fail:
+ hns3_state_uninit(handle);
hns3_dbg_uninit(handle);
+ hns3_client_stop(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
@@ -5677,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev,
return 0;
}
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during the reset process, because the driver may not be able
+ * to disable the ring through firmware when bringing the netdev down.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
static const struct hns3_hw_error_info hns3_hw_err[] = {
{ .type = HNAE3_PPU_POISON_ERROR,
.msg = "PPU poison" },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a05a0c7423ce..133a054af6b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -7,6 +7,7 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
#include <net/page_pool.h>
+#include <asm/barrier.h>
#include "hnae3.h"
@@ -25,9 +26,12 @@ enum hns3_nic_state {
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
+ HNS3_NIC_STATE_TX_PUSH_ENABLE,
HNS3_NIC_STATE_MAX
};
+#define HNS3_MAX_PUSH_BD_NUM 2
+
#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
@@ -410,6 +414,8 @@ struct ring_stats {
u64 tx_pkts;
u64 tx_bytes;
u64 tx_more;
+ u64 tx_push;
+ u64 tx_mem_doorbell;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
@@ -738,4 +744,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
enum dim_cq_period_mode tx_mode,
enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c06c39ece80d..cdf76fb58d45 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -23,6 +23,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("push", tx_push),
+ HNS3_TQP_STAT("mem_doorbell", tx_mem_doorbell),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
@@ -67,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -93,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -302,6 +304,10 @@ out:
static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -320,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
}
-static void hns3_selftest_prepare(struct net_device *ndev,
- bool if_running, int (*st_param)[2])
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test start\n");
-
- hns3_set_selftest_param(h, st_param);
-
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -369,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
-
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test end\n");
}
static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
struct ethtool_test *eth_test, u64 *data)
{
- int test_index = 0;
+ int test_index = HNAE3_LOOP_APP;
u32 i;
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
@@ -399,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
}
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
/**
* hns3_self_test - self test
* @ndev: net device
@@ -408,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
if (hns3_nic_resetting(ndev)) {
@@ -416,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev,
return;
}
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
return;
- hns3_selftest_prepare(ndev, if_running, st_param);
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* the external loopback test requires the link to be up in full
+ * duplex; run it first to reduce the total test time
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
hns3_do_selftest(ndev, st_param, eth_test, data);
hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
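A minimal sketch (not part of the patch) of the ethtool flag handshake used above: userspace requests the external loopback with ETH_TEST_FL_EXTERNAL_LB (ethtool -t <dev> external_lb), and a driver that actually ran the test acknowledges with ETH_TEST_FL_EXTERNAL_LB_DONE.

static void external_lb_flags_sketch(struct ethtool_test *eth_test, u64 *data)
{
	if (!(eth_test->flags & ETH_TEST_FL_EXTERNAL_LB))
		return;
	data[0] = 0;	/* run the test and store its result here */
	eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}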
@@ -651,8 +680,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int rx_queue_index = h->kinfo.num_tqps;
- if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ if (hns3_nic_resetting(netdev) || !priv->ring) {
+ netdev_err(netdev, "failed to get ringparam value: device is resetting or uninitialized\n");
return;
}
@@ -662,6 +691,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_pending = priv->ring[0].desc_num;
param->rx_pending = priv->ring[rx_queue_index].desc_num;
kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+ kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ &priv->state);
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -708,7 +739,8 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
- &cmd->base.duplex);
+ &cmd->base.duplex,
+ &cmd->lanes);
/* 2.get link mode */
if (ops->get_link_mode)
@@ -790,6 +822,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
u8 autoneg;
u32 speed;
u8 duplex;
@@ -802,9 +835,9 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
return 0;
if (ops->get_ksettings_an_result) {
- ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
- cmd->base.duplex == duplex)
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
return 0;
}
@@ -841,10 +874,14 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
netif_dbg(handle, drv, netdev,
- "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
netdev->phydev ? "phy" : "mac",
- cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev) {
@@ -882,7 +919,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (ops->cfg_mac_speed_dup_h)
ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, (u8)(cmd->lanes));
return ret;
}
@@ -1072,8 +1109,14 @@ static int hns3_check_ringparam(struct net_device *ndev,
{
#define RX_BUF_LEN_2K 2048
#define RX_BUF_LEN_4K 4096
- if (hns3_nic_resetting(ndev))
+
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (hns3_nic_resetting(ndev) || !priv->ring) {
+ netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n");
return -EBUSY;
+ }
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
@@ -1096,6 +1139,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct hns3_ring_param *old_ringparam,
+ struct hns3_ring_param *new_ringparam)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+ old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+ old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+ new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+ if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+ old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+ old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+ netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
+ return false;
+ }
+
+ return true;
+}
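/* Sketch of the rounding above, assuming HNS3_RING_BD_MULTIPLE is 8
 * (the "multiple of eight" BD granularity the old comment in
 * hns3_set_ringparam named):
 *
 *   ALIGN(1024, 8) == 1024   already a multiple, kept as is
 *   ALIGN(1025, 8) == 1032   rounded up to the next multiple of 8
 *
 * so a user request is rounded up, never silently shrunk.
 */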
+
static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -1112,62 +1185,80 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
return 0;
}
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+ return -EOPNOTSUPP;
+
+ if (tx_push == old_state)
+ return 0;
+
+ netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+ old_state ? "on" : "off", tx_push ? "on" : "off");
+
+ if (tx_push)
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+ else
+ clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ return 0;
+}
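/* Userspace sketch, assuming an ethtool with tx-push support:
 *
 *   ethtool -G eth0 tx-push on
 *   ethtool -g eth0            # shows the current tx-push state
 *
 * Requests to enable tx push on a device without the
 * HNAE3_DEV_SUPPORT_TX_PUSH_B capability fail with -EOPNOTSUPP.
 */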
+
static int hns3_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
+ struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- u16 queue_num = h->kinfo.num_tqps;
- u32 old_rx_buf_len;
int ret, i;
ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
- /* Hardware requires that its descriptors must be multiple of eight */
- new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
- new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring[0].desc_num;
- old_rx_desc_num = priv->ring[queue_num].desc_num;
- old_rx_buf_len = priv->ring[queue_num].buf_size;
- if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num &&
- kernel_param->rx_buf_len == old_rx_buf_len)
+ ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+ if (ret)
+ return ret;
+
+ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+ &old_ringparam, &new_ringparam))
return 0;
tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) {
- netdev_err(ndev,
- "backup ring param failed by allocating memory fail\n");
+ netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
return -ENOMEM;
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
- old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num,
- old_rx_buf_len, kernel_param->rx_buf_len);
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+ old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+ new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+ old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
- hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+ new_ringparam.rx_desc_num);
+ hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
- hns3_change_rx_buf_len(ndev, old_rx_buf_len);
- hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
+ old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
@@ -1377,11 +1468,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0;
}
-static int hns3_check_coalesce_para(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+ !hnae3_ae_dev_cq_supported(ae_dev)) {
+ netdev_err(netdev, "coalesced cqe mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
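/* Userspace sketch, assuming an ethtool that knows the CQE mode knobs:
 *
 *   ethtool -C eth0 cqe-mode-rx on cqe-mode-tx on
 *
 * On devices without CQ support the check above rejects the request
 * early with -EOPNOTSUPP instead of passing it to the hardware.
 */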
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
int ret;
+ ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+ if (ret)
+ return ret;
+
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
@@ -1456,7 +1569,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev))
return -EBUSY;
- ret = hns3_check_coalesce_para(netdev, cmd);
+ ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret)
return ret;
@@ -1532,6 +1645,19 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
h->msg_enable = msg_level;
}
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
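/* Illustration only: the counters filled in here surface through the
 * standard FEC statistics interface, e.g. with an ethtool that
 * supports --include-statistics:
 *
 *   ethtool -I --show-fec eth0
 *
 * Devices without the FEC-stats capability leave fec_stats untouched,
 * so no counters are reported for them.
 */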
+
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
@@ -1541,12 +1667,12 @@ static unsigned int loc_to_eth_fec(u8 loc_fec)
eth_fec |= ETHTOOL_FEC_AUTO;
if (loc_fec & BIT(HNAE3_FEC_RS))
eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
if (loc_fec & BIT(HNAE3_FEC_BASER))
eth_fec |= ETHTOOL_FEC_BASER;
-
- /* if nothing is set, then FEC is off */
- if (!eth_fec)
- eth_fec = ETHTOOL_FEC_OFF;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
return eth_fec;
}
@@ -1557,12 +1683,13 @@ static unsigned int eth_to_loc_fec(unsigned int eth_fec)
u32 loc_fec = 0;
if (eth_fec & ETHTOOL_FEC_OFF)
- return loc_fec;
-
+ loc_fec |= BIT(HNAE3_FEC_NONE);
if (eth_fec & ETHTOOL_FEC_AUTO)
loc_fec |= BIT(HNAE3_FEC_AUTO);
if (eth_fec & ETHTOOL_FEC_RS)
loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
if (eth_fec & ETHTOOL_FEC_BASER)
loc_fec |= BIT(HNAE3_FEC_BASER);
@@ -1588,6 +1715,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
fec->fec = loc_to_eth_fec(fec_ability);
fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
return 0;
}
@@ -1764,9 +1893,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int ret;
- if (hns3_nic_resetting(netdev))
- return -EBUSY;
-
h->kinfo.tx_spare_buf_size = data;
ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
@@ -1797,6 +1923,11 @@ static int hns3_set_tunable(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int i, ret = 0;
+ if (hns3_nic_resetting(netdev) || !priv->ring) {
+ netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ return -EBUSY;
+ }
+
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
priv->tx_copybreak = *(u32 *)data;
@@ -1815,22 +1946,30 @@ static int hns3_set_tunable(struct net_device *netdev,
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
+ netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+ old_tx_spare_buf_size, new_tx_spare_buf_size);
ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
- if (ret) {
+ if (ret ||
+ (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
int ret1;
- netdev_warn(netdev,
- "change tx spare buf size fail, revert to old value\n");
+ netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
ret1 = hns3_set_tx_spare_buf_size(netdev,
old_tx_spare_buf_size);
if (ret1) {
- netdev_err(netdev,
- "revert to old tx spare buf size fail\n");
+ netdev_err(netdev, "revert to old tx spare buf size fail\n");
return ret1;
}
return ret;
}
+
+ if (!priv->ring->tx_spare)
+ netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n");
+ else
+ netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+ priv->ring->tx_spare->len);
+
break;
default:
ret = -EOPNOTSUPP;
@@ -1847,7 +1986,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
-#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
+ ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
@@ -1960,6 +2100,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
@@ -1990,6 +2131,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
index 822d6fcbc73b..da207d1d9aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate;
};
+struct hns3_ring_param {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+ u32 rx_buf_len;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 5153e5d41bbd..b8a1ecb4b8fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -37,8 +37,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
__entry->hdr_len = skb->encapsulation ?
- skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
- skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
__entry->ip_summed = skb->ip_summed;
__entry->fraglist = skb_has_frag_list(skb);
hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
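/* The two helpers used above are plain wrappers; conceptually:
 *
 *   skb_tcp_all_headers(skb)       == skb_transport_offset(skb) +
 *                                     tcp_hdrlen(skb)
 *   skb_inner_tcp_all_headers(skb) == skb_inner_transport_offset(skb) +
 *                                     inner_tcp_hdrlen(skb)
 *
 * so the recorded hdr_len is unchanged, only the call site is shorter.
 */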
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f9d89511eb32..43cada51d8cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -321,7 +321,9 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
#define HCLGE_TQP_ENABLE_B 0
@@ -347,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -359,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 69b8673436ca..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -359,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
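/* Userspace sketch (iproute2 dcb; option names assumed from its
 * dcb-app interface):
 *
 *   dcb app add  dev eth0 dscp-prio 24:3   # map DSCP 24 to priority 3
 *   dcb app show dev eth0 dscp-prio
 *   dcb app del  dev eth0 dscp-prio 24:3
 *
 * Note that setapp replaces a mapping safely: the new entry is
 * installed first and the old one deleted only after the hardware
 * DSCP-to-TC map update succeeded.
 */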
+
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -543,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9b870e79c290..142415c84c6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,6 +14,8 @@ static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -1115,10 +1117,11 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
return 0;
}
+#define HCLGE_DBG_TC_MASK 0x0F
+
static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
int len)
{
-#define HCLGE_DBG_TC_MASK 0x0F
#define HCLGE_DBG_TC_BIT_WIDTH 4
struct hclge_qos_pri_map_cmd *pri_map;
@@ -1152,6 +1155,58 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
return 0;
}
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp settings use bd0, the high 32 use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting takes 4 bits, so each byte holds two
+ * settings
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
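/* Layout sketch for the unpacking loop above, assuming
 * HCLGE_DSCP_TC_SHIFT(i) selects the nibble, i.e. 4 * (i & 1):
 *
 *   req0[0] = (tc of dscp 1) << 4 | (tc of dscp 0)
 *   req0[1] = (tc of dscp 3) << 4 | (tc of dscp 2)
 *   ...
 *   req1[0] = (tc of dscp 33) << 4 | (tc of dscp 32)
 *
 * so dscp i sits in byte i >> 1, low nibble for even i, high for odd.
 */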
+
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
@@ -1517,7 +1572,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
char *tcam_buf;
int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
return -EOPNOTSUPP;
@@ -1585,6 +1640,9 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
u64 cnt;
u8 i;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
pos += scnprintf(buf + pos, len - pos,
"func_id\thit_times\n");
@@ -2374,6 +2432,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.dbg_dump = hclge_dbg_dump_qos_pri_map,
},
{
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 42a9e73d8588..6efd768cc07c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers via the command interface.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 24f7afacae02..987271da6e9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -71,6 +71,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -148,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -679,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -715,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -770,6 +778,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size, p);
p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -1003,6 +1016,27 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
@@ -1101,34 +1135,36 @@ static void hclge_convert_setting_kr(u16 speed_ability,
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If the firmware reported fec_ability, no need to derive it from speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
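/* The fallback table above, summarized (NONE means "FEC off" is a
 * selectable mode, AUTO means firmware-chosen):
 *
 *   10G / 40G : BASER | AUTO | NONE
 *   25G / 50G : BASER | RS | AUTO | NONE
 *   100G      : RS | AUTO | NONE
 *   200G      : RS | LLRS | AUTO
 *
 * A firmware-reported fec_ability, when present, takes precedence.
 */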
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
@@ -1546,9 +1582,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- int node, ret;
+ int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1575,7 +1610,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1594,13 +1629,6 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
- /* Set the affinity based on numa node */
- node = dev_to_node(&hdev->pdev->dev);
- if (node != NUMA_NO_NODE)
- cpumask = cpumask_of_node(node);
-
- cpumask_copy(&hdev->affinity_mask, cpumask);
-
return ret;
}
@@ -1625,7 +1653,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -1643,6 +1671,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -1676,6 +1705,14 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE;
+ /* When the device supports tx push and has device memory,
+ * the queue can use either push mode or doorbell mode on
+ * that memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -1863,6 +1900,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->port_base_vlan_cfg.tbl_sta = true;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
@@ -2587,7 +2625,7 @@ static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2611,6 +2649,7 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2622,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
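/* Userspace sketch, assuming an ethtool with link-lanes support:
 *
 *   ethtool -s eth0 speed 100000 duplex full lanes 2
 *
 * lanes == 0 means "unspecified": the compare above then treats the
 * lane number as unchanged and only speed/duplex are considered.
 */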
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2728,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
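/* Descriptor walk above, sketched with assumed sizes (6 u32 payload
 * words per descriptor, two counters -- corrected and uncorrected --
 * per lane):
 *
 *   lane counter k  ->  desc[k / 6].data[k % 6]
 *
 * The desc_index bound check makes a short reply merely truncate the
 * per-lane stats instead of reading past the descriptor array.
 */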
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2742,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2794,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2986,6 +3181,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3035,7 +3233,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3117,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3266,7 +3465,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3277,10 +3476,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
ret = hclge_get_sfp_info(hdev, mac);
- else
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
ret = hclge_get_sfp_speed(hdev, &speed);
+ }
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
@@ -3292,16 +3494,18 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -3374,6 +3578,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
+ /* Return success directly if the VF is not alive; the VF will
+ * query the link state itself when it starts up.
+ */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
ret = hclge_push_vf_link_status(vport);
if (ret) {
vport->vf_info.link_state = link_state_old;
@@ -3554,17 +3764,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
-static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq,
- &hdev->affinity_mask);
-}
-
-static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
-}
-
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -5332,7 +5531,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -6337,7 +6536,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6393,7 +6592,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6429,9 +6628,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6456,6 +6652,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6471,7 +6670,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6495,7 +6694,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6713,7 +6912,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6776,7 +6975,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6876,7 +7075,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when an fd rule added by the user already exists,
@@ -7165,6 +7364,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7218,6 +7423,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7280,6 +7488,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7703,7 +7914,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7730,6 +7941,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -8429,12 +8642,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret) {
+ if (!ret || ret == -ENOENT) {
mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
mutex_unlock(&hdev->vport_lock);
- } else if (ret == -ENOENT) {
- ret = 0;
+ return 0;
}
return ret;
@@ -8984,11 +9196,16 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
ether_addr_copy(vport->vf_info.mac, mac_addr);
+ /* There is a time window before the PF learns the VF is down,
+ * so sending the mailbox message may fail; that is harmless,
+ * as the VF will query the MAC again when it reinitializes.
+ */
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
"MAC of VF %d has been set to %s, and it will be reinitialized!\n",
vf, format_mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ (void)hclge_inform_reset_assert_to_vf(vport);
+ return 0;
}
dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
@@ -9809,19 +10026,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
- if (vlan->vlan_id == vlan_id)
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
+ }
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
+ if (!vlan) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
+ mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
@@ -9830,6 +10056,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
int ret;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
@@ -9839,12 +10067,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
ret);
+
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
}
vlan->hd_tbl_status = true;
}
+ mutex_unlock(&hdev->vport_lock);
+
return 0;
}
@@ -9854,6 +10086,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
if (is_write_tbl && vlan->hd_tbl_status)
@@ -9868,6 +10102,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
break;
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@@ -9875,6 +10111,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->hd_tbl_status)
hclge_set_vlan_filter_hw(hdev,
@@ -9890,6 +10128,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
}
}
clear_bit(vport->vport_id, hdev->vf_vlan_full);
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -9898,6 +10137,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
+ mutex_lock(&hdev->vport_lock);
+
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -9905,37 +10146,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
-void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
- struct hclge_vport_vlan_cfg *vlan, *tmp;
- struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
u16 vlan_proto;
u16 vlan_id;
u16 state;
+ int vf_id;
int ret;
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- state = vport->port_base_vlan_cfg.state;
+ /* The PF should restore the port-based VLAN of all VFs */
+ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
+ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
+ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
+ &vport->port_base_vlan_cfg.vlan_info :
+ &vport->port_base_vlan_cfg.old_vlan_info;
- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- return;
+ vlan_id = vlan_info->vlan_tag;
+ vlan_proto = vlan_info->vlan_proto;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id,
+ vlan_id, false);
+ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
+ }
}
+}
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, false);
- if (ret)
- break;
- vlan->hd_tbl_status = true;
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
}
+
+ mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
@@ -9975,6 +10240,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
struct hnae3_handle *handle = &vport->nic;
hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_port_base_vlan_config(hdev);
hclge_restore_vport_vlan_table(vport);
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
@@ -10031,6 +10297,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
+ vport->port_base_vlan_cfg.tbl_sta = false;
+
/* force add VLAN 0 */
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
if (ret)
@@ -10071,6 +10339,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
if (ret)
return ret;
+ vport->port_base_vlan_cfg.tbl_sta = false;
/* remove old VLAN tag */
if (old_info->vlan_tag == 0)
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
@@ -10120,7 +10389,9 @@ out:
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ vport->port_base_vlan_cfg.tbl_sta = true;
hclge_set_vport_vlan_fltr_change(vport);
return 0;
@@ -10188,14 +10459,17 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return ret;
}
- /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
+ /* There is a time window before the PF learns the VF is down,
+ * so sending the mailbox message may fail; that is harmless,
+ * as the VF will query it when it reinitializes.
+ * For DEVICE_VERSION_V3, the VF does not need to know about the port based
* VLAN state.
*/
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- vport->vport_id, state,
- &vlan_info);
+ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ state, &vlan_info);
return 0;
}
@@ -10253,11 +10527,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
}
if (!ret) {
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
+ if (!is_kill)
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
+ else if (is_kill && vlan_id != 0)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
} else if (is_kill) {
/* when removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
@@ -10379,6 +10653,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
@@ -10727,7 +11004,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -10738,6 +11015,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -11008,8 +11287,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
-#define HCLGE_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclge_hw *hw = &hdev->hw;
@@ -11379,6 +11656,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11386,11 +11667,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- /* Setup affinity after service timer setup because add_timer_on
- * is called in affinity notify.
- */
- hclge_misc_affinity_setup(hdev);
-
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -11451,6 +11727,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -11808,7 +12085,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_vf_rate(hdev);
hclge_clear_vf_vlan(hdev);
- hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
hclge_ptp_uninit(hdev);
hclge_uninit_rxd_adv_layout(hdev);
@@ -11831,8 +12107,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
- mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -12656,6 +12932,72 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
return 0;
}
+/* After SR-IOV is disabled, the VF still holds some configuration and
+ * info, set up by the PF, that needs cleaning.
+ */
+static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ int ret;
+
+ /* after SR-IOV is disabled, clean the VF rate configured by the PF */
+ ret = hclge_tm_qs_shaper_cfg(vport, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d rate config, ret = %d\n",
+ vfid, ret);
+
+ vlan_info.vlan_tag = 0;
+ vlan_info.qos = 0;
+ vlan_info.vlan_proto = ETH_P_8021Q;
+ ret = hclge_update_port_base_vlan_cfg(vport,
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ &vlan_info);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d port base vlan, ret = %d\n",
+ vfid, ret);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d spoof config, ret = %d\n",
+ vfid, ret);
+
+ memset(&vport->vf_info, 0, sizeof(vport->vf_info));
+}
+
+static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ hclge_clear_vport_vf_info(vport, i);
+ }
+}
+
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = vport->nic.kinfo.tc_map_mode;
+ if (priority)
+ *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ vport->nic.kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12679,6 +13021,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_comm_get_rss_key_size,
@@ -12757,6 +13100,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
+ .clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12764,7 +13109,7 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
@@ -12779,7 +13124,7 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index adfb26e79262..495b639b0dc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -169,6 +169,14 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
#define HCLGE_TRIGGER_IMP_RESET_B 7U
+#define HCLGE_TQP_MEM_SIZE 0x10000
+#define HCLGE_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for NIC */
+#define HCLGE_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1)
+#define HCLGE_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i))
+
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
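The two offset macros above split BAR4 in half (RoCE in the low half, NIC in the high half) and carve a fixed 64 KiB doorbell window per TQP out of the NIC half. A minimal userspace sketch of the arithmetic, assuming a hypothetical 64 MiB BAR4 (the real length comes from pci_resource_len()):

/* Hedged sketch of the HCLGE_NIC_MEM_OFFSET/HCLGE_TQP_MEM_OFFSET
 * arithmetic; the 64 MiB BAR length is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define TQP_MEM_SIZE 0x10000UL	/* matches HCLGE_TQP_MEM_SIZE */

int main(void)
{
	uint64_t bar_len = 64ULL << 20;		/* assumed pci_resource_len() */
	uint64_t nic_off = bar_len >> 1;	/* second half is for NIC */

	for (unsigned int i = 0; i < 4; i++)
		printf("tqp %u mem offset: 0x%llx\n", i,
		       (unsigned long long)(nic_off + TQP_MEM_SIZE * i));
	return 0;
}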
@@ -208,6 +216,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -250,6 +259,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -480,6 +490,26 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+	/* fec base-r mode per-lane stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
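The union in struct hclge_fec_stats lets the BASE-R per-lane counters be addressed either by name or as one flat array, so a firmware-descriptor parser can fill every lane in a single loop. A self-contained sketch of that aliasing (type and field names shortened here, not the kernel's):

/* Hedged sketch of the union aliasing in hclge_fec_stats: a store
 * through the flat per_lanes[] view is visible through the named
 * per-lane arrays.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_LANES 8

struct fec_lane_stats {
	union {
		struct {
			uint64_t corr[MAX_LANES];
			uint64_t uncorr[MAX_LANES];
		};
		uint64_t per_lanes[MAX_LANES * 2];
	};
};

int main(void)
{
	struct fec_lane_stats s;

	memset(&s, 0, sizeof(s));
	for (int i = 0; i < MAX_LANES * 2; i++)
		s.per_lanes[i] = i;	/* fill via the flat view */

	/* prints 0 and 8: uncorr[] starts at per_lanes[MAX_LANES] */
	printf("lane0 corr=%llu uncorr=%llu\n",
	       (unsigned long long)s.corr[0],
	       (unsigned long long)s.uncorr[0]);
	return 0;
}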
@@ -772,8 +802,8 @@ struct hclge_vf_vlan_cfg {
union {
struct {
u8 is_kill;
- u16 vlan;
- u16 proto;
+ __le16 vlan;
+ __le16 proto;
};
u8 enable;
};
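Retyping vlan/proto as __le16 makes the mailbox wire format explicit: the sender stores with cpu_to_le16() and the receiver loads with le16_to_cpu(), so the layout is identical on little- and big-endian hosts. A userspace analogue of the pattern, using stand-in helpers rather than the kernel's byte-order macros:

/* Hedged sketch of the cpu_to_le16()/le16_to_cpu() discipline the
 * patch applies to mailbox fields; helpers here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

/* store a host u16 into a little-endian wire buffer */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t wire[2];

	put_le16(wire, 0x8100);		/* e.g. ETH_P_8021Q */
	printf("wire bytes: %02x %02x, back to host: 0x%04x\n",
	       wire[0], wire[1], get_le16(wire));
	return 0;
}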
@@ -818,6 +848,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -930,8 +961,6 @@ struct hclge_dev {
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
- /* affinity mask and notify for misc interrupt */
- cpumask_t affinity_mask;
struct hclge_ptp *ptp;
struct devlink *devlink;
struct hclge_comm_rss_cfg rss_cfg;
@@ -977,7 +1006,9 @@ struct hclge_vlan_info {
struct hclge_port_base_vlan_config {
u16 state;
+ bool tbl_sta;
struct hclge_vlan_info vlan_info;
+ struct hclge_vlan_info old_vlan_info;
};
struct hclge_vf_info {
@@ -1023,6 +1054,7 @@ struct hclge_vport {
spinlock_t mac_list_lock; /* protect mac address need to add/detele */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
+
struct list_head vlan_list; /* Store VF vlan table */
};
@@ -1060,13 +1092,8 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
-static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
-{
- return !!hdev->reset_pending;
-}
-
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
@@ -1097,6 +1124,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev);
void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 6799d16de34b..a7b06c63143c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -57,17 +57,19 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
- resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
- resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
- resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
+ resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
+ resp_pf_to_vf->msg.vf_mbx_msg_code =
+ cpu_to_le16(vf_to_pf_req->msg.code);
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode =
+ cpu_to_le16(vf_to_pf_req->msg.subcode);
resp = hclge_errno_to_resp(resp_msg->status);
if (resp < SHRT_MAX) {
- resp_pf_to_vf->msg.resp_status = resp;
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
} else {
dev_warn(&hdev->pdev->dev,
"failed to send response to VF, response status %u is out-of-bound\n",
resp);
- resp_pf_to_vf->msg.resp_status = EIO;
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
}
if (resp_msg->len > 0)
@@ -94,15 +96,22 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
enum hclge_comm_cmd_status status;
struct hclge_desc desc;
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
resp_pf_to_vf->dest_vfid = dest_vfid;
resp_pf_to_vf->msg_len = msg_len;
- resp_pf_to_vf->msg.code = mbx_opcode;
+ resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);
- memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
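The new length check rejects oversized payloads with -EMSGSIZE before the memcpy() into the fixed-size descriptor message area. A minimal sketch of the guard-before-copy pattern, with an assumed 14-byte capacity:

/* Hedged sketch of the bounds check added before copying a mailbox
 * payload; the capacity value is an assumption for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MBX_MAX_MSG_SIZE 14	/* assumed payload capacity */

static int send_mbx(unsigned char *dst, const unsigned char *msg,
		    unsigned int msg_len)
{
	if (msg_len > MBX_MAX_MSG_SIZE) {
		fprintf(stderr, "msg len %u exceeds max %u\n",
			msg_len, MBX_MAX_MSG_SIZE);
		return -EMSGSIZE;
	}
	memcpy(dst, msg, msg_len);
	return 0;
}

int main(void)
{
	unsigned char buf[MBX_MAX_MSG_SIZE];
	unsigned char payload[20] = { 0 };

	printf("ok=%d\n", send_mbx(buf, payload, 8));
	printf("too big=%d\n", send_mbx(buf, payload, 20));
	return 0;
}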
@@ -118,8 +127,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
+ __le16 msg_data;
u16 reset_type;
- u8 msg_data[2];
u8 dest_vfid;
BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
@@ -133,10 +142,10 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else
reset_type = HNAE3_VF_FUNC_RESET;
- memcpy(&msg_data[0], &reset_type, sizeof(u16));
+ msg_data = cpu_to_le16(reset_type);
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
@@ -176,7 +185,7 @@ static int hclge_get_ring_chain_from_mbx(
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
- return -ENOMEM;
+ return -EINVAL;
for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
@@ -242,6 +251,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
}
+static int hclge_query_ring_vector_map(struct hclge_vport *vport,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_desc *desc)
+{
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc->data;
+ struct hclge_dev *hdev = vport->back;
+ u16 tqp_type_and_id;
+ int status;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);
+
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ ring_chain->tqp_index);
+ req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
+ req->vfid = vport->vport_id;
+
+ status = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Get VF ring vector map info fail, status is %d.\n",
+ status);
+
+ return status;
+}
+
+static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hclge_respond_to_vf_msg *resp)
+{
+#define HCLGE_LIMIT_RING_NUM 1
+#define HCLGE_RING_TYPE_OFFSET 0
+#define HCLGE_TQP_INDEX_OFFSET 1
+#define HCLGE_INT_GL_INDEX_OFFSET 2
+#define HCLGE_VECTOR_ID_OFFSET 3
+#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4
+ struct hnae3_ring_chain_node ring_chain;
+ struct hclge_desc desc;
+ struct hclge_ctrl_vector_chain_cmd *data =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ u16 tqp_type_and_id;
+ u8 int_gl_index;
+ int ret;
+
+ req->msg.ring_num = HCLGE_LIMIT_RING_NUM;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
+ if (ret) {
+ hclge_free_vector_ring_chain(&ring_chain);
+ return ret;
+ }
+
+ tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
+ int_gl_index = hnae3_get_field(tqp_type_and_id,
+ HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);
+
+ resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
+ resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
+ resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
+ resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
+ resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return ret;
+}
+
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
@@ -332,16 +416,14 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state,
struct hclge_vlan_info *vlan_info)
{
-#define MSG_DATA_SIZE 8
-
- u8 msg_data[MSG_DATA_SIZE];
+ struct hclge_mbx_port_base_vlan base_vlan;
- memcpy(&msg_data[0], &state, sizeof(u16));
- memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
- memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
- memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));
+ base_vlan.state = cpu_to_le16(state);
+ base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
+ base_vlan.qos = cpu_to_le16(vlan_info->qos);
+ base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}
@@ -355,13 +437,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
struct hclge_vf_vlan_cfg *msg_cmd;
+ __be16 proto;
+ u16 vlan_id;
msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
switch (msg_cmd->subcode) {
case HCLGE_MBX_VLAN_FILTER:
- return hclge_set_vlan_filter(handle,
- cpu_to_be16(msg_cmd->proto),
- msg_cmd->vlan, msg_cmd->is_kill);
+ proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
+ vlan_id = le16_to_cpu(msg_cmd->vlan);
+ return hclge_set_vlan_filter(handle, proto, vlan_id,
+ msg_cmd->is_kill);
case HCLGE_MBX_VLAN_RX_OFF_CFG:
return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
@@ -404,15 +489,17 @@ static void hclge_get_basic_info(struct hclge_vport *vport,
struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
struct hclge_basic_info *basic_info;
unsigned int i;
+ u32 pf_caps;
basic_info = (struct hclge_basic_info *)resp_msg->data;
for (i = 0; i < kinfo->tc_info.num_tc; i++)
basic_info->hw_tc_map |= BIT(i);
+ pf_caps = le32_to_cpu(basic_info->pf_caps);
if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
- hnae3_set_bit(basic_info->pf_caps,
- HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+ hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+ basic_info->pf_caps = cpu_to_le32(pf_caps);
resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
@@ -420,19 +507,15 @@ static void hclge_get_vf_queue_info(struct hclge_vport *vport,
struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN 6
-#define HCLGE_TQPS_ALLOC_OFFSET 0
-#define HCLGE_TQPS_RSS_SIZE_OFFSET 2
-#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4
+ struct hclge_mbx_vf_queue_info *queue_info;
struct hclge_dev *hdev = vport->back;
/* get the queue related info */
- memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
- &vport->alloc_tqps, sizeof(u16));
- memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
- &vport->nic.kinfo.rss_size, sizeof(u16));
- memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
- &hdev->rx_buf_len, sizeof(u16));
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
+ queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
+ queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
+ queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}
@@ -447,16 +530,15 @@ static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN 4
-#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0
-#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2
+ struct hclge_mbx_vf_queue_depth *queue_depth;
struct hclge_dev *hdev = vport->back;
/* get the queue depth info */
- memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
- &hdev->num_tx_desc, sizeof(u16));
- memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
- &hdev->num_rx_desc, sizeof(u16));
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
+ queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
+ queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);
+
resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
@@ -481,10 +563,9 @@ int hclge_push_vf_link_status(struct hclge_vport *vport)
#define HCLGE_VF_LINK_STATE_UP 1U
#define HCLGE_VF_LINK_STATE_DOWN 0U
+ struct hclge_mbx_link_status link_info;
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[9];
- u16 duplex;
/* mac.link can only be 0 or 1 */
switch (vport->vf_info.link_state) {
@@ -500,14 +581,13 @@ int hclge_push_vf_link_status(struct hclge_vport *vport)
break;
}
- duplex = hdev->hw.mac.duplex;
- memcpy(&msg_data[0], &link_status, sizeof(u16));
- memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
- memcpy(&msg_data[6], &duplex, sizeof(u16));
- msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;
+ link_info.link_status = cpu_to_le16(link_status);
+ link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
+ link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
+ link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}
@@ -515,22 +595,22 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED 1
+ struct hclge_mbx_link_mode link_mode;
struct hclge_dev *hdev = vport->back;
unsigned long advertising;
unsigned long supported;
unsigned long send_data;
- u8 msg_data[10] = {};
u8 dest_vfid;
advertising = hdev->hw.mac.advertising[0];
supported = hdev->hw.mac.supported[0];
dest_vfid = mbx_req->mbx_src_vfid;
- msg_data[0] = mbx_req->msg.data[0];
-
- send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
+ send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
+ advertising;
+ link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
+ link_mode.link_mode = cpu_to_le64(send_data);
- memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
@@ -544,7 +624,7 @@ static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
u16 queue_id;
int ret;
- memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
resp_msg->len = sizeof(u8);
@@ -580,36 +660,39 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport)
static int hclge_set_vf_mtu(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+ struct hclge_mbx_mtu_info *mtu_info;
u32 mtu;
- memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));
+ mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
+ mtu = le32_to_cpu(mtu_info->mtu);
return hclge_set_vport_mtu(vport, mtu);
}
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
- memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
if (queue_id >= handle->kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
queue_id, mbx_req->mbx_src_vfid);
- return;
+ return -EINVAL;
}
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
- memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+ *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
resp_msg->len = sizeof(qid_in_pf);
+ return 0;
}
-static void hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
@@ -627,13 +710,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
- return;
+ return -EINVAL;
}
memcpy(resp_msg->data,
&rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -695,17 +779,284 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+	/* PF driver should not reply to IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
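The block of small handlers plus hclge_mbx_ops_list replaces the old switch with an opcode-indexed function-pointer table; a NULL slot marks an unsupported code. A compact sketch of the pattern (opcodes and handlers invented for illustration; the sketch also adds an explicit bounds check before indexing):

/* Hedged sketch of the opcode -> handler dispatch-table refactor. */
#include <stdio.h>

#define OP_MAX 4

typedef int (*mbx_handler_t)(int vfid);

static int handle_reset(int vfid) { printf("reset vf%d\n", vfid); return 0; }
static int handle_mtu(int vfid)   { printf("set mtu vf%d\n", vfid); return 0; }

static const mbx_handler_t ops[OP_MAX] = {
	[1] = handle_reset,
	[2] = handle_mtu,
};

static int dispatch(unsigned int code, int vfid)
{
	mbx_handler_t fn = code < OP_MAX ? ops[code] : NULL;

	if (!fn) {
		fprintf(stderr, "un-supported code %u\n", code);
		return -1;
	}
	return fn(vfid);
}

int main(void)
{
	dispatch(1, 0);
	dispatch(3, 0);	/* NULL slot -> error path */
	return 0;
}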
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
- bool is_del = false;
unsigned int flag;
- int ret = 0;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
@@ -730,144 +1081,16 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
trace_hclge_pf_mbx_get(hdev, req);
/* clear the resp_msg before processing every mailbox message */
memset(&resp_msg, 0, sizeof(resp_msg));
-
- switch (req->msg.code) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- hclge_set_vf_promisc_mode(vport, req);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- hclge_get_vf_queue_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- hclge_get_vf_queue_depth(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_BASIC_INFO:
- hclge_get_basic_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_push_vf_link_status(vport);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "failed to inform link stat to VF, ret = %d\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_RESET:
- ret = hclge_reset_vf(vport);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- hclge_get_queue_id_in_pf(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- hclge_get_rss_key(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, is_del);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- hclge_get_vf_media_type(vport, &resp_msg);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- hclge_get_vf_mac_addr(vport, &resp_msg);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- case HCLGE_MBX_HANDLE_VF_TBL:
- hclge_handle_vf_tbl(vport, req);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg.code);
- break;
- }
-
- /* PF driver should not reply IMP */
- if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
- req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
- resp_msg.status = ret;
- if (time_is_before_jiffies(hdev->last_mbx_scheduled +
- HCLGE_MBX_SCHED_TIMEOUT))
- dev_warn(&hdev->pdev->dev,
- "resp vport%u mbx(%u,%u) late\n",
- req->mbx_src_vfid,
- req->msg.code,
- req->msg.subcode);
-
- hclge_gen_resp_to_vf(vport, req, &resp_msg);
- }
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
-
- /* reinitialize ret after complete the mbx message processing */
- ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 63d2be4349e3..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -48,7 +48,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
int ret;
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
- return 0;
+ return -EBUSY;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -86,7 +86,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
int ret;
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
- return 0;
+ return -EBUSY;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 089f4444b7e3..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -248,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -266,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+	/* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+		/* Each dscp setting has 4 bits, so each byte stores two dscp
+		 * settings
+		 */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
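hclge_dscp_to_tc_map() packs one 4-bit TC id per DSCP, two per byte, selected by HCLGE_DSCP_TC_SHIFT(n) = ((n) & 1) * 4, with DSCP 0-31 in BD0 and DSCP 32-63 in BD1. A standalone sketch of the nibble packing (the DSCP-to-TC mapping values are assumed):

/* Hedged sketch of the 4-bit-per-DSCP packing: even index in the low
 * nibble, odd index in the high nibble of the same byte.
 */
#include <stdint.h>
#include <stdio.h>

#define DSCP_TC_SHIFT(n) (((n) & 1) * 4)

int main(void)
{
	uint8_t req[16] = { 0 };	/* 32 DSCP entries, 4 bits each */
	uint8_t tc_of_dscp[32];

	for (int i = 0; i < 32; i++)
		tc_of_dscp[i] = i % 8;	/* assumed mapping */

	for (int i = 0; i < 32; i++)
		req[i >> 1] |= tc_of_dscp[i] << DSCP_TC_SHIFT(i);

	printf("byte0=0x%02x (dscp0->tc%u, dscp1->tc%u)\n",
	       req[0], req[0] & 0xf, req[0] >> 4);
	return 0;
}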
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -282,8 +323,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
- u16 qs_id, u8 pri)
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
+ bool link_vld)
{
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_desc desc;
@@ -294,7 +335,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
map->qs_id = cpu_to_le16(qs_id);
map->priority = pri;
- map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
+ map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -420,7 +461,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_shaper_ir_para ir_para;
@@ -642,11 +683,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
if (vport->vport_id) {
+ kinfo->tc_info.max_tc = 1;
kinfo->tc_info.num_tc = 1;
vport->qs_offset = HNAE3_MAX_TC +
vport->vport_id - HCLGE_VF_VPORT_START_NUM;
vport_max_rss_size = hdev->vf_rss_size_max;
} else {
+ kinfo->tc_info.max_tc = hdev->tc_max;
kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
vport->qs_offset = 0;
@@ -679,7 +722,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
- hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+ if (vport->vport_id == PF_VPORT_ID)
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
/* when enable mqprio, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active)
@@ -714,14 +759,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
- u8 i;
+ u8 i, tc_sch_mode;
+ u32 bw_limit;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ if (i < hdev->tm_info.num_tc) {
+ tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ } else {
+ tc_sch_mode = HCLGE_SCH_MODE_SP;
+ bw_limit = 0;
+ }
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
- hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
hdev->tm_info.tc_info[i].pgid = 0;
- hdev->tm_info.tc_info[i].bw_limit =
- hdev->tm_info.pg_info[0].bw_limit;
+ hdev->tm_info.tc_info[i].bw_limit = bw_limit;
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
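The init rule here: TCs below num_tc are active and get DWRR with the PG0 bandwidth, while the remaining allocated TCs up to tc_max are parked in strict-priority mode with a zero limit. A tiny sketch of that split (tc_max, num_tc and the bandwidth are assumed values):

/* Hedged sketch of the active/parked TC split in hclge_tm_tc_info_init(). */
#include <stdio.h>

int main(void)
{
	unsigned int tc_max = 8, num_tc = 4;	/* assumed values */
	unsigned int bw_pg0 = 100000;		/* assumed PG0 limit */

	for (unsigned int i = 0; i < tc_max; i++) {
		int active = i < num_tc;

		printf("tc%u: mode=%s bw=%u\n", i,
		       active ? "dwrr" : "sp", active ? bw_pg0 : 0);
	}
	return 0;
}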
@@ -926,10 +979,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
for (k = 0; k < hdev->num_alloc_vport; k++) {
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
- for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.max_tc; i++) {
+ u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
+ bool link_vld = i < kinfo->tc_info.num_tc;
+
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
vport[k].qs_offset + i,
- i);
+ pri, link_vld);
if (ret)
return ret;
}
@@ -949,7 +1005,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
for (i = 0; i < HNAE3_MAX_TC; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
vport[k].qs_offset + i,
- k);
+ k, true);
if (ret)
return ret;
}
@@ -989,33 +1045,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
struct hclge_shaper_ir_para ir_para;
- u32 shaper_para;
+ u32 shaper_para_c, shaper_para_p;
int ret;
u32 i;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
u32 rate = hdev->tm_info.tc_info[i].bw_limit;
- ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
- &ir_para, max_tm_rate);
- if (ret)
- return ret;
+ if (rate) {
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ } else {
+ shaper_para_c = 0;
+ shaper_para_p = 0;
+ }
- shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para, rate);
+ shaper_para_c, rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
- ir_para.ir_u,
- ir_para.ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para, rate);
+ shaper_para_p, rate);
if (ret)
return ret;
}
@@ -1125,7 +1187,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
int ret;
u32 i, k;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
pg_info =
&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
dwrr = pg_info->tc_dwrr[i];
@@ -1135,9 +1197,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
return ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ if (i >= kinfo->tc_info.max_tc)
+ continue;
+
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
ret = hclge_tm_qs_weight_cfg(
hdev, vport[k].qs_offset + i,
- vport[k].dwrr);
+ dwrr);
if (ret)
return ret;
}
@@ -1225,7 +1293,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
- "fw %08x does't support ets tc weight cmd\n",
+ "fw %08x doesn't support ets tc weight cmd\n",
hdev->fw_version);
ret = 0;
}
@@ -1248,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1303,6 +1377,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
struct hclge_vport *vport = hdev->vport;
int ret;
+ u8 mode;
u16 i;
ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
@@ -1310,9 +1385,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
+
+ if (pri_id >= kinfo->tc_info.max_tc)
+ continue;
+
+ mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
+ HCLGE_SCH_MODE_SP;
ret = hclge_tm_qs_schd_mode_cfg(hdev,
vport[i].qs_offset + pri_id,
- HCLGE_SCH_MODE_DWRR);
+ mode);
if (ret)
return ret;
}
@@ -1353,7 +1435,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
@@ -1611,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 619cc30a2dfc..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -30,6 +30,9 @@ enum hclge_opcode_type;
#define HCLGE_TM_PF_MAX_PRI_NUM 8
#define HCLGE_TM_PF_MAX_QSET_NUM 8
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -237,6 +240,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
@@ -261,4 +265,6 @@ int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index 5b0b71bd6120..8510b88d4982 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
- __entry->code = req->msg.code;
+ __entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7df87610ad96..db6f7cdba958 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -189,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
basic_info = (struct hclge_basic_info *)resp_msg;
hdev->hw_tc_map = basic_info->hw_tc_map;
- hdev->mbx_api_version = basic_info->mbx_api_version;
- caps = basic_info->pf_caps;
+ hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
+ caps = le32_to_cpu(basic_info->pf_caps);
if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
@@ -223,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
-#define HCLGEVF_TQPS_ALLOC_OFFSET 0
-#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2
-#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4
+ struct hclge_mbx_vf_queue_info *queue_info;
u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
struct hclge_vf_to_pf_msg send_msg;
int status;
@@ -241,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
return status;
}
- memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
- sizeof(u16));
- memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
- sizeof(u16));
- memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
- sizeof(u16));
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
+ hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
+ hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
+ hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
return 0;
}
@@ -254,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
-#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0
-#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2
+ struct hclge_mbx_vf_queue_depth *queue_depth;
u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
struct hclge_vf_to_pf_msg send_msg;
int ret;
@@ -271,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
return ret;
}
- memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
- sizeof(u16));
- memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
- sizeof(u16));
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
+ hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
+ hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
return 0;
}
@@ -288,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
- memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ *(__le16 *)send_msg.data = cpu_to_le16(queue_id);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
sizeof(resp_data));
if (!ret)
- qid_in_pf = *(u16 *)resp_data;
+ qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
return qid_in_pf;
}
@@ -321,6 +315,7 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -354,6 +349,14 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
HCLGEVF_TQP_REG_SIZE;
+	/* when the device supports tx push and has device memory,
+	 * the queue can use push mode or doorbell mode on the
+	 * device memory.
+	 */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGEVF_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -976,7 +979,7 @@ static int hclgevf_update_mac_list(struct hnae3_handle *handle,
/* if the mac addr is already in the mac list, no need to add a new
* one into it, just check the mac addr state, convert it to a new
- * new state, or just remove it, or do nothing.
+ * state, or just remove it, or do nothing.
*/
mac_node = hclgevf_find_mac_node(list, addr);
if (mac_node) {
@@ -1236,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
{
-#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0
-#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1
-#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3
-
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_mbx_vlan_filter *vlan_filter;
struct hclge_vf_to_pf_msg send_msg;
int ret;
@@ -1262,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_VLAN_FILTER);
- send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
- memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
- sizeof(vlan_id));
- memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
- sizeof(proto));
+ vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
+ vlan_filter->is_kill = is_kill;
+ vlan_filter->vlan_id = cpu_to_le16(vlan_id);
+ vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
+
/* when removing the hw vlan filter fails, record the vlan id,
 * and try to remove it from hw later, to be consistent
 * with the stack.
@@ -1338,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
for (i = 1; i < handle->kinfo.num_tqps; i++) {
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
- memcpy(send_msg.data, &i, sizeof(i));
+ *(__le16 *)send_msg.data = cpu_to_le16(i);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (ret)
return ret;
@@ -1350,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_mbx_mtu_info *mtu_info;
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
- memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+ mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
+ mtu_info->mtu = cpu_to_le32(new_mtu);
+
return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
@@ -2043,8 +2046,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2123,7 +2125,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
@@ -2547,8 +2549,6 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_hw *hw = &hdev->hw;
@@ -2856,6 +2856,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
hclgevf_init_rxd_adv_layout(hdev);
@@ -2952,7 +2957,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* ensure vf tbl list as empty before init*/
+ /* ensure vf tbl list as empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
@@ -3172,7 +3177,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle)
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed,
- u8 *duplex)
+ u8 *duplex, u32 *lane_num)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3304,7 +3309,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
@@ -3322,7 +3327,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
- u8 *port_base_vlan_info, u8 data_size)
+ struct hclge_mbx_port_base_vlan *port_base_vlan)
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
@@ -3347,7 +3352,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
/* send msg to PF and wait update port based vlan info */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
- memcpy(send_msg.data, port_base_vlan_info, data_size);
+ memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (!ret) {
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
@@ -3424,7 +3429,7 @@ static struct hnae3_ae_algo ae_algovf = {
.pdev_id_table = ae_algovf_pci_tbl,
};
-static int hclgevf_init(void)
+static int __init hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
@@ -3439,7 +3444,7 @@ static int hclgevf_init(void)
return 0;
}
-static void hclgevf_exit(void)
+static void __exit hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 502ca1ce1a90..59ca6c794d6d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -96,6 +96,14 @@
#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_TQP_MEM_SIZE 0x10000
+#define HCLGEVF_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for NIC */
+#define HCLGEVF_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGEVF_MEM_BAR) >> 1)
+#define HCLGEVF_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGEVF_NIC_MEM_OFFSET(hdev) + HCLGEVF_TQP_MEM_SIZE * (i))
+
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
@@ -285,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
- u8 *port_base_vlan_info, u8 data_size);
+ struct hclge_mbx_port_base_vlan *port_base_vlan);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index d5e0a3f762f7..bbf7b14079de 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
- * to prtect the received_response from race condition
+ * to protect the received_response from race condition
*/
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF.
* @hdev: pointer to struct hclgevf_dev
- * @resp_msg: pointer to store the original message type and response status
- * @len: the resp_msg data array length.
+ * @code0: the message opcode the VF sends to the PF.
+ * @code1: the message sub-opcode the VF sends to the PF.
+ * @resp_data: pointer to store response data from PF to VF.
+ * @resp_len: the length of resp_data from PF to VF.
*/
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
@@ -122,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
hclgevf_reset_mbx_resp_status(hdev);
- req->match_id = hdev->mbx_resp.match_id;
+ req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
@@ -160,27 +162,29 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
struct hclge_mbx_pf_to_vf_cmd *req)
{
+ u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
+ u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+ u16 resp_status = le16_to_cpu(req->msg.resp_status);
+ u16 match_id = le16_to_cpu(req->match_id);
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
- "VF mbx resp flag not clear(%u)\n",
- req->msg.vf_mbx_msg_code);
-
- resp->origin_mbx_msg =
- (req->msg.vf_mbx_msg_code << 16);
- resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
- resp->resp_status =
- hclgevf_resp_to_errno(req->msg.resp_status);
+ "VF mbx resp flag not clear(%u)\n",
+ vf_mbx_msg_code);
+
+ resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= vf_mbx_msg_subcode;
+ resp->resp_status = hclgevf_resp_to_errno(resp_status);
memcpy(resp->additional_info, req->msg.resp_data,
HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
- if (req->match_id) {
+ if (match_id) {
/* If match_id is not zero, it means the PF supports match_id.
* If the match_id is right, the VF takes the response; otherwise
* the response is ignored. The driver will clear hdev->mbx_resp
* when it sends the next message which needs a response.
*/
- if (req->match_id == resp->match_id)
+ if (match_id == resp->match_id)
resp->received_resp = true;
} else {
resp->received_resp = true;
@@ -197,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%u)\n",
- req->msg.code);
+ le16_to_cpu(req->msg.code));
return;
}
@@ -216,6 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
struct hclge_comm_cmq_ring *crq;
struct hclge_desc *desc;
u16 flag;
+ u16 code;
crq = &hdev->hw.hw.cmq.crq;
@@ -230,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ code = le16_to_cpu(req->msg.code);
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg.code);
+ code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -249,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
* timeout and simultaneously queue the async messages for later
* processing in the context of the mailbox task, i.e. the slow path.
*/
- switch (req->msg.code) {
+ switch (code) {
case HCLGE_MBX_PF_VF_RESP:
hclgevf_handle_mbx_response(hdev, req);
break;
@@ -263,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
default:
dev_err(&hdev->pdev->dev,
"VF received unsupported(%u) mbx msg from PF\n",
- req->msg.code);
+ code);
break;
}
crq->desc[crq->next_to_use].flag = 0;
@@ -285,14 +291,18 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ struct hclge_mbx_port_base_vlan *vlan_info;
+ struct hclge_mbx_link_status *link_info;
+ struct hclge_mbx_link_mode *link_mode;
enum hnae3_reset_type reset_type;
u16 link_status, state;
- u16 *msg_q, *vlan_info;
+ __le16 *msg_q;
+ u16 opcode;
u8 duplex;
u32 speed;
u32 tail;
u8 flag;
- u8 idx;
+ u16 idx;
tail = hdev->arq.tail;
@@ -306,13 +316,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
}
msg_q = hdev->arq.msg_q[hdev->arq.head];
-
- switch (msg_q[0]) {
+ opcode = le16_to_cpu(msg_q[0]);
+ switch (opcode) {
case HCLGE_MBX_LINK_STAT_CHANGE:
- link_status = msg_q[1];
- memcpy(&speed, &msg_q[2], sizeof(speed));
- duplex = (u8)msg_q[4];
- flag = (u8)msg_q[5];
+ link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
+ link_status = le16_to_cpu(link_info->link_status);
+ speed = le32_to_cpu(link_info->speed);
+ duplex = (u8)le16_to_cpu(link_info->duplex);
+ flag = link_info->flag;
/* update upper layer with new link status */
hclgevf_update_speed_duplex(hdev, speed, duplex);
@@ -324,13 +335,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_MODE:
- idx = (u8)msg_q[1];
+ link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
+ idx = le16_to_cpu(link_mode->idx);
if (idx)
- memcpy(&hdev->hw.mac.supported, &msg_q[2],
- sizeof(unsigned long));
+ hdev->hw.mac.supported =
+ le64_to_cpu(link_mode->link_mode);
else
- memcpy(&hdev->hw.mac.advertising, &msg_q[2],
- sizeof(unsigned long));
+ hdev->hw.mac.advertising =
+ le64_to_cpu(link_mode->link_mode);
break;
case HCLGE_MBX_ASSERTING_RESET:
/* PF has asserted reset hence VF should go in pending
@@ -338,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- reset_type = (enum hnae3_reset_type)msg_q[1];
+ reset_type =
+ (enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
break;
case HCLGE_MBX_PUSH_VLAN_INFO:
- state = msg_q[1];
- vlan_info = &msg_q[1];
+ vlan_info =
+ (struct hclge_mbx_port_base_vlan *)(msg_q + 1);
+ state = le16_to_cpu(vlan_info->state);
hclgevf_update_port_base_vlan_info(hdev, state,
- (u8 *)vlan_info, 8);
+ vlan_info);
break;
case HCLGE_MBX_PUSH_PROMISC_INFO:
- hclgevf_parse_promisc_info(hdev, msg_q[1]);
+ hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%u) message from arq\n",
- msg_q[0]);
+ opcode);
break;
}
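The conversions introduced throughout this file matter on big-endian hosts: the mailbox descriptors are little-endian on the wire, so each multi-byte field is declared __le16/__le32 and swapped exactly once at the point of use. A minimal sketch, assuming a hypothetical message layout:

struct demo_mbx_msg {
	__le16 code;		/* little-endian on the wire */
	__le16 subcode;
	__le16 match_id;
};

static void demo_parse(struct demo_mbx_msg *msg, u16 my_match_id)
{
	u16 code = le16_to_cpu(msg->code);	/* no-op on LE, swap on BE */

	pr_info("mbx code %u\n", code);
	/* writes go through the inverse helper */
	msg->match_id = cpu_to_le16(my_match_id);
}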
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index e4bfb6191fef..5d4895bb57a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
- __entry->code = req->msg.code;
+ __entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 07fdab58001d..c2ae1b4f9a5f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -174,7 +174,7 @@ static int hns_mdio_wait_ready(struct mii_bus *bus)
u32 cmd_reg_value;
int i;
- /* waitting for MDIO_COMMAND_REG 's mdio_start==0 */
+ /* waiting for MDIO_COMMAND_REG's mdio_start==0 */
/* after that a read or write can be done */
for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
@@ -319,7 +319,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
MDIO_C45_READ, phy_id, devad);
}
- /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+ /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0, */
/* check whether the read or write operation has finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
index 19eb839177ec..061952c6c21a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
struct tag_sml_funcfg_tbl *funcfg_table_elem;
struct hinic_cmd_lt_rd *read_data;
u16 out_size = sizeof(*read_data);
+ int ret = ~0;
int err;
read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
@@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
switch (idx) {
case VALID:
- return funcfg_table_elem->dw0.bs.valid;
+ ret = funcfg_table_elem->dw0.bs.valid;
+ break;
case RX_MODE:
- return funcfg_table_elem->dw0.bs.nic_rx_mode;
+ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+ break;
case MTU:
- return funcfg_table_elem->dw1.bs.mtu;
+ ret = funcfg_table_elem->dw1.bs.mtu;
+ break;
case RQ_DEPTH:
- return funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ break;
case QUEUE_NUM:
- return funcfg_table_elem->dw13.bs.cfg_q_num;
+ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+ break;
}
kfree(read_data);
- return ~0;
+ return ret;
}
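The hunk above exists because the old switch returned directly from each case, leaking read_data on every successful lookup; routing every case through a single exit makes the one kfree() unconditional. The shape of the fix, as a minimal sketch with hypothetical names:

static int demo_lookup(int idx)
{
	char *buf = kzalloc(64, GFP_KERNEL);
	int ret = -EINVAL;

	if (!buf)
		return -ENOMEM;

	switch (idx) {
	case 0:
		ret = 10;	/* record the result ... */
		break;
	case 1:
		ret = 20;
		break;
	}

	kfree(buf);		/* ... so this runs on every path */
	return ret;
}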
static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
index e9e00cfa1329..e10f739d8339 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -12,7 +12,6 @@
#define TBL_ID_FUNC_CFG_SM_INST 1
#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
-#define HINIC_FUNCTION_CONFIGURE_TABLE 1
struct hinic_cmd_lt_rd {
u8 status;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index fb3e89141a0d..a4fbf44f944c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -95,9 +95,6 @@ struct hinic_dev {
u16 sq_depth;
u16 rq_depth;
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-
u8 rss_tmpl_idx;
u8 rss_hash_engine;
u16 num_rss;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 60ae8bfc5f69..1749d26f4bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -43,9 +43,7 @@ static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
len += fw_image->fw_section_info[i].fw_section_len;
- memcpy(&host_image->image_section_info[i],
- &fw_image->fw_section_info[i],
- sizeof(struct fw_section_info_st));
+ host_image->image_section_info[i] = fw_image->fw_section_info[i];
}
if (len != fw_image->fw_len ||
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 93192f58ac88..f4b680286911 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -55,7 +55,6 @@
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
-#define OBJ_STR_MAX_LEN 32
struct hw2ethtool_link_mode {
enum ethtool_link_mode_bit_indices link_mode_bit;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index a627237f694b..d39eec9c62bf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -82,11 +82,6 @@
struct hinic_func_to_io, \
cmdqs)
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
enum completion_format {
COMPLETE_DIRECT = 0,
COMPLETE_SGE = 1,
@@ -509,8 +504,8 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
*
* Return 0 - Success, negative - Failure
**/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
+static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
+ enum hinic_set_arm_qtype q_type, u32 q_id)
{
struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
struct hinic_hwif *hwif = cmdqs->hwif;
@@ -929,7 +924,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
err_set_cmdq_depth:
hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
+ free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 9c413e963a04..ff09cf0ed52b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -177,9 +177,6 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
enum hinic_mod_type mod, u8 cmd,
struct hinic_cmdq_buf *buf_in, u64 *out_param);
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
void __iomem **db_area);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index 7e84e4e33fff..d56e7413ace0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -22,7 +22,6 @@
(HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
(HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 2127a48749a8..27795288c586 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -29,7 +29,6 @@
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"
-#define IO_STATUS_TIMEOUT 100
#define OUTBOUND_STATE_TIMEOUT 100
#define DB_STATE_TIMEOUT 100
@@ -42,11 +41,6 @@ enum intr_type {
INTR_MSIX_TYPE,
};
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
/**
* parse_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -837,8 +831,8 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
return 0;
}
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
{
u16 out_size = sizeof(*interrupt_info);
struct hinic_pfhwdev *pfhwdev;
@@ -883,7 +877,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
if (err)
return -EINVAL;
- interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt;
+ interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
@@ -1041,13 +1035,6 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
hinic_free_hwif(hwdev->hwif);
}
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->max_qps;
-}
-
/**
* hinic_hwdev_num_qps - return the number of QPs available for use
* @hwdev: the NIC HW device
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 416492e48274..d2d89b0a5ef0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -566,8 +566,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devli
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev);
-
int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
@@ -587,9 +585,6 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
enum hinic_msix_state flag);
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info);
-
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
struct hinic_msix_config *interrupt_info);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 0428faa68e80..88567305d06e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -58,39 +58,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
}
/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
* hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
* @hwif: the HW interface of a pci function device
* @msix_index: msix_index
@@ -115,8 +82,6 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
* hinic_set_pf_action - set action on pf channel
* @hwif: the HW interface of a pci function device
* @action: action on pf channel
- *
- * Return 0 - Success, negative - Failure
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c06f2253151e..3d588896a367 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -131,10 +131,6 @@
(((u32)(val) & HINIC_MSIX_##member##_MASK) << \
HINIC_MSIX_##member##_SHIFT)
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
@@ -269,11 +265,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
u8 lli_timer_cfg, u8 lli_credit_limit,
u8 resend_timer);
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
enum hinic_msix_state flag);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 5078c0c73863..3f9c31d29215 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -117,7 +117,6 @@ enum hinic_mbox_tx_status {
#define MBOX_WB_STATUS_MASK 0xFF
#define MBOX_WB_ERROR_CODE_MASK 0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
-#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
#define MBOX_WB_STATUS_NOT_FINISHED 0x00
#define MBOX_STATUS_FINISHED(wb) \
@@ -130,11 +129,8 @@ enum hinic_mbox_tx_status {
#define SEQ_ID_START_VAL 0
#define SEQ_ID_MAX_VAL 42
-#define DST_AEQ_IDX_DEFAULT_VAL 0
-#define SRC_AEQ_IDX_DEFAULT_VAL 0
#define NO_DMA_ATTRIBUTE_VAL 0
-#define HINIC_MGMT_RSP_AEQN 0
#define HINIC_MBOX_RSP_AEQN 2
#define HINIC_MBOX_RECV_AEQN 0
@@ -146,7 +142,6 @@ enum hinic_mbox_tx_status {
#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
-#define MBOX_RESPONSE_ERROR 0x1
#define MBOX_MSG_ID_MASK 0xFF
#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
@@ -621,7 +616,7 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
return false;
}
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
u64 mbox_header = *((u64 *)header);
@@ -649,7 +644,7 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_send_mbox *send_mbox;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
index 46953190d29e..33ac7814d3b3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -150,10 +150,6 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod);
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
-
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
-
int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index ebc77771f5da..4aa1f433ed24 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -643,6 +643,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = alloc_msg_buf(pf_to_mgmt);
if (err) {
dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
@@ -650,6 +651,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
if (err) {
dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
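Both failure paths above now destroy the workqueue created earlier in the function. An equivalent, arguably more conventional shape is the goto-unwind ladder, sketched below with hypothetical helpers so each cleanup line appears only once:

static int demo_init(struct device *dev)
{
	struct workqueue_struct *wq;
	int err;

	wq = create_singlethread_workqueue("demo");
	if (!wq)
		return -ENOMEM;

	err = demo_alloc_bufs(dev);		/* hypothetical */
	if (err)
		goto err_destroy_wq;

	err = demo_init_chains(dev);		/* hypothetical */
	if (err)
		goto err_free_bufs;

	return 0;

err_free_bufs:
	demo_free_bufs(dev);
err_destroy_wq:
	destroy_workqueue(wq);
	return err;
}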
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 336248aa2e48..537a8098bc4e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -472,8 +472,7 @@ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
return atomic_read(&wq->delta) - 1;
}
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
+static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
{
u32 ctrl_size, task_size, bufdesc_size;
@@ -588,18 +587,16 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
/**
* hinic_sq_prepare_wqe - prepare wqe before inserting it into the queue
* @sq: send queue
- * @prod_idx: pi value
* @sq_wqe: wqe to prepare
* @sges: sges used by the wqe for the send buffer addresses
* @nr_sges: number of sges
**/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
+ struct hinic_sge *sges, int nr_sges)
{
int i;
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
+ sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
sq_prepare_task(&sq_wqe->task);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 0dfa51ad5855..178dcc874370 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -175,9 +175,8 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
u32 l4_len,
u32 offset, u32 ip_ident, u32 mss);
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
+ struct hinic_sge *sges, int nr_sges);
void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
unsigned int cos);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 2d9b06d7caad..e1a1735c00c1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -175,8 +175,6 @@ static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
/**
* cmdq_free_page - free page from cmdq
* @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
**/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
@@ -386,7 +384,7 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
return -ENOMEM;
wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
- sizeof(wq->prod_idx), GFP_KERNEL);
+ sizeof(*wq->shadow_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
@@ -771,7 +769,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
/* If we only have one page, we still need to get the shadow wqe
* when the wqe rolls over the page boundary
*/
- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
@@ -841,7 +839,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*cons_idx = curr_cons_idx;
- if (curr_pg != end_pg) {
+ /* If we only have one page, we still need to get the shadow wqe
+ * when the wqe rolls over the page boundary
+ */
+ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
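Both hunks in this file detect page roll-over the same way: with indices already masked modulo the ring size, "end < start" can only mean the WQE span crossed the end of the ring, so the shadow-WQE copy path must be taken. Reduced to its essentials:

/* ring_size must be a power of two for the mask to work */
static u16 demo_masked(u16 idx, u16 ring_size)
{
	return idx & (ring_size - 1);
}

static bool demo_spans_ring_end(u16 start_idx, u16 end_idx)
{
	return end_idx < start_idx;	/* true only after a roll-over */
}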
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index f4b6d2c1061f..c6bdeed5606e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -261,23 +261,6 @@
#define HINIC_RSS_TYPE_GET(val, member) \
(((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
enum hinic_l3_offload_type {
L3TYPE_UNKNOWN = 0,
IPV6_PKT = 1,
@@ -305,18 +288,10 @@ enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
};
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
enum hinic_l2type {
HINIC_L2TYPE_ETH = 0,
};
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
struct hinic_cmdq_header {
u32 header_info;
u32 saved_data;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 05329292d940..e1f54a2f28b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,8 +62,6 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_LRO_RX_TIMER_DEFAULT 16
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
-
#define work_to_rx_mode_work(work) \
container_of(work, struct hinic_rx_mode_work, work)
@@ -82,56 +80,44 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features, bool force_change);
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
struct hinic_rxq_stats rx_stats;
- u64_stats_init(&rx_stats.syncp);
-
hinic_rxq_get_stats(rxq, &rx_stats);
- u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
nic_rx_stats->errors += rx_stats.errors;
nic_rx_stats->csum_errors += rx_stats.csum_errors;
nic_rx_stats->other_errors += rx_stats.other_errors;
- u64_stats_update_end(&nic_rx_stats->syncp);
-
- hinic_rxq_clean_stats(rxq);
}
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
struct hinic_txq_stats tx_stats;
- u64_stats_init(&tx_stats.syncp);
-
hinic_txq_get_stats(txq, &tx_stats);
- u64_stats_update_begin(&nic_tx_stats->syncp);
nic_tx_stats->bytes += tx_stats.bytes;
nic_tx_stats->pkts += tx_stats.pkts;
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
- u64_stats_update_end(&nic_tx_stats->syncp);
-
- hinic_txq_clean_stats(txq);
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
{
int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
}
/**
@@ -560,8 +546,6 @@ int hinic_close(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- update_nic_stats(nic_dev);
-
up(&nic_dev->mgmt_lock);
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
@@ -855,26 +839,19 @@ static void hinic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
-
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
-
- down(&nic_dev->mgmt_lock);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
- up(&nic_dev->mgmt_lock);
-
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
- stats->rx_errors = nic_rx_stats->errors;
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
}
static int hinic_set_features(struct net_device *netdev,
@@ -983,8 +960,6 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @in_size: input size
* @buf_out: output buffer
* @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
**/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
@@ -1173,8 +1148,6 @@ static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
static int nic_dev_init(struct pci_dev *pdev)
{
struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -1236,15 +1209,8 @@ static int nic_dev_init(struct pci_dev *pdev)
sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
-
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
-
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
goto err_vlan_bitmap;
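devm_bitmap_zalloc() takes a bit count rather than a byte count, which is why the old ALIGN(VLAN_N_VID, 8) / 8 arithmetic disappears, and the allocation is released automatically on driver detach. A minimal usage sketch:

static int demo_alloc_vlan_bitmap(struct device *dev,
				  unsigned long **bitmap)
{
	/* VLAN_N_VID bits, zeroed, freed with the device */
	*bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID, GFP_KERNEL);

	return *bitmap ? 0 : -ENOMEM;
}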
@@ -1414,8 +1380,6 @@ err_pci_regions:
return err;
}
-#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
-
static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
{
struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index fed3b6bc0d76..d649c6e323c8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -50,7 +50,7 @@
* hinic_rxq_clean_stats - Clean the statistics of a specific queue
* @rxq: Logical Rx Queue
**/
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
@@ -73,17 +73,15 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
+ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
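The reader loop above pairs with a writer that brackets its updates in u64_stats_update_begin()/end(); the reader retries until it observes a snapshot no writer touched, and the _irq variants additionally guard against writers running in interrupt context. A self-contained sketch of both sides:

struct demo_stats {
	u64 pkts;
	struct u64_stats_sync syncp;
};

static void demo_writer(struct demo_stats *s)	/* e.g. the rx path */
{
	u64_stats_update_begin(&s->syncp);
	s->pkts++;
	u64_stats_update_end(&s->syncp);
}

static u64 demo_reader(struct demo_stats *s)
{
	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		pkts = s->pkts;		/* consistent snapshot or retry */
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return pkts;
}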
/**
@@ -481,7 +479,8 @@ static void rx_add_napi(struct hinic_rxq *rxq)
{
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
+ netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
+ nic_dev->rx_weight);
napi_enable(&rxq->napi);
}
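netif_napi_add() no longer takes a weight argument and defaults to the standard budget (NAPI_POLL_WEIGHT, 64); drivers that keep a tunable weight, as this one does via its rx_weight module parameter, switch to netif_napi_add_weight(). A sketch of the two forms:

static void demo_add_napi(struct net_device *netdev,
			  struct napi_struct *napi,
			  int (*poll)(struct napi_struct *, int),
			  int weight)
{
	if (weight == NAPI_POLL_WEIGHT)
		netif_napi_add(netdev, napi, poll);	/* default budget */
	else
		netif_napi_add_weight(netdev, napi, poll, weight);
}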
@@ -548,7 +547,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
if (err)
goto err_irq_affinity;
@@ -565,7 +564,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
- irq_set_affinity_hint(rq->irq, NULL);
+ irq_update_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
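irq_set_affinity_and_hint() both applies the mask and publishes it as the hint that userspace irqbalance reads, while irq_update_affinity_hint(irq, NULL) on teardown clears only the hint. The paired calls, sketched:

static int demo_irq_setup(unsigned int irq, struct cpumask *mask,
			  unsigned int cpu)
{
	cpumask_set_cpu(cpu, mask);
	/* applies the affinity *and* advertises it as the hint */
	return irq_set_affinity_and_hint(irq, mask);
}

static void demo_irq_teardown(unsigned int irq)
{
	irq_update_affinity_hint(irq, NULL);	/* drop the hint only */
}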
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 507dcbae9085..8f7bd6a049bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -41,8 +41,6 @@ struct hinic_rxq {
struct napi_struct napi;
};
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
-
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index 01e7d3c0b68e..f7e05b41385b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -24,6 +24,7 @@ MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto,
#define HINIC_VLAN_PRIORITY_SHIFT 13
#define HINIC_ADD_VLAN_IN_MAC 0x8000
#define HINIC_TX_RATE_TABLE_FULL 12
+#define HINIC_MAX_QOS 7
static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
u16 vlan_id, u16 func_id)
@@ -774,7 +775,7 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u16 vlanprio, cur_vlanprio;
sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
@@ -820,7 +821,7 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
cur_trust = nic_io->vf_infos[vf].trust;
/* same request, so just return success */
- if ((setting && cur_trust) || (!setting && !cur_trust))
+ if (setting == cur_trust)
return 0;
err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
@@ -852,12 +853,6 @@ int hinic_ndo_set_vf_bw(struct net_device *netdev,
return -EINVAL;
}
- if (max_tx_rate < min_tx_rate) {
- netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
- max_tx_rate, min_tx_rate);
- return -EINVAL;
- }
-
err = hinic_port_link_state(nic_dev, &link_state);
if (err) {
netif_err(nic_dev, drv, netdev,
@@ -946,7 +941,7 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
/* same request, so just return success */
- if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ if (setting == cur_spoofchk)
return 0;
err = hinic_set_vf_spoofchk(sriov_info->hwdev,
@@ -1137,8 +1132,8 @@ static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
}
-static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
- u16 start_vf_id, u16 end_vf_id)
+static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
{
struct hinic_dev *nic_dev;
u16 func_idx, idx;
@@ -1151,8 +1146,6 @@ static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
HINIC_HW_WQ_PAGE_SIZE);
hinic_clear_vf_infos(nic_dev, idx);
}
-
- return 0;
}
int hinic_vf_func_init(struct hinic_hwdev *hwdev)
@@ -1181,7 +1174,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev)
dev_err(&hwdev->hwif->pdev->dev,
"Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
err, register_info.status, out_size);
- hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
return -EIO;
}
} else {
@@ -1299,7 +1291,7 @@ int hinic_pci_sriov_disable(struct pci_dev *pdev)
return 0;
}
-int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct hinic_sriov_info *sriov_info;
int err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index ba627a362f9a..d4d4e63d31ea 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -98,8 +98,6 @@ void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
int hinic_pci_sriov_disable(struct pci_dev *dev);
-int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
-
int hinic_vf_func_init(struct hinic_hwdev *hwdev);
void hinic_vf_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 8d59babbf476..e91476c8ff8b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -74,7 +74,7 @@ enum hinic_offload_type {
* hinic_txq_clean_stats - Clean the statistics of a specific queue
* @txq: Logical Tx Queue
**/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
@@ -98,17 +98,15 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
+ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}
/**
@@ -532,7 +530,7 @@ netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
@@ -616,7 +614,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
if (err)
@@ -809,7 +807,8 @@ static int tx_request_irq(struct hinic_txq *txq)
qp = container_of(sq, struct hinic_qp, sq);
- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
+ netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
+ nic_dev->tx_weight);
hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index b3c8657774a7..91dc778362f3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -40,8 +40,6 @@ struct hinic_txq {
struct napi_struct napi;
};
-void hinic_txq_clean_stats(struct hinic_txq *txq);
-
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index c612ef526d16..3e7d7c4bafdc 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -986,6 +986,7 @@ static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
int i, ret = 0;
ether1_banner();
@@ -1015,7 +1016,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
if (ether1_init_2(dev)) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 27937c5d7956..daec9ce04531 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->dev_addr[5] = readb(eth_addr + 0x06);
iounmap(eth_addr);
- if (!netdevice->irq) {
+ if (netdevice->irq < 0) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
__FILE__, netdevice->base_addr);
+ retval = netdevice->irq;
goto probe_failed;
}
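The fix above works because the platform IRQ lookup APIs return a negative errno on failure, so testing for 0 missed real errors and the error code was never propagated. The idiomatic probe-time check, sketched for a hypothetical device:

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate e.g. -EPROBE_DEFER / -ENXIO */

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}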
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 79aef681ac85..d82eca563266 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -150,7 +150,7 @@ struct rfd_struct
#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-#define RFD_ERR_FTS 0x0080 /* Frame to short */
+#define RFD_ERR_FTS 0x0080 /* Frame too short */
#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
#define RFD_MATCHADD 0x0002 /* status: destination address doesn't match IA (only 82596) */
@@ -250,7 +250,7 @@ struct mcsetup_cmd_struct
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short mc_cnt; /* number of bytes in the MC-List */
- unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
+ unsigned char mc_list[][6]; /* array of 6-byte entries */
};
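mc_list[0][6] was the pre-C99 zero-length-array idiom; mc_list[][6] is the proper flexible array member, which also lets allocations use struct_size() for overflow-checked sizing. A hedged allocation sketch:

static struct mcsetup_cmd_struct *demo_alloc_mc(size_t n)
{
	struct mcsetup_cmd_struct *cmd;

	/* sizeof(*cmd) + n * sizeof(unsigned char[6]), overflow-checked */
	cmd = kzalloc(struct_size(cmd, mc_list, n), GFP_KERNEL);
	if (cmd)
		cmd->mc_cnt = n * 6;	/* byte count, per the field above */

	return cmd;
}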
/*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index b140835d4c23..208c440a602b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -19,6 +19,7 @@
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
+#include <linux/platform_device.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6cb86032ce46..1db5b6790a41 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -159,8 +159,8 @@ static int ehea_nway_reset(struct net_device *dev)
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
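strscpy() is the preferred replacement here because, unlike strlcpy(), it never reads the source beyond what fits in the destination, and its return value makes truncation detectable: the number of characters copied, or -E2BIG. A small sketch:

static bool demo_copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	return n >= 0;		/* false means the source was truncated */
}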
static u32 ehea_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bad94e4d50f4..b4aff59b3eb4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -29,6 +29,8 @@
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <net/ip.h>
@@ -1544,7 +1546,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
kfree(init_attr);
- netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
+ netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);
ret = 0;
goto out;
@@ -1615,7 +1617,7 @@ static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
* For TSO packets we only copy the headers into the
* immediate area.
*/
- immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ immediate_len = skb_tcp_all_headers(skb);
}
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
@@ -2898,6 +2900,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
ret = of_device_register(&port->ofdev);
if (ret) {
pr_err("failed to register device. ret=%d\n", ret);
+ put_device(&port->ofdev.dev);
goto out;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index fbea9f7efe8c..9b08e41ccc29 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2284,8 +2284,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
dev->cell_index, dev->ofdev->dev.of_node);
}
@@ -2979,11 +2979,9 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->ofdev->dev, err,
+ "Can't get valid [local-]mac-address from OF !\n");
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
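dev_err_probe() folds the -EPROBE_DEFER special case away: it stays silent on deferral (recording a deferral reason instead), prints for real errors, and returns the error it was handed, so the if/print/return triple collapses into one statement. Sketched:

static int demo_read_mac(struct device *dev, struct device_node *np,
			 struct net_device *ndev)
{
	int err = of_get_ethdev_address(np, ndev);

	if (err)
		/* quiet on -EPROBE_DEFER, loud otherwise, returns err */
		return dev_err_probe(dev, err, "can't get MAC address\n");

	return 0;
}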
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 075c07303f16..ff5487bbebe3 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -605,8 +605,8 @@ static int mal_probe(struct platform_device *ofdev)
init_dummy_netdev(&mal->dummy_dev);
- netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
- CONFIG_IBM_EMAC_POLL_WEIGHT);
+ netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
+ CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
mal_reset(mal);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 22fb0d109a68..5b96cd94dcd2 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -141,6 +141,13 @@ static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
+static unsigned int ibmveth_real_max_tx_queues(void)
+{
+ unsigned int n_cpu = num_online_cpus();
+
+ return min(n_cpu, IBMVETH_MAX_QUEUES);
+}
+
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
u32 pool_index, u32 pool_size,
@@ -456,6 +463,38 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
+static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+}
+
+static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
+ GFP_KERNEL);
+ if (!adapter->tx_ltb_ptr[idx]) {
+ netdev_err(adapter->netdev,
+ "unable to allocate tx long term buffer\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
+ adapter->tx_ltb_ptr[idx],
+ adapter->tx_ltb_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
+ netdev_err(adapter->netdev,
+ "unable to DMA map tx long term buffer\n");
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -538,6 +577,11 @@ static int ibmveth_open(struct net_device *netdev)
goto out_unmap_buffer_list;
}
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ if (ibmveth_allocate_tx_ltb(adapter, i))
+ goto out_free_tx_ltb;
+ }
+
adapter->rx_queue.index = 0;
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
@@ -595,25 +639,15 @@ static int ibmveth_open(struct net_device *netdev)
rc = -ENOMEM;
- adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
- netdev->mtu + IBMVETH_BUFF_OH,
- &adapter->bounce_buffer_dma, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to alloc bounce buffer\n");
- goto out_free_irq;
- }
-
netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
netdev_dbg(netdev, "open complete\n");
return 0;
-out_free_irq:
- free_irq(netdev->irq, netdev);
out_free_buffer_pools:
while (--i >= 0) {
if (adapter->rx_buff_pool[i].active)
@@ -623,6 +657,12 @@ out_free_buffer_pools:
out_unmap_filter_list:
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
+
+out_free_tx_ltb:
+ while (--i >= 0) {
+ ibmveth_free_tx_ltb(adapter, i);
+ }
+
out_unmap_buffer_list:
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -651,7 +691,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
if (!adapter->pool_config)
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -685,9 +725,8 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_free_coherent(&adapter->vdev->dev,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- adapter->bounce_buffer, adapter->bounce_buffer_dma);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ ibmveth_free_tx_ltb(adapter, i);
netdev_dbg(netdev, "close complete\n");
@@ -727,8 +766,8 @@ static void ibmveth_init_link_settings(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
- strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
+ strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
@@ -953,6 +992,69 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
+static void ibmveth_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ channels->max_tx = ibmveth_real_max_tx_queues();
+ channels->tx_count = netdev->real_num_tx_queues;
+
+ channels->max_rx = netdev->real_num_rx_queues;
+ channels->rx_count = netdev->real_num_rx_queues;
+}
+
+static int ibmveth_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int old = netdev->real_num_tx_queues,
+ goal = channels->tx_count;
+ int rc, i;
+
+ /* If ndo_open has not been called yet then don't allocate, just set
+ * the desired number of netdev_queues and return
+ */
+ if (!(netdev->flags & IFF_UP))
+ return netif_set_real_num_tx_queues(netdev, goal);
+
+ /* We have IBMVETH_MAX_QUEUES netdev_queues allocated
+ * but we may need to alloc/free the LTBs.
+ */
+ netif_tx_stop_all_queues(netdev);
+
+ /* Allocate any queue that we need */
+ for (i = old; i < goal; i++) {
+ if (adapter->tx_ltb_ptr[i])
+ continue;
+
+ rc = ibmveth_allocate_tx_ltb(adapter, i);
+ if (!rc)
+ continue;
+
+ /* if something goes wrong, free everything we just allocated */
+ netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ break;
+ }
+ rc = netif_set_real_num_tx_queues(netdev, goal);
+ if (rc) {
+ netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ }
+ /* Free any that are no longer needed */
+ for (i = old; i > goal; i--) {
+ if (adapter->tx_ltb_ptr[i - 1])
+ ibmveth_free_tx_ltb(adapter, i - 1);
+ }
+
+ netif_tx_wake_all_queues(netdev);
+
+ return rc;
+}
+
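With get_channels/set_channels wired up, the tx queue count becomes tunable at run time through the standard ethtool channel interface, capped at ibmveth_real_max_tx_queues(). Assuming an interface named eth0:

  $ ethtool -l eth0        # show current and maximum channel counts
  $ ethtool -L eth0 tx 4   # request 4 tx queues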
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -961,6 +1063,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ethtool_stats = ibmveth_get_ethtool_stats,
.get_link_ksettings = ibmveth_get_link_ksettings,
.set_link_ksettings = ibmveth_set_link_ksettings,
+ .get_channels = ibmveth_get_channels,
+ .set_channels = ibmveth_set_channels
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -969,7 +1073,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs, unsigned long mss)
+ unsigned long desc, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -982,12 +1086,9 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
retry_count = 1024;
correlator = 0;
do {
- ret = h_send_logical_lan(adapter->vdev->unit_address,
- descs[0].desc, descs[1].desc,
- descs[2].desc, descs[3].desc,
- descs[4].desc, descs[5].desc,
- correlator, &correlator, mss,
- adapter->fw_large_send_support);
+ ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -1020,34 +1121,13 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- unsigned int desc_flags;
- union ibmveth_buf_desc descs[6];
- int last, i;
- int force_bounce = 0;
- dma_addr_t dma_addr;
+ unsigned int desc_flags, total_bytes;
+ union ibmveth_buf_desc desc;
+ int i, queue_num = skb_get_queue_mapping(skb);
unsigned long mss = 0;
if (ibmveth_is_packet_unsupported(skb, netdev))
goto out;
-
- /* veth doesn't handle frag_list, so linearize the skb.
- * When GRO is enabled SKB's can have frag_list.
- */
- if (adapter->is_active_trunk &&
- skb_has_frag_list(skb) && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
- /*
- * veth handles a maximum of 6 segments including the header, so
- * we have to linearize the skb if there are more than this.
- */
- if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
/* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
((skb->protocol == htons(ETH_P_IP) &&
@@ -1077,56 +1157,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags |= IBMVETH_BUF_LRG_SND;
}
-retry_bounce:
- memset(descs, 0, sizeof(descs));
-
- /*
- * If a linear packet is below the rx threshold then
- * copy it into the static bounce buffer. This avoids the
- * cost of a TCE insert and remove.
- */
- if (force_bounce || (!skb_is_nonlinear(skb) &&
- (skb->len < tx_copybreak))) {
- skb_copy_from_linear_data(skb, adapter->bounce_buffer,
- skb->len);
-
- descs[0].fields.flags_len = desc_flags | skb->len;
- descs[0].fields.address = adapter->bounce_buffer_dma;
-
- if (ibmveth_send(adapter, descs, 0)) {
- adapter->tx_send_failed++;
- netdev->stats.tx_dropped++;
- } else {
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- }
-
- goto out;
- }
-
- /* Map the header */
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed;
-
- descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
- descs[0].fields.address = dma_addr;
-
- /* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed_frags;
-
- descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
- descs[i+1].fields.address = dma_addr;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
@@ -1143,7 +1173,36 @@ retry_bounce:
}
}
- if (ibmveth_send(adapter, descs, mss)) {
+ /* Copy header into mapped buffer */
+ if (unlikely(skb->len > adapter->tx_ltb_size)) {
+ netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
+ skb->len, adapter->tx_ltb_size);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
+ total_bytes = skb_headlen(skb);
+ /* Copy frags into mapped buffers */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
+ skb_frag_address_safe(frag), skb_frag_size(frag));
+ total_bytes += skb_frag_size(frag);
+ }
+
+ if (unlikely(total_bytes != skb->len)) {
+ netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
+ skb->len, total_bytes);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ desc.fields.flags_len = desc_flags | skb->len;
+ desc.fields.address = adapter->tx_ltb_dma[queue_num];
+ /* finish writing to long_term_buff before VIOS accessing it */
+ dma_wmb();
+
+ if (ibmveth_send(adapter, desc.desc, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1151,41 +1210,11 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
out:
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
-map_failed_frags:
- last = i+1;
- for (i = 1; i < last; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-map_failed:
- if (!firmware_has_feature(FW_FEATURE_CMO))
- netdev_err(netdev, "tx: unable to map xmit buffer\n");
- adapter->tx_map_failed++;
- if (skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
- force_bounce = 1;
- goto retry_bounce;
}
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
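The reworked xmit path above replaces per-fragment DMA mapping (and the old bounce-buffer fallback) with a single copy of the skb head and every fragment into the queue's premapped long-term buffer, followed by a byte-count sanity check. A minimal standalone sketch of that copy-and-verify pattern — illustrative only, with made-up names and sizes:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	struct frag { const void *addr; unsigned int len; };

	/* Copy head + frags into one flat buffer, as the driver does
	 * into tx_ltb_ptr[queue_num]; returns bytes copied.
	 */
	static unsigned int flatten(void *ltb, unsigned int ltb_size,
				    const void *head, unsigned int head_len,
				    const struct frag *frags, int nfrags,
				    unsigned int pkt_len)
	{
		unsigned int total;
		int i;

		if (pkt_len > ltb_size)	/* would not fit: caller drops packet */
			return 0;

		memcpy(ltb, head, head_len);
		total = head_len;
		for (i = 0; i < nfrags; i++) {
			memcpy((char *)ltb + total, frags[i].addr, frags[i].len);
			total += frags[i].len;
		}
		return total;	/* caller verifies total == pkt_len */
	}

	int main(void)
	{
		char ltb[64], head[8] = "header_", payload[16] = "0123456789abcde";
		struct frag f = { payload, sizeof(payload) };
		unsigned int n = flatten(ltb, sizeof(ltb), head, sizeof(head),
					 &f, 1, sizeof(head) + sizeof(payload));

		assert(n == sizeof(head) + sizeof(payload));
		printf("copied %u bytes\n", n);
		return 0;
	}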
@@ -1568,6 +1597,8 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
+ /* add size of mapped tx buffers */
+ ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1660,8 +1691,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
-
+ netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
if (!netdev)
return -ENOMEM;
@@ -1674,7 +1704,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->pool_config = 0;
ibmveth_init_link_settings(netdev);
- netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+ netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
@@ -1727,6 +1757,18 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
+ rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(),
+ IBMVETH_DEFAULT_QUEUES));
+ if (rc) {
+ netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
+ rc);
+ free_netdev(netdev);
+ return rc;
+ }
+ adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
+ adapter->tx_ltb_ptr[i] = NULL;
+
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
netdev_dbg(netdev, "registering netdev...\n");
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 27dfff200166..115d4c45aa77 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -46,23 +46,23 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+/* FW allows us to send 6 descriptors, but we only use one, so mark
+ * the other 5 as unused (0).
+ */
static inline long h_send_logical_lan(unsigned long unit_address,
- unsigned long desc1, unsigned long desc2, unsigned long desc3,
- unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out,
- unsigned long mss, unsigned long large_send_support)
+ unsigned long desc, unsigned long corellator_in,
+ unsigned long *corellator_out, unsigned long mss,
+ unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
if (large_send_support)
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in, mss);
+ desc, 0, 0, 0, 0, 0, corellator_in, mss);
else
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in);
+ desc, 0, 0, 0, 0, 0, corellator_in);
*corellator_out = retbuf[0];
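With the hcall reduced to one descriptor, everything firmware needs travels in a single u64: flags plus length in one 32-bit half, the mapped buffer's IO address in the other. A small sketch of that packing, assuming the two-u32 union layout from ibmveth.h and treating the 0x80000000 valid flag and the addresses as placeholders (the printed split matches big-endian ppc64; a little-endian host will show the halves swapped):

	#include <stdint.h>
	#include <stdio.h>

	union buf_desc {
		uint64_t desc;			/* what h_send_logical_lan() gets */
		struct {
			uint32_t flags_len;	/* flags in high bits, len in low 24 */
			uint32_t address;	/* IO address of the mapped LTB */
		} fields;
	};

	int main(void)
	{
		union buf_desc d = { 0 };
		uint32_t valid = 0x80000000u;	/* assumed IBMVETH_BUF_VALID */

		d.fields.flags_len = valid | 1514;	/* flags | packet length */
		d.fields.address = 0x12340000u;		/* pretend DMA address */
		printf("desc = 0x%016llx\n", (unsigned long long)d.desc);
		return 0;
	}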
@@ -98,6 +98,9 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
+#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
+#define IBMVETH_MAX_QUEUES 16U
+#define IBMVETH_DEFAULT_QUEUES 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -137,6 +140,9 @@ struct ibmveth_adapter {
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
@@ -145,8 +151,6 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
- void *bounce_buffer;
- dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 59536bd5cab1..9282381a438f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -53,6 +53,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
@@ -60,6 +61,7 @@
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
+#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
@@ -110,6 +112,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -255,12 +258,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
+ u64 prev = 0;
int rc;
if (!reuse_ltb(ltb, size)) {
dev_dbg(dev,
"LTB size changed from 0x%llx to 0x%x, reallocating\n",
ltb->size, size);
+ prev = ltb->size;
free_long_term_buff(adapter, ltb);
}
@@ -281,8 +286,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
bitmap_set(adapter->map_ids, ltb->map_id, 1);
dev_dbg(dev,
- "Allocated new LTB [map %d, size 0x%llx]\n",
- ltb->map_id, ltb->size);
+ "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
+ ltb->map_id, ltb->size, prev);
}
	/* Ensure ltb is zeroed - especially when reusing it. */
@@ -343,6 +348,208 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = 0;
}
+/**
+ * free_ltb_set - free the given set of long term buffers (LTBS)
+ * @adapter: The ibmvnic adapter containing this ltb set
+ * @ltb_set: The ltb_set to be freed
+ *
+ * Free the set of LTBs in the given set.
+ */
+
+static void free_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set)
+{
+ int i;
+
+ for (i = 0; i < ltb_set->num_ltbs; i++)
+ free_long_term_buff(adapter, &ltb_set->ltbs[i]);
+
+ kfree(ltb_set->ltbs);
+ ltb_set->ltbs = NULL;
+ ltb_set->num_ltbs = 0;
+}
+
+/**
+ * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb_set: container object for the set of LTBs
+ * @num_buffs: Number of buffers in the LTB
+ * @buff_size: Size of each buffer in the LTB
+ *
+ * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
+ * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
+ * the new set has fewer LTBs than the old set, free the excess LTBs. If
+ * the new set needs more than the old set had, allocate the remaining
+ * ones. Try to reuse as many LTBs as possible and avoid reallocation.
+ *
+ * Any changes to this allocation strategy must be reflected in
+ * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
+ */
+static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set, int num_buffs,
+ int buff_size)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ltb_set old_set;
+ struct ibmvnic_ltb_set new_set;
+ int rem_size;
+ int tot_size; /* size of all ltbs */
+ int ltb_size; /* size of one ltb */
+ int nltbs;
+ int rc;
+ int n;
+ int i;
+
+ dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
+ buff_size);
+
+ ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
+ tot_size = num_buffs * buff_size;
+
+ if (ltb_size > tot_size)
+ ltb_size = tot_size;
+
+ nltbs = tot_size / ltb_size;
+ if (tot_size % ltb_size)
+ nltbs++;
+
+ old_set = *ltb_set;
+
+ if (old_set.num_ltbs == nltbs) {
+ new_set = old_set;
+ } else {
+ int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
+
+ new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
+ if (!new_set.ltbs)
+ return -ENOMEM;
+
+ new_set.num_ltbs = nltbs;
+
+ /* Free any excess ltbs in old set */
+ for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
+ free_long_term_buff(adapter, &old_set.ltbs[i]);
+
+ /* Copy remaining ltbs to new set. All LTBs except the
+ * last one are of the same size. alloc_long_term_buff()
+ * will realloc if the size changes.
+ */
+ n = min(old_set.num_ltbs, new_set.num_ltbs);
+ for (i = 0; i < n; i++)
+ new_set.ltbs[i] = old_set.ltbs[i];
+
+ /* Any additional ltbs in new set will have NULL ltbs for
+ * now and will be allocated in alloc_long_term_buff().
+ */
+
+		/* We no longer need the old_set so free it. Note that we
+		 * may have reused some ltbs from the old set and freed
+		 * excess ltbs above. So we only need to free the container
+		 * now, not the LTBs themselves (i.e. don't free_ltb_set()!).
+ */
+ kfree(old_set.ltbs);
+ old_set.ltbs = NULL;
+ old_set.num_ltbs = 0;
+
+ /* Install the new set. If allocations fail below, we will
+ * retry later and know what size LTBs we need.
+ */
+ *ltb_set = new_set;
+ }
+
+ i = 0;
+ rem_size = tot_size;
+ while (rem_size) {
+ if (ltb_size > rem_size)
+ ltb_size = rem_size;
+
+ rem_size -= ltb_size;
+
+ rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
+ if (rc)
+ goto out;
+ i++;
+ }
+
+ WARN_ON(i != new_set.num_ltbs);
+
+ return 0;
+out:
+	/* We may have allocated one or more LTBs before failing and we
+	 * want to try to reuse them on the next reset. So don't free the
+	 * LTB set.
+	 */
+ return rc;
+}
+
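The sizing arithmetic above is easiest to see in isolation: rounddown() keeps each LTB an exact multiple of buff_size so no buffer straddles two LTBs, and the total is then split across as few LTBs as possible, with only the last one trimmed. A standalone sketch — the 8 MB cap stands in for IBMVNIC_ONE_LTB_SIZE and is an assumption here, as are the MTU numbers:

	#include <stdio.h>

	#define ONE_LTB_SIZE (8 << 20)	/* assumed cap per LTB */

	int main(void)
	{
		int buff_size = 9000 + 256;	/* e.g. jumbo MTU + headroom */
		int num_buffs = 4096;
		/* rounddown(ONE_LTB_SIZE, buff_size) */
		int ltb_size = ONE_LTB_SIZE - (ONE_LTB_SIZE % buff_size);
		int tot_size = num_buffs * buff_size;
		int nltbs;

		if (ltb_size > tot_size)
			ltb_size = tot_size;
		nltbs = tot_size / ltb_size + (tot_size % ltb_size ? 1 : 0);

		printf("%d buffers of %d bytes -> %d LTB(s) of <= %d bytes\n",
		       num_buffs, buff_size, nltbs, ltb_size);
		return 0;
	}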
+/**
+ * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
+ * @rxpool: The receive buffer pool containing buffer
+ * @bufidx: Index of buffer in rxpool
+ * @ltbp: (Output) pointer to the long term buffer containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
+ * pool and its corresponding offset. For now assume each LTB can be of a
+ * different size; this walk could be optimized based on the allocation
+ * strategy in alloc_ltb_set().
+ */
+static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON(bufidx >= rxpool->size);
+
+ for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
+ ltb = &rxpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / rxpool->buff_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * rxpool->buff_size;
+}
+
+/**
+ * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
+ * @txpool: The transmit buffer pool containing buffer
+ * @bufidx: Index of buffer in txpool
+ * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [txpool, bufidx] to an LTB in the
+ * pool and its corresponding offset.
+ */
+static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON_ONCE(bufidx >= txpool->num_buffers);
+
+ for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
+ ltb = &txpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / txpool->buf_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * txpool->buf_size;
+}
+
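Both mapping helpers do the same walk: subtract whole-LTB buffer counts from the index until it lands inside one LTB, then scale the remainder by the buffer size. A toy standalone version, with sizes invented for illustration (only the last LTB may be smaller):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ltb_sizes[] = { 9000, 9000, 4500 };	/* bytes per LTB */
		unsigned int buff_size = 1500, bufidx = 9, i, nbufs;

		for (i = 0; i < 3; i++) {
			nbufs = ltb_sizes[i] / buff_size;	/* buffers in this LTB */
			if (bufidx < nbufs)
				break;
			bufidx -= nbufs;	/* skip past this LTB's buffers */
		}
		/* bufidx 9 -> LTB 1, offset 4500 with these numbers */
		printf("buffer lives in LTB %u at offset %u\n",
		       i, bufidx * buff_size);
		return 0;
	}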
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -359,6 +566,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -367,7 +575,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
dma_addr_t dma_addr;
unsigned char *dst;
int shift = 0;
- int index;
+ int bufidx;
int i;
if (!pool->active)
@@ -383,14 +591,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- index = pool->free_map[pool->next_free];
+ bufidx = pool->free_map[pool->next_free];
/* We maybe reusing the skb from earlier resets. Allocate
* only if necessary. But since the LTB may have changed
* during reset (see init_rx_pools()), update LTB below
* even if reusing skb.
*/
- skb = pool->rx_buff[index].skb;
+ skb = pool->rx_buff[bufidx].skb;
if (!skb) {
skb = netdev_alloc_skb(adapter->netdev,
pool->buff_size);
@@ -405,26 +613,26 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
- offset = index * pool->buff_size;
- dst = pool->long_term_buff.buff + offset;
+ map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
+ dst = ltb->buff + offset;
memset(dst, 0, pool->buff_size);
- dma_addr = pool->long_term_buff.addr + offset;
+ dma_addr = ltb->addr + offset;
/* add the skb to an rx_buff in the pool */
- pool->rx_buff[index].data = dst;
- pool->rx_buff[index].dma = dma_addr;
- pool->rx_buff[index].skb = skb;
- pool->rx_buff[index].pool_index = pool->index;
- pool->rx_buff[index].size = pool->buff_size;
+ pool->rx_buff[bufidx].data = dst;
+ pool->rx_buff[bufidx].dma = dma_addr;
+ pool->rx_buff[bufidx].skb = skb;
+ pool->rx_buff[bufidx].pool_index = pool->index;
+ pool->rx_buff[bufidx].size = pool->buff_size;
/* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
sub_crq->rx_add.correlator =
- cpu_to_be64((u64)&pool->rx_buff[index]);
+ cpu_to_be64((u64)&pool->rx_buff[bufidx]);
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.map_id = ltb->map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -464,10 +672,10 @@ failure:
sub_crq = &ind_bufp->indir_arr[i];
rx_buff = (struct ibmvnic_rx_buff *)
be64_to_cpu(sub_crq->rx_add.correlator);
- index = (int)(rx_buff - pool->rx_buff);
- pool->free_map[pool->next_free] = index;
- dev_kfree_skb_any(pool->rx_buff[index].skb);
- pool->rx_buff[index].skb = NULL;
+ bufidx = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = bufidx;
+ dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+ pool->rx_buff[bufidx].skb = NULL;
}
adapter->replenish_add_buff_failure += ind_bufp->index;
atomic_add(buffers_added, &pool->available);
@@ -577,7 +785,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(rx_pool->free_map);
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ free_ltb_set(adapter, &rx_pool->ltb_set);
if (!rx_pool->rx_buff)
continue;
@@ -722,8 +930,8 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size);
+ rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
+ rx_pool->size, rx_pool->buff_size);
if (rc)
goto out;
@@ -780,7 +988,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
{
kfree(tx_pool->tx_buff);
kfree(tx_pool->free_map);
- free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ free_ltb_set(adapter, &tx_pool->ltb_set);
}
/**
@@ -970,17 +1178,16 @@ update_ltb:
for (i = 0; i < num_pools; i++) {
struct ibmvnic_tx_pool *tso_pool;
struct ibmvnic_tx_pool *tx_pool;
- u32 ltb_size;
tx_pool = &adapter->tx_pool[i];
- ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
- i, tx_pool->long_term_buff.buff,
- tx_pool->num_buffers, tx_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
+ i, tx_pool->num_buffers, tx_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
+ tx_pool->num_buffers, tx_pool->buf_size);
+ if (rc)
+ goto out;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
@@ -989,14 +1196,14 @@ update_ltb:
tx_pool->free_map[j] = j;
tso_pool = &adapter->tso_pool[i];
- ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
- i, tso_pool->long_term_buff.buff,
- tso_pool->num_buffers, tso_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
+ i, tso_pool->num_buffers, tso_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
+ tso_pool->num_buffers, tso_pool->buf_size);
+ if (rc)
+ goto out;
tso_pool->consumer_index = 0;
tso_pool->producer_index = 0;
@@ -1055,7 +1262,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
netif_napi_add(adapter->netdev, &adapter->napi[i],
- ibmvnic_poll, NAPI_POLL_WEIGHT);
+ ibmvnic_poll);
}
adapter->num_active_rx_napi = adapter->req_rx_queues;
@@ -1424,10 +1631,19 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
- release_resources(adapter);
+ ibmvnic_disable_irqs(adapter);
return rc;
}
+ adapter->tx_queues_active = true;
+
+	/* Since the queues were stopped until now, there shouldn't be
+	 * anyone in ibmvnic_complete_tx() or ibmvnic_xmit(), so we may
+	 * not need the synchronize_rcu()? Leaving it for consistency
+	 * with setting ->tx_queues_active = false.
+	 */
+ synchronize_rcu();
+
netif_tx_start_all_queues(netdev);
if (prev_state == VNIC_CLOSED) {
@@ -1474,9 +1690,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
- release_resources(adapter);
- release_rx_pools(adapter);
- release_tx_pools(adapter);
goto out;
}
}
@@ -1493,6 +1706,13 @@ out:
adapter->state = VNIC_OPEN;
rc = 0;
}
+
+ if (rc) {
+ release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ }
+
return rc;
}
@@ -1598,6 +1818,14 @@ static void ibmvnic_cleanup(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
/* ensure that transmissions are stopped if called by do_reset */
+
+ adapter->tx_queues_active = false;
+
+ /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
+ * update so they don't restart a queue after we stop it below.
+ */
+ synchronize_rcu();
+
if (test_bit(0, &adapter->resetting))
netif_tx_disable(netdev);
else
@@ -1837,14 +2065,21 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_buff->skb = NULL;
adapter->netdev->stats.tx_dropped++;
}
+
ind_bufp->index = 0;
+
if (atomic_sub_return(entries, &tx_scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
- __netif_subqueue_stopped(adapter->netdev, queue_num) &&
- !test_bit(0, &adapter->resetting)) {
- netif_wake_subqueue(adapter->netdev, queue_num);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- queue_num);
+ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ rcu_read_lock();
+
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev, queue_num);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ queue_num);
+ }
+
+ rcu_read_unlock();
}
}
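Stripped of driver detail, the ->tx_queues_active handshake used above and in ibmvnic_cleanup() is a plain RCU flag pattern. A schematic sketch of the two sides — not from the patch; wake_queue() and stop_queues() are placeholders for the netif_* calls:

	static bool tx_active;		/* stands in for adapter->tx_queues_active */

	static void reader_side(void)	/* xmit / completion paths */
	{
		rcu_read_lock();
		if (READ_ONCE(tx_active))
			/* safe to wake: the reset side cannot return from
			 * synchronize_rcu() while we are in this section
			 */
			wake_queue();
		rcu_read_unlock();
	}

	static void writer_side(void)	/* reset / cleanup path */
	{
		WRITE_ONCE(tx_active, false);
		synchronize_rcu();	/* every reader saw old or new value and left */
		stop_queues();		/* no reader can re-wake a queue now */
	}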
@@ -1881,6 +2116,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -1896,14 +2132,15 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- int index = 0;
+ int bufidx = 0;
u8 proto = 0;
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, queue_num);
- ind_bufp = &tx_scrq->ind_buf;
-
- if (test_bit(0, &adapter->resetting)) {
+	/* If a reset is in progress, drop the packet since
+	 * the scrqs may get torn down. Otherwise use RCU to
+	 * ensure the reset waits for us to complete.
+	 */
+ rcu_read_lock();
+ if (!adapter->tx_queues_active) {
dev_kfree_skb_any(skb);
tx_send_failed++;
@@ -1912,6 +2149,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
}
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, queue_num);
+ ind_bufp = &tx_scrq->ind_buf;
+
if (ibmvnic_xmit_workarounds(skb, netdev)) {
tx_dropped++;
tx_send_failed++;
@@ -1919,14 +2160,15 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
+
if (skb_is_gso(skb))
tx_pool = &adapter->tso_pool[queue_num];
else
tx_pool = &adapter->tx_pool[queue_num];
- index = tx_pool->free_map[tx_pool->consumer_index];
+ bufidx = tx_pool->free_map[tx_pool->consumer_index];
- if (index == IBMVNIC_INVALID_MAP) {
+ if (bufidx == IBMVNIC_INVALID_MAP) {
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
@@ -1937,10 +2179,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
- offset = index * tx_pool->buf_size;
- dst = tx_pool->long_term_buff.buff + offset;
+ map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
+
+ dst = ltb->buff + offset;
memset(dst, 0, tx_pool->buf_size);
- data_dma_addr = tx_pool->long_term_buff.addr + offset;
+ data_dma_addr = ltb->addr + offset;
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -1967,9 +2210,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
- tx_buff = &tx_pool->tx_buff[index];
+ tx_buff = &tx_pool->tx_buff[bufidx];
tx_buff->skb = skb;
- tx_buff->index = index;
+ tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
memset(&tx_crq, 0, sizeof(tx_crq));
@@ -1981,10 +2224,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_is_gso(skb))
tx_crq.v1.correlator =
- cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+ cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
else
- tx_crq.v1.correlator = cpu_to_be32(index);
- tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ tx_crq.v1.correlator = cpu_to_be32(bufidx);
+ tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
@@ -2073,6 +2316,7 @@ tx_err:
netif_carrier_off(netdev);
}
out:
+ rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
netdev->stats.tx_packets += tx_packets;
@@ -2208,6 +2452,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
}
/*
+ * Initialize the init_done completion and return code values. We
+ * can get a transport event just after registering the CRQ and the
+ * tasklet will use this to communicate the transport event. To ensure
+ * we don't miss the notification/error, initialize these _before_
+ * registering the CRQ.
+ */
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
+{
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+}
+
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2313,6 +2570,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_init_done(adapter);
+
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
rc = init_crq_queue(adapter);
} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
@@ -2456,7 +2715,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- reinit_completion(&adapter->init_done);
+ reinit_init_done(adapter);
+
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -2597,22 +2857,82 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_adapter *adapter;
- bool saved_state = false;
+ unsigned int timeout = 5000;
struct ibmvnic_rwi *tmprwi;
+ bool saved_state = false;
struct ibmvnic_rwi *rwi;
unsigned long flags;
+ struct device *dev;
+ bool need_reset;
+ int num_fails = 0;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ dev = &adapter->vdev->dev;
- if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ /* Wait for ibmvnic_probe() to complete. If probe is taking too long
+ * or if another reset is in progress, defer work for now. If probe
+ * eventually fails it will flush and terminate our work.
+ *
+ * Three possibilities here:
+	 * 1. Adapter being removed - just return
+ * 2. Timed out on probe or another reset in progress - delay the work
+ * 3. Completed probe - perform any resets in queue
+ */
+ if (adapter->state == VNIC_PROBING &&
+ !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
+		dev_err(dev, "Reset thread timed out on probe\n");
queue_delayed_work(system_long_wq,
&adapter->ibmvnic_delayed_reset,
IBMVNIC_RESET_DELAY);
return;
}
+	/* adapter is done with probe (i.e. state is never VNIC_PROBING now) */
+ if (adapter->state == VNIC_REMOVING)
+ return;
+
+ /* ->rwi_list is stable now (no one else is removing entries) */
+
+	/* ibmvnic_probe() may have purged the reset queue after we were
+	 * scheduled to process a reset, so there may be no resets to
+	 * process. Before setting the ->resetting bit though, we have to
+	 * make sure that there is in fact a reset to process. Otherwise
+	 * we may race with ibmvnic_open() and end up leaving the vnic down:
+ *
+ * __ibmvnic_reset() ibmvnic_open()
+ * ----------------- --------------
+ *
+ * set ->resetting bit
+ * find ->resetting bit is set
+ * set ->state to IBMVNIC_OPEN (i.e
+ * assume reset will open device)
+ * return
+ * find reset queue empty
+ * return
+ *
+ * Neither performed vnic login/open and vnic stays down
+ *
+ * If we hold the lock and conditionally set the bit, either we
+ * or ibmvnic_open() will complete the open.
+ */
+ need_reset = false;
+ spin_lock(&adapter->rwi_lock);
+ if (!list_empty(&adapter->rwi_list)) {
+ if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ queue_delayed_work(system_long_wq,
+ &adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
+ } else {
+ need_reset = true;
+ }
+ }
+ spin_unlock(&adapter->rwi_lock);
+
+ if (!need_reset)
+ return;
+
rwi = get_next_rwi(adapter);
while (rwi) {
spin_lock_irqsave(&adapter->state_lock, flags);
@@ -2655,11 +2975,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- if (rc) {
- /* give backing device time to settle down */
+ if (rc)
+ num_fails++;
+ else
+ num_fails = 0;
+
+		/* If auto-priority-failover is enabled we can get
+		 * back-to-back failovers during resets, resulting
+		 * in at least two failed resets (from high-priority
+		 * backing device to low-priority one and then back).
+		 * If resets continue to fail beyond that, give the
+		 * adapter some time to settle down before retrying.
+		 */
+ if (num_fails >= 3) {
netdev_dbg(adapter->netdev,
- "[S:%s] Hard reset failed, waiting 60 secs\n",
- adapter_state_to_string(adapter->state));
+ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state),
+ num_fails);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -2675,19 +3007,19 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
/*
- * If there is another reset queued, free the previous rwi
- * and process the new reset even if previous reset failed
- * (the previous reset could have failed because of a fail
- * over for instance, so process the fail over).
- *
* If there are no resets queued and the previous reset failed,
* the adapter would be in an undefined state. So retry the
* previous reset as a hard reset.
+ *
+	 * Else, free the previous rwi and, if there is another reset
+	 * queued, process the new reset even if the previous reset failed
+	 * (the previous reset could have failed because of a failover,
+	 * for instance, so process the failover).
*/
- if (rwi)
- kfree(tmprwi);
- else if (rc)
+ if (!rwi && rc)
rwi = tmprwi;
+ else
+ kfree(tmprwi);
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
@@ -2717,12 +3049,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work)
__ibmvnic_reset(&adapter->ibmvnic_reset);
}
+static void flush_reset_queue(struct ibmvnic_adapter *adapter)
+{
+ struct list_head *entry, *tmp_entry;
+
+ if (!list_empty(&adapter->rwi_list)) {
+ list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
+ list_del(entry);
+ kfree(list_entry(entry, struct ibmvnic_rwi, list));
+ }
+ }
+}
+
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
enum ibmvnic_reset_reason reason)
{
- struct list_head *entry, *tmp_entry;
- struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ struct ibmvnic_rwi *rwi, *tmp;
unsigned long flags;
int ret;
@@ -2741,13 +3084,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- if (adapter->state == VNIC_PROBING) {
- netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = -EAGAIN;
- ret = EAGAIN;
- goto err;
- }
-
list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
@@ -2765,10 +3101,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
/* if we just received a transport event,
* flush reset queue and process this reset
*/
- if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
- list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
- list_del(entry);
- }
+ if (adapter->force_reset_recovery)
+ flush_reset_queue(adapter);
+
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
@@ -3082,13 +3417,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
- } else {
- ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- }
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -3103,23 +3433,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
+ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+ netdev_err(netdev, "Invalid request.\n");
+		netdev_err(netdev, "Max tx buffers = %llu\n",
+			   adapter->max_tx_entries_per_subcrq);
+		netdev_err(netdev, "Max rx buffers = %llu\n",
+			   adapter->max_rx_add_entries_per_subcrq);
+ return -EINVAL;
+ }
+
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
- adapter->req_tx_entries_per_subcrq != ring->tx_pending))
- netdev_info(netdev,
- "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- ring->rx_pending, ring->tx_pending,
- adapter->req_rx_add_entries_per_subcrq,
- adapter->req_tx_entries_per_subcrq);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -3127,14 +3455,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
- } else {
- channels->max_rx = IBMVNIC_MAX_QUEUES;
- channels->max_tx = IBMVNIC_MAX_QUEUES;
- }
-
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -3147,22 +3469,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_queues != channels->rx_count ||
- adapter->req_tx_queues != channels->tx_count))
- netdev_info(netdev,
- "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- channels->rx_count, channels->tx_count,
- adapter->req_rx_queues, adapter->req_tx_queues);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3170,43 +3481,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
- i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ if (stringset != ETH_SS_STATS)
+ return;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
- }
- break;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- ibmvnic_priv_flags[i]);
- break;
- default:
- return;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
}
}
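For reference, ETH_SS_STATS strings live in a flat array of fixed-width ETH_GSTRING_LEN (32-byte) slots, which is why the straightened-out loop above advances data by ETH_GSTRING_LEN after every snprintf(). A quick standalone demonstration of the layout, with GSTRING_LEN standing in for the kernel constant:

	#include <stdio.h>
	#include <string.h>

	#define GSTRING_LEN 32	/* stands in for ETH_GSTRING_LEN */

	int main(void)
	{
		char buf[4 * GSTRING_LEN];
		char *data = buf;
		int i;

		memset(buf, 0, sizeof(buf));
		for (i = 0; i < 2; i++) {
			snprintf(data, GSTRING_LEN, "tx%d_packets", i);
			data += GSTRING_LEN;
			snprintf(data, GSTRING_LEN, "tx%d_bytes", i);
			data += GSTRING_LEN;
		}
		for (i = 0; i < 4; i++)	/* each name sits at a 32-byte stride */
			printf("slot %d: %s\n", i, buf + i * GSTRING_LEN);
		return 0;
	}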
@@ -3219,8 +3519,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
- case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -3273,26 +3571,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
- return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
- if (which_maxes)
- adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
- else
- adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
- return 0;
-}
-
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -3306,8 +3584,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
- .get_priv_flags = ibmvnic_get_priv_flags,
- .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
@@ -3544,6 +3820,30 @@ static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
return rc;
}
+/* We cannot use the IRQ chip EOI handler because that has the
+ * unintended effect of changing the interrupt priority.
+ */
+static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
+{
+ u64 val = 0xff000000 | scrq->hw_irq;
+ unsigned long rc;
+
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
+}
+
+/* Due to a firmware bug, the hypervisor can send an interrupt to a
+ * transmit or receive queue just prior to a partition migration.
+ * Force an EOI after migration.
+ */
+static void ibmvnic_clear_pending_interrupt(struct device *dev,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ if (!xive_enabled())
+ ibmvnic_xics_eoi(dev, scrq);
+}
+
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -3557,15 +3857,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
-
- rc = plpar_hcall_norets(H_EOI, val);
- /* H_EOI would fail with rc = H_FUNCTION when running
- * in XIVE mode which is expected, but not an error.
- */
- if (rc && (rc != H_FUNCTION))
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ ibmvnic_clear_pending_interrupt(dev, scrq);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -3636,9 +3928,15 @@ restart_loop:
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) {
- netif_wake_subqueue(adapter->netdev, scrq->pool_index);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- scrq->pool_index);
+ rcu_read_lock();
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+ rcu_read_unlock();
}
}
@@ -3844,11 +4142,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
+ int cap_reqs;
+
+ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+ if (!(adapter->netdev->flags & IFF_PROMISC) ||
+ adapter->promisc_supported)
+ cap_reqs = 7;
+ else
+ cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3867,16 +4179,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->desired.rx_entries =
adapter->max_rx_add_entries_per_subcrq;
- max_entries = IBMVNIC_MAX_LTB_SIZE /
+ max_entries = IBMVNIC_LTB_SET_SIZE /
(adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.tx_entries = max_entries;
}
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.rx_entries = max_entries;
}
@@ -3909,44 +4221,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ } else {
+ atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
-
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3954,16 +4267,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
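The bookkeeping pattern introduced in send_request_cap() and send_query_cap(), in miniature: seed running_cap_crqs with the exact number of requests before sending any, decrement a local count per send, and flag any mismatch at the end. A standalone sketch, illustrative only:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int running_cap_crqs;	/* responses still outstanding */

	static void send_one(int *cap_reqs)
	{
		/* a real ibmvnic_send_crq() would go here */
		(*cap_reqs)--;
	}

	int main(void)
	{
		int cap_reqs = 7;	/* expected REQUEST_CAPABILITY CRQs */
		int i;

		/* set the full count up front, before the first send, so a
		 * fast response cannot see a partial count
		 */
		atomic_store(&running_cap_crqs, cap_reqs);
		for (i = 0; i < 7; i++)
			send_one(&cap_reqs);

		assert(cap_reqs == 0);	/* mirrors WARN_ON(cap_reqs != 0) */
		printf("outstanding responses: %d\n",
		       atomic_load(&running_cap_crqs));
		return 0;
	}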
@@ -4357,118 +4675,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
+ int cap_reqs;
+
+ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+	 * can send out the next protocol message (REQUEST_CAPABILITY).
+ */
+ cap_reqs = 25;
+
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
- atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_crqs);
+
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4772,6 +5104,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
+ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+ atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -4835,10 +5169,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_query_ip_offload(adapter);
- }
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5136,10 +5468,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_request_cap(adapter, 0);
- }
}
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5271,9 +5601,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
}
if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
if (!adapter->init_done_rc)
adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
}
break;
@@ -5296,6 +5626,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
adapter->fw_done_rc = -EIO;
complete(&adapter->fw_done);
}
+
+ /* if we got here during crq-init, retry crq-init */
+ if (!completion_done(&adapter->init_done)) {
+ adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
+ }
+
if (!completion_done(&adapter->stats_done))
complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
@@ -5435,33 +5772,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
- bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
- /* This barrier makes sure ibmvnic_next_crq()'s
- * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
- * before ibmvnic_handle_crq()'s
- * switch(gen_crq->first) and switch(gen_crq->cmd).
- */
- dma_rmb();
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- }
- /* remain in tasklet until all
- * capabilities responses are received
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
*/
- if (!adapter->wait_capability)
- done = true;
+ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
}
- /* if capabilities CRQ's were sent in this tasklet, the following
- * tasklet must wait until all responses are received
- */
- if (atomic_read(&adapter->running_cap_crqs) != 0)
- adapter->wait_capability = true;
+
spin_unlock_irqrestore(&queue->lock, flags);
}
@@ -5624,10 +5949,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
adapter->from_passive_init = false;
- if (reset)
- reinit_completion(&adapter->init_done);
-
- adapter->init_done_rc = 0;
rc = ibmvnic_send_crq_init(adapter);
if (rc) {
dev_err(dev, "Send crq init failed with error %d\n", rc);
@@ -5641,12 +5962,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->init_done_rc) {
release_crq_queue(adapter);
+ dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
return adapter->init_done_rc;
}
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
+ dev_err(dev, "CRQ-init failed, passive-init\n");
return -EINVAL;
}
@@ -5658,6 +5981,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
release_sub_crqs(adapter, 0);
rc = init_sub_crqs(adapter);
} else {
+ /* no need to reinitialize completely, but we do
+ * need to clean up transmits that were in flight
+ * when we processed the reset. Failure to do so
+ * will confound the upper layer, usually TCP, by
+ * creating the illusion of transmits that are
+ * awaiting completion.
+ */
+ clean_tx_pools(adapter);
+
rc = reset_sub_crq_queues(adapter);
}
} else {
@@ -5686,6 +6018,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ unsigned long flags;
bool init_success;
int rc;
@@ -5730,6 +6063,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->rwi_lock);
spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
+ init_completion(&adapter->probe_done);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
init_completion(&adapter->reset_done);
@@ -5740,6 +6074,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_success = false;
do {
+ reinit_init_done(adapter);
+
+ /* clear any failovers we got in the previous pass
+ * since we are reinitializing the CRQ
+ */
+ adapter->failover_pending = false;
+
+ /* If we had already initialized CRQ, we may have one or
+ * more resets queued already. Discard those and release
+ * the CRQ before initializing the CRQ again.
+ */
+ release_crq_queue(adapter);
+
+ /* Since we are still in PROBING state, __ibmvnic_reset()
+ * will not access the ->rwi_list and since we released CRQ,
+ * we won't get _new_ transport events. But there may be an
+ * ongoing ibmvnic_reset() call. So serialize access to
+ * rwi_list. If we win the race, ibmvnic_reset() could add
+ * a reset after we purged but that's OK - we just may end
+ * up with an extra reset (i.e. similar to having two or more
+ * resets in the queue at once).
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
rc = init_crq_queue(adapter);
if (rc) {
dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
@@ -5771,12 +6132,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto ibmvnic_register_fail;
- }
- dev_info(&dev->dev, "ibmvnic registered\n");
if (init_success) {
adapter->state = VNIC_PROBED;
@@ -5789,6 +6144,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ goto ibmvnic_register_fail;
+ }
+ dev_info(&dev->dev, "ibmvnic registered\n");
+
+ complete(&adapter->probe_done);
+
return 0;
ibmvnic_register_fail:
@@ -5803,6 +6168,17 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+
+ /* cleanup worker thread after releasing CRQ so we don't get
+ * transport events (i.e. new work items for the worker thread).
+ */
+ adapter->state = VNIC_REMOVING;
+ complete(&adapter->probe_done);
+ flush_work(&adapter->ibmvnic_reset);
+ flush_delayed_work(&adapter->ibmvnic_delayed_reset);
+
+ flush_reset_queue(adapter);
+
mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
@@ -5879,10 +6255,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
be64_to_cpu(session_token));
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_SESSION_ERR_DETECTED, session_token, 0, 0);
- if (rc)
+ if (rc) {
netdev_err(netdev,
"H_VIOCTL initiated failover failed, rc %ld\n",
rc);
+ goto last_resort;
+ }
+
+ return count;
last_resort:
netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4a8f36e0ab07..e5c6ff3d0c47 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -36,16 +36,52 @@
#define IBMVNIC_TSO_BUFS 64
#define IBMVNIC_TSO_POOL_MASK 0x80000000
-#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
-#define IBMVNIC_BUFFER_HLEN 500
+/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool
+ * has a set of buffers. The size of each buffer is determined by the MTU.
+ *
+ * Each Rx/Tx pool is also associated with a DMA region that is shared
+ * with the "hardware" (VIOS) and used to send/receive packets. The DMA
+ * region is also referred to as a Long Term Buffer or LTB.
+ *
+ * The size of the DMA region required for an Rx/Tx pool depends on the
+ * number and size (MTU) of the buffers in the pool. At the max level
+ * of 4096 jumbo frames (MTU=9000) we will need about 9K * 4K = 36MB plus
+ * some padding.
+ *
+ * But the size of a single DMA region is limited by MAX_ORDER in the
+ * kernel (about 16MB currently). To support, say, 4K jumbo frames, we
+ * use a set of LTBs (struct ltb_set) per pool.
+ *
+ * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
+ * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
+ * (must be <= IBMVNIC_ONE_LTB_MAX)
+ * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
+ *
+ * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
+ * have a fixed size of IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS = 4MB each.
+ *
+ * The Rx and Tx pools can have up to 4096 buffers. The max size of these
+ * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
+ * So we set IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
+ *
+ * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
+ * the allocation of the LTB can fail when the system is low on memory. If
+ * it's too small, we would need several mappings for each of the Rx/
+ * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
+ * VNIC protocol.
+ *
+ * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
+ * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB for each
+ * 4MB TSO pool. Thus the 16 Rx and Tx queues require 32 * 5 = 160
+ * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
+ */
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
+#define IBMVNIC_LTB_SET_SIZE (38 << 20)
+#define IBMVNIC_BUFFER_HLEN 500
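
/* Illustrative sketch, not part of this patch: the arithmetic behind the
 * 176-mapping figure in the comment above, using the defines just
 * introduced. With 8MB LTBs and a 38MB set, each Rx/Tx pool needs
 * DIV_ROUND_UP(38MB, 8MB) = 5 LTBs; each 4MB TSO pool fits in one.
 */
static inline unsigned int example_ltb_mappings(void)
{
        unsigned int per_pool = DIV_ROUND_UP(IBMVNIC_LTB_SET_SIZE,
                                             IBMVNIC_ONE_LTB_SIZE); /* 5 */

        return 16 * per_pool    /* Rx pools */
               + 16 * per_pool  /* Tx pools */
               + 16;            /* TSO pools, 1 LTB each: 176 total */
}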
#define IBMVNIC_RESET_DELAY 100
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
- "use-server-maxes"
-};
-
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -798,6 +834,11 @@ struct ibmvnic_long_term_buff {
u8 map_id;
};
+struct ibmvnic_ltb_set {
+ int num_ltbs;
+ struct ibmvnic_long_term_buff *ltbs;
+};
+
struct ibmvnic_tx_buff {
struct sk_buff *skb;
int index;
@@ -810,7 +851,7 @@ struct ibmvnic_tx_pool {
int *free_map;
int consumer_index;
int producer_index;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
int num_buffers;
int buf_size;
} ____cacheline_aligned;
@@ -833,7 +874,7 @@ struct ibmvnic_rx_pool {
int next_free;
int next_alloc;
int active;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;
struct ibmvnic_vpd {
@@ -883,7 +924,6 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
- u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
@@ -919,7 +959,6 @@ struct ibmvnic_adapter {
int login_rsp_buf_sz;
atomic_t running_cap_crqs;
- bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
@@ -931,6 +970,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_pool *tso_pool;
+ struct completion probe_done;
struct completion init_done;
int init_done_rc;
@@ -1006,11 +1046,14 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
- bool napi_enabled, from_passive_init;
- bool login_pending;
/* last device reset time */
unsigned long last_reset_time;
+ bool napi_enabled;
+ bool from_passive_init;
+ bool login_pending;
+ /* protected by rcu */
+ bool tx_queues_active;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 4a8013f20152..560d1d442232 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1430,7 +1430,6 @@ static int e100_phy_check_without_mii(struct nic *nic)
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
#define NSC_CONG_TXREADY 0x0400
-#define ADVERTISE_FC_SUPPORTED 0x0400
static int e100_phy_init(struct nic *nic)
{
struct net_device *netdev = nic->netdev;
@@ -2432,8 +2431,8 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev),
sizeof(info->bus_info));
}
@@ -2848,7 +2847,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
nic = netdev_priv(netdev);
- netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
nic->netdev = netdev;
nic->pdev = pdev;
nic->msg_enable = (1 << debug) - 1;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 32803b0cf1e8..d06d29c6c037 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -531,10 +531,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000_driver_name,
+ strscpy(drvinfo->driver, e1000_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 1042e79a1397..4542e2bc28e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -2000,7 +2000,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
@@ -4376,7 +4376,7 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
/**
* e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
* @hw: Struct containing variables accessed by shared code
- * @offset: Offset in VLAN filer table to write
+ * @offset: Offset in VLAN filter table to write
* @value: Value to write into VLAN filter table
*/
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
@@ -4396,7 +4396,7 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
- * e1000_clear_vfta - Clears the VLAN filer table
+ * e1000_clear_vfta - Clears the VLAN filter table
* @hw: Struct containing variables accessed by shared code
*/
static void e1000_clear_vfta(struct e1000_hw *hw)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f5feb55cfba..61e60e4de600 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1012,7 +1012,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, e1000_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -2708,7 +2708,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -3139,7 +3139,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
case e1000_82544: {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 4d4f5bf1e516..f4154ca7fcb4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -82,7 +82,6 @@ E1000_PARAM(Duplex, "Duplex setting");
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -95,7 +94,6 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c3def0ee7788..e8a9a9610ac6 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -115,7 +115,8 @@ enum e1000_boards {
board_pch_lpt,
board_pch_spt,
board_pch_cnp,
- board_pch_tgp
+ board_pch_tgp,
+ board_pch_adp
};
struct e1000_ps_page {
@@ -328,7 +329,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
- s32 ptp_delta;
+ long ptp_delta;
u16 eee_advert;
};
@@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
+extern const struct e1000_info e1000_pch_adp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index b80ae9a82224..51a5afe9df2f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -639,7 +639,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
@@ -650,7 +650,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 5e4fc9b4e2ad..9466f65a6da7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
- u16 lat_enc_d = 0; /* latency decoded */
+ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
@@ -4136,9 +4136,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
if (!(data & valid_csum_mask)) {
- e_dbg("NVM Checksum Invalid\n");
+ e_dbg("NVM Checksum valid bit not set\n");
- if (hw->mac.type < e1000_pch_cnp) {
+ if (hw->mac.type < e1000_pch_tgp) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
@@ -6021,3 +6021,23 @@ const struct e1000_info e1000_pch_tgp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_adp_info = {
+ .mac = e1000_pch_adp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 51512a73fdd0..5df7ad93f3d7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -957,7 +957,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 635a95927e93..49e926959ad3 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
+ [board_pch_adp] = &e1000_pch_adp_info,
};
struct e1000_reg_info {
@@ -3921,9 +3922,9 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return;
- if (info->adjfreq) {
+ if (info->adjfine) {
/* restore the previous ptp frequency delta */
- ret_val = info->adjfreq(info, adapter->ptp_delta);
+ ret_val = info->adjfine(info, adapter->ptp_delta);
} else {
/* set the default base frequency if no adjustment possible */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
@@ -5473,7 +5474,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -5845,7 +5846,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
/* we do this workaround for ES2LAN, but it is unnecessary,
* avoiding it could save a lot of cycles
*/
@@ -6341,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
@@ -6490,7 +6492,12 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Keep the GPT clock enabled for CSME */
+ mac_data = er32(FEXTNVM);
+ mac_data |= BIT(3);
+ ew32(FEXTNVM, mac_data);
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@@ -7260,7 +7267,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
+ strscpy((char *)pba_str, "Unknown", sizeof(pba_str));
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -7385,9 +7392,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, i, err;
s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
@@ -7401,17 +7408,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
bars = pci_select_bars(pdev, IORESOURCE_MEM);
@@ -7478,8 +7479,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -7547,10 +7548,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - max_hw_frame_size */
netdev->min_mtu = ETH_MIN_MTU;
@@ -7677,7 +7676,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_pch_cnp)
adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
- strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -7898,22 +7897,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ebe121db4307..3132d8f2f207 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -101,8 +101,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
* demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
-#define MAX_INTMODE 2
-#define MIN_INTMODE 0
/* Enable Smart Power Down of the PHY
*
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0f0efee5fc8e..060b263348ce 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -146,11 +146,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -210,11 +210,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Write PHY Red Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -2697,9 +2697,14 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg &= ~BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
}
@@ -2715,9 +2720,14 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg |= BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
@@ -3037,7 +3047,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, MII_BMCR, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
+ if (ret_val) {
+ e_dbg("Error reading PHY register\n");
+ return ret_val;
+ }
if (data & BMCR_LOOPBACK)
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eb5c014c02fb..0e488e4fa5c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -15,14 +15,16 @@
#endif
/**
- * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * e1000e_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
- * @delta: Desired frequency change in parts per billion
+ * @delta: Desired frequency change in scaled parts per million
*
* Adjust the frequency of the PHC cycle counter by the indicated delta from
* the base frequency.
+ *
+ * Scaled parts per million is ppm but with a 16-bit binary fractional field.
**/
-static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
@@ -33,9 +35,6 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
u32 timinca, incvalue;
s32 ret_val;
- if ((delta > ptp->max_adj) || (delta <= -1000000000))
- return -EINVAL;
-
if (delta < 0) {
neg_adj = true;
delta = -delta;
@@ -50,9 +49,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
- adjustment = incvalue;
- adjustment *= delta;
- adjustment = div_u64(adjustment, 1000000000);
+ adjustment = mul_u64_u64_div_u64(incvalue, (u64)delta,
+ 1000000ULL << 16);
incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
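
/* Illustrative sketch, not part of this patch: the unit conversion behind
 * .adjfine. delta is ppm with a 16-bit binary fraction, so the correction
 * to the increment value is incvalue * |delta| / (1000000 * 2^16), which
 * the hunk above computes overflow-safely via mul_u64_u64_div_u64().
 */
static u64 example_adjfine_correction(u32 incvalue, long scaled_ppm)
{
        u64 delta = scaled_ppm < 0 ? -scaled_ppm : scaled_ppm;

        return mul_u64_u64_div_u64(incvalue, delta, 1000000ULL << 16);
}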
@@ -260,7 +258,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = e1000e_phc_adjfreq,
+ .adjfine = e1000e_phc_adjfine,
.adjtime = e1000e_phc_adjtime,
.gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3362f26d7f99..4a6630586ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1595,8 +1595,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(interface->netdev, &q_vector->napi,
- fm10k_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);
/* tie q_vector and interface together */
interface->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 30ca9ee1900b..87fa5874f16e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -809,7 +809,7 @@ static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function copies the message from the the message array to mbmem
+ * This function copies the message from the message array to mbmem
**/
static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
@@ -1825,7 +1825,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
fm10k_sm_mbx_connect_reset(mbx);
break;
case FM10K_STATE_CONNECT:
- /* try connnecting at lower version */
+ /* try connecting at lower version */
if (mbx->remote) {
while (mbx->local > 1)
mbx->local--;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index f6d56867f857..75cbdf2dbbe3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -78,7 +78,7 @@ static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
* @string: Pointer to location of destination string
*
* This function pulls the string back out of the attribute and will place
- * it in the array pointed by by string. It will return success if provided
+ * it in the array pointed to by string. It will return success if provided
* with valid pointers.
**/
static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
@@ -584,7 +584,7 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* @mbx: Unused mailbox pointer
*
* This function is a default handler for unrecognized messages. At a
- * a minimum it just indicates that the message requested was
+ * minimum it just indicates that the message requested was
* unimplemented.
**/
s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4d939af0a626..9a60d6b207f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -37,6 +37,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
+#include <linux/bitfield.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include <linux/net/intel/i40e_client.h>
@@ -144,6 +145,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
@@ -174,7 +176,6 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -398,6 +399,20 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+
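/* Illustrative sketch, not part of this patch: intended use of the
 * helper macros above when programming a Tx queue-pair interrupt cause
 * register. The qp/vector values and the call site are assumptions.
 */
static void example_map_tx_queue(struct i40e_hw *hw, u32 qp, u32 vector)
{
        u32 val = I40E_QINT_TQCTL_VAL(qp, vector, RX); /* next queue is Rx */

        wr32(hw, I40E_QINT_TQCTL(qp), val);
}
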
struct i40e_flex_pit {
struct list_head list;
u16 src_offset;
@@ -565,6 +580,7 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -848,12 +864,17 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- u32 tx_restart;
- u32 tx_busy;
+ u64 tx_restart;
+ u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u32 rx_buf_failed;
- u32 rx_page_failed;
+ u64 tx_stopped;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
+ u64 rx_page_reuse;
+ u64 rx_page_alloc;
+ u64 rx_page_waive;
+ u64 rx_page_busy;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
@@ -1087,6 +1108,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
(u32)(val & 0xFFFFFFFFULL));
}
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the function number of the highest PCI physical
+ * function plus 1 as it is loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+ return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+ rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
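
/* Illustrative sketch, not part of this patch: FIELD_GET() masks and
 * shifts in one step, so for a contiguous mask the helper above is
 * equivalent to (rd32(hw, REG) & MASK) >> __ffs(MASK). The mask below
 * is invented for illustration only.
 */
#define EXAMPLE_PF_CNT_MASK GENMASK(7, 0)

static inline u32 example_field_get(u32 reg_val)
{
        return FIELD_GET(EXAMPLE_PF_CNT_MASK, reg_val); /* bits 7:0 */
}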
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
@@ -1270,4 +1306,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add);
+
+/**
+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
+ * @pf: pointer to a pf.
+ *
+ * Check and return the value of the I40E_FLAG_TC_MQPRIO flag.
+ *
+ * Return: I40E_FLAG_TC_MQPRIO set state.
+ **/
+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+{
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
+}
+
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 7abef88801fb..42439f725aa4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -769,7 +769,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
}
/**
- * i40e_asq_send_command_atomic - send command to Admin Queue
+ * i40e_asq_send_command_atomic_exec - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
@@ -780,11 +780,13 @@ static bool i40e_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- bool is_atomic_context)
+static i40e_status
+i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
@@ -794,8 +796,6 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
u16 retval = 0;
u32 val = 0;
- mutex_lock(&hw->aq.asq_mutex);
-
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
@@ -969,6 +969,36 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
}
asq_send_command_error:
+ return status;
+}
+
+/**
+ * i40e_asq_send_command_atomic - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine.
+ **/
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
+{
+ i40e_status status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+ cmd_details,
+ is_atomic_context);
+
mutex_unlock(&hw->aq.asq_mutex);
return status;
}
@@ -983,6 +1013,52 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
}
/**
+ * i40e_asq_send_command_atomic_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine. Returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+i40e_status
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status)
+{
+ i40e_status status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+ buff_size,
+ cmd_details,
+ is_atomic_context);
+ if (aq_status)
+ *aq_status = hw->aq.asq_last_status;
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
+i40e_status
+i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..10d7a982a5b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 9ddeb015eb7e..4f01e2a6b6bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -47,6 +48,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_SFP_X722_A:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -1899,8 +1901,9 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
if (status)
goto aq_add_vsi_exit;
@@ -2287,8 +2290,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
@@ -2632,33 +2636,28 @@ get_veb_exit:
}
/**
- * i40e_aq_add_macvlan
- * @hw: pointer to the hw struct
- * @seid: VSI for the mac address
+ * i40e_prepare_add_macvlan
* @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
* @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI for the mac address
*
- * Add MAC/VLAN addresses to the HW filtering
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
**/
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+ struct i40e_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
+ (struct i40e_aqc_macvlan *)&desc->params.raw;
u16 buf_size;
int i;
- if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
-
buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
@@ -2669,14 +2668,71 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ return buf_size;
+}
- return status;
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
+}
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+i40e_status
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
}
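
/* Illustrative sketch, not part of this patch: a caller using the _v2
 * variant to capture the AQ status on the stack instead of reading the
 * racy hw->aq.asq_last_status afterwards. The function name and the
 * error mapping are assumptions.
 */
static int example_add_filters(struct i40e_hw *hw, u16 seid,
                               struct i40e_aqc_add_macvlan_element_data *list,
                               u16 num)
{
        enum i40e_admin_queue_err aq_status;
        i40e_status status;

        status = i40e_aq_add_macvlan_v2(hw, seid, list, num, NULL,
                                        &aq_status);
        if (status && aq_status == I40E_AQ_RC_ENOSPC)
                return -ENOSPC; /* MAC/VLAN filter table is full */
        return status ? -EIO : 0;
}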
/**
@@ -2715,13 +2771,59 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
return status;
}
/**
+ * i40e_aq_remove_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+i40e_status
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aqc_macvlan *cmd;
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
* @hw: pointer to the hw struct
* @opcode: AQ opcode for add or delete mirror rule
@@ -3868,7 +3970,8 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
cmd->seid = cpu_to_le16(seid);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -4872,6 +4975,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -4910,6 +5014,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2c1b1da1220e..c9dcd6d92c83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
@@ -275,9 +275,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.alloc_page_failed,
rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+ " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
i,
- rx_ring->rx_stats.realloc_count,
rx_ring->rx_stats.page_reuse_count);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i\n",
@@ -310,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->stats.bytes,
tx_ring->tx_stats.restart_queue);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
i,
tx_ring->tx_stats.tx_busy,
- tx_ring->tx_stats.tx_done_old);
+ tx_ring->tx_stats.tx_done_old,
+ tx_ring->tx_stats.tx_stopped);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i\n",
i, tx_ring->size);
@@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
vsi = pf->vsi[vf->lan_vsi_idx];
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
- dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
- vf->num_mdd_events,
- vf->num_invalid_msgs,
- vf->num_valid_msgs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+ vf->num_mdd_events);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 1bcb0ec0f0c0..d9c51a238dcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -24,8 +24,10 @@
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
#define I40E_IS_X710TL_DEVICE(d) \
- (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
@@ -33,6 +35,7 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_SFP_X722_A 0x0DDA
#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 091f36adbbe1..4a6a6e48c615 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -236,8 +236,6 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
-#define I40E_QUEUE_STAT(_name, _stat) \
- I40E_STAT(struct i40e_ring, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -293,8 +291,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
I40E_VSI_STAT("tx_busy", tx_busy),
+ I40E_VSI_STAT("tx_stopped", tx_stopped),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+ I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+ I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+ I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+ I40E_VSI_STAT("tx_restart", tx_restart),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
@@ -451,6 +455,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
+ I40E_PRIV_FLAG("vf-vlan-pruning",
+ I40E_FLAG_VF_VLAN_PRUNING, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1135,6 +1141,71 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#define I40E_LBIT_SIZE 8
+/**
+ * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
+ * @speed: speed in decimal
+ * @ks: ethtool ksettings
+ *
+ * Return i40e_aq_link_speed based on speed
+ **/
+static enum i40e_aq_link_speed
+i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks)
+{
+ enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN;
+ bool speed_changed = false;
+ int i, j;
+
+ static const struct {
+ __u32 speed;
+ enum i40e_aq_link_speed link_speed;
+ __u8 bit[I40E_LBIT_SIZE];
+ } i40e_speed_lut[] = {
+#define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT
+ {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} },
+ {SPEED_1000, I40E_LINK_SPEED_1GB,
+ {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX),
+ I40E_LBIT(1000baseKX)} },
+ {SPEED_10000, I40E_LINK_SPEED_10GB,
+ {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR),
+ I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR),
+ I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} },
+
+ {SPEED_25000, I40E_LINK_SPEED_25GB,
+ {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR),
+ I40E_LBIT(25000baseSR)} },
+ {SPEED_40000, I40E_LINK_SPEED_40GB,
+ {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4),
+ I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} },
+ {SPEED_20000, I40E_LINK_SPEED_20GB,
+ {I40E_LBIT(20000baseKR2)} },
+ {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} },
+ {SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(5000baseT)} }
+#undef I40E_LBIT
+};
+
+ for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) {
+ if (i40e_speed_lut[i].speed == speed) {
+ for (j = 0; j < I40E_LBIT_SIZE; j++) {
+ if (test_bit(i40e_speed_lut[i].bit[j],
+ ks->link_modes.supported)) {
+ speed_changed = true;
+ break;
+ }
+ if (!i40e_speed_lut[i].bit[j])
+ break;
+ }
+ if (speed_changed) {
+ link_speed = i40e_speed_lut[i].link_speed;
+ break;
+ }
+ }
+ }
+ return link_speed;
+}
+
+#undef I40E_LBIT_SIZE
+
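The table above pairs each ethtool SPEED_* value with the admin-queue link-speed enum plus the zero-terminated set of link-mode bits that may advertise it. A minimal user-space sketch of the same table-driven translation; all names and bit numbers here are hypothetical stand-ins, not the driver's:

#include <stdio.h>
#include <stdbool.h>

enum aq_speed { AQ_UNKNOWN = 0, AQ_100MB, AQ_1GB, AQ_10GB };

struct speed_row {
	unsigned int mbps;          /* ethtool SPEED_* value */
	enum aq_speed aq;           /* firmware link-speed code */
	unsigned char bits[4];      /* supported link-mode bits, 0-terminated */
};

static const struct speed_row lut[] = {
	{ 100,   AQ_100MB, { 3 } },          /* e.g. 100baseT/Full */
	{ 1000,  AQ_1GB,   { 5, 17 } },      /* 1000baseT, 1000baseKX */
	{ 10000, AQ_10GB,  { 12, 18, 19 } }, /* 10000baseT/KX4/KR */
};

static bool mode_supported(const unsigned long *supported, unsigned char bit)
{
	return supported[bit / (8 * sizeof(long))] &
	       (1UL << (bit % (8 * sizeof(long))));
}

/* Return the firmware code for @mbps iff one of its modes is supported. */
static enum aq_speed speed_to_aq(unsigned int mbps,
				 const unsigned long *supported)
{
	size_t i, j;

	for (i = 0; i < sizeof(lut) / sizeof(lut[0]); i++) {
		if (lut[i].mbps != mbps)
			continue;
		for (j = 0; j < sizeof(lut[i].bits) && lut[i].bits[j]; j++)
			if (mode_supported(supported, lut[i].bits[j]))
				return lut[i].aq;
	}
	return AQ_UNKNOWN;
}

int main(void)
{
	unsigned long supported[2] = { (1UL << 5) | (1UL << 12), 0 };

	printf("1000 -> %d, 2500 -> %d\n",
	       speed_to_aq(1000, supported), speed_to_aq(2500, supported));
	return 0;
}

The zero-terminated bits[] array mirrors the driver's "if (!bit[j]) break" early-out, which works because no valid row stores link-mode bit 0 past the first slot.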
/**
* i40e_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
@@ -1151,12 +1222,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings copy_ks;
struct i40e_aq_set_phy_config config;
struct i40e_pf *pf = np->vsi->back;
+ enum i40e_aq_link_speed link_speed;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
i40e_status status = 0;
int timeout = 50;
int err = 0;
+ __u32 speed;
u8 autoneg;
/* Changing port settings is not supported if this isn't the
@@ -1189,6 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
+ speed = copy_ks.base.speed;
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
@@ -1207,6 +1281,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* set autoneg back to what it currently is */
copy_ks.base.autoneg = safe_ks.base.autoneg;
+ copy_ks.base.speed = safe_ks.base.speed;
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
@@ -1323,6 +1398,27 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
40000baseLR4_Full))
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* Autonegotiation must be disabled to change speed */
+ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) &&
+ (autoneg == AUTONEG_DISABLE ||
+ (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) {
+ link_speed = i40e_speed_to_link_speed(speed, ks);
+ if (link_speed == I40E_LINK_SPEED_UNKNOWN) {
+ netdev_info(netdev, "Given speed is not supported\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ } else {
+ config.link_speed = link_speed;
+ }
+ } else {
+ if (safe_ks.base.speed != speed) {
+ netdev_info(netdev,
+ "Unable to set speed, disable autoneg\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ }
+
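The guard above only applies a forced speed when autonegotiation is off: either the request turns it off, or it was already off and the request does not re-enable it. A condensed sketch of that predicate; the helper name is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* May the requested speed be forced, given the autoneg state?
 * new_an:     autoneg requested by this ioctl
 * cur_an:     autoneg currently programmed
 * an_changed: the request flips the autoneg state
 */
static bool may_force_speed(bool new_an, bool cur_an, bool an_changed)
{
	return !new_an || (!cur_an && !an_changed);
}

int main(void)
{
	printf("%d %d %d\n",
	       may_force_speed(false, true, true),   /* turning autoneg off */
	       may_force_speed(false, false, false), /* already off */
	       may_force_speed(true, true, false));  /* autoneg stays on */
	return 0;
}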
/* If speed didn't get set, set it to what it currently is.
* This is needed because if advertise is 0 (as it is when autoneg
* is disabled) then speed won't get set.
@@ -1905,10 +2001,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+ strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
@@ -2087,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
- err = i40e_alloc_rx_bi(&rx_rings[i]);
- if (err)
- goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
@@ -2580,15 +2673,16 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, pf->state);
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ dev_warn(&pf->pdev->dev,
+ "Cannot start offline testing when PF is in reset state.\n");
+ goto skip_ol_tests;
+ }
+
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
- data[I40E_ETH_TEST_REG] = 1;
- data[I40E_ETH_TEST_EEPROM] = 1;
- data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LINK] = 1;
- eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -2635,9 +2729,17 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
}
-skip_ol_tests:
-
netif_info(pf, drv, netdev, "testing finished\n");
+ return;
+
+skip_ol_tests:
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, pf->state);
+ netif_info(pf, drv, netdev, "testing failed\n");
}
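The reshuffled exits above funnel every precondition failure through one skip_ol_tests label that marks all tests failed and clears the TESTING bit exactly once, instead of repeating that bookkeeping at each early-out. A generic sketch of the single-failure-path pattern, with assumed names:

#include <stdio.h>
#include <string.h>

#define NTESTS 4

static int device_busy = 1; /* pretend a reset is pending */

static void diag_test(unsigned char *results)
{
	if (device_busy)
		goto skip_tests;        /* one early-out per precondition */

	memset(results, 0, NTESTS);     /* run tests, record passes */
	printf("testing finished\n");
	return;

skip_tests:
	memset(results, 1, NTESTS);     /* mark every test failed, once */
	printf("testing failed\n");
}

int main(void)
{
	unsigned char results[NTESTS];

	diag_test(results);
	return 0;
}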
static void i40e_get_wol(struct net_device *netdev,
@@ -3083,10 +3185,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
- if (i_set & I40E_L3_SRC_MASK)
- cmd->data |= RXH_IP_SRC;
- if (i_set & I40E_L3_DST_MASK)
- cmd->data |= RXH_IP_DST;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i_set & I40E_X722_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_X722_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
if (i_set & I40E_L3_V6_SRC_MASK)
@@ -3444,12 +3553,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
* @nfc: pointer to user request
* @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ struct ethtool_rxnfc *nfc,
+ u64 i_setc)
{
u64 i_set = i_setc;
u64 src_l3 = 0, dst_l3 = 0;
@@ -3468,8 +3580,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
- src_l3 = I40E_L3_SRC_MASK;
- dst_l3 = I40E_L3_DST_MASK;
+ if (hw->mac.type == I40E_MAC_X722) {
+ src_l3 = I40E_X722_L3_SRC_MASK;
+ dst_l3 = I40E_X722_L3_DST_MASK;
+ } else {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ }
} else {
/* Any other flow type are not supported here */
return i_set;
@@ -3487,6 +3604,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
return i_set;
}
+#define FLOW_PCTYPES_SIZE 64
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
@@ -3499,9 +3617,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- u8 flow_pctype = 0;
+ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
u64 i_set, i_setc;
+ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
@@ -3517,36 +3637,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
@@ -3579,17 +3698,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- if (flow_pctype) {
- i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
- flow_pctype)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
- flow_pctype)) << 32);
- i_set = i40e_get_rss_hash_bits(nfc, i_setc);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
- (u32)i_set);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
- (u32)(i_set >> 32));
- hena |= BIT_ULL(flow_pctype);
+ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+ u8 flow_id;
+
+ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_id);
+ }
}
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
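Collecting the packet-classifier types in a bitmap lets one loop program the hash-inset registers for every affected type, instead of special-casing a single flow_pctype. A user-space sketch of the iterate-set-bits idiom; the register writes are stand-ins:

#include <stdio.h>

#define NTYPES 64

int main(void)
{
	unsigned long long pctypes = 0, hena = 0;
	int id;

	/* e.g. TCP_V4 plus its SYN_NO_ACK companion type */
	pctypes |= 1ULL << 31;
	pctypes |= 1ULL << 30;

	for (id = 0; id < NTYPES; id++) {
		if (!(pctypes & (1ULL << id)))
			continue;
		/* stand-in for the two 32-bit HASH_INSET register writes */
		printf("program inset for pctype %d\n", id);
		hena |= 1ULL << id;
	}
	printf("hena = 0x%llx\n", hena);
	return 0;
}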
@@ -4380,7 +4502,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
@@ -4916,7 +5038,7 @@ static int i40e_set_channels(struct net_device *dev,
/* We do not support setting channels via ethtool when TCs are
* configured through mqprio
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return -EINVAL;
/* verify they are not requesting separate vectors */
@@ -5279,6 +5401,13 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ pf->num_alloc_vfs) {
+ dev_warn(&pf->pdev->dev,
+ "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(new_flags & I40E_FLAG_MFP_ENABLED))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 61afc220fc6c..b5dcd15ced36 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
@@ -77,6 +78,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -196,10 +198,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -214,8 +212,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
- /* start the linear search with an imperfect hint */
- i = pile->search_hint;
+ /* Allocate the last queue in the pile for the FDIR VSI queue
+ * so it doesn't fragment the qp_pile
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -234,7 +245,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
- pile->search_hint = i + j;
break;
}
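Reserving the final entry of the pile for the FDIR VSI keeps that single-queue allocation from splitting the contiguous region the first-fit search hands out, which also makes the search_hint removal above safe. A compact sketch of the idea; the allocator below is hypothetical, with the VALID bit modelled as the top bit:

#include <stdio.h>

#define NQ 8
#define VALID 0x8000

static unsigned short pile[NQ];

/* Allocate @needed consecutive slots; @special gets the last slot only. */
static int get_lump(int needed, int id, int special)
{
	int i, j;

	if (special) {
		if (pile[NQ - 1] & VALID)
			return -1;
		pile[NQ - 1] = id | VALID;
		return NQ - 1;
	}
	for (i = 0; i + needed <= NQ; i++) {
		for (j = 0; j < needed; j++)
			if (pile[i + j] & VALID)
				break;
		if (j < needed) {
			i += j;	/* restart just past the collision */
			continue;
		}
		for (j = 0; j < needed; j++)
			pile[i + j] = id | VALID;
		return i;
	}
	return -1;
}

int main(void)
{
	printf("fdir at %d\n", get_lump(1, 1, 1));   /* takes slot NQ-1 */
	printf("lan  at %d\n", get_lump(4, 2, 0));   /* 0..3 stay contiguous */
	return 0;
}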
@@ -257,7 +267,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
- int i;
+ u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -269,8 +279,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
- if (count && index < pile->search_hint)
- pile->search_hint = index;
return count;
}
@@ -377,7 +385,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -544,6 +554,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
}
/**
+ * i40e_compute_pci_to_hw_id - compute index from PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+ int pf_count = i40e_get_pf_count(hw);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+ return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ new_data = rd64(hw, loreg);
+
+ if (!offset_loaded || new_data < *offset)
+ *offset = new_data;
+ *stat = new_data - *offset;
+}
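Because the hardware counters survive a PF reset, the first value read serves as a baseline: the driver reports new_data - *offset so stats appear to count from zero. A user-space sketch of that logic, plus the wrap handling a narrower 48-bit counter needs; widths and names are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

/* 64-bit flavour: baseline on first read, then subtract. */
static void stat_update64(uint64_t new_data, int *loaded,
			  uint64_t *offset, uint64_t *stat)
{
	if (!*loaded || new_data < *offset)
		*offset = new_data;
	*stat = new_data - *offset;
	*loaded = 1;
}

/* 48-bit flavour: the raw counter wraps at 2^48. */
static void stat_update48(uint64_t new_data, int *loaded,
			  uint64_t *offset, uint64_t *stat)
{
	if (!*loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + (1ULL << 48)) - *offset;
	*stat &= (1ULL << 48) - 1;
	*loaded = 1;
}

int main(void)
{
	uint64_t off = 0, stat = 0;
	int loaded = 0;

	stat_update64(1000, &loaded, &off, &stat); /* baseline: stat = 0 */
	stat_update64(1500, &loaded, &off, &stat); /* stat = 500 */
	printf("stat = %llu\n", (unsigned long long)stat);
	return 0;
}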
+
+/**
* i40e_stat_update48 - read and update a 48 bit stat from the chip
* @hw: ptr to the hardware info
* @hireg: the high 32 bit reg to read
@@ -615,6 +666,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
}
/**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+ int stat_idx, bool offset_loaded,
+ struct i40e_eth_stats *stat_offset,
+ struct i40e_eth_stats *stat)
+{
+ u64 rx_rdpc, rx_rxerr;
+
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+ &stat_offset->rx_discards, &rx_rdpc);
+ i40e_stat_update64(hw,
+ I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+ I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+ offset_loaded, &stat_offset->rx_discards_other,
+ &rx_rxerr);
+
+ stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
* i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
* @vsi: the VSI to be updated
**/
@@ -673,6 +752,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+
+ i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+ vsi->stat_offsets_loaded, oes, es);
+
vsi->stat_offsets_loaded = true;
}
@@ -767,18 +850,19 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
**/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
+ u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
struct i40e_pf *pf = vsi->back;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- u32 tx_restart, tx_busy;
+ u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u32 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -798,8 +882,13 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
+ rx_reuse = 0;
+ rx_alloc = 0;
+ rx_waive = 0;
+ rx_busy = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
@@ -818,6 +907,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */
p = READ_ONCE(vsi->rx_rings[q]);
@@ -833,6 +923,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
rx_page += p->rx_stats.alloc_page_failed;
+ rx_reuse += p->rx_stats.page_reuse_count;
+ rx_alloc += p->rx_stats.page_alloc_count;
+ rx_waive += p->rx_stats.page_waive_count;
+ rx_busy += p->rx_stats.page_busy_count;
if (i40e_enabled_xdp_vsi(vsi)) {
/* locate XDP ring */
@@ -858,8 +952,13 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
+ vsi->rx_page_reuse = rx_reuse;
+ vsi->rx_page_alloc = rx_alloc;
+ vsi->rx_page_waive = rx_waive;
+ vsi->rx_page_busy = rx_busy;
ns->rx_packets = rx_p;
ns->rx_bytes = rx_b;
@@ -1346,6 +1445,114 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
}
/**
+ * i40e_get_vf_new_vlan - Get new vlan id on a vf
+ * @vsi: the vsi to configure
+ * @new_mac: new mac filter to be added
+ * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Get new VLAN id based on current VLAN filters, trust, PVID
+ * and the vf-vlan-pruning flag.
+ *
+ * Returns the value of the new vlan filter or
+ * the old value if no new filter is needed.
+ */
+static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
+ struct i40e_new_mac_filter *new_mac,
+ struct i40e_mac_filter *f,
+ int vlan_filters,
+ bool trusted)
+{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
+ struct i40e_pf *pf = vsi->back;
+ bool is_any;
+
+ if (new_mac)
+ f = new_mac->f;
+
+ if (pvid && f->vlan != pvid)
+ return pvid;
+
+ is_any = (trusted ||
+ !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+
+ if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (is_any && !vlan_filters && f->vlan == 0)) {
+ if (is_any)
+ return I40E_VLAN_ANY;
+ else
+ return 0;
+ }
+
+ return f->vlan;
+}
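The decision above reduces to: a PVID wins outright; otherwise a filter on the catch-all VLAN (-1) must narrow to VLAN 0 once real VLAN filters exist or pruning applies, and a VLAN-0 filter must widen back to the catch-all when it does not. A small sketch of the same rule with hypothetical constants, where is_any stands for "trusted, or pruning disabled":

#include <stdio.h>

#define VLAN_ANY (-1)

/* Pick the VLAN a VF MAC filter should sit on. */
static int vf_new_vlan(int cur_vlan, int pvid, int nfilters, int is_any)
{
	if (pvid && cur_vlan != pvid)
		return pvid;

	if ((nfilters && cur_vlan == VLAN_ANY) ||
	    (!is_any && !nfilters && cur_vlan == VLAN_ANY) ||
	    (is_any && !nfilters && cur_vlan == 0))
		return is_any ? VLAN_ANY : 0;

	return cur_vlan;
}

int main(void)
{
	/* trusted VF, no VLAN filters yet: VLAN 0 widens to "any" */
	printf("%d\n", vf_new_vlan(0, 0, 0, 1));        /* -> -1 */
	/* untrusted VF with pruning, active filters: "any" collapses to 0 */
	printf("%d\n", vf_new_vlan(VLAN_ANY, 0, 2, 0)); /* -> 0 */
	return 0;
}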
+
+/**
+ * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Correct VF VLAN filters based on current VLAN filters, trust, PVID
+ * and the vf-vlan-pruning flag.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters,
+ bool trusted)
+{
+ struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new_mac;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
+ new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
+ vlan_filters, trusted);
+ }
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
+ trusted);
+ if (new_vlan != f->vlan) {
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+ /* Create a temporary i40e_new_mac_filter */
+ new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
+ if (!new_mac)
+ return -ENOMEM;
+ new_mac->f = add_head;
+ new_mac->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new_mac->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+ return 0;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -1829,11 +2036,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes
+ * We need at least one queue pair for the interface
+ * to be usable; the else branch below guarantees that.
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
+ else
+ vsi->num_queue_pairs = 1;
}
/* Number of queues per enabled TC */
@@ -2137,19 +2348,19 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
+ enum i40e_admin_queue_err aq_status;
i40e_status aq_ret;
- int aq_err;
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
- aq_err = hw->aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ &aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %s, aq_err %s\n",
vsi_name, i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
+ i40e_aq_str(hw, aq_status));
}
}
@@ -2172,10 +2383,10 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- int aq_err, fcnt;
+ enum i40e_admin_queue_err aq_status;
+ int fcnt;
- i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
- aq_err = hw->aq.asq_last_status;
+ i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
@@ -2183,17 +2394,19 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err), vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi->type);
}
}
}
@@ -2398,10 +2611,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vlan_filters++;
}
- retval = i40e_correct_mac_vlan_filters(vsi,
- &tmp_add_list,
- &tmp_del_list,
- vlan_filters);
+ if (vsi->type != I40E_VSI_SRIOV)
+ retval = i40e_correct_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters);
+ else
+ retval = i40e_correct_vf_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters, pf->vf[vsi->vf_id].trusted);
hlist_for_each_entry(new, &tmp_add_list, hlist)
netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
@@ -2830,8 +3047,21 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
int bkt;
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
- if (f->state == I40E_FILTER_REMOVE)
+ /* If we're asked to add a filter that has been marked for
+ * removal, it is safe to simply restore it to active state.
+ * __i40e_del_filter will have simply deleted any filters which
+ * were previously marked NEW or FAILED, so if it is currently
+ * marked REMOVE it must have previously been ACTIVE. Since we
+ * haven't yet run the sync filters task, just restore this
+ * filter to the ACTIVE state so that the sync task leaves it
+ * in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
+ f->state = I40E_FILTER_ACTIVE;
continue;
+ } else if (f->state == I40E_FILTER_REMOVE) {
+ continue;
+ }
add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
@@ -3336,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- kfree(ring->rx_bi);
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ret = i40e_alloc_rx_bi_zc(ring);
- if (ret)
- return ret;
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
@@ -3359,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->queue_index);
} else {
- ret = i40e_alloc_rx_bi(ring);
- if (ret)
- return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -3649,7 +3872,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
- /* Linked list for the queuepairs assigned to this vector */
+ /* start of the linked list of RX queues assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
@@ -3665,6 +3888,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) {
+ /* TX queue with next queue set to TX */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3674,7 +3898,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
-
+ /* TX queue with next queue set to RX, or end of the linked list */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3743,7 +3967,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- u32 val;
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
@@ -3760,28 +3983,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_RQCTL(0), val);
+ /* Associate the queue pair to the vector and enable the queue
+ * interrupt: the RX queue heads the linked list, with its next
+ * queue set to TX
+ */
+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
if (i40e_enabled_xdp_vsi(vsi)) {
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ /* TX queue in linked list with next queue set to TX */
+ wr32(hw, I40E_QINT_TQCTL(nextqp),
+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
}
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(0), val);
+ /* last TX queue so the next RX queue doesn't matter */
+ wr32(hw, I40E_QINT_TQCTL(0),
+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
i40e_flush(hw);
}
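The I40E_QINT_RQCTL_VAL/I40E_QINT_TQCTL_VAL helpers used above replace the open-coded mask arithmetic; each register value packs an enable bit, an ITR index, the next queue index, and the next queue's type. A sketch of how such a packing macro might be built; the field positions below are illustrative, not the device's register map:

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout, not the real register map. */
#define CAUSE_ENA        (1u << 30)
#define ITR_SHIFT        27
#define NEXTQ_SHIFT      16
#define NEXTQ_TYPE_SHIFT 11

#define QINT_CTL_VAL(nextq, itr, type)              \
	(CAUSE_ENA |                                \
	 ((uint32_t)(itr) << ITR_SHIFT) |           \
	 ((uint32_t)(nextq) << NEXTQ_SHIFT) |       \
	 ((uint32_t)(type) << NEXTQ_TYPE_SHIFT))

int main(void)
{
	enum { QTYPE_RX = 0, QTYPE_TX = 1 };

	/* RX queue 0 chained to TX queue 0 */
	printf("rqctl = 0x%08x\n", QINT_CTL_VAL(0, 0, QTYPE_TX));
	return 0;
}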
@@ -3915,10 +4130,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
*
* get_cpu_mask returns a static constant mask with
* a permanent lifetime so it's ok to pass to
- * irq_set_affinity_hint without making a copy.
+ * irq_update_affinity_hint without making a copy.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@@ -3929,7 +4144,7 @@ free_queue_irqs:
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -4012,7 +4227,6 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
i40e_flush(&pf->hw);
if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4750,8 +4964,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
@@ -5235,7 +5448,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
@@ -5267,7 +5480,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return i40e_mqprio_get_enabled_tc(pf);
/* If neither MQPRIO nor DCB is enabled for this PF then just return
@@ -5364,7 +5577,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
int i;
/* There is no need to reset BW when mqprio mode is on. */
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return 0;
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
@@ -5436,7 +5649,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
vsi->tc_config.tc_info[i].qoffset);
}
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return;
/* Assign UP2TC map for the VSI */
@@ -5597,7 +5810,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.info = vsi->info;
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
if (ret)
goto out;
@@ -5682,6 +5895,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes/s to Mbit/s
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes per second, to be converted to Mbit/s
+ *
+ * Helper function to convert units before sending the set-BW-limit request
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
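With I40E_BW_MBPS_DIVISOR presumably 125000 (the bytes per second in one Mbit/s) and I40E_BW_CREDIT_DIVISOR presumably 50 (one scheduler credit per 50 Mbit/s), the conversion and the later credit computation work out as below; the values are assumed from the constant names, not confirmed here:

#include <stdio.h>
#include <stdint.h>

#define MBPS_DIVISOR   125000ULL  /* bytes/s per Mbit/s: 1e6 / 8 */
#define CREDIT_DIVISOR 50ULL      /* one BW credit = 50 Mbit/s   */

int main(void)
{
	uint64_t bytes_per_sec = 125000000;            /* 1 Gbit/s */
	uint64_t mbps = bytes_per_sec / MBPS_DIVISOR;  /* -> 1000  */

	if (mbps < CREDIT_DIVISOR)
		mbps = CREDIT_DIVISOR;                 /* clamp to 50 Mbit/s */

	printf("%llu Mbit/s = %llu credits\n",
	       (unsigned long long)mbps,
	       (unsigned long long)(mbps / CREDIT_DIVISOR));
	return 0;
}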
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5703,10 +5936,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -6321,7 +6554,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
if (vsi->type == I40E_VSI_MAIN) {
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
else
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
@@ -6432,6 +6665,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
vsi->tc_seid_map[i] = ch->seid;
}
}
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
return ret;
err_free:
@@ -7529,42 +7765,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
- struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list))
- return -EINVAL;
-
/* Go through the list and find an available channel */
- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
- if (!i40e_is_channel_macvlan(ch)) {
- ch->fwd = fwd;
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
- ch->num_queue_pairs,
- ch->base_queue);
- for (i = 0; i < ch->num_queue_pairs; i++) {
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
- pf_q = ch->base_queue + i;
+ pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
- tx_ring->ch = ch;
+ tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
- rx_ring->ch = ch;
+ rx_ring->ch = iter;
}
+ ch = iter;
break;
}
}
+ if (!ch)
+ return -EINVAL;
+
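Switching the loop variable to iter and only assigning ch on a match avoids relying on the list_for_each_entry cursor after the loop, where it would otherwise point at the list head rather than a real element when nothing matched. The general shape of the fix, on a generic singly linked list with assumed types:

#include <stddef.h>
#include <stdio.h>

struct node {
	int free;
	struct node *next;
};

/* Find a free node; never use the cursor after the loop. */
static struct node *claim_first_free(struct node *head)
{
	struct node *found = NULL, *iter;

	for (iter = head; iter; iter = iter->next) {
		if (iter->free) {
			iter->free = 0;
			found = iter;	/* record the match explicitly */
			break;
		}
	}
	return found;		/* NULL means -EINVAL in the driver */
}

int main(void)
{
	struct node c = { 1, NULL }, b = { 0, &c }, a = { 0, &b };

	printf("claimed: %p\n", (void *)claim_first_free(&a));
	printf("claimed: %p\n", (void *)claim_first_free(&a));
	return 0;
}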
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/
@@ -7714,7 +7951,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
return ERR_PTR(-EINVAL);
}
@@ -7967,7 +8204,7 @@ config_tc:
/* Quiesce VSI queues */
i40e_quiesce_vsi(vsi);
- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
+ if (!hw && !i40e_is_tc_mqprio_enabled(pf))
i40e_remove_queue_channels(vsi);
/* Configure VSI for enabled TCs */
@@ -7991,11 +8228,11 @@ config_tc:
"Setup channel (id:%u) utilizing num_queues %d\n",
vsi->seid, vsi->tc_config.tc_info[0].qcount);
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -8516,6 +8753,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+ if (!tc) {
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination\n");
+ return -EINVAL;
+ }
+
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
@@ -10468,7 +10710,7 @@ static void i40e_send_version(struct i40e_pf *pf)
dv.minor_version = 0xff;
dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
+ strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
@@ -10546,7 +10788,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
- int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
@@ -10554,13 +10796,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- i40e_check_recovery_mode(pf)) {
+ is_recovery_mode_reported)
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
- }
if (test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !old_recovery_mode_bit)
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -10574,15 +10814,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
- /* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
- mdelay(300);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+ /* The following delay is necessary for firmware update. */
+ mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -10593,13 +10827,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* accordingly with regard to resources initialization
* and deinitialization
*/
- if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
- old_recovery_mode_bit) {
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
if (i40e_get_capabilities(pf,
i40e_aqc_opc_list_func_capabilities))
goto end_unlock;
- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ if (is_recovery_mode_reported) {
/* we're staying in recovery mode so we'll reinitialize
* misc vector here
*/
@@ -10649,7 +10882,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
* is not supported with new link speed
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
i40e_aq_set_dcb_parameters(hw, false, NULL);
} else {
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
@@ -10744,10 +10977,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
@@ -10853,6 +11086,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -11698,8 +11934,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
@@ -11792,7 +12027,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
- pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -12213,6 +12447,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return pf->alloc_rss_size;
pf->alloc_rss_size = new_rss_size;
@@ -12595,7 +12831,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
- pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;
@@ -12709,7 +12944,8 @@ static int i40e_set_features(struct net_device *netdev,
else
i40e_vlan_stripping_disable(vsi);
- if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+ if (!(features & NETIF_F_HW_TC) &&
+ (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
dev_err(&pf->pdev->dev,
"Offloaded tc filters active, can't turn hw_tc_offload off");
return -EINVAL;
@@ -13004,7 +13240,7 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
@@ -13040,6 +13276,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);
+ /* VSI shall be deleted in a moment, just return EINVAL */
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -13049,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
i40e_reset_and_rebuild(pf, true, true);
}
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -13281,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
i40e_queue_pair_disable_irq(vsi, queue_pair);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -13414,8 +13663,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np->vsi = vsi;
hw_enc_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
@@ -13446,6 +13694,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
/* enable macvlan offloads */
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
@@ -13461,6 +13726,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->features &= ~NETIF_F_HW_TC;
+
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
@@ -15324,12 +15591,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
/* set up pci connections */
@@ -15802,23 +16066,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strlcpy(width, "8", PCI_WIDTH_SIZE); break;
+ strscpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strlcpy(width, "4", PCI_WIDTH_SIZE); break;
+ strscpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strlcpy(width, "2", PCI_WIDTH_SIZE); break;
+ strscpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strlcpy(width, "1", PCI_WIDTH_SIZE); break;
+ strscpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
@@ -15930,8 +16194,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
- while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
+ * flags, once they are set, i40e_rebuild should not be called as
+ * i40e_prep_for_reset always returns early.
+ */
+ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
+ set_bit(__I40E_IN_REMOVE, pf->state);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -16130,6 +16399,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+
i40e_reset_and_rebuild(pf, false, false);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index fe6dca846028..3a38bf8bcde7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -682,10 +682,11 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- le_sum = cpu_to_le16(checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
+ }
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9241b6005ad3..ebdcde6f1aeb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,10 +27,25 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
i40e_status
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+i40e_status
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
+i40e_status
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -150,9 +165,19 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
struct i40e_asq_cmd_details *cmd_details,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 61e5789d78db..ffea0c9c82f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -27,7 +27,6 @@
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
-#define to_dev(obj) container_of(obj, struct device, kobj)
enum i40e_ptp_pin {
SDP3_2 = 0,
@@ -335,44 +334,37 @@ static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
}
/**
- * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * i40e_ptp_adjfine - Adjust the PHC frequency
* @ptp: The PTP clock structure
- * @ppb: Parts per billion adjustment from the base
+ * @scaled_ppm: Scaled parts per million adjustment from base
*
- * Adjust the frequency of the PHC by the indicated parts per billion from the
- * base frequency.
+ * Adjust the frequency of the PHC by the indicated delta from the base
+ * frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
**/
-static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct i40e_hw *hw = &pf->hw;
u64 adj, freq, diff;
int neg_adj = 0;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- freq = I40E_PTP_40GB_INCVAL;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ smp_mb(); /* Force any pending update before accessing. */
+ freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
+ diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
adj = I40E_PTP_40GB_INCVAL - diff;
else
adj = I40E_PTP_40GB_INCVAL + diff;
- /* At some link speeds, the base incval is so large that directly
- * multiplying by ppb would result in arithmetic overflow even when
- * using a u64. Avoid this by instead calculating the new incval
- * always in terms of the 40GbE clock rate and then multiplying by the
- * link speed factor afterwards. This does result in slightly lower
- * precision at lower link speeds, but it is fairly minor.
- */
- smp_mb(); /* Force any pending update before accessing. */
- adj *= READ_ONCE(pf->ptp_adj_mult);
-
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
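Since adjfine receives ppm scaled by 2^16, the delta is freq * scaled_ppm / (1e6 * 2^16); doing the multiply-then-divide in 128 bits, as mul_u64_u64_div_u64 does, avoids the overflow the removed comment worked around. A user-space check of the arithmetic, using the GCC/Clang __int128 extension and an illustrative base increment value:

#include <stdio.h>
#include <stdint.h>

static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
	return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
	const uint64_t base_incval = 0x0133333333ULL; /* illustrative only */
	long scaled_ppm = 10 << 16;                   /* +10 ppm */
	uint64_t diff, adj;

	diff = mul_div(base_incval, (uint64_t)scaled_ppm,
		       1000000ULL << 16);
	adj = base_incval + diff;                     /* negative: subtract */
	printf("diff=%llu adj=0x%llx\n",
	       (unsigned long long)diff, (unsigned long long)adj);
	return 0;
}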
@@ -1398,11 +1390,11 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strlcpy(pf->ptp_caps.name, i40e_driver_name,
+ strscpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
- pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjfine = i40e_ptp_adjfine;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8d0588a27a05..7339003aa17c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -211,6 +211,11 @@
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
@@ -413,6 +418,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
@@ -640,6 +648,14 @@
#define I40E_VFQF_HKEY1_MAX_INDEX 12
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 66cc79500c10..b97c95f89fa0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3,6 +3,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
@@ -371,7 +372,6 @@ static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
}
}
-#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
i40e_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1382,8 +1380,6 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
- rx_ring->rx_stats.page_reuse_count++;
-
/* clear contents of buffer_info */
old_buff->page = NULL;
}
@@ -1433,13 +1429,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
- if (ring_is_xdp(tx_ring)) {
- tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
- GFP_KERNEL);
- if (!tx_ring->xsk_descs)
- goto err;
- }
-
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */
@@ -1463,21 +1452,11 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
return 0;
err:
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
return -ENOMEM;
}
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1495,10 +1474,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
@@ -1608,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
return 0;
}
@@ -1675,6 +1657,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
return false;
}
+ rx_ring->rx_stats.page_alloc_count++;
+
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
i40e_rx_pg_size(rx_ring),
@@ -1982,32 +1966,43 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
/**
* i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
+ * @rx_stats: rx stats structure for the rx ring
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling i40e_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
*/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct i40e_rx_queue_stats *rx_stats,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
+ if (!dev_page_is_reusable(page)) {
+ rx_stats->page_waive_count++;
return false;
+ }
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+ rx_stats->page_busy_count++;
return false;
+ }
#else
#define I40E_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
- if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+ rx_stats->page_busy_count++;
return false;
+ }
#endif
/* If we have drained the page fragment pool we need to update
@@ -2237,7 +2232,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2290,16 +2285,14 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* i40e_run_xdp - run an XDP program
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
**/
-static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
if (!xdp_prog)
goto xdp_out;
@@ -2444,6 +2437,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int offset = rx_ring->rx_offset;
struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
struct xdp_buff xdp;
int xdp_res = 0;
@@ -2453,6 +2447,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
@@ -2508,11 +2504,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
}
if (xdp_res) {
@@ -3015,6 +3012,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
{
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
+ __be16 protocol;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -3026,7 +3024,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
- u16 gso_segs, gso_size;
+ u16 gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3039,15 +3037,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
ip.v4->tot_len = 0;
ip.v4->check = 0;
+
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
} else {
ip.v6->payload_len = 0;
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
}
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
@@ -3100,10 +3106,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
- gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
- first->gso_segs = gso_segs;
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */
@@ -3187,13 +3192,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *exthdr;
u32 offset, cmd = 0;
__be16 frag_off;
+ __be16 protocol;
u8 l4_proto = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol)) {
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ /* Set the tx_flags to indicate the IP protocol type. This is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ else
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
/* compute outer L2 header size */
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
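[Both i40e_tso() and i40e_tx_enable_csum() now pick their header pointers the same way; a condensed kernel-context sketch (the helper name is hypothetical, the skb accessors are the upstream ones from <linux/skbuff.h>, <linux/if_vlan.h> and <net/mpls.h>):

/* Hypothetical helper mirroring the header selection added above. */
static void i40e_pick_offload_headers(struct sk_buff *skb,
				      unsigned char **ip_hdr,
				      unsigned char **l4_hdr)
{
	__be16 protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol)) {
		/* MPLS: offload the encapsulated packet, so take the
		 * inner network header and start L4 where the stack says
		 * checksumming begins.
		 */
		*ip_hdr = skb_inner_network_header(skb);
		*l4_hdr = skb_checksum_start(skb);
	} else {
		*ip_hdr = skb_network_header(skb);
		*l4_hdr = skb_transport_header(skb);
	}
}]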
@@ -3373,6 +3394,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
/* Memory barrier before checking head and tail */
smp_mb();
+ ++tx_ring->tx_stats.tx_stopped;
+
/* Check again in a case another CPU has just made room available. */
if (likely(I40E_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3662,7 +3685,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
u8 prio;
/* is DCB enabled at all? */
- if (vsi->tc_config.numtc == 1)
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
@@ -3688,35 +3712,55 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring)
{
- u16 i = xdp_ring->next_to_use;
- struct i40e_tx_buffer *tx_bi;
- struct i40e_tx_desc *tx_desc;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
+ struct i40e_tx_buffer *tx_bi = tx_head;
+ struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
void *data = xdpf->data;
u32 size = xdpf->len;
- dma_addr_t dma;
- if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
+ if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return I40E_XDP_CONSUMED;
- tx_bi = &xdp_ring->tx_bi[i];
- tx_bi->bytecount = size;
- tx_bi->gso_segs = 1;
- tx_bi->xdpf = xdpf;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record length, and DMA address */
- dma_unmap_len_set(tx_bi, len, size);
- dma_unmap_addr_set(tx_bi, dma, dma);
+ for (;;) {
+ dma_addr_t dma;
- tx_desc = I40E_TX_DESC(xdp_ring, i);
- tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
- | I40E_TXD_CMD,
- 0, size, 0);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ goto unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
+
+ if (++index == xdp_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_bi = &xdp_ring->tx_bi[index];
+ tx_desc = I40E_TX_DESC(xdp_ring, index);
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ size = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -3724,14 +3768,30 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
smp_wmb();
xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count)
- i = 0;
- tx_bi->next_to_watch = tx_desc;
- xdp_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = index;
return I40E_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_bi = &xdp_ring->tx_bi[index];
+ if (dma_unmap_len(tx_bi, len))
+ dma_unmap_page(xdp_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+ if (tx_bi == tx_head)
+ break;
+
+ if (!index)
+ index += xdp_ring->count;
+ index--;
+ }
+
+ return I40E_XDP_CONSUMED;
}
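[The multi-buffer rewrite must budget one descriptor for the head plus one per fragment before it starts mapping; a minimal sketch of that check in isolation (hypothetical helper; xdp_get_shared_info_from_frame() and xdp_frame_has_frags() are the upstream XDP multi-buffer accessors):

/* Hypothetical helper: does the ring have room for this frame? */
static bool i40e_xdp_ring_has_room(struct i40e_ring *xdp_ring,
				   struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;

	/* one descriptor for the head buffer + one per page fragment */
	return I40E_DESC_UNUSED(xdp_ring) >= 1 + nr_frags;
}]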
/**
@@ -3749,7 +3809,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
struct i40e_tx_buffer *first;
u32 td_offset = 0;
u32 tx_flags = 0;
- __be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso, count;
@@ -3791,15 +3850,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
- /* obtain protocol of skb */
- protocol = vlan_get_protocol(skb);
-
- /* setup IPv4/IPv6 offloads */
- if (protocol == htons(ETH_P_IP))
- tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
- tx_flags |= I40E_TX_FLAGS_IPV6;
-
tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index bfc2845c99d1..768290dc6f48 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -290,6 +290,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
int prev_pkt_ctr;
};
@@ -298,7 +299,9 @@ struct i40e_rx_queue_stats {
u64 alloc_page_failed;
u64 alloc_buff_failed;
u64 page_reuse_count;
- u64 realloc_count;
+ u64 page_alloc_count;
+ u64 page_waive_count;
+ u64 page_busy_count;
};
enum i40e_ring_state_t {
@@ -390,7 +393,6 @@ struct i40e_ring {
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
- struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -467,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 19da3b22160f..8c5118c8baaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_CONSUMED BIT(0)
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
+#define I40E_XDP_EXIT BIT(3)
/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 36a4ca1ffb1a..388c3d36d96a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1172,6 +1172,7 @@ struct i40e_eth_stats {
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
+ u64 rx_discards_other; /* rxerr1 */
};
/* Statistics collected per VEB per TC */
@@ -1403,6 +1404,10 @@ struct i40e_lldp_variables {
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT 49
+#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT 41
+#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b785d09c19f8..72ddcefc45b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1377,6 +1377,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vfr_reset - wait for a pending VF reset to finish
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, make sure that no other process has
+ * reserved the hardware for any reset operation. This check is done by
+ * examining the status of the RSTAT1 register used to signal the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
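[As an aside, the loop above is close to what read_poll_timeout() from <linux/iopoll.h> expresses in one call; a hedged sketch only (note the helper returns -ETIMEDOUT where the open-coded version returns -EAGAIN, and the timeout merely approximates 100 rounds of usleep_range(100, 200)):

/* Sketch only: the same wait via read_poll_timeout(). */
static int i40e_sync_vfr_reset_alt(struct i40e_hw *hw, int vf_id)
{
	u32 reg;

	return read_poll_timeout(rd32, reg,
				 reg & I40E_VFINT_ICR0_ADMINQ_MASK,
				 100, 100 * I40E_VFR_WAIT_COUNT, false,
				 hw, I40E_VFINT_ICR0_ENA(vf_id));
}]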
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1390,9 +1416,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1406,7 +1434,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync VFR reset before triggering the next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* wait for the VF driver to finish its reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset the VF using the VPGEN_VFRTRIG reg. This also sets the
+ * reset-in-progress state in the RSTAT1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -1496,10 +1536,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
- */
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1536,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
@@ -1569,8 +1611,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If the VF is already being reset, no need to trigger it again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1586,9 +1632,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
- break;
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1616,6 +1664,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If the VF is being reset in another thread, just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1627,6 +1679,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If the VF is being reset in another thread, just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1636,8 +1692,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If the VF is being reset in another thread, just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);
@@ -1877,19 +1938,17 @@ sriov_configure_out:
/***********************virtual channel routines******************/
/**
- * i40e_vc_send_msg_to_vf_ex
+ * i40e_vc_send_msg_to_vf
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
- * @is_quiet: true for not printing unsuccessful return values, false otherwise
*
* send msg to VF
**/
-static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen,
- bool is_quiet)
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_pf *pf;
struct i40e_hw *hw;
@@ -1904,25 +1963,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
hw = &pf->hw;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* single place to detect unsuccessful return values */
- if (v_retval && !is_quiet) {
- vf->num_invalid_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
- if (vf->num_invalid_msgs >
- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
- "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_invalid_msgs = 0;
- }
-
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
@@ -1936,23 +1976,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
}
/**
- * i40e_vc_send_msg_to_vf
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
-{
- return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
- msg, msglen, false);
-}
-
-/**
* i40e_vc_send_resp_to_vf
* @vf: pointer to the VF info
* @opcode: operation code
@@ -2037,6 +2060,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF must account for the VLAN tag both in
+ * max frame size checks and when reporting the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
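[A quick worked example of the adjustment (numbers illustrative; VLAN_HLEN is the 4-byte 802.1Q tag length from <linux/if_vlan.h>):

#include <stdio.h>

#define VLAN_HLEN 4 /* 802.1Q tag length */

int main(void)
{
	unsigned int port_max_frame = 9728; /* example port setting */
	int has_port_vlan = 1;

	if (has_port_vlan)
		port_max_frame -= VLAN_HLEN;

	/* 9724: the VF sizes frames so the tag it cannot see still fits */
	printf("max frame reported to VF: %u\n", port_max_frame);
	return 0;
}]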
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2137,6 +2179,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
@@ -2145,6 +2188,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
/* VFs only use TC 0 */
vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+ eth_zero_addr(vf->default_lan_addr.addr);
+ }
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
@@ -2280,7 +2327,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
aq_ret = I40E_ERR_PARAM;
@@ -2618,6 +2665,59 @@ error_param:
}
/**
+ * i40e_check_enough_queue - check whether enough free queues are available
+ * @vf: pointer to the VF info
+ * @needed: the number of queue pairs needed
+ *
+ * Returns a queue index when enough queues are available, or a negative
+ * error code otherwise.
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* the currently allocated queues are already enough */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, first check
+ * whether enough free queues sit directly behind the
+ * allocated range.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
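[A worked example of the fallback scan over the qp_pile (userspace mock; VALID stands in for I40E_PILE_VALID_BIT and the layout is made up):

#include <stdio.h>

#define VALID 0x8000u /* stands in for I40E_PILE_VALID_BIT */

/* Mock qp_pile: entries 0-3 and 10 in use, the rest free. */
static unsigned short pile[16] = {
	VALID, VALID, VALID, VALID, 0, 0, 0, 0, 0, 0, VALID, 0, 0, 0, 0, 0
};

/* Same contiguous-run scan as the fallback above; returns the index at
 * which a run of 'needed' free entries completes (driver callers only
 * test for a negative result).
 */
static int find_run(unsigned int needed)
{
	unsigned int i, pool_size = 0;

	for (i = 0; i < 16; i++) {
		if (pile[i] & VALID) {
			pool_size = 0;
			continue;
		}
		if (needed <= ++pool_size)
			return (int)i;
	}
	return -1;
}

int main(void)
{
	printf("%d\n", find_run(6)); /* 9: the free run 4..9 suffices */
	printf("%d\n", find_run(7)); /* -1: no run of 7 in this layout */
	return 0;
}]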
+
+/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2651,6 +2751,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;
@@ -2723,7 +2829,6 @@ error_param:
* i40e_check_vf_permission
* @vf: pointer to the VF info
* @al: MAC address list from virtchnl
- * @is_quiet: set true for printing msg without opcode info, false otherwise
*
* Check that the given list of MAC addresses is allowed. Will return -EPERM
* if any address in the list is not valid. Checks the following conditions:
@@ -2738,8 +2843,7 @@ error_param:
* addresses might not be accurate.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
- struct virtchnl_ether_addr_list *al,
- bool *is_quiet)
+ struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2747,7 +2851,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
int mac2add_cnt = 0;
int i;
- *is_quiet = false;
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
@@ -2771,7 +2874,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
- *is_quiet = true;
return -EPERM;
}
@@ -2822,7 +2924,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- bool is_quiet = false;
i40e_status ret = 0;
int i;
@@ -2839,7 +2940,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- ret = i40e_check_vf_permission(vf, al, &is_quiet);
+ ret = i40e_check_vf_permission(vf, al);
if (ret) {
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2877,8 +2978,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret, NULL, 0, is_quiet);
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
}
/**
@@ -4293,6 +4394,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
+ i40e_vlan_stripping_enable(vsi);
i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -4308,7 +4410,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* MAC addresses deleted.
*/
if ((!(vlan_id || qos) ||
- vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
vsi->info.pvid) {
ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
if (ret) {
@@ -4671,6 +4773,11 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
vf->trusted = setting;
+
+ /* request PF to sync mac/vlan filters for the VF */
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 49575a640a84..358bbdb58795 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -10,8 +10,6 @@
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-
#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
@@ -19,6 +17,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {
@@ -40,6 +39,7 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
+ I40E_VF_STATE_RESETTING
};
/* VF capabilities */
@@ -91,9 +91,6 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
u64 num_mdd_events; /* num of mdd events detected */
- /* num of continuous malformed or invalid msgs detected */
- u64 num_invalid_msgs;
- u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 945b1bb9c6f4..cd7b52fb6b46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,14 +10,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi_zc, 0,
@@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: whether an XSK pool is present
+ *
+ * Try to allocate a new software ring; if the allocation fails, return
+ * -ENOMEM and leave the existing ring in place. On success, substitute
+ * the old ring with the newly allocated one.
+ * Returns 0 on success, negative on failure.
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: whether zero-copy is enabled
+ *
+ * Reallocate the buffers of any rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure.
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
* i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
* certain ring/qid
* @vsi: Current VSI
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
if (err)
return err;
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -143,27 +194,28 @@ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
* i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
*
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
-static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- /* NB! xdp_prog will always be !NULL, due to the fact that
- * this path is enabled by setting an XDP program.
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return I40E_XDP_REDIR;
+ if (!err)
+ return I40E_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = I40E_XDP_EXIT;
+ else
+ result = I40E_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -175,16 +227,16 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (result == I40E_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = I40E_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = I40E_XDP_CONSUMED;
- break;
}
return result;
}
@@ -218,7 +270,6 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
ntu += nb_buffs;
if (ntu == rx_ring->count) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- xdp = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -241,21 +292,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto out;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
out:
xsk_buff_free(xdp);
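[The copy above relies on the xdp_buff layout in which metadata written via bpf_xdp_adjust_meta() sits immediately in front of the packet; copying from data_meta and then pulling metasize keeps the metadata reachable while excluding it from the payload. A userspace sketch of just the pointer arithmetic (mock buffer, not real xdp_buff internals):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* mock layout: 8 bytes of metadata directly before 16 bytes of data */
	unsigned char buf[64] = "META....packetpayload!!!";
	unsigned char *data_meta = buf;
	unsigned char *data = buf + 8;
	unsigned char *data_end = data + 16;

	size_t totalsize = data_end - data_meta; /* 24: bytes memcpy'd  */
	size_t metasize  = data - data_meta;     /* 8: later pulled off */

	unsigned char skb[64];
	memcpy(skb, data_meta, totalsize);
	printf("payload at skb + %zu, length %zu\n",
	       metasize, totalsize - metasize);
	return 0;
}]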
@@ -268,7 +323,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
unsigned int *rx_packets,
unsigned int *rx_bytes,
unsigned int size,
- unsigned int xdp_res)
+ unsigned int xdp_res,
+ bool *failure)
{
struct sk_buff *skb;
@@ -278,11 +334,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
+ if (xdp_res == I40E_XDP_EXIT) {
+ *failure = true;
+ return;
+ }
+
if (xdp_res == I40E_XDP_CONSUMED) {
xsk_buff_free(xdp_buff);
return;
}
-
if (xdp_res == I40E_XDP_PASS) {
/* NB! We are not checking for errors using
* i40e_test_staterr with
@@ -324,11 +384,17 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
+ u16 cleaned_count;
+
+ /* NB! xdp_prog will always be non-NULL, since this path is only
+ * enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -366,9 +432,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
- &rx_bytes, size, xdp_res);
+ &rx_bytes, size, xdp_res, &failure);
+ if (failure)
+ break;
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
@@ -379,7 +447,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
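[Because the ring size is a power of two, the refill accounting above turns wraparound subtraction into a single AND with count - 1; a tiny standalone check (values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int count = 512, mask = count - 1;
	unsigned int next_to_clean = 5, next_to_use = 500;

	/* free slots between producer and consumer, one slot kept unused */
	unsigned int cleaned = (next_to_clean - next_to_use - 1) & mask;

	printf("%u\n", cleaned); /* 16: slots 501..511 and 0..4 */
	return 0;
}]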
@@ -467,11 +535,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
- struct xdp_desc *descs = xdp_ring->xsk_descs;
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
@@ -591,13 +659,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return -ENETDOWN;
if (!i40e_enabled_xdp_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_queue_pairs)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index bb962987f300..821df248f8be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 59806d1f7e79..3f6187c16424 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -44,6 +44,9 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
+int iavf_status_to_errno(enum iavf_status status);
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
+
/* VSI state flags shared with common code */
enum iavf_vsi_state_t {
__IAVF_VSI_DOWN,
@@ -61,7 +64,6 @@ struct iavf_vsi {
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
int base_vector;
- u16 work_limit;
u16 qs_handle;
void *priv; /* client driver data reference. */
};
@@ -90,6 +92,7 @@ struct iavf_vsi {
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA 50
#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
(IAVF_MAX_VF_VSI * \
@@ -143,7 +146,8 @@ struct iavf_mac_filter {
u8 remove:1; /* filter needs to be removed */
u8 add:1; /* filter needs to be added */
u8 is_primary:1; /* filter is a default VF MAC */
- u8 padding:4;
+ u8 add_handled:1; /* received response for filter add */
+ u8 padding:3;
};
};
@@ -156,8 +160,12 @@ struct iavf_vlan {
struct iavf_vlan_filter {
struct list_head list;
struct iavf_vlan vlan;
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_vlan:1; /* filter is new, wait for PF answer */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 padding:5;
+ };
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@@ -188,7 +196,7 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
- __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
+ __IAVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */
__IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
@@ -201,6 +209,10 @@ enum iavf_state_t {
__IAVF_RUNNING, /* opened, working */
};
+enum iavf_critical_section_t {
+ __IAVF_IN_REMOVE_TASK, /* device being removed */
+};
+
#define IAVF_CLOUD_FIELD_OMAC 0x01
#define IAVF_CLOUD_FIELD_IMAC 0x02
#define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -241,12 +253,12 @@ struct iavf_adapter {
struct work_struct adminq_task;
struct delayed_work client_task;
wait_queue_head_t down_waitqueue;
+ wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
- struct mutex remove_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
@@ -284,6 +296,9 @@ struct iavf_adapter {
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
+#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_INITIAL_MAC_SET BIT(23)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
@@ -329,6 +344,21 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
+ /* flags for processing extended capability messages during
+ * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
+ * both a SEND and a RECV step, which must be processed in sequence.
+ *
+ * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will
+ * process one flag at a time during each state loop.
+ */
+ u64 extended_caps;
+#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
+#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+
+#define IAVF_EXTENDED_CAPS \
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+
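[A sketch of the one-flag-per-pass processing the comment above describes (control flow only; the function name and the send/receive steps are placeholders, not the driver's actual routines):

/* Hypothetical: one SEND or RECV step per state-machine pass. */
static void iavf_process_extended_caps(struct iavf_adapter *adapter)
{
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
		/* placeholder: send the VLAN_V2 capability request */
		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
		return; /* re-enter on the next pass */
	}

	if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
		/* placeholder: receive and parse the PF's reply */
		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
		return;
	}

	/* all extended caps handled: advance to __IAVF_INIT_CONFIG_ADAPTER */
}]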
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -404,6 +434,11 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+ /* snapshot of "num_active_queues" before setup_tc for qdisc add
+ * is invoked. This information is useful during qdisc del flow,
+ * to restore correct number of queues
+ */
+ int orig_num_active_queues;
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
@@ -438,6 +473,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state)
return "__IAVF_INIT_VERSION_CHECK";
case __IAVF_INIT_GET_RESOURCES:
return "__IAVF_INIT_GET_RESOURCES";
+ case __IAVF_INIT_EXTENDED_CAPS:
+ return "__IAVF_INIT_EXTENDED_CAPS";
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ return "__IAVF_INIT_CONFIG_ADAPTER";
case __IAVF_INIT_SW:
return "__IAVF_INIT_SW";
case __IAVF_INIT_FAILED:
@@ -497,6 +536,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -510,7 +550,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
void iavf_request_stats(struct iavf_adapter *adapter);
-void iavf_request_reset(struct iavf_adapter *adapter);
+int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
@@ -536,6 +576,8 @@ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac);
void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
netdev_features_t prev_features,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index e9cc7f6ddc46..34e46a23894f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -131,8 +131,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_INVALID_MAC_ADDR";
case IAVF_ERR_DEVICE_NOT_SUPPORTED:
return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
- case IAVF_ERR_MASTER_REQUESTS_PENDING:
- return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
case IAVF_ERR_INVALID_LINK_SETTINGS:
return "IAVF_ERR_INVALID_LINK_SETTINGS";
case IAVF_ERR_AUTONEG_NOT_COMPLETE:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 3bb56714beb0..a056e1545615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -581,9 +581,9 @@ static void iavf_get_drvinfo(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strscpy(drvinfo->driver, iavf_driver_name, 32);
+ strscpy(drvinfo->fw_version, "N/A", 4);
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
@@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
struct iavf_ring *rx_ring, *tx_ring;
- ec->tx_max_coalesced_frames = vsi->work_limit;
- ec->rx_max_coalesced_frames = vsi->work_limit;
-
/* Rx and Tx usecs per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
*/
@@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
- vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
@@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95116ef2f0ae..3fc572341781 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -51,6 +51,113 @@ MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
+int iavf_status_to_errno(enum iavf_status status)
+{
+ switch (status) {
+ case IAVF_SUCCESS:
+ return 0;
+ case IAVF_ERR_PARAM:
+ case IAVF_ERR_MAC_TYPE:
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ case IAVF_ERR_INVALID_PD_ID:
+ case IAVF_ERR_INVALID_QP_ID:
+ case IAVF_ERR_INVALID_CQ_ID:
+ case IAVF_ERR_INVALID_CEQ_ID:
+ case IAVF_ERR_INVALID_AEQ_ID:
+ case IAVF_ERR_INVALID_SIZE:
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ case IAVF_ERR_INVALID_VF_ID:
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ case IAVF_ERR_INVALID_SD_INDEX:
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ case IAVF_ERR_INVALID_SD_TYPE:
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return -EINVAL;
+ case IAVF_ERR_NVM:
+ case IAVF_ERR_NVM_CHECKSUM:
+ case IAVF_ERR_PHY:
+ case IAVF_ERR_CONFIG:
+ case IAVF_ERR_UNKNOWN_PHY:
+ case IAVF_ERR_LINK_SETUP:
+ case IAVF_ERR_ADAPTER_STOPPED:
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ case IAVF_ERR_RESET_FAILED:
+ case IAVF_ERR_BAD_PTR:
+ case IAVF_ERR_SWFW_SYNC:
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ case IAVF_ERR_QUEUE_EMPTY:
+ case IAVF_ERR_FLUSHED_QUEUE:
+ case IAVF_ERR_OPCODE_MISMATCH:
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ case IAVF_ERR_MEMCPY_FAILED:
+ case IAVF_ERR_SRQ_ENABLED:
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ case IAVF_ERR_BAD_IWARP_CQE:
+ case IAVF_ERR_NVM_BLANK_MODE:
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return -EIO;
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return -ENODEV;
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ case IAVF_ERR_RING_FULL:
+ return -ENOSPC;
+ case IAVF_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case IAVF_ERR_TIMEOUT:
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return -ETIMEDOUT;
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ case IAVF_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return -EALREADY;
+ case IAVF_ERR_NOT_READY:
+ return -EBUSY;
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return -EMSGSIZE;
+ }
+
+ return -EIO;
+}
+
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
+{
+ switch (v_status) {
+ case VIRTCHNL_STATUS_SUCCESS:
+ return 0;
+ case VIRTCHNL_STATUS_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+ return -EINVAL;
+ case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+ case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+ case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+ return -EIO;
+ case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ }
+
+ return -EIO;
+}
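[Typical call-site shape for these helpers (hypothetical example; iavf_aq_send_msg_to_pf() is the driver's existing admin-queue send routine):

static int iavf_example_send(struct iavf_adapter *adapter)
{
	enum iavf_status status;

	status = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_VERSION,
					IAVF_SUCCESS, NULL, 0, NULL);

	/* callers now see a normal kernel errno, not a raw status code */
	return iavf_status_to_errno(status);
}]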
+
/**
* iavf_pdev_to_adapter - go from pci_dev to adapter
* @pdev: pci_dev pointer
@@ -302,8 +409,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
rd32(hw, IAVF_VFINT_ICR01);
rd32(hw, IAVF_VFINT_ICR0_ENA1);
- /* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state != __IAVF_REMOVE)
+ /* schedule work on the private workqueue */
+ queue_work(iavf_wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -492,10 +600,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* Spread the IRQ affinity hints across online CPUs. Note that
* get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_set_affinity_hint.
+ * it's safe to use as a hint for irq_update_affinity_hint.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -505,7 +613,7 @@ free_queue_irqs:
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -557,7 +665,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
for (vector = 0; vector < q_vectors; vector++) {
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
@@ -735,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
* iavf_get_num_vlans_added - get number of VLANs added
* @adapter: board private structure
*/
-static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
@@ -798,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
return -ENOMEM;
- if (proto == cpu_to_be16(ETH_P_8021Q))
- set_bit(vid, adapter->vsi.active_cvlans);
- else
- set_bit(vid, adapter->vsi.active_svlans);
-
return 0;
}
@@ -875,7 +978,9 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->add_handled = false;
f->is_new_mac = true;
+ f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -885,42 +990,129 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
}
/**
- * iavf_set_mac - NDO callback to set port mac address
- * @netdev: network interface device structure
- * @p: pointer to an address structure
+ * iavf_replace_primary_mac - Replace current primary address
+ * @adapter: board private structure
+ * @new_mac: new MAC address to be applied
*
- * Returns 0 on success, negative on failure
+ * Replace the current dev_addr and send a request to the PF for removal
+ * of the previous primary MAC address filter and addition of the new
+ * primary MAC filter.
+ * Returns 0 on success, -ENOMEM on failure.
+ *
+ * Do not call this while holding mac_vlan_list_lock!
**/
-static int iavf_set_mac(struct net_device *netdev, void *p)
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac)
{
- struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
- return 0;
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->is_primary = false;
+ }
+
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
- f = iavf_add_filter(adapter, addr->sa_data);
+ f = iavf_add_filter(adapter, new_mac);
+
+ if (f) {
+ /* Always send the request to add if changing primary MAC
+ * even if filter is already present on the list
+ */
+ f->is_primary = true;
+ f->add = true;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
+ ether_addr_copy(hw->mac.addr, new_mac);
+ }
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* schedule the watchdog task to immediately process the request */
if (f) {
- ether_addr_copy(hw->mac.addr, addr->sa_data);
+ queue_work(iavf_wq, &adapter->watchdog_task.work);
+ return 0;
}
+ return -ENOMEM;
+}
- return (f == NULL) ? -ENOMEM : 0;
+/**
+ * iavf_is_mac_set_handled - check whether the PF has handled a set MAC request
+ * @netdev: network interface device structure
+ * @macaddr: MAC address to set
+ *
+ * Returns true if the request has been handled, false otherwise
+ */
+static bool iavf_is_mac_set_handled(struct net_device *netdev,
+ const u8 *macaddr)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_mac_filter *f;
+ bool ret = false;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ f = iavf_find_filter(adapter, macaddr);
+
+ if (!f || (!f->add && f->add_handled))
+ ret = true;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ return ret;
+}
+
+/**
+ * iavf_set_mac - NDO callback to set port MAC address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int iavf_set_mac(struct net_device *netdev, void *p)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int ret;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = iavf_replace_primary_mac(adapter, addr->sa_data);
+
+ if (ret)
+ return ret;
+
+ /* If this is an initial set MAC during VF spawn, do not wait */
+ if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
+ adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
+ return 0;
+ }
+
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+ iavf_is_mac_set_handled(netdev, addr->sa_data),
+ msecs_to_jiffies(2500));
+
+ /* If ret < 0, the wait was interrupted by a signal.
+ * If ret == 0, the wait timed out.
+ * Otherwise we received a response for set MAC from the PF;
+ * check whether the netdev MAC was updated to the requested MAC.
+ * If it was, set MAC succeeded; otherwise it failed, so return -EACCES.
+ */
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return -EAGAIN;
+
+ if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return -EACCES;
+
+ return 0;
}
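The error mapping above follows the three-way return convention of wait_event_interruptible_timeout(): negative when interrupted by a signal, zero on timeout, positive (remaining jiffies) once the condition holds. A compact sketch of the pattern, with wq and mac_is_handled() as placeholder names:

	long ret;

	ret = wait_event_interruptible_timeout(wq, mac_is_handled(),
					       msecs_to_jiffies(2500));
	if (ret < 0)
		return ret;	/* interrupted by a signal */
	if (!ret)
		return -EAGAIN;	/* timed out waiting for the PF */
	/* ret > 0: condition became true, verify the final state */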
/**
@@ -1075,80 +1267,156 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
}
/**
- * iavf_down - Shutdown the connection processing
+ * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not yet sent to
+ * the PF and mark the others for removal.
* @adapter: board private structure
- *
- * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
-void iavf_down(struct iavf_adapter *adapter)
+static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct iavf_vlan_filter *vlf;
- struct iavf_cloud_filter *cf;
- struct iavf_fdir_fltr *fdir;
- struct iavf_mac_filter *f;
- struct iavf_adv_rss *rss;
-
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- adapter->link_up = false;
- iavf_napi_disable_all(adapter);
- iavf_irq_disable(adapter);
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
/* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */
- list_for_each_entry(f, &adapter->mac_filter_list, list) {
- f->remove = true;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
+ list) {
+ if (f->add) {
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->remove = true;
+ }
}
/* remove all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->remove = true;
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list) {
+ if (vlf->add) {
+ list_del(&vlf->list);
+ kfree(vlf);
+ } else {
+ vlf->remove = true;
+ }
}
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
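This helper and the three companions that follow it rely on the same delete-while-iterating idiom: list_for_each_entry_safe() caches the next node up front, so the current entry may be unlinked and freed mid-walk. A reduced sketch with a stand-in struct item:

	struct item {
		struct list_head list;
		bool pending;
	};
	struct item *it, *tmp;
	LIST_HEAD(head);

	list_for_each_entry_safe(it, tmp, &head, list) {
		if (it->pending) {
			/* safe: 'tmp' already points at the next node */
			list_del(&it->list);
			kfree(it);
		}
	}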
+
+/**
+ * iavf_clear_cloud_filters - Remove cloud filters not yet sent to the PF
+ * and mark the others for removal.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */
spin_lock_bh(&adapter->cloud_filter_list_lock);
- list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
- cf->del = true;
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->add) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ } else {
+ cf->del = true;
+ }
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * iavf_clear_fdir_filters - Remove fdir filters not yet sent to the PF and
+ * mark the others for removal.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ }
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
+ * iavf_clear_adv_rss_conf - Remove advanced RSS configurations not yet sent
+ * to the PF and mark the others for removal.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
+{
+ struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */
spin_lock_bh(&adapter->adv_rss_lock);
- list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
- rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ list_del(&rss->list);
+ kfree(rss);
+ } else {
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ }
+ }
spin_unlock_bh(&adapter->adv_rss_lock);
+}
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
- adapter->state != __IAVF_RESETTING) {
+/**
+ * iavf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
+ **/
+void iavf_down(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->state <= __IAVF_DOWN_PENDING)
+ return;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ adapter->link_up = false;
+ iavf_napi_disable_all(adapter);
+ iavf_irq_disable(adapter);
+
+ iavf_clear_mac_vlan_filters(adapter);
+ iavf_clear_cloud_filters(adapter);
+ iavf_clear_fdir_filters(adapter);
+ iavf_clear_adv_rss_conf(adapter);
+
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
*/
- adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ if (!list_empty(&adapter->mac_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!list_empty(&adapter->vlan_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!list_empty(&adapter->cloud_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ if (!list_empty(&adapter->fdir_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ if (!list_empty(&adapter->adv_rss_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1421,7 +1689,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
struct iavf_aqc_get_set_rss_key_data *rss_key =
(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
- int ret = 0;
+ enum iavf_status status;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1430,24 +1698,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
return -EBUSY;
}
- ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
- if (ret) {
+ status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
+ return iavf_status_to_errno(status);
}
- ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
- adapter->rss_lut, adapter->rss_lut_size);
- if (ret) {
+ status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
+ return iavf_status_to_errno(status);
}
- return ret;
+ return 0;
}
@@ -1517,7 +1786,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
static int iavf_init_rss(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- int ret;
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
@@ -1533,9 +1801,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
iavf_fill_rss_lut(adapter);
netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
- ret = iavf_config_rss(adapter);
- return ret;
+ return iavf_config_rss(adapter);
}
/**
@@ -1564,7 +1831,7 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll, NAPI_POLL_WEIGHT);
+ iavf_napi_poll);
}
return 0;
@@ -2003,23 +2270,24 @@ static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
- int err;
+ enum iavf_status status;
+ int ret;
WARN_ON(adapter->state != __IAVF_STARTUP);
/* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- err = iavf_set_mac_type(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
+ status = iavf_set_mac_type(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
goto err;
}
- err = iavf_check_reset_complete(hw);
- if (err) {
+ ret = iavf_check_reset_complete(hw);
+ if (ret) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ ret);
goto err;
}
hw->aq.num_arq_entries = IAVF_AQ_LEN;
@@ -2027,14 +2295,15 @@ static void iavf_startup(struct iavf_adapter *adapter)
hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- err = iavf_init_adminq(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
+ status = iavf_init_adminq(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ status);
goto err;
}
- err = iavf_send_api_ver(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+ ret = iavf_send_api_ver(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
iavf_shutdown_adminq(hw);
goto err;
}
@@ -2070,7 +2339,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter)
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver(adapter);
if (err) {
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ if (err == -EALREADY)
err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
@@ -2120,7 +2389,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
"Requested %d queues, but PF only gave us %d.\n",
num_req_queues,
adapter->vsi_res->num_queue_pairs);
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
iavf_schedule_reset(adapter);
@@ -2131,7 +2400,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2171,11 +2439,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
}
err = iavf_get_vf_config(adapter);
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
- } else if (err == IAVF_ERR_PARAM) {
- /* We only get ERR_PARAM if the device is in a very bad
+ goto err;
+ } else if (err == -EINVAL) {
+ /* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
*/
@@ -2189,26 +2457,18 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
err = iavf_parse_vf_resource_msg(adapter);
- if (err)
- goto err_alloc;
-
- err = iavf_send_vf_offload_vlan_v2_msg(adapter);
- if (err == -EOPNOTSUPP) {
- /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
- * go directly to finishing initialization
- */
- iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
- return;
- } else if (err) {
- dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
+ if (err) {
+ dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
err);
goto err_alloc;
}
-
- /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
- * state accordingly
+ /* Some features require additional messages to negotiate extended
+ * capabilities. These are processed in sequence by the
+ * __IAVF_INIT_EXTENDED_CAPS driver state.
*/
- iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ adapter->extended_caps = IAVF_EXTENDED_CAPS;
+
+ iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
return;
err_alloc:
@@ -2219,35 +2479,93 @@ err:
}
/**
- * iavf_init_get_offload_vlan_v2_caps - part of driver startup
+ * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
* @adapter: board private structure
*
- * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
- * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
- * not negotiated, then this state will never be entered.
+ * Function handles sending the extended VLAN V2 capability message to the
+ * PF. It must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not
+ * sent, e.g. because the PF did not negotiate VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
+
+ ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
+ * we did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
+ }
+
+ /* Sending has been handled either way, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
+}
+
+/**
+ * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the extended VLAN V2 capability message from
+ * the PF.
**/
-static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
int ret;
- WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
ret = iavf_get_vf_vlan_v2_caps(adapter);
- if (ret) {
- if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
- iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret)
goto err;
- }
- iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ /* We've processed receipt of the VLAN V2 caps message */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
return;
err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
+ * iavf_init_process_extended_caps - Part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
+ * handles negotiating capabilities for features which require an additional
+ * message.
+ *
+ * Once all extended capabilities exchanges are finished, the driver will
+ * transition into __IAVF_INIT_CONFIG_ADAPTER.
+ */
+static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
+{
+ WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
+
+ /* Process capability exchange for VLAN V2 */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
+ iavf_init_send_offload_vlan_v2_caps(adapter);
+ return;
+ }
+
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
+ iavf_init_recv_offload_vlan_v2_caps(adapter);
+ return;
+ }
+
+ /* When we reach here, no further extended capability exchanges are
+ * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER.
+ */
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+}
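The bit-per-step design makes the sequence extensible: a new capability only needs a SEND and a RECV bit plus one more block in the chain above. A sketch for a hypothetical capability "FOO" (the bit values and helpers below are illustrative, not part of the driver):

	#define IAVF_EXTENDED_CAP_SEND_FOO	BIT(2)	/* hypothetical */
	#define IAVF_EXTENDED_CAP_RECV_FOO	BIT(3)	/* hypothetical */

	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_FOO) {
		iavf_init_send_foo_caps(adapter);	/* hypothetical */
		return;
	}
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_FOO) {
		iavf_init_recv_foo_caps(adapter);	/* hypothetical */
		return;
	}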
+
+/**
* iavf_init_config_adapter - last part of driver startup
* @adapter: board private structure
*
@@ -2287,6 +2605,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
+ adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
+
adapter->tx_desc_count = IAVF_DEFAULT_TXD;
adapter->rx_desc_count = IAVF_DEFAULT_RXD;
err = iavf_init_interrupt_scheme(adapter);
@@ -2374,17 +2694,22 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock))
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
goto restart_watchdog;
+ }
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
- if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
- adapter->state != __IAVF_RESETTING) {
- iavf_change_state(adapter, __IAVF_RESETTING);
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+ queue_work(iavf_wq, &adapter->reset_task);
+ return;
}
switch (adapter->state) {
@@ -2406,8 +2731,8 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
- case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
- iavf_init_get_offload_vlan_v2_caps(adapter);
+ case __IAVF_INIT_EXTENDED_CAPS:
+ iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
@@ -2419,6 +2744,15 @@ static void iavf_watchdog_task(struct work_struct *work)
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Do not update the state and do not reschedule the
+ * watchdog task; iavf_remove should handle this state,
+ * as this path can otherwise loop forever.
+ */
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
@@ -2435,6 +2769,17 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Set state to __IAVF_INIT_FAILED and perform remove
+ * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
+ * doesn't bring the state back to __IAVF_COMM_FAILED.
+ */
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
@@ -2507,7 +2852,8 @@ static void iavf_watchdog_task(struct work_struct *work)
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state >= __IAVF_DOWN)
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
@@ -2515,6 +2861,13 @@ restart_watchdog:
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
}
+/**
+ * iavf_disable_vf - disable VF
+ * @adapter: board private structure
+ *
+ * Set communication failed flag and free all resources.
+ * NOTE: This function is expected to be called while holding crit_lock.
+ **/
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
@@ -2569,7 +2922,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
- mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -2594,20 +2946,26 @@ static void iavf_reset_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf;
+ enum iavf_status status;
u32 reg_val;
int i = 0, err;
bool running;
+ /* Detach interface to avoid subsequent NDO callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- if (mutex_is_locked(&adapter->remove_lock))
- return;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state != __IAVF_REMOVE)
+ queue_work(iavf_wq, &adapter->reset_task);
- if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
- schedule_work(&adapter->reset_task);
- return;
+ goto reset_finish;
}
+
while (!mutex_trylock(&adapter->client_lock))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
@@ -2662,6 +3020,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2670,13 +3029,10 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = ((adapter->state == __IAVF_RUNNING) ||
- (adapter->state == __IAVF_RESETTING));
+ running = adapter->state == __IAVF_RUNNING;
if (running) {
- netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@@ -2695,13 +3051,16 @@ continue_reset:
/* kill and reinit the admin queue */
iavf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- err = iavf_init_adminq(hw);
- if (err)
+ status = iavf_init_adminq(hw);
+ if (status) {
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
- err);
+ status);
+ goto reset_err;
+ }
adapter->aq_required = 0;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if (adapter->flags & (IAVF_FLAG_REINIT_MSIX_NEEDED |
+ IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_reinit_interrupt_scheme(adapter);
if (err)
goto reset_err;
@@ -2757,6 +3116,9 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
+ bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+ bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
@@ -2773,12 +3135,13 @@ continue_reset:
if (err)
goto reset_err;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if (adapter->flags & (IAVF_FLAG_REINIT_MSIX_NEEDED |
+ IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_request_traffic_irqs(adapter, netdev->name);
if (err)
goto reset_err;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
}
iavf_configure(adapter);
@@ -2787,25 +3150,33 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
- netdev->flags |= IFF_UP;
+
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
+
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- return;
+ goto reset_finish;
reset_err:
- mutex_unlock(&adapter->client_lock);
- mutex_unlock(&adapter->crit_lock);
if (running) {
- iavf_change_state(adapter, __IAVF_RUNNING);
- netdev->flags |= IFF_UP;
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
}
+ iavf_disable_vf(adapter);
+
+ mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
+reset_finish:
+ rtnl_lock();
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
/**
@@ -2826,13 +3197,19 @@ static void iavf_adminq_task(struct work_struct *work)
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto out;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ queue_work(iavf_wq, &adapter->adminq_task);
+ goto out;
+ }
+
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
goto out;
- if (iavf_lock_timeout(&adapter->crit_lock, 200))
- goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2848,6 +3225,24 @@ static void iavf_adminq_task(struct work_struct *work)
} while (pending);
mutex_unlock(&adapter->crit_lock);
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
+ if (adapter->netdev_registered ||
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ netdev_update_features(netdev);
+ rtnl_unlock();
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features
+ (adapter, 0, netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+ }
+
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
adapter->state == __IAVF_RESETTING)
@@ -3100,6 +3495,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
+ u32 tx_rate_rem = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
@@ -3114,12 +3510,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
return -EINVAL;
if (mqprio_qopt->min_rate[i]) {
dev_err(&adapter->pdev->dev,
- "Invalid min tx rate (greater than 0) specified\n");
+ "Invalid min tx rate (greater than 0) specified for TC%d\n",
+ i);
return -EINVAL;
}
- /*convert to Mbps */
+
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
IAVF_MBPS_DIVISOR);
+
+ if (mqprio_qopt->max_rate[i] &&
+ tx_rate < IAVF_MBPS_QUANTA) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, minimum %dMbps\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
+ (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+ if (tx_rate_rem != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, not divisible by %d\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
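The two new checks reduce to integer arithmetic on the converted rate. A worked sketch, assuming IAVF_MBPS_DIVISOR == 125000 (bytes/s to Mbps) and IAVF_MBPS_QUANTA == 50 as defined in the driver headers:

	u64 tx_rate;
	u32 rem;

	tx_rate = div_u64(13107200ULL, 125000); /* 13107200 B/s -> 104 Mbps */
	/* 104 >= 50, so the minimum-rate check passes... */
	div_u64_rem(tx_rate, 50, &rem);		/* ...but rem == 4 */
	if (rem)
		return -EINVAL;	/* 104 Mbps is not a multiple of the quanta */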
@@ -3186,6 +3602,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
netif_tx_disable(netdev);
iavf_del_all_cloud_filters(adapter);
adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
+ total_qps = adapter->orig_num_active_queues;
goto exit;
} else {
return -EINVAL;
@@ -3229,7 +3646,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
adapter->ch_config.ch_info[i].offset = 0;
}
}
+
+ /* Take a snapshot of the original configuration, such as
+ * "num_active_queues". It is used later, when the delete-ADQ flow
+ * is exercised, so that once that flow completes the VF goes back
+ * to its original queue configuration.
+ */
+
+ adapter->orig_num_active_queues = adapter->num_active_queues;
+
+ /* Store the per-TC queue info so that the VF is configured with
+ * the correct number of queues when it completes the ADQ
+ * configuration flow.
+ */
adapter->ch_config.total_qps = total_qps;
+
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
@@ -3246,6 +3677,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
}
}
exit:
+ if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return 0;
+
+ netif_set_real_num_rx_queues(netdev, total_qps);
+ netif_set_real_num_tx_queues(netdev, total_qps);
+
return ret;
}
@@ -3522,6 +3959,29 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
}
/**
+ * iavf_find_cf - Find the cloud filter in the list
+ * @adapter: Board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
+ unsigned long *cookie)
+{
+ struct iavf_cloud_filter *filter = NULL;
+
+ if (!cookie)
+ return NULL;
+
+ list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ }
+ return NULL;
+}
+
+/**
* iavf_configure_clsflower - Add tc flower filters
* @adapter: board private structure
* @cls_flower: Pointer to struct flow_cls_offload
@@ -3552,6 +4012,15 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter->cookie = cls_flower->cookie;
+ /* bail out here if filter already exists */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ if (iavf_find_cf(adapter, &cls_flower->cookie)) {
+ dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
+ err = -EEXIST;
+ goto spin_unlock;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
/* set the mask to all zeroes to begin with */
memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
/* start out with flow type and eth type IPv4 to begin with */
@@ -3570,6 +4039,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
adapter->num_cloud_filters++;
filter->add = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+spin_unlock:
spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
if (err)
@@ -3579,28 +4049,6 @@ err:
return err;
}
-/* iavf_find_cf - Find the cloud filter in the list
- * @adapter: Board private structure
- * @cookie: filter specific cookie
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * cloud_filter_list_lock.
- */
-static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
- unsigned long *cookie)
-{
- struct iavf_cloud_filter *filter = NULL;
-
- if (!cookie)
- return NULL;
-
- list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
- if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
- return filter;
- }
- return NULL;
-}
-
/**
* iavf_delete_clsflower - Remove tc flower filters
* @adapter: board private structure
@@ -3722,8 +4170,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in the __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting to the NETDEV_REGISTER event.
+ * We have to bail out here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
@@ -3798,17 +4255,42 @@ err_unlock:
static int iavf_close(struct net_device *netdev)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ u64 aq_to_restore;
int status;
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return 0;
+ mutex_lock(&adapter->crit_lock);
- while (!mutex_trylock(&adapter->crit_lock))
- usleep_range(500, 1000);
+ if (adapter->state <= __IAVF_DOWN_PENDING) {
+ mutex_unlock(&adapter->crit_lock);
+ return 0;
+ }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
+ /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
+ * IAVF_FLAG_AQ_DISABLE_QUEUES because that leads to an rtnl deadlock
+ * with adminq_task() until iavf_close() times out. We must send
+ * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
+ * disabling the queues possible for the VF. Give only the necessary
+ * flags to iavf_down() and save the others to be restored right before
+ * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
+ * been sent and the iavf is in the DOWN state.
+ */
+ aq_to_restore = adapter->aq_required;
+ adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
+
+ /* Remove flags which we do not want to send after close or we want to
+ * send before disable queues.
+ */
+ aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
+ IAVF_FLAG_AQ_ENABLE_QUEUES |
+ IAVF_FLAG_AQ_CONFIGURE_QUEUES |
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER |
+ IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
+ IAVF_FLAG_AQ_ADD_FDIR_FILTER |
+ IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter);
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
@@ -3832,6 +4314,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+
+ mutex_lock(&adapter->crit_lock);
+ adapter->aq_required |= aq_to_restore;
+ mutex_unlock(&adapter->crit_lock);
return 0;
}
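The aq_to_restore handling above is a save-mask-restore pattern on the request bitmask. A distilled sketch, with KEEP_MASK and DROP_MASK standing in for the flag combinations used in iavf_close():

	u64 saved = adapter->aq_required;	/* save every pending request */

	adapter->aq_required &= KEEP_MASK;	/* keep only what iavf_down() needs */
	saved &= ~DROP_MASK;			/* drop requests that are now moot */
	/* ...shutdown sequence runs with the reduced mask... */
	adapter->aq_required |= saved;		/* restore the remainder */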
@@ -3853,8 +4339,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
iavf_notify_client_l2_params(&adapter->vsi);
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ queue_work(iavf_wq, &adapter->reset_task);
+ }
return 0;
}
@@ -3933,7 +4422,7 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
@@ -4368,12 +4857,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_regions(pdev, iavf_driver_name);
@@ -4431,7 +4917,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
mutex_init(&adapter->crit_lock);
mutex_init(&adapter->client_lock);
- mutex_init(&adapter->remove_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -4456,6 +4941,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
+ /* Setup the wait queue for indicating virtchannel events */
+ init_waitqueue_head(&adapter->vc_waitqueue);
+
return 0;
err_ioremap:
@@ -4547,7 +5035,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- enum iavf_state_t prev_state = adapter->last_state;
struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
@@ -4556,14 +5043,37 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
int err;
- /* Indicate we are in remove and not to run reset_task */
- mutex_lock(&adapter->remove_lock);
- cancel_work_sync(&adapter->reset_task);
+
+ /* When a reboot/shutdown is in progress there is no need to do
+ * anything, as the adapter is already in the __IAVF_REMOVE state,
+ * which was set during the iavf_shutdown() callback.
+ */
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+ /* Wait until port initialization is complete.
+ * There are flows where register/unregister netdev may race.
+ */
+ while (1) {
+ mutex_lock(&adapter->crit_lock);
+ if (adapter->state == __IAVF_RUNNING ||
+ adapter->state == __IAVF_DOWN ||
+ adapter->state == __IAVF_INIT_FAILED) {
+ mutex_unlock(&adapter->crit_lock);
+ break;
+ }
+
+ mutex_unlock(&adapter->crit_lock);
+ usleep_range(500, 1000);
+ }
cancel_delayed_work_sync(&adapter->watchdog_task);
- cancel_delayed_work_sync(&adapter->client_task);
+
if (adapter->netdev_registered) {
- unregister_netdev(netdev);
+ rtnl_lock();
+ unregister_netdevice(netdev);
adapter->netdev_registered = false;
+ rtnl_unlock();
}
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_del_device(adapter);
@@ -4572,6 +5082,10 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
+ mutex_lock(&adapter->crit_lock);
+ dev_info(&adapter->pdev->dev, "Remove device\n");
+ iavf_change_state(adapter, __IAVF_REMOVE);
+
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -4579,37 +5093,24 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- dev_info(&adapter->pdev->dev, "Removing device\n");
+ iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
- iavf_change_state(adapter, __IAVF_REMOVE);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->adminq_task);
+ cancel_delayed_work_sync(&adapter->client_task);
+
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
- iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
- /* In case we enter iavf_remove from erroneous state, free traffic irqs
- * here, so as to not cause a kernel crash, when calling
- * iavf_reset_interrupt_capability.
- */
- if ((adapter->last_state == __IAVF_RESETTING &&
- prev_state != __IAVF_DOWN) ||
- (adapter->last_state == __IAVF_RUNNING &&
- !(netdev->flags & IFF_UP)))
- iavf_free_traffic_irqs(adapter);
-
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
- cancel_delayed_work_sync(&adapter->watchdog_task);
-
- cancel_work_sync(&adapter->adminq_task);
-
iavf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -4621,8 +5122,6 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
- mutex_unlock(&adapter->remove_lock);
- mutex_destroy(&adapter->remove_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
@@ -4692,8 +5191,6 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
- int ret;
-
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
@@ -4704,8 +5201,7 @@ static int __init iavf_init_module(void)
pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
return -ENOMEM;
}
- ret = pci_register_driver(&iavf_driver);
- return ret;
+ return pci_register_driver(&iavf_driver);
}
module_init(iavf_init_module);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46e3d1f6b604..2ea5c7c339bc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -18,7 +18,7 @@ enum iavf_status {
IAVF_ERR_ADAPTER_STOPPED = -9,
IAVF_ERR_INVALID_MAC_ADDR = -10,
IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
- IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12,
IAVF_ERR_INVALID_LINK_SETTINGS = -13,
IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
IAVF_ERR_RESET_FAILED = -15,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 8cbe7ad1347c..18b6a702a1d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or might always
+ * return 0 for the head/tail registers, so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -194,7 +197,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct iavf_tx_buffer *tx_buf;
struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int budget = vsi->work_limit;
+ unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = IAVF_TX_DESC(tx_ring, i);
@@ -374,29 +377,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
return &q_vector->rx == rc;
}
-static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
+#define IAVF_AIM_MULTIPLIER_100G 2560
+#define IAVF_AIM_MULTIPLIER_50G 1280
+#define IAVF_AIM_MULTIPLIER_40G 1024
+#define IAVF_AIM_MULTIPLIER_20G 512
+#define IAVF_AIM_MULTIPLIER_10G 256
+#define IAVF_AIM_MULTIPLIER_1G 32
+
+static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
- unsigned int divisor;
+ switch (speed_mbps) {
+ case SPEED_100000:
+ return IAVF_AIM_MULTIPLIER_100G;
+ case SPEED_50000:
+ return IAVF_AIM_MULTIPLIER_50G;
+ case SPEED_40000:
+ return IAVF_AIM_MULTIPLIER_40G;
+ case SPEED_25000:
+ case SPEED_20000:
+ return IAVF_AIM_MULTIPLIER_20G;
+ case SPEED_10000:
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
+ case SPEED_1000:
+ case SPEED_100:
+ return IAVF_AIM_MULTIPLIER_1G;
+ }
+}
- switch (q_vector->adapter->link_speed) {
+static unsigned int
+iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
+{
+ switch (speed_virtchnl) {
case VIRTCHNL_LINK_SPEED_40GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
- break;
+ return IAVF_AIM_MULTIPLIER_40G;
case VIRTCHNL_LINK_SPEED_25GB:
case VIRTCHNL_LINK_SPEED_20GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
- break;
- default:
+ return IAVF_AIM_MULTIPLIER_20G;
case VIRTCHNL_LINK_SPEED_10GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
- break;
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
case VIRTCHNL_LINK_SPEED_1GB:
case VIRTCHNL_LINK_SPEED_100MB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
- break;
+ return IAVF_AIM_MULTIPLIER_1G;
}
+}
- return divisor;
+static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
+{
+ if (ADV_LINK_SUPPORT(adapter))
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
+ else
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_virtchnl_itr_multiplier(adapter->link_speed);
}
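Worked example with the constants above, assuming IAVF_ITR_ADAPTIVE_MIN_INC == 2 as in the driver headers: on a 40G link the divisor is 2 * 1024 = 2048, so an average wire size of 3000 bytes contributes DIV_ROUND_UP(3000, 2048) * 2 = 4 to the next ITR value:

	/* 40G link: divisor = 2 * 1024 = 2048 (assuming MIN_INC == 2) */
	unsigned int divisor = iavf_itr_divisor(adapter);
	unsigned int inc = DIV_ROUND_UP(3000, divisor) * 2;	/* = 4 */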
/**
@@ -586,8 +620,9 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
- itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
- IAVF_ITR_ADAPTIVE_MIN_INC;
+ itr += DIV_ROUND_UP(avg_wire_size,
+ iavf_itr_divisor(q_vector->adapter)) *
+ IAVF_ITR_ADAPTIVE_MIN_INC;
if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= IAVF_ITR_ADAPTIVE_LATENCY;
@@ -1253,11 +1288,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
- if (!size)
- return NULL;
-
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
+ if (!size)
+ return rx_buffer;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1359,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1517,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 5ee1d118fd30..24a701fd140e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -5,10 +5,6 @@
#include "iavf_prototype.h"
#include "iavf_client.h"
-/* busy wait delay in msec */
-#define IAVF_BUSY_WAIT_DELAY 10
-#define IAVF_BUSY_WAIT_COUNT 50
-
/**
* iavf_send_pf_msg
* @adapter: adapter structure
@@ -22,17 +18,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
- enum iavf_status err;
+ enum iavf_status status;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
- err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
- if (err)
- dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
- op, iavf_stat_str(hw, err),
+ status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (status)
+ dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
+ op, iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return err;
+ return iavf_status_to_errno(status);
}
/**
@@ -55,6 +51,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
}
/**
+ * iavf_poll_virtchnl_msg
+ * @hw: HW configuration structure
+ * @event: event to populate on success
+ * @op_to_poll: requested virtchnl op to poll for
+ *
+ * Poll the admin receive queue for a virtchnl message matching op_to_poll.
+ * Returns 0 if a message with the requested opcode is found in the queue,
+ * or an error code if no matching message is waiting or another failure
+ * occurs.
+ */
+static int
+iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
+ enum virtchnl_ops op_to_poll)
+{
+ enum virtchnl_ops received_op;
+ enum iavf_status status;
+ u32 v_retval;
+
+ while (1) {
+ /* When the AQ is empty, iavf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ status = iavf_clean_arq_element(hw, event, NULL);
+ if (status != IAVF_SUCCESS)
+ return iavf_status_to_errno(status);
+ received_op =
+ (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+ if (op_to_poll == received_op)
+ break;
+ }
+
+ v_retval = le32_to_cpu(event->desc.cookie_low);
+ return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
+}
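The refactored getters below all share one calling convention: the caller owns the event buffer, and iavf_poll_virtchnl_msg() spins on the ARQ until the wanted opcode arrives or the queue drains. A sketch of that shape, with len and dst standing in for the caller's buffer size and destination:

	struct iavf_arq_event_info event;
	int err;

	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_VF_RESOURCES);
	if (!err)
		memcpy(dst, event.msg_buf, min(event.msg_len, len));

	kfree(event.msg_buf);
	return err;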
+
+/**
* iavf_verify_api_ver
* @adapter: adapter structure
*
@@ -65,55 +96,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
- struct virtchnl_version_info *pf_vvi;
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- err = iavf_clean_arq_element(hw, &event, NULL);
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_VERSION)
- break;
- }
+ event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
+ if (!err) {
+ struct virtchnl_version_info *pf_vvi =
+ (struct virtchnl_version_info *)event.msg_buf;
+
+ adapter->pf_version = *pf_vvi;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
-
- if (op != VIRTCHNL_OP_VERSION) {
- dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
- op);
- err = -EIO;
- goto out_alloc;
+ if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
+ (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
+ pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
}
- pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
- adapter->pf_version = *pf_vvi;
-
- if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
- err = -EIO;
-
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
@@ -208,33 +212,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
u16 len;
+ int err;
- len = sizeof(struct virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
- break;
- }
-
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
@@ -243,48 +231,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
if (!err)
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(hw, adapter->vf_res);
-out_alloc:
+
kfree(event.msg_buf);
-out:
+
return err;
}
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
u16 len;
- len = sizeof(struct virtchnl_vlan_caps);
+ len = sizeof(struct virtchnl_vlan_caps);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
- break;
- }
-
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
+ if (!err)
+ memcpy(&adapter->vlan_v2_caps, event.msg_buf,
+ min(event.msg_len, len));
- memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
@@ -297,11 +269,14 @@ out:
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
@@ -454,6 +429,20 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_mac_addr_type - Set the correct request type from the filter type
+ * @virtchnl_ether_addr: pointer to requested list element
+ * @filter: pointer to requested filter
+ **/
+static void
+iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
+ const struct iavf_mac_filter *filter)
+{
+ virtchnl_ether_addr->type = filter->is_primary ?
+ VIRTCHNL_ETHER_ADDR_PRIMARY :
+ VIRTCHNL_ETHER_ADDR_EXTRA;
+}
+
+/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
@@ -508,6 +497,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
f->add = false;
if (i == count)
@@ -577,6 +567,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
list_del(&f->list);
kfree(f);
@@ -606,6 +597,8 @@ static void iavf_mac_add_ok(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -626,6 +619,9 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
+
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
@@ -635,6 +631,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
}
/**
+ * iavf_vlan_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove VLAN filters from list based on PF response.
+ **/
+static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+{
+ struct iavf_vlan_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->is_new_vlan) {
+ if (f->vlan.tpid == ETH_P_8021Q)
+ clear_bit(f->vlan.vid,
+ adapter->vsi.active_cvlans);
+ else
+ clear_bit(f->vlan.vid,
+ adapter->vsi.active_svlans);
+
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -691,6 +714,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vlan_id[i] = f->vlan.vid;
i++;
f->add = false;
+ f->is_new_vlan = true;
if (i == count)
break;
}
@@ -703,10 +727,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
} else {
+ u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
+ u16 current_vlans = iavf_get_num_vlans_added(adapter);
struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+ if ((count + current_vlans) > max_vlans &&
+ current_vlans < max_vlans) {
+ count = max_vlans - iavf_get_num_vlans_added(adapter);
+ more = true;
+ }
+
len = sizeof(*vvfl_v2) + ((count - 1) *
sizeof(struct virtchnl_vlan_filter));
if (len > IAVF_MAX_AQ_BUF_SIZE) {
@@ -733,6 +765,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
+ if (i == count)
+ break;
+
/* give priority over outer if it's enabled */
if (filtering_support->outer)
vlan = &vvfl_v2->filters[i].outer;
@@ -744,8 +779,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
i++;
f->add = false;
- if (i == count)
- break;
+ f->is_new_vlan = true;
}
}
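/* Editor's sketch: the clamp above with illustrative numbers. If the PF
 * caps filtering at max_filters = 8 and 6 VLANs are already programmed
 * while 5 are pending, only 8 - 6 = 2 are sent now and 'more' re-arms
 * the request for the remaining 3.
 */
u16 max_vlans = 8;	/* adapter->vlan_v2_caps.filtering.max_filters */
u16 current_vlans = 6;	/* iavf_get_num_vlans_added(adapter) */
u16 count = 5;		/* filters on the list with f->add set */
bool more = false;

if ((count + current_vlans) > max_vlans && current_vlans < max_vlans) {
	count = max_vlans - current_vlans;	/* count becomes 2 */
	more = true;		/* 3 filters left for a later message */
}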
@@ -1827,11 +1861,29 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
*
* Request that the PF reset this VF. No response is expected.
**/
-void iavf_request_reset(struct iavf_adapter *adapter)
+int iavf_request_reset(struct iavf_adapter *adapter)
{
+ int err;
/* Don't check CURRENT_OP - this is always higher priority */
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ return err;
+}
+
+/**
+ * iavf_netdev_features_vlan_strip_set - update vlan strip status
+ * @netdev: ptr to netdev being adjusted
+ * @enable: enable or disable vlan strip
+ *
+ * Helper function to change vlan strip status in netdev->features.
+ */
+static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+ const bool enable)
+{
+ if (enable)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
/**
@@ -1922,6 +1974,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
@@ -2057,8 +2110,23 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* VLAN stripping could not be enabled by ethtool.
+ * Disable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* VLAN stripping could not be disabled by ethtool.
+ * Enable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, true);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ iavf_vlan_add_reject(adapter);
+ dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
@@ -2071,7 +2139,13 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
- if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ if (!ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr)) {
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+ }
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -2101,10 +2175,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
/* restore current mac address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
+ netif_addr_lock_bh(netdev);
/* refresh current mac address if changed */
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
@@ -2140,35 +2215,57 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
fallthrough;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
+ struct iavf_mac_filter *f;
+ bool was_mac_changed;
+ u64 aq_required = 0;
+
if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
memcpy(&adapter->vlan_v2_caps, msg,
min_t(u16, msglen,
sizeof(adapter->vlan_v2_caps)));
iavf_process_config(adapter);
+ adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ was_mac_changed = !ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr);
- /* unlock crit_lock before acquiring rtnl_lock as other
- * processes holding rtnl_lock could be waiting for the same
- * crit_lock
- */
- mutex_unlock(&adapter->crit_lock);
- /* VLAN capabilities can change during VFR, so make sure to
- * update the netdev features with the new capabilities
- */
- rtnl_lock();
- netdev_update_features(netdev);
- rtnl_unlock();
- if (iavf_lock_timeout(&adapter->crit_lock, 10000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
- __FUNCTION__);
-
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
- iavf_set_queue_vlan_tag_loc(adapter);
+ /* re-add all MAC filters */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (was_mac_changed &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ ether_addr_copy(f->macaddr,
+ adapter->hw.mac.addr);
+ f->is_new_mac = true;
+ f->add = true;
+ f->add_handled = false;
+ f->remove = false;
+ }
+
+ /* re-add all VLAN filters */
+ if (VLAN_FILTERING_ALLOWED(adapter)) {
+ struct iavf_vlan_filter *vlf;
+
+ if (!list_empty(&adapter->vlan_filter_list)) {
+ list_for_each_entry(vlf,
+ &adapter->vlan_filter_list,
+ list)
+ vlf->add = true;
+
+ aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ aq_required;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
@@ -2334,6 +2431,40 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
+ case VIRTCHNL_OP_ADD_VLAN_V2: {
+ struct iavf_vlan_filter *f;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->is_new_vlan) {
+ f->is_new_vlan = false;
+ if (!f->vlan.vid)
+ continue;
+ if (f->vlan.tpid == ETH_P_8021Q)
+ set_bit(f->vlan.vid,
+ adapter->vsi.active_cvlans);
+ else
+ set_bit(f->vlan.vid,
+ adapter->vsi.active_svlans);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ /* PF enabled VLAN stripping on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, true);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ /* PF disabled VLAN stripping on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index c36faa7d1471..9183d480b70b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -18,8 +18,12 @@ ice-y := ice_main.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_fltr.o \
+ ice_pf_vsi_vlan_ops.o \
+ ice_vsi_vlan_ops.o \
+ ice_vsi_vlan_lib.o \
ice_fdir.o \
ice_ethtool_fdir.o \
+ ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
@@ -29,9 +33,16 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
+ice-$(CONFIG_PCI_IOV) += \
+ ice_sriov.o \
+ ice_virtchnl.o \
+ ice_virtchnl_allowlist.o \
+ ice_virtchnl_fdir.o \
+ ice_vf_mbx.o \
+ ice_vf_vsi_vlan_ops.o \
+ ice_vf_lib.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
+ice-$(CONFIG_TTY) += ice_gnss.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4e16d185077d..001500afc4a6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -51,9 +51,8 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
-#if IS_ENABLED(CONFIG_DCB)
-#include <scsi/iscsi_proto.h>
-#endif /* CONFIG_DCB */
+#include <net/gtp.h>
+#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -63,8 +62,8 @@
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
-#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
@@ -72,6 +71,8 @@
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
+#include "ice_vsi_vlan_ops.h"
+#include "ice_gnss.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -107,7 +108,6 @@
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
-#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
@@ -182,7 +182,9 @@
enum ice_feature {
ICE_F_DSCP,
+ ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
+ ICE_F_GNSS,
ICE_F_MAX
};
@@ -246,8 +248,6 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
- struct ice_vsi *dflt_vsi; /* default VSI for this switch */
- u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_pf_state {
@@ -280,7 +280,6 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
- ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
@@ -291,6 +290,7 @@ enum ice_pf_state {
ICE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_PHY_INIT_COMPLETE,
ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
+ ICE_AUX_ERR_PENDING,
ICE_STATE_NBITS /* must be last */
};
@@ -301,7 +301,6 @@ enum ice_vsi_state {
ICE_VSI_NETDEV_REGISTERED,
ICE_VSI_UMAC_FLTR_CHANGED,
ICE_VSI_MMAC_FLTR_CHANGED,
- ICE_VSI_VLAN_FLTR_CHANGED,
ICE_VSI_PROMISC_CHANGED,
ICE_VSI_STATE_NBITS /* must be last */
};
@@ -331,7 +330,7 @@ struct ice_vsi {
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- s16 vf_id; /* VF ID for SR-IOV VSIs */
+ struct ice_vf *vf; /* VF associated with this VSI */
u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
@@ -368,6 +367,8 @@ struct ice_vsi {
u8 irqs_ready:1;
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
+ struct ice_vsi_vlan_ops inner_vlan_ops;
+ struct ice_vsi_vlan_ops outer_vlan_ops;
u16 num_vlan;
/* queue information */
@@ -468,7 +469,6 @@ enum ice_pf_flags {
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
ICE_FLAG_PTP, /* PTP is enabled by software */
- ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -482,7 +482,11 @@ enum ice_pf_flags {
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
+ ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_MTU_CHANGED,
+ ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -523,15 +527,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
- /* Virtchnl/SR-IOV config info */
- struct ice_vf *vf;
- u16 num_alloc_vfs; /* actual number of VFs allocated */
- u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_qps_per_vf;
- u16 num_msix_per_vf;
- /* used to ratelimit the MDD event logging */
- unsigned long last_printed_mdd_jiffies;
- DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
@@ -544,8 +540,12 @@ struct ice_pf {
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
+ struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
+ struct tty_driver *ice_gnss_tty_driver;
+ struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES];
+ struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES];
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
@@ -558,6 +558,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 oicr_err_reg;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
@@ -671,7 +672,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
@@ -683,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -698,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of the queue to check for XSK buff pool presence
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * The XDP ring is picked from the Rx ring, and the Rx ring is picked based
+ * on the provided queue id. The reason is that a queue vector might have
+ * more than one XDP ring assigned, e.g. when the user reduced the queue
+ * count on the netdev; the Rx ring carries a pointer to one of these XDP
+ * rings for its own purposes, such as handling the XDP_TX action, so we can
+ * piggyback here on the rx_ring->xdp_ring assignment that was done during
+ * XDP ring initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->num_xdp_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
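/* Editor's sketch: with the new void contract, callers no longer consume a
 * return value; a setup path would stamp every paired XDP ring by queue id,
 * e.g. (illustrative loop using the usual ice_for_each_rxq() iterator, not
 * part of this hunk):
 */
int i;

ice_for_each_rxq(vsi, i)
	ice_tx_xsk_pool(vsi, i);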
/**
@@ -758,6 +769,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+ return pf->vsi[i];
+ return NULL;
+}
+
+/**
* ice_is_switchdev_running - check if switchdev is configured
* @pf: pointer to PF structure
*
@@ -833,8 +859,12 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
@@ -886,8 +916,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- ice_plug_aux_dev(pf);
+ set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
@@ -897,8 +926,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- ice_unplug_aux_dev(pf);
+ /* We can directly unplug the aux device here only if the flag bit
+ * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
+ * could race with ice_plug_aux_dev() called from
+ * ice_service_task(). In this case we only clear that bit now and
+ * the aux device will be unplugged later once ice_plug_aux_dev()
+ * called from ice_service_task() finishes (see ice_service_task()).
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
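/* Editor's sketch: the consumer side of this handshake lives in
 * ice_service_task(), outside this hunk; the pairing is assumed to look
 * like this, so a cleared request bit means no late plug happens.
 */
if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
	ice_plug_aux_dev(pf);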
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ad1dcfa5ff65..1bdc70aa979d 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -226,6 +226,15 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Set Port parameters, (direct, 0x0203) */
+struct ice_aqc_set_port_params {
+ __le16 cmd_flags;
+#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
+ __le16 bad_frame_vsi;
+ __le16 swid;
+ u8 reserved[10];
+};
+
/* These resource type defines are used for all switch resource
* commands where a resource type is required, such as:
* Get Resource Allocation command (indirect 0x0204)
@@ -283,6 +292,40 @@ struct ice_aqc_alloc_free_res_elem {
struct ice_aqc_res_elem elem[];
};
+/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */
+struct ice_aqc_set_vlan_mode {
+ u8 reserved;
+ u8 l2tag_prio_tagging;
+#define ICE_AQ_VLAN_PRIO_TAG_S 0
+#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S)
+#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0
+#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3
+#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7
+ u8 l2tag_reserved[64];
+ u8 rdma_packet;
+#define ICE_AQ_VLAN_RDMA_TAG_S 0
+#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S)
+#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10
+#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A
+ u8 rdma_reserved[2];
+ u8 mng_vlan_prot_id;
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11
+ u8 prot_id_reserved[30];
+};
+
+/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */
+struct ice_aqc_get_vlan_mode {
+ u8 vlan_mode;
+#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0)
+ u8 l2tag_prio_tagging;
+ u8 reserved[98];
+};
+
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@@ -343,108 +386,113 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
u8 sw_flags2;
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
-#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
- (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
-#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
/* security section */
u8 sec_flags;
#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- u8 pvlan_reserved[2];
- u8 vlan_flags;
-#define ICE_AQ_VSI_VLAN_MODE_S 0
-#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
-#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
-#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_VLAN_EMOD_S 3
-#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
- u8 pvlan_reserved2[3];
+ __le16 port_based_inner_vlan; /* VLANS include priority bits */
+ u8 inner_vlan_reserved[2];
+ u8 inner_vlan_flags;
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+ u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
-#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
-#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
-#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
-#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
-#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
-#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
-#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
-#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
-#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
-#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
-#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
-#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
-#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
-#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
-#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
-#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
__le32 egress_table; /* same defines as for ingress table */
/* outer tags section */
- __le16 outer_tag;
- u8 outer_tag_flags;
-#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
-#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
-#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
-#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
-#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
-#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
-#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
- u8 outer_tag_reserved;
+ __le16 port_based_outer_vlan;
+ u8 outer_vlan_flags;
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7)
+ u8 outer_vlan_reserved;
/* queue mapping section */
__le16 mapping_flags;
-#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
-#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
__le16 q_mapping[16];
-#define ICE_AQ_VSI_Q_S 0
-#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
__le16 tc_mapping[8];
-#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
-#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
-#define ICE_AQ_VSI_TC_Q_NUM_S 11
-#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
/* queueing option section */
u8 q_opt_rss;
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
u8 q_opt_tc;
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
-#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
u8 q_opt_flags;
-#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
u8 q_opt_reserved[3];
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
@@ -452,27 +500,27 @@ struct ice_aqc_vsi_props {
__le16 sect_10_reserved;
/* flow director section */
__le16 fd_options;
-#define ICE_AQ_VSI_FD_ENABLE BIT(0)
-#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
-#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
__le16 max_fd_fltr_dedicated;
__le16 max_fd_fltr_shared;
__le16 fd_def_q;
-#define ICE_AQ_VSI_FD_DEF_Q_S 0
-#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
-#define ICE_AQ_VSI_FD_DEF_GRP_S 12
-#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
__le16 fd_report_opt;
-#define ICE_AQ_VSI_FD_REPORT_Q_S 0
-#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
-#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
/* PASID section */
__le32 pasid_id;
-#define ICE_AQ_VSI_PASID_ID_S 0
-#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
-#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
u8 reserved[24];
};
@@ -489,9 +537,13 @@ struct ice_aqc_add_get_recipe {
struct ice_aqc_recipe_content {
u8 rid;
+#define ICE_AQ_RECIPE_ID_S 0
+#define ICE_AQ_RECIPE_ID_M (0x3F << ICE_AQ_RECIPE_ID_S)
#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
#define ICE_AQ_SW_ID_LKUP_IDX 0
u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_DATA_S 0
+#define ICE_AQ_RECIPE_LKUP_DATA_M (0x3F << ICE_AQ_RECIPE_LKUP_DATA_S)
#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
__le16 mask[5];
@@ -502,15 +554,25 @@ struct ice_aqc_recipe_content {
u8 rsvd0[3];
u8 act_ctrl_join_priority;
u8 act_ctrl_fwd_priority;
+#define ICE_AQ_RECIPE_FWD_PRIORITY_S 0
+#define ICE_AQ_RECIPE_FWD_PRIORITY_M (0xF << ICE_AQ_RECIPE_FWD_PRIORITY_S)
u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_NEED_PASS_L2 BIT(0)
+#define ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2 BIT(1)
#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_S 4
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_M (0x3 << ICE_AQ_RECIPE_ACT_PRUNE_INDX_S)
u8 rsvd1;
__le32 dflt_act;
+#define ICE_AQ_RECIPE_DFLT_ACT_S 0
+#define ICE_AQ_RECIPE_DFLT_ACT_M (0x7FFFF << ICE_AQ_RECIPE_DFLT_ACT_S)
+#define ICE_AQ_RECIPE_DFLT_ACT_VALID BIT(31)
};
struct ice_aqc_recipe_data_elem {
u8 recipe_indx;
u8 resp_bits;
+#define ICE_AQ_RECIPE_WAS_UPDATED BIT(0)
u8 rsvd0[2];
u8 recipe_bitmap[8];
u8 rsvd1[4];
@@ -539,12 +601,30 @@ struct ice_aqc_sw_rules {
__le32 addr_low;
};
+/* Add switch rule response:
+ * Content of the return buffer is the same as the input buffer. The status
+ * field and LUT index are updated as part of the response.
+ */
+struct ice_aqc_sw_rules_elem_hdr {
+ __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
+ __le16 status;
+} __packed __aligned(sizeof(__le16));
+
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
* This structure describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
struct ice_sw_rule_lkup_rx_tx {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 recipe_id;
#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
/* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
@@ -621,14 +701,16 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[];
-};
+ u8 hdr_data[];
+} __packed __aligned(sizeof(__le16));
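/* Editor's sketch: with the header embedded and hdr_data a flexible array,
 * a lookup rule buffer can be sized in one step via struct_size() from
 * <linux/overflow.h>; 's_rule' and 'pkt_len' are illustrative names.
 */
struct ice_sw_rule_lkup_rx_tx *s_rule;

s_rule = kzalloc(struct_size(s_rule, hdr_data, pkt_len), GFP_KERNEL);
if (!s_rule)
	return -ENOMEM;
s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);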
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the action for Update/Get/Remove commands.
*/
struct ice_sw_rule_lg_act {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 index; /* Index in large action table */
__le16 size;
/* Max number of large actions */
@@ -682,45 +764,19 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
__le32 act[]; /* array of size for actions */
-};
+} __packed __aligned(sizeof(__le16));
/* Add/Update/Remove VSI list command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the VSI list for Update/Get/Remove commands.
*/
struct ice_sw_rule_vsi_list {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
__le16 vsi[]; /* Array of number_vsi VSI numbers */
-};
-
-/* Query VSI list command/response entry */
-struct ice_sw_rule_vsi_list_query {
- __le16 index;
- DECLARE_BITMAP(vsi_list, ICE_MAX_VSI);
-} __packed;
-
-/* Add switch rule response:
- * Content of return buffer is same as the input buffer. The status field and
- * LUT index are updated as part of the response
- */
-struct ice_aqc_sw_rules_elem {
- __le16 type; /* Switch rule type, one of T_... */
-#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
-#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
-#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
-#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
-#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
- __le16 status;
- union {
- struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
- struct ice_sw_rule_lg_act lg_act;
- struct ice_sw_rule_vsi_list vsi_list;
- struct ice_sw_rule_vsi_list_query vsi_list_query;
- } __packed pdata;
-};
+} __packed __aligned(sizeof(__le16));
/* Query PFC Mode (direct 0x0302)
* Set PFC Mode (direct 0x0303)
@@ -1339,6 +1395,24 @@ struct ice_aqc_get_link_topo {
u8 rsvd[9];
};
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ice_aqc_i2c {
+ struct ice_aqc_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define ICE_AQC_I2C_DATA_SIZE_M GENMASK(3, 0)
+#define ICE_AQC_I2C_USE_REPEATED_START BIT(7)
+
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ice_aqc_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
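/* Editor's sketch: issuing a Read I2C (0x06E2) with the new descriptor
 * layout; the surrounding wrapper and its parameters (bus_addr, i2c_addr,
 * data_len, data) are assumptions, not part of this hunk.
 */
struct ice_aqc_read_i2c_resp *resp;
struct ice_aqc_i2c *cmd;
struct ice_aq_desc desc;
int err;

ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
cmd = &desc.params.read_write_i2c;
cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
cmd->i2c_addr = cpu_to_le16(i2c_addr);
cmd->i2c_params = data_len & ICE_AQC_I2C_DATA_SIZE_M;

err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!err) {
	resp = &desc.params.read_i2c_resp;
	memcpy(data, resp->i2c_data, data_len);
}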
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1349,6 +1423,56 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX 16
+
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+
+ u8 pending_port_option_status;
+#define ICE_AQC_PENDING_PORT_OPT_IDX_M GENMASK(3, 0)
+#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7)
+
+ u8 rsvd[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M GENMASK(3, 0)
+
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+
+ u8 global_scid[2];
+ u8 phy_scid[2];
+ u8 pf2port_cid[2];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
struct ice_aqc_gpio {
__le16 gpio_ctrl_handle;
@@ -1415,6 +1539,12 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
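/* Editor's sketch: splitting the 16-bit software flags back into the two
 * hardware bytes on Write Activate; reuse of offset_high as the flags2
 * byte is an assumption, not shown in this hunk.
 */
cmd->cmd_flags = (u8)(activate_flags & 0xFF);
cmd->offset_high = (u8)((activate_flags >> 8) & 0xFF);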
@@ -1883,7 +2013,7 @@ struct ice_aqc_get_clear_fw_log {
};
/* Download Package (indirect 0x0C40) */
-/* Also used for Update Package (indirect 0x0C42) */
+/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@@ -2008,7 +2138,10 @@ struct ice_aq_desc {
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_add_get_recipe add_get_recipe;
struct ice_aqc_recipe_to_profile recipe_to_profile;
@@ -2049,6 +2182,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_i2c read_write_i2c;
+ struct ice_aqc_read_i2c_resp read_i2c_resp;
} params;
};
@@ -2110,10 +2245,13 @@ enum ice_adminq_opc {
/* internal switch commands */
ice_aqc_opc_get_sw_cfg = 0x0200,
+ ice_aqc_opc_set_port_params = 0x0203,
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
+ ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
/* VSI commands */
ice_aqc_opc_add_vsi = 0x0210,
@@ -2160,7 +2298,11 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
+ ice_aqc_opc_read_i2c = 0x06E2,
+ ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
@@ -2204,6 +2346,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_upload_section = 0x0C41,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 5daade32ea62..fba178e07600 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
- if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ if (!vsi || vsi->type != ICE_VSI_PF)
return;
netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
int base_idx, i;
if (!vsi || vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ return 0;
pf = vsi->back;
netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
if (!pf_vsi)
return;
- ice_free_cpu_rx_rmap(pf_vsi);
ice_clear_arfs(pf_vsi);
}
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
return;
ice_remove_arfs(pf);
- if (ice_set_cpu_rx_rmap(pf_vsi)) {
- dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
- return;
- }
ice_init_arfs(pf_vsi);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 80ed76f0cace..9669ad9bf7b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -3,6 +3,9 @@
#ifndef _ICE_ARFS_H_
#define _ICE_ARFS_H_
+
+#include "ice_fdir.h"
+
enum ice_arfs_fltr_state {
ICE_ARFS_INACTIVE,
ICE_ARFS_ACTIVE,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1a5ece3bce79..e864634d66bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -5,18 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
-
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
- return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- return !!rx_ring->rx_buf;
-}
+#include "ice_sriov.h"
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
@@ -141,8 +130,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
out:
/* tie q_vector and VSI together */
@@ -322,7 +310,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
@@ -416,10 +404,24 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Strip the Ethernet CRC bytes before the packet is posted to host
* memory.
*/
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
+ rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
+
+ /* The L2TSEL flag defines the reported L2 Tags in the receive descriptor
+ * and needs to remain 1 for non-DVM capable configurations so as not to
+ * break backward compatibility for VF drivers. Setting this field to 0
+ * causes the single/outer VLAN tag to be stripped into the L2TAG2_2ND
+ * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
+ * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
+ * check for the tag.
+ */
+ if (ice_is_dvm_ena(hw)) {
+ if (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_port_vlan_ena(vsi->vf))
+ rlan_ctx.l2tsel = 1;
+ else
+ rlan_ctx.l2tsel = 0;
+ } else {
+ rlan_ctx.l2tsel = 1;
+ }
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
@@ -504,11 +506,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
- kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- if (!ice_alloc_rx_buf_zc(ring))
- return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -523,8 +522,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!ice_alloc_rx_buf(ring))
- return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -961,7 +958,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* associated to the queue to schedule NAPI handler
*/
q_vector = ring->q_vector;
- if (q_vector)
+ if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
ice_trigger_sw_intr(hw, q_vector);
status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 408d15a5b0e3..039342a0ed15 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -8,6 +8,108 @@
#define ICE_PF_RESET_WAIT_COUNT 300
+static const char * const ice_link_mode_str_low[] = {
+ [0] = "100BASE_TX",
+ [1] = "100M_SGMII",
+ [2] = "1000BASE_T",
+ [3] = "1000BASE_SX",
+ [4] = "1000BASE_LX",
+ [5] = "1000BASE_KX",
+ [6] = "1G_SGMII",
+ [7] = "2500BASE_T",
+ [8] = "2500BASE_X",
+ [9] = "2500BASE_KX",
+ [10] = "5GBASE_T",
+ [11] = "5GBASE_KR",
+ [12] = "10GBASE_T",
+ [13] = "10G_SFI_DA",
+ [14] = "10GBASE_SR",
+ [15] = "10GBASE_LR",
+ [16] = "10GBASE_KR_CR1",
+ [17] = "10G_SFI_AOC_ACC",
+ [18] = "10G_SFI_C2C",
+ [19] = "25GBASE_T",
+ [20] = "25GBASE_CR",
+ [21] = "25GBASE_CR_S",
+ [22] = "25GBASE_CR1",
+ [23] = "25GBASE_SR",
+ [24] = "25GBASE_LR",
+ [25] = "25GBASE_KR",
+ [26] = "25GBASE_KR_S",
+ [27] = "25GBASE_KR1",
+ [28] = "25G_AUI_AOC_ACC",
+ [29] = "25G_AUI_C2C",
+ [30] = "40GBASE_CR4",
+ [31] = "40GBASE_SR4",
+ [32] = "40GBASE_LR4",
+ [33] = "40GBASE_KR4",
+ [34] = "40G_XLAUI_AOC_ACC",
+ [35] = "40G_XLAUI",
+ [36] = "50GBASE_CR2",
+ [37] = "50GBASE_SR2",
+ [38] = "50GBASE_LR2",
+ [39] = "50GBASE_KR2",
+ [40] = "50G_LAUI2_AOC_ACC",
+ [41] = "50G_LAUI2",
+ [42] = "50G_AUI2_AOC_ACC",
+ [43] = "50G_AUI2",
+ [44] = "50GBASE_CP",
+ [45] = "50GBASE_SR",
+ [46] = "50GBASE_FR",
+ [47] = "50GBASE_LR",
+ [48] = "50GBASE_KR_PAM4",
+ [49] = "50G_AUI1_AOC_ACC",
+ [50] = "50G_AUI1",
+ [51] = "100GBASE_CR4",
+ [52] = "100GBASE_SR4",
+ [53] = "100GBASE_LR4",
+ [54] = "100GBASE_KR4",
+ [55] = "100G_CAUI4_AOC_ACC",
+ [56] = "100G_CAUI4",
+ [57] = "100G_AUI4_AOC_ACC",
+ [58] = "100G_AUI4",
+ [59] = "100GBASE_CR_PAM4",
+ [60] = "100GBASE_KR_PAM4",
+ [61] = "100GBASE_CP2",
+ [62] = "100GBASE_SR2",
+ [63] = "100GBASE_DR",
+};
+
+static const char * const ice_link_mode_str_high[] = {
+ [0] = "100GBASE_KR2_PAM4",
+ [1] = "100G_CAUI2_AOC_ACC",
+ [2] = "100G_CAUI2",
+ [3] = "100G_AUI2_AOC_ACC",
+ [4] = "100G_AUI2",
+};
+
+/**
+ * ice_dump_phy_type - helper function to dump phy_type
+ * @hw: pointer to the HW structure
+ * @low: 64 bit value for phy_type_low
+ * @high: 64 bit value for phy_type_high
+ * @prefix: prefix string to differentiate multiple dumps
+ */
+static void
+ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
+{
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
+ if (low & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_low[i]);
+ }
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
+ if (high & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_high[i]);
+ }
+}
+
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -80,9 +182,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T6:
+ case ICE_SUBDEV_ID_E810T7:
return true;
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T5:
+ return true;
+ }
break;
default:
break;
@@ -183,6 +299,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
+ const char *prefix;
struct ice_hw *hw;
int status;
@@ -204,29 +321,48 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
- ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
- report_mode);
- ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
- ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
+
+ switch (report_mode) {
+ case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
+ prefix = "phy_caps_media";
+ break;
+ case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
+ prefix = "phy_caps_no_media";
+ break;
+ case ICE_AQC_REPORT_ACTIVE_CFG:
+ prefix = "phy_caps_active";
+ break;
+ case ICE_AQC_REPORT_DFLT_CFG:
+ prefix = "phy_caps_default";
+ break;
+ default:
+ prefix = "phy_caps_invalid";
+ }
+
+ ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
+ le64_to_cpu(pcaps->phy_type_high), prefix);
+
+ ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
+ prefix, report_mode);
+ ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
pcaps->low_power_ctrl_an);
- ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
+ pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
pcaps->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
pcaps->link_fec_options);
- ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
- pcaps->module_compliance_enforcement);
- ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
- pcaps->extended_compliance_code);
- ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
+ prefix, pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
+ prefix, pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
pcaps->module_type[0]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
pcaps->module_type[1]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
@@ -1518,16 +1654,27 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
- * Package, Get Version, Get Package Info List and Release Resource
- * (with resource ID set to Global Config Lock) AdminQ commands are
- * allowed; all others must block until the package download completes
- * and the Global Config Lock is released. See also
- * ice_acquire_global_cfg_lock().
+ * Package, Get Version, Get Package Info List, Upload Section,
+ * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
+ * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
+ * Recipes to Profile Association, and Release Resource (with resource
+ * ID set to Global Config Lock) AdminQ commands are allowed; all others
+ * must block until the package download completes and the Global Config
+ * Lock is released. See also ice_acquire_global_cfg_lock().
*/
switch (le16_to_cpu(desc->opcode)) {
case ice_aqc_opc_download_pkg:
case ice_aqc_opc_get_pkg_info_list:
case ice_aqc_opc_get_ver:
+ case ice_aqc_opc_upload_section:
+ case ice_aqc_opc_update_pkg:
+ case ice_aqc_opc_set_port_params:
+ case ice_aqc_opc_get_vlan_mode_parameters:
+ case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_add_recipe:
+ case ice_aqc_opc_recipe_to_profile:
+ case ice_aqc_opc_get_recipe:
+ case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
@@ -2386,6 +2533,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+ info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2403,6 +2552,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
+ info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2737,6 +2888,54 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_aq_set_port_params - set physical port parameters.
+ * @pi: pointer to the port info struct
+ * @double_vlan: if set double VLAN is enabled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Physical port parameters (0x0203)
+ */
+int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+ u16 cmd_flags = 0;
+
+ cmd = &desc.params.set_port_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ if (double_vlan)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->cmd_flags = cpu_to_le16(cmd_flags);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
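
A usage sketch (hypothetical caller, not part of this patch): since the helper
builds the 0x0203 descriptor itself, enabling double VLAN on a port reduces to
a single call.

	/* Hypothetical caller: enable double VLAN tagging on the port.
	 * No command details context is needed, so cd is NULL.
	 */
	static int example_enable_double_vlan(struct ice_port_info *pi)
	{
		return ice_aq_set_port_params(pi, true, NULL);
	}
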
+
+/**
+ * ice_is_100m_speed_supported
+ * @hw: pointer to the HW struct
+ *
+ * Returns true if 100M speeds are supported by the device,
+ * false otherwise.
+ */
+bool ice_is_100m_speed_supported(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
@@ -3340,9 +3539,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
!ice_fw_supports_report_dflt_cfg(hw)) {
- struct ice_link_default_override_tlv tlv;
+ struct ice_link_default_override_tlv tlv = { 0 };
- if (ice_get_link_default_override(&tlv, pi))
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
@@ -3495,6 +3695,121 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
/**
+ * ice_aq_get_port_options
+ * @hw: pointer to the HW struct
+ * @options: buffer for the resultant port options
+ * @option_count: input - size of the buffer in port option structures,
+ *                output - number of returned port options
+ * @lport: logical port to call the command with (optional)
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ *               lport; must be true when the PF owns more than one port
+ * @active_option_idx: index of active port option in returned buffer
+ * @active_option_valid: active option in returned buffer is valid
+ * @pending_option_idx: index of pending port option in returned buffer
+ * @pending_option_valid: pending option in returned buffer is valid
+ *
+ * Calls Get Port Options AQC (0x06ea) and verifies result.
+ */
+int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid)
+{
+ struct ice_aqc_get_port_options *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 i;
+
+ /* options buffer shall be able to hold max returned options */
+ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.get_port_options;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+ cmd->lport_num_valid = lport_valid;
+
+ status = ice_aq_send_cmd(hw, &desc, options,
+ *option_count * sizeof(*options), NULL);
+ if (status)
+ return status;
+
+ /* verify direct FW response & set output parameters */
+ *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
+ cmd->port_options_count);
+ ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
+ *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
+ cmd->port_options);
+ if (*active_option_valid) {
+ *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
+ cmd->port_options);
+ if (*active_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
+ *active_option_idx);
+ }
+
+ *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
+ cmd->pending_port_option_status);
+ if (*pending_option_valid) {
+ *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
+ cmd->pending_port_option_status);
+ if (*pending_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
+ *pending_option_idx);
+ }
+
+ /* mask output options fields */
+ for (i = 0; i < *option_count; i++) {
+ options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
+ options[i].pmd);
+ options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
+ options[i].max_lane_speed);
+ ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
+ options[i].pmd, options[i].max_lane_speed);
+ }
+
+ return 0;
+}
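
A caller sketch (hypothetical, assuming ICE_AQC_PORT_OPT_MAX satisfies the
ICE_AQC_PORT_OPT_COUNT_M minimum the function enforces): the buffer is sized
for the maximum option count, and option_count acts as both input (buffer
capacity) and output (number of options returned).

	/* Hypothetical caller: dump all port options for lport 0. */
	static void example_dump_port_options(struct ice_hw *hw)
	{
		struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_MAX];
		u8 count = ICE_AQC_PORT_OPT_MAX, active, pending, i;
		bool active_valid, pending_valid;

		if (ice_aq_get_port_options(hw, opts, &count, 0, true,
					    &active, &active_valid,
					    &pending, &pending_valid))
			return;

		for (i = 0; i < count; i++)
			ice_debug(hw, ICE_DBG_PHY, "opt %u: pmd %u, max lane speed %u\n",
				  i, opts[i].pmd, opts[i].max_lane_speed);
	}
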
+
+/**
+ * ice_aq_set_port_option
+ * @hw: pointer to the HW struct
+ * @lport: logical port to call the command with
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ *               lport; must be true when the PF owns more than one port
+ * @new_option: new port option to be written
+ *
+ * Calls Set Port Options AQC (0x06eb).
+ */
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option)
+{
+ struct ice_aqc_set_port_option *cmd;
+ struct ice_aq_desc desc;
+
+ if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.set_port_option;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+
+ cmd->lport_num_valid = lport_valid;
+ cmd->selected_port_option = new_option;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -4758,6 +5073,104 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_read_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type,
+ * bits [3:0] - data size to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read I2C (0x06E2)
+ */
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return -EINVAL;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ struct ice_aqc_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
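
A sketch of composing the params byte with FIELD_PREP() (hypothetical wrapper;
only ICE_AQC_I2C_DATA_SIZE_M is visible in this patch, so the other bit fields
are left at their zero defaults here):

	/* Hypothetical wrapper: read 8 bytes starting at an I2C offset. */
	static int example_i2c_read8(struct ice_hw *hw,
				     struct ice_aqc_link_topo_addr topo_addr,
				     u16 bus_addr, u16 offset, u8 *buf)
	{
		u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 8);

		return ice_aq_read_i2c(hw, topo_addr, bus_addr,
				       cpu_to_le16(offset), params, buf, NULL);
	}
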
+
+/**
+ * ice_aq_write_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Write I2C (0x06E3)
+ *
+ * Return:
+ * * 0 - Successful write to the i2c device
+ * * -EINVAL - Data size greater than 4 bytes
+ * * -EIO - FW error
+ */
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return -EINVAL;
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ memcpy(cmd->i2c_data, data, data_size);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
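
The write path mirrors the read sketch above; a hypothetical wrapper, again
using only the ICE_AQC_I2C_DATA_SIZE_M field shown in this patch. Encoding
more than 4 bytes returns -EINVAL before any AQ traffic is issued.

	/* Hypothetical wrapper: write up to 4 bytes at an I2C offset. */
	static int example_i2c_write(struct ice_hw *hw,
				     struct ice_aqc_link_topo_addr topo_addr,
				     u16 bus_addr, u16 offset, u8 *buf, u8 len)
	{
		u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, len);

		return ice_aq_write_i2c(hw, topo_addr, bus_addr,
					cpu_to_le16(offset), params, buf, NULL);
	}
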
+
+/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
@@ -4891,20 +5304,22 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
}
/**
- * ice_fw_supports_link_override
+ * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
*
- * Checks if the firmware supports link override
+ * Checks if the firmware API version is at least maj.min.patch
*/
-bool ice_fw_supports_link_override(struct ice_hw *hw)
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ } else if (hw->api_maj_ver > maj) {
return true;
}
@@ -4912,6 +5327,19 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
}
/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
+}
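
For reference, the helper implements a plain three-component
"version >= maj.min.patch" comparison; a self-contained equivalent
(illustration only):

	/* Equivalent standalone comparison. */
	static bool ver_at_least(u8 vmaj, u8 vmin, u8 vpatch,
				 u8 maj, u8 min, u8 patch)
	{
		if (vmaj != maj)
			return vmaj > maj;
		if (vmin != min)
			return vmin > min;
		return vpatch >= patch;
	}
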
+
+/**
* ice_get_link_default_override
* @ldo: pointer to the link default override struct
* @pi: pointer to the port info struct
@@ -5041,16 +5469,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
if (hw->mac_type != ICE_MAC_E810)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -5087,14 +5508,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 1c57097ddf0b..8b6712b92e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -4,12 +4,14 @@
#ifndef _ICE_COMMON_H_
#define _ICE_COMMON_H_
-#include "ice.h"
+#include <linux/bitfield.h>
+
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
-#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+#include "ice_switch.h"
+#include "ice_fdir.h"
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
@@ -85,6 +87,9 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd);
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
@@ -146,6 +151,15 @@ int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid);
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option);
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -199,11 +213,20 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d73348f279f7..6abf28a14291 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -5,6 +5,7 @@
#define _ICE_DCB_H_
#include "ice_type.h"
+#include <scsi/iscsi_proto.h>
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index b94d8daeaa58..add90e75f05c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -916,7 +916,8 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
+ first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
@@ -925,7 +926,10 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 61dd2f18dee8..6d560d1c74a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,6 +5,7 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
@@ -23,6 +24,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index a230edb38466..e6ec20079ced 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -9,6 +9,8 @@
#include "ice_eswitch.h"
#include "ice_fw_update.h"
+static int ice_active_port_option = -1;
+
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -466,12 +468,259 @@ ice_devlink_reload_empr_finish(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to set the port option for
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
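
The wrap-around scan above is a first-match search starting one past the
active (or pending) option, modulo the option count; a standalone form of the
same loop (illustration only):

	/* Find the first index after start (wrapping) whose pmd count
	 * matches, or -1 if none matches.
	 */
	static int next_matching_option(const u8 *pmd, u8 n, u8 start, u8 want)
	{
		u8 i;

		for (i = 1; i <= n; i++) {
			u8 j = (start + i) % n;

			if (pmd[j] == want)
				return j;
		}
		return -1;
	}
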
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split() with a split count of 1. Note that an
+ * FW option with a split count of 1 may not be available.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
/* The ice driver currently does not support driver reinit */
.reload_down = ice_devlink_reload_empr_start,
.reload_up = ice_devlink_reload_empr_finish,
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
@@ -647,6 +896,23 @@ void ice_devlink_unregister(struct ice_pf *pf)
devlink_unregister(priv_to_devlink(pf));
}
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to set the switch ID for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
@@ -678,6 +944,39 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
}
/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
*
@@ -704,6 +1003,15 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pf->hw.bus.func;
+
+ /* As the FW supports port split options only for the whole device,
+ * set them only for the first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
@@ -753,13 +1061,18 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
- vsi = ice_get_vf_vsi(vf);
devlink_port = &vf->devlink_port;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
attrs.pci_vf.pf = pf->hw.bus.func;
attrs.pci_vf.vf = vf->vf_id;
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
@@ -789,6 +1102,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devlink_port_unregister(devlink_port);
}
+#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
@@ -815,8 +1130,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- void *nvm_data;
- u32 nvm_size;
+ u8 *nvm_data, *tmp, i;
+ u32 nvm_size, left;
+ s8 num_blks;
int status;
nvm_size = hw->flash.flash_size;
@@ -824,26 +1140,44 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
if (!nvm_data)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- vfree(nvm_data);
- return status;
- }
- status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
- if (status) {
- dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
- nvm_size, status, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
+ tmp = nvm_data;
+ left = nvm_size;
+
+ /* Some systems take longer to read the NVM than others, which causes
+ * the FW to reclaim the NVM lock before the entire NVM has been read.
+ * Fix this by breaking the read into smaller chunks that are less
+ * likely to exceed the lock timeout. This adds overhead, since more AQ
+ * commands are issued, but it should always work.
+ */
+ for (i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, tmp, false);
+ if (status) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ read_sz, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ice_release_nvm(hw);
+ vfree(nvm_data);
+ return -EIO;
+ }
ice_release_nvm(hw);
- vfree(nvm_data);
- return status;
- }
- ice_release_nvm(hw);
+ tmp += read_sz;
+ left -= read_sz;
+ }
*data = nvm_data;
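
The chunk bookkeeping splits an nvm_size-byte image into
ICE_DEVLINK_READ_BLK_SIZE pieces, with the last block carrying the remainder;
for a 2.5 MiB flash the reads are 1 MiB, 1 MiB, then 0.5 MiB. The per-block
length reduces to (illustration only):

	/* Length of block blk for a total of nvm_size bytes. */
	static u32 blk_read_len(u32 nvm_size, u32 blk, u32 blk_size)
	{
		u32 off = blk * blk_size;

		return off >= nvm_size ? 0 : min(blk_size, nvm_size - off);
	}
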
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 864692b157b6..f9f15acae90a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
+ rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
vf->repr->mac_rule);
@@ -115,9 +116,12 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
- ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+ vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
+ if (vlan_ops->dis_stripping(ctrl_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
@@ -126,11 +130,11 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
__dev_mc_unsync(uplink_netdev, NULL);
netif_addr_unlock_bh(uplink_netdev);
- if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
- if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
rule_added = true;
}
@@ -147,7 +151,7 @@ err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -172,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
int q_id;
ice_for_each_txq(vsi, q_id) {
- struct ice_repr *repr = pf->vf[q_id].repr;
- struct ice_q_vector *q_vector = repr->q_vector;
- struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
- struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+
+ vf = ice_get_vf_by_id(pf, q_id);
+ if (WARN_ON(!vf))
+ continue;
+
+ repr = vf->repr;
+ q_vector = repr->q_vector;
+ tx_ring = vsi->tx_rings[q_id];
+ rx_ring = vsi->rx_rings[q_id];
q_vector->vsi = vsi;
q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -195,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
rx_ring->q_vector = q_vector;
rx_ring->next = NULL;
rx_ring->netdev = repr->netdev;
+
+ ice_put_vf(vf);
+ }
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
+
+ /* Skip VFs that aren't configured */
+ if (!vf->repr->dst)
+ continue;
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
}
}
@@ -206,11 +252,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
int max_vsi_num = 0;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -227,14 +275,16 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
goto err;
}
- if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
goto err;
}
@@ -242,14 +292,14 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (max_vsi_num < vsi->vsi_num)
max_vsi_num = vsi->vsi_num;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
+ ice_napi_poll);
netif_keep_dst(vf->repr->netdev);
}
- ice_for_each_vf(pf, i) {
- struct ice_repr *repr = pf->vf[i].repr;
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_repr *repr = vf->repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
@@ -262,43 +312,12 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- }
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
return -ENODEV;
}
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
- * @pf: poiner to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
- */
-static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
-
- netif_napi_del(&vf->repr->q_vector->napi);
- }
-}
-
-/**
* ice_eswitch_update_repr - reconfigure VF port representor
* @vsi: VF VSI for which port representor is configured
*/
@@ -312,7 +331,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
if (!ice_is_switchdev_running(pf))
return;
- vf = &pf->vf[vsi->vf_id];
+ vf = vsi->vf;
repr = vf->repr;
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -320,7 +339,8 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
+ vsi->vf->vf_id);
}
}
@@ -341,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
- if (ice_is_reset_in_progress(vsi->back->state))
+ if (ice_is_reset_in_progress(vsi->back->state) ||
+ test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
repr = ice_netdev_to_repr(netdev);
@@ -390,7 +411,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
@@ -404,7 +425,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
}
/**
@@ -413,10 +434,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*/
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i)
- netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ netif_napi_del(&vf->repr->q_vector->napi);
}
/**
@@ -425,10 +449,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
*/
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- napi_enable(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ napi_enable(&vf->repr->q_vector->napi);
}
/**
@@ -437,10 +464,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
*/
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- napi_disable(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ napi_disable(&vf->repr->q_vector->napi);
}
/**
@@ -518,7 +548,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (pf->eswitch_mode == mode)
return 0;
- if (pf->num_alloc_vfs) {
+ if (ice_has_vfs(pf)) {
dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
return -EOPNOTSUPP;
@@ -609,16 +639,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
*/
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_start_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_start_tx_queues(vf->repr);
}
}
@@ -628,16 +659,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
*/
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_stop_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_stop_tx_queues(vf->repr);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index bd58d9d2e565..6a413331572b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int ice_eswitch_rebuild(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index e2e3ef7fba7f..b7be84bbe72d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -136,6 +136,11 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
+ ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
+ ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
+ ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
+ ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
+ ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};
static const u32 ice_regs_dump_list[] = {
@@ -164,6 +169,7 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("vf-true-promisc-support",
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
+ ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -189,19 +195,17 @@ __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
+
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
}
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
__ice_get_drvinfo(netdev, drvinfo, np->vsi);
-
- strscpy(drvinfo->bus_info, pci_name(pf->pdev),
- sizeof(drvinfo->bus_info));
-
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -315,16 +319,20 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- unsigned int i;
+ bool active = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- return true;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ active = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return active;
}
/**
@@ -655,7 +663,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
rx_desc = ICE_RX_DESC(rx_ring, i);
if (!(rx_desc->wb.status_error0 &
- cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+ cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
rx_buf = &rx_ring->rx_buf[i];
@@ -1280,21 +1289,26 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- ice_down(vsi);
- ice_up(vsi);
- }
+ ice_down_up(vsi);
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
*/
if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
- ice_is_any_vf_in_promisc(pf)) {
+ ice_is_any_vf_in_unicast_promisc(pf)) {
dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
/* toggle bit back to previous state */
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
+
+ if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
+ ice_has_vfs(pf)) {
+ dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
+ ret = -EOPNOTSUPP;
+ }
ethtool_exit:
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
@@ -1456,20 +1470,22 @@ ice_get_ethtool_stats(struct net_device *netdev,
/**
* ice_mask_min_supported_speeds
+ * @hw: pointer to the HW structure
* @phy_types_high: PHY type high
* @phy_types_low: PHY type low to apply minimum supported speeds mask
*
* Apply minimum supported speeds mask to PHY type low. These are the speeds
* for ethtool supported link mode.
*/
-static
-void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+static void
+ice_mask_min_supported_speeds(struct ice_hw *hw,
+ u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
- else
+ else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}
@@ -1519,7 +1535,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
- ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+ ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
+ &phy_types_low);
/* determine advertised modes based on link override only
* if it's supported and if the FW doesn't abstract the
* driver from having to account for link overrides
@@ -2179,6 +2196,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
}
/**
+ * ice_set_phy_type_from_speed - set phy_types based on speeds
+ * and advertised modes
+ * @ks: ethtool link ksettings struct
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @adv_link_speed: targeted link speeds bitmap
+ */
+static void
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
+ u64 *phy_type_low, u64 *phy_type_high,
+ u16 adv_link_speed)
+{
+ /* Handle 1000M speed in a special way because ice_update_phy_type
+ * enables all link modes, but having mixed copper and optical
+ * standards is not supported.
+ */
+ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+
+ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
+}
+
+/**
* ice_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
* @ks: ethtool ksettings
@@ -2298,7 +2351,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (err)
goto done;
- curr_link_speed = pi->phy.link_info.link_speed;
+ curr_link_speed = pi->phy.curr_user_speed_req;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
/* If speed didn't get set, set it to what it currently is.
@@ -2309,7 +2362,8 @@ ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
+ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
+ adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -2777,6 +2831,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i--)
@@ -2803,6 +2858,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
+ xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
@@ -2833,6 +2890,7 @@ process_rx:
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
+ rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
rx_rings[i].rx_buf = NULL;
/* this is to allow wr32 to have something to write to
@@ -3098,36 +3156,47 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
-/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int err, i;
+ u16 qcount, offset;
+ int err, num_tc, i;
u8 *lut;
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ netdev_warn(netdev, "RSS is not supported on this VSI!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (rss_context && !ice_is_adq_active(pf)) {
+ netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
+ return -EINVAL;
+ }
+
+ qcount = vsi->mqprio_qopt.qopt.count[rss_context];
+ offset = vsi->mqprio_qopt.qopt.offset[rss_context];
+
+ if (rss_context && ice_is_adq_active(pf)) {
+ num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ if (rss_context >= num_tc) {
+ netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
+ rss_context, num_tc);
+ return -EINVAL;
+ }
+ /* Use channel VSI of given TC */
+ vsi = vsi->tc_map_vsi[rss_context];
+ }
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
return 0;
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- /* RSS not supported return error here */
- netdev_warn(netdev, "RSS is not configured on this VSI!\n");
- return -EIO;
- }
-
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -3140,8 +3209,14 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
if (err)
goto out;
+ if (ice_is_adq_active(pf)) {
+ for (i = 0; i < vsi->rss_table_size; i++)
+ indir[i] = offset + lut[i] % qcount;
+ goto out;
+ }
+
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = (u32)(lut[i]);
+ indir[i] = lut[i];
out:
kfree(lut);
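
With ADQ, each LUT entry is folded into the selected TC's queue range
[offset, offset + qcount); for example, offset = 8 and qcount = 4 map any
lut[i] onto queues 8..11 (illustration only):

	/* Map a raw LUT entry into a TC's queue range. */
	static u32 adq_indir_entry(u16 offset, u16 qcount, u8 lut_val)
	{
		return offset + lut_val % qcount;
	}
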
@@ -3149,6 +3224,21 @@ out:
}
/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware.
+ */
+static int
+ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
+}
+
+/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
@@ -3425,6 +3515,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
new_rx = ch->combined_count + ch->rx_count;
new_tx = ch->combined_count + ch->tx_count;
+ if (new_rx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
+ if (new_tx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
if (new_rx > ice_get_max_rxq(pf)) {
netdev_err(dev, "Maximum allowed Rx channels is %d\n",
ice_get_max_rxq(pf));
@@ -4089,6 +4189,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
+ .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 5d10c4f84a36..ead6d50fc0ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -852,7 +852,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1214,7 +1214,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4deb2c9446ec..4b3bb19e1d06 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -4,10 +4,19 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"
+#include "ice.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost TCAM entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
/* To support tunneling entries by PF, the package will append the PF number to
* the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
*/
+#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
@@ -523,6 +532,55 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
}
/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
+
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if entry needs to be enabled, or false if needs to be disabled
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
* ice_init_pkg_hints
* @hw: pointer to the HW structure
* @ice_seg: pointer to the segment of the package scan (non-NULL)
@@ -548,32 +606,23 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
&val);
- while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
label_name = ice_enum_labels(NULL, 0, &state, &val);
}
- /* Cache the appropriate boost TCAM entry pointers */
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
@@ -583,6 +632,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
}
}
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
}
/* Key creation */
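
The rewritten scan dispatches on label prefix before any per-type work, and the per-PF match inside ice_add_tunnel_hint() relies on the PF digit sitting exactly where the prefix's terminating NUL would be. A standalone sketch of that matching rule (illustrative only):

    #include <stdio.h>
    #include <string.h>

    static int label_matches_pf(const char *label, const char *prefix, int pf_id)
    {
            size_t len = strlen(prefix);

            if (strncmp(label, prefix, len))
                    return 0; /* wrong prefix */
            /* the PF digit sits where the prefix's NUL terminator would be */
            return (label[len] - '0') == pf_id;
    }

    int main(void)
    {
            printf("%d\n", label_matches_pf("TNL_VXLAN_PF3", "TNL_VXLAN_PF", 3)); /* 1 */
            printf("%d\n", label_matches_pf("TNL_VXLAN_PF3", "TNL_VXLAN_PF", 0)); /* 0 */
            return 0;
    }
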
@@ -874,6 +928,27 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
* ice_aq_update_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package cmd buffer
@@ -957,25 +1032,21 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
- * ice_update_pkg
+ * ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
*/
-static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static int
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- u32 offset, info, i;
- int status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
+ int status = 0;
+ u32 i;
for (i = 0; i < count; i++) {
struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
bool last = ((i + 1) == count);
+ u32 offset, info;
status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
last, &offset, &info, NULL);
@@ -987,6 +1058,27 @@ static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
}
}
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
ice_release_change_lock(hw);
return status;
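
These two hunks are a lock-wrapper split: the loop body moves into a _no_lock variant so callers that already hold the change lock can reuse it (the DVM boost update later in this file does exactly that), while ice_update_pkg() keeps its original acquire/update/release semantics. A minimal pthread sketch of the pattern (illustrative only):

    #include <pthread.h>

    static pthread_mutex_t change_lock = PTHREAD_MUTEX_INITIALIZER;

    static int update_no_lock(void)
    {
            /* real work goes here; the caller must hold change_lock */
            return 0;
    }

    static int update(void)
    {
            int status;

            pthread_mutex_lock(&change_lock);
            status = update_no_lock();
            pthread_mutex_unlock(&change_lock);

            return status;
    }
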
@@ -1080,6 +1172,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
break;
}
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ }
+
ice_release_global_cfg_lock(hw);
return state;
@@ -1117,6 +1216,7 @@ static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
+ int status;
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
ice_seg->hdr.seg_format_ver.major,
@@ -1133,8 +1233,12 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
le32_to_cpu(ice_buf_tbl->buf_count));
- return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return status;
}
/**
@@ -1701,16 +1805,43 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
return bld;
}
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
* @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
*/
static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
{
u16 i;
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1757,7 +1888,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
if (fv) {
/* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv);
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
if (req_profs & prof_type)
set_bit((u16)offset, bm);
@@ -1768,20 +1899,19 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
/**
* ice_get_sw_fv_list
* @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
+ * @lkups: list of protocol types
* @bm: bitmap of field vectors to consider
* @fv_list: Head of a list
*
* Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
+ * a given protocol ID and offset and returns a list of structures of type
* "ice_sw_fv_list_entry". Every structure in the list has a field vector
* definition and profile ID information
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list)
{
struct ice_sw_fv_list_entry *fvl;
@@ -1793,7 +1923,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state));
- if (!ids_cnt || !hw->seg)
+ if (!lkups->n_val_words || !hw->seg)
return -EINVAL;
ice_seg = hw->seg;
@@ -1812,20 +1942,17 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
if (!test_bit((u16)offset, bm))
continue;
- for (i = 0; i < ids_cnt; i++) {
+ for (i = 0; i < lkups->n_val_words; i++) {
int j;
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
- */
for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
break;
if (j >= hw->blk[ICE_BLK_SW].es.fvw)
break;
- if (i + 1 == ids_cnt) {
+ if (i + 1 == lkups->n_val_words) {
fvl = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fvl), GFP_KERNEL);
if (!fvl)
@@ -1837,8 +1964,11 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
}
}
} while (fv);
- if (list_empty(fv_list))
+ if (list_empty(fv_list)) {
+ dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package");
return -EIO;
+ }
+
return 0;
err:
@@ -1897,7 +2027,7 @@ void ice_init_prof_result_bm(struct ice_hw *hw)
*
* Frees a package buffer
*/
-static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
devm_kfree(ice_hw_to_dev(hw), bld);
}
@@ -1997,6 +2127,43 @@ ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
}
/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
* ice_pkg_buf_get_active_sections
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
*
@@ -2023,7 +2190,7 @@ static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
*
* Return a pointer to the buffer's header
*/
-static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
if (!bld)
return NULL;
@@ -2060,6 +2227,89 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
}
/**
+ * ice_upd_dvm_boost_entry
+ * @hw: pointer to the HW structure
+ * @entry: pointer to double vlan boost entry info
+ */
+static int
+ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ int status = -ENOSPC;
+ struct ice_buf_build *bld;
+ u8 val, dc, nm;
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return -ENOMEM;
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_upd_dvm_boost_entry_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ struct_size(sect_rx, tcam, 1));
+ if (!sect_rx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ struct_size(sect_tx, tcam, 1));
+ if (!sect_tx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
+
+ /* re-write the don't care and never match bits accordingly */
+ if (entry->enable) {
+ /* all bits are don't care */
+ val = 0x00;
+ dc = 0xFF;
+ nm = 0x00;
+ } else {
+ /* disable, one never match bit, the rest are don't care */
+ val = 0x00;
+ dc = 0xF7;
+ nm = 0x08;
+ }
+
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ &val, NULL, &dc, &nm, 0, sizeof(u8));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
+
+ice_upd_dvm_boost_entry_err:
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_set_dvm_boost_entries
+ * @hw: pointer to the HW structure
+ *
+ * Enable double VLAN by updating the appropriate boost TCAM entries.
+ */
+int ice_set_dvm_boost_entries(struct ice_hw *hw)
+{
+ int status;
+ u16 i;
+
+ for (i = 0; i < hw->dvm_upd.count; i++) {
+ status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
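
The val/dc/nm triple handed to ice_set_key() encodes the TCAM semantics: with every bit don't-care (dc = 0xFF) the entry can hit, while reserving bit 3 as never-match (dc = 0xF7, nm = 0x08) guarantees it cannot. Under that reading the two masks are disjoint and jointly cover the key byte, which a quick standalone check confirms (illustrative only; the match semantics are an assumption based on the comments above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t dc_en = 0xFF, nm_en = 0x00;   /* entry enabled */
            uint8_t dc_dis = 0xF7, nm_dis = 0x08; /* entry disabled */

            /* don't-care and never-match bits never overlap, and together
             * they account for every bit of the key byte
             */
            assert((dc_en & nm_en) == 0 && (uint8_t)(dc_en | nm_en) == 0xFF);
            assert((dc_dis & nm_dis) == 0 && (uint8_t)(dc_dis | nm_dis) == 0xFF);
            return 0;
    }
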
+/**
* ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
* @type: type of tunnel
@@ -2392,7 +2642,7 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
*
* This function will either add or move a ptype to a particular PTG depending
* on if the ptype is already part of another group. Note that using a
- * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
static int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 6cbc29bcb02f..9c530c86703e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -87,8 +87,14 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
+int
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
enum ice_tunnel_type type);
@@ -96,6 +102,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
+int ice_set_dvm_boost_entries(struct ice_hw *hw);
/* Rx parser PTYPE functions */
bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
@@ -119,4 +126,10 @@ void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index fc087e0b5292..974d14a83b2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -162,6 +162,7 @@ struct ice_meta_sect {
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
@@ -416,6 +417,8 @@ enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
+ TNL_GTPC,
+ TNL_GTPU,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -442,6 +445,19 @@ struct ice_tunnel_table {
u16 valid_count[__TNL_TYPE_CNT];
};
+struct ice_dvm_entry {
+ u16 boost_addr;
+ u16 enable;
+ struct ice_boost_tcam_entry *boost_entry;
+};
+
+#define ICE_DVM_MAX_ENTRIES 48
+
+struct ice_dvm_table {
+ struct ice_dvm_entry tbl[ICE_DVM_MAX_ENTRIES];
+ u16 count;
+};
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
@@ -659,7 +675,35 @@ enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
- ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_TUN_GTPU = 0x8,
+ ICE_PROF_TUN_GTPC = 0x10,
+ ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF,
};
+
+/* Size of a meta init entry, in bits, and the equivalent dword count. Note,
+ * this should be a multiple of 32 bits.
+ */
+#define ICE_META_INIT_BITS 192
+#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \
+ BITS_PER_BYTE))
+
+/* The meta init Flag field starts at this bit */
+#define ICE_META_FLAGS_ST 123
+
+/* The entry and bit to check for Double VLAN Mode (DVM) support */
+#define ICE_META_VLAN_MODE_ENTRY 0
+#define ICE_META_FLAG_VLAN_MODE 60
+#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \
+ ICE_META_FLAG_VLAN_MODE)
+
+struct ice_meta_init_entry {
+ __le32 bm[ICE_META_INIT_DW_CNT];
+};
+
+struct ice_meta_init_section {
+ __le16 count;
+ __le16 offset;
+ struct ice_meta_init_entry entry;
+};
#endif /* _ICE_FLEX_TYPE_H_ */
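
The arithmetic behind these macros: a meta init entry is 192 bits, i.e. six __le32 dwords, and the DVM flag is flag 60 of a flags field starting at bit 123, so it lands at absolute bit 183, which is dword 5, bit 23 (after le32_to_cpu on that dword). A standalone check (illustrative only):

    #include <stdio.h>

    #define ICE_META_INIT_BITS      192
    #define ICE_META_FLAGS_ST       123
    #define ICE_META_FLAG_VLAN_MODE 60
    #define ICE_META_VLAN_MODE_BIT  (ICE_META_FLAGS_ST + ICE_META_FLAG_VLAN_MODE)

    int main(void)
    {
            printf("dwords per entry: %d\n", ICE_META_INIT_BITS / 32); /* 6 */
            printf("bit %d -> dword %d, bit %d\n", ICE_META_VLAN_MODE_BIT,
                   ICE_META_VLAN_MODE_BIT / 32,  /* 5 */
                   ICE_META_VLAN_MODE_BIT % 32); /* 23 */
            return 0;
    }
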
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index beed4838dcbe..ef103e47a8dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -3,6 +3,7 @@
#include "ice_common.h"
#include "ice_flow.h"
+#include <net/gre.h>
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 84b6e4464a21..b465d27d9b80 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include "ice_flex_type.h"
+
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c29177c6bb9d..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -58,7 +58,16 @@ int
ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
u8 promisc_mask)
{
- return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error setting promisc mode on VSI %i (rc=%d)\n",
+ vsi->vsi_num, result);
+
+ return result;
}
/**
@@ -73,7 +82,16 @@ int
ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
u8 promisc_mask)
{
- return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error clearing promisc mode on VSI %i (rc=%d)\n",
+ vsi->vsi_num, result);
+
+ return result;
}
/**
@@ -87,7 +105,16 @@ int
ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
- return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+ ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+ return result;
}
/**
@@ -101,7 +128,16 @@ int
ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
- return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+ ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+ return result;
}
/**
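
All four wrappers share one policy: -EEXIST means the requested promiscuous state was already programmed, which callers treat as benign, so only other failures earn a log line. The filter shape, as a standalone sketch (illustrative only):

    #include <errno.h>
    #include <stdio.h>

    static int log_unless_exists(int result)
    {
            /* "already set" is not an error worth reporting */
            if (result && result != -EEXIST)
                    fprintf(stderr, "promisc update failed: %d\n", result);
            return result;
    }
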
@@ -203,21 +239,22 @@ ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
* ice_fltr_add_vlan_to_list - add VLAN filter info to an existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
- * @vlan_id: VLAN ID to add
- * @action: filter action
+ * @vlan: VLAN filter details
*/
static int
ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
- u16 vlan_id, enum ice_sw_fwd_act_type action)
+ struct ice_vlan *vlan)
{
struct ice_fltr_info info = { 0 };
info.flag = ICE_FLTR_TX;
info.src_id = ICE_SRC_ID_VSI;
info.lkup_type = ICE_SW_LKUP_VLAN;
- info.fltr_act = action;
+ info.fltr_act = ICE_FWD_TO_VSI;
info.vsi_handle = vsi->idx;
- info.l_data.vlan.vlan_id = vlan_id;
+ info.l_data.vlan.vlan_id = vlan->vid;
+ info.l_data.vlan.tpid = vlan->tpid;
+ info.l_data.vlan.tpid_valid = true;
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
@@ -310,19 +347,17 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_prepare_vlan - add or remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
* @vlan_action: pointer to add or remove VLAN function
*/
static int
-ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action,
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan,
int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
LIST_HEAD(tmp_list);
int result;
- if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan))
return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
@@ -395,27 +430,21 @@ int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_add_vlan - add single VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
*/
-int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_add_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_add_vlan_list);
}
/**
* ice_fltr_remove_vlan - remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: filter VLAN to remove
- * @action: action to remove
+ * @vlan: VLAN filter details
*/
-int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_remove_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_remove_vlan_list);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 3eb42479175f..0f3dbc308eec 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLTR_H_
#define _ICE_FLTR_H_
+#include "ice_vlan.h"
+
void ice_fltr_free_list(struct device *dev, struct list_head *h);
int
ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
@@ -32,12 +34,8 @@ ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-int
-ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
-int
-ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 665a344fb9c0..3dc5662d62a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -736,7 +736,87 @@ static int ice_finalize_update(struct pldmfw *context)
return 0;
}
-static const struct pldmfw_ops ice_fwu_ops = {
+struct ice_pldm_pci_record_id {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+};
+
+/**
+ * ice_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: list of records extracted from the PLDM image
+ *
+ * Determine if the PCI device associated with this device matches the record
+ * data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+static bool
+ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct ice_pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ u32 *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+ /* A value of zero for one of the descriptors is sometimes
+ * used when the record should ignore this field when matching the
+ * device, for example when the record applies to any subsystem
+ * device or vendor.
+ */
+ if (value)
+ *ptr = value;
+ else
+ *ptr = PCI_ANY_ID;
+ }
+
+ /* the E822 device can have a generic device ID so check for that */
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device ||
+ id.device == ICE_DEV_ID_E822_SI_DFLT) &&
+ (id.subsystem_vendor == PCI_ANY_ID ||
+ id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID ||
+ id.subsystem_device == pdev->subsystem_device))
+ return true;
+
+ return false;
+}
+
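
ice_op_pci_match_record() reduces the descriptor TLVs to a four-field ID in which PCI_ANY_ID acts as a wildcard (zero-valued descriptors are normalized to it), then accepts the device when every field is wildcarded or equal, with ICE_DEV_ID_E822_SI_DFLT allowed as an extra generic device ID. The per-field rule, sketched standalone (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ANY_ID 0xFFFFFFFFu /* stand-in for PCI_ANY_ID widened to u32 */

    static bool field_matches(uint32_t record, uint32_t dev)
    {
            /* zero descriptors were normalized to ANY_ID before this point */
            return record == ANY_ID || record == dev;
    }

    int main(void)
    {
            printf("%d\n", field_matches(ANY_ID, 0x8086)); /* 1: wildcard */
            printf("%d\n", field_matches(0x8086, 0x8086)); /* 1: exact */
            printf("%d\n", field_matches(0x1234, 0x8086)); /* 0: mismatch */
            return 0;
    }
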
+static const struct pldmfw_ops ice_fwu_ops_e810 = {
.match_record = &pldmfw_op_pci_match_record,
.send_package_data = &ice_send_package_data,
.send_component_table = &ice_send_component_table,
@@ -744,6 +824,14 @@ static const struct pldmfw_ops ice_fwu_ops = {
.finalize_update = &ice_finalize_update,
};
+static const struct pldmfw_ops ice_fwu_ops_e822 = {
+ .match_record = &ice_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
/**
* ice_get_pending_updates - Check if the component has a pending update
* @pf: the PF driver structure
@@ -921,7 +1009,11 @@ int ice_devlink_flash_update(struct devlink *devlink,
memset(&priv, 0, sizeof(priv));
- priv.context.ops = &ice_fwu_ops;
+ /* the E822 device needs a slightly different ops */
+ if (hw->mac_type == ICE_MAC_GENERIC)
+ priv.context.ops = &ice_fwu_ops_e822;
+ else
+ priv.context.ops = &ice_fwu_ops_e810;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
new file mode 100644
index 000000000000..b5a7f246d230
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021-2022, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include <linux/tty_driver.h>
+
+/**
+ * ice_gnss_do_write - Write data to internal GNSS
+ * @pf: board private structure
+ * @buf: command buffer
+ * @size: command buffer size
+ *
+ * Write UBX command data to the GNSS receiver
+ */
+static unsigned int
+ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ struct ice_hw *hw = &pf->hw;
+ unsigned int offset = 0;
+ int err = 0;
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ /* It's not possible to write a single byte to u-blox.
+ * Write in a loop until 6 or fewer bytes remain. If exactly 6 bytes
+ * were left, the final write would be only a single byte, so split the
+ * tail into a 4-byte and a 2-byte write instead of 5+1. Otherwise,
+ * finish with a single 2- to 5-byte write.
+ */
+ while (size - offset > ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES;
+ }
+
+ /* Single byte would be written. Write 4 bytes instead of 5. */
+ if (size - offset == ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES - 1;
+ }
+
+ /* Do the last write, 2 to 5 bytes. */
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]), size - offset - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ return size;
+
+err_out:
+ dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
+ offset, size, err);
+
+ return offset;
+}
+
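
The chunking in ice_gnss_do_write() is easier to follow with a concrete size. With ICE_GNSS_UBX_WRITE_BYTES = 5 (one address byte plus four data bytes per transaction), an 11-byte command becomes writes of 5, 4 and 2 bytes; the 4+2 tail avoids the illegal single-byte final write. A standalone sketch of just the arithmetic (illustrative only):

    #include <stdio.h>

    #define WRITE_BYTES 5 /* 1 address byte + 4 data bytes per transaction */

    int main(void)
    {
            unsigned int size = 11, offset = 0;

            while (size - offset > WRITE_BYTES + 1) {
                    printf("write %d\n", WRITE_BYTES);     /* 5 bytes */
                    offset += WRITE_BYTES;
            }
            if (size - offset == WRITE_BYTES + 1) {
                    printf("write %d\n", WRITE_BYTES - 1); /* 4 bytes */
                    offset += WRITE_BYTES - 1;
            }
            printf("write %u\n", size - offset);           /* final 2 to 5 */
            return 0;
    }
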
+/**
+ * ice_gnss_write_pending - Write all pending data to internal GNSS
+ * @work: GNSS write work structure
+ */
+static void ice_gnss_write_pending(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ write_work);
+ struct ice_pf *pf = gnss->back;
+
+ if (!list_empty(&gnss->queue)) {
+ struct gnss_write_buf *write_buf = NULL;
+ unsigned int bytes;
+
+ write_buf = list_first_entry(&gnss->queue,
+ struct gnss_write_buf, queue);
+
+ bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
+ dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
+
+ list_del(&write_buf->queue);
+ kfree(write_buf->buf);
+ kfree(write_buf);
+ }
+}
+
+/**
+ * ice_gnss_read - Read data from internal GNSS module
+ * @work: GNSS read work structure
+ *
+ * Read data from the internal GNSS receiver and push it to the TTY layer.
+ */
+static void ice_gnss_read(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ read_work.work);
+ struct ice_aqc_link_topo_addr link_topo;
+ unsigned int i, bytes_read, data_len;
+ struct tty_port *port;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ __be16 data_len_b;
+ char *buf = NULL;
+ u8 i2c_params;
+ int err = 0;
+
+ pf = gnss->back;
+ if (!pf || !gnss->tty || !gnss->tty->port) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ hw = &pf->hw;
+ port = gnss->tty->port;
+
+ buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH |
+ ICE_AQC_I2C_USE_REPEATED_START;
+
+ /* Poll the data length in a loop; the data is ready once it is nonzero */
+ for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) {
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
+ i2c_params, (u8 *)&data_len_b, NULL);
+ if (err)
+ goto exit_buf;
+
+ data_len = be16_to_cpu(data_len_b);
+ if (data_len != 0 && data_len != U16_MAX)
+ break;
+
+ mdelay(10);
+ }
+
+ data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
+ data_len = tty_buffer_request_room(port, data_len);
+ if (!data_len) {
+ err = -ENOMEM;
+ goto exit_buf;
+ }
+
+ /* Read received data */
+ for (i = 0; i < data_len; i += bytes_read) {
+ unsigned int bytes_left = data_len - i;
+
+ bytes_read = min_t(typeof(bytes_left), bytes_left,
+ ICE_MAX_I2C_DATA_SIZE);
+
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
+ bytes_read, &buf[i], NULL);
+ if (err)
+ goto exit_buf;
+ }
+
+ /* Send the data to the tty layer for users to read. This doesn't
+ * actually push the data through unless tty->low_latency is set.
+ */
+ tty_insert_flip_string(port, buf, i);
+ tty_flip_buffer_push(port);
+
+exit_buf:
+ free_page((unsigned long)buf);
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
+ ICE_GNSS_TIMER_DELAY_TIME);
+exit:
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
+}
+
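
The polling loop reads the u-blox "bytes available" register pair starting at 0xFD; the value is big-endian, and both 0 (nothing buffered yet) and 0xFFFF (skipped by the driver, presumably an all-ones read while the receiver is busy) mean "retry". The readiness test as a standalone sketch (illustrative only; be16toh stands in for be16_to_cpu):

    #include <endian.h>
    #include <stdint.h>

    static int gnss_len_ready(uint16_t be_len)
    {
            uint16_t len = be16toh(be_len); /* register is big-endian */

            return len != 0 && len != UINT16_MAX;
    }
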
+/**
+ * ice_gnss_struct_init - Initialize GNSS structure for the TTY
+ * @pf: Board private structure
+ * @index: TTY device index
+ */
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct kthread_worker *kworker;
+ struct gnss_serial *gnss;
+
+ gnss = kzalloc(sizeof(*gnss), GFP_KERNEL);
+ if (!gnss)
+ return NULL;
+
+ mutex_init(&gnss->gnss_mutex);
+ gnss->open_count = 0;
+ gnss->back = pf;
+ pf->gnss_serial[index] = gnss;
+
+ kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
+ INIT_LIST_HEAD(&gnss->queue);
+ kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
+ /* Allocate a kworker for handling work required for the GNSS TTY
+ * writes.
+ */
+ kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ if (IS_ERR(kworker)) {
+ kfree(gnss);
+ return NULL;
+ }
+
+ gnss->kworker = kworker;
+
+ return gnss;
+}
+
+/**
+ * ice_gnss_tty_open - Initialize GNSS structures on TTY device open
+ * @tty: pointer to the tty_struct
+ * @filp: pointer to the file
+ *
+ * This routine is mandatory. If this routine is not filled in, the attempted
+ * open will fail with ENODEV.
+ */
+static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gnss_serial *gnss;
+ struct ice_pf *pf;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return -EFAULT;
+
+ /* Clear the pointer in case something fails */
+ tty->driver_data = NULL;
+
+ /* Get the serial object associated with this tty pointer */
+ gnss = pf->gnss_serial[tty->index];
+ if (!gnss) {
+ /* Initialize GNSS struct on the first device open */
+ gnss = ice_gnss_struct_init(pf, tty->index);
+ if (!gnss)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ /* Save our structure within the tty structure */
+ tty->driver_data = gnss;
+ gnss->tty = tty;
+ gnss->open_count++;
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
+
+ mutex_unlock(&gnss->gnss_mutex);
+
+ return 0;
+}
+
+/**
+ * ice_gnss_tty_close - Cleanup GNSS structures on tty device close
+ * @tty: pointer to the tty_struct
+ * @filp: pointer to the file
+ */
+static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gnss_serial *gnss = tty->driver_data;
+ struct ice_pf *pf;
+
+ if (!gnss)
+ return;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ /* Port was never opened */
+ dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n");
+ goto exit;
+ }
+
+ gnss->open_count--;
+ if (gnss->open_count <= 0) {
+ /* Port is in shutdown state */
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ }
+exit:
+ mutex_unlock(&gnss->gnss_mutex);
+}
+
+/**
+ * ice_gnss_tty_write - Write GNSS data
+ * @tty: pointer to the tty_struct
+ * @buf: pointer to the user data
+ * @count: the number of characters queued to be sent to the HW
+ *
+ * The write function call is called by the user when there is data to be sent
+ * to the hardware. First the tty core receives the call, and then it passes the
+ * data on to the tty driver's write function. The tty core also tells the tty
+ * driver the size of the data being sent.
+ * If any errors happen during the write call, a negative error value should be
+ * returned instead of the number of characters queued to be written.
+ */
+static int
+ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct gnss_write_buf *write_buf;
+ struct gnss_serial *gnss;
+ unsigned char *cmd_buf;
+ struct ice_pf *pf;
+ int err = count;
+
+ /* We cannot write a single byte using our I2C implementation. */
+ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
+ return -EINVAL;
+
+ gnss = tty->driver_data;
+ if (!gnss)
+ return -EFAULT;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return -EFAULT;
+
+ /* Only allow to write on TTY 0 */
+ if (gnss != pf->gnss_serial[0])
+ return -EIO;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
+ if (!cmd_buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(cmd_buf, buf, count);
+
+ /* Send the data out to a hardware port */
+ write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
+ if (!write_buf) {
+ kfree(cmd_buf); /* don't leak the copied command on failure */
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ write_buf->buf = cmd_buf;
+ write_buf->size = count;
+ INIT_LIST_HEAD(&write_buf->queue);
+ list_add_tail(&write_buf->queue, &gnss->queue);
+ kthread_queue_work(gnss->kworker, &gnss->write_work);
+exit:
+ mutex_unlock(&gnss->gnss_mutex);
+ return err;
+}
+
+/**
+ * ice_gnss_tty_write_room - Return the number of characters that can be queued
+ * @tty: pointer to the tty_struct
+ *
+ * This routine returns the number of characters the tty driver will accept
+ * for queuing to be written, or 0 if either the TTY is not open or the user
+ * tries to write to a TTY other than the first.
+ */
+static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
+{
+ struct gnss_serial *gnss = tty->driver_data;
+
+ /* Only allow to write on TTY 0 */
+ if (!gnss || gnss != gnss->back->gnss_serial[0])
+ return 0;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ mutex_unlock(&gnss->gnss_mutex);
+ return 0;
+ }
+
+ mutex_unlock(&gnss->gnss_mutex);
+ return ICE_GNSS_TTY_WRITE_BUF;
+}
+
+static const struct tty_operations tty_gps_ops = {
+ .open = ice_gnss_tty_open,
+ .close = ice_gnss_tty_close,
+ .write = ice_gnss_tty_write,
+ .write_room = ice_gnss_tty_write_room,
+};
+
+/**
+ * ice_gnss_create_tty_driver - Create a TTY driver for GNSS
+ * @pf: Board private structure
+ */
+static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const int ICE_TTYDRV_NAME_MAX = 14;
+ struct tty_driver *tty_driver;
+ char *ttydrv_name;
+ unsigned int i;
+ int err;
+
+ tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES,
+ TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(tty_driver)) {
+ dev_err(dev, "Failed to allocate memory for GNSS TTY\n");
+ return NULL;
+ }
+
+ ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL);
+ if (!ttydrv_name) {
+ tty_driver_kref_put(tty_driver);
+ return NULL;
+ }
+
+ snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_",
+ (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn));
+
+ /* Initialize the tty driver */
+ tty_driver->owner = THIS_MODULE;
+ tty_driver->driver_name = dev_driver_string(dev);
+ tty_driver->name = (const char *)ttydrv_name;
+ tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ tty_driver->init_termios = tty_std_termios;
+ tty_driver->init_termios.c_iflag &= ~INLCR;
+ tty_driver->init_termios.c_iflag |= IGNCR;
+ tty_driver->init_termios.c_oflag &= ~OPOST;
+ tty_driver->init_termios.c_lflag &= ~ICANON;
+ tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX);
+ /* baud rate 9600 */
+ tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600);
+ tty_driver->driver_state = pf;
+ tty_set_operations(tty_driver, &tty_gps_ops);
+
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
+ GFP_KERNEL);
+ pf->gnss_serial[i] = NULL;
+
+ tty_port_init(pf->gnss_tty_port[i]);
+ tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i);
+ }
+
+ err = tty_register_driver(tty_driver);
+ if (err) {
+ dev_err(dev, "Failed to register TTY driver err=%d\n", err);
+
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
+ kfree(ttydrv_name);
+ tty_driver_kref_put(tty_driver);
+
+ return NULL;
+ }
+
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
+ dev_info(dev, "%s%d registered\n", ttydrv_name, i);
+
+ return tty_driver;
+}
+
+/**
+ * ice_gnss_init - Initialize GNSS TTY support
+ * @pf: Board private structure
+ */
+void ice_gnss_init(struct ice_pf *pf)
+{
+ struct tty_driver *tty_driver;
+
+ tty_driver = ice_gnss_create_tty_driver(pf);
+ if (!tty_driver)
+ return;
+
+ pf->ice_gnss_tty_driver = tty_driver;
+
+ set_bit(ICE_FLAG_GNSS, pf->flags);
+ dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n");
+}
+
+/**
+ * ice_gnss_exit - Disable GNSS TTY support
+ * @pf: Board private structure
+ */
+void ice_gnss_exit(struct ice_pf *pf)
+{
+ unsigned int i;
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
+ return;
+
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ if (pf->gnss_tty_port[i]) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
+
+ if (pf->gnss_serial[i]) {
+ struct gnss_serial *gnss = pf->gnss_serial[i];
+
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kfree(gnss);
+ pf->gnss_serial[i] = NULL;
+ }
+ }
+
+ tty_unregister_driver(pf->ice_gnss_tty_driver);
+ kfree(pf->ice_gnss_tty_driver->name);
+ tty_driver_kref_put(pf->ice_gnss_tty_driver);
+ pf->ice_gnss_tty_driver = NULL;
+}
+
+/**
+ * ice_gnss_is_gps_present - Check if GPS HW is present
+ * @hw: pointer to HW struct
+ */
+bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ return false;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+ if (ice_is_e810t(hw)) {
+ int err;
+ u8 data;
+
+ err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
+ return false;
+ } else {
+ return false;
+ }
+#else
+ if (!ice_is_e810t(hw))
+ return false;
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
+ return true;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
new file mode 100644
index 000000000000..f454dd1d9285
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2022, Intel Corporation. */
+
+#ifndef _ICE_GNSS_H_
+#define _ICE_GNSS_H_
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define ICE_E810T_GNSS_I2C_BUS 0x2
+#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
+/* Create 2 minor devices, both using the same GNSS module. First one is RW,
+ * second one RO.
+ */
+#define ICE_GNSS_TTY_MINOR_DEVICES 2
+#define ICE_GNSS_TTY_WRITE_BUF 250
+#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+#define ICE_MAX_I2C_WRITE_BYTES 4
+
+/* u-blox ZED-F9T specific definitions */
+#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
+/* Data length register is big endian */
+#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
+#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
+#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
+/* For u-blox writes are performed without address so the first byte to write is
+ * passed as I2C addr parameter.
+ */
+#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
+#define ICE_MAX_UBX_READ_TRIES 255
+#define ICE_MAX_UBX_ACK_READ_TRIES 4095
+
+struct gnss_write_buf {
+ struct list_head queue;
+ unsigned int size;
+ unsigned char *buf;
+};
+
+/**
+ * struct gnss_serial - data used to initialize GNSS TTY port
+ * @back: back pointer to PF
+ * @tty: pointer to the tty for this device
+ * @open_count: number of times this port has been opened
+ * @gnss_mutex: gnss_mutex used to protect GNSS serial operations
+ * @kworker: kwork thread for handling periodic work
+ * @read_work: read_work function for handling GNSS reads
+ * @write_work: write_work function for handling GNSS writes
+ * @queue: write buffers queue
+ */
+struct gnss_serial {
+ struct ice_pf *back;
+ struct tty_struct *tty;
+ int open_count;
+ struct mutex gnss_mutex; /* protects GNSS serial structure */
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work read_work;
+ struct kthread_work write_work;
+ struct list_head queue;
+};
+
+#if IS_ENABLED(CONFIG_TTY)
+void ice_gnss_init(struct ice_pf *pf);
+void ice_gnss_exit(struct ice_pf *pf);
+bool ice_gnss_is_gps_present(struct ice_hw *hw);
+#else
+static inline void ice_gnss_init(struct ice_pf *pf) { }
+static inline void ice_gnss_exit(struct ice_pf *pf) { }
+static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_TTY) */
+#endif /* _ICE_GNSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index fc3580167e7b..895c32bcc8b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -34,29 +34,20 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
struct iidc_auxiliary_drv *iadrv;
- if (!pf->adev)
+ if (WARN_ON_ONCE(!in_task()))
return;
+ mutex_lock(&pf->adev_mutex);
+ if (!pf->adev)
+ goto finish;
+
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
-}
-
-/**
- * ice_find_vsi - Find the VSI from VSI ID
- * @pf: The PF pointer to search in
- * @vsi_num: The VSI ID to search for
- */
-static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
- return pf->vsi[i];
- return NULL;
+finish:
+ mutex_unlock(&pf->adev_mutex);
}
/**
@@ -79,7 +70,7 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (!ice_is_rdma_ena(pf))
return -EINVAL;
vsi = ice_get_main_vsi(pf);
@@ -227,6 +218,11 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
+ qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
@@ -236,7 +232,7 @@ EXPORT_SYMBOL_GPL(ice_get_qos_params);
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
int index;
index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
@@ -274,7 +270,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
/* if this PF doesn't support a technology that requires auxiliary
* devices, then gracefully exit
*/
- if (!ice_is_aux_ena(pf))
+ if (!ice_is_rdma_ena(pf))
return 0;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -282,7 +278,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
return -ENOMEM;
adev = &iadev->adev;
- pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
@@ -292,18 +287,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
ret = auxiliary_device_init(adev);
if (ret) {
- pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
- pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
+ mutex_lock(&pf->adev_mutex);
+ pf->adev = adev;
+ mutex_unlock(&pf->adev_mutex);
+
return 0;
}
@@ -312,12 +309,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
- if (!pf->adev)
- return;
+ struct auxiliary_device *adev;
- auxiliary_device_delete(pf->adev);
- auxiliary_device_uninit(pf->adev);
+ mutex_lock(&pf->adev_mutex);
+ adev = pf->adev;
pf->adev = NULL;
+ mutex_unlock(&pf->adev_mutex);
+
+ if (adev) {
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+ }
}
/**
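
The reworked unplug path is the snapshot-and-clear pattern: hold the mutex only long enough to steal the pointer, then tear the device down outside the lock, so teardown can never deadlock against ice_send_event_to_aux(), which takes the same mutex before dereferencing pf->adev. A minimal pthread sketch (illustrative only):

    #include <pthread.h>
    #include <stddef.h>

    struct aux_dev { int id; };

    static pthread_mutex_t adev_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct aux_dev *adev;

    static void aux_delete(struct aux_dev *d)
    {
            (void)d; /* stand-in for auxiliary_device_delete/_uninit */
    }

    static void unplug(void)
    {
            struct aux_dev *d;

            /* snapshot and clear under the lock ... */
            pthread_mutex_lock(&adev_mutex);
            d = adev;
            adev = NULL;
            pthread_mutex_unlock(&adev_mutex);

            /* ... tear down outside it */
            if (d)
                    aux_delete(d);
    }
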
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index b7796b8aecbd..4b0c86757df9 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -5,7 +5,6 @@
#define _ICE_IDC_INT_H_
#include <linux/net/intel/iidc.h>
-#include "ice.h"
struct ice_pf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index e375ac849aec..ee5b36941ba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -61,13 +61,13 @@ static void ice_lag_set_backup(struct ice_lag *lag)
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *master;
+ const char *name, *peer, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
- master = lag->master ? "TRUE" : "FALSE";
+ primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
switch (lag->role) {
@@ -87,8 +87,8 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
- bonded, peer, upper, role, master);
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
+ bonded, peer, upper, role, primary);
}
/**
@@ -119,7 +119,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
- netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
goto lag_out;
}
@@ -164,8 +164,8 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
- /* if this is the first element in an LAG mark as master */
- lag->master = !!(peers == 1);
+ /* if this is the first element in an LAG mark as primary */
+ lag->primary = !!(peers == 1);
}
/**
@@ -204,11 +204,7 @@ ice_lag_unlink(struct ice_lag *lag,
lag->upper_netdev = NULL;
}
- if (lag->peer_netdev) {
- dev_put(lag->peer_netdev);
- lag->peer_netdev = NULL;
- }
-
+ lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
@@ -216,6 +212,32 @@ ice_lag_unlink(struct ice_lag *lag,
}
/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+ struct ice_pf *pf = lag->pf;
+
+ /* check that this event is for this netdev and that we are
+ * currently part of an aggregate
+ */
+ if (netdev != lag->netdev || !lag->bonded)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
+ }
+ /* perform some cleanup in case we come back */
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
@@ -242,7 +264,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
return;
}
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
- ice_lag_unlink(lag, ptr);
+ ice_lag_unregister(lag, netdev);
break;
default:
break;
@@ -425,11 +447,9 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- if (lag->upper_netdev)
- dev_put(lag->upper_netdev);
+ dev_put(lag->upper_netdev);
- if (lag->peer_netdev)
- dev_put(lag->peer_netdev);
+ dev_put(lag->peer_netdev);
kfree(lag);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index c2e3688dd8fd..51b5cf467ce2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -24,7 +24,7 @@ struct ice_lag {
struct net_device *upper_netdev; /* upper bonding netdev */
struct notifier_block notif_block;
u8 bonded:1; /* currently bonded */
- u8 master:1; /* this is a master */
+ u8 primary:1; /* this is primary */
u8 handler:1; /* did we register a rx_netdev_handler */
/* each thing blocking bonding will increment this value by one.
* If this value is zero, then bonding is allowed.
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d981dc6f2323..b3baf7c3f910 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -424,6 +424,8 @@ enum ice_rx_flex_desc_status_error_0_bits {
enum ice_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ /* [10:5] reserved */
+ ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
@@ -568,6 +570,7 @@ struct ice_tx_ctx_desc {
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c187cf04fcf..7276badfa19e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -8,6 +8,7 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
+#include "ice_vsi_vlan_ops.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -165,21 +166,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf_id: ID of the VF being configured
+ * @vf: the VF associated with this VSI, if any
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
{
+ enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
- struct ice_vf *vf = NULL;
- if (vsi->type == ICE_VSI_VF)
- vsi->vf_id = vf_id;
- else
- vsi->vf_id = ICE_INVAL_VFID;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return;
- switch (vsi->type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
@@ -216,22 +215,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
/* The number of queues for ctrl VSI is equal to number of VFs.
* Each ring is associated to the corresponding VF_PR netdev.
*/
- vsi->alloc_txq = pf->num_alloc_vfs;
- vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->alloc_txq = ice_get_num_vfs(pf);
+ vsi->alloc_rxq = vsi->alloc_txq;
vsi->num_q_vectors = 1;
break;
case ICE_VSI_VF:
- vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_msix_per_vf includes (VF miscellaneous vector +
+ /* pf->vfs.num_msix_per includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
@@ -247,7 +245,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
break;
}
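For reference, the new call-site contract (mirrored by the ice_vsi_alloc() hunk below): the struct ice_vf pointer replaces the old vf_id/ICE_INVAL_VFID convention, with NULL meaning no VF association.

	ice_vsi_set_num_qs(vsi, vf);	/* ICE_VSI_VF: vf must not be NULL */
	ice_vsi_set_num_qs(vsi, NULL);	/* all other VSI types */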
@@ -298,7 +296,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
return;
if (vsi->type == ICE_VSI_VF)
- ctxt->vf_num = vsi->vf_id;
+ ctxt->vf_num = vsi->vf->vf_id;
ctxt->vsi_num = vsi->vsi_num;
memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
@@ -383,8 +381,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
- vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
@@ -436,13 +433,16 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
struct ice_pf *pf = q_vector->vsi->back;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
- ice_for_each_vf(pf, i)
- napi_schedule(&pf->vf[i].repr->q_vector->napi);
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ napi_schedule(&vf->repr->q_vector->napi);
+ rcu_read_unlock();
return IRQ_HANDLED;
}
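This iteration pattern assumes the VF storage was converted from a flat array to a kernel hashtable earlier in the series; a sketch of the two helpers as presumably defined (entry being the hlist node member in struct ice_vf):

	#define ice_for_each_vf(pf, bkt, vf) \
		hash_for_each((pf)->vfs.table, (bkt), (vf), entry)

	#define ice_for_each_vf_rcu(pf, bkt, vf) \
		hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)

The _rcu form only needs rcu_read_lock() and so is usable from this IRQ handler; the plain form, used in the ice_main.c hunks below, must hold pf->vfs.table_lock.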
@@ -452,17 +452,24 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
* @pf: board private structure
* @vsi_type: type of VSI
* @ch: ptr to channel
- * @vf_id: ID of the VF being configured
+ * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
+ *
+ * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
+ * it may be NULL when there is no association with a VF. For
+ * ICE_VSI_VF the VF pointer *must not* be NULL.
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, u16 vf_id)
+ struct ice_channel *ch, struct ice_vf *vf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return NULL;
+
/* Need to protect the allocation of the VSIs at the PF level */
mutex_lock(&pf->sw_mutex);
@@ -484,9 +491,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
set_bit(ICE_VSI_DOWN, vsi->state);
if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf_id);
+ ice_vsi_set_num_qs(vsi, vf);
else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ ice_vsi_set_num_qs(vsi, NULL);
switch (vsi->type) {
case ICE_VSI_SWITCHDEV_CTRL:
@@ -509,10 +516,16 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+
+ /* For the PF control VSI this is NULL, for the VF control VSI
+ * this will be the first VF to allocate it.
+ */
+ vsi->vf = vf;
break;
case ICE_VSI_VF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
+ vsi->vf = vf;
break;
case ICE_VSI_CHNL:
if (!ch)
@@ -530,7 +543,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+ if (vsi->type == ICE_VSI_CTRL && !vf) {
/* Use the last VSI slot as the index for PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
@@ -545,8 +558,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
pf->next_vsi);
}
- if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
- pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
+ if (vsi->type == ICE_VSI_CTRL && vf)
+ vf->ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
@@ -732,14 +745,14 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_is_aux_ena
+ * ice_is_rdma_ena - check if RDMA is supported
* @pf: pointer to the PF struct
*
- * returns true if AUX devices/drivers are supported, false otherwise
+ * returns true if RDMA is currently supported, false otherwise
*/
-bool ice_is_aux_ena(struct ice_pf *pf)
+bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
/**
@@ -838,11 +851,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
/**
* ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @hw: HW structure used to determine the VLAN mode of the device
* @ctxt: the VSI context being set
*
* This initializes a default VSI context for all sections except the Queues.
*/
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
u32 table = 0;
@@ -853,13 +867,30 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
- /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
- * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
- * packets untagged/tagged.
+ /* allow all untagged/tagged packets by default on Tx */
+ ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
+ * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
+ *
+ * DVM - leave inner VLAN in packet by default
*/
- ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
- ICE_AQ_VSI_VLAN_MODE_M) >>
- ICE_AQ_VSI_VLAN_MODE_S);
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ ctxt->info.outer_vlan_flags |=
+ (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
+ ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ ctxt->info.outer_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
+ }
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
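FIELD_PREP() from <linux/bitfield.h> derives the shift from the mask at compile time, so the last assignment above is equivalent to the open-coded shift-and-mask used for the two fields before it. A minimal illustration with a generic mask:

	#include <linux/bitfield.h>

	u16 a = FIELD_PREP(GENMASK(5, 3), 2);	/* == (2 << 3) & GENMASK(5, 3) */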
@@ -881,9 +912,9 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
* @vsi: the VSI being configured
* @ctxt: VSI context structure
*/
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
@@ -950,11 +981,24 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* at least 1)
*/
if (offset)
- vsi->num_rxq = offset;
+ rx_count = offset;
else
- vsi->num_rxq = num_rxq_per_tc;
+ rx_count = num_rxq_per_tc;
+
+ if (rx_count > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
+ rx_count, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
+ if (tx_count > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
+ tx_count, vsi->alloc_txq);
+ return -EINVAL;
+ }
vsi->num_txq = tx_count;
+ vsi->num_rxq = rx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -972,6 +1016,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
*/
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+
+ return 0;
}
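Because ice_vsi_setup_q_map() now returns an int, callers must propagate the failure rather than continue with a truncated queue map; both call sites updated later in this patch take the same shape:

	ret = ice_vsi_setup_q_map(vsi, ctxt);
	if (ret)
		goto out;	/* requested queues exceed alloc_txq/alloc_rxq */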
/**
@@ -1114,7 +1160,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
- ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
break;
default:
ret = -ENODEV;
@@ -1136,7 +1182,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ice_set_dflt_vsi_ctx(ctxt);
+ ice_set_dflt_vsi_ctx(hw, ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
@@ -1159,7 +1205,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_CHNL) {
ice_chnl_vsi_setup_q_map(vsi, ctxt);
} else {
- ice_vsi_setup_q_map(vsi, ctxt);
+ ret = ice_vsi_setup_q_map(vsi, ctxt);
+ if (ret)
+ goto out;
+
if (!init_vsi) /* means VSI being updated */
/* must indicate which section of the VSI context is
* being modified
@@ -1168,25 +1217,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
}
- /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
- * respectively
- */
- if (vsi->type == ICE_VSI_VF) {
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (pf->vf[vsi->vf_id].spoofchk) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
- }
-
/* Allow control frames out of main VSI */
if (vsi->type == ICE_VSI_PF) {
ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
@@ -1325,6 +1355,36 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
}
/**
+ * ice_get_vf_ctrl_res - Get VF control VSI resource
+ * @pf: pointer to the PF structure
+ * @vsi: the VSI to allocate a resource for
+ *
+ * Look up whether another VF has already allocated the control VSI resource.
+ * If so, re-use this resource so that we share it among all VFs.
+ *
+ * Otherwise, allocate the resource and return it.
+ */
+static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int base;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ rcu_read_unlock();
+ return base;
+ }
+ }
+ rcu_read_unlock();
+
+ return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+}
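All VF control VSIs share a single interrupt vector, so the helper returns the base vector of whichever VF allocated it first and only reserves a fresh ICE_RES_VF_CTRL_VEC_ID entry when none exists; the call site in ice_vsi_setup_vector_base() below reduces to:

	if (vsi->type == ICE_VSI_CTRL && vsi->vf)
		base = ice_get_vf_ctrl_res(pf, vsi);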
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -1356,20 +1416,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
- break;
- }
- }
- if (i == pf->num_alloc_vfs)
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- ICE_RES_VF_CTRL_VEC_ID);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ base = ice_get_vf_ctrl_res(pf, vsi);
} else {
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
@@ -1431,6 +1479,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
*/
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
+ bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 i;
@@ -1452,6 +1501,11 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
+ ring->txq_teid = ICE_INVAL_TEID;
+ if (dvm_ena)
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
+ else
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
WRITE_ONCE(vsi->tx_rings[i], ring);
}
@@ -1470,6 +1524,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
+ ring->cached_phctime = pf->ptp.cached_phc_time;
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1510,6 +1565,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
+ * @vsi: VSI to be configured
+ * @disable: set to true to have FCS / CRC in the frame data
+ */
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i)
+ if (disable)
+ vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+}
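Usage sketch: the ice_vsi_rebuild() hunk later in this patch keeps the per-ring flag in sync with the netdev feature bit, so a NETIF_F_RXFCS toggle survives a VSI rebuild:

	if (vsi->netdev)
		ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
					      NETIF_F_RXFCS));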
+
+/**
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
@@ -1684,6 +1755,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
@@ -1757,62 +1834,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_vsi_add_vlan - Add VSI membership for given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be added
- * @action: filter action to be performed on match
- */
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err = 0;
-
- dev = ice_pf_to_dev(pf);
-
- if (!ice_fltr_add_vlan(vsi, vid, action)) {
- vsi->num_vlan++;
- } else {
- err = -ENODEV;
- dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
- vsi->vsi_num);
- }
-
- return err;
-}
-
-/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be removed
- *
- * Returns 0 on success and negative on failure
- */
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err;
-
- dev = ice_pf_to_dev(pf);
-
- err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!err) {
- vsi->num_vlan--;
- } else if (err == -ENOENT) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
- vid, vsi->vsi_num, err);
- err = 0;
- } else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
- vid, vsi->vsi_num, err);
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
*/
@@ -1984,8 +2005,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
return ret;
}
@@ -2140,95 +2161,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
}
/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the VSI being changed
- */
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
- * insertion happens in the Tx hot path, in ice_tx_map.
- */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
- /* Preserve existing VLAN strip setting */
- ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
- ICE_AQ_VSI_VLAN_EMOD_M);
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the VSI being changed
- * @ena: boolean value indicating if this is a enable or disable request
- */
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- /* do not allow modifying VLAN stripping when a port VLAN is configured
- * on this VSI
- */
- if (vsi->info.pvid)
- return 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring what the VSI should do with the VLAN tag in
- * the Rx packet. We can either leave the tag in the packet or put it in
- * the Rx descriptor.
- */
- if (ena)
- /* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- else
- /* Disable stripping. Leave tag in packet */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
-
- /* Allow all packets untagged/tagged */
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
* ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
* @vsi: the VSI whose rings are to be enabled
*
@@ -2308,72 +2240,42 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
}
/**
- * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
- * @vsi: VSI to check whether or not VLAN pruning is enabled.
+ * ice_vsi_is_rx_queue_active - check if at least one Rx queue is enabled
+ * @vsi: the VSI to check
*
- * returns true if Rx VLAN pruning is enabled and false otherwise.
+ * Return true if at least one Rx queue is active.
*/
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
{
- if (!vsi)
- return false;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int i;
- return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
+ ice_for_each_rxq(vsi, i) {
+ u32 rx_reg;
+ int pf_q;
+
+ pf_q = vsi->rxq_map[i];
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ if (rx_reg & QRX_CTRL_QENA_STAT_M)
+ return true;
+ }
+
+ return false;
}
/**
- * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
- * @vsi: VSI to enable or disable VLAN pruning on
- * @ena: set to true to enable VLAN pruning and false to disable it
+ * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
+ * @vsi: VSI to check whether or not VLAN pruning is enabled.
*
- * returns 0 if VSI is updated, negative otherwise
+ * returns true if Rx VLAN pruning is enabled and false otherwise.
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
- struct ice_vsi_ctx *ctxt;
- struct ice_pf *pf;
- int status;
-
if (!vsi)
- return -EINVAL;
-
- /* Don't enable VLAN pruning if the netdev is currently in promiscuous
- * mode. VLAN pruning will be enabled when the interface exits
- * promiscuous mode if any VLAN filters are active.
- */
- if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
- return 0;
-
- pf = vsi->back;
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
-
- if (ena)
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- else
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
- if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- status, ice_aq_str(pf->hw.adminq.sq_last_status));
- goto err_out;
- }
-
- vsi->info.sw_flags2 = ctxt->info.sw_flags2;
-
- kfree(ctxt);
- return 0;
+ return false;
-err_out:
- kfree(ctxt);
- return -EIO;
+ return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
@@ -2410,7 +2312,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
}
if (vsi->type == ICE_VSI_VF) {
- struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+ struct ice_vf *vf = vsi->vf;
q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
} else {
@@ -2564,7 +2466,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
agg_id);
return;
}
- /* aggregator node is created, store the neeeded info */
+ /* aggregator node is created, store the needed info */
agg_node->valid = true;
agg_node->agg_id = agg_id;
}
@@ -2595,9 +2497,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @pf: board private structure
* @pi: pointer to the port_info instance
* @vsi_type: VSI type
- * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
- * used only for ICE_VSI_VF VSI type. For other VSI types, should
- * fill-in ICE_INVAL_VFID as input.
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ * for the ICE_VSI_VF type. Other VSI types should pass NULL.
* @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
@@ -2607,7 +2508,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
+ enum ice_vsi_type vsi_type, struct ice_vf *vf,
+ struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2615,11 +2517,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
int ret, i;
if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2631,9 +2533,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
- vsi->vf_id = vf_id;
-
ice_alloc_fd_res(vsi);
if (vsi_type != ICE_VSI_CHNL) {
@@ -2655,6 +2554,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_get_qs;
+ ice_vsi_init_vlan_ops(vsi);
+
switch (vsi->type) {
case ICE_VSI_CTRL:
case ICE_VSI_SWITCHDEV_CTRL:
@@ -2675,17 +2576,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
- /* Always add VLAN ID 0 switch rule by default. This is needed
- * in order to allow all untagged and 0 tagged priority traffic
- * if Rx VLAN pruning is enabled. Also there are cases where we
- * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
- * so this handles those cases (i.e. adding the PF to a bridge
- * without the 8021q module loaded).
- */
- ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (ret)
- goto unroll_clear_rings;
-
ice_vsi_map_rings_to_vectors(vsi);
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -2862,6 +2752,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
+ ice_free_cpu_rx_rmap(vsi);
+
ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2875,7 +2767,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
continue;
/* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
@@ -3063,6 +2956,37 @@ void ice_napi_del(struct ice_vsi *vsi)
}
/**
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
+ *
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
+ */
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3089,9 +3013,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- if (vsi->type == ICE_VSI_PF)
- ice_devlink_destroy_pf_port(pf);
-
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -3105,23 +3026,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
- break;
- }
- if (i == pf->num_alloc_vfs) {
- /* No other VFs left that have control VSI, reclaim SW
- * interrupts back to the common pool
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
} else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -3141,6 +3047,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
+ if (ice_is_vsi_dflt_vsi(vsi))
+ ice_clear_dflt_vsi(vsi);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -3162,6 +3070,9 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
@@ -3195,8 +3106,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- coalesce[i].itr_tx = q_vector->tx.itr_setting;
- coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].itr_tx = q_vector->tx.itr_settings;
+ coalesce[i].itr_rx = q_vector->rx.itr_settings;
coalesce[i].intrl = q_vector->intrl;
if (i < vsi->num_txq)
@@ -3252,21 +3163,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
*/
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[i].itr_rx;
+ rc->itr_settings = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
}
if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[i].itr_tx;
+ rc->itr_settings = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
}
@@ -3280,12 +3191,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
for (; i < vsi->num_q_vectors; i++) {
/* transmit */
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
/* receive */
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
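Switching from itr_setting to itr_settings preserves the dynamic-moderation mode bit across a rebuild, not just the 13-bit interval. This assumes struct ice_ring_container overlays the fields in a union roughly like:

	union {
		struct {
			u16 itr_setting:13;	/* interval in usecs */
			u16 itr_reserved:2;
			u16 itr_mode:1;		/* dynamic vs. static */
		};
		u16 itr_settings;		/* saved and restored whole */
	};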
@@ -3305,7 +3216,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors = 0;
- struct ice_vf *vf = NULL;
enum ice_vsi_type vtype;
struct ice_pf *pf;
int ret, i;
@@ -3315,8 +3225,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (vtype == ICE_VSI_VF)
- vf = &pf->vf[vsi->vf_id];
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
+
+ ice_vsi_init_vlan_ops(vsi);
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
@@ -3353,9 +3265,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf->vf_id);
+ ice_vsi_set_num_qs(vsi, vsi->vf);
else
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ ice_vsi_set_num_qs(vsi, NULL);
ret = ice_vsi_alloc_arrays(vsi);
if (ret < 0)
@@ -3408,6 +3320,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
*/
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
+
+ /* disable or enable CRC stripping */
+ if (vsi->netdev)
+ ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
+ NETIF_F_RXFCS));
+
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3615,13 +3533,14 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
*
* Prepares VSI tc_config to have queue configurations based on MQPRIO options.
*/
-static void
+static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u8 ena_tc)
{
u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u16 new_txq, new_rxq;
u8 netdev_tc = 0;
int i;
@@ -3662,9 +3581,23 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
}
}
+ new_txq = offset + qcount_tx;
+ if (new_txq > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
+ new_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
+
+ new_rxq = offset + qcount_rx;
+ if (new_rxq > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
+ new_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
/* Set actual Tx/Rx queue pairs */
- vsi->num_txq = offset + qcount_tx;
- vsi->num_rxq = offset + qcount_rx;
+ vsi->num_txq = new_txq;
+ vsi->num_rxq = new_rxq;
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
@@ -3682,6 +3615,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+
+ return 0;
}
/**
@@ -3695,6 +3630,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
+ struct ice_tc_cfg old_tc_cfg;
struct ice_vsi_ctx *ctx;
struct device *dev;
int i, ret = 0;
@@ -3719,6 +3655,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs[i] = vsi->num_txq;
}
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
@@ -3731,9 +3668,14 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
else
- ice_vsi_setup_q_map(vsi, ctx);
+ ret = ice_vsi_setup_q_map(vsi, ctx);
+
+ if (ret) {
+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
+ goto out;
+ }
/* must indicate which section of the VSI context is being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -3806,116 +3748,97 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
- * @sw: switch to check if its default forwarding VSI is free
+ * @pi: port info of the switch with default VSI
*
- * Return true if the default forwarding VSI is already being used, else returns
- * false signalling that it's available to use.
+ * Return true if there is at least one VSI in the default forwarding VSI list
*/
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
- return (sw->dflt_vsi && sw->dflt_vsi_ena);
+ bool exists = false;
+
+ ice_check_if_dflt_vsi(pi, 0, &exists);
+ return exists;
}
/**
* ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
- * @sw: switch for the default forwarding VSI to compare against
* @vsi: VSI to compare against default forwarding VSI
*
* If this VSI passed in is the default forwarding VSI then return true, else
* return false
*/
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
- return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+ return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}
/**
* ice_set_dflt_vsi - set the default forwarding VSI
- * @sw: switch used to assign the default forwarding VSI
* @vsi: VSI getting set as the default forwarding VSI on the switch
*
* If the VSI passed in is already the default VSI and it's enabled just return
* success.
*
- * If there is already a default VSI on the switch and it's enabled then return
- * -EEXIST since there can only be one default VSI per switch.
- *
- * Otherwise try to set the VSI passed in as the switch's default VSI and
- * return the result.
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
*/
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
struct device *dev;
int status;
- if (!sw || !vsi)
+ if (!vsi)
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
/* the VSI passed in is already the default VSI */
- if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
vsi->vsi_num);
return 0;
}
- /* another VSI is already the default VSI for this switch */
- if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
- sw->dflt_vsi->vsi_num);
- return -EEXIST;
- }
-
- status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return status;
}
- sw->dflt_vsi = vsi;
- sw->dflt_vsi_ena = true;
-
return 0;
}
/**
* ice_clear_dflt_vsi - clear the default forwarding VSI
- * @sw: switch used to clear the default VSI
+ * @vsi: VSI to remove from filter list
*
* If the switch has no default VSI or it's not enabled then return error.
*
* Otherwise try to clear the default VSI and return the result.
*/
-int ice_clear_dflt_vsi(struct ice_sw *sw)
+int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
- struct ice_vsi *dflt_vsi;
struct device *dev;
int status;
- if (!sw)
+ if (!vsi)
return -EINVAL;
- dev = ice_pf_to_dev(sw->pf);
-
- dflt_vsi = sw->dflt_vsi;
+ dev = ice_pf_to_dev(vsi->back);
/* there is no default VSI configured */
- if (!ice_is_dflt_vsi_in_use(sw))
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info))
return -ENODEV;
- status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ vsi->vsi_num, status);
return -EIO;
}
- sw->dflt_vsi = NULL;
- sw->dflt_vsi_ena = false;
-
return 0;
}
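With the switch-wide dflt_vsi/dflt_vsi_ena bookkeeping removed, callers query and toggle the default-VSI rule purely through the VSI; the promiscuous-mode sync in the ice_main.c hunks below becomes:

	/* IFF_PROMISC set: steer wire traffic to this VSI */
	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
		err = ice_set_dflt_vsi(vsi);

	/* IFF_PROMISC cleared: drop the rule again */
	if (ice_is_vsi_dflt_vsi(vsi))
		err = ice_clear_dflt_vsi(vsi);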
@@ -4117,9 +4040,9 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
*/
if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
+ ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
@@ -4131,6 +4054,124 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to add VLAN filters
+ *
+ * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
+ * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
+ * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
+ * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
+ *
+ * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
+ * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
+ * traffic in SVM, since the VLAN TPID isn't part of filtering.
+ *
+ * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
+ * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
+ * part of filtering.
+ */
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to add VLAN filters
+ *
+ * Delete the VLAN 0 filters in the same manner that they were added in
+ * ice_vsi_add_vlan_zero.
+ */
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
+}
+
+/**
+ * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
+ * @vsi: VSI used to get the VLAN mode
+ *
+ * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
+ * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
+ */
+static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
+{
+#define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2
+#define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1
+ /* no VLAN 0 filter is created when a port VLAN is active */
+ if (vsi->type == ICE_VSI_VF) {
+ if (WARN_ON(!vsi->vf))
+ return 0;
+
+ if (ice_vf_is_port_vlan_ena(vsi->vf))
+ return 0;
+ }
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
+ else
+ return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
+}
+
+/**
+ * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
+ * @vsi: VSI used to determine if any non-zero VLANs have been added
+ */
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
+}
+
+/**
+ * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
+ * @vsi: VSI used to get the number of non-zero VLANs added
+ */
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
+}
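These helpers let callers reason about real VLAN filters without knowing how many implicit VLAN 0 entries the current VLAN mode added. For example, the promiscuous-mode rework in ice_main.c below keys VLAN promiscuity off ice_vsi_has_non_zero_vlans():

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	}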
+
+/**
* ice_is_feature_supported
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to be checked
@@ -4184,8 +4225,12 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- if (ice_is_e810t(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
+ if (ice_is_e810t(&pf->hw)) {
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index b2ed189527d6..dcdf69a693e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -5,6 +5,7 @@
#define _ICE_LIB_H_
#include "ice.h"
+#include "ice_vlan.h"
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
@@ -22,15 +23,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
-
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
-
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
-
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
@@ -45,8 +37,6 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
-
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -62,7 +52,8 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
+ enum ice_vsi_type vsi_type, struct ice_vf *vf,
+ struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -98,6 +89,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
+
void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
@@ -110,14 +103,11 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-bool ice_is_aux_ena(struct ice_pf *pf);
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
-
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_clear_dflt_vsi(struct ice_sw *sw);
+bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
+int ice_set_dflt_vsi(struct ice_vsi *vsi);
+int ice_clear_dflt_vsi(struct ice_vsi *vsi);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi);
@@ -132,8 +122,12 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
-
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
+bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 30814435f779..0f6718719453 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -21,6 +21,7 @@
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
+#include "ice_vsi_vlan_ops.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -47,6 +48,21 @@ static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
+/**
+ * ice_hw_to_dev - Get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ *
+ * Used to access the device pointer from compilation units which can't easily
+ * include the definition of struct ice_pf without leading to circular header
+ * dependencies.
+ */
+struct device *ice_hw_to_dev(struct ice_hw *hw)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ return &pf->pdev->dev;
+}
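Illustrative use from a compilation unit that only sees struct ice_hw (the message here is just an example):

	dev_dbg(ice_hw_to_dev(hw), "admin queue command completed\n");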
+
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -227,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
- test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
- test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+ test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}
/**
@@ -244,11 +259,18 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
- status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
- else
- status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
- return status;
+ if (ice_vsi_has_non_zero_vlans(vsi)) {
+ promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
+ promisc_m);
+ } else {
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ promisc_m, 0);
+ }
+ if (status && status != -EEXIST)
+ return status;
+
+ return 0;
}
/**
@@ -264,14 +286,33 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
- status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
- else
- status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ if (ice_vsi_has_non_zero_vlans(vsi)) {
+ promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
+ promisc_m);
+ } else {
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ promisc_m, 0);
+ }
+
return status;
}
/**
+ * ice_get_devlink_port - Get devlink port from netdev
+ * @netdev: the netdevice structure
+ */
+static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (!ice_is_switchdev_running(pf))
+ return NULL;
+
+ return &pf->devlink_port;
+}
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -279,13 +320,13 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 changed_flags = 0;
- u8 promisc_m;
int err;
if (!vsi->netdev)
@@ -303,7 +344,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (ice_vsi_fltr_changed(vsi)) {
clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
- clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
/* grab the netdev's addr_list_lock */
netif_addr_lock_bh(netdev);
@@ -352,29 +392,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (vsi->num_vlan > 1)
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
-
- err = ice_set_promisc(vsi, promisc_m);
+ err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
if (err) {
- netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
- vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (vsi->num_vlan > 1)
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
-
- err = ice_clear_promisc(vsi, promisc_m);
+ err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
if (err) {
- netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
- vsi->vsi_num);
vsi->current_netdev_flags |= IFF_ALLMULTI;
goto out_promisc;
}
@@ -386,8 +412,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
- err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
+ err = ice_set_dflt_vsi(vsi);
if (err && err != -EEXIST) {
netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -396,12 +422,12 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
err = 0;
- ice_cfg_vlan_pruning(vsi, false);
+ vlan_ops->dis_rx_filtering(vsi);
}
} else {
/* Clear Rx filter to remove traffic from wire */
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
- err = ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi)) {
+ err = ice_clear_dflt_vsi(vsi);
if (err) {
netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -409,8 +435,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true);
+ if (vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+ vlan_ops->ena_rx_filtering(vsi);
}
}
}
@@ -502,7 +529,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
@@ -517,8 +545,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- ice_for_each_vf(pf, i)
- ice_set_vf_state_qs_dis(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_set_vf_state_qs_dis(vf);
+ mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
if (reset_type != ICE_RESET_PFR)
@@ -565,6 +595,9 @@ skip:
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_prepare_for_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -610,7 +643,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
}
@@ -661,7 +694,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
return;
@@ -1660,7 +1693,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
u32 reg;
if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1748,47 +1782,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- reg = rd32(hw, VP_MDET_TX_PQM(i));
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
- wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
- wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_RX(i));
+ reg = rd32(hw, VP_MDET_RX(vf->vf_id));
if (reg & VP_MDET_RX_VALID_M) {
- wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
vf->mdd_rx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
- i);
+ vf->vf_id);
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
@@ -1799,10 +1832,11 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(&pf->vf[i], false);
+ ice_reset_vf(vf, ICE_VF_RESET_LOCK);
}
}
}
+ mutex_unlock(&pf->vfs.table_lock);
ice_print_vfs_mdd_events(pf);
}
@@ -2253,6 +2287,43 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ swap(event->reg, pf->oicr_err_reg);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
+ if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+ /* Plug aux device per request */
+ ice_plug_aux_dev(pf);
+
+ /* Mark plugging as done but check whether unplug was
+ * requested during ice_plug_aux_dev() call
+ * (e.g. from ice_clear_rdma_cap()) and if so then
+ * unplug aux device.
+ */
+ if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+ }
+
+ if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@@ -2328,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
- ice_unplug_aux_dev(pf);
-
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2428,7 +2497,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf)
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
IRQF_SHARED, q_vector->name,
q_vector);
@@ -2455,6 +2524,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
+ err = ice_set_cpu_rx_rmap(vsi);
+ if (err) {
+ netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+ vsi->vsi_num, ERR_PTR(err));
+ goto free_q_irqs;
+ }
+
vsi->irqs_ready = true;
return 0;
@@ -2495,29 +2571,21 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
+ xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
+ xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
- tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ tx_desc->cmd_type_offset_bsz = 0;
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
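
With the fixed ICE_TX_THRESH gone, the next_dd/next_rs clean-up thresholds now scale with ring size. Presumably ICE_RING_QUARTER resolves to a quarter of the descriptor count, along these lines (an assumption, not a verified excerpt from the driver headers):

    /* assumed helper: clean-up thresholds track the ring size */
    #define ICE_RING_QUARTER(R) ((R)->count >> 2)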
@@ -2607,6 +2675,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -2703,8 +2788,10 @@ free_qmap:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
- if (vsi->xdp_rings[i]->desc)
+ if (vsi->xdp_rings[i]->desc) {
+ synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
@@ -2809,10 +2896,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ /* reallocate Rx queues that are used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
} else {
/* safe to call even when prog == vsi->xdp_prog as
* dev_xdp_install in net/core/dev.c incremented prog's
@@ -2998,7 +3093,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- ice_ptp_process_ts(pf);
+ if (!hw->reset_ongoing)
+ ret = IRQ_WAKE_THREAD;
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3015,17 +3111,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
- struct iidc_event *event;
-
+ pf->oicr_err_reg |= oicr;
+ set_bit(ICE_AUX_ERR_PENDING, pf->state);
ena_mask &= ~ICE_AUX_CRIT_ERR;
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
- /* report the entire OICR value to AUX driver */
- event->reg = oicr;
- ice_send_event_to_aux(pf, event);
- kfree(event);
- }
}
/* Report any remaining unexpected interrupts */
@@ -3041,7 +3129,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ice_service_task_schedule(pf);
}
}
- ret = IRQ_HANDLED;
+ if (!ret)
+ ret = IRQ_HANDLED;
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -3050,6 +3139,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
+ * ice_misc_intr_thread_fn - misc interrupt thread function
+ * @irq: interrupt number
+ * @data: pointer to the PF private structure
+ */
+static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+{
+ irqreturn_t ret = IRQ_HANDLED;
+ struct ice_pf *pf = data;
+ bool irq_handled;
+
+ irq_handled = ice_ptp_process_ts(pf);
+ if (!irq_handled)
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @hw: pointer to HW structure
*/
@@ -3161,10 +3268,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
- ice_misc_intr, 0, pf->int_name, pf);
+ err = devm_request_threaded_irq(dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, ice_misc_intr_thread_fn,
+ 0, pf->int_name, pf);
if (err) {
- dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
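
Tx timestamp processing thus moves off the hard IRQ path: ice_misc_intr() returns IRQ_WAKE_THREAD when timestamps are pending and no reset is ongoing, and ice_misc_intr_thread_fn() runs ice_ptp_process_ts() in sleepable process context. A minimal sketch of the split, with placeholder handlers:

    /* hard-IRQ / threaded-handler split (illustrative names) */
    static irqreturn_t demo_hard_irq(int irq, void *data)
    {
            /* fast register work only; defer the slow part */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_thread_fn(int irq, void *data)
    {
            /* process context: may sleep, take mutexes, poll HW */
            return IRQ_HANDLED;
    }

    /* err = devm_request_threaded_irq(dev, irq, demo_hard_irq,
     *                                 demo_thread_fn, 0, name, data); */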
@@ -3201,7 +3310,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll, NAPI_POLL_WEIGHT);
+ ice_napi_poll);
}
/**
@@ -3230,6 +3339,7 @@ static void ice_set_ops(struct net_device *netdev)
static void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
@@ -3256,6 +3366,10 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+ if (is_dvm_ena)
+ vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
@@ -3275,18 +3389,35 @@ static void ice_set_netdev_features(struct net_device *netdev)
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
+
+ /* advertise support but don't enable by default since only one type of
+ * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
+ * type turns on the other has to be turned off. This is enforced by the
+ * ice_fix_features() ndo callback.
+ */
+ if (is_dvm_ena)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX;
+
+ /* Leave CRC / FCS stripping enabled by default, but allow the value to
+ * be changed at runtime
+ */
+ netdev->hw_features |= NETIF_F_RXFCS;
}
/**
@@ -3361,14 +3492,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
}
/**
@@ -3382,7 +3513,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
}
/**
@@ -3396,42 +3527,68 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
}
/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be added
*
* net_device_ops implementation for adding VLAN IDs
*/
static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
- /* Enable VLAN pruning when a VLAN other than 0 is added */
- if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
+ /* Add multicast promisc rule for the VLAN ID to be added if
+ * all-multicast is currently enabled.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS,
+ vid);
if (ret)
- return ret;
+ goto finish;
}
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!ret)
- set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->add_vlan(vsi, &vlan);
+ if (ret)
+ goto finish;
+
+	/* If all-multicast is currently enabled and this VLAN ID is the only
+	 * one besides VLAN-0, we have to update the look-up type of the
+	 * multicast promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to
+	 * ICE_SW_LKUP_PROMISC_VLAN.
+	 */
+ if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
+ ice_vsi_num_non_zero_vlans(vsi) == 1) {
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_PROMISC_BITS, 0);
+ ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
+ }
+
+finish:
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return ret;
}
@@ -3439,35 +3596,69 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/**
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be removed
*
* net_device_ops implementation for removing VLAN IDs
*/
static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
- /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ /* Make sure VLAN delete is successful before updating VLAN
* information
*/
- ret = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->del_vlan(vsi, &vlan);
if (ret)
- return ret;
+ goto finish;
+
+ /* Remove multicast promisc rule for the removed VLAN ID if
+ * all-multicast is enabled.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI)
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+		/* Update the look-up type of the multicast promisc rule for
+		 * VLAN 0 from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC
+		 * when all-multicast is enabled and VLAN 0 is the only VLAN
+		 * rule.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS,
+ 0);
+ ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_PROMISC_BITS, 0);
+ }
+ }
- /* Disable pruning when VLAN 0 is the only VLAN rule */
- if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false);
+finish:
+ clear_bit(ICE_CFG_BUSY, vsi->state);
- set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret;
}
@@ -3537,12 +3728,17 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
+ bool dvm = ice_is_dvm_ena(&pf->hw);
struct ice_vsi *vsi;
int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (status)
+ return -EIO;
+
vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
if (!vsi)
return -ENOMEM;
@@ -3572,20 +3768,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
- status = ice_set_cpu_rx_rmap(vsi);
- if (status) {
- dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
- vsi->vsi_num, status);
- goto unroll_napi_add;
- }
status = ice_init_mac_fltr(pf);
if (status)
- goto free_cpu_rx_map;
+ goto unroll_napi_add;
return 0;
-free_cpu_rx_map:
- ice_free_cpu_rx_rmap(vsi);
unroll_napi_add:
ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
@@ -3650,9 +3838,11 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
+ mutex_destroy(&pf->vfs.table_lock);
if (pf->avail_txqs) {
bitmap_free(pf->avail_txqs);
@@ -3677,19 +3867,16 @@ static void ice_set_pf_caps(struct ice_pf *pf)
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
- if (func_caps->common_cap.rdma) {
+ if (func_caps->common_cap.rdma)
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
- pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
- ICE_MAX_VF_COUNT);
+ pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
+ ICE_MAX_SRIOV_VFS);
}
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
@@ -3730,6 +3917,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@ -3750,96 +3938,146 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
+ mutex_init(&pf->vfs.table_lock);
+ hash_init(pf->vfs.table);
+
return 0;
}
/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when the entire request cannot be
+ * fulfilled. pf->num_lan_msix and pf->num_rdma_msix are set based on the
+ * number of remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+ /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
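
As a worked example, assuming ICE_RDMA_NUM_AEQ_MSIX is 4: with v_remain = 20, v_rdma is 5, neither reduced branch triggers, so num_rdma_msix = (20 - 4) / 2 + 4 = 12 and num_lan_msix = 20 - 12 = 8; a smaller v_remain instead pins RDMA at ICE_MIN_RDMA_MSIX and gives LAN the rest.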
+
+/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure
*
- * compute the number of MSIX vectors required (v_budget) and request from
- * the OS. Return the number of vectors reserved or negative on failure
+ * Compute the number of MSIX vectors wanted and request from the OS. Adjust
+ * device usage if there are not enough vectors. Return the number of vectors
+ * reserved or negative on failure.
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int num_cpus, v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
struct device *dev = ice_pf_to_dev(pf);
- int needed, err, i;
+ int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */
- needed = ICE_MIN_LAN_OICR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
- /* reserve for flow director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
- needed = ICE_FDIR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
- }
-
- /* reserve for switchdev */
- needed = ICE_ESWITCH_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
-
- /* total used for non-traffic vectors */
- v_other = v_budget;
-
- /* reserve vectors for LAN traffic */
- needed = num_cpus;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
-
- /* reserve vectors for RDMA auxiliary driver */
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
- needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_rdma_msix = needed;
- v_budget += needed;
- v_left -= needed;
- }
-
- pf->msix_entries = devm_kcalloc(dev, v_budget,
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
+
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
+
+ v_wanted = v_other;
+
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
+
+ /* RDMA auxiliary driver */
+ if (ice_is_rdma_ena(pf)) {
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
+ }
+
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+
+ pf->msix_entries = devm_kcalloc(dev, v_wanted,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
- for (i = 0; i < v_budget; i++)
+ for (i = 0; i < v_wanted; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
- ICE_MIN_MSIX, v_budget);
+ ICE_MIN_MSIX, v_wanted);
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
- if (v_actual < v_budget) {
+ if (v_actual < v_wanted) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_budget, v_actual);
+ v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
@@ -3848,43 +4086,16 @@ static int ice_ena_msix_range(struct ice_pf *pf)
goto msix_err;
} else {
int v_remain = v_actual - v_other;
- int v_rdma = 0, v_min_rdma = 0;
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
- /* Need at least 1 interrupt in addition to
- * AEQ MSIX
- */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
- v_min_rdma = ICE_MIN_RDMA_MSIX;
- }
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX ||
- v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
- dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining
- * vectors to LAN MSIX
- */
- pf->num_rdma_msix = v_min_rdma;
- pf->num_lan_msix = v_remain - v_min_rdma;
- } else {
- /* Split remaining MSIX with RDMA after
- * accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
+ ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (ice_is_rdma_ena(pf))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
@@ -3894,12 +4105,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
msix_err:
devm_kfree(dev, pf->msix_entries);
- goto exit_err;
-no_hw_vecs_left_err:
- dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
- needed, v_left);
- err = -ERANGE;
exit_err:
pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
@@ -4064,8 +4270,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
/* allow all VLANs on Tx and don't strip on Rx */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
- ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -4074,7 +4280,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
}
kfree(ctxt);
@@ -4393,6 +4599,10 @@ static int ice_register_netdev(struct ice_pf *pf)
if (!vsi || !vsi->netdev)
return -EIO;
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create;
+
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
@@ -4400,17 +4610,13 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
-err_devlink_create:
- unregister_netdev(vsi->netdev);
- clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4459,8 +4665,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
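
Dropping the 32-bit retry is safe because dma_set_mask_and_coherent(DMA_BIT_MASK(64)) only fails when the platform cannot do DMA for this device at all, in which case a 32-bit mask would fail too; the fallback was dead code.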
@@ -4683,6 +4887,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4712,7 +4919,7 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_aux_ena(pf)) {
+ if (ice_is_rdma_ena(pf)) {
pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
@@ -4854,14 +5061,16 @@ static void ice_remove(struct pci_dev *pdev)
ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -5043,7 +5252,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
- ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
@@ -5173,12 +5381,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_aer_clear_nonfatal_status(pdev);
- if (err)
- dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
- err);
- /* non-fatal, continue */
-
return result;
}
@@ -5273,6 +5475,7 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
/* required last entry */
{ 0, }
};
@@ -5401,16 +5604,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* Add filter for new MAC. If filter exists, return success */
err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (err == -EEXIST)
+ if (err == -EEXIST) {
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- else if (err)
+
+ return 0;
+ } else if (err) {
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
+ }
err_update_filters:
if (err) {
@@ -5547,11 +5753,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+ __always_unused u16 vid, struct netlink_ext_ack *extack)
{
int err;
@@ -5570,6 +5777,251 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
return err;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/**
+ * ice_fix_features - fix the netdev features flags based on device limitations
+ * @netdev: ptr to the netdev that flags are being fixed on
+ * @features: features that need to be checked and possibly fixed
+ *
+ * Make sure any fixups are made to features in this callback. This keeps the
+ * driver from having to check for unsupported configurations throughout the
+ * rest of the driver, because that is the responsibility of this callback.
+ *
+ * Single VLAN Mode (SVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * Double VLAN Mode (DVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * NETIF_F_HW_VLAN_STAG_FILTER
+ *	NETIF_F_HW_VLAN_STAG_RX
+ *	NETIF_F_HW_VLAN_STAG_TX
+ *
+ * Features that need fixing:
+ * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
+ *	These are mutually exclusive as the VSI context cannot support multiple
+ * VLAN ethertypes simultaneously for stripping and/or insertion. If this
+ * is not done, then default to clearing the requested STAG offload
+ * settings.
+ *
+ * All supported filtering has to be enabled or disabled together. For
+ * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
+ * together. If this is not done, then default to VLAN filtering disabled.
+ * These are mutually exclusive as there is currently no way to
+ * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
+ * prune rules.
+ */
+static netdev_features_t
+ice_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ netdev_features_t req_vlan_fltr, cur_vlan_fltr;
+ bool cur_ctag, cur_stag, req_ctag, req_stag;
+
+ cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+ cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+ req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
+ req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+ req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if (req_vlan_fltr != cur_vlan_fltr) {
+ if (ice_is_dvm_ena(&np->vsi->back->hw)) {
+ if (req_ctag && req_stag) {
+ features |= NETIF_VLAN_FILTERING_FEATURES;
+ } else if (!req_ctag && !req_stag) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ } else if ((!cur_ctag && req_ctag && !cur_stag) ||
+ (!cur_stag && req_stag && !cur_ctag)) {
+ features |= NETIF_VLAN_FILTERING_FEATURES;
+ netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
+ } else if ((cur_ctag && !req_ctag && cur_stag) ||
+ (cur_stag && !req_stag && cur_ctag)) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
+ }
+ } else {
+ if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
+ netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
+
+ if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ }
+
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
+ netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ if (!(netdev->features & NETIF_F_RXFCS) &&
+ (features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+ !ice_vsi_has_non_zero_vlans(np->vsi)) {
+ netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ }
+
+ return features;
+}
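
In practice these fixups surface through ethtool -K: in DVM, requesting only one of the paired filtering bits (say, the 802.1ad filter without the 802.1Q one) results in both being toggled together plus the netdev_warn() above, while in SVM any 802.1ad filtering request is refused. The feature-string names for these bits come from the core netdev feature table, not from this driver.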
+
+/**
+ * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN offload settings
+ *
+ * First, determine the vlan_ethertype based on the VLAN offload bits in
+ * features. Then determine if stripping and insertion should be enabled or
+ * disabled. Finally enable or disable VLAN stripping and insertion.
+ */
+static int
+ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int strip_err = 0, insert_err = 0;
+ u16 vlan_ethertype = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (enable_stripping)
+ strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
+ else
+ strip_err = vlan_ops->dis_stripping(vsi);
+
+ if (enable_insertion)
+ insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
+ else
+ insert_err = vlan_ops->dis_insertion(vsi);
+
+ if (strip_err || insert_err)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN filtering settings
+ *
+ * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
+ * features.
+ */
+static int
+ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ int err = 0;
+
+ /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
+ * if either bit is set
+ */
+ if (features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ err = vlan_ops->ena_rx_filtering(vsi);
+ else
+ err = vlan_ops->dis_rx_filtering(vsi);
+
+ return err;
+}
+
+/**
+ * ice_set_vlan_features - set VLAN settings based on suggested feature set
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ * Only update VLAN settings if the requested_vlan_features are different from
+ * the current_vlan_features.
+ */
+static int
+ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t current_vlan_features, requested_vlan_features;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int err;
+
+ current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+ return -EIO;
+ }
+
+ err = ice_set_vlan_offload_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ current_vlan_features = netdev->features &
+ NETIF_VLAN_FILTERING_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_filtering_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev);
+ int ret;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
+ return ret;
+ }
+ }
+ ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ ret = ice_up(vsi);
+
+ return ret;
+}
+
/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -5578,61 +6030,58 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
- if (ice_is_safe_mode(vsi->back)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
+ if (ice_is_safe_mode(pf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(pf),
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
- if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
- ice_vsi_manage_rss_lut(vsi, true);
- else if (!(features & NETIF_F_RXHASH) &&
- netdev->features & NETIF_F_RXHASH)
- ice_vsi_manage_rss_lut(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false);
-
- if ((features & NETIF_F_NTUPLE) &&
- !(netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, true);
- ice_init_arfs(vsi);
- } else if (!(features & NETIF_F_NTUPLE) &&
- (netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, false);
- ice_clear_arfs(vsi);
+ if (changed & NETIF_F_RXHASH)
+ ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
+
+ ret = ice_set_vlan_features(netdev, features);
+ if (ret)
+ return ret;
+
+	/* Turn on reception of the FCS, aka CRC; after setting this
+	 * flag the packet data will have the 4-byte CRC appended
+	 */
+ if (changed & NETIF_F_RXFCS) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+ return -EIO;
+ }
+
+ ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+ ret = ice_down_up(vsi);
+ if (ret)
+ return ret;
+ }
+
+ if (changed & NETIF_F_NTUPLE) {
+ bool ena = !!(features & NETIF_F_NTUPLE);
+
+ ice_vsi_manage_fdir(vsi, ena);
+ ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
}
/* don't turn off hw_tc_offload when ADQ is already enabled */
@@ -5641,29 +6090,36 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return -EACCES;
}
- if ((features & NETIF_F_HW_TC) &&
- !(netdev->features & NETIF_F_HW_TC))
- set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- else
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ if (changed & NETIF_F_HW_TC) {
+ bool ena = !!(features & NETIF_F_HW_TC);
+
+ ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
return ret;
}
/**
- * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
* @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
- int ret = 0;
+ int err;
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
- ret = ice_vsi_manage_vlan_insertion(vsi);
+ err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
- return ret;
+ err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
+
+ return ice_vsi_add_vlan_zero(vsi);
}
/**
@@ -5679,10 +6135,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_vsi_vlan_setup(vsi);
+ if (vsi->type != ICE_VSI_LB) {
+ err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -5871,9 +6329,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
- /* clear this now, and the first stats read will be used as baseline */
- vsi->stat_offsets_loaded = false;
-
+ /* Perform an initial read of the statistics registers now to
+ * set the baseline so counters are ready when interface is up
+ */
+ ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;
@@ -5904,9 +6363,9 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that needs to be performed to read u64 values in 32 bit machine.
*/
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
- u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes)
{
unsigned int start;
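
The body (unchanged by this patch) follows the canonical u64_stats seqcount retry loop, which makes the pair of 64-bit counters readable atomically on 32-bit machines; roughly, assuming the _irq fetch variants of this kernel generation:

    /* sketch of the standard fetch loop, not a verbatim excerpt */
    do {
            start = u64_stats_fetch_begin_irq(syncp);
            *pkts = stats.pkts;
            *bytes = stats.bytes;
    } while (u64_stats_fetch_retry_irq(syncp, start));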
@@ -5936,8 +6395,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (ring)
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ if (!ring)
+ continue;
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -6264,11 +6724,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0;
+ int i, tx_err, rx_err, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ vlan_err = ice_vsi_del_vlan_zero(vsi);
if (!ice_is_e810(&vsi->back->hw))
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
@@ -6297,20 +6758,13 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- link_err = ice_force_phys_link_state(vsi, false);
- if (link_err)
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
- }
-
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err) {
+ if (tx_err || rx_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6320,6 +6774,31 @@ int ice_down(struct ice_vsi *vsi)
}
/**
+ * ice_down_up - shutdown the VSI connection and bring it up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+ int ret;
+
+ /* if DOWN already set, nothing to do */
+ if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+ return 0;
+
+ ret = ice_down(vsi);
+ if (ret)
+ return ret;
+
+ ret = ice_up(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @vsi: VSI having resources allocated
*
@@ -6472,6 +6951,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -6620,6 +7101,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ bool dvm;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6627,12 +7109,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
+#define ICE_EMP_RESET_SLEEP_MS 5000
if (reset_type == ICE_RESET_EMPR) {
/* If an EMP reset has occurred, any previously pending flash
* update will have completed. We no longer know whether or
* not the NVM update EMP reset is restricted.
*/
pf->fw_emp_reset_disabled = false;
+
+ msleep(ICE_EMP_RESET_SLEEP_MS);
}
err = ice_init_all_ctrlq(hw);
@@ -6657,12 +7142,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
- if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
- /* clear the default VSI configuration if it exists */
- pf->first_sw->dflt_vsi = NULL;
- pf->first_sw->dflt_vsi_ena = false;
-
ice_clear_pxe_mode(hw);
err = ice_init_nvm(hw);
@@ -6683,6 +7162,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ dvm = ice_is_dvm_ena(hw);
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_init_ctrlq;
+
err = ice_sched_init_port(hw->port_info);
if (err)
goto err_sched_init_port;
@@ -6719,6 +7204,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
@@ -6817,7 +7305,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct iidc_event *event;
u8 count = 0;
int err = 0;
@@ -6852,14 +7339,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
@@ -6867,21 +7346,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- goto event_after;
+ return err;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- goto event_after;
+ return err;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
-event_after:
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
return err;
}
@@ -8509,6 +8985,16 @@ int ice_stop(struct net_device *netdev)
return -EBUSY;
}
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ return -EIO;
+ }
+ }
+
ice_vsi_close(vsi);
return 0;
@@ -8525,6 +9011,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +9024,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
@@ -8582,6 +9077,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_start_xmit = ice_start_xmit,
.ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
+ .ndo_fix_features = ice_fix_features,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
@@ -8612,4 +9108,5 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_get_devlink_port = ice_get_devlink_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 4eb0599714f4..c262dc886e6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
orom_data, hw->flash.banks.orom_size);
if (status) {
+ vfree(orom_data);
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
return status;
}
@@ -1113,14 +1114,18 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
*
- * cmd_flags controls which banks to activate, and the preservation level to
- * use when activating the NVM bank.
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate one-byte
+ * flag fields in the descriptor.
*
* On successful return of the firmware command, the response_flags variable
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
@@ -1129,7 +1134,8 @@ int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = cmd_flags;
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!err && response_flags)
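
Concretely, a cmd_flags of 0x0123 goes out as cmd_flags = 0x23 and offset_high = 0x01, matching the kdoc note above about the 16-bit value being split across two one-byte descriptor fields.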
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 856d1ad4398b..774c2317967d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -34,7 +34,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
int ice_nvm_validate_checksum(struct ice_hw *hw);
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags);
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
int ice_aq_nvm_update_empr(struct ice_hw *hw);
int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index f57c414bc0a9..82bc54fec7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -5,10 +5,18 @@
#define _ICE_OSDEP_H_
#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
+#include <net/udp_tunnel.h>
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
@@ -24,8 +32,8 @@ struct ice_dma_mem {
size_t size;
};
-#define ice_hw_to_dev(ptr) \
- (&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
+struct ice_hw;
+struct device *ice_hw_to_dev(struct ice_hw *hw);
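
Turning the macro into a declared function means this header no longer needs the full struct ice_pf definition to expand container_of(). The out-of-line body is not part of this hunk; presumably it keeps the same mapping, roughly:

    /* assumed ice_main.c counterpart; not shown in this diff */
    struct device *ice_hw_to_dev(struct ice_hw *hw)
    {
            struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

            return &pf->pdev->dev;
    }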
#ifdef CONFIG_DYNAMIC_DEBUG
#define ice_debug(hw, type, fmt, args...) \
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..976a03d3bdd5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_pf_vsi_vlan_ops.h"
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ }
+}
+
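[Editor's note: callers are expected to go through the per-VSI ops table rather than the ice_vsi_* helpers directly, so the DVM/SVM choice made above stays invisible to them. A minimal sketch of such a call site; the struct ice_vlan initializer is an assumption, not part of this patch:

	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vlan vlan = { .tpid = ETH_P_8021Q, .vid = 100 };	/* assumed fields */

	/* same outer/inner selection that ice_pf_vsi_init_vlan_ops made */
	vlan_ops = ice_is_dvm_ena(&vsi->back->hw) ? &vsi->outer_vlan_ops
						  : &vsi->inner_vlan_ops;
	if (vlan_ops->add_vlan(vsi, &vlan))
		/* handle failure */;
]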
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..6741ec8c5f6b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_PF_VSI_VLAN_OPS_H_
+#define _ICE_PF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index dc1b0e9e6df5..02a4e1cf624e 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -29,6 +29,7 @@ enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
+ ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
@@ -40,6 +41,12 @@ enum ice_protocol_type {
ICE_VXLAN,
ICE_GENEVE,
ICE_NVGRE,
+ ICE_GTP,
+ ICE_GTP_NO_PAY,
+ ICE_PPPOE,
+ ICE_L2TPV3,
+ ICE_VLAN_EX,
+ ICE_VLAN_IN,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -47,9 +54,12 @@ enum ice_protocol_type {
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
+ ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
+ ICE_SW_TUN_GTPU,
+ ICE_SW_TUN_GTPC,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -91,6 +101,7 @@ enum ice_prot_id {
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@@ -100,15 +111,22 @@ enum ice_prot_id {
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
+#define ICE_PPPOE_HW 103
+#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
-#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
+
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_VLAN_FLAG_MDID 20
+#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+
#define ICE_TUN_FLAG_FV_IND 2
/* Mapping of software defined protocol ID to hardware defined protocol ID */
@@ -179,6 +197,33 @@ struct ice_udp_tnl_hdr {
__be32 vni; /* only use lower 24-bits */
};
+struct ice_udp_gtp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 rsrvd_len;
+ __be32 teid;
+ __be16 rsrvd_seq_nbr;
+ u8 rsrvd_n_pdu_nbr;
+ u8 rsrvd_next_ext;
+ u8 rsvrd_ext_len;
+ u8 pdu_type;
+ u8 qfi;
+ u8 rsvrd;
+};
+
+struct ice_pppoe_hdr {
+ u8 rsrvd_ver_type;
+ u8 rsrvd_code;
+ __be16 session_id;
+ __be16 length;
+ __be16 ppp_prot_id; /* control and data only */
+};
+
+struct ice_l2tpv3_sess_hdr {
+ __be32 session_id;
+ __be64 cookie;
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -195,6 +240,9 @@ union ice_prot_hdr {
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
+ struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pppoe_hdr pppoe_hdr;
+ struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ae291d442539..011b727ab190 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_trace.h"
#define E810_OUT_PROP_DELAY_NS 1
@@ -490,46 +491,6 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
}
/**
- * ice_ptp_update_cached_phctime - Update the cached PHC time values
- * @pf: Board specific private structure
- *
- * This function updates the system time values which are cached in the PF
- * structure and the Rx rings.
- *
- * This function must be called periodically to ensure that the cached value
- * is never more than 2 seconds old. It must also be called whenever the PHC
- * time has been changed.
- */
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
-{
- u64 systime;
- int i;
-
- /* Read the current PHC time */
- systime = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* Update the cached PHC time stored in the PF structure */
- WRITE_ONCE(pf->ptp.cached_phc_time, systime);
-
- ice_for_each_vsi(pf, i) {
- struct ice_vsi *vsi = pf->vsi[i];
- int j;
-
- if (!vsi)
- continue;
-
- if (vsi->type != ICE_VSI_PF)
- continue;
-
- ice_for_each_rxq(vsi, j) {
- if (!vsi->rx_rings[j])
- continue;
- WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
- }
- }
-}
-
-/**
* ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
* @cached_phc_time: recently cached copy of PHC time
* @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
@@ -625,12 +586,400 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
const u64 mask = GENMASK_ULL(31, 0);
+ unsigned long discard_time;
+
+ /* Discard the hardware timestamp if the cached PHC time is too old */
+ discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (time_is_before_jiffies(discard_time)) {
+ pf->ptp.tx_hwtstamp_discarded++;
+ return 0;
+ }
return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
(in_tstamp >> 8) & mask);
}
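[Editor's note: for reference, ice_ptp_extend_32b_ts (unchanged by this hunk) rebuilds the full 64-bit nanosecond value by comparing the 32-bit sample against the low word of the cached PHC time. A simplified sketch of that extension:

	u32 phc_lo = (u32)cached_phc_time;
	u32 delta = in_tstamp - phc_lo;	/* unsigned wrap-around math */
	u64 ns;

	if (delta > (U32_MAX / 2))
		/* the sample predates the cached PHC time */
		ns = cached_phc_time - (phc_lo - in_tstamp);
	else
		ns = cached_phc_time + delta;

Half the 32-bit nanosecond range is about 2.1 seconds, which is why the new code above discards timestamps once cached_phc_jiffies is more than 2000 ms old: past that point the extension can no longer disambiguate the wrap.]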
/**
+ * ice_ptp_tx_tstamp - Process Tx timestamps for a port
+ * @tx: the PTP Tx timestamp tracker
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) copy the timestamp out of the PHY register
+ * 2) clear the timestamp valid bit in the PHY register
+ * 3) unlock the index by clearing the associated in_use bit
+ * 4) extend the 40b timestamp value to get a 64bit timestamp
+ * 5) send that timestamp to the stack
+ *
+ * After looping, if we still have waiting SKBs, return true. This may cause us
+ * to effectively poll even when not strictly necessary. We do this because it's
+ * possible a new timestamp was requested around the same time as the interrupt.
+ * In some cases hardware might not interrupt us again when the timestamp is
+ * captured.
+ *
+ * Note that we only take the tracking lock when clearing the bit and when
+ * checking if we need to re-queue this task. The only place where bits can be
+ * set is the hard xmit routine where an SKB has a request flag set. The only
+ * places where we clear bits are this work function, or the periodic cleanup
+ * thread. If the cleanup thread clears a bit we're processing we catch it
+ * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
+ * starts a new timestamp, we might not begin processing it right away but we
+ * will notice it at the end when we re-queue the task. If a Tx thread starts
+ * a new timestamp just after this function exits without re-queuing,
+ * the interrupt when the timestamp finishes should trigger. Avoiding holding
+ * the lock for the entire function is important in order to ensure that Tx
+ * threads do not get blocked while waiting for the lock.
+ */
+static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+{
+ struct ice_ptp_port *ptp_port;
+ bool ts_handled = true;
+ struct ice_pf *pf;
+ u8 idx;
+
+ if (!tx->init)
+ return false;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->quad_offset;
+ u64 raw_tstamp, tstamp;
+ struct sk_buff *skb;
+ int err;
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ err = ice_read_phy_tstamp(&pf->hw, tx->quad, phy_idx,
+ &raw_tstamp);
+ if (err)
+ continue;
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ continue;
+
+ /* The timestamp is valid, so we'll go ahead and clear this
+ * index and then send the timestamp up to the stack.
+ */
+ spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ spin_unlock(&tx->lock);
+
+ /* it's (unlikely but) possible we raced with the cleanup
+ * thread for discarding old timestamp requests.
+ */
+ if (!skb)
+ continue;
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ if (!bitmap_empty(tx->in_use, tx->len))
+ ts_handled = false;
+ spin_unlock(&tx->lock);
+
+ return ts_handled;
+}
+
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call directly;
+ * use ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
+ if (!tx->tstamps)
+ return -ENOMEM;
+
+ tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ if (!tx->in_use) {
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&tx->lock);
+
+ tx->init = 1;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ for (idx = 0; idx < tx->len; idx++) {
+ u8 phy_idx = idx + tx->quad_offset;
+
+ spin_lock(&tx->lock);
+ if (tx->tstamps[idx].skb) {
+ dev_kfree_skb_any(tx->tstamps[idx].skb);
+ tx->tstamps[idx].skb = NULL;
+ pf->ptp.tx_hwtstamp_flushed++;
+ }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->init = 0;
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+ bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
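[Editor's note: a worked example of the new index math, assuming ICE_PORTS_PER_QUAD is 4 and INDEX_PER_PORT is 16 (values assumed, not shown in this patch). For ports 4 and 5, both in quad 1:

	/* old: tx->quad_offset = tx->quad * INDEX_PER_PORT
	 *   port 4 -> quad 1, offset 16
	 *   port 5 -> quad 1, offset 16   (collision!)
	 * new: tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT
	 *   port 4 -> quad 1, offset 0
	 *   port 5 -> quad 1, offset 16
	 */

The removed variant further below derived the offset from the quad number rather than the port's position within the quad, so ports sharing a quad collided on the same timestamp indexes.]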
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->quad = pf->hw.port_info->lport;
+ tx->quad_offset = 0;
+ tx->len = INDEX_PER_QUAD;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @pf: pointer to the PF struct
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+ * waiting for a long time. Discard any SKBs that have been waiting for more
+ * than 2 seconds. This is long enough to be reasonably sure that the
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ struct ice_hw *hw = &pf->hw;
+ u8 idx;
+
+ if (!tx->init)
+ return;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
+ u64 raw_tstamp;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
+ /* Read tstamp to be able to use this register again */
+ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+ &raw_tstamp);
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Count the number of Tx timestamps which have timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
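[Editor's note: the 2-second expiry test above reads a little backwards. time_is_after_jiffies(a) expands to time_after(a, jiffies), a wrap-safe signed comparison, so the condition means "the deadline is still in the future". A standalone sketch of the same check:

	unsigned long deadline = start + 2 * HZ;	/* 'start' recorded at request time */

	if (time_is_after_jiffies(deadline))
		return false;	/* not yet expired, keep the skb */
	return true;		/* waited more than 2 s, safe to discard */
]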
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Note that the cached copy in the PF PTP structure is always updated, even
+ * if we can't update the copy in the Rx rings.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ unsigned long update_before;
+ u64 systime;
+ int i;
+
+ update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (pf->ptp.cached_phc_time &&
+ time_is_before_jiffies(update_before)) {
+ unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
+
+ dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
+ jiffies_to_msecs(time_taken));
+ pf->ptp.late_cached_phc_updates++;
+ }
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+ WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
+
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
+ * @pf: Board specific private structure
+ *
+ * This function must be called when the cached PHC time is no longer valid,
+ * such as after a time adjustment. It discards any outstanding Tx timestamps,
+ * and updates the cached PHC time for both the PF and Rx rings. If updating
+ * the PHC time cannot be done immediately, a warning message is logged and
+ * the work item is scheduled.
+ *
+ * These steps are required in order to ensure that we do not accidentally
+ * report a timestamp extended by the wrong cached PHC copy. Note that we
+ * do not directly update the cached timestamp here because it is possible
+ * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we
+ * would have to try again. During that time window, timestamps might be
+ * requested and returned with an invalid extension. Thus, on failure to
+ * immediately update the cached PHC time we would need to zero the value
+ * anyway. For this reason, we just zero the value immediately and queue the
+ * update work item.
+ */
+static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Update the cached PHC time immediately if possible, otherwise
+ * schedule the work item to execute soon.
+ */
+ err = ice_ptp_update_cached_phctime(pf);
+ if (err) {
+ /* If another thread is updating the Rx rings, we won't
+ * properly reset them here. This could lead to reporting of
+ * invalid timestamps, but there isn't much we can do.
+ */
+ dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
+ __func__);
+
+ /* Queue the work item to update the Rx rings when possible */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
+ msecs_to_jiffies(10));
+ }
+
+ /* Flush any outstanding Tx timestamps */
+ ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
+}
+
+/**
* ice_ptp_read_time - Read the time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
@@ -889,6 +1238,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
+ if (ice_is_reset_in_progress(pf->state))
+ return;
+
if (ice_ptp_check_offset_valid(port)) {
/* Offsets not ready yet, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1091,9 +1443,8 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- u64 freq, divisor = 1000000ULL;
struct ice_hw *hw = &pf->hw;
- s64 incval, diff;
+ u64 incval, diff;
int neg_adj = 0;
int err;
@@ -1104,17 +1455,8 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
- /* handle overflow by scaling down the scaled_ppm and
- * the divisor, losing some precision
- */
- scaled_ppm >>= 2;
- divisor >>= 2;
- }
-
- freq = (incval * (u64)scaled_ppm) >> 16;
- diff = div_u64(freq, divisor);
-
+ diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
incval -= diff;
else
@@ -1508,7 +1850,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_unlock(hw);
if (!err)
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
@@ -1533,9 +1875,12 @@ exit:
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
struct timespec64 now, then;
+ int ret;
then = ns_to_timespec64(delta);
- ice_ptp_gettimex64(info, &now, NULL);
+ ret = ice_ptp_gettimex64(info, &now, NULL);
+ if (ret)
+ return ret;
now = timespec64_add(now, then);
return ice_ptp_settime64(info, (const struct timespec64 *)&now);
@@ -1584,7 +1929,7 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return err;
}
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
return 0;
}
@@ -1792,26 +2137,31 @@ void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
+ struct skb_shared_hwtstamps *hwtstamps;
+ u64 ts_ns, cached_time;
u32 ts_high;
- u64 ts_ns;
- /* Populate timesync data into skb */
- if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
- struct skb_shared_hwtstamps *hwtstamps;
+ if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
+ return;
- /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
- * cached PHC value, rather than accessing the PF. This also
- * allows us to simply pass the upper 32bits of nanoseconds
- * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
- * it would just discard these bits itself.
- */
- ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
- ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+ cached_time = READ_ONCE(rx_ring->cached_phctime);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
- }
+ /* Do not report a timestamp if we don't have a cached PHC time */
+ if (!cached_time)
+ return;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
+ * PHC value, rather than accessing the PF. This also allows us to
+ * simply pass the upper 32bits of nanoseconds directly. Calling
+ * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
+ * bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}
/**
@@ -1867,41 +2217,26 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
static void
-ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- /* Check if SMA controller is in the netlist */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
- !ice_is_pca9575_present(&pf->hw))
- ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
-
- if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
- info->n_per_out = N_PER_OUT_E810T_NO_SMA;
- return;
- }
+ info->n_per_out = N_PER_OUT_E810;
- info->n_per_out = N_PER_OUT_E810T;
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ info->n_ext_ts = N_EXT_TS_E810;
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
-}
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
-/**
- * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
- * @info: PTP clock capabilities
- */
-static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
-{
- info->n_per_out = N_PER_OUT_E810;
- info->n_ext_ts = N_EXT_TS_E810;
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
}
/**
@@ -1938,11 +2273,7 @@ static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
-
- if (ice_is_e810t(&pf->hw))
- ice_ptp_setup_pins_e810t(pf, info);
- else
- ice_ptp_setup_pins_e810(info);
+ ice_ptp_setup_pins_e810(pf, info);
}
/**
@@ -2004,106 +2335,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
}
/**
- * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
- * @work: pointer to the kthread_work struct
- *
- * Process timestamps captured by the PHY associated with this port. To do
- * this, loop over each index with a waiting skb.
- *
- * If a given index has a valid timestamp, perform the following steps:
- *
- * 1) copy the timestamp out of the PHY register
- * 4) clear the timestamp valid bit in the PHY register
- * 5) unlock the index by clearing the associated in_use bit.
- * 2) extend the 40b timestamp value to get a 64bit timestamp
- * 3) send that timestamp to the stack
- *
- * After looping, if we still have waiting SKBs, then re-queue the work. This
- * may cause us effectively poll even when not strictly necessary. We do this
- * because it's possible a new timestamp was requested around the same time as
- * the interrupt. In some cases hardware might not interrupt us again when the
- * timestamp is captured.
- *
- * Note that we only take the tracking lock when clearing the bit and when
- * checking if we need to re-queue this task. The only place where bits can be
- * set is the hard xmit routine where an SKB has a request flag set. The only
- * places where we clear bits are this work function, or the periodic cleanup
- * thread. If the cleanup thread clears a bit we're processing we catch it
- * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
- * starts a new timestamp, we might not begin processing it right away but we
- * will notice it at the end when we re-queue the work item. If a Tx thread
- * starts a new timestamp just after this function exits without re-queuing,
- * the interrupt when the timestamp finishes should trigger. Avoiding holding
- * the lock for the entire function is important in order to ensure that Tx
- * threads do not get blocked while waiting for the lock.
- */
-static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
-{
- struct ice_ptp_port *ptp_port;
- struct ice_ptp_tx *tx;
- struct ice_pf *pf;
- struct ice_hw *hw;
- u8 idx;
-
- tx = container_of(work, struct ice_ptp_tx, work);
- if (!tx->init)
- return;
-
- ptp_port = container_of(tx, struct ice_ptp_port, tx);
- pf = ptp_port_to_pf(ptp_port);
- hw = &pf->hw;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct skb_shared_hwtstamps shhwtstamps = {};
- u8 phy_idx = idx + tx->quad_offset;
- u64 raw_tstamp, tstamp;
- struct sk_buff *skb;
- int err;
-
- err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
- &raw_tstamp);
- if (err)
- continue;
-
- /* Check if the timestamp is invalid or stale */
- if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
- continue;
-
- /* The timestamp is valid, so we'll go ahead and clear this
- * index and then send the timestamp up to the stack.
- */
- spin_lock(&tx->lock);
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
- clear_bit(idx, tx->in_use);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- spin_unlock(&tx->lock);
-
- /* it's (unlikely but) possible we raced with the cleanup
- * thread for discarding old timestamp requests.
- */
- if (!skb)
- continue;
-
- /* Extend the timestamp using cached PHC time */
- tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
- shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
-
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
- }
-
- /* Check if we still have work to do. If so, re-queue this task to
- * poll for remaining timestamps.
- */
- spin_lock(&tx->lock);
- if (!bitmap_empty(tx->in_use, tx->len))
- kthread_queue_work(pf->ptp.kworker, &tx->work);
- spin_unlock(&tx->lock);
-}
-
-/**
* ice_ptp_request_ts - Request an available Tx timestamp index
* @tx: the PTP Tx timestamp tracker to request from
* @skb: the SKB to associate with this timestamp request
@@ -2128,6 +2359,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
tx->tstamps[idx].start = jiffies;
tx->tstamps[idx].skb = skb_get(skb);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ ice_trace(tx_tstamp_request, skb, idx);
}
spin_unlock(&tx->lock);
@@ -2142,188 +2374,35 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
}
/**
- * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * ice_ptp_process_ts - Process the PTP Tx timestamps
* @pf: Board private structure
*
- * Queue work required to process the PTP Tx timestamps outside of interrupt
- * context.
+ * Returns true if timestamps are processed.
*/
-void ice_ptp_process_ts(struct ice_pf *pf)
+bool ice_ptp_process_ts(struct ice_pf *pf)
{
if (pf->ptp.port.tx.init)
- kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
-}
-
-/**
- * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
- * @tx: Tx tracking structure to initialize
- *
- * Assumes that the length has already been initialized. Do not call directly,
- * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
- */
-static int
-ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
-{
- tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
- if (!tx->tstamps)
- return -ENOMEM;
-
- tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
- if (!tx->in_use) {
- kfree(tx->tstamps);
- tx->tstamps = NULL;
- return -ENOMEM;
- }
-
- spin_lock_init(&tx->lock);
- kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
-
- tx->init = 1;
-
- return 0;
-}
-
-/**
- * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
- * @pf: Board private structure
- * @tx: the tracker to flush
- */
-static void
-ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- u8 idx;
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
- for (idx = 0; idx < tx->len; idx++) {
- u8 phy_idx = idx + tx->quad_offset;
-
- spin_lock(&tx->lock);
- if (tx->tstamps[idx].skb) {
- dev_kfree_skb_any(tx->tstamps[idx].skb);
- tx->tstamps[idx].skb = NULL;
- }
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
- }
-}
-
-/**
- * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
- * @pf: Board private structure
- * @tx: Tx tracking structure to release
- *
- * Free memory associated with the Tx timestamp tracker.
- */
-static void
-ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->init = 0;
-
- kthread_cancel_work_sync(&tx->work);
-
- ice_ptp_flush_tx_tracker(pf, tx);
-
- kfree(tx->tstamps);
- tx->tstamps = NULL;
-
- bitmap_free(tx->in_use);
- tx->in_use = NULL;
-
- tx->len = 0;
-}
-
-/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
- * the timestamp block is shared for all ports in the same quad. To avoid
- * ports using the same timestamp index, logically break the block of
- * registers into chunks based on the port number.
- */
-static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
-{
- tx->quad = port / ICE_PORTS_PER_QUAD;
- tx->quad_offset = tx->quad * INDEX_PER_PORT;
- tx->len = INDEX_PER_PORT;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- *
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
- */
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->quad = pf->hw.port_info->lport;
- tx->quad_offset = 0;
- tx->len = INDEX_PER_QUAD;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
- * @tx: PTP Tx tracker to clean up
- *
- * Loop through the Tx timestamp requests and see if any of them have been
- * waiting for a long time. Discard any SKBs that have been waiting for more
- * than 2 seconds. This is long enough to be reasonably sure that the
- * timestamp will never be captured. This might happen if the packet gets
- * discarded before it reaches the PHY timestamping block.
- */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- if (!tx->init)
- return;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct sk_buff *skb;
-
- /* Check if this SKB has been waiting for too long */
- if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
- continue;
-
- spin_lock(&tx->lock);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Free the SKB after we've cleared the bit */
- dev_kfree_skb_any(skb);
- }
+ return false;
}
static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ int err;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
- ice_ptp_update_cached_phctime(pf);
+ err = ice_ptp_update_cached_phctime(pf);
- ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+ ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);
- /* Run twice a second */
+ /* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
- msecs_to_jiffies(500));
+ msecs_to_jiffies(err ? 10 : 500));
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index afd048d69959..028349295b71 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -49,6 +49,37 @@ struct ice_perout_channel {
* To allow multiple ports to access the shared register block independently,
* the blocks are split up so that indexes are assigned to each port based on
* hardware logical port number.
+ *
+ * The timestamp blocks are handled differently for E810- and E822-based
+ * devices. On E810 devices, each port has its own block of timestamps, while on
+ * E822 the block of registers must be logically broken into smaller chunks
+ * based on the port number to avoid collisions.
+ *
+ * Example for port 5 in E810:
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * |register|register|register|register|register|register|register|register|
+ * | block | block | block | block | block | block | block | block |
+ * | for | for | for | for | for | for | for | for |
+ * | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * ^^
+ * ||
+ * |--- quad offset is always 0
+ * ---- quad number
+ *
+ * Example for port 5 in E822:
+ * +-----------------------------+-----------------------------+
+ * | register block for quad 0 | register block for quad 1 |
+ * |+------+------+------+------+|+------+------+------+------+|
+ * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
+ * |+------+------+------+------+|+------+------+------+------+|
+ * +-----------------------------+-------^---------------------+
+ * ^ |
+ * | --- quad offset*
+ * ---- quad number
+ *
+ * * PHY port 5 is port 1 in quad 1
+ *
*/
/**
@@ -74,7 +105,6 @@ struct ice_tx_tstamp {
/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
- * @work: work function to handle processing of Tx timestamps
* @lock: lock to prevent concurrent write to in_use bitmap
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
@@ -86,7 +116,6 @@ struct ice_tx_tstamp {
* window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
- struct kthread_work work;
spinlock_t lock; /* lock protecting in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
@@ -132,6 +161,7 @@ struct ice_ptp_port {
* @work: delayed work function for periodic tasks
* @extts_work: work function for handling external Tx timestamps
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
* @ext_ts_chan: the external timestamp channel in use
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
@@ -140,12 +170,19 @@ struct ice_ptp_port {
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_skipped: number of Tx timestamp requests skipped
+ * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no timestamp
+ * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
+ * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time
+ * being too old to correctly extend timestamp
+ * @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
struct kthread_work extts_work;
u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
u8 ext_ts_chan;
u8 ext_ts_irq;
struct kthread_worker *kworker;
@@ -154,6 +191,11 @@ struct ice_ptp {
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
u64 reset_time;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_flushed;
+ u32 tx_hwtstamp_discarded;
+ u32 late_cached_phc_updates;
};
#define __ptp_port_to_ptp(p) \
@@ -193,8 +235,8 @@ struct ice_ptp {
#define N_EXT_TS_E810 3
#define N_PER_OUT_E810 4
#define N_PER_OUT_E810T 3
-#define N_PER_OUT_E810T_NO_SMA 2
-#define N_EXT_TS_E810_NO_SMA 2
+#define N_PER_OUT_NO_SMA_E810T 2
+#define N_EXT_TS_NO_SMA_E810T 2
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
@@ -205,7 +247,7 @@ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
-void ice_ptp_process_ts(struct ice_pf *pf);
+bool ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
@@ -238,7 +280,10 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
-static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline bool ice_ptp_process_ts(struct ice_pf *pf)
+{
+ return true;
+}
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec8450f034e6..772b1f566d6e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
+#include <linux/delay.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -2587,38 +2588,113 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
}
/**
- * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * ice_read_phy_tstamp_ll_e810 - Read the PHY timestamp registers through the FW
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @hi: 8-bit timestamp high value
+ * @lo: 32-bit timestamp low value
+ *
+ * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the low latency
+ * timestamp read.
+ */
+static int
+ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
+{
+ u32 val;
+ u8 i;
+
+ /* Tell the FW which TS index to read by writing it to the PF register */
+ val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
+ wr32(hw, PF_SB_ATQBAL, val);
+
+ /* Read the register repeatedly until the FW provides us the TS */
+ for (i = TS_LL_READ_RETRIES; i > 0; i--) {
+ val = rd32(hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (!(FIELD_GET(TS_LL_READ_TS, val))) {
+ /* The high 8 bits of the TS are in bits 23:16 */
+ *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ /* FW failed to provide the TS in time */
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ return -EINVAL;
+}
+
+/**
+ * ice_read_phy_tstamp_sbq_e810 - Read the PHY timestamp registers through the sbq
* @hw: pointer to the HW struct
* @lport: the lport to read from
* @idx: the timestamp index to read
- * @tstamp: on return, the 40bit timestamp value
+ * @hi: 8-bit timestamp high value
+ * @lo: 32-bit timestamp low value
*
- * Read a 40bit timestamp value out of the timestamp block of the external PHY
- * on the E810 device.
+ * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of
+ * the timestamp block of the external PHY on the E810 device using the
+ * sideband queue.
*/
static int
-ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
+ u32 *lo)
{
- u32 lo_addr, hi_addr, lo, hi;
+ u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_val, hi_val;
int err;
- lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
- hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
-
- err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
return err;
}
+ *lo = lo_val;
+ *hi = (u8)hi_val;
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static int
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+ u32 lo = 0;
+ u8 hi = 0;
+ int err;
+
+ if (hw->dev_caps.ts_dev_info.ts_ll_read)
+ err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
+ else
+ err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
+
+ if (err)
+ return err;
+
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
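[Editor's note: the remainder of the function, outside this hunk, combines the two parts. Given the TS_HIGH_S and TS_LOW_M definitions added to ice_ptp_hw.h below, the combination is effectively:

	*tstamp = ((u64)hi << TS_HIGH_S) | ((u64)lo & TS_LOW_M);
]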
@@ -3251,6 +3327,37 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
+ * ice_read_pca9575_reg_e810t
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_is_pca9575_present
* @hw: pointer to the hw struct
*
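[Editor's note: a plausible consumer of the new helper, using the PCA9575 register and pin definitions added to ice_ptp_hw.h later in this patch. This is a sketch only; the _N suffix suggests the GNSS-present pin is active-low:

	u8 data;

	if (!ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data) &&
	    !(data & ICE_E810T_P0_GNSS_PRSNT_N))
		dev_info(ice_hw_to_dev(hw), "GNSS module present\n");
]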
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 519e75462e67..2bda64c76abc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -191,6 +191,7 @@ int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -401,6 +402,7 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
+#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
@@ -412,6 +414,12 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
+/* Tx timestamp low latency read definitions */
+#define TS_LL_READ_RETRIES 200
+#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
+#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS BIT(31)
+
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
@@ -443,4 +451,10 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define ICE_SMA_MAX_BIT_E810T 7
#define ICE_PCA9575_P1_OFFSET 8
+/* E810T PCA9575 IO controller registers */
+#define ICE_PCA9575_P0_IN 0x0
+
+/* E810T PCA9575 IO controller pin control */
+#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index dcc310e29300..bd31748aae1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
#include "ice_tc_lib.h"
/**
@@ -142,6 +142,59 @@ ice_repr_get_devlink_port(struct net_device *netdev)
return &repr->vf->devlink_port;
}
+/**
+ * ice_repr_sp_stats64 - get slow path stats for port representor
+ * @dev: network interface device structure
+ * @stats: netlink stats structure
+ *
+ * RX/TX stats are being swapped here to be consistent with VF stats. In the
+ * slow path, the port representor receives data when the corresponding VF is
+ * sending it (and vice versa), so TX and RX bytes/packets are effectively
+ * swapped on the port representor.
+ */
+static int
+ice_repr_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ int vf_id = np->repr->vf->vf_id;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ u64 pkts, bytes;
+
+ tx_ring = np->vsi->tx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+ &pkts, &bytes);
+ stats->rx_packets = pkts;
+ stats->rx_bytes = bytes;
+
+ rx_ring = np->vsi->rx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+ &pkts, &bytes);
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
+ rx_ring->rx_stats.alloc_buf_failed;
+
+ return 0;
+}
+
+static bool
+ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
+}
+
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
struct flow_cls_offload *flower)
@@ -199,6 +252,8 @@ static const struct net_device_ops ice_repr_netdev_ops = {
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_get_devlink_port = ice_repr_get_devlink_port,
.ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
/**
@@ -238,8 +293,13 @@ static int ice_repr_add(struct ice_vf *vf)
struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
+ struct ice_vsi *vsi;
int err;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
return -ENOMEM;
@@ -258,7 +318,7 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc;
}
- repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->src_vsi = vsi;
repr->vf = vf;
vf->repr = repr;
np = netdev_priv(repr->netdev);
@@ -278,12 +338,15 @@ static int ice_repr_add(struct ice_vf *vf)
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
err = ice_repr_reg_netdev(repr->netdev);
if (err)
goto err_netdev;
devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+ ice_virtchnl_set_repr_ops(vf);
+
return 0;
err_netdev:
@@ -311,10 +374,13 @@ err_alloc_rule:
*/
static void ice_repr_rem(struct ice_vf *vf)
{
- ice_devlink_destroy_vf_port(vf);
+ if (!vf->repr)
+ return;
+
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
unregister_netdev(vf->repr->netdev);
+ ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
@@ -323,6 +389,23 @@ static void ice_repr_rem(struct ice_vf *vf)
#endif
kfree(vf->repr);
vf->repr = NULL;
+
+ ice_virtchnl_set_dflt_ops(vf);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ ice_repr_rem(vf);
}
/**
@@ -331,49 +414,27 @@ static void ice_repr_rem(struct ice_vf *vf)
*/
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
+ struct ice_vf *vf;
+ unsigned int bkt;
int err;
- int i;
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ lockdep_assert_held(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
err = ice_repr_add(vf);
if (err)
goto err;
-
- ice_vc_change_ops_to_repr(&vf->vc_ops);
}
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
+ ice_repr_rem_from_all_vfs(pf);
return err;
}
/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
-}
-
-/**
* ice_repr_start_tx_queues - start Tx queues of port representor
* @repr: pointer to repr structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 0c77ff050d15..378a45bfa256 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -5,7 +5,6 @@
#define _ICE_REPR_H_
#include <net/dst_metadata.h>
-#include "ice.h"
struct ice_repr {
struct ice_vsi *src_vsi;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7947223536e3..118595763bba 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1290,7 +1290,7 @@ err_init_port:
pi->root = NULL;
}
- devm_kfree(ice_hw_to_dev(hw), buf);
+ kfree(buf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 52c6bac41bf7..3ba1408c56a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1,532 +1,1895 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
-#include "ice_common.h"
-#include "ice_sriov.h"
+#include "ice.h"
+#include "ice_vf_lib_private.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
+#include "ice_flow.h"
+#include "ice_eswitch.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
/**
- * ice_aq_send_msg_to_vf
- * @hw: pointer to the hardware structure
- * @vfid: VF ID to send msg
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cd: pointer to command details
+ * ice_free_vf_entries - Free all VF entries from the hash table
+ * @pf: pointer to the PF structure
*
- * Send message to VF driver (0x0802) using mailbox
- * queue and asynchronously sending message via
- * ice_sq_send_cmd() function
+ * Iterate over the VF hash table, removing and releasing all VF entries.
+ * Called during VF teardown or as cleanup during failed VF initialization.
*/
-int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+static void ice_free_vf_entries(struct ice_pf *pf)
+{
+ struct ice_vfs *vfs = &pf->vfs;
+ struct hlist_node *tmp;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ /* Remove all VFs from the hash table and release their main
+ * reference. Once all references to the VF are dropped, ice_put_vf()
+ * will call ice_release_vf which will remove the VF memory.
+ */
+ lockdep_assert_held(&vfs->table_lock);
+
+ hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ hash_del_rcu(&vf->entry);
+ ice_put_vf(vf);
+ }
+}
+
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: invalidate this VF's VSI after freeing it
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_release(vsi);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
{
- struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct ice_pf *pf = vf->pf;
+ int i, last_vector_idx;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+ /* First, disable VF's configuration API to prevent OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_vf_fdir_exit(vf);
+ /* free VF control VSI */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ /* free VSI and disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
+ vf->num_mac = 0;
+ }
+
+ last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+ wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int first, last, v;
+ struct ice_hw *hw;
- cmd = &desc.params.virt;
- cmd->id = cpu_to_le32(vfid);
+ hw = &pf->hw;
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return;
- desc.cookie_high = cpu_to_le32(v_opcode);
- desc.cookie_low = cpu_to_le32(v_retval);
+ dev = ice_pf_to_dev(pf);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
- if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ first = vf->first_vector_idx;
+ last = first + pf->vfs.num_msix_per - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
- return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
/**
- * ice_conv_link_speed_to_virtchnl
- * @adv_link_support: determines the format of the returned link speed
- * @link_speed: variable containing the link_speed to be converted
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
*
- * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
- * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
- * needs to cast back to an enum virtchnl_link_speed in the case where
- * adv_link_support is false, but when adv_link_support is true the caller can
- * expect the speed in Mbps.
+ * Since no MSIX entries are taken from the pf->irq_tracker then just clear
+ * the pf->sriov_base_vector.
+ *
+ * Returns 0 on success, and -EINVAL on error.
*/
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
- u32 speed;
+ struct ice_res_tracker *res;
- if (adv_link_support)
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- speed = ICE_LINK_SPEED_10MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100MB:
- speed = ICE_LINK_SPEED_100MBPS;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- speed = ICE_LINK_SPEED_1000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- speed = ICE_LINK_SPEED_2500MBPS;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- speed = ICE_LINK_SPEED_5000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = ICE_LINK_SPEED_10000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = ICE_LINK_SPEED_20000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = ICE_LINK_SPEED_25000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- speed = ICE_LINK_SPEED_40000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- speed = ICE_LINK_SPEED_50000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100GB:
- speed = ICE_LINK_SPEED_100000MBPS;
- break;
- default:
- speed = ICE_LINK_SPEED_UNKNOWN;
- break;
- }
+ if (!pf)
+ return -EINVAL;
+
+ res = pf->irq_tracker;
+ if (!res)
+ return -EINVAL;
+
+ /* sanity check: the SR-IOV base vector must not overlap irq_tracker entries */
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
+
+ pf->sriov_base_vector = 0;
+
+ return 0;
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ if (!ice_has_vfs(pf))
+ return;
+
+ while (test_and_set_bit(ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
else
- /* Virtchnl speeds are not defined for every speed supported in
- * the hardware. To maintain compatibility with older AVF
- * drivers, while reporting the speed the new speed values are
- * resolved to the closest known virtchnl speeds
- */
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- case ICE_AQ_LINK_SPEED_100MB:
- speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- case ICE_AQ_LINK_SPEED_2500MB:
- case ICE_AQ_LINK_SPEED_5GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- case ICE_AQ_LINK_SPEED_50GB:
- case ICE_AQ_LINK_SPEED_100GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
- break;
- default:
- speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
- break;
+ dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ mutex_lock(&vfs->table_lock);
+
+ ice_eswitch_release(pf);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ /* disable VF qp mappings and set VF disable state */
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
+ }
+
+ if (!pci_vfs_assigned(pf->pdev)) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- return speed;
+ /* clear malicious info since the VF is getting released */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
+
+ vfs->num_qps_per = 0;
+ ice_free_vf_entries(pf);
+
+ mutex_unlock(&vfs->table_lock);
+
+ clear_bit(ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
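
The GLGEN_VFLRSTAT write-to-clear above packs one bit per absolute VF ID into consecutive 32-bit registers; the same reg_idx/bit_idx arithmetic reappears in ice_process_vflr_event() further down. A standalone sketch of the indexing, using a hypothetical vf_base_id:

#include <stdio.h>

int main(void)
{
	/* vf_base_id is an assumed value; the register holds 32 VFs per word */
	unsigned int vf_base_id = 16, vf_id = 20;
	unsigned int abs_id = vf_base_id + vf_id;	/* 36 */

	printf("reg_idx=%u bit_idx=%u\n", abs_id / 32, abs_id % 32); /* 1, 4 */
	return 0;
}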
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @vf: VF to setup VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
+}
+
+/**
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
+ *
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment by
+ * 1 to avoid meddling with the OICR index.
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
+}
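
For illustration, a standalone model of this arithmetic (all constants hypothetical, not taken from the driver): with a base of 96 and 5 MSI-X vectors per VF, consecutive VFs get consecutive blocks of 5 vectors.

#include <stdio.h>

/* Illustrative model of ice_calc_vf_first_vector_idx() */
static int first_vector_idx(int sriov_base_vector, int num_msix_per, int vf_id)
{
	return sriov_base_vector + vf_id * num_msix_per;
}

int main(void)
{
	int base = 96, per_vf = 5;	/* assumed values */
	int vf_id;

	for (vf_id = 0; vf_id < 4; vf_id++)
		printf("VF %d: first PF-space vector = %d\n",
		       vf_id, first_vector_idx(base, per_vf, vf_id));
	return 0;	/* prints 96, 101, 106, 111 */
}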
+
+/**
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
+ *
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values and other registers need 0-based values, which represent PF
+ * based values.
+ */
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
+{
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
+ struct ice_pf *pf = vf->pf;
+ int device_based_vf_id;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
+ /* map the interrupts to its functions */
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
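
To make the two index spaces concrete, here is a hypothetical worked model; msix_vector_first_id = 1 and vf_base_id = 16 are made-up offsets, chosen only to show how PF-relative values translate to device-global ones.

#include <stdio.h>

int main(void)
{
	/* All values below are assumptions for illustration only */
	int pf_based_first = 96;	/* vf->first_vector_idx */
	int num_msix_per = 5;		/* pf->vfs.num_msix_per */
	int msix_vector_first_id = 1;	/* device offset of vector 0 */
	int vf_base_id = 16;		/* device offset of VF 0 */
	int vf_id = 2;

	int dev_first = pf_based_first + msix_vector_first_id;
	int dev_last = dev_first + num_msix_per - 1;
	int dev_vf_id = vf_id + vf_base_id;

	printf("device-based MSI-X range: %d..%d, absolute VF id: %d\n",
	       dev_first, dev_last, dev_vf_id);	/* 97..101, 18 */
	return 0;
}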
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ if (WARN_ON(!vsi))
+ return;
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
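
The count-minus-one encoding noted in the comments above is easy to get wrong; a minimal sketch of the idea follows, with hypothetical shift/mask values rather than the real VPLAN_*_QBASE layout.

#include <stdio.h>

/* Hypothetical field layout for illustration: FIRSTQ in bits 0..10,
 * NUMQ in bits 16..23. The real register layout may differ.
 */
#define FIRSTQ_S  0
#define FIRSTQ_M  (0x7FF << FIRSTQ_S)
#define NUMQ_S    16
#define NUMQ_M    (0xFF << NUMQ_S)

static unsigned int encode_qbase(unsigned int firstq, unsigned int num_queues)
{
	/* NUMQ holds (number of queues - 1): 0 means 1 queue, 255 means 256 */
	return ((firstq << FIRSTQ_S) & FIRSTQ_M) |
	       (((num_queues - 1) << NUMQ_S) & NUMQ_M);
}

int main(void)
{
	unsigned int reg = encode_qbase(64, 16);

	printf("firstq=%u numq(decoded)=%u\n",
	       (reg & FIRSTQ_M) >> FIRSTQ_S,
	       ((reg & NUMQ_M) >> NUMQ_S) + 1);	/* 64, 16 */
	return 0;
}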
+
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf;
+
+ if (!vf || !q_vector)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* always add one to account for the OICR being the first MSIX */
+ return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
+ q_vector->v_idx + 1;
+}
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
+ *
+ * Start from the end of the ice_res_tracker and return right when we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates res->end,
+ * and it is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = res->num_entries - 1; i >= 0; i--)
+ if (res->list[i] & ICE_RES_VALID_BIT)
+ return i;
+
+ return 0;
}
-/* The mailbox overflow detection algorithm helps to check if there
- * is a possibility of a malicious VF transmitting too many MBX messages to the
- * PF.
- * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
- * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
- * The struct ice_mbx_snapshot helps to track and traverse a static window of
- * messages within the mailbox queue while looking for a malicious VF.
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
- * 2. When the caller starts processing its mailbox queue in response to an
- * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
- * the algorithm can be run for the first time for that interrupt. This can be
- * done via ice_mbx_reset_snapshot().
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * 3. For every message read by the caller from the MBX Queue, the caller must
- * call the detection algorithm's entry function ice_mbx_vf_state_handler().
- * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
- * filled as it is required to be passed to the algorithm.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
- * 4. Every time a message is read from the MBX queue, a VFId is received which
- * is passed to the state handler. The boolean output is_malvf of the state
- * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
- * whether this VF is malicious or not.
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
+ */
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
+ int sriov_base_vector;
+
+ sriov_base_vector = total_vectors - num_msix_needed;
+
+ /* make sure we only grab irq_tracker entries from the list end and
+ * that we have enough available MSIX vectors
+ */
+ if (sriov_base_vector < vectors_used)
+ return -EINVAL;
+
+ pf->sriov_base_vector = sriov_base_vector;
+
+ return 0;
+}
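
A compact model of the carve-from-the-end check, with invented vector totals:

#include <stdio.h>

/* Model of ice_sriov_set_msix_res(): returns the base vector, or -1 when
 * the SR-IOV block would collide with vectors already in the irq_tracker.
 * All numbers are hypothetical.
 */
static int set_msix_res(int total_vectors, int vectors_used, int needed)
{
	int base = total_vectors - needed;

	return base < vectors_used ? -1 : base;
}

int main(void)
{
	printf("%d\n", set_msix_res(128, 64, 40));	/* 88: fits */
	printf("%d\n", set_msix_res(128, 64, 80));	/* -1: would overlap */
	return 0;
}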
+
+/**
+ * ice_set_per_vf_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
*
- * 5. When a VF is identified to be malicious, the caller can send a message
- * to the system administrator. The caller can invoke ice_mbx_report_malvf()
- * to help determine if a malicious VF is to be reported or not. This function
- * requires the caller to maintain a global bitmap to track all malicious VFs
- * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
- * to be malicious by ice_mbx_vf_state_handler().
+ * First, determine HW interrupts from the common pool. If we allocate fewer VFs, we
+ * get more vectors and can enable more queues per VF. Note that this does not
+ * grab any vectors from the SW pool already allocated. Also note, that all
+ * vector counts include one for each VF's miscellaneous interrupt vector
+ * (i.e. OICR).
*
- * 6. The global bitmap maintained by PF can be cleared completely if PF is in
- * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
- * When a VF is shut down and brought back up, we assume that the new VF
- * brought up is not malicious and hence report it if found malicious.
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
*
- * 7. The function ice_mbx_reset_snapshot() is called to reset the information
- * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ * Second, determine number of queue pairs per VF by starting with a pre-defined
+ * maximum each VF supports. If this is not possible, then we adjust based on
+ * queue pairs available on the device.
*
- * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
- * when driver is unloaded.
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
+ * by each VF during VF initialization and reset.
*/
-#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
-/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
- * the max messages check must be ignored in the algorithm
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
+{
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
+ int msix_avail_per_vf, msix_avail_for_sriov;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ if (max_valid_res_idx < 0)
+ return -ENOSPC;
+
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
+ } else {
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ num_vfs);
+ return -ENOSPC;
+ }
+
+ num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_txq = 0;
+ else if (num_txq > avail_qs)
+ num_txq = rounddown_pow_of_two(avail_qs);
+
+ num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_rxq = 0;
+ else if (num_rxq > avail_qs)
+ num_rxq = rounddown_pow_of_two(avail_qs);
+
+ if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, num_vfs);
+ return -ENOSPC;
+ }
+
+ err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
+ if (err) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
+ num_vfs, err);
+ return err;
+ }
+
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
+ pf->vfs.num_msix_per = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
+
+ return 0;
+}
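
The tier selection and power-of-two rounding above can be modeled in a few lines. The 17/5/2 tiers come straight from the function comment; the value 3 used for the multi-queue minimum tier is an assumption for this sketch.

#include <stdio.h>

/* Illustrative model of the per-VF MSI-X tiering in ice_set_per_vf_res() */
static int pick_msix_per_vf(int avail_per_vf)
{
	if (avail_per_vf >= 17)		/* medium: 16 queue pairs */
		return 17;
	if (avail_per_vf >= 5)		/* small: 4 queue pairs */
		return 5;
	if (avail_per_vf >= 3)		/* assumed multi-queue minimum */
		return 3;
	if (avail_per_vf >= 2)		/* minimum: 1 queue pair */
		return 2;
	return -1;			/* not enough for even one VF */
}

/* rounddown_pow_of_two() equivalent for positive values */
static unsigned int rounddown_p2(unsigned int x)
{
	unsigned int p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

int main(void)
{
	printf("%d\n", pick_msix_per_vf(300 / 16));	/* 18 avail -> 17 */
	printf("%u\n", rounddown_p2(13));		/* 8 */
	return 0;
}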
+
+/**
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
+ *
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
+ * VF VSI's broadcast filter. It is only used during initial VF creation.
*/
-#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+ vf->vf_id, err);
+ goto release_vsi;
+ }
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ vf->num_mac = 1;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
/**
- * ice_mbx_traverse - Pass through mailbox snapshot
- * @hw: pointer to the HW struct
- * @new_state: new algorithm state
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ unsigned int bkt, it_cnt;
+ struct ice_vf *vf;
+ int retval;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ it_cnt = 0;
+ ice_for_each_vf(pf, bkt, vf) {
+ vf->vf_ops->clear_reset_trigger(vf);
+
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ it_cnt++;
+ }
+
+ ice_flush(hw);
+ return 0;
+
+teardown:
+ ice_for_each_vf(pf, bkt, vf) {
+ if (it_cnt == 0)
+ break;
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ it_cnt--;
+ }
+
+ return retval;
+}
+
+/**
+ * ice_sriov_free_vf - Free VF memory after all references are dropped
+ * @vf: pointer to VF to free
*
- * Traversing the mailbox static snapshot without checking
- * for malicious VFs.
+ * Called by ice_put_vf through ice_release_vf once the last reference to a VF
+ * structure has been dropped.
*/
-static void
-ice_mbx_traverse(struct ice_hw *hw,
- enum ice_mbx_snapshot_state *new_state)
+static void ice_sriov_free_vf(struct ice_vf *vf)
{
- struct ice_mbx_snap_buffer_data *snap_buf;
- u32 num_iterations;
+ mutex_destroy(&vf->cfg_lock);
- snap_buf = &hw->mbx_snapshot.mbx_buf;
+ kfree_rcu(vf, rcu);
+}
- /* As mailbox buffer is circular, applying a mask
- * on the incremented iteration count.
- */
- num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
-
- /* Checking either of the below conditions to exit snapshot traversal:
- * Condition-1: If the number of iterations in the mailbox is equal to
- * the mailbox head which would indicate that we have reached the end
- * of the static snapshot.
- * Condition-2: If the maximum messages serviced in the mailbox for a
- * given interrupt is the highest possible value then there is no need
- * to check if the number of messages processed is equal to it. If not
- * check if the number of messages processed is greater than or equal
- * to the maximum number of mailbox entries serviced in current work item.
+/**
+ * ice_sriov_clear_mbx_register - clear an SR-IOV VF's mailbox registers
+ * @vf: the VF to configure
+ */
+static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+}
+
+/**
+ * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
+ * @vf: pointer to VF structure
+ * @is_vflr: true if reset occurred due to VFLR
+ *
+ * Trigger and cleanup after a VF reset for a SR-IOV VF.
+ */
+static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* In the case of a VFLR, HW has already reset the VF and we just need
+ * to clean up. Otherwise we must first trigger the reset using the
+ * VFRTRIG register.
*/
- if (num_iterations == snap_buf->head ||
- (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
- ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
- *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ if (!is_vflr) {
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
+ }
}
/**
- * ice_mbx_detect_malvf - Detect malicious VF in snapshot
- * @hw: pointer to the HW struct
- * @vf_id: relative virtual function ID
- * @new_state: new algorithm state
- * @is_malvf: boolean output to indicate if VF is malicious
+ * ice_sriov_poll_reset_status - poll SRIOV VF reset status
+ * @vf: pointer to VF structure
*
- * This function tracks the number of asynchronous messages
- * sent per VF and marks the VF as malicious if it exceeds
- * the permissible number of messages to send.
+ * Returns true when reset is successful, else returns false
*/
-static int
-ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
- enum ice_mbx_snapshot_state *new_state,
- bool *is_malvf)
+static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_pf *pf = vf->pf;
+ unsigned int i;
+ u32 reg;
- if (vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M)
+ return true;
- /* increment the message count in the VF array */
- snap->mbx_vf.vf_cntr[vf_id]++;
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
+ }
+ return false;
+}
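
The bounded-poll pattern here generalizes beyond this register; a user-space sketch with a stubbed register read (the retry count mirrors the loop above, and the short usleep_range() between reads is noted in a comment):

#include <stdbool.h>
#include <stdio.h>

#define VFRD_BIT 0x1u

/* Stub standing in for rd32(VPGEN_VFRSTAT): pretend the reset-done bit
 * appears on the third read.
 */
static unsigned int fake_read_vfrstat(void)
{
	static int calls;

	return ++calls >= 3 ? VFRD_BIT : 0;
}

static bool poll_reset_done(void)
{
	for (int i = 0; i < 10; i++) {
		if (fake_read_vfrstat() & VFRD_BIT)
			return true;
		/* the driver sleeps 10-20us here via usleep_range() */
	}
	return false;
}

int main(void)
{
	printf("reset %s\n", poll_reset_done() ? "done" : "timed out");
	return 0;
}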
- if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
- *is_malvf = true;
+/**
+ * ice_sriov_clear_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
+ */
+static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ ice_flush(hw);
+}
+
+/**
+ * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (e.g. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
- /* continue to iterate through the mailbox snapshot */
- ice_mbx_traverse(hw, new_state);
+ ice_vf_vsi_release(vf);
+ if (!ice_vf_vsi_setup(vf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ return -ENOMEM;
+ }
return 0;
}
/**
- * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
- * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+ ice_ena_vf_mappings(vf);
+ wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+static const struct ice_vf_ops ice_sriov_vf_ops = {
+ .reset_type = ICE_VF_RESET,
+ .free = ice_sriov_free_vf,
+ .clear_mbx_register = ice_sriov_clear_mbx_register,
+ .trigger_reset_register = ice_sriov_trigger_reset_register,
+ .poll_reset_status = ice_sriov_poll_reset_status,
+ .clear_reset_trigger = ice_sriov_clear_reset_trigger,
+ .vsi_rebuild = ice_sriov_vsi_rebuild,
+ .post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
+};
+
+/**
+ * ice_create_vf_entries - Allocate and insert VF entries
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of VFs to allocate
+ *
+ * Allocate new VF entries and insert them into the hash table. Set some
+ * basic default fields for initializing the new VFs.
*
- * Reset the mailbox snapshot structure and clear VF counter array.
+ * After this function exits, the hash table will have num_vfs entries
+ * inserted.
+ *
+ * Returns 0 on success or an integer error code on failure.
*/
-static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
- u32 vfcntr_len;
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_vf *vf;
+ u16 vf_id;
+ int err;
+
+ lockdep_assert_held(&vfs->table_lock);
+
+ for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf) {
+ err = -ENOMEM;
+ goto err_free_entries;
+ }
+ kref_init(&vf->refcnt);
+
+ vf->pf = pf;
+ vf->vf_id = vf_id;
+
+ /* set sriov vf ops for VFs created during SRIOV flow */
+ vf->vf_ops = &ice_sriov_vf_ops;
- if (!snap || !snap->mbx_vf.vf_cntr)
+ vf->vf_sw_id = pf->first_sw;
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = pf->vfs.num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when VF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ ice_virtchnl_set_dflt_ops(vf);
+
+ mutex_init(&vf->cfg_lock);
+
+ hash_add_rcu(vfs->table, &vf->entry, vf_id);
+ }
+
+ return 0;
+
+err_free_entries:
+ ice_free_vf_entries(pf);
+ return err;
+}
+
+/**
+ * ice_ena_vfs - enable VFs so they are ready to be used
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to enable
+ */
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ set_bit(ICE_OICR_INTR_DIS, pf->state);
+ ice_flush(hw);
+
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
+ if (ret)
+ goto err_unroll_intr;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ ret = ice_set_per_vf_res(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
+ num_vfs, ret);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_create_vf_entries(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
+ num_vfs);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_start_vfs(pf);
+ if (ret) {
+ dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
+ ret = -EAGAIN;
+ goto err_unroll_vf_entries;
+ }
+
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret) {
+ dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
+ goto err_unroll_sriov;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ mutex_unlock(&pf->vfs.table_lock);
+
+ return 0;
+
+err_unroll_vf_entries:
+ ice_free_vf_entries(pf);
+err_unroll_sriov:
+ mutex_unlock(&pf->vfs.table_lock);
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(ICE_OICR_INTR_DIS, pf->state);
+ return ret;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Returns 0 on success and negative on failure
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return 0;
+
+ if (num_vfs > pf->vfs.num_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->vfs.num_supported);
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
+
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
+ *
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success return whatever num_vfs was set to by the caller. Return negative on
+ * failure.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ ice_mbx_deinit_snapshot(&pf->hw);
+ if (pf->lag)
+ ice_enable_lag(pf->lag);
+ return 0;
+ }
+
+ dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (err)
+ return err;
+
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err) {
+ ice_mbx_deinit_snapshot(&pf->hw);
+ return err;
+ }
+
+ if (pf->lag)
+ ice_disable_lag(pf->lag);
+ return num_vfs;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * Called from the VFLR IRQ handler to free up VF resources and state
+ * variables.
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u32 reg;
+
+ if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !ice_has_vfs(pf))
return;
- /* Clear VF counters. */
- vfcntr_len = snap->mbx_vf.vfcntr_len;
- if (vfcntr_len)
- memset(snap->mbx_vf.vf_cntr, 0,
- (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
-
- /* Reset mailbox snapshot for a new capture. */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-}
-
-/**
- * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
- * @hw: pointer to the HW struct
- * @mbx_data: pointer to structure containing mailbox data
- * @vf_id: relative virtual function (VF) ID
- * @is_malvf: boolean output to indicate if VF is malicious
- *
- * The function serves as an entry point for the malicious VF
- * detection algorithm by handling the different states and state
- * transitions of the algorithm:
- * New snapshot: This state is entered when creating a new static
- * snapshot. The data from any previous mailbox snapshot is
- * cleared and a new capture of the mailbox head and tail is
- * logged. This will be the new static snapshot to detect
- * asynchronous messages sent by VFs. On capturing the snapshot
- * and depending on whether the number of pending messages in that
- * snapshot exceed the watermark value, the state machine enters
- * traverse or detect states.
- * Traverse: If pending message count is below watermark then iterate
- * through the snapshot without any action on VF.
- * Detect: If pending message count exceeds watermark traverse
- * the static snapshot and look for a malicious VF.
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
+ *
+ * If no VF is found who owns the pfq then return NULL, otherwise return a
+ * pointer to the VF who owns the pfq
+ *
+ * If this function returns non-NULL, it acquires a reference to the VF
+ * structure. The caller is responsible for calling ice_put_vf() to drop this
+ * reference.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ continue;
+
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
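
The kref_get_unless_zero() step is what makes this lookup safe against a concurrent final ice_put_vf(): a reference is taken only if the count has not already dropped to zero. A user-space approximation of that idiom with C11 atomics (a sketch, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Approximation of kref_get_unless_zero(): only take a reference if the
 * object is not already on its way to being freed.
 */
static bool get_unless_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; retry */
	}
	return false;
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live: %d, dying: %d\n",
	       get_unless_zero(&live), get_unless_zero(&dying)); /* 1, 0 */
	return 0;
}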
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
+ * reset on the offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ ice_put_vf(vf);
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+ netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ ret = -ENODEV;
+ goto out_put_vf;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
+ if (ret)
+ dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
+ ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
+ else
+ vf->spoofchk = ena;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
*/
int
-ice_mbx_vf_state_handler(struct ice_hw *hw,
- struct ice_mbx_data *mbx_data, u16 vf_id,
- bool *is_malvf)
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
- struct ice_mbx_snap_buffer_data *snap_buf;
- struct ice_ctl_q_info *cq = &hw->mailboxq;
- enum ice_mbx_snapshot_state new_state;
- int status = 0;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
- if (!is_malvf || !mbx_data)
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* When entering the mailbox state machine assume that the VF
- * is not malicious until detected.
- */
- *is_malvf = false;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* Checking if max messages allowed to be processed while servicing current
- * interrupt is not less than the defined AVF message threshold.
- */
- if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = ice_vf_get_port_vlan_id(vf);
+ ivi->qos = ice_vf_get_port_vlan_prio(vf);
+ if (ice_vf_is_port_vlan_ena(vf))
+ ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ return -EINVAL;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* The watermark value should not be lesser than the threshold limit
- * set for the number of asynchronous messages a VF can send to mailbox
- * nor should it be greater than the maximum number of messages in the
- * mailbox serviced in current interrupt.
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ mutex_lock(&vf->cfg_lock);
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
- mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ ether_addr_copy(vf->dev_lan_addr.addr, mac);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- snap_buf = &snap->mbx_buf;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- switch (snap_buf->state) {
- case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
- /* Clear any previously held data in mailbox snapshot structure. */
- ice_mbx_reset_snapshot(snap);
+ /* Check if already trusted */
+ if (trusted == vf->trusted) {
+ ret = 0;
+ goto out_put_vf;
+ }
- /* Collect the pending ARQ count, number of messages processed and
- * the maximum number of messages allowed to be processed from the
- * Mailbox for current interrupt.
- */
- snap_buf->num_pending_arq = mbx_data->num_pending_arq;
- snap_buf->num_msg_proc = mbx_data->num_msg_proc;
- snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+ mutex_lock(&vf->cfg_lock);
- /* Capture a new static snapshot of the mailbox by logging the
- * head and tail of snapshot and set num_iterations to the tail
- * value to mark the start of the iteration through the snapshot.
- */
- snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
- mbx_data->num_pending_arq);
- snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
- snap_buf->num_iterations = snap_buf->tail;
-
- /* Pending ARQ messages returned by ice_clean_rq_elem
- * is the difference between the head and tail of the
- * mailbox queue. Comparing this value against the watermark
- * helps to check if we potentially have malicious VFs.
- */
- if (snap_buf->num_pending_arq >=
- mbx_data->async_watermark_val) {
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- } else {
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- }
- break;
+ vf->trusted = trusted;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
- case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- break;
+ mutex_unlock(&vf->cfg_lock);
- case ICE_MAL_VF_DETECT_STATE_DETECT:
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- break;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
default:
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = -EIO;
+ ret = -EINVAL;
+ goto out_put_vf;
}
- snap_buf->state = new_state;
+ ice_vc_notify_vf_link_state(vf);
- return status;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
}
/**
- * ice_mbx_report_malvf - Track and note malicious VF
- * @hw: pointer to the HW struct
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
- * @report_malvf: boolean to indicate if malicious VF must be reported
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int rate = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ rate += vf->min_tx_rate;
+ rcu_read_unlock();
+
+ return rate;
+}
+
+/**
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of total
+ * min_tx_rate based on the current link speed and all other VFs' configured
+ * min_tx_rate.
*
- * This function will update a bitmap that keeps track of the malicious
- * VFs attached to the PF. A malicious VF must be reported only once if
- * discovered between VF resets or loading so the function checks
- * the input vf_id against the bitmap to verify if the VF has been
- * detected in any previous mailbox iterations.
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int all_vfs_min_tx_rate;
+ int link_speed_mbps;
+
+ if (WARN_ON(!vsi))
+ return false;
+
+ link_speed_mbps = ice_get_link_speed_mbps(vsi);
+ all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
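
A worked example of the oversubscription math, with made-up rates:

#include <stdbool.h>
#include <stdio.h>

/* Model of ice_min_tx_rate_oversubscribed(): all rates in Mbps, all
 * values hypothetical.
 */
static bool oversubscribed(int link_speed, int all_vfs_min, int vf_old_min,
			   int vf_new_min)
{
	/* the VF's previous guarantee is being replaced, so subtract it */
	return (all_vfs_min - vf_old_min) + vf_new_min > link_speed;
}

int main(void)
{
	/* 10G link, 9000 Mbps already guaranteed, this VF held 1000 of it */
	printf("%d\n", oversubscribed(10000, 9000, 1000, 2000)); /* 0: fits */
	printf("%d\n", oversubscribed(10000, 9000, 1000, 2001)); /* 1: over */
	return 0;
}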
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
*/
int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf)
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- if (!all_malvfs || !report_malvf)
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- *report_malvf = false;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ ret = -EOPNOTSUPP;
+ goto out_put_vf;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_eth_stats *stats;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret;
- if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- if (vf_id >= bitmap_len)
- return -EIO;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* If the vf_id is found in the bitmap set bit and boolean to true */
- if (!test_and_set_bit(vf_id, all_malvfs))
- *report_malvf = true;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
- return 0;
+ ice_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
+ * @hw: hardware structure used to check the VLAN mode
+ * @vlan_proto: VLAN TPID being checked
+ *
+ * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
+ * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
+ * Mode (SVM), then only ETH_P_8021Q is supported.
+ */
+static bool
+ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
+{
+ bool is_supported = false;
+
+ switch (vlan_proto) {
+ case ETH_P_8021Q:
+ is_supported = true;
+ break;
+ case ETH_P_8021AD:
+ if (ice_is_dvm_ena(hw))
+ is_supported = true;
+ break;
+ }
+
+ return is_supported;
}
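
A condensed model of the SVM/DVM TPID rule; the ETH_P_* values shown are the standard 0x8100 and 0x88A8 TPIDs.

#include <stdbool.h>
#include <stdio.h>

#define ETH_P_8021Q   0x8100	/* C-tag, always supported */
#define ETH_P_8021AD  0x88A8	/* S-tag, needs Double VLAN Mode */

static bool port_vlan_proto_supported(unsigned short tpid, bool dvm_ena)
{
	switch (tpid) {
	case ETH_P_8021Q:
		return true;
	case ETH_P_8021AD:
		return dvm_ena;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       port_vlan_proto_supported(ETH_P_8021Q, false),	/* 1 */
	       port_vlan_proto_supported(ETH_P_8021AD, false),	/* 0 */
	       port_vlan_proto_supported(ETH_P_8021AD, true));	/* 1 */
	return 0;
}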
/**
- * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
- * @snap: pointer to the mailbox snapshot structure
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN ID being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
*
- * In case of a VF reset, this function can be called to clear
- * the bit corresponding to the VF ID in the bitmap tracking all
- * malicious VFs attached to the PF. The function also clears the
- * VF counter array at the index of the VF ID. This is to ensure
- * that the new VF loaded is not considered malicious before going
- * through the overflow detection algorithm.
+ * program VF Port VLAN ID and/or QoS
*/
int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id)
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
- if (!snap || !all_malvfs)
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u16 local_vlan_proto = ntohs(vlan_proto);
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
return -EINVAL;
+ }
- if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
+ dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
+ local_vlan_proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* Ensure VF ID value is not larger than bitmap or VF counter length */
- if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
- clear_bit(vf_id, all_malvfs);
+ if (ice_vf_get_port_vlan_prio(vf) == qos &&
+ ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
+ ice_vf_get_port_vlan_id(vf) == vlan_id) {
+ /* duplicate request, so just return success */
+ dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
+ vlan_id, qos, local_vlan_proto);
+ ret = 0;
+ goto out_put_vf;
+ }
- /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
- * This is to ensure that if a VF is unloaded and a new one brought back
- * up with the same VF ID for a snapshot currently in traversal or detect
- * state the counter for that VF ID does not increment on top of existing
- * values in the mailbox overflow detection algorithm.
- */
- snap->mbx_vf.vf_cntr[vf_id] = 0;
+ mutex_lock(&vf->cfg_lock);
- return 0;
+ vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
+ if (ice_vf_is_port_vlan_ena(vf))
+ dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
+ vlan_id, qos, local_vlan_proto, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
}
/**
- * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
- * @hw: pointer to the hardware structure
- * @vf_count: number of VFs allocated on a PF
- *
- * Clear the mailbox snapshot structure and allocate memory
- * for the VF counter array based on the number of VFs allocated
- * on that PF.
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs malicious driver detect events
+ * @pf: pointer to the PF structure
*
- * Assumption: This function will assume ice_get_caps() has already been
- * called to ensure that the vf_count can be compared against the number
- * of VFs supported as defined in the functional capabilities of the device.
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
*/
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
- /* Ensure that the number of VFs allocated is non-zero and
- * is not greater than the number of supported VFs defined in
- * the functional capabilities of the PF.
- */
- if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return -EINVAL;
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
- snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
- sizeof(*snap->mbx_vf.vf_cntr),
- GFP_KERNEL);
- if (!snap->mbx_vf.vf_cntr)
- return -ENOMEM;
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
+ return;
- /* Setting the VF counter length to the number of allocated
- * VFs for given PF's functional capabilities.
- */
- snap->mbx_vf.vfcntr_len = vf_count;
+ pf->vfs.last_printed_mdd_jiffies = jiffies;
- /* Clear mbx_buf in the mailbox snaphot structure and setting the
- * mailbox snapshot state to a new capture.
- */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+ ice_print_vf_rx_mdd_event(vf);
+ }
- return 0;
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
+ vf->dev_lan_addr.addr);
+ }
+ }
+ mutex_unlock(&pf->vfs.table_lock);
}
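
The one-second gate above is the standard jiffies idiom. As a standalone sketch (the helper name is ours, not the driver's):

	#include <linux/jiffies.h>

	/* Return true at most once per second; *last holds the jiffies
	 * stamp of the previous successful call, mirroring
	 * last_printed_mdd_jiffies above.
	 */
	static bool once_per_second(unsigned long *last)
	{
		if (time_is_after_jiffies(*last + HZ))
			return false;	/* still inside the 1 s window */
		*last = jiffies;
		return true;
	}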
/**
- * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
- * @hw: pointer to the hardware structure
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
*
- * Clear the mailbox snapshot structure and free the VF counter array.
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
*/
-void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ u16 vf_id;
+ int pos;
- /* Free VF counter array and reset VF counter length */
- devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
- snap->mbx_vf.vfcntr_len = 0;
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vfdev;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
+
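One note on the VF walk above: pci_get_device() continues the search from the device passed back in and drops that device's reference, so the loop needs no explicit pci_dev_put() for devices it moves past. A minimal sketch of that iterator contract (IDs and helper name are placeholders):

	#include <linux/pci.h>

	static void visit_matching_devs(u16 vendor, u16 device)
	{
		struct pci_dev *pdev = NULL;

		/* each iteration releases the previous reference and
		 * takes one on the next match; NULL ends the walk
		 */
		while ((pdev = pci_get_device(vendor, device, pdev))) {
			/* ... inspect pdev ... */
		}
	}
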
+/**
+ * ice_is_malicious_vf - helper function to detect a malicious VF
+ * @pf: ptr to struct ice_pf
+ * @event: pointer to the AQ event
+ * @num_msg_proc: the number of messages processed so far
+ * @num_msg_pending: the number of messages pending in the admin queue
+ */
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending)
+{
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_mbx_data mbxdata;
+ bool malvf = false;
+ struct ice_vf *vf;
+ int status;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return false;
+
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ goto out_put_vf;
+
+ mbxdata.num_msg_proc = num_msg_proc;
+ mbxdata.num_pending_arq = num_msg_pending;
+ mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+ mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+ /* check to see if we have a malicious VF */
+ status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
+ if (status)
+ goto out_put_vf;
+
+ if (malvf) {
+ bool report_vf = false;
+
+ /* if the VF is malicious and we haven't let the user
+ * know about it, then let them know now
+ */
+ status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf_id,
+ &report_vf);
+ if (status)
+ dev_dbg(dev, "Error reporting malicious VF\n");
+
+ if (report_vf) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+ if (pf_vsi)
+ dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+ &vf->dev_lan_addr.addr[0],
+ pf_vsi->netdev->dev_addr);
+ }
+ }
- /* Clear mbx_buf in the mailbox snaphot structure */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+out_put_vf:
+ ice_put_vf(vf);
+ return malvf;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 68686a3fd7e8..955ab810a198 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,50 +3,159 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
+#include "ice_virtchnl_fdir.h"
+#include "ice_vf_lib.h"
+#include "ice_virtchnl.h"
-#include "ice_type.h"
-#include "ice_controlq.h"
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
-/* Defining the mailbox message threshold as 63 asynchronous
- * pending messages. Normal VF functionality does not require
- * sending more than 63 asynchronous pending message.
- */
-#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resource constraints */
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_NONQ_VECS_VF 1
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_TRIES 40
+#define ICE_MAX_VF_RESET_SLEEP_MS 20
#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending);
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
int
-ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
- u16 vf_id, bool *is_mal_vf);
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
+
int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id);
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
-void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf);
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
#else /* CONFIG_PCI_IOV */
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+
+static inline bool
+ice_is_malicious_vf(struct ice_pf __always_unused *pf,
+ struct ice_rq_event_info __always_unused *event,
+ u16 __always_unused num_msg_proc,
+ u16 __always_unused num_msg_pending)
+{
+ return false;
+}
+
static inline int
-ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
- u16 __always_unused vfid, u32 __always_unused v_opcode,
- u32 __always_unused v_retval, u8 __always_unused *msg,
- u16 __always_unused msglen,
- struct ice_sq_cd __always_unused *cd)
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
}
-static inline u32
-ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
- u16 __always_unused link_speed)
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+ struct ice_q_vector __always_unused *q_vector)
{
return 0;
}
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_stats __always_unused *vf_stats)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 11ae0bee3590..9b762f7972ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -30,23 +30,80 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+enum {
+ ICE_PKT_OUTER_IPV6 = BIT(0),
+ ICE_PKT_TUN_GTPC = BIT(1),
+ ICE_PKT_TUN_GTPU = BIT(2),
+ ICE_PKT_TUN_NVGRE = BIT(3),
+ ICE_PKT_TUN_UDP = BIT(4),
+ ICE_PKT_INNER_IPV6 = BIT(5),
+ ICE_PKT_INNER_TCP = BIT(6),
+ ICE_PKT_INNER_UDP = BIT(7),
+ ICE_PKT_GTP_NOPAY = BIT(8),
+ ICE_PKT_KMALLOC = BIT(9),
+ ICE_PKT_PPPOE = BIT(10),
+ ICE_PKT_L2TPV3 = BIT(11),
+};
+
struct ice_dummy_pkt_offsets {
enum ice_protocol_type type;
u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
-static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+struct ice_dummy_pkt_profile {
+ const struct ice_dummy_pkt_offsets *offsets;
+ const u8 *pkt;
+ u32 match;
+ u16 pkt_len;
+ u16 offsets_len;
+};
+
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
+ ice_dummy_##type##_packet_offsets[]
+
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
+ static const u8 ice_dummy_##type##_packet[]
+
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+ .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
+}
+
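To make the token pasting concrete: with these helpers, ICE_DECLARE_PKT_OFFSETS(vlan) expands to

	static const struct ice_dummy_pkt_offsets ice_dummy_vlan_packet_offsets[]

and ICE_PKT_PROFILE(vlan, m) stitches the same suffix back together, so a profile entry cannot pair one packet's template bytes with another packet's offset table.
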
+ICE_DECLARE_PKT_OFFSETS(vlan) = {
+ { ICE_VLAN_OFOS, 12 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(vlan) = {
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+};
+
+ICE_DECLARE_PKT_OFFSETS(qinq) = {
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(qinq) = {
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+};
+
+ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_TCP_IL, 76 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -65,7 +122,8 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -80,18 +138,19 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_UDP_ILOS, 76 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -110,7 +169,8 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -122,7 +182,7 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -131,12 +191,13 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_TCP_IL, 84 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -158,7 +219,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -173,7 +235,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -182,12 +244,13 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_UDP_ILOS, 84 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -209,7 +272,8 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -221,69 +285,251 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-/* offset info for MAC + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
- { ICE_UDP_ILOS, 34 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_TCP_IL, 96 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* Dummy packet for MAC + IPv4 + UDP */
-static const u8 dummy_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x08, 0x00, /* ICE_ETYPE_OL 12 */
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
+ 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_UDP_ILOS, 96 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_TCP_IL, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x5a, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_UDP_ILOS, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
0x00, 0x01, 0x00, 0x00,
0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
- 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x4e, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
+ 0x00, 0x08, 0x00, 0x00,
};
-/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+/* offset info for MAC + IPv4 + UDP dummy packet */
+ICE_DECLARE_PKT_OFFSETS(udp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_UDP_ILOS, 38 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (801.1Q), IPv4:UDP dummy packet */
-static const u8 dummy_vlan_udp_packet[] = {
+/* Dummy packet for MAC + IPv4 + UDP */
+ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
0x00, 0x01, 0x00, 0x00,
0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
0x00, 0x08, 0x00, 0x00,
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
/* offset info for MAC + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -292,7 +538,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + TCP */
-static const u8 dummy_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -314,50 +560,52 @@ static const u8 dummy_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_TCP_IL, 38 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_TCP_IL, 54 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (801.1Q), IPv4:TCP dummy packet */
-static const u8 dummy_vlan_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
- 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x06, 0x00, 0x00,
+	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+/* IPv6 + UDP */
+ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
- { ICE_TCP_IL, 54 },
+ { ICE_UDP_ILOS, 54 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_tcp_ipv6_packet[] = {
+/* IPv6 + UDP dummy packet */
+ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -365,7 +613,7 @@ static const u8 dummy_tcp_ipv6_packet[] = {
0x86, 0xDD, /* ICE_ETYPE_OL 12 */
0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -375,7 +623,55 @@ static const u8 dummy_tcp_ipv6_packet[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_TCP_IL, 82 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x58, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x50, 0x00, 0x00, 0x00,
@@ -384,39 +680,96 @@ static const u8 dummy_tcp_ipv6_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + TCP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_tcp_ipv6_packet_offsets[] = {
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_TCP_IL, 58 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_UDP_ILOS, 82 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-static const u8 dummy_vlan_tcp_ipv6_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+ 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x38, 0x00, 0x00,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_TCP_IL, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x50, 0x00, 0x00, 0x00,
@@ -425,66 +778,231 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x60, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
- { ICE_UDP_ILOS, 54 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_TCP_IL, 102 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* IPv6 + UDP dummy packet */
-static const u8 dummy_udp_ipv6_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
- 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x44, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
- 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 82 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
- 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
- 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x38, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x38, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_UDP_ILOS, 58 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_TCP_IL, 122 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-static const u8 dummy_vlan_udp_ipv6_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x58, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_UDP_ILOS, 122 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x4c, 0x11, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -494,24 +1012,370 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
0x00, 0x08, 0x00, 0x00,
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
- (DUMMY_ETH_HDR_LEN * \
- sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
-#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
- ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
-#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
- ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP_NO_PAY, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+	0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00,
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_TCP_IL, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_UDP_ILOS, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
+ 0x00, 0x08, 0x00, 0x00,
+
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_TCP_IL, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_UDP_ILOS, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
+ 0x00, 0x08, 0x00, 0x00,
+
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_L2TPV3, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_L2TPV3, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x0c, 0x73, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
+ ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
+ ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
+ ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
+ ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(tcp, 0),
+};
+
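The profile table is ordered most-specific first and ends with the catch-all tcp entry whose match is 0. A plausible selection helper, sketched under the assumption that lookup stops at the first profile whose match bits are all present in the requested flags (the function name is ours):

	static const struct ice_dummy_pkt_profile *
	ice_pick_dummy_profile(u32 flags)
	{
		const struct ice_dummy_pkt_profile *p = ice_dummy_pkt_profiles;

		/* the final { tcp, 0 } entry matches anything, so this
		 * walk always terminates
		 */
		while (p->match && (flags & p->match) != p->match)
			p++;

		return p;
	}
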
+#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
+#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
+#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
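
These size macros now lean on struct_size() instead of open-coded offsetof() arithmetic; struct_size() computes sizeof(*p) plus n trailing elements and saturates on overflow rather than wrapping. A self-contained illustration with a made-up rule type:

	#include <linux/overflow.h>
	#include <linux/types.h>

	struct demo_vsi_list_rule {
		__le16 type;
		__le16 index;
		__le16 number_vsi;
		__le16 vsi[];		/* flexible array member */
	};

	/* bytes for a rule carrying n VSIs, overflow-checked */
	static size_t demo_rule_bytes(const struct demo_vsi_list_rule *r, u16 n)
	{
		return struct_size(r, vsi, n);
	}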
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
@@ -960,7 +1824,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
sw_buf->res_type =
@@ -1097,6 +1962,64 @@ ice_aq_get_recipe(struct ice_hw *hw,
}
/**
+ * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
+ * @hw: pointer to the HW struct
+ * @params: parameters used to update the default recipe
+ *
+ * This function only supports updating default recipes and it only supports
+ * updating a single recipe based on the lkup_idx at a time.
+ *
+ * This is done as a read-modify-write operation. First, get the current recipe
+ * contents based on the recipe's ID. Then modify the field vector index and
+ * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
+ * the pre-existing recipe with the modifications.
+ */
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params)
+{
+ struct ice_aqc_recipe_data_elem *rcp_list;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ int status;
+
+ rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
+ if (!rcp_list)
+ return -ENOMEM;
+
+ /* read current recipe list from firmware */
+ rcp_list->recipe_indx = params->rid;
+ status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
+ params->rid, status);
+ goto error_out;
+ }
+
+	/* only modify the existing recipe's lkup_idx and mask if valid,
+	 * leaving all other fields the same, then update the recipe in firmware
+	 */
+ rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
+ if (params->mask_valid)
+ rcp_list->content.mask[params->lkup_idx] =
+ cpu_to_le16(params->mask);
+
+ if (params->ignore_valid)
+ rcp_list->content.lkup_indx[params->lkup_idx] |=
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+
+ status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask, params->mask_valid ? "true" : "false",
+ status);
+
+error_out:
+ kfree(rcp_list);
+ return status;
+}
+
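A hypothetical caller, just to make the read-modify-write shape concrete (every value below is illustrative, not taken from the driver):

	static int demo_update_recipe(struct ice_hw *hw)
	{
		struct ice_update_recipe_lkup_idx_params params = {
			.rid		= 3,	/* recipe to rewrite */
			.lkup_idx	= 2,	/* slot in lkup_indx[] */
			.fv_idx		= 5,	/* new field-vector index */
			.mask		= 0xFFFF,
			.mask_valid	= true,
			.ignore_valid	= false,
		};

		return ice_update_recipe_lkup_idx(hw, &params);
	}
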
+/**
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
@@ -1395,8 +2318,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@@ -1415,9 +2336,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
int status;
u16 i;
- rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
- GFP_KERNEL);
-
+ rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
if (!rbuf)
return -ENOMEM;
@@ -1465,7 +2384,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), rbuf);
+ kfree(rbuf);
return status;
}
@@ -1536,9 +2455,11 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
*/
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
- struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
+ struct ice_sw_rule_lkup_rx_tx *s_rule,
+ enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
+ u16 vlan_tpid = ETH_P_8021Q;
void *daddr = NULL;
u16 eth_hdr_sz;
u8 *eth_hdr;
@@ -1547,15 +2468,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
- s_rule->pdata.lkup_tx_rx.act = 0;
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(f_info->fltr_rule_id);
- s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ s_rule->act = 0;
+ s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->hdr_len = 0;
return;
}
eth_hdr_sz = sizeof(dummy_eth_header);
- eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+ eth_hdr = s_rule->hdr_data;
/* initialize the ether header with a dummy header */
memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
@@ -1611,6 +2531,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_VLAN:
vlan_id = f_info->l_data.vlan.vlan_id;
+ if (f_info->l_data.vlan.tpid_valid)
+ vlan_tpid = f_info->l_data.vlan.tpid;
if (f_info->fltr_act == ICE_FWD_TO_VSI ||
f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
act |= ICE_SINGLE_ACT_PRUNE;
@@ -1638,14 +2560,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
}
- s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
+ s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
/* Recipe set depending on lookup type */
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+ s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
+ s_rule->src = cpu_to_le16(f_info->src);
+ s_rule->act = cpu_to_le32(act);
if (daddr)
ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
@@ -1653,11 +2575,13 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ *off = cpu_to_be16(vlan_tpid);
}
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
+ s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
@@ -1674,7 +2598,8 @@ static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
- struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
+ struct ice_sw_rule_lkup_rx_tx *rx_tx;
+ struct ice_sw_rule_lg_act *lg_act;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
* 2. GENERIC VALUE action to hold the profile ID
@@ -1695,18 +2620,18 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 1. Large Action
* 2. Look up Tx Rx
*/
- lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
- rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
+ rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act)
return -ENOMEM;
- rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
+ rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
- lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
- lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
- lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
+ lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
+ lg_act->index = cpu_to_le16(l_id);
+ lg_act->size = cpu_to_le16(num_lg_acts);
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
@@ -1718,13 +2643,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
- lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
+ lg_act->act[0] = cpu_to_le32(act);
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
- lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
+ lg_act->act[1] = cpu_to_le32(act);
act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
@@ -1734,24 +2659,22 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
ICE_LG_ACT_GENERIC_VALUE_M;
- lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
+ lg_act->act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
- rx_tx->pdata.lkup_tx_rx.act =
- cpu_to_le32(ICE_SINGLE_ACT_PTR |
- ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
- ICE_SINGLE_ACT_PTR_VAL_M));
+ rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
+ ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
+ ICE_SINGLE_ACT_PTR_VAL_M));
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
- rx_tx->pdata.lkup_tx_rx.index =
- cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
+ rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
ice_aqc_opc_update_sw_rules, NULL);
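Both rules are carried in a single contiguous admin-queue buffer: the large action comes first and the lookup rule starts lg_act_size bytes in, which is why rx_tx is derived by pointer arithmetic rather than a second allocation. A hedged restatement of that layout, assuming struct_size()-based sizing macros:

	/* Illustration: one AQ buffer holding a 3-action large action
	 * followed by a lookup rule with a dummy Ethernet header.
	 *
	 *   | lg_act (hdr + 3 * __le32 act) | rx_tx (hdr ... + eth hdr) |
	 *   ^lg_act                         ^(u8 *)lg_act + lg_act_size
	 */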
@@ -1813,7 +2736,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_vsi_list *s_rule;
u16 s_rule_size;
u16 rule_type;
int status;
@@ -1827,7 +2750,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -1836,7 +2760,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
else
return -EINVAL;
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
@@ -1846,13 +2770,13 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
- s_rule->pdata.vsi_list.vsi[i] =
+ s_rule->vsi[i] =
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
- s_rule->type = cpu_to_le16(rule_type);
- s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ s_rule->hdr.type = cpu_to_le16(rule_type);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->index = cpu_to_le16(vsi_list_id);
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
@@ -1900,13 +2824,14 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
enum ice_sw_lkup_type l_type;
struct ice_sw_recipe *recp;
int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+ GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
@@ -1927,17 +2852,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
ice_aqc_opc_add_sw_rules);
- status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
ice_aqc_opc_add_sw_rules, NULL);
if (status) {
devm_kfree(ice_hw_to_dev(hw), fm_entry);
goto ice_create_pkt_fwd_rule_exit;
}
- f_entry->fltr_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
- fm_entry->fltr_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
+ fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
/* The bookkeeping entries will get removed when base driver
* calls remove filter AQ command
@@ -1962,20 +2886,22 @@ ice_create_pkt_fwd_rule_exit:
static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+ GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
- status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
ice_aqc_opc_update_sw_rules, NULL);
devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -2259,17 +3185,17 @@ static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_vsi_list *s_rule;
u16 s_rule_size;
int status;
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->index = cpu_to_le16(vsi_list_id);
/* Free the vsi_list resource that we allocated. It is assumed that the
* list is empty at this point.
@@ -2429,10 +3355,10 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
if (remove_rule) {
/* Remove the lookup rule */
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
GFP_KERNEL);
if (!s_rule) {
status = -ENOMEM;
@@ -2443,8 +3369,8 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ice_aqc_opc_remove_sw_rules);
status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
+ 1, ice_aqc_opc_remove_sw_rules, NULL);
/* Remove the bookkeeping entry from the list */
devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -2583,31 +3509,15 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
- *
- * IMPORTANT: When the ucast_shared flag is set to false and m_list has
- * multiple unicast addresses, the function assumes that all the
- * addresses are unique in a given add_mac call. It doesn't
- * check for duplicates in this case, removing duplicates from a given
- * list should be taken care of in the caller of this function.
*/
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
- struct list_head *rule_head;
- u16 total_elem_left, s_rule_size;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- u16 num_unicast = 0;
int status = 0;
- u8 elem_sent;
if (!m_list || !hw)
return -EINVAL;
- s_rule = NULL;
- sw = hw->switch_info;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
@@ -2626,109 +3536,13 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
return -EINVAL;
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't overwrite the unicast address */
- mutex_lock(rule_lock);
- if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
- &m_list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -EEXIST;
- }
- mutex_unlock(rule_lock);
- num_unicast++;
- } else if (is_multicast_ether_addr(add) ||
- (is_unicast_ether_addr(add) && hw->ucast_shared)) {
- m_list_itr->status =
- ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
- m_list_itr);
- if (m_list_itr->status)
- return m_list_itr->status;
- }
- }
-
- mutex_lock(rule_lock);
- /* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast) {
- status = 0;
- goto ice_add_mac_exit;
- }
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
+ m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_unicast_ether_addr(mac_addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
- ice_aqc_opc_add_sw_rules);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_aqc_sw_rules_elem *entry = r_iter;
-
- elem_sent = min_t(u8, total_elem_left,
- (ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_add_sw_rules,
- NULL);
- if (status)
- goto ice_add_mac_exit;
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + (elem_sent * s_rule_size));
- }
-
- /* Fill up rule ID based on the value returned from FW */
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_fltr_mgmt_list_entry *fm_entry;
-
- if (is_unicast_ether_addr(mac_addr)) {
- f_info->fltr_rule_id =
- le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
- f_info->fltr_act = ICE_FWD_TO_VSI;
- /* Create an entry to track this MAC address */
- fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fm_entry), GFP_KERNEL);
- if (!fm_entry) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
- fm_entry->fltr_info = *f_info;
- fm_entry->vsi_count = 1;
- /* The book keeping entries will get removed when
- * base driver calls remove filter AQ command
- */
-
- list_add(&fm_entry->list_entry, rule_head);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
-ice_add_mac_exit:
- mutex_unlock(rule_lock);
- if (s_rule)
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -3010,7 +3824,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
- * @hw: pointer to the hardware structure
+ * @pi: pointer to the port_info structure
* @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
@@ -3018,25 +3832,20 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info f_info;
- enum ice_adminq_opc opcode;
- u16 s_rule_size;
+ struct ice_hw *hw = pi->hw;
u16 hw_vsi_id;
int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&f_info, 0, sizeof(f_info));
@@ -3044,86 +3853,80 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
- }
- }
+ status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
-out:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
/**
- * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
- * @hw: pointer to the hardware structure
- * @recp_id: lookup type for which the specified rule needs to be searched
- * @f_info: rule information
- *
- * Helper function to search for a unicast rule entry - this is to be used
- * to remove unicast MAC filter that is not shared with other VSIs on the
- * PF switch.
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: vsi handle to check for in filter list
+ * @rule_exists: indicates if there are any VSIs in the rule list
*
- * Returns pointer to entry storing the rule if found
+ * Checks if the VSI is in a default VSI list, and also indicates whether
+ * the default VSI list is empty
*/
-static struct ice_fltr_mgmt_list_entry *
-ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
- struct ice_fltr_info *f_info)
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
{
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *list_head;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_sw_recipe *recp_list;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ bool ret = false;
- list_head = &sw->recp_list[recp_id].filt_rules;
- list_for_each_entry(list_itr, list_head, list_entry) {
- if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
- sizeof(f_info->l_data)) &&
- f_info->fwd_id.hw_vsi_id ==
- list_itr->fltr_info.fwd_id.hw_vsi_id &&
- f_info->flag == list_itr->fltr_info.flag)
- return list_itr;
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ mutex_lock(rule_lock);
+
+ if (rule_exists && !list_empty(rule_head))
+ *rule_exists = true;
+
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
+ }
}
- return NULL;
+
+ mutex_unlock(rule_lock);
+
+ return ret;
}
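A hypothetical caller, sketched to show the rule_exists out-parameter contract (everything except the two exported helpers is invented for illustration):

	/* Illustrative use: only (re)program the default VSI when needed. */
	static int example_set_dflt(struct ice_port_info *pi, u16 vsi_handle)
	{
		bool rule_exists = false;

		if (ice_check_if_dflt_vsi(pi, vsi_handle, &rule_exists))
			return 0;	/* already the default VSI */

		/* some other VSI holds the default rule; clear it first */
		if (rule_exists)
			return -EEXIST;

		return ice_cfg_dflt_vsi(pi, vsi_handle, true, ICE_FLTR_RX);
	}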
/**
@@ -3142,15 +3945,12 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return -EINVAL;
- rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
- u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
@@ -3162,19 +3962,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't remove the unicast address that belongs to
- * another VSI on the switch, since it is not being
- * shared...
- */
- mutex_lock(rule_lock);
- if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
- &list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -ENOENT;
- }
- mutex_unlock(rule_lock);
- }
+
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
@@ -3211,21 +3999,6 @@ int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
- * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
- * @fm_entry: filter entry to inspect
- * @vsi_handle: VSI handle to compare with filter info
- */
-static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
-{
- return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- fm_entry->fltr_info.vsi_handle == vsi_handle) ||
- (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
- fm_entry->vsi_list_info &&
- (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
-}
-
-/**
* ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -3576,6 +4349,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -3583,7 +4363,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -3755,6 +4535,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_ETYPE_OL, { 0 } },
+ { ICE_ETYPE_IL, { 0 } },
{ ICE_VLAN_OFOS, { 2, 0 } },
{ ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
{ ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
@@ -3767,13 +4548,20 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, { 0, 2 } },
{ ICE_VXLAN, { 8, 10, 12, 14 } },
{ ICE_GENEVE, { 8, 10, 12, 14 } },
- { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
+ { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
+ { ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
+ { ICE_VLAN_EX, { 2, 0 } },
+ { ICE_VLAN_IN, { 2, 0 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
{ ICE_MAC_IL, ICE_MAC_IL_HW },
{ ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
{ ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
{ ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
{ ICE_IPV4_IL, ICE_IPV4_IL_HW },
@@ -3784,7 +4572,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
{ ICE_VXLAN, ICE_UDP_OF_HW },
{ ICE_GENEVE, ICE_UDP_OF_HW },
- { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_GTP, ICE_UDP_OF_HW },
+ { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_L2TPV3, ICE_L2TPV3_HW },
+ { ICE_VLAN_EX, ICE_VLAN_OF_HW },
+ { ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
/**
@@ -3868,6 +4662,23 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/**
+ * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
+ *
+ * The protocol ID for the outer VLAN differs between DVM and SVM. If DVM is
+ * supported, the protocol array record for the outer VLAN has to be modified
+ * to reflect the value proper for DVM.
+ */
+void ice_change_proto_id_to_dvm(void)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
+ ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
+ ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
+}
+
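A hypothetical init-time call site, assuming the driver checks DVM support once after package download:

	/* Illustrative: remap the outer-VLAN protocol ID once DVM is known
	 * to be active. Safe to call repeatedly, since the update is
	 * idempotent (guarded by the != ICE_VLAN_OF_HW check above).
	 */
	if (ice_is_dvm_ena(hw))
		ice_change_proto_id_to_dvm();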
+/**
* ice_prot_type_to_id - get protocol ID from protocol type
* @type: protocol type
* @id: pointer to variable that will receive the ID
@@ -4073,7 +4884,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+ bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
@@ -4427,41 +5238,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
/**
- * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
- * @hw: pointer to hardware structure
- * @lkups: lookup elements or match criteria for the advanced recipe, one
- * structure per protocol header
- * @lkups_cnt: number of protocols
- * @bm: bitmap of field vectors to consider
- * @fv_list: pointer to a list that holds the returned field vectors
- */
-static int
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- unsigned long *bm, struct list_head *fv_list)
-{
- u8 *prot_ids;
- int status;
- u16 i;
-
- prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
- if (!prot_ids)
- return -ENOMEM;
-
- for (i = 0; i < lkups_cnt; i++)
- if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = -EIO;
- goto free_mem;
- }
-
- /* Find field vectors that include all specified protocol types */
- status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
-
-free_mem:
- kfree(prot_ids);
- return status;
-}
-
-/**
* ice_tun_type_match_word - determine if tun type needs a match mask
* @tun_type: tunnel type
* @mask: mask to be used for the tunnel
@@ -4472,6 +5248,8 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
case ICE_SW_TUN_GENEVE:
case ICE_SW_TUN_VXLAN:
case ICE_SW_TUN_NVGRE:
+ case ICE_SW_TUN_GTPU:
+ case ICE_SW_TUN_GTPC:
*mask = ICE_TUN_FLAG_MASK;
return true;
@@ -4485,10 +5263,11 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* ice_add_special_words - Add words that are not protocols, such as metadata
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
+ * @dvm_ena: is double VLAN mode enabled
*/
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts)
+ struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
{
u16 mask;
@@ -4507,6 +5286,19 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
}
}
+ if (rinfo->vlan_type != 0 && dvm_ena) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] =
+ ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
+ } else {
+ return -ENOSPC;
+ }
+ }
+
return 0;
}
@@ -4537,6 +5329,13 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_GTPU:
+ prof_type = ICE_PROF_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_GTPC:
+ prof_type = ICE_PROF_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
break;
@@ -4608,18 +5407,18 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
- * ice_get_fv.
+ * ice_get_sw_fv_list.
*/
ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
- status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
if (status)
goto err_unroll;
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, lkup_exts);
+ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
goto err_free_lkup_exts;
@@ -4720,115 +5519,158 @@ err_free_lkup_exts:
}
/**
+ * ice_dummy_packet_add_vlan - insert VLAN header(s) into dummy packet
+ *
+ * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
+ * @num_vlan: number of VLAN tags
+ */
+static struct ice_dummy_pkt_profile *
+ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
+ u32 num_vlan)
+{
+ struct ice_dummy_pkt_profile *profile;
+ struct ice_dummy_pkt_offsets *offsets;
+ u32 buf_len, off, etype_off, i;
+ u8 *pkt;
+
+ if (num_vlan < 1 || num_vlan > 2)
+ return ERR_PTR(-EINVAL);
+
+ off = num_vlan * VLAN_HLEN;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
+ dummy_pkt->offsets_len;
+ offsets = kzalloc(buf_len, GFP_KERNEL);
+ if (!offsets)
+ return ERR_PTR(-ENOMEM);
+
+ offsets[0] = dummy_pkt->offsets[0];
+ if (num_vlan == 2) {
+ offsets[1] = ice_dummy_qinq_packet_offsets[0];
+ offsets[2] = ice_dummy_qinq_packet_offsets[1];
+ } else if (num_vlan == 1) {
+ offsets[1] = ice_dummy_vlan_packet_offsets[0];
+ }
+
+ for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
+ offsets[i + num_vlan].offset =
+ dummy_pkt->offsets[i].offset + off;
+ }
+ offsets[i + num_vlan] = dummy_pkt->offsets[i];
+
+ etype_off = dummy_pkt->offsets[1].offset;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
+ dummy_pkt->pkt_len;
+ pkt = kzalloc(buf_len, GFP_KERNEL);
+ if (!pkt) {
+ kfree(offsets);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(pkt, dummy_pkt->pkt, etype_off);
+ memcpy(pkt + etype_off,
+ num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
+ off);
+ memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
+ dummy_pkt->pkt_len - etype_off);
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ kfree(offsets);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ profile->offsets = offsets;
+ profile->pkt = pkt;
+ profile->pkt_len = buf_len;
+ profile->match |= ICE_PKT_KMALLOC;
+
+ return profile;
+}
+
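Profiles returned by this helper carry ICE_PKT_KMALLOC in their match word, marking them as heap copies the caller must release; the free_pkt_profile path in ice_add_adv_rule below does exactly that. An illustrative call, with base_profile standing in for a statically defined template:

	/* Illustrative: derive a QinQ (two-tag) template from a base profile. */
	const struct ice_dummy_pkt_profile *qinq;

	qinq = ice_dummy_packet_add_vlan(base_profile, 2);
	if (IS_ERR(qinq))
		return PTR_ERR(qinq);

	/* ... build the rule from qinq->pkt / qinq->offsets ... */

	if (qinq->match & ICE_PKT_KMALLOC) {
		kfree(qinq->offsets);
		kfree(qinq->pkt);
		kfree(qinq);
	}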
+/**
* ice_find_dummy_packet - find dummy packet
*
* @lkups: lookup elements or match criteria for the advanced recipe, one
* structure per protocol header
* @lkups_cnt: number of protocols
* @tun_type: tunnel type
- * @pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: pointer to receive the pointer to the offsets for the packet
+ *
+ * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
*/
-static void
+static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- enum ice_sw_tunnel_type tun_type,
- const u8 **pkt, u16 *pkt_len,
- const struct ice_dummy_pkt_offsets **offsets)
+ enum ice_sw_tunnel_type tun_type)
{
- bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
+ u32 match = 0, vlan_count = 0;
u16 i;
+ switch (tun_type) {
+ case ICE_SW_TUN_GTPC:
+ match |= ICE_PKT_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_GTPU:
+ match |= ICE_PKT_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ match |= ICE_PKT_TUN_NVGRE;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ match |= ICE_PKT_TUN_UDP;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- udp = true;
+ match |= ICE_PKT_INNER_UDP;
else if (lkups[i].type == ICE_TCP_IL)
- tcp = true;
+ match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
- ipv6 = true;
- else if (lkups[i].type == ICE_VLAN_OFOS)
- vlan = true;
+ match |= ICE_PKT_OUTER_IPV6;
+ else if (lkups[i].type == ICE_VLAN_OFOS ||
+ lkups[i].type == ICE_VLAN_EX)
+ vlan_count++;
+ else if (lkups[i].type == ICE_VLAN_IN)
+ vlan_count++;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
- cpu_to_be16(0xFFFF))
- ipv6 = true;
- }
-
- if (tun_type == ICE_SW_TUN_NVGRE) {
- if (tcp) {
- *pkt = dummy_gre_tcp_packet;
- *pkt_len = sizeof(dummy_gre_tcp_packet);
- *offsets = dummy_gre_tcp_packet_offsets;
- return;
- }
-
- *pkt = dummy_gre_udp_packet;
- *pkt_len = sizeof(dummy_gre_udp_packet);
- *offsets = dummy_gre_udp_packet_offsets;
- return;
+ cpu_to_be16(0xFFFF))
+ match |= ICE_PKT_OUTER_IPV6;
+ else if (lkups[i].type == ICE_ETYPE_IL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ match |= ICE_PKT_INNER_IPV6;
+ else if (lkups[i].type == ICE_IPV6_IL)
+ match |= ICE_PKT_INNER_IPV6;
+ else if (lkups[i].type == ICE_GTP_NO_PAY)
+ match |= ICE_PKT_GTP_NOPAY;
+ else if (lkups[i].type == ICE_PPPOE) {
+ match |= ICE_PKT_PPPOE;
+ if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
+ htons(PPP_IPV6))
+ match |= ICE_PKT_OUTER_IPV6;
+ } else if (lkups[i].type == ICE_L2TPV3)
+ match |= ICE_PKT_L2TPV3;
}
- if (tun_type == ICE_SW_TUN_VXLAN ||
- tun_type == ICE_SW_TUN_GENEVE) {
- if (tcp) {
- *pkt = dummy_udp_tun_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
- *offsets = dummy_udp_tun_tcp_packet_offsets;
- return;
- }
-
- *pkt = dummy_udp_tun_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_udp_packet);
- *offsets = dummy_udp_tun_udp_packet_offsets;
- return;
- }
+ while (ret->match && (match & ret->match) != ret->match)
+ ret++;
- if (udp && !ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_packet;
- *pkt_len = sizeof(dummy_vlan_udp_packet);
- *offsets = dummy_vlan_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_packet;
- *pkt_len = sizeof(dummy_udp_packet);
- *offsets = dummy_udp_packet_offsets;
- return;
- } else if (udp && ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
- *offsets = dummy_vlan_udp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_udp_ipv6_packet);
- *offsets = dummy_udp_ipv6_packet_offsets;
- return;
- } else if ((tcp && ipv6) || ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
- *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_tcp_ipv6_packet);
- *offsets = dummy_tcp_ipv6_packet_offsets;
- return;
- }
+ if (vlan_count != 0)
+ ret = ice_dummy_packet_add_vlan(ret, vlan_count);
- if (vlan) {
- *pkt = dummy_vlan_tcp_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_packet);
- *offsets = dummy_vlan_tcp_packet_offsets;
- } else {
- *pkt = dummy_tcp_packet;
- *pkt_len = sizeof(dummy_tcp_packet);
- *offsets = dummy_tcp_packet_offsets;
- }
+ return ret;
}
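This lookup relies on ice_dummy_pkt_profiles being ordered most-specific first and terminated by a catch-all whose match mask is 0; the while loop then returns the first profile whose required bits are all present in the computed match word. A reduced sketch of that table shape (entries invented for illustration):

	/* Reduced illustration of the profile table ordering contract. */
	static const struct ice_dummy_pkt_profile example_profiles[] = {
		{ .match = ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP, /* ... */ },
		{ .match = ICE_PKT_TUN_NVGRE,                     /* ... */ },
		{ .match = ICE_PKT_OUTER_IPV6,                    /* ... */ },
		{ .match = 0 /* terminator: plain TCP/IPv4 template */ },
	};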
/**
@@ -4838,15 +5680,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* structure per protocol header
* @lkups_cnt: number of protocols
* @s_rule: stores rule information from the match criteria
- * @dummy_pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: offset info for the dummy packet
+ * @profile: dummy packet profile (the template, its size and header offsets)
*/
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- struct ice_aqc_sw_rules_elem *s_rule,
- const u8 *dummy_pkt, u16 pkt_len,
- const struct ice_dummy_pkt_offsets *offsets)
+ struct ice_sw_rule_lkup_rx_tx *s_rule,
+ const struct ice_dummy_pkt_profile *profile)
{
u8 *pkt;
u16 i;
@@ -4854,11 +5693,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
/* Start with a packet with a pre-defined/dummy content. Then, fill
* in the header values to be looked up or matched.
*/
- pkt = s_rule->pdata.lkup_tx_rx.hdr;
+ pkt = s_rule->hdr_data;
- memcpy(pkt, dummy_pkt, pkt_len);
+ memcpy(pkt, profile->pkt, profile->pkt_len);
for (i = 0; i < lkups_cnt; i++) {
+ const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
enum ice_protocol_type type;
u16 offset = 0, len = 0, j;
bool found = false;
@@ -4884,9 +5724,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ether_hdr);
break;
case ICE_ETYPE_OL:
+ case ICE_ETYPE_IL:
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
+ case ICE_VLAN_EX:
+ case ICE_VLAN_IN:
len = sizeof(struct ice_vlan_hdr);
break;
case ICE_IPV4_OFOS:
@@ -4912,6 +5755,16 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GENEVE:
len = sizeof(struct ice_udp_tnl_hdr);
break;
+ case ICE_GTP_NO_PAY:
+ case ICE_GTP:
+ len = sizeof(struct ice_udp_gtp_hdr);
+ break;
+ case ICE_PPPOE:
+ len = sizeof(struct ice_pppoe_hdr);
+ break;
+ case ICE_L2TPV3:
+ len = sizeof(struct ice_l2tpv3_sess_hdr);
+ break;
default:
return -EINVAL;
}
@@ -4927,16 +5780,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* indicated by the mask to make sure we don't improperly write
* over any significant packet data.
*/
- for (j = 0; j < len / sizeof(u16); j++)
- if (((u16 *)&lkups[i].m_u)[j])
- ((u16 *)(pkt + offset))[j] =
- (((u16 *)(pkt + offset))[j] &
- ~((u16 *)&lkups[i].m_u)[j]) |
- (((u16 *)&lkups[i].h_u)[j] &
- ((u16 *)&lkups[i].m_u)[j]);
+ for (j = 0; j < len / sizeof(u16); j++) {
+ u16 *ptr = (u16 *)(pkt + offset);
+ u16 mask = lkups[i].m_raw[j];
+
+ if (!mask)
+ continue;
+
+ ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
+ }
}
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+ s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
return 0;
}
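The merge loop above only disturbs bits the caller actually masked, so the rest of the template word survives. A worked example on one 16-bit word, with hypothetical values:

	/* template word 0x4588, header value 0x1234, mask 0x00FF:
	 *   (0x4588 & ~0x00FF) | (0x1234 & 0x00FF)
	 * = 0x4500            | 0x0034
	 * = 0x4534   -- the unmasked upper byte of the template is kept.
	 */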
@@ -4986,6 +5841,36 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
/**
+ * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @vlan_type: VLAN tag type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static int
+ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 i;
+
+ /* Find VLAN header and insert VLAN TPID */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_VLAN_OFOS ||
+ offsets[i].type == ICE_VLAN_EX) {
+ struct ice_vlan_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_vlan_hdr *)&pkt[offset];
+ hdr->type = cpu_to_be16(vlan_type);
+
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
* ice_find_adv_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5020,6 +5905,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
rinfo->tun_type == list_itr->rule_info.tun_type &&
+ rinfo->vlan_type == list_itr->rule_info.vlan_type &&
lkups_matched)
return list_itr;
}
@@ -5159,12 +6045,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_rule_query_data *added_entry)
{
struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
- u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
- const struct ice_dummy_pkt_offsets *pkt_offsets;
- struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
+ const struct ice_dummy_pkt_profile *profile;
+ u16 rid = 0, i, rule_buf_sz, vsi_handle;
struct list_head *rule_head;
struct ice_switch_info *sw;
- const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
int status;
@@ -5182,34 +6067,37 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* get # of words we need to match */
word_cnt = 0;
for (i = 0; i < lkups_cnt; i++) {
- u16 j, *ptr;
+ u16 j;
- ptr = (u16 *)&lkups[i].m_u;
- for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
- if (ptr[j] != 0)
+ for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
+ if (lkups[i].m_raw[j])
word_cnt++;
}
- if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
+ if (!word_cnt)
return -EINVAL;
- /* make sure that we can locate a dummy packet */
- ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
- &pkt_offsets);
- if (!pkt) {
- status = -EINVAL;
- goto err_ice_add_adv_rule;
- }
+ if (word_cnt > ICE_MAX_CHAIN_WORDS)
+ return -ENOSPC;
+
+ /* locate a dummy packet */
+ profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return -EIO;
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ status = -EIO;
+ goto free_pkt_profile;
+ }
vsi_handle = rinfo->sw_act.vsi_handle;
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return -EINVAL;
+ if (!ice_is_vsi_valid(hw, vsi_handle)) {
+ status = -EINVAL;
+ goto free_pkt_profile;
+ }
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -5219,7 +6107,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
if (status)
- return status;
+ goto free_pkt_profile;
m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
if (m_entry) {
/* we have to add VSI to VSI_LIST and increment vsi_count.
@@ -5238,12 +6126,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
}
- return status;
+ goto free_pkt_profile;
}
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ if (!s_rule) {
+ status = -ENOMEM;
+ goto free_pkt_profile;
+ }
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -5289,26 +6179,33 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* by caller)
*/
if (rinfo->rx) {
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->pdata.lkup_tx_rx.src =
- cpu_to_le16(hw->port_info->lport);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
} else {
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(rinfo->sw_act.src);
}
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+ s_rule->recipe_id = cpu_to_le16(rid);
+ s_rule->act = cpu_to_le32(act);
- status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
- pkt_len, pkt_offsets);
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
if (status)
goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN) {
+ if (rinfo->tun_type != ICE_NON_TUN &&
+ rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
- s_rule->pdata.lkup_tx_rx.hdr,
- pkt_offsets);
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
+ if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
+ status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
if (status)
goto err_ice_add_adv_rule;
}
@@ -5335,8 +6232,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
adv_fltr->lkups_cnt = lkups_cnt;
adv_fltr->rule_info = *rinfo;
- adv_fltr->rule_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
sw = hw->switch_info;
sw->recp_list[rid].adv_rule = true;
rule_head = &sw->recp_list[rid].filt_rules;
@@ -5359,6 +6255,13 @@ err_ice_add_adv_rule:
kfree(s_rule);
+free_pkt_profile:
+ if (profile->match & ICE_PKT_KMALLOC) {
+ kfree(profile->offsets);
+ kfree(profile->pkt);
+ kfree(profile);
+ }
+
return status;
}
@@ -5551,7 +6454,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, &lkup_exts);
+ status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
if (status)
return status;
@@ -5584,17 +6487,16 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
mutex_unlock(rule_lock);
if (remove_rule) {
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 rule_buf_sz;
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
- s_rule->pdata.lkup_tx_rx.act = 0;
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(list_elem->rule_info.fltr_rule_id);
- s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ s_rule->act = 0;
+ s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->hdr_len = 0;
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8334beaaa8a..68d8e8a6a189 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,8 +14,14 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+/* Switch Profile IDs for Profile related switch rules */
+#define ICE_PROFID_IPV4_GTPC_TEID 41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
+#define ICE_PROFID_IPV4_GTPU_TEID 43
+#define ICE_PROFID_IPV6_GTPC_TEID 44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
+#define ICE_PROFID_IPV6_GTPU_TEID 46
+#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
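The fixed-offset size define removed above gives way to struct_size()-based helpers keyed to each flexible rule struct. A sketch of how such macros would look, inferred from their call sites in this patch (exact definitions assumed):

	/* Assumed shape of the new sizing helpers: each computes
	 * header-plus-flexible-array bytes with struct_size().
	 */
	#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
	#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
		ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
	#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
		ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
	#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
	#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))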
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
@@ -33,15 +39,6 @@ struct ice_vsi_ctx {
struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
-enum ice_sw_fwd_act_type {
- ICE_FWD_TO_VSI = 0,
- ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
- ICE_FWD_TO_Q,
- ICE_FWD_TO_QGRP,
- ICE_DROP_PACKET,
- ICE_INVAL_ACT
-};
-
/* Switch recipe ID enum values are specific to hardware */
enum ice_sw_lkup_type {
ICE_SW_LKUP_ETHERTYPE = 0,
@@ -86,6 +83,8 @@ struct ice_fltr_info {
} mac_vlan;
struct {
u16 vlan_id;
+ u16 tpid;
+ u8 tpid_valid;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
@@ -125,10 +124,27 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_update_recipe_lkup_idx_params {
+ u16 rid;
+ u16 fv_idx;
+ bool ignore_valid;
+ u16 mask;
+ bool mask_valid;
+ u8 lkup_idx;
+};
+
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
- union ice_prot_hdr h_u; /* Header values */
- union ice_prot_hdr m_u; /* Mask of header values to match */
+ union {
+ union ice_prot_hdr h_u; /* Header values */
+ /* Used to iterate over the headers */
+ u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
+ union {
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+ /* Used to iterate over header mask */
+ u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
};
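The anonymous unions let callers scan header and mask as plain u16 words without casts, as the updated ice_add_adv_rule does when counting significant words. A self-contained illustration of that pattern:

	/* Illustrative word count over the mask, mirroring ice_add_adv_rule. */
	static u16 example_count_words(const struct ice_adv_lkup_elem *lkup)
	{
		u16 j, cnt = 0;

		for (j = 0; j < ARRAY_SIZE(lkup->m_raw); j++)
			if (lkup->m_raw[j])
				cnt++;
		return cnt;
	}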
struct ice_sw_act_ctrl {
@@ -176,6 +192,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
+ u16 vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
@@ -342,7 +359,13 @@ int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
/* Promisc/defport setup for VSIs */
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction);
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
+
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
@@ -367,4 +390,8 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params);
+void ice_change_proto_id_to_dvm(void);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e8aab664270a..f68c555be4e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -24,18 +24,26 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ lkups_cnt++;
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))
+ lkups_cnt++;
+
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
- /* currently inner etype filter isn't supported */
- if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
- fltr->tunnel_type == TNL_LAST)
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
lkups_cnt++;
/* are MAC fields specified? */
@@ -43,7 +51,16 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
lkups_cnt++;
/* is VLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
+ lkups_cnt++;
+
+ /* is CVLAN specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
+ lkups_cnt++;
+
+ /* are PPPoE options specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO))
lkups_cnt++;
/* are IPv[4|6] fields specified? */
@@ -51,6 +68,13 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
+ lkups_cnt++;
+
+ /* are L2TPv3 options specified? */
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
+ lkups_cnt++;
+
/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
@@ -64,6 +88,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}
+static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
+{
+ return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
+}
+
static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
@@ -96,6 +125,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GENEVE;
case TNL_GRETAP:
return ICE_NVGRE;
+ case TNL_GTPU:
+ /* NO_PAY profiles will not work with GTP-U */
+ return ICE_GTP;
+ case TNL_GTPC:
+ return ICE_GTP_NO_PAY;
default:
return 0;
}
@@ -111,11 +145,27 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GENEVE;
case TNL_GRETAP:
return ICE_SW_TUN_NVGRE;
+ case TNL_GTPU:
+ return ICE_SW_TUN_GTPU;
+ case TNL_GTPC:
+ return ICE_SW_TUN_GTPC;
default:
return ICE_NON_TUN;
}
}
+static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
+{
+ switch (vlan_tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ case ETH_P_QINQ1:
+ return vlan_tpid;
+ default:
+ return 0;
+ }
+}
+
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
struct ice_adv_lkup_elem *list)
@@ -137,7 +187,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
break;
case TNL_GRETAP:
list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
- memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+ memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
+ "\xff\xff\xff\xff", 4);
+ i++;
+ break;
+ case TNL_GTPC:
+ case TNL_GTPU:
+ list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
+ memcpy(&list[i].m_u.gtp_hdr.teid,
+ "\xff\xff\xff\xff", 4);
i++;
break;
default:
@@ -145,6 +203,33 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
}
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
+ list[i].type = ice_proto_type_from_mac(false);
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ hdr->l2_key.dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ hdr->l2_mask.dst_mac);
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+
+ if (fltr->gtp_pdu_info_masks.pdu_type) {
+ list[i].h_u.gtp_hdr.pdu_type =
+ fltr->gtp_pdu_info_keys.pdu_type << 4;
+ memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
+ }
+
+ if (fltr->gtp_pdu_info_masks.qfi) {
+ list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
+ memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
+ }
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -183,6 +268,50 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ hdr->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ hdr->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ hdr_h->hop_limit = hdr->l3_key.ttl;
+ hdr_m->hop_limit = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
hdr->l3_key.ip_proto == IPPROTO_UDP) {
list[i].type = ICE_UDP_OF;
@@ -216,16 +345,21 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
+ u16 vlan_tpid = 0;
int i = 0;
+ rule_info->vlan_type = vlan_tpid;
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
headers = &tc_fltr->inner_headers;
inner = true;
- } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
- list[i].type = ICE_ETYPE_OL;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ice_proto_type_from_etype(inner);
list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
i++;
@@ -255,10 +389,76 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
}
/* copy VLAN info */
- if (flags & ICE_TC_FLWR_FIELD_VLAN) {
- list[i].type = ICE_VLAN_OFOS;
- list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
+ vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+ rule_info->vlan_type =
+ ice_check_supported_vlan_tpid(vlan_tpid);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ list[i].type = ICE_VLAN_EX;
+ else
+ list[i].type = ICE_VLAN_OFOS;
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->vlan_hdr.vlan_prio;
+ }
+
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
+ list[i].type = ICE_VLAN_IN;
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->cvlan_hdr.vlan_prio;
+ }
+
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO)) {
+ struct ice_pppoe_hdr *vals, *masks;
+
+ vals = &list[i].h_u.pppoe_hdr;
+ masks = &list[i].m_u.pppoe_hdr;
+
+ list[i].type = ICE_PPPOE;
+
+ if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
+ vals->session_id = headers->pppoe_hdr.session_id;
+ masks->session_id = cpu_to_be16(0xFFFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
+ vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
+ masks->ppp_prot_id = cpu_to_be16(0xFFFF);
+ }
+
i++;
}
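The masks in the VLAN and CVLAN blocks above follow the 802.1Q TCI layout: PCP in bits 15-13, DEI in bit 12, VID in bits 11-0. Hence 0x0FFF matches VID only, 0xE000 matches priority only, and 0xEFFF matches both while leaving DEI wild. A worked check with hypothetical values:

	/* TCI = PCP(3) | DEI(1) | VID(12). For PCP 5, VID 100:
	 *   host TCI = (5 << 13) | 100 = 0xA064
	 * Matching VID+prio uses mask 0xEFFF, so a frame with DEI set
	 * (TCI 0xB064) still matches: 0xB064 & 0xEFFF == 0xA064.
	 */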
@@ -305,6 +505,61 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
+ if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live =
+ headers->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live =
+ headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ headers->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ headers->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ hdr_h->hop_limit = headers->l3_key.ttl;
+ hdr_m->hop_limit = headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
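
be32p_replace_bits() (from <linux/bitfield.h>) writes the ToS value into the traffic-class field of the IPv6 version/TC/flow-label word in place; ICE_IPV6_HDR_TC_MASK (0xFF00000, defined in ice_tc_lib.h below) selects bits 27:20, since the 32-bit word packs version (31:28), traffic class (27:20) and flow label (19:0). An equivalent standalone sketch of the bit surgery (illustrative only, host byte order for simplicity):

/* Illustrative sketch: replacing the TC field of the ver/tc/flow word. */
#include <assert.h>
#include <stdint.h>

#define IPV6_HDR_TC_MASK 0x0FF00000u /* traffic class, bits 27:20 */

static uint32_t replace_tc(uint32_t ver_tc_flow, uint8_t tc)
{
	ver_tc_flow &= ~IPV6_HDR_TC_MASK;
	ver_tc_flow |= ((uint32_t)tc << 20) & IPV6_HDR_TC_MASK;
	return ver_tc_flow;
}

int main(void)
{
	/* version 6, TC 0, flow label 0 -> version 6, TC 0xB8 (EF PHB) */
	assert(replace_tc(0x60000000u, 0xB8) == 0x6B800000u);
	return 0;
}
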
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
+ list[i].type = ICE_L2TPV3;
+
+ list[i].h_u.l2tpv3_sess_hdr.session_id =
+ headers->l2tpv3_hdr.session_id;
+ list[i].m_u.l2tpv3_sess_hdr.session_id =
+ cpu_to_be32(0xFFFFFFFF);
+
+ i++;
+ }
+
/* copy L4 (src, dest) port */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
@@ -344,6 +599,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev))
return TNL_GRETAP;
+
+ /* Assume GTP-U by default in case of GTP netdev.
+ * GTP-C may be selected later, based on enc_dst_port.
+ */
+ if (netif_is_gtp(tunnel_dev))
+ return TNL_GTPU;
return TNL_LAST;
}
@@ -463,6 +724,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
+ fltr->dest_id = rule_added.vsi_handle;
exit:
kfree(list);
@@ -561,7 +823,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
@@ -600,6 +861,31 @@ exit:
}
/**
+ * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: Pointer to outer header fields
+ * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
+ */
+static u16
+ice_tc_set_pppoe(struct flow_match_pppoe *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match->mask->session_id) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
+ headers->pppoe_hdr.session_id = match->key->session_id;
+ }
+
+ if (match->mask->ppp_proto) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
+ headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
+ }
+
+ return be16_to_cpu(match->key->type);
+}
+
+/**
* ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
* @match: Pointer to flow match structure
* @fltr: Pointer to filter structure
@@ -693,6 +979,40 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
}
/**
+ * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel
+ */
+static void
+ice_tc_set_tos_ttl(struct flow_match_ip *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ bool is_encap)
+{
+ if (match->mask->tos) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
+
+ headers->l3_key.tos = match->key->tos;
+ headers->l3_mask.tos = match->mask->tos;
+ }
+
+ if (match->mask->ttl) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
+
+ headers->l3_key.ttl = match->key->ttl;
+ headers->l3_mask.ttl = match->mask->ttl;
+ }
+}
+
+/**
* ice_tc_set_port - Parse ports from TC flower filter
* @match: Flow match structure
* @fltr: Pointer to filter structure
@@ -709,7 +1029,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+
headers->l4_key.dst_port = match.key->dst;
headers->l4_mask.dst_port = match.mask->dst;
}
@@ -718,7 +1038,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+
headers->l4_key.src_port = match.key->src;
headers->l4_mask.src_port = match.mask->src;
}
@@ -743,6 +1063,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
return NULL;
}
+/**
+ * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ *
+ * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
+ * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
+ * therefore making GTP-U the default choice (when destination port number is
+ * not specified).
+ */
+static int
+ice_parse_gtp_type(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr)
+{
+ u16 dst_port;
+
+ if (match.key->dst) {
+ dst_port = be16_to_cpu(match.key->dst);
+
+ switch (dst_port) {
+ case 2152:
+ break;
+ case 2123:
+ fltr->tunnel_type = TNL_GTPC;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
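
The two literals are the IANA/3GPP well-known ports: UDP 2152 carries GTP-U (user plane) and UDP 2123 carries GTP-C (control plane); anything else is rejected. The same decision, sketched standalone with named constants (the names are illustrative, the driver keeps the literals):

/* Illustrative sketch of the port-based GTP classification above. */
#define GTPU_UDP_PORT 2152 /* 3GPP user plane */
#define GTPC_UDP_PORT 2123 /* 3GPP control plane */

enum gtp_kind { GTP_U, GTP_C, GTP_UNSUPPORTED };

enum gtp_kind classify_gtp_port(unsigned short dst_port)
{
	if (!dst_port)
		return GTP_U; /* no enc_dst_port given: keep the GTP-U default */

	switch (dst_port) {
	case GTPU_UDP_PORT:
		return GTP_U;
	case GTPC_UDP_PORT:
		return GTP_C;
	default:
		return GTP_UNSUPPORTED;
	}
}
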
+
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
@@ -787,10 +1141,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- headers->l3_key.tos = match.key->tos;
- headers->l3_key.ttl = match.key->ttl;
- headers->l3_mask.tos = match.mask->tos;
- headers->l3_mask.ttl = match.mask->ttl;
+ ice_tc_set_tos_ttl(&match, fltr, headers, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
@@ -798,8 +1149,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
- if (ice_tc_set_port(match, fltr, headers, true))
- return -EINVAL;
+
+ if (fltr->tunnel_type != TNL_GTPU) {
+ if (ice_tc_set_port(match, fltr, headers, true))
+ return -EINVAL;
+ } else {
+ if (ice_parse_gtp_type(match, fltr))
+ return -EINVAL;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
}
return 0;
@@ -830,6 +1201,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
@@ -837,8 +1209,12 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
@@ -878,7 +1254,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
n_proto_key = ntohs(match.key->n_proto);
n_proto_mask = ntohs(match.mask->n_proto);
- if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
+ fltr->tunnel_type == TNL_GTPU ||
+ fltr->tunnel_type == TNL_GTPC) {
n_proto_key = 0;
n_proto_mask = 0;
} else {
@@ -932,16 +1310,71 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
return -EINVAL;
}
}
- headers->vlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ headers->vlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
+
+ if (match.mask->vlan_tpid)
+ headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
+ return -EINVAL;
+ }
+
+ flow_rule_match_cvlan(rule, &match);
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Bad CVLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ headers->cvlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
+ struct flow_match_pppoe match;
+
+ flow_rule_match_pppoe(rule, &match);
+ n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
+
+ /* If ethertype equals ETH_P_PPP_SES, n_proto might be
+ * overwritten by encapsulated protocol (ppp_proto field) or set
+ * to 0. To correct this, flow_match_pppoe provides the type
+ * field, which contains the actual ethertype (ETH_P_PPP_SES).
+ */
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -968,6 +1401,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ ice_tc_set_tos_ttl(&match, fltr, headers, false);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
+ struct flow_match_l2tpv3 match;
+
+ flow_rule_match_l2tpv3(rule, &match);
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
+ headers->l2tpv3_hdr.session_id = match.key->session_id;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
@@ -1059,12 +1508,24 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
* this code won't do anything
* 2. For non-tunnel, if user didn't specify MAC address, add implicit
* dest MAC to be lower netdev's active unicast MAC address
+ * 3. For tunnel, the TC filter via the flower classifier currently has no
+ * provision for the user to specify the outer DMAC, hence the driver
+ * implicitly adds the outer dest MAC as the lower netdev's active unicast
+ * MAC address.
*/
- if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
+ if (fltr->tunnel_type != TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
+
+ if (fltr->tunnel_type == TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
- main_vsi->netdev->dev_addr);
+ vsi->netdev->dev_addr);
eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
- fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
}
/* validate specified dest MAC address, make sure either it belongs to
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 319049477959..92642faad595 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -22,9 +22,22 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
+#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
+#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
+#define ICE_TC_FLWR_FIELD_IP_TOS BIT(22)
+#define ICE_TC_FLWR_FIELD_IP_TTL BIT(23)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TOS BIT(24)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TTL BIT(25)
+#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
+#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
+#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+#define ICE_IPV6_HDR_TC_MASK 0xFF00000
+
struct ice_indr_block_priv {
struct net_device *netdev;
struct ice_netdev_priv *np;
@@ -38,7 +51,13 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
- u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_tpid;
+};
+
+struct ice_tc_pppoe_hdr {
+ __be16 session_id;
+ __be16 ppp_proto;
};
struct ice_tc_l2_hdr {
@@ -70,6 +89,10 @@ struct ice_tc_l3_hdr {
u8 ttl;
};
+struct ice_tc_l2tpv3_hdr {
+ __be32 session_id;
+};
+
struct ice_tc_l4_hdr {
__be16 dst_port;
__be16 src_port;
@@ -80,6 +103,9 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_l2_hdr l2_key;
struct ice_tc_l2_hdr l2_mask;
struct ice_tc_vlan_hdr vlan_hdr;
+ struct ice_tc_vlan_hdr cvlan_hdr;
+ struct ice_tc_pppoe_hdr pppoe_hdr;
+ struct ice_tc_l2tpv3_hdr l2tpv3_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
@@ -119,6 +145,8 @@ struct ice_tc_flower_fltr {
struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
struct ice_vsi *src_vsi;
__be32 tenant_id;
+ struct gtp_pdu_session_info gtp_pdu_info_keys;
+ struct gtp_pdu_session_info gtp_pdu_info_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index cf685247c07a..ae98d5a8ff60 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -216,6 +216,30 @@ DEFINE_EVENT(ice_xmit_template, name, \
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
+ TP_PROTO(struct sk_buff *skb, int idx),
+
+ TP_ARGS(skb, idx),
+
+ TP_STRUCT__entry(__field(void *, skb)
+ __field(int, idx)),
+
+ TP_fast_assign(__entry->skb = skb;
+ __entry->idx = idx;),
+
+ TP_printk("skb %pK idx %d",
+ __entry->skb, __entry->idx)
+);
+#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_tstamp_template, name, \
+ TP_PROTO(struct sk_buff *skb, int idx), \
+ TP_ARGS(skb, idx))
+
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_request);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3e38695f1c9d..dbe80e5053a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -8,6 +8,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -173,6 +174,8 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
+ tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -221,8 +224,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
struct ice_tx_buf *tx_buf;
/* get the bql data ready */
- if (!ice_ring_is_xdp(tx_ring))
- netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
@@ -311,10 +313,6 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
@@ -612,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -623,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
spin_lock(&xdp_ring->tx_lock);
} else {
+ /* Generally, should not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
xdp_ring = vsi->xdp_rings[queue_index];
}
@@ -983,15 +984,17 @@ static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(xdp->data);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ ICE_RX_HDR_SIZE + metasize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -1003,8 +1006,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
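
xdp->data_meta sits directly in front of xdp->data, so one memcpy starting at data_meta pulls metadata and packet headers together; skb_metadata_set() then records the metadata length and __skb_pull() restores skb->data to the packet start. For reference, a rough sketch of the XDP program side that produces such metadata (BPF C, assuming libbpf conventions; illustrative only):

/* Illustrative XDP sketch (not driver code): reserving 4 bytes of metadata
 * in front of the packet, which ice_construct_skb() above then preserves.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_put_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* grow the metadata area by 4 bytes (moves data_meta down) */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if ((void *)(meta + 1) > data)	/* verifier bounds check */
		return XDP_PASS;

	*meta = 0xdeadbeef;		/* value later visible via skb metadata */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
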
@@ -1080,7 +1088,7 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1142,7 +1150,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
* hardware wrote DD then it will be non-zero
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -1156,7 +1164,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct ice_vsi *ctrl_vsi = rx_ring->vsi;
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf_id != ICE_INVAL_VFID)
+ ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
@@ -1228,14 +1236,13 @@ construct_skb:
continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits))) {
dev_kfree_skb_any(skb);
continue;
}
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb)) {
@@ -1460,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ wd = ice_xmit_zc(tx_ring);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1513,7 +1520,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done))) {
+ if (napi_complete_done(napi, work_done)) {
ice_net_dim(q_vector);
ice_enable_interrupt(q_vector);
} else {
@@ -1745,18 +1752,26 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol)) {
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- protocol = vlan_get_protocol(skb);
-
- if (protocol == htons(ETH_P_IP))
+ /* Set the tx_flags to indicate the IP protocol type. This is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
+ else if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
@@ -1917,12 +1932,16 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
return;
- /* currently, we always assume 802.1Q for VLAN insertion as VLAN
- * insertion for 802.1AD is not supported
+ /* The VLAN ethertype/TPID is determined by the VSI configuration and
+ * netdev feature flags; the driver only allows either 802.1Q or 802.1ad
+ * VLAN offloads at a time, so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
@@ -1950,6 +1969,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
+ __be16 protocol;
u32 paylen;
u8 l4_start;
int err;
@@ -1965,8 +1985,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
return err;
/* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -2233,8 +2258,10 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
/* Grab an open timestamp slot */
idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
- if (idx < 0)
+ if (idx < 0) {
+ tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
return;
+ }
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
@@ -2295,6 +2322,13 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* prepare the VLAN tagging flags for Tx */
ice_tx_prepare_vlan_flags(tx_ring, first);
+ if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_IL2TAG2 <<
+ ICE_TXD_CTX_QW1_CMD_S));
+ offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+ ICE_TX_FLAGS_VLAN_S;
+ }
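
The tag recovered here lives in the upper 16 bits of first->tx_flags (ICE_TX_FLAGS_VLAN_M / ICE_TX_FLAGS_VLAN_S in ice_txrx.h below), so the masked shift is just the inverse of the store done in ice_tx_prepare_vlan_flags(). Worked through with a concrete value (illustrative only):

/* Illustrative arithmetic: round-tripping a VLAN tag through tx_flags. */
#include <assert.h>
#include <stdint.h>

#define TX_FLAGS_VLAN_M 0xffff0000u
#define TX_FLAGS_VLAN_S 16

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t tag = 0x2063;	/* prio 1, VID 99 */

	tx_flags |= (uint32_t)tag << TX_FLAGS_VLAN_S;	/* store */
	assert(((tx_flags & TX_FLAGS_VLAN_M) >> TX_FLAGS_VLAN_S) == tag);
	return 0;
}
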
/* set up TSO offload */
tso = ice_tso(first, &offload);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b7b3bd4816f0..932b5661ec4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,7 +13,6 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
-#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -111,6 +110,8 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
+#define ICE_RING_QUARTER(R) ((R)->count >> 2)
+
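
ICE_RING_QUARTER replaces the fixed ICE_TX_THRESH of 32 removed above, so the XDP clean/RS cadence now scales with ring size. A quick check of the arithmetic (illustrative only):

/* Illustrative arithmetic for the quarter-ring threshold. */
#include <assert.h>

#define RING_QUARTER(count) ((count) >> 2)

int main(void)
{
	assert(RING_QUARTER(1024) == 256); /* large ring: bigger batches */
	assert(RING_QUARTER(64) == 16);    /* small ring: below the old fixed 32 */
	return 0;
}
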
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
@@ -122,6 +123,7 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
+#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
@@ -131,6 +133,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
+#define ICE_XDP_EXIT BIT(3)
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -292,10 +295,11 @@ struct ice_rx_ring {
struct xsk_buff_pool *xsk_pool;
struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
u64 cached_phctime;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
} ____cacheline_internodealigned_in_smp;
@@ -321,18 +325,21 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
struct ice_txq_stats tx_stats;
-
/* CL3 - 3rd cacheline starts here */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
struct ice_ptp_tx *tx_tstamps;
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
+ /* CL4 - 4th cacheline starts here */
+ u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_tx;
@@ -379,9 +386,14 @@ struct ice_ring_container {
/* this matches the maximum number of ITR bits, but in usec
* values, so it is shifted left one bit (bit zero is ignored)
*/
- u16 itr_setting:13;
- u16 itr_reserved:2;
- u16 itr_mode:1;
+ union {
+ struct {
+ u16 itr_setting:13;
+ u16 itr_reserved:2;
+ u16 itr_mode:1;
+ };
+ u16 itr_settings;
+ };
enum ice_container_type type;
};
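
Overlaying the bitfields with a plain u16 lets callers snapshot and restore the complete ITR state in one assignment rather than copying setting, reserved and mode fields individually. A minimal standalone sketch of the pattern (illustrative only):

/* Illustrative sketch (not driver code): why the itr_settings union helps. */
#include <assert.h>
#include <stdint.h>

struct ring_container {
	union {
		struct {
			uint16_t itr_setting:13;
			uint16_t itr_reserved:2;
			uint16_t itr_mode:1;
		};
		uint16_t itr_settings;
	};
};

int main(void)
{
	struct ring_container rc = { .itr_setting = 50, .itr_mode = 1 };
	uint16_t saved = rc.itr_settings;	/* one-shot snapshot */

	rc.itr_settings = 0;			/* e.g. cleared by a rebuild */
	rc.itr_settings = saved;		/* one-shot restore */
	assert(rc.itr_setting == 50 && rc.itr_mode == 1);
	return 0;
}
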
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 0e87b98e0966..7ee38d02d1e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -209,9 +209,14 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
+ netdev_features_t features = rx_ring->netdev->features;
+ bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -222,6 +227,7 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
unsigned int total_bytes = 0, total_pkts = 0;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *next_dd_desc;
u16 next_dd = xdp_ring->next_dd;
@@ -233,7 +239,7 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
return;
- for (i = 0; i < ICE_TX_THRESH; i++) {
+ for (i = 0; i < tx_thresh; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
total_bytes += tx_buf->bytecount;
@@ -254,9 +260,9 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
}
next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_dd = tx_thresh - 1;
xdp_ring->next_to_clean = ntc;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}
@@ -269,12 +275,13 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
*/
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
@@ -300,13 +307,14 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
+ xdp_ring->xdp_tx_active++;
i++;
if (i == xdp_ring->count) {
i = 0;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = tx_thresh - 1;
}
xdp_ring->next_to_use = i;
@@ -314,7 +322,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += ICE_TX_THRESH;
+ xdp_ring->next_rs += tx_thresh;
}
return ICE_XDP_TX;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 11b6c1601986..c7d2954dc9ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -7,7 +7,7 @@
/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
*
* This function does some fast chicanery in order to return the
@@ -16,9 +16,9 @@
* at offset zero.
*/
static inline bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
- return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+ return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
static inline __le64
@@ -32,6 +32,30 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * @rx_desc: Rx 32b flex descriptor with RXDID=2
+ *
+ * The OS and current PF implementation only support stripping a single VLAN tag
+ * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
+ * one is found return the tag, else return 0 to mean no VLAN tag was found.
+ */
+static inline u16
+ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+{
+ u16 stat_err_bits;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag1);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag2_2nd);
+
+ return 0;
+}
+
+/**
* ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
* @xdp_ring: XDP Tx ring
*
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 546145dd1f02..e1abfcee96dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -9,12 +9,14 @@
#define ICE_CHNL_MAX_TC 16
#include "ice_hw_autogen.h"
+#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
+#include "ice_vlan_mode.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -54,6 +56,11 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
+#define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \
+ ICE_DBG_AQ_DESC | \
+ ICE_DBG_AQ_DESC_BUF | \
+ ICE_DBG_AQ_CMD)
+
#define ICE_DBG_USER BIT_ULL(31)
enum ice_aq_res_ids {
@@ -340,6 +347,7 @@ struct ice_ts_func_info {
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
+#define ICE_TS_LL_TX_TS_READ_M BIT(28)
struct ice_ts_dev_info {
/* Device specific info */
@@ -352,6 +360,7 @@ struct ice_ts_dev_info {
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
+ u8 ts_ll_read;
};
/* Function specific capabilities */
@@ -557,6 +566,8 @@ enum ice_rl_type {
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
+#define ICE_MAX_PORT_PER_PCI_DEV 8
+
/* Data structure for saving BW information */
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
@@ -686,10 +697,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
@@ -882,8 +889,6 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
- u8 ucast_shared; /* true if VSIs can share unicast addr */
-
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
@@ -920,6 +925,9 @@ struct ice_hw {
struct udp_tunnel_nic_shared udp_tunnel_shared;
struct udp_tunnel_nic_info udp_tunnel_nic;
+ /* dvm boost update information */
+ struct ice_dvm_table dvm_upd;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
@@ -943,6 +951,7 @@ struct ice_hw {
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
+ u8 dvm_ena;
u16 io_expander_handle;
};
@@ -1008,6 +1017,15 @@ struct ice_hw_port_stats {
u64 fd_sb_match;
};
+enum ice_sw_fwd_act_type {
+ ICE_FWD_TO_VSI = 0,
+ ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
+ ICE_FWD_TO_Q,
+ ICE_FWD_TO_QGRP,
+ ICE_DROP_PACKET,
+ ICE_INVAL_ACT
+};
+
struct ice_aq_get_set_rss_lut_params {
u16 vsi_handle; /* software VSI handle */
u16 lut_size; /* size of the LUT buffer */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
new file mode 100644
index 000000000000..1c51778db951
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -0,0 +1,1132 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+
+/* Public functions which may be accessed by all driver files */
+
+/**
+ * ice_get_vf_by_id - Get pointer to VF by ID
+ * @pf: the PF private structure
+ * @vf_id: the VF ID to locate
+ *
+ * Locate and return a pointer to the VF structure associated with a given ID.
+ * Returns NULL if the ID does not have a valid VF structure associated with
+ * it.
+ *
+ * This function takes a reference to the VF, which must be released by
+ * calling ice_put_vf() once the caller is finished accessing the VF structure
+ * returned.
+ */
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ struct ice_vf *vf;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
+ if (vf->vf_id == vf_id) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * ice_release_vf - Release VF associated with a refcount
+ * @ref: the kref decremented to zero
+ *
+ * Callback function for kref_put to release a VF once its reference count has
+ * hit zero.
+ */
+static void ice_release_vf(struct kref *ref)
+{
+ struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+
+ vf->vf_ops->free(vf);
+}
+
+/**
+ * ice_put_vf - Release a reference to a VF
+ * @vf: the VF structure to decrease reference count on
+ *
+ * Decrease the reference count for a VF, and free the entry if it is no
+ * longer in use.
+ *
+ * This must be called after ice_get_vf_by_id() once the reference to the VF
+ * structure is no longer used. Otherwise, the VF structure will never be
+ * freed.
+ */
+void ice_put_vf(struct ice_vf *vf)
+{
+ kref_put(&vf->refcnt, ice_release_vf);
+}
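
Together these form the usual kref lookup/release pattern: every successful ice_get_vf_by_id() must be balanced by an ice_put_vf() once the pointer is no longer used. A minimal usage sketch, assuming a valid pf pointer (the wrapper name is hypothetical):

/* Minimal usage sketch, assuming a valid struct ice_pf *pf. */
static int example_touch_vf(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;
	int err;

	vf = ice_get_vf_by_id(pf, vf_id);	/* takes a reference */
	if (!vf)
		return -ENOENT;

	err = ice_check_vf_ready_for_cfg(vf);	/* safe: reference pins vf */

	ice_put_vf(vf);				/* balance the reference */
	return err;
}
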
+
+/**
+ * ice_has_vfs - Return true if the PF has any associated VFs
+ * @pf: the PF private structure
+ *
+ * Return whether or not the PF has any allocated VFs.
+ *
+ * Note that this function only guarantees that there are no VFs at the point
+ * of calling it. It does not guarantee that no more VFs will be added.
+ */
+bool ice_has_vfs(struct ice_pf *pf)
+{
+ /* A simple check that the hash table is not empty does not require
+ * the mutex or rcu_read_lock.
+ */
+ return !hash_empty(pf->vfs.table);
+}
+
+/**
+ * ice_get_num_vfs - Get number of allocated VFs
+ * @pf: the PF private structure
+ *
+ * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
+ * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
+ * the output of this function.
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u16 num_vfs = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ num_vfs++;
+ rcu_read_unlock();
+
+ return num_vfs;
+}
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ if (vf->lan_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return vf->pf->vsi[vf->lan_vsi_idx];
+}
+
+/**
+ * ice_is_vf_disabled - check if the VF or its PF is disabled
+ * @vf: pointer to the VF info
+ *
+ * If the PF has been disabled, there is no need to reset the VF until the
+ * PF is active again. Similarly, if the VF has been disabled, something
+ * else is resetting the VF, so we shouldn't continue.
+ *
+ * Returns true if the caller should consider the VF as disabled whether
+ * because that single VF is explicitly disabled or because the PF is
+ * currently disabled.
+ */
+bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ return (test_bit(ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: the VF being reset
+ *
+ * The max poll time is ~800ms, which is about the maximum time it takes
+ * for a VF to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+ }
+}
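
The ~800 ms figure follows from the loop bounds: assuming the driver's usual constants of ICE_MAX_VF_RESET_TRIES = 40 and ICE_MAX_VF_RESET_SLEEP_MS = 20, the worst case is 40 * 20 ms = 800 ms of sleeping before the poll gives up.
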
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ if (ice_check_vf_init(vf))
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ * @is_pfr: true if the reset was triggered due to a previous PFR
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+{
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * when it's safe again to access VF's VSI.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
+ */
+ if (!is_pfr)
+ vf->vf_ops->clear_mbx_register(vf);
+
+ vf->vf_ops->trigger_reset_register(vf, is_vflr);
+}
+
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (vsi)
+ vsi->num_vlan = 0;
+
+ vf->num_mac = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
+/**
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
+ *
+ * These tasks are items that don't need to be amortized since they are most
+ * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
+ */
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_clear_counters(vf);
+ vf->vf_ops->clear_reset_trigger(vf);
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (e.g. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
+ }
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return 0;
+}
+
+/**
+ * ice_is_any_vf_in_unicast_promisc - check if any VF(s) are in unicast promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
+{
+ bool is_vf_promisc = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ is_vf_promisc = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_vf_promisc;
+}
+
+/**
+ * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ * @ucast_m: promiscuous mask to apply to unicast
+ * @mcast_m: promiscuous mask to apply to multicast
+ *
+ * Decide which mask should be used for unicast and multicast filter,
+ * based on presence of VLANs
+ */
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m)
+{
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
+ *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ } else {
+ *mcast_m = ICE_MCAST_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_PROMISC_BITS;
+ }
+}
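
The returned masks feed straight into the set/clear helpers that follow. A minimal usage sketch for enabling unicast promiscuous mode, assuming valid vf/vsi pointers (the wrapper name is hypothetical):

/* Minimal usage sketch, assuming valid vf/vsi pointers. */
static int example_ena_ucast_promisc(struct ice_vf *vf, struct ice_vsi *vsi)
{
	u8 ucast_m, mcast_m;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	return ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
}
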
+
+/**
+ * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ *
+ * Clear all promiscuous/allmulticast filters for a VF
+ */
+static int
+ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 ucast_m, mcast_m;
+ int ret = 0;
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ } else {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+ }
+
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
+ }
+ }
+
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
+ }
+ }
+ return ret;
+}
+
+/**
+ * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to enable
+ */
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to disable
+ */
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ *
+ * Reset all VFs at once, in response to a PF or other device reset.
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise the resets
+ * would have to be performed in a serialized fashion.
+ */
+void ice_reset_all_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!ice_has_vfs(pf))
+ return;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ /* clear all malicious info if the VFs are getting reset */
+ ice_for_each_vf(pf, bkt, vf)
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+ mutex_unlock(&pf->vfs.table_lock);
+ return;
+ }
+
+ /* Begin reset on all VFs at once */
+ ice_for_each_vf(pf, bkt, vf)
+ ice_trigger_vf_reset(vf, true, true);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Now that we've triggered all of the VFs, iterate
+ * the table again and wait for each VF to complete.
+ */
+ ice_for_each_vf(pf, bkt, vf) {
+ if (!vf->vf_ops->poll_reset_status(vf)) {
+ /* Display a warning if at least one VF didn't manage
+ * to reset in time, but continue on with the
+ * operation.
+ */
+ dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+ break;
+ }
+ }
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * set up only when the VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ vf->vf_ops->post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
+ ice_flush(hw);
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_notify_vf_reset(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct virtchnl_pf_event pfe;
+
+ /* Bail out if the VF is disabled, or is neither initialized nor active;
+ * otherwise proceed with the notification
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @flags: flags controlling behavior of the reset
+ *
+ * Flags:
+ * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
+ * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
+ * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
+ *
+ * Returns 0 if the VF is currently in reset, if resets are disabled, or if
+ * the VF resets successfully. Returns an error code if the VF fails to
+ * rebuild.
+ */
+int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_hw *hw;
+ int err = 0;
+ bool rsd;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+
+ if (flags & ICE_VF_RESET_NOTIFY)
+ ice_notify_vf_reset(vf);
+
+ if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (ice_is_vf_disabled(vf)) {
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
+ return -EINVAL;
+ }
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+
+ if (ice_vsi_is_rx_queue_active(vsi))
+ ice_vsi_stop_all_rx_rings(vsi);
+
+ dev_dbg(dev, "VF is already disabled, there is no need to reset it; telling VM all is fine %d\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
+
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi)) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ ice_dis_vf_qs(vf);
+
+ /* Call Disable LAN Tx queue AQ whether or not queues are
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ rsd = vf->vf_ops->poll_reset_status(vf);
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation anyway.
+ */
+ if (!rsd)
+ dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ /* disable promiscuous modes in case they were enabled
+ * ignore any error if disabling process failed
+ */
+ ice_vf_clear_all_promisc_modes(vf, vsi);
+
+ ice_eswitch_del_vf_mac_rule(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VF since it should be set up
+ * only when the VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+
+ if (vf->vf_ops->vsi_rebuild(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ ice_eswitch_update_repr(vsi);
+ ice_eswitch_replay_vf_mac_rule(vf);
+
+ /* if the VF has been reset allow it to come up again */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+out_unlock:
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
+ return err;
+}
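
Callers combine the flags to match their context; a host-initiated reconfiguration, for instance, would typically want the VF notified and cfg_lock taken on its behalf. A minimal sketch (hypothetical caller; illustrative only):

/* Minimal usage sketch: notify the VF and serialize against other config. */
static void example_reconfigure_vf(struct ice_vf *vf)
{
	if (ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK))
		dev_dbg(ice_pf_to_dev(vf->pf), "VF %u reset failed\n",
			vf->vf_id);
}
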
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/* Private functions only accessed from other virtualization files */
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+void ice_dis_vf_qs(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_all_rx_rings(vsi);
+ ice_set_vf_state_qs_dis(vf);
+}
+
+/**
+ * ice_check_vf_init - helper to check if VF init complete
+ * @vf: the pointer to the VF to check
+ */
+int ice_check_vf_init(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+/**
+ * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
+ * @vsi: the VSI to configure
+ * @enable: whether to enable or disable the spoof checking
+ *
+ * Configure a VSI to enable (or disable) spoof checking behavior.
+ */
+static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ else
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+/**
+ * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
+ * @vsi: VSI to enable Tx spoof checking for
+ */
+static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+	/* Only enable Tx filtering when the VSI has non-zero VLANs; a VF
+	 * with only VLAN 0 is allowed to send all tagged traffic
+	 */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+
+ return ice_cfg_mac_antispoof(vsi, true);
+}
+
+/**
+ * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
+ * @vsi: VSI to disable Tx spoof checking for
+ */
+static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ err = vlan_ops->dis_tx_filtering(vsi);
+ if (err)
+ return err;
+
+ return ice_cfg_mac_antispoof(vsi, false);
+}
+
+/**
+ * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
+ * @vsi: VSI associated to the VF
+ * @enable: whether to enable or disable the spoof checking
+ */
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
+{
+ int err;
+
+ if (enable)
+ err = ice_vsi_ena_spoofchk(vsi);
+ else
+ err = ice_vsi_dis_spoofchk(vsi);
+
+ return err;
+}
+
+/**
+ * ice_is_vf_trusted - check if the VF is trusted
+ * @vf: pointer to the VF info
+ */
+bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+
+ if (ice_check_vf_init(vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pi->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ u8 broadcast[ETH_ALEN];
+ int status;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ vf->num_mac++;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+ &vf->hw_lan_addr.addr[0], vf->vf_id,
+ status);
+ return status;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ int err;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+ } else {
+ err = ice_vsi_add_vlan_zero(vsi);
+ }
+
+ if (err) {
+ dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+ ice_vf_is_port_vlan_ena(vf) ?
+ ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err)
+ dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+ vf->vf_id, vsi->idx, err);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves VSI into corresponding scheduler aggregator node
+ * based on cached value of "aggregator node info" per VSI
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int status;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - rebuild the host configuration that persists across reset
+ * @vf: VF to rebuild host configuration on
+ */
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vf_set_host_trust_cfg(vf);
+
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+ dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+ vf->vf_id);
+
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF that control VSI is being invalidated on
+ */
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF that control VSI is being released on
+ */
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+ ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+ ice_vf_ctrl_invalidate_vsi(vf);
+ }
+
+ return vsi;
+}
+
+/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
new file mode 100644
index 000000000000..52bd9a3816bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_H_
+#define _ICE_VF_LIB_H_
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <net/devlink.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_type.h"
+#include "ice_virtchnl_fdir.h"
+#include "ice_vsi_vlan_ops.h"
+
+#define ICE_MAX_SRIOV_VFS 256
+
+/* VF resource constraints */
+#define ICE_MAX_RSS_QS_PER_VF 16
+
+struct ice_pf;
+struct ice_vf;
+struct ice_virtchnl_ops;
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+};
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
+ ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
+ ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ ICE_VF_STATES_NBITS
+};
+
+struct ice_time_mac {
+ unsigned long time_modified;
+ u8 addr[ETH_ALEN];
+};
+
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u16 last_printed;
+};
+
+/* VF operations */
+struct ice_vf_ops {
+ enum ice_disq_rst_src reset_type;
+ void (*free)(struct ice_vf *vf);
+ void (*clear_mbx_register)(struct ice_vf *vf);
+ void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
+ bool (*poll_reset_status)(struct ice_vf *vf);
+ void (*clear_reset_trigger)(struct ice_vf *vf);
+ int (*vsi_rebuild)(struct ice_vf *vf);
+ void (*post_vsi_rebuild)(struct ice_vf *vf);
+};
+
+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+ DECLARE_HASHTABLE(table, 8); /* table of VF entries */
+ struct mutex table_lock; /* Lock for protecting the hash table */
+ u16 num_supported; /* max supported VFs on this PF */
+ u16 num_qps_per; /* number of queue pairs per VF */
+ u16 num_msix_per; /* number of MSI-X vectors per VF */
+ unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
+ DECLARE_BITMAP(malvfs, ICE_MAX_SRIOV_VFS); /* malicious VF indicator */
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct hlist_node entry;
+ struct rcu_head rcu;
+ struct kref refcnt;
+ struct ice_pf *pf;
+
+ /* Used during virtchnl message handling and NDO ops against the VF
+ * that will trigger a VFR
+ */
+ struct mutex cfg_lock;
+
+ u16 vf_id; /* VF ID in the PF space */
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 ctrl_vsi_idx;
+ struct ice_vf_fdir fdir;
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
+ struct virtchnl_ether_addr dev_lan_addr;
+ struct virtchnl_ether_addr hw_lan_addr;
+ struct ice_time_mac legacy_last_added_umac;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
+ struct virtchnl_vlan_caps vlan_v2_caps;
+ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
+ u8 trusted:1;
+ u8 spoofchk:1;
+ u8 link_forced:1;
+ u8 link_up:1; /* only valid if VF link is forced */
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
+ unsigned long vf_caps; /* VF's adv. capabilities */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+ u16 num_mac;
+ u16 num_vf_qs; /* num of queue configured per VF */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
+ DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+ const struct ice_virtchnl_ops *virtchnl_ops;
+ const struct ice_vf_ops *vf_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
+};
+
+/* Flags for controlling behavior of ice_reset_vf */
+enum ice_vf_reset_flags {
+ ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */
+ ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */
+ ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */
+};
+
+static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.vid;
+}
+
+static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.prio;
+}
+
+static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
+{
+ return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
+}
+
+static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.tpid;
+}
+
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
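+
+/* A minimal usage sketch (illustrative only; the -ENODEV error path is an
+ * assumption, not taken from this file):
+ *
+ *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
+ *
+ *	if (!vf)
+ *		return -ENODEV;
+ *
+ *	... use the VF structure ...
+ *
+ *	ice_put_vf(vf);
+ */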
+
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+ hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
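+
+/* Example (illustrative; it mirrors the pattern used by
+ * ice_vc_notify_link_state() in ice_virtchnl.c):
+ *
+ *	mutex_lock(&pf->vfs.table_lock);
+ *	ice_for_each_vf(pf, bkt, vf)
+ *		ice_vc_notify_vf_link_state(vf);
+ *	mutex_unlock(&pf->vfs.table_lock);
+ */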
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+ hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
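+
+/* Example (a sketch; the "found" local is an assumption): a short,
+ * non-sleeping scan under RCU, similar to what a helper such as
+ * ice_is_any_vf_in_unicast_promisc() could do:
+ *
+ *	bool found = false;
+ *
+ *	rcu_read_lock();
+ *	ice_for_each_vf_rcu(pf, bkt, vf)
+ *		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ *			found = true;
+ *	rcu_read_unlock();
+ */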
+
+#ifdef CONFIG_PCI_IOV
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+bool ice_is_vf_disabled(struct ice_vf *vf);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m);
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int ice_reset_vf(struct ice_vf *vf, u32 flags);
+void ice_reset_all_vfs(struct ice_pf *pf);
+#else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ return 0;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ return 0;
+}
+
+static inline void ice_reset_all_vfs(struct ice_pf *pf)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
new file mode 100644
index 000000000000..15887e772c76
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_PRIVATE_H_
+#define _ICE_VF_LIB_PRIVATE_H_
+
+#include "ice_vf_lib.h"
+
+/* This header file is for exposing functions in ice_vf_lib.c to other files
+ * which are also conditionally compiled depending on CONFIG_PCI_IOV.
+ * Functions which may be used by other files should be exposed as part of
+ * ice_vf_lib.h
+ *
+ * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and
+ * thus this header must not be included by .c files which may be compiled
+ * with CONFIG_PCI_IOV disabled.
+ *
+ * To avoid this, only include this header file directly within .c files that
+ * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block.
+ */
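+
+/* For instance, a Makefile rule of roughly this shape (a sketch; the exact
+ * object list is an assumption):
+ *
+ *	ice-$(CONFIG_PCI_IOV) += ice_vf_lib.o ice_virtchnl.o ice_vf_mbx.o
+ */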
+
+#ifndef CONFIG_PCI_IOV
+#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
+#endif
+
+void ice_dis_vf_qs(struct ice_vf *vf);
+int ice_check_vf_init(struct ice_vf *vf);
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
+bool ice_is_vf_trusted(struct ice_vf *vf);
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
+bool ice_is_vf_link_up(struct ice_vf *vf);
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_set_initialized(struct ice_vf *vf);
+
+#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
new file mode 100644
index 000000000000..fc8c93fa4455
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf - send a message to the specified VF
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) over the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = ICE_LINK_SPEED_50000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = ICE_LINK_SPEED_100000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+		/* Virtchnl speeds are not defined for every speed supported by
+		 * the hardware. To maintain compatibility with older AVF
+		 * drivers, the new speed values are resolved to the closest
+		 * known virtchnl speed when the speed is reported
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ case ICE_AQ_LINK_SPEED_50GB:
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This can be
+ * done via ice_mbx_reset_snapshot().
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a VFId is received which
+ * is passed to the state handler. The boolean output is_malvf of the state
+ * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
+ * whether this VF is malicious or not.
+ *
+ * 5. When a VF is identified as malicious, the caller can send a message to
+ * the system administrator. ice_mbx_report_malvf() helps determine whether
+ * the malicious VF still needs to be reported. It requires the caller to
+ * maintain a global bitmap tracking all malicious VFs, which is passed to
+ * ice_mbx_report_malvf() along with the VF ID identified as malicious by
+ * ice_mbx_vf_state_handler().
+ *
+ * 6. The global bitmap maintained by the PF can be cleared completely if the
+ * PF is in reset, or the bit corresponding to a VF can be cleared if that VF
+ * is in reset. When a VF is shut down and brought back up, we assume that the
+ * new VF is not malicious and hence report it again if it is later found to
+ * be malicious.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ *
+ * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
+ * when driver is unloaded.
+ */
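+
+/* Caller flow sketch (illustrative only; the locals and the dev_warn() text
+ * are assumptions, while the called functions are the real entry points
+ * described above):
+ *
+ *	struct ice_mbx_data mbx_data = { ... filled per interrupt ... };
+ *	bool is_malvf, report_malvf;
+ *
+ *	if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf) &&
+ *	    is_malvf &&
+ *	    !ice_mbx_report_malvf(hw, pf->vfs.malvfs, ICE_MAX_SRIOV_VFS,
+ *				  vf_id, &report_malvf) &&
+ *	    report_malvf)
+ *		dev_warn(dev, "VF %u may be malicious\n", vf_id);
+ */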
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Use the highest unsigned 16-bit value, 0xFFFF, to indicate that the
+ * max-messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the static mailbox snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+	/* The mailbox buffer is circular, so apply a mask to the
+	 * incremented iteration count.
+	 */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+	/* Check either of the below conditions to exit snapshot traversal:
+	 * Condition-1: the iteration count equals the mailbox head, which
+	 * indicates that the end of the static snapshot has been reached.
+	 * Condition-2: if the maximum number of messages serviced per
+	 * interrupt is the highest possible value, there is no need to check
+	 * the processed-message count against it. Otherwise, exit once the
+	 * number of messages processed is greater than or equal to the
+	 * maximum number of mailbox entries serviced in the current work
+	 * item.
+	 */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_id: relative virtual function ID
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ if (vf_id >= snap->mbx_vf.vfcntr_len)
+ return -EIO;
+
+ /* increment the message count in the VF array */
+ snap->mbx_vf.vf_cntr[vf_id]++;
+
+ if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return 0;
+}
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ *
+ * Reset the mailbox snapshot structure and clear VF counter array.
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ u32 vfcntr_len;
+
+ if (!snap || !snap->mbx_vf.vf_cntr)
+ return;
+
+ /* Clear VF counters. */
+ vfcntr_len = snap->mbx_vf.vfcntr_len;
+ if (vfcntr_len)
+ memset(snap->mbx_vf.vf_cntr, 0,
+ (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
+
+ /* Reset mailbox snapshot for a new capture. */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_id: relative virtual function (VF) ID
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot,
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters
+ * the traverse or detect state.
+ * Traverse: if the pending message count is below the watermark,
+ * iterate through the snapshot without taking any action on a VF.
+ * Detect: if the pending message count exceeds the watermark,
+ * traverse the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw,
+ struct ice_mbx_data *mbx_data, u16 vf_id,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ int status = 0;
+
+ if (!is_malvf || !mbx_data)
+ return -EINVAL;
+
+ /* When entering the mailbox state machine assume that the VF
+ * is not malicious until detected.
+ */
+ *is_malvf = false;
+
+	/* The maximum number of messages allowed to be processed while
+	 * servicing the current interrupt must be greater than the defined
+	 * AVF message threshold.
+ */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return -EINVAL;
+
+	/* The watermark value must not be less than the threshold limit
+	 * set for the number of asynchronous messages a VF can send to the
+	 * mailbox, nor greater than the maximum number of messages in the
+	 * mailbox serviced in the current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return -EINVAL;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+ * Mailbox for current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+ /* Capture a new static snapshot of the mailbox by logging the
+ * head and tail of snapshot and set num_iterations to the tail
+ * value to mark the start of the iteration through the snapshot.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+		/* The number of pending ARQ messages returned by
+		 * ice_clean_rq_elem() is the difference between the head and
+		 * tail of the mailbox queue. Comparing this value against the
+		 * watermark helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = -EIO;
+ }
+
+ snap_buf->state = new_state;
+
+ return status;
+}
+
+/**
+ * ice_mbx_report_malvf - Track and note malicious VF
+ * @hw: pointer to the HW struct
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ * @report_malvf: boolean to indicate if malicious VF must be reported
+ *
+ * This function will update a bitmap that keeps track of the malicious
+ * VFs attached to the PF. A malicious VF must be reported only once
+ * between VF resets or reloads, so the function checks the input vf_id
+ * against the bitmap to verify whether the VF has already been
+ * detected in a previous mailbox iteration.
+ */
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf)
+{
+ if (!all_malvfs || !report_malvf)
+ return -EINVAL;
+
+ *report_malvf = false;
+
+ if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ return -EINVAL;
+
+ if (vf_id >= bitmap_len)
+ return -EIO;
+
+	/* If the VF is not already marked in the bitmap, set its bit and report it */
+ if (!test_and_set_bit(vf_id, all_malvfs))
+ *report_malvf = true;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
+ * @snap: pointer to the mailbox snapshot structure
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ *
+ * In case of a VF reset, this function can be called to clear
+ * the bit corresponding to the VF ID in the bitmap tracking all
+ * malicious VFs attached to the PF. The function also clears the
+ * VF counter array at the index of the VF ID. This is to ensure
+ * that the new VF loaded is not considered malicious before going
+ * through the overflow detection algorithm.
+ */
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id)
+{
+ if (!snap || !all_malvfs)
+ return -EINVAL;
+
+ if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ return -EINVAL;
+
+ /* Ensure VF ID value is not larger than bitmap or VF counter length */
+ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
+ return -EIO;
+
+ /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
+ clear_bit(vf_id, all_malvfs);
+
+ /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
+	 * This ensures that if a VF is unloaded and a new one is brought back
+	 * up with the same VF ID while a snapshot is in the traverse or detect
+	 * state, the counter for that VF ID does not increment on top of the
+	 * existing values in the mailbox overflow detection algorithm.
+ */
+ snap->mbx_vf.vf_cntr[vf_id] = 0;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ * @vf_count: number of VFs allocated on a PF
+ *
+ * Clear the mailbox snapshot structure and allocate memory
+ * for the VF counter array based on the number of VFs allocated
+ * on that PF.
+ *
+ * Assumption: This function will assume ice_get_caps() has already been
+ * called to ensure that the vf_count can be compared against the number
+ * of VFs supported as defined in the functional capabilities of the device.
+ */
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Ensure that the number of VFs allocated is non-zero and
+ * is not greater than the number of supported VFs defined in
+ * the functional capabilities of the PF.
+ */
+ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
+ return -EINVAL;
+
+ snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
+ sizeof(*snap->mbx_vf.vf_cntr),
+ GFP_KERNEL);
+ if (!snap->mbx_vf.vf_cntr)
+ return -ENOMEM;
+
+	/* Set the VF counter length to the number of VFs allocated
+	 * per the given PF's functional capabilities.
+ */
+ snap->mbx_vf.vfcntr_len = vf_count;
+
+	/* Clear mbx_buf in the mailbox snapshot structure and set the
+	 * mailbox snapshot state to a new capture.
+ */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and free the VF counter array.
+ */
+void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Free VF counter array and reset VF counter length */
+ devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
+ snap->mbx_vf.vfcntr_len = 0;
+
+	/* Clear mbx_buf in the mailbox snapshot structure */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
new file mode 100644
index 000000000000..582716e6d5f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Define the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+
+#ifdef CONFIG_PCI_IOV
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ u16 vf_id, bool *is_mal_vf);
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf);
+#else /* CONFIG_PCI_IOV */
+static inline int
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..5ecc0ee9a78e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sriov.h"
+
+static int
+noop_vlan_arg(struct ice_vsi __always_unused *vsi,
+ struct ice_vlan __always_unused *vlan)
+{
+ return 0;
+}
+
+static int
+noop_vlan(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
+ * @vsi: VF's VSI being configured
+ *
+ * If Double VLAN Mode (DVM) is enabled, assume that the VF supports the new
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability and set up the VLAN ops accordingly.
+ * If Single VLAN Mode (SVM) is enabled, maintain the same level of VLAN
+ * support as before VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* outer VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ /* inner VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+ } else {
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ }
+}
+
+/**
+ * ice_vf_vsi_cfg_dvm_legacy_vlan_mode - Config VLAN mode for old VFs in DVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Double VLAN Mode (DVM) is enabled, there
+ * is not a port VLAN enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * This function sets up the VF VSI's inner and outer ice_vsi_vlan_ops and also
+ * initializes software only VLAN mode (i.e. allow all VLANs). Also, use no-op
+ * implementations for any functions that may be called during the lifetime of
+ * the VF so these methods do nothing and succeed.
+ */
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_vf *vf = vsi->vf;
+ struct device *dev;
+
+ if (WARN_ON(!vf))
+ return;
+
+ dev = ice_pf_to_dev(vf->pf);
+
+ if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ vlan_ops = &vsi->outer_vlan_ops;
+
+	/* Rx VLAN filtering is always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ /* Don't fail when attempting to enable Rx VLAN filtering */
+ vlan_ops->ena_rx_filtering = noop_vlan;
+
+	/* Tx VLAN filtering is always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+ /* Don't fail when attempting to enable Tx VLAN filtering */
+ vlan_ops->ena_tx_filtering = noop_vlan;
+
+ if (vlan_ops->dis_rx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Rx VLAN filtering for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+ if (vlan_ops->dis_tx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Tx VLAN filtering for old VF without VIRTHCNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All outer VLAN offloads must be disabled */
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All inner VLAN offloads must be disabled */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+	vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+	vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+}
+
+/**
+ * ice_vf_vsi_cfg_svm_legacy_vlan_mode - Config VLAN mode for old VFs in SVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Single VLAN Mode (SVM) is enabled, there is
+ * not a port VLAN enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * All of the normal SVM VLAN ops are identical for this case. However,
+ * Rx VLAN filtering should be turned off by default in this case.
+ */
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ if (vsi->inner_vlan_ops.dis_rx_filtering(vsi))
+ dev_dbg(ice_pf_to_dev(vf->pf), "Failed to disable Rx VLAN filtering for old VF with VIRTCHNL_VF_OFFLOAD_VLAN support\n");
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..875a4e615f39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_VSI_VLAN_OPS_H_
+#define _ICE_VF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi);
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
+
+#ifdef CONFIG_PCI_IOV
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+#else
+static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
new file mode 100644
index 000000000000..2b4c791b6cba
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -0,0 +1,3826 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_virtchnl.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
+#include "ice_flex_pipe.h"
+#include "ice_dcb_lib.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+};
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ /* Not all VFs are enabled, so skip the ones that are not */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+ msglen, NULL);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: true to report the link as up, false to report it as down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ int ice_link_speed, bool link_up)
+{
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_status = link_up;
+ /* Speed in Mbps */
+ pfe->event_data.link_event_adv.link_speed =
+ ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+ } else {
+ pfe->event_data.link_event.link_status = link_up;
+ /* Legacy method for virtchnl link speeds */
+ pfe->event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
+ ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+ }
+}
+
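A quick sketch of the two encodings this helper selects between (the 25G
figure is an assumed example, not taken from this patch):

	/* For a 25 Gbps link:
	 *   ADV_LINK_SPEED VF: link_event_adv.link_speed = 25000 (plain Mbps)
	 *   legacy VF:         link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB
	 * Same PF-side speed, two wire encodings, chosen purely by the
	 * negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED bit.
	 */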
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_hw *hw = &vf->pf->hw;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ if (ice_is_vf_link_up(vf))
+ ice_set_pfe_link(vf, &pfe,
+ hw->port_info->phy.link_info.link_speed, true);
+ else
+ ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_vc_notify_vf_link_state(vf);
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!ice_has_vfs(pf))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ struct device *dev;
+ struct ice_pf *pf;
+ int aq_ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+
+ aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
+ vf->vf_id, aq_ret,
+ ice_aq_str(pf->hw.mailboxq.sq_last_status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this both in max frame
+ * size checks and when sending the max frame size to the VF.
+ */
+static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
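A worked example of the port-VLAN adjustment above, as a self-contained
userspace sketch (the 9728-byte port setting is an assumed value; VLAN_HLEN
is the 4-byte 802.1Q tag, as in <linux/if_vlan.h>):

	#include <stdio.h>

	#define VLAN_HLEN 4	/* 802.1Q tag length */

	int main(void)
	{
		unsigned int port_max_frame = 9728;	/* assumed port max frame size */
		int port_vlan_ena = 1;			/* VF sits in a port VLAN */
		unsigned int vf_max = port_max_frame -
				      (port_vlan_ena ? VLAN_HLEN : 0);

		printf("max frame reported to VF: %u\n", vf_max);	/* 9724 */
		return 0;
	}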
+/**
+ * ice_vc_get_vlan_caps
+ * @hw: pointer to the hw
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VSI
+ * @driver_caps: current driver caps
+ *
+ * Return 0 if no VLAN capabilities are supported, otherwise the VLAN caps value
+ */
+static u32
+ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
+ u32 driver_caps)
+{
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ /* In switchdev setting VLAN from VF isn't supported */
+ return 0;
+
+ if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ /* VLAN offloads based on current device configuration */
+ return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+ } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ /* allow the VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN only for
+ * these two conditions, which amount to guest VLAN filtering
+ * and offloads being based on the inner VLAN or the
+ * inner/single VLAN respectively; don't allow the VF to
+ * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other case
+ */
+ if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (!ice_is_dvm_ena(hw) &&
+ !ice_vf_is_port_vlan_ena(vf)) {
+ /* configure backward compatible support for VFs that
+ * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+ * configured in SVM, and no port VLAN is configured
+ */
+ ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (ice_is_dvm_ena(hw)) {
+ /* configure software offloaded VLAN support when DVM
+ * is enabled, but no port VLAN is enabled
+ */
+ ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+ }
+ }
+
+ return 0;
+}
+
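The branch structure above reduces to a small decision table. A minimal
userspace sketch (not driver code; the predicate names mirror the driver's
helpers) that reproduces the returned capability:

	#include <stdio.h>

	enum { CAP_NONE, CAP_VLAN, CAP_VLAN_V2 };

	static int vlan_caps(int wants_v2, int wants_v1, int dvm,
			     int port_vlan, int switchdev)
	{
		if (switchdev)
			return CAP_NONE;	/* VF VLAN config unsupported */
		if (wants_v2)
			return CAP_VLAN_V2;	/* offloads follow device config */
		if (wants_v1 && ((dvm && port_vlan) || (!dvm && !port_vlan)))
			return CAP_VLAN;	/* legacy negotiation allowed */
		return CAP_NONE;	/* incl. DVM + no port VLAN: SW-offloaded VLAN */
	}

	int main(void)
	{
		printf("%d\n", vlan_caps(0, 1, 1, 1, 0));	/* 1 == CAP_VLAN */
		return 0;
	}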
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_resource *vfres = NULL;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int len = 0;
+ int ret;
+
+ if (ice_check_vf_init(vf)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource);
+
+ vfres = kzalloc(len, GFP_KERNEL);
+ if (!vfres) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
+ vf->driver_caps);
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
+
+ vfres->num_vsis = 1;
+ /* Tx and Rx queue are equal for VF */
+ vfres->num_queue_pairs = vsi->num_txq;
+ vfres->max_vectors = vf->pf->vfs.num_msix_per;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->hw_lan_addr.addr);
+
+ /* match guest capabilities */
+ vf->driver_caps = vfres->vf_cap_flags;
+
+ ice_vc_set_caps_allowlist(vf);
+ ice_vc_set_working_allowlist(vf);
+
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
+ (u8 *)vfres, len);
+
+ kfree(vfres);
+ return ret;
+}
+
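An example pass through the capability if-chain above (the offered bits are
assumed, not taken from this patch):

	/* Example negotiation:
	 *
	 *   VF offers:  L2 | RSS_PF | REQ_QUEUES
	 *   PF echoes:  L2 (always set) | RSS_PF | REQ_QUEUES in vf_cap_flags
	 *   then:       vf->driver_caps = vfres->vf_cap_flags
	 *
	 * so both ends agree on the working capability set afterwards.
	 */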
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself; unlike other virtchnl messages,
+ * the PF driver doesn't send a response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ ice_reset_vf(vf, 0);
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI ID
+ *
+ * check for the valid VSI ID
+ */
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_find_vsi(pf, vsi_id);
+
+ return (vsi && (vsi->vf == vf));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI ID
+ * @qid: VSI relative queue ID
+ *
+ * check for the valid queue ID
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+ struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
+
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
+ * or zero
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
+
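A self-contained sketch of the ring-length rule (the three ICE_* values are
assumed typical defaults, not taken from this hunk):

	#include <stdbool.h>
	#include <stdio.h>

	#define ICE_MIN_NUM_DESC	64	/* assumed */
	#define ICE_MAX_NUM_DESC	8160	/* assumed */
	#define ICE_REQ_DESC_MULTIPLE	32	/* assumed */

	static bool valid_ring_len(unsigned int len)
	{
		return len == 0 ||
		       (len >= ICE_MIN_NUM_DESC && len <= ICE_MAX_NUM_DESC &&
			!(len % ICE_REQ_DESC_MULTIPLE));
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       valid_ring_len(0),	/* 1: zero is allowed (queue not configured) */
		       valid_ring_len(512),	/* 1: in range and a multiple of 32 */
		       valid_ring_len(100));	/* 0: not a multiple of 32 */
		return 0;
	}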
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * validate the pattern is supported or not.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
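An illustrative trace of the state machine above (not driver code), for a
pattern list of ETH, IPV4, UDP, ESP:

	/*
	 *   ETH   -> ptype = ICE_PTYPE_MAC_PAY
	 *   IPV4  -> ptype = ICE_PTYPE_IPV4_PAY,     is_ipv4 = true
	 *   UDP   -> ptype = ICE_PTYPE_IPV4_UDP_PAY, is_udp  = true
	 *   ESP   -> ptype = ICE_MAC_IPV4_NAT_T_ESP (UDP already seen, so
	 *            the NAT-T variant), then goto out
	 *
	 * The final ptype is checked against the hardware's enabled ptypes
	 * via ice_hw_ptype_ena().
	 */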
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
+ * to configure
+ * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ *
+ * Return true if all the protocol header and hash fields in the RSS cfg could
+ * be parsed, else return false
+ *
+ * This function parses the virtchnl RSS cfg to be the intended
+ * hash fields and the intended header for RSS configuration
+ */
+static bool
+ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
+ u32 *addl_hdrs, u64 *hash_flds)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ bool hdr_found = false;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers. */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr) {
+ *addl_hdrs |= hdr_map.ice_hdr;
+ hdr_found = true;
+ }
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
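A worked example against the two match tables (sketch, not driver code):

	/* rss_cfg with a single proto_hdr:
	 *   type           = VIRTCHNL_PROTO_HDR_IPV4
	 *   field_selector = FIELD_SELECTOR(IPV4_SRC) | FIELD_SELECTOR(IPV4_DST)
	 *
	 * header loop: matches the IPv4 entry of ice_vc_hdr_list (defined
	 *              earlier in the file, outside this hunk)
	 *              -> *addl_hdrs |= that entry's ice_hdr value
	 * field loop:  matches the SRC|DST entry of ice_vc_hash_field_list
	 *              (the IPv4 entries at the top of this hunk show the
	 *              pattern) -> *hash_flds |= ICE_FLOW_HASH_IPV4
	 */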
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add a RSS config if true, otherwise delete a RSS config
+ *
+ * This function adds/deletes a RSS config
+ */
+static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ struct ice_vsi_ctx *ctx;
+ u8 lut_type, hash_type;
+ int status;
+
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto error_param;
+ }
+
+ ctx->info.q_opt_rss = ((lut_type <<
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ (hash_type &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+ ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+ } else {
+ u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ u64 hash_flds = ICE_HASH_INVALID;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
+ &hash_flds)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
+ addl_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
+ vsi->vsi_num, v_ret);
+ }
+ } else {
+ int status;
+
+ status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
+ addl_hdrs);
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
+ */
+ if (status && status != -ENOENT) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
+ }
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure VF VSIs promiscuous mode
+ */
+static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ bool rm_promisc, alluni = false, allmulti = false;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int mcast_err = 0, ucast_err = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ u8 mcast_m, ucast_m;
+ struct device *dev;
+ int ret = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ dev = ice_pf_to_dev(pf);
+ if (!ice_is_vf_trusted(vf)) {
+ dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Leave v_ret alone, lie to the VF on purpose. */
+ goto error_param;
+ }
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+
+ rm_promisc = !allmulti && !alluni;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ if (rm_promisc)
+ ret = vlan_ops->ena_rx_filtering(vsi);
+ else
+ ret = vlan_ops->dis_rx_filtering(vsi);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (alluni) {
+ /* in this case we're turning on promiscuous mode */
+ ret = ice_set_dflt_vsi(vsi);
+ } else {
+ /* in this case we're turning off promiscuous mode */
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ }
+
+ /* in this case we're turning on/off only
+ * allmulticast
+ */
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ret) {
+ dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
+ vf->vf_id, ret);
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto error_param;
+ }
+ } else {
+ if (alluni)
+ ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
+ else
+ ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ucast_err || mcast_err)
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+ if (!mcast_err) {
+ if (allmulti &&
+ !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ else if (!allmulti &&
+ test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
+ vf->vf_states))
+ dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, mcast_err);
+ }
+
+ if (!ucast_err) {
+ if (alluni &&
+ !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ else if (!alluni &&
+ test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
+ vf->vf_states))
+ dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, ucast_err);
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ v_ret, NULL, 0);
+}
+
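A compact summary of the promiscuous paths above (sketch, not driver code):

	/*
	 *   true-promisc flag clear:  alluni   -> set/clear default VSI
	 *                             allmulti -> set/clear mcast promisc
	 *   true-promisc flag set:    alluni   -> set/clear ucast promisc
	 *                             allmulti -> set/clear mcast promisc
	 *
	 * Untrusted VFs never reach either path: the PF replies success
	 * without changing anything ("lie to the VF on purpose").
	 */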
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_eth_stats stats = { 0 };
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_update_eth_stats(vsi);
+
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
+
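A sketch of the bitmap check (ICE_MAX_RSS_QS_PER_VF = 16 is an assumed
typical value, not taken from this hunk):

	#include <stdbool.h>
	#include <stdio.h>

	#define ICE_MAX_RSS_QS_PER_VF 16	/* assumed */
	#define BIT(n) (1UL << (n))

	static bool vqs_ok(unsigned long rxq, unsigned long txq)
	{
		return (rxq || txq) &&
		       rxq < BIT(ICE_MAX_RSS_QS_PER_VF) &&
		       txq < BIT(ICE_MAX_RSS_QS_PER_VF);
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       vqs_ok(0x3, 0x3),	/* 1: queues 0-1 on both rings */
		       vqs_ok(0, 0),		/* 0: nothing selected */
		       vqs_ok(BIT(16), 0));	/* 0: bit 16 is out of range */
		return 0;
	}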
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear the q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return an error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+ /* Clear enabled queues flag */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @vector_id: vector ID
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ * configure the IRQ to queue map
+ */
+static int
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < pf->vfs.num_msix_per) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract the non-queue vector from the vector_id passed by the VF
+ * to get the actual VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for an invalid queue index */
+ v_ret = (enum virtchnl_status_code)
+ ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
+
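The ICE_NONQ_VECS_VF subtraction is the subtle step above: VF vector 0 is
reserved for the OICR/mailbox and carries no queues, so VF vector N maps to
q_vectors[N - 1]. A sketch, assuming ICE_NONQ_VECS_VF == 1 (its usual
value, not shown in this hunk):

	#define ICE_NONQ_VECS_VF 1	/* assumed */

	/* VF-relative vector id -> VSI q_vector array index (-1: no queues) */
	static int vf_vector_to_qv_idx(unsigned int vector_id)
	{
		return vector_id ? (int)(vector_id - ICE_NONQ_VECS_VF) : -1;
	}
	/* vf_vector_to_qv_idx(1) == 0, vf_vector_to_qv_idx(2) == 1, ... */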
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq)
+ goto error_param;
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, i);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ vsi->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, i);
+ goto error_param;
+ }
+ }
+ }
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
+ * @vc_ether_addr: used to extract the type
+ */
+static u8
+ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
+}
+
+/**
+ * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ */
+static bool
+ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
+}
+
+/**
+ * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * This function should only be called when the MAC address in
+ * virtchnl_ether_addr is a valid unicast MAC
+ */
+static bool
+ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
+}
+
+/**
+ * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to add
+ */
+static void
+ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return;
+
+ /* only allow legacy VF drivers to set the device and hardware MAC if it
+ * is zero and allow new VF drivers to set the hardware MAC if the type
+ * was correctly specified over VIRTCHNL
+ */
+ if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
+ is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+ ice_is_vc_addr_primary(vc_ether_addr)) {
+ ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+ }
+
+ /* hardware and device MACs are already set, but it's possible that the
+ * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
+ * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
+ * away for the legacy VF driver case as it will be updated in the
+ * delete flow for this case
+ */
+ if (ice_is_vc_addr_legacy(vc_ether_addr)) {
+ ether_addr_copy(vf->legacy_last_added_umac.addr,
+ mac_addr);
+ vf->legacy_last_added_umac.time_modified = jiffies;
+ }
+}
+
+/**
+ * ice_vc_add_mac_addr - attempt to add the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
+ */
+static int
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
+ int ret;
+
+ /* device MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
+ return 0;
+
+ if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
+ dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return -EPERM;
+ }
+
+ ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (ret == -EEXIST) {
+ dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+ vf->vf_id);
+ /* don't return since we might need to update
+ * the primary MAC in ice_vfhw_mac_add() below
+ */
+ } else if (ret) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, ret);
+ return ret;
+ } else {
+ vf->num_mac++;
+ }
+
+ ice_vfhw_mac_add(vf, vc_ether_addr);
+
+ return ret;
+}
+
+/**
+ * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
+ * @last_added_umac: structure used to check expiration
+ */
+static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
+{
+#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
+ return time_is_before_jiffies(last_added_umac->time_modified +
+ ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
+}
+
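The helper above honors the cached legacy MAC for at most 3 seconds after
the last add. A userspace sketch of the same window check, using a
monotonic clock in place of jiffies (not driver code):

	#include <stdbool.h>
	#include <time.h>

	#define EXPIRE_NS (3LL * 1000 * 1000 * 1000)	/* 3000 ms, as above */

	static bool legacy_umac_expired(const struct timespec *modified)
	{
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		return (now.tv_sec - modified->tv_sec) * 1000000000LL +
		       (now.tv_nsec - modified->tv_nsec) > EXPIRE_NS;
	}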
+/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * only update cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee order/type of MAC from the VF driver
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+ ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+ return;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
+ * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
+ */
+static void
+ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr) ||
+ !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ return;
+
+ /* allow the device MAC to be repopulated in the add flow and don't
+ * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
+ * to be persistent on VM reboot and across driver unload/load, which
+ * won't work if we clear the hardware MAC here
+ */
+ eth_zero_addr(vf->dev_lan_addr.addr);
+
+ ice_update_legacy_cached_mac(vf, vc_ether_addr);
+}
+
+/**
+ * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
+ */
+static int
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
+ int status;
+
+ if (!ice_can_vf_change_mac(vf) &&
+ ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ return 0;
+
+ status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (status == -ENOENT) {
+ dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -ENOENT;
+ } else if (status) {
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ ice_vfhw_mac_del(vf, vc_ether_addr);
+
+ vf->num_mac--;
+
+ return 0;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if MAC filters are being set, false otherwise
+ *
+ * add guest MAC address filter
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+ int (*ice_vc_cfg_mac)
+ (struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *virtchnl_ether_addr);
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ enum virtchnl_ops vc_op;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (set) {
+ vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_add_mac_addr;
+ } else {
+ vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_del_mac_addr;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ /* If this VF is not privileged, then we can't add more than a
+ * limited number of addresses. Check to make sure that the
+ * additions do not push us over the limit.
+ */
+ if (set && !ice_is_vf_trusted(vf) &&
+ (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+ dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+ int result;
+
+ if (is_broadcast_ether_addr(mac_addr) ||
+ is_zero_ether_addr(mac_addr))
+ continue;
+
+ result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
+ if (result == -EEXIST || result == -ENOENT) {
+ continue;
+ } else if (result) {
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto handle_mac_exit;
+ }
+ }
+
+handle_mac_exit:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF
+ * and return 0. If unsuccessful, the PF will send a virtchnl response
+ * informing the VF of the number of available queue pairs.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
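A worked pass through the clamping above (all numbers assumed for
illustration; ICE_MAX_RSS_QS_PER_VF taken as 16):

	/* cur_queues = 4, tx_rx_queue_left = 8 -> max_allowed_vf_queues = 12
	 *
	 *   req 0   -> ignored; reply carries the request unchanged
	 *   req 64  -> over the per-VF max; reply advertises 16
	 *   req 16  -> needs 12 more but only 8 left; reply advertises
	 *              min(12, 16) = 12
	 *   req 8   -> granted: num_req_qs = 8, VF reset is triggered and
	 *              the handler returns 0 with no virtchnl reply here
	 */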
+/**
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ */
+static bool ice_vf_vlan_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
+
+/**
+ * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
+ * @vf: VF used to determine if VLAN promiscuous config is allowed
+ */
+static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to enable VLAN promiscuous mode
+ * @vlan: VLAN used to enable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
+
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -EEXIST)
+ return status;
+
+ return 0;
+}
+
+/**
+ * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to disable VLAN promiscuous mode for
+ * @vlan: VLAN used to disable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
+
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -ENOENT)
+ return status;
+
+ return 0;
+}
+
+/**
+ * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
+ * @vf: VF to check against
+ * @vsi: VF's VSI
+ *
+ * If the VF is trusted then the VF is allowed to add as many VLANs as it
+ * wants to, so return false.
+ *
+ * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
+ * allowed VLANs for an untrusted VF. Return the result of this comparison.
+ */
+static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ if (ice_is_vf_trusted(vf))
+ return false;
+
+#define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
+ return ((ice_vsi_num_non_zero_vlans(vsi) +
+ ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
+}
+
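+/* Worked example (not part of this patch) of the check above, using the
+ * ICE_MAX_VLAN_PER_VF value of 8 defined in ice_virtchnl.h: an untrusted
+ * VF holding 7 non-zero VLANs is at the limit, since 7 + 1 (the implicit
+ * VLAN 0 filter) >= 8, while a trusted VF is never capped.
+ */
+static inline int ex_vf_at_vlan_limit(int trusted, unsigned int non_zero_vlans)
+{
+ const unsigned int ex_max_vlan_per_vf = 8; /* ICE_MAX_VLAN_PER_VF */
+
+ if (trusted)
+ return 0;
+ return (non_zero_vlans + 1) >= ex_max_vlan_per_vf;
+}
+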
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN ID
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ bool vlan_promisc = false;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status = 0;
+ int i;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] >= VLAN_N_VID) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "invalid VF VLAN id %d\n",
+ vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let the VF know it is not trusted,
+ * so we can just return a success message here
+ */
+ goto error_param;
+ }
+
+ /* in DVM a VF can add/delete inner VLAN filters when
+ * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
+ * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
+ * allow vlan_promisc = true in SVM and if no port VLAN is configured
+ */
+ vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
+ !ice_is_dvm_ena(&pf->hw) &&
+ !ice_vf_is_port_vlan_ena(vf);
+
+ if (add_v) {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
+
+ if (ice_vf_has_max_vlans(vf, vsi)) {
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let the VF know it is not
+ * trusted, so we can just return a success message
+ * here as well.
+ */
+ goto error_param;
+ }
+
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events, so
+ * we don't need to add it again here
+ */
+ if (!vid)
+ continue;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
+ status = vsi->inner_vlan_ops.ena_rx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ } else if (vlan_promisc) {
+ status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ vid, status);
+ }
+ }
+ }
+ } else {
+ /* For an untrusted VF, the number of VLAN elements passed to
+ * the PF for removal might be greater than the number of VLAN
+ * filters programmed for that VF, so use the actual number of
+ * VLANs added earlier with the add VLAN opcode. This avoids
+ * removing a VLAN that doesn't exist, which would result in
+ * sending an erroneous failure message back to the VF.
+ */
+ int num_vf_vlan;
+
+ num_vf_vlan = vsi->num_vlan;
+ for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
+ u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
+
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events, so
+ * we don't want a VIRTCHNL request to remove it
+ */
+ if (!vid)
+ continue;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
+
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ if (add_v)
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
+ NULL, 0);
+ else
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN ID
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN ID
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->inner_vlan_ops.dis_stripping(vsi))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
+ * @vf: VF to enable/disable VLAN stripping for on initialization
+ *
+ * Set the default for VLAN stripping based on whether a port VLAN is configured
+ * and the current VLAN mode of the device.
+ */
+static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* don't modify stripping if port VLAN is configured in SVM since the
+ * port VLAN is based on the inner/single VLAN in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ if (ice_vf_vlan_offload_ena(vf->driver_caps))
+ return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ else
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
+}
+
+static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ return VLAN_N_VID;
+ else
+ return ICE_MAX_VLAN_PER_VF;
+}
+
+/**
+ * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
+ * @vf: VF being checked
+ *
+ * When the device is in double VLAN mode, check whether or not the outer VLAN
+ * is allowed.
+ */
+static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
+{
+ if (ice_vf_is_port_vlan_ena(vf))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF should use the inner
+ * filtering/offload capabilities since the port VLAN is using the outer VLAN
+ * capabilities.
+ */
+static void
+ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_outer_vlan_not_allowed(vf)) {
+ /* until support for inner VLAN filtering is added when a port
+ * VLAN is configured, only support software offloaded inner
+ * VLANs when a port VLAN is configured in DVM
+ */
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_AND;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ }
+
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+}
+
+/**
+ * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF does not have any VLAN
+ * filtering or offload capabilities since the port VLAN is using the inner VLAN
+ * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
+ * VLAN filtering and offload capabilities.
+ */
+static void
+ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.max_filters = 0;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+ }
+}
+
+/**
+ * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
+ * @vf: VF to determine VLAN capabilities for
+ *
+ * This will only be called if the VF and PF successfully negotiated
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ *
+ * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
+ * is configured or not.
+ */
+static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_caps *caps = NULL;
+ int err, len = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto out;
+ }
+ len = sizeof(*caps);
+
+ if (ice_is_dvm_ena(&vf->pf->hw))
+ ice_vc_set_dvm_caps(vf, caps);
+ else
+ ice_vc_set_svm_caps(vf, caps);
+
+ /* store negotiated caps to prevent invalid VF messages */
+ memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
+
+out:
+ err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ v_ret, (u8 *)caps, len);
+ kfree(caps);
+ return err;
+}
+
+/**
+ * ice_vc_validate_vlan_tpid - validate VLAN TPID
+ * @filtering_caps: negotiated/supported VLAN filtering capabilities
+ * @tpid: VLAN TPID used for validation
+ *
+ * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
+ * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
+ */
+static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
+{
+ enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ break;
+ case ETH_P_8021AD:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ break;
+ case ETH_P_QINQ1:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
+ break;
+ }
+
+ if (!(filtering_caps & vlan_ethertype))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_is_valid_vlan - validate the virtchnl_vlan
+ * @vc_vlan: virtchnl_vlan to validate
+ *
+ * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so
+ * return false. Otherwise return true.
+ */
+static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ if (!vc_vlan->tci || !vc_vlan->tpid)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF. If any of
+ * the checks fail then return false. Otherwise return true.
+ */
+static bool
+ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 i;
+
+ if (!vfl->num_elements)
+ return false;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vfc->filtering_support;
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *outer = &vlan_fltr->outer;
+ struct virtchnl_vlan *inner = &vlan_fltr->inner;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
+ return false;
+
+ if ((outer->tci_mask &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
+ (inner->tci_mask &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
+ return false;
+
+ if (((outer->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
+ ((inner->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
+ return false;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->outer,
+ outer->tpid)) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->inner,
+ inner->tpid)))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
+ * @vc_vlan: struct virtchnl_vlan to transform
+ */
+static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ struct ice_vlan vlan = { 0 };
+
+ vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
+ vlan.tpid = vc_vlan->tpid;
+
+ return vlan;
+}
+
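+/* Standalone sketch (not part of this patch) of the TCI unpacking above,
+ * with the <linux/if_vlan.h> masks restated so the example is
+ * self-contained: bits 15:13 carry the priority (PCP), bit 12 the DEI,
+ * and bits 11:0 the VLAN ID.
+ */
+#define EX_VLAN_PRIO_MASK 0xe000 /* VLAN_PRIO_MASK */
+#define EX_VLAN_PRIO_SHIFT 13 /* VLAN_PRIO_SHIFT */
+#define EX_VLAN_VID_MASK 0x0fff /* VLAN_VID_MASK */
+
+/* e.g. tci = 0xa064: priority (0xa000 >> 13) == 5, VLAN ID 0x064 == 100 */
+static inline void ex_tci_decode(unsigned short tci, unsigned char *prio,
+ unsigned short *vid)
+{
+ *prio = (tci & EX_VLAN_PRIO_MASK) >> EX_VLAN_PRIO_SHIFT;
+ *vid = tci & EX_VLAN_VID_MASK;
+}
+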
+/**
+ * ice_vc_vlan_action - action to perform on the virtchnl_vlan
+ * @vsi: VF's VSI used to perform the action
+ * @vlan_action: function to perform the action with (i.e. add/del)
+ * @vlan: VLAN filter to perform the action with
+ */
+static int
+ice_vc_vlan_action(struct ice_vsi *vsi,
+ int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
+ struct ice_vlan *vlan)
+{
+ int err;
+
+ err = vlan_action(vsi, vlan);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
+ * @vf: VF used to delete the VLAN(s)
+ * @vsi: VF's VSI used to delete the VLAN(s)
+ * @vfl: virtchnl filter list used to delete the filters
+ */
+static int
+ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_del_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
+ * @vf: VF used to add the VLAN(s)
+ * @vsi: VF's VSI used to add the VLAN(s)
+ * @vfl: virtchnl filter list used to add the filters
+ */
+static int
+ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
+ * @vsi: VF VSI used to get number of existing VLAN filters
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF during the
+ * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
+ * Otherwise return true.
+ */
+static bool
+ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
+ struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+ vfl->num_elements;
+
+ if (num_requested_filters > vfc->max_filters)
+ return false;
+
+ return ice_vc_validate_vlan_filter_list(vfc, vfl);
+}
+
+/**
+ * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_validate_add_vlan_filter_list(vsi,
+ &vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_add_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_valid_vlan_setting - validate VLAN setting
+ * @negotiated_settings: negotiated VLAN settings during VF init
+ * @ethertype_setting: ethertype(s) requested for the VLAN setting
+ */
+static bool
+ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
+{
+ if (ethertype_setting && !(negotiated_settings & ethertype_setting))
+ return false;
+
+ /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
+ * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
+ */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
+ hweight32(ethertype_setting) > 1)
+ return false;
+
+ /* ability to modify the VLAN setting was not negotiated */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
+ return false;
+
+ return true;
+}
+
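+/* Standalone sketch (not part of this patch) of the single-ethertype rule
+ * above: unless the AND capability was negotiated, a setting naming more
+ * than one ethertype (more than one set bit) is rejected.
+ * __builtin_popcount stands in for hweight32, and the flag bit used here
+ * is a placeholder, not the real virtchnl value.
+ */
+static inline int ex_valid_ethertype_setting(unsigned int negotiated,
+ unsigned int setting)
+{
+ const unsigned int ex_ethertype_and = 1u << 29; /* placeholder bit */
+
+ if (!(negotiated & ex_ethertype_and) &&
+ __builtin_popcount(setting) > 1)
+ return 0; /* two ethertypes requested, AND not negotiated */
+ return 1;
+}
+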
+/**
+ * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
+ * @caps: negotiated VLAN settings during VF init
+ * @msg: message to validate
+ *
+ * Used to validate any VLAN virtchnl message sent as a
+ * virtchnl_vlan_setting structure. Validates the message against the
+ * negotiated/supported caps during VF driver init.
+ */
+static bool
+ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
+ struct virtchnl_vlan_setting *msg)
+{
+ if ((!msg->outer_ethertype_setting &&
+ !msg->inner_ethertype_setting) ||
+ (!caps->outer && !caps->inner))
+ return false;
+
+ if (msg->outer_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->outer,
+ msg->outer_ethertype_setting))
+ return false;
+
+ if (msg->inner_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->inner,
+ msg->inner_ethertype_setting))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
+ * @tpid: VLAN TPID to populate
+ */
+static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
+{
+ switch (ethertype_setting) {
+ case VIRTCHNL_VLAN_ETHERTYPE_8100:
+ *tpid = ETH_P_8021Q;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_88A8:
+ *tpid = ETH_P_8021AD;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_9100:
+ *tpid = ETH_P_QINQ1;
+ break;
+ default:
+ *tpid = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
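+/* Standalone sketch (not part of this patch) of the mapping above: each
+ * single VIRTCHNL_VLAN_ETHERTYPE_* bit resolves to exactly one TPID. The
+ * TPID values are the standard ones (0x8100, 0x88A8, 0x9100); the
+ * ethertype flag bits used here are placeholders.
+ */
+static inline unsigned short ex_ethertype_to_tpid(unsigned int setting)
+{
+ const unsigned int ex_et_8100 = 1u << 0; /* placeholder bits */
+ const unsigned int ex_et_88a8 = 1u << 1;
+ const unsigned int ex_et_9100 = 1u << 2;
+
+ if (setting == ex_et_8100)
+ return 0x8100; /* ETH_P_8021Q */
+ if (setting == ex_et_88a8)
+ return 0x88a8; /* ETH_P_8021AD */
+ if (setting == ex_et_9100)
+ return 0x9100; /* ETH_P_QINQ1 */
+ return 0; /* unknown: the caller returns -EINVAL */
+}
+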
+/**
+ * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
+ * @vsi: VF's VSI used to enable the VLAN offload
+ * @ena_offload: function used to enable the VLAN offload
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
+ */
+static int
+ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
+ int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
+ u32 ethertype_setting)
+{
+ u16 tpid;
+ int err;
+
+ err = ice_vc_get_tpid(ethertype_setting, &tpid);
+ if (err)
+ return err;
+
+ err = ena_offload(vsi, tpid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
+
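+/* Standalone sketch (not part of this patch) of the read-modify-write
+ * above on a plain 32-bit value: clear bit 23 of the queue-context word,
+ * then OR in the requested l2tsel bit (0 selects L2TAG2_2ND, bit 23 set
+ * selects L2TAG1).
+ */
+static inline unsigned int ex_update_l2tsel(unsigned int regval,
+ unsigned int l2tsel_bit)
+{
+ const unsigned int ex_l2tsel_mask = 1u << 23; /* ICE_L2TSEL_BIT_OFFSET */
+
+ regval &= ~ex_l2tsel_mask;
+ return regval | l2tsel_bit;
+}
+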
+/**
+ * ice_vc_ena_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (ice_vc_ena_vlan_offload(vsi,
+ vsi->outer_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support outer stripping so the first tag always ends
+ * up in L2TAG2_2ND and the second/inner tag, if
+ * enabled, is extracted in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support inner stripping while outer stripping is
+ * disabled so that the first and only tag is extracted
+ * in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_ena_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+ .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
+{
+ vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
+}
+
+/**
+ * ice_vc_repr_add_mac
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add the MAC rule
+ * to the firmware; we store it so that the PF can report the same
+ * MAC as the VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ pf = vf->pf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+ int result;
+
+ if (!is_unicast_ether_addr(mac_addr) ||
+ ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ continue;
+
+ if (vf->pf_set_mac) {
+ dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto handle_mac_exit;
+ }
+
+ result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
+ if (result) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, result);
+ goto handle_mac_exit;
+ }
+
+ ice_vfhw_mac_add(vf, &al->list[i]);
+ vf->num_mac++;
+ break;
+ }
+
+handle_mac_exit:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - response with success for deleting MAC
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success to not break normal VF flow.
+ * For legacy VF driver try to update cached MAC address.
+ */
+static int
+ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't config promiscuous mode in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_repr_add_mac,
+ .del_mac_addr_msg = ice_vc_repr_del_mac,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
+{
+ vf->virtchnl_ops = &ice_virtchnl_repr_ops;
+}
+
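+/* Standalone sketch (not part of this patch) of the dispatch pattern used
+ * by the two ops tables above: one const table of handlers per mode,
+ * swapped by pointer, so the message loop can call ops->...() without
+ * knowing whether the VF is in default or switchdev (representor) mode.
+ */
+struct ex_vc_ops {
+ int (*add_mac)(int vf_id);
+};
+
+static int ex_add_mac_dflt(int vf_id) { (void)vf_id; return 0; }
+static int ex_add_mac_repr(int vf_id) { (void)vf_id; return 0; }
+
+static const struct ex_vc_ops ex_dflt_vc_ops = { .add_mac = ex_add_mac_dflt };
+static const struct ex_vc_ops ex_repr_vc_ops = { .add_mac = ex_add_mac_repr };
+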
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * Called from the common asq/arq handler to process a request from a VF.
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ const struct ice_virtchnl_ops *ops;
+ u16 msglen = event->msg_len;
+ u8 *msg = event->msg_buf;
+ struct ice_vf *vf = NULL;
+ struct device *dev;
+ int err = 0;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
+ vf_id, v_opcode, msglen);
+ return;
+ }
+
+ mutex_lock(&vf->cfg_lock);
+
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+ err = -EPERM;
+ goto error_handler;
+ }
+
+ ops = vf->virtchnl_ops;
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+ if (err) {
+ if (err == VIRTCHNL_STATUS_ERR_PARAM)
+ err = -EPERM;
+ else
+ err = -EINVAL;
+ }
+
+error_handler:
+ if (err) {
+ ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+ NULL, 0);
+ dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ vf_id, v_opcode, msglen, err);
+ goto finish;
+ }
+
+ if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+ ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+ 0);
+ goto finish;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ err = ops->get_ver_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ err = ops->get_vf_res_msg(vf, msg);
+ if (ice_vf_init_vlan_stripping(vf))
+ dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
+ vf->vf_id);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ops->reset_vf(vf);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ err = ops->add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ err = ops->del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ err = ops->cfg_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ err = ops->ena_qs_msg(vf, msg);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ err = ops->dis_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ err = ops->request_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ err = ops->cfg_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ err = ops->config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ err = ops->config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ err = ops->get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ err = ops->cfg_promiscuous_mode_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ops->add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ops->remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ops->ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ops->dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ err = ops->add_fdir_fltr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ err = ops->del_fdir_fltr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ err = ops->handle_rss_cfg_msg(vf, msg, true);
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ err = ops->handle_rss_cfg_msg(vf, msg, false);
+ break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ err = ops->get_offload_vlan_v2_caps(vf);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ err = ops->add_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ err = ops->remove_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ err = ops->ena_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ err = ops->dis_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ err = ops->ena_vlan_insertion_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ err = ops->dis_vlan_insertion_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
+ vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* The handler has already sent any required reply to the
+ * VF, so just log the failure rather than acting on the
+ * error value here.
+ */
+ dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+
+finish:
+ mutex_unlock(&vf->cfg_lock);
+ ice_put_vf(vf);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
new file mode 100644
index 000000000000..b5a3fd8adbb4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_H_
+#define _ICE_VIRTCHNL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_vf_lib.h"
+
+/* Restrict the number of MAC and VLAN filters a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
+
+struct ice_virtchnl_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_offload_vlan_v2_caps)(struct ice_vf *vf);
+ int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf);
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
+#else /* CONFIG_PCI_IOV */
+static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
+static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+
+static inline int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ return false;
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 9feebe5f556c..5a82216e7d03 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -55,6 +55,15 @@ static const u32 vlan_allowlist_opcodes[] = {
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
};
+/* VIRTCHNL_VF_OFFLOAD_VLAN_V2 */
+static const u32 vlan_v2_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, VIRTCHNL_OP_ADD_VLAN_V2,
+ VIRTCHNL_OP_DEL_VLAN_V2, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+};
+
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
@@ -89,6 +98,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index d64df81d4893..c6a58343d81d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
+#include "ice_vf_lib_private.h"
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
@@ -1288,15 +1289,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_pf *pf = ctrl_vsi->back;
+ struct ice_vf *vf = ctrl_vsi->vf;
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir_ctx *ctx_irq;
struct ice_vf_fdir *fdir;
unsigned long flags;
struct device *dev;
- struct ice_vf *vf;
int ret;
- vf = &pf->vf[ctrl_vsi->vf_id];
+ if (WARN_ON(!vf))
+ return;
fdir = &vf->fdir;
ctx_done = &fdir->ctx_done;
@@ -1342,12 +1344,17 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf)
pf = vf->pf;
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
+ return;
+ }
+
vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
- dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
+ dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
vf->vf_id,
(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
@@ -1571,15 +1578,16 @@ err_exit:
*/
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
return;
- ice_for_each_vf(pf, i) {
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
struct device *dev = ice_pf_to_dev(pf);
enum virtchnl_fdir_prgm_status status;
- struct ice_vf *vf = &pf->vf[i];
struct ice_vf_fdir_ctx *ctx;
unsigned long flags;
int ret;
@@ -1633,6 +1641,7 @@ err_exit:
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index f4e629f4c09b..c5bcc8d7481c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -6,6 +6,7 @@
struct ice_vf;
struct ice_pf;
+struct ice_vsi;
enum ice_fdir_ctx_stat {
ICE_FDIR_CTX_READY,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
deleted file mode 100644
index 39b80124d282..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ /dev/null
@@ -1,5317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
-
-#include "ice.h"
-#include "ice_base.h"
-#include "ice_lib.h"
-#include "ice_fltr.h"
-#include "ice_dcb_lib.h"
-#include "ice_flow.h"
-#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
-#include "ice_flex_pipe.h"
-
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
-/**
- * ice_get_vf_vsi - get VF's VSI based on the stored index
- * @vf: VF used to get VSI
- */
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return vf->pf->vsi[vf->lan_vsi_idx];
-}
-
-/**
- * ice_validate_vf_id - helper to check if VF ID is valid
- * @pf: pointer to the PF structure
- * @vf_id: the ID of the VF to check
- */
-static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
-{
- /* vf_id is valid only in the range 0-255 and should always be unsigned */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * ice_check_vf_init - helper to check if VF init complete
- * @pf: pointer to the PF structure
- * @vf: the pointer to the VF to check
- */
-static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
-{
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
- vf->vf_id);
- return -EBUSY;
- }
- return 0;
-}
-
-/**
- * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
- * @pf: pointer to the PF structure
- * @v_opcode: operation code
- * @v_retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- */
-static void
-ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
-{
- struct ice_hw *hw = &pf->hw;
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- /* Not all VFs are enabled, so skip the ones that are not */
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- continue;
-
- /* Ignore return value on purpose - a given VF may fail, but
- * we need to keep going and send to all of them
- */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
- msglen, NULL);
- }
-}
-
-/**
- * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
- * @vf: pointer to the VF structure
- * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
- * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
- * @link_up: true to report the link as up, false to report it as down
- */
-static void
-ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
- int ice_link_speed, bool link_up)
-{
- if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
- pfe->event_data.link_event_adv.link_status = link_up;
- /* Speed in Mbps */
- pfe->event_data.link_event_adv.link_speed =
- ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
- } else {
- pfe->event_data.link_event.link_status = link_up;
- /* Legacy method for virtchnl link speeds */
- pfe->event_data.link_event.link_speed =
- (enum virtchnl_link_speed)
- ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
- }
-}
-
-/**
- * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
- * @vf: the VF to check
- *
- * Returns true if the VF has neither Rx nor Tx queues enabled, false otherwise
- */
-static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
-{
- return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
- !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
-}
-
-/**
- * ice_is_vf_link_up - check if the VF's link is up
- * @vf: VF to check if link is up
- */
-static bool ice_is_vf_link_up(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- if (ice_check_vf_init(pf, vf))
- return false;
-
- if (ice_vf_has_no_qs_ena(vf))
- return false;
- else if (vf->link_forced)
- return vf->link_up;
- else
- return pf->hw.port_info->phy.link_info.link_info &
- ICE_AQ_LINK_UP;
-}
-
-/**
- * ice_vc_notify_vf_link_state - Inform a VF of link status
- * @vf: pointer to the VF structure
- *
- * send a link status message to a single VF
- */
-void ice_vc_notify_vf_link_state(struct ice_vf *vf)
-{
- struct virtchnl_pf_event pfe = { 0 };
- struct ice_hw *hw = &vf->pf->hw;
-
- pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- if (ice_is_vf_link_up(vf))
- ice_set_pfe_link(vf, &pfe,
- hw->port_info->phy.link_info.link_speed, true);
- else
- ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
-
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
- sizeof(pfe), NULL);
-}
-
-/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
- * @vf: VF to remove access to VSI for
- */
-static void ice_vf_invalidate_vsi(struct ice_vf *vf)
-{
- vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(ice_get_vf_vsi(vf));
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
- * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
- * @vf: VF that control VSI is being invalidated on
- */
-static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
-{
- vf->ctrl_vsi_idx = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
- * @vf: VF that control VSI is being released on
- */
-static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
- ice_vf_ctrl_invalidate_vsi(vf);
-}
-
-/**
- * ice_free_vf_res - Free a VF's resources
- * @vf: pointer to the VF info
- */
-static void ice_free_vf_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- int i, last_vector_idx;
-
- /* First, disable the VF's configuration API to prevent the OS from
- * accessing the VF's VSI after it's freed or invalidated.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_vf_fdir_exit(vf);
- /* free VF control VSI */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- /* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx != ICE_NO_VSI) {
- ice_vf_vsi_release(vf);
- vf->num_mac = 0;
- }
-
- last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
-
- /* clear VF MDD event information */
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-
- /* Disable interrupts so that VF starts in a known state */
- for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
- wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
- ice_flush(&pf->hw);
- }
- /* reset some of the state variables keeping track of the resources */
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_mappings
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_mappings(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- vsi = ice_get_vf_vsi(vf);
-
- dev = ice_pf_to_dev(pf);
- wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
-
- first = vf->first_vector_idx;
- last = first + pf->num_msix_per_vf - 1;
- for (v = first; v <= last; v++) {
- u32 reg;
-
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
-}
-
-/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker, just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- struct ice_res_tracker *res;
-
- if (!pf)
- return -EINVAL;
-
- res = pf->irq_tracker;
- if (!res)
- return -EINVAL;
-
- /* give back irq_tracker resources used */
- WARN_ON(pf->sriov_base_vector < res->num_entries);
-
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
- /* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_qs - Disable the VF queues
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_qs(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_all_rx_rings(vsi);
- ice_set_vf_state_qs_dis(vf);
-}
-
-/**
- * ice_free_vfs - Free all VFs
- * @pf: pointer to the PF structure
- */
-void ice_free_vfs(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- unsigned int tmp, i;
-
- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
-
- if (!pf->vf)
- return;
-
- ice_eswitch_release(pf);
-
- while (test_and_set_bit(ICE_VF_DIS, pf->state))
- usleep_range(1000, 2000);
-
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
-
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- ice_dis_vf_qs(&pf->vf[i]);
-
- tmp = pf->num_alloc_vfs;
- pf->num_qps_per_vf = 0;
- pf->num_alloc_vfs = 0;
- for (i = 0; i < tmp; i++) {
- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
- /* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(&pf->vf[i]);
- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
- ice_free_vf_res(&pf->vf[i]);
- }
-
- mutex_destroy(&pf->vf[i].cfg_lock);
- }
-
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
-
- /* This check is for when the driver is unloaded while VFs are
- * assigned. Setting the number of VFs to 0 through sysfs is caught
- * before this function ever gets called.
- */
- if (!pci_vfs_assigned(pf->pdev)) {
- unsigned int vf_id;
-
- /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
- * work correctly when SR-IOV gets re-enabled.
- */
- for (vf_id = 0; vf_id < tmp; vf_id++) {
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- }
- }
-
- /* clear malicious info if the VFs are getting released */
- for (i = 0; i < tmp; i++)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
- ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- i);
-
- clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
- clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
-}
-
-/**
- * ice_trigger_vf_reset - Reset a VF on HW
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- * @is_pfr: true if the reset was triggered due to a previous PFR
- *
- * Trigger hardware to start a reset for a particular VF. Expects the caller
- * to wait the proper amount of time to allow hardware to reset the VF before
- * it cleans up and restores VF functionality.
- */
-static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
-{
- struct ice_pf *pf = vf->pf;
- u32 reg, reg_idx, bit_idx;
- unsigned int vf_abs_id, i;
- struct device *dev;
- struct ice_hw *hw;
-
- dev = ice_pf_to_dev(pf);
- hw = &pf->hw;
- vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* Inform VF that it is no longer active, as a warning */
- clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
- /* Disable VF's configuration API during reset. The flag is re-enabled
- * when it's safe again to access VF's VSI.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
- * needs to clear them in the case of VFR/VFLR. If this is done for
- * PFR, it can mess up VF resets because the VF driver may already
- * have started cleanup by the time we get here.
- */
- if (!is_pfr) {
- wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
- wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
- }
-
- /* In the case of a VFLR, the HW has already reset the VF and we
- * just need to clean up, so don't hit the VFRTRIG register.
- */
- if (!is_vflr) {
- /* reset VF using VPGEN_VFRTRIG reg */
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg |= VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- }
- /* clear the VFLR bit in GLGEN_VFLRSTAT */
- reg_idx = (vf_abs_id) / 32;
- bit_idx = (vf_abs_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- ice_flush(hw);
-
- wr32(hw, PF_PCI_CIAA,
- VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
- reg = rd32(hw, PF_PCI_CIAD);
- /* no transactions pending so stop polling */
- if ((reg & VF_TRANS_PENDING_M) == 0)
- break;
-
- dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
- udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
- }
-}
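For illustration (hypothetical value): the VFLR status above is packed one bit per absolute VF ID into 32-bit GLGEN_VFLRSTAT registers, so vf_abs_id = 40 gives reg_idx = 40 / 32 = 1 and bit_idx = 40 % 32 = 8, i.e. bit 8 of GLGEN_VFLRSTAT(1).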
-
-/**
- * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
- * @vsi: the VSI to update
- * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
- * @enable: true to enable the PVID, false to disable it
- */
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_aqc_vsi_props *info;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
- info = &ctxt->info;
- if (enable) {
- info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR;
- info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
- ICE_AQ_VSI_VLAN_MODE_ALL;
- info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
-
- info->pvid = cpu_to_le16(pvid_info);
- info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = info->vlan_flags;
- vsi->info.sw_flags2 = info->sw_flags2;
- vsi->info.pvid = info->pvid;
-out:
- kfree(ctxt);
- return ret;
-}
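For illustration (hypothetical values): to set a port VLAN with VID 100 and priority 3, a caller would pass pvid_info = 100 | (3 << 13) = 24676, matching the 802.1Q TCI layout in which the PCP occupies the top three bits.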
-
-/**
- * ice_vf_get_port_info - Get the VF's port info structure
- * @vf: VF used to get the port info structure for
- */
-static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
-{
- return vf->pf->hw.port_info;
-}
-
-/**
- * ice_vf_vsi_setup - Set up a VF VSI
- * @vf: VF to setup VSI for
- *
- * Returns a pointer to the allocated VSI struct on success, or NULL on failure.
- */
-static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
-
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
- ice_vf_invalidate_vsi(vf);
- return NULL;
- }
-
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- return vsi;
-}
-
-/**
- * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
- * @vf: VF to setup control VSI for
- *
- * Returns a pointer to the allocated VSI struct on success, or NULL on failure.
- */
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
- ice_vf_ctrl_invalidate_vsi(vf);
- }
-
- return vsi;
-}
-
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF-relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver, so any functionality
- * using vf->first_vector_idx for queue configuration must add 1 to avoid
- * clobbering the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
- return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
-}
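For illustration (hypothetical numbers): with pf->sriov_base_vector = 96 and pf->num_msix_per_vf = 17, VF 0's first vector index is 96, VF 1's is 113, and VF 2's is 96 + 2 * 17 = 130.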
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * needs to re-apply the host-configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- int err;
-
- if (vf->min_tx_rate) {
- err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
- vf->min_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- if (vf->max_tx_rate) {
- err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
- vf->max_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add VLAN filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u16 vlan_id = 0;
- int err;
-
- if (vf->port_vlan_info) {
- err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (err) {
- dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
- }
-
- /* vlan_id will either be 0 or the port VLAN number */
- err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
- vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
- err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u8 broadcast[ETH_ALEN];
- int status;
-
- if (ice_is_eswitch_mode_switchdev(vf->pf))
- return 0;
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
- vf->vf_id, status);
- return status;
- }
-
- vf->num_mac++;
-
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
- status);
- return status;
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
- }
-
- return 0;
-}
-
-/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
- if (vf->trusted)
- set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
- else
- clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
- * @vf: VF to enable MSIX mappings for
- *
- * Some of the registers need to be indexed/configured using hardware-global
- * device values, while other registers take 0-based, PF-relative values.
- */
-static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
-{
- int device_based_first_msix, device_based_last_msix;
- int pf_based_first_msix, pf_based_last_msix, v;
- struct ice_pf *pf = vf->pf;
- int device_based_vf_id;
- struct ice_hw *hw;
- u32 reg;
-
- hw = &pf->hw;
- pf_based_first_msix = vf->first_vector_idx;
- pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
-
- device_based_first_msix = pf_based_first_msix +
- pf->hw.func_caps.common_cap.msix_vector_first_id;
- device_based_last_msix =
- (device_based_first_msix + pf->num_msix_per_vf) - 1;
- device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
- wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
-
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
-
- /* map the interrupts to its functions */
- for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- /* Map mailbox interrupt to VF MSI-X vector 0 */
- wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
- * @vf: VF to enable the mappings for
- * @max_txq: max Tx queues allowed on the VF's VSI
- * @max_rxq: max Rx queues allowed on the VF's VSI
- */
-static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
-
- /* VF Tx queues allocation */
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Tx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
- }
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
-
- /* VF Rx queues allocation */
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Rx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
- }
-}
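For illustration (hypothetical values): if the VF VSI's first Tx queue is PF queue 64 and max_txq is 4, the VPLAN_TX_QBASE write above encodes VFFIRSTQ = 64 and VFNUMQ = 3, since the VFNUMQ field stores the queue count minus one.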
-
-/**
- * ice_ena_vf_mappings - enable VF MSIX and queue mapping
- * @vf: pointer to the VF structure
- */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_ena_vf_msix_mappings(vf);
- ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
-}
-
-/**
- * ice_determine_res
- * @pf: pointer to the PF structure
- * @avail_res: available resources in the PF structure
- * @max_res: maximum resources that can be given per VF
- * @min_res: minimum resources that can be given per VF
- *
- * Returns a non-zero value if resources (queues/vectors) are available, or
- * zero if the PF cannot accommodate all num_alloc_vfs.
- */
-static int
-ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
-{
- bool checked_min_res = false;
- int res;
-
- /* Start by checking if the PF can assign the max number of resources to
- * all num_alloc_vfs.
- * If yes, return that number per VF.
- * If no, halve it (rounding up) and check again.
- * Repeat until even the minimum resources are unavailable, in which case
- * return 0.
- */
- res = max_res;
- while ((res >= min_res) && !checked_min_res) {
- int num_all_res;
-
- num_all_res = pf->num_alloc_vfs * res;
- if (num_all_res <= avail_res)
- return res;
-
- if (res == min_res)
- checked_min_res = true;
-
- res = DIV_ROUND_UP(res, 2);
- }
- return 0;
-}
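A minimal standalone sketch of the halving search above, with hypothetical inputs (the determine_res() helper and its values here are illustrative, not driver code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Model of the loop in ice_determine_res(): find the largest per-VF
 * resource count, halving from max_res down toward min_res, that fits
 * within avail for num_vfs VFs.
 */
static int determine_res(int num_vfs, int avail, int max_res, int min_res)
{
	int checked_min_res = 0;
	int res = max_res;

	while (res >= min_res && !checked_min_res) {
		if (num_vfs * res <= avail)
			return res;
		if (res == min_res)
			checked_min_res = 1;
		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

int main(void)
{
	/* 8 VFs and 52 available queues: 16*8 and 8*8 both exceed 52,
	 * but 4*8 = 32 fits, so each VF gets 4.
	 */
	printf("%d\n", determine_res(8, 52, 16, 1));
	return 0;
}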
-
-/**
- * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
- * @vf: VF to calculate the register index for
- * @q_vector: a q_vector associated to the VF
- */
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
-{
- struct ice_pf *pf;
-
- if (!vf || !q_vector)
- return -EINVAL;
-
- pf = vf->pf;
-
- /* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
- q_vector->v_idx + 1;
-}
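Continuing the hypothetical numbers from the example above (base vector 96, 17 vectors per VF): VF 1's q_vector 0 maps to 96 + 17 * 1 + 0 + 1 = 114, where the trailing + 1 skips the VF's OICR vector.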
-
-/**
- * ice_get_max_valid_res_idx - Get the max valid resource index
- * @res: pointer to the resource to find the max valid index for
- *
- * Start from the end of the ice_res_tracker and return as soon as we find the
- * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
- * valid for SR-IOV because it is the only consumer that manipulates res->end,
- * and this is always called when res->end is set to res->num_entries.
- */
-static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
-{
- int i;
-
- if (!res)
- return -EINVAL;
-
- for (i = res->num_entries - 1; i >= 0; i--)
- if (res->list[i] & ICE_RES_VALID_BIT)
- return i;
-
- return 0;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = pf->irq_tracker->num_entries;
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
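For illustration (hypothetical numbers): with 2048 total MSIX vectors, 512 entries in the irq_tracker, and 64 vectors needed for SR-IOV, sriov_base_vector becomes 2048 - 64 = 1984, which is not below 512 and is therefore accepted.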
-
-/**
- * ice_set_per_vf_res - check if vectors and queues are available
- * @pf: pointer to the PF structure
- *
- * First, determine HW interrupts from the common pool. If we allocate fewer VFs, we
- * get more vectors and can enable more queues per VF. Note that this does not
- * grab any vectors from the SW pool already allocated. Also note, that all
- * vector counts include one for each VF's miscellaneous interrupt vector
- * (i.e. OICR).
- *
- * Minimum VFs - 2 vectors, 1 queue pair
- * Small VFs - 5 vectors, 4 queue pairs
- * Medium VFs - 17 vectors, 16 queue pairs
- *
- * Second, determine number of queue pairs per VF by starting with a pre-defined
- * maximum each VF supports. If this is not possible, then we adjust based on
- * queue pairs available on the device.
- *
- * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
- * by each VF during VF initialization and reset.
- */
-static int ice_set_per_vf_res(struct ice_pf *pf)
-{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- int msix_avail_per_vf, msix_avail_for_sriov;
- struct device *dev = ice_pf_to_dev(pf);
- u16 num_msix_per_vf, num_txq, num_rxq;
-
- if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
- return -EINVAL;
-
- /* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- pf->irq_tracker->num_entries;
- msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
- if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
- } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
- num_msix_per_vf = ICE_MIN_INTR_PER_VF;
- } else {
- dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
- msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
- pf->num_alloc_vfs);
- return -EIO;
- }
-
- /* determine queue resources per VF */
- num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq) {
- dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
- ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
- return -EIO;
- }
-
- if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
- pf->num_alloc_vfs);
- return -EINVAL;
- }
-
- /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
- pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
- pf->num_msix_per_vf = num_msix_per_vf;
- dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
- pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
-
- return 0;
-}
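Worked example using the tier sizes listed above: 256 MSI-X vectors available for SR-IOV across 32 VFs yields 8 per VF, which misses the medium tier (17) but meets the small tier (5), so each VF gets 5 vectors and, after reserving one non-queue vector, up to 4 queue pairs.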
-
-/**
- * ice_clear_vf_reset_trigger - enable VF to access hardware
- * @vf: VF to enable hardware access for
- */
-static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
-{
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- ice_flush(hw);
-}
-
-static int
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int status;
-
- if (vf->port_vlan_info)
- status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info & VLAN_VID_MASK);
- else if (vsi->num_vlan > 1)
- status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
- else
- status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
- if (status && status != -EEXIST) {
- dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
- vf->vf_id, status);
- return status;
- }
-
- return 0;
-}
-
-static int
-ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int status;
-
- if (vf->port_vlan_info)
- status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info & VLAN_VID_MASK);
- else if (vsi->num_vlan > 1)
- status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
- else
- status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
- if (status && status != -ENOENT) {
- dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
- vf->vf_id, status);
- return status;
- }
-
- return 0;
-}
-
-static void ice_vf_clear_counters(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- vf->num_mac = 0;
- vsi->num_vlan = 0;
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-}
-
-/**
- * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
- * @vf: VF to perform pre VSI rebuild tasks
- *
- * These tasks are items that don't need to be amortized, since they are most
- * likely called in a loop over all VFs in the reset_all_vfs() case.
- */
-static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
-{
- ice_vf_clear_counters(vf);
- ice_clear_vf_reset_trigger(vf);
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int status;
-
- if (!vsi->agg_node)
- return;
-
- dev = ice_pf_to_dev(pf);
- if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
- dev_dbg(dev,
- "agg_id %u already has reached max_num_vsis %u\n",
- vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
- return;
- }
-
- status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
- vsi->idx, vsi->tc_cfg.ena_tc);
- if (status)
- dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
- vsi->idx, vsi->agg_node->agg_id);
- else
- vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vf_set_host_trust_cfg(vf);
-
- if (ice_vf_rebuild_host_mac_cfg(vf))
- dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_vlan_cfg(vf))
- dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_tx_rate_cfg(vf))
- dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
- vf->vf_id);
-
- /* rebuild aggregator node config for main VF VSI */
- ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
- * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
- * @vf: VF to release and setup the VSI for
- *
- * This is only called when a single VF is being reset (e.g. VFR, VFLR, host VF
- * configuration change, etc.).
- */
-static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
-{
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf))
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_vsi - rebuild the VF's VSI
- * @vf: VF to rebuild the VSI for
- *
- * This is only called when all VF(s) are being reset (e.g. PCIe Reset on the
- * host, PFR, CORER, etc.).
- */
-static int ice_vf_rebuild_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_pf *pf = vf->pf;
-
- if (ice_vsi_rebuild(vsi, true)) {
- dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
- vf->vf_id);
- return -EIO;
- }
- /* vsi->idx will remain the same in this case so don't update
- * vf->lan_vsi_idx
- */
- vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
-
- return 0;
-}
-
-/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-static void ice_vf_set_initialized(struct ice_vf *vf)
-{
- ice_set_vf_state_qs_dis(vf);
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-}
-
-/**
- * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
- * @vf: VF to perform tasks on
- */
-static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
-
- ice_vf_rebuild_host_cfg(vf);
-
- ice_vf_set_initialized(vf);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
-}
-
-/**
- * ice_reset_all_vfs - reset all allocated VFs in one go
- * @pf: pointer to the PF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * First, tell the hardware to reset each VF, then do all the waiting in one
- * chunk, and finally finish restoring each VF after the wait. This is useful
- * during PF routines which need to reset all VFs, which would otherwise have
- * to perform these resets in a serialized fashion.
- *
- * Returns true if any VFs were reset, and false otherwise.
- */
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf;
- int v, i;
-
- /* If we don't have any VFs, then there is nothing to reset */
- if (!pf->num_alloc_vfs)
- return false;
-
- /* clear all malicious info if the VFs are getting reset */
- ice_for_each_vf(pf, i)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- /* If VFs have been disabled, there is no need to reset */
- if (test_and_set_bit(ICE_VF_DIS, pf->state))
- return false;
-
- /* Begin reset on all VFs at once */
- ice_for_each_vf(pf, v)
- ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
-
- /* HW requires some time to make sure it can flush the FIFO for a VF
- * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
- * sequence to make sure that it has completed. We'll keep track of
- * the VFs using a simple iterator that increments once that VF has
- * finished resetting.
- */
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- /* Check each VF in sequence */
- while (v < pf->num_alloc_vfs) {
- u32 reg;
-
- vf = &pf->vf[v];
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
- /* only delay if the check failed */
- usleep_range(10, 20);
- break;
- }
-
- /* If the current VF has finished resetting, move on
- * to the next VF in sequence.
- */
- v++;
- }
- }
-
- /* Display a warning if at least one VF didn't manage to reset in
- * time, but continue on with the operation.
- */
- if (v < pf->num_alloc_vfs)
- dev_warn(dev, "VF reset check timeout\n");
-
- /* free VF resources to begin resetting the VSI state */
- ice_for_each_vf(pf, v) {
- vf = &pf->vf[v];
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VFs since it should be
- * set up only when the VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_invalidate_vsi(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi(vf);
- ice_vf_post_vsi_rebuild(vf);
- }
-
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
- ice_flush(hw);
- clear_bit(ICE_VF_DIS, pf->state);
-
- return true;
-}
-
-/**
- * ice_is_vf_disabled
- * @vf: pointer to the VF info
- *
- * Returns true if the PF or VF is disabled, false otherwise.
- */
-bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- /* If the PF has been disabled, there is no need to reset the VF until
- * the PF is active again. Similarly, if the VF has been disabled, this
- * means something else is resetting the VF, so we shouldn't continue.
- * Otherwise, set the disable VF state bit for the actual reset, and
- * continue.
- */
- return (test_bit(ICE_VF_DIS, pf->state) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states));
-}
-
-/**
- * ice_reset_vf - Reset a particular VF
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * Returns true if the VF is currently in reset, resets successfully, or resets
- * are disabled; false otherwise.
- */
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_hw *hw;
- bool rsd = false;
- u8 promisc_m;
- u32 reg;
- int i;
-
- dev = ice_pf_to_dev(pf);
-
- if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
- dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
- vf->vf_id);
- return true;
- }
-
- if (ice_is_vf_disabled(vf)) {
- dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
- vf->vf_id);
- return true;
- }
-
- /* Set VF disable bit state here, before triggering reset */
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- ice_trigger_vf_reset(vf, is_vflr, false);
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_dis_vf_qs(vf);
-
- /* Call Disable LAN Tx queue AQ whether or not queues are
- * enabled. This is needed for successful completion of VFR.
- */
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
-
- hw = &pf->hw;
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 10; i++) {
- /* VF reset requires driver to first reset the VF and then
- * poll the status register to make sure that the reset
- * completed successfully.
- */
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (reg & VPGEN_VFRSTAT_VFRD_M) {
- rsd = true;
- break;
- }
-
- /* only sleep if the reset is not done */
- usleep_range(10, 20);
- }
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- /* Display a warning if the VF didn't manage to reset in time, but
- * continue on with the operation.
- */
- if (!rsd)
- dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
-
- /* disable promiscuous modes in case they were enabled;
- * ignore any error if the disabling process fails
- */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
-
- ice_eswitch_del_vf_mac_rule(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting the VF since it should be set
- * up only when the VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
-
- if (ice_vf_rebuild_vsi_with_release(vf)) {
- dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
- return false;
- }
-
- ice_vf_post_vsi_rebuild(vf);
- vsi = ice_get_vf_vsi(vf);
- ice_eswitch_update_repr(vsi);
- ice_eswitch_replay_vf_mac_rule(vf);
-
- /* if the VF has been reset allow it to come up again */
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- return true;
-}
-
-/**
- * ice_vc_notify_link_state - Inform all VFs on a PF of link status
- * @pf: pointer to the PF structure
- */
-void ice_vc_notify_link_state(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i)
- ice_vc_notify_vf_link_state(&pf->vf[i]);
-}
-
-/**
- * ice_vc_notify_reset - Send pending reset message to all VFs
- * @pf: pointer to the PF structure
- *
- * indicate a pending reset to all VFs on a given PF
- */
-void ice_vc_notify_reset(struct ice_pf *pf)
-{
- struct virtchnl_pf_event pfe;
-
- if (!pf->num_alloc_vfs)
- return;
-
- pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
- (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
-}
-
-/**
- * ice_vc_notify_vf_reset - Notify VF of a reset event
- * @vf: pointer to the VF structure
- */
-static void ice_vc_notify_vf_reset(struct ice_vf *vf)
-{
- struct virtchnl_pf_event pfe;
- struct ice_pf *pf;
-
- if (!vf)
- return;
-
- pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return;
-
- /* Bail out if the VF is in the disabled state or is neither
- * initialized nor active - otherwise proceed with notifications
- */
- if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return;
-
- pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
- NULL);
-}
-
-/**
- * ice_init_vf_vsi_res - initialize/setup VF VSI resources
- * @vf: VF to initialize/setup the VSI for
- *
- * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
- * the VF VSI's broadcast filter. It is only used during initial VF creation.
- */
-static int ice_init_vf_vsi_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
-
- dev = ice_pf_to_dev(pf);
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
- vf->vf_id, err);
- goto release_vsi;
- }
-
- vf->num_mac = 1;
-
- return 0;
-
-release_vsi:
- ice_vf_vsi_release(vf);
- return err;
-}
-
-/**
- * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
- * @pf: PF the VFs are associated with
- */
-static int ice_start_vfs(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- int retval, i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_clear_vf_reset_trigger(vf);
-
- retval = ice_init_vf_vsi_res(vf);
- if (retval) {
- dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
- vf->vf_id, retval);
- goto teardown;
- }
-
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
- }
-
- ice_flush(hw);
- return 0;
-
-teardown:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_dis_vf_mappings(vf);
- ice_vf_vsi_release(vf);
- }
-
- return retval;
-}
-
-/**
- * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
- * @pf: PF holding reference to all VFs for default configuration
- */
-static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- vf->pf = pf;
- vf->vf_id = i;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
- vf->spoofchk = true;
- vf->num_vf_qs = pf->num_qps_per_vf;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-
- mutex_init(&vf->cfg_lock);
- }
-}
-
-/**
- * ice_alloc_vfs - allocate num_vfs in the PF structure
- * @pf: PF to store the allocated VFs in
- * @num_vfs: number of VFs to allocate
- */
-static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
-{
- struct ice_vf *vfs;
-
- vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
- GFP_KERNEL);
- if (!vfs)
- return -ENOMEM;
-
- pf->vf = vfs;
- pf->num_alloc_vfs = num_vfs;
-
- return 0;
-}
-
-/**
- * ice_ena_vfs - enable VFs so they are ready to be used
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to enable
- */
-static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int ret;
-
- /* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
- ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
- set_bit(ICE_OICR_INTR_DIS, pf->state);
- ice_flush(hw);
-
- ret = pci_enable_sriov(pf->pdev, num_vfs);
- if (ret) {
- pf->num_alloc_vfs = 0;
- goto err_unroll_intr;
- }
-
- ret = ice_alloc_vfs(pf, num_vfs);
- if (ret)
- goto err_pci_disable_sriov;
-
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
- num_vfs);
- ret = -ENOSPC;
- goto err_unroll_sriov;
- }
-
- ice_set_dflt_settings_vfs(pf);
-
- if (ice_start_vfs(pf)) {
- dev_err(dev, "Failed to start VF(s)\n");
- ret = -EAGAIN;
- goto err_unroll_sriov;
- }
-
- clear_bit(ICE_VF_DIS, pf->state);
-
- ret = ice_eswitch_configure(pf);
- if (ret)
- goto err_unroll_sriov;
-
- /* rearm global interrupts */
- if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
-
- return 0;
-
-err_unroll_sriov:
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
- pf->num_alloc_vfs = 0;
-err_pci_disable_sriov:
- pci_disable_sriov(pf->pdev);
-err_unroll_intr:
- /* rearm interrupts here */
- ice_irq_dynamic_ena(hw, NULL, NULL);
- clear_bit(ICE_OICR_INTR_DIS, pf->state);
- return ret;
-}
-
-/**
- * ice_pci_sriov_ena - Enable or change number of VFs
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to allocate
- *
- * Returns 0 on success and negative on failure
- */
-static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
-{
- int pre_existing_vfs = pci_num_vf(pf->pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- if (pre_existing_vfs && pre_existing_vfs != num_vfs)
- ice_free_vfs(pf);
- else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return 0;
-
- if (num_vfs > pf->num_vfs_supported) {
- dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
- num_vfs, pf->num_vfs_supported);
- return -EOPNOTSUPP;
- }
-
- dev_info(dev, "Enabling %d VFs\n", num_vfs);
- err = ice_ena_vfs(pf, num_vfs);
- if (err) {
- dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
- return err;
- }
-
- set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return 0;
-}
-
-/**
- * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
- * @pf: PF to enable SR-IOV on
- */
-static int ice_check_sriov_allowed(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
-
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/**
- * ice_sriov_configure - Enable or change number of VFs via sysfs
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate or 0 to free VFs
- *
- * This function is called when the user updates the number of VFs in sysfs. On
- * success return whatever num_vfs was set to by the caller. Return negative on
- * failure.
- */
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- struct ice_pf *pf = pci_get_drvdata(pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- err = ice_check_sriov_allowed(pf);
- if (err)
- return err;
-
- if (!num_vfs) {
- if (!pci_vfs_assigned(pdev)) {
- ice_mbx_deinit_snapshot(&pf->hw);
- ice_free_vfs(pf);
- if (pf->lag)
- ice_enable_lag(pf->lag);
- return 0;
- }
-
- dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
- return -EBUSY;
- }
-
- err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (err)
- return err;
-
- err = ice_pci_sriov_ena(pf, num_vfs);
- if (err) {
- ice_mbx_deinit_snapshot(&pf->hw);
- return err;
- }
-
- if (pf->lag)
- ice_disable_lag(pf->lag);
- return num_vfs;
-}
-
-/**
- * ice_process_vflr_event - Free VF resources via IRQ calls
- * @pf: pointer to the PF structure
- *
- * called from the VFLR IRQ handler to
- * free up VF resources and state variables
- */
-void ice_process_vflr_event(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- unsigned int vf_id;
- u32 reg;
-
- if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
- !pf->num_alloc_vfs)
- return;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- u32 reg_idx, bit_idx;
-
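-		/* GLGEN_VFLRSTAT is an array of 32-bit registers; find this VF's register and bit */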
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr VFs */
- reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx))
- /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
- ice_reset_vf(vf, true);
- }
-}
-
-/**
- * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
- * @vf: pointer to the VF info
- */
-static void ice_vc_reset_vf(struct ice_vf *vf)
-{
- ice_vc_notify_vf_reset(vf);
- ice_reset_vf(vf, false);
-}
-
-/**
- * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
- * @pf: PF used to index all VFs
- * @pfq: queue index relative to the PF's function space
- *
- * If no VF is found who owns the pfq then return NULL, otherwise return a
- * pointer to the VF who owns the pfq
- */
-static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
-{
- unsigned int vf_id;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- struct ice_vsi *vsi;
- u16 rxq_idx;
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_for_each_rxq(vsi, rxq_idx)
- if (vsi->rxq_map[rxq_idx] == pfq)
- return vf;
- }
-
- return NULL;
-}
-
-/**
- * ice_globalq_to_pfq - convert from global queue index to PF space queue index
- * @pf: PF used for conversion
- * @globalq: global queue index used to convert to PF space queue index
- */
-static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
-{
- return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
-}
-
-/**
- * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
- * @pf: PF that the LAN overflow event happened on
- * @event: structure holding the event information for the LAN overflow event
- *
- * Determine if the LAN overflow event was caused by a VF queue. If it was not
- * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
- * reset on the offending VF.
- */
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- u32 gldcb_rtctq, queue;
- struct ice_vf *vf;
-
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
- dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
-
- /* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
-
- vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
- if (!vf)
- return;
-
- ice_vc_reset_vf(vf);
-}
-
-/**
- * ice_vc_send_msg_to_vf - Send message to VF
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- */
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
-{
- struct device *dev;
- struct ice_pf *pf;
- int aq_ret;
-
- if (!vf)
- return -EINVAL;
-
- pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return -EINVAL;
-
- dev = ice_pf_to_dev(pf);
-
- /* single place to detect unsuccessful return values */
- if (v_retval) {
- vf->num_inval_msgs++;
- dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
- v_opcode, v_retval);
- if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(dev, "Use PF Control I/F to enable the VF\n");
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- return -EIO;
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_inval_msgs = 0;
- }
-
- aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
- if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
- vf->vf_id, aq_ret,
- ice_aq_str(pf->hw.mailboxq.sq_last_status));
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * ice_vc_get_ver_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to request the API version used by the PF
- */
-static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_version_info info = {
- VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
- };
-
- vf->vf_ver = *(struct virtchnl_version_info *)msg;
- /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
- if (VF_IS_V10(&vf->vf_ver))
- info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
-
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
- sizeof(struct virtchnl_version_info));
-}
-
-/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN so the PF needs to account for this in max frame size
- * checks and when sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
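-	/* the port VLAN tag consumes part of the frame, so reserve VLAN_HLEN for it */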
- if (vf->port_vlan_info)
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
-
-/**
- * ice_vc_get_vf_res_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to request its resources
- */
-static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_resource *vfres = NULL;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int len = 0;
- int ret;
-
- if (ice_check_vf_init(pf, vf)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_vf_resource);
-
- vfres = kzalloc(len, GFP_KERNEL);
- if (!vfres) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
- if (VF_IS_V11(&vf->vf_ver))
- vf->driver_caps = *(u32 *)msg;
- else
- vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
- VIRTCHNL_VF_OFFLOAD_VLAN;
-
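-	/* L2 offload is always granted; the remaining caps are granted only if the VF requested them */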
- vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!vsi->info.pvid)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
-
- if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
- vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
-
- vfres->num_vsis = 1;
- /* Tx and Rx queue are equal for VF */
- vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_msix_per_vf;
- vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
- vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vfres->max_mtu = ice_vc_get_max_frame_size(vf);
-
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
- vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
- vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
- ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->hw_lan_addr.addr);
-
- /* match guest capabilities */
- vf->driver_caps = vfres->vf_cap_flags;
-
- ice_vc_set_caps_allowlist(vf);
- ice_vc_set_working_allowlist(vf);
-
- set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
- (u8 *)vfres, len);
-
- kfree(vfres);
- return ret;
-}
-
-/**
- * ice_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself. Unlike other virtchnl messages, the
- * PF driver doesn't send a response back to the VF.
- */
-static void ice_vc_reset_vf_msg(struct ice_vf *vf)
-{
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- ice_reset_vf(vf, false);
-}
-
-/**
- * ice_find_vsi_from_id
- * @pf: the PF structure to search for the VSI
- * @id: ID of the VSI it is searching for
- *
- * searches for the VSI with the given ID
- */
-static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
- return pf->vsi[i];
-
- return NULL;
-}
-
-/**
- * ice_vc_isvalid_vsi_id
- * @vf: pointer to the VF info
- * @vsi_id: VF relative VSI ID
- *
- * check for the valid VSI ID
- */
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi_from_id(pf, vsi_id);
-
- return (vsi && (vsi->vf_id == vf->vf_id));
-}
-
-/**
- * ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
-{
- struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
- /* allocated Tx and Rx queues should be always equal for VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for a valid ring count: zero, or a multiple of ICE_REQ_DESC_MULTIPLE
- * within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
-
-/**
- * ice_vc_validate_pattern
- * @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate whether the pattern is supported.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
- bool is_ipv4 = false;
- bool is_ipv6 = false;
- bool is_udp = false;
- u16 ptype = -1;
- int i = 0;
-
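-	/* single pass over the header list; the L3/L4 context seen so far decides the final ptype */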
- while (i < proto->count &&
- proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
- switch (proto->proto_hdr[i].type) {
- case VIRTCHNL_PROTO_HDR_ETH:
- ptype = ICE_PTYPE_MAC_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_IPV4:
- ptype = ICE_PTYPE_IPV4_PAY;
- is_ipv4 = true;
- break;
- case VIRTCHNL_PROTO_HDR_IPV6:
- ptype = ICE_PTYPE_IPV6_PAY;
- is_ipv6 = true;
- break;
- case VIRTCHNL_PROTO_HDR_UDP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_UDP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_UDP_PAY;
- is_udp = true;
- break;
- case VIRTCHNL_PROTO_HDR_TCP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_TCP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_TCP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_SCTP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_SCTP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_SCTP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_GTPU_IP:
- case VIRTCHNL_PROTO_HDR_GTPU_EH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_GTPU;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_GTPU;
- goto out;
- case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_L2TPV3;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_L2TPV3;
- goto out;
- case VIRTCHNL_PROTO_HDR_ESP:
- if (is_ipv4)
- ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
- ICE_MAC_IPV4_ESP;
- else if (is_ipv6)
- ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
- ICE_MAC_IPV6_ESP;
- goto out;
- case VIRTCHNL_PROTO_HDR_AH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_AH;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_AH;
- goto out;
- case VIRTCHNL_PROTO_HDR_PFCP:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_PFCP_SESSION;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_PFCP_SESSION;
- goto out;
- default:
- break;
- }
- i++;
- }
-
-out:
- return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg into the intended
- * hash fields and headers for RSS configuration
- */
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
- u32 *addl_hdrs, u64 *hash_flds)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
-
- hf_list = ice_vc_hash_field_list;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
- hdr_list = ice_vc_hdr_list;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add an RSS config if true, otherwise delete an RSS config
- *
- * This function adds/deletes an RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- u8 lut_type, hash_type;
- int status;
-
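-		/* R_ASYMMETRIC selects the XOR hash on add; delete falls back to the default Toeplitz hash */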
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss = ((lut_type <<
- ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- u64 hash_flds = ICE_HASH_INVALID;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
- &hash_flds)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- int status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs);
- /* We just ignore -ENOENT, because if two configurations
-			 * share the same profile, removing one of them actually
- * removes both, since the profile is deleted.
- */
- if (status && status != -ENOENT) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
- vf->vf_id, status);
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
- * @vf: The VF being reset
- *
- * The max poll time is ~800 ms, which is about the maximum time it takes
- * for a VF to be reset and/or a VF driver to be removed.
- */
-static void ice_wait_on_vf_reset(struct ice_vf *vf)
-{
- int i;
-
- for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- break;
- msleep(ICE_MAX_VF_RESET_SLEEP_MS);
- }
-}
-
-/**
- * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
- * @vf: VF to check if it's ready to be configured/queried
- *
- * The purpose of this function is to make sure the VF is not in reset, not
- * disabled, and initialized so it can be configured and/or queried by a host
- * administrator.
- */
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- struct ice_pf *pf;
-
- ice_wait_on_vf_reset(vf);
-
- if (ice_is_vf_disabled(vf))
- return -EINVAL;
-
- pf = vf->pf;
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- return 0;
-}
-
-/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
- struct ice_vsi_ctx *ctx;
- struct ice_vsi *vf_vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vf_vsi = ice_get_vf_vsi(vf);
- if (!vf_vsi) {
- netdev_err(netdev, "VSI %d for VF %d is null\n",
- vf->lan_vsi_idx, vf->vf_id);
- return -EINVAL;
- }
-
- if (vf_vsi->type != ICE_VSI_VF) {
- netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
- vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
- return -ENODEV;
- }
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->info.sec_flags = vf_vsi->info.sec_flags;
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
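-	/* MAC anti-spoof and Tx VLAN pruning are toggled together */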
- if (ena) {
- ctx->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctx->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
-
- ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (ret) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret);
- goto out;
- }
-
- /* only update spoofchk state and VSI context on success */
- vf_vsi->info.sec_flags = ctx->info.sec_flags;
- vf->spoofchk = ena;
-
-out:
- kfree(ctx);
- return ret;
-}
-
-/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
- * @pf: PF structure for accessing VF(s)
- *
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
- * else return true
- */
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
-{
- int vf_idx;
-
- ice_for_each_vf(pf, vf_idx) {
- struct ice_vf *vf = &pf->vf[vf_idx];
-
- /* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- return true;
- }
-
- return false;
-}
-
-/**
- * ice_vc_cfg_promiscuous_mode_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure VF VSIs promiscuous mode
- */
-static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- bool rm_promisc, alluni = false, allmulti = false;
- struct virtchnl_promisc_info *info =
- (struct virtchnl_promisc_info *)msg;
- int mcast_err = 0, ucast_err = 0;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int ret = 0;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
- vf->vf_id);
- /* Leave v_ret alone, lie to the VF on purpose. */
- goto error_param;
- }
-
- if (info->flags & FLAG_VF_UNICAST_PROMISC)
- alluni = true;
-
- if (info->flags & FLAG_VF_MULTICAST_PROMISC)
- allmulti = true;
-
- rm_promisc = !allmulti && !alluni;
-
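-	/* disable VLAN pruning while promiscuous; re-enable it once promiscuous mode is removed */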
- if (vsi->num_vlan || vf->port_vlan_info) {
- if (rm_promisc)
- ret = ice_cfg_vlan_pruning(vsi, true);
- else
- ret = ice_cfg_vlan_pruning(vsi, false);
- if (ret) {
- dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
-
- if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = alluni || allmulti;
-
- if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
- /* only attempt to set the default forwarding VSI if
- * it's not currently set
- */
- ret = ice_set_dflt_vsi(pf->first_sw, vsi);
- else if (!set_dflt_vsi &&
- ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- /* only attempt to free the default forwarding VSI if we
- * are the owner
- */
- ret = ice_clear_dflt_vsi(pf->first_sw);
-
- if (ret) {
- dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
- set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- goto error_param;
- }
- } else {
- u8 mcast_m, ucast_m;
-
- if (vf->port_vlan_info || vsi->num_vlan > 1) {
- mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
- ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
- } else {
- mcast_m = ICE_MCAST_PROMISC_BITS;
- ucast_m = ICE_UCAST_PROMISC_BITS;
- }
-
- if (alluni)
- ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
- else
- ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
-
- if (allmulti)
- mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
- else
- mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
-
- if (ucast_err || mcast_err)
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- }
-
- if (!mcast_err) {
- if (allmulti &&
- !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
- vf->vf_id);
- else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
- vf->vf_id);
- }
-
- if (!ucast_err) {
- if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
- vf->vf_id);
- else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
- vf->vf_id);
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_get_stats_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to get VSI stats
- */
-static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_eth_stats stats = { 0 };
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_update_eth_stats(vsi);
-
- stats = vsi->eth_stats;
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
- (u8 *)&stats, sizeof(stats));
-}
-
-/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
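-	/* Tx rings are already running; just arm their interrupts and mark them enabled */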
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific
- * queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
- struct ice_txq_meta txq_meta = { 0 };
-
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
- ring, &txq_meta)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->txq_ena);
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @vector_id: vector ID
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- *
- * configure the IRQ to queue map
- */
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
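-	/* attach each Rx queue in the bitmap to this vector and program its ITR index */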
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_msix_per_vf < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < pf->num_msix_per_vf) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
-		/* Subtract the non-queue vector from vector_id passed by the VF
-		 * to get the actual VSI queue vector array index
-		 */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
-		/* look out for an invalid queue index */
- v_ret = (enum virtchnl_status_code)
- ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int i, q_idx;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[i]->count = qpi->txq.ring_len;
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
-
- vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
- vsi->rx_rings[i]->count = qpi->rxq.ring_len;
-
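-			/* Rx buffer size must be 0 (keep the default) or within 1024..16K-128 bytes */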
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- vsi->rx_buf_len = qpi->rxq.databuffer_size;
- vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is not
- * expected to account for it in the MTU calculation
- */
- if (vf->port_vlan_info)
- vsi->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_is_vf_trusted
- * @vf: pointer to the VF info
- */
-static bool ice_is_vf_trusted(struct ice_vf *vf)
-{
- return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool ice_can_vf_change_mac(struct ice_vf *vf)
-{
- /* If the VF MAC address has been set administratively (via the
- * ndo_set_vf_mac command), then deny permission to the VF to
- * add/delete unicast MAC addresses, unless the VF is trusted
- */
- if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
- return false;
-
- return true;
-}
-
-/**
- * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
- * @vc_ether_addr: used to extract the type
- */
-static u8
-ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
-{
- return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
-}
-
-/**
- * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
- * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
- */
-static bool
-ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
-{
- u8 type = ice_vc_ether_addr_type(vc_ether_addr);
-
- return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
-}
-
-/**
- * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
- * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
- *
- * This function should only be called when the MAC address in
- * virtchnl_ether_addr is a valid unicast MAC
- */
-static bool
-ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
-{
- u8 type = ice_vc_ether_addr_type(vc_ether_addr);
-
- return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
-}
-
-/**
- * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to add
- */
-static void
-ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
-{
- u8 *mac_addr = vc_ether_addr->addr;
-
- if (!is_valid_ether_addr(mac_addr))
- return;
-
- /* only allow legacy VF drivers to set the device and hardware MAC if it
- * is zero and allow new VF drivers to set the hardware MAC if the type
- * was correctly specified over VIRTCHNL
- */
- if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
- is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
- ice_is_vc_addr_primary(vc_ether_addr)) {
- ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
- ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
- }
-
-	/* hardware and device MACs are already set, but it's possible that the
- * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
- * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
- * away for the legacy VF driver case as it will be updated in the
- * delete flow for this case
- */
- if (ice_is_vc_addr_legacy(vc_ether_addr)) {
- ether_addr_copy(vf->legacy_last_added_umac.addr,
- mac_addr);
- vf->legacy_last_added_umac.time_modified = jiffies;
- }
-}
-
-/**
- * ice_vc_add_mac_addr - attempt to add the MAC address passed in
- * @vf: pointer to the VF info
- * @vsi: pointer to the VF's VSI
- * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
- */
-static int
-ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_ether_addr *vc_ether_addr)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- u8 *mac_addr = vc_ether_addr->addr;
- int ret;
-
- /* device MAC already added */
- if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
- return 0;
-
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
- return -EPERM;
- }
-
- ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (ret == -EEXIST) {
- dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
- vf->vf_id);
- /* don't return since we might need to update
- * the primary MAC in ice_vfhw_mac_add() below
- */
- } else if (ret) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
- mac_addr, vf->vf_id, ret);
- return ret;
- } else {
- vf->num_mac++;
- }
-
- ice_vfhw_mac_add(vf, vc_ether_addr);
-
- return ret;
-}
-
-/**
- * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
- * @last_added_umac: structure used to check expiration
- */
-static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
-{
-#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
- return time_is_before_jiffies(last_added_umac->time_modified +
- ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
-}
-
-/**
- * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to check
- *
- * only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
-static void
-ice_update_legacy_cached_mac(struct ice_vf *vf,
- struct virtchnl_ether_addr *vc_ether_addr)
-{
- if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
- ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
- return;
-
- ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
-}
-
-/**
- * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
- */
-static void
-ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
-{
- u8 *mac_addr = vc_ether_addr->addr;
-
- if (!is_valid_ether_addr(mac_addr) ||
- !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
- return;
-
- /* allow the device MAC to be repopulated in the add flow and don't
- * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
- * to be persistent on VM reboot and across driver unload/load, which
- * won't work if we clear the hardware MAC here
- */
- eth_zero_addr(vf->dev_lan_addr.addr);
-
- ice_update_legacy_cached_mac(vf, vc_ether_addr);
-}
-
-/**
- * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
- * @vf: pointer to the VF info
- * @vsi: pointer to the VF's VSI
- * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
- */
-static int
-ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_ether_addr *vc_ether_addr)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- u8 *mac_addr = vc_ether_addr->addr;
- int status;
-
- if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
- return 0;
-
- status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == -ENOENT) {
- dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
- vf->vf_id);
- return -ENOENT;
- } else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
- mac_addr, vf->vf_id, status);
- return -EIO;
- }
-
- ice_vfhw_mac_del(vf, vc_ether_addr);
-
- vf->num_mac--;
-
- return 0;
-}
-
-/**
- * ice_vc_handle_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @set: true if MAC filters are being set, false otherwise
- *
- * add guest MAC address filter
- */
-static int
-ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
-{
- int (*ice_vc_cfg_mac)
- (struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_ether_addr *virtchnl_ether_addr);
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_ether_addr_list *al =
- (struct virtchnl_ether_addr_list *)msg;
- struct ice_pf *pf = vf->pf;
- enum virtchnl_ops vc_op;
- struct ice_vsi *vsi;
- int i;
-
- if (set) {
- vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- ice_vc_cfg_mac = ice_vc_add_mac_addr;
- } else {
- vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- ice_vc_cfg_mac = ice_vc_del_mac_addr;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- /* If this VF is not privileged, then we can't add more than a
- * limited number of addresses. Check to make sure that the
- * additions do not push us over the limit.
- */
- if (set && !ice_is_vf_trusted(vf) &&
- (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- for (i = 0; i < al->num_elements; i++) {
- u8 *mac_addr = al->list[i].addr;
- int result;
-
- if (is_broadcast_ether_addr(mac_addr) ||
- is_zero_ether_addr(mac_addr))
- continue;
-
- result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
- if (result == -EEXIST || result == -ENOENT) {
- continue;
- } else if (result) {
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- goto handle_mac_exit;
- }
- }
-
-handle_mac_exit:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_add_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * add guest MAC address filter
- */
-static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_handle_mac_addr_msg(vf, msg, true);
-}
-
-/**
- * ice_vc_del_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * remove guest MAC address filter
- */
-static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_handle_mac_addr_msg(vf, msg, false);
-}
-
-/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
- * return 0. If unsuccessful, the PF will send a virtchnl response informing
- * the VF of the number of available queue pairs.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
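-	/* a VF keeps its current queues and may claim at most the Tx/Rx queues still unallocated on the PF */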
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_vc_reset_vf(vf);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
-/**
- * ice_set_vf_port_vlan
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @vlan_id: VLAN ID being set
- * @qos: priority setting
- * @vlan_proto: VLAN protocol
- *
- * program VF Port VLAN ID and/or QoS
- */
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
- struct ice_vf *vf;
- u16 vlanprio;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (vlan_id >= VLAN_N_VID || qos > 7) {
- dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
- vf_id, vlan_id, qos);
- return -EINVAL;
- }
-
- if (vlan_proto != htons(ETH_P_8021Q)) {
- dev_err(dev, "VF VLAN protocol is not supported\n");
- return -EPROTONOSUPPORT;
- }
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
-
- if (vf->port_vlan_info == vlanprio) {
- /* duplicate request, so just return success */
- dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
- return 0;
- }
-
- mutex_lock(&vf->cfg_lock);
-
- vf->port_vlan_info = vlanprio;
-
- if (vf->port_vlan_info)
- dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
- vlan_id, qos, vf_id);
- else
- dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
-
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
-
- return 0;
-}
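
A note on the encoding used above: the port VLAN ID and QoS priority are packed into the single u16 vf->port_vlan_info using the standard 802.1Q tag layout from <linux/if_vlan.h> (VID in bits 0-11, PCP in bits 13-15). A minimal sketch of the pack/unpack round trip; the helper function is hypothetical, only the macros are real kernel definitions:

#include <linux/if_vlan.h>

/* hypothetical helper, for illustration only */
static void example_port_vlan_pack_unpack(void)
{
	u16 vlanprio, vid;
	u8 qos;

	/* pack, as in ice_set_vf_port_vlan(): VID 100, QoS 5 -> 0xa064 */
	vlanprio = 100 | (5 << VLAN_PRIO_SHIFT);

	/* unpack, as in ice_get_vf_cfg() */
	vid = vlanprio & VLAN_VID_MASK;				/* 100 */
	qos = (vlanprio & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* 5 */
}
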
-
-/**
- * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
- */
-static bool ice_vf_vlan_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
-}
-
-/**
- * ice_vc_process_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @add_v: Add VLAN if true, otherwise delete VLAN
- *
- * Process virtchnl op to add or remove programmed guest VLAN ID
- */
-static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vlan_filter_list *vfl =
- (struct virtchnl_vlan_filter_list *)msg;
- struct ice_pf *pf = vf->pf;
- bool vlan_promisc = false;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_hw *hw;
- int status = 0;
- u8 promisc_m;
- int i;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < vfl->num_elements; i++) {
- if (vfl->vlan_id[i] >= VLAN_N_VID) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "invalid VF VLAN id %d\n",
- vfl->vlan_id[i]);
- goto error_param;
- }
- }
-
- hw = &pf->hw;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add_v && !ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let the VF know about not being trusted,
- * so we can just return a success message here
- */
- goto error_param;
- }
-
- if (vsi->info.pvid) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
- test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
- vlan_promisc = true;
-
- if (add_v) {
- for (i = 0; i < vfl->num_elements; i++) {
- u16 vid = vfl->vlan_id[i];
-
- if (!ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let VF know about being
- * not trusted, so we can just return success
- * message here as well.
- */
- goto error_param;
- }
-
- /* we add VLAN 0 by default for each VF so we can enable
- * Tx VLAN anti-spoof without triggering MDD events, so
- * we don't need to add it again here
- */
- if (!vid)
- continue;
-
- status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable VLAN pruning when non-zero VLAN is added */
- if (!vlan_promisc && vid &&
- !ice_vsi_is_vlan_pruning_ena(vsi)) {
- status = ice_cfg_vlan_pruning(vsi, true);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
- vid, status);
- goto error_param;
- }
- } else if (vlan_promisc) {
- /* Enable Ucast/Mcast VLAN promiscuous mode */
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- status = ice_set_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
- vid, status);
- }
- }
- }
- } else {
- /* In case of a non-trusted VF, the number of VLAN elements
- * passed to the PF for removal might be greater than the
- * number of VLAN filters programmed for that VF, so use the
- * actual number of VLANs added earlier with the add VLAN
- * opcode. This avoids removing a VLAN that doesn't exist,
- * which would result in sending an erroneous failure message
- * back to the VF.
- */
- int num_vf_vlan;
-
- num_vf_vlan = vsi->num_vlan;
- for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
- u16 vid = vfl->vlan_id[i];
-
- /* we add VLAN 0 by default for each VF so we can enable
- * Tx VLAN anti-spoof without triggering MDD events, so
- * we don't want a VIRTCHNL request to remove it
- */
- if (!vid)
- continue;
-
- /* Make sure ice_vsi_kill_vlan is successful before
- * updating VLAN information
- */
- status = ice_vsi_kill_vlan(vsi, vid);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Disable VLAN pruning when only VLAN 0 is left */
- if (vsi->num_vlan == 1 &&
- ice_vsi_is_vlan_pruning_ena(vsi))
- ice_cfg_vlan_pruning(vsi, false);
-
- /* Disable Unicast/Multicast VLAN promiscuous mode */
- if (vlan_promisc) {
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- ice_clear_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
- }
- }
- }
-
-error_param:
- /* send the response to the VF */
- if (add_v)
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
- NULL, 0);
- else
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_add_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Add and program guest VLAN ID
- */
-static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_process_vlan_msg(vf, msg, true);
-}
-
-/**
- * ice_vc_remove_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * remove programmed guest VLAN ID
- */
-static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_process_vlan_msg(vf, msg, false);
-}
-
-/**
- * ice_vc_ena_vlan_stripping
- * @vf: pointer to the VF info
- *
- * Enable VLAN header stripping for a given VF
- */
-static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vsi_manage_vlan_stripping(vsi, true))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_dis_vlan_stripping
- * @vf: pointer to the VF info
- *
- * Disable VLAN header stripping for a given VF
- */
-static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vsi_manage_vlan_stripping(vsi, false))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
- * @vf: VF to enable/disable VLAN stripping for on initialization
- *
- * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping; if the
- * flag is cleared, disable stripping. For example, the flag
- * will be cleared when port VLANs are configured by the administrator before
- * passing the VF to the guest or if the AVF driver doesn't support VLAN
- * offloads.
- */
-static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (!vsi)
- return -EINVAL;
-
- /* don't modify stripping if port VLAN is configured */
- if (vsi->info.pvid)
- return 0;
-
- if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return ice_vsi_manage_vlan_stripping(vsi, true);
- else
- return ice_vsi_manage_vlan_stripping(vsi, false);
-}
-
-static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
- .get_ver_msg = ice_vc_get_ver_msg,
- .get_vf_res_msg = ice_vc_get_vf_res_msg,
- .reset_vf = ice_vc_reset_vf_msg,
- .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
- .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
- .cfg_qs_msg = ice_vc_cfg_qs_msg,
- .ena_qs_msg = ice_vc_ena_qs_msg,
- .dis_qs_msg = ice_vc_dis_qs_msg,
- .request_qs_msg = ice_vc_request_qs_msg,
- .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
- .config_rss_key = ice_vc_config_rss_key,
- .config_rss_lut = ice_vc_config_rss_lut,
- .get_stats_msg = ice_vc_get_stats_msg,
- .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
- .add_vlan_msg = ice_vc_add_vlan_msg,
- .remove_vlan_msg = ice_vc_remove_vlan_msg,
- .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
- .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
- .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
- .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
- .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
-};
-
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
-{
- *ops = ice_vc_vf_dflt_ops;
-}
-
-/**
- * ice_vc_repr_add_mac
- * @vf: pointer to VF
- * @msg: virtchannel message
- *
- * When port representors are created, we do not add the MAC rule
- * to the firmware; instead we store it so that the PF can report
- * the same MAC as the VF.
- */
-static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_ether_addr_list *al =
- (struct virtchnl_ether_addr_list *)msg;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
- int i;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- pf = vf->pf;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- for (i = 0; i < al->num_elements; i++) {
- u8 *mac_addr = al->list[i].addr;
- int result;
-
- if (!is_unicast_ether_addr(mac_addr) ||
- ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
- continue;
-
- if (vf->pf_set_mac) {
- dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto handle_mac_exit;
- }
-
- result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
- if (result) {
- dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
- mac_addr, vf->vf_id, result);
- goto handle_mac_exit;
- }
-
- ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
- break;
- }
-
-handle_mac_exit:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_repr_del_mac - response with success for deleting MAC
- * @vf: pointer to VF
- * @msg: virtchannel message
- *
- * Respond with success to not break normal VF flow.
- * For a legacy VF driver, try to update the cached MAC address.
- */
-static int ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_ether_addr_list *al =
- (struct virtchnl_ether_addr_list *)msg;
-
- ice_update_legacy_cached_mac(vf, &al->list[0]);
-
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't enable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't disable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-static int
-ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't config promiscuous mode in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
-{
- ops->add_mac_addr_msg = ice_vc_repr_add_mac;
- ops->del_mac_addr_msg = ice_vc_repr_del_mac;
- ops->add_vlan_msg = ice_vc_repr_add_vlan;
- ops->remove_vlan_msg = ice_vc_repr_del_vlan;
- ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
- ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
- ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
-}
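
The two helpers above rely on simple function-pointer indirection: every VF starts with the default virtchnl handlers, and switchdev mode patches in the port-representor variants. A sketch of how a caller would be expected to combine them; the wrapper itself is hypothetical, while the two ice_vc_* calls and the vc_ops member are defined in this patch:

/* hypothetical wrapper, for illustration only */
static void example_setup_vf_ops(struct ice_vf *vf, bool switchdev)
{
	/* start every VF with the default virtchnl handlers */
	ice_vc_set_dflt_vf_ops(&vf->vc_ops);

	/* in switchdev mode, override the MAC/VLAN/promiscuous
	 * handlers with the port-representor variants
	 */
	if (switchdev)
		ice_vc_change_ops_to_repr(&vf->vc_ops);
}
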
-
-/**
- * ice_vc_process_vf_msg - Process request from VF
- * @pf: pointer to the PF structure
- * @event: pointer to the AQ event
- *
- * Called from the common ASQ/ARQ handler to process a request from a VF.
- */
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
- s16 vf_id = le16_to_cpu(event->desc.retval);
- u16 msglen = event->msg_len;
- struct ice_vc_vf_ops *ops;
- u8 *msg = event->msg_buf;
- struct ice_vf *vf = NULL;
- struct device *dev;
- int err = 0;
-
- /* if de-init is underway, don't process messages from VF */
- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
- return;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id)) {
- err = -EINVAL;
- goto error_handler;
- }
-
- vf = &pf->vf[vf_id];
-
- /* Check if VF is disabled. */
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
- err = -EPERM;
- goto error_handler;
- }
-
- ops = &vf->vc_ops;
-
- /* Perform basic checks on the msg */
- err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
- if (err) {
- if (err == VIRTCHNL_STATUS_ERR_PARAM)
- err = -EPERM;
- else
- err = -EINVAL;
- }
-
- if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
- ice_vc_send_msg_to_vf(vf, v_opcode,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
- 0);
- return;
- }
-
-error_handler:
- if (err) {
- ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
- NULL, 0);
- dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
- vf_id, v_opcode, msglen, err);
- return;
- }
-
- /* VF is being configured in another context that triggers a VFR, so no
- * need to process this message
- */
- if (!mutex_trylock(&vf->cfg_lock)) {
- dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
- vf->vf_id);
- return;
- }
-
- switch (v_opcode) {
- case VIRTCHNL_OP_VERSION:
- err = ops->get_ver_msg(vf, msg);
- break;
- case VIRTCHNL_OP_GET_VF_RESOURCES:
- err = ops->get_vf_res_msg(vf, msg);
- if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
- vf->vf_id);
- ice_vc_notify_vf_link_state(vf);
- break;
- case VIRTCHNL_OP_RESET_VF:
- ops->reset_vf(vf);
- break;
- case VIRTCHNL_OP_ADD_ETH_ADDR:
- err = ops->add_mac_addr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_DEL_ETH_ADDR:
- err = ops->del_mac_addr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- err = ops->cfg_qs_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ENABLE_QUEUES:
- err = ops->ena_qs_msg(vf, msg);
- ice_vc_notify_vf_link_state(vf);
- break;
- case VIRTCHNL_OP_DISABLE_QUEUES:
- err = ops->dis_qs_msg(vf, msg);
- break;
- case VIRTCHNL_OP_REQUEST_QUEUES:
- err = ops->request_qs_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- err = ops->cfg_irq_map_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_RSS_KEY:
- err = ops->config_rss_key(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_RSS_LUT:
- err = ops->config_rss_lut(vf, msg);
- break;
- case VIRTCHNL_OP_GET_STATS:
- err = ops->get_stats_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- err = ops->cfg_promiscuous_mode_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ADD_VLAN:
- err = ops->add_vlan_msg(vf, msg);
- break;
- case VIRTCHNL_OP_DEL_VLAN:
- err = ops->remove_vlan_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- err = ops->ena_vlan_stripping(vf);
- break;
- case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- err = ops->dis_vlan_stripping(vf);
- break;
- case VIRTCHNL_OP_ADD_FDIR_FILTER:
- err = ops->add_fdir_fltr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_DEL_FDIR_FILTER:
- err = ops->del_fdir_fltr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ADD_RSS_CFG:
- err = ops->handle_rss_cfg_msg(vf, msg, true);
- break;
- case VIRTCHNL_OP_DEL_RSS_CFG:
- err = ops->handle_rss_cfg_msg(vf, msg, false);
- break;
- case VIRTCHNL_OP_UNKNOWN:
- default:
- dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
- vf_id);
- err = ice_vc_send_msg_to_vf(vf, v_opcode,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
- break;
- }
- if (err) {
- /* Each handler sends its own reply to the VF, so all that is
- * left to do on error is log it.
- */
- dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
- vf_id, v_opcode, err);
- }
-
- mutex_unlock(&vf->cfg_lock);
-}
-
-/**
- * ice_get_vf_cfg
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ivi: VF configuration structure
- *
- * return VF configuration
- */
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
-
- /* VF configuration for VLAN and applicable QoS */
- ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
- ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-
- ivi->trusted = vf->trusted;
- ivi->spoofchk = vf->spoofchk;
- if (!vf->link_forced)
- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
- else if (vf->link_up)
- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- else
- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->max_tx_rate;
- ivi->min_tx_rate = vf->min_tx_rate;
- return 0;
-}
-
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
- struct ice_sw_recipe *mac_recipe_list =
- &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *rule_head;
- struct mutex *rule_lock; /* protect MAC filter list access */
-
- rule_head = &mac_recipe_list->filt_rules;
- rule_lock = &mac_recipe_list->filt_rule_lock;
-
- mutex_lock(rule_lock);
- list_for_each_entry(list_itr, rule_head, list_entry) {
- u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(existing_mac, umac)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
-
- mutex_unlock(rule_lock);
-
- return false;
-}
-
-/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @mac: MAC address
- *
- * program VF MAC address
- */
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
- return -EINVAL;
- }
-
- vf = &pf->vf[vf_id];
- /* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac))
- return 0;
-
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- if (ice_unicast_mac_exists(pf, mac)) {
- netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
- mac, vf_id, mac);
- return -EINVAL;
- }
-
- mutex_lock(&vf->cfg_lock);
-
- /* VF is notified of its new MAC via the PF's response to the
- * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
- */
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
- if (is_zero_ether_addr(mac)) {
- /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
- vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
- } else {
- /* PF will add MAC rule for the VF */
- vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
- }
-
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
- return 0;
-}
-
-/**
- * ice_set_vf_trust
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @trusted: Boolean value to enable/disable trusted VF
- *
- * Enable or disable a given VF as trusted
- */
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_is_eswitch_mode_switchdev(pf)) {
- dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- /* Check if already trusted */
- if (trusted == vf->trusted)
- return 0;
-
- mutex_lock(&vf->cfg_lock);
-
- vf->trusted = trusted;
- ice_vc_reset_vf(vf);
- dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
- vf_id, trusted ? "" : "un");
-
- mutex_unlock(&vf->cfg_lock);
-
- return 0;
-}
-
-/**
- * ice_set_vf_link_state
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @link_state: required link state
- *
- * Set VF's link state, irrespective of physical link state status
- */
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- switch (link_state) {
- case IFLA_VF_LINK_STATE_AUTO:
- vf->link_forced = false;
- break;
- case IFLA_VF_LINK_STATE_ENABLE:
- vf->link_forced = true;
- vf->link_up = true;
- break;
- case IFLA_VF_LINK_STATE_DISABLE:
- vf->link_forced = true;
- vf->link_up = false;
- break;
- default:
- return -EINVAL;
- }
-
- ice_vc_notify_vf_link_state(vf);
-
- return 0;
-}
-
-/**
- * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
- * @pf: PF associated with VFs
- */
-static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
-{
- int rate = 0, i;
-
- ice_for_each_vf(pf, i)
- rate += pf->vf[i].min_tx_rate;
-
- return rate;
-}
-
-/**
- * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
- * @vf: VF trying to configure min_tx_rate
- * @min_tx_rate: min Tx rate in Mbps
- *
- * Check if the min_tx_rate being passed in will cause oversubscription of the
- * total min_tx_rate based on the current link speed and all other VFs'
- * configured min_tx_rate.
- *
- * Return true if the passed min_tx_rate would cause oversubscription, else
- * return false
- */
-static bool
-ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
-{
- int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
- int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
-
- /* this VF's previous rate is being overwritten */
- all_vfs_min_tx_rate -= vf->min_tx_rate;
-
- if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
- dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
- min_tx_rate, vf->vf_id,
- all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
- link_speed_mbps);
- return true;
- }
-
- return false;
-}
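
A worked example of the check above, with hypothetical numbers: on a 10000 Mbps link with 8000 Mbps of min_tx_rate already committed across all VFs, 1000 Mbps of which belongs to this VF, a request for 3500 Mbps is rejected because 8000 - 1000 + 3500 = 10500 Mbps exceeds the link speed by 500 Mbps; a request for 2500 Mbps would be accepted, since 8000 - 1000 + 2500 = 9500 Mbps fits within the link.
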
-
-/**
- * ice_set_vf_bw - set min/max VF bandwidth
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @min_tx_rate: Minimum Tx rate in Mbps
- * @max_tx_rate: Maximum Tx rate in Mbps
- */
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
-
- /* when max_tx_rate is zero that means no max Tx rate limiting, so only
- * check if max_tx_rate is non-zero
- */
- if (max_tx_rate && min_tx_rate > max_tx_rate) {
- dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
- min_tx_rate, max_tx_rate);
- return -EINVAL;
- }
-
- if (min_tx_rate && ice_is_dcb_active(pf)) {
- dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
- return -EINVAL;
-
- if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
- ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->min_tx_rate = min_tx_rate;
- }
-
- if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
- ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->max_tx_rate = max_tx_rate;
- }
-
- return 0;
-}
-
-/**
- * ice_get_vf_stats - populate some stats for the VF
- * @netdev: the netdev of the PF
- * @vf_id: the host OS identifier (0-255)
- * @vf_stats: pointer to the OS memory to be initialized
- */
-int ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_eth_stats *stats;
- struct ice_vsi *vsi;
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- ice_update_eth_stats(vsi);
- stats = &vsi->eth_stats;
-
- memset(vf_stats, 0, sizeof(*vf_stats));
-
- vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
- stats->rx_multicast;
- vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
- stats->tx_multicast;
- vf_stats->rx_bytes = stats->rx_bytes;
- vf_stats->tx_bytes = stats->tx_bytes;
- vf_stats->broadcast = stats->rx_broadcast;
- vf_stats->multicast = stats->rx_multicast;
- vf_stats->rx_dropped = stats->rx_discards;
- vf_stats->tx_dropped = stats->tx_discards;
-
- return 0;
-}
-
-/**
- * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
- * @vf: pointer to the VF structure
- */
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
-}
-
-/**
- * ice_print_vfs_mdd_events - print VF malicious driver detect events
- * @pf: pointer to the PF structure
- *
- * Called from ice_handle_mdd_event to rate-limit and print VF MDD events.
- */
-void ice_print_vfs_mdd_events(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int i;
-
- /* check that there are pending MDD events to print */
- if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
- return;
-
- /* VF MDD event logs are rate-limited to one-second intervals */
- if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
- return;
-
- pf->last_printed_mdd_jiffies = jiffies;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- /* only print Rx MDD event message if there are new events */
- if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
- vf->mdd_rx_events.last_printed =
- vf->mdd_rx_events.count;
- ice_print_vf_rx_mdd_event(vf);
- }
-
- /* only print Tx MDD event message if there are new events */
- if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
- vf->mdd_tx_events.last_printed =
- vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, i,
- vf->dev_lan_addr.addr);
- }
- }
-}
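
The one-second throttle above is the standard jiffies rate-limit idiom. A generic sketch of the pattern, with a hypothetical caller-owned timestamp standing in for the driver's pf->last_printed_mdd_jiffies:

#include <linux/jiffies.h>

/* hypothetical helper, for illustration only */
static bool example_ratelimit_1s(unsigned long *last)
{
	/* still inside the one-second window since the last event */
	if (time_is_after_jiffies(*last + HZ))
		return false;

	*last = jiffies;
	return true;
}
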
-
-/**
- * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
- *
- * Called when recovering from a PF FLR to restore interrupt capability to
- * the VFs.
- */
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
-{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
-}
-
-/**
- * ice_is_malicious_vf - helper function to detect a malicious VF
- * @pf: ptr to struct ice_pf
- * @event: pointer to the AQ event
- * @num_msg_proc: the number of messages processed so far
- * @num_msg_pending: the number of messages pending in the admin queue
- */
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending)
-{
- s16 vf_id = le16_to_cpu(event->desc.retval);
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_mbx_data mbxdata;
- bool malvf = false;
- struct ice_vf *vf;
- int status;
-
- if (ice_validate_vf_id(pf, vf_id))
- return false;
-
- vf = &pf->vf[vf_id];
- /* Check if VF is disabled. */
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
-
- mbxdata.num_msg_proc = num_msg_proc;
- mbxdata.num_pending_arq = num_msg_pending;
- mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
-#define ICE_MBX_OVERFLOW_WATERMARK 64
- mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
-
- /* check to see if we have a malicious VF */
- status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
- if (status)
- return false;
-
- if (malvf) {
- bool report_vf = false;
-
- /* if the VF is malicious and we haven't let the user
- * know about it, then let them know now
- */
- status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
- ICE_MAX_VF_COUNT, vf_id,
- &report_vf);
- if (status)
- dev_dbg(dev, "Error reporting malicious VF\n");
-
- if (report_vf) {
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (pf_vsi)
- dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
- pf_vsi->netdev->dev_addr);
- }
-
- return true;
- }
-
- /* if there was an error in detection or the VF is not malicious then
- * return false
- */
- return false;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
deleted file mode 100644
index 752487a1bdd6..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ /dev/null
@@ -1,346 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_VIRTCHNL_PF_H_
-#define _ICE_VIRTCHNL_PF_H_
-#include "ice.h"
-#include "ice_virtchnl_fdir.h"
-
-/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
-#define ICE_MAX_VLAN_PER_VF 8
-/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
- * broadcast, and 16 for additional unicast/multicast filters
- */
-#define ICE_MAX_MACADDR_PER_VF 18
-
-/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
-#define ICE_MDD_EVENTS_THRESHOLD 30
-
-/* Static VF transaction/status register def */
-#define VF_DEVICE_STATUS 0xAA
-#define VF_TRANS_PENDING_M 0x20
-
-/* wait defines for polling PF_PCI_CIAD register status */
-#define ICE_PCI_CIAD_WAIT_COUNT 100
-#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-
-/* VF resource constraints */
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_RSS_QS_PER_VF 16
-#define ICE_NUM_VF_MSIX_MED 17
-#define ICE_NUM_VF_MSIX_SMALL 5
-#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_MAX_VF_RESET_TRIES 40
-#define ICE_MAX_VF_RESET_SLEEP_MS 20
-
-#define ice_for_each_vf(pf, i) \
- for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
-
-/* Specific VF states */
-enum ice_vf_states {
- ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
- ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
- ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
- ICE_VF_STATE_DIS,
- ICE_VF_STATE_MC_PROMISC,
- ICE_VF_STATE_UC_PROMISC,
- ICE_VF_STATES_NBITS
-};
-
-/* VF capabilities */
-enum ice_virtchnl_cap {
- ICE_VIRTCHNL_VF_CAP_L2 = 0,
- ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
-};
-
-struct ice_time_mac {
- unsigned long time_modified;
- u8 addr[ETH_ALEN];
-};
-
-/* VF MDD events print structure */
-struct ice_mdd_vf_events {
- u16 count; /* total count of Rx|Tx events */
- /* count number of the last printed event */
- u16 last_printed;
-};
-
-struct ice_vf;
-
-struct ice_vc_vf_ops {
- int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
- int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
- void (*reset_vf)(struct ice_vf *vf);
- int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
- int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
- int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_vlan_stripping)(struct ice_vf *vf);
- int (*dis_vlan_stripping)(struct ice_vf *vf);
- int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
- int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
-};
-
-/* VF information structure */
-struct ice_vf {
- struct ice_pf *pf;
-
- /* Used during virtchnl message handling and NDO ops against the VF
- * that will trigger a VFR
- */
- struct mutex cfg_lock;
-
- u16 vf_id; /* VF ID in the PF space */
- u16 lan_vsi_idx; /* index into PF struct */
- u16 ctrl_vsi_idx;
- struct ice_vf_fdir fdir;
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
- struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
- struct virtchnl_version_info vf_ver;
- u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
- struct ice_time_mac legacy_last_added_umac;
- DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- u16 port_vlan_info; /* Port VLAN ID and QoS */
- u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
- u8 trusted:1;
- u8 spoofchk:1;
- u8 link_forced:1;
- u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
- unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
- DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
-
- u64 num_inval_msgs; /* number of continuous invalid msgs */
- u64 num_valid_msgs; /* number of valid msgs detected */
- unsigned long vf_caps; /* VF's adv. capabilities */
- u8 num_req_qs; /* num of queue pairs requested by VF */
- u16 num_mac;
- u16 num_vf_qs; /* num of queue configured per VF */
- struct ice_mdd_vf_events mdd_rx_events;
- struct ice_mdd_vf_events mdd_tx_events;
- DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
-
- struct ice_repr *repr;
-
- struct ice_vc_vf_ops vc_ops;
-
- /* devlink port data */
- struct devlink_port devlink_port;
-};
-
-#ifdef CONFIG_PCI_IOV
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
-void ice_process_vflr_event(struct ice_pf *pf);
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
-
-void ice_free_vfs(struct ice_pf *pf);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_vc_notify_link_state(struct ice_pf *pf);
-void ice_vc_notify_reset(struct ice_pf *pf);
-void ice_vc_notify_vf_link_state(struct ice_vf *vf);
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending);
-
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto);
-
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
-
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
-
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
-
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-
-bool ice_is_vf_disabled(struct ice_vf *vf);
-
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
-
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-int
-ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_print_vfs_mdd_events(struct ice_pf *pf);
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
-#else /* CONFIG_PCI_IOV */
-static inline void ice_process_vflr_event(struct ice_pf *pf) { }
-static inline void ice_free_vfs(struct ice_pf *pf) { }
-static inline
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
-static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
-static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
-static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
-static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
-static inline
-void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
-static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
-
-static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- return true;
-}
-
-static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return NULL;
-}
-
-static inline bool
-ice_is_malicious_vf(struct ice_pf __always_unused *pf,
- struct ice_rq_event_info __always_unused *event,
- u16 __always_unused num_msg_proc,
- u16 __always_unused num_msg_pending)
-{
- return false;
-}
-
-static inline bool
-ice_reset_all_vfs(struct ice_pf __always_unused *pf,
- bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline bool
-ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline int
-ice_sriov_configure(struct pci_dev __always_unused *pdev,
- int __always_unused num_vfs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_mac(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u8 __always_unused *mac)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_get_vf_cfg(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_info __always_unused *ivi)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_trust(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused trusted)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u16 __always_unused vid,
- u8 __always_unused qos, __be16 __always_unused v_proto)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused ena)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_link_state(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused link_state)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
- struct ice_q_vector __always_unused *q_vector)
-{
- return 0;
-}
-
-static inline int
-ice_get_vf_stats(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_stats __always_unused *vf_stats)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
-{
- return false;
-}
-#endif /* CONFIG_PCI_IOV */
-#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan.h b/drivers/net/ethernet/intel/ice/ice_vlan.h
new file mode 100644
index 000000000000..bc4550a03173
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_H_
+#define _ICE_VLAN_H_
+
+#include <linux/types.h>
+#include "ice_type.h"
+
+struct ice_vlan {
+ u16 tpid;
+ u16 vid;
+ u8 prio;
+};
+
+#define ICE_VLAN(tpid, vid, prio) ((struct ice_vlan){ tpid, vid, prio })
+
+#endif /* _ICE_VLAN_H_ */
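
The ICE_VLAN() compound-literal macro above lets callers build the filter argument inline. A usage sketch, assuming the ice_vsi_add_vlan() signature introduced later in this patch in ice_vsi_vlan_lib.c; the wrapper function itself is hypothetical:

#include <linux/if_ether.h>	/* ETH_P_8021Q */

/* hypothetical wrapper, for illustration only */
static int example_add_ctag_filter(struct ice_vsi *vsi)
{
	/* 0x8100 C-tag, VID 100, priority 0 */
	struct ice_vlan vlan = ICE_VLAN(ETH_P_8021Q, 100, 0);

	return ice_vsi_add_vlan(vsi, &vlan);
}
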
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
new file mode 100644
index 000000000000..bcda2e004807
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_common.h"
+
+/**
+ * ice_pkg_get_supported_vlan_mode - determine if DDP supports Double VLAN mode
+ * @hw: pointer to the HW struct
+ * @dvm: output variable to determine if DDP supports DVM (true) or SVM (false)
+ */
+static int
+ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
+{
+ u16 meta_init_size = sizeof(struct ice_meta_init_section);
+ struct ice_meta_init_section *sect;
+ struct ice_buf_build *bld;
+ int status;
+
+ /* if anything fails, we assume there is no DVM support */
+ *dvm = false;
+
+ bld = ice_pkg_buf_alloc_single_section(hw,
+ ICE_SID_RXPARSER_METADATA_INIT,
+ meta_init_size, (void **)&sect);
+ if (!bld)
+ return -ENOMEM;
+
+ /* only need to read a single section */
+ sect->count = cpu_to_le16(1);
+ sect->offset = cpu_to_le16(ICE_META_VLAN_MODE_ENTRY);
+
+ status = ice_aq_upload_section(hw,
+ (struct ice_buf_hdr *)ice_pkg_buf(bld),
+ ICE_PKG_BUF_SIZE, NULL);
+ if (!status) {
+ DECLARE_BITMAP(entry, ICE_META_INIT_BITS);
+ u32 arr[ICE_META_INIT_DW_CNT];
+ u16 i;
+
+ /* convert to host bitmap format */
+ for (i = 0; i < ICE_META_INIT_DW_CNT; i++)
+ arr[i] = le32_to_cpu(sect->entry.bm[i]);
+
+ bitmap_from_arr32(entry, arr, (u16)ICE_META_INIT_BITS);
+
+ /* check if DVM is supported */
+ *dvm = test_bit(ICE_META_VLAN_MODE_BIT, entry);
+ }
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_vlan_mode - get the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @get_params: structure FW fills in based on the current VLAN mode config
+ *
+ * Get VLAN Mode Parameters (0x020D)
+ */
+static int
+ice_aq_get_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_get_vlan_mode *get_params)
+{
+ struct ice_aq_desc desc;
+
+ if (!get_params)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_get_vlan_mode_parameters);
+
+ return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params),
+ NULL);
+}
+
+/**
+ * ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns true if the hardware/firmware is configured in double VLAN mode,
+ * else returns false, signaling that the hardware/firmware is configured in
+ * single VLAN mode.
+ *
+ * Also returns false if this call fails for any reason (e.g. the firmware
+ * doesn't support this AQ call).
+ */
+static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_params = { 0 };
+ int status;
+
+ status = ice_aq_get_vlan_mode(hw, &get_params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
+}
+
+/**
+ * ice_is_dvm_ena - check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * The device is configured in single or double VLAN mode on initialization and
+ * this cannot be dynamically changed during runtime. Based on this there is no
+ * need to make an AQ call every time the driver needs to know the VLAN mode.
+ * Instead, use the cached VLAN mode.
+ */
+bool ice_is_dvm_ena(struct ice_hw *hw)
+{
+ return hw->dvm_ena;
+}
+
+/**
+ * ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded
+ * @hw: pointer to the HW structure
+ *
+ * This is only called after downloading the DDP and after the global
+ * configuration lock has been released because all ports on a device need to
+ * cache the VLAN mode.
+ */
+static void ice_cache_vlan_mode(struct ice_hw *hw)
+{
+ hw->dvm_ena = ice_aq_is_dvm_ena(hw);
+}
+
+/**
+ * ice_pkg_supports_dvm - find out if DDP supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_pkg_supports_dvm(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm;
+ int status;
+
+ status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return pkg_supports_dvm;
+}
+
+/**
+ * ice_fw_supports_dvm - find out if FW supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_fw_supports_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
+ int status;
+
+ /* If firmware returns success, then it supports DVM, else it only
+ * supports SVM
+ */
+ status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_is_dvm_supported - check if Double VLAN Mode is supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if Double VLAN Mode (DVM) is supported and false if only Single
+ * VLAN Mode (SVM) is supported. In order for DVM to be supported, the DDP and
+ * firmware must both support it; otherwise only SVM is supported. This function
+ * should only be called while the global config lock is held and after the
+ * package has been successfully downloaded.
+ */
+static bool ice_is_dvm_supported(struct ice_hw *hw)
+{
+ if (!ice_pkg_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "DDP doesn't support DVM\n");
+ return false;
+ }
+
+ if (!ice_fw_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "FW doesn't support DVM\n");
+ return false;
+ }
+
+ return true;
+}
+
+#define ICE_EXTERNAL_VLAN_ID_FV_IDX 11
+#define ICE_SW_LKUP_VLAN_LOC_LKUP_IDX 1
+#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
+#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
+#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
+static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_VLAN_LOC_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the VLAN
+ * packet flags to support VLAN filtering on multiple VLAN
+ * ethertypes (i.e. 0x8100 and 0x88a8) in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX,
+ .ignore_valid = false,
+ .mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK,
+ .mask_valid = true,
+ .lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_PROMISC_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_PROMISC_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX,
+ },
+};
+
+/**
+ * ice_dvm_update_dflt_recipes - update default switch recipes in DVM
+ * @hw: hardware structure used to update the recipes
+ */
+static int ice_dvm_update_dflt_recipes(struct ice_hw *hw)
+{
+ unsigned long i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_dvm_dflt_recipes); i++) {
+ struct ice_update_recipe_lkup_idx_params *params;
+ int status;
+
+ params = &ice_dvm_dflt_recipes[i];
+
+ status = ice_update_recipe_lkup_idx(hw, params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update RID %d lkup_idx %d fv_idx %d mask_valid %s mask 0x%04x\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask_valid ? "true" : "false",
+ params->mask);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_vlan_mode - set the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @set_params: requested VLAN mode configuration
+ *
+ * Set VLAN Mode Parameters (0x020C)
+ */
+static int
+ice_aq_set_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_set_vlan_mode *set_params)
+{
+ u8 rdma_packet, mng_vlan_prot_id;
+ struct ice_aq_desc desc;
+
+ if (!set_params)
+ return -EINVAL;
+
+ if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX)
+ return -EINVAL;
+
+ rdma_packet = set_params->rdma_packet;
+ if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING &&
+ rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING)
+ return -EINVAL;
+
+ mng_vlan_prot_id = set_params->mng_vlan_prot_id;
+ if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER &&
+ mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_set_vlan_mode_parameters);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
+ NULL);
+}
+
+/**
+ * ice_set_dvm - sets up software and hardware for double VLAN mode
+ * @hw: pointer to the hardware structure
+ */
+static int ice_set_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode params = { 0 };
+ int status;
+
+ params.l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG;
+ params.rdma_packet = ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ params.mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER;
+
+ status = ice_aq_set_vlan_mode(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set double VLAN mode parameters, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_dvm_update_dflt_recipes(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update default recipes for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_aq_set_port_params(hw->port_info, true, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port in double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_set_dvm_boost_entries(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set boost TCAM entries for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_svm - set single VLAN mode
+ * @hw: pointer to the HW structure
+ */
+static int ice_set_svm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode *set_params;
+ int status;
+
+ status = ice_aq_set_port_params(hw->port_info, false, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n");
+ return status;
+ }
+
+ set_params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*set_params),
+ GFP_KERNEL);
+ if (!set_params)
+ return -ENOMEM;
+
+ /* default configuration for SVM */
+ set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG;
+ set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER;
+
+ status = ice_aq_set_vlan_mode(hw, set_params);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n");
+
+ devm_kfree(ice_hw_to_dev(hw), set_params);
+ return status;
+}
+
+/**
+ * ice_set_vlan_mode - set up the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ */
+int ice_set_vlan_mode(struct ice_hw *hw)
+{
+ if (!ice_is_dvm_supported(hw))
+ return 0;
+
+ if (!ice_set_dvm(hw))
+ return 0;
+
+ return ice_set_svm(hw);
+}
+
+/**
+ * ice_print_dvm_not_supported - print if DDP and/or FW doesn't support DVM
+ * @hw: pointer to the HW structure
+ *
+ * The purpose of this function is to print that QinQ is not supported due to
+ * incompatibility of the DDP and/or FW. This gives the user a hint to
+ * update one or both components if they expect QinQ functionality.
+ */
+static void ice_print_dvm_not_supported(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm = ice_pkg_supports_dvm(hw);
+ bool fw_supports_dvm = ice_fw_supports_dvm(hw);
+
+ if (!fw_supports_dvm && !pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package and NVM to versions that support QinQ.\n");
+ else if (!pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package to a version that supports QinQ.\n");
+ else if (!fw_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your NVM to a version that supports QinQ.\n");
+}
+
+/**
+ * ice_post_pkg_dwnld_vlan_mode_cfg - configure VLAN mode after DDP download
+ * @hw: pointer to the HW structure
+ *
+ * This function is meant to configure any VLAN mode specific functionality
+ * after the global configuration lock has been released and the DDP has been
+ * downloaded.
+ *
+ * Since only one PF downloads the DDP and configures the VLAN mode, there needs
+ * to be a way to configure the other PFs after the DDP has been downloaded and
+ * the global configuration lock has been released. All such code should go in
+ * this function.
+ */
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw)
+{
+ ice_cache_vlan_mode(hw);
+
+ if (ice_is_dvm_ena(hw))
+ ice_change_proto_id_to_dvm();
+ else
+ ice_print_dvm_not_supported(hw);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.h b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
new file mode 100644
index 000000000000..a0fb743d08e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_MODE_H_
+#define _ICE_VLAN_MODE_H_
+
+struct ice_hw;
+
+bool ice_is_dvm_ena(struct ice_hw *hw);
+int ice_set_vlan_mode(struct ice_hw *hw);
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
+
+#endif /* _ICE_VLAN_MODE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
new file mode 100644
index 000000000000..5b4a0abb4607
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_lib.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice.h"
+
+static void print_invalid_tpid(struct ice_vsi *vsi, u16 tpid)
+{
+ dev_err(ice_pf_to_dev(vsi->back), "%s %d specified invalid VLAN tpid 0x%04x\n",
+ ice_vsi_type_str(vsi->type), vsi->idx, tpid);
+}
+
+/**
+ * validate_vlan - check if the ice_vlan passed in is valid
+ * @vsi: VSI used for printing error message
+ * @vlan: ice_vlan structure to validate
+ *
+ * Return true if the VLAN TPID is valid, or if both the VLAN TPID and the
+ * VLAN VID are 0. This allows non-zero VLAN filters with a supported TPID and
+ * the untagged VLAN 0 filter to be added to the prune list.
+ */
+static bool validate_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ if (vlan->tpid != ETH_P_8021Q && vlan->tpid != ETH_P_8021AD &&
+ vlan->tpid != ETH_P_QINQ1 && (vlan->tpid || vlan->vid)) {
+ print_invalid_tpid(vsi, vlan->tpid);
+ return false;
+ }
+
+ return true;
+}
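
The rule above compresses to: a known TPID is always acceptable, and a zero TPID is acceptable only together with VID 0. A minimal standalone sketch of the same predicate, assuming the usual ETH_P_* values from linux/if_ether.h (hypothetical helper, not driver code):

    #include <stdbool.h>
    #include <stdint.h>

    #define ETH_P_8021Q  0x8100 /* 802.1Q C-tag */
    #define ETH_P_8021AD 0x88A8 /* 802.1ad S-tag */
    #define ETH_P_QINQ1  0x9100 /* legacy QinQ tag */

    /* Valid when the TPID is a known tag type, or when TPID and VID are
     * both 0 (the untagged "VLAN 0" prune-list entry). */
    static bool vlan_tpid_vid_valid(uint16_t tpid, uint16_t vid)
    {
            if (tpid == ETH_P_8021Q || tpid == ETH_P_8021AD ||
                tpid == ETH_P_QINQ1)
                    return true;
            return tpid == 0 && vid == 0;
    }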
+
+/**
+ * ice_vsi_add_vlan - default add VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to add
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ err = ice_fltr_add_vlan(vsi, vlan);
+ if (err && err != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ vlan->vid, vsi->vsi_num, err);
+ return err;
+ }
+
+ vsi->num_vlan++;
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan - default del VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to delete
+ */
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_fltr_remove_vlan(vsi, vlan);
+ if (!err)
+ vsi->num_vlan--;
+ else if (err == -ENOENT || err == -EBUSY)
+ err = 0;
+ else
+ dev_err(dev, "Error removing VLAN %d on VSI %i error: %d\n",
+ vlan->vid, vsi->vsi_num, err);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting inner_vlan_flags to ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL. The
+ * actual VLAN tag insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &
+ ICE_AQ_VSI_INNER_VLAN_EMODE_M);
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_inner_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena)
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
+ else
+ /* Disable stripping. Leave tag in packet */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+
+ /* Allow all packets untagged/tagged */
+ ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_stripping(vsi, true);
+}
+
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+/**
+ * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
+ * @vsi: the VSI to update
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
+ */
+static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
+ ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ info->port_based_inner_vlan = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = info->inner_vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.port_based_inner_vlan = info->port_based_inner_vlan;
+out:
+ kfree(ctxt);
+ return ret;
+}
+
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->tpid != ETH_P_8021Q)
+ return -EINVAL;
+
+ if (vlan->prio > 7)
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
+}
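
The packed field above follows the 802.1Q tag layout: PCP in bits 15-13, DEI in bit 12, VID in bits 11-0, with VLAN_PRIO_SHIFT being 13 in linux/if_vlan.h. A standalone sketch of the packing (hypothetical helper, not driver code):

    #include <stdint.h>

    #define VLAN_PRIO_SHIFT 13 /* PCP occupies bits 15-13 */

    static uint16_t pack_port_vlan(uint16_t vid, uint8_t prio)
    {
            /* vid in bits 11-0, prio in bits 15-13; DEI left at 0 */
            return vid | ((uint16_t)prio << VLAN_PRIO_SHIFT);
    }

    /* e.g. pack_port_vlan(100, 5) == 0xa064 */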
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct ice_pf *pf;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
+ pf = vsi->back;
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena)
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ goto err_out;
+ }
+
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ kfree(ctxt);
+ return 0;
+
+err_out:
+ kfree(ctxt);
+ return status;
+}
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, true);
+}
+
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, false);
+}
+
+static int ice_cfg_vlan_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ else
+ ctx->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx VLAN anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, true);
+}
+
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, false);
+}
+
+/**
+ * tpid_to_vsi_outer_vlan_type - convert from TPID to VSI context based tag_type
+ * @tpid: tpid used to translate into VSI context based tag_type
+ * @tag_type: output variable to hold the VSI context based tag type
+ */
+static int tpid_to_vsi_outer_vlan_type(u16 tpid, u8 *tag_type)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_8100;
+ break;
+ case ETH_P_8021AD:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_STAG;
+ break;
+ case ETH_P_QINQ1:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_9100;
+ break;
+ default:
+ *tag_type = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_ena_outer_stripping - enable outer VLAN stripping
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN stripping for
+ *
+ * Enable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for stripping will affect the TPID for insertion.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN stripping settings and the VLAN TPID. Outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This enables hardware to strip a VLAN tag with the specified TPID from the
+ * packet and place it in the receive descriptor.
+ */
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_stripping - disable outer VLAN stripping
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN stripping settings. The VLAN TPID and outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This tells the hardware to not strip any VLAN tagged packets, thus leaving
+ * them in the packet. This enables software offloaded VLAN stripping and
+ * disables hardware offloaded VLAN stripping.
+ */
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
+ ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_ena_outer_insertion - enable outer VLAN insertion
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN insertion for
+ *
+ * Enable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for insertion will affect the TPID for stripping.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN insertion settings and the VLAN TPID. Outer VLAN
+ * stripping settings are unmodified.
+ *
+ * This allows a VLAN tag with the specified TPID to be inserted in the transmit
+ * descriptor.
+ */
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_insertion - disable outer VLAN insertion
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN insertion settings. The VLAN TPID and outer VLAN
+ * stripping settings are unmodified.
+ *
+ * This tells the hardware to not allow any VLAN tagged packets in the transmit
+ * descriptor. This enables software offloaded VLAN insertion and disables
+ * hardware offloaded VLAN insertion.
+ */
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ ctxt->info.outer_vlan_flags |=
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * __ice_vsi_set_outer_port_vlan - set the outer port VLAN and related settings
+ * @vsi: VSI to configure
+ * @vlan_info: packed u16 that contains the VLAN prio and ID
+ * @tpid: TPID of the port VLAN
+ *
+ * Set the port VLAN prio, ID, and TPID.
+ *
+ * Enable VLAN pruning so the VSI doesn't receive any traffic that doesn't match
+ * a VLAN prune rule. The caller should take care to add a VLAN prune rule that
+ * matches the port VLAN ID and TPID.
+ *
+ * Tell hardware to strip outer VLAN tagged packets on receive and don't put
+ * them in the receive descriptor. VSI(s) in port VLANs should not be aware of
+ * the port VLAN ID or TPID they are assigned to.
+ *
+ * Tell hardware to prevent outer VLAN tag insertion on transmit and only allow
+ * untagged outer packets from the transmit descriptor.
+ *
+ * Also, tell the hardware to insert the port VLAN on transmit.
+ */
+static int
+__ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.port_based_outer_vlan = cpu_to_le16(vlan_info);
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
+ ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ }
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_set_outer_port_vlan - public version of __ice_vsi_set_outer_port_vlan
+ * @vsi: VSI to configure
+ * @vlan: ice_vlan structure used to set the port VLAN
+ *
+ * Set the outer port VLAN via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * This function does not support clearing the port VLAN as there is currently
+ * no use case for this.
+ *
+ * Use the ice_vlan structure passed in to set this VSI in a port VLAN.
+ */
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->prio > (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
new file mode 100644
index 000000000000..f459909490ec
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_LIB_H_
+#define _ICE_VSI_VLAN_LIB_H_
+
+#include <linux/types.h>
+#include "ice_vlan.h"
+
+struct ice_vsi;
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi);
+
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
new file mode 100644
index 000000000000..4a6c850d83ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_pf_vsi_vlan_ops.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_lib.h"
+#include "ice.h"
+
+static int
+op_unsupported_vlan_arg(struct ice_vsi * __always_unused vsi,
+ struct ice_vlan * __always_unused vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+op_unsupported_tpid_arg(struct ice_vsi *__always_unused vsi,
+ u16 __always_unused tpid)
+{
+ return -EOPNOTSUPP;
+}
+
+static int op_unsupported(struct ice_vsi *__always_unused vsi)
+{
+ return -EOPNOTSUPP;
+}
+
+/* If any new ops are added to the VSI VLAN ops interface then an unsupported
+ * implementation should be set here.
+ */
+static struct ice_vsi_vlan_ops ops_unsupported = {
+ .add_vlan = op_unsupported_vlan_arg,
+ .del_vlan = op_unsupported_vlan_arg,
+ .ena_stripping = op_unsupported_tpid_arg,
+ .dis_stripping = op_unsupported,
+ .ena_insertion = op_unsupported_tpid_arg,
+ .dis_insertion = op_unsupported,
+ .ena_rx_filtering = op_unsupported,
+ .dis_rx_filtering = op_unsupported,
+ .ena_tx_filtering = op_unsupported,
+ .dis_tx_filtering = op_unsupported,
+ .set_port_vlan = op_unsupported_vlan_arg,
+};
+
+/**
+ * ice_vsi_init_unsupported_vlan_ops - init all VSI VLAN ops to unsupported
+ * @vsi: VSI to initialize VSI VLAN ops to unsupported for
+ *
+ * By default all inner and outer VSI VLAN ops return -EOPNOTSUPP. This was
+ * done as opposed to leaving the ops null, to prevent unexpected crashes.
+ * Instead, if an unsupported VSI VLAN op is called it will just return
+ * -EOPNOTSUPP.
+ */
+static void ice_vsi_init_unsupported_vlan_ops(struct ice_vsi *vsi)
+{
+ vsi->outer_vlan_ops = ops_unsupported;
+ vsi->inner_vlan_ops = ops_unsupported;
+}
+
+/**
+ * ice_vsi_init_vlan_ops - initialize type specific VSI VLAN ops
+ * @vsi: VSI to initialize ops for
+ *
+ * If any VSI types are added and/or require different ops than the PF or VF VSI
+ * then they will have to add a case here to handle that. Also, VSI type
+ * specific files should be added in the same manner that was done for PF VSI.
+ */
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ /* Initialize all VSI types to have unsupported VSI VLAN ops */
+ ice_vsi_init_unsupported_vlan_ops(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ ice_pf_vsi_init_vlan_ops(vsi);
+ break;
+ case ICE_VSI_VF:
+ ice_vf_vsi_init_vlan_ops(vsi);
+ break;
+ default:
+ dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
+ ice_vsi_type_str(vsi->type));
+ break;
+ }
+}
+
+/**
+ * ice_get_compat_vsi_vlan_ops - Get VSI VLAN ops based on VLAN mode
+ * @vsi: VSI used to get the VSI VLAN ops
+ *
+ * This function is meant to be used when the caller doesn't know which VLAN ops
+ * to use (i.e. inner or outer). This allows backward compatibility for VLANs
+ * since most of the Outer VSI VLAN functions are not supported when
+ * the device is configured in Single VLAN Mode (SVM).
+ */
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi)
+{
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return &vsi->outer_vlan_ops;
+ else
+ return &vsi->inner_vlan_ops;
+}
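
A hypothetical DVM/SVM-agnostic call site would fetch the compat ops and dispatch through them rather than touching inner or outer ops directly (fragment only; assumes a populated struct ice_vlan from elsewhere):

    struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
    int err = vlan_ops->add_vlan(vsi, &vlan); /* inner in SVM, outer in DVM */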
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
new file mode 100644
index 000000000000..5b47568f6256
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_OPS_H_
+#define _ICE_VSI_VLAN_OPS_H_
+
+#include "ice_type.h"
+#include "ice_vsi_vlan_lib.h"
+
+struct ice_vsi;
+
+struct ice_vsi_vlan_ops {
+ int (*add_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*del_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*ena_stripping)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_stripping)(struct ice_vsi *vsi);
+ int (*ena_insertion)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_insertion)(struct ice_vsi *vsi);
+ int (*ena_rx_filtering)(struct ice_vsi *vsi);
+ int (*dis_rx_filtering)(struct ice_vsi *vsi);
+ int (*ena_tx_filtering)(struct ice_vsi *vsi);
+ int (*dis_tx_filtering)(struct ice_vsi *vsi);
+ int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+};
+
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2388837d6d6c..056c904b83cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ synchronize_rcu();
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -190,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
+ ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
@@ -241,7 +244,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -315,6 +318,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
}
/**
+ * ice_realloc_rx_xdp_bufs - reallocate the Rx SW ring for XSK or normal buffers
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate the new SW ring first and return -ENOMEM if the allocation
+ * fails, leaving the existing buffer intact. If allocation succeeds, free the
+ * old buffer and substitute the newly allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+ sizeof(*rx_ring->rx_buf);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ rx_ring->xdp_buf = sw_ring;
+ } else {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ rx_ring->rx_buf = sw_ring;
+ }
+
+ return 0;
+}
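
The function above follows the allocate-before-free idiom so that a failed allocation leaves the current ring usable. A generic standalone sketch of the same pattern (hypothetical helper, not driver code):

    #include <stdlib.h>

    static int swap_ring(void **slot, size_t n, size_t elem_size)
    {
            void *fresh = calloc(n, elem_size);

            if (!fresh)
                    return -1; /* old buffer is still valid */
            free(*slot);
            *slot = fresh;
            return 0;
    }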
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffers for the rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+ struct ice_rx_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+ rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
* @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -327,14 +386,26 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_pool_if_up;
}
+
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+ if (ret)
+ goto xsk_pool_if_up;
}
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
@@ -344,11 +415,12 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
+failure:
if (pool_failure) {
netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
pool_present ? "en" : "dis", pool_failure);
@@ -359,33 +431,28 @@ xsk_pool_if_up:
}
/**
- * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
- * @rx_ring: Rx ring
+ * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
+ * @pool: XSK Buffer pool to pull the buffers from
+ * @xdp: SW ring of xdp_buff that will hold the buffers
+ * @rx_desc: Pointer to Rx descriptors that will be filled
* @count: The number of buffers to allocate
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
- * Returns true if all allocations were successful, false if any fail.
+ * Note that ring wrap should be handled by the caller of this function.
+ *
+ * Returns the number of allocated Rx descriptors
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
- union ice_32b_rx_flex_desc *rx_desc;
- u16 ntu = rx_ring->next_to_use;
- struct xdp_buff **xdp;
- u32 nb_buffs, i;
dma_addr_t dma;
+ u16 buffs;
+ int i;
- rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = ice_xdp_buf(rx_ring, ntu);
-
- nb_buffs = min_t(u16, count, rx_ring->count - ntu);
- nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
- if (!nb_buffs)
- return false;
-
- i = nb_buffs;
- while (i--) {
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
@@ -394,13 +461,81 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp++;
}
+ return buffs;
+}
+
+/**
+ * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Place @count descriptors onto the Rx ring. Handle the ring wrap for the
+ * case where the space from next_to_use up to the end of the ring is less
+ * than @count. Finally do a tail bump.
+ *
+ * Returns true if all allocations were successful, false if any fail.
+ */
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ xdp = ice_xdp_buf(rx_ring, ntu);
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
+ rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = ice_xdp_buf(rx_ring, 0);
+ ntu = 0;
+ count -= nb_buffs_extra;
+ ice_release_rx_desc(rx_ring, 0);
+ }
+
+ nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+
ntu += nb_buffs;
if (ntu == rx_ring->count)
ntu = 0;
- ice_release_rx_desc(rx_ring, ntu);
+exit:
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
- return count == nb_buffs;
+ return total_count == (nb_buffs_extra + nb_buffs);
+}
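
The wrap handling above splits one request into at most two contiguous fills. A standalone sketch of the index arithmetic (hypothetical helper, not driver code):

    #include <stdint.h>

    /* Split a fill request of count descriptors starting at ntu into a
     * first segment up to the end of the ring and a second from index 0. */
    static void split_fill(uint16_t ntu, uint16_t count, uint16_t ring_size,
                           uint16_t *first, uint16_t *second)
    {
            if (ntu + count >= ring_size) {
                    *first = ring_size - ntu;
                    *second = count - *first;
            } else {
                    *first = count;
                    *second = 0;
            }
    }

    /* e.g. ntu=500, count=40, ring_size=512 -> first=12, second=28 */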
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Wrapper for internal allocation routine; figure out how many tail
+ * bumps should take place based on the given threshold
+ *
+ * Returns true if all calls to internal alloc routine succeeded
+ */
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
+ u16 leftover, i, tail_bumps;
+
+ tail_bumps = count / rx_thresh;
+ leftover = count - (tail_bumps * rx_thresh);
+
+ for (i = 0; i < tail_bumps; i++)
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ return false;
+ return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}
/**
@@ -428,20 +563,24 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
xsk_buff_free(xdp);
return skb;
@@ -467,9 +606,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return ICE_XDP_REDIR;
+ if (!err)
+ return ICE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -480,15 +623,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = ICE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- result = ICE_XDP_CONSUMED;
break;
}
@@ -509,6 +653,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
+ int entries_to_alloc;
/* ZC patch is enabled only when XDP program is set,
* so here it can not be NULL
@@ -528,7 +673,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -537,6 +682,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+ break;
+
xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
size = le16_to_cpu(rx_desc->wb.pkt_len) &
@@ -553,18 +701,23 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
- if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(xdp);
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == ICE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == ICE_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ } else if (xdp_res == ICE_XDP_PASS) {
+ goto construct_skb;
+ }
- total_rx_bytes += size;
- total_rx_packets++;
+ total_rx_bytes += size;
+ total_rx_packets++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
- ice_bump_ntc(rx_ring);
- continue;
- }
construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
@@ -583,9 +736,7 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
@@ -594,7 +745,9 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -612,58 +765,6 @@ construct_skb:
}
/**
- * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
- * @xdp_ring: XDP Tx ring
- * @budget: max number of frames to xmit
- *
- * Returns true if cleanup/transmission is done.
- */
-static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
-{
- struct ice_tx_desc *tx_desc = NULL;
- bool work_done = true;
- struct xdp_desc desc;
- dma_addr_t dma;
-
- while (likely(budget-- > 0)) {
- struct ice_tx_buf *tx_buf;
-
- if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
- work_done = false;
- break;
- }
-
- tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
-
- if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
- break;
-
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
- desc.len);
-
- tx_buf->bytecount = desc.len;
-
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz =
- ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
-
- xdp_ring->next_to_use++;
- if (xdp_ring->next_to_use == xdp_ring->count)
- xdp_ring->next_to_use = 0;
- }
-
- if (tx_desc) {
- ice_xdp_ring_update_tail(xdp_ring);
- xsk_tx_release(xdp_ring->xsk_pool);
- }
-
- return budget > 0 && work_done;
-}
-
-/**
* ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
* @xdp_ring: XDP Tx ring
* @tx_buf: Tx buffer to clean
@@ -672,38 +773,45 @@ static void
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ xdp_ring->xdp_tx_active--;
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
dma_unmap_len_set(tx_buf, len, 0);
}
/**
- * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
- * @budget: NAPI budget
- *
- * Returns true if cleanup/tranmission is done.
*/
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
- int total_packets = 0, total_bytes = 0;
- s16 ntc = xdp_ring->next_to_clean;
+ u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
struct ice_tx_buf *tx_buf;
- u32 xsk_frames = 0;
- bool xmit_done;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
- tx_desc = ICE_TX_DESC(xdp_ring, ntc);
- tx_buf = &xdp_ring->tx_buf[ntc];
- ntc -= xdp_ring->count;
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if ((tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ if (last_rs >= ntc)
+ xsk_frames = last_rs - ntc + 1;
+ else
+ xsk_frames = last_rs + cnt - ntc + 1;
+ }
- do {
- if (!(tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ if (!xsk_frames)
+ return;
+
+ if (likely(!xdp_ring->xdp_tx_active))
+ goto skip;
- total_bytes += tx_buf->bytecount;
- total_packets++;
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < xsk_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
if (tx_buf->raw_buf) {
ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
@@ -712,34 +820,145 @@ bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
xsk_frames++;
}
- tx_desc->cmd_type_offset_bsz = 0;
- tx_buf++;
- tx_desc++;
ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+skip:
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += xsk_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+}
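
The completion count above is the inclusive distance from next_to_clean to the last descriptor carrying the RS bit, modulo the ring size. A standalone sketch (hypothetical helper, not driver code):

    #include <stdint.h>

    static uint16_t frames_done(uint16_t last_rs, uint16_t ntc, uint16_t cnt)
    {
            return last_rs >= ntc ? last_rs - ntc + 1
                                  : last_rs + cnt - ntc + 1;
    }

    /* e.g. ntc=510, last_rs=5, cnt=512 -> 8 completed frames */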
- if (unlikely(!ntc)) {
- ntc -= xdp_ring->count;
- tx_buf = xdp_ring->tx_buf;
- tx_desc = ICE_TX_DESC(xdp_ring, 0);
- }
+/**
+ * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
+ * @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @desc: AF_XDP descriptor to pull the DMA address and length from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+ unsigned int *total_bytes)
+{
+ struct ice_tx_desc *tx_desc;
+ dma_addr_t dma;
- prefetch(tx_desc);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
- } while (likely(--budget));
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, desc->len, 0);
- ntc += xdp_ring->count;
- xdp_ring->next_to_clean = ntc;
+ *total_bytes += desc->len;
+}
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+/**
+ * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ unsigned int *total_bytes)
+{
+ u16 ntu = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ u32 i;
+
+ loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ dma_addr_t dma;
+
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, descs[i].len, 0);
+
+ *total_bytes += descs[i].len;
+ }
+
+ xdp_ring->next_to_use = ntu;
+}
+
+/**
+ * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @nb_pkts: count of packets to be sent
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ u32 nb_pkts, unsigned int *total_bytes)
+{
+ u32 batched, leftover, i;
+
+ batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
+ leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+ for (i = 0; i < batched; i += PKTS_PER_BATCH)
+ ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ for (; i < batched + leftover; i++)
+ ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
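
Because PKTS_PER_BATCH is a power of two, the ALIGN_DOWN and leftover computations reduce to bit masking. A standalone sketch (hypothetical helper, not driver code):

    #include <stdint.h>

    #define PKTS_PER_BATCH 8

    static void split_batch(uint32_t nb_pkts, uint32_t *batched,
                            uint32_t *leftover)
    {
            *batched = nb_pkts & ~(PKTS_PER_BATCH - 1);  /* ALIGN_DOWN */
            *leftover = nb_pkts & (PKTS_PER_BATCH - 1);  /* nb_pkts % 8 */
    }

    /* e.g. nb_pkts=29 -> batched=24 (3 unrolled passes), leftover=5 */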
+
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+}
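
The "one behind NTU" index above is a wrap-around decrement on a ring of cnt entries. A standalone sketch (hypothetical helper, not driver code):

    #include <stdint.h>

    static uint16_t prev_idx(uint16_t ntu, uint16_t cnt)
    {
            return ntu ? ntu - 1 : cnt - 1;
    }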
+
+/**
+ * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ *
+ * Returns true if there is no more work that needs to be done, false otherwise
+ */
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+{
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ u32 nb_pkts, nb_processed = 0;
+ unsigned int total_bytes = 0;
+ int budget;
+
+ ice_clean_xdp_irq_zc(xdp_ring);
+
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+ ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ xdp_ring->next_to_use = 0;
+ }
+
+ ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+ &total_bytes);
+
+ ice_set_rs_bit(xdp_ring);
+ ice_xdp_ring_update_tail(xdp_ring);
+ ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
- ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
- xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
-
- return budget > 0 && xmit_done;
+ return nb_pkts < budget;
}
/**
@@ -759,19 +978,19 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
- if (queue_id >= vsi->num_txq)
- return -ENXIO;
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
+ return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
@@ -810,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4c7bd8e9dfc4..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -4,7 +4,16 @@
#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
-#include "ice.h"
+
+#define PKTS_PER_BATCH 8
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
+#elif __GNUC__ >= 8
+#define loop_unrolled_for _Pragma("GCC unroll 8") for
+#else
+#define loop_unrolled_for for
+#endif
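
The macro prepends a compiler-specific unroll pragma to an ordinary for statement; compilers without a known pragma get a plain loop. A hypothetical use site (fragment; assumes a u32 accumulator and a buf[] array from the surrounding code):

    u32 i, sum = 0;

    loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++)
            sum += buf[i]; /* body is unrolled 8x on clang or GCC >= 8 */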
struct ice_vsi;
@@ -12,13 +21,19 @@ struct ice_vsi;
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+{
+ return false;
+}
+
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
struct xsk_buff_pool __always_unused *pool,
@@ -35,13 +50,6 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
- int __always_unused budget)
-{
- return false;
-}
-
-static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
@@ -62,5 +70,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+ bool __always_unused zc)
+{
+ return 0;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index cbe92fd23a70..8d6e44ee1895 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2207,7 +2207,7 @@ out:
* igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ca5429774994..fa028928482f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1033,9 +1033,6 @@
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
-
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1277c5c7d099..205d577bdbba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -854,7 +854,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 9cb49980ec2d..eb9f6da9208a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -116,7 +116,6 @@
#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 51a2dcaf553d..e5f3e7680dc6 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -850,14 +850,14 @@ static void igb_get_drvinfo(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers
*/
- strlcpy(drvinfo->fw_version, adapter->fw_version,
+ strscpy(drvinfo->fw_version, adapter->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
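The strlcpy() -> strscpy() conversions throughout this patch follow the tree-wide move away from strlcpy(). A minimal sketch of the behavioural difference, with an illustrative helper name:

```c
/* strscpy() never reads past an unterminated source the way strlcpy()
 * could, and it reports truncation via -E2BIG instead of returning the
 * would-be source length. copy_name() here is purely illustrative.
 */
#include <linux/errno.h>
#include <linux/string.h>

static int copy_name(char *dst, const char *src, size_t dst_size)
{
	ssize_t n = strscpy(dst, src, dst_size);

	return n < 0 ? -E2BIG : 0;	/* n == -E2BIG on truncation */
}
```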
@@ -965,10 +965,6 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
- /* Clear copied XDP RX-queue info */
- memset(&temp_ring[i].xdp_rxq, 0,
- sizeof(temp_ring[i].xdp_rxq));
-
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
@@ -1802,14 +1798,14 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
frame_size >>= 1;
- data = kmap(rx_buffer->page);
+ data = kmap_local_page(rx_buffer->page);
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
- kunmap(rx_buffer->page);
+ kunmap_local(data);
return match;
}
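The loopback-check hunk above swaps kmap()/kunmap() for the local-mapping API. A minimal sketch of the pattern, using a hypothetical check_first_byte() helper:

```c
/* kmap_local_page() mappings are CPU-local and must be released with
 * kunmap_local() on the address the mapping returned; the code in
 * between must not sleep or migrate to another CPU.
 */
#include <linux/highmem.h>

static bool check_first_byte(struct page *page, u8 expected)
{
	u8 *data = kmap_local_page(page);	/* short-lived mapping */
	bool match = data[0] == expected;

	kunmap_local(data);
	return match;
}
```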
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 38ba92022cd4..f8e32833226c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1211,8 +1211,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igb_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
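The netif_napi_add() hunks reflect the API losing its weight parameter; callers that passed the default of 64 simply drop the argument. A sketch with an illustrative wrapper:

```c
/* netif_napi_add() now applies the default poll weight
 * (NAPI_POLL_WEIGHT == 64) itself, so the explicit 64 disappears
 * from every caller.
 */
#include <linux/netdevice.h>

static void example_init_napi(struct net_device *dev,
			      struct napi_struct *napi,
			      int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, poll);	/* weight defaults to 64 */
}
```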
@@ -1945,7 +1944,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
* However, when we do so, no frame from queue 2 and 3 are
* transmitted. It seems the MAX_TPKT_SIZE should not be greater
* than or _equal_ to the buffer size programmed in TXPBS. For this
- * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64.
+ * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.

*/
val = (4096 - 1) / 64;
wr32(E1000_I210_DTXMXPKTSZ, val);
@@ -3138,7 +3137,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
adapter->i2c_algo.data = adapter;
adapter->i2c_adap.algo_data = &adapter->i2c_algo;
adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
+ strscpy(adapter->i2c_adap.name, "igb BB",
sizeof(adapter->i2c_adap.name));
status = i2c_bit_add_bus(&adapter->i2c_adap);
return status;
@@ -3164,8 +3163,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
+ int err;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -3180,17 +3179,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igb_driver_name);
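This probe simplification (repeated for igbvf, igc and ixgb below) relies on the DMA core: a 64-bit mask request only fails when no usable configuration exists at all, so the old 32-bit fallback was dead code. A minimal sketch, with an illustrative function name:

```c
/* With dma_set_mask_and_coherent(DMA_BIT_MASK(64)) unable to fail on
 * platforms where a 32-bit mask would have succeeded, the driver can
 * also set NETIF_F_HIGHDMA unconditionally (see the later hunk).
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_dma_setup(struct pci_dev *pdev)
{
	int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (err)
		dev_err(&pdev->dev, "No usable DMA configuration\n");
	return err;
}
```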
@@ -3306,8 +3299,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -3644,6 +3636,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3656,12 +3649,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3821,7 +3815,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3981,6 +3977,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -4352,7 +4351,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
- int size;
+ int size, res;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index, 0);
+ if (res < 0) {
+ dev_err(dev, "Failed to register xdp_rxq index %u\n",
+ rx_ring->queue_index);
+ return res;
+ }
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -4375,14 +4385,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = adapter->xdp_prog;
- /* XDP RX-queue info */
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0) < 0)
- goto err;
-
return 0;
err:
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
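The reordering above registers the XDP rxq info before any ring allocations and unregisters it on the error path, so a retry (e.g. from igb_set_ringparam(), whose memset of the copied xdp_rxq is dropped in the earlier ethtool hunk) starts from a clean state. A sketch of the idiom:

```c
/* Register-first, unregister-on-error: an xdp_rxq_info must not be
 * registered twice, so any stale registration is dropped up front.
 */
#include <net/xdp.h>

static int example_rxq_setup(struct xdp_rxq_info *xdp_rxq,
			     struct net_device *dev, u32 queue_index)
{
	int err;

	if (xdp_rxq_info_is_reg(xdp_rxq))
		xdp_rxq_info_unreg(xdp_rxq);

	err = xdp_rxq_info_reg(xdp_rxq, dev, queue_index, 0);
	if (err < 0)
		return err;

	/* ... allocate ring memory; on failure call xdp_rxq_info_unreg() */
	return 0;
}
```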
@@ -4819,8 +4825,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ /* Free all the Tx ring sk_buffs or xdp frames */
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ dev_kfree_skb_any(tx_buffer->skb);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -5505,7 +5514,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
- if (adapter->link_speed != SPEED_1000)
+ if (adapter->link_speed != SPEED_1000 ||
+ !hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */
@@ -6256,74 +6266,108 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *tx_ring,
struct xdp_frame *xdpf)
{
- union e1000_adv_tx_desc *tx_desc;
- u32 len, cmd_type, olinfo_status;
- struct igb_tx_buffer *tx_buffer;
- dma_addr_t dma;
- u16 i;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, i, index = tx_ring->next_to_use;
+ struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
+ struct igb_tx_buffer *tx_buffer = tx_head;
+ union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
+ u32 len = xdpf->len, cmd_type, olinfo_status;
+ void *data = xdpf->data;
- len = xdpf->len;
-
- if (unlikely(!igb_desc_unused(tx_ring)))
- return IGB_XDP_CONSUMED;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ if (igb_maybe_stop_tx(tx_ring, count + 3))
return IGB_XDP_CONSUMED;
+ i = 0;
/* record the location of the first descriptor for this packet */
- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->type = IGB_TYPE_XDP;
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- i = tx_ring->next_to_use;
- tx_desc = IGB_TX_DESC(tx_ring, i);
+ olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->type = IGB_TYPE_XDP;
- tx_buffer->xdpf = xdpf;
+ for (;;) {
+ dma_addr_t dma;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto unmap;
- /* put descriptor type bits */
- cmd_type = E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_DEXT |
- E1000_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IGB_TXD_DCMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
- olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring */
- if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
- olinfo_status |= tx_ring->reg_idx << 4;
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS | len;
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ tx_buffer->protocol = 0;
+
+ if (++index == tx_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ tx_desc = IGB_TX_DESC(tx_ring, index);
+ tx_desc->read.olinfo_status = 0;
- netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
/* set the timestamp */
- tx_buffer->time_stamp = jiffies;
+ tx_head->time_stamp = jiffies;
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
/* set next_to_watch value indicating a packet is present */
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- tx_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ tx_ring->next_to_use = index;
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
- writel(i, tx_ring->tail);
+ writel(index, tx_ring->tail);
return IGB_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == tx_head)
+ break;
+
+ if (!index)
+ index += tx_ring->count;
+ index--;
+ }
+
+ return IGB_XDP_CONSUMED;
}
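igb_xmit_xdp_ring() now walks the head buffer plus every frag, mapping one buffer per descriptor and unwinding on failure. A minimal sketch of just the map/unwind skeleton, with illustrative parameters:

```c
/* Map N buffers for one frame; on a mapping error, unmap everything
 * already mapped in reverse order, mirroring the "unmap:" label above.
 */
#include <linux/dma-mapping.h>

static int example_map_frame(struct device *dev, void **bufs,
			     unsigned int *lens, dma_addr_t *dmas, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		dmas[i] = dma_map_single(dev, bufs[i], lens[i],
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dmas[i]))
			goto unmap;
	}
	return 0;

unmap:
	while (--i >= 0)
		dma_unmap_single(dev, dmas[i], lens[i], DMA_TO_DEVICE);
	return -ENOMEM;
}
```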
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
@@ -7920,8 +7964,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7935,6 +7981,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
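The new vfs_lock closes a race between igb_disable_sriov() freeing VF state and igb_msg_task() still walking it. A minimal sketch of the pattern, with illustrative names:

```c
/* Teardown frees the per-VF data under the lock and zeroes the count,
 * so the mailbox task (which takes the same lock) either sees the old
 * consistent state or no VFs at all.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_vfs {
	spinlock_t vfs_lock;
	void *vf_data;
	unsigned int vfs_allocated_count;
};

static void example_vf_teardown(struct example_vfs *v)
{
	unsigned long flags;

	spin_lock_irqsave(&v->vfs_lock, flags);
	kfree(v->vf_data);
	v->vf_data = NULL;
	v->vfs_allocated_count = 0;
	spin_unlock_irqrestore(&v->vfs_lock, flags);
}
```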
/**
@@ -8814,6 +8861,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
@@ -9518,7 +9566,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
igb_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -9897,11 +9945,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 reg;
if (hw->mac.type > e1000_82580) {
if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
/* force threshold to 0. */
wr32(E1000_DMCTXTH, 0);
@@ -9934,7 +9981,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Disable BMC-to-OS Watchdog Enable */
if (hw->mac.type != e1000_i354)
reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
-
wr32(E1000_DMACR, reg);
/* no lower threshold to disable
@@ -9951,12 +9997,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
*/
wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+ }
- /* make low power state decision controlled
- * by DMA coal
- */
+ if (hw->mac.type >= e1000_i210 ||
+ (adapter->flags & IGB_FLAG_DMAC)) {
reg = rd32(E1000_PCIEMISC);
- reg &= ~E1000_PCIEMISC_LX_DECISION;
+ reg |= E1000_PCIEMISC_LX_DECISION;
wr32(E1000_PCIEMISC, reg);
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 6580fcddb4be..15e57460e19e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -165,23 +165,21 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
unsigned long flags;
u64 ns;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
case e1000_i354:
case e1000_i350:
spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
ns = timecounter_cyc2time(&adapter->tc, systim);
-
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ns_to_ktime(ns);
break;
case e1000_i210:
case e1000_i211:
- memset(hwtstamps, 0, sizeof(*hwtstamps));
/* Upper 32 bits contain s, lower 32 bits contain ns. */
hwtstamps->hwtstamp = ktime_set(systim >> 32,
systim & 0xFFFFFFFF);
@@ -192,7 +190,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
}
/* PTP clock operations */
-static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
@@ -201,15 +199,14 @@ static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
u64 rate;
u32 incvalue;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate = ppb;
- rate <<= 14;
- rate = div_u64(rate, 1953125);
- incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+ incvalue = INCVALUE_82576;
+ rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
incvalue -= rate;
@@ -1349,7 +1346,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.max_adj = 999999881;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjfine = igb_ptp_adjfine_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
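The adjfreq -> adjfine conversion changes units: scaled_ppm is parts-per-million scaled by 2^16, so the increment-register delta becomes INCVALUE * scaled_ppm / (10^6 * 2^16), computed overflow-safe. A sketch of just the arithmetic:

```c
/* mul_u64_u64_div_u64() keeps the 128-bit intermediate product, so the
 * computation cannot overflow for any valid scaled_ppm.
 */
#include <linux/math64.h>

static u64 example_incvalue_delta(u64 incvalue, long scaled_ppm)
{
	if (scaled_ppm < 0)
		scaled_ppm = -scaled_ppm;	/* caller applies the sign */

	return mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm,
				   1000000ULL << 16);
}
```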
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 9d4322b74163..83b97989a6bd 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -169,8 +169,8 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 975eb47ee04d..57d39ee00b58 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -227,7 +227,7 @@ struct igbvf_adapter {
/* The VF counters don't clear on read so we have to get a base
* count on driver start up and always subtract that base on
- * on the first update, thus the flag..
+ * the first update, thus the flag.
*/
struct e1000_vf_stats stats;
u64 zero_base;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b78407289741..3a32809510fc 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1109,7 +1109,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
return -ENOMEM;
}
- netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll);
return 0;
}
@@ -2537,7 +2537,7 @@ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
igbvf_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -2684,25 +2684,18 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
-
static int cards_found;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, igbvf_driver_name);
@@ -2783,10 +2776,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IGBVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 3e386c38d016..1e7e7071f64d 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -264,7 +264,6 @@ int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
-int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
void igc_update_stats(struct igc_adapter *adapter);
void igc_disable_rx_ring(struct igc_ring *ring);
void igc_enable_rx_ring(struct igc_ring *ring);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index f068b66b8025..a15927e77272 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -182,8 +182,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- phy->type = igc_phy_i225;
-
out:
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5c66b97c0cfa..4f9d7f013a95 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -610,7 +610,6 @@
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
-#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index b1e72ec5f131..88680e3d613d 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -53,11 +53,6 @@ enum igc_mac_type {
igc_num_macs /* List is 1-based, so subtract 1 for true count. */
};
-enum igc_phy_type {
- igc_phy_unknown = 0,
- igc_phy_i225,
-};
-
enum igc_media_type {
igc_media_type_unknown = 0,
igc_media_type_copper = 1,
@@ -94,8 +89,6 @@ struct igc_mac_info {
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
- u8 forced_speed_duplex;
-
bool asf_firmware_present;
bool arc_subsystem_valid;
@@ -138,8 +131,6 @@ struct igc_nvm_info {
struct igc_phy_info {
struct igc_phy_operations ops;
- enum igc_phy_type type;
-
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 66ea566488d1..59d5c467ea6e 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 67b8ffd21d8a..a5c4b19d71a2 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -193,7 +193,7 @@ s32 igc_force_mac_fc(struct igc_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2f17f36e94fd..34889be63e78 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -505,6 +505,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
u8 index = rx_ring->queue_index;
int size, desc_len, res;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
rx_ring->q_vector->napi.napi_id);
if (res < 0) {
@@ -2126,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
return ok;
}
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
- struct xdp_frame *xdpf,
- struct igc_ring *ring)
-{
- dma_addr_t dma;
-
- dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
- return -ENOMEM;
- }
-
- buffer->type = IGC_TX_BUFFER_TYPE_XDP;
- buffer->xdpf = xdpf;
- buffer->protocol = 0;
- buffer->bytecount = xdpf->len;
- buffer->gso_segs = 1;
- buffer->time_stamp = jiffies;
- dma_unmap_len_set(buffer, len, xdpf->len);
- dma_unmap_addr_set(buffer, dma, dma);
- return 0;
-}
-
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
- struct igc_tx_buffer *buffer;
- union igc_adv_tx_desc *desc;
- u32 cmd_type, olinfo_status;
- int err;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, index = ring->next_to_use;
+ struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+ struct igc_tx_buffer *buffer = head;
+ union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+ u32 olinfo_status, len = xdpf->len, cmd_type;
+ void *data = xdpf->data;
+ u16 i;
- if (!igc_desc_unused(ring))
- return -EBUSY;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- buffer = &ring->tx_buffer_info[ring->next_to_use];
- err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
- if (err)
- return err;
+ if (igc_maybe_stop_tx(ring, count + 3)) {
+ /* this is a hard error */
+ return -EBUSY;
+ }
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- buffer->bytecount;
- olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+ i = 0;
+ head->bytecount = xdp_get_frame_len(xdpf);
+ head->type = IGC_TX_BUFFER_TYPE_XDP;
+ head->gso_segs = 1;
+ head->xdpf = xdpf;
- desc = IGC_TX_DESC(ring, ring->next_to_use);
- desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
- netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev,
+ "Failed to map DMA for TX\n");
+ goto unmap;
+ }
+
+ dma_unmap_len_set(buffer, len, len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | len;
+
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.buffer_addr = cpu_to_le64(dma);
+
+ buffer->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
- buffer->next_to_watch = desc;
+ if (i == nr_frags)
+ break;
- ring->next_to_use++;
- if (ring->next_to_use == ring->count)
- ring->next_to_use = 0;
+ buffer = &ring->tx_buffer_info[index];
+ desc = IGC_TX_DESC(ring, index);
+ desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+ /* set the timestamp */
+ head->time_stamp = jiffies;
+ /* set next_to_watch value indicating a packet is present */
+ head->next_to_watch = desc;
+ ring->next_to_use = index;
return 0;
+
+unmap:
+ for (;;) {
+ buffer = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(buffer, len))
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(buffer, dma),
+ dma_unmap_len(buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(buffer, len, 0);
+ if (buffer == head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2366,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
+ xdp_buff_clear_frags_flag(&xdp);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2446,19 +2487,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int totalsize = metasize + datasize;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
- memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
if (metasize) {
skb_metadata_set(skb, metasize);
__skb_pull(skb, metasize);
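igc_construct_skb_zc() now sizes the skb by data_meta..data_end so XDP metadata survives the copy, and rounds the memcpy length up to sizeof(long). A consolidated sketch of the resulting flow:

```c
/* The ALIGN() rounding may copy a few bytes past data_end; that is safe
 * here because both the XDP buffer and the freshly allocated skb have
 * at least that much tailroom.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *example_skb_from_zc(struct napi_struct *napi,
					   struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(napi, totalsize, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);	/* data points at payload */
	}
	return skb;
}
```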
@@ -4352,8 +4394,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -5809,9 +5850,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
return false;
for (n = 0; n < qopt->num_entries; n++) {
- const struct tc_taprio_sched_entry *e;
+ const struct tc_taprio_sched_entry *e, *prev;
int i;
+ prev = n ? &qopt->entries[n - 1] : NULL;
e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
@@ -5824,7 +5866,12 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->gate_mask & BIT(i))
queue_uses[i]++;
- if (queue_uses[i] > 1)
+ /* There are limitations: A single queue cannot be
+ * opened and closed multiple times per cycle unless the
+ * gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
return false;
}
}
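The relaxed validation accepts a queue that stays open across consecutive entries and only rejects open-closed-open within one cycle. A standalone sketch of that predicate, with illustrative names:

```c
/* Returns true when the queue's gate re-opens after having closed
 * earlier in the same cycle - the one case i225 cannot program.
 */
#include <linux/bits.h>

static bool example_gate_reopens(const u32 *gate_masks, int n_entries,
				 int queue)
{
	bool was_open = false, closed_since = false;
	int n;

	for (n = 0; n < n_entries; n++) {
		bool open = gate_masks[n] & BIT(queue);

		if (open && was_open && closed_since)
			return true;
		if (was_open && !open)
			closed_since = true;
		was_open |= open;
	}
	return false;
}
```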
@@ -5868,6 +5915,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
u32 start_time = 0, end_time = 0;
size_t n;
@@ -5883,9 +5931,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
- /* FIXME: be a little smarter about cases when the gate for a
- * queue stays open for more than one entry.
- */
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
int i;
@@ -5898,8 +5943,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!(e->gate_mask & BIT(i)))
continue;
- ring->start_time = start_time;
+ /* Check whether a queue stays open for more than one
+ * entry. If so, keep the start and advance the end
+ * time.
+ */
+ if (!queue_configured[i])
+ ring->start_time = start_time;
ring->end_time = end_time;
+
+ queue_configured[i] = true;
}
start_time += e->interval;
@@ -6167,6 +6219,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
@@ -6183,56 +6238,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
-int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
-{
- struct igc_mac_info *mac = &adapter->hw.mac;
-
- mac->autoneg = false;
-
- /* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work
- */
- if ((spd & 1) || (dplx & ~1))
- goto err_inval;
-
- switch (spd + dplx) {
- case SPEED_10 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_10_HALF;
- break;
- case SPEED_10 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_10_FULL;
- break;
- case SPEED_100 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_100_HALF;
- break;
- case SPEED_100 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_100_FULL;
- break;
- case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = true;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
- break;
- case SPEED_1000 + DUPLEX_HALF: /* not supported */
- goto err_inval;
- case SPEED_2500 + DUPLEX_FULL:
- mac->autoneg = true;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
- break;
- case SPEED_2500 + DUPLEX_HALF: /* not supported */
- default:
- goto err_inval;
- }
-
- /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
- adapter->hw.phy.mdix = AUTO_ALL_MODES;
-
- return 0;
-
-err_inval:
- netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
-}
-
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -6251,23 +6256,17 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igc_driver_name);
@@ -6367,8 +6366,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 5cad31c3c7b0..53b77c969c85 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -141,24 +141,14 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
* igc_check_downshift - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
- * Success returns 0, Failure returns 1
- *
* A downshift is detected by querying the PHY link health.
*/
-s32 igc_check_downshift(struct igc_hw *hw)
+void igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
- s32 ret_val;
-
- switch (phy->type) {
- case igc_phy_i225:
- default:
- /* speed downshift not supported */
- phy->speed_downgraded = false;
- ret_val = 0;
- }
- return ret_val;
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
}
/**
@@ -581,7 +571,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +628,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -746,8 +736,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +767,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
index 1b031372d206..832a7e359f18 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.h
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -11,7 +11,7 @@ s32 igc_phy_hw_reset(struct igc_hw *hw);
s32 igc_get_phy_id(struct igc_hw *hw);
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
-s32 igc_check_downshift(struct igc_hw *hw);
+void igc_check_downshift(struct igc_hw *hw);
s32 igc_setup_copper_link(struct igc_hw *hw);
void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0d6e3215e98f..8dbb9f903ca7 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -15,7 +15,6 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define IGC_PTM_STAT_SLEEP 2
@@ -992,6 +991,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_EN;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1009,8 +1019,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- if (pci_device_is_present(adapter->pdev))
+ if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
+ igc_ptm_stop(adapter);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e197a33d93a0..c0d8214148d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -59,9 +59,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* MSI-X Table Register Descriptions */
-#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
-
/* RSS registers */
#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
@@ -306,7 +303,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- writel((val), &hw_addr[(reg)]); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@@ -318,4 +316,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h))
+
#endif
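IGC_REMOVED() mirrors the surprise-removal handling in the other Intel drivers: once hw_addr is observed as NULL the device is gone, writes become no-ops and reads return all-ones. A sketch of the read side:

```c
/* PCIe reads from a removed device return 0xFFFFFFFF anyway; checking
 * the cached pointer first avoids dereferencing an unmapped BAR.
 */
#include <linux/compiler.h>
#include <linux/io.h>

static u32 example_guarded_read(u8 __iomem *hw_addr, u32 reg)
{
	if (unlikely(!hw_addr))		/* IGC_REMOVED() case */
		return ~0U;

	return readl(hw_addr + reg);
}
```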
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 46efcfab7234..efa980514944 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -456,9 +456,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgb_driver_name,
+ strscpy(drvinfo->driver, ixgb_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index c8d1e815ec6b..98bd3267b99b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -576,7 +576,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
* Writes a value to the specified offset in the VLAN filter table.
*
* hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filer table to write
+ * offset - Offset in VLAN filter table to write
* value - Value to write into VLAN filter table
*****************************************************************************/
void
@@ -588,7 +588,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
}
/******************************************************************************
- * Clears the VLAN filer table
+ * Clears the VLAN filter table
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 99d481904ce6..b4d47e7a76c8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -361,7 +361,6 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev = NULL;
struct ixgb_adapter *adapter;
static int cards_found = 0;
- int pci_using_dac;
u8 addr[ETH_ALEN];
int i;
int err;
@@ -370,16 +369,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
err = pci_request_regions(pdev, ixgb_driver_name);
@@ -421,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgb_netdev_ops;
ixgb_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, ixgb_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -444,10 +437,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_RXCSUM;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 16114 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -1196,7 +1187,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
iph = ip_hdr(skb);
iph->tot_len = 0;
@@ -1713,7 +1704,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
netdev->stats.tx_window_errors = 0;
}
-#define IXGB_MAX_INTR 10
/**
* ixgb_intr - Interrupt Handler
* @irq: interrupt number
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index f0cadd532c53..d40f96250691 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -141,8 +141,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define MAX_RDTR 0xFFFF
#define MIN_RDTR 0
-#define XSUMRX_DEFAULT OPTION_ENABLED
-
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4a69823e6abd..5369a97ff5ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -167,21 +167,58 @@ enum ixgbe_tx_flags {
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+
+struct vf_stats {
+ u64 gprc;
+ u64 gorc;
+ u64 gptc;
+ u64 gotc;
+ u64 mprc;
+};
+
struct vf_data_storage {
struct pci_dev *vfdev;
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
bool clear_to_send;
+ struct vf_stats vfstats;
+ struct vf_stats last_vfstats;
+ struct vf_stats saved_rst_vfstats;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ int link_enable;
+ int link_state;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
int xcast_mode;
unsigned int vf_api;
+ u8 primary_abort_count;
};
enum ixgbevf_xcast_modes {
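UPDATE_VF_COUNTER_32bit() handles hardware counters that wrap at 2^32: a smaller reading than last time means one wrap occurred, so 2^32 is added to the 64-bit accumulator. The same logic as a plain function:

```c
/* High 32 bits of 'counter' accumulate the wrap count; the low 32 bits
 * are always replaced by the live hardware value.
 */
#include <linux/types.h>

static u64 example_accumulate_u32(u64 counter, u32 *last, u32 now)
{
	if (now < *last)		/* wrapped since last read */
		counter += 0x100000000ULL;
	*last = now;
	counter &= 0xFFFFFFFF00000000ULL;
	counter |= now;
	return counter;
}
```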
@@ -556,6 +593,8 @@ struct ixgbe_mac_addr {
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
+#define IXGBE_PRIMARY_ABORT_LIMIT 5
+
/* board specific private data structure */
struct ixgbe_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -614,6 +653,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
+#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
/* Tx fast path data */
int num_tx_queues;
@@ -773,6 +813,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
+ spinlock_t vfs_lock;
};
static inline int ixgbe_determine_xdp_q_idx(int cpu)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 95c92fe890a1..100388968e4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -879,7 +879,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* ixgbe_clear_vfta_82598 - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index e90b5047e695..38c4609bd429 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -30,7 +30,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -746,10 +746,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
usleep_range(1000, 2000);
/*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -2506,15 +2506,15 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else 0
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else 0
+ * is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2523,23 +2523,23 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Poll for bit to read as set */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
break;
usleep_range(100, 120);
}
- if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
+ if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
hw_dbg(hw, "GIO disable did not set - requesting resets\n");
goto gio_disable_fail;
}
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
ixgbe_removed(hw->hw_addr))
return 0;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
udelay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
return 0;
@@ -2547,13 +2547,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
@@ -2575,7 +2575,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
}
/**
@@ -3237,7 +3237,7 @@ vfta_update:
* ixgbe_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 72e6ebffea33..e85f7d2e8810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -8,12 +8,10 @@
#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
-#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f70967c32116..e88e3dfac8c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -18,8 +18,6 @@
#include "ixgbe_phy.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBE_STATS};
struct ixgbe_stats {
@@ -138,6 +136,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
"vf-ipsec",
+#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2)
+ "mdd-disable-vf",
};
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -1106,12 +1106,12 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ strscpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
@@ -1964,15 +1964,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
frame_size >>= 1;
- data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+ data = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
- kunmap(rx_buffer->page);
-
return match;
}
@@ -3510,6 +3508,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
+ if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
+ priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
+
return priv_flags;
}
@@ -3517,6 +3518,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
unsigned int flags2 = adapter->flags2;
+ unsigned int i;
flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
@@ -3526,6 +3528,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
+ flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
+ if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ /* Reset primary abort counter */
+ for (i = 0; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].primary_abort_count = 0;
+
+ flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+ } else {
+ e_info(probe,
+ "Cannot set private flags: Operation not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flags2 != adapter->flags2) {
adapter->flags2 = flags2;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 0fcd82036d4e..7311bd545acf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1004,7 +1004,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
UTS_RELEASE);
/* Firmware Version */
- strlcpy(info->firmware_version, adapter->eeprom_id,
+ strscpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
/* Model */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index e596e1a9fc75..774de63dd93a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -585,7 +585,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;
if (xs->calg) {
@@ -757,7 +757,7 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
u32 zerobuf[4] = {0, 0, 0, 0};
u16 sa_idx;
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa *rsa;
u8 ipi;
@@ -903,7 +903,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
- if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+ if (sam->dir != XFRM_DEV_OFFLOAD_IN) {
err = -EOPNOTSUPP;
goto err_out;
}
@@ -914,7 +914,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs->xso.flags = sam->flags;
+ xs->xso.dir = sam->dir;
xs->id.spi = sam->spi;
xs->id.proto = sam->proto;
xs->props.family = sam->family;
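The xso.flags -> xso.dir change replaces the XFRM_OFFLOAD_INBOUND bit test with an explicit direction field. A sketch:

```c
/* Direction is now a dedicated enum value rather than a flag bit. */
#include <net/xfrm.h>

static bool example_offload_is_inbound(const struct xfrm_state *xs)
{
	return xs->xso.dir == XFRM_DEV_OFFLOAD_IN;
}
```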
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index d2b64ff8eb4e..809ab51a7842 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -74,7 +74,7 @@ struct ixgbe_ipsec {
struct sa_mbx_msg {
__be32 spi;
- u8 flags;
+ u8 dir;
u8 proto;
u16 family;
__be32 addr[4];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 86b11164655e..f8156fe4b1dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -874,8 +874,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c6ff656b2476..298cfbfcb7b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -151,8 +151,8 @@ MODULE_PARM_DESC(max_vfs,
"Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
-static unsigned int allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, uint, 0);
+static bool allow_unsupported_sfp;
+module_param(allow_unsupported_sfp, bool, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -3247,8 +3248,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* If Flow Director is enabled, set interrupt affinity */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
- irq_set_affinity_hint(entry->vector,
- &q_vector->affinity_mask);
+ irq_update_affinity_hint(entry->vector,
+ &q_vector->affinity_mask);
}
}
@@ -3264,8 +3265,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(adapter->msix_entries[vector].vector,
- NULL);
+ irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
@@ -3398,7 +3399,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
continue;
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(entry->vector, NULL);
+ irq_update_affinity_hint(entry->vector, NULL);
free_irq(entry->vector, q_vector);
}
@@ -5051,12 +5052,12 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
- netif_set_gso_max_size(adapter->netdev, 65536);
+ netif_set_tso_max_size(adapter->netdev, 65536);
return;
}
if (hw->mac.type == ixgbe_mac_82598EB)
- netif_set_gso_max_size(adapter->netdev, 32768);
+ netif_set_tso_max_size(adapter->netdev, 32768);
#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
@@ -5160,7 +5161,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
}
/**
- * ixgbe_lpbthresh - calculate low water mark for for flow control
+ * ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
@@ -5548,6 +5549,47 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
return ret;
}
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ adapter->vfinfo[i].last_vfstats.gprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+ adapter->vfinfo[i].vfstats.gprc;
+ adapter->vfinfo[i].vfstats.gprc = 0;
+ adapter->vfinfo[i].last_vfstats.gptc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+ adapter->vfinfo[i].vfstats.gptc;
+ adapter->vfinfo[i].vfstats.gptc = 0;
+ adapter->vfinfo[i].last_vfstats.gorc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+ adapter->vfinfo[i].vfstats.gorc;
+ adapter->vfinfo[i].vfstats.gorc = 0;
+ adapter->vfinfo[i].last_vfstats.gotc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+ adapter->vfinfo[i].vfstats.gotc;
+ adapter->vfinfo[i].vfstats.gotc = 0;
+ adapter->vfinfo[i].last_vfstats.mprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+ adapter->vfinfo[i].vfstats.mprc;
+ adapter->vfinfo[i].vfstats.mprc = 0;
+ }
+}
+
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5683,10 +5725,14 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->service_timer, jiffies);
+ ixgbe_clear_vf_stats_counters(adapter);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ /* update Rx/Tx settings for all active VFs */
+ ixgbe_set_all_vfs(adapter);
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
@@ -5948,8 +5994,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_SFP_NOT_PRESENT:
case IXGBE_ERR_SFP_NOT_SUPPORTED:
break;
- case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- e_dev_err("master disable timed out\n");
+ case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ e_dev_err("primary disable timed out\n");
break;
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
@@ -6144,11 +6190,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
for (i = 0 ; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = false;
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
+ /* update Rx/Tx settings for all active VFs */
+ ixgbe_set_all_vfs(adapter);
}
/* disable transmits in the hardware now that interrupts are off */
@@ -6402,6 +6445,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
+
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif
@@ -7270,6 +7316,32 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
netdev->stats.rx_length_errors = hwstats->rlec;
netdev->stats.rx_crc_errors = hwstats->crcerrs;
netdev->stats.rx_missed_errors = total_mpc;
+
+ /* VF Stats Collection - skip while resetting because these
+ * are not clear on read and otherwise you'll sometimes get
+ * crazy values.
+ */
+ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ for (i = 0; i < adapter->num_vfs; i++) {
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),
+ adapter->vfinfo[i].last_vfstats.gprc,
+ adapter->vfinfo[i].vfstats.gprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),
+ adapter->vfinfo[i].last_vfstats.gptc,
+ adapter->vfinfo[i].vfstats.gptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),
+ IXGBE_PVFGORC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gorc,
+ adapter->vfinfo[i].vfstats.gorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),
+ IXGBE_PVFGOTC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gotc,
+ adapter->vfinfo[i].vfstats.gotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),
+ adapter->vfinfo[i].last_vfstats.mprc,
+ adapter->vfinfo[i].vfstats.mprc);
+ }
+ }
}
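
The UPDATE_VF_COUNTER_36bit helper used above is defined elsewhere in the driver; a minimal sketch of the rollover-safe arithmetic such an update has to perform, assuming a counter split into a 32-bit LSB register and a 4-bit MSB register (the helper name and masking here are illustrative, not the driver's actual macro):

	/* hypothetical illustration only -- not the driver's macro */
	static u64 vf_counter_36bit_delta(u64 last, u32 lsb, u32 msb)
	{
		u64 cur = ((u64)(msb & 0xf) << 32) | lsb; /* rebuild 36-bit value */

		/* the hardware counters are free-running rather than
		 * clear-on-read, so diff against the previous snapshot and
		 * mask to 36 bits to survive rollover
		 */
		return (cur - last) & 0xfffffffffULL;
	}
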
/**
@@ -7613,6 +7685,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
#ifdef CONFIG_PCI_IOV
+static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+ adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
+ adapter->vfinfo[vf].primary_abort_count++;
+ if (adapter->vfinfo[vf].primary_abort_count ==
+ IXGBE_PRIMARY_ABORT_LIMIT) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->vfinfo[vf].primary_abort_count = 0;
+
+ e_info(drv,
+ "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
+ hw->bus.func, vf,
+ adapter->vfinfo[vf].vf_mac_addresses);
+ }
+ }
+}
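
The auto-disable path above only fires when IXGBE_FLAG2_AUTO_DISABLE_VF is set through the ethtool private-flags handler earlier in this patch. Judging by the log text, the flag appears to be exposed to userspace as "mdd-disable-vf" (an assumption based on the message string, not verified against the ethtool strings table), so enabling it would look roughly like: ethtool --set-priv-flags <pf> mdd-disable-vf on.
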
+
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -7644,8 +7737,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
continue;
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
- status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ status_reg & PCI_STATUS_REC_MASTER_ABORT) {
+ ixgbe_bad_vf_abort(adapter, vf);
pcie_flr(vfdev);
+ }
}
}
@@ -8548,57 +8643,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_tx_buffer *tx_buffer;
- union ixgbe_adv_tx_desc *tx_desc;
- u32 len, cmd_type;
- dma_addr_t dma;
- u16 i;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = ring->next_to_use;
+ struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
+ struct ixgbe_tx_buffer *tx_buff = tx_head;
+ union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
+ u32 cmd_type, len = xdpf->len;
+ void *data = xdpf->data;
- len = xdpf->len;
-
- if (unlikely(!ixgbe_desc_unused(ring)))
+ if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
return IXGBE_XDP_CONSUMED;
- dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma))
- return IXGBE_XDP_CONSUMED;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record the location of the first descriptor for this packet */
- tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
- i = ring->next_to_use;
- tx_desc = IXGBE_TX_DESC(ring, i);
+ for (;;) {
+ dma_addr_t dma;
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->xdpf = xdpf;
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma))
+ goto unmap;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ dma_unmap_len_set(tx_buff, len, len);
+ dma_unmap_addr_set(tx_buff, dma, dma);
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS | len;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buff->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_buff = &ring->tx_buffer_info[index];
+ tx_desc = IXGBE_TX_DESC(ring, index);
+ tx_desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
/* put descriptor type bits */
- cmd_type = IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT |
- IXGBE_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IXGBE_TXD_CMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
- tx_desc->read.olinfo_status =
- cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
- /* set next_to_watch value indicating a packet is present */
- i++;
- if (i == ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ ring->next_to_use = index;
return IXGBE_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buff = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buff, len))
+ dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
+ dma_unmap_len(tx_buff, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buff, len, 0);
+ if (tx_buff == tx_head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return IXGBE_XDP_CONSUMED;
}
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
@@ -8972,6 +9093,23 @@ static void ixgbe_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}
+static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf < 0 || vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc;
+ vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc;
+ vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc;
+ vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc;
+ vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc;
+
+ return 0;
+}
+
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
@@ -10284,9 +10422,11 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
#ifdef IXGBE_FCOE
@@ -10632,9 +10772,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
- int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
+ int i, err, expected_gts;
bool disable_dev = false;
#ifdef IXGBE_FCOE
u16 device_caps;
@@ -10654,16 +10794,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, ixgbe_driver_name);
@@ -10714,7 +10849,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
@@ -10750,6 +10885,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -10861,8 +10999,7 @@ skip_sriov:
netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
@@ -11003,7 +11140,7 @@ skip_sriov:
err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
if (err)
- strlcpy(part_str, "Unknown", sizeof(part_str));
+ strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a148534d7256..8f4316b19278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 336426a67ac1..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -113,12 +113,16 @@
* the sign bit. This register enables software to calculate frequency
* adjustments and apply them directly to the clock rate.
*
- * The math for converting ppb into TIMINCA values is fairly straightforward.
- * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL
+ * The math for converting scaled_ppm into TIMINCA values is fairly
+ * straightforward.
*
- * This assumes that ppb is never high enough to create a value bigger than
- * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this
- * value is also simple.
+ * TIMINCA value = ( Base_Frequency * scaled_ppm ) / ( 1000000ULL << 16 )
+ *
+ * To avoid overflow, we simply use mul_u64_u64_div_u64.
+ *
+ * This assumes that scaled_ppm is never high enough to create a value bigger
+ * than TIMINCA's 31 bits can store. This is ensured by the stack. Calculating
+ * the maximum adjustment, in parts per billion, is also simple.
* Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL
*
* For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is
@@ -138,7 +142,6 @@
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
-#define MAX_TIMADJ 0x7FFFFFFF
/**
* ixgbe_ptp_setup_sdp_X540
@@ -434,45 +437,45 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_adjfreq_82599
+ * ixgbe_ptp_adjfine_82599
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
- * adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated scaled_ppm from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- u64 freq, incval;
- u32 diff;
+ u64 incval, diff;
int neg_adj = 0;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
smp_mb();
incval = READ_ONCE(adapter->base_incval);
- freq = incval;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ diff = mul_u64_u64_div_u64(incval, scaled_ppm,
+ 1000000ULL << 16);
incval = neg_adj ? (incval - diff) : (incval + diff);
switch (hw->mac.type) {
case ixgbe_mac_X540:
if (incval > 0xFFFFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval);
break;
case ixgbe_mac_82599EB:
if (incval > 0x00FFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
@@ -485,32 +488,35 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
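
Since scaled_ppm carries a 16-bit binary fractional field, a request of +1 ppm arrives as 1 << 16, and the diff computed above works out to exactly one millionth of the base increment. A minimal worked example, assuming an arbitrary base increment (the incval below is illustrative, not a real ixgbe value):

	#include <linux/math64.h>

	u64 incval = 0x00800000;	/* illustrative base increment */
	long scaled_ppm = 1 << 16;	/* +1 ppm requested by the stack */
	u64 diff = mul_u64_u64_div_u64(incval, scaled_ppm,
				       1000000ULL << 16);
	/* diff == incval / 1000000 == 0x8; a positive adjustment then
	 * programs incval + diff into TIMINCA
	 */
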
/**
- * ixgbe_ptp_adjfreq_X550
+ * ixgbe_ptp_adjfine_X550
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
+ *
+ * Adjust the frequency of the SYSTIME registers by the indicated scaled_ppm
+ * from base frequency.
*
- * adjust the frequency of the SYSTIME registers by the indicated ppb from base
- * frequency
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
int neg_adj = 0;
- u64 rate = IXGBE_X550_BASE_PERIOD;
+ u64 rate;
u32 inca;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate *= ppb;
- rate = div_u64(rate, 1000000000ULL);
+
+ rate = mul_u64_u64_div_u64(IXGBE_X550_BASE_PERIOD, scaled_ppm,
+ 1000000ULL << 16);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
inca = rate & INCVALUE_MASK;
if (neg_adj)
@@ -1208,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1243,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1287,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1312,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
@@ -1356,7 +1395,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1373,7 +1412,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1389,7 +1428,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 214a38de3f41..29cc60988071 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -77,7 +77,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* limit trafffic classes based on VFs enabled */
+ /* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
@@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
for (i = 0; i < num_vfs; i++) {
/* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
+ adapter->vfinfo[i].link_enable = true;
/* We support VF RSS querying only for 82599 and x540
* devices at the moment. These devices share RSS
@@ -204,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ unsigned long flags;
int rss;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -820,6 +824,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
}
}
+/**
+ * ixgbe_set_vf_rx_tx - Set VF Rx/Tx enable state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ *
+ * Enable or disable transmit and receive for the given VF
+ **/
+static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
+{
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+
+ if (adapter->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | 1 << vf_shift;
+ reg_req_rx = reg_cur_rx | 1 << vf_shift;
+ } else {
+ reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#if IS_ENABLED(CONFIG_FCOE)
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif /* CONFIG_FCOE */
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* Enable/Disable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
+}
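
The vf_shift/reg_offset arithmetic above spreads the per-VF enable bits across a pair of 32-bit VFTE/VFRE registers. A quick illustration with arbitrarily chosen VF numbers:

	/* VF 5  -> bit 5 of VFTE(0)/VFRE(0)
	 * VF 37 -> bit 5 of VFTE(1)/VFRE(1)
	 */
	u32 vf = 37;
	u32 vf_shift = vf % 32;		/* 5 */
	u32 reg_offset = vf / 32;	/* 1 */
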
+
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -845,11 +900,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= BIT(vf_shift);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
/* force drop enable for all VF Rx queues */
reg = IXGBE_QDE_ENABLE;
if (adapter->vfinfo[vf].pf_vlan)
@@ -857,27 +907,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_qde(adapter, vf, reg);
- /* enable receive for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= BIT(vf_shift);
- /*
- * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
- * For more info take a look at ixgbe_set_vf_lpe
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- struct net_device *dev = adapter->netdev;
- int pf_max_frame = dev->mtu + ETH_HLEN;
-
-#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
- pf_max_frame = max_t(int, pf_max_frame,
- IXGBE_FCOE_JUMBO_FRAME_SIZE);
-
-#endif /* CONFIG_FCOE */
- if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~BIT(vf_shift);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ ixgbe_set_vf_rx_tx(adapter, vf);
/* enable VF mailbox for further messages */
adapter->vfinfo[vf].clear_to_send = true;
@@ -1157,9 +1187,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
- enable = 0;
+ enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1181,9 +1211,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}
- disable = 0;
+ disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
@@ -1202,6 +1232,26 @@ out:
return 0;
}
+static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 *link_state = &msgbuf[1];
+
+ /* verify the PF is supporting the correct API */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *link_state = adapter->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1267,6 +1317,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_UPDATE_XCAST_MODE:
retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_LINK_STATE:
+ retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
+ break;
case IXGBE_VF_IPSEC_ADD:
retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
break;
@@ -1305,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
@@ -1320,18 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
-}
-
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* disable transmit and receive for all vfs */
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
-
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
@@ -1359,6 +1403,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_set_all_vfs - update all VFs' queue settings
+ * @adapter: Pointer to adapter struct
+ *
+ * Update the transmit and receive queue settings for all active VFs
+ **/
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_set_vf_link_state(adapter, i,
+ adapter->vfinfo[i].link_state);
+}
+
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -1656,6 +1715,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
return 0;
}
+/**
+ * ixgbe_set_vf_link_state - Set link state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Force the link state on or off for a single VF
+ **/
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
+{
+ adapter->vfinfo[vf].link_state = state;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ adapter->vfinfo[vf].link_enable = false;
+ else
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ adapter->vfinfo[vf].link_enable = false;
+ break;
+ }
+
+ ixgbe_set_vf_rx_tx(adapter, vf);
+
+ /* restart the VF */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+}
+
+/**
+ * ixgbe_ndo_set_vf_link_state - Set link state
+ * @netdev: network interface device structure
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (vf < 0 || vf >= adapter->num_vfs) {
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF link - invalid VF identifier %d\n", vf);
+ return -EINVAL;
+ }
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state %d - not supported\n",
+ vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state disable\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state auto\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF %d - invalid link state %d\n", vf, state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
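
From userspace this ndo is reached through the standard iproute2 VF link-state knob, e.g. "ip link set dev <pf> vf 3 state disable", with "state auto" returning the VF to following the physical link; "state enable" is explicitly rejected as unsupported above. The PF interface name here is a placeholder.
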
+
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 3ec21923c89c..0690ecb8dfa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos, __be16 vlan_proto);
@@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index bba3feaf3318..f1f69ce67420 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -8,6 +8,7 @@
#define IXGBE_XDP_CONSUMED BIT(0)
#define IXGBE_XDP_TX BIT(1)
#define IXGBE_XDP_REDIR BIT(2)
+#define IXGBE_XDP_EXIT BIT(3)
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2647937f7f4d..2b00db92b08f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1247,7 +1247,7 @@ struct ixgbe_nvm_version {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1811,7 +1811,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2193,8 +2193,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100 microseconds we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -2533,6 +2533,13 @@ enum {
#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
@@ -3671,7 +3678,7 @@ struct ixgbe_info {
#define IXGBE_ERR_ADAPTER_STOPPED -9
#define IXGBE_ERR_INVALID_MAC_ADDR -10
#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
#define IXGBE_ERR_RESET_FAILED -15
@@ -3705,7 +3712,9 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3715,6 +3724,7 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e4b50c7781ff..aa4bf6c9a2f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1721,9 +1721,59 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return IXGBE_ERR_LINK_SETUP;
}
- status = mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* change mode enforcement rules to hybrid */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x0400;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* manually control the config */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20002240;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* move the AN base page values */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x1;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* set the AN37 over CB mode */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20000000;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* restart AN manually */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
@@ -1737,7 +1787,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for native SFP support.
+ * Configure the integrated PHY for native SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
@@ -1786,7 +1836,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for SFP support.
+ * Configure the integrated PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b3fd8e5cd85b..1703c640a434 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return IXGBE_XDP_REDIR;
+ if (!err)
+ return IXGBE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = IXGBE_XDP_EXIT;
+ else
+ result = IXGBE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -130,16 +134,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = IXGBE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = IXGBE_XDP_CONSUMED;
- break;
}
return result;
}
@@ -207,26 +211,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
}
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
+ const struct xdp_buff *xdp)
{
- unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
- unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- bi->xdp->data_end - bi->xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
- xsk_buff_free(bi->xdp);
- bi->xdp = NULL;
return skb;
}
@@ -301,28 +307,36 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
- if (xdp_res) {
- if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(bi->xdp);
+ if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IXGBE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IXGBE_XDP_CONSUMED) {
+ xsk_buff_free(bi->xdp);
+ } else if (xdp_res == IXGBE_XDP_PASS) {
+ goto construct_skb;
+ }
- bi->xdp = NULL;
- total_rx_packets++;
- total_rx_bytes += size;
+ bi->xdp = NULL;
+ total_rx_packets++;
+ total_rx_bytes += size;
- cleaned_count++;
- ixgbe_inc_ntc(rx_ring);
- continue;
- }
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+construct_skb:
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
}
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+
cleaned_count++;
ixgbe_inc_ntc(rx_ring);
@@ -390,12 +404,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
u32 cmd_type;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
- !netif_carrier_ok(xdp_ring->netdev)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
}
+ if (!netif_carrier_ok(xdp_ring->netdev))
+ break;
+
if (!xsk_tx_peek_desc(pool, &desc))
break;
@@ -509,10 +525,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!READ_ONCE(adapter->xdp_prog))
- return -ENXIO;
+ return -EINVAL;
if (qid >= adapter->num_xdp_queues)
- return -ENXIO;
+ return -EINVAL;
ring = adapter->xdp_ring[qid];
@@ -520,7 +536,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!ring->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 3b41f83c8dff..ccfa6b91aac6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -17,8 +17,6 @@
#include "ixgbevf.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBEVF_STATS};
struct ixgbe_stats {
@@ -130,8 +128,6 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
@@ -217,8 +213,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e763cee0695e..9984ebc62d78 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -25,7 +25,7 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
/* send the important bits to the PF */
sam = (struct sa_mbx_msg *)(&msgbuf[1]);
- sam->flags = xs->xso.flags;
+ sam->dir = xs->xso.dir;
sam->spi = xs->id.spi;
sam->proto = xs->id.proto;
sam->family = xs->props.family;
@@ -280,7 +280,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;
if (xs->calg) {
@@ -394,7 +394,7 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
if (!ipsec->rx_tbl[sa_idx].used) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
index 3740725041c3..d22990165353 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
@@ -57,7 +57,7 @@ struct ixgbevf_ipsec {
struct sa_mbx_msg {
__be32 spi;
- u8 flags;
+ u8 dir;
u8 proto;
u16 family;
__be32 addr[4];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e257390a4f6a..149c733fcc2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -387,6 +387,8 @@ struct ixgbevf_adapter {
u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
+ bool link_state;
+
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
#ifdef CONFIG_XFRM
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0015fcf1df2b..99933e89717a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
@@ -2297,7 +2298,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ bool state;
ixgbevf_configure_msix(adapter);
@@ -2310,6 +2313,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ state = adapter->link_state;
+ hw->mac.ops.get_link_state(hw, &adapter->link_state);
+ if (state && state != adapter->link_state)
+ dev_info(&pdev->dev, "VF is administratively disabled\n");
+
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -2725,7 +2733,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -2752,7 +2760,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
ring->reg_idx = reg_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
@@ -3080,6 +3088,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+ adapter->link_state = true;
+
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -3312,7 +3322,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_watchdog_update_link(adapter);
- if (adapter->link_up)
+ if (adapter->link_up && adapter->link_state)
ixgbevf_watchdog_link_is_up(adapter);
else
ixgbevf_watchdog_link_is_down(adapter);
@@ -4511,22 +4521,17 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
- int err, pci_using_dac;
bool disable_dev = false;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, ixgbevf_driver_name);
@@ -4606,10 +4611,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IXGBEVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_SG |
@@ -4785,7 +4787,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
rtnl_unlock();
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 7346ccf014a5..835bbcc5cc8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -100,6 +100,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61d8970c6d1d..1641d00d8ed3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -585,6 +585,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
}
/**
+ * ixgbevf_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Returns an error code on failure or 0 on success.
+ */
+static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ u32 msgbuf[2];
+ s32 ret_val;
+ s32 err;
+
+ msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+ msgbuf[1] = 0x0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ ret_val = IXGBE_ERR_MBX;
+ } else {
+ ret_val = 0;
+ *link_state = msgbuf[1];
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub.
+ * @hw: unused
+ * @link_state: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -924,7 +964,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if we we didn't get an ACK there must have been
+ /* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
@@ -968,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
+ .get_link_state = ixgbevf_get_link_state_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
.set_rlpml = ixgbevf_set_rlpml_vf,
@@ -985,6 +1026,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
.set_rar = ixgbevf_hv_set_rar_vf,
.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .get_link_state = ixgbevf_hv_get_link_state_vf,
.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
.set_vfta = ixgbevf_hv_set_vfta_vf,
.set_rlpml = ixgbevf_hv_set_rlpml_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 54158dac8707..b4eef5b6c172 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -39,6 +39,7 @@ struct ixgbe_mac_operations {
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+ s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
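[A hypothetical caller sketch for the new mac op; the function name and carrier handling below are illustrative, not from the patch. The VF asks the PF for its administrative link state over the IXGBE_VF_GET_LINK_STATE mailbox message and mirrors the answer:]

	static void ixgbevf_sync_link_state(struct ixgbevf_adapter *adapter)
	{
		struct ixgbe_hw *hw = &adapter->hw;
		bool link_state = false;

		/* returns IXGBE_ERR_MBX on NACK; Hyper-V stub returns -EOPNOTSUPP */
		if (hw->mac.ops.get_link_state(hw, &link_state))
			return;		/* keep current carrier on mailbox failure */

		if (!link_state)
			netif_carrier_off(adapter->netdev);
	}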
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 439674fc9765..1732ec3c3dbd 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -28,6 +28,7 @@
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include "jme.h"
@@ -2179,7 +2180,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
}
if (unlikely(txbi->start_xmit &&
- (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) &&
txbi->skb)) {
netif_stop_queue(jme->dev);
netif_info(jme, tx_queued, jme->dev,
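[The timeout test above moves to the jiffies helper because open-coded subtraction inverts its meaning when jiffies wraps; time_is_before_eq_jiffies(a) expands to time_after_eq(jiffies, a), which compares with signed arithmetic. A minimal sketch under the driver's TX_TIMEOUT definition:]

	#include <linux/jiffies.h>

	static bool jme_tx_expired(unsigned long start_xmit)
	{
		/* true once start_xmit + TX_TIMEOUT lies in the past,
		 * correct even across a jiffies wraparound */
		return time_is_before_eq_jiffies(start_xmit + TX_TIMEOUT);
	}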
@@ -2331,9 +2332,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
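[The strlcpy() conversions here and in the drivers below are mechanical, but the semantics differ: strscpy() returns the number of bytes copied or -E2BIG on truncation, and unlike strlcpy() it never reads the source beyond what fits the destination. A sketch of the return-value contract, with the buffer names illustrative:]

	char name[16];
	ssize_t len;

	len = strscpy(name, candidate, sizeof(name));
	if (len == -E2BIG)
		pr_warn("name truncated\n");	/* strlcpy() would have silently
						 * returned the full source length */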
static int
@@ -3008,7 +3009,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc);
}
- NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
+ netif_napi_add(netdev, &jme->napi, jme_poll);
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
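[The NETIF_NAPI_SET() wrapper removal rides on the netif_napi_add() signature change: the weight argument is gone and defaults to NAPI_POLL_WEIGHT, with netif_napi_add_weight() kept for drivers needing a custom value and netif_napi_add_tx() for Tx-only NAPIs, as the lantiq and xrx200 hunks below show. Illustrative registrations (my_poll, my_tx_poll and priv are placeholders):]

	netif_napi_add(dev, &priv->napi, my_poll);			/* default weight */
	netif_napi_add_weight(dev, &priv->rx_napi, my_poll, 32);	/* explicit weight */
	netif_napi_add_tx(dev, &priv->tx_napi, my_tx_poll);		/* Tx housekeeping */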
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 2af76329b4a2..860494ff3714 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -379,8 +379,6 @@ struct jme_ring {
#define DECLARE_NET_DEVICE_STATS
#define DECLARE_NAPI_STRUCT struct napi_struct napi;
-#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
- netif_napi_add(dev, napis, pollfn, q);
#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
#define JME_NAPI_WEIGHT(w) int w
#define JME_NAPI_WEIGHT_VAL(w) w
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index df9a8eefa007..2b9335cb4bb3 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -416,7 +416,8 @@ static void korina_abort_rx(struct net_device *dev)
}
/* transmit packet */
-static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t korina_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
u32 chain_prev, chain_next;
@@ -938,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1354,7 +1355,7 @@ static int korina_probe(struct platform_device *pdev)
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lp->napi, korina_poll);
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = korina_mdio_read;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 9b6fa27b7daf..f5961bdcc480 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -470,7 +470,7 @@ ltq_etop_stop(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
int queue = skb_get_queue_mapping(skb);
@@ -485,7 +485,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
- dev_kfree_skb_any(skb);
netdev_err(dev, "tx ring full\n");
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
@@ -701,11 +700,11 @@ ltq_etop_probe(struct platform_device *pdev)
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
- netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_tx, 8);
+ netif_napi_add_weight(dev, &priv->ch[i].napi,
+ ltq_etop_poll_tx, 8);
else if (IS_RX(i))
- netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_rx, 32);
+ netif_napi_add_weight(dev, &priv->ch[i].napi,
+ ltq_etop_poll_rx, 32);
priv->ch[i].netdev = dev;
}
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41d11137cde0..8d646c7f8c82 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
}
skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
@@ -260,9 +267,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
if (ctl & LTQ_DMA_EOP) {
ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
- netif_receive_skb(ch->skb_head);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += ch->skb_head->len;
+ netif_receive_skb(ch->skb_head);
ch->skb_head = NULL;
ch->skb_tail = NULL;
ret = XRX200_DMA_PACKET_COMPLETE;
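[The swap of the stats update and netif_receive_skb() above is a use-after-free fix, not a cosmetic move: once the skb is handed to the stack it may be freed, so skb_head->len must be read first. The pattern in general form:]

	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb->len;	/* read before hand-off */
	netif_receive_skb(skb);			/* ownership transfers; no
						 * dereference after this */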
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
continue;
if (ret != XRX200_DMA_PACKET_COMPLETE)
- return ret;
+ break;
rx++;
} else {
break;
@@ -613,10 +620,9 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
- NAPI_POLL_WEIGHT);
- netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
+ netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
+ xrx200_tx_housekeeping);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index f99adbf26ab4..04345b929d8e 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index fdd99f0de424..35f24e0f0934 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -152,7 +152,8 @@ static int liteeth_stop(struct net_device *netdev)
return 0;
}
-static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct liteeth *priv = netdev_priv(netdev);
void __iomem *txbuffer;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fe0989c0fc25..f58a1c0144ba 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -62,6 +62,7 @@ config MVNETA
select MVMDIO
select PHYLINK
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
@@ -177,6 +178,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 9f88fe822555..ceba4aa4f026 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeon_ep/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 105247582684..8941f69d93e9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -775,7 +775,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
int tx_index;
struct tx_desc *desc;
int ret;
@@ -1603,12 +1603,12 @@ mv643xx_eth_set_link_ksettings(struct net_device *dev,
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ strscpy(drvinfo->driver, mv643xx_eth_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ strscpy(drvinfo->version, mv643xx_eth_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int mv643xx_eth_get_coalesce(struct net_device *dev,
@@ -1661,7 +1661,7 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
if (er->rx_mini_pending || er->rx_jumbo_pending)
return -EINVAL;
- mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
+ mp->rx_ring_size = min(er->rx_pending, 4096U);
mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
MV643XX_MAX_SKB_DESCS * 2, 4096);
if (mp->tx_ring_size != er->tx_pending)
@@ -2481,6 +2481,7 @@ out_free:
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
out:
+ napi_disable(&mp->napi);
free_irq(dev->irq, dev);
return err;
@@ -2704,6 +2705,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
static struct platform_device *port_platdev[3];
+static void mv643xx_eth_shared_of_remove(void)
+{
+ int n;
+
+ for (n = 0; n < 3; n++) {
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+}
+
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct device_node *pnp)
{
@@ -2740,7 +2751,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
return -EINVAL;
}
- of_get_mac_address(pnp, ppd.mac_addr);
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2804,21 +2817,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
of_node_put(pnp);
+ mv643xx_eth_shared_of_remove();
return ret;
}
}
return 0;
}
-static void mv643xx_eth_shared_of_remove(void)
-{
- int n;
-
- for (n = 0; n < 3; n++) {
- platform_device_del(port_platdev[n]);
- port_platdev[n] = NULL;
- }
-}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
@@ -3088,8 +3093,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct mv643xx_eth_private *mp;
struct net_device *dev;
struct phy_device *phydev = NULL;
- struct resource *res;
- int err;
+ int err, irq;
pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
@@ -3180,14 +3184,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (WARN_ON(irq < 0)) {
+ err = irq;
+ goto out;
+ }
+ dev->irq = irq;
dev->netdev_ops = &mv643xx_eth_netdev_ops;
@@ -3201,7 +3208,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
- netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
+ netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 83c8908f0cc7..ff3e361e06e7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -76,6 +76,8 @@
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_AC5_CNM_DDR_TARGET 0x2
+#define MVNETA_AC5_CNM_DDR_ATTR 0xb
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
@@ -544,6 +546,7 @@ struct mvneta_port {
/* Flags for special SoC configurations */
bool neta_armada3700;
+ bool neta_ac5;
u16 rx_offset_correction;
const struct mbus_dram_target_info *dram_target_info;
};
@@ -1884,8 +1887,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
bytes_compl += buf->skb->len;
pkts_compl++;
dev_kfree_skb_any(buf->skb);
- } else if (buf->type == MVNETA_TYPE_XDP_TX ||
- buf->type == MVNETA_TYPE_XDP_NDO) {
+ } else if ((buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
@@ -2060,61 +2063,104 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct xdp_buff *xdp, struct skb_shared_info *sinfo,
- int sync_len)
+ struct xdp_buff *xdp, int sync_len)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), true);
+
+out:
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
sync_len, true);
}
static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
- struct xdp_frame *xdpf, bool dma_map)
+ struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct device *dev = pp->dev->dev.parent;
struct mvneta_tx_desc *tx_desc;
- struct mvneta_tx_buf *buf;
- dma_addr_t dma_addr;
+ int i, num_frames = 1;
+ struct page *page;
+
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ num_frames += sinfo->nr_frags;
- if (txq->count >= txq->tx_stop_threshold)
+ if (txq->count + num_frames >= txq->size)
return MVNETA_XDP_DROPPED;
- tx_desc = mvneta_txq_next_desc_get(txq);
+ for (i = 0; i < num_frames; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ skb_frag_t *frag = NULL;
+ int len = xdpf->len;
+ dma_addr_t dma_addr;
- buf = &txq->buf[txq->txq_put_index];
- if (dma_map) {
- /* ndo_xdp_xmit */
- dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
- xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
- mvneta_txq_desc_put(txq);
- return MVNETA_XDP_DROPPED;
+ if (unlikely(i)) { /* paged area */
+ frag = &sinfo->frags[i - 1];
+ len = skb_frag_size(frag);
}
- buf->type = MVNETA_TYPE_XDP_NDO;
- } else {
- struct page *page = virt_to_page(xdpf->data);
- dma_addr = page_pool_get_dma_addr(page) +
- sizeof(*xdpf) + xdpf->headroom;
- dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
- xdpf->len, DMA_BIDIRECTIONAL);
- buf->type = MVNETA_TYPE_XDP_TX;
- }
- buf->xdpf = xdpf;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ void *data;
+
+ data = unlikely(frag) ? skb_frag_address(frag)
+ : xdpf->data;
+ dma_addr = dma_map_single(dev, data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto unmap;
+ }
- tx_desc->command = MVNETA_TXD_FLZ_DESC;
- tx_desc->buf_phys_addr = dma_addr;
- tx_desc->data_size = xdpf->len;
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag)
+ : virt_to_page(xdpf->data);
+ dma_addr = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma_addr += skb_frag_off(frag);
+ else
+ dma_addr += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dev, dma_addr, len,
+ DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = unlikely(i) ? NULL : xdpf;
- mvneta_txq_inc_put(txq);
- txq->pending++;
- txq->count++;
+ tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = len;
+ *nxmit_byte += len;
+
+ mvneta_txq_inc_put(txq);
+ }
+ /* last descriptor */
+ tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->pending += num_frames;
+ txq->count += num_frames;
return MVNETA_XDP_TX;
+
+unmap:
+ for (i--; i >= 0; i--) {
+ mvneta_txq_desc_put(txq);
+ tx_desc = txq->descs + txq->next_desc_to_proc;
+ dma_unmap_single(dev, tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ }
+
+ return MVNETA_XDP_DROPPED;
}
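[The reworked submit path above walks the xdp_frame linear area plus its fragments; a condensed sketch of that traversal, where consume() is a placeholder for the per-buffer descriptor setup:]

	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u32 nfrags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	u32 i;

	consume(xdpf->data, xdpf->len);		/* linear part, F_DESC */
	for (i = 0; i < nfrags; i++)		/* paged areas */
		consume(skb_frag_address(&sinfo->frags[i]),
			skb_frag_size(&sinfo->frags[i]));
	/* last descriptor gets L_DESC | Z_PAD, as in the hunk above */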
static int
@@ -2123,8 +2169,8 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
+ int cpu, nxmit_byte = 0;
struct xdp_frame *xdpf;
- int cpu;
u32 ret;
xdpf = xdp_convert_buff_to_frame(xdp);
@@ -2136,10 +2182,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, cpu);
- ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
if (ret == MVNETA_XDP_TX) {
u64_stats_update_begin(&stats->syncp);
- stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_bytes += nxmit_byte;
stats->es.ps.tx_packets++;
stats->es.ps.xdp_tx++;
u64_stats_update_end(&stats->syncp);
@@ -2178,11 +2224,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
- ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
+ true);
if (ret != MVNETA_XDP_TX)
break;
- nxmit_byte += frames[i]->len;
nxmit++;
}
@@ -2205,7 +2251,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync;
u32 ret, act;
@@ -2226,7 +2271,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
} else {
ret = MVNETA_XDP_REDIR;
@@ -2237,7 +2282,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
break;
default:
bpf_warn_invalid_xdp_action(pp->dev, prog, act);
@@ -2246,7 +2291,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
fallthrough;
case XDP_DROP:
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2269,7 +2314,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
- struct skb_shared_info *sinfo;
if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2289,11 +2333,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
+ xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
data_len, false);
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = 0;
}
static void
@@ -2301,9 +2343,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct skb_shared_info *xdp_sinfo,
struct page *page)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int data_len, len;
@@ -2321,25 +2363,25 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir);
rx_desc->buf_phys_addr = 0;
- if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
- skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
+ if (!xdp_buff_has_frags(xdp))
+ sinfo->nr_frags = 0;
+
+ if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->xdp_frags_size = *size;
+ xdp_buff_set_frags_flag(xdp);
+ }
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
*size -= len;
}
@@ -2348,8 +2390,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- int i, num_frags = sinfo->nr_frags;
struct sk_buff *skb;
+ u8 num_frags;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ num_frags = sinfo->nr_frags;
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
@@ -2361,13 +2406,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_put(skb, xdp->data_end - xdp->data);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- skb_frag_page(frag), skb_frag_off(frag),
- skb_frag_size(frag), PAGE_SIZE);
- }
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -2379,7 +2422,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{
int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev;
- struct skb_shared_info sinfo;
struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz;
@@ -2388,8 +2430,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
-
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2431,7 +2471,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
- &size, &sinfo, page);
+ &size, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2439,7 +2479,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue;
if (size) {
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
goto next;
}
@@ -2451,7 +2491,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
@@ -2468,11 +2508,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb);
next:
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
}
if (xdp_buf.data_hard_start)
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
xdp_do_flush_map();
@@ -2625,8 +2664,8 @@ err_drop_frame:
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2688,7 +2727,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
if ((txq->count + tso_count_descs(skb)) >= txq->size)
return 0;
- if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
pr_info("*** Is this even possible?\n");
return 0;
}
@@ -3260,7 +3299,8 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
+ PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -3740,6 +3780,7 @@ static void mvneta_percpu_disable(void *arg)
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *prog = pp->xdp_prog;
int ret;
if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
@@ -3748,8 +3789,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
- if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
- netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ if (prog && !prog->aux->xdp_has_frags &&
+ mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
+ mtu);
+
return -EINVAL;
}
@@ -3969,6 +4013,15 @@ static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
.pcs_an_restart = mvneta_pcs_an_restart,
};
+static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return &pp->phylink_pcs;
+}
+
static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -4169,13 +4222,14 @@ static void mvneta_mac_link_up(struct phylink_config *config,
mvneta_port_up(pp);
if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ pp->eee_active = phy_init_eee(phy, false) >= 0;
mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
}
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = mvneta_mac_select_pcs,
.mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
.mac_finish = mvneta_mac_finish,
@@ -4490,8 +4544,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct mvneta_port *pp = netdev_priv(dev);
struct bpf_prog *old_prog;
- if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ if (prog && !prog->aux->xdp_has_frags &&
+ dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
return -EOPNOTSUPP;
}
@@ -4601,11 +4656,11 @@ mvneta_ethtool_get_coalesce(struct net_device *dev,
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -4680,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4792,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
}
}
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < rxq_number; i++)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mvneta_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -4802,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
+
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics);
+ return ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+
return -EOPNOTSUPP;
}
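[The three ethtool callbacks must agree on layout: the count, the string table and the data array all place the page_pool entries after the driver's own statistics. A sketch of the contract, with base_stats standing in for mvneta_statistics:]

	/* get_sset_count() */
	count = ARRAY_SIZE(base_stats) + page_pool_ethtool_stats_get_count();

	/* get_strings(): driver names first, pool names appended */
	data += ETH_GSTRING_LEN * ARRAY_SIZE(base_stats);
	page_pool_ethtool_stats_get_strings(data);

	/* get_stats(): driver values first, then pool counters */
	data = page_pool_ethtool_stats_get(data, &pp_stats);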
@@ -5272,6 +5345,10 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
win_protect |= 3 << (2 * i);
}
} else {
+ if (pp->neta_ac5)
+ mvreg_write(pp, MVNETA_WIN_BASE(0),
+ (MVNETA_AC5_CNM_DDR_ATTR << 8) |
+ MVNETA_AC5_CNM_DDR_TARGET);
/* For Armada3700 open default 4GB Mbus window, leaving
* arbitration of target/attribute to a different layer
* of configuration.
@@ -5321,26 +5398,66 @@ static int mvneta_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->irq = irq_of_parse_and_map(dn, 0);
- if (dev->irq == 0)
- return -EINVAL;
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
+
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+ pp->dn = dn;
+
+ pp->rxq_def = rxq_def;
+ pp->indir[0] = rxq_def;
err = of_get_phy_mode(dn, &phy_mode);
if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- goto err_free_irq;
+ return err;
}
+ pp->phy_interface = phy_mode;
+
comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
- if (comphy == ERR_PTR(-EPROBE_DEFER)) {
- err = -EPROBE_DEFER;
- goto err_free_irq;
- } else if (IS_ERR(comphy)) {
+ if (comphy == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(comphy))
comphy = NULL;
+
+ pp->comphy = comphy;
+
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pp->base))
+ return PTR_ERR(pp->base);
+
+ /* Get special SoC configurations */
+ if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+ pp->neta_armada3700 = true;
+ if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
+ pp->neta_armada3700 = true;
+ pp->neta_ac5 = true;
}
- pp = netdev_priv(dev);
- spin_lock_init(&pp->lock);
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0)
+ return -EINVAL;
+
+ pp->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pp->clk))
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_free_irq;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
@@ -5377,55 +5494,16 @@ static int mvneta_probe(struct platform_device *pdev)
phy_mode, &mvneta_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_free_irq;
- }
-
- dev->tx_queue_len = MVNETA_MAX_TXD;
- dev->watchdog_timeo = 5 * HZ;
- dev->netdev_ops = &mvneta_netdev_ops;
-
- dev->ethtool_ops = &mvneta_eth_tool_ops;
-
- pp->phylink = phylink;
- pp->comphy = comphy;
- pp->phy_interface = phy_mode;
- pp->dn = dn;
-
- pp->rxq_def = rxq_def;
- pp->indir[0] = rxq_def;
-
- /* Get special SoC configurations */
- if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
- pp->neta_armada3700 = true;
-
- pp->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(pp->clk))
- pp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pp->clk)) {
- err = PTR_ERR(pp->clk);
- goto err_free_phylink;
- }
-
- clk_prepare_enable(pp->clk);
-
- pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(pp->clk_bus))
- clk_prepare_enable(pp->clk_bus);
-
- pp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pp->base)) {
- err = PTR_ERR(pp->base);
goto err_clk;
}
- pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- phylink_set_pcs(phylink, &pp->phylink_pcs);
+ pp->phylink = phylink;
/* Alloc per-cpu port structure */
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
if (!pp->ports) {
err = -ENOMEM;
- goto err_clk;
+ goto err_free_phylink;
}
/* Alloc per-cpu stats */
@@ -5522,14 +5600,13 @@ static int mvneta_probe(struct platform_device *pdev)
* operation, so only single NAPI should be initialized.
*/
if (pp->neta_armada3700) {
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pp->napi, mvneta_poll);
} else {
for_each_present_cpu(cpu) {
struct mvneta_pcpu_port *port =
per_cpu_ptr(pp->ports, cpu);
- netif_napi_add(dev, &port->napi, mvneta_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, mvneta_poll);
port->pp = pp;
}
}
@@ -5539,7 +5616,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
+ netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
@@ -5569,12 +5646,12 @@ err_netdev:
free_percpu(pp->stats);
err_free_ports:
free_percpu(pp->ports);
-err_clk:
- clk_disable_unprepare(pp->clk_bus);
- clk_disable_unprepare(pp->clk);
err_free_phylink:
if (pp->phylink)
phylink_destroy(pp->phylink);
+err_clk:
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
err_free_irq:
irq_dispose_mapping(dev->irq);
return err;
@@ -5720,6 +5797,7 @@ static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
{ .compatible = "marvell,armada-3700-neta" },
+ { .compatible = "marvell,armada-ac5-neta" },
{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ad73a488fc5f..11e603686a27 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..75e83ea2a926 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7cdbf8b8bbf6..eb0fb8128096 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1869,7 +1869,7 @@ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
* design, incremented at different moments in the chain of packet processing,
* it is very likely that incoming packets could have been dropped after being
* counted by hardware but before reaching software statistics (most probably
- * multicast packets), and in the oppposite way, during transmission, FCS bytes
+ * multicast packets), and in the opposite way, during transmission, FCS bytes
* are added in between as well as TSO skb will be split and header bytes added.
* Hence, statistics gathered from userspace with ifconfig (software) and
* ethtool (hardware) cannot be compared.
@@ -5425,11 +5425,11 @@ mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5770,8 +5770,7 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
v->irq = irq_of_parse_and_map(port_node, 0);
if (v->irq <= 0)
return -EINVAL;
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
port->nqvecs = 1;
@@ -5831,8 +5830,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
goto err;
}
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
}
return 0;
@@ -6861,7 +6859,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
- netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
+ netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
@@ -6870,6 +6868,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
@@ -6940,9 +6941,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.supported_interfaces);
}
- port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
-
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
@@ -7706,7 +7704,18 @@ static struct platform_driver mvpp2_driver = {
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
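[The open-coded module init/exit replaces module_platform_driver() so the shared debugfs root can be torn down exactly once at module unload; the removed debugfs_lookup() call also took a dentry reference that was never dropped, which caching the root in a static pointer avoids. Sketch of the lifecycle:]

	/* probe (any instance): create the shared root on first use */
	if (!mvpp2_root)
		mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);

	/* module exit: remove it after all ports are unregistered */
	platform_driver_unregister(&mvpp2_driver);
	debugfs_remove(mvpp2_root);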
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
new file mode 100644
index 000000000000..0d7db815340e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC Driver Configuration
+#
+
+config OCTEON_EP
+ tristate "Marvell Octeon PCI Endpoint NIC Driver"
+ depends on 64BIT
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports networking functionality of Marvell's
+ Octeon PCI Endpoint NIC.
+
+ To know the list of devices supported by this driver, refer to
+ the documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>.
+
+ To compile this driver as a module, choose M here. The name of the
+ module will be octeon_ep.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
new file mode 100644
index 000000000000..2026c8118158
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC
+#
+
+obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
+
+octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
new file mode 100644
index 000000000000..6ad88d0fe43f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cn9k_pf.h"
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cn93_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint0",
+ "epf_vfire_rint1",
+ "epf_vfore_rint0",
+ "epf_vfore_rint1",
+ "epf_mbox_rint0",
+ "epf_mbox_rint1",
+ "epf_oei_rint",
+ "epf_dma_rint",
+ "epf_dma_vf_rint0",
+ "epf_dma_vf_rint1",
+ "epf_pp_vf_rint0",
+ "epf_pp_vf_rint1",
+ "epf_misc_rint",
+ "epf_rsvd",
+};
+
+/* Dump useful hardware CSRs for debug purpose */
+static void cn93_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cn93_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_reset_iq(oct, q);
+ cn93_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CN93_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config 93xx PF. */
+static void octep_init_config_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 val;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.pkind = 0;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
+
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7);
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CN93_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_R_IN_CTL_IS_64B;
+ reg_val |= CN93_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0)
+ oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0)
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ mbox->q_no = q_no;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_pf_mbox_intr(struct octep_device *oct)
+{
+ u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL;
+
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+ for (qno = 0; qno < OCTEP_MAX_VF; qno++) {
+ val = readq(oct->mbox[qno]->mbox_read_reg);
+ dev_dbg(&oct->pdev->dev,
+ "PF MBOX READ: val:%llx from VF:%llx\n", val, qno);
+ }
+
+ writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+}
+
+/* Interrupts handler for all non-queue generic interrupts. */
+static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ goto irq_handled;
+ }
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+
+ goto irq_handled;
+ }
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MBOX INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MBOX_RINT intr: 0x%llx\n", reg_val);
+ cn93_handle_pf_mbox_intr(oct);
+ goto irq_handled;
+ }
+
+ /* Check for OEI INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received OEI_EINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val);
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ goto irq_handled;
+ }
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
+ if (reg_val) {
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+irq_handled:
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset of 93xx */
+static int octep_soft_reset_cn93_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Set core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+ /* clear core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
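+/* The instruction count register is read as a free-running 32-bit
+ * counter; the delta since the last snapshot is the number of newly
+ * consumed descriptors. For example, with max_count = 1024, a previous
+ * read index of 1020 and a counter that advanced from 5000 to 5010,
+ * last_done = 10 and the new index wraps to (1020 + 10) % 1024 = 6;
+ * unsigned subtraction also handles rollover of the hardware counter.
+ */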
+static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cn93_pf(oct, q);
+ octep_enable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cn93_pf(oct, q);
+ octep_disable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cn93_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cn93_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cn93_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cn93_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+
+ oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;
+
+ octep_setup_pci_window_regs_cn93_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cn93_pf(oct);
+ octep_configure_ring_mapping_cn93_pf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
new file mode 100644
index 000000000000..f208f3f9a447
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_CONFIG_H_
+#define _OCTEP_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_32BYTE_INSTR 32
+#define OCTEP_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_IQ_MAX_DESCRIPTORS 1024
+/* Minimum number of Tx requests to be enqueued before ringing the doorbell */
+#define OCTEP_DB_MIN 1
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_IQ_INTR_THRESHOLD 0x0
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build skb from allocated page buffer once the packet is received.
+ * When a gathered packet is received, make head page as skb head and
+ * page buffers in consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_OQ_PKTS_PER_INTR 128
+#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold
+ * Wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by the Octeon interface */
+#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_DEFAULT_MTU 1500
+
+/* Macros to get Octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
+
+#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs)
+#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs)
+#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf)
+#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf)
+#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names)
+
+#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr)
+
+/* Hardware Tx Queue configuration. */
+struct octep_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* pkind for packets sent to Octeon */
+ u16 pkind;
+
+ /* Minimum number of commands pending to be posted to Octeon before
+ * the driver rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* Number of buffers consumed by the driver on this Output queue
+ * during packet processing, after which the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only after it has sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+};
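+
+/* Both coalescing parameters are programmed into one 64-bit
+ * SDP_R_OUT_INT_LEVELS register by the hardware-specific setup code:
+ * the time interval goes in the upper 32 bits and the packet count in
+ * the lower 32 bits.
+ */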
+
+/* Tx/Rx configuration */
+struct octep_pf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+
+ /* Starting IOQ number: this changes based on which PEM is used */
+ u16 srn;
+};
+
+/* Octeon Hardware SRIOV config */
+struct octep_sriov_config {
+ /* Max number of VF devices supported */
+ u16 max_vfs;
+
+ /* Number of VF devices enabled */
+ u16 active_vfs;
+
+ /* Max number of rings assigned to VF */
+ u8 max_rings_per_vf;
+
+ /* Number of rings enabled per VF */
+ u8 active_rings_per_vf;
+
+ /* Starting ring number of VFs: ring-0 of VF-0 of the PF */
+ u16 vf_srn;
+};
+
+/* Octeon MSI-x config. */
+struct octep_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+
+ /* Number of Non IOQ interrupts */
+ u16 non_ioq_msix;
+
+ /* Names of Non IOQ interrupts */
+ char **non_ioq_msix_names;
+};
+
+struct octep_ctrl_mbox_config {
+ /* Barmem address for control mbox */
+ void __iomem *barmem_addr;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_config {
+ /* Input Queue attributes. */
+ struct octep_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_oq_config oq;
+
+ /* NIC Port Configuration */
+ struct octep_pf_ring_config pf_ring_cfg;
+
+ /* SRIOV configuration of the PF */
+ struct octep_sriov_config sriov_cfg;
+
+ /* MSI-X interrupt config */
+ struct octep_msix_config msix_cfg;
+
+ /* ctrl mbox config */
+ struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+};
+#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
new file mode 100644
index 000000000000..39322e4dd100
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_ctrl_mbox.h"
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Timeout in msecs for message response */
+#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100
+/* Time in msecs to wait for message response */
+#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \
+ (sizeof(struct octep_ctrl_mbox_msg) * (i)))
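+
+/* Per the layout above, the host-to-fw queue data begins at
+ * barmem + 256 + 16 + 16 = barmem + 288, and the fw-to-host queue
+ * follows at barmem + 288 + ((h2fq elem_sz + 8) * h2fq elem_cnt),
+ * matching the hw_q assignments in octep_ctrl_mbox_init().
+ */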
+
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask)
+{
+ return (index + 1) & mask;
+}
+
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask)
+{
+ return mask - ((pi - ci) & mask);
+}
+
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask)
+{
+ return ((pi - ci) & mask);
+}
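+
+/* Example: elem_cnt = 8 gives mask = 7; with pi = 1 (wrapped) and
+ * ci = 4, depth = (1 - 4) & 7 = 5 occupied slots and space = 7 - 5 = 2
+ * free slots. One slot is always left unused so that pi == ci means
+ * "empty" rather than "full".
+ */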
+
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
+{
+ u64 magic_num, status;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox->barmem) {
+ pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem);
+ return -EINVAL;
+ }
+
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem));
+ if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
+ pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
+ return -EINVAL;
+ }
+
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem));
+ if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
+ pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
+ return -EINVAL;
+ }
+
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem));
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1);
+ mutex_init(&mbox->h2fq_lock);
+
+ mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1);
+ mutex_init(&mbox->f2hq_lock);
+
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_INFO_SZ +
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ +
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ;
+
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->h2fq.hw_q +
+ ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) *
+ mbox->h2fq.elem_cnt);
+
+ /* ensure ready state is seen after everything is initialized */
+ wmb();
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Init successful.\n");
+
+ return 0;
+}
+
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS);
+ unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS);
+ struct octep_ctrl_mbox_q *q;
+ unsigned long expire;
+ u64 *mbuf, *word0;
+ u8 __iomem *qidx;
+ u16 pi, ci;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->h2fq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask))
+ return -ENOMEM;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi);
+ mbuf = (u64 *)msg->msg;
+ word0 = &msg->hdr.word0;
+
+ mutex_lock(&mbox->h2fq_lock);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(*word0, qidx);
+
+ pi = octep_ctrl_mbox_circq_inc(pi, q->mask);
+ writel(pi, q->hw_prod);
+ mutex_unlock(&mbox->h2fq_lock);
+
+ /* don't check for notification response */
+ if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ return 0;
+
+ expire = jiffies + timeout;
+ while (true) {
+ *word0 = readq(qidx);
+ if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ break;
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current) || time_after(jiffies, expire)) {
+ pr_info("octep_ctrl_mbox: Timed out\n");
+ return -EBUSY;
+ }
+ }
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ return 0;
+}
+
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_q *q;
+ u32 count, pi, ci;
+ u8 __iomem *qidx;
+ u64 *mbuf;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->f2hq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+ count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask);
+ if (!count)
+ return -EAGAIN;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci);
+ mbuf = (u64 *)msg->msg;
+
+ mutex_lock(&mbox->f2hq_lock);
+
+ msg->hdr.word0 = readq(qidx);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ ci = octep_ctrl_mbox_circq_inc(ci, q->mask);
+ writel(ci, q->hw_cons);
+
+ mutex_unlock(&mbox->f2hq_lock);
+
+ if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req)
+ return 0;
+
+ mbox->process_req(mbox->user_ctx, msg);
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(msg->hdr.word0, qidx);
+
+ return 0;
+}
+
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
+{
+ if (!mbox)
+ return -EINVAL;
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+ /* ensure uninit state is written before uninitialization */
+ wmb();
+
+ mutex_destroy(&mbox->h2fq_lock);
+ mutex_destroy(&mbox->f2hq_lock);
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Uninit successful.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
new file mode 100644
index 000000000000..2dc5753cfec6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_MBOX_H__
+#define __OCTEP_CTRL_MBOX_H__
+
+/* barmem structure
+ * |===========================================|
+ * |Info (16 + 120 + 120 = 256 bytes) |
+ * |-------------------------------------------|
+ * |magic number (8 bytes) |
+ * |bar memory size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |-------------------------------------------|
+ * |host version (8 bytes) |
+ * |host status (8 bytes) |
+ * |host reserved (104 bytes) |
+ * |-------------------------------------------|
+ * |fw version (8 bytes) |
+ * |fw status (8 bytes) |
+ * |fw reserved (104 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Fw to Host Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ * |===========================================|
+ * |Fw to Host Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ */
+
+#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
+
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to target queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox target to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+/* Size of mbox queue in bytes */
+#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt))
+/* Size of mbox in bytes */
+#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \
+ OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt))
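+
+/* For example, 64-byte elements and 16-entry queues in both directions
+ * need OCTEP_CTRL_MBOX_SZ(64, 16, 64, 16) = 256 + 16 + 16 +
+ * (72 * 16) + (72 * 16) = 2592 bytes of BAR memory.
+ */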
+
+/* Valid request message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
+/* Valid response message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
+/* Valid notification, no response required */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+
+enum octep_ctrl_mbox_status {
+ OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
+ OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_STATUS_UNINIT
+};
+
+/* mbox message */
+union octep_ctrl_mbox_msg_hdr {
+ u64 word0;
+ struct {
+ /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
+ u32 flags;
+ /* size of message in words excluding header */
+ u32 sizew;
+ };
+};
+
+/* mbox message */
+struct octep_ctrl_mbox_msg {
+ /* mbox transaction header */
+ union octep_ctrl_mbox_msg_hdr hdr;
+ /* pointer to message buffer */
+ void *msg;
+};
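+
+/* The message buffer is moved to/from BAR memory as hdr.sizew
+ * consecutive 64-bit words using readq()/writeq(), so it must be at
+ * least sizew * 8 bytes long and 8-byte aligned.
+ */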
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+ /* q element size, should be aligned to unsigned long */
+ u16 elem_sz;
+ /* q element count, should be power of 2 */
+ u16 elem_cnt;
+ /* q mask */
+ u16 mask;
+ /* producer address in bar mem */
+ u8 __iomem *hw_prod;
+ /* consumer address in bar mem */
+ u8 __iomem *hw_cons;
+ /* q base address in bar mem */
+ u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+ /* host driver version */
+ u64 version;
+ /* size of bar memory */
+ u32 barmem_sz;
+ /* pointer to BAR memory */
+ u8 __iomem *barmem;
+ /* user context for callback, can be null */
+ void *user_ctx;
+ /* callback handler for processing request, called from octep_ctrl_mbox_recv */
+ int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
+ /* host-to-fw queue */
+ struct octep_ctrl_mbox_q h2fq;
+ /* fw-to-host queue */
+ struct octep_ctrl_mbox_q f2hq;
+ /* lock for h2fq */
+ struct mutex h2fq_lock;
+ /* lock for f2hq */
+ struct mutex f2hq_lock;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000000..7c00c896ab98
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+int octep_get_link_status(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ return resp->link.state;
+}
+
+void octep_set_link_status(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+void octep_set_rx_state(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ memcpy(addr, resp->mac.addr, ETH_ALEN);
+
+ return err;
+}
+
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req.mac.addr, addr, ETH_ALEN);
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_set_mtu(struct octep_device *oct, int mtu)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.mtu.val = mtu;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_if_stats(struct octep_device *oct)
+{
+ void __iomem *iface_rx_stats;
+ void __iomem *iface_tx_stats;
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+ req.get_stats.offset = oct->ctrl_mbox_ifstats_offset;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset;
+ iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset +
+ sizeof(struct octep_iface_rx_stats);
+ memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats));
+
+ return err;
+}
+
+int octep_get_link_info(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ oct->link_info.supported_modes = resp->link_info.supported_modes;
+ oct->link_info.advertised_modes = resp->link_info.advertised_modes;
+ oct->link_info.autoneg = resp->link_info.autoneg;
+ oct->link_info.pause = resp->link_info.pause;
+ oct->link_info.speed = resp->link_info.speed;
+
+ return err;
+}
+
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link_info.info.advertised_modes = link_info->advertised_modes;
+ req.link_info.info.autoneg = link_info->autoneg;
+ req.link_info.info.pause = link_info->pause;
+ req.link_info.info.speed = link_info->speed;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
new file mode 100644
index 000000000000..f23b58381322
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_NET_H__
+#define __OCTEP_CTRL_NET_H__
+
+/* Supported commands */
+enum octep_ctrl_net_cmd {
+ OCTEP_CTRL_NET_CMD_GET = 0,
+ OCTEP_CTRL_NET_CMD_SET,
+};
+
+/* Supported states */
+enum octep_ctrl_net_state {
+ OCTEP_CTRL_NET_STATE_DOWN = 0,
+ OCTEP_CTRL_NET_STATE_UP,
+};
+
+/* Supported replies */
+enum octep_ctrl_net_reply {
+ OCTEP_CTRL_NET_REPLY_OK = 0,
+ OCTEP_CTRL_NET_REPLY_GENERIC_FAIL,
+ OCTEP_CTRL_NET_REPLY_INVALID_PARAM,
+};
+
+/* Supported host to fw commands */
+enum octep_ctrl_net_h2f_cmd {
+ OCTEP_CTRL_NET_H2F_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_H2F_CMD_MTU,
+ OCTEP_CTRL_NET_H2F_CMD_MAC,
+ OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+};
+
+/* Supported fw to host commands */
+enum octep_ctrl_net_f2h_cmd {
+ OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+};
+
+struct octep_ctrl_net_req_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_h2f_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+};
+
+/* get/set mtu request */
+struct octep_ctrl_net_h2f_req_cmd_mtu {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get/set mac request */
+struct octep_ctrl_net_h2f_req_cmd_mac {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get if_stats, xstats, q_stats request */
+struct octep_ctrl_net_h2f_req_cmd_get_stats {
+ /* offset into barmem where fw should copy over stats */
+ u32 offset;
+};
+
+/* get/set link state, rx state */
+struct octep_ctrl_net_h2f_req_cmd_state {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* link info */
+struct octep_ctrl_net_link_info {
+ /* Bitmap of Supported link speeds/modes */
+ u64 supported_modes;
+ /* Bitmap of Advertised link speeds/modes */
+ u64 advertised_modes;
+ /* Autonegotiation state; bit 0=disabled; bit 1=enabled */
+ u8 autoneg;
+ /* Pause frames setting. bit 0=disabled; bit 1=enabled */
+ u8 pause;
+ /* Negotiated link speed in Mbps */
+ u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_link_info */
+ struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_req_cmd_mac mac;
+ struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats;
+ struct octep_ctrl_net_h2f_req_cmd_state link;
+ struct octep_ctrl_net_h2f_req_cmd_state rx;
+ struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ };
+} __packed;
+
+struct octep_ctrl_net_resp_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_h2f_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_reply */
+ u16 reply;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_state link;
+ struct octep_ctrl_net_h2f_resp_cmd_state rx;
+ struct octep_ctrl_net_link_info link_info;
+ };
+} __packed;
+
+/* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_f2h_req_cmd_state link;
+ };
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Size of host to fw octep_ctrl_mbox queue element */
+union octep_ctrl_net_h2f_data_sz {
+ struct octep_ctrl_net_h2f_req h2f_req;
+ struct octep_ctrl_net_h2f_resp h2f_resp;
+};
+
+/* Size of fw to host octep_ctrl_mbox queue element */
+union octep_ctrl_net_f2h_data_sz {
+ struct octep_ctrl_net_f2h_req f2h_req;
+ struct octep_ctrl_net_f2h_resp f2h_resp;
+};
+
+/* size of host to fw data in words */
+#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size of fw to host data in words */
+#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size in words of get/set mtu request */
+#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2
+/* size in words of get/set mac request */
+#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2
+/* size in words of get stats request */
+#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2
+/* size in words of get/set state request */
+#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2
+/* size in words of get/set link info request */
+#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4
+
+/* size in words of get mtu response */
+#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2
+/* size in words of set mtu response */
+#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1
+/* size in words of get mac response */
+#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2
+/* size in words of set mac response */
+#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1
+/* size in words of get state response */
+#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2
+/* size in words of set state response */
+#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1
+/* size in words of get link info response */
+#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4
+/* size in words of set link info response */
+#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1
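+
+/* All sizes above are in 8-byte words and include the 8-byte request or
+ * response header; e.g. a state request is the header plus a 4-byte
+ * cmd/state pair, carried as 2 words.
+ */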
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: link state on success (0=down, 1=up), -errno on failure.
+ */
+int octep_get_link_status(struct octep_device *oct);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_link_status(struct octep_device *oct, bool up);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_rx_state(struct octep_device *oct, bool up);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param mtu: new MTU in bytes.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mtu(struct octep_device *oct, int mtu);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_if_stats(struct octep_device *oct);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_link_info(struct octep_device *oct);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000000..87ef129b269a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "tx_hw_underflow",
+ "tx_hw_control",
+ "tx_less_than_64",
+ "tx_equal_64",
+ "tx_equal_65_to_127",
+ "tx_equal_128_to_255",
+ "tx_equal_256_to_511",
+ "tx_equal_512_to_1023",
+ "tx_equal_1024_to_1518",
+ "tx_greater_than_1518",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_hw_mcast",
+ "rx_pause_pkts",
+ "rx_pause_bytes",
+ "rx_dropped_pkts_fifo_full",
+ "rx_dropped_bytes_fifo_full",
+ "rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN, "%s",
+ octep_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_tx_stats *iface_tx_stats;
+ struct octep_iface_rx_stats *iface_rx_stats;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_packets = 0;
+ rx_bytes = 0;
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ tx_busy_errors += iq->stats.tx_busy;
+
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_packets;
+ data[i++] = tx_packets;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
+ iface_rx_stats->err_pkts;
+ data[i++] = iface_tx_stats->xscol +
+ iface_tx_stats->xsdef;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_tx_stats->undflw;
+ data[i++] = iface_tx_stats->ctl;
+ data[i++] = iface_tx_stats->hist_lt64;
+ data[i++] = iface_tx_stats->hist_eq64;
+ data[i++] = iface_tx_stats->hist_65to127;
+ data[i++] = iface_tx_stats->hist_128to255;
+ data[i++] = iface_tx_stats->hist_256to511;
+ data[i++] = iface_tx_stats->hist_512to1023;
+ data[i++] = iface_tx_stats->hist_1024to1518;
+ data[i++] = iface_tx_stats->hist_gt1518;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->mcast_pkts;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->pause_pkts;
+ data[i++] = iface_rx_stats->pause_octets;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ data[i++] = iface_rx_stats->err_pkts;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \
+{ \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (link_info->pause) {
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static int octep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info link_info_new;
+ struct octep_iface_link_info *link_info;
+ u64 advertised = 0;
+ u8 autoneg = 0;
+ int err;
+
+ link_info = &oct->link_info;
+ memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info));
+
+ /* Only Full duplex is supported;
+ * Assume full duplex when duplex is unknown.
+ */
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED))
+ return -EOPNOTSUPP;
+ autoneg = 1;
+ }
+
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseR_FEC))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseLR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseLR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseLR_ER_FR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseLR4_ER4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4);
+
+ if (advertised == link_info->advertised_modes &&
+ cmd->base.speed == link_info->speed &&
+ cmd->base.autoneg == link_info->autoneg)
+ return 0;
+
+ link_info_new.advertised_modes = advertised;
+ link_info_new.speed = cmd->base.speed;
+ link_info_new.autoneg = autoneg;
+
+ err = octep_set_link_info(oct, &link_info_new);
+ if (err)
+ return err;
+
+ memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info));
+ return 0;
+}
+
+static const struct ethtool_ops octep_ethtool_ops = {
+ .get_drvinfo = octep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_get_strings,
+ .get_sset_count = octep_get_sset_count,
+ .get_ethtool_stats = octep_get_ethtool_stats,
+ .get_link_ksettings = octep_get_link_ksettings,
+ .set_link_ksettings = octep_set_link_ksettings,
+};
+
+void octep_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
new file mode 100644
index 000000000000..9089adcb75f9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+struct workqueue_struct *octep_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to the interrupt handler, from which the
+ * NAPI poll is scheduled; it includes quick access to the private data of
+ * the Tx/Rx queue corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+ struct octep_ioq_vector *ioq_vector;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* One MSI-X per Tx/Rx queue pair, plus the generic (non-IOQ) interrupts */
+ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+ oct->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data; contains pointers to the Tx/Rx queue private data
+ * and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_ioq_vector *ioq_vector = data;
+ struct octep_device *oct = ioq_vector->octep_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_request_irqs(struct octep_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ char **non_ioq_msix_names;
+ int num_non_ioq_msix;
+ int ret, i, j;
+
+ num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
+ non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
+
+ oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
+ OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
+ if (!oct->non_ioq_irq_names)
+ goto alloc_err;
+
+ /* First few MSI-X interrupts are non-queue interrupts */
+ for (i = 0; i < num_non_ioq_msix; i++) {
+ char *irq_name;
+
+ irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
+ "%s-%s", netdev->name, non_ioq_msix_names[i]);
+ ret = request_irq(msix_entry->vector,
+ octep_non_ioq_intr_handler, 0,
+ irq_name, oct);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for %s; err=%d",
+ irq_name, ret);
+ goto non_ioq_irq_err;
+ }
+ }
+
+ /* Request IRQs for Tx/Rx queues */
+ for (j = 0; j < oct->num_oqs; j++) {
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, j);
+ ret = request_irq(msix_entry->vector,
+ octep_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ j, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(j % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (j) {
+ --j;
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ irq_set_affinity_hint(msix_entry->vector, NULL);
+ free_irq(msix_entry->vector, ioq_vector);
+ }
+non_ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct);
+ }
+ kfree(oct->non_ioq_irq_names);
+ oct->non_ioq_irq_names = NULL;
+alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_free_irqs(struct octep_device *oct)
+{
+ int i;
+
+ /* First few MSI-X interrupts are non queue interrupts; free them */
+ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
+ free_irq(oct->msix_entries[i].vector, oct);
+ kfree(oct->non_ioq_irq_names);
+
+ /* Free IRQs for Input/Output (Tx/Rx) queues */
+ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector,
+ oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+ if (octep_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_disable_msix(oct);
+enable_msix_err:
+ octep_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+ octep_free_irqs(oct);
+ octep_disable_msix(oct);
+ octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ wmb();
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
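+
+/* Note (inferred from the register usage above, not stated elsewhere in
+ * this patch): writing the processed counts back to
+ * inst_cnt_reg/pkts_sent_reg acknowledges them to the hardware, and the
+ * RESEND writes (bit 59, matching CN93_INTR_R_RESEND) ask the hardware to
+ * re-raise the interrupt if any unacknowledged work remains, closing the
+ * race with packets that arrive between NAPI completion and re-enable.
+ */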
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+ rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+ /* More polling is needed if Tx completion processing is still pending
+ * or at least 'budget' Rx packets were processed.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ napi_complete(napi);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+ octep_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up the device and bringing it up.
+ * -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_napi_add(oct);
+ octep_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_set_rx_state(oct, true);
+
+ ret = octep_get_link_status(oct);
+ if (!ret)
+ octep_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_oq_dbell_init(oct);
+
+ ret = octep_get_link_status(oct);
+ if (ret)
+ octep_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+ octep_clean_irqs(oct);
+setup_irq_err:
+ octep_free_oqs(oct);
+setup_oq_err:
+ octep_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Stop the device's Tx/Rx operations, bring down the link, and
+ * free all resources allocated for the Tx/Rx queues and interrupts.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_set_link_status(oct, false);
+ octep_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+
+ octep_clean_irqs(oct);
+ octep_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_free_oqs(oct);
+ octep_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+ if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ return 0;
+
+ /* Stop the queue if unable to send */
+ netif_stop_subqueue(iq->netdev, iq->q_no);
+
+ /* Check again and restart the queue in case NAPI freed enough Tx ring
+ * entries between the first check and the stop above; the re-check
+ * avoids a lost wakeup that would leave the queue stopped.
+ */
+ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ netif_start_subqueue(iq->netdev, iq->q_no);
+ iq->stats.restart_cnt++;
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue a packet to the Octeon hardware Tx queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_tx_sglist_desc *sglist;
+ struct octep_tx_buffer *tx_buffer;
+ struct octep_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_instr_hdr *ih;
+ struct octep_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ u16 q_no, wi;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+ if (octep_iq_full_check(iq)) {
+ iq->stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->tlen = skb->len;
+ ih->pkind = oct->pkind;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+ memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
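+ /* Layout note: each octep_tx_sglist_desc packs four (length, pointer)
+ * pairs, with lengths stored in reverse order within len[]: entry i
+ * uses dma_ptr[i & 3] and len[3 - (i & 3)]. For example, a head plus
+ * two frags fills sglist[0] as:
+ * len[3] = head len, dma_ptr[0] = head dma (stored above)
+ * len[2] = frag 0 len, dma_ptr[1] = frag 0 dma
+ * len[1] = frag 1 len, dma_ptr[2] = frag 1 dma
+ */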
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
+
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
+ atomic_inc(&iq->instr_pending);
+ wi++;
+ if (wi == iq->max_count)
+ wi = 0;
+ iq->host_write_index = wi;
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ iq->stats.instr_posted++;
+ skb_tx_timestamp(skb);
+ return NETDEV_TX_OK;
+
+dma_map_sg_err:
+ /* Entry 'si' failed to map (si >= 1 here; the head buffer at entry 0
+ * was mapped before the frag loop). Unmap the already-mapped frag
+ * entries (si - 1 down to 1), then the head at entry 0. Lengths were
+ * stored at len[3 - (i & 3)] above, so unmap using the same index.
+ */
+ while (si > 1) {
+ --si;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ struct octep_device *oct = netdev_priv(netdev);
+ int q;
+
+ octep_get_if_stats(oct);
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->collisions = oct->iface_tx_stats.xscol;
+ stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task() - Work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, which potentially clears a Tx queue timeout
+ * condition.
+ */
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_stop(netdev);
+ octep_open(netdev);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ queue_work(octep_wq, &oct->tx_timeout_task);
+}
+
+static int octep_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ int err = 0;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops octep_netdev_ops = {
+ .ndo_open = octep_open,
+ .ndo_stop = octep_stop,
+ .ndo_start_xmit = octep_start_xmit,
+ .ndo_get_stats64 = octep_get_stats64,
+ .ndo_tx_timeout = octep_tx_timeout,
+ .ndo_set_mac_address = octep_set_mac,
+ .ndo_change_mtu = octep_change_mtu,
+};
+
+/**
+ * octep_ctrl_mbox_task() - Work queue task to handle ctrl mbox messages.
+ *
+ * @work: pointer to ctrl mbox work_struct
+ *
+ * Poll ctrl mbox message queue and handle control messages from firmware.
+ */
+static void octep_ctrl_mbox_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ ctrl_mbox_task);
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req req = {};
+ struct octep_ctrl_mbox_msg msg;
+ int ret = 0;
+
+ msg.msg = &req;
+ while (true) {
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg);
+ if (ret)
+ break;
+
+ switch (req.hdr.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req.link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req: %u\n", req.hdr.cmd);
+ break;
+ }
+ }
+}
+
+/**
+ * octep_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Set up the Octeon device hardware operations, configuration and
+ * control mailbox.
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int i, ret;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR regions */
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ oct->mmio[i].hw_addr =
+ ioremap(pci_resource_start(oct->pdev, i * 2),
+ pci_resource_len(oct->pdev, i * 2));
+ oct->mmio[i].mapped = 1;
+ }
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ dev_info(&pdev->dev,
+ "Setting up OCTEON CN93XX PF PASS%d.%d\n",
+ OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cn93_pf(oct);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "%s: unsupported device\n", __func__);
+ goto unsupported_dev;
+ }
+
+ oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ return -1;
+ }
+ oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
+ ctrl_mbox->h2fq.elem_cnt,
+ ctrl_mbox->f2hq.elem_sz,
+ ctrl_mbox->f2hq.elem_cnt);
+
+ return 0;
+
+unsupported_dev:
+ return -1;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+ int i;
+
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+ vfree(oct->mbox[i]);
+ oct->mbox[i] = NULL;
+ }
+
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+ oct->hw_ops.soft_reset(oct);
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ if (oct->mmio[i].mapped)
+ iounmap(oct->mmio[i].hw_addr);
+ }
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_device *octep_dev = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto err_dma_mask;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_device),
+ OCTEP_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_dev = netdev_priv(netdev);
+ octep_dev->netdev = netdev;
+ octep_dev->pdev = pdev;
+ octep_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_dev);
+
+ err = octep_device_setup(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto err_octep_config;
+ }
+ INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+
+ netdev->netdev_ops = &octep_netdev_ops;
+ octep_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ netdev->hw_features = NETIF_F_SG;
+ netdev->features |= netdev->hw_features;
+ netdev->min_mtu = OCTEP_MIN_MTU;
+ netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->mtu = OCTEP_DEFAULT_MTU;
+
+ octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_dev->mac_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto register_dev_err;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+register_dev_err:
+ octep_device_cleanup(octep_dev);
+err_octep_config:
+ free_netdev(netdev);
+err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+err_pci_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * octep_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_remove(struct pci_dev *pdev)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ cancel_work_sync(&oct->tx_timeout_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_driver = {
+ .name = OCTEP_DRV_NAME,
+ .id_table = octep_pci_id_tbl,
+ .probe = octep_probe,
+ .remove = octep_remove,
+};
+
+/**
+ * octep_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+ /* work queue for all deferred tasks */
+ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+ if (!octep_wq) {
+ pr_err("%s: Failed to create common workqueue\n",
+ OCTEP_DRV_NAME);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&octep_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_DRV_NAME, ret);
+ destroy_workqueue(octep_wq);
+ return ret;
+ }
+
+ pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+ return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+ pci_unregister_driver(&octep_driver);
+ destroy_workqueue(octep_wq);
+
+ pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000000..025626a61383
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octep_mmio {
+ /* Kernel virtual address at which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_pci_win_regs {
+ u8 __iomem *pci_win_wr_addr;
+ u8 __iomem *pci_win_rd_addr;
+ u8 __iomem *pci_win_wr_data;
+ u8 __iomem *pci_win_rd_data;
+};
+
+struct octep_hw_ops {
+ void (*setup_iq_regs)(struct octep_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ int (*soft_reset)(struct octep_device *oct);
+ void (*reinit_regs)(struct octep_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_iq *iq);
+
+ void (*enable_interrupts)(struct octep_device *oct);
+ void (*disable_interrupts)(struct octep_device *oct);
+
+ void (*enable_io_queues)(struct octep_device *oct);
+ void (*disable_io_queues)(struct octep_device *oct);
+ void (*enable_iq)(struct octep_device *oct, int q);
+ void (*disable_iq)(struct octep_device *oct, int q);
+ void (*enable_oq)(struct octep_device *oct, int q);
+ void (*disable_oq)(struct octep_device *oct, int q);
+ void (*reset_io_queues)(struct octep_device *oct);
+ void (*dump_registers)(struct octep_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_mbox_data {
+ u32 cmd;
+ u32 total_len;
+ u32 recv_len;
+ u32 rsvd;
+ u64 *data;
+};
+
+/* Octeon device mailbox */
+struct octep_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ u32 q_no;
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ struct octep_mbox_data mbox_data;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_ioq_vector {
+ char name[OCTEP_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_device *octep_dev;
+ struct octep_iq *iq;
+ struct octep_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_link_mode_bit_indices {
+ OCTEP_LINK_MODE_10GBASE_T = 0,
+ OCTEP_LINK_MODE_10GBASE_R,
+ OCTEP_LINK_MODE_10GBASE_CR,
+ OCTEP_LINK_MODE_10GBASE_KR,
+ OCTEP_LINK_MODE_10GBASE_LR,
+ OCTEP_LINK_MODE_10GBASE_SR,
+ OCTEP_LINK_MODE_25GBASE_CR,
+ OCTEP_LINK_MODE_25GBASE_KR,
+ OCTEP_LINK_MODE_25GBASE_SR,
+ OCTEP_LINK_MODE_40GBASE_CR4,
+ OCTEP_LINK_MODE_40GBASE_KR4,
+ OCTEP_LINK_MODE_40GBASE_LR4,
+ OCTEP_LINK_MODE_40GBASE_SR4,
+ OCTEP_LINK_MODE_50GBASE_CR2,
+ OCTEP_LINK_MODE_50GBASE_KR2,
+ OCTEP_LINK_MODE_50GBASE_SR2,
+ OCTEP_LINK_MODE_50GBASE_CR,
+ OCTEP_LINK_MODE_50GBASE_KR,
+ OCTEP_LINK_MODE_50GBASE_LR,
+ OCTEP_LINK_MODE_50GBASE_SR,
+ OCTEP_LINK_MODE_100GBASE_CR4,
+ OCTEP_LINK_MODE_100GBASE_KR4,
+ OCTEP_LINK_MODE_100GBASE_LR4,
+ OCTEP_LINK_MODE_100GBASE_SR4,
+ OCTEP_LINK_MODE_NBITS
+};
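+
+/* The supported_modes/advertised_modes fields of struct
+ * octep_iface_link_info below are bitmaps over these indices; e.g. a
+ * port advertising 25G CR and KR sets
+ * BIT(OCTEP_LINK_MODE_25GBASE_CR) | BIT(OCTEP_LINK_MODE_25GBASE_KR),
+ * which octep_get_link_ksettings() translates into ethtool link modes.
+ */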
+
+/* Hardware interface link state information. */
+struct octep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: whether the physical link is up or down. */
+ u8 oper_up;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_device {
+ struct octep_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_mmio mmio[OCTEP_MMIO_REGIONS];
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* Pointers to Octeon Tx queues */
+ struct octep_iq *iq[OCTEP_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_oq *oq[OCTEP_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* PCI Window registers to access some hardware CSRs */
+ struct octep_pci_win_regs pci_win_regs;
+ /* Hardware operations */
+ struct octep_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_mbox *mbox[OCTEP_MAX_VF];
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* control mbox over pf */
+ struct octep_ctrl_mbox ctrl_mbox;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Work entry to handle ctrl mbox interrupt */
+ struct work_struct ctrl_mbox_task;
+};
+
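+/* Revision decoding implied by the masks below: bits [3:2] of the PCI
+ * revision ID hold the major revision (0 is reported as PASS1) and
+ * bits [1:0] the minor revision, giving the "PASS%d.%d" printed at
+ * probe time.
+ */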
+static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_write_csr(octep_dev, reg_off, value) \
+ writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_write_csr64(octep_dev, reg_off, val64) \
+ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr(octep_dev, reg_off) \
+ readl((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr64(octep_dev, reg_off) \
+ readq((octep_dev)->mmio[0].hw_addr + (reg_off))
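+
+/* For illustration only (hypothetical call sites, not part of this
+ * driver): ring a Tx doorbell or read back an Rx count through BAR0,
+ * using offsets from octep_regs_cn9k_pf.h, e.g.
+ *
+ *	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(0), 1ULL);
+ *	u64 cnts = octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(0));
+ */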
+
+/* Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+static inline u64
+OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr)
+{
+ u64 val64;
+
+ addr |= 1ull << 53; /* read 8 bytes */
+ writeq(addr, oct->pci_win_regs.pci_win_rd_addr);
+ val64 = readq(oct->pci_win_regs.pci_win_rd_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64);
+
+ return val64;
+}
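+
+/* For illustration only (hypothetical usage): read a chip register
+ * through the BAR0 window, e.g.
+ *
+ *	u64 boot = OCTEP_PCI_WIN_READ(oct, CN93_RST_BOOT);
+ *
+ * where CN93_RST_BOOT comes from octep_regs_cn9k_pf.h.
+ */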
+
+/* Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to write
+ * @param val - Value to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+static inline void
+OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val)
+{
+ writeq(addr, oct->pci_win_regs.pci_win_wr_addr);
+ writeq(val, oct->pci_win_regs.pci_win_wr_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val);
+}
+
+extern struct workqueue_struct *octep_wq;
+
+int octep_device_setup(struct octep_device *oct);
+int octep_setup_iqs(struct octep_device *oct);
+void octep_free_iqs(struct octep_device *oct);
+void octep_clean_iqs(struct octep_device *oct);
+int octep_setup_oqs(struct octep_device *oct);
+void octep_free_oqs(struct octep_device *oct);
+void octep_oq_dbell_init(struct octep_device *oct);
+void octep_device_setup_cn93_pf(struct octep_device *oct);
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
+int octep_oq_process_rx(struct octep_oq *oq, int budget);
+void octep_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _OCTEP_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
new file mode 100644
index 000000000000..3d5d39a52fe6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
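+
+/* Per-ring register addresses below are computed as base + ring * offset.
+ * Worked example: CN93_SDP_R_IN_ENABLE(1) = 0x10010 + 1 * CN93_RING_OFFSET
+ * = 0x10010 + 0x20000 = 0x30010.
+ */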
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
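+
+/* For illustration only (hypothetical usage): the ring-info fields are
+ * decoded from a single 64-bit CSR read, e.g.
+ *
+ *	u64 rinfo = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ *	int srn = CN93_SDP_EPF_RINFO_SRN(rinfo);
+ *	int nvfs = CN93_SDP_EPF_RINFO_NVFS(rinfo);
+ */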
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+ (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+ (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+ (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN93_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR (0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR (0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring) \
+ (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring) \
+ (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring) \
+ (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When an MBOX write from the PF happens to a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is an RO register; the interrupt can be cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* The first two registers below carry PF-to-VF traffic; the last one
+ * carries data from VF to PF.
+ */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+ (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+ (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+ (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+ (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+ (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+ (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
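A note on the addressing scheme above: every per-ring and per-index register is computed as a fixed base plus index times a stride, so typical usage looks like this minimal sketch (CN93_RING_OFFSET and CN93_BIT_ARRAY_OFFSET are defined earlier in this header; bar0 is a hypothetical ioremap()'d BAR0 pointer, not a name from this patch):

	void __iomem *bar0;	/* hypothetical mapped BAR0 base */
	u64 in_cnts, mbox_sts;

	in_cnts  = readq(bar0 + CN93_SDP_R_IN_CNTS_ISM(3));	/* ISM state for ring 3 */
	mbox_sts = readq(bar0 + CN93_SDP_EPF_MBOX_RINT(0));	/* mbox bit-array word 0 */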
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+ (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+ (CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+ (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_MAC_PF_RING_CTL(mac) \
+ (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))
+
+#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF)
+#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
+#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
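A worked decode of the three field extractors above (the register value is arbitrary, and the field meanings are inferred from the macro names, purely for illustration):

	u64 val  = 0x231004;                            /* arbitrary read-back value */
	u64 npfs = CN93_SDP_MAC_PF_RING_CTL_NPFS(val);  /* 0x231004 & 0xF          = 4    */
	u64 srn  = CN93_SDP_MAC_PF_RING_CTL_SRN(val);   /* (0x231004 >> 8) & 0xFF  = 0x10 */
	u64 rppf = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);  /* (0x231004 >> 16) & 0x3F = 0x23 */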
+
+/* Number of non-queue interrupts in CN93xx */
+#define CN93_NUM_NON_IOQ_INTR 16
+#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
new file mode 100644
index 000000000000..392d9b0da0d7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+static void octep_oq_reset_indices(struct octep_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -1;
+}
+
+/**
+ * octep_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_setup_oq(struct octep_device *oct, int q_no)
+{
+ struct octep_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an additional header is filled in by Octeon after the length field
+ * in Rx packets. This header carries extra packet information.
+ */
+ if (oct->caps_enabled)
+ oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -1;
+}
+
+/**
+ * octep_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_oq_free_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_oq_reset_indices(oq);
+}
+
+/**
+ * octep_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_free_oq(struct octep_oq *oq)
+{
+ struct octep_device *oct = oq->octep_dev;
+ int q_no = oq->q_no;
+
+ octep_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_oqs(struct octep_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_free_oq(oct->oq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_oq_dbell_init(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_oqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of new packets received since the previous check.
+ */
+static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter has reached half its max value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
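A worked example of the wrap handling above, with illustrative numbers: if last_pkt_count is 0xEFFFFFF0 and the register reads 0xF0000005, new_pkts is first 0x15; since the read crossed the 0xF0000000 threshold, the counter is written back to clear it (write-one-to-clear semantics assumed) and re-read, and if, say, 2 more packets landed in between, new_pkts becomes 0x17 and last_pkt_count is left at 2.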
+
+/**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in the Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by this API accounts only for full packets, not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_oq_process_rx(struct octep_device *oct,
+ struct octep_oq *oq, u16 pkts_to_process)
+{
+ struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ struct octep_rx_buffer *buff_info;
+ struct octep_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the big-endian length field to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE +
+ OCTEP_OQ_RESP_HW_EXT_SIZE;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (resp_hw_ext &&
+ resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets that can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking until the budget is exhausted or no new packets are seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
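For context, a hypothetical NAPI poll callback wiring this entry point in (the driver's real per-vector structure and handler live elsewhere; the names below are illustrative only):

	struct octep_ioq_vector_sketch {
		struct napi_struct napi;
		struct octep_oq *oq;
	};

	static int octep_napi_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct octep_ioq_vector_sketch *vec =
			container_of(napi, struct octep_ioq_vector_sketch, napi);
		int workdone = octep_oq_process_rx(vec->oq, budget);

		if (workdone < budget && napi_complete_done(napi, workdone)) {
			/* re-enable this vector's queue interrupt (device-specific) */
		}
		return workdone;
	}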
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 000000000000..782a24f27f3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the skb->data
+ * @info_ptr: DMA address of host memory used by hardware to post the
+ * packet count. This is currently unused, to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED 0x1
+#define OCTEP_CSUM_IP_VERIFIED 0x2
+#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has advertised support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 reserved:62;
+
+ /* checksum verified. */
+ u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is big-endian and must be converted to CPU byte order.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer))
+
+/* Output Queue statistics. Each output queue has three stats fields. */
+struct octep_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_oq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE (sizeof(struct octep_oq))
+#endif /* _OCTEP_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 000000000000..5a520d37bea0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset the various indices of the Tx queue data structure. */
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+ atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ *
+ * Return: non-zero if the budget was exhausted, zero otherwise.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_device *oct = iq->octep_dev;
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
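+ /* each sglist desc packs 4 (len, dma_ptr) pairs: i >> 2 picks the desc, i & 3 the slot */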
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ atomic_sub(compl_pkts, &iq->instr_pending);
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+ if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+ ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ netif_wake_subqueue(iq->netdev, iq->q_no);
+ return !budget;
+}
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ atomic_set(&iq->instr_pending, 0);
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_clean_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_iq_free_pending(oct->iq[i]);
+ octep_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_setup_iq(struct octep_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+ struct octep_device *oct = iq->octep_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 000000000000..2ef57980eb47
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list */
+struct octep_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes to Octeon hardware as a gather buffer.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * align by adding 3 before calculating the max SGLIST entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+ (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
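A worked size check for the two macros above, assuming MAX_SKB_FRAGS is 17 and a 64-bit dma_addr_t: sizeof(struct octep_tx_sglist_desc) is 4 * 2 + 4 * 8 = 40 bytes, OCTEP_SGLIST_ENTRIES_PER_PKT is (17 + 1 + 3) / 4 = 5, and OCTEP_SGLIST_SIZE_PER_PKT is therefore 200 bytes per ring slot.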
+
+struct octep_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+ /* Packets dropped due to excessive collisions */
+ u64 xscol;
+
+ /* Packets dropped due to excessive deferral */
+ u64 xsdef;
+
+ /* Packets sent that experienced multiple collisions before successful
+ * transmission
+ */
+ u64 mcol;
+
+ /* Packets sent that experienced a single collision before successful
+ * transmission
+ */
+ u64 scol;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Packets sent with an octet count < 64 */
+ u64 hist_lt64;
+
+ /* Packets sent with an octet count == 64 */
+ u64 hist_eq64;
+
+ /* Packets sent with an octet count of 65-127 */
+ u64 hist_65to127;
+
+ /* Packets sent with an octet count of 128-255 */
+ u64 hist_128to255;
+
+ /* Packets sent with an octet count of 256-511 */
+ u64 hist_256to511;
+
+ /* Packets sent with an octet count of 512-1023 */
+ u64 hist_512to1023;
+
+ /* Packets sent with an octet count of 1024-1518 */
+ u64 hist_1024to1518;
+
+ /* Packets sent with an octet count of > 1518 */
+ u64 hist_gt1518;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets sent that experienced a transmit underflow and were
+ * truncated
+ */
+ u64 undflw;
+
+ /* Control/PAUSE packets sent */
+ u64 ctl;
+};
+
+/* Input Queue statistics. Each input queue has seven stats fields. */
+struct octep_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) of
+ * an Octeon device has one such structure to represent it.
+ */
+struct octep_iq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_iq_stats stats;
+
+ /* This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator: 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+ /* Request ID */
+ u64 rid:16;
+
+ /* PCIe port to use for response */
+ u64 pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ u64 scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ u64 rlenssz:14;
+
+ /* Desired destination port for result */
+ u64 dport:6;
+
+ /* Opcode Specific parameters */
+ u64 param:8;
+
+ /* Opcode for the return packet */
+ u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and filled by the driver based on firmware/hardware
+ * capabilities. These optional headers are together called Front Data,
+ * and their size is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_instr_hdr ih;
+ u64 ih64;
+ };
+
+ /* Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /* Input Instruction Response Header. */
+ struct octep_instr_resp_hdr irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
+#endif /* _OCTEP_TX_H_ */
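To make the 64-byte format concrete, here is a minimal sketch of filling one descriptor for a packet whose buffers sit behind a single gather list (purely illustrative; the field usage is inferred from the comments above, and iq/tx_buffer mirror the names in octep_tx.c):

	struct octep_tx_desc_hw *hw_desc = &iq->desc_ring[iq->host_write_index];

	hw_desc->ih64 = 0;                      /* start from a clean instruction header */
	hw_desc->ih.tlen = skb->len;            /* total data length */
	hw_desc->ih.gather = 1;                 /* dptr points at a gather list */
	hw_desc->ih.gsz = 1 + skb_shinfo(skb)->nr_frags; /* entries in the gather list */
	hw_desc->dptr = tx_buffer->sglist_dma;  /* DMA address of the sglist */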
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 639893d87055..6b4f640163f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -32,9 +32,12 @@ config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
+ select DIMLIB
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on MACSEC || !MACSEC
help
This driver supports Marvell's OcteonTX2 NIC physical function.
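The MACSEC || !MACSEC idiom above ties this driver's tristate to MACSEC: if MACSEC is built as a module, OCTEONTX2_PF is limited to being a module as well, so built-in code never links against modular MACSEC symbols.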
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 7f4a4ca9af78..3cf4c8285c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 186d00a9ab35..c8724bfa86b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -498,6 +498,32 @@ static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = cgx->mac_ops->fifo_len;
+ num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC0 gets half of the FIFO; the rest get a quarter each */
+ if (lmac_id == 0)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
@@ -578,31 +604,78 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
}
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ /* Pause frames are not enabled; just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &tx_pause, &rx_pause);
+ is_pfc_enabled = !rx_pause;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -722,26 +795,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
-{
- struct cgx *cgx = cgxd;
- u64 cfg;
-
- if (is_dev_rpm(cgx))
- return 0;
-
- if (!is_lmac_valid(cgx, lmac_id))
- return -ENODEV;
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
- return 0;
-}
-
static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause)
{
@@ -782,21 +835,8 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
if (!is_lmac_valid(cgx, lmac_id))
return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
-
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -813,21 +853,127 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
}
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
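Note the ordering in the function above: the caller's own bits are updated before the weight check, so the check only sees other PF/VFs that still rely on flow control, and a disable request is refused with -EPERM while any such user remains.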
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Nothing to do if no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
+
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ }
+
+ if (tx_pause) {
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ }
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -892,9 +1038,9 @@ int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
dev = &cgx->pdev->dev;
- dev_err(dev, "cgx port %d:%d cmd timeout\n",
- cgx->cgx_id, lmac->lmac_id);
- err = -EIO;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
goto unlock;
}
@@ -1320,11 +1466,19 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
u64 req = 0;
u64 resp;
- if (enable)
+ if (enable) {
req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
- else
- req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ /* On CN10K, firmware offloads link bring-up/down operations to ECP.
+ * On OcteonTx2, link operations are handled by the firmware itself,
+ * which can cause mbox errors; so configure the maximum time the
+ * firmware polls for the link as 1000 ms.
+ */
+ if (!is_dev_rpm(cgx))
+ req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+ } else {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ }
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
@@ -1489,6 +1643,16 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1505,6 +1669,10 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
kfree(lmac->name);
@@ -1562,6 +1730,7 @@ static struct mac_ops cgx_mac_ops = {
.tx_stats_cnt = 18,
.get_nr_lmacs = cgx_get_nr_lmacs,
.get_lmac_type = cgx_get_lmac_type,
+ .lmac_fifo_len = cgx_get_lmac_fifo_len,
.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
.mac_get_rx_stats = cgx_get_rx_stats,
.mac_get_tx_stats = cgx_get_tx_stats,
@@ -1570,6 +1739,10 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1612,6 +1785,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index ab1e4abdea38..0b06788b8d80 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -76,6 +76,13 @@
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
@@ -85,7 +92,7 @@
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
-#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define CGX_CMD_TIMEOUT 5000 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_LMAC_FWI 0
@@ -172,4 +179,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index f72ec0e2506f..d4a27c882a5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -261,4 +261,6 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+/* LINK_BRING_UP command timeout */
+#define LINKCFG_TIMEOUT GENMASK_ULL(21, 8)
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index fc6e7423cbd8..52b6016789fa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -17,6 +17,8 @@
* @resp: command response
* @link_info: link related information
* @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cgx: parent cgx port
@@ -33,6 +35,8 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
@@ -76,6 +80,7 @@ struct mac_ops {
*/
int (*get_nr_lmacs)(void *cgx);
u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ u32 (*lmac_fifo_len)(void *cgx, int lmac_id);
int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
bool enable);
/* Register Stats related functions */
@@ -107,6 +112,15 @@ struct mac_ops {
void (*mac_enadis_ptp_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4e79e918a161..8d5d5a0f68c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -33,7 +33,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 6000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -169,9 +169,12 @@ M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \
+ msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
- msg_rsp) \
+ cgx_mac_addr_update_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -239,6 +242,9 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
npc_mcam_get_stats_req, \
npc_mcam_get_stats_rsp) \
+M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \
+ npc_get_secret_key_req, \
+ npc_get_secret_key_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -287,20 +293,74 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
- nix_bandprof_get_hwinfo_rsp)
-
-/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+ nix_bandprof_get_hwinfo_rsp) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
};
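+
+/* The M() x-macro above expands each table entry into an enumerator, e.g.
+ * M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)
+ * becomes MBOX_MSG_MCS_GET_HW_INFO = 0xa00b; the handler and
+ * request/response type fields are consumed by other expansions of the
+ * same table, so each message is described in exactly one place.
+ */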
@@ -426,6 +486,7 @@ struct get_hw_cap_rsp {
struct mbox_msghdr hdr;
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
+ u8 npc_hash_extract; /* Is hash extract supported */
};
/* CGX mbox message formats */
@@ -449,6 +510,7 @@ struct cgx_fec_stats_rsp {
struct cgx_mac_addr_set_or_get {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
+ u32 index;
};
/* Structure for requesting the operation to
@@ -464,7 +526,7 @@ struct cgx_mac_addr_add_req {
*/
struct cgx_mac_addr_add_rsp {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for requesting the operation to
@@ -472,7 +534,7 @@ struct cgx_mac_addr_add_rsp {
*/
struct cgx_mac_addr_del_req {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for response against the operation to
@@ -480,7 +542,7 @@ struct cgx_mac_addr_del_req {
*/
struct cgx_max_dmac_entries_get_rsp {
struct mbox_msghdr hdr;
- u8 max_dmac_filters;
+ u32 max_dmac_filters;
};
struct cgx_link_user_info {
@@ -581,10 +643,20 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_reset_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
struct cgx_mac_addr_update_req {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
- u8 index;
+ u32 index;
+};
+
+struct cgx_mac_addr_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
@@ -609,6 +681,21 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+/* NPC mbox message formats */
+
struct npc_set_pkind {
struct mbox_msghdr hdr;
#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
@@ -732,6 +819,7 @@ enum nix_af_status {
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
};
/* For NIX RX vtag action */
@@ -1422,11 +1510,22 @@ struct npc_mcam_get_stats_rsp {
u8 stat_ena; /* enabled */
};
+struct npc_get_secret_key_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+};
+
+struct npc_get_secret_key_rsp {
+ struct mbox_msghdr hdr;
+ u64 secret_key[3];
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1434,6 +1533,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
+ int extts_on;
};
struct ptp_rsp {
@@ -1602,6 +1702,424 @@ enum cgx_af_status {
LMAC_AF_ERR_INVALID_PARAM = -1101,
LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
+};
+
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u8 all; /* Allocate one of each resource type */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Index of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* No of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Support 2 SA policy */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3; 4 * sc_id + an is used as the SA_MEM_MAP index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u8 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rulew_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+ u8 id; /* For type = PORT: if id == 0xFF (invalid), the port number is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* All resources stats mapped to PF are cleared */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
};
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000000..4a343f853b28
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1603 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
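+
+/* Note on the sequence above: setting bit 0 of the CSE slave CTRL register
+ * appears to put the stats memories into clear-on-read mode, so the read
+ * performed via the mcs_get_*_stats() helper is done only for its side
+ * effect and the returned values are discarded. This is inferred from the
+ * code flow, not from a register specification.
+ */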
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->sc.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
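+
+/* Bit layout written into SA_MAP_MEM_0X above (derived from the shifts):
+ * [7:0] sa_index0, [16:9] sa_index1, [18] rekey_ena, [19] sa_index0_vld,
+ * [20] sa_index1_vld, [21] tx_sa_active, [63:22] sectag_sci[41:0];
+ * the remaining sectag_sci[63:42] bits land in SA_MAP_MEM_1X.
+ */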
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
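+
+/* Each RX SC has four association numbers (AN 0-3), so the SA map memory
+ * is indexed as 4 * sc_id + an, one entry per (SC, AN) pair.
+ */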
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+ return 0;
+}
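+
+/* Writing all-ones to the TCAM mask registers (with the data registers
+ * left untouched) makes the reserved entry match any packet, assuming
+ * the usual TCAM convention where a set mask bit means "don't care".
+ * Together with the permissive secy policy programmed above, this forms
+ * the default bypass path for traffic with no explicit flowid match.
+ */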
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
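+
+/* Control packet rules are carved into fixed per-type windows (see the
+ * MCS_CTRLPKT_*_RULE_OFFSET defines in mcs.h), so callers pass the window
+ * base as @offset and the zero-bit search starts from there.
+ */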
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
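+
+/* The BIT_ULL(40) tests above check bit 40 of the u64-packed 48-bit DA,
+ * i.e. the I/G (group) bit of the first address octet, which would limit
+ * DA-based control packet rules to multicast addresses. This reading is
+ * inferred from the bit position rather than stated by a spec.
+ */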
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the cam resources mapped to pf */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+ for (id = 0; id < map->sc.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ /* TX SA interrupt is raised only if autorekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
+ * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies
+ * SA in SA_index1 got expired else SA in SA_index0 got expired.
+ */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ /* Process only SCs with auto rekey enabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+ /* Check if the tx_sa_active status has changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+ /* SA_index0 is expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+ /* Bit high in *PN_THRESH_REACHEDX implies
+ * corresponding SAs are expired.
+ */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+ /* Notify which lmac_id ran into the BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+ /* Notify which lmac_id ran into the PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable and clear the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+
+ /* Check which block raised the interrupt */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
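+
+/* The handler leans on the W1C/W1S register pair naming convention:
+ * writing 1 to MCSX_IP_INT_ENA_W1C masks the summary interrupt while the
+ * sub-blocks are serviced, and the final write to MCSX_IP_INT_ENA_W1S
+ * re-arms it, so events raised during servicing are not lost.
+ */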
+
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+ /* MCS enable IP interrupts */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ /* Enable CPM Rx/Tx interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ return ret;
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+ /* Check if an MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
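+
+/* The block count is computed as highest mcs_id + 1 rather than by
+ * counting list entries, so it assumes MCS block IDs are dense and
+ * start at 0.
+ */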
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
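+
+/* Decoding of the VLAN_CFGX value used above, inferred from the two
+ * writes rather than from a register spec: bit 0 enables the entry,
+ * bits [16:1] hold the ethertype (0x8100/0x88a8), and bits 17/18 appear
+ * to mark the entry as a C-tag or S-tag respectively.
+ */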
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode: 25G */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
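+
+/* Worked example: with base = 0, lmac0 is assigned channels 0-15 and
+ * lmac1 channels 16-31; RANGE is programmed as ilog2(16) = 4, i.e. each
+ * LMAC owns a 2^4-channel window starting at its BASE.
+ */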
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
+ return err;
+ }
+ }
+
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibrate */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+ /* Set MCS to perform standard IEEE 802.1AE MACsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+ hw->mcs_x2p_intf = 5; /* X2P calibration interfaces */
+ hw->mcs_blks = 1; /* MCS blocks */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+ /* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+ /* Set mcs tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+ /* Set mcs rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* per port config */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000000..64dc2b80e15d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector Enumeration */
+enum mcs_int_vec_e {
+ MCS_INT_VEC_MIL_RX_GBL = 0x0,
+ MCS_INT_VEC_MIL_RX_LMACX = 0x1,
+ MCS_INT_VEC_MIL_TX_LMACX = 0x5,
+ MCS_INT_VEC_HIL_RX_GBL = 0x9,
+ MCS_INT_VEC_HIL_RX_LMACX = 0xa,
+ MCS_INT_VEC_HIL_TX_GBL = 0xe,
+ MCS_INT_VEC_HIL_TX_LMACX = 0xf,
+ MCS_INT_VEC_IP = 0x13,
+ MCS_INT_VEC_CNT = 0x14,
+};
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 4ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+ u16 *flowid2secy_map; /* flowid to secy mapping */
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+};
+
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+};
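+
+/* The concrete ops are bound at probe time from the PCI subsystem device id:
+ * CN10K-B uses &cn10kb_mcs_ops and CNF10K-B uses cnf10kb_get_mac_ops().
+ * Callers then dispatch through the pointer, e.g.:
+ *
+ * mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+ * mcs->mcs_ops->mcs_parser_cfg(mcs);
+ */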
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
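+
+/* Usage sketch (illustrative): CSR updates combine these accessors into a
+ * read-modify-write sequence, as mcs_set_external_bypass() does:
+ *
+ * u64 val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ * mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val | BIT_ULL(6));
+ */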
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000000..7b6205414428
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+ hw->mcs_x2p_intf = 1; /* x2p calibration intf */
+ hw->mcs_blks = 7; /* MCS blocks */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN Ctag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* Enable custom tags 0 and 1 and SecTag */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* The TX SA interrupt is raised only if auto rekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when one
+ * of the two SAs mapped to the SC expires: tx_sa_active=0 means the SA in
+ * SA_index1 expired, while tx_sa_active=1 means the SA in SA_index0 expired.
+ */
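+/* Worked example (illustrative): with auto rekey enabled on SC 3 and the
+ * cached mcs->tx_sa_active[3] == 0, reading TX_SA_ACTIVEX(3) as 1 means the
+ * SA in SA_index0 (bits 6:0 of SA_MAP_MEM_0X(3)) expired; the handler below
+ * then queues a PN_THRESH_REACHED event for that SA's owner PF/VF.
+ */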
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event;
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ /* Skip SCs that do not have auto rekey enabled */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+ /* Check if the tx_sa_active status has changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+ /* tx_sa_active = 1 means SA_index0 expired, else SA_index1 */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000000..c95a8b8f5eaf
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
+#define MCSX_IP_MODE 0x900c8ull
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
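+
+/* Note: the offset macros in this header are GCC statement expressions that
+ * select the CN10K-B offset by default and the CNF10K-B one when
+ * mcs->hw->mcs_blks > 1; they rely on a variable named 'mcs' being in scope
+ * at the call site. For example, on CNF10K-B (mcs_blks = 7):
+ *
+ * mcs_reg_read(mcs, MCSX_MIL_GLOBAL); - reads CSR offset 0x60000
+ */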
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BEE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x37d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50ull + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50ull + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50ull + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000000..fa8029a94068
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "lmac_common.h"
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
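+
+/* Each MBOX_UP_MCS_MESSAGES entry expands through M() above into an "up"
+ * mailbox allocator. Sketch of one expansion (the exact message id comes
+ * from the table, which is not part of this hunk):
+ *
+ * static struct mcs_intr_info *
+ * otx2_mbox_alloc_msg_mcs_intr_notify(struct rvu *rvu, int devid);
+ *
+ * It allocates the request plus response on rvu->afpf_wq_info.mbox_up and
+ * pre-fills hdr.sig and hdr.id.
+ */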
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+ /* Check whether PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
+
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
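+/* Worker for the MCS interrupt queue: entries are added by
+ * mcs_add_intr_wq_entry() above (GFP_ATOMIC allocation under a spinlock)
+ * and drained here in process context, where the blocking mailbox round
+ * trip in mcs_notify_pfvf() is safe.
+ */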
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ /* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+ /* CNF10K-B mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id = 0, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
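mcs_alloc_rsrc() itself is not part of this hunk; judging from the call sites it behaves like a classic bitmap allocator that claims the first free index and records the owning PF/VF in the matching *2pf_map array, returning a negative value when the pool is exhausted. A plausible userspace sketch of that contract (the 64-entry pool size and owner map are assumptions):

    #include <stdint.h>

    #define NRSRC 64 /* assumed pool size for illustration */

    /* Claim the first free index and record its owner; -1 when exhausted. */
    static int alloc_rsrc(uint64_t *bmap, uint16_t *owner_map, uint16_t pcifunc)
    {
            for (int i = 0; i < NRSRC; i++) {
                    if (!(*bmap & (1ULL << i))) {
                            *bmap |= 1ULL << i;     /* mark index busy    */
                            owner_map[i] = pcifunc; /* remember the PF/VF */
                            return i;
                    }
            }
            return -1;
    }
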
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+ unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
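The port-to-(CGX, LMAC) mapping above is a plain div/mod split over a fixed per-CGX LMAC count, with one bit set per usable port. A runnable illustration, assuming four LMACs per CGX:

    #include <stdio.h>

    int main(void)
    {
            int lmac_per_cgx = 4; /* assumed value for illustration */
            unsigned long bmap = 0;

            for (int port = 0; port < 8; port++) {
                    int cgx = port / lmac_per_cgx;
                    int lmac = port % lmac_per_cgx;

                    bmap |= 1UL << port; /* every port valid in this toy run */
                    printf("port %d -> cgx %d lmac %d\n", port, cgx, lmac);
            }
            printf("bmap 0x%lx\n", bmap);
            return 0;
    }
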
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+ /* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
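The list head, work item and dedicated workqueue set up here form the usual deferred-interrupt pattern: the hard-IRQ handler appends an event to mcs_intrq_head under mcs_intrq_lock and kicks mcs_intr_work, and mcs_intr_handler_task later drains the list in process context. A hedged kernel-style sketch of the producer side (struct mcs_event and its node member are illustrative names, not taken from this patch):

    /* Sketch only: queue one event from hard-IRQ context for the worker. */
    static void queue_mcs_event(struct rvu *rvu, struct mcs_event *ev)
    {
            unsigned long flags;

            spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
            list_add_tail(&ev->node, &rvu->mcs_intrq_head);
            spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);

            queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
    }
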
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 77fd39e2c8db..f187293e3e08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -10,6 +10,14 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -200,6 +208,7 @@ enum key_fields {
NPC_ERRLEV,
NPC_ERRCODE,
NPC_LXMB,
+ NPC_EXACT_RESULT,
NPC_LA,
NPC_LB,
NPC_LC,
@@ -381,6 +390,22 @@ struct nix_rx_action {
};
/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_EXACT_NIBBLE_START 40
+#define NPC_EXACT_NIBBLE_END 43
+#define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40)
+
+/* NPC_EXACT_KEX_S nibble definitions for each field */
+#define NPC_EXACT_NIBBLE_HIT BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_OPC BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_WAY BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41)
+
+#define NPC_EXACT_RESULT_HIT BIT_ULL(0)
+#define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1)
+#define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3)
+#define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5)
+
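The NPC_EXACT_RESULT_* masks describe one packed result word: a hit flag in bit 0, a 2-bit opcode, a 2-bit way and an 11-bit index. A standalone decode sketch using the same GENMASK arithmetic (the sample value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & -(mask)))

    int main(void)
    {
            uint64_t res = 0x02A3; /* made-up exact-match result word */

            printf("hit=%llu opc=%llu way=%llu idx=%llu\n",
                   FIELD_GET(GENMASK_ULL(0, 0), res),
                   FIELD_GET(GENMASK_ULL(2, 1), res),
                   FIELD_GET(GENMASK_ULL(4, 3), res),
                   FIELD_GET(GENMASK_ULL(15, 5), res));
            return 0;
    }
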
+/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
/* NPC_PARSE_KEX_S nibble definitions for each field */
@@ -455,7 +480,7 @@ struct npc_coalesced_kpu_prfl {
u8 name[NPC_NAME_LEN]; /* KPU Profile name */
u64 version; /* KPU firmware/profile version */
u8 num_prfl; /* No of NPC profiles. */
- u16 prfl_sz[0];
+ u16 prfl_sz[];
};
struct npc_mcam_kex {
@@ -482,7 +507,7 @@ struct npc_kpu_fwdata {
* struct npc_kpu_profile_cam[entries];
* struct npc_kpu_profile_action[entries];
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct npc_lt_def {
@@ -572,7 +597,7 @@ struct npc_kpu_profile_fwdata {
* Custom KPU CAM and ACTION configuration entries.
* struct npc_kpu_fwdata kpu[kpus];
*/
- u8 data[0];
+ u8 data[];
} __packed;
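
The prfl_sz[0]/data[0] members in the three structs above move from the old GCC zero-length-array idiom to C99 flexible array members, which lets the compiler and fortify checks see the real object bounds. The allocation pattern is unchanged: one block sized as header plus trailing payload. A minimal sketch:

    #include <stdlib.h>
    #include <string.h>

    struct fw_blob {
            unsigned int len;
            unsigned char data[]; /* C99 flexible array member */
    };

    static struct fw_blob *blob_alloc(const void *src, unsigned int len)
    {
            struct fw_blob *b = malloc(sizeof(*b) + len); /* header + payload */

            if (!b)
                    return NULL;
            b->len = len;
            memcpy(b->data, src, len);
            return b;
    }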
struct rvu_npc_mcam_rule {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0fe7ad35e36f..a820bad3abb2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -155,7 +155,7 @@
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
- NPC_PARSE_NIBBLE_ERRCODE | \
+ NPC_PARSE_NIBBLE_L2L3_BCAST | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_EXDSA,
- NPC_S_KPU2_NGIO,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_NSH,
NPC_S_KPU5_CPT_IP,
NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
@@ -1124,15 +1124,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_NGIO,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1968,6 +1959,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_PPPOE,
0xffff,
0x0000,
@@ -2750,15 +2750,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_NGIO, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CPT_CTAG, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -5090,6 +5081,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8425,14 +8425,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_NGIO, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -9196,6 +9188,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
@@ -9892,14 +9892,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NGIO,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_CPT_IP, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -11974,6 +11966,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15123,7 +15123,8 @@ static struct npc_mcam_kex npc_mkex_default = {
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
/* nibble: LA..LE (ltype only) + Error code + Channel */
- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX |
+ (u64)NPC_EXACT_NIBBLE_HIT,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
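
The RX key configuration is one 64-bit word: the MCAM key size in the bits above 32 (NPC_MCAM_KEY_X2 << 32), the parse-nibble enable mask in the low bits, and now the exact-match hit nibble at bit 40. A short sketch of the assembly (the key-size encoding and nibble mask below are illustrative placeholders):

    #include <stdint.h>

    #define KEY_X2        1ULL         /* assumed key-size encoding    */
    #define EXACT_HIT_NIB (1ULL << 40) /* mirrors NPC_EXACT_NIBBLE_HIT */

    static uint64_t mk_rx_keycfg(uint64_t parse_nibbles)
    {
            return (KEY_X2 << 32)   /* key size               */
                   | parse_nibbles  /* parse-nibble enables   */
                   | EXACT_HIT_NIB; /* exact-match hit nibble */
    }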
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index e682b7bfde64..3411e2e47d46 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
@@ -25,6 +27,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
@@ -46,10 +51,153 @@
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
+
+#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+{
+ return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP;
+}
+
+static bool is_ptp_dev_cn10k(struct ptp *ptp)
+{
+ return ptp->pdev->device == PCI_DEVID_CN10K_PTP;
+}
+
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the PTP clock value has crossed 0.5 seconds,
+ * it is too late to update the PPS threshold value, so
+ * update the threshold after 1 second.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
+
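PTP_CLOCK_COMP holds a 32.32 fixed-point nanoseconds-per-cycle value, so the base compensation is (10^9 << 32) / freq: exactly 1 << 32 at 1 GHz, and proportionally larger for slower clocks. A quick standalone check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t freqs[] = { 1000000000ULL, 900000000ULL }; /* 1 GHz, 900 MHz */

            for (int i = 0; i < 2; i++) {
                    uint64_t comp = (1000000000ULL << 32) / freqs[i];

                    /* 32.32 fixed point: whole nanoseconds in the top half */
                    printf("freq %llu Hz -> comp 0x%llx (%llu ns/cycle + frac)\n",
                           (unsigned long long)freqs[i],
                           (unsigned long long)comp,
                           (unsigned long long)(comp >> 32));
            }
            return 0;
    }
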
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -77,8 +225,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -100,15 +248,22 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
* initially in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
@@ -117,7 +272,7 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
/* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ *clk = ptp->read_ptp_tstmp(ptp);
return 0;
}
@@ -141,6 +296,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* sclk is in MHz */
ptp->clock_rate = sclk * 1000000;
+ /* Program the seconds rollover value to 1 second */
+ if (is_ptp_dev_cnf10kb(ptp))
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -165,22 +324,63 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi counter rolls over to zero one clock cycle before
+ * it reaches the one second boundary. So program pps_lo_incr such
+ * that the PPS threshold comparison at the one second boundary
+ * succeeds and the PPS edge toggles. After each one second boundary,
+ * the hrtimer handler is invoked to reprogram the PPS threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
+
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* Initial compensation value to start the nanosecs counter */
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
- *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10k(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
return 0;
}
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
- writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
return 0;
}
@@ -214,6 +414,17 @@ static int ptp_probe(struct pci_dev *pdev,
if (!first_ptp_block)
first_ptp_block = ptp;
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
return 0;
error_free:
@@ -238,6 +449,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
if (IS_ERR_OR_NULL(ptp))
return;
@@ -305,6 +519,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 1b81a0493cd3..b9d92abc3844 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -15,7 +15,12 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
u32 clock_rate;
+ u32 clock_period;
};
struct ptp *ptp_get(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index e695fa0e82a9..a70e1153fa04 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -22,6 +22,7 @@ static struct mac_ops rpm_mac_ops = {
.tx_stats_cnt = 34,
.get_nr_lmacs = rpm_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
@@ -30,6 +31,10 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -54,14 +59,60 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
+ struct lmac *lmac;
u64 cfg;
if (!rpm)
return;
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
if (enable) {
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
@@ -83,13 +134,94 @@ int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
return -ENODEV;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
return 0;
}
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
+
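Each *_PAUSE_QUANTA / *_QUANTA_THRESH register packs two PFC classes as adjacent 16-bit fields, which is why the loop above picks one register per class pair and then shifts by 16 for the odd class. A standalone sketch of that field placement (0x7FF is the driver's default pause time; the class range is truncated for brevity):

    #include <stdint.h>
    #include <stdio.h>

    #define PAUSE_TIME 0x7FFULL /* RPM_DEFAULT_PAUSE_TIME */

    int main(void)
    {
            for (int i = 0; i < 4; i++) {     /* classes 0..3 as a sample */
                    int pair = i / 2;         /* which CLxy register pair */
                    int shift = (i % 2) * 16; /* low or high 16-bit lane  */
                    uint64_t cfg = PAUSE_TIME << shift;

                    printf("class %d -> CL%d%d regs, shift %2d, cfg 0x%llx\n",
                           i, pair * 2, pair * 2 + 1, shift,
                           (unsigned long long)cfg);
            }
            return 0;
    }
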
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
@@ -113,8 +245,12 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
}
@@ -127,56 +263,28 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_t *rpm = rpmd;
u64 cfg;
- if (enable) {
- /* Enable 802.3 pause frame mode */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable receive pause frames */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Set pause time and interval */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
- cfg | RPM_DEFAULT_PAUSE_TIME);
- /* Set pause interval as the hardware default is too short */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
- cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable all PFC classes */
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
- /* Disable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- }
+ /* Enable channel mask for all LMACS */
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -243,6 +351,35 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
return err;
}
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 hi_perf_lmac;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = rpm->mac_ops->fifo_len;
+ num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC marked as hi_perf gets half of the FIFO; the rest get a quarter each */
+ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS);
+ hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL;
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+}
+
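With three active LMACs the shared FIFO is split unevenly: the LMAC flagged hi_perf in CGXX_CMRX_RX_LMACS receives half, the other two a quarter each, so the shares still sum to the whole FIFO. A worked example with an assumed 64 KB FIFO:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fifo_len = 65536; /* assumed total FIFO size    */
            int hi_perf_lmac = 1;          /* assumed hardware selection */

            for (int lmac = 0; lmac < 3; lmac++)
                    printf("lmac %d -> %u bytes\n", lmac,
                           lmac == hi_perf_lmac ? fifo_len / 2 : fifo_len / 4);
            return 0;
    }
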
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -252,23 +389,20 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
if (!rpm || lmac_id >= rpm->lmac_count)
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_100G_R) {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
-
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
- } else {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
}
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
return 0;
}
@@ -281,9 +415,85 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
return;
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
- if (enable)
+ if (enable) {
cfg |= RPMX_RX_TS_PREPEND;
- else
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
+}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, class_en;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 57c8a687b488..77f2ef9e1425 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -16,6 +16,7 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -33,20 +34,53 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
@@ -57,4 +91,10 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3ca6b942ebe2..3f5e09b77d4b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -16,14 +16,14 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
+#include "mcs.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
-
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -68,6 +68,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
hw->cap.nix_shaper_toggle_wait = false;
+ hw->cap.npc_hash_extract = false;
+ hw->cap.npc_exact_match_enabled = false;
hw->rvu = rvu;
if (is_rvu_pre_96xx_C0(rvu)) {
@@ -85,6 +87,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
+
+ if (is_rvu_npc_hash_extract_en(rvu))
+ hw->cap.npc_hash_extract = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -412,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
*hwvf = cfg & 0xFFF;
}
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
int pf, func;
u64 cfg;
@@ -520,8 +525,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
- if (err)
- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
@@ -1119,6 +1127,12 @@ cpt:
goto cgx_err;
}
+ err = rvu_npc_exact_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "failed to initialize exact match table\n");
+ return err;
+ }
+
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
@@ -1144,6 +1158,12 @@ cpt:
rvu_program_channels(rvu);
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
return 0;
nix_err:
@@ -1988,6 +2008,7 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
rsp->nix_shaping = hw->cap.nix_shaping;
+ rsp->npc_hash_extract = hw->cap.npc_hash_extract;
return 0;
}
@@ -2545,6 +2566,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_reset(rvu, pcifunc);
+
mutex_lock(&rvu->flr_lock);
/* Reset order should reflect inter-block dependencies:
* 1. Reset any packet/work sources (NIX, CPT, TIM)
@@ -2561,6 +2585,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid leak.
+ * Since the LF is detached, pass -1 as the LF number.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
@@ -3268,6 +3298,7 @@ err_mbox:
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
@@ -3294,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
@@ -3329,12 +3361,18 @@ static int __init rvu_init_module(void)
if (err < 0)
goto ptp_err;
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
goto rvu_err;
return 0;
rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
@@ -3345,6 +3383,7 @@ ptp_err:
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 66e45d733824..76474385a602 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -25,6 +25,8 @@
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
struct dentry *nix;
struct dentry *npc;
struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -338,6 +344,8 @@ struct hw_cap {
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
+ bool npc_hash_extract; /* Hash extract enabled ? */
+ bool npc_exact_match_enabled; /* Exact match supported ? */
};
struct rvu_hwinfo {
@@ -369,6 +377,7 @@ struct rvu_hwinfo {
struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct npc_exact_table *table;
};
struct mbox_wq_info {
@@ -419,6 +428,7 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex_hash *mkex_hash;
bool custom;
size_t pkinds;
size_t kpus;
@@ -493,6 +503,8 @@ struct rvu {
struct ptp *ptp;
+ int mcs_blk_cnt;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -500,6 +512,12 @@ struct rvu {
/* RVU switch implementation over NPC with DMAC rules */
struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -575,6 +593,17 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
+{
+ u64 npc_const3;
+
+ npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return false;
+
+ return true;
+}
+
static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
u8 lmacid, u8 chan)
{
@@ -754,7 +783,6 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
-int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
@@ -773,14 +801,17 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
+
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
@@ -806,11 +837,20 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
-
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
+int rvu_npc_init(struct rvu *rvu);
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask);
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
@@ -842,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_exit(struct rvu *rvu);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..addc69f4b65c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -441,16 +442,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- return 0;
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
+
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
@@ -464,6 +475,11 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
if (!is_cgx_config_permitted(rvu, pcifunc))
return;
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rvu_npc_exact_reset(rvu, pcifunc);
+ return;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
@@ -574,6 +590,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -592,6 +611,9 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
if (rc >= 0) {
@@ -612,6 +634,9 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}
@@ -633,6 +658,11 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
return 0;
}
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+ return 0;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
return 0;
@@ -670,6 +700,10 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non-hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, true);
@@ -685,6 +719,10 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Re-enable drop on non-hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, false);
@@ -823,6 +861,22 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu)
return fifo_len;
}
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
+ struct mac_ops *mac_ops;
+ void *cgxd;
+
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ return 0;
+
+ mac_ops = get_mac_ops(cgxd);
+ if (!mac_ops->lmac_fifo_len)
+ return 0;
+
+ return mac_ops->lmac_fifo_len(cgxd, lmac);
+}
+
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
@@ -853,6 +907,45 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
@@ -860,11 +953,9 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
void *cgxd;
- if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
- return 0;
-
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -876,13 +967,11 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
mac_ops = get_mac_ops(cgxd);
if (req->set)
- mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
- &rsp->tx_pause,
- &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
}
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
@@ -1014,7 +1103,7 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
- return -ENXIO;
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
if (!is_pf_cgxmapped(rvu, pf))
return -EPERM;
@@ -1043,7 +1132,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -1053,12 +1142,16 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
return cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
- struct msg_rsp *rsp)
+ struct cgx_mac_addr_update_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
@@ -1066,6 +1159,73 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a73a8017e0ee..38bbae5d9ae0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2
+#define CPT_CTX_ILEN 2ULL
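
The new ULL suffix matters whenever this constant is shifted toward the upper bits of a 64-bit register value: with a plain int literal the shift is evaluated in 32-bit arithmetic before any widening. A standalone sketch of the pitfall (the shift distance is illustrative, not the driver's actual field offset):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* uint64_t bad = 2 << 60;  -- undefined behaviour: the shift is
	 * performed on a 32-bit int first and widened too late.
	 */
	uint64_t good = 2ULL << 60;	/* 64-bit arithmetic end to end */

	printf("0x%016llx\n", (unsigned long long)good);
	return 0;
}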
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -480,7 +480,7 @@ static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
*/
if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
- val |= rvu->hw->cpt_chan_base;
+ val |= (u64)rvu->hw->cpt_chan_base;
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
@@ -579,7 +579,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
- return blkaddr;
+ return false;
/* Registers that can be accessed from PF/VF */
if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
@@ -605,6 +605,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
switch (offset) {
+ case CPT_AF_DIAG:
case CPT_AF_CTL:
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
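
The `return false` change in the hunk above is subtle: is_valid_offset() returns bool, and in C a negative blkaddr converts to true, so the error path used to report an invalid block address as a valid offset. A compact demonstration of the conversion trap, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool is_valid(int blkaddr)
{
	if (blkaddr < 0)
		return blkaddr;	/* bug: any non-zero value, even -19, is true */
	return true;
}

int main(void)
{
	printf("is_valid(-19) = %d\n", is_valid(-19));	/* prints 1 */
	return 0;
}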
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a09a507369ac..a1970ebedf95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -18,6 +18,8 @@
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
+#include "rvu_npc_hash.h"
+#include "mcs.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -226,6 +228,350 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+ seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+ seq_puts(filp, "\n MCS secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+ seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+ seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
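
Each RVU_DEBUG_SEQ_FOPS() use above generates the file_operations instance handed to debugfs_create_file(). As a hedged sketch, such a macro in this file plausibly expands along the usual seq_file single_open() pattern (simplified: the in-tree macro also takes a write_op argument, ignored here, and may differ in details):

#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)			\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, rvu_dbg_##read_op, inode->i_private);	\
}									\
									\
static const struct file_operations rvu_dbg_##name##_fops = {		\
	.owner	 = THIS_MODULE,						\
	.open	 = rvu_dbg_open_##name,					\
	.read	 = seq_read,						\
	.llseek	 = seq_lseek,						\
	.release = single_release,					\
}

The mcs pointer passed as the data argument to debugfs_create_file() lands in inode->i_private and is forwarded by single_open() into the seq_file, which is why the show routines above can recover it through filp->private.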
+
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
@@ -248,7 +594,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
- return -ENOSPC;
+ return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
@@ -407,7 +753,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
- return -ENOSPC;
+ return -ENOMEM;
/* Get the maximum width of a column */
lf_str_size = get_max_column_width(rvu);
@@ -1224,6 +1570,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
@@ -2598,6 +2946,170 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
+ struct npc_exact_table_entry *cam_entry;
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i, j;
+
+ u8 bitmap = 0;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Check if there is at least one entry in mem table */
+ if (!table->mem_tbl_entry_cnt)
+ goto dump_cam_table;
+
+ /* Print table headers */
+ seq_puts(s, "\n\tExact Match MEM Table\n");
+ seq_puts(s, "Index\t");
+
+ for (i = 0; i < table->mem_table.ways; i++) {
+ mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
+ struct npc_exact_table_entry, list);
+
+ seq_printf(s, "Way-%d\t\t\t\t\t", i);
+ }
+
+ seq_puts(s, "\n");
+ for (i = 0; i < table->mem_table.ways; i++)
+ seq_puts(s, "\tChan MAC \t");
+
+ seq_puts(s, "\n\n");
+
+ /* Print mem table entries */
+ for (i = 0; i < table->mem_table.depth; i++) {
+ bitmap = 0;
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!mem_entry[j])
+ continue;
+
+ if (mem_entry[j]->index != i)
+ continue;
+
+ bitmap |= BIT(j);
+ }
+
+ /* No valid entries */
+ if (!bitmap)
+ continue;
+
+ seq_printf(s, "%d\t", i);
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!(bitmap & BIT(j))) {
+ seq_puts(s, "nil\t\t\t\t\t");
+ continue;
+ }
+
+ seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
+ mem_entry[j]->mac);
+ mem_entry[j] = list_next_entry(mem_entry[j], list);
+ }
+ seq_puts(s, "\n");
+ }
+
+dump_cam_table:
+
+ if (!table->cam_tbl_entry_cnt)
+ goto done;
+
+ seq_puts(s, "\n\tExact Match CAM Table\n");
+ seq_puts(s, "index\tchan\tMAC\n");
+
+ /* Traverse cam table entries */
+ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
+ seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
+ cam_entry->mac);
+ }
+
+done:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
+
+static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i;
+
+ table = rvu->hw->table;
+
+ seq_puts(s, "\n\tExact Table Info\n");
+ seq_printf(s, "Exact Match Feature : %s\n",
+ rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return 0;
+
+ seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
+
+ seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
+
+ seq_puts(s, "\n\tMEM Table Info\n");
+ seq_printf(s, "Ways : %d\n", table->mem_table.ways);
+ seq_printf(s, "Depth : %d\n", table->mem_table.depth);
+ seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
+ seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
+ seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
+
+ seq_puts(s, "\n\tCAM Table Info\n");
+ seq_printf(s, "Depth : %d\n", table->cam_table.depth);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
+
+static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ struct npc_key_field *field;
+ u16 chan, pcifunc;
+ int blkaddr, i;
+ u64 cfg, cam1;
+ char *str;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ table = rvu->hw->table;
+
+ field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
+
+ seq_puts(s, "\n\t Exact Hit on drop status\n");
+ seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
+
+ for (i = 0; i < table->num_drop_rules; i++) {
+ pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
+
+ /* channel will always be in keyword 0 */
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
+ chan = field->kw_mask[0] & cam1;
+
+ str = (cfg & 1) ? "enabled" : "disabled";
+
+ seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(table->counter_idx[i])),
+ chan, str);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
+
static void rvu_dbg_npc_init(struct rvu *rvu)
{
rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
@@ -2606,8 +3118,22 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
&rvu_dbg_npc_mcam_info_fops);
debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_mcam_rules_fops);
+
debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_rx_miss_act_fops);
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return;
+
+ debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_entries_fops);
+
+ debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_info_fops);
+
+ debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_drop_cnt_fops);
}
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
@@ -2872,6 +3398,7 @@ create:
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index d0ab8f233a02..88dee589cb21 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -10,6 +10,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "rvu_struct.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "octeontx2-af"
@@ -1436,14 +1437,75 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
};
+static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ bool enabled;
+
+ enabled = rvu_npc_exact_has_match_table(rvu);
+
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
+ enabled ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_npc_exact_disable_feature(rvu);
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 enable;
+
+ if (kstrtoull(val.vstr, 10, &enable)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 value is supported");
+ return -EINVAL;
+ }
+
+ if (enable != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only disabling exact match feature is supported");
+ return -EINVAL;
+ }
+
+ if (rvu_npc_exact_can_disable_feature(rvu))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't disable exact match feature; Please try before any configuration");
+ return -EFAULT;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
};
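
Once registered, the string-typed param flows through rvu_af_npc_exact_feature_validate() and then the disable hook. A plausible devlink CLI interaction, with the PCI device address as a placeholder (per the validate routine, only the value 1 is accepted):

devlink dev param show pci/0002:01:00.0 name npc_exact_feature_disable
devlink dev param set pci/0002:01:00.0 name npc_exact_feature_disable value 1 cmode runtime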
/* Devlink switch mode */
@@ -1501,6 +1563,7 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
+ size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1522,8 +1585,12 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- err = devlink_params_register(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+ /* Register the exact match devlink param only on CN10K-B */
+ size = ARRAY_SIZE(rvu_af_dl_params);
+ if (!rvu_npc_exact_has_match_table(rvu))
+ size -= 1;
+
+ err = devlink_params_register(dl, rvu_af_dl_params, size);
if (err) {
dev_err(rvu->dev,
"devlink params register failed with error %d", err);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8b1948aaa0a..7646bb2ec89b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -14,6 +14,7 @@
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
+#include "rvu_npc_hash.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
@@ -296,7 +297,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
- struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
@@ -326,13 +326,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
- mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
-
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
- rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -512,11 +505,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
-
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -533,7 +526,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -2068,8 +2061,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -2092,7 +2085,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
@@ -3800,9 +3793,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_enable(rvu, pcifunc);
} else {
if (!nix_rx_multicast)
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_disable(rvu, pcifunc);
}
return 0;
@@ -3878,7 +3877,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
@@ -3891,8 +3890,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
- rc = -EBUSY;
- poll_tmo = jiffies + usecs_to_jiffies(10000);
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
@@ -3918,7 +3917,7 @@ exit:
/* Restore state of cgx tx */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
@@ -4011,9 +4010,13 @@ linkcfg:
return 0;
/* Update transmit credits for CGX links */
- lmac_fifo_len =
- rvu_cgx_get_fifolen(rvu) /
- cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, lmac);
+ return 0;
+ }
return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
(lmac_fifo_len - req->maxlen) / 16);
}
@@ -4065,7 +4068,10 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
u16 lbk_max_frs, lmac_max_frs;
+ unsigned long lmac_bmap;
u64 tx_credits, cfg;
+ u64 lmac_fifo_len;
+ int iter;
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
@@ -4099,12 +4105,23 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Skip when cgx is not available or lmac cnt is zero */
if (lmac_cnt <= 0)
continue;
- tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
- lmac_max_frs) / 16;
- /* Enable credits and set credit pkt count to max allowed */
- cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
- for (link = slink; link < (slink + lmac_cnt); link++) {
+
+ /* Get LMAC id's from bitmap */
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, iter);
+ continue;
+ }
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+
+ link = iter + slink;
nix_hw->tx_credits[link] = tx_credits;
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
@@ -4279,8 +4296,14 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
- rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
@@ -4578,6 +4601,12 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
pfvf->hw_rx_tstamp_en = false;
}
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
@@ -5314,6 +5343,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
aq_req.op = NIX_AQ_INSTOP_WRITE;
memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
/* Clear higher layer enable bit in the mid profile, just in case */
aq_req.prof.hl_en = 0;
aq_req.prof_mask.hl_en = 1;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c0005a1feee6..1e348fd0d930 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -15,6 +15,7 @@
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"
+#include "rvu_npc_hash.h"
#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
@@ -402,6 +403,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, struct mcam_entry *entry,
bool *enable)
{
+ struct rvu_npc_mcam_rule *rule;
u16 owner, target_func;
struct rvu_pfvf *pfvf;
u64 rx_action;
@@ -423,6 +425,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
+ /* fix up not needed for the rules added by user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -489,8 +497,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
/* PF installing VF rule */
- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -598,7 +606,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
struct npc_install_flow_req req = { 0 };
struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
+ struct nix_rx_action action = { 0 };
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
@@ -619,7 +627,6 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, index);
} else {
- *(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
}
@@ -650,7 +657,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
- struct nix_rx_action action;
+ struct nix_rx_action action = { 0 };
u64 relaxed_mask;
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
@@ -678,14 +685,14 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
}
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
action.index = pfvf->promisc_mce_idx;
@@ -825,7 +832,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
u8 mac_addr[ETH_ALEN] = { 0 };
- struct nix_rx_action action;
+ struct nix_rx_action action = { 0 };
struct rvu_pfvf *pfvf;
u16 vf_func;
@@ -854,14 +861,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
}
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
action.index = pfvf->mcast_mce_idx;
}
@@ -916,7 +923,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc, u64 rx_action)
{
int actindex, index, bank, entry;
- bool enable;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return;
@@ -924,6 +932,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_lock(&mcam->lock);
for (index = 0; index < mcam->bmap_entries; index++) {
if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
bank = npc_get_bank(mcam, index);
actindex = index;
entry = index & (mcam->banksize - 1);
@@ -1081,6 +1097,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
/* Delete multicast and promisc MCAM entries */
@@ -1090,8 +1109,39 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
NIXLF_PROMISC_ENTRY, false);
}
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+
+ mutex_lock(&mcam->lock);
+
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->intf != intf)
+ continue;
+
+ if (rule->entry != entry)
+ continue;
+
+ rule->enable = enable;
+ mutex_unlock(&mcam->lock);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ entry, enable);
+
+ return true;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return false;
+}
+
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
* in set_rx_mode mbox handler.
*/
@@ -1166,14 +1216,6 @@ void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-
-#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-
static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
struct npc_mcam_kex *mkex, u8 intf)
{
@@ -1247,6 +1289,9 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
}
+
+ /* Program mkex hash profile */
+ npc_program_mkex_hash(rvu, blkaddr);
}
static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
@@ -1448,6 +1493,7 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
profile->lt_def = &npc_lt_defaults;
profile->mkex = &npc_mkex_default;
+ profile->mkex_hash = &npc_mkex_hash_default;
return 0;
}
@@ -1635,7 +1681,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
* Firmware database method.
* Default KPU profile.
*/
- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
@@ -1804,7 +1850,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
-
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -1900,6 +1945,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
u64 nibble_ena, rx_kex, tx_kex;
@@ -1912,15 +1958,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
if (nibble_ena) {
tx_kex &= ~NPC_PARSE_NIBBLE;
tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
}
/* Configure RX interfaces */
@@ -2032,6 +2078,7 @@ int rvu_npc_init(struct rvu *rvu)
rvu_npc_setup_interfaces(rvu, blkaddr);
+ npc_config_secret_key(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
@@ -2519,7 +2566,7 @@ alloc:
/* Copy MCAM entry indices into mbox response entry_list.
* Requester always expects indices in ascending order, so
- * so reverse the list if reverse bitmap is used for allocation.
+ * reverse the list if reverse bitmap is used for allocation.
*/
if (!req->contig && rsp->count) {
index = 0;
@@ -2547,6 +2594,14 @@ alloc:
return 0;
}
+/* Mark the bitmap to reserve the mcam slot */
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ npc_mcam_set_bit(mcam, entry_idx);
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ff2b21999f36..7c4e1acd0f77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -10,6 +10,8 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
#define NPC_BYTESM GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
@@ -227,6 +229,25 @@ static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
return true;
}
+static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 40 ... 43:
+ type = NPC_EXACT_RESULT;
+ break;
+
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
u8 key_nibble, u8 intf)
{
@@ -276,6 +297,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
default:
return;
}
+
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -445,7 +467,8 @@ do { \
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
@@ -509,8 +532,8 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u8 lid, lt, ld, bitnr;
+ u64 cfg, masked_cfg;
u8 key_nibble = 0;
- u64 cfg;
/* Scan and note how parse result is going to be in key.
* A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
@@ -518,12 +541,24 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
* will be concatenated in key.
*/
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
- cfg &= NPC_PARSE_NIBBLE;
- for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ masked_cfg = cfg & NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
key_nibble++;
}
+ /* Ignore exact match bits for mcam entries except the first rule
+ * which is drop on hit. This first rule is configured explicitly by
+ * exact match code.
+ */
+ masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ bitnr = NPC_EXACT_NIBBLE_START;
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
+ NPC_EXACT_NIBBLE_START) {
+ npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
/* Scan and note how layer data is going to be in key */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
@@ -624,9 +659,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
* If any bits in mask are 0 then corresponding bits in value are
* dont care.
*/
-static void npc_update_entry(struct rvu *rvu, enum key_fields type,
- struct mcam_entry *entry, u64 val_lo,
- u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry dummy = { {0} };
@@ -705,8 +740,6 @@ static void npc_update_entry(struct rvu *rvu, enum key_fields type,
}
}
-#define IPV6_WORDS 4
-
static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
@@ -779,7 +812,8 @@ static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
- struct rvu_npc_mcam_rule *output, u8 intf)
+ struct rvu_npc_mcam_rule *output, u8 intf,
+ int blkaddr)
{
u64 dmac_mask = ether_addr_to_u64(mask->dmac);
u64 smac_mask = ether_addr_to_u64(mask->smac);
@@ -828,6 +862,7 @@ do { \
} while (0)
NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+
NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0);
@@ -854,10 +889,12 @@ do { \
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
npc_update_vlan_features(rvu, entry, features, intf);
+
+ npc_update_field_hash(rvu, intf, entry, blkaddr, features,
+ pkt, mask, opkt, omask);
}
-static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
- u16 entry)
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
{
struct rvu_npc_mcam_rule *iter;
@@ -1023,8 +1060,9 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
u16 owner = req->hdr.pcifunc;
struct msg_rsp write_rsp;
struct mcam_entry *entry;
- int entry_index, err;
bool new = false;
+ u16 entry_index;
+ int err;
installed_features = req->features;
features = req->features;
@@ -1032,7 +1070,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
entry_index = req->entry;
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
- req->intf);
+ req->intf, blkaddr);
if (is_npc_intf_rx(req->intf))
npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
@@ -1057,7 +1095,8 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, missing_features,
&def_ucast_rule->packet,
&def_ucast_rule->mask,
- &dummy, req->intf);
+ &dummy, req->intf,
+ blkaddr);
installed_features = req->features | missing_features;
}
@@ -1098,14 +1137,6 @@ find_rule:
write_req.cntr = rule->cntr;
}
- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
- &write_rsp);
- if (err) {
- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
- if (new)
- kfree(rule);
- return err;
- }
/* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
@@ -1132,6 +1163,18 @@ find_rule:
if (req->default_rule)
pfvf->def_ucast_rule = rule;
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
/* VF's MAC address is being changed via PF */
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
@@ -1420,3 +1463,98 @@ void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
}
mutex_unlock(&mcam->lock);
}
+
+/* Single drop on non-hit rule starting from 0th index. This is an extension
+ * to the RPM MAC filter to support more rules.
+ */
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ struct npc_mcam_write_entry_req req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp rsp;
+ bool enabled;
+ int blkaddr;
+ int err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Bail out if no exact match support */
+ if (!rvu_npc_exact_has_match_table(rvu)) {
+ dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If 0th entry is already used, return err */
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
+ if (enabled) {
+ dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
+ __func__, mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Add this entry to mcam rules list */
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ /* Disable rule by default. Enable rule when first dmac filter is
+ * installed
+ */
+ rule->enable = false;
+ rule->chan = chan_val;
+ rule->chan_mask = chan_mask;
+ rule->entry = mcam_idx;
+ rvu_mcam_add_rule(mcam, rule);
+
+ /* Reserve slot 0 */
+ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);
+
+ /* Allocate counter for this single drop on non hit rule */
+ cntr_req.hdr.pcifunc = 0; /* AF request */
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n",
+ __func__, err);
+ return -EFAULT;
+ }
+ *counter_idx = cntr_rsp.cntr;
+
+ /* Fill in fields for this mcam entry */
+ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
+ exact_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
+ chan_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
+ bcast_mcast_mask, 0, NIX_INTF_RX);
+
+ req.intf = NIX_INTF_RX;
+ req.set_cntr = true;
+ req.cntr = cntr_rsp.cntr;
+ req.entry = mcam_idx;
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
+ __func__, mcam_idx);
+ return err;
+ }
+
+ dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
+ __func__, mcam_idx, req.cntr);
+
+ /* Keep the entry disabled until the first DMAC filter is installed */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
new file mode 100644
index 000000000000..bdd65ce56a32
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_FS_H
+#define __RVU_NPC_FS_H
+
+#define IPV6_WORDS 4
+
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf);
+
+#endif /* __RVU_NPC_FS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
new file mode 100644
index 000000000000..594029007f85
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
+ size_t width_bits)
+{
+ const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
+ const size_t msb = start_bit + width_bits - 1;
+ const size_t lword = start_bit >> 6;
+ const size_t uword = msb >> 6;
+ size_t lbits;
+ u64 hi, lo;
+
+ if (lword == uword)
+ return (input[lword] >> (start_bit & 63)) & mask;
+
+ lbits = 64 - (start_bit & 63);
+ hi = input[uword];
+ lo = (input[lword] >> (start_bit & 63));
+ return ((hi << lbits) | lo) & mask;
+}
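
A worked example makes the two-word path concrete: extracting 8 bits starting at bit 60 of a 128-bit value takes the top nibble of word 0 and the low nibble of word 1. A standalone copy for experimentation (userspace types; __uint128_t is a GCC/Clang extension, as in the driver):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static u64 wide_extract(const u64 input[], size_t start_bit, size_t width_bits)
{
	const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
	const size_t msb = start_bit + width_bits - 1;
	const size_t lword = start_bit >> 6;
	const size_t uword = msb >> 6;
	size_t lbits;
	u64 hi, lo;

	if (lword == uword)
		return (input[lword] >> (start_bit & 63)) & mask;

	lbits = 64 - (start_bit & 63);
	hi = input[uword];
	lo = input[lword] >> (start_bit & 63);
	return ((hi << lbits) | lo) & mask;
}

int main(void)
{
	u64 key[2] = { 0xF000000000000000ULL, 0xAULL };

	/* Bits 60..63 give the low nibble 0xF from key[0]; bits 64..67
	 * give the high nibble 0xA from key[1]; the result is 0xAF.
	 */
	printf("0x%llx\n", (unsigned long long)wide_extract(key, 60, 8));
	return 0;
}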
+
+static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
+{
+ u64 prev_orig_word = 0;
+ u64 cur_orig_word = 0;
+ size_t extra = key_bit_len % 64;
+ size_t max_idx = key_bit_len / 64;
+ size_t i;
+
+ if (extra)
+ max_idx++;
+
+ for (i = 0; i < max_idx; i++) {
+ cur_orig_word = key[i];
+ key[i] = key[i] << 1;
+ key[i] |= ((prev_orig_word >> 63) & 0x1);
+ prev_orig_word = cur_orig_word;
+ }
+}
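
rvu_npc_lshift_key() shifts the whole multi-word key left by one bit, carrying each word's MSB into bit 0 of the next word. A tiny standalone check of that carry behaviour:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t key[2] = { 0x8000000000000000ULL, 0 };
	uint64_t prev = 0, cur;
	int i;

	for (i = 0; i < 2; i++) {	/* same loop body as above */
		cur = key[i];
		key[i] = (key[i] << 1) | ((prev >> 63) & 1);
		prev = cur;
	}
	/* The MSB of word 0 moved into word 1: prints 0x0 0x1 */
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)key[0], (unsigned long long)key[1]);
	return 0;
}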
+
+static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ size_t key_bit_len)
+{
+ u32 hash_out = 0;
+ u64 temp_data = 0;
+ int i;
+
+ for (i = data_bit_len - 1; i >= 0; i--) {
+ temp_data = (data[i / 64]);
+ temp_data = temp_data >> (i % 64);
+ temp_data &= 0x1;
+ if (temp_data)
+ hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
+
+ rvu_npc_lshift_key(key, key_bit_len);
+ }
+
+ return hash_out;
+}
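
The loop above is the Toeplitz construction: scan the data MSB-first and, for every set bit, XOR in the 32-bit window of the key aligned with that bit, sliding the key one bit per data bit. The same scheme underlies RSS hashing in NICs. A compact byte-oriented reference sketch, independent of the RVU key layout (the key must be at least len + 4 bytes; the sample values are arbitrary):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t toeplitz(const uint8_t *data, size_t len, const uint8_t *key)
{
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8) | key[3];
	uint32_t hash = 0;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		for (bit = 7; bit >= 0; bit--) {
			if (data[i] & (1u << bit))
				hash ^= window;
			/* slide the window: shift in the next key bit */
			window = (window << 1) | ((key[i + 4] >> bit) & 1);
		}
	}
	return hash;
}

int main(void)
{
	const uint8_t key[12] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b,
				  0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d };
	const uint8_t data[8] = { 66, 9, 149, 187, 161, 26, 168, 228 };

	printf("0x%08x\n", toeplitz(data, sizeof(data), key));
	return 0;
}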
+
+u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+ u64 *secret_key, u8 intf, u8 hash_idx)
+{
+ u64 hash_key[3];
+ u64 data_padded[2];
+ u32 field_hash;
+
+ hash_key[0] = secret_key[1] << 31;
+ hash_key[0] |= secret_key[2];
+ hash_key[1] = secret_key[1] >> 33;
+ hash_key[1] |= secret_key[0] << 31;
+ hash_key[2] = secret_key[0] >> 33;
+
+ data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
+ field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+
+ field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
+ field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
+ return field_hash;
+}
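+
+/* Key packing above, for reference: the three secret key words are
+ * concatenated into one logical 159-bit Toeplitz key,
+ *
+ *	key[158:0] = secret_key[0][63:0] : secret_key[1][63:0] :
+ *		     secret_key[2][30:0]
+ *
+ * stored least-significant word first (hash_key[0] holds key bits 63:0).
+ * Only the low 31 bits of secret_key[2] participate, which is why the
+ * shifts are by 31 and 33 rather than 32.
+ */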
+
+static u64 npc_update_use_hash(int lt, int ld)
+{
+ u64 cfg = 0;
+
+ switch (lt) {
+ case NPC_LT_LC_IP6:
+ /* Update use_hash (bit 20) and bytesm1 (bits 19:16)
+ * in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ ld ? 0x8 : 0x18,
+ 0x1, 0x0, 0x10);
+ break;
+ }
+
+ return cfg;
+}
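+
+/* Worked expansion of the config built above for ld = 0 (illustrative;
+ * the field layout comes from KEX_LD_CFG_USE_HASH() in rvu_npc_hash.h):
+ *
+ *	KEX_LD_CFG_USE_HASH(0x1, 0x03, 0x18, 0x1, 0x0, 0x10)
+ *	  = (0x1 << 20)		use_hash: extract the hash result
+ *	  | (0x03 << 16)	bytesm1: 4 bytes, i.e. the 32-bit hash
+ *	  | (0x18 << 8)		hdr_ofs
+ *	  | (0x1 << 7)		ena
+ *	  | (0x0 << 6)		flags_ena
+ *	  | (0x10 & 0x3F)	key_ofs
+ *	  = 0x131890
+ */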
+
+static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg = npc_update_use_hash(lt, ld);
+
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ }
+ }
+ }
+ }
+}
+
+static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg = npc_update_use_hash(lt, ld);
+
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ }
+ }
+ }
+}
+
+void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract) {
+ dev_info(rvu->dev, "HW does not support secret key configuration\n");
+ return;
+ }
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+ RVU_NPC_HASH_SECRET_KEY0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
+ RVU_NPC_HASH_SECRET_KEY1);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
+ RVU_NPC_HASH_SECRET_KEY2);
+ }
+}
+
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract) {
+ dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
+ return;
+ }
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+ }
+}
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_get_secret_key_req req;
+ struct npc_get_secret_key_rsp rsp;
+ u64 ldata[2], cfg;
+ u32 field_hash;
+ u8 hash_idx;
+
+ if (!rvu->hw->cap.npc_hash_extract) {
+ dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
+ return;
+ }
+
+ req.intf = intf;
+ rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
+
+ for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+ if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
+ u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
+ u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
+ u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
+ switch (ltype & ltype_mask) {
+ /* If hash extraction is enabled for IPv6, then the
+ * 128-bit IPv6 source and destination addresses are
+ * each hashed down to a 32-bit value.
+ */
+ case NPC_LT_LC_IP6:
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ u32 src_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+ ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ mkex_hash,
+ rsp.secret_key,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+ field_hash, 0, 32, 0, intf);
+ memcpy(&opkt->ip6src, &pkt->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&omask->ip6src, &mask->ip6src,
+ sizeof(mask->ip6src));
+ break;
+ }
+
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ u32 dst_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+ ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ mkex_hash,
+ rsp.secret_key,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+ field_hash, 0, 32, 0, intf);
+ memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&omask->ip6dst, &mask->ip6dst,
+ sizeof(mask->ip6dst));
+ }
+ break;
+ }
+ }
+ }
+ }
+}
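+
+/* Net effect of the function above: the 128-bit IPv6 address in the MCAM
+ * entry is replaced by its 32-bit field hash, while opkt/omask retain the
+ * original packet address and mask so that installed rules can still be
+ * listed and matched back to what the user configured.
+ */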
+
+int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+ struct npc_get_secret_key_req *req,
+ struct npc_get_secret_key_rsp *rsp)
+{
+ u64 *secret_key = rsp->secret_key;
+ u8 intf = req->intf;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
+ secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
+ * @mac_addr: MAC address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ return mac;
+}
+
+/**
+ * rvu_exact_prepare_mdata - Make mdata for mcam entry
+ * @mac: MAC address
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @mask: LDATA mask.
+ * Return: Meta data
+ */
+static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac);
+
+ /* Note that the mask is 48 bits wide, which excludes chan and ctype.
+ * Increase the mask bits if we need to include them as well.
+ */
+ ldata |= ((u64)chan << 48);
+ ldata |= ((u64)ctype << 60);
+ ldata &= mask;
+ ldata = ldata << 2;
+
+ return ldata;
+}
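+
+/* Illustrative mdata layout (hypothetical MAC 00:11:22:33:44:55,
+ * chan = 0x800, ctype = 0, mask = GENMASK_ULL(59, 0)):
+ *
+ *	ldata = ctype[61:60] | chan[59:48] | mac[47:0]
+ *	      = 0x0800001122334455
+ *	mdata = (ldata & mask) << 2 = 0x2000004488CD1154
+ */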
+
+/**
+ * rvu_exact_calculate_hash - calculate hash index to mem table.
+ * @rvu: resource virtualization unit.
+ * @chan: Channel number
+ * @ctype: Channel type.
+ * @mac: MAC address
+ * @mask: HASH mask.
+ * @table_depth: Depth of table.
+ * Return: Hash value
+ */
+static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
+ u64 mask, u32 table_depth)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ u64 hash_key[2];
+ u64 key_in[2];
+ u64 ldata;
+ u32 hash;
+
+ key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
+ key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
+
+ hash_key[0] = key_in[0] << 31;
+ hash_key[0] |= key_in[1];
+ hash_key[1] = key_in[0] >> 33;
+
+ ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
+
+ dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
+ ldata, hash_key[1], hash_key[0]);
+ hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
+
+ hash &= table->mem_table.hash_mask;
+ hash += table->mem_table.hash_offset;
+ dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
+
+ return hash;
+}
+
+/**
+ * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
+ * @rvu: resource virtualization unit.
+ * @way: Indicate way to table.
+ * @index: Hash index to 4 way table.
+ * @hash: Hash value.
+ *
+ * Searches 4 way table using hash index. Returns 0 on success.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
+ u32 *index, unsigned int hash)
+{
+ struct npc_exact_table *table;
+ int depth, i;
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ /* Check all the 4 ways for a free slot. */
+ mutex_lock(&table->lock);
+ for (i = 0; i < table->mem_table.ways; i++) {
+ if (test_bit(hash + i * depth, table->mem_table.bmap))
+ continue;
+
+ set_bit(hash + i * depth, table->mem_table.bmap);
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
+ __func__, i, hash);
+
+ *way = i;
+ *index = hash;
+ return 0;
+ }
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
+ bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
+ return -ENOSPC;
+}
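+
+/* The 4-way table uses a single flat bitmap: way w of hash index h maps to
+ * bit (h + w * depth). For example (hypothetical depth = 2048, hash = 5),
+ * the loop above probes bits 5, 2053, 4101 and 6149 in turn and claims the
+ * first free one.
+ */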
+
+/**
+ * rvu_npc_exact_free_id - Free seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier to be freed.
+ */
+static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ mutex_lock(&table->lock);
+ clear_bit(seq_id, table->id_bmap);
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
+}
+
+/**
+ * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ * Return: True or false.
+ */
+static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
+ if (idx == table->tot_ids) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
+ __func__, bitmap_weight(table->id_bmap, table->tot_ids));
+
+ return false;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->id_bmap);
+ mutex_unlock(&table->lock);
+
+ *seq_id = idx;
+ dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
+ * @rvu: resource virtualization unit.
+ * @index: Index to exact CAM table.
+ * Return: 0 upon success; else error number.
+ */
+static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
+ if (idx == table->cam_table.depth) {
+ mutex_unlock(&table->lock);
+ dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
+ bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
+ return -ENOSPC;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->cam_table.bmap);
+ mutex_unlock(&table->lock);
+
+ *index = idx;
+ dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
+ __func__, idx);
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_table_entry - Data for exact match table entry.
+ * @rvu: Resource virtualization unit.
+ * @enable: Enable/Disable entry
+ * @ctype: Software defined channel type. Currently set as 0.
+ * @chan: Channel number.
+ * @mac_addr: Destination mac address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
+ u8 ctype, u16 chan, u8 *mac_addr)
+
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+
+ /* Enable or disable */
+ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
+
+ /* Set Ctype */
+ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
+
+ /* Set chan */
+ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
+
+ /* MAC address */
+ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
+
+ return mdata;
+}
+
+/**
+ * rvu_exact_config_secret_key - Configure secret key.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_secret_key(struct rvu *rvu)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY1);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY2);
+}
+
+/**
+ * rvu_exact_config_search_key - Configure search key
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_search_key(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg_val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* HDR offset */
+ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
+
+ /* BYTESM1, number of bytes - 1 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
+
+ /* Enable LID and set LID to NPC_LID_LA */
+ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
+ reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);
+
+ /* Clear layer type based extraction */
+
+ /* Disable LT_EN */
+ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
+
+ /* Set LTYPE_MATCH to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
+
+ /* Set LTYPE_MASK to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
+}
+
+/**
+ * rvu_exact_config_result_ctrl - Set exact table hash control
+ * @rvu: Resource virtualization unit.
+ * @depth: Depth of Exact match table.
+ *
+ * Sets mask and offset for hash for mem table.
+ */
+static void rvu_exact_config_result_ctrl(struct rvu *rvu, u32 depth)
+{
+ int blkaddr;
+ u64 reg = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Set mask. Note that depth is a power of 2 */
+ rvu->hw->table->mem_table.hash_mask = (depth - 1);
+ reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
+
+ /* Set offset as 0 */
+ rvu->hw->table->mem_table.hash_offset = 0;
+ reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
+
+ /* Set reg for RX */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
+ /* Store hash mask and offset for s/w algorithm */
+}
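+
+/* Software-side index computation mirroring the values programmed above
+ * (hypothetical depth = 2048, i.e. hash_mask = 0x7ff, hash_offset = 0):
+ *
+ *	index = (toeplitz_hash & hash_mask) + hash_offset;
+ *	e.g. hash 0x12345678 -> (0x12345678 & 0x7ff) + 0 = 0x678
+ *
+ * This is exactly what rvu_exact_calculate_hash() does after calling
+ * rvu_npc_toeplitz_hash().
+ */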
+
+/**
+ * rvu_exact_config_table_mask - Set exact table mask.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_table_mask(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 mask = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Don't use Ctype */
+ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
+
+ /* Set chan */
+ mask |= GENMASK_ULL(59, 48);
+
+ /* Full ldata */
+ mask |= GENMASK_ULL(47, 0);
+
+ /* Store mask for s/w hash calculation */
+ rvu->hw->table->mem_table.mask = mask;
+
+ /* Set mask for RX.*/
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
+}
+
+/**
+ * rvu_npc_exact_get_max_entries - Get total number of entries in table.
+ * @rvu: resource virtualization unit.
+ * Return: Maximum table entries possible.
+ */
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ return table->tot_ids;
+}
+
+/**
+ * rvu_npc_exact_has_match_table - Checks support for exact match.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match table is supported/enabled.
+ */
+bool rvu_npc_exact_has_match_table(struct rvu *rvu)
+{
+ return rvu->hw->cap.npc_exact_match_enabled;
+}
+
+/**
+ * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ *
+ * Caller should acquire the lock.
+ * Return: Pointer to table entry.
+ */
+static struct npc_exact_table_entry *
+__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *entry = NULL;
+ struct list_head *lhead;
+
+ lhead = &table->lhead_gbl;
+
+ /* traverse to find the matching entry */
+ list_for_each_entry(entry, lhead, glist) {
+ if (entry->seq_id != seq_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * rvu_npc_exact_add_to_list - Add entry to list
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE to select MEM/CAM table.
+ * @ways: MEM table ways.
+ * @index: Index in MEM/CAM table.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @mac_addr: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence identifier
+ * @cmd: True if function is called by ethtool cmd
+ * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
+ * @pcifunc: pci function
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
+ u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
+ u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
+{
+ struct npc_exact_table_entry *entry, *tmp, *iter;
+ struct npc_exact_table *table = rvu->hw->table;
+ struct list_head *lhead, *pprev;
+
+ WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
+
+ if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
+ dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
+ return -EFAULT;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rvu_npc_exact_free_id(rvu, *seq_id);
+ dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&table->lock);
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+ lhead = &table->lhead_cam_tbl_entry;
+ table->cam_tbl_entry_cnt++;
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+ lhead = &table->lhead_mem_tbl_entry[ways];
+ table->mem_tbl_entry_cnt++;
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ kfree(entry);
+ rvu_npc_exact_free_id(rvu, *seq_id);
+
+ dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
+ return -EINVAL;
+ }
+
+ /* Add to global list */
+ INIT_LIST_HEAD(&entry->glist);
+ list_add_tail(&entry->glist, &table->lhead_gbl);
+ INIT_LIST_HEAD(&entry->list);
+ entry->index = index;
+ entry->ways = ways;
+ entry->opc_type = opc_type;
+
+ entry->pcifunc = pcifunc;
+
+ ether_addr_copy(entry->mac, mac_addr);
+ entry->chan = chan;
+ entry->ctype = ctype;
+ entry->cgx_id = cgx_id;
+ entry->lmac_id = lmac_id;
+
+ entry->seq_id = *seq_id;
+
+ entry->mcam_idx = mcam_idx;
+ entry->cmd = cmd;
+
+ pprev = lhead;
+
+ /* Insert entry in ascending order of index */
+ list_for_each_entry_safe(iter, tmp, lhead, list) {
+ if (index < iter->index)
+ break;
+
+ pprev = &iter->list;
+ }
+
+ /* Add to each table list */
+ list_add(&entry->list, pprev);
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mem_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @ways: ways for MEM table.
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
+}
+
+/**
+ * rvu_npc_exact_cam_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
+}
+
+/**
+ * rvu_npc_exact_dealloc_table_entry - dealloc table entry
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE for selection of table(MEM or CAM)
+ * @ways: ways if opc_type is MEM table.
+ * @index: Index of MEM or CAM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
+ u8 ways, u32 index)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table *table;
+ u8 null_dmac[ETH_ALEN] = { 0 };
+ int depth;
+
+ /* Prepare entry with all fields set to zero */
+ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ mutex_lock(&table->lock);
+
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index, table->cam_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
+ __func__, ways, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
+ clear_bit(index, table->cam_table.bmap);
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
+ clear_bit(index + ways * depth, table->mem_table.bmap);
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
+ __func__, index, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_alloc_table_entry - Allocate an entry
+ * @rvu: resource virtualization unit.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @index: Index of MEM table or CAM table.
+ * @ways: Ways. Only valid for MEM table.
+ * @opc_type: OPCODE to select table (MEM or CAM)
+ *
+ * Try allocating a slot from MEM table. If all 4 ways
+ * slot are full for a hash index, check availability in
+ * 32-entry CAM table for allocation.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
+ u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
+{
+ struct npc_exact_table *table;
+ unsigned int hash;
+ int err;
+
+ table = rvu->hw->table;
+
+ /* Check the 4-way mem table for a free slot */
+ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
+ table->mem_table.depth);
+ err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_MEM;
+ dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
+ __func__, *ways, *index);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
+
+ /* ways is 0 for cam table */
+ *ways = 0;
+ err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_CAM;
+ dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
+ __func__, *index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
+ return -ENOSPC;
+}
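+
+/* Typical caller pattern (sketch; compare rvu_npc_exact_add_table_entry()):
+ *
+ *	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, 0,
+ *					      &index, &ways, &opc_type);
+ *	if (err)
+ *		return err;	(both MEM and CAM tables are full)
+ *
+ * On success, opc_type tells the caller whether (ways, index) refers to
+ * the 4-way MEM table or to the 32-entry CAM fallback, where ways is 0.
+ */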
+
+/**
+ * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: Drop rule index in NPC mcam.
+ * @chan_val: Channel value.
+ * @chan_mask: Channel Mask.
+ * @pcifunc: pcifunc of interface.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
+ u64 chan_val, u64 chan_mask, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
+ continue;
+
+ return false;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX)
+ return false;
+
+ table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
+ table->drop_rule_map[i].chan_val = (u16)chan_val;
+ table->drop_rule_map[i].chan_mask = (u16)chan_mask;
+ table->drop_rule_map[i].pcifunc = pcifunc;
+ table->drop_rule_map[i].valid = true;
+ return true;
+}
+
+/**
+ * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (SDP, LBK or CGX)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @val: Channel number.
+ * @mask: Channel mask.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
+ u8 cgx_id, u8 lmac_id,
+ u64 *val, u64 *mask)
+{
+ u16 chan_val, chan_mask;
+
+ /* No support for SDP and LBK */
+ if (intf_type != NIX_INTF_TYPE_CGX)
+ return false;
+
+ chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ chan_mask = 0xfff;
+
+ if (val)
+ *val = chan_val;
+
+ if (mask)
+ *mask = chan_mask;
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
+ * @rvu: resource virtualization unit.
+ * @drop_rule_idx: Drop rule index in NPC mcam.
+ *
+ * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
+ * by retrieving the pcifunc value from data base.
+ * Return: Drop rule index.
+ */
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
+ continue;
+
+ return table->drop_rule_map[i].pcifunc;
+ }
+
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, drop_rule_idx);
+ return -1;
+}
+
+/**
+ * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (CGX, SDP or LBK)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: Channel value.
+ * @mask: Channel mask.
+ * @pcifunc: pcifunc of interface corresponding to the drop rule.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
+ u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
+ u64 *mask, u16 *pcifunc)
+{
+ struct npc_exact_table *table;
+ u64 chan_val, chan_mask;
+ bool rc;
+ int i;
+
+ table = rvu->hw->table;
+
+ if (intf_type != NIX_INTF_TYPE_CGX) {
+ dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
+ return false;
+ }
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc)
+ return false;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (val)
+ *val = table->drop_rule_map[i].chan_val;
+ if (mask)
+ *mask = table->drop_rule_map[i].chan_mask;
+ if (pcifunc)
+ *pcifunc = table->drop_rule_map[i].pcifunc;
+
+ *drop_mcam_idx = i;
+ return true;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX) {
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, i);
+ return false;
+ }
+
+ dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return false;
+}
+
+/**
+ * __rvu_npc_exact_cmd_rules_cnt_update - Update the number of dmac rules against a drop rule.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: +1 or -1.
+ * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
+ *
+ * When the first exact match entry against a drop rule is added, or the last
+ * exact match entry against a drop rule is deleted, enable_or_disable_cam is
+ * set to true.
+ * Return: Number of rules
+ */
+static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
+ int val, bool *enable_or_disable_cam)
+{
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
+ bool promisc;
+
+ table = rvu->hw->table;
+ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+
+ *cnt += val;
+
+ if (!enable_or_disable_cam)
+ goto done;
+
+ *enable_or_disable_cam = false;
+
+ if (promisc)
+ goto done;
+
+ /* If all rules are deleted and not already in promisc mode; disable cam */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+ /* If rule got added and not already in promisc mode; enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+done:
+ return *cnt;
+}
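+
+/* Reference count transitions handled above (promisc mode suppresses both):
+ *
+ *	old_cnt == 0, val == +1  ->  first rule added, enable the drop rule
+ *	*cnt == 0,    val == -1  ->  last rule deleted, disable the drop rule
+ *	anything else            ->  drop rule state unchanged
+ */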
+
+/**
+ * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Deletes entry from linked lists and free up slot in HW MEM or CAM
+ * table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table_entry *entry = NULL;
+ struct npc_exact_table *table;
+ bool disable_cam = false;
+ u32 drop_mcam_idx = -1;
+ int *cnt;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
+ if (!entry) {
+ dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
+ &table->mem_tbl_entry_cnt;
+
+ /* delete from lists */
+ list_del_init(&entry->list);
+ list_del_init(&entry->glist);
+
+ (*cnt)--;
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
+ entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
+ __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ if (entry->cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
+
+ /* No dmac filter rules; disable drop on hit rule */
+ if (disable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ mutex_unlock(&table->lock);
+
+ rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
+
+ rvu_npc_exact_free_id(rvu, seq_id);
+
+ dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
+ __func__, seq_id, entry->mac);
+ kfree(entry);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_add_table_entry - Adds a table entry
+ * @rvu: resource virtualization unit.
+ * @cgx_id: cgx identifier.
+ * @lmac_id: lmac identifier.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence number.
+ * @cmd: Whether it is invoked by ethtool cmd.
+ * @mcam_idx: NPC mcam index corresponding to MAC
+ * @pcifunc: PCI func.
+ *
+ * Creates a new exact match table entry in either CAM or
+ * MEM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
+ u16 chan, u8 ctype, u32 *seq_id, bool cmd,
+ u32 mcam_idx, u16 pcifunc)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ enum npc_exact_opc_type opc_type;
+ bool enable_cam = false;
+ u32 drop_mcam_idx;
+ u32 index;
+ u64 mdata;
+ bool rc;
+ int err;
+ u8 ways;
+
+ ctype = 0;
+
+ err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
+ if (err) {
+ dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
+ return err;
+ }
+
+ /* Write mdata to table */
+ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
+
+ if (opc_type == NPC_EXACT_OPC_CAM)
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
+ else
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
+
+ /* Insert entry to linked list */
+ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
+ mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
+ if (err) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
+ return err;
+ }
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ if (cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
+
+ /* First command rule; enable drop on hit rule */
+ if (enable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
+ dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, index, mac, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_update_table_entry - Update exact match table.
+ * @rvu: resource virtualization unit.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @old_mac: Existing MAC address entry.
+ * @new_mac: New MAC address entry.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Updates MAC address of an entry. If entry is in MEM table, new
+ * hash value may not match with old one.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
+ u8 *old_mac, u8 *new_mac, u32 *seq_id)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ u32 hash_index;
+ u64 mdata;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
+ if (!entry) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev,
+ "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
+ __func__, cgx_id, lmac_id, old_mac);
+ return -ENODATA;
+ }
+
+ /* If entry is in mem table and new hash index is different than old
+ * hash index, we cannot update the entry. Fail in these scenarios.
+ */
+ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
+ hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
+ new_mac, table->mem_table.mask,
+ table->mem_table.depth);
+ if (hash_index != entry->index) {
+ dev_dbg(rvu->dev,
+ "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
+ __func__, hash_index, entry->index);
+ mutex_unlock(&table->lock);
+ return -EINVAL;
+ }
+ }
+
+ mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
+
+ if (entry->opc_type == NPC_EXACT_OPC_MEM)
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
+ else
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
+
+ /* Update entry fields */
+ ether_addr_copy(entry->mac, new_mac);
+ *seq_id = entry->seq_id;
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
+
+ dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
+ __func__, old_mac, new_mac);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc
+ *
+ * Drop rule is against each PF. We don't support DMAC filters for
+ * VFs.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (!*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = false;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = true;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
+ * @rvu: resource virtualization unit.
+ * @req: Reset request
+ * @rsp: Reset response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ if (rc) {
+ /* TODO: how to handle this error case? */
+ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
+ __func__, pfvf->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
+ * @rvu: resource virtualization unit.
+ * @req: Update request.
+ * @rsp: Update response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ struct rvu_pfvf *pfvf;
+ u32 seq_id, mcam_idx;
+ u8 old_mac[ETH_ALEN];
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
+ __func__, req->index, req->mac_addr);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
+ if (!entry) {
+ dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
+ mutex_unlock(&table->lock);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
+ }
+ ether_addr_copy(old_mac, entry->mac);
+ seq_id = entry->seq_id;
+ mcam_idx = entry->mcam_idx;
+ mutex_unlock(&table->lock);
+
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ /* This could be a new entry */
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
+ pfvf->mac_addr, pf);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id, true,
+ mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev,
+ "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
+ * @rvu: resource virtualization unit.
+ * @req: Add request.
+ * @rsp: Add response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u32 seq_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, -1, req->hdr.pcifunc);
+
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_del - Delete DMAC filter
+ * @rvu: resource virtualization unit.
+ * @req: Delete request.
+ * @rsp: Delete response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int rc;
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
+ * @rvu: resource virtualization unit.
+ * @req: Set request.
+ * @rsp: Set response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u32 mcam_idx = -1;
+ int rc, nixlf;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = &rvu->pf[pf];
+
+ /* If the table does not have an entry, both the update entry and the
+ * delete table entry APIs below fail. Those are not failure conditions.
+ */
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pf);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
+ __func__, pfvf->mac_addr, pf);
+ }
+
+ /* find mcam entry if it exists */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
+ if (!rc) {
+ mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
+ __func__, req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev,
+ "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match feature is supported.
+ */
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ bool empty;
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return false;
+
+ mutex_lock(&table->lock);
+ empty = list_empty(&table->lhead_gbl);
+ mutex_unlock(&table->lock);
+
+ return empty;
+}
+
+/**
+ * rvu_npc_exact_disable_feature - Disable feature.
+ * @rvu: resource virtualization unit.
+ */
+void rvu_npc_exact_disable_feature(struct rvu *rvu)
+{
+ rvu->hw->cap.npc_exact_match_enabled = false;
+}
+
+/**
+ * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: PCI func to match.
+ */
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *tmp, *iter;
+ u32 seq_id;
+
+ mutex_lock(&table->lock);
+ list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
+ if (pcifunc != iter->pcifunc)
+ continue;
+
+ seq_id = iter->seq_id;
+ dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
+ pcifunc, seq_id);
+
+ mutex_unlock(&table->lock);
+ rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ mutex_lock(&table->lock);
+ }
+ mutex_unlock(&table->lock);
+}
+
+/**
+ * rvu_npc_exact_init - initialize exact match table
+ * @rvu: resource virtualization unit.
+ *
+ * Initialize HW and SW resources to manage 4way-2K table and fully
+ * associative 32-entry mcam table.
+ * Return: 0 upon success.
+ */
+int rvu_npc_exact_init(struct rvu *rvu)
+{
+ u64 bcast_mcast_val, bcast_mcast_mask;
+ struct npc_exact_table *table;
+ u64 exact_val, exact_mask;
+ u64 chan_val, chan_mask;
+ u8 cgx_id, lmac_id;
+ u32 *drop_mcam_idx;
+ u16 max_lmac_cnt;
+ u64 npc_const3;
+ int table_size;
+ int blkaddr;
+ u16 pcifunc;
+ int err, i;
+ u64 cfg;
+ bool rc;
+
+ /* Read NPC_AF_CONST3 and check whether the exact
+ * match functionality is present.
+ */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check exact match feature is supported */
+ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62))) {
+ dev_info(rvu->dev, "%s: No support for exact match support\n",
+ __func__);
+ return 0;
+ }
+
+ /* Check if kex profile has enabled EXACT match nibble */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
+ dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
+ __func__);
+ return 0;
+ }
+
+ /* Set capability to true */
+ rvu->hw->cap.npc_exact_match_enabled = true;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
+ rvu->hw->table = table;
+
+ /* Read table size, ways and depth */
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+
+ dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+ __func__, table->mem_table.ways, table->mem_table.depth);
+
+ /* Check if depth of table is not a power of 2
+ * TODO: why is __builtin_popcount() not working?
+ */
+ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
+ dev_err(rvu->dev,
+ "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
+ __func__, table->mem_table.depth);
+ return -EINVAL;
+ }
+
+ table_size = table->mem_table.depth * table->mem_table.ways;
+
+ /* Allocate bitmap for 4way 2K table */
+ table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
+ sizeof(long), GFP_KERNEL);
+ if (!table->mem_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
+
+ /* Allocate bitmap for 32 entry mcam */
+ table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
+
+ if (!table->cam_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
+
+ table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
+ table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
+ sizeof(long), GFP_KERNEL);
+
+ if (!table->id_bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
+ __func__, table->tot_ids);
+
+ /* Initialize list heads for npc_exact_table entries.
+ * This entry is used by debugfs to show entries in
+ * exact match table.
+ */
+ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
+ INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
+
+ INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
+ INIT_LIST_HEAD(&table->lhead_gbl);
+
+ mutex_init(&table->lock);
+
+ rvu_exact_config_secret_key(rvu);
+ rvu_exact_config_search_key(rvu);
+
+ rvu_exact_config_table_mask(rvu);
+ rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
+
+ /* - No drop rule for LBK
+ * - Drop rules for SDP and each LMAC.
+ */
+ exact_val = !NPC_EXACT_RESULT_HIT;
+ exact_mask = NPC_EXACT_RESULT_HIT;
+
+ /* nibble - 3 2 1 0
+ * L3B L3M L2B L2M
+ */
+ bcast_mcast_val = 0b0000;
+ bcast_mcast_mask = 0b0011;
+
+ /* Install SDP drop rule */
+ drop_mcam_idx = &table->num_drop_rules;
+
+ max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
+ for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ continue;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
+ __func__, chan_val, chan_mask, *drop_mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Filter rules are only for PF */
+ pcifunc = RVU_PFFUNC(i, 0);
+
+ dev_dbg(rvu->dev,
+ "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
+ __func__, cgx_id, lmac_id, chan_val, chan_mask);
+
+ rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
+ chan_val, chan_mask, pcifunc);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
+ __func__, cgx_id, lmac_id, chan_val);
+ return -EINVAL;
+ }
+
+ err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
+ &table->counter_idx[*drop_mcam_idx],
+ chan_val, chan_mask,
+ exact_val, exact_mask,
+ bcast_mcast_val, bcast_mcast_mask);
+ if (err) {
+ dev_err(rvu->dev,
+ "failed to configure drop rule (cgx=%d lmac=%d)\n",
+ cgx_id, lmac_id);
+ return err;
+ }
+
+ (*drop_mcam_idx)++;
+ }
+
+ dev_info(rvu->dev, "initialized exact match table successfully\n");
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
new file mode 100644
index 000000000000..3efeb09c58de
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_HASH_H
+#define __RVU_NPC_HASH_H
+
+#define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1
+#define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4
+#define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7
+
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+
+#define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \
+ ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+#define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \
+ (((hdr_ofs) << 32) | ((bytesm1) << 16) | \
+ ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \
+ ((ltype_match) << 4) | ((ltype_mask) & 0xF))
+
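+/* Worked expansion of the default RX hash[0] entry further below (symbolic,
+ * since the NPC_LID_LC/NPC_LT_LC_IP6 values come from npc.h):
+ *
+ *	KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf)
+ *	  = (0x8ULL << 32)	hdr_ofs: byte 8 of the IPv6 header, i.e.
+ *				the start of the source address
+ *	  | (0xf << 16)		bytesm1: extract 16 bytes
+ *	  | (1 << 12) | (1 << 11)	lt_en, lid_en
+ *	  | (NPC_LID_LC << 8)
+ *	  | (NPC_LT_LC_IP6 << 4) | 0xf
+ */
+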
+#define SET_KEX_LD_HASH(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg)
+
+#define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+
+#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+
+struct npc_mcam_kex_hash {
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */
+ u64 hash[NPC_MAX_INTF][NPC_MAX_HASH];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+} __packed;
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask);
+void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+ u64 *secret_key, u8 intf, u8 hash_idx);
+
+static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ .lid_lt_ld_hash_en = {
+ [NIX_INTF_RX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+ },
+
+ .hash = {
+ [NIX_INTF_RX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+
+ [NIX_INTF_TX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+ },
+
+ .hash_mask = {
+ [NIX_INTF_RX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+ },
+
+ .hash_ctrl = {
+ [NIX_INTF_RX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */
+ },
+ },
+};
+
+/* If exact match table support is enabled, enable drop rules */
+#define NPC_MCAM_DROP_RULE_MAX 30
+#define NPC_MCAM_SDP_DROP_RULE_IDX 0
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
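+/* RVU_PFFUNC() composes the 16-bit pcifunc identifier from a PF index and
+ * a function (VF) index; the RVU_PFVF_* masks and shifts are defined
+ * elsewhere in this driver.
+ */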
+
+enum npc_exact_opc_type {
+ NPC_EXACT_OPC_MEM,
+ NPC_EXACT_OPC_CAM,
+};
+
+struct npc_exact_table_entry {
+ struct list_head list;
+ struct list_head glist;
+ u32 seq_id; /* Sequence number of entry */
+ u32 index; /* Mem table or cam table index */
+	u32 mcam_idx;	/* MCAM index; valid only if "cmd" is false */
+ enum npc_exact_opc_type opc_type;
+ u16 chan;
+ u16 pcifunc;
+ u8 ways;
+ u8 mac[ETH_ALEN];
+ u8 ctype;
+ u8 cgx_id;
+ u8 lmac_id;
+	bool cmd;	/* Was this entry added via an ethtool command? */
+};
+
+struct npc_exact_table {
+ struct mutex lock; /* entries update lock */
+ unsigned long *id_bmap;
+ int num_drop_rules;
+ u32 tot_ids;
+ u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX];
+ u16 counter_idx[NPC_MCAM_DROP_RULE_MAX];
+ bool promisc_mode[NPC_MCAM_DROP_RULE_MAX];
+ struct {
+ int ways;
+ int depth;
+ unsigned long *bmap;
+		u64 mask;	/* Mask applied before hash calculation */
+		u16 hash_mask;	/* 11-bit hash mask */
+		u16 hash_offset; /* 11-bit hash offset */
+ } mem_table;
+
+ struct {
+ int depth;
+ unsigned long *bmap;
+ } cam_table;
+
+ struct {
+ bool valid;
+ u16 chan_val;
+ u16 chan_mask;
+ u16 pcifunc;
+ u8 drop_rule_idx;
+ } drop_rule_map[NPC_MCAM_DROP_RULE_MAX];
+
+#define NPC_EXACT_TBL_MAX_WAYS 4
+
+ struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS];
+ int mem_tbl_entry_cnt;
+
+ struct list_head lhead_cam_tbl_entry;
+ int cam_tbl_entry_cnt;
+
+ struct list_head lhead_gbl;
+};
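+/* Layout note: an exact-match entry lives either in the hashed, multi-way
+ * mem_table (up to NPC_EXACT_TBL_MAX_WAYS ways) or in the smaller
+ * cam_table; enum npc_exact_opc_type above records which.
+ */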
+
+bool rvu_npc_exact_has_match_table(struct rvu *rvu);
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
+int rvu_npc_exact_init(struct rvu *rvu);
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_disable_feature(struct rvu *rvu);
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx);
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc);
+#endif /* RVU_NPC_HASH_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 22cd751613cd..0e0d536645ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -266,6 +266,7 @@
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
@@ -565,7 +566,13 @@
#define NPC_AF_PCK_DEF_OIP4 (0x00620)
#define NPC_AF_PCK_DEF_OIP6 (0x00630)
#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3)
+#define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3)
#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4)
+#define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3)
#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
@@ -599,6 +606,15 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3)
+#define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_RESULT_CTL(a) (0x680 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3)
+
#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
u64 offset; \
\
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 0048b5946712..73fdb8798614 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -11,4 +11,8 @@ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index fd4f083c699e..826f691de259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000000..9ec5f38d38a8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include <net/macsec.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB			0x10 /* EPON */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
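+/* Note: the SecTag AN bits (1:0) are not among these TCI flags; they are
+ * programmed per-SA instead (see the ">> 2" when writing
+ * MCS_TX_SECY_PLCY_ST_TCI and the assoc_num stored in the TX SA policy).
+ */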
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+	}
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[0] = ~0ULL;
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ unsigned char *src = rxsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg],
+ (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+	/* Insert SecTag after 12 bytes (DA + SA) */
+ u8 tag_offset = 12;
+ u8 sectag_tci = 0;
+ u64 policy;
+ int ret;
+
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+	/* Write SecTag TCI, excluding the AN bits (1:0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+	/* If the encoding SA does not exist or is not active and
+	 * protect_frames is not set, then frames can be sent out as is.
+	 * Hence enable the policy irrespective of the secy operational
+	 * state when !protect.
+	 */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+	/* This can be enabled since the stack xmits packets only when the interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+	/* Of all the SAs, link only the encoding_sa to the SC */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ unsigned char *src = txsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+ reg++;
+ }
+
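+	/* The word after the key (plcy[0][8]) carries the association
+	 * number, i.e. the SecTag AN bits excluded from the secy TCI.
+	 */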
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+	/* Each SecY needs one TX secy and one RX secy HW resource */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ kfree(txsc);
+ return ERR_PTR(ret);
+}
+
+/* Free the Tx SC and its SAs (if any) back to the AF */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ kfree(rxsc);
+ return ERR_PTR(ret);
+}
+
+/* Free the Rx SC and its SAs (if any) back to the AF */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+	/* When updating the secy, update the RX secy as well */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn_halves.lower);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+	/* Some stats share hardware counters. When the secy policy is
+	 * updated, take a snapshot of the current stats and reset them;
+	 * the stats affected by the shared counters are handled below.
+	 */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->protect_frames == txsc->last_protect_frames)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_protect_frames)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+	/* Stick to a 16-byte key length until XPN support is added */
+ if (secy->key_len != 16)
+ return -EOPNOTSUPP;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->protect_frames)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
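+/* cn10k_mcs_init() below points netdev->macsec_ops at this table so the
+ * macsec core routes SecY/SC/SA offload requests to this driver.
+ */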
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 66da31f30d3e..9e10e7471b88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -97,11 +97,6 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
-#define OTX2_GET_RX_STATS(reg) \
- otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
-#define OTX2_GET_TX_STATS(reg) \
- otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
-
dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
@@ -222,8 +217,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -233,6 +231,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Use max receive length supported by hardware for loopback devices */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return err;
@@ -262,6 +264,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
@@ -583,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
@@ -599,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->lvl = lvl;
req->num_regs = 1;
- schq = hw->txschq_list[lvl][0];
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -608,7 +618,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
(0x2ULL << 36);
req->num_regs++;
/* MDQ config */
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
req->regval[1] = parent << 16;
req->num_regs++;
@@ -616,21 +626,29 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
+ }
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
@@ -638,11 +656,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
-
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
@@ -666,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
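+	/* Setting bit 49 of NIX_AF_SMQX_CFG initiates the SMQ flush
+	 * (assumption inferred from this helper's purpose).
+	 */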
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
@@ -796,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
@@ -853,6 +898,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
sq->head = 0;
+ sq->cons_head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
/* Set SQE threshold to 10% of total SQEs */
@@ -931,7 +977,11 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (!is_otx2_lbkvf(pfvf->pdev)) {
/* Enable receive CQ backpressure */
aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
aq->cq.bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level is same as cq pass level */
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
@@ -1036,7 +1086,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -1049,7 +1099,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1211,7 +1261,11 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1538,11 +1592,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1573,6 +1634,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+
+ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
@@ -1704,6 +1767,56 @@ out:
}
EXPORT_SYMBOL(otx2_get_max_mtu);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
@@ -1715,4 +1828,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
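For readers unfamiliar with the X-macro pattern above: each message list (MBOX_UP_CGX_MESSAGES, and now MBOX_UP_MCS_MESSAGES) expands M() once per message type, so the template stamps out one weak default up-handler per message. A hedged sketch of what one expansion looks like for the MCS entry added by this series (the struct names follow the PF override added later in this patch; treat the expansion as illustrative, not copied from the preprocessor output):

```c
/* Illustrative expansion of the M() template for the MCS interrupt
 * notification; the PF-specific otx2_mbox_up_handler_mcs_intr_notify()
 * in otx2_pf.c below overrides this weak stub.
 */
int __weak
otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pfvf,
				     struct mcs_intr_info *req,
				     struct msg_rsp *rsp)
{
	return 0;
}
EXPORT_SYMBOL(otx2_mbox_up_handler_mcs_intr_notify);
```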
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 61e52812983f..282db6fe3b08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -17,6 +17,9 @@
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
+#include <linux/time64.h>
+#include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@@ -31,6 +34,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -38,6 +42,11 @@
#define NAME_SIZE 32
+#ifdef CONFIG_DCB
+/* Max priority supported for PFC */
+#define NIX_PF_PFC_PRIO_MAX 8
+#endif
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -53,6 +62,11 @@ enum arua_mapped_qtypes {
/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
struct otx2_lmt_info {
u64 lmt_addr;
u16 lmt_id;
@@ -178,13 +192,18 @@ struct otx2_hw {
u16 rqpool_cnt;
u16 sqpool_cnt;
+#define OTX2_DEFAULT_RBUF_LEN 2048
+ u16 rbuf_len;
+ u32 xqe_size;
+
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 sqb_size;
/* NIX */
- u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 txschq_link_cfg_lvl;
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -226,6 +245,8 @@ struct otx2_hw {
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+#define CN10K_HW_MACSEC 5
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -259,6 +280,13 @@ struct refill_work {
struct otx2_nic *pf;
};
+/* PTPv2 originTimestamp structure */
+struct ptpv2_tstamp {
+ __be16 seconds_msb; /* 16 bits + */
+ __be32 seconds_lsb; /* 32 bits = 48 bits*/
+ __be32 nanoseconds;
+} __packed;
+
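As a quick illustration of that 48-bit seconds layout, a helper along these lines could pack a timespec64 into the structure (ptpv2_fill_origin_tstamp is a hypothetical name for this sketch, not part of the patch; timespec64 comes from the linux/time64.h include added above):

```c
/* Hypothetical helper: split a timespec64 into the PTPv2
 * originTimestamp layout above (16 + 32 bits of big-endian seconds,
 * followed by 32 bits of nanoseconds).
 */
static void ptpv2_fill_origin_tstamp(struct ptpv2_tstamp *ts,
				     const struct timespec64 *now)
{
	ts->seconds_msb = cpu_to_be16((now->tv_sec >> 32) & 0xffff);
	ts->seconds_lsb = cpu_to_be32(now->tv_sec & 0xffffffff);
	ts->nanoseconds = cpu_to_be32(now->tv_nsec);
}
```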
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -272,6 +300,11 @@ struct otx2_ptp {
u64 thresh;
struct ptp_pin_desc extts_config;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+ u32 base_ns;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -301,8 +334,8 @@ struct otx2_flow_config {
#define OTX2_VF_VLAN_TX_INDEX 1
u16 max_flows;
u8 dmacflt_max_flows;
- u8 *bmap_to_dmacindex;
- unsigned long dmacflt_bmap;
+ u32 *bmap_to_dmacindex;
+ unsigned long *dmacflt_bmap;
struct list_head flow_list;
};
@@ -321,6 +354,66 @@ struct dev_hw_ops {
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
+#define CN10K_MCS_SA_PER_SC 4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+ u64 InPktsUntagged;
+ u64 InPktsNoTag;
+ u64 InPktsBadTag;
+ u64 InPktsUnknownSCI;
+ u64 InPktsNoSCI;
+ u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+ u64 InOctetsValidated;
+ u64 InOctetsDecrypted;
+ u64 InPktsUnchecked;
+ u64 InPktsDelayed;
+ u64 InPktsOK;
+ u64 InPktsInvalid;
+ u64 InPktsLate;
+ u64 InPktsNotValid;
+ u64 InPktsNotUsingSA;
+ u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+ struct macsec_secy *sw_secy;
+ struct cn10k_txsc_stats stats;
+ struct list_head entry;
+ enum macsec_validation_type last_validate_frames;
+ bool last_protect_frames;
+ u16 hw_secy_id_tx;
+ u16 hw_secy_id_rx;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+ struct macsec_secy *sw_secy;
+ struct macsec_rx_sc *sw_rxsc;
+ struct cn10k_rxsc_stats stats;
+ struct list_head entry;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+ struct list_head txsc_list;
+ struct list_head rxsc_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -344,6 +437,8 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
+#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -396,6 +491,20 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
+#endif
+
+ /* napi event count. It is needed for adaptive irq coalescing. */
+ u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct cn10k_mcs_cfg *macsec_cfg;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -435,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -464,7 +578,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
+
+ if (is_dev_cn10kb(pfvf->pdev))
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -603,6 +721,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
+ dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
@@ -719,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@@ -761,6 +881,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
dir, DMA_ATTR_SKIP_CPU_SYNC);
}
+static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+{
+#ifdef CONFIG_DCB
+ if (pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+#endif
+
+ return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+}
+
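For example, with PFC enabled on priority 3 (so pfc_alloc_status[3] is set), SQ3 is attached to the dedicated SMQ allocated for that priority, while every other SQ keeps using the default SMQ at index 0.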
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -783,7 +913,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
@@ -862,6 +992,10 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -870,9 +1004,35 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+/* PFC support */
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
+#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..ccaf97bb1ce0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+static int otx2_check_pfc_config(struct otx2_nic *pfvf)
+{
+ u8 tx_queues = pfvf->hw.tx_queues, prio;
+ u8 pfc_en = pfvf->pfc_en;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ if ((pfc_en & (1 << prio)) &&
+ prio > tx_queues - 1) {
+ dev_warn(pfvf->dev,
+ "Increase number of tx queues from %d to %d to support PFC.\n",
+ tx_queues, prio + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
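The check encodes a simple rule: enabling PFC on priority p requires at least p + 1 tx queues, because each PFC-enabled priority is steered to the tx queue of the same index (see otx2_select_queue() later in this patch). A minimal standalone sketch of the same rule, runnable in userspace:

```c
#include <stdio.h>

/* PFC on priority p needs at least p + 1 tx queues. */
static int pfc_config_ok(unsigned int pfc_en, unsigned int tx_queues)
{
	for (unsigned int prio = 0; prio < 8; prio++)
		if ((pfc_en & (1u << prio)) && prio + 1 > tx_queues)
			return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", pfc_config_ok(0x30, 4)); /* prios 4,5 on 4 queues: 0 */
	printf("%d\n", pfc_config_ok(0x30, 6)); /* prios 4,5 on 6 queues: 1 */
	return 0;
}
```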
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, lvl, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* Either the PFC bit is not set
+ * or no tx scheduler is allocated for the priority
+ */
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* configure the scheduler for the TLs */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pfvf, lvl, prio, true);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
+ __func__, lvl, prio);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level, up to the configured link
+ * config level. The rest of the schedulers can be the
+ * same as hw.txschq_list.
+ */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ req->schq[lvl] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ if (!rsp->schq[lvl])
+ return -ENOSPC;
+
+ pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
+ }
+
+ /* Set the Tx schedulers for the rest of the levels to the same
+ * as hw.txschq_list, as those are common to all priorities.
+ */
+ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];
+
+ pfvf->pfc_alloc_status[prio] = true;
+ return 0;
+}
+
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+{
+ u8 pfc_en = pfvf->pfc_en;
+ u8 pfc_bit_set;
+ int err, prio;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_free_req *free_req;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* free PFC TLx nodes */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ pfvf->pfc_alloc_status[prio] = false;
+ return 0;
+}
+
+static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct net_device *dev = pfvf->netdev;
+ bool if_up = netif_running(dev);
+ struct nix_aq_enq_req *sq_aq;
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_stop_all_queues(pfvf->netdev);
+ else
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ cn10k_sq_aq->qidx = prio;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ cn10k_sq_aq->sq.ena = 1;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
+ cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ sq_aq->qidx = prio;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ sq_aq->sq.ena = 1;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->sq_mask.smq = GENMASK(8, 0);
+ sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_start_all_queues(pfvf->netdev);
+ else
+ netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
+ struct mbox *mbox = &pfvf->mbox;
+ int err, prio;
+
+ mutex_lock(&mbox->lock);
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* tx scheduler was created but user wants to disable now */
+ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
+ mutex_unlock(&mbox->lock);
+ if (if_up)
+ netif_tx_stop_all_queues(pfvf->netdev);
+
+ otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
+ if (if_up)
+ netif_tx_start_all_queues(pfvf->netdev);
+
+ /* delete the schq */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s failed to stop PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+ mutex_lock(&mbox->lock);
+ goto update_sq_smq_map;
+ }
+
+ /* Either the PFC bit is not set
+ * or a Tx scheduler is already mapped for the priority
+ */
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev,
+ "%s failed to allocate PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+update_sq_smq_map:
+ err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev, "%s failed PFC Tx schq mapping for sq:%d", __func__, prio);
+ return err;
+ }
+ }
+
+ err = otx2_pfc_txschq_config(pfvf);
+ mutex_unlock(&mbox->lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Delete the existing scheduler */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+ goto process_pfc;
+
+ /* Check if the PFC configuration can be
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+ if (err)
+ return err;
+
+process_pfc:
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request Per channel Bpids */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
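With dev->dcbnl_ops in place, the PFC state becomes reachable through the standard dcbnl netlink interface; with a recent iproute2 this is presumably exercised with something like `dcb pfc show dev eth0` and `dcb pfc set dev eth0 prio-pfc 3:on` (the dcb tool invocation is an assumption about userspace, not something this patch adds).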
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
index 2ec800f741d8..80d853b343f9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -8,7 +8,7 @@
#include "otx2_common.h"
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
- u8 *dmac_index)
+ u32 *dmac_index)
{
struct cgx_mac_addr_add_req *req;
struct cgx_mac_addr_add_rsp *rsp;
@@ -35,9 +35,10 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
struct cgx_mac_addr_set_or_get *req;
+ struct cgx_mac_addr_set_or_get *rsp;
int err;
mutex_lock(&pf->mbox.lock);
@@ -48,16 +49,31 @@ static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
return -ENOMEM;
}
+ req->index = *dmac_index;
+
ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_set_or_get *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *dmac_index = rsp->index;
+out:
mutex_unlock(&pf->mbox.lock);
return err;
}
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
- u8 *dmacindex;
+ u32 *dmacindex;
/* Store dmacindex returned by CGX/RPM driver which will
* be used for macaddr update/remove
@@ -65,13 +81,13 @@ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_add_pfmac(pf);
+ return otx2_dmacflt_add_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_add(pf, mac, dmacindex);
}
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
- u8 dmac_index)
+ u32 dmac_index)
{
struct cgx_mac_addr_del_req *req;
int err;
@@ -91,9 +107,9 @@ static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
- struct msg_req *req;
+ struct cgx_mac_addr_reset_req *req;
int err;
mutex_lock(&pf->mbox.lock);
@@ -102,6 +118,7 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
+ req->index = dmac_index;
err = otx2_sync_mbox_msg(&pf->mbox);
@@ -110,12 +127,12 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
}
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
- u8 bit_pos)
+ u32 bit_pos)
{
- u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_remove_pfmac(pf);
+ return otx2_dmacflt_remove_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_remove(pf, mac, dmacindex);
}
@@ -144,6 +161,12 @@ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
rsp = (struct cgx_max_dmac_entries_get_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
out:
@@ -151,9 +174,10 @@ out:
return err;
}
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
struct cgx_mac_addr_update_req *req;
+ struct cgx_mac_addr_update_rsp *rsp;
int rc;
mutex_lock(&pf->mbox.lock);
@@ -167,8 +191,19 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
ether_addr_copy(req->mac_addr, mac);
req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ /* check the response and update the index */
+
rc = otx2_sync_mbox_msg(&pf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_update_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
+
+out:
mutex_unlock(&pf->mbox.lock);
return rc;
}
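The net effect of this file's changes is that DMAC filter slots are now tracked in a heap-allocated bitmap with u32 hardware indices, rather than a single unsigned long with u8 indices. A minimal kernel-style sketch of that bookkeeping pattern, with illustrative names only:

```c
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/types.h>

struct dmacflt_state {
	unsigned long *bmap;	/* one bit per installed filter slot */
	u32 *bmap_to_index;	/* CGX/RPM index returned per slot */
	unsigned int max_flows;
};

/* Reserve the first free slot, as otx2_add_flow() does with
 * find_first_zero_bit()/set_bit() on flow_cfg->dmacflt_bmap.
 */
static int dmacflt_reserve_slot(struct dmacflt_state *st)
{
	unsigned int bit;

	bit = find_first_zero_bit(st->bmap, st->max_flows);
	if (bit >= st->max_flows)
		return -ENOSPC;
	set_bit(bit, st->bmap);
	return bit;
}
```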
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d85db90632d6..0eb74e8c553d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -76,8 +76,8 @@ static void otx2_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
@@ -371,6 +371,8 @@ static void otx2_get_ringparam(struct net_device *netdev,
ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+ kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
+ kernel_ring->cqe_size = pfvf->hw.xqe_size;
}
static int otx2_set_ringparam(struct net_device *netdev,
@@ -379,6 +381,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u32 rx_buf_len = kernel_ring->rx_buf_len;
+ u32 old_rx_buf_len = pfvf->hw.rbuf_len;
+ u32 xqe_size = kernel_ring->cqe_size;
bool if_up = netif_running(netdev);
struct otx2_qset *qs = &pfvf->qset;
u32 rx_count, tx_count;
@@ -386,6 +391,21 @@ static int otx2_set_ringparam(struct net_device *netdev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ /* Hardware supports a maximum receive buffer size of 32K;
+ * 1536 is the typical Ethernet frame size.
+ */
+ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
+ netdev_err(netdev,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ if (xqe_size != 128 && xqe_size != 512) {
+ netdev_err(netdev,
+ "Completion event size must be 128 or 512");
+ return -EINVAL;
+ }
+
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
@@ -403,7 +423,8 @@ static int otx2_set_ringparam(struct net_device *netdev,
Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
- if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
+ rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
return 0;
if (if_up)
@@ -413,6 +434,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
+ pfvf->hw.rbuf_len = rx_buf_len;
+ pfvf->hw.xqe_size = xqe_size;
+
if (if_up)
return netdev->netdev_ops->ndo_open(netdev);
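Together with the supported_ring_params additions further down, these two fields let userspace resize the receive buffer and the completion queue entry independently of the ring counts; with a new enough ethtool that is presumably invoked as `ethtool -G eth0 rx-buf-len 2048 cqe-size 512` (the command line is an assumption about the ethtool build, not part of this patch).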
@@ -431,6 +455,14 @@ static int otx2_get_coalesce(struct net_device *netdev,
cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
cmd->tx_coalesce_usecs = hw->cq_time_wait;
cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ cmd->use_adaptive_rx_coalesce = 1;
+ cmd->use_adaptive_tx_coalesce = 1;
+ } else {
+ cmd->use_adaptive_rx_coalesce = 0;
+ cmd->use_adaptive_tx_coalesce = 0;
+ }
return 0;
}
@@ -442,11 +474,30 @@ static int otx2_set_coalesce(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_hw *hw = &pfvf->hw;
+ u8 priv_coalesce_status;
int qidx;
if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
return 0;
+ if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
+ netdev_err(netdev,
+ "adaptive-rx should be same as adaptive-tx");
+ return -EINVAL;
+ }
+
+ /* Check and update coalesce status */
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ priv_coalesce_status = 1;
+ if (!ec->use_adaptive_rx_coalesce)
+ pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ } else {
+ priv_coalesce_status = 0;
+ if (ec->use_adaptive_rx_coalesce)
+ pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ }
+
/* 'cq_time_wait' is 8bit and is in multiple of 100ns,
* so clamp the user given value to the range of 1 to 25usec.
*/
@@ -470,9 +521,9 @@ static int otx2_set_coalesce(struct net_device *netdev,
* so clamp the user given value to the range of 1 to 64k.
*/
ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
- 1, U16_MAX);
+ 1, NAPI_POLL_WEIGHT);
ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
- 1, U16_MAX);
+ 1, NAPI_POLL_WEIGHT);
/* Rx and Tx are mapped to same CQ, check which one
* is changed, if both then choose the min.
@@ -485,6 +536,17 @@ static int otx2_set_coalesce(struct net_device *netdev,
hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
ec->tx_max_coalesced_frames);
+ /* Reset 'cq_time_wait' and 'cq_ecount_wait' to
+ * default values if coalesce status changed from
+ * 'on' to 'off'.
+ */
+ if (priv_coalesce_status &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ }
+
if (netif_running(netdev)) {
for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
otx2_config_irq_coalescing(pfvf, qidx);
@@ -901,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -1206,7 +1270,10 @@ end:
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1248,8 +1315,8 @@ static void otx2vf_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *vf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1325,7 +1392,10 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 77a13fb555fb..709fc0114fbd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,11 +18,13 @@ struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
u32 location;
- u16 entry;
+ u32 entry;
bool is_vf;
u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
int vf;
- bool dmac_filter;
};
enum dmac_req {
@@ -230,6 +232,9 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
return 0;
}
+/* TODO: revisit the bitmap size */
+#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
+
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg;
@@ -240,6 +245,12 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
if (!pfvf->flow_cfg)
return -ENOMEM;
+ pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pfvf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
flow_cfg->max_flows = 0;
@@ -257,6 +268,12 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->flow_cfg)
return -ENOMEM;
+ pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
/* Allocate bare minimum number of MCAM entries needed for
@@ -282,7 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->flow_cfg->bmap_to_dmacindex =
- devm_kzalloc(pf->dev, sizeof(u8) *
+ devm_kzalloc(pf->dev, sizeof(u32) *
pf->flow_cfg->dmacflt_max_flows,
GFP_KERNEL);
@@ -353,7 +370,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
- if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -436,7 +453,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
- bitmap_weight(&flow_cfg->dmacflt_bmap,
+ !bitmap_empty(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
@@ -899,6 +916,9 @@ static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
struct npc_install_flow_req *req;
int err, vf = 0;
@@ -940,6 +960,24 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
mutex_unlock(&pfvf->mbox.lock);
return -EINVAL;
}
+
+#ifdef CONFIG_DCB
+ /* Identify PFC rule if PFC enabled and ntuple rule is vlan */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
}
/* ethtool ring_cookie has (VF + 1) for VF */
@@ -951,6 +989,12 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -966,7 +1010,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
return -ENOMEM;
pf_mac->entry = 0;
- pf_mac->dmac_filter = true;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
@@ -981,7 +1025,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
otx2_add_flow_to_list(pfvf, pf_mac);
pfvf->flow_cfg->nr_flows++;
- set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ set_bit(0, pfvf->flow_cfg->dmacflt_bmap);
return 0;
}
@@ -1031,11 +1075,11 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* Sync dmac filter table with updated fields */
- if (flow->dmac_filter)
+ if (flow->rule_type & DMAC_FILTER_RULE)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
- if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ if (bitmap_full(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows)) {
netdev_warn(pfvf->netdev,
"Can't insert the rule %d as max allowed dmac filters are %d\n",
@@ -1049,17 +1093,17 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
}
/* Install PF mac address to DMAC filter list */
- if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ if (!test_bit(0, flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
- flow->dmac_filter = true;
- flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow->rule_type |= DMAC_FILTER_RULE;
+ flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
- set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ set_bit(flow->entry, flow_cfg->dmacflt_bmap);
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
@@ -1120,16 +1164,17 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
bool found = false;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter && iter->entry == 0) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
0);
- clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
found = true;
} else {
ether_addr_copy(eth_hdr->h_dest,
pfvf->netdev->dev_addr);
+
otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
}
break;
@@ -1156,7 +1201,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
if (!flow)
return -ENOENT;
- if (flow->dmac_filter) {
+ if (flow->rule_type & DMAC_FILTER_RULE) {
struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* user not allowed to remove dmac filter with interface mac */
@@ -1165,15 +1210,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
flow->entry);
- clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
/* If all dmac filters are removed delete macfilter with
* interface mac address and configure CGX/RPM block in
* promiscuous mode
*/
- if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ if (bitmap_weight(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
}
@@ -1383,7 +1435,7 @@ void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
struct ethhdr *eth_hdr;
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
otx2_dmacflt_add(pf, eth_hdr->h_dest,
iter->entry);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 6080ebd9bd94..303930499a4c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -15,6 +15,7 @@
#include <net/ip.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/bitfield.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -394,7 +395,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
- if (err) {
+ /* Error code -EIO indicates there is a communication failure
+ * with the AF. The rest of the error codes indicate that AF processed
+ * the VF messages and set the error codes in the response messages
+ * (if any), so simply forward the responses to the VF.
+ */
+ if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */
@@ -853,6 +859,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+ struct mcs_intr_info *event,
+ struct msg_rsp *rsp)
+{
+ cn10k_handle_mcs_event(pf, event);
+
+ return 0;
+}
+
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@@ -912,6 +927,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
return err; \
}
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
break;
default:
@@ -1115,7 +1131,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
@@ -1156,6 +1172,59 @@ int otx2_set_real_num_queues(struct net_device *netdev,
}
EXPORT_SYMBOL(otx2_set_real_num_queues);
+static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
+ "NIX_SQOPERR_OOR",
+ "NIX_SQOPERR_CTX_FAULT",
+ "NIX_SQOPERR_CTX_POISON",
+ "NIX_SQOPERR_DISABLED",
+ "NIX_SQOPERR_SIZE_ERR",
+ "NIX_SQOPERR_OFLOW",
+ "NIX_SQOPERR_SQB_NULL",
+ "NIX_SQOPERR_SQB_FAULT",
+ "NIX_SQOPERR_SQE_SZ_ZERO",
+};
+
+static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ "NIX_MNQERR_SQ_CTX_FAULT",
+ "NIX_MNQERR_SQ_CTX_POISON",
+ "NIX_MNQERR_SQB_FAULT",
+ "NIX_MNQERR_SQB_POISON",
+ "NIX_MNQERR_TOTAL_ERR",
+ "NIX_MNQERR_LSO_ERR",
+ "NIX_MNQERR_CQ_QUERY_ERR",
+ "NIX_MNQERR_MAX_SQE_SIZE_ERR",
+ "NIX_MNQERR_MAXLEN_ERR",
+ "NIX_MNQERR_SQE_SIZEM1_ZERO",
+};
+
+static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+ "NIX_SND_STATUS_GOOD",
+ "NIX_SND_STATUS_SQ_CTX_FAULT",
+ "NIX_SND_STATUS_SQ_CTX_POISON",
+ "NIX_SND_STATUS_SQB_FAULT",
+ "NIX_SND_STATUS_SQB_POISON",
+ "NIX_SND_STATUS_HDR_ERR",
+ "NIX_SND_STATUS_EXT_ERR",
+ "NIX_SND_STATUS_JUMP_FAULT",
+ "NIX_SND_STATUS_JUMP_POISON",
+ "NIX_SND_STATUS_CRC_ERR",
+ "NIX_SND_STATUS_IMM_ERR",
+ "NIX_SND_STATUS_SG_ERR",
+ "NIX_SND_STATUS_MEM_ERR",
+ "NIX_SND_STATUS_INVALID_SUBDC",
+ "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+ "NIX_SND_STATUS_DATA_FAULT",
+ "NIX_SND_STATUS_DATA_POISON",
+ "NIX_SND_STATUS_NPC_DROP_ACTION",
+ "NIX_SND_STATUS_LOCK_VIOL",
+ "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+ "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+ "NIX_SND_STATUS_NPC_MCAST_ABORT",
+ "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+ "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+ "NIX_SND_STATUS_SEND_STATS_ERR",
+};
+
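These tables are indexed by the error code carried in bits [7:0] of the corresponding *_ERR_DBG register, extracted with FIELD_GET() in the handler below (the linux/bitfield.h include added at the top of this file provides it). A hedged sketch of that decode step, with a bounds check added that the handler itself omits because it trusts the hardware to return in-range codes:

```c
#include <linux/bitfield.h>

/* Illustrative decode of a NIX_LF_SEND_ERR_DBG value; the string
 * table lookup mirrors what otx2_q_intr_handler() does below.
 */
static const char *decode_snd_err(u64 snd_err_dbg)
{
	u8 code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);

	return code < NIX_SND_STATUS_MAX ?
	       nix_snd_status_e_str[code] : "UNKNOWN";
}
```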
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
@@ -1189,46 +1258,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
/* SQ */
for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+ u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+ u8 sq_op_err_code, mnq_err_code, snd_err_code;
+
+ /* The debug registers below capture the first error seen on each
+ * register. We don't have to check against the SQ qid as these
+ * are fatal errors.
+ */
+
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
(val & NIX_SQINT_BITS));
- if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
- continue;
-
if (val & BIT_ULL(42)) {
netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
qidx, otx2_read64(pf, NIX_LF_ERR_INT));
- } else {
- if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
- qidx,
- otx2_read64(pf,
- NIX_LF_SQ_OP_ERR_DBG));
- otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
- qidx,
- otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
- otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
- qidx,
- otx2_read64(pf,
- NIX_LF_SEND_ERR_DBG));
- otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
- netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
- qidx);
+ goto done;
}
+ sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
+ if (!(sq_op_err_dbg & BIT(44)))
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+ qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+ if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
+ goto chk_mnq_err_dbg;
+
+ /* Err is not NIX_SQOPERR_SQB_NULL; ideally we would issue an AQ
+ * read of the SQ context here. TODO: we are in IRQ context, and
+ * the mbox functions sleep.
+ */
+
+chk_mnq_err_dbg:
+ mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
+ if (!(mnq_err_dbg & BIT(44)))
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+ qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+done:
+ /* Print values and reset */
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+
schedule_work(&pf->reset_task);
}
@@ -1249,6 +1339,7 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
/* Schedule NAPI */
+ pf->napi_events++;
napi_schedule_irqoff(&cq_poll->napi);
return IRQ_HANDLED;
@@ -1262,6 +1353,7 @@ static void otx2_disable_napi(struct otx2_nic *pf)
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
cq_poll = &qset->napi[qidx];
+ cancel_work_sync(&cq_poll->dim.work);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
}
@@ -1306,6 +1398,9 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
int total_size;
int rbuf_size;
+ if (pf->hw.rbuf_len)
+ return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
/* The data transferred by NIX to memory consists of actual packet
* plus additional data which has timestamp and/or EDSA/HIGIG2
* headers if interface is configured in corresponding modes.
@@ -1379,18 +1474,40 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
goto err_free_sq_ptrs;
}
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+ }
+#endif
+
err = otx2_config_nix_queues(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
+
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl);
+ err = otx2_txschq_config(pf, lvl, 0, false);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_config(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+#endif
+
mutex_unlock(&mbox->lock);
return err;
@@ -1445,6 +1562,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+#ifdef CONFIG_DCB
+ if (pf->pfc_en)
+ otx2_pfc_txschq_stop(pf);
+#endif
+
mutex_lock(&mbox->lock);
/* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
@@ -1538,6 +1660,24 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
}
+static void otx2_dim_work(struct work_struct *w)
+{
+ struct dim_cq_moder cur_moder;
+ struct otx2_cq_poll *cq_poll;
+ struct otx2_nic *pfvf;
+ struct dim *dim;
+
+ dim = container_of(w, struct dim, work);
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ cq_poll = container_of(dim, struct otx2_cq_poll, dim);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
+ CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ NAPI_POLL_WEIGHT : cur_moder.pkts;
+ dim->state = DIM_START_MEASURE;
+}
+
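otx2_dim_work() is the consumer half of the dynamic interrupt moderation (DIM) loop; the producer half lives in the NAPI poll path, which feeds packet/byte counts into net_dim() so the core can pick a new moderation profile and queue this work. A hedged sketch of that producer side (the function name and exact poll structure are illustrative, not the driver's actual otx2_napi_handler()):

```c
/* Illustrative NAPI-side half of the DIM loop; rx_packets/rx_bytes
 * stand in for counters accumulated while processing CQEs.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct otx2_cq_poll *cq_poll =
		container_of(napi, struct otx2_cq_poll, napi);
	struct otx2_nic *pfvf = cq_poll->dev;
	u64 rx_packets = 0, rx_bytes = 0;
	int workdone = 0;

	/* ... process up to 'budget' CQEs, updating the counters ... */

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		struct dim_sample sample;

		dim_update_sample(pfvf->napi_events, rx_packets,
				  rx_bytes, &sample);
		net_dim(&cq_poll->dim, sample);
		/* re-enable the CQ interrupt here */
	}
	return workdone;
}
```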
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1604,8 +1744,9 @@ int otx2_open(struct net_device *netdev)
cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
cq_poll->dev = (void *)pf;
- netif_napi_add(netdev, &cq_poll->napi,
- otx2_napi_handler, NAPI_POLL_WEIGHT);
+ cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
+ netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
napi_enable(&cq_poll->napi);
}
@@ -1689,9 +1830,6 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
-
/* Install DMAC Filters */
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
@@ -1713,7 +1851,6 @@ err_free_cints:
vec = pci_irq_vector(pf->pdev,
pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
- synchronize_irq(vec);
free_irq(vec, pf);
err_disable_napi:
otx2_disable_napi(pf);
@@ -1757,7 +1894,6 @@ int otx2_stop(struct net_device *netdev)
vec = pci_irq_vector(pf->pdev,
pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
- synchronize_irq(vec);
free_irq(vec, pf);
/* Cleanup CQ NAPI and IRQ */
@@ -1791,8 +1927,7 @@ int otx2_stop(struct net_device *netdev)
kfree(qset->rq);
kfree(qset->napi);
/* Do not clear RQ/SQ ringsize settings */
- memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
- sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
+ memset_startat(qset, 0, sqe_cnt);
return 0;
}
EXPORT_SYMBOL(otx2_stop);
@@ -1829,6 +1964,30 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+#ifdef CONFIG_DCB
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u8 vlan_prio;
+#endif
+
+#ifdef CONFIG_DCB
+ if (!skb->vlan_present)
+ goto pick_tx;
+
+ vlan_prio = skb->vlan_tci >> 13;
+ if ((vlan_prio > pf->hw.tx_queues - 1) ||
+ !pf->pfc_alloc_status[vlan_prio])
+ goto pick_tx;
+
+ return vlan_prio;
+
+pick_tx:
+#endif
+ return netdev_pick_tx(netdev, skb, NULL);
+}
+
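The `>> 13` picks the 3-bit PCP field out of the 16-bit VLAN TCI (PCP:3, DEI:1, VID:12). A quick standalone check of that shift:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t tci = 0xa005;	/* PCP = 5, DEI = 0, VID = 5 */

	assert((tci >> 13) == 5);
	return 0;
}
```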
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1858,9 +2017,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
- bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
- bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -1870,46 +2027,7 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
- if ((changed & NETIF_F_NTUPLE) && !ntuple)
- otx2_destroy_ntuple_flows(pf);
-
- if ((changed & NETIF_F_NTUPLE) && ntuple) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && !tc &&
- pf->flow_cfg && pf->flow_cfg->nr_flows) {
- netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
- return -EBUSY;
- }
-
- if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
- netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
- return -EINVAL;
- }
-
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -2004,8 +2122,19 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ return -ERANGE;
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ fallthrough;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -2464,6 +2593,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_select_queue = otx2_select_queue,
.ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
@@ -2620,6 +2750,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2716,6 +2849,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ err = cn10k_mcs_init(pf);
+ if (err)
+ goto err_del_mcam_entries;
+
if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2737,7 +2874,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
- netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
@@ -2748,7 +2885,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_del_mcam_entries;
+ goto err_mcs_free;
}
err = otx2_wq_init(pf);
@@ -2773,9 +2910,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
@@ -2785,6 +2924,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_mcs_free:
+ cn10k_mcs_free(pf);
err_del_mcam_entries:
otx2_mcam_flow_del(pf);
err_ptp_destroy:
@@ -2920,6 +3061,23 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+ cn10k_mcs_free(pf);
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 61c20907315f..896b2f9bac34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,33 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
- struct ptp_rsp *rsp;
- int err;
if (!ptp->nic)
- return 0;
+ return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
- return 0;
+ return -ENOMEM;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
- &req->hdr);
- if (IS_ERR(rsp))
- return 0;
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
- return rsp->clk;
+ return otx2_ptp_get_clock(ptp);
}
static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
@@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
+{
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ *tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
- struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(ptp, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
- ptp->last_extts = tstmp;
-
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
+ ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 tstamp;
+
+ mutex_lock(&pfvf->mbox.lock);
+ tstamp = otx2_ptp_get_clock(ptp);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+ ptp->base_ns = tstamp % NSEC_PER_SEC;
+
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
+}
+
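
otx2_sync_tstamp() leans on the kernel timecounter, which extends the free-running PTP cycle counter into a monotonically accumulated nanosecond value. A simplified userspace model of the cyc2time conversion, assuming a mult/shift of 1/0 purely for illustration:

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified timecounter model: time = nsec_base + (cycles - cycle_last),
   * i.e. the kernel's timecounter_cyc2time() with mult = 1, shift = 0.
   */
  struct tc_model {
          uint64_t cycle_last;
          uint64_t nsec;
  };

  static uint64_t cyc2time(const struct tc_model *tc, uint64_t cycles)
  {
          return tc->nsec + (cycles - tc->cycle_last);
  }

  int main(void)
  {
          struct tc_model tc = { .cycle_last = 1000, .nsec = 500 };

          printf("%llu\n", (unsigned long long)cyc2time(&tc, 1250));      /* 750 */
          return 0;
  }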
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
@@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on)
+ if (on) {
+ ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- else
+ } else {
+ ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
+ }
return 0;
default:
break;
@@ -294,6 +341,16 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
@@ -308,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 6ff284211d7b..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -8,6 +8,21 @@
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
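
The CN10K helper above assumes the hardware reports timestamps as a packed [seconds:32][nanoseconds:32] word. A worked standalone example of the same conversion:

  #include <stdint.h>
  #include <stdio.h>

  #define NSEC_PER_SEC 1000000000ULL

  /* Mirrors cn10k_ptp_convert_timestamp(): seconds live in the upper
   * 32 bits, nanoseconds in the lower 32 bits.
   */
  static uint64_t cn10k_ts_to_ns(uint64_t ts)
  {
          return (ts >> 32) * NSEC_PER_SEC + (ts & 0xFFFFFFFFULL);
  }

  int main(void)
  {
          uint64_t ts = (2ULL << 32) | 500;       /* 2 s + 500 ns */

          printf("%llu\n", (unsigned long long)cn10k_ts_to_ns(ts));       /* 2000000500 */
          return 0;
  }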
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index 4bbd12ff26e6..fa37b9f312ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -236,8 +236,15 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_51_16 : 36;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
@@ -274,4 +281,61 @@ enum nix_sqint_e {
BIT_ULL(NIX_SQINT_SEND_ERR) | \
BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+enum nix_sqoperr_e {
+ NIX_SQOPERR_OOR = 0,
+ NIX_SQOPERR_CTX_FAULT = 1,
+ NIX_SQOPERR_CTX_POISON = 2,
+ NIX_SQOPERR_DISABLED = 3,
+ NIX_SQOPERR_SIZE_ERR = 4,
+ NIX_SQOPERR_OFLOW = 5,
+ NIX_SQOPERR_SQB_NULL = 6,
+ NIX_SQOPERR_SQB_FAULT = 7,
+ NIX_SQOPERR_SQE_SZ_ZERO = 8,
+ NIX_SQOPERR_MAX,
+};
+
+enum nix_mnqerr_e {
+ NIX_MNQERR_SQ_CTX_FAULT = 0,
+ NIX_MNQERR_SQ_CTX_POISON = 1,
+ NIX_MNQERR_SQB_FAULT = 2,
+ NIX_MNQERR_SQB_POISON = 3,
+ NIX_MNQERR_TOTAL_ERR = 4,
+ NIX_MNQERR_LSO_ERR = 5,
+ NIX_MNQERR_CQ_QUERY_ERR = 6,
+ NIX_MNQERR_MAX_SQE_SIZE_ERR = 7,
+ NIX_MNQERR_MAXLEN_ERR = 8,
+ NIX_MNQERR_SQE_SIZEM1_ZERO = 9,
+ NIX_MNQERR_MAX,
+};
+
+enum nix_snd_status_e {
+ NIX_SND_STATUS_GOOD = 0x0,
+ NIX_SND_STATUS_SQ_CTX_FAULT = 0x1,
+ NIX_SND_STATUS_SQ_CTX_POISON = 0x2,
+ NIX_SND_STATUS_SQB_FAULT = 0x3,
+ NIX_SND_STATUS_SQB_POISON = 0x4,
+ NIX_SND_STATUS_HDR_ERR = 0x5,
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+ NIX_SND_STATUS_CRC_ERR = 0x9,
+ NIX_SND_STATUS_IMM_ERR = 0x10,
+ NIX_SND_STATUS_SG_ERR = 0x11,
+ NIX_SND_STATUS_MEM_ERR = 0x12,
+ NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+ NIX_SND_STATUS_DATA_FAULT = 0x15,
+ NIX_SND_STATUS_DATA_POISON = 0x16,
+ NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+ NIX_SND_STATUS_LOCK_VIOL = 0x18,
+ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+ NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+ NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
+ NIX_SND_STATUS_MAX,
+};
+
#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 626961a41089..e64318c110fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -28,6 +28,9 @@
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL
+#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
+#define CN10K_MAX_BURST_SIZE 8453888ULL
+
/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -58,7 +64,7 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
- if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
+ if (!nic->flow_cfg->max_flows)
return 0;
/* Max flows changed, free the existing bitmap */
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
- u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ u32 *burst_exp, u32 *burst_mantissa)
{
+ int max_burst, max_mantissa;
unsigned int tmp;
+ if (is_dev_otx2(nic->pdev)) {
+ max_burst = MAX_BURST_SIZE;
+ max_mantissa = MAX_BURST_MANTISSA;
+ } else {
+ max_burst = CN10K_MAX_BURST_SIZE;
+ max_mantissa = CN10K_MAX_BURST_MANTISSA;
+ }
+
/* Burst is calculated as
* ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
* Max supported burst size is 130,816 bytes.
*/
- burst = min_t(u32, burst, MAX_BURST_SIZE);
+ burst = min_t(u32, burst, max_burst);
if (burst) {
*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
tmp = burst - rounddown_pow_of_two(burst);
- if (burst < MAX_BURST_MANTISSA)
+ if (burst < max_mantissa)
*burst_mantissa = tmp * 2;
else
*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
} else {
*burst_exp = MAX_BURST_EXPONENT;
- *burst_mantissa = MAX_BURST_MANTISSA;
+ *burst_mantissa = max_mantissa;
}
}
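
The burst encoding above can be sanity-checked outside the driver: hardware decodes the (exponent, mantissa) pair as ((256 + mantissa) << (1 + exponent)) / 256. A standalone model, assuming the OTX2 limit of a 0xFF mantissa and a nonzero burst:

  #include <stdint.h>
  #include <stdio.h>

  static unsigned int ilog2_u32(uint32_t v)       /* floor(log2(v)), v > 0 */
  {
          unsigned int r = 0;

          while (v >>= 1)
                  r++;
          return r;
  }

  static void burst_cfg(uint32_t burst, uint32_t *exp, uint32_t *mant)
  {
          uint32_t tmp = burst - (1u << ilog2_u32(burst));  /* rounddown_pow_of_two() */

          *exp = ilog2_u32(burst) ? ilog2_u32(burst) - 1 : 0;
          if (burst < 0xFF)
                  *mant = tmp * 2;
          else
                  *mant = tmp / (1u << (*exp - 7));
  }

  int main(void)
  {
          uint32_t exp, mant, burst = 65536;

          burst_cfg(burst, &exp, &mant);
          printf("exp=%u mant=%u decode=%llu\n", exp, mant,
                 ((256ULL + mant) << (1 + exp)) / 256);    /* round-trips to 65536 */
          return 0;
  }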
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
u32 *mantissa, u32 *div_exp)
{
- unsigned int tmp;
+ u64 tmp;
/* Rate calculation by hardware
*
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
}
}
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+ u64 maxrate, u32 burst)
{
- struct otx2_hw *hw = &nic->hw;
- struct nix_txschq_config *req;
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
+ u64 regval = 0;
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ if (is_dev_otx2(nic->pdev)) {
+ regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ } else {
+ regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ }
+
+ return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+ u32 burst, u64 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
int txschq, err;
/* All SQs share the same TL4, so pick the first scheduler */
txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
- /* Get exponent and mantissa values from the desired rate */
- otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
- otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
req->lvl = NIX_TXSCH_LVL_TL4;
req->num_regs = 1;
req->reg[0] = NIX_AF_TL4X_PIR(txschq);
- req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
- FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
- FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
- FIELD_PREP(TLX_RATE_EXPONENT, exp) |
- FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
err = otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock);
@@ -190,13 +224,47 @@ static int otx2_tc_validate_flow(struct otx2_nic *nic,
return 0;
}
+static int otx2_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry;
- u32 rate;
+ u64 rate;
int err;
err = otx2_tc_validate_flow(nic, actions, extack);
@@ -212,13 +280,17 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
entry = &cls->rule->action.entries[0];
switch (entry->id) {
case FLOW_ACTION_POLICE:
+ err = otx2_policer_validate(&cls->rule->action, entry, extack);
+ if (err)
+ return err;
+
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
return -EOPNOTSUPP;
}
/* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8;
- rate = max_t(u32, rate / 1000000, 1);
+ rate = max_t(u64, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err)
return err;
@@ -315,6 +387,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
u8 nr_police = 0;
bool pps = false;
u64 rate;
+ int err;
int i;
if (!flow_action_has_entries(flow_action)) {
@@ -355,6 +428,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
+ err = otx2_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
if (act->police.rate_bytes_ps > 0) {
rate = act->police.rate_bytes_ps * 8;
burst = act->police.burst;
@@ -571,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_DPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_DPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ if (flow_mask->dport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_SPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_SPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+ if (flow_mask->sport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
}
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
@@ -1023,6 +1106,7 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc);
static const struct rhashtable_params tc_flow_ht_params = {
.head_offset = offsetof(struct otx2_tc_flow, node),
@@ -1052,6 +1136,7 @@ int otx2_init_tc(struct otx2_nic *nic)
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
+EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
@@ -1060,3 +1145,4 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
+EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7c4068c5d1ac..ef10aef3cda0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -19,6 +19,12 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT 0x13F /* PTP event UDP port (319) */
+/* The PTPv2 header's originTimestamp starts at byte offset 34 and
+ * contains a 6-byte seconds field and a 4-byte nanoseconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET 34
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
@@ -148,6 +154,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (!err) {
memset(&ts, 0, sizeof(ts));
@@ -167,14 +174,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
+ u64 timestamp, tsns;
int err;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (err)
return;
@@ -433,6 +441,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
int tx_pkts = 0, tx_bytes = 0, qidx;
+ struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
@@ -443,6 +452,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
return 0;
process_cqe:
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
+
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
@@ -450,18 +462,20 @@ process_cqe:
return 0;
break;
}
+
if (cq->cq_type == CQ_XDP) {
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
- cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
} else {
- otx2_snd_pkt_handler(pfvf, cq,
- &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
+ otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
+ &tx_pkts, &tx_bytes);
}
+
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
cq->pend_cqe--;
+
+ sq->cons_head++;
+ sq->cons_head &= (sq->sqe_cnt - 1);
}
/* Free CQEs to HW */
@@ -482,6 +496,18 @@ process_cqe:
return 0;
}
+static void otx2_adjust_adaptive_coalesce(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+{
+ struct dim_sample dim_sample;
+ u64 rx_frames, rx_bytes;
+
+ rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ OTX2_GET_RX_STATS(RX_UCAST);
+ rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
+ net_dim(&cq_poll->dim, dim_sample);
+}
+
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
@@ -519,6 +545,17 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return workdone;
+ /* Check for adaptive interrupt coalescing */
+ if (workdone != 0 &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ /* Adjust irq coalescing using net_dim */
+ otx2_adjust_adaptive_coalesce(pfvf, cq_poll);
+ /* Update irq coalescing */
+ for (i = 0; i < pfvf->hw.cint_cnt; i++)
+ otx2_config_irq_coalescing(pfvf, i);
+ }
+
/* Re-enable interrupts */
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
@@ -599,7 +636,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->subdc = NIX_SUBDC_EXT;
if (skb_shinfo(skb)->gso_size) {
ext->lso = 1;
- ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_sb = skb_tcp_all_headers(skb);
ext->lso_mps = skb_shinfo(skb)->gso_size;
/* Only TSOv4 and TSOv6 GSO offloads are supported */
@@ -661,7 +698,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -671,6 +709,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = udp_csum;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -906,7 +951,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
* be correctly modified, hence don't offload such TSO segments.
*/
- payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ payload_len = skb->len - skb_tcp_all_headers(skb);
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
@@ -927,16 +972,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ u8 *data = skb->data, *msgtype;
+ __be16 proto = eth->h_proto;
+ int network_depth = 0;
+
+ /* NIX is programmed to offload the outer VLAN header. With a
+ * single VLAN, the protocol field holds the network header type
+ * (ETH_IP/V6); with stacked VLANs it holds the inner VLAN
+ * protocol (8100).
+ */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB APIs like skb_transport_offset() do not include the
+ * offloaded VLAN header length, so add it explicitly.
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *udp_csum = 1;
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ }
+
+ msgtype = data + *offset;
+
+ /* Check whether the PTP messageType is SYNC */
+ return (*msgtype & 0xf) == 0;
+}
+
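
otx2_ptp_is_sync() ends by testing the low nibble of the byte at *offset: in a PTPv2 header the first octet packs transportSpecific (high nibble) and messageType (low nibble), and Sync is messageType 0. A tiny standalone check:

  #include <stdint.h>
  #include <stdio.h>

  /* messageType is the low nibble of the first PTPv2 header octet;
   * Sync = 0, Follow_Up = 8.
   */
  static int ptp_is_sync(const uint8_t *hdr)
  {
          return (hdr[0] & 0xf) == 0;
  }

  int main(void)
  {
          uint8_t sync_hdr[]      = { 0x10 };     /* transportSpecific 1, Sync */
          uint8_t follow_up_hdr[] = { 0x08 };     /* Follow_Up */

          printf("%d %d\n", ptp_is_sync(sync_hdr), ptp_is_sync(follow_up_hdr));   /* 1 0 */
          return 0;
  }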
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
+ struct ptpv2_tstamp *origin_tstamp;
+ int ptp_offset = 0, udp_csum = 0;
+ struct timespec64 ts;
u64 iova;
- if (!skb_shinfo(skb)->gso_size &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (unlikely(!skb_shinfo(skb)->gso_size &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+ if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+ origin_tstamp = (struct ptpv2_tstamp *)
+ ((u8 *)skb->data + ptp_offset +
+ PTP_SYNC_SEC_OFFSET);
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+ origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+ origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+ }
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, pfvf->ptp->base_ns, udp_csum);
} else {
skb_tx_timestamp(skb);
}
@@ -947,17 +1078,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
{
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
struct otx2_nic *pfvf = netdev_priv(netdev);
- int offset, num_segs, free_sqe;
+ int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
- /* Check if there is room for new SQE.
- * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
- * will give free SQE count.
+ /* Check if there is enough room between producer
+ * and consumer index.
*/
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ if (free_desc < sq->sqe_thresh)
+ return false;
- if (free_sqe < sq->sqe_thresh ||
- free_sqe < otx2_get_sqe_count(pfvf, skb))
+ if (free_desc < otx2_get_sqe_count(pfvf, skb))
return false;
num_segs = skb_shinfo(skb)->nr_frags + 1;
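
The new free-descriptor check treats the SQ as a classic power-of-two ring with one slot kept unused to distinguish full from empty. A standalone model of the arithmetic:

  #include <stdint.h>
  #include <stdio.h>

  /* Free slots between producer (head) and consumer (cons) in a ring
   * of cnt entries (cnt a power of two), reserving one slot.
   */
  static uint32_t ring_free(uint32_t head, uint32_t cons, uint32_t cnt)
  {
          return (cons - head - 1 + cnt) & (cnt - 1);
  }

  int main(void)
  {
          printf("%u\n", ring_free(0, 0, 256));   /* empty ring: 255 free */
          printf("%u\n", ring_free(10, 4, 256));  /* wrapped: 249 free */
          return 0;
  }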
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index f1a04cf9210c..93cac2c2664c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -21,7 +21,7 @@
#define OTX2_HEAD_ROOM OTX2_ALIGN
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
-#define OTX2_MIN_MTU 64
+#define OTX2_MIN_MTU 60
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -79,6 +79,7 @@ struct sg_list {
struct otx2_snd_queue {
u8 aura_id;
u16 head;
+ u16 cons_head;
u16 sqe_size;
u32 sqe_cnt;
u16 num_sqbs;
@@ -109,6 +110,7 @@ struct otx2_cq_poll {
#define CINT_INVALID_CQ 255
u8 cint_idx;
u8 cq_ids[CQS_PER_CINT];
+ struct dim dim;
struct napi_struct napi;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 925b74ebb8b0..86653bb8e403 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -472,23 +472,7 @@ static void otx2vf_reset_task(struct work_struct *work)
static int otx2vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ netdev->features;
- bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
- struct otx2_nic *vf = netdev_priv(netdev);
-
- if (changed & NETIF_F_NTUPLE) {
- if (!ntuple_enabled) {
- otx2_mcam_flow_del(vf);
- return 0;
- }
-
- if (!otx2_get_maxflows(vf->flow_cfg)) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static const struct net_device_ops otx2vf_netdev_ops = {
@@ -502,6 +486,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_eth_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -586,6 +571,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->max_queues = qcount;
hw->tot_tx_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use 128-byte CQE descriptors by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -662,8 +650,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
- netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
@@ -697,16 +686,24 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_unreg_netdev;
- err = otx2_register_dl(vf);
+ err = otx2_init_tc(vf);
if (err)
goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
err_ptp_destroy:
@@ -739,6 +736,22 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
cancel_work_sync(&vf->reset_task);
otx2_unregister_dl(vf);
unregister_netdev(netdev);
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b6f20e2034c6..f2f7663c3d10 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -8,6 +8,7 @@ config PRESTERA
depends on NET_SWITCHDEV && VLAN_8021Q
depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
+ select PHYLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index d395f4131648..df14cee80153 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -4,6 +4,6 @@ prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
prestera_switchdev.o prestera_acl.o prestera_flow.o \
prestera_flower.o prestera_span.o prestera_counter.o \
- prestera_router.o prestera_router_hw.o
+ prestera_router.o prestera_router_hw.o prestera_matchall.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index a0a5a8e6bd8c..35554ee805cd 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -7,6 +7,7 @@
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/phylink.h>
#include <net/devlink.h>
#include <uapi/linux/if_ether.h>
@@ -20,6 +21,26 @@ struct prestera_fw_rev {
u16 sub;
};
+struct prestera_flood_domain {
+ struct prestera_switch *sw;
+ struct list_head flood_domain_port_list;
+ u32 idx;
+};
+
+struct prestera_mdb_entry {
+ struct prestera_switch *sw;
+ struct prestera_flood_domain *flood_domain;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct prestera_flood_domain_port {
+ struct prestera_flood_domain *flood_domain;
+ struct net_device *dev;
+ struct list_head flood_domain_port_node;
+ u16 vid;
+};
+
struct prestera_port_stats {
u64 good_octets_received;
u64 bad_octets_received;
@@ -72,6 +93,7 @@ struct prestera_lag {
struct prestera_flow_block;
struct prestera_port_mac_state {
+ bool valid;
u32 mode;
u32 speed;
bool oper;
@@ -107,7 +129,8 @@ struct prestera_port_phy_config {
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
- struct prestera_flow_block *flow_block;
+ struct prestera_flow_block *ingress_flow_block;
+ struct prestera_flow_block *egress_flow_block;
struct devlink_port dl_port;
struct list_head lag_member;
struct prestera_lag *lag;
@@ -130,6 +153,13 @@ struct prestera_port {
struct prestera_port_phy_config cfg_phy;
struct prestera_port_mac_state state_mac;
struct prestera_port_phy_state state_phy;
+
+ struct phylink_config phy_config;
+ struct phylink *phy_link;
+ struct phylink_pcs phylink_pcs;
+
+ /* protects state_mac */
+ spinlock_t state_mac_lock;
};
struct prestera_device {
@@ -270,20 +300,33 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct device_node *np;
struct prestera_router *router;
struct prestera_lag *lags;
struct prestera_counter *counter;
u8 lag_member_max;
u8 lag_max;
+ u32 size_tbl_router_nexthop;
};
struct prestera_router {
struct prestera_switch *sw;
struct list_head vr_list;
struct list_head rif_entry_list;
+ struct rhashtable nh_neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable fib_ht;
+ struct rhashtable kern_neigh_cache_ht;
+ struct rhashtable kern_fib_cache_ht;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
- bool aborted;
+ struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ u8 *nhgrp_hw_state_cache; /* Cached bitmap of nexthop hw state */
+ unsigned long nhgrp_hw_cache_kick; /* jiffies */
+ struct {
+ struct delayed_work dw;
+ } neighs_update;
};
struct prestera_rxtx_params {
@@ -318,6 +361,8 @@ void prestera_router_fini(struct prestera_switch *sw);
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+struct prestera_switch *prestera_switch_get(struct net_device *dev);
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg);
@@ -326,6 +371,16 @@ int prestera_port_cfg_mac_write(struct prestera_port *port,
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+void prestera_queue_work(struct work_struct *work);
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay);
+void prestera_queue_drain(void);
+
+int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
@@ -333,9 +388,30 @@ bool prestera_netdev_check(const struct net_device *dev);
int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
bool prestera_port_is_lag_member(const struct prestera_port *port);
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id);
struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
u16 prestera_port_lag_id(const struct prestera_port *port);
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid);
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry);
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw);
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain);
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid);
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port);
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid);
+
#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index f0d9f592173b..cba89fda504b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -22,6 +22,7 @@ struct prestera_acl {
struct prestera_acl_ruleset_ht_key {
struct prestera_flow_block *block;
+ u32 chain_index;
};
struct prestera_acl_rule_entry {
@@ -34,6 +35,14 @@ struct prestera_acl_rule_entry {
u8 valid:1;
} accept, drop, trap;
struct {
+ u8 valid:1;
+ struct prestera_acl_action_police i;
+ } police;
+ struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
u32 id;
struct prestera_counter_block *block;
} counter;
@@ -45,12 +54,18 @@ struct prestera_acl_ruleset {
struct prestera_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
struct prestera_acl *acl;
+ struct {
+ u32 min;
+ u32 max;
+ } prio;
unsigned long rule_count;
refcount_t refcount;
void *keymask;
u32 vtcam_id;
+ u32 index;
u16 pcl_id;
bool offload;
+ bool ingress;
};
struct prestera_acl_vtcam {
@@ -60,6 +75,7 @@ struct prestera_acl_vtcam {
u32 id;
bool is_keymask_set;
u8 lookup;
+ u8 direction;
};
static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
@@ -83,20 +99,59 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
.automatic_shrinking = true,
};
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
+{
+ static const u32 ingress_client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2
+ };
+
+ if (!ingress) {
+ /* prestera supports only one chain on egress */
+ if (chain_index > 0)
+ return -EINVAL;
+
+ *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
+ return 0;
+ }
+
+ if (chain_index >= ARRAY_SIZE(ingress_client_map))
+ return -EINVAL;
+
+ *client = ingress_client_map[chain_index];
+ return 0;
+}
+
+static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress)
+{
+ if (!ingress)
+ /* prestera supports only one chain on egress */
+ return chain_index == 0;
+
+ return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
+}
+
static struct prestera_acl_ruleset *
prestera_acl_ruleset_create(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
u32 uid = 0;
int err;
+ if (!prestera_acl_chain_is_supported(chain_index, block->ingress))
+ return ERR_PTR(-EINVAL);
+
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
+ ruleset->ingress = block->ingress;
ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
@@ -108,7 +163,12 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
goto err_ruleset_create;
/* make pcl-id based on uid */
- ruleset->pcl_id = (u8)uid;
+ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
+ ruleset->index = uid;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
if (err)
@@ -125,43 +185,81 @@ err_rhashtable_init:
return ERR_PTR(err);
}
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
- void *keymask)
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
{
ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
+ if (!ruleset->keymask)
+ return -ENOMEM;
+
+ return 0;
}
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
+ struct prestera_acl_iface iface;
u32 vtcam_id;
+ int dir;
int err;
+ dir = ruleset->ingress ?
+ PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS;
+
if (ruleset->offload)
return -EEXIST;
- err = prestera_acl_vtcam_id_get(ruleset->acl, 0,
+ err = prestera_acl_vtcam_id_get(ruleset->acl,
+ ruleset->ht_key.chain_index,
+ dir,
ruleset->keymask, &vtcam_id);
if (err)
- return err;
+ goto err_vtcam_create;
+
+ if (ruleset->ht_key.chain_index) {
+ /* for chain > 0, bind iface index to pcl-id to be able
+ * to jump from any other ruleset to this one using the index.
+ */
+ iface.index = ruleset->index;
+ iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX;
+ err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface,
+ vtcam_id, ruleset->pcl_id);
+ if (err)
+ goto err_ruleset_bind;
+ }
ruleset->vtcam_id = vtcam_id;
ruleset->offload = true;
return 0;
+
+err_ruleset_bind:
+ prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id);
+err_vtcam_create:
+ return err;
}
static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl *acl = ruleset->acl;
u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+ int err;
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
- if (ruleset->offload)
+ if (ruleset->offload) {
+ if (ruleset->ht_key.chain_index) {
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_INDEX,
+ .index = ruleset->index
+ };
+ err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface,
+ ruleset->vtcam_id);
+ WARN_ON(err);
+ }
WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+ }
idr_remove(&acl->uid, uid);
-
rhashtable_destroy(&ruleset->rule_ht);
kfree(ruleset->keymask);
kfree(ruleset);
@@ -169,23 +267,26 @@ static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
static struct prestera_acl_ruleset *
__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
ht_key.block = block;
+ ht_key.chain_index = chain_index;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
prestera_acl_ruleset_ht_params);
}
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (!ruleset)
return ERR_PTR(-ENOENT);
@@ -195,17 +296,18 @@ prestera_acl_ruleset_lookup(struct prestera_acl *acl,
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (ruleset) {
refcount_inc(&ruleset->refcount);
return ruleset;
}
- return prestera_acl_ruleset_create(acl, block);
+ return prestera_acl_ruleset_create(acl, block, chain_index);
}
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
@@ -274,6 +376,26 @@ prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
block->ruleset_zero = NULL;
}
+static void
+prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl,
+ struct prestera_acl_ruleset *ruleset)
+{
+ struct prestera_acl_rule *rule;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ list_for_each_entry(rule, &acl->rules, list) {
+ if (ruleset->ingress != rule->ruleset->ingress)
+ continue;
+ if (ruleset->ht_key.chain_index != rule->chain_index)
+ continue;
+
+ ruleset->prio.min = min(ruleset->prio.min, rule->priority);
+ ruleset->prio.max = max(ruleset->prio.max, rule->priority);
+ }
+}
+
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
{
@@ -293,6 +415,18 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
prestera_acl_rule_ht_params);
}
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->index;
+}
+
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max)
+{
+ *prio_min = ruleset->prio.min;
+ *prio_max = ruleset->prio.max;
+}
+
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
return ruleset->offload;
@@ -300,7 +434,7 @@ bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie, u32 chain_index)
{
struct prestera_acl_rule *rule;
@@ -310,6 +444,7 @@ prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
rule->ruleset = ruleset;
rule->cookie = cookie;
+ rule->chain_index = chain_index;
refcount_inc(&ruleset->refcount);
@@ -324,10 +459,21 @@ void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
+ if (rule->jump_ruleset)
+ /* release ruleset kept by jump action */
+ prestera_acl_ruleset_put(rule->jump_ruleset);
+
prestera_acl_ruleset_put(rule->ruleset);
kfree(rule);
}
+static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset,
+ u32 prio)
+{
+ ruleset->prio.min = min(ruleset->prio.min, prio);
+ ruleset->prio.max = max(ruleset->prio.max, prio);
+}
+
int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule)
{
@@ -345,10 +491,6 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
rule->re_arg.vtcam_id = ruleset->vtcam_id;
rule->re_key.prio = rule->priority;
- /* setup counter */
- rule->re_arg.count.valid = true;
- rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0;
-
rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
err = WARN_ON(rule->re) ? -EEXIST : 0;
if (err)
@@ -360,8 +502,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
if (err)
goto err_rule_add;
- /* bind the block (all ports) to chain index 0 */
- if (!ruleset->rule_count) {
+ /* Bind the block (all ports) to chain index 0; the remaining
+ * chains are reached via the goto action.
+ */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) {
err = prestera_acl_ruleset_block_bind(ruleset, block);
if (err)
goto err_acl_block_bind;
@@ -369,6 +513,7 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
list_add_tail(&rule->list, &sw->acl->rules);
ruleset->rule_count++;
+ prestera_acl_ruleset_prio_update(ruleset, rule->priority);
return 0;
err_acl_block_bind:
@@ -393,9 +538,10 @@ void prestera_acl_rule_del(struct prestera_switch *sw,
list_del(&rule->list);
prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+ prestera_acl_ruleset_prio_refresh(sw->acl, ruleset);
/* unbind block (all ports) */
- if (!ruleset->rule_count)
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
prestera_acl_ruleset_block_unbind(ruleset, block);
}
@@ -459,6 +605,18 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
act_num++;
}
+ /* police */
+ if (e->police.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE;
+ act_hw[act_num].police = e->police.i;
+ act_num++;
+ }
+ /* jump */
+ if (e->jump.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
+ act_hw[act_num].jump = e->jump.i;
+ act_num++;
+ }
/* counter */
if (e->counter.block) {
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
@@ -477,6 +635,9 @@ __prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw,
{
/* counter */
prestera_counter_put(sw->counter, e->counter.block, e->counter.id);
+ /* police */
+ if (e->police.valid)
+ prestera_hw_policer_release(sw, e->police.i.id);
}
void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
@@ -499,16 +660,37 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
struct prestera_acl_rule_entry *e,
struct prestera_acl_rule_entry_arg *arg)
{
+ int err;
+
/* accept */
e->accept.valid = arg->accept.valid;
/* drop */
e->drop.valid = arg->drop.valid;
/* trap */
e->trap.valid = arg->trap.valid;
+ /* jump */
+ e->jump.valid = arg->jump.valid;
+ e->jump.i = arg->jump.i;
+ /* police */
+ if (arg->police.valid) {
+ u8 type = arg->police.ingress ? PRESTERA_POLICER_TYPE_INGRESS :
+ PRESTERA_POLICER_TYPE_EGRESS;
+
+ err = prestera_hw_policer_create(sw, type, &e->police.i.id);
+ if (err)
+ goto err_out;
+
+ err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id,
+ arg->police.rate,
+ arg->police.burst);
+ if (err) {
+ prestera_hw_policer_release(sw, e->police.i.id);
+ goto err_out;
+ }
+ e->police.valid = arg->police.valid;
+ }
/* counter */
if (arg->count.valid) {
- int err;
-
err = prestera_counter_get(sw->counter, arg->count.client,
&e->counter.block,
&e->counter.id);
@@ -605,7 +787,7 @@ vtcam_found:
return 0;
}
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id)
{
struct prestera_acl_vtcam *vtcam;
@@ -617,7 +799,8 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
* fine for now
*/
list_for_each_entry(vtcam, &acl->vtcam_list, list) {
- if (lookup != vtcam->lookup)
+ if (lookup != vtcam->lookup ||
+ dir != vtcam->direction)
continue;
if (!keymask && !vtcam->is_keymask_set) {
@@ -638,7 +821,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return -ENOMEM;
err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
- PRESTERA_HW_VTCAM_DIR_INGRESS);
+ dir);
if (err) {
kfree(vtcam);
@@ -651,6 +834,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return 0;
}
+ vtcam->direction = dir;
vtcam->id = new_vtcam_id;
vtcam->lookup = lookup;
if (keymask) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 40f6c1d961fa..a35cc0609a1d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -10,6 +10,14 @@
#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
(PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00)
+#define PRESTERA_ACL_CHAIN_MASK \
+ (PRESTERA_ACL_KEYMASK_PCL_ID >> 8)
+
+#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \
+ (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \
+ (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN))
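
PRESTERA_ACL_PCL_ID_MAKE() packs the ruleset uid into bits 7:0 of the 10-bit PCL ID and the chain index into bits 9:8, which is also why PRESTERA_ACL_CHAIN_MASK limits ingress chains to 0-3. A standalone model of the packing:

  #include <stdint.h>
  #include <stdio.h>

  #define PCL_ID_MASK     0x3FFu
  #define PCL_ID_USER     (PCL_ID_MASK & 0x00FFu)         /* bits 7:0 */
  #define PCL_ID_CHAIN    (PCL_ID_MASK & 0xFF00u)         /* bits 9:8 */

  static uint16_t pcl_id_make(uint8_t uid, uint32_t chain)
  {
          return (uid & PCL_ID_USER) | ((chain << 8) & PCL_ID_CHAIN);
  }

  int main(void)
  {
          printf("0x%x\n", pcl_id_make(0x21, 2));         /* 0x221 */
          return 0;
  }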
#define rule_match_set_n(match_p, type, val_p, size) \
memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
@@ -46,7 +54,9 @@ enum prestera_acl_rule_action {
PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
PRESTERA_ACL_RULE_ACTION_DROP = 1,
PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_JUMP = 5,
PRESTERA_ACL_RULE_ACTION_COUNT = 7,
+ PRESTERA_ACL_RULE_ACTION_POLICE = 8,
PRESTERA_ACL_RULE_ACTION_MAX
};
@@ -61,6 +71,14 @@ struct prestera_acl_match {
__be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
+struct prestera_acl_action_jump {
+ u32 index;
+};
+
+struct prestera_acl_action_police {
+ u32 id;
+};
+
struct prestera_acl_action_count {
u32 id;
};
@@ -73,7 +91,9 @@ struct prestera_acl_rule_entry_key {
struct prestera_acl_hw_action_info {
enum prestera_acl_rule_action id;
union {
+ struct prestera_acl_action_police police;
struct prestera_acl_action_count count;
+ struct prestera_acl_action_jump jump;
};
};
@@ -88,6 +108,16 @@ struct prestera_acl_rule_entry_arg {
u8 valid:1;
} accept, drop, trap;
struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
+ u8 valid:1;
+ u64 rate;
+ u64 burst;
+ bool ingress;
+ } police;
+ struct {
u8 valid:1;
u32 client;
} count;
@@ -98,7 +128,9 @@ struct prestera_acl_rule {
struct rhash_head ht_node; /* Member of acl HT */
struct list_head list;
struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_ruleset *jump_ruleset;
unsigned long cookie;
+ u32 chain_index;
u32 priority;
struct prestera_acl_rule_entry_key re_key;
struct prestera_acl_rule_entry_arg re_arg;
@@ -122,7 +154,7 @@ void prestera_acl_fini(struct prestera_switch *sw);
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie, u32 chain_index);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
@@ -147,12 +179,14 @@ prestera_acl_rule_entry_create(struct prestera_acl *acl,
struct prestera_acl_rule_entry_arg *arg);
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block);
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
- void *keymask);
+ struct prestera_flow_block *block,
+ u32 chain_index);
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
@@ -160,12 +194,16 @@ int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max);
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
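
A minimal sketch of how the new macros compose and split the 10-bit PCL
id (the uid and chain values below are hypothetical, for illustration
only):

	u16 pcl_id = PRESTERA_ACL_PCL_ID_MAKE(0x2A, 1);	/* == 0x012A */
	u8 chain = (pcl_id >> 8) & PRESTERA_ACL_CHAIN_MASK;	/* == 1 */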
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 40d5b89573bb..2f52daba58e6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -300,8 +300,8 @@ static void prestera_ethtool_get_drvinfo(struct net_device *dev,
struct prestera_port *port = netdev_priv(dev);
struct prestera_switch *sw = port->sw;
- strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
@@ -521,6 +521,9 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
ecmd->base.speed = SPEED_UNKNOWN;
ecmd->base.duplex = DUPLEX_UNKNOWN;
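+ /* ports attached to phylink (SFP) are handled by phylink itself */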
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_get(port->phy_link, ecmd);
+
ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
@@ -648,6 +651,9 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev,
u8 adver_fec;
int err;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_set(port->phy_link, ecmd);
+
err = prestera_port_type_set(ecmd, port);
if (err)
return err;
@@ -782,28 +788,6 @@ static int prestera_ethtool_nway_reset(struct net_device *dev)
return -EINVAL;
}
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt)
-{
- struct prestera_port_mac_state *smac = &port->state_mac;
-
- smac->oper = evt->data.mac.oper;
-
- if (smac->oper) {
- smac->mode = evt->data.mac.mode;
- smac->speed = evt->data.mac.speed;
- smac->duplex = evt->data.mac.duplex;
- smac->fc = evt->data.mac.fc;
- smac->fec = evt->data.mac.fec;
- } else {
- smac->mode = PRESTERA_MAC_MODE_MAX;
- smac->speed = SPEED_UNKNOWN;
- smac->duplex = DUPLEX_UNKNOWN;
- smac->fc = 0;
- smac->fec = 0;
- }
-}
-
const struct ethtool_ops prestera_ethtool_ops = {
.get_drvinfo = prestera_ethtool_get_drvinfo,
.get_link_ksettings = prestera_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
index 9eb18e99dea6..bd5600886bc6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -11,7 +11,4 @@ struct prestera_port;
extern const struct ethtool_ops prestera_ethtool_ops;
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt);
-
#endif /* _PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index d849f046ece7..9f4267f326b0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -7,8 +7,9 @@
#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
-#include "prestera_span.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
static LIST_HEAD(prestera_block_cb_list);
@@ -17,9 +18,9 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return prestera_span_replace(block, f);
+ return prestera_mall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
return 0;
default:
return -EOPNOTSUPP;
@@ -29,9 +30,6 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- if (f->common.chain_index != 0)
- return -EOPNOTSUPP;
-
switch (f->command) {
case FLOW_CLS_REPLACE:
return prestera_flower_replace(block, f);
@@ -71,13 +69,16 @@ static void prestera_flow_block_destroy(void *cb_priv)
prestera_flower_template_cleanup(block);
+ WARN_ON(!list_empty(&block->template_list));
WARN_ON(!list_empty(&block->binding_list));
kfree(block);
}
static struct prestera_flow_block *
-prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
+prestera_flow_block_create(struct prestera_switch *sw,
+ struct net *net,
+ bool ingress)
{
struct prestera_flow_block *block;
@@ -86,8 +87,13 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+ block->ingress = ingress;
return block;
}
@@ -166,7 +172,8 @@ static int prestera_flow_block_unbind(struct prestera_flow_block *block,
static struct prestera_flow_block *
prestera_flow_block_get(struct prestera_switch *sw,
struct flow_block_offload *f,
- bool *register_block)
+ bool *register_block,
+ bool ingress)
{
struct prestera_flow_block *block;
struct flow_block_cb *block_cb;
@@ -174,7 +181,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_flow_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net, ingress);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -210,7 +217,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -218,7 +225,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
bool register_block;
int err;
- block = prestera_flow_block_get(sw, f, &register_block);
+ block = prestera_flow_block_get(sw, f, &register_block, ingress);
if (IS_ERR(block))
return PTR_ERR(block);
@@ -233,7 +240,11 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
}
- port->flow_block = block;
+ if (ingress)
+ port->ingress_flow_block = block;
+ else
+ port->egress_flow_block = block;
+
return 0;
err_block_bind:
@@ -243,7 +254,7 @@ err_block_bind:
}
static void prestera_setup_flow_block_unbind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -256,7 +267,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
block = flow_block_cb_priv(block_cb);
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
err = prestera_flow_block_unbind(block, port);
if (err)
@@ -267,24 +278,38 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
list_del(&block_cb->driver_list);
}
error:
- port->flow_block = NULL;
+ if (ingress)
+ port->ingress_flow_block = NULL;
+ else
+ port->egress_flow_block = NULL;
}
-int prestera_flow_block_setup(struct prestera_port *port,
- struct flow_block_offload *f)
+static int prestera_setup_flow_block_clsact(struct prestera_port *port,
+ struct flow_block_offload *f,
+ bool ingress)
{
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
f->driver_block_list = &prestera_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return prestera_setup_flow_block_bind(port, f);
+ return prestera_setup_flow_block_bind(port, f, ingress);
case FLOW_BLOCK_UNBIND:
- prestera_setup_flow_block_unbind(port, f);
+ prestera_setup_flow_block_unbind(port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
}
}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
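+ /* clsact offers both directions; keep one flow block per direction */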
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return prestera_setup_flow_block_clsact(port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return prestera_setup_flow_block_clsact(port, f, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 1ea5b745bf72..a85a3eb40279 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -8,7 +8,6 @@
struct prestera_port;
struct prestera_switch;
-struct prestera_flower_template;
struct prestera_flow_block_binding {
struct list_head list;
@@ -22,8 +21,14 @@ struct prestera_flow_block {
struct net *net;
struct prestera_acl_ruleset *ruleset_zero;
struct flow_block_cb *block_cb;
- struct prestera_flower_template *tmplt;
+ struct list_head template_list;
+ struct {
+ u32 prio_min;
+ u32 prio_max;
+ bool bound;
+ } mall;
unsigned int rule_count;
+ bool ingress;
};
int prestera_flow_block_setup(struct prestera_port *port,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 19c1417fd05f..91a478b75cbf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -5,34 +5,90 @@
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
struct prestera_flower_template {
struct prestera_acl_ruleset *ruleset;
+ struct list_head list;
+ u32 chain_index;
};
+static void
+prestera_flower_template_free(struct prestera_flower_template *template)
+{
+ prestera_acl_ruleset_put(template->ruleset);
+ list_del(&template->list);
+ kfree(template);
+}
+
void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
- if (block->tmplt) {
- /* put the reference to the ruleset kept in create */
- prestera_acl_ruleset_put(block->tmplt->ruleset);
- kfree(block->tmplt);
- block->tmplt = NULL;
- return;
- }
+ struct prestera_flower_template *template, *tmp;
+
+ /* put the references to all rulesets taken in template create */
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ prestera_flower_template_free(template);
+}
+
+static int
+prestera_flower_parse_goto_action(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ u32 chain_index,
+ const struct flow_action_entry *act)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ if (act->chain_index <= chain_index)
+ /* we can jump only forward */
+ return -EINVAL;
+
+ if (rule->re_arg.jump.valid)
+ return -EEXIST;
+
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ act->chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule->re_arg.jump.valid = 1;
+ rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
+
+ rule->jump_ruleset = ruleset;
+
+ return 0;
}
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
+ u32 chain_index,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
- int i;
+ int err, i;
/* whole struct (rule->re_arg) must be initialized with 0 */
if (!flow_action_has_entries(flow_action))
return 0;
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
+ /* setup counter first */
+ rule->re_arg.count.valid = true;
+ err = prestera_acl_chain_to_client(chain_index, block->ingress,
+ &rule->re_arg.count.client);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
@@ -53,6 +109,23 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
rule->re_arg.trap.valid = 1;
break;
+ case FLOW_ACTION_POLICE:
+ if (rule->re_arg.police.valid)
+ return -EEXIST;
+
+ rule->re_arg.police.valid = 1;
+ rule->re_arg.police.rate =
+ act->police.rate_bytes_ps;
+ rule->re_arg.police.burst = act->police.burst;
+ rule->re_arg.police.ingress = block->ingress;
+ break;
+ case FLOW_ACTION_GOTO:
+ err = prestera_flower_parse_goto_action(block, rule,
+ chain_index,
+ act);
+ if (err)
+ return err;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
@@ -66,7 +139,8 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
struct flow_cls_offload *f,
struct prestera_flow_block *block)
-{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct prestera_acl_match *r_match = &rule->re_key.match;
struct prestera_port *port;
struct net_device *ingress_dev;
@@ -95,24 +169,24 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
}
port = netdev_priv(ingress_dev);
- mask = htons(0x1FFF);
- key = htons(port->hw_id);
+ mask = htons(0x1FFF << 3);
+ key = htons(port->hw_id << 3);
rule_match_set(r_match->key, SYS_PORT, key);
rule_match_set(r_match->mask, SYS_PORT, mask);
- mask = htons(0x1FF);
+ mask = htons(0x3FF);
key = htons(port->dev_id);
rule_match_set(r_match->key, SYS_DEV, key);
rule_match_set(r_match->mask, SYS_DEV, mask);
return 0;
-
}
static int prestera_flower_parse(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_cls_offload *f)
-{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = f_rule->match.dissector;
struct prestera_acl_match *r_match = &rule->re_key.match;
__be16 n_proto_mask = 0;
@@ -130,6 +204,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ICMP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
@@ -229,6 +304,29 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
}
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
+ struct flow_match_ports_range match;
+ __be32 tp_key, tp_mask;
+
+ flow_rule_match_ports_range(f_rule, &match);
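+ /* each range is packed into a single __be32: min in the low
+ * 16 bits, max in the high 16 bits
+ */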
+
+ /* src port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.src) |
+ (ntohs(match.key->tp_max.src) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.src) |
+ (ntohs(match.mask->tp_max.src) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);
+
+ /* dst port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.dst) |
+ (ntohs(match.key->tp_max.dst) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
+ (ntohs(match.mask->tp_max.dst) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
+ }
+
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
@@ -259,9 +357,53 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
}
return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
f->common.extack);
}
+static int prestera_flower_prio_check(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ u32 mall_prio_min;
+ u32 mall_prio_max;
+ int err;
+
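+ /* keep flower and matchall ordering fixed: on ingress flower rules
+ * must come after every matchall rule, on egress before them
+ */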
+ err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+
+ if (f->common.prio <= mall_prio_max && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= mall_prio_min && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
+ return 0;
+}
+
int prestera_flower_replace(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
@@ -270,12 +412,17 @@ int prestera_flower_replace(struct prestera_flow_block *block,
struct prestera_acl_rule *rule;
int err;
- ruleset = prestera_acl_ruleset_get(acl, block);
+ err = prestera_flower_prio_check(block, f);
+ if (err)
+ return err;
+
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
/* increments the ruleset reference */
- rule = prestera_acl_rule_create(ruleset, f->cookie);
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
@@ -312,7 +459,8 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return;
@@ -322,7 +470,6 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
prestera_acl_rule_destroy(rule);
}
prestera_acl_ruleset_put(ruleset);
-
}
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
@@ -345,14 +492,17 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
}
prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
- ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR_OR_NULL(ruleset)) {
err = -EINVAL;
goto err_ruleset_get;
}
/* preserve keymask/template to this ruleset */
- prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ if (err)
+ goto err_ruleset_keymask_set;
/* skip the error, as a template operation cannot be rejected,
* so keep the reference to the ruleset for rules to be added
@@ -364,9 +514,12 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
/* keep the reference to the ruleset */
template->ruleset = ruleset;
- block->tmplt = template;
+ template->chain_index = f->common.chain_index;
+ list_add_rcu(&template->list, &block->template_list);
return 0;
+err_ruleset_keymask_set:
+ prestera_acl_ruleset_put(ruleset);
err_ruleset_get:
kfree(template);
err_malloc:
@@ -377,7 +530,14 @@ err_malloc:
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- prestera_flower_template_cleanup(block);
+ struct prestera_flower_template *template, *tmp;
+
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ if (template->chain_index == f->common.chain_index) {
+ /* put the reference to the ruleset kept in create */
+ prestera_flower_template_free(template);
+ return;
+ }
}
int prestera_flower_stats(struct prestera_flow_block *block,
@@ -390,7 +550,8 @@ int prestera_flower_stats(struct prestera_flow_block *block,
u64 bytes;
int err;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index dc3aa4280e9f..1181115fe6fa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -6,7 +6,6 @@
#include <net/pkt_cls.h>
-struct prestera_switch;
struct prestera_flow_block;
int prestera_flower_replace(struct prestera_flow_block *block,
@@ -20,5 +19,7 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f);
void prestera_flower_template_cleanup(struct prestera_flow_block *block);
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max);
#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 51fc841b1e7a..fc6f7d2746e8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -10,11 +10,14 @@
#include "prestera_hw.h"
#include "prestera_acl.h"
#include "prestera_counter.h"
+#include "prestera_router_hw.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
#define PRESTERA_MIN_MTU 64
+#define PRESTERA_MSG_CHUNK_SIZE 1024
+
enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
@@ -55,9 +58,23 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703,
+
+ PRESTERA_CMD_TYPE_MDB_CREATE = 0x704,
+ PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705,
+
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
@@ -68,9 +85,15 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
- PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101,
- PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102,
PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105,
+
+ PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
+ PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
+ PRESTERA_CMD_TYPE_POLICER_SET = 0x1502,
PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
@@ -87,6 +110,7 @@ enum {
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_LOCKED = 10,
PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
@@ -162,6 +186,10 @@ enum {
};
enum {
+ PRESTERA_POLICER_MODE_SR_TCM
+};
+
+enum {
PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
@@ -175,6 +203,12 @@ struct prestera_fw_event_handler {
void *arg;
};
+enum {
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2,
+};
+
struct prestera_msg_cmd {
__le32 type;
};
@@ -261,6 +295,7 @@ union prestera_msg_port_param {
u8 duplex;
u8 fec;
u8 fc;
+ u8 br_locked;
union {
struct {
u8 admin;
@@ -424,6 +459,12 @@ struct prestera_msg_acl_action {
__le32 __reserved;
union {
struct {
+ __le32 index;
+ } jump;
+ struct {
+ __le32 id;
+ } police;
+ struct {
__le32 id;
} count;
__le32 reserved[6];
@@ -499,6 +540,23 @@ struct prestera_msg_iface {
u8 __pad[3];
};
+struct prestera_msg_ip_addr {
+ union {
+ __be32 ipv4;
+ __be32 ipv6[4];
+ } u;
+ u8 v; /* e.g. PRESTERA_IPV4 */
+ u8 __pad[3];
+};
+
+struct prestera_msg_nh {
+ struct prestera_msg_iface oif;
+ __le32 hw_id;
+ u8 mac[ETH_ALEN];
+ u8 is_active;
+ u8 pad;
+};
+
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
@@ -515,6 +573,43 @@ struct prestera_msg_rif_resp {
u8 __pad[2];
};
+struct prestera_msg_lpm_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_ip_addr dst;
+ __le32 grp_id;
+ __le32 dst_len;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_nh_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX];
+ __le32 size;
+ __le32 grp_id;
+};
+
+struct prestera_msg_nh_chunk_req {
+ struct prestera_msg_cmd cmd;
+ __le32 offset;
+};
+
+struct prestera_msg_nh_chunk_resp {
+ struct prestera_msg_ret ret;
+ u8 hw_state[PRESTERA_MSG_CHUNK_SIZE];
+};
+
+struct prestera_msg_nh_grp_req {
+ struct prestera_msg_cmd cmd;
+ __le32 grp_id;
+ __le32 size;
+};
+
+struct prestera_msg_nh_grp_resp {
+ struct prestera_msg_ret ret;
+ __le32 grp_id;
+};
+
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
@@ -547,6 +642,26 @@ struct mvsw_msg_cpu_code_counter_ret {
__le64 packet_count;
};
+struct prestera_msg_policer_req {
+ struct prestera_msg_cmd cmd;
+ __le32 id;
+ union {
+ struct {
+ __le64 cir;
+ __le32 cbs;
+ } __packed sr_tcm; /* must stay exactly 12 bytes */
+ __le32 reserved[6];
+ };
+ u8 mode;
+ u8 type;
+ u8 pad[2];
+};
+
+struct prestera_msg_policer_resp {
+ struct prestera_msg_ret ret;
+ __le32 id;
+};
+
struct prestera_msg_event {
__le16 type;
__le16 id;
@@ -573,6 +688,57 @@ struct prestera_msg_event_fdb {
u8 dest_type;
};
+struct prestera_msg_flood_domain_create_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_flood_domain_create_resp {
+ struct prestera_msg_ret ret;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+};
+
+struct prestera_msg_flood_domain_ports_reset_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_port {
+ union {
+ struct {
+ __le32 port_num;
+ __le32 dev_num;
+ };
+ __le16 lag_id;
+ };
+ __le16 vid;
+ __le16 port_type;
+};
+
+struct prestera_msg_mdb_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_mdb_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
static void prestera_hw_build_tests(void)
{
/* check requests */
@@ -598,9 +764,23 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
/* structures that are part of req/resp FW messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -615,6 +795,10 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
@@ -892,6 +1076,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
+ sw->size_tbl_router_nexthop =
+ __le32_to_cpu(resp.size_tbl_router_nexthop);
return 0;
}
@@ -1164,6 +1350,12 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
case PRESTERA_ACL_RULE_ACTION_TRAP:
/* just rule action id, no specific data */
break;
+ case PRESTERA_ACL_RULE_ACTION_JUMP:
+ action->jump.index = __cpu_to_le32(info->jump.index);
+ break;
+ case PRESTERA_ACL_RULE_ACTION_POLICE:
+ action->police.id = __cpu_to_le32(info->police.id);
+ break;
case PRESTERA_ACL_RULE_ACTION_COUNT:
action->count.id = __cpu_to_le32(info->count.id);
break;
@@ -1295,27 +1487,39 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
return 0;
}
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
+ enum prestera_cmd_type_t cmd_type;
+
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND,
- &req.cmd, sizeof(req));
}
-int prestera_hw_span_unbind(const struct prestera_port *port)
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
+ enum prestera_cmd_type_t cmd_type;
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
- &req.cmd, sizeof(req));
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
}
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
@@ -1467,7 +1671,7 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1485,7 +1689,7 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1503,14 +1707,15 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked)
{
struct prestera_msg_port_attr_req req = {
- .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
- .flood = flood,
+ .br_locked = br_locked,
}
};
@@ -1518,41 +1723,6 @@ static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val)
-{
- int err;
-
- if (port->sw->dev->fw_rev.maj <= 2) {
- if (!(mask & BR_FLOOD))
- return 0;
-
- return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD);
- }
-
- if (mask & BR_FLOOD) {
- err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD);
- if (err)
- goto err_uc_flood;
- }
-
- if (mask & BR_MCAST_FLOOD) {
- err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD);
- if (err)
- goto err_mc_flood;
- }
-
- return 0;
-
-err_mc_flood:
- prestera_hw_port_mc_flood_set(port, 0);
-err_uc_flood:
- if (mask & BR_FLOOD)
- prestera_hw_port_uc_flood_set(port, 0);
-
- return err;
-}
-
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -1831,8 +2001,8 @@ static int prestera_iface_to_msg(struct prestera_iface *iface,
int prestera_hw_rif_create(struct prestera_switch *sw,
struct prestera_iface *iif, u8 *mac, u16 *rif_id)
{
- struct prestera_msg_rif_req req;
struct prestera_msg_rif_resp resp;
+ struct prestera_msg_rif_req req;
int err;
memcpy(req.mac, mac, ETH_ALEN);
@@ -1868,9 +2038,9 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
{
- int err;
struct prestera_msg_vr_resp resp;
struct prestera_msg_vr_req req;
+ int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
@@ -1891,6 +2061,112 @@ int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
sizeof(req));
}
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .grp_id = __cpu_to_le32(grp_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id)
+{
+ struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count),
+ .grp_id = __cpu_to_le32(grp_id) };
+ int i, err;
+
+ for (i = 0; i < count; i++) {
+ req.nh[i].is_active = nhs[i].connected;
+ memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN);
+ err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif);
+ if (err)
+ return err;
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */)
+{
+ static struct prestera_msg_nh_chunk_resp resp;
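+ /* likely kept static to keep the 1032-byte response off the stack */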
+ struct prestera_msg_nh_chunk_req req;
+ u32 buf_offset;
+ int err;
+
+ memset(&hw_state[0], 0, buf_size);
+ buf_offset = 0;
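+ /* read the activity bitmap in PRESTERA_MSG_CHUNK_SIZE pieces;
+ * the FW expects the offset in bits, hence the "* 8" below
+ */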
+ while (1) {
+ if (buf_offset >= buf_size)
+ break;
+
+ memset(&req, 0, sizeof(req));
+ req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */
+ err = prestera_cmd_ret(sw,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET,
+ &req.cmd, sizeof(req), &resp.ret,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ memcpy(&hw_state[buf_offset], &resp.hw_state[0],
+ buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
+ buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE);
+ buf_offset += PRESTERA_MSG_CHUNK_SIZE;
+ }
+
+ return 0;
+}
+
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id)
+{
+ struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) };
+ struct prestera_msg_nh_grp_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *grp_id = __le32_to_cpu(resp.grp_id);
+ return err;
+}
+
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id)
+{
+ struct prestera_msg_nh_grp_req req = {
+ .grp_id = __cpu_to_le32(grp_id),
+ .size = __cpu_to_le32(nh_count)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
@@ -2108,3 +2384,178 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
&req.cmd, sizeof(req));
}
+
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id)
+{
+ struct prestera_msg_policer_resp resp;
+ struct prestera_msg_policer_req req = {
+ .type = type
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *policer_id = __le32_to_cpu(resp.id);
+ return 0;
+}
+
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id)
+{
+ struct prestera_msg_policer_req req = {
+ .id = __cpu_to_le32(policer_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs)
+{
+ struct prestera_msg_policer_req req = {
+ .mode = PRESTERA_POLICER_MODE_SR_TCM,
+ .id = __cpu_to_le32(policer_id),
+ .sr_tcm = {
+ .cir = __cpu_to_le64(cir),
+ .cbs = __cpu_to_le32(cbs)
+ }
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_create_resp resp;
+ struct prestera_msg_flood_domain_create_req req;
+ int err;
+
+ err = prestera_cmd_ret(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd,
+ sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ domain->idx = __le32_to_cpu(resp.flood_domain_idx);
+
+ return 0;
+}
+
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ struct prestera_msg_flood_domain_ports_set_req *req;
+ struct prestera_msg_flood_domain_port *ports;
+ struct prestera_switch *sw = domain->sw;
+ struct prestera_port *port;
+ u32 ports_num = 0;
+ int buf_size;
+ void *buff;
+ u16 lag_id;
+ int err;
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node)
+ ports_num++;
+
+ if (!ports_num)
+ return -EINVAL;
+
+ buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
+
+ buff = kmalloc(buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ ports = buff + sizeof(*req);
+
+ req->flood_domain_idx = __cpu_to_le32(domain->idx);
+ req->ports_num = __cpu_to_le32(ports_num);
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node) {
+ if (netif_is_lag_master(flood_domain_port->dev)) {
+ if (prestera_lag_id(sw, flood_domain_port->dev,
+ &lag_id)) {
+ kfree(buff);
+ return -EINVAL;
+ }
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
+ ports->lag_id = __cpu_to_le16(lag_id);
+ } else {
+ port = prestera_port_dev_lower_find(flood_domain_port->dev);
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT);
+ ports->dev_num = __cpu_to_le32(port->dev_id);
+ ports->port_num = __cpu_to_le32(port->hw_id);
+ }
+
+ ports->vid = __cpu_to_le16(flood_domain_port->vid);
+
+ ports++;
+ }
+
+ err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
+ &req->cmd, buf_size);
+
+ kfree(buff);
+
+ return err;
+}
+
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_ports_reset_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_create_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd,
+ sizeof(req));
+}
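
For reference, a minimal sketch of the intended calling order for the
policer helpers added above (error handling elided; the rate and burst
values are hypothetical):

	u32 policer_id;

	prestera_hw_policer_create(sw, PRESTERA_POLICER_TYPE_INGRESS,
				   &policer_id);
	/* 1 MB/s committed rate, 8 KiB committed burst */
	prestera_hw_policer_sr_tcm_set(sw, policer_id, 1000000, 8192);
	prestera_hw_policer_release(sw, policer_id);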
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 3ff12bae5909..0a929279e1ce 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -107,6 +107,11 @@ enum {
PRESTERA_STP_FORWARD,
};
+enum {
+ PRESTERA_POLICER_TYPE_INGRESS,
+ PRESTERA_POLICER_TYPE_EGRESS
+};
+
enum prestera_hw_cpu_code_cnt_t {
PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0,
PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
@@ -118,9 +123,10 @@ enum prestera_hw_vtcam_direction_t {
};
enum {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
};
struct prestera_switch;
@@ -138,6 +144,9 @@ struct prestera_acl_hw_action_info;
struct prestera_acl_iface;
struct prestera_counter_stats;
struct prestera_iface;
+struct prestera_flood_domain;
+struct prestera_mdb_entry;
+struct prestera_neigh_info;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -173,8 +182,10 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *stats);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val);
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -235,8 +246,9 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
/* SPAN API */
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
-int prestera_hw_span_unbind(const struct prestera_port *port);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress);
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress);
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
/* Router API */
@@ -249,6 +261,22 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+/* LPM API */
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id);
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len);
+
+/* NH API */
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id);
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */);
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id);
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
@@ -282,4 +310,21 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
enum prestera_hw_cpu_code_cnt_t counter_type,
u64 *packet_count);
+/* Policer API */
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id);
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id);
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs);
+
+/* Flood domain / MDB API */
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain);
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb);
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb);
+
#endif /* _PRESTERA_HW_H_ */
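
A sketch of the expected call order for the new router helpers
(hypothetical values; the prestera_neigh_info array "nhs" would be
filled by the router layer, which this patch does not show):

	u32 grp_id;

	prestera_hw_nh_group_create(sw, nh_count, &grp_id);
	prestera_hw_nh_entries_set(sw, nh_count, nhs, grp_id);
	/* 10.0.0.0/24 via the new group in virtual router vr_id */
	prestera_hw_lpm_add(sw, vr_id, htonl(0x0a000000), 24, grp_id);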
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 08fdd1e50388..24f9d6024745 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
#include "prestera.h"
#include "prestera_hw.h"
@@ -28,6 +29,43 @@
#define PRESTERA_MAC_ADDR_NUM_MAX 255
static struct workqueue_struct *prestera_wq;
+static struct workqueue_struct *prestera_owq;
+
+void prestera_queue_work(struct work_struct *work)
+{
+ queue_work(prestera_owq, work);
+}
+
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay)
+{
+ queue_delayed_work(prestera_wq, work, delay);
+}
+
+void prestera_queue_drain(void)
+{
+ drain_workqueue(prestera_wq);
+ drain_workqueue(prestera_owq);
+}
+
+int prestera_port_learning_set(struct prestera_port *port, bool learn)
+{
+ return prestera_hw_port_learning_set(port, learn);
+}
+
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_uc_flood_set(port, flood);
+}
+
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_mc_flood_set(port, flood);
+}
+
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked)
+{
+ return prestera_hw_port_br_locked_set(port, br_locked);
+}
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
@@ -85,6 +123,14 @@ struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
return port;
}
+struct prestera_switch *prestera_switch_get(struct net_device *dev)
+{
+ struct prestera_port *port;
+
+ port = prestera_port_dev_lower_find(dev);
+ return port ? port->sw : NULL;
+}
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg)
{
@@ -113,18 +159,24 @@ static int prestera_port_open(struct net_device *dev)
struct prestera_port_mac_config cfg_mac;
int err = 0;
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
- err = prestera_port_cfg_mac_read(port, &cfg_mac);
- if (!err) {
- cfg_mac.admin = true;
- err = prestera_port_cfg_mac_write(port, &cfg_mac);
- }
+ if (port->phy_link) {
+ phylink_start(port->phy_link);
} else {
- port->cfg_phy.admin = true;
- err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port,
+ &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
}
netif_start_queue(dev);
@@ -140,23 +192,261 @@ static int prestera_port_close(struct net_device *dev)
netif_stop_queue(dev);
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ if (port->phy_link) {
+ phylink_stop(port->phy_link);
+ phylink_disconnect_phy(port->phy_link);
err = prestera_port_cfg_mac_read(port, &cfg_mac);
if (!err) {
cfg_mac.admin = false;
prestera_port_cfg_mac_write(port, &cfg_mac);
}
} else {
- port->cfg_phy.admin = false;
- err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+ }
+
+ return err;
+}
+
+static void
+prestera_port_mac_state_cache_read(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ *state = port->state_mac;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static void
+prestera_port_mac_state_cache_write(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ port->state_mac = *state;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct prestera_port, phylink_pcs);
+}
+
+static void prestera_mac_config(struct phylink_config *config,
+ unsigned int an_mode,
+ const struct phylink_link_state *state)
+{
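+ /* nothing to do: the MAC is actually programmed from prestera_pcs_config() */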
+}
+
+static void prestera_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(ndev);
+ struct prestera_port_mac_state state_mac;
+
+ /* Invalidate; the parameters will be updated on the next link event. */
+ memset(&state_mac, 0, sizeof(state_mac));
+ state_mac.valid = false;
+ prestera_port_mac_state_cache_write(port, &state_mac);
+}
+
+static void prestera_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+}
+
+static struct phylink_pcs *
+prestera_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *dev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->phylink_pcs;
+}
+
+static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct prestera_port *port = container_of(pcs, struct prestera_port,
+ phylink_pcs);
+ struct prestera_port_mac_state smac;
+
+ prestera_port_mac_state_cache_read(port, &smac);
+
+ if (smac.valid) {
+ state->link = smac.oper ? 1 : 0;
+ /* AN is complete when the port is up */
+ state->an_complete = (smac.oper && port->autoneg) ? 1 : 0;
+ state->speed = smac.speed;
+ state->duplex = smac.duplex;
+ } else {
+ state->link = 0;
+ state->an_complete = 0;
+ }
+}
+
+static int prestera_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct prestera_port *port = prestera_pcs_to_port(pcs);
+ struct prestera_port_mac_config cfg_mac;
+ int err;
+
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (err)
+ return err;
+
+ cfg_mac.admin = true;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ cfg_mac.speed = SPEED_10000;
+ cfg_mac.inband = 0;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cfg_mac.speed = SPEED_2500;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cfg_mac.inband = 1;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ default:
+ cfg_mac.speed = SPEED_1000;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X;
+ break;
+ }
+
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void prestera_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ /* TODO: add 1000baseX AN restart support.
+ * The FW cannot restart 1000baseX autonegotiation yet, so this
+ * callback stays empty for now.
+ */
+}
+
+static const struct phylink_mac_ops prestera_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = prestera_mac_select_pcs,
+ .mac_config = prestera_mac_config,
+ .mac_link_down = prestera_mac_link_down,
+ .mac_link_up = prestera_mac_link_up,
+};
+
+static const struct phylink_pcs_ops prestera_pcs_ops = {
+ .pcs_get_state = prestera_pcs_get_state,
+ .pcs_config = prestera_pcs_config,
+ .pcs_an_restart = prestera_pcs_an_restart,
+};
+
+static int prestera_port_sfp_bind(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct device_node *ports, *node;
+ struct fwnode_handle *fwnode;
+ struct phylink *phy_link;
+ int err = 0;
+
+ if (!sw->np)
+ return 0;
+
+ of_node_get(sw->np);
+ ports = of_find_node_by_name(sw->np, "ports");
+
+ for_each_child_of_node(ports, node) {
+ int num;
+
+ err = of_property_read_u32(node, "prestera,port-num", &num);
+ if (err) {
+ dev_err(sw->dev->dev,
+ "device node %pOF has no valid reg property: %d\n",
+ node, err);
+ goto out;
+ }
+
+ if (port->fp_id != num)
+ continue;
+
+ port->phylink_pcs.ops = &prestera_pcs_ops;
+
+ port->phy_config.dev = &port->dev->dev;
+ port->phy_config.type = PHYLINK_NETDEV;
+
+ fwnode = of_fwnode_handle(node);
+
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phy_config.supported_interfaces);
+
+ port->phy_config.mac_capabilities =
+ MAC_1000 | MAC_2500FD | MAC_10000FD;
+
+ phy_link = phylink_create(&port->phy_config, fwnode,
+ PHY_INTERFACE_MODE_INTERNAL,
+ &prestera_mac_ops);
+ if (IS_ERR(phy_link)) {
+ netdev_err(port->dev, "failed to create phylink\n");
+ err = PTR_ERR(phy_link);
+ goto out;
+ }
+
+ port->phy_link = phy_link;
+ break;
}
+out:
+ of_node_put(node);
+ of_node_put(ports);
return err;
}
+static int prestera_port_sfp_unbind(struct prestera_port *port)
+{
+ if (port->phy_link)
+ phylink_destroy(port->phy_link);
+
+ return 0;
+}
+
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -337,6 +627,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
port->id = id;
port->sw = sw;
+ spin_lock_init(&port->state_mac_lock);
+
err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
&port->fp_id);
if (err) {
@@ -351,8 +643,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
+ SET_NETDEV_DEV(dev, sw->dev->dev);
- netif_carrier_off(dev);
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP)
+ netif_carrier_off(dev);
dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
dev->min_mtu = sw->mtu_min;
@@ -403,7 +697,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
cfg_mac.admin = false;
cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
}
- cfg_mac.inband = false;
+ cfg_mac.inband = 0;
cfg_mac.speed = 0;
cfg_mac.duplex = DUPLEX_UNKNOWN;
cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
@@ -445,8 +739,13 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
prestera_devlink_port_set(port);
+ err = prestera_port_sfp_bind(port);
+ if (err)
+ goto err_sfp_bind;
+
return 0;
+err_sfp_bind:
err_register_netdev:
prestera_port_list_del(port);
err_port_init:
@@ -492,8 +791,10 @@ static int prestera_create_ports(struct prestera_switch *sw)
return 0;
err_port_create:
- list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list) {
+ prestera_port_sfp_unbind(port);
prestera_port_destroy(port);
+ }
return err;
}
@@ -501,25 +802,45 @@ err_port_create:
static void prestera_port_handle_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
+ struct prestera_port_mac_state smac;
+ struct prestera_port_event *pevt;
struct delayed_work *caching_dw;
struct prestera_port *port;
- port = prestera_find_port(sw, evt->port_evt.port_id);
- if (!port || !port->dev)
- return;
-
- caching_dw = &port->cached_hw_stats.caching_dw;
-
- prestera_ethtool_port_state_changed(port, &evt->port_evt);
-
if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ pevt = &evt->port_evt;
+ port = prestera_find_port(sw, pevt->port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
+ }
+ prestera_port_mac_state_cache_write(port, &smac);
+
if (port->state_mac.oper) {
- netif_carrier_on(port->dev);
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, true);
+ else
+ netif_carrier_on(port->dev);
+
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
- } else if (netif_running(port->dev) &&
- netif_carrier_ok(port->dev)) {
- netif_carrier_off(port->dev);
+ } else {
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, false);
+ else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
+ netif_carrier_off(port->dev);
+
if (delayed_work_pending(caching_dw))
cancel_delayed_work(caching_dw);
}
@@ -542,18 +863,20 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
struct device_node *base_mac_np;
- struct device_node *np;
- int ret;
+ int ret = 0;
- np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
- base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+ if (sw->np) {
+ base_mac_np = of_parse_phandle(sw->np, "base-mac-provider", 0);
+ if (base_mac_np) {
+ ret = of_get_mac_address(base_mac_np, sw->base_mac);
+ of_node_put(base_mac_np);
+ }
+ }
- ret = of_get_mac_address(base_mac_np, sw->base_mac);
- if (ret) {
+ if (!is_valid_ether_addr(sw->base_mac) || ret) {
eth_random_addr(sw->base_mac);
dev_info(prestera_dev(sw), "using random base mac address\n");
}
- of_node_put(base_mac_np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
@@ -578,6 +901,30 @@ static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
return NULL;
}
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id)
+{
+ struct prestera_lag *lag;
+ int free_id = -1;
+ int id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = prestera_lag_by_id(sw, id);
+ if (lag->member_count) {
+ if (lag->dev == lag_dev) {
+ *lag_id = id;
+ return 0;
+ }
+ } else if (free_id < 0) {
+ free_id = id;
+ }
+ }
+ if (free_id < 0)
+ return -ENOSPC;
+ *lag_id = free_id;
+ return 0;
+}
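+
+/* Illustrative (hypothetical) caller of the helper above: binding a port
+ * to a bonding netdev would look roughly like
+ *
+ *	u16 lag_id;
+ *
+ *	err = prestera_lag_id(sw, bond_dev, &lag_id);
+ *	if (err)
+ *		return err;  (-ENOSPC once all sw->lag_max slots are used)
+ *
+ * reusing the id of an existing LAG for the same netdev, or claiming the
+ * first free slot otherwise.
+ */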
+
static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
struct net_device *lag_dev)
{
@@ -869,6 +1216,150 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
return notifier_from_errno(err);
}
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_flood_domain *flood_domain;
+ struct prestera_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ flood_domain = prestera_flood_domain_create(sw);
+ if (!flood_domain)
+ goto err_flood_domain_create;
+
+ mdb_entry->sw = sw;
+ mdb_entry->vid = vid;
+ mdb_entry->flood_domain = flood_domain;
+ ether_addr_copy(mdb_entry->addr, addr);
+
+ if (prestera_hw_mdb_create(mdb_entry))
+ goto err_mdb_hw_create;
+
+ return mdb_entry;
+
+err_mdb_hw_create:
+ prestera_flood_domain_destroy(flood_domain);
+err_flood_domain_create:
+ kfree(mdb_entry);
+err_mdb_alloc:
+ return NULL;
+}
+
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry)
+{
+ prestera_hw_mdb_destroy(mdb_entry);
+ prestera_flood_domain_destroy(mdb_entry->flood_domain);
+ kfree(mdb_entry);
+}
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw)
+{
+ struct prestera_flood_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->sw = sw;
+
+ if (prestera_hw_flood_domain_create(domain)) {
+ kfree(domain);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&domain->flood_domain_port_list);
+
+ return domain;
+}
+
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain)
+{
+ WARN_ON(!list_empty(&flood_domain->flood_domain_port_list));
+ WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain));
+ kfree(flood_domain);
+}
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ bool is_first_port_in_list = false;
+ int err;
+
+ flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL);
+ if (!flood_domain_port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ flood_domain_port->vid = vid;
+
+ if (list_empty(&flood_domain->flood_domain_port_list))
+ is_first_port_in_list = true;
+
+ list_add(&flood_domain_port->flood_domain_port_node,
+ &flood_domain->flood_domain_port_list);
+
+ flood_domain_port->flood_domain = flood_domain;
+ flood_domain_port->dev = dev;
+
+ if (!is_first_port_in_list) {
+ err = prestera_hw_flood_domain_ports_reset(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+ }
+
+ err = prestera_hw_flood_domain_ports_set(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+
+ return 0;
+
+err_prestera_mdb_port_create_hw:
+ list_del(&flood_domain_port->flood_domain_port_node);
+ kfree(flood_domain_port);
+err_port_alloc:
+ return err;
+}
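+
+/* Note on the reset/set sequence above: the hw interface appears to take
+ * the full port list of a flood domain, so every membership change
+ * re-pushes the whole list; the reset step is skipped for the first port
+ * because there is nothing in hw to clear yet.
+ */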
+
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port)
+{
+ struct prestera_flood_domain *flood_domain = port->flood_domain;
+
+ list_del(&port->flood_domain_port_node);
+
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain));
+
+ if (!list_empty(&flood_domain->flood_domain_port_list))
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain));
+
+ kfree(port);
+}
+
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ list_for_each_entry(flood_domain_port,
+ &flood_domain->flood_domain_port_list,
+ flood_domain_port_node)
+ if (flood_domain_port->dev == dev &&
+ vid == flood_domain_port->vid)
+ return flood_domain_port;
+
+ return NULL;
+}
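+
+/* Relationship between the objects created above, as sketched by this code:
+ *
+ *	mdb_entry {mac, vid} --> flood_domain --> [flood_domain_port, ...]
+ *
+ * each MDB entry owns exactly one flood domain, and the domain holds the
+ * (netdev, vid) pairs that the multicast traffic is replicated to.
+ */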
+
static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
{
sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
@@ -885,6 +1376,8 @@ static int prestera_switch_init(struct prestera_switch *sw)
{
int err;
+ sw->np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+
err = prestera_hw_switch_init(sw);
if (err) {
dev_err(prestera_dev(sw), "Failed to init Switch device\n");
@@ -982,8 +1475,10 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
+ prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
+ of_node_put(sw->np);
}
int prestera_device_register(struct prestera_device *dev)
@@ -1023,12 +1518,19 @@ static int __init prestera_module_init(void)
if (!prestera_wq)
return -ENOMEM;
+ prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0);
+ if (!prestera_owq) {
+ destroy_workqueue(prestera_wq);
+ return -ENOMEM;
+ }
+
return 0;
}
static void __exit prestera_module_exit(void)
{
destroy_workqueue(prestera_wq);
+ destroy_workqueue(prestera_owq);
}
module_init(prestera_module_init);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
new file mode 100644
index 000000000000..1da9c1bc1ee9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
+
+static int prestera_mall_prio_check(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ u32 flower_prio_min;
+ u32 flower_prio_max;
+ int err;
+
+ err = prestera_flower_prio_get(block, f->common.chain_index,
+ &flower_prio_min, &flower_prio_max);
+ if (err == -ENOENT)
+ /* No flower filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+
+ if (f->common.prio <= flower_prio_max && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= flower_prio_min && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing flower rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
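+
+/* Example of the ordering enforced above (tc commands illustrative only):
+ * on an ingress block a matchall mirror must use a lower prio value, i.e.
+ * be matched before any already-installed flower rule, so
+ *
+ *	tc filter add dev sw1p1 ingress pref 1 matchall \
+ *		action mirred egress mirror dev sw1p2
+ *	tc filter add dev sw1p1 ingress pref 2 flower ...
+ *
+ * is accepted, while adding the matchall at pref 3 would fail with
+ * -EOPNOTSUPP; on egress the required ordering is reversed.
+ */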
+
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max)
+{
+ if (!block->mall.bound)
+ return -ENOENT;
+
+ *prio_min = block->mall.prio_min;
+ *prio_max = block->mall.prio_max;
+ return 0;
+}
+
+static void prestera_mall_prio_update(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ block->mall.prio_min = min(block->mall.prio_min, f->common.prio);
+ block->mall.prio_max = max(block->mall.prio_max, f->common.prio);
+}
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ err = prestera_mall_prio_check(block, f);
+ if (err)
+ return err;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err == -EEXIST)
+ return err;
+ if (err)
+ goto rollback;
+ }
+
+ prestera_mall_prio_update(block, f);
+
+ block->mall.bound = true;
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+ return err;
+}
+
+void prestera_mall_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.h b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
new file mode 100644
index 000000000000..fed08be80257
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_MATCHALL_H_
+#define _PRESTERA_MATCHALL_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_mall_destroy(struct prestera_flow_block *block);
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max);
+
+#endif /* _PRESTERA_MATCHALL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index f538a749ebd4..59470d99f522 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id prestera_pci_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 8a3b7b664358..4046be0e86ff 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -4,11 +4,80 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/inetdevice.h>
+#include <net/inet_dscp.h>
#include <net/switchdev.h>
+#include <linux/rhashtable.h>
+#include <net/nexthop.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
+#include <net/netevent.h>
#include "prestera.h"
#include "prestera_router_hw.h"
+#define PRESTERA_IMPLICITLY_RESOLVE_DEAD_NEIGH
+#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */
+
+struct prestera_kern_neigh_cache_key {
+ struct prestera_ip_addr addr;
+ struct net_device *dev;
+};
+
+struct prestera_kern_neigh_cache {
+ struct prestera_kern_neigh_cache_key key;
+ struct rhash_head ht_node;
+ struct list_head kern_fib_cache_list;
+ /* Holds prepared nh_neigh info while the neigh is in_kernel */
+ struct prestera_neigh_info nh_neigh_info;
+ /* Indicates that the neighbour is reachable by a direct route */
+ bool reachable;
+ /* Pins the cache entry while the neigh is present in the kernel */
+ bool in_kernel;
+};
+
+struct prestera_kern_fib_cache_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 kern_tb_id; /* tb_id from the kernel (not passed through prestera_fix_tb_id()) */
+};
+
+/* Cached kernel fib entry; also subscribes to the kernel neighbours it uses */
+struct prestera_kern_fib_cache {
+ struct prestera_kern_fib_cache_key key;
+ struct {
+ struct prestera_fib_key fib_key;
+ enum prestera_fib_type fib_type;
+ struct prestera_nexthop_group_key nh_grp_key;
+ } lpm_info; /* hold prepared lpm info */
+ struct rhash_head ht_node; /* node of prestera_router */
+ struct prestera_kern_neigh_cache_head {
+ struct prestera_kern_fib_cache *this;
+ struct list_head head;
+ struct prestera_kern_neigh_cache *n_cache;
+ } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
+ union {
+ struct fib_notifier_info info; /* point to any of 4/6 */
+ struct fib_entry_notifier_info fen4_info;
+ };
+ /* Indicates that the route is not overlapped by another table */
+ bool reachable;
+};
+
+static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_neigh_cache, key),
+ .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_neigh_cache_key),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_fib_cache, key),
+ .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_fib_cache_key),
+ .automatic_shrinking = true,
+};
+
/* Helper to translate kernel table ids for the default vr into the hw vr id */
static u32 prestera_fix_tb_id(u32 tb_id)
{
@@ -20,15 +89,1190 @@ static u32 prestera_fix_tb_id(u32 tb_id)
return tb_id;
}
+static void
+prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ memset(key, 0, sizeof(*key));
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
+ key->prefix_len = fen_info->dst_len;
+ key->kern_tb_id = fen_info->tb_id;
+}
+
+static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ if (nhc->nhc_gw_family == AF_INET) {
+ nk->addr.v = PRESTERA_IPV4;
+ nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
+ } else {
+ nk->addr.v = PRESTERA_IPV6;
+ nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
+ }
+
+ nk->dev = nhc->nhc_dev;
+ return 0;
+}
+
+static void
+prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
+ struct prestera_nh_neigh_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ nk->addr = ck->addr;
+ nk->rif = (void *)ck->dev;
+}
+
+static bool
+prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ struct prestera_kern_neigh_cache_key tk;
+ int err;
+
+ err = prestera_util_nhc2nc_key(sw, nhc, &tk);
+ if (err)
+ return false;
+
+ if (memcmp(&tk, nk, sizeof(tk)))
+ return false;
+
+ return true;
+}
+
+static int
+prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ if (n->tbl->family == AF_INET) {
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = *(__be32 *)n->primary_key;
+ } else {
+ return -ENOENT;
+ }
+
+ key->dev = n->dev;
+
+ return 0;
+}
+
+static bool __prestera_fi_is_direct(struct fib_info *fi)
+{
+ struct fib_nh *fib_nh;
+
+ if (fib_info_num_path(fi) == 1) {
+ fib_nh = fib_info_nh(fi, 0);
+ if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
+ return true;
+ }
+
+ return false;
+}
+
+static bool prestera_fi_is_direct(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi_is_direct(fi);
+}
+
+static bool prestera_fi_is_nh(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi_is_direct(fi);
+}
+
+static bool __prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (!fi->fib6_nh->nh_common.nhc_gw_family)
+ return true;
+
+ return false;
+}
+
+static bool prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fi6_is_nh(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_direct(fen_info->fi);
+ else
+ return prestera_fi6_is_direct(fen6_info->rt);
+}
+
+static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_nh(fen_info->fi);
+ else
+ return prestera_fi6_is_nh(fen6_info->rt);
+}
+
+/* must be called with rcu_read_lock() */
+static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
+ __be32 *addr)
+{
+ struct flowi4 fl4;
+
+ /* TODO: walk through the appropriate kernel tables to learn whether
+ * the same prefix exists in several tables (tb_id is currently unused
+ * here for exactly that reason)
+ */
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = *addr;
+ return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
+}
+
+static bool
+__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ struct net_device *dev)
+{
+ struct fib_nh *fib_nh;
+ struct fib_result res;
+ bool reachable;
+
+ reachable = false;
+
+ if (!prestera_util_kern_get_route(&res, tb_id, addr))
+ if (prestera_fi_is_direct(res.fi)) {
+ fib_nh = fib_info_nh(res.fi, 0);
+ if (dev == fib_nh->fib_nh_dev)
+ reachable = true;
+ }
+
+ return reachable;
+}
+
+/* Check if neigh route is reachable */
+static bool
+prestera_util_kern_n_is_reachable(u32 tb_id,
+ struct prestera_ip_addr *addr,
+ struct net_device *dev)
+{
+ if (addr->v == PRESTERA_IPV4)
+ return __prestera_util_kern_n_is_reachable_v4(tb_id,
+ &addr->u.ipv4,
+ dev);
+ else
+ return false;
+}
+
+static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
+ bool offloaded)
+{
+ if (offloaded)
+ n->flags |= NTF_OFFLOADED;
+ else
+ n->flags &= ~NTF_OFFLOADED;
+}
+
+static void
+prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
+{
+ if (offloaded)
+ nhc->nhc_flags |= RTNH_F_OFFLOAD;
+ else
+ nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
+
+ if (trap)
+ nhc->nhc_flags |= RTNH_F_TRAP;
+ else
+ nhc->nhc_flags &= ~RTNH_F_TRAP;
+}
+
+static struct fib_nh_common *
+prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+ struct fib6_info *iter;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return &fib_info_nh(fen4_info->fi, n)->nh_common;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ if (!n)
+ return &fen6_info->rt->fib6_nh->nh_common;
+
+ list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
+ fib6_siblings) {
+ if (!--n)
+ return &iter->fib6_nh->nh_common;
+ }
+ }
+
+ /* If the family is incorrect, the callers have a bug. If the
+ * requested index is not found, that is also a bug: a valid index
+ * must be produced by the nhs helper, which checks the list length.
+ */
+ WARN(1, "Invalid parameters passed to %s n=%d info=%p",
+ __func__, n, info);
+ return NULL;
+}
+
+static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fib_info_num_path(fen4_info->fi);
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ return fen6_info->rt->fib6_nsiblings + 1;
+ }
+
+ return 0;
+}
+
+static unsigned char
+prestera_kern_fib_info_type(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fen4_info->fi->fib_type;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ /* TODO: in IPv6, ECMP is represented by several sibling routes,
+ * each carrying a single nexthop.
+ */
+ return fen6_info->rt->fib6_type;
+ }
+
+ return RTN_UNSPEC;
+}
+
+/* A uc_nh route whose key equals its single nexthop address is treated as
+ * a neighbour route.
+ */
+static bool
+prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
+{
+ if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
+ return false;
+
+ if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
+ return false;
+
+ if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
+ return false;
+
+ if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
+ &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
+ return false;
+
+ return true;
+}
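+
+/* e.g. a 10.0.0.1/32 UC_NH fib node whose single nexthop gateway is also
+ * 10.0.0.1 is a neighbour route - typically one installed by
+ * __prestera_k_arb_n_lpm_set() below rather than configured by the user.
+ */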
+
+static int prestera_dev_if_type(const struct net_device *dev)
+{
+ struct macvlan_dev *vlan;
+
+ if (is_vlan_dev(dev) &&
+ netif_is_bridge_master(vlan_dev_real_dev(dev))) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_bridge_master(dev)) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_lag_master(dev)) {
+ return PRESTERA_IF_LAG_E;
+ } else if (netif_is_macvlan(dev)) {
+ vlan = netdev_priv(dev);
+ return prestera_dev_if_type(vlan->lowerdev);
+ } else {
+ return PRESTERA_IF_PORT_E;
+ }
+}
+
+static int
+prestera_neigh_iface_init(struct prestera_switch *sw,
+ struct prestera_iface *iface,
+ struct neighbour *n)
+{
+ struct prestera_port *port;
+
+ iface->vlan_id = 0; /* TODO: vlan egress */
+ iface->type = prestera_dev_if_type(n->dev);
+ if (iface->type != PRESTERA_IF_PORT_E)
+ return -EINVAL;
+
+ if (!prestera_netdev_check(n->dev))
+ return -EINVAL;
+
+ port = netdev_priv(n->dev);
+ iface->dev_port.hw_dev_num = port->dev_id;
+ iface->dev_port.port_num = port->hw_id;
+
+ return 0;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache =
+ rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
+ __prestera_kern_neigh_cache_ht_params);
+ return IS_ERR(n_cache) ? NULL : n_cache;
+}
+
+static void
+__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ dev_put(n_cache->key.dev);
+}
+
+static void
+__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static struct prestera_kern_neigh_cache *
+__prestera_kern_neigh_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
+ if (!n_cache)
+ goto err_kzalloc;
+
+ memcpy(&n_cache->key, key, sizeof(*key));
+ dev_hold(n_cache->key.dev);
+
+ INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
+ err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return n_cache;
+
+err_ht_insert:
+ dev_put(n_cache->key.dev);
+ kfree(n_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_get(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, key);
+ if (!n_cache)
+ n_cache = __prestera_kern_neigh_cache_create(sw, key);
+
+ return n_cache;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_put(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ if (!n_cache->in_kernel &&
+ list_empty(&n_cache->kern_fib_cache_list)) {
+ __prestera_kern_neigh_cache_destroy(sw, n_cache);
+ return NULL;
+ }
+
+ return n_cache;
+}
+
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+
+ fib_cache =
+ rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
+ __prestera_kern_fib_cache_ht_params);
+ return fib_cache;
+}
+
+static void
+__prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int i;
+
+ for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
+ n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
+ if (n_cache) {
+ list_del(&fib_cache->kern_neigh_cache_head[i].head);
+ prestera_kern_neigh_cache_put(sw, n_cache);
+ }
+ }
+
+ fib_info_put(fib_cache->fen4_info.fi);
+}
+
+static void
+prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static int
+__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_neigh_cache_key nc_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ struct fib_nh_common *nhc;
+ int i, nhs, err;
+
+ if (!prestera_fib_info_is_nh(&fc->info))
+ return 0;
+
+ nhs = prestera_kern_fib_info_nhs(&fc->info);
+ if (nhs > PRESTERA_NHGR_SIZE_MAX)
+ return 0;
+
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
+ err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
+ if (err)
+ return 0;
+
+ n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
+ if (!n_cache)
+ return 0;
+
+ fc->kern_neigh_cache_head[i].this = fc;
+ fc->kern_neigh_cache_head[i].n_cache = n_cache;
+ list_add(&fc->kern_neigh_cache_head[i].head,
+ &n_cache->kern_fib_cache_list);
+ }
+
+ return 0;
+}
+
+/* Operations on fi (offload, etc.) must be wrapped in utils.
+ * This function just creates the storage.
+ */
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key,
+ struct fib_notifier_info *info)
+{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
+ if (!fib_cache)
+ goto err_kzalloc;
+
+ memcpy(&fib_cache->key, key, sizeof(*key));
+ fib_info_hold(fen_info->fi);
+ memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
+
+ err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* Create the nexthop caches; a failure here is not critical, the
+ * fib cache entry is still usable without them.
+ */
+ __prestera_kern_fib_cache_create_nhs(sw, fib_cache);
+
+ return fib_cache;
+
+err_ht_insert:
+ fib_info_put(fen_info->fi);
+ kfree(fib_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fibc,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded, bool trap)
+{
+ struct fib_nh_common *nhc;
+ int i, nhs;
+
+ nhs = prestera_kern_fib_info_nhs(&fibc->info);
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
+ if (!nc) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ continue;
+ }
+
+ if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ break;
+ }
+ }
+}
+
+static void
+__prestera_k_arb_n_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded)
+{
+ struct neighbour *n;
+
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ return;
+
+ prestera_util_kern_set_neigh_offload(n, offloaded);
+ neigh_release(n);
+}
+
+static void
+__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool fail, bool offload, bool trap)
+{
+ struct fib_rt_info fri;
+
+ switch (fc->key.addr.v) {
+ case PRESTERA_IPV4:
+ fri.fi = fc->fen4_info.fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.dscp = fc->fen4_info.dscp;
+ fri.type = fc->fen4_info.type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+ return;
+ case PRESTERA_IPV6:
+ /* TODO */
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache,
+ bool enabled)
+{
+ struct prestera_nexthop_group_key nh_grp_key;
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ struct prestera_fib_node *fib_node;
+ struct prestera_fib_key fib_key;
+
+ /* Exception for an fc with a full-length prefix: the LPM entry is
+ * already used by the fib.
+ */
+ memset(&fc_key, 0, sizeof(fc_key));
+ fc_key.addr = n_cache->key.addr;
+ fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ /* It would be better to use the tb_id of the route that points to
+ * this neighbour; we take it from the rif instead, which is
+ * inconsistent - in_rif and out_rif should be kept separate.
+ * Also note: each fib that points to this neigh should get its own
+ * neigh lpm entry (one per ingress vr).
+ */
+ fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ memset(&fib_key, 0, sizeof(fib_key));
+ fib_key.addr = n_cache->key.addr;
+ fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
+ fib_node = prestera_fib_node_find(sw, &fib_key);
+ if (!fib_cache || !fib_cache->reachable) {
+ if (!enabled && fib_node) {
+ if (prestera_fib_node_util_is_neighbour(fib_node))
+ prestera_fib_node_destroy(sw, fib_node);
+ return;
+ }
+ }
+
+ if (enabled && !fib_node) {
+ memset(&nh_grp_key, 0, sizeof(nh_grp_key));
+ prestera_util_nc_key2nh_key(&n_cache->key,
+ &nh_grp_key.neigh[0]);
+ fib_node = prestera_fib_node_create(sw, &fib_key,
+ PRESTERA_FIB_TYPE_UC_NH,
+ &nh_grp_key);
+ if (!fib_node)
+ pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
+ &fib_key.addr.u.ipv4);
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
+ &nc->key.addr, nc->key.dev))
+ nc->reachable = true;
+ else
+ nc->reachable = false;
+}
+
+/* Kernel neighbour -> neigh_cache info */
+static void
+__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct neighbour *n;
+ int err;
+
+ memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
+ if (!n)
+ goto out;
+
+ read_lock_bh(&n->lock);
+ if (n->nud_state & NUD_VALID && !n->dead) {
+ err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
+ n);
+ if (err)
+ goto n_read_out;
+
+ memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
+ nc->nh_neigh_info.connected = true;
+ }
+n_read_out:
+ read_unlock_bh(&n->lock);
+out:
+ nc->in_kernel = nc->nh_neigh_info.connected;
+ if (n)
+ neigh_release(n);
+}
+
+/* neigh_cache info -> lpm update */
+static void
+__prestera_k_arb_nc_apply(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_kern_neigh_cache_head *nhead;
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ int err;
+
+ __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
+ __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh)
+ goto out;
+
+ /* Do hw update only if something changed to prevent nh flap */
+ if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
+ sizeof(nh_neigh->info))) {
+ memcpy(&nh_neigh->info, &nc->nh_neigh_info,
+ sizeof(nh_neigh->info));
+ err = prestera_nh_neigh_set(sw, nh_neigh);
+ if (err) {
+ pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
+ "prestera_nh_neigh_set", err,
+ &nh_neigh->key.addr.u.ipv4,
+ &nh_neigh->info.ha[0]);
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
+ __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
+ nc->in_kernel,
+ !nc->in_kernel);
+ }
+}
+
+static int
+__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct fib_nh_common *nhc;
+ int nh_cnt;
+
+ memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
+
+ switch (prestera_kern_fib_info_type(&fc->info)) {
+ case RTN_UNICAST:
+ if (prestera_fib_info_is_direct(&fc->info) &&
+ fc->key.prefix_len ==
+ PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
+ /* This is a special case: when the prefix length is the full
+ * address length, there would be a conflict in the lpm for a
+ * direct route - once the TRAP entry is added, there is no
+ * room left for the neighbour entry. So such a direct route
+ * is represented as NH, and the neighbour is then resolved
+ * as the nexthop of this route.
+ */
+ nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
+ fc->lpm_info.nh_grp_key.neigh[0].addr =
+ fc->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[0].rif =
+ nhc->nhc_dev;
+
+ break;
+ }
+
+ /* We could also take nh_grp_key from the fi, which would be
+ * more correct, because the cache does not always reflect what
+ * is actually written to the lpm. But for now we use the nh
+ * cache here as well (for this case).
+ */
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
+ break;
+
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
+ }
+
+ fc->lpm_info.fib_type = nh_cnt ?
+ PRESTERA_FIB_TYPE_UC_NH :
+ PRESTERA_FIB_TYPE_TRAP;
+ break;
+ /* Unsupported. Leave it for kernel: */
+ case RTN_BROADCAST:
+ case RTN_MULTICAST:
+ /* Routes we must trap by design: */
+ case RTN_LOCAL:
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ case RTN_BLACKHOLE:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
+ break;
+ default:
+ dev_err(sw->dev->dev, "Unsupported fib_type");
+ return -EOPNOTSUPP;
+ }
+
+ fc->lpm_info.fib_key.addr = fc->key.addr;
+ fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
+ fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
+
+ return 0;
+}
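+
+/* Worked example for the special case above (illustrative): a directly
+ * connected 192.0.2.1/32 route is programmed as PRESTERA_FIB_TYPE_UC_NH
+ * with neigh[0] = {192.0.2.1, <egress dev>}, so the single lpm entry both
+ * forwards to the host and keeps room for resolving it as a nexthop.
+ */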
+
+static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool enabled)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
+ if (fib_node)
+ prestera_fib_node_destroy(sw, fib_node);
+
+ if (!enabled)
+ return 0;
+
+ fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
+ fc->lpm_info.fib_type,
+ &fc->lpm_info.nh_grp_key);
+
+ if (!fib_node) {
+ dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
+ &fc->key.addr.u.ipv4, fc->key.prefix_len,
+ fc->key.kern_tb_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ int err;
+
+ err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
+ if (err)
+ return err;
+
+ err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
+ if (err) {
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc,
+ true, false, false);
+ return err;
+ }
+
+ switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ fc->reachable, false);
+ break;
+ case PRESTERA_FIB_TYPE_TRAP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ false, fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
+ fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_MAIN;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_LOCAL;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ struct neighbour *n;
+ bool hw_active;
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh) {
+ pr_err("Cannot find nh_neigh for cached %pI4n",
+ &nc->key.addr.u.ipv4);
+ return;
+ }
+
+ hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh);
+
+#ifdef PRESTERA_IMPLICITLY_RESOLVE_DEAD_NEIGH
+ if (!hw_active && nc->in_kernel)
+ goto out;
+#else /* PRESTERA_IMPLICITLY_RESOLVE_DEAD_NEIGH */
+ if (!hw_active)
+ goto out;
+#endif /* PRESTERA_IMPLICITLY_RESOLVE_DEAD_NEIGH */
+
+ if (nc->key.addr.v == PRESTERA_IPV4) {
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ } else {
+ n = NULL;
+ }
+
+ if (!IS_ERR(n) && n) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ } else {
+ pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4);
+ }
+
+out:
+ return;
+}
+
+/* Propagate hw state to kernel */
+static void prestera_k_arb_hw_evt(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_hw_state_upd(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
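+
+/* The walk above drops out of the RCU-protected iteration
+ * (rhashtable_walk_stop) before calling the update helper, so the helper
+ * does not run under the RCU read lock; per rhashtable walker semantics
+ * the restarted walk may then see an entry twice or miss a concurrently
+ * resized one, which is acceptable for this periodic probe.
+ */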
+
+/* Propagate kernel event to hw */
+static void prestera_k_arb_n_evt(struct prestera_switch *sw,
+ struct neighbour *n)
+{
+ struct prestera_kern_neigh_cache_key n_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ err = prestera_util_neigh2nc_key(sw, n, &n_key);
+ if (err)
+ return;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
+ if (!n_cache) {
+ n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
+ if (!n_cache)
+ return;
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ }
+
+ __prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+
+ prestera_kern_neigh_cache_put(sw, n_cache);
+}
+
+static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+static int
+prestera_k_arb_fib_evt(struct prestera_switch *sw,
+ bool replace, /* replace or del */
+ struct fib_notifier_info *info)
+{
+ struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ prestera_util_fen_info2fib_cache_key(info, &fc_key);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ if (fib_cache) {
+ fib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying destroyed fib_cache failed");
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache && bfib_cache) {
+ bfib_cache->reachable = true;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ prestera_kern_fib_cache_destroy(sw, fib_cache);
+ }
+
+ if (replace) {
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
+ if (!fib_cache) {
+ dev_err(sw->dev->dev, "fib_cache == NULL");
+ return -ENOENT;
+ }
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache)
+ fib_cache->reachable = true;
+
+ if (bfib_cache) {
+ bfib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev, "Applying fib_cache failed");
+ }
+
+ /* Update all neighs to resolve overlapped and apply related */
+ __prestera_k_arb_fib_evt2nc(sw);
+
+ return 0;
+}
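+
+/* Overlap handling above in short: for the same prefix, an entry in
+ * RT_TABLE_LOCAL overlaps ("sits on top of") the RT_TABLE_MAIN entry, so
+ * only the topmost one is marked reachable and offloaded; when the top
+ * entry goes away, the bottom one is re-applied as reachable.
+ */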
+
+static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_neigh_cache *n_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ if (!list_empty(&n_cache->kern_fib_cache_list)) {
+ WARN_ON(1); /* BUG */
+ return;
+ }
+ __prestera_k_arb_n_offload_set(sw, n_cache, false);
+ n_cache->in_kernel = false;
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_fib_cache *fib_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
+ false, false,
+ false);
+ __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
+ false, false);
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static void prestera_k_arb_abort(struct prestera_switch *sw)
+{
+ /* Remove all arbiter entries and the related hw objects.
+ * Sequence:
+ * 1) Clear the arbiter tables, but don't touch hw
+ * 2) Clear hw
+ * We use this approach because an arbiter object is not directly
+ * mapped to a hw object, so deleting one arbiter object may even
+ * lead to the creation of a hw object (e.g. in the case of
+ * overlapped routes).
+ */
+ rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
+ __prestera_k_arb_abort_fib_ht_cb,
+ sw);
+ rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
+ __prestera_k_arb_abort_neigh_ht_cb,
+ sw);
+}
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(port_dev);
- int err;
- struct prestera_rif_entry *re;
struct prestera_rif_entry_key re_key = {};
+ struct prestera_rif_entry *re;
u32 kern_tb_id;
+ int err;
err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
if (err) {
@@ -45,21 +1289,21 @@ static int __prestera_inetaddr_port_event(struct net_device *port_dev,
switch (event) {
case NETDEV_UP:
if (re) {
- NL_SET_ERR_MSG_MOD(extack, "rif_entry already exist");
+ NL_SET_ERR_MSG_MOD(extack, "RIF already exist");
return -EEXIST;
}
re = prestera_rif_entry_create(port->sw, &re_key,
prestera_fix_tb_id(kern_tb_id),
port_dev->dev_addr);
if (!re) {
- NL_SET_ERR_MSG_MOD(extack, "Can't create rif_entry");
+ NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
return -EINVAL;
}
dev_hold(port_dev);
break;
case NETDEV_DOWN:
if (!re) {
- NL_SET_ERR_MSG_MOD(extack, "rif_entry not exist");
+ NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
return -EEXIST;
}
prestera_rif_entry_destroy(port->sw, re);
@@ -75,11 +1319,11 @@ static int __prestera_inetaddr_event(struct prestera_switch *sw,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (prestera_netdev_check(dev) && !netif_is_bridge_port(dev) &&
- !netif_is_lag_port(dev) && !netif_is_ovs_port(dev))
- return __prestera_inetaddr_port_event(dev, event, extack);
+ if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
+ netif_is_lag_port(dev))
+ return 0;
- return 0;
+ return __prestera_inetaddr_port_event(dev, event, extack);
}
static int __prestera_inetaddr_cb(struct notifier_block *nb,
@@ -126,6 +1370,8 @@ static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
goto out;
if (ipv4_is_multicast(ivi->ivi_addr)) {
+ NL_SET_ERR_MSG_MOD(ivi->extack,
+ "Multicast addr on RIF is not supported");
err = -EINVAL;
goto out;
}
@@ -135,10 +1381,174 @@ out:
return notifier_from_errno(err);
}
+struct prestera_fib_event_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct fib_entry_notifier_info fen_info;
+ unsigned long event;
+};
+
+static void __prestera_router_fib_event_work(struct work_struct *work)
+{
+ struct prestera_fib_event_work *fib_work =
+ container_of(work, struct prestera_fib_event_work, work);
+ struct prestera_switch *sw = fib_work->sw;
+ int err;
+
+ rtnl_lock();
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = prestera_k_arb_fib_evt(sw, true,
+ &fib_work->fen_info.info);
+ if (err)
+ goto err_out;
+
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ err = prestera_k_arb_fib_evt(sw, false,
+ &fib_work->fen_info.info);
+ if (err)
+ goto err_out;
+
+ break;
+ }
+
+ goto out;
+
+err_out:
+ dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
+ &fib_work->fen_info.dst,
+ fib_work->fen_info.dst_len);
+out:
+ fib_info_put(fib_work->fen_info.fi);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int __prestera_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_notifier_info *info = ptr;
+ struct prestera_router *router;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct prestera_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ if (!fen_info->fi)
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ fib_info_hold(fen_info->fi);
+ fib_work->fen_info = *fen_info;
+ fib_work->event = event;
+ fib_work->sw = router->sw;
+ INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
+ prestera_queue_work(&fib_work->work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
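+
+/* Usual deferral pattern: the notifier runs in atomic context, so it only
+ * grabs a fib_info reference and queues the work; the arbiter update then
+ * runs under rtnl_lock from the work handler (prestera_queue_work() is
+ * assumed to use the ordered workqueue created in prestera_module_init()).
+ */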
+
+struct prestera_netevent_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct neighbour *n;
+};
+
+static void prestera_router_neigh_event_work(struct work_struct *work)
+{
+ struct prestera_netevent_work *net_work =
+ container_of(work, struct prestera_netevent_work, work);
+ struct prestera_switch *sw = net_work->sw;
+ struct neighbour *n = net_work->n;
+
+ /* A neigh is not a hw-related object - it lives only in the kernel -
+ * so serialize on rtnl_lock.
+ */
+ rtnl_lock();
+
+ prestera_k_arb_n_evt(sw, n);
+
+ neigh_release(n);
+ rtnl_unlock();
+ kfree(net_work);
+}
+
+static int prestera_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_netevent_work *net_work;
+ struct prestera_router *router;
+ struct neighbour *n = ptr;
+
+ router = container_of(nb, struct prestera_router, netevent_nb);
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl->family != AF_INET)
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (WARN_ON(!net_work))
+ return NOTIFY_BAD;
+
+ neigh_clone(n);
+ net_work->n = n;
+ net_work->sw = router->sw;
+ INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
+ prestera_queue_work(&net_work->work);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void prestera_router_update_neighs_work(struct work_struct *work)
+{
+ struct prestera_router *router;
+
+ router = container_of(work, struct prestera_router,
+ neighs_update.dw.work);
+ rtnl_lock();
+
+ prestera_k_arb_hw_evt(router->sw);
+
+ rtnl_unlock();
+ prestera_queue_delayed_work(&router->neighs_update.dw,
+ msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
+}
+
+static int prestera_neigh_work_init(struct prestera_switch *sw)
+{
+ INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
+ prestera_router_update_neighs_work);
+ prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
+ return 0;
+}
+
+static void prestera_neigh_work_fini(struct prestera_switch *sw)
+{
+ cancel_delayed_work_sync(&sw->router->neighs_update.dw);
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
struct prestera_router *router;
- int err;
+ int err, nhgrp_cache_bytes;
router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
if (!router)
@@ -151,6 +1561,27 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_router_lib_init;
+ err = rhashtable_init(&router->kern_fib_cache_ht,
+ &__prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_kern_fib_cache_ht_init;
+
+ err = rhashtable_init(&router->kern_neigh_cache_ht,
+ &__prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_kern_neigh_cache_ht_init;
+
+ nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
+ router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
+ if (!router->nhgrp_hw_state_cache) {
+ err = -ENOMEM;
+ goto err_nh_state_cache_alloc;
+ }
+
+ err = prestera_neigh_work_init(sw);
+ if (err)
+ goto err_neigh_work_init;
+
router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
if (err)
@@ -161,12 +1592,35 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_register_inetaddr_notifier;
+ router->netevent_nb.notifier_call = prestera_router_netevent_event;
+ err = register_netevent_notifier(&router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ router->fib_nb.notifier_call = __prestera_router_fib_event;
+ err = register_fib_notifier(&init_net, &router->fib_nb,
+ /* TODO: flush fib entries */ NULL, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
return 0;
+err_register_fib_notifier:
+ unregister_netevent_notifier(&router->netevent_nb);
+err_register_netevent_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
- /* prestera_router_hw_fini */
+ prestera_neigh_work_fini(sw);
+err_neigh_work_init:
+ kfree(router->nhgrp_hw_state_cache);
+err_nh_state_cache_alloc:
+ rhashtable_destroy(&router->kern_neigh_cache_ht);
+err_kern_neigh_cache_ht_init:
+ rhashtable_destroy(&router->kern_fib_cache_ht);
+err_kern_fib_cache_ht_init:
+ prestera_router_hw_fini(sw);
err_router_lib_init:
kfree(sw->router);
return err;
@@ -174,9 +1628,18 @@ err_router_lib_init:
void prestera_router_fini(struct prestera_switch *sw)
{
+ unregister_fib_notifier(&init_net, &sw->router->fib_nb);
+ unregister_netevent_notifier(&sw->router->netevent_nb);
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
- /* router_hw_fini */
+ prestera_neigh_work_fini(sw);
+ prestera_queue_drain();
+
+ prestera_k_arb_abort(sw);
+
+ kfree(sw->router->nhgrp_hw_state_cache);
+ rhashtable_destroy(&sw->router->kern_fib_cache_ht);
+ prestera_router_hw_fini(sw);
kfree(sw->router);
sw->router = NULL;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index 5866a4be50f5..aa080dc57ff0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -8,25 +8,103 @@
#include "prestera_router_hw.h"
#include "prestera_acl.h"
-/* +--+
- * +------->|vr|
- * | +--+
- * |
- * +-+-------+
- * |rif_entry|
- * +---------+
- * Rif is
+/* Nexthop is pointed
+ * to port (not rif)
+ * +-------+
+ * +>|nexthop|
+ * | +-------+
+ * |
+ * +--+ +-----++
+ * +------->|vr|<-+ +>|nh_grp|
+ * | +--+ | | +------+
+ * | | |
+ * +-+-------+ +--+---+-+
+ * |rif_entry| |fib_node|
+ * +---------+ +--------+
+ * Rif is Fib - is exit point
* used as
* entry point
* for vr in hw
*/
+#define PRESTERA_NHGR_UNUSED (0)
+#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+/* TODO: merge this with router_manager */
+#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */
+
+static const struct rhashtable_params __prestera_fib_ht_params = {
+ .key_offset = offsetof(struct prestera_fib_node, key),
+ .head_offset = offsetof(struct prestera_fib_node, ht_node),
+ .key_len = sizeof(struct prestera_fib_key),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_nh_neigh_ht_params = {
+ .key_offset = offsetof(struct prestera_nh_neigh, key),
+ .key_len = sizeof(struct prestera_nh_neigh_key),
+ .head_offset = offsetof(struct prestera_nh_neigh, ht_node),
+};
+
+static const struct rhashtable_params __prestera_nexthop_group_ht_params = {
+ .key_offset = offsetof(struct prestera_nexthop_group, key),
+ .key_len = sizeof(struct prestera_nexthop_group_key),
+ .head_offset = offsetof(struct prestera_nexthop_group, ht_node),
+};
+
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg);
+
+/* TODO: move to router.h as macros */
+static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key)
+{
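+	/* memchr_inv() returns NULL only when the whole key is zero-filled,
+	 * so a non-NULL result means at least one field of the key is set.
+	 */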
+ return memchr_inv(key, 0, sizeof(*key)) ? true : false;
+}
+
int prestera_router_hw_init(struct prestera_switch *sw)
{
+ int err;
+
+ err = rhashtable_init(&sw->router->nh_neigh_ht,
+ &__prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_nh_neigh_ht_init;
+
+ err = rhashtable_init(&sw->router->nexthop_group_ht,
+ &__prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_grp_ht_init;
+
+ err = rhashtable_init(&sw->router->fib_ht,
+ &__prestera_fib_ht_params);
+ if (err)
+ goto err_fib_ht_init;
+
INIT_LIST_HEAD(&sw->router->vr_list);
INIT_LIST_HEAD(&sw->router->rif_entry_list);
return 0;
+
+err_fib_ht_init:
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+err_nexthop_grp_ht_init:
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+err_nh_neigh_ht_init:
+	return err;
+}
+
+void prestera_router_hw_fini(struct prestera_switch *sw)
+{
+ rhashtable_free_and_destroy(&sw->router->fib_ht,
+ prestera_fib_node_destroy_ht_cb, sw);
+ WARN_ON(!list_empty(&sw->router->vr_list));
+ WARN_ON(!list_empty(&sw->router->rif_entry_list));
+ rhashtable_destroy(&sw->router->fib_ht);
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
}
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
@@ -47,13 +125,8 @@ static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
struct netlink_ext_ack *extack)
{
struct prestera_vr *vr;
- u16 hw_vr_id;
int err;
- err = prestera_hw_vr_create(sw, &hw_vr_id);
- if (err)
- return ERR_PTR(-ENOMEM);
-
vr = kzalloc(sizeof(*vr), GFP_KERNEL);
if (!vr) {
err = -ENOMEM;
@@ -61,23 +134,26 @@ static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
}
vr->tb_id = tb_id;
- vr->hw_vr_id = hw_vr_id;
+
+ err = prestera_hw_vr_create(sw, &vr->hw_vr_id);
+ if (err)
+ goto err_hw_create;
list_add(&vr->router_node, &sw->router->vr_list);
return vr;
-err_alloc_vr:
- prestera_hw_vr_delete(sw, hw_vr_id);
+err_hw_create:
kfree(vr);
+err_alloc_vr:
return ERR_PTR(err);
}
static void __prestera_vr_destroy(struct prestera_switch *sw,
struct prestera_vr *vr)
{
- prestera_hw_vr_delete(sw, vr->hw_vr_id);
list_del(&vr->router_node);
+ prestera_hw_vr_delete(sw, vr->hw_vr_id);
kfree(vr);
}
@@ -87,17 +163,22 @@ static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id
struct prestera_vr *vr;
vr = __prestera_vr_find(sw, tb_id);
- if (!vr)
+ if (vr) {
+ refcount_inc(&vr->refcount);
+ } else {
vr = __prestera_vr_create(sw, tb_id, extack);
- if (IS_ERR(vr))
- return ERR_CAST(vr);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ refcount_set(&vr->refcount, 1);
+ }
return vr;
}
static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr)
{
- if (!vr->ref_cnt)
+ if (refcount_dec_and_test(&vr->refcount))
__prestera_vr_destroy(sw, vr);
}
@@ -120,7 +201,7 @@ __prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in,
out->iface.vlan_id = in->iface.vlan_id;
break;
default:
- pr_err("Unsupported iface type");
+ WARN(1, "Unsupported iface type");
return -EINVAL;
}
@@ -158,7 +239,6 @@ void prestera_rif_entry_destroy(struct prestera_switch *sw,
iface.vr_id = e->vr->hw_vr_id;
prestera_hw_rif_delete(sw, e->hw_id, &iface);
- e->vr->ref_cnt--;
prestera_vr_put(sw, e->vr);
kfree(e);
}
@@ -183,7 +263,6 @@ prestera_rif_entry_create(struct prestera_switch *sw,
if (IS_ERR(e->vr))
goto err_vr_get;
- e->vr->ref_cnt++;
memcpy(&e->addr, addr, sizeof(e->addr));
/* HW */
@@ -198,7 +277,6 @@ prestera_rif_entry_create(struct prestera_switch *sw,
return e;
err_hw_create:
- e->vr->ref_cnt--;
prestera_vr_put(sw, e->vr);
err_vr_get:
err_key_copy:
@@ -206,3 +284,405 @@ err_key_copy:
err_kzalloc:
return NULL;
}
+
+static void __prestera_nh_neigh_destroy(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ rhashtable_remove_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ kfree(neigh);
+}
+
+static struct prestera_nh_neigh *
+__prestera_nh_neigh_create(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+ int err;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_KERNEL);
+ if (!neigh)
+ goto err_kzalloc;
+
+ memcpy(&neigh->key, key, sizeof(*key));
+ neigh->info.connected = false;
+ INIT_LIST_HEAD(&neigh->nexthop_group_list);
+ err = rhashtable_insert_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return neigh;
+
+err_rhashtable_insert:
+ kfree(neigh);
+err_kzalloc:
+ return NULL;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *nh_neigh;
+
+ nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,
+ key, __prestera_nh_neigh_ht_params);
+ return IS_ERR(nh_neigh) ? NULL : nh_neigh;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+
+ neigh = prestera_nh_neigh_find(sw, key);
+ if (!neigh)
+ return __prestera_nh_neigh_create(sw, key);
+
+ return neigh;
+}
+
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ if (list_empty(&neigh->nexthop_group_list))
+ __prestera_nh_neigh_destroy(sw, neigh);
+}
+
+/* Propagate the updated prestera_neigh_info to every group using this neigh */
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ struct prestera_nh_neigh_head *nh_head;
+ struct prestera_nexthop_group *nh_grp;
+ int err;
+
+ list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) {
+ nh_grp = nh_head->this;
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh)
+{
+ bool state;
+ struct prestera_nh_neigh_head *nh_head, *tmp;
+
+ state = false;
+ list_for_each_entry_safe(nh_head, tmp,
+ &nh_neigh->nexthop_group_list, head) {
+ state = prestera_nexthop_group_util_hw_state(sw, nh_head->this);
+ if (state)
+ goto out;
+ }
+
+out:
+ return state;
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_create(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt, err, gid;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ goto err_kzalloc;
+
+ memcpy(&nh_grp->key, key, sizeof(*key));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt]))
+ break;
+
+ nh_neigh = prestera_nh_neigh_get(sw,
+ &nh_grp->key.neigh[nh_cnt]);
+ if (!nh_neigh)
+ goto err_nh_neigh_get;
+
+ nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh;
+ nh_grp->nh_neigh_head[nh_cnt].this = nh_grp;
+ list_add(&nh_grp->nh_neigh_head[nh_cnt].head,
+ &nh_neigh->nexthop_group_list);
+ }
+
+ err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id);
+ if (err)
+ goto err_nh_group_create;
+
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ goto err_nexthop_group_set;
+
+ err = rhashtable_insert_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_ht_insert;
+
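+	/* nhgrp_hw_state_cache is a bitmap indexed by group id: bit (gid % 8)
+	 * of byte (gid / 8) holds the last polled "active" state of group gid.
+	 */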
+ /* reset cache for created group */
+ gid = nh_grp->grp_id;
+ sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8);
+
+ return nh_grp;
+
+err_ht_insert:
+err_nexthop_group_set:
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+err_nh_group_create:
+err_nh_neigh_get:
+ for (nh_cnt--; nh_cnt >= 0; nh_cnt--) {
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh);
+ }
+
+ kfree(nh_grp);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_nexthop_group_destroy(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt;
+
+ rhashtable_remove_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!nh_neigh)
+ break;
+
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_neigh);
+ }
+
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+ kfree(nh_grp);
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_find(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
+ key, __prestera_nexthop_group_ht_params);
+ return IS_ERR(nh_grp) ? NULL : nh_grp;
+}
+
+static struct prestera_nexthop_group *
+prestera_nexthop_group_get(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = __prestera_nexthop_group_find(sw, key);
+ if (nh_grp) {
+ refcount_inc(&nh_grp->refcount);
+ } else {
+ nh_grp = __prestera_nexthop_group_create(sw, key);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&nh_grp->refcount, 1);
+ }
+
+ return nh_grp;
+}
+
+static void prestera_nexthop_group_put(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ if (refcount_dec_and_test(&nh_grp->refcount))
+ __prestera_nexthop_group_destroy(sw, nh_grp);
+}
+
+/* Update HW with the current info of each member nh_neigh */
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX];
+ struct prestera_nh_neigh *neigh;
+ int nh_cnt;
+
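+	/* Collect the neigh info of every member; iteration stops at the first
+	 * empty slot, so nh_cnt is the number of valid entries passed to HW.
+	 */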
+ memset(&info[0], 0, sizeof(info));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!neigh)
+ break;
+
+ memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info));
+ }
+
+ return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id);
+}
+
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ int err;
+ u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1;
+ u32 gid = nh_grp->grp_id;
+ u8 *cache = sw->router->nhgrp_hw_state_cache;
+
+	/* Antijitter
+	 * Prevent a situation where the state of an nh_grp is read twice in a
+	 * short time and the state bit is already cleared on the second read.
+	 * So just keep reporting the active state for
+	 * PRESTERA_NH_ACTIVE_JIFFER_FILTER ms after it was last seen.
+	 */
+ if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick +
+ msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) {
+ err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size);
+ if (err) {
+ pr_err("Failed to get hw state nh_grp's");
+ return false;
+ }
+
+ sw->router->nhgrp_hw_cache_kick = jiffies;
+ }
+
+ if (cache[gid / 8] & BIT(gid % 8))
+ return true;
+
+ return false;
+}
+
+struct prestera_fib_node *
+prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key,
+ __prestera_fib_ht_params);
+ return fib_node;
+}
+
+static void __prestera_fib_node_destruct(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ struct prestera_vr *vr;
+
+ vr = fib_node->info.vr;
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
+ fib_node->key.prefix_len);
+ switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+ break;
+ case PRESTERA_FIB_TYPE_TRAP:
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ break;
+ default:
+ pr_err("Unknown fib_node->info.type = %d",
+ fib_node->info.type);
+ }
+
+ prestera_vr_put(sw, vr);
+}
+
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ __prestera_fib_node_destruct(sw, fib_node);
+ rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ kfree(fib_node);
+}
+
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_fib_node *node = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_fib_node_destruct(sw, node);
+ kfree(node);
+}
+
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key)
+{
+ struct prestera_fib_node *fib_node;
+ u32 grp_id;
+ struct prestera_vr *vr;
+ int err;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ goto err_kzalloc;
+
+ memcpy(&fib_node->key, key, sizeof(*key));
+ fib_node->info.type = fib_type;
+
+ vr = prestera_vr_get(sw, key->tb_id, NULL);
+ if (IS_ERR(vr))
+ goto err_vr_get;
+
+ fib_node->info.vr = vr;
+
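+	/* grp_id encodes the route action: the reserved PRESTERA_NHGR_UNUSED
+	 * and PRESTERA_NHGR_DROP values select trap and drop behaviour, while
+	 * a real group id points at a HW nexthop group.
+	 */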
+ switch (fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ grp_id = PRESTERA_NHGR_UNUSED;
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ grp_id = PRESTERA_NHGR_DROP;
+ break;
+ case PRESTERA_FIB_TYPE_UC_NH:
+ fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
+ nh_grp_key);
+ if (IS_ERR(fib_node->info.nh_grp))
+ goto err_nh_grp_get;
+
+ grp_id = fib_node->info.nh_grp->grp_id;
+ break;
+ default:
+ pr_err("Unsupported fib_type %d", fib_type);
+ goto err_nh_grp_get;
+ }
+
+ err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len, grp_id);
+ if (err)
+ goto err_lpm_add;
+
+ err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_node;
+
+err_ht_insert:
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len);
+err_lpm_add:
+ if (fib_type == PRESTERA_FIB_TYPE_UC_NH)
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+err_nh_grp_get:
+ prestera_vr_put(sw, vr);
+err_vr_get:
+ kfree(fib_node);
+err_kzalloc:
+ return NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
index fed53595f7bb..9ca97919c863 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -6,7 +6,7 @@
struct prestera_vr {
struct list_head router_node;
- unsigned int ref_cnt;
+ refcount_t refcount;
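+	/* refcount_t saturates and warns on over/underflow, unlike a plain counter */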
u32 tb_id; /* key (kernel fib table id) */
u16 hw_vr_id; /* virtual router ID */
u8 __pad[2];
@@ -22,6 +22,103 @@ struct prestera_rif_entry {
struct list_head router_node; /* ht */
};
+struct prestera_ip_addr {
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } u;
+ enum {
+ PRESTERA_IPV4 = 0,
+ PRESTERA_IPV6
+ } v;
+#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \
+ /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */)
+};
+
+struct prestera_nh_neigh_key {
+ struct prestera_ip_addr addr;
+	/* The rif may look obsolete here, since info already carries an iface.
+	 * A key can contain functional fields, or fields that are used to
+	 * filter out duplicate objects at the logical level (before they are
+	 * passed to HW); a key can also be used to cover hardware restrictions.
+	 * In our case rif is a logical interface (it can even be a VLAN), used
+	 * together with the IP address (which is likewise unrelated to the
+	 * hardware nexthop) to logically deduplicate the nexthops we create.
+	 * You can even think of rif + IP address as just a cookie.
+	 */
+	/* struct prestera_rif *rif; */
+	/* Used just as a cookie (together with addr) to separate ARP domains */
+	void *rif;
+};
+
+/* Used for hw call */
+struct prestera_neigh_info {
+ struct prestera_iface iface;
+ unsigned char ha[ETH_ALEN];
+	u8 connected; /* bool: indicates whether mac/oif are valid */
+ u8 __pad[1];
+};
+
+/* Used to notify nh about neigh change */
+struct prestera_nh_neigh {
+ struct prestera_nh_neigh_key key;
+ struct prestera_neigh_info info;
+	struct rhash_head ht_node; /* node of router->nh_neigh_ht */
+ struct list_head nexthop_group_list;
+};
+
+#define PRESTERA_NHGR_SIZE_MAX 4
+
+struct prestera_nexthop_group {
+ struct prestera_nexthop_group_key {
+ struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX];
+ } key;
+	/* Store the intermediate object inline here.
+	 * This avoids the overhead of a separate kzalloc() call.
+	 */
+	/* nh_neigh_head is used only to notify the nexthop_group */
+ struct prestera_nh_neigh_head {
+ struct prestera_nexthop_group *this;
+ struct list_head head;
+		/* The neigh pointer is not strictly necessary.
+		 * It is kept to avoid a lookup of nh_neigh by key on destroy.
+		 */
+ struct prestera_nh_neigh *neigh;
+ } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX];
+	struct rhash_head ht_node; /* node of router->nexthop_group_ht */
+ refcount_t refcount;
+ u32 grp_id; /* hw */
+};
+
+struct prestera_fib_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 tb_id;
+};
+
+struct prestera_fib_info {
+ struct prestera_vr *vr;
+ struct list_head vr_node;
+ enum prestera_fib_type {
+ PRESTERA_FIB_TYPE_INVALID = 0,
+		/* must carry a pointer to an nh_grp (its grp_id is used) */
+ PRESTERA_FIB_TYPE_UC_NH,
+		/* Can be a connected route, which will be
+		 * overlapped by neighbour entries
+		 */
+ PRESTERA_FIB_TYPE_TRAP,
+ PRESTERA_FIB_TYPE_DROP
+ } type;
+	/* Valid only if type == UC_NH */
+ struct prestera_nexthop_group *nh_grp;
+};
+
+struct prestera_fib_node {
+	struct rhash_head ht_node; /* node of router->fib_ht */
+ struct prestera_fib_key key;
+ struct prestera_fib_info info; /* action related info */
+};
+
struct prestera_rif_entry *
prestera_rif_entry_find(const struct prestera_switch *sw,
const struct prestera_rif_entry_key *k);
@@ -31,6 +128,28 @@ struct prestera_rif_entry *
prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh);
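+/* Intended reference discipline (a sketch, not enforced by the API):
+ *
+ *	neigh = prestera_nh_neigh_get(sw, &key);  - find or create
+ *	... link the neigh into a nexthop group ...
+ *	prestera_nh_neigh_put(sw, neigh);  - freed once no group uses it
+ */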
+struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
+ struct prestera_fib_key *key);
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node);
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key);
int prestera_router_hw_init(struct prestera_switch *sw);
+void prestera_router_hw_fini(struct prestera_switch *sw);
#endif /* _PRESTERA_ROUTER_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index e452cdeaf703..9277a8fd1339 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -102,7 +102,7 @@ struct prestera_sdma {
struct net_device napi_dev;
u32 map_addr;
u64 dma_mask;
- /* protect SDMA with concurrrent access from multiple CPUs */
+ /* protect SDMA with concurrent access from multiple CPUs */
spinlock_t tx_lock;
};
@@ -659,7 +659,7 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
init_dummy_netdev(&sdma->napi_dev);
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
@@ -776,6 +776,7 @@ tx_done:
int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
struct prestera_rxtx *rxtx;
+ int err;
rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
if (!rxtx)
@@ -783,7 +784,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw)
sw->rxtx = rxtx;
- return prestera_sdma_switch_init(sw);
+ err = prestera_sdma_switch_init(sw);
+ if (err)
+ kfree(rxtx);
+
+ return err;
}
void prestera_rxtx_switch_fini(struct prestera_switch *sw)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index 845e9d8c8cc7..1005182ce3bc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -107,7 +107,7 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
entry = prestera_span_entry_find_by_id(sw->span, span_id);
if (!entry)
- return false;
+ return -ENOENT;
if (!refcount_dec_and_test(&entry->ref_count))
return 0;
@@ -120,8 +120,9 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
return 0;
}
-static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
- struct prestera_port *to_port)
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress)
{
struct prestera_switch *sw = binding->port->sw;
u8 span_id;
@@ -135,7 +136,7 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
if (err)
return err;
- err = prestera_hw_span_bind(binding->port, span_id);
+ err = prestera_hw_span_bind(binding->port, span_id, ingress);
if (err) {
prestera_span_put(sw, span_id);
return err;
@@ -145,11 +146,15 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
return 0;
}
-static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress)
{
int err;
- err = prestera_hw_span_unbind(binding->port);
+ if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
+ return -ENOENT;
+
+ err = prestera_hw_span_unbind(binding->port, ingress);
if (err)
return err;
@@ -161,60 +166,6 @@ static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
return 0;
}
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f)
-{
- struct prestera_flow_block_binding *binding;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- struct prestera_port *port;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only singular actions are supported");
- return -EOPNOTSUPP;
- }
-
- act = &f->rule->action.entries[0];
-
- if (!prestera_netdev_check(act->dev)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only Marvell Prestera port is supported");
- return -EINVAL;
- }
- if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
- return -EOPNOTSUPP;
- if (act->id != FLOW_ACTION_MIRRED)
- return -EOPNOTSUPP;
- if (protocol != htons(ETH_P_ALL))
- return -EOPNOTSUPP;
-
- port = netdev_priv(act->dev);
-
- list_for_each_entry(binding, &block->binding_list, list) {
- err = prestera_span_rule_add(binding, port);
- if (err)
- goto rollback;
- }
-
- return 0;
-
-rollback:
- list_for_each_entry_continue_reverse(binding,
- &block->binding_list, list)
- prestera_span_rule_del(binding);
- return err;
-}
-
-void prestera_span_destroy(struct prestera_flow_block *block)
-{
- struct prestera_flow_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- prestera_span_rule_del(binding);
-}
-
int prestera_span_init(struct prestera_switch *sw)
{
struct prestera_span *span;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
index f0644521f78a..493b68524bcb 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -8,13 +8,17 @@
#define PRESTERA_SPAN_INVALID_ID -1
+struct prestera_port;
struct prestera_switch;
-struct prestera_flow_block;
+struct prestera_flow_block_binding;
int prestera_span_init(struct prestera_switch *sw);
void prestera_span_fini(struct prestera_switch *sw);
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f);
-void prestera_span_destroy(struct prestera_flow_block *block);
+
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress);
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress);
#endif /* _PRESTERA_SPAN_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index b4599fe4ca8d..e548cd32582e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -39,7 +39,10 @@ struct prestera_bridge {
struct net_device *dev;
struct prestera_switchdev *swdev;
struct list_head port_list;
+ struct list_head br_mdb_entry_list;
+ bool mrouter_exist;
bool vlan_enabled;
+ bool multicast_enabled;
u16 bridge_id;
};
@@ -48,8 +51,10 @@ struct prestera_bridge_port {
struct net_device *dev;
struct prestera_bridge *bridge;
struct list_head vlan_list;
+ struct list_head br_mdb_port_list;
refcount_t ref_count;
unsigned long flags;
+ bool mrouter;
u8 stp_state;
};
@@ -67,6 +72,20 @@ struct prestera_port_vlan {
u16 vid;
};
+struct prestera_br_mdb_port {
+ struct prestera_bridge_port *br_port;
+ struct list_head br_mdb_port_node;
+};
+
+/* Software representation of MDB table. */
+struct prestera_br_mdb_entry {
+ struct prestera_bridge *bridge;
+ struct prestera_mdb_entry *mdb;
+ struct list_head br_mdb_port_list;
+ struct list_head br_mdb_entry_node;
+ bool enabled;
+};
+
static struct workqueue_struct *swdev_wq;
static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
@@ -74,6 +93,88 @@ static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
u8 state);
+static struct prestera_bridge *
+prestera_bridge_find(const struct prestera_switch *sw,
+ const struct net_device *br_dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &sw->swdev->bridge_list, head)
+ if (bridge->dev == br_dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_find(const struct prestera_bridge *bridge,
+ const struct net_device *brport_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head)
+ if (br_port->dev == brport_dev)
+ return br_port;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_find(struct prestera_switch *sw,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_find(sw, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_find(bridge, brport_dev);
+}
+
+static void
+prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ prestera_port_uc_flood_set(port, false);
+ prestera_port_mc_flood_set(port, false);
+ prestera_port_learning_set(port, false);
+ prestera_port_br_locked_set(port, false);
+}
+
+static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_br_locked_set(port,
+ br_port->flags & BR_PORT_LOCKED);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ prestera_br_port_flags_reset(br_port, port);
+ return err;
+}
+
static struct prestera_bridge_vlan *
prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
{
@@ -220,6 +321,70 @@ static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
}
static void
+prestera_mdb_port_del(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_flood_domain *fl_domain = mdb->flood_domain;
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ flood_domain_port = prestera_flood_domain_port_find(fl_domain,
+ orig_dev,
+ mdb->vid);
+ if (flood_domain_port)
+ prestera_flood_domain_port_destroy(flood_domain_port);
+}
+
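+/* Once the entry's port list is empty, detach the mdb from every bridge
+ * port's flood domain and destroy the underlying mdb entry.
+ */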
+static void
+prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb)
+{
+ struct prestera_bridge_port *br_port;
+
+ if (list_empty(&br_mdb->br_mdb_port_list)) {
+ list_for_each_entry(br_port, &br_mdb->bridge->port_list, head)
+ prestera_mdb_port_del(br_mdb->mdb, br_port->dev);
+
+ prestera_mdb_entry_destroy(br_mdb->mdb);
+ list_del(&br_mdb->br_mdb_entry_node);
+ kfree(br_mdb);
+ }
+}
+
+static void
+prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp;
+
+ list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ if (br_mdb_port->br_port == br_port) {
+ list_del(&br_mdb_port->br_mdb_port_node);
+ kfree(br_mdb_port);
+ }
+ }
+}
+
+static void
+prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp_port;
+ struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp;
+ struct prestera_bridge *br_dev = br_port->bridge;
+
+ list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ list_for_each_entry_safe(br_mdb_port, tmp_port,
+ &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ prestera_mdb_port_del(br_mdb->mdb,
+ br_mdb_port->br_port->dev);
+ prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port);
+ }
+ prestera_br_mdb_entry_put(br_mdb);
+ }
+}
+
+static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
@@ -244,6 +409,8 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
else
prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+ prestera_mdb_flush_bridge_port(br_port);
+
list_del(&port_vlan->br_vlan_head);
prestera_bridge_vlan_put(br_vlan);
prestera_bridge_port_put(br_port);
@@ -295,8 +462,10 @@ prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
bridge->vlan_enabled = vlan_enabled;
bridge->swdev = swdev;
bridge->dev = dev;
+ bridge->multicast_enabled = br_multicast_enabled(dev);
INIT_LIST_HEAD(&bridge->port_list);
+ INIT_LIST_HEAD(&bridge->br_mdb_entry_list);
list_add(&bridge->head, &swdev->bridge_list);
@@ -314,6 +483,7 @@ static void prestera_bridge_destroy(struct prestera_bridge *bridge)
else
prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+ WARN_ON(!list_empty(&bridge->br_mdb_entry_list));
WARN_ON(!list_empty(&bridge->port_list));
kfree(bridge);
}
@@ -405,6 +575,7 @@ prestera_bridge_port_create(struct prestera_bridge *bridge,
INIT_LIST_HEAD(&br_port->vlan_list);
list_add(&br_port->head, &bridge->port_list);
+ INIT_LIST_HEAD(&br_port->br_mdb_port_list);
return br_port;
}
@@ -414,6 +585,7 @@ prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
{
list_del(&br_port->head);
WARN_ON(!list_empty(&br_port->vlan_list));
+ WARN_ON(!list_empty(&br_port->br_mdb_port_list));
kfree(br_port);
}
@@ -461,19 +633,13 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
if (err)
return err;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
- if (err)
- goto err_port_flood_set;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
return 0;
-err_port_learning_set:
-err_port_flood_set:
+err_flags2port_set:
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
return err;
@@ -592,8 +758,9 @@ void prestera_bridge_port_leave(struct net_device *br_dev,
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
- prestera_hw_port_learning_set(port, false);
- prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
+ prestera_mdb_flush_bridge_port(br_port);
+
+ prestera_br_port_flags_reset(br_port, port);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
prestera_bridge_port_put(br_port);
}
@@ -603,26 +770,14 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
- int err;
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags.mask, flags.val);
- if (err)
- return err;
-
- if (flags.mask & BR_LEARNING) {
- err = prestera_hw_port_learning_set(port,
- flags.val & BR_LEARNING);
- if (err)
- return err;
- }
-
- memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
-
- return 0;
+ br_port->flags &= ~flags.mask;
+ br_port->flags |= flags.val & flags.mask;
+ return prestera_br_port_flags_set(br_port, port);
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
@@ -716,6 +871,290 @@ err_port_stp_set:
return err;
}
+static int
+prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port,
+ bool enabled)
+{
+ struct prestera_port *pr_port;
+ struct prestera_switch *sw;
+ u16 lag_id;
+ int err;
+
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+ if (!pr_port)
+ return 0;
+
+ sw = pr_port->sw;
+ err = prestera_lag_id(sw, br_port->dev, &lag_id);
+ if (err)
+ return err;
+
+ list_for_each_entry(pr_port, &sw->port_list, list) {
+ if (pr_port->lag->lag_id == lag_id) {
+ err = prestera_port_mc_flood_set(pr_port, enabled);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_port *port;
+ bool enabled;
+ int err;
+
+ /* if mrouter exists:
+ * - make sure every mrouter receives unreg mcast traffic;
+	 * if mrouter doesn't exist:
+ * - make sure every port receives unreg mcast traffic;
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ if (br_dev->multicast_enabled && br_dev->mrouter_exist)
+ enabled = br_port->mrouter;
+ else
+ enabled = br_port->flags & BR_MCAST_FLOOD;
+
+ if (netif_is_lag_master(br_port->dev)) {
+ err = prestera_br_port_lag_mdb_mc_enable_sync(br_port,
+ enabled);
+ if (err)
+ return err;
+ continue;
+ }
+
+ port = prestera_port_dev_lower_find(br_port->dev);
+ if (!port)
+ continue;
+
+ err = prestera_port_mc_flood_set(port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool
+prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_br_mdb_port *tmp_port;
+
+ list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (tmp_port->br_port->dev == orig_dev)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_mdb_port_add(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev,
+ const unsigned char addr[ETH_ALEN], u16 vid)
+{
+ struct prestera_flood_domain *flood_domain = mdb->flood_domain;
+ int err;
+
+ if (!prestera_flood_domain_port_find(flood_domain,
+ orig_dev, vid)) {
+ err = prestera_flood_domain_port_create(flood_domain, orig_dev,
+ vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Sync bridge mdb (software table) with HW table (if MC is enabled). */
+static int prestera_br_mdb_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+ struct prestera_bridge_port *br_port;
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_mdb_entry *mdb;
+ struct prestera_port *pr_port;
+ int err = 0;
+
+ if (!br_dev->multicast_enabled)
+ return 0;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ mdb = br_mdb->mdb;
+		/* Make sure every port that has explicitly been added to the mdb
+ * joins the specified group.
+ */
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ br_port = br_mdb_port->br_port;
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Match only mdb and br_mdb ports that belong to the
+ * same broadcast domain.
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+			/* If the port is not in the MDB or there is no
+			 * mrouter, clear the HW mdb entry.
+ */
+ if (prestera_br_mdb_port_is_member(br_mdb,
+ br_mdb_port->br_port->dev) &&
+ br_dev->mrouter_exist)
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ else
+ prestera_mdb_port_del(mdb, br_port->dev);
+
+ if (err)
+ return err;
+ }
+
+		/* Make sure that every mrouter port joins every MC group in its
+		 * broadcast domain; if it is not an mrouter, it should leave.
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+			/* Make sure the mrouter wouldn't receive traffic from
+			 * another broadcast domain (e.g. from a vlan the
+			 * mrouter port is not a member of).
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ if (br_port->mrouter) {
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ if (err)
+ return err;
+			} else if (!br_port->mrouter &&
+				   !prestera_br_mdb_port_is_member(br_mdb,
+								   br_port->dev)) {
+ prestera_mdb_port_del(mdb, br_port->dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable)
+{
+ int err;
+
+ if (enable != br_mdb->enabled) {
+ if (enable)
+ err = prestera_hw_mdb_create(br_mdb->mdb);
+ else
+ err = prestera_hw_mdb_destroy(br_mdb->mdb);
+
+ if (err)
+ return err;
+
+ br_mdb->enabled = enable;
+ }
+
+ return 0;
+}
+
+static int
+prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ int err;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ err = prestera_mdb_enable_set(br_mdb, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *br_dev;
+
+ br_dev = prestera_bridge_find(sw, orig_dev);
+ if (!br_dev)
+ return 0;
+
+ br_dev->multicast_enabled = !mc_disabled;
+
+	/* There's no point in re-enabling the mdb if the mrouter is missing. */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
+static bool
+prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &br_dev->port_list, head)
+ if (br_port->mrouter)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_port_attr_mrouter_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+
+ br_port = prestera_bridge_port_find(port->sw, orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+ br_port->mrouter = is_port_mrouter;
+
+ br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev);
+
+	/* Enable MDB processing only if an mrouter exists and MC is enabled.
+	 * If MC is enabled but there is no mrouter, the device floods all
+	 * multicast traffic (even if the MDB table is not empty) using the
+	 * bridge's flood capabilities (without the flood_domain).
+ */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -730,7 +1169,7 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags.mask &
- ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED))
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
@@ -745,6 +1184,14 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = prestera_port_attr_mrouter_set(port, attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev,
+ attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -918,14 +1365,9 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
if (port_vlan->br_port)
return 0;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- return err;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
- if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
if (err)
@@ -950,8 +1392,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
err_bridge_vlan_get:
prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
err_port_vid_stp_set:
- prestera_hw_port_learning_set(port, false);
-err_port_learning_set:
+ prestera_br_port_flags_reset(br_port, port);
+err_flags2port_set:
return err;
}
@@ -1048,20 +1490,162 @@ static int prestera_port_vlans_add(struct prestera_port *port,
flag_pvid, extack);
}
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_create(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb_entry;
+ struct prestera_mdb_entry *mdb_entry;
+
+ br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL);
+ if (!br_mdb_entry)
+ return NULL;
+
+ mdb_entry = prestera_mdb_entry_create(sw, addr, vid);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ br_mdb_entry->mdb = mdb_entry;
+ br_mdb_entry->bridge = br_dev;
+ br_mdb_entry->enabled = true;
+ INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list);
+
+ list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list);
+
+ return br_mdb_entry;
+
+err_mdb_alloc:
+ kfree(br_mdb_entry);
+ return NULL;
+}
+
+static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (br_mdb_port->br_port == br_port)
+ return 0;
+
+ br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL);
+ if (!br_mdb_port)
+ return -ENOMEM;
+
+ br_mdb_port->br_port = br_port;
+ list_add(&br_mdb_port->br_mdb_port_node,
+ &br_mdb->br_mdb_port_list);
+
+ return 0;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_find(struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node)
+ if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) &&
+ vid == br_mdb->mdb->vid)
+ return br_mdb;
+
+ return NULL;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_get(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid);
+ if (br_mdb)
+ return br_mdb;
+
+ return prestera_br_mdb_entry_create(sw, br_dev, addr, vid);
+}
+
+static int
+prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ struct prestera_switch *sw;
+ struct prestera_port *port;
+ int err;
+
+ sw = prestera_switch_get(mdb->obj.orig_dev);
+ port = prestera_port_dev_lower_find(mdb->obj.orig_dev);
+
+ br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
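+	/* VLAN-unaware bridges carry no VID, so their entries are keyed by
+	 * the bridge id instead (the del path mirrors this choice).
+	 */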
+ if (mdb->vid)
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ br_dev->bridge_id);
+
+ if (!br_mdb)
+ return -ENOMEM;
+
+ /* Make sure newly allocated MDB entry gets disabled if either MC is
+ * disabled, or the mrouter does not exist.
+ */
+ WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ err = prestera_br_mdb_port_add(br_mdb, br_port);
+ if (err) {
+ prestera_br_mdb_entry_put(br_mdb);
+ return err;
+ }
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
return prestera_port_vlans_add(port, vlan, extack);
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_add(mdb);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ fallthrough;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_port_vlans_del(struct prestera_port *port,
@@ -1086,17 +1670,71 @@ static int prestera_port_vlans_del(struct prestera_port *port,
return 0;
}
+static int
+prestera_mdb_port_addr_obj_del(struct prestera_port *port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ int err;
+
+	/* The bridge port no longer exists, and neither does this MDB entry */
+ br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+	/* Removing an MDB entry with a non-existent VLAN is not supported. */
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (br_port->bridge->vlan_enabled)
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ br_port->bridge->bridge_id);
+
+ if (!br_mdb)
+ return 0;
+
+	/* This port might have been the last one in the MDB group, so we
+	 * have to remove it from both the software and the HW MDB, sync the
+	 * MDB table, and then destroy the software MDB entry (if needed).
+ */
+ prestera_br_mdb_port_del(br_mdb, br_port);
+
+ prestera_br_mdb_entry_put(br_mdb);
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_del(port, mdb);
+ break;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_switchdev_blk_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 52bef50f5a0d..cf456d62677f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1354,10 +1354,10 @@ static void pxa168_eth_netpoll(struct net_device *dev)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
@@ -1486,7 +1486,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
/* Hardware supports only 3 ports */
BUG_ON(pep->port_num > 2);
- netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+ netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll,
+ pep->rx_ring_size);
memset(&pep->timeout, 0, sizeof(struct timer_list));
timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index cf03c67fbf40..1b43704baceb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -50,7 +50,6 @@
#define PHY_RETRIES 1000
#define ETH_JUMBO_MTU 9000
#define TX_WATCHDOG (5 * HZ)
-#define NAPI_WEIGHT 64
#define BLINK_MS 250
#define LINK_HZ HZ
@@ -395,9 +394,9 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(skge->hw->pdev),
sizeof(info->bus_info));
}
@@ -3833,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_HIGHDMA;
skge = netdev_priv(dev);
- netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &skge->napi, skge_poll);
skge->netdev = dev;
skge->hw = hw;
skge->msg_enable = netif_msg_init(debug, default_msg);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ea16b1dd6a98..ab33ba1c3023 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -63,7 +63,6 @@
#define TX_DEF_PENDING 63
#define TX_WATCHDOG (5 * HZ)
-#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
@@ -1864,7 +1863,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (mss != 0) {
if (!(hw->flags & SKY2_HW_NEW_LE))
- mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss += skb_tcp_all_headers(skb);
if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
@@ -3688,9 +3687,9 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sky2->hw->pdev),
sizeof(info->bus_info));
}
@@ -4712,7 +4711,7 @@ static irqreturn_t sky2_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int sky2_test_msi(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
@@ -4938,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &hw->napi, sky2_poll);
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 86d356b4388d..97374fb3ee79 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,12 +7,18 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
select PINCTRL
select PHYLINK
select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 79d4cdbbcbf5..45ba0970504a 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,4 +5,9 @@
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b67b4323cff0..7cd381530aa4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
@@ -20,9 +21,11 @@
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/jhash.h>
+#include <linux/bitfield.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
@@ -31,6 +34,112 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
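+/* Per-SoC register offset maps: the common layout, the MT7628 variant
+ * (PDMA only) and the MT7986 layout with relocated PDMA/QDMA blocks.
+ */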
+static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x1800,
+ .rx_ptr = 0x1900,
+ .rx_cnt_cfg = 0x1904,
+ .qcrx_ptr = 0x1908,
+ .glo_cfg = 0x1a04,
+ .rst_idx = 0x1a08,
+ .delay_irq = 0x1a0c,
+ .fc_th = 0x1a10,
+ .int_grp = 0x1a20,
+ .hred = 0x1a44,
+ .ctx_ptr = 0x1b00,
+ .dtx_ptr = 0x1b04,
+ .crx_ptr = 0x1b10,
+ .drx_ptr = 0x1b14,
+ .fq_head = 0x1b20,
+ .fq_tail = 0x1b24,
+ .fq_count = 0x1b28,
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
+ .gdma_to_ppe = 0x4444,
+ .ppe_base = 0x0c00,
+ .wdma_base = {
+ [0] = 0x2800,
+ [1] = 0x2c00,
+ },
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+ .tx_irq_mask = 0x0a28,
+ .tx_irq_status = 0x0a20,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+};
+
+static const struct mtk_reg_map mt7986_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6100,
+ .rx_cnt_cfg = 0x6104,
+ .pcrx_ptr = 0x6108,
+ .glo_cfg = 0x6204,
+ .rst_idx = 0x6208,
+ .delay_irq = 0x620c,
+ .irq_status = 0x6220,
+ .irq_mask = 0x6228,
+ .int_grp = 0x6250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+};
+
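The reg_map tables above move register offsets out of compile-time #defines and into the per-SoC data, so the generic, MT7628 and MT7986 layouts can share one driver. All accesses then go through the map; a sketch (helper name hypothetical):

	static u32 mtk_tx_irq_mask_read(struct mtk_eth *eth)
	{
		/* offset comes from the SoC data, not a fixed macro */
		return mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
	}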
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -48,13 +157,20 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
static const char * const mtk_clks_source_name[] = {
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll",
+ "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -260,14 +376,33 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
+static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ unsigned int sid;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface)) {
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ return mtk_sgmii_select_pcs(eth->sgmii, sid);
+ }
+
+ return NULL;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new, sid, i;
- int val, ge_mode, err;
+ int val, ge_mode, err = 0;
+ u32 i;
/* MT76x8 has no hardware settings for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -324,6 +459,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
+ /* FIXME: this is incorrect. Not only does it
+ * use state->speed (which is not guaranteed
+ * to be correct) but it also makes use of it
+ * in a code path that will only be reachable
+ * when the PHY interface mode changes, not
+ * when the speed changes. Consequently, RGMII
+ * is probably broken.
+ */
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface,
state->speed);
@@ -380,38 +523,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
SYSCFG0_SGMII_MASK,
~(u32)SYSCFG0_SGMII_MASK);
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac->id;
-
- /* Setup SGMIISYS with the determined property */
- if (state->interface != PHY_INTERFACE_MODE_SGMII)
- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
- state);
- else if (phylink_autoneg_inband(mode))
- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
-
- if (err)
- goto init_err;
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, val);
+ /* Save the syscfg0 value for mac_finish */
+ mac->syscfg0 = val;
} else if (phylink_autoneg_inband(mode)) {
dev_err(eth->dev,
"In-band mode not supported in non SGMII mode!\n");
return;
}
- /* Setup gmac */
- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
-
- /* Only update control register when needed! */
- if (mcr_new != mcr_cur)
- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
-
return;
err_phy:
@@ -424,6 +543,33 @@ init_err:
mac->id, phy_modes(state->interface), err);
}
+static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ /* Enable SGMII */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface))
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, mac->syscfg0);
+
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static void mtk_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -456,14 +602,6 @@ static void mtk_mac_pcs_get_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_TX;
}
-static void mtk_mac_an_restart(struct phylink_config *config)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
-
- mtk_sgmii_restart_an(mac->hw, mac->id);
-}
-
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -482,8 +620,9 @@ static void mtk_mac_link_up(struct phylink_config *config,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ u32 mcr;
+ mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
MAC_MCR_FORCE_RX_FC);
@@ -515,9 +654,10 @@ static void mtk_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = mtk_mac_select_pcs,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
- .mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
+ .mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
};
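With mac_an_restart gone (restarting autoneg is now the PCS's job) and mac_finish added, the sequence phylink drives through these ops during a major reconfiguration is roughly the following; a comment-form sketch (simplified, see the phylink documentation for the authoritative ordering):

	/*
	 * mac_select_pcs()  pick the SGMII PCS for SGMII/802.3z modes
	 * mac_config()      MAC-side setup; syscfg0 is only saved here
	 * (phylink then configures the returned PCS)
	 * mac_finish()      enable SGMII in ETHSYS and write MAC_MCR
	 * mac_link_up()     force speed/duplex/pause once the link is up
	 */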
@@ -573,8 +713,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -584,8 +724,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -595,8 +735,8 @@ static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -606,8 +746,8 @@ static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -658,39 +798,39 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
} else {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
unsigned int offs = hw_stats->reg_offset;
u64 stats;
- hw_stats->rx_bytes += mtk_r32(mac->hw,
- MTK_GDM1_RX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
if (stats)
hw_stats->rx_bytes += (stats << 32);
hw_stats->rx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
hw_stats->rx_overflow +=
- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
hw_stats->rx_fcs_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
hw_stats->rx_short_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
hw_stats->rx_long_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
hw_stats->rx_checksum_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
hw_stats->tx_skip +=
- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
u64_stats_update_end(&hw_stats->syncp);
@@ -764,8 +904,8 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
-static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
@@ -774,67 +914,89 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
return true;
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
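mtk_max_lro_buf_alloc() covers RX buffers larger than one page (the LRO case). __GFP_COMP matters here: the allocation must be a compound page so that virt_to_head_page() and skb_free_frag() on the data pointer behave correctly. The RX paths below then split allocation as in this sketch:

	if (ring->frag_size <= PAGE_SIZE)
		data = napi_alloc_frag(ring->frag_size);	/* page-frag fast path */
	else
		data = mtk_max_lro_buf_alloc(GFP_ATOMIC);	/* compound multi-page buffer */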
+
/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+ cnt * soc->txrx.txd_size,
&eth->phy_scratch_ring,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
- GFP_KERNEL);
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring +
- (sizeof(struct mtk_tx_dma) * (cnt - 1));
+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 =
- (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+ txd->txd2 = eth->phy_scratch_ring +
+ (i + 1) * soc->txrx.txd_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
return 0;
}
-static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
- void *ret = ring->dma;
-
- return ret + (desc - ring->phys);
+ return ring->dma + (desc - ring->phys);
}
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ void *txd, u32 txd_size)
{
- int idx = txd - ring->dma;
+ int idx = (txd - ring->dma) / txd_size;
return &ring->buf[idx];
}
@@ -842,54 +1004,66 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
- return ring->dma_pdma - ring->dma + dma;
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
}
-static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
- return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+ return (dma - ring->dma) / txd_size;
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
- bool napi)
+ struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
} else {
if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
- else
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
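mtk_tx_unmap() now takes an optional xdp_frame_bulk so TX completion can batch XDP frame returns instead of freeing one frame at a time. The pattern the polling functions below follow, as a sketch:

	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);
	/* for each reaped descriptor: */
	mtk_tx_unmap(eth, tx_buf, &bq, true);	/* queues any XDP frame into bq */
	xdp_flush_frame_bulk(&bq);		/* returns the whole batch at once */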
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -906,7 +1080,7 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -915,18 +1089,108 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *desc = txd;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM;
+ /* vlan header offload */
+ if (info->vlan)
+ data |= TX_DMA_INS_VLAN | info->vlan_tci;
+ }
+ WRITE_ONCE(desc->txd4, data);
+}
+
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_v2 *desc = txd;
+ struct mtk_eth *eth = mac->hw;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ if (!info->qid && mac->id)
+ info->qid = MTK_QDMA_GMAC2_QID;
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
+ WRITE_ONCE(desc->txd4, data);
+
+ data = 0;
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO_V2;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM_V2;
+ }
+ WRITE_ONCE(desc->txd5, data);
+
+ data = 0;
+ if (info->first && info->vlan)
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
+ WRITE_ONCE(desc->txd6, data);
+
+ WRITE_ONCE(desc->txd7, 0);
+ WRITE_ONCE(desc->txd8, 0);
+}
+
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
+ else
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
+}
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = skb_headlen(skb),
+ .gso = gso,
+ .csum = skb->ip_summed == CHECKSUM_PARTIAL,
+ .vlan = skb_vlan_tag_present(skb),
+ .qid = skb->mark & MTK_QDMA_TX_MASK,
+ .vlan_tci = skb_vlan_tag_get(skb),
+ .first = true,
+ .last = !skb_is_nonlinear(skb),
+ };
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0, fport;
int k = 0;
itxd = ring->next_free;
@@ -934,52 +1198,35 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- /* set the forward port */
- fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
- txd4 |= fport;
-
- itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
memset(itx_buf, 0, sizeof(*itx_buf));
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(eth->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
return -ENOMEM;
- WRITE_ONCE(itxd->txd1, mapped_addr);
+ mtk_tx_set_dma_desc(dev, itxd, &txd_info);
+
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
/* TX SG offload */
txd = itxd;
txd_pdma = qdma_to_pdma(ring, txd);
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
bool new_desc = true;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
@@ -991,47 +1238,42 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
new_desc = false;
}
-
- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+ soc->txrx.dma_max_len);
+ txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
+ offset, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
goto err_dma;
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, fport);
-
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
- frag_map_size, k++);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
+ txd_info.size, k++);
- frag_size -= frag_map_size;
- offset += frag_map_size;
+ frag_size -= txd_info.size;
+ offset += txd_info.size;
}
}
/* store skb for cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1049,13 +1291,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
!netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
- int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
- ring->dma_size);
+ int next_idx;
+
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1063,13 +1307,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1079,17 +1323,16 @@ err_dma:
return -ENOMEM;
}
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
- int i, nfrags;
+ int i, nfrags = 1;
skb_frag_t *frag;
- nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- MTK_TX_DMA_BUF_LEN);
+ eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1141,7 +1384,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
- tx_num = mtk_cal_txd_req(skb);
+ tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
@@ -1192,9 +1435,12 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ struct mtk_rx_dma *rxd;
+
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
@@ -1222,42 +1468,352 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
}
}
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+}
+
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+ id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
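These helpers wrap the page_pool API: PP_FLAG_DMA_MAP means the pool pre-maps pages, so the driver only offsets the stored DMA address, and page_pool_put_full_page() recycles rather than frees on the hot path. A minimal round trip, assuming pp is the pool created above (sketch):

	struct page *page = page_pool_alloc_pages(pp, GFP_ATOMIC | __GFP_NOWARN);

	if (page) {
		dma_addr_t dma = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;

		/* ... program dma into an RX descriptor, receive ... */
		page_pool_put_full_page(pp, page, true);	/* allow in-softirq recycle */
	}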
+
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = !xdp_frame_has_frags(xdpf),
+ };
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ struct mtk_tx_dma *htxd, *txd;
+ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
+ }
+ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
+ /* store xdpf for cleanup */
+ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
+
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
+
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+ }
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
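mtk_xdp_xmit() implements the .ndo_xdp_xmit contract: it returns how many of the frames it consumed, and the XDP core frees the rest. A sketch of what the caller side does with the return value ('frames'/'num_frame' as above):

	int i, sent = mtk_xdp_xmit(dev, num_frame, frames, 0);

	for (i = sent < 0 ? 0 : sent; i < num_frame; i++)
		xdp_return_frame(frames[i]);	/* unsent frames freed by the caller */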
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
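The verdicts handled above can be exercised with a trivial XDP program; a minimal pass-through sketch (build with clang -O2 -target bpf, attach with ip link set dev ... xdp obj ...):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_pass_all(struct xdp_md *ctx)
	{
		return XDP_PASS;	/* accounted in rx_xdp_pass by this driver */
	}

	char _license[] SEC("license") = "GPL";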
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
+ struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- unsigned int pktlen;
dma_addr_t dma_addr;
- u32 hash;
- int mac;
+ u32 hash, reason;
+ int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = &ring->dma[idx];
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
- if (!mtk_rx_get_desc(&trxd, rxd))
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet comes from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
- (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- mac = 0;
- else
- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK) - 1;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1268,70 +1824,144 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
+
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
- dma_unmap_single(eth->dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- skb_free_frag(data);
- netdev->stats.rx_dropped++;
- goto skip_rx;
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->rx_dma_l4_valid)
+ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd3;
+ } else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd4;
+ }
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (trxd.rxd3 & RX_DMA_VTAG_V2)
+ __vlan_hwaccel_put_tag(skb,
+ htons(RX_DMA_VPID(trxd.rxd4)),
+ RX_DMA_VID(trxd.rxd4));
+ } else if (trxd.rxd2 & RX_DMA_VTAG) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ RX_DMA_VID(trxd.rxd3));
+ }
+
+ /* If the device is attached to a dsa switch, the special
+ * tag inserted in VLAN field by hw switch can be offloaded
+ * by RX HW VLAN offload. Clear vlan info.
+ */
+ if (netdev_uses_dsa(netdev))
+ __vlan_hwaccel_clear_tag(skb);
}
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- (trxd.rxd2 & RX_DMA_VTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1350,22 +1980,27 @@ rx_done:
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (xdp_flush)
+ xdp_do_flush_map();
+
return done;
}
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
@@ -1375,29 +2010,34 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+ eth->soc->txrx.txd_size);
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[mac] += skb->len;
+ done[mac]++;
+ }
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
+ xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
return budget;
}
@@ -1406,34 +2046,38 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[0] += skb->len;
- done[0]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[0] += skb->len;
+ done[0]++;
+ }
budget--;
}
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
- mtk_tx_unmap(eth, tx_buf, true);
-
- desc = &ring->dma[cpu];
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
+ xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
@@ -1490,24 +2134,25 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
- mtk_r32(eth, eth->tx_int_status_reg),
- mtk_r32(eth, eth->tx_int_mask_reg));
+ mtk_r32(eth, reg_map->tx_irq_status),
+ mtk_r32(eth, reg_map->tx_irq_mask));
}
if (tx_done == budget)
return budget;
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
return budget;
if (napi_complete_done(napi, tx_done))
@@ -1519,6 +2164,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int rx_done_total = 0;
mtk_handle_status_irq(eth);
@@ -1526,40 +2172,44 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
- mtk_r32(eth, MTK_PDMA_INT_STATUS),
- mtk_r32(eth, MTK_PDMA_INT_MASK));
+ mtk_r32(eth, reg_map->pdma.irq_status),
+ mtk_r32(eth, reg_map->pdma.irq_mask));
}
if (rx_done_total == budget)
return budget;
- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->dma);
+ int i, sz = soc->txrx.txd_size;
+ struct mtk_tx_dma_v2 *txd;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;
@@ -1567,18 +2217,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
- ring->dma[i].txd2 = next_ptr;
- ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd = ring->dma + i * sz;
+ txd->txd2 = next_ptr;
+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys_pdma,
- GFP_ATOMIC);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ &ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
@@ -1590,8 +2247,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
- ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->next_free = ring->dma;
+ ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
@@ -1600,20 +2257,20 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- MTK_QTX_CFG(0));
+ soc->reg_map->qdma.qtx_cfg);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
return 0;
@@ -1624,45 +2281,43 @@ no_tx_mem:
static void mtk_tx_clean(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i], false);
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
- ring->dma_pdma,
- ring->phys_pdma);
+ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
- u32 offset = 0;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
ring = &eth->rx_ring_qdma;
- offset = 0x1000;
} else {
ring = &eth->rx_ring[ring_no];
}
@@ -1682,45 +2337,98 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- if (!ring->data[i])
- return -ENOMEM;
+ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+ rx_dma_size * eth->soc->txrx.rxd_size,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- return -ENOMEM;
- ring->dma[i].rxd1 = (unsigned int)dma_addr;
+ struct mtk_rx_dma_v2 *rxd;
+ dma_addr_t dma_addr;
+ void *data;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr)))
+ return -ENOMEM;
+ }
+ rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- ring->dma[i].rxd2 = RX_DMA_LSO;
+ rxd->rxd2 = RX_DMA_LSO;
else
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+ rxd->rxd8 = 0;
+ }
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ else
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ mtk_w32(eth, ring->phys,
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->qdma.rst_idx);
+ } else {
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->pdma.rst_idx);
+ }
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
@@ -1731,27 +2439,36 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
+ struct mtk_rx_dma *rxd;
+
if (!ring->data[i])
continue;
- if (!ring->dma[i].rxd1)
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (!rxd->rxd1)
continue;
- dma_unmap_single(eth->dev,
- ring->dma[i].rxd1,
- ring->buf_size,
- DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+
+ dma_unmap_single(eth->dma_dev, rxd->rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
- ring->dma_size * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
@@ -1949,6 +2666,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
@@ -2024,9 +2744,9 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- reg = MTK_QDMA_GLO_CFG;
+ reg = eth->soc->reg_map->qdma.glo_cfg;
else
- reg = MTK_PDMA_GLO_CFG;
+ reg = eth->soc->reg_map->pdma.glo_cfg;
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
@@ -2084,8 +2804,8 @@ static int mtk_dma_init(struct mtk_eth *eth)
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
- FC_THRES_MIN, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
}
return 0;
@@ -2093,16 +2813,16 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
- eth->scratch_ring,
- eth->phy_scratch_ring);
+ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
@@ -2137,7 +2857,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
@@ -2159,13 +2879,16 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+ eth->soc->txrx.rx_irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -2179,16 +2902,17 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
err = mtk_dma_init(eth);
@@ -2198,21 +2922,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+ MTK_CHK_DDONE_EN;
+ else
+ val |= MTK_RX_BT_32DWORDS;
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
} else {
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
}
return 0;
@@ -2261,21 +2991,25 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- u32 gdm_config = MTK_GDMA_TO_PDMA;
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config;
+ int i;
err = mtk_start_dma(eth);
if (err)
return err;
- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
- gdm_config = MTK_GDMA_TO_PPE;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -2313,6 +3047,7 @@ static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -2327,7 +3062,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -2335,17 +3070,59 @@ static int mtk_stop(struct net_device *dev)
cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
return 0;
}
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
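+
+(Note on the stop/open cycle above: attaching or detaching a program changes how the
+RX buffers must be laid out, so the rings are torn down and rebuilt only when the
+attach state toggles; swapping one loaded program for another needs just the
+rcu_replace_pointer() exchange.)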
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
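
The RX fast path is expected to pick the installed program up under RCU. A minimal
sketch of that consumption, assuming only the standard XDP helpers; this function is
illustrative and not part of the series:

	/* Sketch only: how a poll loop would run the program installed
	 * by mtk_xdp_setup(). The xdp_buff setup and the handling of
	 * the returned verdict are elided.
	 */
	static u32 mtk_xdp_run_sketch(struct mtk_eth *eth, struct xdp_buff *xdp)
	{
		struct bpf_prog *prog;
		u32 act = XDP_PASS;	/* no program: pass to the stack */

		rcu_read_lock();
		prog = rcu_dereference(eth->prog);
		if (prog)
			act = bpf_prog_run_xdp(prog, xdp);
		rcu_read_unlock();

		return act;
	}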
+
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -2390,6 +3167,7 @@ static void mtk_dim_rx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2397,7 +3175,7 @@ static void mtk_dim_rx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_TX_MASK;
val |= MTK_PDMA_DELAY_RX_EN;
@@ -2407,9 +3185,9 @@ static void mtk_dim_rx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
@@ -2420,6 +3198,7 @@ static void mtk_dim_tx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2427,7 +3206,7 @@ static void mtk_dim_tx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_RX_MASK;
val |= MTK_PDMA_DELAY_TX_EN;
@@ -2437,9 +3216,9 @@ static void mtk_dim_tx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
@@ -2448,6 +3227,9 @@ static void mtk_dim_tx(struct work_struct *work)
static int mtk_hw_init(struct mtk_eth *eth)
{
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2460,6 +3242,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
if (ret) {
@@ -2478,9 +3264,25 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- /* Non-MT7628 handling... */
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ val = RSTCTRL_FE | RSTCTRL_PPE;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ val |= RSTCTRL_ETH;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+ }
+
+ ethsys_reset(eth, val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+ }
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -2518,12 +3320,48 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_rx_irq_disable(eth, ~0);
/* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ /* PSE should not drop port8 and port9 packets */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+ /* PSE config input queue threshold */
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+ /* PSE config output queue threshold */
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+ }
+
return 0;
err_disable_pm:
@@ -2580,6 +3418,12 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -2732,8 +3576,8 @@ static void mtk_get_drvinfo(struct net_device *dev,
{
struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
@@ -2769,11 +3613,18 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
int i;
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
break;
}
}
@@ -2781,13 +3632,35 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_ethtool_stats);
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
default:
return -EOPNOTSUPP;
}
}
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2815,6 +3688,8 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
@@ -2907,6 +3782,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -2968,14 +3845,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
/* mac config is not set */
mac->interface = PHY_INTERFACE_MODE_NA;
- mac->mode = MLO_AN_PHY;
mac->speed = SPEED_UNKNOWN;
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
- /* This driver makes use of state->speed/state->duplex in
- * mac_config
- */
+ /* This driver makes use of state->speed in mac_config */
mac->phylink_config.legacy_pre_march2020 = true;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
@@ -3040,8 +3914,38 @@ free_netdev:
return err;
}
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
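
mtk_eth_set_dma_device() closes every running netdev, swaps the struct device used for
DMA mapping, and reopens them so all rings are rebuilt against the new device. A hedged
usage sketch; wed_dev and both function names are assumptions, not taken from this
series:

	/* Sketch: a co-driver retargeting DMA at its own device. */
	static void wed_attach_sketch(struct mtk_eth *eth, struct device *wed_dev)
	{
		/* rings are torn down and remapped against wed_dev on reopen */
		mtk_eth_set_dma_device(eth, wed_dev);
	}

	static void wed_detach_sketch(struct mtk_eth *eth)
	{
		/* restore mapping against the original platform device */
		mtk_eth_set_dma_device(eth, eth->dev);
	}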
+
static int mtk_probe(struct platform_device *pdev)
{
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -3053,24 +3957,13 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
- } else {
- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- } else {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
- }
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
@@ -3101,6 +3994,16 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "cci-control-port");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
GFP_KERNEL);
@@ -3123,6 +4026,33 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ }
+
+ if (eth->soc->offload_version) {
+ for (i = 0;; i++) {
+ struct device_node *np;
+ phys_addr_t wdma_phy;
+ u32 wdma_base;
+
+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+ np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ if (!np)
+ break;
+
+ wdma_base = eth->soc->reg_map->wdma_base[i];
+ wdma_phy = res ? res->start + wdma_base : 0;
+ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+ wdma_phy, i);
+ }
+ }
+
for (i = 0; i < 3; i++) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
eth->irq[i] = eth->irq[0];
@@ -3130,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev)
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- return -ENXIO;
+ err = -ENXIO;
+ goto err_wed_exit;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
- if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_wed_exit;
+ }
if (eth->soc->required_clks & BIT(i)) {
dev_err(&pdev->dev, "clock %s not found\n",
mtk_clks_source_name[i]);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_wed_exit;
}
eth->clks[i] = NULL;
}
@@ -3153,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_hw_init(eth);
if (err)
- return err;
+ goto err_wed_exit;
eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
@@ -3198,10 +4132,20 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
- goto err_free_dev;
+ u32 num_ppe;
+
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+ eth->soc->offload_version, i);
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+ }
err = mtk_eth_offload_init(eth);
if (err)
@@ -3226,10 +4170,8 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
- MTK_NAPI_WEIGHT);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
- MTK_NAPI_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
@@ -3241,6 +4183,8 @@ err_free_dev:
mtk_free_dev(eth);
err_deinit_hw:
mtk_hw_deinit(eth);
+err_wed_exit:
+ mtk_wed_exit();
return err;
}
@@ -3260,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev)
phylink_disconnect_phy(mac->phylink);
}
+ mtk_wed_exit();
mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
@@ -3271,50 +4216,129 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7621_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7621_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7622_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7623_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7629_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7986_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
};
static const struct mtk_soc_data rt5350_data = {
+ .reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
const struct of_device_id of_mtk_match[] = {
@@ -3323,6 +4347,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index c9d42be314b5..b52f3b0177ef 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -17,14 +17,17 @@
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
+#include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_DMA_SIZE 512
-#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
@@ -48,6 +51,13 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
+#define MTK_QRX_OFFSET 0x10
+
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
@@ -81,6 +91,10 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
@@ -91,7 +105,6 @@
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_TO_PDMA 0x0
-#define MTK_GDMA_TO_PPE 0x4444
#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
@@ -100,25 +113,38 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
-/* PDMA RX Base Pointer Register */
-#define MTK_PRX_BASE_PTR0 0x900
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+/* FE global misc reg */
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
+
+/* PSE Input Queue Reservation Register */
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
-/* PDMA RX Maximum Count Register */
-#define MTK_PRX_MAX_CNT0 0x904
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+/* PSE Output Queue Threshold Register */
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
-/* PDMA RX CPU Pointer Register */
-#define MTK_PRX_CRX_IDX0 0x908
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0 0x980
#define MTK_LRO_EN BIT(0)
#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
#define MTK_PDMA_LRO_CTRL_DW1 0x984
#define MTK_PDMA_LRO_CTRL_DW2 0x988
@@ -126,18 +152,19 @@
#define MTK_ADMA_MODE BIT(15)
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
-/* PDMA Global Configuration Register */
-#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_RX_DMA_LRO_EN BIT(8)
#define MTK_MULTI_EN BIT(10)
#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
+
/* PDMA Reset Index Register */
-#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
@@ -151,19 +178,9 @@
#define MTK_PDMA_DELAY_PINT_MASK 0x7f
#define MTK_PDMA_DELAY_PTIME_MASK 0xff
-/* PDMA Interrupt Status Register */
-#define MTK_PDMA_INT_STATUS 0xa20
-
-/* PDMA Interrupt Mask Register */
-#define MTK_PDMA_INT_MASK 0xa28
-
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
-/* PDMA Interrupt grouping registers */
-#define MTK_PDMA_INT_GRP1 0xa50
-#define MTK_PDMA_INT_GRP2 0xa54
-
/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
@@ -185,26 +202,9 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
/* QDMA TX Queue Configuration Registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
-/* QDMA TX Queue Scheduler Registers */
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-
-/* QDMA RX Base Pointer Register */
-#define MTK_QRX_BASE_PTR0 0x1900
-
-/* QDMA RX Maximum Count Register */
-#define MTK_QRX_MAX_CNT0 0x1904
-
-/* QDMA RX CPU Pointer Register */
-#define MTK_QRX_CRX_IDX0 0x1908
-
-/* QDMA RX DMA Pointer Register */
-#define MTK_QRX_DRX_IDX0 0x190C
-
/* QDMA Global Configuration Register */
-#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
#define MTK_NDP_CO_PRO BIT(10)
@@ -216,20 +216,19 @@
#define MTK_TX_DMA_EN BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US 1000000
-/* QDMA Reset Index Register */
-#define MTK_QDMA_RST_IDX 0x1A08
-
-/* QDMA Delay Interrupt Register */
-#define MTK_QDMA_DELAY_INT 0x1A0C
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
/* QDMA Flow Control Register */
-#define MTK_QDMA_FC_THRES 0x1A10
#define FC_THRES_DROP_MODE BIT(20)
#define FC_THRES_DROP_EN (7 << 16)
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
#define MTK_TX_DONE_DLY BIT(28)
#define MTK_RX_DONE_INT3 BIT(19)
@@ -243,58 +242,32 @@
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+#define MTK_RX_DONE_INT_V2 BIT(14)
+
/* QDMA Interrupt grouping registers */
-#define MTK_QDMA_INT_GRP1 0x1a20
-#define MTK_QDMA_INT_GRP2 0x1a24
#define MTK_RLS_DONE_INT BIT(0)
-/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_MASK 0x1A1C
-
-/* QDMA Interrupt Mask Register */
-#define MTK_QDMA_HRED2 0x1A44
-
-/* QDMA TX Forward CPU Pointer Register */
-#define MTK_QTX_CTX_PTR 0x1B00
-
-/* QDMA TX Forward DMA Pointer Register */
-#define MTK_QTX_DTX_PTR 0x1B04
-
-/* QDMA TX Release CPU Pointer Register */
-#define MTK_QTX_CRX_PTR 0x1B10
-
-/* QDMA TX Release DMA Pointer Register */
-#define MTK_QTX_DRX_PTR 0x1B14
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_HEAD 0x1B20
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_TAIL 0x1B24
-
-/* QDMA FQ Free Page Counter Register */
-#define MTK_QDMA_FQ_CNT 0x1B28
-
-/* QDMA FQ Free Page Buffer Length Register */
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-/* GMA1 counter / statics register */
-#define MTK_GDM1_RX_GBCNT_L 0x2400
-#define MTK_GDM1_RX_GBCNT_H 0x2404
-#define MTK_GDM1_RX_GPCNT 0x2408
-#define MTK_GDM1_RX_OERCNT 0x2410
-#define MTK_GDM1_RX_FERCNT 0x2414
-#define MTK_GDM1_RX_SERCNT 0x2418
-#define MTK_GDM1_RX_LENCNT 0x241c
-#define MTK_GDM1_RX_CERCNT 0x2420
-#define MTK_GDM1_RX_FCCNT 0x2424
-#define MTK_GDM1_TX_SKIPCNT 0x2428
-#define MTK_GDM1_TX_COLCNT 0x242c
-#define MTK_GDM1_TX_GBCNT_L 0x2430
-#define MTK_GDM1_TX_GBCNT_H 0x2434
-#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
+/* QDMA TX NUM */
+#define MTK_QDMA_TX_NUM 16
+#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+#define MTK_QDMA_GMAC2_QID 8
+
+#define MTK_TX_DMA_BUF_SHIFT 8
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
#define TX_DMA_TSO BIT(28)
@@ -305,10 +278,9 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -318,12 +290,14 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
/* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
@@ -334,10 +308,20 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
#define RX_DMA_SPECIAL_TAG BIT(22)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
@@ -461,10 +445,26 @@
#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
#define RSTCTRL_PPE BIT(31)
+#define RSTCTRL_PPE1 BIT(30)
+#define RSTCTRL_ETH BIT(23)
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
+
/* SGMII subsystem config registers */
/* Register to auto-negotiation restart */
#define SGMSYS_PCS_CONTROL_1 0x0
@@ -485,9 +485,10 @@
#define SGMSYS_SGMII_MODE 0x20
#define SGMII_IF_MODE_BIT0 BIT(0)
#define SGMII_SPEED_DUPLEX_AN BIT(1)
-#define SGMII_SPEED_10 0x0
-#define SGMII_SPEED_100 BIT(2)
-#define SGMII_SPEED_1000 BIT(3)
+#define SGMII_SPEED_MASK GENMASK(3, 2)
+#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
+#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
+#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
#define SGMII_DUPLEX_FULL BIT(4)
#define SGMII_IF_MODE_BIT5 BIT(5)
#define SGMII_REMOTE_FAULT_DIS BIT(8)
@@ -538,6 +539,17 @@ struct mtk_rx_dma {
unsigned int rxd4;
} __packed __aligned(4);
+struct mtk_rx_dma_v2 {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+} __packed __aligned(4);
+
struct mtk_tx_dma {
unsigned int txd1;
unsigned int txd2;
@@ -545,9 +557,30 @@ struct mtk_tx_dma {
unsigned int txd4;
} __packed __aligned(4);
+struct mtk_tx_dma_v2 {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+} __packed __aligned(4);
+
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -571,6 +604,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
@@ -612,6 +647,10 @@ enum mtk_clks_map {
MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_NETSYS0,
+ MTK_CLK_NETSYS1,
MTK_CLK_MAX
};
@@ -642,12 +681,28 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -657,7 +712,9 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
+ enum mtk_tx_buf_type type;
+ void *data;
+
u32 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
@@ -677,7 +734,7 @@ struct mtk_tx_buf {
* are present
*/
struct mtk_tx_ring {
- struct mtk_tx_dma *dma;
+ void *dma;
struct mtk_tx_buf *buf;
dma_addr_t phys;
struct mtk_tx_dma *next_free;
@@ -707,7 +764,7 @@ enum mtk_rx_flags {
* @calc_idx: The current head of ring
*/
struct mtk_rx_ring {
- struct mtk_rx_dma *dma;
+ void *dma;
u8 **data;
dma_addr_t phys;
u16 frag_size;
@@ -716,6 +773,9 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
@@ -731,7 +791,9 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
+ MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
+ MTK_RSTCTRL_PPE1_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -763,7 +825,9 @@ enum mkt_eth_capabilities {
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -836,8 +900,65 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
+struct mtk_tx_dma_desc_info {
+ dma_addr_t addr;
+ u32 size;
+ u16 vlan_tci;
+ u16 qid;
+ u8 gso:1;
+ u8 csum:1;
+ u8 vlan:1;
+ u8 first:1;
+ u8 last:1;
+};
+
+struct mtk_reg_map {
+ u32 tx_irq_mask;
+ u32 tx_irq_status;
+ struct {
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 pcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 irq_status; /* interrupt status */
+ u32 irq_mask; /* interrupt mask */
+ u32 int_grp;
+ } pdma;
+ struct {
+ u32 qtx_cfg; /* tx queue configuration */
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 qcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 fc_th; /* flow control */
+ u32 int_grp;
+ u32 hred; /* hw RED drop config */
+ u32 ctx_ptr; /* tx acquire cpu pointer */
+ u32 dtx_ptr; /* tx acquire dma pointer */
+ u32 crx_ptr; /* tx release cpu pointer */
+ u32 drx_ptr; /* tx release dma pointer */
+ u32 fq_head; /* fq head pointer */
+ u32 fq_tail; /* fq tail pointer */
+ u32 fq_count; /* fq free page count */
+ u32 fq_blen; /* fq free page buffer length */
+ } qdma;
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe;
+ u32 ppe_base;
+ u32 wdma_base[2];
+};
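+
+(This map replaces the fixed register #defines removed further down in this header. A
+sketch of how the legacy layout would populate part of it, taking the offsets from the
+removed macros; the real initializers live in mtk_eth_soc.c and are not shown in this
+diff:
+
+	static const struct mtk_reg_map mtk_reg_map_sketch = {
+		.tx_irq_mask	= 0x1a1c,	/* was MTK_QDMA_INT_MASK */
+		.tx_irq_status	= 0x1a18,	/* was MTK_QDMA_INT_STATUS */
+		.pdma = {
+			.rx_ptr		= 0x0900,	/* was MTK_PRX_BASE_PTR0 */
+			.rx_cnt_cfg	= 0x0904,	/* was MTK_PRX_MAX_CNT0 */
+			.pcrx_ptr	= 0x0908,	/* was MTK_PRX_CRX_IDX0 */
+			.glo_cfg	= 0x0a04,	/* was MTK_PDMA_GLO_CFG */
+			.rst_idx	= 0x0a08,	/* was MTK_PDMA_RST_IDX */
+			.delay_irq	= 0x0a0c,	/* was MTK_PDMA_DELAY_INT */
+			.irq_status	= 0x0a20,	/* was MTK_PDMA_INT_STATUS */
+			.irq_mask	= 0x0a28,	/* was MTK_PDMA_INT_MASK */
+			.int_grp	= 0x0a50,	/* was MTK_PDMA_INT_GRP1 */
+		},
+	};
+)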
+
/* struct mtk_soc_data - This is the structure holding all differences
 * among various platforms
+ * @reg_map SoC register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
* @caps Flags showing the extra capabilities of the SoC
@@ -846,42 +967,63 @@ enum mkt_eth_capabilities {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
+ * @foe_entry_size FOE table entry size.
+ * @txd_size Tx DMA descriptor size.
+ * @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+ * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
*/
struct mtk_soc_data {
+ const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
u32 caps;
u32 required_clks;
bool required_pctl;
u8 offload_version;
+ u8 hash_offset;
+ u16 foe_entry_size;
netdev_features_t hw_features;
+ struct {
+ u32 txd_size;
+ u32 rxd_size;
+ u32 rx_irq_done_mask;
+ u32 rx_dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } txrx;
};
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
-#define MTK_SGMII_PHYSPEED_AN BIT(31)
-#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
-#define MTK_SGMII_PHYSPEED_1000 BIT(0)
-#define MTK_SGMII_PHYSPEED_2500 BIT(1)
-#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
-
-/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
- * characteristics
+/* struct mtk_pcs - This structure holds each sgmii regmap and associated
+ * data
* @regmap: The register map pointing at the range used to setup
* SGMII modes
- * @flags: The enum refers to which mode the sgmii wants to run on
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @pcs: Phylink PCS structure
*/
+struct mtk_pcs {
+ struct regmap *regmap;
+ u32 ana_rgc3;
+ struct phylink_pcs pcs;
+};
+
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @pcs: Array of individual PCS structures
+ */
struct mtk_sgmii {
- struct regmap *regmap[MTK_MAX_DEVS];
- u32 flags[MTK_MAX_DEVS];
- u32 ana_rgc3;
+ struct mtk_pcs pcs[MTK_MAX_DEVS];
};
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
+ * @dma_dev: The device pointer used for DMA mapping/alloc
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
* @tx_irq_lock: Make sure that IRQ register operations are atomic
@@ -925,6 +1067,7 @@ struct mtk_sgmii {
struct mtk_eth {
struct device *dev;
+ struct device *dma_dev;
void __iomem *base;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
@@ -946,7 +1089,7 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring_qdma;
struct napi_struct tx_napi;
struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
+ void *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clks[MTK_CLK_MAX];
@@ -969,13 +1112,12 @@ struct mtk_eth {
u32 tx_bytes;
struct dim tx_dim;
- u32 tx_int_mask_reg;
- u32 tx_int_status_reg;
- u32 rx_dma_l4_valid;
int ip_align;
- struct mtk_ppe ppe;
+ struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -989,7 +1131,6 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
- unsigned int mode;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -998,23 +1139,101 @@ struct mtk_mac {
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
+ unsigned int syscfg0;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+
+ return ppe->foe_table + hash * soc->foe_entry_size;
+}
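+
+(Note the byte-wise indexing here: foe_table is scaled by the per-SoC
+soc->foe_entry_size rather than sizeof(struct mtk_foe_entry), since the v1 entries in
+the SoC tables above are declared 16 bytes smaller than the full structure and plain
+array indexing would over-step on that hardware.)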
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id);
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
u32 ana_rgc3);
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state);
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
@@ -1023,6 +1242,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 3ad10c793308..2d8ca99f2467 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
return ppe_m32(ppe, reg, val, 0);
}
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
+}
+
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
int ret;
@@ -70,19 +88,12 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
enable * MTK_PPE_CACHE_CTL_EN);
}
-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -111,16 +122,19 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
- hash <<= 1;
+ hash <<= (ffs(eth->soc->hash_offset) - 1);
hash &= MTK_PPE_ENTRIES - 1;
return hash;
}
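
(A worked example for the final shift: ffs() returns the 1-based index of the lowest
set bit, so the v1 SoCs above with .hash_offset = 2 shift by ffs(2) - 1 = 1, giving
each hash value a pair of adjacent FOE slots, while mt7986 with .hash_offset = 4
shifts by ffs(4) - 1 = 2 and reserves four consecutive slots per hash value.)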
static inline struct mtk_foe_mac_info *
-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.l2;
@@ -129,9 +143,12 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
}
static inline u32 *
-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.ib2;
@@ -139,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
return &entry->ipv4.ib2;
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac)
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac)
{
struct mtk_foe_mac_info *l2;
u32 ports_pad, val;
memset(entry, 0, sizeof(*entry));
- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
- MTK_FOE_IB1_BIND_TTL |
- MTK_FOE_IB1_BIND_CACHE;
- entry->ib1 = val;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+ entry->ib1 = val;
- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+ } else {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+ }
if (is_multicast_ether_addr(dest_mac))
- val |= MTK_FOE_IB2_MULTICAST;
+ val |= mtk_get_ib2_multicast_mask(eth);
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
@@ -167,7 +195,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
entry->ipv6.ports = ports_pad;
- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
entry->ipv6.ib2 = val;
l2 = &entry->ipv6.l2;
} else {
@@ -188,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
return 0;
}
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port)
{
- u32 *ib2 = mtk_foe_entry_ib2(entry);
- u32 val;
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ u32 val = *ib2;
- val = *ib2;
- val &= ~MTK_FOE_IB2_DEST_PORT;
- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+ } else {
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ }
*ib2 = val;
return 0;
}
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool egress,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
struct mtk_ipv4_tuple *t;
switch (type) {
@@ -240,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
return 0;
}
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
int i;
@@ -275,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
return 0;
}
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
l2->etype = BIT(port);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
else
l2->etype |= BIT(8);
- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
return 0;
}
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
case 0:
- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+ mtk_prep_ib1_vlan_layer(eth, 1);
l2->vlan1 = vid;
return 0;
case 1:
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
l2->vlan1 = vid;
l2->etype |= BIT(8);
} else {
l2->vlan2 = vid;
- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
}
return 0;
default:
@@ -315,79 +357,399 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
}
}
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
l2->etype = ETH_P_PPP_SES;
- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
l2->pppoe_id = sid;
return 0;
}
-static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid)
{
- return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
- FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ }
+
+ return 0;
}
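
The WDMA routing info written by mtk_foe_entry_set_wdma() lands in different words depending on the SoC generation: NETSYS v2 parts carry a dedicated winfo field, while v1 parts overload vlan2. Below is a stand-alone sketch of the v1-style bit packing, using simplified user-space stand-ins for the kernel's GENMASK/FIELD_PREP macros; the field values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Simplified reimplementations, not the kernel macros. */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define WINFO_BSS	GENMASK(5, 0)
#define WINFO_WCID	GENMASK(13, 6)
#define WINFO_RING	GENMASK(15, 14)

int main(void)
{
	/* Pack BSS 3, WCID 42, ring 1 into the 16-bit vlan2 word. */
	uint16_t vlan2 = FIELD_PREP(WINFO_BSS, 3) |
			 FIELD_PREP(WINFO_WCID, 42) |
			 FIELD_PREP(WINFO_RING, 1);

	printf("0x%04x\n", vlan2);	/* prints 0x4a83 */
	return 0;
}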
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+ struct mtk_foe_entry *data)
{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
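
A subtlety in mtk_flow_entry_match() above: the offsetof() values are measured from the start of struct mtk_foe_entry, but the memcmp() starts at the data member that follows the 4-byte ib1 word, hence the trailing len - 4. A minimal stand-alone illustration with hypothetical stand-in types:

#include <stddef.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in layout: a leading control word, then the comparable data. */
struct foe {
	uint32_t ib1;			/* control word, checked separately */
	struct {
		uint32_t tuple[4];	/* comparable key region */
		uint32_t ib2;		/* not part of the key */
	} data;
};

static bool foe_match(const struct foe *a, const struct foe *b)
{
	/* Offset of ib2 from the struct start... */
	size_t len = offsetof(struct foe, data.ib2);

	/* ...but the compare starts after the 4-byte ib1, so trim it. */
	return !memcmp(&a->data, &b->data, len - 4);
}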
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u16 now = mtk_eth_timestamp(ppe->eth);
+ u16 timestamp = ib1 & ib1_ts_mask;
+
+ if (timestamp > now)
+ return ib1_ts_mask + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
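
__mtk_foe_entry_idle_time() has to tolerate the bound timestamp being a narrow counter that wraps: when the stored stamp is ahead of the current time, one full counter period is added back in. A minimal user-space sketch of the same arithmetic, assuming a full 16-bit mask:

#include <stdint.h>
#include <stdio.h>

static unsigned int idle_time(uint16_t ts_mask, uint16_t now, uint16_t stamp)
{
	/* If the counter wrapped since the entry was stamped, the raw
	 * difference would be huge; add the counter period back in. */
	if (stamp > now)
		return (unsigned int)ts_mask + 1 - stamp + now;
	return now - stamp;
}

int main(void)
{
	/* No wrap: stamped at 100, now 150 -> idle 50 ticks. */
	printf("%u\n", idle_time(0xffff, 150, 100));
	/* Wrap: stamped at 0xfff0, now 0x0010 -> idle 0x20 ticks. */
	printf("%u\n", idle_time(0xffff, 0x0010, 0xfff0));
	return 0;
}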
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = mtk_foe_get_entry(ppe, cur->hash);
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~ib1_ts_mask;
+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry foe = {};
+ struct mtk_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+ if (entry->hash == 0xffff)
+ goto out;
- hash = mtk_ppe_hash_entry(entry);
- hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
+ hwe = mtk_foe_get_entry(ppe, entry->hash);
+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
+ struct mtk_foe_entry *hwe;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+ timestamp);
+ } else {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+ timestamp);
}
- memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
wmb();
hwe->ib1 = entry->ib1;
dma_wmb();
mtk_ppe_cache_clear(ppe);
+}
- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
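
Note that with the new scheme mtk_foe_entry_commit() no longer writes hardware state at all: it records the software entry in a bucket list keyed by the hardware hash, and the FOE slot is filled in later from __mtk_ppe_check_skb(). Several adjacent FOE slots share one bucket, which is why foe_flow holds MTK_PPE_ENTRIES / hash_offset heads indexed by hash / hash_offset. A small worked example; hash_offset is SoC-dependent, 4 is assumed here.

#include <stdio.h>

#define PPE_ENTRIES	(1024 << 3)	/* 8192 FOE slots, as in mtk_ppe.h */
#define HASH_OFFSET	4		/* assumed; depends on the SoC */

int main(void)
{
	unsigned int buckets = PPE_ENTRIES / HASH_OFFSET;
	unsigned int hash = 0x1a2b & (PPE_ENTRIES - 1);

	/* Slots bucket*HASH_OFFSET .. +HASH_OFFSET-1 share one hlist. */
	printf("%u buckets, hash 0x%x -> bucket %u\n",
	       buckets, hash, hash / HASH_OFFSET);
	return 0;
}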
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe = {}, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+ GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&foe, hwe, soc->foe_entry_size);
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
+}
+
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ tag += 4;
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version)
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version, int index)
{
- struct mtk_foe_entry *foe;
+ const struct mtk_soc_data *soc = eth->soc;
+ struct device *dev = eth->dev;
+ struct mtk_ppe *ppe;
+ u32 foe_flow_size;
+ void *foe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
/* need to allocate a separate device, since the PPE DMA access is
 * not coherent.
 */
ppe->base = base;
+ ppe->eth = eth;
ppe->dev = dev;
ppe->version = version;
- foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
+ foe = dmam_alloc_coherent(ppe->dev,
+ MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
- return -ENOMEM;
+ return NULL;
ppe->foe_table = foe;
- mtk_ppe_debugfs_init(ppe);
+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+ sizeof(*ppe->foe_flow);
+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return NULL;
- return 0;
+ mtk_ppe_debugfs_init(ppe, index);
+
+ return ppe;
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -395,21 +757,30 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+ memset(ppe->foe_table, 0,
+ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
/* skip all entries that cross the 1024 byte boundary */
- for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
- for (k = 0; k < ARRAY_SIZE(skip); k++)
- ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
+ for (k = 0; k < ARRAY_SIZE(skip); k++) {
+ struct mtk_foe_entry *hwe;
+
+ hwe = mtk_foe_get_entry(ppe, i + skip[k]);
+ hwe->ib1 |= MTK_FOE_IB1_STATIC;
+ }
+ }
}
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
+ if (!ppe)
+ return;
+
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
@@ -428,6 +799,8 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -435,16 +808,21 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, true);
- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_6RD |
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+ MTK_PPE_MD_TOAP_BYP_CRSN1 |
+ MTK_PPE_MD_TOAP_BYP_CRSN2 |
+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+ else
+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
@@ -479,7 +857,10 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- return 0;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+ }
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -487,9 +868,15 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
u32 val;
int i;
- for (i = 0; i < MTK_PPE_ENTRIES; i++)
- ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_INVALID);
+ if (!ppe)
+ return 0;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
+
+ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ }
mtk_ppe_cache_enable(ppe, false);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2ae65..0b7a67a958e4 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,8 +6,7 @@
#include <linux/kernel.h>
#include <linux/bitfield.h>
-
-#define MTK_ETH_PPE_BASE 0xc00
+#include <linux/rhashtable.h>
#define MTK_PPE_ENTRIES_SHIFT 3
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
@@ -33,6 +32,15 @@
#define MTK_FOE_IB1_UDP BIT(30)
#define MTK_FOE_IB1_STATIC BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
+
enum {
MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
@@ -48,19 +56,30 @@ enum {
#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
#define MTK_FOE_IB2_MULTICAST BIT(8)
-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
+
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+
+#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
enum {
MTK_FOE_STATE_INVALID,
@@ -82,21 +101,21 @@ struct mtk_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u16 minfo;
+ u16 winfo;
};
+/* software-only entry type */
struct mtk_foe_bridge {
- u32 dest_mac_hi;
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;
- u16 src_mac_lo;
- u16 dest_mac_lo;
-
- u32 src_mac_hi;
+ struct {} key_end;
u32 ib2;
- u32 _rsv[5];
-
- u32 udf_tsid;
struct mtk_foe_mac_info l2;
};
@@ -202,7 +221,7 @@ struct mtk_foe_entry {
struct mtk_foe_ipv4_dslite dslite;
struct mtk_foe_ipv6 ipv6;
struct mtk_foe_ipv6_6rd ipv6_6rd;
- u32 data[19];
+ u32 data[23];
};
};
@@ -235,54 +254,105 @@ enum {
MTK_PPE_CPU_REASON_INVALID = 0x1f,
};
+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u8 ppe_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ struct {} end;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
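
struct mtk_flow_entry above doubles as a small tagged union: type selects whether the trailing storage holds a full struct mtk_foe_entry (regular flows) or only the l2_data back-reference (L2 subflows), and the empty struct {} end marker (a GNU C extension) lets mtk_foe_entry_commit_subflow() allocate just offsetof(..., l2_data.end) bytes. A stand-alone sketch of the idiom with stand-in types:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

enum flow_type { FLOW_L4, FLOW_L2_SUBFLOW };

struct flow {
	enum flow_type type;
	union {
		int foe_data[8];	/* stand-in for the full FOE entry */
		struct {
			struct flow *base_flow;
			struct {} end;	/* marks the bytes a subflow needs */
		} l2_data;
	};
};

int main(void)
{
	/* Subflows only need the storage up to the end marker. */
	struct flow *sub = calloc(1, offsetof(struct flow, l2_data.end));

	if (!sub)
		return 1;
	sub->type = FLOW_L2_SUBFLOW;
	printf("%zu of %zu bytes\n",
	       offsetof(struct flow, l2_data.end), sizeof(struct flow));
	free(sub);
	return 0;
}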
+
struct mtk_ppe {
+ struct mtk_eth *eth;
struct device *dev;
void __iomem *base;
int version;
+ char dirname[5];
- struct mtk_foe_entry *foe_table;
+ void *foe_table;
dma_addr_t foe_phys;
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head *foe_flow;
+
+ struct rhashtable l2_flows;
+
void *acct_table;
};
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- ppe->foe_table[hash].ib1 = 0;
- dma_wmb();
-}
+ u16 now, diff;
-static inline int
-mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
-{
- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
+ if (!ppe)
+ return;
+
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
- return -1;
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
}
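
mtk_ppe_check_skb() above rate-limits the slow path with a truncated 16-bit jiffies snapshot per FOE slot: unsigned wraparound makes the now - last difference safe, and anything fresher than HZ/10 (about 100 ms) is skipped. A condensed stand-alone version; HZ is an assumption here.

#include <stdbool.h>
#include <stdint.h>

#define HZ		100		/* assumed tick rate */
#define PPE_ENTRIES	(1024 << 3)	/* 8192, as in mtk_ppe.h */

static uint16_t check_time[PPE_ENTRIES];

/* Return true when the expensive per-packet check should run. */
static bool should_check(uint16_t hash, unsigned long jiffies_now)
{
	uint16_t now = (uint16_t)jiffies_now;
	uint16_t diff = now - check_time[hash];	/* wraps safely in u16 */

	if (diff < HZ / 10)
		return false;

	check_time[hash] = now;
	return true;
}

int main(void)
{
	return !should_check(42, 12345);	/* first check always runs */
}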
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac);
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool orig,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340cb9..391b071bcff3 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
static const char * const type_str[] = {
[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
@@ -80,7 +79,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
int i;
for (i = 0; i < MTK_PPE_ENTRIES; i++) {
- struct mtk_foe_entry *entry = &ppe->foe_table[i];
+ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
struct mtk_foe_mac_info *l2;
struct mtk_flow_addr_info ai = {};
unsigned char h_source[ETH_ALEN];
@@ -163,52 +162,28 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
}
static int
-mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, false);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all);
static int
-mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, true);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind);
-static int
-mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
- return single_open(file, mtk_ppe_debugfs_foe_show_all,
- inode->i_private);
-}
-
-static int
-mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
-{
- return single_open(file, mtk_ppe_debugfs_foe_show_bind,
- inode->i_private);
-}
-
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
-{
- static const struct file_operations fops_all = {
- .open = mtk_ppe_debugfs_foe_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
- static const struct file_operations fops_bind = {
- .open = mtk_ppe_debugfs_foe_open_bind,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
struct dentry *root;
- root = debugfs_create_dir("mtk_ppe", NULL);
- debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
- debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+ root = debugfs_create_dir(ppe->dirname, NULL);
+ debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 7bb1f20002b5..28bbd1df3e30 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -6,10 +6,12 @@
#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
struct mtk_flow_data {
struct ethhdr eth;
@@ -19,11 +21,18 @@ struct mtk_flow_data {
__be32 src_addr;
__be32 dst_addr;
} v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
};
__be16 src_port;
__be16 dst_port;
+ u16 vlan_in;
+
struct {
u16 id;
__be16 proto;
@@ -35,12 +44,6 @@ struct mtk_flow_data {
} pppoe;
};
-struct mtk_flow_entry {
- struct rhash_head node;
- unsigned long cookie;
- u16 hash;
-};
-
static const struct rhashtable_params mtk_flow_ht_params = {
.head_offset = offsetof(struct mtk_flow_entry, node),
.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -48,19 +51,22 @@ static const struct rhashtable_params mtk_flow_ht_params = {
.automatic_shrinking = true,
};
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
+static int
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data, bool egress)
{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
+ data->v4.src_addr, data->src_port,
+ data->v4.dst_addr, data->dst_port);
}
static int
-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
- bool egress)
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data)
{
- return mtk_foe_entry_set_ipv4_tuple(foe, egress,
- data->v4.src_addr, data->src_port,
- data->v4.dst_addr, data->dst_port);
+ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
+ data->v6.src_addr.s6_addr32, data->src_port,
+ data->v6.dst_addr.s6_addr32, data->dst_port);
}
static void
@@ -80,6 +86,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path->mtk_wdma.wdma_idx;
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -139,7 +174,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
- *dev = dp->cpu_dp->master;
+ *dev = dsa_port_to_master(dp);
return dp->index;
#else
@@ -149,13 +184,36 @@ mtk_flow_get_dsa_port(struct net_device **dev)
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev)
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
{
+ struct mtk_wdma_info info = {};
int pse_port, dsa_port;
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+ info.bss, info.wcid);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ switch (info.wdma_idx) {
+ case 0:
+ pse_port = 8;
+ break;
+ case 1:
+ pse_port = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pse_port = 3;
+ }
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(foe, dsa_port);
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
if (dev == eth->netdev[0])
pse_port = 1;
@@ -164,7 +222,8 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
else
return -EOPNOTSUPP;
- mtk_foe_entry_set_pse_port(foe, pse_port);
+out:
+ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
return 0;
}
@@ -179,11 +238,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
+ int wed_index = -1;
u16 addr_type = 0;
- u32 timestamp;
u8 l4proto = 0;
int err = 0;
- int hash;
int i;
if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -215,9 +273,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return -EOPNOTSUPP;
}
+ switch (addr_type) {
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
mtk_flow_offload_mangle_eth(act, &data.eth);
break;
@@ -249,31 +343,25 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
}
- switch (addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
if (!is_valid_ether_addr(data.eth.h_source) ||
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
- data.eth.h_source,
- data.eth.h_dest);
+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+ data.eth.h_source, data.eth.h_dest);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports ports;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
flow_rule_match_ports(rule, &ports);
data.src_port = ports.key->src;
data.dst_port = ports.key->dst;
- } else {
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
return -EOPNOTSUPP;
}
@@ -285,13 +373,27 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv4_addr(&foe, &data, false);
+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv6_addr(eth, &foe, &data);
}
flow_action_for_each(i, act, &rule->action) {
if (act->id != FLOW_ACTION_MANGLE)
continue;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
switch (act->mangle.htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -312,47 +414,56 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
if (err)
return err;
}
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
if (data.vlan.num == 1) {
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
}
if (data.pppoe.num == 1)
- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
- err = mtk_flow_set_output_device(eth, &foe, odev);
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
if (err)
return err;
+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->cookie = f->cookie;
- timestamp = mtk_eth_timestamp(eth);
- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- if (hash < 0) {
- err = hash;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
+ if (err < 0)
goto free;
- }
- entry->hash = hash;
err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (err < 0)
- goto clear_flow;
+ goto clear;
return 0;
-clear_flow:
- mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
return err;
}
@@ -366,9 +477,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
kfree(entry);
return 0;
@@ -378,7 +491,6 @@ static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
- int timestamp;
u32 idle;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -386,11 +498,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- if (timestamp < 0)
- return -ETIMEDOUT;
-
- idle = mtk_eth_timestamp(eth) - timestamp;
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
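
The PPE reports idle time in seconds (the timestamp tick), while flow_cls wants lastused as a jiffies value, hence the idle * HZ conversion above. A worked example with assumed numbers:

#include <stdio.h>

#define HZ 100	/* assumed tick rate */

int main(void)
{
	unsigned long jiffies = 500000;	/* pretend current tick count */
	unsigned int idle = 12;		/* entry idle for 12 seconds */

	/* lastused = now minus the idle interval, expressed in jiffies */
	printf("lastused = %lu\n", jiffies - (unsigned long)idle * HZ);
	return 0;
}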
@@ -442,7 +550,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe.foe_table)
+ if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -483,16 +591,16 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- if (type == TC_SETUP_FT)
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
return mtk_eth_setup_tc_block(dev, type_data);
-
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
}
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe.foe_table)
- return 0;
-
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 0c45ea0900f1..59596d823d8b 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -21,6 +21,9 @@
#define MTK_PPE_GLO_CFG_BUSY BIT(31)
#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
@@ -54,6 +57,7 @@
#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
enum {
MTK_PPE_SCAN_MODE_DISABLED,
@@ -112,6 +116,8 @@ enum {
#define MTK_PPE_DEFAULT_CPU_PORT 0x248
#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
+
#define MTK_PPE_MTU_DROP 0x308
#define MTK_PPE_VLAN_MTU0 0x30c
@@ -141,4 +147,6 @@ enum {
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+#define MTK_PPE_SBW_CTRL 0x374
+
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 32d83421226a..736839c84130 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -9,118 +9,151 @@
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/phylink.h>
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs)
{
- struct device_node *np;
- int i;
-
- ss->ana_rgc3 = ana_rgc3;
-
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- np = of_parse_phandle(r, "mediatek,sgmiisys", i);
- if (!np)
- break;
-
- ss->regmap[i] = syscon_node_to_regmap(np);
- if (IS_ERR(ss->regmap[i]))
- return PTR_ERR(ss->regmap[i]);
- }
-
- return 0;
+ return container_of(pcs, struct mtk_pcs, pcs);
}
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+/* For SGMII interface mode */
+static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
{
unsigned int val;
- if (!ss->regmap[id])
- return -EINVAL;
-
/* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER,
SGMII_LINK_TIMER_DEFAULT);
- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val &= ~SGMII_PHYA_PWD;
- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
return 0;
+
}
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state)
+/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
+ * fixed speed.
+ */
+static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
+ phy_interface_t interface)
{
unsigned int val;
- if (!ss->regmap[id])
- return -EINVAL;
-
- regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+ regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val);
val &= ~RG_PHY_SPEED_MASK;
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
val |= RG_PHY_SPEED_3_125G;
- regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+ regmap_write(mpcs->regmap, mpcs->ana_rgc3, val);
/* Disable SGMII AN */
- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
val &= ~SGMII_AN_ENABLE;
- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
-
- /* SGMII force mode setting */
- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
- val &= ~SGMII_IF_MODE_MASK;
-
- switch (state->speed) {
- case SPEED_10:
- val |= SGMII_SPEED_10;
- break;
- case SPEED_100:
- val |= SGMII_SPEED_100;
- break;
- case SPEED_2500:
- case SPEED_1000:
- val |= SGMII_SPEED_1000;
- break;
- }
+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
- if (state->duplex == DUPLEX_FULL)
- val |= SGMII_DUPLEX_FULL;
-
- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+ /* Set the speed etc but leave the duplex unchanged */
+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
+ val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK;
+ val |= SGMII_SPEED_1000;
+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
/* Release PHYA power down state */
- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val &= ~SGMII_PHYA_PWD;
- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
return 0;
}
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct mtk_sgmii *ss = eth->sgmii;
- unsigned int val, sid;
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ int err = 0;
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac_id;
+ /* Setup SGMIISYS with the determined property */
+ if (interface != PHY_INTERFACE_MODE_SGMII)
+ err = mtk_pcs_setup_mode_force(mpcs, interface);
+ else if (phylink_autoneg_inband(mode))
+ err = mtk_pcs_setup_mode_an(mpcs);
- if (!ss->regmap[sid])
- return;
+ return err;
+}
+
+static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
+{
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int val;
- regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
- regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
+}
+
+static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
+{
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int val;
+
+ if (!phy_interface_mode_is_8023z(interface))
+ return;
+
+ /* SGMII force duplex setting */
+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
+ val &= ~SGMII_DUPLEX_FULL;
+ if (duplex == DUPLEX_FULL)
+ val |= SGMII_DUPLEX_FULL;
+
+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
+}
+
+static const struct phylink_pcs_ops mtk_pcs_ops = {
+ .pcs_config = mtk_pcs_config,
+ .pcs_an_restart = mtk_pcs_restart_an,
+ .pcs_link_up = mtk_pcs_link_up,
+};
+
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+ struct device_node *np;
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ ss->pcs[i].ana_rgc3 = ana_rgc3;
+ ss->pcs[i].regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(ss->pcs[i].regmap))
+ return PTR_ERR(ss->pcs[i].regmap);
+
+ ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ }
+
+ return 0;
+}
+
+struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id)
+{
+ if (!ss->pcs[id].regmap)
+ return NULL;
+
+ return &ss->pcs[id].pcs;
}
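
The phylink conversion above uses the usual embedded-object pattern: struct phylink_pcs lives inside the driver's mtk_pcs, each callback receives the generic pointer, and pcs_to_mtk_pcs() recovers the outer structure with container_of(). A stand-alone sketch of that recovery, with stand-in types and a user-space container_of:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pcs_ops;				/* stand-in for phylink ops */
struct pcs { const struct pcs_ops *ops; };

/* Driver state embeds the generic object; callbacks recover the outer
 * struct, exactly as pcs_to_mtk_pcs() does above. */
struct my_pcs {
	unsigned int ana_rgc3;
	struct pcs pcs;
};

static struct my_pcs *to_my_pcs(struct pcs *p)
{
	return container_of(p, struct my_pcs, pcs);
}

int main(void)
{
	struct my_pcs m = { .ana_rgc3 = 0x2028 };

	printf("%#x\n", to_my_pcs(&m.pcs)->ana_rgc3);
	return 0;
}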
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 89ca7960b225..7050351250b7 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
@@ -30,9 +31,9 @@
#define MTK_STAR_WAIT_TIMEOUT 300
#define MTK_STAR_MAX_FRAME_SIZE 1514
#define MTK_STAR_SKB_ALIGNMENT 16
-#define MTK_STAR_NAPI_WEIGHT 64
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
* work for this controller.
@@ -130,6 +131,11 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_INT_MASK 0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+/* Delay-Macro Register */
+#define MTK_STAR_REG_TEST0 0x0058
+#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
+#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
+
/* Misc. Config Register */
#define MTK_STAR_REG_TEST1 0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
@@ -150,6 +156,7 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+#define MTK_STAR_BIT_CLK_DIV_50 0x32
/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT 0x0100
@@ -182,9 +189,14 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_C_RX_TWIST 0x0218
/* Ethernet CFG Control */
-#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
-#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
-#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
+#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
+#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
+#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
/* Represents the actual structure of descriptors used by the MAC. We can
* reuse the same structure for both TX and RX - the layout is the same, only
@@ -217,7 +229,8 @@ struct mtk_star_ring_desc_data {
struct sk_buff *skb;
};
-#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_RING_NUM_DESCS 512
+#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
@@ -232,6 +245,11 @@ struct mtk_star_ring {
unsigned int tail;
};
+struct mtk_star_compat {
+ int (*set_interface_mode)(struct net_device *ndev);
+ unsigned char bit_clk_div;
+};
+
struct mtk_star_priv {
struct net_device *ndev;
@@ -247,7 +265,8 @@ struct mtk_star_priv {
struct mtk_star_ring rx_ring;
struct mii_bus *mii;
- struct napi_struct napi;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
struct device_node *phy_node;
phy_interface_t phy_intf;
@@ -256,6 +275,11 @@ struct mtk_star_priv {
int speed;
int duplex;
int pause;
+ bool rmii_rxc;
+ bool rx_inv;
+ bool tx_inv;
+
+ const struct mtk_star_compat *compat_data;
/* Protects against concurrent descriptor access. */
spinlock_t lock;
@@ -358,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
mtk_star_ring_push_head(ring, desc_data, flags);
}
-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
- return abs(ring->head - ring->tail);
-}
+ u32 avail;
-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
-}
+ if (ring->tail > ring->head)
+ avail = ring->tail - ring->head - 1;
+ else
+ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) > 0;
+ return avail;
}
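
mtk_star_tx_ring_avail() above computes free slots for a ring in which head chases tail and one slot is always sacrificed, so that head == tail can unambiguously mean empty. A stand-alone check of the arithmetic:

#include <stdio.h>

#define RING_NUM_DESCS 512

/* head: next slot the producer writes; tail: next slot the consumer
 * reclaims. One slot stays unused so head == tail always means empty. */
static unsigned int ring_avail(unsigned int head, unsigned int tail)
{
	if (tail > head)
		return tail - head - 1;
	return RING_NUM_DESCS - head + tail - 1;
}

int main(void)
{
	printf("%u\n", ring_avail(0, 0));	/* empty: 511 usable slots */
	printf("%u\n", ring_avail(10, 4));	/* wrapped: 505 free */
	printf("%u\n", ring_avail(4, 5));	/* full: 0 free */
	return 0;
}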
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
@@ -415,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value |= MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
@@ -430,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
-{
- unsigned int val;
-
- regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
-
- return val;
-}
-
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
unsigned int val;
- val = mtk_star_intr_read(priv);
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
return val;
@@ -715,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
-/* All processing for TX and RX happens in the napi poll callback.
- *
- * FIXME: The interrupt handling should be more fine-grained with each
- * interrupt enabled/disabled independently when needed. Unfortunatly this
- * turned out to impact the driver's stability and until we have something
- * working properly, we're disabling all interrupts during TX & RX processing
- * or when resetting the counter registers.
- */
+/**
+ * mtk_star_handle_irq - Interrupt Handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ * Description: this is the driver interrupt service routine.
+ * It mainly handles:
+ * 1. TX complete interrupt for frame transmission.
+ * 2. RX complete interrupt for frame reception.
+ * 3. MAC Management Counter interrupt to avoid counter overflow.
+ **/
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
- struct mtk_star_priv *priv;
- struct net_device *ndev;
-
- ndev = data;
- priv = netdev_priv(ndev);
+ struct net_device *ndev = data;
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int intr_status = mtk_star_intr_ack_all(priv);
+ bool rx, tx;
+
+ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+ napi_schedule_prep(&priv->rx_napi);
+ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+ napi_schedule_prep(&priv->tx_napi);
+
+ if (rx || tx) {
+ spin_lock(&priv->lock);
+ /* mask Rx and TX Complete interrupt */
+ mtk_star_disable_dma_irq(priv, rx, tx);
+ spin_unlock(&priv->lock);
+
+ if (rx)
+ __napi_schedule(&priv->rx_napi);
+ if (tx)
+ __napi_schedule(&priv->tx_napi);
+ }
- if (netif_running(ndev)) {
- mtk_star_intr_disable(priv);
- napi_schedule(&priv->napi);
+ /* interrupt is triggered once any counters reach 0x8000000 */
+ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
}
return IRQ_HANDLED;
@@ -822,32 +883,26 @@ static void mtk_star_phy_config(struct mtk_star_priv *priv)
val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
- /* Only full-duplex supported for now. */
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
-
- regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
-
if (priv->pause) {
- val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
- val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
- val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
} else {
- val = 0;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
}
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
- if (priv->pause) {
- val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
- val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
- } else {
- val = 0;
- }
-
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
@@ -899,14 +954,7 @@ static void mtk_star_init_config(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
MTK_STAR_MSK_MAC_CLK_CONF,
- MTK_STAR_BIT_CLK_DIV_10);
-}
-
-static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
-{
- regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
- MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
- MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+ priv->compat_data->bit_clk_div);
}
static int mtk_star_enable(struct net_device *ndev)
@@ -952,11 +1000,12 @@ static int mtk_star_enable(struct net_device *ndev)
/* Request the interrupt */
ret = request_irq(ndev->irq, mtk_star_handle_irq,
- IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ IRQF_TRIGGER_NONE, ndev->name, ndev);
if (ret)
goto err_free_skbs;
- napi_enable(&priv->napi);
+ napi_enable(&priv->tx_napi);
+ napi_enable(&priv->rx_napi);
mtk_star_intr_ack_all(priv);
mtk_star_intr_enable(priv);
@@ -977,6 +1026,8 @@ static int mtk_star_enable(struct net_device *ndev)
return 0;
err_free_irq:
+ napi_disable(&priv->rx_napi);
+ napi_disable(&priv->tx_napi);
free_irq(ndev->irq, ndev);
err_free_skbs:
mtk_star_free_rx_skbs(priv);
@@ -989,7 +1040,8 @@ static void mtk_star_disable(struct net_device *ndev)
struct mtk_star_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ napi_disable(&priv->tx_napi);
+ napi_disable(&priv->rx_napi);
mtk_star_intr_disable(priv);
mtk_star_dma_disable(priv);
mtk_star_intr_ack_all(priv);
@@ -1021,13 +1073,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
return phy_mii_ioctl(ndev->phydev, req, cmd);
}
-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ netif_stop_queue(priv->ndev);
+
+ /* Might race with mtk_star_tx_poll, check again */
+ smp_mb();
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+ return -EBUSY;
+
+ netif_start_queue(priv->ndev);
+
+ return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+ return 0;
+
+ return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->tx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ /* This is a hard error, log it. */
+ pr_err_ratelimited("Tx ring full when queue awake\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1035,17 +1119,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
desc_data.skb = skb;
desc_data.len = skb->len;
-
- spin_lock_bh(&priv->lock);
-
mtk_star_ring_push_head_tx(ring, &desc_data);
netdev_sent_queue(ndev, skb->len);
- if (mtk_star_ring_full(ring))
- netif_stop_queue(ndev);
-
- spin_unlock_bh(&priv->lock);
+ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
mtk_star_dma_resume_tx(priv);
@@ -1077,31 +1155,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
return ret;
}
-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
+ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+ tx_napi);
+ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
struct mtk_star_ring *ring = &priv->tx_ring;
struct net_device *ndev = priv->ndev;
- int ret, pkts_compl, bytes_compl;
- bool wake = false;
-
- spin_lock(&priv->lock);
-
- for (pkts_compl = 0, bytes_compl = 0;;
- pkts_compl++, bytes_compl += ret, wake = true) {
- if (!mtk_star_ring_descs_available(ring))
- break;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
if (ret < 0)
break;
+
+ count++;
+ pkts_compl++;
+ bytes_compl += ret;
+ entry = ring->tail;
}
netdev_completed_queue(ndev, pkts_compl, bytes_compl);
- if (wake && netif_queue_stopped(ndev))
+ if (unlikely(netif_queue_stopped(ndev)) &&
+ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
netif_wake_queue(ndev);
- spin_unlock(&priv->lock);
+ if (napi_complete(napi)) {
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, false, true);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
}
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
@@ -1170,7 +1257,7 @@ static const struct net_device_ops mtk_star_netdev_ops = {
static void mtk_star_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+ strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
/* TODO Add ethtool stats. */
@@ -1181,7 +1268,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
@@ -1189,107 +1276,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
struct net_device *ndev = priv->ndev;
struct sk_buff *curr_skb, *new_skb;
dma_addr_t new_dma_addr;
- int ret;
+ int ret, count = 0;
- spin_lock(&priv->lock);
- ret = mtk_star_ring_pop_tail(ring, &desc_data);
- spin_unlock(&priv->lock);
- if (ret)
- return -1;
+ while (count < budget) {
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return -1;
- curr_skb = desc_data.skb;
+ curr_skb = desc_data.skb;
- if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
- (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
- /* Error packet -> drop and reuse skb. */
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- /* Prepare new skb before receiving the current one. Reuse the current
- * skb if we fail at any point.
- */
- new_skb = mtk_star_alloc_skb(ndev);
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ /* Prepare new skb before receiving the current one.
+ * Reuse the current skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
- if (dma_mapping_error(dev, new_dma_addr)) {
- ndev->stats.rx_dropped++;
- dev_kfree_skb(new_skb);
- new_skb = curr_skb;
- netdev_err(ndev, "DMA mapping error of RX descriptor\n");
- goto push_new_skb;
- }
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
- /* We can't fail anymore at this point: it's safe to unmap the skb. */
- mtk_star_dma_unmap_rx(priv, &desc_data);
+ /* We can't fail anymore at this point:
+ * it's safe to unmap the skb.
+ */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
- skb_put(desc_data.skb, desc_data.len);
- desc_data.skb->ip_summed = CHECKSUM_NONE;
- desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
- desc_data.skb->dev = ndev;
- netif_receive_skb(desc_data.skb);
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
- /* update dma_addr for new skb */
- desc_data.dma_addr = new_dma_addr;
+ /* update dma_addr for new skb */
+ desc_data.dma_addr = new_dma_addr;
push_new_skb:
- desc_data.len = skb_tailroom(new_skb);
- desc_data.skb = new_skb;
- spin_lock(&priv->lock);
- mtk_star_ring_push_head_rx(ring, &desc_data);
- spin_unlock(&priv->lock);
+ count++;
- return 0;
-}
-
-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
-{
- int received, ret;
-
- for (received = 0, ret = 0; received < budget && ret == 0; received++)
- ret = mtk_star_receive_packet(priv);
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ }
mtk_star_dma_resume_rx(priv);
- return received;
+ return count;
}
-static int mtk_star_poll(struct napi_struct *napi, int budget)
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
- unsigned int status;
- int received = 0;
-
- priv = container_of(napi, struct mtk_star_priv, napi);
-
- status = mtk_star_intr_read(priv);
- mtk_star_intr_ack_all(priv);
+ int work_done = 0;
- if (status & MTK_STAR_BIT_INT_STS_TNTC)
- /* Clean-up all TX descriptors. */
- mtk_star_tx_complete_all(priv);
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
- if (status & MTK_STAR_BIT_INT_STS_FNRC)
- /* Receive up to $budget packets. */
- received = mtk_star_process_rx(priv, budget);
-
- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
- mtk_star_update_stats(priv);
- mtk_star_reset_counters(priv);
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock(&priv->lock);
}
- if (received < budget)
- napi_complete_done(napi, received);
-
- mtk_star_intr_enable(priv);
-
- return received;
+ return work_done;
}
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
@@ -1443,6 +1508,25 @@ static void mtk_star_clk_disable_unprepare(void *data)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+static int mtk_star_set_timing(struct mtk_star_priv *priv)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int delay_val = 0;
+
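+	/* Only MII and RMII are handled; apply the optional RX/TX clock
+	 * inversion requested via the device tree.
+	 */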
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
+ break;
+ default:
+		dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
+}
+
static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
@@ -1461,6 +1545,7 @@ static int mtk_star_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
+ priv->compat_data = of_device_get_match_data(&pdev->dev);
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, ndev);
@@ -1511,7 +1596,8 @@ static int mtk_star_probe(struct platform_device *pdev)
ret = of_get_phy_mode(of_node, &priv->phy_intf);
if (ret) {
return ret;
- } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
+ priv->phy_intf != PHY_INTERFACE_MODE_MII) {
dev_err(dev, "unsupported phy mode: %s\n",
phy_modes(priv->phy_intf));
return -EINVAL;
@@ -1523,7 +1609,23 @@ static int mtk_star_probe(struct platform_device *pdev)
return -ENODEV;
}
- mtk_star_set_mode_rmii(priv);
+ priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
+ priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
+ priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
+
+ if (priv->compat_data->set_interface_mode) {
+ ret = priv->compat_data->set_interface_mode(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = mtk_star_set_timing(priv);
+ if (ret) {
+ dev_err(dev, "Failed to set timing, err = %d\n", ret);
+ return -EINVAL;
+ }
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1551,18 +1653,95 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
}
+#ifdef CONFIG_OF
+static int mt8516_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+	unsigned int intf_val, rmii_rxc;
+	int ret;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ rmii_rxc = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
+ break;
+ default:
+		dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG1_CON,
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
+ rmii_rxc);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG0_CON,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
+ intf_val);
+}
+
+static int mt8365_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
+ break;
+ default:
+		dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG_CON_V2,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
+ intf_val);
+}
+
+static const struct mtk_star_compat mtk_star_mt8516_compat = {
+ .set_interface_mode = mt8516_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
+};
+
+static const struct mtk_star_compat mtk_star_mt8365_compat = {
+ .set_interface_mode = mt8365_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
+};
+
static const struct of_device_id mtk_star_of_match[] = {
- { .compatible = "mediatek,mt8516-eth", },
- { .compatible = "mediatek,mt8518-eth", },
- { .compatible = "mediatek,mt8175-eth", },
+ { .compatible = "mediatek,mt8516-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8518-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8175-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8365-eth",
+ .data = &mtk_star_mt8365_compat },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+#endif
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
mtk_star_suspend, mtk_star_resume);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 000000000000..65e01bf4b4d2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,1160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+
+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+
+#define MTK_WED_TX_RING_SIZE 2048
+#define MTK_WED_WDMA_RING_SIZE 1024
+#define MTK_WED_MAX_GROUP_SIZE 0x100
+#define MTK_WED_VLD_GROUP_SIZE 0x40
+#define MTK_WED_PER_GROUP_PKT 128
+
+#define MTK_WED_FBUF_SIZE 128
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
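+	/* Clear the bits in @mask and set the bits in @val in one update. */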
+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, 0, mask);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_RESET);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 status;
+
+ wed_w32(dev, MTK_WED_RESET, mask);
+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+ !(status & mask), 0, 1000))
+ WARN_ON_ONCE(1);
+}
+
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw;
+ int i;
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw)
+ return NULL;
+
+ if (!hw->wed_dev)
+ goto out;
+
+ if (hw->version == 1)
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+ }
+ /* MT7986 PCIE or AXI */
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = hw_list[i];
+ if (hw && !hw->wed_dev)
+ goto out;
+ }
+
+ return NULL;
+
+out:
+ hw->wed_dev = dev;
+ return hw;
+}
+
+static int
+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+ int i, page_idx;
+
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->buf_ring.size = ring_size;
+ dev->buf_ring.pages = page_list;
+
+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->buf_ring.desc = desc;
+ dev->buf_ring.desc_phys = desc_phys;
+
+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+ int s;
+
+ page = __dev_alloc_pages(GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx++] = page;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf = page_to_virt(page);
+ buf_phys = page_phys;
+
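+		/* Carve the page into MTK_WED_BUF_PER_PAGE buffers: buf0 points
+		 * at the wlan tx descriptor, buf1 at the remaining payload room.
+		 */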
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+ u32 txd_size;
+ u32 ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+
+ if (dev->hw->version == 1)
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ else
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG0;
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ desc++;
+
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
+ void **page_list = dev->buf_ring.pages;
+ int page_idx;
+ int i;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx++];
+ dma_addr_t buf_addr;
+
+ if (!page)
+ break;
+
+ buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
+ desc, dev->buf_ring.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
+}
+
+static void
+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
+{
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
+ ring->desc, ring->desc_phys);
+}
+
+static void
+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
+}
+
+static void
+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+{
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
+}
+
+static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+{
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+ } else {
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ }
+}
+
+static void
+mtk_wed_dma_disable(struct mtk_wed_device *dev)
+{
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
+
+ if (dev->hw->version == 1) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
+ } else {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ mtk_wed_set_512_support(dev, false);
+ }
+}
+
+static void
+mtk_wed_stop(struct mtk_wed_device *dev)
+{
+ mtk_wed_dma_disable(dev);
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw = dev->hw;
+
+ mutex_lock(&hw_lock);
+
+ mtk_wed_stop(dev);
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ struct device_node *wlan_node;
+
+ wlan_node = dev->wlan.pci_dev->dev.of_node;
+ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+ }
+
+ if (!hw_list[!hw->index]->wed_dev &&
+ hw->eth->dma_dev != hw->eth->dev)
+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
+
+ memset(dev, 0, sizeof(*dev));
+ module_put(THIS_MODULE);
+
+ hw->wed_dev = NULL;
+ mutex_unlock(&hw_lock);
+}
+
+#define PCIE_BASE_ADDR0 0x11280000
+static void
+mtk_wed_bus_init(struct mtk_wed_device *dev)
+{
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
+
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+		/* pcie interrupt control: polarity/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+		/* polarity setting */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+{
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ }
+}
+
+static void
+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+{
+ u32 mask, set;
+
+ mtk_wed_stop(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
+
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ if (dev->hw->version == 1) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ MTK_PCIE_BASE(dev->hw->index));
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0,
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
+ MTK_WDMA_INT_STATUS) |
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
+ MTK_WDMA_GLO_CFG));
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1,
+ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
+ MTK_WDMA_RING_TX(0)) |
+ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
+ MTK_WDMA_RING_RX(0)));
+ }
+}
+
+static void
+mtk_wed_hw_init(struct mtk_wed_device *dev)
+{
+ if (dev->init_done)
+ return;
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+ wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+ dev->buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->buf_ring.size / 128));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (dev->hw->version == 1)
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ else
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+}
+
+static void
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
+{
+ void *head = (void *)ring->desc;
+ int i;
+
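+	/* Mark every descriptor DMA-done so the hardware sees an empty ring. */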
+ for (i = 0; i < size; i++) {
+ struct mtk_wdma_desc *desc;
+
+ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = 0;
+ }
+}
+
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev)
+{
+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+ return true;
+
+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_CTRL) &
+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+ return true;
+
+ return false;
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev)
+{
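+	/* read_poll_timeout() takes microseconds: poll every 15 ms for up to
+	 * 1.5 seconds.
+	 */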
+ int sleep = 15000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev);
+}
+
+static void
+mtk_wed_reset_dma(struct mtk_wed_device *dev)
+{
+ bool busy = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+ if (!dev->tx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
+ }
+
+ if (mtk_wed_poll_busy(dev))
+ busy = mtk_wed_check_busy(dev);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+ wed_w32(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_TX |
+ MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+ }
+
+ for (i = 0; i < 100; i++) {
+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ break;
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+ MTK_WED_WPDMA_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+ }
+}
+
+static int
+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size, u32 desc_size)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_size = desc_size;
+ ring->size = size;
+ mtk_wed_ring_reset(ring, size);
+
+ return 0;
+}
+
+static int
+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+
+ return 0;
+}
+
+static void
+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
+ /* wed control cr set */
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+		/* initial tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
+ dev->wlan.tx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
+ dev->wlan.tx_tbit[1]));
+
+		/* initial txfree interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
+ dev->wdma_idx));
+ }
+
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+}
+
+static void
+mtk_wed_dma_enable(struct mtk_wed_device *dev)
+{
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ wed_set(dev, MTK_WED_WPDMA_CTRL,
+ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ }
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ if (!dev->tx_wdma[i].desc)
+ mtk_wed_wdma_ring_setup(dev, i, 16);
+
+ mtk_wed_hw_init(dev);
+ mtk_wed_configure_irq(dev, irq_mask);
+
+ mtk_wed_set_ext_int(dev, true);
+
+ if (dev->hw->version == 1) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ } else {
+ mtk_wed_set_512_support(dev, true);
+ }
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+}
+
+static int
+mtk_wed_attach(struct mtk_wed_device *dev)
+ __releases(RCU)
+{
+ struct mtk_wed_hw *hw;
+ struct device *device;
+ int ret = 0;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "mtk_wed_attach without holding the RCU read lock");
+
+ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
+ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
+ !try_module_get(THIS_MODULE))
+ ret = -ENODEV;
+
+ rcu_read_unlock();
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&hw_lock);
+
+ hw = mtk_wed_assign(dev);
+ if (!hw) {
+ module_put(THIS_MODULE);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+ ? &dev->wlan.pci_dev->dev
+ : &dev->wlan.platform_dev->dev;
+ dev_info(device, "attaching wed device %d version %d\n",
+ hw->index, hw->version);
+
+ dev->hw = hw;
+ dev->dev = hw->dev;
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+
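+	/* Let WED act as the DMA device for the ethernet driver when the
+	 * ethernet node is DMA-coherent.
+	 */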
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+ mtk_eth_set_dma_device(hw->eth, hw->dev);
+
+ ret = mtk_wed_buffer_alloc(dev);
+ if (ret) {
+ mtk_wed_detach(dev);
+ goto out;
+ }
+
+ mtk_wed_hw_init_early(dev);
+ if (hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+static int
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
+
+ /*
+ * Tx ring redirection:
+ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
+ * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
+ * registers.
+ *
+ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
+ * into MTK_WED_WPDMA_RING_TX(n) registers.
+ * It gets filled with packets picked up from WED TX ring and from
+ * WDMA RX.
+ */
+
+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
+
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc)))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_TX_RING_SIZE);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ return 0;
+}
+
+static int
+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+ int i, index = dev->hw->version == 1;
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+ * and WLAN. The WLAN driver accesses the ring index registers through
+	 * WED.
+ */
+ ring->reg_base = MTK_WED_RING_RX(index);
+ ring->wpdma = regs;
+
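+	/* Mirror the ring's BASE, COUNT and CPU index registers into WED. */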
+ for (i = 0; i < 12; i += 4) {
+ u32 val = readl(regs + i);
+
+ wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
+ }
+
+ return 0;
+}
+
+static u32
+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+ val &= ext_mask;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (val && net_ratelimit())
+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
+
+ val = wed_r32(dev, MTK_WED_INT_STATUS);
+ val &= mask;
+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
+
+ return val;
+}
+
+static void
+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+{
+ if (!dev->running)
+ return;
+
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+}
+
+int mtk_wed_flow_add(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+ int ret;
+
+ if (!hw || !hw->wed_dev)
+ return -ENODEV;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
+ return 0;
+ }
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
+ if (!ret)
+ hw->num_flows++;
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+void mtk_wed_flow_remove(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+
+ if (!hw)
+ return;
+
+ if (--hw->num_flows)
+ return;
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+{
+ static const struct mtk_wed_ops wed_ops = {
+ .attach = mtk_wed_attach,
+ .tx_ring_setup = mtk_wed_tx_ring_setup,
+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ .start = mtk_wed_start,
+ .stop = mtk_wed_stop,
+ .reset_dma = mtk_wed_reset_dma,
+ .reg_read = wed_r32,
+ .reg_write = wed_w32,
+ .irq_get = mtk_wed_irq_get,
+ .irq_set_mask = mtk_wed_irq_set_mask,
+ .detach = mtk_wed_detach,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+ struct mtk_wed_hw *hw;
+ struct regmap *regs;
+ int irq;
+
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ goto err_of_node_put;
+
+ get_device(&pdev->dev);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_put_device;
+
+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(regs))
+ goto err_put_device;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
+
+ mutex_lock(&hw_lock);
+
+ if (WARN_ON(hw_list[index]))
+ goto unlock;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
+
+ hw->node = np;
+ hw->regs = regs;
+ hw->eth = eth;
+ hw->dev = &pdev->dev;
+ hw->wdma_phy = wdma_phy;
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+
+ if (hw->version == 1) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
+
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
+ }
+
+ mtk_wed_hw_add_debugfs(hw);
+
+ hw_list[index] = hw;
+
+ mutex_unlock(&hw_lock);
+
+ return;
+
+unlock:
+ mutex_unlock(&hw_lock);
+err_put_device:
+ put_device(&pdev->dev);
+err_of_node_put:
+ of_node_put(np);
+}
+
+void mtk_wed_exit(void)
+{
+ int i;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
+
+ synchronize_rcu();
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[i];
+ if (!hw)
+ continue;
+
+ hw_list[i] = NULL;
+ debugfs_remove(hw->debugfs_dir);
+ put_device(hw->dev);
+ of_node_put(hw->node);
+ kfree(hw);
+ }
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..ae420ca01a48
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_PRIV_H
+#define __MTK_WED_PRIV_H
+
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/netdevice.h>
+
+struct mtk_eth;
+
+struct mtk_wed_hw {
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+ struct regmap *hifsys;
+ struct device *dev;
+ void __iomem *wdma;
+ phys_addr_t wdma_phy;
+ struct regmap *mirror;
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ u32 debugfs_reg;
+ u32 num_flows;
+ u8 version;
+ char dirname[5];
+ int irq;
+ int index;
+};
+
+struct mtk_wdma_info {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline void
+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ regmap_write(dev->hw->regs, reg, val);
+}
+
+static inline u32
+wed_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(dev->hw->regs, reg, &val);
+
+ return val;
+}
+
+static inline void
+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->hw->wdma + reg);
+}
+
+static inline u32
+wdma_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->hw->wdma + reg);
+}
+
+static inline u32
+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ if (!dev->txfree_ring.wpdma)
+ return 0;
+
+ return readl(dev->txfree_ring.wpdma + reg);
+}
+
+static inline void
+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ if (!dev->txfree_ring.wpdma)
+ return;
+
+ writel(val, dev->txfree_ring.wpdma + reg);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
+void mtk_wed_exit(void);
+int mtk_wed_flow_add(int index);
+void mtk_wed_flow_remove(int index);
+#else
+static inline void
+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+{
+}
+static inline void
+mtk_wed_exit(void)
+{
+}
+static inline int mtk_wed_flow_add(int index)
+{
+ return -EINVAL;
+}
+static inline void mtk_wed_flow_remove(int index)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
+#else
+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
new file mode 100644
index 000000000000..f420f187e837
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/seq_file.h>
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+
+struct reg_dump {
+ const char *name;
+ u16 offset;
+ u8 type;
+ u8 base;
+};
+
+enum {
+ DUMP_TYPE_STRING,
+ DUMP_TYPE_WED,
+ DUMP_TYPE_WDMA,
+ DUMP_TYPE_WPDMA_TX,
+ DUMP_TYPE_WPDMA_TXFREE,
+};
+
+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
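+
+/* DUMP_RING expands into four entries: the ring's BASE, CNT, CIDX and DIDX
+ * registers.
+ */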
+
+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
+
+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+
+static void
+print_reg_val(struct seq_file *s, const char *name, u32 val)
+{
+ seq_printf(s, "%-32s %08x\n", name, val);
+}
+
+static void
+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+ const struct reg_dump *regs, int n_regs)
+{
+ const struct reg_dump *cur;
+ u32 val;
+
+ for (cur = regs; cur < &regs[n_regs]; cur++) {
+ switch (cur->type) {
+ case DUMP_TYPE_STRING:
+ seq_printf(s, "%s======== %s:\n",
+ cur > regs ? "\n" : "",
+ cur->name);
+ continue;
+ case DUMP_TYPE_WED:
+ val = wed_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WDMA:
+ val = wdma_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TX:
+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TXFREE:
+ val = wpdma_txfree_r32(dev, cur->offset);
+ break;
+ }
+ print_reg_val(s, cur->name, val);
+ }
+}
+
+static int
+wed_txinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED TX"),
+ DUMP_WED(WED_TX_MIB(0)),
+ DUMP_WED_RING(WED_RING_TX(0)),
+
+ DUMP_WED(WED_TX_MIB(1)),
+ DUMP_WED_RING(WED_RING_TX(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
+
+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WPDMA_TX_RING(0),
+ DUMP_WPDMA_TX_RING(1),
+
+ DUMP_STR("WED WDMA RX"),
+ DUMP_WED(WED_WDMA_RX_MIB(0)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
+ DUMP_WED(WED_WDMA_RX_THRES(0)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
+
+ DUMP_WED(WED_WDMA_RX_MIB(1)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
+ DUMP_WED(WED_WDMA_RX_THRES(1)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
+
+ DUMP_STR("WDMA RX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+ DUMP_STR("TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev)
+ return 0;
+
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+
+static int
+mtk_wed_reg_set(void *data, u64 val)
+{
+ struct mtk_wed_hw *hw = data;
+
+ regmap_write(hw->regs, hw->debugfs_reg, val);
+
+ return 0;
+}
+
+static int
+mtk_wed_reg_get(void *data, u64 *val)
+{
+ struct mtk_wed_hw *hw = data;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+ "0x%08llx\n");
+
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+ struct dentry *dir;
+
+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
+ dir = debugfs_create_dir(hw->dirname, NULL);
+	if (IS_ERR(dir))
+ return;
+
+ hw->debugfs_dir = dir;
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
new file mode 100644
index 000000000000..a5d9d8a5bce2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
new file mode 100644
index 000000000000..e270fb336143
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_REGS_H
+#define __MTK_WED_REGS_H
+
+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
+struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+#define MTK_WED_RESET 0x008
+#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+#define MTK_WED_RESET_WED BIT(31)
+
+#define MTK_WED_CTRL 0x00c
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+
+#define MTK_WED_EXT_INT_STATUS 0x020
+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
+
+#define MTK_WED_EXT_INT_MASK 0x028
+
+#define MTK_WED_STATUS 0x060
+#define MTK_WED_STATUS_TX GENMASK(15, 8)
+
+#define MTK_WED_TX_BM_CTRL 0x080
+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_BM_BASE 0x084
+
+#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_V2 0x0c8
+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+#define MTK_WED_TX_BM_BUF_LEN 0x08c
+
+#define MTK_WED_TX_BM_INTF 0x09c
+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
+
+#define MTK_WED_TX_BM_DYN_THR 0x0a0
+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
+
+#define MTK_WED_TX_TKID_CTRL 0x0c0
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_TKID_DYN_THR 0x0e0
+#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_TXP_DW0 0x120
+#define MTK_WED_TXP_DW1 0x124
+#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
+#define MTK_WED_TXDP_CTRL 0x130
+#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
+#define MTK_WED_RX_BM_TKID_MIB 0x1cc
+
+#define MTK_WED_INT_STATUS 0x200
+#define MTK_WED_INT_MASK 0x204
+
+#define MTK_WED_GLO_CFG 0x208
+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_RESET_IDX 0x20c
+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
+
+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
+
+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+
+#define MTK_WED_WPDMA_GLO_CFG 0x508
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+
+#define MTK_WED_WPDMA_RESET_IDX 0x50c
+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_CTRL 0x518
+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
+
+#define MTK_WED_WPDMA_INT_CTRL 0x520
+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22)
+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_INT_MASK 0x524
+
+#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
+
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
+
+#define MTK_WED_PCIE_CFG_BASE 0x560
+#define MTK_WED_PCIE_CFG_INTM 0x564
+#define MTK_WED_PCIE_CFG_MSIS 0x568
+#define MTK_WED_PCIE_INT_TRIGGER 0x570
+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+#define MTK_WED_PCIE_INT_CTRL 0x57c
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+
+#define MTK_WED_WPDMA_CFG_BASE 0x580
+#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+#define MTK_WED_WPDMA_CFG_TX 0x588
+#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
+
+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+
+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
+#define MTK_WED_WDMA_GLO_CFG 0xa04
+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
+
+#define MTK_WED_WDMA_RESET_IDX 0xa08
+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+#define MTK_WED_WDMA_INT_CLR 0xa24
+#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WDMA_CFG_BASE 0xaa0
+#define MTK_WED_WDMA_OFFSET0 0xaa4
+#define MTK_WED_WDMA_OFFSET1 0xaa8
+
+#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
+#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
+
+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+
+#define MTK_WED_RING_OFS_BASE 0x00
+#define MTK_WED_RING_OFS_COUNT 0x04
+#define MTK_WED_RING_OFS_CPU_IDX 0x08
+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+
+#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+#define MTK_WDMA_GLO_CFG 0x204
+#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
+
+#define MTK_WDMA_RESET_IDX 0x208
+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WDMA_INT_STATUS 0x220
+
+#define MTK_WDMA_INT_MASK 0x228
+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
+#define MTK_WDMA_INT_GRP1 0x250
+#define MTK_WDMA_INT_GRP2 0x254
+
+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+
+/* DMA channel mapping */
+#define HIFSYS_DMA_AG_MAP 0x008
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ out:
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e10b7b04b894..c56d2194cbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1994,21 +1994,16 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port, err;
+ int p, port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_slave_state *slave_state =
&priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
@@ -2063,19 +2058,13 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port;
+ int p, port;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
-
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
MLX4_VF_SMI_DISABLED;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index ac5468b77488..82a07a31cde7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
- devlink_region_create(devlink,
- &region_cr_space_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- pci_resource_len(pdev, 0));
+ devl_region_create(devlink,
+ &region_cr_space_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
- devlink_region_create(devlink,
- &region_fw_health_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- HEALTH_BUFFER_SIZE);
+ devl_region_create(devlink,
+ &region_fw_health_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
- devlink_region_destroy(crdump->region_fw_health);
- devlink_region_destroy(crdump->region_crspace);
+ devl_region_destroy(crdump->region_fw_health);
+ devl_region_destroy(crdump->region_crspace);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index d5fc72b1a36f..1184ac5751e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -147,13 +147,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
- netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
napi_enable(&cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
- netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
napi_enable(&cq->napi);
break;
case TX_XDP:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ed5038d98ef6..7d45f1d55f79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -89,15 +89,15 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
+ strscpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
}
@@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
- return 0;
+ return ret;
}
i += ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c61dc7ae0c05..ca4b93a01034 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3417,6 +3417,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
+ /* supports LSOv2 packets. */
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8cfc649f226b..8f762fc170b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1067,7 +1067,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof(*context), GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -1078,7 +1078,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
}
qp->event = mlx4_en_sqp_event;
- memset(context, 0, sizeof(*context));
mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, -1, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 817f4154b86d..43a4102e9c09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -42,8 +42,8 @@
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/moduleparam.h>
#include <linux/indirect_call_wrapper.h>
+#include <net/ipv6.h>
#include "mlx4_en.h"
@@ -635,19 +635,28 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
- void **pfrag)
+ void **pfrag,
+ int *hopbyhop)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
- if (skb->encapsulation)
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
- else
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *hopbyhop = 0;
+ if (skb->encapsulation) {
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
+ } else {
+ /* Detects large IPV6 TCP packets and prepares for removal of
+ * HBH header that has been pushed by ip6_xmit(),
+ * mainly so that tcpdump can dissect them.
+ */
+ if (ipv6_has_hopopt_jumbo(skb))
+ *hopbyhop = sizeof(struct hop_jumbo_hdr);
+ *lso_header_size = skb_tcp_all_headers(skb);
+ }
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
- ALIGN(*lso_header_size + 4, DS_SIZE);
+ ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
@@ -874,6 +883,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
u32 index, bf_index;
+ struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@@ -882,6 +892,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
+ int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -891,7 +902,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
- &inline_ok, &fragptr);
+ &inline_ok, &fragptr, &hopbyhop);
if (unlikely(!real_size))
goto tx_drop_count;
@@ -944,7 +955,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
- int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+ int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -1009,14 +1020,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
- /* Copy headers;
- * note that we already verified that it is linear */
- memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ if (unlikely(hopbyhop)) {
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
+ h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb));
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else {
+ /* Copy headers;
+ * note that we already verified that it is linear
+ */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ }
ring->tso_packets++;
i = shinfo->gso_segs;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9e48509ed3b2..414e390e6b48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
cpumask_empty(eq->affinity_mask))
return;
- hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
if (hint_err)
- mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+ mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
- irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+ irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb1..fe48d20d6118 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
- dev->caps.num_ports);
+ (unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
@@ -1779,7 +1779,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index d89a3da89e5a..59b8b3c73582 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -208,7 +208,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->sg, chunk->npages,
DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
@@ -222,7 +222,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
chunk->npages, DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b187c210d4d6..d3fc86cd3c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devlink_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register(devlink, &info->devlink_port, port);
if (err)
return err;
@@ -3071,7 +3071,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
- devlink_port_unregister(&info->devlink_port);
+ devlink_port_type_clear(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3093,7 +3094,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
- devlink_port_unregister(&info->devlink_port);
+ devlink_port_type_clear(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3109,7 +3111,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
- devlink_port_unregister(&info->devlink_port);
+ devlink_port_type_clear(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3336,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
+ struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
@@ -3341,6 +3345,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3999,6 +4004,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4026,6 +4032,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -4035,6 +4042,7 @@ err_params_unregister:
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4056,8 +4064,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
+ struct devlink *devlink;
int p, i;
+ devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4137,6 +4148,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4172,6 +4184,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4292,15 +4305,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4333,6 +4351,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4340,6 +4359,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4379,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4385,12 +4410,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4402,6 +4431,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4409,6 +4439,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4454,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 4ba1a78c6515..26685fd0fdaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -16,13 +16,9 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
-config MLX5_ACCEL
- bool
-
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
- select MLX5_ACCEL
help
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,71 +139,29 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_FPGA_IPSEC
- bool "Mellanox Technologies IPsec Innova support"
- depends on MLX5_CORE
- depends on MLX5_FPGA
- help
- Build IPsec support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_IPSEC
- bool "Mellanox Technologies IPsec Connect-X support"
+config MLX5_EN_MACSEC
+ bool "Connect-X support for MACSec offload"
depends on MLX5_CORE_EN
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- select MLX5_ACCEL
+ depends on MACSEC
+ default n
help
- Build IPsec support for the Connect-X family of network cards by Mellanox
- Technologies.
- Note: If you select this option, the mlx5_core driver will include
- IPsec support for the Connect-X family.
+ Build support for MACsec cryptography-offload acceleration in the NIC.
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload acceleration"
+ bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
help
Build support for IPsec cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
-config MLX5_FPGA_TLS
- bool "Mellanox Technologies TLS Innova support"
- depends on TLS_DEVICE
- depends on TLS=y || MLX5_CORE=m
- depends on MLX5_CORE_EN
- depends on MLX5_FPGA
- select MLX5_EN_TLS
- help
- Build TLS support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_TLS
+config MLX5_EN_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- select MLX5_ACCEL
- select MLX5_EN_TLS
- help
- Build TLS support for the Connect-X family of network cards by Mellanox
- Technologies.
-
-config MLX5_EN_TLS
- bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index fcfd38fa9e6c..a22c32aabf11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o lib/tout.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o
#
# Netdev basic
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o
+ en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+ lib/crypto.o
#
# Netdev extra
@@ -39,13 +40,14 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
- en/mapping.o
+ en/mapping.o lag/mpesw.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
lib/fs_chains.o en/tc_tun.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o en/tc/int_port.o
+ en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+ en/tc/post_meter.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -53,9 +55,13 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
- en/tc/act/redirect_ingress.o
+ en/tc/act/redirect_ingress.o en/tc/act/police.o
+
+ifneq ($(CONFIG_MLX5_TC_CT),)
+ mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
+ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+endif
-mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
@@ -63,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/debugfs.o esw/devlink_port.o esw/vporttbl.o esw/qos.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
@@ -84,17 +90,16 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
-mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
+mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
+ en_accel/macsec_stats.o
+
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o en_accel/ipsec_fs.o
+ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
+ en_accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
@@ -103,9 +108,10 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
+ steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
- steering/dr_dbg.o
+ steering/dr_dbg.o lib/smfs.o
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
deleted file mode 100644
index 82b185121edb..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __MLX5E_ACCEL_H__
-#define __MLX5E_ACCEL_H__
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
-{
- __be16 *ethtype;
-
- if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
- return false;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
- return false;
- return true;
-}
-
-static inline void remove_metadata_hdr(struct sk_buff *skb)
-{
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
-
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
-}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
deleted file mode 100644
index 09f5ce97af46..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/ipsec.h"
-#include "mlx5_core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec_offload.h"
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
- int err = 0;
-
- ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
- mlx5_ipsec_offload_ops(mdev) :
- mlx5_fpga_ipsec_ops(mdev);
-
- if (!ipsec_ops || !ipsec_ops->init) {
- mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
- return;
- }
-
- err = ipsec_ops->init(mdev);
- if (err) {
- mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
- return;
- }
-
- mdev->ipsec_ops = ipsec_ops;
-}
-
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->cleanup)
- return;
-
- ipsec_ops->cleanup(mdev);
-}
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->device_caps)
- return 0;
-
- return ipsec_ops->device_caps(mdev);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_count)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_count(mdev);
-}
-
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_read)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_read(mdev, counters, count);
-}
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- __be32 saddr[4] = {}, daddr[4] = {};
-
- if (!ipsec_ops || !ipsec_ops->create_hw_context)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (!xfrm->attrs.is_ipv6) {
- saddr[3] = xfrm->attrs.saddr.a4;
- daddr[3] = xfrm->attrs.daddr.a4;
- } else {
- memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
- memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
- }
-
- return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
- xfrm->attrs.is_ipv6, sa_handle);
-}
-
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->free_hw_context)
- return;
-
- ipsec_ops->free_hw_context(context);
-}
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- struct mlx5_accel_esp_xfrm *xfrm;
-
- if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
- return ERR_PTR(-EOPNOTSUPP);
-
- xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
- if (IS_ERR(xfrm))
- return xfrm;
-
- xfrm->mdev = mdev;
- return xfrm;
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
-
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
- return;
-
- ipsec_ops->esp_destroy_xfrm(xfrm);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
-
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
- return -EOPNOTSUPP;
-
- return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
deleted file mode 100644
index fbb9c5415d53..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_IPSEC_H__
-#define __MLX5_ACCEL_IPSEC_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/accel.h>
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count);
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_ipsec_ops {
- u32 (*device_caps)(struct mlx5_core_dev *mdev);
- unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
- int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
- void* (*create_hw_context)(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle);
- void (*free_hw_context)(void *context);
- int (*init)(struct mlx5_core_dev *mdev);
- void (*cleanup)(struct mlx5_core_dev *mdev);
- struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
- int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
- void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
-};
-
-#else
-
-#define MLX5_IPSEC_DEV(mdev) false
-
-static inline void *
-mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- return NULL;
-}
-
-static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
-
-static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
-
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
deleted file mode 100644
index d6667d38e1de..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
+++ /dev/null
@@ -1,385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#include "mlx5_core.h"
-#include "ipsec_offload.h"
-#include "lib/mlx5.h"
-#include "en_accel/ipsec_fs.h"
-
-#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
- MLX5_ACCEL_IPSEC_CAP_LSO)
-
-struct mlx5_ipsec_sa_ctx {
- struct rhash_head hash;
- u32 enc_key_id;
- u32 ipsec_obj_id;
- /* hw ctx */
- struct mlx5_core_dev *dev;
- struct mlx5_ipsec_esp_xfrm *mxfrm;
-};
-
-struct mlx5_ipsec_esp_xfrm {
- /* reference counter of SA ctx */
- struct mlx5_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
-{
- u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
-
- if (!mlx5_is_ipsec_device(mdev))
- return 0;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
- return 0;
-
- if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
- MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
- caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
- caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
- caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- /* We can accommodate up to 2^24 different IPsec objects
- * because we use up to 24 bit in flow table metadata
- * to hold the IPsec Object unique handle.
- */
- WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
- return caps;
-}
-
-static int
-mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n",
- attrs->replay_type);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n",
- attrs->keymat_type);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n",
- attrs->keymat.aes_gcm.iv_algo);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n",
- attrs->keymat.aes_gcm.key_len);
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- !MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_ipsec_esp_xfrm *mxfrm;
- int err = 0;
-
- err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs);
- if (err)
- return ERR_PTR(err);
-
- mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL);
- if (!mxfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&mxfrm->lock);
- memcpy(&mxfrm->accel_xfrm.attrs, attrs,
- sizeof(mxfrm->accel_xfrm.attrs));
-
- return &mxfrm->accel_xfrm;
-}
-
-static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm,
- accel_xfrm);
-
- /* assuming no sa_ctx are connected to this xfrm_ctx */
- WARN_ON(mxfrm->sa_ctx);
- kfree(mxfrm);
-}
-
-struct mlx5_ipsec_obj_attrs {
- const struct aes_gcm_keymat *aes_gcm;
- u32 accel_flags;
- u32 esn_msb;
- u32 enc_key_id;
-};
-
-static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
- struct mlx5_ipsec_obj_attrs *attrs,
- u32 *ipsec_id)
-{
- const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
- u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
- void *obj, *salt_p, *salt_iv_p;
- int err;
-
- obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
-
- /* salt and seq_iv */
- salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
- memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
-
- switch (aes_gcm->icv_len) {
- case 64:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_8B);
- break;
- case 96:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_12B);
- break;
- case 128:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_16B);
- break;
- default:
- return -EINVAL;
- }
- salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
- memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
- /* esn */
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- MLX5_SET(ipsec_obj, obj, esn_en, 1);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
- }
-
- MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id);
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
- MLX5_GENERAL_OBJECT_TYPES_IPSEC);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
- return err;
-}
-
-static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
-{
- u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
- MLX5_GENERAL_OBJECT_TYPES_IPSEC);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *hw_handle)
-{
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs;
- struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
- struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
- struct mlx5_ipsec_esp_xfrm *mxfrm;
- struct mlx5_ipsec_sa_ctx *sa_ctx;
- int err;
-
- /* alloc SA context */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
- mutex_lock(&mxfrm->lock);
- sa_ctx->mxfrm = mxfrm;
-
- /* key */
- err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
- aes_gcm->key_len / BITS_PER_BYTE,
- MLX5_ACCEL_OBJ_IPSEC_KEY,
- &sa_ctx->enc_key_id);
- if (err) {
- mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
- goto err_sa_ctx;
- }
-
- ipsec_attrs.aes_gcm = aes_gcm;
- ipsec_attrs.accel_flags = accel_xfrm->attrs.flags;
- ipsec_attrs.esn_msb = accel_xfrm->attrs.esn;
- ipsec_attrs.enc_key_id = sa_ctx->enc_key_id;
- err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs,
- &sa_ctx->ipsec_obj_id);
- if (err) {
- mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
- goto err_enc_key;
- }
-
- *hw_handle = sa_ctx->ipsec_obj_id;
- mxfrm->sa_ctx = sa_ctx;
- mutex_unlock(&mxfrm->lock);
-
- return sa_ctx;
-
-err_enc_key:
- mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id);
-err_sa_ctx:
- mutex_unlock(&mxfrm->lock);
- kfree(sa_ctx);
- return ERR_PTR(err);
-}
-
-static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
-{
- struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context;
- struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm;
-
- mutex_lock(&mxfrm->lock);
- mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id);
- mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id);
- kfree(sa_ctx);
- mxfrm->sa_ctx = NULL;
- mutex_unlock(&mxfrm->lock);
-}
-
-static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
-static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
- struct mlx5_ipsec_obj_attrs *attrs,
- u32 ipsec_id)
-{
- u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
- u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
- u64 modify_field_select = 0;
- u64 general_obj_types;
- void *obj;
- int err;
-
- if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
- return 0;
-
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return -EINVAL;
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err) {
- mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
- ipsec_id, err);
- return err;
- }
-
- obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
- modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
-
- /* esn */
- if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
- !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
- return -EOPNOTSUPP;
-
- obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
-
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_ipsec_esp_xfrm *mxfrm;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs))
- return -EOPNOTSUPP;
-
- mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
-
- mutex_lock(&mxfrm->lock);
-
- if (!mxfrm->sa_ctx)
- /* Not bound xfrm, change only sw attrs */
- goto change_sw_xfrm_attrs;
-
- /* need to add find and replace in ipsec_rhash_sa the sa_ctx */
- /* modify device with new hw_sa */
- ipsec_attrs.accel_flags = attrs->flags;
- ipsec_attrs.esn_msb = attrs->esn;
- err = mlx5_modify_ipsec_obj(mdev,
- &ipsec_attrs,
- mxfrm->sa_ctx->ipsec_obj_id);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
-
- mutex_unlock(&mxfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
- .device_caps = mlx5_ipsec_offload_device_caps,
- .create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
- .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
- .init = mlx5_ipsec_offload_init,
- .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
- .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
- .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_ipsec_offload_device_caps(mdev))
- return NULL;
-
- return &ipsec_offload_ops;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
deleted file mode 100644
index 970c66d19c1d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_OFFLOAD_H__
-#define __MLX5_IPSEC_OFFLOAD_H__
-
-#include <linux/mlx5/driver.h>
-#include "accel/ipsec.h"
-
-#ifdef CONFIG_MLX5_IPSEC
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!MLX5_CAP_GEN(mdev, ipsec_offload))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return false;
-
- return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
- MLX5_CAP_ETH(mdev, insert_trailer);
-}
-
-#else
-static inline const struct mlx5_accel_ipsec_ops *
-mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- return false;
-}
-
-#endif /* CONFIG_MLX5_IPSEC */
-#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
deleted file mode 100644
index 6c2b86a26863..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/tls.h"
-#include "mlx5_core.h"
-#include "lib/mlx5.h"
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-#include "fpga/tls.h"
-
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid,
- direction_sx);
-}
-
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx)
-{
- mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
-}
-
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
-}
-
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_is_tls_device(mdev) ||
- mlx5_accel_is_ktls_device(mdev);
-}
-
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_device_caps(mdev);
-}
-
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_init(mdev);
-}
-
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- mlx5_fpga_tls_cleanup(mdev);
-}
-#endif
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
-{
- u32 sz_bytes;
- void *key;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *info =
- (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *info =
- (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- default:
- return -EINVAL;
- }
-
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
-}
-
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
-{
- mlx5_destroy_encryption_key(mdev, key_id);
-}
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
deleted file mode 100644
index fd874f0c380a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_TLS_H__
-#define __MLX5_ACCEL_TLS_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/tls.h>
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
-
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_tx);
-}
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_rx);
-}
-
-static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_accel_is_ktls_tx(mdev) &&
- !mlx5_accel_is_ktls_rx(mdev))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
-}
-
-static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info)
-{
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (crypto_info->version == TLS_1_2_VERSION)
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
- break;
- }
-
- return false;
-}
-#else
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline int
-mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id) { return -ENOTSUPP; }
-static inline void
-mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
-
-static inline bool
-mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
-static inline bool
-mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info) { return false; }
-#endif
-
-enum {
- MLX5_ACCEL_TLS_TX = BIT(0),
- MLX5_ACCEL_TLS_RX = BIT(1),
- MLX5_ACCEL_TLS_V12 = BIT(2),
- MLX5_ACCEL_TLS_V13 = BIT(3),
- MLX5_ACCEL_TLS_LRO = BIT(4),
- MLX5_ACCEL_TLS_IPV6 = BIT(5),
- MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
- MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
-};
-
-struct mlx5_ifc_tls_flow_bits {
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 reserved_at_2[0x1e];
-};
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
-
-#else
-
-static inline int
-mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx) { return -ENOTSUPP; }
-static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn) { return 0; }
-static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_accel_is_ktls_device(mdev);
-}
-static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
-static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
-#endif
-
-#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 291e427e9e4f..6aca004e88cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
-{
- dma_addr_t t;
-
- buf->size = size;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-
- buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
- if (!buf->frags)
- return -ENOMEM;
-
- buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->frags->buf)
- goto err_out;
-
- buf->frags->map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
-
- return 0;
-err_out:
- kfree(buf->frags);
- return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf)
-{
- return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
- dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
- buf->frags->map);
-
- kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
{
@@ -183,11 +136,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
if (!pgdir)
return NULL;
- pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
+ pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
if (!pgdir->bitmap) {
kfree(pgdir);
return NULL;
@@ -260,12 +213,6 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
-{
- return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
-}
-EXPORT_SYMBOL_GPL(mlx5_db_alloc);
-
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
u32 db_per_page = PAGE_SIZE / cache_line_size();
@@ -286,19 +233,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
- u64 addr;
- int i;
-
- for (i = 0; i < buf->npages; i++) {
- addr = buf->frags->map + (i << buf->page_shift);
-
- pas[i] = cpu_to_be64(addr);
- }
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
int i;
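
The hunk above moves the doorbell-pgdir allocations onto the device's NUMA node. A minimal sketch of that pattern, with db_dir as an illustrative stand-in type rather than the driver's struct:

struct db_dir {
	unsigned long *bitmap;
};

/* Place both the tracking struct and its bitmap on the NUMA node that
 * will touch them; kzalloc_node()/bitmap_zalloc_node() fall back to any
 * node when the preferred one has no free memory.
 */
static struct db_dir *db_dir_alloc(unsigned int nbits, int node)
{
	struct db_dir *dir;

	dir = kzalloc_node(sizeof(*dir), GFP_KERNEL, node);
	if (!dir)
		return NULL;

	dir->bitmap = bitmap_zalloc_node(nbits, GFP_KERNEL, node);
	if (!dir->bitmap) {
		kfree(dir);
		return NULL;
	}
	return dir;
}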
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 17fe05809653..2e0d59ca62b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/highmem.h>
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -131,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->alloc_lock, flags);
+ lockdep_assert_held(&cmd->alloc_lock);
set_bit(idx, &cmd->bitmask);
- spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
+ struct mlx5_cmd *cmd = ent->cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
if (!refcount_dec_and_test(&ent->refcnt))
- return;
+ goto out;
if (ent->idx >= 0) {
- struct mlx5_cmd *cmd = ent->cmd;
-
cmd_free_index(cmd, ent->idx);
up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
}
cmd_free_ent(ent);
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
@@ -190,10 +190,10 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
int xor_len = sizeof(*block) - sizeof(block->data) - 1;
if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
return 0;
}
@@ -259,12 +259,12 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
- return err;
+ return -EHWPOISON;
next = next->next;
}
@@ -477,9 +477,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VHCA_STATE:
case MLX5_CMD_OP_MODIFY_VHCA_STATE:
case MLX5_CMD_OP_ALLOC_SF:
+ case MLX5_CMD_OP_SUSPEND_VHCA:
+ case MLX5_CMD_OP_RESUME_VHCA:
+ case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
+ case MLX5_CMD_OP_SAVE_VHCA_STATE:
+ case MLX5_CMD_OP_LOAD_VHCA_STATE:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
- return -EIO;
+ return -ENOLINK;
default:
mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
return -EINVAL;
@@ -674,6 +679,11 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
MLX5_COMMAND_STR_CASE(ALLOC_SF);
MLX5_COMMAND_STR_CASE(DEALLOC_SF);
+ MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
+ MLX5_COMMAND_STR_CASE(RESUME_VHCA);
+ MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
+ MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
default: return "unknown command opcode";
}
}
@@ -760,44 +770,72 @@ struct mlx5_ifc_mbox_in_bits {
u8 reserved_at_40[0x40];
};
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
- *status = MLX5_GET(mbox_out, out, status);
- *syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
+
+ mlx5_core_err_rl(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
+EXPORT_SYMBOL(mlx5_cmd_out_err);
-static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
+ u16 opcode, op_mod;
u32 syndrome;
u8 status;
- u16 opcode;
- u16 op_mod;
u16 uid;
+ int err;
- mlx5_cmd_mbox_status(out, &status, &syndrome);
- if (!status)
- return 0;
+ syndrome = MLX5_GET(mbox_out, out, syndrome);
+ status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
+ err = cmd_status_to_err(status);
+
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
- mlx5_core_err_rl(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode), opcode, op_mod,
- cmd_status_str(status), status, syndrome);
+ mlx5_cmd_out_err(dev, opcode, op_mod, out);
else
mlx5_core_dbg(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode),
- opcode, op_mod,
- cmd_status_str(status),
- status,
- syndrome);
+ "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod, uid,
+ cmd_status_str(status), status, syndrome, err);
+}
- return cmd_status_to_err(status);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
+{
+	/* Aborted due to a PCI error or via the reset flow, see mlx5_cmd_trigger_completions() */
+ if (err == -ENXIO) {
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u32 syndrome;
+ u8 status;
+
+		/* PCI error: emulate a command return status for a smooth reset */
+ err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, syndrome);
+ if (!err)
+ return 0;
+ }
+
+ /* driver or FW delivery error */
+ if (err != -EREMOTEIO && err)
+ return err;
+
+ /* check outbox status */
+ err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
+ if (err)
+ cmd_status_print(dev, in, out);
+
+ return err;
}
+EXPORT_SYMBOL(mlx5_cmd_check);
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
@@ -980,13 +1018,7 @@ static void cmd_work_handler(struct work_struct *work)
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
- u8 status = 0;
- u32 drv_synd;
-
- ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
- MLX5_SET(mbox_out, ent->out, status, status);
- MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
-
+ ent->ret = -ENXIO;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
return;
}
@@ -1005,6 +1037,31 @@ static void cmd_work_handler(struct work_struct *work)
}
}
+static int deliv_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_DELIVERY_STAT_OK:
+ case MLX5_DRIVER_STATUS_ABORTED:
+ return 0;
+ case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+ case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+ return -EBADR;
+ case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+ case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+ return -EFAULT; /* Bad address */
+ case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+ case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+ return -ENOMSG;
+ case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
static const char *deliv_status_to_str(u8 status)
{
switch (status) {
@@ -1101,16 +1158,27 @@ out_err:
/* Notes:
* 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
+ *
+ * return value in case (!callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret > 0 : Command execution couldn't be performed by firmware
+ * ret == 0: Command was executed by FW; caller must check the FW outbox status.
+ *
+ * return value in case (callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret == 0: Command will be submitted to FW for execution
+ * and the callback will be called for further status updates
*/
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status,
+ void *context, int page_queue,
u8 token, bool force_polling)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
struct mlx5_cmd_stats *stats;
+ u8 status = 0;
int err = 0;
s64 ds;
u16 op;
@@ -1141,12 +1209,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
cmd_work_handler(&ent->work);
} else if (!queue_work(cmd->wq, &ent->work)) {
mlx5_core_warn(dev, "failed to queue work\n");
- err = -ENOMEM;
+ err = -EALREADY;
goto out_free;
}
if (callback)
- goto out; /* mlx5_cmd_comp_handler() will put(ent) */
+ return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
if (err == -ETIMEDOUT || err == -ECANCELED)
@@ -1164,12 +1232,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
mlx5_command_str(op), ds);
- *status = ent->status;
out_free:
+ status = ent->status;
cmd_ent_put(ent);
-out:
- return err;
+ return err ? : status;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
@@ -1486,7 +1553,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
+ dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));
debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
@@ -1612,15 +1679,15 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
ent->ts2 = ktime_get_ns();
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
- if (!ent->ret) {
+
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->ret = -ENXIO;
+
+ if (!ent->ret) { /* Command completed by FW */
if (!cmd->checksum_disabled)
ent->ret = verify_signature(ent);
- else
- ent->ret = 0;
- if (vec & MLX5_TRIGGERED_CMD_COMP)
- ent->status = MLX5_DRIVER_STATUS_ABORTED;
- else
- ent->status = ent->lay->status_own >> 1;
+
+ ent->status = ent->lay->status_own >> 1;
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
@@ -1638,21 +1705,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
callback = ent->callback;
context = ent->context;
- err = ent->ret;
- if (!err) {
+ err = ent->ret ? : ent->status;
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
+ if (!err)
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
- err = err ? err : mlx5_cmd_check(dev,
- ent->in->first.data,
- ent->uout);
- }
-
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
- err = err ? err : ent->status;
/* final consumer is done, release ent */
cmd_ent_put(ent);
callback(err, context);
@@ -1666,7 +1730,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
unsigned long bitmask;
@@ -1706,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- while (down_trylock(&cmd->sem))
+ for (i = 0; i < cmd->max_reg_cmds; i++) {
+ while (down_trylock(&cmd->sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
- while (down_trylock(&cmd->pages_sem))
+ while (down_trylock(&cmd->pages_sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
/* Unlock cmdif */
up(&cmd->pages_sem);
@@ -1719,31 +1788,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
up(&cmd->sem);
}
-static int status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_DELIVERY_STAT_OK:
- case MLX5_DRIVER_STATUS_ABORTED:
- return 0;
- case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
- case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
- return -EBADR;
- case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
- case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
- return -EFAULT; /* Bad address */
- case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
- case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
- return -ENOMSG;
- case MLX5_CMD_DELIVERY_STAT_FW_ERR:
- return -EIO;
- default:
- return -EINVAL;
- }
-}
-
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
gfp_t gfp)
{
@@ -1787,27 +1831,23 @@ static int is_manage_pages(void *in)
return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. Page queue commands do not support asynchronous completion
+ */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size, mlx5_cmd_cbk_t callback, void *context,
bool force_polling)
{
- struct mlx5_cmd_msg *inb;
- struct mlx5_cmd_msg *outb;
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ struct mlx5_cmd_msg *inb, *outb;
int pages_queue;
gfp_t gfp;
- int err;
- u8 status = 0;
- u32 drv_synd;
- u16 opcode;
u8 token;
+ int err;
- opcode = MLX5_GET(mbox_in, in, opcode);
- if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
- err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
- MLX5_SET(mbox_out, out, status, status);
- MLX5_SET(mbox_out, out, syndrome, drv_synd);
- return err;
- }
+ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+ return -ENXIO;
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1833,46 +1873,143 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status, token, force_polling);
+ pages_queue, token, force_polling);
+ if (callback)
+ return err;
+
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
if (err)
goto out_out;
- mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
- if (status) {
- err = status_to_err(status);
- goto out_out;
+ /* command completed by FW */
+ err = mlx5_copy_from_msg(out, outb, out_size);
+out_out:
+ mlx5_free_cmd_msg(dev, outb);
+out_in:
+ free_msg(dev, inb);
+ return err;
+}
+
+static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ u32 syndrome, int err)
+{
+ struct mlx5_cmd_stats *stats;
+
+ if (!err)
+ return;
+
+ stats = &dev->cmd.stats[opcode];
+ spin_lock_irq(&stats->lock);
+ stats->failed++;
+ if (err < 0)
+ stats->last_failed_errno = -err;
+ if (err == -EREMOTEIO) {
+ stats->failed_mbox_status++;
+ stats->last_failed_mbox_status = status;
+ stats->last_failed_syndrome = syndrome;
}
+ spin_unlock_irq(&stats->lock);
+}
- if (!callback)
- err = mlx5_copy_from_msg(out, outb, out_size);
+/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
-out_out:
- if (!callback)
- mlx5_free_cmd_msg(dev, outb);
+ if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
+ err = -EIO;
-out_in:
- if (!callback)
- free_msg(dev, inb);
+ if (!err && status != MLX5_CMD_STAT_OK)
+ err = -EREMOTEIO;
+
+ cmd_status_log(dev, opcode, status, syndrome, err);
return err;
}
+/**
+ * mlx5_cmd_do - Executes a fw command, waits for completion.
+ * Unlike mlx5_cmd_exec, this function will not translate or intercept
+ * outbox.status and will return -EREMOTEIO when
+ * outbox.status != MLX5_CMD_STAT_OK
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return:
+ * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
+ * Caller must check FW outbox status.
+ * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
+ * < 0 : Command execution couldn't be performed by firmware or driver
+ */
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_cmd_do);
+
+/**
+ * mlx5_cmd_exec - Executes a fw command, wait for completion
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- int err;
+ int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- return err ? : mlx5_cmd_check(dev, in, out);
+ return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
+/**
+ * mlx5_cmd_exec_polling - Executes a fw command, polls for completion.
+ * Needed for driver force teardown, when the command completion EQ
+ * is not available to complete the command
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
+ return mlx5_cmd_check(dev, err, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
struct mlx5_async_ctx *ctx)
{
ctx->dev = dev;
/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
atomic_set(&ctx->num_inflight, 1);
- init_waitqueue_head(&ctx->wait);
+ init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
@@ -1886,19 +2023,21 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
*/
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
- atomic_dec(&ctx->num_inflight);
- wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+ if (!atomic_dec_and_test(&ctx->num_inflight))
+ wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
- struct mlx5_async_ctx *ctx = work->ctx;
+ struct mlx5_async_ctx *ctx;
+ ctx = work->ctx;
+ status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
}
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
@@ -1909,28 +2048,19 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
+ work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
if (ret && atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
-int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size)
-{
- int err;
-
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
-
- return err ? : mlx5_cmd_check(dev, in, out);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec_polling);
-
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
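
Taken together, the cmd.c rework gives callers a three-way convention: a negative errno when the driver could not deliver the command, -EREMOTEIO (from cmd_status_err()) when FW executed it but set a bad outbox status (outbox preserved), and 0 on success. A hedged caller sketch, where create_obj_checked() is hypothetical:

static int create_obj_checked(struct mlx5_core_dev *dev,
			      u32 *in, int inlen, u32 *out, int outlen)
{
	int err = mlx5_cmd_do(dev, in, inlen, out, outlen);

	if (err == -EREMOTEIO) {
		u8 status = MLX5_GET(mbox_out, out, status);
		u32 syndrome = MLX5_GET(mbox_out, out, syndrome);

		/* Decide here whether the FW status is tolerable before
		 * normalizing it. */
		mlx5_core_dbg(dev, "status 0x%x syndrome 0x%x\n",
			      status, syndrome);
	}

	/* Folds the outbox status into a normal errno; this is exactly
	 * what mlx5_cmd_exec() does internally. */
	return mlx5_cmd_check(dev, err, in, out);
}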
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 5371ad0a12eb..4caa1b6f40ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
@@ -86,8 +85,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen, u32 *out, int outlen)
+/* Callers must verify the outbox status in case of error */
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn_or_apu_element);
@@ -101,7 +101,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -148,6 +148,16 @@ err_cmd:
mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
+EXPORT_SYMBOL(mlx5_create_cq);
+
+/* outbox is checked and err val is normalized */
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
+{
+ int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
+
+ return mlx5_cmd_check(dev, err, in, out);
+}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
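
The CQ path now exposes two entry points: mlx5_create_cq() returns -EREMOTEIO with the raw outbox intact, while mlx5_core_create_cq() normalizes it via mlx5_cmd_check(). A hedged sketch of a caller that wants the raw FW status, e.g. to relay it on; create_cq_raw() is hypothetical:

static int create_cq_raw(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 u32 *in, int inlen, u32 *out, int outlen)
{
	int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);

	if (err == -EREMOTEIO) {
		/* FW executed CREATE_CQ but refused it; the outbox still
		 * carries the status/syndrome for the caller. */
		u8 status = MLX5_GET(mbox_out, out, status);

		mlx5_core_dbg(dev, "CREATE_CQ bad status 0x%x\n", status);
	}
	return err;
}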
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10d195042ab5..3e232a65a0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
@@ -99,26 +98,32 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
+{
+ return dev->priv.dbg.dbg_root;
+}
+EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
+
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
+ dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.qp_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
+ dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.eq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
@@ -161,6 +166,28 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
+static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cmd *cmd;
+ char tbuf[6];
+ int weight;
+ int field;
+ int ret;
+
+ cmd = filp->private_data;
+ weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+ field = cmd->max_reg_cmds - weight;
+ ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations slots_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = slots_read,
+};
+
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
@@ -168,8 +195,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
const char *namep;
int i;
- cmd = &dev->priv.cmdif_debugfs;
- *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
+ cmd = &dev->priv.dbg.cmdif_debugfs;
+ *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
+
+ debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
@@ -180,23 +209,53 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_file("average", 0400, stats->root, stats,
&stats_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
+ debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
+ debugfs_create_u64("failed_mbox_status", 0400, stats->root,
+ &stats->failed_mbox_status);
+ debugfs_create_u32("last_failed_errno", 0400, stats->root,
+ &stats->last_failed_errno);
+ debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
+ &stats->last_failed_mbox_status);
+ debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
+ &stats->last_failed_syndrome);
}
}
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
+ dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
+}
+
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
+{
+ struct dentry *pages;
+
+ dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
+ pages = dev->priv.dbg.pages_debugfs;
+
+ debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
+ debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
+ debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
+ &dev->priv.reclaim_pages_discard);
+}
+
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+ debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
@@ -441,7 +500,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
&qp->dbg, qp->qpn, qp_fields,
ARRAY_SIZE(qp_fields), qp);
if (err)
@@ -468,7 +527,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
&eq->dbg, eq->eqn, eq_fields,
ARRAY_SIZE(eq_fields), eq);
if (err)
@@ -493,7 +552,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
&cq->dbg, cq->cqn, cq_fields,
ARRAY_SIZE(cq_fields), cq);
if (err)
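
A userspace sketch for reading the new "slots_inuse" file added above. The /sys/kernel/debug/mlx5/<PCI id>/commands/ path layout and the sample device name are assumptions here; adjust to the actual debugfs mount and device.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; the device directory name depends on the system. */
	const char *path =
		"/sys/kernel/debug/mlx5/0000:08:00.0/commands/slots_inuse";
	FILE *f = fopen(path, "r");
	int slots;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &slots) == 1)
		printf("command slots in use: %d\n", slots);
	fclose(f);
	return 0;
}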
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ba6dad97e308..0571e40c6ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -340,8 +340,10 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
struct auxiliary_driver *adrv;
int ret = 0, i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -389,6 +391,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
return ret;
}
@@ -401,7 +404,9 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
pm_message_t pm = {};
int i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -430,6 +435,7 @@ skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -438,6 +444,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
{
int ret;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
@@ -450,6 +457,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
@@ -526,16 +534,22 @@ del_adev:
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- return 0;
+ goto out;
+
+ err = add_drivers(dev);
- return add_drivers(dev);
+out:
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
+ return err;
}
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
@@ -555,12 +569,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
PCI_SLOT(dev->pdev->devfn));
}
-static int next_phys_dev(struct device *dev, const void *data)
+static int _next_phys_dev(struct mlx5_core_dev *mdev,
+ const struct mlx5_core_dev *curr)
{
- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
- struct mlx5_core_dev *mdev = madev->mdev;
- const struct mlx5_core_dev *curr = data;
-
if (!mlx5_core_is_pf(mdev))
return 0;
@@ -574,22 +585,52 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
- struct auxiliary_device *adev;
- struct mlx5_adev *madev;
+ if (this->driver != other->driver)
+ return NULL;
+
+ return pci_get_drvdata(to_pci_dev(other));
+}
+
+static int next_phys_dev_lag(struct device *dev, const void *data)
+{
+ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+ mdev = pci_get_other_drvdata(this->device, dev);
+ if (!mdev)
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
+ !MLX5_CAP_GEN(mdev, lag_master) ||
+ (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
+ MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
+ return 0;
+
+ return _next_phys_dev(mdev, data);
+}
+
+static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct device *next;
if (!mlx5_core_is_pf(dev))
return NULL;
- adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
- if (!adev)
+ next = bus_find_device(&pci_bus_type, NULL, dev, match);
+ if (!next)
return NULL;
- madev = container_of(adev, struct mlx5_adev, adev);
- put_device(&adev->dev);
- return madev->mdev;
+ put_device(next);
+ return pci_get_drvdata(to_pci_dev(next));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
+{
+ lockdep_assert_held(&mlx5_intf_mutex);
+ return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}
void mlx5_dev_list_lock(void)
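
The dev.c change replaces the auxiliary-device walk with a PCI bus walk. A minimal sketch of the lookup pattern, under the assumption that drvdata is only trusted after the driver match; find_sibling() and match_same_driver() are illustrative names:

/* Only devices bound to the same driver as "this" are candidates, so
 * their drvdata is known to have the expected type.
 */
static int match_same_driver(struct device *dev, const void *data)
{
	const struct device *this = data;

	return dev != this && dev->driver == this->driver;
}

static struct pci_dev *find_sibling(struct pci_dev *pdev)
{
	struct device *next;

	next = bus_find_device(&pci_bus_type, NULL, &pdev->dev,
			       match_same_driver);
	if (!next)
		return NULL;

	/* Mirrors the driver: drop the reference immediately; the result
	 * stays valid only while a higher-level lock (mlx5_intf_mutex in
	 * the diff) keeps the sibling from going away. */
	put_device(next);
	return to_pci_dev(next);
}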
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d1093bb2d436..66c6a7017695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -100,14 +100,19 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
}
net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
- err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+ err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
if (err)
- goto out;
+ return err;
err = mlx5_fw_reset_wait_reset_done(dev);
-out:
if (err)
- NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
+ return err;
+
+ mlx5_unload_one_devl_locked(dev);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
return err;
}
@@ -138,6 +143,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
+ int ret = 0;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
@@ -160,17 +166,21 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev);
- return 0;
+ mlx5_unload_one_devl_locked(dev);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
- return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
- return mlx5_devlink_reload_fw_activate(devlink, extack);
+ ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ else
+ ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
+
+ return ret;
}
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -178,24 +188,27 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int ret = 0;
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- return 0;
+ return ret;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -588,14 +601,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
- strcpy(value.vstr, "dmfs");
- else
- strcpy(value.vstr, "smfs");
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
- value);
-
value.vbool = MLX5_CAP_GEN(dev, roce);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
@@ -606,18 +611,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
value);
-
- if (MLX5_ESWITCH_MANAGER(dev)) {
- if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
- dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
- value.vbool = true;
- } else {
- value.vbool = false;
- }
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
- value);
- }
#endif
value.vu32 = MLX5_COMP_EQ_SIZE;
@@ -852,28 +845,28 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
- &core_dev->priv);
+ err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
}
int mlx5_devlink_register(struct devlink *devlink)
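
The devlink_* to devl_* switch above assumes the devlink instance lock is already held by the caller. A hypothetical standalone caller outside the locked devlink callbacks would take it explicitly; traps_register_example() is a sketch, not driver code:

static int traps_register_example(struct devlink *devlink,
				  const struct devlink_trap *traps,
				  size_t traps_count, void *priv)
{
	int err;

	devl_lock(devlink);
	err = devl_traps_register(devlink, traps, traps_count, priv);
	devl_unlock(devlink);
	return err;
}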
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 7841ef6c193c..c5bb79a4fa57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -259,6 +259,9 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_PORT:
trace_seq_printf(p, "port\n");
break;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ trace_seq_printf(p, "none\n");
+ break;
}
trace_seq_putc(p, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index eae9aa9c0811..978a2bb8e122 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
if (!tracer->owner)
return;
+ if (unlikely(!tracer->str_db.loaded))
+ goto arm;
+
block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
&tmp_trace_block[TRACES_PER_BLOCK - 1]);
}
+arm:
mlx5_fw_tracer_arm(dev);
}
@@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
- if (likely(tracer->str_db.loaded))
- queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index 538adab6878b..c5b560a8b026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
struct mlx5_rsc_dump {
u32 pdn;
u32 mkey;
+ u32 number_of_menu_items;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
return -EINVAL;
}
-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
+
+static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
+ int read_size, int start_idx)
{
void *data = page_address(page);
enum mlx5_sgmt_type sgmt_idx;
int num_of_items;
char *sgmt_name;
void *member;
+ int size = 0;
void *menu;
int i;
- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+ if (!start_idx) {
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
+ num_of_records);
+ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
+ data += size;
+ }
+ num_of_items = rsc_dump->number_of_menu_items;
+
+ for (i = 0; start_idx + i < num_of_items; i++) {
+ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
+ if (size >= read_size)
+ return start_idx + i;
- for (i = 0; i < num_of_items; i++) {
- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
if (sgmt_idx == -EINVAL)
@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
member, segment_type);
}
+ return 0;
}
static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
struct mlx5_rsc_dump_cmd *cmd = NULL;
struct mlx5_rsc_key key = {};
struct page *page;
+ int start_idx = 0;
int size;
int err;
@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
if (err < 0)
goto destroy_cmd;
- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
} while (err > 0);
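
Shape of the resumable menu parse introduced above, as a sketch: the parser returns the record index to resume from when a page ends mid-menu, so records that straddle a page boundary are picked up on the next iteration. fetch_next_menu_page() and parse_menu_page() are hypothetical stand-ins for the trigger/read and mlx5_rsc_dump_read_menu_sgmt() steps.

static int read_menu(struct mlx5_rsc_dump_cmd *cmd, struct page *page)
{
	int start_idx = 0;
	int size, err;

	do {
		err = fetch_next_menu_page(cmd, page, &size);
		if (err < 0)
			return err;
		start_idx = parse_menu_page(page, size, start_idx);
	} while (err > 0);

	return 0;
}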
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 812e6810cb3b..26a23047f1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -59,6 +59,7 @@
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
+#include "en/selq.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -92,29 +93,26 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_LOG_WQE_SZ 18
-#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
- MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
-#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-
-#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
-#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
-#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
-/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
- * WQEs, This page will absorb write overflow by the hardware, when
- * receiving packets larger than MTU. These oversize packets are
- * dropped by the driver at a later stage.
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+
+/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
+ * These are theoretical maximums, which can be further restricted by
+ * capabilities. These values are used for static resource allocations and
+ * sanity checks.
+ * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
+ * size actually used at runtime, but it's not a problem when calculating static
+ * array sizes.
*/
-#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
-#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5_UMR_MAX_MTT_SPACE \
+ (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
+ MLX5_UMR_MTT_ALIGNMENT))
+#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
+ rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
+#define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
- (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
-#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
- (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
- (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
@@ -126,8 +124,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
- MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
@@ -149,13 +146,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_UMR_WQE_INLINE_SZ \
- (sizeof(struct mlx5e_umr_wqe) + \
- ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
- MLX5_UMR_MTT_ALIGNMENT))
-#define MLX5E_UMR_WQEBBS \
- (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-
#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
(sizeof(struct mlx5e_umr_wqe) +\
(sizeof(struct mlx5_klm) * (sgl_len)))
@@ -172,8 +162,8 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -187,12 +177,6 @@ do { \
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
-enum mlx5e_rq_group {
- MLX5E_RQ_GROUP_REGULAR,
- MLX5E_RQ_GROUP_XSK,
-#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
-};
-
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
if (mlx5_lag_is_lacp_owner(mdev))
@@ -221,10 +205,40 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
+/* The maximum WQE size is reported by the firmware, in bytes, via the
+ * max_wqe_sz_sq capability. The driver caps it at 1 KB (16
+ * WQEBBs), unless the firmware capability is stricter.
+ */
+static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+{
+ BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX);
+
+ return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+}
+
+static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev)
+{
+/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
+ * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
+ * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64)
+ * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower
+ * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
+ * cache-aligned.
+ */
+ u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+ wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+#if L1_CACHE_BYTES >= 128
+ wqebbs = ALIGN_DOWN(wqebbs, 2);
+#endif
+ return wqebbs;
+}
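A minimal userspace sketch of the clamping performed by the two helpers
above; the firmware capability value and the cacheline size are assumptions
chosen for the example:

#include <stdio.h>

#define SEND_WQE_BB         64u
#define SEND_WQE_MAX_WQEBBS 16u
#define CACHELINE_BYTES     128u /* assumed L1_CACHE_BYTES */

#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

/* Mirrors mlx5e_get_max_sq_wqebbs() followed by the aligned variant. */
static unsigned int max_sq_aligned_wqebbs(unsigned int fw_max_wqe_sz)
{
        unsigned int wqebbs = fw_max_wqe_sz / SEND_WQE_BB;

        if (wqebbs > SEND_WQE_MAX_WQEBBS)
                wqebbs = SEND_WQE_MAX_WQEBBS;
        /* Stay one below the maximum so 16 WQEBBs * 4 DS units cannot
         * overflow the 6-bit DS field, then keep the result even so a
         * full WQE stays aligned on 128-byte cachelines.
         */
        if (wqebbs > SEND_WQE_MAX_WQEBBS - 1)
                wqebbs = SEND_WQE_MAX_WQEBBS - 1;
        if (CACHELINE_BYTES >= 128)
                wqebbs = ALIGN_DOWN(wqebbs, 2);
        return wqebbs;
}

int main(void)
{
        /* A 1024 B firmware cap yields 16 -> 15 -> 14 WQEBBs. */
        printf("%u\n", max_sq_aligned_wqebbs(1024));
        return 0;
}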
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_ll {
@@ -241,8 +255,9 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
union {
- struct mlx5_mtt inline_mtts[0];
- struct mlx5_klm inline_klms[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
+ DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
+ DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
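Background on the change away from [0] arrays: ISO C does not permit a
flexible array member directly inside a union, which is why each one is
wrapped in DECLARE_FLEX_ARRAY(). The stand-in below approximates the kernel
macro (the empty-struct trick relies on GNU C extensions; the struct layouts
are invented for the example):

#include <stdio.h>

/* Simplified stand-in for the kernel's DECLARE_FLEX_ARRAY(). */
#define DECLARE_FLEX_ARRAY(type, name) \
        struct { struct { } __empty_##name; type name[]; }

struct mtt { unsigned long long ptag; };
struct klm { unsigned int bcount; unsigned int key; unsigned long long va; };

struct umr_wqe_tail {
        unsigned int hdr;
        union {
                DECLARE_FLEX_ARRAY(struct mtt, inline_mtts);
                DECLARE_FLEX_ARRAY(struct klm, inline_klms);
        };
};

int main(void)
{
        /* The flexible arrays contribute no storage of their own; sizeof
         * covers only the header plus alignment padding.
         */
        printf("%zu\n", sizeof(struct umr_wqe_tail));
        return 0;
}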
@@ -293,7 +308,8 @@ struct mlx5e_params {
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
struct {
- struct mlx5e_mqprio_rl *rl;
+ u64 max_rate[TC_MAX_QUEUE];
+ u32 hw_id[TC_MAX_QUEUE];
} channel;
} mqprio;
bool rx_cqe_compress_def;
@@ -326,7 +342,6 @@ enum {
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
- MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
@@ -377,6 +392,7 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ MLX5E_SQ_STATE_XDP_MULTIBUF,
};
struct mlx5e_tx_mpwqe {
@@ -427,12 +443,12 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
unsigned int hw_mtu;
- struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
@@ -448,12 +464,9 @@ struct mlx5e_txqsq {
cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
-struct mlx5e_dma_info {
- dma_addr_t addr;
- union {
- struct page *page;
- struct xdp_buff *xsk;
- };
+union mlx5e_alloc_unit {
+ struct page *page;
+ struct xdp_buff *xsk;
};
/* XDP packets can be transmitted in different ways. On completion, we need to
@@ -486,7 +499,7 @@ struct mlx5e_xdp_info {
} frame;
struct {
struct mlx5e_rq *rq;
- struct mlx5e_dma_info di;
+ struct page *page;
} page;
};
};
@@ -508,7 +521,7 @@ struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- struct mlx5e_xdp_info *,
+ struct skb_shared_info *,
int);
struct mlx5e_xdpsq {
@@ -540,6 +553,8 @@ struct mlx5e_xdpsq {
u32 sqn;
struct device *pdev;
__be32 mkey_be;
+ u16 stop_room;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -580,19 +595,15 @@ struct mlx5e_icosq {
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *au;
u32 offset;
bool last_in_page;
};
-struct mlx5e_umr_dma_info {
- struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
struct mlx5e_mpw_info {
- struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
- DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
+ union mlx5e_alloc_unit alloc_units[];
};
#define MLX5E_MAX_RX_FRAGS 4
@@ -600,13 +611,13 @@ struct mlx5e_mpw_info {
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
-#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
- MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
u32 head;
u32 tail;
- struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+ struct page *page_cache[MLX5E_CACHE_SIZE];
};
struct mlx5e_rq;
@@ -615,8 +626,8 @@ typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
-(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
+(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
@@ -639,6 +650,12 @@ struct mlx5e_rq_frags_info {
u8 num_frags;
u8 log_num_frags;
u8 wqe_bulk;
+ u8 wqe_index_mask;
+};
+
+struct mlx5e_dma_info {
+ dma_addr_t addr;
+ struct page *page;
};
struct mlx5e_shampo_hd {
@@ -660,13 +677,20 @@ struct mlx5e_hw_gro_data {
int second_ip_id;
};
+enum mlx5e_mpwrq_umr_mode {
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_OVERSIZED,
+ MLX5E_MPWRQ_UMR_MODE_TRIPLE,
+};
+
struct mlx5e_rq {
/* data path */
union {
struct {
struct mlx5_wq_cyc wq;
struct mlx5e_wqe_frag_info *frags;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *alloc_units;
struct mlx5e_rq_frags_info info;
mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
@@ -675,12 +699,19 @@ struct mlx5e_rq {
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
+ __be32 umr_mkey_be;
u16 num_strides;
u16 actual_wq_head;
u8 log_stride_sz;
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ u8 min_wqe_bulk;
+ u8 page_shift;
+ u8 pages_per_wqe;
+ u8 umr_wqebbs;
+ u8 mtts_per_wqe;
+ u8 umr_mode;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
@@ -730,7 +761,7 @@ struct mlx5e_rq {
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
- u32 umr_mkey;
+ struct mlx5e_channel *channel;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@ -819,11 +850,6 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
-enum {
- MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
-};
-
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
@@ -863,22 +889,13 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
-struct mlx5e_htb {
- DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
- DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
- struct mlx5e_sq_stats **qos_sq_stats;
- u16 max_qos_sqs;
- u16 maj_id;
- u16 defcls;
-};
-
struct mlx5e_trap;
+struct mlx5e_htb;
struct mlx5e_priv {
/* priv data path fields - start */
+ struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
- int **channel_tc2realtxq;
- int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -894,7 +911,7 @@ struct mlx5e_priv {
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
- struct mlx5e_flow_steering fs;
+ struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -911,6 +928,8 @@ struct mlx5e_priv {
struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ struct mlx5e_sq_stats **htb_qos_sq_stats;
+ u16 htb_max_qos_sqs;
u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
@@ -921,7 +940,6 @@ struct mlx5e_priv {
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
- int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -930,6 +948,9 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
+#ifdef CONFIG_MLX5_EN_MACSEC
+ struct mlx5e_macsec *macsec;
+#endif
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
@@ -943,7 +964,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
- struct mlx5e_htb htb;
+ struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
};
@@ -959,6 +980,8 @@ enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
+ MLX5E_PROFILE_FEATURE_FS_VLAN,
+ MLX5E_PROFILE_FEATURE_FS_TC,
};
struct mlx5e_profile {
@@ -979,7 +1002,6 @@ struct mlx5e_profile {
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
- u8 rq_groups;
u32 features;
};
@@ -988,13 +1010,13 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
@@ -1017,6 +1039,7 @@ struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
+#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
@@ -1044,6 +1067,9 @@ void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
+void mlx5e_trigger_napi_sched(struct napi_struct *napi);
+
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
@@ -1068,7 +1094,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
@@ -1103,6 +1129,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
@@ -1115,8 +1142,6 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
-void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1145,7 +1170,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
@@ -1188,6 +1214,7 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
}
+int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev);
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index e7c14c0de0a7..48581ea3adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -10,28 +10,33 @@ unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
return chs->num;
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+static struct mlx5e_channel *mlx5e_channels_get(struct mlx5e_channels *chs, unsigned int ix)
{
- struct mlx5e_channel *c;
+ WARN_ON_ONCE(ix >= mlx5e_channels_get_num(chs));
+ return chs->c[ix];
+}
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- *rqn = c->rq.rqn;
+ return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
{
- struct mlx5e_channel *c;
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+ *rqn = c->rq.rqn;
+}
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
- return false;
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
+
+ WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
- return true;
}
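After this refactor, mlx5e_channels_get_xsk_rqn() no longer reports whether
the channel is in XSK mode; callers are expected to test
mlx5e_channels_is_xsk() first. A hypothetical caller, assuming the
declarations from channels.h:

/* Hypothetical helper: pick the active RQN for channel ix. */
static u32 mlx5e_pick_rqn(struct mlx5e_channels *chs, unsigned int ix)
{
        u32 rqn;

        if (mlx5e_channels_is_xsk(chs, ix))
                mlx5e_channels_get_xsk_rqn(chs, ix, &rqn);
        else
                mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
        return rqn;
}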
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index ca00cbc827cb..637ca90daaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -9,8 +9,9 @@
struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index 9976de8b9047..b59aee75de94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -40,13 +40,11 @@ struct mlx5e_dcbx_dp {
};
void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#else
static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
-static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {}
static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {}
static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index ae52e7f38306..b69f9d10ccbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -21,6 +21,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
struct netdev_phys_item_id ppid = {};
struct devlink_port *dl_port;
unsigned int dl_port_index;
+ int ret;
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
@@ -41,7 +42,13 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
memset(dl_port, 0, sizeof(*dl_port));
devlink_port_attrs_set(dl_port, &attrs);
- return devlink_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ ret = devl_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
+
+ return ret;
}
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
@@ -54,8 +61,13 @@ void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
- devlink_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ devl_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
}
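The MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW checks implement conditional locking:
a caller that already holds the devlink instance lock sets the flag, and
everyone else lets the helper take the lock itself. A generic userspace
sketch of the same pattern (all names invented for the example):

#include <pthread.h>

struct port_ctx {
        pthread_mutex_t lock;
};

/* Take the lock only when the caller does not already hold it. */
static int port_op_locked(struct port_ctx *ctx, int caller_holds_lock,
                          int (*op)(struct port_ctx *))
{
        int err;

        if (!caller_holds_lock)
                pthread_mutex_lock(&ctx->lock);
        err = op(ctx);
        if (!caller_holds_lock)
                pthread_mutex_unlock(&ctx->lock);
        return err;
}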
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 678ffbb48a25..bf2741eb7f9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -8,32 +8,17 @@
#include "lib/fs_ttc.h"
struct mlx5e_post_act;
+struct mlx5e_tc_table;
enum {
MLX5E_TC_FT_LEVEL = 0,
MLX5E_TC_TTC_FT_LEVEL,
+ MLX5E_TC_MISS_LEVEL,
};
-struct mlx5e_tc_table {
- /* Protects the dynamic assignment of the t parameter
- * which is the nic tc root table.
- */
- struct mutex t_lock;
- struct mlx5_flow_table *t;
- struct mlx5_fs_chains *chains;
- struct mlx5e_post_act *post_act;
-
- struct rhashtable ht;
-
- struct mod_hdr_tbl mod_hdr;
- struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
-
- struct mlx5_tc_ct_priv *ct;
- struct mapping_ctx *mapping;
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
};
struct mlx5e_flow_table {
@@ -104,108 +89,116 @@ enum {
#endif
};
-struct mlx5e_priv;
-
-#ifdef CONFIG_MLX5_EN_RXNFC
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-#else
-static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
-static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
-{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{ return -EOPNOTSUPP; }
-#endif /* CONFIG_MLX5_EN_RXNFC */
+struct mlx5e_flow_steering;
+struct mlx5e_rx_res;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables;
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple);
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple);
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs);
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#else
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
+{ return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
-struct mlx5e_flow_steering {
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_namespace *egress_ns;
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ struct ttc_params *ttc_params, bool tunnel);
+
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res);
+
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
+
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev);
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile);
+
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
#ifdef CONFIG_MLX5_EN_RXNFC
- struct mlx5e_ethtool_steering ethtool;
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
- struct mlx5e_tc_table tc;
- struct mlx5e_promisc_table promisc;
- struct mlx5e_vlan_table *vlan;
- struct mlx5e_l2_table l2;
- struct mlx5_ttc_table *ttc;
- struct mlx5_ttc_table *inner_ttc;
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner);
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner);
#ifdef CONFIG_MLX5_EN_ARFS
- struct mlx5e_arfs_tables *arfs;
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs);
#endif
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs);
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any);
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp);
#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5e_accel_fs_tcp *accel_tcp;
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp);
#endif
- struct mlx5e_fs_udp *udp;
- struct mlx5e_fs_any *any;
- struct mlx5e_ptp_fs *ptp_fs;
-};
-
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
- struct ttc_params *ttc_params, bool tunnel);
-
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy);
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs, bool vlan_strip_disable);
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs);
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs);
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+
+#define fs_err(fs, fmt, ...) \
+ mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_dbg(fs, fmt, ...) \
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn(fs, fmt, ...) \
+ mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn_once(fs, fmt, ...) \
+ mlx5_core_warn_once(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#endif /* __MLX5E_FLOW_STEER_H__ */
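The new fs_* wrappers route flow-steering log messages through the core
device instead of the netdev, so call sites no longer need a struct
mlx5e_priv. A sketch of the expansion (message text invented for the
example):

/* fs_err(fs, "add %s rule failed, err %d\n", type_str, err);
 *
 * expands to
 *
 * mlx5_core_err(mlx5e_fs_get_mdev(fs),
 *               "add %s rule failed, err %d\n", type_str, err);
 */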
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
new file mode 100644
index 000000000000..9e276fd3c0cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5E_FS_ETHTOOL_H__
+#define __MLX5E_FS_ETHTOOL_H__
+
+struct mlx5e_priv;
+struct mlx5e_ethtool_steering;
+#ifdef CONFIG_MLX5_EN_RXNFC
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
+{ return 0; }
+static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
+#endif
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 7aa25a5e29d7..03cb79adf912 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
#include "en/fs_tt_redirect.h"
#include "fs_core.h"
+#include "mlx5_core.h"
enum fs_udp_type {
FS_IPV4_UDP,
@@ -74,17 +74,17 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type
}
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port)
{
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
enum fs_udp_type type = tt2fs_udp(ttc_type);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_fs_udp *fs_udp;
int err;
if (type == FS_UDP_NUM_TYPES)
@@ -94,7 +94,6 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs.udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -106,31 +105,30 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n",
- __func__, fs_udp_type2str(type), err);
+ fs_err(fs, "%s: add %s rule failed, err %d\n",
+ __func__, fs_udp_type2str(type), err);
}
return rule;
}
-static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type)
+static int fs_udp_add_default_rule(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5e_flow_table *fs_udp_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
- struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs.udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -206,33 +204,36 @@ out:
return err;
}
-static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
+static int fs_udp_create_table(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_flow_table *ft;
int err;
+ ft = &fs_udp->tables[type];
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n",
- fs_udp_type2str(type), ft->t->id, ft->t->level);
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs %s table id %u level %u\n",
+ fs_udp_type2str(type), ft->t->id, ft->t->level);
err = fs_udp_create_groups(ft, type);
if (err)
goto err;
- err = fs_udp_add_default_rule(priv, type);
+ err = fs_udp_add_default_rule(fs, type);
if (err)
goto err;
@@ -253,17 +254,17 @@ static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
fs_udp->tables[i].t = NULL;
}
-static int fs_udp_disable(struct mlx5e_priv *priv)
+static int fs_udp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_udp2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
return err;
}
}
@@ -271,30 +272,31 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int fs_udp_enable(struct mlx5e_priv *priv)
+static int fs_udp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs.udp->tables[i].t;
+ dest.ft = udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_udp2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_udp2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
return err;
}
}
return 0;
}
-void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
int i;
if (!fs_udp)
@@ -303,48 +305,50 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
if (--fs_udp->ref_cnt)
return;
- fs_udp_disable(priv);
+ fs_udp_disable(fs);
for (i = 0; i < FS_UDP_NUM_TYPES; i++)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs.udp = NULL;
+ mlx5e_fs_set_udp(fs, NULL);
}
-int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
int i, err;
- if (priv->fs.udp) {
- priv->fs.udp->ref_cnt++;
+ if (udp) {
+ udp->ref_cnt++;
return 0;
}
- priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
- if (!priv->fs.udp)
+ udp = kzalloc(sizeof(*udp), GFP_KERNEL);
+ if (!udp)
return -ENOMEM;
+ mlx5e_fs_set_udp(fs, udp);
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- err = fs_udp_create_table(priv, i);
+ err = fs_udp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = fs_udp_enable(priv);
+ err = fs_udp_enable(fs);
if (err)
goto err_destroy_tables;
- priv->fs.udp->ref_cnt = 1;
+ udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs.udp, i);
+ fs_udp_destroy_table(udp, i);
- kfree(priv->fs.udp);
- priv->fs.udp = NULL;
+ kfree(udp);
+ mlx5e_fs_set_udp(fs, NULL);
return err;
}
@@ -356,22 +360,21 @@ static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_typ
}
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type)
{
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_fs_any *fs_any;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs.any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -383,31 +386,29 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n",
- __func__, err);
+ fs_err(fs, "%s: add ANY rule failed, err %d\n",
+ __func__, err);
}
return rule;
}
-static int fs_any_add_default_rule(struct mlx5e_priv *priv)
+static int fs_any_add_default_rule(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5e_flow_table *fs_any_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
- struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs.any;
fs_any_t = &fs_any->table;
-
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, fs type=ANY, err %d\n",
- __func__, err);
+ fs_err(fs, "%s: add default rule failed, fs type=ANY, err %d\n",
+ __func__, err);
return err;
}
@@ -472,9 +473,11 @@ err:
return err;
}
-static int fs_any_create_table(struct mlx5e_priv *priv)
+static int fs_any_create_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
+ struct mlx5e_flow_table *ft = &fs_any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,21 +487,21 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n",
- ft->t->id, ft->t->level);
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs ANY table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = fs_any_create_groups(ft);
if (err)
goto err;
- err = fs_any_add_default_rule(priv);
+ err = fs_any_add_default_rule(fs);
if (err)
goto err;
@@ -509,35 +512,38 @@ err:
return err;
}
-static int fs_any_disable(struct mlx5e_priv *priv)
+static int fs_any_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_ANY);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, MLX5_TT_ANY, err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
}
-static int fs_any_enable(struct mlx5e_priv *priv)
+static int fs_any_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_any *any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.any->table.t;
+ dest.ft = any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(ttc, MLX5_TT_ANY, &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, MLX5_TT_ANY, err);
+ fs_err(fs,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
@@ -553,9 +559,9 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
fs_any->table.t = NULL;
}
-void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_fs_any *fs_any = priv->fs.any;
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
if (!fs_any)
return;
@@ -563,43 +569,45 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
if (--fs_any->ref_cnt)
return;
- fs_any_disable(priv);
+ fs_any_disable(fs);
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs.any = NULL;
+ mlx5e_fs_set_any(fs, NULL);
}
-int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
int err;
- if (priv->fs.any) {
- priv->fs.any->ref_cnt++;
+ if (fs_any) {
+ fs_any->ref_cnt++;
return 0;
}
- priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
- if (!priv->fs.any)
+ fs_any = kzalloc(sizeof(*fs_any), GFP_KERNEL);
+ if (!fs_any)
return -ENOMEM;
+ mlx5e_fs_set_any(fs, fs_any);
- err = fs_any_create_table(priv);
+ err = fs_any_create_table(fs);
if (err)
return err;
- err = fs_any_enable(priv);
+ err = fs_any_enable(fs);
if (err)
goto err_destroy_table;
- priv->fs.any->ref_cnt = 1;
+ fs_any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs.any);
+ fs_any_destroy_table(fs_any);
- kfree(priv->fs.any);
- priv->fs.any = NULL;
+ kfree(fs_any);
+ mlx5e_fs_set_any(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
index 7a70c4f38fda..5780fd7ad507 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -4,23 +4,22 @@
#ifndef __MLX5E_FS_TT_REDIRECT_H__
#define __MLX5E_FS_TT_REDIRECT_H__
-#include "en.h"
#include "en/fs.h"
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port);
-void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
-int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs);
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs);
/* ANY traffic type redirect */
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type);
-void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
-int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs);
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
new file mode 100644
index 000000000000..6dac76fa58a3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/pkt_cls.h>
+#include "htb.h"
+#include "en.h"
+#include "../qos.h"
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ struct mlx5e_selq *selq;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+/* Software representation of the QoS tree */
+
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = callback(data, node->qid, node->hw_id);
+ if (err)
+ return err;
+ }
+ return 0;
+}
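A hypothetical callback for mlx5e_htb_enumerate_leaves(), collecting the
hardware IDs of all leaf nodes. The leaf_dump structure is invented for the
example; the signature matches the callback(data, node->qid, node->hw_id)
invocation above:

struct leaf_dump {
        u32 hw_ids[MLX5E_QOS_MAX_LEAF_NODES];
        int count;
};

static int dump_leaf(void *data, u16 qid, u32 hw_id)
{
        struct leaf_dump *dump = data;

        dump->hw_ids[qid] = hw_id;
        dump->count++;
        return 0; /* a non-zero return would abort the walk */
}

/* Usage: err = mlx5e_htb_enumerate_leaves(htb, dump_leaf, &dump); */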
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb)
+{
+ int last;
+
+ last = find_last_bit(htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(htb->mdev));
+ return last == mlx5e_qos_max_leaf_nodes(htb->mdev) ? 0 : last + 1;
+}
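mlx5e_htb_cur_leaf_nodes() returns one past the highest qid in use, or 0
when the bitmap is empty; the leaf-delete path below relies on this when
compacting queue numbers. A standalone sketch of the same scan, with
find_last_bit() reduced to a plain loop:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Returns one past the highest set bit, or 0 when none is set. */
static int cur_leaf_nodes(const unsigned long *used, int size)
{
        int i, last = -1;

        for (i = 0; i < size; i++)
                if (used[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        last = i;
        return last + 1;
}

int main(void)
{
        unsigned long used = (1UL << 0) | (1UL << 5);

        /* qids 0 and 5 in use -> 6 leaf slots are considered live. */
        printf("%d\n", cur_leaf_nodes(&used, 64));
        return 0;
}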
+
+static int mlx5e_htb_find_unused_qos_qid(struct mlx5e_htb *htb)
+{
+ int size = mlx5e_qos_max_leaf_nodes(htb->mdev);
+ struct mlx5e_priv *priv = htb->priv;
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(htb->qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+static struct mlx5e_qos_node *
+mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, htb->qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(htb->priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_create_root(struct mlx5e_htb *htb)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_rcu(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, htb->qos_used_qids);
+ mlx5e_update_tx_netdev_queues(htb->priv);
+ }
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
+}
+
+/* TX datapath API */
+
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_htb_node_find_rcu(htb, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+/* HTB TC handlers */
+
+static int
+mlx5e_htb_root_add(struct mlx5e_htb *htb, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ mlx5e_selq_prepare_htb(htb->selq, htb_maj_id, htb_defcls);
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ goto err_cancel_selq;
+ }
+
+ root = mlx5e_htb_node_create_root(htb);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(htb->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ mlx5e_selq_apply(htb->selq);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_htb_node_delete(htb, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(htb->selq);
+ return err;
+}
+
+static int mlx5e_htb_root_del(struct mlx5e_htb *htb)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_DESTROY\n");
+
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare_htb(htb->selq, 0, 0);
+ mlx5e_selq_apply(htb->selq);
+
+ root = mlx5e_htb_node_find(htb, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(htb->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(htb->mdev, root->hw_id);
+ if (err)
+ qos_err(htb->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_htb_node_delete(htb, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_htb *htb, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
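To make the conversion concrete: rate becomes a percentage of the nearest
limited ancestor's ceiling, a result of 0 is bumped to 1 so the guarantee is
not lost, and anything over 100% collapses to 0 (unlimited share). A
standalone sketch of the arithmetic, assuming BYTES_IN_MBIT is 125000 (one
Mbit expressed in bytes):

#include <stdio.h>

#define BYTES_IN_MBIT 125000ULL /* assumed: 1 Mbit/s in bytes/s */

/* Mirrors the bw_share computation in mlx5e_htb_convert_rate(). */
static unsigned int convert_bw_share(unsigned long long rate_bytes,
                                     unsigned int parent_ceil_mbit)
{
        unsigned long long share;

        if (!parent_ceil_mbit)
                return 0; /* no limited ancestor: unlimited share */
        share = rate_bytes * 100 / BYTES_IN_MBIT / parent_ceil_mbit;
        return share == 0 ? 1 : share > 100 ? 0 : share;
}

int main(void)
{
        /* 25 Mbit/s under a 100 Mbit/s ceiling -> 25% share. */
        printf("%u\n", convert_bw_share(25 * BYTES_IN_MBIT, 100));
        return 0;
}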
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)
+{
+ /* Hardware treats 0 as "unlimited"; use at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
+
+ qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ int qid;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_htb_find_unused_qos_qid(htb);
+ if (qid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum number of leaf classes is reached.");
+ return qid;
+ }
+
+ parent = mlx5e_htb_node_find(htb, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_htb_node_delete(htb, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ struct mlx5e_priv *priv = htb->priv;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(htb->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+ /* No fail point. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_htb_node_delete(htb, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(htb->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
+
+ node = mlx5e_htb_node_find(htb, *classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, *classid, err);
+
+ mlx5e_htb_node_delete(htb, node);
+
+ moved_qid = mlx5e_htb_cur_leaf_nodes(htb);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+ WARN(moved_qid != qid - 1, "Gaps in queue numbering: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb->qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, moved_qid);
+
+ __set_bit(qid, htb->qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *classid = node->classid;
+ return 0;
+}
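+
+/* Editor's note, with hypothetical qids: if leaves occupy qids {0, 1, 2, 3}
+ * and the class at qid 1 is deleted, the code above closes the SQ at the
+ * highest qid (3), clears its bit, and reopens that node at qid 1, keeping
+ * the qid space dense: {0, 1, 2}. The moved class is reported back through
+ * *classid. Only when the deleted qid is already the highest one is no move
+ * needed.
+ */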
+
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, qid);
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_htb_node_delete(htb, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int
+mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(htb->mdev, node->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(htb->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ node->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(htb->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_htb_update_children(htb, node, extack);
+
+ return err;
+}
+
+struct mlx5e_htb *mlx5e_htb_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
+}
+
+void mlx5e_htb_free(struct mlx5e_htb *htb)
+{
+ kvfree(htb);
+}
+
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv)
+{
+ htb->mdev = mdev;
+ htb->netdev = netdev;
+ htb->selq = selq;
+ htb->priv = priv;
+ hash_init(htb->qos_tc2node);
+ return mlx5e_htb_root_add(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->extack);
+}
+
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb)
+{
+ mlx5e_htb_root_del(htb);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
new file mode 100644
index 000000000000..8386f1ea4559
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_EN_HTB_H_
+#define __MLX5E_EN_HTB_H_
+
+#include "qos.h"
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_selq;
+struct mlx5e_htb;
+
+typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data);
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb);
+
+/* TX datapath API */
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid);
+
+/* HTB TC handlers */
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+struct mlx5e_htb *mlx5e_htb_alloc(void);
+void mlx5e_htb_free(struct mlx5e_htb *htb);
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv);
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb);
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 66180ffb4606..29dd3a04c154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -5,13 +5,213 @@
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec.h"
+#include <net/xdp_sock_drv.h>
-static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
+{
+ u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
+
+ return min_page_shift ? : 12;
+}
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+ u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
+
+ /* Regular RQ uses order-0 pages; the NIC must be able to map them. */
+ if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
+ min_page_shift = req_page_shift;
+
+ return max(req_page_shift, min_page_shift);
+}
+
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+ /* Different memory management schemes use different mechanisms to map
+ * user-mode memory. The stricter the guarantees we have, the faster
+ * the mechanisms we can use:
+ * 1. MTT - direct mapping in page granularity.
+ * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
+ * all mappings have the same size.
+ * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
+ * mappings can have different sizes.
+ */
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ bool unaligned = xsk ? xsk->unaligned : false;
+ bool oversized = false;
+
+ if (xsk) {
+ oversized = xsk->chunk_size < (1 << page_shift);
+ WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
+ }
+
+ /* XSK frame size doesn't match the UMR page size, either because the
+ * frame size is not a power of two, or it's smaller than the minimal
+ * page size supported by the firmware.
+ * It's possible to receive packets bigger than the MTU in certain setups.
+ * To avoid writing over the XSK frame boundary, the top region of each
+ * stride is mapped to a garbage page, resulting in two mappings of
+ * different sizes per frame.
+ */
+ if (oversized) {
+ /* An optimization for frame sizes equal to 3 * power_of_two.
+ * 3 KSMs point to the frame, and one KSM points to the garbage
+ * page, which works faster than KLM.
+ */
+ if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
+ return MLX5E_MPWRQ_UMR_MODE_TRIPLE;
+
+ return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
+ }
+
+ /* XSK frames can start at arbitrary unaligned locations, but they all
+ * have the same size, which is a power of two. This allows optimizing
+ * to one KSM per frame.
+ */
+ if (unaligned)
+ return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;
+
+ /* XSK: frames are naturally aligned, MTT can be used.
+ * Non-XSK: Allocations happen in units of CPU pages, therefore, the
+ * mappings are naturally aligned.
+ */
+ return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
+}
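+
+/* Editor's illustration, assuming the firmware reports a minimum page shift
+ * of 12 (4 KB UMR pages); the chunk sizes are hypothetical:
+ *   chunk_size 4096, aligned   -> MLX5E_MPWRQ_UMR_MODE_ALIGNED   (MTT)
+ *   chunk_size 4096, unaligned -> MLX5E_MPWRQ_UMR_MODE_UNALIGNED (KSM)
+ *   chunk_size 3072            -> MLX5E_MPWRQ_UMR_MODE_TRIPLE    (3 + 1 KSMs)
+ *   chunk_size 2560            -> MLX5E_MPWRQ_UMR_MODE_OVERSIZED (2 KLMs)
+ */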
+
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
+{
+ switch (mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return sizeof(struct mlx5_mtt);
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return sizeof(struct mlx5_ksm);
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ return sizeof(struct mlx5_klm) * 2;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ return sizeof(struct mlx5_ksm) * 4;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
+ return 0;
+}
+
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u16 max_wqe_size;
+
+ /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
+ max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
+ MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+ max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+
+ return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+}
+
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+ u8 pages_per_wqe;
+
+ pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
+
+ /* Two MTTs are needed to form an octword. The number of MTTs is encoded
+ * in octwords in a UMR WQE, so we need at least two to avoid mapping
+ * garbage addresses.
+ */
+ if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ pages_per_wqe = 2;
+
+ /* Sanity check for further calculations to succeed. */
+ BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
+ if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
+ return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
+
+ return pages_per_wqe;
+}
+
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u16 umr_wqe_sz;
+
+ umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
+ ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+
+ WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
+
+ return umr_wqe_sz;
+}
+
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
+ MLX5_SEND_WQE_BB);
+}
+
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+
+ /* Add another page as a buffer between WQEs. This page will absorb
+ * write overflow by the hardware when receiving packets larger than
+ * the MTU. Such oversized packets are dropped by the driver at a later
+ * stage.
+ */
+ return ALIGN(pages_per_wqe + 1,
+ MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
+}
+
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- return params->xdp_prog || xsk;
+ /* Same limits apply to KSMs and KLMs. */
+ u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return MLX5E_MAX_RQ_NUM_MTTS;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return klm_limit;
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ /* Each entry is two KLMs. */
+ return klm_limit / 2;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ /* Each entry is four KSMs. */
+ return klm_limit / 4;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+ return 0;
+}
+
+static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
+ u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
+
+ return ilog2(max_entries / mtts_per_wqe);
+}
+
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
+ mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
+ MLX5E_ORDER2_MAX_PACKET_MTU;
}
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
@@ -23,7 +223,7 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return xsk->headroom;
headroom = NET_IP_ALIGN;
- if (mlx5e_rx_is_xdp(params, xsk))
+ if (params->xdp_prog)
headroom += XDP_PACKET_HEADROOM;
else
headroom += MLX5_RX_HEADROOM;
@@ -31,70 +231,80 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return headroom;
}
-u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
- return linear_rq_headroom + hw_mtu;
+ return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
{
- u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
-
- /* AF_XDP doesn't build SKBs in place. */
- if (!xsk)
- frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
+ /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
+ u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
+ u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
- * special case. It can run with frames smaller than a page, as it
- * doesn't allocate pages dynamically. However, here we pretend that
- * fragments are page-sized: it allows to treat XSK frames like pages
- * by redirecting alloc and free operations to XSK rings and by using
- * the fact there are no multiple packets per "page" (which is a frame).
- * The latter is important, because frames may come in a random order,
- * and we will have trouble assemblying a real page of multiple frames.
- */
- if (mlx5e_rx_is_xdp(params, xsk))
- frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
+ return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
+}
- /* Even if we can go with a smaller fragment size, we must not put
- * multiple packets into a single frame.
+static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ bool mpwqe)
+{
+ /* XSK frames are mapped as individual pages, because frames may come in
+ * an arbitrary order from random locations in the UMEM.
*/
if (xsk)
- frag_sz = max_t(u32, frag_sz, xsk->chunk_size);
+ return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- return frag_sz;
+ /* XDP in mlx5e doesn't support multiple packets per page. */
+ if (params->xdp_prog)
+ return PAGE_SIZE;
+
+ return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
}
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
+ u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
- return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+ return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
+ order_base_2(linear_stride_sz);
}
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- /* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
- * than one page. For this, check both with and without xsk.
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
+ return false;
+
+ /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ * must fit into a CPU page.
*/
- u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
- mlx5e_rx_get_linear_frag_sz(params, NULL));
+ if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
+ return false;
+
+ /* XSK frames must be big enough to hold the packet data. */
+ if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
+ return false;
- return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
- linear_frag_sz <= PAGE_SIZE;
+ return true;
}
-bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
- u8 log_stride_sz, u8 log_num_strides)
+static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides,
+ u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+ if (log_stride_sz + log_num_strides !=
+ mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
return false;
if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
@@ -114,28 +324,53 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- s8 log_num_strides;
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_num_strides;
u8 log_stride_sz;
+ u8 log_wqe_sz;
+
+ if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
+ return false;
+
+ log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
- if (!mlx5e_rx_is_linear_skb(params, xsk))
+ if (log_wqe_sz < log_stride_sz)
return false;
- log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+ log_num_strides = log_wqe_sz - log_stride_sz;
- return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
+ return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
+ log_num_strides, page_shift,
+ umr_mode);
}
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
+
+ log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
+ page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
/* Numbers are unsigned, don't subtract to avoid underflow. */
if (params->log_rq_mtu_frames <
log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ /* Ethtool's rx_max_pending is calculated for the regular RQ, which
+ * uses pages of PAGE_SIZE. The max length of an XSK RQ might differ
+ * if it uses a frame size not equal to PAGE_SIZE.
+ * A stricter condition is checked in mlx5e_mpwrq_validate_xsk; WARN
+ * on unexpected failure.
+ */
+ if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
+ return max_log_rq_size;
+
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
@@ -165,7 +400,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk)
{
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
- return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
+ return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
@@ -174,20 +409,35 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- return MLX5_MPWRQ_LOG_WQE_SZ -
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+ return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
+{
+#define UMR_WQE_BULK (2)
+ return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
+}
+
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
- mlx5e_rx_is_linear_skb(params, xsk) :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+ return linear_headroom;
+
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return linear_headroom;
+
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ return linear_headroom;
- return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
- mlx5e_get_linear_rq_headroom(params, xsk) : 0;
+ return 0;
}
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -195,14 +445,14 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
u16 stop_room;
- stop_room = mlx5e_tls_get_stop_room(mdev, params);
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room = mlx5e_ktls_get_stop_room(mdev, params);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
- /* A MPWQE can take up to the maximum-sized WQE + all the normal
- * stop room can be taken if a new packet breaks the active
- * MPWQE session and allocates its WQEs right away.
+ /* A MPWQE can take up to the maximum cacheline-aligned WQE +
+ * all the normal stop room can be taken if a new packet breaks
+ * the active MPWQE session and allocates its WQEs right away.
*/
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_mpwqe(mdev);
return stop_room;
}
@@ -309,25 +559,46 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return false;
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
- if (mlx5_fpga_is_ipsec_device(mdev))
- return false;
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ return -EOPNOTSUPP;
- if (params->xdp_prog) {
- /* XSK params are not considered here. If striding RQ is in use,
- * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
- * be called with the known XSK params.
- */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- return false;
+ if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u16 max_mtu_pkts;
+
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ return -EOPNOTSUPP;
+
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return -EINVAL;
+
+ /* Check that the current RQ length isn't too big for the given frame
+ * size; otherwise the needed number of WQEs exceeds the maximum.
+ */
+ max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+ mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
+ if (params->log_rq_mtu_frames > max_mtu_pkts) {
+ mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
+ 1 << params->log_rq_mtu_frames, xsk->chunk_size);
+ return -EINVAL;
}
- return true;
+ return 0;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
@@ -340,7 +611,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
+ BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
BIT(params->log_rq_mtu_frames),
BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
@@ -348,8 +619,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_CYCLIC;
}
@@ -359,15 +629,16 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
{
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
+ * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*
* No XSK params: checking the availability of striding RQ in general.
*/
- if (!slow_pci_heuristic(mdev) &&
- mlx5e_striding_rq_possible(mdev, params) &&
+ if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
+ MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
+ !mlx5e_mpwrq_validate_regular(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
- !mlx5e_rx_is_linear_skb(params, NULL)))
+ !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -385,58 +656,132 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
};
}
+static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
+{
+ if (xdp)
+ /* XDP requires all fragments to be of the same size. */
+ return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
+
+ /* Optimization for small packets: the last fragment is bigger than the others. */
+ return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
+}
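+
+/* Editor's sketch with hypothetical values: assuming MLX5E_MAX_RX_FRAGS == 4,
+ * first_frag_size == 1536 and frag_size == 2048, the limit is
+ * 1536 + 3 * 2048 = 7680 bytes with XDP (all fragments equal), and
+ * 1536 + 2 * 2048 + PAGE_SIZE = 9728 bytes (4K pages) without XDP, where
+ * the last fragment is allowed to be a full page.
+ */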
+
#define DEFAULT_FRAG_SIZE (2048)
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_frags_info *info)
+static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_frags_info *info)
{
u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
int frag_size_max = DEFAULT_FRAG_SIZE;
+ int first_frag_size_max;
u32 buf_size = 0;
+ u16 headroom;
+ int max_mtu;
int i;
- if (mlx5_fpga_is_ipsec_device(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-
- if (mlx5e_rx_is_linear_skb(params, xsk)) {
+ if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
int frag_stride;
- frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
- frag_stride = roundup_pow_of_two(frag_stride);
+ frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
info->arr[0].frag_size = byte_count;
info->arr[0].frag_stride = frag_stride;
info->num_frags = 1;
- info->wqe_bulk = PAGE_SIZE / frag_stride;
+
+ /* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
+ * first WQE in the page is responsible for allocation of this
+ * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
+ * still not completed, the allocation must stop before k*N.
+ */
+ info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
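+ /* Editor's example with hypothetical numbers: PAGE_SIZE = 4096 and
+ * frag_stride = 2048 give N = 2 and wqe_index_mask = 1, so even-indexed
+ * WQEs own the page and the following odd-indexed WQEs reuse it.
+ */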
+
goto out;
}
- if (byte_count > PAGE_SIZE +
- (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+ headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+ max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+ params->xdp_prog);
+ if (byte_count > max_mtu || params->xdp_prog) {
frag_size_max = PAGE_SIZE;
+ first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+ max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+ params->xdp_prog);
+ if (byte_count > max_mtu) {
+ mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
+ params->sw_mtu, max_mtu);
+ return -EINVAL;
+ }
+ }
i = 0;
while (buf_size < byte_count) {
int frag_size = byte_count - buf_size;
- if (i < MLX5E_MAX_RX_FRAGS - 1)
+ if (i == 0)
+ frag_size = min(frag_size, first_frag_size_max);
+ else if (i < MLX5E_MAX_RX_FRAGS - 1)
frag_size = min(frag_size, frag_size_max);
info->arr[i].frag_size = frag_size;
- info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
-
buf_size += frag_size;
+
+ if (params->xdp_prog) {
+ /* XDP multi buffer expects fragments of the same size. */
+ info->arr[i].frag_stride = frag_size_max;
+ } else {
+ if (i == 0) {
+ /* Ensure that headroom and tailroom are included. */
+ frag_size += headroom;
+ frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+ }
+
i++;
}
info->num_frags = i;
- /* number of different wqes sharing a page */
- info->wqe_bulk = 1 + (info->num_frags % 2);
+
+ /* The last fragment of WQE with index 2*N may share the page with the
+ * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
+ * is not completed yet, WQE 2*N must not be allocated, as it's
+ * responsible for allocating a new page.
+ */
+ if (frag_size_max == PAGE_SIZE) {
+ /* No WQE can start in the middle of a page. */
+ info->wqe_index_mask = 0;
+ } else {
+ /* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
+ * because there would be more than MLX5E_MAX_RX_FRAGS of them.
+ */
+ WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);
+
+ /* An odd number of fragments allows packing the last fragment of
+ * the previous WQE and the first fragment of the next WQE into
+ * the same page.
+ * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
+ * is 4, the last fragment can be bigger than the rest only if
+ * it's the fourth one, so WQEs consisting of 3 fragments will
+ * always share a page.
+ * When a page is shared, WQE bulk size is 2, otherwise just 1.
+ */
+ info->wqe_index_mask = info->num_frags % 2;
+ }
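+
+ /* Editor's worked example (hypothetical MTU): with DEFAULT_FRAG_SIZE =
+ * 2048, a byte_count around 5000 yields roughly three fragments
+ * (num_frags is odd), so wqe_index_mask = 1 and each pair of consecutive
+ * WQEs shares a page; a byte_count around 7000 yields four fragments and
+ * wqe_index_mask = 0.
+ */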
out:
- info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+ /* Bulking optimization to skip allocation until at least 8 WQEs can be
+ * allocated in a row. At the same time, never start allocation when
+ * the page is still used by older WQEs.
+ */
+ info->wqe_bulk = max_t(u8, info->wqe_index_mask + 1, 8);
+
info->log_num_frags = order_base_2(info->num_frags);
+
+ return 0;
}
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
@@ -472,7 +817,7 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
- int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
/* +1 is for the case that the pkt_per_rsrv doesn't consume the reservation
@@ -496,7 +841,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
else
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -533,17 +878,22 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
int ndsegs = 1;
+ int err;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
- log_wqe_num_of_strides)) {
+ log_wqe_num_of_strides,
+ page_shift, umr_mode)) {
mlx5_core_err(mdev,
- "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
- log_wqe_stride_size, log_wqe_num_of_strides);
+ "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
+ log_wqe_stride_size, log_wqe_num_of_strides,
+ umr_mode);
return -EINVAL;
}
@@ -551,7 +901,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
MLX5_SET(wq, wq, shampo_enable, true);
MLX5_SET(wq, wq, log_reservation_size,
@@ -572,7 +922,9 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ if (err)
+ return err;
ndsegs = param->frags_info.num_frags;
}
@@ -638,8 +990,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- !!MLX5_IPSEC_DEV(mdev);
+ allow_swp =
+ mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -661,13 +1013,6 @@ static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
-{
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- return MLX5_GET(wq, wq, log_wq_sz);
-}
-
/* This function calculates the maximum number of headers entries that are needed
* per WQE, the formula is based on the size of the reservations and the
* restriction we have about max packets for reservation that is equal to max
@@ -717,7 +1062,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
@@ -728,25 +1073,98 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
+static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 umr_wqebbs;
+
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+
+ return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+}
+
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp)
{
- u32 wqebbs;
+ u32 wqebbs, total_pages, useful_space;
/* MLX5_WQ_TYPE_CYCLIC */
if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ /* UMR WQEs for the regular RQ. */
+ wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
+
+ /* If XDP program is attached, XSK may be turned on at any time without
+ * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
+ * both regular RQ and XSK RQ.
+ *
+ * XSK uses different values of page_shift, and the total number of UMR
+ * WQEBBs depends on it. This dependency is complex and not monotonic,
+ * especially taking into consideration that some of the parameters come
+ * from capabilities. Hence, we have to try all valid values of XSK
+ * frame size (and page_shift) to find the maximum.
+ */
+ if (params->xdp_prog) {
+ u32 max_xsk_wqebbs = 0;
+ u8 frame_shift;
+
+ for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
+ frame_shift <= PAGE_SHIFT; frame_shift++) {
+ /* The headroom doesn't affect the calculation. */
+ struct mlx5e_xsk_param xsk = {
+ .chunk_size = 1 << frame_shift,
+ .unaligned = false,
+ };
+
+ /* XSK aligned mode. */
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is a power of two. */
+ xsk.unaligned = true;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is not equal to stride size. */
+ xsk.chunk_size -= 1;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is a triple power of two. */
+ xsk.chunk_size = (1 << frame_shift) / 4 * 3;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+ }
+
+ wqebbs += max_xsk_wqebbs;
+ }
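+ /* Editor's note: for a hypothetical frame_shift of 12, the loop above
+ * probes chunk_size 4096 (aligned), 4096 (unaligned), 4095 (frame size
+ * not equal to stride size) and 3072 (triple power of two), and keeps
+ * the largest UMR WQEBB count found across all frame shifts.
+ */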
+
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+
+ /* UMR WQEs don't cross the page boundary; they are padded with NOPs.
+ * This padding is always smaller than the max WQE size. That gives us
+ * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
+ * per page. The number of pages is estimated as the total size of WQEs
+ * divided by the useful space in a page, rounding up. If some WQEs don't
+ * fully fit into the useful space, they can occupy part of the padding,
+ * so the estimate still reserves enough space.
+ */
+ useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB + MLX5_SEND_WQE_BB;
+ total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
+ wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
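+ /* Editor's sketch with hypothetical sizes: PAGE_SIZE = 4096 and a max
+ * WQE of 16 WQEBBs (1024 bytes) give useful_space = 4096 - 1024 + 64 =
+ * 3136 bytes; wqebbs = 512 then needs DIV_ROUND_UP(512 * 64, 3136) = 11
+ * pages, i.e. 11 * 64 = 704 WQEBBs before rounding up to a power of two.
+ */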
+
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -774,10 +1192,10 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(mdev, param);
- param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
- param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
+ param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
+ param->is_tls = mlx5e_is_ktls_rx(mdev);
if (param->is_tls)
- param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
+ param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
@@ -785,6 +1203,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -793,6 +1212,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+ param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
@@ -812,7 +1232,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 433e6967692d..034debd140bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -9,6 +9,7 @@
struct mlx5e_xsk_param {
u16 headroom;
u16 chunk_size;
+ bool unaligned;
};
struct mlx5e_cq_param {
@@ -31,6 +32,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
+ bool is_xdp_mb;
u16 stop_room;
};
@@ -51,37 +53,26 @@ struct mlx5e_create_sq_param {
u8 min_inline_mode;
};
-static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
- u16 qid,
- enum mlx5e_rq_group group,
- u16 *ix)
-{
- int nch = params->num_channels;
- int ch = qid - nch * group;
-
- if (ch < 0 || ch >= nch)
- return false;
-
- *ix = ch;
- return true;
-}
-
-static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
- u16 qid,
- u16 *ix,
- enum mlx5e_rq_group *group)
-{
- u16 nch = params->num_channels;
-
- *ix = qid % nch;
- *group = qid / nch;
-}
-
-static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
- struct mlx5e_params *params, u64 qid)
-{
- return qid < params->num_channels * profile->rq_groups;
-}
+/* Striding RQ dynamic parameters */
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode);
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
/* Parameter calculations */
@@ -91,25 +82,23 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
- u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
@@ -129,6 +118,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
@@ -154,6 +144,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 673f1c82d381..c9d5d8d93994 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
- xoff, &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 82baafd3c00c..8469e9c38670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ struct skb_shared_hwtstamps hwts = {};
+ struct sk_buff *skb;
+
+ ptpsq->cq_stats->resync_event++;
+
+ while (skb_cc != skb_id) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+ skb_tstamp_tx(skb, &hwts);
+ ptpsq->cq_stats->resync_cqe++;
+ skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ }
+}
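+
+/* Editor's example with hypothetical counter values: if the fifo consumer
+ * index maps to skb_cc = 3 while the CQE carries skb_id = 5, the skbs at
+ * 3 and 4 missed their port-timestamp CQEs; they are popped and completed
+ * here with the CQE timestamps previously recorded in their skb cb, before
+ * the caller handles the skb matching id 5.
+ */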
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct sk_buff *skb;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
ptpsq->cq_stats->err_cqe++;
goto out;
}
+ if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+ mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
@@ -195,7 +225,6 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
@@ -242,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+ struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
GFP_KERNEL, numa);
@@ -251,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
ptpsq->skb_fifo.mask = wq_sz - 1;
-
+ if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+ ptpsq->ts_cqe_ctr_mask =
+ (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
}
@@ -449,7 +481,7 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
@@ -590,37 +622,39 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}
-static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
+static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
if (!ptp_fs->valid)
return;
mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
- mlx5e_fs_tt_redirect_any_destroy(priv);
+ mlx5e_fs_tt_redirect_any_destroy(fs);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
- mlx5e_fs_tt_redirect_udp_destroy(priv);
+ mlx5e_fs_tt_redirect_udp_destroy(fs);
ptp_fs->valid = false;
}
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_flow_steering *fs = priv->fs;
struct mlx5_flow_handle *rule;
+ struct mlx5e_ptp_fs *ptp_fs;
int err;
+ ptp_fs = mlx5e_fs_get_ptp(fs);
if (ptp_fs->valid)
return 0;
- err = mlx5e_fs_tt_redirect_udp_create(priv);
+ err = mlx5e_fs_tt_redirect_udp_create(fs);
if (err)
goto out_free;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -628,7 +662,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v4_rule = rule;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -636,11 +670,11 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v6_rule = rule;
- err = mlx5e_fs_tt_redirect_any_create(priv);
+ err = mlx5e_fs_tt_redirect_any_create(fs);
if (err)
goto out_destroy_udp_v6_rule;
- rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
+ rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto out_destroy_fs_any;
@@ -651,13 +685,13 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
return 0;
out_destroy_fs_any:
- mlx5e_fs_tt_redirect_any_destroy(priv);
+ mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
- mlx5e_fs_tt_redirect_udp_destroy(priv);
+ mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
return err;
}
@@ -691,7 +725,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
+ netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -737,6 +771,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ mlx5e_trigger_napi_sched(&c->napi);
}
}
@@ -764,29 +799,31 @@ int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
return 0;
}
-int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile)
{
struct mlx5e_ptp_fs *ptp_fs;
- if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
+ if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
if (!ptp_fs)
return -ENOMEM;
+ mlx5e_fs_set_ptp(fs, ptp_fs);
- priv->fs.ptp_fs = ptp_fs;
return 0;
}
-void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
+void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
- if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
+ if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return;
- mlx5e_ptp_rx_unset_fs(priv);
+ mlx5e_ptp_rx_unset_fs(fs);
kfree(ptp_fs);
}
@@ -812,6 +849,6 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
return -EINVAL;
}
- mlx5e_ptp_rx_unset_fs(priv);
+ mlx5e_ptp_rx_unset_fs(priv->fs);
return 0;
}
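For context, the rules installed by mlx5e_ptp_rx_set_fs() above redirect all PTP event traffic to the PTP channel's TIR: UDP over IPv4 and IPv6 destined to the PTP event port, plus an L2 rule matching the 1588 ethertype. An illustrative standalone table of that rule set (the real rules are hardware flow-steering entries):

#include <stdio.h>

struct ptp_redirect_rule {
	const char *match;
	unsigned int key;
};

int main(void)
{
	/* PTP_EV_PORT is 319 and ETH_P_1588 is 0x88F7 in the kernel headers. */
	static const struct ptp_redirect_rule rules[] = {
		{ "UDP over IPv4, dst port", 319 },
		{ "UDP over IPv6, dst port", 319 },
		{ "L2 ethertype",            0x88F7 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		printf("%s %#x -> PTP TIR\n", rules[i].match, rules[i].key);
	return 0;
}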
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index a71a32e00ebb..cc7efde88ac3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -6,6 +6,7 @@
#include "en.h"
#include "en_stats.h"
+#include "en/txrx.h"
#include <linux/ptp_classify.h>
#define MLX5E_PTP_CHANNEL_IX 0
@@ -17,6 +18,7 @@ struct mlx5e_ptpsq {
u16 skb_fifo_pc;
struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_ptp_cq_stats *cq_stats;
+ u16 ts_cqe_ctr_mask;
};
enum {
@@ -67,14 +69,24 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
fk.ports.dst == htons(PTP_EV_PORT));
}
+static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
+{
+ if (!sq->ptpsq)
+ return true;
+
+ return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
+}
+
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
-int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
-void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
+void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
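The helper above lets the TX path refuse a packet when the PTP SQ's skb FIFO, which pairs completions with their skbs for timestamping, is full. A simplified standalone model, assuming the FIFO is a power-of-two ring indexed by free-running producer/consumer counters:

#include <stdio.h>

struct skb_fifo { unsigned short pc, cc, mask; };

static int fifo_has_room(const struct skb_fifo *f)
{
	/* Counters may wrap; the unsigned difference is the fill level. */
	return (unsigned short)(f->pc - f->cc) <= f->mask;
}

int main(void)
{
	struct skb_fifo f = { .pc = 0xFFFF, .cc = 0xFFFB, .mask = 7 };

	printf("room: %d\n", fifo_has_room(&f)); /* 4 in flight of 8 -> 1 */
	return 0;
}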
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 00449df98a5e..2842195ee548 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -2,11 +2,16 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
+#include "en/htb.h"
-#define BYTES_IN_MBIT 125000
+struct qos_sq_callback_params {
+ struct mlx5e_priv *priv;
+ struct mlx5e_channels *chs;
+};
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
@@ -28,121 +33,14 @@ int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
-{
- int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
-
- return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
-}
-
-/* Software representation of the QoS tree (internal to this file) */
-
-static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
-{
- int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
- int res;
-
- WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
- res = find_first_zero_bit(priv->htb.qos_used_qids, size);
-
- return res == size ? -ENOSPC : res;
-}
-
-struct mlx5e_qos_node {
- struct hlist_node hnode;
- struct rcu_head rcu;
- struct mlx5e_qos_node *parent;
- u64 rate;
- u32 bw_share;
- u32 max_average_bw;
- u32 hw_id;
- u32 classid; /* 16-bit, except root. */
- u16 qid;
-};
-
-#define MLX5E_QOS_QID_INNER 0xffff
-#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
-
-static struct mlx5e_qos_node *
-mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
- struct mlx5e_qos_node *parent)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->parent = parent;
-
- node->qid = qid;
- __set_bit(qid, priv->htb.qos_used_qids);
-
- node->classid = classid;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
-
- mlx5e_update_tx_netdev_queues(priv);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->qid = MLX5E_QOS_QID_INNER;
- node->classid = MLX5E_HTB_CLASSID_ROOT;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
-{
- hash_del_rcu(&node->hnode);
- if (node->qid != MLX5E_QOS_QID_INNER) {
- __clear_bit(node->qid, priv->htb.qos_used_qids);
- mlx5e_update_tx_netdev_queues(priv);
- }
- kfree_rcu(node, rcu);
-}
-
/* TX datapath API */
-static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
/* These channel params are safe to access from the datapath, because:
- * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * 1. This function is called only after checking selq->htb_maj_id != 0,
* and the number of queues can't change while HTB offload is active.
- * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
* mlx5e_select_queue to finish while holding priv->state_lock,
* preventing other code from changing the number of queues.
*/
@@ -151,30 +49,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
-{
- struct mlx5e_qos_node *node;
- u16 qid;
- int res;
-
- rcu_read_lock();
-
- node = mlx5e_sw_node_find_rcu(priv, classid);
- if (!node) {
- res = -ENOENT;
- goto out;
- }
- qid = READ_ONCE(node->qid);
- if (qid == MLX5E_QOS_QID_INNER) {
- res = -EINVAL;
- goto out;
- }
- res = mlx5e_qid_from_qos(&priv->channels, qid);
-
-out:
- rcu_read_unlock();
- return res;
-}
+/* SQ lifecycle */
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
@@ -191,10 +66,8 @@ static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
return mlx5e_state_dereference(priv, qos_sqs[qid]);
}
-/* SQ lifecycle */
-
-static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
- struct mlx5e_qos_node *node)
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id)
{
struct mlx5e_create_cq_param ccp = {};
struct mlx5e_txqsq __rcu **qos_sqs;
@@ -207,13 +80,13 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
params = &chs->params;
- txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+ txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node->qid > priv->htb.max_qos_sqs);
- if (node->qid == priv->htb.max_qos_sqs) {
+ WARN_ON(node_qid > priv->htb_max_qos_sqs);
+ if (node_qid == priv->htb_max_qos_sqs) {
struct mlx5e_sq_stats *stats, **stats_list = NULL;
- if (priv->htb.max_qos_sqs == 0) {
+ if (priv->htb_max_qos_sqs == 0) {
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
sizeof(*stats_list),
GFP_KERNEL);
@@ -226,16 +99,16 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
return -ENOMEM;
}
if (stats_list)
- WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
- WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
- /* Order max_qos_sqs increment after writing the array pointer.
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ /* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
*/
- smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
}
- ix = node->qid % params->num_channels;
- qid = node->qid / params->num_channels;
+ ix = node_qid % params->num_channels;
+ qid = node_qid / params->num_channels;
c = chs->c[ix];
qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
@@ -254,8 +127,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id,
- priv->htb.qos_sq_stats[node->qid]);
+ &param_sq, sq, 0, hw_id,
+ priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
@@ -270,13 +143,29 @@ err_free_sq:
return err;
}
-static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
{
+ struct qos_sq_callback_params *cb_params = data;
+
+ return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
+}
+
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
+{
+ struct mlx5e_priv *priv = data;
struct mlx5e_txqsq *sq;
+ u16 qid;
- sq = mlx5e_get_qos_sq(priv, node->qid);
+ sq = mlx5e_get_qos_sq(priv, node_qid);
- WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+ qid = mlx5e_qid_from_qos(&priv->channels, node_qid);
+
+ /* If it's a new queue, it will be marked as started at this point.
+ * Stop it before updating txq2sq.
+ */
+ mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
+
+ priv->txq2sq[qid] = sq;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -284,11 +173,13 @@ static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
+
+ return 0;
}
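The smp_wmb() publication pattern used here can be modeled in userspace with C11 release/acquire semantics; a sketch (not the driver's code) where the store to txq2sq must be visible before the queue is seen as started:

#include <stdatomic.h>
#include <stddef.h>

static _Atomic(int) started;
static _Atomic(void *) txq2sq_slot;

/* Writer: publish the mapping, then start the queue with release
 * semantics, mirroring smp_wmb() before netif_tx_start_queue().
 */
static void publish(void *sq)
{
	atomic_store_explicit(&txq2sq_slot, sq, memory_order_relaxed);
	atomic_store_explicit(&started, 1, memory_order_release);
}

/* Reader: acquire the started flag first, mirroring the implicit
 * ACQUIRE of the xmit spinlock, then the mapping load is safe.
 */
static void *consume(void)
{
	if (!atomic_load_explicit(&started, memory_order_acquire))
		return NULL;
	return atomic_load_explicit(&txq2sq_slot, memory_order_relaxed);
}

int main(void)
{
	publish((void *)0x1);
	return consume() != NULL ? 0 : 1;
}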
-static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
@@ -299,11 +190,16 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- /* The queue is disabled, no synchronization with datapath is needed. */
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+
+ /* Make the change to txq2sq visible before the queue is started again.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
-static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_params *params;
@@ -353,7 +249,7 @@ void mlx5e_qos_close_queues(struct mlx5e_channel *c)
kvfree(qos_sqs);
}
-static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -361,7 +257,7 @@ static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_close_queues(chs->c[i]);
}
-static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
u16 qos_sqs_size;
int i;
@@ -397,24 +293,20 @@ err_free:
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt, err;
-
- if (!priv->htb.maj_id)
- return 0;
+ struct qos_sq_callback_params callback_params;
+ int err;
err = mlx5e_qos_alloc_queues(priv, chs);
if (err)
return err;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- err = mlx5e_open_qos_sq(priv, chs, node);
- if (err) {
- mlx5e_qos_close_all_queues(chs);
- return err;
- }
+ callback_params.priv = priv;
+ callback_params.chs = chs;
+
+ err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
}
return 0;
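A standalone sketch of the enumeration contract assumed here (the real mlx5e_htb_enumerate_leaves() lives in en/htb.c, outside this patch): the callback runs once per leaf and a non-zero return aborts the walk:

#include <stdio.h>

typedef int (*leaf_cb)(void *data, unsigned short node_qid, unsigned int hw_id);

struct leaf { unsigned short qid; unsigned int hw_id; };

static int enumerate_leaves(const struct leaf *leaves, int n,
			    leaf_cb cb, void *data)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = cb(data, leaves[i].qid, leaves[i].hw_id);
		if (err)
			return err; /* first failure stops the walk */
	}
	return 0;
}

static int open_sq(void *data, unsigned short qid, unsigned int hw_id)
{
	printf("open SQ for qid %u (hw_id %u)\n", qid, hw_id);
	return 0;
}

int main(void)
{
	struct leaf leaves[] = { { 0, 10 }, { 1, 11 } };

	return enumerate_leaves(leaves, 2, open_sq, NULL);
}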
@@ -422,14 +314,7 @@ int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- mlx5e_activate_qos_sq(priv, node);
- }
+ mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
@@ -458,7 +343,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
}
}
-static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -466,278 +351,14 @@ static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_deactivate_queues(chs->c[i]);
}
-/* HTB API */
-
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *root;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
-
- if (!mlx5_qos_is_supported(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
- return -EOPNOTSUPP;
- }
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- err = mlx5e_qos_alloc_queues(priv, &priv->channels);
- if (err)
- return err;
- }
-
- root = mlx5e_sw_node_create_root(priv);
- if (IS_ERR(root)) {
- err = PTR_ERR(root);
- goto err_free_queues;
- }
-
- err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
- goto err_sw_node_delete;
- }
-
- WRITE_ONCE(priv->htb.defcls, htb_defcls);
- /* Order maj_id after defcls - pairs with
- * mlx5e_select_queue/mlx5e_select_htb_queues.
- */
- smp_store_release(&priv->htb.maj_id, htb_maj_id);
-
- return 0;
-
-err_sw_node_delete:
- mlx5e_sw_node_delete(priv, root);
-
-err_free_queues:
- if (opened)
- mlx5e_qos_close_all_queues(&priv->channels);
- return err;
-}
-
-int mlx5e_htb_root_del(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *root;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
-
- WRITE_ONCE(priv->htb.maj_id, 0);
- synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
-
- root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
- if (!root) {
- qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
- return -ENOENT;
- }
- err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
- if (err)
- qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
- root->hw_id, err);
- mlx5e_sw_node_delete(priv, root);
-
- mlx5e_qos_deactivate_all_queues(&priv->channels);
- mlx5e_qos_close_all_queues(&priv->channels);
-
- return err;
-}
-
-static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
- struct mlx5e_qos_node *parent, u32 *bw_share)
-{
- u64 share = 0;
-
- while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
- parent = parent->parent;
-
- if (parent->max_average_bw)
- share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
- parent->max_average_bw);
- else
- share = 101;
-
- *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
-
- qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
- rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
-
- return 0;
-}
-
-static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
-{
- *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
-
- qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
- ceil, *max_average_bw);
-}
-
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- int qid;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
- classid, parent_classid, rate, ceil);
-
- qid = mlx5e_find_unused_qos_qid(priv);
- if (qid < 0) {
- NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
- return qid;
- }
-
- parent = mlx5e_sw_node_find(priv, parent_classid);
- if (!parent)
- return -EINVAL;
-
- node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- mlx5e_sw_node_delete(priv, node);
- return err;
- }
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- return mlx5e_qid_from_qos(&priv->channels, node->qid);
-}
-
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *child;
- int err, tmp_err;
- u32 new_hw_id;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
- classid, child_classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
- qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- /* Intentionally reuse the qid for the upcoming first child. */
- child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
- if (IS_ERR(child)) {
- err = PTR_ERR(child);
- goto err_destroy_hw_node;
- }
-
- child->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
- child->max_average_bw, &child->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- goto err_delete_sw_node;
- }
-
- /* No fail point. */
-
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, child);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, child);
- }
- }
-
- return 0;
-
-err_delete_sw_node:
- child->qid = MLX5E_QOS_QID_INNER;
- mlx5e_sw_node_delete(priv, child);
-
-err_destroy_hw_node:
- tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
- if (tmp_err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
- new_hw_id, classid, tmp_err);
- return err;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
-{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
- if (node->qid == qid)
- break;
-
- return node;
-}
-
-static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
netdev_tx_reset_queue(txq);
netif_tx_start_queue(txq);
}
-static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
@@ -750,251 +371,65 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
spin_unlock_bh(qdisc_lock(qdisc));
}
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node;
- struct netdev_queue *txq;
- u16 qid, moved_qid;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
-
- node = mlx5e_sw_node_find(priv, *classid);
- if (!node)
- return -ENOENT;
-
- /* Store qid for reuse. */
- qid = node->qid;
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, qid));
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, *classid, err);
-
- mlx5e_sw_node_delete(priv, node);
-
- moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
-
- if (moved_qid == 0) {
- /* The last QoS SQ was just destroyed. */
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
- moved_qid--;
-
- if (moved_qid < qid) {
- /* The highest QoS SQ was just destroyed. */
- WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
- qid, moved_qid);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
-
- WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
- qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
-
- node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
- WARN(!node, "Could not find a node with qid %u to move to queue %u",
- moved_qid, qid);
-
- /* Stop traffic to the old queue. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
- __clear_bit(moved_qid, priv->htb.qos_used_qids);
-
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, moved_qid));
- mlx5e_deactivate_qos_sq(priv, moved_qid);
- mlx5e_close_qos_sq(priv, moved_qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, moved_qid);
-
- __set_bit(qid, priv->htb.qos_used_qids);
- WRITE_ONCE(node->qid, qid);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
- node->classid, moved_qid, qid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- mlx5e_update_tx_netdev_queues(priv);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
-
- *classid = node->classid;
- return 0;
-}
-
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack)
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
- struct mlx5e_qos_node *node, *parent;
- u32 old_hw_id, new_hw_id;
- int err, saved_err = 0;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
- force ? "_FORCE" : "", classid);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
- node->parent->bw_share,
- node->parent->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- if (!force)
- return err;
- saved_err = err;
- }
-
- /* Store qid for reuse and prevent clearing the bit. */
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, qid);
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- parent = node->parent;
- mlx5e_sw_node_delete(priv, node);
+ struct mlx5e_htb *htb = priv->htb;
+ int res;
- node = parent;
- WRITE_ONCE(node->qid, qid);
+ if (!htb && htb_qopt->command != TC_HTB_CREATE)
+ return -EINVAL;
- /* Early return on error in force mode. Parent will still be an inner
- * node to be deleted by a following delete operation.
- */
- if (saved_err)
- return saved_err;
-
- old_hw_id = node->hw_id;
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
+ switch (htb_qopt->command) {
+ case TC_HTB_CREATE:
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(htb_qopt->extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
}
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- return 0;
-}
-
-static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *child;
- int err = 0;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
- u32 old_bw_share = child->bw_share;
- int err_one;
-
- if (child->parent != node)
- continue;
-
- mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
- if (child->bw_share == old_bw_share)
- continue;
-
- err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
- child->max_average_bw, child->hw_id);
- if (!err && err_one) {
- err = err_one;
-
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
- qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
- node->classid, err);
+ priv->htb = mlx5e_htb_alloc();
+ htb = priv->htb;
+ if (!htb)
+ return -ENOMEM;
+ res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
+ if (res) {
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
}
+ return res;
+ case TC_HTB_DESTROY:
+ mlx5e_htb_cleanup(htb);
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
+ return 0;
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
+ htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb_qopt->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
+ htb_qopt->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-
- return err;
-}
-
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- u32 bw_share, max_average_bw;
- struct mlx5e_qos_node *node;
- bool ceil_changed = false;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
- classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
-
- err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
- max_average_bw, node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
- qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- if (max_average_bw != node->max_average_bw)
- ceil_changed = true;
-
- node->bw_share = bw_share;
- node->max_average_bw = max_average_bw;
-
- if (ceil_changed)
- err = mlx5e_qos_update_children(priv, node, extack);
-
- return err;
}
struct mlx5e_mqprio_rl {
@@ -1080,3 +515,4 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_i
*hw_id = rl->leaves_id[tc];
return 0;
}
+
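BYTES_IN_MBIT, now exported via qos.h below, is 125000 because 1 Mbit/s is 10^6 / 8 bytes/s. A worked standalone example (hypothetical numbers) of the two conversions that moved with the HTB code into en/htb.c: ceil in bytes/s becomes max_average_bw in Mbit/s, and rate becomes a bw_share percentage relative to the parent's ceil:

#include <stdint.h>
#include <stdio.h>

#define BYTES_IN_MBIT 125000ULL /* 1 Mbit/s = 10^6 / 8 bytes/s */

int main(void)
{
	uint64_t ceil = 12500000;	/* 100 Mbit/s parent ceil */
	uint64_t rate = 2500000;	/* 20 Mbit/s child rate   */
	uint32_t max_average_bw = ceil / BYTES_IN_MBIT;
	uint64_t share = rate * 100 / BYTES_IN_MBIT / max_average_bw;
	/* 0 share means "over ceil"; a rounded-down 0 is bumped to 1. */
	uint32_t bw_share = share == 0 ? 1 : share > 100 ? 0 : share;

	printf("max_average_bw=%u Mbit/s, bw_share=%u%%\n",
	       max_average_bw, bw_share);
	return 0;
}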
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index b7558907ba20..4947afa23b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -6,41 +6,39 @@
#include <linux/mlx5/driver.h>
-#define MLX5E_QOS_MAX_LEAF_NODES 256
+#define BYTES_IN_MBIT 125000
struct mlx5e_priv;
+struct mlx5e_htb;
struct mlx5e_channels;
struct mlx5e_channel;
+struct tc_htb_qopt_offload;
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
-
-/* TX datapath API */
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
-struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
/* SQ lifecycle */
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id);
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id);
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
+
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs);
void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs);
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+/* TX datapath API */
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
/* HTB API */
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_root_del(struct mlx5e_priv *priv);
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb);
/* MQPRIO TX rate limit */
struct mlx5e_mqprio_rl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 9c076aa20306..b6f5c1bcdbcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *priv;
-
- /* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
- return false;
-
- priv = netdev_priv(netdev);
- rpriv = priv->ppriv;
-
- /* Egress acl forward to vport is supported only non-uplink representor */
- return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+ return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}
static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
u16 fwd_vport_num;
int err;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_bond *bond;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return NOTIFY_DONE;
+
+ bond = container_of(nb, struct mlx5e_rep_bond, nb);
+ priv = netdev_priv(netdev);
+ rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+ /* Verify the VF representor is on the same device as the bond handling the netevent. */
+ if (rpriv->uplink_priv.bond != bond)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_CHANGELOWERSTATE:
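A standalone sketch (not driver code) of the refactor in this file: both the relevance check and the bond-identity check now run once at the notifier entry point and return NOTIFY_DONE early, so the per-event handlers need no filtering of their own:

#define NOTIFY_DONE 0x0000
#define NOTIFY_OK   0x0001

enum netdev_event { CHANGELOWERSTATE, CHANGEUPPER };

struct netdev { int is_lag_port; int is_vf_rep; };

static int bond_netevent(const struct netdev *dev, enum netdev_event event)
{
	if (!dev->is_lag_port || !dev->is_vf_rep)
		return NOTIFY_DONE;	/* filtered once, up front */

	switch (event) {
	case CHANGELOWERSTATE:		/* handle active-slave change */
	case CHANGEUPPER:		/* handle enslave / release   */
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}

int main(void)
{
	struct netdev dev = { .is_lag_port = 1, .is_vf_rep = 0 };

	return bond_netevent(&dev, CHANGEUPPER) == NOTIFY_DONE ? 0 : 1;
}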
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index c6d2f8c78db7..8099a21e674c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
return err;
}
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct net_device *lower;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+ return 0;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(lower))
+ continue;
+
+ priv = netdev_priv(lower);
+ mdev = priv->mdev;
+ if (!mlx5_lag_is_active(mdev))
+ return -EAGAIN;
+ if (!mlx5_lag_is_shared_fdb(mdev))
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
break;
case NETDEV_CHANGEUPPER:
@@ -269,6 +300,12 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
attr->u.vlan_filtering, br_offloads);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlx5_esw_bridge_vlan_proto_set(vport_num,
+ esw_owner_vhca_id,
+ attr->u.vlan_protocol,
+ br_offloads);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -491,7 +528,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
}
br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
- err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
@@ -509,7 +546,9 @@ err_register_swdev_blk:
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
+ rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
+ rtnl_unlock();
}
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +563,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
return;
cancel_delayed_work_sync(&br_offloads->update_work);
- unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
destroy_workqueue(br_offloads->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 0991345c4ae5..fac7e3ff2674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -21,6 +21,7 @@
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
+#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -263,14 +264,14 @@ int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_tc_esw_init(uplink_priv);
return err;
}
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
/* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
@@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
}
+static int
+mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct flow_action_entry *action;
+ struct mlx5e_tc_act *act;
+ bool add = false;
+ int i;
+
+ /* There is currently no use case for more than one action (e.g. pedit).
+ * When there is, we will need to handle cleanup of multiple actions on error.
+ */
+ if (!flow_offload_has_one_action(&fl_act->action))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ flow_action_for_each(i, action, &fl_act->action) {
+ act = mlx5e_tc_act_get(action->id, ns_type);
+ if (!act)
+ continue;
+
+ if (!act->offload_action)
+ continue;
+
+ if (!act->offload_action(priv, fl_act, action))
+ add = true;
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->destroy_action)
+ return -EOPNOTSUPP;
+
+ return act->destroy_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->stats_action)
+ return -EOPNOTSUPP;
+
+ return act->stats_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return mlx5e_rep_indr_replace_act(rpriv, fl_act);
+ case FLOW_ACT_DESTROY:
+ return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
+ case FLOW_ACT_STATS:
+ return mlx5e_rep_indr_stats_act(rpriv, fl_act);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
+ enum tc_setup_type type,
+ void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return mlx5e_rep_indr_setup_act(rpriv, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);
switch (type) {
case TC_SETUP_BLOCK:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 2684e9da9f41..5f6f95ad6888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -123,6 +123,8 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
xskrq->stats->recover++;
}
+ mlx5e_trigger_napi_icosq(icosq->channel);
+
mutex_unlock(&icosq->channel->icosq_recovery_lock);
return 0;
@@ -132,44 +134,24 @@ out:
return err;
}
-static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
-{
- struct net_device *dev = rq->netdev;
- int err;
-
- err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
- if (err) {
- netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
- return err;
- }
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
- if (err) {
- netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
{
struct mlx5e_rq *rq = ctx;
int err;
mlx5e_deactivate_rq(rq);
- mlx5e_free_rx_descs(rq);
-
- err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+ err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
if (err)
- goto out;
+ return err;
- clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
mlx5e_activate_rq(rq);
rq->stats->recover++;
+ if (rq->channel)
+ mlx5e_trigger_napi_icosq(rq->channel);
+ else
+ mlx5e_trigger_napi_sched(rq->cq.napi);
return 0;
-out:
- clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
- return err;
}
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1cdd8c2e37a..7f93426b88b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -442,7 +442,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
goto inner_tir;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
@@ -457,7 +457,7 @@ inner_tir:
continue;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 24c32f73040a..e1095bc36543 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -24,8 +24,6 @@ struct mlx5e_rx_res {
struct {
struct mlx5e_rqt direct_rqt;
struct mlx5e_tir direct_tir;
- struct mlx5e_rqt xsk_rqt;
- struct mlx5e_tir xsk_tir;
} *channels;
struct {
@@ -320,48 +318,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
mlx5e_tir_builder_clear(builder);
}
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- goto out;
-
- for (ix = 0; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
- res->mdev, false, res->drop_rqn);
- if (err) {
- mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
- err, ix);
- goto err_destroy_xsk_rqts;
- }
- }
-
- for (ix = 0; ix < res->max_nch; ix++) {
- mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
- mlx5e_tir_builder_build_direct(builder);
-
- err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
- if (err) {
- mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
- err, ix);
- goto err_destroy_xsk_tirs;
- }
-
- mlx5e_tir_builder_clear(builder);
- }
-
goto out;
-err_destroy_xsk_tirs:
- while (--ix >= 0)
- mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
-
- ix = res->max_nch;
-err_destroy_xsk_rqts:
- while (--ix >= 0)
- mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
-
- ix = res->max_nch;
err_destroy_direct_tirs:
while (--ix >= 0)
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
@@ -420,12 +378,6 @@ static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
for (ix = 0; ix < res->max_nch; ix++) {
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
-
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
-
- mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
- mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
}
kvfree(res->channels);
@@ -491,13 +443,6 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}
-u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
-{
- WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));
-
- return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
-}
-
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
struct mlx5e_rss *rss = res->rss[0];
@@ -523,56 +468,53 @@ static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int i
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
-void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
+ struct mlx5e_channels *chs,
+ unsigned int ix)
{
- unsigned int nch, ix;
+ u32 rqn = res->rss_rqns[ix];
int err;
- nch = mlx5e_channels_get_num(chs);
-
- for (ix = 0; ix < chs->num; ix++)
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
- res->rss_nch = chs->num;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ rqn, ix, err);
+}
- mlx5e_rx_res_rss_enable(res);
+static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
+ unsigned int ix)
+{
+ int err;
- for (ix = 0; ix < nch; ix++) {
- u32 rqn;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+}
- mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- rqn, ix, err);
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+{
+ unsigned int nch, ix;
+ int err;
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
+ nch = mlx5e_channels_get_num(chs);
- if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
- rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- rqn, ix, err);
+ for (ix = 0; ix < chs->num; ix++) {
+ if (mlx5e_channels_is_xsk(chs, ix))
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ else
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
}
- for (ix = nch; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- res->drop_rqn, ix, err);
+ res->rss_nch = chs->num;
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
+ mlx5e_rx_res_rss_enable(res);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- }
+ for (ix = 0; ix < nch; ix++)
+ mlx5e_rx_res_channel_activate_direct(res, chs, ix);
+ for (ix = nch; ix < res->max_nch; ix++)
+ mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
@@ -595,22 +537,8 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
- for (ix = 0; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- res->drop_rqn, ix, err);
-
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
-
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- }
+ for (ix = 0; ix < res->max_nch; ix++)
+ mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
@@ -621,33 +549,17 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
}
}
-int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
- unsigned int ix)
+void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix, bool xsk)
{
- u32 rqn;
- int err;
-
- if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
- return -EINVAL;
-
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- rqn, ix, err);
- return err;
-}
+ if (xsk)
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ else
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
-int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
-{
- int err;
+ mlx5e_rx_res_rss_enable(res);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- return err;
+ mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index b39b20a720e0..5d5f64fab60f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -17,8 +17,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
- MLX5E_RX_RES_FEATURE_XSK = BIT(1),
- MLX5E_RX_RES_FEATURE_PTP = BIT(2),
+ MLX5E_RX_RES_FEATURE_PTP = BIT(1),
};
/* Setup */
@@ -32,7 +31,6 @@ void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
/* TIRN getters for flow steering */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
@@ -40,9 +38,8 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
-int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
- unsigned int ix);
-int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix);
+void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix, bool xsk);
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
new file mode 100644
index 000000000000..f675b1926340
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "selq.h"
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include "en.h"
+#include "en/ptp.h"
+#include "en/htb.h"
+
+struct mlx5e_selq_params {
+ unsigned int num_regular_queues;
+ unsigned int num_channels;
+ unsigned int num_tcs;
+ union {
+ u8 is_special_queues;
+ struct {
+ bool is_htb : 1;
+ bool is_ptp : 1;
+ };
+ };
+ u16 htb_maj_id;
+ u16 htb_defcls;
+};
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+{
+ struct mlx5e_selq_params *init_params;
+
+ selq->state_lock = state_lock;
+
+ selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
+ if (!selq->standby)
+ return -ENOMEM;
+
+ init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
+ if (!init_params) {
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ return -ENOMEM;
+ }
+ /* Assign dummy values, so that mlx5e_select_queue won't crash. */
+ *init_params = (struct mlx5e_selq_params) {
+ .num_regular_queues = 1,
+ .num_channels = 1,
+ .num_tcs = 1,
+ .is_htb = false,
+ .is_ptp = false,
+ .htb_maj_id = 0,
+ .htb_defcls = 0,
+ };
+ rcu_assign_pointer(selq->active, init_params);
+
+ return 0;
+}
+
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+{
+ WARN_ON_ONCE(selq->is_prepared);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ selq->is_prepared = true;
+
+ mlx5e_selq_apply(selq);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+}
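
Note that the two kvfree() calls in mlx5e_selq_cleanup() are deliberate: the first releases the unused standby buffer and leaves selq->standby NULL, mlx5e_selq_apply() then publishes that NULL as the active pointer and hands the old active parameters back in selq->standby, and the second kvfree() releases those. Readers consequently observe a NULL selq->active, which mlx5e_select_queue() below checks for explicitly.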
+
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
+{
+ struct mlx5e_selq_params *selq_active;
+
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
+ selq->standby->num_channels = params->num_channels;
+ selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
+ selq->standby->num_regular_queues =
+ selq->standby->num_channels * selq->standby->num_tcs;
+ selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
+}
+
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *selq_active =
+ rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+ return selq_active->htb_maj_id;
+}
+
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+ struct mlx5e_selq_params *selq_active;
+
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
+ selq->standby->is_htb = htb_maj_id;
+ selq->standby->htb_maj_id = htb_maj_id;
+ selq->standby->htb_defcls = htb_defcls;
+}
+
+void mlx5e_selq_apply(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *old_params;
+
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+
+ old_params = rcu_replace_pointer(selq->active, selq->standby,
+ lockdep_is_held(selq->state_lock));
+ synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
+ selq->standby = old_params;
+}
+
+void mlx5e_selq_cancel(struct mlx5e_selq *selq)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+}
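
The prepare/apply/cancel triple implements a classic RCU double buffer: writers stage changes on the standby copy under state_lock and publish them with a single pointer swap, while ndo_select_queue reads the active copy locklessly. A minimal self-contained userspace analogue of the pattern (a sketch for illustration only, not the kernel implementation; struct params and the helper names are invented here):

#include <stdatomic.h>

struct params { unsigned int num_channels, num_tcs; };

struct selq {
	_Atomic(struct params *) active;   /* read locklessly by the datapath */
	struct params *standby;            /* writer-owned, guarded by a lock */
};

static void selq_prepare(struct selq *s, unsigned int ch, unsigned int tcs)
{
	/* Stage the new configuration on the standby buffer. */
	*s->standby = *atomic_load(&s->active);
	s->standby->num_channels = ch;
	s->standby->num_tcs = tcs;
}

static void selq_apply(struct selq *s)
{
	/* Publish standby; the old active buffer becomes the next standby.
	 * The kernel code additionally calls synchronize_net() so that
	 * in-flight readers drain before the old buffer is written again.
	 */
	s->standby = atomic_exchange(&s->active, s->standby);
}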
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
+static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP)
+ return mlx5e_get_dscp_up(priv, skb);
+#endif
+ if (skb_vlan_tag_present(skb))
+ return skb_vlan_tag_get_prio(skb);
+ return 0;
+}
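
As a concrete example of the DSCP path: a packet with dsfield 0xb8 yields DSCP 0xb8 >> 2 = 0x2e (46, the EF code point); the shift discards the two low ECN bits, and the priority is then read from priv->dcbx_dp.dscp2prio[46].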
+
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int up;
+
+ up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0;
+
+ return selq->num_regular_queues + up;
+}
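
PTP SQs are laid out directly after the num_regular_queues regular txqs, one per UP, hence the returned index num_regular_queues + up; in the htb_maj_id branch of mlx5e_select_queue() below, num_tcs == 1, so the same computation degenerates to plain num_channels.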
+
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
+{
+ u16 classid;
+
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = selq->htb_defcls;
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_selq_params *selq;
+ int txq_ix, up;
+
+ selq = rcu_dereference_bh(priv->selq.active);
+
+ /* This is a workaround needed only for the mlx5e_netdev_change_profile
+ * flow that zeroes out the whole priv without unregistering the netdev
+ * and without preventing ndo_select_queue from being called.
+ */
+ if (unlikely(!selq))
+ return 0;
+
+ if (likely(!selq->is_special_queues)) {
+ /* No special queues, netdev_pick_tx returns one of the regular ones. */
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ /* Normalize any picked txq_ix to [0, num_channels), so that we can
+ * return a txq_ix that matches both the channel and the packet UP.
+ */
+ return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
+ up * selq->num_channels;
+ }
+
+ if (unlikely(selq->htb_maj_id)) {
+ /* num_tcs == 1 with HTB, so the PTP SQ (if used) sits right at index num_channels. */
+
+ txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
+ if (txq_ix > 0)
+ return txq_ix;
+
+ if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
+ return selq->num_channels;
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ /* Fix up netdev_pick_tx() so that it doesn't choose the ptp_channel
+ * or HTB txqs. If one of them is selected, switch to a regular queue;
+ * the driver selects these queues only in mlx5e_select_ptpsq()
+ * and mlx5e_select_htb_queue().
+ */
+ return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
+ }
+
+ /* PTP is enabled */
+
+ if (mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb, selq);
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ /* Normalize any picked txq_ix to [0, num_channels). Queues in range
+ * [0, num_regular_queues) will be mapped to the corresponding channel
+ * index, so that we can apply the packet's UP (if num_tcs > 1).
+ * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
+ * because driver should select the PTP only at mlx5e_select_ptpsq().
+ */
+ txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ return txq_ix + up * selq->num_channels;
+}
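
A worked example of the index math above (assuming the regular txq layout of num_channels queues per TC implied by num_regular_queues = num_channels * num_tcs): with num_channels = 4 and num_tcs = 3, netdev_pick_tx() returns some txq_ix in [0, 12); mlx5e_txq_to_ch_ix() folds it into a channel index in [0, 4), and a packet with UP 2 that hashes to channel 1 is sent on txq 1 + 2 * 4 = 9.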
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
new file mode 100644
index 000000000000..fd590f80e4d1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_SELQ_H__
+#define __MLX5_EN_SELQ_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_selq_params;
+
+struct mlx5e_selq {
+ struct mlx5e_selq_params __rcu *active;
+ struct mlx5e_selq_params *standby;
+ struct mutex *state_lock; /* points to priv->state_lock */
+ bool is_prepared;
+};
+
+struct mlx5e_params;
+struct net_device;
+struct sk_buff;
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
+void mlx5e_selq_apply(struct mlx5e_selq *selq);
+void mlx5e_selq_cancel(struct mlx5e_selq *selq);
+
+static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)
+{
+ while (unlikely(txq >= num_channels))
+ txq -= num_channels;
+ return txq;
+}
+
+static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)
+{
+ if (unlikely(txq >= num_channels)) {
+ if (unlikely(txq >= num_channels << 3))
+ txq %= num_channels;
+ else
+ do
+ txq -= num_channels;
+ while (txq >= num_channels);
+ }
+ return txq;
+}
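
Design note on the two helpers: both avoid an integer division on the common path. mlx5e_txq_to_ch_ix() only ever sees txq < num_tcs * num_channels (this driver exposes at most eight TCs), so a short subtraction loop beats a modulo; the HTB variant can receive much larger indices, so it falls back to % once txq >= num_channels << 3 and otherwise subtracts in a loop bounded to eight iterations.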
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+#endif /* __MLX5_EN_SELQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
index b0de6b999675..21aab96357b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -18,9 +19,8 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_ACCEPT;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index e600924e30ea..3337241cfd84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -2,66 +2,46 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
+#include "en/tc/post_act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
-/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
- &mlx5e_tc_act_accept,
- &mlx5e_tc_act_drop,
- &mlx5e_tc_act_trap,
- &mlx5e_tc_act_goto,
- &mlx5e_tc_act_mirred,
- &mlx5e_tc_act_mirred,
- &mlx5e_tc_act_redirect_ingress,
- NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan_mangle,
- &mlx5e_tc_act_tun_encap,
- &mlx5e_tc_act_tun_decap,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_csum,
- NULL, /* FLOW_ACTION_MARK, */
- &mlx5e_tc_act_ptype,
- NULL, /* FLOW_ACTION_PRIORITY, */
- NULL, /* FLOW_ACTION_WAKE, */
- NULL, /* FLOW_ACTION_QUEUE, */
- &mlx5e_tc_act_sample,
- NULL, /* FLOW_ACTION_POLICE, */
- &mlx5e_tc_act_ct,
- NULL, /* FLOW_ACTION_CT_METADATA, */
- &mlx5e_tc_act_mpls_push,
- &mlx5e_tc_act_mpls_pop,
+ [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+ [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+ [FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+ [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+ [FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+ [FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+ [FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+ [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+ [FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+ [FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+ [FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+ [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+ [FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+ [FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+ [FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
};
-/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
- &mlx5e_tc_act_accept,
- &mlx5e_tc_act_drop,
- NULL, /* FLOW_ACTION_TRAP, */
- &mlx5e_tc_act_goto,
- &mlx5e_tc_act_mirred_nic,
- NULL, /* FLOW_ACTION_MIRRED, */
- NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
- NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
- NULL, /* FLOW_ACTION_VLAN_PUSH, */
- NULL, /* FLOW_ACTION_VLAN_POP, */
- NULL, /* FLOW_ACTION_VLAN_MANGLE, */
- NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
- NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_csum,
- &mlx5e_tc_act_mark,
- NULL, /* FLOW_ACTION_PTYPE, */
- NULL, /* FLOW_ACTION_PRIORITY, */
- NULL, /* FLOW_ACTION_WAKE, */
- NULL, /* FLOW_ACTION_QUEUE, */
- NULL, /* FLOW_ACTION_SAMPLE, */
- NULL, /* FLOW_ACTION_POLICE, */
- &mlx5e_tc_act_ct,
+ [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+ [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+ [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+ [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+ [FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+ [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
};
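
Switching to designated initializers removes the fragile requirement that both tables stay positionally aligned with enum flow_action_id (the old NULL placeholders existed only to keep the offsets right); unlisted actions are now implicitly NULL. The lookup consuming these tables is not part of this hunk, but is essentially the following (a sketch):

struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
		 enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5e_tc_act **tc_acts;

	tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic;

	/* NULL means the action cannot be offloaded in this namespace. */
	return tc_acts[act_id];
}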
/**
@@ -98,6 +78,77 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
- parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
+ parse_state->flow_action = flow_action;
+}
+
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder)
+{
+ struct flow_action_entry *act;
+ int i, j = 0;
+
+ flow_action_for_each(i, act, flow_action) {
+ /* Add CT action to be first. */
+ if (act->id == FLOW_ACTION_CT)
+ flow_action_reorder->entries[j++] = act;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT)
+ continue;
+ flow_action_reorder->entries[j++] = act;
+ }
+}
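
For example, a flow_action list [pedit, ct, mirred] comes out as [ct, pedit, mirred]: CT entries are hoisted to the front while the relative order of everything else is preserved. The caller must size flow_action_reorder->entries for flow_action->num_entries pointers, since every entry is copied exactly once.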
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ struct mlx5e_priv *priv;
+ int err = 0, i;
+
+ priv = parse_state->flow->priv;
+
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse)
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr)
+{
+ struct mlx5_core_dev *mdev = flow->priv->mdev;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+ int err;
+
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+
+ /* Write the next post-act rule's handle into the current rule, chaining the two. */
+ err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed setting post action handle");
+ return err;
+ }
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 26efa33de56f..e1570ff056ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -13,15 +13,20 @@
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
- unsigned int num_actions;
+ struct flow_action *flow_action;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ u32 actions;
+ bool ct;
+ bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
+ bool eth_push;
+ bool eth_pop;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
- struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
+ struct mlx5e_mpls_info mpls_info;
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
struct mlx5_tc_ct_priv *ct_priv;
@@ -30,7 +35,8 @@ struct mlx5e_tc_act_parse_state {
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index);
+ int act_index,
+ struct mlx5_flow_attr *attr);
int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -40,6 +46,25 @@ struct mlx5e_tc_act {
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
+
+ bool (*is_multi_table_act)(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr);
+
+ int (*offload_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act);
+
+ int (*destroy_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
+
+ int (*stats_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
+};
+
+struct mlx5e_tc_flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry **entries;
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
@@ -61,6 +86,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+extern struct mlx5e_tc_act mlx5e_tc_act_police;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
@@ -72,4 +98,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder);
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type);
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr);
+
#endif /* __MLX5_EN_TC_ACT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
index 29920ef0180a..c0f08ae6a57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -38,11 +38,12 @@ csum_offload_supported(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow *flow = parse_state->flow;
- return csum_offload_supported(flow->priv, flow->attr->action,
+ return csum_offload_supported(flow->priv, attr->action,
act->csum_flags, parse_state->extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 06ec30cdb269..a829c94289c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -8,13 +8,14 @@
static bool
tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
struct netlink_ext_ack *extack = parse_state->extack;
- if (flow_flag_test(parse_state->flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ if (parse_state->ct && !clear_action) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
return false;
}
@@ -27,24 +28,77 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
+ /* It's redundant to do ct clear more than once. */
+ if (clear_action && parse_state->ct_clear)
+ return 0;
+
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
if (err)
return err;
- flow_flag_set(parse_state->flow, CT);
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ if (clear_action) {
+ parse_state->ct_clear = true;
+ } else {
+ attr->flags |= MLX5_ATTR_FLAG_CT;
+ flow_flag_set(parse_state->flow, CT);
+ parse_state->ct = true;
+ }
+
return 0;
}
+static int
+tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts;
+ int err;
+
+ /* If a ct action exists, we can ignore any previous ct_clear actions. */
+ if (parse_state->ct)
+ return 0;
+
+ if (parse_state->ct_clear) {
+ err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Failed to set registers for ct clear");
+ return err;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ /* Prevent handling of additional, redundant clear actions */
+ parse_state->ct_clear = false;
+ }
+
+ return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->ct.action & TCA_CT_ACT_CLEAR)
+ return false;
+
+ return true;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
+ .is_multi_table_act = tc_act_is_multi_table_act_ct,
+ .post_parse = tc_act_post_parse_ct,
};
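
With post_parse and is_multi_table_act wired up, the CT action now participates in every stage of the per-action ops table. The driving loop lives in en_tc.c rather than in this hunk; schematically (a sketch of the call order, not the literal code):

/* Sketch of how the per-action ops are consumed during flow parsing. */
flow_action_for_each(i, act, flow_action) {
	tc_act = mlx5e_tc_act_get(act->id, ns_type);
	if (!tc_act || !tc_act->can_offload(parse_state, act, i, attr))
		return -EOPNOTSUPP;

	err = tc_act->parse_action(parse_state, act, priv, attr);
	if (err)
		return err;
}
/* Then, once all actions are parsed: */
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);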
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
index 2e29a23bed12..dd025a95c439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -18,8 +19,7 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index f44515061228..25174f68613e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -8,9 +8,11 @@
static int
validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
@@ -20,7 +22,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
@@ -32,7 +34,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
}
if (!mlx5_chains_backwards_supported(chains) &&
- dest_chain <= flow->attr->chain) {
+ dest_chain <= attr->chain) {
NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
}
@@ -43,8 +45,8 @@ validate_goto_chain(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ if (attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
"Goto chain is not allowed if action has reformat or decap");
@@ -57,12 +59,13 @@ validate_goto_chain(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
- if (validate_goto_chain(flow->priv, flow, act, extack))
+ if (validate_goto_chain(flow->priv, flow, attr, act, extack))
return false;
return true;
@@ -74,8 +77,7 @@ tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_chain = act->chain_index;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
index d775c3d9edf3..e8d227595b3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index c614fc7fdc9c..4ac7de3f6afa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -10,6 +10,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc_priv.h"
#include "en_rep.h"
+#include "lag/lag.h"
static bool
same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
@@ -99,7 +100,8 @@ get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
static bool
tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
@@ -108,8 +110,8 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv = flow->priv;
struct mlx5_esw_flow_attr *esw_attr;
- parse_attr = flow->attr->parse_attr;
- esw_attr = flow->attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
if (!out_dev) {
/* out_dev is NULL when filters with
@@ -124,6 +126,16 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
return false;
}
+ if (parse_state->eth_pop && !parse_state->mpls_push) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan pop eth is supported only with mpls push");
+ return false;
+ }
+
+ if (flow_flag_test(parse_state->flow, L3_TO_L2_DECAP) && !parse_state->eth_push) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop is only supported with vlan eth push");
+ return false;
+ }
+
if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
@@ -177,6 +189,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
return -ENOMEM;
parse_state->encap = false;
+
+ if (parse_state->mpls_push) {
+ memcpy(&parse_attr->mpls_info[esw_attr->out_count],
+ &parse_state->mpls_info, sizeof(parse_state->mpls_info));
+ parse_state->mpls_push = false;
+ }
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
@@ -198,6 +216,7 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *uplink_dev;
struct mlx5e_priv *out_priv;
struct mlx5_eswitch *esw;
+ bool is_uplink_rep;
int *ifindexes;
int if_count;
int err;
@@ -212,6 +231,10 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
+ is_uplink_rep = mlx5e_eswitch_uplink_rep(out_dev);
+ err = mlx5_lag_do_mirred(priv->mdev, out_dev);
+ if (err)
+ return err;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
@@ -251,6 +274,14 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+
+ /* If the output device is a bond master, the rules are not explicit,
+ * so we don't attempt to count them.
+ */
+ if (is_uplink_rep && MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
+ MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
+ attr->lag.count = true;
+
esw_attr->out_count++;
return 0;
@@ -295,8 +326,7 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
index 2c74567b6d25..90b4c1b34776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
@@ -39,8 +40,7 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
index 784fc4f68b1e..f106190bf37c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -8,7 +8,8 @@
static bool
tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_priv *priv = parse_state->flow->priv;
@@ -22,6 +23,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
return true;
}
+static void
+copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
+ const struct flow_action_entry *act)
+{
+ mpls_info->label = act->mpls_push.label;
+ mpls_info->tc = act->mpls_push.tc;
+ mpls_info->bos = act->mpls_push.bos;
+ mpls_info->ttl = act->mpls_push.ttl;
+}
+
static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -29,6 +40,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
parse_state->mpls_push = true;
+ copy_mpls_info(&parse_state->mpls_info, act);
return 0;
}
@@ -36,21 +48,22 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
static bool
tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *filter_dev;
- filter_dev = flow->attr->parse_attr->filter_dev;
+ filter_dev = attr->parse_attr->filter_dev;
/* we only support mpls pop if it is the first action
+ * or the second action after a tunnel key unset,
* and the filter net device is bareudp. Subsequent
* actions can be pedit and the last can be mirred
* egress redirect.
*/
- if (act_index) {
- NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+ if ((act_index == 1 && !parse_state->decap) || act_index > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action or with decap");
return false;
}
@@ -68,7 +81,7 @@ tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+ attr->esw_attr->eth.h_proto = act->mpls_pop.proto;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 79addbbef087..47597c524e59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -42,12 +42,11 @@ out_err:
return -EOPNOTSUPP;
}
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
u8 htype = act->mangle.htype;
@@ -79,51 +78,11 @@ out_err:
return err;
}
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct netlink_ext_ack *extack)
-{
- u32 mask, val, offset;
- u32 *p;
-
- if (act->id != FLOW_ACTION_MANGLE) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
- return -EOPNOTSUPP;
- }
-
- if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
- NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
- return -EOPNOTSUPP;
- }
-
- mask = ~act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
- p = (u32 *)&parse_attr->eth;
- *(p + (offset >> 2)) |= (val & mask);
-
- return 0;
-}
-
-int
-mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(act, parse_attr, extack);
-
- return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
-}
-
static bool
tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -141,21 +100,16 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
ns_type = mlx5e_get_flow_namespace(flow);
- err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
- attr->parse_attr, parse_state->hdrs,
- flow, parse_state->extack);
+ err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, attr->parse_attr->hdrs,
+ parse_state->extack);
if (err)
return err;
- if (flow_flag_test(flow, L3_TO_L2_DECAP))
- goto out;
-
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
esw_attr->split_count = esw_attr->out_count;
-out:
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
index da8ab03af58f..434c8bd710a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -24,9 +24,7 @@ struct pedit_headers_action {
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
new file mode 100644
index 000000000000..c8e5ca65bb6e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return false;
+ }
+ if (mlx5e_policer_validate(parse_state->flow_action, act,
+ parse_state->extack))
+ return false;
+
+ return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
+}
+
+static int
+fill_meter_params_from_act(const struct flow_action_entry *act,
+ struct mlx5e_flow_meter_params *params)
+{
+ params->index = act->hw_index;
+ if (act->police.rate_bytes_ps) {
+ params->mode = MLX5_RATE_LIMIT_BPS;
+ /* change rate to bits per second */
+ params->rate = act->police.rate_bytes_ps << 3;
+ params->burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps) {
+ params->mode = MLX5_RATE_LIMIT_PPS;
+ params->rate = act->police.rate_pkt_ps;
+ params->burst = act->police.burst_pkt;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
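
A quick sanity check of the conversion: `tc ... action police rate 1mbit` reaches the driver as rate_bytes_ps = 125000, and 125000 << 3 = 1000000 restores bits per second for MLX5_RATE_LIMIT_BPS; packet-rate policers (rate_pkt_ps/burst_pkt) are passed through unchanged, and a policer specifying neither is rejected with -EOPNOTSUPP.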
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+
+ return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return true;
+}
+
+static int
+tc_act_police_offload(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ int err = 0;
+
+ err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ if (err)
+ return err;
+
+ err = fill_meter_params_from_act(act, &params);
+ if (err)
+ return err;
+
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) {
+ meter = mlx5e_tc_meter_replace(priv->mdev, &params);
+ } else if (!IS_ERR(meter)) {
+ err = mlx5e_tc_meter_update(meter, &params);
+ mlx5e_tc_meter_put(meter);
+ }
+
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ err = PTR_ERR(meter);
+ }
+
+ return err;
+}
+
+static int
+tc_act_police_destroy(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+ /* The first put balances the get above; the second releases the meter. */
+ mlx5e_tc_meter_put(meter);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+static int
+tc_act_police_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ u64 bytes, packets, drops, lastuse;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+
+ mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse);
+ flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_police = {
+ .can_offload = tc_act_can_offload_police,
+ .parse_action = tc_act_parse_police,
+ .is_multi_table_act = tc_act_is_multi_table_act_police,
+ .offload_action = tc_act_police_offload,
+ .destroy_action = tc_act_police_destroy,
+ .stats_action = tc_act_police_stats,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
index 0819110193dc..6454b031ff7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index 1c32e24e528d..ad09a8a5f36e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -7,16 +7,16 @@
static bool
tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5_esw_flow_attr *esw_attr;
- parse_attr = flow->attr->parse_attr;
- esw_attr = flow->attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
if (!out_dev)
return false;
@@ -58,8 +58,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *out_dev = act->dev;
int err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
index 6699bdf5cf01..2c0196431302 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -4,17 +4,21 @@
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
+#include "en/tc/act/sample.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
+ bool ct_nat;
- if (flow_flag_test(parse_state->flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+
+ if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
return false;
}
@@ -27,11 +31,7 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_sample_attr *sample_attr;
-
- sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
- if (!sample_attr)
- return -ENOMEM;
+ struct mlx5e_sample_attr *sample_attr = &attr->sample_attr;
sample_attr->rate = act->sample.rate;
sample_attr->group_num = act->sample.psample_group->group_num;
@@ -39,13 +39,33 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
if (act->sample.truncate)
sample_attr->trunc_size = act->sample.trunc_size;
- attr->sample_attr = sample_attr;
+ attr->flags |= MLX5_ATTR_FLAG_SAMPLE;
flow_flag_set(parse_state->flow, SAMPLE);
return 0;
}
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr)
+{
+ if (MLX5_CAP_GEN(mdev, reg_c_preserve) ||
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return true;
+
+ return false;
+}
+
+static bool
+tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return mlx5e_tc_act_sample_is_multi_table(priv->mdev, attr);
+}
+
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
+ .is_multi_table_act = tc_act_is_multi_table_act_sample,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h
new file mode 100644
index 000000000000..3efb3a15c5d2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_SAMPLE_H__
+#define __MLX5_EN_TC_ACT_SAMPLE_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr);
+
+#endif /* __MLX5_EN_TC_ACT_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index 046b64c2cec4..53b270f652b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -7,11 +7,12 @@
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- if (parse_state->num_actions != 1) {
+ if (parse_state->flow_action->num_entries != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
@@ -25,9 +26,8 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
index 6f4a2cf46afd..b4fa2de9711d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -8,7 +8,8 @@
static bool
tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
if (!act->tunnel) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
@@ -34,7 +35,8 @@ tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
static bool
tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index 70fc0c2d8813..b86ac604d0c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -9,7 +9,6 @@
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
@@ -26,7 +25,7 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
};
return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- &prio_tag_act, parse_attr, hdrs, action,
+ &prio_tag_act, parse_attr, action,
extack);
}
@@ -35,7 +34,8 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ struct mlx5e_tc_act_parse_state *parse_state)
{
u8 vlan_idx = attr->total_vlan;
@@ -85,6 +85,16 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
+ case FLOW_ACTION_VLAN_POP_ETH:
+ parse_state->eth_pop = true;
+ break;
+ case FLOW_ACTION_VLAN_PUSH_ETH:
+ if (!flow_flag_test(parse_state->flow, L3_TO_L2_DECAP))
+ return -EOPNOTSUPP;
+ parse_state->eth_push = true;
+ memcpy(attr->eth.h_dest, act->vlan_push_eth.dst, ETH_ALEN);
+ memcpy(attr->eth.h_source, act->vlan_push_eth.src, ETH_ALEN);
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
@@ -110,7 +120,7 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
if (err)
return err;
@@ -140,7 +150,7 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
- extack);
+ extack, NULL);
if (err)
return err;
}
@@ -151,7 +161,8 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -170,11 +181,11 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
/* Replace vlan pop+push with vlan modify */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
- attr->parse_attr, parse_state->hdrs,
- &attr->action, parse_state->extack);
+ attr->parse_attr, &attr->action,
+ parse_state->extack);
} else {
err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
- parse_state->extack);
+ parse_state->extack, parse_state);
}
if (err)
@@ -191,7 +202,6 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
- struct pedit_headers_action *hdrs = parse_state->hdrs;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
@@ -202,7 +212,7 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
* tag rewrite.
*/
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr,
&attr->action, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
index 3d62f13ab61f..2fa58c6f44eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -24,7 +24,6 @@ int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 63e36e7f53e3..9a8a1a6bd99e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -12,7 +12,6 @@ int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
u16 mask16 = VLAN_VID_MASK;
@@ -44,8 +43,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
- NULL, extack);
+ err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr->hdrs,
+ extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -54,7 +53,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -69,8 +69,7 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
int err;
ns_type = mlx5e_get_flow_namespace(parse_state->flow);
- err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
- attr->parse_attr, parse_state->hdrs,
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act, attr->parse_attr,
&attr->action, parse_state->extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
new file mode 100644
index 000000000000..bb6b1a979ba1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_TC_CT_FS_H__
+#define __MLX5_EN_TC_CT_FS_H__
+
+struct mlx5_ct_fs {
+ const struct net_device *netdev;
+ struct mlx5_core_dev *dev;
+
+ /* private data */
+ void *priv_data[];
+};
+
+struct mlx5_ct_fs_rule {
+};
+
+struct mlx5_ct_fs_ops {
+ int (*init)(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct);
+ void (*destroy)(struct mlx5_ct_fs *fs);
+
+ struct mlx5_ct_fs_rule * (*ct_rule_add)(struct mlx5_ct_fs *fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ struct flow_rule *flow_rule);
+ void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+
+ size_t priv_size;
+};
+
+static inline void *mlx5_ct_fs_priv(struct mlx5_ct_fs *fs)
+{
+ return &fs->priv_data;
+}
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void);
+
+#if IS_ENABLED(CONFIG_MLX5_SW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_smfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+
+#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
new file mode 100644
index 000000000000..ae4f55be48ce
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(fs->netdev, "ct_fs_dmfs debug: " fmt "\n", ##args)
+
+struct mlx5_ct_fs_dmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+};
+
+static int
+mlx5_ct_fs_dmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ return 0;
+}
+
+static void
+mlx5_ct_fs_dmfs_destroy(struct mlx5_ct_fs *fs)
+{
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_dmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule;
+ int err;
+
+ dmfs_rule = kzalloc(sizeof(*dmfs_rule), GFP_KERNEL);
+ if (!dmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ dmfs_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(dmfs_rule->rule)) {
+ err = PTR_ERR(dmfs_rule->rule);
+ ct_dbg("Failed to add ct entry fs rule");
+ goto err_insert;
+ }
+
+ dmfs_rule->attr = attr;
+
+ return &dmfs_rule->fs_rule;
+
+err_insert:
+ kfree(dmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+
+ mlx5_tc_rule_delete(netdev_priv(fs->netdev), dmfs_rule->rule, dmfs_rule->attr);
+ kfree(dmfs_rule);
+}
+
+static struct mlx5_ct_fs_ops dmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+
+ .init = mlx5_ct_fs_dmfs_init,
+ .destroy = mlx5_ct_fs_dmfs_destroy,
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void)
+{
+ return &dmfs_ops;
+}
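
The dmfs ("device managed flow steering") backend is deliberately a pass-through: ct_rule_add/ct_rule_del wrap mlx5_tc_rule_insert()/mlx5_tc_rule_delete() directly, and its ops leave priv_size at zero. A hedged sketch of how a caller might choose a backend; the selection shown is illustrative, not the driver's exact policy (which lands in tc_ct.c):

    /* Sketch: prefer the SMFS backend when SW steering is built in (its
     * ops getter returns NULL otherwise), falling back to plain DMFS.
     */
    static struct mlx5_ct_fs_ops *mlx5_ct_fs_pick_ops(void)
    {
            struct mlx5_ct_fs_ops *ops = mlx5_ct_fs_smfs_ops_get();

            return ops ? ops : mlx5_ct_fs_dmfs_ops_get();
    }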
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
new file mode 100644
index 000000000000..2b80fe73549d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/refcount.h>
+
+#include "en_tc.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#include "lib/smfs.h"
+
+#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+
+struct mlx5_ct_fs_smfs_matcher {
+ struct mlx5dr_matcher *dr_matcher;
+ struct list_head list;
+ int prio;
+ refcount_t ref;
+};
+
+struct mlx5_ct_fs_smfs_matchers {
+ struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
+ struct list_head used;
+};
+
+struct mlx5_ct_fs_smfs {
+ struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
+ struct mlx5_ct_fs_smfs_matchers matchers;
+ struct mlx5_ct_fs_smfs_matchers matchers_nat;
+ struct mlx5dr_action *fwd_action;
+ struct mlx5_flow_table *ct_nat;
+ struct mutex lock; /* Guards matchers */
+};
+
+struct mlx5_ct_fs_smfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5dr_rule *rule;
+ struct mlx5dr_action *count_action;
+ struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+};
+
+static inline void
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
+ bool gre)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+
+ if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ if (likely(ipv4)) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else {
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xFF,
+ MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xFF,
+ MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6));
+ }
+
+ if (likely(tcp)) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(MLX5_CT_TCP_FLAGS_MASK));
+ } else if (!gre) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
+ }
+
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
+}
+
+static struct mlx5dr_matcher *
+mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
+ bool tcp, bool gre, u32 priority)
+{
+ struct mlx5dr_matcher *dr_matcher;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+
+ dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
+ kvfree(spec);
+ if (!dr_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return dr_matcher;
+}
+
+static struct mlx5_ct_fs_smfs_matcher *
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
+ struct mlx5_ct_fs_smfs_matchers *matchers;
+ struct mlx5dr_matcher *dr_matcher;
+ struct mlx5dr_table *tbl;
+ struct list_head *prev;
+ int prio;
+
+ matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
+ smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
+
+ if (refcount_inc_not_zero(&smfs_matcher->ref))
+ return smfs_matcher;
+
+ mutex_lock(&fs_smfs->lock);
+
+ /* Retry with the lock held, as another thread might have already created
+ * the relevant matcher before we acquired the lock
+ */
+ if (refcount_inc_not_zero(&smfs_matcher->ref))
+ goto out_unlock;
+
+ /* Find the next available priority in the sorted used list */
+ prio = 0;
+ prev = &matchers->used;
+ list_for_each_entry(m, &matchers->used, list) {
+ prev = &m->list;
+
+ if (m->prio == prio)
+ prio = m->prio + 1;
+ else
+ break;
+ }
+
+ tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
+ dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
+ if (IS_ERR(dr_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
+
+ smfs_matcher = ERR_CAST(dr_matcher);
+ goto out_unlock;
+ }
+
+ smfs_matcher->dr_matcher = dr_matcher;
+ smfs_matcher->prio = prio;
+ list_add(&smfs_matcher->list, prev);
+ refcount_set(&smfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_smfs->lock);
+ return smfs_matcher;
+}
+
+static void
+mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
+ return;
+
+ mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
+ list_del(&smfs_matcher->list);
+ mutex_unlock(&fs_smfs->lock);
+}
+
+static int
+mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
+ ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
+ ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
+ fs_smfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables\n");
+ return -EOPNOTSUPP;
+ }
+
+ ct_dbg("using smfs steering");
+
+ fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
+ if (!fs_smfs->fwd_action)
+ return -EINVAL;
+
+ fs_smfs->ct_tbl = ct_tbl;
+ fs_smfs->ct_nat_tbl = ct_nat_tbl;
+ mutex_init(&fs_smfs->lock);
+ INIT_LIST_HEAD(&fs_smfs->matchers.used);
+ INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);
+
+ return 0;
+}
+
+static void
+mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ mlx5_smfs_action_destroy(fs_smfs->fwd_action);
+}
+
+static inline bool
+mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
+{
+#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+ const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
+ const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
+ const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
+ const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
+static bool
+mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ ct_dbg("rule uses unexpected dissectors (0x%08x)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+ struct mlx5_ct_fs_smfs_rule *smfs_rule;
+ struct mlx5dr_action *actions[5];
+ struct mlx5dr_rule *rule;
+ int num_actions = 0, err;
+ bool nat, tcp, ipv4, gre;
+
+ if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
+ if (!smfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
+ if (!smfs_rule->count_action) {
+ err = -EINVAL;
+ goto err_count;
+ }
+
+ actions[num_actions++] = smfs_rule->count_action;
+ actions[num_actions++] = attr->modify_hdr->action.dr_action;
+ actions[num_actions++] = fs_smfs->fwd_action;
+
+ nat = (attr->ft == fs_smfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
+ if (IS_ERR(smfs_matcher)) {
+ err = PTR_ERR(smfs_matcher);
+ goto err_matcher;
+ }
+
+ rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
+ spec->flow_context.flow_source);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_create;
+ }
+
+ smfs_rule->rule = rule;
+ smfs_rule->smfs_matcher = smfs_matcher;
+
+ return &smfs_rule->fs_rule;
+
+err_create:
+ mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
+err_matcher:
+ mlx5_smfs_action_destroy(smfs_rule->count_action);
+err_count:
+ kfree(smfs_rule);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
+ mlx5_smfs_action_destroy(smfs_rule->count_action);
+ kfree(smfs_rule);
+}
+
+static struct mlx5_ct_fs_ops fs_smfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+
+ .init = mlx5_ct_fs_smfs_init,
+ .destroy = mlx5_ct_fs_smfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_smfs),
+};
+
+struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+ return &fs_smfs_ops;
+}
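
Worth pausing on the slot arithmetic in mlx5_ct_fs_smfs_matcher_get(): tcp and gre are mutually exclusive (plain UDP is the "neither" case), so ipv4 * 3 + tcp * 2 + gre yields a unique index in 0..5, matching the six-entry smfs_matchers array. A standalone check of that mapping, compilable outside the kernel:

    #include <stdio.h>

    /* Enumerate the six valid (ipv4, tcp, gre) combinations and print the
     * slot computed by the driver's "ipv4 * 3 + tcp * 2 + gre" expression:
     * udp/v6 -> 0, gre/v6 -> 1, tcp/v6 -> 2, udp/v4 -> 3, gre/v4 -> 4,
     * tcp/v4 -> 5.
     */
    int main(void)
    {
            static const char * const l4[] = { "udp", "tcp", "gre" };
            int ipv4, proto;

            for (ipv4 = 0; ipv4 <= 1; ipv4++) {
                    for (proto = 0; proto < 3; proto++) {
                            int tcp = proto == 1, gre = proto == 2;

                            printf("%s/ipv%c -> slot %d\n", l4[proto],
                                   ipv4 ? '4' : '6',
                                   ipv4 * 3 + tcp * 2 + gre);
                    }
            }
            return 0;
    }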
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
new file mode 100644
index 000000000000..be74e1403328
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/math64.h>
+#include "lib/aso.h"
+#include "en/tc/post_act.h"
+#include "meter.h"
+#include "en/tc_priv.h"
+
+#define MLX5_START_COLOR_SHIFT 28
+#define MLX5_METER_MODE_SHIFT 24
+#define MLX5_CBS_EXP_SHIFT 24
+#define MLX5_CBS_MAN_SHIFT 16
+#define MLX5_CIR_EXP_SHIFT 8
+
+/* cir = 8*(10^9)*cir_mantissa/(2^cir_exponent)) bits/s */
+#define MLX5_CONST_CIR 8000000000ULL
+#define MLX5_CALC_CIR(m, e) ((MLX5_CONST_CIR * (m)) >> (e))
+#define MLX5_MAX_CIR ((MLX5_CONST_CIR * 0x100) - 1)
+
+/* cbs = cbs_mantissa*2^cbs_exponent */
+#define MLX5_CALC_CBS(m, e) ((m) << (e))
+#define MLX5_MAX_CBS ((0x100ULL << 0x1F) - 1)
+#define MLX5_MAX_HW_CBS 0x7FFFFFFF
+
+struct mlx5e_flow_meter_aso_obj {
+ struct list_head entry;
+ int base_id;
+ int total_meters;
+
+ unsigned long meters_map[]; /* must be at the end of this struct */
+};
+
+struct mlx5e_flow_meters {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_aso *aso;
+ struct mutex aso_lock; /* Protects aso operations */
+ int log_granularity;
+ u32 pdn;
+
+ DECLARE_HASHTABLE(hashtbl, 8);
+
+ struct mutex sync_lock; /* protect flow meter operations */
+ struct list_head partial_list;
+ struct list_head full_list;
+
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_post_act *post_act;
+};
+
+static void
+mlx5e_flow_meter_cir_calc(u64 cir, u8 *man, u8 *exp)
+{
+ s64 _cir, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cir << e;
+ if ((s64)m < 0) /* overflow */
+ break;
+ m = div64_u64(m, MLX5_CONST_CIR);
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cir = MLX5_CALC_CIR(m, e);
+ _delta = cir - _cir;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+static void
+mlx5e_flow_meter_cbs_calc(u64 cbs, u8 *man, u8 *exp)
+{
+ s64 _cbs, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cbs >> e;
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cbs = MLX5_CALC_CBS(m, e);
+ _delta = cbs - _cbs;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params)
+{
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl;
+ struct mlx5_wqe_aso_data_seg *aso_data;
+ struct mlx5e_flow_meters *flow_meters;
+ u8 cir_man, cir_exp, cbs_man, cbs_exp;
+ struct mlx5_aso_wqe *aso_wqe;
+ unsigned long expires;
+ struct mlx5_aso *aso;
+ u64 rate, burst;
+ u8 ds_cnt;
+ int err;
+
+ rate = meter_params->rate;
+ burst = meter_params->burst;
+
+ /* HW treats each packet as 128 bytes in PPS mode */
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS) {
+ rate <<= 10;
+ burst <<= 7;
+ }
+
+ if (!rate || rate > MLX5_MAX_CIR || !burst || burst > MLX5_MAX_CBS)
+ return -EINVAL;
+
+ /* HW has limitation of total 31 bits for cbs */
+ if (burst > MLX5_MAX_HW_CBS) {
+ mlx5_core_warn(mdev,
+ "burst(%lld) is too large, use HW allowed value(%d)\n",
+ burst, MLX5_MAX_HW_CBS);
+ burst = MLX5_MAX_HW_CBS;
+ }
+
+ mlx5_core_dbg(mdev, "meter mode=%d\n", meter_params->mode);
+ mlx5e_flow_meter_cir_calc(rate, &cir_man, &cir_exp);
+ mlx5_core_dbg(mdev, "rate=%lld, cir=%lld, exp=%d, man=%d\n",
+ rate, MLX5_CALC_CIR(cir_man, cir_exp), cir_exp, cir_man);
+ mlx5e_flow_meter_cbs_calc(burst, &cbs_man, &cbs_exp);
+ mlx5_core_dbg(mdev, "burst=%lld, cbs=%lld, exp=%d, man=%d\n",
+ burst, MLX5_CALC_CBS((u64)cbs_man, cbs_exp), cbs_exp, cbs_man);
+
+ if (!cir_man || !cbs_man)
+ return -EINVAL;
+
+ flow_meters = meter->flow_meters;
+ aso = flow_meters->aso;
+
+ mutex_lock(&flow_meters->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso, ds_cnt, aso_wqe, meter->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
+
+ aso_ctrl = &aso_wqe->aso_ctrl;
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
+ aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+ MLX5_ASO_ALWAYS_TRUE << 4;
+ aso_ctrl->data_offset_condition_operand = MLX5_ASO_LOGICAL_OR << 6;
+ aso_ctrl->data_mask = cpu_to_be64(0x80FFFFFFULL << (meter->idx ? 0 : 32));
+
+ aso_data = (struct mlx5_wqe_aso_data_seg *)(aso_wqe + 1);
+ memset(aso_data, 0, sizeof(*aso_data));
+ aso_data->bytewise_data[meter->idx * 8] = cpu_to_be32((0x1 << 31) | /* valid */
+ (MLX5_FLOW_METER_COLOR_GREEN << MLX5_START_COLOR_SHIFT));
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS)
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_NUM_PACKETS << MLX5_METER_MODE_SHIFT);
+ else
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH << MLX5_METER_MODE_SHIFT);
+
+ aso_data->bytewise_data[meter->idx * 8 + 2] = cpu_to_be32((cbs_exp << MLX5_CBS_EXP_SHIFT) |
+ (cbs_man << MLX5_CBS_MAN_SHIFT) |
+ (cir_exp << MLX5_CIR_EXP_SHIFT) |
+ cir_man);
+
+ mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
+
+ /* With newer FW, completion of the first ASO WQE can take more than 2us, so wait up to 10ms. */
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ err = mlx5_aso_poll_cq(aso, true);
+ if (err)
+ usleep_range(2, 10);
+ } while (err && time_is_after_jiffies(expires));
+ mutex_unlock(&flow_meters->aso_lock);
+
+ return err;
+}
+
+static int
+mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ void *obj;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+
+ obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
+ MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err) {
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) created\n", *obj_id);
+ }
+
+ return err;
+}
+
+static void
+mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) destroyed\n", obj_id);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+{
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5_fc *counter;
+ int err, pos, total;
+ u32 id;
+
+ meter = kzalloc(sizeof(*meter), GFP_KERNEL);
+ if (!meter)
+ return ERR_PTR(-ENOMEM);
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_red_counter;
+ }
+ meter->red_counter = counter;
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_green_counter;
+ }
+ meter->green_counter = counter;
+
+ meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
+ struct mlx5e_flow_meter_aso_obj,
+ entry);
+ /* 2 meters in one object */
+ total = 1 << (flow_meters->log_granularity + 1);
+ if (!meters_obj) {
+ err = mlx5e_flow_meter_create_aso_obj(flow_meters, &id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create flow meter ASO object\n");
+ goto err_create;
+ }
+
+ meters_obj = kzalloc(sizeof(*meters_obj) + BITS_TO_BYTES(total),
+ GFP_KERNEL);
+ if (!meters_obj) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ meters_obj->base_id = id;
+ meters_obj->total_meters = total;
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ pos = 0;
+ } else {
+ pos = find_first_zero_bit(meters_obj->meters_map, total);
+ if (bitmap_weight(meters_obj->meters_map, total) == total - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->full_list);
+ }
+ }
+
+ bitmap_set(meters_obj->meters_map, pos, 1);
+ meter->flow_meters = flow_meters;
+ meter->meters_obj = meters_obj;
+ meter->obj_id = meters_obj->base_id + pos / 2;
+ meter->idx = pos % 2;
+
+ mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+
+ return meter;
+
+err_mem:
+ mlx5e_flow_meter_destroy_aso_obj(mdev, id);
+err_create:
+ mlx5_fc_destroy(mdev, meter->green_counter);
+err_green_counter:
+ mlx5_fc_destroy(mdev, meter->red_counter);
+err_red_counter:
+ kfree(meter);
+ return ERR_PTR(err);
+}
+
+static void
+__mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ int n, pos;
+
+ mlx5_fc_destroy(mdev, meter->green_counter);
+ mlx5_fc_destroy(mdev, meter->red_counter);
+
+ meters_obj = meter->meters_obj;
+ pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
+ bitmap_clear(meters_obj->meters_map, pos, 1);
+ n = bitmap_weight(meters_obj->meters_map, meters_obj->total_meters);
+ if (n == 0) {
+ list_del(&meters_obj->entry);
+ mlx5e_flow_meter_destroy_aso_obj(mdev, meters_obj->base_id);
+ kfree(meters_obj);
+ } else if (n == meters_obj->total_meters - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ }
+
+ mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+ kfree(meter);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index)
+ if (meter->params.index == index)
+ goto add_ref;
+
+ return ERR_PTR(-ENOENT);
+
+add_ref:
+ meter->refcnt++;
+
+ return meter;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ mutex_unlock(&flow_meters->sync_lock);
+
+ return meter;
+}
+
+static void
+__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
+ }
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+ mutex_lock(&flow_meters->sync_lock);
+ __mlx5e_tc_meter_put(meter);
+ mutex_unlock(&flow_meters->sync_lock);
+}
+
+static struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = __mlx5e_flow_meter_alloc(flow_meters);
+ if (IS_ERR(meter))
+ return meter;
+
+ hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
+ meter->params.index = params->index;
+ meter->refcnt++;
+
+ return meter;
+}
+
+static int
+__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ int err = 0;
+
+ if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
+ meter->params.burst != params->burst) {
+ err = mlx5e_tc_meter_modify(mdev, meter, params);
+ if (err)
+ goto out;
+
+ meter->params.mode = params->mode;
+ meter->params.rate = params->rate;
+ meter->params.burst = params->burst;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_meters->sync_lock);
+ err = __mlx5e_tc_meter_update(meter, params);
+ mutex_unlock(&flow_meters->sync_lock);
+ return err;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ if (IS_ERR(meter)) {
+ meter = mlx5e_tc_meter_alloc(flow_meters, params);
+ if (IS_ERR(meter)) {
+ err = PTR_ERR(meter);
+ goto err_get;
+ }
+ }
+
+ err = __mlx5e_tc_meter_update(meter, params);
+ if (err)
+ goto err_update;
+
+ mutex_unlock(&flow_meters->sync_lock);
+ return meter;
+
+err_update:
+ __mlx5e_tc_meter_put(meter);
+err_get:
+ mutex_unlock(&flow_meters->sync_lock);
+ return ERR_PTR(err);
+}
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters)
+{
+ return flow_meters->ns_type;
+}
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (IS_ERR_OR_NULL(post_act)) {
+ netdev_dbg(priv->netdev,
+ "flow meter offload is not supported, post action is missing\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ flow_meters = kzalloc(sizeof(*flow_meters), GFP_KERNEL);
+ if (!flow_meters)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_alloc_pd(mdev, &flow_meters->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc pd for flow meter aso, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_meters->aso = mlx5_aso_create(mdev, flow_meters->pdn);
+ if (IS_ERR(flow_meters->aso)) {
+ mlx5_core_warn(mdev, "Failed to create aso wqe for flow meter\n");
+ err = PTR_ERR(flow_meters->aso);
+ goto err_sq;
+ }
+
+ mutex_init(&flow_meters->sync_lock);
+ INIT_LIST_HEAD(&flow_meters->partial_list);
+ INIT_LIST_HEAD(&flow_meters->full_list);
+
+ flow_meters->ns_type = ns_type;
+ flow_meters->mdev = mdev;
+ flow_meters->post_act = post_act;
+ mutex_init(&flow_meters->aso_lock);
+ flow_meters->log_granularity = min_t(int, 6,
+ MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));
+
+ return flow_meters;
+
+err_sq:
+ mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
+err_out:
+ kfree(flow_meters);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
+{
+ if (IS_ERR_OR_NULL(flow_meters))
+ return;
+
+ mlx5_aso_destroy(flow_meters->aso);
+ mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
+ kfree(flow_meters);
+}
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse)
+{
+ u64 bytes1, packets1, lastuse1;
+ u64 bytes2, packets2, lastuse2;
+
+ mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+
+ *bytes = bytes1 + bytes2;
+ *packets = packets1 + packets2;
+ *drops = packets2;
+ *lastuse = max_t(u64, lastuse1, lastuse2);
+}
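
mlx5e_flow_meter_cir_calc() above brute-forces the (mantissa, exponent) pair whose reconstruction 8*10^9*man >> exp lands closest to the requested rate, stopping early on an exact hit. The same search lifted into a standalone program so the rounding behavior can be inspected; a sketch mirroring the driver loop, not shared code:

    #include <stdio.h>
    #include <stdint.h>

    #define CONST_CIR 8000000000ULL
    #define CALC_CIR(m, e) ((CONST_CIR * (m)) >> (e))

    /* For each 5-bit exponent derive the 8-bit mantissa and keep the
     * (man, exp) pair whose reconstructed rate is closest to the target.
     */
    static void cir_calc(uint64_t cir, uint8_t *man, uint8_t *exp)
    {
            int64_t _cir, _delta, delta = INT64_MAX;
            uint8_t e, best_man = 0, best_exp = 0;
            uint64_t m;

            for (e = 0; e <= 0x1F; e++) {
                    m = cir << e;
                    if ((int64_t)m < 0)     /* shifted value overflowed */
                            break;
                    m /= CONST_CIR;
                    if (m > 0xFF)           /* mantissa is 8 bits wide */
                            continue;
                    _cir = CALC_CIR(m, e);
                    _delta = cir - _cir;    /* always >= 0, m was floored */
                    if (_delta < delta) {
                            best_man = m;
                            best_exp = e;
                            if (!_delta)
                                    break;
                            delta = _delta;
                    }
            }
            *man = best_man;
            *exp = best_exp;
    }

    int main(void)
    {
            uint8_t man, exp;

            cir_calc(1000000000ULL, &man, &exp);    /* 1 Gbit/s */
            printf("man=%u exp=%u -> cir=%llu bit/s\n", man, exp,
                   (unsigned long long)CALC_CIR((uint64_t)man, exp));
            return 0;
    }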
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
new file mode 100644
index 000000000000..6de6e8a16327
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_FLOW_METER_H__
+#define __MLX5_EN_FLOW_METER_H__
+
+struct mlx5e_post_meter_priv;
+struct mlx5e_flow_meter_aso_obj;
+struct mlx5e_flow_meters;
+struct mlx5_flow_attr;
+
+enum mlx5e_flow_meter_mode {
+ MLX5_RATE_LIMIT_BPS,
+ MLX5_RATE_LIMIT_PPS,
+};
+
+struct mlx5e_flow_meter_params {
+ enum mlx5e_flow_meter_mode mode;
+ /* police action index */
+ u32 index;
+ u64 rate;
+ u64 burst;
+};
+
+struct mlx5e_flow_meter_handle {
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ u32 obj_id;
+ u8 idx;
+
+ int refcnt;
+ struct hlist_node hlist;
+ struct mlx5e_flow_meter_params params;
+
+ struct mlx5_fc *green_counter;
+ struct mlx5_fc *red_counter;
+};
+
+struct mlx5e_meter_attr {
+ struct mlx5e_flow_meter_params params;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5e_post_meter_priv *post_meter;
+};
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params);
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params);
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters);
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_action);
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+
+#endif /* __MLX5_EN_FLOW_METER_H__ */
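
The API in meter.h is reference-counted around the police action index: mlx5e_tc_meter_replace() resolves or creates the meter and programs rate/burst, mlx5e_tc_meter_update() pushes new parameters, and mlx5e_tc_meter_put() drops the reference. A hedged usage sketch (kernel context assumed; error handling trimmed, and the real TC police callers are not part of this hunk):

    /* Sketch: typical lifecycle for one police action, with index, rate
     * and burst taken from the TC action. Not the verbatim driver caller.
     */
    static int example_police_offload(struct mlx5_core_dev *mdev, u32 index,
                                      u64 rate, u64 burst)
    {
            struct mlx5e_flow_meter_params params = {
                    .mode = MLX5_RATE_LIMIT_BPS,
                    .index = index,
                    .rate = rate,
                    .burst = burst,
            };
            struct mlx5e_flow_meter_handle *meter;

            meter = mlx5e_tc_meter_replace(mdev, &params); /* get or create */
            if (IS_ERR(meter))
                    return PTR_ERR(meter);

            /* ... attach the meter to a flow attr and offload the rule ... */

            mlx5e_tc_meter_put(meter); /* on teardown */
            return 0;
    }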
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 31b4e39be2d3..4e48946c4c2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
@@ -21,9 +22,9 @@ struct mlx5e_post_act_handle {
u32 id;
};
-#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
-#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0)
-#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX
+#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
@@ -35,7 +36,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
int err;
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
@@ -75,21 +76,47 @@ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
kfree(post_act);
}
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* The post action rule matches on fte_id and executes the original rule's TC action */
+ mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);
+
+ handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
+ if (IS_ERR(handle->rule)) {
+ err = PTR_ERR(handle->rule);
+ netdev_warn(post_act->priv->netdev, "Failed to add post action rule\n");
+ goto err_rule;
+ }
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
{
u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
- struct mlx5e_post_act_handle *handle = NULL;
- struct mlx5_flow_attr *post_attr = NULL;
- struct mlx5_flow_spec *spec = NULL;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *post_attr;
int err;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
- if (!handle || !spec || !post_attr) {
+ if (!handle || !post_attr) {
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(-ENOMEM);
}
@@ -100,7 +127,8 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->ft = post_act->ft;
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
- post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
@@ -112,36 +140,29 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
if (err)
goto err_xarray;
- /* Post action rule matches on fte_id and executes original rule's
- * tc rule action
- */
- mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG,
- handle->id, MLX5_POST_ACTION_MASK);
-
- handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr);
- if (IS_ERR(handle->rule)) {
- err = PTR_ERR(handle->rule);
- netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
- goto err_rule;
- }
handle->attr = post_attr;
- kvfree(spec);
return handle;
-err_rule:
- xa_erase(&post_act->ids, handle->id);
err_xarray:
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(err);
}
void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
+ handle->rule = NULL;
+}
+
+void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
- mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr);
+ if (!IS_ERR_OR_NULL(handle->rule))
+ mlx5e_tc_post_act_unoffload(post_act, handle);
xa_erase(&post_act->ids, handle->id);
kfree(handle->attr);
kfree(handle);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
index b530ec1981a5..f476774c0b75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
@@ -24,6 +24,14 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
+void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act);
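
The post_act change above splits handle allocation from rule installation: mlx5e_tc_post_act_add() now only allocates the handle and its fte_id, while mlx5e_tc_post_act_offload()/mlx5e_tc_post_act_unoffload() install and remove the actual rule, so a post action can be re-offloaded without losing its id. A hedged sketch of the intended call order (illustrative; the real callers are elsewhere in the series):

    /* Sketch, kernel context assumed: allocate once, offload/unoffload as
     * the underlying rule bounces, delete last.
     */
    static int example_post_act_flow(struct mlx5e_post_act *post_act,
                                     struct mlx5_flow_attr *attr)
    {
            struct mlx5e_post_act_handle *handle;
            int err;

            handle = mlx5e_tc_post_act_add(post_act, attr);
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            err = mlx5e_tc_post_act_offload(post_act, handle);
            if (err)
                    goto out_del;

            /* ... later, e.g. around a neigh update ... */
            mlx5e_tc_post_act_unoffload(post_act, handle);

    out_del:
            /* del also unoffloads if the rule is still installed */
            mlx5e_tc_post_act_del(post_act, handle);
            return err;
    }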
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
new file mode 100644
index 000000000000..8b77e822810e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "en/tc_priv.h"
+#include "post_meter.h"
+#include "en/tc/post_act.h"
+
+#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
+#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
+
+struct mlx5e_post_meter_priv {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *fwd_green_rule;
+ struct mlx5_flow_handle *drop_red_rule;
+};
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->ft;
+}
+
+static int
+mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+
+ root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
+ if (!root_ns) {
+ mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(post_meter->ft)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ return PTR_ERR(post_meter->ft);
+ }
+
+ return 0;
+}
+
+static int
+mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *misc2, *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc2 = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_5, MLX5_PACKET_COLOR_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
+ if (IS_ERR(post_meter->fg)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
+ err = PTR_ERR(post_meter->fg);
+ }
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter_id = mlx5_fc_id(red_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ err = PTR_ERR(rule);
+ goto err_red;
+ }
+ post_meter->drop_red_rule = rule;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(green_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ err = PTR_ERR(rule);
+ goto err_green;
+ }
+ post_meter->fwd_green_rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_green:
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+err_red:
+ kvfree(spec);
+ return err;
+}
+
+static void
+mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(post_meter->fwd_green_rule);
+}
+
+static void
+mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_group(post_meter->fg);
+}
+
+static void
+mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_table(post_meter->ft);
+}
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5e_post_meter_priv *post_meter;
+ int err;
+
+ post_meter = kzalloc(sizeof(*post_meter), GFP_KERNEL);
+ if (!post_meter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
+ if (err)
+ goto err_ft;
+
+ err = mlx5e_post_meter_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
+ red_counter);
+ if (err)
+ goto err_rules;
+
+ return post_meter;
+
+err_rules:
+ mlx5e_post_meter_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_table_destroy(post_meter);
+err_ft:
+ kfree(post_meter);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rules_destroy(post_meter);
+ mlx5e_post_meter_fg_destroy(post_meter);
+ mlx5e_post_meter_table_destroy(post_meter);
+ kfree(post_meter);
+}
+
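post_meter.c above builds a two-entry table keyed on the packet color the meter ASO wrote to reg_c_5: red packets hit a drop+count rule, green packets are counted and forwarded to the post-action table. A hedged sketch of wiring one up with the counters carried by a meter handle (namespace choice illustrative; the real caller appears later in the series):

    /* Sketch, kernel context assumed: create and destroy the color-dispatch
     * table for one meter, using the green/red counters from meter.h.
     */
    static struct mlx5e_post_meter_priv *
    example_post_meter_setup(struct mlx5e_priv *priv,
                             struct mlx5e_post_act *post_act,
                             struct mlx5e_flow_meter_handle *meter)
    {
            return mlx5e_post_meter_init(priv, MLX5_FLOW_NAMESPACE_FDB,
                                         post_act, meter->green_counter,
                                         meter->red_counter);
    }

    static void example_post_meter_teardown(struct mlx5e_post_meter_priv *pm)
    {
            mlx5e_post_meter_cleanup(pm);
    }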
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
new file mode 100644
index 000000000000..34d0e4b9fc7a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_POST_METER_H__
+#define __MLX5_EN_POST_METER_H__
+
+#define packet_color_to_reg { \
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5, \
+ .moffset = 0, \
+ .mlen = 8, \
+ .soffset = MLX5_BYTE_OFF(fte_match_param, \
+ misc_parameters_2.metadata_reg_c_5), \
+}
+
+struct mlx5e_post_meter_priv;
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter);
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+
+#endif /* __MLX5_EN_POST_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index ff4b4f8a5a9d..1cbd2eb9d04f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
@@ -46,14 +47,12 @@ struct mlx5e_sample_flow {
struct mlx5_flow_handle *pre_rule;
struct mlx5_flow_attr *post_attr;
struct mlx5_flow_handle *post_rule;
- struct mlx5e_post_act_handle *post_act_handle;
};
struct mlx5e_sample_restore {
struct hlist_node hlist;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *rule;
- struct mlx5e_post_act_handle *post_act_handle;
u32 obj_id;
int count;
};
@@ -94,6 +93,7 @@ sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample)
act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.vport.num = esw->manager_vport;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1);
if (IS_ERR(tc_psample->termtbl_rule)) {
err = PTR_ERR(tc_psample->termtbl_rule);
@@ -231,69 +231,46 @@ sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
*/
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
- struct mlx5e_post_act_handle *handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct mlx5_modify_hdr *modify_hdr;
int err;
- err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
CHAIN_TO_REG, obj_id);
if (err)
goto err_set_regc0;
- if (handle) {
- err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts);
- if (err)
- goto err_post_act;
- }
-
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_acts.num_actions,
- mod_acts.actions);
+ mod_acts->num_actions,
+ mod_acts->actions);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
}
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
return modify_hdr;
err_modify_hdr:
-err_post_act:
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
-static u32
-restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle)
-{
- return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0);
-}
-
-static bool
-restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
-{
- return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle;
-}
-
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct mlx5_eswitch *esw = tc_psample->esw;
struct mlx5_core_dev *mdev = esw->dev;
struct mlx5e_sample_restore *restore;
struct mlx5_modify_hdr *modify_hdr;
- u32 hash_key;
int err;
mutex_lock(&tc_psample->restore_lock);
- hash_key = restore_hash(obj_id, post_act_handle);
- hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key)
- if (restore_equal(restore, obj_id, post_act_handle))
+ hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
+ if (restore->obj_id == obj_id)
goto add_ref;
restore = kzalloc(sizeof(*restore), GFP_KERNEL);
@@ -302,9 +279,8 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_alloc;
}
restore->obj_id = obj_id;
- restore->post_act_handle = post_act_handle;
- modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle);
+ modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
@@ -317,7 +293,7 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_restore;
}
- hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key);
+ hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
restore->count++;
mutex_unlock(&tc_psample->restore_lock);
@@ -403,7 +379,7 @@ add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
post_attr->chain = 0;
post_attr->prio = 0;
post_attr->ft = default_tbl;
- post_attr->flags = MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ post_attr->flags = MLX5_ATTR_FLAG_NO_IN_PORT;
/* When offloading sample and encap action, if there is no valid
* neigh data struct, a slow path rule is offloaded first. Source
@@ -492,16 +468,16 @@ del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id)
+ struct mlx5_flow_attr *attr)
{
- struct mlx5e_post_act_handle *post_act_handle = NULL;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_esw_flow_attr *pre_esw_attr;
struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5e_sample_flow *sample_flow;
struct mlx5e_sample_attr *sample_attr;
struct mlx5_flow_attr *pre_attr;
+ u32 tunnel_id = attr->tunnel_id;
struct mlx5_eswitch *esw;
u32 default_tbl_id;
u32 obj_id;
@@ -513,7 +489,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
if (!sample_flow)
return ERR_PTR(-ENOMEM);
- sample_attr = attr->sample_attr;
+ sample_attr = &attr->sample_attr;
sample_attr->sample_flow = sample_flow;
/* For NICs with reg_c_preserve support or decap action, use
@@ -522,18 +498,11 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
* original flow table.
*/
esw = tc_psample->esw;
- if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
- attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
struct mlx5_flow_table *ft;
ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
default_tbl_id = ft->id;
- post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr);
- if (IS_ERR(post_act_handle)) {
- err = PTR_ERR(post_act_handle);
- goto err_post_act;
- }
- sample_flow->post_act_handle = post_act_handle;
} else {
err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
if (err)
@@ -546,6 +515,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
err = PTR_ERR(sample_flow->sampler);
goto err_sampler;
}
+ sample_attr->sampler_id = sample_flow->sampler->sampler_id;
/* Create an id mapping reg_c0 value to sample object. */
restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
@@ -559,7 +529,8 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_attr->restore_obj_id = obj_id;
/* Create sample restore context. */
- sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle);
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+ sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
if (IS_ERR(sample_flow->restore)) {
err = PTR_ERR(sample_flow->restore);
goto err_sample_restore;
@@ -580,13 +551,13 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
if (tunnel_id)
pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
- pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
+ pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
pre_attr->inner_match_level = attr->inner_match_level;
pre_attr->outer_match_level = attr->outer_match_level;
pre_attr->chain = attr->chain;
pre_attr->prio = attr->prio;
- pre_attr->sample_attr = attr->sample_attr;
- sample_attr->sampler_id = sample_flow->sampler->sampler_id;
+ pre_attr->ft = attr->ft;
+ pre_attr->sample_attr = *sample_attr;
pre_esw_attr = pre_attr->esw_attr;
pre_esw_attr->in_mdev = esw_attr->in_mdev;
pre_esw_attr->in_rep = esw_attr->in_rep;
@@ -611,9 +582,6 @@ err_sampler:
if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
- if (post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle);
-err_post_act:
kfree(sample_flow);
return ERR_PTR(err);
}
@@ -633,15 +601,13 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
* will hit fw syndromes.
*/
esw = tc_psample->esw;
- sample_flow = attr->sample_attr->sample_flow;
+ sample_flow = attr->sample_attr.sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
sample_restore_put(tc_psample, sample_flow->restore);
- mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
+ mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- else
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index 9ef8a49d7801..a569367eae4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -26,8 +26,7 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id);
+ struct mlx5_flow_attr *attr);
void
mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv,
@@ -45,8 +44,7 @@ mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
static inline struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id)
+ struct mlx5_flow_attr *attr)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 4a0d38d219ed..864ce0c393e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -15,18 +15,20 @@
#include <linux/refcount.h>
#include <linux/xarray.h>
#include <linux/if_macvlan.h>
+#include <linux/debugfs.h>
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+#include "en/tc_priv.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
+#include "fs_core.h"
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
@@ -34,8 +36,8 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
-#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
+#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
/* Statically allocate modify actions for
* ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
@@ -46,6 +48,15 @@
#define ct_dbg(fmt, args...)\
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
+struct mlx5_tc_ct_debugfs {
+ struct {
+ atomic_t offloaded;
+ atomic_t rx_dropped;
+ } stats;
+
+ struct dentry *root;
+};
+
struct mlx5_tc_ct_priv {
struct mlx5_core_dev *dev;
const struct net_device *netdev;
@@ -62,19 +73,23 @@ struct mlx5_tc_ct_priv {
struct mapping_ctx *labels_mapping;
enum mlx5_flow_namespace_type ns_type;
struct mlx5_fs_chains *chains;
+ struct mlx5_ct_fs *fs;
+ struct mlx5_ct_fs_ops *fs_ops;
spinlock_t ht_lock; /* protects ft entries */
+ struct workqueue_struct *wq;
+
+ struct mlx5_tc_ct_debugfs debugfs;
};
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
- struct mlx5e_post_act_handle *post_act_handle;
struct mlx5_ct_ft *ft;
u32 chain_mapping;
};
struct mlx5_ct_zone_rule {
- struct mlx5_flow_handle *rule;
+ struct mlx5_ct_fs_rule *rule;
struct mlx5e_mod_hdr_handle *mh;
struct mlx5_flow_attr *attr;
bool nat;
@@ -258,7 +273,8 @@ mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
return -EOPNOTSUPP;
}
} else {
- return -EOPNOTSUPP;
+ if (tuple->ip_proto != IPPROTO_GRE)
+ return -EOPNOTSUPP;
}
return 0;
@@ -505,7 +521,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
- mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
@@ -517,6 +533,8 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+
+ atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
static struct flow_action_entry *
@@ -579,6 +597,12 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
return 0;
}
+int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
+}
+
static int
mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
char *modact)
@@ -692,7 +716,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule,
struct mlx5e_mod_hdr_handle **mh,
- u8 zone_restore_id, bool nat)
+ u8 zone_restore_id, bool nat_table, bool has_nat)
{
DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
@@ -708,11 +732,12 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
&attr->ct_attr.ct_labels_id);
if (err)
return -EOPNOTSUPP;
- if (nat) {
- err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
- &mod_acts);
- if (err)
- goto err_mapping;
+ if (nat_table) {
+ if (has_nat) {
+ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
+ if (err)
+ goto err_mapping;
+ }
ct_state |= MLX5_CT_STATE_NAT_BIT;
}
@@ -727,7 +752,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- if (nat) {
+ if (nat_table && has_nat) {
attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
mod_acts.num_actions,
mod_acts.actions);
@@ -795,7 +820,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
&zone_rule->mh,
- zone_restore_id, nat);
+ zone_restore_id,
+ nat,
+ mlx5_tc_ct_entry_has_nat(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -807,16 +834,20 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_chain = 0;
attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- attr->outer_match_level = MLX5_MATCH_L4;
+ if (entry->tuple.ip_proto == IPPROTO_TCP ||
+ entry->tuple.ip_proto == IPPROTO_UDP)
+ attr->outer_match_level = MLX5_MATCH_L4;
+ else
+ attr->outer_match_level = MLX5_MATCH_L3;
attr->counter = entry->counter->counter;
- attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->in_mdev = priv->mdev;
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+ zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
if (IS_ERR(zone_rule->rule)) {
err = PTR_ERR(zone_rule->rule);
ct_dbg("Failed to add ct entry rule, nat: %d", nat);
@@ -911,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
static void
__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
{
- struct mlx5e_priv *priv;
-
if (!refcount_dec_and_test(&entry->refcnt))
return;
- priv = netdev_priv(entry->ct_priv->netdev);
INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
- queue_work(priv->wq, &entry->work);
+ queue_work(entry->ct_priv->wq, &entry->work);
}
static struct mlx5_ct_counter *
@@ -1027,6 +1055,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_nat;
+ atomic_inc(&ct_priv->debugfs.stats.offloaded);
return 0;
err_nat:
@@ -1154,7 +1183,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
}
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
- mlx5_tc_ct_entry_remove_from_tuples(entry);
spin_unlock_bh(&ct_priv->ht_lock);
mlx5_tc_ct_entry_put(entry);
@@ -1224,16 +1252,20 @@ mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
struct flow_keys flow_keys;
skb_reset_network_header(skb);
- skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+ skb_flow_dissect_flow_keys(skb, &flow_keys, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
tuple->zone = zone;
if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
- flow_keys.basic.ip_proto != IPPROTO_UDP)
+ flow_keys.basic.ip_proto != IPPROTO_UDP &&
+ flow_keys.basic.ip_proto != IPPROTO_GRE)
return false;
- tuple->port.src = flow_keys.ports.src;
- tuple->port.dst = flow_keys.ports.dst;
+ if (flow_keys.basic.ip_proto == IPPROTO_TCP ||
+ flow_keys.basic.ip_proto == IPPROTO_UDP) {
+ tuple->port.src = flow_keys.ports.src;
+ tuple->port.dst = flow_keys.ports.dst;
+ }
tuple->n_proto = flow_keys.basic.n_proto;
tuple->ip_proto = flow_keys.basic.ip_proto;
@@ -1400,9 +1432,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
- bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
- int err;
-
if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
@@ -1413,17 +1442,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
- if (!clear_action)
- goto out;
-
- err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
- return err;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
-out:
return 0;
}
@@ -1742,6 +1760,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
if (!refcount_dec_and_test(&ft->refcount))
return;
+ flush_workqueue(ct_priv->wq);
nf_flow_table_offload_del_cb(ft->nf_ft,
mlx5_tc_ct_block_flow_offload, ft);
rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
@@ -1756,7 +1775,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + ft prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
@@ -1766,7 +1785,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* v
* +---------------------+
* + pre_ct/pre_ct_nat + if matches +-------------------------+
- * + zone+nat match +---------------->+ post_act (see below) +
+ * + zone+nat match +---------------->+ post_act (see below) +
* +---------------------+ set zone +-------------------------+
* | set zone
* v
@@ -1781,21 +1800,19 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
- * + post_act + original filter actions
+ * + post_act + original filter actions
* + fte_id match +------------------------>
* +--------------+
*/
static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *orig_spec,
struct mlx5_flow_attr *attr)
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
@@ -1804,7 +1821,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!ct_flow) {
- kfree(ct_flow);
return ERR_PTR(-ENOMEM);
}
@@ -1818,14 +1834,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->ft = ft;
- handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- ct_dbg("Failed to allocate post action handle");
- goto err_post_act_handle;
- }
- ct_flow->post_act_handle = handle;
-
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
if (!ct_flow->pre_ct_attr) {
@@ -1835,6 +1843,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
+ pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1853,30 +1862,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts);
- if (err) {
- ct_dbg("Failed to set post action handle");
- goto err_mapping;
- }
-
/* If original flow is decap, we do it before going into ct table
* so add a rewrite for the tunnel match_id.
*/
if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
attr->chain == 0) {
- u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
-
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
ct_priv->ns_type,
TUNNEL_TO_REG,
- tun_id);
+ attr->tunnel_id);
if (err) {
ct_dbg("Failed to set tunnel register mapping");
goto err_mapping;
@@ -1884,8 +1885,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- pre_mod_acts.num_actions,
- pre_mod_acts.actions);
+ pre_mod_acts->num_actions,
+ pre_mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to create pre ct mod hdr");
@@ -1905,20 +1906,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
- mlx5e_tc_post_act_del(ct_priv->post_act, handle);
-err_post_act_handle:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
kfree(ct_flow);
@@ -1926,87 +1925,19 @@ err_ft:
return ERR_PTR(err);
}
-static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_flow_spec *orig_spec,
- struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_acts)
-{
- struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5_flow_attr *pre_ct_attr;
- struct mlx5_modify_hdr *mod_hdr;
- struct mlx5_flow_handle *rule;
- struct mlx5_ct_flow *ct_flow;
- int err;
-
- ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
- if (!ct_flow)
- return ERR_PTR(-ENOMEM);
-
- /* Base esw attributes on original rule attribute */
- pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
- if (!pre_ct_attr) {
- err = -ENOMEM;
- goto err_attr;
- }
-
- memcpy(pre_ct_attr, attr, attr_sz);
-
- mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- mod_acts->num_actions,
- mod_acts->actions);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- ct_dbg("Failed to add create ct clear mod hdr");
- goto err_mod_hdr;
- }
-
- pre_ct_attr->modify_hdr = mod_hdr;
-
- rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add ct clear rule");
- goto err_insert;
- }
-
- attr->ct_attr.ct_flow = ct_flow;
- ct_flow->pre_ct_attr = pre_ct_attr;
- ct_flow->pre_ct_rule = rule;
- return rule;
-
-err_insert:
- mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
-err_mod_hdr:
- netdev_warn(priv->netdev,
- "Failed to offload ct clear flow, err %d\n", err);
- kfree(pre_ct_attr);
-err_attr:
- kfree(ct_flow);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
- bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
struct mlx5_flow_handle *rule;
if (!priv)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&priv->control_lock);
-
- if (clear_action)
- rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
- else
- rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
+ rule = __mlx5_tc_ct_flow_offload(priv, spec, attr);
mutex_unlock(&priv->control_lock);
return rule;
@@ -2014,21 +1945,17 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_ct_flow *ct_flow)
+ struct mlx5_ct_flow *ct_flow,
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
- pre_ct_attr);
+ mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- if (ct_flow->post_act_handle) {
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
- mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle);
- mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
- }
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
kfree(ct_flow);
@@ -2036,7 +1963,6 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
@@ -2048,11 +1974,43 @@ mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
return;
mutex_lock(&priv->control_lock);
- __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+ __mlx5_tc_ct_delete_flow(priv, ct_flow, attr);
mutex_unlock(&priv->control_lock);
}
static int
+mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
+ struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
+ int err;
+
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
+ if (!ct_priv->fs)
+ return -ENOMEM;
+
+ ct_priv->fs->netdev = ct_priv->netdev;
+ ct_priv->fs->dev = ct_priv->dev;
+ ct_priv->fs_ops = fs_ops;
+
+ err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct);
+ if (err)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ kfree(ct_priv->fs);
+ return err;
+}
+
+static int
mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
const char **err_msg)
{
@@ -2104,7 +2062,7 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
/* Ignore_flow_level support isn't supported by default for VFs and so post_act
* won't be supported. Skip showing error msg.
*/
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
err_msg = "post action is missing";
err = -EOPNOTSUPP;
goto out_err;
@@ -2119,6 +2077,29 @@ out_err:
return err;
}
+static void
+mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
+{
+ bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB;
+ struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
+ char dirname[16] = {};
+
+ if (snprintf(dirname, sizeof(dirname), "ct_%s", is_fdb ? "fdb" : "nic") < 0)
+ return;
+
+ ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev));
+ debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
+ &ct_dbgfs->stats.offloaded);
+ debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
+ &ct_dbgfs->stats.rx_dropped);
+}
+
+static void
+mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
+{
+ debugfs_remove_recursive(ct_priv->debugfs.root);
+}
+
#define INIT_ERR_PREFIX "tc ct offload init failed"
struct mlx5_tc_ct_priv *
@@ -2190,8 +2171,23 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
goto err_ct_tuples_nat_ht;
+ ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
+ if (!ct_priv->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ err = mlx5_tc_ct_fs_init(ct_priv);
+ if (err)
+ goto err_init_fs;
+
+ mlx5_ct_tc_create_dbgfs(ct_priv);
return ct_priv;
+err_init_fs:
+ destroy_workqueue(ct_priv->wq);
+err_wq:
+ rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
err_ct_tuples_nat_ht:
rhashtable_destroy(&ct_priv->ct_tuples_ht);
err_ct_tuples_ht:
@@ -2220,8 +2216,13 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
if (!ct_priv)
return;
+ destroy_workqueue(ct_priv->wq);
+ mlx5_ct_tc_remove_dbgfs(ct_priv);
chains = ct_priv->chains;
+ ct_priv->fs_ops->destroy(ct_priv->fs);
+ kfree(ct_priv->fs);
+
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
@@ -2246,22 +2247,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return true;
if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
- return false;
+ goto out_inc_drop;
if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
- return false;
+ goto out_inc_drop;
spin_lock(&ct_priv->ht_lock);
entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
if (!entry) {
spin_unlock(&ct_priv->ht_lock);
- return false;
+ goto out_inc_drop;
}
if (IS_ERR(entry)) {
spin_unlock(&ct_priv->ht_lock);
- return false;
+ goto out_inc_drop;
}
spin_unlock(&ct_priv->ht_lock);
@@ -2269,4 +2270,8 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
__mlx5_tc_ct_entry_put(entry);
return true;
+
+out_inc_drop:
+ atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
+ return false;
}
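Throughout this hunk the open-coded GENMASK() pairs are replaced by MLX5_REG_MAPPING_MBITS()/MLX5_REG_MAPPING_MASK(), which derive both the field width and the mask from the single mlx5e_tc_attr_to_reg_mappings table. A compilable userspace model of that derivation (the 16- and 24-bit widths below are assumptions for illustration):

    #include <stdio.h>

    #define GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    struct reg_mapping { unsigned int moffset, mlen; };

    enum { ZONE_TO_REG, LABELS_TO_REG };

    static const struct reg_mapping mappings[] = {
            [ZONE_TO_REG]   = { .moffset = 0, .mlen = 16 },
            [LABELS_TO_REG] = { .moffset = 0, .mlen = 24 },
    };

    #define REG_MAPPING_MBITS(reg) (mappings[reg].mlen)
    #define REG_MAPPING_MASK(reg)  GENMASK(REG_MAPPING_MBITS(reg) - 1, 0)

    int main(void)
    {
            printf("zone:   %u bits, mask 0x%08x\n",
                   REG_MAPPING_MBITS(ZONE_TO_REG), REG_MAPPING_MASK(ZONE_TO_REG));
            printf("labels: %u bits, mask 0x%08x\n",
                   REG_MAPPING_MBITS(LABELS_TO_REG), REG_MAPPING_MASK(LABELS_TO_REG));
            return 0;
    }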
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 99662af1e41a..5bbd6b92840f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -62,10 +62,11 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_4),\
}
+/* 8 LSB of metadata C5 are reserved for packet color */
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
- .moffset = 0,\
- .mlen = 32,\
+ .moffset = 8,\
+ .mlen = 24,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -84,8 +85,8 @@ struct mlx5_ct_attr {
.mlen = ESW_ZONE_ID_BITS,\
}
-#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
-#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define MLX5_CT_ZONE_BITS MLX5_REG_MAPPING_MBITS(ZONE_TO_REG)
+#define MLX5_CT_ZONE_MASK MLX5_REG_MAPPING_MASK(ZONE_TO_REG)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
@@ -116,19 +117,21 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+int
+mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -171,6 +174,13 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
}
static inline int
+mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
@@ -183,7 +193,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
static inline struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
@@ -193,7 +202,6 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
static inline void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
}
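Per the comment in the hunk above, the low 8 bits of metadata register C5 are reserved for packet color, so the fte_id mapping shrinks to 24 bits at offset 8. A small sketch of that packing (values arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t fte_id = 0xabcdef, color = 0x2a, reg_c5;

            /* fte_id occupies bits 31:8, packet color bits 7:0 */
            reg_c5 = (fte_id << 8) | color;
            printf("reg_c5=0x%08x fte_id=0x%06x color=0x%02x\n",
                   reg_c5, reg_c5 >> 8, reg_c5 & 0xff);
            return 0;
    }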
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index f832c26ff2c3..2e42d7c5451e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -11,7 +11,6 @@
#define MLX5E_TC_MAX_SPLITS 1
-#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
@@ -35,14 +34,17 @@ enum {
struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
+ struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
- struct ethhdr eth;
struct mlx5e_tc_act_parse_state parse_state;
};
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
+
/* Helper struct for accessing a struct containing list_head array.
* Containing struct
* |- Helper array
@@ -94,6 +96,7 @@ struct mlx5e_tc_flow {
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+ struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
@@ -107,10 +110,21 @@ struct mlx5e_tc_flow {
struct rcu_head rcu_head;
struct completion init_done;
struct completion del_hw_done;
- int tunnel_id; /* the mapped tunnel id of this flow */
struct mlx5_flow_attr *attr;
+ struct list_head attrs;
+ u32 chain_mapping;
};
+struct mlx5_flow_handle *
+mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+void
+mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);
struct mlx5_flow_handle *
@@ -119,6 +133,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
+
+void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
+int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
+
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
@@ -173,6 +193,7 @@ struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec);
+
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
@@ -185,7 +206,13 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
+
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
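The "Helper struct for accessing a struct containing list_head array" comment kept in this header describes recovering the containing flow from a pointer to one array element. A userspace sketch of the pattern, assuming the caller knows the element index (names are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct item { int id; };

    struct flow {
            const char *name;
            struct item encaps[4];
    };

    /* step back out_index elements, then subtract the array's offset */
    static struct flow *owner_of(struct item *it, int out_index)
    {
            return container_of(it - out_index, struct flow, encaps[0]);
    }

    int main(void)
    {
            struct flow f = { .name = "flow0" };
            struct flow *owner = owner_of(&f.encaps[2], 2);

            printf("%s\n", owner->name); /* flow0 */
            return 0;
    }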
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 33815246fead..e6f64d890fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
+#include <net/inet_ecn.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -235,7 +236,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -350,7 +351,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -505,7 +506,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int err;
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
@@ -619,7 +620,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
@@ -712,6 +713,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct net_device *filter_dev)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_int_port *int_port;
TC_TUN_ROUTE_ATTR_INIT(attr);
u16 vport_num;
@@ -746,7 +748,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.vxlan_vni);
esw_attr->rx_tun_attr->decap_vport = vport_num;
- } else if (netif_is_ovs_master(attr.route_dev)) {
+ } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
attr.route_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS);
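The flowi4_tos change above masks out the two ECN bits before the FIB lookup, so congestion marking cannot steer tunnel traffic onto a different route; only the DSCP portion of the TOS byte participates. A worked example (INET_ECN_MASK as defined in include/net/inet_ecn.h):

    #include <stdint.h>
    #include <stdio.h>

    #define INET_ECN_MASK 3 /* low two bits of the TOS byte */

    int main(void)
    {
            uint8_t tos = 0xb9; /* DSCP 0x2e (EF) with ECN ECT(1) set */

            /* 0xb9 and 0xb8 must select the same route */
            printf("route key tos: 0x%02x\n",
                   (unsigned)(tos & ~INET_ECN_MASK)); /* 0xb8 */
            return 0;
    }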
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 9918ed8c059b..5aff97914367 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Do not offload flows with unresolved neighbors */
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
continue;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ continue;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
@@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5e_tc_unoffload_flow_post_acts(flow);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -488,12 +500,17 @@ static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
int out_index);
void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ int out_index)
{
struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (flow->attr->esw_attr->dests[out_index].flags &
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ if (attr->esw_attr->dests[out_index].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5e_detach_encap_route(priv, flow, out_index);
@@ -733,6 +750,7 @@ static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv)
static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct mlx5e_encap_entry *e,
bool new_encap_entry,
unsigned long tbl_time_before,
@@ -740,6 +758,7 @@ static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
@@ -748,8 +767,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
+ const struct mlx5e_mpls_info *mpls_info;
unsigned long tbl_time_before = 0;
struct mlx5e_encap_entry *e;
struct mlx5e_encap_key key;
@@ -760,6 +779,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = parse_attr->tun_info[out_index];
+ mpls_info = &parse_attr->mpls_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -810,6 +830,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
goto out_err_init;
}
e->tun_info = tun_info;
+ memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err_init;
@@ -834,8 +855,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
e->compl_result = 1;
attach_flow:
- err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before,
- out_index);
+ err = mlx5e_attach_encap_route(priv, flow, attr, e, entry_created,
+ tbl_time_before, out_index);
if (err)
goto out_err;
@@ -885,20 +906,18 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_pkt_reformat_params reformat_params;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
uintptr_t hash_key;
int err = 0;
- parse_attr = flow->attr->parse_attr;
- if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ if (sizeof(attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
NL_SET_ERR_MSG_MOD(extack,
"encap header larger than max supported");
return -EOPNOTSUPP;
}
- key.key = parse_attr->eth;
+ key.key = attr->eth;
hash_key = hash_decap_info(&key);
mutex_lock(&esw->offloads.decap_tbl_lock);
d = mlx5e_decap_get(priv, &key, hash_key);
@@ -928,8 +947,8 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
- reformat_params.size = sizeof(parse_attr->eth);
- reformat_params.data = &parse_attr->eth;
+ reformat_params.size = sizeof(attr->eth);
+ reformat_params.data = &attr->eth;
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
@@ -1198,6 +1217,7 @@ out:
static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct mlx5e_encap_entry *e,
bool new_encap_entry,
unsigned long tbl_time_before,
@@ -1206,7 +1226,6 @@ static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned long tbl_time_after = tbl_time_before;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_route_entry *r;
@@ -1357,17 +1376,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
list_for_each_entry(flow, encap_flows, tmp_list) {
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
if (flow_flag_test(flow, FAILED))
continue;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
- spec = &parse_attr->spec;
err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
e->out_dev, e->route_dev_ifindex,
@@ -1377,7 +1398,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
continue;
}
- err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
err);
@@ -1389,9 +1410,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
goto offload_to_slow_path;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ goto offload_to_slow_path;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index 3391504d9a08..d542b8476491 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -7,15 +7,19 @@
#include "tc_priv.h"
void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index);
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ int out_index);
int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
+
int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 60952b33b568..c5b1617d556f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
struct mlx5e_encap_entry *r)
{
const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+ const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
struct udphdr *udp = (struct udphdr *)(buf);
struct mpls_shim_hdr *mpls;
- u32 tun_id;
- tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
mpls = (struct mpls_shim_hdr *)(udp + 1);
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
- *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+ *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);
return 0;
}
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
void *headers_v)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct flow_match_enc_keyid enc_keyid;
struct flow_match_mpls match;
void *misc2_c;
void *misc2_v;
- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters_2);
- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters_2);
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
- return 0;
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
- return 0;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
-
- if (!enc_keyid.mask->keyid)
- return 0;
-
if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return -EOPNOTSUPP;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+ return 0;
+
flow_rule_match_mpls(rule, &match);
/* Only support matching the first LSE */
if (match.mask->used_lses != 1)
return -EOPNOTSUPP;
+ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_label,
match.mask->ls[0].mpls_label);
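generate_ip_tun_hdr() now encodes the MPLS shim from the parsed mpls_info fields instead of reusing the tunnel key. For reference, a userspace sketch of the RFC 3032 label stack entry layout that mpls_entry_encode() produces (20-bit label, 3-bit TC, bottom-of-stack bit, 8-bit TTL):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* label:20 | tc:3 | bos:1 | ttl:8, transmitted big-endian */
    static uint32_t mpls_encode(uint32_t label, uint8_t tc, int bos, uint8_t ttl)
    {
            return htonl((label << 12) | ((uint32_t)tc << 9) |
                         ((bos ? 1u : 0u) << 8) | ttl);
    }

    int main(void)
    {
            /* label 16000, tc 0, bottom-of-stack, ttl 64 */
            printf("0x%08x\n", ntohl(mpls_encode(16000, 0, 1, 64))); /* 0x03e80140 */
            return 0;
    }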
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index da169b816665..d4239e3b3c88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
break;
- case MLX5E_PACKET_MERGE_SHAMPO:
- MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index a55b066746cb..201ac7dd338f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
bool busy = false;
int work_done = 0;
+ rcu_read_lock();
+
ch_stats->poll++;
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
busy |= rq->post_wqes(rq);
- if (busy)
- return budget;
+ if (busy) {
+ work_done = budget;
+ goto out;
+ }
if (unlikely(!napi_complete_done(napi, work_done)))
- return work_done;
+ goto out;
mlx5e_cq_arm(&rq->cq);
+
+out:
+ rcu_read_unlock();
return work_done;
}
@@ -140,7 +147,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
+ netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -172,6 +179,7 @@ static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
napi_enable(&trap->napi);
mlx5e_activate_rq(&trap->rq);
+ mlx5e_trigger_napi_sched(&trap->napi);
}
void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
@@ -222,12 +230,12 @@ static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
- err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ err = mlx5e_add_vlan_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
- err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ err = mlx5e_add_mac_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
@@ -248,10 +256,10 @@ static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
{
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
- mlx5e_remove_vlan_trap(priv);
+ mlx5e_remove_vlan_trap(priv->fs);
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
- mlx5e_remove_mac_trap(priv);
+ mlx5e_remove_mac_trap(priv->fs);
break;
default:
netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 4cdf8e5b24c2..853f312cd757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -9,20 +9,28 @@
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
+#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ * next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
*/
-#if L1_CACHE_BYTES < 128
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+ 255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
-#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+ MLX5_SEND_WQE_DS)
-#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+ MLX5E_MAX_TX_INLINE_DS + \
+ MLX5E_MAX_TX_IPSEC_DS + \
+ MAX_SKB_FRAGS + 1, \
+ MLX5_SEND_WQEBB_NUM_DS)
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
@@ -57,10 +65,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
/* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
@@ -68,13 +74,17 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
/* TX */
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
static inline bool
+mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
+{
+ return (*fifo->pc - *fifo->cc) < fifo->mask;
+}
+
+static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
@@ -167,6 +177,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+}
+
struct mlx5e_shampo_umr {
u16 len;
};
@@ -303,9 +318,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
- return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+ return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
@@ -426,9 +441,11 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
-static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
+#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
+
+static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
- BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);
+ WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
@@ -438,19 +455,36 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
* stop room of X-1 + X.
* WQE size is also limited by the hardware limit.
*/
+ WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
- if (__builtin_constant_p(wqe_size))
- BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- else
- WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
+ return MLX5E_STOP_ROOM(wqe_size);
+}
- return wqe_size * 2 - 1;
+static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
+{
+ return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
+}
+
+static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
+{
+ u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+
+ return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}
static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
- u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+ u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);
return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
+
+static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
+{
+ size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe);
+
+ return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
+}
#endif
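The stop-room and IPsec DS budgets above can be sanity-checked by hand; a compilable sketch, assuming a 16-byte data segment (MLX5_SEND_WQE_DS) and a 4-byte mlx5_wqe_inline_seg, while the real WQEBB bound comes from mlx5e_get_max_sq_wqebbs():

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

    int main(void)
    {
            int ds_sz = 16, inline_seg = 4;
            /* ESP trailer: 255 pad + 1 pad length + 1 next header, plus 16-byte ICV */
            int ipsec_ds = DIV_ROUND_UP(inline_seg + 255 + 1 + 1 + 16, ds_sz);

            /* One in-flight WQE of X WQEBBs can leave X-1 slots unusable at the
             * end of the ring, so the queue stops with X-1 + X slots remaining.
             */
            printf("ipsec ds: %d, stop room for a 16-WQEBB wqe: %d\n",
                   ipsec_ds, MLX5E_STOP_ROOM(16)); /* 18 and 31 */
            return 0;
    }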
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 338d65e2c9ce..20507ef2f956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -57,12 +57,14 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di, struct xdp_buff *xdp)
+ struct page *page, struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo = NULL;
struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
+ int i;
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
@@ -96,46 +98,77 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd.dma_addr = dma_addr;
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = dma_addr;
- } else {
- /* Driver assumes that xdp_convert_buff_to_frame returns
- * an xdp_frame that points to the same memory region as
- * the original xdp_buff. It allows to map the memory only
- * once and to use the DMA_BIDIRECTIONAL mode.
- */
- xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+ if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+ return false;
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ return true;
+ }
+
+ /* Driver assumes that xdp_convert_buff_to_frame returns an xdp_frame
+ * that points to the same memory region as the original xdp_buff, which
+ * allows the memory to be mapped once and used in DMA_BIDIRECTIONAL
+ * mode.
+ */
+
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+ xdpi.page.rq = rq;
+
+ dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
- dma_addr = di->addr + (xdpf->data - (void *)xdpf);
- dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
- DMA_TO_DEVICE);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+ u32 len;
- xdptxd.dma_addr = dma_addr;
- xdpi.page.rq = rq;
- xdpi.page.di = *di;
+ addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+ len = skb_frag_size(frag);
+ dma_sync_single_for_device(sq->pdev, addr, len,
+ DMA_BIDIRECTIONAL);
+ }
}
- return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+ xdptxd.dma_addr = dma_addr;
+
+ if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+ return false;
+
+ xdpi.page.page = page;
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ xdpi.page.page = skb_frag_page(frag);
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ }
+ }
+
+ return true;
}
/* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 *len, struct xdp_buff *xdp)
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+ struct bpf_prog *prog, struct xdp_buff *xdp)
{
- struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
u32 act;
int err;
- if (!prog)
- return false;
-
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- *len = xdp->data_end - xdp->data;
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
@@ -147,7 +180,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
- mlx5e_page_dma_unmap(rq, di);
+ mlx5e_page_dma_unmap(rq, page);
rq->stats->xdp_redirect++;
return true;
default:
@@ -199,7 +232,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -245,10 +278,8 @@ enum {
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
- const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- stop_room))) {
+ sq->stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -262,12 +293,26 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
}
INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+ struct skb_shared_info *sinfo, int check_result);
+
+INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi, int check_result)
+ struct skb_shared_info *sinfo, int check_result)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ if (unlikely(sinfo)) {
+ /* MPWQE is enabled, but a multi-buffer packet is queued for
+ * transmission. MPWQE can't send fragmented packets, so close
+ * the current session and fall back to a regular WQE.
+ */
+ if (unlikely(sq->mpwqe.wqe))
+ mlx5e_xdp_mpwqe_complete(sq);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+ }
+
if (unlikely(xdptxd->len > sq->hw_mtu)) {
stats->err++;
return false;
@@ -288,17 +333,16 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
-INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -308,43 +352,76 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
return MLX5E_XDP_CHECK_OK;
}
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+{
+ return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
+}
+
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi, int check_result)
+ struct skb_shared_info *sinfo, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg = wqe->data;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5_wqe_eth_seg *eseg;
+ struct mlx5e_tx_wqe *wqe;
dma_addr_t dma_addr = xdptxd->dma_addr;
u32 dma_len = xdptxd->len;
+ u16 ds_cnt, inline_hdr_sz;
+ u8 num_wqebbs = 1;
+ int num_frags = 0;
+ u16 pi;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- net_prefetchw(wqe);
-
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
return false;
}
- if (!check_result)
- check_result = mlx5e_xmit_xdp_frame_check(sq);
+ ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
+ ds_cnt++;
+
+ /* check_result must be 0 if sinfo is passed. */
+ if (!check_result) {
+ int stop_room = 1;
+
+ if (unlikely(sinfo)) {
+ ds_cnt += sinfo->nr_frags;
+ num_frags = sinfo->nr_frags;
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ /* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
+ * enough to hold all fragments.
+ */
+ stop_room = MLX5E_STOP_ROOM(num_wqebbs);
+ }
+
+ check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
+ }
if (unlikely(check_result < 0))
return false;
- cseg->fm_ce_se = 0;
+ pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ net_prefetchw(wqe);
+
+ cseg = &wqe->ctrl;
+ eseg = &wqe->eth;
+ dseg = wqe->data;
+
+ inline_hdr_sz = 0;
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
+ memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
+ MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
dseg++;
}
@@ -354,11 +431,45 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- sq->pc++;
+ if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
+ u8 num_pkts = 1 + num_frags;
+ int i;
+
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ dseg->lkey = sq->mkey_be;
+
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+
+ addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+
+ dseg++;
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ }
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = num_pkts,
+ };
+
+ sq->pc += num_wqebbs;
+ } else {
+ cseg->fm_ce_se = 0;
+
+ sq->pc++;
+ }
sq->doorbell_cseg = cseg;
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
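/* Worked sketch of the descriptor-segment arithmetic in the function
 * above, with illustrative constants (4 DS per WQEBB; 2 "empty" DS for
 * the control and eth segments). A fragmented frame needs one DS for
 * the linear part plus one per page fragment, rounded up to whole
 * WQEBBs.
 */
#define SKETCH_WQEBB_NUM_DS		4
#define SKETCH_EMPTY_DS_COUNT		2
#define SKETCH_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int sketch_num_wqebbs(unsigned int nr_frags, int inline_hdr)
{
	unsigned int ds_cnt = SKETCH_EMPTY_DS_COUNT + 1 + nr_frags;

	if (inline_hdr)
		ds_cnt++;	/* one extra DS for the inlined headers */

	/* e.g. 3 frags with inline headers: ds_cnt = 7 -> 2 WQEBBs */
	return SKETCH_DIV_ROUND_UP(ds_cnt, SKETCH_WQEBB_NUM_DS);
}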
@@ -384,7 +495,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
- mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+ mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
break;
case MLX5E_XDP_XMIT_MODE_XSK:
/* AF_XDP send */
@@ -537,12 +648,13 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdpi.frame.dma_addr = xdptxd.dma_addr;
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
break;
}
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
nxmit++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 8d991c3b7a50..bc2d9034af5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -38,7 +38,6 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
@@ -47,8 +46,8 @@
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 *len, struct xdp_buff *xdp);
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+ struct bpf_prog *prog, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -59,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi,
+ struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi,
+ struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
@@ -123,12 +122,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- MLX5E_TX_MPW_MAX_NUM_DS;
- return mlx5e_tx_mpwqe_is_full(session);
+ max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+
+ return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 279cd8f4e79f..ebada0c5af3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -72,6 +72,7 @@ void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *x
{
xsk->headroom = xsk_pool_get_headroom(pool);
xsk->chunk_size = xsk_pool_get_chunk_size(pool);
+ xsk->unaligned = pool->unaligned;
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
@@ -98,6 +99,15 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
mlx5e_build_xsk_param(pool, &xsk);
+ if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
+ const char *recommendation = is_power_of_2(xsk.chunk_size) ?
+ "Upgrade firmware" : "Disable striding RQ";
+
+ mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n",
+ xsk.chunk_size, recommendation);
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
/* XSK objects will be created on open. */
goto validate_closed;
@@ -117,20 +127,18 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
goto err_remove_pool;
mlx5e_activate_xsk(c);
+ mlx5e_trigger_napi_icosq(c);
/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
* any Fill Ring entries at the setup stage.
*/
- err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix);
- if (unlikely(err))
- goto err_deactivate;
+ mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);
- return 0;
+ mlx5e_deactivate_rq(&c->rq);
+ mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);
-err_deactivate:
- mlx5e_deactivate_xsk(c);
- mlx5e_close_xsk(c);
+ return 0;
err_remove_pool:
mlx5e_xsk_remove_pool(&priv->xsk, ix);
@@ -169,7 +177,13 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
goto remove_pool;
c = priv->channels.c[ix];
- mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix);
+
+ mlx5e_activate_rq(&c->rq);
+ mlx5e_trigger_napi_icosq(c);
+ mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);
+
+ mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);
+
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
@@ -207,11 +221,10 @@ int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
- u16 ix;
- if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ if (unlikely(qid >= params->num_channels))
return -EINVAL;
- return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
- mlx5e_xsk_disable_pool(priv, ix);
+ return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
+ mlx5e_xsk_disable_pool(priv, qid);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 8e7b877d8a12..c91b54d9ff27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -4,21 +4,225 @@
#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>
+#include <linux/filter.h>
/* RX data path */
-static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
- u32 cqe_bcnt)
+int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5_wq_cyc *wq = &icosq->wq;
+ struct mlx5e_umr_wqe *umr_wqe;
+ int batch, i;
+ u32 offset; /* 17-bit value with MTT. */
+ u16 pi;
+
+ if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
+ goto err;
+
+ BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+ batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
+ rq->mpwqe.pages_per_wqe);
+
+ /* If batch < pages_per_wqe, either:
+ * 1. Some (or all) descriptors were invalid.
+ * 2. dma_need_sync is true, and it fell back to allocating one frame.
+ * In either case, try to continue allocating frames one by one, until
+ * the first error, which will mean there are no more valid descriptors.
+ */
+ for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
+ wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!wi->alloc_units[batch].xsk))
+ goto err_reuse_batch;
+ }
+
+ pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);
+ umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
+
+ if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(addr | MLX5_EN_WR),
+ };
+ }
+ } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ };
+ }
+ } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
+ u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 1] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr + mapping_size),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 2] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr + mapping_size * 2),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 3] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(rq->wqe_overflow.addr),
+ };
+ }
+ } else {
+ __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
+ rq->xsk_pool->chunk_size);
+ __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ .bcount = frame_size,
+ };
+ umr_wqe->inline_klms[(i << 1) + 1] = (struct mlx5_klm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(rq->wqe_overflow.addr),
+ .bcount = pad_size,
+ };
+ }
+ }
+
+ bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
+ wi->consumed_strides = 0;
+
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
+
+ /* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
+ offset = ix * rq->mpwqe.mtts_per_wqe;
+ if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ offset = offset * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
+ else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_OVERSIZED))
+ offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
+ else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
+ offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+
+ icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = rq->mpwqe.umr_wqebbs,
+ .umr.rq = rq,
+ };
+
+ icosq->pc += rq->mpwqe.umr_wqebbs;
+
+ icosq->doorbell_cseg = &umr_wqe->ctrl;
+
+ return 0;
+
+err_reuse_batch:
+ while (--batch >= 0)
+ xsk_buff_free(wi->alloc_units[batch].xsk);
+
+err:
+ rq->stats->buff_alloc_err++;
+ return -ENOMEM;
+}
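/* A generic sketch of the "batch, then top up one by one" allocation
 * pattern used above; the callbacks are illustrative stand-ins for
 * xsk_buff_alloc_batch()/xsk_buff_alloc().
 */
static int sketch_alloc_units(void **units, int want,
			      int (*alloc_batch)(void **bufs, int n),
			      void *(*alloc_one)(void))
{
	int got = alloc_batch(units, want);

	/* The batch may come up short (invalid descriptors, or a DMA
	 * sync fallback); keep allocating singly until the first
	 * failure, which means no more descriptors are available.
	 */
	for (; got < want; got++) {
		units[got] = alloc_one();
		if (!units[got])
			return -1;	/* caller frees units[0..got) */
	}
	return 0;
}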
+
+int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ struct xdp_buff **buffs;
+ u32 contig, alloc;
+ int i;
+
+ /* mlx5e_init_frags_partition creates a 1:1 mapping between
+ * rq->wqe.frags and rq->wqe.alloc_units, which allows us to
+ * allocate XDP buffers straight into alloc_units.
+ */
+ BUILD_BUG_ON(sizeof(rq->wqe.alloc_units[0]) !=
+ sizeof(rq->wqe.alloc_units[0].xsk));
+ buffs = (struct xdp_buff **)rq->wqe.alloc_units;
+ contig = mlx5_wq_cyc_get_size(wq) - ix;
+ if (wqe_bulk <= contig) {
+ alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);
+ } else {
+ alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);
+ if (likely(alloc == contig))
+ alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);
+ }
+
+ for (i = 0; i < alloc; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+ struct mlx5e_rx_wqe_cyc *wqe;
+ dma_addr_t addr;
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
+ /* Assumes log_num_frags == 0. */
+ frag = &rq->wqe.frags[j];
+
+ addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+ }
+
+ return alloc;
+}
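/* Sketch of the wraparound handling above: a cyclic ring exposes at most
 * (size - ix) contiguous slots from index ix, so a bulk fill is split
 * into at most two contiguous chunks. fill() is an illustrative callback
 * returning how many slots it actually filled.
 */
static unsigned int sketch_fill_bulk(unsigned int size, unsigned int ix,
				     unsigned int bulk,
				     unsigned int (*fill)(unsigned int start,
							  unsigned int n))
{
	unsigned int contig = size - ix;
	unsigned int done;

	if (bulk <= contig)
		return fill(ix, bulk);

	done = fill(ix, contig);
	if (done == contig)	/* first chunk complete: wrap to index 0 */
		done += fill(0, bulk - contig);
	return done;
}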
+
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ int i;
+
+ for (i = 0; i < wqe_bulk; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+ struct mlx5e_rx_wqe_cyc *wqe;
+ dma_addr_t addr;
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
+ /* Assumes log_num_frags == 0. */
+ frag = &rq->wqe.frags[j];
+
+ frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!frag->au->xsk))
+ return i;
+
+ addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+ }
+
+ return wqe_bulk;
+}
+
+static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
+{
+ u32 totallen = xdp->data_end - xdp->data_meta;
+ u32 metalen = xdp->data - xdp->data_meta;
struct sk_buff *skb;
- skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
+ skb = napi_alloc_skb(rq->cq.napi, totallen);
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
return NULL;
}
- skb_put_data(skb, data, cqe_bcnt);
+ skb_put_data(skb, xdp->data_meta, totallen);
+
+ if (metalen) {
+ skb_metadata_set(skb, metalen);
+ __skb_pull(skb, metalen);
+ }
return skb;
}
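/* Sketch of the buffer layout the SKB construction above relies on:
 * XDP metadata sits immediately before the packet, so a single copy of
 * [data_meta, data_end) captures both, after which the skb is marked
 * with the metadata length and pulled past it.
 *
 *   data_meta ...... data .......... data_end
 *   |-- metalen --|
 *   |------------ totallen ---------|
 */
#include <stdint.h>

struct sketch_xdp_span {
	uint8_t *data_meta, *data, *data_end;
};

static void sketch_span_lens(const struct sketch_xdp_span *x,
			     uint32_t *totallen, uint32_t *metalen)
{
	*totallen = (uint32_t)(x->data_end - x->data_meta);
	*metalen = (uint32_t)(x->data - x->data_meta);
}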
@@ -29,8 +233,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
- u32 cqe_bcnt32 = cqe_bcnt;
+ struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+ struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -45,8 +249,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xdp->data_end = xdp->data + cqe_bcnt32;
- xdp_set_data_meta_invalid(xdp);
+ xsk_buff_set_size(xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -65,7 +268,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
* allocated first from the Reuse Ring, so it has enough space.
*/
- if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
+ prog = rcu_dereference(rq->xdp_prog);
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -74,15 +278,15 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
+ return mlx5e_xsk_construct_skb(rq, xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->di->xsk;
+ struct xdp_buff *xdp = wi->au->xsk;
+ struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
* DMA address point directly to the necessary place. Furthermore, the
@@ -91,22 +295,17 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xdp->data_end = xdp->data + cqe_bcnt;
- xdp_set_data_meta_invalid(xdp);
+ xsk_buff_set_size(xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
- if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
+ prog = rcu_dereference(rq->xdp_prog);
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
- * will be handled by mlx5e_put_rx_frag.
+ * will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
+ return mlx5e_xsk_construct_skb(rq, xdp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 7f88ccf67fdd..087c943bd8e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -5,48 +5,19 @@
#define __MLX5_EN_XSK_RX_H__
#include "en.h"
-#include <net/xdp_sock_drv.h>
/* RX data path */
+int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
-static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
- if (!dma_info->xsk)
- return -ENOMEM;
-
- /* Store the DMA address without headroom. In striding RQ case, we just
- * provide pages for UMR, and headroom is counted at the setup stage
- * when creating a WQE. In non-striding RQ case, headroom is accounted
- * in mlx5e_alloc_rx_wqe.
- */
- dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
-
- return 0;
-}
-
-static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
-{
- if (!xsk_uses_need_wakeup(rq->xsk_pool))
- return alloc_err;
-
- if (unlikely(alloc_err))
- xsk_set_rx_need_wakeup(rq->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rq->xsk_pool);
-
- return false;
-}
-
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 25eac9e20342..ff03c43833bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -5,24 +5,19 @@
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"
+#include <net/xdp_sock_drv.h>
-/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
- * change unexpectedly, and mlx5e has a minimum valid stride size for striding
- * RQ, keep this check in the driver.
+/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
+ * stride size of striding RQ.
*/
-#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
+#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE ||
- xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
- return false;
-
- /* Current MTU and XSK headroom don't allow packets to fit the frames. */
- if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
+ if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
return false;
/* frag_sz is different for regular and XSK RQs, so ensure that linear
@@ -30,9 +25,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
*/
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
default: /* MLX5_WQ_TYPE_CYCLIC */
- return mlx5e_rx_is_linear_skb(params, xsk);
+ return mlx5e_rx_is_linear_skb(mdev, params, xsk);
}
}
@@ -43,7 +38,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
@@ -64,13 +59,14 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->clock = &mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
+ rq->channel = c;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = pool;
rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
- rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
+ rq_xdp_ix = c->ix;
err = mlx5e_rq_set_handlers(rq, params, xsk);
if (err)
return err;
@@ -158,7 +154,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
@@ -179,10 +175,6 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
mlx5e_reporter_icosq_resume_recovery(c);
/* TX queue is created active. */
-
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 8e96260fce1d..367a9505ca4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -12,18 +12,14 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_channel *c;
- u16 ix;
if (unlikely(!mlx5e_xdp_is_active(priv)))
return -ENETDOWN;
- if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ if (unlikely(qid >= params->num_channels))
return -EINVAL;
- c = priv->channels.c[ix];
-
- if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
- return -ENXIO;
+ c = priv->channels.c[qid];
if (!napi_if_scheduled_mark_missed(&c->napi)) {
/* To avoid WQE overrun, don't post a NOP if async_icosq is not
@@ -36,9 +32,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_napi_icosq(c);
}
return 0;
@@ -103,12 +97,15 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
+ check_result);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xsk_tx_post_err(sq, &xdpi);
+ } else {
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
}
flush = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index a05085035f23..9c505158b975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -5,7 +5,6 @@
#define __MLX5_EN_XSK_TX_H__
#include "en.h"
-#include <net/xdp_sock_drv.h>
/* TX data path */
@@ -13,15 +12,4 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
-static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
-{
- if (!xsk_uses_need_wakeup(sq->xsk_pool))
- return;
-
- if (sq->pc != sq->cc)
- xsk_clear_tx_need_wakeup(sq->xsk_pool);
- else
- xsk_set_tx_need_wakeup(sq->xsk_pool);
-}
-
#endif /* __MLX5_EN_XSK_TX_H__ */
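/* Sketch of the AF_XDP need_wakeup handshake that the helper removed
 * above implemented: while completions are in flight (pc != cc) the
 * flag is cleared, and once the queue drains it is set so user space
 * knows it must kick the kernel explicitly. The callbacks are
 * illustrative stand-ins for xsk_set_tx_need_wakeup() and
 * xsk_clear_tx_need_wakeup().
 */
static inline void sketch_update_tx_wakeup(int uses_need_wakeup,
					   unsigned int pc, unsigned int cc,
					   void (*set_flag)(void),
					   void (*clear_flag)(void))
{
	if (!uses_need_wakeup)
		return;

	if (pc != cc)
		clear_flag();	/* work pending: no kick needed */
	else
		set_flag();
}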
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index d964665eaa63..07187028f0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -37,8 +37,9 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls.h"
+#include "en_accel/ktls_txrx.h"
+#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
@@ -124,8 +125,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send SKBs and WQEs. */
- if (mlx5e_tls_skb_offloaded(skb))
- if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ if (mlx5e_ktls_skb_offloaded(skb))
+ if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
+ &state->tls)))
return false;
#endif
@@ -136,16 +138,16 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
- return true;
-}
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
+ struct mlx5e_priv *priv = netdev_priv(dev);
-static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
-{
-#ifdef CONFIG_MLX5_EN_IPSEC
- return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#else
- return false;
+ if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
+ return false;
+ }
#endif
+
+ return true;
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
@@ -171,6 +173,11 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
+ mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
+#endif
+
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
@@ -183,7 +190,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
+ mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
@@ -202,4 +209,14 @@ static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
}
+
+static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
+{
+ return mlx5e_ktls_init_tx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mlx5e_ktls_cleanup_tx(priv);
+}
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4c4ee524176c..285d32d2fd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
+#include <mlx5_core.h>
#include "en_accel/fs_tcp.h"
#include "fs_core.h"
@@ -71,13 +71,13 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
struct mlx5e_flow_table *ft = NULL;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
@@ -86,23 +86,21 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs.accel_tcp;
-
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (sk->sk_family) {
case AF_INET:
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
- mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
- &inet_sk(sk)->inet_rcv_saddr,
- inet_sk(sk)->inet_sport,
- &inet_sk(sk)->inet_daddr,
- inet_sk(sk)->inet_dport);
+ fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
+ &inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ &inet_sk(sk)->inet_daddr,
+ inet_sk(sk)->inet_dport);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
@@ -140,34 +138,32 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
- PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
out:
kvfree(spec);
return flow;
}
-static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
+static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
enum accel_fs_tcp_type type)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_flow_table *accel_fs_t;
struct mlx5_flow_destination dest;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs.accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, accel_fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -265,9 +261,11 @@ out:
return err;
}
-static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
+static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,21 +275,21 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
- ft->t->id, ft->t->level);
+ fs_dbg(fs, "Created fs accel table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = accel_fs_tcp_create_groups(ft, type);
if (err)
goto err;
- err = accel_fs_tcp_add_default_rule(priv, type);
+ err = accel_fs_tcp_add_default_rule(fs, type);
if (err)
goto err;
@@ -301,17 +299,18 @@ err:
return err;
}
-static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
@@ -319,32 +318,32 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs.accel_tcp->tables[i].t;
+ dest.ft = accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
return 0;
}
-static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
+static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
{
- struct mlx5e_accel_fs_tcp *fs_tcp;
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
- fs_tcp = priv->fs.accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -353,40 +352,43 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
fs_tcp->tables[i].t = NULL;
}
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
int i;
- if (!priv->fs.accel_tcp)
+ if (!accel_tcp)
return;
- accel_fs_tcp_disable(priv);
+ accel_fs_tcp_disable(fs);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
- accel_fs_tcp_destroy_table(priv, i);
+ accel_fs_tcp_destroy_table(fs, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
}
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp;
int i, err;
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
- if (!priv->fs.accel_tcp)
+ accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
+ if (!accel_tcp)
return -ENOMEM;
+ mlx5e_fs_set_accel_tcp(fs, accel_tcp);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- err = accel_fs_tcp_create_table(priv, i);
+ err = accel_fs_tcp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = accel_fs_tcp_enable(priv);
+ err = accel_fs_tcp_enable(fs);
if (err)
goto err_destroy_tables;
@@ -394,9 +396,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_tables:
while (--i >= 0)
- accel_fs_tcp_destroy_table(priv, i);
-
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ accel_fs_tcp_destroy_table(fs, i);
+ kfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
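/* Sketch of the error-unwind idiom in the function above: on failure
 * creating table i, tables [0, i) are destroyed in reverse order before
 * the container is released.
 */
static int sketch_create_all(int n, int (*create)(int idx),
			     void (*destroy)(int idx))
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = create(i);
		if (err)
			goto err_destroy;
	}
	return 0;

err_destroy:
	while (--i >= 0)
		destroy(i);
	return err;
}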
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index 589235824543..a032bff482a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -4,19 +4,19 @@
#ifndef __MLX5E_ACCEL_FS_TCP_H__
#define __MLX5E_ACCEL_FS_TCP_H__
-#include "en.h"
+#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
-static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
-static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
+static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
+static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 7cab08a2f715..a715601865d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -35,26 +35,14 @@
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
-#include <linux/module.h>
#include "en.h"
-#include "en_accel/ipsec.h"
-#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/ipsec_fs.h"
+#include "ipsec.h"
+#include "ipsec_rxtx.h"
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
- struct mlx5e_ipsec_sa_entry *sa;
-
- if (!x)
- return NULL;
-
- sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
- if (!sa)
- return NULL;
-
- WARN_ON(sa->x != x);
- return sa;
+ return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
@@ -75,9 +63,9 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
return ret;
}
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
- unsigned int handle)
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ unsigned int handle = sa_entry->ipsec_obj_id;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
@@ -113,7 +101,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom = 0;
u8 overlap;
- u32 *esn;
if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
sa_entry->esn_state.trigger = 0;
@@ -128,11 +115,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
htonl(seq_bottom));
- esn = &sa_entry->esn_state.esn;
sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
- ++(*esn);
sa_entry->esn_state.overlap = 0;
return true;
} else if (unlikely(!overlap &&
@@ -149,7 +134,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
- struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
+ struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
struct aead_geniv_ctx *geniv_ctx;
struct crypto_aead *aead;
unsigned int crypto_data_len, key_len;
@@ -183,23 +168,17 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
}
- /* rx handle */
- attrs->sa_handle = sa_entry->handle;
-
- /* algo type */
- attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
-
/* action */
- attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
- MLX5_ACCEL_ESP_ACTION_ENCRYPT :
- MLX5_ACCEL_ESP_ACTION_DECRYPT;
+ attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT :
+ MLX5_ACCEL_ESP_ACTION_DECRYPT;
/* flags */
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
/* spi */
- attrs->spi = x->id.spi;
+ attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
@@ -227,8 +206,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_ESN)) {
+ !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -275,46 +253,29 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
- if (x->props.family == AF_INET6 &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_IPV6)) {
- netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
- return -EINVAL;
- }
return 0;
}
-static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return 0;
-
- return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
- sa_entry->ipsec_obj_id,
- &sa_entry->ipsec_rule);
-}
-
-static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static void _update_xfrm_state(struct work_struct *work)
{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return;
+ struct mlx5e_ipsec_modify_state_work *modify_work =
+ container_of(work, struct mlx5e_ipsec_modify_state_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
+ modify_work, struct mlx5e_ipsec_sa_entry, modify_work);
- mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
- &sa_entry->ipsec_rule);
+ mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}
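/* Sketch of the embedded-work pattern adopted above: modify_work is now
 * a member of the SA entry, so container_of() recovers the entry from
 * the work pointer and no per-event allocation is needed (the removed
 * code kzalloc'd a work item for every ESN update). Types here are
 * illustrative.
 */
#include <stddef.h>

#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sketch_work { void (*fn)(struct sketch_work *w); };

struct sketch_sa_entry {
	int id;
	struct sketch_work modify_work;
};

static void sketch_on_modify(struct sketch_work *w)
{
	struct sketch_sa_entry *sa =
		sketch_container_of(w, struct sketch_sa_entry, modify_work);

	(void)sa;	/* operate on the parent entry */
}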
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
- struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
- unsigned int sa_handle;
int err;
priv = netdev_priv(netdev);
+ if (!priv->ipsec)
+ return -EOPNOTSUPP;
err = mlx5e_xfrm_validate_state(x);
if (err)
@@ -332,33 +293,18 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
- /* create xfrm */
- mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
- sa_entry->xfrm =
- mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
- if (IS_ERR(sa_entry->xfrm)) {
- err = PTR_ERR(sa_entry->xfrm);
- goto err_sa_entry;
- }
-
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
/* create hw context */
- sa_entry->hw_context =
- mlx5_accel_esp_create_hw_context(priv->mdev,
- sa_entry->xfrm,
- &sa_handle);
- if (IS_ERR(sa_entry->hw_context)) {
- err = PTR_ERR(sa_entry->hw_context);
+ err = mlx5_ipsec_create_sa_ctx(sa_entry);
+ if (err)
goto err_xfrm;
- }
- sa_entry->ipsec_obj_id = sa_handle;
- err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
+ err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
if (err)
goto err_hw_ctx;
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry);
if (err)
goto err_add_rule;
} else {
@@ -366,18 +312,16 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
+ INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
err_add_rule:
- mlx5e_xfrm_fs_del_rule(priv, sa_entry);
+ mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
err_hw_ctx:
- mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
+ mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
- mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
-err_sa_entry:
kfree(sa_entry);
-
out:
return err;
}
@@ -386,10 +330,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- if (!sa_entry)
- return;
-
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
mlx5e_ipsec_sadb_rx_del(sa_entry);
}
@@ -398,24 +339,18 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
- if (!sa_entry)
- return;
-
- if (sa_entry->hw_context) {
- flush_workqueue(sa_entry->ipsec->wq);
- mlx5e_xfrm_fs_del_rule(priv, sa_entry);
- mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
- mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
- }
-
+ cancel_work_sync(&sa_entry->modify_work.work);
+ mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+ mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry);
}
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
- struct mlx5e_ipsec *ipsec = NULL;
+ struct mlx5e_ipsec *ipsec;
+ int ret;
- if (!MLX5_IPSEC_DEV(priv->mdev)) {
+ if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
}
@@ -426,21 +361,27 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
- ida_init(&ipsec->halloc);
- ipsec->en_priv = priv;
- ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
+ ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
- kfree(ipsec);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_wq;
}
+ ret = mlx5e_accel_ipsec_fs_init(ipsec);
+ if (ret)
+ goto err_fs_init;
+
priv->ipsec = ipsec;
- mlx5e_accel_ipsec_fs_init(priv);
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
+
+err_fs_init:
+ destroy_workqueue(ipsec->wq);
+err_wq:
+ kfree(ipsec);
+ return (ret != -EOPNOTSUPP) ? ret : 0;
}
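/* Sketch of the "optional feature" convention in the error path above:
 * a capability gap (-EOPNOTSUPP) is swallowed so device init continues
 * without IPsec, while real failures such as -ENOMEM still propagate.
 */
#include <errno.h>

static int sketch_init_optional(int (*init)(void))
{
	int err = init();

	return err == -EOPNOTSUPP ? 0 : err;
}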
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
@@ -450,10 +391,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
- mlx5e_accel_ipsec_fs_cleanup(priv);
+ mlx5e_accel_ipsec_fs_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
-
- ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -473,50 +412,19 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
-struct mlx5e_ipsec_modify_state_work {
- struct work_struct work;
- struct mlx5_accel_esp_xfrm_attrs attrs;
- struct mlx5e_ipsec_sa_entry *sa_entry;
-};
-
-static void _update_xfrm_state(struct work_struct *work)
-{
- int ret;
- struct mlx5e_ipsec_modify_state_work *modify_work =
- container_of(work, struct mlx5e_ipsec_modify_state_work, work);
- struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;
-
- ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
- &modify_work->attrs);
- if (ret)
- netdev_warn(sa_entry->ipsec->en_priv->netdev,
- "Not an IPSec offload device\n");
-
- kfree(modify_work);
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5e_ipsec_modify_state_work *modify_work;
+ struct mlx5e_ipsec_modify_state_work *modify_work =
+ &sa_entry->modify_work;
bool need_update;
- if (!sa_entry)
- return;
-
need_update = mlx5e_ipsec_update_esn_state(sa_entry);
if (!need_update)
return;
- modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
- if (!modify_work)
- return;
-
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
- modify_work->sa_entry = sa_entry;
-
- INIT_WORK(&modify_work->work, _update_xfrm_state);
- WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
+ queue_work(sa_entry->ipsec->wq, &modify_work->work);
}
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
@@ -532,11 +440,8 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
- !MLX5_CAP_ETH(mdev, swp)) {
- mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
+ if (!mlx5_ipsec_device_caps(mdev))
return;
- }
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
@@ -551,15 +456,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
- !MLX5_CAP_ETH(mdev, swp_lso)) {
+ if (!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
- if (mlx5_is_ipsec_device(mdev))
- netdev->gso_partial_features |= NETIF_F_GSO_ESP;
-
+ netdev->gso_partial_features |= NETIF_F_GSO_ESP;
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 6164c7f59efb..16bcceec16c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -40,11 +40,56 @@
#include <net/xfrm.h>
#include <linux/idr.h>
-#include "accel/ipsec.h"
-
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
+enum mlx5_accel_esp_flags {
+ MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
+ MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
+ MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
+};
+
+enum mlx5_accel_esp_action {
+ MLX5_ACCEL_ESP_ACTION_DECRYPT,
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT,
+};
+
+struct aes_gcm_keymat {
+ u64 seq_iv;
+
+ u32 salt;
+ u32 icv_len;
+
+ u32 key_len;
+ u32 aes_key[256 / 32];
+};
+
+struct mlx5_accel_esp_xfrm_attrs {
+ enum mlx5_accel_esp_action action;
+ u32 esn;
+ u32 spi;
+ u32 flags;
+ struct aes_gcm_keymat aes_gcm;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } saddr;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } daddr;
+
+ u8 is_ipv6;
+};
+
+enum mlx5_ipsec_cap {
+ MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
+ MLX5_IPSEC_CAP_ESN = 1 << 1,
+};
+
struct mlx5e_priv;
struct mlx5e_ipsec_sw_stats {
@@ -55,37 +100,16 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
atomic64_t ipsec_tx_drop_trailer;
- atomic64_t ipsec_tx_drop_metadata;
-};
-
-struct mlx5e_ipsec_stats {
- u64 ipsec_dec_in_packets;
- u64 ipsec_dec_out_packets;
- u64 ipsec_dec_bypass_packets;
- u64 ipsec_enc_in_packets;
- u64 ipsec_enc_out_packets;
- u64 ipsec_enc_bypass_packets;
- u64 ipsec_dec_drop_packets;
- u64 ipsec_dec_auth_fail_packets;
- u64 ipsec_enc_drop_packets;
- u64 ipsec_add_sa_success;
- u64 ipsec_add_sa_fail;
- u64 ipsec_del_sa_success;
- u64 ipsec_del_sa_fail;
- u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
- struct mlx5e_priv *en_priv;
+ struct mlx5_core_dev *mdev;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
- bool no_trailer;
- spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
- struct ida halloc;
+ spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
- struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
@@ -102,21 +126,26 @@ struct mlx5e_ipsec_rule {
struct mlx5_modify_hdr *set_modify_hdr;
};
+struct mlx5e_ipsec_modify_state_work {
+ struct work_struct work;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
+};
+
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
- struct mlx5_accel_esp_xfrm *xfrm;
- void *hw_context;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
u32 ipsec_obj_id;
+ u32 enc_key_id;
struct mlx5e_ipsec_rule ipsec_rule;
+ struct mlx5e_ipsec_modify_state_work modify_work;
};
-void mlx5e_ipsec_build_inverse_table(void);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -124,12 +153,27 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
-#else
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_sa_entry *sa_entry);
+
+int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
-static inline void mlx5e_ipsec_build_inverse_table(void)
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs);
+
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ return sa_entry->ipsec->mdev;
}
-
+#else
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;
@@ -143,6 +187,10 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 17da23dff0ed..b859e4a4c744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2,8 +2,9 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
-#include "accel/ipsec_offload.h"
-#include "ipsec_fs.h"
+#include "en.h"
+#include "en/fs.h"
+#include "ipsec.h"
#include "fs_core.h"
#define NUM_IPSEC_FTE BIT(15)
@@ -35,6 +36,7 @@ struct mlx5e_accel_fs_esp {
};
struct mlx5e_ipsec_tx {
+ struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
struct mutex mutex; /* Protect IPsec TX steering */
u32 refcnt;
@@ -58,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
struct mlx5_flow_spec *spec;
- int err = 0;
+ int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -94,101 +96,27 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
goto out;
}
+ kvfree(spec);
rx_err->rule = fte;
rx_err->copy_modify_hdr = modify_hdr;
+ return 0;
out:
- if (err)
- mlx5_modify_header_dealloc(mdev, modify_hdr);
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
kvfree(spec);
return err;
}
-static void rx_err_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_rx_err *rx_err)
-{
- if (rx_err->rule) {
- mlx5_del_flow_rules(rx_err->rule);
- rx_err->rule = NULL;
- }
-
- if (rx_err->copy_modify_hdr) {
- mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
- rx_err->copy_modify_hdr = NULL;
- }
-}
-
-static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
-{
- rx_err_del_rule(priv, rx_err);
-
- if (rx_err->ft) {
- mlx5_destroy_flow_table(rx_err->ft);
- rx_err->ft = NULL;
- }
-}
-
-static int rx_err_create_ft(struct mlx5e_priv *priv,
- struct mlx5e_accel_fs_esp_prot *fs_prot,
- struct mlx5e_ipsec_rx_err *rx_err)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_table *ft;
- int err;
-
- ft_attr.max_fte = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
- return err;
- }
-
- rx_err->ft = ft;
- err = rx_err_add_rule(priv, fs_prot, rx_err);
- if (err)
- goto out_err;
-
- return 0;
-
-out_err:
- mlx5_destroy_flow_table(ft);
- rx_err->ft = NULL;
- return err;
-}
-
-static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
-{
- if (fs_prot->miss_rule) {
- mlx5_del_flow_rules(fs_prot->miss_rule);
- fs_prot->miss_rule = NULL;
- }
-
- if (fs_prot->miss_group) {
- mlx5_destroy_flow_group(fs_prot->miss_group);
- fs_prot->miss_group = NULL;
- }
-
- if (fs_prot->ft) {
- mlx5_destroy_flow_table(fs_prot->ft);
- fs_prot->ft = NULL;
- }
-}
-
static int rx_fs_create(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft = fs_prot->ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
- struct mlx5_flow_table *ft;
u32 *flow_group_in;
int err = 0;
@@ -199,20 +127,6 @@ static int rx_fs_create(struct mlx5e_priv *priv,
goto out;
}
- /* Create FT */
- ft_attr.max_fte = NUM_IPSEC_FTE;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft_attr.autogroup.num_reserved_entries = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
- goto out;
- }
- fs_prot->ft = ft;
-
/* Create miss_group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
@@ -227,19 +141,19 @@ static int rx_fs_create(struct mlx5e_priv *priv,
/* Create miss rule */
miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
if (IS_ERR(miss_rule)) {
+ mlx5_destroy_flow_group(fs_prot->miss_group);
err = PTR_ERR(miss_rule);
netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
goto out;
}
fs_prot->miss_rule = miss_rule;
-
out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
-static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -249,38 +163,75 @@ static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* The netdev unreg already happened, so all offloaded rules are already removed */
fs_prot = &accel_esp->fs_prot[type];
- rx_fs_destroy(fs_prot);
-
- rx_err_destroy_ft(priv, &fs_prot->rx_err);
+ mlx5_del_flow_rules(fs_prot->miss_rule);
+ mlx5_destroy_flow_group(fs_prot->miss_group);
+ mlx5_destroy_flow_table(fs_prot->ft);
- return 0;
+ mlx5_del_flow_rules(fs_prot->rx_err.rule);
+ mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+ mlx5_destroy_flow_table(fs_prot->rx_err.ft);
}
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_flow_table *ft;
int err;
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
-
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
- err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
+ fs_prot->rx_err.ft = ft;
+ err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
if (err)
- return err;
+ goto err_add;
+
+ /* Create FT */
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_fs_ft;
+ }
+ fs_prot->ft = ft;
err = rx_fs_create(priv, fs_prot);
if (err)
- rx_destroy(priv, type);
+ goto err_fs;
+
+ return 0;
+err_fs:
+ mlx5_destroy_flow_table(fs_prot->ft);
+err_fs_ft:
+ mlx5_del_flow_rules(fs_prot->rx_err.rule);
+ mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+err_add:
+ mlx5_destroy_flow_table(fs_prot->rx_err.ft);
return err;
}
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
@@ -289,21 +240,21 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
- if (fs_prot->refcnt++)
- goto out;
+ if (fs_prot->refcnt)
+ goto skip;
/* create FT */
err = rx_create(priv, type);
- if (err) {
- fs_prot->refcnt--;
+ if (err)
goto out;
- }
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
+skip:
+ fs_prot->refcnt++;
out:
mutex_unlock(&fs_prot->prot_mutex);
return err;
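
The rx_ft_get()/rx_ft_put() rework above replaces the old "increment first, roll back on error" scheme: the reference count is now bumped only after creation succeeds, so error paths never have to undo it. The same shape is applied to tx_ft_get()/tx_ft_put() below. A generic, self-contained sketch of the idiom (the constructor is passed in as a hypothetical callback):

static int ft_get_sketch(struct mutex *lock, u32 *refcnt,
			 int (*create)(void))
{
	int err = 0;

	mutex_lock(lock);
	if (*refcnt)
		goto skip;	/* tables already exist, just take a reference */
	err = create();
	if (err)
		goto out;	/* refcnt stays untouched on failure */
skip:
	(*refcnt)++;
out:
	mutex_unlock(lock);
	return err;
}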
@@ -311,17 +262,19 @@ out:
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
- if (--fs_prot->refcnt)
+ fs_prot->refcnt--;
+ if (fs_prot->refcnt)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
@@ -338,15 +291,9 @@ static int tx_create(struct mlx5e_priv *priv)
struct mlx5_flow_table *ft;
int err;
- priv->fs.egress_ns =
- mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
- if (!priv->fs.egress_ns)
- return -EOPNOTSUPP;
-
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
@@ -356,32 +303,20 @@ static int tx_create(struct mlx5e_priv *priv)
return 0;
}
-static void tx_destroy(struct mlx5e_priv *priv)
-{
- struct mlx5e_ipsec *ipsec = priv->ipsec;
-
- if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
- return;
-
- mlx5_destroy_flow_table(ipsec->tx_fs->ft);
- ipsec->tx_fs->ft = NULL;
-}
-
static int tx_ft_get(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
int err = 0;
mutex_lock(&tx_fs->mutex);
- if (tx_fs->refcnt++)
- goto out;
+ if (tx_fs->refcnt)
+ goto skip;
err = tx_create(priv);
- if (err) {
- tx_fs->refcnt--;
+ if (err)
goto out;
- }
-
+skip:
+ tx_fs->refcnt++;
out:
mutex_unlock(&tx_fs->mutex);
return err;
@@ -392,11 +327,11 @@ static void tx_ft_put(struct mlx5e_priv *priv)
struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
mutex_lock(&tx_fs->mutex);
- if (--tx_fs->refcnt)
+ tx_fs->refcnt--;
+ if (tx_fs->refcnt)
goto out;
- tx_destroy(priv);
-
+ mlx5_destroy_flow_table(tx_fs->ft);
out:
mutex_unlock(&tx_fs->mutex);
}
@@ -424,8 +359,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
/* SPI number */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
- be32_to_cpu(attrs->spi));
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.outer_esp_spi, attrs->spi);
if (ip_version == 4) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -453,16 +388,18 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
0xff, 16);
}
- flow_act->ipsec_obj_id = ipsec_obj_id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act->crypto.obj_id = ipsec_obj_id;
flow_act->flags |= FLOW_ACT_NO_APPEND;
}
static int rx_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
struct mlx5_modify_hdr *modify_hdr = NULL;
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
@@ -508,7 +445,7 @@ static int rx_add_rule(struct mlx5e_priv *priv,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
flow_act.modify_hdr = modify_hdr;
@@ -536,9 +473,7 @@ out:
}
static int tx_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -555,7 +490,8 @@ static int tx_add_rule(struct mlx5e_priv *priv,
goto out;
}
- setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+ setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
+ &flow_act);
/* Add IPsec indicator in metadata_reg_a */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
@@ -565,16 +501,16 @@ static int tx_add_rule(struct mlx5e_priv *priv,
MLX5_ETH_WQE_FT_META_IPSEC);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
- attrs->action, err);
+ sa_entry->attrs.action, err);
goto out;
}
- ipsec_rule->rule = rule;
+ sa_entry->ipsec_rule.rule = rule;
out:
kvfree(spec);
@@ -583,133 +519,88 @@ out:
return err;
}
-static void rx_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule)
-{
- mlx5_del_flow_rules(ipsec_rule->rule);
- ipsec_rule->rule = NULL;
-
- mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
- ipsec_rule->set_modify_hdr = NULL;
-
- rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
-}
-
-static void tx_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_rule *ipsec_rule)
-{
- mlx5_del_flow_rules(ipsec_rule->rule);
- ipsec_rule->rule = NULL;
-
- tx_ft_put(priv);
-}
-
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!priv->ipsec->rx_fs)
- return -EOPNOTSUPP;
+ if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
+ return tx_add_rule(priv, sa_entry);
- if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
- else
- return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+ return rx_add_rule(priv, sa_entry);
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!priv->ipsec->rx_fs)
- return;
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
- if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- rx_del_rule(priv, attrs, ipsec_rule);
- else
- tx_del_rule(priv, ipsec_rule);
-}
+ mlx5_del_flow_rules(ipsec_rule->rule);
-static void fs_cleanup_tx(struct mlx5e_priv *priv)
-{
- mutex_destroy(&priv->ipsec->tx_fs->mutex);
- WARN_ON(priv->ipsec->tx_fs->refcnt);
- kfree(priv->ipsec->tx_fs);
- priv->ipsec->tx_fs = NULL;
+ if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
+ tx_ft_put(priv);
+ return;
+ }
+
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
+ rx_ft_put(priv,
+ sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
-static void fs_cleanup_rx(struct mlx5e_priv *priv)
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
enum accel_fs_esp_type i;
- accel_esp = priv->ipsec->rx_fs;
+ if (!ipsec->rx_fs)
+ return;
+
+ mutex_destroy(&ipsec->tx_fs->mutex);
+ WARN_ON(ipsec->tx_fs->refcnt);
+ kfree(ipsec->tx_fs);
+
+ accel_esp = ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_destroy(&fs_prot->prot_mutex);
WARN_ON(fs_prot->refcnt);
}
- kfree(priv->ipsec->rx_fs);
- priv->ipsec->rx_fs = NULL;
-}
-
-static int fs_init_tx(struct mlx5e_priv *priv)
-{
- priv->ipsec->tx_fs =
- kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
- if (!priv->ipsec->tx_fs)
- return -ENOMEM;
-
- mutex_init(&priv->ipsec->tx_fs->mutex);
- return 0;
+ kfree(ipsec->rx_fs);
}
-static int fs_init_rx(struct mlx5e_priv *priv)
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_flow_namespace *ns;
enum accel_fs_esp_type i;
+ int err = -ENOMEM;
+
+ ns = mlx5_get_flow_namespace(ipsec->mdev,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!ns)
+ return -EOPNOTSUPP;
- priv->ipsec->rx_fs =
- kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
- if (!priv->ipsec->rx_fs)
+ ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
+ if (!ipsec->tx_fs)
return -ENOMEM;
- accel_esp = priv->ipsec->rx_fs;
+ ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
+ if (!ipsec->rx_fs)
+ goto err_rx;
+
+ mutex_init(&ipsec->tx_fs->mutex);
+ ipsec->tx_fs->ns = ns;
+
+ accel_esp = ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_init(&fs_prot->prot_mutex);
}
return 0;
-}
-
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
-{
- if (!priv->ipsec->rx_fs)
- return;
-
- fs_cleanup_tx(priv);
- fs_cleanup_rx(priv);
-}
-
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
-{
- int err;
-
- if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
- return -EOPNOTSUPP;
-
- err = fs_init_tx(priv);
- if (err)
- return err;
-
- err = fs_init_rx(priv);
- if (err)
- fs_cleanup_tx(priv);
+err_rx:
+ kfree(ipsec->tx_fs);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
deleted file mode 100644
index 3389b3bb3ef8..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_STEERING_H__
-#define __MLX5_IPSEC_STEERING_H__
-
-#include "en.h"
-#include "ipsec.h"
-#include "accel/ipsec_offload.h"
-#include "en/fs.h"
-
-#ifdef CONFIG_MLX5_EN_IPSEC
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule);
-#else
-static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
-#endif
-#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
new file mode 100644
index 000000000000..792724ce7336
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "ipsec.h"
+#include "lib/mlx5.h"
+
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ u32 caps = 0;
+
+ if (!MLX5_CAP_GEN(mdev, ipsec_offload))
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return 0;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return 0;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
+ return 0;
+
+ if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
+ return 0;
+
+ if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
+ MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
+ caps |= MLX5_IPSEC_CAP_CRYPTO;
+
+ if (!caps)
+ return 0;
+
+ if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
+ caps |= MLX5_IPSEC_CAP_ESN;
+
+ /* We can accommodate up to 2^24 different IPsec objects
+ * because we use up to 24 bits in flow table metadata
+ * to hold the IPsec Object unique handle.
+ */
+ WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
+ return caps;
+}
+EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
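
mlx5_ipsec_device_caps() collapses the old capability probing into a single bitmask, so callers test feature bits directly. An illustrative check (the caller is hypothetical; the MLX5_IPSEC_CAP_* flags are the ones set above):

static bool ipsec_esn_supported(struct mlx5_core_dev *mdev)
{
	u32 caps = mlx5_ipsec_device_caps(mdev);

	/* MLX5_IPSEC_CAP_CRYPTO is the baseline; ESN is an extra bit */
	return (caps & MLX5_IPSEC_CAP_CRYPTO) && (caps & MLX5_IPSEC_CAP_ESN);
}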
+
+static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
+ void *obj, *salt_p, *salt_iv_p;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
+
+ /* salt and seq_iv */
+ salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
+ memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
+
+ MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
+ salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
+ memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
+ /* esn */
+ if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+ MLX5_SET(ipsec_obj, obj, esn_en, 1);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
+ if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
+ MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+ }
+
+ MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ sa_entry->ipsec_obj_id =
+ MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ int err;
+
+ /* key */
+ err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
+ aes_gcm->key_len / BITS_PER_BYTE,
+ MLX5_ACCEL_OBJ_IPSEC_KEY,
+ &sa_entry->enc_key_id);
+ if (err) {
+ mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ err = mlx5_create_ipsec_obj(sa_entry);
+ if (err) {
+ mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
+ goto err_enc_key;
+ }
+
+ return 0;
+
+err_enc_key:
+ mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
+ return err;
+}
+
+void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+
+ mlx5_destroy_ipsec_obj(sa_entry);
+ mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
+}
+
+static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
+ u64 modify_field_select = 0;
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+ return 0;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return -EINVAL;
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
+ sa_entry->ipsec_obj_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
+ modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
+
+ /* esn */
+ if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
+ return -EOPNOTSUPP;
+
+ obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
+ MLX5_SET64(ipsec_obj, obj, modify_field_select,
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
+ if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
+ MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ int err;
+
+ err = mlx5_modify_ipsec_obj(sa_entry, attrs);
+ if (err)
+ return;
+
+ memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
+}
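
Taken together, the helpers in this new file pair up as create/teardown steps; the real callers are in ipsec.c, outside this diff. A hedged sketch of the expected lifetime:

static int sa_ctx_lifetime_sketch(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	err = mlx5_ipsec_create_sa_ctx(sa_entry); /* DEK, then IPsec object */
	if (err)
		return err;

	/* ... steering rules reference sa_entry->ipsec_obj_id here ... */

	mlx5_ipsec_free_sa_ctx(sa_entry); /* object first, then the DEK */
	return 0;
}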
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 2db9573a3fe6..6859f1c1a831 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,78 +34,15 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
-#include "accel/ipsec_offload.h"
-#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/ipsec.h"
-#include "accel/accel.h"
+#include "ipsec.h"
+#include "ipsec_rxtx.h"
#include "en.h"
enum {
- MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
- MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
- MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
-};
-
-struct mlx5e_ipsec_rx_metadata {
- unsigned char nexthdr;
- __be32 sa_handle;
-} __packed;
-
-enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
-struct mlx5e_ipsec_tx_metadata {
- __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */
- __be16 seq; /* LSBs of the first TCP seq, only for LSO */
- u8 esp_next_proto; /* Next protocol of ESP */
-} __packed;
-
-struct mlx5e_ipsec_metadata {
- unsigned char syndrome;
- union {
- unsigned char raw[5];
- /* from FPGA to host, on successful decrypt */
- struct mlx5e_ipsec_rx_metadata rx;
- /* from host to FPGA */
- struct mlx5e_ipsec_tx_metadata tx;
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-#define MAX_LSO_MSS 2048
-
-/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
-static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
-
-static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
-{
- return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
-}
-
-static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *eth;
-
- if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
- return ERR_PTR(-ENOMEM);
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
- skb->mac_header -= sizeof(*mdata);
- mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(*mdata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
-
- memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
- return mdata;
-}
-
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
unsigned int alen = crypto_aead_authsize(x->data);
@@ -157,11 +94,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
/* Tunnel mode */
if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
if (xo->proto == IPPROTO_IPV6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | IP | [TCP | UDP] */
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
return;
}
@@ -235,40 +181,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb_store_bits(skb, iv_offset, &seqno, 8);
}
-static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata,
- struct xfrm_offload *xo)
-{
- struct ip_esp_hdr *esph;
- struct tcphdr *tcph;
-
- if (skb_is_gso(skb)) {
- /* Add LSO metadata indication */
- esph = ip_esp_hdr(skb);
- tcph = inner_tcp_hdr(skb);
- netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
- skb->network_header,
- skb->transport_header,
- skb->inner_network_header,
- skb->inner_transport_header);
- netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
- skb->len, skb_shinfo(skb)->gso_size,
- ntohs(tcph->source), ntohs(tcph->dest),
- ntohl(tcph->seq), ntohl(esph->seq_no));
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
- mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
- mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
- } else {
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
- }
- mdata->content.tx.esp_next_proto = xo->proto;
-
- netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
- mdata->syndrome, mdata->content.tx.esp_next_proto,
- ntohs(mdata->content.tx.mss_inv),
- ntohs(mdata->content.tx.seq));
-}
-
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5e_accel_tx_ipsec_state *ipsec_st,
struct mlx5_wqe_inline_seg *inlseg)
@@ -289,16 +201,14 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
ipsec_st->x = x;
ipsec_st->xo = xo;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- aead = x->data;
- alen = crypto_aead_authsize(aead);
- blksize = ALIGN(crypto_aead_blocksize(aead), 4);
- clen = ALIGN(skb->len + 2, blksize);
- plen = max_t(u32, clen - skb->len, 4);
- tailen = plen + alen;
- ipsec_st->plen = plen;
- ipsec_st->tailen = tailen;
- }
+ aead = x->data;
+ alen = crypto_aead_authsize(aead);
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ clen = ALIGN(skb->len + 2, blksize);
+ plen = max_t(u32, clen - skb->len, 4);
+ tailen = plen + alen;
+ ipsec_st->plen = plen;
+ ipsec_st->tailen = tailen;
return 0;
}
@@ -331,19 +241,17 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
((struct iphdr *)skb_network_header(skb))->protocol :
((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
- eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
- encap = x->encap;
- if (!encap) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
- } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
- }
+ eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+ encap = x->encap;
+ if (!encap) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+ } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
}
}
@@ -354,7 +262,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_sa_entry *sa_entry;
- struct mlx5e_ipsec_metadata *mdata;
struct xfrm_state *x;
struct sec_path *sp;
@@ -383,19 +290,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
- if (MLX5_CAP_GEN(priv->mdev, fpga)) {
- mdata = mlx5e_ipsec_add_metadata(skb);
- if (IS_ERR(mdata)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
- goto drop;
- }
- }
-
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
- if (MLX5_CAP_GEN(priv->mdev, fpga))
- mlx5e_ipsec_set_metadata(skb, mdata, xo);
-
mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
@@ -405,79 +301,6 @@ drop:
return false;
}
-static inline struct xfrm_state *
-mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct xfrm_offload *xo;
- struct xfrm_state *xs;
- struct sec_path *sp;
- u32 sa_handle;
-
- sp = secpath_set(skb);
- if (unlikely(!sp)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
- return NULL;
- }
-
- sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
- xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
- if (unlikely(!xs)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
- return NULL;
- }
-
- sp = skb_sec_path(skb);
- sp->xvec[sp->len++] = xs;
- sp->olen++;
-
- xo = xfrm_offload(skb);
- xo->flags = CRYPTO_DONE;
- switch (mdata->syndrome) {
- case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- if (likely(priv->ipsec->no_trailer)) {
- xo->flags |= XFRM_ESP_NO_TRAILER;
- xo->proto = mdata->content.rx.nexthdr;
- }
- break;
- case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
- xo->status = CRYPTO_INVALID_PROTOCOL;
- break;
- default:
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
- return NULL;
- }
- return xs;
-}
-
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct xfrm_state *xs;
-
- if (!is_metadata_hdr_valid(skb))
- return skb;
-
- /* Use the metadata */
- mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
- xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
- if (unlikely(!xs)) {
- kfree_skb(skb);
- return NULL;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-
- return skb;
-}
-
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
@@ -509,7 +332,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
return;
}
- sp = skb_sec_path(skb);
sp->xvec[sp->len++] = xs;
sp->olen++;
@@ -519,8 +341,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
- if (WARN_ON_ONCE(priv->ipsec->no_trailer))
- xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
@@ -532,21 +352,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
-
-void mlx5e_ipsec_build_inverse_table(void)
-{
- u16 mss_inv;
- u32 mss;
-
- /* Calculate 1/x inverse table for use in GSO data path.
- * Using this table, we provide the IPSec accelerator with the value of
- * 1/gso_size so that it can infer the position of each segment inside
- * the GSO, and increment the ESP sequence number, and generate the IV.
- * The HW needs this value in Q0.16 fixed-point number format
- */
- mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
- for (mss = 2; mss < MAX_LSO_MSS; mss++) {
- mss_inv = div_u64(1ULL << 32, mss) >> 16;
- mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index b98db50c3418..1878a70b9031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,9 @@
#include "en.h"
#include "en/txrx.h"
-/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
-#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
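
With bit 30 now reserved, the syndrome field narrows from 7 to 6 bits; decoding is otherwise unchanged. An illustrative decode of the CQE flow-table metadata (function name hypothetical):

static void ipsec_metadata_decode_sketch(__be32 ft_metadata)
{
	u32 metadata = be32_to_cpu(ft_metadata);

	if (!MLX5_IPSEC_METADATA_MARKER(metadata))
		return; /* bit 31 clear: not an IPsec-offloaded packet */

	pr_debug("ipsec syndrome %lu, object id %lu\n",
		 MLX5_IPSEC_METADATA_SYNDROM(metadata),
		 MLX5_IPSEC_METADATA_HANDLE(metadata));
}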
struct mlx5e_accel_tx_ipsec_state {
@@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt);
-
void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
@@ -80,11 +77,6 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
-static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
-{
- return ipsec_st->x;
-}
-
static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
{
return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
@@ -131,14 +123,17 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
- struct xfrm_offload *xo = xfrm_offload(skb);
+ u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
return false;
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (xo->inner_ipproto) {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ inner_ipproto = xfrm_offload(skb)->inner_ipproto;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 5cb936541b9e..9de84821dafb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -35,27 +35,7 @@
#include <net/sock.h>
#include "en.h"
-#include "accel/ipsec.h"
-#include "fpga/sdk.h"
-#include "en_accel/ipsec.h"
-#include "fpga/ipsec.h"
-
-static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
-};
+#include "ipsec.h"
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
@@ -65,13 +45,11 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
};
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -103,45 +81,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
-{
- return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
-{
- int ret = 0;
-
- if (priv->ipsec)
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
- if (ret)
- memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
-{
- unsigned int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
-{
- int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc,
- i);
- return idx;
-}
-
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
-MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index d93aadbf10da..da2184c94203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -2,11 +2,49 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
-#include "en_accel/tls.h"
+#include "lib/mlx5.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id)
+{
+ u32 sz_bytes;
+ void *key;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5_create_encryption_key(mdev, key, sz_bytes,
+ MLX5_ACCEL_OBJ_TLS_KEY,
+ p_key_id);
+}
+
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_destroy_encryption_key(mdev, key_id);
+}
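
mlx5_ktls_create_key() and mlx5_ktls_destroy_key() are thin wrappers that extract the raw key for the two supported GCM ciphers and hand it to the generic encryption-key object API. An illustrative pairing, trimmed to the minimum (the caller shown is hypothetical):

static int ktls_key_roundtrip_sketch(struct mlx5_core_dev *mdev,
				     struct tls_crypto_info *crypto_info)
{
	u32 key_id;
	int err;

	err = mlx5_ktls_create_key(mdev, crypto_info, &key_id);
	if (err)
		return err; /* -EINVAL for unsupported cipher types */

	/* ... key_id is programmed into the TIS/TIR context here ... */

	mlx5_ktls_destroy_key(mdev, key_id);
	return 0;
}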
+
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
@@ -16,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+ if (!mlx5e_ktls_type_check(mdev, crypto_info))
return -EOPNOTSUPP;
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
@@ -54,20 +92,38 @@ static const struct tlsdev_ops mlx5e_ktls_ops = {
.tls_dev_resync = mlx5e_ktls_resync,
};
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ /* Check that the required ICOSQ WQEs can be posted. */
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
+ return false;
+
+ return true;
+}
+
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
return;
- if (mlx5e_accel_is_ktls_tx(mdev)) {
+ if (mlx5e_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -80,9 +136,9 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
if (enable)
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
else
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
mutex_unlock(&priv->state_lock);
return err;
@@ -92,7 +148,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -100,7 +156,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
if (err) {
destroy_workqueue(priv->tls->rx_wq);
return err;
@@ -112,11 +168,32 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
destroy_workqueue(priv->tls->rx_wq);
}
+
+int mlx5e_ktls_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls;
+
+ if (!mlx5e_is_ktls_device(priv->mdev))
+ return 0;
+
+ tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+ if (!tls)
+ return -ENOMEM;
+
+ priv->tls = tls;
+ return 0;
+}
+
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
+{
+ kfree(priv->tls);
+ priv->tls = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 5833deb2354c..1c35045e41fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -4,11 +4,51 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
+#include <linux/tls.h>
+#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id);
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ if (is_kdump_kernel())
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ return (MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128) ||
+ MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256));
+}
+
+static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info)
+{
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256);
+ break;
+ }
+
+ return false;
+}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
@@ -16,27 +56,46 @@ struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_tx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
+
+struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_ctx;
+ atomic64_t tx_tls_del;
+ atomic64_t tx_tls_pool_alloc;
+ atomic64_t tx_tls_pool_free;
+ atomic64_t rx_tls_ctx;
+ atomic64_t rx_tls_del;
+};
+
+struct mlx5e_tls {
+ struct mlx5e_tls_sw_stats sw_stats;
+ struct workqueue_struct *rx_wq;
+ struct mlx5e_tls_tx_pool *tx_pool;
+};
+
+int mlx5e_ktls_init(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+#else
+static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_rx(mdev);
}
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
+static inline int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(mdev);
+ return 0;
}
-#else
-
-static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
+static inline void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
}
@@ -64,10 +123,23 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ return 0;
+}
+static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ return 0;
+}
#endif
#endif /* __MLX5E_KTLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 96064a2033f7..3e54834747ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -3,7 +3,7 @@
#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx {
};
struct mlx5e_ktls_offload_context_rx {
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct accel_rule rule;
struct sock *sk;
struct mlx5e_rq_stats *rq_stats;
@@ -111,7 +111,7 @@ static void accel_rule_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
- rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
+ rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
mlx5e_tir_get_tirn(&priv_rx->tir),
MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_rx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
*ctx = priv_rx;
}
@@ -363,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
bool trigger_poll;
@@ -374,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
spin_lock_bh(&ktls_resync->lock);
spin_lock_bh(&priv_rx->lock);
- memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+ switch (priv_rx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &priv_rx->crypto_info.crypto_info_128;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &priv_rx->crypto_info.crypto_info_256;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_rx->crypto_info.crypto_info.cipher_type);
+ spin_unlock_bh(&priv_rx->lock);
+ spin_unlock_bh(&ktls_resync->lock);
+ return;
+ }
+
if (list_empty(&priv_rx->list)) {
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
@@ -462,6 +484,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct net_device *netdev = rq->netdev;
+ struct net *net = dev_net(netdev);
struct sock *sk = NULL;
unsigned int datalen;
struct iphdr *iph;
@@ -476,7 +499,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
@@ -486,7 +509,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
@@ -604,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
- priv_rx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_rx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_rx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
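
Both cipher-type switches in this file duplicate a per-cipher access into the new union mlx5e_crypto_info. One possible consolidation, offered only as a sketch, would fetch the rec_seq destination once:

static void *rec_seq_of_sketch(union mlx5e_crypto_info *ci, size_t *len)
{
	switch (ci->crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		*len = sizeof(ci->crypto_info_128.rec_seq);
		return ci->crypto_info_128.rec_seq;
	case TLS_CIPHER_AES_GCM_256:
		*len = sizeof(ci->crypto_info_256.rec_seq);
		return ci->crypto_info_256.rec_seq;
	default:
		return NULL; /* unsupported cipher, caller must handle */
	}
}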
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 56e7b2aee85f..7c1c0eb16787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -36,18 +36,13 @@
#include "en.h"
#include "fpga/sdk.h"
-#include "en_accel/tls.h"
-
-static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
-};
+#include "en_accel/ktls.h"
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_free) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
};
@@ -55,51 +50,43 @@ static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
-{
- if (!priv->tls)
- return NULL;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return mlx5e_ktls_sw_stats_desc;
- return mlx5e_tls_sw_stats_desc;
-}
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
- return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
+
+ return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- stats_desc[i].format);
+ mlx5e_ktls_sw_stats_desc[i].format);
return n;
}
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] =
- MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- stats_desc, i);
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc,
+ i);
return n;
}
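
The counter machinery above reads each statistic by byte offset: MLX5E_DECLARE_STAT records offsetof() per field and MLX5E_READ_CTR_ATOMIC64 adds it back to the stats base pointer. A self-contained sketch of that idiom (all names below are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <stdatomic.h>

/* Stand-in for struct mlx5e_tls_sw_stats: one atomic counter per field. */
struct sw_stats {
	atomic_ulong tx_tls_ctx;
	atomic_ulong rx_tls_ctx;
};

struct counter_desc {
	const char *format;
	size_t offset;	/* byte offset of the field within sw_stats */
};

/* Same idea as MLX5E_DECLARE_STAT: record the field's name and offset. */
#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc desc[] = {
	DECLARE_STAT(struct sw_stats, tx_tls_ctx),
	DECLARE_STAT(struct sw_stats, rx_tls_ctx),
};

/* Same idea as MLX5E_READ_CTR_ATOMIC64: re-add the offset to the base. */
static unsigned long read_ctr(struct sw_stats *s,
			      const struct counter_desc *d, size_t i)
{
	return atomic_load((atomic_ulong *)((char *)s + d[i].offset));
}

int main(void)
{
	struct sw_stats s = {0};
	size_t i;

	atomic_fetch_add(&s.rx_tls_ctx, 3);
	for (i = 0; i < sizeof(desc) / sizeof(desc[0]); i++)
		printf("%s = %lu\n", desc[i].format, read_ctr(&s, desc, i));
	return 0;
}
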
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 9ad3459fb63a..2e0335246967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -27,38 +27,78 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
{
u16 num_dumps, stop_room = 0;
- if (!mlx5e_accel_is_ktls_tx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev))
return 0;
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
- stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
+ stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += 1; /* fence nop */
return stop_room;
}
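
Stop room is the worst-case number of send-queue building blocks (WQEBBs) a single TX operation may consume; the queue is stopped while at least this many remain free. The new "+ 1" covers the fence NOP that mlx5e_ktls_tx_post_param_wqes() now always posts. A sketch of the arithmetic, with assumed WQEBB counts (the driver's values come from the mlx5 headers, and mlx5e_stop_room_for_wqe() additionally rounds per device):

#include <stdio.h>

/* Assumed values, for illustration only. */
#define STATIC_PARAMS_WQEBBS	2
#define PROGRESS_PARAMS_WQEBBS	1
#define DUMP_WQEBBS		1

static unsigned int ktls_stop_room(unsigned int num_dumps)
{
	unsigned int room = 0;

	room += STATIC_PARAMS_WQEBBS;		/* resync static params WQE */
	room += PROGRESS_PARAMS_WQEBBS;		/* resync progress params WQE */
	room += num_dumps * DUMP_WQEBBS;	/* one DUMP WQE per fragment */
	room += 1;				/* trailing fence NOP */
	return room;
}

int main(void)
{
	printf("stop room for 17 dumps: %u WQEBBs\n", ktls_stop_room(17));
	return 0;
}
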
+static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
+{
+ MLX5_SET(tisc, tisc, tls_en, 1);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
- MLX5_SET(tisc, tisc, tls_en, 1);
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- return mlx5e_create_tis(mdev, in, tisn);
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
+}
+
+static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
}
struct mlx5e_ktls_offload_context_tx {
- struct tls_offload_context_tx *tx_ctx;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
- struct mlx5e_tls_sw_stats *sw_stats;
+ /* fast path */
u32 expected_seq;
u32 tisn;
- u32 key_id;
bool ctx_post_pending;
+ /* control / resync */
+ struct list_head list_node; /* member of the pool */
+ union mlx5e_crypto_info crypto_info;
+ struct tls_offload_context_tx *tx_ctx;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ u32 key_id;
+ u8 create_err : 1;
};
static void
@@ -68,8 +108,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_tx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
*ctx = priv_tx;
}
@@ -83,65 +122,410 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
return *ctx;
}
+/* struct for callback API management */
+struct mlx5e_async_ctx {
+ struct mlx5_async_work context;
+ struct mlx5_async_ctx async_ctx;
+ struct work_struct work;
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct completion complete;
+ int err;
+ union {
+ u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
+ };
+};
+
+static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+{
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ if (!bulk_async)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
+ init_completion(&async->complete);
+ }
+
+ return bulk_async;
+}
+
+static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
+ }
+ kvfree(bulk_async);
+}
+
+static void create_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ if (status) {
+ async->err = status;
+ priv_tx->create_err = 1;
+ goto out;
+ }
+
+ priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
+out:
+ complete(&async->complete);
+}
+
+static void destroy_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ complete(&async->complete);
+ kfree(priv_tx);
+}
+
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
+ struct mlx5e_async_ctx *async)
+{
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ int err;
+
+ priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+ if (!priv_tx)
+ return ERR_PTR(-ENOMEM);
+
+ priv_tx->mdev = mdev;
+ priv_tx->sw_stats = sw_stats;
+
+ if (!async) {
+ err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+ if (err)
+ goto err_out;
+ } else {
+ async->priv_tx = priv_tx;
+ err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ async->out_create, sizeof(async->out_create),
+ create_tis_callback, &async->context);
+ if (err)
+ goto err_out;
+ }
+
+ return priv_tx;
+
+err_out:
+ kfree(priv_tx);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_async_ctx *async)
+{
+ if (priv_tx->create_err) {
+ complete(&async->complete);
+ kfree(priv_tx);
+ return;
+ }
+ async->priv_tx = priv_tx;
+ mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
+ &async->async_ctx,
+ async->out_destroy, sizeof(async->out_destroy),
+ destroy_tis_callback, &async->context);
+}
+
+static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
+ struct list_head *list, int size)
+{
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = mlx5e_bulk_async_init(mdev, size);
+ if (!bulk_async)
+ return;
+
+ i = 0;
+ list_for_each_entry_safe(obj, n, list, list_node) {
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ i++;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ wait_for_completion(&async->complete);
+ }
+ mlx5e_bulk_async_cleanup(bulk_async, size);
+}
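
The cleanup above follows a fire-then-wait bulk pattern: issue one asynchronous destroy command per object, then wait on every completion before freeing the shared contexts. A user-space analog using pthreads in place of mlx5_cmd_exec_cb()/wait_for_completion() (structure only, not the driver API):

#include <pthread.h>
#include <stdio.h>

#define BULK 4

struct async_slot {
	pthread_t thread;
	int err;
	int done;		/* completion analog */
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void slot_complete(struct async_slot *s, int err)
{
	pthread_mutex_lock(&s->lock);
	s->err = err;
	s->done = 1;
	pthread_cond_signal(&s->cond);
	pthread_mutex_unlock(&s->lock);
}

static void slot_wait(struct async_slot *s)
{
	pthread_mutex_lock(&s->lock);
	while (!s->done)
		pthread_cond_wait(&s->cond, &s->lock);
	pthread_mutex_unlock(&s->lock);
}

static void *fw_command(void *arg)
{
	slot_complete(arg, 0);	/* pretend the command succeeded */
	return NULL;
}

int main(void)
{
	struct async_slot slots[BULK];
	int i, err = 0;

	for (i = 0; i < BULK; i++) {
		slots[i] = (struct async_slot){
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.cond = PTHREAD_COND_INITIALIZER,
		};
		pthread_create(&slots[i].thread, NULL, fw_command, &slots[i]);
	}
	/* Wait for every completion before tearing anything down, mirroring
	 * the wait_for_completion() loop in mlx5e_tls_priv_tx_list_cleanup().
	 */
	for (i = 0; i < BULK; i++) {
		slot_wait(&slots[i]);
		if (!err && slots[i].err)
			err = slots[i].err;
		pthread_join(slots[i].thread, NULL);
	}
	printf("bulk err=%d\n", err);
	return 0;
}
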
+
+/* Recycling pool API */
+
+#define MLX5E_TLS_TX_POOL_BULK (16)
+#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
+#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
+
+struct mlx5e_tls_tx_pool {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ struct mutex lock; /* Protects access to the pool */
+ struct list_head list;
+ size_t size;
+
+ struct workqueue_struct *wq;
+ struct work_struct create_work;
+ struct work_struct destroy_work;
+};
+
+static void create_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, create_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ LIST_HEAD(local_list);
+ int i, j, err = 0;
+
+ bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
+ if (!bulk_async)
+ return;
+
+ for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+ list_add(&obj->list_node, &local_list);
+ }
+
+ for (j = 0; j < i; j++) {
+ struct mlx5e_async_ctx *async = &bulk_async[j];
+
+ wait_for_completion(&async->complete);
+ if (!err && async->err)
+ err = async->err;
+ }
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
+ mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ if (err)
+ goto err_out;
+
+ mutex_lock(&pool->lock);
+ if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ goto err_out;
+ }
+ list_splice(&local_list, &pool->list);
+ pool->size += MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+ return;
+
+err_out:
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static void destroy_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
+ struct mlx5e_tls_sw_stats *sw_stats)
+{
+ struct mlx5e_tls_tx_pool *pool;
+
+ BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
+ if (!pool->wq)
+ goto err_free;
+
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->lock);
+
+ INIT_WORK(&pool->create_work, create_work);
+ INIT_WORK(&pool->destroy_work, destroy_work);
+
+ pool->mdev = mdev;
+ pool->sw_stats = sw_stats;
+
+ return pool;
+
+err_free:
+ kvfree(pool);
+ return NULL;
+}
+
+static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ }
+ if (pool->size) {
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
+ atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
+ }
+}
+
+static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ mlx5e_tls_tx_pool_list_cleanup(pool);
+ destroy_workqueue(pool->wq);
+ kvfree(pool);
+}
+
+static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
+{
+ mutex_lock(&pool->lock);
+ list_add(&obj->list_node, &pool->list);
+ if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, &pool->destroy_work);
+ mutex_unlock(&pool->lock);
+}
+
+static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+
+ mutex_lock(&pool->lock);
+ if (unlikely(pool->size == 0)) {
+ /* pool is empty:
+ * - trigger the populating work, and
+ * - serve the current context via the regular blocking api.
+ */
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
+ if (!IS_ERR(obj))
+ atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
+ return obj;
+ }
+
+ obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
+ list_node);
+ list_del(&obj->list_node);
+ if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ return obj;
+}
+
+/* End of pool API */
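
The pool above keeps pre-created TIS contexts between two watermarks: popping down to the low mark kicks background refill, pushing up to the high mark kicks background trimming, and an empty pool falls back to synchronous allocation. A minimal user-space sketch of the same watermark logic (constants and helper names are illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

#define POOL_HIGH 4096
#define POOL_LOW  (POOL_HIGH / 4)

struct node { struct node *next; };

struct pool {
	pthread_mutex_t lock;
	struct node *head;
	size_t size;
};

/* Stand-ins for queue_work(pool->wq, ...): a real pool refills or trims
 * asynchronously, as create_work()/destroy_work() do above.
 */
static void kick_refill(struct pool *p) { (void)p; }
static void kick_trim(struct pool *p)   { (void)p; }

static void pool_push(struct pool *p, struct node *n)
{
	pthread_mutex_lock(&p->lock);
	n->next = p->head;
	p->head = n;
	if (++p->size == POOL_HIGH)
		kick_trim(p);	/* crossed the high watermark */
	pthread_mutex_unlock(&p->lock);
}

static struct node *pool_pop(struct pool *p)
{
	struct node *n;

	pthread_mutex_lock(&p->lock);
	if (!p->head) {
		kick_refill(p);	/* empty: refill in the background ... */
		pthread_mutex_unlock(&p->lock);
		return malloc(sizeof(*n));	/* ... and allocate synchronously */
	}
	n = p->head;
	p->head = n->next;
	if (--p->size == POOL_LOW)
		kick_refill(p);	/* crossed the low watermark */
	pthread_mutex_unlock(&p->lock);
	return n;
}

int main(void)
{
	struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node *n = pool_pop(&p);	/* empty pool: falls back to malloc */

	if (n)
		pool_push(&p, n);
	printf("pool size: %zu\n", p.size);
	return 0;
}
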
+
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
- priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
- if (!priv_tx)
- return -ENOMEM;
+ priv_tx = pool_pop(pool);
+ if (IS_ERR(priv_tx))
+ return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+ err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
if (err)
goto err_create_key;
- priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
- priv_tx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_tx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_tx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+	default:
+		WARN_ONCE(1, "Unsupported cipher type %u\n",
+			  crypto_info->cipher_type);
+		err = -EOPNOTSUPP;
+		goto err_destroy_key;
+ }
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
- err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
- if (err)
- goto err_create_tis;
-
priv_tx->ctx_post_pending = true;
atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
-err_create_tis:
-	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
+err_destroy_key:
+	mlx5_ktls_destroy_key(pool->mdev, priv_tx->key_id);
err_create_key:
- kfree(priv_tx);
+ pool_push(pool, priv_tx);
return err;
}
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_tx_pool *pool;
struct mlx5e_priv *priv;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5e_destroy_tis(mdev, priv_tx->tisn);
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
- kfree(priv_tx);
+ mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ pool_push(pool, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -202,6 +586,16 @@ post_progress_params(struct mlx5e_txqsq *sq,
sq->pc += num_wqebbs;
}
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ tx_fill_wi(sq, pi, 1, 0, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
@@ -213,6 +607,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
post_static_params(sq, priv_tx, fence_first_post);
post_progress_params(sq, priv_tx, progress_fence);
+ tx_post_fence_nop(sq);
}
struct tx_sync_info {
@@ -288,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
- rec_seq = info->rec_seq;
- rec_seq_sz = sizeof(info->rec_seq);
+ switch (priv_tx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_tx->crypto_info.crypto_info.cipher_type);
+ return;
+ }
skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
if (!skip_static_post)
@@ -305,7 +717,7 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
}
static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_data_seg *dseg;
@@ -327,7 +739,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->tis_tir_num = cpu_to_be32(tisn << 8);
- cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -362,67 +773,39 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats->tls_dump_bytes += wi->num_bytes;
}
-static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
- tx_fill_wi(sq, pi, 1, 0, NULL);
-
- mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
-}
-
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
int datalen,
u32 seq)
{
- struct mlx5e_sq_stats *stats = sq->stats;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- int i = 0;
+ int i;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
- if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
- if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
- stats->tls_skip_no_sync_data++;
- return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
- }
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
+ /* We might get here with ret == FAIL if a retransmission
+ * reaches the driver after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
- stats->tls_drop_no_sync_data++;
- goto err_out;
- }
-
- stats->tls_ooo++;
+ return ret;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags) {
- tx_post_fence_nop(sq);
- return MLX5E_KTLS_SYNC_DONE;
- }
-
- for (; i < info.nr_frags; i++) {
+ for (i = 0; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
orig_fsz = skb_frag_size(f);
do {
- bool fence = !(i || frag_offset);
unsigned int fsz;
n++;
fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
skb_frag_size_set(f, fsz);
- if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
page_ref_add(skb_frag_page(f), n - 1);
goto err_out;
}
@@ -448,34 +831,55 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct net_device *tls_netdev;
+ struct tls_context *tls_ctx;
+ int datalen;
u32 seq;
+ datalen = skb->len - skb_tcp_all_headers(skb);
+ if (!datalen)
+ return true;
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't WARN on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
+ goto err_out;
+
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+ if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- }
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+ stats->tls_ooo++;
+
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ stats->tls_skip_no_sync_data++;
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
- fallthrough;
+ goto err_out;
case MLX5E_KTLS_SYNC_FAIL:
+ stats->tls_drop_no_sync_data++;
goto err_out;
}
}
@@ -494,3 +898,24 @@ err_out:
dev_kfree_skb_any(skb);
return false;
}
+
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return 0;
+
+ priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
+ if (!priv->tls->tx_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return;
+
+ mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
+ priv->tls->tx_pool = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
index ac29aeb8af49..570a912dd6fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
@@ -21,7 +21,7 @@ enum {
static void
fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 key_id, u32 resync_tcp_sn)
{
char *initial_rn, *gcm_iv;
@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
ctx = params->ctx;
- EXTRACT_INFO_FIELDS;
+ switch (crypto_info->crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &crypto_info->crypto_info_128;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &crypto_info->crypto_info_256;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->crypto_info.cipher_type);
+ return;
+ }
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction)
{
@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
- fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);
+ fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 08c9d5134479..2dd78dd4ad65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state {
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
@@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
+
+static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
+static inline void
+mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
+}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
return false;
}
+static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 *cqe_bcnt)
+{
+}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */
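
The #else branch above keeps call sites free of #ifdefs: when CONFIG_MLX5_EN_TLS is off, the same function names resolve to static inline no-ops that the compiler discards. A toy illustration of the stub pattern:

#include <stdio.h>

/* Toggle to see both build flavors. */
#define CONFIG_FEATURE_X 1

#ifdef CONFIG_FEATURE_X
static inline unsigned int feature_x_stop_room(void) { return 3; }
#else
/* Stubs keep callers identical and cost nothing when disabled. */
static inline unsigned int feature_x_stop_room(void) { return 0; }
#endif

int main(void)
{
	printf("stop room: %u\n", feature_x_stop_room());
	return 0;
}
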
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index e5c180f2403b..3d79cd379890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -6,7 +6,6 @@
#include <net/tls.h>
#include "en.h"
-#include "accel/tls.h"
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
@@ -28,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);
+union mlx5e_crypto_info {
+ struct tls_crypto_info crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 crypto_info_128;
+ struct tls12_crypto_info_aes_gcm_256 crypto_info_256;
+};
+
struct mlx5e_set_tls_static_params_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
@@ -73,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe {
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction);
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
new file mode 100644
index 000000000000..2ef36cb9555a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -0,0 +1,1861 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/xarray.h>
+
+#include "en.h"
+#include "lib/aso.h"
+#include "lib/mlx5.h"
+#include "en_accel/macsec.h"
+#include "en_accel/macsec_fs.h"
+
+#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
+#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
+
+enum mlx5_macsec_aso_event_arm {
+ MLX5E_ASO_EPN_ARM = BIT(0),
+};
+
+enum {
+ MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
+struct mlx5e_macsec_handle {
+ struct mlx5e_macsec *macsec;
+ u32 obj_id;
+ u8 idx;
+};
+
+enum {
+ MLX5_MACSEC_EPN,
+};
+
+struct mlx5e_macsec_aso_out {
+ u8 event_arm;
+ u32 mode_param;
+};
+
+struct mlx5e_macsec_aso_in {
+ u8 mode;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_epn_state {
+ u32 epn_msb;
+ u8 epn_enabled;
+ u8 overlap;
+};
+
+struct mlx5e_macsec_async_work {
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ struct work_struct work;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_sa {
+ bool active;
+ u8 assoc_num;
+ u32 macsec_obj_id;
+ u32 enc_key_id;
+ u32 next_pn;
+ sci_t sci;
+ salt_t salt;
+
+ struct rhash_head hash;
+ u32 fs_id;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct rcu_head rcu_head;
+ struct mlx5e_macsec_epn_state epn_state;
+};
+
+struct mlx5e_macsec_rx_sc;
+struct mlx5e_macsec_rx_sc_xarray_element {
+ u32 fs_id;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+};
+
+struct mlx5e_macsec_rx_sc {
+ bool active;
+ sci_t sci;
+ struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
+ struct list_head rx_sc_list_element;
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct metadata_dst *md_dst;
+ struct rcu_head rcu_head;
+};
+
+struct mlx5e_macsec_umr {
+ dma_addr_t dma_addr;
+ u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ u32 mkey;
+};
+
+struct mlx5e_macsec_aso {
+ /* ASO */
+ struct mlx5_aso *maso;
+ /* Protects macsec ASO */
+ struct mutex aso_lock;
+ /* UMR */
+ struct mlx5e_macsec_umr *umr;
+
+ u32 pdn;
+};
+
+static const struct rhashtable_params rhash_sci = {
+ .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
+ .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
+ .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+struct mlx5e_macsec_device {
+ const struct net_device *netdev;
+ struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
+ struct list_head macsec_rx_sc_list_head;
+ unsigned char *dev_addr;
+ struct list_head macsec_device_list_element;
+};
+
+struct mlx5e_macsec {
+ struct list_head macsec_device_list_head;
+ int num_of_devices;
+ struct mlx5e_macsec_fs *macsec_fs;
+ struct mutex lock; /* Protects mlx5e_macsec internal contexts */
+
+ /* Tx sci -> fs id mapping handling */
+ struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
+
+ /* Rx fs_id -> rx_sc mapping */
+ struct xarray sc_xarray;
+
+ struct mlx5_core_dev *mdev;
+
+ /* Stats manage */
+ struct mlx5e_macsec_stats stats;
+
+ /* ASO */
+ struct mlx5e_macsec_aso aso;
+
+ struct notifier_block nb;
+ struct workqueue_struct *wq;
+};
+
+struct mlx5_macsec_obj_attrs {
+ u32 aso_pdn;
+ u32 next_pn;
+ __be64 sci;
+ u32 enc_key_id;
+ bool encrypt;
+ struct mlx5e_macsec_epn_state epn_state;
+ salt_t salt;
+ __be32 ssci;
+ bool replay_protect;
+ u32 replay_window;
+};
+
+struct mlx5_aso_ctrl_param {
+ u8 data_mask_mode;
+ u8 condition_0_operand;
+ u8 condition_1_operand;
+ u8 condition_0_offset;
+ u8 condition_1_offset;
+ u8 data_offset;
+ u8 condition_operand;
+ u32 condition_0_data;
+ u32 condition_0_mask;
+ u32 condition_1_data;
+ u32 condition_1_mask;
+ u64 bitwise_data;
+ u64 data_mask;
+};
+
+static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr;
+ struct device *dma_device;
+ dma_addr_t dma_addr;
+ int err;
+
+ umr = kzalloc(sizeof(*umr), GFP_KERNEL);
+	if (!umr)
+		return -ENOMEM;
+
+ dma_device = &mdev->pdev->dev;
+ dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(dma_device, dma_addr);
+ if (err) {
+ mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
+ goto out_dma;
+ }
+
+ err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
+ goto out_mkey;
+ }
+
+ umr->dma_addr = dma_addr;
+
+ aso->umr = umr;
+
+ return 0;
+
+out_mkey:
+ dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+out_dma:
+ kfree(umr);
+ return err;
+}
+
+static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr = aso->umr;
+
+ mlx5_core_destroy_mkey(mdev, umr->mkey);
+ dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ kfree(umr);
+}
+
+static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
+{
+ u8 window_sz;
+
+ if (!attrs->replay_protect)
+ return 0;
+
+ switch (attrs->replay_window) {
+ case 256:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ case 128:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 64:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 32:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
+
+ return 0;
+}
+
+static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_obj_attrs *attrs,
+ bool is_tx,
+ u32 *macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *aso_ctx;
+ void *obj;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
+ aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
+
+ MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
+ MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
+ MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
+ MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
+ MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
+
+ /* Epn */
+ if (attrs->epn_state.epn_enabled) {
+ void *salt_p;
+ int i;
+
+ MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
+ salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
+ for (i = 0; i < 3 ; i++)
+ memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
+ } else {
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
+ }
+
+ MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
+ if (is_tx) {
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
+ } else {
+ err = macsec_set_replay_protection(attrs, aso_ctx);
+ if (err)
+ return err;
+ }
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to create MACsec object (err = %d)\n",
+ err);
+ return err;
+ }
+
+ *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
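
MLX5_SET/MLX5_GET/MLX5_ADDR_OF, used throughout the two functions above, program firmware command buffers by writing named fields into a device-endian layout described by mlx5_ifc.h. A toy analog with fixed 32-bit slots (the real macros handle arbitrary bit offsets and widths):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Toy command buffer: each field occupies a fixed 32-bit slot, stored
 * big-endian as the device expects. Field names and the opcode value
 * below are illustrative, not the mlx5 layout.
 */
enum field { FLD_OPCODE, FLD_OBJ_TYPE, FLD_OBJ_ID, FLD_MAX };

static void cmd_set(uint32_t *buf, enum field f, uint32_t val)
{
	buf[f] = htonl(val);
}

static uint32_t cmd_get(const uint32_t *buf, enum field f)
{
	return ntohl(buf[f]);
}

int main(void)
{
	uint32_t in[FLD_MAX];

	memset(in, 0, sizeof(in));
	cmd_set(in, FLD_OPCODE, 0xa00);	/* illustrative opcode */
	cmd_set(in, FLD_OBJ_ID, 42);
	printf("obj_id = %u\n", cmd_get(in, FLD_OBJ_ID));
	return 0;
}
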
+
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa,
+ bool is_tx)
+{
+ int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ if ((is_tx) && sa->fs_id) {
+		/* Make sure ongoing datapath readers see a valid SA */
+ rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ sa->fs_id = 0;
+ }
+
+ if (!sa->macsec_rule)
+ return;
+
+ mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+ sa->macsec_rule = NULL;
+}
+
+static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa,
+ bool encrypt,
+ bool is_tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_macsec_obj_attrs obj_attrs;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct macsec_key *key;
+ int err;
+
+ obj_attrs.next_pn = sa->next_pn;
+ obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
+ obj_attrs.enc_key_id = sa->enc_key_id;
+ obj_attrs.encrypt = encrypt;
+ obj_attrs.aso_pdn = macsec->aso.pdn;
+ obj_attrs.epn_state = sa->epn_state;
+
+ if (is_tx) {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
+ key = &ctx->sa.tx_sa->key;
+ } else {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+ key = &ctx->sa.rx_sa->key;
+ }
+
+ memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+ obj_attrs.replay_window = ctx->secy->replay_window;
+ obj_attrs.replay_protect = ctx->secy->replay_protect;
+
+ err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
+ if (!macsec_rule) {
+ err = -ENOMEM;
+ goto destroy_macsec_object;
+ }
+
+ sa->macsec_rule = macsec_rule;
+
+ if (is_tx) {
+ err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ if (err)
+ goto destroy_macsec_object_and_rule;
+ }
+
+ return 0;
+
+destroy_macsec_object_and_rule:
+ mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
+destroy_macsec_object:
+ mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
+
+ return err;
+}
+
+static struct mlx5e_macsec_rx_sc *
+mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
+{
+ struct mlx5e_macsec_rx_sc *iter;
+
+ list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
+ if (iter->sci == sci)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *rx_sa,
+ bool active)
+{
+ struct mlx5_core_dev *mdev = macsec->mdev;
+ struct mlx5_macsec_obj_attrs attrs = {};
+ int err = 0;
+
+	if (rx_sa->active == active)
+ return 0;
+
+ rx_sa->active = active;
+ if (!active) {
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ return 0;
+ }
+
+ attrs.sci = cpu_to_be64((__force u64)rx_sa->sci);
+ attrs.enc_key_id = rx_sa->enc_key_id;
+ err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
+{
+ const struct net_device *netdev = ctx->netdev;
+ const struct macsec_secy *secy = ctx->secy;
+
+ if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when validate_frame is in strict mode\n");
+ return false;
+ }
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
+ netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
+ MACSEC_DEFAULT_ICV_LEN);
+ return false;
+ }
+
+ if (!secy->protect_frames) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when protect_frames is set\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5e_macsec_device *
+mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
+ const struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_device *iter;
+ const struct list_head *list;
+
+ list = &macsec->macsec_device_list_head;
+ list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
+ if (iter->netdev == ctx->secy->netdev)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
+ const pn_t *next_pn_halves)
+{
+ struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
+
+ sa->salt = key->salt;
+ epn_state->epn_enabled = 1;
+ epn_state->epn_msb = next_pn_halves->upper;
+ epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
+}
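
With extended packet numbers (XPN) the 64-bit PN is split: hardware tracks only the low 32 bits, so software supplies epn_msb plus an overlap flag saying which side of the 2^31 midpoint the low half currently sits on, letting the device detect rollover. A sketch of the split performed by update_macsec_epn():

#include <stdio.h>
#include <stdint.h>

#define EPN_SCOPE_MID 0x80000000UL	/* same as MLX5_MACSEC_EPN_SCOPE_MID */

struct epn_state {
	uint32_t epn_msb;
	uint8_t overlap;
};

/* Split a 64-bit extended PN the way update_macsec_epn() does: the
 * device keeps the low half; software tracks the high half plus which
 * side of the 2^31 midpoint the low half is on.
 */
static void epn_split(uint64_t next_pn, struct epn_state *st)
{
	uint32_t lower = (uint32_t)next_pn;

	st->epn_msb = (uint32_t)(next_pn >> 32);
	st->overlap = lower < EPN_SCOPE_MID ? 0 : 1;
}

int main(void)
{
	struct epn_state st;

	epn_split(0x00000002ffffffffULL, &st);
	printf("msb=%u overlap=%u\n", st.epn_msb, st.overlap);
	return 0;
}
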
+
+static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+		err = -EINVAL;
+ goto out;
+ }
+
+ if (macsec_device->tx_sa[assoc_num]) {
+ netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
+ if (!tx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+ tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
+ tx_sa->sci = secy->sci;
+ tx_sa->assoc_num = assoc_num;
+
+ if (secy->xpn)
+ update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &tx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ macsec_device->tx_sa[assoc_num] = tx_sa;
+ if (!secy->operational ||
+ assoc_num != tx_sc->encoding_sa ||
+ !tx_sa->active)
+ goto out;
+
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto destroy_encryption_key;
+
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+destroy_encryption_key:
+ macsec_device->tx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
+destroy_sa:
+ kfree(tx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct net_device *netdev;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ netdev = ctx->netdev;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+		err = -EINVAL;
+ goto out;
+ }
+
+ if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+ netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (tx_sa->active == ctx_tx_sa->active)
+ goto out;
+
+ if (tx_sa->assoc_num != tx_sc->encoding_sa)
+ goto out;
+
+ if (ctx_tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ } else {
+ if (!tx_sa->macsec_rule) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+		err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree_rcu(tx_sa);
+ macsec_device->tx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
+{
+ struct mlx5e_macsec_sa *macsec_sa;
+ u32 fs_id = 0;
+
+ rcu_read_lock();
+ macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
+ if (macsec_sa)
+ fs_id = macsec_sa->fs_id;
+ rcu_read_unlock();
+
+ return fs_id;
+}
+
+static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct list_head *rx_sc_list;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
+ if (rx_sc) {
+ netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
+ ctx_rx_sc->sci);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
+ if (!rx_sc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
+ if (!sc_xarray_element) {
+ err = -ENOMEM;
+ goto destroy_rx_sc;
+ }
+
+ sc_xarray_element->rx_sc = rx_sc;
+ err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
+ XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
+ if (err)
+ goto destroy_sc_xarray_elemenet;
+
+ rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!rx_sc->md_dst) {
+ err = -ENOMEM;
+ goto erase_xa_alloc;
+ }
+
+ rx_sc->sci = ctx_rx_sc->sci;
+ rx_sc->active = ctx_rx_sc->active;
+ list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
+
+ rx_sc->sc_xarray_element = sc_xarray_element;
+ rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+erase_xa_alloc:
+ xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
+destroy_sc_xarray_elemenet:
+ kfree(sc_xarray_element);
+destroy_rx_sc:
+ kfree(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int i;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
+ if (!rx_sc) {
+ err = -EINVAL;
+ goto out;
+ }
+
+	if (rx_sc->active == ctx_rx_sc->active)
+		goto out;
+
+	rx_sc->active = ctx_rx_sc->active;
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+			   ctx->rx_sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+	/* At this point the relevant MACsec offload Rx rule is already
+	 * removed by mlx5e_macsec_cleanup_sa. Wait for the datapath to
+	 * finish with any in-flight Rx data: xa_erase synchronizes via
+	 * RCU, so once the fs_id is erased this rx_sc is hidden from the
+	 * datapath.
+	 */
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+ xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
+ metadata_dst_free(rx_sc->md_dst);
+ kfree(rx_sc->sc_xarray_element);
+
+ kfree_rcu(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sc->rx_sa[assoc_num]) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
+ sci, assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
+ if (!rx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rx_sa->active = ctx_rx_sa->active;
+ rx_sa->next_pn = ctx_rx_sa->next_pn;
+ rx_sa->sci = sci;
+ rx_sa->assoc_num = assoc_num;
+ rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
+
+ if (ctx->secy->xpn)
+ update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &rx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ rx_sc->rx_sa[assoc_num] = rx_sa;
+ if (!rx_sa->active)
+ goto out;
+
+	/* TODO: add support for both authentication and encryption flows */
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ if (err)
+ goto destroy_encryption_key;
+
+ goto out;
+
+destroy_encryption_key:
+ rx_sc->rx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
+destroy_sa:
+ kfree(rx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
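
The error paths in these add/del handlers use the kernel's goto-unwind idiom: each label undoes exactly the steps that succeeded before the failure, in reverse order, so every exit leaves a consistent state. A minimal sketch (names are illustrative):

#include <stdlib.h>

struct ctx { void *key; void *rule; };

static int create_key(struct ctx *c)  { c->key = malloc(1);  return c->key ? 0 : -1; }
static int create_rule(struct ctx *c) { c->rule = malloc(1); return c->rule ? 0 : -1; }

static int setup(struct ctx *c)
{
	int err;

	err = create_key(c);
	if (err)
		goto out;		/* nothing to undo yet */

	err = create_rule(c);
	if (err)
		goto destroy_key;	/* undo only the key */

	return 0;

destroy_key:
	free(c->key);
	c->key = NULL;
out:
	return err;
}

int main(void)
{
	struct ctx c = {0};

	return setup(&c) ? 1 : 0;
}
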
+
+static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sa = rx_sc->rx_sa[assoc_num];
+ if (!rx_sa) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+ sci, assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+		netdev_err(ctx->netdev,
+			   "MACsec offload: updating the PN of RX SA %d isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ sci_t sci = ctx->sa.rx_sa->sc->sci;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sa = rx_sc->rx_sa[assoc_num];
+ if (!rx_sa) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+ sci, assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ const struct net_device *netdev = ctx->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
+ netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
+ goto out;
+ }
+
+ if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
+ netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
+ err = -EBUSY;
+ goto out;
+ }
+
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
+ if (!macsec_device->dev_addr) {
+ kfree(macsec_device);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->netdev = dev;
+
+ INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
+ list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
+
+ ++macsec->num_of_devices;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
+ struct mlx5e_macsec_device *macsec_device)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct list_head *list;
+ int i, err = 0;
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa || !rx_sa->macsec_rule)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ }
+ }
+
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ if (rx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+ memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
+out:
+ return err;
+}
+
+/* This function is called from two MACsec ops functions:
+ * macsec_set_mac_address - the MAC address was changed, so the Tx contexts
+ * (MACsec object + steering) must be destroyed and re-created.
+ * macsec_changelink - the Tx SC or SecY may have changed, so both the Tx and
+ * Rx contexts (MACsec object + steering) must be destroyed and re-created.
+ */
+static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int i, err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* If the dev_addr hasn't changed, the callback came from macsec_changelink */
+ if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
+ err = macsec_upd_secy_hw_address(ctx, macsec_device);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree(tx_sa);
+ macsec_device->tx_sa[i] = NULL;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+
+ kfree_rcu(rx_sc);
+ }
+
+ kfree(macsec_device->dev_addr);
+ macsec_device->dev_addr = NULL;
+
+ list_del_rcu(&macsec_device->macsec_device_list_element);
+ --macsec->num_of_devices;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
+ struct mlx5_macsec_obj_attrs *attrs)
+{
+ attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
+ attrs->epn_state.overlap = sa->epn_state.overlap;
+}
+
+static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5_aso_ctrl_param *param)
+{
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ if (macsec_aso->umr->dma_addr) {
+ aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
+ aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
+ aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
+ }
+
+ if (!param)
+ return;
+
+ aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
+ aso_ctrl->condition_1_0_operand = param->condition_1_operand |
+ param->condition_0_operand << 4;
+ aso_ctrl->condition_1_0_offset = param->condition_1_offset |
+ param->condition_0_offset << 4;
+ aso_ctrl->data_offset_condition_operand = param->data_offset |
+ param->condition_operand << 6;
+ aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
+ aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
+ aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
+ aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
+ aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
+ aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
+}
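+
+/* A worked view of the byte packing above: each condition operand is a 4-bit
+ * field, so two of them share one byte with condition 0 in the high nibble,
+ * i.e. condition_1_0_operand = (condition_0_operand << 4) |
+ * condition_1_operand.  Likewise data_mask_mode is placed in bits 7:6 of its
+ * byte, and data_offset_condition_operand packs a 6-bit data_offset with
+ * condition_operand in bits 7:6.
+ */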
+
+static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
+ u32 macsec_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
+ u64 modify_field_select = 0;
+ void *obj;
+ int err;
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
+ macsec_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
+ modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
+
+ /* EPN */
+ if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
+ mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
+ macsec_id);
+ return -EOPNOTSUPP;
+ }
+
+ obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
+ MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5_aso_ctrl_param param = {};
+
+ param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
+ param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
+ param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
+ if (in->mode == MLX5_MACSEC_EPN) {
+ param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ param.bitwise_data = BIT_ULL(54);
+ param.data_mask = param.bitwise_data;
+ }
+ macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
+}
+
+static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false);
+ mutex_unlock(&aso->aso_lock);
+
+ return err;
+}
+
+static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
+
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false);
+ if (err)
+ goto err_out;
+
+ if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
+ out->event_arm |= MLX5E_ASO_EPN_ARM;
+
+ out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
+
+err_out:
+ mutex_unlock(&aso->aso_lock);
+ return err;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = iter->tx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list, *sc_list;
+ struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ sc_list = &iter->macsec_rx_sc_list_head;
+ list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = mlx5e_rx_sc->rx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
+ struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
+{
+ struct mlx5_macsec_obj_attrs attrs = {};
+ struct mlx5e_macsec_aso_in in = {};
+
+ /* When the bottom of the replay protection window (mode_param) crosses 2^31
+ * (half sequence number wraparound), i.e. mode_param rises above
+ * MLX5_MACSEC_EPN_SCOPE_MID, SW must set epn_overlap to OLD (1).
+ * When the bottom of the window crosses 2^32 (full sequence number
+ * wraparound), mode_param wraps and drops back below
+ * MLX5_MACSEC_EPN_SCOPE_MID; SW must then set epn_overlap to NEW (0) and
+ * increment epn_msb.
+ */
+
+ if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
+ sa->epn_state.epn_msb++;
+ sa->epn_state.overlap = 0;
+ } else {
+ sa->epn_state.overlap = 1;
+ }
+
+ macsec_build_accel_attrs(sa, &attrs);
+ mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
+
+ /* Re-set EPN arm event */
+ in.obj_id = obj_id;
+ in.mode = MLX5_MACSEC_EPN;
+ macsec_aso_set_arm_event(mdev, macsec, &in);
+}
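+
+/* A worked pass through the update above (illustrative, assuming
+ * MLX5_MACSEC_EPN_SCOPE_MID marks the 2^31 midpoint of the 32-bit lower-PN
+ * space):
+ *
+ *   mode_param = 0x90000000 -> window bottom passed 2^31:
+ *                              overlap = 1 (OLD), epn_msb unchanged
+ *   mode_param = 0x00001000 -> window bottom wrapped past 2^32:
+ *                              overlap = 0 (NEW), epn_msb incremented
+ */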
+
+static void macsec_async_event(struct work_struct *work)
+{
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5e_macsec_aso_out out = {};
+ struct mlx5e_macsec_aso_in in = {};
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+
+ async_work = container_of(work, struct mlx5e_macsec_async_work, work);
+ macsec = async_work->macsec;
+ mdev = async_work->mdev;
+ obj_id = async_work->obj_id;
+ macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
+ goto out_async_work;
+ }
+ }
+
+ /* Query MACsec ASO context */
+ in.obj_id = obj_id;
+ macsec_aso_query(mdev, macsec, &in, &out);
+
+ /* EPN case */
+ if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
+ macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
+
+out_async_work:
+ kfree(async_work);
+}
+
+static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5_eqe_obj_change *obj_change;
+ struct mlx5_eqe *eqe = data;
+ u16 obj_type;
+ u32 obj_id;
+
+ if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+ return NOTIFY_DONE;
+
+ obj_change = &eqe->data.obj_change;
+ obj_type = be16_to_cpu(obj_change->obj_type);
+ obj_id = be32_to_cpu(obj_change->obj_id);
+
+ if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
+ return NOTIFY_DONE;
+
+ async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
+ if (!async_work)
+ return NOTIFY_DONE;
+
+ async_work->macsec = macsec;
+ async_work->mdev = macsec->mdev;
+ async_work->obj_id = obj_id;
+
+ INIT_WORK(&async_work->work, macsec_async_event);
+
+ WARN_ON(!queue_work(macsec->wq, &async_work->work));
+
+ return NOTIFY_OK;
+}
+
+static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ struct mlx5_aso *maso;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &aso->pdn);
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
+ err);
+ return err;
+ }
+
+ maso = mlx5_aso_create(mdev, aso->pdn);
+ if (IS_ERR(maso)) {
+ err = PTR_ERR(maso);
+ goto err_aso;
+ }
+
+ err = mlx5e_macsec_aso_reg_mr(mdev, aso);
+ if (err)
+ goto err_aso_reg;
+
+ mutex_init(&aso->aso_lock);
+
+ aso->maso = maso;
+
+ return 0;
+
+err_aso_reg:
+ mlx5_aso_destroy(maso);
+err_aso:
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+ return err;
+}
+
+static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ if (!aso)
+ return;
+
+ mlx5e_macsec_aso_dereg_mr(mdev, aso);
+
+ mlx5_aso_destroy(aso->maso);
+
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+}
+
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
+{
+ mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
+}
+
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
+{
+ if (!macsec)
+ return NULL;
+
+ return &macsec->stats;
+}
+
+static const struct macsec_ops macsec_offload_ops = {
+ .mdo_add_txsa = mlx5e_macsec_add_txsa,
+ .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
+ .mdo_del_txsa = mlx5e_macsec_del_txsa,
+ .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
+ .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
+ .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
+ .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
+ .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
+ .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
+ .mdo_add_secy = mlx5e_macsec_add_secy,
+ .mdo_upd_secy = mlx5e_macsec_upd_secy,
+ .mdo_del_secy = mlx5e_macsec_del_secy,
+};
+
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ goto err_out;
+
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ return;
+
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+}
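+
+/* A worked example (assuming MLX5_ETH_WQE_FT_META_MACSEC is BIT(1)): for
+ * fs_id 5 the metadata above is 0x2 | (5 << 2) = 0x16.  The Tx steering
+ * rules match this word against misc_parameters_2.metadata_reg_a under the
+ * MLX5_ETH_WQE_FT_META_MACSEC_MASK defined in macsec_fs.c, steering the
+ * packet to its SA crypto rule.
+ */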
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec *macsec;
+ u32 fs_id;
+
+ macsec = priv->macsec;
+ if (!macsec)
+ return;
+
+ fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data);
+
+ rcu_read_lock();
+ sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
+ rx_sc = sc_xarray_element->rx_sc;
+ if (rx_sc) {
+ dst_hold(&rx_sc->md_dst->dst);
+ skb_dst_set(skb, &rx_sc->md_dst->dst);
+ }
+
+ rcu_read_unlock();
+}
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return;
+
+ /* Enable MACsec */
+ mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
+ netdev->macsec_ops = &macsec_offload_ops;
+ netdev->features |= NETIF_F_HW_MACSEC;
+ netif_keep_dst(netdev);
+}
+
+int mlx5e_macsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_macsec *macsec = NULL;
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ if (!mlx5e_is_macsec_device(priv->mdev)) {
+ mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
+ return 0;
+ }
+
+ macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
+ if (!macsec)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&macsec->macsec_device_list_head);
+ mutex_init(&macsec->lock);
+
+ err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
+ goto err_aso;
+ }
+
+ macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
+ if (!macsec->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
+
+ priv->macsec = macsec;
+
+ macsec->mdev = mdev;
+
+ macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
+ if (!macsec_fs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ macsec->macsec_fs = macsec_fs;
+
+ macsec->nb.notifier_call = macsec_obj_change_event;
+ mlx5_notifier_register(mdev, &macsec->nb);
+
+ mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
+
+ return 0;
+
+err_out:
+ destroy_workqueue(macsec->wq);
+err_wq:
+ mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
+err_aso:
+ rhashtable_destroy(&macsec->sci_hash);
+err_hash:
+ kfree(macsec);
+ priv->macsec = NULL;
+ return err;
+}
+
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!macsec)
+ return;
+
+ mlx5_notifier_unregister(mdev, &macsec->nb);
+ mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
+ destroy_workqueue(macsec->wq);
+ mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
+ rhashtable_destroy(&macsec->sci_hash);
+ mutex_destroy(&macsec->lock);
+ kfree(macsec);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
new file mode 100644
index 000000000000..d580b4a91253
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_ACCEL_MACSEC_H__
+#define __MLX5_EN_ACCEL_MACSEC_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include <linux/mlx5/driver.h>
+#include <net/macsec.h>
+#include <net/dst_metadata.h>
+
+/* Bits 31-30: MACsec marker, bits 3-0: MACsec id */
+#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
+#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0))
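+
+/* A worked decode: the Rx steering rules write fs_id | BIT(30) into the CQE
+ * metadata, so for fs_id 5 the CQE carries 0x40000005;
+ * MLX5_MACSEC_METADATA_MARKER() sees 0x1 in bits 31:30 and
+ * MLX5_MACSEC_METADATA_HANDLE() extracts 5 from bits 3:0.
+ */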
+
+struct mlx5e_priv;
+struct mlx5e_macsec;
+
+struct mlx5e_macsec_stats {
+ u64 macsec_rx_pkts;
+ u64 macsec_rx_bytes;
+ u64 macsec_rx_pkts_drop;
+ u64 macsec_rx_bytes_drop;
+ u64 macsec_tx_pkts;
+ u64 macsec_tx_bytes;
+ u64 macsec_tx_pkts_drop;
+ u64 macsec_tx_bytes_drop;
+};
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_macsec_init(struct mlx5e_priv *priv);
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb);
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ return md_dst && (md_dst->type == METADATA_MACSEC);
+}
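+
+/* A minimal usage sketch (assumed caller, for illustration only): a Tx
+ * datapath would gate MACsec handling on the skb's metadata dst, e.g.:
+ *
+ *   if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
+ *           mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
+ */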
+
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
+
+#else
+
+static inline void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) {}
+static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {}
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; }
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{}
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_EN_ACCEL_MACSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
new file mode 100644
index 000000000000..1ac0cf04e811
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/macsec.h>
+#include <linux/netdevice.h>
+#include <linux/mlx5/qp.h>
+#include "fs_core.h"
+#include "en/fs.h"
+#include "en_accel/macsec_fs.h"
+#include "mlx5_core.h"
+
+/* MACsec TX flow steering */
+#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
+#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
+
+#define TX_CRYPTO_TABLE_LEVEL 0
+#define TX_CRYPTO_TABLE_NUM_GROUPS 3
+#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
+#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
+ CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
+#define TX_CHECK_TABLE_LEVEL 1
+#define TX_CHECK_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_LEVEL 0
+#define RX_CHECK_TABLE_LEVEL 1
+#define RX_CHECK_TABLE_NUM_FTE 3
+#define RX_CRYPTO_TABLE_NUM_GROUPS 3
+#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
+ ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
+#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
+#define RX_NUM_OF_RULES_PER_SA 2
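+
+/* Worked sizing from the defines above: CRYPTO_NUM_MAXSEC_FTE = BIT(15) =
+ * 32768 entries.  The Tx crypto table is carved into 1 MKE entry + 32766 SA
+ * entries + 1 default entry.  On Rx, the with-SCI SA group gets
+ * (32768 - 1) / 2 = 16383 entries (integer division), and the without-SCI
+ * group is sized 32768 - 16383 = 16385.
+ */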
+
+#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
+#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
+#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
+
+/* MACsec Tx flow steering metadata mask */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
+
+struct mlx5_sectag_header {
+ __be16 ethertype;
+ u8 tci_an;
+ u8 sl;
+ u32 pn;
+ u8 sci[MACSEC_SCI_LEN]; /* optional */
+} __packed;
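+
+/* Size check for the header above: the fixed part is 8 bytes (2B ethertype +
+ * 1B tci_an + 1B sl + 4B pn), matching MLX5_MACSEC_TAG_LEN and
+ * MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI; the optional 8-byte SCI brings it to
+ * MLX5_SECTAG_HEADER_SIZE_WITH_SCI, 16 bytes.
+ */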
+
+struct mlx5e_macsec_tx_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u32 fs_id;
+};
+
+struct mlx5e_macsec_tables {
+ struct mlx5e_flow_table ft_crypto;
+ struct mlx5_flow_handle *crypto_miss_rule;
+
+ struct mlx5_flow_table *ft_check;
+ struct mlx5_flow_group *ft_check_group;
+ struct mlx5_fc *check_miss_rule_counter;
+ struct mlx5_flow_handle *check_miss_rule;
+ struct mlx5_fc *check_rule_counter;
+
+ u32 refcnt;
+};
+
+struct mlx5e_macsec_tx {
+ struct mlx5_flow_handle *crypto_mke_rule;
+ struct mlx5_flow_handle *check_rule;
+
+ struct ida tx_halloc;
+
+ struct mlx5e_macsec_tables tables;
+};
+
+struct mlx5e_macsec_rx_rule {
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5e_macsec_rx {
+ struct mlx5_flow_handle *check_rule[2];
+ struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
+
+ struct mlx5e_macsec_tables tables;
+};
+
+union mlx5e_macsec_rule {
+ struct mlx5e_macsec_tx_rule tx_rule;
+ struct mlx5e_macsec_rx_rule rx_rule;
+};
+
+struct mlx5e_macsec_fs {
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5e_macsec_rx *rx_fs;
+};
+
+static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ tx_tables = &tx_fs->tables;
+
+ /* Tx check table */
+ if (tx_fs->check_rule) {
+ mlx5_del_flow_rules(tx_fs->check_rule);
+ tx_fs->check_rule = NULL;
+ }
+
+ if (tx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->check_miss_rule);
+ tx_tables->check_miss_rule = NULL;
+ }
+
+ if (tx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(tx_tables->ft_check_group);
+ tx_tables->ft_check_group = NULL;
+ }
+
+ if (tx_tables->ft_check) {
+ mlx5_destroy_flow_table(tx_tables->ft_check);
+ tx_tables->ft_check = NULL;
+ }
+
+ /* Tx crypto table */
+ if (tx_fs->crypto_mke_rule) {
+ mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
+ tx_fs->crypto_mke_rule = NULL;
+ }
+
+ if (tx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
+ tx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&tx_tables->ft_crypto);
+}
+
+static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow Group for MKE match */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for SA rules */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static struct mlx5_flow_table
+ *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
+ int level, int max_fte)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb = NULL;
+
+ /* reserve entry for the match all miss group and rule */
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = 0;
+ ft_attr.flags = flags;
+ ft_attr.level = level;
+ ft_attr.max_fte = max_fte;
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+
+ return fdb;
+}
+
+static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_spec;
+ }
+
+ tx_tables = &tx_fs->tables;
+ ft_crypto = &tx_tables->ft_crypto;
+
+ /* Tx crypto table */
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Tx crypto table groups */
+ err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
+ memset(&flow_act, 0, sizeof(flow_act));
+ memset(spec, 0, sizeof(*spec));
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->crypto_mke_rule = rule;
+
+ /* Tx crypto table Default miss rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
+ goto err;
+ }
+ tx_tables->crypto_miss_rule = rule;
+
+ /* Tx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
+ TX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->ft_check = flow_table;
+
+ /* Tx check table Default miss group/rule */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+ tx_tables->ft_check_group = flow_group;
+
+ /* Tx check table default drop rule */
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->check_miss_rule = rule;
+
+ /* Tx check table rule */
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->check_rule = rule;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_tx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_tx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ tx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+
+ if (--tx_tables->refcnt)
+ return;
+
+ macsec_fs_tx_destroy(macsec_fs);
+}
+
+static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ u32 macsec_obj_id,
+ u32 *fs_id)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ int err = 0;
+ u32 id;
+
+ err = ida_alloc_range(&tx_fs->tx_halloc, 1,
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ id = err;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Metadata match */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC | id << 2);
+
+ *fs_id = id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ flow_act->crypto.obj_id = macsec_obj_id;
+
+ mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
+ return 0;
+}
+
+static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
+ char *reformatbf,
+ size_t *reformat_size)
+{
+ const struct macsec_secy *secy = ctx->secy;
+ bool sci_present = macsec_send_sci(secy);
+ struct mlx5_sectag_header sectag = {};
+ const struct macsec_tx_sc *tx_sc;
+
+ tx_sc = &secy->tx_sc;
+ sectag.ethertype = htons(ETH_P_MACSEC);
+
+ if (sci_present) {
+ sectag.tci_an |= MACSEC_TCI_SC;
+ memcpy(&sectag.sci, &secy->sci,
+ sizeof(sectag.sci));
+ } else {
+ if (tx_sc->end_station)
+ sectag.tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ sectag.tci_an |= MACSEC_TCI_SCB;
+ }
+
+ /* With GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ sectag.tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ sectag.tci_an |= MACSEC_TCI_C;
+
+ sectag.tci_an |= tx_sc->encoding_sa;
+
+ *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+
+ memcpy(reformatbf, &sectag, *reformat_size);
+}
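+
+/* A worked TCI/AN byte for a common configuration (illustrative, using the
+ * standard 802.1AE TCI bit values SC=0x20, E=0x08, C=0x04): with the SCI
+ * sent, encryption on and encoding_sa 0, the function above yields
+ * tci_an = MACSEC_TCI_SC | MACSEC_TCI_CONFID | 0 = 0x2c and
+ * *reformat_size = MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN = 16.
+ */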
+
+static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_tx_rule *tx_rule)
+{
+ if (tx_rule->rule) {
+ mlx5_del_flow_rules(tx_rule->rule);
+ tx_rule->rule = NULL;
+ }
+
+ if (tx_rule->pkt_reformat) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
+ tx_rule->pkt_reformat = NULL;
+ }
+
+ if (tx_rule->fs_id) {
+ ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
+ tx_rule->fs_id = 0;
+ }
+
+ kfree(tx_rule);
+
+ macsec_fs_tx_ft_put(macsec_fs);
+}
+
+static union mlx5e_macsec_rule *
+macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ size_t reformat_size;
+ int err = 0;
+ u32 fs_id;
+
+ tx_tables = &tx_fs->tables;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_tx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_tx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ tx_rule = &macsec_rule->tx_rule;
+
+ /* Tx crypto table crypto rule */
+ macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
+ reformat_params.size = reformat_size;
+ reformat_params.data = reformatbf;
+ flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (IS_ERR(flow_act.pkt_reformat)) {
+ err = PTR_ERR(flow_act.pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
+ goto err;
+ }
+ tx_rule->pkt_reformat = flow_act.pkt_reformat;
+
+ err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+
+ tx_rule->fs_id = fs_id;
+ *sa_fs_id = fs_id;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx_tables->ft_check;
+ rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
+ goto err;
+ }
+ tx_rule->rule = rule;
+
+ goto out_spec;
+
+err:
+ macsec_fs_tx_del_rule(macsec_fs, tx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+
+ return macsec_rule;
+}
+
+static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ if (!tx_fs)
+ return;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
+ tx_tables->refcnt);
+ return;
+ }
+
+ ida_destroy(&tx_fs->tx_halloc);
+
+ if (tx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
+ tx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (tx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+}
+
+static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ tx_tables = &tx_fs->tables;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+ tx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ tx_tables->check_miss_rule_counter = flow_counter;
+
+ ida_init(&tx_fs->tx_halloc);
+
+ macsec_fs->tx_fs = tx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5e_macsec_tables *rx_tables;
+ int i;
+
+ /* Rx check table */
+ for (i = 1; i >= 0; --i) {
+ if (rx_fs->check_rule[i]) {
+ mlx5_del_flow_rules(rx_fs->check_rule[i]);
+ rx_fs->check_rule[i] = NULL;
+ }
+
+ if (rx_fs->check_rule_pkt_reformat[i]) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev,
+ rx_fs->check_rule_pkt_reformat[i]);
+ rx_fs->check_rule_pkt_reformat[i] = NULL;
+ }
+ }
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->check_miss_rule);
+ rx_tables->check_miss_rule = NULL;
+ }
+
+ if (rx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(rx_tables->ft_check_group);
+ rx_tables->ft_check_group = NULL;
+ }
+
+ if (rx_tables->ft_check) {
+ mlx5_destroy_flow_table(rx_tables->ft_check);
+ rx_tables->ft_check = NULL;
+ }
+
+ /* Rx crypto table */
+ if (rx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
+ rx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&rx_tables->ft_crypto);
+}
+
+static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow group for SA rule with SCI */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow group for SA rule without SCI */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec,
+ int reformat_param_size)
+{
+ int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
+ u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ rx_tables = &rx_fs->tables;
+
+ /* Rx check table decap 16B rule */
+ memset(dest, 0, sizeof(*dest));
+ memset(flow_act, 0, sizeof(*flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
+ reformat_params.size = reformat_param_size;
+ reformat_params.data = mlx5_reformat_buf;
+ flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (IS_ERR(flow_act->pkt_reformat)) {
+ err = PTR_ERR(flow_act->pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
+ return err;
+ }
+ rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ /* MACsec syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
+ /* ASO return reg syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+ /* SecTAG TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ flow_act->flags = FLOW_ACT_NO_APPEND;
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
+ return err;
+ }
+
+ rx_fs->check_rule[rule_index] = rule;
+
+ return 0;
+}
+
+static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto free_spec;
+ }
+
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Rx crypto table */
+ ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Rx crypto table groups */
+ err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add MACsec Rx crypto table default miss rule %d\n",
+ err);
+ goto err;
+ }
+ rx_tables->crypto_miss_rule = rule;
+
+ /* Rx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns,
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
+ RX_CHECK_TABLE_LEVEL,
+ RX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->ft_check = flow_table;
+
+ /* Rx check table Default miss group/rule */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Rx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ rx_tables->ft_check_group = flow_group;
+
+ /* Rx check table default drop rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->check_miss_rule = rule;
+
+ /* Rx check table decap rules */
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
+ if (err)
+ goto err;
+
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
+ if (err)
+ goto err;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_rx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+free_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ int err = 0;
+
+ if (rx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_rx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ rx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+
+ if (--rx_tables->refcnt)
+ return;
+
+ macsec_fs_rx_destroy(macsec_fs);
+}
+
+static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_rx_rule *rx_rule)
+{
+ int i;
+
+ for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
+ if (rx_rule->rule[i]) {
+ mlx5_del_flow_rules(rx_rule->rule[i]);
+ rx_rule->rule[i] = NULL;
+ }
+ }
+
+ if (rx_rule->meta_modhdr) {
+ mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
+ rx_rule->meta_modhdr = NULL;
+ }
+
+ kfree(rx_rule);
+
+ macsec_fs_rx_ft_put(macsec_fs);
+}
+
+static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_macsec_rule_attrs *attrs,
+ bool sci_present)
+{
+ u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
+ struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
+ __be32 *sci_p = (__be32 *)(&attrs->sci);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* MACsec ethertype */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ /* SecTAG AN + TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (sci_present) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
+ be32_to_cpu(sci_p[0]));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_3);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
+ be32_to_cpu(sci_p[1]));
+ } else {
+ /* When SCI isn't present in the Sectag, need to match the source
+ * MAC address only if the SCI contains the default MACsec PORT
+ */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
+ sci_p, ETH_ALEN);
+ }
+
+ crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ crypto_params->obj_id = attrs->macsec_obj_id;
+}
+
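+/* Install Rx steering for one SA: a modify-header action that marks
+ * packets with the SA fs_id in metadata register B, a crypto-table
+ * rule matching the explicit SCI and, for the default port, a second
+ * rule matching packets that carry no SCI in the Sectag.
+ */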
+static union mlx5e_macsec_rule *
+macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 fs_id)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_rx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_rx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ rx_rule = &macsec_rule->rx_rule;
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Metadata register B: bits [31:30] carry the MACsec marker (0b01),
+ * bits [3:0] carry the fs id.
+ */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, fs_id | BIT(30));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto err;
+ }
+ rx_rule->meta_modhdr = modify_hdr;
+
+ /* Rx crypto table with SCI rule */
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[0] = rule;
+
+ /* Rx crypto table without SCI rule */
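+ /* Per IEEE 802.1AE the last two bytes of the SCI hold the port
+ * number; the implicit-SCI rule only applies to the default
+ * end-station port.
+ */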
+ if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[1] = rule;
+ }
+
+ return macsec_rule;
+
+err:
+ macsec_fs_rx_del_rule(macsec_fs, rx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+ return macsec_rule;
+}
+
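+/* Allocate the two Rx flow counters up front: per the stats fill below,
+ * check_rule_counter backs the Rx packet/byte stats and
+ * check_miss_rule_counter backs the Rx drop stats.
+ */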
+static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx *rx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
+ if (!rx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+
+ rx_tables = &rx_fs->tables;
+ rx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ rx_tables->check_miss_rule_counter = flow_counter;
+
+ macsec_fs->rx_fs = rx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+
+ if (!rx_fs)
+ return;
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
+ rx_tables->refcnt);
+ return;
+ }
+
+ if (rx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
+ rx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (rx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+}
+
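+/* Snapshot the Tx/Rx check and drop flow counters into the MACsec
+ * stats structure.
+ */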
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
+{
+ struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+
+ if (tx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_rule_counter,
+ &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
+
+ if (tx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
+ &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
+
+ if (rx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_rule_counter,
+ &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
+
+ if (rx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
+ &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
+}
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
+ macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+}
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action)
+{
+ (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
+ macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
+}
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ kfree(macsec_fs);
+}
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+ macsec_fs->netdev = netdev;
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto err;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+err:
+ kfree(macsec_fs);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
new file mode 100644
index 000000000000..b429648d4ee7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_MACSEC_STEERING_H__
+#define __MLX5_MACSEC_STEERING_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include "en_accel/macsec.h"
+
+#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
+
+struct mlx5e_macsec_fs;
+union mlx5e_macsec_rule;
+
+struct mlx5_macsec_rule_attrs {
+ sci_t sci;
+ u32 macsec_obj_id;
+ u8 assoc_num;
+ int action;
+};
+
+enum mlx5_macsec_action {
+ MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
+};
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id);
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action);
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
+
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_MACSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
new file mode 100644
index 000000000000..e50a2e3f3d18
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "en_accel/macsec.h"
+
+static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+};
+
+#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(macsec_hw)
+{
+ if (!priv->macsec)
+ return 0;
+
+ if (mlx5e_is_macsec_device(priv->mdev))
+ return NUM_MACSEC_HW_COUNTERS;
+
+ return 0;
+}
+
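+/* Counters are read directly from hardware in the fill_stats callback,
+ * so the periodic update callback is a no-op.
+ */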
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(macsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
+{
+ unsigned int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_macsec_hw_stats_desc[i].format);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
+{
+ int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+ mlx5e_macsec_hw_stats_desc,
+ i);
+
+ return idx;
+}
+
+MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
deleted file mode 100644
index b8fc863aa68d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include "en_accel/tls.h"
-#include "accel/tls.h"
-
-static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 0);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 1);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-}
-#endif
-
-static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
- MLX5_FLD_SZ_BYTES(tls_flow, src_port));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
- MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
-}
-
-static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
-{
- switch (sk->sk_family) {
- case AF_INET:
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
- }
- if (!(caps & MLX5_ACCEL_TLS_IPV6))
- goto error_out;
-
- mlx5e_tls_set_ipv6_flow(flow, sk);
- break;
-#endif
- default:
- goto error_out;
- }
-
- mlx5e_tls_set_flow_tcp_ports(flow, sk);
- return 0;
-error_out:
- return -EINVAL;
-}
-
-static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 caps = mlx5_accel_tls_device_caps(mdev);
- int ret = -ENOMEM;
- void *flow;
- u32 swid;
-
- flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
- if (!flow)
- return ret;
-
- ret = mlx5e_tls_set_flow(flow, sk, caps);
- if (ret)
- goto free_flow;
-
- ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
- if (ret < 0)
- goto free_flow;
-
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context_tx *tx_ctx =
- mlx5e_get_tls_tx_context(tls_ctx);
-
- tx_ctx->swid = htonl(swid);
- tx_ctx->expected_seq = start_offload_tcp_sn;
- } else {
- struct mlx5e_tls_offload_context_rx *rx_ctx =
- mlx5e_get_tls_rx_context(tls_ctx);
-
- rx_ctx->handle = htonl(swid);
- }
-
- return 0;
-free_flow:
- kfree(flow);
- return ret;
-}
-
-static void mlx5e_tls_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- unsigned int handle;
-
- handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
- mlx5e_get_tls_tx_context(tls_ctx)->swid :
- mlx5e_get_tls_rx_context(tls_ctx)->handle);
-
- mlx5_accel_tls_del_flow(priv->mdev, handle,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
-}
-
-static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
- u32 seq, u8 *rcd_sn_data,
- enum tls_offload_ctx_dir direction)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_rx *rx_ctx;
- __be64 rcd_sn = *(__be64 *)rcd_sn_data;
-
- if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
- return -EINVAL;
- rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
-
- netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
- be64_to_cpu(rcd_sn));
- mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
-
- return 0;
-}
-
-static const struct tlsdev_ops mlx5e_tls_ops = {
- .tls_dev_add = mlx5e_tls_add,
- .tls_dev_del = mlx5e_tls_del,
- .tls_dev_resync = mlx5e_tls_resync,
-};
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- struct net_device *netdev = priv->netdev;
- u32 caps;
-
- if (mlx5e_accel_is_ktls_device(priv->mdev)) {
- mlx5e_ktls_build_netdev(priv);
- return;
- }
-
- /* FPGA */
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return;
-
- caps = mlx5_accel_tls_device_caps(priv->mdev);
- if (caps & MLX5_ACCEL_TLS_TX) {
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
- }
-
- if (caps & MLX5_ACCEL_TLS_RX) {
- netdev->features |= NETIF_F_HW_TLS_RX;
- netdev->hw_features |= NETIF_F_HW_TLS_RX;
- }
-
- if (!(caps & MLX5_ACCEL_TLS_LRO)) {
- netdev->features &= ~NETIF_F_LRO;
- netdev->hw_features &= ~NETIF_F_LRO;
- }
-
- netdev->tlsdev_ops = &mlx5e_tls_ops;
-}
-
-int mlx5e_tls_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls;
-
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- priv->tls = tls;
- return 0;
-}
-
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls = priv->tls;
-
- if (!tls)
- return;
-
- kfree(tls);
- priv->tls = NULL;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
deleted file mode 100644
index 62ecf14bf86a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#ifndef __MLX5E_TLS_H__
-#define __MLX5E_TLS_H__
-
-#include "accel/tls.h"
-#include "en_accel/ktls.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-#include <net/tls.h>
-#include "en.h"
-
-struct mlx5e_tls_sw_stats {
- atomic64_t tx_tls_ctx;
- atomic64_t tx_tls_del;
- atomic64_t tx_tls_drop_metadata;
- atomic64_t tx_tls_drop_resync_alloc;
- atomic64_t tx_tls_drop_no_sync_data;
- atomic64_t tx_tls_drop_bypass_required;
- atomic64_t rx_tls_ctx;
- atomic64_t rx_tls_del;
- atomic64_t rx_tls_drop_resync_request;
- atomic64_t rx_tls_resync_request;
- atomic64_t rx_tls_resync_reply;
- atomic64_t rx_tls_auth_fail;
-};
-
-struct mlx5e_tls {
- struct mlx5e_tls_sw_stats sw_stats;
- struct workqueue_struct *rx_wq;
-};
-
-struct mlx5e_tls_offload_context_tx {
- struct tls_offload_context_tx base;
- u32 expected_seq;
- __be32 swid;
-};
-
-static inline struct mlx5e_tls_offload_context_tx *
-mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
- return container_of(tls_offload_ctx_tx(tls_ctx),
- struct mlx5e_tls_offload_context_tx,
- base);
-}
-
-struct mlx5e_tls_offload_context_rx {
- struct tls_offload_context_rx base;
- __be32 handle;
-};
-
-static inline struct mlx5e_tls_offload_context_rx *
-mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
- return container_of(tls_offload_ctx_rx(tls_ctx),
- struct mlx5e_tls_offload_context_rx,
- base);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
-{
- return priv->tls;
-}
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_tls_init(struct mlx5e_priv *priv);
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv);
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
-
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_tls_device(mdev);
-}
-
-#else
-
-static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- if (!is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(priv->mdev))
- mlx5e_ktls_build_netdev(priv);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
-static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
-static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif
-
-#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
deleted file mode 100644
index 7a700f913582..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
-#include "accel/accel.h"
-
-#include <net/inet6_hashtables.h>
-#include <linux/ipv6.h>
-
-#define SYNDROM_DECRYPTED 0x30
-#define SYNDROM_RESYNC_REQUEST 0x31
-#define SYNDROM_AUTH_FAILED 0x32
-
-#define SYNDROME_OFFLOAD_REQUIRED 32
-#define SYNDROME_SYNC 33
-
-struct sync_info {
- u64 rcd_sn;
- s32 sync_len;
- int nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
-};
-
-struct recv_metadata_content {
- u8 syndrome;
- u8 reserved;
- __be32 sync_seq;
-} __packed;
-
-struct send_metadata_content {
- /* One byte of syndrome followed by 3 bytes of swid */
- __be32 syndrome_swid;
- __be16 first_seq;
-} __packed;
-
-struct mlx5e_tls_metadata {
- union {
- /* from fpga to host */
- struct recv_metadata_content recv;
- /* from host to fpga */
- struct send_metadata_content send;
- unsigned char raw[6];
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
-{
- struct mlx5e_tls_metadata *pet;
- struct ethhdr *eth;
-
- if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
- return -ENOMEM;
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
- skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
- pet = (struct mlx5e_tls_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->content.send.syndrome_swid =
- htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
-
- return 0;
-}
-
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
- u32 tcp_seq, struct sync_info *info)
-{
- int remaining, i = 0, ret = -EINVAL;
- struct tls_record_info *record;
- unsigned long flags;
- s32 sync_size;
-
- spin_lock_irqsave(&context->base.lock, flags);
- record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
-
- if (unlikely(!record))
- goto out;
-
- sync_size = tcp_seq - tls_record_start_seq(record);
- info->sync_len = sync_size;
- if (unlikely(sync_size < 0)) {
- if (tls_record_is_start_marker(record))
- goto done;
-
- goto out;
- }
-
- remaining = sync_size;
- while (remaining > 0) {
- info->frags[i] = record->frags[i];
- __skb_frag_ref(&info->frags[i]);
- remaining -= skb_frag_size(&info->frags[i]);
-
- if (remaining < 0)
- skb_frag_size_add(&info->frags[i], remaining);
-
- i++;
- }
- info->nr_frags = i;
-done:
- ret = 0;
-out:
- spin_unlock_irqrestore(&context->base.lock, flags);
- return ret;
-}
-
-static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
- struct sk_buff *nskb, u32 tcp_seq,
- int headln, __be64 rcd_sn)
-{
- struct mlx5e_tls_metadata *pet;
- u8 syndrome = SYNDROME_SYNC;
- struct iphdr *iph;
- struct tcphdr *th;
- int data_len, mss;
-
- nskb->dev = skb->dev;
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb_network_offset(skb));
- skb_set_transport_header(nskb, skb_transport_offset(skb));
- memcpy(nskb->data, skb->data, headln);
- memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
-
- iph = ip_hdr(nskb);
- iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
- th = tcp_hdr(nskb);
- data_len = nskb->len - headln;
- tcp_seq -= data_len;
- th->seq = htonl(tcp_seq);
-
- mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
- skb_shinfo(nskb)->gso_size = 0;
- if (data_len > mss) {
- skb_shinfo(nskb)->gso_size = mss;
- skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
- }
- skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
-
- pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
- memcpy(pet, &syndrome, sizeof(syndrome));
- pet->content.send.first_seq = htons(tcp_seq);
-
- /* MLX5 devices don't care about the checksum partial start, offset
- * and pseudo header
- */
- nskb->ip_summed = CHECKSUM_PARTIAL;
-
- nskb->queue_mapping = skb->queue_mapping;
-}
-
-static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tls *tls)
-{
- u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct sync_info info;
- struct sk_buff *nskb;
- int linear_len = 0;
- int headln;
- int i;
-
- sq->stats->tls_ooo++;
-
- if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
- * It should be safe to drop the packet in this case
- */
- atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
- goto err_out;
- }
-
- if (unlikely(info.sync_len < 0)) {
- u32 payload;
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload = skb->len - headln;
- if (likely(payload <= -info.sync_len))
- /* SKB payload doesn't require offload
- */
- return true;
-
- atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
- goto err_out;
- }
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
- goto err_out;
- }
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- linear_len += headln + sizeof(info.rcd_sn);
- nskb = alloc_skb(linear_len, GFP_ATOMIC);
- if (unlikely(!nskb)) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
- goto err_out;
- }
-
- context->expected_seq = tcp_seq + skb->len - headln;
- skb_put(nskb, linear_len);
- for (i = 0; i < info.nr_frags; i++)
- skb_shinfo(nskb)->frags[i] = info.frags[i];
-
- skb_shinfo(nskb)->nr_frags = info.nr_frags;
- nskb->data_len = info.sync_len;
- nskb->len += info.sync_len;
- sq->stats->tls_resync_bytes += nskb->len;
- mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
- cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit_simple(sq, nskb, true);
-
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_tx *context;
- struct tls_context *tls_ctx;
- u32 expected_seq;
- int datalen;
- u32 skb_seq;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- return true;
-
- mlx5e_tx_mpwqe_ensure_complete(sq);
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
- if (mlx5e_accel_is_ktls_tx(sq->mdev))
- return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
-
- /* FPGA */
- skb_seq = ntohl(tcp_hdr(skb)->seq);
- context = mlx5e_get_tls_tx_context(tls_ctx);
- expected_seq = context->expected_seq;
-
- if (unlikely(expected_seq != skb_seq))
- return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
- dev_kfree_skb_any(skb);
- return false;
- }
-
- context->expected_seq = skb_seq + datalen;
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-static int tls_update_resync_sn(struct net_device *netdev,
- struct sk_buff *skb,
- struct mlx5e_tls_metadata *mdata)
-{
- struct sock *sk = NULL;
- struct iphdr *iph;
- struct tcphdr *th;
- __be32 seq;
-
- if (mdata->ethertype != htons(ETH_P_IP))
- return -EINVAL;
-
- iph = (struct iphdr *)(mdata + 1);
-
- th = ((void *)iph) + iph->ihl * 4;
-
- if (iph->version == 4) {
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
-
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
- &ipv6h->saddr, th->source,
- &ipv6h->daddr, ntohs(th->dest),
- netdev->ifindex, 0);
-#endif
- }
- if (!sk || sk->sk_state == TCP_TIME_WAIT) {
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
- goto out;
- }
-
- skb->sk = sk;
- skb->destructor = sock_edemux;
-
- memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
- tls_offload_rx_resync_request(sk, seq);
-out:
- return 0;
-}
-
-/* FPGA tls rx handler */
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt)
-{
- struct mlx5e_tls_metadata *mdata;
- struct mlx5e_priv *priv;
-
- /* Use the metadata */
- mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
- switch (mdata->content.recv.syndrome) {
- case SYNDROM_DECRYPTED:
- skb->decrypted = 1;
- break;
- case SYNDROM_RESYNC_REQUEST:
- tls_update_resync_sn(rq->netdev, skb, mdata);
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
- break;
- case SYNDROM_AUTH_FAILED:
- /* Authentication failure will be observed and verified by kTLS */
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
- break;
- default:
- /* Bypass the metadata header to others */
- return;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-}
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- if (!mlx5e_accel_is_tls_device(mdev))
- return 0;
-
- if (mlx5e_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(mdev, params);
-
- /* FPGA */
- /* Resync SKB. */
- return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
deleted file mode 100644
index 0ca0a023fb8d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5E_TLS_RXTX_H__
-#define __MLX5E_TLS_RXTX_H__
-
-#include "accel/accel.h"
-#include "en_accel/ktls_txrx.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-
-#include <linux/skbuff.h>
-#include "en.h"
-#include "en/txrx.h"
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
-
-static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
-{
- return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
-static inline void
-mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state)
-{
- cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
-}
-
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt);
-
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
-{
- if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
- return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
-
- if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
- return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
-}
-
-#else
-
-static inline bool
-mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- return 0;
-}
-
-#endif /* CONFIG_MLX5_EN_TLS */
-
-#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 49cca6bd49a1..0ae1865086ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -114,47 +114,49 @@ static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
}
}
-static int arfs_disable(struct mlx5e_priv *priv)
+static int arfs_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, arfs_get_tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, arfs_get_tt(i), err);
return err;
}
}
return 0;
}
-static void arfs_del_rules(struct mlx5e_priv *priv);
+static void arfs_del_rules(struct mlx5e_flow_steering *fs);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
- arfs_del_rules(priv);
+ arfs_del_rules(fs);
- return arfs_disable(priv);
+ return arfs_disable(fs);
}
-int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+ dest.ft = arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
- __func__, arfs_get_tt(i), err);
- arfs_disable(priv);
+ fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
+ __func__, arfs_get_tt(i), err);
+ arfs_disable(fs);
return err;
}
}
@@ -167,31 +169,37 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}
-static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
+static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
int i;
- arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs->wq);
+ arfs_del_rules(fs);
+ destroy_workqueue(arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&arfs->arfs_tables[i]);
}
}
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
+
+ if (!ntuple)
return;
- _mlx5e_cleanup_tables(priv);
- kvfree(priv->fs.arfs);
+ _mlx5e_cleanup_tables(fs);
+ mlx5e_fs_set_arfs(fs, NULL);
+ kvfree(arfs);
}
-static int arfs_add_default_rule(struct mlx5e_priv *priv,
+static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
+ struct arfs_table *arfs_t = &arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -200,23 +208,21 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
- netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
- __func__, type);
+ fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
return -EINVAL;
}
/* FIXME: Must use mlx5_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
- dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
+ dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
- netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
- __func__, type);
+ fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
}
return err;
@@ -318,10 +324,12 @@ out:
return err;
}
-static int arfs_create_table(struct mlx5e_priv *priv,
+static int arfs_create_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +340,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -343,7 +351,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
if (err)
goto err;
- err = arfs_add_default_rule(priv, type);
+ err = arfs_add_default_rule(fs, rx_res, type);
if (err)
goto err;
@@ -353,35 +361,40 @@ err:
return err;
}
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
{
+ struct mlx5e_arfs_tables *arfs;
int err = -ENOMEM;
int i;
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ if (!ntuple)
return 0;
- priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
- if (!priv->fs.arfs)
+ arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
+ if (!arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs.arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs->rules);
- priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs->wq)
+ spin_lock_init(&arfs->arfs_lock);
+ INIT_LIST_HEAD(&arfs->rules);
+ arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!arfs->wq)
goto err;
+ mlx5e_fs_set_arfs(fs, arfs);
+
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- err = arfs_create_table(priv, i);
+ err = arfs_create_table(fs, rx_res, i);
if (err)
goto err_des;
}
return 0;
err_des:
- _mlx5e_cleanup_tables(priv);
+ _mlx5e_cleanup_tables(fs);
err:
- kvfree(priv->fs.arfs);
+ mlx5e_fs_set_arfs(fs, NULL);
+ kvfree(arfs);
return err;
}
@@ -389,6 +402,7 @@ err:
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
HLIST_HEAD(del_list);
@@ -396,8 +410,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +422,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -417,20 +431,21 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
}
}
-static void arfs_del_rules(struct mlx5e_priv *priv)
+static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct hlist_node *htmp;
struct arfs_rule *rule;
HLIST_HEAD(del_list);
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +489,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -588,13 +603,15 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
+ struct mlx5e_arfs_tables *arfs;
struct mlx5_flow_handle *rule;
+ arfs = mlx5e_fs_get_arfs(priv->fs);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ spin_lock_bh(&arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -620,6 +637,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *rule;
struct arfs_tuple *tuple;
@@ -647,7 +665,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,11 +709,12 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
- struct arfs_table *arfs_t;
+ struct mlx5e_arfs_tables *arfs;
struct arfs_rule *arfs_rule;
+ struct arfs_table *arfs_t;
struct flow_keys fk;
+ arfs = mlx5e_fs_get_arfs(priv->fs);
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
@@ -725,7 +744,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+ queue_work(arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index c0f409c195bf..68f19324db93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -46,8 +46,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
-static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- u32 *mkey)
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index a4c8d8d00d5a..2449731b7d79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1026,15 +1026,6 @@ void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
}
-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
-
- if (MLX5_CAP_GEN(mdev, qos))
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-}
-
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
enum mlx5_dcbx_oper_mode *mode)
{
@@ -1142,7 +1133,7 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
err = mlx5_set_trust_state(priv->mdev, *trust_state);
if (err)
return err;
- priv->dcbx_dp.trust_state = *trust_state;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
return 0;
}
@@ -1187,16 +1178,28 @@ static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u8 trust_state;
int err;
- priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
-
- if (!MLX5_DSCP_SUPPORTED(mdev))
+ if (!MLX5_DSCP_SUPPORTED(mdev)) {
+ WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
return 0;
+ }
- err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ err = mlx5_query_trust_state(priv->mdev, &trust_state);
if (err)
return err;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
+
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
+ /* Align the driver state with the register state.
+ * Temporary state change is required to enable the app list reset.
+ */
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
+ mlx5e_dcbnl_delete_app(priv);
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+ }
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 57d755db1cf5..24aa25da482b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,24 +30,27 @@
* SOFTWARE.
*/
+#include <linux/ethtool_netlink.h>
+
#include "en.h"
#include "en/port.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
+#include "en/fs_ethtool.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev->board_id);
- strlcpy(drvinfo->bus_info, dev_name(mdev->device),
+ strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
@@ -305,12 +308,26 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ /* Limitation for regular RQ. XSK RQ may clamp the queue length in
+ * mlx5e_mpwqe_get_log_rq_size.
+ */
+ u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev,
+ PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED);
+
+ param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+ max_log_mpwrq_pkts);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
+
+ kernel_param->tcp_data_split =
+ (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -320,7 +337,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
@@ -451,7 +468,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
__func__);
@@ -486,14 +503,14 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
- mlx5e_arfs_disable(priv);
+ mlx5e_arfs_disable(priv->fs);
/* Switch to new channels, set new parameters and close old ones */
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
- int err2 = mlx5e_arfs_enable(priv);
+ int err2 = mlx5e_arfs_enable(priv->fs);
if (err2)
netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
@@ -1792,7 +1809,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
__func__, size_read);
- return 0;
+ return size_read;
}
i += size_read;
@@ -1988,10 +2005,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_params new_params;
if (enable) {
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return -EOPNOTSUPP;
- if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
- return -EINVAL;
+ /* Checking the regular RQ here; mlx5e_validate_xsk_param called
+ * from mlx5e_open_xsk will check for each XSK queue, and
+ * mlx5e_safe_switch_params will be reverted if any check fails.
+ */
+ int err = mlx5e_mpwrq_validate_regular(mdev, &priv->channels.params);
+
+ if (err)
+ return err;
} else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
@@ -2067,7 +2088,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
* the numeration of the QoS SQs will change, while per-queue qdiscs are
* attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
__func__);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index aeff1d972a46..1892ccb889b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -36,14 +36,42 @@
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
-#include "en.h"
-#include "en_rep.h"
+#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
+#include "en/fs_ethtool.h"
+
+struct mlx5e_flow_steering {
+ struct work_struct set_rx_mode_work;
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_namespace *egress_ns;
+#ifdef CONFIG_MLX5_EN_RXNFC
+ struct mlx5e_ethtool_steering *ethtool;
+#endif
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_promisc_table promisc;
+ struct mlx5e_vlan_table *vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_ttc_table *inner_ttc;
+#ifdef CONFIG_MLX5_EN_ARFS
+ struct mlx5e_arfs_tables *arfs;
+#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_fs_tcp *accel_tcp;
+#endif
+ struct mlx5e_fs_udp *udp;
+ struct mlx5e_fs_any *any;
+ struct mlx5e_ptp_fs *ptp_fs;
+};
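
Moving this definition out of the shared header makes struct mlx5e_flow_steering opaque to the rest of the driver: every former priv->fs.<field> access now goes through the getters and setters added further down in this patch, for example:

	struct mlx5_ttc_table *ttc  = mlx5e_fs_get_ttc(fs, false);	/* outer TTC */
	struct mlx5e_l2_table *l2   = mlx5e_fs_get_l2(fs);
	struct mlx5_core_dev  *mdev = mlx5e_fs_get_mdev(fs);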
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai);
enum {
@@ -132,9 +160,8 @@ struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
return vlan->ft.t;
}
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
- struct net_device *ndev = priv->netdev;
int max_list_size;
int list_size;
u16 *vlans;
@@ -143,35 +170,34 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
list_size++;
- max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+ max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- netdev_warn(ndev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
- vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
+ vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
if (!vlans)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
}
- err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+ err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
+ err);
- kfree(vlans);
+ kvfree(vlans);
return err;
}
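
The kcalloc()/kfree() pair becomes kvcalloc()/kvfree() because the u16 array scales with the number of active VLANs (clamped to the device's log_max_vlan_list); larger physically contiguous requests are exactly where kcalloc() can fail under fragmentation, while kvcalloc() falls back to vmalloc(). Reduced to the essential pairing:

	u16 *vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;
	/* ... fill from the active_cvlans bitmap, pass to firmware ... */
	kvfree(vlans);	/* handles both kmalloc- and vmalloc-backed memory */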
@@ -183,18 +209,18 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
-static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.l2.ft.t;
+ dest.ft = fs->l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -204,24 +230,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged of both)
*/
- rule_p = &priv->fs.vlan->untagged_rule;
+ rule_p = &fs->vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan->any_cvlan_rule;
+ rule_p = &fs->vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan->any_svlan_rule;
+ rule_p = &fs->vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+ rule_p = &fs->vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -231,7 +257,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+ rule_p = &fs->vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -250,13 +276,13 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ fs_err(fs, "%s: add rule failed\n", __func__);
}
return err;
}
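
The fs_err()/fs_warn() helpers used throughout this file (plus a _once variant) are not defined in any hunk shown here; presumably they are thin logging wrappers keyed on the mlx5 core device rather than the netdev, roughly along these lines (a sketch, not the exact definition):

	#define fs_err(fs, fmt, ...) \
		mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
	#define fs_warn(fs, fmt, ...) \
		mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)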
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
struct mlx5_flow_spec *spec;
@@ -267,68 +293,68 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
return -ENOMEM;
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
+ err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);
kvfree(spec);
return err;
}
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
- enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan->untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
- priv->fs.vlan->untagged_rule = NULL;
+ if (fs->vlan->untagged_rule) {
+ mlx5_del_flow_rules(fs->vlan->untagged_rule);
+ fs->vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan->any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
- priv->fs.vlan->any_cvlan_rule = NULL;
+ if (fs->vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
+ fs->vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan->any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
- priv->fs.vlan->any_svlan_rule = NULL;
+ if (fs->vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
+ fs->vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan->active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
- priv->fs.vlan->active_svlans_rule[vid] = NULL;
+ if (fs->vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
+ fs->vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan->active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
- priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+ if (fs->vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
+ fs->vlan->active_cvlans_rule[vid] = NULL;
}
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
break;
}
}
-static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
int err;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
if (err)
return err;
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static struct mlx5_flow_handle *
@@ -352,103 +378,103 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
return rule;
}
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan->trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ fs->vlan->trap_rule = NULL;
+ fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.vlan->trap_rule = rule;
+ fs->vlan->trap_rule = rule;
return 0;
}
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
{
- if (priv->fs.vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
- priv->fs.vlan->trap_rule = NULL;
+ if (fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(fs->vlan->trap_rule);
+ fs->vlan->trap_rule = NULL;
}
}
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.l2.trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ fs->l2.trap_rule = NULL;
+ fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.l2.trap_rule = rule;
+ fs->l2.trap_rule = rule;
return 0;
}
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
{
- if (priv->fs.l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs.l2.trap_rule);
- priv->fs.l2.trap_rule = NULL;
+ if (fs->l2.trap_rule) {
+ mlx5_del_flow_rules(fs->l2.trap_rule);
+ fs->l2.trap_rule = NULL;
}
}
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (!priv->fs.vlan->cvlan_filter_disabled)
+ if (!fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = false;
- if (priv->netdev->flags & IFF_PROMISC)
+ fs->vlan->cvlan_filter_disabled = false;
+ if (promisc)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (priv->fs.vlan->cvlan_filter_disabled)
+ if (fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = true;
- if (priv->netdev->flags & IFF_PROMISC)
+ fs->vlan->cvlan_filter_disabled = true;
+ if (promisc)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan->active_cvlans);
+ set_bit(vid, fs->vlan->active_cvlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan->active_cvlans);
+ clear_bit(vid, fs->vlan->active_cvlans);
return err;
}
-static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev, u16 vid)
{
- struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan->active_svlans);
+ set_bit(vid, fs->vlan->active_svlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
+ clear_bit(vid, fs->vlan->active_svlans);
return err;
}
@@ -457,86 +483,91 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
return err;
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ fs_err(fs, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q)
- return mlx5e_vlan_rx_add_cvid(priv, vid);
+ return mlx5e_vlan_rx_add_cvid(fs, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
- return mlx5e_vlan_rx_add_svid(priv, vid);
+ return mlx5e_vlan_rx_add_svid(fs, netdev, vid);
return -EOPNOTSUPP;
}
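
With the vid handlers taking the flow-steering context and the netdev explicitly, the uplink representor's old early return is subsumed by the !fs->vlan check, and the .ndo_vlan_rx_add_vid callback presumably shrinks to a resolve-and-delegate wrapper along these lines (assumed, not shown in this diff):

	static int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
					 u16 vid)
	{
		struct mlx5e_priv *priv = netdev_priv(dev);

		return mlx5e_fs_vlan_rx_add_vid(priv->fs, dev, proto, vid);
	}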
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ fs_err(fs, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan->active_cvlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ clear_bit(vid, fs->vlan->active_cvlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
- netdev_update_features(dev);
+ clear_bit(vid, fs->vlan->active_svlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(netdev);
}
return 0;
}
-static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_add_any_vid_rules(fs);
}
-static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ WARN_ON_ONCE(fs->state_destroy);
- mlx5e_remove_vlan_trap(priv);
+ mlx5e_remove_vlan_trap(fs);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
@@ -547,9 +578,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
switch (action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
if (!is_multicast_ether_addr(mac_addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
+ l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
@@ -557,52 +588,51 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
case MLX5E_ACTION_DEL:
if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
- mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
+ mlx5e_del_l2_flow_rule(fs, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
}
if (l2_err)
- netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del",
+ mac_addr, l2_err);
}
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(netdev);
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
- priv->netdev->dev_addr);
-
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
+ struct net_device *ndev,
u8 addr_array[][ETH_ALEN], int size)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct net_device *ndev = priv->netdev;
struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->fs.l2.broadcast_enabled)
+ else if (fs->l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -614,7 +644,8 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
}
}
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
@@ -627,19 +658,18 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
if (size > max_size) {
- netdev_warn(priv->netdev,
- "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -649,65 +679,66 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
err = -ENOMEM;
goto out;
}
- mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+ mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
}
- err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+ err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- netdev_err(priv->netdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ fs_err(fs, "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_l2_table *ea = &priv->fs.l2;
+ struct mlx5e_l2_table *ea = &fs->l2;
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
- mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
+ mlx5_modify_nic_vport_promisc(fs->mdev, 0,
ea->allmulti_enabled,
ea->promisc_enabled);
}
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
+ mlx5e_execute_l2_action(fs, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
+ mlx5e_execute_l2_action(fs, hn);
}
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_sync_netdev_addr(priv);
+ if (fs->state_destroy)
+ mlx5e_sync_netdev_addr(fs, netdev);
- mlx5e_apply_netdev_addr(priv);
+ mlx5e_apply_netdev_addr(fs);
}
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
-static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
- struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_table *ft = fs->promisc.ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -718,22 +749,22 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
- rule_p = &priv->fs.promisc.rule;
+ rule_p = &fs->promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
}
-static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5e_flow_table *ft = &fs->promisc.ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -742,14 +773,14 @@ static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ fs_err(fs, "fail to create promisc table err=%d\n", err);
return err;
}
- err = mlx5e_add_promisc_rule(priv);
+ err = mlx5e_add_promisc_rule(fs);
if (err)
goto err_destroy_promisc_table;
@@ -762,34 +793,31 @@ err_destroy_promisc_table:
return err;
}
-static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+ if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
return;
- mlx5_del_flow_rules(priv->fs.promisc.rule);
- priv->fs.promisc.rule = NULL;
+ mlx5_del_flow_rules(fs->promisc.rule);
+ fs->promisc.rule = NULL;
}
-static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
return;
- mlx5e_del_promisc_rule(priv);
- mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
- priv->fs.promisc.ft.t = NULL;
+ mlx5e_del_promisc_rule(fs);
+ mlx5_destroy_flow_table(fs->promisc.ft.t);
+ fs->promisc.ft.t = NULL;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
-
- struct mlx5e_l2_table *ea = &priv->fs.l2;
- struct net_device *ndev = priv->netdev;
+ struct mlx5e_l2_table *ea = &fs->l2;
- bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
- bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
- bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool rx_mode_enable = fs->state_destroy;
+ bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
@@ -801,32 +829,32 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
int err;
if (enable_promisc) {
- err = mlx5e_create_promisc_table(priv);
+ err = mlx5e_create_promisc_table(fs);
if (err)
enable_promisc = false;
- if (!priv->channels.params.vlan_strip_disable && !err)
- netdev_warn_once(ndev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ if (!fs->vlan_strip_disable && !err)
+ fs_warn_once(fs,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
- mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);
- mlx5e_handle_netdev_addr(priv);
+ mlx5e_handle_netdev_addr(fs, netdev);
if (disable_broadcast)
- mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
if (disable_promisc)
- mlx5e_destroy_promisc_table(priv);
+ mlx5e_destroy_promisc_table(fs);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
- mlx5e_vport_context_update(priv);
+ mlx5e_vport_context_update(fs, netdev);
}
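
Note that fs->state_destroy carries the inverted MLX5E_STATE_DESTROYING bit, as the replacement of !test_bit(...) above shows. The function computes edge-triggered deltas between requested and programmed state, which keeps reruns of the work idempotent; tracing one case:

	/* Promisc currently off, IFF_PROMISC newly set, device alive:
	 *   rx_mode_enable  = fs->state_destroy                        = true
	 *   promisc_enabled = rx_mode_enable && IFF_PROMISC            = true
	 *   enable_promisc  = !ea->promisc_enabled && promisc_enabled  = true
	 *   disable_promisc =  ea->promisc_enabled && !promisc_enabled = false
	 * Only the create path runs; promisc_enabled is then stored in ea,
	 * so a second pass with unchanged flags does nothing.
	 */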
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
@@ -841,9 +869,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
- ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
@@ -854,14 +882,15 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
ft->t = NULL;
}
-static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
+static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -870,13 +899,14 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
- mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
+ mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss_inner(rx_res,
tt);
}
}
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel)
{
@@ -884,7 +914,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -893,23 +923,23 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
- mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
+ mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss(rx_res, tt);
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
+ mlx5_get_ttc_flow_table(fs->inner_ttc);
}
}
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
@@ -918,10 +948,10 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
}
}
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
@@ -939,7 +969,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
switch (type) {
case MLX5E_FULLMATCH:
@@ -957,8 +987,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1042,14 +1071,14 @@ err_destroy_groups:
return err;
}
-static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
{
- mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+ mlx5e_destroy_flow_table(&fs->l2.ft);
}
-static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_l2_table *l2_table = &fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1060,7 +1089,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1180,20 +1209,20 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
return err;
}
-static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
+static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft;
int err;
- ft = &priv->fs.vlan->ft;
+ ft = &fs->vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t))
return PTR_ERR(ft->t);
@@ -1207,7 +1236,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- mlx5e_add_vlan_rules(priv);
+ mlx5e_fs_add_vlan_rules(fs);
return 0;
@@ -1219,139 +1248,332 @@ err_destroy_vlan_table:
return err;
}
-static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ mlx5e_del_vlan_rules(fs);
+ mlx5e_destroy_flow_table(&fs->vlan->ft);
}
-static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+ mlx5_destroy_ttc_table(fs->inner_ttc);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(fs->ttc);
}
-static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return 0;
- mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs.inner_ttc))
- return PTR_ERR(priv->fs.inner_ttc);
+ mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
+ fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
+ &ttc_params);
+ if (IS_ERR(fs->inner_ttc))
+ return PTR_ERR(fs->inner_ttc);
return 0;
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc))
- return PTR_ERR(priv->fs.ttc);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
+ if (IS_ERR(fs->ttc))
+ return PTR_ERR(fs->ttc);
return 0;
}
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev)
{
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
-
- if (!priv->fs.ns)
+ if (!ns)
return -EOPNOTSUPP;
- err = mlx5e_arfs_create_tables(priv);
+ mlx5e_fs_set_ns(fs, ns, false);
+ err = mlx5e_arfs_create_tables(fs, rx_res,
+ !!(netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
- netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
- err);
- priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
+ netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ err = mlx5e_create_inner_ttc_table(fs, rx_res);
if (err) {
- netdev_err(priv->netdev,
- "Failed to create inner ttc table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(fs, rx_res);
if (err) {
- netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create ttc table, err=%d\n", err);
goto err_destroy_inner_ttc_table;
}
- err = mlx5e_create_l2_table(priv);
+ err = mlx5e_create_l2_table(fs);
if (err) {
- netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create l2 table, err=%d\n", err);
goto err_destroy_ttc_table;
}
- err = mlx5e_create_vlan_table(priv);
+ err = mlx5e_fs_create_vlan_table(fs);
if (err) {
- netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create vlan table, err=%d\n", err);
goto err_destroy_l2_table;
}
- err = mlx5e_ptp_alloc_rx_fs(priv);
+ err = mlx5e_ptp_alloc_rx_fs(fs, profile);
if (err)
goto err_destroy_vlan_table;
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(fs);
return 0;
err_destroy_vlan_table:
- mlx5e_destroy_vlan_table(priv);
+ mlx5e_destroy_vlan_table(fs);
err_destroy_l2_table:
- mlx5e_destroy_l2_table(priv);
+ mlx5e_destroy_l2_table(fs);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(fs);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv);
+ mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile)
{
- mlx5e_ptp_free_rx_fs(priv);
- mlx5e_destroy_vlan_table(priv);
- mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
- mlx5e_arfs_destroy_tables(priv);
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_ptp_free_rx_fs(fs, profile);
+ mlx5e_destroy_vlan_table(fs);
+ mlx5e_destroy_l2_table(fs);
+ mlx5e_destroy_ttc_table(fs);
+ mlx5e_destroy_inner_ttc_table(fs);
+ mlx5e_arfs_destroy_tables(fs, ntuple);
+ mlx5e_ethtool_cleanup_steering(fs);
}
-int mlx5e_fs_init(struct mlx5e_priv *priv)
+static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
{
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
+ fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
+ if (!fs->vlan)
return -ENOMEM;
return 0;
}
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
+{
+ kvfree(fs->vlan);
+}
+
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
+{
+ return fs->vlan;
+}
+
+static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->tc = mlx5e_tc_table_alloc();
+ if (IS_ERR(fs->tc))
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_tc_table_free(fs->tc);
+}
+
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
+{
+ return fs->tc;
+}
+
+#ifdef CONFIG_MLX5_EN_RXNFC
+static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
+{
+ return mlx5e_ethtool_alloc(&fs->ethtool);
+}
+
+static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_ethtool_free(fs->ethtool);
+}
+
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
+{
+ return fs->ethtool;
+}
+#else
+static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
+{ return 0; }
+static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
+#endif
+
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy)
+{
+ struct mlx5e_flow_steering *fs;
+ int err;
+
+ fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ goto err;
+
+ fs->mdev = mdev;
+ fs->state_destroy = state_destroy;
+ if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+ err = mlx5e_fs_vlan_alloc(fs);
+ if (err)
+ goto err_free_fs;
+ }
+
+ if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+ err = mlx5e_fs_tc_alloc(fs);
+ if (err)
+ goto err_free_vlan;
+ }
+
+ err = mlx5e_fs_ethtool_alloc(fs);
+ if (err)
+ goto err_free_tc;
+
+ return fs;
+err_free_tc:
+ mlx5e_fs_tc_free(fs);
+err_free_vlan:
+ mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
+err:
+ return NULL;
+}
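
mlx5e_fs_init() signals failure by returning NULL, collapsing the internal errnos into one case; a caller can only report -ENOMEM. A hedged sketch of the expected call site (the exact spot in en_main.c is assumed), which also shows how state_destroy arrives as the negated DESTROYING bit, consistent with the substitutions earlier in this file:

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!priv->fs)
		return -ENOMEM;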
+
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_fs_ethtool_free(fs);
+ mlx5e_fs_tc_free(fs);
+ mlx5e_fs_vlan_free(fs);
+ kvfree(fs);
+}
+
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
+{
+ return &fs->l2;
+}
+
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
+{
+ return egress ? fs->egress_ns : fs->ns;
+}
+
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
+{
+ if (!egress)
+ fs->ns = ns;
+ else
+ fs->egress_ns = ns;
+}
+
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
+{
+ return inner ? fs->inner_ttc : fs->ttc;
+}
+
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
+{
+ if (!inner)
+ fs->ttc = ttc;
+ else
+ fs->inner_ttc = ttc;
+}
+
+#ifdef CONFIG_MLX5_EN_ARFS
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
+{
+ return fs->arfs;
+}
+
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
+{
+ fs->arfs = arfs;
+}
+#endif
+
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
+{
+ return fs->ptp_fs;
+}
+
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
+{
+ fs->ptp_fs = ptp_fs;
+}
+
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
+{
+ return fs->any;
+}
+
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
+{
+ fs->any = any;
+}
+
+#ifdef CONFIG_MLX5_EN_TLS
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
+{
+ return fs->accel_tcp;
+}
+
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
+{
+ fs->accel_tcp = accel_tcp;
+}
+#endif
+
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
+{
+ fs->state_destroy = state_destroy;
+}
+
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
+ bool vlan_strip_disable)
+{
+ fs->vlan_strip_disable = vlan_strip_disable;
+}
+
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
+{
+ return fs->udp;
+}
+
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
+{
+ fs->udp = udp;
+}
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
{
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
+ return fs->mdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index ad0d234632a3..aac32e505c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -34,6 +34,22 @@
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
+#include "en/fs_ethtool.h"
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
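
As with struct mlx5e_flow_steering, the ethtool steering state becomes private to its .c file and is reached via mlx5e_fs_get_ethtool(). The fixed arrays encode a priority scheme visible in get_flow_table() below: the more tuples a rule matches, the stronger its table priority:

	/* For an L3/L4 rule with num_tuples == 5 (illustrative):
	 *   prio   = MLX5E_ETHTOOL_L3_L4_PRIO + (ETHTOOL_NUM_L3_L4_FTS - 5);
	 *   eth_ft = &ethtool->l3_l4_ft[prio];
	 * so a rule matching 7 tuples is consulted before one matching 2.
	 */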
static int flow_type_to_traffic_type(u32 flow_type);
@@ -66,6 +82,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
@@ -81,18 +98,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &ethtool->l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &ethtool->l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ eth_ft = &ethtool->l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -382,15 +399,16 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
+ struct list_head *head = &ethtool->rules;
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs.ethtool.rules;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs.ethtool.tot_num_rules++;
+ ethtool->tot_num_rules++;
list_add(&rule->list, head);
}
@@ -433,15 +451,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
eth_rule->rss = rss;
mlx5e_rss_refcnt_inc(eth_rule->rss);
} else {
- struct mlx5e_params *params = &priv->channels.params;
- enum mlx5e_rq_group group;
- u16 ix;
-
- mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
-
- *tirn = group == MLX5E_RQ_GROUP_XSK ?
- mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
+ *tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie);
}
return 0;
@@ -499,15 +509,16 @@ free:
return err ? ERR_PTR(err) : rule;
}
-static void del_ethtool_rule(struct mlx5e_priv *priv,
+static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_ethtool_rule *eth_rule)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
if (eth_rule->rule)
mlx5_del_flow_rules(eth_rule->rule);
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs.ethtool.tot_num_rules--;
+ ethtool->tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -515,9 +526,10 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -531,7 +543,7 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
eth_rule = find_ethtool_rule(priv, location);
if (eth_rule)
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
if (!eth_rule)
@@ -662,8 +674,7 @@ static int validate_flow(struct mlx5e_priv *priv,
return -ENOSPC;
if (fs->ring_cookie != RX_CLS_FLOW_DISC)
- if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
- fs->ring_cookie))
+ if (fs->ring_cookie >= priv->channels.params.num_channels)
return -EINVAL;
switch (flow_type_mask(fs->flow_type)) {
@@ -742,10 +753,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft;
- if (!eth_ft->ft) {
- err = -EINVAL;
- goto del_ethtool_rule;
- }
+
rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -757,7 +765,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
return 0;
del_ethtool_rule:
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
return err;
}
@@ -777,7 +785,7 @@ mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
goto out;
}
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
out:
return err;
}
@@ -786,12 +794,13 @@ static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, int location)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *eth_rule;
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &ethtool->rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -829,18 +838,34 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
return err;
}
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
+{
+ *ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
+ if (!*ethtool)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
{
+ kvfree(ethtool);
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
+{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
- del_ethtool_rule(priv, iter);
+ list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
+ del_ethtool_rule(fs, iter);
}
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
- INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
+
+ INIT_LIST_HEAD(&ethtool->rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -962,11 +987,12 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ info->rule_cnt = ethtool->tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ac69e0aa09bf..e3a4f01bcceb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,7 +31,6 @@
*/
#include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
@@ -46,10 +45,9 @@
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ktls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
@@ -66,25 +64,29 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
-#include "fpga/ipsec.h"
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
- u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
- bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+ u16 umr_wqebbs, max_wqebbs;
+ bool striding_rq_umr;
+ striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
if (!striding_rq_umr)
return false;
- if (!inline_umr) {
- mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
- (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+ max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+ /* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
+ * calculated from mlx5e_get_max_sq_aligned_wqebbs.
+ */
+ if (WARN_ON(umr_wqebbs > max_wqebbs))
return false;
- }
+
return true;
}
@@ -201,21 +203,35 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
}
+static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+
+ WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+
+ return entries * umr_entry_size / MLX5_OCTWORD;
+}
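
MLX5_OCTWORD is 16 bytes, so this is plain unit conversion, and the WARN_ON_ONCE documents the divisibility assumption behind the integer division. Worked through for aligned mode, taking its UMR entry to be an 8-byte MTT (values illustrative):

	/* 64 pages * 8 bytes per MTT / 16 bytes per octword == 32 */
	u16 octw = mlx5e_mpwrq_umr_octowords(64, MLX5E_MPWRQ_UMR_MODE_ALIGNED);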
+
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
+ u16 octowords;
+ u8 ds_cnt;
+
+ ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode),
+ MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
- cseg->umr_mkey = rq->mkey_be;
+ cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- ucseg->xlt_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
+ ucseg->xlt_octowords = cpu_to_be16(octowords);
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
@@ -261,10 +277,12 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ size_t alloc_size;
- rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
- sizeof(*rq->mpwqe.info)),
- GFP_KERNEL, node);
+ alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, alloc_units,
+ rq->mpwqe.pages_per_wqe));
+
+ rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -273,18 +291,52 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
-static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift, u32 *umr_mkey,
- dma_addr_t filler_addr)
+
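+/* mkey access mode per UMR mode: MTT suits equal-size aligned pages,
+ * KSM provides one fixed-size entry per frame for the unaligned layouts,
+ * and KLM carries an explicit byte count per entry for OVERSIZED.
+ */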
+static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return MLX5_MKC_ACCESS_MODE_MTT;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return MLX5_MKC_ACCESS_MODE_KSM;
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ return MLX5_MKC_ACCESS_MODE_KLMS;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ return MLX5_MKC_ACCESS_MODE_KSM;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+ return 0;
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
+ u32 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr,
+ enum mlx5e_mpwrq_umr_mode umr_mode,
+ u32 xsk_chunk_size)
{
struct mlx5_mtt *mtt;
+ struct mlx5_ksm *ksm;
+ struct mlx5_klm *klm;
+ u32 octwords;
int inlen;
void *mkc;
u32 *in;
int err;
int i;
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
+ if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
+ umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
+ !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
+ mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
+ return -EINVAL;
+ }
+
+ octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
+
+ inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+ MLX5_OCTWORD, octwords);
+ if (inlen < 0)
+ return inlen;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
@@ -296,16 +348,17 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
- MLX5_SET(mkc, mkc, translations_octword_size,
- MLX5_MTT_OCTW(npages));
- MLX5_SET(mkc, mkc, log_page_size, page_shift);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, translations_octword_size, octwords);
+ if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
+ MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
+ else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
/* Initialize the mkey with all MTTs pointing to a default
* page (filler_addr). When the channels are activated, UMR
@@ -313,9 +366,47 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
* the RQ's pool, while the gaps (wqe_overflow) remain mapped
* to the default page.
*/
- mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for (i = 0 ; i < npages ; i++)
- mtt[i].ptag = cpu_to_be64(filler_addr);
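+ /* OVERSIZED splits each page into two KLMs: the XSK chunk and the gap
+ * up to the page end. TRIPLE covers each page with four quarter-page
+ * KSMs, matching the log_page_size of page_shift - 2 set above.
+ */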
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++) {
+ klm[i << 1] = (struct mlx5_klm) {
+ .va = cpu_to_be64(filler_addr),
+ .bcount = cpu_to_be32(xsk_chunk_size),
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ };
+ klm[(i << 1) + 1] = (struct mlx5_klm) {
+ .va = cpu_to_be64(filler_addr),
+ .bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ };
+ }
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ mtt[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(filler_addr),
+ };
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages * 4; i++) {
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ }
+ break;
+ }
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
@@ -358,10 +449,27 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
+ u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
+ u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ u32 num_entries, max_num_entries;
+ u32 umr_mkey;
+ int err;
+
+ max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
- return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
- &rq->umr_mkey, rq->wqe_overflow.addr);
+ /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
+ if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
+ &num_entries) ||
+ num_entries > max_num_entries))
+ mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
+ __func__, wq_size, rq->mpwqe.mtts_per_wqe,
+ max_num_entries);
+
+ err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
+ &umr_mkey, rq->wqe_overflow.addr,
+ rq->mpwqe.umr_mode, xsk_chunk_size);
+ rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
+ return err;
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
@@ -378,18 +486,20 @@ static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
&rq->mpwqe.shampo->mkey);
}
-static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
-{
- return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
-}
-
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
struct mlx5e_wqe_frag_info *prev = NULL;
int i;
- next_frag.di = &rq->wqe.di[0];
+ if (rq->xsk_pool) {
+ /* Assumptions used by XSK batched allocator. */
+ WARN_ON(rq->wqe.info.num_frags != 1);
+ WARN_ON(rq->wqe.info.log_num_frags != 0);
+ WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
+ }
+
+ next_frag.au = &rq->wqe.alloc_units[0];
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -399,7 +509,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
- next_frag.di++;
+ next_frag.au++;
next_frag.offset = 0;
if (prev)
prev->last_in_page = true;
@@ -416,12 +526,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
- rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
- if (!rq->wqe.di)
+ rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
+ GFP_KERNEL, node);
+ if (!rq->wqe.alloc_units)
return -ENOMEM;
mlx5e_init_frags_partition(rq);
@@ -429,9 +540,9 @@ int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
return 0;
}
-void mlx5e_free_di_list(struct mlx5e_rq *rq)
+static void mlx5e_free_au_list(struct mlx5e_rq *rq)
{
- kvfree(rq->wqe.di);
+ kvfree(rq->wqe.alloc_units);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
@@ -477,6 +588,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->clock = &mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
+ rq->channel = c;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
@@ -486,7 +598,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -573,6 +685,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
@@ -588,19 +702,31 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
- mlx5e_mpwqe_get_log_rq_size(params, xsk);
+ rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ rq->mpwqe.pages_per_wqe =
+ mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+ rq->mpwqe.umr_wqebbs =
+ mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+ rq->mpwqe.mtts_per_wqe =
+ mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+
+ pool_size = rq->mpwqe.pages_per_wqe <<
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
@@ -608,7 +734,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
if (err)
- goto err_free_by_rq_type;
+ goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -633,11 +759,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, node);
+ err = mlx5e_init_au_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
-
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@ -662,14 +786,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
- goto err_free_shampo;
+ goto err_free_by_rq_type;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
- goto err_free_shampo;
+ goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -677,13 +801,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
- u64 dma_offset = mlx5e_get_mpwqe_offset(i);
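+ /* WQE i owns pages [i * mtts_per_wqe, (i + 1) * mtts_per_wqe)
+ * in the address space of the RQ's UMR mkey.
+ */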
+ u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
+ rq->mpwqe.page_shift;
u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
0 : rq->buff.headroom;
wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
- wqe->data[0].lkey = rq->mkey_be;
+ wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
} else {
struct mlx5e_rx_wqe_cyc *wqe =
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
@@ -721,19 +846,21 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
return 0;
-err_free_shampo:
- mlx5e_rq_free_shampo(rq);
+err_destroy_page_pool:
+ page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
err_rq_frags:
kvfree(rq->wqe.frags);
}
@@ -761,24 +888,22 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
- struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
/* With AF_XDP, page_cache is not used, so this loop is not
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
- mlx5e_page_release_dynamic(rq, dma_info, false);
+ mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -833,7 +958,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
return err;
}
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -862,6 +987,32 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
return err;
}
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+ struct net_device *dev = rq->netdev;
+ int err;
+
+ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+ return err;
+ }
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+ return err;
+ }
+
+ return 0;
+}
+
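+/* Reclaim the RX descriptors posted in software, then cycle the RQ through
+ * RST back to RDY so the hardware queue restarts from a clean state.
+ */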
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
+{
+ mlx5e_free_rx_descs(rq);
+
+ return mlx5e_rq_to_ready(rq, curr_state);
+}
+
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1034,9 +1185,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
- __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
-
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
@@ -1070,13 +1218,6 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq) {
- mlx5e_trigger_irq(rq->icosq);
- } else {
- local_bh_disable();
- napi_schedule(rq->cq.napi);
- local_bh_enable();
- }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -1164,6 +1305,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
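+ /* Reserve enough SQ space that the queue is stopped while the largest
+ * possible WQE still fits.
+ */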
+ sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
+ mlx5e_stop_room_for_max_wqe(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1313,7 +1457,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
@@ -1324,10 +1467,11 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
- if (MLX5_IPSEC_DEV(c->priv->mdev))
+ if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
@@ -1659,14 +1803,22 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+
+ /* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
+ * supported by upstream, and there is no defined trigger to allow
+ * transmitting redirected multi-buffer frames.
+ */
+ if (param->is_xdp_mb && !is_redirect)
+ set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
+
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw) {
- unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+ unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
unsigned int inline_hdr_sz = 0;
int i;
@@ -1909,8 +2061,7 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
{
int tc;
- if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
- !params->mqprio.channel.rl) {
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
*hw_id = 0;
return 0;
}
@@ -1919,7 +2070,14 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
if (tc < 0)
return tc;
- return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+ if (tc >= params->mqprio.num_tc) {
+ WARN(1, "Unexpected TC configuration: tc %d is out of range of %u",
+ tc, params->mqprio.num_tc);
+ return -EINVAL;
+ }
+
+ *hw_id = params->mqprio.channel.hw_id[tc];
+ return 0;
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
@@ -2218,6 +2376,20 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
return 0;
}
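+
+/* Post a NOP on the async ICOSQ to generate a completion and kick NAPI;
+ * the lock serializes doorbell access with other async ICOSQ users.
+ */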
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
+{
+ spin_lock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_irq(&c->async_icosq);
+ spin_unlock_bh(&c->async_icosq_lock);
+}
+
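+/* napi_schedule only raises NET_RX_SOFTIRQ; running it inside a BH-disabled
+ * section makes the softirq execute as soon as BHs are re-enabled, even when
+ * called from process context.
+ */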
+void mlx5e_trigger_napi_sched(struct napi_struct *napi)
+{
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -2257,7 +2429,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
- netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2295,10 +2467,13 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_icosq(&c->async_icosq);
- mlx5e_activate_rq(&c->rq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
+ else
+ mlx5e_activate_rq(&c->rq);
+
+ mlx5e_trigger_napi_icosq(c);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2307,8 +2482,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
+ else
+ mlx5e_deactivate_rq(&c->rq);
- mlx5e_deactivate_rq(&c->rq);
mlx5e_deactivate_icosq(&c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
@@ -2364,9 +2540,11 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- err = mlx5e_qos_open_queues(priv, chs);
- if (err)
- goto err_close_ptp;
+ if (priv->htb) {
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+ }
mlx5e_health_channels_update(priv);
kvfree(cparam);
@@ -2398,8 +2576,6 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
mlx5e_ptp_activate_channel(chs->ptp);
}
-#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
-
static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
int err = 0;
@@ -2407,8 +2583,12 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++) {
int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
+ struct mlx5e_channel *c = chs->c[i];
- err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ continue;
+
+ err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
/* Don't wait on the XSK RQ, because the newer xdpsock sample
* doesn't provide any Fill Ring entries at the setup stage.
@@ -2548,9 +2728,11 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
- int qos_queues, nch, ntc, num_txqs, err;
+ int nch, ntc, num_txqs, err;
+ int qos_queues = 0;
- qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+ if (priv->htb)
+ qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
nch = priv->channels.params.num_channels;
ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
@@ -2571,7 +2753,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
int old_num_txqs, old_ntc;
- int num_rxqs, nch, ntc;
+ int nch, ntc;
int err;
int i;
@@ -2582,7 +2764,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.mqprio.num_tc;
- num_rxqs = nch * priv->profile->rq_groups;
tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
@@ -2591,18 +2772,11 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
- err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+ err = netif_set_real_num_rx_queues(netdev, nch);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
- if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
- priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
- }
return 0;
@@ -2677,43 +2851,46 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
- priv->channel_tc2realtxq[i][tc] = i + tc * ch;
}
}
if (!priv->channels.ptp)
- return;
+ goto out;
if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
- return;
+ goto out;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
- priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
}
-}
-static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
-{
- /* Sync with mlx5e_select_queue. */
- WRITE_ONCE(priv->num_tc_x_num_ch,
- mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
+out:
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
- mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
- mlx5e_qos_activate_queues(priv);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
+
+ /* dev_watchdog() wants all TX queues to be started when the carrier is
+ * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
+ * Make it happy to avoid TX timeout false alarms.
+ */
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
- mlx5e_add_sqs_fwd_rules(priv);
+ mlx5e_rep_activate_channels(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2727,13 +2904,15 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
- mlx5e_remove_sqs_fwd_rules(priv);
+ mlx5e_rep_deactivate_channels(priv);
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
+ /* The results of ndo_select_queue are unreliable while netdev config
+ * is being changed (real_num_tx_queues, num_tc). Stop all queues to
+ * prevent ndo_start_xmit from being called, so that it can assume that
+ * the selected queue is always valid.
*/
- netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
+
mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
@@ -2793,6 +2972,7 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
out:
mlx5e_activate_priv_channels(priv);
@@ -2816,13 +2996,24 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
return mlx5e_switch_priv_params(priv, params, preactivate, context);
new_chs.params = *params;
+
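+ /* Stage the new TX queue-selection parameters: they take effect only at
+ * mlx5e_selq_apply() once the switch succeeds, and are dropped by
+ * mlx5e_selq_cancel() on failure.
+ */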
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
+
err = mlx5e_open_channels(priv, &new_chs);
if (err)
- return err;
+ goto err_cancel_selq;
+
err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
if (err)
- mlx5e_close_channels(&new_chs);
+ goto err_close;
+
+ return 0;
+err_close:
+ mlx5e_close_channels(&new_chs);
+
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -2862,6 +3053,8 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
+
set_bit(MLX5E_STATE_OPENED, &priv->state);
err = mlx5e_open_channels(priv, &priv->channels);
@@ -2869,6 +3062,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
@@ -2879,6 +3073,7 @@ int mlx5e_open_locked(struct net_device *netdev)
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -3096,6 +3291,12 @@ err_close_tises:
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+ mlx5e_accel_cleanup_tx(priv);
mlx5e_destroy_tises(priv);
}
@@ -3164,19 +3365,38 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
- params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
+static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
+ struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ u32 hw_id = 0;
+
+ if (rl)
+ mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
+ params->mqprio.channel.hw_id[tc] = hw_id;
+ }
+}
+
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt,
+ struct tc_mqprio_qopt_offload *mqprio,
struct mlx5e_mqprio_rl *rl)
{
+ int tc;
+
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
- params->mqprio.num_tc = qopt->num_tc;
- params->mqprio.channel.rl = rl;
- mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+ params->mqprio.num_tc = mqprio->qopt.num_tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
+
+ mlx5e_mqprio_rl_update_params(params, rl);
+ mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
}
static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
@@ -3202,6 +3422,12 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
+ if (!err && priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
mlx5e_get_dcb_num_tc(&priv->channels.params));
return err;
@@ -3260,16 +3486,38 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
-static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
{
int tc;
- for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
- if (mqprio->max_rate[tc])
+ for (tc = 0; tc < num_tc; tc++)
+ if (max_rate[tc])
return true;
return false;
}
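+
+/* Returns NULL when no TC has a max_rate configured (rate limiting not
+ * needed), an ERR_PTR on allocation or init failure, and a usable handle
+ * otherwise.
+ */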
+static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
+ u8 num_tc, u64 max_rate[])
+{
+ struct mlx5e_mqprio_rl *rl;
+ int err;
+
+ if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
+ return NULL;
+
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return ERR_PTR(err);
+ }
+
+ return rl;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -3283,32 +3531,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
- rl = NULL;
- if (mlx5e_mqprio_rate_limit(mqprio)) {
- rl = mlx5e_mqprio_rl_alloc();
- if (!rl)
- return -ENOMEM;
- err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
- mqprio->max_rate);
- if (err) {
- mlx5e_mqprio_rl_free(rl);
- return err;
- }
- }
+ rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
+ if (IS_ERR(rl))
+ return PTR_ERR(rl);
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+ mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
- if (err && rl) {
- mlx5e_mqprio_rl_cleanup(rl);
- mlx5e_mqprio_rl_free(rl);
+ if (err) {
+ if (rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+ return err;
}
- return err;
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = rl;
+
+ return 0;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3317,7 +3565,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(priv->htb.maj_id))
+ if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
return -EINVAL;
switch (mqprio->mode) {
@@ -3330,47 +3578,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
}
}
-static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
-{
- int res;
-
- switch (htb->command) {
- case TC_HTB_CREATE:
- return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
- htb->extack);
- case TC_HTB_DESTROY:
- return mlx5e_htb_root_del(priv);
- case TC_HTB_LEAF_ALLOC_QUEUE:
- res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
- htb->rate, htb->ceil, htb->extack);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- case TC_HTB_LEAF_TO_INNER:
- return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
- htb->rate, htb->ceil, htb->extack);
- case TC_HTB_LEAF_DEL:
- return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
- case TC_HTB_LEAF_DEL_LAST:
- case TC_HTB_LEAF_DEL_LAST_FORCE:
- return mlx5e_htb_leaf_del_last(priv, htb->classid,
- htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
- htb->extack);
- case TC_HTB_NODE_MODIFY:
- return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
- htb->extack);
- case TC_HTB_LEAF_QUERY_QUEUE:
- res = mlx5e_get_txq_by_classid(priv, htb->classid);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3404,7 +3611,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
return err;
case TC_SETUP_QDISC_HTB:
mutex_lock(&priv->state_lock);
- err = mlx5e_setup_tc_htb(priv, type_data);
+ err = mlx5e_htb_setup_tc(priv, type_data);
mutex_unlock(&priv->state_lock);
return err;
default:
@@ -3492,7 +3699,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
- PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+ PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
+ VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
stats->rx_crc_errors =
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
@@ -3555,20 +3763,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
- if (enable && priv->xsk.refcnt) {
- netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
- priv->xsk.refcnt);
- err = -EINVAL;
- goto out;
- }
-
cur_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- netdev_warn(netdev, "can't set LRO with legacy RQ\n");
- err = -EINVAL;
- goto out;
- }
-
new_params = *cur_params;
if (enable)
@@ -3616,8 +3811,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
goto out;
}
- err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3628,9 +3822,11 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
- mlx5e_enable_cvlan_filter(priv);
+ mlx5e_enable_cvlan_filter(priv->fs,
+ !!(priv->netdev->flags & IFF_PROMISC));
else
- mlx5e_disable_cvlan_filter(priv);
+ mlx5e_disable_cvlan_filter(priv->fs,
+ !!(priv->netdev->flags & IFF_PROMISC));
return 0;
}
@@ -3638,21 +3834,26 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+ MLX5_TC_FLAG(NIC_OFFLOAD);
+ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
#endif
- if (!enable && priv->htb.maj_id) {
+ mutex_lock(&priv->state_lock);
+ if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
- return -EINVAL;
+ err = -EINVAL;
}
+ mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
static int set_feature_rx_all(struct net_device *netdev, bool enable)
@@ -3734,20 +3935,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
+ mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
priv->channels.params.vlan_strip_disable = !enable;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
- if (err)
+ if (err) {
+ mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
priv->channels.params.vlan_strip_disable = enable;
-
+ }
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
+}
+
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -3755,9 +3981,9 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
int err;
if (enable)
- err = mlx5e_arfs_enable(priv);
+ err = mlx5e_arfs_enable(priv->fs);
else
- err = mlx5e_arfs_disable(priv);
+ err = mlx5e_arfs_disable(priv->fs);
return err;
}
@@ -3830,6 +4056,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_NTUPLE)
netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+ features &= ~NETIF_F_GRO_HW;
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+
return features;
}
@@ -3837,12 +4067,14 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
+ vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs.vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+ if (!vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -3862,6 +4094,30 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ if (params->xdp_prog) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
+ if (priv->xsk.refcnt) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_LRO;
+ }
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
@@ -3907,7 +4163,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
* 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
- max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
+ max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
max_mtu = min(max_mtu_frame, max_mtu_page);
netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
@@ -3919,6 +4175,33 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
+static bool mlx5e_params_validate_xdp(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ bool is_linear;
+
+ /* No XSK params: AF_XDP can't be enabled yet at the point of setting
+ * the XDP program.
+ */
+ is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
+
+ if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+ if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
+ netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi-buffer\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+
+ return true;
+}
+
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
@@ -3938,10 +4221,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
- if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
- netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
- new_mtu, mlx5e_xdp_max_mtu(params, NULL));
+ if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
+ &new_params)) {
err = -EINVAL;
goto out;
}
@@ -3956,19 +4237,21 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
&new_params, NULL);
- u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
- u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
+ u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
+ u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
/* Always reset in linear mode - hw_mtu is used in data path.
* Check that the mode was non-linear and didn't change.
* If XSK is active, XSK RQs are linear.
+ * Reset if the RQ size changed, even if it's non-linear.
*/
if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
- ppw_old == ppw_new)
+ sz_old == sz_new)
reset = false;
}
@@ -4415,24 +4698,11 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
- netdev_warn(netdev,
- "XDP is not available on Innova cards with IPsec support\n");
- return -EINVAL;
- }
-
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- /* No XSK params: AF_XDP can't be enabled yet at the point of setting
- * the XDP program.
- */
- if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
- netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_params.sw_mtu,
- mlx5e_xdp_max_mtu(&new_params, NULL));
+ if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
return -EINVAL;
- }
return 0;
}
@@ -4469,8 +4739,20 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- if (reset)
- mlx5e_set_rq_type(priv->mdev, &new_params);
+
+ /* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
+ * supported with the new parameters: if log(PAGE_SIZE) exceeds
+ * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, a PAGE_SIZE stride can't be used, even
+ * though the MTU is small enough for the linear mode, because XDP uses
+ * strides of PAGE_SIZE on regular RQs.
+ */
+ if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ /* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
+ err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
+ if (err)
+ goto unlock;
+ }
+
old_prog = priv->channels.params.xdp_prog;
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
@@ -4499,6 +4781,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
unlock:
mutex_unlock(&priv->state_lock);
+
+ /* XDP restricts some features (e.g. LRO, HW GRO); re-evaluate them. */
+ if (!err)
+ netdev_update_features(netdev);
+
return err;
}
@@ -4637,11 +4924,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
priv->max_nch);
mlx5e_params_mqprio_reset(params);
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -4663,14 +4945,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
- /* HW LRO */
- if (MLX5_CAP_ETH(mdev, lro_cap) &&
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- /* No XSK params: checking the availability of striding RQ in general. */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->packet_merge.type = slow_pci_heuristic(mdev) ?
- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
- }
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
@@ -4780,6 +5054,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->vlan_features |= NETIF_F_GSO_PARTIAL;
netdev->mpls_features |= NETIF_F_SG;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -4797,7 +5072,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev))
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
@@ -4806,10 +5082,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (!!MLX5_CAP_GEN(mdev, shampo) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev))
- netdev->hw_features |= NETIF_F_GRO_HW;
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4845,7 +5117,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_IPXIP6;
}
- netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4888,9 +5159,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_netdev_dev_addr(netdev);
+ mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
- mlx5e_tls_build_netdev(priv);
+ mlx5e_ktls_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4935,6 +5208,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4942,17 +5216,20 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
- err = mlx5e_fs_init(priv);
- if (err) {
+ fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!fs) {
+ err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
+ priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
- err = mlx5e_tls_init(priv);
+ err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -4963,9 +5240,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
- mlx5e_tls_cleanup(priv);
+ mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_fs_cleanup(priv);
+ mlx5e_fs_cleanup(priv->fs);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4986,7 +5263,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
+ features = MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
@@ -4996,7 +5273,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_flow_steering(priv);
+ err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
+ priv->netdev);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_rx_res;
@@ -5019,7 +5297,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
@@ -5035,7 +5314,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -5043,6 +5323,23 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
}
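+
+/* Rebuild the HW rate-limit objects from the saved per-TC max_rate values;
+ * on failure, rate limiting is quietly disabled by leaving rl as NULL.
+ */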
+static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
+{
+ struct mlx5e_params *params;
+ struct mlx5e_mqprio_rl *rl;
+
+ params = &priv->channels.params;
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+ return;
+
+ rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
+ params->mqprio.channel.max_rate);
+ if (IS_ERR(rl))
+ rl = NULL;
+ priv->mqprio_rl = rl;
+ mlx5e_mqprio_rl_update_params(params, rl);
+}
+
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
@@ -5053,16 +5350,30 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_accel_init_tx(priv);
+ if (err)
+ goto err_destroy_tises;
+
+ mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+ return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, netdev);
+
+ err = mlx5e_macsec_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
@@ -5121,6 +5432,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
+ mlx5e_macsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5142,12 +5454,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
- BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
@@ -5173,8 +5486,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
- tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
- max_nch = min_t(unsigned int, max_nch, tmp);
+ max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);
/* netdev tx queues */
tmp = netdev->num_tx_queues;
@@ -5188,13 +5500,31 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
return max_nch;
}
+int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+{
+ /* Indirect TIRS: 2 sets of TTCs (inner + outer steering)
+ * and 1 set of direct TIRS
+ */
+ return 2 * MLX5E_NUM_INDIR_TIRS
+ + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
- int nch, num_txqs, node, i;
+ int nch, num_txqs, node;
+ int err;
num_txqs = netdev->num_tx_queues;
nch = mlx5e_calc_max_nch(mdev, netdev, profile);
@@ -5211,7 +5541,11 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
return -ENOMEM;
mutex_init(&priv->state_lock);
- hash_init(priv->htb.qos_tc2node);
+
+ err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
+ if (err)
+ goto err_free_cpumask;
+
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5219,7 +5553,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_cpumask;
+ goto err_free_selq;
priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
if (!priv->txq2sq)
@@ -5229,36 +5563,21 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->tx_rates)
goto err_free_txq2sq;
- priv->channel_tc2realtxq =
- kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq)
- goto err_free_tx_rates;
-
- for (i = 0; i < nch; i++) {
- priv->channel_tc2realtxq[i] =
- kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
- GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq[i])
- goto err_free_channel_tc2realtxq;
- }
-
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
- goto err_free_channel_tc2realtxq;
+ goto err_free_tx_rates;
return 0;
-err_free_channel_tc2realtxq:
- while (--i >= 0)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
destroy_workqueue(priv->wq);
+err_free_selq:
+ mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
return -ENOMEM;
@@ -5275,22 +5594,17 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
- for (i = 0; i < priv->max_nch; i++)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
+ mutex_lock(&priv->state_lock);
+ mlx5e_selq_cleanup(&priv->selq);
+ mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
- for (i = 0; i < priv->htb.max_qos_sqs; i++)
- kfree(priv->htb.qos_sq_stats[i]);
- kvfree(priv->htb.qos_sq_stats);
-
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
+ for (i = 0; i < priv->htb_max_qos_sqs; i++)
+ kfree(priv->htb_qos_sq_stats[i]);
+ kvfree(priv->htb_qos_sq_stats);
memset(priv, 0, sizeof(*priv));
}
@@ -5316,11 +5630,7 @@ static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
- unsigned int nch;
-
- nch = mlx5e_profile_max_num_channels(mdev, profile);
-
- return nch * profile->rq_groups;
+ return mlx5e_profile_max_num_channels(mdev, profile);
}
struct net_device *
@@ -5346,6 +5656,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
}
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
dev_net_set(netdev, mlx5_core_net(mdev));
return netdev;
@@ -5379,6 +5690,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+
+ /* Validate the max_wqe_size_sq capability. */
+ if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+ mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+ return -EIO;
+ }
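The guard above compares the largest WQE the driver may ever build against the firmware limit before attaching the netdev. A minimal standalone sketch of the same check, with hypothetical values (a WQEBB is a 64-byte basic block; the real inputs are mlx5e_get_max_sq_wqebbs() and MLX5E_MAX_TX_WQEBBS):

	/* Sketch of the capability guard, assuming hypothetical limits. */
	#include <stdio.h>

	static int wqebbs_guard(unsigned int fw_max_wqebbs, unsigned long drv_max_wqebbs)
	{
		if (fw_max_wqebbs < drv_max_wqebbs) {
			fprintf(stderr, "need %lu WQEBBs, firmware allows only %u\n",
				drv_max_wqebbs, fw_max_wqebbs);
			return -5; /* -EIO, mirroring the bail-out above */
		}
		return 0; /* safe to attach the netdev */
	}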
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5438,6 +5759,9 @@ err_cleanup_tx:
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5447,6 +5771,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (profile->disable)
profile->disable(priv);
@@ -5534,7 +5861,7 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
@@ -5557,7 +5884,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -5589,7 +5916,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- dev_set_drvdata(&adev->dev, priv);
+ auxiliary_set_drvdata(adev, priv);
priv->profile = profile;
priv->ppriv = NULL;
@@ -5637,7 +5964,7 @@ err_destroy_netdev:
static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
pm_message_t state = {};
mlx5e_dcbnl_delete_app(priv);
@@ -5668,7 +5995,6 @@ int mlx5e_init(void)
{
int ret;
- mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 06d1f46f1688..794cd8dfe9c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -55,6 +55,8 @@
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
+#include "en/ptp.h"
+#include "en/fs_ethtool.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -68,7 +70,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ strscpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
@@ -228,7 +230,7 @@ mlx5e_rep_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int
@@ -396,18 +398,30 @@ out_err:
return err;
}
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+static int
+mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
+ int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int n, tc, nch, num_sqs = 0;
struct mlx5e_channel *c;
- int n, tc, num_sqs = 0;
int err = -ENOMEM;
+ bool ptp_sq;
u32 *sqs;
- sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
- sizeof(*sqs), GFP_KERNEL);
+ ptp_sq = !!(priv->channels.ptp &&
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
+ nch = priv->channels.num + ptp_sq;
+ /* +2 for the xdpsqs; they don't exist on the PTP channel but will not be
+ * counted in num_sqs.
+ */
+ if (is_uplink_rep)
+ sqs_per_channel += 2;
+
+ sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
if (!sqs)
goto out;
@@ -415,10 +429,23 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
c = priv->channels.c[n];
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
+
+ if (is_uplink_rep) {
+ if (c->xdp)
+ sqs[num_sqs++] = c->rq_xdpsq.sqn;
+
+ sqs[num_sqs++] = c->xdpsq.sqn;
+ }
+ }
+ if (ptp_sq) {
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+
+ for (tc = 0; tc < ptp_ch->num_tc; tc++)
+ sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
}
err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
- kfree(sqs);
+ kvfree(sqs);
out:
if (err)
@@ -426,7 +453,8 @@ out:
return err;
}
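A hedged sizing walk-through for the kvcalloc() above, with hypothetical numbers: 8 regular channels, 2 TCs, an uplink rep with XDP on every channel, and an active PTP channel.

	int num_channels = 8, num_tc = 2;                       /* hypothetical */
	int is_uplink_rep = 1, has_ptp_sq = 1;
	int sqs_per_channel = num_tc + (is_uplink_rep ? 2 : 0); /* 4 */
	int nch = num_channels + has_ptp_sq;                    /* 9 */
	int slots = nch * sqs_per_channel;                      /* 36 */
	/* Slots actually filled: 8*2 TC SQs + 8*2 XDP SQs + 2 PTP txqsqs = 34;
	 * the 2 spare slots are the PTP channel's nonexistent XDP SQs that the
	 * comment above accounts for.
	 */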
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+static void
+mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -435,6 +463,49 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
mlx5e_sqs2vport_stop(esw, rep);
}
+static int
+mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_group *g;
+
+ g = esw->fdb_table.offloads.send_to_vport_meta_grp;
+ if (!g)
+ return 0;
+
+ flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ rpriv->send_to_vport_meta_rule = flow_rule;
+
+ return 0;
+}
+
+static void
+mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (rpriv->send_to_vport_meta_rule)
+ mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
+}
+
+void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_add_sqs_fwd_rules(priv);
+ mlx5e_rep_add_meta_tunnel_rule(priv);
+}
+
+void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_rep_del_meta_tunnel_rule(priv);
+ mlx5e_remove_sqs_fwd_rules(priv);
+}
+
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -592,10 +663,16 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
+/* One indirect TIR set for outer. Inner not supported in reps. */
+#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS
+
static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
- return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
- mlx5_eswitch_get_total_vports(mdev);
+ int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
+ int num_vports = mlx5_eswitch_get_total_vports(mdev);
+
+ return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
+ - (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
}
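The new limit divides the device TIR budget among vports after reserving the PF's TIRs and each vport's indirect TIR set. A worked example with hypothetical capability values (the real inputs come from log_max_tir, mlx5e_get_pf_num_tirs() and the eswitch vport count):

	int max_tir_num = 1 << 12;  /* assume log_max_tir = 12 -> 4096 TIRs */
	int pf_tirs     = 13;       /* assumed PF reservation */
	int num_vports  = 126;      /* assumed total vports */
	int indir       = 10;       /* assumed REP_NUM_INDIR_TIRS */
	int limit = (max_tir_num - pf_tirs - num_vports * indir) / num_vports;
	/* (4096 - 13 - 1260) / 126 = 22 channels per rep, at most */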
static void mlx5e_build_rep_params(struct net_device *netdev)
@@ -631,11 +708,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
-
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -670,6 +744,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -682,16 +763,26 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+ mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
@@ -702,19 +793,20 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ mlx5e_fs_set_ns(priv->fs,
+ mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc)) {
- err = PTR_ERR(priv->fs.ttc);
+ mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
+ if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
+ err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -734,7 +826,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non-uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
return 0;
}
@@ -810,10 +902,12 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
int err;
priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
- return -ENOMEM;
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
@@ -840,29 +934,31 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_root_ft;
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_ethtool_cleanup_steering(priv->fs);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
@@ -924,6 +1020,13 @@ err_event_reg:
return err;
}
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+ mlx5e_rep_bond_cleanup(rpriv);
+ mlx5e_rep_tc_cleanup(rpriv);
+}
+
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -938,31 +1041,33 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
- goto destroy_tises;
+ goto err_init_tx;
}
+ err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+ if (err)
+ goto err_ht_init;
+
return 0;
-destroy_tises:
+err_ht_init:
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ mlx5e_cleanup_uplink_rep_tx(rpriv);
+err_init_tx:
mlx5e_destroy_tises(priv);
return err;
}
-static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
-{
- mlx5e_rep_tc_netdevice_event_unregister(rpriv);
- mlx5e_rep_bond_cleanup(rpriv);
- mlx5e_rep_tc_cleanup(rpriv);
-}
-
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- mlx5e_destroy_tises(priv);
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
+
+ mlx5e_destroy_tises(priv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1097,8 +1202,8 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
+ &MLX5E_STATS_GRP(ptp),
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
@@ -1119,7 +1224,6 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
.max_nch_limit = mlx5e_rep_max_nch_limit,
@@ -1139,8 +1243,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
- /* XSK is needed so we can replace profile with NIC netdev */
- .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
@@ -1254,7 +1356,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
int err;
- rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
@@ -1269,7 +1371,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = mlx5e_vport_vf_rep_load(dev, rep);
if (err)
- kfree(rpriv);
+ kvfree(rpriv);
return err;
}
@@ -1297,7 +1399,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
free_ppriv:
- kfree(ppriv); /* mlx5e_rep_priv */
+ kvfree(ppriv); /* mlx5e_rep_priv */
}
static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b01dacb6f527..b4e691760da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -62,13 +62,9 @@ struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
+struct mlx5e_flow_meters;
struct mlx5_rep_uplink_priv {
- /* Filters DB - instantiated by the uplink representor and shared by
- * the uplink's VFs
- */
- struct rhashtable tc_ht;
-
/* indirect block callbacks are invoked on bind/unbind events
* on registered higher level devices (e.g. tunnel devices)
*
@@ -102,6 +98,8 @@ struct mlx5_rep_uplink_priv {
/* OVS internal port support */
struct mlx5e_tc_int_port_priv *int_port_priv;
+
+ struct mlx5e_flow_meters *flow_meters;
};
struct mlx5e_rep_priv {
@@ -113,6 +111,8 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
+ struct mlx5_flow_handle *send_to_vport_meta_rule;
+ struct rhashtable tc_ht;
};
static inline
@@ -183,6 +183,13 @@ struct mlx5e_decap_entry {
struct rcu_head rcu;
};
+struct mlx5e_mpls_info {
+ u32 label;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+};
+
struct mlx5e_encap_entry {
/* attached neigh hash entry */
struct mlx5e_neigh_hash_entry *nhe;
@@ -196,6 +203,7 @@ struct mlx5e_encap_entry {
struct list_head route_list;
struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
@@ -234,8 +242,8 @@ int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp);
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_rep_activate_channels(struct mlx5e_priv *priv);
+void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
@@ -249,8 +257,8 @@ static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_activate_channels(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e86ccc22fb82..a61a43fc8d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,12 +34,14 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/bitmap.h>
+#include <linux/filter.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
@@ -47,10 +49,10 @@
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -221,8 +223,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
@@ -233,121 +234,101 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
- if (!dev_page_is_reusable(dma_info->page)) {
+ if (!dev_page_is_reusable(page)) {
stats->cache_waive++;
return false;
}
- cache->page_cache[cache->tail] = *dma_info;
+ cache->page_cache[cache->tail] = page;
cache->tail = tail_next;
return true;
}
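The page-cache indices above wrap with a mask because MLX5E_CACHE_SIZE is a power of two; (i + 1) & (size - 1) is the usual division-free ring advance. A tiny illustration with a hypothetical ring size:

	#define RING_SIZE 8 /* power of two, standing in for MLX5E_CACHE_SIZE */

	static unsigned int ring_next(unsigned int i)
	{
		return (i + 1) & (RING_SIZE - 1); /* 6 -> 7 -> 0 -> 1: wraps at 8 */
	}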
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
struct mlx5e_rq_stats *stats = rq->stats;
+ dma_addr_t addr;
if (unlikely(cache->head == cache->tail)) {
stats->cache_empty++;
return false;
}
- if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ if (page_ref_count(cache->page_cache[cache->head]) != 1) {
stats->cache_busy++;
return false;
}
- *dma_info = cache->page_cache[cache->head];
+ au->page = cache->page_cache[cache->head];
cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
stats->cache_reuse++;
- dma_sync_single_for_device(rq->pdev, dma_info->addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ addr = page_pool_get_dma_addr(au->page);
+ /* Non-XSK always uses PAGE_SIZE. */
+ dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
return true;
}
-static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
- if (mlx5e_rx_cache_get(rq, dma_info))
+ dma_addr_t addr;
+
+ if (mlx5e_rx_cache_get(rq, au))
return 0;
- dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!dma_info->page))
+ au->page = page_pool_dev_alloc_pages(rq->page_pool);
+ if (unlikely(!au->page))
return -ENOMEM;
- dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
- rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- page_pool_recycle_direct(rq->page_pool, dma_info->page);
- dma_info->page = NULL;
+ /* Non-XSK always uses PAGE_SIZE. */
+ addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, addr))) {
+ page_pool_recycle_direct(rq->page_pool, au->page);
+ au->page = NULL;
return -ENOMEM;
}
+ page_pool_set_dma_addr(au->page, addr);
return 0;
}
-static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
- if (rq->xsk_pool)
- return mlx5e_xsk_page_alloc_pool(rq, dma_info);
- else
- return mlx5e_page_alloc_pool(rq, dma_info);
-}
+ dma_addr_t dma_addr = page_pool_get_dma_addr(page);
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
-{
- dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+ dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
DMA_ATTR_SKIP_CPU_SYNC);
+ page_pool_set_dma_addr(page, 0);
}
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
{
if (likely(recycle)) {
- if (mlx5e_rx_cache_put(rq, dma_info))
+ if (mlx5e_rx_cache_put(rq, page))
return;
- mlx5e_page_dma_unmap(rq, dma_info);
- page_pool_recycle_direct(rq->page_pool, dma_info->page);
+ mlx5e_page_dma_unmap(rq, page);
+ page_pool_recycle_direct(rq->page_pool, page);
} else {
- mlx5e_page_dma_unmap(rq, dma_info);
- page_pool_release_page(rq->page_pool, dma_info->page);
- put_page(dma_info->page);
+ mlx5e_page_dma_unmap(rq, page);
+ page_pool_release_page(rq->page_pool, page);
+ put_page(page);
}
}
-static inline void mlx5e_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle)
-{
- if (rq->xsk_pool)
- /* The `recycle` parameter is ignored, and the page is always
- * put into the Reuse Ring, because there is no way to return
- * the page to the userspace when the interface goes down.
- */
- xsk_buff_free(dma_info->xsk);
- else
- mlx5e_page_release_dynamic(rq, dma_info, recycle);
-}
-
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
int err = 0;
if (!frag->offset)
- /* On first frag (offset == 0), replenish page (dma_info actually).
- * Other frags that point to the same dma_info (with a different
+ /* On first frag (offset == 0), replenish page (alloc_unit actually).
+ * Other frags that point to the same alloc_unit (with a different
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc(rq, frag->di);
+ err = mlx5e_page_alloc_pool(rq, frag->au);
return err;
}
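Per the comment, frags that share an allocation unit differ only in offset, and only the offset-0 frag replenishes the page. A sketch of such a layout on a 4 KB page (frag sizes hypothetical):

	struct frag_layout { int offset; int last_in_page; };

	/* Two 2 KB frags on one page: arr[0] allocates, arr[1] reuses the
	 * same unit, and the last_in_page frag is the one that releases it.
	 */
	static const struct frag_layout arr[] = {
		{ .offset = 0,    .last_in_page = 0 },
		{ .offset = 2048, .last_in_page = 1 },
	};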
@@ -357,7 +338,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, recycle);
+ mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -373,12 +354,16 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+ dma_addr_t addr;
+ u16 headroom;
+
err = mlx5e_get_rx_frag(rq, frag);
if (unlikely(err))
goto free_frags;
- wqe->data[i].addr = cpu_to_be64(frag->di->addr +
- frag->offset + rq->buff.headroom);
+ headroom = i == 0 ? rq->buff.headroom : 0;
+ addr = page_pool_get_dma_addr(frag->au->page);
+ wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
return 0;
@@ -396,6 +381,15 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
{
int i;
+ if (rq->xsk_pool) {
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ xsk_buff_free(wi->au->xsk);
+ return;
+ }
+
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
mlx5e_put_rx_frag(rq, wi, recycle);
}
@@ -407,84 +401,78 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
mlx5e_free_rx_wqe(rq, wi, false);
}
-static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
+static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- int err;
int i;
- if (rq->xsk_pool) {
- int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
-
- /* Check in advance that we have enough frames, instead of
- * allocating one-by-one, failing and moving frames to the
- * Reuse Ring.
- */
- if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
- return -ENOMEM;
- }
-
for (i = 0; i < wqe_bulk; i++) {
- struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_rx_wqe_cyc *wqe;
- err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
- if (unlikely(err))
- goto free_wqes;
- }
-
- return 0;
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
-free_wqes:
- while (--i >= 0)
- mlx5e_dealloc_rx_wqe(rq, ix + i);
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
+ break;
+ }
- return err;
+ return i;
}
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
+ union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_sync_single_for_cpu(rq->pdev,
- di->addr + frag_offset,
- len, DMA_FROM_DEVICE);
- page_ref_inc(di->page);
+ dma_addr_t addr = page_pool_get_dma_addr(au->page);
+
+ dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
+ rq->buff.map_dir);
+ page_ref_inc(au->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- di->page, frag_offset, len, truesize);
+ au->page, frag_offset, len, truesize);
}
static inline void
-mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
- struct mlx5e_dma_info *dma_info,
+mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct page *page, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(dma_info->page) + offset_from;
+ const void *from = page_address(page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
+ rq->buff.map_dir);
skb_copy_to_linear_data(skb, from, len);
}
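The copy length above is rounded up to sizeof(long) so the memcpy runs over whole words; e.g. a 57-byte header copies 64 bytes on LP64. A hedged restatement of the kernel ALIGN() arithmetic:

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* like kernel ALIGN() */

	unsigned int headlen = 57;                          /* hypothetical */
	unsigned int len = ALIGN_UP(headlen, sizeof(long)); /* 64 on a 64-bit build */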
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
+ union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
bool no_xdp_xmit;
- struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
int i;
/* A common case for AF_XDP. */
- if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
+ if (bitmap_full(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe))
return;
- no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
- MLX5_MPWRQ_PAGES_PER_WQE);
+ no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
- if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], recycle);
+ if (rq->xsk_pool) {
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ xsk_buff_free(alloc_units[i].xsk);
+ } else {
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
+ }
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
@@ -569,11 +557,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
- err = mlx5e_page_alloc(rq, dma_info);
+ union mlx5e_alloc_unit au;
+
+ err = mlx5e_page_alloc_pool(rq, &au);
if (unlikely(err))
goto err_unmap;
- addr = dma_info->addr;
- page = dma_info->page;
+ page = dma_info->page = au.page;
+ addr = dma_info->addr = page_pool_get_dma_addr(au.page);
} else {
dma_info->addr = addr + header_offset;
dma_info->page = page;
@@ -606,7 +596,7 @@ err_unmap:
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release(rq, dma_info, true);
+ mlx5e_page_release_dynamic(rq, dma_info->page, true);
}
}
rq->stats->buff_alloc_err++;
@@ -620,7 +610,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
@@ -654,57 +644,55 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+ union mlx5e_alloc_unit *au = &wi->alloc_units[0];
struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
+ u32 offset; /* 17-bit value with MTT. */
u16 pi;
int err;
int i;
- /* Check in advance that we have enough frames, instead of allocating
- * one-by-one, failing and moving frames to the Reuse Ring.
- */
- if (rq->xsk_pool &&
- unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
- err = -ENOMEM;
- goto err;
- }
-
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
err = mlx5e_alloc_rx_hd_mpwqe(rq);
if (unlikely(err))
goto err;
}
- pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
+ pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
+
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
+ dma_addr_t addr;
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
- err = mlx5e_page_alloc(rq, dma_info);
+ err = mlx5e_page_alloc_pool(rq, au);
if (unlikely(err))
goto err_unmap;
- umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+ addr = page_pool_get_dma_addr(au->page);
+ umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(addr | MLX5_EN_WR),
+ };
}
- bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->uctrl.xlt_offset =
- cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
+
+ offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
- .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .num_wqebbs = rq->mpwqe.umr_wqebbs,
.umr.rq = rq,
};
- sq->pc += MLX5E_UMR_WQEBBS;
+ sq->pc += rq->mpwqe.umr_wqebbs;
sq->doorbell_cseg = &umr_wqe->ctrl;
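The xlt_offset just computed is expressed in 16-byte octwords: an MTT entry is an 8-byte tag, so the WQE's MTT byte offset is divided by MLX5_OCTWORD. A worked example with hypothetical geometry:

	unsigned int ix = 100, mtts_per_wqe = 64; /* hypothetical */
	unsigned int mtt_size = 8, octword = 16;  /* sizeof(struct mlx5_mtt), MLX5_OCTWORD */
	unsigned int offset = ix * mtts_per_wqe * mtt_size / octword;
	/* 100 * 64 * 8 / 16 = 3200 octwords, well inside the 17-bit field */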
@@ -712,8 +700,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
- dma_info--;
- mlx5e_page_release(rq, dma_info, true);
+ au--;
+ mlx5e_page_release_dynamic(rq, au->page, true);
}
err:
@@ -747,7 +735,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
if (hd_info->page != deleted_page) {
deleted_page = hd_info->page;
- mlx5e_page_release(rq, hd_info, false);
+ mlx5e_page_release_dynamic(rq, hd_info->page, false);
}
}
@@ -762,7 +750,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
/* Don't recycle, this function is called on rq/netdev close */
mlx5e_free_rx_mpwqe(rq, wi, false);
}
@@ -770,38 +758,51 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- u8 wqe_bulk;
- int err;
+ int wqe_bulk, count;
+ bool busy = false;
+ u16 head;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- wqe_bulk = rq->wqe.info.wqe_bulk;
-
- if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
+ if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
return false;
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
- do {
- u16 head = mlx5_wq_cyc_get_head(wq);
+ wqe_bulk = mlx5_wq_cyc_missing(wq);
+ head = mlx5_wq_cyc_get_head(wq);
- err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
- if (unlikely(err)) {
- rq->stats->buff_alloc_err++;
- break;
- }
+ /* Don't allow any newly allocated WQEs to share the same page with old
+ * WQEs that aren't completed yet. Stop earlier.
+ */
+ wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
- mlx5_wq_cyc_push_n(wq, wqe_bulk);
- } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
+ if (!rq->xsk_pool)
+ count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+ else if (likely(!rq->xsk_pool->dma_need_sync))
+ count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
+ else
+ /* If dma_need_sync is true, it's more efficient to call
+ * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
+ * because the latter does the same check and returns only one
+ * frame.
+ */
+ count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
+
+ mlx5_wq_cyc_push_n(wq, count);
+ if (unlikely(count != wqe_bulk)) {
+ rq->stats->buff_alloc_err++;
+ busy = true;
+ }
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
mlx5_wq_cyc_update_db_record(wq);
- return !!err;
+ return busy;
}
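The bulk trim above subtracts (head + wqe_bulk) & wqe_index_mask, which aligns the end of the bulk down to a whole page of WQEs so that no fresh allocation lands on a page still owned by an uncompleted WQE. A worked example with hypothetical values:

	unsigned int head = 13, wqe_bulk = 10, wqe_index_mask = 3; /* 4 WQEs/page */

	wqe_bulk -= (head + wqe_bulk) & wqe_index_mask; /* 23 & 3 = 3 -> bulk = 7 */
	/* head + wqe_bulk = 20, a multiple of 4: the bulk ends on a page edge. */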
void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
@@ -960,8 +961,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
rq->stats->congst_umr++;
-#define UMR_WQE_BULK (2)
- if (likely(missing < UMR_WQE_BULK))
+ if (likely(missing < rq->mpwqe.min_wqe_bulk))
return false;
if (rq->page_pool)
@@ -970,7 +970,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
- alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
+ alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
+ mlx5e_alloc_rx_mpwqe(rq, head);
if (unlikely(alloc_err))
break;
@@ -1117,7 +1118,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct tcphdr *skb_tcp_hd)
{
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
struct tcphdr *last_tcp_hd;
void *last_hd_addr;
@@ -1349,7 +1350,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
}
/* True when explicitly set via priv flag, or XDP prog is loaded */
- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+ get_cqe_tls_offload(cqe))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
@@ -1410,11 +1412,15 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
- mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(get_cqe_tls_offload(cqe)))
+ mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+ if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
+ mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -1489,7 +1495,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
- u32 cqe_bcnt)
+ u32 cqe_bcnt, u32 metasize)
{
struct sk_buff *skb = build_skb(va, frag_size);
@@ -1501,6 +1507,9 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
skb_reserve(skb, headroom);
skb_put(skb, cqe_bcnt);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
return skb;
}
@@ -1508,86 +1517,161 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, false);
+ xdp_prepare_buff(xdp, va, headroom, len, true);
}
static struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di = wi->di;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
- struct xdp_buff xdp;
+ struct bpf_prog *prog;
struct sk_buff *skb;
+ u32 metasize = 0;
void *va, *data;
+ dma_addr_t addr;
u32 frag_size;
- va = page_address(di->page) + wi->offset;
+ va = page_address(au->page) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
- frag_size, DMA_FROM_DEVICE);
- net_prefetchw(va); /* xdp_frame data area */
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
+ frag_size, rq->buff.map_dir);
net_prefetch(data);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
- return NULL; /* page/packet was consumed by XDP */
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog) {
+ struct xdp_buff xdp;
+
+ net_prefetchw(va); /* xdp_frame data area */
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+ return NULL; /* page/packet was consumed by XDP */
- rx_headroom = xdp.data - xdp.data_hard_start;
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
+ cqe_bcnt = xdp.data_end - xdp.data;
+ }
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
return skb;
}
static struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
- u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
- u16 frag_headlen = headlen;
- u16 byte_cnt = cqe_bcnt - headlen;
+ union mlx5e_alloc_unit *au = wi->au;
+ u16 rx_headroom = rq->buff.headroom;
+ struct skb_shared_info *sinfo;
+ u32 frag_consumed_bytes;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
struct sk_buff *skb;
+ dma_addr_t addr;
+ u32 truesize;
+ void *va;
- /* XDP is not supported in this configuration, as incoming packets
- * might spread among multiple pages.
- */
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats->buff_alloc_err++;
- return NULL;
- }
+ va = page_address(au->page) + wi->offset;
+ frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- net_prefetchw(skb->data);
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
+ rq->buff.frame0_sz, rq->buff.map_dir);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(va + rx_headroom);
+
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ truesize = 0;
+
+ cqe_bcnt -= frag_consumed_bytes;
+ frag_info++;
+ wi++;
+
+ while (cqe_bcnt) {
+ skb_frag_t *frag;
- while (byte_cnt) {
- u16 frag_consumed_bytes =
- min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
+ au = wi->au;
- mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
- frag_consumed_bytes, frag_info->frag_stride);
- byte_cnt -= frag_consumed_bytes;
- frag_headlen = 0;
+ frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
+
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
+ frag_consumed_bytes, rq->buff.map_dir);
+
+ if (!xdp_buff_has_frags(&xdp)) {
+ /* Init on the first fragment to avoid cold cache access
+ * when possible.
+ */
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp);
+ }
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ __skb_frag_set_page(frag, au->page);
+ skb_frag_off_set(frag, wi->offset);
+ skb_frag_size_set(frag, frag_consumed_bytes);
+
+ if (page_is_pfmemalloc(au->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp);
+
+ sinfo->xdp_frags_size += frag_consumed_bytes;
+ truesize += frag_info->frag_stride;
+
+ cqe_bcnt -= frag_consumed_bytes;
frag_info++;
wi++;
}
- /* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
- headlen);
- /* skb linear part was allocated with headlen and aligned to long */
- skb->tail += headlen;
- skb->len += headlen;
+ au = head_wi->au;
+
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+ int i;
+
+ for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+ mlx5e_put_rx_frag(rq, &head_wi[i], true);
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
+ xdp.data - xdp.data_hard_start,
+ xdp.data_end - xdp.data,
+ xdp.data - xdp.data_meta);
+ if (unlikely(!skb))
+ return NULL;
+
+ page_ref_inc(au->page);
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ int i;
+
+ /* sinfo->nr_frags is reset by build_skb, calculate again. */
+ xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_is_frag_pfmemalloc(&xdp));
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
return skb;
}
@@ -1627,10 +1711,11 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
+ mlx5e_xsk_skb_from_cqe_linear,
+ rq, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1683,7 +1768,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
+ rq, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1712,11 +1797,11 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -1767,12 +1852,13 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 data_bcnt, u32 data_offset)
+mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
while (data_bcnt) {
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
unsigned int truesize;
@@ -1781,12 +1867,12 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_i
else
truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_frag(rq, skb, di, data_offset,
+ mlx5e_add_skb_frag(rq, skb, au, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
- di++;
+ au++;
}
}
@@ -1794,12 +1880,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
- struct mlx5e_dma_info *head_di = di;
+ union mlx5e_alloc_unit *head_au = au;
struct sk_buff *skb;
+ dma_addr_t addr;
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
@@ -1810,14 +1897,17 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
net_prefetchw(skb->data);
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
if (unlikely(frag_offset >= PAGE_SIZE)) {
- di++;
+ au++;
frag_offset -= PAGE_SIZE;
}
- mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
+ mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
+ addr = page_pool_get_dma_addr(head_au->page);
+ mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
+ head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1829,12 +1919,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
- u32 cqe_bcnt32 = cqe_bcnt;
- struct xdp_buff xdp;
+ struct bpf_prog *prog;
struct sk_buff *skb;
+ u32 metasize = 0;
void *va, *data;
+ dma_addr_t addr;
u32 frag_size;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -1843,35 +1934,43 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(di->page) + head_offset;
+ va = page_address(au->page) + head_offset;
data = va + rx_headroom;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
- frag_size, DMA_FROM_DEVICE);
- net_prefetchw(va); /* xdp_frame data area */
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
+ frag_size, rq->buff.map_dir);
net_prefetch(data);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
- if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
- __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
- return NULL; /* page/packet was consumed by XDP */
- }
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog) {
+ struct xdp_buff xdp;
- rx_headroom = xdp.data - xdp.data_hard_start;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+ net_prefetchw(va); /* xdp_frame data area */
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+ __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
+ cqe_bcnt = xdp.data_end - xdp.data;
+ }
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
return skb;
}
-static void
+static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
@@ -1889,13 +1988,13 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
prefetchw(hdr);
prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
- return;
+ return NULL;
/* queue up for recycling/reuse */
page_ref_inc(head->page);
@@ -1907,20 +2006,18 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
ALIGN(head_size, sizeof(long)));
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
- return;
+ return NULL;
}
prefetchw(skb->data);
- mlx5e_copy_skb_header(rq->pdev, skb, head,
+ mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += head_size;
skb->len += head_size;
}
- rq->hw_gro_data->skb = skb;
- NAPI_GRO_CB(skb)->count = 1;
- skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+ return skb;
}
static void
@@ -1954,7 +2051,7 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
static void
@@ -1965,7 +2062,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release(rq, &shampo->info[header_index], true);
+ mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true);
}
bitmap_clear(shampo->bitmap, header_index, 1);
}
@@ -1973,23 +2070,24 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u16 head_size = cqe->shampo.header_size;
struct sk_buff **skb = &rq->hw_gro_data->skb;
bool flush = cqe->shampo.flush;
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
struct mlx5e_rx_wqe_ll *wqe;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *au;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;
- wi = &rq->mpwqe.info[wqe_id];
+ wi = mlx5e_get_mpw_info(rq, wqe_id);
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -2011,9 +2109,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (likely(head_size))
+ *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ else
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+ page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
+
+ NAPI_GRO_CB(*skb)->count = 1;
+ skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
} else {
NAPI_GRO_CB(*skb)->count++;
if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2132,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
}
- di = &wi->umr.dma_info[page_idx];
- mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ if (likely(head_size)) {
+ au = &wi->alloc_units[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
+ }
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
if (flush)
@@ -2049,11 +2156,11 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -2076,9 +2183,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
- skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ mlx5e_xsk_skb_from_cqe_mpwrq_linear,
rq, wi, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -2267,7 +2375,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
+ rq, wi, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -2289,46 +2397,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
};
#endif /* CONFIG_MLX5_CORE_IPOIB */
-#ifdef CONFIG_MLX5_EN_IPSEC
-
-static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
- struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- struct mlx5e_wqe_frag_info *wi;
- struct sk_buff *skb;
- u32 cqe_bcnt;
- u16 ci;
-
- ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
- wi = get_frag(rq, ci);
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
-
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- rq->stats->wqe_err++;
- goto wq_free_wqe;
- }
-
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
- mlx5e_skb_from_cqe_linear,
- mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
- goto wq_free_wqe;
-
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb))
- goto wq_free_wqe;
-
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- napi_gro_receive(rq->cq.napi, skb);
-
-wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi, true);
- mlx5_wq_cyc_pop(wq);
-}
-
-#endif /* CONFIG_MLX5_EN_IPSEC */
-
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
struct net_device *netdev = rq->netdev;
@@ -2345,10 +2413,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
- return -EINVAL;
- }
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
if (!rq->handle_rx_cqe) {
@@ -2367,19 +2431,12 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
mlx5e_xsk_skb_from_cqe_linear :
- mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
- if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
- priv->ipsec)
- rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
- else
-#endif
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of RQ is not set\n");
return -EINVAL;
@@ -2410,7 +2467,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
goto free_wqe;
}
- skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
if (!skb)
goto free_wqe;
@@ -2428,7 +2485,7 @@ free_wqe:
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
- rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 8c9163d2c646..08a75654f5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
buf[count] = st.st_func(priv);
netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+ count++;
}
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 26e326fe503c..03c1841970f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,11 +32,15 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"
+#ifdef CONFIG_PAGE_POOL_STATS
+#include <net/page_pool.h>
+#endif
+
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
return !priv->profile->stats_grps_num ? 0 :
@@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+ s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
+ s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
+ s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
+ s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
+ s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
+ s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
+ s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
+ s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
+ s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
+ s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
+ s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -444,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
int i;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (i = 0; i < max_qos_sqs; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
+#ifdef CONFIG_PAGE_POOL_STATS
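+/* Snapshot the cumulative page_pool counters of an active channel's RQ
+ * into the driver's per-RQ stats.
+ */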
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+ struct mlx5e_rq_stats *rq_stats = c->rq.stats;
+ struct page_pool *pool = c->rq.page_pool;
+ struct page_pool_stats stats = { 0 };
+
+ if (!page_pool_get_stats(pool, &stats))
+ return;
+
+ rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
+ rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
+ rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+ rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
+ rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
+ rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
+
+ rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
+ rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
+ rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
+ rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
+ rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+}
+#else
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+}
+#endif
+
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -462,9 +521,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) /* for active channels only */
+ mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
+
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
priv->channel_stats[i];
+
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -578,17 +641,26 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};
+static const struct counter_desc vnic_env_stats_drop_desc[] = {
+ { "rx_oversize_pkts_buffer",
+ VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
+};
+
#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
+#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
+ ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
- NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
+ NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
+ NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
@@ -602,6 +674,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vnic_env_stats_dev_oob_desc[i].format);
+
+ for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_drop_desc[i].format);
+
return idx;
}
@@ -616,6 +693,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
vnic_env_stats_dev_oob_desc, i);
+
+ for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_drop_desc, i);
+
return idx;
}
@@ -625,7 +707,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
return;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
@@ -1254,9 +1336,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
-
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
@@ -1272,6 +1351,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
struct ethtool_fec_stats *fec_stats)
{
+ if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ return;
+
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, fec_stats);
}
@@ -1837,17 +1919,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
- return mlx5e_tls_get_count(priv);
+ return mlx5e_ktls_get_count(priv);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_tls_get_stats(priv, data + idx);
+ return idx + mlx5e_ktls_get_stats(priv, data + idx);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -1887,6 +1969,19 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
@@ -2024,6 +2119,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2108,13 +2205,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
int i, qid;
for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2132,8 +2229,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
int i, qid;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (qid = 0; qid < max_qos_sqs; qid++) {
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
@@ -2348,7 +2445,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
-static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
@@ -2367,13 +2464,15 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
&MLX5E_STATS_GRP(qos),
+#ifdef CONFIG_MLX5_EN_MACSEC
+ &MLX5E_STATS_GRP(macsec_hw),
+#endif
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 2c1ed5b81be6..9f781085be47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -205,7 +205,19 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-
+#ifdef CONFIG_PAGE_POOL_STATS
+ u64 rx_pp_alloc_fast;
+ u64 rx_pp_alloc_slow;
+ u64 rx_pp_alloc_slow_high_order;
+ u64 rx_pp_alloc_empty;
+ u64 rx_pp_alloc_refill;
+ u64 rx_pp_alloc_waive;
+ u64 rx_pp_recycle_cached;
+ u64 rx_pp_recycle_cache_full;
+ u64 rx_pp_recycle_ring;
+ u64 rx_pp_recycle_ring_full;
+ u64 rx_pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -261,6 +273,10 @@ struct mlx5e_qcounter_stats {
u32 rx_if_down_packets;
};
+#define VNIC_ENV_GET(vnic_env_stats, c) \
+ MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
+ vport_env.c)
+
struct mlx5e_vnic_env_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
@@ -352,6 +368,19 @@ struct mlx5e_rq_stats {
u64 congst_umr;
u64 arfs_err;
u64 recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+ u64 pp_alloc_fast;
+ u64 pp_alloc_slow;
+ u64 pp_alloc_slow_high_order;
+ u64 pp_alloc_empty;
+ u64 pp_alloc_refill;
+ u64 pp_alloc_waive;
+ u64 pp_recycle_cached;
+ u64 pp_recycle_cache_full;
+ u64 pp_recycle_ring;
+ u64 pp_recycle_ring_full;
+ u64 pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
@@ -428,6 +457,8 @@ struct mlx5e_ptp_cq_stats {
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
+ u64 resync_cqe;
+ u64 resync_event;
};
struct mlx5e_stats {
@@ -457,7 +488,8 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
-extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
+extern MLX5E_DECLARE_STATS_GRP(ptp);
+extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d908a7e1406..5a6aa61ec82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -59,6 +59,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
+#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,6 +71,30 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_tc_table {
+ /* Protects the dynamic assignment of the t parameter
+ * which is the nic tc root table.
+ */
+ struct mutex t_lock;
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_post_act *post_act;
+
+ struct rhashtable ht;
+
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
+};
+
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -104,8 +129,27 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
+ [PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
+{
+ struct mlx5e_tc_table *tc;
+
+ tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
+ return tc ? tc : ERR_PTR(-ENOMEM);
+}
+
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
+{
+ kvfree(tc);
+}
+
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+{
+ return tc->chains;
+}
+
/* To avoid false lock dependency warning set the tc_ht lock
* class different than the lock class of the ht being used when deleting
* last flow from a group and then deleting a group, we get into del_sw_flow_group()
@@ -115,6 +159,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -239,9 +284,34 @@ mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
return NULL;
}
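+/* Lazily create the uplink's FDB flow-meter pool on first use; flow
+ * meters are only available in switchdev mode.
+ */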
+struct mlx5e_flow_meters *
+mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_priv *priv;
+
+ if (is_mdev_switchdev_mode(dev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ priv = netdev_priv(uplink_rpriv->netdev);
+ if (!uplink_priv->flow_meters)
+ uplink_priv->flow_meters =
+ mlx5e_flow_meters_init(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ uplink_priv->post_act);
+ if (!IS_ERR(uplink_priv->flow_meters))
+ return uplink_priv->flow_meters;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -253,7 +323,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs.tc.ct;
+ return tc->ct;
}
static struct mlx5e_tc_psample *
@@ -273,6 +343,24 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
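+/* In switchdev mode the post-action table is owned by the uplink
+ * representor; otherwise it lives in the per-netdev NIC TC table.
+ */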
+static struct mlx5e_post_act *
+get_post_action(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->post_act;
+ }
+
+ return tc->post_act;
+}
+
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
@@ -295,13 +383,121 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
if (is_mdev_switchdev_mode(priv->mdev)) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
-
return;
}
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
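+/* A flow meter is encoded as an ASO execute action of type FLOW_METER. */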
+static bool
+is_flow_meter_action(struct mlx5_flow_attr *attr)
+{
+ return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+}
+
+static int
+mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+ struct mlx5e_post_meter_priv *post_meter;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
+ if (IS_ERR(meter)) {
+ mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
+ return PTR_ERR(meter);
+ }
+
+ ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
+ meter->red_counter);
+ if (IS_ERR(post_meter)) {
+ mlx5_core_err(priv->mdev, "Failed to init post meter\n");
+ goto err_meter_init;
+ }
+
+ attr->meter_attr.meter = meter;
+ attr->meter_attr.post_meter = post_meter;
+ attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+
+err_meter_init:
+ mlx5e_tc_meter_put(meter);
+ return PTR_ERR(post_meter);
+}
+
+static void
+mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+{
+ mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_tc_meter_put(attr->meter_attr.meter);
+}
+
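+/* Common entry point for installing a TC rule: CT flows go through the
+ * CT infrastructure, non-switchdev rules into the NIC tables, and FDB
+ * rules into the eswitch, optionally via sample or flow-meter tables.
+ */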
+struct mlx5_flow_handle *
+mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
+
+ if (attr->flags & MLX5_ATTR_FLAG_CT) {
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
+ &attr->parse_attr->mod_hdr_acts;
+
+ return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
+ spec, attr,
+ mod_hdr_acts);
+ }
+
+ if (!is_mdev_switchdev_mode(priv->mdev))
+ return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
+ return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+
+ if (is_flow_meter_action(attr)) {
+ err = mlx5e_tc_add_flow_meter(priv, attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+}
+
+void
+mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (attr->flags & MLX5_ATTR_FLAG_CT) {
+ mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
+ return;
+ }
+
+ if (!is_mdev_switchdev_mode(priv->mdev)) {
+ mlx5e_del_offloaded_nic_rule(priv, rule, attr);
+ return;
+ }
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
+ mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
+ return;
+ }
+
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ if (attr->meter_attr.meter)
+ mlx5e_tc_del_flow_meter(attr);
+}
+
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
@@ -413,11 +609,12 @@ int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs.tc.mod_hdr;
+ &tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -616,6 +813,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct ttc_params ttc_params;
+ struct mlx5_ttc_table *ttc;
int err;
err = mlx5e_hairpin_create_indirect_rqt(hp);
@@ -633,9 +831,10 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
goto err_create_ttc_table;
}
+ ttc = mlx5e_fs_get_ttc(priv->fs, false);
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+ mlx5_get_ttc_flow_table(ttc)->id);
return 0;
@@ -722,10 +921,11 @@ static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
u16 peer_vhca_id, u8 prio)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hash_for_each_possible(tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -739,11 +939,12 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -799,6 +1000,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
@@ -827,10 +1029,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_lock(&tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -842,7 +1044,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -854,9 +1056,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -932,10 +1134,11 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
+ struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
@@ -944,6 +1147,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int dest_ix = 0;
+ nic_chains = mlx5e_nic_chains(tc);
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
@@ -968,7 +1172,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
}
dest_ix++;
}
@@ -996,7 +1200,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ rule = ERR_CAST(tc->t);
goto err_ft_get;
}
}
@@ -1039,6 +1243,21 @@ err_ft_get:
}
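+/* Helper shared by the NIC and FDB add-flow paths: create a HW flow
+ * counter and attach it to the flow attribute.
+ */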
static int
+alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_fc *counter;
+
+ counter = mlx5_fc_create(counter_dev, true);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
+ attr->counter = counter;
+ return 0;
+}
+
+static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1046,7 +1265,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_fc *counter;
int err;
parse_attr = attr->parse_attr;
@@ -1058,11 +1276,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return PTR_ERR(counter);
-
- attr->counter = counter;
+ err = alloc_flow_attr_counter(dev, attr);
+ if (err)
+ return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -1072,8 +1288,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return err;
}
- if (flow_flag_test(flow, CT))
- flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
+ if (attr->flags & MLX5_ATTR_FLAG_CT)
+ flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
attr, &parse_attr->mod_hdr_acts);
else
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
@@ -1086,8 +1302,10 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_fs_chains *nic_chains;
+ nic_chains = mlx5e_nic_chains(tc);
mlx5_del_flow_rules(rule);
if (attr->chain || attr->prio)
@@ -1102,26 +1320,26 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
flow_flag_clear(flow, OFFLOADED);
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ if (attr->flags & MLX5_ATTR_FLAG_CT)
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
else if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs.tc.t_lock);
+ mutex_lock(&tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs.tc.t = NULL;
+ mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
+ tc->t = NULL;
}
- mutex_unlock(&priv->fs.tc.t_lock);
+ mutex_unlock(&tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -1132,6 +1350,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ free_flow_post_acts(flow);
+
kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1142,40 +1362,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
- if (flow_flag_test(flow, CT)) {
- mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
-
- rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
- flow, spec, attr,
- mod_hdr_acts);
- } else if (flow_flag_test(flow, SAMPLE)) {
- rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
- mlx5e_tc_get_flow_tun_id(flow));
- } else {
- rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
- }
+ rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
- if (IS_ERR(flow->rule[1])) {
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- else
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- return flow->rule[1];
- }
+ if (IS_ERR(flow->rule[1]))
+ goto err_rule1;
}
return rule;
+
+err_rule1:
+ mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
+ return flow->rule[1];
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
@@ -1184,19 +1391,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
- goto offload_rule_0;
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- else if (flow_flag_test(flow, SAMPLE))
- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
- else
-offload_rule_0:
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
struct mlx5_flow_handle *
@@ -1204,8 +1405,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5e_mod_hdr_handle *mh = NULL;
struct mlx5_flow_attr *slow_attr;
struct mlx5_flow_handle *rule;
+ bool fwd_and_modify_cap;
+ u32 chain_mapping = 0;
+ int err;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
if (!slow_attr)
@@ -1214,15 +1420,58 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+
+ fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
+ if (!fwd_and_modify_cap)
+ goto skip_restore;
+
+ err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
+ if (err)
+ goto err_get_chain;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ CHAIN_TO_REG, chain_mapping);
+ if (err)
+ goto err_reg_set;
+ mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
+ MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
+ if (IS_ERR(mh)) {
+ err = PTR_ERR(mh);
+ goto err_attach;
+ }
+
+ slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+
+skip_restore:
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
- if (!IS_ERR(rule))
- flow_flag_set(flow, SLOW);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_offload;
+ }
+
+ flow->slow_mh = mh;
+ flow->chain_mapping = chain_mapping;
+ flow_flag_set(flow, SLOW);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
kfree(slow_attr);
return rule;
+
+err_offload:
+ if (fwd_and_modify_cap)
+ mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
+err_attach:
+err_reg_set:
+ if (fwd_and_modify_cap)
+ mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
+err_get_chain:
+ mlx5e_mod_hdr_dealloc(&mod_acts);
+ kfree(slow_attr);
+ return ERR_PTR(err);
}
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
@@ -1239,8 +1488,18 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+ if (flow->slow_mh) {
+ slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+ }
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+ if (flow->slow_mh) {
+ mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+ mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
+ flow->chain_mapping = 0;
+ flow->slow_mh = NULL;
+ }
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
}
@@ -1304,8 +1563,11 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_priv = netdev_priv(route_dev);
route_mdev = route_priv->mdev;
- if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
- route_mdev->coredev_type != MLX5_COREDEV_VF)
+ if (out_mdev->coredev_type != MLX5_COREDEV_PF)
+ return false;
+
+ if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
+ route_mdev->coredev_type != MLX5_COREDEV_SF)
return false;
return mlx5e_same_hw_devs(out_priv, route_priv);
@@ -1348,10 +1610,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
@@ -1361,13 +1623,107 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
if (IS_ERR(mod_hdr))
return PTR_ERR(mod_hdr);
- WARN_ON(flow->attr->modify_hdr);
- flow->attr->modify_hdr = mod_hdr;
+ WARN_ON(attr->modify_hdr);
+ attr->modify_hdr = mod_hdr;
return 0;
}
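+/* Resolve all encap destinations of an eswitch flow: look up each mirred
+ * device, attach its encap entry and record the destination rep/mdev.
+ * *vf_tun is set when a destination needs the source-port-change chain
+ * handling (VF tunnel).
+ */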
static int
+set_encap_dests(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *encap_valid,
+ bool *vf_tun)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct net_device *encap_dev = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
+ int out_index;
+ int err = 0;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return 0;
+
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+ *encap_valid = true;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
+ int mirred_ifindex;
+
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto out;
+ }
+ err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
+ extack, &encap_dev, encap_valid);
+ dev_put(out_dev);
+ if (err)
+ goto out;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
+ }
+
+ if (*vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
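+/* Undo set_encap_dests(): detach every encap entry and free its tun_info. */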
+static void
+clean_encap_dests(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ bool *vf_tun)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ int out_index;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ mlx5e_detach_encap(priv, flow, attr, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
+}
+
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1375,15 +1731,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
- bool vf_tun = false, encap_valid = true;
- struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *out_priv;
- struct mlx5_fc *counter;
+ bool vf_tun, encap_valid;
u32 max_prio, max_chain;
int err = 0;
- int out_index;
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
@@ -1414,7 +1765,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_out;
- if (!attr->chain && esw_attr->int_port) {
+ if (!attr->chain && esw_attr->int_port &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* If decap route device is internal port, change the
* source vport value in reg_c0 back to uplink just in
* case the rule performs goto chain > 0. If we have a miss
@@ -1471,50 +1823,17 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- struct net_device *out_dev;
- int mirred_ifindex;
-
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
- if (!out_dev) {
- NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
- err = -ENODEV;
- goto err_out;
- }
- err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
- extack, &encap_dev, &encap_valid);
- dev_put(out_dev);
- if (err)
- goto err_out;
-
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- vf_tun = true;
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
- esw_attr->dests[out_index].mdev = out_priv->mdev;
- }
-
- if (vf_tun && esw_attr->out_count > 1) {
- NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
- err = -EOPNOTSUPP;
+ err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
goto err_out;
- }
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
if (vf_tun) {
- err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
if (err)
goto err_out;
} else {
@@ -1525,20 +1844,16 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw_attr->counter_dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
+ err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
+ if (err)
goto err_out;
- }
-
- attr->counter = counter;
}
/* we get here if one of the following takes place:
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid)
+ if (!encap_valid || flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -1575,8 +1890,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- bool vf_tun = false;
- int out_index;
+ bool vf_tun;
esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
@@ -1600,16 +1914,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- vf_tun = true;
- if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
- mlx5e_detach_encap(priv, flow, out_index);
- kfree(attr->parse_attr->tun_info[out_index]);
- }
- }
+ clean_encap_dests(priv, flow, attr, &vf_tun);
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
@@ -1633,7 +1938,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
- kfree(attr->sample_attr);
+ free_flow_post_acts(flow);
+
+ if (flow->attr->lag.count)
+ mlx5_lag_del_mpesw_rule(esw->dev);
+
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -1641,7 +1950,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- return flow->attr->counter;
+ struct mlx5_flow_attr *attr;
+
+ attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
+ return attr->counter;
}
/* Iterate over tmp_list of flows attached to flow_list head. */
@@ -1853,7 +2165,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
- flow->tunnel_id = value;
+ flow->attr->tunnel_id = value;
return 0;
err_set:
@@ -1867,8 +2179,8 @@ err_enc_opts:
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
- u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
- u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
+ u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
+ u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw;
@@ -1884,11 +2196,6 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
enc_opts_id);
}
-u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
-{
- return flow->tunnel_id;
-}
-
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
void *headers_c, void *headers_v)
@@ -2355,6 +2662,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
+ match.mask->vlan_eth_type &&
+ MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
+ ft_field_support.outer_second_vid,
+ fs_type)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_cvlan_tag, 1);
+ spec->match_criteria_enable |=
+ MLX5_MATCH_MISC_PARAMETERS;
+ }
}
} else if (*match_level != MLX5_MATCH_NONE) {
/* cvlan_tag enabled in match criteria and
@@ -2810,14 +3128,15 @@ static unsigned long mask_to_le(unsigned long mask, int size)
return mask;
}
+
static int offload_pedit_fields(struct mlx5e_priv *priv,
int namespace,
- struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
@@ -2943,35 +3262,43 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};
-static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action_flags,
- struct netlink_ext_ack *extack)
+static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers *cmd_masks;
- int err;
u8 cmd;
- err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
- action_flags, extack);
- if (err < 0)
- goto out_dealloc_parsed_actions;
-
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
- cmd_masks = &hdrs[cmd].masks;
+ cmd_masks = &parse_attr->hdrs[cmd].masks;
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
- NL_SET_ERR_MSG_MOD(extack,
- "attempt to offload an unsupported field");
+ NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
- err = -EOPNOTSUPP;
- goto out_dealloc_parsed_actions;
+ return -EOPNOTSUPP;
}
}
return 0;
+}
+
+static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ u32 *action_flags,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
+ if (err)
+ goto out_dealloc_parsed_actions;
+
+ err = verify_offload_pedit_fields(priv, parse_attr, extack);
+ if (err)
+ goto out_dealloc_parsed_actions;
+
+ return 0;
out_dealloc_parsed_actions:
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
@@ -3175,11 +3502,11 @@ actions_match_supported_fdb(struct mlx5e_priv *priv,
static bool
actions_match_supported(struct mlx5e_priv *priv,
struct flow_action *flow_action,
+ u32 actions,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- u32 actions = flow->attr->action;
bool ct_flow, ct_clear;
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
@@ -3191,6 +3518,30 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
}
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
@@ -3223,57 +3574,13 @@ bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
}
static int
-parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action)
-{
- struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
- struct mlx5_flow_attr *attr = flow->attr;
- enum mlx5_flow_namespace_type ns_type;
- struct mlx5e_priv *priv = flow->priv;
- const struct flow_action_entry *act;
- struct mlx5e_tc_act *tc_act;
- int err, i;
-
- ns_type = mlx5e_get_flow_namespace(flow);
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act) {
- NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
- return -EOPNOTSUPP;
- }
-
- if (!tc_act->can_offload(parse_state, act, i))
- return -EOPNOTSUPP;
-
- err = tc_act->parse_action(parse_state, act, priv, attr);
- if (err)
- return err;
- }
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act || !tc_act->post_parse ||
- !tc_act->can_offload(parse_state, act, i))
- continue;
-
- err = tc_act->post_parse(parse_state, priv, attr);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
- struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
enum mlx5_flow_namespace_type ns_type;
int err;
@@ -3283,8 +3590,7 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
ns_type = mlx5e_get_flow_namespace(flow);
- err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
- &attr->action, extack);
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
if (err)
return err;
@@ -3305,6 +3611,307 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
return 0;
}
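+/* Clone a flow attr so the actions following a multi-table action can be
+ * re-applied from the post-action table; action, flags and destinations
+ * start out cleared on the clone.
+ */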
+static struct mlx5_flow_attr *
+mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 attr_sz = ns_to_attr_sz(ns_type);
+ struct mlx5_flow_attr *attr2;
+
+ attr2 = mlx5_alloc_flow_attr(ns_type);
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr) {
+ kvfree(parse_attr);
+ kfree(attr2);
+ return NULL;
+ }
+
+ memcpy(attr2, attr, attr_sz);
+ INIT_LIST_HEAD(&attr2->list);
+ parse_attr->filter_dev = attr->parse_attr->filter_dev;
+ attr2->action = 0;
+ attr2->flags = 0;
+ attr2->parse_attr = parse_attr;
+ attr2->dest_chain = 0;
+ attr2->dest_ft = NULL;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ attr2->esw_attr->out_count = 0;
+ attr2->esw_attr->split_count = 0;
+ }
+
+ return attr2;
+}
+
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
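+/* Return the first attribute in the flow's attr list that carries an
+ * encap destination, or NULL if there is none.
+ */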
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+ int i;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ esw_attr = attr->esw_attr;
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
+ return attr;
+ }
+ }
+
+ return NULL;
+}
+
+void
+mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
+ }
+}
+
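+/* Release every post-act attribute except the last list entry, which is
+ * the flow's main attr and is freed by the regular del-flow path.
+ */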
+static void
+free_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *tmp;
+ bool vf_tun;
+
+ list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ if (attr->post_act_handle)
+ mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
+
+ clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(counter_dev, attr->counter);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ if (attr->modify_hdr)
+ mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ }
+
+ list_del(&attr->list);
+ kvfree(attr->parse_attr);
+ kfree(attr);
+ }
+}
+
+int
+mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+ int err = 0;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* TC filter rule HW translation:
+ *
+ * +---------------------+
+ * + ft prio (tc chain)  +
+ * + original match      +
+ * +---------------------+
+ *           |
+ *           | if multi table action
+ *           |
+ *           v
+ * +---------------------+
+ * + post act ft         |<----.
+ * + match fte id        |     | split on multi table action
+ * + do actions          |-----'
+ * +---------------------+
+ *           |
+ *           |
+ *           v
+ * Do rest of the actions after the last multi table action.
+ */
+static int
+alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *next_attr = NULL;
+ struct mlx5e_post_act_handle *handle;
+ bool vf_tun, encap_valid = true;
+ int err;
+
+	/* Iterate in reverse parse order: attrs are prepended during parsing,
+	 * so the first list entry is the last-parsed attribute.
+	 */
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (!next_attr) {
+ /* Set counter action on last post act rule. */
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
+ if (err)
+ goto out_free;
+ }
+
+ /* Don't add post_act rule for first attr (last in the list).
+ * It's being handled by the caller.
+ */
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
+ goto out_free;
+
+ if (!encap_valid)
+ flow_flag_set(flow, SLOW);
+
+ err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+ if (err)
+ goto out_free;
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+ if (err)
+ goto out_free;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+ if (err)
+ goto out_free;
+ }
+
+ handle = mlx5e_tc_post_act_add(post_act, attr);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_free;
+ }
+
+ attr->post_act_handle = handle;
+ next_attr = attr;
+ }
+
+ if (flow_flag_test(flow, SLOW))
+ goto out;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err)
+ goto out_free;
+
+out:
+ return 0;
+
+out_free:
+ free_flow_post_acts(flow);
+ return err;
+}
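
Why this loop can wire each rule to its successor's table in a single pass: parse_tc_actions() below builds flow->attrs with list_add(), i.e. by prepending, so traversal order here is the reverse of parse order. A minimal illustrative sketch of that property (demo C, not driver code):

	struct attr { int idx; struct attr *next; };

	static struct attr *prepend(struct attr *head, struct attr *a)
	{
		a->next = head;		/* list_add(): new element becomes the head */
		return a;
	}

	/* Prepending attrs 0, 1, 2 during parsing yields traversal 2, 1, 0:
	 * the first entry visited is the last-parsed attribute (counter
	 * action only), and every other attr can point at the post-act
	 * table of the attr parsed after it.
	 */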
+
+static int
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow_action flow_action_reorder;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5_flow_attr *attr = flow->attr;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ struct flow_action_entry *act, **_act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
+
+ flow_action_reorder.num_entries = flow_action->num_entries;
+	flow_action_reorder.entries = kcalloc(flow_action->num_entries,
+					      sizeof(*flow_action_reorder.entries),
+					      GFP_KERNEL);
+ if (!flow_action_reorder.entries)
+ return -ENOMEM;
+
+ mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+ list_add(&attr->list, &flow->attrs);
+
+ flow_action_for_each(i, _act, &flow_action_reorder) {
+ act = *_act;
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (!tc_act->can_offload(parse_state, act, i, attr)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ goto out_free;
+
+ parse_state->actions |= attr->action;
+
+ /* Split attr for multi table act if not the last act. */
+ if (tc_act->is_multi_table_act &&
+ tc_act->is_multi_table_act(priv, act, attr) &&
+ i < flow_action_reorder.num_entries - 1) {
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free;
+
+ attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ list_add(&attr->list, &flow->attrs);
+ }
+ }
+
+ kfree(flow_action_reorder.entries);
+
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free_post_acts;
+
+ err = alloc_flow_post_acts(flow, extack);
+ if (err)
+ goto out_free_post_acts;
+
+ return 0;
+
+out_free:
+ kfree(flow_action_reorder.entries);
+out_free_post_acts:
+ free_flow_post_acts(flow);
+
+ return err;
+}
+
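The iteration pattern used throughout parse_tc_actions() comes from the core flow-offload API; a self-contained sketch with the real flow_action_for_each() macro from include/net/flow_offload.h (the counting helper itself is hypothetical):

	#include <net/flow_offload.h>

	/* Count pedit (header rewrite) entries in a classifier's action list. */
	static int count_mangle_actions(struct flow_rule *rule)
	{
		struct flow_action_entry *act;
		int i, n = 0;

		flow_action_for_each(i, act, &rule->action)
			if (act->id == FLOW_ACTION_MANGLE)
				n++;
		return n;
	}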
static int
flow_action_supported(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -3332,7 +3939,6 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
- struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
@@ -3344,17 +3950,17 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
- hdrs = parse_state->hdrs;
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
- err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -3393,12 +3999,25 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
same_hw_reps(priv, peer_netdev));
}
+static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
+{
+ if (same_hw_reps(priv, out_dev) &&
+ MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
+ MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
+ return true;
+
+ return false;
+}
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev)
{
if (is_merged_eswitch_vfs(priv, out_dev))
return true;
+ if (is_multiport_eligible(priv, out_dev))
+ return true;
+
if (is_lag_dev(priv, out_dev))
return true;
@@ -3455,7 +4074,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- struct pedit_headers_action *hdrs;
+ struct net_device *filter_dev;
int err;
err = flow_action_supported(flow_action, extack);
@@ -3464,28 +4083,37 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
+ filter_dev = parse_attr->filter_dev;
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
- hdrs = parse_state->hdrs;
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
/* Forward to/from internal port can only have 1 dest */
- if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
+ if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
esw_attr->out_count > 1) {
NL_SET_ERR_MSG_MOD(extack,
"Rules with internal port can have only one destination");
return -EOPNOTSUPP;
}
- err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ /* Forward from tunnel/internal port to internal port is not supported */
+ if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
+ esw_attr->dest_int_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding from tunnel/internal port to internal port is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -3520,14 +4148,14 @@ static const struct rhashtable_params tc_ht_params = {
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- return &uplink_rpriv->uplink_priv.tc_ht;
+ rpriv = priv->ppriv;
+ return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs.tc.ht;
+ return &tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -3560,7 +4188,12 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
sizeof(struct mlx5_nic_flow_attr);
struct mlx5_flow_attr *attr;
- return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ if (!attr)
+ return attr;
+
+ INIT_LIST_HEAD(&attr->list);
+ return attr;
}
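
The single-allocation scheme this helper relies on is the zero-length trailing union in struct mlx5_flow_attr (kept last, see the en_tc.h hunk below): the union members alias whatever bytes are appended past the base struct. A minimal sketch of the pattern, with demo names:

	struct demo_attr {
		u32 common;
		union {				/* must stay last */
			struct { u32 vport; }   esw[0];
			struct { u32 hairpin; } nic[0];
		};
	};

	/* One kzalloc() carries base + variant; both union members alias
	 * the appended ex_attr_size bytes (snippet, not a full function):
	 */
	struct demo_attr *a = kzalloc(sizeof(*a) + sizeof(a->esw[0]), GFP_KERNEL);
	if (a)
		a->esw[0].vport = 7;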
static int
@@ -3594,6 +4227,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
+ INIT_LIST_HEAD(&flow->attrs);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
@@ -3651,6 +4285,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_core_dev *in_mdev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -3686,17 +4321,26 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ if (flow->attr->lag.count) {
+ err = mlx5_lag_add_mpesw_rule(esw->dev);
+ if (err)
+ goto err_free;
+ }
+
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
- goto err_free;
+ goto err_lag;
add_unready_flow(flow);
}
return flow;
+err_lag:
+ if (flow->attr->lag.count)
+ mlx5_lag_del_mpesw_rule(esw->dev);
err_free:
mlx5e_flow_put(priv, flow);
out:
@@ -3895,7 +4539,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
int err = 0;
if (!mlx5_esw_hold(priv->mdev))
- return -EAGAIN;
+ return -EBUSY;
mlx5_esw_get(priv->mdev);
@@ -4094,6 +4738,33 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
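Taken together with the matchall-specific check below (the conform action must be continue), a policer these validators accept looks roughly like the following tc-police(8) invocation, shown purely as an illustration with $DEV standing in for the uplink representor:

	tc filter add dev $DEV ingress matchall \
		action police rate 100mbit burst 64k conform-exceed drop/continue

Only a byte-based rate offloads; peakrate, avrate and overhead must stay unset, and any exceed action other than drop is rejected.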
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -4121,10 +4792,16 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue");
return -EOPNOTSUPP;
}
+
+ err = mlx5e_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
if (err)
return err;
@@ -4180,6 +4857,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
struct mlx5e_hairpin_entry *hpe, *tmp;
LIST_HEAD(init_wait_list);
@@ -4191,11 +4869,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4210,7 +4888,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5e_flow_steering *fs;
struct mlx5e_priv *peer_priv;
struct mlx5e_tc_table *tc;
struct mlx5e_priv *priv;
@@ -4221,8 +4898,7 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
- fs = container_of(tc, struct mlx5e_flow_steering, tc);
- priv = container_of(fs, struct mlx5e_priv, fs);
+ priv = tc->priv;
peer_priv = netdev_priv(ndev);
if (priv == peer_priv ||
!(priv->netdev->features & NETIF_F_HW_TC))
@@ -4249,9 +4925,39 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
return tc_tbl_size;
}
+static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_flow_table **ft = &tc->miss_t;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_TC_MISS_LEVEL;
+ ft_attr.prio = 0;
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+
+ *ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(*ft)) {
+ err = PTR_ERR(*ft);
+ netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
+ }
+
+ return err;
+}
+
+static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+
+ mlx5_destroy_flow_table(tc->miss_t);
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4262,6 +4968,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
mutex_init(&tc->t_lock);
mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
+ tc->priv = priv;
err = rhashtable_init(&tc->ht, &tc_ht_params);
if (err)
@@ -4281,23 +4988,27 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->mapping = chains_mapping;
+ err = mlx5e_tc_nic_create_miss_table(priv);
+ if (err)
+ goto err_chains;
+
if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
err = PTR_ERR(tc->chains);
- goto err_chains;
+ goto err_miss;
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
- tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
@@ -4316,6 +5027,8 @@ err_reg:
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
mlx5_chains_destroy(tc->chains);
+err_miss:
+ mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
mapping_destroy(chains_mapping);
err_mapping:
@@ -4334,7 +5047,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -4356,12 +5069,30 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_tc_post_act_destroy(tc->post_act);
mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
+ mlx5e_tc_nic_destroy_miss_table(priv);
}
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
+{
+ int err;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+
+ return 0;
+}
+
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
+{
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+}
+
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
- struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
@@ -4369,7 +5100,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
u64 mapping_id;
int err = 0;
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
@@ -4409,12 +5139,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_enc_opts_mapping = mapping;
- err = rhashtable_init(tc_ht, &tc_ht_params);
- if (err)
- goto err_ht_init;
-
- lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
-
uplink_priv->encap = mlx5e_tc_tun_init(priv);
if (IS_ERR(uplink_priv->encap)) {
err = PTR_ERR(uplink_priv->encap);
@@ -4424,8 +5148,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
return 0;
err_register_fib_notifier:
- rhashtable_destroy(tc_ht);
-err_ht_init:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
@@ -4439,13 +5161,8 @@ err_tun_mapping:
return err;
}
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
- struct mlx5_rep_uplink_priv *uplink_priv;
-
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
-
- rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
@@ -4454,6 +5171,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
@@ -4534,13 +5252,13 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
+ struct mlx5e_tc_table *tc;
int err;
reg_b = be32_to_cpu(cqe->ft_metadata);
-
+ tc = mlx5e_fs_get_tc(priv->fs);
chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
@@ -4559,7 +5277,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 5ffae9b13066..48241317a535 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -39,6 +39,7 @@
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
+#include "en/tc/meter.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -53,7 +54,7 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
-
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
@@ -71,7 +72,8 @@ struct mlx5_flow_attr {
struct mlx5_fc *counter;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
- struct mlx5e_sample_attr *sample_attr;
+ struct mlx5e_sample_attr sample_attr;
+ struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -82,13 +84,41 @@ struct mlx5_flow_attr {
u8 outer_match_level;
u8 ip_version;
u8 tun_ip_version;
+ int tunnel_id; /* mapped tunnel id */
u32 flags;
+ u32 exe_aso_type;
+ struct list_head list;
+ struct mlx5e_post_act_handle *post_act_handle;
+ struct {
+		/* Indicates whether the parsed flow should be counted for LAG
+		 * mode decision making.
+		 */
+ bool count;
+ } lag;
+ /* keep this union last */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
};
};
+enum {
+ MLX5_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+ MLX5_ATTR_FLAG_SLOW_PATH = BIT(1),
+ MLX5_ATTR_FLAG_NO_IN_PORT = BIT(2),
+ MLX5_ATTR_FLAG_SRC_REWRITE = BIT(3),
+ MLX5_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ATTR_FLAG_ACCEPT = BIT(5),
+ MLX5_ATTR_FLAG_CT = BIT(6),
+};
+
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5e_tc_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
+}
+
struct mlx5_rx_tun_attr {
u16 decap_vport;
union {
@@ -149,8 +179,11 @@ enum {
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -200,6 +233,7 @@ enum mlx5e_tc_attr_to_reg {
FTEID_TO_REG,
NIC_CHAIN_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
+ PACKET_COLOR_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -212,6 +246,10 @@ struct mlx5e_tc_attr_to_reg_mapping {
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
+#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
+#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
+
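A short sketch of how these accessors compose, mirroring the mlx5e_tc_update_skb() hunk above (illustrative helper name; by construction MLX5_REG_MAPPING_MASK(NIC_ZONE_RESTORE_TO_REG) equals the ESW_ZONE_ID_MASK used there):

	static inline u16 zone_restore_id_from_metadata(u32 reg_b)
	{
		return (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
		       MLX5_REG_MAPPING_MASK(NIC_ZONE_RESTORE_TO_REG);
	}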
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
@@ -243,11 +281,8 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
u32 data);
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow);
-
-struct mlx5e_tc_flow;
-u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
@@ -289,6 +324,8 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
+static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
@@ -320,6 +357,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
#endif
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -340,6 +379,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
+static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
+static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 7fd33b356cc8..f7897ddb29c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,7 +39,9 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/macsec.h"
#include "en/ptp.h"
+#include <net/ipv6.h>
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
@@ -53,117 +55,6 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
-{
- int dscp_cp = 0;
-
- if (skb->protocol == htons(ETH_P_IP))
- dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- else if (skb->protocol == htons(ETH_P_IPV6))
- dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-
- return priv->dcbx_dp.dscp2prio[dscp_cp];
-}
-#endif
-
-static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int up = 0;
-
- if (!netdev_get_num_tc(dev))
- goto return_txq;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
-return_txq:
- return priv->port_ptp_tc2realtxq[up];
-}
-
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
- u16 htb_maj_id)
-{
- u16 classid;
-
- if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
- classid = TC_H_MIN(skb->priority);
- else
- classid = READ_ONCE(priv->htb.defcls);
-
- if (!classid)
- return 0;
-
- return mlx5e_get_txq_by_classid(priv, classid);
-}
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int num_tc_x_num_ch;
- int txq_ix;
- int up = 0;
- int ch_ix;
-
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
- if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
- struct mlx5e_ptp *ptp_channel;
-
- /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
-
- if (unlikely(htb_maj_id)) {
- txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
- if (txq_ix > 0)
- return txq_ix;
- }
-
- ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb)))
- return mlx5e_select_ptpsq(dev, skb);
-
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
- * If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq()
- * and mlx5e_select_htb_queue().
- */
- if (unlikely(txq_ix >= num_tc_x_num_ch))
- txq_ix %= num_tc_x_num_ch;
- } else {
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- }
-
- if (!netdev_get_num_tc(dev))
- return txq_ix;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
- /* Normalize any picked txq_ix to [0, num_channels),
- * So we can return a txq_ix that matches the channel and
- * packet UP.
- */
- ch_ix = priv->txq2sq[txq_ix]->ch_ix;
-
- return priv->channel_tc2realtxq[ch_ix][up];
-}
-
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
@@ -202,16 +93,26 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
return min_t(u16, hlen, skb_headlen(skb));
}
+#define MLX5_UNSAFE_MEMCPY_DISCLAIMER \
+ "This copy has been bounds-checked earlier in " \
+ "mlx5i_sq_calc_wqe_attr() and intentionally " \
+ "crosses a flex array boundary. Since it is " \
+ "performance sensitive, splitting the copy is " \
+ "undesirable."
+
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
int cpy1_sz = 2 * ETH_ALEN;
int cpy2_sz = ihs - cpy1_sz;
- memcpy(vhdr, skb->data, cpy1_sz);
+ memcpy(&vhdr->addrs, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
- memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
+ unsafe_memcpy(&vhdr->h_vlan_encapsulated_proto,
+ skb->data + cpy1_sz,
+ cpy2_sz,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}
static inline void
@@ -241,23 +142,32 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->csum_none++;
}
+/* Returns the number of header bytes that we plan
+ * to inline later in the transmit descriptor.
+ */
static inline u16
-mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
{
struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
+ *hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- else
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ ihs = skb_tcp_all_headers(skb);
+ if (ipv6_has_hopopt_jumbo(skb)) {
+ *hopbyhop = sizeof(struct hop_jumbo_hdr);
+ ihs -= sizeof(struct hop_jumbo_hdr);
+ }
+ }
stats->tso_packets++;
- stats->tso_bytes += skb->len - ihs;
+ stats->tso_bytes += skb->len - ihs - *hopbyhop;
}
return ihs;
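
The hopbyhop path above assumes the BIG TCP jumbogram option detected by ipv6_has_hopopt_jumbo(); for reference, the 8-byte header being stripped has this layout (as in include/net/ipv6.h):

	struct hop_jumbo_hdr {
		u8	nexthdr;
		u8	hdrlen;
		u8	tlv_type;	/* IPV6_TLV_JUMBO (0xc2) */
		u8	tlv_len;	/* 4 */
		__be32	jumbo_len;	/* real IPv6 payload length */
	};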
@@ -319,6 +229,7 @@ struct mlx5e_tx_attr {
__be16 mss;
u16 insz;
u8 opcode;
+ u8 hopbyhop;
};
struct mlx5e_tx_wqe_attr {
@@ -355,14 +266,16 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_sq_stats *stats = sq->stats;
if (skb_is_gso(skb)) {
- u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+ int hopbyhop;
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
*attr = (struct mlx5e_tx_attr) {
.opcode = MLX5_OPCODE_LSO,
.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
.ihs = ihs,
.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
- .headlen = skb_headlen(skb) - ihs,
+ .headlen = skb_headlen(skb) - ihs - hopbyhop,
+ .hopbyhop = hopbyhop,
};
stats->packets += skb_shinfo(skb)->gso_segs;
@@ -392,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
u16 ds_cnt_inl = 0;
u16 ds_cnt_ids = 0;
+ /* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
if (attr->insz)
ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
MLX5_SEND_WQE_DS);
@@ -404,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
inl += VLAN_HLEN;
ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
+ netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
+ (u16)MLX5E_MAX_TX_INLINE_DS);
ds_cnt += ds_cnt_inl;
}
@@ -429,6 +347,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
}
}
+static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+	/* Must not be called when an MPWQE session is active but empty. */
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wi = &sq->db.wqe_info[pi];
+
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
+
+ wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
const struct mlx5e_tx_attr *attr,
@@ -459,6 +397,11 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (unlikely(sq->ptpsq)) {
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+ if (!netif_tx_queue_stopped(sq->txq) &&
+ !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats->stopped++;
+ }
skb_get(skb);
}
@@ -476,7 +419,8 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
-
+ u16 ihs = attr->ihs;
+ struct ipv6hdr *h6;
struct mlx5e_sq_stats *stats = sq->stats;
int num_dma;
@@ -490,15 +434,40 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr->mss;
- if (attr->ihs) {
- if (skb_vlan_tag_present(skb)) {
- eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
+ if (ihs) {
+ u8 *start = eseg->inline_hdr.start;
+
+ if (unlikely(attr->hopbyhop)) {
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
+ ihs += VLAN_HLEN;
+ h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
+ } else {
+ unsafe_memcpy(start, skb->data,
+ ETH_HLEN + sizeof(*h6),
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ h6 = (struct ipv6hdr *)(start + ETH_HLEN);
+ }
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb));
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(start, skb, ihs);
+ ihs += VLAN_HLEN;
stats->added_vlan_packets++;
} else {
- eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
- memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr->ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}
+ eseg->inline_hdr.sz |= cpu_to_be16(ihs);
dseg += wqe_attr->ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
@@ -509,7 +478,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
dseg += wqe_attr->ds_cnt_ids;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@@ -521,12 +490,13 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
- !attr->insz;
+ !attr->insz && !mlx5e_macsec_skb_is_offload(skb);
}
static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
@@ -544,7 +514,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -622,6 +592,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_xmit_data txd;
+ txd.data = skb->data;
+ txd.len = skb->len;
+
+ txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+ goto err_unmap;
+
if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
mlx5e_tx_mpwqe_session_start(sq, eseg);
} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
@@ -631,21 +608,12 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
- txd.data = skb->data;
- txd.len = skb->len;
-
- txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
- goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
-
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
-
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
-
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
@@ -664,6 +632,7 @@ err_unmap:
mlx5e_dma_unmap_wqe_err(sq, 1);
sq->stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
@@ -673,12 +642,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+ ptpsq->ts_cqe_ctr_mask);
+}
+
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+ if (unlikely(sq->ptpsq))
+ mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -691,8 +670,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx5e_txqsq *sq;
u16 pi;
+ /* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
+ * queue being changed is disabled, and smp_wmb guarantees that the
+ * changes are visible before mlx5e_xmit tries to read from txq2sq. It
+ * guarantees that the value of txq2sq[qid] doesn't change while
+	 * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
+ * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
+ */
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
if (unlikely(!sq)) {
+ /* Two cases when sq can be NULL:
+ * 1. The HTB node is registered, and mlx5e_select_queue
+ * selected its queue ID, but the SQ itself is not yet created.
+ * 2. HTB SQ creation failed. Similar to the previous case, but
+ * the SQ won't be created.
+ */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -886,6 +878,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (netif_tx_queue_stopped(sq->txq) &&
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+ mlx5e_ptpsq_fifo_has_room(sq) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
stats->wake++;
@@ -1016,12 +1009,34 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr.mss;
if (attr.ihs) {
- memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+ if (unlikely(attr.hopbyhop)) {
+ struct ipv6hdr *h6;
+
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ ETH_HLEN + sizeof(*h6),
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ unsafe_memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb),
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else {
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr.ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ }
eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@@ -1033,5 +1048,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 833be29170a1..9a458a5d9853 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,6 +31,7 @@
*/
#include <linux/irq.h>
+#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
@@ -86,26 +87,36 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
+ bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);
bool busy_xsk = false, xsk_rx_alloc_err;
- /* Handle the race between the application querying need_wakeup and the
- * driver setting it:
- * 1. Update need_wakeup both before and after the TX. If it goes to
- * "yes", it can only happen with the first update.
- * 2. If the application queried need_wakeup before we set it, the
- * packets will be transmitted anyway, even w/o a wakeup.
- * 3. Give a chance to clear need_wakeup after new packets were queued
- * for TX.
+ /* If SQ is empty, there are no TX completions to trigger NAPI, so set
+	 * need_wakeup. Do it before queuing packets for TX to avoid a race
+	 * condition with userspace.
*/
- mlx5e_xsk_update_tx_wakeup(xsksq);
+ if (need_wakeup && xsksq->pc == xsksq->cc)
+ xsk_set_tx_need_wakeup(xsksq->xsk_pool);
busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
- mlx5e_xsk_update_tx_wakeup(xsksq);
+ /* If we queued some packets for TX, no need for wakeup anymore. */
+ if (need_wakeup && xsksq->pc != xsksq->cc)
+ xsk_clear_tx_need_wakeup(xsksq->xsk_pool);
+ /* If WQ is empty, RX won't trigger NAPI, so set need_wakeup. Do it
+	 * before refilling to avoid a race condition with userspace.
+ */
+ if (need_wakeup && !mlx5e_rqwq_get_cur_sz(xskrq))
+ xsk_set_rx_need_wakeup(xskrq->xsk_pool);
xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
xskrq);
- busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
+ /* Ask for wakeup if WQ is not full after refill. */
+ if (!need_wakeup)
+ busy_xsk |= xsk_rx_alloc_err;
+ else if (xsk_rx_alloc_err)
+ xsk_set_rx_need_wakeup(xskrq->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xskrq->xsk_pool);
return busy_xsk;
}
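
For context, the userspace half of this need_wakeup handshake, as a hedged sketch with the libbpf/libxdp AF_XDP helpers (the header is <bpf/xsk.h> on older libbpf, <xdp/xsk.h> with libxdp; socket setup is assumed to exist elsewhere):

	#include <sys/socket.h>
	#include <bpf/xsk.h>

	/* Issue the sendto() kick only when the driver has set the wakeup
	 * flag, i.e. when its SQ was empty as in the kernel code above.
	 */
	static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
	{
		if (xsk_ring_prod__needs_wakeup(tx))
			sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	}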
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48a45aa54a3c..a0242dc15741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
@@ -439,7 +438,8 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table;
int i;
- eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
+ eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
+ dev->priv.numa_node);
if (!eq_table)
return -ENOMEM;
@@ -575,6 +575,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_GEN_MAX(dev, vhca_state))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
+ if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
@@ -728,7 +731,8 @@ struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq_param *param)
{
- struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
+ struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
+ dev->priv.numa_node);
int err;
if (!eq)
@@ -888,10 +892,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
return ncomp_eqs;
INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
+
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
- eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
if (!eq) {
err = -ENOMEM;
goto clean;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 39e948bc1204..a994e71e05c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
NULL, &flow_act, NULL, 0);
@@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
vport->ingress.offloads.modify_metadata_rule = NULL;
}
+static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ int err = 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.fg = vport->ingress.offloads.drop_grp;
+ flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ }
+
+ vport->ingress.offloads.drop_rule = flow_rule;
+out:
+ return err;
+}
+
+static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->ingress.offloads.drop_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
+ vport->ingress.offloads.drop_rule = NULL;
+}
+
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
{
esw_acl_ingress_allow_rule_destroy(vport);
esw_acl_ingress_mod_metadata_destroy(esw, vport);
+ esw_acl_ingress_src_port_drop_destroy(esw, vport);
}
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -170,10 +202,29 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
if (!flow_group_in)
return -ENOMEM;
+ if (vport->vport == MLX5_VPORT_UPLINK) {
+ /* This group can hold an FTE to drop all traffic.
+		 * Needed in case LAG is enabled.
+ */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, ret);
+ goto drop_err;
+ }
+ vport->ingress.offloads.drop_grp = g;
+ flow_index++;
+ }
+
if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
/* This group is to hold FTE to match untagged packets when prio_tag
* is enabled.
*/
+ memset(flow_group_in, 0, inlen);
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in,
@@ -221,6 +272,11 @@ metadata_err:
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
prio_tag_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+ vport->ingress.offloads.drop_grp = NULL;
+ }
+drop_err:
kvfree(flow_group_in);
return ret;
}
@@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
+
+ if (vport->ingress.offloads.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+ vport->ingress.offloads.drop_grp = NULL;
+ }
}
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
@@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
num_ftes++;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ num_ftes++;
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
@@ -320,3 +383,27 @@ out:
vport->metadata = vport->default_metadata;
return err;
}
+
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (IS_ERR(vport)) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return PTR_ERR(vport);
+ }
+
+ return esw_acl_ingress_src_port_drop_create(esw, vport);
+}
+
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (WARN_ON_ONCE(IS_ERR(vport))) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return;
+ }
+
+ esw_acl_ingress_src_port_drop_destroy(esw, vport);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
index c57869b93d60..11d3d3978848 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -6,6 +6,7 @@
#include "eswitch.h"
+#ifdef CONFIG_MLX5_ESWITCH
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
@@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
u32 metadata);
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
+#else /* CONFIG_MLX5_ESWITCH */
+static inline void
+mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,
+					     u16 vport_num)
+{}
+
+static inline int
+mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw,
+					    u16 vport_num)
+{
+	return 0;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index f690f430f40f..4fbff7bcc155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
+#include <linux/build_bug.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <net/netevent.h>
@@ -12,26 +13,57 @@
#define CREATE_TRACE_POINTS
#include "diag/bridge_tracepoint.h"
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
-
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
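
As a quick consistency check on the reworked index math, the per-group sizes sum back to the 64000-entry tables the old single-size defines described:

	ingress: 12000 (vlan) + 12000 (vlan filter) + 12000 (qinq)
	       + 12000 (qinq filter) + 16000 (untagged mac) = 64000
	egress:  16000 (vlan) + 16000 (qinq) + 31999 (mac) + 1 (miss) = 64000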
@@ -63,12 +95,14 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_qinq_fg;
struct mlx5_flow_group *egress_mac_fg;
struct mlx5_flow_group *egress_miss_fg;
struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
+ u16 vlan_proto;
};
static void
@@ -138,7 +172,9 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -154,30 +190,53 @@ mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flo
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
+ vlan_proto, PTR_ERR(fg));
return fg;
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
+ u16 vlan_proto, struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -193,27 +252,48 @@ mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
-
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
"Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
PTR_ERR(fg));
-
kvfree(in);
return fg;
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -250,7 +330,9 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -265,13 +347,14 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
@@ -283,6 +366,25 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -346,7 +448,7 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
+ struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
struct mlx5_eswitch *esw = br_offloads->esw;
int err;
@@ -374,10 +476,22 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
- if (IS_ERR(filter_fg)) {
- err = PTR_ERR(filter_fg);
- goto err_filter_fg;
+ vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(vlan_filter_fg)) {
+ err = PTR_ERR(vlan_filter_fg);
+ goto err_vlan_filter_fg;
+ }
+
+ qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
+ qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_filter_fg)) {
+ err = PTR_ERR(qinq_filter_fg);
+ goto err_qinq_filter_fg;
}
mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
@@ -389,13 +503,19 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
br_offloads->ingress_ft = ingress_ft;
br_offloads->skip_ft = skip_ft;
br_offloads->ingress_vlan_fg = vlan_fg;
- br_offloads->ingress_filter_fg = filter_fg;
+ br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
+ br_offloads->ingress_qinq_fg = qinq_fg;
+ br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
br_offloads->ingress_mac_fg = mac_fg;
return 0;
err_mac_fg:
- mlx5_destroy_flow_group(filter_fg);
-err_filter_fg:
+ mlx5_destroy_flow_group(qinq_filter_fg);
+err_qinq_filter_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
+ mlx5_destroy_flow_group(vlan_filter_fg);
+err_vlan_filter_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(skip_ft);
@@ -409,8 +529,12 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
{
mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
br_offloads->ingress_mac_fg = NULL;
- mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
- br_offloads->ingress_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
+ br_offloads->ingress_qinq_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
+ br_offloads->ingress_qinq_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
+ br_offloads->ingress_vlan_filter_fg = NULL;
mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
br_offloads->ingress_vlan_fg = NULL;
mlx5_destroy_flow_table(br_offloads->skip_ft);
@@ -428,7 +552,7 @@ static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
struct mlx5_flow_handle *miss_handle = NULL;
struct mlx5_eswitch *esw = br_offloads->esw;
@@ -447,6 +571,12 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
goto err_vlan_fg;
}
+ qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
@@ -491,6 +621,7 @@ skip_miss_flow:
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
+ bridge->egress_qinq_fg = qinq_fg;
bridge->egress_mac_fg = mac_fg;
bridge->egress_miss_fg = miss_fg;
bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
@@ -498,6 +629,8 @@ skip_miss_flow:
return 0;
err_mac_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(egress_ft);
@@ -515,6 +648,7 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
if (bridge->egress_miss_fg)
mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
+ mlx5_destroy_flow_group(bridge->egress_qinq_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
}
@@ -559,10 +693,17 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
flow_act.pkt_reformat = vlan->pkt_reformat_push;
flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -645,10 +786,17 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a
MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
@@ -696,10 +844,17 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
flow_act.pkt_reformat = vlan->pkt_reformat_pop;
}
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -774,6 +929,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
bridge->ifindex = ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
+ bridge->vlan_proto = ETH_P_8021Q;
list_add(&bridge->list, &br_offloads->bridges);
return bridge;
@@ -911,12 +1067,13 @@ mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
}
static int
-mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
- } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
+ } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
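As an aside, a minimal userspace sketch of the 4-byte tag that the push reformat prepends for a PVID entry under each supported protocol. The EtherType values 0x8100 (802.1Q) and 0x88a8 (802.1ad) are the standard ones; VID 100 is an arbitrary example:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct vlan_push_hdr {
	uint16_t h_vlan_proto; /* big endian on the wire */
	uint16_t h_vlan_TCI;
};

int main(void)
{
	struct vlan_push_hdr q  = { htons(0x8100), htons(100) }; /* 802.1Q  */
	struct vlan_push_hdr ad = { htons(0x88a8), htons(100) }; /* 802.1ad */
	const uint8_t *p = (const uint8_t *)&q;

	/* Prints 81:00:00:64 and 88:a8:00:64 regardless of host endianness. */
	printf("%02x:%02x:%02x:%02x\n", p[0], p[1], p[2], p[3]);
	p = (const uint8_t *)&ad;
	printf("%02x:%02x:%02x:%02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}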
@@ -1008,36 +1165,58 @@ mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct
vlan->pkt_mod_hdr_push_mark = NULL;
}
-static struct mlx5_esw_bridge_vlan *
-mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
- struct mlx5_eswitch *esw)
+static int
+mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
- struct mlx5_esw_bridge_vlan *vlan;
int err;
- vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return ERR_PTR(-ENOMEM);
-
- vlan->vid = vid;
- vlan->flags = flags;
- INIT_LIST_HEAD(&vlan->fdb_list);
-
if (flags & BRIDGE_VLAN_INFO_PVID) {
- err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
+ err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
if (err)
- goto err_vlan_push;
+ return err;
err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
if (err)
goto err_vlan_push_mark;
}
+
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
if (err)
goto err_vlan_pop;
}
+ return 0;
+
+err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
+ return err;
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ int err;
+
+ vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ INIT_LIST_HEAD(&vlan->fdb_list);
+
+ err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
+ if (err)
+ goto err_vlan_push_pop;
+
err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
if (err)
goto err_xa_insert;
@@ -1048,13 +1227,11 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
-err_vlan_pop:
if (vlan->pkt_mod_hdr_push_mark)
mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
-err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
-err_vlan_push:
+err_vlan_push_pop:
kvfree(vlan);
return ERR_PTR(err);
}
@@ -1102,6 +1279,50 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
+static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&port->vlans, i, vlan) {
+ mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
+ br_offloads->esw);
+ if (err) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
+ vlan->vid, bridge->vlan_proto, port->vport_num,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_port *port;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port->bridge != bridge)
+ continue;
+
+ err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
@@ -1287,6 +1508,32 @@ int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, boo
return 0;
}
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
+ br_offloads);
+ if (!port)
+ return -EINVAL;
+
+ bridge = port->bridge;
+ if (bridge->vlan_proto == proto)
+ return 0;
+ if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
+ return -EOPNOTSUPP;
+ }
+
+ mlx5_esw_bridge_fdb_flush(bridge);
+ bridge->vlan_proto = proto;
+ mlx5_esw_bridge_vlans_recreate(bridge);
+
+ return 0;
+}
+
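A userspace model of the update ordering implemented above: validate the protocol, flush the FDB, switch, then rebuild the per-VLAN actions. All names in this sketch are illustrative, not driver API:

#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

struct bridge_model { unsigned int vlan_proto; };

static int vlan_proto_set(struct bridge_model *br, unsigned int proto)
{
	if (br->vlan_proto == proto)
		return 0;
	if (proto != ETH_P_8021Q && proto != ETH_P_8021AD)
		return -1; /* stands in for -EOPNOTSUPP */

	printf("flush FDB\n");      /* mlx5_esw_bridge_fdb_flush() */
	br->vlan_proto = proto;
	printf("recreate VLANs\n"); /* mlx5_esw_bridge_vlans_recreate() */
	return 0;
}

int main(void)
{
	struct bridge_model br = { ETH_P_8021Q };

	return vlan_proto_set(&br, ETH_P_8021AD);
}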
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
@@ -1434,7 +1681,8 @@ int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
- vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
+ vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
+ br_offloads->esw);
if (IS_ERR(vlan)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
return PTR_ERR(vlan);
@@ -1574,6 +1822,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads;
+ ASSERT_RTNL();
+
br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
@@ -1590,6 +1840,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+ ASSERT_RTNL();
+
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index efc39975226e..10851a515bca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -26,7 +26,9 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_flow_table *ingress_ft;
struct mlx5_flow_group *ingress_vlan_fg;
- struct mlx5_flow_group *ingress_filter_fg;
+ struct mlx5_flow_group *ingress_vlan_filter_fg;
+ struct mlx5_flow_group *ingress_qinq_fg;
+ struct mlx5_flow_group *ingress_qinq_filter_fg;
struct mlx5_flow_group *ingress_mac_fg;
struct mlx5_flow_table *skip_ft;
@@ -60,6 +62,8 @@ int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsign
struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
new file mode 100644
index 000000000000..2db13c71e88c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include "eswitch.h"
+
+enum vnic_diag_counter {
+ MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
+ MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_CQ_OVERRUN,
+ MLX5_VNIC_DIAG_INVALID_COMMAND,
+ MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND,
+};
+
+static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
+ u32 *val)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
+ struct mlx5_core_dev *dev = vport->dev;
+ u16 vport_num = vport->vport;
+ void *vnic_diag_out;
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
+ if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
+ MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
+ switch (counter) {
+ case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
+ break;
+ case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
+ send_queue_priority_update_flow);
+ break;
+ case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_CQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_INVALID_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
+ break;
+ case MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
+ break;
+ }
+
+ return 0;
+}
+
+static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
+ enum vnic_diag_counter type)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = mlx5_esw_query_vnic_diag(vport, type, &val);
+ if (ret)
+ return ret;
+
+ seq_printf(file, "%d\n", val);
+ return 0;
+}
+
+static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
+}
+
+static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
+}
+
+static int comp_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
+}
+
+static int async_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
+}
+
+static int cq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
+}
+
+static int invalid_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
+}
+
+static int quota_exceeded_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND);
+}
+
+DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
+DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
+DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(cq_overrun);
+DEFINE_SHOW_ATTRIBUTE(invalid_command);
+DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
+
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ debugfs_remove_recursive(vport->dbgfs);
+ vport->dbgfs = NULL;
+}
+
+/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" */
+#define VNIC_DIAG_DIR_NAME_MAX_LEN 8
+
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ struct dentry *vnic_diag;
+ char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return;
+
+ if (vport_num == MLX5_VPORT_PF) {
+ strcpy(dir_name, "pf");
+ } else if (vport_num == MLX5_VPORT_ECPF) {
+ strcpy(dir_name, "ecpf");
+ } else {
+ err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
+ is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
+ if (WARN_ON(err < 0))
+ return;
+ }
+
+ vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
+ vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
+ debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
+ &total_q_under_processor_handle_fops);
+ debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
+ &send_queue_priority_update_flow_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
+ debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
+ &comp_eq_overrun_fops);
+ debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
+ &async_eq_overrun_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
+ debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
+ debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
+ &invalid_command_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
+ debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
+ &quota_exceeded_command_fops);
+}
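A hypothetical userspace read of one of the new counters. The "esw/<port>/vnic_diag/<counter>" layout follows the code above; the debugfs mount point and the concrete device/port components of the path are assumptions:

#include <stdio.h>

int main(void)
{
	/* Everything before "esw" is a placeholder for a concrete device. */
	const char *path =
		"/sys/kernel/debug/mlx5/0000:08:00.0/esw/vf_0/vnic_diag/cq_overrun";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cq_overrun: %s", buf);
	fclose(f);
	return 0;
}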
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 7f9b96d9537e..9bc7be95db54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -87,11 +87,11 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
goto reg_err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -99,7 +99,7 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
reg_err:
mlx5_esw_dl_port_free(dl_port);
return err;
@@ -118,10 +118,10 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
mlx5_esw_dl_port_free(vport->dl_port);
vport->dl_port = NULL;
}
@@ -156,11 +156,11 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
return err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -168,7 +168,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
return err;
}
@@ -182,9 +182,9 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
vport->dl_port = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
index 3401188e0a60..51ac24e6ec3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
__field(unsigned int, used)
),
TP_fast_assign(
- strncpy(__entry->dev_name,
+ strscpy(__entry->dev_name,
netdev_name(fdb->dev),
IFNAMSIZ);
memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index c275fe028b6d..c9a91158e99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -78,15 +78,19 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
struct mlx5_core_dev *dest_mdev)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ bool vf_sf_vport;
+
+ vf_sf_vport = mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
-/* Use indirect table for all IP traffic from UL to VF with vport
+/* Use indirect table for all IP traffic from UL to VF or SF with vport
* destination when source rewrite flag is set.
*/
return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
- mlx5_eswitch_is_vf_vport(esw, vport_num) &&
+ vf_sf_vport &&
esw->dev == dest_mdev &&
attr->ip_version &&
- attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
}
u16
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 9d17206d1625..fabe49a35a5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -11,6 +11,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "fs_ft_pool.h"
#include "esw/qos.h"
enum {
@@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- ft_attr.max_fte = table_size;
+ ft_attr.max_fte = POOL_NEXT_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
@@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
goto out;
}
esw->fdb_table.legacy.fdb = fdb;
+ table_size = fdb->max_fte;
/* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 11bbcd5f5b8b..4f8a24d84a86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
}
int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 min_rate, u32 max_rate)
+ u32 max_rate, u32 min_rate)
{
int err;
@@ -924,12 +924,16 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
struct mlx5_esw_rate_group *group,
struct netlink_ext_ack *extack)
{
- int err;
+ int err = 0;
mutex_lock(&esw->state_lock);
+ if (!vport->qos.enabled && !group)
+ goto unlock;
+
err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
if (!err)
err = esw_qos_vport_update_group(esw, vport, group, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 458ec0bca1b8..2169486c4bfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
+#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
@@ -1002,6 +1003,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
if (err)
return err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
err = esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
@@ -1009,6 +1011,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
return err;
}
@@ -1016,6 +1019,7 @@ err_rep:
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
@@ -1152,8 +1156,6 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
const u32 *out;
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
if (num_vfs < 0)
return;
@@ -1186,6 +1188,9 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
int total_vports;
int err;
+ if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
+ return 0;
+
total_vports = mlx5_eswitch_get_total_vports(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
@@ -1203,6 +1208,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
} else {
esw_warn(dev, "ingress ACL is not supported by FW\n");
}
+ esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
return 0;
err:
@@ -1215,6 +1221,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
+ esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
mlx5_fs_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
@@ -1224,7 +1231,6 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
- * @mode: Eswitch mode to enable
* @num_vfs: Enable eswitch for given number of VFs. This is optional.
* Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
* Caller should pass num_vfs > 0 when enabling eswitch for
@@ -1238,7 +1244,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
* mode. If num_vfs >= 0 is provided, it sets up VF-related eswitch vports.
* It returns 0 on success or error code on failure.
*/
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
@@ -1257,9 +1263,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- esw->mode = mode;
-
- if (mode == MLX5_ESWITCH_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_rescan_drivers(esw->dev);
@@ -1269,22 +1273,19 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (err)
goto abort;
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
- mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mlx5_esw_mode_change_notify(esw, mode);
+ mlx5_esw_mode_change_notify(esw, esw->mode);
return 0;
abort:
- esw->mode = MLX5_ESWITCH_NONE;
-
- if (mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
-
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1305,14 +1306,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!mlx5_esw_allowed(esw))
return 0;
- toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+ devl_assert_locked(priv_to_devlink(esw->dev));
+
+ toggle_lag = !mlx5_esw_is_fdb_created(esw);
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- if (esw->mode == MLX5_ESWITCH_NONE) {
- ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ if (!mlx5_esw_is_fdb_created(esw)) {
+ ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
@@ -1330,55 +1333,82 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return ret;
}
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+/* When disabling SR-IOV, free driver-level resources. */
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
- int old_mode;
-
- lockdep_assert_held_write(&esw->mode_lock);
-
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_allowed(esw))
return;
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ devl_assert_locked(priv_to_devlink(esw->dev));
+ down_write(&esw->mode_lock);
+ /* If driver is unloaded, this function is called twice by remove_one()
+ * and mlx5_unload(). Prevent the second call.
+ */
+ if (!esw->esw_funcs.num_vfs && !clear_vf)
+ goto unlock;
+
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- /* Notify eswitch users that it is exiting from current mode.
- * So that it can do necessary cleanup before the eswitch is disabled.
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ /* If disabling sriov in switchdev mode, free meta rules here
+ * because they depend on num_vfs.
*/
- mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ struct devlink *devlink = priv_to_devlink(esw->dev);
- mlx5_eswitch_event_handlers_unregister(esw);
+ devl_rate_nodes_destroy(devlink);
+ }
- if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
+ esw->esw_funcs.num_vfs = 0;
- old_mode = esw->mode;
- esw->mode = MLX5_ESWITCH_NONE;
+unlock:
+ up_write(&esw->mode_lock);
+}
- if (old_mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
+/* Free resources for the corresponding eswitch mode. It is called by devlink
+ * when changing the eswitch mode or by modprobe when unloading the driver.
+ */
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
- devlink_rate_nodes_destroy(devlink);
+ /* Notify eswitch users that it is exiting from current mode so that
+ * they can do necessary cleanup before the eswitch is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- mlx5_esw_acls_ns_cleanup(esw);
+ mlx5_eswitch_event_handlers_unregister(esw);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
+
+ if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
+ }
+
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ devl_rate_nodes_destroy(devlink);
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_allowed(esw))
return;
+ devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- mlx5_eswitch_disable_locked(esw, clear_vf);
- esw->esw_funcs.num_vfs = 0;
+ mlx5_eswitch_disable_locked(esw);
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
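A compact userspace model of the reworked lifecycle: the eswitch now always carries a valid mode (LEGACY by default) and a separate FDB-created flag replaces the old MLX5_ESWITCH_NONE state. The logic below is schematic, not driver code:

#include <stdbool.h>
#include <stdio.h>

enum { ESW_LEGACY, ESW_OFFLOADS };

struct esw_model {
	int mode;         /* always valid; LEGACY by default */
	bool fdb_created; /* models MLX5_ESW_FDB_CREATED */
};

static void esw_enable(struct esw_model *esw)
{
	if (!esw->fdb_created) {
		printf("create FDB for mode %d\n", esw->mode);
		esw->fdb_created = true;
	}
}

static void esw_disable(struct esw_model *esw)
{
	if (esw->fdb_created) {
		printf("destroy FDB for mode %d\n", esw->mode);
		esw->fdb_created = false;
	}
	/* the mode itself is preserved; there is no NONE state anymore */
}

int main(void)
{
	struct esw_model esw = { ESW_LEGACY, false };

	esw_enable(&esw);
	esw_disable(&esw);
	return 0;
}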
@@ -1569,23 +1599,25 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
ida_init(&esw->offloads.vport_metadata_ida);
xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
- lockdep_register_key(&esw->mode_lock_key);
init_rwsem(&esw->mode_lock);
- lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_NONE;
+ esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ if (MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+ esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
@@ -1609,10 +1641,10 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ debugfs_remove_recursive(esw->dbgfs);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
- lockdep_unregister_key(&esw->mode_lock_key);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
xa_destroy(&esw->offloads.vhca_map);
@@ -1875,7 +1907,7 @@ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -1890,17 +1922,6 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
-bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
-{
- if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
- (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
- dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
- return true;
-
- return false;
-}
-
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1)
{
@@ -2006,23 +2027,10 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
*/
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
- if (!mlx5_esw_allowed(esw))
- return;
up_write(&esw->mode_lock);
}
/**
- * mlx5_esw_lock() - Take write lock on esw mode lock
- * @esw: eswitch device.
- */
-void mlx5_esw_lock(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_allowed(esw))
- return;
- down_write(&esw->mode_lock);
-}
-
-/**
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
*
* @dev: Pointer to core device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ead5e8acc8be..f68dc2d0dbe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -113,8 +113,11 @@ struct vport_ingress {
* packet with metadata.
*/
struct mlx5_flow_group *metadata_allmatch_grp;
+ /* Optional group to add a drop all rule */
+ struct mlx5_flow_group *drop_grp;
struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
+ struct mlx5_flow_handle *drop_rule;
} offloads;
};
@@ -188,6 +191,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
+ struct dentry *dbgfs;
};
struct mlx5_esw_indir_table;
@@ -240,6 +244,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
+ struct mlx5_flow_group *vport_rx_drop_group;
+ struct mlx5_flow_handle *vport_rx_drop_rule;
struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
@@ -279,10 +285,15 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
+ MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};
struct mlx5_esw_bridge_offloads;
+enum {
+ MLX5_ESW_FDB_CREATED = BIT(0),
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -328,7 +339,7 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
- struct lock_class_key mode_lock_key;
+ struct dentry *dbgfs;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -336,6 +347,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);
+
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
@@ -348,10 +363,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -448,22 +464,6 @@ enum {
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
-enum {
- MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
- MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
- MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
- MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
- MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
- MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
-};
-
-/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
-static inline bool
-mlx5_esw_attr_flags_skip(u32 attr_flags)
-{
- return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
-}
-
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
@@ -487,6 +487,7 @@ struct mlx5_esw_flow_attr {
int src_port_rewrite_act_id;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_rx_tun_attr *rx_tun_attr;
+ struct ethhdr eth;
struct mlx5_pkt_reformat *decap_pkt_reformat;
};
@@ -530,8 +531,6 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
-bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
- struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
@@ -590,6 +589,11 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
return dl_port_index & 0xffff;
}
+static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
+}
+
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
@@ -687,6 +691,9 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num);
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
@@ -718,7 +725,6 @@ void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
-void mlx5_esw_lock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
@@ -735,8 +741,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
-static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
@@ -745,9 +751,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
-static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }
-
static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9a7b25692505..728ca9f2bb9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -49,6 +49,7 @@
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
+#include "lag/lag.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -69,6 +70,8 @@
#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+
static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -139,7 +142,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);
- if (esw_attr->int_port)
+ if (attr && !attr->chain && esw_attr->int_port)
metadata =
mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
else
@@ -180,7 +183,7 @@ esw_setup_decap_indir(struct mlx5_eswitch *esw,
{
struct mlx5_flow_table *ft;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
ft = mlx5_esw_indir_table_get(esw, attr, spec,
@@ -201,12 +204,12 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
- struct mlx5_flow_attr *attr,
+ u32 sampler_id,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
- dest[i].sampler_id = attr->sample_attr->sampler_id;
+ dest[i].sampler_id = sampler_id;
return 0;
}
@@ -229,10 +232,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
}
static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
- struct mlx5_flow_act *flow_act,
- struct mlx5_fs_chains *chains,
- int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains, int i)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -240,6 +241,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, int i)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -297,7 +308,7 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int err;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
/* flow steering cannot handle more than one dest with the same ft
@@ -364,7 +375,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int j, err;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
@@ -418,6 +429,9 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+ mlx5_lag_mpesw_is_activated(esw->dev))
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
if (pkt_reformat) {
@@ -463,20 +477,17 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
esw_src_port_rewrite_supported(esw))
- attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
- esw_setup_sampler_dest(dest, flow_act, attr, *i);
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
+ !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
+ esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
- } else if (attr->dest_ft) {
- esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
- } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
- esw_setup_slow_path_dest(dest, flow_act, chains, *i);
- (*i)++;
- } else if (attr->dest_chain) {
- err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
- 1, 0, *i);
+ } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+ esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
@@ -484,6 +495,15 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+
+ if (attr->dest_ft) {
+ err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+ 1, 0, *i);
+ (*i)++;
+ }
}
return err;
@@ -498,7 +518,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
+ } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -508,6 +528,20 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
}
}
+static void
+esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = attr->meter_attr.meter;
+ flow_act->exe_aso.type = attr->exe_aso_type;
+ flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
+ flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
+ /* use metadata reg 5 for packet color */
+ flow_act->exe_aso.return_reg_id = 5;
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
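For context, a later hunk in this file gates this helper so the ASO fields are only filled in for flow-meter ASO; the call site (quoted from further down this diff) reads:

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);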
@@ -575,6 +609,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
+ esw_setup_meter(attr, &flow_act);
+
if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
@@ -589,7 +627,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
fdb = attr->ft;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
+ if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
mlx5_eswitch_set_rule_source_port(esw, spec, attr,
esw_attr->in_mdev->priv.eswitch,
esw_attr->in_rep->vport);
@@ -721,7 +759,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!mlx5_esw_attr_flags_skip(attr->flags)) {
+ if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -863,7 +901,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (err)
goto unlock;
- attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
vport = esw_vlan_action_get_vport(esw_attr, push, pop);
@@ -871,7 +909,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
/* tracks VF --> wire rules without vlan push action */
if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
- attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
}
goto unlock;
@@ -902,7 +940,7 @@ skip_set_push:
}
out:
if (!err)
- attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
unlock:
mutex_unlock(&esw->state_lock);
return err;
@@ -921,7 +959,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
+ if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -1024,43 +1062,23 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
- int i = 0, num_vfs = esw->esw_funcs.num_vfs;
-
- if (!num_vfs || !flows)
- return;
-
- for (i = 0; i < num_vfs; i++)
- mlx5_del_flow_rules(flows[i]);
-
- kvfree(flows);
+ if (rule)
+ mlx5_del_flow_rules(rule);
}
-static int
-mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
- int num_vfs, rule_idx = 0, err = 0;
struct mlx5_flow_handle *flow_rule;
- struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
- unsigned long i;
- u16 vport_num;
-
- num_vfs = esw->esw_funcs.num_vfs;
- flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
- if (!flows)
- return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto alloc_err;
- }
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
MLX5_SET(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
@@ -1073,34 +1091,18 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- vport_num = vport->vport;
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
- dest.vport.num = vport_num;
-
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
- rule_idx, PTR_ERR(flow_rule));
- goto rule_err;
- }
- flows[rule_idx++] = flow_rule;
- }
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
+ dest.vport.num = vport_num;
- esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
- kvfree(spec);
- return 0;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
+ vport_num, PTR_ERR(flow_rule));
-rule_err:
- while (--rule_idx >= 0)
- mlx5_del_flow_rules(flows[rule_idx]);
kvfree(spec);
-alloc_err:
- kvfree(flows);
- return err;
+ return flow_rule;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
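With this refactor, callers own one meta rule per vport instead of a driver-managed array. A minimal usage sketch, not part of the patch, assuming the caller keeps the handle itself:

	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, vport_num);
	if (IS_ERR(rule))
		return PTR_ERR(rule);
	/* ... rule stays live until the caller tears it down ... */
	mlx5_eswitch_del_send_to_vport_meta_rule(rule);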
@@ -1625,18 +1627,200 @@ esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
#endif
+static int
+esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int count, err = 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
+
+ /* See comment at table_size calculation */
+ count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
+ *ix += count;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+
+out:
+ return err;
+}
+
+static int
+esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+
+ if (!esw_src_port_rewrite_supported(esw))
+ return 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, *ix + esw->total_vports - 1);
+ *ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev,
+ "Failed to create send-to-vport meta flow group err(%d)\n", err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ return 0;
+
+send_vport_meta_err:
+ return err;
+}
+
+static int
+esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ esw_set_flow_group_source_port(esw, flow_group_in);
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ *ix + esw->total_vports - 1);
+ *ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
+
+out:
+ return err;
+}
+
+static int
+esw_create_miss_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+ u8 *dmac;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ *ix + MLX5_ESW_MISS_FLOWS);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
+ goto miss_err;
+ }
+ esw->fdb_table.offloads.miss_grp = g;
+
+ err = esw_add_fdb_miss_rule(esw);
+ if (err)
+ goto miss_rule_err;
+
+ return 0;
+
+miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+ return err;
+}
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
- int num_vfs, table_size, ix, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
+ int table_size, ix = 0, err = 0;
u32 flags = 0, *flow_group_in;
- struct mlx5_flow_group *g;
- void *match_criteria;
- u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
@@ -1670,7 +1854,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
* total vports of the peer (currently it also uses esw->total_vports).
*/
table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
- MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
+ esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
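The four group helpers above consume this table sequentially through the shared `ix` cursor; a sketch of the resulting layout, not part of the patch, with values read off the helpers (the meta group exists only when source-port rewrite is supported):

	int ix = 0;
	int sq_count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);

	/* send-to-vport group:      flow indices [0, sq_count)        */
	ix += sq_count;
	/* meta send-to-vport group: [ix, ix + esw->total_vports)      */
	ix += esw->total_vports;
	/* peer-eswitch miss group:  [ix, ix + esw->total_vports)      */
	ix += esw->total_vports;
	/* miss group:               [ix, ix + MLX5_ESW_MISS_FLOWS]    */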
@@ -1711,139 +1895,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
goto fdb_chains_err;
}
- /* create send-to-vport group */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
-
- /* See comment above table_size calculation */
- ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
+ if (err)
goto send_vport_err;
- }
- esw->fdb_table.offloads.send_to_vport_grp = g;
-
- if (esw_src_port_rewrite_supported(esw)) {
- /* meta send to vport */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS_2);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
-
- num_vfs = esw->esw_funcs.num_vfs;
- if (num_vfs) {
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in,
- end_flow_index, ix + num_vfs - 1);
- ix += num_vfs;
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
- err);
- goto send_vport_meta_err;
- }
- esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
- err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
- if (err)
- goto meta_rule_err;
- }
- }
-
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
- /* create peer esw miss group */
- memset(flow_group_in, 0, inlen);
-
- esw_set_flow_group_source_port(esw, flow_group_in);
-
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in,
- match_criteria);
-
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + esw->total_vports - 1);
- ix += esw->total_vports;
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
- goto peer_miss_err;
- }
- esw->fdb_table.offloads.peer_miss_grp = g;
- }
-
- /* create miss group */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
- match_criteria);
- dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers.dmac_47_16);
- dmac[0] = 0x01;
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + MLX5_ESW_MISS_FLOWS);
+ err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
+ if (err)
+ goto send_vport_meta_err;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
- goto miss_err;
- }
- esw->fdb_table.offloads.miss_grp = g;
+ err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
+ if (err)
+ goto peer_miss_err;
- err = esw_add_fdb_miss_rule(esw);
+ err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
if (err)
- goto miss_rule_err;
+ goto miss_err;
kvfree(flow_group_in);
return 0;
-miss_rule_err:
- mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
-meta_rule_err:
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
@@ -1870,7 +1944,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
@@ -1888,7 +1961,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0);
}
-static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
+static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
int nvports;
@@ -1913,7 +1986,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = esw_get_offloads_ft_size(esw);
+ ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
+ MLX5_ESW_FT_OFFLOADS_DROP_RULE;
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1942,7 +2016,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports;
int err = 0;
- nvports = esw_get_offloads_ft_size(esw);
+ nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1972,6 +2046,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
+static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
+{
+ /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+ * for the drop rule, which is placed at the end of the table.
+	 * So return the total number of vports and int_ports as the rule index.
+ */
+ return esw_get_nr_ft_offloads_steering_src_ports(esw);
+}
+
+static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int flow_index;
+ int err = 0;
+
+ flow_index = esw_create_vport_rx_drop_rule_index(esw);
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
+ goto out;
+ }
+
+ esw->offloads.vport_rx_drop_group = g;
+out:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+ if (esw->offloads.vport_rx_drop_group)
+ mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
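Taken together with the drop rule added below, the offloads table now lays out as follows (a sketch, not part of the patch, using the helper names introduced here):

	int nports = esw_get_nr_ft_offloads_steering_src_ports(esw);

	/* vport rx group: flow indices [0, nports)                    */
	/* vport rx drop:  flow index nports, the one extra entry from */
	/*                 MLX5_ESW_FT_OFFLOADS_DROP_RULE (== 1)       */
	/* table max_fte:  nports + MLX5_ESW_FT_OFFLOADS_DROP_RULE     */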
@@ -2020,6 +2140,32 @@ out:
return flow_rule;
}
+static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
+ &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx drop rule err %ld\n",
+ PTR_ERR(flow_rule));
+ return PTR_ERR(flow_rule);
+ }
+
+ esw->offloads.vport_rx_drop_rule = flow_rule;
+
+ return 0;
+}
+
+static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+ if (esw->offloads.vport_rx_drop_rule)
+ mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
+}
+
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
@@ -2030,7 +2176,7 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2164,20 +2310,15 @@ out_free:
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1;
+ int err;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- esw->dev->priv.sriov.num_vfs);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err1) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed setting eswitch back to legacy");
- }
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -2378,60 +2519,6 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
-static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
-{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
- struct mlx5_eswitch *esw;
- struct mlx5_flow_root_namespace *root;
- struct mlx5_flow_namespace *ns;
- struct mlx5_vport *vport;
- int err;
-
- MLX5_SET(set_flow_table_root_in, in, opcode,
- MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
- MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
- MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
- MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
-
- if (master) {
- esw = master->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
- MLX5_SET(set_flow_table_root_in, in, table_vport_number,
- MLX5_VPORT_UPLINK);
-
- ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->index);
- root = find_root(&ns->node);
- mutex_lock(&root->chain_lock);
-
- MLX5_SET(set_flow_table_root_in, in,
- table_eswitch_owner_vhca_id_valid, 1);
- MLX5_SET(set_flow_table_root_in, in,
- table_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(master, vhca_id));
- MLX5_SET(set_flow_table_root_in, in, table_id,
- root->root_ft->id);
- } else {
- esw = slave->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- ns = mlx5_get_flow_vport_acl_namespace(slave,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->index);
- root = find_root(&ns->node);
- mutex_lock(&root->chain_lock);
- MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
- }
-
- err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
- mutex_unlock(&root->chain_lock);
-
- return err;
-}
-
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave)
{
@@ -2613,15 +2700,10 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
{
int err;
- err = esw_set_uplink_slave_ingress_root(master_esw->dev,
- slave_esw->dev);
- if (err)
- return -EINVAL;
-
err = esw_set_slave_root_fdb(master_esw->dev,
slave_esw->dev);
if (err)
- goto err_fdb;
+ return err;
err = esw_set_master_egress_rule(master_esw->dev,
slave_esw->dev);
@@ -2633,9 +2715,6 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
err_acl:
esw_set_slave_root_fdb(NULL, slave_esw->dev);
-err_fdb:
- esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
-
return err;
}
@@ -2644,7 +2723,6 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
{
esw_unset_master_egress_rule(master_esw->dev);
esw_set_slave_root_fdb(NULL, slave_esw->dev);
- esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -2749,9 +2827,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
- if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
- break;
-
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
@@ -2803,6 +2878,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
+ if (!mlx5_is_lag_supported(esw->dev))
+ return;
+
mlx5_devcom_register_component(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_esw_offloads_devcom_event,
@@ -2820,6 +2898,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
+ if (!mlx5_is_lag_supported(esw->dev))
+ return;
+
mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
@@ -2838,13 +2919,22 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return false;
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- mlx5_ecpf_vport_exists(esw->dev))
- return false;
-
return true;
}
+#define MLX5_ESW_METADATA_RSVD_UPLINK 1
+
+/* Share the same metadata for uplinks. This is fine because:
+ * (a) In shared FDB mode (LAG) both uplinks are treated the
+ * same and tagged with the same metadata.
+ * (b) In non-shared FDB mode, packets from physical port0
+ * cannot hit the eswitch of PF1 and vice versa.
+ */
+static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
+{
+ return MLX5_ESW_METADATA_RSVD_UPLINK;
+}
+
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
@@ -2859,8 +2949,10 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
return 0;
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
- /* Use only non-zero vport_id (1-4095) for all PF's */
- id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
+ /* Use only non-zero vport_id (2-4095) for all PFs */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
+ MLX5_ESW_METADATA_RSVD_UPLINK + 1,
+ vport_end_ida, GFP_KERNEL);
if (id < 0)
return 0;
id = (pf_num << ESW_VPORT_BITS) | id;
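A self-contained illustration of the metadata layout this hunk relies on; it assumes ESW_VPORT_BITS is 12, matching the "4 bits of PFNUM and 12 bits of unique id" comment, and is not part of the patch:

	#include <stdint.h>

	/* pf_num occupies bits 15..12, the per-PF unique id bits 11..0;
	 * id 0 stays invalid and id 1 is now reserved for the uplink.
	 */
	static inline uint32_t esw_metadata_example(uint32_t pf_num, uint32_t id)
	{
		return (pf_num << 12) | (id & 0xfff);
	}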
@@ -2878,7 +2970,11 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
+ else
+ vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
}
@@ -2889,6 +2985,9 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
if (!vport->default_metadata)
return;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ return;
+
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
@@ -2932,7 +3031,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
int err = 0;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_NONE) {
+ if (mlx5_esw_is_fdb_created(esw)) {
err = -EBUSY;
goto done;
}
@@ -3062,8 +3161,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
+ err = esw_create_vport_rx_drop_group(esw);
+ if (err)
+ goto create_rx_drop_fg_err;
+
+ err = esw_create_vport_rx_drop_rule(esw);
+ if (err)
+ goto create_rx_drop_rule_err;
+
return 0;
+create_rx_drop_rule_err:
+ esw_destroy_vport_rx_drop_group(esw);
+create_rx_drop_fg_err:
+ esw_destroy_vport_rx_group(esw);
create_fg_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
@@ -3081,6 +3192,8 @@ create_indir_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ esw_destroy_vport_rx_drop_rule(esw);
+ esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
@@ -3093,6 +3206,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
+ struct devlink *devlink;
bool host_pf_disabled;
u16 new_num_vfs;
@@ -3104,6 +3218,8 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
return;
+ devlink = priv_to_devlink(esw->dev);
+ devl_lock(devlink);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -3112,10 +3228,13 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
+ if (err) {
+ devl_unlock(devlink);
return;
+ }
}
esw->esw_funcs.num_vfs = new_num_vfs;
+ devl_unlock(devlink);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
@@ -3265,20 +3384,12 @@ err_metadata:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1;
+ int err;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err) {
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
+ if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err1) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed setting eswitch back to offloads");
- }
- }
return err;
}
@@ -3372,15 +3483,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
-{
- /* devlink commands in NONE eswitch mode are currently supported only
- * on ECPF.
- */
- return (esw->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
-}
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3407,6 +3509,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -3417,6 +3520,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
+ mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
@@ -3438,12 +3542,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return PTR_ERR(esw);
down_write(&esw->mode_lock);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
err = esw_mode_to_devlink(esw->mode, mode);
-unlock:
up_write(&esw->mode_lock);
return err;
}
@@ -3492,9 +3591,6 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
return PTR_ERR(esw);
down_write(&esw->mode_lock);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto out;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -3546,12 +3642,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return PTR_ERR(esw);
down_write(&esw->mode_lock);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-unlock:
up_write(&esw->mode_lock);
return err;
}
@@ -3562,16 +3653,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
- int err;
+ int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_write(&esw->mode_lock);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
@@ -3622,22 +3710,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
-
down_write(&esw->mode_lock);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
*encap = esw->offloads.encap;
-unlock:
up_write(&esw->mode_lock);
- return err;
+ return 0;
}
static bool
@@ -3760,12 +3841,14 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err)
goto devlink_err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
goto rep_err;
return 0;
rep_err:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
mlx5_esw_vport_disable(esw, vport_num);
@@ -3775,6 +3858,7 @@ devlink_err:
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 182306bbefaa..108a3503f413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
sizeof(dest->vport.num), hash);
hash = jhash((const void *)&dest->vport.vhca_id,
sizeof(dest->vport.num), hash);
- if (dest->vport.pkt_reformat)
- hash = jhash(dest->vport.pkt_reformat,
- sizeof(*dest->vport.pkt_reformat),
+ if (flow_act->pkt_reformat)
+ hash = jhash(flow_act->pkt_reformat,
+ sizeof(*flow_act->pkt_reformat),
hash);
return hash;
}
@@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
if (ret)
return ret;
- return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
- memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
- sizeof(*dest1->vport.pkt_reformat)) : 0;
+ if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+ return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+ sizeof(*flow_act1->pkt_reformat));
+
+ return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
}
static int
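The rewritten compare returns zero (a match) only when both entries carry no pkt_reformat or carry byte-identical ones. A condensed userspace model of the same logic, not part of the patch:

	#include <stddef.h>
	#include <string.h>

	static int reformat_cmp(const void *r1, const void *r2, size_t len)
	{
		if (r1 && r2)
			return memcmp(r1, r2, len);	/* both set: deep compare */
		return r1 != r2;			/* exactly one set: differ */
	}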
@@ -219,12 +221,14 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- mlx5_esw_attr_flags_skip(attr->flags) ||
+ mlx5e_tc_attr_flags_skip(attr->flags) ||
(!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
/* push vlan on RX */
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
return true;
/* hairpin */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index a1ac3a654962..9459e56ee90a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -36,6 +36,7 @@ static struct mlx5_nb events_nbs_ref[] = {
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_OBJECT_CHANGE },
/* QP/WQ resource events to forward */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
@@ -132,6 +133,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
case MLX5_EVENT_TYPE_DEVICE_TRACER:
return "MLX5_EVENT_TYPE_DEVICE_TRACER";
+ case MLX5_EVENT_TYPE_OBJECT_CHANGE:
+ return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 2ce4241459ce..39c03dcbd196 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 2a984e82ae16..750c32050165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -57,9 +57,6 @@ struct mlx5_fpga_device {
u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
-
- struct mlx5_fpga_ipsec *ipsec;
- struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
deleted file mode 100644
index 8ec148010d62..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/rhashtable.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
-
-#include "mlx5_core.h"
-#include "fs_cmd.h"
-#include "fpga/ipsec.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-
-enum mlx5_fpga_ipsec_cmd_status {
- MLX5_FPGA_IPSEC_CMD_PENDING,
- MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
- MLX5_FPGA_IPSEC_CMD_COMPLETE,
-};
-
-struct mlx5_fpga_ipsec_cmd_context {
- struct mlx5_fpga_dma_buf buf;
- enum mlx5_fpga_ipsec_cmd_status status;
- struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
- int status_code;
- struct completion complete;
- struct mlx5_fpga_device *dev;
- struct list_head list; /* Item in pending_cmds */
- u8 command[];
-};
-
-struct mlx5_fpga_esp_xfrm;
-
-struct mlx5_fpga_ipsec_sa_ctx {
- struct rhash_head hash;
- struct mlx5_ifc_fpga_ipsec_sa hw_sa;
- u32 sa_handle;
- struct mlx5_core_dev *dev;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-};
-
-struct mlx5_fpga_esp_xfrm {
- unsigned int num_rules;
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* xfrm lock */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-struct mlx5_fpga_ipsec_rule {
- struct rb_node node;
- struct fs_fte *fte;
- struct mlx5_fpga_ipsec_sa_ctx *ctx;
-};
-
-static const struct rhashtable_params rhash_sa = {
- /* Keep out "cmd" field from the key as it's
- * value is not constant during the lifetime
- * of the key object.
- */
- .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
- .automatic_shrinking = true,
- .min_size = 1,
-};
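For readers skimming the removal: the params above deliberately hashed only the tail of hw_sa, skipping the mutable cmd opcode. The key window works out to (illustration only, types as in the deleted structs):

	size_t cmd_sz  = sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd);
	size_t key_off = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + cmd_sz;
	size_t key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - cmd_sz;
	/* hash input: bytes [key_off, key_off + key_len) of each sa_ctx */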
-
-struct mlx5_fpga_ipsec {
- struct mlx5_fpga_device *fdev;
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
- struct mlx5_fpga_conn *conn;
-
- struct notifier_block fs_notifier_ingress_bypass;
- struct notifier_block fs_notifier_egress;
-
- /* Map hardware SA --> SA context
- * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
- * We use this hash to avoid duplicate SAs in the FPGA, which
- * aren't allowed
- */
- struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
- struct mutex sa_hash_lock;
-
- /* Tree holding all rules for this fpga device
- * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
- */
- struct rb_root rules_rb;
- struct mutex rules_rb_lock; /* rules lock */
-
- struct ida halloc;
-};
-
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
- return false;
-
- return true;
-}
-
-static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
-
- if (status) {
- context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
- buf);
- mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
- status);
- context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
- complete(&context->complete);
- }
-}
-
-static inline
-int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
-{
- switch (syndrome) {
- case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
- return 0;
- case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
- return -EEXIST;
- case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
- return -EINVAL;
- case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
- return -EIO;
- }
- return -EIO;
-}
-
-static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
- struct mlx5_fpga_ipsec_cmd_context *context;
- enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
- struct mlx5_fpga_device *fdev = cb_arg;
- unsigned long flags;
-
- if (buf->sg[0].size < sizeof(*resp)) {
- mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
- buf->sg[0].size, sizeof(*resp));
- return;
- }
-
- mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
- ntohl(resp->syndrome));
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
- struct mlx5_fpga_ipsec_cmd_context,
- list);
- if (context)
- list_del(&context->list);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (!context) {
- mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
- return;
- }
- mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
-
- syndrome = ntohl(resp->syndrome);
- context->status_code = syndrome_to_errno(syndrome);
- context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
- memcpy(&context->resp, resp, sizeof(*resp));
-
- if (context->status_code)
- mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
- syndrome);
-
- complete(&context->complete);
-}
-
-static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
- const void *cmd, int cmd_size)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned long flags;
- int res;
-
- if (!fdev || !fdev->ipsec)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (cmd_size & 3)
- return ERR_PTR(-EINVAL);
-
- context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
- context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
- context->dev = fdev;
- context->buf.complete = mlx5_fpga_ipsec_send_complete;
- init_completion(&context->complete);
- memcpy(&context->command, cmd, cmd_size);
- context->buf.sg[0].size = cmd_size;
- context->buf.sg[0].data = &context->command;
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
- if (!res)
- list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (res) {
- mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
- kfree(context);
- return ERR_PTR(res);
- }
-
- /* Context should be freed by the caller after completion. */
- return context;
-}
-
-static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
-{
- struct mlx5_fpga_ipsec_cmd_context *context = ctx;
- unsigned long timeout =
- msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
- int res;
-
- res = wait_for_completion_timeout(&context->complete, timeout);
- if (!res) {
- mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
- return -ETIMEDOUT;
- }
-
- if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
- res = context->status_code;
- else
- res = -EIO;
-
- return res;
-}
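The removed driver always paired these two calls: exec queues the command and returns a context, wait blocks on its completion, and the caller frees the context. Condensed from mlx5_fpga_ipsec_update_hw_sa below:

	context = mlx5_fpga_ipsec_cmd_exec(mdev, cmd, cmd_size);
	if (IS_ERR(context))
		return PTR_ERR(context);
	err = mlx5_fpga_ipsec_cmd_wait(context);
	kfree(context);		/* freed by the caller after completion */
	return err;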
-
-static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
-{
- if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
- return true;
- return false;
-}
-
-static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
- int opcode)
-{
- struct mlx5_core_dev *dev = fdev->mdev;
- struct mlx5_ifc_fpga_ipsec_sa *sa;
- struct mlx5_fpga_ipsec_cmd_context *cmd_context;
- size_t sa_cmd_size;
- int err;
-
- hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
- if (is_v2_sadb_supported(fdev->ipsec))
- sa_cmd_size = sizeof(*hw_sa);
- else
- sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
-
- cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
- mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
- if (IS_ERR(cmd_context))
- return PTR_ERR(cmd_context);
-
- err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
- if (err)
- goto out;
-
- sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
- if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
- mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
- ntohl(sa->ipsec_sa_v1.sw_sa_handle),
- ntohl(cmd_context->resp.sw_sa_handle));
- err = -EIO;
- }
-
-out:
- kfree(cmd_context);
- return err;
-}
-
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- u32 ret = 0;
-
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
- ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
- } else {
- return ret;
- }
-
- if (!fdev->ipsec)
- return ret;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
- ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
- ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
- ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
- ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
- ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- return ret;
-}
-
-static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- number_of_ipsec_counters);
-}
-
-static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int counters_count)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned int i;
- __be32 *data;
- u32 count;
- u64 addr;
- int ret;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_low) +
- ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_high) << 32);
-
- count = mlx5_fpga_ipsec_counters_count(mdev);
-
- data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
- MLX5_FPGA_ACCESS_TYPE_DONTCARE);
- if (ret < 0) {
- mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
- ret);
- goto out;
- }
- ret = 0;
-
- if (count > counters_count)
- count = counters_count;
-
- /* Each counter is low word, then high. But each word is big-endian */
- for (i = 0; i < count; i++)
- counters[i] = (u64)ntohl(data[i * 2]) |
- ((u64)ntohl(data[i * 2 + 1]) << 32);
-
-out:
- kfree(data);
- return ret;
-}
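A runnable userspace sketch (not part of the patch) of the word order the loop above decoded: each 64-bit counter arrives as two big-endian 32-bit words, low word first:

	#include <stdint.h>
	#include <arpa/inet.h>	/* ntohl */

	static uint64_t decode_counter(const uint32_t be_words[2])
	{
		return (uint64_t)ntohl(be_words[0]) |
		       ((uint64_t)ntohl(be_words[1]) << 32);
	}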
-
-static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
- int err;
-
- cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
- cmd.flags = htonl(flags);
- context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context))
- return PTR_ERR(context);
-
- err = mlx5_fpga_ipsec_cmd_wait(context);
- if (err)
- goto out;
-
- if ((context->resp.flags & cmd.flags) != cmd.flags) {
- mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
- cmd.flags,
- context->resp.flags);
- err = -EIO;
- }
-
-out:
- kfree(context);
- return err;
-}
-
-static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
-{
- u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
- u32 flags = 0;
-
- if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
- flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
-
- return mlx5_fpga_ipsec_set_caps(mdev, flags);
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
-
- /* key */
- memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
- aes_gcm->key_len / 8);
- /* Duplicate 128 bit key twice according to HW layout */
- if (aes_gcm->key_len == 128)
- memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
- aes_gcm->aes_key, aes_gcm->key_len / 8);
-
- /* salt and seq_iv */
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
- sizeof(aes_gcm->seq_iv));
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
- sizeof(aes_gcm->salt));
-
- /* esn */
- if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags |=
- (xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = htonl(xfrm_attrs->esn);
- } else {
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags &=
- ~(xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = 0;
- }
-
- /* rx handle */
- hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
-
- /* enc mode */
- switch (aes_gcm->key_len) {
- case 128:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
- break;
- case 256:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
- break;
- }
-
- /* flags */
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
- MLX5_FPGA_IPSEC_SA_SPI_EN |
- MLX5_FPGA_IPSEC_SA_IP_ESP;
-
- if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
- else
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
-
- /* IPs */
- memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
- memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
-
- /* SPI */
- hw_sa->ipsec_sa_v1.spi = spi;
-
- /* flags */
- if (is_ipv6)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
-}
-
-static bool is_full_mask(const void *p, size_t len)
-{
- WARN_ON(len % 4);
-
- return !memchr_inv(p, 0xff, len);
-}
-
-static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
- const u32 *match_c,
- const u32 *match_v)
-{
- const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- misc_parameters);
- const void *headers_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param,
- match_v,
- outer_headers);
-
- if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
- const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4);
- const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-
- if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)) ||
- !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)))
- return false;
- } else {
- const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6);
- const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
-
- if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)) ||
- !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)))
- return false;
- }
-
- if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
- outer_esp_spi),
- MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v)
-{
- u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
- bool ipv6_flow;
-
- ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
-
- if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
- mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
- mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
- mlx5_fs_is_vxlan_flow(match_c) ||
- !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
- ipv6_flow))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
- mlx5_fs_is_outer_ipsec_flow(match_c))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
- ipv6_flow)
- return false;
-
- if (!validate_fpga_full_mask(dev, match_c, match_v))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_context *flow_context)
-{
- const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
- bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
-	bool ret;
-
- ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
- match_v);
- if (!ret)
- return ret;
-
- if (is_dmac || is_smac ||
- (match_criteria_enable &
- ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
- (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
- return false;
-
- return true;
-}
-
-static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle)
-{
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(accel_xfrm, typeof(*fpga_xfrm),
- accel_xfrm);
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode, err;
- void *context;
-
- /* alloc SA */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- /* build candidate SA */
- mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
- saddr, daddr, spi, is_ipv6,
- &sa_ctx->hw_sa);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
-		/* all rules must share the same IPs and SPI */
- if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
- sizeof(sa_ctx->hw_sa))) {
- context = ERR_PTR(-EINVAL);
- goto exists;
- }
-
- ++fpga_xfrm->num_rules;
- context = fpga_xfrm->sa_ctx;
- goto exists;
- }
-
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
- if (err < 0) {
- context = ERR_PTR(err);
- goto exists;
- }
-
- sa_ctx->sa_handle = err;
- if (sa_handle)
- *sa_handle = sa_ctx->sa_handle;
- }
-	/* This is an unbound fpga_xfrm, try to add it to the hash */
- mutex_lock(&fipsec->sa_hash_lock);
-
- err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa);
- if (err) {
-		/* Can't bind a different accel_xfrm to an already existing
-		 * sa_ctx. This is because we can't support multiple keymats
-		 * for the same IPs and SPI.
- */
- context = ERR_PTR(-EEXIST);
- goto unlock_hash;
- }
-
-	/* Bind accel_xfrm to sa_ctx */
- opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- context = ERR_PTR(err);
- goto delete_hash;
- }
-
- mutex_unlock(&fipsec->sa_hash_lock);
-
- ++fpga_xfrm->num_rules;
- fpga_xfrm->sa_ctx = sa_ctx;
- sa_ctx->fpga_xfrm = fpga_xfrm;
-
- mutex_unlock(&fpga_xfrm->lock);
-
- return sa_ctx;
-
-delete_hash:
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
-unlock_hash:
- mutex_unlock(&fipsec->sa_hash_lock);
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-exists:
- mutex_unlock(&fpga_xfrm->lock);
- kfree(sa_ctx);
- return context;
-}
-
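mlx5_fpga_ipsec_create_sa_ctx() above follows a share-or-create pattern:
the first rule on an fpga_xfrm installs the hardware SA, and later rules
only take a reference, provided their IPs and SPI match. A condensed,
hypothetical sketch of that control flow (locking, the IDA handle and the
hardware command elided; all names here are illustrative):

	struct xfrm_sketch {
		int num_rules;
		void *sa_ctx;
	};

	static void *get_or_create_sa(struct xfrm_sketch *x, void *candidate,
				      int (*same_sa)(void *a, void *b),
				      void *(*install)(void *candidate))
	{
		if (x->sa_ctx) {
			/* later rule on the same xfrm: params must match */
			if (!same_sa(x->sa_ctx, candidate))
				return NULL;
			x->num_rules++;
			return x->sa_ctx;
		}
		x->sa_ctx = install(candidate);	/* first rule creates it */
		if (x->sa_ctx)
			x->num_rules++;
		return x->sa_ctx;
	}
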
-static void *
-mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- bool is_egress)
-{
- struct mlx5_accel_esp_xfrm *accel_xfrm;
- __be32 saddr[4], daddr[4], spi;
- struct mlx5_flow_group *fg;
- bool is_ipv6 = false;
-
- fs_get_obj(fg, fte->node.parent);
- /* validate */
- if (is_egress &&
- !mlx5_is_fpga_egress_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val,
- &fte->action,
- &fte->flow_context))
- return ERR_PTR(-EINVAL);
- else if (!mlx5_is_fpga_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val))
- return ERR_PTR(-EINVAL);
-
- /* get xfrm context */
- accel_xfrm =
- (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
-
- /* IPs */
- if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
- fte->val)) {
- memcpy(&saddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
- sizeof(saddr[3]));
- memcpy(&daddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- sizeof(daddr[3]));
- } else {
- memcpy(saddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(saddr));
- memcpy(daddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(daddr));
- is_ipv6 = true;
- }
-
- /* SPI */
- spi = MLX5_GET_BE(typeof(spi),
- fte_match_param, fte->val,
- misc_parameters.outer_esp_spi);
-
- /* create */
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
- saddr, daddr,
- spi, is_ipv6, NULL);
-}
-
-static void
-mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
-{
- struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
- int err;
-
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- WARN_ON(err);
- return;
- }
-
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
- MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-
- mutex_lock(&fipsec->sa_hash_lock);
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
- mutex_unlock(&fipsec->sa_hash_lock);
-}
-
-static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
-
- mutex_lock(&fpga_xfrm->lock);
- if (!--fpga_xfrm->num_rules) {
- mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
- kfree(fpga_xfrm->sa_ctx);
- fpga_xfrm->sa_ctx = NULL;
- }
- mutex_unlock(&fpga_xfrm->lock);
-}
-
-static inline struct mlx5_fpga_ipsec_rule *
-_rule_search(struct rb_root *root, struct fs_fte *fte)
-{
- struct rb_node *node = root->rb_node;
-
- while (node) {
- struct mlx5_fpga_ipsec_rule *rule =
- container_of(node, struct mlx5_fpga_ipsec_rule,
- node);
-
- if (rule->fte < fte)
- node = node->rb_left;
- else if (rule->fte > fte)
- node = node->rb_right;
- else
- return rule;
- }
- return NULL;
-}
-
-static struct mlx5_fpga_ipsec_rule *
-rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
-{
- struct mlx5_fpga_ipsec_rule *rule;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rule = _rule_search(&ipsec_dev->rules_rb, fte);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return rule;
-}
-
-static inline int _rule_insert(struct rb_root *root,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_node **new = &root->rb_node, *parent = NULL;
-
- /* Figure out where to put new node */
- while (*new) {
- struct mlx5_fpga_ipsec_rule *this =
- container_of(*new, struct mlx5_fpga_ipsec_rule,
- node);
-
- parent = *new;
- if (rule->fte < this->fte)
- new = &((*new)->rb_left);
- else if (rule->fte > this->fte)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&rule->node, parent, new);
- rb_insert_color(&rule->node, root);
-
- return 0;
-}
-
-static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- int ret;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- ret = _rule_insert(&ipsec_dev->rules_rb, rule);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return ret;
-}
-
-static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_root *root = &ipsec_dev->rules_rb;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rb_erase(&rule->node, root);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-}
-
-static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- _rule_delete(ipsec_dev, rule);
- kfree(rule);
-}
-
-struct mailbox_mod {
- uintptr_t saved_esp_id;
- u32 saved_action;
- u32 saved_outer_esp_spi_value;
-};
-
-static void restore_spec_mailbox(struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- mbox_mod->saved_outer_esp_spi_value);
- fte->action.action |= mbox_mod->saved_action;
- fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
-}
-
-static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- mbox_mod->saved_esp_id = fte->action.esp_id;
- mbox_mod->saved_action = fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- mbox_mod->saved_outer_esp_spi_value =
- MLX5_GET(fte_match_set_misc, misc_params_v,
- outer_esp_spi);
-
- fte->action.esp_id = 0;
- fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- if (!MLX5_CAP_FLOWTABLE(mdev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
-}
-
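modify_spec_mailbox() and restore_spec_mailbox() form a save-mutate-restore
pair: the ESP-specific fields are hidden from the firmware for the duration
of a single command and then put back in the software copy. Every fte hook
below uses them the same way; condensed from fpga_ipsec_fs_create_fte():

	modify_spec_mailbox(dev, fte, &mbox_mod);	/* hide ESP state */
	ret = create_fte(ns, ft, fg, fte);		/* firmware command */
	restore_spec_mailbox(fte, &mbox_mod);		/* restore sw view */
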
-static enum fs_flow_table_type egress_to_fs_ft(bool egress)
-{
- return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
-}
-
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg,
- bool is_egress)
-{
- int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft, u32 *in,
- struct mlx5_flow_group *fg) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
- char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.misc_parameters);
- struct mlx5_core_dev *dev = ns->dev;
- u32 saved_outer_esp_spi_mask;
- u8 match_criteria_enable;
- int ret;
-
- if (MLX5_CAP_FLOWTABLE(dev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- return create_flow_group(ns, ft, in, fg);
-
- match_criteria_enable =
- MLX5_GET(create_flow_group_in, in, match_criteria_enable);
- saved_outer_esp_spi_mask =
- MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
- if (!match_criteria_enable || !saved_outer_esp_spi_mask)
- return create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
-
- if (!(*misc_params_c) &&
- !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
- MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
-
- ret = create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
- MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*create_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return create_fte(ns, ft, fg, fte);
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
- return -ENOMEM;
-
- rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
- if (IS_ERR(rule->ctx)) {
- int err = PTR_ERR(rule->ctx);
-
- kfree(rule);
- return err;
- }
-
- rule->fte = fte;
- WARN_ON(rule_insert(fipsec, rule));
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = create_fte(ns, ft, fg, fte);
- restore_spec_mailbox(fte, &mbox_mod);
- if (ret) {
- _rule_delete(fipsec, rule);
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- kfree(rule);
- }
-
- return ret;
-}
-
-static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*update_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
- struct mlx5_core_dev *dev = ns->dev;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return update_fte(ns, ft, fg, modify_mask, fte);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = update_fte(ns, ft, fg, modify_mask, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return delete_fte(ns, ft, fte);
-
- rule = rule_search(fipsec, fte);
- if (!rule)
- return -ENOENT;
-
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- rule_delete(fipsec, rule);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = delete_fte(ns, ft, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
-}
-
-static struct mlx5_flow_cmds fpga_ipsec_ingress;
-static struct mlx5_flow_cmds fpga_ipsec_egress;
-
-const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- switch (type) {
- case FS_FT_NIC_RX:
- return &fpga_ipsec_ingress;
- case FS_FT_NIC_TX:
- return &fpga_ipsec_egress;
- default:
- WARN_ON(true);
- return NULL;
- }
-}
-
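The returned command set is a per-table-type vtable. A hypothetical caller
(sketch only, not driver code; the helper name is illustrative) would fetch
it once and dispatch through it:

	static int ipsec_create_fte_sketch(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_group *fg,
					   struct fs_fte *fte, bool egress)
	{
		const struct mlx5_flow_cmds *cmds =
			mlx5_fs_cmd_get_default_ipsec_fpga_cmds(egress ?
								FS_FT_NIC_TX :
								FS_FT_NIC_RX);

		return cmds ? cmds->create_fte(ns, ft, fg, fte) : -EINVAL;
	}
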
-static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn *conn;
- int err;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return 0;
-
- fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
- if (!fdev->ipsec)
- return -ENOMEM;
-
- fdev->ipsec->fdev = fdev;
-
- err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
- fdev->ipsec->caps);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
- err);
- goto error;
- }
-
- INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
- spin_lock_init(&fdev->ipsec->pending_cmds_lock);
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_ipsec_recv;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
- err);
- goto error;
- }
- fdev->ipsec->conn = conn;
-
- err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
- if (err)
- goto err_destroy_conn;
- mutex_init(&fdev->ipsec->sa_hash_lock);
-
- fdev->ipsec->rules_rb = RB_ROOT;
- mutex_init(&fdev->ipsec->rules_rb_lock);
-
- err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
- err);
- goto err_destroy_hash;
- }
-
- ida_init(&fdev->ipsec->halloc);
-
- return 0;
-
-err_destroy_hash:
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
-err_destroy_conn:
- mlx5_fpga_sbu_conn_destroy(conn);
-
-error:
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
- return err;
-}
-
-static void destroy_rules_rb(struct rb_root *root)
-{
- struct mlx5_fpga_ipsec_rule *r, *tmp;
-
- rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
- rb_erase(&r->node, root);
- mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
- kfree(r);
- }
-}
-
-static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return;
-
- ida_destroy(&fdev->ipsec->halloc);
- destroy_rules_rb(&fdev->ipsec->rules_rb);
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
- mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
-}
-
-void mlx5_fpga_ipsec_build_fs_cmds(void)
-{
- /* ingress */
- fpga_ipsec_ingress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
- fpga_ipsec_ingress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
- fpga_ipsec_ingress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
- fpga_ipsec_ingress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_ingress;
- fpga_ipsec_ingress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
- fpga_ipsec_ingress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_ingress;
- fpga_ipsec_ingress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_ingress;
- fpga_ipsec_ingress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_ingress;
- fpga_ipsec_ingress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
-
- /* egress */
- fpga_ipsec_egress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
- fpga_ipsec_egress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
- fpga_ipsec_egress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
- fpga_ipsec_egress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_egress;
- fpga_ipsec_egress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
- fpga_ipsec_egress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_egress;
- fpga_ipsec_egress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_egress;
- fpga_ipsec_egress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_egress;
- fpga_ipsec_egress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
-}
-
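mlx5_fpga_ipsec_build_fs_cmds() is effectively a decorator: most entries
delegate to the default firmware command set, and only the flow group and
fte hooks are overridden. The same idea in miniature (illustrative C with
made-up names, not driver code):

	struct ops_sketch {
		int (*create)(void);
		int (*destroy)(void);
	};

	static struct ops_sketch wrapped;

	static int create_with_hook(void)
	{
		/* extra IPsec bookkeeping would go here */
		return 0;
	}

	static void build_wrapped(const struct ops_sketch *defaults)
	{
		wrapped = *defaults;			/* inherit every op */
		wrapped.create = create_with_hook;	/* override one */
	}
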
-static int
-mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->tfc_pad) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.icv_len != 128) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
- v2_command))) {
-		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered and no v2 command support\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-
- if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
- mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
- if (!fpga_xfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&fpga_xfrm->lock);
- memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
- sizeof(fpga_xfrm->accel_xfrm.attrs));
-
- return &fpga_xfrm->accel_xfrm;
-}
-
-static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(xfrm, struct mlx5_fpga_esp_xfrm,
- accel_xfrm);
-	/* assuming no sa_ctx is connected to this xfrm_ctx */
- kfree(fpga_xfrm);
-}
-
-static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
- struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return -EOPNOTSUPP;
- }
-
- if (is_v2_sadb_supported(fipsec)) {
- mlx5_core_warn(mdev, "Modify esp is not supported\n");
- return -EOPNOTSUPP;
- }
-
- fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (!fpga_xfrm->sa_ctx)
-		/* Unbound xfrm, change only sw attrs */
- goto change_sw_xfrm_attrs;
-
- /* copy original hw sa */
- memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
- mutex_lock(&fipsec->sa_hash_lock);
- /* remove original hw sa from hash */
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa));
-	/* update hw_sa with new xfrm attrs */
- mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
- &fpga_xfrm->sa_ctx->hw_sa);
- /* try to insert new hw_sa to hash */
- err = rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa);
- if (err)
- goto rollback_sa;
-
- /* modify device with new hw_sa */
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
- fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err)
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
-rollback_sa:
- if (err) {
- /* return original hw_sa to hash */
- memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
- sizeof(org_hw_sa));
- WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
- }
- mutex_unlock(&fipsec->sa_hash_lock);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
- mutex_unlock(&fpga_xfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
- .device_caps = mlx5_fpga_ipsec_device_caps,
- .counters_count = mlx5_fpga_ipsec_counters_count,
- .counters_read = mlx5_fpga_ipsec_counters_read,
- .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
- .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
- .init = mlx5_fpga_ipsec_init,
- .cleanup = mlx5_fpga_ipsec_cleanup,
- .esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
- .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
- .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return NULL;
-
- return &fpga_ipsec_ops;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
deleted file mode 100644
index 8931b5584477..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_IPSEC_H__
-#define __MLX5_FPGA_IPSEC_H__
-
-#include "accel/ipsec.h"
-#include "fs_cmd.h"
-
-#ifdef CONFIG_MLX5_FPGA_IPSEC
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
-const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
-void mlx5_fpga_ipsec_build_fs_cmds(void);
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
-#else
-static inline
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{ return NULL; }
-static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- return mlx5_fs_cmd_get_default(type);
-}
-
-static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
-static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif /* CONFIG_MLX5_FPGA_IPSEC */
-#endif /* __MLX5_FPGA_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
deleted file mode 100644
index 29b7339ebfa3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ /dev/null
@@ -1,622 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-#include "fpga/tls.h"
-#include "fpga/cmd.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-#include "accel/tls.h"
-
-struct mlx5_fpga_tls_command_context;
-
-typedef void (*mlx5_fpga_tls_command_complete)
- (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *ctx,
- struct mlx5_fpga_dma_buf *resp);
-
-struct mlx5_fpga_tls_command_context {
- struct list_head list;
- /* There is no guarantee on the order between the TX completion
- * and the command response.
- * The TX completion is going to touch cmd->buf even in
- * the case of successful transmission.
- * So instead of requiring separate allocations for cmd
- * and cmd->buf we've decided to use a reference counter
- */
- refcount_t ref;
- struct mlx5_fpga_dma_buf buf;
- mlx5_fpga_tls_command_complete complete;
-};
-
-static void
-mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
-{
- if (refcount_dec_and_test(&ctx->ref))
- kfree(ctx);
-}
-
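Per the comment in struct mlx5_fpga_tls_command_context, the command and
its DMA buffer share one allocation whose lifetime is governed by a
refcount with exactly two owners. Condensed from the flow below:

	refcount_set(&cmd->ref, 2);	/* TX completion + response */

	/* TX completion path (mlx5_fpga_cmd_send_complete) */
	mlx5_fpga_tls_put_command_ctx(ctx);

	/* response path (the command's completion callback) */
	mlx5_fpga_tls_put_command_ctx(ctx);	/* last put frees ctx */
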
-static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_fpga_conn *conn = fdev->tls->conn;
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- ctx = list_first_entry(&tls->pending_cmds,
- struct mlx5_fpga_tls_command_context, list);
- list_del(&ctx->list);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
- ctx->complete(conn, fdev, ctx, resp);
-}
-
-static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_tls_command_context *ctx =
- container_of(buf, struct mlx5_fpga_tls_command_context, buf);
-
- mlx5_fpga_tls_put_command_ctx(ctx);
-
- if (unlikely(status))
- mlx5_fpga_tls_cmd_complete(fdev, NULL);
-}
-
-static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- mlx5_fpga_tls_command_complete complete)
-{
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
- int ret;
-
- refcount_set(&cmd->ref, 2);
- cmd->complete = complete;
- cmd->buf.complete = mlx5_fpga_cmd_send_complete;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
- * to make sure commands are inserted to the tls->pending_cmds list
- * and the command QP in the same order.
- */
- ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
- if (likely(!ret))
- list_add_tail(&cmd->list, &tls->pending_cmds);
- else
- complete(tls->conn, fdev, cmd, NULL);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
-}
-
-/* Start of context identifiers range (inclusive) */
-#define SWID_START 0
-/* End of context identifiers range (exclusive) */
-#define SWID_END BIT(24)
-
-static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
- void *ptr)
-{
- unsigned long flags;
- int ret;
-
-	/* The TLS metadata format is 1 byte for the syndrome followed
-	 * by 3 bytes of swid (software ID), so a swid must not exceed
-	 * 3 bytes. See tls_rxtx.c:insert_pet() for details.
- */
- BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(idr_spinlock, flags);
- ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irqrestore(idr_spinlock, flags);
- idr_preload_end();
-
- return ret;
-}
-
-static void *mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
-{
- unsigned long flags;
- void *ptr;
-
- spin_lock_irqsave(idr_spinlock, flags);
- ptr = idr_remove(idr, swid);
- spin_unlock_irqrestore(idr_spinlock, flags);
- return ptr;
-}
-
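The two helpers pair up as an allocate / lookup-and-remove cycle keyed by
the 24-bit swid; mlx5_fpga_tls_add_flow() and mlx5_fpga_tls_del_flow()
below use them exactly this way (condensed fragment):

	int swid = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
					    &tls->tx_idr_spinlock, flow);
	if (swid < 0)
		return swid;		/* IDR allocation failed */

	/* ... offload runs, metadata carries the 3-byte swid ... */

	flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
					  &tls->tx_idr_spinlock, swid);
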
-static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf, u8 status)
-{
- kfree(buf);
-}
-
-static void
-mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- if (resp) {
- u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
-
- if (syndrome)
- mlx5_fpga_err(fdev,
- "Teardown stream failed with syndrome = %d",
- syndrome);
- }
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
-{
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
- MLX5_BYTE_OFF(tls_flow, ipv6));
-
- MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
- MLX5_SET(tls_cmd, cmd, direction_sx,
- MLX5_GET(tls_flow, flow, direction_sx));
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- struct mlx5_fpga_dma_buf *buf;
- int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
- void *flow;
- void *cmd;
- int ret;
-
- buf = kzalloc(size, GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
- cmd = (buf + 1);
-
- rcu_read_lock();
- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- if (unlikely(!flow)) {
- rcu_read_unlock();
- WARN_ONCE(1, "Received NULL pointer for handle\n");
- kfree(buf);
- return -EINVAL;
- }
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- rcu_read_unlock();
-
- MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
- MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
- MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- buf->complete = mlx_tls_kfree_complete;
-
- ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
- if (ret < 0)
- kfree(buf);
-
- return ret;
-}
-
-static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
- void *flow, u32 swid, gfp_t flags)
-{
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_dma_buf *buf;
- void *cmd;
-
- ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
- if (!ctx)
- return;
-
- buf = &ctx->buf;
- cmd = (ctx + 1);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
- MLX5_SET(tls_cmd, cmd, swid, swid);
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- kfree(flow);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
- mlx5_fpga_tls_teardown_completion);
-}
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- void *flow;
-
- if (direction_sx)
- flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock,
- swid);
- else
- flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock,
- swid);
-
- if (!flow) {
- mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
- swid);
- return;
- }
-
- synchronize_rcu(); /* before kfree(flow) */
- mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
-}
-
-enum mlx5_fpga_setup_stream_status {
- MLX5_FPGA_CMD_PENDING,
- MLX5_FPGA_CMD_SEND_FAILED,
- MLX5_FPGA_CMD_RESPONSE_RECEIVED,
- MLX5_FPGA_CMD_ABANDONED,
-};
-
-struct mlx5_setup_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- atomic_t status;
- u32 syndrome;
- struct completion comp;
-};
-
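The status field implements a small ownership handshake: the waiter and
the completion handler each atomically swap in their own terminal state,
and whichever side observes the other's state takes over cleanup. Stripped
to its essentials (mirrors mlx5_fpga_tls_setup_completion() and
mlx5_fpga_tls_setup_stream_cmd() below; teardown() stands in for the real
cleanup):

	/* completion side */
	old = atomic_xchg_release(&ctx->status, MLX5_FPGA_CMD_RESPONSE_RECEIVED);
	if (old != MLX5_FPGA_CMD_ABANDONED)
		complete(&ctx->comp);	/* waiter still present */
	else
		teardown(ctx);		/* waiter gave up */

	/* waiter side, after wait_for_completion_killable() */
	old = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
	if (old == MLX5_FPGA_CMD_PENDING)
		return -EINTR;		/* completion cleans up */
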
-static void
-mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_setup_stream_context *ctx =
- container_of(cmd, struct mlx5_setup_stream_context, cmd);
- int status = MLX5_FPGA_CMD_SEND_FAILED;
- void *tls_cmd = ctx + 1;
-
-	/* If we failed to send the command, resp == NULL */
- if (resp) {
- ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
- status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
- }
-
- status = atomic_xchg_release(&ctx->status, status);
- if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
- complete(&ctx->comp);
- return;
- }
-
- mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
- ctx->syndrome);
-
- if (!ctx->syndrome) {
- /* The process was killed while waiting for the context to be
- * added, and the add completed successfully.
-		 * We need to destroy the HW context, and we can't reuse
- * the command context because we might not have received
- * the tx completion yet.
- */
- mlx5_fpga_tls_del_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC,
- MLX5_GET(tls_cmd, tls_cmd,
- direction_sx));
- }
-
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
- struct mlx5_setup_stream_context *ctx)
-{
- struct mlx5_fpga_dma_buf *buf;
- void *cmd = ctx + 1;
- int status, ret = 0;
-
- buf = &ctx->cmd.buf;
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
-
- init_completion(&ctx->comp);
- atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
- ctx->syndrome = -1;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
- mlx5_fpga_tls_setup_completion);
- wait_for_completion_killable(&ctx->comp);
-
- status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
- if (unlikely(status == MLX5_FPGA_CMD_PENDING))
- /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
- return -EINTR;
-
- if (unlikely(ctx->syndrome))
- ret = -ENOMEM;
-
- mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
- return ret;
-}
-
-static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
- struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
-
- mlx5_fpga_tls_cmd_complete(fdev, buf);
-}
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
- return false;
-
- return true;
-}
-
-static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
- u32 *p_caps)
-{
- int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
- u32 caps = 0;
- void *buf;
-
- buf = kzalloc(cap_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
- if (err)
- goto out;
-
- if (MLX5_GET(tls_extended_cap, buf, tx))
- caps |= MLX5_ACCEL_TLS_TX;
- if (MLX5_GET(tls_extended_cap, buf, rx))
- caps |= MLX5_ACCEL_TLS_RX;
- if (MLX5_GET(tls_extended_cap, buf, tls_v12))
- caps |= MLX5_ACCEL_TLS_V12;
- if (MLX5_GET(tls_extended_cap, buf, tls_v13))
- caps |= MLX5_ACCEL_TLS_V13;
- if (MLX5_GET(tls_extended_cap, buf, lro))
- caps |= MLX5_ACCEL_TLS_LRO;
- if (MLX5_GET(tls_extended_cap, buf, ipv6))
- caps |= MLX5_ACCEL_TLS_IPV6;
-
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
- caps |= MLX5_ACCEL_TLS_AES_GCM128;
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
- caps |= MLX5_ACCEL_TLS_AES_GCM256;
-
- *p_caps = caps;
- err = 0;
-out:
- kfree(buf);
- return err;
-}
-
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_conn *conn;
- struct mlx5_fpga_tls *tls;
- int err = 0;
-
- if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
- if (err)
- goto error;
-
- if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
-		err = -EOPNOTSUPP;
- goto error;
- }
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
- err);
- goto error;
- }
-
- tls->conn = conn;
- spin_lock_init(&tls->pending_cmds_lock);
- INIT_LIST_HEAD(&tls->pending_cmds);
-
- idr_init(&tls->tx_idr);
- idr_init(&tls->rx_idr);
- spin_lock_init(&tls->tx_idr_spinlock);
- spin_lock_init(&tls->rx_idr_spinlock);
- fdev->tls = tls;
- return 0;
-
-error:
- kfree(tls);
- return err;
-}
-
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->tls)
- return;
-
- mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
- kfree(fdev->tls);
- fdev->tls = NULL;
-}
-
-static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
- struct tls_crypto_info *info,
- __be64 *rcd_sn)
-{
- struct tls12_crypto_info_aes_gcm_128 *crypto_info =
- (struct tls12_crypto_info_aes_gcm_128 *)info;
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
- TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
- crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- /* in AES-GCM 128 we need to write the key twice */
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
- TLS_CIPHER_AES_GCM_128_KEY_SIZE,
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
-}
-
-static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
- struct tls_crypto_info *crypto_info)
-{
- __be64 rcd_sn;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
- return -EINVAL;
- mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 swid, u32 tcp_sn)
-{
- u32 caps = mlx5_fpga_tls_device_caps(mdev);
- struct mlx5_setup_stream_context *ctx;
- int ret = -ENOMEM;
- size_t cmd_size;
- void *cmd;
-
- cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
- ctx = kzalloc(cmd_size, GFP_KERNEL);
- if (!ctx)
- goto out;
-
- cmd = ctx + 1;
- ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
- if (ret)
- goto free_ctx;
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
-
- MLX5_SET(tls_cmd, cmd, swid, swid);
- MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
-
- return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
-
-free_ctx:
- kfree(ctx);
-out:
- return ret;
-}
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- int ret = -ENOMEM;
- u32 swid;
-
- if (direction_sx)
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, flow);
- else
- ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, flow);
-
- if (ret < 0)
- return ret;
-
- swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
-
- ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
- if (ret && ret != -EINTR)
- goto free_swid;
-
- *p_swid = swid;
- return 0;
-free_swid:
- if (direction_sx)
- mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, swid);
- else
- mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, swid);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
deleted file mode 100644
index 5714cf391d1b..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_TLS_H__
-#define __MLX5_FPGA_TLS_H__
-
-#include <linux/mlx5/driver.h>
-
-#include <net/tls.h>
-#include "fpga/core.h"
-
-struct mlx5_fpga_tls {
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps;
- struct mlx5_fpga_conn *conn;
-
- struct idr tx_idr;
- struct idr rx_idr;
- spinlock_t tx_idr_spinlock; /* protects the IDR */
- spinlock_t rx_idr_spinlock; /* protects the IDR */
-};
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx);
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
-
-static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mdev->fpga->tls->caps;
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-
-#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index dafe341358c7..32d4c967469c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,10 +50,12 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
- ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+ int max_fte = ft_attr->max_fte;
+
+ ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
return 0;
}
@@ -152,6 +154,12 @@ static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
return 0;
}
+static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave,
bool ft_id_valid,
@@ -252,7 +260,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -261,17 +269,19 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ unsigned int size;
int err;
- if (size != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(size);
- size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (ft_attr->max_fte != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(ft_attr->max_fte);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
@@ -449,7 +459,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
continue;
if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
@@ -472,6 +483,30 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
return 0;
}
+
+static void
+mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
+{
+ void *exe_aso_ctrl;
+ void *execute_aso;
+
+ execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
+ execute_aso[0]);
+ MLX5_SET(execute_aso, execute_aso, valid, 1);
+ MLX5_SET(execute_aso, execute_aso, aso_object_id,
+ fte->action.exe_aso.object_id);
+
+ exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
+ fte->action.exe_aso.return_reg_id);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
+ fte->action.exe_aso.type);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
+ fte->action.exe_aso.flow_meter.init_color);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
+ fte->action.exe_aso.flow_meter.meter_idx);
+}
+
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
@@ -542,7 +577,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
- MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
+ fte->action.crypto.type);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
+ fte->action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
@@ -565,18 +603,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
- unsigned int id, type = dst->dest_attr.type;
+ enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ enum mlx5_ifc_flow_destination_type ifc_type;
+ unsigned int id;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ continue;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
id = dst->dest_attr.ft_num;
- type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
@@ -590,8 +633,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
/* destination_id is reserved */
id = 0;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
break;
}
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
@@ -606,13 +651,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
id = dst->dest_attr.sampler_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
default:
id = dst->dest_attr.tir_num;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
}
MLX5_SET(dest_format_struct, in_dests, destination_type,
- type);
+ ifc_type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += dst_cnt_size;
list_size++;
@@ -647,6 +694,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
kvfree(in);
@@ -866,15 +922,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
table_type = FS_FT_FDB;
break;
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
-#ifdef CONFIG_MLX5_IPSEC
- case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
-#endif
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
@@ -971,6 +1027,12 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}
+static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -990,6 +1052,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_get_capabilities,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
@@ -1011,6 +1074,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_stub_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 220ec632d35a..8ef4254b9ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
@@ -101,6 +101,9 @@ struct mlx5_flow_cmds {
u16 format_id, u32 *match_mask);
int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
int definer_id);
+
+ u32 (*get_capabilities)(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b628917e38e4..d53749248fa0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,8 +40,6 @@
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -106,6 +104,10 @@
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
+#define KERNEL_RX_MACSEC_NUM_PRIOS 1
+#define KERNEL_RX_MACSEC_NUM_LEVELS 2
+#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
+
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
@@ -116,7 +118,7 @@
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
#define KERNEL_NIC_TC_NUM_PRIOS 1
-#define KERNEL_NIC_TC_NUM_LEVELS 2
+#define KERNEL_NIC_TC_NUM_LEVELS 3
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
@@ -128,11 +130,15 @@
#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
-#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
-#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
+#define KERNEL_TX_MACSEC_NUM_PRIOS 1
+#define KERNEL_TX_MACSEC_NUM_LEVELS 2
+#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
struct node_caps {
size_t arr_sz;
@@ -151,12 +157,16 @@ static struct init_tree_node {
enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 7,
+ .ar_size = 8,
.children = (struct init_tree_node[]){
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
+ KERNEL_RX_MACSEC_NUM_LEVELS))),
ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
@@ -188,24 +198,23 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
-#ifdef CONFIG_MLX5_IPSEC
- .ar_size = 2,
-#else
- .ar_size = 1,
-#endif
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
-#ifdef CONFIG_MLX5_IPSEC
- ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+ ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
-#endif
+ ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
+ KERNEL_TX_MACSEC_NUM_LEVELS))),
}
};
@@ -432,6 +441,16 @@ static bool is_fwd_next_action(u32 action)
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}
+static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
+{
+ return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
+ type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
+ type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
+ type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
+ type == MLX5_FLOW_DESTINATION_TYPE_TIR;
+}
+
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
int i;
@@ -558,8 +577,8 @@ static void del_sw_hw_rule(struct fs_node *node)
mutex_unlock(&rule->dest_attr.ft->lock);
}
- if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
- --fte->dests_size) {
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
+ --fte->dests_size;
fte->modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
@@ -567,17 +586,23 @@ static void del_sw_hw_rule(struct fs_node *node)
goto out;
}
- if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
- --fte->dests_size) {
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ --fte->dests_size;
fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
- if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
- --fte->dests_size) {
+ if (is_fwd_dest_type(rule->dest_attr.type)) {
+ --fte->dests_size;
+ --fte->fwd_dests;
+
+ if (!fte->fwd_dests)
+ fte->action.action &=
+ ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte->modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ goto out;
}
out:
kfree(rule);
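The fwd_dests counter introduced in this hunk lets del_sw_hw_rule() clear MLX5_FLOW_CONTEXT_ACTION_FWD_DEST only once the last forwarding destination is gone, rather than keying off dests_size alone. A minimal userspace model of that invariant (types and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct fte_model {
	int dests_size;
	int fwd_dests;
	bool fwd_action;	/* MLX5_FLOW_CONTEXT_ACTION_FWD_DEST stand-in */
};

static void del_fwd_dest(struct fte_model *fte)
{
	--fte->dests_size;
	if (--fte->fwd_dests == 0)
		fte->fwd_action = false;	/* last fwd dest gone */
}

int main(void)
{
	struct fte_model fte = { .dests_size = 2, .fwd_dests = 2,
				 .fwd_action = true };

	del_fwd_dest(&fte);
	printf("fwd action after 1st del: %d\n", fte.fwd_action);	/* 1 */
	del_fwd_dest(&fte);
	printf("fwd action after 2nd del: %d\n", fte.fwd_action);	/* 0 */
	return 0;
}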
@@ -597,6 +622,7 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
+ WARN_ON(fte->dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
if (node->active) {
@@ -1146,7 +1172,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
if (err)
goto free_ft;
@@ -1186,6 +1212,12 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_flow_table);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
+{
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_flow_table_id);
+
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr, u16 vport)
@@ -1296,6 +1328,8 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
rule->node.type = FS_TYPE_FLOW_DEST;
if (dest)
memcpy(&rule->dest_attr, dest, sizeof(*dest));
+ else
+ rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
return rule;
}
@@ -1372,6 +1406,9 @@ create_flow_handle(struct fs_fte *fte,
if (dest) {
fte->dests_size++;
+ if (is_fwd_dest_type(dest[i].type))
+ fte->fwd_dests++;
+
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
*modify_mask |= type ? count : dst;
@@ -1560,9 +1597,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
return NULL;
}
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+ const struct mlx5_fs_vlan *vlan1)
{
- u32 xored_actions = action1 ^ action2;
+ return vlan0->ethtype != vlan1->ethtype ||
+ vlan0->vid != vlan1->vid ||
+ vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+ const struct mlx5_flow_act *act2)
+{
+ u32 action1 = act1->action;
+ u32 action2 = act2->action;
+ u32 xored_actions;
+
+ xored_actions = action1 ^ action2;
/* if one rule only wants to count, it's ok */
if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1579,6 +1629,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ act1->pkt_reformat != act2->pkt_reformat)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ act1->modify_hdr != act2->modify_hdr)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+ return true;
+
return false;
}
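check_conflicting_actions() still reduces to an XOR test: a count-only rule may share an FTE with anything, while steering actions must not diverge; the new checks extend the same idea to the packet-reformat, modify-header, and VLAN arguments. A compact userspace model of the bitmask half, with made-up action bits:

#include <stdbool.h>
#include <stdio.h>

enum {
	ACT_ALLOW = 1u << 0,
	ACT_DROP  = 1u << 1,
	ACT_FWD   = 1u << 2,
	ACT_COUNT = 1u << 3,
};

/* A rule that only counts can share an FTE with anything; otherwise
 * the steering actions of the two rules must not diverge. */
static bool actions_conflict(unsigned int a1, unsigned int a2)
{
	if (a1 == ACT_COUNT || a2 == ACT_COUNT)
		return false;
	return ((a1 ^ a2) & (ACT_ALLOW | ACT_DROP | ACT_FWD)) != 0;
}

int main(void)
{
	printf("%d\n", actions_conflict(ACT_FWD | ACT_COUNT, ACT_COUNT)); /* 0 */
	printf("%d\n", actions_conflict(ACT_FWD, ACT_DROP));		  /* 1 */
	return 0;
}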
@@ -1586,7 +1652,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+ if (check_conflicting_actions(flow_act, &fte->action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
@@ -1696,6 +1762,7 @@ static void free_match_list(struct match_list *head, bool ft_locked)
static int build_match_list(struct match_list *match_head,
struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_group *fg,
bool ft_locked)
{
struct rhlist_head *tmp, *list;
@@ -1710,6 +1777,9 @@ static int build_match_list(struct match_list *match_head,
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
+ if (fg && fg != g)
+ continue;
+
if (unlikely(!tree_get_node(&g->node)))
continue;
@@ -1889,6 +1959,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!check_valid_spec(spec))
return ERR_PTR(-EINVAL);
+ if (flow_act->fg && ft->autogroup.active)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
@@ -1898,7 +1971,7 @@ search_again_locked:
version = atomic_read(&ft->node.version);
/* Collect all fgs which has a matching match_criteria */
- err = build_match_list(&match_head, ft, spec, take_write);
+ err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
if (err) {
if (take_write)
up_write_ref_node(&ft->node, false);
@@ -2064,16 +2137,18 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->dests_size) {
- if (fte->modify_mask)
- modify_fte(fte);
- up_write_ref_node(&fte->node, false);
- } else if (list_empty(&fte->node.children)) {
- del_hw_fte(&fte->node);
+ if (list_empty(&fte->node.children)) {
+ fte->node.del_hw_func(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
+ } else {
+ up_write_ref_node(&fte->node, false);
}
kfree(handle);
}
@@ -2211,6 +2286,7 @@ static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_LAG:
case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
@@ -2257,7 +2333,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
prio = FDB_BYPASS_PATH;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
- case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
root_ns = steering->egress_root_ns;
prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
break;
@@ -2510,10 +2587,6 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
- cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
-
/* Create the root namespace */
root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
@@ -2654,28 +2727,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
-
- cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->fdb_root_ns);
- steering->fdb_root_ns = NULL;
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- cleanup_root_ns(steering->port_sel_root_ns);
- cleanup_root_ns(steering->sniffer_rx_root_ns);
- cleanup_root_ns(steering->sniffer_tx_root_ns);
- cleanup_root_ns(steering->rdma_rx_root_ns);
- cleanup_root_ns(steering->rdma_tx_root_ns);
- cleanup_root_ns(steering->egress_root_ns);
- mlx5_cleanup_fc_stats(dev);
- kmem_cache_destroy(steering->ftes_cache);
- kmem_cache_destroy(steering->fgs_cache);
- mlx5_ft_pool_destroy(dev);
- kfree(steering);
-}
-
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
@@ -2869,6 +2920,14 @@ static int create_fdb_bypass(struct mlx5_flow_steering *steering)
return 0;
}
+static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *maj_prio;
@@ -2919,10 +2978,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
return 0;
out_err:
- cleanup_root_ns(steering->fdb_root_ns);
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- steering->fdb_root_ns = NULL;
+ cleanup_fdb_root_ns(steering);
return err;
}
@@ -3040,6 +3096,22 @@ void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
steering->esw_ingress_root_ns = NULL;
}
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+
+ ns = mlx5_get_flow_namespace(dev, type);
+ if (!ns)
+ return 0;
+
+ root = find_root(&ns->node);
+ if (!root)
+ return 0;
+
+ return root->cmds->get_capabilities(root, root->table_type);
+}
+
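mlx5_fs_get_capabilities() hands callers a per-namespace capability bitmask; the FW command backend above answers 0, and the enum added to fs_core.h (later in this diff) defines the VLAN push/pop bits. A userspace sketch of the contract, assuming (not shown in this hunk) that the SW steering backend is the one advertising those bits:

#include <stdbool.h>
#include <stdio.h>

/* Model of the capability contract: a per-table-type bitmask,
 * 0 meaning "no extra capabilities" (the FW command path's answer). */
enum { CAP_VLAN_PUSH_ON_RX = 1u << 0, CAP_VLAN_POP_ON_TX = 1u << 1 };

static unsigned int get_capabilities(bool sw_steering)
{
	/* FW command backend returns 0; SW steering may advertise more */
	return sw_steering ? (CAP_VLAN_PUSH_ON_RX | CAP_VLAN_POP_ON_TX) : 0;
}

int main(void)
{
	unsigned int caps = get_capabilities(true);

	if (caps & CAP_VLAN_POP_ON_TX)
		printf("backend supports VLAN pop on TX\n");
	return 0;
}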
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -3061,42 +3133,24 @@ cleanup:
return err;
}
-int mlx5_init_fs(struct mlx5_core_dev *dev)
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
- struct mlx5_flow_steering *steering;
- int err = 0;
-
- err = mlx5_init_fc_stats(dev);
- if (err)
- return err;
-
- err = mlx5_ft_pool_init(dev);
- if (err)
- return err;
-
- steering = kzalloc(sizeof(*steering), GFP_KERNEL);
- if (!steering) {
- err = -ENOMEM;
- goto err;
- }
-
- steering->dev = dev;
- dev->priv.steering = steering;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
- if (mlx5_fs_dr_is_supported(dev))
- steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
- else
- steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ cleanup_root_ns(steering->root_ns);
+ cleanup_fdb_root_ns(steering);
+ cleanup_root_ns(steering->port_sel_root_ns);
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ cleanup_root_ns(steering->egress_root_ns);
+}
- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
- sizeof(struct mlx5_flow_group), 0,
- 0, NULL);
- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
- 0, NULL);
- if (!steering->ftes_cache || !steering->fgs_cache) {
- err = -ENOMEM;
- goto err;
- }
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err = 0;
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
@@ -3147,16 +3201,71 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
}
return 0;
+
+err:
+ mlx5_fs_core_cleanup(dev);
+ return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
+ kfree(steering);
+ mlx5_ft_pool_destroy(dev);
+ mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering;
+ int err = 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ goto err;
+
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
+ if (mlx5_fs_dr_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
err:
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_free(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5469b08d635f..3af50fd04d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -120,6 +120,11 @@ enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_SMFS
};
+enum mlx5_flow_steering_capabilty {
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
+};
+
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
enum mlx5_flow_steering_mode mode;
@@ -221,6 +226,7 @@ struct fs_fte {
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
+ u32 fwd_dests;
u32 index;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
@@ -293,14 +299,18 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode);
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
+
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2d8406fab844..f34e758a2f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,11 +32,9 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
-#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
-#include "accel/tls.h"
enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1,
@@ -250,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
+ if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
@@ -275,6 +273,19 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, adv_virtualization)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -291,6 +302,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
+ dev->priv.sw_vhca_id > 0)
+ MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
+
return mlx5_cmd_exec_in(dev, init_hca, in);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 0b0234f9d694..9d908a0ccfef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -8,7 +8,8 @@
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
- MLX5_FW_RESET_FLAGS_PENDING_COMP
+ MLX5_FW_RESET_FLAGS_PENDING_COMP,
+ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
};
struct mlx5_fw_reset {
@@ -57,7 +58,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
}
-static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
+ u8 *reset_type, u8 *reset_state)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -71,25 +73,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r
*reset_level = MLX5_GET(mfrl_reg, out, reset_level);
if (reset_type)
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
+ if (reset_state)
+ *reset_state = MLX5_GET(mfrl_reg, out, reset_state);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
}
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
+static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack)
+{
+ u8 reset_state;
+
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ goto out;
+
+ switch (reset_state) {
+ case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
+ case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
+ return -EBUSY;
+ case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
+ return -ETIMEDOUT;
+ case MLX5_MFRL_REG_RESET_STATE_NACK:
+ NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
+ return -EPERM;
+ }
+
+out:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset failed");
+ return -EIO;
+}
+
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ struct netlink_ext_ack *extack)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
int err;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
- if (err)
- clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- return err;
+
+ MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
+ MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);
+ err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MFRL, 0, 1, false);
+ if (!err)
+ return 0;
+
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
+ return mlx5_fw_reset_get_reset_state_err(dev, extack);
+
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
+ return mlx5_cmd_check(dev, err, in, out);
}
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
@@ -105,44 +149,48 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_load_one(dev);
+ mlx5_unload_one(dev);
+ if (mlx5_health_wait_pci_up(dev))
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ else
+ mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
}
}
-static void mlx5_sync_reset_reload_work(struct work_struct *work)
-{
- struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
- reset_reload_work);
- struct mlx5_core_dev *dev = fw_reset->dev;
- int err;
-
- mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
- err = mlx5_health_wait_pci_up(dev);
- if (err)
- mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev);
-}
-
static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer(&fw_reset->timer);
+ del_timer_sync(&fw_reset->timer);
}
-static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+ mlx5_core_warn(dev, "Reset request was already cleared\n");
+ return -EALREADY;
+ }
+
mlx5_stop_sync_reset_poll(dev);
- clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
if (poll_health)
mlx5_start_health_poll(dev);
+ return 0;
+}
+
+static void mlx5_sync_reset_reload_work(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_reload_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ mlx5_sync_reset_clear_reset_requested(dev, false);
+ mlx5_enter_error_state(dev, true);
+ mlx5_fw_reset_complete_reload(dev);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -159,8 +207,10 @@ static void poll_sync_reset(struct timer_list *t)
if (fatal_error) {
mlx5_core_warn(dev, "Got Device Reset\n");
- mlx5_sync_reset_clear_reset_requested(dev, false);
- queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ else
+ mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
return;
}
@@ -186,13 +236,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
}
-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+ mlx5_core_warn(dev, "Reset request was already set\n");
+ return -EALREADY;
+ }
mlx5_stop_health_poll(dev, true);
- set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
mlx5_start_sync_reset_poll(dev);
+ return 0;
}
static void mlx5_fw_live_patch_event(struct work_struct *work)
@@ -221,7 +275,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
err ? "Failed" : "Sent");
return;
}
- mlx5_sync_reset_set_reset_requested(dev);
+ if (mlx5_sync_reset_set_reset_requested(dev))
+ return;
+
err = mlx5_fw_reset_set_reset_sync_ack(dev);
if (err)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
@@ -303,6 +359,23 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
err = -ETIMEDOUT;
}
+ do {
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &reg16);
+ if (err)
+ return err;
+ if (reg16 == dev_id)
+ break;
+ msleep(20);
+ } while (!time_after(jiffies, timeout));
+
+ if (reg16 == dev_id) {
+ mlx5_core_info(dev, "Firmware responds to PCI config cycles again\n");
+ } else {
+ mlx5_core_err(dev, "Firmware is not responsive (0x%04x) after %llu ms\n",
+ reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
+ err = -ETIMEDOUT;
+ }
+
restore:
list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
pci_cfg_access_unlock(sdev);
@@ -319,7 +392,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- mlx5_sync_reset_clear_reset_requested(dev, false);
+ if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ return;
mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
@@ -336,7 +410,6 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
@@ -348,10 +421,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
return;
-
- mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
@@ -380,9 +451,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
struct mlx5_eqe *eqe = data;
+ if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+ return NOTIFY_DONE;
+
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
- queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
break;
case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
mlx5_sync_reset_events_handle(fw_reset, eqe);
@@ -426,6 +500,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
}
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+ cancel_work_sync(&fw_reset->fw_live_patch_work);
+ cancel_work_sync(&fw_reset->reset_request_work);
+ cancel_work_sync(&fw_reset->reset_reload_work);
+ cancel_work_sync(&fw_reset->reset_now_work);
+ cancel_work_sync(&fw_reset->reset_abort_work);
+}
+
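mlx5_drain_fw_reset() implements a common teardown pattern: raise a drop-new-requests flag first, then synchronously cancel each work item, so the notifier in fw_reset_event_notifier() stops queueing while in-flight work drains. A small standalone model of the pattern:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool drop_new_requests;

static void event_handler(const char *event)
{
	if (atomic_load(&drop_new_requests)) {
		printf("dropping %s: device is being removed\n", event);
		return;
	}
	printf("queued work for %s\n", event);
}

static void drain(void)
{
	atomic_store(&drop_new_requests, true);
	/* the driver then cancel_work_sync()s each outstanding work item */
}

int main(void)
{
	event_handler("reset_request");	/* queued */
	drain();
	event_handler("reset_request");	/* dropped */
	return 0;
}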
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index 7761ee5fc7d0..dc141c7e641a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -9,12 +9,14 @@
void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ struct netlink_ext_ack *extack);
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 737df402c927..86ed87d704f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
@@ -602,7 +601,7 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
fw_reporter_ctx.miss_counter = health->miss_counter;
if (fw_reporter_ctx.err_synd) {
devlink_health_report(health->fw_reporter,
- "FW syndrom reported", &fw_reporter_ctx);
+ "FW syndrome reported", &fw_reporter_ctx);
return;
}
if (fw_reporter_ctx.miss_counter)
@@ -667,16 +666,20 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
struct mlx5_fw_reporter_ctx fw_reporter_ctx;
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
health = container_of(work, struct mlx5_core_health, fatal_report_work);
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
+ devlink = priv_to_devlink(dev);
enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ devl_lock(devlink);
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
+ devl_unlock(devlink);
return;
}
fw_reporter_ctx.err_synd = health->synd;
@@ -699,11 +702,25 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.dump = mlx5_fw_fatal_reporter_dump,
};
-#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct devlink *devlink = priv_to_devlink(dev);
+ u64 grace_period;
+
+ if (mlx5_core_is_ecpf(dev)) {
+ grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ } else if (mlx5_core_is_pf(dev)) {
+ grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ } else {
+ /* VF or SF */
+ grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ }
health->fw_reporter =
devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
@@ -715,7 +732,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
health->fw_fatal_reporter =
devlink_health_reporter_create(devlink,
&mlx5_fw_fatal_reporter_ops,
- MLX5_REPORTER_FW_GRACEFUL_PERIOD,
+ grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
@@ -840,9 +857,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
-
- if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
- queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -859,6 +873,14 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
del_timer_sync(&health->timer);
}
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
+}
+
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -872,13 +894,6 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
cancel_work_sync(&health->fatal_report_work);
}
-void mlx5_health_flush(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_health *health = &dev->priv.health;
-
- flush_workqueue(health->wq);
-}
-
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index f4f7eaf16446..c247cca154e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "ipoib.h"
+#include "en/fs_ethtool.h"
static void mlx5i_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
@@ -39,7 +40,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = mlx5i_epriv(dev);
mlx5e_ethtool_get_drvinfo(priv, drvinfo);
- strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
+ strscpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
sizeof(drvinfo->driver));
}
@@ -83,7 +84,7 @@ static void mlx5i_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int mlx5i_set_channels(struct net_device *dev,
@@ -221,7 +222,6 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_MLX5_EN_RXNFC
static u32 mlx5i_flow_type_mask(u32 flow_type)
{
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
@@ -243,9 +243,18 @@ static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
+ * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
+ * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+ * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+ */
+ if (info->cmd == ETHTOOL_GRXRINGS) {
+ info->data = priv->channels.params.num_channels;
+ return 0;
+ }
+
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
-#endif
const struct ethtool_ops mlx5i_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -263,10 +272,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
-#ifdef CONFIG_MLX5_EN_RXNFC
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
-#endif
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0a99a020a3b2..4e3a75496dd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -35,6 +35,7 @@
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
+#include "en/fs_ethtool.h"
#define IB_DEFAULT_Q_KEY 0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
@@ -320,43 +321,47 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
+ struct mlx5_flow_namespace *ns =
+ mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!ns)
return -EINVAL;
- err = mlx5e_arfs_create_tables(priv);
+ mlx5e_fs_set_ns(priv->fs, ns, false);
+ err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(priv->fs, priv->rx_res);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv);
+ mlx5e_arfs_destroy_tables(priv->fs,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_arfs_destroy_tables(priv);
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_destroy_ttc_table(priv->fs);
+ mlx5e_arfs_destroy_tables(priv->fs,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_ethtool_cleanup_steering(priv->fs);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
@@ -364,9 +369,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
mlx5e_create_q_counters(priv);
@@ -397,6 +411,8 @@ err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -408,6 +424,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+ mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
@@ -446,7 +463,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 0b86e78dbc0e..0227a521d301 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -349,7 +349,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.update_stats = NULL,
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
new file mode 100644
index 000000000000..b8feaf0f5c4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "lag.h"
+
+static char *get_str_mode_type(struct mlx5_lag *ldev)
+{
+ switch (ldev->mode) {
+ case MLX5_LAG_MODE_ROCE: return "roce";
+ case MLX5_LAG_MODE_SRIOV: return "switchdev";
+ case MLX5_LAG_MODE_MULTIPATH: return "multipath";
+ case MLX5_LAG_MODE_MPESW: return "multiport_eswitch";
+ default: return "invalid";
+ }
+
+ return NULL;
+}
+
+static int type_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+ struct mlx5_lag *ldev;
+ char *mode = NULL;
+
+ ldev = dev->priv.lag;
+ mutex_lock(&ldev->lock);
+ if (__mlx5_lag_is_active(ldev))
+ mode = get_str_mode_type(ldev);
+ mutex_unlock(&ldev->lock);
+ if (!mode)
+ return -EINVAL;
+ seq_printf(file, "%s\n", mode);
+
+ return 0;
+}
+
+static int port_sel_mode_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+ struct mlx5_lag *ldev;
+ int ret = 0;
+ char *mode;
+
+ ldev = dev->priv.lag;
+ mutex_lock(&ldev->lock);
+ if (__mlx5_lag_is_active(ldev))
+ mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&ldev->lock);
+ if (ret)
+ return ret;
+
+ seq_printf(file, "%s\n", mode);
+ return 0;
+}
+
+static int state_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+ struct mlx5_lag *ldev;
+ bool active;
+
+ ldev = dev->priv.lag;
+ mutex_lock(&ldev->lock);
+ active = __mlx5_lag_is_active(ldev);
+ mutex_unlock(&ldev->lock);
+ seq_printf(file, "%s\n", active ? "active" : "disabled");
+ return 0;
+}
+
+static int flags_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+ bool fdb_sel_mode_native;
+ struct mlx5_lag *ldev;
+ bool shared_fdb;
+ bool lag_active;
+
+ ldev = dev->priv.lag;
+ mutex_lock(&ldev->lock);
+ lag_active = __mlx5_lag_is_active(ldev);
+ if (!lag_active)
+ goto unlock;
+
+ shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+ &ldev->mode_flags);
+
+unlock:
+ mutex_unlock(&ldev->lock);
+ if (!lag_active)
+ return -EINVAL;
+
+ seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off");
+ seq_printf(file, "%s:%s\n", "fdb_selection_mode",
+ fdb_sel_mode_native ? "native" : "affinity");
+ return 0;
+}
+
+static int mapping_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+ u8 ports[MLX5_MAX_PORTS] = {};
+ struct mlx5_lag *ldev;
+ bool hash = false;
+ bool lag_active;
+ int num_ports;
+ int i;
+
+ ldev = dev->priv.lag;
+ mutex_lock(&ldev->lock);
+ lag_active = __mlx5_lag_is_active(ldev);
+ if (lag_active) {
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ &num_ports);
+ hash = true;
+ } else {
+ for (i = 0; i < ldev->ports; i++)
+ ports[i] = ldev->v2p_map[i];
+ num_ports = ldev->ports;
+ }
+ }
+ mutex_unlock(&ldev->lock);
+ if (!lag_active)
+ return -EINVAL;
+
+ for (i = 0; i < num_ports; i++) {
+ if (hash)
+ seq_printf(file, "%d\n", ports[i] + 1);
+ else
+ seq_printf(file, "%d:%d\n", i + 1, ports[i]);
+ }
+
+ return 0;
+}
+
+static int members_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+ struct mlx5_lag *ldev;
+ int i;
+
+ ldev = dev->priv.lag;
+ mutex_lock(&ldev->lock);
+ for (i = 0; i < ldev->ports; i++) {
+ if (!ldev->pf[i].dev)
+ continue;
+ seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
+ }
+ mutex_unlock(&ldev->lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(type);
+DEFINE_SHOW_ATTRIBUTE(port_sel_mode);
+DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(flags);
+DEFINE_SHOW_ATTRIBUTE(mapping);
+DEFINE_SHOW_ATTRIBUTE(members);
+
+void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev)
+{
+ struct dentry *dbg;
+
+ dbg = debugfs_create_dir("lag", mlx5_debugfs_get_dev_root(dev));
+ dev->priv.dbg.lag_debugfs = dbg;
+
+ debugfs_create_file("type", 0444, dbg, dev, &type_fops);
+ debugfs_create_file("port_sel_mode", 0444, dbg, dev, &port_sel_mode_fops);
+ debugfs_create_file("state", 0444, dbg, dev, &state_fops);
+ debugfs_create_file("flags", 0444, dbg, dev, &flags_fops);
+ debugfs_create_file("mapping", 0444, dbg, dev, &mapping_fops);
+ debugfs_create_file("members", 0444, dbg, dev, &members_fops);
+}
+
+void mlx5_ldev_remove_debugfs(struct dentry *dbg)
+{
+ debugfs_remove_recursive(dbg);
+}
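DEFINE_SHOW_ATTRIBUTE(name), from include/linux/seq_file.h, is what turns each *_show() above into the *_fops handed to debugfs_create_file(); its expansion is roughly the following (paraphrased, not verbatim):

static int type_open(struct inode *inode, struct file *file)
{
	return single_open(file, type_show, inode->i_private);
}

static const struct file_operations type_fops = {
	.owner   = THIS_MODULE,
	.open    = type_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};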
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 4ddf6b330a44..a9f4ede4a9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -31,14 +31,22 @@
*/
#include <linux/netdevice.h>
+#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
+#include "mpesw.h"
+
+enum {
+ MLX5_LAG_EGRESS_PORT_1 = 1,
+ MLX5_LAG_EGRESS_PORT_2,
+};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -46,28 +54,67 @@
*/
static DEFINE_SPINLOCK(lag_lock);
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2, bool shared_fdb, u8 flags)
+static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT;
+
+ if (mode == MLX5_LAG_MODE_MPESW)
+ return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW;
+
+ return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
+}
+
+static u8 lag_active_port_bits(struct mlx5_lag *ldev)
+{
+ u8 enabled_ports[MLX5_MAX_PORTS] = {};
+ u8 active_port = 0;
+ int num_enabled;
+ int idx;
+
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ &num_enabled);
+ for (idx = 0; idx < num_enabled; idx++)
+ active_port |= BIT_MASK(enabled_ports[idx]);
+
+ return active_port;
+}
+
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
+ unsigned long flags)
+{
+ bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+ &flags);
+ int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
- void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
+ void *lag_ctx;
+ lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
+ MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
- MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
- if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- } else {
- MLX5_SET(lagc, lag_ctx, port_select_mode,
- MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
+ switch (port_sel_mode) {
+ case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ break;
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
+ if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
+ break;
+
+ MLX5_SET(lagc, lag_ctx, active_port,
+ lag_active_port_bits(mlx5_lag_dev(dev)));
+ break;
+ default:
+ break;
}
+ MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2)
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+ u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
@@ -75,8 +122,8 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -101,6 +148,75 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+ u8 *ports, int *num_disabled)
+{
+ int i;
+
+ *num_disabled = 0;
+ for (i = 0; i < num_ports; i++) {
+ if (!tracker->netdev_state[i].tx_enabled ||
+ !tracker->netdev_state[i].link_up)
+ ports[(*num_disabled)++] = i;
+ }
+}
+
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+ u8 *ports, int *num_enabled)
+{
+ int i;
+
+ *num_enabled = 0;
+ for (i = 0; i < num_ports; i++) {
+ if (tracker->netdev_state[i].tx_enabled &&
+ tracker->netdev_state[i].link_up)
+ ports[(*num_enabled)++] = i;
+ }
+
+ if (*num_enabled == 0)
+ mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+}
+
+static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
+ struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ unsigned long flags)
+{
+ char buf[MLX5_MAX_PORTS * 10 + 1] = {};
+ u8 enabled_ports[MLX5_MAX_PORTS] = {};
+ int written = 0;
+ int num_enabled;
+ int idx;
+ int err;
+ int i;
+ int j;
+
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
+ mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ &num_enabled);
+ for (i = 0; i < num_enabled; i++) {
+ err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
+ if (err != 3)
+ return;
+ written += err;
+ }
+ buf[written - 2] = 0;
+ mlx5_core_info(dev, "lag map active ports: %s\n", buf);
+ } else {
+ for (i = 0; i < ldev->ports; i++) {
+ for (j = 0; j < ldev->buckets; j++) {
+ idx = i * ldev->buckets + j;
+ err = scnprintf(buf + written, 10,
+ " port %d:%d", i + 1, ldev->v2p_map[idx]);
+ if (err != 9)
+ return;
+ written += err;
+ }
+ }
+ mlx5_core_info(dev, "lag map:%s\n", buf);
+ }
+}
+
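The buffer handling in mlx5_lag_print_mapping() leans on scnprintf() returning the number of characters actually written, so `written` can advance without overflowing `buf`. A userspace model of the accumulation (using snprintf(), whose return value differs on truncation, which this model ignores):

#include <stdio.h>

int main(void)
{
	char buf[64];
	int written = 0;
	int i;

	for (i = 0; i < 3; i++)
		written += snprintf(buf + written, sizeof(buf) - written,
				    " port %d:%d", i + 1, i + 1);
	printf("lag map:%s\n", buf);
	return 0;
}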
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);
@@ -112,8 +228,10 @@ static void mlx5_ldev_free(struct kref *ref)
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
- cancel_delayed_work_sync(&ldev->bond_work);
+ mlx5_lag_mpesw_cleanup(ldev);
+ cancel_work_sync(&ldev->mpesw_work);
destroy_workqueue(ldev->wq);
+ mutex_destroy(&ldev->lock);
kfree(ldev);
}
@@ -143,6 +261,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
}
kref_init(&ldev->ref);
+ mutex_init(&ldev->lock);
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -150,12 +269,17 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
+ ldev->mode = MLX5_LAG_MODE_NONE;
err = mlx5_lag_mp_init(ldev);
if (err)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
+ mlx5_lag_mpesw_init(ldev);
+ ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
+ ldev->buckets = 1;
+
return ldev;
}
@@ -164,7 +288,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < ldev->ports; i++)
if (ldev->pf[i].netdev == ndev)
return i;
@@ -173,108 +297,278 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
+ return ldev->mode == MLX5_LAG_MODE_ROCE;
}
static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
+ return ldev->mode == MLX5_LAG_MODE_SRIOV;
}
+/* Create a mapping between steering slots and active ports.
+ * Since there are ldev->buckets slots per port, first assume the
+ * native mapping should be used.
+ * If any ports are disabled, fill their slots with a mapping that
+ * points to active ports.
+ */
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 *port1, u8 *port2)
+ u8 num_ports,
+ u8 buckets,
+ u8 *ports)
{
- bool p1en;
- bool p2en;
+ int disabled[MLX5_MAX_PORTS] = {};
+ int enabled[MLX5_MAX_PORTS] = {};
+ int disabled_ports_num = 0;
+ int enabled_ports_num = 0;
+ int idx;
+ u32 rand;
+ int i;
+ int j;
- p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
- tracker->netdev_state[MLX5_LAG_P1].link_up;
+ for (i = 0; i < num_ports; i++) {
+ if (tracker->netdev_state[i].tx_enabled &&
+ tracker->netdev_state[i].link_up)
+ enabled[enabled_ports_num++] = i;
+ else
+ disabled[disabled_ports_num++] = i;
+ }
- p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
- tracker->netdev_state[MLX5_LAG_P2].link_up;
+ /* Use native mapping by default where each port's buckets
+ * point to the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
+ */
+ for (i = 0; i < num_ports; i++)
+ for (j = 0; j < buckets; j++) {
+ idx = i * buckets + j;
+ ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ }
- *port1 = 1;
- *port2 = 2;
- if ((!p1en && !p2en) || (p1en && p2en))
+ /* If all ports are disabled/enabled keep native mapping */
+ if (enabled_ports_num == num_ports ||
+ disabled_ports_num == num_ports)
return;
- if (p1en)
- *port2 = 1;
- else
- *port1 = 2;
+ /* Go over the disabled ports and for each assign a random active port */
+ for (i = 0; i < disabled_ports_num; i++) {
+ for (j = 0; j < buckets; j++) {
+ get_random_bytes(&rand, 4);
+ ports[disabled[i] * buckets + j] = enabled[rand % enabled_ports_num] + 1;
+ }
+ }
+}
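+
+/* To make the slot filling concrete, here is a compilable userspace
+ * sketch of the same scheme with hypothetical small sizes (3 ports,
+ * 2 buckets, port index 1 down). The driver itself uses
+ * get_random_bytes() and up to MLX5_LAG_MAX_HASH_BUCKETS buckets.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(void)
+{
+	int enabled[] = { 0, 2 };	/* 0-based indices of active ports */
+	int disabled[] = { 1 };		/* port 2 (index 1) lost its link */
+	int num_enabled = 2, num_disabled = 1;
+	int num_ports = 3, buckets = 2;
+	unsigned char ports[3 * 2];
+	int i, j;
+
+	/* native mapping first: 1 1 2 2 3 3 */
+	for (i = 0; i < num_ports; i++)
+		for (j = 0; j < buckets; j++)
+			ports[i * buckets + j] = i + 1;
+
+	/* remap each disabled port's slots to a random active port */
+	for (i = 0; i < num_disabled; i++)
+		for (j = 0; j < buckets; j++)
+			ports[disabled[i] * buckets + j] =
+				enabled[rand() % num_enabled] + 1;
+
+	for (i = 0; i < num_ports * buckets; i++)
+		printf("slot %d -> egress port %d\n", i, ports[i]);
+	return 0;
+}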
+
+static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < ldev->ports; i++)
+ if (ldev->pf[i].has_drop)
+ return true;
+ return false;
}
-static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
+static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < ldev->ports; i++) {
+ if (!ldev->pf[i].has_drop)
+ continue;
+
+ mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
+ MLX5_VPORT_UPLINK);
+ ldev->pf[i].has_drop = false;
+ }
+}
+
+static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ u8 disabled_ports[MLX5_MAX_PORTS] = {};
+ struct mlx5_core_dev *dev;
+ int disabled_index;
+ int num_disabled;
+ int err;
+ int i;
+
+ /* First delete the current drop rules so that packets on ports
+ * which became active again are not dropped
+ */
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+ if (!ldev->tracker.has_inactive)
+ return;
+
+ mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+
+ for (i = 0; i < num_disabled; i++) {
+ disabled_index = disabled_ports[i];
+ dev = ldev->pf[disabled_index].dev;
+ err = mlx5_esw_acl_ingress_vport_drop_rule_create(dev->priv.eswitch,
+ MLX5_VPORT_UPLINK);
+ if (!err)
+ ldev->pf[disabled_index].has_drop = true;
+ else
+ mlx5_core_err(dev,
+ "Failed to create lag drop rule, error: %d", err);
+ }
+}
+
+static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
+ void *lag_ctx;
+
+ lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+ MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+ MLX5_SET(modify_lag_in, in, field_select, 0x2);
+
+ MLX5_SET(lagc, lag_ctx, active_port, ports);
+
+ return mlx5_cmd_exec_in(dev, modify_lag, in);
+}
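+
+/* Sketch only: lag_active_port_bits(), used by _mlx5_modify_lag() below,
+ * is defined outside the quoted hunks. Assuming it derives the mask from
+ * mlx5_infer_tx_enabled(), one plausible shape is the helper below, where
+ * ports 1 and 3 active out of 4 would yield active_port == 0b0101.
+ */
+static u8 lag_active_port_bits_sketch(struct mlx5_lag *ldev)
+{
+	u8 enabled_ports[MLX5_MAX_PORTS] = {};
+	u8 active_port = 0;
+	int num_enabled;
+	int i;
+
+	mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+			      &num_enabled);
+	for (i = 0; i < num_enabled; i++)
+		active_port |= BIT(enabled_ports[i]);
+	return active_port;
+}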
+
+static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ u8 active_ports;
+ int ret;
+
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
+ ret = mlx5_lag_port_sel_modify(ldev, ports);
+ if (ret ||
+ !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass))
+ return ret;
- if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
- return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
- return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+ active_ports = lag_active_port_bits(ldev);
+
+ return mlx5_cmd_modify_active_port(dev0, active_ports);
+ }
+ return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- u8 v2p_port1, v2p_port2;
+ int idx;
int err;
+ int i;
+ int j;
- mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
- &v2p_port2);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
- if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
- v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
- err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
- if (err) {
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
- return;
+ for (i = 0; i < ldev->ports; i++) {
+ for (j = 0; j < ldev->buckets; j++) {
+ idx = i * ldev->buckets + j;
+ if (ports[idx] == ldev->v2p_map[idx])
+ continue;
+ err = _mlx5_modify_lag(ldev, ports);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ return;
+ }
+ memcpy(ldev->v2p_map, ports, sizeof(ports));
+
+ mlx5_lag_print_mapping(dev0, ldev, tracker,
+ ldev->mode_flags);
+ break;
}
- ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
- ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
- mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
- ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2]);
}
+
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ !(ldev->mode == MLX5_LAG_MODE_ROCE))
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+}
+
+static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
+ unsigned long *flags)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+
+ if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
+ if (ldev->ports > 2)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (ldev->ports > 2)
+ ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
+
+ set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+
+ return 0;
}
-static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
- struct lag_tracker *tracker, u8 *flags)
+static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
- bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
- if (roce_lag ||
- !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
- tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (mode == MLX5_LAG_MODE_MPESW)
return;
- *flags |= MLX5_LAG_FLAG_HASH_BASED;
+
+ if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
+ set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+}
+
+static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
+ struct lag_tracker *tracker, bool shared_fdb,
+ unsigned long *flags)
+{
+ bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
+
+ *flags = 0;
+ if (shared_fdb) {
+ set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
+ set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
+ }
+
+ if (mode == MLX5_LAG_MODE_MPESW)
+ set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
+
+ if (roce_lag)
+ return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
+
+ mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
+ return 0;
}
-static char *get_str_port_sel_mode(u8 flags)
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
- if (flags & MLX5_LAG_FLAG_HASH_BASED)
- return "hash";
- return "queue_affinity";
+ int port_sel_mode = get_port_sel_mode(mode, flags);
+
+ switch (port_sel_mode) {
+ case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: return "hash";
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW: return "mpesw";
+ default: return "invalid";
+ }
}
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- bool shared_fdb, u8 flags)
+ enum mlx5_lag_mode mode,
+ unsigned long flags)
{
+ bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
- mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
- ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
- shared_fdb, get_str_port_sel_mode(flags));
+ if (tracker)
+ mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
+ mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
+ shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -303,31 +597,35 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags,
+ enum mlx5_lag_mode mode,
bool shared_fdb)
{
- bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
+ bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ unsigned long flags = 0;
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
- &ldev->v2p_map[MLX5_LAG_P2]);
- mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
- if (flags & MLX5_LAG_FLAG_HASH_BASED) {
- err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
- ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2]);
- if (err) {
- mlx5_core_err(dev0,
- "Failed to create LAG port selection(%d)\n",
- err);
- return err;
+ err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
+ if (err)
+ return err;
+
+ if (mode != MLX5_LAG_MODE_MPESW) {
+ mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
+ err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
+ ldev->v2p_map);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to create LAG port selection(%d)\n",
+ err);
+ return err;
+ }
}
}
- err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
+ err = mlx5_create_lag(ldev, tracker, mode, flags);
if (err) {
- if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
mlx5_lag_port_sel_destroy(ldev);
if (roce_lag)
mlx5_core_err(dev0,
@@ -339,26 +637,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return err;
}
- ldev->flags |= flags;
- ldev->shared_fdb = shared_fdb;
+ if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ !roce_lag)
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+
+ ldev->mode = mode;
+ ldev->mode_flags = flags;
return 0;
}
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
- u8 flags = ldev->flags;
+ unsigned long flags = ldev->mode_flags;
int err;
- ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
+ ldev->mode = MLX5_LAG_MODE_NONE;
+ ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
- if (ldev->shared_fdb) {
- mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
- ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
- ldev->shared_fdb = false;
+ if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
+ mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
+ dev1->priv.eswitch);
+ clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
}
MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
@@ -372,32 +676,55 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
- } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
- mlx5_lag_port_sel_destroy(ldev);
+ return err;
}
- return err;
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ mlx5_lag_port_sel_destroy(ldev);
+ if (mlx5_lag_has_drop_rule(ldev))
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+ return 0;
}
+#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
- return false;
+#ifdef CONFIG_MLX5_ESWITCH
+ struct mlx5_core_dev *dev;
+ u8 mode;
+#endif
+ int i;
+
+ for (i = 0; i < ldev->ports; i++)
+ if (!ldev->pf[i].dev)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
+ return false;
+
+ mode = mlx5_eswitch_mode(dev);
+ for (i = 0; i < ldev->ports; i++)
+ if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
+ return false;
+
+ if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
+ return false;
#else
- return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
- !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
+ for (i = 0; i < ldev->ports; i++)
+ if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
+ return false;
#endif
+ return true;
}
static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev)
continue;
@@ -414,7 +741,7 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev)
continue;
@@ -427,13 +754,14 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
}
}
-static void mlx5_disable_lag(struct mlx5_lag *ldev)
+void mlx5_disable_lag(struct mlx5_lag *ldev)
{
+ bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- bool shared_fdb = ldev->shared_fdb;
bool roce_lag;
int err;
+ int i;
roce_lag = __mlx5_lag_is_roce(ldev);
@@ -444,7 +772,8 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- mlx5_nic_vport_disable_roce(dev1);
+ for (i = 1; i < ldev->ports; i++)
+ mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
err = mlx5_deactivate_lag(ldev);
@@ -462,7 +791,7 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
}
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -481,13 +810,42 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
+static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
+{
+ bool roce_lag = true;
+ int i;
+
+ for (i = 0; i < ldev->ports; i++)
+ roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ for (i = 0; i < ldev->ports; i++)
+ roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
+#endif
+
+ return roce_lag;
+}
+
+static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
+{
+ return do_bond && __mlx5_lag_is_active(ldev) &&
+ ldev->mode != MLX5_LAG_MODE_MPESW;
+}
+
+static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
+{
+ return !do_bond && __mlx5_lag_is_active(ldev) &&
+ ldev->mode != MLX5_LAG_MODE_MPESW;
+}
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- struct lag_tracker tracker;
+ struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
+ int i;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
@@ -504,21 +862,14 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (do_bond && !__mlx5_lag_is_active(ldev)) {
bool shared_fdb = mlx5_shared_fdb_supported(ldev);
- roce_lag = !mlx5_sriov_is_enabled(dev0) &&
- !mlx5_sriov_is_enabled(dev1);
-
-#ifdef CONFIG_MLX5_ESWITCH
- roce_lag = roce_lag &&
- dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
-#endif
+ roce_lag = mlx5_lag_is_roce_lag(ldev);
if (shared_fdb || roce_lag)
mlx5_lag_remove_devices(ldev);
err = mlx5_activate_lag(ldev, &tracker,
- roce_lag ? MLX5_LAG_FLAG_ROCE :
- MLX5_LAG_FLAG_SRIOV,
+ roce_lag ? MLX5_LAG_MODE_ROCE :
+ MLX5_LAG_MODE_SRIOV,
shared_fdb);
if (err) {
if (shared_fdb || roce_lag)
@@ -528,7 +879,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- mlx5_nic_vport_enable_roce(dev1);
+ for (i = 1; i < ldev->ports; i++)
+ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
} else if (shared_fdb) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
@@ -548,9 +900,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
return;
}
}
- } else if (do_bond && __mlx5_lag_is_active(ldev)) {
+ } else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
mlx5_modify_lag(ldev, &tracker);
- } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
+ } else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
mlx5_disable_lag(ldev);
}
}
@@ -560,31 +912,11 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}
-static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
- struct mlx5_core_dev *dev1)
-{
- if (dev0)
- mlx5_esw_lock(dev0->priv.eswitch);
- if (dev1)
- mlx5_esw_lock(dev1->priv.eswitch);
-}
-
-static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
- struct mlx5_core_dev *dev1)
-{
- if (dev1)
- mlx5_esw_unlock(dev1->priv.eswitch);
- if (dev0)
- mlx5_esw_unlock(dev0->priv.eswitch);
-}
-
static void mlx5_do_bond_work(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
bond_work);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int status;
status = mlx5_dev_list_trylock();
@@ -593,27 +925,29 @@ static void mlx5_do_bond_work(struct work_struct *work)
return;
}
+ mutex_lock(&ldev->lock);
if (ldev->mode_changes_in_progress) {
+ mutex_unlock(&ldev->lock);
mlx5_dev_list_unlock();
mlx5_queue_bond_work(ldev, HZ);
return;
}
- mlx5_lag_lock_eswitches(dev0, dev1);
mlx5_do_bond(ldev);
- mlx5_lag_unlock_eswitches(dev0, dev1);
+ mutex_unlock(&ldev->lock);
mlx5_dev_list_unlock();
}
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- struct net_device *ndev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded, is_in_lag, mode_supported;
- int bond_status = 0;
+ bool has_inactive = 0;
+ struct slave *slave;
+ u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
int idx;
@@ -632,15 +966,19 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0)
+ if (idx >= 0) {
+ slave = bond_slave_get_rcu(ndev_tmp);
+ if (slave)
+ has_inactive |= bond_is_slave_inactive(slave);
bond_status |= (1 << idx);
+ }
num_slaves++;
}
rcu_read_unlock();
/* None of this lagdev's netdevs are slaves of this master. */
- if (!(bond_status & 0x3))
+ if (!(bond_status & GENMASK(ldev->ports - 1, 0)))
return 0;
if (lag_upper_info) {
@@ -648,11 +986,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
tracker->hash_type = lag_upper_info->hash_type;
}
+ tracker->has_inactive = has_inactive;
/* Determine bonding status:
* A device is considered bonded if all of its physical ports are slaves
* of the same lag master, and only them.
*/
- is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
+ is_in_lag = num_slaves == ldev->ports &&
+ bond_status == GENMASK(ldev->ports - 1, 0);
/* Lag mode must be activebackup or hash. */
mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
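/* The GENMASK() generalization above preserves the old two-port
 * semantics: GENMASK(1, 0) is 0x3, the literal it replaces. A tiny
 * userspace check, using a simplified stand-in for the kernel macro
 * that assumes 64-bit unsigned long:
 */

#include <assert.h>

#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	assert(GENMASK(1, 0) == 0x3);	/* two ports: matches the old literal */
	assert(GENMASK(3, 0) == 0xf);	/* four ports: all PFs must be slaves */
	return 0;
}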
@@ -704,6 +1044,39 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 1;
}
+static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev)
+{
+ struct net_device *ndev_tmp;
+ struct slave *slave;
+ bool has_inactive = 0;
+ int idx;
+
+ if (!netif_is_lag_master(ndev))
+ return 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+ if (idx < 0)
+ continue;
+
+ slave = bond_slave_get_rcu(ndev_tmp);
+ if (slave)
+ has_inactive |= bond_is_slave_inactive(slave);
+ }
+ rcu_read_unlock();
+
+ if (tracker->has_inactive == has_inactive)
+ return 0;
+
+ tracker->has_inactive = has_inactive;
+
+ return 1;
+}
+
+/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -712,7 +1085,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
- if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+ if (event != NETDEV_CHANGEUPPER &&
+ event != NETDEV_CHANGELOWERSTATE &&
+ event != NETDEV_CHANGEINFODATA)
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
@@ -721,13 +1096,15 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
switch (event) {
case NETDEV_CHANGEUPPER:
- changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
- ptr);
+ changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
break;
case NETDEV_CHANGELOWERSTATE:
changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
ndev, ptr);
break;
+ case NETDEV_CHANGEINFODATA:
+ changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
+ break;
}
ldev->tracker = tracker;
@@ -743,30 +1120,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
+ unsigned long flags;
- if (fn >= MLX5_MAX_PORTS)
+ if (fn >= ldev->ports)
return;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ spin_lock_irqsave(&lag_lock, flags);
+ for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
}
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -774,24 +1153,23 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= MLX5_MAX_PORTS)
+ if (fn >= ldev->ports)
return;
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
-/* Must be called with intf_mutex held */
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < ldev->ports; i++)
if (ldev->pf[i].dev == dev)
break;
- if (i == MLX5_MAX_PORTS)
+ if (i == ldev->ports)
return;
ldev->pf[i].dev = NULL;
@@ -804,12 +1182,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
- return 0;
-
- tmp_dev = mlx5_get_next_phys_dev(dev);
+ tmp_dev = mlx5_get_next_phys_dev_lag(dev);
if (tmp_dev)
ldev = tmp_dev->priv.lag;
@@ -819,13 +1192,18 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "Failed to alloc lag dev\n");
return 0;
}
- } else {
- if (ldev->mode_changes_in_progress)
- return -EAGAIN;
- mlx5_ldev_get(ldev);
+ mlx5_ldev_add_mdev(ldev, dev);
+ return 0;
}
+ mutex_lock(&ldev->lock);
+ if (ldev->mode_changes_in_progress) {
+ mutex_unlock(&ldev->lock);
+ return -EAGAIN;
+ }
+ mlx5_ldev_get(ldev);
mlx5_ldev_add_mdev(ldev, dev);
+ mutex_unlock(&ldev->lock);
return 0;
}
@@ -838,15 +1216,19 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
if (!ldev)
return;
+ /* mdev is being removed, might as well remove debugfs
+ * as early as possible.
+ */
+ mlx5_ldev_remove_debugfs(dev->priv.dbg.lag_debugfs);
recheck:
- mlx5_dev_list_lock();
+ mutex_lock(&ldev->lock);
if (ldev->mode_changes_in_progress) {
- mlx5_dev_list_unlock();
+ mutex_unlock(&ldev->lock);
msleep(100);
goto recheck;
}
mlx5_ldev_remove_mdev(ldev, dev);
- mlx5_dev_list_unlock();
+ mutex_unlock(&ldev->lock);
mlx5_ldev_put(ldev);
}
@@ -854,35 +1236,45 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
int err;
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS ||
+ MLX5_CAP_GEN(dev, num_lag_ports) <= 1))
+ return;
+
recheck:
mlx5_dev_list_lock();
err = __mlx5_lag_dev_add_mdev(dev);
+ mlx5_dev_list_unlock();
+
if (err) {
- mlx5_dev_list_unlock();
msleep(100);
goto recheck;
}
- mlx5_dev_list_unlock();
+ mlx5_ldev_add_debugfs(dev);
}
-/* Must be called with intf_mutex held */
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
+ bool lag_is_active;
ldev = mlx5_lag_dev(dev);
if (!ldev)
return;
+ mutex_lock(&ldev->lock);
mlx5_ldev_remove_netdev(ldev, netdev);
- ldev->flags &= ~MLX5_LAG_FLAG_READY;
+ clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
- if (__mlx5_lag_is_active(ldev))
+ lag_is_active = __mlx5_lag_is_active(ldev);
+ mutex_unlock(&ldev->lock);
+
+ if (lag_is_active)
mlx5_queue_bond_work(ldev, 0);
}
-/* Must be called with intf_mutex held */
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
@@ -893,26 +1285,29 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
if (!ldev)
return;
+ mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (!ldev->pf[i].dev)
+ for (i = 0; i < ldev->ports; i++)
+ if (!ldev->pf[i].netdev)
break;
- if (i >= MLX5_MAX_PORTS)
- ldev->flags |= MLX5_LAG_FLAG_READY;
+ if (i >= ldev->ports)
+ set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
+ mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -921,27 +1316,45 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ bool res = 0;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ if (ldev)
+ res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_mode_is_hash);
+
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev) &&
dev == ldev->pf[MLX5_LAG_P1].dev;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -950,12 +1363,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -964,12 +1378,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
- spin_unlock(&lag_lock);
+ res = ldev && __mlx5_lag_is_sriov(ldev) &&
+ test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -977,8 +1393,6 @@ EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev0;
- struct mlx5_core_dev *dev1;
struct mlx5_lag *ldev;
ldev = mlx5_lag_dev(dev);
@@ -986,16 +1400,13 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
return;
mlx5_dev_list_lock();
-
- dev0 = ldev->pf[MLX5_LAG_P1].dev;
- dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ mutex_lock(&ldev->lock);
ldev->mode_changes_in_progress++;
- if (__mlx5_lag_is_active(ldev)) {
- mlx5_lag_lock_eswitches(dev0, dev1);
+ if (__mlx5_lag_is_active(ldev))
mlx5_disable_lag(ldev);
- mlx5_lag_unlock_eswitches(dev0, dev1);
- }
+
+ mutex_unlock(&ldev->lock);
mlx5_dev_list_unlock();
}
@@ -1007,9 +1418,9 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
if (!ldev)
return;
- mlx5_dev_list_lock();
+ mutex_lock(&ldev->lock);
ldev->mode_changes_in_progress--;
- mlx5_dev_list_unlock();
+ mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
@@ -1017,17 +1428,21 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
+ int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
- ldev->pf[MLX5_LAG_P1].netdev :
- ldev->pf[MLX5_LAG_P2].netdev;
+ for (i = 0; i < ldev->ports; i++)
+ if (ldev->tracker.netdev_state[i].tx_enabled)
+ ndev = ldev->pf[i].netdev;
+ if (!ndev)
+ ndev = ldev->pf[ldev->ports - 1].netdev;
} else {
ndev = ldev->pf[MLX5_LAG_P1].netdev;
}
@@ -1035,7 +1450,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return ndev;
}
@@ -1045,32 +1460,49 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
u8 port = 0;
+ int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- if (ldev->pf[MLX5_LAG_P1].netdev == slave)
- port = MLX5_LAG_P1;
- else
- port = MLX5_LAG_P2;
+ for (i = 0; i < ldev->ports; i++) {
+ if (ldev->pf[i].netdev == slave) {
+ port = i;
+ break;
+ }
+ }
- port = ldev->v2p_map[port];
+ port = ldev->v2p_map[port * ldev->buckets];
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return 0;
+
+ return ldev->ports;
+}
+EXPORT_SYMBOL(mlx5_lag_get_num_ports);
+
struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -1080,7 +1512,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
ldev->pf[MLX5_LAG_P1].dev;
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1091,8 +1523,9 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
size_t *offsets)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
- struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
+ struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev;
+ unsigned long flags;
int num_ports;
int ret, i, j;
void *out;
@@ -1101,19 +1534,25 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
if (!out)
return -ENOMEM;
+ mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL);
+ if (!mdev) {
+ ret = -ENOMEM;
+ goto free_out;
+ }
+
memset(values, 0, sizeof(*values) * num_counters);
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
- num_ports = MLX5_MAX_PORTS;
- mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
- mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
+ num_ports = ldev->ports;
+ for (i = 0; i < ldev->ports; i++)
+ mdev[i] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
@@ -1123,13 +1562,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
out);
if (ret)
- goto free;
+ goto free_mdev;
for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}
-free:
+free_mdev:
+ kvfree(mdev);
+free_out:
kvfree(out);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index e5d231c31b54..ce2ce8ccbd70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -4,9 +4,13 @@
#ifndef __MLX5_LAG_H__
#define __MLX5_LAG_H__
+#include <linux/debugfs.h>
+
+#define MLX5_LAG_MAX_HASH_BUCKETS 16
#include "mlx5_core.h"
#include "mp.h"
#include "port_sel.h"
+#include "mpesw.h"
enum {
MLX5_LAG_P1,
@@ -14,20 +18,27 @@ enum {
};
enum {
- MLX5_LAG_FLAG_ROCE = 1 << 0,
- MLX5_LAG_FLAG_SRIOV = 1 << 1,
- MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
- MLX5_LAG_FLAG_READY = 1 << 3,
- MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
+ MLX5_LAG_FLAG_NDEVS_READY,
};
-#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
- MLX5_LAG_FLAG_MULTIPATH | \
- MLX5_LAG_FLAG_HASH_BASED)
+enum {
+ MLX5_LAG_MODE_FLAG_HASH_BASED,
+ MLX5_LAG_MODE_FLAG_SHARED_FDB,
+ MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+};
+
+enum mlx5_lag_mode {
+ MLX5_LAG_MODE_NONE,
+ MLX5_LAG_MODE_ROCE,
+ MLX5_LAG_MODE_SRIOV,
+ MLX5_LAG_MODE_MULTIPATH,
+ MLX5_LAG_MODE_MPESW,
+};
struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
+ bool has_drop;
};
/* Used for collection of netdev event info. */
@@ -35,6 +46,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ unsigned int has_inactive:1;
enum netdev_lag_hash hash_type;
};
@@ -42,20 +54,37 @@ struct lag_tracker {
* It serves both its phys functions.
*/
struct mlx5_lag {
- u8 flags;
+ enum mlx5_lag_mode mode;
+ unsigned long mode_flags;
+ unsigned long state_flags;
+ u8 ports;
+ u8 buckets;
int mode_changes_in_progress;
- bool shared_fdb;
- u8 v2p_map[MLX5_MAX_PORTS];
+ u8 v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
struct kref ref;
struct lag_func pf[MLX5_MAX_PORTS];
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
+ struct work_struct mpesw_work;
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
+ /* Protect lag fields/state changes */
+ struct mutex lock;
+ struct lag_mpesw lag_mpesw;
};
+static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+ return false;
+ return true;
+}
+
static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
{
@@ -65,22 +94,33 @@ mlx5_lag_dev(struct mlx5_core_dev *dev)
static inline bool
__mlx5_lag_is_active(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
+ return ldev->mode != MLX5_LAG_MODE_NONE;
}
static inline bool
mlx5_lag_is_ready(struct mlx5_lag *ldev)
{
- return ldev->flags & MLX5_LAG_FLAG_READY;
+ return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags,
+ enum mlx5_lag_mode mode,
bool shared_fdb);
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
+bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
+
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+ u8 *ports, int *num_enabled);
+
+void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
+void mlx5_ldev_remove_debugfs(struct dentry *dbg);
+void mlx5_disable_lag(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 1ca01a5b6cdd..0259a149a64c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -11,7 +11,7 @@
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+ return ldev->mode == MLX5_LAG_MODE_MULTIPATH;
}
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
@@ -50,7 +50,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
- struct lag_tracker tracker;
+ struct lag_tracker tracker = {};
if (!__mlx5_lag_is_multipath(ldev))
return;
@@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
flush_workqueue(mp->wq);
}
+static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len)
+{
+ mp->fib.mfi = fi;
+ mp->fib.priority = fi->fib_priority;
+ mp->fib.dst = dst;
+ mp->fib.dst_len = dst_len;
+}
+
struct mlx5_fib_event_work {
struct work_struct work;
struct mlx5_lag *ldev;
@@ -110,10 +118,10 @@ struct mlx5_fib_event_work {
};
};
-static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
- unsigned long event,
- struct fib_info *fi)
+static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
+ struct fib_entry_notifier_info *fen_info)
{
+ struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
struct fib_nh *fib_nh0, *fib_nh1;
unsigned int nhs;
@@ -121,11 +129,17 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
/* stop track */
- if (mp->mfi == fi)
- mp->mfi = NULL;
+ if (mp->fib.mfi == fi)
+ mp->fib.mfi = NULL;
return;
}
+ /* Handle multipath entry with lower priority value */
+ if (mp->fib.mfi && mp->fib.mfi != fi &&
+ (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
+ fi->fib_priority >= mp->fib.priority)
+ return;
+
/* Handle add/replace event */
nhs = fib_info_num_path(fi);
if (nhs == 1) {
@@ -135,12 +149,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
if (i < 0)
- i = MLX5_LAG_NORMAL_AFFINITY;
- else
- ++i;
+ return;
+ i++;
mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
+
return;
}
@@ -160,15 +175,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
}
/* First time we see multipath route */
- if (!mp->mfi && !__mlx5_lag_is_active(ldev)) {
+ if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
struct lag_tracker tracker;
tracker = ldev->tracker;
- mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH, false);
+ mlx5_activate_lag(ldev, &tracker, MLX5_LAG_MODE_MULTIPATH, false);
}
mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
- mp->mfi = fi;
+ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
@@ -179,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct lag_mp *mp = &ldev->lag_mp;
/* Check the nh event is related to the route */
- if (!mp->mfi || mp->mfi != fi)
+ if (!mp->fib.mfi || mp->fib.mfi != fi)
return;
/* nh added/removed */
@@ -209,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work)
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event,
- fib_work->fen_info.fi);
+ &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
@@ -308,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
/* Clear mfi, as it might become stale when a route delete event
* has been missed, see mlx5_lag_fib_route_event().
*/
- ldev->lag_mp.mfi = NULL;
+ ldev->lag_mp.fib.mfi = NULL;
}
int mlx5_lag_mp_init(struct mlx5_lag *ldev)
@@ -319,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
/* always clear mfi, as it might become stale when a route delete event
* has been missed
*/
- mp->mfi = NULL;
+ mp->fib.mfi = NULL;
if (mp->fib_nb.notifier_call)
return 0;
@@ -349,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
- mp->mfi = NULL;
+ mp->fib.mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
index 57af962cad29..056a066da604 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
@@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity {
struct lag_mp {
struct notifier_block fib_nb;
- struct fib_info *mfi; /* used in tracking fib events */
+ struct {
+ const void *mfi; /* used in tracking fib events */
+ u32 priority;
+ u32 dst;
+ int dst_len;
+ } fib;
struct workqueue_struct *wq;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
new file mode 100644
index 000000000000..f643202b29c6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include <net/nexthop.h>
+#include "lag/lag.h"
+#include "eswitch.h"
+#include "lib/mlx5.h"
+
+void mlx5_mpesw_work(struct work_struct *work)
+{
+ struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
+
+ mutex_lock(&ldev->lock);
+ mlx5_disable_lag(ldev);
+ mutex_unlock(&ldev->lock);
+}
+
+static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = dev->priv.lag;
+
+ if (!queue_work(ldev->wq, &ldev->mpesw_work))
+ mlx5_core_warn(dev, "failed to queue work\n");
+}
+
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = dev->priv.lag;
+
+ if (!ldev)
+ return;
+
+ mutex_lock(&ldev->lock);
+ if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+ ldev->mode == MLX5_LAG_MODE_MPESW)
+ mlx5_lag_disable_mpesw(dev);
+ mutex_unlock(&ldev->lock);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = dev->priv.lag;
+ int err = 0;
+
+ if (!ldev)
+ return 0;
+
+ mutex_lock(&ldev->lock);
+ if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+ goto out;
+
+ if (ldev->mode != MLX5_LAG_MODE_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+ if (err)
+ mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+
+out:
+ mutex_unlock(&ldev->lock);
+ return err;
+}
+
+int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
+{
+ struct mlx5_lag *ldev = mdev->priv.lag;
+
+ if (!netif_is_bond_master(out_dev) || !ldev)
+ return 0;
+
+ mutex_lock(&ldev->lock);
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ mutex_unlock(&ldev->lock);
+ return -EOPNOTSUPP;
+ }
+ mutex_unlock(&ldev->lock);
+ return 0;
+}
+
+bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
+{
+ bool ret;
+
+ ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
+ return ret;
+}
+
+void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
+{
+ INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
+ atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
+}
+
+void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
+{
+ cancel_delayed_work_sync(&ldev->bond_work);
+}
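+
+/* The rule count turns MPESW activation into a refcount: the first
+ * mlx5_lag_add_mpesw_rule() switches the LAG into MLX5_LAG_MODE_MPESW,
+ * and the last mlx5_lag_del_mpesw_rule() queues mlx5_mpesw_work() to
+ * disable it (a work item is used because the caller already holds
+ * ldev->lock, which the teardown must retake). A hypothetical caller
+ * pattern, not taken from an existing offload path:
+ */
+
+static int my_rule_attach(struct mlx5_core_dev *dev)
+{
+	int err;
+
+	err = mlx5_lag_add_mpesw_rule(dev);
+	if (err)
+		return err;	/* another LAG mode is already active */
+	/* ... install the steering rule; call the del helper on failure ... */
+	return 0;
+}
+
+static void my_rule_detach(struct mlx5_core_dev *dev)
+{
+	/* ... remove the steering rule first ... */
+	mlx5_lag_del_mpesw_rule(dev);
+}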
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
new file mode 100644
index 000000000000..be4abcb8fcd5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LAG_MPESW_H__
+#define __MLX5_LAG_MPESW_H__
+
+#include "lag.h"
+#include "mlx5_core.h"
+
+struct lag_mpesw {
+ struct work_struct mpesw_work;
+ atomic_t mpesw_rule_count;
+};
+
+void mlx5_mpesw_work(struct work_struct *work);
+int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
+bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
+void mlx5_lag_mpesw_init(struct mlx5_lag *ldev);
+void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev);
+#else
+static inline void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {}
+static inline void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {}
+#endif
+
+#endif /* __MLX5_LAG_MPESW_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index a6592f9c3c05..d3a3fe4ce670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -12,7 +12,8 @@ enum {
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
- struct mlx5_flow_definer *definer)
+ struct mlx5_flow_definer *definer,
+ u8 rules)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -25,7 +26,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
MLX5_SET(create_flow_group_in, in, match_definer_id,
mlx5_get_match_definer_id(definer));
MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, rules - 1);
MLX5_SET(create_flow_group_in, in, group_type,
MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
@@ -36,7 +37,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
- u8 port1, u8 port2)
+ u8 *ports)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_flow_table_attr ft_attr = {};
@@ -44,8 +45,10 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
int err, i;
+ int idx;
+ int j;
- ft_attr.max_fte = MLX5_MAX_PORTS;
+ ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
@@ -61,7 +64,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
}
lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
- lag_definer->definer);
+ lag_definer->definer,
+ ft_attr.max_fte);
if (IS_ERR(lag_definer->fg)) {
err = PTR_ERR(lag_definer->fg);
goto destroy_ft;
@@ -70,19 +74,25 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- u8 affinity = i == 0 ? port1 : port2;
-
- dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
- vhca_id);
- lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
- NULL, &flow_act,
- &dest, 1);
- if (IS_ERR(lag_definer->rules[i])) {
- err = PTR_ERR(lag_definer->rules[i]);
- while (i--)
- mlx5_del_flow_rules(lag_definer->rules[i]);
- goto destroy_fg;
+ for (i = 0; i < ldev->ports; i++) {
+ for (j = 0; j < ldev->buckets; j++) {
+ u8 affinity;
+
+ idx = i * ldev->buckets + j;
+ affinity = ports[idx];
+
+ dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
+ vhca_id);
+ lag_definer->rules[idx] = mlx5_add_flow_rules(lag_definer->ft,
+ NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(lag_definer->rules[idx])) {
+ err = PTR_ERR(lag_definer->rules[idx]);
+ do {
+ while (j--) {
+ idx = i * ldev->buckets + j;
+ mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ j = ldev->buckets;
+ } while (i--);
+ goto destroy_fg;
+ }
}
}
@@ -279,8 +289,7 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask,
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
- enum mlx5_traffic_types tt, bool tunnel, u8 port1,
- u8 port2)
+ enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_definer *lag_definer;
@@ -308,7 +317,7 @@ mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
goto free_mask;
}
- err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
+ err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports);
if (err)
goto destroy_match_definer;
@@ -329,10 +338,16 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int idx;
int i;
+ int j;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- mlx5_del_flow_rules(lag_definer->rules[i]);
+ for (i = 0; i < ldev->ports; i++) {
+ for (j = 0; j < ldev->buckets; j++) {
+ idx = i * ldev->buckets + j;
+ mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ }
mlx5_destroy_flow_group(lag_definer->fg);
mlx5_destroy_flow_table(lag_definer->ft);
mlx5_destroy_match_definer(dev, lag_definer->definer);
@@ -356,7 +371,7 @@ static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type,
- u8 port1, u8 port2)
+ u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_lag_definer *lag_definer;
@@ -364,7 +379,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
- false, port1, port2);
+ false, ports);
if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer);
goto destroy_definers;
@@ -376,7 +391,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
lag_definer =
mlx5_lag_create_definer(ldev, hash_type, tt,
- true, port1, port2);
+ true, ports);
if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer);
goto destroy_definers;
@@ -505,7 +520,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
struct ttc_params ttc_params = {};
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
- port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
if (IS_ERR(port_sel->inner.ttc))
return PTR_ERR(port_sel->inner.ttc);
@@ -513,13 +528,13 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
}
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
- enum netdev_lag_hash hash_type, u8 port1, u8 port2)
+ enum netdev_lag_hash hash_type, u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
set_tt_map(port_sel, hash_type);
- err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
+ err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
return err;
@@ -543,52 +558,62 @@ destroy_definers:
return err;
}
-static int
-mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
- struct mlx5_lag_definer **definers,
- u8 port1, u8 port2)
+static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *def,
+ u8 *ports)
{
- struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_destination dest = {};
+ int idx;
int err;
- int tt;
+ int i;
+ int j;
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
- struct mlx5_flow_handle **rules = definers[tt]->rules;
+ for (i = 0; i < ldev->ports; i++) {
+ for (j = 0; j < ldev->buckets; j++) {
+ idx = i * ldev->buckets + j;
+ if (ldev->v2p_map[idx] == ports[idx])
+ continue;
- if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
- dest.vport.vhca_id =
- MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
- err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
- &dest, NULL);
+ dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
+ vhca_id);
+ err = mlx5_modify_rule_destination(def->rules[idx], &dest, NULL);
if (err)
return err;
}
+ }
- if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
- dest.vport.vhca_id =
- MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
- err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
- &dest, NULL);
- if (err)
- return err;
- }
+ return 0;
+}
+
+static int
+mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer **definers,
+ u8 *ports)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+ int tt;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ err = __mlx5_lag_modify_definers_destinations(ldev, definers[tt], ports);
+ if (err)
+ return err;
}
return 0;
}
-int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
err = mlx5_lag_modify_definers_destinations(ldev,
port_sel->outer.definers,
- port1, port2);
+ ports);
if (err)
return err;
@@ -597,7 +622,7 @@ int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
return mlx5_lag_modify_definers_destinations(ldev,
port_sel->inner.definers,
- port1, port2);
+ ports);
}
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
index 6d15b28a42fc..5ec3af2a3ecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
@@ -10,7 +10,10 @@ struct mlx5_lag_definer {
struct mlx5_flow_definer *definer;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
- struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
+ /* Each port has ldev->buckets number of rules and they are arranged in
+ * [port * buckets .. port * buckets + buckets) locations
+ */
+ struct mlx5_flow_handle *rules[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
};
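
/* The flat rules[] layout follows the idx = port * buckets + bucket
 * convention used throughout port_sel.c; a trivial illustration, where
 * lag_rule_idx() is an editorial helper and not a driver symbol. With
 * ldev->buckets == 16, 0-based port p owns rules[p * 16 .. p * 16 + 15].
 */
static inline int lag_rule_idx(int port, int bucket, int buckets)
{
	return port * buckets + bucket;	/* matches idx = i * ldev->buckets + j */
}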
struct mlx5_lag_ttc {
@@ -27,22 +30,20 @@ struct mlx5_lag_port_sel {
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports);
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
- enum netdev_lag_hash hash_type, u8 port1,
- u8 port2);
+ enum netdev_lag_hash hash_type, u8 *ports);
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type,
- u8 port1, u8 port2)
+ u8 *ports)
{
return 0;
}
-static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
- u8 port2)
+static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
new file mode 100644
index 000000000000..c971ff04dd04
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/transobj.h>
+#include "clock.h"
+#include "aso.h"
+#include "wq.h"
+
+struct mlx5_aso_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5_aso_cq cq;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+ void *cqc_data, struct mlx5_aso_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_vector2eqn(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+ struct mlx5_aso_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = create_aso_cq(cq, cqc_data);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+ goto err_free_cq;
+ }
+
+ kvfree(cqc_data);
+ return 0;
+
+err_free_cq:
+ mlx5_aso_free_cq(cq);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ int err;
+
+ err = create_aso_sq(mdev, pdn, sqc_data, sq);
+ if (err)
+ return err;
+
+ err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+
+ return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+ u32 pdn, struct mlx5_aso *sq)
+{
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, pdn);
+ MLX5_SET(wq, wq, log_wq_sz, 1);
+
+ err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+ goto err_free_asosq;
+ }
+
+ mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_free_asosq:
+ mlx5_aso_free_sq(sq);
+err_out:
+ kvfree(sqc_data);
+ return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ struct mlx5_aso *aso;
+ int err;
+
+ aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+ if (!aso)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+ if (err)
+ goto err_cq;
+
+ err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+ if (err)
+ goto err_sq;
+
+ return aso;
+
+err_sq:
+ mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+ kfree(aso);
+ return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+ if (IS_ERR_OR_NULL(aso))
+ return;
+
+ mlx5_aso_destroy_sq(aso);
+ mlx5_aso_destroy_cq(&aso->cq);
+ kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+ (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_ACCESS_ASO);
+ cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->general_id = cpu_to_be32(obj_id);
+}
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+ return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+ doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ if (with_data)
+ aso->pc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->pc += MLX5_ASO_WQEBBS;
+ *aso->wq.db = cpu_to_be32(aso->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
+{
+ struct mlx5_aso_cq *cq = &aso->cq;
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe;
+
+ mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
+ err_cqe = (struct mlx5_err_cqe *)cqe;
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+ err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n",
+ err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (with_data)
+ aso->cc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->cc += MLX5_ASO_WQEBBS;
+
+ return 0;
+}
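Taken together, the new ASO helpers form a get/build/post/poll cycle. A minimal caller sketch, assuming a valid mdev and pdn (obj_id is hypothetical, error handling elided):

	struct mlx5_aso *aso = mlx5_aso_create(mdev, pdn);
	struct mlx5_aso_wqe *wqe;

	if (IS_ERR(aso))
		return PTR_ERR(aso);

	wqe = mlx5_aso_get_wqe(aso);
	mlx5_aso_build_wqe(aso, MLX5_MACSEC_ASO_DS_CNT, wqe, obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	/* fill wqe->aso_ctrl per the object type before ringing */
	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
	err = mlx5_aso_poll_cq(aso, false);	/* -ETIMEDOUT if no CQE yet */
	mlx5_aso_destroy(aso);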
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
new file mode 100644
index 000000000000..2d40dcf9d42e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <linux/mlx5/qp.h>
+#include "mlx5_core.h"
+
+#define MLX5_ASO_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define ASO_CTRL_READ_EN BIT(0)
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
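Assuming the usual mlx5 sizes (64-byte MLX5_SEND_WQE_BB, 16-byte MLX5_SEND_WQE_DS, 16-byte ctrl segment, 48-byte aso_ctrl segment), the macros work out as:

	sizeof(struct mlx5_aso_wqe)      = 64  -> MLX5_ASO_WQEBBS      = 1
	sizeof(struct mlx5_aso_wqe_data) = 128 -> MLX5_ASO_WQEBBS_DATA = 2
	MLX5_MACSEC_ASO_DS_CNT = DIV_ROUND_UP(64, 16)              = 4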
+
+struct mlx5_wqe_aso_ctrl_seg {
+ __be32 va_h;
+ __be32 va_l; /* include read_enable */
+ __be32 l_key;
+ u8 data_mask_mode;
+ u8 condition_1_0_operand;
+ u8 condition_1_0_offset;
+ u8 data_offset_condition_operand;
+ __be32 condition_0_data;
+ __be32 condition_0_mask;
+ __be32 condition_1_data;
+ __be32 condition_1_mask;
+ __be64 bitwise_data;
+ __be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+ __be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+ struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+ MLX5_ASO_LOGICAL_AND,
+ MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+ MLX5_ASO_ALWAYS_FALSE,
+ MLX5_ASO_ALWAYS_TRUE,
+ MLX5_ASO_EQUAL,
+ MLX5_ASO_NOT_EQUAL,
+ MLX5_ASO_GREATER_OR_EQUAL,
+ MLX5_ASO_LESSER_OR_EQUAL,
+ MLX5_ASO_LESSER,
+ MLX5_ASO_GREATER,
+ MLX5_ASO_CYCLIC_GREATER,
+ MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+ MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+ MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+ MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
+};
+
+struct mlx5_aso;
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 91e806c1aa21..d3a9ae80fd30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -65,6 +65,8 @@ enum {
MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+ MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9),
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
@@ -72,6 +74,13 @@ static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}
+static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
+{
+ return (mlx5_real_time_mode(mdev) &&
+ MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
+ MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
+}
+
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
@@ -459,9 +468,95 @@ static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
return find_target_cycles(mdev, target_ns);
}
-static u64 perout_conf_real_time(s64 sec)
+static u64 perout_conf_real_time(s64 sec, u32 nsec)
+{
+ return (u64)nsec | (u64)sec << 32;
+}
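perout_conf_real_time() packs the MTPPS real-time timestamp with seconds in the high 32 bits and nanoseconds in the low 32. A worked example with hypothetical values:

	/* sec = 5, nsec = 500000000 = 0x1DCD6500 */
	u64 ts = perout_conf_real_time(5, 500000000);
	/* ts == (5ULL << 32) | 0x1DCD6500 == 0x000000051DCD6500 */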
+
+static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
+ u64 *time_stamp, bool real_time)
+{
+ struct timespec64 ts;
+ s64 ns;
+
+ ts.tv_nsec = rq->perout.period.nsec;
+ ts.tv_sec = rq->perout.period.sec;
+ ns = timespec64_to_ns(&ts);
+
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+
+ *time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
+ perout_conf_internal_timer(mdev, rq->perout.start.sec);
+
+ return 0;
+}
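The (ns >> 1) != 500000000LL guard is a compact 1 Hz check: after the truncating shift, only a total period of 1000000000 ns (exactly 1 s) or 1000000001 ns passes. The same guard in isolation, as a sketch:

	static bool is_one_pps_period(s64 ns)
	{
		/* true only for ns == 1000000000 or 1000000001 */
		return (ns >> 1) == 500000000LL;
	}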
+
+#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
+static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
+ struct ptp_clock_request *rq,
+ u32 *out_pulse_duration_ns)
{
- return (u64)sec << 32;
+ struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ u32 out_pulse_duration;
+ struct timespec64 ts;
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ ts.tv_sec = rq->perout.on.sec;
+ ts.tv_nsec = rq->perout.on.nsec;
+ out_pulse_duration = (u32)timespec64_to_ns(&ts);
+ } else {
+ /* by default, cap out_pulse_duration_ns at 50% of the
+ * pulse period
+ */
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
+ }
+
+ if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
+ out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
+ mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
+ out_pulse_duration, pps_info->min_out_pulse_duration_ns,
+ MLX5_MAX_PULSE_DURATION);
+ return -EINVAL;
+ }
+ *out_pulse_duration_ns = out_pulse_duration;
+
+ return 0;
+}
+
+static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
+ u32 *field_select, u32 *out_pulse_duration_ns,
+ u64 *period, u64 *time_stamp)
+{
+ struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct ptp_clock_time *time = &rq->perout.start;
+ struct timespec64 ts;
+
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
+ mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
+ pps_info->min_npps_period);
+ return -EINVAL;
+ }
+ *period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);
+
+ if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
+ return -EINVAL;
+
+ *time_stamp = perout_conf_real_time(time->sec, time->nsec);
+ *field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;
+
+ return 0;
+}
+
+static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
+{
+ return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
+ (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
}
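Note the inverted sense: mlx5_perout_verify_flags() returns true when the request must be rejected. A sketch of the decision table:

	NPPS real time supported | rq->perout.flags       | rejected?
	no                       | 0                      | no
	no                       | anything set           | yes
	yes                      | PTP_PEROUT_DUTY_CYCLE  | no
	yes                      | any other bit set      | yes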
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,20 +569,20 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
container_of(clock, struct mlx5_core_dev, clock);
bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- struct timespec64 ts;
+ u32 out_pulse_duration_ns = 0;
u32 field_select = 0;
+ u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- s64 ns;
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
- if (rq->perout.flags)
+ if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
return -EOPNOTSUPP;
if (rq->perout.index >= clock->ptp_info.n_pins)
@@ -500,29 +595,25 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
- s64 sec = rq->perout.start.sec;
-
- if (rq->perout.start.nsec)
- return -EINVAL;
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
- if ((ns >> 1) != 500000000LL)
+ if (rt_mode && rq->perout.start.sec > U32_MAX)
return -EINVAL;
- if (rt_mode && sec > U32_MAX)
- return -EINVAL;
-
- time_stamp = rt_mode ? perout_conf_real_time(sec) :
- perout_conf_internal_timer(mdev, sec);
-
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_TIME_STAMP;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ err = perout_conf_npps_real_time(mdev, rq, &field_select,
+ &out_pulse_duration_ns, &npps_period,
+ &time_stamp);
+ else
+ err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
+ if (err)
+ return err;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -531,7 +622,8 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
MLX5_SET(mtpps_reg, in, field_select, field_select);
-
+ MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
+ MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
@@ -687,6 +779,13 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
+ if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
+ clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
+ cap_log_min_npps_period);
+ if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
+ clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
+ cap_log_min_out_pulse_duration_ns);
+
clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index bced2efe9bef..adefde3ea941 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -14,7 +14,7 @@ static LIST_HEAD(devcom_list);
struct mlx5_devcom_component {
struct {
void *data;
- } device[MLX5_MAX_PORTS];
+ } device[MLX5_DEVCOM_PORTS_SUPPORTED];
mlx5_devcom_event_handler_t handler;
struct rw_semaphore sem;
@@ -25,7 +25,7 @@ struct mlx5_devcom_list {
struct list_head list;
struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
- struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
+ struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED];
};
struct mlx5_devcom {
@@ -74,13 +74,15 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return NULL;
+ if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
+ return NULL;
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
struct mlx5_core_dev *tmp_dev = NULL;
idx = -1;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
if (iter->devs[i])
tmp_dev = iter->devs[i];
else
@@ -134,11 +136,11 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
kfree(devcom);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (priv->devs[i])
break;
- if (i != MLX5_MAX_PORTS)
+ if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
return;
list_del(&priv->list);
@@ -191,7 +193,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (i != devcom->idx && comp->device[i].data) {
err = comp->handler(event, comp->device[i].data,
event_data);
@@ -239,7 +241,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
return NULL;
}
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (i != devcom->idx)
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 939d5bf1581b..94313c18bb64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,6 +6,8 @@
#include <linux/mlx5/driver.h>
+#define MLX5_DEVCOM_PORTS_SUPPORTED 2
+
enum mlx5_devcom_components {
MLX5_DEVCOM_ESW_OFFLOADS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 3d5e57ff558c..9482e51ac82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -12,13 +12,16 @@ struct mlx5_dm {
spinlock_t lock;
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
+ unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
+ u64 header_modify_pattern_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
+ bool support_v2;
if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
return NULL;
@@ -35,8 +38,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->steering_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(steering_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(steering_icm_blocks, GFP_KERNEL);
if (!dm->steering_sw_icm_alloc_blocks)
goto err_steering;
}
@@ -47,16 +49,33 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->header_modify_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(header_modify_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_sw_icm_alloc_blocks)
goto err_modify_hdr;
}
+ support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
+ MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
+
+ if (support_v2) {
+ header_modify_pattern_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_modify_pattern_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
+ if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
return dm;
+err_pattern:
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+
err_modify_hdr:
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
err_steering:
kfree(dm);
@@ -75,7 +94,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
}
if (dm->header_modify_sw_icm_alloc_blocks) {
@@ -83,7 +102,15 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
BIT(MLX5_CAP_DEV_MEM(dev,
log_header_modify_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->header_modify_sw_icm_alloc_blocks);
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+ }
+
+ if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_modify_pattern_sw_icm_alloc_blocks);
}
kfree(dm);
@@ -130,6 +157,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_sw_icm_size);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -203,6 +237,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
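The conversion swaps the open-coded kcalloc(BITS_TO_LONGS(nbits), ...) allocation for the dedicated bitmap helpers. A minimal sketch of the pattern, for a hypothetical nbits:

	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;
	/* ... set_bit()/test_bit()/bitmap_empty() as before ... */
	bitmap_free(map);	/* pairs with bitmap_zalloc(), replacing kfree() */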
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index d5e47630e284..df58cba37930 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
+ if (!chains->dev->priv.eswitch ||
+ chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
+ return 1;
+
/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO;
}
@@ -211,7 +212,7 @@ static int
create_chain_restore(struct fs_chain *chain)
{
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index b63dec24747a..b78f2ba25c19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -408,6 +408,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (test_bit(tt, params->ignore_dests))
+ continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
&params->dests[tt],
ttc_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
index 4bad6a5fde56..f240ffe5116c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
@@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
}
-
-static inline int
-mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent,
- void *buf, int len)
-{
- return 0;
-}
#endif
#endif /* __LIB_HV_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2f536c5d30b1..032adb21ad4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -83,6 +83,7 @@ int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, voi
enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
+ MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC,
};
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 839a01da110f..8ff16318e32d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!mpfs)
return;
WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
int err = 0;
u32 index;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!mpfs)
return 0;
mutex_lock(&mpfs->lock);
@@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
int err = 0;
u32 index;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!mpfs)
return 0;
mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index e042e0924079..4571c56ec3c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
new file mode 100644
index 000000000000..9b8c051ccf65
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+#include "smfs.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec)
+{
+ struct mlx5dr_match_parameters matcher_mask = {};
+
+ matcher_mask.match_buf = (u64 *)&spec->match_criteria;
+ matcher_mask.match_sz = DR_SZ_MATCH_PARAM;
+
+ return mlx5dr_matcher_create(table, priority, spec->match_criteria_enable, &matcher_mask);
+}
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+ mlx5dr_matcher_destroy(matcher);
+}
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+ return mlx5dr_table_get_from_fs_ft(ft);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table)
+{
+ return mlx5dr_action_create_dest_table(table);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id)
+{
+ return mlx5dr_action_create_flow_counter(counter_id);
+}
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action)
+{
+ mlx5dr_action_destroy(action);
+}
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+ size_t num_actions, struct mlx5dr_action *actions[],
+ u32 flow_source)
+{
+ struct mlx5dr_match_parameters value = {};
+
+ value.match_buf = (u64 *)spec->match_value;
+ value.match_sz = DR_SZ_MATCH_PARAM;
+
+ return mlx5dr_rule_create(matcher, &value, num_actions, actions, flow_source);
+}
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule)
+{
+ mlx5dr_rule_destroy(rule);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
new file mode 100644
index 000000000000..452d0df339ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LIB_SMFS_H__
+#define __MLX5_LIB_SMFS_H__
+
+#include "steering/mlx5dr.h"
+#include "steering/dr_types.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id);
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action);
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+ size_t num_actions, struct mlx5dr_action *actions[],
+ u32 flow_source);
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule);
+
+#endif /* __MLX5_LIB_SMFS_H__ */
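The wrappers give fs_core a thin entry into SW steering without pulling in the dr_* internals. A minimal usage sketch chaining them (ft, spec, counter_id and flow_source are hypothetical, error handling elided):

	struct mlx5dr_table *tbl = mlx5_smfs_table_get_from_fs_ft(ft);
	struct mlx5dr_matcher *matcher = mlx5_smfs_matcher_create(tbl, 0, spec);
	struct mlx5dr_action *actions[] = {
		mlx5_smfs_action_create_flow_counter(counter_id),
	};
	struct mlx5dr_rule *rule =
		mlx5_smfs_rule_create(matcher, spec, 1, actions, flow_source);

	/* ... */
	mlx5_smfs_rule_destroy(rule);
	mlx5_smfs_action_destroy(actions[0]);
	mlx5_smfs_matcher_destroy(matcher);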
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index c1df0d3595d8..696e45e2bd06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -10,6 +10,7 @@ struct mlx5_timeouts {
static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
[MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
+ [MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS] = 7200000,
[MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
[MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
[MLX5_TO_FW_INIT_MS] = 2000,
@@ -31,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
+int mlx5_tout_init(struct mlx5_core_dev *dev)
{
int i;
- for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
- tout_set(dev, tout_def_sw_val[i], i);
-}
-
-int mlx5_tout_init(struct mlx5_core_dev *dev)
-{
dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
if (!dev->timeouts)
return -ENOMEM;
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 1c42ead782fa..bc9e9aeda847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -7,6 +7,7 @@
enum mlx5_timeouts_types {
/* pre init timeouts (not read from FW) */
MLX5_TO_FW_PRE_INIT_TIMEOUT_MS,
+ MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS,
MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS,
MLX5_TO_FW_PRE_INIT_WAIT_MS,
@@ -34,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index e3b0a131c3e1..d55e15c1f380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/mlx5/driver.h>
#include <net/vxlan.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2c774f367199..283c4cc28944 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,9 +62,7 @@
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ipsec.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
@@ -92,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static u32 sw_owner_id[4];
+#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
+static DEFINE_IDA(sw_vhca_ida);
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
@@ -179,30 +179,30 @@ static struct mlx5_profile profile[] = {
},
};
-static int fw_initializing(struct mlx5_core_dev *dev)
-{
- return ioread32be(&dev->iseg->initializing) >> 31;
-}
-
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
u32 warn_time_mili)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+ u32 fw_initializing;
int err = 0;
- while (fw_initializing(dev)) {
- if (time_after(jiffies, end)) {
+ do {
+ fw_initializing = ioread32be(&dev->iseg->initializing);
+ if (!(fw_initializing >> 31))
+ break;
+ if (time_after(jiffies, end) ||
+ test_and_clear_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
err = -EBUSY;
break;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
- jiffies_to_msecs(end - warn) / 1000);
+ mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
+ jiffies_to_msecs(end - warn) / 1000, fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
- }
+ } while (true);
return err;
}
@@ -316,13 +316,6 @@ struct mlx5_reg_host_endianness {
u8 rsvd[15];
};
-#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
-
-enum {
- MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
- MLX5_DEV_CAP_FLAG_DCT,
-};
-
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
@@ -501,6 +494,49 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+
+ if (!err)
+ return val.vbool;
+
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return MLX5_CAP_GEN(dev, roce);
+}
+EXPORT_SYMBOL(mlx5_is_roce_on);
+
+static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ if (err)
+ return err;
+
+ if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
+ !(dev->priv.sw_vhca_id > 0))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
+ MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
+
+ return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
@@ -526,7 +562,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -579,7 +615,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported))
- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+ mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev);
if (max_uc_list > 0)
@@ -605,7 +642,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
*/
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
(!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}
@@ -634,6 +671,33 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
return err;
}
+static int handle_hca_cap_port_selection(struct mlx5_core_dev *dev,
+ void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, port_selection_cap))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass) ||
+ !MLX5_CAP_PORT_SELECTION_MAX(dev, port_select_flow_table_bypass))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur,
+ MLX5_ST_SZ_BYTES(port_selection_cap));
+ MLX5_SET(port_selection_cap, set_hca_cap, port_select_flow_table_bypass, 1);
+
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION);
+
+ return err;
+}
+
static int set_hca_cap(struct mlx5_core_dev *dev)
{
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
@@ -671,6 +735,20 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_2(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
+ goto out;
+ }
+
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_port_selection(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_port_selection failed\n");
+ goto out;
+ }
+
out:
kfree(set_ctx);
return err;
@@ -736,10 +814,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
- u32 syndrome;
- u8 status;
+ u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
+ u8 status = MLX5_GET(query_issi_out, query_out, status);
- mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (!status || syndrome == MLX5_DRIVER_SYND) {
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
err, status, syndrome);
@@ -939,6 +1016,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_sf_table_cleanup;
}
+ err = mlx5_fs_core_alloc(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc flow steering\n");
+ goto err_fs;
+ }
+
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -949,6 +1032,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0;
+err_fs:
+ mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
@@ -986,6 +1071,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
+ mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev);
mlx5_sf_hw_table_cleanup(dev);
mlx5_vhca_event_cleanup(dev);
@@ -1006,7 +1092,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
-static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
{
int err;
@@ -1017,15 +1103,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- mlx5_tout_set_def_val(dev);
-
/* wait for firmware to accept initialization segments configurations
*/
- err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT),
+ err = wait_fw_init(dev, timeout,
mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
- mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ timeout);
return err;
}
@@ -1046,10 +1130,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto err_cmd_cleanup;
+ goto stop_health_poll;
}
err = mlx5_core_set_issi(dev);
@@ -1101,8 +1187,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
mlx5_core_err(dev, "query hca failed\n");
goto reclaim_boot_pages;
}
-
- mlx5_start_health_poll(dev);
+ mlx5_start_health_fw_log_up(dev);
return 0;
@@ -1110,6 +1195,8 @@ reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
+stop_health_poll:
+ mlx5_stop_health_poll(dev, boot);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
@@ -1121,7 +1208,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
int err;
- mlx5_stop_health_poll(dev, boot);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
@@ -1129,6 +1215,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
}
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
+ mlx5_stop_health_poll(dev, boot);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
@@ -1184,15 +1271,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
- mlx5_accel_ipsec_init(dev);
-
- err = mlx5_accel_tls_init(dev);
- if (err) {
- mlx5_core_err(dev, "TLS device start failed %d\n", err);
- goto err_tls_start;
- }
-
- err = mlx5_init_fs(dev);
+ err = mlx5_fs_core_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
goto err_fs;
@@ -1237,11 +1316,8 @@ err_ec:
err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_cleanup(dev);
err_fs:
- mlx5_accel_tls_cleanup(dev);
-err_tls_start:
- mlx5_accel_ipsec_cleanup(dev);
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
@@ -1262,13 +1338,12 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
- mlx5_cleanup_fs(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_accel_tls_cleanup(dev);
+ mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
@@ -1283,12 +1358,14 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
int mlx5_init_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
int err = 0;
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
- err = mlx5_function_setup(dev, true);
+ err = mlx5_function_setup(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err)
goto err_function;
@@ -1313,6 +1390,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto err_register;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return 0;
err_register:
@@ -1327,11 +1405,15 @@ function_teardown:
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return err;
}
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
@@ -1350,12 +1432,15 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
}
-int mlx5_load_one(struct mlx5_core_dev *dev)
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
int err = 0;
+ u64 timeout;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1364,7 +1449,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev)
/* remove any previous indication of internal error */
dev->state = MLX5_DEVICE_STATE_UP;
- err = mlx5_function_setup(dev, false);
+ if (recovery)
+ timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
+ else
+ timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
+ err = mlx5_function_setup(dev, false, timeout);
if (err)
goto err_function;
@@ -1393,8 +1482,20 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int ret;
+
+ devl_lock(devlink);
+ ret = mlx5_load_one_devl_locked(dev, recovery);
+ devl_unlock(devlink);
+ return ret;
+}
+
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
mlx5_detach_device(dev);
@@ -1412,6 +1513,15 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
+ mlx5_unload_one_devl_locked(dev);
+ devl_unlock(devlink);
+}
+
static const int types[] = {
MLX5_CAP_GENERAL,
MLX5_CAP_GENERAL_2,
@@ -1434,6 +1544,8 @@ static const int types[] = {
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
MLX5_CAP_DEV_SHAMPO,
+ MLX5_CAP_MACSEC,
+ MLX5_CAP_ADV_VIRTUALIZATION,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1476,7 +1588,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
+ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1488,8 +1602,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
INIT_LIST_HEAD(&priv->pgdir_list);
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
- priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
- mlx5_debugfs_root);
+ priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
+ mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_tout_init(dev);
@@ -1514,6 +1628,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ /* The combination of sw_vhca_id and sw_owner_id forms a globally
+ * unique id per function that uses mlx5_core.
+ * Both values are supplied to FW as part of the init HCA command, to
+ * be used by both driver and FW where applicable.
+ */
+ dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
+ MAX_SW_VHCA_ID,
+ GFP_KERNEL);
+ if (dev->priv.sw_vhca_id < 0)
+ mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
+ dev->priv.sw_vhca_id);
+
return 0;
err_hca_caps:
@@ -1525,12 +1651,13 @@ err_pagealloc_init:
err_health_init:
mlx5_tout_cleanup(dev);
err_timeout_init:
- debugfs_remove(dev->priv.dbg_root);
+ debugfs_remove(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
return err;
}
@@ -1538,17 +1665,21 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ if (priv->sw_vhca_id > 0)
+ ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_tout_cleanup(dev);
- debugfs_remove_recursive(dev->priv.dbg_root);
+ debugfs_remove_recursive(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1619,7 +1750,13 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
+ /* mlx5_drain_fw_reset() uses devlink APIs, so fw_reset must be
+ * drained before unregistering the devlink.
+ */
+ mlx5_drain_fw_reset(dev);
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
devlink_unregister(devlink);
+ mlx5_sriov_disable(pdev);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
@@ -1733,7 +1870,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
mlx5_pci_trace(dev, "Enter, loading driver..\n");
- err = mlx5_load_one(dev);
+ err = mlx5_load_one(dev, false);
+
+ if (!err)
+ devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
!err ? "recovered" : "Failed");
@@ -1765,7 +1906,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
}
/* Panic tear down fw command will stop the PCI bus communication
- * with the HCA, so the health polll is no longer needed.
+ * with the HCA, so the health poll is no longer needed.
*/
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
@@ -1801,6 +1942,7 @@ static void shutdown(struct pci_dev *pdev)
int err;
mlx5_core_info(dev, "Shutdown was called\n");
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev);
@@ -1820,7 +1962,7 @@ static int mlx5_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- return mlx5_load_one(dev);
+ return mlx5_load_one(dev, false);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
@@ -1840,10 +1982,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
{ 0, }
};
@@ -1852,7 +1996,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one_devl_locked(dev);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1863,7 +2007,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
return -EIO;
}
- return mlx5_load_one(dev);
+ return mlx5_load_one_devl_locked(dev, true);
}
static struct pci_driver mlx5_core_driver = {
@@ -1880,6 +2024,48 @@ static struct pci_driver mlx5_core_driver = {
.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
+/**
+ * mlx5_vf_get_core_dev - Get the mlx5 core device from a given VF PCI device if
+ * mlx5_core is its driver.
+ * @pdev: The associated PCI device.
+ *
+ * Upon return the interface state lock stays held so the caller can use it
+ * safely. The caller must use the returned mlx5 device only for a narrow
+ * window and put it back with mlx5_vf_put_core_dev() as soon as usage is over.
+ *
+ * Return: Pointer to the associated mlx5_core_dev or NULL.
+ */
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *mdev;
+
+ mdev = pci_iov_get_pf_drvdata(pdev, &mlx5_core_driver);
+ if (IS_ERR(mdev))
+ return NULL;
+
+ mutex_lock(&mdev->intf_state_mutex);
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &mdev->intf_state)) {
+ mutex_unlock(&mdev->intf_state_mutex);
+ return NULL;
+ }
+
+ return mdev;
+}
+EXPORT_SYMBOL(mlx5_vf_get_core_dev);
+
+/**
+ * mlx5_vf_put_core_dev - Put the mlx5 core device back.
+ * @mdev: The mlx5 core device.
+ *
+ * Upon return the interface state lock is unlocked and the caller must not
+ * access the mdev anymore.
+ */
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev)
+{
+ mutex_unlock(&mdev->intf_state_mutex);
+}
+EXPORT_SYMBOL(mlx5_vf_put_core_dev);
+
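
A minimal sketch of the intended get/put usage, assuming a hypothetical VF-side
consumer (the function name and the work done under the lock are made up; only
the two exported calls come from this patch):

	#include <linux/pci.h>
	#include <linux/mlx5/driver.h>

	/* Borrow the PF's mlx5_core_dev for a short, bounded window. */
	static int example_peek_pf(struct pci_dev *vf_pdev)
	{
		struct mlx5_core_dev *mdev;

		mdev = mlx5_vf_get_core_dev(vf_pdev);
		if (!mdev)
			return -ENODEV; /* PF not bound to mlx5_core, or not up */

		/* ... brief use of mdev while intf_state_mutex is held ... */

		mlx5_vf_put_core_dev(mdev); /* releases intf_state_mutex */
		return 0;
	}
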
static void mlx5_core_verify_params(void)
{
if (prof_sel >= ARRAY_SIZE(profile)) {
@@ -1901,7 +2087,6 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
- mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index e019d68062d8..495cca58dccc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6f8baa0f2a73..a806e3de7b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -143,6 +143,36 @@ enum mlx5_semaphore_space_address {
#define MLX5_DEFAULT_PROF 2
+static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
+ size_t item_size, size_t num_items,
+ const char *func, int line)
+{
+ int inlen;
+
+ if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
+ mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
+ mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ if (check_add_overflow((int)fixed, inlen, &inlen)) {
+ mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ return inlen;
+}
+
+#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
+ mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
+
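
As a usage sketch of the new helper (the fixed header size and item type below
are placeholders, not a real command layout), a negative return doubles as the
errno:

	/* Hypothetical command whose input carries num_items u64 entries. */
	static int example_build_cmd(struct mlx5_core_dev *dev, size_t num_items)
	{
		void *in;
		int inlen;

		inlen = MLX5_FLEXIBLE_INLEN(dev, 256 /* fixed part, made up */,
					    sizeof(u64), num_items);
		if (inlen < 0)
			return inlen; /* overflow already logged as -ENOMEM */

		in = kvzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;
		/* ... fill the command and execute it ... */
		kvfree(in);
		return 0;
	}
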
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -164,6 +194,7 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+void mlx5_sriov_disable(struct pci_dev *pdev);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
@@ -176,7 +207,6 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -209,7 +239,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
@@ -290,7 +320,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev);
-int mlx5_load_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
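
The _devl_locked variants are for callers that already hold the devlink
instance lock, while the plain wrappers take it themselves. A sketch of the
convention, assuming it mirrors the mlx5_sriov_enable() hunk later in this
patch (the caller name is hypothetical):

	#include <net/devlink.h>

	static void example_teardown(struct mlx5_core_dev *dev)
	{
		struct devlink *devlink = priv_to_devlink(dev);

		devl_lock(devlink);
		mlx5_unload_one_devl_locked(dev); /* lock already held */
		devl_unlock(devlink);
	}
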
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index f099a087400e..9d735c343a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f6b5451328fc..60596357bfc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -32,7 +32,6 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
@@ -327,11 +326,12 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int notify_fail, bool ec_function)
+ int event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
+ int notify_fail = event;
u64 addr;
int err;
u32 *in;
@@ -351,8 +351,10 @@ retry:
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, function);
- if (err)
+ if (err) {
+ dev->priv.fw_pages_alloc_failed += (npages - i);
goto out_4k;
+ }
goto retry;
}
@@ -365,11 +367,20 @@ retry:
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
+ if (err == -EREMOTEIO) {
+ notify_fail = 0;
+		/* If the request was triggered by FW and FW failed it, ignore the error */
+ if (event) {
+ err = 0;
+ goto out_dropped;
+ }
+ }
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
- goto out_4k;
+ goto out_dropped;
}
dev->priv.fw_pages += npages;
@@ -384,6 +395,8 @@ retry:
kvfree(in);
return 0;
+out_dropped:
+ dev->priv.give_pages_dropped += npages;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
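
The switch from mlx5_cmd_exec() to the mlx5_cmd_do()/mlx5_cmd_check() pair is
what makes the EREMOTEIO special-casing above possible: mlx5_cmd_do() returns
-EREMOTEIO when the command reached FW but FW answered with a bad status,
without logging anything, and mlx5_cmd_check() then translates and logs the FW
status only for the failures the driver wants reported. A condensed sketch of
the pattern (the wrapper name is made up):

	static int example_cmd_quiet_on_fw_nack(struct mlx5_core_dev *dev,
						void *in, int inlen,
						void *out, int outlen,
						bool fw_triggered)
	{
		int err;

		err = mlx5_cmd_do(dev, in, inlen, out, outlen);
		if (fw_triggered && err == -EREMOTEIO)
			return 0; /* FW asked and FW refused: nothing to report */

		/* Translate the FW status to an errno and log other failures. */
		return mlx5_cmd_check(dev, err, in, out);
	}
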
@@ -455,7 +468,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 i = 0;
if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ return mlx5_cmd_do(dev, in, in_size, out, out_size);
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
@@ -479,7 +492,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
}
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int *nclaimed, bool ec_function)
+ int *nclaimed, bool event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
@@ -507,6 +520,17 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
func_id, npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
+ npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+ dev->priv.reclaim_pages_discard += npages;
+ }
+	/* If the reclaim was triggered by an FW event and FW failed it, ignore the error */
+ if (event && err == -EREMOTEIO) {
+ err = 0;
+ goto out_free;
+ }
+
+ err = mlx5_cmd_check(dev, err, in, out);
+ if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -546,7 +570,7 @@ static void pages_work_handler(struct work_struct *work)
release_all_pages(dev, req->func_id, req->ec_function);
else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
- req->ec_function);
+ true, req->ec_function);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
@@ -645,7 +669,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
int err;
err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
- &nclaimed, mlx5_core_is_ecpf(dev));
+ &nclaimed, false, mlx5_core_is_ecpf(dev));
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
err, func_id);
@@ -700,12 +724,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
return -ENOMEM;
xa_init(&dev->priv.page_root_xa);
+ mlx5_pages_debugfs_init(dev);
return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
+ mlx5_pages_debugfs_cleanup(dev);
xa_destroy(&dev->priv.page_root_xa);
destroy_workqueue(dev->priv.pg_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 90fec0649ef5..662f1d55e30e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -3,7 +3,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
@@ -95,8 +94,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- query_cap = kzalloc(query_sz, GFP_KERNEL);
- hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
if (!hca_cap || !query_cap) {
ret = -ENOMEM;
goto out;
@@ -119,8 +118,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
- kfree(hca_cap);
- kfree(query_cap);
+ kvfree(hca_cap);
+ kvfree(query_cap);
return ret;
}
@@ -129,11 +128,11 @@ static void irq_release(struct mlx5_irq *irq)
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
- /* free_irq requires that affinity and rmap will be cleared
+ /* free_irq requires that affinity_hint and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
*/
- irq_set_affinity_hint(irq->irqn, NULL);
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
free_irq(irq->irqn, &irq->nh);
kfree(irq);
@@ -238,7 +237,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
}
if (affinity) {
cpumask_copy(irq->mask, affinity);
- irq_set_affinity_hint(irq->irqn, irq->mask);
+ irq_set_affinity_and_hint(irq->irqn, irq->mask);
}
irq->pool = pool;
irq->refcount = 1;
@@ -251,7 +250,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
}
return irq;
err_xa:
- irq_set_affinity_hint(irq->irqn, NULL);
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
err_cpumask:
free_irq(irq->irqn, &irq->nh);
@@ -601,7 +600,8 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+ irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
+ dev->priv.numa_node);
if (!irq_table)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index aabc53ad8bdd..ee5ffdeb9015 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1ef2b6a848c1..a1548e6bfb35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -33,9 +33,10 @@
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
-int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
- int size_in, void *data_out, int size_out,
- u16 reg_id, int arg, int write)
+/* Calling with verbose=false will not print FW errors to the log */
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose)
{
int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
@@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
MLX5_SET(access_register_in, in, argument, arg);
MLX5_SET(access_register_in, in, register_id, reg_id);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
+ if (verbose)
+ err = mlx5_cmd_check(dev, err, in, out);
if (err)
goto out;
@@ -69,6 +72,15 @@ out:
kvfree(in);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_access_reg);
+
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+ int size_in, void *data_out, int size_out,
+ u16 reg_id, int arg, int write)
+{
+ return mlx5_access_reg(dev, data_in, size_in, data_out, size_out,
+ reg_id, arg, write, true);
+}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
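
mlx5_access_reg() gives callers a quiet mode for probing registers that may
legitimately be unsupported, while mlx5_core_access_reg() keeps the old
always-verbose behavior. A sketch of a quiet read (the wrapper is hypothetical
and the register layout is left opaque):

	static int example_quiet_reg_read(struct mlx5_core_dev *dev, u16 reg_id,
					  void *buf, int size)
	{
		/* write=0, verbose=false: FW errors are returned, not logged */
		return mlx5_access_reg(dev, buf, size, buf, size,
				       reg_id, 0, 0, false);
	}
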
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
@@ -263,7 +275,6 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
- int module_mapping;
int err;
MLX5_SET(pmlp_reg, in, local_port, 1);
@@ -272,8 +283,9 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
if (err)
return err;
- module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
- *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+ *module_num = MLX5_GET(lane_2_module_mapping,
+ MLX5_ADDR_OF(pmlp_reg, out, lane0_module_mapping),
+ module);
return 0;
}
@@ -353,6 +365,12 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset
*offset -= MLX5_EEPROM_PAGE_LENGTH;
}
+static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
+{
+ /* mcia supports either 12 dwords or 32 dwords */
+ return (MLX5_CAP_MCAM_FEATURE(dev, mcia_32dwords) ? 32 : 12) * sizeof(u32);
+}
+
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params, u8 *data)
{
@@ -362,7 +380,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
void *ptr;
u16 size;
- size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES);
+ size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, size, size);
@@ -406,23 +424,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
switch (module_id) {
case MLX5_MODULE_ID_SFP:
- mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
- mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}
- if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
- size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+ size = MLX5_EEPROM_PAGE_LENGTH - offset;
query.size = size;
+ query.offset = offset;
return mlx5_query_mcia(dev, &query, data);
}
@@ -432,35 +451,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
u8 *data)
{
- u8 module_id;
int err;
err = mlx5_query_module_num(dev, &params->module_number);
if (err)
return err;
- err = mlx5_query_module_id(dev, params->module_number, &module_id);
- if (err)
- return err;
-
- switch (module_id) {
- case MLX5_MODULE_ID_SFP:
- if (params->page > 0)
- return -EINVAL;
- break;
- case MLX5_MODULE_ID_QSFP:
- case MLX5_MODULE_ID_QSFP28:
- case MLX5_MODULE_ID_QSFP_PLUS:
- if (params->page > 3)
- return -EINVAL;
- break;
- case MLX5_MODULE_ID_DSFP:
- break;
- default:
- mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
- return -EINVAL;
- }
-
if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
params->i2c_address != MLX5_I2C_ADDR_LOW) {
mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
@@ -497,29 +493,6 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz)
-{
- u32 *in;
- int err;
-
- in = kvzalloc(sz, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- return err;
- }
-
- MLX5_SET(ppcnt_reg, in, local_port, port_num);
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
- err = mlx5_core_access_reg(dev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
-
- kvfree(in);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
-
static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out,
u32 out_size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 7161220afe30..9f8b4005f4bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3be659cd91f1..7d955a4d9f14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -501,7 +501,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
case MLX5_ESWITCH_OFFLOADS:
mlx5_sf_table_enable(table);
break;
- case MLX5_ESWITCH_NONE:
+ case MLX5_ESWITCH_LEGACY:
mlx5_sf_table_disable(table);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index e8185b69ac6c..c0e6c487c63c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -87,6 +87,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
enable_vfs_hca:
num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) {
+		/* Notify listeners before the VF is enabled so they can
+		 * set up any per-VF state in advance.
+		 */
+ blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+ MLX5_PF_NOTIFY_ENABLE_VF, dev);
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
@@ -127,6 +132,11 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
for (vf = num_vfs - 1; vf >= 0; vf--) {
if (!sriov->vfs_ctx[vf].enabled)
continue;
+		/* Notify listeners before the VF is disabled so they can
+		 * clean up their per-VF resources.
+		 */
+ blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+ MLX5_PF_NOTIFY_DISABLE_VF, dev);
err = mlx5_core_disable_hca(dev, vf + 1);
if (err) {
mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
@@ -135,8 +145,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
sriov->vfs_ctx[vf].enabled = 0;
}
- if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -145,9 +154,12 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
@@ -161,13 +173,16 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
-static void mlx5_sriov_disable(struct pci_dev *pdev)
+void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
+ devl_lock(devlink);
mlx5_device_disable_sriov(dev, num_vfs, true);
+ devl_unlock(devlink);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -205,19 +220,8 @@ int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));
sriov = &dev->priv.sriov;
-
- /* Reversed translation of PCI VF function number to the internal
- * function_id, which exists in the name of virtfn symlink.
- */
- for (id = 0; id < pci_num_vf(pf); id++) {
- if (!sriov->vfs_ctx[id].enabled)
- continue;
-
- if (vf->devfn == pci_iov_virtfn_devfn(pf, id))
- break;
- }
-
- if (id == pci_num_vf(pf) || !sriov->vfs_ctx[id].enabled)
+ id = pci_iov_vf_id(vf);
+ if (id < 0 || !sriov->vfs_ctx[id].enabled)
return -EINVAL;
return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
@@ -268,7 +272,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct pci_dev *pdev = dev->pdev;
- int total_vfs;
+ int total_vfs, i;
if (!mlx5_core_is_pf(dev))
return 0;
@@ -280,6 +284,9 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
if (!sriov->vfs_ctx)
return -ENOMEM;
+ for (i = 0; i < total_vfs; i++)
+ BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier);
+
return 0;
}
@@ -292,3 +299,53 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
kfree(sriov->vfs_ctx);
}
+
+/**
+ * mlx5_sriov_blocking_notifier_unregister - Unregister a notifier block
+ * from the given VF's notification chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be unregistered.
+ */
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb)
+{
+ struct mlx5_vf_context *vfs_ctx;
+ struct mlx5_core_sriov *sriov;
+
+ sriov = &mdev->priv.sriov;
+ if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))
+ return;
+
+ vfs_ctx = &sriov->vfs_ctx[vf_id];
+ blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister);
+
+/**
+ * mlx5_sriov_blocking_notifier_register - Register a notifier block
+ * on the given VF's notification chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be called upon the VF events.
+ *
+ * Returns 0 on success or an error code.
+ */
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb)
+{
+ struct mlx5_vf_context *vfs_ctx;
+ struct mlx5_core_sriov *sriov;
+
+ sriov = &mdev->priv.sriov;
+ if (vf_id < 0 || vf_id >= sriov->num_vfs)
+ return -EINVAL;
+
+ vfs_ctx = &sriov->vfs_ctx[vf_id];
+ return blocking_notifier_chain_register(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register);
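
These per-VF chains pair with the MLX5_PF_NOTIFY_ENABLE_VF /
MLX5_PF_NOTIFY_DISABLE_VF calls added to mlx5_device_{enable,disable}_sriov()
above. A sketch of a PF-side consumer (everything except the exported
register/unregister API and the two event codes is hypothetical):

	#include <linux/notifier.h>
	#include <linux/mlx5/driver.h>

	static int example_vf_event(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		switch (event) {
		case MLX5_PF_NOTIFY_ENABLE_VF:
			/* VF is about to be enabled: prepare per-VF state */
			break;
		case MLX5_PF_NOTIFY_DISABLE_VF:
			/* VF is about to be disabled: quiesce and clean up */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_vf_event,
	};

	/* Typically called once per VF during consumer setup. */
	static int example_watch_vf(struct mlx5_core_dev *pf_mdev, int vf_id)
	{
		return mlx5_sriov_blocking_notifier_register(pf_mdev, vf_id,
							     &example_nb);
	}
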
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index c61a5e83c78c..b1dfad274a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -22,6 +22,7 @@ enum dr_action_valid_state {
DR_ACTION_STATE_PUSH_VLAN,
DR_ACTION_STATE_NON_TERM,
DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_ASO,
DR_ACTION_STATE_MAX,
};
@@ -42,6 +43,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+ [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -71,6 +73,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -85,6 +88,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -93,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -105,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -118,6 +124,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
@@ -128,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -145,6 +153,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -163,18 +178,21 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -185,6 +203,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -196,6 +215,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -206,6 +226,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -219,6 +240,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -240,6 +271,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -253,6 +285,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -261,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -272,6 +306,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -284,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -296,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -312,6 +349,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -331,6 +375,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -338,6 +383,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -345,6 +391,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -356,6 +403,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -368,6 +416,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -379,6 +428,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -393,6 +443,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -530,6 +591,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
return 0;
}
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_actions_attr *attr,
+ bool rx_rule,
+ bool *recalc_cs_required)
+{
+ *recalc_cs_required = false;
+
+	/* If the device supports csum recalculation, no adjustment is needed */
+ if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+ return;
+
+ /* no adjustment needed on TX rules */
+ if (!rx_rule)
+ return;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+		/* Ignore the modify TTL action.
+		 * It is always kept as the last HW action.
+		 */
+ attr->modify_actions--;
+ return;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+ /* Due to a HW bug on some devices, modifying TTL on RX flows
+ * will cause an incorrect checksum calculation. In such cases
+ * we will use a FW table to recalculate the checksum.
+ */
+ *recalc_cs_required = true;
+}
+
static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
struct mlx5dr_action *actions[],
int last_idx)
@@ -570,6 +662,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
for (i = 0; i < num_actions; i++) {
struct mlx5dr_action_dest_tbl *dest_tbl;
+ struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -598,9 +691,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
matcher->tbl->level,
dest_tbl->tbl->level);
}
- attr.final_icm_addr = rx_rule ?
- dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+ chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+ dest_tbl->tbl->tx.s_anchor->chunk;
+ attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
} else {
struct mlx5dr_cmd_query_flow_table_details output;
int ret;
@@ -649,8 +742,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_MODIFY_HDR:
attr.modify_index = action->rewrite->index;
attr.modify_actions = action->rewrite->num_of_actions;
- recalc_cs_required = action->rewrite->modify_ttl &&
- !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
+ if (action->rewrite->modify_ttl)
+ dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+ &recalc_cs_required);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -669,15 +763,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
- if (rx_rule) {
- if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
- mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
- return -EOPNOTSUPP;
- }
- attr.final_icm_addr = action->vport->caps->icm_address_rx;
- } else {
- attr.final_icm_addr = action->vport->caps->icm_address_tx;
- }
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
@@ -711,6 +799,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ attr.aso_flow_meter.obj_id = action->aso->obj_id;
+ attr.aso_flow_meter.offset = action->aso->offset;
+ attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
+ attr.aso_flow_meter.init_color = action->aso->init_color;
+ break;
default:
mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
return -EINVAL;
@@ -737,12 +831,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
- /* Due to a HW bug in some devices, modifying TTL on RX flows will
- * cause an incorrect checksum calculation. In this case we will
- * use a FW table to recalculate.
- */
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
- rx_rule && recalc_cs_required && dest_action) {
+ if (recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
mlx5dr_err(dmn,
@@ -776,6 +865,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
+ [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
};
static struct mlx5dr_action *
@@ -847,7 +937,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
@@ -919,7 +1010,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
reformat_req,
&action->dest_tbl->fw_tbl.id,
&action->dest_tbl->fw_tbl.group_id,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (ret)
goto free_action;
@@ -1129,7 +1221,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
action->rewrite->data = (void *)hw_actions;
- action->rewrite->index = (action->rewrite->chunk->icm_addr -
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
+ (action->rewrite->chunk) -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
@@ -1560,12 +1653,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
-{
- return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
- !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
-}
-
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
@@ -1577,6 +1664,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite->dmn;
+ __be64 *modify_ttl_sw_action = NULL;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
@@ -1589,8 +1677,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
action->rewrite->allow_rx = 1;
action->rewrite->allow_tx = 1;
- for (i = 0; i < num_sw_actions; i++) {
- sw_action = &sw_actions[i];
+ for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+ /* modify TTL is handled separately, as a last action */
+ if (i == num_sw_actions) {
+ sw_action = modify_ttl_sw_action;
+ modify_ttl_sw_action = NULL;
+ } else {
+ sw_action = &sw_actions[i];
+ }
ret = dr_action_modify_check_field_limitation(action,
sw_action);
@@ -1599,10 +1693,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (!(*modify_ttl) &&
dr_action_modify_check_is_ttl_modify(sw_action)) {
- if (dr_action_modify_ttl_ignore(dmn))
- continue;
-
+ modify_ttl_sw_action = sw_action;
*modify_ttl = true;
+ continue;
}
/* Convert SW action to HW action */
@@ -1708,7 +1801,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
action->rewrite->modify_ttl = modify_ttl;
action->rewrite->data = (u8 *)hw_actions;
action->rewrite->num_of_actions = num_hw_actions;
- action->rewrite->index = (chunk->icm_addr -
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
@@ -1805,6 +1898,34 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return action;
}
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
+ u8 dest_reg_id, u8 aso_type,
+ u8 init_color, u8 meter_id)
+{
+ struct mlx5dr_action *action;
+
+ if (aso_type != MLX5_EXE_ASO_FLOW_METER)
+ return NULL;
+
+ if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
+ return NULL;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
+ if (!action)
+ return NULL;
+
+ action->aso->obj_id = obj_id;
+ action->aso->offset = meter_id;
+ action->aso->dest_reg_id = dest_reg_id;
+ action->aso->init_color = init_color;
+ action->aso->dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
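
A sketch of creating the new ASO action (the object id, destination register
and meter index below are placeholders; MLX5_FLOW_METER_COLOR_GREEN is assumed
to come from the same enum as the MLX5_FLOW_METER_COLOR_UNDEFINED bound
checked above):

	static struct mlx5dr_action *
	example_create_meter_action(struct mlx5dr_domain *dmn)
	{
		u32 obj_id = 0x12;   /* ASO flow-meter object id (made up) */
		u8 dest_reg_id = 2;  /* register to receive the meter color */
		u8 meter_idx = 0;    /* meter within the ASO object */

		return mlx5dr_action_create_aso(dmn, obj_id, dest_reg_id,
						MLX5_EXE_ASO_FLOW_METER,
						MLX5_FLOW_METER_COLOR_GREEN,
						meter_idx);
	}
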
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
@@ -1856,6 +1977,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_SAMPLER:
refcount_dec(&action->sampler->dmn->refcount);
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ refcount_dec(&action->aso->dmn->refcount);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 4dd619d238cc..16d65fe4f654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -311,7 +311,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format_struct, in_dests, destination_type,
- MLX5_FLOW_DESTINATION_TYPE_VPORT);
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
@@ -439,6 +439,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
+ MLX5_SET(create_flow_table_in, in, uid, attr->uid);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
@@ -604,7 +605,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
for (i = 0; i < fte->dests_size; i++) {
- if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+ fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
continue;
if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
@@ -719,18 +721,24 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
int list_size = 0;
for (i = 0; i < fte->dests_size; i++) {
- unsigned int id, type = fte->dest_arr[i].type;
+ enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
+ enum mlx5_ifc_flow_destination_type ifc_type;
+ unsigned int id;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ continue;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
id = fte->dest_arr[i].ft_num;
- type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
@@ -740,8 +748,10 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
destination_eswitch_owner_vhca_id_valid,
!!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
} else {
id = 0;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid, 1);
}
@@ -761,13 +771,15 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
id = fte->dest_arr[i].sampler_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
default:
id = fte->dest_arr[i].tir_num;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
}
MLX5_SET(dest_format_struct, in_dests, destination_type,
- type);
+ ifc_type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += dst_cnt_size;
list_size++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 2784cd59fefe..7adcf0eec13b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -3,7 +3,6 @@
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/seq_file.h>
#include "dr_types.h"
@@ -22,10 +21,11 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_TABLE_TX = 3102,
DR_DUMP_REC_TYPE_MATCHER = 3200,
- DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+ DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+ DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
DR_DUMP_REC_TYPE_RULE = 3300,
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -115,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id);
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id);
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
break;
case DR_ACTION_TYP_CTR:
@@ -218,7 +220,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
}
- dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+ dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
+ DR_STE_SIZE_REDUCED);
seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
@@ -347,16 +350,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
const u64 matcher_id)
{
enum dr_dump_rec_type rec_type;
+ u64 s_icm_addr, e_icm_addr;
int i, ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
DR_DUMP_REC_TYPE_MATCHER_TX;
+ s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
+ e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
- dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -427,12 +433,14 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
const u64 table_id)
{
enum dr_dump_rec_type rec_type;
+ u64 s_icm_addr;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
+ s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+ dr_dump_icm_to_idx(s_icm_addr));
return 0;
}
@@ -630,7 +638,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
}
dmn->dump_info.steering_debugfs =
- debugfs_create_dir("steering", dev->priv.dbg_root);
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
dmn->dump_info.fdb_debugfs =
debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5fa7f9d6d8b9..fc6ae49b5ecc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 68a4c32d5f34..f05ef0cd54ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.val = val;
fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level;
+ fte_info.flow_context.flow_source = flow_source;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 7f6fd9c5e371..4ca67fa24cc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,7 +4,6 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
@@ -58,6 +57,36 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
+{
+ u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+ return (u64)offset * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
+{
+ return chunk->buddy_mem->icm_mr->mkey;
+}
+
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
+{
+ u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+ return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
+{
+ return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
+ chunk->buddy_mem->pool->icm_type);
+}
+
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
+{
+ return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
+}
+
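
These helpers recompute per-chunk attributes from the owning buddy's metadata
instead of caching them in every chunk, shrinking struct mlx5dr_icm_chunk. A
stand-alone model of the address arithmetic with made-up numbers (entry size
64 mirrors DR_STE_SIZE for STE chunks):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t icm_start_addr = 0x1000000; /* buddy MR base (made up) */
		const uint64_t entry_size = 64;            /* DR_STE_SIZE for STE chunks */
		const uint64_t seg = 5;                    /* chunk's segment index */

		/* mirrors mlx5dr_icm_pool_get_chunk_mr_addr()/..._icm_addr() */
		printf("mr_addr  = 0x%llx\n",
		       (unsigned long long)(entry_size * seg));
		printf("icm_addr = 0x%llx\n",
		       (unsigned long long)(icm_start_addr + entry_size * seg));
		return 0;
	}
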
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
@@ -136,37 +165,36 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
kvfree(icm_mr);
}
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
- chunk->ste_arr = kvzalloc(chunk->num_of_entries *
- sizeof(chunk->ste_arr[0]), GFP_KERNEL);
- if (!chunk->ste_arr)
- return -ENOMEM;
-
- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
- DR_STE_SIZE_REDUCED, GFP_KERNEL);
- if (!chunk->hw_ste_arr)
- goto out_free_ste_arr;
-
- chunk->miss_list = kvmalloc(chunk->num_of_entries *
- sizeof(chunk->miss_list[0]), GFP_KERNEL);
- if (!chunk->miss_list)
- goto out_free_hw_ste_arr;
+ /* We currently support a single STE size for ConnectX-5 and all later
+ * devices. Once support is added for match STEs with a larger tag
+ * (32B instead of 16B), the STE size on devices newer than ConnectX-5
+ * will need to account for that.
+ */
+ return DR_STE_SIZE_REDUCED;
+}
- return 0;
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ int index = offset / DR_STE_SIZE;
-out_free_hw_ste_arr:
- kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
- kvfree(chunk->ste_arr);
- return -ENOMEM;
+ chunk->ste_arr = &buddy->ste_arr[index];
+ chunk->miss_list = &buddy->miss_list[index];
+ chunk->hw_ste_arr = buddy->hw_ste_arr +
+ index * dr_icm_buddy_get_ste_size(buddy);
}
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
- kvfree(chunk->miss_list);
- kvfree(chunk->hw_ste_arr);
- kvfree(chunk->ste_arr);
+ int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+ memset(chunk->hw_ste_arr, 0,
+ num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ memset(chunk->ste_arr, 0,
+ num_of_entries * sizeof(chunk->ste_arr[0]));
}
static enum mlx5dr_icm_type
@@ -180,7 +208,7 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
{
enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
- buddy->used_memory -= chunk->byte_size;
+ buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
list_del(&chunk->chunk_list);
if (icm_type == DR_ICM_TYPE_STE)
@@ -189,6 +217,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
kvfree(chunk);
}
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ int num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+ buddy->ste_arr = kvcalloc(num_of_entries,
+ sizeof(struct mlx5dr_ste), GFP_KERNEL);
+ if (!buddy->ste_arr)
+ return -ENOMEM;
+
+ /* Preallocate the full STE size on devices newer than ConnectX-5,
+ * since the same cache has to serve both full and reduced STEs.
+ */
+ buddy->hw_ste_arr = kvcalloc(num_of_entries,
+ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+ if (!buddy->hw_ste_arr)
+ goto free_ste_arr;
+
+ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+ if (!buddy->miss_list)
+ goto free_hw_ste_arr;
+
+ return 0;
+
+free_hw_ste_arr:
+ kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+ kvfree(buddy->ste_arr);
+ return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ kvfree(buddy->ste_arr);
+ kvfree(buddy->hw_ste_arr);
+ kvfree(buddy->miss_list);
+}
+
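With the per-buddy cache, the three kv*alloc calls that used to run for every chunk happen once per buddy, sized for the largest possible chunk; chunks then slice into the shared arrays. A rough sketch of the per-buddy cache footprint (entry counts and struct sizes are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_log_chunk_sz = 17;  /* hypothetical pool order */
	uint64_t num_entries = 1ULL << max_log_chunk_sz;
	uint64_t ste_sz = 64, hw_ste_sz = 48, list_sz = 16; /* illustrative sizes */

	printf("per-buddy cache: %llu bytes\n",
	       (unsigned long long)(num_entries * (ste_sz + hw_ste_sz + list_sz)));
	return 0;
}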
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy;
@@ -208,11 +274,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
buddy->icm_mr = icm_mr;
buddy->pool = pool;
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ /* Reduce allocations by preallocating and reusing the STE structures */
+ if (dr_icm_buddy_init_ste_cache(buddy))
+ goto err_cleanup_buddy;
+ }
+
/* add it to the -start- of the list in order to search in it first */
list_add(&buddy->list_node, &pool->buddy_mem_list);
return 0;
+err_cleanup_buddy:
+ mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
kvfree(buddy);
free_mr:
@@ -234,6 +308,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
mlx5dr_buddy_cleanup(buddy);
+ if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_buddy_cleanup_ste_cache(buddy);
+
kvfree(buddy);
}
@@ -252,48 +329,38 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
- chunk->rkey = buddy_mem_pool->icm_mr->mkey;
- chunk->mr_addr = offset;
- chunk->icm_addr =
- (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
- chunk->num_of_entries =
- mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
- chunk->byte_size =
- mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
chunk->seg = seg;
+ chunk->size = chunk_size;
+ chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
- mlx5dr_err(pool->dmn,
- "Failed to init ste arrays (order: %d)\n",
- chunk_size);
- goto out_free_chunk;
- }
+ if (pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_chunk_ste_init(chunk, offset);
- buddy_mem_pool->used_memory += chunk->byte_size;
- chunk->buddy_mem = buddy_mem_pool;
+ buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
INIT_LIST_HEAD(&chunk->chunk_list);
/* chunk now is part of the used_list */
list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
return chunk;
-
-out_free_chunk:
- kvfree(chunk);
- return NULL;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
- return true;
+ int allow_hot_size;
+
+ /* sync when hot memory reaches half of the pool size */
+ allow_hot_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) / 2;
- return false;
+ return pool->hot_memory_size > allow_hot_size;
}
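The hard-coded 64 MB threshold is replaced by a relative one: a steering sync is forced once the "hot" memory (freed chunks not yet flushed to hardware) exceeds half the byte size of one maximal pool chunk, so small pools sync earlier while large pools batch more frees per sync. A sketch of the check, with assumed sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sync_required(uint64_t hot_bytes, unsigned int max_log_chunk_sz,
			  uint32_t entry_size)
{
	/* half the byte size of one maximal chunk, as in the patch */
	uint64_t allow_hot = ((1ULL << max_log_chunk_sz) * entry_size) / 2;

	return hot_bytes > allow_hot;
}

int main(void)
{
	/* 8 MiB max chunk -> 4 MiB threshold; 4 MiB hot is not strictly above it */
	printf("%d\n", sync_required(4 << 20, 17, 64)); /* prints 0 */
	return 0;
}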
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ u32 num_entries;
int err;
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -306,9 +373,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
- mlx5dr_buddy_free_mem(buddy, chunk->seg,
- ilog2(chunk->num_of_entries));
- pool->hot_memory_size -= chunk->byte_size;
+ num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+ mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
+ pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
dr_icm_chunk_destroy(chunk, buddy);
}
@@ -406,7 +473,7 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
/* move the memory to the waiting list AKA "hot" */
mutex_lock(&pool->mutex);
list_move_tail(&chunk->chunk_list, &buddy->hot_list);
- pool->hot_memory_size += chunk->byte_size;
+ pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
/* Check if we have chunks that are waiting for sync-ste */
if (dr_icm_pool_is_sync_required(pool))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index e87cf498c77b..0726848eb3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
return (spec->dmac_47_16 || spec->dmac_15_0);
}
-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
- spec->src_ip_63_32 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
- spec->dst_ip_63_32 || spec->dst_ip_31_0);
-}
-
static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
{
return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
@@ -59,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
return spec->ttl_hoplimit;
}
+static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
+{
+ return spec->ipv4_ihl;
+}
+
#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \
(_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
(_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
@@ -115,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}
@@ -156,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}
@@ -273,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
}
static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
}
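Switching these capability checks from == to >= means ConnectX-7 (and anything newer in the enum) inherits the native VXLAN-GPE/GENEVE/ICMP parsing that ConnectX-6DX introduced, while older parts still fall back to the flex-parser bits. A compact sketch of the combined check, with hypothetical flag values:

#include <stdbool.h>
#include <stdio.h>

enum fmt { FMT_CX5, FMT_CX6DX, FMT_CX7 };        /* assumed ordering */
#define FLEX_ICMP_V4 0x1 /* stand-in for MLX5_FLEX_PARSER_ICMP_V4_ENABLED */

static bool supp_icmp_v4(enum fmt ver, unsigned int flex_protocols)
{
	/* native support on 6DX and newer, otherwise rely on the flex parser */
	return ver >= FMT_CX6DX || (flex_protocols & FLEX_ICMP_V4);
}

int main(void)
{
	printf("%d %d\n", supp_icmp_v4(FMT_CX7, 0), supp_icmp_v4(FMT_CX5, 0));
	return 0;
}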
@@ -503,11 +496,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.outer))
+ if (DR_MASK_IS_DST_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.outer))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -519,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_ttl_set(&mask.outer))
+ if (dr_mask_is_ttl_set(&mask.outer) ||
+ dr_mask_is_ipv4_ihl_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
@@ -610,11 +604,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.inner))
+ if (DR_MASK_IS_DST_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.inner))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -626,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_ttl_set(&mask.inner))
+ if (dr_mask_is_ttl_set(&mask.inner) ||
+ dr_mask_is_ipv4_ihl_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
@@ -710,7 +705,7 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
/* Connect start hash table to end anchor */
info.type = CONNECT_MISS;
- info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
curr_nic_matcher->s_htbl,
&info, false);
@@ -731,12 +726,14 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
return ret;
/* Update the pointing ste and next hash table */
- curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
- prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+ curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
+ prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
if (next_nic_matcher) {
- next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
- curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste =
+ curr_nic_matcher->e_anchor->chunk->ste_arr;
+ curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
+ next_nic_matcher->s_htbl;
}
return 0;
@@ -1048,12 +1045,12 @@ static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
if (next_nic_matcher) {
info.type = CONNECT_HIT;
info.hit_next_htbl = next_nic_matcher->s_htbl;
- next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
- prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
+ prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
} else {
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
- prev_anchor->ste_arr[0].next_htbl = NULL;
+ prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
}
return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b4374578425b..91ff19f67695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -21,12 +21,12 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
- 0, last_ste->hw_ste,
+ 0, mlx5dr_ste_get_hw_ste(last_ste),
ste_info_last, send_list, true);
return 0;
@@ -41,6 +41,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
+ u64 icm_addr;
/* Create new table for miss entry */
new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -53,9 +54,9 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
}
/* One and only entry, never grows */
- ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ ste = new_htbl->chunk->ste_arr;
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -79,7 +80,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
- ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+ ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
/* Next table */
if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
@@ -107,9 +108,11 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
* is already written to the hw.
*/
if (ste_info->size == DR_STE_SIZE_CTRL)
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+ memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+ ste_info->data, DR_STE_SIZE_CTRL);
else
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+ ste_info->data, DR_STE_SIZE_REDUCED);
ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
ste_info->size, ste_info->offset);
@@ -159,7 +162,7 @@ dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
/* Check if hw_ste is present in the list */
list_for_each_entry(ste, miss_list, miss_list_node) {
- if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+ if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
return ste;
}
@@ -185,7 +188,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
- new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+ new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
@@ -235,6 +238,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
struct mlx5dr_ste *new_ste;
+ u64 icm_addr;
int new_idx;
u8 sb_idx;
@@ -243,12 +247,12 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
/* Copy STE control and tag */
- memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
- new_ste = &new_htbl->ste_arr[new_idx];
+ new_ste = &new_htbl->chunk->ste_arr[new_idx];
if (mlx5dr_ste_is_not_used(new_ste)) {
mlx5dr_htbl_get(new_htbl);
@@ -269,7 +273,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
use_update_list = true;
}
- memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);
new_htbl->ctrl.num_of_valid_entries++;
@@ -334,7 +338,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
int err = 0;
int i;
- cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+ cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
if (cur_entries < 1) {
mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
@@ -342,7 +346,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
}
for (i = 0; i < cur_entries; i++) {
- cur_ste = &cur_htbl->ste_arr[i];
+ cur_ste = &cur_htbl->chunk->ste_arr[i];
if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
continue;
@@ -398,7 +402,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
- info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn->type,
@@ -446,21 +450,21 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
* (48B len) which works only on first 32B
*/
mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
- prev_htbl->ste_arr[0].hw_ste,
- new_htbl->chunk->icm_addr,
- new_htbl->chunk->num_of_entries);
+ prev_htbl->chunk->hw_ste_arr,
+ mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
+ mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));
- ste_to_update = &prev_htbl->ste_arr[0];
+ ste_to_update = &prev_htbl->chunk->ste_arr[0];
} else {
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
- cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
- 0, ste_to_update->hw_ste, ste_info,
- update_list, false);
+ 0, mlx5dr_ste_get_hw_ste(ste_to_update),
+ ste_info, update_list, false);
return new_htbl;
@@ -489,10 +493,10 @@ static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
enum mlx5dr_icm_chunk_size new_size;
- new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+ new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
- if (new_size == cur_htbl->chunk_size)
+ if (new_size == cur_htbl->chunk->size)
return NULL; /* Skip rehash, we are already at the max size */
return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
@@ -659,13 +663,13 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
int threshold;
- if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+ if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
return false;
if (!mlx5dr_ste_htbl_may_grow(htbl))
return false;
- if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+ if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
return false;
threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
@@ -755,6 +759,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
+ u64 icm_addr;
/* Take ref on table, only on first time this ste is used */
mlx5dr_htbl_get(cur_htbl);
@@ -762,8 +767,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
ste->ste_chain_location = ste_location;
@@ -822,7 +827,7 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
again:
index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
miss_list = &cur_htbl->chunk->miss_list[index];
- ste = &cur_htbl->ste_arr[index];
+ ste = &cur_htbl->chunk->ste_arr[index];
if (mlx5dr_ste_is_not_used(ste)) {
if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
@@ -858,7 +863,7 @@ again:
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
- cur_htbl->chunk_size);
+ cur_htbl->chunk->size);
mlx5dr_htbl_put(cur_htbl);
} else {
cur_htbl = new_htbl;
@@ -1195,7 +1200,8 @@ free_rule:
}
remove_from_nic_tbl:
- mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
+ if (!nic_matcher->rules)
+ mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 00aef47d7682..ef19a66f5233 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -407,17 +407,17 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
int *iterations,
int *num_stes)
{
+ u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int alloc_size;
- if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
- *iterations = htbl->chunk->byte_size /
- dmn->send_ring->max_post_send_size;
+ if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
+ *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
*byte_size = dmn->send_ring->max_post_send_size;
alloc_size = *byte_size;
*num_stes = *byte_size / DR_STE_SIZE;
} else {
*iterations = 1;
- *num_stes = htbl->chunk->num_of_entries;
+ *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
alloc_size = *num_stes * DR_STE_SIZE;
}
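The copy details tile the hash table over the send ring's maximum post size: oversized chunks are split into chunk_byte_size / max_post_send_size full posts, while anything smaller goes out in a single post sized by its entry count. A worked example with illustrative sizes (both powers of two, as in the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chunk_bytes = 256 * 1024; /* hypothetical htbl chunk */
	uint32_t max_post    = 64 * 1024;  /* send ring limit */
	uint32_t ste_size    = 64;         /* assumed DR_STE_SIZE */

	if (chunk_bytes > max_post)
		printf("iterations=%u stes/iter=%u\n",
		       chunk_bytes / max_post, max_post / ste_size);
	else
		printf("iterations=1 stes=%u\n", chunk_bytes / ste_size);
	return 0;
}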
@@ -453,7 +453,7 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
send_info.write.length = size;
send_info.write.lkey = 0;
send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
- send_info.rkey = ste->htbl->chunk->rkey;
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
return dr_postsend_icm_data(dmn, &send_info);
}
@@ -462,7 +462,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste, u8 *mask)
{
- u32 byte_size = htbl->chunk->byte_size;
+ u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int num_stes_per_iter;
int iterations;
u8 *data;
@@ -486,7 +486,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
* need to add the bit_mask
*/
for (j = 0; j < num_stes_per_iter; j++) {
- struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
+ struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
u32 ste_off = j * DR_STE_SIZE;
if (mlx5dr_ste_is_not_used(ste)) {
@@ -495,7 +495,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
} else {
/* Copy data */
memcpy(data + ste_off,
- htbl->ste_arr[ste_index + j].hw_ste,
+ htbl->chunk->hw_ste_arr +
+ DR_STE_SIZE_REDUCED * (ste_index + j),
DR_STE_SIZE_REDUCED);
/* Copy bit_mask */
memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
@@ -511,8 +512,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
send_info.write.length = byte_size;
send_info.write.lkey = 0;
send_info.remote_addr =
- mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
- send_info.rkey = htbl->chunk->rkey;
+ mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
if (ret)
@@ -530,7 +531,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
u8 *ste_init_data,
bool update_hw_ste)
{
- u32 byte_size = htbl->chunk->byte_size;
+ u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int iterations;
int num_stes;
u8 *copy_dst;
@@ -546,7 +547,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
if (update_hw_ste) {
/* Copy the reduced STE to hash table ste_arr */
for (i = 0; i < num_stes; i++) {
- copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
}
}
@@ -568,8 +569,8 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
send_info.write.length = byte_size;
send_info.write.lkey = 0;
send_info.remote_addr =
- mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
- send_info.rkey = htbl->chunk->rkey;
+ mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
if (ret)
@@ -591,8 +592,9 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
send_info.write.length = action->rewrite->num_of_actions *
DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
- send_info.remote_addr = action->rewrite->chunk->mr_addr;
- send_info.rkey = action->rewrite->chunk->rkey;
+ send_info.remote_addr =
+ mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 7e61742e58a0..09ebd3088857 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -25,6 +25,7 @@ bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
+ u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
u8 masked[DR_STE_SIZE_TAG] = {};
u32 crc32, index;
@@ -32,7 +33,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
int i;
/* Don't calculate CRC if the result is predicted */
- if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+ if (num_entries == 1 || htbl->byte_mask == 0)
return 0;
/* Mask tag using byte mask, bit per byte */
@@ -45,7 +46,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
}
crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
- index = crc32 & (htbl->chunk->num_of_entries - 1);
+ index = crc32 & (num_entries - 1);
return index;
}
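The index is still the CRC folded into the table with a mask, which only equals a modulo when the entry count is a power of two; ICM chunk sizes are powers of two by construction, so the shortcut holds. A small sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crc32 = 0xdeadbeefu;
	uint32_t num_entries = 1u << 10;  /* chunk entry counts are powers of two */

	/* equivalent to crc32 % num_entries, but a single AND */
	uint32_t index = crc32 & (num_entries - 1);

	printf("index=%u\n", index);
	return 0;
}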
@@ -96,13 +97,11 @@ void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
}
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste *ste, u64 miss_addr)
+ u8 *hw_ste, u64 miss_addr)
{
- u8 *hw_ste_p = ste->hw_ste;
-
- ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
- ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -113,37 +112,45 @@ void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+ return base_icm_addr + DR_STE_SIZE * index;
}
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u32 index = ste - ste->htbl->chunk->ste_arr;
+
+ return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
+}
+
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
+{
+ u64 index = ste - ste->htbl->chunk->ste_arr;
- return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+ return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
}
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return &ste->htbl->miss_list[index];
+ return &ste->htbl->chunk->miss_list[index];
}
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste *ste,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- u8 *hw_ste = ste->hw_ste;
ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
- ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
+ mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
- dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+ dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -166,7 +173,8 @@ bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
*/
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
- memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
+ DR_STE_SIZE_REDUCED);
dst->next_htbl = src->next_htbl;
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
@@ -184,18 +192,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_htbl *stats_tbl)
{
u8 tmp_data_ste[DR_STE_SIZE] = {};
- struct mlx5dr_ste tmp_ste = {};
u64 miss_addr;
- tmp_ste.hw_ste = tmp_data_ste;
+ miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
/* Use temp ste because dr_ste_always_miss_addr
* touches the bit_mask area, which doesn't exist in ste->hw_ste.
+ * A full-sized (DR_STE_SIZE) hw_ste is needed here.
*/
- memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
- miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
- memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+ dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
+ memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -237,7 +244,7 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
/* Copy all 64 hw_ste bytes */
- memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
sb_idx = ste->ste_chain_location - 1;
mlx5dr_ste_set_bit_mask(hw_ste,
nic_matcher->ste_builder[sb_idx].bit_mask);
@@ -273,12 +280,13 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
if (WARN_ON(!prev_ste))
return;
- miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
- ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
+ ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
- prev_ste->hw_ste, ste_info,
- send_ste_list, true /* Copy data*/);
+ mlx5dr_ste_get_hw_ste(prev_ste),
+ ste_info, send_ste_list,
+ true /* Copy data*/);
list_del_init(&ste->miss_list_node);
@@ -364,9 +372,11 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
- struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+ u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
+ u32 num_entries =
+ mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
- ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
}
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
@@ -385,15 +395,22 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_htbl_connect_info *connect_info)
{
bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
- struct mlx5dr_ste ste = {};
+ u8 tmp_hw_ste[DR_STE_SIZE] = {0};
ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
- ste.hw_ste = formatted_ste;
+ /* Use a temp ste because dr_ste_always_miss_addr/hit_htbl
+ * touch the bit_mask area, which doesn't exist in ste->hw_ste.
+ * A full-sized (DR_STE_SIZE) hw_ste is needed here.
+ */
+ memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
+ connect_info->hit_next_htbl);
else
- dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
+ connect_info->miss_icm_addr);
+ memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -444,7 +461,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
/* Write new table to HW */
info.type = CONNECT_MISS;
- info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr =
+ mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
&info, false)) {
mlx5dr_info(dmn, "Failed writing table to HW\n");
@@ -470,6 +488,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
+ u32 num_entries;
int i;
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
@@ -483,22 +502,18 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->chunk = chunk;
htbl->lu_type = lu_type;
htbl->byte_mask = byte_mask;
- htbl->ste_arr = chunk->ste_arr;
- htbl->hw_ste_arr = chunk->hw_ste_arr;
- htbl->miss_list = chunk->miss_list;
htbl->refcount = 0;
+ num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- for (i = 0; i < chunk->num_of_entries; i++) {
- struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+ for (i = 0; i < num_entries; i++) {
+ struct mlx5dr_ste *ste = &chunk->ste_arr[i];
- ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
- INIT_LIST_HEAD(&htbl->miss_list[i]);
+ INIT_LIST_HEAD(&chunk->miss_list[i]);
}
- htbl->chunk_size = chunk_size;
return htbl;
out_free_htbl:
@@ -523,8 +538,8 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
- attr, added_stes);
+ ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ hw_ste_arr, attr, added_stes);
}
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -534,8 +549,8 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
- attr, added_stes);
+ ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ hw_ste_arr, attr, added_stes);
}
const struct mlx5dr_ste_action_modify_field *
@@ -602,12 +617,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
used_hw_action_num);
}
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+ struct mlx5dr_match_spec *spec)
+{
+ if (spec->ip_version) {
+ if (spec->ip_version != 0xf) {
+ mlx5dr_err(dmn,
+ "Partial ip_version mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+ } else if (spec->ethertype != 0xffff &&
+ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+ mlx5dr_err(dmn,
+ "Partial/no ethertype mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
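The pre-check refuses masks the hardware cannot honor unambiguously: a masked ip_version must be fully masked (0xf), and matching on src/dst IP without a full ethertype mask is rejected because the L3 type would be undetermined. A standalone sketch of the rule (field encoding assumed):

#include <stdbool.h>
#include <stdio.h>

struct spec_mask {
	unsigned ip_version; /* 4-bit mask field */
	unsigned ethertype;  /* 16-bit mask field */
	bool ip_set;         /* any src/dst IP mask bit set */
};

static bool spec_mask_ok(const struct spec_mask *s)
{
	if (s->ip_version)
		return s->ip_version == 0xf;  /* partial version mask: reject */
	if (s->ip_set && s->ethertype != 0xffff)
		return false;                 /* IP match needs a full ethertype */
	return true;
}

int main(void)
{
	struct spec_mask m = { .ip_version = 0x3, .ip_set = true };

	printf("%d\n", spec_mask_ok(&m)); /* prints 0 */
	return 0;
}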
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value)
{
- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+ if (value)
+ return 0;
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
mlx5dr_err(dmn,
"Partial mask source_port is not supported\n");
@@ -621,6 +658,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
}
}
+ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->outer))
+ return -EINVAL;
+
+ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->inner))
+ return -EINVAL;
+
return 0;
}
@@ -763,6 +808,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo
spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
+ spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
@@ -1330,15 +1376,14 @@ void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
-static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
- [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
- [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
-};
-
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
- if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
- return NULL;
+ if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
+ return mlx5dr_ste_get_ctx_v0();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return mlx5dr_ste_get_ctx_v1();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
+ return mlx5dr_ste_get_ctx_v2();
- return mlx5dr_ste_ctx_arr[version];
+ return NULL;
}
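Replacing the context array with per-version accessors lets each ste_ctx stay static in its own file and gives ConnectX-7's STEv2 a clean entry point (it builds on v1 via the new dr_ste_v1.h). A sketch of the dispatch shape, with the contexts reduced to a name for illustration:

#include <stdio.h>

struct ste_ctx { const char *name; };

static struct ste_ctx ctx_v0 = { "v0" };
static struct ste_ctx ctx_v1 = { "v1" };
static struct ste_ctx ctx_v2 = { "v2" };

static struct ste_ctx *get_ctx_v0(void) { return &ctx_v0; }
static struct ste_ctx *get_ctx_v1(void) { return &ctx_v1; }
static struct ste_ctx *get_ctx_v2(void) { return &ctx_v2; }

static struct ste_ctx *get_ctx(unsigned int version)
{
	switch (version) {
	case 0: return get_ctx_v0(); /* CONNECTX_5 */
	case 1: return get_ctx_v1(); /* CONNECTX_6DX */
	case 2: return get_ctx_v2(); /* CONNECTX_7 */
	default: return NULL;
	}
}

int main(void)
{
	printf("%s\n", get_ctx(2)->name); /* prints v2 */
	return 0;
}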
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index ca8fa32b8680..17513baff9b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -161,11 +161,13 @@ struct mlx5dr_ste_ctx {
u32 actions_caps;
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
void (*set_actions_tx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
@@ -197,7 +199,8 @@ struct mlx5dr_ste_ctx {
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
-extern struct mlx5dr_ste_ctx ste_ctx_v0;
-extern struct mlx5dr_ste_ctx ste_ctx_v1;
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 2d62950f7a29..2010d4ac6519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -408,6 +408,7 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
@@ -419,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
* encapsulation. The reason for that is that we support
* modify headers for outer headers only
*/
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
@@ -477,6 +478,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
@@ -511,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
}
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
@@ -1152,6 +1154,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
return 0;
}
@@ -1897,7 +1900,7 @@ static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
}
-struct mlx5dr_ste_ctx ste_ctx_v0 = {
+static struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
@@ -1950,3 +1953,8 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.set_action_copy = &dr_ste_v0_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
+{
+ return &ste_ctx_v0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 6ca06800f1d9..ee677a5c76be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -3,7 +3,7 @@
#include <linux/types.h>
#include "mlx5_ifc_dr_ste_v1.h"
-#include "dr_ste.h"
+#include "dr_ste_v1.h"
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
@@ -89,6 +89,7 @@ enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
@@ -121,12 +122,16 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
};
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
@@ -223,22 +228,22 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
@@ -262,7 +267,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
-static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
@@ -270,7 +275,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
-static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
{
u64 index =
((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
@@ -279,12 +284,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
return index << 6;
}
-static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
-static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
@@ -295,13 +300,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
-static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
-static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
{
u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
@@ -314,7 +319,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
-static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
u64 index = (icm_addr >> 5) | ht_size;
@@ -322,8 +327,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
-static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
- bool is_rx, u16 gvmi)
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
@@ -333,8 +337,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
-static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
- u32 ste_size)
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
u8 *mask = tag + DR_STE_SIZE_TAG;
@@ -496,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
+ u32 object_id,
+ u32 offset,
+ u8 dest_reg_id,
+ u8 init_color)
+{
+ MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_ASO);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
+ object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
+ /* Convert reg_c index to HW 64bit index */
+ MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
+ (dest_reg_id - 1) / 2);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
+ offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
+ init_color);
+}
+
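The ASO flow-meter action encodes which meter to run: several meters share one ASO object, so the context number is the object id plus offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ, the line id is the remainder, and the reg_c destination index is halved because hardware addresses the registers as 64-bit pairs. A worked example (the per-object count here is an assumption):

#include <stdio.h>

#define METERS_PER_OBJ 2 /* stand-in for MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ */

int main(void)
{
	unsigned int obj_id = 100, offset = 5, dest_reg_id = 3;

	unsigned int ctx_num = obj_id + offset / METERS_PER_OBJ; /* 102 */
	unsigned int line_id = offset % METERS_PER_OBJ;          /* 1 */
	unsigned int hw_reg  = (dest_reg_id - 1) / 2;            /* pair index 1 */

	printf("ctx=%u line=%u reg=%u\n", ctx_num, line_id, hw_reg);
	return 0;
}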
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -511,11 +535,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
-static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr,
- u32 *added_stes)
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u32 actions_caps,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -533,7 +558,10 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
- allow_modify_hdr = false;
+
+ /* Check if vlan_pop and modify_hdr on same STE is supported */
+ if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+ allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_CTR])
@@ -627,15 +655,31 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr,
- u32 *added_stes)
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u32 actions_caps,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -677,13 +721,16 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
- allow_modify_hdr = false;
- allow_ctr = false;
}
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
+ allow_ctr = false;
+
+ /* Check if vlan_pop and modify_hdr on same STE is supported */
+ if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+ allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
@@ -731,9 +778,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
- allow_ctr = false;
}
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+ allow_ctr = false;
}
if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
@@ -796,15 +843,30 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-static void dr_ste_v1_set_action_set(u8 *d_action,
- u8 hw_field,
- u8 shifter,
- u8 length,
- u32 data)
+void dr_ste_v1_set_action_set(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
@@ -814,11 +876,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action,
MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
-static void dr_ste_v1_set_action_add(u8 *d_action,
- u8 hw_field,
- u8 shifter,
- u8 length,
- u32 data)
+void dr_ste_v1_set_action_add(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
@@ -828,12 +890,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action,
MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
-static void dr_ste_v1_set_action_copy(u8 *d_action,
- u8 dst_hw_field,
- u8 dst_shifter,
- u8 dst_len,
- u8 src_hw_field,
- u8 src_shifter)
+void dr_ste_v1_set_action_copy(u8 *d_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
{
dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
@@ -848,11 +910,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action,
#define DR_STE_DECAP_L3_ACTION_NUM 8
#define DR_STE_L2_HDR_MAX_SZ 20
-static int dr_ste_v1_set_action_decap_l3_list(void *data,
- u32 data_sz,
- u8 *hw_action,
- u32 hw_action_sz,
- u16 *used_hw_action_num)
+int dr_ste_v1_set_action_decap_l3_list(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num)
{
u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
void *data_ptr = padded_data;
@@ -977,8 +1039,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1001,8 +1063,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
@@ -1025,8 +1087,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
@@ -1060,8 +1122,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
@@ -1079,8 +1141,8 @@ static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ?
- DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ?
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
if (mask->svlan_tag || mask->cvlan_tag) {
@@ -1201,8 +1263,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
-static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1234,8 +1296,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
-static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1314,8 +1376,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1331,12 +1393,13 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+ DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
@@ -1375,8 +1438,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
@@ -1399,8 +1462,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
@@ -1426,8 +1489,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
@@ -1471,8 +1534,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
@@ -1506,8 +1569,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
@@ -1547,8 +1610,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
@@ -1594,8 +1657,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
@@ -1616,8 +1679,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
@@ -1643,8 +1706,8 @@ static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
@@ -1673,9 +1736,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
-dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
@@ -1703,9 +1765,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
-dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
@@ -1726,8 +1787,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
@@ -1749,8 +1810,8 @@ static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
@@ -1773,8 +1834,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
@@ -1837,8 +1898,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
@@ -1892,8 +1953,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1901,8 +1962,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
-static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1926,7 +1987,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va
return 0;
}
-static void
+void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -1959,7 +2020,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par
return 0;
}
-static void
+void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -1982,8 +2043,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v
return 0;
}
-static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
@@ -2008,7 +2069,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
+void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -2035,7 +2096,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
+void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -2046,7 +2107,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
}
-struct mlx5dr_ste_ctx ste_ctx_v1 = {
+static struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
@@ -2091,7 +2152,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
DR_STE_CTX_ACTION_CAP_RX_PUSH |
- DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP |
+ DR_STE_CTX_ACTION_CAP_POP_MDFY,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
@@ -2103,3 +2165,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
+{
+ return &ste_ctx_v1;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
new file mode 100644
index 000000000000..8a1d49790c6e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V1_
+#define _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+ u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+ u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+ u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+ u32 hw_action_sz, u16 *used_hw_action_num);
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+
+#endif /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
new file mode 100644
index 000000000000..c60fddd125d2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+ return &ste_ctx_v2;
+}
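
With both accessors in place, the exported context globals are gone and a caller can pick the STE context from the device's steering format version. A minimal dispatcher sketch follows; the mlx5dr_ste_get_ctx() name and the v0 getter are assumptions modeled on the v1/v2 getters above, not part of this hunk:

/* Sketch: select the STE context by steering format version.
 * mlx5dr_ste_get_ctx() and mlx5dr_ste_get_ctx_v0() are assumed here
 * for illustration; only the v1/v2 getters appear in this patch.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
		return mlx5dr_ste_get_ctx_v0();
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return mlx5dr_ste_get_ctx_v1();
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
		return mlx5dr_ste_get_ctx_v2();

	return NULL;
}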
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 8ca110643cc0..31d443dd8386 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -10,6 +10,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
+ struct mlx5dr_icm_chunk *chunk;
int ret;
if (!list_empty(&nic_tbl->nic_matcher_list))
@@ -22,13 +23,14 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
else
last_htbl = nic_tbl->s_anchor;
- if (action)
- nic_tbl->default_icm_addr =
- nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
- action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
- else
+ if (action) {
+ chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+ action->dest_tbl->tbl->rx.s_anchor->chunk :
+ action->dest_tbl->tbl->tx.s_anchor->chunk;
+ nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+ } else {
nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+ }
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
@@ -212,7 +214,7 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
tbl->table_type);
}
-static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
{
bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
@@ -222,10 +224,10 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
int ret;
if (tbl->rx.s_anchor)
- icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+ icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
if (tbl->tx.s_anchor)
- icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+ icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
ft_attr.table_type = tbl->table_type;
ft_attr.icm_addr_rx = icm_addr_rx;
@@ -234,6 +236,7 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
ft_attr.sw_owner = true;
ft_attr.decap_en = en_decap;
ft_attr.reformat_en = en_encap;
+ ft_attr.uid = uid;
ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
NULL, &tbl->table_id);
@@ -241,7 +244,8 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
+ u32 flags, u16 uid)
{
struct mlx5dr_table *tbl;
int ret;
@@ -261,7 +265,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto free_tbl;
- ret = dr_table_create_sw_owned_tbl(tbl);
+ ret = dr_table_create_sw_owned_tbl(tbl, uid);
if (ret)
goto uninit_tbl;
@@ -305,3 +309,8 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
{
return tbl->table_id;
}
+
+struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+ return ft->fs_dr_table.dr_table;
+}
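
The new uid parameter and the fs_ft lookup helper pair naturally: a table is created under a caller-supplied UID, and code that only holds the mlx5_flow_table can later recover the underlying dr_table. A usage sketch, with hypothetical "dmn"/"ft" locals and placeholder level/flags/uid values:

/* Usage sketch only; dmn, ft and the numeric arguments are
 * hypothetical.
 */
struct mlx5dr_table *tbl;

tbl = mlx5dr_table_create(dmn, 0 /* level */, 0 /* flags */, 0 /* uid */);
if (!tbl)
	return -EINVAL;

/* Once fs_dr has attached it to a mlx5_flow_table "ft": */
tbl = mlx5dr_table_get_from_fs_ft(ft);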
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1b3d484b99be..1777a1e508e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -91,6 +91,7 @@ enum mlx5dr_ste_ctx_action_cap {
DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0,
DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1,
DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
+ DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
};
enum {
@@ -126,6 +127,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
+ DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_MAX,
};
@@ -146,10 +148,12 @@ struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
- u8 *hw_ste;
/* refcount: indicates the number of rules that are using this ste */
u32 refcount;
+ /* this ste is part of a rule, located in ste's chain */
+ u8 ste_chain_location;
+
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
@@ -160,9 +164,6 @@ struct mlx5dr_ste {
/* The rule this STE belongs to */
struct mlx5dr_rule_rx_tx *rule_rx_tx;
-
- /* this ste is part of a rule, located in ste's chain */
- u8 ste_chain_location;
};
struct mlx5dr_ste_htbl_ctrl {
@@ -180,14 +181,7 @@ struct mlx5dr_ste_htbl {
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
- struct mlx5dr_ste *ste_arr;
- u8 *hw_ste_arr;
-
- struct list_head *miss_list;
-
- enum mlx5dr_icm_chunk_size chunk_size;
struct mlx5dr_ste *pointing_ste;
-
struct mlx5dr_ste_htbl_ctrl ctrl;
};
@@ -278,6 +272,13 @@ struct mlx5dr_ste_actions_attr {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
+
+ struct {
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+ } aso_flow_meter;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -555,7 +556,9 @@ struct mlx5dr_match_spec {
*/
u32 tcp_dport:16;
- u32 reserved_auto1:24;
+ u32 reserved_auto1:16;
+ u32 ipv4_ihl:4;
+ u32 reserved_auto2:4;
u32 ttl_hoplimit:8;
/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
@@ -798,6 +801,16 @@ struct mlx5dr_match_param {
(_misc3)->icmpv4_code || \
(_misc3)->icmpv4_header_data)
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+ (_spec)->src_ip_95_64 || \
+ (_spec)->src_ip_63_32 || \
+ (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+ (_spec)->dst_ip_95_64 || \
+ (_spec)->dst_ip_63_32 || \
+ (_spec)->dst_ip_31_0)
+
struct mlx5dr_esw_caps {
u64 drop_icm_address_rx;
u64 drop_icm_address_tx;
@@ -1030,6 +1043,14 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
+struct mlx5dr_action_aso_flow_meter {
+ struct mlx5dr_domain *dmn;
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1044,6 +1065,7 @@ struct mlx5dr_action {
struct mlx5dr_action_vport *vport;
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
+ struct mlx5dr_action_aso_flow_meter *aso;
};
};
@@ -1084,16 +1106,12 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
struct list_head chunk_list;
- u32 rkey;
- u32 num_of_entries;
- u32 byte_size;
- u64 icm_addr;
- u64 mr_addr;
/* indicates the index of this chunk in the whole memory,
* used for deleting the chunk from the buddy
*/
unsigned int seg;
+ enum mlx5dr_icm_chunk_size size;
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
@@ -1133,6 +1151,13 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
enum mlx5dr_ipv outer_ipv,
enum mlx5dr_ipv inner_ipv);
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+
static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
{
@@ -1165,7 +1190,7 @@ static inline int
mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
{
int num_of_entries =
- mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+ mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
/* Threshold is 50%, one is added to table of size 1 */
return (num_of_entries + 1) / 2;
@@ -1174,7 +1199,7 @@ mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
static inline bool
mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
{
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+ if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
return false;
return true;
@@ -1192,6 +1217,7 @@ struct mlx5dr_cmd_query_flow_table_details {
struct mlx5dr_cmd_create_flow_table_attr {
u32 table_type;
+ u16 uid;
u64 icm_addr_rx;
u64 icm_addr_tx;
u8 level;
@@ -1268,20 +1294,6 @@ struct mlx5dr_cmd_gid_attr {
u32 roce_ver;
};
-struct mlx5dr_cmd_qp_create_attr {
- u32 page_id;
- u32 pdn;
- u32 cqn;
- u32 pm_state;
- u32 service_type;
- u32 buff_umem_id;
- u32 db_umem_id;
- u32 sq_wqe_cnt;
- u32 rq_wqe_cnt;
- u32 rq_wqe_shift;
- u8 isolate_vl_tc:1;
-};
-
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr);
@@ -1453,7 +1465,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
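
The fields dropped from mlx5dr_icm_chunk above (rkey, num_of_entries, byte_size, icm_addr, mr_addr) are now derived on demand through the new mlx5dr_icm_pool_get_chunk_*() accessors, leaving each chunk with just its buddy pointer, segment index and size. A sketch of how such an accessor could compute the ICM address; the buddy/pool member names (pool, icm_mr, icm_start_addr) are assumptions for illustration:

/* Sketch only: derive the chunk's ICM address from its buddy's base
 * address and the chunk's segment index. Member names on the buddy
 * are assumed and may differ from the real implementation.
 */
u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(
				chunk->buddy_mem->pool->icm_type);

	return chunk->buddy_mem->icm_mr->icm_start_addr +
	       (u64)entry_size * chunk->seg;
}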
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index a476da2424f8..13b6d4721e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -44,11 +44,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
if (err && action) {
err = mlx5dr_action_destroy(action);
- if (err) {
- action = NULL;
- mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
- err);
- }
+ if (err)
+ mlx5_core_err(ns->dev,
+ "Failed to destroy action (%d)\n", err);
+ action = NULL;
}
ft->fs_dr_table.miss_action = action;
if (old_miss_action) {
@@ -63,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -72,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- size,
+ ft_attr,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -80,7 +79,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
+ ft_attr->uid);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
@@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
+/* We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus, typically, a counter and one more
+ * action for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
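/* Editorial aside, not part of the patch: the new budget decomposes as
 * 32 destinations + 1 counter + 1 multi-destination table action; a
 * compile-time check could pin that arithmetic down, e.g.:
 *
 *	static_assert(MLX5_FLOW_CONTEXT_ACTION_MAX == 32 + 1 + 1);
 */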
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
- num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -496,21 +501,58 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action =
+ mlx5dr_action_create_aso(domain,
+ fte->action.exe_aso.object_id,
+ fte->action.exe_aso.return_reg_id,
+ fte->action.exe_aso.type,
+ fte->action.exe_aso.flow_meter.init_color,
+ fte->action.exe_aso.flow_meter.meter_idx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
- if (term_actions->reformat)
+ if (term_actions->reformat) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->reformat;
+ }
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->flow_context.flow_source;
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
@@ -735,6 +777,16 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
}
+static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB ||
+ MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+}
+
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return mlx5dr_is_supported(dev);
@@ -759,6 +811,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
+ .get_capabilities = mlx5_cmd_dr_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
index 1fb185d6ac7f..d168622063d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
@@ -14,10 +14,6 @@ struct mlx5_fs_dr_action {
struct mlx5dr_action *dr_action;
};
-struct mlx5_fs_dr_ns {
- struct mlx5_dr_ns *dr_ns;
-};
-
struct mlx5_fs_dr_rule {
struct mlx5dr_rule *dr_rule;
/* Only actions created by fs_dr */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 9604b2091358..fb078fa0f0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits {
u8 reserved_at_38[0x8];
};
+enum {
+ MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
+};
+
+struct mlx5_ifc_ste_aso_flow_meter_action_bits {
+ u8 reserved_at_0[0xc];
+ u8 action[0x1];
+ u8 initial_color[0x2];
+ u8 line_id[0x1];
+};
+
+struct mlx5_ifc_ste_double_action_aso_v1_bits {
+ u8 action_id[0x8];
+ u8 aso_context_number[0x18];
+
+ u8 dest_reg_id[0x2];
+ u8 change_ordering_tag[0x1];
+ u8 aso_check_ordering[0x1];
+ u8 aso_context_type[0x4];
+ u8 reserved_at_28[0x8];
+ union {
+ u8 aso_fields[0x10];
+ struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
+ };
+};
+
#endif /* MLX5_IFC_DR_H */
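
The flow_meter union above is what the v1 code fills in when it writes the double ASO action (see the dest_reg_id/init_color writes at the top of this section). A hedged sketch of such a setter, modeled on the dr_ste_v1 action writers earlier in the patch; the function name and the DR_STE_V1_* constants are assumptions not shown in this excerpt:

/* Sketch: program a double ASO flow-meter action using the layout
 * above. DR_STE_V1_ACTION_ID_ASO and DR_STE_V1_ASO_CTX_TYPE_POLICERS
 * are assumed enum values.
 */
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action, u32 obj_id,
					 u32 offset, u8 dest_reg_id,
					 u8 init_color)
{
	MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_ASO);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
		 obj_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
	MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id, dest_reg_id);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
		 DR_STE_V1_ASO_CTX_TYPE_POLICERS);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
		 offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
		 init_color);
}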
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c7c93131b762..226a0d7bb06d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -51,7 +51,11 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
+ u16 uid);
+
+struct mlx5dr_table *
+mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
int mlx5dr_table_destroy(struct mlx5dr_table *table);
@@ -96,7 +100,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
struct mlx5dr_action *mlx5dr_action_create_drop(void);
@@ -127,6 +132,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
+ u32 obj_id,
+ u8 return_reg_id,
+ u8 aso_type,
+ u8 init_color,
+ u8 meter_id);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
@@ -136,7 +149,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX)));
+ MLX5_STEERING_FORMAT_CONNECTX_7)));
}
/* buddy functions & structure */
@@ -160,6 +173,11 @@ struct mlx5dr_icm_buddy_mem {
* sync_ste command sets them free.
*/
struct list_head hot_list;
+
+ /* Memory optimisation */
+ struct mlx5dr_ste *ste_arr;
+ struct list_head *miss_list;
+ u8 *hw_ste_arr;
};
int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
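
mlx5dr_action_create_aso() takes exactly the fields that the fs_dr.c hunk earlier pulls out of fte->action.exe_aso. A call sketch for the flow-meter case; "dmn" and "fte" are hypothetical locals, and the argument order follows the prototype above:

/* Sketch mirroring the fs_dr.c call site earlier in this patch. */
struct mlx5dr_action *action;

action = mlx5dr_action_create_aso(dmn,
				  fte->action.exe_aso.object_id,
				  fte->action.exe_aso.return_reg_id,
				  fte->action.exe_aso.type,
				  fte->action.exe_aso.flow_meter.init_color,
				  fte->action.exe_aso.flow_meter.meter_idx);
if (!action)
	return -ENOMEM;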
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 01e9c412977c..8455e79bc44a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
@@ -100,19 +99,21 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
int err = -ENOMEM;
phys_addr_t pfn;
int bfregs;
+ int node;
int i;
bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
- up = kzalloc(sizeof(*up), GFP_KERNEL);
+ node = mdev->priv.numa_node;
+ up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
if (!up)
return ERR_PTR(err);
up->mdev = mdev;
- up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+ up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
if (!up->reg_bitmap)
goto error1;
- up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+ up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
if (!up->fp_bitmap)
goto error1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 8846d30a380a..d5c317325030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -280,7 +280,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- out = kzalloc(out_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -307,7 +307,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
ether_addr_copy(addr_list[i], mac_addr);
}
out:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
@@ -335,7 +335,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- in = kzalloc(in_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -360,7 +360,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
}
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- kfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
@@ -386,7 +386,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
list_size * MLX5_ST_SZ_BYTES(vlan_layout);
memset(out, 0, sizeof(out));
- in = kzalloc(in_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -411,7 +411,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
}
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- kfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
@@ -542,8 +542,8 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
out_sz += nout * sizeof(*gid);
- in = kzalloc(in_sz, GFP_KERNEL);
- out = kzalloc(out_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
@@ -573,8 +573,8 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
gid->global.interface_id = tmp->global.interface_id;
out:
- kfree(in);
- kfree(out);
+ kvfree(in);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
@@ -607,8 +607,8 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
- in = kzalloc(in_sz, GFP_KERNEL);
- out = kzalloc(out_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
@@ -638,8 +638,8 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
out:
- kfree(in);
- kfree(out);
+ kvfree(in);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
@@ -658,7 +658,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- out = kzalloc(out_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -717,7 +717,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
system_image_guid);
ex:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
@@ -728,7 +728,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep;
int err;
- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
@@ -736,7 +736,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
if (!err)
*sys_image_guid = rep->sys_image_guid;
- kfree(rep);
+ kvfree(rep);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
@@ -747,7 +747,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep;
int err;
- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
@@ -755,7 +755,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
if (!err)
*node_guid = rep->node_guid;
- kfree(rep);
+ kvfree(rep);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
@@ -770,7 +770,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
int err;
- out = kzalloc(outlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -786,7 +786,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
nic_vport_context.promisc_all);
out:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
@@ -874,7 +874,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
int value;
int err;
- out = kzalloc(outlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -891,7 +891,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
*status = !value;
out:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
@@ -1033,7 +1033,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "vf %d\n", vf);
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- in = kzalloc(in_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1065,7 +1065,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
req->cap_mask1_perm);
err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
- kfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
goto free;
MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
- nic_vport_context.affiliated_vhca_id,
- MLX5_CAP_GEN(master_mdev, vhca_id));
+ if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
+ } else {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ }
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
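
All of the kzalloc-to-kvzalloc conversions in this file cover buffers whose size scales with a firmware-reported list length, which can outgrow what kmalloc reliably satisfies; kvzalloc transparently falls back to vmalloc for large sizes, and kvfree releases either kind. The recurring pattern, sketched with a hypothetical list_size:

/* Sketch of the pattern applied throughout this file; "list_size" is
 * a hypothetical runtime value.
 */
void *out;
int out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
	     list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

out = kvzalloc(out_sz, GFP_KERNEL);
if (!out)
	return -ENOMEM;
/* ... parse the firmware reply ... */
kvfree(out);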
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e5c4dcd1425e..4d629e5ddbc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -123,7 +123,7 @@ static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
wq->cur_sz++;
}
-static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
+static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n)
{
wq->wqe_ctr += n;
wq->cur_sz += n;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 86826a70f9dd..5a1027b07215 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -75,6 +75,7 @@ struct mlxbf_gige {
struct net_device *netdev;
struct platform_device *pdev;
void __iomem *mdio_io;
+ void __iomem *clk_io;
struct mii_bus *mdiobus;
spinlock_t lock; /* for packet processing indices */
u16 rx_q_entries;
@@ -90,9 +91,6 @@ struct mlxbf_gige {
dma_addr_t rx_cqe_base_dma;
u16 tx_pi;
u16 prev_tx_ci;
- u64 error_intr_count;
- u64 rx_intr_count;
- u64 llu_plu_intr_count;
struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];
struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];
int error_irq;
@@ -140,7 +138,8 @@ enum mlxbf_gige_res {
MLXBF_GIGE_RES_MDIO9,
MLXBF_GIGE_RES_GPIO0,
MLXBF_GIGE_RES_LLU,
- MLXBF_GIGE_RES_PLU
+ MLXBF_GIGE_RES_PLU,
+ MLXBF_GIGE_RES_CLK
};
/* Version of register data returned by mlxbf_gige_get_regs() */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index ceeb7f4c3f6c..41ebef25a930 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -24,11 +24,9 @@ static void mlxbf_gige_get_regs(struct net_device *netdev,
regs->version = MLXBF_GIGE_REGS_VERSION;
/* Read entire MMIO register space and store results
- * into the provided buffer. Each 64-bit word is converted
- * to big-endian to make the output more readable.
- *
- * NOTE: by design, a read to an offset without an existing
- * register will be acknowledged and return zero.
+ * into the provided buffer. By design, a read to an
+ * offset without an existing register will be
+ * acknowledged and return zero.
*/
memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
index c38795be04a2..5b3519f0cc46 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
@@ -17,8 +17,6 @@ static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id)
priv = dev_id;
- priv->error_intr_count++;
-
int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS);
if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR)
@@ -75,8 +73,6 @@ static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
priv = dev_id;
- priv->rx_intr_count++;
-
/* NOTE: GigE silicon automatically disables "packet rx" interrupt by
* setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt
* to the ARM cores. Software needs to re-enable "packet rx"
@@ -90,11 +86,6 @@ static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id)
{
- struct mlxbf_gige *priv;
-
- priv = dev_id;
- priv->llu_plu_intr_count++;
-
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 66ef0090755e..2292d63a279c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -19,8 +19,6 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-#define DRV_NAME "mlxbf_gige"
-
/* Allocate SKB whose payload pointer aligns with the Bluefield
* hardware DMA limitation, i.e. DMA operation can't cross
* a 4KB boundary. A maximum packet size of 2KB is assumed in the
@@ -69,7 +67,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
u8 mac[ETH_ALEN];
u64 local_mac;
- memset(mac, 0, ETH_ALEN);
+ eth_zero_addr(mac);
mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
&local_mac);
u64_to_ether_addr(local_mac, mac);
@@ -158,7 +156,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
phy_start(phydev);
- netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
@@ -427,7 +425,7 @@ static struct platform_driver mlxbf_gige_driver = {
.remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 7905179a9575..aa780b1614a3 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -22,10 +22,23 @@
#include <linux/property.h>
#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
+#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
+#define MLXBF_GIGE_MDC_CLK_NS 400
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
+#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
+#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
+#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
+#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
+
/* Support clause 22 */
#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
@@ -50,27 +63,76 @@
#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
+#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+
+static struct resource corepll_params[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .start = MLXBF_GIGE_BF2_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ },
+};
+
+/* Returns core clock i1clk in Hz */
+static u64 calculate_i1clk(struct mlxbf_gige *priv)
+{
+ u8 core_od, core_r;
+ u64 freq_output;
+ u32 reg1, reg2;
+ u32 core_f;
+
+ reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
+ reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
+
+ core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_F_SHIFT;
+ core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_R_SHIFT;
+ core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
+
+ /* Compute the PLL output frequency as follows:
+ *
+ * CORE_F / 16384
+ * freq_output = freq_reference * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ */
+ freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
+ MLXBF_GIGE_MDIO_COREPLL_CONST);
+ freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
+
+ return freq_output;
+}
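
To make the formula concrete, here is a standalone sketch of the same
computation with hypothetical register field values (not read from real
hardware); with CORE_F = 45056 and CORE_R = CORE_OD = 0 it reproduces the
430 MHz the old driver hardcoded:

/* Illustrative only: calculate_i1clk() arithmetic with hypothetical
 * CORE_F/CORE_R/CORE_OD values; constants mirror the #defines above.
 */
#include <stdint.h>
#include <stdio.h>

#define FREQ_REFERENCE	156250000ULL	/* MLXBF_GIGE_MDIO_FREQ_REFERENCE */
#define COREPLL_CONST	16384ULL	/* MLXBF_GIGE_MDIO_COREPLL_CONST */

int main(void)
{
	uint32_t core_f = 45056;	/* hypothetical CORE_F field */
	uint8_t core_r = 0;		/* hypothetical CORE_R field */
	uint8_t core_od = 0;		/* hypothetical CORE_OD field */
	uint64_t freq;

	freq = FREQ_REFERENCE * core_f / COREPLL_CONST;
	freq /= (uint64_t)(core_r + 1) * (core_od + 1);

	/* prints "i1clk = 429687500 Hz", i.e. roughly 430 MHz */
	printf("i1clk = %llu Hz\n", (unsigned long long)freq);
	return 0;
}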
+
/* Formula for encoding the MDIO period. The encoded value is
* passed to the MDIO config register.
*
- * mdc_clk = 2*(val + 1)*i1clk
+ * mdc_clk = 2*(val + 1)*(core clock period in sec)
*
- * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ * i1clk is in Hz:
+ * 400 ns = 2*(val + 1)*(1/i1clk)
*
- * val = (((400 * 430 / 1000) / 2) - 1)
+ * val = (((400/10^9) / (1/i1clk) / 2) - 1)
+ * val = (400/2 * i1clk)/10^9 - 1
*/
-#define MLXBF_GIGE_I1CLK_MHZ 430
-#define MLXBF_GIGE_MDC_CLK_NS 400
+static u8 mdio_period_map(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u64 i1clk;
-#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+ i1clk = calculate_i1clk(priv);
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
- MLXBF_GIGE_MDIO_PERIOD) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+ mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
+
+ return mdio_period;
+}
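
Continuing the hypothetical i1clk from the sketch above, the period encoding
can be checked in isolation; the program below is illustrative, not part of
the driver:

/* Illustrative only: mdio_period_map() arithmetic for a hypothetical
 * i1clk of ~430 MHz; 400 is MLXBF_GIGE_MDC_CLK_NS.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t i1clk = 429687500ULL;	/* hypothetical core clock in Hz */
	uint64_t mdc_clk_ns = 400;	/* MLXBF_GIGE_MDC_CLK_NS */
	uint8_t val;

	/* val = (400/2 * i1clk)/10^9 - 1, as derived in the comment above */
	val = (uint8_t)((mdc_clk_ns >> 1) * i1clk / 1000000000ULL - 1);

	/* prints "MDC period field = 84" */
	printf("MDC period field = %u\n", val);
	return 0;
}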
static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
int phy_reg, u32 opcode)
@@ -105,7 +167,8 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
- val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+ val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ 5, 1000000);
if (ret) {
writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
@@ -116,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ /* The MDIO lock is set on read. To release it, clear the GW register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
@@ -123,9 +189,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
int phy_reg, u16 val)
{
struct mlxbf_gige *priv = bus->priv;
+ u32 temp;
u32 cmd;
int ret;
- u32 temp;
if (phy_reg & MII_ADDR_C45)
return -EOPNOTSUPP;
@@ -137,23 +203,53 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
/* If the poll timed out, drop the request */
ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
- temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+ temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ 5, 1000000);
+
+ /* The MDIO lock is set on read. To release it, clear the GW register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
return ret;
}
+static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u32 val;
+
+ mdio_period = mdio_period_map(priv);
+
+ val = MLXBF_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+}
+
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
{
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
- /* Configure mdio parameters */
- writel(MLXBF_GIGE_MDIO_CFG_VAL,
- priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ /* The CLK resource is shared with other drivers, so
+ * devm_platform_ioremap_resource() cannot be used here.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
+ if (!res) {
+ /* For backward compatibility with older ACPI tables, also keep
+ * CLK resource internal to the driver.
+ */
+ res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ }
+
+ priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (!priv->clk_io)
+ return -ENOMEM;
+
+ mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
if (!priv->mdiobus) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 5fb33c9294bf..7be3a793984d 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,6 +8,8 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#define MLXBF_GIGE_VERSION 0x0000
+#define MLXBF_GIGE_VERSION_BF2 0x0
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 4683312861ac..a510bf2cff2f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -7,6 +7,7 @@ config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
select MLXFW
+ select AUXILIARY_BUS
help
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 196adeb33495..3ca9fce759ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
- core_acl_flex_actions.o core_env.o
+ core_acl_flex_actions.o core_env.o \
+ core_linecards.o core_linecard_dev.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
@@ -11,7 +12,6 @@ mlxsw_i2c-objs := i2c.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_router_xm.o \
spectrum1_kvdl.o spectrum2_kvdl.o \
spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
@@ -28,7 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
- spectrum_ethtool.o spectrum_policer.o
+ spectrum_ethtool.o spectrum_policer.o \
+ spectrum_pgt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 51b260d54237..09bef04b11d1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -329,6 +329,32 @@ MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);
+/* cmd_mbox_query_fw_utc_sec_offset
+ * The offset of the UTC_Sec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_sec_offset, 0x70, 0, 64);
+
+/* cmd_mbox_query_fw_utc_sec_bar
+ * PCI base address register (BAR) of the UTC_Sec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_sec_bar, 0x78, 30, 2);
+
+/* cmd_mbox_query_fw_utc_nsec_offset
+ * The offset of the UTC_nSec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_nsec_offset, 0x80, 0, 64);
+
+/* cmd_mbox_query_fw_utc_nsec_bar
+ * PCI base address register (BAR) of the UTC_nSec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_nsec_bar, 0x88, 30, 2);
+
/* QUERY_BOARDINFO - Query Board Information
* -----------------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -343,23 +369,6 @@ static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}
-/* cmd_mbox_xm_num_local_ports
- * Number of local_ports connected to the xm.
- * Each local port is a 4x
- * Spectrum-2/3: 25G
- * Spectrum-4: 50G
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_num_local_ports, 0x00, 4, 3);
-
-/* cmd_mbox_xm_exists
- * An XM (eXtanded Mezanine, e.g. used for the XLT) is connected on the board.
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_exists, 0x00, 0, 1);
-
-/* cmd_mbox_xm_local_port_entry
- */
-MLXSW_ITEM_BIT_ARRAY(cmd_mbox, boardinfo, xm_local_port_entry, 0x04, 4, 8);
-
/* cmd_mbox_boardinfo_intapin
* When PCIe interrupt messages are being used, this value is used for clearing
* an interrupt. When using MSI-X, this register is not used.
@@ -650,6 +659,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_ubridge
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
+
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
@@ -674,11 +689,11 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
-/* cmd_mbox_config_set_kvh_xlt_cache_mode
+/* cmd_mbox_config_set_cqe_time_stamp_type
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, set_kvh_xlt_cache_mode, 0x08, 3, 1);
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
@@ -688,6 +703,9 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
/* cmd_mbox_config_profile_max_lag
* Maximum number of LAG IDs requested.
+ * Reserved on Spectrum-1/2/3; supported on Spectrum-4 and above.
+ * For Spectrum-4, firmware rounds the requested value up: values between
+ * 1-128 yield 128 and values between 129-256 yield 256.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
@@ -736,16 +754,25 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+enum mlxsw_cmd_mbox_config_profile_flood_mode {
+ /* Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset
+ * tables. max_fid_flood_tables indicates the number of per-FID tables.
+ * Reserved when the unified bridge model is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
+ /* Controlled flood tables. Reserved when the legacy bridge model
+ * is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+};
+
/* cmd_mbox_config_profile_flood_mode
* Flooding mode to use.
- * 0-2 - Backward compatible modes for SwitchX devices.
- * 3 - Mixed mode, where:
- * max_flood_tables indicates the number of single-entry tables.
- * max_vid_flood_tables indicates the number of per-VID tables.
- * max_fid_offset_flood_tables indicates the number of FID-offset tables.
- * max_fid_flood_tables indicates the number of per-FID tables.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);
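
A hedged sketch of how a profile-packing routine might drive the new
bridge-model and flood-mode fields; the helper itself is hypothetical, only
the enum values and the accessors generated by the MLXSW_ITEM32() macros are
from this file (set_flood_mode is defined elsewhere in it):

/* Illustrative helper, not part of this patch: select the flood mode
 * matching the chosen bridge model when packing a config_profile mbox.
 */
static void example_config_profile_pack(char *mbox, bool ubridge)
{
	mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
	mlxsw_cmd_mbox_config_profile_ubridge_set(mbox, ubridge ? 1 : 0);

	mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
	if (ubridge)
		/* Controlled flood tables; reserved for the legacy model */
		mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox,
			MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED);
	else
		/* Mixed mode; reserved for the unified-bridge model */
		mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox,
			MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED);
}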
/* cmd_mbox_config_profile_max_fid_offset_flood_tables
* Maximum number of FID-offset flooding tables.
@@ -806,12 +833,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
-/* cmd_mbox_config_profile_kvh_xlt_cache_mode
- * KVH XLT cache mode:
- * 0 - XLT can use all KVH as best-effort
- * 1 - XLT cache uses 1/2 KVH
+/* cmd_mbox_config_profile_ubridge
+ * Unified Bridge
+ * 0 - non unified bridge
+ * 1 - unified bridge
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, kvh_xlt_cache_mode, 0x50, 8, 4);
+MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
@@ -866,6 +893,26 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
0x60, 0, 8, 0x08, 0x00, false);
+enum mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type {
+ /* uSec - 1.024uSec (default). Only bits 15:0 are valid. */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_USEC,
+ /* FRC - Free Running Clock, units of 1nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_FRC,
+ /* UTC. time_stamp[37:30] = Sec, time_stamp[29:0] = nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* cmd_mbox_config_profile_cqe_time_stamp_type
+ * CQE time_stamp_type for non-mirror-packets.
+ * Configured if set_cqe_time_stamp_type is set.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_time_stamp_type, 0xB0, 8, 2);
+
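A minimal sketch, assuming the documented bit layout, of splitting a raw
UTC-type time stamp into the mlxsw_cqe_ts structure this series adds to
core.h; the helper name and call site are hypothetical:

/* Illustrative only: time_stamp[37:30] = Sec, time_stamp[29:0] = nSec */
static void example_decode_utc_ts(u64 time_stamp, struct mlxsw_cqe_ts *ts)
{
	ts->sec = (time_stamp >> 30) & 0xff;	/* bits 37:30 */
	ts->nsec = time_stamp & GENMASK(29, 0);	/* bits 29:0 */
}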
/* cmd_mbox_config_profile_cqe_version
* CQE version:
* 0: CQE version is 0
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 866b9357939b..e2a985ec2c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -48,6 +48,7 @@ struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
u16 local_port;
+ struct mlxsw_linecard *linecard;
};
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -69,6 +70,8 @@ struct mlxsw_core {
struct workqueue_struct *emad_wq;
struct list_head rx_listener_list;
struct list_head event_listener_list;
+ struct list_head irq_event_handler_list;
+ struct mutex irq_event_handler_lock; /* Locks access to handlers list */
struct {
atomic64_t tid;
struct list_head trans_list;
@@ -82,6 +85,7 @@ struct mlxsw_core {
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
struct mlxsw_thermal *thermal;
+ struct mlxsw_linecards *linecards;
struct mlxsw_core_port *ports;
unsigned int max_ports;
atomic_t active_ports_count;
@@ -94,6 +98,17 @@ struct mlxsw_core {
/* driver_priv has to be always the last item */
};
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->linecards;
+}
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ mlxsw_core->linecards = linecards;
+}
+
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
static u64 mlxsw_ports_occ_get(void *priv)
@@ -114,11 +129,11 @@ static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
max_ports, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
- max_ports, MLXSW_CORE_RESOURCE_PORTS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &ports_num_params);
+ return devl_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
}
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
@@ -144,8 +159,8 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
goto err_resources_ports_register;
}
atomic_set(&mlxsw_core->active_ports_count, 0);
- devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
- mlxsw_ports_occ_get, mlxsw_core);
+ devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
return 0;
@@ -158,9 +173,9 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
if (!reload)
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
kfree(mlxsw_core->ports);
}
@@ -171,22 +186,28 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_max_ports);
-void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
{
- return mlxsw_core->driver_priv;
-}
-EXPORT_SYMBOL(mlxsw_core_driver_priv);
+ struct mlxsw_driver *driver = mlxsw_core->driver;
-bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->driver->res_query_enabled;
+ if (driver->profile->used_max_lag) {
+ *p_max_lag = driver->profile->max_lag;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
+ return -EIO;
+
+ *p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
+ return 0;
}
-EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
+EXPORT_SYMBOL(mlxsw_core_max_lag);
-bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_core->driver->temp_warn_enabled;
+ return mlxsw_core->driver_priv;
}
+EXPORT_SYMBOL(mlxsw_core_driver_priv);
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
@@ -212,6 +233,32 @@ struct mlxsw_event_listener_item {
void *priv;
};
+static const u8 mlxsw_core_trap_groups[] = {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
+};
+
+static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
+ int i;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
+ mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
/******************
* EMAD processing
******************/
@@ -605,7 +652,7 @@ static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
return;
string = mlxsw_emad_string_tlv_string_data(string_tlv);
- strlcpy(trans->emad_err_string, string,
+ strscpy(trans->emad_err_string, string,
MLXSW_EMAD_STRING_TLV_STRING_LEN);
}
@@ -777,16 +824,10 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (err)
goto err_trap_register;
- err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
- if (err)
- goto err_emad_trap_set;
mlxsw_core->emad.use_emad = true;
return 0;
-err_emad_trap_set:
- mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
- mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
@@ -929,6 +970,20 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
struct mlxsw_core_fw_info {
struct mlxfw_dev mlxfw_dev;
struct mlxsw_core *mlxsw_core;
@@ -1083,8 +1138,9 @@ static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
.fsm_release = mlxsw_core_fw_fsm_release,
};
-static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
- struct netlink_ext_ack *extack)
+static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core_fw_info mlxsw_core_fw_info = {
.mlxfw_dev = {
@@ -1095,13 +1151,9 @@ static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmw
},
.mlxsw_core = mlxsw_core
};
- int err;
-
- mlxsw_core->fw_flash_in_progress = true;
- err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
- mlxsw_core->fw_flash_in_progress = false;
- return err;
+ return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
+ firmware, extack);
}
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
@@ -1147,7 +1199,7 @@ static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
release_firmware(firmware);
if (err)
dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
@@ -1165,7 +1217,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
+ return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
}
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
@@ -1208,36 +1260,37 @@ static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
static int mlxsw_devlink_port_split(struct devlink *devlink,
- unsigned int port_index,
+ struct devlink_port *port,
unsigned int count,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports) {
- NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
- return -EINVAL;
- }
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
- extack);
+ return mlxsw_core->driver->port_split(mlxsw_core,
+ mlxsw_core_port->local_port,
+ count, extack);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
- unsigned int port_index,
+ struct devlink_port *port,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports) {
- NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
- return -EINVAL;
- }
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
+ return mlxsw_core->driver->port_unsplit(mlxsw_core,
+ mlxsw_core_port->local_port,
extack);
}
@@ -1271,26 +1324,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
extack);
}
-static void *__dl_port(struct devlink_port *devlink_port)
-{
- return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
-}
-
-static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type port_type)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
- struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
- struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
-
- if (!mlxsw_driver->port_type_set)
- return -EOPNOTSUPP;
-
- return mlxsw_driver->port_type_set(mlxsw_core,
- mlxsw_core_port->local_port,
- port_type);
-}
-
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
@@ -1480,13 +1513,15 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_re
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ int err;
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
- return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
- mlxsw_core->bus,
- mlxsw_core->bus_priv, true,
- devlink, extack);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink, extack);
+ return err;
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -1619,7 +1654,6 @@ static const struct devlink_ops mlxsw_devlink_ops = {
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
- .port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
.sb_pool_get = mlxsw_devlink_sb_pool_get,
@@ -1706,7 +1740,7 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
}
static const struct mlxsw_listener mlxsw_core_health_listener =
- MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+ MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
static int
mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
@@ -2019,7 +2053,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
struct devlink_health_reporter *fw_fatal;
int err;
- if (!mlxsw_core->driver->fw_fatal_enabled)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
@@ -2049,7 +2083,7 @@ err_trap_register:
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
- if (!mlxsw_core->driver->fw_fatal_enabled)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return;
mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
@@ -2059,6 +2093,18 @@ static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
+static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
+{
+ INIT_LIST_HEAD(&mlxsw_core->irq_event_handler_list);
+ mutex_init(&mlxsw_core->irq_event_handler_lock);
+}
+
+static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core)
+{
+ mutex_destroy(&mlxsw_core->irq_event_handler_lock);
+ WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list));
+}
+
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
@@ -2069,8 +2115,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct mlxsw_res *res;
size_t alloc_size;
+ u16 max_lag;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
@@ -2085,6 +2131,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err = -ENOMEM;
goto err_devlink_alloc;
}
+ devl_lock(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2094,9 +2141,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus = mlxsw_bus;
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
+ mlxsw_core_irq_event_handler_init(mlxsw_core);
- res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->res);
if (err)
goto err_bus_init;
@@ -2110,10 +2158,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_ports_init;
- if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
- MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
- alloc_size = sizeof(*mlxsw_core->lag.mapping) *
- MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
+ err = mlxsw_core_max_lag(mlxsw_core, &max_lag);
+ if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
@@ -2122,6 +2169,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
+ err = mlxsw_core_trap_groups_set(mlxsw_core);
+ if (err)
+ goto err_trap_groups_set;
+
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -2137,6 +2188,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_fw_rev_validate;
+ err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_linecards_init;
+
err = mlxsw_core_health_init(mlxsw_core);
if (err)
goto err_health_init;
@@ -2150,7 +2205,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
+ err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
if (err)
goto err_env_init;
@@ -2162,6 +2217,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!reload) {
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
}
return 0;
@@ -2175,23 +2231,29 @@ err_thermal_init:
err_hwmon_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
+ mlxsw_linecards_fini(mlxsw_core);
+err_linecards_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
+err_trap_groups_set:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- if (!reload)
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
err_devlink_alloc:
return err;
}
@@ -2227,8 +2289,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload)
+ if (!reload) {
devlink_unregister(devlink);
+ devl_lock(devlink);
+ }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2246,22 +2310,27 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
mlxsw_core_health_fini(mlxsw_core);
+ mlxsw_linecards_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- if (!reload)
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
return;
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
@@ -2500,6 +2569,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
char hpkt_pl[MLXSW_REG_HPKT_LEN];
int err;
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
listener->enabled_on_register);
if (err)
@@ -2529,6 +2601,9 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
{
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
+
if (!listener->is_event) {
mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
listener->trap_id, listener->dis_trap_group,
@@ -2540,6 +2615,45 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i, err;
+
+ for (i = 0; i < listeners_count; i++) {
+ err = mlxsw_core_trap_register(mlxsw_core,
+ &listeners[i],
+ priv);
+ if (err)
+ goto err_listener_register;
+ }
+ return 0;
+
+err_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_traps_register);
+
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i;
+
+ for (i = 0; i < listeners_count; i++) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_traps_unregister);
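
A hedged usage sketch for the bulk helpers above; the listener table and
callback are hypothetical, only the register/unregister entry points and the
MLXSW_CORE_EVENTL() macro come from this series:

/* Illustrative consumer, not part of this patch. A failure partway
 * through mlxsw_core_traps_register() is rolled back internally.
 */
static void example_event_func(const struct mlxsw_reg_info *reg,
			       char *payload, void *priv)
{
	/* handle the event */
}

static const struct mlxsw_listener example_listeners[] = {
	MLXSW_CORE_EVENTL(example_event_func, MFDE),
};

static int example_traps_init(struct mlxsw_core *mlxsw_core, void *priv)
{
	return mlxsw_core_traps_register(mlxsw_core, example_listeners,
					 ARRAY_SIZE(example_listeners), priv);
}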
+
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
bool enabled)
@@ -2676,6 +2790,57 @@ int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+struct mlxsw_core_irq_event_handler_item {
+ struct list_head list;
+ void (*cb)(struct mlxsw_core *mlxsw_core);
+};
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->cb = cb;
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_add_tail(&item->list, &mlxsw_core->irq_event_handler_list);
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register);
+
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item, *tmp;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry_safe(item, tmp,
+ &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb == cb) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister);
+
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb)
+ item->cb(mlxsw_core);
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call);
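
A minimal sketch of a consumer of the new IRQ event notification API; the
callback body and function names are hypothetical, only the typedef and the
register/unregister/call entry points come from this series:

/* Illustrative only: a driver-side hook invoked from
 * mlxsw_core_irq_event_handlers_call() on an IRQ event.
 */
static void example_irq_event_cb(struct mlxsw_core *mlxsw_core)
{
	/* e.g. re-read state that the interrupt may have changed */
}

static int example_init(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core_irq_event_handler_register(mlxsw_core,
						     example_irq_event_cb);
}

static void example_fini(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core_irq_event_handler_unregister(mlxsw_core,
						example_irq_event_cb);
}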
+
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
@@ -2902,7 +3067,7 @@ EXPORT_SYMBOL(mlxsw_core_res_get);
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -2925,7 +3090,16 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
- err = devlink_port_register(devlink, devlink_port, local_port);
+ if (slot_index) {
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(mlxsw_core->linecards,
+ slot_index);
+ mlxsw_core_port->linecard = linecard;
+ devlink_port_linecard_set(devlink_port,
+ linecard->devlink_linecard);
+ }
+ err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
return err;
@@ -2937,12 +3111,12 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -2951,7 +3125,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
int err;
err = __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes,
switch_id, switch_id_len);
@@ -2982,7 +3156,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
DEVLINK_PORT_FLAVOUR_CPU,
- 0, false, 0, false, 0,
+ 0, 0, false, 0, false, 0,
switch_id, switch_id_len);
if (err)
return err;
@@ -3010,18 +3184,6 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_type_ib_set(devlink_port, NULL);
-}
-EXPORT_SYMBOL(mlxsw_core_port_ib_set);
-
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
@@ -3034,18 +3196,6 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
-enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u16 local_port)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- return devlink_port->type;
-}
-EXPORT_SYMBOL(mlxsw_core_port_type_get);
-
-
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port)
@@ -3058,17 +3208,24 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port)
{
- const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
- int i;
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+
+ return mlxsw_core_port->linecard;
+}
- for (i = 0; i < bus_info->xm_local_ports_count; i++)
- if (bus_info->xm_local_ports[i] == local_port)
- return true;
- return false;
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
+ return;
+ mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
}
-EXPORT_SYMBOL(mlxsw_core_port_is_xm);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
@@ -3181,9 +3338,6 @@ int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
u16 id;
int err;
- if (!res)
- return 0;
-
mlxsw_cmd_mbox_zero(mbox);
for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
@@ -3222,6 +3376,24 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->emad.enable_string_tlv = true;
@@ -3232,9 +3404,15 @@ static int __init mlxsw_core_module_init(void)
{
int err;
+ err = mlxsw_linecard_driver_register();
+ if (err)
+ return err;
+
mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
- if (!mlxsw_wq)
- return -ENOMEM;
+ if (!mlxsw_wq) {
+ err = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
@@ -3245,6 +3423,8 @@ static int __init mlxsw_core_module_init(void)
err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
+err_alloc_workqueue:
+ mlxsw_linecard_driver_unregister();
return err;
}
@@ -3252,6 +3432,7 @@ static void __exit mlxsw_core_module_exit(void)
{
destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
+ mlxsw_linecard_driver_unregister();
}
module_init(mlxsw_core_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index f30bb8614e69..ca0c3d2bee6b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -12,12 +12,14 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/net_namespace.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
#include "cmd.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
enum mlxsw_core_resource_id {
MLXSW_CORE_RESOURCE_PORTS = 1,
@@ -33,11 +35,14 @@ struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core);
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecard);
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
@@ -46,6 +51,11 @@ mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
@@ -163,6 +173,9 @@ struct mlxsw_listener {
.enabled_on_register = true, \
}
+#define MLXSW_CORE_EVENTL(_func, _trap_id) \
+ MLXSW_EVENTL(_func, _trap_id, CORE_EVENT)
+
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
void *priv, bool enabled);
@@ -181,6 +194,12 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
bool enabled);
@@ -198,6 +217,14 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+typedef void mlxsw_irq_event_cb_t(struct mlxsw_core *mlxsw_core);
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core);
+
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload);
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -226,7 +253,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split, u32 split_port_subnumber,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
@@ -238,16 +266,18 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
-enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u16 local_port);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -267,6 +297,7 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
+ used_max_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -278,9 +309,11 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
+ used_ubridge:1,
used_kvd_sizes:1,
- used_kvh_xlt_cache_mode:1;
+ used_cqe_time_stamp_type:1;
u8 max_vepa_channels;
+ u16 max_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -298,10 +331,11 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
- u8 kvh_xlt_cache_mode;
+ u8 cqe_time_stamp_type;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -315,13 +349,14 @@ struct mlxsw_driver {
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
- int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
- int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
- enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
+ void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -398,9 +433,7 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
- bool res_query_enabled;
- bool fw_fatal_enabled;
- bool temp_warn_enabled;
+ bool sdq_supports_cqe_v2;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -411,6 +444,11 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
@@ -450,6 +488,8 @@ struct mlxsw_bus {
u8 *p_status);
u32 (*read_frc_h)(void *bus_priv);
u32 (*read_frc_l)(void *bus_priv);
+ u32 (*read_utc_sec)(void *bus_priv);
+ u32 (*read_utc_nsec)(void *bus_priv);
u8 features;
};
@@ -460,8 +500,6 @@ struct mlxsw_fw_rev {
u16 can_reset_minor;
};
-#define MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX 4
-
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
@@ -470,10 +508,7 @@ struct mlxsw_bus_info {
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
u8 low_frequency:1,
- read_frc_capable:1,
- xm_exists:1;
- u8 xm_local_ports_count;
- u8 xm_local_ports[MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX];
+ read_clock_capable:1;
};
struct mlxsw_hwmon;
@@ -529,11 +564,17 @@ enum mlxsw_devlink_param_id {
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
};
+struct mlxsw_cqe_ts {
+ u8 sec;
+ u32 nsec;
+};
+
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
struct mlxsw_rx_md_info rx_md_info;
};
+ struct mlxsw_cqe_ts cqe_ts;
};
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
@@ -542,4 +583,92 @@ static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
return (struct mlxsw_skb_cb *) skb->cb;
}
+struct mlxsw_linecards;
+
+enum mlxsw_linecard_status_event_type {
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
+};
+
+struct mlxsw_linecard_bdev;
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+ char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+};
+
+struct mlxsw_linecard {
+ u8 slot_index;
+ struct mlxsw_linecards *linecards;
+ struct devlink_linecard *devlink_linecard;
+ struct mutex lock; /* Locks accesses to the linecard structure */
+ char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN];
+ char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */
+ enum mlxsw_linecard_status_event_type status_event_type_to;
+ struct delayed_work status_event_to_dw;
+ u8 provisioned:1,
+ ready:1,
+ active:1;
+ u16 hw_revision;
+ u16 ini_version;
+ struct mlxsw_linecard_bdev *bdev;
+ struct {
+ struct mlxsw_linecard_device_info info;
+ u8 index;
+ } device;
+};
+
+struct mlxsw_linecard_types_info;
+
+struct mlxsw_linecards {
+ struct mlxsw_core *mlxsw_core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 count;
+ struct mlxsw_linecard_types_info *types_info;
+ struct list_head event_ops_list;
+ struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
+ struct mlxsw_linecard linecards[];
+};
+
+static inline struct mlxsw_linecard *
+mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
+{
+ return &linecards->linecards[slot_index - 1];
+}
+
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info);
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
+
+typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, void *priv);
+
+struct mlxsw_linecards_event_ops {
+ mlxsw_linecards_event_op_t *got_active;
+ mlxsw_linecards_event_op_t *got_inactive;
+};
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard);
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard);
+
+int mlxsw_linecard_driver_register(void);
+void mlxsw_linecard_driver_unregister(void);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 77e82e6cf6e8..9dfe7148199f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -737,8 +737,9 @@ mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
if (!cookie)
return ERR_PTR(-ENOMEM);
refcount_set(&cookie->ref_count, 1);
- memcpy(&cookie->fa_cookie, fa_cookie,
- sizeof(*fa_cookie) + fa_cookie->cookie_len);
+ cookie->fa_cookie = *fa_cookie;
+ memcpy(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
mlxsw_afa_cookie_ht_params);
@@ -1164,7 +1165,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
*
- * The Trap with userdef action action has the same functionality as
+ * The Trap with userdef action has the same functionality as
* the Trap action with addition of user defined value that can be set
* and used by higher layer applications.
*/
@@ -1957,6 +1958,83 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
+/* SIP DIP Action
+ * --------------
+ * The SIP_DIP_ACTION is used for modifying the SIP and DIP fields of the
+ * packet, e.g. for NAT. The L3 checksum is updated. Also, if the L4 is TCP or
+ * if the L4 is UDP and the checksum field is not zero, then the L4 checksum is
+ * updated.
+ */
+
+#define MLXSW_AFA_IP_CODE 0x11
+#define MLXSW_AFA_IP_SIZE 2
+
+enum mlxsw_afa_ip_s_d {
+ /* ip refers to dip */
+ MLXSW_AFA_IP_S_D_DIP,
+ /* ip refers to sip */
+ MLXSW_AFA_IP_S_D_SIP,
+};
+
+/* afa_ip_s_d
+ * Source or destination.
+ */
+MLXSW_ITEM32(afa, ip, s_d, 0x00, 31, 1);
+
+enum mlxsw_afa_ip_m_l {
+ /* LSB: ip[63:0] refers to ip[63:0] */
+ MLXSW_AFA_IP_M_L_LSB,
+ /* MSB: ip[63:0] refers to ip[127:64] */
+ MLXSW_AFA_IP_M_L_MSB,
+};
+
+/* afa_ip_m_l
+ * MSB or LSB.
+ */
+MLXSW_ITEM32(afa, ip, m_l, 0x00, 30, 1);
+
+/* afa_ip_ip_63_32
+ * Bits [63:32] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_63_32, 0x08, 0, 32);
+
+/* afa_ip_ip_31_0
+ * Bits [31:0] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_31_0, 0x0C, 0, 32);
+
+static void mlxsw_afa_ip_pack(char *payload, enum mlxsw_afa_ip_s_d s_d,
+ enum mlxsw_afa_ip_m_l m_l, u32 ip_31_0,
+ u32 ip_63_32)
+{
+ mlxsw_afa_ip_s_d_set(payload, s_d);
+ mlxsw_afa_ip_m_l_set(payload, m_l);
+ mlxsw_afa_ip_ip_31_0_set(payload, ip_31_0);
+ mlxsw_afa_ip_ip_63_32_set(payload, ip_63_32);
+}
+
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_afa_ip_s_d s_d = is_dip ? MLXSW_AFA_IP_S_D_DIP :
+ MLXSW_AFA_IP_S_D_SIP;
+ enum mlxsw_afa_ip_m_l m_l = is_lsb ? MLXSW_AFA_IP_M_L_LSB :
+ MLXSW_AFA_IP_M_L_MSB;
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_IP_CODE,
+ MLXSW_AFA_IP_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append IP action");
+ return PTR_ERR(act);
+ }
+
+ mlxsw_afa_ip_pack(act, s_d, m_l, val_31_0, val_63_32);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_ip);
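
Because the action payload carries only 64 address bits, rewriting a full IPv6 address takes two appended actions, one per half selected by m_l, while IPv4 needs a single LSB action with val_63_32 zeroed. A user-space sketch of splitting a 128-bit address into the two halves; the helper name and the big-endian word ordering shown are illustrative assumptions, not taken from this patch:

#include <inttypes.h>
#include <stdio.h>

/* w[0] holds the most significant 32 bits of the 128-bit address. */
struct ip6_words {
	uint32_t w[4];
};

static void split_ip6(const struct ip6_words *a,
		      uint32_t *lsb_31_0, uint32_t *lsb_63_32,
		      uint32_t *msb_31_0, uint32_t *msb_63_32)
{
	/* The MSB action covers bits 127:64, the LSB action bits 63:0. */
	*msb_63_32 = a->w[0];
	*msb_31_0 = a->w[1];
	*lsb_63_32 = a->w[2];
	*lsb_31_0 = a->w[3];
}

int main(void)
{
	struct ip6_words addr = {
		{ 0x20010db8, 0x00000000, 0x00000000, 0x00000001 }
	};
	uint32_t l0, l32, m0, m32;

	split_ip6(&addr, &l0, &l32, &m0, &m32);
	printf("msb half %08" PRIx32 "%08" PRIx32 ", lsb half %08" PRIx32 "%08" PRIx32 "\n",
	       m32, m0, l32, l0);
	return 0;
}
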
+
/* L4 Port Action
* --------------
* The L4_PORT_ACTION is used for modifying the sport and dport fields of the packet, e.g. for NAT.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 16cbd6acbb01..db58037be46e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -92,6 +92,9 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 6dd4ae2f45f4..0107cbc32fc7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -18,24 +18,101 @@ struct mlxsw_env_module_info {
int num_ports_mapped;
int num_ports_up;
enum ethtool_module_power_mode_policy power_mode_policy;
+ enum mlxsw_reg_pmtm_module_type type;
};
-struct mlxsw_env {
- struct mlxsw_core *core;
+struct mlxsw_env_line_card {
u8 module_count;
- struct mutex module_info_lock; /* Protects 'module_info'. */
+ bool active;
struct mlxsw_env_module_info module_info[];
};
-static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
- bool *qsfp, bool *cmis)
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 max_module_count; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mutex line_cards_lock; /* Protects line cards. */
+ struct mlxsw_env_line_card *line_cards[];
+};
+
+static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ return mlxsw_env->line_cards[slot_index]->active;
+}
+
+static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ bool active;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return active;
+}
+
+static struct
+mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return &mlxsw_env->line_cards[slot_index]->module_info[module];
+}
+
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ struct mlxsw_env_module_info *module_info;
+ int err;
+
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ module_info = mlxsw_env_module_info_get(core, slot_index, module);
+ switch (module_info->type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
+ err = -EINVAL;
+ break;
+ default:
+ err = 0;
+ }
+
+ return err;
+}
+
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = __mlxsw_env_validate_module_type(core, slot_index, module);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return err;
+}
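
Both linecard_is_active() and validate_module_type() come in locked and __-prefixed unlocked variants, so callers that already hold line_cards_lock can reuse the logic without recursive locking. A user-space sketch of that convention, with a pthread mutex standing in for the driver's mutex (the leading double underscore is kept purely to mirror the kernel naming):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool active;

/* Caller must hold 'lock'. */
static bool __is_active(void)
{
	return active;
}

/* Locking wrapper for callers that do not hold 'lock'. */
static bool is_active(void)
{
	bool ret;

	pthread_mutex_lock(&lock);
	ret = __is_active();
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	active = true;
	/* Already holding the lock, so call the __ variant directly. */
	printf("active=%d\n", __is_active());
	pthread_mutex_unlock(&lock);

	printf("active=%d\n", is_active());
	return 0;
}
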
+
+static int
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
+ bool *qsfp, bool *cmis)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 ident;
int err;
- mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
+ err = mlxsw_env_validate_module_type(core, slot_index, id);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -53,6 +130,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
*qsfp = true;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
*qsfp = true;
*cmis = true;
break;
@@ -64,8 +142,8 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
}
static int
-mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
- u16 offset, u16 size, void *data,
+mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -102,7 +180,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
} else {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 0 and I2C high address is used. Please refer
- * refer to "Memory Organization" figure in SFF-8472
+ * to "Memory Organization" figure in SFF-8472
* specification for graphical depiction.
*/
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
@@ -110,7 +188,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
}
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -127,8 +206,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
return 0;
}
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp)
+int
+mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
+ int module, int off, int *temp)
{
unsigned int module_temp, module_crit, module_emerg;
union {
@@ -142,8 +222,9 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int page;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
@@ -172,7 +253,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
*/
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp,
+ &cmis);
if (err)
return err;
@@ -184,12 +266,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
else
page = MLXSW_REG_MCIA_TH_PAGE_NUM;
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
} else {
- mlxsw_reg_mcia_pack(mcia_pl, module, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
@@ -206,17 +288,32 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
return 0;
}
-int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo)
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
u8 module_rev_id, module_id, diag_mon;
unsigned int read_size;
int err;
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
- module_info, false, &read_size);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ netdev_err(netdev,
+ "EEPROM is not equipped on port module type");
+ return err;
+ }
+
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0,
+ offset, module_info, false,
+ &read_size);
if (err)
return err;
@@ -245,9 +342,10 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
/* Verify if transceiver provides diagnostic monitoring page */
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
- SFP_DIAGMON, 1, &diag_mon,
- false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, SFP_DIAGMON, 1,
+ &diag_mon, false,
+ &read_size);
if (err)
return err;
@@ -261,6 +359,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
/* Use SFF_8636 as base type. ethtool should recognize specific
* type through the identifier value.
*/
@@ -285,9 +384,11 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
EXPORT_SYMBOL(mlxsw_env_get_module_info);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int offset = ee->offset;
unsigned int read_size;
bool qsfp, cmis;
@@ -297,14 +398,21 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
if (!ee->len)
return -EINVAL;
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
memset(data, 0, ee->len);
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module,
+ &qsfp, &cmis);
if (err)
return err;
while (i < ee->len) {
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, offset,
ee->len - i, data + i,
qsfp, &read_size);
if (err) {
@@ -350,12 +458,27 @@ static int mlxsw_env_mcia_status_process(const char *mcia_pl,
}
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u32 bytes_read = 0;
u16 device_addr;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot read EEPROM of module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
/* Offset cannot be larger than 2 * ETH_MODULE_EEPROM_PAGE_LEN */
device_addr = page->offset;
@@ -364,12 +487,11 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 size;
- int err;
size = min_t(u8, page->length - bytes_read,
MLXSW_REG_MCIA_EEPROM_SIZE);
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
device_addr + bytes_read, size,
page->i2c_address);
mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
@@ -393,20 +515,23 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
-static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
u32 req = *flags;
int err;
@@ -414,25 +539,34 @@ int mlxsw_env_reset_module(struct net_device *netdev,
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
return 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot reset module on an inactive line card\n");
+ return -EIO;
+ }
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- if (mlxsw_env->module_info[module].num_ports_up) {
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ netdev_err(netdev, "Reset module is not supported on port module type\n");
+ goto out;
+ }
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->num_ports_up) {
netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
err = -EINVAL;
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ if (module_info->num_ports_mapped > 1 &&
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
err = -EINVAL;
goto out;
}
- err = mlxsw_env_module_reset(mlxsw_core, module);
+ err = mlxsw_env_module_reset(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Failed to reset module\n");
goto out;
@@ -441,29 +575,39 @@ int mlxsw_env_reset_module(struct net_device *netdev,
*flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_reset_module);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
char mcion_pl[MLXSW_REG_MCION_LEN];
u32 status_bits;
- int err;
+ int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
+ mutex_lock(&mlxsw_env->line_cards_lock);
+
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
+ goto out;
+ }
- mutex_lock(&mlxsw_env->module_info_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ params->policy = module_info->power_mode_policy;
- params->policy = mlxsw_env->module_info[module].power_mode_policy;
+ /* Avoid accessing an inactive line card, as it will result in an error. */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out;
- mlxsw_reg_mcion_pack(mcion_pl, module);
+ mlxsw_reg_mcion_pack(mcion_pl, slot_index, module);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
@@ -480,18 +624,18 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool enable)
+ u8 slot_index, u8 module, bool enable)
{
enum mlxsw_reg_pmaos_admin_status admin_status;
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
@@ -501,12 +645,13 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
}
static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power)
+ u8 slot_index, u8 module,
+ bool low_power)
{
u16 eeprom_override_mask, eeprom_override;
char pmmp_pl[MLXSW_REG_PMMP_LEN];
- mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module);
mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
/* Mask all the bits except low power mode. */
eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
@@ -519,24 +664,34 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
}
static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power,
+ u8 slot_index, u8 module,
+ bool low_power,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int err;
- err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ /* Avoid accessing an inactive line card, as it will result in an error.
+ * Cached configuration will be applied by mlxsw_env_got_active() when
+ * the line card becomes active.
+ */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
return err;
}
- err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ low_power);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
goto err_module_low_power_set;
}
- err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
goto err_module_enable_set;
@@ -545,63 +700,84 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
return 0;
err_module_enable_set:
- mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+ mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ !low_power);
err_module_low_power_set:
- mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
return err;
}
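
On a mid-sequence failure the function above unwinds the earlier steps in reverse order via goto labels: a failed re-enable first rolls back the power-mode change and then falls through to re-enable the module; a failed power-mode set just re-enables. A standalone sketch of that unwinding pattern, with invented step functions (one simulates a failure):

#include <stdio.h>

static int step_disable(void) { return 0; }
static int step_set_low_power(int on) { (void)on; return -1; /* simulated failure */ }
static int step_enable(void) { return 0; }

static int do_sequence(void)
{
	int err;

	err = step_disable();
	if (err)
		return err;

	err = step_set_low_power(1);
	if (err)
		goto err_low_power;

	err = step_enable();
	if (err)
		goto err_enable;

	return 0;

err_enable:
	step_set_low_power(0);	/* undo the power-mode change */
err_low_power:
	step_enable();		/* leave the module enabled again */
	return err;
}

int main(void)
{
	printf("sequence returned %d\n", do_sequence());
	return 0;
}
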
-int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
- enum ethtool_module_power_mode_policy policy,
- struct netlink_ext_ack *extack)
+static int
+mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
{
- struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
bool low_power;
int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
- if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
- policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
- return -EOPNOTSUPP;
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Power mode set is not supported on port module type");
+ goto out;
}
- mutex_lock(&mlxsw_env->module_info_lock);
-
- if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy == policy)
goto out;
/* If any ports are up, we are already in high power mode. */
- if (mlxsw_env->module_info[module].num_ports_up)
+ if (module_info->num_ports_up)
goto out_set_policy;
low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
- extack);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ low_power, extack);
if (err)
goto out;
out_set_policy:
- mlxsw_env->module_info[module].power_mode_policy = policy;
+ module_info->power_mode_policy = policy;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index,
+ module, policy, extack);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
return err;
}
EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
- u8 module,
+ u8 slot_index, u8 module,
bool *p_has_temp_sensor)
{
char mtbr_pl[MLXSW_REG_MTBR_LEN];
u16 temp;
int err;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
if (err)
return err;
@@ -621,13 +797,15 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
- u16 sensor_index, bool enable)
+static int
+mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u16 sensor_index, bool enable)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
enum mlxsw_reg_mtmp_tee tee;
int err, threshold_hi;
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -635,6 +813,7 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
if (enable) {
err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ slot_index,
sensor_index -
MLXSW_REG_MTMP_MODULE_INDEX_MIN,
SFP_TEMP_HIGH_WARN,
@@ -662,14 +841,15 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
}
static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
- u8 module_count)
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err, sensor_index;
bool has_temp_sensor;
- for (i = 0; i < module_count; i++) {
- err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
- &has_temp_sensor);
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index,
+ i, &has_temp_sensor);
if (err)
return err;
@@ -677,7 +857,8 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
continue;
sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true);
+ err = mlxsw_env_temp_event_set(mlxsw_core, slot_index,
+ sensor_index, true);
if (err)
return err;
}
@@ -694,6 +875,7 @@ struct mlxsw_env_module_temp_warn_event {
static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
@@ -702,7 +884,7 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->max_module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
@@ -710,9 +892,10 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
sensor_warning =
mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- mutex_lock(&mlxsw_env->module_info_lock);
- is_overheat =
- mlxsw_env->module_info[i].is_overheat;
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ /* MTWE only supports main board. */
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i);
+ is_overheat = module_info->is_overheat;
if ((is_overheat && sensor_warning) ||
(!is_overheat && !sensor_warning)) {
@@ -720,21 +903,21 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
- mlxsw_env->module_info[i].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
* on.
*/
- mlxsw_env->module_info[i].is_overheat = true;
- mlxsw_env->module_info[i].module_overheat_counter++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = true;
+ module_info->module_overheat_counter++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
}
@@ -759,15 +942,12 @@ mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
}
static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
- MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
+ MLXSW_CORE_EVENTL(mlxsw_env_mtwe_listener_func, MTWE);
static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
- return 0;
-
return mlxsw_core_trap_register(mlxsw_core,
&mlxsw_env_temp_warn_listener,
mlxsw_env);
@@ -775,15 +955,13 @@ static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
{
- if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
- return;
-
mlxsw_core_trap_unregister(mlxsw_env->core,
&mlxsw_env_temp_warn_listener, mlxsw_env);
}
struct mlxsw_env_module_plug_unplug_event {
struct mlxsw_env *mlxsw_env;
+ u8 slot_index;
u8 module;
struct work_struct work;
};
@@ -791,6 +969,7 @@ struct mlxsw_env_module_plug_unplug_event {
static void mlxsw_env_pmpe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
bool has_temp_sensor;
u16 sensor_index;
@@ -800,11 +979,16 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[event->module].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core,
+ event->slot_index,
+ event->module);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
- err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core,
+ event->slot_index,
+ event->module,
&has_temp_sensor);
/* Do not disable events on modules without sensors or faulty sensors
* because FW returns errors.
@@ -816,7 +1000,8 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
goto out;
sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true);
+ mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index,
+ sensor_index, true);
out:
kfree(event);
@@ -826,12 +1011,14 @@ static void
mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
void *priv)
{
+ u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl);
struct mlxsw_env_module_plug_unplug_event *event;
enum mlxsw_reg_pmpe_module_status module_status;
u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
struct mlxsw_env *mlxsw_env = priv;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count ||
+ slot_index >= mlxsw_env->num_of_slots))
return;
module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
@@ -843,22 +1030,20 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
return;
event->mlxsw_env = mlxsw_env;
+ event->slot_index = slot_index;
event->module = module;
INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
mlxsw_core_schedule_work(&event->work);
}
static const struct mlxsw_listener mlxsw_env_module_plug_listener =
- MLXSW_EVENTL(mlxsw_env_pmpe_listener_func, PMPE, PMPE);
+ MLXSW_CORE_EVENTL(mlxsw_env_pmpe_listener_func, PMPE);
static int
mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
- return 0;
-
return mlxsw_core_trap_register(mlxsw_core,
&mlxsw_env_module_plug_listener,
mlxsw_env);
@@ -867,9 +1052,6 @@ mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
static void
mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
{
- if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
- return;
-
mlxsw_core_trap_unregister(mlxsw_env->core,
&mlxsw_env_module_plug_listener,
mlxsw_env);
@@ -877,14 +1059,15 @@ mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
static int
mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
- u8 module_count)
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err;
- for (i = 0; i < module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i);
mlxsw_reg_pmaos_e_set(pmaos_pl,
MLXSW_REG_PMAOS_E_GENERATE_EVENT);
mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
@@ -896,139 +1079,330 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
}
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter)
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
- mutex_lock(&mlxsw_env->module_info_lock);
- *p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ *p_counter = module_info->module_overheat_counter;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_map);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped--;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_inc;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_inc;
/* Transition to high power mode following first port using the module
* being put administratively up.
*/
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
- NULL);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ false, NULL);
if (err)
goto out_unlock;
out_inc:
- mlxsw_env->module_info[module].num_ports_up++;
+ module_info->num_ports_up++;
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_module_port_up);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- mlxsw_env->module_info[module].num_ports_up--;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_up--;
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_unlock;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_unlock;
/* Transition to low power mode following last port using the module
* being put administratively down.
*/
- __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+ __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true,
+ NULL);
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
-int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
+static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env)
{
+ struct mlxsw_env_module_info *module_info;
+ int i, j;
+
+ for (i = 0; i < env->num_of_slots; i++) {
+ env->line_cards[i] = kzalloc(struct_size(env->line_cards[i],
+ module_info,
+ env->max_module_count),
+ GFP_KERNEL);
+ if (!env->line_cards[i])
+ goto kzalloc_err;
+
+ /* Firmware defaults to high power mode policy where modules
+ * are transitioned to high power mode following plug-in.
+ */
+ for (j = 0; j < env->max_module_count; j++) {
+ module_info = &env->line_cards[i]->module_info[j];
+ module_info->power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+ }
+ }
+
+ return 0;
+
+kzalloc_err:
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+ return -ENOMEM;
+}
+
+static void mlxsw_env_line_cards_free(struct mlxsw_env *env)
+{
+ int i = env->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+}
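
Each line card entry is allocated with struct_size(), which sizes the flexible module_info[] tail for max_module_count entries with overflow checking. A plain-C approximation of the same sizing and default initialization (struct_size() is a kernel helper, emulated here with a manual calculation):

#include <stdio.h>
#include <stdlib.h>

struct module_info {
	int power_mode_policy;
};

struct line_card {
	int module_count;
	struct module_info module_info[];	/* flexible array member */
};

int main(void)
{
	int max_modules = 4;
	struct line_card *lc;
	int j;

	lc = calloc(1, sizeof(*lc) + max_modules * sizeof(lc->module_info[0]));
	if (!lc)
		return 1;

	/* Mirror the driver's default: high power mode policy per module. */
	for (j = 0; j < max_modules; j++)
		lc->module_info[j].power_mode_policy = 1;	/* stand-in for HIGH */

	printf("allocated card with room for %d modules\n", max_modules);
	free(lc);
	return 0;
}
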
+
+static int
+mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+ int err;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core,
+ slot_index);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+}
+
+static int
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int i;
+
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ struct mlxsw_env_module_info *module_info;
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index,
+ i);
+ module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_env *env,
+ u8 slot_index)
+{
+ int i;
+
+ for (i = 0; i < env->line_cards[slot_index]->module_count; i++) {
+ enum ethtool_module_power_mode_policy policy;
+ struct mlxsw_env_module_info *module_info;
+ struct netlink_ext_ack extack = {};
+ int err;
+
+ module_info = &env->line_cards[slot_index]->module_info[i];
+ policy = module_info->power_mode_policy;
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core,
+ slot_index, i,
+ policy, &extack);
+ if (err)
+ dev_err(env->bus_info->dev, "%s\n", extack._msg);
+ }
+}
+
+static void
+mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &mlxsw_env->line_cards[slot_index]->module_count,
+ NULL);
+
+ err = mlxsw_env_module_event_enable(mlxsw_env, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n",
+ slot_index);
+ goto err_mlxsw_env_module_event_enable;
+ }
+ err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n",
+ slot_index);
+ goto err_type_set;
+ }
+
+ mlxsw_env->line_cards[slot_index]->active = true;
+ /* Apply power mode policy. */
+ mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env,
+ slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return;
+
+err_type_set:
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+err_mlxsw_env_module_event_enable:
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static void
+mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+ mlxsw_env->line_cards[slot_index]->active = false;
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+ mlxsw_env->line_cards[slot_index]->module_count = 0;
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
+ .got_active = mlxsw_env_got_active,
+ .got_inactive = mlxsw_env_got_inactive,
+};
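
This ops table ties the environment code into the line card activation flow: got_active() queries MGPIR for the slot's module count, enables events, reads module types, and replays any power-mode policy that was cached while the slot was inactive. A standalone sketch of that cache-then-apply pattern, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct slot {
	bool active;
	int cached_policy;	/* recorded even while inactive */
	int applied_policy;	/* what the hardware actually has */
};

static void set_policy(struct slot *s, int policy)
{
	s->cached_policy = policy;
	if (s->active)
		s->applied_policy = policy;	/* hardware reachable, apply now */
}

static void got_active(struct slot *s)
{
	s->active = true;
	/* Replay whatever was configured while the slot was down. */
	s->applied_policy = s->cached_policy;
}

int main(void)
{
	struct slot s = { .active = false, .applied_policy = -1 };

	set_policy(&s, 1);	/* only cached, slot still inactive */
	printf("before activation: applied=%d\n", s.applied_policy);
	got_active(&s);
	printf("after activation: applied=%d\n", s.applied_policy);
	return 0;
}
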
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env)
+{
+ u8 module_count, num_of_slots, max_module_count;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
- u8 module_count;
- int i, err;
+ int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count,
+ &num_of_slots);
+ /* If the system is modular, get the maximum number of modules per-slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ max_module_count = num_of_slots ?
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) :
+ module_count;
- env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL);
+ env = kzalloc(struct_size(env, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!env)
return -ENOMEM;
- /* Firmware defaults to high power mode policy where modules are
- * transitioned to high power mode following plug-in.
- */
- for (i = 0; i < module_count; i++)
- env->module_info[i].power_mode_policy =
- ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
-
- mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
- env->module_count = module_count;
+ env->bus_info = bus_info;
+ env->num_of_slots = num_of_slots + 1;
+ env->max_module_count = max_module_count;
+ err = mlxsw_env_line_cards_alloc(env);
+ if (err)
+ goto err_mlxsw_env_line_cards_alloc;
+
+ mutex_init(&env->line_cards_lock);
*p_env = env;
+ err = mlxsw_linecards_event_ops_register(env->core,
+ &mlxsw_env_event_ops, env);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = mlxsw_env_temp_warn_event_register(mlxsw_core);
if (err)
goto err_temp_warn_event_register;
@@ -1037,34 +1411,54 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (err)
goto err_module_plug_event_register;
- err = mlxsw_env_module_oper_state_event_enable(mlxsw_core,
- env->module_count);
+ /* Set 'module_count' only for the main board. The actual count for a
+ * line card is set only after the line card is activated.
+ */
+ env->line_cards[0]->module_count = num_of_slots ? 0 : module_count;
+ /* Enable events only for the main board. Line card events are
+ * configured only after the line card is activated. Before that, access to
+ * modules on line cards is not allowed.
+ */
+ err = mlxsw_env_module_event_enable(env, 0);
if (err)
- goto err_oper_state_event_enable;
+ goto err_mlxsw_env_module_event_enable;
- err = mlxsw_env_module_temp_event_enable(mlxsw_core, env->module_count);
+ err = mlxsw_env_module_type_set(mlxsw_core, 0);
if (err)
- goto err_temp_event_enable;
+ goto err_type_set;
+
+ env->line_cards[0]->active = true;
return 0;
-err_temp_event_enable:
-err_oper_state_event_enable:
+err_type_set:
+ mlxsw_env_module_event_disable(env, 0);
+err_mlxsw_env_module_event_enable:
mlxsw_env_module_plug_event_unregister(env);
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+err_linecards_event_ops_register:
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
+err_mlxsw_env_line_cards_alloc:
kfree(env);
return err;
}
void mlxsw_env_fini(struct mlxsw_env *env)
{
+ env->line_cards[0]->active = false;
+ mlxsw_env_module_event_disable(env, 0);
mlxsw_env_module_plug_event_unregister(env);
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
kfree(env);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index da121b1a84b4..a197e3ae069c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -9,48 +9,60 @@
struct ethtool_modinfo;
struct ethtool_eeprom;
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp);
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core,
+ u8 slot_index, int module, int off,
+ int *temp);
-int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo);
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data);
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module,
- u32 *flags);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack);
int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
enum ethtool_module_power_mode_policy policy,
struct netlink_ext_ack *extack);
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter);
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
+int mlxsw_env_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index d41afdfbd085..70735068cf29 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -19,6 +19,7 @@
#define MLXSW_HWMON_ATTR_PER_SENSOR 3
#define MLXSW_HWMON_ATTR_PER_MODULE 7
#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16
#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
@@ -27,7 +28,7 @@
struct mlxsw_hwmon_attr {
struct device_attribute dev_attr;
- struct mlxsw_hwmon *hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev;
unsigned int type_index;
char name[32];
};
@@ -40,9 +41,9 @@ static int mlxsw_hwmon_get_attr_index(int index, int count)
return index;
}
-struct mlxsw_hwmon {
- struct mlxsw_core *core;
- const struct mlxsw_bus_info *bus_info;
+struct mlxsw_hwmon_dev {
+ char name[MLXSW_HWMON_DEV_NAME_LEN_MAX];
+ struct mlxsw_hwmon *hwmon;
struct device *hwmon_dev;
struct attribute_group group;
const struct attribute_group *groups[2];
@@ -51,22 +52,32 @@ struct mlxsw_hwmon {
unsigned int attrs_count;
u8 sensor_count;
u8 module_sensor_max;
+ u8 slot_index;
+ bool active;
+};
+
+struct mlxsw_hwmon {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct mlxsw_hwmon_dev line_cards[];
};
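
The show/store callbacks in this file recover the wrapping attribute structure from the embedded device_attribute via container_of(), and after this change walk one step further to the per-line-card mlxsw_hwmon_dev. A user-space sketch of the container_of() step, with the macro reimplemented locally and simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dev_attr {
	const char *name;
};

struct hwmon_attr {
	struct dev_attr dev_attr;	/* embedded generic attribute */
	int type_index;
};

static void show(struct dev_attr *attr)
{
	struct hwmon_attr *ha = container_of(attr, struct hwmon_attr, dev_attr);

	printf("%s -> type_index %d\n", attr->name, ha->type_index);
}

int main(void)
{
	struct hwmon_attr ha = {
		.dev_attr = { "temp1_input" },
		.type_index = 0,
	};

	show(&ha.dev_attr);
	return 0;
}
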
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp, index;
int err;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -80,16 +91,18 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp_max, index;
int err;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -103,10 +116,11 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned long val;
int index;
int err;
@@ -117,9 +131,10 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
if (val != 1)
return -EINVAL;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -138,13 +153,14 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsm_pl[MLXSW_REG_MFSM_LEN];
int err;
- mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index);
+ mlxsw_reg_mfsm_pack(mfsm_pl, mlxsw_hwmon_attr->type_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
@@ -157,9 +173,10 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char fore_pl[MLXSW_REG_FORE_LEN];
bool fault;
int err;
@@ -169,7 +186,7 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
return err;
}
- mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault);
+ mlxsw_reg_fore_unpack(fore_pl, mlxsw_hwmon_attr->type_index, &fault);
return sprintf(buf, "%u\n", fault);
}
@@ -178,13 +195,14 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
int err;
- mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, 0);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n");
@@ -198,9 +216,10 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
unsigned long val;
int err;
@@ -211,7 +230,7 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
if (val > 255)
return -EINVAL;
- mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, val);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n");
@@ -224,16 +243,18 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(dev, "Failed to query module temperature\n");
@@ -261,17 +282,18 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
u8 module, fault;
u16 temp;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
if (err) {
dev_err(dev, "Failed to query module temperature sensor\n");
@@ -303,15 +325,18 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_WARN,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -337,15 +362,18 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_ALARM,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -373,11 +401,11 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
return sprintf(buf, "front panel %03u\n",
- mlwsw_hwmon_attr->type_index);
+ mlxsw_hwmon_attr->type_index);
}
static ssize_t
@@ -385,11 +413,11 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- int index = mlwsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_max + 1;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ int index = mlxsw_hwmon_attr->type_index -
+ mlxsw_hwmon_dev->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -458,14 +486,15 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
-static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
+static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev,
enum mlxsw_hwmon_attr_type attr_type,
- unsigned int type_index, unsigned int num) {
+ unsigned int type_index, unsigned int num)
+{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr;
unsigned int attr_index;
- attr_index = mlxsw_hwmon->attrs_count;
- mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index];
+ attr_index = mlxsw_hwmon_dev->attrs_count;
+ mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index];
switch (attr_type) {
case MLXSW_HWMON_ATTR_TYPE_TEMP:
@@ -565,16 +594,17 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
}
mlxsw_hwmon_attr->type_index = type_index;
- mlxsw_hwmon_attr->hwmon = mlxsw_hwmon;
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev;
mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name;
sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr);
- mlxsw_hwmon->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
- mlxsw_hwmon->attrs_count++;
+ mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
+ mlxsw_hwmon_dev->attrs_count++;
}
-static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
int i;
int err;
@@ -584,10 +614,12 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
return err;
}
- mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
- for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
+ mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+ for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) {
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl,
+ mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
mtmp_pl);
@@ -602,18 +634,19 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
i);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
}
return 0;
}
-static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0};
enum mlxsw_reg_mfcr_pwm_frequency freq;
unsigned int type_index;
@@ -631,10 +664,10 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
if (tacho_active & BIT(type_index)) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
type_index, num);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
type_index, num++);
}
@@ -642,57 +675,55 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
if (pwm_active & BIT(type_index))
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_PWM,
type_index, num++);
}
return 0;
}
-static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 module_sensor_max;
int i, err;
- if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
- return 0;
-
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &module_sensor_max);
+ &module_sensor_max, NULL);
/* Add extra attributes for module temperature. Sensor index is
* assigned to sensor_count value, while all indexed before
* sensor_count are already utilized by the sensors connected through
* mtmp register by mlxsw_hwmon_temp_init().
*/
- mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
- module_sensor_max;
- for (i = mlxsw_hwmon->sensor_count;
- i < mlxsw_hwmon->module_sensor_max; i++) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon_dev->sensor_count;
+ i < mlxsw_hwmon_dev->module_sensor_max; i++) {
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
i, i);
}
@@ -700,8 +731,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
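To make the numbering concrete: if MTCAP reported sensor_count == 2 and MGPIR reported 8 module sensors (illustrative values, not taken from the patch), module_sensor_max would become 10 and the module attributes above would occupy type indexes 2 through 9, leaving indexes 0 and 1 to the ASIC sensors.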
-static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
enum mlxsw_reg_mgpir_device_type device_type;
int index, max_index, sensor_index;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -709,22 +741,24 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
u8 gbox_num;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL,
+ NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_max;
- max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
+ index = mlxsw_hwmon_dev->module_sensor_max;
+ max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_max +
+ sensor_index = index % mlxsw_hwmon_dev->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -732,15 +766,15 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
sensor_index);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP,
- index, index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP, index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
index, index);
index++;
@@ -749,51 +783,144 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
+static void
+mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+ struct device *dev;
+ int err;
+
+ dev = hwmon->bus_info->dev;
+ linecard = &hwmon->line_cards[slot_index];
+ if (linecard->active)
+ return;
+ /* For the main board, module sensor indexes start from 1, sensor index
+ * 0 is used for the ASIC. Use the same numbering for line cards.
+ */
+ linecard->sensor_count = 1;
+ linecard->slot_index = slot_index;
+ linecard->hwmon = hwmon;
+ err = mlxsw_hwmon_module_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_hwmon_gearbox_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->groups[0] = &linecard->group;
+ linecard->group.attrs = linecard->attrs;
+ sprintf(linecard->name, "%s#%02u", "linecard", slot_index);
+ linecard->hwmon_dev =
+ hwmon_device_register_with_groups(dev, linecard->name,
+ linecard, linecard->groups);
+ if (IS_ERR(linecard->hwmon_dev)) {
+ dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->active = true;
+}
+
+static void
+mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+
+ linecard = &hwmon->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ hwmon_device_unregister(linecard->hwmon_dev);
+ /* Reset attributes counter */
+ linecard->attrs_count = 0;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = {
+ .got_active = mlxsw_hwmon_got_active,
+ .got_inactive = mlxsw_hwmon_got_inactive,
+};
+
int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_hwmon **p_hwmon)
{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_hwmon *mlxsw_hwmon;
struct device *hwmon_dev;
+ u8 num_of_slots;
int err;
- mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards,
+ num_of_slots + 1), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
+
mlxsw_hwmon->core = mlxsw_core;
mlxsw_hwmon->bus_info = mlxsw_bus_info;
+ mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon;
+ mlxsw_hwmon->line_cards[0].slot_index = 0;
- err = mlxsw_hwmon_temp_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_init;
- err = mlxsw_hwmon_fans_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_fans_init;
- err = mlxsw_hwmon_module_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_module_init;
- err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_gearbox_init;
- mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
- mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
+ mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group;
+ mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs;
hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw", mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ "mlxsw",
+ &mlxsw_hwmon->line_cards[0],
+ mlxsw_hwmon->line_cards[0].groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
}
- mlxsw_hwmon->hwmon_dev = hwmon_dev;
+ err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops,
+ mlxsw_hwmon);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev;
+ mlxsw_hwmon->line_cards[0].active = true;
*p_hwmon = mlxsw_hwmon;
return 0;
+err_linecards_event_ops_register:
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
err_hwmon_register:
err_temp_gearbox_init:
err_temp_module_init:
@@ -805,6 +932,9 @@ err_temp_init:
void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
{
- hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ mlxsw_hwmon->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops, mlxsw_hwmon);
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
kfree(mlxsw_hwmon);
}
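The allocation in mlxsw_hwmon_init() above relies on the kernel's struct_size() helper for trailing flexible arrays. A minimal standalone sketch of the same pattern, with illustrative names rather than the real mlxsw structures:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_hwmon {			/* stands in for struct mlxsw_hwmon */
		void *core;			/* fixed fields ... */
		struct {
			u8 slot_index;
		} line_cards[];			/* one entry per slot */
	};

	static struct example_hwmon *example_hwmon_alloc(u8 num_of_slots)
	{
		struct example_hwmon *hw;

		/* struct_size() returns sizeof(*hw) plus (num_of_slots + 1)
		 * trailing line_cards[] entries, with overflow checking.
		 * The "+ 1" reserves index 0 for the main board, matching
		 * the patch above.
		 */
		hw = kzalloc(struct_size(hw, line_cards, num_of_slots + 1),
			     GFP_KERNEL);
		return hw;
	}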
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
new file mode 100644
index 000000000000..af37e650a8ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/idr.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include "core.h"
+
+#define MLXSW_LINECARD_DEV_ID_NAME "lc"
+
+struct mlxsw_linecard_dev {
+ struct mlxsw_linecard *linecard;
+};
+
+struct mlxsw_linecard_bdev {
+ struct auxiliary_device adev;
+ struct mlxsw_linecard *linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+};
+
+static DEFINE_IDA(mlxsw_linecard_bdev_ida);
+
+static int mlxsw_linecard_bdev_id_alloc(void)
+{
+ return ida_alloc(&mlxsw_linecard_bdev_ida, GFP_KERNEL);
+}
+
+static void mlxsw_linecard_bdev_id_free(int id)
+{
+ ida_free(&mlxsw_linecard_bdev_ida, id);
+}
+
+static void mlxsw_linecard_bdev_release(struct device *device)
+{
+ struct auxiliary_device *adev =
+ container_of(device, struct auxiliary_device, dev);
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+
+ mlxsw_linecard_bdev_id_free(adev->id);
+ kfree(linecard_bdev);
+}
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev;
+ int err;
+ int id;
+
+ id = mlxsw_linecard_bdev_id_alloc();
+ if (id < 0)
+ return id;
+
+ linecard_bdev = kzalloc(sizeof(*linecard_bdev), GFP_KERNEL);
+ if (!linecard_bdev) {
+ mlxsw_linecard_bdev_id_free(id);
+ return -ENOMEM;
+ }
+ linecard_bdev->adev.id = id;
+ linecard_bdev->adev.name = MLXSW_LINECARD_DEV_ID_NAME;
+ linecard_bdev->adev.dev.release = mlxsw_linecard_bdev_release;
+ linecard_bdev->adev.dev.parent = linecard->linecards->bus_info->dev;
+ linecard_bdev->linecard = linecard;
+
+ err = auxiliary_device_init(&linecard_bdev->adev);
+ if (err) {
+ mlxsw_linecard_bdev_id_free(id);
+ kfree(linecard_bdev);
+ return err;
+ }
+
+ err = auxiliary_device_add(&linecard_bdev->adev);
+ if (err) {
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ return err;
+ }
+
+ linecard->bdev = linecard_bdev;
+ return 0;
+}
+
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev = linecard->bdev;
+
+ if (!linecard_bdev)
+ /* Unprovisioned line cards do not have an auxiliary device. */
+ return;
+ auxiliary_device_delete(&linecard_bdev->adev);
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ linecard->bdev = NULL;
+}
+
+static int mlxsw_linecard_dev_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_devlink_info_get(linecard, req, extack);
+}
+
+static int
+mlxsw_linecard_dev_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_flash_update(devlink, linecard,
+ params->fw, extack);
+}
+
+static const struct devlink_ops mlxsw_linecard_dev_devlink_ops = {
+ .info_get = mlxsw_linecard_dev_devlink_info_get,
+ .flash_update = mlxsw_linecard_dev_devlink_flash_update,
+};
+
+static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
+ sizeof(*linecard_dev), &adev->dev);
+ if (!devlink)
+ return -ENOMEM;
+ linecard_dev = devlink_priv(devlink);
+ linecard_dev->linecard = linecard_bdev->linecard;
+ linecard_bdev->linecard_dev = linecard_dev;
+
+ devlink_register(devlink);
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ return 0;
+}
+
+static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
+static const struct auxiliary_device_id mlxsw_linecard_bdev_id_table[] = {
+ { .name = KBUILD_MODNAME "." MLXSW_LINECARD_DEV_ID_NAME },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlxsw_linecard_bdev_id_table);
+
+static struct auxiliary_driver mlxsw_linecard_driver = {
+ .name = MLXSW_LINECARD_DEV_ID_NAME,
+ .probe = mlxsw_linecard_bdev_probe,
+ .remove = mlxsw_linecard_bdev_remove,
+ .id_table = mlxsw_linecard_bdev_id_table,
+};
+
+int mlxsw_linecard_driver_register(void)
+{
+ return auxiliary_driver_register(&mlxsw_linecard_driver);
+}
+
+void mlxsw_linecard_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlxsw_linecard_driver);
+}
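For reference, the auxiliary bus matches devices to drivers by "<module name>.<device name>", with the IDA-allocated id appended per device instance; assuming this file is linked into the mlxsw_core module, the devices created above would show up as mlxsw_core.lc.0, mlxsw_core.lc.1 and so on (the names follow from KBUILD_MODNAME and are not spelled out in the patch itself).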
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
new file mode 100644
index 000000000000..83d2dc91ba2c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+#include "../mlxfw/mlxfw.h"
+
+struct mlxsw_linecard_ini_file {
+ __le16 size;
+ union {
+ u8 data[0];
+ struct {
+ __be16 hw_revision;
+ __be16 ini_version;
+ u8 __dontcare[3];
+ u8 type;
+ u8 name[20];
+ } format;
+ };
+};
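Note the mixed endianness in this header: the leading size is little-endian, while the fields describing the card are big-endian. A minimal sketch of unpacking it with the standard Linux byte-order helpers (the function name is illustrative; the code below open-codes these conversions where needed):

	static void example_ini_header_read(const struct mlxsw_linecard_ini_file *ini_file,
					    u16 *p_size, u16 *p_hw_revision,
					    u16 *p_ini_version)
	{
		*p_size = le16_to_cpu(ini_file->size);	/* bytes in ->data */
		*p_hw_revision = be16_to_cpu(ini_file->format.hw_revision);
		*p_ini_version = be16_to_cpu(ini_file->format.ini_version);
	}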
+
+struct mlxsw_linecard_types_info {
+ struct mlxsw_linecard_ini_file **ini_files;
+ unsigned int count;
+ size_t data_size;
+ char *data;
+};
+
+#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC)
+
+static void
+mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ linecard->status_event_type_to = status_event_type;
+ mlxsw_core_schedule_dw(&linecard->status_event_to_dw,
+ msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO));
+}
+
+static void
+mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ if (linecard->status_event_type_to == status_event_type)
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+}
+
+static const char *
+mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ int i;
+
+ types_info = linecards->types_info;
+ if (!types_info)
+ return NULL;
+ for (i = 0; i < types_info->count; i++) {
+ ini_file = linecards->types_info->ini_files[i];
+ if (ini_file->format.type == card_type)
+ return ini_file->format.name;
+ }
+ return NULL;
+}
+
+static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return ERR_PTR(err);
+ mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name);
+ return linecard->name;
+}
+
+struct mlxsw_linecard_device_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_linecard *linecard;
+};
+
+static int mlxsw_linecard_device_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index,
+ u32 *p_max_size,
+ u8 *p_align_bits,
+ u16 *p_max_write_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcqi_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcqi), &mcqi_pl);
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_linecard_device_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev,
+ u32 *fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data,
+ u16 size, u32 offset)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcda_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcda), &mcda_pl);
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int mlxsw_linecard_device_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE,
+ 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ u8 error_code;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_linecard_device_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static void mlxsw_linecard_device_fw_fsm_release(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_linecard_device_dev_ops = {
+ .component_query = mlxsw_linecard_device_fw_component_query,
+ .fsm_lock = mlxsw_linecard_device_fw_fsm_lock,
+ .fsm_component_update = mlxsw_linecard_device_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_linecard_device_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_linecard_device_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_linecard_device_fw_fsm_activate,
+ .fsm_query_state = mlxsw_linecard_device_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_linecard_device_fw_fsm_cancel,
+ .fsm_release = mlxsw_linecard_device_fw_fsm_release,
+};
+
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device_fw_info info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_linecard_device_dev_ops,
+ .psid = linecard->device.info.psid,
+ .psid_size = strlen(linecard->device.info.psid),
+ .devlink = linecard_devlink,
+ },
+ .mlxsw_core = mlxsw_core,
+ .linecard = linecard,
+ };
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ NL_SET_ERR_MSG_MOD(extack, "Only active line cards can be flashed");
+ err = -EINVAL;
+ goto unlock;
+ }
+ err = mlxsw_core_fw_flash(mlxsw_core, &info.mlxfw_dev,
+ firmware, extack);
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
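With the nested devlink instance created by core_linecard_dev.c above, this update path is reachable through ordinary devlink flashing. A hypothetical invocation, where both the device name and the firmware file are illustrative:

	$ devlink dev flash auxiliary/mlxsw_core.lc.0 file mellanox/fw-linecard.mfa2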
+
+static int mlxsw_linecard_device_psid_get(struct mlxsw_linecard *linecard,
+ u8 device_index, char *psid)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mgir_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index, device_index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mgir), &mgir_pl);
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(mgir_pl, psid);
+ return 0;
+}
+
+static int mlxsw_linecard_device_info_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ bool flashable_found = false;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ if (!flash_owner) /* We care only about flashable ones. */
+ continue;
+ if (flashable_found) {
+ dev_warn_once(linecard->linecards->bus_info->dev, "linecard %u: More flashable devices present, exposing only the first one\n",
+ linecard->slot_index);
+ return 0;
+ }
+
+ err = mlxsw_linecard_device_psid_get(linecard, device_index,
+ info.psid);
+ if (err)
+ return err;
+
+ linecard->device.info = info;
+ linecard->device.index = device_index;
+ flashable_found = true;
+ } while (msg_seq);
+
+ return 0;
+}
+
+static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
+{
+ linecard->provisioned = false;
+ linecard->ready = false;
+ linecard->active = false;
+ devlink_linecard_provision_fail(linecard->devlink_linecard);
+}
+
+struct mlxsw_linecards_event_ops_item {
+ struct list_head list;
+ const struct mlxsw_linecards_event_ops *event_ops;
+ void *priv;
+};
+
+static void
+mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard,
+ mlxsw_linecards_event_op_t *op, void *priv)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+
+ if (!op)
+ return;
+ op(mlxsw_core, linecard->slot_index, priv);
+}
+
+static void
+mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+static void
+mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item;
+
+ if (!linecards)
+ return 0;
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->event_ops = ops;
+ item->priv = priv;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_add_tail(&item->list, &linecards->event_ops_list);
+ mutex_unlock(&linecards->event_ops_list_lock);
+ mlxsw_linecards_event_ops_register_call(linecards, item);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_register);
+
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item, *tmp;
+ bool found = false;
+
+ if (!linecards)
+ return;
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) {
+ if (item->event_ops == ops && item->priv == priv) {
+ list_del(&item->list);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&linecards->event_ops_list_lock);
+
+ if (!found)
+ return;
+ mlxsw_linecards_event_ops_unregister_call(linecards, item);
+ kfree(item);
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
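The hwmon rework earlier in this patch is one consumer of these notifications. A minimal sketch of another consumer, with hypothetical names; note that mlxsw_linecard_event_op_call() skips NULL callbacks, so either hook may be left unset:

	static void example_got_active(struct mlxsw_core *mlxsw_core,
				       u8 slot_index, void *priv)
	{
		/* Set up per-line-card state for slot_index here. */
	}

	static struct mlxsw_linecards_event_ops example_event_ops = {
		.got_active = example_got_active,
		/* .got_inactive intentionally left NULL. */
	};

	static int example_init(struct mlxsw_core *mlxsw_core, void *priv)
	{
		/* Registration immediately replays got_active for already
		 * active line cards, so the consumer needs no extra scan.
		 */
		return mlxsw_linecards_event_ops_register(mlxsw_core,
							  &example_event_ops,
							  priv);
	}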
+
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (WARN_ON(!linecard->provisioned)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+ if (linecard->active) {
+ struct mlxsw_linecard_device_info *info = &linecard->device.info;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ info->psid);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int
+mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
+ u16 hw_revision, u16 ini_version)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ const char *type;
+ int err;
+
+ type = mlxsw_linecard_types_lookup(linecards, card_type);
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ if (!type) {
+ /* It is possible for a line card to be provisioned before
+ * driver initialization. Due to a missing INI bundle file
+ * or an outdated one, the queried card's type might not
+ * be recognized by the driver. In this case, try to query
+ * the card's name from the device.
+ */
+ type = mlxsw_linecard_type_name(linecard);
+ if (IS_ERR(type)) {
+ mlxsw_linecard_provision_fail(linecard);
+ return PTR_ERR(type);
+ }
+ }
+ linecard->provisioned = true;
+ linecard->hw_revision = hw_revision;
+ linecard->ini_version = ini_version;
+
+ err = mlxsw_linecard_bdev_add(linecard);
+ if (err) {
+ linecard->provisioned = false;
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+
+ devlink_linecard_provision_set(linecard->devlink_linecard, type);
+ return 0;
+}
+
+static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ mlxsw_linecard_bdev_del(linecard);
+ linecard->provisioned = false;
+ devlink_linecard_provision_clear(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ err = mlxsw_linecard_device_info_update(linecard);
+ if (err)
+ return err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = true;
+ return 0;
+}
+
+static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = false;
+ return 0;
+}
+
+static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_active_ops_call(linecard);
+ linecard->active = true;
+ devlink_linecard_activate(linecard->devlink_linecard);
+}
+
+static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_inactive_ops_call(linecard);
+ linecard->active = false;
+ devlink_linecard_deactivate(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard,
+ const char *mddq_pl)
+{
+ enum mlxsw_reg_mddq_slot_info_ready ready;
+ bool provisioned, sr_valid, active;
+ u16 ini_version, hw_revision;
+ u8 slot_index, card_type;
+ int err = 0;
+
+ mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned,
+ &sr_valid, &ready, &active,
+ &hw_revision, &ini_version,
+ &card_type);
+
+ if (linecard) {
+ if (WARN_ON(slot_index != linecard->slot_index))
+ return -EINVAL;
+ } else {
+ if (WARN_ON(slot_index > linecards->count))
+ return -EINVAL;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ }
+
+ mutex_lock(&linecard->lock);
+
+ if (provisioned && linecard->provisioned != provisioned) {
+ err = mlxsw_linecard_provision_set(linecard, card_type,
+ hw_revision, ini_version);
+ if (err)
+ goto out;
+ }
+
+ if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) {
+ err = mlxsw_linecard_ready_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (active && linecard->active != active)
+ mlxsw_linecard_active_set(linecard);
+
+ if (!active && linecard->active != active)
+ mlxsw_linecard_active_clear(linecard);
+
+ if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY &&
+ linecard->ready) {
+ err = mlxsw_linecard_ready_clear(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!provisioned && linecard->provisioned != provisioned)
+ mlxsw_linecard_provision_clear(linecard);
+
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+
+ return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
+}
+
+static void mlxsw_linecards_irq_event_handler(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ /* Handle change of line card active state. */
+ for (i = 0; i < linecards->count; i++) {
+ struct mlxsw_linecard *linecard = mlxsw_linecard_get(linecards,
+ i + 1);
+
+ mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ }
+}
+
+static const char * const mlxsw_linecard_status_event_type_name[] = {
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
+};
+
+static void mlxsw_linecard_status_event_to_work(struct work_struct *work)
+{
+ struct mlxsw_linecard *linecard =
+ container_of(work, struct mlxsw_linecard,
+ status_event_to_dw.work);
+
+ mutex_lock(&linecard->lock);
+ dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event\n",
+ linecard->slot_index,
+ mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard)
+{
+ dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error\n",
+ linecard->slot_index);
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false);
+ return mlxsw_reg_write(linecard->linecards->mlxsw_core,
+ MLXSW_REG(mbct), linecard->mbct_pl);
+}
+
+static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_fsm_state fsm_state)
+{
+ if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR)
+ return 0;
+ return __mlxsw_linecard_fix_fsm_state(linecard);
+}
+
+static int
+mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_status *status,
+ enum mlxsw_reg_mbct_fsm_state *fsm_state,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS, false);
+ err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state);
+ return err;
+}
+
+static int
+mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ const struct mlxsw_linecard_ini_file *ini_file,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ size_t size_left;
+ const u8 *data;
+ int err;
+
+ size_left = le16_to_cpu(ini_file->size);
+ data = ini_file->data;
+ while (size_left) {
+ size_t data_size = MLXSW_REG_MBCT_DATA_LEN;
+ bool is_last = false;
+
+ if (size_left <= MLXSW_REG_MBCT_DATA_LEN) {
+ data_size = size_left;
+ is_last = true;
+ }
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
+ mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size,
+ is_last, data);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL,
+ &status, &fsm_state);
+ if ((!is_last && status != MLXSW_REG_MBCT_STATUS_PART_DATA) ||
+ (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data");
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+ }
+ size_left -= data_size;
+ data += data_size;
+ }
+
+ return 0;
+}
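To make the chunking concrete: if the payload were 2600 bytes and MLXSW_REG_MBCT_DATA_LEN were 1024 (an illustrative value), the loop above would issue three DATA_TRANSFER writes of 1024, 1024 and 552 bytes, setting is_last and expecting MLXSW_REG_MBCT_STATUS_LAST_DATA only on the final one.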
+
+static int
+mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ switch (status) {
+ case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE:
+ break;
+ default:
+ /* Should not happen */
+ fallthrough;
+ case MLXSW_REG_MBCT_STATUS_ERASE_FAILED:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI");
+ goto fix_fsm_err_out;
+ case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used");
+ goto fix_fsm_err_out;
+ }
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core,
+ const char *mbct_pl)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ struct mlxsw_linecard *linecard;
+ u8 slot_index;
+
+ mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state);
+ if (WARN_ON(slot_index > linecards->count))
+ return;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mutex_lock(&linecard->lock);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI\n",
+ linecard->slot_index);
+ goto fix_fsm_out;
+ }
+ mutex_unlock(&linecard->lock);
+ return;
+
+fix_fsm_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ACTIVATE, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI");
+ goto fix_fsm_err_out;
+ }
+
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+#define MLXSW_LINECARD_INI_WAIT_RETRIES 10
+#define MLXSW_LINECARD_INI_WAIT_MS 500
+
+static int
+mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ unsigned int ini_wait_retries = 0;
+ int err;
+
+query_ini_status:
+ err = mlxsw_linecard_query_ini_status(linecard, &status,
+ &fsm_state, extack);
+ if (err)
+ return err;
+
+ switch (fsm_state) {
+ case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE:
+ if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused");
+ return -EINVAL;
+ }
+ mdelay(MLXSW_LINECARD_INI_WAIT_MS);
+ goto query_ini_status;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool mlxsw_linecard_port_selector(void *priv, u16 local_port)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+ return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port);
+}
+
+static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard,
+ ini_file, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard,
+ void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ mlxsw_core_ports_remove_selected(mlxsw_core,
+ mlxsw_linecard_port_selector,
+ linecard);
+
+ err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ bool ret;
+
+ mutex_lock(&linecard->lock);
+ ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) &&
+ linecard->ini_version == be16_to_cpu(ini_file->format.ini_version);
+ mutex_unlock(&linecard->lock);
+ return ret;
+}
+
+static unsigned int
+mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard,
+ void *priv)
+{
+ struct mlxsw_linecard *linecard = priv;
+
+ return linecard->linecards->types_info ?
+ linecard->linecards->types_info->count : 0;
+}
+
+static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard,
+ void *priv, unsigned int index,
+ const char **type, const void **type_priv)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ struct mlxsw_linecard *linecard = priv;
+
+ types_info = linecard->linecards->types_info;
+ if (WARN_ON_ONCE(!types_info))
+ return;
+ ini_file = types_info->ini_files[index];
+ *type = ini_file->format.name;
+ *type_priv = ini_file;
+}
+
+static const struct devlink_linecard_ops mlxsw_linecard_ops = {
+ .provision = mlxsw_linecard_provision,
+ .unprovision = mlxsw_linecard_unprovision,
+ .same_provision = mlxsw_linecard_same_provision,
+ .types_count = mlxsw_linecard_types_count,
+ .types_get = mlxsw_linecard_types_get,
+};
+
+struct mlxsw_linecard_status_event {
+ struct mlxsw_core *mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_status_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_linecards *linecards;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_status_event, work);
+ mlxsw_core = event->mlxsw_core;
+ linecards = mlxsw_core_linecards(mlxsw_core);
+ mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg,
+ char *mddq_pl, void *priv)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_status_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
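
(Note the deferral pattern: the listener runs in the atomic trap-processing context, so it allocates with GFP_ATOMIC, copies the register payload, and hands the real work to the ordered workqueue; the work item frees the event when done. The BCTOE listener below follows the same scheme.)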
+
+struct mlxsw_linecard_bct_event {
+ struct mlxsw_core *mlxsw_core;
+ char mbct_pl[MLXSW_REG_MBCT_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_bct_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_bct_event, work);
+ mlxsw_core = event->mlxsw_core;
+ mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg,
+ char *mbct_pl, void *priv)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_bct_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_linecard_listener[] = {
+ MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC),
+ MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE),
+};
+
+static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ bool enable)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+}
+
+static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct devlink_linecard *devlink_linecard;
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ linecard->slot_index = slot_index;
+ linecard->linecards = linecards;
+ mutex_init(&linecard->lock);
+
+ devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
+ if (IS_ERR(devlink_linecard))
+ return PTR_ERR(devlink_linecard);
+
+ linecard->devlink_linecard = devlink_linecard;
+ INIT_DELAYED_WORK(&linecard->status_event_to_dw,
+ &mlxsw_linecard_status_event_to_work);
+
+ return 0;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_event_delivery_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
+ if (err)
+ return err;
+
+ err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ if (err)
+ goto err_status_get_and_process;
+
+ return 0;
+
+err_status_get_and_process:
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+ return err;
+}
+
+static void
+mlxsw_linecard_event_delivery_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+}
+
+/* LINECARDS INI BUNDLE FILE
+ * +----------------------------------+
+ * | MAGIC ("NVLCINI+") |
+ * +----------------------------------+ +--------------------+
+ * | INI 0 +---> | __le16 size |
+ * +----------------------------------+ | __be16 hw_revision |
+ * | INI 1 | | __be16 ini_version |
+ * +----------------------------------+ | u8 __dontcare[3] |
+ * | ... | | u8 type |
+ * +----------------------------------+ | u8 name[20] |
+ * | INI N | | ... |
+ * +----------------------------------+ +--------------------+
+ */
+
+#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+"
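
To make the layout concrete, here is a minimal user-space sketch that walks such a bundle. The struct is a hypothetical mirror of the diagram above (the driver's real struct mlxsw_linecard_ini_file is defined elsewhere in core_linecards.c); note that the size field counts the payload only, not itself:

#include <endian.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical mirror of the on-disk INI header sketched above. */
struct lc_ini_file {
	uint16_t size;        /* little endian; excludes this field */
	uint16_t hw_revision; /* big endian */
	uint16_t ini_version; /* big endian */
	uint8_t  dontcare[3];
	uint8_t  type;
	char     name[20];
};

/* Walk every INI in a bundle; returns the count, or -1 if malformed. */
static int lc_bundle_walk(const uint8_t *data, size_t size)
{
	static const char magic[] = "NVLCINI+";
	const size_t magic_size = sizeof(magic) - 1;
	int count = 0;

	if (size < magic_size || memcmp(data, magic, magic_size))
		return -1;
	data += magic_size;
	size -= magic_size;

	while (size > 0) {
		const struct lc_ini_file *ini = (const void *)data;
		size_t ini_size;

		if (size < sizeof(*ini))
			return -1;
		ini_size = le16toh(ini->size);
		if (ini_size + sizeof(uint16_t) > size || ini_size % 4)
			return -1;
		printf("INI %d: %.20s\n", count, ini->name);
		data += ini_size + sizeof(uint16_t);
		size -= ini_size + sizeof(uint16_t);
		count++;
	}
	return count;
}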
+
+static int
+mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ struct mlxsw_linecard_ini_file *ini_file;
+ size_t size = types_info->data_size;
+ const u8 *data = types_info->data;
+ unsigned int count = 0;
+ u16 ini_file_size;
+
+ if (size < magic_size) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n");
+ return -EINVAL;
+ }
+ if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n");
+ return -EINVAL;
+ }
+
+ data += magic_size;
+ size -= magic_size;
+
+ while (size > 0) {
+ if (size < sizeof(*ini_file)) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n");
+ return -EINVAL;
+ }
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ if (ini_file_size + sizeof(__le16) > size) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n");
+ return -EINVAL;
+ }
+ if (ini_file_size % 4) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n");
+ return -EINVAL;
+ }
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+ if (!count) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n");
+ return -EINVAL;
+ }
+ types_info->count = count;
+ return 0;
+}
+
+static void
+mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ size_t size = types_info->data_size - magic_size;
+ const u8 *data = types_info->data + magic_size;
+ struct mlxsw_linecard_ini_file *ini_file;
+ unsigned int count = 0;
+ u16 ini_file_size;
+ int i;
+
+ while (size) {
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ for (i = 0; i < ini_file_size / 4; i++) {
+ u32 *val = &((u32 *) ini_file->data)[i];
+
+ *val = swab32(*val);
+ }
+ types_info->ini_files[count] = ini_file;
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+}
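
(The in-place swab32() pass converts each 32-bit word of the INI payload exactly once at load time; the bundle apparently stores the words byte-swapped relative to what the device-facing code expects. For reference, swab32(0x11223344) yields 0x44332211.)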
+
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \
+ "mellanox/lc_ini_bundle_%u_%u.bin"
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \
+ (sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4)
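
A worked example of the headroom arithmetic, with hypothetical revision numbers (assumes <stdio.h> and the two defines above):

char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];

/* sizeof(FMT) already counts the two "%u" placeholders (4 chars) and the
 * terminating NUL, so the extra 4 bytes leave exactly enough room for two
 * numbers of up to four digits each. For fw rev minor 2010 and subminor
 * 1006 this yields:
 *   "mellanox/lc_ini_bundle_2010_1006.bin"  (36 chars + NUL = 37 bytes)
 */
snprintf(filename, sizeof(filename),
	 MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT, 2010, 1006);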
+
+static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev;
+ char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];
+ struct mlxsw_linecard_types_info *types_info;
+ const struct firmware *firmware;
+ int err;
+
+ err = snprintf(filename, sizeof(filename),
+ MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT,
+ rev->minor, rev->subminor);
+ WARN_ON(err >= sizeof(filename));
+
+ err = request_firmware_direct(&firmware, filename,
+ linecards->bus_info->dev);
+ if (err) {
+ dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n",
+ filename);
+ return 0;
+ }
+
+ types_info = kzalloc(sizeof(*types_info), GFP_KERNEL);
+ if (!types_info) {
+ release_firmware(firmware);
+ return -ENOMEM;
+ }
+ linecards->types_info = types_info;
+
+ types_info->data_size = firmware->size;
+ types_info->data = vmalloc(types_info->data_size);
+ if (!types_info->data) {
+ err = -ENOMEM;
+ release_firmware(firmware);
+ goto err_data_alloc;
+ }
+ memcpy(types_info->data, firmware->data, types_info->data_size);
+ release_firmware(firmware);
+
+ err = mlxsw_linecard_types_file_validate(linecards, types_info);
+ if (err) {
+ err = 0;
+ goto err_type_file_file_validate;
+ }
+
+ types_info->ini_files = kmalloc_array(types_info->count,
+ sizeof(struct mlxsw_linecard_ini_file *),
+ GFP_KERNEL);
+ if (!types_info->ini_files) {
+ err = -ENOMEM;
+ goto err_ini_files_alloc;
+ }
+
+ mlxsw_linecard_types_file_parse(types_info);
+
+ return 0;
+
+err_ini_files_alloc:
+err_type_file_file_validate:
+ vfree(types_info->data);
+err_data_alloc:
+ kfree(types_info);
+ return err;
+}
+
+static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards)
+{
+ struct mlxsw_linecard_types_info *types_info = linecards->types_info;
+
+ if (!types_info)
+ return;
+ kfree(types_info->ini_files);
+ vfree(types_info->data);
+ kfree(types_info);
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_linecards *linecards;
+ u8 slot_count;
+ int err;
+ int i;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ NULL, &slot_count);
+ if (!slot_count)
+ return 0;
+
+ linecards = vzalloc(struct_size(linecards, linecards, slot_count));
+ if (!linecards)
+ return -ENOMEM;
+ linecards->count = slot_count;
+ linecards->mlxsw_core = mlxsw_core;
+ linecards->bus_info = bus_info;
+ INIT_LIST_HEAD(&linecards->event_ops_list);
+ mutex_init(&linecards->event_ops_list_lock);
+
+ err = mlxsw_linecard_types_init(mlxsw_core, linecards);
+ if (err)
+ goto err_types_init;
+
+ err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ if (err)
+ goto err_traps_register;
+
+ err = mlxsw_core_irq_event_handler_register(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+ if (err)
+ goto err_irq_event_handler_register;
+
+ mlxsw_core_linecards_set(mlxsw_core, linecards);
+
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1);
+ if (err)
+ goto err_linecard_init;
+ }
+
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_event_delivery_init(mlxsw_core, linecards,
+ i + 1);
+ if (err)
+ goto err_linecard_event_delivery_init;
+ }
+
+ return 0;
+
+err_linecard_event_delivery_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ i = linecards->count;
+err_linecard_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+err_irq_event_handler_register:
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+err_traps_register:
+ mlxsw_linecard_types_fini(linecards);
+err_types_init:
+ vfree(linecards);
+ return err;
+}
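
(Note that slot numbering is 1-based in the loops above, hence the i + 1: slot index 0 designates the main board, while line cards occupy slots 1 through count.)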
+
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ if (!linecards)
+ return;
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ mlxsw_linecard_types_fini(linecards);
+ mutex_destroy(&linecards->event_ops_list_lock);
+ WARN_ON(!list_empty(&linecards->event_ops_list));
+ vfree(linecards);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index b29824448aa8..987fe5c9d5a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -21,8 +21,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
-#define MLXSW_THERMAL_ZONE_MAX_NAME 16
-#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MIN_STATE 2
#define MLXSW_THERMAL_MAX_DUTY 255
@@ -82,6 +80,16 @@ struct mlxsw_thermal_module {
struct thermal_zone_device *tzdev;
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
int module; /* Module or gearbox number */
+ u8 slot_index;
+};
+
+struct mlxsw_thermal_area {
+ struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
+ struct mlxsw_thermal_module *tz_gearbox_arr;
+ u8 tz_gearbox_num;
+ u8 slot_index;
+ bool active;
};
struct mlxsw_thermal {
@@ -92,12 +100,7 @@ struct mlxsw_thermal {
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
- struct mlxsw_thermal_module *tz_module_arr;
- u8 tz_module_num;
- struct mlxsw_thermal_module *tz_gearbox_arr;
- u8 tz_gearbox_num;
- unsigned int tz_highest_score;
- struct thermal_zone_device *tz_highest_dev;
+ struct mlxsw_thermal_area line_cards[];
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -123,8 +126,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
- if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- strlen(cdev->type)))
+ if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i]))
return 0;
}
@@ -150,13 +152,15 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
* EEPROM if we got valid thresholds from MTMP.
*/
if (!emerg_temp || !crit_temp) {
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_WARN,
&crit_temp);
if (err)
return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_ALARM,
&emerg_temp);
if (err)
@@ -186,34 +190,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
return 0;
}
-static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal,
- struct thermal_zone_device *tzdev,
- struct mlxsw_thermal_trip *trips,
- int temp)
-{
- struct mlxsw_thermal_trip *trip = trips;
- unsigned int score, delta, i, shift = 1;
-
- /* Calculate thermal zone score, if temperature is above the hot
- * threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX.
- */
- score = MLXSW_THERMAL_TEMP_SCORE_MAX;
- for (i = MLXSW_THERMAL_TEMP_TRIP_NORM; i < MLXSW_THERMAL_NUM_TRIPS;
- i++, trip++) {
- if (temp < trip->temp) {
- delta = DIV_ROUND_CLOSEST(temp, trip->temp - temp);
- score = delta * shift;
- break;
- }
- shift *= 256;
- }
-
- if (score > thermal->tz_highest_score) {
- thermal->tz_highest_score = score;
- thermal->tz_highest_dev = tzdev;
- }
-}
-
static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
struct thermal_cooling_device *cdev)
{
@@ -271,7 +247,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
int temp;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -279,9 +255,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return err;
}
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
- if (temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips,
- temp);
*p_temp = temp;
return 0;
@@ -342,20 +315,9 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
return 0;
}
-static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trend *trend)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- if (tzdev == thermal->tz_highest_dev)
- return 1;
-
- *trend = THERMAL_TREND_STABLE;
- return 0;
-}
+static struct thermal_zone_params mlxsw_thermal_params = {
+ .no_hwmon = true,
+};
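
(Presumably, .no_hwmon = true keeps the thermal core from creating duplicate hwmon devices for these zones, since mlxsw already exposes the same sensors through its dedicated hwmon driver in core_hwmon.c.)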
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.bind = mlxsw_thermal_bind,
@@ -366,7 +328,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.set_trip_temp = mlxsw_thermal_set_trip_temp,
.get_trip_hyst = mlxsw_thermal_get_trip_hyst,
.set_trip_hyst = mlxsw_thermal_set_trip_hyst,
- .get_trend = mlxsw_thermal_trend_get,
};
static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
@@ -388,11 +349,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
trip->min_state,
THERMAL_WEIGHT_DEFAULT);
if (err < 0)
- goto err_bind_cooling_device;
+ goto err_thermal_zone_bind_cooling_device;
}
return 0;
-err_bind_cooling_device:
+err_thermal_zone_bind_cooling_device:
for (j = i - 1; j >= 0; j--)
thermal_zone_unbind_cooling_device(tzdev, j, cdev);
return err;
@@ -419,15 +380,16 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
static void
mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
- u16 sensor_index, int *p_temp,
- int *p_crit_temp,
+ u8 slot_index, u16 sensor_index,
+ int *p_temp, int *p_crit_temp,
int *p_emerg_temp)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int err;
/* Read module temperature and thresholds. */
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index,
+ false, false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
/* Set temperature and thresholds to zero to avoid passing
@@ -451,13 +413,13 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
int temp, crit_temp, emerg_temp;
struct device *dev;
u16 sensor_index;
- int err;
dev = thermal->bus_info->dev;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module;
/* Read module temperature and thresholds. */
mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ tz->slot_index,
sensor_index, &temp,
&crit_temp, &emerg_temp);
*p_temp = temp;
@@ -466,10 +428,8 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
/* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
- crit_temp, emerg_temp);
- if (!err && temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
+ mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
+ crit_temp, emerg_temp);
return 0;
}
@@ -533,22 +493,6 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
-static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trend *trend)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
- struct mlxsw_thermal *thermal = tz->parent;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- if (tzdev == thermal->tz_highest_dev)
- return 1;
-
- *trend = THERMAL_TREND_STABLE;
- return 0;
-}
-
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@@ -558,7 +502,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@@ -572,15 +515,13 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
int err;
index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module;
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
- if (temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
*p_temp = temp;
return 0;
@@ -595,7 +536,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
@@ -668,17 +608,22 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int err;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
- module_tz->module + 1);
+ if (module_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d",
+ module_tz->slot_index, module_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ module_tz->module + 1);
module_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0,
+ &mlxsw_thermal_params,
+ 0,
module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
@@ -699,25 +644,28 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 module)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
int dummy_temp, crit_temp, emerg_temp;
u16 sensor_index;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
- module_tz = &thermal->tz_module_arr[module];
+ module_tz = &area->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
return 0;
module_tz->module = module;
+ module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Read module temperature and thresholds. */
- mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp,
+ mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index,
+ sensor_index, &dummy_temp,
&crit_temp, &emerg_temp);
/* Update trip point according to the module data. */
return mlxsw_thermal_module_trips_update(dev, core, module_tz,
@@ -735,80 +683,85 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
- if (!mlxsw_core_res_query_enabled(core))
- return 0;
-
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &thermal->tz_module_num);
+ &area->tz_module_num, NULL);
+
+ /* For a modular system, the module counter could be zero. */
+ if (!area->tz_module_num)
+ return 0;
- thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
- sizeof(*thermal->tz_module_arr),
- GFP_KERNEL);
- if (!thermal->tz_module_arr)
+ area->tz_module_arr = kcalloc(area->tz_module_num,
+ sizeof(*area->tz_module_arr),
+ GFP_KERNEL);
+ if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_module_num; i++) {
- err = mlxsw_thermal_module_init(dev, core, thermal, i);
+ for (i = 0; i < area->tz_module_num; i++) {
+ err = mlxsw_thermal_module_init(dev, core, thermal, area, i);
if (err)
- goto err_unreg_tz_module_arr;
+ goto err_thermal_module_init;
}
- for (i = 0; i < thermal->tz_module_num; i++) {
- module_tz = &thermal->tz_module_arr[i];
+ for (i = 0; i < area->tz_module_num; i++) {
+ module_tz = &area->tz_module_arr[i];
if (!module_tz->parent)
continue;
err = mlxsw_thermal_module_tz_init(module_tz);
if (err)
- goto err_unreg_tz_module_arr;
+ goto err_thermal_module_tz_init;
}
return 0;
-err_unreg_tz_module_arr:
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+err_thermal_module_tz_init:
+err_thermal_module_init:
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
return err;
}
static void
-mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- if (!mlxsw_core_res_query_enabled(thermal->core))
- return;
-
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
}
static int
mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int ret;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
- gearbox_tz->module + 1);
+ if (gearbox_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d",
+ gearbox_tz->slot_index, gearbox_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ gearbox_tz->module + 1);
gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0,
+ &mlxsw_thermal_params, 0,
gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
@@ -828,7 +781,8 @@ mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz)
static int
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
enum mlxsw_reg_mgpir_device_type device_type;
struct mlxsw_thermal_module *gearbox_tz;
@@ -837,60 +791,114 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
int i;
int err;
- if (!mlxsw_core_res_query_enabled(core))
- return 0;
-
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
- NULL);
+ NULL, NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- thermal->tz_gearbox_num = gbox_num;
- thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
- sizeof(*thermal->tz_gearbox_arr),
- GFP_KERNEL);
- if (!thermal->tz_gearbox_arr)
+ area->tz_gearbox_num = gbox_num;
+ area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num,
+ sizeof(*area->tz_gearbox_arr),
+ GFP_KERNEL);
+ if (!area->tz_gearbox_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_gearbox_num; i++) {
- gearbox_tz = &thermal->tz_gearbox_arr[i];
+ for (i = 0; i < area->tz_gearbox_num; i++) {
+ gearbox_tz = &area->tz_gearbox_arr[i];
memcpy(gearbox_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
+ gearbox_tz->slot_index = area->slot_index;
err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
if (err)
- goto err_unreg_tz_gearbox;
+ goto err_thermal_gearbox_tz_init;
}
return 0;
-err_unreg_tz_gearbox:
+err_thermal_gearbox_tz_init:
for (i--; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
return err;
}
static void
-mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- if (!mlxsw_core_res_query_enabled(thermal->core))
+ for (i = area->tz_gearbox_num - 1; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
+}
+
+static void
+mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+ int err;
+
+ linecard = &thermal->line_cards[slot_index];
+
+ if (linecard->active)
return;
- for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ linecard->slot_index = slot_index;
+ err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core,
+ thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev,
+ thermal->core, thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n",
+ slot_index);
+ goto err_thermal_linecard_gearboxes_init;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_thermal_linecard_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, linecard);
+}
+
+static void
+mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+
+ linecard = &thermal->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ mlxsw_thermal_gearboxes_fini(thermal, linecard);
+ mlxsw_thermal_modules_fini(thermal, linecard);
}
+static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = {
+ .got_active = mlxsw_thermal_got_active,
+ .got_inactive = mlxsw_thermal_got_inactive,
+};
+
int mlxsw_thermal_init(struct mlxsw_core *core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_thermal **p_thermal)
@@ -898,24 +906,34 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 };
enum mlxsw_reg_mfcr_pwm_frequency freq;
struct device *dev = bus_info->dev;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_thermal *thermal;
+ u8 pwm_active, num_of_slots;
u16 tacho_active;
- u8 pwm_active;
int err, i;
- thermal = devm_kzalloc(dev, sizeof(*thermal),
- GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!thermal)
return -ENOMEM;
thermal->core = core;
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+ thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
if (err) {
dev_err(dev, "Failed to probe PWMs\n");
- goto err_free_thermal;
+ goto err_reg_query;
}
mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
@@ -929,14 +947,14 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl),
mfsl_pl);
if (err)
- goto err_free_thermal;
+ goto err_reg_query;
/* set the minimal RPMs to 0 */
mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0);
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl),
mfsl_pl);
if (err)
- goto err_free_thermal;
+ goto err_reg_write;
}
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
@@ -949,7 +967,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
- goto err_unreg_cdevs;
+ goto err_thermal_cooling_device_register;
}
thermal->cdevs[i] = cdev;
}
@@ -968,44 +986,59 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
MLXSW_THERMAL_TRIP_MASK,
thermal,
&mlxsw_thermal_ops,
- NULL, 0,
+ &mlxsw_thermal_params, 0,
thermal->polling_delay);
if (IS_ERR(thermal->tzdev)) {
err = PTR_ERR(thermal->tzdev);
dev_err(dev, "Failed to register thermal zone\n");
- goto err_unreg_cdevs;
+ goto err_thermal_zone_device_register;
}
- err = mlxsw_thermal_modules_init(dev, core, thermal);
+ err = mlxsw_thermal_modules_init(dev, core, thermal,
+ &thermal->line_cards[0]);
+ if (err)
+ goto err_thermal_modules_init;
+
+ err = mlxsw_thermal_gearboxes_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
- goto err_unreg_tzdev;
+ goto err_thermal_gearboxes_init;
- err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
+ err = mlxsw_linecards_event_ops_register(core,
+ &mlxsw_thermal_event_ops,
+ thermal);
if (err)
- goto err_unreg_modules_tzdev;
+ goto err_linecards_event_ops_register;
err = thermal_zone_device_enable(thermal->tzdev);
if (err)
- goto err_unreg_gearboxes;
+ goto err_thermal_zone_device_enable;
+ thermal->line_cards[0].active = true;
*p_thermal = thermal;
return 0;
-err_unreg_gearboxes:
- mlxsw_thermal_gearboxes_fini(thermal);
-err_unreg_modules_tzdev:
- mlxsw_thermal_modules_fini(thermal);
-err_unreg_tzdev:
+err_thermal_zone_device_enable:
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+err_linecards_event_ops_register:
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+err_thermal_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
+err_thermal_modules_init:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
}
-err_unreg_cdevs:
+err_thermal_zone_device_register:
+err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
if (thermal->cdevs[i])
thermal_cooling_device_unregister(thermal->cdevs[i]);
-err_free_thermal:
- devm_kfree(dev, thermal);
+err_reg_write:
+err_reg_query:
+ kfree(thermal);
return err;
}
@@ -1013,8 +1046,12 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
{
int i;
- mlxsw_thermal_gearboxes_fini(thermal);
- mlxsw_thermal_modules_fini(thermal);
+ thermal->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
@@ -1027,5 +1064,5 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
}
}
- devm_kfree(thermal->bus_info->dev, thermal);
+ kfree(thermal);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 939b692ffc33..f5f5f8dc3d19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/platform_data/mlxreg.h>
#include <linux/slab.h>
#include "cmd.h"
@@ -51,6 +52,15 @@
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
+/* The driver can be initialized by the kernel platform driver or from
+ * user space. In the first case, the IRQ line number is passed through
+ * the platform data; otherwise, the default IRQ line is used. The default
+ * IRQ is relevant only for a specific I2C slave address, which allows a
+ * 3.4 MHz I2C path to the chip (a special hardware feature for I2C
+ * acceleration).
+ */
+#define MLXSW_I2C_DEFAULT_IRQ 17
+#define MLXSW_FAST_I2C_SLAVE 0x37
+
/**
* struct mlxsw_i2c - device private data:
* @cmd: command attributes;
@@ -63,6 +73,9 @@
* @core: switch core pointer;
* @bus_info: bus info block;
* @block_size: maximum block size allowed to pass to under layer;
+ * @pdata: device platform data;
+ * @irq_work: interrupts work item;
+ * @irq: IRQ line number;
*/
struct mlxsw_i2c {
struct {
@@ -76,6 +89,9 @@ struct mlxsw_i2c {
struct mlxsw_core *core;
struct mlxsw_bus_info bus_info;
u16 block_size;
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct work_struct irq_work;
+ int irq;
};
#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
@@ -546,6 +562,67 @@ static void mlxsw_i2c_fini(void *bus_priv)
mlxsw_i2c->core = NULL;
}
+static void mlxsw_i2c_work_handler(struct work_struct *work)
+{
+ struct mlxsw_i2c *mlxsw_i2c;
+
+ mlxsw_i2c = container_of(work, struct mlxsw_i2c, irq_work);
+ mlxsw_core_irq_event_handlers_call(mlxsw_i2c->core);
+}
+
+static irqreturn_t mlxsw_i2c_irq_handler(int irq, void *dev)
+{
+ struct mlxsw_i2c *mlxsw_i2c = dev;
+
+ mlxsw_core_schedule_work(&mlxsw_i2c->irq_work);
+
+ /* This interrupt handler shares the IRQ line with the 'main' interrupt
+ * handler. Return IRQ_NONE here; the main handler will return
+ * IRQ_HANDLED.
+ */
+ return IRQ_NONE;
+}
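
For context, a minimal standalone sketch of this shared-IRQ idiom follows; demo_dev and demo_irq_handler are hypothetical names, not part of the driver:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct work;	/* assumed set up with INIT_WORK() */
};

/* Secondary consumer on a shared IRQ line: it only schedules deferred
 * work and never claims the interrupt, so it returns IRQ_NONE and relies
 * on the primary handler on the same line to return IRQ_HANDLED. (If no
 * handler ever claimed the line, the kernel's spurious-IRQ logic could
 * eventually disable it.)
 */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	struct demo_dev *demo = dev_id;

	schedule_work(&demo->work);
	return IRQ_NONE;
}

/* Registration mirrors the request_irq() call in mlxsw_i2c_irq_init(): */
static int demo_irq_init(struct demo_dev *demo, int irq)
{
	return request_irq(irq, demo_irq_handler,
			   IRQF_TRIGGER_FALLING | IRQF_SHARED, "demo", demo);
}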
+
+static int mlxsw_i2c_irq_init(struct mlxsw_i2c *mlxsw_i2c, u8 addr)
+{
+ int err;
+
+ /* Initialize the interrupt handler only if the system hotplug driver is
+ * reachable; otherwise the interrupt line is not enabled, interrupts
+ * will not be raised to the CPU, and the request_irq() call would not be
+ * valid.
+ */
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG))
+ return 0;
+
+ /* Set default interrupt line. */
+ if (mlxsw_i2c->pdata && mlxsw_i2c->pdata->irq)
+ mlxsw_i2c->irq = mlxsw_i2c->pdata->irq;
+ else if (addr == MLXSW_FAST_I2C_SLAVE)
+ mlxsw_i2c->irq = MLXSW_I2C_DEFAULT_IRQ;
+
+ if (!mlxsw_i2c->irq)
+ return 0;
+
+ INIT_WORK(&mlxsw_i2c->irq_work, mlxsw_i2c_work_handler);
+ err = request_irq(mlxsw_i2c->irq, mlxsw_i2c_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED, "mlxsw-i2c",
+ mlxsw_i2c);
+ if (err) {
+ dev_err(mlxsw_i2c->bus_info.dev, "Failed to request irq: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_i2c_irq_fini(struct mlxsw_i2c *mlxsw_i2c)
+{
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG) || !mlxsw_i2c->irq)
+ return;
+ cancel_work_sync(&mlxsw_i2c->irq_work);
+ free_irq(mlxsw_i2c->irq, mlxsw_i2c);
+}
+
static const struct mlxsw_bus mlxsw_i2c_bus = {
.kind = "i2c",
.init = mlxsw_i2c_init,
@@ -638,31 +715,38 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->bus_info.dev = &client->dev;
mlxsw_i2c->bus_info.low_frequency = true;
mlxsw_i2c->dev = &client->dev;
+ mlxsw_i2c->pdata = client->dev.platform_data;
+
+ err = mlxsw_i2c_irq_init(mlxsw_i2c, client->addr);
+ if (err)
+ goto errout;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c, false,
NULL, NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
- return err;
+ goto err_bus_device_register;
}
return 0;
+err_bus_device_register:
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
errout:
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;
}
-static int mlxsw_i2c_remove(struct i2c_client *client)
+static void mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
mutex_destroy(&mlxsw_i2c->cmd.lock);
-
- return 0;
}
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 10d13f5f9c7d..55b3c42bb007 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -26,20 +26,29 @@ static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
struct mlxsw_m_port;
+struct mlxsw_m_line_card {
+ bool active;
+ int module_to_port[];
+};
+
struct mlxsw_m {
struct mlxsw_m_port **ports;
- int *module_to_port;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
u8 base_mac[ETH_ALEN];
u8 max_ports;
+ u8 max_modules_per_slot; /* Maximum number of modules per slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mlxsw_m_line_card **line_cards;
};
struct mlxsw_m_port {
struct net_device *dev;
struct mlxsw_m *mlxsw_m;
u16 local_port;
+ u8 slot_index;
u8 module;
+ u8 module_offset;
};
static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
@@ -59,7 +68,8 @@ static int mlxsw_m_port_open(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+ return mlxsw_env_module_port_up(mlxsw_m->core, 0,
+ mlxsw_m_port->module);
}
static int mlxsw_m_port_stop(struct net_device *dev)
@@ -67,7 +77,7 @@ static int mlxsw_m_port_stop(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
+ mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module);
return 0;
}
@@ -93,14 +103,14 @@ static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- strlcpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
+ strscpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
mlxsw_m->bus_info->fw_rev.major,
mlxsw_m->bus_info->fw_rev.minor,
mlxsw_m->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
+ strscpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
sizeof(drvinfo->bus_info));
}
@@ -110,7 +120,9 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(core, mlxsw_m_port->module, modinfo);
+ return mlxsw_env_get_module_info(netdev, core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module, modinfo);
}
static int
@@ -120,8 +132,9 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module,
- ee, data);
+ return mlxsw_env_get_module_eeprom(netdev, core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module, ee, data);
}
static int
@@ -132,7 +145,9 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
page, extack);
}
@@ -141,7 +156,8 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
flags);
}
@@ -153,7 +169,8 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
params, extack);
}
@@ -165,7 +182,8 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
params->policy, extack);
}
@@ -181,7 +199,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
static int
mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
- u8 *p_module, u8 *p_width)
+ u8 *p_module, u8 *p_width, u8 *p_slot_index)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -192,6 +210,7 @@ mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
return err;
*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
return 0;
}
@@ -209,18 +228,25 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
if (err)
return err;
mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
- eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1 +
+ mlxsw_m_port->module_offset);
return 0;
}
+static bool mlxsw_m_port_created(struct mlxsw_m *mlxsw_m, u16 local_port)
+{
+ return mlxsw_m->ports[local_port];
+}
+
static int
-mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 slot_index,
+ u8 module)
{
struct mlxsw_m_port *mlxsw_m_port;
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, slot_index,
module + 1, false, 0, false,
0, mlxsw_m->base_mac,
sizeof(mlxsw_m->base_mac));
@@ -243,6 +269,15 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
mlxsw_m_port->mlxsw_m = mlxsw_m;
mlxsw_m_port->local_port = local_port;
mlxsw_m_port->module = module;
+ mlxsw_m_port->slot_index = slot_index;
+ /* Add the module offset for the line card. The offset for the main
+ * board is zero. For a line card in slot #n, the offset is (#n - 1)
+ * multiplied by the maximum number of modules that can be found on a
+ * line card.
+ */
+ mlxsw_m_port->module_offset = mlxsw_m_port->slot_index ?
+ (mlxsw_m_port->slot_index - 1) *
+ mlxsw_m->max_modules_per_slot : 0;
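+ /* Worked example (hypothetical numbers): with max_modules_per_slot = 16,
+ * slot 0 (the main board) and slot 1 both yield offset 0, slot 2 yields
+ * 16, slot 3 yields 32, and so on.
+ */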
dev->netdev_ops = &mlxsw_m_port_netdev_ops;
dev->ethtool_ops = &mlxsw_m_port_ethtool_ops;
@@ -288,19 +323,29 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
mlxsw_core_port_fini(mlxsw_m->core, local_port);
}
+static int *
+mlxsw_m_port_mapping_get(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
+{
+ return &mlxsw_m->line_cards[slot_index]->module_to_port[module];
+}
+
static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *last_module)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- u8 module, width;
+ u8 module, width, slot_index;
+ int *module_to_port;
int err;
/* Fill out to local port mapping array */
err = mlxsw_m_port_module_info_get(mlxsw_m, local_port, &module,
- &width);
+ &width, &slot_index);
if (err)
return err;
+ /* Skip if the line card has already been configured */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ return 0;
if (!width)
return 0;
/* Skip, if port belongs to the cluster */
@@ -310,92 +355,220 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
- mlxsw_env_module_port_map(mlxsw_m->core, module);
- mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
+ mlxsw_env_module_port_map(mlxsw_m->core, slot_index, module);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, module);
+ *module_to_port = local_port;
return 0;
}
-static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
+static void
+mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
{
- mlxsw_m->module_to_port[module] = -1;
- mlxsw_env_module_port_unmap(mlxsw_m->core, module);
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index,
+ module);
+ *module_to_port = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, slot_index, module);
}
-static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+static int mlxsw_m_linecards_init(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- u8 last_module = max_ports;
- int i;
- int err;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 num_of_modules;
+ int i, j, err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &num_of_modules,
+ &mlxsw_m->num_of_slots);
+ /* If the system is modular, get the maximum number of modules per slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ if (mlxsw_m->num_of_slots)
+ mlxsw_m->max_modules_per_slot =
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl);
+ else
+ mlxsw_m->max_modules_per_slot = num_of_modules;
+ /* Add slot for main board. */
+ mlxsw_m->num_of_slots += 1;
mlxsw_m->ports = kcalloc(max_ports, sizeof(*mlxsw_m->ports),
GFP_KERNEL);
if (!mlxsw_m->ports)
return -ENOMEM;
- mlxsw_m->module_to_port = kmalloc_array(max_ports, sizeof(int),
- GFP_KERNEL);
- if (!mlxsw_m->module_to_port) {
+ mlxsw_m->line_cards = kcalloc(mlxsw_m->num_of_slots,
+ sizeof(*mlxsw_m->line_cards),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards) {
err = -ENOMEM;
- goto err_module_to_port_alloc;
+ goto err_kcalloc;
}
- /* Invalidate the entries of module to local port mapping array */
- for (i = 0; i < max_ports; i++)
- mlxsw_m->module_to_port[i] = -1;
+ for (i = 0; i < mlxsw_m->num_of_slots; i++) {
+ mlxsw_m->line_cards[i] =
+ kzalloc(struct_size(mlxsw_m->line_cards[i],
+ module_to_port,
+ mlxsw_m->max_modules_per_slot),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards[i]) {
+ err = -ENOMEM;
+ goto err_kmalloc_array;
+ }
- /* Fill out module to local port mapping array */
- for (i = 1; i < max_ports; i++) {
- err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
- if (err)
- goto err_module_to_port_map;
+ /* Invalidate the entries of the module-to-local-port mapping array. */
+ for (j = 0; j < mlxsw_m->max_modules_per_slot; j++)
+ mlxsw_m->line_cards[i]->module_to_port[j] = -1;
}
- /* Create port objects for each valid entry */
- for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0 &&
- !mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
- err = mlxsw_m_port_create(mlxsw_m,
- mlxsw_m->module_to_port[i],
- i);
+ return 0;
+
+err_kmalloc_array:
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+err_kcalloc:
+ kfree(mlxsw_m->ports);
+ return err;
+}
+
+static void mlxsw_m_linecards_fini(struct mlxsw_m *mlxsw_m)
+{
+ int i = mlxsw_m->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+ kfree(mlxsw_m->line_cards);
+ kfree(mlxsw_m->ports);
+}
+
+static void
+mlxsw_m_linecard_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int i;
+
+ for (i = mlxsw_m->max_modules_per_slot - 1; i >= 0; i--) {
+ int *module_to_port;
+
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0)
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
+ }
+}
+
+static int
+mlxsw_m_linecard_ports_create(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int *module_to_port;
+ int i, err;
+
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0) {
+ err = mlxsw_m_port_create(mlxsw_m, *module_to_port,
+ slot_index, i);
if (err)
- goto err_module_to_port_create;
+ goto err_port_create;
+ /* Mark slot as active */
+ if (!mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = true;
}
}
-
return 0;
-err_module_to_port_create:
+err_port_create:
for (i--; i >= 0; i--) {
- if (mlxsw_m->module_to_port[i] > 0)
- mlxsw_m_port_remove(mlxsw_m,
- mlxsw_m->module_to_port[i]);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ /* Mark slot as inactive */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = false;
+ }
}
- i = max_ports;
-err_module_to_port_map:
- for (i--; i > 0; i--)
- mlxsw_m_port_module_unmap(mlxsw_m, i);
- kfree(mlxsw_m->module_to_port);
-err_module_to_port_alloc:
- kfree(mlxsw_m->ports);
return err;
}
-static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+static void
+mlxsw_m_linecard_ports_remove(struct mlxsw_m *mlxsw_m, u8 slot_index)
{
int i;
- for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0) {
- mlxsw_m_port_remove(mlxsw_m,
- mlxsw_m->module_to_port[i]);
- mlxsw_m_port_module_unmap(mlxsw_m, i);
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m,
+ slot_index, i);
+
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
}
}
+}
- kfree(mlxsw_m->module_to_port);
- kfree(mlxsw_m->ports);
+static int mlxsw_m_ports_module_map(struct mlxsw_m *mlxsw_m)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ u8 last_module = max_ports;
+ int i, err;
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+{
+ int err;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, 0);
+ if (err)
+ goto err_linecard_ports_create;
+
+ return 0;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, 0);
+
+ return err;
+}
+
+static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+{
+ mlxsw_m_linecard_ports_remove(mlxsw_m, 0);
+}
+
+static void
+mlxsw_m_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_linecard *linecard_priv = priv;
+ struct mlxsw_m_line_card *linecard;
+
+ linecard = mlxsw_m->line_cards[linecard_priv->slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, linecard_priv->slot_index);
+ linecard->active = false;
}
static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
@@ -416,6 +589,60 @@ static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
return -EINVAL;
}
+static void
+mlxsw_m_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+ int err;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+ /* Skip if the line card has already been configured during init */
+ if (linecard->active)
+ return;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, slot_index);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create port for line card at slot %d\n",
+ slot_index);
+ goto err_linecard_ports_create;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, slot_index);
+}
+
+static void
+mlxsw_m_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, slot_index);
+ linecard->active = false;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_m_event_ops = {
+ .got_active = mlxsw_m_got_active,
+ .got_inactive = mlxsw_m_got_inactive,
+};
+
static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack)
@@ -436,13 +663,33 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_m_linecards_init(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create line cards\n");
+ return err;
+ }
+
+ err = mlxsw_linecards_event_ops_register(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to register line cards operations\n");
+ goto linecards_event_ops_register;
+ }
+
err = mlxsw_m_ports_create(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
- return err;
+ goto err_ports_create;
}
return 0;
+
+err_ports_create:
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+linecards_event_ops_register:
+ mlxsw_m_linecards_fini(mlxsw_m);
+ return err;
}
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
@@ -450,6 +697,9 @@ static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_m_ports_remove(mlxsw_m);
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ mlxsw_m_linecards_fini(mlxsw_m);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
@@ -459,8 +709,8 @@ static struct mlxsw_driver mlxsw_m_driver = {
.priv_size = sizeof(struct mlxsw_m),
.init = mlxsw_m_init,
.fini = mlxsw_m_fini,
+ .ports_remove_selected = mlxsw_m_ports_remove_selected,
.profile = &mlxsw_m_config_profile,
- .res_query_enabled = true,
};
static const struct i2c_device_id mlxsw_m_i2c_id[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index f91dde4df152..c968309657dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -103,6 +103,8 @@ struct mlxsw_pci {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
u64 free_running_clock_offset;
+ u64 utc_sec_offset;
+ u64 utc_nsec_offset;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -456,9 +458,9 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
{
q->u.cq.v = mlxsw_pci->max_cqe_ver;
- /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs)
+ q->num < mlxsw_pci->num_sdq_cqs &&
+ !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
@@ -505,9 +507,32 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
+static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
+ ptrdiff_t off)
+{
+ return ioread32be(mlxsw_pci->hw_addr + off);
+}
+
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ if (cqe_v != MLXSW_PCI_CQE_V2)
+ return;
+
+ if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
+ return;
+
+ mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+ mlxsw_skb_cb(skb)->cqe_ts.nsec =
+ mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v,
char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -527,6 +552,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
if (unlikely(!tx_info.is_emad &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
tx_info.local_port);
skb = NULL;
@@ -647,6 +673,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
@@ -698,7 +726,7 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, ncqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
@@ -1159,6 +1187,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
+ profile->max_lag);
+ }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1235,6 +1268,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_ubridge) {
+ mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
+ profile->ubridge);
+ }
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
@@ -1252,12 +1290,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
}
- if (profile->used_kvh_xlt_cache_mode) {
- mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set(
- mbox, profile->kvh_xlt_cache_mode);
- }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
@@ -1268,31 +1300,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
}
- return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
-}
-
-static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_bus_info *bus_info,
- char *mbox)
-{
- int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox);
- int i;
-
- if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox))
- return 0;
-
- bus_info->xm_exists = true;
-
- if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) {
- dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n");
- return -EINVAL;
+ if (profile->used_cqe_time_stamp_type) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
+ profile->cqe_time_stamp_type);
}
- bus_info->xm_local_ports_count = count;
- for (i = 0; i < count; i++)
- bus_info->xm_local_ports[i] =
- mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox,
- i);
- return 0;
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
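The two knobs handled above are driven from the per-driver profile. A minimal sketch of a profile opting into both, assuming a Spectrum-style struct mlxsw_config_profile; the field names match the profile->used_* checks above, and the value 2 follows the CQE time_stamp_type encoding documented in pci_hw.h further below:

    /* Sketch only, not part of this patch. */
    static const struct mlxsw_config_profile example_profile = {
    	.used_ubridge			= 1,
    	.ubridge			= 1,
    	.used_cqe_time_stamp_type	= 1,
    	.cqe_time_stamp_type		= 2,	/* UTC */
    };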
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
@@ -1306,8 +1321,7 @@ static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
return err;
mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
-
- return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox);
+ return 0;
}
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1550,6 +1564,24 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->free_running_clock_offset =
mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+ if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_sec_bar;
+ }
+
+ mlxsw_pci->utc_sec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_nsec_bar;
+ }
+
+ mlxsw_pci->utc_nsec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1582,6 +1614,14 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_config_profile;
+ /* Some resources depend on the unified bridge model, which is configured
+ * as part of config_profile. Query the resources again to get the correct
+ * values.
+ */
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_requery_resources;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1599,12 +1639,15 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
@@ -1819,19 +1862,33 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_h;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
+ frc_offset_h = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}
static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_l;
+
+ frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
+}
+
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}
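The two 32-bit halves are meant to be combined by the caller. A sketch of one plausible consumer, using only the read_frc_h/read_frc_l callbacks from struct mlxsw_bus below; the helper name is hypothetical, and the high word is re-read to guard against a low-word wrap between the two reads:

    /* Sketch: compose a 64-bit free-running clock value from the two
     * 32-bit bus reads; retry if the high word changed in between.
     */
    static u64 example_read_frc(const struct mlxsw_bus *bus, void *bus_priv)
    {
    	u32 hi, lo;

    	do {
    		hi = bus->read_frc_h(bus_priv);
    		lo = bus->read_frc_l(bus_priv);
    	} while (hi != bus->read_frc_h(bus_priv));

    	return (u64)hi << 32 | lo;
    }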
static const struct mlxsw_bus mlxsw_pci_bus = {
@@ -1843,6 +1900,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
.read_frc_h = mlxsw_pci_read_frc_h,
.read_frc_l = mlxsw_pci_read_frc_l,
+ .read_utc_sec = mlxsw_pci_read_utc_sec,
+ .read_utc_nsec = mlxsw_pci_read_utc_nsec,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
@@ -1933,7 +1992,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
- mlxsw_pci->bus_info.read_frc_capable = true;
+ mlxsw_pci->bus_info.read_clock_capable = true;
mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7b531228d6c0..48dbfea0a2a1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -41,9 +41,6 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_H(offset) (offset)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_L(offset) ((offset) + 4)
-
#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
@@ -217,6 +214,25 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+/* pci_cqe_time_stamp_low
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_low, 0x0C, 16, 16);
+
#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
/* pci_cqe_mirror_tclass
@@ -280,8 +296,67 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
*/
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+enum mlxsw_pci_cqe_time_stamp_type {
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_USEC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_FRC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC,
+};
+
+/* pci_cqe_time_stamp_type
+ * Time stamp type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type)
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_type, 0x18, 22, 2);
+
#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+/* pci_cqe_time_stamp_high
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_high, 0x18, 0, 22);
+
+static inline u64 mlxsw_pci_cqe2_time_stamp_get(const char *cqe)
+{
+ u64 ts_high = mlxsw_pci_cqe2_time_stamp_high_get(cqe);
+ u64 ts_low = mlxsw_pci_cqe2_time_stamp_low_get(cqe);
+
+ return ts_high << 16 | ts_low;
+}
+
+static inline u8 mlxsw_pci_cqe2_time_stamp_sec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts >> 30 & 0xFF;
+}
+
+static inline u32 mlxsw_pci_cqe2_time_stamp_nsec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts & 0x3FFFFFFF;
+}
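A standalone illustration of the 38-bit split implemented by the two helpers above (userspace sketch, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* 42 sec and 123456789 nsec packed per the documented layout:
    	 * time_stamp[37:30] = sec, time_stamp[29:0] = nsec.
    	 */
    	uint64_t ts = (uint64_t)42 << 30 | 123456789;

    	printf("sec=%u nsec=%u\n",
    	       (unsigned int)(ts >> 30 & 0xFF),
    	       (unsigned int)(ts & 0x3FFFFFFF));
    	return 0;
    }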
+
/* pci_cqe_mirror_latency
* End-to-end latency of the original packet that does mirroring to the CPU.
* Value of 0xFFFFFF means that the latency is invalid. Units are according to
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 741fd2989d12..ac4d4ea51597 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -15,8 +15,6 @@
#define MLXSW_PORT_SWID_TYPE_IB 1
#define MLXSW_PORT_SWID_TYPE_ETH 2
-#define MLXSW_PORT_MID 0xd000
-
#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 24cc65018b41..0777bed5bb1a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -322,6 +322,18 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -335,6 +347,15 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_vid
+ * New VID when set_vid=1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when set_vid=0.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
/* reg_sfd_uc_system_port
* Unique port identifier for the final destination of the packet.
* Access: RW
@@ -359,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 fid_vid,
+ const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
@@ -368,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
+ mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
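Caller-side, the new vid argument doubles as the set_vid trigger: a zero VID leaves the egress VID untouched. A hedged usage sketch with illustrative values, assuming an mlxsw_core handle and err variable in scope:

    char sfd_pl[MLXSW_REG_SFD_LEN];
    char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
    /* vid == 0 would leave set_vid clear and the egress VID unmodified */
    mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
    			  mac, 5000, 10, MLXSW_REG_SFD_REC_ACTION_NOP, 1);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);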
@@ -379,6 +402,18 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_lag_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_lag_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -393,8 +428,10 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
/* reg_sfd_uc_lag_lag_vid
- * Indicates VID in case of vFIDs. Reserved for FIDs.
+ * New VLAN ID.
* Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and set_vid=0.
*/
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
MLXSW_REG_SFD_REC_LEN, 0x0C, false);
@@ -419,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
@@ -997,7 +1035,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
* to packet types used for flooding.
*/
#define MLXSW_REG_SFGC_ID 0x2011
-#define MLXSW_REG_SFGC_LEN 0x10
+#define MLXSW_REG_SFGC_LEN 0x14
MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN);
@@ -1019,9 +1057,10 @@ enum mlxsw_reg_sfgc_type {
*/
MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
-enum mlxsw_reg_sfgc_bridge_type {
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+/* bridge_type is used in SFGC and SFMR. */
+enum mlxsw_reg_bridge_type {
+ MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
+ MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
};
/* reg_sfgc_bridge_type
@@ -1054,12 +1093,6 @@ MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
*/
MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
-/* reg_sfgc_mid
- * The multicast ID for the swid. Not supported for Spectrum
- * Access: RW
- */
-MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
-
/* reg_sfgc_counter_set_type
* Counter Set Type for flow counters.
* Access: RW
@@ -1072,18 +1105,26 @@ MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
*/
MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+/* reg_sfgc_mid_base
+ * MID Base.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
+
static inline void
mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_reg_bridge_type bridge_type,
enum mlxsw_flood_table_type table_type,
- unsigned int flood_table)
+ unsigned int flood_table, u16 mid_base)
{
MLXSW_REG_ZERO(sfgc, payload);
mlxsw_reg_sfgc_type_set(payload, type);
mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfgc_table_type_set(payload, table_type);
mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
- mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+ mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
}
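With the legacy global mid field gone, the flooding entry point is now mid_base. A usage sketch with illustrative values (flood table 0, a hypothetical mid_base of 12288), assuming an mlxsw_core handle in scope:

    char sfgc_pl[MLXSW_REG_SFGC_LEN];
    int err;

    mlxsw_reg_sfgc_pack(sfgc_pl, MLXSW_REG_SFGC_TYPE_BROADCAST,
    		    MLXSW_REG_BRIDGE_TYPE_0,
    		    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET, 0, 12288);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);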
/* SFDF - Switch Filtering DB Flush
@@ -1516,7 +1557,7 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
* virtualized ports.
*/
#define MLXSW_REG_SVFA_ID 0x201C
-#define MLXSW_REG_SVFA_LEN 0x10
+#define MLXSW_REG_SVFA_LEN 0x18
MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN);
@@ -1537,6 +1578,7 @@ MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_VNI_TO_FID,
};
/* reg_svfa_mapping_table
@@ -1586,20 +1628,76 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
- enum mlxsw_reg_svfa_mt mt, bool valid,
- u16 fid, u16 vid)
+/* reg_svfa_vni
+ * Virtual Network Identifier.
+ * Access: Index
+ *
+ * Note: Reserved when mapping_table is not 2 (VNI mapping table).
+ */
+MLXSW_ITEM32(reg, svfa, vni, 0x10, 0, 24);
+
+/* reg_svfa_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-enabled RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, svfa, irif_v, 0x14, 24, 1);
+
+/* reg_svfa_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
+
+static inline void __mlxsw_reg_svfa_pack(char *payload,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
- local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
mlxsw_reg_svfa_swid_set(payload, 0);
- mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_irif_v_set(payload, irif_v);
+ mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
+}
+
+static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
+ bool valid, u16 fid, u16 vid,
+ bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
+ u16 vid, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
+static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
+ u32 vni, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_vni_set(payload, vni);
+}
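Under the unified model, VNI->FID now goes through this mapping table rather than SFMR. A usage sketch with illustrative values (VNI 100 mapped to FID 5000, no ingress RIF), assuming an mlxsw_core handle in scope:

    char svfa_pl[MLXSW_REG_SVFA_LEN];
    int err;

    mlxsw_reg_svfa_vni_pack(svfa_pl, true, 5000, 100, false, 0);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);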
+
/* SPVTR - Switch Port VLAN Stacking Register
* ------------------------------------------
* The Switch Port VLAN Stacking register configures the VLAN mode of the port
@@ -1741,7 +1839,7 @@ static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
* Creates and configures FIDs.
*/
#define MLXSW_REG_SFMR_ID 0x201F
-#define MLXSW_REG_SFMR_LEN 0x18
+#define MLXSW_REG_SFMR_LEN 0x30
MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN);
@@ -1764,6 +1862,28 @@ MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+/* reg_sfmr_flood_rsp
+ * Router sub-port flooding table.
+ * 0 - Regular flooding table.
+ * 1 - Router sub-port flooding table. For this FID the flooding is per
+ * router-sub-port local_port. Must not be set for a FID which is not a
+ * router-sub-port and must be set prior to enabling the relevant RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
+
+/* reg_sfmr_flood_bridge_type
+ * Flood bridge type (see SFGC.bridge_type).
+ * 0 - type_0.
+ * 1 - type_1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
+
/* reg_sfmr_fid_offset
* FID offset.
* Used to point into the flooding table selected by SFGC register if
@@ -1800,15 +1920,57 @@ MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
/* reg_sfmr_vni
* Virtual Network Identifier.
+ * When legacy bridge model is used, a given VNI can only be assigned to one
+ * FID. When unified bridge model is used, it configures only the FID->VNI
+ * mapping; the VNI->FID mapping is done by SVFA.
* Access: RW
- *
- * Note: A given VNI can only be assigned to one FID.
*/
MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+/* reg_sfmr_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-valid RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
+
+/* reg_sfmr_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+
+/* reg_sfmr_smpe_valid
+ * SMPE is valid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe_valid, 0x28, 20, 1);
+
+/* reg_sfmr_smpe
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
+
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset)
+ u16 fid_offset, bool flood_rsp,
+ enum mlxsw_reg_bridge_type bridge_type,
+ bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1816,6 +1978,10 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
mlxsw_reg_sfmr_vtfp_set(payload, false);
mlxsw_reg_sfmr_vv_set(payload, false);
+ mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
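A usage sketch for the extended pack helper, creating a .1q FID with no router sub-port flooding and no SMPE rewrite (illustrative values, mlxsw_core handle assumed):

    char sfmr_pl[MLXSW_REG_SFMR_LEN];
    int err;

    mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, 5000, 0,
    		    false, MLXSW_REG_BRIDGE_TYPE_0, false, 0);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);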
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -2013,74 +2179,43 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
-/* SFTR-V2 - Switch Flooding Table Version 2 Register
- * --------------------------------------------------
- * The switch flooding table is used for flooding packet replication. The table
- * defines a bit mask of ports for packet replication.
- */
-#define MLXSW_REG_SFTR2_ID 0x202F
-#define MLXSW_REG_SFTR2_LEN 0x120
-
-MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN);
-
-/* reg_sftr2_swid
- * Switch partition ID with which to associate the port.
- * Access: Index
+/* SMPE - Switch Multicast Port to Egress VID
+ * ------------------------------------------
+ * The switch multicast port to egress VID maps
+ * {egress_port, SMPE index} -> {VID}.
*/
-MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8);
+#define MLXSW_REG_SMPE_ID 0x202B
+#define MLXSW_REG_SMPE_LEN 0x0C
-/* reg_sftr2_flood_table
- * Flooding table index to associate with the specific type on the specific
- * switch partition.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6);
+MLXSW_REG_DEFINE(smpe, MLXSW_REG_SMPE_ID, MLXSW_REG_SMPE_LEN);
-/* reg_sftr2_index
- * Index. Used as an index into the Flooding Table in case the table is
- * configured to use VID / FID or FID Offset.
+/* reg_smpe_local_port
+ * Local port number.
+ * CPU port is not supported.
* Access: Index
*/
-MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
-
-/* reg_sftr2_table_type
- * See mlxsw_flood_table_type
- * Access: RW
- */
-MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
+MLXSW_ITEM32_LP(reg, smpe, 0x00, 16, 0x00, 12);
-/* reg_sftr2_range
- * Range of entries to update
+/* reg_smpe_smpe_index
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1.
* Access: Index
*/
-MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
+MLXSW_ITEM32(reg, smpe, smpe_index, 0x04, 0, 16);
-/* reg_sftr2_port
- * Local port membership (1 bit per port).
+/* reg_smpe_evid
+ * Egress VID.
* Access: RW
*/
-MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
+MLXSW_ITEM32(reg, smpe, evid, 0x08, 0, 12);
-/* reg_sftr2_port_mask
- * Local port mask (1 bit per port).
- * Access: WO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
-
-static inline void mlxsw_reg_sftr2_pack(char *payload,
- unsigned int flood_table,
- unsigned int index,
- enum mlxsw_flood_table_type table_type,
- unsigned int range, u16 port, bool set)
+static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
+ u16 smpe_index, u16 evid)
{
- MLXSW_REG_ZERO(sftr2, payload);
- mlxsw_reg_sftr2_swid_set(payload, 0);
- mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
- mlxsw_reg_sftr2_index_set(payload, index);
- mlxsw_reg_sftr2_table_type_set(payload, table_type);
- mlxsw_reg_sftr2_range_set(payload, range);
- mlxsw_reg_sftr2_port_set(payload, port, set);
- mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
+ MLXSW_REG_ZERO(smpe, payload);
+ mlxsw_reg_smpe_local_port_set(payload, local_port);
+ mlxsw_reg_smpe_smpe_index_set(payload, smpe_index);
+ mlxsw_reg_smpe_evid_set(payload, evid);
}
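A usage sketch, pointing {local port 1, SMPE index 7} at egress VID 10 (illustrative values, mlxsw_core handle assumed):

    char smpe_pl[MLXSW_REG_SMPE_LEN];
    int err;

    mlxsw_reg_smpe_pack(smpe_pl, 1, 7, 10);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(smpe), smpe_pl);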
/* SMID-V2 - Switch Multicast ID Version 2 Register
@@ -2107,6 +2242,23 @@ MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+/* reg_smid2_smpe_valid
+ * SMPE is valid.
+ * When not valid, the egress VID will not be modified by the SMPE table.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe_valid, 0x08, 20, 1);
+
+/* reg_smid2_smpe
+ * Switch multicast port to egress VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe, 0x08, 0, 16);
+
/* reg_smid2_port
 * Local port membership (1 bit per port).
* Access: RW
@@ -2120,13 +2272,15 @@ MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
- bool set)
+ bool set, bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(smid2, payload);
mlxsw_reg_smid2_swid_set(payload, 0);
mlxsw_reg_smid2_mid_set(payload, mid);
mlxsw_reg_smid2_port_set(payload, port, set);
mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+ mlxsw_reg_smid2_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_smid2_smpe_set(payload, smpe_valid ? smpe : 0);
}
/* CWTP - Congetion WRED ECN TClass Profile
@@ -4325,6 +4479,15 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
+/* reg_pmlp_slot_index
+ * Module number.
+ * Slot_index
+ * Slot_index = 0 represent the onboard (motherboard).
+ * In case of non-modular system only slot_index = 0 is available.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false);
+
/* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
@@ -4482,6 +4645,8 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T BIT(25)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
@@ -4494,25 +4659,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
-/* reg_ptys_ib_link_width_cap
- * IB port supported widths.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16);
-
-#define MLXSW_REG_PTYS_IB_SPEED_SDR BIT(0)
-#define MLXSW_REG_PTYS_IB_SPEED_DDR BIT(1)
-#define MLXSW_REG_PTYS_IB_SPEED_QDR BIT(2)
-#define MLXSW_REG_PTYS_IB_SPEED_FDR10 BIT(3)
-#define MLXSW_REG_PTYS_IB_SPEED_FDR BIT(4)
-#define MLXSW_REG_PTYS_IB_SPEED_EDR BIT(5)
-
-/* reg_ptys_ib_proto_cap
- * IB port supported speeds and protocols.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16);
-
/* reg_ptys_ext_eth_proto_admin
* Extended speed and protocol to set port to.
* Access: RW
@@ -4525,18 +4671,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_admin, 0x14, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
-/* reg_ptys_ib_link_width_admin
- * IB width to set port to.
- * Access: RW
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16);
-
-/* reg_ptys_ib_proto_admin
- * IB speeds and protocols to set port to.
- * Access: RW
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16);
-
/* reg_ptys_ext_eth_proto_oper
* The extended current speed and protocol configured for the port.
* Access: RO
@@ -4549,18 +4683,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_oper, 0x20, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
-/* reg_ptys_ib_link_width_oper
- * The current IB width to set port to.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16);
-
-/* reg_ptys_ib_proto_oper
- * The current IB speed and protocol.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
-
enum mlxsw_reg_ptys_connector_type {
MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR,
MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE,
@@ -4631,33 +4753,6 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
-static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port,
- u16 proto_admin, u16 link_width)
-{
- MLXSW_REG_ZERO(ptys, payload);
- mlxsw_reg_ptys_local_port_set(payload, local_port);
- mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_IB);
- mlxsw_reg_ptys_ib_proto_admin_set(payload, proto_admin);
- mlxsw_reg_ptys_ib_link_width_admin_set(payload, link_width);
-}
-
-static inline void mlxsw_reg_ptys_ib_unpack(char *payload, u16 *p_ib_proto_cap,
- u16 *p_ib_link_width_cap,
- u16 *p_ib_proto_oper,
- u16 *p_ib_link_width_oper)
-{
- if (p_ib_proto_cap)
- *p_ib_proto_cap = mlxsw_reg_ptys_ib_proto_cap_get(payload);
- if (p_ib_link_width_cap)
- *p_ib_link_width_cap =
- mlxsw_reg_ptys_ib_link_width_cap_get(payload);
- if (p_ib_proto_oper)
- *p_ib_proto_oper = mlxsw_reg_ptys_ib_proto_oper_get(payload);
- if (p_ib_link_width_oper)
- *p_ib_link_width_oper =
- mlxsw_reg_ptys_ib_link_width_oper_get(payload);
-}
-
/* PPAD - Port Physical Address Register
* -------------------------------------
* The PPAD register configures the per port physical MAC address.
@@ -5431,27 +5526,6 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
-/* PLIB - Port Local to InfiniBand Port
- * ------------------------------------
- * The PLIB register performs mapping from Local Port into InfiniBand Port.
- */
-#define MLXSW_REG_PLIB_ID 0x500A
-#define MLXSW_REG_PLIB_LEN 0x10
-
-MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
-
-/* reg_plib_local_port
- * Local port number.
- * Access: Index
- */
-MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12);
-
-/* reg_plib_ib_port
- * InfiniBand port remapping for local_port.
- * Access: RW
- */
-MLXSW_ITEM32(reg, plib, ib_port, 0x00, 0, 8);
-
/* PPTB - Port Prio To Buffer Register
* -----------------------------------
* Configures the switch priority to buffer table.
@@ -5767,9 +5841,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_slot_index_set(payload, slot_index);
mlxsw_reg_pmaos_module_set(payload, module);
}
@@ -5872,6 +5947,69 @@ static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
}
+/* PMECR - Ports Mapping Event Configuration Register
+ * --------------------------------------------------
+ * The PMECR register is used to enable/disable event triggering
+ * in case of local port mapping change.
+ */
+#define MLXSW_REG_PMECR_ID 0x501B
+#define MLXSW_REG_PMECR_LEN 0x20
+
+MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN);
+
+/* reg_pmecr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12);
+
+/* reg_pmecr_ee
+ * Event update enable. If this bit is set, event generation will be updated
+ * based on the e field. Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1);
+
+/* reg_pmecr_eswi
+ * Software ignore enable bit. If this bit is set, the value of swi is used.
+ * If this bit is clear, the value of swi is ignored.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1);
+
+/* reg_pmecr_swi
+ * Software ignore. If this bit is set, the device should not generate events
+ * in case of a PMLP SET operation, but only upon a local port mapping change
+ * (if applicable according to the e configuration). This is supplementary
+ * configuration on top of the e value.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1);
+
+enum mlxsw_reg_pmecr_e {
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmecr_e
+ * Event generation on local port mapping change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_pmecr_e e)
+{
+ MLXSW_REG_ZERO(pmecr, payload);
+ mlxsw_reg_pmecr_local_port_set(payload, local_port);
+ mlxsw_reg_pmecr_e_set(payload, e);
+ mlxsw_reg_pmecr_ee_set(payload, true);
+ mlxsw_reg_pmecr_swi_set(payload, true);
+ mlxsw_reg_pmecr_eswi_set(payload, true);
+}
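A usage sketch enabling mapping-change events on a port (mlxsw_core handle assumed); since the pack helper sets ee, eswi and swi, a single write both selects the event mode and suppresses events triggered by the driver's own PMLP writes:

    char pmecr_pl[MLXSW_REG_PMECR_LEN];
    int err;

    mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
    		     MLXSW_REG_PMECR_E_GENERATE_EVENT);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmecr), pmecr_pl);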
+
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5982,6 +6120,12 @@ MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
*/
MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+/* reg_pmmp_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4);
+
/* reg_pmmp_sticky
* When set, will keep eeprom_override values after plug-out event.
* Access: OP
@@ -6009,9 +6153,10 @@ enum {
*/
MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_slot_index_set(payload, slot_index);
mlxsw_reg_pmmp_module_set(payload, module);
}
@@ -6062,6 +6207,58 @@ static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
*slot_index = mlxsw_reg_pllp_slot_index_get(payload);
}
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM register allows query or configuration of module types.
+ * The register can only be set when the module is disabled by the PMAOS
+ * register.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, slot_index, 0x00, 24, 4);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_4_LANES = 0,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP = 1,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP = 2,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_SINGLE_LANE = 4,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_2_LANES = 8,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP4X = 10,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP2X = 11,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP1X = 12,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
+ MLXSW_REG_PMTM_MODULE_TYPE_OSFP = 15,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD = 16,
+ MLXSW_REG_PMTM_MODULE_TYPE_DSFP = 17,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP8X = 18,
+ MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR = 19,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 5);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
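Since module_type is RW while the pack helper only fills the indices, the natural flow is pack, query, read back. A sketch with illustrative indices (slot 0, module 3; mlxsw_core handle assumed):

    char pmtm_pl[MLXSW_REG_PMTM_LEN];
    enum mlxsw_reg_pmtm_module_type type;
    int err;

    mlxsw_reg_pmtm_pack(pmtm_pl, 0, 3);
    err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
    if (err)
    	return err;
    type = mlxsw_reg_pmtm_module_type_get(pmtm_pl);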
+
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -6087,9 +6284,7 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
- MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
- MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
@@ -6569,31 +6764,32 @@ MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
/* VLAN Interface */
-/* reg_ritr_vlan_if_vid
+/* reg_ritr_vlan_if_vlan_id
* VLAN ID.
* Access: RW
*/
-MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+MLXSW_ITEM32(reg, ritr, vlan_if_vlan_id, 0x08, 0, 12);
+
+/* reg_ritr_vlan_if_efid
+ * Egress FID.
+ * Used to connect the RIF to a bridge.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-1.
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_efid, 0x0C, 0, 16);
/* FID Interface */
/* reg_ritr_fid_if_fid
- * Filtering ID. Used to connect a bridge to the router. Only FIDs from
- * the vFID range are supported.
+ * Filtering ID. Used to connect a bridge to the router.
+ * When legacy bridge model is used, only FIDs from the vFID range are
+ * supported. When unified bridge model is used, this is the egress FID for
+ * router to bridge.
* Access: RW
*/
MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
-static inline void mlxsw_reg_ritr_fid_set(char *payload,
- enum mlxsw_reg_ritr_if_type rif_type,
- u16 fid)
-{
- if (rif_type == MLXSW_REG_RITR_FID_IF)
- mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
- else
- mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
-}
-
/* Sub-port Interface */
/* reg_ritr_sp_if_lag
@@ -6610,6 +6806,16 @@ MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
*/
MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+/* reg_ritr_sp_if_efid
+ * Egress filtering ID.
+ * Used to connect the eRIF to a bridge if eRIF-ACL has modified the DMAC or
+ * the VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_efid, 0x0C, 0, 16);
+
/* reg_ritr_sp_if_vid
* VLAN ID.
* Access: RW
@@ -6732,12 +6938,14 @@ static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
else
set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
- mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
- if (egress)
+ if (egress) {
+ mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
mlxsw_reg_ritr_egress_counter_index_set(payload, index);
- else
+ } else {
+ mlxsw_reg_ritr_ingress_counter_set_type_set(payload, set_type);
mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+ }
}
static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
@@ -6747,10 +6955,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
- u16 system_port, u16 vid)
+ u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
@@ -6784,6 +6993,20 @@ static inline void mlxsw_reg_ritr_mac_pack(char *payload, const char *mac)
}
static inline void
+mlxsw_reg_ritr_vlan_if_pack(char *payload, bool enable, u16 rif, u16 vr_id,
+ u16 mtu, const char *mac, u8 mac_profile_id,
+ u16 vlan_id, u16 efid)
+{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_VLAN_IF;
+
+ mlxsw_reg_ritr_pack(payload, enable, type, rif, vr_id, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(payload, mac_profile_id);
+ mlxsw_reg_ritr_vlan_if_vlan_id_set(payload, vlan_id);
+ mlxsw_reg_ritr_vlan_if_efid_set(payload, efid);
+}
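A usage sketch for the new VLAN-interface pack helper (illustrative values: RIF 3 in VR 0, MTU 1500, MAC profile 0, VID 10, egress FID 5010; mlxsw_core handle assumed):

    char ritr_pl[MLXSW_REG_RITR_LEN];
    char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    int err;

    mlxsw_reg_ritr_vlan_if_pack(ritr_pl, true, 3, 0, 1500, mac, 0, 10, 5010);
    err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ritr), ritr_pl);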
+
+static inline void
mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
@@ -7714,11 +7937,10 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 *dip)
+ u32 dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip4_set(payload, *dip);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7728,8 +7950,7 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
static inline void
@@ -8792,656 +9013,62 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
-/* RXLTE - Router XLT Enable Register
- * ----------------------------------
- * The RXLTE enables XLT (eXtended Lookup Table) LPM lookups if a capable
- * XM is present on the system.
- */
-
-#define MLXSW_REG_RXLTE_ID 0x8050
-#define MLXSW_REG_RXLTE_LEN 0x0C
-
-MLXSW_REG_DEFINE(rxlte, MLXSW_REG_RXLTE_ID, MLXSW_REG_RXLTE_LEN);
-
-/* reg_rxlte_virtual_router
- * Virtual router ID associated with the router interface.
- * Range is 0..cap_max_virtual_routers-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, virtual_router, 0x00, 0, 16);
-
-enum mlxsw_reg_rxlte_protocol {
- MLXSW_REG_RXLTE_PROTOCOL_IPV4,
- MLXSW_REG_RXLTE_PROTOCOL_IPV6,
-};
-
-/* reg_rxlte_protocol
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, protocol, 0x04, 0, 4);
-
-/* reg_rxlte_lpm_xlt_en
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxlte, lpm_xlt_en, 0x08, 0, 1);
-
-static inline void mlxsw_reg_rxlte_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_rxlte_protocol protocol,
- bool lpm_xlt_en)
-{
- MLXSW_REG_ZERO(rxlte, payload);
- mlxsw_reg_rxlte_virtual_router_set(payload, virtual_router);
- mlxsw_reg_rxlte_protocol_set(payload, protocol);
- mlxsw_reg_rxlte_lpm_xlt_en_set(payload, lpm_xlt_en);
-}
-
-/* RXLTM - Router XLT M select Register
- * ------------------------------------
- * The RXLTM configures and selects the M for the XM lookups.
- */
-
-#define MLXSW_REG_RXLTM_ID 0x8051
-#define MLXSW_REG_RXLTM_LEN 0x14
-
-MLXSW_REG_DEFINE(rxltm, MLXSW_REG_RXLTM_ID, MLXSW_REG_RXLTM_LEN);
-
-/* reg_rxltm_m0_val_v6
- * Global M0 value For IPv6.
- * Range 0..128
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v6, 0x10, 16, 8);
-
-/* reg_rxltm_m0_val_v4
- * Global M0 value For IPv4.
- * Range 0..32
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v4, 0x10, 0, 6);
-
-static inline void mlxsw_reg_rxltm_pack(char *payload, u8 m0_val_v4, u8 m0_val_v6)
-{
- MLXSW_REG_ZERO(rxltm, payload);
- mlxsw_reg_rxltm_m0_val_v6_set(payload, m0_val_v6);
- mlxsw_reg_rxltm_m0_val_v4_set(payload, m0_val_v4);
-}
-
-/* RLCMLD - Router LPM Cache ML Delete Register
- * --------------------------------------------
- * The RLCMLD register is used to bulk delete the XLT-LPM cache ML entries.
- * This can be used by SW when L is increased or decreased, thus need to
- * remove entries with old ML values.
- */
-
-#define MLXSW_REG_RLCMLD_ID 0x8055
-#define MLXSW_REG_RLCMLD_LEN 0x30
-
-MLXSW_REG_DEFINE(rlcmld, MLXSW_REG_RLCMLD_ID, MLXSW_REG_RLCMLD_LEN);
-
-enum mlxsw_reg_rlcmld_select {
- MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES,
-};
-
-/* reg_rlcmld_select
- * Which entries to delete.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, select, 0x00, 16, 2);
-
-enum mlxsw_reg_rlcmld_filter_fields {
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL = 0x04,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER = 0x08,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP = 0x10,
-};
-
-/* reg_rlcmld_filter_fields
- * If a bit is '0' then the relevant field is ignored.
- * Access: Index
+/* REIV - Router Egress Interface to VID Register
+ * ----------------------------------------------
+ * The REIV register maps {eRIF, egress_port} -> VID.
+ * This mapping is done at the egress, after the ACLs.
+ * This mapping always takes effect after router, regardless of cast
+ * (for unicast/multicast/port-base multicast), regardless of eRIF type and
+ * regardless of bridge decisions (e.g. SFD for unicast or SMPE).
+ * Reserved when the RIF is a loopback RIF.
+ *
+ * Note: Reserved when legacy bridge model is used.
*/
-MLXSW_ITEM32(reg, rlcmld, filter_fields, 0x00, 0, 8);
+#define MLXSW_REG_REIV_ID 0x8034
+#define MLXSW_REG_REIV_BASE_LEN 0x20 /* base length, without records */
+#define MLXSW_REG_REIV_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_REIV_REC_MAX_COUNT 256 /* firmware limitation */
+#define MLXSW_REG_REIV_LEN (MLXSW_REG_REIV_BASE_LEN + \
+ MLXSW_REG_REIV_REC_LEN * \
+ MLXSW_REG_REIV_REC_MAX_COUNT)
-enum mlxsw_reg_rlcmld_protocol {
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
-};
+MLXSW_REG_DEFINE(reiv, MLXSW_REG_REIV_ID, MLXSW_REG_REIV_LEN);
-/* reg_rlcmld_protocol
+/* reg_reiv_port_page
+ * Port page - elport_record[0] is 256*port_page.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, protocol, 0x08, 0, 4);
+MLXSW_ITEM32(reg, reiv, port_page, 0x00, 0, 4);
-/* reg_rlcmld_virtual_router
- * Virtual router ID.
- * Range is 0..cap_max_virtual_routers-1
+/* reg_reiv_erif
+ * Egress RIF.
+ * Range is 0..cap_max_router_interfaces-1.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, virtual_router, 0x0C, 0, 16);
+MLXSW_ITEM32(reg, reiv, erif, 0x04, 0, 16);
-/* reg_rlcmld_dip
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip6, 0x10, 16);
-
-/* reg_rlcmld_dip_mask
- * per bit:
- * 0: no match
- * 1: match
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip_mask4, 0x2C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip_mask6, 0x20, 16);
-
-static inline void __mlxsw_reg_rlcmld_pack(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- enum mlxsw_reg_rlcmld_protocol protocol,
- u16 virtual_router)
-{
- u8 filter_fields = MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP;
-
- MLXSW_REG_ZERO(rlcmld, payload);
- mlxsw_reg_rlcmld_select_set(payload, select);
- mlxsw_reg_rlcmld_filter_fields_set(payload, filter_fields);
- mlxsw_reg_rlcmld_protocol_set(payload, protocol);
- mlxsw_reg_rlcmld_virtual_router_set(payload, virtual_router);
-}
-
-static inline void mlxsw_reg_rlcmld_pack4(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- u32 dip, u32 dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- virtual_router);
- mlxsw_reg_rlcmld_dip4_set(payload, dip);
- mlxsw_reg_rlcmld_dip_mask4_set(payload, dip_mask);
-}
-
-static inline void mlxsw_reg_rlcmld_pack6(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- const void *dip, const void *dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
- virtual_router);
- mlxsw_reg_rlcmld_dip6_memcpy_to(payload, dip);
- mlxsw_reg_rlcmld_dip_mask6_memcpy_to(payload, dip_mask);
-}
-
-/* RLPMCE - Router LPM Cache Enable Register
- * -----------------------------------------
- * Allows disabling the LPM cache. Can be changed on the fly.
- */
-
-#define MLXSW_REG_RLPMCE_ID 0x8056
-#define MLXSW_REG_RLPMCE_LEN 0x4
-
-MLXSW_REG_DEFINE(rlpmce, MLXSW_REG_RLPMCE_ID, MLXSW_REG_RLPMCE_LEN);
-
-/* reg_rlpmce_flush
- * Flush:
- * 0: do not flush the cache (default)
- * 1: flush (clear) the cache
- * Access: WO
- */
-MLXSW_ITEM32(reg, rlpmce, flush, 0x00, 4, 1);
-
-/* reg_rlpmce_disable
- * LPM cache:
- * 0: enabled (default)
- * 1: disabled
- * Access: RW
- */
-MLXSW_ITEM32(reg, rlpmce, disable, 0x00, 0, 1);
-
-static inline void mlxsw_reg_rlpmce_pack(char *payload, bool flush,
- bool disable)
-{
- MLXSW_REG_ZERO(rlpmce, payload);
- mlxsw_reg_rlpmce_flush_set(payload, flush);
- mlxsw_reg_rlpmce_disable_set(payload, disable);
-}
-
-/* Note that XLTQ, XMDR, XRMT and XRALXX register positions violate the rule
- * of ordering register definitions by the ID. However, XRALXX pack helpers are
- * using RALXX pack helpers, RALXX registers have higher IDs.
- * Also XMDR is using RALUE enums. XLRQ and XRMT are just put alongside with the
- * related registers.
- */
-
-/* XLTQ - XM Lookup Table Query Register
- * -------------------------------------
- */
-#define MLXSW_REG_XLTQ_ID 0x7802
-#define MLXSW_REG_XLTQ_LEN 0x2C
-
-MLXSW_REG_DEFINE(xltq, MLXSW_REG_XLTQ_ID, MLXSW_REG_XLTQ_LEN);
-
-enum mlxsw_reg_xltq_xm_device_id {
- MLXSW_REG_XLTQ_XM_DEVICE_ID_UNKNOWN,
- MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT = 0xCF71,
-};
-
-/* reg_xltq_xm_device_id
- * XM device ID.
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xm_device_id, 0x04, 0, 16);
-
-/* reg_xltq_xlt_cap_ipv4_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv4_lpm, 0x10, 0, 1);
-
-/* reg_xltq_xlt_cap_ipv6_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv6_lpm, 0x10, 1, 1);
-
-/* reg_xltq_cap_xlt_entries
- * Number of XLT entries
- * Note: SW must not fill more than 80% in order to avoid overflow
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_entries, 0x20, 0, 32);
-
-/* reg_xltq_cap_xlt_mtable
- * XLT M-Table max size
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_mtable, 0x24, 0, 32);
-
-static inline void mlxsw_reg_xltq_pack(char *payload)
-{
- MLXSW_REG_ZERO(xltq, payload);
-}
-
-static inline void mlxsw_reg_xltq_unpack(char *payload, u16 *xm_device_id, bool *xlt_cap_ipv4_lpm,
- bool *xlt_cap_ipv6_lpm, u32 *cap_xlt_entries,
- u32 *cap_xlt_mtable)
-{
- *xm_device_id = mlxsw_reg_xltq_xm_device_id_get(payload);
- *xlt_cap_ipv4_lpm = mlxsw_reg_xltq_xlt_cap_ipv4_lpm_get(payload);
- *xlt_cap_ipv6_lpm = mlxsw_reg_xltq_xlt_cap_ipv6_lpm_get(payload);
- *cap_xlt_entries = mlxsw_reg_xltq_cap_xlt_entries_get(payload);
- *cap_xlt_mtable = mlxsw_reg_xltq_cap_xlt_mtable_get(payload);
-}
-
-/* XMDR - XM Direct Register
- * -------------------------
- * The XMDR allows direct access to the XM device via the switch.
- * Working in synchronous mode. FW waits for response from the XLT
- * for each command. FW acks the XMDR accordingly.
- */
-#define MLXSW_REG_XMDR_ID 0x7803
-#define MLXSW_REG_XMDR_BASE_LEN 0x20
-#define MLXSW_REG_XMDR_TRANS_LEN 0x80
-#define MLXSW_REG_XMDR_LEN (MLXSW_REG_XMDR_BASE_LEN + \
- MLXSW_REG_XMDR_TRANS_LEN)
-
-MLXSW_REG_DEFINE(xmdr, MLXSW_REG_XMDR_ID, MLXSW_REG_XMDR_LEN);
-
-/* reg_xmdr_bulk_entry
- * Bulk_entry
- * 0: Last entry - immediate flush of XRT-cache
- * 1: Bulk entry - do not flush the XRT-cache
+/* reg_reiv_rec_update
+ * Update enable (on write):
+ * 0 - Do not update the entry.
+ * 1 - Update the entry.
* Access: OP
*/
-MLXSW_ITEM32(reg, xmdr, bulk_entry, 0x04, 8, 1);
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_update, MLXSW_REG_REIV_BASE_LEN, 31, 1,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-/* reg_xmdr_num_rec
- * Number of records for Direct access to XM
- * Supported: 0..4 commands (except NOP which is a filler)
- * 0 commands is reserved when bulk_entry = 1.
- * 0 commands is allowed when bulk_entry = 0 for immediate XRT-cache flush.
- * Access: OP
- */
-MLXSW_ITEM32(reg, xmdr, num_rec, 0x04, 0, 4);
-
-/* reg_xmdr_reply_vect
- * Reply Vector
- * Bit i for command index i+1
- * values per bit:
- * 0: failed
- * 1: succeeded
- * e.g. if commands 1, 2, 4 succeeded and command 3 failed then binary
- * value will be 0b1011
- * Access: RO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, xmdr, reply_vect, 0x08, 4, 1);
-
-static inline void mlxsw_reg_xmdr_pack(char *payload, bool bulk_entry)
-{
- MLXSW_REG_ZERO(xmdr, payload);
- mlxsw_reg_xmdr_bulk_entry_set(payload, bulk_entry);
-}
-
-enum mlxsw_reg_xmdr_c_cmd_id {
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4 = 0x30,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6 = 0x31,
-};
-
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN 32
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN 48
-
-/* reg_xmdr_c_cmd_id
- */
-MLXSW_ITEM32(reg, xmdr_c, cmd_id, 0x00, 24, 8);
-
-/* reg_xmdr_c_seq_number
- */
-MLXSW_ITEM32(reg, xmdr_c, seq_number, 0x00, 12, 12);
-
-enum mlxsw_reg_xmdr_c_ltr_op {
- /* Activity is set */
- MLXSW_REG_XMDR_C_LTR_OP_WRITE = 0,
- /* There is no update mask. All fields are updated. */
- MLXSW_REG_XMDR_C_LTR_OP_UPDATE = 1,
- MLXSW_REG_XMDR_C_LTR_OP_DELETE = 2,
-};
-
-/* reg_xmdr_c_ltr_op
- * Operation.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_op, 0x04, 24, 8);
-
-/* reg_xmdr_c_ltr_trap_action
- * Trap action.
- * Values are defined in enum mlxsw_reg_ralue_trap_action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_action, 0x04, 20, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_trap_id_num {
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS0,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS1,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS2,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS3,
-};
-
-/* reg_xmdr_c_ltr_trap_id_num
- * Trap-ID number.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_id_num, 0x04, 16, 4);
-
-/* reg_xmdr_c_ltr_virtual_router
- * Virtual Router ID.
- * Range is 0..cap_max_virtual_routers-1
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_virtual_router, 0x04, 0, 16);
-
-/* reg_xmdr_c_ltr_prefix_len
- * Number of bits in the prefix of the LPM route.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_prefix_len, 0x08, 24, 8);
-
-/* reg_xmdr_c_ltr_bmp_len
- * The best match prefix length in the case that there is no match for
- * longer prefixes.
- * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_bmp_len, 0x08, 16, 8);
-
-/* reg_xmdr_c_ltr_entry_type
- * Entry type.
- * Values are defined in enum mlxsw_reg_ralue_entry_type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_entry_type, 0x08, 4, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_action_type {
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME,
-};
-
-/* reg_xmdr_c_ltr_action_type
- * Action Type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_action_type, 0x08, 0, 4);
-
-/* reg_xmdr_c_ltr_erif
- * Egress Router Interface.
- * Only relevant in case of LOCAL action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_erif, 0x10, 0, 16);
-
-/* reg_xmdr_c_ltr_adjacency_index
- * Points to the first entry of the group-based ECMP.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_adjacency_index, 0x10, 0, 24);
-
-#define MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC 0xFFFFFF
-
-/* reg_xmdr_c_ltr_pointer_to_tunnel
- * Only relevant in case of IP2ME action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_pointer_to_tunnel, 0x10, 0, 24);
-
-/* reg_xmdr_c_ltr_ecmp_size
- * Amount of sequential entries starting
- * from the adjacency_index (the number of ECMPs).
- * The valid range is 1-64, 512, 1024, 2048 and 4096.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_ecmp_size, 0x14, 0, 32);
-
-/* reg_xmdr_c_ltr_dip*
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * The least significant bits must be '0' if the prefix_len is smaller
- * than 128 for IPv6 or smaller than 32 for IPv4.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, xmdr_c, ltr_dip6, 0x1C, 16);
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_xmdr_c_cmd_id cmd_id, u16 seq_number,
- enum mlxsw_reg_xmdr_c_ltr_op op, u16 virtual_router,
- u8 prefix_len)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
- u8 num_rec = mlxsw_reg_xmdr_num_rec_get(xmdr_payload);
-
- mlxsw_reg_xmdr_num_rec_set(xmdr_payload, num_rec + 1);
-
- mlxsw_reg_xmdr_c_cmd_id_set(payload, cmd_id);
- mlxsw_reg_xmdr_c_seq_number_set(payload, seq_number);
- mlxsw_reg_xmdr_c_ltr_op_set(payload, op);
- mlxsw_reg_xmdr_c_ltr_virtual_router_set(payload, virtual_router);
- mlxsw_reg_xmdr_c_ltr_prefix_len_set(payload, prefix_len);
- mlxsw_reg_xmdr_c_ltr_entry_type_set(payload,
- MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
- mlxsw_reg_xmdr_c_ltr_bmp_len_set(payload, prefix_len);
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack4(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, u32 *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip4_set(payload, *dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN;
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack6(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, const void *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip6_memcpy_to(payload, dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN;
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_remote_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num,
- u32 adjacency_index, u16 ecmp_size)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_adjacency_index_set(payload, adjacency_index);
- mlxsw_reg_xmdr_c_ltr_ecmp_size_set(payload, ecmp_size);
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_local_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num, u16 erif)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_erif_set(payload, erif);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(char *xmdr_payload,
- unsigned int trans_offset)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload,
- MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(char *xmdr_payload,
- unsigned int trans_offset,
- u32 pointer_to_tunnel)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload, pointer_to_tunnel);
-}
-
-/* XRMT - XM Router M Table Register
- * ---------------------------------
- * The XRMT configures the M-Table for the XLT-LPM.
- */
-#define MLXSW_REG_XRMT_ID 0x7810
-#define MLXSW_REG_XRMT_LEN 0x14
-
-MLXSW_REG_DEFINE(xrmt, MLXSW_REG_XRMT_ID, MLXSW_REG_XRMT_LEN);
-
-/* reg_xrmt_index
- * Index in M-Table.
- * Range 0..cap_xlt_mtable-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, xrmt, index, 0x04, 0, 20);
-
-/* reg_xrmt_l0_val
+/* reg_reiv_rec_evid
+ * Egress VID.
+ * Range is 0..4095.
* Access: RW
*/
-MLXSW_ITEM32(reg, xrmt, l0_val, 0x10, 24, 8);
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_evid, MLXSW_REG_REIV_BASE_LEN, 0, 12,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_xrmt_pack(char *payload, u32 index, u8 l0_val)
+static inline void mlxsw_reg_reiv_pack(char *payload, u8 port_page, u16 erif)
{
- MLXSW_REG_ZERO(xrmt, payload);
- mlxsw_reg_xrmt_index_set(payload, index);
- mlxsw_reg_xrmt_l0_val_set(payload, l0_val);
-}
-
-/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
- * -----------------------------------------------------------
- * The XRALTA is used to allocate the XLT LPM trees.
- *
- * This register embeds original RALTA register.
- */
-#define MLXSW_REG_XRALTA_ID 0x7811
-#define MLXSW_REG_XRALTA_LEN 0x08
-#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
-
-static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
-{
- char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
-
- MLXSW_REG_ZERO(xralta, payload);
- mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
-}
-
-/* XRALST - XM Router Algorithmic LPM Structure Tree Register
- * ----------------------------------------------------------
- * The XRALST is used to set and query the structure of an XLT LPM tree.
- *
- * This register embeds original RALST register.
- */
-#define MLXSW_REG_XRALST_ID 0x7812
-#define MLXSW_REG_XRALST_LEN 0x108
-#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
-
-static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- MLXSW_REG_ZERO(xralst, payload);
- mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
-}
-
-static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
- u8 left_child_bin,
- u8 right_child_bin)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
- right_child_bin);
-}
-
-/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
- * --------------------------------------------------------
- * The XRALTB register is used to bind virtual router and protocol
- * to an allocated LPM tree.
- *
- * This register embeds original RALTB register.
- */
-#define MLXSW_REG_XRALTB_ID 0x7813
-#define MLXSW_REG_XRALTB_LEN 0x08
-#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
-
-static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
-{
- char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
-
- MLXSW_REG_ZERO(xraltb, payload);
- mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+ MLXSW_REG_ZERO(reiv, payload);
+ mlxsw_reg_reiv_port_page_set(payload, port_page);
+ mlxsw_reg_reiv_erif_set(payload, erif);
}
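
The REIV records are indexed into the payload: the header is packed once, then each record's update flag and egress VID are set individually before the write. The following is a minimal sketch of that call sequence, not part of the patch; the example_ helper name is hypothetical, and MLXSW_REG_REIV_LEN and mlxsw_reg_write() are assumed to come from elsewhere in the driver.

static int example_reiv_set_evid(struct mlxsw_core *core, u8 port_page,
				 u16 erif, u16 evid)
{
	char reiv_pl[MLXSW_REG_REIV_LEN];

	mlxsw_reg_reiv_pack(reiv_pl, port_page, erif);
	/* Mark record 0 for update and set its egress VID. */
	mlxsw_reg_reiv_rec_update_set(reiv_pl, 0, true);
	mlxsw_reg_reiv_rec_evid_set(reiv_pl, 0, evid);
	return mlxsw_reg_write(core, MLXSW_REG(reiv), reiv_pl);
}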
/* MFCR - Management Fan Control Register
@@ -9667,6 +9294,12 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+/* reg_mtmp_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4);
+
#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
/* reg_mtmp_sensor_index
@@ -9756,11 +9389,12 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
*/
MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
-static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
- bool max_temp_enable,
+static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index,
+ u16 sensor_index, bool max_temp_enable,
bool max_temp_reset)
{
MLXSW_REG_ZERO(mtmp, payload);
+ mlxsw_reg_mtmp_slot_index_set(payload, slot_index);
mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
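
The new slot_index argument extends sensor addressing to line cards, with slot 0 preserving the old main-board behavior. A hedged sketch of a slot-aware query follows; the example_ helper is hypothetical, and MLXSW_REG_MTMP_LEN and mlxsw_reg_query() are assumed from elsewhere in the driver.

static int example_mtmp_query(struct mlxsw_core *core, u8 slot_index,
			      u16 sensor_index)
{
	char mtmp_pl[MLXSW_REG_MTMP_LEN];

	/* slot_index 0 addresses the main board; line-card sensors are
	 * addressed by the card's slot number.
	 */
	mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index, false, false);
	return mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
}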
@@ -9826,6 +9460,12 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
+/* reg_mtbr_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4);
+
/* reg_mtbr_base_sensor_index
* Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors,
* 64-127 are mapped to the SFP+/QSFP modules sequentially).
@@ -9858,10 +9498,11 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index,
- u8 num_rec)
+static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
+ u16 base_sensor_index, u8 num_rec)
{
MLXSW_REG_ZERO(mtbr, payload);
+ mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
}
@@ -9910,6 +9551,12 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
*/
MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+/* reg_mcia_slot
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCIA_STATUS_GOOD = 0,
/* No response from module's EEPROM. */
@@ -9985,6 +9632,7 @@ enum mlxsw_reg_mcia_eeprom_module_info_id {
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP = 0x19,
};
enum mlxsw_reg_mcia_eeprom_module_info {
@@ -10008,11 +9656,13 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
-static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
- u8 page_number, u16 device_addr,
- u8 size, u8 i2c_device_addr)
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
+ u8 lock, u8 page_number,
+ u16 device_addr, u8 size,
+ u8 i2c_device_addr)
{
MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_slot_set(payload, slot_index);
mlxsw_reg_mcia_module_set(payload, module);
mlxsw_reg_mcia_l_set(payload, lock);
mlxsw_reg_mcia_page_number_set(payload, page_number);
@@ -10444,6 +10094,12 @@ MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
*/
MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+/* reg_mcion_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
@@ -10455,9 +10111,10 @@ enum {
*/
MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
-static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_slot_index_set(payload, slot_index);
mlxsw_reg_mcion_module_set(payload, module);
}
@@ -10529,6 +10186,8 @@ MLXSW_REG_DEFINE(mtutc, MLXSW_REG_MTUTC_ID, MLXSW_REG_MTUTC_LEN);
enum mlxsw_reg_mtutc_operation {
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC = 0,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 1,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME = 2,
MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ = 3,
};
@@ -10541,25 +10200,50 @@ MLXSW_ITEM32(reg, mtutc, operation, 0x00, 0, 4);
/* reg_mtutc_freq_adjustment
* Frequency adjustment: Every PPS the HW frequency will be
* adjusted by this value. Units of HW clock, where HW counts
- * 10^9 HW clocks for 1 HW second.
+ * 10^9 HW clocks for 1 HW second. Range is from -50,000,000 to +50,000,000.
+ * In Spectrum-2, the field is reversed: positive values decrease the
+ * frequency.
* Access: RW
*/
MLXSW_ITEM32(reg, mtutc, freq_adjustment, 0x04, 0, 32);
+#define MLXSW_REG_MTUTC_MAX_FREQ_ADJ (50 * 1000 * 1000)
+
/* reg_mtutc_utc_sec
* UTC seconds.
* Access: WO
*/
MLXSW_ITEM32(reg, mtutc, utc_sec, 0x10, 0, 32);
+/* reg_mtutc_utc_nsec
+ * UTC nSecs.
+ * Range 0..(10^9-1)
+ * Updated when operation is SET_TIME_IMMEDIATE.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_nsec, 0x14, 0, 30);
+
+/* reg_mtutc_time_adjustment
+ * Time adjustment.
+ * Units of nSec.
+ * Range is from -32768 to +32767.
+ * Updated when operation is ADJUST_TIME.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, time_adjustment, 0x18, 0, 32);
+
static inline void
mlxsw_reg_mtutc_pack(char *payload, enum mlxsw_reg_mtutc_operation oper,
- u32 freq_adj, u32 utc_sec)
+ u32 freq_adj, u32 utc_sec, u32 utc_nsec, u32 time_adj)
{
MLXSW_REG_ZERO(mtutc, payload);
mlxsw_reg_mtutc_operation_set(payload, oper);
mlxsw_reg_mtutc_freq_adjustment_set(payload, freq_adj);
mlxsw_reg_mtutc_utc_sec_set(payload, utc_sec);
+ mlxsw_reg_mtutc_utc_nsec_set(payload, utc_nsec);
+ mlxsw_reg_mtutc_time_adjustment_set(payload, time_adj);
}
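
With the extended signature, the arguments that a given operation does not consume are simply passed as zero. A sketch of two of the modes, assuming hypothetical example_ helpers and leaving range checking against MLXSW_REG_MTUTC_MAX_FREQ_ADJ to the caller:

static int example_mtutc_adjust_freq(struct mlxsw_core *core, s32 freq_adj)
{
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	/* Only freq_adj is meaningful for ADJUST_FREQ. */
	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
			     freq_adj, 0, 0, 0);
	return mlxsw_reg_write(core, MLXSW_REG(mtutc), mtutc_pl);
}

static int example_mtutc_set_time(struct mlxsw_core *core, u32 sec, u32 nsec)
{
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	/* SET_TIME_IMMEDIATE consumes utc_sec and utc_nsec; reserved on
	 * Spectrum-1 per the field comments above.
	 */
	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
			     0, sec, nsec, 0);
	return mlxsw_reg_write(core, MLXSW_REG(mtutc), mtutc_pl);
}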
/* MCQI - Management Component Query Information
@@ -11227,15 +10911,76 @@ MLXSW_ITEM32(reg, mtptpt, trap_id, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, mtptpt, message_type, 0x04, 0, 16);
-static inline void mlxsw_reg_mtptptp_pack(char *payload,
- enum mlxsw_reg_mtptpt_trap_id trap_id,
- u16 message_type)
+static inline void mlxsw_reg_mtptpt_pack(char *payload,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
{
MLXSW_REG_ZERO(mtptpt, payload);
mlxsw_reg_mtptpt_trap_id_set(payload, trap_id);
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MTPCPC - Monitoring Time Precision Correction Port Configuration Register
+ * -------------------------------------------------------------------------
+ */
+#define MLXSW_REG_MTPCPC_ID 0x9093
+#define MLXSW_REG_MTPCPC_LEN 0x2C
+
+MLXSW_REG_DEFINE(mtpcpc, MLXSW_REG_MTPCPC_ID, MLXSW_REG_MTPCPC_LEN);
+
+/* reg_mtpcpc_pport
+ * Per port:
+ * 0: config is global. When reading, the local_port is 1.
+ * 1: config is per port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpcpc, pport, 0x00, 31, 1);
+
+/* reg_mtpcpc_local_port
+ * Local port number.
+ * Supported to/from CPU port.
+ * Reserved when pport = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mtpcpc, 0x00, 16, 0x00, 12);
+
+/* reg_mtpcpc_ptp_trap_en
+ * Enable PTP traps.
+ * The trap_id is configured by MTPTPT.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ptp_trap_en, 0x04, 0, 1);
+
+/* reg_mtpcpc_ing_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at ingress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Also supported from the CPU port.
+ * Default is all 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ing_correction_message_type, 0x10, 0, 16);
+
+/* reg_mtpcpc_egr_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at egress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Also supported from the CPU port.
+ * Default is all 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, egr_correction_message_type, 0x14, 0, 16);
+
+static inline void mlxsw_reg_mtpcpc_pack(char *payload, bool pport,
+ u16 local_port, bool ptp_trap_en,
+ u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpcpc, payload);
+ mlxsw_reg_mtpcpc_pport_set(payload, pport);
+ mlxsw_reg_mtpcpc_local_port_set(payload, pport ? local_port : 0);
+ mlxsw_reg_mtpcpc_ptp_trap_en_set(payload, ptp_trap_en);
+ mlxsw_reg_mtpcpc_ing_correction_message_type_set(payload, ing);
+ mlxsw_reg_mtpcpc_egr_correction_message_type_set(payload, egr);
+}
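
Per the IEEE 1588 bit mapping noted in the field comments above, enabling correction-field updates for Sync and Delay_Req means setting bits 0 and 1 in both directions. A minimal sketch (hypothetical example_ helper; BIT() is the usual kernel macro):

static int example_mtpcpc_enable_ptp(struct mlxsw_core *core)
{
	char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
	u16 msg_types = BIT(0) | BIT(1);	/* Sync and Delay_Req */

	/* Global (pport = 0) configuration with PTP traps enabled. */
	mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, true, msg_types, msg_types);
	return mlxsw_reg_write(core, MLXSW_REG(mtpcpc), mtpcpc_pl);
}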
+
/* MFGD - Monitoring FW General Debug Register
* -------------------------------------------
*/
@@ -11271,38 +11016,58 @@ enum mlxsw_reg_mgpir_device_type {
MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
};
-/* device_type
+/* mgpir_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4);
+
+/* mgpir_device_type
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4);
-/* devices_per_flash
+/* mgpir_devices_per_flash
* Number of devices of device_type per flash (can be shared by few devices).
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
-/* num_of_devices
+/* mgpir_num_of_devices
* Number of devices of device_type.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
-/* num_of_modules
+/* mgpir_max_modules_per_slot
+ * Maximum number of modules that can be connected per slot.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8);
+
+/* mgpir_num_of_slots
+ * Number of slots in the system.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8);
+
+/* mgpir_num_of_modules
* Number of modules.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
-static inline void mlxsw_reg_mgpir_pack(char *payload)
+static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index)
{
MLXSW_REG_ZERO(mgpir, payload);
+ mlxsw_reg_mgpir_slot_index_set(payload, slot_index);
}
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash, u8 *num_of_modules)
+ u8 *devices_per_flash, u8 *num_of_modules,
+ u8 *num_of_slots)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -11313,6 +11078,478 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
mlxsw_reg_mgpir_devices_per_flash_get(payload);
if (num_of_modules)
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
+ if (num_of_slots)
+ *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload);
+}
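
With the slot extension, discovering a modular system becomes a two-step walk: query slot 0 (main board) for num_of_slots, then query each line-card slot for its own module count. A hedged sketch under those assumptions, with a hypothetical example_ helper:

static int example_mgpir_walk_slots(struct mlxsw_core *core)
{
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	u8 num_of_slots, num_of_modules;
	int i, err;

	/* Slot 0 reports the number of slots in the system. */
	mlxsw_reg_mgpir_pack(mgpir_pl, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &num_of_modules,
			       &num_of_slots);

	for (i = 1; i <= num_of_slots; i++) {
		mlxsw_reg_mgpir_pack(mgpir_pl, i);
		err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
		if (err)
			return err;
		mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
				       &num_of_modules, &num_of_slots);
		/* num_of_modules now describes slot i. */
	}
	return 0;
}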
+
+/* MBCT - Management Binary Code Transfer Register
+ * -----------------------------------------------
+ * This register allows transferring binary codes from the host to
+ * the management FW in chunks of at most 1KB.
+ */
+#define MLXSW_REG_MBCT_ID 0x9120
+#define MLXSW_REG_MBCT_LEN 0x420
+
+MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN);
+
+/* reg_mbct_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4);
+
+/* reg_mbct_data_size
+ * Actual data field size in bytes for the current data transfer.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11);
+
+enum mlxsw_reg_mbct_op {
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */
+ MLXSW_REG_MBCT_OP_ACTIVATE,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS,
+};
+
+/* reg_mbct_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4);
+
+/* reg_mbct_last
+ * Indicates that the current data field is the last chunk of the INI.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1);
+
+/* reg_mbct_oee
+ * Opcode Event Enable. When set, a BCTOE event will be sent once the opcode
+ * has been executed and the fsm_state has changed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1);
+
+enum mlxsw_reg_mbct_status {
+ /* Partial data transfer completed successfully and ready for next
+ * data transfer.
+ */
+ MLXSW_REG_MBCT_STATUS_PART_DATA = 2,
+ MLXSW_REG_MBCT_STATUS_LAST_DATA,
+ MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE,
+ /* Error - trying to erase INI while it is being used. */
+ MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE,
+ /* Last data transfer completed, applying magic pattern. */
+ MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7,
+ MLXSW_REG_MBCT_STATUS_INI_ERROR,
+ MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED,
+ MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11,
+};
+
+/* reg_mbct_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5);
+
+enum mlxsw_reg_mbct_fsm_state {
+ MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5,
+ MLXSW_REG_MBCT_FSM_STATE_ERROR,
+};
+
+/* reg_mbct_fsm_state
+ * FSM state.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4);
+
+#define MLXSW_REG_MBCT_DATA_LEN 1024
+
+/* reg_mbct_data
+ * Up to 1KB of data.
+ * Access: WO
+ */
+MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN);
+
+static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mbct_op op, bool oee)
+{
+ MLXSW_REG_ZERO(mbct, payload);
+ mlxsw_reg_mbct_slot_index_set(payload, slot_index);
+ mlxsw_reg_mbct_op_set(payload, op);
+ mlxsw_reg_mbct_oee_set(payload, oee);
+}
+
+static inline void mlxsw_reg_mbct_dt_pack(char *payload,
+ u16 data_size, bool last,
+ const char *data)
+{
+ if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN))
+ return;
+ mlxsw_reg_mbct_data_size_set(payload, data_size);
+ mlxsw_reg_mbct_last_set(payload, last);
+ mlxsw_reg_mbct_data_memcpy_to(payload, data);
+}
+
+static inline void
+mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
+ enum mlxsw_reg_mbct_status *p_status,
+ enum mlxsw_reg_mbct_fsm_state *p_fsm_state)
+{
+ if (p_slot_index)
+ *p_slot_index = mlxsw_reg_mbct_slot_index_get(payload);
+ *p_status = mlxsw_reg_mbct_status_get(payload);
+ if (p_fsm_state)
+ *p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
+}
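
The register comments above imply an erase / chunked-transfer / activate flow. The sketch below follows that reading over the plain synchronous write path; the event-driven completion (oee/BCTOE) and status polling via mlxsw_reg_mbct_unpack() are omitted, and the example_ helper is hypothetical. Note that mlxsw_reg_mbct_data_memcpy_to() copies the full 1KB data field, so the source buffer is assumed to be safely readable up to that length.

static int example_mbct_download_ini(struct mlxsw_core *core, u8 slot_index,
				     const char *ini, size_t size)
{
	char mbct_pl[MLXSW_REG_MBCT_LEN];
	size_t offset = 0;
	int err;

	mlxsw_reg_mbct_pack(mbct_pl, slot_index,
			    MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false);
	err = mlxsw_reg_write(core, MLXSW_REG(mbct), mbct_pl);
	if (err)
		return err;

	while (offset < size) {
		u16 chunk = min_t(size_t, size - offset,
				  MLXSW_REG_MBCT_DATA_LEN);
		bool last = offset + chunk == size;

		mlxsw_reg_mbct_pack(mbct_pl, slot_index,
				    MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
		mlxsw_reg_mbct_dt_pack(mbct_pl, chunk, last, ini + offset);
		err = mlxsw_reg_write(core, MLXSW_REG(mbct), mbct_pl);
		if (err)
			return err;
		offset += chunk;
	}

	mlxsw_reg_mbct_pack(mbct_pl, slot_index, MLXSW_REG_MBCT_OP_ACTIVATE,
			    false);
	return mlxsw_reg_write(core, MLXSW_REG(mbct), mbct_pl);
}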
+
+/* MDDT - Management DownStream Device Tunneling Register
+ * ------------------------------------------------------
+ * This register allows to deliver query and request messages (PRM registers,
+ * commands) to a DownStream device.
+ */
+#define MLXSW_REG_MDDT_ID 0x9160
+#define MLXSW_REG_MDDT_LEN 0x110
+
+MLXSW_REG_DEFINE(mddt, MLXSW_REG_MDDT_ID, MLXSW_REG_MDDT_LEN);
+
+/* reg_mddt_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, slot_index, 0x00, 8, 4);
+
+/* reg_mddt_device_index
+ * Device index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, device_index, 0x00, 0, 8);
+
+/* reg_mddt_read_size
+ * Read size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, read_size, 0x04, 24, 8);
+
+/* reg_mddt_write_size
+ * Write size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, write_size, 0x04, 16, 8);
+
+enum mlxsw_reg_mddt_status {
+ MLXSW_REG_MDDT_STATUS_OK,
+};
+
+/* reg_mddt_status
+ * Return code of the Downstream Device to the register that was sent.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddt, status, 0x0C, 24, 8);
+
+enum mlxsw_reg_mddt_method {
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+};
+
+/* reg_mddt_method
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, method, 0x0C, 22, 2);
+
+/* reg_mddt_register_id
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, register_id, 0x0C, 0, 16);
+
+#define MLXSW_REG_MDDT_PAYLOAD_OFFSET 0x0C
+#define MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN 4
+
+static inline char *mlxsw_reg_mddt_inner_payload(char *payload)
+{
+ return payload + MLXSW_REG_MDDT_PAYLOAD_OFFSET +
+ MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+}
+
+static inline void mlxsw_reg_mddt_pack(char *payload, u8 slot_index,
+ u8 device_index,
+ enum mlxsw_reg_mddt_method method,
+ const struct mlxsw_reg_info *reg,
+ char **inner_payload)
+{
+ int len = reg->len + MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+
+ if (WARN_ON(len + MLXSW_REG_MDDT_PAYLOAD_OFFSET > MLXSW_REG_MDDT_LEN))
+ len = MLXSW_REG_MDDT_LEN - MLXSW_REG_MDDT_PAYLOAD_OFFSET;
+
+ MLXSW_REG_ZERO(mddt, payload);
+ mlxsw_reg_mddt_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddt_device_index_set(payload, device_index);
+ mlxsw_reg_mddt_method_set(payload, method);
+ mlxsw_reg_mddt_register_id_set(payload, reg->id);
+ mlxsw_reg_mddt_read_size_set(payload, len / 4);
+ mlxsw_reg_mddt_write_size_set(payload, len / 4);
+ *inner_payload = mlxsw_reg_mddt_inner_payload(payload);
+}
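
mlxsw_reg_mddt_pack() hands back a pointer into its own payload, so the tunneled register is packed with its usual helper directly into the MDDT buffer. An illustrative pairing with MTMP (hypothetical example_ helper; whether MTMP is a meaningful register to tunnel is an assumption here, not something the patch states):

static int example_mddt_query_mtmp(struct mlxsw_core *core, u8 slot_index,
				   u8 device_index, u16 sensor_index)
{
	char mddt_pl[MLXSW_REG_MDDT_LEN];
	char *mtmp_pl;

	mlxsw_reg_mddt_pack(mddt_pl, slot_index, device_index,
			    MLXSW_REG_MDDT_METHOD_QUERY, MLXSW_REG(mtmp),
			    &mtmp_pl);
	/* Pack the inner register as usual, into the MDDT payload. */
	mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index, false, false);
	return mlxsw_reg_query(core, MLXSW_REG(mddt), mddt_pl);
}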
+
+/* MDDQ - Management DownStream Device Query Register
+ * --------------------------------------------------
+ * This register allows querying the DownStream device properties. The desired
+ * information is selected by the query_type field and is delivered in 32-byte
+ * data blocks.
+ */
+#define MLXSW_REG_MDDQ_ID 0x9161
+#define MLXSW_REG_MDDQ_LEN 0x30
+
+MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN);
+
+/* reg_mddq_sie
+ * Slot info event enable.
+ * When set to '1', each change in the slot_info.provisioned / sr_valid /
+ * active / ready will generate a DSDSC event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
+
+enum mlxsw_reg_mddq_query_type {
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
+};
+
+/* reg_mddq_query_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
+
+/* reg_mddq_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a specific request, the response
+ * message sequential number is the one following the request's. The last
+ * message in the sequence carries 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
+/* reg_mddq_slot_info_provisioned
+ * If set, the INI file is applied and the card is provisioned.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1);
+
+/* reg_mddq_slot_info_sr_valid
+ * If set, Shift Register is valid (after being provisioned) and data
+ * can be sent from the switch ASIC to the line-card CPLD over Shift-Register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1);
+
+enum mlxsw_reg_mddq_slot_info_ready {
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR,
+};
+
+/* reg_mddq_slot_info_lc_ready
+ * If set, the LC is powered on and matches the INI version, and a new FW
+ * version can be burnt (if necessary).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2);
+
+/* reg_mddq_slot_info_active
+ * If set, the FW has completed the MDDC.device_enable command.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1);
+
+/* reg_mddq_slot_info_hw_revision
+ * Major user-configured version number of the current INI file.
+ * Valid only when active or ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16);
+
+/* reg_mddq_slot_info_ini_file_version
+ * User-configured version number of the current INI file.
+ * Valid only when active or lc_ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16);
+
+/* reg_mddq_slot_info_card_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8);
+
+static inline void
+__mlxsw_reg_mddq_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mddq_query_type query_type)
+{
+ MLXSW_REG_ZERO(mddq, payload);
+ mlxsw_reg_mddq_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddq_query_type_set(payload, query_type);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO);
+ mlxsw_reg_mddq_sie_set(payload, sie);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
+ bool *p_provisioned, bool *p_sr_valid,
+ enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready,
+ bool *p_active, u16 *p_hw_revision,
+ u16 *p_ini_file_version,
+ u8 *p_card_type)
+{
+ *p_slot_index = mlxsw_reg_mddq_slot_index_get(payload);
+ *p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload);
+ *p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload);
+ *p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload);
+ *p_active = mlxsw_reg_mddq_slot_info_active_get(payload);
+ *p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload);
+ *p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload);
+ *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
+}
+
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
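
Taken together with the response_msg_seq comment above, device enumeration on a slot reads as a walk: issue DEVICE_INFO queries, feed each response's sequence number back as the next request, and stop when data_valid clears or the sequence number returns to 0. A sketch under that reading (hypothetical example_ helper):

static int example_mddq_walk_devices(struct mlxsw_core *core, u8 slot_index)
{
	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	u8 msg_seq = 0;
	int err;

	do {
		u8 response_msg_seq, device_index;
		u16 fw_major, fw_minor, fw_sub_minor;
		bool data_valid, flash_owner;

		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, msg_seq);
		err = mlxsw_reg_query(core, MLXSW_REG(mddq), mddq_pl);
		if (err)
			return err;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &response_msg_seq,
						  &data_valid, &flash_owner,
						  &device_index, &fw_major,
						  &fw_minor, &fw_sub_minor);
		if (!data_valid)
			break;	/* no (more) devices on this slot */
		msg_seq = response_msg_seq;
	} while (msg_seq);
	return 0;
}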
+
+#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
+
+/* reg_mddq_slot_ascii_name
+ * Slot's ASCII name.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10,
+ MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN);
+
+static inline void
+mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name)
+{
+ mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name);
+}
+
+/* MDDC - Management DownStream Device Control Register
+ * ----------------------------------------------------
+ * This register allows controlling downstream devices and line cards.
+ */
+#define MLXSW_REG_MDDC_ID 0x9163
+#define MLXSW_REG_MDDC_LEN 0x30
+
+MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN);
+
+/* reg_mddc_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4);
+
+/* reg_mddc_rst
+ * Reset request.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1);
+
+/* reg_mddc_device_enable
+ * When set, FW is the manager and allowed to program the downstream device.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1);
+
+static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst,
+ bool device_enable)
+{
+ MLXSW_REG_ZERO(mddc, payload);
+ mlxsw_reg_mddc_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddc_rst_set(payload, rst);
+ mlxsw_reg_mddc_device_enable_set(payload, device_enable);
}
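
A short usage sketch: once a line card is provisioned, handing its downstream device to FW management is a single MDDC write (hypothetical example_ helper):

static int example_mddc_enable(struct mlxsw_core *core, u8 slot_index)
{
	char mddc_pl[MLXSW_REG_MDDC_LEN];

	/* No reset; enable FW as the manager of the downstream device. */
	mlxsw_reg_mddc_pack(mddc_pl, slot_index, false, true);
	return mlxsw_reg_write(core, MLXSW_REG(mddc), mddc_pl);
}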
/* MFDE - Monitoring FW Debug Register
@@ -12070,6 +12307,12 @@ static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn,
MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN);
+/* reg_sbpr_desc
+ * When set, configures descriptor buffer.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, desc, 0x00, 31, 1);
+
/* shared direction enum for SBPR, SBCM, SBPM */
enum mlxsw_reg_sbxx_dir {
MLXSW_REG_SBXX_DIR_INGRESS,
@@ -12519,7 +12762,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
- MLXSW_REG(sftr2),
+ MLXSW_REG(smpe),
MLXSW_REG(smid2),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
@@ -12557,17 +12800,18 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(paos),
MLXSW_REG(pfcc),
MLXSW_REG(ppcnt),
- MLXSW_REG(plib),
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
MLXSW_REG(pmtdb),
+ MLXSW_REG(pmecr),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
MLXSW_REG(pmmp),
MLXSW_REG(pllp),
+ MLXSW_REG(pmtm),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
@@ -12590,16 +12834,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
- MLXSW_REG(rxlte),
- MLXSW_REG(rxltm),
- MLXSW_REG(rlcmld),
- MLXSW_REG(rlpmce),
- MLXSW_REG(xltq),
- MLXSW_REG(xmdr),
- MLXSW_REG(xrmt),
- MLXSW_REG(xralta),
- MLXSW_REG(xralst),
- MLXSW_REG(xraltb),
+ MLXSW_REG(reiv),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
@@ -12630,8 +12865,13 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mtpcpc),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mbct),
+ MLXSW_REG(mddt),
+ MLXSW_REG(mddq),
+ MLXSW_REG(mddc),
MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index c7fc650608eb..19ae0d1c74a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -11,6 +11,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_PGT_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
@@ -23,6 +24,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
+ MLXSW_RES_ID_FID,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
@@ -33,6 +35,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_MAX_REGIONS,
MLXSW_RES_ID_ACL_MAX_GROUPS,
MLXSW_RES_ID_ACL_MAX_GROUP_SIZE,
+ MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS,
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
@@ -68,6 +71,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_PGT_SIZE] = 0x1004,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
@@ -80,6 +84,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
+ [MLXSW_RES_ID_FID] = 0x2512,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
@@ -90,6 +95,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
[MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904,
[MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905,
+ [MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS] = 0x2908,
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index aa411dec62f0..5bcf5bceff71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <linux/ptp_classify.h>
#include "spectrum.h"
#include "pci.h"
@@ -45,52 +46,54 @@
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
+#define MLXSW_SP_FWREV_MINOR 2010
+#define MLXSW_SP_FWREV_SUBMINOR 1006
+
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2010
-#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
.major = MLXSW_SP1_FWREV_MAJOR,
- .minor = MLXSW_SP1_FWREV_MINOR,
- .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};
#define MLXSW_SP1_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP1_FWREV_MINOR) \
- "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2010
-#define MLXSW_SP2_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
- .minor = MLXSW_SP2_FWREV_MINOR,
- .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
};
#define MLXSW_SP2_FW_FILENAME \
"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP2_FWREV_MINOR) \
- "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2010
-#define MLXSW_SP3_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
- .minor = MLXSW_SP3_FWREV_MINOR,
- .subminor = MLXSW_SP3_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
};
#define MLXSW_SP3_FW_FILENAME \
"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP3_FWREV_MINOR) \
- "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+
+#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
+ "mellanox/lc_ini_bundle_" \
+ __stringify(MLXSW_SP_FWREV_MINOR) "_" \
+ __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
@@ -164,7 +167,7 @@ MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
* set, otherwise calculated based on the packet's VID using VID to FID mapping.
* Valid for data packets only.
*/
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
/* tx_hdr_type
* 0 - Data packets
@@ -228,8 +231,8 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
@@ -244,6 +247,82 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr;
+ u16 max_fid;
+ int err;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ err = -ENOMEM;
+ goto err_skb_cow_head;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+ err = -EIO;
+ goto err_res_valid;
+ }
+ max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ return 0;
+
+err_res_valid:
+err_skb_cow_head:
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+ unsigned int type;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ type = ptp_classify_raw(skb);
+ return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+ * need special handling and cannot be transmitted as regular control
+ * packets.
+ */
+ if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+ return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+ mlxsw_sp_port, skb,
+ tx_info);
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
switch (state) {
@@ -484,23 +563,22 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- struct mlxsw_sp_port_mapping *port_mapping)
+mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, char *pmlp_pl,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
bool separate_rxtx;
+ u8 first_lane;
+ u8 slot_index;
u8 module;
u8 width;
- int err;
int i;
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
width = mlxsw_reg_pmlp_width_get(pmlp_pl);
separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+ first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
if (width && !is_power_of_2(width)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
@@ -514,6 +592,11 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
+ if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
+ local_port);
+ return -EINVAL;
+ }
if (separate_rxtx &&
mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
@@ -521,7 +604,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
- if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
local_port);
return -EINVAL;
@@ -529,6 +612,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
port_mapping->module = module;
+ port_mapping->slot_index = slot_index;
port_mapping->width = width;
port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
@@ -536,17 +620,35 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ pmlp_pl, port_mapping);
+}
+
+static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i, err;
- mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
+ port_mapping->slot_index);
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
@@ -557,19 +659,20 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return 0;
err_pmlp_write:
- mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- u8 module)
+ u8 slot_index, u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
@@ -579,6 +682,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
int err;
err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
if (err)
return err;
@@ -590,6 +694,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
err_port_admin_status_set:
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return err;
}
@@ -602,6 +707,7 @@ static int mlxsw_sp_port_stop(struct net_device *dev)
netif_stop_queue(dev);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return 0;
}
@@ -619,12 +725,6 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
@@ -635,7 +735,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- mlxsw_sp_txhdr_construct(skb, &tx_info);
+ err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+ &tx_info);
+ if (err)
+ return NETDEV_TX_OK;
+
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
@@ -1448,12 +1552,13 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
u64 overheat_counter;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
- &overheat_counter);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
+ module, &overheat_counter);
if (err)
return err;
@@ -1528,7 +1633,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
splittable = lanes > 1 && !split;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
@@ -1778,20 +1883,23 @@ err_port_label_info_get:
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
+ port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
@@ -1807,7 +1915,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1861,11 +1969,119 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
return mlxsw_sp->ports[local_port] != NULL;
}
+static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, bool enable)
+{
+ char pmecr_pl[MLXSW_REG_PMECR_LEN];
+
+ mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+ enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
+}
+
+struct mlxsw_sp_port_mapping_event {
+ struct list_head list;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+};
+
+static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ LIST_HEAD(event_queue);
+ u16 local_port;
+ int err;
+
+ events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
+ mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
+ devlink = priv_to_devlink(mlxsw_sp->core);
+
+ spin_lock_bh(&events->queue_lock);
+ list_splice_init(&events->queue, &event_queue);
+ spin_unlock_bh(&events->queue_lock);
+
+ list_for_each_entry_safe(event, next_event, &event_queue, list) {
+ local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
+ err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ event->pmlp_pl, &port_mapping);
+ if (err)
+ goto out;
+
+ if (WARN_ON_ONCE(!port_mapping.width))
+ goto out;
+
+ devl_lock(devlink);
+
+ if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, &port_mapping);
+ else
+ WARN_ON_ONCE(1);
+
+ devl_unlock(devlink);
+
+ mlxsw_sp->port_mapping[local_port] = port_mapping;
+
+out:
+ kfree(event);
+ }
+}
+
+static void
+mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
+ char *pmlp_pl, void *priv)
+{
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping_event *event;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u16 local_port;
+
+ local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+
+ events = &mlxsw_sp->port_mapping_events;
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
+ spin_lock(&events->queue_lock);
+ list_add_tail(&event->list, &events->queue);
+ spin_unlock(&events->queue_lock);
+ mlxsw_core_schedule_work(&events->work);
+}
+
+static void
+__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+
+ events = &mlxsw_sp->port_mapping_events;
+
+ /* The caller must ensure that no new events will be queued. */
+ cancel_work_sync(&events->work);
+ list_for_each_entry_safe(event, next_event, &events->queue, list) {
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+
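The queue above follows a classic producer/consumer split: the trap handler runs in atomic context, so it only allocates with GFP_ATOMIC and appends under a spinlock, while the worker splices the whole list out in one locked step and processes the events with the lock dropped. A rough userspace analogue of that splice pattern, with a pthread mutex standing in for the spinlock and a simplified singly linked list in place of list_head (all names here are illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int payload;
};

static struct event *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void producer_enqueue(int payload)
{
	struct event *ev = malloc(sizeof(*ev));

	if (!ev)
		return;		/* mirrors the GFP_ATOMIC drop-on-failure */
	ev->payload = payload;
	pthread_mutex_lock(&queue_lock);
	ev->next = queue_head;	/* LIFO for brevity; the kernel adds at tail */
	queue_head = ev;
	pthread_mutex_unlock(&queue_lock);
}

static void consumer_drain(void)
{
	struct event *batch, *ev;

	/* Splice the whole queue out under the lock, like list_splice_init(). */
	pthread_mutex_lock(&queue_lock);
	batch = queue_head;
	queue_head = NULL;
	pthread_mutex_unlock(&queue_lock);

	while (batch) {		/* per-event work runs without the lock held */
		ev = batch;
		batch = ev->next;
		printf("processing event %d\n", ev->payload);
		free(ev);
	}
}

int main(void)
{
	producer_enqueue(1);
	producer_enqueue(2);
	consumer_drain();
	return 0;
}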
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
int i;
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ for (i = 1; i < max_ports; i++)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+
+ for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
@@ -1873,9 +2089,24 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->ports = NULL;
}
+static void
+mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
+ int i;
+
+ for (i = 1; i < max_ports; i++)
+ if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+}
+
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
@@ -1886,13 +2117,24 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
+ events = &mlxsw_sp->port_mapping_events;
+ INIT_LIST_HEAD(&events->queue);
+ spin_lock_init(&events->queue_lock);
+ INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
+ if (err)
+ goto err_event_enable;
+ }
+
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- port_mapping = mlxsw_sp->port_mapping[i];
- if (!port_mapping)
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ if (!port_mapping->width)
continue;
err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
@@ -1904,8 +2146,14 @@ err_port_create:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
+err_event_enable:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
return err;
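One subtlety in the error handling above: both failure paths share the event-disable rollback loop, so when a later step fails after every port's events were already enabled, i is reset to max_ports before falling through, making the shared loop undo all of them. A minimal sketch of that idiom with hypothetical names:

#include <stdio.h>

static int enable_one(int i) { (void)i; return 0; }	/* stand-in, always ok */
static void disable_one(int i) { printf("disable %d\n", i); }
static int later_step(void) { return -1; }		/* force the rollback path */

static int ports_create(int max_ports)
{
	int err, i;

	for (i = 1; i < max_ports; i++) {
		err = enable_one(i);
		if (err)
			goto err_enable;	/* i marks the first failure */
	}

	err = later_step();
	if (err) {
		i = max_ports;		/* roll back every enabled port */
		goto err_enable;
	}
	return 0;

err_enable:
	for (i--; i >= 1; i--)
		disable_one(i);
	return err;
}

int main(void)
{
	return ports_create(4) ? 1 : 0;
}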
@@ -1914,50 +2162,31 @@ err_cpu_port_create:
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
int err;
mlxsw_sp->port_mapping = kcalloc(max_ports,
- sizeof(struct mlxsw_sp_port_mapping *),
+ sizeof(struct mlxsw_sp_port_mapping),
GFP_KERNEL);
if (!mlxsw_sp->port_mapping)
return -ENOMEM;
for (i = 1; i < max_ports; i++) {
- if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
- continue;
-
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
goto err_port_module_info_get;
- if (!port_mapping.width)
- continue;
-
- mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
- sizeof(port_mapping),
- GFP_KERNEL);
- if (!mlxsw_sp->port_mapping[i]) {
- err = -ENOMEM;
- goto err_port_module_info_dup;
- }
}
return 0;
err_port_module_info_get:
-err_port_module_info_dup:
- for (i--; i >= 1; i--)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
return err;
}
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
- int i;
-
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
}
@@ -2007,8 +2236,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- port_mapping = mlxsw_sp->port_mapping[local_port];
- if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
+ port_mapping = &mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
continue;
mlxsw_sp_port_create(mlxsw_sp, local_port,
false, port_mapping);
@@ -2048,7 +2277,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
return -EINVAL;
}
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2083,6 +2313,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
err_port_split_create:
mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
return err;
}
@@ -2112,7 +2343,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
count = mlxsw_sp_port->mapping.module_width /
mlxsw_sp_port->mapping.width;
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2148,13 +2380,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
- unsigned int max_ports;
u16 local_port;
- max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
@@ -2305,6 +2535,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
+static const struct mlxsw_listener mlxsw_sp2_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
+};
+
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2393,45 +2628,6 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
return 0;
}
-static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_listener listeners[],
- size_t listeners_count)
-{
- int i;
- int err;
-
- for (i = 0; i < listeners_count; i++) {
- err = mlxsw_core_trap_register(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- if (err)
- goto err_listener_register;
-
- }
- return 0;
-
-err_listener_register:
- for (i--; i >= 0; i--) {
- mlxsw_core_trap_unregister(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- }
- return err;
-}
-
-static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_listener listeners[],
- size_t listeners_count)
-{
- int i;
-
- for (i = 0; i < listeners_count; i++) {
- mlxsw_core_trap_unregister(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- }
-}
-
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_trap *trap;
@@ -2456,21 +2652,23 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_trap_groups_set;
- err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
if (err)
goto err_traps_register;
- err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
- mlxsw_sp->listeners_count);
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count, mlxsw_sp);
if (err)
goto err_extra_traps_init;
return 0;
err_extra_traps_init:
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
@@ -2480,10 +2678,11 @@ err_cpu_policers_set:
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
- mlxsw_sp->listeners_count);
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count,
+ mlxsw_sp);
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
kfree(mlxsw_sp->trap);
}
@@ -2492,6 +2691,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u16 max_lag;
u32 seed;
int err;
@@ -2510,12 +2710,14 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
return -EIO;
- mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
- sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
GFP_KERNEL);
if (!mlxsw_sp->lags)
return -ENOMEM;
@@ -2528,42 +2730,6 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->lags);
}
-static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
- int err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
-}
-
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.clock_init = mlxsw_sp1_ptp_clock_init,
.clock_fini = mlxsw_sp1_ptp_clock_fini,
@@ -2578,6 +2744,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2594,6 +2761,24 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -2915,6 +3100,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_pgt_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
+ goto err_pgt_init;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3002,7 +3193,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_router_init;
}
- if (mlxsw_sp->bus_info->read_frc_capable) {
+ if (mlxsw_sp->bus_info->read_clock_capable) {
/* NULL is a valid return value from clock_init */
mlxsw_sp->clock =
mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
@@ -3024,9 +3215,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
}
}
- /* Initialize netdevice notifier after router and SPAN is initialized,
- * so that the event handler can use router structures and call SPAN
- * respin.
+ /* Initialize netdevice notifier after SPAN is initialized, so that the
+ * event handler can call SPAN respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
@@ -3107,6 +3297,8 @@ err_traps_init:
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
return err;
@@ -3138,7 +3330,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
+ mlxsw_sp->pgt_smpe_index_valid = true;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3168,7 +3362,11 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3198,7 +3396,11 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3222,13 +3424,17 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
- mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3261,28 +3467,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
}
-/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
- * 802.1Q FIDs
- */
-#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
- VLAN_VID_MASK - 1)
-
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
@@ -3296,25 +3494,49 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* Reduce the number of LAGs from the full capacity (256) to the maximum
+ * supported by Spectrum-2/3, to avoid a regression in the number of free
+ * entries in the PGT table.
+ */
+#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
+
+static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
+ .used_max_lag = 1,
+ .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvh_xlt_cache_mode = 1,
- .kvh_xlt_cache_mode = 1,
+ .used_ubridge = 1,
+ .ubridge = 1,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
static void
@@ -3374,19 +3596,19 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&hash_single_size_params);
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
if (err)
return err;
linear_size = profile->kvd_linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
- linear_size,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_RESOURCE_KVD,
- &linear_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &linear_size_params);
if (err)
return err;
@@ -3399,20 +3621,20 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
- double_size,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_double_size_params);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
- single_size,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_single_size_params);
if (err)
return err;
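The devlink_resource_register() to devl_resource_register() conversions in this hunk and those that follow switch to the lock-held devlink API variants, which expect the caller to already hold the devlink instance lock — the same lock taken via devl_lock()/devl_unlock() in the port-mapping worker earlier in this patch. A userspace sketch of that locked/unlocked variant convention, with illustrative names only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t instance_lock = PTHREAD_MUTEX_INITIALIZER;

/* Locked variant: the caller must already hold instance_lock. */
static void devl_style_register(const char *name)
{
	printf("registered %s (lock held by caller)\n", name);
}

/* Unlocked variant: takes the lock itself around the locked variant. */
static void devlink_style_register(const char *name)
{
	pthread_mutex_lock(&instance_lock);
	devl_style_register(name);
	pthread_mutex_unlock(&instance_lock);
}

int main(void)
{
	pthread_mutex_lock(&instance_lock);	/* e.g. a driver init path */
	devl_style_register("kvd");
	devl_style_register("kvd/linear");
	pthread_mutex_unlock(&instance_lock);

	devlink_style_register("span");		/* standalone caller */
	return 0;
}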
@@ -3433,10 +3655,10 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVD_GRANULARITY,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
}
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
@@ -3452,10 +3674,10 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&span_size_params, max_span, max_span,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
- max_span, MLXSW_SP_RESOURCE_SPAN,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &span_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
}
static int
@@ -3474,12 +3696,31 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
max_rif_mac_profiles, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- "rif_mac_profiles",
- max_rif_mac_profiles,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ return devl_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
+static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max_rifs;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
+ devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "rifs", max_rifs,
+ MLXSW_SP_RESOURCE_RIFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
}
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
@@ -3506,13 +3747,18 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3540,13 +3786,18 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3570,15 +3821,15 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
* granularity from the profile. In case the user
* provided the sizes they are obtained via devlink.
*/
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- p_linear_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
if (err)
*p_linear_size = profile->kvd_linear_size;
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- p_double_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
if (err) {
double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_linear_size;
@@ -3589,9 +3840,9 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
MLXSW_SP_KVD_GRANULARITY);
}
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- p_single_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
if (err)
*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_double_size - *p_linear_size;
@@ -3677,7 +3928,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.fw_filename = MLXSW_SP1_FW_FILENAME,
.init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3705,9 +3955,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
+ .sdq_supports_cqe_v2 = false,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3717,9 +3965,9 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3746,9 +3994,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3758,9 +4004,9 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3787,9 +4033,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3797,9 +4041,9 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.priv_size = sizeof(struct mlxsw_sp),
.init = mlxsw_sp4_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3825,10 +4069,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
+ .profile = &mlxsw_sp4_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
@@ -4051,10 +4293,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
- u64 max_lag;
- int i;
+ u16 max_lag;
+ int err, i;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
- max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
for (i = 0; i < max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
@@ -4434,7 +4679,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
!netif_is_ovs_master(upper_dev) &&
- !netif_is_macvlan(upper_dev)) {
+ !netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
@@ -4633,7 +4879,8 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev) &&
- !netif_is_macvlan(upper_dev)) {
+ !netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
@@ -4672,9 +4919,6 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
} else if (netif_is_macvlan(upper_dev)) {
if (!info->linking)
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
- } else {
- err = -EINVAL;
- WARN_ON(1);
}
break;
}
@@ -4722,7 +4966,8 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_macvlan(upper_dev)) {
+ if (!netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EOPNOTSUPP;
}
@@ -4783,7 +5028,9 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
+ if (!is_vlan_dev(upper_dev) &&
+ !netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EOPNOTSUPP;
}
@@ -4827,25 +5074,20 @@ static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
return 0;
extack = netdev_notifier_info_to_extack(&info->info);
+ upper_dev = info->upper_dev;
- /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
- NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
-
- return -EOPNOTSUPP;
-}
-
-static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
-{
- struct netdev_notifier_changeupper_info *info = ptr;
+ if (!netif_is_l3_master(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
- if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
- return false;
- return netif_is_l3_master(info->upper_dev);
+ return 0;
}
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
@@ -4934,18 +5176,6 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
if (netif_is_vxlan(dev))
err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
- if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
- err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
- event, ptr);
- else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
- err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
- event, ptr);
- else if (event == NETDEV_PRE_CHANGEADDR ||
- event == NETDEV_CHANGEADDR ||
- event == NETDEV_CHANGEMTU)
- err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
- else if (mlxsw_sp_is_vrf_event(event, ptr))
- err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
else if (netif_is_lag_master(dev))
@@ -5096,3 +5326,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bb2442e1f705..c8ff2a6d7e90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -68,6 +68,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ MLXSW_SP_RESOURCE_RIFS,
};
struct mlxsw_sp_port;
@@ -111,15 +112,6 @@ enum mlxsw_sp_nve_type {
MLXSW_SP_NVE_TYPE_VXLAN,
};
-struct mlxsw_sp_mid {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 fid;
- u16 mid;
- bool in_hw;
- unsigned long *ports_in_mid; /* bits array */
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -142,14 +134,22 @@ struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
+struct mlxsw_sp_pgt;
struct mlxsw_sp_port_mapping {
u8 module;
+ u8 slot_index;
u8 width; /* Number of lanes used by the port */
u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
+struct mlxsw_sp_port_mapping_events {
+ struct list_head queue;
+ spinlock_t queue_lock; /* protects queue */
+ struct work_struct work;
+};
+
struct mlxsw_sp_parsing {
refcount_t parsing_depth_ref;
u16 parsing_depth;
@@ -164,7 +164,8 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- struct mlxsw_sp_port_mapping **port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
+ struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
@@ -202,10 +203,13 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
+ const struct mlxsw_sp_fid_family **fid_family_arr;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
+ struct mlxsw_sp_pgt *pgt;
+ bool pgt_smpe_index_valid;
};
struct mlxsw_sp_ptp_ops {
@@ -239,6 +243,10 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+ int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
};
static inline struct mlxsw_sp_upper *
@@ -381,6 +389,31 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+struct mlxsw_sp_ports_bitmap {
+ unsigned long *bitmap;
+ unsigned int nbits;
+};
+
+static inline int
+mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ ports_bm->nbits = nbits;
+ ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!ports_bm->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ bitmap_free(ports_bm->bitmap);
+}
+
static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
bool *trap_en)
{
@@ -481,6 +514,13 @@ int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_8021ad_tagged,
bool is_8021q_tagged);
+static inline bool
+mlxsw_sp_local_port_is_valid(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ return local_port < max_ports && local_port;
+}
/* spectrum_buffers.c */
struct mlxsw_sp_hdroom_prio {
@@ -664,6 +704,12 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -700,32 +746,16 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
- unsigned long event, void *ptr);
void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
const struct net_device *macvlan_dev);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
- struct netdev_notifier_changeupper_info *info);
-bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- unsigned long event,
- struct netdev_notifier_info *info);
-int
-mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- unsigned long event,
- struct netdev_notifier_info *info);
int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct net_device *l3_dev,
@@ -813,6 +843,24 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum2_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
+enum mlxsw_sp_acl_mangle_field {
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4,
+};
+
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
@@ -821,9 +869,14 @@ struct mlxsw_sp_acl_rule_info {
ingress_bind_blocker:1,
egress_bind_blocker:1,
counter_valid:1,
- policer_index_valid:1;
+ policer_index_valid:1,
+ ipv6_valid:1;
unsigned int counter_index;
u16 policer_index;
+ struct {
+ u32 prev_val;
+ enum mlxsw_sp_acl_mangle_field prev_field;
+ } ipv6;
};
/* spectrum_flow.c */
@@ -1215,7 +1268,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
@@ -1243,7 +1295,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1265,6 +1318,9 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
+extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
@@ -1422,4 +1478,16 @@ int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+/* spectrum_pgt.c */
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member);
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index a9fff8adc75e..1e3fc989393c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -213,20 +213,19 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
- size_t usage_size;
u64 resource_size;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
+ err = devl_resource_size_get(devlink, info->resource_id,
+ &resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
}
nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ part = kzalloc(struct_size(part, usage, BITS_TO_LONGS(nr_entries)),
+ GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
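The allocation above switches to the kernel's struct_size() helper, which computes sizeof(*part) plus the flexible-array payload with overflow checking, replacing the open-coded BITS_TO_LONGS arithmetic. A plain userspace equivalent of the sizing (without the overflow saturation that struct_size() adds):

#include <stdio.h>
#include <stdlib.h>

struct part {
	unsigned int nr_entries;
	unsigned long usage[];		/* flexible array member */
};

int main(void)
{
	unsigned int nr_longs = 4;
	struct part *p = calloc(1, sizeof(*p) + nr_longs * sizeof(p->usage[0]));

	if (!p)
		return 1;
	p->nr_entries = nr_longs * 8 * sizeof(unsigned long);
	printf("allocated part tracking %u entries\n", p->nr_entries);
	free(p);
	return 0;
}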
@@ -339,22 +338,22 @@ static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
if (err)
return err;
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp1_kvdl_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp1_kvdl_single_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp1_kvdl_chunks_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp1_kvdl_large_chunks_occ_get,
- kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
return 0;
}
@@ -363,14 +362,14 @@ static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl *kvdl = priv;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp1_kvdl_parts_fini(kvdl);
}
@@ -397,32 +396,32 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP1_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP1_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index ad69913f19c1..5b0210862655 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -77,7 +77,14 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
int i;
int err;
+ /* Some TCAM regions are not exposed to the host and are used internally
+ * by the device. Allocate KVDL entries for the default actions of
+ * these regions to prevent the host from overwriting them.
+ */
tcam->kvdl_count = _tcam->max_regions;
+ if (MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_DEFAULT_ACTIONS))
+ tcam->kvdl_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_DEFAULT_ACTIONS);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, &tcam->kvdl_index);
if (err)
@@ -97,7 +104,10 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_continue;
enc_actions = mlxsw_afa_block_cur_set(afa_block);
- for (i = 0; i < tcam->kvdl_count; i++) {
+ /* Only write to KVDL entries used by TCAM regions exposed to the
+ * host.
+ */
+ for (i = 0; i < _tcam->max_regions; i++) {
mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
true, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
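The hunk above illustrates a reservation pattern: index space is allocated for all regions, including the device-internal ones, but only the entries backing host-visible regions are programmed. A minimal sketch with hypothetical counts:

#include <stdio.h>

#define MAX_REGIONS		8	/* host-visible regions */
#define MAX_DEFAULT_ACTIONS	12	/* includes device-internal regions */

int main(void)
{
	int alloc_count = MAX_DEFAULT_ACTIONS;	/* reserve the full range */
	int i;

	printf("reserved %d KVDL entries\n", alloc_count);
	for (i = 0; i < MAX_REGIONS; i++)	/* program only the visible ones */
		printf("programmed default action %d\n", i);
	return 0;
}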
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 10ae1115de6c..24ff305a2995 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -15,7 +15,7 @@ struct mlxsw_sp2_kvdl_part_info {
* usage bits we need and how many indexes there are
* represented by a single bit. This could be got from FW
* querying appropriate resources. So have the resource
- * ids for for this purpose in partition definition.
+ * ids for this purpose in partition definition.
*/
enum mlxsw_res_id usage_bit_count_res_id;
enum mlxsw_res_id index_range_res_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 70c11bfac08f..6c5af018546f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -505,14 +505,6 @@ int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
extack);
}
-enum mlxsw_sp_acl_mangle_field {
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
-};
-
struct mlxsw_sp_acl_mangle_action {
enum flow_action_mangle_base htype;
/* Offset is u32-aligned. */
@@ -561,6 +553,18 @@ static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};
static int
@@ -599,6 +603,22 @@ static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
return err;
}
+static int
+mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_sp_acl_mangle_field field,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ if (!rulei->ipv6_valid) {
+ rulei->ipv6.prev_val = val;
+ rulei->ipv6_valid = true;
+ rulei->ipv6.prev_field = field;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
+ return -EOPNOTSUPP;
+}
+
static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_mangle_action *mact,
@@ -615,6 +635,61 @@ static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
+ /* IPv4 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, false,
+ true, val, 0, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, true,
+ true, val, 0, extack);
+ /* IPv6 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
+ return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
+ mact->field,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
default:
break;
}
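The pairing logic above handles the mismatch between flow-offload mangles, which deliver an IPv6 rewrite as four 32-bit words, and the hardware action, which writes 64 bits at a time: the first word of each 64-bit pair is cached in the rule info and only emitted once its successor arrives in order. A userspace sketch of that scheme — the state struct and function names are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct pair_state {
	bool have_prev;
	int prev_field;		/* index of the cached first half */
	uint32_t prev_val;
};

/* Returns 0 on success, -1 when words arrive out of order. */
static int mangle_ipv6_word(struct pair_state *st, int field, uint32_t val)
{
	if (field % 2 == 0) {			/* first half of a 64-bit chunk */
		if (st->have_prev)
			return -1;		/* two first halves in a row */
		st->have_prev = true;
		st->prev_field = field;
		st->prev_val = val;
		return 0;
	}
	if (!st->have_prev || st->prev_field != field - 1)
		return -1;			/* second half without its pair */
	st->have_prev = false;
	printf("write 64-bit chunk: 0x%08x%08x\n", st->prev_val, val);
	return 0;
}

int main(void)
{
	struct pair_state st = { 0 };
	uint32_t words[4] = { 0x20010db8, 0x00000000, 0x00000000, 0x00000001 };
	int i;

	for (i = 0; i < 4; i++)
		if (mangle_ipv6_word(&st, i, words[i]))
			return 1;
	return 0;
}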
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 31f7f4c3acc3..3b9ba8fa247a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1827,10 +1827,9 @@ static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
void *rule_priv, bool *activity)
{
- struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+ *activity = false;
- return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
- activity);
+ return 0;
}
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 98f26f596e30..c9f1c79f3f9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -202,6 +202,21 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
return 0;
}
+static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode,
+ u32 size, bool infi_size)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+
+ /* The FW default descriptor buffer configuration uses only pool 14 for
+ * descriptors.
+ */
+ mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size);
+ mlxsw_reg_sbpr_desc_set(sbpr_pl, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+}
+
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 pg_buff, u32 min_buff, u32 max_buff,
bool infi_max, u16 pool_index)
@@ -775,6 +790,17 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
}
+
+ err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+ MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
+ if (err)
+ return err;
+
return 0;
}
@@ -1264,12 +1290,12 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_mms_init;
mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
- err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
- mlxsw_sp->sb->sb_size,
- ing_pool_count,
- eg_pool_count,
- MLXSW_SP_SB_ING_TC_COUNT,
- MLXSW_SP_SB_EG_TC_COUNT);
+ err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ mlxsw_sp->sb->sb_size,
+ ing_pool_count,
+ eg_pool_count,
+ MLXSW_SP_SB_ING_TC_COUNT,
+ MLXSW_SP_SB_EG_TC_COUNT);
if (err)
goto err_devlink_sb_register;
@@ -1288,7 +1314,7 @@ err_sb_ports_init:
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
- devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
kfree(mlxsw_sp->sb);
}
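The devlink_sb_* to devl_sb_* switch here moves to the variants that expect the caller to already hold the devlink instance lock, which is presumably the case on mlxsw's init/fini paths. A sketch of the locking contract for a context that does not already hold it (illustrative only, pool/TC counts are arbitrary):

#include <net/devlink.h>

static int example_sb_register(struct devlink *devlink, u32 sb_size)
{
	int err;

	devl_lock(devlink);
	err = devl_sb_register(devlink, 0, sb_size, 1, 1, 8, 8);
	devl_unlock(devlink);
	return err;
}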
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index fc2257753b9b..ee59c79156e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -67,16 +67,16 @@ static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
res_id);
- err = devlink_resource_size_get(devlink,
- sub_pool->resource_id,
- &sub_pool->size);
+ err = devl_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
if (err)
goto err_resource_size_get;
- devlink_resource_occ_get_register(devlink,
- sub_pool->resource_id,
- mlxsw_sp_counter_sub_pool_occ_get,
- sub_pool);
+ devl_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
sub_pool->base_index = base_index;
base_index += sub_pool->size;
@@ -88,8 +88,8 @@ err_resource_size_get:
for (i--; i >= 0; i--) {
sub_pool = &pool->sub_pools[i];
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
return err;
}
@@ -105,8 +105,8 @@ static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
sub_pool = &pool->sub_pools[i];
WARN_ON(atomic_read(&sub_pool->active_entries_count));
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
}
@@ -135,12 +135,12 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
spin_lock_init(&pool->counter_pool_lock);
atomic_set(&pool->active_entries_count, 0);
- err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- &pool->pool_size);
+ err = devl_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
if (err)
goto err_pool_resource_size_get;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- mlxsw_sp_counter_pool_occ_get, pool);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
if (!pool->usage) {
@@ -157,8 +157,8 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
err_sub_pools_init:
bitmap_free(pool->usage);
err_usage_alloc:
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
kfree(pool);
return err;
@@ -174,8 +174,8 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
pool->pool_size);
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
}
@@ -262,12 +262,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, pool_size,
pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- MLXSW_SP_RESOURCE_NAME_COUNTERS,
- pool_size,
- MLXSW_SP_RESOURCE_COUNTERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
@@ -287,12 +287,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, sub_pool_size,
sub_pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- sub_pool->resource_name,
- sub_pool_size,
- sub_pool->resource_id,
- MLXSW_SP_RESOURCE_COUNTERS,
- &size_params);
+ err = devl_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
if (err)
return err;
total_bank_config += sub_pool->bank_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index a68d931090dd..15c8d4de8350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -8,8 +8,8 @@
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
- MLXSW_SP_COUNTER_SUB_POOL_FLOW,
MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 5f92b1691360..aff6d4f35cd2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -168,8 +168,6 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
struct dcb_app *app)
{
- int prio;
-
if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
netdev_err(dev, "APP entry with priority value %u is invalid\n",
app->priority);
@@ -183,17 +181,6 @@ static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
app->protocol);
return -EINVAL;
}
-
- /* Warn about any DSCP APP entries with the same PID. */
- prio = fls(dcb_ieee_getapp_mask(dev, app));
- if (prio--) {
- if (prio < app->priority)
- netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
- app->priority, app->protocol, prio);
- else if (prio > app->priority)
- netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
- app->priority, app->protocol, prio);
- }
break;
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 1a2fef2a5379..5416093c0e35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -266,10 +266,10 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
if (!rif)
continue;
if (enable)
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+ mlxsw_sp_rif_counter_alloc(rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
else
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
+ mlxsw_sp_rif_counter_free(rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
mutex_unlock(&mlxsw_sp->router->lock);
@@ -295,17 +295,17 @@ static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
- &mlxsw_sp_erif_ops,
- mlxsw_sp, false);
+ return devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, false);
}
static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+ devl_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
}
static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type)
@@ -749,25 +749,25 @@ static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
return err;
}
@@ -775,8 +775,8 @@ static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
}
static int
@@ -826,25 +826,25 @@ static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
return err;
}
@@ -852,8 +852,8 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
}
static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
@@ -1231,25 +1231,25 @@ static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
return err;
}
@@ -1257,8 +1257,8 @@ static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
}
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
@@ -1266,10 +1266,8 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_headers_register(devlink,
- &mlxsw_sp_dpipe_headers);
- if (err)
- return err;
+ devl_dpipe_headers_register(devlink, &mlxsw_sp_dpipe_headers);
+
err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
if (err)
goto err_erif_table_init;
@@ -1294,7 +1292,7 @@ err_host6_table_init:
err_host4_table_init:
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
err_erif_table_init:
- devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ devl_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
return err;
}
@@ -1306,5 +1304,5 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
- devlink_dpipe_headers_unregister(devlink);
+ devl_dpipe_headers_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 20530712eadb..dcd79d7e2af4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -14,16 +14,16 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ strscpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+ strscpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
mlxsw_sp->bus_info->fw_rev.major,
mlxsw_sp->bus_info->fw_rev.minor,
mlxsw_sp->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ strscpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
sizeof(drvinfo->bus_info));
}
@@ -568,14 +568,14 @@ struct mlxsw_sp_port_stats {
static u64
mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping;
struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
u64 stats;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_core,
- port_mapping.module,
- &stats);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index,
+ module, &stats);
if (err)
return mlxsw_sp_port->module_overheat_initial_val;
@@ -1034,13 +1034,11 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err;
- err = mlxsw_env_get_module_info(mlxsw_sp->core,
- mlxsw_sp_port->mapping.module,
- modinfo);
-
- return err;
+ return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
+ modinfo);
}
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
@@ -1048,13 +1046,11 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err;
-
- err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
- mlxsw_sp_port->mapping.module, ee,
- data);
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
- return err;
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index,
+ module, ee, data);
}
static int
@@ -1064,10 +1060,11 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page,
- extack);
+ return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
}
static int
@@ -1208,9 +1205,11 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index,
+ module, flags);
}
static int
@@ -1220,10 +1219,11 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
- extack);
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params, extack);
}
static int
@@ -1233,10 +1233,11 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
- params->policy, extack);
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params->policy, extack);
}
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
@@ -1273,12 +1274,22 @@ struct mlxsw_sp1_port_link_mode {
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
+ },
+ {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
},
{
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = SPEED_1000,
+ },
+ {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
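The strlcpy() to strscpy() conversion at the top of this file swaps in the preferred kernel string copy: strscpy() does not read the source past what it copies and reports truncation as -E2BIG instead of returning the would-be length. A minimal fragment showing the contract (illustrative, not driver code):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_copy(void)
{
	char buf[8];
	ssize_t n;

	n = strscpy(buf, "mlxsw_spectrum", sizeof(buf));
	if (n == -E2BIG)
		pr_warn("name truncated to \"%s\"\n", buf);
}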
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index ce80931f0402..045a24cacfa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -22,11 +22,18 @@ struct mlxsw_sp_fid_core {
unsigned int *port_fid_mappings;
};
+struct mlxsw_sp_fid_port_vid {
+ struct list_head list;
+ u16 local_port;
+ u16 vid;
+};
+
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
refcount_t ref_count;
u16 fid_index;
+ u16 fid_offset;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -37,6 +44,7 @@ struct mlxsw_sp_fid {
int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
+ struct list_head port_vid_list; /* Ordered by local port. */
};
struct mlxsw_sp_fid_8021q {
@@ -63,7 +71,6 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_reg_sfgc_bridge_type bridge_type;
enum mlxsw_flood_table_type table_type;
int table_index;
};
@@ -76,18 +83,18 @@ struct mlxsw_sp_fid_ops {
u16 *p_fid_index);
bool (*compare)(const struct mlxsw_sp_fid *fid,
const void *arg);
- u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
int (*port_vid_map)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
- int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ int (*vni_set)(struct mlxsw_sp_fid *fid);
void (*vni_clear)(struct mlxsw_sp_fid *fid);
- int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
+ int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif);
};
struct mlxsw_sp_fid_family {
@@ -102,7 +109,10 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- u8 lag_vid_valid:1;
+ bool flood_rsp;
+ enum mlxsw_reg_bridge_type bridge_type;
+ u16 pgt_base;
+ bool smpe_index_valid;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -137,11 +147,6 @@ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
return fid_family->start_index == fid_index;
}
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->lag_vid_valid;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
@@ -206,17 +211,20 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
int err;
- if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ if (WARN_ON(fid->nve_flood_index_valid))
return -EINVAL;
- err = ops->nve_flood_index_set(fid, nve_flood_index);
- if (err)
- return err;
-
fid->nve_flood_index = nve_flood_index;
fid->nve_flood_index_valid = true;
+ err = ops->nve_flood_index_set(fid);
+ if (err)
+ goto err_nve_flood_index_set;
return 0;
+
+err_nve_flood_index_set:
+ fid->nve_flood_index_valid = false;
+ return err;
}
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
@@ -224,7 +232,7 @@ void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
- if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ if (WARN_ON(!fid->nve_flood_index_valid))
return;
fid->nve_flood_index_valid = false;
@@ -244,7 +252,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
int err;
- if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ if (WARN_ON(fid->vni_valid))
return -EINVAL;
fid->nve_type = type;
@@ -256,15 +264,15 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
if (err)
return err;
- err = ops->vni_set(fid, vni);
+ fid->vni_valid = true;
+ err = ops->vni_set(fid);
if (err)
goto err_vni_set;
- fid->vni_valid = true;
-
return 0;
err_vni_set:
+ fid->vni_valid = false;
rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
mlxsw_sp_fid_vni_ht_params);
return err;
@@ -276,7 +284,7 @@ void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ if (WARN_ON(!fid->vni_valid))
return;
fid->vni_valid = false;
@@ -316,34 +324,43 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
return NULL;
}
+static u16
+mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
+{
+ return fid_family->end_index - fid_family->start_index + 1;
+}
+
+static u16
+mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 fid_offset)
+{
+ u16 num_fids;
+
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ return fid_family->pgt_base + num_fids * flood_table->table_index +
+ fid_offset;
+}
+
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr2_pl;
- int err;
+ u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ if (WARN_ON(!fid_family->flood_tables))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
- if (!sftr2_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
- sftr2_pl);
- kfree(sftr2_pl);
- return err;
+ mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
+ fid->fid_offset);
+ return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
+ fid->fid_index, local_port, member);
}
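The MID computed above indexes a flat PGT region per FID family: each flood table (UC/MC/BC in this patch, table_index 0..2) occupies a contiguous block of num_fids entries starting at the family's pgt_base. A standalone recomputation, using the 802.1Q defines introduced later in this file:

#include <stdio.h>

#define FID_8021Q_MAX		4094	/* VLAN_N_VID - 2 */
#define PGT_BASE_8021Q		0
#define PGT_BASE_8021D		(3 * FID_8021Q_MAX)

static unsigned int pgt_mid(unsigned int pgt_base, unsigned int num_fids,
			    unsigned int table_index, unsigned int fid_offset)
{
	return pgt_base + num_fids * table_index + fid_offset;
}

int main(void)
{
	/* BC flood entry (table_index 2) of the family's tenth FID
	 * (fid_offset 9): */
	printf("MID %u\n", pgt_mid(PGT_BASE_8021Q, FID_8021Q_MAX, 2, 9));
	return 0;
}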
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
@@ -370,11 +387,6 @@ enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
return fid->fid_family->type;
}
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
-{
- fid->rif = rif;
-}
-
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
{
return fid->rif;
@@ -405,6 +417,7 @@ static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -413,38 +426,341 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 fid_offset, bool valid)
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
- fid_offset);
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
+ fid->fid_offset, fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- __be32 vni, bool vni_valid, u32 nve_flood_index,
- bool nve_flood_index_valid)
+static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ fid->fid_index, fid->fid_offset,
+ fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
+
+ if (rif) {
+ mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
+ mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
+ }
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
- 0);
- mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
- mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
- mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
- mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif,
+ bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
+ be32_to_cpu(fid->vni), irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_edit_op(fid, rif);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ if (!fid->vni_valid)
+ return 0;
+
+ return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
+}
+
+static int
+mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
+ irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int
+mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ /* Update the global VID => FID mapping we created when the FID was
+ * configured.
+ */
+ return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
+}
+
+static int
+mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_fid_port_vid *pv,
+ bool irif_valid, u16 irif_index)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
+ fid->fid_index, pv->vid, irif_valid,
+ irif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+ u16 irif_index;
+ int err;
+
+ err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ irif_index = mlxsw_sp_rif_index(rif);
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated with the
+ * ingress RIF.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
+ true,
+ irif_index);
+ if (err)
+ goto err_port_vid_to_fid_rif_update_one;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_rif_update_one:
+ list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
+ bool valid, u8 port_page)
+{
+ u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
+ u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *port_vid;
+ u8 rec_num, entries_num = 0;
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+
+ list_for_each_entry(port_vid, &fid->port_vid_list, list) {
+ /* port_vid_list is sorted by local_port. */
+ if (port_vid->local_port < local_port_start)
+ continue;
+
+ if (port_vid->local_port > local_port_end)
+ break;
+
+ rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
+ valid ? port_vid->vid : 0);
+ entries_num++;
+ }
+
+ if (!entries_num) {
+ kfree(reiv_pl);
+ return 0;
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ if (err)
+ goto err_reg_write;
+
+ kfree(reiv_pl);
+ return 0;
+
+err_reg_write:
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
+ u16 rif_index, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u8 num_port_pages;
+ int err, i;
+
+ num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
+ MLXSW_REG_REIV_REC_MAX_COUNT + 1;
+
+ for (i = 0; i < num_port_pages; i++) {
+ err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
+ if (err)
+ goto err_reiv_handle;
+ }
+
+ return 0;
+
+err_reiv_handle:
+ for (; i >= 0; i--)
+ mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
+ return err;
+}
+
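+/* mlxsw_sp_fid_erif_eport_to_vid_map() above walks the port space one
+ * REIV page at a time: each register write covers up to
+ * MLXSW_REG_REIV_REC_MAX_COUNT consecutive local ports, and because
+ * port_vid_list is kept sorted by local port, the per-page pass in
+ * mlxsw_sp_fid_reiv_handle() can skip ports below the page and stop at
+ * the first port beyond it. The (page, record) split is plain division
+ * and remainder; a standalone sketch, with the record count assumed to
+ * be 256 for illustration (the real value comes from the register
+ * definition):
+ *
+ *	#include <stdio.h>
+ *
+ *	#define REC_MAX_COUNT 256	// assumed value
+ *
+ *	int main(void)
+ *	{
+ *		unsigned int local_port = 300;
+ *
+ *		printf("port %u -> page %u, record %u\n", local_port,
+ *		       local_port / REC_MAX_COUNT,
+ *		       local_port % REC_MAX_COUNT);
+ *		return 0;
+ *	}
+ */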
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ u16 rif_index = mlxsw_sp_rif_index(rif);
+ int err;
+
+ err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
+ if (err)
+ goto err_vni_to_fid_rif_update;
+
+ err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
+ if (err)
+ goto err_vid_to_fid_rif_set;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
+ if (err)
+ goto err_erif_eport_to_vid_map;
+
+ fid->rif = rif;
+ return 0;
+
+err_erif_eport_to_vid_map:
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+err_vid_to_fid_rif_set:
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+err_vni_to_fid_rif_update:
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
+{
+ u16 rif_index;
+
+ if (!fid->rif)
+ return;
+
+ rif_index = mlxsw_sp_rif_index(fid->rif);
+ fid->rif = NULL;
+
+ mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_edit_op(fid, fid->rif);
+ if (err)
+ goto err_fid_edit_op;
+
+ return 0;
+
+err_fid_edit_op:
+ mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
+ return err;
+}
+
+static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid = false;
+ u16 irif_index = 0;
+
+ if (fid->rif) {
+ irif_valid = true;
+ irif_index = mlxsw_sp_rif_index(fid->rif);
+ }
- mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
+ vid, irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
@@ -459,20 +775,19 @@ static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
if (fid->vni_valid)
mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
@@ -498,14 +813,8 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
}
-static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index - VLAN_N_VID;
-}
-
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
int err;
@@ -517,7 +826,7 @@ static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ err = __mlxsw_sp_fid_port_vid_map(fid,
mlxsw_sp_port->local_port,
vid, true);
if (err)
@@ -540,8 +849,7 @@ err_fid_port_vid_map:
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
return err;
@@ -549,7 +857,6 @@ err_fid_port_vid_map:
static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -562,12 +869,108 @@ static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
}
+static int
+mlxsw_sp_fid_port_vid_list_add(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp_port_vid;
+
+ port_vid = kzalloc(sizeof(*port_vid), GFP_KERNEL);
+ if (!port_vid)
+ return -ENOMEM;
+
+ port_vid->local_port = local_port;
+ port_vid->vid = vid;
+
+ list_for_each_entry(tmp_port_vid, &fid->port_vid_list, list) {
+ if (tmp_port_vid->local_port > local_port)
+ break;
+ }
+
+ list_add_tail(&port_vid->list, &tmp_port_vid->list);
+ return 0;
+}
+
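+/* mlxsw_sp_fid_port_vid_list_add() above uses the standard
+ * list_for_each_entry() sorted-insert idiom: the cursor stops at the
+ * first entry with a larger key, or wraps back to the list head when
+ * none is larger, and list_add_tail() inserts just before the cursor.
+ * That is what keeps port_vid_list ordered by local port, as the REIV
+ * walk earlier in this file expects. A generic sketch of the idiom:
+ *
+ *	#include <linux/list.h>
+ *
+ *	struct item {
+ *		int key;
+ *		struct list_head list;
+ *	};
+ *
+ *	static void sorted_add(struct list_head *head, struct item *new)
+ *	{
+ *		struct item *pos;
+ *
+ *		list_for_each_entry(pos, head, list)
+ *			if (pos->key > new->key)
+ *				break;
+ *		// With no break, &pos->list is the head itself, so this
+ *		// degenerates to appending at the tail.
+ *		list_add_tail(&new->list, &pos->list);
+ *	}
+ */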
+static void
+mlxsw_sp_fid_port_vid_list_del(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
+
+ list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
+ if (port_vid->local_port != local_port || port_vid->vid != vid)
+ continue;
+
+ list_del(&port_vid->list);
+ kfree(port_vid);
+ return;
+ }
+}
+
+static int
+mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char smpe_pl[MLXSW_REG_SMPE_LEN];
+
+ mlxsw_reg_smpe_pack(smpe_pl, local_port, fid->fid_index,
+ valid ? vid : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
+}
+
+static int
+mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
+ u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = mlxsw_sp_rif_index(fid->rif);
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
+ if (err)
+ return err;
+
+ if (!fid->rif)
+ return 0;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ valid);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+
+ return 0;
+
+err_erif_eport_to_vid_map_one:
+ mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
+ return err;
+}
+
static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
@@ -576,11 +979,20 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, true);
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
if (err)
return err;
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -591,8 +1003,11 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
return err;
}
@@ -606,43 +1021,29 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
}
-static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
- true, fid->nve_flood_index,
- fid->nve_flood_index_valid);
+ return mlxsw_sp_fid_vni_op(fid);
}
static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
- fid->nve_flood_index, fid->nve_flood_index_valid);
+ mlxsw_sp_fid_vni_op(fid);
}
-static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index)
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
- fid->vni, fid->vni_valid, nve_flood_index,
- true);
+ return mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
- fid->vni_valid, 0, false);
+ mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void
@@ -652,13 +1053,19 @@ mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, 0);
}
+static int
+mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021d_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
@@ -666,42 +1073,32 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
};
+#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
+#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
+#define MLXSW_SP_FID_8021Q_PGT_BASE 0
+#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
+
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
.packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 0,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 1,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 2,
},
};
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
- .type = MLXSW_SP_FID_TYPE_8021D,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
- .start_index = VLAN_N_VID,
- .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
- .lag_vid_valid = 1,
-};
-
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -717,48 +1114,19 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021d_configure,
- .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
- .vni_set = mlxsw_sp_fid_8021d_vni_set,
- .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
- .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
- .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
- .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
-};
-
-/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
-#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
-#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
- VLAN_VID_MASK - 2)
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = MLXSW_SP_FID_8021Q_EMU_START,
- .end_index = MLXSW_SP_FID_8021Q_EMU_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_emu_ops,
- .lag_vid_valid = 1,
-};
+static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
{
- /* rFIDs are allocated by the device during init */
- return 0;
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
{
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
@@ -787,9 +1155,28 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- /* We only need to transition the port to virtual mode since
- * {Port, VID} => FID is done by the firmware upon RIF creation.
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ return err;
+
+ /* With the legacy bridge model we only need to transition the port to
+ * virtual mode, since {Port, VID} => FID is done by the firmware upon
+ * RIF creation. With the unified bridge model we also need to map
+ * {Port, VID} => FID and map the egress VID.
*/
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ goto err_port_vid_map;
+
+ if (fid->rif) {
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
+ vid, true);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+ }
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -800,6 +1187,13 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+err_erif_eport_to_vid_map_one:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+err_port_vid_map:
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
return err;
}
@@ -813,39 +1207,69 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+}
+
+static int mlxsw_sp_fid_rfid_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_rfid_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int
+mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .setup = mlxsw_sp_fid_rfid_setup,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
.compare = mlxsw_sp_fid_rfid_compare,
.port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
};
-#define MLXSW_SP_RFID_BASE (15 * 1024)
-#define MLXSW_SP_RFID_MAX 1024
-
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
- .type = MLXSW_SP_FID_TYPE_RFID,
- .fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE,
- .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
- .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
-};
+static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
-
- return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
{
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
@@ -862,26 +1286,252 @@ static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
return true;
}
+static int mlxsw_sp_fid_dummy_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_dummy_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .setup = mlxsw_sp_fid_dummy_setup,
.configure = mlxsw_sp_fid_dummy_configure,
.deconfigure = mlxsw_sp_fid_dummy_deconfigure,
.index_alloc = mlxsw_sp_fid_dummy_index_alloc,
.compare = mlxsw_sp_fid_dummy_compare,
+ .vni_set = mlxsw_sp_fid_dummy_vni_set,
+ .vni_clear = mlxsw_sp_fid_dummy_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+};
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ int err;
+
+ err = mlxsw_sp_fid_op(fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
+ if (err)
+ goto err_vid_to_fid_map;
+
+ return 0;
+
+err_vid_to_fid_map:
+ mlxsw_sp_fid_op(fid, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+
+ mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
(Note: local_port is u16 elsewhere in this series, e.g. in mlxsw_sp_fid_reiv_handle(); a u8 here would truncate local ports above 255.)
+ int err;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured; otherwise, configure a new mapping.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
+ err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ return 0;
+
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+};
+
+/* There are 4K-2 802.1Q FIDs */
+#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
+#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
+ MLXSW_SP_FID_8021Q_MAX - 1)
+
+/* There are 1K 802.1D FIDs */
+#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
+#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
+ MLXSW_SP_FID_8021D_MAX - 1)
+
+/* There is one dummy FID */
+#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
+
+/* There are 11K rFIDs */
+#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
+#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
+ MLXSW_SP_FID_RFID_MAX - 1)
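Taken together, these defines pin down the whole FID index layout. The following stand-alone sketch recomputes the ranges; the *_MAX values are assumptions read off the "4K-2", "1K" and "11K" comments above, not definitions from this patch:

	#include <stdio.h>

	#define FID_8021Q_MAX	4094		/* "4K-2" (assumed) */
	#define FID_8021D_MAX	1024		/* "1K" (assumed) */
	#define FID_RFID_MAX	(11 * 1024)	/* "11K" (assumed) */

	#define FID_8021Q_START	1		/* FID 0 is reserved */
	#define FID_8021Q_END	(FID_8021Q_START + FID_8021Q_MAX - 1)
	#define FID_8021D_START	(FID_8021Q_END + 1)
	#define FID_8021D_END	(FID_8021D_START + FID_8021D_MAX - 1)
	#define FID_DUMMY	(FID_8021D_END + 1)
	#define RFID_START	(FID_DUMMY + 1)
	#define RFID_END	(RFID_START + FID_RFID_MAX - 1)

	int main(void)
	{
		printf("802.1Q FIDs: %d..%d\n", FID_8021Q_START, FID_8021Q_END);	/* 1..4094 */
		printf("802.1D FIDs: %d..%d\n", FID_8021D_START, FID_8021D_END);	/* 4095..5118 */
		printf("dummy FID:   %d\n", FID_DUMMY);					/* 5119 */
		printf("rFIDs:       %d..%d\n", RFID_START, RFID_END);			/* 5120..16383 */
		return 0;
	}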
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+ .flood_rsp = true,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = VLAN_N_VID - 1,
- .end_index = VLAN_N_VID - 1,
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
- [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -919,6 +1569,8 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fid->port_vid_list);
fid->fid_family = fid_family;
err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
@@ -927,8 +1579,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- if (fid->fid_family->ops->setup)
- fid->fid_family->ops->setup(fid, arg);
+ fid->fid_family->ops->setup(fid, arg);
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -967,6 +1618,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
fid->fid_family->ops->deconfigure(fid);
__clear_bit(fid->fid_index - fid_family->start_index,
fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid->port_vid_list));
kfree(fid);
}
@@ -1010,26 +1662,49 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
const struct mlxsw_sp_flood_table *flood_table)
{
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
const int *sfgc_packet_types;
- int i;
+ u16 num_fids, mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
+ if (err)
+ return err;
sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
char sfgc_pl[MLXSW_REG_SFGC_LEN];
- int err;
if (!sfgc_packet_types[i])
continue;
- mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
- flood_table->table_type,
- flood_table->table_index);
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
- return err;
+ goto err_reg_write;
}
return 0;
+
+err_reg_write:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 num_fids, mid_base;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
}
static int
@@ -1050,6 +1725,19 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
return 0;
}
+static void
+mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_tables[i];
+ mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
+ }
+}
+
static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fid_family *tmpl)
{
@@ -1091,6 +1779,10 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid_family *fid_family)
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+
+ if (fid_family->flood_tables)
+ mlxsw_sp_fid_flood_tables_fini(fid_family);
+
bitmap_free(fid_family->fids_bitmap);
WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
kfree(fid_family);
@@ -1144,7 +1836,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp_fid_family_arr[i]);
+ mlxsw_sp->fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index bb417db773b9..e91fb205e0b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -15,6 +15,46 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
+static int mlxsw_sp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
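The checks above reduce to a small decision table: the exceed action must be drop, the conform action must be pipe or accept (accept only as the last entry), and peakrate, avrate, overhead and packets-per-second rates are all unsupported. A minimal sketch of the same table, with simplified stand-ins for the kernel's flow_action types:

	#include <stdio.h>

	enum act_id { ACT_DROP, ACT_PIPE, ACT_ACCEPT };

	struct police {					/* simplified stand-in */
		enum act_id exceed, notexceed;
		int is_last;				/* conform-accept must be last */
		unsigned long long peakrate, avrate, overhead, rate_pkt_ps;
	};

	static int validate(const struct police *p)
	{
		if (p->exceed != ACT_DROP)
			return -1;
		if (p->notexceed != ACT_PIPE && p->notexceed != ACT_ACCEPT)
			return -1;
		if (p->notexceed == ACT_ACCEPT && !p->is_last)
			return -1;
		if (p->peakrate || p->avrate || p->overhead || p->rate_pkt_ps)
			return -1;
		return 0;				/* offloadable */
	}

	int main(void)
	{
		struct police ok = { ACT_DROP, ACT_PIPE, 1, 0, 0, 0, 0 };
		struct police bad = { ACT_DROP, ACT_ACCEPT, 0, 0, 0, 0, 0 };

		printf("%d %d\n", validate(&ok), validate(&bad));	/* 0 -1 */
		return 0;
	}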
+
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -191,10 +231,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ err = mlxsw_sp_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
/* The kernel might adjust the requested burst size so
* that it is not exactly a power of two. Re-adjust it
@@ -233,6 +272,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
}
+
+ if (rulei->ipv6_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 01cf5a6a26bd..a2ee695a3f17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -568,10 +568,8 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- struct __ip6_tnl_parm parms6;
-
- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+ &ipip_entry->parms.daddr.addr6,
&ipip_entry->dip_kvdl_index);
}
@@ -579,10 +577,7 @@ static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
- struct __ip6_tnl_parm parms6;
-
- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
new file mode 100644
index 000000000000..7dd3dba0fa83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/refcount.h>
+#include <linux/idr.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_pgt {
+ struct idr pgt_idr;
+ u16 end_index; /* Exclusive. */
+ struct mutex lock; /* Protects PGT. */
+ bool smpe_index_valid;
+};
+
+struct mlxsw_sp_pgt_entry {
+ struct list_head ports_list;
+ u16 index;
+ u16 smpe_index;
+};
+
+struct mlxsw_sp_pgt_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+};
+
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
+{
+ int index, err = 0;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
+ mlxsw_sp->pgt->end_index, GFP_KERNEL);
+
+ if (index < 0) {
+ err = index;
+ goto err_idr_alloc;
+ }
+
+ *p_mid = index;
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
+{
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int
+mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ unsigned int idr_cursor;
+ int i, err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+	/* This function is supposed to be called several times as part of
+	 * driver init, in a specific order. Verify that mid_base is the
+	 * first free index in the IDR, so that the allocated indexes can
+	 * be freed in case of error.
+	 */
+ idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
+ if (WARN_ON(idr_cursor != mid_base)) {
+ err = -EINVAL;
+ goto err_idr_cursor;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
+ mid_base, mid_base + count, GFP_KERNEL);
+ if (err < 0)
+ goto err_idr_alloc_cyclic;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc_cyclic:
+ for (i--; i >= 0; i--)
+ idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
+err_idr_cursor:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
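The unwind loop is the interesting part: if the i-th allocation fails, only the i entries already taken are returned, in reverse. The same reserve-or-roll-back pattern in a free-standing sketch, with a plain bitmap standing in for the kernel IDR:

	#include <stdbool.h>
	#include <stdio.h>

	#define SLOTS 64
	static bool used[SLOTS];

	static int reserve_range(int base, int count)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (base + i >= SLOTS || used[base + i])
				goto err_unwind;
			used[base + i] = true;
		}
		return 0;

	err_unwind:
		for (i--; i >= 0; i--)		/* release the partial grab */
			used[base + i] = false;
		return -1;
	}

	int main(void)
	{
		used[10] = true;			/* pre-existing entry in the way */
		printf("%d\n", reserve_range(8, 4));	/* -1; slots 8-9 rolled back */
		printf("%d\n", reserve_range(0, 8));	/* 0 */
		return 0;
	}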
+
+void
+mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
+ int i;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ for (i = 0; i < count; i++)
+ WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+
+ list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
+ if (pgt_entry_port->local_port == local_port)
+ return pgt_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ void *ret;
+ int err;
+
+ pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
+ if (!pgt_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto err_idr_replace;
+ }
+
+ INIT_LIST_HEAD(&pgt_entry->ports_list);
+ pgt_entry->index = mid;
+ pgt_entry->smpe_index = smpe;
+ return pgt_entry;
+
+err_idr_replace:
+ kfree(pgt_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
+ struct mlxsw_sp_pgt_entry *pgt_entry)
+{
+ WARN_ON(!list_empty(&pgt_entry->ports_list));
+
+ pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
+ if (WARN_ON(IS_ERR(pgt_entry)))
+ return;
+
+ kfree(pgt_entry);
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (pgt_entry)
+ return pgt_entry;
+
+ return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
+}
+
+static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (WARN_ON(!pgt_entry))
+ return;
+
+ if (list_empty(&pgt_entry->ports_list))
+ mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
+}
+
+static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
+ bool member)
+{
+ mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
+}
+
+static int
+mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port, bool member)
+{
+ char *smid2_pl;
+ int err;
+
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
+ mlxsw_sp->pgt->smpe_index_valid,
+ pgt_entry->smpe_index);
+
+ mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+
+ kfree(smid2_pl);
+
+ return err;
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ int err;
+
+ pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
+ if (!pgt_entry_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
+ true);
+ if (err)
+ goto err_pgt_entry_port_write;
+
+ pgt_entry_port->local_port = local_port;
+ list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
+
+ return pgt_entry_port;
+
+err_pgt_entry_port_write:
+ kfree(pgt_entry_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
+
+{
+ list_del(&pgt_entry_port->list);
+ mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
+ pgt_entry_port->local_port, false);
+ kfree(pgt_entry_port);
+}
+
+static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ int err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
+ if (IS_ERR(pgt_entry)) {
+ err = PTR_ERR(pgt_entry);
+ goto err_pgt_entry_get;
+ }
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
+ local_port);
+ if (IS_ERR(pgt_entry_port)) {
+ err = PTR_ERR(pgt_entry_port);
+ goto err_pgt_entry_port_get;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_pgt_entry_port_get:
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+err_pgt_entry_get:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
+ u16 mid, u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
+ if (!pgt_entry)
+ goto out;
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
+ if (!pgt_entry_port)
+ goto out;
+
+ mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+
+out:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member)
+{
+ if (member)
+ return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
+ local_port);
+
+ mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
+ return 0;
+}
+
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_pgt *pgt;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
+ return -EIO;
+
+ pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
+ if (!pgt)
+ return -ENOMEM;
+
+ idr_init(&pgt->pgt_idr);
+ pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
+ mutex_init(&pgt->lock);
+ pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
+ mlxsw_sp->pgt = pgt;
+ return 0;
+}
+
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->pgt->lock);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
+ idr_destroy(&mlxsw_sp->pgt->pgt_idr);
+ kfree(mlxsw_sp->pgt);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
index 39052e5c12fd..22ebb207ce4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
@@ -94,10 +94,10 @@ mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family)
atomic_set(&family->policers_count, 0);
devlink = priv_to_devlink(core);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- mlxsw_sp_policer_single_rate_occ_get,
- family);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ mlxsw_sp_policer_single_rate_occ_get,
+ family);
return 0;
}
@@ -107,8 +107,8 @@ mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family)
{
struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
WARN_ON(atomic_read(&family->policers_count) != 0);
}
@@ -419,22 +419,22 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, global_policers,
global_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "global_policers",
- global_policers,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink, "global_policers",
+ global_policers,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, single_rate_policers,
single_rate_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "single_rate_policers",
- single_rate_policers,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- &size_params);
+ err = devl_resource_register(devlink, "single_rate_policers",
+ single_rate_policers,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ &size_params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 0ff163fbc775..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -11,6 +11,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "spectrum_ptp.h"
@@ -29,12 +30,25 @@
struct mlxsw_sp_ptp_state {
struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp1_ptp_state {
+ struct mlxsw_sp_ptp_state common;
struct rhltable unmatched_ht;
spinlock_t unmatched_lock; /* protects the HT */
struct delayed_work ht_gc_dw;
u32 gc_cycle;
};
+struct mlxsw_sp2_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+ * enabled.
+ */
+ struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
+};
+
struct mlxsw_sp1_ptp_key {
u16 local_port;
u8 message_type;
@@ -60,20 +74,44 @@ static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
struct mlxsw_sp_ptp_clock {
struct mlxsw_core *core;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+};
+
+struct mlxsw_sp1_ptp_clock {
+ struct mlxsw_sp_ptp_clock common;
spinlock_t lock; /* protect this structure */
struct cyclecounter cycles;
struct timecounter tc;
u32 nominal_c_mult;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
unsigned long overflow_period;
struct delayed_work overflow_work;
};
-static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
+static struct mlxsw_sp1_ptp_state *
+mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp1_ptp_clock *
+mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
+}
+
+static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u32 frc_h1, frc_h2, frc_l;
frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
@@ -94,20 +132,20 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(cc, struct mlxsw_sp_ptp_clock, cycles);
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}
static int
-mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
+mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
struct mlxsw_core *mlxsw_core = clock->core;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
- freq_adj, 0);
+ freq_adj, 0, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
@@ -122,9 +160,9 @@ static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
}
static int
-mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u64 next_sec, next_sec_in_nsec, cycles;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
char mtpps_pl[MLXSW_REG_MTPPS_LEN];
@@ -144,14 +182,13 @@ mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
mlxsw_reg_mtutc_pack(mtutc_pl,
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
- 0, next_sec);
+ 0, next_sec, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
int neg_adj = 0;
u32 diff;
u64 adj;
@@ -174,13 +211,12 @@ static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
clock->nominal_c_mult + diff;
spin_unlock_bh(&clock->lock);
- return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
+ return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
}
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec;
spin_lock_bh(&clock->lock);
@@ -195,8 +231,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 cycles, nsec;
spin_lock_bh(&clock->lock);
@@ -212,8 +247,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec = timespec64_to_ns(ts);
spin_lock_bh(&clock->lock);
@@ -237,9 +271,9 @@ static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
- clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);
+ clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
spin_lock_bh(&clock->lock);
timecounter_read(&clock->tc);
@@ -251,7 +285,7 @@ struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
u64 overflow_cycles, nsec, frac = 0;
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
int err;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
@@ -265,10 +299,9 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
- clock->core = mlxsw_sp->core;
+ clock->common.core = mlxsw_sp->core;
- timecounter_init(&clock->tc, &clock->cycles,
- ktime_to_ns(ktime_get_real()));
+ timecounter_init(&clock->tc, &clock->cycles, 0);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -286,7 +319,158 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
mlxsw_core_schedule_dw(&clock->overflow_work, 0);
- clock->ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
+ if (IS_ERR(clock->common.ptp)) {
+ err = PTR_ERR(clock->common.ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return &clock->common;
+
+err_ptp_clock_register:
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
+{
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
+ ptp_clock_unregister(clock_common->ptp);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+}
+
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 utc_sec1, utc_sec2, utc_nsec;
+
+ utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (utc_sec1 != utc_sec2) {
+ /* Wrap around. */
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
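The seconds register is read on both sides of the nanoseconds read so a mid-read rollover can be detected and the nanoseconds re-sampled. The same idea in a free-standing sketch, with volatile globals standing in for the two UTC registers:

	#include <stdint.h>
	#include <stdio.h>

	static volatile uint32_t hw_sec = 41, hw_nsec = 999999;

	static uint64_t read_utc_ns(void)
	{
		uint32_t s1, ns, s2;

		s1 = hw_sec;
		ns = hw_nsec;
		s2 = hw_sec;
		if (s1 != s2)	/* seconds ticked mid-read: re-sample nsec */
			ns = hw_nsec;

		return (uint64_t)s2 * 1000000000ULL + ns;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)read_utc_ns());
		return 0;
	}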
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ u32 sec, nsec_rem;
+
+ sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+ 0, sec, nsec_rem, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+	/* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+	 * reversed: positive values decrease the frequency. Adjust the sign
+	 * of the PPB value to match this behavior.
+	 */
+ return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
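scaled_ppm carries a 16-bit binary fraction, so 65536 corresponds to 1 ppm, roughly 1000 ppb. A worked sketch of the conversion and the Spectrum-2 sign flip; the shift below approximates the kernel's scaled_ppm_to_ppb() and is not the exact helper:

	#include <stdio.h>

	int main(void)
	{
		long scaled_ppm = 65536;		/* user space asks for +1 ppm */
		long ppb = (scaled_ppm * 1000L) >> 16;	/* ~1000 ppb (approximation) */
		long mtutc_adj = -ppb;			/* positive MTUTC values slow the clock */

		printf("ppb=%ld, written to MTUTC: %ld\n", ppb, mtutc_adj);
		return 0;
	}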
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ /* HW time adjustment range is s16. If out of range, set time instead. */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+ nsec += delta;
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+ }
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+ 0, 0, 0, delta);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
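MTUTC's ADJUST_TIME operation only accepts a signed 16-bit nanosecond delta, hence the fallback to read-modify-set for larger offsets. The branch in miniature:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t deltas[] = { 100, -30000, 40000 };	/* ns */

		for (int i = 0; i < 3; i++) {
			if (deltas[i] < INT16_MIN || deltas[i] > INT16_MAX)
				printf("%lld: read UTC, add delta, set time\n",
				       (long long)deltas[i]);
			else
				printf("%lld: single ADJUST_TIME write\n",
				       (long long)deltas[i]);
		}
		return 0;
	}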
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+ .adjfine = mlxsw_sp2_ptp_adjfine,
+ .adjtime = mlxsw_sp2_ptp_adjtime,
+ .gettimex64 = mlxsw_sp2_ptp_gettimex,
+ .settime64 = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->core = mlxsw_sp->core;
+
+ clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+ err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+ if (err) {
+ dev_err(dev, "setting UTC time failed %d\n", err);
+ goto err_ptp_phc_settime;
+ }
+
clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
if (IS_ERR(clock->ptp)) {
err = PTR_ERR(clock->ptp);
@@ -297,15 +481,14 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
return clock;
err_ptp_clock_register:
- cancel_delayed_work_sync(&clock->overflow_work);
+err_ptp_phc_settime:
kfree(clock);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
ptp_clock_unregister(clock->ptp);
- cancel_delayed_work_sync(&clock->overflow_work);
kfree(clock);
}
@@ -348,7 +531,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
u64 timestamp)
{
int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
- struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int err;
@@ -359,7 +542,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
unmatched->key = key;
unmatched->skb = skb;
unmatched->timestamp = timestamp;
- unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
+ unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
@@ -373,11 +556,12 @@ static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key, int *p_length)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
struct rhlist_head *tmp, *list;
int length = 0;
- list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
mlxsw_sp1_ptp_unmatched_ht_params);
rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
last = unmatched;
@@ -392,7 +576,9 @@ static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
- return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+
+ return rhltable_remove(&ptp_state->unmatched_ht,
&unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
}
@@ -438,12 +624,16 @@ static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb,
u64 timestamp)
{
+ struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
struct skb_shared_hwtstamps hwtstamps;
u64 nsec;
- spin_lock_bh(&mlxsw_sp->clock->lock);
- nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp);
- spin_unlock_bh(&mlxsw_sp->clock->lock);
+ spin_lock_bh(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ spin_unlock_bh(&clock->lock);
hwtstamps.hwtstamp = ns_to_ktime(nsec);
mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
@@ -481,13 +671,14 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb, u64 timestamp)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int length;
int err;
rcu_read_lock();
- spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_lock(&ptp_state->unmatched_lock);
unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
if (skb && unmatched && unmatched->timestamp) {
@@ -515,7 +706,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(err);
}
- spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_unlock(&ptp_state->unmatched_lock);
if (unmatched)
mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
@@ -568,12 +759,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp1_ptp_key key;
u8 types;
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
@@ -612,9 +802,10 @@ void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
+mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
+ struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
struct mlxsw_sp_ptp_port_dir_stats *stats;
struct mlxsw_sp_port *mlxsw_sp_port;
int err;
@@ -637,7 +828,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
/* The packet was matched with timestamp during the walk. */
goto out;
- mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
+ mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
if (mlxsw_sp_port) {
stats = unmatched->key.ingress ?
&mlxsw_sp_port->ptp.stats.rx_gcd :
@@ -654,7 +845,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
* netif_receive_skb(), in process context, is seen elsewhere in the
* kernel, notably in pktgen.
*/
- mlxsw_sp1_ptp_unmatched_finish(ptp_state->mlxsw_sp, unmatched);
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
out:
local_bh_enable();
@@ -664,12 +855,12 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp1_ptp_unmatched *unmatched;
- struct mlxsw_sp_ptp_state *ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state;
struct rhashtable_iter iter;
u32 gc_cycle;
void *obj;
- ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
+ ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
gc_cycle = ptp_state->gc_cycle++;
rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
@@ -695,7 +886,7 @@ static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
{
char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
- mlxsw_reg_mtptptp_pack(mtptpt_pl, trap_id, message_type);
+ mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
}
@@ -808,10 +999,44 @@ static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 event_message_type;
+ int err;
+
+ /* Deliver these message types as PTP0. */
+ event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ event_message_type);
+ if (err)
+ return err;
+
+ /* Everything else is PTP1. */
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ ~event_message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ return 0;
+
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
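The split is just a pair of complementary masks over the 16 possible PTP message types: Sync (0), Delay_Req (1), Pdelay_Req (2) and Pdelay_Resp (3) are trapped as PTP0, everything else as PTP1. As a sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned short event = (1u << 0) | (1u << 1) | (1u << 2) | (1u << 3);
		unsigned short other = (unsigned short)~event;

		printf("PTP0 mask: 0x%04x\n", event);	/* 0x000f: event messages */
		printf("PTP1 mask: 0x%04x\n", other);	/* 0xfff0: everything else */
		return 0;
	}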
+
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_ptp_state *ptp_state;
- u16 message_type;
+ struct mlxsw_sp1_ptp_state *ptp_state;
int err;
err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
@@ -821,7 +1046,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
- ptp_state->mlxsw_sp = mlxsw_sp;
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
spin_lock_init(&ptp_state->unmatched_lock);
@@ -830,22 +1055,9 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_hashtable_init;
- /* Delive these message types as PTP0. */
- message_type = BIT(PTP_MSGTYPE_SYNC) |
- BIT(PTP_MSGTYPE_DELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_RESP);
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
- message_type);
- if (err)
- goto err_mtptpt_set;
-
- /* Everything else is PTP1. */
- message_type = ~message_type;
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
- message_type);
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
if (err)
- goto err_mtptpt1_set;
+ goto err_ptp_traps_set;
err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
if (err)
@@ -854,28 +1066,28 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
MLXSW_SP1_PTP_HT_GC_INTERVAL);
- return ptp_state;
+ return &ptp_state->common;
err_fifo_clr:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-err_mtptpt1_set:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-err_mtptpt_set:
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
{
- struct mlxsw_sp *mlxsw_sp = ptp_state->mlxsw_sp;
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp1_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
rhltable_free_and_destroy(&ptp_state->unmatched_ht,
&mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
@@ -888,9 +1100,10 @@ int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
- u16 *p_ing_types, u16 *p_egr_types,
- enum hwtstamp_rx_filters *p_rx_filter)
+static int
+mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
{
enum hwtstamp_rx_filters rx_filter = config->rx_filter;
enum hwtstamp_tx_types tx_type = config->tx_type;
@@ -1051,8 +1264,8 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 egr_types;
int err;
- err = mlxsw_sp_ptp_get_message_types(config, &ing_types, &egr_types,
- &rx_filter);
+ err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
+ &rx_filter);
if (err)
return err;
@@ -1145,3 +1358,369 @@ void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
*data++ = *(u64 *)(stats + offset);
}
}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
+ return &ptp_state->common;
+
+err_ptp_traps_set:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+ mutex_destroy(&ptp_state->lock);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+ u8 cqe_ts_sec)
+{
+ u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (cqe_ts_sec > (utc_sec & 0xff))
+		/* A time stamp above the low byte of UTC (UTC & 0xff) means
+		 * the latter has wrapped after the time stamp was collected.
+		 */
+ utc_sec -= 256;
+
+ utc_sec &= ~0xff;
+ utc_sec |= cqe_ts_sec;
+
+ return utc_sec;
+}
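Worked example: with the free-running UTC seconds at 0x1234 and 0x56 in the CQE's 8-bit seconds field, 0x56 > 0x34 implies the low byte wrapped after the stamp was taken, so the result is 0x1156 rather than 0x1256. A free-standing sketch of the splice:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t stamp_sec(uint32_t utc_sec, uint8_t cqe_ts_sec)
	{
		if (cqe_ts_sec > (utc_sec & 0xff))
			utc_sec -= 256;		/* UTC wrapped its low byte */
		return (utc_sec & ~0xffu) | cqe_ts_sec;
	}

	int main(void)
	{
		assert(stamp_sec(0x1234, 0x56) == 0x1156);	/* wrapped case */
		assert(stamp_sec(0x1234, 0x12) == 0x1212);	/* no wrap */
		return 0;
	}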
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_skb_cb *cb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ u64 ts_sec, ts_nsec, nsec;
+
+ WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+	/* The time stamp in the CQE is represented by 38 bits, which is a
+	 * short representation of UTC time. Software should create the full
+	 * time stamp using the global UTC clock. The seconds have only 8 bits
+	 * in the CQE, so to create the full time stamp, use the current UTC
+	 * time and fix the seconds according to the relation between UTC
+	 * seconds and CQE seconds.
+	 */
+ ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+ ts_nsec = cb->cqe_ts.nsec;
+
+ nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ *skb_hwtstamps(skb) = hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ skb_tstamp_tx(skb, &hwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ mutex_lock(&ptp_state->lock);
+ *config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ *p_rx_filter = rx_filter;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		/* In Spectrum-2 and above, all packets get a time stamp by
+		 * default and the driver fills in the time stamp only for
+		 * event packets. Return all event types even if only specific
+		 * types were requested.
+		 */
+ ing_types = 0x0f;
+ *p_rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0x0f;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ return 0;
+}
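Note the final check: the hardware cannot enable RX-only or TX-only time stamping, so mixed requests are rejected. In miniature:

	#include <stdio.h>

	static int check(unsigned int ing_types, unsigned int egr_types)
	{
		if ((ing_types && !egr_types) || (!ing_types && egr_types))
			return -1;		/* -EINVAL in the driver */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check(0x0f, 0x0f));	/* 0: both directions on */
		printf("%d\n", check(0x0f, 0x00));	/* -1: RX-only rejected */
		return 0;
	}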
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+ mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+ egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+ u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+ egr_types, new_config);
+ if (err)
+ return err;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+ if (err)
+ goto err_ptp_disable;
+
+ return 0;
+
+err_ptp_disable:
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+ return err;
+}
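Because the MTPCPC setting is global rather than per port, only the first enable and the last disable touch the hardware; everything in between just moves the reference count. The pattern in a minimal form, with a plain counter standing in for refcount_t:

	#include <stdio.h>

	static int enabled_ports;

	static void port_enable(void)
	{
		if (enabled_ports++ == 0)
			printf("first port: program HW\n");
	}

	static void port_disable(void)
	{
		if (--enabled_ports == 0)
			printf("last port: clear HW\n");
	}

	int main(void)
	{
		port_enable();		/* programs HW */
		port_enable();		/* counter only */
		port_disable();		/* counter only */
		port_disable();		/* clears HW */
		return 0;
	}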
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ enum hwtstamp_rx_filters rx_filter;
+ struct hwtstamp_config new_config;
+ u16 new_ing_types, new_egr_types;
+ bool ptp_enabled;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
+ err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+ &new_egr_types, &rx_filter);
+ if (err)
+ goto err_get_message_types;
+
+ new_config.flags = config->flags;
+ new_config.tx_type = config->tx_type;
+ new_config.rx_filter = rx_filter;
+
+ ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+ mlxsw_sp_port->ptp.egr_types;
+
+ if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+ err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+ new_egr_types, new_config);
+ if (err)
+ goto err_configure_port;
+ } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+ err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+ if (err)
+ goto err_deconfigure_port;
+ }
+
+ mlxsw_sp_port->ptp.ing_types = new_ing_types;
+ mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
+
+ return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
+}
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+ * their correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return -ENOMEM;
+ }
+ }
+
+ return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+ tx_info);
+}
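On the wire, the push amounts to a four-byte 802.1Q tag (TPID 0x8100, VID 4095) spliced in after the source MAC. A sketch of just the tag bytes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t tpid = 0x8100;		/* ETH_P_8021Q */
		uint16_t tci = 4095;		/* PCP 0, DEI 0, VID 4095 */

		printf("tag: %02x %02x %02x %02x\n",
		       tpid >> 8, tpid & 0xff, tci >> 8, tci & 0xff);
		return 0;
	}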
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c06cd1384bca..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -57,6 +57,40 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -136,7 +170,15 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index)
{
}
-#endif
+
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
@@ -184,16 +226,26 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
-{
-}
-
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
static inline int mlxsw_sp2_get_stats_count(void)
{
return 0;
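The header hunks above pair each new declaration with a static inline stub under the Kconfig guard, so callers compile unchanged when PTP support is configured out. The idiom in isolation — a generic sketch with hypothetical CONFIG_FOO/foo names, not mlxsw code:

#include <linux/errno.h>
#include <linux/kconfig.h>

struct foo;

#if IS_ENABLED(CONFIG_FOO)
int foo_init(struct foo *foo);
#else
static inline int foo_init(struct foo *foo)
{
	/* Feature compiled out: every caller sees the same error code,
	 * mirroring the -EOPNOTSUPP stubs in the hunks above.
	 */
	return -EOPNOTSUPP;
}
#endif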
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d40762cfc453..48f1fa62a4fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
@@ -225,6 +226,64 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+struct mlxsw_sp_rif_counter_set_basic {
+ u64 good_unicast_packets;
+ u64 good_multicast_packets;
+ u64 good_broadcast_packets;
+ u64 good_unicast_bytes;
+ u64 good_multicast_bytes;
+ u64 good_broadcast_bytes;
+ u64 error_packets;
+ u64 discard_packets;
+ u64 error_bytes;
+ u64 discard_bytes;
+};
+
+static int
+mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ struct mlxsw_sp_rif_counter_set_basic *set)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ int err;
+
+ if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+
+ if (!set)
+ return 0;
+
+#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
+ (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
+
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
+
+#undef MLXSW_SP_RIF_COUNTER_EXTRACT
+
+ return 0;
+}
+
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
@@ -235,16 +294,20 @@ static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
int err;
+ if (mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return 0;
+
p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
if (!p_counter_index)
return -EINVAL;
+
err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
p_counter_index);
if (err)
@@ -268,10 +331,10 @@ err_counter_clear:
return err;
}
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
@@ -296,14 +359,12 @@ static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
if (!devlink_dpipe_table_counter_enabled(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
return;
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
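The MLXSW_SP_RIF_COUNTER_EXTRACT macro above uses token pasting to derive each mlxsw_reg_ricnt_<field>_get() getter from the field name, keeping the ten extractions to one line each. A standalone sketch of the same idiom, with hypothetical reg_*_get names:

#include <stdio.h>

struct stats { long rx; long tx; };

static long reg_rx_get(const char *pl) { (void)pl; return 1; }
static long reg_tx_get(const char *pl) { (void)pl; return 2; }

/* Paste the field name into the getter name, as the macro above does. */
#define EXTRACT(s, pl, NAME) ((s)->NAME = reg_ ## NAME ## _get(pl))

int main(void)
{
	char payload[16] = "";
	struct stats s;

	EXTRACT(&s, payload, rx);
	EXTRACT(&s, payload, tx);
	printf("rx=%ld tx=%ld\n", s.rx, s.tx);
	return 0;
}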
@@ -382,72 +443,19 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
-static struct mlxsw_sp_fib_entry_priv *
-mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- struct mlxsw_sp_fib_entry_priv *priv;
-
- if (!ll_ops->fib_entry_priv_size)
- /* No need to have priv */
- return NULL;
-
- priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- refcount_set(&priv->refcnt, 1);
- return priv;
-}
-
-static void
-mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
-{
- kfree(priv);
-}
-
-static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
-{
- refcount_inc(&priv->refcnt);
-}
-
-static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv || !refcount_dec_and_test(&priv->refcnt))
- return;
- mlxsw_sp_fib_entry_priv_destroy(priv);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv)
- return;
- mlxsw_sp_fib_entry_priv_hold(priv);
- list_add(&priv->list, &op_ctx->fib_entry_priv_list);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_priv *priv, *tmp;
-
- list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
- mlxsw_sp_fib_entry_priv_put(priv);
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
- struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
struct fib_info *fi;
u32 tb_id;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
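On the u8 tos -> dscp_t dscp conversion running through this file: dscp_t (from <net/inet_dscp.h>, included above) keeps only the six DSCP bits of the old tos byte, so ECN bits can no longer leak into route matching. A brief in-kernel sketch, assuming that header's inet_dsfield_to_dscp() helper (exact helpers may vary by kernel version):

#include <net/inet_dscp.h>
#include <linux/types.h>

static bool fib4_entry_tos_matches(dscp_t entry_dscp, u8 dsfield)
{
	/* The conversion masks off the two ECN bits, so two dsfield
	 * values differing only in ECN compare equal here.
	 */
	return entry_dscp == inet_dsfield_to_dscp(dsfield);
}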
@@ -476,7 +484,6 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
- const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -490,45 +497,16 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
-static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- return 0;
-}
-
-static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
- xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
- xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
-}
-
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
- err = ll_ops->init(mlxsw_sp, vr->id, proto);
- if (err)
- return ERR_PTR(err);
-
lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
@@ -540,7 +518,6 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
- fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -579,36 +556,33 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralst_pl[MLXSW_REG_XRALST_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -616,20 +590,19 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -640,11 +613,12 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -655,15 +629,14 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -671,7 +644,6 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -685,7 +657,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -696,11 +668,8 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops =
- mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
-
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
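For intuition on the ralst packing above: the first loop leaves root_bin at the highest in-use prefix length, and the second links each used length to the previously seen one, skipping length 0. A standalone sketch with a plain bitmap standing in for struct mlxsw_sp_prefix_usage (names hypothetical):

#include <stdio.h>

int main(void)
{
	/* Bit i set => some route with prefix length i is in use. */
	unsigned long long usage = (1ULL << 8) | (1ULL << 24) | (1ULL << 32);
	unsigned int root_bin = 0, last = 0xff; /* 0xff ~ "no child" */
	unsigned int prefix;

	for (prefix = 0; prefix < 64; prefix++)
		if (usage & (1ULL << prefix))
			root_bin = prefix;
	printf("root bin: %u\n", root_bin);

	for (prefix = 1; prefix < 64; prefix++) {
		if (!(usage & (1ULL << prefix)))
			continue;
		printf("bin %u -> left child %u\n", prefix, last);
		last = prefix;
	}
	return 0;
}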
@@ -790,23 +759,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -1469,8 +1438,8 @@ static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
return false;
}
-bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
+static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
{
return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
@@ -1514,16 +1483,10 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
+static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
{
- bool is_ipip_ul;
-
- mutex_lock(&mlxsw_sp->router->lock);
- is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return is_ipip_ul;
+ return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}
static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
@@ -1856,7 +1819,7 @@ void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
/* The configuration where several tunnels have the same local address in the
* same underlay table needs special treatment in the HW. That is currently not
* implemented in the driver. This function finds and demotes the first tunnel
- * with a given source address, except the one passed in in the argument
+ * with a given source address, except the one passed in the argument
* `except'.
*/
bool
@@ -1899,16 +1862,15 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
}
}
-int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
- struct net_device *ol_dev,
- unsigned long event,
- struct netdev_notifier_info *info)
+static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
{
struct netdev_notifier_changeupper_info *chup;
struct netlink_ext_ack *extack;
int err = 0;
- mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_REGISTER:
err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
@@ -1939,7 +1901,6 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -1977,16 +1938,15 @@ __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-int
+static int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *ul_dev,
unsigned long event,
struct netdev_notifier_info *info)
{
struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
- int err = 0;
+ int err;
- mutex_lock(&mlxsw_sp->router->lock);
while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
ul_dev,
ipip_entry))) {
@@ -1999,7 +1959,7 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
ul_dev);
- break;
+ return err;
}
if (demote_this) {
@@ -2016,9 +1976,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
ipip_entry = prev;
}
}
- mutex_unlock(&mlxsw_sp->router->lock);
- return err;
+ return 0;
}
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
@@ -2299,6 +2258,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
goto err_neigh_entry_insert;
mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
+ atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
return neigh_entry;
@@ -2313,6 +2273,7 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
list_del(&neigh_entry->rif_list_node);
+ atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
mlxsw_sp_neigh_entry_free(neigh_entry);
@@ -2510,6 +2471,9 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
char *rauhtd_pl;
int err;
+ if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
+ return 0;
+
rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
if (!rauhtd_pl)
return -ENOMEM;
@@ -2889,6 +2853,7 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_neighs_update_work);
INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
mlxsw_sp_router_probe_unresolved_nexthops);
+ atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
return 0;
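The neigh_count additions above are a small fast-path optimization: the periodic RAUHTD dump is skipped outright while no neighbour entries are offloaded. The gating pattern in isolation (hypothetical names):

#include <linux/atomic.h>

struct poller {
	atomic_t obj_count;	/* incremented on create, decremented on destroy */
};

static int poller_poll(struct poller *p)
{
	if (!atomic_read(&p->obj_count))
		return 0;	/* nothing offloaded: skip the register dump */
	/* ... allocate the payload and query the hardware here ... */
	return 0;
}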
@@ -4358,6 +4323,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_nexthop_neigh_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -5325,7 +5292,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
{
const struct fib_nh *nh = fib_info_nh(fi, 0);
- return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ return nh->fib_nh_gw_family ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
@@ -5499,7 +5466,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- return !fib4_entry->tos;
+ return !fib4_entry->dscp;
}
static bool
@@ -5560,7 +5527,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -5585,7 +5552,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
@@ -5608,7 +5575,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
@@ -5721,14 +5688,13 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -5736,140 +5702,39 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
-struct mlxsw_sp_fib_entry_op_ctx_basic {
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-};
-
static void
-mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
+mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
+ const struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
- enum mlxsw_reg_ralxx_protocol ralxx_proto;
- char *ralue_pl = op_ctx_basic->ralue_pl;
- enum mlxsw_reg_ralue_op ralue_op;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+ enum mlxsw_reg_ralxx_protocol proto;
+ u32 *p_dip;
- ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
+ proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
-
- switch (proto) {
+ switch (fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, (u32 *) addr);
+ p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ *p_dip);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr);
break;
}
}
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
-}
-
-static int
-mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- op_ctx_basic->ralue_pl);
-}
-
-static bool
-mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- return true;
-}
-
-static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
-{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
-
- mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
- fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr,
- fib_entry->priv);
-}
-
-static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- bool postponed_for_bulk = false;
- int err;
-
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
- if (!postponed_for_bulk)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -5892,20 +5757,19 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -5917,64 +5781,61 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
int err;
if (WARN_ON(!ipip_entry))
@@ -5986,55 +5847,54 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
+ fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
if (err)
return err;
@@ -6044,35 +5904,18 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- bool is_new)
-{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
- MLXSW_SP_FIB_ENTRY_OP_UPDATE);
-}
-
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
-
- if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
- return 0;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- MLXSW_SP_FIB_ENTRY_OP_DELETE);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
@@ -6167,12 +6010,6 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
@@ -6190,7 +6027,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
- fib4_entry->tos = fen_info->tos;
+ fib4_entry->dscp = fen_info->dscp;
fib_entry->fib_node = fib_node;
@@ -6201,8 +6038,6 @@ err_fib4_entry_type_set:
err_nexthop_group_vr_link:
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -6217,7 +6052,6 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -6244,7 +6078,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib4_entry, common);
if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
+ fib4_entry->dscp == fen_info->dscp &&
fib4_entry->type == fen_info->type &&
fib4_entry->fi == fen_info->fi)
return fib4_entry;
@@ -6455,16 +6289,14 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_update;
@@ -6475,25 +6307,14 @@ err_fib_entry_update:
return err;
}
-static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- int err;
- err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
fib_node->fib_entry = NULL;
- return err;
-}
-
-static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -6515,7 +6336,6 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -6550,7 +6370,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -6575,23 +6395,20 @@ err_fib4_entry_create:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
- int err;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return 0;
+ return;
fib_node = fib4_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -6683,6 +6500,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
+ int err;
nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
@@ -6698,7 +6516,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_nexthop_type_init;
+
+ return 0;
+
+err_nexthop_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
}
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
@@ -6889,9 +6716,9 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
@@ -6914,8 +6741,7 @@ static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
- &fib6_entry->common, false);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_entry_update;
@@ -6939,7 +6765,6 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -6950,23 +6775,21 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
if (err)
- goto err_nexthop6_group_update;
+ goto err_rt6_unwind;
return 0;
-err_nexthop6_group_update:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
@@ -6978,7 +6801,6 @@ err_rt6_create:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -6996,7 +6818,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}
static int
@@ -7082,19 +6904,13 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
fib6_entry->nrt6++;
@@ -7102,7 +6918,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
- goto err_nexthop6_group_get;
+ goto err_rt6_unwind;
err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
fib_node->fib);
@@ -7121,18 +6937,14 @@ err_fib6_entry_type_set:
mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
-err_nexthop6_group_get:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -7155,7 +6967,6 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
- mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -7213,8 +7024,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -7253,7 +7064,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -7277,8 +7088,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -7306,7 +7117,8 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -7317,17 +7129,16 @@ err_fib6_entry_nexthop_add:
return err;
}
-static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
- int err;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return 0;
+ return;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -7336,22 +7147,22 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return 0;
+ return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
- return 0;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ return;
}
fib_node = fib6_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static struct mlxsw_sp_mr_table *
@@ -7504,15 +7315,15 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
}
}
-struct mlxsw_sp_fib6_event {
+struct mlxsw_sp_fib6_event_work {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event {
- struct list_head list; /* node in fib queue */
+struct mlxsw_sp_fib_event_work {
+ struct work_struct work;
union {
- struct mlxsw_sp_fib6_event fib6_event;
+ struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -7521,12 +7332,11 @@ struct mlxsw_sp_fib_event {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
- int family;
};
static int
-mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -7540,8 +7350,8 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
if (!rt_arr)
return -ENOMEM;
- fib6_event->rt_arr = rt_arr;
- fib6_event->nrt6 = nrt6;
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -7563,242 +7373,182 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
}
static void
-mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
int i;
- for (i = 0; i < fib6_event->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
- kfree(fib6_event->rt_arr);
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
- &fib_event->fen_info);
+ &fib_work->fen_info);
}
- fib_info_put(fib_event->fen_info.fi);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- fib_info_put(fib_event->fen_info.fi);
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
- fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
- struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
- mr_cache_put(fib_event->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
- mr_cache_put(fib_event->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_event->ven_info);
+ &fib_work->ven_info);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
- dev_put(fib_event->ven_info.dev);
+ dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
- dev_put(fib_event->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
- struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
- struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
- struct mlxsw_sp_fib_event *next_fib_event;
- struct mlxsw_sp_fib_event *fib_event;
- int last_family = AF_UNSPEC;
- LIST_HEAD(fib_event_queue);
-
- spin_lock_bh(&router->fib_event_queue_lock);
- list_splice_init(&router->fib_event_queue, &fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
-
- /* Router lock is held here to make sure per-instance
- * operation context is not used in between FIB4/6 events
- * processing.
- */
- mutex_lock(&router->lock);
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- list_for_each_entry_safe(fib_event, next_fib_event,
- &fib_event_queue, list) {
- /* Check if the next entry in the queue exists and is of the
- * same type (family and event) as the current one. In that
- * case it is permitted to bulk multiple FIB entries into a
- * single register write.
- */
- op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
- fib_event->family == next_fib_event->family &&
- fib_event->event == next_fib_event->event;
- op_ctx->event = fib_event->event;
-
- /* If the family of this entry differs from the previous one, the
- * context needs to be reinitialized now; indicate that. Note that
- * since last_family is initialized to AF_UNSPEC, this always
- * happens for the first entry processed in the work.
- */
- if (fib_event->family != last_family)
- op_ctx->initialized = false;
-
- switch (fib_event->family) {
- case AF_INET:
- mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case AF_INET6:
- mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case RTNL_FAMILY_IP6MR:
- case RTNL_FAMILY_IPMR:
- /* Unlock here as inside FIBMR the lock is taken again
- * under RTNL. The per-instance operation context
- * is not used by FIBMR.
- */
- mutex_unlock(&router->lock);
- mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
- fib_event);
- mutex_lock(&router->lock);
- break;
- default:
- WARN_ON_ONCE(1);
- }
- last_family = fib_event->family;
- kfree(fib_event);
- cond_resched();
- }
- WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- mutex_unlock(&router->lock);
-}
-
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_event->fen_info = *fen_info;
+ fib_work->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while event is queued. Release it afterwards.
+ * freed while work is queued. Release it afterwards.
*/
- fib_info_hold(fib_event->fen_info.fi);
+ fib_info_hold(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_event->fnh_info = *fnh_info;
- fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
- fen6_info);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
if (err)
return err;
break;
@@ -7808,20 +7558,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
- mr_cache_hold(fib_event->men_info.mfc);
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
- dev_hold(fib_event->ven_info.dev);
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
@@ -7875,7 +7625,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event *fib_event;
+ struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -7907,39 +7657,37 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
}
- fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
- if (!fib_event)
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
return NOTIFY_BAD;
- fib_event->mlxsw_sp = router->mlxsw_sp;
- fib_event->event = event;
- fib_event->family = info->family;
+ fib_work->mlxsw_sp = router->mlxsw_sp;
+ fib_work->event = event;
switch (info->family) {
case AF_INET:
- mlxsw_sp_router_fib4_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
+ mlxsw_sp_router_fib4_event(fib_work, info);
break;
case AF_INET6:
- err = mlxsw_sp_router_fib6_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- mlxsw_sp_router_fibmr_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
break;
}
- /* Enqueue the event and trigger the work */
- spin_lock_bh(&router->fib_event_queue_lock);
- list_add_tail(&fib_event->list, &router->fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
- mlxsw_core_schedule_work(&router->fib_event_work);
+ mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_event);
+ kfree(fib_work);
return NOTIFY_BAD;
}
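/* Editorial note: mlxsw_sp_router_fib_event() above runs in atomic notifier
 * context, hence the GFP_ATOMIC allocation and the absence of sleeping calls.
 * Every object copied into the work item is pinned first and released by the
 * work function once the event has been handled; the pairing, schematically:
 *
 *	notifier (atomic):  fib_info_hold() / mr_cache_hold() / dev_hold();
 *	                    mlxsw_core_schedule_work(&fib_work->work);
 *	worker (process):   handle event; matching _put()/dev_put(); kfree();
 */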
@@ -8148,6 +7896,166 @@ u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->ul_rif_id;
}
+static bool
+mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS) &&
+ mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ if (err)
+ return err;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ NULL);
+ if (err)
+ goto err_clear_ingress;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ goto err_alloc_egress;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ NULL);
+ if (err)
+ goto err_clear_egress;
+
+ return 0;
+
+err_clear_egress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+err_alloc_egress:
+err_clear_ingress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ return err;
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return;
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
+ struct rtnl_hw_stats64 *p_stats)
+{
+ struct mlxsw_sp_rif_counter_set_basic ingress;
+ struct mlxsw_sp_rif_counter_set_basic egress;
+ int err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ &ingress);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &egress);
+ if (err)
+ return err;
+
+#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
+ ((SET.good_unicast_ ## SFX) + \
+ (SET.good_multicast_ ## SFX) + \
+ (SET.good_broadcast_ ## SFX))
+
+ p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
+ p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
+ p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
+ p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
+ p_stats->rx_errors = ingress.error_packets;
+ p_stats->tx_errors = egress.error_packets;
+ p_stats->rx_dropped = ingress.discard_packets;
+ p_stats->tx_dropped = egress.discard_packets;
+ p_stats->multicast = ingress.good_multicast_packets +
+ ingress.good_broadcast_packets;
+
+#undef MLXSW_SP_ROUTER_ALL_GOOD
+
+ return 0;
+}
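/* Editorial note: as a concrete reading of the macro above,
 * MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets) expands to
 *
 *	ingress.good_unicast_packets +
 *	ingress.good_multicast_packets +
 *	ingress.good_broadcast_packets
 *
 * so the rx/tx packet and byte counters sum only "good" frames of the three
 * cast types, while errors and discards are reported via the separate
 * error_packets and discard_packets fields.
 */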
+
+static int
+mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct rtnl_hw_stats64 stats = {};
+ int err;
+
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return 0;
+
+ err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
+ if (err)
+ return err;
+
+ netdev_offload_xstats_report_delta(info->report_delta, &stats);
+ return 0;
+}
+
+struct mlxsw_sp_router_hwstats_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work =
+ container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
+ work);
+
+ rtnl_lock();
+ rtnl_offload_xstats_notify(hws_work->dev);
+ rtnl_unlock();
+ dev_put(hws_work->dev);
+ kfree(hws_work);
+}
+
+static void
+mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work;
+
+ /* To collect notification payload, the core ends up sending another
+ * notifier block message, which would deadlock on the attempt to
+ * acquire the router lock again. Just postpone the notification until
+ * later.
+ */
+
+ hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
+ if (!hws_work)
+ return;
+
+ INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
+ dev_hold(dev);
+ hws_work->dev = dev;
+ mlxsw_core_schedule_work(&hws_work->work);
+}
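/* Editorial note: rtnl_offload_xstats_notify() itself generates another
 * netdev notifier message, which would recurse into this driver and
 * deadlock on the router lock held by the caller, hence the deferral
 * above. The reference ordering also matters:
 *
 *	dev_hold(dev);                            // pin before publishing
 *	hws_work->dev = dev;
 *	mlxsw_core_schedule_work(&hws_work->work);
 *	...
 *	dev_put(hws_work->dev);                   // in the work function
 */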
+
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
return rif->dev->ifindex;
@@ -8158,6 +8066,16 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
+{
+ struct rtnl_hw_stats64 stats = {};
+
+ if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
+ netdev_offload_xstats_push_delta(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3,
+ &stats);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -8218,10 +8136,20 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
- mlxsw_sp_rif_counters_alloc(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ err = mlxsw_sp_router_port_l3_stats_enable(rif);
+ if (err)
+ goto err_stats_enable;
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_alloc(rif);
+ }
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return rif;
+err_stats_enable:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -8248,10 +8176,19 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *vr;
int i;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp_rif_counters_free(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ mlxsw_sp_rif_push_l3_stats(rif);
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_free(rif);
+ }
+
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
@@ -8398,6 +8335,13 @@ static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
+static u64 mlxsw_sp_rifs_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rifs_count);
+}
+
static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
struct netlink_ext_ack *extack)
@@ -8646,9 +8590,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(port_dev) ||
- netif_is_lag_port(port_dev) ||
- netif_is_ovs_port(port_dev))
+ if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -9128,36 +9070,102 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
return -ENOBUFS;
}
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
- unsigned long event, void *ptr)
+static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return true;
+ }
+
+ return false;
+}
+
+static int
+mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ switch (info->type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ return mlxsw_sp_router_port_l3_stats_enable(rif);
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ mlxsw_sp_router_port_l3_stats_report_used(rif, info);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int
+mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
+}
+
+static bool mlxsw_sp_is_router_event(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_PRE_CHANGEADDR:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
- int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
- mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
- goto out;
+ return 0;
switch (event) {
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
- err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
- break;
+ return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
case NETDEV_PRE_CHANGEADDR:
- err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ default:
+ WARN_ON_ONCE(1);
break;
}
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return err;
+ return 0;
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
@@ -9188,8 +9196,18 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}
-int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
- struct netdev_notifier_changeupper_info *info)
+static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
+ return false;
+ return netif_is_l3_master(info->upper_dev);
+}
+
+static int
+mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
int err = 0;
@@ -9200,7 +9218,6 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
- mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_PRECHANGEUPPER:
break;
@@ -9215,11 +9232,42 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
}
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
+static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_router *router;
+ struct mlxsw_sp *mlxsw_sp;
+ int err = 0;
+
+ router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
+ mlxsw_sp = router->mlxsw_sp;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (mlxsw_sp_is_offload_xstats_event(event))
+ err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_is_router_event(event))
+ err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
+ else if (mlxsw_sp_is_vrf_event(event, ptr))
+ err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
+
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return notifier_from_errno(err);
+}
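/* Editorial note: the single net-namespace netdevice notifier above fans
 * out to the per-topic handlers under one acquisition of the router lock.
 * notifier_from_errno() then maps the handlers' errno convention onto
 * notifier chain return values: 0 becomes NOTIFY_OK, while a negative
 * errno is wrapped with NOTIFY_STOP_MASK so chain callers can recover it
 * with notifier_to_errno().
 */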
+
static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
struct netdev_nested_priv *priv)
{
@@ -9266,17 +9314,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
- rif_subport->vid);
-
+ efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9301,9 +9350,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9315,7 +9370,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9339,10 +9394,9 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
.fid_get = mlxsw_sp_rif_subport_fid_get,
};
-static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
- enum mlxsw_reg_ritr_if_type type,
- u16 vid_fid, bool enable)
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9350,7 +9404,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
- mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9374,10 +9428,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return err;
rif->mac_profile_id = mac_profile;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
if (err)
- goto err_rif_vlan_fid_op;
+ goto err_rif_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9394,9 +9447,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9404,8 +9463,8 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-err_rif_vlan_fid_op:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9416,7 +9475,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9424,7 +9483,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
@@ -9501,11 +9560,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
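/* Editorial note: the configure path above follows the standard kernel
 * goto-unwind idiom: each acquired resource gets an err_* label, and the
 * labels run in strict reverse order of acquisition. Hypothetical skeleton:
 *
 *	err = acquire_a();
 *	if (err)
 *		return err;
 *	err = acquire_b();
 *	if (err)
 *		goto err_b;
 *	return 0;
 *
 * err_b:
 *	release_a();
 *	return err;
 */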
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9580,7 +9747,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9623,6 +9790,7 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
if (err)
goto ul_rif_op_err;
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return ul_rif;
ul_rif_op_err:
@@ -9635,6 +9803,7 @@ static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
kfree(ul_rif);
@@ -9766,7 +9935,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
@@ -9790,10 +9959,15 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- mlxsw_sp_rif_mac_profiles_occ_get,
- mlxsw_sp);
+ atomic_set(&mlxsw_sp->router->rifs_count, 0);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIFS,
+ mlxsw_sp_rifs_occ_get,
+ mlxsw_sp);
return 0;
}
@@ -9803,11 +9977,13 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
@@ -9966,7 +10142,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
unsigned long *fields = config->fields;
u32 hash_fields;
- switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
case 0:
mlxsw_sp_mp4_hash_outer_addr(config);
break;
@@ -9984,7 +10160,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_mp_hash_inner_l3(config);
break;
case 3:
- hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
/* Outer */
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
@@ -10165,13 +10341,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
- bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
+ bool usp;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
@@ -10187,46 +10364,6 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
- .init = mlxsw_sp_router_ll_basic_init,
- .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
- .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
-};
-
-static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
-{
- size_t max_size = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
- size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
-
- if (size > max_size)
- max_size = size;
- }
- router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
- GFP_KERNEL);
- if (!router->ll_op_ctx)
- return -ENOMEM;
- INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
- return 0;
-}
-
-static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
-{
- WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- kfree(router->ll_op_ctx);
-}
-
static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
u16 lb_rif_index;
@@ -10300,23 +10437,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_router_ops_init;
- err = mlxsw_sp_router_xm_init(mlxsw_sp);
- if (err)
- goto err_xm_init;
-
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
- &mlxsw_sp_router_ll_xm_ops :
- &mlxsw_sp_router_ll_basic_ops;
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
-
- err = mlxsw_sp_router_ll_op_ctx_init(router);
- if (err)
- goto err_ll_op_ctx_init;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
mlxsw_sp_nh_grp_activity_work);
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -10369,10 +10492,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
- INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
- INIT_LIST_HEAD(&router->fib_event_queue);
- spin_lock_init(&router->fib_event_queue_lock);
-
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -10404,8 +10523,18 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_fib_notifier;
+ mlxsw_sp->router->netdevice_nb.notifier_call =
+ mlxsw_sp_router_netdevice_event;
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
+ if (err)
+ goto err_register_netdev_notifier;
+
return 0;
+err_register_netdev_notifier:
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
err_register_fib_notifier:
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb);
@@ -10417,7 +10546,6 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -10441,10 +10569,6 @@ err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(router);
-err_ll_op_ctx_init:
- mlxsw_sp_router_xm_fini(mlxsw_sp);
-err_xm_init:
err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
@@ -10453,6 +10577,8 @@ err_router_ops_init:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
@@ -10461,7 +10587,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -10473,8 +10598,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
- mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 99e8371a82a5..c5dfb972b433 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,32 +15,12 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
-struct mlxsw_sp_fib_entry_op_ctx {
- u8 bulk_ok:1, /* Indicate to the low-level op that it is ok to
- * bulk the current entry with the next one in
- * the queue.
- */
- initialized:1; /* Bit that the low-level op sets in case
- * the context priv is initialized.
- */
- struct list_head fib_entry_priv_list;
- unsigned long event;
- unsigned long ll_priv[];
-};
-
-static inline void
-mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
- memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
struct idr rif_mac_profiles_idr;
atomic_t rif_mac_profiles_count;
+ atomic_t rifs_count;
u8 max_rif_mac_profile;
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
@@ -56,6 +36,7 @@ struct mlxsw_sp_router {
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
+ atomic_t neigh_count;
} neighs_update;
struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
@@ -66,18 +47,13 @@ struct mlxsw_sp_router {
struct notifier_block netevent_nb;
struct notifier_block inetaddr_nb;
struct notifier_block inet6addr_nb;
+ struct notifier_block netdevice_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
- struct work_struct fib_event_work;
- struct list_head fib_event_queue;
- spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
- /* One set of ops for each protocol: IPv4 and IPv6 */
- const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
u16 lb_rif_index;
- struct mlxsw_sp_router_xm *xm;
const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
size_t adj_grp_size_ranges_count;
struct delayed_work nh_grp_activity_dw;
@@ -87,48 +63,6 @@ struct mlxsw_sp_router {
u32 adj_trap_index;
};
-struct mlxsw_sp_fib_entry_priv {
- refcount_t refcnt;
- struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
- unsigned long priv[];
-};
-
-enum mlxsw_sp_fib_entry_op {
- MLXSW_SP_FIB_ENTRY_OP_WRITE,
- MLXSW_SP_FIB_ENTRY_OP_UPDATE,
- MLXSW_SP_FIB_ENTRY_OP_DELETE,
-};
-
-/* Low-level router ops. Basically this is to handle the different
- * register sets to work with ordinary and XM trees and FIB entries.
- */
-struct mlxsw_sp_router_ll_ops {
- int (*init)(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto);
- int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
- int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
- int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
- size_t fib_entry_op_ctx_size;
- size_t fib_entry_priv_size;
- void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len, unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv);
- void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size);
- void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif);
- void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
- void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr);
- int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk);
- bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -148,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
-u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
@@ -159,11 +92,9 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
u64 *cnt);
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir);
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir);
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
@@ -232,10 +163,4 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
-extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp);
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp);
-
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
deleted file mode 100644
index d213af723a2a..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
+++ /dev/null
@@ -1,812 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/rhashtable.h>
-
-#include "spectrum.h"
-#include "core.h"
-#include "reg.h"
-#include "spectrum_router.h"
-
-#define MLXSW_SP_ROUTER_XM_M_VAL 16
-
-static const u8 mlxsw_sp_router_xm_m_val[] = {
- [MLXSW_SP_L3_PROTO_IPV4] = MLXSW_SP_ROUTER_XM_M_VAL,
- [MLXSW_SP_L3_PROTO_IPV6] = 0, /* Currently unused. */
-};
-
-#define MLXSW_SP_ROUTER_XM_L_VAL_MAX 16
-
-struct mlxsw_sp_router_xm {
- bool ipv4_supported;
- bool ipv6_supported;
- unsigned int entries_size;
- struct rhashtable ltable_ht;
- struct rhashtable flush_ht; /* Stores items about to be flushed from cache */
- unsigned int flush_count;
- bool flush_all_mode;
-};
-
-struct mlxsw_sp_router_xm_ltable_node {
- struct rhash_head ht_node; /* Member of router_xm->ltable_ht */
- u16 mindex;
- u8 current_lvalue;
- refcount_t refcnt;
- unsigned int lvalue_ref[MLXSW_SP_ROUTER_XM_L_VAL_MAX + 1];
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_ltable_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, mindex),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, ht_node),
- .key_len = sizeof(u16),
- .automatic_shrinking = true,
-};
-
-struct mlxsw_sp_router_xm_flush_info {
- bool all;
- enum mlxsw_sp_l3proto proto;
- u16 virtual_router;
- u8 prefix_len;
- unsigned char addr[sizeof(struct in6_addr)];
-};
-
-struct mlxsw_sp_router_xm_fib_entry {
- bool committed;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node; /* Parent node */
- u16 mindex; /* Store for processing from commit op */
- u8 lvalue;
- struct mlxsw_sp_router_xm_flush_info flush_info;
-};
-
-#define MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX \
- (MLXSW_REG_XMDR_TRANS_LEN / MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN)
-
-struct mlxsw_sp_fib_entry_op_ctx_xm {
- bool initialized;
- char xmdr_pl[MLXSW_REG_XMDR_LEN];
- unsigned int trans_offset; /* Offset of the current command within one
- * transaction of XMDR register.
- */
- unsigned int trans_item_len; /* The current command length. This is used
- * to advance 'trans_offset' when the next
- * command is appended.
- */
- unsigned int entries_count;
- struct mlxsw_sp_router_xm_fib_entry *entries[MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX];
-};
-
-static int mlxsw_sp_router_ll_xm_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- char rxlte_pl[MLXSW_REG_RXLTE_LEN];
-
- mlxsw_reg_rxlte_pack(rxlte_pl, vr_id,
- (enum mlxsw_reg_rxlte_protocol) proto, true);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta), xralta_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralst), xralst_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xraltb), xraltb_pl);
-}
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get4(const u32 addr)
-{
- /* Currently the M-index is set to linear mode. That means it is
- * defined as the 16 MSBs of the IP address.
- */
- return addr >> MLXSW_SP_ROUTER_XM_L_VAL_MAX;
-}
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get6(const unsigned char *addr)
-{
- WARN_ON_ONCE(1);
- return 0; /* currently unused */
-}
-
-static void mlxsw_sp_router_ll_xm_op_ctx_check_init(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- if (op_ctx->initialized)
- return;
- op_ctx->initialized = true;
-
- mlxsw_reg_xmdr_pack(op_ctx_xm->xmdr_pl, true);
- op_ctx_xm->trans_offset = 0;
- op_ctx_xm->entries_count = 0;
-}
-
-static void mlxsw_sp_router_ll_xm_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- enum mlxsw_reg_xmdr_c_ltr_op xmdr_c_ltr_op;
- unsigned int len;
-
- mlxsw_sp_router_ll_xm_op_ctx_check_init(op_ctx, op_ctx_xm);
-
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_UPDATE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
-
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- len = mlxsw_reg_xmdr_c_ltr_pack4(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, (u32 *) addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get4(*((u32 *) addr));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- len = mlxsw_reg_xmdr_c_ltr_pack6(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get6(addr);
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
- if (!op_ctx_xm->trans_offset)
- op_ctx_xm->trans_item_len = len;
- else
- WARN_ON_ONCE(op_ctx_xm->trans_item_len != len);
-
- op_ctx_xm->entries[op_ctx_xm->entries_count] = fib_entry;
-
- fib_entry->lvalue = prefix_len > mlxsw_sp_router_xm_m_val[proto] ?
- prefix_len - mlxsw_sp_router_xm_m_val[proto] : 0;
-
- flush_info = &fib_entry->flush_info;
- flush_info->proto = proto;
- flush_info->virtual_router = virtual_router;
- flush_info->prefix_len = prefix_len;
- if (addr)
- memcpy(flush_info->addr, addr, sizeof(flush_info->addr));
- else
- memset(flush_info->addr, 0, sizeof(flush_info->addr));
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_remote_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_local_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- tunnel_ptr);
-}
-
-static struct mlxsw_sp_router_xm_ltable_node *
-mlxsw_sp_router_xm_ltable_node_get(struct mlxsw_sp_router_xm *router_xm, u16 mindex)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- int err;
-
- ltable_node = rhashtable_lookup_fast(&router_xm->ltable_ht, &mindex,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (ltable_node) {
- refcount_inc(&ltable_node->refcnt);
- return ltable_node;
- }
- ltable_node = kzalloc(sizeof(*ltable_node), GFP_KERNEL);
- if (!ltable_node)
- return ERR_PTR(-ENOMEM);
- ltable_node->mindex = mindex;
- refcount_set(&ltable_node->refcnt, 1);
-
- err = rhashtable_insert_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_insert;
-
- return ltable_node;
-
-err_insert:
- kfree(ltable_node);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_router_xm_ltable_node_put(struct mlxsw_sp_router_xm *router_xm,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- if (!refcount_dec_and_test(&ltable_node->refcnt))
- return;
- rhashtable_remove_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- kfree(ltable_node);
-}
-
-static int mlxsw_sp_router_xm_ltable_lvalue_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- char xrmt_pl[MLXSW_REG_XRMT_LEN];
-
- mlxsw_reg_xrmt_pack(xrmt_pl, ltable_node->mindex, ltable_node->current_lvalue);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xrmt), xrmt_pl);
-}
-
-struct mlxsw_sp_router_xm_flush_node {
- struct rhash_head ht_node; /* Member of router_xm->flush_ht */
- struct list_head list;
- struct mlxsw_sp_router_xm_flush_info flush_info;
- struct delayed_work dw;
- struct mlxsw_sp *mlxsw_sp;
- unsigned long start_jiffies;
- unsigned int reuses; /* By how many flush calls this was reused. */
- refcount_t refcnt;
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_flush_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, flush_info),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, ht_node),
- .key_len = sizeof(struct mlxsw_sp_router_xm_flush_info),
- .automatic_shrinking = true,
-};
-
-static struct mlxsw_sp_router_xm_flush_node *
-mlxsw_sp_router_xm_cache_flush_node_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- int err;
-
- flush_node = kzalloc(sizeof(*flush_node), GFP_KERNEL);
- if (!flush_node)
- return ERR_PTR(-ENOMEM);
-
- flush_node->flush_info = *flush_info;
- err = rhashtable_insert_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- if (err) {
- kfree(flush_node);
- return ERR_PTR(err);
- }
- router_xm->flush_count++;
- flush_node->mlxsw_sp = mlxsw_sp;
- flush_node->start_jiffies = jiffies;
- refcount_set(&flush_node->refcnt, 1);
- return flush_node;
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_hold(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node)
- return;
- refcount_inc(&flush_node->refcnt);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_put(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node || !refcount_dec_and_test(&flush_node->refcnt))
- return;
- kfree(flush_node);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- router_xm->flush_count--;
- rhashtable_remove_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
-}
-
-static u32 mlxsw_sp_router_xm_flush_mask4(u8 prefix_len)
-{
- return GENMASK(31, 32 - prefix_len);
-}
-
-static unsigned char *mlxsw_sp_router_xm_flush_mask6(u8 prefix_len)
-{
- static unsigned char mask[sizeof(struct in6_addr)];
-
- memset(mask, 0, sizeof(mask));
- memset(mask, 0xff, prefix_len / 8);
- mask[prefix_len / 8] = GENMASK(8, 8 - prefix_len % 8);
- return mask;
-}
-
-#define MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT 15
-#define MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES 15
-#define MLXSW_SP_ROUTER_XM_CACHE_DELAY 50 /* usecs */
-#define MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT (MLXSW_SP_ROUTER_XM_CACHE_DELAY * 10)
-
-static void mlxsw_sp_router_xm_cache_flush_work(struct work_struct *work)
-{
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- char rlcmld_pl[MLXSW_REG_RLCMLD_LEN];
- enum mlxsw_reg_rlcmld_select select;
- struct mlxsw_sp *mlxsw_sp;
- u32 addr4;
- int err;
-
- flush_node = container_of(work, struct mlxsw_sp_router_xm_flush_node,
- dw.work);
- mlxsw_sp = flush_node->mlxsw_sp;
- flush_info = &flush_node->flush_info;
-
- if (flush_info->all) {
- char rlpmce_pl[MLXSW_REG_RLPMCE_LEN];
-
- mlxsw_reg_rlpmce_pack(rlpmce_pl, true, false);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlpmce),
- rlpmce_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
- if (flush_node->reuses <
- MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES)
- /* Leaving flush-all mode. */
- mlxsw_sp->router->xm->flush_all_mode = false;
- goto out;
- }
-
- select = MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES;
-
- switch (flush_info->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- addr4 = *((u32 *) flush_info->addr);
- addr4 &= mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len);
-
- /* In case the flush prefix length is bigger than the M-value,
- * it makes no sense to flush M entries, so just flush
- * the ML entries.
- */
- if (flush_info->prefix_len > MLXSW_SP_ROUTER_XM_M_VAL)
- select = MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES;
-
- mlxsw_reg_rlcmld_pack4(rlcmld_pl, select,
- flush_info->virtual_router, addr4,
- mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rlcmld_pack6(rlcmld_pl, select,
- flush_info->virtual_router, flush_info->addr,
- mlxsw_sp_router_xm_flush_mask6(flush_info->prefix_len));
- break;
- default:
- WARN_ON(true);
- goto out;
- }
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
-out:
- mlxsw_sp_router_xm_cache_flush_node_destroy(mlxsw_sp, flush_node);
-}
-
-static bool
-mlxsw_sp_router_xm_cache_flush_may_cancel(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- unsigned long max_wait = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT);
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
-
- /* In case the same flushing work is already pending, check whether
- * we can consolidate with it; that is possible for up to MAX_WAIT.
- * Cancel the delayed work and consolidate if it was still pending.
- */
- if (time_is_before_jiffies(flush_node->start_jiffies + max_wait - delay) &&
- cancel_delayed_work_sync(&flush_node->dw))
- return true;
- return false;
-}
-
-static int
-mlxsw_sp_router_xm_cache_flush_schedule(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
- struct mlxsw_sp_router_xm_flush_info flush_all_info = {.all = true};
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
-
- /* Check if the number of queued flushes reached a critical amount,
- * after which it is better to just flush the whole cache.
- */
- if (router_xm->flush_count == MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT)
- /* Entering flush-all mode. */
- router_xm->flush_all_mode = true;
-
- if (router_xm->flush_all_mode)
- flush_info = &flush_all_info;
-
- rcu_read_lock();
- flush_node = rhashtable_lookup_fast(&router_xm->flush_ht, flush_info,
- mlxsw_sp_router_xm_flush_ht_params);
- /* Take a reference so the object is not freed before a possible
- * delayed work cancellation is done.
- */
- mlxsw_sp_router_xm_cache_flush_node_hold(flush_node);
- rcu_read_unlock();
-
- if (flush_node && mlxsw_sp_router_xm_cache_flush_may_cancel(flush_node)) {
- flush_node->reuses++;
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- /* The original work was within the wait period and was canceled.
- * That means the reference is still held and the
- * flush_node_put() call above did not free the flush_node.
- * Reschedule it with a fresh delay.
- */
- goto schedule_work;
- } else {
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- }
-
- flush_node = mlxsw_sp_router_xm_cache_flush_node_create(mlxsw_sp, flush_info);
- if (IS_ERR(flush_node))
- return PTR_ERR(flush_node);
- INIT_DELAYED_WORK(&flush_node->dw, mlxsw_sp_router_xm_cache_flush_work);
-
-schedule_work:
- mlxsw_core_schedule_dw(&flush_node->dw, delay);
- return 0;
-}
-
-static int
-mlxsw_sp_router_xm_ml_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- u8 lvalue = fib_entry->lvalue;
- int err;
-
- ltable_node = mlxsw_sp_router_xm_ltable_node_get(router_xm,
- fib_entry->mindex);
- if (IS_ERR(ltable_node))
- return PTR_ERR(ltable_node);
- if (lvalue > ltable_node->current_lvalue) {
- /* The L-value is bigger than the one currently set, update. */
- ltable_node->current_lvalue = lvalue;
- err = mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp,
- ltable_node);
- if (err)
- goto err_lvalue_set;
-
- /* The L-value for prefix/M is increased.
- * Therefore, all entries in the M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
-
- ltable_node->lvalue_ref[lvalue]++;
- fib_entry->ltable_node = ltable_node;
-
- return 0;
-
-err_lvalue_set:
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node =
- fib_entry->ltable_node;
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- u8 lvalue = fib_entry->lvalue;
-
- ltable_node->lvalue_ref[lvalue]--;
- if (lvalue == ltable_node->current_lvalue && lvalue &&
- !ltable_node->lvalue_ref[lvalue]) {
- u8 new_lvalue = lvalue - 1;
-
- /* Find the biggest L-value left out there. */
- while (new_lvalue > 0 && !ltable_node->lvalue_ref[new_lvalue])
- new_lvalue--;
-
- ltable_node->current_lvalue = new_lvalue;
- mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, ltable_node);
-
- /* The L-value for prefix/M is decreased.
- * Therefore, all entries in the M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
-}
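The lvalue_ref[] array above acts as a reference-count histogram: current_lvalue caches the largest index with a non-zero count, and deletion re-scans downwards when the maximum loses its last reference. A self-contained userspace sketch of that bookkeeping, with illustrative names and an arbitrary bound:

    #include <assert.h>
    #include <stdint.h>

    #define LVALUE_MAX 16

    struct ltable_node {
        uint32_t lvalue_ref[LVALUE_MAX + 1];    /* refcount per L-value */
        uint8_t current_lvalue;                 /* max L-value with refs */
    };

    static void ltable_lvalue_add(struct ltable_node *n, uint8_t lvalue)
    {
        if (lvalue > n->current_lvalue)
            n->current_lvalue = lvalue;         /* new maximum */
        n->lvalue_ref[lvalue]++;
    }

    static void ltable_lvalue_del(struct ltable_node *n, uint8_t lvalue)
    {
        assert(n->lvalue_ref[lvalue]);
        n->lvalue_ref[lvalue]--;
        /* If the maximum just lost its last reference, scan down for
         * the next-biggest L-value that is still referenced.
         */
        if (lvalue == n->current_lvalue && !n->lvalue_ref[lvalue]) {
            while (n->current_lvalue > 0 &&
                   !n->lvalue_ref[n->current_lvalue])
                n->current_lvalue--;
        }
    }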
-
-static int
-mlxsw_sp_router_xm_ml_entries_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_ml_entry_add(mlxsw_sp, fib_entry);
- if (err)
- goto rollback;
- }
- return 0;
-
-rollback:
- for (i--; i >= 0; i--) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
- return err;
-}
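The rollback loop above is the standard unwind-on-error idiom: stop at the first failure, then undo the already-applied items in reverse order so the system returns to its prior state. Schematically, with hypothetical do_one()/undo_one() helpers:

    struct item;
    int do_one(struct item *it);        /* hypothetical: apply one change */
    void undo_one(struct item *it);     /* hypothetical: revert it */

    static int apply_all(struct item **items, int count)
    {
        int err, i;

        for (i = 0; i < count; i++) {
            err = do_one(items[i]);
            if (err)
                goto rollback;
        }
        return 0;

    rollback:
        for (i--; i >= 0; i--)          /* undo in reverse order */
            undo_one(items[i]);
        return err;
    }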
-
-static void
-mlxsw_sp_router_xm_ml_entries_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_cache_flush(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_cache_flush_schedule(mlxsw_sp,
- &fib_entry->flush_info);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
- }
-}
-
-static int mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- u8 num_rec;
- int err;
- int i;
-
- op_ctx_xm->trans_offset += op_ctx_xm->trans_item_len;
- op_ctx_xm->entries_count++;
-
- /* Check if bulking is possible and there is still room for another
- * FIB entry record. The value of 'trans_item_len' is either the size
- * of an IPv4 command or the size of an IPv6 command; it is not
- * possible to mix the two in a single XMDR write.
- */
- if (op_ctx->bulk_ok &&
- op_ctx_xm->trans_offset + op_ctx_xm->trans_item_len <= MLXSW_REG_XMDR_TRANS_LEN) {
- if (postponed_for_bulk)
- *postponed_for_bulk = true;
- return 0;
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_REPLACE) {
- /* The L-table is updated inside. It has to be done before
- * the prefix is inserted.
- */
- err = mlxsw_sp_router_xm_ml_entries_add(mlxsw_sp, op_ctx_xm);
- if (err)
- goto out;
- }
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr), op_ctx_xm->xmdr_pl);
- if (err)
- goto out;
- num_rec = mlxsw_reg_xmdr_num_rec_get(op_ctx_xm->xmdr_pl);
- if (num_rec > op_ctx_xm->entries_count) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XMDR number of records\n");
- err = -EIO;
- goto out;
- }
- for (i = 0; i < num_rec; i++) {
- if (!mlxsw_reg_xmdr_reply_vect_get(op_ctx_xm->xmdr_pl, i)) {
- dev_err(mlxsw_sp->bus_info->dev, "Command send over XMDR failed\n");
- err = -EIO;
- goto out;
- } else {
- fib_entry = op_ctx_xm->entries[i];
- fib_entry->committed = true;
- }
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_DEL)
- /* The L-table is updated inside. It has to be done after
- * the prefix has been removed.
- */
- mlxsw_sp_router_xm_ml_entries_del(mlxsw_sp, op_ctx_xm);
-
- /* At the very end, do the XLT cache flushing to evict stale
- * M and ML cache entries after prefixes were inserted/removed.
- */
- mlxsw_sp_router_xm_ml_entries_cache_flush(mlxsw_sp, op_ctx_xm);
-
-out:
- /* The next pack call is going to do reinitialization. */
- op_ctx->initialized = false;
- return err;
-}
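The bulking check above is simple offset arithmetic: every packed record advances trans_offset by the fixed trans_item_len, and the register write is postponed while another record of the same size still fits under MLXSW_REG_XMDR_TRANS_LEN. Reduced to a userspace sketch with an illustrative buffer size:

    #include <stdbool.h>
    #include <stddef.h>

    #define TRANS_LEN 256   /* stand-in for MLXSW_REG_XMDR_TRANS_LEN */

    struct bulk_ctx {
        size_t trans_offset;    /* bytes packed so far */
        size_t trans_item_len;  /* fixed record size for this batch */
    };

    /* Account for the record just packed; return true if the hardware
     * write may be postponed because another record of the same size
     * still fits in the transaction buffer.
     */
    static bool bulk_may_postpone(struct bulk_ctx *ctx, bool bulk_ok)
    {
        ctx->trans_offset += ctx->trans_item_len;
        return bulk_ok &&
               ctx->trans_offset + ctx->trans_item_len <= TRANS_LEN;
    }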
-
-static bool mlxsw_sp_router_ll_xm_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
-
- return fib_entry->committed;
-}
-
-const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = {
- .init = mlxsw_sp_router_ll_xm_init,
- .ralta_write = mlxsw_sp_router_ll_xm_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_xm_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_xm_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_xm),
- .fib_entry_priv_size = sizeof(struct mlxsw_sp_router_xm_fib_entry),
- .fib_entry_pack = mlxsw_sp_router_ll_xm_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_xm_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_xm_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_xm_fib_entry_is_committed,
-};
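This ops table is the dispatch point between the common router code and the XM-specific low level: the core calls through mlxsw_sp_router_ll_ops function pointers and sizes the per-entry private areas from the two *_size fields. The indirection in miniature, with hypothetical names:

    #include <stddef.h>

    struct fib_ll_ops {
        size_t entry_priv_size;     /* per-entry private data */
        int (*entry_pack)(void *priv, const void *entry);
        int (*entry_commit)(void *priv);
    };

    /* Common code never knows which backend it is driving. */
    static int fib_entry_commit(const struct fib_ll_ops *ops, void *priv)
    {
        return ops->entry_commit(priv);
    }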
-
-#define MLXSW_SP_ROUTER_XM_MINDEX_SIZE (64 * 1024)
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm;
- char rxltm_pl[MLXSW_REG_RXLTM_LEN];
- char xltq_pl[MLXSW_REG_XLTQ_LEN];
- u32 mindex_size;
- u16 device_id;
- int err;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return 0;
-
- router_xm = kzalloc(sizeof(*router_xm), GFP_KERNEL);
- if (!router_xm)
- return -ENOMEM;
-
- mlxsw_reg_xltq_pack(xltq_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(xltq), xltq_pl);
- if (err)
- goto err_xltq_query;
- mlxsw_reg_xltq_unpack(xltq_pl, &device_id, &router_xm->ipv4_supported,
- &router_xm->ipv6_supported, &router_xm->entries_size, &mindex_size);
-
- if (device_id != MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XM device id\n");
- err = -EINVAL;
- goto err_device_id_check;
- }
-
- if (mindex_size != MLXSW_SP_ROUTER_XM_MINDEX_SIZE) {
- dev_err(mlxsw_sp->bus_info->dev, "Unexpected M-index size\n");
- err = -EINVAL;
- goto err_mindex_size_check;
- }
-
- mlxsw_reg_rxltm_pack(rxltm_pl, mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV4],
- mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV6]);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl);
- if (err)
- goto err_rxltm_write;
-
- err = rhashtable_init(&router_xm->ltable_ht, &mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_ltable_ht_init;
-
- err = rhashtable_init(&router_xm->flush_ht, &mlxsw_sp_router_xm_flush_ht_params);
- if (err)
- goto err_flush_ht_init;
-
- mlxsw_sp->router->xm = router_xm;
- return 0;
-
-err_flush_ht_init:
- rhashtable_destroy(&router_xm->ltable_ht);
-err_ltable_ht_init:
-err_rxltm_write:
-err_mindex_size_check:
-err_device_id_check:
-err_xltq_query:
- kfree(router_xm);
- return err;
-}
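The error path of the init function above is the canonical goto ladder: each failure jumps to a label that unwinds exactly the steps that succeeded, and steps that leave no state behind (the query, the two checks, the register write) share labels that fall through to a common cleanup. Schematically, with hypothetical helpers:

    struct ctx;

    int alloc_res(struct ctx *c);   /* hypothetical */
    void free_res(struct ctx *c);
    int query_hw(struct ctx *c);    /* leaves no state behind on failure */
    int init_table(struct ctx *c);

    static int setup(struct ctx *c)
    {
        int err;

        err = alloc_res(c);
        if (err)
            return err;

        err = query_hw(c);
        if (err)
            goto err_query_hw;

        err = init_table(c);
        if (err)
            goto err_init_table;

        return 0;

    err_init_table:         /* init_table() cleaned up after itself */
    err_query_hw:
        free_res(c);        /* undo the only step that left state */
        return err;
    }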
-
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return;
-
- rhashtable_destroy(&router_xm->flush_ht);
- rhashtable_destroy(&router_xm->ltable_ht);
- kfree(router_xm);
-}
-
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- return router_xm && router_xm->ipv4_supported;
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index f9671cc53002..b3472fb94617 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -106,8 +106,8 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_init;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
- mlxsw_sp_span_occ_get, mlxsw_sp);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
@@ -123,7 +123,7 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
cancel_work_sync(&mlxsw_sp->span->work);
- devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
@@ -269,8 +269,7 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
return NULL;
- if (!vid ||
- br_vlan_get_info(br_dev, vid, &vinfo) ||
+ if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
!(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
return NULL;
@@ -424,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+ 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 65c1724c63b0..4efccd942fb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -48,7 +48,8 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
- struct list_head mids_list;
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
u8 vlan_enabled:1,
multicast_enabled:1,
mrouter:1;
@@ -102,6 +103,33 @@ struct mlxsw_sp_switchdev_ops {
void (*init)(struct mlxsw_sp *mlxsw_sp);
};
+struct mlxsw_sp_mdb_entry_key {
+ unsigned char addr[ETH_ALEN];
+ u16 fid;
+};
+
+struct mlxsw_sp_mdb_entry {
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mdb_entry_key key;
+ u16 mid;
+ struct list_head ports_list;
+ u16 ports_count;
+};
+
+struct mlxsw_sp_mdb_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+ refcount_t refcount;
+ bool mrouter;
+};
+
+static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
+};
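The rhashtable_params above fully describe the new MDB table: key_offset/key_len locate the {MAC, FID} key inside each entry, head_offset locates the rhash_head, and since no obj_cmpfn is given the keys are compared bytewise, which is why the lookups later in this patch build a zero-initialized key struct before filling it (so any padding bytes compare equal). A condensed sketch of that usage pattern, with illustrative names:

    #include <linux/rhashtable.h>
    #include <linux/etherdevice.h>
    #include <linux/stddef.h>

    struct mdb_key {
        unsigned char addr[ETH_ALEN];
        u16 fid;
    };

    struct mdb_entry {
        struct rhash_head ht_node;
        struct mdb_key key;
    };

    static const struct rhashtable_params mdb_ht_params = {
        .key_offset = offsetof(struct mdb_entry, key),
        .head_offset = offsetof(struct mdb_entry, ht_node),
        .key_len = sizeof(struct mdb_key),
    };

    static struct mdb_entry *mdb_lookup(struct rhashtable *ht,
                                        const unsigned char *addr, u16 fid)
    {
        struct mdb_key key = {};    /* zeroed: key bytes are memcmp'd */

        ether_addr_copy(key.addr, addr);
        key.fid = fid;
        return rhashtable_lookup_fast(ht, &key, mdb_ht_params);
    }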
+
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -109,12 +137,13 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port);
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device
- *bridge_device);
+ *bridge_device, bool mc_enabled);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -207,6 +236,16 @@ static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
}
}
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
+ bool no_delay)
+{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
+
+ mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
+ msecs_to_jiffies(interval));
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev,
@@ -227,6 +266,10 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
if (!bridge_device)
return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_mdb_rhashtable_init;
+
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
@@ -244,7 +287,10 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
- INIT_LIST_HEAD(&bridge_device->mids_list);
+ INIT_LIST_HEAD(&bridge_device->mdb_list);
+
+ if (list_empty(&bridge->bridges_list))
+ mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
list_add(&bridge_device->list, &bridge->bridges_list);
/* It is possible we already have VXLAN devices enslaved to the bridge.
@@ -261,6 +307,8 @@ err_vxlan_init:
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
+ rhashtable_destroy(&bridge_device->mdb_ht);
+err_mdb_rhashtable_init:
kfree(bridge_device);
return ERR_PTR(err);
}
@@ -273,10 +321,13 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
bridge_device->dev);
list_del(&bridge_device->list);
+ if (list_empty(&bridge->bridges_list))
+ cancel_delayed_work(&bridge->fdb_notify.dw);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
- WARN_ON(!list_empty(&bridge_device->mids_list));
+ WARN_ON(!list_empty(&bridge_device->mdb_list));
+ rhashtable_destroy(&bridge_device->mdb_ht);
kfree(bridge_device);
}
@@ -629,6 +680,64 @@ err_port_bridge_vlan_flood_set:
}
static int
+mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type, local_port, member);
+ if (err)
+ goto err_fid_flood_set;
+ }
+
+ return 0;
+
+err_fid_flood_set:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &bridge_vlan->port_vlan_list,
+ list) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
+ local_port, !member);
+ }
+
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ member);
+ if (err)
+ goto err_bridge_vlans_flood_set;
+ }
+
+ return 0;
+
+err_bridge_vlans_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ !member);
+ return err;
+}
+
+static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
bool set)
@@ -799,6 +908,9 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
+
if (!bridge_port->bridge_device->multicast_enabled)
goto out;
@@ -808,8 +920,6 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
- is_port_mrouter);
out:
bridge_port->mrouter = is_port_mrouter;
return 0;
@@ -828,6 +938,7 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
bool mc_disabled)
{
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -840,43 +951,184 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
- if (bridge_device->multicast_enabled != !mc_disabled) {
- bridge_device->multicast_enabled = !mc_disabled;
- mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
- bridge_device);
- }
+ if (bridge_device->multicast_enabled == !mc_disabled)
+ return 0;
+
+ bridge_device->multicast_enabled = !mc_disabled;
+ err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ !mc_disabled);
+ if (err)
+ goto err_mc_enable_sync;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
- enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
bool member = mlxsw_sp_mc_flood(bridge_port);
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
- bridge_port,
- packet_type, member);
+ err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
+ packet_type,
+ member);
if (err)
- return err;
+ goto err_flood_table_set;
}
- bridge_device->multicast_enabled = !mc_disabled;
-
return 0;
+
+err_flood_table_set:
+ list_for_each_entry_continue_reverse(bridge_port,
+ &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
+ !member);
+ }
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ mc_disabled);
+err_mc_enable_sync:
+ bridge_device->multicast_enabled = mc_disabled;
+ return err;
}
-static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
- u16 mid_idx, bool add)
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
{
- char *smid2_pl;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
+ if (mdb_entry_port->local_port == local_port)
+ return mdb_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
int err;
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count++;
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+ mdb_entry->ports_count++;
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port, bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count--;
+ return;
+ }
+
+ mdb_entry->ports_count--;
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
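The two functions above follow the usual get/put lifetime pattern built on refcount_t: the first get allocates the object and programs the PGT entry, later gets only bump the count, and the last put, or a forced put on flush, tears both down again. Stripped of the hardware calls, the skeleton looks like this (illustrative names):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct port_ref {
        refcount_t refcount;
        /* ... per-port state ... */
    };

    static struct port_ref *port_ref_get(struct port_ref *p)
    {
        if (p) {
            refcount_inc(&p->refcount);     /* existing user */
            return p;
        }
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
            return NULL;
        refcount_set(&p->refcount, 1);      /* first user */
        return p;
    }

    static void port_ref_put(struct port_ref *p, bool force)
    {
        /* 'force' drops the object regardless of remaining users,
         * e.g. when the parent entry is being flushed.
         */
        if (!force && !refcount_dec_and_test(&p->refcount))
            return;
        kfree(p);
    }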
+
+static __always_unused struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (!mdb_entry_port->mrouter)
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ mdb_entry_port->mrouter = true;
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static __always_unused void
+mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!mdb_entry_port->mrouter)
+ return;
+
+ mdb_entry_port->mrouter = false;
+ if (!refcount_dec_and_test(&mdb_entry_port->refcount))
+ return;
+
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
}
static void
@@ -884,10 +1136,17 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device,
bool add)
{
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- list_for_each_entry(mid, &bridge_device->mids_list, list)
- mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
}
static int
@@ -1113,14 +1372,13 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last_port, last_vlan;
+ bool last_port;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
- last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
last_port = list_is_singular(&bridge_vlan->port_vlan_list);
@@ -1132,8 +1390,9 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
- if (last_vlan)
- mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
+ mlxsw_sp_fid_index(fid));
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
@@ -1234,8 +1493,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
- if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev))
+ if (br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -1423,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
+ const char *mac, u16 fid, u16 vid,
+ bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
@@ -1436,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
+ local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1451,18 +1711,18 @@ out:
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+ const char *mac, u16 fid, u16 vid,
+ bool adding, bool dynamic)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
+ adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
@@ -1524,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
- fdb_info->addr, fid_index,
+ fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
@@ -1533,8 +1793,9 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
vid, adding, false);
}
-static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid_idx, bool adding)
+static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mdb_entry *mdb_entry,
+ bool adding)
{
char *sfd_pl;
u8 num_rec;
@@ -1545,8 +1806,9 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
+ mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1560,79 +1822,17 @@ out:
return err;
}
-static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
- long *ports_bitmap,
- bool set_router_port)
-{
- char *smid2_pl;
- int err, i;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
- }
-
- mlxsw_reg_smid2_port_mask_set(smid2_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
-
- for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
-
- mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 mid_idx, bool add)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid2_pl;
- int err;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
-{
- struct mlxsw_sp_mid *mid;
-
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
- return mid;
- }
- return NULL;
-}
-
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
- unsigned long *ports_bitmap)
+ struct mlxsw_sp_ports_bitmap *ports_bm)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u64 max_lag_members, i;
int lag_id;
if (!bridge_port->lagged) {
- set_bit(bridge_port->system_port, ports_bitmap);
+ set_bit(bridge_port->system_port, ports_bm->bitmap);
} else {
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
@@ -1642,13 +1842,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
lag_id, i);
if (mlxsw_sp_port)
set_bit(mlxsw_sp_port->local_port,
- ports_bitmap);
+ ports_bm->bitmap);
}
}
}
static void
-mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp *mlxsw_sp)
{
@@ -1658,116 +1858,226 @@ mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
if (bridge_port->mrouter) {
mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
bridge_port,
- flood_bitmap);
+ flood_bm);
}
}
}
-static bool
-mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid,
- struct mlxsw_sp_bridge_device *bridge_device)
+static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
{
- long *flood_bitmap;
- int num_of_ports;
- u16 mid_idx;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ unsigned int nbits = ports_bm->nbits;
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, nbits) {
+ mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
+ mdb_entry,
+ i);
+ if (IS_ERR(mdb_entry_port)) {
+ nbits = i;
+ goto err_mrouter_port_get;
+ }
+ }
+
+ return 0;
+
+err_mrouter_port_get:
+ for_each_set_bit(i, ports_bm->bitmap, nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+ return PTR_ERR(mdb_entry_port);
+}
+
+static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+}
+
+static int
+mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
+{
+ struct mlxsw_sp_ports_bitmap ports_bm;
int err;
- mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
- MLXSW_SP_MID_MAX);
- if (mid_idx == MLXSW_SP_MID_MAX)
- return false;
+ err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
+ if (err)
+ return err;
- num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
- if (!flood_bitmap)
- return false;
+ mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
+
+ if (add)
+ err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
+ mdb_entry);
+ else
+ mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
+
+ mlxsw_sp_port_bitmap_fini(&ports_bm);
+ return err;
+}
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
- mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
- mid->mid = mid_idx;
- err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
- bridge_device->mrouter);
- bitmap_free(flood_bitmap);
+ ether_addr_copy(mdb_entry->key.addr, addr);
+ mdb_entry->key.fid = fid;
+ err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
if (err)
- return false;
+ goto err_pgt_mid_alloc;
+
+ INIT_LIST_HEAD(&mdb_entry->ports_list);
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
- true);
+ err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
+ true);
if (err)
- return false;
+ goto err_mdb_mrouters_set;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = true;
- return true;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port)) {
+ err = PTR_ERR(mdb_entry_port);
+ goto err_mdb_entry_port_get;
+ }
+
+ if (bridge_device->multicast_enabled) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+
+ err = rhashtable_insert_fast(&bridge_device->mdb_ht,
+ &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
+
+ return mdb_entry;
+
+err_rhashtable_insert:
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+err_mdb_entry_write:
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
+err_mdb_entry_port_get:
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+err_mdb_mrouters_set:
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+err_pgt_mid_alloc:
+ kfree(mdb_entry);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ u16 local_port, bool force)
{
- if (!mid->in_hw)
- return 0;
-
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = false;
- return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
- false);
+ list_del(&mdb_entry->list);
+ rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+ WARN_ON(!list_empty(&mdb_entry->ports_list));
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+ kfree(mdb_entry);
}
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- mid = kzalloc(sizeof(*mid), GFP_KERNEL);
- if (!mid)
- return NULL;
+ ether_addr_copy(key.addr, addr);
+ key.fid = fid;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (mdb_entry) {
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
- GFP_KERNEL);
- if (!mid->ports_in_mid)
- goto err_ports_in_mid_alloc;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
+ mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port))
+ return ERR_CAST(mdb_entry_port);
- ether_addr_copy(mid->addr, addr);
- mid->fid = fid;
- mid->in_hw = false;
+ return mdb_entry;
+ }
- if (!bridge_device->multicast_enabled)
- goto out;
+ return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
+ local_port);
+}
- if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
- goto err_write_mdb_entry;
+static bool
+mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_mdb_entry_port *removed_entry_port,
+ bool force)
+{
+ if (mdb_entry->ports_count > 1)
+ return false;
-out:
- list_add_tail(&mid->list, &bridge_device->mids_list);
- return mid;
+ if (force)
+ return true;
-err_write_mdb_entry:
- bitmap_free(mid->ports_in_mid);
-err_ports_in_mid_alloc:
- kfree(mid);
- return NULL;
+ if (!removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 1)
+ return false;
+
+ if (removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 2)
+ return false;
+
+ return true;
}
-static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
+ bool force)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err = 0;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
- if (bitmap_empty(mid->ports_in_mid,
- mlxsw_core_max_ports(mlxsw_sp->core))) {
- err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
- list_del(&mid->list);
- bitmap_free(mid->ports_in_mid);
- kfree(mid);
- }
- return err;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ /* Avoid a temporary situation in which the MDB entry points to an empty
+ * PGT entry, as otherwise packets will be temporarily dropped instead
+ * of being flooded. Instead, in this situation, call
+ * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
+ * then releases the PGT entry.
+ */
+ if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
+ mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
+ local_port, force);
+ else
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
+ force);
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1776,12 +2086,10 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1796,54 +2104,35 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
- fid_index);
- if (!mid) {
- netdev_err(dev, "Unable to allocate MC group\n");
- return -ENOMEM;
- }
- }
- set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
-
- if (!bridge_device->multicast_enabled)
- return 0;
-
- if (bridge_port->mrouter)
- return 0;
-
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set SMID\n");
- goto err_out;
- }
+ mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
+ mdb->addr, fid_index,
+ mlxsw_sp_port->local_port);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
return 0;
-
-err_out:
- mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- return err;
}
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_device
- *bridge_device)
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool mc_enabled)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_mid *mid;
- bool mc_enabled;
-
- mc_enabled = bridge_device->multicast_enabled;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (mc_enabled)
- mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
- bridge_device);
- else
- mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
+ if (err)
+ goto err_mdb_entry_write;
}
+ return 0;
+
+err_mdb_entry_write:
+ list_for_each_entry_continue_reverse(mdb_entry,
+ &bridge_device->mdb_list, list)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
+ return err;
}
static void
@@ -1851,14 +2140,20 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
bool add)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
}
}
@@ -1936,28 +2231,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int
-__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_mid *mid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (bridge_port->bridge_device->multicast_enabled &&
- !bridge_port->mrouter) {
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
- }
-
- err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
-
- return err;
-}
-
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1967,7 +2240,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
@@ -1983,32 +2257,44 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
+ ether_addr_copy(key.addr, mdb->addr);
+ key.fid = fid_index;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (!mdb_entry) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ mlxsw_sp_port->local_port, false);
+ return 0;
}
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid, *tmp;
+ struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
+ u16 local_port = mlxsw_sp_port->local_port;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
- if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
- __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
- mid);
- } else if (bridge_device->multicast_enabled &&
- bridge_port->mrouter) {
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- }
+ list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
+ list) {
+ if (mdb_entry->key.fid != fid_index)
+ continue;
+
+ if (bridge_port->mrouter)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
+ mdb_entry,
+ local_port);
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ local_port, true);
}
}
@@ -2616,21 +2902,19 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u16 local_port;
- u16 vid, fid;
bool do_notification = true;
int err;
mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@@ -2655,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
- err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
@@ -2717,8 +3002,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
- mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
@@ -2888,22 +3172,13 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
- bool no_delay)
-{
- struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
- unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
-
- mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
- msecs_to_jiffies(interval));
-}
-
#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
+ bool reschedule = false;
char *sfn_pl;
int queries;
u8 num_rec;
@@ -2918,6 +3193,9 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
+ if (list_empty(&bridge->bridges_list))
+ goto out;
+ reschedule = true;
queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
while (queries > 0) {
mlxsw_reg_sfn_pack(sfn_pl);
@@ -2937,6 +3215,8 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
out:
rtnl_unlock();
kfree(sfn_pl);
+ if (!reschedule)
+ return;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
@@ -3667,7 +3947,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
- mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
return 0;
err_register_switchdev_blocking_notifier:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 47b061b99160..f4bfdb6dab9c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -864,7 +864,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
.listeners_arr = {
MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
- false, SP_LLDP, DISCARD),
+ true, SP_LLDP, DISCARD),
},
},
{
@@ -953,16 +953,16 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPBC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPUC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
@@ -1298,8 +1298,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->policers_count; i++) {
policer_item = &trap->policer_items_arr[i];
- err = devlink_trap_policers_register(devlink,
- &policer_item->policer, 1);
+ err = devl_trap_policers_register(devlink,
+ &policer_item->policer, 1);
if (err)
goto err_trap_policer_register;
}
@@ -1309,8 +1309,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
err_trap_policer_register:
for (i--; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
return err;
@@ -1325,8 +1325,8 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
for (i = trap->policers_count - 1; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
@@ -1381,8 +1381,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->groups_count; i++) {
group_item = &trap->group_items_arr[i];
- err = devlink_trap_groups_register(devlink, &group_item->group,
- 1);
+ err = devl_trap_groups_register(devlink, &group_item->group, 1);
if (err)
goto err_trap_group_register;
}
@@ -1392,7 +1391,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
err_trap_group_register:
for (i--; i >= 0; i--) {
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
return err;
@@ -1408,7 +1407,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_group_item *group_item;
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
}
@@ -1469,8 +1468,8 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->traps_count; i++) {
trap_item = &trap->trap_items_arr[i];
- err = devlink_traps_register(devlink, &trap_item->trap, 1,
- mlxsw_sp);
+ err = devl_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
if (err)
goto err_trap_register;
}
@@ -1480,7 +1479,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_trap_register:
for (i--; i >= 0; i--) {
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
return err;
@@ -1496,7 +1495,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_item *trap_item;
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
}
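The devlink_* to devl_* renames in this file (and in spectrum_span.c above) switch to the variants that expect the caller to already hold the devlink instance lock instead of taking it internally. A caller outside such a locked section would presumably take the lock itself, roughly as below; this sketch assumes the devl_lock()/devl_unlock() helpers of this kernel generation:

    #include <net/devlink.h>

    /* devl_* calls require the devlink instance lock to be held. */
    static int my_traps_register(struct devlink *devlink,
                                 const struct devlink_trap *traps,
                                 size_t count, void *priv)
    {
        int err;

        devl_lock(devlink);
        err = devl_traps_register(devlink, traps, count, priv);
        devl_unlock(devlink);
        return err;
    }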
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 9e070ab3ed76..8da169663bda 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -27,8 +27,6 @@ enum {
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
- MLXSW_TRAP_ID_ARPBC = 0x50,
- MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -71,6 +69,8 @@ enum {
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
+ MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
@@ -133,6 +133,12 @@ enum mlxsw_event_trap_id {
MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
/* PTP Egress FIFO has a new entry */
MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
+ /* Downstream Device Status Change */
+ MLXSW_TRAP_ID_DSDSC = 0x321,
+ /* Binary Code Transfer Operation Executed Event */
+ MLXSW_TRAP_ID_BCTOE = 0x322,
+ /* Port mapping change */
+ MLXSW_TRAP_ID_PMLPE = 0x32E,
};
#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 93df3049cdc0..830363bafcce 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -28,6 +28,7 @@ config KS8842
config KS8851
tristate "Micrel KS8851 SPI"
depends on SPI
+ depends on PTP_1588_CLOCK_OPTIONAL
select MII
select CRC32
select EEPROM_93CX6
@@ -39,6 +40,7 @@ config KS8851
config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MII
select CRC32
select EEPROM_93CX6
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 6f34a61739b6..fecd43754cea 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -403,7 +403,7 @@ struct ks8851_net {
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
struct regulator *vdd_io;
- int gpio;
+ struct gpio_desc *gpio;
struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 691206f19ea7..cfbc900d4aeb 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -17,10 +17,9 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -703,9 +702,9 @@ static const struct net_device_ops ks8851_netdev_ops = {
static void ks8851_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *di)
{
- strlcpy(di->driver, "KS8851", sizeof(di->driver));
- strlcpy(di->version, "1.00", sizeof(di->version));
- strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+ strscpy(di->driver, "KS8851", sizeof(di->driver));
+ strscpy(di->version, "1.00", sizeof(di->version));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
}
static u32 ks8851_get_msglevel(struct net_device *dev)
@@ -1117,24 +1116,23 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
{
struct ks8851_net *ks = netdev_priv(netdev);
unsigned cider;
- int gpio;
int ret;
ks->netdev = netdev;
ks->tx_space = 6144;
- gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
- if (gpio == -EPROBE_DEFER)
- return gpio;
-
- ks->gpio = gpio;
- if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(dev, gpio,
- GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
- if (ret) {
- dev_err(dev, "reset gpio request failed\n");
- return ret;
- }
+ ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(ks->gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "reset gpio request failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(ks->gpio, "ks8851_rst_n");
+ if (ret) {
+ dev_err(dev, "failed to set reset gpio name: %d\n", ret);
+ return ret;
}
ks->vdd_io = devm_regulator_get(dev, "vdd-io");
@@ -1161,9 +1159,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
goto err_reg;
}
- if (gpio_is_valid(gpio)) {
+ if (ks->gpio) {
usleep_range(10000, 11000);
- gpio_set_value(gpio, 1);
+ gpiod_set_value_cansleep(ks->gpio, 0);
}
spin_lock_init(&ks->statelock);
@@ -1239,8 +1237,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
err_id:
ks8851_unregister_mdiobus(ks);
err_mdio:
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, 0);
+ if (ks->gpio)
+ gpiod_set_value_cansleep(ks->gpio, 1);
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1259,8 +1257,8 @@ void ks8851_remove_common(struct device *dev)
dev_info(dev, "remove\n");
unregister_netdev(priv->netdev);
- if (gpio_is_valid(priv->gpio))
- gpio_set_value(priv->gpio, 0);
+ if (priv->gpio)
+ gpiod_set_value_cansleep(priv->gpio, 1);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
}
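The KS8851 reset line is active low, and the gpiod API takes polarity from the firmware description, so the driver now requests the descriptor with GPIOD_OUT_HIGH (logically asserted, chip held in reset) and writes logical 0 to release it, where the legacy code drove raw pin values. A minimal sketch of the request/release pattern, assuming an active-low "reset-gpios" property (example_reset is illustrative):

	#include <linux/gpio/consumer.h>
	#include <linux/delay.h>

	static int example_reset(struct device *dev)
	{
		struct gpio_desc *rst;

		/* "reset" maps to the "reset-gpios" DT property */
		rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		if (IS_ERR(rst))
			return PTR_ERR(rst);	/* may be -EPROBE_DEFER */

		usleep_range(10000, 11000);	/* hold the chip in reset */
		gpiod_set_value_cansleep(rst, 0);	/* deassert reset */
		return 0;
	}

Because polarity now lives in the descriptor, the error paths above assert reset with logical 1 where the old code wrote a raw 0.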
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 0303e727e99f..70bc7253454f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -293,7 +293,7 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
*/
static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
{
- netif_rx_ni(skb);
+ netif_rx(skb);
}
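netif_rx_ni() was removed after netif_rx() learned to handle calls from process context itself, disabling bottom halves as needed, so process-context receive paths such as this SPI handler simply call netif_rx(). A one-function sketch, assuming an skb already filled in:

	#include <linux/netdevice.h>

	/* Deliver a received frame to the stack; after the conversion
	 * this is safe from both process and interrupt context.
	 */
	static void example_deliver(struct sk_buff *skb)
	{
		netif_rx(skb);
	}

The same substitution appears below in the enc28j60 receive path.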
/**
@@ -413,7 +413,8 @@ static int ks8851_probe_spi(struct spi_device *spi)
spi->bits_per_word = 8;
- ks = netdev_priv(netdev);
+ kss = netdev_priv(netdev);
+ ks = &kss->ks8851;
ks->lock = ks8851_lock_spi;
ks->unlock = ks8851_unlock_spi;
@@ -433,8 +434,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
IRQ_RXPSI) /* RX process stop */
ks->rc_ier = STD_IRQ;
- kss = to_ks8851_spi(ks);
-
kss->spidev = spi;
mutex_init(&kss->lock);
INIT_WORK(&kss->tx_work, ks8851_tx_work);
@@ -452,11 +451,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
return ks8851_probe_common(netdev, dev, msg_enable);
}
-static int ks8851_remove_spi(struct spi_device *spi)
+static void ks8851_remove_spi(struct spi_device *spi)
{
ks8851_remove_common(&spi->dev);
-
- return 0;
}
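SPI driver remove callbacks were converted tree-wide to return void because the SPI core ignored the returned value anyway; remove functions now just perform cleanup. A minimal sketch of the modern shape (example_* names are illustrative):

	#include <linux/spi/spi.h>

	static void example_remove(struct spi_device *spi)
	{
		/* cleanup only; there is no error to report */
	}

	static struct spi_driver example_driver = {
		.driver = { .name = "example" },
		.remove = example_remove,
	};

The enc28j60 and encx24j600 hunks below make the same change.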
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index d024983815da..e6acd1e7b263 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5225,7 +5225,6 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
* Linux network device functions
*/
-static unsigned long next_jiffies;
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netdev_netpoll(struct net_device *dev)
@@ -5411,10 +5410,12 @@ static int netdev_open(struct net_device *dev)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
+ unsigned long next_jiffies;
int i;
int p;
int rc = 0;
+ next_jiffies = jiffies + HZ * 2;
priv->multicast = 0;
priv->promiscuous = 0;
@@ -5428,10 +5429,7 @@ static int netdev_open(struct net_device *dev)
if (rc)
return rc;
for (i = 0; i < hw->mib_port_cnt; i++) {
- if (next_jiffies < jiffies)
- next_jiffies = jiffies + HZ * 2;
- else
- next_jiffies += HZ * 1;
+ next_jiffies += HZ * 1;
hw_priv->counter[i].time = next_jiffies;
hw->port_mib[i].state = media_disconnected;
port_init_cnt(hw, i);
@@ -6000,9 +5998,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(hw_priv->pdev),
sizeof(info->bus_info));
}
@@ -6563,6 +6561,7 @@ static void mib_read_work(struct work_struct *work)
struct dev_info *hw_priv =
container_of(work, struct dev_info, mib_read);
struct ksz_hw *hw = &hw_priv->hw;
+ unsigned long next_jiffies;
struct ksz_port_mib *mib;
int i;
@@ -6852,7 +6851,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
- result = pci_enable_device(pdev);
+ result = pcim_enable_device(pdev);
if (result)
return result;
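pcim_enable_device() is the device-managed variant of pci_enable_device(): the enable is undone automatically when the driver detaches, so the error and remove paths no longer need a matching pci_disable_device(). A minimal probe sketch under that assumption (example_probe is illustrative):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pdev); /* auto-disabled on detach */
		if (err)
			return err;

		/* BAR mapping, IRQ setup, etc. follow */
		return 0;
	}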
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 634ac7649c43..176efbeae127 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -975,7 +975,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
/* update statistics */
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
/*
@@ -1467,9 +1467,9 @@ static void enc28j60_restart_work_handler(struct work_struct *work)
static void
enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info,
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info,
dev_name(dev->dev.parent), sizeof(info->bus_info));
}
@@ -1612,15 +1612,13 @@ error_alloc:
return ret;
}
-static int enc28j60_remove(struct spi_device *spi)
+static void enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = spi_get_drvdata(spi);
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
free_netdev(priv->netdev);
-
- return 0;
}
static const struct of_device_id enc28j60_dt_ids[] = {
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b90efc80fb59..d7c8aa77ec75 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -925,9 +925,9 @@ static void encx24j600_get_regs(struct net_device *dev,
static void encx24j600_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -1093,7 +1093,7 @@ error_out:
return ret;
}
-static int encx24j600_spi_remove(struct spi_device *spi)
+static void encx24j600_spi_remove(struct spi_device *spi)
{
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1101,8 +1101,6 @@ static int encx24j600_spi_remove(struct spi_device *spi)
kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);
-
- return 0;
}
static const struct spi_device_id encx24j600_spi_id_table[] = {
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 91a755efe2e6..c739d60ee17d 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -7,6 +7,8 @@
#include <linux/phy.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#include <linux/sched.h>
+#include <linux/iopoll.h>
/* eeprom */
#define LAN743X_EEPROM_MAGIC (0x74A5)
@@ -19,6 +21,10 @@
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
+#define LOCK_TIMEOUT_MAX_CNT (100) // 1 sec (10 msec * 100)

+
+#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)
+
static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
{
u32 reg_value;
@@ -149,6 +155,217 @@ static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
return 0;
}
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+ u16 timeout)
+{
+ u16 timeout_cnt = 0;
+ u32 val;
+
+ do {
+ spin_lock(&adapter->eth_syslock_spinlock);
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG,
+ SYS_LOCK_REG_ENET_SS_LOCK_);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ if (val & SYS_LOCK_REG_ENET_SS_LOCK_) {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+ } else {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+
+ if (timeout_cnt++ < timeout)
+ usleep_range(10000, 11000);
+ else
+ return -ETIMEDOUT;
+ } while (true);
+
+ return 0;
+}
+
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ spin_lock(&adapter->eth_syslock_spinlock);
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+
+ if (adapter->eth_syslock_acquire_cnt) {
+ adapter->eth_syslock_acquire_cnt--;
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ WARN_ON((val & SYS_LOCK_REG_ENET_SS_LOCK_) != 0);
+ }
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+}
+
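The PCI11x1x (Hearthstone) parts share the EEPROM/OTP/SGMII blocks with other on-chip subsystems, so every access window is bracketed by a hardware semaphore in SYS_LOCK_REG; the spinlock-protected acquire count makes the lock recursive within the driver, and acquisition retries in roughly 10 ms steps up to the caller's timeout. A usage sketch built on the helpers above (example_locked_read is illustrative):

	static int example_locked_read(struct lan743x_adapter *adapter,
				       u32 *val)
	{
		int ret;

		/* LOCK_TIMEOUT_MAX_CNT bounds the wait at ~1 s */
		ret = lan743x_hs_syslock_acquire(adapter,
						 LOCK_TIMEOUT_MAX_CNT);
		if (ret < 0)
			return ret;	/* -ETIMEDOUT */

		*val = lan743x_csr_read(adapter, HS_OTP_STATUS);
		lan743x_hs_syslock_release(adapter);
		return 0;
	}

Note how the OTP and EEPROM loops below drop and retake the lock per byte, so other subsystems are not starved during long transfers.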
+static void lan743x_hs_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+ /* Flush the posted write so the subsequent delay is
+ * guaranteed to happen after the write reaches the hardware
+ */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+ /* Flush the posted write so the subsequent delay is
+ * guaranteed to happen after the write reaches the hardware
+ */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, HS_OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, HS_OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_hs_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, HS_OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_hs_otp_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_OTP_STATUS, val,
+ !(val & OTP_STATUS_BUSY_),
+ 80, 10000);
+}
+
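readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly runs val = op(addr) with sleeps in between until cond is true or timeout_us elapses, returning 0 or -ETIMEDOUT. The macro passes exactly one argument to op, so drivers whose read accessor also takes a device pointer define a shim, which is what LAN743X_CSR_READ_OP does earlier in this file. A minimal sketch of the pattern under a hypothetical accessor (example_*, STATUS_REG and READY_BIT are placeholders):

	#include <linux/iopoll.h>

	/* hypothetical two-argument accessor */
	u32 example_read(struct example_dev *dev, u32 offset);

	/* one-argument shim over the enclosing 'dev', as the macro needs */
	#define EXAMPLE_READ_OP(offset) example_read(dev, offset)

	static int example_wait_ready(struct example_dev *dev)
	{
		u32 val;

		return readx_poll_timeout(EXAMPLE_READ_OP, STATUS_REG, val,
					  val & READY_BIT, 50, 10000);
	}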
+static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_hs_otp_read_go(adapter);
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ data[i] = lan743x_csr_read(adapter, HS_OTP_READ_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
+static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, HS_OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_csr_write(adapter, HS_OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, HS_OTP_TST_CMD,
+ OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
{
unsigned long start_time = jiffies;
@@ -263,13 +480,107 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
return 0;
}
+static int lan743x_hs_eeprom_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_E2P_CMD, val,
+ (!(val & HS_E2P_CMD_EPC_BUSY_) ||
+ (val & HS_E2P_CMD_EPC_TIMEOUT_)),
+ 50, 10000);
+}
+
+static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ if (retval < 0) {
+ lan743x_hs_syslock_release(adapter);
+ return retval;
+ }
+
+ val = lan743x_csr_read(adapter, HS_E2P_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, HS_E2P_DATA, val);
+
+ /* Send "write" command */
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info,
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info,
pci_name(adapter->pdev), sizeof(info->bus_info));
}
@@ -304,10 +615,21 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
int ret = 0;
- if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- ret = lan743x_otp_read(adapter, ee->offset, ee->len, data);
- else
- ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_read(adapter, ee->offset,
+ ee->len, data);
+ } else {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ }
return ret;
}
@@ -321,13 +643,22 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
/* Beware! OTP is One Time Programming ONLY! */
if (ee->magic == LAN743X_OTP_MAGIC) {
- ret = lan743x_otp_write(adapter, ee->offset,
- ee->len, data);
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_write(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_write(adapter, ee->offset,
+ ee->len, data);
}
} else {
if (ee->magic == LAN743X_EEPROM_MAGIC) {
- ret = lan743x_eeprom_write(adapter, ee->offset,
- ee->len, data);
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_write(adapter,
+ ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_write(adapter, ee->offset,
+ ee->len, data);
}
}
@@ -365,6 +696,14 @@ static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
"RX Queue 3 Frames",
};
+static const char lan743x_tx_queue_cnt_strings[][ETH_GSTRING_LEN] = {
+ "TX Queue 0 Frames",
+ "TX Queue 1 Frames",
+ "TX Queue 2 Frames",
+ "TX Queue 3 Frames",
+ "TX Total Queue Frames",
+};
+
static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
"RX Total Frames",
"EEE RX LPI Transitions",
@@ -462,6 +801,8 @@ static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
static void lan743x_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, lan743x_set0_hw_cnt_strings,
@@ -473,6 +814,13 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev,
sizeof(lan743x_set1_sw_cnt_strings)],
lan743x_set2_hw_cnt_strings,
sizeof(lan743x_set2_hw_cnt_strings));
+ if (adapter->is_pci11x1x) {
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings) +
+ sizeof(lan743x_set2_hw_cnt_strings)],
+ lan743x_tx_queue_cnt_strings,
+ sizeof(lan743x_tx_queue_cnt_strings));
+ }
break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, lan743x_priv_flags_strings,
@@ -486,7 +834,9 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u64 total_queue_count = 0;
int data_index = 0;
+ u64 pkt_cnt;
u32 buf;
int i;
@@ -500,6 +850,14 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
data[data_index++] = (u64)buf;
}
+ if (adapter->is_pci11x1x) {
+ for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) {
+ pkt_cnt = (u64)(adapter->tx[i].frame_count);
+ data[data_index++] = pkt_cnt;
+ total_queue_count += pkt_cnt;
+ }
+ data[data_index++] = total_queue_count;
+ }
}
static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
@@ -520,6 +878,8 @@ static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
switch (sset) {
case ETH_SS_STATS:
{
@@ -528,6 +888,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ if (adapter->is_pci11x1x)
+ ret += ARRAY_SIZE(lan743x_tx_queue_cnt_strings);
return ret;
}
case ETH_SS_PRIV_FLAGS:
@@ -750,7 +1112,7 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
}
if (eee->eee_enabled) {
- ret = phy_init_eee(phydev, 0);
+ ret = phy_init_eee(phydev, false);
if (ret) {
netif_err(adapter, drv, adapter->netdev,
"EEE initialization failed\n");
@@ -787,7 +1149,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (adapter->is_pci11x1x)
+ wol->supported |= WAKE_MAGICSECURE;
+
wol->wolopts |= adapter->wolopts;
+ if (adapter->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, adapter->sopass, sizeof(wol->sopass));
}
static int lan743x_ethtool_set_wol(struct net_device *netdev,
@@ -808,6 +1175,13 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
adapter->wolopts |= WAKE_PHY;
if (wol->wolopts & WAKE_ARP)
adapter->wolopts |= WAKE_ARP;
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ wol->wolopts & WAKE_MAGIC) {
+ memcpy(adapter->sopass, wol->sopass, sizeof(wol->sopass));
+ adapter->wolopts |= WAKE_MAGICSECURE;
+ } else {
+ memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ }
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
@@ -816,6 +1190,49 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
}
#endif /* CONFIG_PM */
+static void lan743x_common_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ u32 *rb = p;
+
+ memset(p, 0, (MAX_LAN743X_ETH_REGS * sizeof(u32)));
+
+ rb[ETH_PRIV_FLAGS] = adapter->flags;
+ rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV);
+ rb[ETH_FPGA_REV] = lan743x_csr_read(adapter, FPGA_REV);
+ rb[ETH_STRAP_READ] = lan743x_csr_read(adapter, STRAP_READ);
+ rb[ETH_INT_STS] = lan743x_csr_read(adapter, INT_STS);
+ rb[ETH_HW_CFG] = lan743x_csr_read(adapter, HW_CFG);
+ rb[ETH_PMT_CTL] = lan743x_csr_read(adapter, PMT_CTL);
+ rb[ETH_E2P_CMD] = lan743x_csr_read(adapter, E2P_CMD);
+ rb[ETH_E2P_DATA] = lan743x_csr_read(adapter, E2P_DATA);
+ rb[ETH_MAC_CR] = lan743x_csr_read(adapter, MAC_CR);
+ rb[ETH_MAC_RX] = lan743x_csr_read(adapter, MAC_RX);
+ rb[ETH_MAC_TX] = lan743x_csr_read(adapter, MAC_TX);
+ rb[ETH_FLOW] = lan743x_csr_read(adapter, MAC_FLOW);
+ rb[ETH_MII_ACC] = lan743x_csr_read(adapter, MAC_MII_ACC);
+ rb[ETH_MII_DATA] = lan743x_csr_read(adapter, MAC_MII_DATA);
+ rb[ETH_EEE_TX_LPI_REQ_DLY] = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ rb[ETH_WUCSR] = lan743x_csr_read(adapter, MAC_WUCSR);
+ rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC);
+}
+
+static int lan743x_get_regs_len(struct net_device *dev)
+{
+ return MAX_LAN743X_ETH_REGS * sizeof(u32);
+}
+
+static void lan743x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ regs->version = LAN743X_ETH_REG_VERSION;
+
+ lan743x_common_regs(dev, regs, p);
+}
+
const struct ethtool_ops lan743x_ethtool_ops = {
.get_drvinfo = lan743x_ethtool_get_drvinfo,
.get_msglevel = lan743x_ethtool_get_msglevel,
@@ -840,6 +1257,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.set_eee = lan743x_ethtool_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = lan743x_get_regs_len,
+ .get_regs = lan743x_get_regs,
#ifdef CONFIG_PM
.get_wol = lan743x_ethtool_get_wol,
.set_wol = lan743x_ethtool_set_wol,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
index d0d11a777a58..7f5996a52488 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.h
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -6,6 +6,32 @@
#include "linux/ethtool.h"
+#define LAN743X_ETH_REG_VERSION 1
+
+enum {
+ ETH_PRIV_FLAGS,
+ ETH_ID_REV,
+ ETH_FPGA_REV,
+ ETH_STRAP_READ,
+ ETH_INT_STS,
+ ETH_HW_CFG,
+ ETH_PMT_CTL,
+ ETH_E2P_CMD,
+ ETH_E2P_DATA,
+ ETH_MAC_CR,
+ ETH_MAC_RX,
+ ETH_MAC_TX,
+ ETH_FLOW,
+ ETH_MII_ACC,
+ ETH_MII_DATA,
+ ETH_EEE_TX_LPI_REQ_DLY,
+ ETH_WUCSR,
+ ETH_WK_SRC,
+
+ /* Add new registers above */
+ MAX_LAN743X_ETH_REGS
+};
+
extern const struct ethtool_ops lan743x_ethtool_ops;
#endif /* _LAN743X_ETHTOOL_H */
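The register enum ends with MAX_LAN743X_ETH_REGS, the usual kernel idiom where the final enumerator doubles as the element count: registers added above it automatically grow the buffer reported by get_regs_len() and filled by get_regs(), and regs->version lets `ethtool -d` consumers track layout changes. A compressed sketch of the idiom (EXAMPLE_* names are placeholders):

	enum {
		EXAMPLE_REG_A,
		EXAMPLE_REG_B,
		/* add new registers above this line */
		EXAMPLE_MAX_REGS
	};

	static int example_get_regs_len(void)
	{
		return EXAMPLE_MAX_REGS * sizeof(u32);
	}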
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 8c6390d95158..50eeecba1f18 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -18,6 +18,67 @@
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#define MMD_ACCESS_ADDRESS 0
+#define MMD_ACCESS_WRITE 1
+#define MMD_ACCESS_READ 2
+#define MMD_ACCESS_READ_INC 3
+#define PCS_POWER_STATE_DOWN 0x6
+#define PCS_POWER_STATE_UP 0x4
+
+static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+{
+ u32 chip_rev;
+ u32 cfg_load;
+ u32 hw_cfg;
+ u32 strap;
+ int ret;
+
+ /* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
+ ret = lan743x_hs_syslock_acquire(adapter, 100);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Sys Lock acquire failed ret:%d\n", ret);
+ return;
+ }
+
+ cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
+ lan743x_hs_syslock_release(adapter);
+ hw_cfg = lan743x_csr_read(adapter, HW_CFG);
+
+ if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
+ hw_cfg & HW_CFG_RST_PROTECT_) {
+ strap = lan743x_csr_read(adapter, STRAP_READ);
+ if (strap & STRAP_READ_SGMII_EN_)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ } else {
+ chip_rev = lan743x_csr_read(adapter, FPGA_REV);
+ if (chip_rev) {
+ if (chip_rev & FPGA_SGMII_OP)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ } else {
+ adapter->is_sgmii_en = false;
+ }
+ }
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
+}
+
+static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
+{
+ struct lan743x_csr *csr = &adapter->csr;
+ u32 id_rev = csr->id_rev;
+
+ if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
+ ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
+ return true;
+ }
+ return false;
+}
+
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
pci_release_selected_regions(adapter->pdev,
@@ -250,7 +311,7 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
}
}
if (int_sts & INT_BIT_ALL_TX_) {
- for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
+ for (channel = 0; channel < adapter->used_tx_channels;
channel++) {
u32 int_bit = INT_BIT_DMA_TX_(channel);
@@ -410,7 +471,7 @@ static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
{
int index;
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < adapter->max_vector_count; index++) {
if (adapter->intr.vector_list[index].int_mask & int_mask)
return adapter->intr.vector_list[index].flags;
}
@@ -423,9 +484,12 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter)
int index = 0;
lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
- lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
+ else
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < intr->number_of_vectors; index++) {
if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
lan743x_intr_unregister_isr(adapter, index);
intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
@@ -445,9 +509,11 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter)
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
- struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
+ struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
struct lan743x_intr *intr = &adapter->intr;
+ unsigned int used_tx_channels;
u32 int_vec_en_auto_clr = 0;
+ u8 max_vector_count;
u32 int_vec_map0 = 0;
u32 int_vec_map1 = 0;
int ret = -ENODEV;
@@ -457,13 +523,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
intr->number_of_vectors = 0;
/* Try to set up MSIX interrupts */
+ max_vector_count = adapter->max_vector_count;
memset(&msix_entries[0], 0,
- sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
+ sizeof(struct msix_entry) * max_vector_count);
+ for (index = 0; index < max_vector_count; index++)
msix_entries[index].entry = index;
+ used_tx_channels = adapter->used_tx_channels;
ret = pci_enable_msix_range(adapter->pdev,
msix_entries, 1,
- 1 + LAN743X_USED_TX_CHANNELS +
+ 1 + used_tx_channels +
LAN743X_USED_RX_CHANNELS);
if (ret > 0) {
@@ -556,8 +624,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
- lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
- lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ if (adapter->is_pci11x1x) {
+ lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
+ } else {
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ }
lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
}
@@ -570,8 +645,8 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
if (intr->number_of_vectors > 1) {
int number_of_tx_vectors = intr->number_of_vectors - 1;
- if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
- number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
+ if (number_of_tx_vectors > used_tx_channels)
+ number_of_tx_vectors = used_tx_channels;
flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
@@ -609,9 +684,9 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
INT_VEC_EN_(vector));
}
}
- if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
+ if ((intr->number_of_vectors - used_tx_channels) > 1) {
int number_of_rx_vectors = intr->number_of_vectors -
- LAN743X_USED_TX_CHANNELS - 1;
+ used_tx_channels - 1;
if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
@@ -632,7 +707,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
}
for (index = 0; index < number_of_rx_vectors; index++) {
- int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
+ int vector = index + 1 + used_tx_channels;
u32 int_bit = INT_BIT_DMA_RX_(index);
/* map RX interrupt to vector */
@@ -760,6 +835,408 @@ static int lan743x_mdiobus_write(struct mii_bus *bus,
return ret;
}
+static u32 lan743x_mac_mmd_access(int id, int index, int op)
+{
+ u16 dev_addr;
+ u32 ret;
+
+ dev_addr = (index >> 16) & 0x1f;
+ ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+ MAC_MII_ACC_PHY_ADDR_MASK_;
+ ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
+ MAC_MII_ACC_MIIMMD_MASK_;
+ if (op == MMD_ACCESS_WRITE)
+ ret |= MAC_MII_ACC_MIICMD_WRITE_;
+ else if (op == MMD_ACCESS_READ)
+ ret |= MAC_MII_ACC_MIICMD_READ_;
+ else if (op == MMD_ACCESS_READ_INC)
+ ret |= MAC_MII_ACC_MIICMD_READ_INC_;
+ else
+ ret |= MAC_MII_ACC_MIICMD_ADDR_;
+ ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
+
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
+ }
+
+ ret = lan743x_mdiobus_read(bus, phy_id, index);
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ } else {
+ ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
+ }
+
+ return ret;
+}
+
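These helpers implement clause-45 MDIO through the MAC's clause-22 register pair: the MMD device address travels in bits 20:16 of the register index (hence the `index >> 16` above), MII_ADDR_C45 flags the encoding, and each access is a two-step address-load then read/write against MAC_MII_ACC/MAC_MII_DATA. A sketch of the index encoding this pre-phydev-c45-split convention uses:

	#include <linux/mdio.h>

	/* Pack an MMD device address and register number into the
	 * combined index form tested with MII_ADDR_C45 above.
	 */
	static int example_c45_index(int devad, int regnum)
	{
		return MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
	}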
+static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+ u32 data;
+ int ret;
+
+ ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
+ !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "%s: error %d sgmii wait timeout\n", __func__, ret);
+
+ return ret;
+}
+
+static int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
+{
+ u32 mmd_access;
+ int ret;
+ u32 val;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should <= 31\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ goto sgmii_unlock;
+
+ val = lan743x_csr_read(adapter, SGMII_DATA);
+ ret = (int)(val & SGMII_DATA_MASK_);
+
+sgmii_unlock:
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
+ u8 mmd, u16 addr, u16 val)
+{
+ u32 mmd_access;
+ int ret;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should <= 31\n", __func__);
+ return -EINVAL;
+ }
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Data */
+ lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
+ u16 baud)
+{
+ int mpllctrl0;
+ int mpllctrl1;
+ int miscctrl1;
+ int ret;
+
+ mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0);
+ if (mpllctrl0 < 0)
+ return mpllctrl0;
+
+ mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
+ if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
+ /* mpll_baud_clk/4 */
+ miscctrl1 = 0xA;
+ } else {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
+ /* mpll_baud_clk/2 */
+ miscctrl1 = 0x5;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
+}
+
+static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
+ bool enable)
+{
+ if (enable)
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_3P125GBPS);
+ else
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_1P25GBPS);
+}
+
+static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
+ bool *status)
+{
+ int ret;
+
+ ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
+ ret == VR_MII_MPLL_MULTIPLIER_50)
+ *status = true;
+ else
+ *status = false;
+
+ return 0;
+}
+
+static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+{
+ enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
+ int mii_ctrl;
+ int dgt_ctrl;
+ int an_ctrl;
+ int ret;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
+ /* Switch to 2.5 Gbps */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
+ else
+ /* Switch to 10/100/1000 Mbps clock */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
+ if (ret < 0)
+ return ret;
+
+ /* Enable SGMII Auto NEG */
+ mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctrl < 0)
+ return mii_ctrl;
+
+ an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
+ if (an_ctrl < 0)
+ return an_ctrl;
+
+ dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1);
+ if (dgt_ctrl < 0)
+ return dgt_ctrl;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
+ mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
+ mii_ctrl |= BMCR_SPEED1000;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ /* For Auto-Negotiation to operate properly at 2.5 Gbps,
+ * the 1.6 ms link timer value must be adjusted: the
+ * VR_MII_LINK_TIMER_CTRL register must be set to 16'h7A1
+ * and the CL37_TMR_OVR_RIDE bit of the VR_MII_DIG_CTRL1
+ * register set to 1.
+ */
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_LINK_TIMER_CTRL, 0x7A1);
+ if (ret < 0)
+ return ret;
+ } else {
+ mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
+ mii_ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1, dgt_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_AN_CTRL, an_ctrl);
+}
+
+static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
+{
+ u8 wait_cnt = 0;
+ u32 dig_sts;
+
+ do {
+ dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_STS);
+ if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
+ VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
+ break;
+ usleep_range(1000, 2000);
+ } while (wait_cnt++ < 10);
+
+ if (wait_cnt >= 10)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
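lan743x_pcs_seq_state() hand-rolls a bounded poll (ten 1-2 ms sleeps) for the PCS power sequencing state. The same wait can be expressed with read_poll_timeout(), which handles the loop, sleep, and timeout bookkeeping; a sketch under the same ~10 ms budget (illustrative only, the hand-rolled loop is what the patch ships):

	#include <linux/iopoll.h>

	static int example_pcs_wait(struct lan743x_adapter *adapter, u8 state)
	{
		int dig_sts;

		/* a read error fails the condition and surfaces as timeout */
		return read_poll_timeout(lan743x_sgmii_read, dig_sts,
					 ((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
					  VR_MII_DIG_STS_PSEQ_STATE_POS_) == state,
					 1000, 10000, false,
					 adapter, MDIO_MMD_VEND2, VR_MII_DIG_STS);
	}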
+static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phy_device *phydev = netdev->phydev;
+ enum lan743x_sgmii_lsd lsd = POWER_DOWN;
+ int mii_ctl;
+ bool status;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_2500_MASTER;
+ else
+ lsd = LINK_2500_SLAVE;
+ break;
+ case SPEED_1000:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_1000_MASTER;
+ else
+ lsd = LINK_1000_SLAVE;
+ break;
+ case SPEED_100:
+ if (phydev->duplex)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (phydev->duplex)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "Invalid speed %d\n", phydev->speed);
+ return -EINVAL;
+ }
+
+ adapter->sgmii_lsd = lsd;
+ ret = lan743x_sgmii_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d SGMII cfg failed\n", ret);
+ return ret;
+ }
+
+ ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "erro %d SGMII get mode failed\n", ret);
+ return ret;
+ }
+
+ if (status)
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 2.5G mode enable\n");
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 1G mode enable\n");
+
+ /* SGMII/1000/2500BASE-X PCS power down */
+ mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctl < 0)
+ return mii_ctl;
+
+ mii_ctl |= BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
+ if (ret < 0)
+ return ret;
+
+ /* SGMII/1000/2500BASE-X PCS power up */
+ mii_ctl &= ~BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
u8 *addr)
{
@@ -975,6 +1452,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data |= MAC_CR_CFG_H_;
data &= ~MAC_CR_CFG_L_;
break;
+ case SPEED_2500:
+ data |= MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
}
lan743x_csr_write(adapter, MAC_CR, data);
@@ -986,6 +1467,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
lan743x_phy_update_flowcontrol(adapter, local_advertisement,
remote_advertisement);
lan743x_ptp_update_latency(adapter, phydev->speed);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
+ lan743x_sgmii_config(adapter);
}
}
@@ -1015,9 +1500,14 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
if (!phydev)
goto return_error;
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_GMII);
+ if (adapter->is_pci11x1x)
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ PHY_INTERFACE_MODE_RGMII);
+ else
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ PHY_INTERFACE_MODE_GMII);
if (ret)
goto return_error;
}
@@ -1095,6 +1585,9 @@ static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
rfctl |= RFE_CTL_AM_;
}
+ if (netdev->features & NETIF_F_RXCSUM)
+ rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
+
memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
if (netdev_mc_count(netdev)) {
struct netdev_hw_addr *ha;
@@ -1576,11 +2069,13 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
{
int required_number_of_descriptors = 0;
unsigned int start_frame_length = 0;
+ netdev_tx_t retval = NETDEV_TX_OK;
unsigned int frame_length = 0;
unsigned int head_length = 0;
unsigned long irq_flags = 0;
bool do_timestamp = false;
bool ignore_sync = false;
+ struct netdev_queue *txq;
int nr_frags = 0;
bool gso = false;
int j;
@@ -1593,9 +2088,12 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors > (tx->ring_size - 1)) {
dev_kfree_skb_irq(skb);
} else {
- /* save to overflow buffer */
- tx->overflow_skb = skb;
- netif_stop_queue(tx->adapter->netdev);
+ /* save how many descriptors we needed to restart the queue */
+ tx->rqd_descriptors = required_number_of_descriptors;
+ retval = NETDEV_TX_BUSY;
+ txq = netdev_get_tx_queue(tx->adapter->netdev,
+ tx->channel_number);
+ netif_tx_stop_queue(txq);
}
goto unlock;
}
@@ -1627,6 +2125,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
dev_kfree_skb_irq(skb);
goto unlock;
}
+ tx->frame_count++;
if (gso)
lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
@@ -1653,15 +2152,15 @@ finish:
unlock:
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
- return NETDEV_TX_OK;
+ return retval;
}
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
struct lan743x_adapter *adapter = tx->adapter;
- bool start_transmitter = false;
unsigned long irq_flags = 0;
+ struct netdev_queue *txq;
u32 ioc_bit = 0;
ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
@@ -1672,24 +2171,20 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
/* clean up tx ring */
lan743x_tx_release_completed_descriptors(tx);
- if (netif_queue_stopped(adapter->netdev)) {
- if (tx->overflow_skb) {
- if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
- lan743x_tx_get_avail_desc(tx))
- start_transmitter = true;
+ txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
+ if (netif_tx_queue_stopped(txq)) {
+ if (tx->rqd_descriptors) {
+ if (tx->rqd_descriptors <=
+ lan743x_tx_get_avail_desc(tx)) {
+ tx->rqd_descriptors = 0;
+ netif_tx_wake_queue(txq);
+ }
} else {
- netif_wake_queue(adapter->netdev);
+ netif_tx_wake_queue(txq);
}
}
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
- if (start_transmitter) {
- /* space is now available, transmit overflow skb */
- lan743x_tx_xmit_frame(tx, tx->overflow_skb);
- tx->overflow_skb = NULL;
- netif_wake_queue(adapter->netdev);
- }
-
if (!napi_complete(napi))
goto done;
@@ -1813,10 +2308,7 @@ static void lan743x_tx_close(struct lan743x_tx *tx)
lan743x_tx_release_all_descriptors(tx);
- if (tx->overflow_skb) {
- dev_kfree_skb(tx->overflow_skb);
- tx->overflow_skb = NULL;
- }
+ tx->rqd_descriptors = 0;
lan743x_tx_ring_cleanup(tx);
}
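The single overflow_skb scheme (park the one skb that did not fit and replay it from NAPI) is replaced by the standard per-queue stop/wake protocol: xmit records how many descriptors it needed, stops only its own queue, and returns NETDEV_TX_BUSY so the core requeues the skb; the completion path wakes the queue once that many descriptors are free. A condensed sketch of the protocol (example_* names and fields are illustrative):

	static netdev_tx_t example_xmit(struct example_tx *tx,
					struct sk_buff *skb)
	{
		struct netdev_queue *txq =
			netdev_get_tx_queue(tx->ndev, tx->channel);
		int need = example_desc_count(skb);

		if (need > example_avail_descs(tx)) {
			tx->rqd_descriptors = need;	/* wake threshold */
			netif_tx_stop_queue(txq);
			return NETDEV_TX_BUSY;	/* core requeues the skb */
		}
		/* ... map buffers and post descriptors ... */
		return NETDEV_TX_OK;
	}

Per-queue stop/wake (netif_tx_stop_queue/netif_tx_wake_queue) matters once the device is multi-queue: the old netif_stop_queue() would have frozen every queue because one ring filled.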
@@ -1894,9 +2386,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
INT_BIT_DMA_TX_
(tx->channel_number));
- netif_tx_napi_add(adapter->netdev,
- &tx->napi, lan743x_tx_napi_poll,
- tx->ring_size - 1);
+ netif_napi_add_tx_weight(adapter->netdev,
+ &tx->napi, lan743x_tx_napi_poll,
+ NAPI_POLL_WEIGHT);
napi_enable(&tx->napi);
data = 0;
@@ -2058,6 +2550,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
struct lan743x_rx_buffer_info *buffer_info;
int frame_length, buffer_length;
+ bool is_ice, is_tce, is_icsm;
int extension_index = -1;
bool is_last, is_first;
struct sk_buff *skb;
@@ -2104,6 +2597,9 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
frame_length =
RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
buffer_length = buffer_info->buffer_length;
+ is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
+ is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
+ is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
netdev_dbg(netdev, "%s%schunk: %d/%d",
is_first ? "first " : " ",
@@ -2172,6 +2668,10 @@ process_extension:
if (is_last && rx->skb_head) {
rx->skb_head->protocol = eth_type_trans(rx->skb_head,
rx->adapter->netdev);
+ if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (!is_ice && !is_tce && !is_icsm)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
netdev_dbg(netdev, "sending %d byte frame to OS",
rx->skb_head->len);
napi_gro_receive(&rx->napi, rx->skb_head);
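With NETIF_F_RXCSUM enabled, a frame whose IP checksum, TCP checksum, and checksum-mismatch status bits are all clear is marked CHECKSUM_UNNECESSARY, telling the stack to skip software verification; any error bit leaves the skb at the default CHECKSUM_NONE so software validates it. A minimal sketch of that decision (example_mark_rx_csum is illustrative):

	#include <linux/skbuff.h>

	static void example_mark_rx_csum(struct sk_buff *skb, bool hw_ok,
					 bool rxcsum_enabled)
	{
		if (rxcsum_enabled && hw_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* otherwise skb->ip_summed stays CHECKSUM_NONE */
	}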
@@ -2375,9 +2875,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
if (ret)
goto return_error;
- netif_napi_add(adapter->netdev,
- &rx->napi, lan743x_rx_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
lan743x_csr_write(adapter, DMAC_CMD,
DMAC_CMD_RX_SWR_(rx->channel_number));
@@ -2491,7 +2989,8 @@ static int lan743x_netdev_close(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
int index;
- lan743x_tx_close(&adapter->tx[0]);
+ for (index = 0; index < adapter->used_tx_channels; index++)
+ lan743x_tx_close(&adapter->tx[index]);
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
@@ -2537,12 +3036,19 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_rx;
}
- ret = lan743x_tx_open(&adapter->tx[0]);
- if (ret)
- goto close_rx;
-
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ ret = lan743x_tx_open(&adapter->tx[index]);
+ if (ret)
+ goto close_tx;
+ }
return 0;
+close_tx:
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ if (adapter->tx[index].ring_cpu_ptr)
+ lan743x_tx_close(&adapter->tx[index]);
+ }
+
close_rx:
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
if (adapter->rx[index].ring_cpu_ptr)
@@ -2569,8 +3075,12 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u8 ch = 0;
- return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
+ if (adapter->is_pci11x1x)
+ ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
+
+ return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
}
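Because the netdev is now allocated with multiple TX queues (see the devm_alloc_etherdev_mqs() hunk below), the core assigns skb->queue_mapping before ndo_start_xmit; folding it modulo the channel count pins each stack queue to a hardware ring on PCI11x1x, while LAN743x keeps using channel 0. A sketch of the selection (example_pick_channel is illustrative):

	#include <linux/skbuff.h>

	static u8 example_pick_channel(const struct sk_buff *skb,
				       u8 used_channels)
	{
		return skb_get_queue_mapping(skb) % used_channels;
	}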
static int lan743x_netdev_ioctl(struct net_device *netdev,
@@ -2701,6 +3211,20 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
int index;
int ret;
+ adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
+ if (adapter->is_pci11x1x) {
+ adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
+ adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
+ pci11x1x_strap_get_status(adapter);
+ spin_lock_init(&adapter->eth_syslock_spinlock);
+ mutex_init(&adapter->sgmii_rw_lock);
+ } else {
+ adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+ adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
+ }
+
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
@@ -2731,15 +3255,19 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->rx[index].channel_number = index;
}
- tx = &adapter->tx[0];
- tx->adapter = adapter;
- tx->channel_number = 0;
- spin_lock_init(&tx->ring_lock);
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ tx = &adapter->tx[index];
+ tx->adapter = adapter;
+ tx->channel_number = index;
+ spin_lock_init(&tx->ring_lock);
+ }
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
+ u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -2749,9 +3277,42 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
}
adapter->mdiobus->priv = (void *)adapter;
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
- adapter->mdiobus->name = "lan743x-mdiobus";
+ if (adapter->is_pci11x1x) {
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII operation\n");
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
+ adapter->mdiobus->read = lan743x_mdiobus_c45_read;
+ adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->name = "lan743x-mdiobus-c45";
+ netif_dbg(adapter, drv, adapter->netdev,
+ "lan743x-mdiobus-c45\n");
+ } else {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "RGMII operation\n");
+ // Only C22 is supported on the RGMII I/F
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
+ adapter->mdiobus->read = lan743x_mdiobus_read;
+ adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev,
+ "lan743x-mdiobus\n");
+ }
+ } else {
+ adapter->mdiobus->read = lan743x_mdiobus_read;
+ adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
+ }
+
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));
@@ -2786,8 +3347,19 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
struct net_device *netdev = NULL;
int ret = -ENODEV;
- netdev = devm_alloc_etherdev(&pdev->dev,
- sizeof(struct lan743x_adapter));
+ if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
+ id->device == PCI_DEVICE_ID_SMSC_A041) {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ PCI11X1X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ } else {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ LAN743X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ }
+
if (!netdev)
goto return_error;
@@ -2820,7 +3392,8 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
adapter->netdev->netdev_ops = &lan743x_netdev_ops;
adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
- adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+ adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
/* carrier off reporting is important to ethtool even BEFORE open */
@@ -2898,6 +3471,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
int mask_index;
+ u32 sopass;
u32 pmtctl;
u32 wucsr;
u32 macrx;
@@ -2992,6 +3566,14 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
}
+ if (adapter->wolopts & WAKE_MAGICSECURE) {
+ sopass = *(u32 *)adapter->sopass;
+ lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
+ sopass = *(u16 *)&adapter->sopass[4];
+ lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
+ wucsr |= MAC_MP_SO_EN_;
+ }
+
lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
lan743x_csr_write(adapter, PMT_CTL, pmtctl);
lan743x_csr_write(adapter, MAC_RX, macrx);
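The 6-byte SecureON password is split across two registers: bytes 0-3 as a 32-bit word in MAC_MP_SO_LO and bytes 4-5 as a 16-bit word in MAC_MP_SO_HI. The direct casts above assume natural alignment of the sopass array; a defensive sketch using the unaligned little-endian accessors (illustrative, not what the patch ships):

	#include <asm/unaligned.h>

	static void example_write_sopass(struct lan743x_adapter *adapter)
	{
		lan743x_csr_write(adapter, MAC_MP_SO_LO,
				  get_unaligned_le32(&adapter->sopass[0]));
		lan743x_csr_write(adapter, MAC_MP_SO_HI,
				  get_unaligned_le16(&adapter->sopass[4]));
	}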
@@ -3002,6 +3584,7 @@ static int lan743x_pm_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
lan743x_pcidev_shutdown(pdev);
@@ -3013,6 +3596,18 @@ static int lan743x_pm_suspend(struct device *dev)
if (adapter->wolopts)
lan743x_pm_set_wol(adapter);
+ if (adapter->is_pci11x1x) {
+ /* Save HW_CFG to config again in PM resume */
+ data = lan743x_csr_read(adapter, HW_CFG);
+ adapter->hw_cfg = data;
+ data |= (HW_CFG_RST_PROTECT_PCIE_ |
+ HW_CFG_D3_RESET_DIS_ |
+ HW_CFG_D3_VAUX_OVR_ |
+ HW_CFG_HOT_RESET_DIS_ |
+ HW_CFG_RST_PROTECT_);
+ lan743x_csr_write(adapter, HW_CFG, data);
+ }
+
/* Host sets PME_En, put D3hot */
return pci_prepare_to_sleep(pdev);
}
@@ -3028,6 +3623,10 @@ static int lan743x_pm_resume(struct device *dev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ /* Restore HW_CFG that was saved during pm suspend */
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret) {
netif_err(adapter, probe, adapter->netdev,
@@ -3044,6 +3643,9 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_info(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
return 0;
}
@@ -3056,6 +3658,8 @@ static const struct dev_pm_ops lan743x_pm_ops = {
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
{ 0, }
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index aaf7aaeaba0c..67877d3b6dd9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -16,8 +16,13 @@
#define ID_REV_ID_MASK_ (0xFFFF0000)
#define ID_REV_ID_LAN7430_ (0x74300000)
#define ID_REV_ID_LAN7431_ (0x74310000)
-#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
- (((id_rev) & 0xFFF00000) == 0x74300000)
+#define ID_REV_ID_LAN743X_ (0x74300000)
+#define ID_REV_ID_A011_ (0xA0110000) /* PCI11010 */
+#define ID_REV_ID_A041_ (0xA0410000) /* PCI11414 */
+#define ID_REV_ID_A0X1_ (0xA0010000)
+#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
+ ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \
+ (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_))
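+/* Worked example: the 0xFF0F0000 mask folds both Hearthstone IDs onto
+ * ID_REV_ID_A0X1_: 0xA0110000 & 0xFF0F0000 == 0xA0410000 & 0xFF0F0000 ==
+ * 0xA0010000, so PCI11010 and PCI11414 both pass the check.
+ */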
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
@@ -25,8 +30,24 @@
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
+#define FPGA_SGMII_OP BIT(24)
+
+#define STRAP_READ (0x0C)
+#define STRAP_READ_USE_SGMII_EN_ BIT(22)
+#define STRAP_READ_SGMII_EN_ BIT(6)
+#define STRAP_READ_SGMII_REFCLK_ BIT(5)
+#define STRAP_READ_SGMII_2_5G_ BIT(4)
+#define STRAP_READ_BASE_X_ BIT(3)
+#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2)
+#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1)
+#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
#define HW_CFG (0x010)
+#define HW_CFG_RST_PROTECT_PCIE_ BIT(19)
+#define HW_CFG_HOT_RESET_DIS_ BIT(15)
+#define HW_CFG_D3_VAUX_OVR_ BIT(14)
+#define HW_CFG_D3_RESET_DIS_ BIT(13)
+#define HW_CFG_RST_PROTECT_ BIT(12)
#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
#define HW_CFG_EE_OTP_RELOAD_ BIT(4)
#define HW_CFG_LRST_ BIT(1)
@@ -70,6 +91,45 @@
#define E2P_DATA (0x044)
+/* Hearthstone top level & System Reg Addresses */
+#define ETH_CTRL_REG_ADDR_BASE (0x0000)
+#define ETH_SYS_REG_ADDR_BASE (0x4000)
+#define CONFIG_REG_ADDR_BASE (0x0000)
+#define ETH_EEPROM_REG_ADDR_BASE (0x0E00)
+#define ETH_OTP_REG_ADDR_BASE (0x1000)
+#define GEN_SYS_CONFIG_LOAD_STARTED_REG (0x0078)
+#define ETH_SYS_CONFIG_LOAD_STARTED_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ GEN_SYS_CONFIG_LOAD_STARTED_REG)
+#define GEN_SYS_LOAD_STARTED_REG_ETH_ BIT(4)
+#define SYS_LOCK_REG (0x00A0)
+#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7)
+#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5)
+#define SYS_LOCK_REG_SPI_PERI_LOCK_ BIT(4)
+#define SYS_LOCK_REG_SMBUS_PERI_LOCK_ BIT(3)
+#define SYS_LOCK_REG_UART_SS_LOCK_ BIT(2)
+#define SYS_LOCK_REG_ENET_SS_LOCK_ BIT(1)
+#define SYS_LOCK_REG_USB_SS_LOCK_ BIT(0)
+#define ETH_SYSTEM_SYS_LOCK_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ SYS_LOCK_REG)
+#define HS_EEPROM_REG_ADDR_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_EEPROM_REG_ADDR_BASE)
+#define HS_E2P_CMD (HS_EEPROM_REG_ADDR_BASE + 0x0000)
+#define HS_E2P_CMD_EPC_BUSY_ BIT(31)
+#define HS_E2P_CMD_EPC_CMD_WRITE_ GENMASK(29, 28)
+#define HS_E2P_CMD_EPC_CMD_READ_ (0x0)
+#define HS_E2P_CMD_EPC_TIMEOUT_ BIT(17)
+#define HS_E2P_CMD_EPC_ADDR_MASK_ GENMASK(15, 0)
+#define HS_E2P_DATA (HS_EEPROM_REG_ADDR_BASE + 0x0004)
+#define HS_E2P_DATA_MASK_ GENMASK(7, 0)
+#define HS_E2P_CFG (HS_EEPROM_REG_ADDR_BASE + 0x0008)
+#define HS_E2P_CFG_I2C_PULSE_MASK_ GENMASK(19, 16)
+#define HS_E2P_CFG_EEPROM_SIZE_SEL_ BIT(12)
+#define HS_E2P_CFG_I2C_BAUD_RATE_MASK_ GENMASK(9, 8)
+#define HS_E2P_CFG_TEST_EEPR_TO_BYP_ BIT(0)
+#define HS_E2P_PAD_CTL (HS_EEPROM_REG_ADDR_BASE + 0x000C)
+
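+/* Address composition example: HS_E2P_CMD = ETH_SYS_REG_ADDR_BASE (0x4000)
+ * + ETH_EEPROM_REG_ADDR_BASE (0x0E00) + 0x0000 = 0x4E00; the other HS_E2P_*
+ * registers follow at the offsets defined above.
+ */
+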
#define GPIO_CFG0 (0x050)
#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
@@ -135,6 +195,13 @@
#define MAC_RX_ADDRL (0x11C)
#define MAC_MII_ACC (0x120)
+#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16)
+#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000)
+#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0)
+#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1)
+#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2)
+#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3)
+#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4)
#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11)
#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6)
@@ -143,11 +210,21 @@
#define MAC_MII_ACC_MII_WRITE_ (0x00000002)
#define MAC_MII_ACC_MII_BUSY_ BIT(0)
+#define MAC_MII_ACC_MIIMMD_SHIFT_ (6)
+#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0)
+#define MAC_MII_ACC_MIICL45_ BIT(3)
+#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006)
+#define MAC_MII_ACC_MIICMD_ADDR_ (0x00000000)
+#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002)
+#define MAC_MII_ACC_MIICMD_READ_ (0x00000004)
+#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006)
+
#define MAC_MII_DATA (0x124)
#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
#define MAC_WUCSR (0x140)
+#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
@@ -155,6 +232,8 @@
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_MP_SO_HI (0x148)
+#define MAC_MP_SO_LO (0x14C)
#define MAC_WUF_CFG0 (0x150)
#define MAC_NUM_OF_WUF_CFG (32)
@@ -187,6 +266,8 @@
#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x)))
#define RFE_CTL (0x508)
+#define RFE_CTL_TCP_UDP_COE_ BIT(12)
+#define RFE_CTL_IP_COE_ BIT(11)
#define RFE_CTL_AB_ BIT(10)
#define RFE_CTL_AM_ BIT(9)
#define RFE_CTL_AU_ BIT(8)
@@ -214,6 +295,82 @@
#define MAC_WUCSR2 (0x600)
+#define SGMII_ACC (0x720)
+#define SGMII_ACC_SGMII_BZY_ BIT(31)
+#define SGMII_ACC_SGMII_WR_ BIT(30)
+#define SGMII_ACC_SGMII_MMD_SHIFT_ (16)
+#define SGMII_ACC_SGMII_MMD_MASK_ GENMASK(20, 16)
+#define SGMII_ACC_SGMII_MMD_VSR_ BIT(15)
+#define SGMII_ACC_SGMII_ADDR_SHIFT_ (0)
+#define SGMII_ACC_SGMII_ADDR_MASK_ GENMASK(15, 0)
+#define SGMII_DATA (0x724)
+#define SGMII_DATA_SHIFT_ (0)
+#define SGMII_DATA_MASK_ GENMASK(15, 0)
+#define SGMII_CTL (0x728)
+#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
+#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
+#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+
+/* Vendor Specific SGMII MMD details */
+#define SR_VSMMD_PCS_ID1 0x0004
+#define SR_VSMMD_PCS_ID2 0x0005
+#define SR_VSMMD_STS 0x0008
+#define SR_VSMMD_CTRL 0x0009
+
+#define VR_MII_DIG_CTRL1 0x8000
+#define VR_MII_DIG_CTRL1_VR_RST_ BIT(15)
+#define VR_MII_DIG_CTRL1_R2TLBE_ BIT(14)
+#define VR_MII_DIG_CTRL1_EN_VSMMD1_ BIT(13)
+#define VR_MII_DIG_CTRL1_CS_EN_ BIT(10)
+#define VR_MII_DIG_CTRL1_MAC_AUTO_SW_ BIT(9)
+#define VR_MII_DIG_CTRL1_INIT_ BIT(8)
+#define VR_MII_DIG_CTRL1_DTXLANED_0_ BIT(4)
+#define VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_ BIT(3)
+#define VR_MII_DIG_CTRL1_EN_2_5G_MODE_ BIT(2)
+#define VR_MII_DIG_CTRL1_BYP_PWRUP_ BIT(1)
+#define VR_MII_DIG_CTRL1_PHY_MODE_CTRL_ BIT(0)
+#define VR_MII_AN_CTRL 0x8001
+#define VR_MII_AN_CTRL_MII_CTRL_ BIT(8)
+#define VR_MII_AN_CTRL_SGMII_LINK_STS_ BIT(4)
+#define VR_MII_AN_CTRL_TX_CONFIG_ BIT(3)
+#define VR_MII_AN_CTRL_1000BASE_X_ (0)
+#define VR_MII_AN_CTRL_SGMII_MODE_ (2)
+#define VR_MII_AN_CTRL_QSGMII_MODE_ (3)
+#define VR_MII_AN_CTRL_PCS_MODE_SHIFT_ (1)
+#define VR_MII_AN_CTRL_PCS_MODE_MASK_ GENMASK(2, 1)
+#define VR_MII_AN_CTRL_MII_AN_INTR_EN_ BIT(0)
+#define VR_MII_AN_INTR_STS 0x8002
+#define VR_MII_AN_INTR_STS_LINK_UP_ BIT(4)
+#define VR_MII_AN_INTR_STS_SPEED_MASK_ GENMASK(3, 2)
+#define VR_MII_AN_INTR_STS_1000_MBPS_ BIT(3)
+#define VR_MII_AN_INTR_STS_100_MBPS_ BIT(2)
+#define VR_MII_AN_INTR_STS_10_MBPS_ (0)
+#define VR_MII_AN_INTR_STS_FDX_ BIT(1)
+#define VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR_ BIT(0)
+
+#define VR_MII_LINK_TIMER_CTRL 0x800A
+#define VR_MII_DIG_STS 0x8010
+#define VR_MII_DIG_STS_PSEQ_STATE_MASK_ GENMASK(4, 2)
+#define VR_MII_DIG_STS_PSEQ_STATE_POS_ (2)
+#define VR_MII_GEN2_4_MPLL_CTRL0 0x8078
+#define VR_MII_MPLL_CTRL0_REF_CLK_DIV2_ BIT(12)
+#define VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_ BIT(4)
+#define VR_MII_GEN2_4_MPLL_CTRL1 0x8079
+#define VR_MII_MPLL_CTRL1_MPLL_MULTIPLIER_ GENMASK(6, 0)
+#define VR_MII_BAUD_RATE_3P125GBPS (3125)
+#define VR_MII_BAUD_RATE_1P25GBPS (1250)
+#define VR_MII_MPLL_MULTIPLIER_125 (125)
+#define VR_MII_MPLL_MULTIPLIER_100 (100)
+#define VR_MII_MPLL_MULTIPLIER_50 (50)
+#define VR_MII_MPLL_MULTIPLIER_40 (40)
+#define VR_MII_GEN2_4_MISC_CTRL1 0x809A
+#define VR_MII_CTRL1_RX_RATE_0_MASK_ GENMASK(3, 2)
+#define VR_MII_CTRL1_RX_RATE_0_SHIFT_ (2)
+#define VR_MII_CTRL1_TX_RATE_0_MASK_ GENMASK(1, 0)
+#define VR_MII_MPLL_BAUD_CLK (0)
+#define VR_MII_MPLL_BAUD_CLK_DIV_2 (1)
+#define VR_MII_MPLL_BAUD_CLK_DIV_4 (2)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
@@ -261,8 +418,11 @@
#define INT_MOD_CFG5 (0x7D4)
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define INT_MOD_CFG8 (0x7E0)
+#define INT_MOD_CFG9 (0x7E4)
#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_LTC_TARGET_READ_ BIT(13)
#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
@@ -284,9 +444,51 @@
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+#define HS_PTP_GENERAL_CONFIG (0x0A04)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0xf << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_ (1)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_ (2)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_ (3)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (4)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_ (5)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (6)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_ (7)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (8)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_ (9)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (10)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_ (11)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_ (12)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (13)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGG_ (14)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_ (15)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0xf) << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(channel) (BIT(1 + ((channel) * 2)))
+#define HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) * 2))
+
#define PTP_INT_STS (0x0A08)
+#define PTP_INT_IO_FE_MASK_ GENMASK(31, 24)
+#define PTP_INT_IO_FE_SHIFT_ (24)
+#define PTP_INT_IO_FE_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_IO_RE_MASK_ GENMASK(23, 16)
+#define PTP_INT_IO_RE_SHIFT_ (16)
+#define PTP_INT_IO_RE_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_TX_TS_OVRFL_INT_ BIT(14)
+#define PTP_INT_TX_SWTS_ERR_INT_ BIT(13)
+#define PTP_INT_TX_TS_INT_ BIT(12)
+#define PTP_INT_RX_TS_OVRFL_INT_ BIT(9)
+#define PTP_INT_RX_TS_INT_ BIT(8)
+#define PTP_INT_TIMER_INT_B_ BIT(1)
+#define PTP_INT_TIMER_INT_A_ BIT(0)
#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_FE_EN_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_EN_TIMER_SET_(channel) BIT(channel)
#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_EN_FE_EN_CLR_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_CLR_(channel) BIT(16 + (channel))
#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
#define PTP_INT_BIT_TX_TS_ BIT(12)
#define PTP_INT_BIT_TIMER_B_ BIT(1)
@@ -304,6 +506,16 @@
#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LTC_SET_SEC_HI (0x0A50)
+#define PTP_LTC_SET_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_VERSION (0x0A54)
+#define PTP_VERSION_TX_UP_MASK_ GENMASK(31, 24)
+#define PTP_VERSION_TX_LO_MASK_ GENMASK(23, 16)
+#define PTP_VERSION_RX_UP_MASK_ GENMASK(15, 8)
+#define PTP_VERSION_RX_LO_MASK_ GENMASK(7, 0)
+#define PTP_IO_SEL (0x0A58)
+#define PTP_IO_SEL_MASK_ GENMASK(10, 8)
+#define PTP_IO_SEL_SHIFT_ (8)
#define PTP_LATENCY (0x0A5C)
#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
#define PTP_LATENCY_RX_SET_(rx_latency) \
@@ -328,6 +540,59 @@
#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+#define PTP_TX_CAP_INFO (0x0AB8)
+#define PTP_TX_CAP_INFO_TX_CH_MASK_ GENMASK(1, 0)
+#define PTP_TX_DOMAIN (0x0ABC)
+#define PTP_TX_DOMAIN_MASK_ GENMASK(23, 16)
+#define PTP_TX_DOMAIN_RANGE_EN_ BIT(15)
+#define PTP_TX_DOMAIN_RANGE_MASK_ GENMASK(7, 0)
+#define PTP_TX_SDOID (0x0AC0)
+#define PTP_TX_SDOID_MASK_ GENMASK(23, 16)
+#define PTP_TX_SDOID_RANGE_EN_ BIT(15)
+#define PTP_TX_SDOID_11_0_MASK_ GENMASK(7, 0)
+#define PTP_IO_CAP_CONFIG (0x0AC4)
+#define PTP_IO_CAP_CONFIG_LOCK_FE_(channel) BIT(24 + (channel))
+#define PTP_IO_CAP_CONFIG_LOCK_RE_(channel) BIT(16 + (channel))
+#define PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel) BIT(8 + (channel))
+#define PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_RE_LTC_SEC_CAP_X (0x0AC8)
+#define PTP_IO_RE_LTC_NS_CAP_X (0x0ACC)
+#define PTP_IO_FE_LTC_SEC_CAP_X (0x0AD0)
+#define PTP_IO_FE_LTC_NS_CAP_X (0x0AD4)
+#define PTP_IO_EVENT_OUTPUT_CFG (0x0AD8)
+#define PTP_IO_EVENT_OUTPUT_CFG_SEL_(channel) BIT(16 + (channel))
+#define PTP_IO_EVENT_OUTPUT_CFG_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_PIN_CFG (0x0ADC)
+#define PTP_IO_PIN_CFG_OBUF_TYPE_(channel) BIT(0 + (channel))
+#define PTP_LTC_RD_SEC_HI (0x0AF0)
+#define PTP_LTC_RD_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_LTC_RD_SEC_LO (0x0AF4)
+#define PTP_LTC_RD_NS (0x0AF8)
+#define PTP_LTC_RD_NS_29_0_MASK_ GENMASK(29, 0)
+#define PTP_LTC_RD_SUBNS (0x0AFC)
+#define PTP_RX_USER_MAC_HI (0x0B00)
+#define PTP_RX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_RX_USER_MAC_LO (0x0B04)
+#define PTP_RX_USER_IP_ADDR_0 (0x0B20)
+#define PTP_RX_USER_IP_ADDR_1 (0x0B24)
+#define PTP_RX_USER_IP_ADDR_2 (0x0B28)
+#define PTP_RX_USER_IP_ADDR_3 (0x0B2C)
+#define PTP_RX_USER_IP_MASK_0 (0x0B30)
+#define PTP_RX_USER_IP_MASK_1 (0x0B34)
+#define PTP_RX_USER_IP_MASK_2 (0x0B38)
+#define PTP_RX_USER_IP_MASK_3 (0x0B3C)
+#define PTP_TX_USER_MAC_HI (0x0B40)
+#define PTP_TX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_TX_USER_MAC_LO (0x0B44)
+#define PTP_TX_USER_IP_ADDR_0 (0x0B60)
+#define PTP_TX_USER_IP_ADDR_1 (0x0B64)
+#define PTP_TX_USER_IP_ADDR_2 (0x0B68)
+#define PTP_TX_USER_IP_ADDR_3 (0x0B6C)
+#define PTP_TX_USER_IP_MASK_0 (0x0B70)
+#define PTP_TX_USER_IP_MASK_1 (0x0B74)
+#define PTP_TX_USER_IP_MASK_2 (0x0B78)
+#define PTP_TX_USER_IP_MASK_3 (0x0B7C)
+
#define DMAC_CFG (0xC00)
#define DMAC_CFG_COAL_EN_ BIT(16)
#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
@@ -483,6 +748,20 @@
#define OTP_STATUS (0x1030)
#define OTP_STATUS_BUSY_ BIT(0)
+/* Hearthstone OTP block registers */
+#define HS_OTP_BLOCK_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_OTP_REG_ADDR_BASE)
+#define HS_OTP_PWR_DN (HS_OTP_BLOCK_BASE + 0x0)
+#define HS_OTP_ADDR_HIGH (HS_OTP_BLOCK_BASE + 0x4)
+#define HS_OTP_ADDR_LOW (HS_OTP_BLOCK_BASE + 0x8)
+#define HS_OTP_PRGM_DATA (HS_OTP_BLOCK_BASE + 0x10)
+#define HS_OTP_PRGM_MODE (HS_OTP_BLOCK_BASE + 0x14)
+#define HS_OTP_READ_DATA (HS_OTP_BLOCK_BASE + 0x18)
+#define HS_OTP_FUNC_CMD (HS_OTP_BLOCK_BASE + 0x20)
+#define HS_OTP_TST_CMD (HS_OTP_BLOCK_BASE + 0x24)
+#define HS_OTP_CMD_GO (HS_OTP_BLOCK_BASE + 0x28)
+#define HS_OTP_STATUS (HS_OTP_BLOCK_BASE + 0x30)
+
/* MAC statistics registers */
#define STAT_RX_FCS_ERRORS (0x1200)
#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
@@ -541,10 +820,12 @@
#define LAN743X_MAX_RX_CHANNELS (4)
#define LAN743X_MAX_TX_CHANNELS (1)
+#define PCI11X1X_MAX_TX_CHANNELS (4)
struct lan743x_adapter;
#define LAN743X_USED_RX_CHANNELS (4)
#define LAN743X_USED_TX_CHANNELS (1)
+#define PCI11X1X_USED_TX_CHANNELS (4)
#define LAN743X_INT_MOD (400)
#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS)
@@ -553,12 +834,17 @@ struct lan743x_adapter;
#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS)
#error Invalid LAN743X_USED_TX_CHANNELS
#endif
+#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS)
+#error Invalid PCI11X1X_USED_TX_CHANNELS
+#endif
/* PCI */
/* SMSC acquired EFAR late 1990s, MCHP acquired SMSC 2012 */
#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
+#define PCI_DEVICE_ID_SMSC_A011 (0xA011)
+#define PCI_DEVICE_ID_SMSC_A041 (0xA041)
#define PCI_CONFIG_LENGTH (0x1000)
@@ -607,13 +893,14 @@ struct lan743x_vector {
};
#define LAN743X_MAX_VECTOR_COUNT (8)
+#define PCI11X1X_MAX_VECTOR_COUNT (16)
struct lan743x_intr {
int flags;
unsigned int irq;
- struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT];
+ struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT];
int number_of_vectors;
bool using_vectors;
@@ -668,8 +955,8 @@ struct lan743x_tx {
int last_tail;
struct napi_struct napi;
-
- struct sk_buff *overflow_skb;
+ u32 frame_count;
+ u32 rqd_descriptors;
};
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
@@ -704,12 +991,28 @@ struct lan743x_rx {
struct sk_buff *skb_head, *skb_tail;
};
+/* SGMII Link Speed Duplex status */
+enum lan743x_sgmii_lsd {
+ POWER_DOWN = 0,
+ LINK_DOWN,
+ ANEG_BUSY,
+ LINK_10HD,
+ LINK_10FD,
+ LINK_100HD,
+ LINK_100FD,
+ LINK_1000_MASTER,
+ LINK_1000_SLAVE,
+ LINK_2500_MASTER,
+ LINK_2500_SLAVE
+};
+
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
@@ -721,11 +1024,24 @@ struct lan743x_adapter {
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
- struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
- struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+ struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS];
+ struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS];
+ bool is_pci11x1x;
+ bool is_sgmii_en;
+ /* protect ethernet syslock */
+ spinlock_t eth_syslock_spinlock;
+ bool eth_syslock_en;
+ u32 eth_syslock_acquire_cnt;
+ struct mutex sgmii_rw_lock;
+ /* SGMII Link Speed & Duplex status */
+ enum lan743x_sgmii_lsd sgmii_lsd;
+ u8 max_tx_channels;
+ u8 used_tx_channels;
+ u8 max_vector_count;
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
+ u32 hw_cfg;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -795,7 +1111,7 @@ struct lan743x_tx_buffer_info {
unsigned int buffer_length;
};
-#define LAN743X_TX_RING_SIZE (50)
+#define LAN743X_TX_RING_SIZE (128)
/* OWN bit is set. ie, Descs are owned by RX DMAC */
#define RX_DESC_DATA0_OWN_ (0x00008000)
@@ -807,6 +1123,9 @@ struct lan743x_tx_buffer_info {
(((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16)
#define RX_DESC_DATA0_EXT_ (0x00004000)
#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF)
+#define RX_DESC_DATA1_STATUS_ICE_ (0x00020000)
+#define RX_DESC_DATA1_STATUS_TCE_ (0x00010000)
+#define RX_DESC_DATA1_STATUS_ICSM_ (0x00000001)
#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF)
#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2))
@@ -838,5 +1157,7 @@ struct lan743x_rx_buffer_info {
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, u16 timeout);
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 8b7a8d879083..da3ea905adbb 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -25,6 +25,18 @@ static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
u32 seconds, u32 nano_seconds,
u32 sub_nano_seconds);
+static int lan743x_get_channel(u32 ch_map)
+{
+ int idx;
+
+ for (idx = 0; idx < 32; idx++) {
+ if (ch_map & (0x1 << idx))
+ return idx;
+ }
+
+ return -EINVAL;
+}
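+
+/* Equivalent sketch using the kernel's ffs() helper (1-based bit index,
+ * 0 when no bit is set):
+ *
+ *	return ch_map ? ffs(ch_map) - 1 : -EINVAL;
+ *
+ * the open-coded loop above is what the patch actually adds.
+ */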
+
int lan743x_gpio_init(struct lan743x_adapter *adapter)
{
struct lan743x_gpio *gpio = &adapter->gpio;
@@ -179,6 +191,8 @@ static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
u32 *seconds, u32 *nano_seconds,
u32 *sub_nano_seconds);
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec);
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns);
@@ -407,7 +421,11 @@ static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;
- lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &seconds, &nano_seconds,
+ NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
ts->tv_sec = seconds;
ts->tv_nsec = nano_seconds;
@@ -671,6 +689,322 @@ failed:
return ret;
}
+static void lan743x_ptp_io_perout_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ int val;
+
+ event_ch = ptp->ptp_io_perout[index];
+ if (event_ch >= 0) {
+ /* set target to far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ 0);
+
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (event_ch));
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch);
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+ if (event_ch)
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_B_);
+ else
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_A_);
+ lan743x_ptp_release_event_ch(adapter, event_ch);
+ ptp->ptp_io_perout[index] = -1;
+ }
+
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+
+ /* Deselect Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+
+ /* Disables the output of Local Time Target compare events */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+	/* Configure the pin as an open-drain driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val &= ~PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+	/* Dummy read to make sure the write has taken effect */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+}
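+
+/* Note: a target of 0xFFFF0000 seconds is roughly 136 years past the
+ * epoch, so parking the compare target there means it effectively never
+ * fires.
+ */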
+
+static int lan743x_ptp_io_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec, period_nsec;
+ u32 start_sec, start_nsec;
+ u32 pulse_sec, pulse_nsec;
+ int pulse_width;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ u32 index;
+ int val;
+
+ index = perout_request->index;
+ event_ch = ptp->ptp_io_perout[index];
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_io_perout_off(adapter, index);
+ return 0;
+ }
+
+ if (event_ch >= LAN743X_PTP_N_EVENT_CHAN) {
+ /* already on, turn off first */
+ lan743x_ptp_io_perout_off(adapter, index);
+ }
+
+ event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+ if (event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
+ goto failed;
+ }
+ ptp->ptp_io_perout[index] = event_ch;
+
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_sec = perout_request->on.sec;
+ pulse_sec += perout_request->on.nsec / 1000000000;
+ pulse_nsec = perout_request->on.nsec % 1000000000;
+ } else {
+ pulse_sec = perout_request->period.sec;
+ pulse_sec += perout_request->period.nsec / 1000000000;
+ pulse_nsec = perout_request->period.nsec % 1000000000;
+ }
+
+ if (pulse_sec == 0) {
+ if (pulse_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (pulse_nsec >= 200000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_;
+ } else if (pulse_nsec >= 100000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_;
+ } else if (pulse_nsec >= 20000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (pulse_nsec >= 10000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_;
+ } else if (pulse_nsec >= 2000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (pulse_nsec >= 1000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_;
+ } else if (pulse_nsec >= 200000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (pulse_nsec >= 100000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_;
+ } else if (pulse_nsec >= 20000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (pulse_nsec >= 10000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_;
+ } else if (pulse_nsec >= 2000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_;
+ } else if (pulse_nsec >= 1000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_;
+ } else if (pulse_nsec >= 200) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+				   "perout period too small, min is 200ns\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch), 0);
+
+ /* Configure to pulse every period */
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (event_ch, pulse_width);
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch));
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+
+ /* set the reload to one toggle cycle */
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(event_ch),
+ period_nsec);
+
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ start_nsec);
+
+ /* Enable LTC Target Read */
+ val = lan743x_csr_read(adapter, PTP_CMD_CTL);
+ val |= PTP_CMD_CTL_PTP_LTC_TARGET_READ_;
+ lan743x_csr_write(adapter, PTP_CMD_CTL, val);
+
+	/* Configure the pin as a push/pull driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val |= PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+
+ /* Select Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+ if (event_ch)
+ /* Channel B as the output */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+ else
+ /* Channel A as the output */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+
+ /* Enables the output of Local Time Target compare events */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+ return 0;
+
+failed:
+ lan743x_ptp_io_perout_off(adapter, index);
+ return -ENODEV;
+}
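+
+/* Illustrative mapping for the pulse-width selection above (assumed
+ * example): a PEROUT request with a 1 ms period and no
+ * PTP_PEROUT_DUTY_CYCLE flag yields pulse_nsec == 1000000, which selects
+ * HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_, i.e. the output is high for
+ * roughly half of each period.
+ */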
+
+static void lan743x_ptp_io_extts_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ int val;
+
+ extts = &ptp->extts[index];
+ /* PTP Interrupt Enable Clear Register */
+ if (extts->flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_CLR_(index);
+ else
+ val = PTP_INT_EN_RE_EN_CLR_(index);
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR, val);
+
+ /* Disables PTP-IO edge lock */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (extts->flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(index);
+ } else {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(index);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO De-select register */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* Clear timestamp */
+ memset(&extts->ts, 0, sizeof(struct timespec64));
+ extts->flags = 0;
+}
+
+static int lan743x_ptp_io_event_cap_en(struct lan743x_adapter *adapter,
+ u32 flags, u32 channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int val;
+
+ if ((flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp->command_lock);
+ /* PTP-IO Event Capture Enable */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val |= PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ } else {
+		/* Rising edge event by default */
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val |= PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO Select */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ val |= channel << PTP_IO_SEL_SHIFT_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* PTP Interrupt Enable Register */
+ if (flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_SET_(channel);
+ else
+ val = PTP_INT_EN_RE_EN_SET_(channel);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET, val);
+
+ mutex_unlock(&ptp->command_lock);
+
+ return 0;
+}
+
+static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
+ struct ptp_extts_request *extts_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 flags = extts_request->flags;
+ u32 index = extts_request->index;
+ struct lan743x_extts *extts;
+ int extts_pin;
+ int ret = 0;
+
+ extts = &ptp->extts[index];
+
+ if (on) {
+ extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
+ if (extts_pin < 0)
+ return -EBUSY;
+
+ ret = lan743x_ptp_io_event_cap_en(adapter, flags, index);
+ if (!ret)
+ extts->flags = flags;
+ } else {
+ lan743x_ptp_io_extts_off(adapter, index);
+ }
+
+ return ret;
+}
+
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
struct ptp_clock_request *request, int on)
{
@@ -682,11 +1016,19 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
if (request) {
switch (request->type) {
case PTP_CLK_REQ_EXTTS:
+ if (request->extts.index < ptpci->n_ext_ts)
+ return lan743x_ptp_io_extts(adapter, on,
+ &request->extts);
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
- if (request->perout.index < ptpci->n_per_out)
- return lan743x_ptp_perout(adapter, on,
+ if (request->perout.index < ptpci->n_per_out) {
+ if (adapter->is_pci11x1x)
+ return lan743x_ptp_io_perout(adapter, on,
+ &request->perout);
+ else
+ return lan743x_ptp_perout(adapter, on,
&request->perout);
+ }
return -EINVAL;
case PTP_CLK_REQ_PPS:
return -EINVAL;
@@ -707,6 +1049,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
enum ptp_pin_function func,
unsigned int chan)
{
+ struct lan743x_ptp *lan_ptp =
+ container_of(ptp, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(lan_ptp, struct lan743x_adapter, ptp);
int result = 0;
/* Confirm the requested function is supported. Parameter
@@ -717,6 +1063,9 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
case PTP_PF_PEROUT:
break;
case PTP_PF_EXTTS:
+ if (!adapter->is_pci11x1x)
+ result = -1;
+ break;
case PTP_PF_PHYSYNC:
default:
result = -1;
@@ -725,6 +1074,33 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
return result;
}
+static void lan743x_ptp_io_event_clock_get(struct lan743x_adapter *adapter,
+ bool fe, u8 channel,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ u32 sec, nsec;
+
+ mutex_lock(&ptp->command_lock);
+ if (fe) {
+ sec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_NS_CAP_X);
+ } else {
+ sec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_NS_CAP_X);
+ }
+
+ mutex_unlock(&ptp->command_lock);
+
+ /* Update Local timestamp */
+ extts = &ptp->extts[channel];
+ extts->ts.tv_sec = sec;
+ extts->ts.tv_nsec = nsec;
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
{
struct lan743x_ptp *ptp =
@@ -733,41 +1109,121 @@ static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
container_of(ptp, struct lan743x_adapter, ptp);
u32 cap_info, cause, header, nsec, seconds;
bool new_timestamp_available = false;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ int ptp_int_sts;
int count = 0;
+ int channel;
+ s64 ns;
- while ((count < 100) &&
- (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ while ((count < 100) && ptp_int_sts) {
count++;
- cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
-
- if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
- seconds = lan743x_csr_read(adapter,
- PTP_TX_EGRESS_SEC);
- nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
- cause = (nsec &
- PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
- header = lan743x_csr_read(adapter,
- PTP_TX_MSG_HEADER);
-
- if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
- nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
- lan743x_ptp_tx_ts_enqueue_ts(adapter,
- seconds, nsec,
- header);
- new_timestamp_available = true;
- } else if (cause ==
- PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
- netif_err(adapter, drv, adapter->netdev,
- "Auto capture cause not supported\n");
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds,
+ nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
} else {
netif_warn(adapter, drv, adapter->netdev,
- "unknown tx timestamp capture cause\n");
+ "TX TS INT but no TX TS CNT\n");
}
- } else {
- netif_warn(adapter, drv, adapter->netdev,
- "TX TS INT but no TX TS CNT\n");
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_TS_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_FE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_FE_MASK_) >>
+ PTP_INT_IO_FE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ true,
+ channel,
+ &ts);
+ /* PTP Falling Event post */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_FE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear falling event interrupts */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_FE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_FE_MASK_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_RE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_RE_MASK_) >>
+ PTP_INT_IO_RE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ false,
+ channel,
+ &ts);
+ /* PTP Rising Event post */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_RE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear Rising event interrupt */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_RE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_RE_MASK_);
}
- lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
}
if (new_timestamp_available)
@@ -802,6 +1258,28 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
mutex_unlock(&ptp->command_lock);
}
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (sec)
+ (*sec) = lan743x_csr_read(adapter, PTP_LTC_RD_SEC_LO);
+
+ if (nsec)
+ (*nsec) = lan743x_csr_read(adapter, PTP_LTC_RD_NS);
+
+ if (sub_nsec)
+ (*sub_nsec) =
+ lan743x_csr_read(adapter, PTP_LTC_RD_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
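+
+/* Note: the PTP_CMD_CTL_PTP_CLOCK_READ_ command latches the local time
+ * counter into the PTP_LTC_RD_* registers; PTP_LTC_RD_SEC_HI (seconds
+ * bits 47:32) is also available but not read here, since the callers
+ * only consume a 32-bit seconds value.
+ */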
+
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns)
{
@@ -815,8 +1293,12 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan743x_ptp_clock_get(adapter, &unsigned_seconds,
- &nano_seconds, NULL);
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
&remainder);
nano_seconds += remainder;
@@ -831,8 +1313,13 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan743x_ptp_clock_get(adapter, &unsigned_seconds,
- &nano_seconds, NULL);
+ if (adapter->is_pci11x1x) {
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ } else {
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ }
unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
&remainder);
nano_seconds_step = remainder;
@@ -1061,6 +1548,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
n_pins = LAN7430_N_GPIO;
break;
case ID_REV_ID_LAN7431_:
+ case ID_REV_ID_A011_:
+ case ID_REV_ID_A041_:
n_pins = LAN7431_N_GPIO;
break;
default:
@@ -1088,10 +1577,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
adapter->netdev->dev_addr);
ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
ptp->ptp_clock_info.n_alarm = 0;
- ptp->ptp_clock_info.n_ext_ts = 0;
+ ptp->ptp_clock_info.n_ext_ts = LAN743X_PTP_N_EXTTS;
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
- ptp->ptp_clock_info.pps = 0;
+ ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
@@ -1307,21 +1796,21 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
- index++)
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
false, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ON:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, true);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 7663bf5d2e33..e26d4eff7133 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,6 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+#define LAN743X_PTP_N_EXTTS 4
+#define LAN743X_PTP_N_PPS 0
+#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
struct lan743x_adapter;
@@ -60,6 +63,11 @@ struct lan743x_ptp_perout {
int gpio_pin; /* GPIO pin where output appears */
};
+struct lan743x_extts {
+ int flags;
+ struct timespec64 ts;
+};
+
struct lan743x_ptp {
int flags;
@@ -72,6 +80,8 @@ struct lan743x_ptp {
unsigned long used_event_ch;
struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
+	/* PTP event channel (0 = channel A, 1 = channel B) */
+	int ptp_io_perout[LAN743X_PTP_N_PEROUT];
+ struct lan743x_extts extts[LAN743X_PTP_N_EXTTS];
bool leds_multiplexed;
bool led_enabled[LAN7430_N_LED];
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index ac273f84b69e..49e1464a4313 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -1,8 +1,10 @@
config LAN966X_SWITCH
tristate "Lan966x switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
+ depends on BRIDGE || BRIDGE=n
select PHYLINK
select PACKING
help
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 040cfff9f577..962f7c5f9e7d 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -7,4 +7,8 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
- lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
+ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
+ lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
+ lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
new file mode 100644
index 000000000000..70cbbf8d2b67
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 cir, cbs;
+ u8 se_idx;
+
+ /* Check for invalid values */
+ if (qopt->idleslope <= 0 ||
+ qopt->sendslope >= 0 ||
+ qopt->locredit >= qopt->hicredit)
+ return -EINVAL;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+ cir = qopt->idleslope;
+ cbs = (qopt->idleslope - qopt->sendslope) *
+ (qopt->hicredit - qopt->locredit) /
+ -qopt->sendslope;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+	/* Check that the result actually fits in the registers */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
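+
+/* Worked example with hypothetical qdisc parameters (not from this
+ * patch): idleslope = 98000 kbps, sendslope = -2000 kbps, hicredit = 153,
+ * locredit = -1389 gives
+ *
+ *	cbs = (98000 - (-2000)) * (153 - (-1389)) / 2000 = 77100 bytes
+ *
+ * then cir = DIV_ROUND_UP(98000, 100) = 980 (100 kbps units) and
+ * cbs = DIV_ROUND_UP(77100, 4096) = 19 (4 KB units), both within the
+ * 16-bit rate and 7-bit burst fields checked above.
+ */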
+
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 se_idx;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 614f12c2fe6a..fea42542be28 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -545,6 +545,39 @@ static int lan966x_set_pauseparam(struct net_device *dev,
return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
+static int lan966x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ if (!lan966x->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
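+
+/* This backs "ethtool -T <iface>": when no PHC has been registered the
+ * early return above reports software-only timestamping via
+ * ethtool_op_get_ts_info().
+ */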
+
const struct ethtool_ops lan966x_ethtool_ops = {
.get_link_ksettings = lan966x_get_link_ksettings,
.set_link_ksettings = lan966x_set_link_ksettings,
@@ -556,6 +589,7 @@ const struct ethtool_ops lan966x_ethtool_ops = {
.get_eth_mac_stats = lan966x_get_eth_mac_stats,
.get_rmon_stats = lan966x_get_eth_rmon_stats,
.get_link = ethtool_op_get_link,
+ .get_ts_info = lan966x_get_ts_info,
};
static void lan966x_check_stats_work(struct work_struct *work)
@@ -622,7 +656,15 @@ void lan966x_stats_get(struct net_device *dev,
stats->rx_dropped = dev->stats.rx_dropped +
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
- lan966x->stats[idx + SYS_COUNT_DR_TAIL];
+ lan966x->stats[idx + SYS_COUNT_DR_TAIL] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];
for (i = 0; i < LAN966X_NUM_TC; i++) {
stats->rx_dropped +=
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
new file mode 100644
index 000000000000..8310d3f35404
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define DWRR_COST_BIT_WIDTH BIT(5)
+
+static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight)
+{
+ u32 res;
+
+	/* Round half up: multiply by 16 before the division,
+	 * add 8, then divide the result by 16 again
+	 */
+ res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
+ return max_t(u32, 1, res) - 1;
+}
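+
+/* Worked example with hypothetical weights: w_min = 10, weight = 40 and
+ * DWRR_COST_BIT_WIDTH = 32 gives
+ *
+ *	res = ((32 << 4) * 10 / 40 + 8) >> 4 = (128 + 8) >> 4 = 8
+ *
+ * so the programmed cost is max(1, 8) - 1 = 7.
+ */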
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params;
+ struct lan966x *lan966x = port->lan966x;
+ u32 w_min = 100;
+ u8 count = 0;
+ u32 se_idx;
+ u8 i;
+
+ /* Check the input */
+ if (qopt->parent != TC_H_ROOT)
+ return -EINVAL;
+
+ params = &qopt->replace_params;
+ if (params->bands != NUM_PRIO_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < params->bands; ++i) {
+ /* In the switch the DWRR is always on the lowest consecutive
+ * priorities. Due to this, the first priority must map to the
+ * first DWRR band.
+ */
+ if (params->priomap[i] != (7 - i))
+ return -EINVAL;
+
+ if (params->quanta[i] && params->weights[i] == 0)
+ return -EINVAL;
+ }
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ /* Find minimum weight */
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ ++count;
+
+ lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]),
+ lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i));
+ }
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 se_idx;
+ int i;
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ for (i = 0; i < NUM_PRIO_QUEUES; ++i)
+ lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i));
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
index da5ca7188679..2ea263e893ee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -8,6 +8,7 @@ struct lan966x_fdb_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct net_device *orig_dev;
struct lan966x *lan966x;
unsigned long event;
};
@@ -127,75 +128,119 @@ void lan966x_fdb_deinit(struct lan966x *lan966x)
lan966x_fdb_purge_entries(lan966x);
}
-static void lan966x_fdb_event_work(struct work_struct *work)
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
+{
+ flush_workqueue(lan966x->fdb_work);
+}
+
+static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
{
- struct lan966x_fdb_event_work *fdb_work =
- container_of(work, struct lan966x_fdb_event_work, work);
struct switchdev_notifier_fdb_info *fdb_info;
- struct net_device *dev = fdb_work->dev;
struct lan966x_port *port;
struct lan966x *lan966x;
- int ret;
- fdb_info = &fdb_work->fdb_info;
lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->orig_dev);
+ fdb_info = &fdb_work->fdb_info;
- if (lan966x_netdevice_check(dev)) {
- port = netdev_priv(dev);
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x *lan966x;
+ int ret;
- switch (fdb_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
- fdb_info->vid);
+ lan966x = fdb_work->lan966x;
+ fdb_info = &fdb_work->fdb_info;
+
+	/* The FDB event is addressed to the bridge itself */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+		/* If there is no front port in this VLAN, there is no
+		 * point in copying the frame to the CPU because it
+		 * would just be dropped later. So only learn the
+		 * address on the CPU when a port is present, but
+		 * always store the FDB entry so it can be applied
+		 * once a port actually joins the VLAN.
+		 */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- lan966x_mac_del_entry(lan966x, fdb_info->addr,
- fdb_info->vid);
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
break;
- }
- } else {
- if (!netif_is_bridge_master(dev))
- goto out;
-
- /* In case the bridge is called */
- switch (fdb_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- /* If there is no front port in this vlan, there is no
- * point to copy the frame to CPU because it would be
- * just dropped at later point. So add it only if
- * there is a port but it is required to store the fdb
- * entry for later point when a port actually gets in
- * the vlan.
- */
- lan966x_fdb_add_entry(lan966x, fdb_info);
- if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
- fdb_info->vid))
- break;
-
- lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
- fdb_info->vid);
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
+ return;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- ret = lan966x_fdb_del_entry(lan966x, fdb_info);
- if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
- fdb_info->vid))
- break;
-
- if (ret)
- lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
- fdb_info->vid);
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
break;
- }
+ lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
+ break;
}
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+
+ if (lan966x_netdevice_check(fdb_work->orig_dev))
+ lan966x_fdb_port_event_work(fdb_work);
+ else if (netif_is_bridge_master(fdb_work->orig_dev))
+ lan966x_fdb_bridge_event_work(fdb_work);
+ else if (netif_is_lag_master(fdb_work->orig_dev))
+ lan966x_fdb_lag_event_work(fdb_work);
-out:
kfree(fdb_work->fdb_info.addr);
kfree(fdb_work);
- dev_put(dev);
}
int lan966x_handle_fdb(struct net_device *dev,
@@ -221,7 +266,8 @@ int lan966x_handle_fdb(struct net_device *dev,
if (!fdb_work)
return -ENOMEM;
- fdb_work->dev = orig_dev;
+ fdb_work->dev = dev;
+ fdb_work->orig_dev = orig_dev;
fdb_work->lan966x = lan966x;
fdb_work->event = event;
INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
@@ -231,7 +277,6 @@ int lan966x_handle_fdb(struct net_device *dev,
goto err_addr_alloc;
ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
- dev_hold(orig_dev);
queue_work(lan966x->fdb_work, &fdb_work->work);
break;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 000000000000..e6948939ccc2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+ struct lan966x_db *db)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ page = dev_alloc_pages(rx->page_order);
+ if (unlikely(!page))
+ return NULL;
+
+ dma_addr = dma_map_page(lan966x->dev, page, 0,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
+ goto free_page;
+
+ db->dataptr = dma_addr;
+
+ return page;
+
+free_page:
+ __free_pages(page, rx->page_order);
+ return NULL;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ int i, j;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ dma_unmap_single(lan966x->dev,
+ (dma_addr_t)db->dataptr,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rx->page[i][j], rx->page_order);
+ }
+ }
+}
+
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+ struct lan966x_rx_dcb *dcb,
+ u64 nextptr)
+{
+ struct lan966x_db *db;
+ int i;
+
+ for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+ db = &dcb->db[i];
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
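+	/* Chain the new DCB at the tail of the hardware list: the previous
+	 * tail is linked to this DCB, which then becomes the new tail.
+	 */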
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ struct page *page;
+ int i, j;
+ int size;
+
+	/* Calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+
+ rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+ if (!rx->dcbs)
+ return -ENOMEM;
+
+ rx->last_entry = rx->dcbs;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+
+ /* Now for each dcb allocate the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+ dcb->info = 0;
+
+ /* For each db allocate a page and map it to the DB dataptr. */
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (!page)
+ return -ENOMEM;
+
+ db->status = 0;
+ rx->page[i][j] = page;
+ }
+
+ lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+ }
+
+ return 0;
+}
+
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 size;
+
+	/* Now the dcbs can be cleaned up */
+	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written before the channel is activated
+ */
+ lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP(rx->channel_id));
+ lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP1(rx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(rx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+ FDMA_PORT_CTRL_XTR_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(rx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(rx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+ struct lan966x_tx_dcb *dcb)
+{
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+ struct lan966x_db *db;
+ int size;
+ int i, j;
+
+ tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ GFP_KERNEL);
+ if (!tx->dcbs_buf)
+ return -ENOMEM;
+
+	/* Calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+ if (!tx->dcbs)
+ goto out;
+
+	/* Now for each dcb initialize the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &tx->dcbs[i];
+
+ for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ db->dataptr = 0;
+ db->status = 0;
+ }
+
+ lan966x_fdma_tx_add_dcb(tx, dcb);
+ }
+
+ return 0;
+
+out:
+ kfree(tx->dcbs_buf);
+ return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ int size;
+
+ kfree(tx->dcbs_buf);
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written before the channel is activated
+ */
+ lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP(tx->channel_id));
+ lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP1(tx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+ FDMA_PORT_CTRL_INJ_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(tx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(tx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+
+ tx->activated = false;
+ tx->last_in_use = -1;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+
+ /* Write the registers to reload the channel */
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ if (netif_queue_stopped(port->dev))
+ netif_wake_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ netif_stop_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ struct lan966x_db *db;
+ unsigned long flags;
+ bool clear = false;
+ int i;
+
+ spin_lock_irqsave(&lan966x->tx_lock, flags);
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+
+ if (!dcb_buf->used)
+ continue;
+
+ db = &tx->dcbs[i].db[0];
+ if (!(db->status & FDMA_DCB_STATUS_DONE))
+ continue;
+
+ dcb_buf->dev->stats.tx_packets++;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+
+ dcb_buf->used = false;
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->skb->len,
+ DMA_TO_DEVICE);
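+		/* Two-step PTP skbs are kept until their tx timestamp has
+		 * been read; only free the others here.
+		 */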
+ if (!dcb_buf->ptp)
+ dev_kfree_skb_any(dcb_buf->skb);
+
+ clear = true;
+ }
+
+ if (clear)
+ lan966x_fdma_wakeup_netdev(lan966x);
+
+ spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+ struct lan966x_db *db;
+
+ /* Check if there is any data */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+
+ return true;
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u64 src_port, timestamp;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+
+ /* Get the received frame and unmap it */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+
+ dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+
+ skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ if (unlikely(!skb))
+ goto unmap_page;
+
+ skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+ lan966x_ifh_get_src_port(skb->data, &src_port);
+ lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+ if (WARN_ON(src_port >= lan966x->num_phys_ports))
+ goto free_skb;
+
+ dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
+ PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ skb->dev = lan966x->ports[src_port]->dev;
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_skb:
+ kfree_skb(skb);
+unmap_page:
+ dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
+ PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __free_pages(page, rx->page_order);
+
+ return NULL;
+}
+
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+ struct lan966x_rx *rx = &lan966x->rx;
+ int dcb_reload = rx->dcb_index;
+ struct lan966x_rx_dcb *old_dcb;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ int counter = 0;
+ u64 nextptr;
+
+ lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+	/* Get all received skbs */
+ while (counter < weight) {
+ if (!lan966x_fdma_rx_more_frames(rx))
+ break;
+
+ skb = lan966x_fdma_rx_get_frame(rx);
+
+ rx->page[rx->dcb_index][rx->db_index] = NULL;
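+		/* Advance the ring index; the mask wraps it around and
+		 * assumes FDMA_DCB_MAX is a power of two.
+		 */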
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+
+ if (!skb)
+ break;
+
+ napi_gro_receive(&lan966x->napi, skb);
+ counter++;
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != rx->dcb_index) {
+ db = &rx->dcbs[dcb_reload].db[rx->db_index];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (unlikely(!page))
+ break;
+ rx->page[dcb_reload][rx->db_index] = page;
+
+ old_dcb = &rx->dcbs[dcb_reload];
+ dcb_reload++;
+ dcb_reload &= FDMA_DCB_MAX - 1;
+
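+		/* Compute the DMA address of the recycled DCB from its offset
+		 * inside the coherent DCB ring.
+		 */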
+ nextptr = rx->dma + ((unsigned long)old_dcb -
+ (unsigned long)rx->dcbs);
+ lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+ lan966x_fdma_rx_reload(rx);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ u32 db, err, err_type;
+
+ db = lan_rd(lan966x, FDMA_INTR_DB);
+ err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+ if (db) {
+ lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+ lan_wr(db, lan966x, FDMA_INTR_DB);
+
+ napi_schedule(&lan966x->napi);
+ }
+
+ if (err) {
+ err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+ WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+ lan_wr(err, lan966x, FDMA_INTR_ERR);
+ lan_wr(err_type, lan966x, FDMA_ERRORS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ int i;
+
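+	/* Skip the most recently used DCB: it is still the tail of the
+	 * hardware list and gets its nextptr rewritten on the next xmit.
+	 */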
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+ if (!dcb_buf->used && i != tx->last_in_use)
+ return i;
+ }
+
+ return -1;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx_dcb *next_dcb, *dcb;
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_db *next_db;
+ int needed_headroom;
+ int needed_tailroom;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ int err;
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* skb processing */
+ needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+ skb_push(skb, IFH_LEN * sizeof(u32));
+ memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
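+	/* Extend the frame by 4 bytes to leave room for the FCS */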
+ skb_put(skb, 4);
+
+ dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+
+ /* Setup next dcb */
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len);
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->skb = skb;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = dev;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ next_dcb_buf->ptp = true;
+
+ if (likely(lan966x->tx.activated)) {
+ /* Connect current dcb to the next db */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+		/* First use of the channel, so just activate it */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+	/* Remember this dcb as the last one in use */
+ tx->last_in_use = next_to_use;
+
+ return NETDEV_TX_OK;
+
+release:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+ int max_mtu = 0;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+ int mtu;
+
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ if (mtu > max_mtu)
+ max_mtu = mtu;
+ }
+
+ return max_mtu;
+}
+
+static int lan966x_qsys_sw_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
+}
+
+static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
+{
+ dma_addr_t rx_dma;
+ void *rx_dcbs;
+ u32 size;
+ int err;
+
+ /* Store these for later to free them */
+ rx_dma = lan966x->rx.dma;
+ rx_dcbs = lan966x->rx.dcbs;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_stop_netdev(lan966x);
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
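+	/* Recompute the page order so a single rx buffer holds the new MTU */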
+ lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+
+ lan966x_fdma_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+ lan966x->rx.dma = rx_dma;
+ lan966x->rx.dcbs = rx_dcbs;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return err;
+}
+
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+ int err;
+ u32 val;
+
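+	/* Size the rx buffers for the largest port MTU plus the IFH, the
+	 * skb_shared_info tail room and two VLAN tags.
+	 */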
+ max_mtu = lan966x_fdma_get_max_mtu(lan966x);
+ max_mtu += IFH_LEN * sizeof(u32);
+ max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ max_mtu += VLAN_HLEN * 2;
+
+ if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
+ lan966x->rx.page_order)
+ return 0;
+
+ /* Disable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues */
+ readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
+ val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+	/* Sleep briefly in case frames are still in flight between the queues
+	 * and the CPU port
+ */
+ usleep_range(1000, 2000);
+
+ err = lan966x_fdma_reload(lan966x, max_mtu);
+
+ /* Enable back the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
+
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev)
+ return;
+
+ lan966x->fdma_ndev = dev;
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
+ napi_enable(&lan966x->napi);
+}
+
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev == dev) {
+ netif_napi_del(&lan966x->napi);
+ lan966x->fdma_ndev = NULL;
+ }
+}
+
+int lan966x_fdma_init(struct lan966x *lan966x)
+{
+ int err;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ lan966x->rx.lan966x = lan966x;
+ lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->tx.lan966x = lan966x;
+ lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.last_in_use = -1;
+
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ return err;
+
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err) {
+ lan966x_fdma_rx_free(&lan966x->rx);
+ return err;
+ }
+
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return 0;
+}
+
+void lan966x_fdma_deinit(struct lan966x *lan966x)
+{
+ if (!lan966x->fdma)
+ return;
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x_fdma_rx_free(&lan966x->rx);
+ lan966x_fdma_tx_free(&lan966x->tx);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
new file mode 100644
index 000000000000..41fa2523d91d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+{
+ u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
+ int p, lag, i;
+
+ /* Reset destination and aggregation PGIDS */
+ for (p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(ANA_PGID_PGID_SET(BIT(p)),
+ lan966x, ANA_PGID(p));
+
+ for (p = PGID_AGGR; p < PGID_SRC; ++p)
+ lan_wr(ANA_PGID_PGID_SET(visited),
+ lan966x, ANA_PGID(p));
+
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ visited &= ~BIT(p);
+ }
+
+ /* Now, set PGIDs for each active LAG */
+ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+ struct net_device *bond = lan966x->ports[lag]->bond;
+ int num_active_ports = 0;
+ unsigned long bond_mask;
+ u8 aggr_idx[16];
+
+ if (!bond || (visited & BIT(lag)))
+ continue;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+
+ for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ lan966x, ANA_PGID(p));
+ if (port->lag_tx_active)
+ aggr_idx[num_active_ports++] = p;
+ }
+
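+		/* Assign the aggregation codes round-robin over the active
+		 * ports so traffic is spread across the LAG.
+		 */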
+ for (i = PGID_AGGR; i < PGID_SRC; ++i) {
+ u32 ac;
+
+ ac = lan_rd(lan966x, ANA_PGID(i));
+ ac &= ~bond_mask;
+ /* Don't do division by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
+ lan_wr(ANA_PGID_PGID_SET(ac),
+ lan966x, ANA_PGID(i));
+ }
+
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (p = lag; p < lan966x->num_phys_ports; p++) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ visited |= BIT(p);
+ }
+ }
+}
+
+static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ u32 bond_mask;
+ u32 lag_id;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ lag_id = port->chip_port;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ }
+}
+
+static void lan966x_lag_update_ids(struct lan966x *lan966x)
+{
+ lan966x_lag_set_port_ids(lan966x);
+ lan966x_update_fwd_mask(lan966x);
+ lan966x_lag_set_aggr_pgids(lan966x);
+}
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ u32 lag_id = -1;
+ u32 bond_mask;
+ int err;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ port->bond = bond;
+ lan966x_lag_update_ids(lan966x);
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto out;
+
+ lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
+
+ if (lan966x_lag_first_port(port->bond, port->dev) &&
+ lag_id != -1)
+ lan966x_mac_lag_replace_port_entry(lan966x,
+ lan966x->ports[lag_id],
+ port);
+
+ return 0;
+
+out:
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+
+ return err;
+}
+
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 bond_mask;
+ u32 lag_id;
+
+ if (lan966x_lag_first_port(port->bond, port->dev)) {
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ bond_mask &= ~BIT(port->chip_port);
+ if (bond_mask) {
+ lag_id = __ffs(bond_mask);
+ lan966x_mac_lag_replace_port_entry(lan966x, port,
+ lan966x->ports[lag_id]);
+ } else {
+ lan966x_mac_lag_remove_port_entry(lan966x, port);
+ }
+ }
+
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+ lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
+}
+
+static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
+ enum netdev_lag_hash hash_type)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ if (port->hash_type != hash_type)
+ return false;
+ }
+
+ return true;
+}
+
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct netdev_lag_upper_info *lui;
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ lui = info->upper_info;
+ if (!lui) {
+ port->hash_type = NETDEV_LAG_HASH_NONE;
+ return NOTIFY_DONE;
+ }
+
+ if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported Tx type");
+ return -EINVAL;
+ }
+
+ if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "All LAG ports must use the same hash_type");
+ return -EINVAL;
+ }
+
+ switch (lui->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L34:
+ lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L23:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported hash type");
+ return -EINVAL;
+ }
+
+ port->hash_type = lui->hash_type;
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag = info->lower_state_info;
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool is_active;
+
+ if (!port->bond)
+ return NOTIFY_DONE;
+
+ is_active = lag->link_up && lag->tx_enabled;
+ if (port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ port->lag_tx_active = is_active;
+ lan966x_lag_set_aggr_pgids(lan966x);
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_prechangeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_changeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long bond_mask;
+
+ if (port->bond != lag)
+ return false;
+
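+	/* The lowest-numbered member port represents the LAG */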
+ bond_mask = lan966x_lag_get_mask(lan966x, lag);
+ if (bond_mask && port->chip_port == __ffs(bond_mask))
+ return true;
+
+ return false;
+}
+
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
+{
+ struct lan966x_port *port;
+ u32 mask = 0;
+ int p;
+
+ if (!bond)
+ return mask;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ mask |= BIT(p);
+ }
+
+ return mask;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index ca5f1177963d..baa3a30c039f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -22,6 +22,7 @@ struct lan966x_mac_entry {
u16 vid;
u16 port_index;
int row;
+ bool lag;
};
struct lan966x_mac_raw_entry {
@@ -40,11 +41,12 @@ static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
{
u32 val;
- return readx_poll_timeout(lan966x_mac_get_status,
- lan966x, val,
- (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
- MACACCESS_CMD_IDLE,
- TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
}
static void lan966x_mac_select(struct lan966x *lan966x,
@@ -68,12 +70,14 @@ static void lan966x_mac_select(struct lan966x *lan966x,
lan_wr(mach, lan966x, ANA_MACHDATA);
}
-static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
- bool cpu_copy,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid,
- enum macaccess_entry_type type)
+static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
{
+ lockdep_assert_held(&lan966x->mac_lock);
+
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
@@ -88,6 +92,21 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
return lan966x_mac_wait_for_completion(lan966x);
}
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
/* The mask of the front ports is encoded inside the mac parameter via a call
* to lan966x_mdb_encode_mac().
*/
@@ -112,11 +131,23 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}
-int lan966x_mac_forget(struct lan966x *lan966x,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid,
- enum macaccess_entry_type type)
+static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
+}
+
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lockdep_assert_held(&lan966x->mac_lock);
+
lan966x_mac_select(lan966x, mac, vid);
/* Issue a forget command */
@@ -127,6 +158,20 @@ int lan966x_mac_forget(struct lan966x *lan966x,
return lan966x_mac_wait_for_completion(lan966x);
}
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
{
return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
@@ -155,19 +200,21 @@ void lan966x_mac_init(struct lan966x *lan966x)
INIT_LIST_HEAD(&lan966x->mac_entries);
}
-static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
- u16 vid, u16 port_index)
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
+ const unsigned char *mac,
+ u16 vid)
{
struct lan966x_mac_entry *mac_entry;
- mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
if (!mac_entry)
return NULL;
memcpy(mac_entry->mac, mac, ETH_ALEN);
mac_entry->vid = vid;
- mac_entry->port_index = port_index;
+ mac_entry->port_index = port->chip_port;
mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ mac_entry->lag = port->bond ? true : false;
return mac_entry;
}
@@ -178,7 +225,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
struct lan966x_mac_entry *res = NULL;
struct lan966x_mac_entry *mac_entry;
- spin_lock(&lan966x->mac_lock);
list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
if (mac_entry->vid == vid &&
ether_addr_equal(mac, mac_entry->mac) &&
@@ -187,7 +233,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
break;
}
}
- spin_unlock(&lan966x->mac_lock);
return res;
}
@@ -230,8 +275,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
{
struct lan966x_mac_entry *mac_entry;
- if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ spin_lock(&lan966x->mac_lock);
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+ spin_unlock(&lan966x->mac_lock);
return 0;
+ }
/* In case the entry already exists, don't add it again to SW,
* just update HW, but we need to look in the actual HW because
@@ -240,20 +288,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
* add the entry but without the extern_learn flag.
*/
mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
- if (mac_entry)
- return lan966x_mac_learn(lan966x, port->chip_port,
- addr, vid, ENTRYTYPE_LOCKED);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ goto mac_learn;
+ }
- mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
- if (!mac_entry)
+ mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
return -ENOMEM;
+ }
- spin_lock(&lan966x->mac_lock);
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
+ port->bond ?: port->dev);
+
+mac_learn:
lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
- lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
return 0;
}
@@ -268,8 +321,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
list) {
if (mac_entry->vid == vid &&
ether_addr_equal(addr, mac_entry->mac)) {
- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
- ENTRYTYPE_LOCKED);
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
@@ -280,6 +334,50 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
return 0;
}
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ lan966x_mac_learn_locked(lan966x, dst->chip_port,
+ mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+ mac_entry->port_index = dst->chip_port;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
void lan966x_mac_purge_entries(struct lan966x *lan966x)
{
struct lan966x_mac_entry *mac_entry, *tmp;
@@ -287,8 +385,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x)
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
list) {
- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
- ENTRYTYPE_LOCKED);
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid, ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
@@ -324,10 +422,14 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
{
struct lan966x_mac_entry *mac_entry, *tmp;
unsigned char mac[ETH_ALEN] __aligned(2);
+ struct list_head mac_deleted_entries;
+ struct lan966x_port *port;
u32 dest_idx;
u32 column;
u16 vid;
+ INIT_LIST_HEAD(&mac_deleted_entries);
+
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
bool found = false;
@@ -345,7 +447,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
/* If the entry in SW is found, then there is nothing
* to do
@@ -360,20 +463,27 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
}
if (!found) {
- /* Notify the bridge that the entry doesn't exist
- * anymore in the HW and remove the entry from the SW
- * list
- */
- lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- mac_entry->mac, mac_entry->vid,
- lan966x->ports[mac_entry->port_index]->dev);
-
list_del(&mac_entry->list);
- kfree(mac_entry);
+			/* Move the entry from the SW list to a tmp list so it
+			 * can be deleted later
+ */
+ list_add_tail(&mac_entry->list, &mac_deleted_entries);
}
}
spin_unlock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW
+ */
+ port = lan966x->ports[mac_entry->port_index];
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ port->bond ?: port->dev);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
/* Now go to the list of columns and see if any entry was not in the SW
* list, then that means that the entry is new so it needs to notify the
* bridge.
@@ -391,20 +501,29 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
+
+ spin_lock(&lan966x->mac_lock);
+ mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ continue;
+ }
- mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
- if (!mac_entry)
+ port = lan966x->ports[dest_idx];
+ mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
return;
+ }
mac_entry->row = row;
-
- spin_lock(&lan966x->mac_lock);
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- mac, vid, lan966x->ports[dest_idx]->dev);
+ mac, vid, port->bond ?: port->dev);
}
}
@@ -421,6 +540,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
lan966x, ANA_MACTINDX);
while (1) {
+ spin_lock(&lan966x->mac_lock);
lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
ANA_MACACCESS_MAC_TABLE_CMD,
lan966x, ANA_MACACCESS);
@@ -444,12 +564,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
stop = false;
if (column == LAN966X_MAC_COLUMNS - 1 &&
- index == 0 && stop)
+ index == 0 && stop) {
+ spin_unlock(&lan966x->mac_lock);
break;
+ }
entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+ spin_unlock(&lan966x->mac_lock);
/* Once all the columns are read process them */
if (column == LAN966X_MAC_COLUMNS - 1) {
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2cb70da63db3..20ee5b28f70a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -4,11 +4,13 @@
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
+#include <linux/ip.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/packing.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <net/addrconf.h>
#include "lan966x_main.h"
@@ -22,9 +24,6 @@
#define XTR_NOT_READY 0x07000080U
#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
-#define READL_SLEEP_US 10
-#define READL_TIMEOUT_US 100000000
-
#define IO_RANGES 2
static const struct of_device_id lan966x_match[] = {
@@ -41,9 +40,11 @@ struct lan966x_main_io_resource {
static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */
{ TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
{ TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
{ TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
@@ -100,6 +101,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
return 0;
}
+static bool lan966x_port_unique_address(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port || port->dev == dev)
+ continue;
+
+ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
+ return false;
+ }
+
+ return true;
+}
+
static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
{
struct lan966x_port *port = netdev_priv(dev);
@@ -107,16 +126,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
const struct sockaddr *addr = p;
int ret;
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
/* Learn the new net device MAC address in the mac table. */
ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
if (ret)
return ret;
+	/* If another port still uses the same address as this dev, don't
+	 * delete it from the MAC table
+ */
+ if (!lan966x_port_unique_address(dev))
+ goto out;
+
/* Then forget the previous one. */
ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
if (ret)
return ret;
+out:
eth_hw_addr_set(dev, addr->sa_data);
return ret;
}
@@ -182,9 +211,12 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
- return readx_poll_timeout(lan966x_port_inj_status, lan966x, val,
- QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
- READL_SLEEP_US, READL_TIMEOUT_US);
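+	/* Fast path: skip the polling loop if the FIFO is already ready */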
+ if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
+ return 0;
+
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
}
static int lan966x_port_ifh_xmit(struct sk_buff *skb,
@@ -201,7 +233,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
val = lan_rd(lan966x, QS_INJ_STATUS);
if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
(QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
- return NETDEV_TX_BUSY;
+ goto err;
/* Write start of frame */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
@@ -213,7 +245,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
}
@@ -225,7 +257,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
}
@@ -235,7 +267,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr(0, lan966x, QS_INJ_WR(grp));
++i;
@@ -255,8 +287,19 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
+
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
+
+err:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ return NETDEV_TX_BUSY;
}
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
@@ -289,10 +332,25 @@ static void lan966x_ifh_set_vid(void *ifh, u64 vid)
IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
}
-static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
+{
+ packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1,
+ IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
+{
+ packing(ifh, &timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
+ IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
+}
+
+static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
__be32 ifh[IFH_LEN];
+ int err;
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
@@ -302,19 +360,47 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
- return lan966x_port_ifh_xmit(skb, ifh, dev);
+ if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ err = lan966x_ptp_txtstamp_request(port, skb);
+ if (err)
+ return err;
+
+ lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
+ }
+
+ spin_lock(&lan966x->tx_lock);
+ if (port->lan966x->fdma)
+ err = lan966x_fdma_xmit(skb, ifh, dev);
+ else
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ spin_unlock(&lan966x->tx_lock);
+
+ return err;
}
static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int old_mtu = dev->mtu;
+ int err;
- lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
dev->mtu = new_mtu;
- return 0;
+ if (!lan966x->fdma)
+ return 0;
+
+ err = lan966x_fdma_change_mtu(lan966x);
+ if (err) {
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = old_mtu;
+ }
+
+ return err;
}
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
@@ -350,6 +436,26 @@ static int lan966x_port_get_parent_id(struct net_device *dev,
return 0;
}
+static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return lan966x_ptp_hwtstamp_set(port, ifr);
+ case SIOCGHWTSTAMP:
+ return lan966x_ptp_hwtstamp_get(port, ifr);
+ }
+ }
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_open = lan966x_port_open,
.ndo_stop = lan966x_port_stop,
@@ -360,6 +466,8 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_get_stats64 = lan966x_stats_get,
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
+ .ndo_eth_ioctl = lan966x_port_ioctl,
+ .ndo_setup_tc = lan966x_tc_setup,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -367,6 +475,38 @@ bool lan966x_netdevice_check(const struct net_device *dev)
return dev->netdev_ops == &lan966x_port_netdev_ops;
}
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
+{
+ u32 val;
+
+	/* IGMP and MLD frames are not forwarded by the HW if multicast
+	 * snooping is enabled, therefore don't mark them as offloaded so
+	 * that the SW can forward the frames accordingly.
+ */
+ val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
+ if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
+ return true;
+
+ if (eth_type_vlan(skb->protocol)) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_IGMP)
+ return false;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
+ !ipv6_mc_check_mld(skb))
+ return false;
+
+ return true;
+}
+
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
return lan_rd(lan966x, QS_XTR_RD(grp));
@@ -422,7 +562,7 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
}
}
-static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
@@ -434,6 +574,12 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len)
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+{
+ packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
+ IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
+}
+
static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
{
struct lan966x *lan966x = args;
@@ -443,10 +589,10 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
return IRQ_NONE;
do {
+ u64 src_port, len, timestamp;
struct net_device *dev;
struct sk_buff *skb;
int sz = 0, buf_len;
- u64 src_port, len;
u32 ifh[IFH_LEN];
u32 *buf;
u32 val;
@@ -461,6 +607,7 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
lan966x_ifh_get_src_port(ifh, &src_port);
lan966x_ifh_get_len(ifh, &len);
+ lan966x_ifh_get_timestamp(ifh, &timestamp);
WARN_ON(src_port >= lan966x->num_phys_ports);
@@ -501,12 +648,20 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
*buf = val;
}
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
skb->protocol = eth_type_trans(skb, dev);
- if (lan966x->bridge_mask & BIT(src_port))
+ if (lan966x->bridge_mask & BIT(src_port)) {
skb->offload_fwd_mark = 1;
- netif_rx_ni(skb);
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
@@ -539,6 +694,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (port->dev)
unregister_netdev(port->dev);
+ if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
+ lan966x_fdma_netdev_deinit(lan966x, port->dev);
+
if (port->phylink) {
rtnl_lock();
lan966x_port_stop(port->dev);
@@ -554,10 +712,19 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
+
+ if (lan966x->fdma)
+ devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
+
+ if (lan966x->ptp_irq > 0)
+ devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
+
+ if (lan966x->ptp_ext_irq > 0)
+ devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
@@ -573,7 +740,8 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
return -EINVAL;
dev = devm_alloc_etherdev_mqs(lan966x->dev,
- sizeof(struct lan966x_port), 8, 1);
+ sizeof(struct lan966x_port),
+ NUM_PRIO_QUEUES, 1);
if (!dev)
return -ENOMEM;
@@ -589,7 +757,9 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
dev->netdev_ops = &lan966x_port_netdev_ops;
dev->ethtool_ops = &lan966x_ethtool_ops;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
dev->needed_headroom = IFH_LEN * sizeof(u32);
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -605,6 +775,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
@@ -613,6 +784,8 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QUSGMII,
+ port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
@@ -628,7 +801,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
}
port->phylink = phylink;
- phylink_set_pcs(phylink, &port->phylink_pcs);
err = register_netdev(dev);
if (err) {
@@ -687,12 +859,12 @@ static void lan966x_init(struct lan966x *lan966x)
/* Do byte-swap and expect status after last data word
* Extraction: Mode: manual extraction) | Byte_swap
*/
- lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_XTR_GRP_CFG(0));
/* Injection: Mode: manual injection | Byte_swap */
- lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_INJ_GRP_CFG(0));
@@ -708,7 +880,7 @@ static void lan966x_init(struct lan966x *lan966x)
/* Setup flooding PGIDs */
lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
- ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
lan966x, ANA_FLOODING_IPMC);
@@ -770,6 +942,10 @@ static void lan966x_init(struct lan966x *lan966x)
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_MCIPV4));
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV6));
+
/* Unicast to all other ports */
lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
ANA_PGID_PGID,
@@ -786,6 +962,10 @@ static void lan966x_init(struct lan966x *lan966x)
lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
ANA_ANAINTR_INTR_ENA,
lan966x, ANA_ANAINTR);
+
+ spin_lock_init(&lan966x->tx_lock);
+
+ lan966x_taprio_init(lan966x);
}
static int lan966x_ram_init(struct lan966x *lan966x)
@@ -795,22 +975,17 @@ static int lan966x_ram_init(struct lan966x *lan966x)
static int lan966x_reset_switch(struct lan966x *lan966x)
{
- struct reset_control *switch_reset, *phy_reset;
+ struct reset_control *switch_reset;
int val = 0;
int ret;
- switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
+ switch_reset = devm_reset_control_get_optional_shared(lan966x->dev,
+ "switch");
if (IS_ERR(switch_reset))
return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
"Could not obtain switch reset");
- phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
- if (IS_ERR(phy_reset))
- return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
- "Could not obtain phy reset\n");
-
reset_control_reset(switch_reset);
- reset_control_reset(phy_reset);
lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
@@ -830,7 +1005,7 @@ static int lan966x_probe(struct platform_device *pdev)
struct fwnode_handle *ports, *portnp;
struct lan966x *lan966x;
u8 mac_addr[ETH_ALEN];
- int err, i;
+ int err;
lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
if (!lan966x)
@@ -861,11 +1036,7 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
return dev_err_probe(&pdev->dev, err, "Reset failed");
- i = 0;
- fwnode_for_each_available_child_node(ports, portnp)
- ++i;
-
- lan966x->num_phys_ports = i;
+ lan966x->num_phys_ports = NUM_PHYS_PORTS;
lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
sizeof(struct lan966x_port *),
GFP_KERNEL);
@@ -889,7 +1060,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
@@ -897,6 +1068,42 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
}
+ lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
+ if (lan966x->ptp_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
+ lan966x_ptp_irq_handler, IRQF_ONESHOT,
+ "ptp irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");
+
+ lan966x->ptp = true;
+ }
+
+ lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
+ if (lan966x->fdma_irq > 0) {
+ err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
+ lan966x_fdma_irq_handler, 0,
+ "fdma irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
+
+ lan966x->fdma = true;
+ }
+
+ if (lan966x->ptp) {
+ lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
+ if (lan966x->ptp_ext_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ lan966x->ptp_ext_irq, NULL,
+ lan966x_ptp_ext_irq_handler,
+ IRQF_ONESHOT,
+ "ptp-ext irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Unable to use ptp-ext irq");
+ }
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -920,8 +1127,13 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
- if (!IS_ERR(serdes))
- lan966x->ports[p]->serdes = serdes;
+ if (PTR_ERR(serdes) == -ENODEV)
+ serdes = NULL;
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ goto cleanup_ports;
+ }
+ lan966x->ports[p]->serdes = serdes;
lan966x_port_init(lan966x->ports[p]);
}
@@ -931,8 +1143,22 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_ports;
+ err = lan966x_ptp_init(lan966x);
+ if (err)
+ goto cleanup_fdb;
+
+ err = lan966x_fdma_init(lan966x);
+ if (err)
+ goto cleanup_ptp;
+
return 0;
+cleanup_ptp:
+ lan966x_ptp_deinit(lan966x);
+
+cleanup_fdb:
+ lan966x_fdb_deinit(lan966x);
+
cleanup_ports:
fwnode_handle_put(portnp);
@@ -949,6 +1175,8 @@ static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_taprio_deinit(lan966x);
+ lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
cancel_delayed_work_sync(&lan966x->stats_work);
@@ -958,6 +1186,7 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
lan966x_fdb_deinit(lan966x);
+ lan966x_ptp_deinit(lan966x);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 99c6d0a9f946..4ec33999e4df 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -8,6 +8,9 @@
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/ptp_clock_kernel.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/switchdev.h>
#include "lan966x_regs.h"
@@ -16,10 +19,15 @@
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
#define LAN966X_BUFFER_CELL_SZ 64
#define LAN966X_BUFFER_MEMORY (160 * 1024)
#define LAN966X_BUFFER_MIN_SZ 60
+#define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN)
+
#define PGID_AGGR 64
#define PGID_SRC 80
#define PGID_ENTRIES 89
@@ -30,7 +38,9 @@
/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
#define QSYS_Q_RSRV 95
+#define NUM_PHYS_PORTS 8
#define CPU_PORT 8
+#define NUM_PRIO_QUEUES 8
/* Reserved PGIDs */
#define PGID_CPU (PGID_AGGR - 6)
@@ -50,6 +60,33 @@
#define LAN966X_SPEED_100 2
#define LAN966X_SPEED_10 3
+#define LAN966X_PHC_COUNT 3
+#define LAN966X_PHC_PORT 0
+#define LAN966X_PHC_PINS_NUM 7
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define FDMA_RX_DCB_MAX_DBS 1
+#define FDMA_TX_DCB_MAX_DBS 1
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+#define FDMA_DCB_MAX 512
+
+#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
+#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -65,11 +102,107 @@ enum macaccess_entry_type {
struct lan966x_port;
+struct lan966x_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct lan966x_rx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct lan966x_tx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
+};
+
+struct lan966x_rx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the array of hardware dcbs. */
+ struct lan966x_rx_dcb *dcbs;
+
+ /* Pointer to the last address in the dcbs. */
+ struct lan966x_rx_dcb *last_entry;
+
+ /* For each DB, there is a page */
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+
+ /* Represents the db_index; it ranges from 0 to FDMA_RX_DCB_MAX_DBS.
+ * Once it reaches FDMA_RX_DCB_MAX_DBS, the DCB can be reused.
+ */
+ int db_index;
+
+ /* Represents the index in the dcbs. It has a value between 0 and
+ * FDMA_DCB_MAX
+ */
+ int dcb_index;
+
+ /* Represents the dma address to the dcbs array */
+ dma_addr_t dma;
+
+ /* Represents the page order that is used to allocate the pages for the
+ * RX buffers. It is calculated from the maximum MTU of the devices.
+ */
+ u8 page_order;
+
+ u8 channel_id;
+};
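
The db_index/dcb_index pair above forms a two-level cursor over the DCB ring. A minimal sketch of the advance logic the comments describe, assuming the FDMA_* constants above (the helper name fdma_rx_advance is hypothetical, not a driver function):

/* Hypothetical helper (not part of the driver): advance the two-level
 * RX cursor described above. Once db_index reaches FDMA_RX_DCB_MAX_DBS
 * the current DCB can be reused, so it wraps to 0 and dcb_index moves
 * on to the next DCB, wrapping at FDMA_DCB_MAX.
 */
static inline void fdma_rx_advance(int *db_index, int *dcb_index)
{
	if (++(*db_index) >= FDMA_RX_DCB_MAX_DBS) {
		*db_index = 0;
		*dcb_index = (*dcb_index + 1) % FDMA_DCB_MAX;
	}
}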
+
+struct lan966x_tx_dcb_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
+};
+
+struct lan966x_tx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the dcb list */
+ struct lan966x_tx_dcb *dcbs;
+ u16 last_in_use;
+
+ /* Represents the DMA address to the first entry of the dcb entries. */
+ dma_addr_t dma;
+
+ /* Array of dcbs that are given to the HW */
+ struct lan966x_tx_dcb_buf *dcbs_buf;
+
+ u8 channel_id;
+
+ bool activated;
+};
+
struct lan966x_stat_layout {
u32 offset;
char name[ETH_GSTRING_LEN];
};
+struct lan966x_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
+ struct hwtstamp_config hwtstamp_config;
+ struct lan966x *lan966x;
+ u8 index;
+};
+
+struct lan966x_skb_cb {
+ u8 rew_op;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10)
+#define LAN966X_SKB_CB(skb) \
+ ((struct lan966x_skb_cb *)((skb)->cb))
+
struct lan966x {
struct device *dev;
@@ -82,6 +215,8 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
+ spinlock_t tx_lock; /* lock for frame transmission */
+
struct net_device *bridge;
u16 bridge_mask;
u16 bridge_fwd_mask;
@@ -105,6 +240,9 @@ struct lan966x {
/* interrupts */
int xtr_irq;
int ana_irq;
+ int ptp_irq;
+ int fdma_irq;
+ int ptp_ext_irq;
/* workqueue for fdb */
struct workqueue_struct *fdb_work;
@@ -113,6 +251,26 @@ struct lan966x {
/* mdb */
struct list_head mdb_entries;
struct list_head pgid_entries;
+
+ /* ptp */
+ bool ptp;
+ struct lan966x_phc phc[LAN966X_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+
+ /* fdma */
+ bool fdma;
+ struct net_device *fdma_ndev;
+ struct lan966x_rx rx;
+ struct lan966x_tx tx;
+ struct napi_struct napi;
+
+ /* Mirror */
+ struct lan966x_port *mirror_monitor;
+ u32 mirror_mask[2];
+ u32 mirror_count;
};
struct lan966x_port_config {
@@ -125,6 +283,15 @@ struct lan966x_port_config {
bool autoneg;
};
+struct lan966x_port_tc {
+ bool ingress_shared_block;
+ unsigned long police_id;
+ unsigned long ingress_mirror_id;
+ unsigned long egress_mirror_id;
+ struct flow_stats police_stat;
+ struct flow_stats mirror_stat;
+};
+
struct lan966x_port {
struct net_device *dev;
struct lan966x *lan966x;
@@ -135,6 +302,7 @@ struct lan966x_port {
bool vlan_aware;
bool learn_ena;
+ bool mcast_ena;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
@@ -142,17 +310,34 @@ struct lan966x_port {
struct phylink *phylink;
struct phy *serdes;
struct fwnode_handle *fwnode;
+
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
+
+ struct net_device *bond;
+ bool lag_tx_active;
+ enum netdev_lag_hash hash_type;
+
+ struct lan966x_port_tc tc;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
extern const struct ethtool_ops lan966x_ethtool_ops;
+extern struct notifier_block lan966x_switchdev_nb __read_mostly;
+extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
bool lan966x_netdevice_check(const struct net_device *dev);
void lan966x_register_notifier_blocks(void);
void lan966x_unregister_notifier_blocks(void);
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
int lan966x_stats_init(struct lan966x *lan966x);
@@ -190,6 +375,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x,
struct lan966x_port *port,
const unsigned char *addr,
u16 vid);
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst);
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src);
void lan966x_mac_purge_entries(struct lan966x *lan966x);
irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
@@ -214,6 +404,7 @@ void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
int lan966x_fdb_init(struct lan966x *lan966x);
void lan966x_fdb_deinit(struct lan966x *lan966x);
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x);
int lan966x_handle_fdb(struct net_device *dev,
struct net_device *orig_dev,
unsigned long event, const void *ctx,
@@ -227,6 +418,114 @@ int lan966x_handle_port_mdb_del(struct lan966x_port *port,
const struct switchdev_obj *obj);
void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_clear_entries(struct lan966x *lan966x);
+void lan966x_mdb_restore_entries(struct lan966x *lan966x);
+
+int lan966x_ptp_init(struct lan966x *lan966x);
+void lan966x_ptp_deinit(struct lan966x *lan966x);
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr);
+int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr);
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 timestamp);
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb);
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb);
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+u32 lan966x_ptp_get_period_ps(void);
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_change_mtu(struct lan966x *lan966x);
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
+int lan966x_fdma_init(struct lan966x *lan966x);
+void lan966x_fdma_deinit(struct lan966x *lan966x);
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack);
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond);
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info);
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev);
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond);
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state);
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t);
+void lan966x_update_fwd_mask(struct lan966x *lan966x);
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc);
+int lan966x_mqprio_del(struct lan966x_port *port);
+
+void lan966x_taprio_init(struct lan966x *lan966x);
+void lan966x_taprio_deinit(struct lan966x *lan966x);
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt);
+int lan966x_taprio_del(struct lan966x_port *port);
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed);
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress);
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack);
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats);
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
index c68d0a99d292..2af55268bf4d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -504,3 +504,48 @@ void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
}
}
+
+void lan966x_mdb_clear_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ /* Remove just the MAC entry but keep the PGID for L2 entries,
+ * so they can be restored at a later point.
+ */
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+}
+
+void lan966x_mdb_restore_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ bool cpu_copy = false;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) {
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mac_ip_learn(lan966x, cpu_copy, mac,
+ mdb_entry->vid, type);
+ } else {
+ lan966x_mac_learn(lan966x, mdb_entry->pgid->index,
+ mdb_entry->mac,
+ mdb_entry->vid, type);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
new file mode 100644
index 000000000000..7e1ba3f40c35
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_port *monitor_port;
+
+ if (!lan966x_netdevice_check(action->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an lan966x port");
+ return -EOPNOTSUPP;
+ }
+
+ monitor_port = netdev_priv(action->dev);
+
+ if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror already exists");
+ return -EEXIST;
+ }
+
+ if (lan966x->mirror_monitor &&
+ lan966x->mirror_monitor != monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change mirror port while in use");
+ return -EBUSY;
+ }
+
+ if (port == monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mirror the monitor port");
+ return -EINVAL;
+ }
+
+ lan966x->mirror_mask[ingress] |= BIT(port->chip_port);
+
+ lan966x->mirror_monitor = monitor_port;
+ lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count++;
+
+ if (ingress)
+ port->tc.ingress_mirror_id = mirror_id;
+ else
+ port->tc.egress_mirror_id = mirror_id;
+
+ return 0;
+}
+
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "There is no mirroring for this port");
+ return -ENOENT;
+ }
+
+ lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count--;
+
+ if (lan966x->mirror_count == 0) {
+ lan966x->mirror_monitor = NULL;
+ lan_wr(0, lan966x, ANA_MIRRORPORTS);
+ }
+
+ if (ingress)
+ port->tc.ingress_mirror_id = 0;
+ else
+ port->tc.egress_mirror_id = 0;
+
+ return 0;
+}
+
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.mirror_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ if (ingress) {
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(stats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
new file mode 100644
index 000000000000..7fa76e74f9e2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc)
+{
+ u8 i;
+
+ if (num_tc != NUM_PRIO_QUEUES) {
+ netdev_err(port->dev, "Only %d traffic classes supported\n",
+ NUM_PRIO_QUEUES);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(port->dev, num_tc);
+
+ for (i = 0; i < num_tc; ++i)
+ netdev_set_tc_queue(port->dev, i, 1, i);
+
+ return 0;
+}
+
+int lan966x_mqprio_del(struct lan966x_port *port)
+{
+ netdev_reset_tc(port->dev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index b66a9aa00ea4..e4ac59480514 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -9,6 +9,14 @@
#include "lan966x_main.h"
+static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
static void lan966x_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -20,11 +28,12 @@ static int lan966x_phylink_mac_prepare(struct phylink_config *config,
phy_interface_t iface)
{
struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ phy_interface_t serdes_mode = iface;
int err;
if (port->serdes) {
err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
- iface);
+ serdes_mode);
if (err) {
netdev_err(to_net_dev(config->dev),
"Could not set mode of SerDes\n");
@@ -51,6 +60,9 @@ static void lan966x_phylink_mac_link_up(struct phylink_config *config,
port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+ if (phy_interface_mode_is_rgmii(interface))
+ phy_set_speed(port->serdes, speed);
+
lan966x_port_config_up(port);
}
@@ -114,6 +126,7 @@ static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
const struct phylink_mac_ops lan966x_phylink_mac_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = lan966x_phylink_mac_select,
.mac_config = lan966x_phylink_mac_config,
.mac_prepare = lan966x_phylink_mac_prepare,
.mac_link_down = lan966x_phylink_mac_link_down,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
new file mode 100644
index 000000000000..a9aec900d608
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+/* 0-8 : 9 port policers */
+#define POL_IDX_PORT 0
+
+/* Policer order: Serial (QoS -> Port -> VCAP) */
+#define POL_ORDER 0x1d3
+
+struct lan966x_tc_policer {
+ /* kilobit per second */
+ u32 rate;
+ /* bytes */
+ u32 burst;
+};
+
+static int lan966x_police_add(struct lan966x_port *port,
+ struct lan966x_tc_policer *pol,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Rate unit is 33 1/3 kbps */
+ pol->rate = DIV_ROUND_UP(pol->rate * 3, 100);
+ /* Avoid zero burst size */
+ pol->burst = pol->burst ?: 1;
+ /* Unit is 4kB */
+ pol->burst = DIV_ROUND_UP(pol->burst, 4096);
+
+ if (pol->rate > GENMASK(15, 0) ||
+ pol->burst > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(1) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
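
For reference, a worked example of the unit conversions above as a standalone user-space sketch (the input values are illustrative only):

/* Standalone sketch of the policer unit conversions: the rate arrives
 * in kbps and is converted to 33 1/3 kbps units; the burst arrives in
 * bytes and is converted to 4 kB units.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rate = 100000;	/* 100 Mbps, in kbps */
	unsigned int burst = 10000;	/* in bytes */

	rate = DIV_ROUND_UP(rate * 3, 100);	/* -> 3000 units */
	burst = burst ? burst : 1;		/* avoid zero burst */
	burst = DIV_ROUND_UP(burst, 4096);	/* -> 3 units */

	printf("rate=%u burst=%u\n", rate, burst);	/* rate=3000 burst=3 */
	return 0;
}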
+
+static int lan966x_police_del(struct lan966x_port *port,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(2) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(0),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
+
+static int lan966x_police_validate(struct lan966x_port *port,
+ const struct flow_action *action,
+ const struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.ingress_shared_block) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on shared ingress blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct rtnl_link_stats64 new_stats;
+ struct lan966x_tc_policer pol;
+ struct flow_stats *old_stats;
+ int err;
+
+ err = lan966x_police_validate(port, action, act, police_id, ingress,
+ extack);
+ if (err)
+ return err;
+
+ memset(&pol, 0, sizeof(pol));
+
+ pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = act->police.burst;
+
+ err = lan966x_police_add(port, &pol, POL_IDX_PORT + port->chip_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = police_id;
+
+ /* Setup initial stats */
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+
+ return 0;
+}
+
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ if (port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid policer id");
+ return -EINVAL;
+ }
+
+ err = lan966x_police_del(port, port->tc.police_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = 0;
+
+ return 0;
+}
+
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 237555845a52..1a61c6cdb077 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -165,10 +165,12 @@ static void lan966x_port_link_up(struct lan966x_port *port)
break;
}
+ lan966x_taprio_speed_set(port, config->speed);
+
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
* port speed for QSGMII ports.
*/
- if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ if (phy_interface_num_ports(config->portmode) == 4)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
@@ -331,10 +333,14 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x *lan966x = port->lan966x;
bool inband_aneg = false;
bool outband;
+ bool full_preamble = false;
+
+ if (config->portmode == PHY_INTERFACE_MODE_QUSGMII)
+ full_preamble = true;
if (config->inband) {
if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
- config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ phy_interface_num_ports(config->portmode) == 4)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
config->autoneg)
@@ -345,9 +351,15 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
outband = true;
}
- /* Disable or enable inband */
- lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
- DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ /* Disable or enable inband.
+ * For QUSGMII, we rely on the preamble to transmit data such as
+ * timestamps, therefore force full preamble transmission and prevent
+ * preamble shortening.
+ */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA,
lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
/* Enable PCS */
@@ -393,7 +405,10 @@ void lan966x_port_init(struct lan966x_port *port)
lan966x_port_config_down(port);
- if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
+ if (lan966x->fdma)
+ lan966x_fdma_netdev_init(lan966x, port->dev);
+
+ if (phy_interface_num_ports(config->portmode) != 4)
return;
lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
new file mode 100644
index 000000000000..e5a2bbe064f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/ptp_classify.h>
+
+#include "lan966x_main.h"
+
+#define LAN966X_MAX_PTP_ID 512
+
+/* Represents a 1ppm adjustment in 2^59 format, with 6.037735849ns as reference.
+ * The value is calculated as follows: (1/1000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPM_FORMAT 3480517749723LL
+
+/* Represents a 1ppb adjustment in 2^59 format, with 6.037735849ns as reference.
+ * The value is calculated as follows: (1/1000000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPB_FORMAT 3480517749LL
+
+#define TOD_ACC_PIN 0x7
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 lan966x_ptp_get_nominal_value(void)
+{
+ /* This is the default value by which the time of day is incremented
+ * on each system clock cycle. It is in 5.59 fixed-point nanosecond
+ * format.
+ */
+ return 0x304d4873ecade305;
+}
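
To make the 5.59 fixed-point format concrete, a standalone user-space sketch that decodes the nominal increment above and re-derives the 1ppm constant (illustration only, not driver code):

/* Standalone sketch: in 5.59 format the fractional LSB weighs 2^-59 ns,
 * so the nominal increment decodes to ~6.037735849 ns per clock cycle,
 * and 1ppm of it is ~3480517749723 LSBs (LAN966X_1PPM_FORMAT).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tod_inc = 0x304d4873ecade305ULL;
	double lsb = 1.0 / (double)(1ULL << 59);
	double ns = (double)tod_inc * lsb;

	printf("nominal increment: %.9f ns\n", ns);
	printf("1ppm in 2^59 format: %.0f\n", 1e-6 * ns / lsb);
	return 0;
}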
+
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct hwtstamp_config cfg;
+ struct lan966x_phc *phc;
+
+ /* For now, don't allow PTP to run on ports that are part of a bridge:
+ * in the transparent clock case the HW would still forward the frames,
+ * resulting in duplicate frames.
+ */
+ if (lan966x->bridge_mask & BIT(port->chip_port))
+ return -EINVAL;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&lan966x->ptp_lock);
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&lan966x->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
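
The handler above sits behind the standard SIOCSHWTSTAMP ioctl; a minimal user-space sketch of how it would be exercised (interface name and error handling are illustrative):

/* User-space sketch: request two-step TX timestamping and RX
 * timestamping of all frames on a given interface.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_two_step(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,	/* two-step, per the switch above */
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	err = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return err;
}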
+
+int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
+ sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP)
+ return IFH_REW_OP_NOOP;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return IFH_REW_OP_NOOP;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return IFH_REW_OP_NOOP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ return IFH_REW_OP_TWO_STEP_PTP;
+
+ /* If it is a sync message and one-step is configured, use the
+ * one-step operation; otherwise run as two-step.
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0)
+ return IFH_REW_OP_ONE_STEP_PTP;
+
+ return IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+ u8 rew_op;
+
+ rew_op = lan966x_ptp_classify(port, skb);
+ LAN966X_SKB_CB(skb)->rew_op = rew_op;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ lan966x_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ LAN966X_SKB_CB(skb)->ts_id = port->ts_id;
+ LAN966X_SKB_CB(skb)->jiffies = jiffies;
+
+ lan966x->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == LAN966X_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ lan966x->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+}
+
+static void lan966x_get_hwtimestamp(struct lan966x *lan966x,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* The seconds counter has incremented since the ts was captured */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+}
+
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = LAN966X_MAX_PTP_ID;
+ struct lan966x *lan966x = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct lan966x_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = lan966x->ports[txport];
+
+ /* Retrieve the delay */
+ delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+ delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Get the next timestamp from the FIFO; it must be the RX
+ * timestamp, which carries the ID of the frame.
+ */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (LAN966X_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&lan966x->ptp_ts_id_lock);
+ lan966x->ptp_skbs--;
+ spin_unlock(&lan966x->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ lan966x_get_hwtimestamp(lan966x, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ struct lan966x_phc *phc;
+ unsigned long flags;
+ u64 time = 0;
+ time64_t s;
+ int pin, i;
+ s64 ns;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR)))
+ return IRQ_NONE;
+
+ /* Go through all domains and see which pin generated the interrupt */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ struct ptp_clock_event ptp_event = {0};
+
+ phc = &lan966x->phc[i];
+ pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
+ if (pin == -1)
+ continue;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Acknowledge the interrupt so a new one can be raised;
+ * writing 1 clears the bit.
+ */
+ lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
+
+ /* Get current time */
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+ time = ktime_set(s, ns);
+
+ ptp_event.index = pin;
+ ptp_event.timestamp = time;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(phc->clock, &ptp_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = lan966x_ptp_get_nominal_value();
+
+ /* The multiplication is split into two separate additions to avoid
+ * overflow issues: multiplying scaled_ppm (with its 16-bit fractional
+ * part) directly would overflow for adjustments larger than ~20ppm.
+ */
+ ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16);
+ ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ lan_wr((u32)(tod_inc >> 32), lan966x,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
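
To see why the multiplication is split, a standalone check comparing the two-step computation with an exact 128-bit product (user-space sketch; 100 ppm is an illustrative input, and __int128 requires gcc/clang):

/* Standalone sketch: (K * scaled_ppm) >> 16 computed naively overflows
 * 64 bits for a few tens of ppm, so the driver splits it into the
 * integer and fractional parts of scaled_ppm. Both paths agree.
 */
#include <stdio.h>
#include <stdint.h>

#define K 3480517749723LL	/* LAN966X_1PPM_FORMAT */

int main(void)
{
	int64_t scaled_ppm = 100LL << 16;	/* 100 ppm */
	uint64_t split = K * (scaled_ppm >> 16) +
			 ((K * (scaled_ppm & 0xffff)) >> 16);
	unsigned __int128 exact = ((unsigned __int128)K * scaled_ppm) >> 16;

	printf("split=%llu exact=%llu\n",
	       (unsigned long long)split, (unsigned long long)exact);
	return 0;
}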
+
+static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ lan_wr(lower_32_bits(ts->tv_sec),
+ lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta),
+ lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to lan966x_ptp_settime64(), which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ lan966x_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ lan966x_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct ptp_clock_info *info;
+ int i;
+
+ /* Currently support only 1 channel */
+ if (chan != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -1;
+ }
+
+ /* The PTP pins are shared by all the PHCs, so check whether the pin
+ * is already in use by another PHC: it is if a function is already
+ * assigned to that pin on the other PHC.
+ */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ info = &lan966x->phc[i].info;
+
+ /* Ignore the check with ourself */
+ if (ptp == info)
+ continue;
+
+ if (info->pin_config[pin].func == PTP_PF_PEROUT ||
+ info->pin_config[pin].func == PTP_PF_EXTTS)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct timespec64 ts_phase, ts_period;
+ unsigned long flags;
+ s64 wf_high, wf_low;
+ bool pps = false;
+ int pin;
+
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ if (!on) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ if (rq->perout.period.sec == 1 &&
+ rq->perout.period.nsec == 0)
+ pps = true;
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(lan966x->dev,
+ "Absolute time not supported!\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ wf_high = 5000;
+ }
+
+ if (pps) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(3),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ int pin;
+ u32 val;
+
+ if (lan966x->ptp_ext_irq <= 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SELECT_SET(pin),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_SYNC |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SELECT,
+ lan966x, PTP_PIN_CFG(pin));
+
+ val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
+ if (on)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+ lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan966x_ptp_perout(ptp, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan966x_ptp_extts(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info lan966x_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan966x ptp",
+ .max_adj = 200000,
+ .gettime64 = lan966x_ptp_gettime64,
+ .settime64 = lan966x_ptp_settime64,
+ .adjtime = lan966x_ptp_adjtime,
+ .adjfine = lan966x_ptp_adjfine,
+ .verify = lan966x_ptp_verify,
+ .enable = lan966x_ptp_enable,
+ .n_per_out = LAN966X_PHC_PINS_NUM,
+ .n_ext_ts = LAN966X_PHC_PINS_NUM,
+ .n_pins = LAN966X_PHC_PINS_NUM,
+};
+
+static int lan966x_ptp_phc_init(struct lan966x *lan966x,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct lan966x_phc *phc = &lan966x->phc[index];
+ struct ptp_pin_desc *p;
+ int i;
+
+ for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
+ p = &phc->pins[i];
+
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ phc->info = *clock_info;
+ phc->info.pin_config = &phc->pins[0];
+ phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->lan966x = lan966x;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int lan966x_ptp_init(struct lan966x *lan966x)
+{
+ u64 tod_adj = lan966x_ptp_get_nominal_value();
+ struct lan966x_port *port;
+ int err, i;
+
+ if (!lan966x->ptp)
+ return 0;
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&lan966x->ptp_clock_lock);
+ spin_lock_init(&lan966x->ptp_ts_id_lock);
+ mutex_init(&lan966x->ptp_lock);
+
+ /* Disable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(i, 0));
+ lan_wr((u32)(tod_adj >> 32), lan966x,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ /* Enable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void lan966x_ptp_deinit(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i)
+ ptp_clock_unregister(lan966x->phc[i].clock);
+}
+
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan966x_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!lan966x->ptp)
+ return;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ lan966x_ptp_gettime64(&phc->info, &ts);
+
+ /* Drop the sub-ns precision */
+ timestamp = timestamp >> 2;
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
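
A worked example of the rollover correction above: if lan966x_ptp_gettime64() returns 100.000000900 s but the hardware nanosecond field (after dropping the two sub-ns bits) is 999999800, then ts.tv_nsec (900) is smaller than the timestamp, so the frame must have been stamped just before the seconds counter rolled over; decrementing tv_sec yields the correct 99.999999800 s.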
+
+u32 lan966x_ptp_get_period_ps(void)
+{
+ /* This represents the system clock period in picoseconds */
+ return 15125;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 797560172aca..fb5087fef22e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -17,8 +17,10 @@ enum lan966x_target {
TARGET_CHIP_TOP = 5,
TARGET_CPU = 6,
TARGET_DEV = 13,
+ TARGET_FDMA = 21,
TARGET_GCB = 27,
TARGET_ORG = 36,
+ TARGET_PTP = 41,
TARGET_QS = 42,
TARGET_QSYS = 46,
TARGET_REW = 47,
@@ -88,6 +90,24 @@ enum lan966x_target {
#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+/* ANA:ANA:MIRRORPORTS */
+#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4)
+
+#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0)
+#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x)
+#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x)
+
+/* ANA:ANA:EMIRRORPORTS */
+#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4)
+
+#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+
/* ANA:ANA:FLOODING */
#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
@@ -298,6 +318,24 @@ enum lan966x_target {
/* ANA:PORT:CPU_FWD_CFG */
#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
@@ -310,6 +348,12 @@ enum lan966x_target {
/* ANA:PORT:PORT_CFG */
#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+
#define ANA_PORT_CFG_LEARNAUTO BIT(6)
#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
@@ -334,6 +378,21 @@ enum lan966x_target {
#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+/* ANA:PORT:POL_CFG */
+#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4)
+
+#define ANA_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x)
+#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\
+ FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x)
+
+#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0)
+#define ANA_POL_CFG_POL_ORDER_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_POL_ORDER, x)
+#define ANA_POL_CFG_POL_ORDER_GET(x)\
+ FIELD_GET(ANA_POL_CFG_POL_ORDER, x)
+
/* ANA:PFC:PFC_CFG */
#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
@@ -343,6 +402,108 @@ enum lan966x_target {
#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+/* ANA:COMMON:AGGR_CFG */
+#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x)
+#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x)
+
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+
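All *_SET/*_GET helpers in this file follow the same FIELD_PREP/FIELD_GET pattern and pair with the driver's lan_rd()/lan_rmw() accessors. A minimal sketch of the idiom, using the AGGR_CFG fields above and assuming a struct lan966x *lan966x in scope:

    u32 val, smac_ena;

    /* Read-modify-write one field, leaving the rest of the register alone. */
    lan_rmw(ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
            ANA_AGGR_CFG_AC_SMAC_ENA,
            lan966x, ANA_AGGR_CFG);

    /* Extract a field from a raw read. */
    val = lan_rd(lan966x, ANA_AGGR_CFG);
    smac_ena = ANA_AGGR_CFG_AC_SMAC_ENA_GET(val);
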
+/* ANA:POL:POL_PIR_CFG */
+#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4)
+
+#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x)
+#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x)
+
+#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0)
+#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x)
+#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x)
+
+/* ANA:POL:POL_MODE_CFG */
+#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4)
+
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5)
+#define ANA_POL_MODE_IPG_SIZE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x)
+#define ANA_POL_MODE_IPG_SIZE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_IPG_SIZE, x)
+
+#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3)
+#define ANA_POL_MODE_FRM_MODE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_FRM_MODE, x)
+#define ANA_POL_MODE_FRM_MODE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_FRM_MODE, x)
+
+#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0)
+#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x)
+#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x)
+
+/* ANA:POL:POL_PIR_STATE */
+#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4)
+
+#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0)
+#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x)
+#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\
+ FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x)
+
/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
@@ -424,6 +585,21 @@ enum lan966x_target {
#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4)
+
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
@@ -484,6 +660,12 @@ enum lan966x_target {
#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
@@ -559,6 +741,253 @@ enum lan966x_target {
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_CH_DB_DISCARD */
+#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4)
+
+#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\
+ FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\
+ FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVE */
+#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(3)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0)
+#define PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_INTR_PTP, x)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0)
+#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x)
+#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x)
+
+/* PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_DOM_CFG_ENA GENMASK(11, 9)
+#define PTP_DOM_CFG_ENA_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_ENA, x)
+#define PTP_DOM_CFG_ENA_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_ENA, x)
+
+#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x)
+#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x)
+
+/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4)
+
+/* PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4)
+
+#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27)
+#define PTP_PIN_CFG_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x)
+#define PTP_PIN_CFG_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x)
+
+#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25)
+#define PTP_PIN_CFG_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+
+#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21)
+#define PTP_PIN_CFG_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x)
+#define PTP_PIN_CFG_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x)
+
+#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
+#define PTP_PIN_CFG_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
+#define PTP_PIN_CFG_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_DOM, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4)
+
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4)
+
+/* PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4)
+
+#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0)
+#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x)
+#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+
+/* PTP:PTP_PINS:WF_HIGH_PERIOD */
+#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 24, 0, 1, 4)
+
+#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_PINS:WF_LOW_PERIOD */
+#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 28, 0, 1, 4)
+
+#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x)
+#define PTP_TWOSTEP_CTRL_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x)
+#define PTP_TWOSTEP_CTRL_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x)
+#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */
+#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
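The TWOSTEP_CTRL/TWOSTEP_STAMP pair describes a small TX timestamp FIFO: VLD flags a valid entry, STAMP_TX and STAMP_PORT identify it, and setting NXT pops it. A hedged sketch of a drain loop (the real consumer is the PTP interrupt handler added earlier in this patch; skb matching is elided):

    for (;;) {
            u32 ctrl = lan_rd(lan966x, PTP_TWOSTEP_CTRL);

            if (!PTP_TWOSTEP_CTRL_VLD_GET(ctrl))
                    break;                  /* FIFO is empty */

            if (PTP_TWOSTEP_CTRL_STAMP_TX_GET(ctrl)) {
                    u32 nsec = lan_rd(lan966x, PTP_TWOSTEP_STAMP);

                    nsec = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(nsec);
                    /* ... match nsec against a queued skb for port
                     * PTP_TWOSTEP_CTRL_STAMP_PORT_GET(ctrl) ...
                     */
            }

            /* Pop the entry. */
            lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
                    PTP_TWOSTEP_CTRL_NXT,
                    lan966x, PTP_TWOSTEP_CTRL);
    }
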
/* DEVCPU_QS:XTR:XTR_GRP_CFG */
#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
@@ -700,6 +1129,215 @@ enum lan966x_target {
/* QSYS:RES_CTRL:RES_CFG */
#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+/* QSYS:HSCH:CIR_CFG */
+#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4)
+
+#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x)
+#define QSYS_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x)
+
+#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define QSYS_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x)
+#define QSYS_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x)
+
+/* QSYS:HSCH:SE_CFG */
+#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4)
+
+#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x)
+#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x)
+
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x)
+#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x)
+
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x)
+#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x)
+
+#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x)
+#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x)
+
+#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4)
+
+#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\
+ FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\
+ FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+
+/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */
+#define QSYS_TAS_CFG_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+
+/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */
+#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4)
+
+#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+
+/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4)
+
+#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+
+/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */
+#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */
+#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4)
+
+#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0)
+#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x)
+#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */
+#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */
+#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4)
+
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */
+#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */
+#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4)
+
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\
+ FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\
+ FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */
+#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\
+ FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\
+ FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */
+#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4)
+
+#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0)
+#define QSYS_TAS_LST_LIST_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x)
+#define QSYS_TAS_LST_LIST_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_LST_LIST_STATE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */
+#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */
+#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */
+#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4)
+
+/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */
+#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+
/* REW:PORT:PORT_VLAN_CFG */
#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 7de55f6a4da8..1c88120eb291 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -6,8 +6,37 @@
#include "lan966x_main.h"
static struct notifier_block lan966x_netdevice_nb __read_mostly;
-static struct notifier_block lan966x_switchdev_nb __read_mostly;
-static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
+static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
+ u32 pgid_ip)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 flood_mask_ip;
+
+ flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
+ flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
+
+ /* If mcast snooping is not enabled then use the mcast flood mask
+ * to decide whether to enable multicast flooding.
+ */
+ if (!port->mcast_ena) {
+ u32 flood_mask;
+
+ flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
+ flood_mask = ANA_PGID_PGID_GET(flood_mask);
+
+ if (flood_mask & BIT(port->chip_port))
+ flood_mask_ip |= BIT(port->chip_port);
+ else
+ flood_mask_ip &= ~BIT(port->chip_port);
+ } else {
+ flood_mask_ip &= ~BIT(port->chip_port);
+ }
+
+ lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_ip));
+}
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
bool enabled)
@@ -23,6 +52,11 @@ static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
lan_rmw(ANA_PGID_PGID_SET(val),
ANA_PGID_PGID,
port->lan966x, ANA_PGID(PGID_MC));
+
+ if (!port->mcast_ena) {
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+ }
}
static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
@@ -96,7 +130,7 @@ static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
return 0;
}
-static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
int i;
@@ -104,9 +138,14 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
struct lan966x_port *port = lan966x->ports[i];
unsigned long mask = 0;
- if (port && lan966x->bridge_fwd_mask & BIT(i))
+ if (port && lan966x->bridge_fwd_mask & BIT(i)) {
mask = lan966x->bridge_fwd_mask & ~BIT(i);
+ if (port->bond)
+ mask &= ~lan966x_lag_get_mask(lan966x,
+ port->bond);
+ }
+
mask |= BIT(CPU_PORT);
lan_wr(ANA_PGID_PGID_SET(mask),
@@ -114,7 +153,7 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
}
}
-static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
struct lan966x *lan966x = port->lan966x;
bool learn_ena = false;
@@ -135,8 +174,8 @@ static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
lan966x_update_fwd_mask(lan966x);
}
-static void lan966x_port_ageing_set(struct lan966x_port *port,
- unsigned long ageing_clock_t)
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
@@ -144,6 +183,28 @@ static void lan966x_port_ageing_set(struct lan966x_port *port,
lan966x_mac_set_ageing(port->lan966x, ageing_time);
}
+static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ port->mcast_ena = mcast_ena;
+ if (mcast_ena)
+ lan966x_mdb_restore_entries(lan966x);
+ else
+ lan966x_mdb_clear_entries(lan966x);
+
+ lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
+ ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
+ lan966x, ANA_CPU_FWD_CFG(port->chip_port));
+
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+}
+
static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -171,6 +232,9 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
lan966x_vlan_port_apply(port);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ lan966x_port_mc_set(port, !attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -180,6 +244,7 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
}
static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
@@ -197,7 +262,7 @@ static int lan966x_port_bridge_join(struct lan966x_port *port,
}
}
- err = switchdev_bridge_port_offload(dev, dev, port,
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
&lan966x_switchdev_nb,
&lan966x_switchdev_blocking_nb,
false, extack);
@@ -234,8 +299,9 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_apply(port);
}
-static int lan966x_port_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
@@ -245,45 +311,68 @@ static int lan966x_port_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = lan966x_port_bridge_join(port, info->upper_dev,
+ err = lan966x_port_bridge_join(port, brport_dev,
+ info->upper_dev,
extack);
else
lan966x_port_bridge_leave(port, info->upper_dev);
}
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_lag_port_join(port, info->upper_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_lag_port_leave(port, info->upper_dev);
+ }
+
return err;
}
-static int lan966x_port_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
+ int err = NOTIFY_DONE;
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
+ switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
- if (netif_is_bridge_master(info->upper_dev) && !info->linking)
- switchdev_bridge_port_unoffload(port->dev, port,
- &lan966x_switchdev_nb,
- &lan966x_switchdev_blocking_nb);
+ if (netif_is_lag_master(info->upper_dev)) {
+ err = lan966x_lag_port_prechangeupper(dev, info);
+ if (err || info->linking)
+ return err;
- return NOTIFY_DONE;
+ switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ return err;
}
-static int lan966x_foreign_bridging_check(struct net_device *bridge,
+static int lan966x_foreign_bridging_check(struct net_device *upper,
+ bool *has_foreign,
+ bool *seen_lan966x,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = NULL;
- bool has_foreign = false;
struct net_device *dev;
struct list_head *iter;
- if (!netif_is_bridge_master(bridge))
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper))
return 0;
- netdev_for_each_lower_dev(bridge, dev, iter) {
+ netdev_for_each_lower_dev(upper, dev, iter) {
if (lan966x_netdevice_check(dev)) {
struct lan966x_port *port = netdev_priv(dev);
if (lan966x) {
- /* Bridge already has at least one port of a
+ /* Upper already has at least one port of a
* lan966x switch inside it, check that it's
* the same instance of the driver.
*/
@@ -294,15 +383,24 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
}
} else {
/* This is the first lan966x port inside this
- * bridge
+ * upper device
*/
lan966x = port->lan966x;
+ *seen_lan966x = true;
}
+ } else if (netif_is_lag_master(dev)) {
+ /* Allow bond interfaces that contain only lan966x
+ * devices
+ */
+ if (lan966x_foreign_bridging_check(dev, has_foreign,
+ seen_lan966x,
+ extack))
+ return -EINVAL;
} else {
- has_foreign = true;
+ *has_foreign = true;
}
- if (lan966x && has_foreign) {
+ if (*seen_lan966x && *has_foreign) {
NL_SET_ERR_MSG_MOD(extack,
"Bridging lan966x ports with foreign interfaces disallowed");
return -EINVAL;
@@ -315,7 +413,12 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
static int lan966x_bridge_check(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
+ bool has_foreign = false;
+ bool seen_lan966x = false;
+
return lan966x_foreign_bridging_check(info->upper_dev,
+ &has_foreign,
+ &seen_lan966x,
info->info.extack);
}
@@ -326,21 +429,44 @@ static int lan966x_netdevice_port_event(struct net_device *dev,
int err = 0;
if (!lan966x_netdevice_check(dev)) {
- if (event == NETDEV_CHANGEUPPER)
- return lan966x_bridge_check(dev, ptr);
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ if (netif_is_lag_master(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ err = lan966x_lag_netdev_changeupper(dev,
+ ptr);
+ else
+ err = lan966x_lag_netdev_prechangeupper(dev,
+ ptr);
+
+ return err;
+ }
+ break;
+ default:
+ return 0;
+ }
+
return 0;
}
switch (event) {
case NETDEV_PRECHANGEUPPER:
- err = lan966x_port_prechangeupper(dev, ptr);
+ err = lan966x_port_prechangeupper(dev, dev, ptr);
break;
case NETDEV_CHANGEUPPER:
err = lan966x_bridge_check(dev, ptr);
if (err)
return err;
- err = lan966x_port_changeupper(dev, ptr);
+ err = lan966x_port_changeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ err = lan966x_lag_port_changelowerstate(dev, ptr);
break;
}
@@ -363,12 +489,19 @@ static bool lan966x_foreign_dev_check(const struct net_device *dev,
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int i;
if (netif_is_bridge_master(foreign_dev))
- if (lan966x->bridge != foreign_dev)
- return true;
+ if (lan966x->bridge == foreign_dev)
+ return false;
+
+ if (netif_is_lag_master(foreign_dev))
+ for (i = 0; i < lan966x->num_phys_ports; ++i)
+ if (lan966x->ports[i] &&
+ lan966x->ports[i]->bond == foreign_dev)
+ return false;
- return false;
+ return true;
}
static int lan966x_switchdev_event(struct notifier_block *nb,
@@ -388,8 +521,7 @@ static int lan966x_switchdev_event(struct notifier_block *nb,
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
lan966x_netdevice_check,
lan966x_foreign_dev_check,
- lan966x_handle_fdb,
- NULL);
+ lan966x_handle_fdb);
return notifier_from_errno(err);
}
@@ -402,18 +534,6 @@ static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
struct lan966x *lan966x = port->lan966x;
- /* When adding a port to a vlan, we get a callback for the port but
- * also for the bridge. When get the callback for the bridge just bail
- * out. Then when the bridge is added to the vlan, then we get a
- * callback here but in this case the flags has set:
- * BRIDGE_VLAN_INFO_BRENTRY. In this case it means that the CPU
- * port is added to the vlan, so the broadcast frames and unicast frames
- * with dmac of the bridge should be foward to CPU.
- */
- if (netif_is_bridge_master(obj->orig_dev) &&
- !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
if (!netif_is_bridge_master(obj->orig_dev))
lan966x_vlan_port_add_vlan(port, v->vid,
v->flags & BRIDGE_VLAN_INFO_PVID,
@@ -521,11 +641,11 @@ static struct notifier_block lan966x_netdevice_nb __read_mostly = {
.notifier_call = lan966x_netdevice_event,
};
-static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+struct notifier_block lan966x_switchdev_nb __read_mostly = {
.notifier_call = lan966x_switchdev_event,
};
-static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
.notifier_call = lan966x_switchdev_blocking_event,
};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
new file mode 100644
index 000000000000..3f5b212066c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define LAN966X_TAPRIO_TIMEOUT_MS 1000
+#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
+
+/* Minimum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
+
+/* Maximum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
+
+/* Total number of TAS GCL entries */
+#define LAN966X_TAPRIO_NUM_GCL 256
+
+/* TAPRIO link speeds for calculation of guard band */
+enum lan966x_taprio_link_speed {
+ LAN966X_TAPRIO_SPEED_NO_GB,
+ LAN966X_TAPRIO_SPEED_10,
+ LAN966X_TAPRIO_SPEED_100,
+ LAN966X_TAPRIO_SPEED_1000,
+ LAN966X_TAPRIO_SPEED_2500,
+};
+
+/* TAPRIO list states */
+enum lan966x_taprio_state {
+ LAN966X_TAPRIO_STATE_ADMIN,
+ LAN966X_TAPRIO_STATE_ADVANCING,
+ LAN966X_TAPRIO_STATE_PENDING,
+ LAN966X_TAPRIO_STATE_OPERATING,
+ LAN966X_TAPRIO_STATE_TERMINATING,
+ LAN966X_TAPRIO_STATE_MAX,
+};
+
+/* TAPRIO GCL command */
+enum lan966x_taprio_gcl_cmd {
+ LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
+};
+
+static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
+{
+ return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
+}
+
+static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, QSYS_TAS_LST);
+ return QSYS_TAS_LST_LIST_STATE_GET(val);
+}
+
+static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ return lan966x_taprio_list_state_get(port);
+}
+
+static void lan966x_taprio_list_state_set(struct lan966x_port *port,
+ u32 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+}
+
+static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool pending, operating;
+ unsigned long end;
+ u32 state;
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ /* The state of the list may need to be set multiple times,
+ * because the HW can overwrite it.
+ */
+ do {
+ state = lan966x_taprio_list_state_get(port);
+
+ pending = false;
+ operating = false;
+
+ if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
+ state == LAN966X_TAPRIO_STATE_PENDING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_ADMIN);
+ pending = true;
+ }
+
+ if (state == LAN966X_TAPRIO_STATE_OPERATING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_TERMINATING);
+ operating = true;
+ }
+
+ /* If the list was pending and is now in admin, there is
+ * nothing else to do, so just bail out
+ */
+ state = lan966x_taprio_list_state_get(port);
+ if (pending &&
+ state == LAN966X_TAPRIO_STATE_ADMIN)
+ return 0;
+
+ /* If the list was operating and is now terminating or admin,
+ * it is OK to exit the loop, but it still needs to wait until
+ * the list reaches admin. There is no need to set the state
+ * again.
+ */
+ if (operating &&
+ (state == LAN966X_TAPRIO_STATE_TERMINATING ||
+ state == LAN966X_TAPRIO_STATE_ADMIN))
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ do {
+ state = lan966x_taprio_list_state_get(port);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ /* If the list was in operating mode, it could be stopped while some
+ * queues were closed, so make sure to restore "all-queues-open"
+ */
+ if (operating) {
+ lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
+ lan966x, QSYS_TAS_GS_CTRL);
+
+ lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
+ lan966x, QSYS_TAS_GATE_STATE);
+ }
+
+ return 0;
+}
+
+static int lan966x_taprio_shutdown(struct lan966x_port *port)
+{
+ u32 i, list, state;
+ int err;
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list = lan966x_taprio_list_index(port, i);
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ err = lan966x_taprio_list_shutdown(port, list);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Find a suitable list for a new schedule. First priority is a list in state
+ * pending. Second priority is a list in state admin.
+ */
+static int lan966x_taprio_find_list(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int *new_list, int *obs_list)
+{
+ int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int err, oper = -1;
+ u32 i;
+
+ *new_list = -1;
+ *obs_list = -1;
+
+ /* If there is already a list in operating mode, return it in
+ * obs_list, so that when the new list gets activated the
+ * operating list is stopped. In this way it is possible to have
+ * smooth transitions between the lists
+ */
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list[i] = lan966x_taprio_list_index(port, i);
+ state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
+ if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
+ oper = list[i];
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
+ err = lan966x_taprio_shutdown(port);
+ if (err)
+ return err;
+
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
+{
+ u64 total_time = 0;
+ u32 i;
+
+ /* This is not supported by the HW */
+ if (qopt->cycle_time_extension)
+ return -EOPNOTSUPP;
+
+ /* There is a limited number of gcl entries that can be used; they are
+ * shared by all ports
+ */
+ if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
+ return -EINVAL;
+
+ /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
+ if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ for (i = 0; i < qopt->num_entries; ++i) {
+ struct tc_taprio_sched_entry *entry = &qopt->entries[i];
+
+ /* Don't allow intervals bigger than 1 sec or smaller than 1
+ * usec
+ */
+ if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+
+ total_time += entry->interval;
+ }
+
+ /* Don't allow the total time of the intervals to be bigger than 1 sec */
+ if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ /* The HW expects the cycle time to be at least as big as the sum
+ * of the gcl intervals
+ */
+ if (qopt->cycle_time < total_time)
+ return -EINVAL;
+
+ return 0;
+}
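
A concrete instance of these checks: a schedule with two gate entries of 300 us and 700 us gives total_time = 1 ms, so any cycle_time below 1 ms is rejected (as is anything above the ~1 s maximum), while cycle_time == 1 ms yields a gapless schedule.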
+
+static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
+ unsigned long *free_list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 num_free, state, list;
+ u32 base, next, max_list;
+
+ /* By default everything is free */
+ bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
+ num_free = LAN966X_TAPRIO_NUM_GCL;
+
+ /* Iterate over the gcl entries of all lists in use and mark
+ * them as not free.
+ */
+ max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
+ for (list = 0; list < max_list; ++list) {
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
+ base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
+ next = base;
+
+ do {
+ clear_bit(next, free_list);
+ num_free--;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
+ next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
+ } while (base != next);
+ }
+
+ return num_free;
+}
+
+static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
+ struct tc_taprio_sched_entry *entry,
+ u32 next_entry)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Setup a single gcl entry */
+ lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
+ QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
+ lan966x, QSYS_TAS_GCL_CT_CFG);
+
+ lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
+ lan966x, QSYS_TAS_GCL_CT_CFG2);
+
+ lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
+}
+
+static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int list)
+{
+ DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, base, next;
+
+ if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
+ return -ENOSPC;
+
+ /* Select list */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* Setup the address of the first gcl entry */
+ base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
+ lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
+ QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
+ lan966x, QSYS_TAS_LIST_CFG);
+
+ /* Iterate over entries and add them to the gcl list */
+ next = base;
+ for (i = 0; i < qopt->num_entries; ++i) {
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* If the entry is last, point back to the start of the list */
+ if (i == qopt->num_entries - 1)
+ next = base;
+ else
+ next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
+ next + 1);
+
+ lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
+ }
+
+ return 0;
+}
+
+/* Calculate a new base_time based on cycle_time. The HW recommends that the
+ * new base time be at least current time + 2 * cycle_time
+ */
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+ const u32 cycle_time,
+ const ktime_t org_base_time,
+ ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time;
+ struct timespec64 ts;
+
+ /* Get the current time and calculate the threshold_time */
+ lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+
+ /* If the org_base_time is far enough in the future, just use it */
+ if (org_base_time >= threshold_time) {
+ *new_base_time = org_base_time;
+ return;
+ }
+
+ /* If the org_base_time is smaller than current_time, calculate the
+ * new base time as follows.
+ */
+ if (org_base_time <= current_time) {
+ u64 tmp = current_time - org_base_time;
+ u32 rem = 0;
+
+ if (tmp > cycle_time)
+ div_u64_rem(tmp, cycle_time, &rem);
+ rem = cycle_time - rem;
+ *new_base_time = threshold_time + rem;
+ return;
+ }
+
+ /* The only remaining case is org_base_time between current_time and
+ * threshold_time. Here the new_base_time is calculated as
+ * org_base_time + 2 * cycle_time
+ */
+ *new_base_time = org_base_time + 2 * cycle_time;
+}
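
A worked example of the past-base-time branch, with cycle_time = 1 ms, org_base_time = 9.9957 s and current_time = 10.0000 s: tmp = 4.3 ms, div_u64_rem() gives rem = 0.3 ms, the complement is 1 ms - 0.3 ms = 0.7 ms, and new_base_time = threshold_time + 0.7 ms = 10.0027 s. That is exactly 7 cycles after org_base_time, so when tmp exceeds one cycle the rescheduled list keeps the phase of the original schedule.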
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 taprio_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ taprio_speed = LAN966X_TAPRIO_SPEED_10;
+ break;
+ case SPEED_100:
+ taprio_speed = LAN966X_TAPRIO_SPEED_100;
+ break;
+ case SPEED_1000:
+ taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+ break;
+ case SPEED_2500:
+ taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+ QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+ lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+ return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err, new_list, obs_list;
+ struct timespec64 ts;
+ ktime_t base_time;
+
+ err = lan966x_taprio_check(qopt);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+ if (err)
+ return err;
+
+ lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+ qopt->base_time, &base_time);
+
+ ts = ktime_to_timespec64(base_time);
+ lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+ lan966x, QSYS_TAS_BT_NSEC);
+
+ lan_wr(lower_32_bits(ts.tv_sec),
+ lan966x, QSYS_TAS_BT_SEC_LSB);
+
+ lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+ lan966x, QSYS_TAS_BT_SEC_MSB);
+
+ lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+ lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+ QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+ lan966x, QSYS_TAS_STARTUP_CFG);
+
+ /* Start list processing */
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+
+ return err;
+}
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+ return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+ int num_taprio_lists;
+ int p;
+
+ lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+ lan966x_ptp_get_period_ps()),
+ lan966x, QSYS_TAS_STM_CFG);
+
+ num_taprio_lists = lan966x->num_phys_ports *
+ LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+ /* For now we always use a guard band on all queues */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+ QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ for (p = 0; p < lan966x->num_phys_ports; p++)
+ lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+ QSYS_TAS_PROFILE_CFG_PORT_NUM,
+ lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
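
For reference, with the 15125 ps clock period returned by lan966x_ptp_get_period_ps(), the REVISIT_DLY value written above evaluates to (256 * 1000) / 15125 = 16 (integer division), i.e. roughly the number of system clock cycles in 256 ns; the exact meaning of the revisit delay is a property of the TAS state machine and is not spelled out in the patch.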
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ lan966x_taprio_del(lan966x->ports[p]);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
new file mode 100644
index 000000000000..4555a35d0d28
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 cir, cbs;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8;
+ cbs = qopt->replace_params.max_size;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+ /* Check that the result can actually be written */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
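
A worked example of the unit conversions above: a tbf rate of 100 Mbit/s arrives as rate_bytes_ps = 12500000, so cir = 12500000 / 1000 * 8 = 100000 kbps, and DIV_ROUND_UP(100000, 100) = 1000 hardware units of 100 kbps each. A max_size of 8192 bytes becomes DIV_ROUND_UP(8192, 4096) = 2 burst units of 4 kB.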
+
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
new file mode 100644
index 000000000000..651d5493ae55
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+
+#include "lan966x_main.h"
+
+static LIST_HEAD(lan966x_tc_block_cb_list);
+
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u8 num_tc = mqprio->qopt.num_tc;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return num_tc ? lan966x_mqprio_add(port, num_tc) :
+ lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ return taprio->enable ? lan966x_taprio_add(port, taprio) :
+ lan966x_taprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return lan966x_tbf_add(port, qopt);
+ case TC_TBF_DESTROY:
+ return lan966x_tbf_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ return qopt->enable ? lan966x_cbs_add(port, qopt) :
+ lan966x_cbs_del(port, qopt);
+}
+
+static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ return lan966x_ets_add(port, qopt);
+ case TC_ETS_DESTROY:
+ return lan966x_ets_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct lan966x_port *port = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return lan966x_tc_matchall(port, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int lan966x_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int lan966x_tc_setup_block(struct lan966x_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = lan966x_tc_block_cb_ingress;
+ port->tc.ingress_shared_block = f->block_shared;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = lan966x_tc_block_cb_egress;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list,
+ cb, port, port, ingress);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return lan966x_tc_setup_qdisc_taprio(port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return lan966x_tc_setup_qdisc_tbf(port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return lan966x_tc_setup_qdisc_cbs(port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return lan966x_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_BLOCK:
+ return lan966x_tc_setup_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
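The dispatcher above only runs once it is installed as the port's .ndo_setup_tc hook; a minimal sketch of that wiring (struct name hypothetical, remaining ops elided):

static const struct net_device_ops example_netdev_ops = {
	.ndo_setup_tc	= lan966x_tc_setup,
	/* ... remaining ops elided ... */
};

After that, a command such as `tc qdisc replace dev eth0 root tbf rate 50mbit burst 4096 limit 10000` (interface name hypothetical) reaches lan966x_tc_setup() with type == TC_SETUP_QDISC_TBF.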
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
new file mode 100644
index 000000000000..7368433b9277
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_tc_matchall_add(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ return lan966x_police_port_add(port, &f->rule->action, act,
+ f->cookie, ingress,
+ f->common.extack);
+ case FLOW_ACTION_MIRRED:
+ return lan966x_mirror_port_add(port, act, f->cookie,
+ ingress, f->common.extack);
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_matchall_del(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ return lan966x_police_port_del(port, f->cookie,
+ f->common.extack);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ return lan966x_mirror_port_del(port, ingress,
+ f->common.extack);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_matchall_stats(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ lan966x_police_port_stats(port, &f->stats);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ lan966x_mirror_port_stats(port, &f->stats, ingress);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only chain zero is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return lan966x_tc_matchall_add(port, f, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return lan966x_tc_matchall_del(port, f, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return lan966x_tc_matchall_stats(port, f, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
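The del and stats paths identify offloaded rules purely by the filter cookie, which implies the add paths record it. A sketch of the bookkeeping this relies on, assuming lan966x_police_port_add() stores the cookie in the tc state used above:

/* presumed to happen inside lan966x_police_port_add() on success: */
port->tc.police_id = f->cookie;

/* a later TC_CLSMATCHALL_DESTROY carrying the same cookie then matches
 * port->tc.police_id and tears down the corresponding hardware rule
 */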
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index 8d7260cd7da9..3c44660128da 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -169,6 +169,12 @@ void lan966x_vlan_port_apply(struct lan966x_port *port)
ANA_VLAN_CFG_VLAN_POP_CNT,
lan966x, ANA_VLAN_CFG(port->chip_port));
+ lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware),
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA,
+ lan966x, DEV_MAC_TAGS_CFG(port->chip_port));
+
/* Drop frames with multicast source address */
val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
if (port->vlan_aware && !pvid)
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 7bdbb2d09a14..cc5e48e1bb4c 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -4,6 +4,8 @@ config SPARX5_SWITCH
depends on HAS_IOMEM
depends on OF
depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on BRIDGE || BRIDGE=n
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index c271e86ee292..d1c6ad966747 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
- sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 59783fc46a7b..6b0febcb7fa9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev,
stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
- for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ idx];
stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
@@ -1183,6 +1183,39 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
}
+static int sparx5_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ if (!sparx5->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1194,6 +1227,7 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_mac_stats = sparx5_get_eth_mac_stats,
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
+ .get_ts_info = sparx5_get_ts_info,
};
int sparx_stats_init(struct sparx5 *sparx5)
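Userspace sees the result of sparx5_get_ts_info() through `ethtool -T <dev>` or the raw ETHTOOL_GET_TS_INFO ioctl. A minimal sketch of the raw query (interface name hypothetical):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
struct ifreq ifr = {};
int fd = socket(AF_INET, SOCK_DGRAM, 0);

strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
ifr.ifr_data = (void *)&info;
ioctl(fd, SIOCETHTOOL, &ifr);
/* info.phc_index, info.so_timestamping, info.tx_types and info.rx_filters
 * now hold the values filled in by the driver above
 */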
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 7436f62fa152..66360c8c5a38 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -240,6 +240,8 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
skb_pull(skb, IFH_LEN * sizeof(u32));
if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
skb->protocol = eth_type_trans(skb, skb->dev);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded
@@ -379,7 +381,8 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
}
sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
}
- netif_napi_add(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, FDMA_WEIGHT);
+ netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
+ FDMA_WEIGHT);
napi_enable(&rx->napi);
sparx5_fdma_rx_activate(sparx5, rx);
return 0;
@@ -420,6 +423,8 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
db_hw->dataptr = phys;
db_hw->status = 0;
db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
db->cpu_addr = cpu_addr;
list_add_tail(&db->list, &tx->db_list);
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 9a8e4f201eb1..4af285918ea2 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -186,11 +186,11 @@ bool sparx5_mact_getnext(struct sparx5 *sparx5,
return ret == 0;
}
-static int sparx5_mact_lookup(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN],
- u16 vid)
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
int ret;
+ u32 cfg2;
mutex_lock(&sparx5->lock);
@@ -202,13 +202,14 @@ static int sparx5_mact_lookup(struct sparx5 *sparx5,
sparx5, LRN_COMMON_ACCESS_CTRL);
ret = sparx5_mact_wait_for_completion(sparx5);
- if (ret)
- goto out;
-
- ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
- (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+ if (ret == 0) {
+ cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
+ *pcfg2 = cfg2;
+ else
+ ret = -ENOENT;
+ }
-out:
mutex_unlock(&sparx5->lock);
return ret;
@@ -286,14 +287,16 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
}
int sparx5_add_mact_entry(struct sparx5 *sparx5,
- struct sparx5_port *port,
+ struct net_device *dev,
+ u16 portno,
const unsigned char *addr, u16 vid)
{
struct sparx5_mact_entry *mact_entry;
int ret;
+ u32 cfg2;
- ret = sparx5_mact_lookup(sparx5, addr, vid);
- if (ret)
+ ret = sparx5_mact_find(sparx5, addr, vid, &cfg2);
+ if (!ret)
return 0;
/* In case the entry already exists, don't add it again to SW,
@@ -302,14 +305,14 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
* mact thread to start the frame will reach CPU and the CPU will
* add the entry but without the extern_learn flag.
*/
- mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+ mact_entry = find_mact_entry(sparx5, addr, vid, portno);
if (mact_entry)
goto update_hw;
/* Add the entry in SW MAC table not to get the notification when
* SW is pulling again
*/
- mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+ mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
if (!mact_entry)
return -ENOMEM;
@@ -318,13 +321,13 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
mutex_unlock(&sparx5->mact_lock);
update_hw:
- ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+ ret = sparx5_mact_learn(sparx5, portno, addr, vid);
/* New entry? */
if (mact_entry->flags == 0) {
mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
- port->ndev, true);
+ dev, true);
}
return ret;
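With this change, sparx5_mact_find() signals presence through its return value and hands back the raw CFG_2 word. A sketch of the intended calling pattern, matching its use in sparx5_add_mact_entry():

u32 cfg2;

if (sparx5_mact_find(sparx5, addr, vid, &cfg2) == 0) {
	/* entry exists and is valid; cfg2 holds LRN_MAC_ACCESS_CFG_2 */
} else {
	/* -ENOENT (not found) or a completion-wait error */
}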
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 16266275dd36..62a325e96345 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -27,6 +27,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_qos.h"
#define QLIM_WM(fraction) \
((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
@@ -190,6 +191,7 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */
{ TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */
{ TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */
+ { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */
{ TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */
{ TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */
{ TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */
@@ -276,6 +278,7 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+ spx5_port->is_mrouter = false;
sparx5->ports[config->portno] = spx5_port;
err = sparx5_port_init(sparx5, spx5_port, &config->conf);
@@ -291,7 +294,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
/* Create a phylink for PHY management. Also handles SFPs */
spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
spx5_port->phylink_config.type = PHYLINK_NETDEV;
- spx5_port->phylink_config.pcs_poll = true;
spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
@@ -328,7 +330,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
return PTR_ERR(phylink);
spx5_port->phylink = phylink;
- phylink_set_pcs(phylink, &spx5_port->phylink_pcs);
return 0;
}
@@ -627,6 +628,9 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Init MAC table, ageing */
sparx5_mact_init(sparx5);
+ /* Init PGID table arbitrator */
+ sparx5_pgid_init(sparx5);
+
/* Setup VLANs */
sparx5_vlan_init(sparx5);
@@ -659,6 +663,9 @@ static int sparx5_start(struct sparx5 *sparx5)
queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
SPX5_MACT_PULL_DELAY);
+ mutex_init(&sparx5->mdb_lock);
+ INIT_LIST_HEAD(&sparx5->mdb_entries);
+
err = sparx5_register_netdevs(sparx5);
if (err)
return err;
@@ -694,6 +701,18 @@ static int sparx5_start(struct sparx5 *sparx5)
} else {
sparx5->xtr_irq = -ENXIO;
}
+
+ if (sparx5->ptp_irq >= 0) {
+ err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
+ NULL, sparx5_ptp_irq_handler,
+ IRQF_ONESHOT, "sparx5-ptp",
+ sparx5);
+ if (err)
+ sparx5->ptp_irq = -ENXIO;
+
+ sparx5->ptp = 1;
+ }
+
return err;
}
@@ -810,6 +829,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma");
sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+ sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp");
/* Read chip ID to check CPU interface */
sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
@@ -848,6 +868,18 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
dev_err(sparx5->dev, "Start failed\n");
goto cleanup_ports;
}
+
+ err = sparx5_qos_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to initialize QoS\n");
+ goto cleanup_ports;
+ }
+
+ err = sparx5_ptp_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "PTP failed\n");
+ goto cleanup_ports;
+ }
goto cleanup_config;
cleanup_ports:
@@ -871,6 +903,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
disable_irq(sparx5->fdma_irq);
sparx5->fdma_irq = -ENXIO;
}
+ sparx5_ptp_deinit(sparx5);
sparx5_fdma_stop(sparx5);
sparx5_cleanup_ports(sparx5);
/* Unregister netdevs */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index a1acc9b461f2..7a83222caa73 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -14,8 +14,12 @@
#include <linux/if_vlan.h>
#include <linux/bitmap.h>
#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
+#include "sparx5_main_regs.h"
+
/* Target chip type */
enum spx5_target_chiptype {
SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
@@ -61,6 +65,9 @@ enum sparx5_vlan_port_type {
#define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
#define PGID_BCAST (PGID_BASE + 6)
#define PGID_CPU (PGID_BASE + 7)
+#define PGID_MCAST_START (PGID_BASE + 8)
+
+#define PGID_TABLE_SIZE 3290
#define IFH_LEN 9 /* 36 bytes */
#define NULL_VID 0
@@ -77,6 +84,18 @@ enum sparx5_vlan_port_type {
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
+#define SPARX5_PHC_COUNT 3
+#define SPARX5_PHC_PORT 0
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define IFH_PDU_TYPE_NONE 0x0
+#define IFH_PDU_TYPE_PTP 0x5
+#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
+#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
+
struct sparx5;
struct sparx5_db_hw {
@@ -165,9 +184,13 @@ struct sparx5_port {
enum sparx5_port_max_tags max_vlan_tags;
enum sparx5_vlan_port_type vlan_type;
u32 custom_etype;
- u32 ifh[IFH_LEN];
bool vlan_aware;
struct hrtimer inj_timer;
+ /* ptp */
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
+ bool is_mrouter;
};
enum sparx5_core_clockfreq {
@@ -177,6 +200,35 @@ enum sparx5_core_clockfreq {
SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */
};
+struct sparx5_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct hwtstamp_config hwtstamp_config;
+ struct sparx5 *sparx5;
+ u8 index;
+};
+
+struct sparx5_skb_cb {
+ u8 rew_op;
+ u8 pdu_type;
+ u8 pdu_w16_offset;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+struct sparx5_mdb_entry {
+ struct list_head list;
+ DECLARE_BITMAP(port_mask, SPX5_PORTS);
+ unsigned char addr[ETH_ALEN];
+ bool cpu_copy;
+ u16 vid;
+ u16 pgid_idx;
+};
+
+#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
+#define SPARX5_SKB_CB(skb) \
+ ((struct sparx5_skb_cb *)((skb)->cb))
+
struct sparx5 {
struct platform_device *pdev;
struct device *dev;
@@ -214,6 +266,10 @@ struct sparx5 {
struct list_head mact_entries;
/* mac table list (mact_entries) mutex */
struct mutex mact_lock;
+ /* SW MDB table */
+ struct list_head mdb_entries;
+ /* mdb list mutex */
+ struct mutex mdb_lock;
struct delayed_work mact_work;
struct workqueue_struct *mact_queue;
/* Board specifics */
@@ -224,6 +280,16 @@ struct sparx5 {
int fdma_irq;
struct sparx5_rx rx;
struct sparx5_tx tx;
+ /* PTP */
+ bool ptp;
+ struct sparx5_phc phc[SPARX5_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+ int ptp_irq;
+ /* PGID allocation map */
+ u8 pgid_map[PGID_TABLE_SIZE];
};
/* sparx5_switchdev.c */
@@ -233,12 +299,13 @@ void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
/* sparx5_packet.c */
struct frame_info {
int src_port;
+ u32 timestamp;
};
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
@@ -254,10 +321,13 @@ int sparx5_mact_learn(struct sparx5 *sparx5, int port,
const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
- struct sparx5_port *port,
+ struct net_device *dev,
+ u16 portno,
const unsigned char *addr, u16 vid);
int sparx5_del_mact_entry(struct sparx5 *sparx5,
const unsigned char *addr,
@@ -269,6 +339,8 @@ void sparx5_mact_init(struct sparx5 *sparx5);
/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
+void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
@@ -286,12 +358,42 @@ void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats
int sparx_stats_init(struct sparx5 *sparx5);
/* sparx5_netdev.c */
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
void sparx5_destroy_netdevs(struct sparx5 *sparx5);
void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+/* sparx5_ptp.c */
+int sparx5_ptp_init(struct sparx5 *sparx5);
+void sparx5_ptp_deinit(struct sparx5 *sparx5);
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr);
+int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr);
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp);
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb);
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb);
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+
+/* sparx5_pgid.c */
+enum sparx5_pgid_type {
+ SPX5_PGID_FREE,
+ SPX5_PGID_RESERVED,
+ SPX5_PGID_MULTICAST,
+};
+
+void sparx5_pgid_init(struct sparx5 *spx5);
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 5ab2373a7178..fa2eb70f487a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200.
- * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3
+/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100.
+ * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty)
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -40,6 +40,7 @@ enum sparx5_target {
TARGET_PCS25G_BR = 144,
TARGET_PCS5G_BR = 160,
TARGET_PORT_CONF = 173,
+ TARGET_PTP = 174,
TARGET_QFWD = 175,
TARGET_QRES = 176,
TARGET_QS = 177,
@@ -2992,6 +2993,147 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+
+#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
+#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
+#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
+
+#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
+#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
+
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+
+#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
+#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
+#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
+
+#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
+#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
+#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
+
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+
+#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+
+#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
+#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
+#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
+
+#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
+#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_STOP BIT(0)
+#define HSCH_SE_CFG_SE_STOP_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
+#define HSCH_SE_CFG_SE_STOP_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
+
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+
+#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
+ FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
+ FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+
+#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
+#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
+#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
+
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
@@ -3001,6 +3143,30 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
+ FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
+ FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+
/* HSCH:SYSTEM:FLUSH_CTRL */
#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
@@ -4156,6 +4322,249 @@ enum sparx5_target {
#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
+#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
+ FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\
+ FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
+#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x)
+#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x)
+
+#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
+#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
+#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
+#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
+#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
+#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
+#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
+#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
+#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
+ FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
+#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\
+ FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
+#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+
+#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
+#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x)
+#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x)
+
+#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6)
+#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x)
+#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x)
+
+#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3)
+#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x)
+#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x)
+
+#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0)
+#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x)
+#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
+#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
+
/* QFWD:SYSTEM:SWITCH_PORT_MODE */
#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
@@ -4528,6 +4937,93 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
+#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
+#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
+#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
+#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
+#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
+#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
+ FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\
+ FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+
+/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
+#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+
/* REW:RAM_CTRL:RAM_INIT */
#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
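All of the autogenerated _SET()/_GET() macros above are thin wrappers around FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A minimal sketch of how they compose into a register value, using the CIR fields as an example:

u32 val, rate_back;

/* pack the 17-bit rate (bits 22:6) and the 6-bit burst (bits 5:0) */
val = HSCH_CIR_CFG_CIR_RATE_SET(rate) |
      HSCH_CIR_CFG_CIR_BURST_SET(burst);

/* unpack a read the same way */
rate_back = HSCH_CIR_CFG_CIR_RATE_GET(val);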
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index e042f117dc7a..19516ccad533 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -7,6 +7,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_tc.h"
/* The IFH bit position of the first VSTAX bit. This is because the
* VSTAX bit positions in the data sheet start from zero.
@@ -54,7 +55,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
}
-static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
{
/* VSTAX.RSV = 1. MSBit must be 1 */
ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
@@ -74,6 +75,26 @@ static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
}
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
+{
+ ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10);
+}
+
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4);
+}
+
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6);
+}
+
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp)
+{
+ ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40);
+}
+
static int sparx5_port_open(struct net_device *ndev)
{
struct sparx5_port *port = netdev_priv(ndev);
@@ -179,6 +200,24 @@ static int sparx5_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return sparx5_ptp_hwtstamp_set(sparx5_port, ifr);
+ case SIOCGHWTSTAMP:
+ return sparx5_ptp_hwtstamp_get(sparx5_port, ifr);
+ }
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_open = sparx5_port_open,
.ndo_stop = sparx5_port_stop,
@@ -189,6 +228,8 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
+ .ndo_eth_ioctl = sparx5_port_ioctl,
+ .ndo_setup_tc = sparx5_port_setup_tc,
};
bool sparx5_netdevice_check(const struct net_device *dev)
@@ -201,16 +242,19 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
struct sparx5_port *spx5_port;
struct net_device *ndev;
- ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+ ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port),
+ SPX5_PRIOS, 1);
if (!ndev)
return ERR_PTR(-ENOMEM);
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= NETIF_F_HW_TC;
+
SET_NETDEV_DEV(ndev, sparx5->dev);
spx5_port = netdev_priv(ndev);
spx5_port->ndev = ndev;
spx5_port->sparx5 = sparx5;
spx5_port->portno = portno;
- sparx5_set_port_ifh(spx5_port->ifh, portno);
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index dc7e5ea6ec15..83c16ca5b30f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -44,6 +44,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
((u32)xtr_hdr[30] << 0);
fwd = (fwd >> 5);
info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+
+ info->timestamp =
+ ((u64)xtr_hdr[2] << 24) |
+ ((u64)xtr_hdr[3] << 16) |
+ ((u64)xtr_hdr[4] << 8) |
+ ((u64)xtr_hdr[5] << 0);
}
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
@@ -107,6 +113,8 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* This assumes STATUS_WORD_POS == 1, Status
* just after last data
*/
+ if (!byte_swap)
+ val = ntohl((__force __be32)val);
byte_cnt -= (4 - XTR_VALID_BYTES(val));
eof_flag = true;
break;
@@ -144,10 +152,11 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* Finish up skb */
skb_put(skb, byte_cnt - ETH_FCS_LEN);
eth_skb_pad(skb);
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
netdev->stats.rx_bytes += skb->len;
netdev->stats.rx_packets++;
+ netif_rx(skb);
}
static int sparx5_inject(struct sparx5 *sparx5,
@@ -213,25 +222,49 @@ static int sparx5_inject(struct sparx5 *sparx5,
return NETDEV_TX_OK;
}
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
- int ret;
+ u32 ifh[IFH_LEN];
+ netdev_tx_t ret;
+
+ memset(ifh, 0, IFH_LEN * 4);
+ sparx5_set_port_ifh(ifh, port->portno);
+
+ if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ret = sparx5_ptp_txtstamp_request(port, skb);
+ if (ret)
+ return ret;
+ sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
+ sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
+ sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
+ sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
+ }
+
+ skb_tx_timestamp(skb);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, port->ifh, skb);
+ ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
- ret = sparx5_inject(sparx5, port->ifh, skb, dev);
+ ret = sparx5_inject(sparx5, ifh, skb, dev);
if (ret == NETDEV_TX_OK) {
stats->tx_bytes += skb->len;
stats->tx_packets++;
- skb_tx_timestamp(skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return ret;
+
dev_kfree_skb_any(skb);
} else {
stats->tx_dropped++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ sparx5_ptp_txtstamp_release(port, skb);
}
return ret;
}
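The PTP metadata travels with the skb in its control buffer via SPARX5_SKB_CB(). skb->cb is a fixed 48-byte scratch area, so struct sparx5_skb_cb must fit inside it; a sketch of the usual compile-time guard (not present in this patch):

static inline void example_skb_cb_check(void)
{
	BUILD_BUG_ON(sizeof(struct sparx5_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
}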
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
new file mode 100644
index 000000000000..af8b435009f4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include "sparx5_main.h"
+
+void sparx5_pgid_init(struct sparx5 *spx5)
+{
+ int i;
+
+ for (i = 0; i < PGID_TABLE_SIZE; i++)
+ spx5->pgid_map[i] = SPX5_PGID_FREE;
+
+ /* Reserved for unicast, flood control, broadcast, and CPU.
+ * These cannot be freed.
+ */
+ for (i = 0; i <= PGID_CPU; i++)
+ spx5->pgid_map[i] = SPX5_PGID_RESERVED;
+}
+
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
+{
+ int i;
+
+ /* The multicast area starts at index 65, but the first 7
+ * are reserved for flood masks and CPU. Start allocating after those.
+ */
+ for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
+ if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+ spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -EBUSY;
+}
+
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
+{
+ if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+ return -EINVAL;
+
+ if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
+ return -EINVAL;
+
+ spx5->pgid_map[idx] = SPX5_PGID_FREE;
+ return 0;
+}
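A short usage sketch of the allocator pair above, as a caller such as the switchdev MDB code would presumably use it:

u16 pgid;
int err;

err = sparx5_pgid_alloc_mcast(sparx5, &pgid);
if (err)
	return err;		/* -EBUSY: multicast PGID table exhausted */

/* ... program the PGID port mask and reference it from a MAC entry ... */

sparx5_pgid_free(sparx5, pgid);	/* on teardown; rejects reserved entries */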
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index 8ba33bc1a001..830da0e5ff27 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -26,6 +26,15 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po
return false;
}
+static struct phylink_pcs *
+sparx5_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
static void sparx5_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -130,6 +139,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = sparx5_phylink_mac_select_pcs,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
.mac_link_up = sparx5_phylink_mac_link_up,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 189a6a0a2e08..32709d21ab2f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -742,7 +742,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
} else {
- sgmii = true; /* Phy is connnected to the MAC */
+ sgmii = true; /* Phy is connected to the MAC */
}
/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
new file mode 100644
index 000000000000..0ed1ea7727c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/ptp_classify.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPARX5_MAX_PTP_ID 512
+
+#define TOD_ACC_PIN 0x4
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
+{
+ /* Represents a 1 ppm adjustment in 2^59 fixed-point format, using the
+ * nominal clock periods 3.99218750000 ns (250 MHz), 1.99609375000 ns
+ * (500 MHz) and 1.59687500000 ns (625 MHz) as reference.
+ * The value is calculated as follows:
+ * (1/1000000)/((2^-59)/X)
+ */
+
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 2301339409586;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 1150669704793;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 920535763834;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
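
Each of these 1ppm steps is simply the corresponding nominal TOD increment (see sparx5_ptp_get_nominal_value() below) divided by 10^6. A quick check of the 500 MHz entry, as a sketch in plain kernel integer math:

	/* div_u64() from <linux/math64.h>; 0x0FF8000000000000 is
	 * 1.99609375 ns/cycle expressed in 2^-59 ns units.
	 */
	u64 nominal = 0x0FF8000000000000ULL;
	u64 one_ppm = div_u64(nominal, 1000000);	/* = 1150669704793 */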
+
+static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
+{
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 0x1FF0000000000000;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 0x0FF8000000000000;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 0x0CC6666666666666;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
+
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct hwtstamp_config cfg;
+ struct sparx5_phc *phc;
+
+ /* For now, don't allow PTP to run on ports that are part of a bridge:
+ * in the transparent clock case the HW would still forward the frames,
+ * resulting in duplicate frames.
+ */
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&sparx5->ptp_lock);
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&sparx5->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
+ sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ header = ptp_parse_header(skb, type);
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ *pdu_w16_offset = 7;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_PTP;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
+
+ /* If it is a Sync message and one-step is configured, use the
+ * one-step operation; otherwise fall back to two-step.
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
+
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 rew_op, pdu_type, pdu_w16_offset;
+ unsigned long flags;
+
+ sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
+ SPARX5_SKB_CB(skb)->rew_op = rew_op;
+ SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
+ SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ sparx5_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
+ SPARX5_SKB_CB(skb)->jiffies = jiffies;
+
+ sparx5->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == SPARX5_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ sparx5->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+}
+
+static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* Sec has incremented since the ts was registered */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+}
+
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
+{
+ int budget = SPARX5_MAX_PTP_ID;
+ struct sparx5 *sparx5 = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sparx5_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);
+
+ if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = sparx5->ports[txport];
+
+ /* Retrieve the delay */
+ delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Get the next timestamp from the FIFO, which must be the
+ * RX timestamp that carries the id of the frame
+ */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ id <<= 8;
+ id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (SPARX5_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&sparx5->ptp_ts_id_lock);
+ sparx5->ptp_skbs--;
+ spin_unlock(&sparx5->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ sparx5_get_hwtimestamp(sparx5, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = sparx5_ptp_get_nominal_value(sparx5);
+
+ /* The multiplication is split into two separate additions because of
+ * overflow issues: multiplying the 1ppm step by the full 16.16
+ * fixed-point scaled_ppm in one go can overflow 64 bits.
+ */
+ ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
+ ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ spx5_wr((u32)(tod_inc >> 32), sparx5,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
+ PTP_PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
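
The worst case shows why the split is needed (a sketch using the 250 MHz constant; max_adj permits 200000 ppm, so scaled_ppm can reach 200000 << 16, roughly 1.3e10, and a single multiply by the ~2.3e12 1ppm step would need ~3e22, far above the u64 limit of ~1.8e19):

	u64 one_ppm = 2301339409586ULL;		/* 250 MHz 1ppm step */
	s64 scaled_ppm = 200000LL << 16;	/* worst case under max_adj */
	u64 ref;

	ref = one_ppm * (scaled_ppm >> 16);		/* ~4.6e17, fits */
	ref += (one_ppm * (scaled_ppm & 0xffff)) >> 16;	/* factor < 2^16, fits */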
+
+static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ spx5_wr(lower_32_bits(ts->tv_sec),
+ sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
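
The 0x3FFFFFF0 window decodes the hardware's small negative nanosecond readings; the 999999984 constant (10^9 - 16) suggests -16..-1 ns are encoded as 0x3FFFFFF0..0x3FFFFFFF. A worked case under that assumption: a raw reading of 0x3FFFFFF2 means -14 ns, so one second is borrowed and ns becomes 999999984 + 2 = 999999986, i.e. 10^9 - 14.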
+
+static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
+ sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+ } else {
+ /* Fall back using sparx5_ptp_settime64 which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ sparx5_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ sparx5_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info sparx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sparx5 ptp",
+ .max_adj = 200000,
+ .gettime64 = sparx5_ptp_gettime64,
+ .settime64 = sparx5_ptp_settime64,
+ .adjtime = sparx5_ptp_adjtime,
+ .adjfine = sparx5_ptp_adjfine,
+};
+
+static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct sparx5_phc *phc = &sparx5->phc[index];
+
+ phc->info = *clock_info;
+ phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->sparx5 = sparx5;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int sparx5_ptp_init(struct sparx5 *sparx5)
+{
+ u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
+ struct sparx5_port *port;
+ int err, i;
+
+ if (!sparx5->ptp)
+ return 0;
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&sparx5->ptp_clock_lock);
+ spin_lock_init(&sparx5->ptp_ts_id_lock);
+ mutex_init(&sparx5->ptp_lock);
+
+ /* Disable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5,
+ PTP_CLK_PER_CFG(i, 0));
+ spx5_wr((u32)(tod_adj >> 32), sparx5,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ /* Enable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < sparx5->port_count; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void sparx5_ptp_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int i;
+
+ for (i = 0; i < sparx5->port_count; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i)
+ ptp_clock_unregister(sparx5->phc[i].clock);
+}
+
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sparx5_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!sparx5->ptp)
+ return;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ sparx5_ptp_gettime64(&phc->info, &ts);
+
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
new file mode 100644
index 000000000000..1e79d0ef0cb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+/* Max rates for leak groups */
+static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
+ 1048568, /* 1.049 Gbps */
+ 2621420, /* 2.621 Gbps */
+ 10485680, /* 10.486 Gbps */
+ 26214200 /* 26.214 Gbps */
+};
+
+static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
+
+static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
+ return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
+}
+
+static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
+ HSCH_HSCH_TIMER_CFG(layer, group));
+}
+
+static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
+ return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
+}
+
+static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
+ return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
+}
+
+static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_lg_get_first(sparx5, layer, group);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
+}
+
+static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_first(sparx5, layer, group);
+}
+
+static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
+}
+
+static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ if (sparx5_lg_is_empty(sparx5, layer, group))
+ return false;
+
+ return sparx5_lg_get_first(sparx5, layer, group) ==
+ sparx5_lg_get_last(sparx5, layer, group);
+}
+
+static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
+}
+
+static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, 0);
+}
+
+static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
+ u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ if (sparx5_lg_is_empty(sparx5, layer, i))
+ continue;
+
+ itr = sparx5_lg_get_first(sparx5, layer, i);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
+{
+ struct sparx5_layer *l = &layers[layer];
+ struct sparx5_lg *lg;
+ u32 i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ lg = &l->leak_groups[i];
+ if (rate <= lg->max_rate) {
+ *group = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx, u32 *prev, u32 *next, u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_lg_get_first(sparx5, layer, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_lg_get_next(sparx5, layer, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -1; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 se_first, u32 idx, u32 idx_next, bool empty)
+{
+ u32 leak_time = layers[layer].leak_groups[group].leak_time;
+
+ /* Stop leaking */
+ sparx5_lg_disable(sparx5, layer, group);
+
+ if (empty)
+ return 0;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Link elements */
+ spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
+ HSCH_SE_CONNECT(idx));
+
+ /* Set the first element. */
+ spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
+ HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
+ HSCH_HSCH_LEAK_CFG(layer, group));
+
+ /* Start leaking */
+ sparx5_lg_enable(sparx5, layer, group, leak_time);
+
+ return 0;
+}
+
+static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ /* idx *must* be present in the leak group */
+ WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
+ &first) < 0);
+
+ if (sparx5_lg_is_singular(sparx5, layer, group)) {
+ empty = true;
+ } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
+ empty);
+}
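
The four branches are simple list surgery. A worked walk-through for a group chained A -> B -> C (A first): deleting B relinks A to C; deleting C rewrites B to point at itself, which is how the hardware list marks its last element; deleting A moves the group's first pointer to B and lets A link to itself; and deleting the only element stops the leak timer, which doubles as the empty marker.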
+
+static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
+ u32 idx)
+{
+ u32 first, next, old_group;
+
+ pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
+ idx);
+
+ /* Is this SE already shaping? */
+ if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
+ if (old_group != new_group) {
+ /* Delete from old group */
+ sparx5_lg_del(sparx5, layer, old_group, idx);
+ } else {
+ /* Nothing to do here */
+ return 0;
+ }
+ }
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_lg_is_empty(sparx5, layer, new_group))
+ next = idx;
+ else
+ next = sparx5_lg_get_first(sparx5, layer, new_group);
+
+ return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
+ false);
+}
+
+static int sparx5_shaper_conf_set(struct sparx5_port *port,
+ const struct sparx5_shaper *sh, u32 layer,
+ u32 idx, u32 group)
+{
+ int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!sh->rate && !sh->burst)
+ sparx5_lg_action = &sparx5_lg_del;
+ else
+ sparx5_lg_action = &sparx5_lg_add;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Set frame mode */
+ spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
+ sparx5, HSCH_SE_CFG(idx));
+
+ /* Set committed rate and burst */
+ spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
+ HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
+ sparx5, HSCH_CIR_CFG(idx));
+
+ /* This has to be done after the shaper configuration has been set */
+ sparx5_lg_action(sparx5, layer, group, idx);
+
+ return 0;
+}
+
+static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
+{
+ return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
+ 1;
+}
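
Two worked cases make the rounding visible (SPX5_DWRR_COST_MAX << 4 gives the scaled base 1008, and the + 8 adds one half before the 4-bit truncation):

	weight == w_min:     ((1008 * 1 + 8) >> 4) - 1 = 63 - 1 = 62
	weight == 2 * w_min: ((1008 / 2 + 8) >> 4) - 1 = 32 - 1 = 31

A band with twice the weight gets roughly half the hardware cost, and thus roughly twice the bandwidth share.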
+
+static int sparx5_dwrr_conf_set(struct sparx5_port *port,
+ struct sparx5_dwrr *dwrr)
+{
+ int i;
+
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ port->sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Number of *lower* indexes that are arbitrated dwrr */
+ spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
+ HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
+ HSCH_SE_CFG(port->portno));
+
+ for (i = 0; i < dwrr->count; i++) {
+ spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
+ HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
+ HSCH_DWRR_ENTRY(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_leak_groups_init(struct sparx5 *sparx5)
+{
+ struct sparx5_layer *layer;
+ u32 sys_clk_per_100ps;
+ struct sparx5_lg *lg;
+ u32 leak_time_us;
+ int i, ii;
+
+ sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
+
+ for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
+ layer = &layers[i];
+ for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
+ lg = &layer->leak_groups[ii];
+ lg->max_rate = spx5_hsch_max_group_rate[ii];
+
+ /* Calculate the leak time in us, to serve a maximum
+ * rate of 'max_rate' for this group
+ */
+ leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
+
+ /* Hardware wants leak time in ns */
+ lg->leak_time = 1000 * leak_time_us;
+
+ /* Calculate resolution */
+ lg->resolution = 1000 / leak_time_us;
+
+ /* Maximum number of shapers that can be served by
+ * this leak group
+ */
+ lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
+
+ /* Example:
+ * Wanted bandwidth is 100Mbit:
+ *
+ * 100 mbps can be served by leak group zero.
+ *
+ * leak_time is 125000 ns.
+ * resolution is: 8
+ *
+ * cir = 100000 / 8 = 12500
+ * leaks_pr_sec = 125000 / 10^9 = 8000
+ * bw = 12500 * 8000 = 10^8 (100 Mbit)
+ */
+
+ /* Disable by default - this also indicates an empty
+ * leak group
+ */
+ sparx5_lg_disable(sparx5, i, ii);
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_qos_init(struct sparx5 *sparx5)
+{
+ int ret;
+
+ ret = sparx5_leak_groups_init(sparx5);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
+{
+ int i;
+
+ if (num_tc != SPX5_PRIOS) {
+ netdev_err(ndev, "Only %d traffic classes supported\n",
+ SPX5_PRIOS);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
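
With this offload in place, the usual iproute2 front end exercises the path; a command shaped like "tc qdisc replace dev eth0 root handle 1: mqprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1" (device name and map are only examples) reaches sparx5_tc_mqprio_add() with num_tc == 8, satisfying the SPX5_PRIOS check above.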
+
+int sparx5_tc_mqprio_del(struct net_device *ndev)
+{
+ netdev_reset_tc(ndev);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {
+ .mode = SPX5_SE_MODE_DATARATE,
+ .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
+ .burst = params->max_size,
+ };
+ struct sparx5_lg *lg;
+ u32 group;
+
+ /* Find suitable group for this se */
+ if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
+ pr_debug("Could not find leak group for se with rate: %d",
+ sh.rate);
+ return -EINVAL;
+ }
+
+ lg = &layers[layer].leak_groups[group];
+
+ pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
+
+ if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
+ return -EINVAL;
+
+ /* Calculate committed rate and burst */
+ sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
+ sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
+
+ if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
+ return -EINVAL;
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {0};
+ u32 group;
+
+ sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params)
+{
+ struct sparx5_dwrr dwrr = {0};
+ /* Minimum weight for each iteration */
+ unsigned int w_min = 100;
+ int i;
+
+ /* Find minimum weight for all dwrr bands */
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ if (params->quanta[i] == 0)
+ continue;
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ /* Strict band; skip */
+ if (params->quanta[i] == 0)
+ continue;
+
+ dwrr.count++;
+
+ /* On the sparx5, bands with higher indexes are preferred and
+ * arbitrated strict. Strict bands are put in the lower indexes
+ * by tc, so we reverse the bands here.
+ *
+ * Also convert the weight to something the hardware
+ * understands.
+ */
+ dwrr.cost[SPX5_PRIOS - i - 1] =
+ sparx5_weight_to_hw_cost(w_min, params->weights[i]);
+ }
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
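
For reference, a command shaped like "tc qdisc add dev eth0 root handle 1: ets bands 8 strict 4 quanta 1000 2000 3000 4000 priomap 7 6 5 4 3 2 1 0" (a sketch assuming the stock iproute2 ets syntax) yields four strict bands with zero quanta and four dwrr bands; the loop above then fills dwrr.cost[] for the dwrr bands only, in reversed order, relative to the smallest dwrr weight.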
+
+int sparx5_tc_ets_del(struct sparx5_port *port)
+{
+ struct sparx5_dwrr dwrr = {0};
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
new file mode 100644
index 000000000000..ced35033a6c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_QOS_H__
+#define __SPARX5_QOS_H__
+
+#include <linux/netdevice.h>
+
+/* Number of Layers */
+#define SPX5_HSCH_LAYER_CNT 3
+
+/* Scheduling elements per layer */
+#define SPX5_HSCH_L0_SE_CNT 5040
+#define SPX5_HSCH_L1_SE_CNT 64
+#define SPX5_HSCH_L2_SE_CNT 64
+
+/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
+#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
+
+/* Number of leak groups */
+#define SPX5_HSCH_LEAK_GRP_CNT 4
+
+/* Scheduler modes */
+#define SPX5_SE_MODE_LINERATE 0
+#define SPX5_SE_MODE_DATARATE 1
+
+/* Rate and burst */
+#define SPX5_SE_RATE_MAX 262143
+#define SPX5_SE_BURST_MAX 127
+#define SPX5_SE_RATE_MIN 1
+#define SPX5_SE_BURST_MIN 1
+#define SPX5_SE_BURST_UNIT 4096
+
+/* Dwrr */
+#define SPX5_DWRR_COST_MAX 63
+
+struct sparx5_shaper {
+ u32 mode;
+ u32 rate;
+ u32 burst;
+};
+
+struct sparx5_lg {
+ u32 max_rate;
+ u32 resolution;
+ u32 leak_time;
+ u32 max_ses;
+};
+
+struct sparx5_layer {
+ struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
+};
+
+struct sparx5_dwrr {
+ u32 count; /* Number of inputs running dwrr */
+ u8 cost[SPX5_PRIOS];
+};
+
+int sparx5_qos_init(struct sparx5 *sparx5);
+
+/* Multi-Queue Priority */
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
+int sparx5_tc_mqprio_del(struct net_device *ndev);
+
+/* Token Bucket Filter */
+struct tc_tbf_qopt_offload_replace_params;
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx);
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
+
+/* Enhanced Transmission Selection */
+struct tc_ets_qopt_offload_replace_params;
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params);
+
+int sparx5_tc_ets_del(struct sparx5_port *port);
+
+#endif /* __SPARX5_QOS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 649ca609884a..4af85d108a06 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -16,14 +16,40 @@ struct sparx5_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct sparx5 *sparx5;
unsigned long event;
};
+static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
+{
+ bool should_flood = flood_flag || port->is_mrouter;
+ int pgid;
+
+ for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ sparx5_pgid_update_mask(port, pgid, should_flood);
+}
+
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
- if (flags.mask & BR_MCAST_FLOOD)
- sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
+ if (flags.mask & BR_MCAST_FLOOD) {
+ sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
+ sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
+ }
+
+ if (flags.mask & BR_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
+ if (flags.mask & BR_BCAST_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
@@ -65,6 +91,37 @@ static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
sparx5_set_ageing(port->sparx5, ageing_time);
}
+static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
+ struct net_device *orig_dev,
+ bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mdb_entry *e;
+ bool flood_flag;
+
+ if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
+ return;
+
+ /* Add/del mrouter port on all active mdb entries in HW.
+ * Don't change entry port mask, since that represents
+ * ports that actually joined that group.
+ */
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (!test_bit(port->portno, e->port_mask) &&
+ ether_addr_is_ip_mcast(e->addr))
+ sparx5_pgid_update_mask(port, e->pgid_idx, enable);
+ }
+ mutex_unlock(&sparx5->mdb_lock);
+
+ /* Enable/disable flooding depending on whether the port is an
+ * mrouter port or mcast flooding is enabled.
+ */
+ port->is_mrouter = enable;
+ flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
+ sparx5_port_update_mcast_ip_flood(port, flood_flag);
+}
+
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -72,6 +129,9 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
struct sparx5_port *port = netdev_priv(dev);
switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ return sparx5_port_attr_pre_bridge_flags(port,
+ attr->u.brport_flags);
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
break;
@@ -82,9 +142,19 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ /* Use PVID 1 when default_pvid is 0, to avoid
+ * collision with non-bridged ports.
+ */
+ if (port->pvid == 0)
+ port->pvid = 1;
port->vlan_aware = attr->u.vlan_filtering;
sparx5_vlan_port_apply(port->sparx5, port);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ sparx5_port_attr_mrouter_set(port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -117,6 +187,9 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
if (err)
goto err_switchdev_offload;
+ /* Remove standalone port entry */
+ sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
+
/* Port enters bridge mode, therefore we don't need to copy to the CPU
* frames for multicast in case the bridge is not requesting them
*/
@@ -145,6 +218,9 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
port->pvid = NULL_VID;
port->vid = NULL_VID;
+ /* Forward frames to CPU */
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+
/* Port enters host mode, therefore restore the mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}
@@ -228,31 +304,43 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
struct switchdev_notifier_fdb_info *fdb_info;
struct sparx5_port *port;
struct sparx5 *sparx5;
+ bool host_addr;
+ u16 vid;
rtnl_lock();
- if (!sparx5_netdevice_check(dev))
- goto out;
-
- port = netdev_priv(dev);
- sparx5 = port->sparx5;
+ if (!sparx5_netdevice_check(dev)) {
+ host_addr = true;
+ sparx5 = switchdev_work->sparx5;
+ } else {
+ host_addr = false;
+ sparx5 = switchdev_work->sparx5;
+ port = netdev_priv(dev);
+ }
fdb_info = &switchdev_work->fdb_info;
+ /* Use PVID 1 when default_pvid is 0, to avoid
+ * collision with non-bridged ports.
+ */
+ if (fdb_info->vid == 0)
+ vid = 1;
+ else
+ vid = fdb_info->vid;
+
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
- fdb_info->vid);
+ if (host_addr)
+ sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+ fdb_info->addr, vid);
+ else
+ sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
+ fdb_info->addr, vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+ sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
break;
}
-out:
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
kfree(switchdev_work);
@@ -264,15 +352,18 @@ static void sparx5_schedule_work(struct work_struct *work)
queue_work(sparx5_owq, work);
}
-static int sparx5_switchdev_event(struct notifier_block *unused,
+static int sparx5_switchdev_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct sparx5_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info;
struct switchdev_notifier_info *info = ptr;
+ struct sparx5 *spx5;
int err;
+ spx5 = container_of(nb, struct sparx5, switchdev_nb);
+
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
@@ -288,6 +379,7 @@ static int sparx5_switchdev_event(struct notifier_block *unused,
switchdev_work->dev = dev;
switchdev_work->event = event;
+ switchdev_work->sparx5 = spx5;
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
@@ -314,77 +406,213 @@ err_addr_alloc:
return NOTIFY_BAD;
}
-static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
- struct sparx5_port *port,
- u16 vid, bool add)
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_vlan *v)
{
- if (!port ||
- !test_bit(port->portno, sparx5->bridge_mask))
- return; /* Skip null/host interfaces */
-
- /* Bridge connects to vid? */
- if (add) {
- /* Add port MAC address from the VLAN */
- sparx5_mact_learn(sparx5, PGID_CPU,
- port->ndev->dev_addr, vid);
- } else {
- /* Control port addr visibility depending on
- * port VLAN connectivity.
- */
- if (test_bit(port->portno, sparx5->vlan_mask[vid]))
- sparx5_mact_learn(sparx5, PGID_CPU,
- port->ndev->dev_addr, vid);
- else
- sparx5_mact_forget(sparx5,
- port->ndev->dev_addr, vid);
+ struct sparx5_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(dev)) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ /* Flood broadcast to CPU */
+ sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+ v->vid);
+ return 0;
}
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ return sparx5_vlan_vid_add(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
-static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
- struct sparx5 *sparx5,
- u16 vid, bool add)
+static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid,
+ struct sparx5_mdb_entry **entry_out)
{
- int i;
+ struct sparx5_mdb_entry *entry;
+ u16 pgid_idx;
+ int err;
- /* First, handle bridge address'es */
- if (add) {
- sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
- vid);
- sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
- vid);
- } else {
- sparx5_mact_forget(sparx5, dev->dev_addr, vid);
- sparx5_mact_forget(sparx5, dev->broadcast, vid);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
+ if (err) {
+ kfree(entry);
+ return err;
}
- /* Now look at bridged ports */
- for (i = 0; i < SPX5_PORTS; i++)
- sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
+ memcpy(entry->addr, addr, ETH_ALEN);
+ entry->vid = vid;
+ entry->pgid_idx = pgid_idx;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_add_tail(&entry->list, &sparx5->mdb_entries);
+ mutex_unlock(&sparx5->mdb_lock);
+
+ *entry_out = entry;
+ return 0;
}
-static int sparx5_handle_port_vlan_add(struct net_device *dev,
- struct notifier_block *nb,
- const struct switchdev_obj_port_vlan *v)
+static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
{
- struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5_mdb_entry *entry, *tmp;
- if (netif_is_bridge_master(dev)) {
- if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
- struct sparx5 *sparx5 =
- container_of(nb, struct sparx5,
- switchdev_blocking_nb);
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
+ if ((vid == 0 || entry->vid == vid) &&
+ ether_addr_equal(addr, entry->addr)) {
+ list_del(&entry->list);
- sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+ sparx5_pgid_free(sparx5, entry->pgid_idx);
+ kfree(entry);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+}
+
+static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *e, *found = NULL;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
+ found = e;
+ goto out;
}
- return 0;
}
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+ return found;
+}
+
+static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
+{
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid));
+}
+
+static int sparx5_handle_port_mdb_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ struct sparx5_mdb_entry *entry;
+ bool is_host, is_new;
+ int err, i;
+ u16 vid;
+
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
- return sparx5_vlan_vid_add(port, v->vid,
- v->flags & BRIDGE_VLAN_INFO_PVID,
- v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
+
+ /* When the bridge is VLAN unaware, the vlan value is not parsed and
+ * we receive vid 0. Fall back to bridge vid 1.
+ */
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
+
+ is_new = false;
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry) {
+ err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
+ is_new = true;
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&spx5->mdb_lock);
+
+ /* Add any mrouter ports to the new entry */
+ if (is_new && ether_addr_is_ip_mcast(v->addr))
+ for (i = 0; i < SPX5_PORTS; i++)
+ if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
+ sparx5_pgid_update_mask(spx5->ports[i],
+ entry->pgid_idx,
+ true);
+
+ if (is_host && !entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
+ entry->cpu_copy = true;
+ } else if (!is_host) {
+ sparx5_pgid_update_mask(port, entry->pgid_idx, true);
+ set_bit(port->portno, entry->port_mask);
+ }
+ mutex_unlock(&spx5->mdb_lock);
+
+ sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);
+
+ return 0;
+}
+
+static int sparx5_handle_port_mdb_del(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ struct sparx5_mdb_entry *entry;
+ bool is_host;
+ u16 vid;
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
+
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
+
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry)
+ return 0;
+
+ mutex_lock(&spx5->mdb_lock);
+ if (is_host && entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
+ entry->cpu_copy = false;
+ } else if (!is_host) {
+ clear_bit(port->portno, entry->port_mask);
+
+ /* Port is not an mrouter port, or addr is L2 mcast: remove port from mask. */
+ if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
+ sparx5_pgid_update_mask(port, entry->pgid_idx, false);
+ }
+ mutex_unlock(&spx5->mdb_lock);
+
+ if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
+ /* Clear pgid in case mrouter ports exist
+ * that are not part of the group.
+ */
+ sparx5_pgid_clear(spx5, entry->pgid_idx);
+ sparx5_mact_forget(spx5, entry->addr, entry->vid);
+ sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
+ }
+ return 0;
}
static int sparx5_handle_port_obj_add(struct net_device *dev,
@@ -399,6 +627,11 @@ static int sparx5_handle_port_obj_add(struct net_device *dev,
err = sparx5_handle_port_vlan_add(dev, nb,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = sparx5_handle_port_mdb_add(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -421,7 +654,7 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
container_of(nb, struct sparx5,
switchdev_blocking_nb);
- sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+ sparx5_mact_forget(sparx5, dev->broadcast, vid);
return 0;
}
@@ -432,9 +665,6 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
if (ret)
return ret;
- /* Delete the port MAC address with the matching VLAN information */
- sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
-
return 0;
}
@@ -450,6 +680,11 @@ static int sparx5_handle_port_obj_del(struct net_device *dev,
err = sparx5_handle_port_vlan_del(dev, nb,
SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = sparx5_handle_port_mdb_del(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
new file mode 100644
index 000000000000..e05429c751ee
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_tc.h"
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
+ u32 *idx)
+{
+ if (parent == TC_H_ROOT) {
+ *layer = 2;
+ *idx = portno;
+ } else {
+ u32 queue = TC_H_MIN(parent) - 1;
+ *layer = 0;
+ *idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
+ }
+}
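
A quick mapping check (worked example): a qdisc attached at the root of port 3 lands on layer 2, index 3 (the port shaper), while one attached at parent 1:3 (queue 2 after the minor-1 adjustment) lands on layer 0, index SPX5_HSCH_L0_GET_IDX(3, 2) = 64 * 3 + 8 * 2 = 208.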
+
+static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
+ struct tc_mqprio_qopt_offload *m)
+{
+ m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ if (m->qopt.num_tc == 0)
+ return sparx5_tc_mqprio_del(ndev);
+ else
+ return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
+}
+
+static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 layer, se_idx;
+
+ sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
+ &se_idx);
+
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
+ se_idx);
+ case TC_TBF_DESTROY:
+ return sparx5_tc_tbf_del(port, layer, se_idx);
+ case TC_TBF_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params =
+ &qopt->replace_params;
+ struct sparx5_port *port = netdev_priv(ndev);
+ int i;
+
+ /* Only allow ets on ports */
+ if (qopt->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+
+ /* We support eight priorities */
+ if (params->bands != SPX5_PRIOS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checks */
+ for (i = 0; i < SPX5_PRIOS; ++i) {
+ /* Priority map is *always* reverse e.g: 7 6 5 .. 0 */
+ if (params->priomap[i] != (7 - i))
+ return -EOPNOTSUPP;
+ /* Throw an error if we receive zero weights by tc */
+ if (params->quanta[i] && params->weights[i] == 0) {
+ pr_err("Invalid ets configuration; band %d has weight zero",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return sparx5_tc_ets_add(port, params);
+ case TC_ETS_DESTROY:
+
+ return sparx5_tc_ets_del(port);
+
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return sparx5_tc_setup_qdisc_ets(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
new file mode 100644
index 000000000000..5b55e11b77e1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_TC_H__
+#define __SPARX5_TC_H__
+
+#include <linux/netdevice.h>
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
+#endif /* __SPARX5_TC_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 4ce490a25f33..34f954bbf815 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
struct sparx5 *sparx5 = port->sparx5;
int ret;
- /* Make the port a member of the VLAN */
- set_bit(port->portno, sparx5->vlan_mask[vid]);
- ret = sparx5_vlant_set_mask(sparx5, vid);
- if (ret)
- return ret;
-
- /* Default ingress vlan classification */
- if (pvid)
- port->pvid = vid;
-
/* Untagged egress vlan classification */
if (untagged && port->vid != vid) {
if (port->vid) {
@@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
port->vid = vid;
}
+ /* Make the port a member of the VLAN */
+ set_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
sparx5_vlan_port_apply(sparx5, port);
return 0;
@@ -138,6 +138,20 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
}
}
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
+{
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
+void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
+{
+ portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
+ portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
+ portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
void sparx5_update_fwd(struct sparx5 *sparx5)
{
DECLARE_BITMAP(workmask, SPX5_PORTS);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 41ecd156e95f..4a6efe6ada08 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -348,6 +348,7 @@ struct gdma_context {
struct completion eq_test_event;
u32 test_event_eq_id;
+ bool is_pf;
void __iomem *bar0_va;
void __iomem *shm_base;
void __iomem *db_page_base;
@@ -469,6 +470,15 @@ struct gdma_eqe {
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18
+#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
+#define GDMA_PF_REG_SHM_OFF 0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108
+
+#define MANA_PF_DEVICE_ID 0x00B9
+#define MANA_VF_DEVICE_ID 0x00BA
+
struct gdma_posted_wqe_info {
u32 wqe_size_in_bu;
};
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 636dfef24a6c..a6f99b4344d9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -18,7 +18,24 @@ static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
return readq(g->bar0_va + offset);
}
-static void mana_gd_init_registers(struct pci_dev *pdev)
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ void __iomem *sriov_base_va;
+ u64 sriov_base_off;
+
+ gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+ gc->db_page_base = gc->bar0_va +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
+ sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+ sriov_base_va = gc->bar0_va + sriov_base_off;
+ gc->shm_base = sriov_base_va +
+ mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -30,6 +47,16 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ if (gc->is_pf)
+ mana_gd_init_pf_regs(pdev);
+ else
+ mana_gd_init_vf_regs(pdev);
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -370,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg)
break;
}
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+
mana_gd_process_eqe(eq);
eq->head++;
@@ -663,7 +695,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_context *gc = gd->gdma_context;
struct hw_channel_context *hwc;
u32 length = gmi->length;
- u32 req_msg_size;
+ size_t req_msg_size;
int err;
int i;
@@ -674,7 +706,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
return -EINVAL;
hwc = gc->hwc.driver_data;
- req_msg_size = sizeof(*req) + num_page * sizeof(u64);
+ req_msg_size = struct_size(req, page_addr_list, num_page);
if (req_msg_size > hwc->max_req_msg_size)
return -EINVAL;
@@ -1107,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading completion info
+ */
+ rmb();
+
comp->wq_num = cqe->cqe_info.wq_num;
comp->is_sq = cqe->cqe_info.is_sq;
memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
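
Both new barriers implement the same single-consumer rule: validate the owner bits first, then order the payload reads after that check, so a CPU that sees the ownership update cannot see stale data. A generic sketch of the pattern (hypothetical desc and helpers, not the driver's structures):

	if (!owned_by_us(READ_ONCE(desc->owner_bits)))
		return;		/* nothing new to consume */
	rmb();			/* payload reads must not be reordered above */
	process(desc->payload);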
@@ -1304,6 +1341,11 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_gd_remove_irqs(pdev);
}
+static bool mana_is_pf(unsigned short dev_id)
+{
+ return dev_id == MANA_PF_DEVICE_ID;
+}
+
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
@@ -1340,10 +1382,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bar0_va)
goto free_gc;
+ gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
-
err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
@@ -1433,12 +1475,9 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
static const struct pci_device_id mana_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
{ }
};
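
Listing both IDs lets one driver bind the PF and VF flavors of the function, with probe branching on pdev->device through mana_is_pf(). The shape of that pattern, sketched with hypothetical device IDs:

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_PF_DEVICE_ID 0x0001	/* illustrative, not MANA's IDs */
#define DEMO_VF_DEVICE_ID 0x0002

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	bool is_pf = pdev->device == DEMO_PF_DEVICE_ID;

	dev_info(&pdev->dev, "probing in %s mode\n", is_pf ? "PF" : "VF");
	return 0;
}

static const struct pci_device_id demo_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, DEMO_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, DEMO_VF_DEVICE_ID) },
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_id_table);
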
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 078d6a5a0768..543a5d5c304f 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -158,6 +158,14 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
hwc->rxq->msg_buf->gpa_mkey = val;
hwc->txq->msg_buf->gpa_mkey = val;
break;
+
+ case HWC_INIT_DATA_PF_DEST_RQ_ID:
+ hwc->pf_dest_vrq_id = val;
+ break;
+
+ case HWC_INIT_DATA_PF_DEST_CQ_ID:
+ hwc->pf_dest_vrcq_id = val;
+ break;
}
break;
@@ -773,10 +781,13 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
const void *req, u32 resp_len, void *resp)
{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
struct hwc_work_request *tx_wr;
struct hwc_wq *txq = hwc->txq;
struct gdma_req_hdr *req_msg;
struct hwc_caller_ctx *ctx;
+ u32 dest_vrcq = 0;
+ u32 dest_vrq = 0;
u16 msg_id;
int err;
@@ -803,7 +814,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
tx_wr->msg_size = req_len;
- err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
+ if (gc->is_pf) {
+ dest_vrq = hwc->pf_dest_vrq_id;
+ dest_vrcq = hwc->pf_dest_vrcq_id;
+ }
+
+ err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
if (err) {
dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
index 31c6e83c454a..6a757a6e2732 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.h
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h
@@ -20,6 +20,8 @@
#define HWC_INIT_DATA_MAX_NUM_CQS 7
#define HWC_INIT_DATA_PDID 8
#define HWC_INIT_DATA_GPA_MKEY 9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -178,6 +180,9 @@ struct hw_channel_context {
struct semaphore sema;
struct gdma_resource inflight_msg_res;
+ u32 pf_dest_vrq_id;
+ u32 pf_dest_vrcq_id;
+
struct hwc_caller_ctx *caller_ctx;
};
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index 9a12607fb511..d58be64374c8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -48,9 +48,19 @@ enum TRI_STATE {
#define MAX_PORTS_IN_MANA_DEV 256
-struct mana_stats {
+struct mana_stats_rx {
u64 packets;
u64 bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ struct u64_stats_sync syncp;
+};
+
+struct mana_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
struct u64_stats_sync syncp;
};
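
Splitting the counters lets RX grow xdp_drop/xdp_tx/xdp_redirect fields and TX grow xdp_xmit while each group stays under its own u64_stats_sync, the seqcount that makes 64-bit counters readable without locks on 32-bit machines. The writer/reader discipline used throughout this patch, sketched with demo names:

#include <linux/u64_stats_sync.h>
#include <linux/types.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer: datapath context, one writer per queue. */
static void demo_stats_add(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader: retries the snapshot if a writer raced with it. */
static void demo_stats_read(struct demo_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
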
@@ -76,7 +86,7 @@ struct mana_txq {
atomic_t pending_sends;
- struct mana_stats stats;
+ struct mana_stats_tx stats;
};
/* skb data and frags dma mappings */
@@ -298,10 +308,13 @@ struct mana_rxq {
u32 buf_index;
- struct mana_stats stats;
+ struct mana_stats_rx stats;
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ struct page *xdp_save_page;
+ bool xdp_flush;
+ int xdp_rc; /* XDP redirect return code */
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
@@ -365,6 +378,7 @@ struct mana_port_context {
unsigned int num_queues;
mana_handle_t port_handle;
+ mana_handle_t pf_filter_handle;
u16 port_idx;
@@ -386,6 +400,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
@@ -411,6 +427,12 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+
+ /* Privileged commands for the PF mode */
+ MANA_REGISTER_FILTER = 0x28000,
+ MANA_DEREGISTER_FILTER = 0x28001,
+ MANA_REGISTER_HW_PORT = 0x28003,
+ MANA_DEREGISTER_HW_PORT = 0x28004,
};
/* Query Device Configuration */
@@ -538,6 +560,63 @@ struct mana_cfg_rx_steer_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ u16 attached_gfid;
+ u8 is_pf_default_vport;
+ u8 reserved1;
+ u8 allow_all_ether_types;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u8 mac_addr[6];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+ u16 reserved5;
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
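
Structures tagged /* HW DATA */ are device ABI, which is why the requests above carry explicit reserved fields: they pin the layout so every member stays naturally aligned without __packed. For layouts this rigid, a compile-time size check is cheap insurance; a sketch under assumed sizes (demo_hw_req and its 24-byte figure are illustrative, not from the MANA spec):

#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_hw_req {
	u64 handle;
	u8  mac_addr[6];
	u8  reserved1[2];	/* keeps 'flags' 4-byte aligned */
	u32 flags;
	u32 reserved2;		/* keeps sizeof() a multiple of 8 */
}; /* HW DATA */

/* Fails the build if padding ever changes the wire size. */
static_assert(sizeof(struct demo_hw_req) == 24);
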
#define MANA_MAX_NUM_QUEUES 64
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 1d2f948b5c00..421fd39ff3a8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -32,9 +32,55 @@ void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
}
+static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
+ u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_set_queue_mapping(skb, q_idx);
+
+ mana_xdp_tx(skb, ndev);
+
+ return 0;
+}
+
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct mana_stats_tx *tx_stats;
+ int i, count = 0;
+ u16 q_idx;
+
+ if (unlikely(!apc->port_is_up))
+ return 0;
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &apc->tx_qp[q_idx].txq.stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
+
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
+ struct mana_stats_rx *rx_stats;
struct bpf_prog *prog;
u32 act = XDP_PASS;
@@ -49,12 +95,30 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
act = bpf_prog_run_xdp(prog, xdp);
+ rx_stats = &rxq->stats;
+
switch (act) {
case XDP_PASS:
case XDP_TX:
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
+ if (!rxq->xdp_rc) {
+ rxq->xdp_flush = true;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+ rx_stats->xdp_redirect++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
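
XDP_REDIRECT is deliberately two-phase: xdp_do_redirect() only queues the frame on per-CPU redirect state, and a single xdp_do_flush() per NAPI poll pushes the whole batch out, which is what the rxq->xdp_flush flag set here and consumed in mana_poll_rx_cq() arranges. The contract in condensed form, with hypothetical demo_* helpers:

#include <linux/filter.h>
#include <net/xdp.h>

static bool demo_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			 struct xdp_buff *xdp)
{
	if (bpf_prog_run_xdp(prog, xdp) != XDP_REDIRECT)
		return false;

	/* Success queues the frame; nothing hits the wire yet. */
	return xdp_do_redirect(dev, xdp, prog) == 0;
}

static void demo_rx_poll(struct net_device *dev, struct bpf_prog *prog,
			 struct xdp_buff **bufs, int n)
{
	bool flush = false;
	int i;

	for (i = 0; i < n; i++)
		flush |= demo_run_xdp(dev, prog, bufs[i]);

	/* One flush per poll drains the per-CPU redirect queue. */
	if (flush)
		xdp_do_flush();
}
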
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 498d0f999275..9259a74eca40 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -6,6 +6,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/filter.h>
#include <linux/mm.h>
#include <net/checksum.h>
@@ -136,7 +137,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
bool ipv4 = false, ipv6 = false;
struct mana_tx_package pkg = {};
struct netdev_queue *net_txq;
- struct mana_stats *tx_stats;
+ struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
unsigned int csum_type;
struct mana_txq *txq;
@@ -299,7 +300,8 @@ static void mana_get_stats64(struct net_device *ndev,
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
- struct mana_stats *stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
int q;
@@ -310,26 +312,26 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
for (q = 0; q < num_queues; q++) {
- stats = &apc->rxqs[q]->stats;
+ rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
st->rx_packets += packets;
st->rx_bytes += bytes;
}
for (q = 0; q < num_queues; q++) {
- stats = &apc->tx_qp[q].txq.stats;
+ tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
st->tx_packets += packets;
st->tx_bytes += bytes;
@@ -381,6 +383,7 @@ static const struct net_device_ops mana_devops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
.ndo_bpf = mana_bpf,
+ .ndo_xdp_xmit = mana_xdp_xmit,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -445,6 +448,119 @@ static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
return 0;
}
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_register_hw_vport_resp resp = {};
+ struct mana_register_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.attached_gfid = 1;
+ req.is_pf_default_vport = 1;
+ req.allow_all_ether_types = 1;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->port_handle = resp.hw_vport_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_deregister_hw_vport_resp resp = {};
+ struct mana_deregister_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.hw_vport_handle = apc->port_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+ struct mana_register_filter_resp resp = {};
+ struct mana_register_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->pf_filter_handle = resp.filter_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+ struct mana_deregister_filter_resp resp = {};
+ struct mana_deregister_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.filter_handle = apc->pf_filter_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
u16 *max_num_vports)
@@ -986,7 +1102,7 @@ static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_rxq *rxq)
{
- struct mana_stats *rx_stats = &rxq->stats;
+ struct mana_stats_rx *rx_stats = &rxq->stats;
struct net_device *ndev = rxq->ndev;
uint pkt_len = cqe->ppi[0].pkt_len;
u16 rxq_idx = rxq->rxq_idx;
@@ -1006,8 +1122,11 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
+ if (act == XDP_REDIRECT && !rxq->xdp_rc)
+ return;
+
if (act != XDP_PASS && act != XDP_TX)
- goto drop;
+ goto drop_xdp;
skb = mana_build_skb(buf_va, pkt_len, &xdp);
@@ -1034,6 +1153,14 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
}
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+ u64_stats_update_end(&rx_stats->syncp);
+
if (act == XDP_TX) {
skb_set_queue_mapping(skb, rxq_idx);
mana_xdp_tx(skb, ndev);
@@ -1042,15 +1169,19 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
napi_gro_receive(napi, skb);
+ return;
+
+drop_xdp:
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->packets++;
- rx_stats->bytes += pkt_len;
+ rx_stats->xdp_drop++;
u64_stats_update_end(&rx_stats->syncp);
- return;
drop:
- free_page((unsigned long)buf_va);
+ WARN_ON_ONCE(rxq->xdp_save_page);
+ rxq->xdp_save_page = virt_to_page(buf_va);
+
++ndev->stats.rx_dropped;
+
return;
}
@@ -1072,8 +1203,10 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
break;
case CQE_RX_TRUNCATED:
- netdev_err(ndev, "Dropped a truncated packet\n");
- return;
+ ++ndev->stats.rx_dropped;
+ rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
+ netdev_warn_once(ndev, "Dropped a truncated packet\n");
+ goto drop;
case CQE_RX_COALESCED_4:
netdev_err(ndev, "RX coalescing is unsupported\n");
@@ -1089,9 +1222,6 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
}
- if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
- return;
-
pktlen = oob->ppi[0].pkt_len;
if (pktlen == 0) {
@@ -1105,7 +1235,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
- new_page = alloc_page(GFP_ATOMIC);
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_page) {
+ new_page = rxq->xdp_save_page;
+ rxq->xdp_save_page = NULL;
+ } else {
+ new_page = alloc_page(GFP_ATOMIC);
+ }
if (new_page) {
da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1135,6 +1271,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
mana_rx_skb(old_buf, oob, rxq);
+drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
mana_post_pkt_rxq(rxq);
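
Instead of freeing the page behind an XDP-dropped frame and immediately allocating a replacement for the refill, the driver parks it in rxq->xdp_save_page and hands it back on the next refill. Since both sides run in the same NAPI context, a single unlocked slot suffices; a generic sketch:

#include <linux/gfp.h>
#include <linux/mm.h>

struct demo_rxq {
	struct page *saved_page;	/* at most one recycled page */
};

/* Refill path: prefer the recycled page over a fresh allocation. */
static struct page *demo_get_rx_page(struct demo_rxq *rxq)
{
	struct page *page = rxq->saved_page;

	if (page) {
		rxq->saved_page = NULL;
		return page;
	}
	return alloc_page(GFP_ATOMIC);
}

/* Drop path: stash the page rather than freeing it. */
static void demo_save_rx_page(struct demo_rxq *rxq, struct page *page)
{
	WARN_ON_ONCE(rxq->saved_page);	/* slot must be empty */
	rxq->saved_page = page;
}
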
@@ -1143,11 +1280,14 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
static void mana_poll_rx_cq(struct mana_cq *cq)
{
struct gdma_comp *comp = cq->gdma_comp_buf;
+ struct mana_rxq *rxq = cq->rxq;
int comp_read, i;
comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+ rxq->xdp_flush = false;
+
for (i = 0; i < comp_read; i++) {
if (WARN_ON_ONCE(comp[i].is_sq))
return;
@@ -1156,8 +1296,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
return;
- mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
+ mana_process_rx_cqe(rxq, cq, &comp[i]);
}
+
+ if (rxq->xdp_flush)
+ xdp_do_flush();
}
static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
@@ -1352,7 +1495,7 @@ static int mana_create_txq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(net, &cq->napi, mana_poll);
napi_enable(&cq->napi);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
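
The netif_napi_add_tx() conversion here (and netif_napi_add_weight() further down) tracks a kernel-wide rename of the NAPI registration helpers: the default-weight TX variant lost its weight argument, and passing a non-default weight, as the RX path below does with 1, now requires the explicit _weight spelling. A before/after sketch with demo names:

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	/* ...process up to 'budget' completions... */
	return 0;
}

static void demo_register_napi(struct net_device *dev,
			       struct napi_struct *tx_napi,
			       struct napi_struct *rx_napi)
{
	/* Was: netif_tx_napi_add(dev, tx_napi, demo_poll, NAPI_POLL_WEIGHT); */
	netif_napi_add_tx(dev, tx_napi, demo_poll);

	/* Was: netif_napi_add(dev, rx_napi, demo_poll, 1); */
	netif_napi_add_weight(dev, rx_napi, demo_poll, 1);
}
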
@@ -1392,6 +1535,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
mana_deinit_cq(apc, &rxq->rx_cq);
+ if (rxq->xdp_save_page)
+ __free_page(rxq->xdp_save_page);
+
for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];
@@ -1580,7 +1726,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add(ndev, &cq->napi, mana_poll, 1);
+ netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
@@ -1631,6 +1777,7 @@ out:
static void mana_destroy_vport(struct mana_port_context *apc)
{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_rxq *rxq;
u32 rxq_idx;
@@ -1644,6 +1791,9 @@ static void mana_destroy_vport(struct mana_port_context *apc)
}
mana_destroy_txq(apc);
+
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_hw_vport(apc);
}
static int mana_create_vport(struct mana_port_context *apc,
@@ -1654,6 +1804,12 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_hw_vport(apc);
+ if (err)
+ return err;
+ }
+
err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
if (err)
return err;
@@ -1733,6 +1889,7 @@ reset_apc:
int mana_alloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
int err;
err = mana_create_vport(apc, ndev);
@@ -1759,6 +1916,12 @@ int mana_alloc_queues(struct net_device *ndev)
if (err)
goto destroy_vport;
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_filter(apc);
+ if (err)
+ goto destroy_vport;
+ }
+
mana_chn_setxdp(apc, mana_xdp_get(apc));
return 0;
@@ -1803,6 +1966,7 @@ int mana_attach(struct net_device *ndev)
static int mana_dealloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_txq *txq;
int i, err;
@@ -1811,6 +1975,9 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_filter(apc);
+
/* No packet can be transmitted now since apc->port_is_up is false.
* There is still a tiny chance that mana_poll_tx_cq() can re-enable
* a txq because it may not timely see apc->port_is_up being cleared
@@ -1893,6 +2060,7 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
apc->port_handle = INVALID_MANA_HANDLE;
+ apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
ndev->netdev_ops = &mana_devops;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c3c81ae3fafd..c530db76880f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -23,7 +23,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues * 4;
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -46,6 +46,12 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_%d_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_tx", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_redirect", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
@@ -53,6 +59,8 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "tx_%d_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_xdp_xmit", i);
+ p += ETH_GSTRING_LEN;
}
}
@@ -62,9 +70,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
- struct mana_stats *stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_redirect;
+ u64 xdp_xmit;
+ u64 xdp_drop;
+ u64 xdp_tx;
int q, i = 0;
if (!apc->port_is_up)
@@ -74,29 +87,37 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
for (q = 0; q < num_queues; q++) {
- stats = &apc->rxqs[q]->stats;
+ rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_tx = rx_stats->xdp_tx;
+ xdp_redirect = rx_stats->xdp_redirect;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_drop;
+ data[i++] = xdp_tx;
+ data[i++] = xdp_redirect;
}
for (q = 0; q < num_queues; q++) {
- stats = &apc->tx_qp[q].txq.stats;
+ tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
}
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 15179b9529e1..3da99b62797d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -29,12 +29,12 @@
#include "moxart_ether.h"
-static inline void moxart_desc_write(u32 data, u32 *desc)
+static inline void moxart_desc_write(u32 data, __le32 *desc)
{
*desc = cpu_to_le32(data);
}
-static inline u32 moxart_desc_read(u32 *desc)
+static inline u32 moxart_desc_read(__le32 *desc)
{
return le32_to_cpu(*desc);
}
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
@@ -510,14 +512,14 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->tx_buf_base) {
ret = -ENOMEM;
goto init_fail;
}
priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->rx_buf_base) {
ret = -ENOMEM;
goto init_fail;
@@ -533,7 +535,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &moxart_netdev_ops;
- netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
+ netif_napi_add_weight(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
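
The recurring &ndev->dev to &priv->pdev->dev substitution is the substantive fix in this file: the struct device embedded in a net_device carries no DMA ops or dma_mask, so streaming DMA must be mapped against the bus device that actually owns the DMA configuration. The rule in sketch form, for a platform MAC:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Map an RX buffer: the platform device, never the net_device,
 * is the DMA-capable device.
 */
static dma_addr_t demo_map_rx(struct platform_device *pdev,
			      struct net_device *ndev,
			      void *buf, size_t len)
{
	dma_addr_t da;

	da = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, da))
		netdev_err(ndev, "DMA mapping error\n");

	return da;
}
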
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 41b34a509308..5d435a565d4c 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -2,16 +2,17 @@
obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o
mscc_ocelot_switch_lib-y := \
ocelot.o \
+ ocelot_devlink.o \
+ ocelot_flower.o \
ocelot_io.o \
ocelot_police.o \
- ocelot_vcap.o \
- ocelot_flower.o \
ocelot_ptp.o \
- ocelot_devlink.o \
+ ocelot_stats.o \
+ ocelot_vcap.o \
vsc7514_regs.o
mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_fdma.o \
- ocelot_vsc7514.o \
- ocelot_net.o
+ ocelot_net.o \
+ ocelot_vsc7514.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index b1311b656e17..13b14110a060 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,13 +6,13 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
-#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
u8 mac[ETH_ALEN];
@@ -221,6 +221,35 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
REW_PORT_CFG, port);
}
+static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = NULL;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->bridge ||
+ !br_vlan_enabled(ocelot_port->bridge))
+ continue;
+
+ if (!bridge) {
+ bridge = ocelot_port->bridge;
+ continue;
+ }
+
+ if (bridge == ocelot_port->bridge)
+ continue;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -260,6 +289,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
if (!(vlan->portmask & BIT(port)))
continue;
+ /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+ * because this is never active in hardware at the same time as
+ * the bridge VLANs, which only matter in VLAN-aware mode.
+ */
+ if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+ continue;
+
if (vlan->untagged & BIT(port))
num_untagged++;
}
@@ -347,12 +383,45 @@ static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
}
}
+int ocelot_bridge_num_find(struct ocelot *ocelot,
+ const struct net_device *bridge)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (ocelot_port && ocelot_port->bridge == bridge)
+ return ocelot_port->bridge_num;
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(ocelot_bridge_num_find);
+
+static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
+ const struct net_device *bridge)
+{
+ int bridge_num;
+
+ /* Standalone ports use VID 0 */
+ if (!bridge)
+ return 0;
+
+ bridge_num = ocelot_bridge_num_find(ocelot, bridge);
+ if (WARN_ON(bridge_num < 0))
+ return 0;
+
+ /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */
+ return VLAN_N_VID - bridge_num - 1;
+}
+
/* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
+ u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
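
The scheme above gives each VLAN-unaware bridge a private classified VLAN carved from the top of the VID space: bridge_num 0 maps to VID 4095, bridge_num 1 to 4094, and so on, while standalone ports stay on VID 0 and OCELOT_RSV_VLAN_RANGE_START (4000) fences the range off from user configuration. The mapping restated as a tiny self-check (it mirrors ocelot_vlan_unaware_pvid(), with bridge_num < 0 standing in for "no bridge"):

#include <linux/if_vlan.h>	/* VLAN_N_VID == 4096 */
#include <linux/types.h>

static u16 demo_vlan_unaware_pvid(int bridge_num)
{
	if (bridge_num < 0)
		return 0;			/* standalone port: VID 0 */

	return VLAN_N_VID - bridge_num - 1;	/* 4095, 4094, ... */
}
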
@@ -466,12 +535,29 @@ static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
return 0;
}
+static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port,
+ const struct net_device *bridge)
+{
+ u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ return ocelot_vlan_member_add(ocelot, port, vid, true);
+}
+
+static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port,
+ const struct net_device *bridge)
+{
+ u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ return ocelot_vlan_member_del(ocelot, port, vid);
+}
+
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
bool vlan_aware, struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vcap_filter *filter;
+ int err = 0;
u32 val;
list_for_each_entry(filter, &block->rules, list) {
@@ -483,6 +569,19 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
}
}
+ err = ocelot_single_vlan_aware_bridge(ocelot, extack);
+ if (err)
+ return err;
+
+ if (vlan_aware)
+ err = ocelot_del_vlan_unaware_pvid(ocelot, port,
+ ocelot_port->bridge);
+ else if (ocelot_port->bridge)
+ err = ocelot_add_vlan_unaware_pvid(ocelot, port,
+ ocelot_port->bridge);
+ if (err)
+ return err;
+
ocelot_port->vlan_aware = vlan_aware;
if (vlan_aware)
@@ -521,6 +620,12 @@ int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
}
}
+ if (vid > OCELOT_RSV_VLAN_RANGE_START) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN range 4000-4095 reserved for VLAN-unaware bridging");
+ return -EBUSY;
+ }
+
return 0;
}
EXPORT_SYMBOL(ocelot_vlan_prepare);
@@ -530,6 +635,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
+ /* Ignore VID 0 added to our RX filter by the 8021q module, since
+ * that collides with OCELOT_STANDALONE_PVID and changes it from
+ * egress-untagged to egress-tagged.
+ */
+ if (!vid)
+ return 0;
+
err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
@@ -549,14 +661,21 @@ EXPORT_SYMBOL(ocelot_vlan_add);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool del_pvid = false;
int err;
+ if (!vid)
+ return 0;
+
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ del_pvid = true;
+
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
@@ -580,11 +699,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
for (vid = 1; vid < VLAN_N_VID; vid++)
ocelot_vlant_set_mask(ocelot, vid, 0);
- /* Because VLAN filtering is enabled, we need VID 0 to get untagged
- * traffic. It is added automatically if 8021q module is loaded, but
- * we can't rely on it since module may be not loaded.
+ /* We need VID 0 to get traffic on standalone ports.
+ * It is added automatically if the 8021q module is loaded, but we
+ * can't rely on that since it might not be.
*/
- ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
+ ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -771,7 +890,10 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
+ /* Don't attempt to send PAUSE frames on the NPI port, it's broken */
+ if (port != ocelot->npi)
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
+ tx_pause);
/* Undo the effects of ocelot_phylink_mac_link_down:
* enable MAC module
@@ -794,211 +916,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- unsigned long flags;
-
- spin_lock_irqsave(&ocelot->ts_id_lock, flags);
-
- if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
- ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return -EBUSY;
- }
-
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
- OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
-
- ocelot_port->ts_id++;
- if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
- ocelot_port->ts_id = 0;
-
- ocelot_port->ptp_skbs_in_flight++;
- ocelot->ptp_skbs_in_flight++;
-
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
-
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
-
- return 0;
-}
-
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
- unsigned int ptp_class)
-{
- struct ptp_header *hdr;
- u8 msgtype, twostep;
-
- hdr = ptp_parse_header(skb, ptp_class);
- if (!hdr)
- return false;
-
- msgtype = ptp_get_msgtype(hdr, ptp_class);
- twostep = hdr->flag_field[0] & 0x2;
-
- if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
- return true;
-
- return false;
-}
-
-int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
- struct sk_buff *skb,
- struct sk_buff **clone)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u8 ptp_cmd = ocelot_port->ptp_cmd;
- unsigned int ptp_class;
- int err;
-
- /* Don't do anything if PTP timestamping not enabled */
- if (!ptp_cmd)
- return 0;
-
- ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
-
- /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
- if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
- OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
- return 0;
- }
-
- /* Fall back to two-step timestamping */
- ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- }
-
- if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- *clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
-
- err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
- if (err)
- return err;
-
- OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
- OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ocelot_port_txtstamp_request);
-
-static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
- struct timespec64 *ts)
-{
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- /* Read current PTP time to get seconds */
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
-
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
- ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
-
- /* Read packet HW timestamp from FIFO */
- val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
- ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
-
- /* Sec has incremented since the ts was registered */
- if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
- ts->tv_sec--;
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
-}
-
-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
- if (WARN_ON(!hdr))
- return false;
-
- return seqid == ntohs(hdr->sequence_id);
-}
-
-void ocelot_get_txtstamp(struct ocelot *ocelot)
-{
- int budget = OCELOT_PTP_QUEUE_SZ;
-
- while (budget--) {
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
- struct skb_shared_hwtstamps shhwtstamps;
- u32 val, id, seqid, txport;
- struct ocelot_port *port;
- struct timespec64 ts;
- unsigned long flags;
-
- val = ocelot_read(ocelot, SYS_PTP_STATUS);
-
- /* Check if a timestamp can be retrieved */
- if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
- break;
-
- WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
-
- /* Retrieve the ts ID and Tx port */
- id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
- txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
- seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
-
- port = ocelot->ports[txport];
-
- spin_lock(&ocelot->ts_id_lock);
- port->ptp_skbs_in_flight--;
- ocelot->ptp_skbs_in_flight--;
- spin_unlock(&ocelot->ts_id_lock);
-
- /* Retrieve its associated skb */
-try_again:
- spin_lock_irqsave(&port->tx_skbs.lock, flags);
-
- skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (OCELOT_SKB_CB(skb)->ts_id != id)
- continue;
- __skb_unlink(skb, &port->tx_skbs);
- skb_match = skb;
- break;
- }
-
- spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
-
- if (WARN_ON(!skb_match))
- continue;
-
- if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
- dev_err_ratelimited(ocelot->dev,
- "port %d received stale TX timestamp for seqid %d, discarding\n",
- txport, seqid);
- dev_kfree_skb_any(skb);
- goto try_again;
- }
-
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
- /* Set the timestamp into the skb */
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_complete_tx_timestamp(skb_match, &shhwtstamps);
-
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
- }
-}
-EXPORT_SYMBOL(ocelot_get_txtstamp);
-
static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
u32 *rval)
{
@@ -1230,69 +1147,26 @@ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_drain_cpu_queue);
-int ocelot_fdb_add(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid)
+int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge)
{
- int pgid = port;
-
- if (port == ocelot->npi)
- pgid = PGID_CPU;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
- return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
+ return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
-int ocelot_fdb_del(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid)
+int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge)
{
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
return ocelot_mact_forget(ocelot, addr, vid);
}
EXPORT_SYMBOL(ocelot_fdb_del);
-int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data)
-{
- struct ocelot_dump_ctx *dump = data;
- u32 portid = NETLINK_CB(dump->cb->skb).portid;
- u32 seq = dump->cb->nlh->nlmsg_seq;
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
-
- if (dump->idx < dump->cb->args[2])
- goto skip;
-
- nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
- sizeof(*ndm), NLM_F_MULTI);
- if (!nlh)
- return -EMSGSIZE;
-
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
-
- if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
- goto nla_put_failure;
-
- nlmsg_end(dump->skb, nlh);
-
-skip:
- dump->idx++;
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(dump->skb, nlh);
- return -EMSGSIZE;
-}
-EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
-
/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry)
@@ -1406,6 +1280,12 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
is_static = (entry.type == ENTRYTYPE_LOCKED);
+ /* Hide the reserved VLANs used for
+ * VLAN-unaware bridging.
+ */
+ if (entry.vid > OCELOT_RSV_VLAN_RANGE_START)
+ entry.vid = 0;
+
err = cb(entry.mac, entry.vid, is_static, data);
if (err)
break;
@@ -1418,48 +1298,9 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_fdb_dump);
-static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
-}
-
-static void
-ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV4;
- trap->key.ipv4.dport.value = PTP_EV_PORT;
- trap->key.ipv4.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv6.dport.value = PTP_EV_PORT;
- trap->key.ipv6.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV4;
- trap->key.ipv4.dport.value = PTP_GEN_PORT;
- trap->key.ipv4.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv6.dport.value = PTP_GEN_PORT;
- trap->key.ipv6.dport.mask = 0xffff;
-}
-
-static int ocelot_trap_add(struct ocelot *ocelot, int port,
- unsigned long cookie,
- void (*populate)(struct ocelot_vcap_filter *f))
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f))
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
@@ -1485,6 +1326,8 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.cpu_copy_ena = true;
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
+ trap->take_ts = take_ts;
+ trap->is_trap = true;
new = true;
}
@@ -1504,8 +1347,7 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
return 0;
}
-static int ocelot_trap_del(struct ocelot *ocelot, int port,
- unsigned long cookie)
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
@@ -1524,307 +1366,52 @@ static int ocelot_trap_del(struct ocelot *ocelot, int port,
return ocelot_vcap_filter_replace(ocelot, trap);
}
-static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
-
- return ocelot_trap_add(ocelot, port, l2_cookie,
- ocelot_populate_l2_ptp_trap_key);
-}
-
-static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
-
- return ocelot_trap_del(ocelot, port, l2_cookie);
-}
-
-static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
- int err;
-
- err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
- ocelot_populate_ipv4_ptp_event_trap_key);
- if (err)
- return err;
-
- err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
- ocelot_populate_ipv4_ptp_general_trap_key);
- if (err)
- ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
-
- return err;
-}
-
-static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
- int err;
-
- err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
- err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
- return err;
-}
-
-static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
- int err;
-
- err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
- ocelot_populate_ipv6_ptp_event_trap_key);
- if (err)
- return err;
-
- err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
- ocelot_populate_ipv6_ptp_general_trap_key);
- if (err)
- ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
-
- return err;
-}
-
-static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
- int err;
-
- err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
- err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
- return err;
-}
-
-static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
- bool l2, bool l4)
-{
- int err;
-
- if (l2)
- err = ocelot_l2_ptp_trap_add(ocelot, port);
- else
- err = ocelot_l2_ptp_trap_del(ocelot, port);
- if (err)
- return err;
-
- if (l4) {
- err = ocelot_ipv4_ptp_trap_add(ocelot, port);
- if (err)
- goto err_ipv4;
-
- err = ocelot_ipv6_ptp_trap_add(ocelot, port);
- if (err)
- goto err_ipv6;
- } else {
- err = ocelot_ipv4_ptp_trap_del(ocelot, port);
-
- err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
- }
- if (err)
- return err;
-
- return 0;
-
-err_ipv6:
- ocelot_ipv4_ptp_trap_del(ocelot, port);
-err_ipv4:
- if (l2)
- ocelot_l2_ptp_trap_del(ocelot, port);
- return err;
-}
-
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
-{
- return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
- sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(ocelot_hwstamp_get);
-
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
- int err;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- /* Tx type sanity check */
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_ON:
- ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- break;
- case HWTSTAMP_TX_ONESTEP_SYNC:
- /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
- * need to update the origin time.
- */
- ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
- break;
- case HWTSTAMP_TX_OFF:
- ocelot_port->ptp_cmd = 0;
- break;
- default:
- return -ERANGE;
- }
-
- mutex_lock(&ocelot->ptp_lock);
-
- switch (cfg.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- l2 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- l2 = true;
- l4 = true;
- break;
- default:
- mutex_unlock(&ocelot->ptp_lock);
- return -ERANGE;
- }
-
- err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
- if (err) {
- mutex_unlock(&ocelot->ptp_lock);
- return err;
- }
-
- if (l2 && l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- else if (l2)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- else
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-
- /* Commit back the result & save it */
- memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
- mutex_unlock(&ocelot->ptp_lock);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(ocelot_hwstamp_set);
-
-void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
-{
- int i;
-
- if (sset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ocelot->num_stats; i++)
- memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
- ETH_GSTRING_LEN);
-}
-EXPORT_SYMBOL(ocelot_get_strings);
-
-static void ocelot_update_stats(struct ocelot *ocelot)
+static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
{
- int i, j;
-
- mutex_lock(&ocelot->stats_lock);
-
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+ u32 mask = 0;
+ int port;
- for (j = 0; j < ocelot->num_stats; j++) {
- u32 val;
- unsigned int idx = i * ocelot->num_stats + j;
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
- val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- ocelot->stats_layout[j].offset);
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- if (val < (ocelot->stats[idx] & U32_MAX))
- ocelot->stats[idx] += (u64)1 << 32;
+ if (!ocelot_port)
+ continue;
- ocelot->stats[idx] = (ocelot->stats[idx] &
- ~(u64)U32_MAX) + val;
- }
+ if (ocelot_port->bond == bond)
+ mask |= BIT(port);
}
- mutex_unlock(&ocelot->stats_lock);
-}
-
-static void ocelot_check_stats_work(struct work_struct *work)
-{
- struct delayed_work *del_work = to_delayed_work(work);
- struct ocelot *ocelot = container_of(del_work, struct ocelot,
- stats_work);
-
- ocelot_update_stats(ocelot);
-
- queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
- OCELOT_STATS_CHECK_DELAY);
-}
-
-void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
-{
- int i;
-
- /* check and update now */
- ocelot_update_stats(ocelot);
-
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
+ return mask;
}
-EXPORT_SYMBOL(ocelot_get_ethtool_stats);
-int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+/* The logical port number of a LAG is equal to the lowest numbered physical
+ * port ID present in that LAG. It may change if that port ever leaves the LAG.
+ */
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
{
- if (sset != ETH_SS_STATS)
- return -EOPNOTSUPP;
+ int bond_mask = ocelot_get_bond_mask(ocelot, bond);
- return ocelot->num_stats;
-}
-EXPORT_SYMBOL(ocelot_get_sset_count);
-
-int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info)
-{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- return 0;
- }
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
- BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+ if (!bond_mask)
+ return -ENOENT;
- return 0;
+ return __ffs(bond_mask);
}
-EXPORT_SYMBOL(ocelot_get_ts_info);
+EXPORT_SYMBOL_GPL(ocelot_bond_get_id);
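
Because the LAG's logical port is defined as its lowest-numbered member, the ID falls straight out of the member bit mask with __ffs(): ports {1, 2, 5} give mask 0b100110, and __ffs() returns 1. Restated with a demo mask:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Logical LAG ID = index of the lowest set bit in the member mask. */
static int demo_bond_get_id(unsigned long bond_mask)
{
	if (!bond_mask)
		return -ENOENT;		/* bond has no member ports */

	return __ffs(bond_mask);	/* e.g. 0b100110 -> 1 */
}
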
-static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
+/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port.
+ * Note that when CPU ports are in a LAG, the user ports are assigned to the
+ * 'primary' CPU port, the one whose physical port number gives the logical
+ * port number of the LAG.
+ *
+ * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG
+ * (to which no user port is assigned), but it appears that forwarding from
+ * this secondary CPU port looks at the PGID_SRC associated with the logical
+ * port ID that it's assigned to, which *is* configured properly.
+ */
+static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
+ struct ocelot_port *cpu)
{
u32 mask = 0;
int port;
@@ -1835,13 +1422,34 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
if (!ocelot_port)
continue;
- if (ocelot_port->bond == bond)
+ if (ocelot_port->dsa_8021q_cpu == cpu)
mask |= BIT(port);
}
+ if (cpu->bond)
+ mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
+
return mask;
}
+/* Returns the DSA tag_8021q CPU port that the given port is assigned to,
+ * or the bit mask of CPU ports if said CPU port is in a LAG.
+ */
+u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu;
+
+ if (!cpu_port)
+ return 0;
+
+ if (cpu_port->bond)
+ return ocelot_get_bond_mask(ocelot, cpu_port->bond);
+
+ return BIT(cpu_port->index);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
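[Editorial note: a sketch of the two return shapes of this helper, with hypothetical port numbers.]
	/* User port 0 assigned to CPU port 8:
	 * - CPU port 8 standalone:        returns BIT(8)
	 * - CPU ports 8 and 9 in a LAG:   returns BIT(8) | BIT(9), so the
	 *   forwarding mask lets port 0 reach either LAG member.
	 */
	u32 cpu_mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, 0);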
+
u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
@@ -1871,28 +1479,8 @@ u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
}
EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
-u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
- u32 mask = 0;
- int port;
-
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- if (!ocelot_port)
- continue;
-
- if (ocelot_port->is_dsa_8021q_cpu)
- mask |= BIT(port);
- }
-
- return mask;
-}
-EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask);
-
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
-{
- unsigned long cpu_fwd_mask;
int port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -1904,15 +1492,6 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
if (joining && ocelot->ops->cut_through_fwd)
ocelot->ops->cut_through_fwd(ocelot);
- /* If a DSA tag_8021q CPU exists, it needs to be included in the
- * regular forwarding path of the front ports regardless of whether
- * those are bridged or standalone.
- * If DSA tag_8021q is not used, this returns 0, which is fine because
- * the hardware-based CPU port module can be a destination for packets
- * even if it isn't part of PGID_SRC.
- */
- cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
-
/* Apply FWD mask. The loop is needed to add/remove the current port as
* a source for the other ports.
*/
@@ -1925,17 +1504,19 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
mask = 0;
} else if (ocelot_port->is_dsa_8021q_cpu) {
/* The DSA tag_8021q CPU ports need to be able to
- * forward packets to all other ports except for
- * themselves
+ * forward packets to all ports assigned to them.
*/
- mask = GENMASK(ocelot->num_phys_ports - 1, 0);
- mask &= ~cpu_fwd_mask;
+ mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot,
+ ocelot_port);
} else if (ocelot_port->bridge) {
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, port);
- mask |= cpu_fwd_mask;
mask &= ~BIT(port);
+
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
+
if (bond)
mask &= ~ocelot_get_bond_mask(ocelot, bond);
} else {
@@ -1943,7 +1524,8 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
* ports (if those exist), or to the hardware CPU port
* module otherwise.
*/
- mask = cpu_fwd_mask;
+ mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
}
ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
@@ -1959,7 +1541,94 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
if (!joining && ocelot->ops->cut_through_fwd)
ocelot->ops->cut_through_fwd(ocelot);
}
-EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
+
+/* Update PGID_CPU which is the destination port mask used for whitelisting
+ * unicast addresses filtered towards the host. In the normal and NPI modes,
+ * this points to the analyzer entry for the CPU port module, while in DSA
+ * tag_8021q mode, it is a bit mask of all active CPU ports.
+ * PGID_SRC will take care of forwarding a packet from one user port to
+ * no more than a single CPU port.
+ */
+static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
+{
+ int pgid_cpu = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu)
+ continue;
+
+ pgid_cpu |= BIT(port);
+ }
+
+ if (!pgid_cpu)
+ pgid_cpu = BIT(ocelot->num_phys_ports);
+
+ ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
+}
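[Editorial note: concrete values for the PGID_CPU computation above, assuming a hypothetical 6-port switch where the CPU port module sits at index 6.]
	/* tag_8021q CPU ports 4 and 5 active: */
	int pgid_cpu = BIT(4) | BIT(5);           /* == 0x30 */
	/* No tag_8021q CPU ports: fall back to the CPU port module entry: */
	pgid_cpu = BIT(6);                        /* BIT(ocelot->num_phys_ports) */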
+
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
+{
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
+ u16 vid;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ cpu_port->is_dsa_8021q_cpu = true;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_add(ocelot, cpu, vid, true);
+
+ ocelot_update_pgid_cpu(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu);
+
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
+{
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
+ u16 vid;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ cpu_port->is_dsa_8021q_cpu = false;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
+
+ ocelot_update_pgid_cpu(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu);
+
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
+ int cpu)
+{
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot->ports[port]->dsa_8021q_cpu = NULL;
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu);
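[Editorial note: taken together, a hypothetical caller such as a DSA driver switching tagging protocols would pair the four helpers above roughly as follows. Error handling and the surrounding DSA plumbing are omitted; the port numbers are invented.]
	static void example_enable_tag_8021q(struct ocelot *ocelot)
	{
		ocelot_port_setup_dsa_8021q_cpu(ocelot, 8);      /* 8 becomes a tag_8021q CPU port */
		ocelot_port_assign_dsa_8021q_cpu(ocelot, 0, 8);  /* user port 0 -> CPU port 8 */
	}

	static void example_disable_tag_8021q(struct ocelot *ocelot)
	{
		ocelot_port_unassign_dsa_8021q_cpu(ocelot, 0);
		ocelot_port_teardown_dsa_8021q_cpu(ocelot, 8);
	}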
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
@@ -2105,15 +1774,16 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr,
}
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
- if (port == ocelot->npi)
- port = ocelot->num_phys_ports;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -2161,15 +1831,16 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
EXPORT_SYMBOL(ocelot_port_mdb_add);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
- if (port == ocelot->npi)
- port = ocelot->num_phys_ports;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -2204,18 +1875,30 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
-void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge)
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge, int bridge_num,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
+ err = ocelot_single_vlan_aware_bridge(ocelot, extack);
+ if (err)
+ return err;
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->bridge = bridge;
+ ocelot_port->bridge_num = bridge_num;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
+
+ if (br_vlan_enabled(bridge))
+ return 0;
+
+ return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
@@ -2226,7 +1909,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
mutex_lock(&ocelot->fwd_domain_lock);
+ if (!br_vlan_enabled(bridge))
+ ocelot_del_vlan_unaware_pvid(ocelot, port, bridge);
+
ocelot_port->bridge = NULL;
+ ocelot_port->bridge_num = -1;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
@@ -2335,7 +2022,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
bond = ocelot_port->bond;
if (bond) {
- int lag = __ffs(ocelot_get_bond_mask(ocelot, bond));
+ int lag = ocelot_bond_get_id(ocelot, bond);
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
@@ -2350,12 +2037,117 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
}
}
+static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc,
+ unsigned long from_mask, unsigned long to_mask)
+{
+ unsigned char addr[ETH_ALEN];
+ struct ocelot_pgid *pgid;
+ u16 vid = mc->vid;
+
+ dev_dbg(ocelot->dev,
+ "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n",
+ mc->addr, mc->vid, from_mask, to_mask);
+
+ /* First clean up the current port mask from hardware, because
+ * we'll be modifying it.
+ */
+ ocelot_pgid_free(ocelot, mc->pgid);
+ ocelot_encode_ports_to_mdb(addr, mc);
+ ocelot_mact_forget(ocelot, addr, vid);
+
+ mc->ports &= ~from_mask;
+ mc->ports |= to_mask;
+
+ pgid = ocelot_mdb_get_pgid(ocelot, mc);
+ if (IS_ERR(pgid)) {
+ dev_err(ocelot->dev,
+ "Cannot allocate PGID for mdb %pM vid %d\n",
+ mc->addr, mc->vid);
+ devm_kfree(ocelot->dev, mc);
+ return PTR_ERR(pgid);
+ }
+ mc->pgid = pgid;
+
+ ocelot_encode_ports_to_mdb(addr, mc);
+
+ if (mc->entry_type != ENTRYTYPE_MACv4 &&
+ mc->entry_type != ENTRYTYPE_MACv6)
+ ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
+ pgid->index);
+
+ return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
+ mc->entry_type);
+}
+
+int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
+ unsigned long to_mask)
+{
+ struct ocelot_multicast *mc;
+ int err;
+
+ list_for_each_entry(mc, &ocelot->multicast, list) {
+ if (!(mc->ports & from_mask))
+ continue;
+
+ err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs);
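[Editorial note: a hedged usage sketch of the export above. A caller that moves host-filtered multicast from one CPU port to another does it in a single call; the function and port names are hypothetical.]
	static int example_change_cpu_port(struct ocelot *ocelot,
					   int old_cpu, int new_cpu)
	{
		/* Move every MDB entry that contains old_cpu over to new_cpu */
		return ocelot_migrate_mdbs(ocelot, BIT(old_cpu), BIT(new_cpu));
	}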
+
+/* Documentation for PORTID_VAL says:
+ * Logical port number for front port. If port is not a member of a LLAG,
+ * then PORTID must be set to the physical port number.
+ * If port is a member of a LLAG, then PORTID must be set to the common
+ * PORTID_VAL used for all member ports of the LLAG.
+ * The value must not exceed the number of physical ports on the device.
+ *
+ * This means we have little choice but to migrate FDB entries pointing towards
+ * a logical port when that changes.
+ */
+static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
+ struct net_device *bond,
+ int lag)
+{
+ struct ocelot_lag_fdb *fdb;
+ int err;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
+ if (fdb->bond != bond)
+ continue;
+
+ err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
+ if (err) {
+ dev_err(ocelot->dev,
+ "failed to delete LAG %s FDB %pM vid %d: %pe\n",
+ bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
+ }
+
+ err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
+ ENTRYTYPE_LOCKED);
+ if (err) {
+ dev_err(ocelot->dev,
+ "failed to migrate LAG %s FDB %pM vid %d: %pe\n",
+ bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
+ }
+ }
+}
+
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return -EOPNOTSUPP;
+ }
mutex_lock(&ocelot->fwd_domain_lock);
@@ -2374,14 +2166,23 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
+ int old_lag_id, new_lag_id;
+
mutex_lock(&ocelot->fwd_domain_lock);
+ old_lag_id = ocelot_bond_get_id(ocelot, bond);
+
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
+ new_lag_id = ocelot_bond_get_id(ocelot, bond);
+
+ if (new_lag_id >= 0 && old_lag_id != new_lag_id)
+ ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
+
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
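[Editorial note: a worked scenario for the old/new LAG ID handoff above, with hypothetical port numbers.]
	/* bond members {1, 3}: ocelot_bond_get_id() == 1  (old_lag_id)
	 * port 1 leaves:       ocelot_bond_get_id() == 3  -> migrate FDBs 1 -> 3
	 * port 3 leaves too:   ocelot_bond_get_id() == -ENOENT -> skip migration
	 */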
@@ -2390,13 +2191,83 @@ void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->lag_tx_active = lag_tx_active;
/* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_change);
+int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge)
+{
+ struct ocelot_lag_fdb *fdb;
+ int lag, err;
+
+ fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
+ if (!fdb)
+ return -ENOMEM;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ ether_addr_copy(fdb->addr, addr);
+ fdb->vid = vid;
+ fdb->bond = bond;
+
+ lag = ocelot_bond_get_id(ocelot, bond);
+
+ err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
+ if (err) {
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ kfree(fdb);
+ return err;
+ }
+
+ list_add_tail(&fdb->list, &ocelot->lag_fdbs);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
+
+int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge)
+{
+ struct ocelot_lag_fdb *fdb, *tmp;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
+ if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid ||
+ fdb->bond != bond)
+ continue;
+
+ ocelot_mact_forget(ocelot, addr, vid);
+ list_del(&fdb->list);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ kfree(fdb);
+
+ return 0;
+ }
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
+
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
@@ -2490,6 +2361,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
@@ -2535,6 +2408,198 @@ void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_bridge_flags);
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
+{
+ int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+
+ return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
+
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
+{
+ if (prio >= OCELOT_NUM_TC)
+ return -ERANGE;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
+ ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
+ ANA_PORT_QOS_CFG,
+ port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
+
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
+{
+ int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+
+ /* Return error if DSCP prioritization isn't enabled */
+ if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
+ return -EOPNOTSUPP;
+
+ if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
+ dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
+ /* Re-read ANA_DSCP_CFG for the translated DSCP */
+ dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+ }
+
+ /* If the DSCP value is not trusted, the QoS classification falls back
+ * to VLAN PCP or port-based default.
+ */
+ if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
+ return -EOPNOTSUPP;
+
+ return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
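[Editorial note: the translation step above is easiest to follow with a hypothetical register state; the concrete values here are invented for illustration.]
	/* Suppose the port has QOS_DSCP_ENA and DSCP_TRANSLATE_ENA set, and
	 * ANA_DSCP_CFG[46] translates DSCP 46 to 40. The lookup is redone for
	 * DSCP 40, and if ANA_DSCP_CFG[40] has DSCP_TRUST_ENA with
	 * QOS_DSCP_VAL == 5, this returns priority 5. Without the translate
	 * bit, ANA_DSCP_CFG[46] itself would have been consulted.
	 */
	int prio = ocelot_port_get_dscp_prio(ocelot, port, 46);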
+
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+ int mask, val;
+
+ if (prio >= OCELOT_NUM_TC)
+ return -ERANGE;
+
+ /* There is at least one app table priority (this one), so we need to
+ * make sure DSCP prioritization is enabled on the port.
+ * Also make sure DSCP translation is disabled
+ * (dcbnl doesn't support it).
+ */
+ mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+ ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
+ ANA_PORT_QOS_CFG, port);
+
+ /* Trust this DSCP value and map it to the given QoS class */
+ val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio);
+
+ ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
+
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+ int mask, i;
+
+ /* During a "dcb app replace" command, the new app table entry will be
+ * added first, then the old one will be deleted. But the hardware only
+ * supports one QoS class per DSCP value (duh), so if we blindly delete
+ * the app table entry for this DSCP value, we end up deleting the
+ * entry with the new priority. Avoid that by checking whether user
+ * space wants to delete the priority which is currently configured, or
+ * something else which is no longer current.
+ */
+ if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
+ return 0;
+
+ /* Untrust this DSCP value */
+ ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);
+
+ for (i = 0; i < 64; i++) {
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);
+
+ /* There are still app table entries on the port, so we need to
+ * keep DSCP enabled, nothing to do.
+ */
+ if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
+ return 0;
+ }
+
+ /* Disable DSCP QoS classification if there isn't any trusted
+ * DSCP value left.
+ */
+ mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+ ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
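[Editorial note: the replace-ordering guard above corresponds to the following hypothetical dcbnl-driven sequence, sketched as direct calls into these helpers with invented values.]
	/* "dcb app replace" adds the new mapping first, then deletes the old
	 * one. Hypothetically, DSCP 46 moves from prio 7 to prio 5:
	 */
	ocelot_port_add_dscp_prio(ocelot, port, 46, 5); /* ANA_DSCP_CFG[46] -> 5 */
	ocelot_port_del_dscp_prio(ocelot, port, 46, 7); /* prio 7 is stale: no-op */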
+
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_mirror *m = ocelot->mirror;
+
+ if (m) {
+ if (m->to != to) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring already configured towards different egress port");
+ return ERR_PTR(-EBUSY);
+ }
+
+ refcount_inc(&m->refcount);
+ return m;
+ }
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return ERR_PTR(-ENOMEM);
+
+ m->to = to;
+ refcount_set(&m->refcount, 1);
+ ocelot->mirror = m;
+
+ /* Program the mirror port to hardware */
+ ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
+
+ return m;
+}
+
+void ocelot_mirror_put(struct ocelot *ocelot)
+{
+ struct ocelot_mirror *m = ocelot->mirror;
+
+ if (!refcount_dec_and_test(&m->refcount))
+ return;
+
+ ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
+ ocelot->mirror = NULL;
+ kfree(m);
+}
+
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack);
+
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ if (ingress) {
+ ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG, from);
+ } else {
+ ocelot_rmw(ocelot, BIT(from), BIT(from),
+ ANA_EMIRRORPORTS);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
+
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
+{
+ if (ingress) {
+ ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG, from);
+ } else {
+ ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);
+ }
+
+ ocelot_mirror_put(ocelot);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
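[Editorial note: because the chip has a single mirror destination, the refcounted ocelot_mirror above lets several sources share it. A sketch of the resulting semantics, with hypothetical port numbers; passing a NULL extack is only for brevity here.]
	/* Two sources mirroring to port 7 share one ocelot_mirror (refcount 2) */
	ocelot_port_mirror_add(ocelot, 0, 7, true, NULL);   /* ingress of port 0 */
	ocelot_port_mirror_add(ocelot, 1, 7, false, NULL);  /* egress of port 1 */
	/* A third session towards a different port is refused: */
	int err = ocelot_port_mirror_add(ocelot, 2, 5, true, NULL); /* -EBUSY */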
+
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2629,7 +2694,7 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
@@ -2652,7 +2717,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- char queue_name[32];
int i, ret;
u32 port;
@@ -2664,33 +2728,27 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
- sizeof(u64), GFP_KERNEL);
- if (!ocelot->stats)
- return -ENOMEM;
-
- mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
+ mutex_init(&ocelot->tas_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
- snprintf(queue_name, sizeof(queue_name), "%s-stats",
- dev_name(ocelot->dev));
- ocelot->stats_queue = create_singlethread_workqueue(queue_name);
- if (!ocelot->stats_queue)
- return -ENOMEM;
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
- if (!ocelot->owq) {
- destroy_workqueue(ocelot->stats_queue);
+ if (!ocelot->owq)
return -ENOMEM;
+
+ ret = ocelot_stats_init(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->owq);
+ return ret;
}
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
INIT_LIST_HEAD(&ocelot->vlans);
+ INIT_LIST_HEAD(&ocelot->lag_fdbs);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
@@ -2796,20 +2854,14 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
- INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
- queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
- OCELOT_STATS_CHECK_DELAY);
-
return 0;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- cancel_delayed_work(&ocelot->stats_work);
- destroy_workqueue(ocelot->stats_queue);
+ ocelot_stats_deinit(ocelot);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index bf4eff6d7086..70dbd9c4e512 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -21,11 +21,12 @@
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
-#define OCELOT_VLAN_UNAWARE_PVID 0
+#define OCELOT_STANDALONE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
@@ -37,7 +38,8 @@
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
-
+ unsigned long ingress_mirred_id;
+ unsigned long egress_mirred_id;
unsigned long police_id;
};
@@ -46,17 +48,9 @@ struct ocelot_port_private {
struct net_device *dev;
struct phylink *phylink;
struct phylink_config phylink_config;
- u8 chip_port;
struct ocelot_port_tc tc;
};
-struct ocelot_dump_ctx {
- struct net_device *dev;
- struct sk_buff *skb;
- struct netlink_callback *cb;
- int idx;
-};
-
/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
* possibilities of egress port masks for L2 multicast traffic.
* For a switch with 9 user ports, there are 512 possible port masks, but the
@@ -80,8 +74,9 @@ struct ocelot_multicast {
struct ocelot_pgid *pgid;
};
-int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data);
+int ocelot_bridge_num_find(struct ocelot *ocelot,
+ const struct net_device *bridge);
+
int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type);
@@ -102,6 +97,18 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f));
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie);
+
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+ struct netlink_ext_ack *extack);
+void ocelot_mirror_put(struct ocelot *ocelot);
+
+int ocelot_stats_init(struct ocelot *ocelot);
+void ocelot_stats_deinit(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index dffa597bffe6..8e3894cf5f7c 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -94,19 +94,18 @@ static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}
+static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
+{
+ return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+}
+
static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
- unsigned long timeout;
u32 safe;
- timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
- do {
- safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
- if (safe & BIT(chan))
- return 0;
- } while (time_after(jiffies, timeout));
-
- return -ETIMEDOUT;
+ return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
+ safe & BIT(chan), 0,
+ OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
}
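[Editorial note: the open-coded loop removed above had an inverted exit condition: time_after(jiffies, timeout) only becomes true once the deadline has passed, so the do-while body ran at most once and usually reported -ETIMEDOUT on a busy channel. readx_poll_timeout_atomic() expresses the intended poll; roughly, and assuming the generic linux/iopoll.h contract with delay_us == 0 (busy-poll), it behaves like this sketch, where elapsed_us() stands in for the macro's ktime bookkeeping:]
	u64 deadline = elapsed_us() + OCELOT_FDMA_CH_SAFE_TIMEOUT_US;
	for (;;) {
		safe = ocelot_fdma_read_ch_safe(ocelot);
		if (safe & BIT(chan))
			return 0;
		if (elapsed_us() > deadline)
			return -ETIMEDOUT;   /* real macro re-reads once more */
		cpu_relax();
	}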
static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
@@ -799,8 +798,8 @@ void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
return;
fdma->ndev = dev;
- netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll,
- OCELOT_FDMA_WEIGHT);
+ netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll,
+ OCELOT_FDMA_WEIGHT);
}
void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index beb9379424c0..7c0897e779dc 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -6,6 +6,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_police.h"
#include "ocelot_vcap.h"
/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
@@ -60,6 +61,12 @@ static int ocelot_chain_to_block(int chain, bool ingress)
*/
static int ocelot_chain_to_lookup(int chain)
{
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
return (chain / VCAP_LOOKUP) % 10;
}
@@ -68,7 +75,15 @@ static int ocelot_chain_to_lookup(int chain)
*/
static int ocelot_chain_to_pag(int chain)
{
- int lookup = ocelot_chain_to_lookup(chain);
+ int lookup;
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
+ lookup = ocelot_chain_to_lookup(chain);
/* calculate PAG value as chain index relative to the first PAG */
return chain - VCAP_IS2_CHAIN(lookup, 0);
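[Editorial note: a worked decode of the chain arithmetic above, assuming the driver's arbitrarily chosen encoding constants VCAP_BLOCK = 10000 and VCAP_LOOKUP = 1000, so that VCAP_IS2_CHAIN(lookup, pag) == 20000 + lookup * 1000 + pag; if those constants differ, the numbers shift accordingly.]
	int chain = 21005;                 /* VCAP_IS2_CHAIN(1, 5) */
	int lookup = (chain / 1000) % 10;  /* 21 % 10 == 1 */
	int pag = chain - (20000 + lookup * 1000); /* 21005 - 21000 == 5 */
	/* chain 0 bypasses both computations and maps to lookup 0, PAG 0,
	 * preserving the older single-chain behavior.
	 */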
@@ -217,6 +232,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
+ const struct flow_action *action = &f->rule->action;
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
@@ -244,7 +260,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->goto_target = -1;
filter->type = OCELOT_VCAP_FILTER_DUMMY;
- flow_action_for_each(i, a, &f->rule->action) {
+ flow_action_for_each(i, a, action) {
switch (a->id) {
case FLOW_ACTION_DROP:
if (filter->block_id != VCAP_IS2) {
@@ -263,10 +279,27 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.pol_ix = OCELOT_POLICER_DISCARD;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_ACCEPT:
+ if (filter->block_id != VCAP_ES0 &&
+ filter->block_id != VCAP_IS1 &&
+ filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Accept action can only be offloaded to VCAP chains");
+ return -EOPNOTSUPP;
+ }
+ if (filter->block_id != VCAP_ES0 &&
+ filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
case FLOW_ACTION_TRAP:
- if (filter->block_id != VCAP_IS2) {
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Trap action can only be offloaded to VCAP IS2");
+ "Trap action can only be offloaded to VCAP IS2 lookup 0");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
@@ -279,6 +312,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->is_trap = true;
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
@@ -296,11 +330,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- if (a->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ err = ocelot_policer_validate(action, a, extack);
+ if (err)
+ return err;
+
filter->action.police_ena = true;
pol_ix = a->hw_index + ocelot->vcap_pol.base;
@@ -342,6 +376,27 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.port_mask = BIT(egress_port);
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_MIRRED:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->ops->netdev_to_port(a->dev);
+ if (egress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+ filter->egress_port.value = egress_port;
+ filter->action.mirror_ena = true;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
case FLOW_ACTION_VLAN_POP:
if (filter->block_id != VCAP_IS1) {
NL_SET_ERR_MSG_MOD(extack,
@@ -559,13 +614,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
}
- if (filter->block_id == VCAP_IS1 &&
- !is_zero_ether_addr(match.mask->dst)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Key type S1_NORMAL cannot match on destination MAC");
- return -EOPNOTSUPP;
- }
-
/* The hw supports MAC matches only for the MAC_ETYPE key,
* therefore if other matches (port, tcp flags, etc) are added
* then just bail out
@@ -580,6 +628,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 &&
+ !is_zero_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination MAC");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
ether_addr_copy(filter->key.etype.dmac.value,
match.key->dst);
@@ -805,13 +861,34 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
int chain = f->common.chain_index;
- int ret;
+ int block_id, ret;
if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
return -EOPNOTSUPP;
}
+ block_id = ocelot_chain_to_block(chain, ingress);
+ if (block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id],
+ f->cookie, true);
+ if (filter) {
+ /* Filter already exists on other ports */
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters");
+ return -EOPNOTSUPP;
+ }
+
+ filter->ingress_port_mask |= BIT(port);
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter didn't exist, create it now */
filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
@@ -874,6 +951,12 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_del(ocelot, filter);
+ if (ingress) {
+ filter->ingress_port_mask &= ~BIT(port);
+ if (filter->ingress_port_mask)
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 7390fa3980ec..2067382d0ee1 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -10,6 +10,19 @@
#include "ocelot.h"
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
+ int count)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ return regmap_bulk_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ buf, count);
+}
+EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix);
+
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
{
u16 target = reg >> TARGET_OFFSET;
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 1fa58546abdc..3ccec488a304 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -60,7 +60,7 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = 1;
- filter->id.cookie = src_port;
+ filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port);
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -77,55 +77,46 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
return err;
}
-static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port,
- int prio, unsigned long cookie)
+static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter)
{
const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
- struct ocelot_vcap_filter *filter;
- int err;
-
- filter = kzalloc(sizeof(*filter), GFP_KERNEL);
- if (!filter)
- return -ENOMEM;
- filter->key_type = OCELOT_VCAP_KEY_ETYPE;
- filter->prio = prio;
- filter->id.cookie = cookie;
- filter->id.tc_offload = false;
- filter->block_id = VCAP_IS2;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- filter->ingress_port_mask = BIT(port);
/* Here it is possible to use either the control or the test dmac
* because the mask doesn't cover the LSB
*/
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
- filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- filter->action.port_mask = 0x0;
- filter->action.cpu_copy_ena = true;
- filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+}
- err = ocelot_vcap_filter_add(ocelot, filter, NULL);
- if (err)
- kfree(filter);
+static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
- return err;
+ return ocelot_trap_add(ocelot, port, cookie, false,
+ ocelot_populate_mrp_trap_key);
+}
+
+static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, cookie);
}
static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
- ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_STANDALONE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_STANDALONE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
@@ -186,7 +177,7 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
ocelot_mrp_save_mac(ocelot, ocelot_port);
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
- return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port);
+ return ocelot_mrp_trap_add(ocelot, port);
dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
if (dst_port == -1)
@@ -196,10 +187,10 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
if (err)
return err;
- err = ocelot_mrp_copy_add_vcap(ocelot, port, 2,
- port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_add(ocelot, port);
if (err) {
- ocelot_mrp_del_vcap(ocelot, port);
+ ocelot_mrp_del_vcap(ocelot,
+ OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
return err;
}
@@ -211,7 +202,7 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int i;
+ int err, i;
if (!ocelot_port)
return -EOPNOTSUPP;
@@ -222,8 +213,11 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- ocelot_mrp_del_vcap(ocelot, port);
- ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
for (i = 0; i < ocelot->num_phys_ports; ++i) {
ocelot_port = ocelot->ports[i];
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 8115c3db252e..50858cc10fef 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -14,11 +14,21 @@
#include <linux/phy/phy.h>
#include <net/pkt_cls.h>
#include "ocelot.h"
+#include "ocelot_police.h"
#include "ocelot_vcap.h"
#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
+struct ocelot_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static bool ocelot_netdevice_dev_check(const struct net_device *dev);
+
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
{
return devlink_priv(dlp->devlink);
@@ -188,7 +198,7 @@ static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return &ocelot->devlink_ports[port];
}
@@ -198,7 +208,7 @@ int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
bool ingress)
{
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
if (!ingress)
return -EOPNOTSUPP;
@@ -215,15 +225,15 @@ int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
}
}
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
- struct tc_cls_matchall_offload *f,
- bool ingress)
+static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
- struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *action = &f->rule->action.entries[0];
struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
- struct flow_action_entry *action;
- int port = priv->chip_port;
+ int port = priv->port.index;
int err;
if (!ingress) {
@@ -231,6 +241,119 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
return -EOPNOTSUPP;
}
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ err = ocelot_policer_validate(&f->rule->action, action, extack);
+ if (err)
+ return err;
+
+ pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = action->police.burst;
+
+ err = ocelot_port_policer_add(ocelot, port, &pol);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
+ return err;
+ }
+
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
+
+ return 0;
+}
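[Editorial note: the rate conversion above turns the flow action's bytes per second into the policer's kilobits per second; a quick check of the arithmetic with an invented 100 Mbit/s policer.]
	/* tc passes rate_bytes_ps = 12500000 for 100 Mbit/s:
	 * 12500000 / 1000 = 12500, then * 8 = 100000 kbps = 100 Mbps.
	 */
	u32 rate_kbps = (u32)div_u64(12500000ULL, 1000) * 8; /* == 100000 */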
+
+static int ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action *action = &f->rule->action;
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port_private *other_priv;
+ const struct flow_action_entry *a;
+ int err;
+
+ if (f->common.protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_check(action, extack))
+ return -EOPNOTSUPP;
+
+ a = &action->entries[0];
+ if (!a->dev)
+ return -EINVAL;
+
+ if (!ocelot_netdevice_dev_check(a->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+
+ other_priv = netdev_priv(a->dev);
+
+ err = ocelot_port_mirror_add(ocelot, priv->port.index,
+ other_priv->port.index, ingress, extack);
+ if (err)
+ return err;
+
+ if (ingress)
+ priv->tc.ingress_mirred_id = f->cookie;
+ else
+ priv->tc.egress_mirred_id = f->cookie;
+ priv->tc.offload_cnt++;
+
+ return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+ int err;
+
+ err = ocelot_port_policer_del(ocelot, port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not delete policer");
+ return err;
+ }
+
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
+
+ return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ ocelot_port_mirror_del(ocelot, port, ingress);
+
+ if (ingress)
+ priv->tc.ingress_mirred_id = 0;
+ else
+ priv->tc.egress_mirred_id = 0;
+ priv->tc.offload_cnt--;
+
+ return 0;
+}
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *action;
+
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
if (!flow_offload_has_one_action(&f->rule->action)) {
@@ -241,54 +364,41 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
- "Rate limit is not supported on shared blocks");
+ "Matchall offloads not supported on shared blocks");
return -EOPNOTSUPP;
}
action = &f->rule->action.entries[0];
- if (action->id != FLOW_ACTION_POLICE) {
+ switch (action->id) {
+ case FLOW_ACTION_POLICE:
+ return ocelot_setup_tc_cls_matchall_police(priv, f,
+ ingress,
+ extack);
+ break;
+ case FLOW_ACTION_MIRRED:
+ return ocelot_setup_tc_cls_matchall_mirred(priv, f,
+ ingress,
+ extack);
+ default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
- if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
- NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported");
- return -EEXIST;
- }
-
- if (action->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
-
- pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
- pol.burst = action->police.burst;
-
- err = ocelot_port_policer_add(ocelot, port, &pol);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
- return err;
- }
-
- priv->tc.police_id = f->cookie;
- priv->tc.offload_cnt++;
- return 0;
+ break;
case TC_CLSMATCHALL_DESTROY:
- if (priv->tc.police_id != f->cookie)
+ action = &f->rule->action.entries[0];
+
+ if (f->cookie == priv->tc.police_id)
+ return ocelot_del_tc_cls_matchall_police(priv, extack);
+ else if (f->cookie == priv->tc.ingress_mirred_id ||
+ f->cookie == priv->tc.egress_mirred_id)
+ return ocelot_del_tc_cls_matchall_mirred(priv, ingress,
+ extack);
+ else
return -ENOENT;
- err = ocelot_port_policer_del(ocelot, port);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer");
- return err;
- }
- priv->tc.police_id = 0;
- priv->tc.offload_cnt--;
- return 0;
+ break;
case TC_CLSMATCHALL_STATS:
default:
return -EOPNOTSUPP;
@@ -394,7 +504,7 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
int ret;
ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
@@ -412,14 +522,14 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
int ret;
/* 8021q removes VID 0 on module unload for all interfaces
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == OCELOT_VLAN_UNAWARE_PVID)
+ if (vid == OCELOT_STANDALONE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -455,7 +565,7 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
u32 rew_op = 0;
if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
@@ -559,7 +669,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
+ w.forget.vid = OCELOT_STANDALONE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -573,7 +683,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
+ w.learn.vid = OCELOT_STANDALONE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -608,9 +718,9 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_STANDALONE_PVID);
eth_hw_addr_set(dev, addr->sa_data);
return 0;
@@ -621,38 +731,9 @@ static void ocelot_get_stats64(struct net_device *dev,
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
-
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
-
- /* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
- stats->rx_dropped = dev->stats.rx_dropped;
-
- /* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ int port = priv->port.index;
+
+ return ocelot_port_get_stats64(ocelot, port, stats);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -662,21 +743,67 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->port.index;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge);
}
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->port.index;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge);
+}
+
+static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
+{
+ struct ocelot_dump_ctx *dump = data;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
}
static int ocelot_port_fdb_dump(struct sk_buff *skb,
@@ -692,7 +819,7 @@ static int ocelot_port_fdb_dump(struct sk_buff *skb,
.cb = cb,
.idx = *idx,
};
- int port = priv->chip_port;
+ int port = priv->port.index;
int ret;
ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
@@ -734,7 +861,7 @@ static int ocelot_set_features(struct net_device *dev,
netdev_features_t changed = dev->features ^ features;
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
priv->tc.offload_cnt) {
@@ -753,7 +880,7 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
/* If the attached PHY device isn't capable of timestamping operations,
* use our own (when possible).
@@ -776,7 +903,7 @@ static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu);
+ ocelot_port_set_maxlen(ocelot, priv->port.index, new_mtu);
WRITE_ONCE(dev->mtu, new_mtu);
return 0;
@@ -829,7 +956,7 @@ int ocelot_netdev_to_port(struct net_device *dev)
priv = netdev_priv(dev);
- return priv->chip_port;
+ return priv->port.index;
}
static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
@@ -837,7 +964,7 @@ static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
{
struct ocelot_port_private *priv = netdev_priv(netdev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
ocelot_get_strings(ocelot, port, sset, data);
}
@@ -848,7 +975,7 @@ static void ocelot_port_get_ethtool_stats(struct net_device *dev,
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
ocelot_get_ethtool_stats(ocelot, port, data);
}
@@ -857,7 +984,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return ocelot_get_sset_count(ocelot, port, sset);
}
@@ -867,7 +994,7 @@ static int ocelot_port_get_ts_info(struct net_device *dev,
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
if (!ocelot->ptp)
return ethtool_op_get_ts_info(dev, info);
@@ -919,7 +1046,7 @@ static int ocelot_port_attr_set(struct net_device *dev, const void *ctx,
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
int err = 0;
if (ctx && ctx != priv)
@@ -960,7 +1087,7 @@ static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged, extack);
}
@@ -986,9 +1113,9 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_del_mdb(struct net_device *dev,
@@ -997,9 +1124,9 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_mrp_add(struct net_device *dev,
@@ -1008,7 +1135,7 @@ static int ocelot_port_obj_mrp_add(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return ocelot_mrp_add(ocelot, port, mrp);
}
@@ -1019,7 +1146,7 @@ static int ocelot_port_obj_mrp_del(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return ocelot_mrp_del(ocelot, port, mrp);
}
@@ -1031,7 +1158,7 @@ ocelot_port_obj_mrp_add_ring_role(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}
@@ -1043,7 +1170,7 @@ ocelot_port_obj_mrp_del_ring_role(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
@@ -1173,6 +1300,33 @@ static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
return 0;
}
+static int ocelot_bridge_num_get(struct ocelot *ocelot,
+ const struct net_device *bridge_dev)
+{
+ int bridge_num = ocelot_bridge_num_find(ocelot, bridge_dev);
+
+ if (bridge_num < 0) {
+ /* First port that offloads this bridge */
+ bridge_num = find_first_zero_bit(&ocelot->bridges,
+ ocelot->num_phys_ports);
+
+ set_bit(bridge_num, &ocelot->bridges);
+ }
+
+ return bridge_num;
+}
+
+static void ocelot_bridge_num_put(struct ocelot *ocelot,
+ const struct net_device *bridge_dev,
+ int bridge_num)
+{
+ /* Check if the bridge is still in use, otherwise it is time
+ * to clean it up so we can reuse this bridge_num later.
+ */
+ if (ocelot_bridge_num_find(ocelot, bridge_dev) < 0)
+ clear_bit(bridge_num, &ocelot->bridges);
+}
+
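
Editor's note: the pair of helpers above amounts to a small ID allocator over the ocelot->bridges bitmap. The first port to offload a bridge claims the lowest free bit, and ocelot_bridge_num_put() releases the bit only once no port still maps that bridge. A minimal userspace sketch of the same lowest-free-bit pattern (all names hypothetical, not part of this patch):

	#include <stdio.h>

	#define NUM_PORTS 8                    /* stands in for num_phys_ports */

	static unsigned long bridges;          /* stands in for ocelot->bridges */

	static int bridge_num_get(void)
	{
		int num;

		/* find_first_zero_bit() equivalent */
		for (num = 0; num < NUM_PORTS; num++)
			if (!(bridges & (1UL << num)))
				break;
		if (num == NUM_PORTS)
			return -1;
		bridges |= 1UL << num;         /* set_bit() */
		return num;
	}

	static void bridge_num_put(int num, int still_in_use)
	{
		if (!still_in_use)
			bridges &= ~(1UL << num); /* clear_bit() */
	}

	int main(void)
	{
		int a = bridge_num_get();
		int b = bridge_num_get();

		printf("a=%d b=%d\n", a, b);        /* a=0 b=1 */
		bridge_num_put(a, 0);
		printf("c=%d\n", bridge_num_get()); /* bit 0 reused: c=0 */
		return 0;
	}
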
static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge,
@@ -1181,13 +1335,18 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
- int err;
+ int port = priv->port.index;
+ int bridge_num, err;
+
+ bridge_num = ocelot_bridge_num_get(ocelot, bridge);
- ocelot_port_bridge_join(ocelot, port, bridge);
+ err = ocelot_port_bridge_join(ocelot, port, bridge, bridge_num,
+ extack);
+ if (err)
+ goto err_join;
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb,
false, extack);
if (err)
@@ -1201,10 +1360,12 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
err_switchdev_sync:
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
+err_join:
+ ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return err;
}
@@ -1214,7 +1375,7 @@ static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
}
@@ -1225,7 +1386,8 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int bridge_num = ocelot_port->bridge_num;
+ int port = priv->port.index;
int err;
err = ocelot_switchdev_unsync(ocelot, port);
@@ -1233,6 +1395,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev,
return err;
ocelot_port_bridge_leave(ocelot, port, bridge);
+ ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return 0;
}
@@ -1246,14 +1409,13 @@ static int ocelot_netdevice_lag_join(struct net_device *dev,
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
struct net_device *bridge_dev;
- int port = priv->chip_port;
+ int port = priv->port.index;
int err;
- err = ocelot_port_lag_join(ocelot, port, bond, info);
- if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(extack, "Offloading not supported");
+ err = ocelot_port_lag_join(ocelot, port, bond, info, extack);
+ if (err == -EOPNOTSUPP)
+ /* Offloading not supported, fall back to software LAG */
return 0;
- }
bridge_dev = netdev_master_upper_dev_get(bond);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
@@ -1289,7 +1451,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
struct net_device *bridge_dev;
- int port = priv->chip_port;
+ int port = priv->port.index;
ocelot_port_lag_leave(ocelot, port, bond);
@@ -1403,7 +1565,7 @@ ocelot_netdevice_changelowerstate(struct net_device *dev,
bool is_active = info->link_up && info->tx_enabled;
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
if (!ocelot_port->bond)
return NOTIFY_DONE;
@@ -1551,7 +1713,7 @@ static void vsc7514_phylink_mac_link_down(struct phylink_config *config,
struct net_device *ndev = to_net_dev(config->dev);
struct ocelot_port_private *priv = netdev_priv(ndev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
OCELOT_MAC_QUIRKS);
@@ -1567,7 +1729,7 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
struct net_device *ndev = to_net_dev(config->dev);
struct ocelot_port_private *priv = netdev_priv(ndev);
struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
+ int port = priv->port.index;
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex,
@@ -1681,9 +1843,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
SET_NETDEV_DEV(dev, ocelot->dev);
priv = netdev_priv(dev);
priv->dev = dev;
- priv->chip_port = port;
ocelot_port = &priv->port;
ocelot_port->ocelot = ocelot;
+ ocelot_port->index = port;
ocelot_port->target = target;
ocelot->ports[port] = ocelot_port;
@@ -1700,7 +1862,7 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index 6f5068c1041a..7e1f67be38f5 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -20,7 +20,7 @@
/* Default policer order */
#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
-int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix,
struct qos_policer_conf *conf)
{
u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
@@ -102,26 +102,30 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
/* Check limits */
if (pir > GENMASK(15, 0)) {
- dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n",
- port, pir, GENMASK(15, 0));
+ dev_err(ocelot->dev,
+ "Invalid pir for policer %u: %u (max %lu)\n",
+ pol_ix, pir, GENMASK(15, 0));
return -EINVAL;
}
if (cir > GENMASK(15, 0)) {
- dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n",
- port, cir, GENMASK(15, 0));
+ dev_err(ocelot->dev,
+ "Invalid cir for policer %u: %u (max %lu)\n",
+ pol_ix, cir, GENMASK(15, 0));
return -EINVAL;
}
if (pbs > pbs_max) {
- dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n",
- port, pbs, pbs_max);
+ dev_err(ocelot->dev,
+ "Invalid pbs for policer %u: %u (max %u)\n",
+ pol_ix, pbs, pbs_max);
return -EINVAL;
}
if (cbs > cbs_max) {
- dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n",
- port, cbs, cbs_max);
+ dev_err(ocelot->dev,
+ "Invalid cbs for policer %u: %u (max %u)\n",
+ pol_ix, cbs, cbs_max);
return -EINVAL;
}
@@ -154,6 +158,47 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
return 0;
}
+int ocelot_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ if (a->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ a->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, a)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but police action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.peakrate_bytes_ps ||
+ a->police.avrate || a->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_policer_validate);
+
int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
@@ -170,7 +215,7 @@ int ocelot_port_policer_add(struct ocelot *ocelot, int port,
dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
__func__, port, pp.pir, pp.pbs);
- err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
+ err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp);
if (err)
return err;
@@ -194,7 +239,7 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port)
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
+ err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp);
if (err)
return err;
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index 7adb05f71999..0749f23684f2 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -8,6 +8,7 @@
#define _MSCC_OCELOT_POLICE_H_
#include "ocelot.h"
+#include <net/flow_offload.h>
enum mscc_qos_rate_mode {
MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
@@ -30,7 +31,11 @@ struct qos_policer_conf {
u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
};
-int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix,
struct qos_policer_conf *conf);
+int ocelot_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack);
+
#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 87ad2137ba06..1a82f10c8853 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -6,9 +6,13 @@
*/
#include <linux/time64.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
+#include "ocelot.h"
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
@@ -72,6 +76,10 @@ int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ if (ocelot->ops->tas_clock_adjust)
+ ocelot->ops->tas_clock_adjust(ocelot);
+
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_settime64);
@@ -105,6 +113,9 @@ int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ if (ocelot->ops->tas_clock_adjust)
+ ocelot->ops->tas_clock_adjust(ocelot);
} else {
/* Fall back using ocelot_ptp_settime64 which is not exact. */
struct timespec64 ts;
@@ -117,6 +128,7 @@ int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ocelot_ptp_settime64(ptp, &ts);
}
+
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_adjtime);
@@ -302,6 +314,483 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
}
EXPORT_SYMBOL(ocelot_ptp_enable);
+static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
+ *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
+}
+
+static void
+ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv4.dport.value = PTP_EV_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
+ trap->key.ipv6.dport.value = PTP_EV_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv4.dport.value = PTP_GEN_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
+ trap->key.ipv6.dport.value = PTP_GEN_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
+
+ return ocelot_trap_add(ocelot, port, l2_cookie, true,
+ ocelot_populate_l2_ptp_trap_key);
+}
+
+static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, l2_cookie);
+}
+
+static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
+ ocelot_populate_ipv4_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
+ ocelot_populate_ipv4_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
+ ocelot_populate_ipv6_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
+ ocelot_populate_ipv6_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
+ return err;
+}
+
+static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ bool l2, bool l4)
+{
+ int err;
+
+ if (l2)
+ err = ocelot_l2_ptp_trap_add(ocelot, port);
+ else
+ err = ocelot_l2_ptp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ if (l4) {
+ err = ocelot_ipv4_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv4;
+
+ err = ocelot_ipv6_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv6;
+ } else {
+ err = ocelot_ipv4_ptp_trap_del(ocelot, port);
+
+ err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
+ }
+ if (err)
+ return err;
+
+ return 0;
+
+err_ipv6:
+ ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+ if (l2)
+ ocelot_l2_ptp_trap_del(ocelot, port);
+ return err;
+}
+
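
Editor's note: ocelot_setup_ptp_traps() follows the usual add-or-unwind shape: every trap family installed so far is deleted, in reverse order, when a later step fails. A compilable toy of that unwind idiom; the stub add/del helpers and the forced IPv6 failure are hypothetical, for illustration only:

	#include <stdio.h>

	static int add_l2(void)   { puts("add l2");   return 0; }
	static int add_ipv4(void) { puts("add ipv4"); return 0; }
	static int add_ipv6(void) { puts("add ipv6"); return -1; } /* forced failure */
	static void del_l2(void)   { puts("del l2"); }
	static void del_ipv4(void) { puts("del ipv4"); }

	static int setup_traps(int l2, int l4)
	{
		int err = 0;

		if (l2) {
			err = add_l2();
			if (err)
				return err;
		}
		if (l4) {
			err = add_ipv4();
			if (err)
				goto err_ipv4;
			err = add_ipv6();
			if (err)
				goto err_ipv6;
		}
		return 0;

	err_ipv6:
		del_ipv4();       /* roll back in reverse order of setup */
	err_ipv4:
		if (l2)
			del_l2();
		return err;
	}

	int main(void)
	{
		printf("setup_traps() = %d\n", setup_traps(1, 1));
		return 0;
	}
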
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
+
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool l2 = false, l4 = false;
+ struct hwtstamp_config cfg;
+ int err;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
+ * need to update the origin time.
+ */
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ ocelot_port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ l2 = true;
+ l4 = true;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+ if (err) {
+ mutex_unlock(&ocelot->ptp_lock);
+ return err;
+ }
+
+ if (l2 && l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ else if (l2)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ else
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
+
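
Editor's note: ocelot_hwstamp_get() and ocelot_hwstamp_set() sit behind the generic SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls, so the trap installation above can be exercised from userspace with nothing more than the UAPI headers. A minimal sketch, assuming the switch port netdev is named "swp0":

	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,               /* two-step TX */
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT, /* L2 and L4 traps */
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "swp0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			perror("SIOCSHWTSTAMP");
		else /* the driver writes back the filter it really enabled */
			printf("rx_filter=%d\n", cfg.rx_filter);

		close(fd);
		return 0;
	}
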
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+
+ if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+ ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
+
+ ocelot_port->ts_id++;
+ if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+ ocelot_port->ts_id = 0;
+
+ ocelot_port->ptp_skbs_in_flight++;
+ ocelot->ptp_skbs_in_flight++;
+
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+
+ return 0;
+}
+
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+ unsigned int ptp_class)
+{
+ struct ptp_header *hdr;
+ u8 msgtype, twostep;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return false;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ twostep = hdr->flag_field[0] & 0x2;
+
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
+ return true;
+
+ return false;
+}
+
+int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ struct sk_buff *skb,
+ struct sk_buff **clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u8 ptp_cmd = ocelot_port->ptp_cmd;
+ unsigned int ptp_class;
+ int err;
+
+ /* Don't do anything if PTP timestamping not enabled */
+ if (!ptp_cmd)
+ return 0;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
+ if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ return 0;
+ }
+
+ /* Fall back to two-step timestamping */
+ ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ }
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *clone = skb_clone_sk(skb);
+ if (!(*clone))
+ return -ENOMEM;
+
+ err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ if (err)
+ return err;
+
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_txtstamp_request);
+
+static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* Sec has incremented since the ts was registered */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+
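
Editor's note: the parity check at the end of ocelot_get_hwtimestamp() is worth spelling out. The timestamp FIFO stores only nanoseconds plus one bit of the seconds counter, so if the live seconds value has ticked since the stamp was latched, the LSBs disagree and one second is subtracted. A standalone sketch of that correction (values hypothetical):

	#include <stdio.h>

	/* cur_sec: seconds counter read back at FIFO-drain time;
	 * stamp_sec_lsb: bit 0 of the seconds counter latched with the stamp.
	 */
	static long fixup_sec(long cur_sec, int stamp_sec_lsb)
	{
		if ((cur_sec & 0x1) != stamp_sec_lsb)
			cur_sec--;      /* clock ticked between stamp and readout */
		return cur_sec;
	}

	int main(void)
	{
		printf("%ld\n", fixup_sec(100, 0)); /* same second -> 100 */
		printf("%ld\n", fixup_sec(101, 0)); /* rolled over -> 100 */
		return 0;
	}
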
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+ if (WARN_ON(!hdr))
+ return false;
+
+ return seqid == ntohs(hdr->sequence_id);
+}
+
+void ocelot_get_txtstamp(struct ocelot *ocelot)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u32 val, id, seqid, txport;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
+
+ port = ocelot->ports[txport];
+
+ spin_lock(&ocelot->ts_id_lock);
+ port->ptp_skbs_in_flight--;
+ ocelot->ptp_skbs_in_flight--;
+ spin_unlock(&ocelot->ts_id_lock);
+
+ /* Retrieve its associated skb */
+try_again:
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (OCELOT_SKB_CB(skb)->ts_id != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+ dev_err_ratelimited(ocelot->dev,
+ "port %d received stale TX timestamp for seqid %d, discarding\n",
+ txport, seqid);
+ dev_kfree_skb_any(skb_match);
+ skb_match = NULL;
+ goto try_again;
+ }
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_txtstamp);
+
int ocelot_init_timestamp(struct ocelot *ocelot,
const struct ptp_clock_info *info)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
new file mode 100644
index 000000000000..dbd20b125cea
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Statistics for Ocelot switch family
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2022 NXP
+ */
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "ocelot.h"
+
+/* Read the counters from hardware and keep them in region->buf.
+ * Caller must hold &ocelot->stat_view_lock.
+ */
+static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
+{
+ struct ocelot_stats_region *region;
+ int err;
+
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Transfer the counters from region->buf to ocelot->stats.
+ * Caller must hold &ocelot->stat_view_lock and &ocelot->stats_lock.
+ */
+static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
+{
+ unsigned int idx = port * OCELOT_NUM_STATS;
+ struct ocelot_stats_region *region;
+ int j;
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ for (j = 0; j < region->count; j++) {
+ u64 *stat = &ocelot->stats[idx + j];
+ u64 val = region->buf[j];
+
+ if (val < (*stat & U32_MAX))
+ *stat += (u64)1 << 32;
+
+ *stat = (*stat & ~(u64)U32_MAX) + val;
+ }
+
+ idx += region->count;
+ }
+}
+
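
Editor's note: ocelot_port_transfer_stats() widens the 32-bit hardware counters into the 64-bit ocelot->stats mirror. If the freshly read low half is smaller than the cached low half, the counter must have wrapped, so the upper 32 bits are bumped before the low half is replaced. A userspace sketch of the same folding:

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a fresh 32-bit hardware reading into a 64-bit software mirror,
	 * detecting at most one wrap since the previous reading.
	 */
	static void fold(uint64_t *stat, uint32_t val)
	{
		if (val < (uint32_t)*stat)          /* low half went backwards */
			*stat += (uint64_t)1 << 32;
		*stat = (*stat & ~(uint64_t)UINT32_MAX) + val;
	}

	int main(void)
	{
		uint64_t stat = 0xfffffff0;         /* close to the 32-bit limit */

		fold(&stat, 0x00000005);            /* the counter wrapped */
		printf("0x%llx\n", (unsigned long long)stat); /* 0x100000005 */
		return 0;
	}
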
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+ int port, err;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ err = ocelot_port_update_stats(ocelot, port);
+ if (err)
+ break;
+
+ spin_lock(&ocelot->stats_lock);
+ ocelot_port_transfer_stats(ocelot, port);
+ spin_unlock(&ocelot->stats_lock);
+ }
+
+ if (!err && ocelot->ops->update_stats)
+ ocelot->ops->update_stats(ocelot);
+
+ mutex_unlock(&ocelot->stat_view_lock);
+
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
+
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+}
+
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
+{
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+/* Update ocelot->stats for the given port and run the given callback */
+static void ocelot_port_stats_run(struct ocelot *ocelot, int port, void *priv,
+ void (*cb)(struct ocelot *ocelot, int port,
+ void *priv))
+{
+ int err;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ err = ocelot_port_update_stats(ocelot, port);
+ if (err) {
+ dev_err(ocelot->dev, "Failed to update port %d stats: %pe\n",
+ port, ERR_PTR(err));
+ goto out_unlock;
+ }
+
+ spin_lock(&ocelot->stats_lock);
+
+ ocelot_port_transfer_stats(ocelot, port);
+ cb(ocelot, port, priv);
+
+ spin_unlock(&ocelot->stats_lock);
+
+out_unlock:
+ mutex_unlock(&ocelot->stat_view_lock);
+}
+
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
+ int i, num_stats = 0;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
+}
+EXPORT_SYMBOL(ocelot_get_sset_count);
+
+static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *data = priv;
+ int i;
+
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
+}
+
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
+{
+ ocelot_port_stats_run(ocelot, port, data, ocelot_port_ethtool_stats_cb);
+}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
+
+static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_pause_stats *pause_stats = priv;
+
+ pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PAUSE];
+ pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE];
+}
+
+void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pause_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats);
+
+static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1526 },
+ { 1527, 65535 },
+ {},
+};
+
+static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_rmon_stats *rmon_stats = priv;
+
+ rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_SHORTS];
+ rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_LONGS];
+ rmon_stats->fragments = s[OCELOT_STAT_RX_FRAGMENTS];
+ rmon_stats->jabbers = s[OCELOT_STAT_RX_JABBERS];
+
+ rmon_stats->hist[0] = s[OCELOT_STAT_RX_64];
+ rmon_stats->hist[1] = s[OCELOT_STAT_RX_65_127];
+ rmon_stats->hist[2] = s[OCELOT_STAT_RX_128_255];
+ rmon_stats->hist[3] = s[OCELOT_STAT_RX_256_511];
+ rmon_stats->hist[4] = s[OCELOT_STAT_RX_512_1023];
+ rmon_stats->hist[5] = s[OCELOT_STAT_RX_1024_1526];
+ rmon_stats->hist[6] = s[OCELOT_STAT_RX_1527_MAX];
+
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
+ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
+ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
+ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
+ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
+}
+
+void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ *ranges = ocelot_rmon_ranges;
+
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_rmon_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats);
+
+static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
+
+ ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL];
+}
+
+void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_ctrl_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats);
+
+static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_mac_stats *mac_stats = priv;
+
+ mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_OCTETS];
+ mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_OCTETS];
+ mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_GREEN_PRIO_0] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_1] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_2] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_3] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_4] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_5] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_6] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_7] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_0] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_1] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_2] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_3] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_4] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_5] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_6] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_7];
+ mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_MULTICAST];
+ mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_BROADCAST];
+ mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_MULTICAST];
+ mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_BROADCAST];
+ mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_LONGS];
+ /* Sadly, C_RX_CRC is the sum of FCS and alignment errors, they are not
+ * counted individually.
+ */
+ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
+ mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
+}
+
+void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_mac_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats);
+
+static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_phy_stats *phy_stats = priv;
+
+ phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS];
+}
+
+void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_phy_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+
+void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+
+ spin_lock(&ocelot->stats_lock);
+
+ /* Get Rx stats */
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
+ stats->rx_missed_errors = s[OCELOT_STAT_DROP_TAIL];
+ stats->rx_dropped = s[OCELOT_STAT_RX_RED_PRIO_0] +
+ s[OCELOT_STAT_RX_RED_PRIO_1] +
+ s[OCELOT_STAT_RX_RED_PRIO_2] +
+ s[OCELOT_STAT_RX_RED_PRIO_3] +
+ s[OCELOT_STAT_RX_RED_PRIO_4] +
+ s[OCELOT_STAT_RX_RED_PRIO_5] +
+ s[OCELOT_STAT_RX_RED_PRIO_6] +
+ s[OCELOT_STAT_RX_RED_PRIO_7] +
+ s[OCELOT_STAT_DROP_LOCAL] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_0] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_1] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_2] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_3] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_4] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_5] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_6] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_7] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_0] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_1] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_2] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_3] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_4] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_5] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_6] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_7];
+
+ /* Get Tx stats */
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_port_get_stats64);
+
+static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+{
+ struct ocelot_stats_region *region = NULL;
+ unsigned int last;
+ int i;
+
+ INIT_LIST_HEAD(&ocelot->stats_regions);
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (!ocelot->stats_layout[i].reg)
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
+ region->count++;
+ } else {
+ region = devm_kzalloc(ocelot->dev, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->base = ocelot->stats_layout[i].reg;
+ region->count = 1;
+ list_add_tail(&region->node, &ocelot->stats_regions);
+ }
+
+ last = ocelot->stats_layout[i].reg;
+ }
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ region->buf = devm_kcalloc(ocelot->dev, region->count,
+ sizeof(*region->buf), GFP_KERNEL);
+ if (!region->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
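
Editor's note: ocelot_prepare_stats_regions() walks the layout once and merges counters whose register addresses are exactly 4 bytes apart into a single bulk-read region, so the periodic refresh issues one burst per contiguous block instead of one MMIO read per counter. A sketch of the coalescing step over a plain address array (addresses hypothetical):

	#include <stdio.h>

	int main(void)
	{
		/* counter addresses in layout order; 0 marks an unmapped counter */
		unsigned int regs[] = { 0x00, 0x04, 0x08, 0, 0x40, 0x44, 0x200 };
		unsigned int base = 0, count = 0, last = 0;

		for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
			if (!regs[i])
				continue;
			if (count && regs[i] == last + 4) {
				count++;               /* extend the current region */
			} else {
				if (count)
					printf("region base=0x%x count=%u\n",
					       base, count);
				base = regs[i];        /* start a new region */
				count = 1;
			}
			last = regs[i];
		}
		if (count)
			printf("region base=0x%x count=%u\n", base, count);
		return 0;
	}
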
+int ocelot_stats_init(struct ocelot *ocelot)
+{
+ char queue_name[32];
+ int ret;
+
+ ocelot->stats = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
+ sizeof(u64), GFP_KERNEL);
+ if (!ocelot->stats)
+ return -ENOMEM;
+
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(ocelot->dev));
+ ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!ocelot->stats_queue)
+ return -ENOMEM;
+
+ spin_lock_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->stat_view_lock);
+
+ ret = ocelot_prepare_stats_regions(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->stats_queue);
+ return ret;
+ }
+
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+
+ return 0;
+}
+
+void ocelot_stats_deinit(struct ocelot *ocelot)
+{
+ cancel_delayed_work(&ocelot->stats_work);
+ destroy_workqueue(ocelot->stats_queue);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index d3544413a8a4..73cdec5ca6a3 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -335,6 +335,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_MIRROR_ENA, a->mirror_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
@@ -373,7 +374,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
OCELOT_VCAP_BIT_0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@ -564,9 +564,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
- if (msk == 0xff && (val == 6 || val == 17)) {
+ if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) {
/* UDP/TCP protocol match */
- tcp = (val == 6 ?
+ tcp = (val == IPPROTO_TCP ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
vcap_key_l4_port_set(vcap, &data,
@@ -671,12 +671,10 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix / 2;
u32 type;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@ -812,11 +810,9 @@ static void es0_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@ -917,7 +913,7 @@ int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
if (!tmp)
return -ENOMEM;
- ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ ret = qos_policer_conf_set(ocelot, pol_ix, &pp);
if (ret) {
kfree(tmp);
return ret;
@@ -948,21 +944,28 @@ int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
if (z) {
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ return qos_policer_conf_set(ocelot, pol_ix, &pp);
}
return 0;
}
EXPORT_SYMBOL(ocelot_vcap_policer_del);
-static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- struct ocelot_vcap_filter *filter)
+static int
+ocelot_vcap_filter_add_aux_resources(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
{
- struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *n;
+ struct ocelot_mirror *m;
int ret;
+ if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) {
+ m = ocelot_mirror_get(ocelot, filter->egress_port.value,
+ extack);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+ }
+
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
&filter->action.pol);
@@ -970,19 +973,42 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
return ret;
}
- block->count++;
+ return 0;
+}
- if (list_empty(&block->rules)) {
- list_add(&filter->list, &block->rules);
- return 0;
- }
+static void
+ocelot_vcap_filter_del_aux_resources(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ if (filter->block_id == VCAP_IS2 && filter->action.police_ena)
+ ocelot_vcap_policer_del(ocelot, filter->action.pol_ix);
- list_for_each_safe(pos, n, &block->rules) {
- tmp = list_entry(pos, struct ocelot_vcap_filter, list);
- if (filter->prio < tmp->prio)
+ if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena)
+ ocelot_mirror_put(ocelot);
+}
+
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ struct list_head *pos = &block->rules;
+ struct ocelot_vcap_filter *tmp;
+ int ret;
+
+ ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
+ if (ret)
+ return ret;
+
+ block->count++;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (filter->prio < tmp->prio) {
+ pos = &tmp->list;
break;
+ }
}
- list_add(&filter->list, pos->prev);
+ list_add_tail(&filter->list, pos);
return 0;
}
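
Editor's note: the rewritten ocelot_vcap_filter_add_to_block() keeps block->rules sorted by priority. pos starts at the list head (which list_add_tail() treats as "append at the end") and is moved to the first entry with a strictly higher prio, so the new filter lands just before it and equal-priority filters keep their insertion order. The same invariant in a self-contained singly linked sketch (names hypothetical):

	#include <stdio.h>

	struct filter {
		int prio;
		struct filter *next;
	};

	/* Insert before the first entry with a strictly higher prio, so filters
	 * of equal priority keep their relative insertion order.
	 */
	static void insert_sorted(struct filter **head, struct filter *f)
	{
		struct filter **pos = head;

		while (*pos && !(f->prio < (*pos)->prio))
			pos = &(*pos)->next;
		f->next = *pos;
		*pos = f;
	}

	int main(void)
	{
		struct filter a = { .prio = 10 }, b = { .prio = 20 }, c = { .prio = 15 };
		struct filter *head = NULL, *f;

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);
		insert_sorted(&head, &c);
		for (f = head; f; f = f->next)
			printf("%d ", f->prio);          /* 10 15 20 */
		printf("\n");
		return 0;
	}
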
@@ -1168,7 +1194,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
}
/* Add filter to the linked list */
- ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter, extack);
if (ret)
return ret;
@@ -1182,6 +1208,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i - 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@ -1195,18 +1223,12 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *q;
+ struct ocelot_vcap_filter *tmp, *n;
- list_for_each_safe(pos, q, &block->rules) {
- tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+ list_for_each_entry_safe(tmp, n, &block->rules, list) {
if (ocelot_vcap_filter_equal(filter, tmp)) {
- if (tmp->block_id == VCAP_IS2 &&
- tmp->action.police_ena)
- ocelot_vcap_policer_del(ocelot,
- tmp->action.pol_ix);
-
- list_del(pos);
+ ocelot_vcap_filter_del_aux_resources(ocelot, tmp);
+ list_del(&tmp->list);
kfree(tmp);
}
}
@@ -1221,7 +1243,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter del_filter;
int i, index;
+ /* Need to inherit the block_id so that vcap_entry_set()
+ * does not get confused and knows where to install it.
+ */
memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1236,6 +1262,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@ -1373,22 +1401,18 @@ static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
int ocelot_vcap_init(struct ocelot *ocelot)
{
- int i;
+ struct qos_policer_conf cpu_drop = {
+ .mode = MSCC_QOS_RATE_MODE_DATA,
+ };
+ int ret, i;
/* Create a policer that will drop the frames for the cpu.
* This policer will be used as action in the acl rules to drop
* frames.
*/
- ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG,
- OCELOT_POLICER_DISCARD);
- ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
- OCELOT_POLICER_DISCARD);
+ ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop);
+ if (ret)
+ return ret;
for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
struct ocelot_vcap_block *block = &ocelot->block[i];
@@ -1401,6 +1425,7 @@ int ocelot_vcap_init(struct ocelot *ocelot)
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->traps);
INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 4f4a495a60ad..6f22aea08a64 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -6,6 +6,7 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
@@ -25,6 +26,9 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
+#define MEM_INIT_SLEEP_US 1000
+#define MEM_INIT_TIMEOUT_US 100000
+
static const u32 *ocelot_regmap[TARGET_MAX] = {
[ANA] = vsc7514_ana_regmap,
[QS] = vsc7514_qs_regmap,
@@ -96,100 +100,8 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
};
static void ocelot_pll5_init(struct ocelot *ocelot)
@@ -227,7 +139,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
- ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
@@ -284,27 +195,43 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+static int ocelot_mem_init_status(struct ocelot *ocelot)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+
+ return err ?: val;
+}
+
static int ocelot_reset(struct ocelot *ocelot)
{
- int retries = 100;
+ int err;
u32 val;
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ if (err)
+ return err;
- do {
- msleep(1);
- regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
- } while (val && --retries);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
- if (!retries)
- return -ETIMEDOUT;
+ /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
+ * 100us) before enabling the switch core.
+ */
+ err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
+ MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
+ if (err)
+ return err;
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
- return 0;
+ return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
}
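
Editor's note: readx_poll_timeout() replaces the hand-rolled msleep() loop with a bounded poll. It re-reads through the supplied accessor every MEM_INIT_SLEEP_US until the condition holds or MEM_INIT_TIMEOUT_US elapses, and negative accessor return values propagate as errors (hence the err ?: val in ocelot_mem_init_status()). A userspace approximation of the same contract; the fake register and condition function are hypothetical:

	#include <errno.h>
	#include <stdio.h>
	#include <time.h>

	/* Re-read via readx() every sleep_us until done() accepts the value or
	 * timeout_us elapses; negative readx() values pass through as errors.
	 */
	static int poll_timeout(int (*readx)(void), int (*done)(int),
				long sleep_us, long timeout_us)
	{
		struct timespec ts = { 0, sleep_us * 1000 };
		long waited = 0;

		for (;;) {
			int val = readx();

			if (val < 0)
				return val;
			if (done(val))
				return 0;
			if (waited >= timeout_us)
				return -ETIMEDOUT;
			nanosleep(&ts, NULL);
			waited += sleep_us;
		}
	}

	/* MEM_INIT self-clears after a few polls in this toy model */
	static int fake_mem_init(void) { static int n = 3; return n ? n-- : 0; }
	static int is_clear(int val) { return !val; }

	int main(void)
	{
		printf("%d\n", poll_timeout(fake_mem_init, is_clear, 1000, 100000));
		return 0;
	}
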
/* Watermark encode
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9d2d3e13cacf 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
@@ -223,7 +283,6 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_MMGT_FAST, 0x0006a0),
REG(SYS_EVENTS_DIF, 0x0006a4),
REG(SYS_EVENTS_CORE, 0x0006b4),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x0006b8),
REG(SYS_PTP_TXSTAMP, 0x0006bc),
REG(SYS_PTP_NXT, 0x0006c0),
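All of the new per-priority counters occupy runs of eight consecutive 32-bit registers, so within each colour group the offset is simply base + 4 * prio (RX red/yellow/green at 0x50/0x70/0x90, TX yellow/green at 0x138/0x158, drop yellow/green at 0x208/0x228). The hypothetical helper below makes the stride explicit; it is a sanity check for reading tables like this, not code from the driver:

	/* Offset of per-priority counter 'prio' within a colour group. */
	#define PRIO_CNT_OFF(base, prio)	((base) + 4 * (prio))

	/* e.g. SYS_COUNT_DROP_GREEN_PRIO_7 == PRIO_CNT_OFF(0x228, 7) == 0x244 */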
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 50ac3ee2577a..9063e2e22cd5 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -579,7 +579,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
int status;
unsigned i;
- if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+ if (request_firmware(&fw, mgp->fw_name, dev) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
mgp->fw_name);
status = -EINVAL;
@@ -1647,10 +1647,10 @@ myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- strlcpy(info->driver, "myri10ge", sizeof(info->driver));
- strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
- strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "myri10ge", sizeof(info->driver));
+ strscpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+ strscpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int myri10ge_get_coalesce(struct net_device *netdev,
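strscpy() keeps strlcpy()'s argument order but has saner semantics: the destination is always NUL-terminated, the source is never read beyond what fits, and the return value is the number of characters copied or -E2BIG on truncation, rather than the would-be source length that strlcpy() callers almost never checked. A small hedged sketch (buffer contents illustrative):

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "myri10ge-rss", sizeof(buf));
	/* buf is "myri10g" plus NUL; n == -E2BIG flags the truncation.
	 * strlcpy() would instead have returned 12, the length of the
	 * untruncated source, which is easy to misuse as a copy count.
	 */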
@@ -2692,7 +2692,7 @@ again:
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
- cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cum_len = -skb_tcp_all_headers(skb);
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
@@ -2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
- if (segs != NULL) {
- curr = segs;
- segs = next;
+ skb_list_walk_safe(next, curr, next) {
curr->next = NULL;
- dev_kfree_skb_any(segs);
+ dev_kfree_skb_any(curr);
}
goto drop;
}
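skb_list_walk_safe() (from <linux/skbuff.h>) walks a ->next-chained segment list, such as the one skb_gso_segment() returns, caching the next pointer before the loop body runs so the current skb may be freed inside the loop. The shape of the cleanup above, in generic hedged form (segs is an illustrative list head):

	struct sk_buff *curr, *next;

	/* Free all remaining segments; safe because 'next' is sampled
	 * before dev_kfree_skb_any() releases 'curr'.
	 */
	skb_list_walk_safe(segs, curr, next) {
		curr->next = NULL;	/* detach before freeing */
		dev_kfree_skb_any(curr);
	}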
@@ -3588,8 +3586,8 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
goto abort;
ss->mgp = mgp;
ss->dev = mgp->dev;
- netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
- myri10ge_napi_weight);
+ netif_napi_add_weight(ss->dev, &ss->napi, myri10ge_poll,
+ myri10ge_napi_weight);
}
return 0;
abort:
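netif_napi_add() no longer takes a weight parameter and uses the default NAPI_POLL_WEIGHT (64); a driver that still wants a custom budget per poll round, as myri10ge does with its module parameter, must say so explicitly through netif_napi_add_weight(). Side by side (my_poll and the weight value are illustrative):

	/* Default weight of 64 packets per NAPI poll: */
	netif_napi_add(dev, &priv->napi, my_poll);

	/* Explicit, non-default weight: */
	netif_napi_add_weight(dev, &priv->napi, my_poll, 16);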
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 82a22711ce45..650a5a166070 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
I. Board Compatibility
This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
-It also works with other chips in in the DP83810 series.
+It also works with other chips in the DP83810 series.
II. Board-specific settings
@@ -869,7 +869,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
np->ioaddr = ioaddr;
- netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+ netif_napi_add(dev, &np->napi, natsemi_poll);
np->dev = dev;
np->pci_dev = pdev;
@@ -989,8 +989,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) readl(ee_addr)
@@ -2566,9 +2564,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 49ea130c9067..998586872599 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1351,9 +1351,9 @@ static int ns83820_set_link_ksettings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strlcpy(info->driver, "ns83820", sizeof(info->driver));
- strlcpy(info->version, VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, "ns83820", sizeof(info->driver));
+ strscpy(info->version, VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 0c0d127906dd..09a89e72f904 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,28 +32,4 @@ config S2IO
To compile this driver as a module, choose M here. The module
will be called s2io.
-config VXGE
- tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
- depends on PCI
- help
- This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter. These were originally released from
- Neterion, which was later acquired by Exar. So, the adapters might be
- labeled as either one, depending on its age.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/neterion/vxge.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called vxge.
-
-config VXGE_DEBUG_TRACE_ALL
- bool "Enabling All Debug trace statements in driver"
- default n
- depends on VXGE
- help
- Say Y here if you want to enabling all the debug trace statements in
- the vxge driver. By default only few debug trace statements are
- enabled.
-
endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
index 87ede8a47bb8..de98b4e6eff9 100644
--- a/drivers/net/ethernet/neterion/Makefile
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_S2IO) += s2io.o
-obj-$(CONFIG_VXGE) += vxge/
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6dd451adc331..1d3c4474b7cb 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2156,7 +2156,7 @@ static int verify_xena_quiescence(struct s2io_nic *sp)
/*
* In PCI 33 mode, the P_PLL is not used, and therefore,
- * the the P_PLL_LOCK bit in the adapter_status register will
+ * the P_PLL_LOCK bit in the adapter_status register will
* not be asserted.
*/
if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
@@ -3817,7 +3817,7 @@ static irqreturn_t s2io_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
struct pci_dev *pdev = sp->pdev;
@@ -5348,9 +5348,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strlcpy(info->version, s2io_driver_version, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strscpy(info->version, s2io_driver_version, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}
/**
@@ -5492,7 +5492,7 @@ s2io_ethtool_gringparam(struct net_device *dev,
}
/**
- * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
+ * s2io_ethtool_getpause_data -Pause frame generation and reception.
* @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
@@ -7128,9 +7128,8 @@ static int s2io_card_up(struct s2io_nic *sp)
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_fill_buff;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
ring->rx_bufs_left);
@@ -7168,18 +7167,16 @@ static int s2io_card_up(struct s2io_nic *sp)
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
/* Add interrupt service routine */
if (s2io_add_isr(sp) != 0) {
if (sp->config.intr_type == MSI_X)
s2io_rem_isr(sp);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
@@ -7199,6 +7196,20 @@ static int s2io_card_up(struct s2io_nic *sp)
}
return 0;
+
+err_out:
+ if (config->napi) {
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_disable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_disable(&sp->napi);
+ }
+ }
+err_fill_buff:
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return ret;
}
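The rework above collapses three duplicated s2io_reset()/free_rx_buffers() cleanups into a single unwind path reached by goto, with the labels ordered so a later failure undoes strictly more state (err_out additionally disables NAPI before falling through to the common teardown). The pattern in generic, hedged form (all names hypothetical):

	static int my_card_up(struct my_priv *sp)
	{
		int ret;

		ret = my_fill_rx_buffers(sp);
		if (ret)
			goto err_fill_buff;	/* only buffers to undo */

		my_napi_enable(sp);

		ret = my_start_nic(sp);
		if (ret)
			goto err_out;		/* NAPI is up: take it down too */

		return 0;

	err_out:
		my_napi_disable(sp);
	err_fill_buff:
		my_reset(sp);
		my_free_rx_buffers(sp);
		return ret;
	}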
/**
@@ -7359,10 +7370,9 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
int get_off = ring_data->rx_curr_get_info.offset;
int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
- unsigned char *buff = skb_push(skb, buf0_len);
struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- memcpy(buff, ba->ba_0, buf0_len);
+ skb_put_data(skb, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
}
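skb_put_data(skb, data, len) is shorthand for memcpy(skb_put(skb, len), data, len): it grows the buffer's tail by len bytes and copies data into the fresh space in one step. A hedged sketch with illustrative names:

	/* Copy the header bytes out of a bounce buffer, then account
	 * for the payload the hardware already placed behind it.
	 */
	skb_put_data(skb, bounce_buf, hdr_len);	/* grows tail and copies */
	skb_put(skb, payload_len);		/* grows tail only */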
@@ -7449,7 +7459,7 @@ aggregate:
* @link : inidicates whether link is UP/DOWN.
* Description:
* This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is is down or up. This is called by the Alarm
+ * status of the NIC is down or up. This is called by the Alarm
* interrupt handler whenever a link change interrupt comes up.
* Return value:
* void.
@@ -7732,7 +7742,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
* Setting the device configuration parameters.
* Most of these parameters can be specified by the user during
* module insertion as they are module loadable parameters. If
- * these parameters are not not specified during load time, they
+ * these parameters are not specified during load time, they
* are initialized with default values.
*/
config = &sp->config;
@@ -7905,10 +7915,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
for (i = 0; i < config->rx_ring_num ; i++) {
struct ring_info *ring = &mac_control->rings[i];
- netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+ netif_napi_add(dev, &ring->napi, s2io_poll_msix);
}
} else {
- netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+ netif_napi_add(dev, &sp->napi, s2io_poll_inta);
}
/* Not needed for Herc */
diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
deleted file mode 100644
index 0820e81ca7fb..000000000000
--- a/drivers/net/ethernet/neterion/vxge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
-# Virtualized Server Adapter linux driver
-
-obj-$(CONFIG_VXGE) += vxge.o
-
-vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
deleted file mode 100644
index a3204a7ef750..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ /dev/null
@@ -1,5099 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/vmalloc.h>
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
- status = __vxge_hw_vpath_stats_access(vpath, \
- VXGE_HW_STATS_OP_READ, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-static void
-vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
-{
- u64 val64;
-
- val64 = readq(&vp_reg->rxmac_vcfg0);
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- writeq(val64, &vp_reg->rxmac_vcfg0);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-}
-
-/*
- * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
- */
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_virtualpath *vpath;
- u64 val64, rxd_count, rxd_spat;
- int count = 0, total_count = 0;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
-
- vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-
- /* Check that the ring controller for this vpath has enough free RxDs
- * to send frames to the host. This is done by reading the
- * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
- * RXD_SPAT value for the vpath.
- */
- val64 = readq(&vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
- /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
- * leg room.
- */
- rxd_spat *= 2;
-
- do {
- mdelay(1);
-
- rxd_count = readq(&vp_reg->prc_rxd_doorbell);
-
- /* Check that the ring controller for this vpath does
- * not have any frame in its pipeline.
- */
- val64 = readq(&vp_reg->frm_in_progress_cnt);
- if ((rxd_count <= rxd_spat) || (val64 > 0))
- count = 0;
- else
- count++;
- total_count++;
- } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
- (total_count < VXGE_HW_MAX_POLLING_COUNT));
-
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
- __func__);
-
- return total_count;
-}
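The loop above does not declare the path idle on the first quiet sample; it demands VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive idle reads, resetting the streak whenever activity reappears, and bounds the whole wait with a maximum poll count. The generic shape (my_hw_is_busy and the limit names are illustrative):

	int idle_streak = 0, polls = 0;

	do {
		mdelay(1);
		if (my_hw_is_busy(dev))
			idle_streak = 0;	/* activity resets the streak */
		else
			idle_streak++;
		polls++;
	} while (idle_streak < MIN_SUCCESSIVE_IDLE && polls < MAX_POLLS);

	if (polls >= MAX_POLLS)
		pr_warn("still busy after %d polls\n", polls);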
-
-/* vxge_hw_device_wait_receive_idle - This function waits until all frames
- * stored in the frame buffer for each vpath assigned to the given
- * function (hldev) have been sent to the host.
- */
-void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
-{
- int i, total_count = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- break;
- }
-}
-
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
- u64 val64;
- u32 i = 0;
-
- udelay(10);
-
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- udelay(100);
- } while (++i <= 9);
-
- i = 0;
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- mdelay(1);
- } while (++i <= max_millis);
-
- return VXGE_HW_FAIL;
-}
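This deleted helper is a classic two-stage MMIO poll: burn roughly a millisecond in fine-grained udelay() spins for the common fast case, then degrade to 1 ms mdelay() ticks until the caller's timeout. A generic sketch under the same structure, returning conventional error codes rather than the driver's status enum:

	static int my_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
	{
		u32 i;

		/* Fast path: ~1 ms of 100 us busy-waits. */
		for (i = 0; i < 10; i++) {
			if (!(readq(reg) & mask))
				return 0;
			udelay(100);
		}

		/* Slow path: 1 ms ticks up to the requested timeout. */
		for (i = 0; i < max_millis; i++) {
			if (!(readq(reg) & mask))
				return 0;
			mdelay(1);
		}

		return -ETIMEDOUT;
	}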
-
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- return __vxge_hw_device_register_poll(addr, mask, max_millis);
-}
-
-static enum vxge_hw_status
-vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
- u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
- u64 *steer_ctrl)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- enum vxge_hw_status status;
- u64 val64;
- u32 retry = 0, max_retry = 3;
-
- spin_lock(&vpath->lock);
- if (!vpath->vp_open) {
- spin_unlock(&vpath->lock);
- max_retry = 100;
- }
-
- writeq(*data0, &vp_reg->rts_access_steer_data0);
- writeq(*data1, &vp_reg->rts_access_steer_data1);
- wmb();
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- *steer_ctrl;
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- /* The __vxge_hw_device_register_poll can udelay for a significant
- * amount of time, blocking other process from the CPU. If it delays
- * for ~5secs, a NMI error can occur. A way around this is to give up
- * the processor via msleep, but this is not allowed is under lock.
- * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
- * 1sec and sleep for 10ms until the firmware operation has completed
- * or timed-out.
- */
- while ((status != VXGE_HW_OK) && retry++ < max_retry) {
- if (!vpath->vp_open)
- msleep(20);
- status = __vxge_hw_device_register_poll(
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- }
-
- if (status != VXGE_HW_OK)
- goto out;
-
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- *data0 = readq(&vp_reg->rts_access_steer_data0);
- *data1 = readq(&vp_reg->rts_access_steer_data1);
- *steer_ctrl = val64;
- } else
- status = VXGE_HW_FAIL;
-
-out:
- if (vpath->vp_open)
- spin_unlock(&vpath->lock);
- return status;
-}
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_READ,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- return status;
-}
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- u32 ret;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
- goto exit;
- }
-
- ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
- if (ret != 1) {
- vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
- __func__, ret);
- status = VXGE_HW_FAIL;
- }
-
-exit:
- return status;
-}
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int ret_code, sec_code;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- /* send upgrade start command */
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_START,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
- __func__);
- return status;
- }
-
- /* Transfer fw image to adapter 16 bytes at a time */
- for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
- steer_ctrl = 0;
-
- /* The next 128bits of fwdata to be loaded onto the adapter */
- data0 = *((u64 *)fwdata);
- data1 = *((u64 *)fwdata + 1);
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_SEND,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
- __func__);
- goto out;
- }
-
- ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
- switch (ret_code) {
- case VXGE_HW_FW_UPGRADE_OK:
- /* All OK, send next 16 bytes. */
- break;
- case VXGE_FW_UPGRADE_BYTES2SKIP:
- /* skip bytes in the stream */
- fwdata += (data0 >> 8) & 0xFFFFFFFF;
- break;
- case VXGE_HW_FW_UPGRADE_DONE:
- goto out;
- case VXGE_HW_FW_UPGRADE_ERR:
- sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
- switch (sec_code) {
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
- printk(KERN_ERR
- "corrupted data from .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
- printk(KERN_ERR "invalid .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
- printk(KERN_ERR "buffer overflow\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
- printk(KERN_ERR "failed to flash the image\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
- printk(KERN_ERR
- "generic error. Unknown error type\n");
- break;
- default:
- printk(KERN_ERR "Unknown error of type %d\n",
- sec_code);
- break;
- }
- status = VXGE_HW_FAIL;
- goto out;
- default:
- printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
- status = VXGE_HW_FAIL;
- goto out;
- }
- /* point to next 16 bytes */
- fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
- }
-out:
- return status;
-}
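The transfer loop above consumes the firmware image 16 bytes per round trip, handing each step to the device as two 64-bit words. A condensed sketch of that walk (the feed function and the get_unaligned() use are illustrative additions; the original dereferences fwdata directly and also handles skip and error codes):

	#include <asm/unaligned.h>

	static void my_feed_fw(const u8 *fwdata, int size,
			       void (*send)(u64 lo, u64 hi))
	{
		/* 128 bits per iteration; get_unaligned() sidesteps
		 * alignment faults on strict architectures.
		 */
		for (; size > 0; size -= 16, fwdata += 16)
			send(get_unaligned((const u64 *)fwdata),
			     get_unaligned((const u64 *)fwdata + 1));
	}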
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *img)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int i;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_EPROM_REV,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- break;
-
- img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
- img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
- img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
- img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_channel_free - Free memory allocated for channel
- * This function deallocates memory from the channel and various arrays
- * in the channel
- */
-static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
-{
- kfree(channel->work_arr);
- kfree(channel->free_arr);
- kfree(channel->reserve_arr);
- kfree(channel->orig_arr);
- kfree(channel);
-}
-
-/*
- * __vxge_hw_channel_initialize - Initialize a channel
- * This function initializes a channel by properly setting the
- * various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
-{
- u32 i;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = channel->vph->vpath;
-
- if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
- for (i = 0; i < channel->length; i++)
- channel->orig_arr[i] = channel->reserve_arr[i];
- }
-
- switch (channel->type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- vpath->fifoh = (struct __vxge_hw_fifo *)channel;
- channel->stats = &((struct __vxge_hw_fifo *)
- channel)->stats->common_stats;
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- vpath->ringh = (struct __vxge_hw_ring *)channel;
- channel->stats = &((struct __vxge_hw_ring *)
- channel)->stats->common_stats;
- break;
- default:
- break;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_channel_reset - Resets a channel
- * This function resets a channel by properly setting the various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
-{
- u32 i;
-
- for (i = 0; i < channel->length; i++) {
- if (channel->reserve_arr != NULL)
- channel->reserve_arr[i] = channel->orig_arr[i];
- if (channel->free_arr != NULL)
- channel->free_arr[i] = NULL;
- if (channel->work_arr != NULL)
- channel->work_arr[i] = NULL;
- }
- channel->free_ptr = channel->length;
- channel->reserve_ptr = channel->length;
- channel->reserve_top = 0;
- channel->post_index = 0;
- channel->compl_index = 0;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_pci_e_init
- * Initialize certain PCI/PCI-X configuration registers
- * with recommended values. Save config space for future hw resets.
- */
-static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
-{
- u16 cmd = 0;
-
- /* Set the PErr Repconse bit and SERR in PCI command register. */
- pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
- cmd |= 0x140;
- pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
-
- pci_save_state(hldev->pdev);
-}
-
-/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
- * in progress
- * This routine checks the vpath reset in progress register is turned zero
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
-{
- enum vxge_hw_status status;
- status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- return status;
-}
-
-/*
- * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
- * Set the swapper bits appropriately for the lagacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- wmb();
-
- switch (val64) {
- case VXGE_HW_SWAPPER_INITIAL_VALUE:
- return status;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- break;
-
- case VXGE_HW_SWAPPER_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
- }
-
- wmb();
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
- status = VXGE_HW_ERR_SWAPPER_CTRL;
-
- return status;
-}
-
-/*
- * __vxge_hw_device_toc_get
- * This routine sets the swapper and reads the toc pointer and returns the
- * memory mapped address of the toc
- */
-static struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0)
-{
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc = NULL;
- enum vxge_hw_status status;
-
- struct vxge_hw_legacy_reg __iomem *legacy_reg =
- (struct vxge_hw_legacy_reg __iomem *)bar0;
-
- status = __vxge_hw_legacy_swapper_set(legacy_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&legacy_reg->toc_first_pointer);
- toc = bar0 + val64;
-exit:
- return toc;
-}
-
-/*
- * __vxge_hw_device_reg_addr_get
- * This routine sets the swapper and reads the toc pointer and initializes the
- * register location pointers in the device object. It waits until the ric is
- * completed initializing registers.
- */
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- hldev->legacy_reg = hldev->bar0;
-
- hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
- if (hldev->toc_reg == NULL) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- val64 = readq(&hldev->toc_reg->toc_common_pointer);
- hldev->common_reg = hldev->bar0 + val64;
-
- val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
- hldev->mrpcim_reg = hldev->bar0 + val64;
-
- for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
- hldev->srpcim_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
- hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
- hldev->vpath_reg[i] = hldev->bar0 + val64;
- }
-
- val64 = readq(&hldev->toc_reg->toc_kdfc);
-
- switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
- case 0:
- hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64) ;
- break;
- default:
- break;
- }
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
- * This routine returns the Access Rights of the driver
- */
-static u32
-__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
-{
- u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
-
- switch (host_type) {
- case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- if (func_id == 0) {
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- }
- break;
- case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
- case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
- case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
- break;
- case VXGE_HW_SR_VH_FUNCTION0:
- case VXGE_HW_VH_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- }
-
- return access_rights;
-}
-/*
- * __vxge_hw_device_is_privilaged
- * This routine checks if the device function is privilaged or not
- */
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
-{
- if (__vxge_hw_device_access_rights_get(host_type,
- func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
- return VXGE_HW_OK;
- else
- return VXGE_HW_ERR_PRIVILEGED_OPERATION;
-}
-
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_device_host_info_get
- * This routine returns the host type assignments
- */
-static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
-
- val64 = readq(&hldev->common_reg->host_type_assignments);
-
- hldev->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- hldev->func_id =
- __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
-
- hldev->access_rights = __vxge_hw_device_access_rights_get(
- hldev->host_type, hldev->func_id);
-
- hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
- hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
-
- hldev->first_vp_id = i;
- break;
- }
-}
-
-/*
- * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
- * link width and signalling rate.
- */
-static enum vxge_hw_status
-__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- /* Get the negotiated link width and speed from PCI config space */
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
-
- if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
- return VXGE_HW_ERR_INVALID_PCI_INFO;
-
- switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
- case PCIE_LNK_WIDTH_RESRV:
- case PCIE_LNK_X1:
- case PCIE_LNK_X2:
- case PCIE_LNK_X4:
- case PCIE_LNK_X8:
- break;
- default:
- return VXGE_HW_ERR_INVALID_PCI_INFO;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_initialize
- * Initialize Titan-V hardware.
- */
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id)) {
- /* Validate the pci-e link width and speed */
- status = __vxge_hw_verify_pci_e_info(hldev);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- fw_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
- fw_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
- fw_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- fw_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- fw_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
- flash_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
- flash_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
- flash_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
- flash_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- __be64 *serial_number = (void *)hw_info->serial_number;
- __be64 *product_desc = (void *)hw_info->product_desc;
- __be64 *part_number = (void *)hw_info->part_number;
- enum vxge_hw_status status;
- u64 data0, data1 = 0, steer_ctrl = 0;
- u32 i, j = 0;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- serial_number[0] = cpu_to_be64(data0);
- serial_number[1] = cpu_to_be64(data1);
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- part_number[0] = cpu_to_be64(data0);
- part_number[1] = cpu_to_be64(data1);
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
- data0 = i;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- product_desc[j++] = cpu_to_be64(data0);
- product_desc[j++] = cpu_to_be64(data1);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- data0 = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_FUNC_MODE,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
- u8 *macaddr, u8 *macaddr_mask)
-{
- u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
- int i;
-
- do {
- status = vxge_hw_vpath_fw_api(vpath, action,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data1);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i - 1] = (u8) (data0 & 0xFF);
- data0 >>= 8;
-
- macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
- data1 >>= 8;
- }
-
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
- data0 = 0, data1 = 0, steer_ctrl = 0;
-
- } while (!is_valid_ether_addr(macaddr));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_device_hw_info_get - Get the hw information
- * @bar0: the bar
- * @hw_info: the hw_info struct
- *
- * Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver, FW version information, and the first mac address for
- * each vpath
- */
-enum vxge_hw_status
-vxge_hw_device_hw_info_get(void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i;
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status;
- struct __vxge_hw_virtualpath vpath;
-
- memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
-
- toc = __vxge_hw_device_toc_get(bar0);
- if (toc == NULL) {
- status = VXGE_HW_ERR_CRITICAL;
- goto exit;
- }
-
- val64 = readq(&toc->toc_common_pointer);
- common_reg = bar0 + val64;
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&common_reg->vpath_rst_in_prog);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
-
- val64 = readq(&common_reg->host_type_assignments);
-
- hw_info->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpmgmt_pointer[i]);
-
- vpmgmt_reg = bar0 + val64;
-
- hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
- if (__vxge_hw_device_access_rights_get(hw_info->host_type,
- hw_info->func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
-
- val64 = readq(&toc->toc_mrpcim_pointer);
-
- mrpcim_reg = bar0 + val64;
-
- writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
- wmb();
- }
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
-
- spin_lock_init(&vpath.lock);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- break;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_addr_get(&vpath,
- hw_info->mac_addrs[i],
- hw_info->mac_addr_masks[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
- struct __vxge_hw_device *hldev;
- struct list_head *p, *n;
-
- if (!blockpool)
- return;
-
- hldev = blockpool->hldev;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
- dma_unmap_single(&hldev->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- blockpool->pool_size--;
- }
-
- list_for_each_safe(p, n, &blockpool->free_entry_list) {
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- }
-
- return;
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max)
-{
- u32 i;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (blockpool == NULL) {
- status = VXGE_HW_FAIL;
- goto blockpool_create_exit;
- }
-
- blockpool->hldev = hldev;
- blockpool->block_size = VXGE_HW_BLOCK_SIZE;
- blockpool->pool_size = 0;
- blockpool->pool_max = pool_max;
- blockpool->req_out = 0;
-
- INIT_LIST_HEAD(&blockpool->free_block_list);
- INIT_LIST_HEAD(&blockpool->free_entry_list);
-
- for (i = 0; i < pool_size + pool_max; i++) {
- entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- list_add(&entry->item, &blockpool->free_entry_list);
- }
-
- for (i = 0; i < pool_size; i++) {
- memblock = vxge_os_dma_malloc(
- hldev->pdev,
- VXGE_HW_BLOCK_SIZE,
- &dma_handle,
- &acc_handle);
- if (memblock == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
- VXGE_HW_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
- vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry =
- kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry != NULL) {
- list_del(&entry->item);
- entry->length = VXGE_HW_BLOCK_SIZE;
- entry->memblock = memblock;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- } else {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- }
-
-blockpool_create_exit:
- return status;
-}
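Every streaming mapping in the pool constructor is paired with a dma_mapping_error() check before the address is trusted, and a failed mapping tears the whole pool down. The essential shape, with illustrative names:

	dma_addr_t dma = dma_map_single(&pdev->dev, buf, MY_BLOCK_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, dma)) {
		/* 'dma' is not a valid handle: free the CPU buffer and
		 * bail; it must be neither unmapped nor given to hardware.
		 */
		kfree(buf);
		return -ENOMEM;
	}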
-
-/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
-{
- if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
- (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
-{
- enum vxge_hw_status status;
-
- if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
- (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
- return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
-
- status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
- if (status != VXGE_HW_OK)
- return status;
-
- if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
- ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
- (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
- return VXGE_HW_BADCFG_VPATH_MTU;
-
- if ((vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
- return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
- u32 i;
- enum vxge_hw_status status;
-
- if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
- return VXGE_HW_BADCFG_INTR_MODE;
-
- if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
- (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
- return VXGE_HW_BADCFG_RTS_MAC_EN;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- status = __vxge_hw_device_vpath_config_check(
- &new_config->vp_config[i]);
- if (status != VXGE_HW_OK)
- return status;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_device_initialize - Initialize Titan device.
- * Initialize Titan device. Note that all the arguments of this public API
- * are 'IN', including @hldev. Driver cooperates with
- * OS to find new Titan device, locate its PCI and memory spaces.
- *
- * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
- * to enable the latter to perform Titan hardware initialization.
- */
-enum vxge_hw_status
-vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config)
-{
- u32 i;
- u32 nblocks = 0;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_config_check(device_config);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hldev = vzalloc(sizeof(struct __vxge_hw_device));
- if (hldev == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- hldev->magic = VXGE_HW_DEVICE_MAGIC;
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
-
- /* apply config */
- memcpy(&hldev->config, device_config,
- sizeof(struct vxge_hw_device_config));
-
- hldev->bar0 = attr->bar0;
- hldev->pdev = attr->pdev;
-
- hldev->uld_callbacks = attr->uld_callbacks;
-
- __vxge_hw_device_pci_e_init(hldev);
-
- status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK) {
- vfree(hldev);
- goto exit;
- }
-
- __vxge_hw_device_host_info_get(hldev);
-
- /* Incrementing for stats blocks */
- nblocks++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- if (device_config->vp_config[i].ring.enable ==
- VXGE_HW_RING_ENABLE)
- nblocks += device_config->vp_config[i].ring.ring_blocks;
-
- if (device_config->vp_config[i].fifo.enable ==
- VXGE_HW_FIFO_ENABLE)
- nblocks += device_config->vp_config[i].fifo.fifo_blocks;
- nblocks++;
- }
-
- if (__vxge_hw_blockpool_create(hldev,
- &hldev->block_pool,
- device_config->dma_blockpool_initial + nblocks,
- device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
-
- vxge_hw_device_terminate(hldev);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_device_initialize(hldev);
- if (status != VXGE_HW_OK) {
- vxge_hw_device_terminate(hldev);
- goto exit;
- }
-
- *devh = hldev;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_terminate - Terminate Titan device.
- * Terminate HW device.
- */
-void
-vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
-{
- vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
-
- hldev->magic = VXGE_HW_DEVICE_DEAD;
- __vxge_hw_blockpool_destroy(&hldev->block_pool);
- vfree(hldev);
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- * and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_stats_access_exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->xmac_stats_access_cmd,
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
- vpath->hldev->config.device_poll_millis);
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&vp_reg->xmac_stats_access_data);
- else
- *stat = 0;
-
-vpath_stats_access_exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)vpath_tx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset, val64);
- if (status != VXGE_HW_OK)
- goto exit;
- offset++;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
- val64 = (u64 *) vpath_rx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset >> 3, val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- val64 = readq(&vp_reg->vpath_debug_stats0);
- hw_stats->ini_num_mwr_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats1);
- hw_stats->ini_num_mrd_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats2);
- hw_stats->ini_num_cpl_rcvd =
- (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats3);
- hw_stats->ini_num_mwr_byte_sent =
- VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats4);
- hw_stats->ini_num_cpl_byte_rcvd =
- VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats5);
- hw_stats->wrcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats6);
- hw_stats->rdcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count0 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count1 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count2 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count3 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count4);
- hw_stats->vpath_genstats_count4 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count5);
- hw_stats->vpath_genstats_count5 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
- val64);
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
- hw_stats->prog_event_vnum0 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
- hw_stats->prog_event_vnum1 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
- hw_stats->prog_event_vnum2 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
- hw_stats->prog_event_vnum3 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
- val64 = readq(&vp_reg->rx_multi_cast_stats);
- hw_stats->rx_multi_cast_frame_discard =
- (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
- val64 = readq(&vp_reg->rx_frm_transferred);
- hw_stats->rx_frm_transferred =
- (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
- val64 = readq(&vp_reg->rxd_returned);
- hw_stats->rxd_returned =
- (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_mpa);
- hw_stats->rx_mpa_len_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
- hw_stats->rx_mpa_mrk_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
- hw_stats->rx_mpa_crc_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_fau);
- hw_stats->rx_permitted_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
- hw_stats->rx_vp_reset_discarded_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
- hw_stats->rx_wol_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
- val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
- hw_stats->tx_vp_reset_discarded_frms =
- (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
- val64);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_stats_get - Get the device hw statistics.
- * Returns the vpath h/w stats for the device.
- */
-enum vxge_hw_status
-vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_hw_info *hw_stats)
-{
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
- (hldev->virtual_paths[i].vp_open ==
- VXGE_HW_VP_NOT_OPEN))
- continue;
-
- memcpy(hldev->virtual_paths[i].hw_stats_sav,
- hldev->virtual_paths[i].hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(
- &hldev->virtual_paths[i],
- hldev->virtual_paths[i].hw_stats);
- }
-
- memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_hw_info));
-
- return status;
-}
-
-/*
- * vxge_hw_driver_stats_get - Get the device sw statistics.
- * Returns the vpath s/w stats for the device.
- */
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_sw_info *sw_stats)
-{
- memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_sw_info));
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
- * and offset and perform an operation
- * Get the statistics from the given location and offset.
- */
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
- u32 operation, u32 location, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
- VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &hldev->mrpcim_reg->xmac_stats_sys_cmd,
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
- hldev->config.device_poll_millis);
-
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
- else
- *stat = 0;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_aggr_stats_get - Get the statistics on an aggregate port
- * Returns the XMAC statistics for the given aggregate port.
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)aggr_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (104 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_port_stats_get - Get the statistics on a port
- * Returns the XMAC statistics for the given port.
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = 0x0;
-
- val64 = (u64 *)port_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (608 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_stats_get - Get the XMAC statistics
- * Returns the aggregate, per-port and per-vpath XMAC statistics.
- */
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_xmac_stats *xmac_stats)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 i;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 0, &xmac_stats->aggr_stats[0]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 1, &xmac_stats->aggr_stats[1]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- status = vxge_hw_device_xmac_port_stats_get(hldev,
- i, &xmac_stats->port_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_tx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_rx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_debug_set - Set the debug module, level and timestamp
- * This routine is used to dynamically change the debug output
- */
-void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
- enum vxge_debug_level level, u32 mask)
-{
- if (hldev == NULL)
- return;
-
-#if defined(VXGE_DEBUG_TRACE_MASK) || \
- defined(VXGE_DEBUG_ERR_MASK)
- hldev->debug_module_mask = mask;
- hldev->debug_level = level;
-#endif
-
-#if defined(VXGE_DEBUG_ERR_MASK)
- hldev->level_err = level & VXGE_ERR;
-#endif
-
-#if defined(VXGE_DEBUG_TRACE_MASK)
- hldev->level_trace = level & VXGE_TRACE;
-#endif
-}
-
-/*
- * vxge_hw_device_error_level_get - Get the error level
- * This routine returns the current error level set
- */
-u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return VXGE_ERR;
- else
- return hldev->level_err;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_trace_level_get - Get the trace level
- * This routine returns the current trace level set
- */
-u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK)
- if (hldev == NULL)
- return VXGE_TRACE;
- else
- return hldev->level_trace;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_getpause_data - Pause frame generation and reception.
- * Returns the pause frame generation and reception capability of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 *tx, u32 *rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- goto exit;
- }
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
- *tx = 1;
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
- *rx = 1;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_setpause_data - set/reset pause frame generation.
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 tx, u32 rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (tx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- if (rx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
-
- writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
-exit:
- return status;
-}
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
- return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
-}
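
The mask-and-shift matches the standard PCIe Link Status layout: the negotiated link width occupies bits 9:4 of PCI_EXP_LNKSTA (PCI_EXP_LNKSTA_NLW in pci_regs.h), so masking and shifting right by 4 yields the lane count (1, 2, 4, 8, ...).
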
-
-/*
- * __vxge_hw_ring_block_memblock_idx - Return the memblock index
- * This function returns the index of memory block
- */
-static inline u32
-__vxge_hw_ring_block_memblock_idx(u8 *block)
-{
- return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
-}
-
-/*
- * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
- * This function sets index to a memory block
- */
-static inline void
-__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
-{
- *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
-}
-
-/*
- * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
- * in RxD block
- * Sets the next block pointer in RxD block
- */
-static inline void
-__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
-{
- *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
-}
-
-/*
- * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
- * first block
- * Returns the dma address of the first RxD block
- */
-static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
-{
- struct vxge_hw_mempool_dma *dma_object;
-
- dma_object = ring->mempool->memblocks_dma_arr;
- vxge_assert(dma_object != NULL);
-
- return dma_object->addr;
-}
-
-/*
- * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
- * This function returns the dma address of a given item
- */
-static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
- void *item)
-{
- u32 memblock_idx;
- void *memblock;
- struct vxge_hw_mempool_dma *memblock_dma_object;
- ptrdiff_t dma_item_offset;
-
- /* get owner memblock index */
- memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
-
- /* get owner memblock by memblock index */
- memblock = mempoolh->memblocks_arr[memblock_idx];
-
- /* get memblock DMA object by memblock index */
- memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
-
- /* calculate offset in the memblock of this item */
- dma_item_offset = (u8 *)item - (u8 *)memblock;
-
- return memblock_dma_object->addr + dma_item_offset;
-}
-
-/*
- * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
- * This function links the "from" RxD block to the "to" RxD block
- */
-static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
- struct __vxge_hw_ring *ring, u32 from,
- u32 to)
-{
- u8 *to_item, *from_item;
- dma_addr_t to_dma;
-
- /* get "from" RxD block */
- from_item = mempoolh->items_arr[from];
- vxge_assert(from_item);
-
- /* get "to" RxD block */
- to_item = mempoolh->items_arr[to];
- vxge_assert(to_item);
-
- /* get the dma start address of the "to" RxD block */
- to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
-
- /* set the next pointer of the "from" RxD block to point to the
- * "to" block's DMA start address */
- __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
-}
-
-/*
- * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
- * block callback
- * This function is the callback passed to __vxge_hw_mempool_create to create
- * the memory pool for RxD blocks
- */
-static void
-__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 i;
- void *item = mempoolh->items_arr[index];
- struct __vxge_hw_ring *ring =
- (struct __vxge_hw_ring *)mempoolh->userdata;
-
- /* format rxds array */
- for (i = 0; i < ring->rxds_per_block; i++) {
- void *rxdblock_priv;
- void *uld_priv;
- struct vxge_hw_ring_rxd_1 *rxdp;
-
- u32 reserve_index = ring->channel.reserve_ptr -
- (index * ring->rxds_per_block + i + 1);
- u32 memblock_item_idx;
-
- ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
- i * ring->rxd_size;
-
- /* Note: memblock_item_idx is index of the item within
- * the memblock. For instance, in case of three RxD-blocks
- * per memblock this value can be 0, 1 or 2. */
- rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
- memblock_index, item,
- &memblock_item_idx);
-
- rxdp = ring->channel.reserve_arr[reserve_index];
-
- uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
-
- /* pre-format Host_Control */
- rxdp->host_control = (u64)(size_t)uld_priv;
- }
-
- __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
-
- if (is_last) {
- /* link last one with first one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
- }
-
- if (index > 0) {
- /* link this RxD block with previous one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
- }
-}
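
To make the reserve_index arithmetic concrete (numbers illustrative): with channel.reserve_ptr == 256 and rxds_per_block == 4, the RxD at block index 0, slot i == 0 lands in reserve_arr[256 - (0 * 4 + 0 + 1)] = reserve_arr[255], and each subsequent RxD fills one slot lower. The reserve array is thus populated from the top down while the memblocks themselves are formatted in ascending order.
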
-
-/*
- * vxge_hw_ring_replenish - Initial replenish of RxDs
- * This function replenishes the RxDs from the reserve array to the work array
- */
-static enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
-{
- void *rxd;
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &ring->channel;
-
- while (vxge_hw_channel_dtr_count(channel) > 0) {
-
- status = vxge_hw_ring_rxd_reserve(ring, &rxd);
-
- vxge_assert(status == VXGE_HW_OK);
-
- if (ring->rxd_init) {
- status = ring->rxd_init(rxd, channel->userdata);
- if (status != VXGE_HW_OK) {
- vxge_hw_ring_rxd_free(ring, rxd);
- goto exit;
- }
- }
-
- vxge_hw_ring_rxd_post(ring, rxd);
- }
- status = VXGE_HW_OK;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-static struct __vxge_hw_channel *
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type,
- u32 length, u32 per_dtr_space,
- void *userdata)
-{
- struct __vxge_hw_channel *channel;
- struct __vxge_hw_device *hldev;
- int size = 0;
- u32 vp_id;
-
- hldev = vph->vpath->hldev;
- vp_id = vph->vpath->vp_id;
-
- switch (type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- size = sizeof(struct __vxge_hw_fifo);
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- size = sizeof(struct __vxge_hw_ring);
- break;
- default:
- break;
- }
-
- channel = kzalloc(size, GFP_KERNEL);
- if (channel == NULL)
- goto exit0;
- INIT_LIST_HEAD(&channel->item);
-
- channel->common_reg = hldev->common_reg;
- channel->first_vp_id = hldev->first_vp_id;
- channel->type = type;
- channel->devh = hldev;
- channel->vph = vph;
- channel->userdata = userdata;
- channel->per_dtr_space = per_dtr_space;
- channel->length = length;
- channel->vp_id = vp_id;
-
- channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->work_arr == NULL)
- goto exit1;
-
- channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->free_arr == NULL)
- goto exit1;
- channel->free_ptr = length;
-
- channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->reserve_arr == NULL)
- goto exit1;
- channel->reserve_ptr = length;
- channel->reserve_top = 0;
-
- channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->orig_arr == NULL)
- goto exit1;
-
- return channel;
-exit1:
- __vxge_hw_channel_free(channel);
-
-exit0:
- return NULL;
-}
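
The cleanup above leans on kzalloc() zeroing the whole channel up front: every array pointer starts out NULL, so a single exit1 label can hand the partially built channel to __vxge_hw_channel_free() no matter which kcalloc() failed. A condensed sketch of the same pattern with hypothetical names (the struct and function below are illustrative only, not part of the driver):

    /* Illustrative sketch of the zero-then-single-cleanup pattern.
     * Needs <linux/slab.h>; kfree(NULL) is a no-op, so no per-field
     * bookkeeping is required. */
    struct demo_channel { void **work_arr, **free_arr; };

    static struct demo_channel *demo_channel_alloc(u32 length)
    {
            struct demo_channel *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

            if (!ch)
                    return NULL;

            ch->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
            ch->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
            if (!ch->work_arr || !ch->free_arr) {
                    kfree(ch->work_arr);    /* safe even if NULL */
                    kfree(ch->free_arr);
                    kfree(ch);
                    return NULL;
            }
            return ch;
    }
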
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
-{
- struct __vxge_hw_blockpool *blockpool;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- dma_addr_t dma_addr;
-
- blockpool = &devh->block_pool;
-
- if (block_addr == NULL) {
- blockpool->req_out--;
- goto exit;
- }
-
- dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
- DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
- vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
- blockpool->req_out--;
- goto exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry) {
- entry->length = length;
- entry->memblock = block_addr;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_h;
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- blockpool->req_out--;
-
-exit:
- return;
-}
-
-static inline void
-vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
-{
- void *vaddr;
-
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
- u32 nreq = 0, i;
-
- if ((blockpool->pool_size + blockpool->req_out) <
- VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
- nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
- blockpool->req_out += nreq;
- }
-
- for (i = 0; i < nreq; i++)
- vxge_os_dma_malloc_async(
- (blockpool->hldev)->pdev,
- blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
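
Taken together, pool_size counts blocks sitting on the free list and req_out counts allocations still in flight: __vxge_hw_blockpool_blocks_add() tops the pool up whenever pool_size + req_out drops below VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE, and vxge_hw_blockpool_block_add() above settles each request by either converting it into pool_size++ or, on allocation or mapping failure, simply decrementing req_out.
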
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
- */
-static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- void *memblock = NULL;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
-
- memblock = vxge_os_dma_malloc(devh->pdev, size,
- &dma_object->handle,
- &dma_object->acc_handle);
-
- if (!memblock)
- goto exit;
-
- dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
- size, DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
- vxge_os_dma_free(devh->pdev, memblock,
- &dma_object->acc_handle);
- memblock = NULL;
- goto exit;
- }
-
- } else {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- dma_object->addr = entry->dma_addr;
- dma_object->handle = entry->dma_handle;
- dma_object->acc_handle = entry->acc_handle;
- memblock = entry->memblock;
-
- list_add(&entry->item,
- &blockpool->free_entry_list);
- blockpool->pool_size--;
- }
-
- if (memblock != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
- }
-exit:
- return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static void
-__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- if (blockpool->pool_size < blockpool->pool_max)
- break;
-
- dma_unmap_single(&(blockpool->hldev)->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(
- (blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
- list_add(p, &blockpool->free_entry_list);
-
- blockpool->pool_size--;
-
- }
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allocated with
- * __vxge_hw_blockpool_malloc
- */
-static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
- void *memblock, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
- dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
- DMA_BIDIRECTIONAL);
- vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
- } else {
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(
- struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = size;
- entry->memblock = memblock;
- entry->dma_addr = dma_object->addr;
- entry->acc_handle = dma_object->acc_handle;
- entry->dma_handle = dma_object->handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- if (status == VXGE_HW_OK)
- __vxge_hw_blockpool_blocks_remove(blockpool);
- }
-}
-
-/*
- * __vxge_hw_mempool_destroy
- */
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
-{
- u32 i, j;
- struct __vxge_hw_device *devh = mempool->devh;
-
- for (i = 0; i < mempool->memblocks_allocated; i++) {
- struct vxge_hw_mempool_dma *dma_object;
-
- vxge_assert(mempool->memblocks_arr[i]);
- vxge_assert(mempool->memblocks_dma_arr + i);
-
- dma_object = mempool->memblocks_dma_arr + i;
-
- for (j = 0; j < mempool->items_per_memblock; j++) {
- u32 index = i * mempool->items_per_memblock + j;
-
- /* to skip last partially filled(if any) memblock */
- if (index >= mempool->items_current)
- break;
- }
-
- vfree(mempool->memblocks_priv_arr[i]);
-
- __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
- mempool->memblock_size, dma_object);
- }
-
- vfree(mempool->items_arr);
- vfree(mempool->memblocks_dma_arr);
- vfree(mempool->memblocks_priv_arr);
- vfree(mempool->memblocks_arr);
- vfree(mempool);
-}
-
-/*
- * __vxge_hw_mempool_grow
- * Will resize mempool up to %num_allocate value.
- */
-static enum vxge_hw_status
-__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
- u32 *num_allocated)
-{
- u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
- u32 n_items = mempool->items_per_memblock;
- u32 start_block_idx = mempool->memblocks_allocated;
- u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- *num_allocated = 0;
-
- if (end_block_idx > mempool->memblocks_max) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- for (i = start_block_idx; i < end_block_idx; i++) {
- u32 j;
- u32 is_last = ((end_block_idx - 1) == i);
- struct vxge_hw_mempool_dma *dma_object =
- mempool->memblocks_dma_arr + i;
- void *the_memblock;
-
- /* allocate memblock's private part. Each DMA memblock
- * has a space allocated for item's private usage upon
- * mempool's user request. Each time mempool grows, it will
- * allocate new memblock and its private part at once.
- * This helps to minimize memory usage a lot. */
- mempool->memblocks_priv_arr[i] =
- vzalloc(array_size(mempool->items_priv_size, n_items));
- if (mempool->memblocks_priv_arr[i] == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- /* allocate DMA-capable memblock */
- mempool->memblocks_arr[i] =
- __vxge_hw_blockpool_malloc(mempool->devh,
- mempool->memblock_size, dma_object);
- if (mempool->memblocks_arr[i] == NULL) {
- vfree(mempool->memblocks_priv_arr[i]);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- (*num_allocated)++;
- mempool->memblocks_allocated++;
-
- memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
-
- the_memblock = mempool->memblocks_arr[i];
-
- /* fill the items hash array */
- for (j = 0; j < n_items; j++) {
- u32 index = i * n_items + j;
-
- if (first_time && index >= mempool->items_initial)
- break;
-
- mempool->items_arr[index] =
- ((char *)the_memblock + j*mempool->item_size);
-
- /* let the caller do more work on each item */
- if (mempool->item_func_alloc != NULL)
- mempool->item_func_alloc(mempool, i,
- dma_object, index, is_last);
-
- mempool->items_current = index + 1;
- }
-
- if (first_time && mempool->items_current ==
- mempool->items_initial)
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_mempool_create
- * This function creates a memory pool object. The pool may grow but will
- * never shrink. The pool consists of a number of dynamically allocated
- * blocks large enough to hold %items_initial items. Memory is DMA-able,
- * but the client must map/unmap it before interoperating with the device.
- */
-static struct vxge_hw_mempool *
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 items_priv_size,
- u32 items_initial,
- u32 items_max,
- const struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 memblocks_to_allocate;
- struct vxge_hw_mempool *mempool = NULL;
- u32 allocated;
-
- if (memblock_size < item_size) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- mempool = vzalloc(sizeof(struct vxge_hw_mempool));
- if (mempool == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- mempool->devh = devh;
- mempool->memblock_size = memblock_size;
- mempool->items_max = items_max;
- mempool->items_initial = items_initial;
- mempool->item_size = item_size;
- mempool->items_priv_size = items_priv_size;
- mempool->item_func_alloc = mp_callback->item_func_alloc;
- mempool->userdata = userdata;
-
- mempool->memblocks_allocated = 0;
-
- mempool->items_per_memblock = memblock_size / item_size;
-
- mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* allocate array of memblocks */
- mempool->memblocks_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of private parts of items per memblocks */
- mempool->memblocks_priv_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_priv_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr =
- vzalloc(array_size(sizeof(struct vxge_hw_mempool_dma),
- mempool->memblocks_max));
- if (mempool->memblocks_dma_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate hash array of items */
- mempool->items_arr = vzalloc(array_size(sizeof(void *),
- mempool->items_max));
- if (mempool->items_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* calculate initial number of memblocks */
- memblocks_to_allocate = (mempool->items_initial +
- mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* pre-allocate the mempool */
- status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
- &allocated);
- if (status != VXGE_HW_OK) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
-exit:
- return mempool;
-}
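
Both memblocks_max and memblocks_to_allocate are plain ceiling divisions of the form (n + per_block - 1) / per_block. With illustrative numbers, items_max = 100 and items_per_memblock = 8 give (100 + 7) / 8 = 13 memblocks, the last only partially used; the index check in __vxge_hw_mempool_destroy's inner loop accounts for exactly that partial tail.
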
-
-/*
- * __vxge_hw_ring_abort - Return outstanding RxDs
- * This function terminates the outstanding RxDs of the ring
- */
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
-{
- void *rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(channel, &rxdh);
-
- if (rxdh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(channel);
-
- if (ring->rxd_term)
- ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
- channel->userdata);
-
- vxge_hw_channel_dtr_free(channel, rxdh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- __vxge_hw_ring_abort(ring);
-
- status = __vxge_hw_channel_reset(channel);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_ring_delete - Removes the ring
- * This function frees up the memory pool and removes the ring
- */
-static enum vxge_hw_status
-__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_ring *ring = vp->vpath->ringh;
-
- __vxge_hw_ring_abort(ring);
-
- if (ring->mempool)
- __vxge_hw_mempool_destroy(ring->mempool);
-
- vp->vpath->ringh = NULL;
- __vxge_hw_channel_free(&ring->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates a ring and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_ring_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_ring *ring;
- u32 ring_length;
- struct vxge_hw_ring_config *config;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
- static const struct vxge_hw_mempool_cbs ring_mp_callback = {
- .item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
- };
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- hldev = vp->vpath->hldev;
- vp_id = vp->vpath->vp_id;
-
- config = &hldev->config.vp_config[vp_id].ring;
-
- ring_length = config->ring_blocks *
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_RING,
- ring_length,
- attr->per_rxd_space,
- attr->userdata);
- if (ring == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vp->vpath->ringh = ring;
- ring->vp_id = vp_id;
- ring->vp_reg = vp->vpath->vp_reg;
- ring->common_reg = hldev->common_reg;
- ring->stats = &vp->vpath->sw_stats->ring_stats;
- ring->config = config;
- ring->callback = attr->callback;
- ring->rxd_init = attr->rxd_init;
- ring->rxd_term = attr->rxd_term;
- ring->buffer_mode = config->buffer_mode;
- ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
- ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
- ring->rxds_limit = config->rxds_limit;
-
- ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
- ring->rxd_priv_size =
- sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
- ring->per_rxd_space = attr->per_rxd_space;
-
- ring->rxd_priv_size =
- ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- /* how many RxDs can fit into one block. Depends on configured
- * buffer_mode. */
- ring->rxds_per_block =
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- /* calculate actual RxD block private size */
- ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring->mempool = __vxge_hw_mempool_create(hldev,
- VXGE_HW_BLOCK_SIZE,
- VXGE_HW_BLOCK_SIZE,
- ring->rxdblock_priv_size,
- ring->config->ring_blocks,
- ring->config->ring_blocks,
- &ring_mp_callback,
- ring);
- if (ring->mempool == NULL) {
- __vxge_hw_ring_delete(vp);
- return VXGE_HW_ERR_OUT_OF_MEMORY;
- }
-
- status = __vxge_hw_channel_initialize(&ring->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
-
- /* Note:
- * Specifying rxd_init callback means two things:
- * 1) rxds need to be initialized by driver at channel-open time;
- * 2) rxds need to be posted at channel-open time
- * (that's what the initial_replenish() below does)
- * Currently we don't have a case when the 1) is done without the 2).
- */
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
- }
-
- /* initial replenish will increment the counter in its post() routine,
- * we have to reset it */
- ring->stats->common_stats.usage_cnt = 0;
-exit:
- return status;
-}
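
The rxd_priv_size rounding above is the usual round-up-to-a-multiple idiom, ((x + CL - 1) / CL) * CL. With a 64-byte VXGE_CACHE_LINE_SIZE and, say, a raw private size of 72 bytes, the result is ((72 + 63) / 64) * 64 = 128, so each RxD's private area starts on a cacheline boundary (the numbers are illustrative).
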
-
-/*
- * vxge_hw_device_config_default_get - Initialize device config with defaults.
- * Initialize Titan device config with default values.
- */
-enum vxge_hw_status
-vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
-{
- u32 i;
-
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
- device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
- device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
- device_config->rth_en = VXGE_HW_RTH_DEFAULT;
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
- device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
- device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
-
- device_config->vp_config[i].min_bandwidth =
- VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
-
- device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- VXGE_HW_MIN_FIFO_BLOCKS;
-
- device_config->vp_config[i].fifo.max_frags =
- VXGE_HW_MAX_FIFO_FRAGS;
-
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
-
- device_config->vp_config[i].fifo.alignment_size =
- VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
-
- device_config->vp_config[i].fifo.no_snoop_bits =
- VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].tti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].rti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].mtu =
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
- * Set the swapper bits appropriately for the vpath.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
-#ifndef __BIG_ENDIAN
- u64 val64;
-
- val64 = readq(&vpath_reg->vpath_general_cfg1);
- wmb();
- val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
- writeq(val64, &vpath_reg->vpath_general_cfg1);
- wmb();
-#endif
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
- * Set the swapper bits appropriately for the KDFC.
- */
-static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
-
- val64 = readq(&legacy_reg->pifm_wr_swap_en);
-
- if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
- val64 = readq(&vpath_reg->kdfcctl_cfg0);
- wmb();
-
- val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
-
- writeq(val64, &vpath_reg->kdfcctl_cfg0);
- wmb();
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_read - Read Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 *value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->srpcim_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
- */
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
-{
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- int i = 0, j = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- vpmgmt_reg = hldev->vpmgmt_reg[i];
- for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
- if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
- & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
- return VXGE_HW_FAIL;
- }
- }
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_write - Write Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
- offset);
-
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_fifo_abort - Return outstanding TxDs
- * This function terminates the outstanding TxDs of the fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
- void *txdlh;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
- if (txdlh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(&fifo->channel);
-
- if (fifo->txdl_term) {
- fifo->txdl_term(txdlh,
- VXGE_HW_TXDL_STATE_POSTED,
- fifo->channel.userdata);
- }
-
- vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_fifo_abort(fifo);
- status = __vxge_hw_channel_reset(&fifo->channel);
-
- return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function frees up the memory pool and removes the FIFO
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
- __vxge_hw_fifo_abort(fifo);
-
- if (fifo->mempool)
- __vxge_hw_mempool_destroy(fifo->mempool);
-
- vp->vpath->fifoh = NULL;
-
- __vxge_hw_channel_free(&fifo->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
- * list callback
- * This function is the callback passed to __vxge_hw_mempool_create to create
- * the memory pool for TxD lists
- */
-static void
-__vxge_hw_fifo_mempool_item_alloc(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 memblock_item_idx;
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
- struct __vxge_hw_fifo *fifo =
- (struct __vxge_hw_fifo *)mempoolh->userdata;
- void *memblock = mempoolh->memblocks_arr[memblock_index];
-
- vxge_assert(txdp);
-
- txdp->host_control = (u64) (size_t)
- __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
- &memblock_item_idx);
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- vxge_assert(txdl_priv);
-
- fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
-
- /* pre-format HW's TxDL's private */
- txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
- txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
- txdl_priv->dma_handle = dma_object->handle;
- txdl_priv->memblock = memblock;
- txdl_priv->first_txdp = txdp;
- txdl_priv->next_txdl_priv = NULL;
- txdl_priv->alloc_frags = 0;
-}
-
-/*
- * __vxge_hw_fifo_create - Create a FIFO
- * This function creates a FIFO and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_fifo_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_fifo *fifo;
- struct vxge_hw_fifo_config *config;
- u32 txdl_size, txdl_per_memblock;
- struct vxge_hw_mempool_cbs fifo_mp_callback;
- struct __vxge_hw_virtualpath *vpath;
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
- config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
-
- txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
-
- txdl_per_memblock = config->memblock_size / txdl_size;
-
- fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_FIFO,
- config->fifo_blocks * txdl_per_memblock,
- attr->per_txdl_space, attr->userdata);
-
- if (fifo == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vpath->fifoh = fifo;
- fifo->nofl_db = vpath->nofl_db;
-
- fifo->vp_id = vpath->vp_id;
- fifo->vp_reg = vpath->vp_reg;
- fifo->stats = &vpath->sw_stats->fifo_stats;
-
- fifo->config = config;
-
- /* apply "interrupts per txdl" attribute */
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
- fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
- fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
-
- if (fifo->config->intr)
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
-
- fifo->no_snoop_bits = config->no_snoop_bits;
-
- /*
- * FIFO memory management strategy:
- *
- * TxDL split into three independent parts:
- * - set of TxD's
- * - TxD HW private part
- * - driver private part
- *
- * Adaptive memory allocation is used, i.e. memory is allocated on
- * demand, sized so that it fits into one memory block.
- * One memory block may contain more than one TxDL.
- *
- * During "reserve" operations more memory can be allocated on demand
- * for example due to FIFO full condition.
- *
- * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
- * routine which will essentially stop the channel and free resources.
- */
-
- /* TxDL common private size == TxDL private + driver private */
- fifo->priv_size =
- sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
- fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- fifo->per_txdl_space = attr->per_txdl_space;
-
- /* store the TxDL geometry computed earlier */
- fifo->txdl_size = txdl_size;
- fifo->txdl_per_memblock = txdl_per_memblock;
-
- fifo->txdl_term = attr->txdl_term;
- fifo->callback = attr->callback;
-
- if (fifo->txdl_per_memblock == 0) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
- goto exit;
- }
-
- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
- fifo->mempool =
- __vxge_hw_mempool_create(vpath->hldev,
- fifo->config->memblock_size,
- fifo->txdl_size,
- fifo->priv_size,
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- &fifo_mp_callback,
- fifo);
-
- if (fifo->mempool == NULL) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_channel_initialize(&fifo->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_fifo_delete(vp);
- goto exit;
- }
-
- vxge_assert(fifo->channel.reserve_ptr);
-exit:
- return status;
-}
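
For a concrete (purely illustrative) sizing: if max_frags were 64 and struct vxge_hw_fifo_txd were 32 bytes, txdl_size would be 2048 bytes and an 8192-byte memblock would hold txdl_per_memblock = 4 TxDLs. A memblock_size smaller than one TxDL makes that quotient 0, which is precisely the VXGE_HW_ERR_INVALID_BLOCK_SIZE case rejected above; fifo->priv_size gets the same cacheline round-up as the ring's rxd_priv_size.
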
-
-/*
- * __vxge_hw_vpath_pci_read - Read the content of a given address
- * in PCI config space.
- * Reads from the vpath PCI config space.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0, u32 offset, u32 *val)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
-
- if (phy_func_0)
- val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
-
- writeq(val64, &vp_reg->pci_config_access_cfg1);
- wmb();
- writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
- &vp_reg->pci_config_access_cfg2);
- wmb();
-
- status = __vxge_hw_device_register_poll(
- &vp_reg->pci_config_access_cfg2,
- VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->pci_config_access_status);
-
- if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
- status = VXGE_HW_FAIL;
- *val = 0;
- } else
- *val = (u32)vxge_bVALn(val64, 32, 32);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_device_flick_link_led - Flick (blink) link LED.
- * @hldev: HW device.
- * @on_off: TRUE to turn flickering on, FALSE to turn it off
- *
- * Flicker the link LED.
- */
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
-{
- struct __vxge_hw_virtualpath *vpath;
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (hldev == NULL) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- data0 = on_off;
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset,
- u64 *data0, u64 *data1)
-{
- enum vxge_hw_status status;
- u64 steer_ctrl = 0;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
- }
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- data0, data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
- (rts_table !=
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- *data1 = 0;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
- u32 rts_table, u32 offset, u64 steer_data0,
- u64 steer_data1)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- data0 = steer_data0;
-
- if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- data1 = steer_data1;
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vp,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size)
-{
- u64 data0, data1;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, &data0, &data1);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
-
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
-
- if (hash_type->hash_type_tcpipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
-
- if (hash_type->hash_type_ipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
-
- if (hash_type->hash_type_tcpipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
-
- if (hash_type->hash_type_ipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
-
- if (hash_type->hash_type_tcpipv6ex_en)
- data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
-
- if (hash_type->hash_type_ipv6ex_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
-
- if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
- data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
- else
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, data0, 0);
-exit:
- return status;
-}
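/*
 * Usage sketch (assumption-laden, not from the original sources):
 * enable RTH hashing of TCP/IPv4 and IPv4 traffic with 16 buckets
 * (bucket_size 4 => 2^4 entries). RTH_ALG_JENKINS is assumed to be a
 * member of enum vxge_hw_rth_algoritms.
 */
static enum vxge_hw_status example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en = 1,
		.hash_type_ipv4_en = 1,
		/* all other hash types left disabled */
	};

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
					 &hash_types, 4);
}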
-
-static void
-vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
- u16 flag, u8 *itable)
-{
- switch (flag) {
- case 1:
- *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 2:
- *data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 3:
- *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 4:
- *data1 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- return;
- default:
- return;
- }
-}
-/*
- * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size)
-{
- u32 i, j, action, rts_table;
- u64 data0;
- u64 data1;
- u32 max_entries;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- max_entries = (((u32)1) << itable_size);
-
- if (vp->vpath->hldev->config.rth_it_type
- == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
- action, rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[mtable[itable[j]]], action,
- rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- } else {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
- for (i = 0; i < vpath_count; i++) {
-
- for (j = 0; j < max_entries;) {
-
- data0 = 0;
- data1 = 0;
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 1, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 2, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 3, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 4, itable);
- j++;
- break;
- }
-
- if (data0 != 0) {
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[i],
- action, rts_table,
- 0, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- }
- }
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_check_leak - Check for memory leak
- * @ring: Handle to the ring object used for receive
- *
- * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
- * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
- * Returns: VXGE_HW_FAIL, if leak has occurred.
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 rxd_new_count, rxd_spat;
-
- if (ring == NULL)
- return status;
-
- rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
- rxd_spat = readq(&ring->vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
-
- if (rxd_new_count >= rxd_spat)
- status = VXGE_HW_FAIL;
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mgmt_read
- * This routine reads the vpath_mgmt registers
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mgmt_read(
- struct __vxge_hw_device *hldev,
- struct __vxge_hw_virtualpath *vpath)
-{
- u32 i, mtu = 0, max_pyld = 0;
- u64 val64;
-
- for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- val64 = readq(&vpath->vpmgmt_reg->
- rxmac_cfg0_port_vpmgmt_clone[i]);
- max_pyld =
- (u32)
- VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
- (val64);
- if (mtu < max_pyld)
- mtu = max_pyld;
- }
-
- vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (val64 & vxge_mBIT(i))
- vpath->vsport_number = i;
- }
-
- val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
-
- if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
- else
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
- * This routine checks the vpath_rst_in_prog register to see if
- * adapter completed the reset process for the vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
-{
- enum vxge_hw_status status;
-
- status = __vxge_hw_device_register_poll(
- &vpath->hldev->common_reg->vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
- 1 << (16 - vpath->vp_id)),
- vpath->hldev->config.device_poll_millis);
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_reset
- * This routine resets the vpath on the device
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg0);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_sw_reset
- * This routine resets the vpath structures
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->ringh) {
- status = __vxge_hw_ring_reset(vpath->ringh);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- if (vpath->fifoh)
- status = __vxge_hw_fifo_reset(vpath->fifoh);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_prc_configure
- * This routine configures the prc registers of virtual path using the config
- * passed
- */
-static void
-__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
- return;
-
- val64 = readq(&vp_reg->prc_cfg1);
- val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
- writeq(val64, &vp_reg->prc_cfg1);
-
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
- writeq(val64, &vpath->vp_reg->prc_cfg6);
-
- val64 = readq(&vp_reg->prc_cfg7);
-
- if (vpath->vp_config->ring.scatter_mode !=
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
-
- val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
-
- switch (vpath->vp_config->ring.scatter_mode) {
- case VXGE_HW_RING_SCATTER_MODE_A:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
- break;
- case VXGE_HW_RING_SCATTER_MODE_B:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
- break;
- case VXGE_HW_RING_SCATTER_MODE_C:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
- break;
- }
- }
-
- writeq(val64, &vp_reg->prc_cfg7);
-
- writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
- __vxge_hw_ring_first_block_address_get(
- vpath->ringh) >> 3), &vp_reg->prc_cfg5);
-
- val64 = readq(&vp_reg->prc_cfg4);
- val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
- val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
-
- val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
- VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
-
- if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
- val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
- else
- val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
-
- writeq(val64, &vp_reg->prc_cfg4);
-}
-
-/*
- * __vxge_hw_vpath_kdfc_configure
- * This routine configures the kdfc registers of virtual path using the
- * config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u64 vpath_stride;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
-
- vpath->max_kdfc_db =
- (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
- val64+1)/2;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- vpath->max_nofl_db = vpath->max_kdfc_db;
-
- if (vpath->max_nofl_db <
- ((vpath->vp_config->fifo.memblock_size /
- (vpath->vp_config->fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd))) *
- vpath->vp_config->fifo.fifo_blocks)) {
-
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
- }
- val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
- (vpath->max_nofl_db*2)-1);
- }
-
- writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
-
- writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
- &vp_reg->kdfc_fifo_trpl_ctrl);
-
- val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
-
- val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
-
- val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
-#ifndef __BIG_ENDIAN
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
-#endif
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
-
- writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
- writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
- wmb();
- vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
-
- vpath->nofl_db =
- (struct __vxge_hw_non_offload_db_wrapper __iomem *)
- (hldev->kdfc + (vp_id *
- VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
- vpath_stride)));
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mac_configure
- * This routine configures the mac of virtual path using the config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
- vpath->vsport_number), &vp_reg->xmac_vsport_choice);
-
- if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->xmac_rpa_vcfg);
-
- if (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
- if (vp_config->rpa_strip_vlan_tag)
- val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- else
- val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- }
-
- writeq(val64, &vp_reg->xmac_rpa_vcfg);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-
- if (vp_config->mtu !=
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- if ((vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE);
- else
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vpath->max_mtu);
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg0);
-
- val64 = readq(&vp_reg->rxmac_vcfg1);
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
-
- if (hldev->config.rth_it_type ==
- VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
- val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
- 0x2) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg1);
- }
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_tim_configure
- * This routine configures the tim registers of virtual path using the config
- * passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- writeq(0, &vp_reg->tim_dest_addr);
- writeq(0, &vp_reg->tim_vpath_map);
- writeq(0, &vp_reg->tim_bitmap);
- writeq(0, &vp_reg->tim_remap);
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE)
- writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
- (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
-
- val64 = readq(&vp_reg->tim_pci_cfg);
- val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
- writeq(val64, &vp_reg->tim_pci_cfg);
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->tti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->tti.urange_a);
- }
-
- if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->tti.urange_b);
- }
-
- if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->tti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->tti.uec_a);
- }
-
- if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->tti.uec_b);
- }
-
- if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->tti.uec_c);
- }
-
- if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->tti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->tti.rtimer_val);
- }
-
- if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->tti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg3_saved = val64;
- }
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->rti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->rti.urange_a);
- }
-
- if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->rti.urange_b);
- }
-
- if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->rti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->rti.uec_a);
- }
-
- if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->rti.uec_b);
- }
-
- if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->rti.uec_c);
- }
-
- if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->rti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->rti.rtimer_val);
- }
-
- if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->rti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg3_saved = val64;
- }
-
- val64 = 0;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-
- val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
- writeq(val64, &vp_reg->tim_wrkld_clc);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_initialize
- * This routine is the final phase of init which initializes the
- * registers of the vpath using the configuration passed.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u32 val32;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
-
- /* Get MRRS value from device control */
- status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
- if (status == VXGE_HW_OK) {
- val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
- val64 &=
- ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
- }
-
- val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
- VXGE_HW_MAX_PAYLOAD_SIZE_512);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
- writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and freeup memory
- */
-static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
- goto exit;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
- vpath->hldev->tim_int_mask1, vpath->vp_id);
- hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
- /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
- * work after the interface is brought down.
- */
- spin_lock(&vpath->lock);
- vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
- spin_unlock(&vpath->lock);
-
- vpath->vpmgmt_reg = NULL;
- vpath->nofl_db = NULL;
- vpath->max_mtu = 0;
- vpath->vsport_number = 0;
- vpath->max_kdfc_db = 0;
- vpath->max_nofl_db = 0;
- vpath->ringh = NULL;
- vpath->fifoh = NULL;
- memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
- vpath->stats_block = NULL;
- vpath->hw_stats = NULL;
- vpath->hw_stats_sav = NULL;
- vpath->sw_stats = NULL;
-
-exit:
- return;
-}
-
-/*
- * __vxge_hw_vp_initialize - Initialize Virtual Path structure
- * This routine is the initial phase of init which resets the vpath and
- * initializes the software support structures.
- */
-static enum vxge_hw_status
-__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
- struct vxge_hw_vp_config *config)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[vp_id];
-
- spin_lock_init(&vpath->lock);
- vpath->vp_id = vp_id;
- vpath->vp_open = VXGE_HW_VP_OPEN;
- vpath->hldev = hldev;
- vpath->vp_config = config;
- vpath->vp_reg = hldev->vpath_reg[vp_id];
- vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
-
- __vxge_hw_vpath_reset(hldev, vp_id);
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- INIT_LIST_HEAD(&vpath->vpath_handles);
-
- vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
- hldev->tim_int_mask1, vp_id);
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- __vxge_hw_vp_terminate(hldev, vp_id);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_mtu_set - Set MTU.
- * Set new MTU value. Example, to use jumbo frames:
- * vxge_hw_vpath_mtu_set(my_device, 9600);
- */
-enum vxge_hw_status
-vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
-
- new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
- status = VXGE_HW_ERR_INVALID_MTU_SIZE;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-
- vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
- * Enable the DMA vpath statistics. Call this function to re-enable the
- * adapter to update stats into the host memory.
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- memcpy(vpath->hw_stats_sav, vpath->hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from the block pool free list and
- * triggers a replenish of the pool
- */
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (size == blockpool->block_size) {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- blockpool->pool_size--;
- }
- }
-
- if (entry != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
-
- return entry;
-}
-
-/*
- * vxge_hw_vpath_open - Open a virtual path on a given adapter
- * This function is used to open access to a virtual path of an
- * adapter for offload and GRO operations. It returns
- * synchronously.
- */
-enum vxge_hw_status
-vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct __vxge_hw_vpath_handle *vp;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[attr->vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_OPEN) {
- status = VXGE_HW_ERR_INVALID_STATE;
- goto vpath_open_exit1;
- }
-
- status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
- &hldev->config.vp_config[attr->vp_id]);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit1;
-
- vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
- if (vp == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit2;
- }
-
- vp->vpath = vpath;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
- status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit6;
- }
-
- if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
- status = __vxge_hw_ring_create(vp, &attr->ring_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit7;
-
- __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
- }
-
- vpath->fifoh->tx_intr_num =
- (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_TX;
-
- vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
- VXGE_HW_BLOCK_SIZE);
- if (vpath->stats_block == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit8;
- }
-
- vpath->hw_stats = vpath->stats_block->memblock;
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
- vpath->hw_stats;
-
- vpath->hw_stats_sav =
- &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit8;
-
- list_add(&vp->item, &vpath->vpath_handles);
-
- hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
-
- *vpath_handle = vp;
-
- attr->fifo_attr.userdata = vpath->fifoh;
- attr->ring_attr.userdata = vpath->ringh;
-
- return VXGE_HW_OK;
-
-vpath_open_exit8:
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-vpath_open_exit7:
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-vpath_open_exit6:
- vfree(vp);
-vpath_open_exit2:
- __vxge_hw_vp_terminate(hldev, attr->vp_id);
-vpath_open_exit1:
-
- return status;
-}
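/*
 * Open/close pairing, sketched (error handling trimmed; the fifo/ring
 * callback members of the attr structure are assumed to be filled in
 * by the caller before the open):
 */
static enum vxge_hw_status example_bring_up_vpath(struct __vxge_hw_device *hldev)
{
	struct vxge_hw_vpath_attr attr = { .vp_id = 0 };
	struct __vxge_hw_vpath_handle *vp;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_open(hldev, &attr, &vp);
	if (status != VXGE_HW_OK)
		return status;

	vxge_hw_vpath_rx_doorbell_init(vp);	/* prime the RxD doorbell */
	/* ... pass traffic ... */
	return vxge_hw_vpath_close(vp);
}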
-
-/**
- * vxge_hw_vpath_rx_doorbell_init - Program the initial RxD doorbell
- * @vp: Handle got from previous vpath open
- *
- * This function posts the initial receive descriptor count to the PRC
- * doorbell and derives the ring's RxD replenish limit.
- */
-void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct __vxge_hw_ring *ring = vpath->ringh;
- struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
- u64 new_count, val64, val164;
-
- if (vdev->titan1) {
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- } else
- new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
-
- val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
-
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
- &vpath->vp_reg->prc_rxd_doorbell);
- readl(&vpath->vp_reg->prc_rxd_doorbell);
-
- val164 /= 2;
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
- val64 &= 0x1ff;
-
- /*
- * Each RxD is of 4 qwords
- */
- new_count -= (val64 + 1);
- val64 = min(val164, new_count) / 4;
-
- ring->rxds_limit = min(ring->rxds_limit, val64);
- if (ring->rxds_limit < 4)
- ring->rxds_limit = 4;
-}
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: HAL device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
- */
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
- struct __vxge_hw_blockpool_entry *entry)
-{
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (entry->length == blockpool->block_size) {
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- __vxge_hw_blockpool_blocks_remove(blockpool);
-}
-
-/*
- * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
- * This function is used to close access to virtual path opened
- * earlier.
- */
-enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- struct __vxge_hw_device *devh = NULL;
- u32 vp_id = vp->vpath->vp_id;
- u32 is_empty = TRUE;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- vpath = vp->vpath;
- devh = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_close_exit;
- }
-
- list_del(&vp->item);
-
- if (!list_empty(&vpath->vpath_handles)) {
- list_add(&vp->item, &vpath->vpath_handles);
- is_empty = FALSE;
- }
-
- if (!is_empty) {
- status = VXGE_HW_FAIL;
- goto vpath_close_exit;
- }
-
- devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
-
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-
- if (vpath->stats_block != NULL)
- __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
-
- vfree(vp);
-
- __vxge_hw_vp_terminate(devh, vp_id);
-
-vpath_close_exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_reset - Resets vpath
- * This function is used to request a reset of vpath
- */
-enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status;
- u32 vp_id;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
-
- vp_id = vpath->vp_id;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
- if (status == VXGE_HW_OK)
- vpath->sw_stats->soft_reset_cnt++;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
- * This function polls for the vpath reset completion and re-initializes
- * the vpath.
- */
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- enum vxge_hw_status status;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
-
- vp_id = vp->vpath->vp_id;
- vpath = vp->vpath;
- hldev = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (vpath->ringh != NULL)
- __vxge_hw_vpath_prc_configure(hldev, vp_id);
-
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr,
- &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
-
-exit:
- return status;
-}
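/*
 * Fragment sketching a typical recovery sequence built from the helpers
 * above: request the reset, poll for completion and re-program the
 * vpath, then clear the reset so traffic can flow again (see
 * vxge_hw_vpath_enable() below).
 */
if (vxge_hw_vpath_reset(vp) == VXGE_HW_OK &&
    vxge_hw_vpath_recover_from_reset(vp) == VXGE_HW_OK)
	vxge_hw_vpath_enable(vp);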
-
-/*
- * vxge_hw_vpath_enable - Enable vpath.
- * This routine clears the vpath reset thereby enabling a vpath
- * to start forwarding frames and generating interrupts.
- */
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_device *hldev;
- u64 val64;
-
- hldev = vp->vpath->hldev;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
- 1 << (16 - vp->vpath->vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg1);
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
deleted file mode 100644
index 0cd0750484ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ /dev/null
@@ -1,2086 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_CONFIG_H
-#define VXGE_CONFIG_H
-#include <linux/hardirq.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-
-#ifndef VXGE_CACHE_LINE_SIZE
-#define VXGE_CACHE_LINE_SIZE 128
-#endif
-
-#ifndef VXGE_ALIGN
-#define VXGE_ALIGN(adrs, size) \
- (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
-#endif
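/*
 * VXGE_ALIGN() returns the pad needed to reach the next "size" boundary
 * (zero when "adrs" is already aligned). Worked examples:
 *
 *   VXGE_ALIGN(0x1004, 128) == 124   (0x1004 + 124 == 0x1080)
 *   VXGE_ALIGN(0x1000, 128) == 0     (already 128-byte aligned)
 */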
-
-#define VXGE_HW_MIN_MTU ETH_MIN_MTU
-#define VXGE_HW_MAX_MTU 9600
-#define VXGE_HW_DEFAULT_MTU 1500
-
-#define VXGE_HW_MAX_ROM_IMAGES 8
-
-struct eprom_image {
- u8 is_valid:1;
- u8 index;
- u8 type;
- u16 version;
-};
-
-#ifdef VXGE_DEBUG_ASSERT
-/**
- * vxge_assert
- * @test: C-condition to check
- * @fmt: printf like format string
- *
- * This macro implements a traditional assert. By default assertions
- * are enabled. They can be disabled by undefining the VXGE_DEBUG_ASSERT
- * macro at compile time.
- */
-#define vxge_assert(test) BUG_ON(!(test))
-#else
-#define vxge_assert(test)
-#endif /* end of VXGE_DEBUG_ASSERT */
-
-/**
- * enum vxge_debug_level
- * @VXGE_NONE: debug disabled
- * @VXGE_ERR: all errors are logged
- * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
- *              are logged. Very noisy.
- *
- * This enumeration is used to switch between different debug levels
- * at runtime if the DEBUG macro is defined at compile time. If the
- * DEBUG macro is not defined, the code is compiled out.
- */
-enum vxge_debug_level {
- VXGE_NONE = 0,
- VXGE_TRACE = 1,
- VXGE_ERR = 2
-};
-
-#define NULL_VPID 0xFFFFFFFF
-#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
-#define VXGE_DEBUG_MODULE_MASK 0xffffffff
-#define VXGE_DEBUG_TRACE_MASK 0xffffffff
-#define VXGE_DEBUG_ERR_MASK 0xffffffff
-#define VXGE_DEBUG_MASK 0x000001ff
-#else
-#define VXGE_DEBUG_MODULE_MASK 0x20000000
-#define VXGE_DEBUG_TRACE_MASK 0x20000000
-#define VXGE_DEBUG_ERR_MASK 0x20000000
-#define VXGE_DEBUG_MASK 0x00000001
-#endif
-
-/*
- * @VXGE_COMPONENT_LL: do debug for vxge link layer module
- * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
- *
- * These values are used to distinguish modules or libraries at
- * compile time and runtime. The Makefile must declare the
- * VXGE_DEBUG_MODULE_MASK macro and set it to the proper value.
- */
-#define VXGE_COMPONENT_LL 0x20000000
-#define VXGE_COMPONENT_ALL 0xffffffff
-
-#define VXGE_HW_BASE_INF 100
-#define VXGE_HW_BASE_ERR 200
-#define VXGE_HW_BASE_BADCFG 300
-
-enum vxge_hw_status {
- VXGE_HW_OK = 0,
- VXGE_HW_FAIL = 1,
- VXGE_HW_PENDING = 2,
- VXGE_HW_COMPLETIONS_REMAIN = 3,
-
- VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
- VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
-
- VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
- VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
- VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
- VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
- VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
- VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
- VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
- VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
- VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
- VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
- VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
- VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
- VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
- VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
- VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
- VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
- VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17,
- VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
- VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
- VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
- VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
- VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
-
- VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
- VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
- VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
- VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
- VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
- VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
- VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
-
- VXGE_HW_EOF_TRACE_BUF = -1
-};
-
-/**
- * enum vxge_hw_device_link_state - Link state enumeration.
- * @VXGE_HW_LINK_NONE: Invalid link state.
- * @VXGE_HW_LINK_DOWN: Link is down.
- * @VXGE_HW_LINK_UP: Link is up.
- *
- */
-enum vxge_hw_device_link_state {
- VXGE_HW_LINK_NONE,
- VXGE_HW_LINK_DOWN,
- VXGE_HW_LINK_UP
-};
-
-/**
- * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
- * @VXGE_HW_FW_UPGRADE_OK: All OK; send the next 16 bytes
- * @VXGE_HW_FW_UPGRADE_DONE: upload completed
- * @VXGE_HW_FW_UPGRADE_ERR: upload error
- * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
- *
- */
-enum vxge_hw_fw_upgrade_code {
- VXGE_HW_FW_UPGRADE_OK = 0,
- VXGE_HW_FW_UPGRADE_DONE = 1,
- VXGE_HW_FW_UPGRADE_ERR = 2,
- VXGE_FW_UPGRADE_BYTES2SKIP = 3
-};
-
-/**
- * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
- * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
- */
-enum vxge_hw_fw_upgrade_err_code {
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
- VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
- VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
- VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
-};
-
-/**
- * struct vxge_hw_device_date - Date Format
- * @day: Day
- * @month: Month
- * @year: Year
- * @date: Date in string format
- *
- * Structure for returning date
- */
-
-#define VXGE_HW_FW_STRLEN 32
-struct vxge_hw_device_date {
- u32 day;
- u32 month;
- u32 year;
- char date[VXGE_HW_FW_STRLEN];
-};
-
-struct vxge_hw_device_version {
- u32 major;
- u32 minor;
- u32 build;
- char version[VXGE_HW_FW_STRLEN];
-};
-
-/**
- * struct vxge_hw_fifo_config - Configuration of fifo.
- * @enable: Is this fifo to be commissioned
- * @fifo_blocks: Number of TxDL (that is, lists of Tx descriptors)
- * blocks per queue.
- * @max_frags: Max number of Tx buffers per TxDL (that is, per single
- * transmit operation).
- * No more than 256 transmit buffers can be specified.
- * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
- * bytes. Setting @memblock_size to page size ensures
- * by-page allocation of descriptors. 128K bytes is the
- * maximum supported block size.
- * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
- * (e.g., to align on a cache line).
- * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
- * Use 0 otherwise.
- * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
- * which generally improves latency of the host bridge operation
- * (see PCI specification). For valid values please refer
- * to struct vxge_hw_fifo_config{} in the driver sources.
- * Configuration of all Titan fifos.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_fifo_config{} structure.
- */
-struct vxge_hw_fifo_config {
- u32 enable;
-#define VXGE_HW_FIFO_ENABLE 1
-#define VXGE_HW_FIFO_DISABLE 0
-
- u32 fifo_blocks;
-#define VXGE_HW_MIN_FIFO_BLOCKS 2
-#define VXGE_HW_MAX_FIFO_BLOCKS 128
-
- u32 max_frags;
-#define VXGE_HW_MIN_FIFO_FRAGS 1
-#define VXGE_HW_MAX_FIFO_FRAGS 256
-
- u32 memblock_size;
-#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
-#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
-#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
-
- u32 alignment_size;
-#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
-#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
-#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
-
- u32 intr;
-#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
-#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
-#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
-
- u32 no_snoop_bits;
-#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
-#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
-#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
-#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
-#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
-
-};
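/*
 * Illustrative initializer (not from the original sources) using the
 * bounds defined above:
 */
static const struct vxge_hw_fifo_config example_fifo_config = {
	.enable		= VXGE_HW_FIFO_ENABLE,
	.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,	/* 2 TxDL blocks */
	.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,	/* 256 Tx buffers */
	.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
	.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
	.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
	.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
};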
-/**
- * struct vxge_hw_ring_config - Ring configurations.
- * @enable: Is this ring to be commissioned
- * @ring_blocks: Number of RxD blocks in the ring
- * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer
- *             to Titan User Guide.
- * @scatter_mode: Titan supports three receive scatter modes: A, B and C.
- *             For details please refer to Titan User Guide.
- * @rx_timer_val: The number of 32ns periods that would be counted between two
- * timer interrupts.
- * @greedy_return: If Set it forces the device to return absolutely all RxD
- * that are consumed and still on board when a timer interrupt
- * triggers. If Clear, then if the device has already returned
- * RxD before current timer interrupt triggered and after the
- * previous timer interrupt triggered, then the device is not
- * forced to returned the rest of the consumed RxD that it has
- * on board which account for a byte count less than the one
- * programmed into PRC_CFG6.RXD_CRXDT field
- * @rx_timer_ci: TBD
- * @backoff_interval_us: Time (in microseconds), after which Titan
- * tries to download RxDs posted by the host.
- * Note that the "backoff" does not happen if the host posts receive
- * descriptors in a timely fashion.
- * Ring configuration.
- */
-struct vxge_hw_ring_config {
- u32 enable;
-#define VXGE_HW_RING_ENABLE 1
-#define VXGE_HW_RING_DISABLE 0
-#define VXGE_HW_RING_DEFAULT 1
-
- u32 ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS 1
-#define VXGE_HW_MAX_RING_BLOCKS 128
-#define VXGE_HW_DEF_RING_BLOCKS 2
-
- u32 buffer_mode;
-#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
-#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
-#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
-#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
-
- u32 scatter_mode;
-#define VXGE_HW_RING_SCATTER_MODE_A 0
-#define VXGE_HW_RING_SCATTER_MODE_B 1
-#define VXGE_HW_RING_SCATTER_MODE_C 2
-#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
-
- u64 rxds_limit;
-#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
-};
-
-/**
- * struct vxge_hw_vp_config - Configuration of virtual path
- * @vp_id: Virtual Path Id
- * @min_bandwidth: Minimum Guaranteed bandwidth
- * @ring: See struct vxge_hw_ring_config{}.
- * @fifo: See struct vxge_hw_fifo_config{}.
- * @tti: Configuration of interrupt associated with Transmit.
- * see struct vxge_hw_tim_intr_config();
- * @rti: Configuration of interrupt associated with Receive.
- * see struct vxge_hw_tim_intr_config();
- * @mtu: mtu size used on this port.
- * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
- * remove the VLAN tag from all received tagged frames that are not
- * replicated at the internal L2 switch.
- * 0 - Do not strip the VLAN tag.
- * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
- * always placed into the RxDMA descriptor.
- *
- * This structure is used by the driver to pass the configuration parameters to
- * configure Virtual Path.
- */
-struct vxge_hw_vp_config {
- u32 vp_id;
-
-#define VXGE_HW_VPATH_PRIORITY_MIN 0
-#define VXGE_HW_VPATH_PRIORITY_MAX 16
-#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
-
- u32 min_bandwidth;
-#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
-#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
-#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
-
- struct vxge_hw_ring_config ring;
- struct vxge_hw_fifo_config fifo;
- struct vxge_hw_tim_intr_config tti;
- struct vxge_hw_tim_intr_config rti;
-
- u32 mtu;
-#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
-#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
-#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
-
- u32 rpa_strip_vlan_tag;
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
-
-};
-/**
- * struct vxge_hw_device_config - Device configuration.
- * @dma_blockpool_initial: Initial size of DMA Pool
- * @dma_blockpool_max: Maximum blocks in DMA pool
- * @intr_mode: Line, or MSI-X interrupt.
- *
- * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
- * @rth_it_type: RTH IT table programming type
- * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
- * @vp_config: Configuration for virtual paths
- * @device_poll_millis: Specify the interval (in milliseconds)
- * to wait for register reads
- *
- * Titan configuration.
- * Contains per-device configuration parameters, including:
- * - stats sampling interval, etc.
- *
- * In addition, struct vxge_hw_device_config{} includes "subordinate"
- * configurations, including:
- * - fifos and rings;
- * - MAC (done at firmware level).
- *
- * See Titan User Guide for more details.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_device_config{} structure. Please refer to the
- * corresponding include file.
- * See also: struct vxge_hw_tim_intr_config{}.
- */
-struct vxge_hw_device_config {
- u32 device_poll_millis;
-#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
-#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
-#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
-
- u32 dma_blockpool_initial;
- u32 dma_blockpool_max;
-#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
-#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
-
-#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
-
- u32 intr_mode:2,
-#define VXGE_HW_INTR_MODE_IRQLINE 0
-#define VXGE_HW_INTR_MODE_MSIX 1
-#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
-
-#define VXGE_HW_INTR_MODE_DEF 0
-
- rth_en:1,
-#define VXGE_HW_RTH_DISABLE 0
-#define VXGE_HW_RTH_ENABLE 1
-#define VXGE_HW_RTH_DEFAULT 0
-
- rth_it_type:1,
-#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
-#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
-#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
-
- rts_mac_en:1,
-#define VXGE_HW_RTS_MAC_DISABLE 0
-#define VXGE_HW_RTS_MAC_ENABLE 1
-#define VXGE_HW_RTS_MAC_DEFAULT 0
-
- hwts_en:1;
-#define VXGE_HW_HWTS_DISABLE 0
-#define VXGE_HW_HWTS_ENABLE 1
-#define VXGE_HW_HWTS_DEFAULT 1
-
- struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * function vxge_uld_link_up_f - Link-Up callback provided by driver.
- * @devh: HW device handle.
- * Link-up notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_link_down_f - Link-Down callback provided by
- * driver.
- * @devh: HW device handle.
- *
- * Link-Down notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_crit_err_f - Critical Error notification callback.
- * @devh: HW device handle.
- * (typically - at HW device iinitialization time).
- * @type: Enumerated hw error, e.g.: double ECC.
- * @serr_data: Titan status.
- * @ext_data: Extended data. The contents depends on the @type.
- *
- * Link-Down notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
- * @link_up: See vxge_uld_link_up_f{}.
- * @link_down: See vxge_uld_link_down_f{}.
- * @crit_err: See vxge_uld_crit_err_f{}.
- *
- * Driver slow-path (per-driver) callbacks.
- * Implemented by driver and provided to HW via
- * vxge_hw_driver_initialize().
- * Note that these callbacks are not mandatory: HW will not invoke
- * a callback if NULL is specified.
- *
- * See also: vxge_hw_driver_initialize().
- */
-struct vxge_hw_uld_cbs {
- void (*link_up)(struct __vxge_hw_device *devh);
- void (*link_down)(struct __vxge_hw_device *devh);
- void (*crit_err)(struct __vxge_hw_device *devh,
- enum vxge_hw_event type, u64 ext_data);
-};
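/*
 * Sketch of how a driver might provide these callbacks (the exact
 * registration path, vxge_hw_driver_initialize(), is declared elsewhere
 * in the sources):
 */
static void example_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. netif_carrier_on() on the associated netdev */
}

static void example_link_down(struct __vxge_hw_device *devh)
{
	/* e.g. netif_carrier_off() on the associated netdev */
}

static const struct vxge_hw_uld_cbs example_uld_cbs = {
	.link_up	= example_link_up,
	.link_down	= example_link_down,
	.crit_err	= NULL,	/* optional; HW skips NULL callbacks */
};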
-
-/*
- * struct __vxge_hw_blockpool_entry - Block private data structure
- * @item: List header used to link.
- * @length: Length of the block
- * @memblock: Virtual address block
- * @dma_addr: DMA Address of the block.
- * @dma_handle: DMA handle of the block.
- * @acc_handle: DMA acc handle
- *
- * Each block is allocated with a header so that blocks can be linked into a list.
- *
- */
-struct __vxge_hw_blockpool_entry {
- struct list_head item;
- u32 length;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * struct __vxge_hw_blockpool - Block Pool
- * @hldev: HW device
- * @block_size: size of each block.
- * @pool_size: Number of blocks in the pool
- * @pool_max: Maximum number of blocks above which to free additional blocks
- * @req_out: Number of outstanding block requests to the OS
- * @free_block_list: List of free blocks
- * @free_entry_list: List of free blockpool entries
- *
- * Block pool contains the DMA blocks preallocated.
- *
- */
-struct __vxge_hw_blockpool {
- struct __vxge_hw_device *hldev;
- u32 block_size;
- u32 pool_size;
- u32 pool_max;
- u32 req_out;
- struct list_head free_block_list;
- struct list_head free_entry_list;
-};
-
-/*
- * enum __vxge_hw_channel_type - Enumerated channel types.
- * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
- * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
- * @VXGE_HW_CHANNEL_TYPE_RING: ring.
- * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
- * (and recognized) channel types. Currently: 2.
- *
- * Enumerated channel types. Currently there are only two link-layer
- * channels - Titan fifo and Titan ring. In the future the list will grow.
- */
-enum __vxge_hw_channel_type {
- VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
- VXGE_HW_CHANNEL_TYPE_FIFO = 1,
- VXGE_HW_CHANNEL_TYPE_RING = 2,
- VXGE_HW_CHANNEL_TYPE_MAX = 3
-};
-
-/*
- * struct __vxge_hw_channel
- * @item: List item; used to maintain a list of open channels.
- * @type: Channel type. See enum vxge_hw_channel_type{}.
- * @devh: Device handle. HW device object that contains _this_ channel.
- * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
- * @length: Channel length. Currently allocated number of descriptors.
- * The channel length "grows" when more descriptors get allocated.
- * See _hw_mempool_grow.
- * @reserve_arr: Reserve array. Contains descriptors that can be reserved
- * by driver for the subsequent send or receive operation.
- * See vxge_hw_fifo_txdl_reserve(),
- * vxge_hw_ring_rxd_reserve().
- * @reserve_ptr: Current pointer in the reserve array
- * @reserve_top: Reserve top gives the maximum number of dtrs available in
- * reserve array.
- * @work_arr: Work array. Contains descriptors posted to the channel.
- * Note that at any point in time @work_arr contains 3 types of
- * descriptors:
- * 1) posted but not yet consumed by Titan device;
- * 2) consumed but not yet completed;
- * 3) completed but not yet freed
- * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
- * @post_index: Post index. At any point in time points to the
- *             position in the channel that will contain the next
- *             to-be-posted descriptor.
- * @compl_index: Completion index. At any point in time points to the
- *             position in the channel that will contain the next
- *             to-be-completed descriptor.
- * @free_arr: Free array. Contains completed descriptors that were freed
- * (i.e., handed over back to HW) by driver.
- * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
- * @free_ptr: current pointer in free array
- * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
- * to store per-operation control information.
- * @stats: Pointer to common statistics
- * @userdata: Per-channel opaque (void*) user-defined context, which may be
- * driver object, ULP connection, etc.
- * Once channel is open, @userdata is passed back to user via
- * vxge_hw_channel_callback_f.
- *
- * HW channel object.
- *
- * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
- */
-struct __vxge_hw_channel {
- struct list_head item;
- enum __vxge_hw_channel_type type;
- struct __vxge_hw_device *devh;
- struct __vxge_hw_vpath_handle *vph;
- u32 length;
- u32 vp_id;
- void **reserve_arr;
- u32 reserve_ptr;
- u32 reserve_top;
- void **work_arr;
- u32 post_index ____cacheline_aligned;
- u32 compl_index ____cacheline_aligned;
- void **free_arr;
- u32 free_ptr;
- void **orig_arr;
- u32 per_dtr_space;
- void *userdata;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 first_vp_id;
- struct vxge_hw_vpath_stats_sw_common_info *stats;
-
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_virtualpath - Virtual Path
- *
- * @vp_id: Virtual path id
- * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
- * @hldev: Hal device
- * @vp_config: Virtual Path Config
- * @vp_reg: VPATH Register map address in BAR0
- * @vpmgmt_reg: VPATH_MGMT register map address
- * @max_mtu: Max mtu that can be supported
- * @vsport_number: vsport attached to this vpath
- * @max_kdfc_db: Maximum kernel mode doorbells
- * @max_nofl_db: Maximum non offload doorbells
- * @tx_intr_num: Interrupt number associated with the TX
- *
- * @ringh: Ring Queue
- * @fifoh: FIFO Queue
- * @vpath_handles: Virtual Path handles list
- * @stats_block: Memory for DMAing stats
- * @stats: Vpath statistics
- *
- * Virtual path structure to encapsulate the data related to a virtual path.
- * Virtual paths are allocated by the HW upon getting configuration from the
- * driver and inserted into the list of virtual paths.
- */
-struct __vxge_hw_virtualpath {
- u32 vp_id;
-
- u32 vp_open;
-#define VXGE_HW_VP_NOT_OPEN 0
-#define VXGE_HW_VP_OPEN 1
-
- struct __vxge_hw_device *hldev;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
-
- u32 max_mtu;
- u32 vsport_number;
- u32 max_kdfc_db;
- u32 max_nofl_db;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- struct __vxge_hw_ring *____cacheline_aligned ringh;
- struct __vxge_hw_fifo *____cacheline_aligned fifoh;
- struct list_head vpath_handles;
- struct __vxge_hw_blockpool_entry *stats_block;
- struct vxge_hw_vpath_stats_hw_info *hw_stats;
- struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- spinlock_t lock;
-};
-
-/*
- * struct __vxge_hw_vpath_handle - List item to store callback information
- * @item: List head to keep the item in linked list
- * @vpath: Virtual path to which this item belongs
- *
- * This structure is used to store the callback information.
- */
-struct __vxge_hw_vpath_handle {
- struct list_head item;
- struct __vxge_hw_virtualpath *vpath;
-};
-
-/*
- * struct __vxge_hw_device
- *
- * HW device object.
- */
-/**
- * struct __vxge_hw_device - Hal device object
- * @magic: Magic Number
- * @bar0: BAR0 virtual address.
- * @pdev: Physical device handle
- * @config: Configuration passed by the LL driver at initialization
- * @link_state: Link state
- *
- * HW device object. Represents Titan adapter
- */
-struct __vxge_hw_device {
- u32 magic;
-#define VXGE_HW_DEVICE_MAGIC 0x12345678
-#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- void __iomem *bar0;
- struct pci_dev *pdev;
- struct net_device *ndev;
- struct vxge_hw_device_config config;
- enum vxge_hw_device_link_state link_state;
-
- const struct vxge_hw_uld_cbs *uld_callbacks;
-
- u32 host_type;
- u32 func_id;
- u32 access_rights;
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
- struct vxge_hw_legacy_reg __iomem *legacy_reg;
- struct vxge_hw_toc_reg __iomem *toc_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
- [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
- [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
- struct vxge_hw_vpath_reg __iomem *vpath_reg \
- [VXGE_HW_TITAN_VPATH_REG_SPACES];
- u8 __iomem *kdfc;
- u8 __iomem *usdc;
- struct __vxge_hw_virtualpath virtual_paths \
- [VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpath_assignments;
- u64 vpaths_deployed;
- u32 first_vp_id;
- u64 tim_int_mask0[4];
- u32 tim_int_mask1[4];
-
- struct __vxge_hw_blockpool block_pool;
- struct vxge_hw_device_stats stats;
- u32 debug_module_mask;
- u32 debug_level;
- u32 level_err;
- u32 level_trace;
- u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
-};
-
-#define VXGE_HW_INFO_LEN 64
-/**
- * struct vxge_hw_device_hw_info - Device information
- * @host_type: Host Type
- * @func_id: Function Id
- * @vpath_mask: vpath bit mask
- * @fw_version: Firmware version
- * @fw_date: Firmware Date
- * @flash_version: Flash version
- * @flash_date: Flash date
- * @mac_addrs: Mac addresses for each vpath
- * @mac_addr_masks: Mac address masks for each vpath
- *
- * Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver and the first mac address for each vpath
- */
-struct vxge_hw_device_hw_info {
- u32 host_type;
-#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
-#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
-#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
-#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
-#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
-#define VXGE_HW_SR_VH_FUNCTION0 5
-#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
-#define VXGE_HW_VH_NORMAL_FUNCTION 7
- u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
-#define VXGE_HW_FUNCTION_MODE_SRIOV 2
-#define VXGE_HW_FUNCTION_MODE_MRIOV 3
-#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
-#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
-#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
-#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
-
- u32 func_id;
- u64 vpath_mask;
- struct vxge_hw_device_version fw_version;
- struct vxge_hw_device_date fw_date;
- struct vxge_hw_device_version flash_version;
- struct vxge_hw_device_date flash_date;
- u8 serial_number[VXGE_HW_INFO_LEN];
- u8 part_number[VXGE_HW_INFO_LEN];
- u8 product_desc[VXGE_HW_INFO_LEN];
- u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
- u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
-};
-
-/**
- * struct vxge_hw_device_attr - Device memory spaces.
- * @bar0: BAR0 virtual address.
- * @pdev: PCI device object.
- *
- * Device memory spaces. Includes configuration, BAR0 etc. per device
- * mapped memories. Also, includes a pointer to OS-specific PCI device object.
- */
-struct vxge_hw_device_attr {
- void __iomem *bar0;
- struct pci_dev *pdev;
- const struct vxge_hw_uld_cbs *uld_callbacks;
-};
-
-#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
- m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0x80000000; \
- m1[1] = 0x40000000; \
- } \
-}
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
- m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0; \
- m1[1] = 0; \
- } \
-}
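
/*
 * Usage sketch for the two mask helpers above (illustrative only): the
 * interrupt number selects a nibble in mask0 for numbers below 16 and a
 * fixed bit pattern in mask1 otherwise.
 */
static inline void example_mask_tx_intr(u64 m0[4], u32 m1[4], u32 tx_intr_num)
{
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, tx_intr_num);
	/* ... program the masks into the device registers ... */
	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, tx_intr_num);
}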
-
-#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
- status = vxge_hw_mrpcim_stats_access(hldev, \
- VXGE_HW_STATS_OP_READ, \
- loc, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-/*
- * struct __vxge_hw_ring - Ring channel.
- * @channel: Channel "base" of this ring, the common part of all HW
- * channels.
- * @mempool: Memory pool, the pool from which descriptors get allocated.
- * (See vxge_hw_mm.h).
- * @config: Ring configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @ring_length: Length of the ring
- * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
- * as per Titan User Guide.
- * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
- * 1-buffer mode descriptor is 32 byte long, etc.
- * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
- * per-descriptor data (e.g., DMA handle for Solaris)
- * @per_rxd_space: Per rxd space requested by driver
- * @rxds_per_block: Number of descriptors per hardware-defined RxD
- * block. Depends on the (1-, 3-, 5-) buffer mode.
- * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
- * usage. Not to be confused with @rxd_priv_size.
- * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
- * @callback: Channel completion callback. HW invokes the callback when there
- * are new completions on that channel. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Channel's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding channel.
- * See also vxge_hw_channel_rxd_term_f{}.
- * @stats: Statistics for ring
- *
- * Ring channel.
- *
- * Note: The structure is cache line aligned to better utilize
- * CPU cache performance.
- */
-struct __vxge_hw_ring {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 ring_length;
- u32 buffer_mode;
- u32 rxd_size;
- u32 rxd_priv_size;
- u32 per_rxd_space;
- u32 rxds_per_block;
- u32 rxdblock_priv_size;
- u32 cmpl_cnt;
- u32 vp_id;
- u32 doorbell_cnt;
- u32 total_db_cnt;
- u64 rxds_limit;
- u32 rtimer;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
- struct vxge_hw_ring_config *config;
-} ____cacheline_aligned;
-
-/**
- * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
- * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
- * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_txdl_state {
- VXGE_HW_TXDL_STATE_NONE = 0,
- VXGE_HW_TXDL_STATE_AVAIL = 1,
- VXGE_HW_TXDL_STATE_POSTED = 2,
- VXGE_HW_TXDL_STATE_FREED = 3
-};
-/*
- * struct __vxge_hw_fifo - Fifo.
- * @channel: Channel "base" of this fifo, the common part of all HW
- * channels.
- * @mempool: Memory pool, from which descriptors get allocated.
- * @config: Fifo configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @interrupt_type: Interrupt type to be used
- * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
- * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. For details
- *             on TxDL please refer to Titan UG.
- * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
- * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
- * @priv_size: Per-Tx descriptor space reserved for driver
- * usage.
- * @per_txdl_space: Per txdl private space for the driver
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @stats: Statistics of this fifo
- *
- * Fifo channel.
- * Note: The structure is cache line aligned.
- */
-struct __vxge_hw_fifo {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_fifo_config *config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
- u64 interrupt_type;
- u32 no_snoop_bits;
- u32 txdl_per_memblock;
- u32 txdl_size;
- u32 priv_size;
- u32 per_txdl_space;
- u32 vp_id;
- u32 tx_intr_num;
- u32 rtimer;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb,
- int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
- * Each memblock is a contiguous block of DMA-able memory.
- * @frags: Total number of fragments (that is, contiguous data buffers)
- * carried by this TxDL.
- * @align_vaddr_start: Aligned virtual address start
- * @align_vaddr: Virtual address of the per-TxDL area in memory used for
- * alignment. Used to place one or more mis-aligned fragments
- * @align_dma_addr: DMA address translated from the @align_vaddr.
- * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
- * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
- * @align_dma_offset: The current offset into the @align_vaddr area.
- * Grows while filling the descriptor, gets reset.
- * @align_used_frags: Number of fragments used.
- * @alloc_frags: Total number of fragments allocated.
- * @unused: TODO
- * @next_txdl_priv: (TODO).
- * @first_txdp: (TODO).
- * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
- * TxDL list.
- * @txdlh: Corresponding txdlh to this TxDL.
- * @memblock: Pointer to the TxDL memory block or memory page.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- *
- * See also: struct vxge_hw_ring_rxd_priv{}.
- */
-struct __vxge_hw_fifo_txdl_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
- u32 frags;
- u8 *align_vaddr_start;
- u8 *align_vaddr;
- dma_addr_t align_dma_addr;
- struct pci_dev *align_dma_handle;
- struct pci_dev *align_dma_acch;
- ptrdiff_t align_dma_offset;
- u32 align_used_frags;
- u32 alloc_frags;
- u32 unused;
- struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
- struct vxge_hw_fifo_txd *first_txdp;
- void *memblock;
-};
-
-/*
- * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
- * @control_0: Bits 0 to 7 - Doorbell type.
- * Bits 8 to 31 - Reserved.
- * Bits 32 to 39 - The highest TxD in this TxDL.
- * Bits 40 to 47 - Reserved.
- * Bits 48 to 55 - Reserved.
- * Bits 56 to 63 - No snoop flags.
- * @txdl_ptr: The starting location of the TxDL in host memory.
- *
- * Created by the host and written to the adapter via PIO to a Kernel Doorbell
- * FIFO. All non-offload doorbell wrapper fields must be written by the host as
- * part of a doorbell write. Consumed by the adapter but is not written by the
- * adapter.
- */
-struct __vxge_hw_non_offload_db_wrapper {
- u64 control_0;
-#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
-#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_NODBW_TYPE_NODBW 0
-
-#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
-#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
-
-#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
-
- u64 txdl_ptr;
-};
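
/*
 * Composition sketch for a non-offload doorbell using the macros above.
 * The function name and parameters are illustrative; the field layout
 * follows the struct documentation.
 */
static inline void
example_fill_nodbw(struct __vxge_hw_non_offload_db_wrapper *db,
		   u64 last_txd, u64 txdl_dma_addr)
{
	db->control_0 =
		VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
		VXGE_HW_NODBW_LIST_NO_SNOOP(
			VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
			VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
	db->txdl_ptr = txdl_dma_addr;
}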
-
-/*
- * TX Descriptor
- */
-
-/**
- * struct vxge_hw_fifo_txd - Transmit Descriptor
- * @control_0: Bits 0 to 6 - Reserved.
- * Bit 7 - List Ownership. This field should be initialized
- * to '1' by the driver before the transmit list pointer is
- * written to the adapter. This field will be set to '0' by the
- * adapter once it has completed transmitting the frame or frames in
- * the list. Note - This field is only valid in TxD0. Additionally,
- * for multi-list sequences, the driver should not release any
- * buffers until the ownership of the last list in the multi-list
- * sequence has been returned to the host.
- * Bits 8 to 11 - Reserved
- * Bits 12 to 15 - Transfer_Code. This field is only valid in
- * TxD0. It is used to describe the status of the transmit data
- * buffer transfer. This field is always overwritten by the
- * adapter, so this field may be initialized to any value.
- * Bits 16 to 17 - Host steering. This field allows the host to
- * override the selection of the physical transmit port.
- * Attention:
- * Normal sounds as if learned from the switch rather than from
- * the aggregation algorithms.
- * 00: Normal. Use Destination/MAC Address
- * lookup to determine the transmit port.
- * 01: Send on physical Port1.
- * 10: Send on physical Port0.
- * 11: Send on both ports.
- * Bits 18 to 21 - Reserved
- * Bits 22 to 23 - Gather_Code. This field is set by the host and
- * is used to describe how individual buffers comprise a frame.
- * 10: First descriptor of a frame.
- * 00: Middle of a multi-descriptor frame.
- * 01: Last descriptor of a frame.
- * 11: First and last descriptor of a frame (the entire frame
- * resides in a single buffer).
- * For multi-descriptor frames, the only valid gather code sequence
- * is {10, [00], 01}. In other words, the descriptors must be placed
- * in the list in the correct order.
- * Bits 24 to 27 - Reserved
- * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
- * definition. Only valid in TxD0. This field allows the host to
- * indicate the Ethernet encapsulation of an outbound LSO packet.
- * 00 - classic mode (best guess)
- * 01 - LLC
- * 10 - SNAP
- * 11 - DIX
- * If "classic mode" is selected, the adapter will attempt to
- * decode the frame's Ethernet encapsulation by examining the L/T
- * field as follows:
- * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
- * if packet is IPv4 or IPv6.
- * 0x8870 Jumbo-SNAP encoding.
- * 0x0800 IPv4 DIX encoding
- * 0x86DD IPv6 DIX encoding
- * others illegal encapsulation
- * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
- * Set to 1 to perform segmentation offload for TCP/UDP.
- * This field is valid only in TxD0.
- * Bits 31 to 33 - Reserved.
- * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
- * This field is meaningful only when LSO_Control is non-zero.
- * When LSO_Control is set to TCP_LSO, the single (possibly large)
- * TCP segment described by this TxDL will be sent as a series of
- * TCP segments each of which contains no more than LSO_MSS
- * payload bytes.
- * When LSO_Control is set to UDP_LSO, the single (possibly large)
- * UDP datagram described by this TxDL will be sent as a series of
- * UDP datagrams each of which contains no more than LSO_MSS
- * payload bytes.
- * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
- * or TCP payload, with the exception of the last, which will have
- * <= LSO_MSS bytes of payload.
- * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
- * buffer to be read by the adapter. This field is written by the
- * host. A value of 0 is illegal.
- * Bits 32 to 63 - This value is written by the adapter upon
- * completion of a UDP or TCP LSO operation and indicates the number
- * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
- * returned for any non-LSO operation.
- * @control_1: Bits 0 to 4 - Reserved.
- * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
- * offload. This field is only valid in the first TxD of a frame.
- * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that TCP is present.
- * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that UDP is present.
- * Bits 8 to 14 - Reserved.
- * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
- * instruct the adapter to insert the VLAN tag specified by the
- * Tx_VLAN_Tag field. This field is only valid in the first TxD of
- * a frame.
- * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
- * to be inserted into the frame by the adapter (the first two bytes
- * of a VLAN tag are always 0x8100). This field is only valid if the
- * Tx_VLAN_Enable field is set to '1'.
- * Bits 32 to 33 - Reserved.
- * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
- * number the frame is associated with. This field is written by the
- * host. It is only valid in the first TxD of a frame.
- * Bits 40 to 42 - Reserved.
- * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
- * functions. This field is valid only in the first TxD
- * of a frame.
- * Bits 44 to 45 - Reserved.
- * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
- * generate an interrupt as soon as all of the frames in the list
- * have been transmitted. In order to have per-frame interrupts,
- * the driver should place a maximum of one frame per list. This
- * field is only valid in the first TxD of a frame.
- * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
- * to count the frame toward the utilization interrupt specified in
- * the Tx_Int_Number field. This field is only valid in the first
- * TxD of a frame.
- * Bits 48 to 63 - Reserved.
- * @buffer_pointer: Buffer start address.
- * @host_control: Host_Control. Opaque 64-bit data stored by the driver
- * inside the Titan descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
- * to the driver with each completed descriptor.
- *
- * Transmit descriptor (TxD). A fifo descriptor contains a configured number
- * (list) of TxDs. For more details please refer to Titan User Guide,
- * Section 5.4.2 "Transmit Descriptor (TxD) Format".
- */
-struct vxge_hw_fifo_txd {
- u64 control_0;
-#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
-
-
-#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
-
-
-#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
-
-#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
-
-#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
-
- u64 control_1;
-#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
-#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
-#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
-#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
-
-#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
-
-#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
-#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
-
- u64 buffer_pointer;
-
- u64 host_control;
-};
-
-/**
- * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
- * @host_control: This field is exclusively for host use and is "readonly"
- * from the adapter's perspective.
- * @control_0: Bits 0 to 6 - RTH_Bucket
- * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
- * by the host, and is set to 0 by the adapter.
- * 0 - Host owns RxD and buffer.
- * 1 - The adapter owns RxD and buffer.
- * Bit 8 - Fast_Path_Eligible When set, indicates that the
- * received frame meets all of the criteria for fast path processing.
- * The required criteria are as follows:
- * !SYN &
- * (Transfer_Code == "Transfer OK") &
- * (!Is_IP_Fragment) &
- * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
- * (Is_IPv6)) &
- * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
- * (Is_UDP & (computed_L4_checksum == 0xFFFF |
- * computed_L4_checksum == 0x0000)))
- * (same meaning for all RxD buffer modes)
- * Bit 9 - L3 Checksum Correct
- * Bit 10 - L4 Checksum Correct
- * Bit 11 - Reserved
- * Bit 12 to 15 - This field is written by the adapter. It is
- * used to report the status of the frame transfer to the host.
- * 0x0 - Transfer OK
- * 0x4 - RDA Failure During Transfer
- * 0x5 - Unparseable Packet, such as unknown IPv6 header.
- * 0x6 - Frame integrity error (FCS or ECC).
- * 0x7 - Buffer Size Error. The provided buffer(s) were not
- * appropriately sized and data loss occurred.
- * 0x8 - Internal ECC Error. RxD corrupted.
- * 0x9 - IPv4 Checksum error
- * 0xA - TCP/UDP Checksum error
- * 0xF - Unknown Error or Multiple Error. Indicates an
- * unknown problem or that more than one of transfer codes is set.
- * Bit 16 - SYN The adapter sets this field to indicate that
- * the incoming frame contained a TCP segment with its SYN bit
- * set and its ACK bit NOT set. (same meaning for all RxD buffer
- * modes)
- * Bit 17 - Is ICMP
- * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
- * Socket Pair Direct Match Table and the frame was steered based
- * on SPDM.
- * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
- * Indirection Table and the frame was steered based on hash
- * indirection.
- * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
- * type) that was used to calculate the hash.
- * Bit 24 - IS_VLAN Set to '1' if the frame is VLAN
- * tagged.
- * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
- * of the received frame.
- * 0x0 - Ethernet DIX
- * 0x1 - LLC
- * 0x2 - SNAP (includes Jumbo-SNAP)
- * 0x3 - IPX
- * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
- * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
- * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
- * IP packet.
- * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
- * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
- * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
- * arrived with the frame. If the resulting computed IPv4 header
- * checksum for the frame did not produce the expected 0xFFFF value,
- * then the transfer code would be set to 0x9.
- * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
- * arrived with the frame. If the resulting computed TCP/UDP checksum
- * for the frame did not produce the expected 0xFFFF value, then the
- * transfer code would be set to 0xA.
- * @control_1: Bits 0 to 1 - Reserved
- * Bits 2 to 15 - Buffer0_Size. This field is set by the host and
- * eventually overwritten by the adapter. The host writes the
- * available buffer size in bytes when it passes the descriptor to
- * the adapter. When a frame is delivered to the host, the adapter
- * populates this field with the number of bytes written into the
- * buffer. The largest supported buffer is 16,383 bytes.
- * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
- * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
- * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
- * of the VLAN tag, if one was detected by the adapter. This field is
- * populated even if VLAN-tag stripping is enabled.
- * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
- *
- * One buffer mode RxD for ring structure
- */
-struct vxge_hw_ring_rxd_1 {
- u64 host_control;
- u64 control_0;
-#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
-
-#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
-
-#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-
-#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
-
-#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
-
-#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
-
-#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
-
-#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
-
-#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
-
-#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
-
-#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
-
-#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
-
- u64 control_1;
-
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
-
-#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
-
-#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
-
- u64 buffer0_ptr;
-};
-
-enum vxge_hw_rth_algoritms {
- RTH_ALG_JENKINS = 0,
- RTH_ALG_MS_RSS = 1,
- RTH_ALG_CRC32C = 2
-};
-
-/**
- * struct vxge_hw_rth_hash_types - RTH hash types.
- * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
- * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
- * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
- * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
- * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
- * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
- *
- * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set().
- *
- * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
- */
-struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en:1,
- hash_type_ipv4_en:1,
- hash_type_tcpipv6_en:1,
- hash_type_ipv6_en:1,
- hash_type_tcpipv6ex_en:1,
- hash_type_ipv6ex_en:1;
-};
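
/*
 * A hedged usage sketch for vxge_hw_vpath_rts_rth_set(), declared later in
 * this header: enable TCP/IPv4 and IPv4 hashing with the Jenkins algorithm.
 * The function name and the bucket size of 8 are illustrative choices.
 */
static inline enum vxge_hw_status
example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types types = {
		.hash_type_tcpipv4_en = 1,
		.hash_type_ipv4_en = 1,
	};

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &types, 8);
}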
-
-void vxge_hw_device_debug_set(
- struct __vxge_hw_device *devh,
- enum vxge_debug_level level,
- u32 mask);
-
-u32
-vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
-
-u32
-vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-
-/**
- * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
- * @buf_mode: Buffer mode (only 1-buffer mode is currently supported)
- *
- * This function returns the size of RxD for the given buffer mode
- */
-static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
-{
- return sizeof(struct vxge_hw_ring_rxd_1);
-}
-
-/**
- * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
- * @buf_mode: Buffer mode (1 buffer mode only)
- *
- * This function returns the number of RxD for RxD block for given buffer mode
- */
-static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
-{
- return (u32)((VXGE_HW_BLOCK_SIZE-16) /
- sizeof(struct vxge_hw_ring_rxd_1));
-}
-
-/**
- * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
- * @rxdh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer this descriptor
- * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
- * the receive buffer should be already mapped to the device
- * @size: Size of the receive @dma_pointer buffer.
- *
- * Prepare 1-buffer-mode Rx descriptor for posting
- * (via vxge_hw_ring_rxd_post()).
- *
- * This inline helper-function does not return any parameters and always
- * succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_set(
- void *rxdh,
- dma_addr_t dma_pointer,
- u32 size)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxdp->buffer0_ptr = dma_pointer;
- rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
- rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
-}
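
/*
 * Prepare-and-post sketch for 1-buffer mode. vxge_hw_ring_rxd_reserve()
 * and vxge_hw_ring_rxd_post() are referenced by the documentation above
 * but not declared in this excerpt, so the exact signatures assumed here
 * are unverified; the buffer must already be DMA-mapped.
 */
static inline void example_refill_one(struct __vxge_hw_ring *ring,
				      dma_addr_t mapped_buf, u32 buf_size)
{
	void *rxdh;

	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) != VXGE_HW_OK)
		return;	/* no free descriptor available right now */

	vxge_hw_ring_rxd_1b_set(rxdh, mapped_buf, buf_size);
	vxge_hw_ring_rxd_post(ring, rxdh);
}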
-
-/**
- * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
- * descriptor.
- * @ring_handle: Ring handle.
- * @rxdh: Descriptor handle.
- * @pkt_length: Length (in bytes) of the data in the buffer this
- * descriptor carries. Returned by HW.
- *
- * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
- * This inline helper-function uses completed descriptor to populate receive
- * buffer pointer and other "out" parameters. The function always succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u32 *pkt_length)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- *pkt_length =
- (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
- * a completed receive descriptor for 1b mode.
- * @ring_handle: Ring handle.
- * @rxdh: Descriptor handle.
- * @rxd_info: Descriptor information
- *
- * Retrieve extended information associated with a completed receive descriptor.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_info_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- struct vxge_hw_ring_rxd_info *rxd_info)
-{
-
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxd_info->syn_flag =
- (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
- rxd_info->is_icmp =
- (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
- rxd_info->fast_path_eligible =
- (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
- rxd_info->l3_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l3_cksum =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
- rxd_info->l4_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l4_cksum =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
- rxd_info->frame =
- (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
- rxd_info->proto =
- (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
- rxd_info->is_vlan =
- (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
- rxd_info->vlan =
- (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
- rxd_info->rth_bucket =
- (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
- rxd_info->rth_it_hit =
- (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
- rxd_info->rth_spdm_hit =
- (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
- rxd_info->rth_hash_type =
- (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
- rxd_info->rth_value =
- (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
- * of a 1-buffer or 3-buffer mode ring.
- * @rxdh: Descriptor handle.
- *
- * Returns: private driver info associated with the descriptor.
- * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
- *
- */
-static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- return (void *)(size_t)rxdp->host_control;
-}
-
-/**
- * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
- * @txdlh: Descriptor handle.
- * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
- * and/or TCP and/or UDP.
- *
- * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
- * descriptor.
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_buffer_set().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
- txdp->control_1 |= cksum_bits;
-}
-
-/**
- * vxge_hw_fifo_txdl_mss_set - Set MSS.
- * @txdlh: Descriptor handle.
- * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
- * driver, which in turn inserts the MSS into the @txdlh.
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_cksum_set_bits().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
-}
-
-/**
- * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
- * @txdlh: Descriptor handle.
- * @vlan_tag: 16bit VLAN tag.
- *
- * Insert VLAN tag into specified transmit descriptor.
- * The actual insertion of the tag into outgoing frame is done by the hardware.
- */
-static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
-}
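
/*
 * A sketch of the "preparation" sequence the comments above describe:
 * reserve a TxDL, attach one DMA-mapped fragment, request checksum
 * offload, optionally tag, and post. vxge_hw_fifo_txdl_reserve(),
 * _buffer_set() and _post() are referenced by this header's documentation
 * but not declared in this excerpt, so the signatures assumed below are
 * unverified.
 */
static inline void example_xmit_one(struct __vxge_hw_fifo *fifo,
				    dma_addr_t frag_dma, u32 frag_len,
				    u16 vlan_tag)
{
	void *txdlh, *txdl_priv;

	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
		return;	/* out of descriptors */

	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, frag_dma, frag_len);
	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
					 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
	if (vlan_tag)
		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
	vxge_hw_fifo_txdl_post(fifo, txdlh);
}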
-
-/**
- * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
- * @txdlh: Descriptor handle.
- *
- * Retrieve per-descriptor private data.
- * Note that driver requests per-descriptor space via
- * struct vxge_hw_fifo_attr passed to
- * vxge_hw_vpath_open().
- *
- * Returns: private driver data associated with the descriptor.
- */
-static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- return (void *)(size_t)txdp->host_control;
-}
-
-/**
- * struct vxge_hw_ring_attr - Ring open "template".
- * @callback: Ring completion callback. HW invokes the callback when there
- * are new completions on that ring. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Ring's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding ring.
- * See also vxge_hw_ring_rxd_term_f{}.
- * @userdata: User-defined "context" of _that_ ring. Passed back to the
- * user as one of the @callback, @rxd_init, and @rxd_term arguments.
- * @per_rxd_space: If specified (i.e., greater than zero): extra space
- *             reserved by HW for each receive descriptor.
- *             Can be used to store, and retrieve on completion,
- *             information specific to the driver.
- *
- * Ring open "template". User fills the structure with ring
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_ring_attr {
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- void *userdata;
- u32 per_rxd_space;
-};
-
-/**
- * function vxge_hw_fifo_callback_f - FIFO callback.
- * @vpath_handle: Virtual path whose fifo contains one or more completed
- * descriptors.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @t_code: Transfer code, as per Titan User Guide.
- * Returned by HW.
- * @host_control: Opaque 64bit data stored by driver inside the Titan
- * descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The @host_control is returned
- * as is to the driver with each completed descriptor.
- * @userdata: Opaque per-fifo data specified at fifo open
- * time, via vxge_hw_vpath_open().
- *
- * Fifo completion callback (type declaration). A single per-fifo
- * callback is specified at fifo open time, via
- * vxge_hw_vpath_open(). Typically gets called as part of the processing
- * of the Interrupt Service Routine.
- *
- * Fifo callback gets called by HW if, and only if, there is at least
- * one new completion on a given fifo. Upon processing the first @txdlh driver
- * is _supposed_ to continue consuming completions using:
- * - vxge_hw_fifo_txdl_next_completed()
- *
- * Note that failure to process new completions in a timely fashion
- * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
- *
- * Non-zero @t_code means failure to process transmit descriptor.
- *
- * In the "transmit" case the failure could happen, for instance, when the
- * link is down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
- */
-/**
- * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
- * @userdata: Per-fifo user data (a.k.a. context) specified at
- * fifo open time, via vxge_hw_vpath_open().
- *
- * Terminate descriptor callback. Unless NULL is specified in the
- * struct vxge_hw_fifo_attr{} passed to vxge_hw_vpath_open(),
- * HW invokes the callback as part of closing the fifo, prior to
- * de-allocating the ring and associated data structures
- * (including descriptors).
- * The driver should utilize the callback to (for instance) unmap
- * and free DMA data buffers associated with the posted (state =
- * VXGE_HW_TXDL_STATE_POSTED) descriptors,
- * as well as other relevant cleanup functions.
- *
- * See also: struct vxge_hw_fifo_attr{}
- */
-/**
- * struct vxge_hw_fifo_attr - Fifo open "template".
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @userdata: User-defined "context" of _that_ fifo. Passed back to the
- * user as one of the @callback, and @txdl_term arguments.
- * @per_txdl_space: If specified (i.e., greater than zero): extra space
- * reserved by HW per each transmit descriptor. Can be used to
- * store, and retrieve on completion, information specific
- * to the driver.
- *
- * Fifo open "template". User fills the structure with fifo
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_fifo_attr {
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb, int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- void *userdata;
- u32 per_txdl_space;
-};
-
-/**
- * struct vxge_hw_vpath_attr - Attributes of virtual path
- * @vp_id: Identifier of Virtual Path
- * @ring_attr: Attributes of ring for non-offload receive
- * @fifo_attr: Attributes of fifo for non-offload transmit
- *
- * Attributes of virtual path. This structure is passed as parameter
- * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
- */
-struct vxge_hw_vpath_attr {
- u32 vp_id;
- struct vxge_hw_ring_attr ring_attr;
- struct vxge_hw_fifo_attr fifo_attr;
-};
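
/*
 * Open sketch: fill the ring/fifo "templates" and hand them to
 * vxge_hw_vpath_open(), declared below. my_rx_complete/my_tx_complete are
 * hypothetical driver handlers matching the callback signatures above.
 */
static enum vxge_hw_status my_rx_complete(struct __vxge_hw_ring *ringh,
					  void *rxdh, u8 t_code,
					  void *userdata);
static enum vxge_hw_status my_tx_complete(struct __vxge_hw_fifo *fifo,
					  void *txdlh,
					  enum vxge_hw_fifo_tcode t_code,
					  void *userdata,
					  struct sk_buff ***skb_ptr,
					  int nr_skb, int *more);

static inline enum vxge_hw_status
example_open_vpath(struct __vxge_hw_device *devh, u32 vp_id,
		   struct __vxge_hw_vpath_handle **vph)
{
	struct vxge_hw_vpath_attr attr = { .vp_id = vp_id };

	attr.ring_attr.callback = my_rx_complete;
	attr.fifo_attr.callback = my_tx_complete;
	attr.ring_attr.userdata = devh;
	attr.fifo_attr.userdata = devh;

	return vxge_hw_vpath_open(devh, &attr, vph);
}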
-
-enum vxge_hw_status vxge_hw_device_hw_info_get(
- void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status vxge_hw_device_config_default_get(
- struct vxge_hw_device_config *device_config);
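
/*
 * Typical bring-up order implied by the declarations above and just below
 * (a sketch with error handling trimmed): read the HW info straight from
 * BAR0, build a default configuration, then initialize the device object.
 * The function name is illustrative.
 */
static inline enum vxge_hw_status
example_probe_init(void __iomem *bar0, struct vxge_hw_device_attr *attr,
		   struct __vxge_hw_device **devh)
{
	struct vxge_hw_device_hw_info hw_info;
	struct vxge_hw_device_config config;
	enum vxge_hw_status status;

	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
	if (status != VXGE_HW_OK)
		return status;

	status = vxge_hw_device_config_default_get(&config);
	if (status != VXGE_HW_OK)
		return status;

	return vxge_hw_device_initialize(devh, attr, &config);
}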
-
-/**
- * vxge_hw_device_link_state_get - Get link state.
- * @devh: HW device handle.
- *
- * Get link state.
- * Returns: link state.
- */
-static inline
-enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
- struct __vxge_hw_device *devh)
-{
- return devh->link_state;
-}
-
-void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config);
-
-enum vxge_hw_status vxge_hw_device_getpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 *tx,
- u32 *rx);
-
-enum vxge_hw_status vxge_hw_device_setpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 tx,
- u32 rx);
-
-static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
- unsigned long size,
- struct pci_dev **p_dmah,
- struct pci_dev **p_dma_acch)
-{
- void *vaddr;
- unsigned long misaligned = 0;
- int realloc_flag = 0;
- *p_dma_acch = *p_dmah = NULL;
-
-realloc:
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- if (vaddr == NULL)
- return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
- VXGE_CACHE_LINE_SIZE);
- if (realloc_flag)
- goto out;
-
- if (misaligned) {
- /* misaligned, free current one and try allocating
- * size + VXGE_CACHE_LINE_SIZE memory
- */
- kfree(vaddr);
- size += VXGE_CACHE_LINE_SIZE;
- realloc_flag = 1;
- goto realloc;
- }
-out:
- *(unsigned long *)p_dma_acch = misaligned;
- vaddr = (void *)((u8 *)vaddr + misaligned);
- return vaddr;
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
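
/*
 * Usage sketch for the two helpers above: the returned pointer is
 * cache-line aligned, and the misalignment offset is stashed through
 * p_dma_acch so vxge_os_dma_free() can recover the original kmalloc()
 * pointer. The function name and the 4 KB size are illustrative.
 */
static inline void example_block_alloc(struct pci_dev *pdev)
{
	struct pci_dev *dmah, *acch;
	void *blk = vxge_os_dma_malloc(pdev, 4096, &dmah, &acch);

	if (!blk)
		return;
	/* ... DMA-map and use blk ... */
	vxge_os_dma_free(pdev, blk, &acch);
}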
-
-/*
- * __vxge_hw_mempool_item_priv - returns a pointer to the per-item private
- * space
- */
-static inline void*
-__vxge_hw_mempool_item_priv(
- struct vxge_hw_mempool *mempool,
- u32 memblock_idx,
- void *item,
- u32 *memblock_item_idx)
-{
- ptrdiff_t offset;
- void *memblock = mempool->memblocks_arr[memblock_idx];
-
-
- offset = (u32)((u8 *)item - (u8 *)memblock);
- vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
-
- (*memblock_item_idx) = (u32) offset / mempool->item_size;
- vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
-
- return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
- (*memblock_item_idx) * mempool->items_priv_size;
-}
-
-/*
- * __vxge_hw_fifo_txdl_priv - Return the per-TxDL HW-private data
- * associated with a TxD of the fifo.
- * @fifo: Fifo
- * @txdp: Pointer to a TxD
- */
-static inline struct __vxge_hw_fifo_txdl_priv *
-__vxge_hw_fifo_txdl_priv(
- struct __vxge_hw_fifo *fifo,
- struct vxge_hw_fifo_txd *txdp)
-{
- return (struct __vxge_hw_fifo_txdl_priv *)
- (((char *)((ulong)txdp->host_control)) +
- fifo->per_txdl_space);
-}
-
-enum vxge_hw_status vxge_hw_vpath_open(
- struct __vxge_hw_device *devh,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_close(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
-
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_mtu_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 new_mtu);
-
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-
-static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
-{
- writel(val, addr + 4);
-}
-
-static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
-
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
-
-/**
- * vxge_debug_ll
- * @level: level of debug verbosity.
- * @mask: mask for the debug
- * @fmt: printf like format string
- *
- * Provides logging facilities. Can be customized on a per-module
- * basis and/or with debug levels. Input parameters, except
- * module and level, are the same as posix printf. This function
- * may be compiled out if DEBUG macro was never defined.
- * See also: enum vxge_debug_level{}.
- */
-#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) do { \
- if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
- (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
- if ((mask & VXGE_DEBUG_MASK) == mask) \
- printk(fmt "\n", ##__VA_ARGS__); \
-} while (0)
-#else
-#define vxge_debug_ll(level, mask, fmt, ...)
-#endif
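
/*
 * Usage sketch for vxge_debug_ll(); the mask argument here is a
 * placeholder module mask and the message is illustrative.
 */
static inline void example_log_vpath_open(u32 vp_id)
{
	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_TRACE_MASK,
		      "vpath %u opened", vp_id);
}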
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size);
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size);
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
-
-#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
-#define VXGE_HW_MAX_POLLING_COUNT 100
-
-void
-vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build);
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
- int size);
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *eprom_image_data);
-
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
deleted file mode 100644
index 4d91026485ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ /dev/null
@@ -1,1154 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/ethtool.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-
-#include "vxge-ethtool.h"
-
-static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"vpaths_opened"},
- {"vpath_open_fail_cnt"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"tx_frms"},
- {"tx_errors"},
- {"tx_bytes"},
- {"txd_not_free"},
- {"txd_out_of_desc"},
- {"rx_frms"},
- {"rx_errors"},
- {"rx_bytes"},
- {"rx_mcast"},
- {"pci_map_fail_cnt"},
- {"skb_alloc_fail_cnt"}
-};
-
-/**
- * vxge_ethtool_set_link_ksettings - Sets different link parameters.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool to set
- * link information.
- *
- * The function sets different link parameters provided by the user onto
- * the NIC.
- * Return value:
- * 0 on success.
- */
-static int
-vxge_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- /* We currently only support 10Gb/FULL */
- if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
- (cmd->base.speed != SPEED_10000) ||
- (cmd->base.duplex != DUPLEX_FULL))
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * vxge_ethtool_get_link_ksettings - Return link specific information.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool
- * to return link information.
- *
- * Returns link-specific information like speed, duplex etc. to ethtool.
- * Return value:
- * 0 on success.
- */
-static int vxge_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(dev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-/**
- * vxge_ethtool_gdrvinfo - Returns driver specific information.
- * @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool to
- * return driver information.
- *
- * Returns driver specific information like name, version, etc. to ethtool.
- */
-static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
-}
-
-/**
- * vxge_ethtool_gregs - dumps the vpath register space of Titan into the buffer.
- * @dev: device pointer.
- * @regs: pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @space: The output buffer into which all the registers are dumped.
- *
- * Dumps the vpath register space of the Titan NIC into the user-given
- * buffer area.
- */
-static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int index, offset;
- enum vxge_hw_status status;
- u64 reg;
- u64 *reg_space = (u64 *)space;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
- regs->version = vdev->pdev->subsystem_device;
- for (index = 0; index < vdev->no_of_vpath; index++) {
- for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
- offset += 8) {
- status = vxge_hw_mgmt_reg_read(hldev,
- vxge_hw_mgmt_reg_type_vpath,
- vdev->vpaths[index].device_id,
- offset, &reg);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Getting reg dump Failed",
- __func__, __LINE__);
- return;
- }
- *reg_space++ = reg;
- }
- }
-}
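-
-/* Layout note (illustrative, not from the original sources): the dump is
- * sizeof(struct vxge_hw_vpath_reg) * no_of_vpath bytes, read 8 bytes at a
- * time through the management register interface, so "ethtool -d ethX"
- * returns one complete vpath register block per open vpath.
- */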
-
-/**
- * vxge_ethtool_idnic - To physically identify the NIC on the system.
- * @dev : device pointer.
- * @state : requested LED state
- *
- * Used to physically identify the NIC on the system.
- * Return value:
- * 0 on success.
- */
-static int vxge_ethtool_idnic(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
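-
-/* Illustrative usage (not from the original sources): "ethtool -p ethX 10"
- * is expected to call this handler with ETHTOOL_ID_ACTIVE; returning 0
- * tells the ethtool core that the driver blinks the LED by itself, so the
- * core simply waits out the interval and then passes ETHTOOL_ID_INACTIVE.
- */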
-
-/**
- * vxge_ethtool_getpause_data - Pause frame generation and reception.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * Returns the Pause frame generation and reception capability of the NIC.
- * Return value:
- * void
- */
-static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
-}
-
-/**
- * vxge_ethtool_setpause_data - set/reset pause frame generation.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- * Return value:
- * 0 on success.
- */
-static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
-
- vdev->config.tx_pause_enable = ep->tx_pause;
- vdev->config.rx_pause_enable = ep->rx_pause;
-
- return 0;
-}
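-
-/* Illustrative usage (not from the original sources): "ethtool -A ethX
- * rx on tx on" lands here and enables pause frame reception and
- * generation, while "ethtool -a ethX" reads the settings back through
- * vxge_ethtool_getpause_data() above.
- */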
-
-static void vxge_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *tmp_stats)
-{
- int j, k;
- enum vxge_hw_status status;
- enum vxge_hw_status swstatus;
- struct vxge_vpath *vpath = NULL;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
- struct vxge_hw_xmac_stats *xmac_stats;
- struct vxge_hw_device_stats_sw_info *sw_stats;
- struct vxge_hw_device_stats_hw_info *hw_stats;
-
- u64 *ptr = tmp_stats;
-
- memset(tmp_stats, 0,
- vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
-
- xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
- if (xmac_stats == NULL) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for xmac_stats",
- __func__, __LINE__);
- return;
- }
-
- sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
- GFP_KERNEL);
- if (sw_stats == NULL) {
- kfree(xmac_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for sw_stats",
- __func__, __LINE__);
- return;
- }
-
- hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
- GFP_KERNEL);
- if (hw_stats == NULL) {
- kfree(xmac_stats);
- kfree(sw_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for hw_stats",
- __func__, __LINE__);
- return;
- }
-
- *ptr++ = 0;
- status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
- if (status != VXGE_HW_OK) {
- if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting xmac stats",
- __func__, __LINE__);
- }
- }
- swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
- if (swstatus != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting sw stats",
- __func__, __LINE__);
- }
-
- status = vxge_hw_device_stats_get(hldev, hw_stats);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d hw_stats_get error", __func__, __LINE__);
- }
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
- ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN);
- continue;
- }
-
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
- *ptr++ = vpath_info->tx_stats.tx_data_octets;
- *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
- *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
- *ptr++ = vpath_info->tx_stats.tx_icmp;
- *ptr++ = vpath_info->tx_stats.tx_tcp;
- *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
- *ptr++ = vpath_info->tx_stats.tx_udp;
- *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip;
- *ptr++ = vpath_info->tx_stats.tx_parse_error;
- *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_frms;
- *ptr++ = vpath_info->rx_stats.rx_offload_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
- *ptr++ = vpath_info->rx_stats.rx_data_octets;
- *ptr++ = vpath_info->rx_stats.rx_offload_octets;
- *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
- *ptr++ = vpath_info->rx_stats.rx_long_frms;
- *ptr++ = vpath_info->rx_stats.rx_usized_frms;
- *ptr++ = vpath_info->rx_stats.rx_osized_frms;
- *ptr++ = vpath_info->rx_stats.rx_frag_frms;
- *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ip;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
- *ptr++ = vpath_info->rx_stats.rx_ip_octets;
- *ptr++ = vpath_info->rx_stats.rx_err_ip;
- *ptr++ = vpath_info->rx_stats.rx_icmp;
- *ptr++ = vpath_info->rx_stats.rx_tcp;
- *ptr++ = vpath_info->rx_stats.rx_udp;
- *ptr++ = vpath_info->rx_stats.rx_err_tcp;
- *ptr++ = vpath_info->rx_stats.rx_lost_frms;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_various_discard;
- *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
- *ptr++ = vpath_info->rx_stats.rx_red_discard;
- *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
- *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_icmp;
- *ptr++ = xmac_stats->port_stats[k].tx_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_udp;
- *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
- *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
- *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_icmp;
- *ptr++ = xmac_stats->port_stats[k].rx_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_jettison;
- *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_sw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
- &sw_stats->vpath_info[j];
- *ptr++ = vpath_info->soft_reset_cnt;
- *ptr++ = vpath_info->error_stats.unknown_alarms;
- *ptr++ = vpath_info->error_stats.network_sustained_fault;
- *ptr++ = vpath_info->error_stats.network_sustained_ok;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
- *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
- *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
- *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
- *ptr++ = vpath_info->error_stats.target_illegal_access;
- *ptr++ = vpath_info->error_stats.ini_serr_det;
- *ptr++ = vpath_info->error_stats.prc_ring_bumps;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
- *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
- *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
- *ptr++ =
- vpath_info->ring_stats.common_stats.reserve_free_swaps_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
- *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
- *ptr++ =
- vpath_info->fifo_stats.common_stats.reserve_free_swaps_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
- *ptr++ = vpath_info->fifo_stats.total_posts;
- *ptr++ = vpath_info->fifo_stats.total_buffers;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
- ptr += VXGE_HW_VPATH_STATS_LEN;
- continue;
- }
- *ptr++ = vpath_info->ini_num_mwr_sent;
- *ptr++ = vpath_info->ini_num_mrd_sent;
- *ptr++ = vpath_info->ini_num_cpl_rcvd;
- *ptr++ = vpath_info->ini_num_mwr_byte_sent;
- *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
- *ptr++ = vpath_info->wrcrdtarb_xoff;
- *ptr++ = vpath_info->rdcrdtarb_xoff;
- *ptr++ = vpath_info->vpath_genstats_count0;
- *ptr++ = vpath_info->vpath_genstats_count1;
- *ptr++ = vpath_info->vpath_genstats_count2;
- *ptr++ = vpath_info->vpath_genstats_count3;
- *ptr++ = vpath_info->vpath_genstats_count4;
- *ptr++ = vpath_info->vpath_genstats_count5;
- *ptr++ = vpath_info->prog_event_vnum0;
- *ptr++ = vpath_info->prog_event_vnum1;
- *ptr++ = vpath_info->prog_event_vnum2;
- *ptr++ = vpath_info->prog_event_vnum3;
- *ptr++ = vpath_info->rx_multi_cast_frame_discard;
- *ptr++ = vpath_info->rx_frm_transferred;
- *ptr++ = vpath_info->rxd_returned;
- *ptr++ = vpath_info->rx_mpa_len_fail_frms;
- *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
- *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
- *ptr++ = vpath_info->rx_permitted_frms;
- *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
- *ptr++ = vpath_info->rx_wol_frms;
- *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
- }
-
- *ptr++ = 0;
- *ptr++ = vdev->stats.vpaths_open;
- *ptr++ = vdev->stats.vpath_open_fail;
- *ptr++ = vdev->stats.link_up;
- *ptr++ = vdev->stats.link_down;
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
- *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
- *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
- *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
- *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
- *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
- *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
- *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
- *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
- *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
- vdev->vpaths[k].ring.stats.pci_map_fail;
- *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
- }
-
- ptr += 12;
-
- kfree(xmac_stats);
- kfree(sw_stats);
- kfree(hw_stats);
-}
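-
-/* Layout note (illustrative, not from the original sources): the zero
- * words written at each section boundary above are placeholders for the
- * heading strings (the five VXGE_TITLE_LEN section titles plus the leading
- * "\n DRIVER STATISTICS" key) emitted by vxge_ethtool_get_strings(), so
- * the value array and the string array stay index-aligned for ethtool.
- */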
-
-static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
- u8 *data)
-{
- int stat_size = 0;
- int i, j;
- struct vxgedev *vdev = netdev_priv(dev);
- switch (stringset) {
- case ETH_SS_STATS:
- vxge_add_string("VPATH STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_proto_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_offload_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_retx_tcp_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_various_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_sleep_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_queue_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_protocol_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_any_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octets_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_count_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discard_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_udp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_switch_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_len_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rpa_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rts_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_trash_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_buff_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_local_fault_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jettison_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_remote_fault_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("soft_reset_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("unknown_alarms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_fault_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_ok_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_pif_chain_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_drop_timeout_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("target_illegal_access_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_serr_det_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_ring_bumps_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_quanta_size_err_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("ring_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- vxge_add_string("fifo_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_posts_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_buffers_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- }
-
- vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count0_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count1_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count2_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count3_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count4_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count5_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum0_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum1_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum2_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum3_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_multi_cast_frame_discard_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_frm_transferred_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rxd_returned_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_permitted_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_wol_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- }
-
- memcpy(data + stat_size, &ethtool_driver_stats_keys,
- sizeof(ethtool_driver_stats_keys));
- }
-}
-
-static int vxge_ethtool_get_regs_len(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
-}
-
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (sset) {
- case ETH_SS_STATS:
- return VXGE_TITLE_LEN +
- (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
- DRIVER_STAT_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
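-
-/* Worked example (illustrative, not from the original sources): with one
- * vpath and one port the ETH_SS_STATS count is
- *	5 + 27 + 13 + 94 + 19 + 42 + 60 + 16 = 276
- * strings, the trailing 16 being DRIVER_STAT_LEN derived from
- * ethtool_driver_stats_keys.
- */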
-
-static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
- printk(KERN_INFO "Single Function Mode is required to flash the"
- " firmware\n");
- return -EINVAL;
- }
-
- if (netif_running(dev)) {
- printk(KERN_INFO "Interface %s must be down to flash the "
- "firmware\n", dev->name);
- return -EBUSY;
- }
-
- return vxge_fw_upgrade(vdev, parms->data, 1);
-}
-
-static const struct ethtool_ops vxge_ethtool_ops = {
- .get_drvinfo = vxge_ethtool_gdrvinfo,
- .get_regs_len = vxge_ethtool_get_regs_len,
- .get_regs = vxge_ethtool_gregs,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = vxge_ethtool_getpause_data,
- .set_pauseparam = vxge_ethtool_setpause_data,
- .get_strings = vxge_ethtool_get_strings,
- .set_phys_id = vxge_ethtool_idnic,
- .get_sset_count = vxge_ethtool_get_sset_count,
- .get_ethtool_stats = vxge_get_ethtool_stats,
- .flash_device = vxge_fw_flash,
- .get_link_ksettings = vxge_ethtool_get_link_ksettings,
- .set_link_ksettings = vxge_ethtool_set_link_ksettings,
-};
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev)
-{
- ndev->ethtool_ops = &vxge_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
deleted file mode 100644
index 065a2c0429a4..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef _VXGE_ETHTOOL_H
-#define _VXGE_ETHTOOL_H
-
-#include "vxge-main.h"
-
-/* Ethtool related variables and Macros. */
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
-
-#define VXGE_TITLE_LEN 5
-#define VXGE_HW_VPATH_STATS_LEN 27
-#define VXGE_HW_AGGR_STATS_LEN 13
-#define VXGE_HW_PORT_STATS_LEN 94
-#define VXGE_HW_VPATH_TX_STATS_LEN 19
-#define VXGE_HW_VPATH_RX_STATS_LEN 42
-#define VXGE_SW_STATS_LEN 60
-#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
- VXGE_HW_AGGR_STATS_LEN +\
- VXGE_HW_PORT_STATS_LEN +\
- VXGE_HW_VPATH_TX_STATS_LEN +\
- VXGE_HW_VPATH_RX_STATS_LEN)
-
-#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
-#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
-
-/* Maximum flicker time of adapter LED */
-#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
-#define VXGE_FLICKER_ON 1
-#define VXGE_FLICKER_OFF 0
-
-#define vxge_add_string(fmt, size, buf, ...) {\
- snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
- *size += ETH_GSTRING_LEN; \
-}
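-
-/* Illustrative expansion (not part of the original header): a call such as
- *	vxge_add_string("tx_frms_%d\t\t\t", &stat_size, data, 0);
- * snprintf()s "tx_frms_0" (tab padded) into data + stat_size and then
- * advances stat_size by ETH_GSTRING_LEN, so every key occupies one
- * fixed-width slot. A do { ... } while (0) wrapper would make the macro
- * safer after an unbraced if, but it is only ever used as a full statement.
- */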
-
-#endif /*_VXGE_ETHTOOL_H*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
deleted file mode 100644
index aa7c093f1f91..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ /dev/null
@@ -1,4809 +0,0 @@
-/******************************************************************************
-* This software may be used and distributed according to the terms of
-* the GNU General Public License (GPL), incorporated herein by reference.
-* Drivers based on or derived from this code fall under the GPL and must
-* retain the authorship, copyright and license notice. This file is not
-* a complete program and may only be used when the entire operating
-* system is licensed under the GPL.
-* See the file COPYING in this distribution for more information.
-*
-* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
-* Virtualized Server Adapter.
-* Copyright(c) 2002-2010 Exar Corp.
-*
-* The module loadable parameters that are supported by the driver and a brief
-* explanation of all the variables:
-* vlan_tag_strip:
-* Strip VLAN Tag enable/disable. Instructs the device to remove
-* the VLAN tag from all received tagged frames that are not
-* replicated at the internal L2 switch.
-* 0 - Do not strip the VLAN tag.
-* 1 - Strip the VLAN tag.
-*
-* addr_learn_en:
-* Enable learning the MAC address of the guest OS interface in
-* a virtualization environment.
-* 0 - DISABLE
-* 1 - ENABLE
-*
-* max_config_port:
-* Maximum number of ports to be supported.
-* MIN - 1 and MAX - 2
-*
-* max_config_vpath:
-* This configures the maximum number of VPATHs configured for
-* each device function.
-* MIN - 1 and MAX - 17
-*
-* max_config_dev:
-* This configures the maximum number of device functions to be enabled.
-* MIN - 1 and MAX - 17
-*
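-* Example (illustrative, not from the original sources):
-*	modprobe vxge max_config_vpath=4 vlan_tag_strip=1
-* would cap each function at four VPATHs and strip VLAN tags on receive.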
-******************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <net/ip.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/net_tstamp.h>
-#include <linux/prefetch.h>
-#include <linux/module.h>
-#include "vxge-main.h"
-#include "vxge-reg.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
- "Virtualized Server Adapter");
-
-static const struct pci_device_id vxge_id_table[] = {
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
- PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
- PCI_ANY_ID},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, vxge_id_table);
-
-VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
-VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
-VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
-VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
-
-static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
-static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
-module_param_array(bw_percentage, uint, NULL, 0);
-
-static struct vxge_drv_config *driver_config;
-static void vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-static inline int is_vxge_card_up(struct vxgedev *vdev)
-{
- return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-}
-
-static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
-{
- struct sk_buff **skb_ptr = NULL;
- struct sk_buff **temp;
-#define NR_SKB_COMPLETED 16
- struct sk_buff *completed[NR_SKB_COMPLETED];
- int more;
-
- do {
- more = 0;
- skb_ptr = completed;
-
- if (__netif_tx_trylock(fifo->txq)) {
- vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
- NR_SKB_COMPLETED, &more);
- __netif_tx_unlock(fifo->txq);
- }
-
- /* free SKBs */
- for (temp = completed; temp != skb_ptr; temp++)
- dev_consume_skb_irq(*temp);
- } while (more);
-}
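-
-/* Illustrative note (not from the original sources): completed skbs are
- * reaped in batches of NR_SKB_COMPLETED (16) while holding the tx queue
- * lock, but freed with dev_consume_skb_irq() only after the lock is
- * dropped, keeping the locked section short.
- */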
-
-static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
-{
- int i;
-
- /* Complete all transmits */
- for (i = 0; i < vdev->no_of_vpath; i++)
- VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
-}
-
-static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
-{
- int i;
- struct vxge_ring *ring;
-
- /* Complete all receives */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- vxge_hw_vpath_poll_rx(ring->handle);
- }
-}
-
-/*
- * vxge_callback_link_up
- *
- * This function is called during interrupt context to notify link up state
- * change.
- */
-static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
-
- netif_carrier_on(vdev->ndev);
- netif_tx_wake_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_callback_link_down
- *
- * This function is called during interrupt context to notify link down state
- * change.
- */
-static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Down\n");
-
- vdev->stats.link_down++;
- netif_carrier_off(vdev->ndev);
- netif_tx_stop_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_rx_alloc
- *
- * Allocate SKB.
- */
-static struct sk_buff *
-vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
-{
- struct net_device *dev;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
-
- dev = ring->ndev;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- /* try to allocate skb first. this one may fail */
- skb = netdev_alloc_skb(dev, skb_size +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb == NULL) {
- vxge_debug_mem(VXGE_ERR,
- "%s: out of memory to allocate SKB", dev->name);
- ring->stats.skb_alloc_fail++;
- return NULL;
- }
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d Skb : 0x%p", ring->ndev->name,
- __func__, __LINE__, skb);
-
- skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- rx_priv->skb = skb;
- rx_priv->skb_data = NULL;
- rx_priv->data_size = skb_size;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return skb;
-}
-
-/*
- * vxge_rx_map
- */
-static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
-{
- struct vxge_rx_priv *rx_priv;
- dma_addr_t dma_addr;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- rx_priv->skb_data = rx_priv->skb->data;
- dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
- ring->stats.pci_map_fail++;
- return -EIO;
- }
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
- ring->ndev->name, __func__, __LINE__,
- (unsigned long long)dma_addr);
- vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
-
- rx_priv->data_dma = dma_addr;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return 0;
-}
-
-/*
- * vxge_rx_initial_replenish
- * Allocation of RxD as an initial replenish procedure.
- */
-static enum vxge_hw_status
-vxge_rx_initial_replenish(void *dtrh, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (vxge_rx_alloc(dtrh, ring,
- VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
- return VXGE_HW_FAIL;
-
- if (vxge_rx_map(dtrh, ring)) {
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
- dev_kfree_skb(rx_priv->skb);
-
- return VXGE_HW_FAIL;
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return VXGE_HW_OK;
-}
-
-static inline void
-vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
- int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
-{
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- skb_record_rx_queue(skb, ring->driver_id);
- skb->protocol = eth_type_trans(skb, ring->ndev);
-
- u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.rx_frms++;
- ring->stats.rx_bytes += pkt_length;
-
- if (skb->pkt_type == PACKET_MULTICAST)
- ring->stats.rx_mcast++;
- u64_stats_update_end(&ring->stats.syncp);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb protocol = %d",
- ring->ndev->name, __func__, __LINE__, skb->protocol);
-
- if (ext_info->vlan &&
- ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
- napi_gro_receive(ring->napi_p, skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-}
-
-static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
- struct vxge_rx_priv *rx_priv)
-{
- dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
- vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
-}
-
-static inline void vxge_post(int *dtr_cnt, void **first_dtr,
- void *post_dtr, struct __vxge_hw_ring *ringh)
-{
- int dtr_count = *dtr_cnt;
- if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
- if (*first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
- *first_dtr = post_dtr;
- } else
- vxge_hw_ring_rxd_post_post(ringh, post_dtr);
- dtr_count++;
- *dtr_cnt = dtr_count;
-}
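-
-/* Batching note (illustrative, not from the original sources): descriptors
- * are posted in groups of VXGE_HW_RXSYNC_FREQ_CNT; the first descriptor of
- * each group is held back and posted last via
- * vxge_hw_ring_rxd_post_post_wmb(), whose write barrier ensures the
- * hardware never consumes a group whose later entries are still in flight.
- */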
-
-/*
- * vxge_rx_1b_compl
- *
- * Called when the interrupt is due to a received frame or when the
- * receive ring contains fresh, as yet unprocessed frames.
- */
-static enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
- unsigned int dma_sizes;
- void *first_dtr = NULL;
- int dtr_cnt = 0;
- int data_size;
- dma_addr_t data_dma;
- int pkt_length;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
- struct vxge_hw_ring_rxd_info ext_info;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- if (ring->budget <= 0)
- goto out;
-
- do {
- prefetch((char *)dtr + L1_CACHE_BYTES);
- rx_priv = vxge_hw_ring_rxd_private_get(dtr);
- skb = rx_priv->skb;
- data_size = rx_priv->data_size;
- data_dma = rx_priv->data_dma;
- prefetch(rx_priv->skb_data);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb = 0x%p",
- ring->ndev->name, __func__, __LINE__, skb);
-
- vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
- pkt_length = dma_sizes;
-
- pkt_length -= ETH_FCS_LEN;
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d Packet Length = %d",
- ring->ndev->name, __func__, __LINE__, pkt_length);
-
- vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
-
- /* check skb validity */
- vxge_assert(skb);
-
- prefetch((char *)skb + L1_CACHE_BYTES);
- if (unlikely(t_code)) {
- if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
- VXGE_HW_OK) {
-
- ring->stats.rx_errors++;
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s :%d Rx T_code is %d",
- ring->ndev->name, __func__,
- __LINE__, t_code);
-
- /* If the t_code is not supported and if the
- * t_code is other than 0x5 (unparseable packet
- * such as an unknown IPv6 header), drop it.
- */
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- continue;
- }
- }
-
- if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
- if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
- if (!vxge_rx_map(dtr, ring)) {
- skb_put(skb, pkt_length);
-
- dma_unmap_single(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_pre_post(ringh, dtr);
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- } else {
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb = skb;
- rx_priv->data_size = data_size;
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- struct sk_buff *skb_up;
-
- skb_up = netdev_alloc_skb(dev, pkt_length +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb_up != NULL) {
- skb_reserve(skb_up,
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- dma_sync_single_for_cpu(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d skb_up = %p",
- ring->ndev->name, __func__,
- __LINE__, skb_up);
- memcpy(skb_up->data, skb->data, pkt_length);
-
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- /* hand the small copied SKB up the stack instead */
- skb = skb_up;
- skb_put(skb, pkt_length);
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- vxge_debug_rx(VXGE_ERR,
- "%s: vxge_rx_1b_compl: out of "
- "memory", dev->name);
- ring->stats.skb_alloc_fail++;
- break;
- }
- }
-
- if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
- !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
- (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
- ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
- ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
-
-
- if (ring->rx_hwts) {
- struct skb_shared_hwtstamps *skb_hwts;
- u32 ns = *(u32 *)(skb->head + pkt_length);
-
- skb_hwts = skb_hwtstamps(skb);
- skb_hwts->hwtstamp = ns_to_ktime(ns);
- }
-
- /* rth_hash_type and rth_it_hit are non-zero regardless of
- * whether rss is enabled. Only the rth_value is zero/non-zero
- * if rss is disabled/enabled, so key off of that.
- */
- if (ext_info.rth_value)
- skb_set_hash(skb, ext_info.rth_value,
- PKT_HASH_TYPE_L3);
-
- vxge_rx_complete(ring, skb, ext_info.vlan,
- pkt_length, &ext_info);
-
- ring->budget--;
- ring->pkts_processed++;
- if (!ring->budget)
- break;
-
- } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
- &t_code) == VXGE_HW_OK);
-
- if (first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...",
- __func__, __LINE__);
- return VXGE_HW_OK;
-}
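-
-/* Design note (illustrative, not from the original sources): the loop above
- * is a classic copybreak scheme. Frames no longer than
- * VXGE_LL_RX_COPY_THRESHOLD are memcpy()'d into a small freshly allocated
- * skb so the original DMA buffer is recycled in place; larger frames hand
- * the filled skb up the stack and replenish the descriptor with a brand
- * new buffer instead.
- */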
-
-/*
- * vxge_xmit_compl
- *
- * Called when an interrupt is raised to indicate DMA completion of a Tx
- * packet. It identifies the last TxD whose buffer was freed and frees all
- * skbs whose data has already been DMA'ed into the NIC's internal memory.
- */
-static enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skb, int *more)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- struct sk_buff *skb, **done_skb = *skb_ptr;
- int pkt_cnt = 0;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Entered....", __func__, __LINE__);
-
- do {
- int frg_cnt;
- skb_frag_t *frag;
- int i = 0, j;
- struct vxge_tx_priv *txd_priv =
- vxge_hw_fifo_txdl_private_get(dtr);
-
- skb = txd_priv->skb;
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p "
- "tcode = 0x%x", fifo->ndev->name, __func__,
- __LINE__, fifo_hw, dtr, t_code);
- /* check skb validity */
- vxge_assert(skb);
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
- fifo->ndev->name, __func__, __LINE__,
- skb, txd_priv, frg_cnt);
- if (unlikely(t_code)) {
- fifo->stats.tx_errors++;
- vxge_debug_tx(VXGE_ERR,
- "%s: tx: dtr %p completed due to "
- "error t_code %01x", fifo->ndev->name,
- dtr, t_code);
- vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
- }
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev,
- txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-
- /* Updating the statistics block */
- u64_stats_update_begin(&fifo->stats.syncp);
- fifo->stats.tx_frms++;
- fifo->stats.tx_bytes += skb->len;
- u64_stats_update_end(&fifo->stats.syncp);
-
- *done_skb++ = skb;
-
- if (--nr_skb <= 0) {
- *more = 1;
- break;
- }
-
- pkt_cnt++;
- if (pkt_cnt > fifo->indicate_max_pkts)
- break;
-
- } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
- &dtr, &t_code) == VXGE_HW_OK);
-
- *skb_ptr = done_skb;
- if (netif_tx_queue_stopped(fifo->txq))
- netif_tx_wake_queue(fifo->txq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- fifo->ndev->name, __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
-{
-	u16 queue_len, counter = 0;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip;
- struct tcphdr *th;
-
- ip = ip_hdr(skb);
-
- if (!ip_is_fragment(ip)) {
- th = (struct tcphdr *)(((unsigned char *)ip) +
-					ip->ihl * 4);
-
- queue_len = vdev->no_of_vpath;
- counter = (ntohs(th->source) +
- ntohs(th->dest)) &
- vdev->vpath_selector[queue_len - 1];
- if (counter >= queue_len)
- counter = queue_len - 1;
- }
- }
- return counter;
-}
-
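-/* For illustration: assuming vpath_selector[] holds an all-ones mask per
- * vpath count (e.g. 0x3 when four vpaths are configured), the steering
- * above reduces to a simple port-sum hash:
- *
- *	sport = 1000, dport = 2000, mask = 0x3
- *	counter = (1000 + 2000) & 0x3 = 0
- *
- * so all segments of a given TCP flow land on the same vpath, while IP
- * fragments and non-IP frames fall back to vpath 0.
- */
-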
-static enum vxge_hw_status vxge_search_mac_addr_in_list(
- struct vxge_vpath *vpath, u64 del_mac)
-{
- struct list_head *entry, *next;
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
- return TRUE;
- }
- return FALSE;
-}
-
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct vxge_mac_addrs *new_mac_entry;
- u8 *mac_address = NULL;
-
- if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
- return TRUE;
-
- new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
- if (!new_mac_entry) {
- vxge_debug_mem(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- return FALSE;
- }
-
- list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
- /* Copy the new mac address to the list */
- mac_address = (u8 *)&new_mac_entry->macaddr;
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- new_mac_entry->state = mac->state;
- vpath->mac_addr_cnt++;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt++;
-
- return TRUE;
-}
-
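-/* For illustration: the list above keeps each 6-byte MAC in the low bytes
- * of a u64 (note the memcpy() into &new_mac_entry->macaddr), so lookup and
- * delete become a single integer compare. A minimal sketch:
- *
- *	u64 key = 0;
- *	memcpy(&key, mac->macaddr, ETH_ALEN);	// top 2 bytes stay zero
- *	match = (((struct vxge_mac_addrs *)entry)->macaddr == key);
- */
-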
-/* Add a mac address to DA table */
-static enum vxge_hw_status
-vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
- if (is_multicast_ether_addr(mac->macaddr))
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
- else
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
- mac->macmask, duplicate_mode);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config add entry failed for vpath:%d",
- vpath->device_id);
- } else
- if (FALSE == vxge_mac_list_add(vpath, mac))
- status = -EPERM;
-
- return status;
-}
-
-static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
-{
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- u64 mac_addr = 0, vpath_vector = 0;
- int vpath_idx = 0;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = NULL;
-
- mac_address = (u8 *)&mac_addr;
- memcpy(mac_address, mac_header, ETH_ALEN);
-
- /* Is this mac address already in the list? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vxge_search_mac_addr_in_list(vpath, mac_addr))
- return vpath_idx;
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
-
-	/* Does any vpath have room to add this mac address to its DA table? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK)
- return -EPERM;
- return vpath_idx;
- }
- }
-
- mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
- vpath_idx = 0;
- mac_info.vpath_no = vpath_idx;
-	/* Is the first vpath already selected as catch-basin? */
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
- return vpath_idx;
- }
-
- /* Select first vpath as catch-basin */
- vpath_vector = vxge_mBIT(vpath->device_id);
- status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- vpath_vector);
- if (status != VXGE_HW_OK) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Unable to set the vpath-%d in catch-basin mode",
- VXGE_DRIVER_NAME, vpath->device_id);
- return -EPERM;
- }
-
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
-
- return vpath_idx;
-}
-
-/**
- * vxge_xmit
- * @skb: the socket buffer containing the Tx data.
- * @dev: device pointer.
- *
- * This function is the Tx entry point of the driver. The Neterion NIC
- * supports certain protocol-assist features on the Tx side, namely CSO,
- * S/G and LSO.
-*/
-static netdev_tx_t
-vxge_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct vxge_fifo *fifo = NULL;
- void *dtr_priv;
- void *dtr = NULL;
- struct vxgedev *vdev = NULL;
- enum vxge_hw_status status;
- int frg_cnt, first_frg_len;
- skb_frag_t *frag;
- int i = 0, j = 0, avail;
- u64 dma_pointer;
- struct vxge_tx_priv *txdl_priv = NULL;
- struct __vxge_hw_fifo *fifo_hw;
- int offload_type;
- int vpath_no = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- /* A buffer with no data will be dropped */
- if (unlikely(skb->len <= 0)) {
- vxge_debug_tx(VXGE_ERR,
-			"%s: Buffer has no data.", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- vxge_debug_tx(VXGE_ERR,
- "%s: vdev not initialized", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (vdev->config.addr_learn_en) {
- vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
- if (vpath_no == -EPERM) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Failed to store the mac address",
- dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-
- if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
- vpath_no = skb_get_queue_mapping(skb);
- else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
- vpath_no = vxge_get_vpath_no(vdev, skb);
-
- vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
-
- if (vpath_no >= vdev->no_of_vpath)
- vpath_no = 0;
-
- fifo = &vdev->vpaths[vpath_no].fifo;
- fifo_hw = fifo->handle;
-
- if (netif_tx_queue_stopped(fifo->txq))
- return NETDEV_TX_BUSY;
-
- avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
- if (avail == 0) {
- vxge_debug_tx(VXGE_ERR,
- "%s: No free TXDs available", dev->name);
- fifo->stats.txd_not_free++;
- goto _exit0;
- }
-
- /* Last TXD? Stop tx queue to avoid dropping packets. TX
- * completion will resume the queue.
- */
- if (avail == 1)
- netif_tx_stop_queue(fifo->txq);
-
- status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
- if (unlikely(status != VXGE_HW_OK)) {
- vxge_debug_tx(VXGE_ERR,
-				"%s: Out of descriptors.", dev->name);
- fifo->stats.txd_out_of_desc++;
- goto _exit0;
- }
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
- dev->name, __func__, __LINE__,
- fifo_hw, dtr, dtr_priv);
-
- if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag = skb_vlan_tag_get(skb);
- vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
- }
-
- first_frg_len = skb_headlen(skb);
-
- dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
- first_frg_len, DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
- fifo->stats.pci_map_fail++;
- goto _exit0;
- }
-
- txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
- txdl_priv->skb = skb;
- txdl_priv->dma_buffers[j] = dma_pointer;
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p txdl_priv = %p "
- "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
- __func__, __LINE__, skb, txdl_priv,
- frg_cnt, (unsigned long long)dma_pointer);
-
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- first_frg_len);
-
- frag = &skb_shinfo(skb)->frags[0];
- for (i = 0; i < frg_cnt; i++) {
- /* ignore 0 length fragment */
- if (!skb_frag_size(frag))
- continue;
-
- dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
- goto _exit2;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d frag = %d dma_pointer = 0x%llx",
- dev->name, __func__, __LINE__, i,
- (unsigned long long)dma_pointer);
-
- txdl_priv->dma_buffers[j] = dma_pointer;
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- skb_frag_size(frag));
- frag += 1;
- }
-
- offload_type = vxge_offload_type(skb);
-
- if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- int mss = vxge_tcp_mss(skb);
- if (mss) {
- vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
- dev->name, __func__, __LINE__, mss);
- vxge_hw_fifo_txdl_mss_set(dtr, mss);
- } else {
- vxge_assert(skb->len <=
- dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
- vxge_assert(0);
- goto _exit1;
- }
- }
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vxge_hw_fifo_txdl_cksum_set_bits(dtr,
- VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
-
- vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return NETDEV_TX_OK;
-
-_exit2:
- vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-_exit1:
- j = 0;
- frag = &skb_shinfo(skb)->frags[0];
-
- dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (; j < i; j++) {
- dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit0:
- netif_tx_stop_queue(fifo->txq);
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
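-/* For illustration: the _exit1/_exit2 unwinding above follows the usual
- * partial-mapping rollback pattern: record each dma_addr_t as it is
- * mapped, and on failure unmap only what was mapped so far. A generic
- * sketch, with hypothetical names:
- *
- *	head = dma_map_single(dev, skb->data, skb_headlen(skb),
- *			      DMA_TO_DEVICE);
- *	for (i = 0; i < nr_frags; i++) {
- *		addr[i] = skb_frag_dma_map(dev, &frags[i], 0,
- *					   skb_frag_size(&frags[i]),
- *					   DMA_TO_DEVICE);
- *		if (dma_mapping_error(dev, addr[i]))
- *			goto unwind;	// unmap head and frags [0, i)
- *	}
- */
-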
-/*
- * vxge_rx_term
- *
- * Called by the HW layer to abort all outstanding receive
- * descriptors.
- */
-static void
-vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv =
- vxge_hw_ring_rxd_private_get(dtrh);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (state != VXGE_HW_RXD_STATE_POSTED)
- return;
-
- dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb_data = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- ring->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_tx_term
- *
- * Called to abort all outstanding Tx descriptors.
- */
-static void
-vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- skb_frag_t *frag;
- int i = 0, j, frg_cnt;
- struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
- struct sk_buff *skb = txd_priv->skb;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (state != VXGE_HW_TXDL_STATE_POSTED)
- return;
-
- /* check skb validity */
- vxge_assert(skb);
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- dev_kfree_skb(skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct list_head *entry, *next;
- u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
-
- /* Copy the mac address to delete from the list */
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
- list_del(entry);
- kfree(entry);
- vpath->mac_addr_cnt--;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt--;
- return TRUE;
- }
- }
-
- return FALSE;
-}
-
-/* delete a mac address from DA table */
-static enum vxge_hw_status
-vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
- mac->macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config delete entry failed for vpath:%d",
- vpath->device_id);
- } else
- vxge_mac_list_del(vpath, mac);
- return status;
-}
-
-/**
- * vxge_set_multicast
- * @dev: pointer to the device structure
- *
- * Entry point for multicast address enable/disable
- * This function is a driver entry point which gets called by the kernel
- * whenever multicast addresses must be enabled/disabled. It also gets
- * called to set/reset promiscuous mode. Depending on the device flags, we
- * determine whether multicast addresses must be enabled or whether
- * promiscuous mode is to be disabled, etc.
- */
-static void vxge_set_multicast(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- struct vxgedev *vdev;
- int i, mcast_cnt = 0;
- struct vxge_vpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- int vpath_idx = 0;
- struct vxge_mac_addrs *mac_entry;
- struct list_head *list_head;
- struct list_head *entry, *next;
- u8 *mac_address = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return;
-
- if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to enable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 1;
- }
- } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_disable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to disable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 0;
- }
- }
-
- if (!vdev->config.addr_learn_en) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- if (dev->flags & IFF_PROMISC)
- status = vxge_hw_vpath_promisc_enable(
- vpath->handle);
- else
- status = vxge_hw_vpath_promisc_disable(
- vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to %s promisc"
-					", status %d", dev->flags & IFF_PROMISC ?
- "enable" : "disable", status);
- }
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- /* Update individual M_CAST address list */
-	/* Update the individual multicast address list */
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- list_head = &vdev->vpaths[0].mac_addr_list;
- if ((netdev_mc_count(dev) +
- (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
- vdev->vpaths[0].max_mac_addr_cnt)
- goto _set_all_mcast;
-
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr)) {
- for (vpath_idx = 0; vpath_idx <
- vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(
- vdev,
- &mac_info);
- }
- }
- }
- }
-
- /* Add new ones */
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
-						"%s:%d Setting individual "
- "multicast address failed",
- __func__, __LINE__);
- goto _set_all_mcast;
- }
- }
- }
-
- return;
-_set_all_mcast:
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr))
- break;
- }
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info);
- }
- }
-
- /* Enable all multicast */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling all multicasts failed",
- __func__, __LINE__);
- }
- vdev->all_multi_flg = 1;
- }
- dev->flags |= IFF_ALLMULTI;
- }
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_set_mac_addr
- * @dev: pointer to the device structure
- * @p: socket info
- *
- * Update entry "0" (default MAC addr)
- */
-static int vxge_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct vxgedev *vdev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info_new, mac_info_old;
- int vpath_idx = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memset(&mac_info_new, 0, sizeof(struct macInfo));
- memset(&mac_info_old, 0, sizeof(struct macInfo));
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
- __func__, __LINE__);
-
- /* Get the old address */
- memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
-
- /* Copy the new address */
- memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
-
-	/* First delete the old mac address from all the vpaths,
-	   as we can't specify the index while adding a new mac address */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
- if (!vpath->is_open) {
-			/* This can happen when this interface is added to or
-			removed from a bonding interface. Delete this station
-			address from the linked list */
- vxge_mac_list_del(vpath, &mac_info_old);
-
- /* Add this new address to the linked list
- for later restoring */
- vxge_mac_list_add(vpath, &mac_info_new);
-
- continue;
- }
- /* Delete the station address */
- mac_info_old.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info_old);
- }
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- eth_hw_addr_set(dev, addr->sa_data);
- return VXGE_HW_OK;
- }
-
- /* Set this mac address to all the vpaths */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- mac_info_new.vpath_no = vpath_idx;
- mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info_new);
- if (status != VXGE_HW_OK)
- return -EINVAL;
- }
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- return status;
-}
-
-/*
- * vxge_vpath_intr_enable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to enable the interrupts
- *
- * Enables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id = 0;
- int tim_msix_id[4] = {0, 1, 0, 0};
- int alarm_msix_id = VXGE_ALARM_MSIX_ID;
-
- vxge_hw_vpath_intr_enable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
- else {
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- alarm_msix_id);
-
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
-
- /* enable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- }
-}
-
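-/* For illustration: each vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE
- * vector slots, Tx at offset 0 and Rx at offset 1, while the alarm vector
- * sits at VXGE_ALARM_MSIX_ID within the first vpath's block. Assuming
- * VXGE_HW_VPATH_MSIX_ACTIVE == 4, vpath 2 would unmask:
- *
- *	tx vector = 2 * 4 + 0 = 8
- *	rx vector = 2 * 4 + 1 = 9
- *	alarm     = first_vp_id * 4 + VXGE_ALARM_MSIX_ID
- */
-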
-/*
- * vxge_vpath_intr_disable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to disable the interrupts
- *
- * Disables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- struct __vxge_hw_device *hldev;
- int msix_id;
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
-
- vxge_hw_vpath_intr_disable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
- else {
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
-
- /* disable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- }
-}
-
-/* list all mac addresses from DA table */
-static enum vxge_hw_status
-vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- unsigned char macmask[ETH_ALEN];
- unsigned char macaddr[ETH_ALEN];
-
- status = vxge_hw_vpath_mac_addr_get(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config list entry failed for vpath:%d",
- vpath->device_id);
- return status;
- }
-
- while (!ether_addr_equal(mac->macaddr, macaddr)) {
- status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK)
- break;
- }
-
- return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- struct list_head *entry, *next;
-
- memset(&mac_info, 0, sizeof(struct macInfo));
-
- if (vpath->is_open) {
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- mac_address =
- (u8 *)&
- ((struct vxge_mac_addrs *)entry)->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- ((struct vxge_mac_addrs *)entry)->state =
- VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- /* does this mac address already exist in da table? */
- status = vxge_search_mac_addr_in_da_table(vpath,
- &mac_info);
- if (status != VXGE_HW_OK) {
- /* Add this mac address to the DA table */
- status = vxge_hw_vpath_mac_addr_add(
- vpath->handle, mac_info.macaddr,
- mac_info.macmask,
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA add entry failed for vpath:%d",
- vpath->device_id);
- ((struct vxge_mac_addrs *)entry)->state
- = VXGE_LL_MAC_ADDR_IN_LIST;
- }
- }
- }
- }
-
- return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status
-vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev = vpath->vdev;
- u16 vid;
-
- if (!vpath->is_open)
- return status;
-
- for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-
- return status;
-}
-
-/*
- * vxge_reset_vpath
- * @vdev: pointer to vdev
- * @vp_id: vpath to reset
- *
- * Resets the vpath
-*/
-static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int ret = 0;
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is device reset already scheduled */
- if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
-
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(vpath->handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
-					"vxge_hw_vpath_recover_from_reset "
- "failed for vpath:%d", vp_id);
- return status;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
-				"vxge_hw_vpath_reset failed for "
- "vpath:%d", vp_id);
- return status;
- }
- } else
- return VXGE_HW_FAIL;
-
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- /* Enable all broadcast */
- vxge_hw_vpath_bcast_enable(vpath->handle);
-
- /* Enable all multicast */
- if (vdev->all_multi_flg) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
-
- /* Enable the interrupts */
- vxge_vpath_intr_enable(vdev, vp_id);
-
- smp_wmb();
-
- /* Enable the flow of traffic through the vpath */
- vxge_hw_vpath_enable(vpath->handle);
-
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- vpath->ring.last_status = VXGE_HW_OK;
-
- /* Vpath reset done */
- clear_bit(vp_id, &vdev->vp_reset);
-
- /* Start the vpath queue */
- if (netif_tx_queue_stopped(vpath->fifo.txq))
- netif_tx_wake_queue(vpath->fifo.txq);
-
- return ret;
-}
-
-/* Configure CI */
-static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
-{
- int i = 0;
-
- /* Enable CI for RTI */
- if (vdev->config.intr_type == MSI_X) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_ring *hw_ring;
-
- hw_ring = vdev->vpaths[i].ring.handle;
- vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
- }
- }
-
- /* Enable CI for TTI */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
- vxge_hw_vpath_tti_ci_set(hw_fifo);
- /*
-		 * For INTA (with or without NAPI), set CI on for only one
-		 * vpath (there is only one free-running timer).
- */
- if ((vdev->config.intr_type == INTA) && (i == 0))
- break;
- }
-
- return;
-}
-
-static int do_vxge_reset(struct vxgedev *vdev, int event)
-{
- int ret = 0, vp_id, i;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is reset already scheduled */
- if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- netif_carrier_off(vdev->ndev);
-
- /* wait for all the vpath reset to complete */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- while (test_bit(vp_id, &vdev->vp_reset))
- msleep(50);
- }
-
- netif_carrier_on(vdev->ndev);
-
- /* if execution mode is set to debug, don't reset the adapter */
- if (unlikely(vdev->exec_mode)) {
- vxge_debug_init(VXGE_ERR,
- "%s: execution mode is debug, returning..",
- vdev->ndev->name);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- netif_tx_stop_all_queues(vdev->ndev);
- return 0;
- }
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- vxge_hw_device_wait_receive_idle(vdev->devh);
- vxge_hw_device_intr_disable(vdev->devh);
-
- switch (vdev->cric_err_event) {
- case VXGE_HW_EVENT_UNKNOWN:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "unknown error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_RESET_START:
- break;
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- case VXGE_HW_EVENT_ALARM_CLEARED:
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- break;
- case VXGE_HW_EVENT_CRITICAL_ERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "serious error",
- vdev->ndev->name);
- /* SOP or device reset required */
- /* This event is not currently used */
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "serious error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "slot freeze",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- default:
- break;
-
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
- netif_tx_stop_all_queues(vdev->ndev);
-
-	if (event == VXGE_LL_FULL_RESET)
-		vxge_reset_all_vpaths(vdev);
-
- if (event == VXGE_LL_COMPL_RESET) {
- for (i = 0; i < vdev->no_of_vpath; i++)
- if (vdev->vpaths[i].handle) {
- if (vxge_hw_vpath_recover_from_reset(
- vdev->vpaths[i].handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- ret = -EPERM;
- goto out;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- ret = -EPERM;
- goto out;
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
- /* Reprogram the DA table with populated mac addresses */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
- vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
- }
-
- /* enable vpath interrupts */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_vpath_intr_enable(vdev, i);
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- /* Indicate card up */
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Get the traffic to flow through the vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_enable(vdev->vpaths[i].handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
- }
-
- netif_tx_wake_all_queues(vdev->ndev);
- }
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-
- /* Indicate reset done */
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
- return ret;
-}
-
-/*
- * vxge_reset
- * @vdev: pointer to ll device
- *
- * driver may reset the chip on events of serr, eccerr, etc
- */
-static void vxge_reset(struct work_struct *work)
-{
- struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
-
- if (!netif_running(vdev->ndev))
- return;
-
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-}
-
-/**
- * vxge_poll_msix - Receive handler when Receive Polling is used.
- * @napi: pointer to the napi structure.
- * @budget: Number of packets budgeted to be processed in this iteration.
- *
- * This function comes into the picture only if the receive side is being
- * handled through polling (called NAPI in Linux). It does mostly what the
- * normal Rx interrupt handler does in terms of descriptor and packet
- * processing, but not in interrupt context. It processes at most a
- * specified number of packets in one iteration; this value is passed down
- * by the kernel as the function argument 'budget'.
- */
-static int vxge_poll_msix(struct napi_struct *napi, int budget)
-{
- struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
- int pkts_processed;
- int budget_org = budget;
-
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed = ring->pkts_processed;
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
-
- /* Re enable the Rx interrupts for the vpath */
- vxge_hw_channel_msix_unmask(
- (struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
- }
-
-	/* Return the local copy: if the interrupt fires right after the
-	 * MSI-X vector is unmasked above, it can preempt this NAPI thread
-	 * and modify ring->pkts_processed under us */
- return pkts_processed;
-}
-
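-/* For illustration: the routine above follows the standard NAPI contract.
- * A poll function returns how many packets it consumed; only when that is
- * strictly less than the budget may it complete NAPI and re-arm its
- * interrupt. A generic sketch, with hypothetical helpers:
- *
- *	static int my_poll(struct napi_struct *napi, int budget)
- *	{
- *		int done = process_rx(napi, budget);	// hypothetical
- *
- *		if (done < budget) {
- *			napi_complete_done(napi, done);
- *			enable_rx_irq();		// hypothetical
- *		}
- *		return done;
- *	}
- */
-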
-static int vxge_poll_inta(struct napi_struct *napi, int budget)
-{
- struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
- int pkts_processed = 0;
- int i;
- int budget_org = budget;
- struct vxge_ring *ring;
-
- struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed += ring->pkts_processed;
- budget -= ring->pkts_processed;
- if (budget <= 0)
- break;
- }
-
- VXGE_COMPLETE_ALL_TX(vdev);
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re enable the Rx interrupts for the ring */
- vxge_hw_device_unmask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- }
-
- return pkts_processed;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * vxge_netpoll - netpoll event handler entry point
- * @dev : pointer to the device structure.
- * Description:
- * This function will be called by upper layer to check for events on the
- * interface in situations where interrupts are disabled. It is used for
- * specific in-kernel networking tasks, such as remote consoles and kernel
- * debugging over the network (e.g. netdump in Red Hat).
- */
-static void vxge_netpoll(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct pci_dev *pdev = vdev->pdev;
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- const int irq = pdev->irq;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (pci_channel_offline(pdev))
- return;
-
- disable_irq(irq);
-	vxge_hw_device_clear_tx_rx(hldev);
- VXGE_COMPLETE_ALL_RX(vdev);
- VXGE_COMPLETE_ALL_TX(vdev);
-
- enable_irq(irq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-#endif
-
-/* RTH configuration */
-static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_rth_hash_types hash_types;
- u8 itable[256] = {0}; /* indirection table */
- u8 mtable[256] = {0}; /* CPU to vpath mapping */
- int index;
-
- /*
- * Filling
- * - itable with bucket numbers
- * - mtable with bucket-to-vpath mapping
- */
- for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
- itable[index] = index;
- mtable[index] = index % vdev->no_of_vpath;
- }
-
- /* set indirection table, bucket-to-vpath mapping */
- status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
- vdev->no_of_vpath,
- mtable, itable,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH indirection table configuration failed "
- "for vpath:%d", vdev->vpaths[0].device_id);
- return status;
- }
-
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
- /*
- * Because the itable_set() method uses the active_table field
-	 * for the target virtual path, the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
- for (index = 0; index < vdev->no_of_vpath; index++) {
- status = vxge_hw_vpath_rts_rth_set(
- vdev->vpaths[index].handle,
- vdev->config.rth_algorithm,
- &hash_types,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH configuration failed for vpath:%d",
- vdev->vpaths[index].device_id);
- return status;
- }
- }
-
- return status;
-}
-
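-/* For illustration: with rth_bkt_sz == 2 (four buckets) and two open
- * vpaths, the fill loop above produces:
- *
- *	itable[] = { 0, 1, 2, 3 }	// bucket numbers
- *	mtable[] = { 0, 1, 0, 1 }	// bucket -> vpath, round-robin
- *
- * so RSS hash buckets are spread evenly across the available vpaths.
- */
-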
-/* reset vpaths */
-static void vxge_reset_all_vpaths(struct vxgedev *vdev)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(
- vpath->handle) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- return;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- return;
- }
- }
- }
-}
-
-/* close vpaths */
-static void vxge_close_vpaths(struct vxgedev *vdev, int index)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = index; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- if (vpath->handle && vpath->is_open) {
- vxge_hw_vpath_close(vpath->handle);
- vdev->stats.vpaths_open--;
- }
- vpath->is_open = 0;
- vpath->handle = NULL;
- }
-}
-
-/* open vpaths */
-static int vxge_open_vpaths(struct vxgedev *vdev)
-{
- struct vxge_hw_vpath_attr attr;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath;
- u32 vp_id = 0;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_configured);
-
- if (!vdev->titan1) {
- struct vxge_hw_vp_config *vcfg;
- vcfg = &vdev->devh->config.vp_config[vpath->device_id];
-
- vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
- vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
- vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
- vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
- vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
- vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
- vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
- vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
- vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
- }
-
- attr.vp_id = vpath->device_id;
- attr.fifo_attr.callback = vxge_xmit_compl;
- attr.fifo_attr.txdl_term = vxge_tx_term;
- attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
- attr.fifo_attr.userdata = &vpath->fifo;
-
- attr.ring_attr.callback = vxge_rx_1b_compl;
- attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
- attr.ring_attr.rxd_term = vxge_rx_term;
- attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
- attr.ring_attr.userdata = &vpath->ring;
-
- vpath->ring.ndev = vdev->ndev;
- vpath->ring.pdev = vdev->pdev;
-
- status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
- if (status == VXGE_HW_OK) {
- vpath->fifo.handle =
- (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
- vpath->ring.handle =
- (struct __vxge_hw_ring *)attr.ring_attr.userdata;
- vpath->fifo.tx_steering_type =
- vdev->config.tx_steering_type;
- vpath->fifo.ndev = vdev->ndev;
- vpath->fifo.pdev = vdev->pdev;
-
- u64_stats_init(&vpath->fifo.stats.syncp);
- u64_stats_init(&vpath->ring.stats.syncp);
-
- if (vdev->config.tx_steering_type)
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, i);
- else
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, 0);
- vpath->fifo.indicate_max_pkts =
- vdev->config.fifo_indicate_max_pkts;
- vpath->fifo.tx_vector_no = 0;
- vpath->ring.rx_vector_no = 0;
- vpath->ring.rx_hwts = vdev->rx_hwts;
- vpath->is_open = 1;
- vdev->vp_handles[i] = vpath->handle;
- vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
- vdev->stats.vpaths_open++;
- } else {
- vdev->stats.vpath_open_fail++;
- vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
- "open with status: %d",
- vdev->ndev->name, vpath->device_id,
- status);
- vxge_close_vpaths(vdev, 0);
- return -EPERM;
- }
-
- vp_id = vpath->handle->vpath->vp_id;
- vdev->vpaths_deployed |= vxge_mBIT(vp_id);
- }
-
- return VXGE_HW_OK;
-}
-
-/**
- * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @fifo: pointer to transmit fifo structure
- * Description: The function changes the boundary and restriction timer
- * values depending on the traffic
- * Return Value: None
- */
-static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
-{
- fifo->interrupt_count++;
- if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_fifo *hw_fifo = fifo->handle;
-
- fifo->jiffies = jiffies;
- if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
- hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
- hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- } else if (hw_fifo->rtimer != 0) {
- hw_fifo->rtimer = 0;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- }
- fifo->interrupt_count = 0;
- }
-}
-
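-/* For illustration: time_before(fifo->jiffies + HZ / 100, jiffies) is the
- * wrap-safe way of asking "have at least 10 ms passed since the last
- * adjustment?". E.g. assuming HZ == 1000:
- *
- *	fifo->jiffies = 5000;		// tick of the last change
- *	jiffies       = 5011;		// now
- *	time_before(5000 + 10, 5011)	// true -> re-evaluate the timer
- */
-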
-/**
- * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @ring: pointer to receive ring structure
- * Description: The function increases or decreases the packet counts within
- * the ranges of traffic utilization, if the interrupts due to this ring are
- * not within a fixed range.
- * Return Value: Nothing
- */
-static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
-{
- ring->interrupt_count++;
- if (time_before(ring->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_ring *hw_ring = ring->handle;
-
- ring->jiffies = jiffies;
- if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
- hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
- hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- } else if (hw_ring->rtimer != 0) {
- hw_ring->rtimer = 0;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- }
- ring->interrupt_count = 0;
- }
-}
-
-/*
- * vxge_isr_napi
- * @irq: the irq of the device.
- * @dev_id: a void pointer to the hldev structure of the Titan device
- * @ptregs: pointer to the registers pushed on the stack.
- *
- * This function is the ISR handler of the device when napi is enabled. It
- * identifies the reason for the interrupt and calls the relevant service
- * routines.
- */
-static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
-{
- struct __vxge_hw_device *hldev;
- u64 reason;
- enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *)dev_id;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (pci_channel_offline(vdev->pdev))
- return IRQ_NONE;
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_HANDLED;
-
- status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
- if (status == VXGE_HW_OK) {
- vxge_hw_device_mask_all(hldev);
-
- if (reason &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
- vdev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
-
- vxge_hw_device_clear_tx_rx(hldev);
- napi_schedule(&vdev->napi);
- vxge_debug_intr(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_HANDLED;
- } else
- vxge_hw_device_unmask_all(hldev);
- } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
- (status == VXGE_HW_ERR_CRITICAL) ||
- (status == VXGE_HW_ERR_FIFO))) {
- vxge_hw_device_mask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- return IRQ_HANDLED;
- } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
- return IRQ_HANDLED;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_NONE;
-}
-
-static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
-
- adaptive_coalesce_tx_interrupts(fifo);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- VXGE_COMPLETE_VPATH_TX(fifo);
-
- vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
-{
- struct vxge_ring *ring = (struct vxge_ring *)dev_id;
-
- adaptive_coalesce_rx_interrupts(ring);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- napi_schedule(&ring->napi);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-vxge_alarm_msix_handle(int irq, void *dev_id)
-{
- int i;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
- struct vxgedev *vdev = vpath->vdev;
- int msix_id = (vpath->handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- /* Reduce the chance of losing alarm interrupts by masking
- * the vector. A pending bit will be set if an alarm is
- * generated and on unmask the interrupt will be fired.
- */
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
- vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
-
- status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
- vdev->exec_mode);
- if (status == VXGE_HW_OK) {
- vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
- continue;
- }
- vxge_debug_intr(VXGE_ERR,
- "%s: vxge_hw_vpath_alarm_process failed %x ",
- VXGE_DRIVER_NAME, status);
- }
- return IRQ_HANDLED;
-}
-
-static int vxge_alloc_msix(struct vxgedev *vdev)
-{
- int j, i, ret = 0;
- int msix_intr_vect = 0, temp;
-
-	vdev->intr_cnt = 0;
-
-start:
- /* Tx/Rx MSIX Vectors count */
- vdev->intr_cnt = vdev->no_of_vpath * 2;
-
- /* Alarm MSIX Vectors count */
- vdev->intr_cnt++;
-
- vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!vdev->entries) {
- vxge_debug_init(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_entries_failed;
- }
-
- vdev->vxge_entries = kcalloc(vdev->intr_cnt,
- sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
- if (!vdev->vxge_entries) {
- vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_vxge_entries_failed;
- }
-
- for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
-
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-
- /* Initialize the fifo vector */
- vdev->entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].in_use = 0;
- j++;
-
- /* Initialize the ring vector */
- vdev->entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].in_use = 0;
- j++;
- }
-
- /* Initialize the alarm vector */
- vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].in_use = 0;
-
- ret = pci_enable_msix_range(vdev->pdev,
- vdev->entries, 3, vdev->intr_cnt);
- if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
- } else if (ret < vdev->intr_cnt) {
- pci_disable_msix(vdev->pdev);
-
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if (max_config_vpath != VXGE_USE_DEFAULT) {
- ret = -ENODEV;
- goto enable_msix_failed;
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-		/* Retry with fewer vectors by reducing the vpath count */
-		temp = (ret - 1) / 2;
- vxge_close_vpaths(vdev, temp);
- vdev->no_of_vpath = temp;
- goto start;
- }
- return 0;
-
-enable_msix_failed:
- kfree(vdev->vxge_entries);
-alloc_vxge_entries_failed:
- kfree(vdev->entries);
-alloc_entries_failed:
- return ret;
-}
-
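-/* For illustration: for a two-vpath device, and assuming
- * VXGE_HW_VPATH_MSIX_ACTIVE == 4, the allocation above requests
- * 2 * 2 + 1 = 5 vectors with these hardware entry numbers:
- *
- *	entries[0].entry = 0	// vpath 0 Tx
- *	entries[1].entry = 1	// vpath 0 Rx
- *	entries[2].entry = 4	// vpath 1 Tx
- *	entries[3].entry = 5	// vpath 1 Rx
- *	entries[4].entry = VXGE_ALARM_MSIX_ID
- *
- * If fewer vectors are granted, the vpath count is halved and the table
- * is rebuilt from the 'start' label above.
- */
-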
-static int vxge_enable_msix(struct vxgedev *vdev)
-{
- int i, ret = 0;
- /* 0 - Tx, 1 - Rx */
- int tim_msix_id[4] = {0, 1, 0, 0};
-
- vdev->intr_cnt = 0;
-
- /* allocate msix vectors */
- ret = vxge_alloc_msix(vdev);
- if (!ret) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct vxge_vpath *vpath = &vdev->vpaths[i];
-
-			/* If the fifo or ring is not enabled, its MSIX
-			 * vector should be set to 0.
-			 */
- vpath->ring.rx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
-
- vpath->fifo.tx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE);
-
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- VXGE_ALARM_MSIX_ID);
- }
- }
-
- return ret;
-}
-
-static void vxge_rem_msix_isr(struct vxgedev *vdev)
-{
- int intr_cnt;
-
- for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
- intr_cnt++) {
- if (vdev->vxge_entries[intr_cnt].in_use) {
- synchronize_irq(vdev->entries[intr_cnt].vector);
- free_irq(vdev->entries[intr_cnt].vector,
- vdev->vxge_entries[intr_cnt].arg);
- vdev->vxge_entries[intr_cnt].in_use = 0;
- }
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-
- if (vdev->config.intr_type == MSI_X)
- pci_disable_msix(vdev->pdev);
-}
-
-static void vxge_rem_isr(struct vxgedev *vdev)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI) &&
- vdev->config.intr_type == MSI_X) {
- vxge_rem_msix_isr(vdev);
- } else if (vdev->config.intr_type == INTA) {
- synchronize_irq(vdev->pdev->irq);
- free_irq(vdev->pdev->irq, vdev);
- }
-}
-
-static int vxge_add_isr(struct vxgedev *vdev)
-{
- int ret = 0;
- int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
- int pci_fun = PCI_FUNC(vdev->pdev->devfn);
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
- ret = vxge_enable_msix(vdev);
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
- vdev->config.intr_type = INTA;
- }
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
- for (intr_idx = 0;
- intr_idx < (vdev->no_of_vpath *
- VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
-
- msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
- irq_req = 0;
-
- switch (msix_idx) {
- case 0:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_tx_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].fifo;
- irq_req = 1;
- break;
- case 1:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].ring;
- irq_req = 1;
- break;
- }
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- if (irq_req) {
-				/* We requested this msix interrupt */
- vdev->vxge_entries[intr_cnt].in_use = 1;
- msix_idx += vdev->vpaths[vp_idx].device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(
- vdev->vpaths[vp_idx].handle,
- msix_idx);
- intr_cnt++;
- }
-
- /* Point to next vpath handler */
- if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
- (vp_idx < (vdev->no_of_vpath - 1)))
- vp_idx++;
- }
-
- intr_cnt = vdev->no_of_vpath * 2;
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Alarm - fn:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun);
- /* For Alarm interrupts */
- ret = request_irq(vdev->entries[intr_cnt].vector,
- vxge_alarm_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[0]);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- msix_idx);
- vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
- }
-
-INTA_MODE:
- if (vdev->config.intr_type == INTA) {
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
- "%s:vxge:INTA", vdev->ndev->name);
- vxge_hw_device_set_intr_type(vdev->devh,
- VXGE_HW_INTR_MODE_IRQLINE);
-
- vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
-
- ret = request_irq((int) vdev->pdev->irq,
- vxge_isr_napi,
- IRQF_SHARED, vdev->desc[0], vdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s %s-%d: ISR registration failed",
- VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
- return -ENODEV;
- }
- vxge_debug_init(VXGE_TRACE,
- "new %s-%d line allocated",
- "IRQ", vdev->pdev->irq);
- }
-
- return VXGE_HW_OK;
-}
-
-static void vxge_poll_vp_reset(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
- int i, j = 0;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- if (test_bit(i, &vdev->vp_reset)) {
- vxge_reset_vpath(vdev, i);
- j++;
- }
- }
- if (j && (vdev->config.intr_type != MSI_X)) {
- vxge_hw_device_unmask_all(vdev->devh);
- vxge_hw_device_flush_io(vdev->devh);
- }
-
- mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
-}
-
-static void vxge_poll_vp_lockup(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- struct vxge_ring *ring;
- int i;
- unsigned long rx_frms;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
-
-		/* Frame count, truncated to machine word size */
- rx_frms = READ_ONCE(ring->stats.rx_frms);
-
-		/* Did this vpath receive any packets? */
- if (ring->stats.prev_rx_frms == rx_frms) {
- status = vxge_hw_vpath_check_leak(ring->handle);
-
-			/* Did it fail to receive any packets last time too? */
- if ((VXGE_HW_FAIL == status) &&
- (VXGE_HW_FAIL == ring->last_status)) {
-
- /* schedule vpath reset */
- if (!test_and_set_bit(i, &vdev->vp_reset)) {
- vpath = &vdev->vpaths[i];
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, i);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- continue;
- }
- }
- }
- ring->stats.prev_rx_frms = rx_frms;
- ring->last_status = status;
- }
-
-	/* Check every millisecond */
- mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
-}
-
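-/* For illustration: the watchdog above is a two-strike detector. A vpath
- * is only reset when vxge_hw_vpath_check_leak() reports VXGE_HW_FAIL on
- * two consecutive polls with no RX progress, roughly:
- *
- *	if (rx_frms == prev_rx_frms &&
- *	    status == VXGE_HW_FAIL && last_status == VXGE_HW_FAIL)
- *		schedule_vpath_reset(i);	// hypothetical wrapper
- */
-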
-static netdev_features_t vxge_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t changed = dev->features ^ features;
-
- /* Enabling RTH requires some of the logic in vxge_device_register and a
- * vpath reset. Due to these restrictions, only allow modification
- * while the interface is down.
- */
- if ((changed & NETIF_F_RXHASH) && netif_running(dev))
- features ^= NETIF_F_RXHASH;
-
- return features;
-}
-
-static int vxge_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- netdev_features_t changed = dev->features ^ features;
-
- if (!(changed & NETIF_F_RXHASH))
- return 0;
-
- /* !netif_running() ensured by vxge_fix_features() */
-
- vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
- vxge_reset_all_vpaths(vdev);
-
- return 0;
-}
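-
-/* A hedged usage sketch (not part of the original driver): the RXHASH
- * toggle above is normally driven from user space with ethtool, and
- * vxge_fix_features() silently reverts the change while the interface is
- * up, so the expected sequence is ("ethX" is a placeholder):
- *
- *	ip link set ethX down
- *	ethtool -K ethX rxhash off
- *	ip link set ethX up
- */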
-
-/**
- * vxge_open
- * @dev: pointer to the device structure.
- *
- * This function is the open entry point of the driver. It allocates Rx
- * buffers, inserts them into the buffer descriptors and then enables the
- * Rx side of the NIC.
- * Return value: '0' on success and an appropriate negative errno value
- * on failure.
- */
-static int vxge_open(struct net_device *dev)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- struct vxge_vpath *vpath;
- int ret = 0;
- int i;
- u64 val64;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
-	/* make sure the link is off by default every time the NIC is
-	 * initialized */
- netif_carrier_off(dev);
-
- /* Open VPATHs */
- status = vxge_open_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: Vpath open failed", vdev->ndev->name);
- ret = -EPERM;
- goto out0;
- }
-
- vdev->mtu = dev->mtu;
-
- status = vxge_add_isr(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: ISR add failed", dev->name);
- ret = -EPERM;
- goto out1;
- }
-
- if (vdev->config.intr_type != MSI_X) {
- netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
- vdev->config.napi_weight);
- napi_enable(&vdev->napi);
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vpath->ring.napi_p = &vdev->napi;
- }
- } else {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- netif_napi_add(dev, &vpath->ring.napi,
- vxge_poll_msix, vdev->config.napi_weight);
- napi_enable(&vpath->ring.napi);
- vpath->ring.napi_p = &vpath->ring.napi;
- }
- }
-
- /* configure RTH */
- if (vdev->config.rth_steering) {
- status = vxge_rth_configure(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: RTH configuration failed",
- dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
- printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
- hldev->config.rth_en ? "enabled" : "disabled");
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- /* set initial mtu before enabling the device */
- status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: can not set new MTU", dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
-
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
- vxge_debug_init(vdev->level_trace,
- "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
-
- /* Restore the DA, VID table and also multicast and promiscuous mode
- * states
- */
- if (vdev->all_multi_flg) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
- }
-
-	/* Enable vpaths to sniff all unicast/multicast traffic that is not
-	 * addressed to them. Promiscuous mode is allowed for the PF only.
-	 */
-
- val64 = 0;
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- val64);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- val64);
-
- vxge_set_multicast(dev);
-
-	/* Enable broadcast and multicast for all vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- status = vxge_hw_vpath_bcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable bcast for vpath "
- "id %d", dev->name, i);
- if (vdev->config.addr_learn_en) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable mcast for vpath "
- "id %d", dev->name, i);
- }
- }
-
- vxge_hw_device_setpause_data(vdev->devh, 0,
- vdev->config.tx_pause_enable,
- vdev->config.rx_pause_enable);
-
- if (vdev->vp_reset_timer.function == NULL)
- vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
- HZ / 2);
-
- /* There is no need to check for RxD leak and RxD lookup on Titan1A */
- if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
- HZ / 2);
-
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- smp_wmb();
-
- if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
- netif_carrier_on(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
- }
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- vxge_hw_vpath_enable(vpath->handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- }
-
- netif_tx_start_all_queues(vdev->ndev);
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
- goto out0;
-
-out2:
- vxge_rem_isr(vdev);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
-out1:
- vxge_close_vpaths(vdev, 0);
-out0:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return ret;
-}
-
-/* Loop through the mac address list and delete all the entries */
-static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
-{
-	struct list_head *entry, *next;
-
-	if (list_empty(&vpath->mac_addr_list))
- return;
-
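-	/* _safe iteration: each entry is freed while the list is walked */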
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- list_del(entry);
- kfree(entry);
- }
-}
-
-static void vxge_napi_del_all(struct vxgedev *vdev)
-{
- int i;
- if (vdev->config.intr_type != MSI_X)
- netif_napi_del(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- netif_napi_del(&vdev->vpaths[i].ring.napi);
- }
-}
-
-static int do_vxge_close(struct net_device *dev, int do_io)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- int i;
- u64 val64, vpath_vector;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* If vxge_handle_crit_err task is executing,
- * wait till it completes. */
- while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- msleep(50);
-
- if (do_io) {
- /* Put the vpath back in normal mode */
- vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
- status = vxge_hw_mgmt_reg_read(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- &val64);
- if (status == VXGE_HW_OK) {
- val64 &= ~vpath_vector;
- status = vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- val64);
- }
-
- /* Remove the function 0 from promiscuous mode */
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- 0);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- 0);
-
- smp_wmb();
- }
-
- if (vdev->titan1)
- del_timer_sync(&vdev->vp_lockup_timer);
-
- del_timer_sync(&vdev->vp_reset_timer);
-
- if (do_io)
- vxge_hw_device_wait_receive_idle(hldev);
-
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
- netif_carrier_off(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Down\n");
- netif_tx_stop_all_queues(vdev->ndev);
-
- /* Note that at this point xmit() is stopped by upper layer */
- if (do_io)
- vxge_hw_device_intr_disable(vdev->devh);
-
- vxge_rem_isr(vdev);
-
- vxge_napi_del_all(vdev);
-
- if (do_io)
- vxge_reset_all_vpaths(vdev);
-
- vxge_close_vpaths(vdev, 0);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
-
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
-
- return 0;
-}
-
-/**
- * vxge_close
- * @dev: device pointer.
- *
- * This is the stop entry point of the driver. It needs to undo exactly
- * what the open entry point did, and is therefore usually referred to as
- * the close function. Among other things it stops the Rx side of the NIC
- * and frees all the Rx buffers in the Rx rings.
- * Return value: '0' on success and an appropriate negative errno value
- * on failure.
- */
-static int vxge_close(struct net_device *dev)
-{
- do_vxge_close(dev, 1);
- return 0;
-}
-
-/**
- * vxge_change_mtu
- * @dev: net device pointer.
- * @new_mtu: the new MTU size for the device.
- *
- * A driver entry point to change the MTU of the device. If the interface
- * is up, it is closed and reopened with the new MTU.
- */
-static int vxge_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d", __func__, __LINE__);
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev))) {
-		/* just store the new value; it will be used on the next open() */
- dev->mtu = new_mtu;
- vxge_debug_init(vdev->level_err,
- "%s", "device is down on MTU change");
- return 0;
- }
-
- vxge_debug_init(vdev->level_trace,
- "trying to apply new MTU %d", new_mtu);
-
- if (vxge_close(dev))
- return -EIO;
-
- dev->mtu = new_mtu;
- vdev->mtu = new_mtu;
-
- if (vxge_open(dev))
- return -EIO;
-
- vxge_debug_init(vdev->level_trace,
- "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
-
- return 0;
-}
-
-/**
- * vxge_get_stats64
- * @dev: pointer to the device structure
- * @net_stats: pointer to struct rtnl_link_stats64
- *
- */
-static void
-vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- int k;
-
- /* net_stats already zeroed by caller */
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
- struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
- unsigned int start;
- u64 packets, bytes, multicast;
-
- do {
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
-
- packets = rxstats->rx_frms;
- multicast = rxstats->rx_mcast;
- bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
-
- net_stats->rx_packets += packets;
- net_stats->rx_bytes += bytes;
- net_stats->multicast += multicast;
-
- net_stats->rx_errors += rxstats->rx_errors;
- net_stats->rx_dropped += rxstats->rx_dropped;
-
- do {
- start = u64_stats_fetch_begin_irq(&txstats->syncp);
-
- packets = txstats->tx_frms;
- bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
-
- net_stats->tx_packets += packets;
- net_stats->tx_bytes += bytes;
- net_stats->tx_errors += txstats->tx_errors;
- }
-}
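-
-/* A minimal sketch of the writer side of the u64_stats pattern read
- * above. The real update sites are the Rx/Tx completion handlers
- * elsewhere in this driver and may differ in detail:
- *
- *	u64_stats_update_begin(&ring->stats.syncp);
- *	ring->stats.rx_frms++;
- *	ring->stats.rx_bytes += skb->len;
- *	u64_stats_update_end(&ring->stats.syncp);
- *
- * The fetch/retry loop in vxge_get_stats64() simply re-reads the
- * counters if a writer ran concurrently, which yields tear-free 64-bit
- * values even on 32-bit hosts.
- */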
-
-static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
-{
- enum vxge_hw_status status;
- u64 val64;
-
- /* Timestamp is passed to the driver via the FCS, therefore we
- * must disable the FCS stripping by the adapter. Since this is
- * required for the driver to load (due to a hardware bug),
- * there is no need to do anything special here.
- */
- val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
- VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
- VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
-
- status = vxge_hw_mgmt_reg_write(devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- offsetof(struct vxge_hw_mrpcim_reg,
- xmac_timestamp),
- val64);
- vxge_hw_device_flush_io(devh);
- devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
- return status;
-}
-
-static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
- int i;
-
- if (copy_from_user(&config, data, sizeof(config)))
- return -EFAULT;
-
- /* Transmit HW Timestamp not supported */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- vdev->rx_hwts = 0;
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- break;
-
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
- return -EFAULT;
-
- vdev->rx_hwts = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
-
- default:
- return -ERANGE;
- }
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (vdev->rx_hwts ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * vxge_ioctl
- * @dev: Device pointer.
- * @rq: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
- * @cmd: This is used to distinguish between the different commands that
- * can be passed to the IOCTL functions.
- *
- * Entry point for the Ioctl.
- */
-static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return vxge_hwtstamp_set(vdev, rq->ifr_data);
- case SIOCGHWTSTAMP:
- return vxge_hwtstamp_get(vdev, rq->ifr_data);
- default:
- return -EOPNOTSUPP;
- }
-}
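-
-/* A hedged user-space illustration (not part of the driver) of driving
- * the SIOCSHWTSTAMP path above; "ethX" and the socket fd are
- * placeholders and error handling is elided.
- */
-#if 0	/* user-space example only, never built with the kernel */
-#include <linux/net_tstamp.h>
-#include <linux/sockios.h>
-#include <net/if.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-static void enable_rx_timestamps(int fd)
-{
-	struct hwtstamp_config cfg = {
-		.tx_type   = HWTSTAMP_TX_OFF,	/* Tx stamping unsupported */
-		.rx_filter = HWTSTAMP_FILTER_ALL,
-	};
-	struct ifreq ifr;
-
-	memset(&ifr, 0, sizeof(ifr));
-	strncpy(ifr.ifr_name, "ethX", IFNAMSIZ - 1);
-	ifr.ifr_data = (void *)&cfg;
-	ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* fd: any open AF_INET socket */
-}
-#endif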
-
-/**
- * vxge_tx_watchdog
- * @dev: pointer to net device structure
- * @txqueue: index of the hanging queue
- *
- * Watchdog for transmit side.
- * This function is triggered if the Tx Queue is stopped
- * for a pre-defined amount of time when the Interface is still up.
- */
-static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
-{
- struct vxgedev *vdev;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
-
- schedule_work(&vdev->reset_task);
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_vlan_rx_add_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Add the vlan id to the devices vlan id table
- */
-static int
-vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
- /* Add these vlan to the vid table */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- set_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-/**
- * vxge_vlan_rx_kill_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Remove the vlan id from the device's vlan id table
- */
-static int
-vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- /* Delete this vlan from the vid table */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_delete(vpath->handle, vid);
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- clear_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-static const struct net_device_ops vxge_netdev_ops = {
- .ndo_open = vxge_open,
- .ndo_stop = vxge_close,
- .ndo_get_stats64 = vxge_get_stats64,
- .ndo_start_xmit = vxge_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = vxge_set_multicast,
- .ndo_eth_ioctl = vxge_ioctl,
- .ndo_set_mac_address = vxge_set_mac_addr,
- .ndo_change_mtu = vxge_change_mtu,
- .ndo_fix_features = vxge_fix_features,
- .ndo_set_features = vxge_set_features,
- .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
- .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
- .ndo_tx_timeout = vxge_tx_watchdog,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = vxge_netpoll,
-#endif
-};
-
-static int vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int no_of_vpath, struct vxgedev **vdev_out)
-{
- struct net_device *ndev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev;
- int ret = 0, no_of_queue = 1;
- u64 stat;
-
- *vdev_out = NULL;
- if (config->tx_steering_type)
- no_of_queue = no_of_vpath;
-
- ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
- no_of_queue);
- if (ndev == NULL) {
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
- "%s : device allocation failed", __func__);
- ret = -ENODEV;
- goto _out0;
- }
-
- vxge_debug_entryexit(
- vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Entering...",
- ndev->name, __func__, __LINE__);
-
- vdev = netdev_priv(ndev);
- memset(vdev, 0, sizeof(struct vxgedev));
-
- vdev->ndev = ndev;
- vdev->devh = hldev;
- vdev->pdev = hldev->pdev;
- memcpy(&vdev->config, config, sizeof(struct vxge_config));
- vdev->rx_hwts = 0;
- vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
-
- SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
-
- ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_CTAG_TX;
- if (vdev->config.rth_steering != NO_STEERING)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->features |= ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
-
- ndev->netdev_ops = &vxge_netdev_ops;
-
- ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- INIT_WORK(&vdev->reset_task, vxge_reset);
-
- vxge_initialize_ethtool_ops(ndev);
-
- /* Allocate memory for vpath */
- vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
- GFP_KERNEL);
- if (!vdev->vpaths) {
- vxge_debug_init(VXGE_ERR,
- "%s: vpath memory allocation failed",
- vdev->ndev->name);
- ret = -ENOMEM;
- goto _out1;
- }
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s : checksumming enabled", __func__);
-
- ndev->features |= NETIF_F_HIGHDMA;
-
- /* MTU range: 68 - 9600 */
- ndev->min_mtu = VXGE_HW_MIN_MTU;
- ndev->max_mtu = VXGE_HW_MAX_MTU;
-
- ret = register_netdev(ndev);
- if (ret) {
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: %s : device registration failed!",
- ndev->name, __func__);
- goto _out2;
- }
-
- /* Set the factory defined MAC address initially */
- ndev->addr_len = ETH_ALEN;
-
-	/* Set the link state to off at this point; when the link-change
-	 * interrupt arrives, the state is updated automatically.
-	 */
- netif_carrier_off(ndev);
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: Ethernet device registered",
- ndev->name);
-
- hldev->ndev = ndev;
- *vdev_out = vdev;
-
- /* Resetting the Device stats */
- status = vxge_hw_mrpcim_stats_access(
- hldev,
- VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
- 0,
- 0,
- &stat);
-
- if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
-			"%s: device stats clear returns "
- "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
-
- vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Exiting...",
- ndev->name, __func__, __LINE__);
-
- return ret;
-_out2:
- kfree(vdev->vpaths);
-_out1:
- free_netdev(ndev);
-_out0:
- return ret;
-}
-
-/*
- * vxge_device_unregister
- *
- * This function will unregister and free network device
- */
-static void vxge_device_unregister(struct __vxge_hw_device *hldev)
-{
- struct vxgedev *vdev;
- struct net_device *dev;
- char buf[IFNAMSIZ];
-
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
- __func__, __LINE__);
-
- strlcpy(buf, dev->name, IFNAMSIZ);
-
- flush_work(&vdev->reset_task);
-
- /* in 2.6 will call stop() if device is up */
- unregister_netdev(dev);
-
- kfree(vdev->vpaths);
-
- vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
- buf);
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
- __func__, __LINE__);
-
- /* we are safe to free it now */
- free_netdev(dev);
-}
-
-/*
- * vxge_callback_crit_err
- *
- * This function is called by the alarm handler in interrupt context.
- * Driver must analyze it based on the event type.
- */
-static void
-vxge_callback_crit_err(struct __vxge_hw_device *hldev,
- enum vxge_hw_event type, u64 vp_id)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath = NULL;
- int vpath_idx;
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-
- /* Note: This event type should be used for device wide
- * indications only - Serious errors, Slot freeze and critical errors
- */
- vdev->cric_err_event = type;
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->device_id == vp_id)
- break;
- }
-
- if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
- if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
- vxge_debug_init(VXGE_ERR,
- "%s: Slot is frozen", vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_SERR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Serious Error",
- vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Critical Error",
- vdev->ndev->name);
- }
-
- if ((type == VXGE_HW_EVENT_SERR) ||
- (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
- vxge_hw_device_mask_all(hldev);
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
- (type == VXGE_HW_EVENT_VPATH_ERR)) {
-
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- else {
- /* check if this vpath is already set for reset */
- if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, vpath_idx);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- }
- }
- }
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-}
-
-static void verify_bandwidth(void)
-{
- int i, band_width, total = 0, equal_priority = 0;
-
- /* 1. If user enters 0 for some fifo, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0) {
- equal_priority = 1;
- break;
- }
- }
-
- if (!equal_priority) {
- /* 2. If sum exceeds 100, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0xFF)
- break;
-
- total += bw_percentage[i];
- if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
- equal_priority = 1;
- break;
- }
- }
- }
-
- if (!equal_priority) {
- /* Is all the bandwidth consumed? */
- if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
- if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
-				/* Split the remaining bandwidth equally */
- band_width =
- (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
- (VXGE_HW_MAX_VIRTUAL_PATHS - i);
- if (band_width < 2) /* min of 2% */
- equal_priority = 1;
- else {
- for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
- i++)
- bw_percentage[i] =
- band_width;
- }
- }
- } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
- equal_priority = 1;
- }
-
- if (equal_priority) {
- vxge_debug_init(VXGE_ERR,
- "%s: Assigning equal bandwidth to all the vpaths",
- VXGE_DRIVER_NAME);
- bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
- VXGE_HW_MAX_VIRTUAL_PATHS;
- for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- bw_percentage[i] = bw_percentage[0];
- }
-}
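-
-/* Worked example of the logic above, assuming VXGE_HW_MAX_VIRTUAL_PATHS
- * is 17 and VXGE_HW_VPATH_BANDWIDTH_MAX is 100: with bw_percentage set
- * to "30,30" the remaining entries keep the 0xFF sentinel, so the scan
- * stops with total = 60 at i = 2, and the leftover 40% is split over the
- * remaining 17 - 2 = 15 vpaths as 40 / 15 = 2% each, which just meets
- * the 2% minimum. Had any entry been 0, or had the sum exceeded 100,
- * every vpath would instead get 100 / 17 = 5%.
- */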
-
-/*
- * Vpath configuration
- */
-static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
- u64 vpath_mask, struct vxge_config *config_param)
-{
- int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
- u32 txdl_size, txdl_per_memblock;
-
- temp = driver_config->vpath_per_dev;
- if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
- (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
-		/* No CPUs left; report zero vpaths. */
- if (driver_config->g_no_cpus == -1)
- return 0;
-
- if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus =
- netif_get_num_default_rss_queues();
-
- driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
- if (!driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = 1;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- if (vxge_bVALn(vpath_mask, i, 1))
- default_no_vpath++;
-
- if (default_no_vpath < driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = default_no_vpath;
-
- driver_config->g_no_cpus = driver_config->g_no_cpus -
- (driver_config->vpath_per_dev * 2);
- if (driver_config->g_no_cpus <= 0)
- driver_config->g_no_cpus = -1;
- }
-
- if (driver_config->vpath_per_dev == 1) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Disable tx and rx steering, "
-			"as a single vpath is configured", VXGE_DRIVER_NAME);
- config_param->rth_steering = NO_STEERING;
- config_param->tx_steering_type = NO_STEERING;
- device_config->rth_en = 0;
- }
-
- /* configure bandwidth */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- device_config->vp_config[i].min_bandwidth = bw_percentage[i];
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
- device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
- if (no_of_vpaths < driver_config->vpath_per_dev) {
- if (!vxge_bVALn(vpath_mask, i, 1)) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not available",
- VXGE_DRIVER_NAME, i);
- continue;
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d available",
- VXGE_DRIVER_NAME, i);
- no_of_vpaths++;
- }
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not configured, "
- "max_config_vpath exceeded",
- VXGE_DRIVER_NAME, i);
- break;
- }
-
- /* Configure Tx fifo's */
- device_config->vp_config[i].fifo.enable =
- VXGE_HW_FIFO_ENABLE;
- device_config->vp_config[i].fifo.max_frags =
- MAX_SKB_FRAGS + 1;
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
-
- txdl_size = device_config->vp_config[i].fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd);
- txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
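-		/*
-		 * Illustrative arithmetic, with hedged constants: if
-		 * MAX_SKB_FRAGS is 17 and a TxD is 32 bytes, txdl_size is
-		 * 18 * 32 = 576 bytes; a 4 KiB memblock then holds
-		 * 4096 / 576 = 7 TxDLs, so the default fifo length of 84
-		 * needs ((84 - 1) / 7) + 1 = 12 fifo blocks.
-		 */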
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
-
- /* Configure tti properties */
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].tti.btimer_val =
- (VXGE_TTI_BTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- /* For msi-x with napi (each vector has a handler of its own) -
- * Set CI to OFF for all vpaths
- */
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
-
- device_config->vp_config[i].tti.ltimer_val =
- (VXGE_TTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.rtimer_val =
- (VXGE_TTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
- device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
- device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
- device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
- device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
- device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
- device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
-
- /* Configure Rx rings */
- device_config->vp_config[i].ring.enable =
- VXGE_HW_RING_ENABLE;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_1;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_A;
-
- /* Configure rti properties */
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].rti.btimer_val =
- (VXGE_RTI_BTIMER_VAL * 1000)/272;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
-
- device_config->vp_config[i].rti.urange_a =
- RTI_RX_URANGE_A;
- device_config->vp_config[i].rti.urange_b =
- RTI_RX_URANGE_B;
- device_config->vp_config[i].rti.urange_c =
- RTI_RX_URANGE_C;
- device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
- device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
- device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
- device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
-
- device_config->vp_config[i].rti.rtimer_val =
- (VXGE_RTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rti.ltimer_val =
- (VXGE_RTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- vlan_tag_strip;
- }
-
- driver_config->vpath_per_dev = temp;
- return no_of_vpaths;
-}
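-
-/* A worked example of the CPU-based sizing above, assuming the module
- * parameters leave vpath_per_dev at VXGE_USE_DEFAULT and
- * netif_get_num_default_rss_queues() reports 8: g_no_cpus becomes 8,
- * vpath_per_dev is 8 >> 1 = 4 (capped by the number of bits set in
- * vpath_mask), and g_no_cpus is reduced by 4 * 2 to 0 and clamped to -1.
- * Since vxge_probe() re-zeroes g_no_cpus per device, each function
- * normally recomputes from the RSS default rather than hitting the
- * "No CPUs left" early return.
- */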
-
-/* initialize device configuration */
-static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
- int *intr_type)
-{
- /* Used for CQRQ/SRQ. */
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
-
- device_config->dma_blockpool_max =
- VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
-
- if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
- max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
-
- if (!IS_ENABLED(CONFIG_PCI_MSI)) {
- vxge_debug_init(VXGE_ERR,
-			"%s: This kernel does not support "
- "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
- *intr_type = INTA;
- }
-
-	/* Configure interrupt mode: MSI-X or IRQ line (INTA). */
- switch (*intr_type) {
- case INTA:
- device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
- break;
-
- case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
- break;
- }
-
- /* Timer period between device poll */
- device_config->device_poll_millis = VXGE_TIMER_DELAY;
-
- /* Configure mac based steering. */
- device_config->rts_mac_en = addr_learn_en;
-
-	/* Configure the RTH indirection table type */
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
-
- vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
- __func__);
- vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
- device_config->intr_mode);
- vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
- device_config->device_poll_millis);
- vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
- device_config->rth_en);
- vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
- device_config->rth_it_type);
-}
-
-static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
-{
- int i;
-
- vxge_debug_init(VXGE_TRACE,
- "%s: %d Vpath(s) opened",
- vdev->ndev->name, vdev->no_of_vpath);
-
- switch (vdev->config.intr_type) {
- case INTA:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type INTA", vdev->ndev->name);
- break;
-
- case MSI_X:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type MSI-X", vdev->ndev->name);
- break;
- }
-
- if (vdev->config.rth_steering) {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering enabled for TCP_IPV4",
- vdev->ndev->name);
- } else {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering disabled", vdev->ndev->name);
- }
-
- switch (vdev->config.tx_steering_type) {
- case NO_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- break;
- case TX_PRIORITY_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_VLAN_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_MULTIQ_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx multiqueue steering enabled",
- vdev->ndev->name);
- break;
- case TX_PORT_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx port steering enabled",
- vdev->ndev->name);
- break;
- default:
- vxge_debug_init(VXGE_ERR,
- "%s: Unsupported tx steering type",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- }
-
- if (vdev->config.addr_learn_en)
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC Address learning enabled", vdev->ndev->name);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: MTU size - %d", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].mtu);
- vxge_debug_init(VXGE_TRACE,
- "%s: VLAN tag stripping %s", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].rpa_strip_vlan_tag
- ? "Enabled" : "Disabled");
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Max frags : %d", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].fifo.max_frags);
- break;
- }
-}
-
-/**
- * vxge_pm_suspend - vxge power management suspend entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
-{
- return -ENOSYS;
-}
-
-/**
- * vxge_pm_resume - vxge power management resume entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_resume(struct device *dev_d)
-{
- return -ENOSYS;
-}
-
-/**
- * vxge_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev)) {
- /* Bring down the card, while avoiding PCI I/O */
- do_vxge_close(netdev, 0);
- }
-
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * vxge_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- struct vxgedev *vdev = netdev_priv(netdev);
-
- if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pci_set_master(pdev);
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * vxge_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it is OK to resume normal operation.
- */
-static void vxge_io_resume(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- if (netif_running(netdev)) {
- if (vxge_open(netdev)) {
- netdev_err(netdev,
- "Can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-}
-
-static inline u32 vxge_get_num_vfs(u64 function_mode)
-{
- u32 num_functions = 0;
-
- switch (function_mode) {
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- case VXGE_HW_FUNCTION_MODE_SRIOV_8:
- num_functions = 8;
- break;
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- num_functions = 1;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
- num_functions = 17;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV_4:
- num_functions = 4;
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
- num_functions = 2;
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV_8:
- num_functions = 8; /* TODO */
- break;
- }
- return num_functions;
-}
-
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
-{
- struct __vxge_hw_device *hldev = vdev->devh;
- u32 maj, min, bld, cmaj, cmin, cbld;
- enum vxge_hw_status status;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
- if (ret) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
- VXGE_DRIVER_NAME, fw_name);
- goto out;
- }
-
- /* Load the new firmware onto the adapter */
- status = vxge_update_fw_image(hldev, fw->data, fw->size);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FW image download to adapter failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- /* Read the version of the new firmware */
- status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: Upgrade read version failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- cmaj = vdev->config.device_hw_info.fw_version.major;
- cmin = vdev->config.device_hw_info.fw_version.minor;
- cbld = vdev->config.device_hw_info.fw_version.build;
- /* It's possible the version in /lib/firmware is not the latest version.
- * If so, we could get into a loop of trying to upgrade to the latest
- * and flashing the older version.
- */
- if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
- !override) {
- ret = -EINVAL;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
- maj, min, bld);
-
- /* Flash the adapter with the new firmware */
- status = vxge_hw_flash_fw(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
- "hard reset before using, thus requiring a system reboot or a "
- "hotplug event.\n");
-
-out:
- release_firmware(fw);
- return ret;
-}
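-
-/* vxge_fw_upgrade() is deliberately non-static: besides the probe-time
- * path below it is meant to be reachable from the ethtool flash path,
- * where 'override' forces a reflash of the same version. A hedged
- * user-space sketch ("ethX" is a placeholder):
- *
- *	ethtool -f ethX vxge/X3fw.ncf
- */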
-
-static int vxge_probe_fw_update(struct vxgedev *vdev)
-{
- u32 maj, min, bld;
- int ret, gpxe = 0;
- char *fw_name;
-
- maj = vdev->config.device_hw_info.fw_version.major;
- min = vdev->config.device_hw_info.fw_version.minor;
- bld = vdev->config.device_hw_info.fw_version.build;
-
- if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
- return 0;
-
- /* Ignore the build number when determining if the current firmware is
- * "too new" to load the driver
- */
- if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
- "version, unable to load driver\n",
- VXGE_DRIVER_NAME);
- return -EINVAL;
- }
-
- /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
- * work with this driver.
- */
- if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
- "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
-	/* No firmware file is specified explicitly; choose one based on
-	 * whether a gPXE EPROM image is present
-	 */
- if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
- int i;
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
- if (vdev->devh->eprom_versions[i]) {
- gpxe = 1;
- break;
- }
- }
- if (gpxe)
- fw_name = "vxge/X3fw-pxe.ncf";
- else
- fw_name = "vxge/X3fw.ncf";
-
- ret = vxge_fw_upgrade(vdev, fw_name, 0);
- /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
- * probe, so ignore them
- */
- if (ret != -EINVAL && ret != -ENOENT)
- return -EIO;
- else
- ret = 0;
-
- if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
- VXGE_FW_VER(maj, min, 0)) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.",
- VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int is_sriov_initialized(struct pci_dev *pdev)
-{
- int pos;
- u16 ctrl;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
- if (ctrl & PCI_SRIOV_CTRL_VFE)
- return 1;
- }
- return 0;
-}
-
-static const struct vxge_hw_uld_cbs vxge_callbacks = {
- .link_up = vxge_callback_link_up,
- .link_down = vxge_callback_link_down,
- .crit_err = vxge_callback_crit_err,
-};
-
-/**
- * vxge_probe
- * @pdev : structure containing the PCI related information of the device.
- * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
- * Description:
- * This function is called when a new PCI device gets detected and initializes
- * it.
- * Return value:
- * returns 0 on success and negative on failure.
- *
- */
-static int
-vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
-{
- struct __vxge_hw_device *hldev;
- enum vxge_hw_status status;
- int ret;
- u64 vpath_mask = 0;
- struct vxgedev *vdev;
- struct vxge_config *ll_config = NULL;
- struct vxge_hw_device_config *device_config = NULL;
- struct vxge_hw_device_attr attr;
- int i, j, no_of_vpath = 0, max_vpath_supported = 0;
- u8 *macaddr;
- struct vxge_mac_addrs *entry;
- static int bus = -1, device = -1;
- u32 host_type;
- u8 new_device = 0;
- enum vxge_hw_status is_privileged;
- u32 function_mode;
- u32 num_vfs = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- attr.pdev = pdev;
-
- /* In SRIOV-17 mode, functions of the same adapter
- * can be deployed on different buses
- */
- if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
- !pdev->is_virtfn)
- new_device = 1;
-
- bus = pdev->bus->number;
- device = PCI_SLOT(pdev->devfn);
-
- if (new_device) {
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt !=
- driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME,
- driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
- driver_config->config_dev_cnt = 0;
- driver_config->total_dev_cnt = 0;
- }
-
-	/* Make the CPU-based vpath count calculation apply to individual
-	 * functions as well.
-	 */
- driver_config->g_no_cpus = 0;
- driver_config->vpath_per_dev = max_config_vpath;
-
- driver_config->total_dev_cnt++;
- if (++driver_config->config_dev_cnt > max_config_dev) {
- ret = 0;
- goto _exit0;
- }
-
- device_config = kzalloc(sizeof(struct vxge_hw_device_config),
- GFP_KERNEL);
- if (!device_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
-
- ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
- if (!ll_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
-			"ll_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
- ll_config->tx_steering_type = TX_MULTIQ_STEERING;
- ll_config->intr_type = MSI_X;
- ll_config->napi_weight = NEW_NAPI_WEIGHT;
- ll_config->rth_steering = RTH_STEERING;
-
- /* get the default configuration parameters */
- vxge_hw_device_config_default_get(device_config);
-
- /* initialize configuration parameters */
- vxge_device_config_init(device_config, &ll_config->intr_type);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : can not enable PCI device", __func__);
- goto _exit0;
- }
-
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s : using 64bit DMA", __func__);
- } else {
- ret = -ENOMEM;
- goto _exit1;
- }
-
- ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : request regions failed", __func__);
- goto _exit1;
- }
-
- pci_set_master(pdev);
-
- attr.bar0 = pci_ioremap_bar(pdev, 0);
- if (!attr.bar0) {
- vxge_debug_init(VXGE_ERR,
- "%s : cannot remap io memory bar0", __func__);
- ret = -ENODEV;
- goto _exit2;
- }
- vxge_debug_ll_config(VXGE_TRACE,
- "pci ioremap bar0: %p:0x%llx",
- attr.bar0,
- (unsigned long long)pci_resource_start(pdev, 0));
-
- status = vxge_hw_device_hw_info_get(attr.bar0,
- &ll_config->device_hw_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
-			"%s: Reading of hardware info failed. "
- "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vpath_mask = ll_config->device_hw_info.vpath_mask;
- if (vpath_mask == 0) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: No vpaths available in device", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vxge_debug_ll_config(VXGE_TRACE,
- "%s:%d Vpath mask = %llx", __func__, __LINE__,
- (unsigned long long)vpath_mask);
-
- function_mode = ll_config->device_hw_info.function_mode;
- host_type = ll_config->device_hw_info.host_type;
- is_privileged = __vxge_hw_device_is_privilaged(host_type,
- ll_config->device_hw_info.func_id);
-
- /* Check how many vpaths are available */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- max_vpath_supported++;
- }
-
- if (new_device)
- num_vfs = vxge_get_num_vfs(function_mode) - 1;
-
- /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
- (ll_config->intr_type != INTA)) {
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed in enabling SRIOV mode: %d\n", ret);
- /* No need to fail out, as an error here is non-fatal */
- }
-
- /*
- * Configure vpaths and get driver configured number of vpaths
- * which is less than or equal to the maximum vpaths per function.
- */
- no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
- if (!no_of_vpath) {
- vxge_debug_ll_config(VXGE_ERR,
- "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
- ret = 0;
- goto _exit3;
- }
-
- /* Setting driver callbacks */
- attr.uld_callbacks = &vxge_callbacks;
-
- status = vxge_hw_device_initialize(&hldev, &attr, device_config);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
- }
-
- if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
- ll_config->device_hw_info.fw_version.minor,
- ll_config->device_hw_info.fw_version.build) >=
- VXGE_EPROM_FW_VER) {
- struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
-
- status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
- VXGE_DRIVER_NAME);
- /* This is a non-fatal error, continue */
- }
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- hldev->eprom_versions[i] = img[i].version;
- if (!img[i].is_valid)
- break;
- vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
- VXGE_EPROM_IMG_MAJOR(img[i].version),
- VXGE_EPROM_IMG_MINOR(img[i].version),
- VXGE_EPROM_IMG_FIX(img[i].version),
- VXGE_EPROM_IMG_BUILD(img[i].version));
- }
- }
-
-	/* If FCS stripping is not disabled in the MAC, fail the driver load */
- status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
- if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
- " failing driver load", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit4;
- }
-
- /* Always enable HWTS. This will always cause the FCS to be invalid,
- * due to the fact that HWTS is using the FCS as the location of the
- * timestamp. The HW FCS checking will still correctly determine if
- * there is a valid checksum, and the FCS is being removed by the driver
- * anyway. So no functionality is being lost. Since it is always
- * enabled, we now simply use the ioctl call to set whether or not the
- * driver should be paying attention to the HWTS.
- */
- if (is_privileged == VXGE_HW_OK) {
- status = vxge_timestamp_config(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
- VXGE_DRIVER_NAME);
- ret = -EFAULT;
- goto _exit4;
- }
- }
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
-
- /* set private device info */
- pci_set_drvdata(pdev, hldev);
-
- ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
- ll_config->addr_learn_en = addr_learn_en;
- ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = 1;
- ll_config->rth_hash_type_ipv4 = 0;
- ll_config->rth_hash_type_tcpipv6 = 0;
- ll_config->rth_hash_type_ipv6 = 0;
- ll_config->rth_hash_type_tcpipv6ex = 0;
- ll_config->rth_hash_type_ipv6ex = 0;
- ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
- ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-
- ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev);
- if (ret) {
- ret = -EINVAL;
- goto _exit4;
- }
-
- ret = vxge_probe_fw_update(vdev);
- if (ret)
- goto _exit5;
-
- vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- /* set private HW device info */
- vdev->mtu = VXGE_HW_DEFAULT_MTU;
- vdev->bar0 = attr.bar0;
- vdev->max_vpath_supported = max_vpath_supported;
- vdev->no_of_vpath = no_of_vpath;
-
- /* Virtual Path count */
- for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- if (j >= vdev->no_of_vpath)
- break;
-
- vdev->vpaths[j].is_configured = 1;
- vdev->vpaths[j].device_id = i;
- vdev->vpaths[j].ring.driver_id = j;
- vdev->vpaths[j].vdev = vdev;
- vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
- memcpy((u8 *)vdev->vpaths[j].macaddr,
- ll_config->device_hw_info.mac_addrs[i],
- ETH_ALEN);
-
- /* Initialize the mac address list header */
- INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
-
- vdev->vpaths[j].mac_addr_cnt = 0;
- vdev->vpaths[j].mcast_addr_cnt = 0;
- j++;
- }
- vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
- vdev->max_config_port = max_config_port;
-
- vdev->vlan_tag_strip = vlan_tag_strip;
-
- /* map the hashing selector table to the configured vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpath_selector[i] = vpath_selector[i];
-
- macaddr = (u8 *)vdev->vpaths[0].macaddr;
-
- ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
-
- vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.serial_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.part_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
- vdev->ndev->name, ll_config->device_hw_info.product_desc);
-
- vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
- vdev->ndev->name, macaddr);
-
- vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
- vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
-
- vxge_debug_init(VXGE_TRACE,
- "%s: Firmware version : %s Date : %s", vdev->ndev->name,
- ll_config->device_hw_info.fw_version.version,
- ll_config->device_hw_info.fw_date.date);
-
- if (new_device) {
- switch (ll_config->device_hw_info.function_mode) {
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
- break;
- }
- }
-
- vxge_print_parm(vdev, vpath_mask);
-
-	/* Store the fw version for the ethtool option */
- strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
-
- /* Copy the station mac address to the list */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
-		if (!entry) {
- vxge_debug_init(VXGE_ERR,
- "%s: mac_addr_list : memory allocation failed",
- vdev->ndev->name);
- ret = -EPERM;
- goto _exit6;
- }
- macaddr = (u8 *)&entry->macaddr;
- memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
- list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
- vdev->vpaths[i].mac_addr_cnt = 1;
- }
-
- kfree(device_config);
-
- /*
- * INTA is shared in multi-function mode. This is unlike the INTA
- * implementation in MR mode, where each VH has its own INTA message.
- * - INTA is masked (disabled) as long as at least one function sets
- * its TITAN_MASK_ALL_INT.ALARM bit.
- * - INTA is unmasked (enabled) when all enabled functions have cleared
- * their own TITAN_MASK_ALL_INT.ALARM bit.
- * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
- * Though this driver leaves the top level interrupts unmasked while
- * leaving the required module interrupt bits masked on exit, there
- * could be a rogue driver around that does not follow this procedure,
- * resulting in a failure to generate interrupts. The following code is
- * present to prevent such a failure.
- */
-
- if (ll_config->device_hw_info.function_mode ==
- VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
- if (vdev->config.intr_type == INTA)
- vxge_hw_device_unmask_all(hldev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- kfree(ll_config);
- return 0;
-
-_exit6:
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-_exit5:
- vxge_device_unregister(hldev);
-_exit4:
- vxge_hw_device_terminate(hldev);
- pci_disable_sriov(pdev);
-_exit3:
- iounmap(attr.bar0);
-_exit2:
- pci_release_region(pdev, 0);
-_exit1:
- pci_disable_device(pdev);
-_exit0:
- kfree(ll_config);
- kfree(device_config);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
- return ret;
-}
-
-/**
- * vxge_remove - Free the PCI device
- * @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the PCI subsystem to release a
- * PCI device and free all resources held by the device.
- */
-static void vxge_remove(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev;
- struct vxgedev *vdev;
- int i;
-
- hldev = pci_get_drvdata(pdev);
- if (hldev == NULL)
- return;
-
- vdev = netdev_priv(hldev->ndev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
- vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
- __func__);
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-
- vxge_device_unregister(hldev);
- /* Do not call pci_disable_sriov here, as it will break child devices */
- vxge_hw_device_terminate(hldev);
- iounmap(vdev->bar0);
- pci_release_region(pdev, 0);
- pci_disable_device(pdev);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
-
- vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
- __func__, __LINE__);
- vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
- __LINE__);
-}
-
-static const struct pci_error_handlers vxge_err_handler = {
- .error_detected = vxge_io_error_detected,
- .slot_reset = vxge_io_slot_reset,
- .resume = vxge_io_resume,
-};
-
-static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
-
-static struct pci_driver vxge_driver = {
- .name = VXGE_DRIVER_NAME,
- .id_table = vxge_id_table,
- .probe = vxge_probe,
- .remove = vxge_remove,
- .driver.pm = &vxge_pm_ops,
- .err_handler = &vxge_err_handler,
-};
-
-static int __init
-vxge_starter(void)
-{
- int ret = 0;
-
- pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
- pr_info("Driver version: %s\n", DRV_VERSION);
-
- verify_bandwidth();
-
- driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
- if (!driver_config)
- return -ENOMEM;
-
- ret = pci_register_driver(&vxge_driver);
- if (ret) {
- kfree(driver_config);
- goto err;
- }
-
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
-err:
- return ret;
-}
-
-static void __exit
-vxge_closer(void)
-{
- pci_unregister_driver(&vxge_driver);
- kfree(driver_config);
-}
-module_init(vxge_starter);
-module_exit(vxge_closer);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
deleted file mode 100644
index 63f65193dd49..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_MAIN_H
-#define VXGE_MAIN_H
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-version.h"
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-
-#define VXGE_DRIVER_NAME "vxge"
-#define VXGE_DRIVER_VENDOR "Neterion, Inc"
-#define VXGE_DRIVER_FW_VERSION_MAJOR 1
-
-#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
- VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
- VXGE_VERSION_FOR
-
-#define PCI_DEVICE_ID_TITAN_WIN 0x5733
-#define PCI_DEVICE_ID_TITAN_UNI 0x5833
-#define VXGE_HW_TITAN1_PCI_REVISION 1
-#define VXGE_HW_TITAN1A_PCI_REVISION 2
-
-#define VXGE_USE_DEFAULT 0xffffffff
-#define VXGE_HW_VPATH_MSIX_ACTIVE 4
-#define VXGE_ALARM_MSIX_ID 2
-#define VXGE_HW_RXSYNC_FREQ_CNT 4
-#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
-#define VXGE_LL_RX_COPY_THRESHOLD 256
-#define VXGE_DEF_FIFO_LENGTH 84
-
-#define NO_STEERING 0
-#define PORT_STEERING 0x1
-#define RTH_STEERING 0x2
-#define RX_TOS_STEERING 0x3
-#define RX_VLAN_STEERING 0x4
-#define RTH_BUCKET_SIZE 4
-
-#define TX_PRIORITY_STEERING 1
-#define TX_VLAN_STEERING 2
-#define TX_PORT_STEERING 3
-#define TX_MULTIQ_STEERING 4
-
-#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
-
-#define VXGE_TTI_BTIMER_VAL 250000
-
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_T1A_TTI_LTIMER_VAL 80
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_TTI_RTIMER_ADAPT_VAL 10
-#define VXGE_T1A_TTI_RTIMER_VAL 400
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_RTI_RTIMER_ADAPT_VAL 15
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
-#define VXGE_ISR_POLLING_CNT 8
-#define VXGE_MAX_CONFIG_DEV 0xFF
-#define VXGE_EXEC_MODE_DISABLE 0
-#define VXGE_EXEC_MODE_ENABLE 1
-#define VXGE_MAX_CONFIG_PORT 1
-#define VXGE_ALL_VID_DISABLE 0
-#define VXGE_ALL_VID_ENABLE 1
-#define VXGE_PAUSE_CTRL_DISABLE 0
-#define VXGE_PAUSE_CTRL_ENABLE 1
-
-#define TTI_TX_URANGE_A 5
-#define TTI_TX_URANGE_B 15
-#define TTI_TX_URANGE_C 40
-#define TTI_TX_UFC_A 5
-#define TTI_TX_UFC_B 40
-#define TTI_TX_UFC_C 60
-#define TTI_TX_UFC_D 100
-#define TTI_T1A_TX_UFC_A 30
-#define TTI_T1A_TX_UFC_B 80
-/* Slope = (max_mtu - min_mtu) / (max_mtu_ufc - min_mtu_ufc), roughly 93 */
-/* UFC is 60 at 9k MTU and about 140 at 1.5k MTU */
-#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
-
-/* Slope = 37: UFC is 100 at 9k MTU and about 300 at 1.5k MTU */
-#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
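Plugging the MTU extremes into both formulas makes the interpolation
concrete (assuming VXGE_HW_MAX_MTU is 9600, the jumbo-frame limit this
hardware supports; integer division throughout):

	TTI_T1A_TX_UFC_C(9600) == 60  + 0         ==  60
	TTI_T1A_TX_UFC_C(1500) == 60  + 8100 / 93 == 147
	TTI_T1A_TX_UFC_D(9600) == 100 + 0         == 100
	TTI_T1A_TX_UFC_D(1500) == 100 + 8100 / 37 == 318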
-
-
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_T1A_RX_URANGE_A 1
-#define RTI_T1A_RX_URANGE_B 20
-#define RTI_T1A_RX_URANGE_C 50
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
-#define RTI_T1A_RX_UFC_B 20
-#define RTI_T1A_RX_UFC_C 50
-#define RTI_T1A_RX_UFC_D 60
-
-/*
- * The moderation parameters keep the interrupt rate at roughly 3k per
- * second for most, though not all, traffic. These values are the maximum
- * interrupt count allowed per function with INTA, or per vector with
- * MSI-X, in a 10 millisecond window. Enabled only for Titan 1A.
- */
-#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
-#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
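A minimal sketch of the windowed check these caps feed, assuming the
per-ring interrupt_count and jiffies bookkeeping declared further below in
struct vxge_ring; the function name and the back-off action are
illustrative only:

	static void vxge_rx_adapt_sketch(struct vxge_ring *ring)
	{
		ring->interrupt_count++;
		if (time_before(ring->jiffies + HZ / 100, jiffies)) {
			/* a 10 ms window has elapsed */
			if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT) {
				/* over the cap: back off, e.g. raise the
				 * RTI restriction timer (RTIMER) value
				 */
			}
			ring->interrupt_count = 0;
			ring->jiffies = jiffies;
		}
	}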
-
-/* Timer period in milliseconds */
-#define VXGE_TIMER_DELAY 10000
-
-#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
-
-#define is_sriov(function_mode) \
- ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
-
-enum vxge_reset_event {
- /* reset events */
- VXGE_LL_VPATH_RESET = 0,
- VXGE_LL_DEVICE_RESET = 1,
- VXGE_LL_FULL_RESET = 2,
- VXGE_LL_START_RESET = 3,
- VXGE_LL_COMPL_RESET = 4
-};
-/* These flags represent the device's temporary state */
-enum vxge_device_state_t {
-	__VXGE_STATE_RESET_CARD = 0,
-	__VXGE_STATE_CARD_UP
-};
-
-enum vxge_mac_addr_state {
- /* mac address states */
- VXGE_LL_MAC_ADDR_IN_LIST = 0,
- VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
-};
-
-struct vxge_drv_config {
- int config_dev_cnt;
- int total_dev_cnt;
- int g_no_cpus;
- unsigned int vpath_per_dev;
-};
-
-struct macInfo {
- unsigned char macaddr[ETH_ALEN];
- unsigned char macmask[ETH_ALEN];
- unsigned int vpath_no;
- enum vxge_mac_addr_state state;
-};
-
-struct vxge_config {
- int tx_pause_enable;
- int rx_pause_enable;
-
-#define NEW_NAPI_WEIGHT 64
- int napi_weight;
- int intr_type;
-#define INTA 0
-#define MSI 1
-#define MSI_X 2
-
- int addr_learn_en;
-
- u32 rth_steering:2,
- rth_algorithm:2,
- rth_hash_type_tcpipv4:1,
- rth_hash_type_ipv4:1,
- rth_hash_type_tcpipv6:1,
- rth_hash_type_ipv6:1,
- rth_hash_type_tcpipv6ex:1,
- rth_hash_type_ipv6ex:1,
- rth_bkt_sz:8;
- int rth_jhash_golden_ratio;
- int tx_steering_type;
- int fifo_indicate_max_pkts;
- struct vxge_hw_device_hw_info device_hw_info;
-};
-
-struct vxge_msix_entry {
-	/* Mimics the kernel's struct msix_entry. */
- u16 vector;
- u16 entry;
- u16 in_use;
- void *arg;
-};
-
-/* Software Statistics */
-
-struct vxge_sw_stats {
-
- /* Virtual Path */
- unsigned long vpaths_open;
- unsigned long vpath_open_fail;
-
- /* Misc. */
- unsigned long link_up;
- unsigned long link_down;
-};
-
-struct vxge_mac_addrs {
- struct list_head item;
- u64 macaddr;
- u64 macmask;
- enum vxge_mac_addr_state state;
-};
-
-struct vxgedev;
-
-struct vxge_fifo_stats {
- struct u64_stats_sync syncp;
- u64 tx_frms;
- u64 tx_bytes;
-
- unsigned long tx_errors;
- unsigned long txd_not_free;
- unsigned long txd_out_of_desc;
- unsigned long pci_map_fail;
-};
-
-struct vxge_fifo {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_fifo *handle;
- struct netdev_queue *txq;
-
- int tx_steering_type;
- int indicate_max_pkts;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- u32 tx_vector_no;
- /* Tx stats */
- struct vxge_fifo_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_ring_stats {
- struct u64_stats_sync syncp;
- u64 rx_frms;
- u64 rx_mcast;
- u64 rx_bytes;
-
- unsigned long rx_errors;
- unsigned long rx_dropped;
- unsigned long prev_rx_frms;
- unsigned long pci_map_fail;
- unsigned long skb_alloc_fail;
-};
-
-struct vxge_ring {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_ring *handle;
- /* The vpath id maintained in the driver -
- * 0 to 'maximum_vpaths_in_function - 1'
- */
- int driver_id;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- /* copy of the flag indicating whether rx_hwts is to be used */
- u32 rx_hwts:1;
-
- int pkts_processed;
- int budget;
-
- struct napi_struct napi;
- struct napi_struct *napi_p;
-
-#define VXGE_MAX_MAC_ADDR_COUNT 30
-
- int vlan_tag_strip;
- u32 rx_vector_no;
- enum vxge_hw_status last_status;
-
- /* Rx stats */
- struct vxge_ring_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_vpath {
- struct vxge_fifo fifo;
- struct vxge_ring ring;
-
- struct __vxge_hw_vpath_handle *handle;
-
- /* Actual vpath id for this vpath in the device - 0 to 16 */
- int device_id;
- int max_mac_addr_cnt;
- int is_configured;
- int is_open;
- struct vxgedev *vdev;
- u8 macaddr[ETH_ALEN];
- u8 macmask[ETH_ALEN];
-
-#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
- /* mac addresses currently programmed into NIC */
- u16 mac_addr_cnt;
- u16 mcast_addr_cnt;
- struct list_head mac_addr_list;
-
- u32 level_err;
- u32 level_trace;
-};
-#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
- for (i = 0; i < vdev->no_of_vpath; i++) { \
- vdev->vpaths[i].level_err = err; \
- vdev->vpaths[i].level_trace = trace; \
- } \
- vdev->level_err = err; \
- vdev->level_trace = trace; \
-}
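Note that the macro body uses a loop counter i that it does not declare,
so it expands correctly only inside a function that already has an int i
in scope, e.g.:

	{
		int i;	/* consumed by the macro's for-loop */

		VXGE_COPY_DEBUG_INFO_TO_LL(vdev, VXGE_ERR, VXGE_TRACE);
	}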
-
-struct vxgedev {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_device *devh;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- int vlan_tag_strip;
- struct vxge_config config;
- unsigned long state;
-
- /* Indicates which vpath to reset */
- unsigned long vp_reset;
-
- /* Timer used for polling vpath resets */
- struct timer_list vp_reset_timer;
-
- /* Timer used for polling vpath lockup */
- struct timer_list vp_lockup_timer;
-
-	/*
-	 * Flags to track whether the device is in all-multicast
-	 * or in promiscuous mode.
-	 */
- u16 all_multi_flg;
-
- /* A flag indicating whether rx_hwts is to be used or not. */
- u32 rx_hwts:1,
- titan1:1;
-
- struct vxge_msix_entry *vxge_entries;
- struct msix_entry *entries;
-	/*
-	 * 4 vectors for each of the 17 vpaths;
-	 * total is 68.
-	 */
-#define VXGE_MAX_REQUESTED_MSIX 68
-#define VXGE_INTR_STRLEN 80
- char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
-
- enum vxge_hw_event cric_err_event;
-
- int max_vpath_supported;
- int no_of_vpath;
-
- struct napi_struct napi;
-	/* A debug option; when enabled, on an error condition the
-	 * driver will:
-	 * - mask all interrupts
-	 * - not clear the source of the alarm
-	 * - gracefully stop all I/O
-	 * A diagnostic dump of registers and stats at this point
-	 * reveals very useful information.
-	 */
- int exec_mode;
- int max_config_port;
- struct vxge_vpath *vpaths;
-
- struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
- void __iomem *bar0;
- struct vxge_sw_stats stats;
- int mtu;
-	/* The variables below are used to pick a vpath to transmit a packet */
- u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpaths_deployed;
-
- u32 intr_cnt;
- u32 level_err;
- u32 level_trace;
- char fw_version[VXGE_HW_FW_STRLEN];
- struct work_struct reset_task;
-};
-
-struct vxge_rx_priv {
- struct sk_buff *skb;
- unsigned char *skb_data;
- dma_addr_t data_dma;
- dma_addr_t data_size;
-};
-
-struct vxge_tx_priv {
- struct sk_buff *skb;
- dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
-};
-
-#define VXGE_MODULE_PARAM_INT(p, val) \
- static int p = val; \
- module_param(p, int, 0)
-
-static inline
-void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *),
- unsigned long timeout)
-{
- timer_setup(timer, func, 0);
- mod_timer(timer, jiffies + timeout);
-}
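vxge_os_timer() combines timer_setup() with an immediate mod_timer(), so
the callback receives the timer_list pointer and recovers its container
with from_timer(). A minimal usage sketch (the callback name is
hypothetical):

	static void vxge_poll_sketch(struct timer_list *t)
	{
		struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);

		/* ... do the periodic work, then re-arm ... */
		mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
	}

	vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_sketch, HZ / 2);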
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev);
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-
-/* #define VXGE_DEBUG_INIT: debug for initialization functions
- * #define VXGE_DEBUG_TX : debug transmit related functions
- * #define VXGE_DEBUG_RX : debug receive related functions
- * #define VXGE_DEBUG_MEM : debug memory module
- * #define VXGE_DEBUG_LOCK: debug locks
- * #define VXGE_DEBUG_SEM : debug semaphore
- * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
-*/
-#define VXGE_DEBUG_INIT 0x00000001
-#define VXGE_DEBUG_TX 0x00000002
-#define VXGE_DEBUG_RX 0x00000004
-#define VXGE_DEBUG_MEM 0x00000008
-#define VXGE_DEBUG_LOCK 0x00000010
-#define VXGE_DEBUG_SEM 0x00000020
-#define VXGE_DEBUG_ENTRYEXIT 0x00000040
-#define VXGE_DEBUG_INTR 0x00000080
-#define VXGE_DEBUG_LL_CONFIG 0x00000100
-
-/* Debug tracing for VXGE driver */
-#ifndef VXGE_DEBUG_MASK
-#define VXGE_DEBUG_MASK 0x0
-#endif
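Each vxge_debug_* macro below compiles to a real trace call only when its
module bit is set in VXGE_DEBUG_MASK at build time; otherwise it collapses
to no_printk(), which still type-checks the format arguments but emits no
code. For example, to trace only the init and transmit paths the mask
could be supplied from the Makefile:

	ccflags-y += -DVXGE_DEBUG_MASK=0x3	/* VXGE_DEBUG_INIT | VXGE_DEBUG_TX */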
-
-#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
-#define vxge_debug_ll_config(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_ll_config(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
-#define vxge_debug_init(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_init(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
-#define vxge_debug_tx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_tx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
-#define vxge_debug_rx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_rx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
-#define vxge_debug_mem(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_mem(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
-#define vxge_debug_entryexit(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_entryexit(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
-#define vxge_debug_intr(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_intr(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
- vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
- level, mask);\
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
- vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
- vdev->devh), \
- vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
- vdev->devh));\
-}
-
-#ifdef NETIF_F_GSO
-#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
-#endif
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
deleted file mode 100644
index 3e658b175947..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-reg.h
+++ /dev/null
@@ -1,4636 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
- * Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_REG_H
-#define VXGE_REG_H
-
-/*
- * vxge_mBIT(loc) - set bit at offset
- */
-#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
-
-/*
- * vxge_vBIT(val, loc, sz) - set bits at offset
- */
-#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
-#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
-
-/*
- * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
- */
-#define vxge_bVALn(bits, loc, n) \
-	((((u64)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))
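Bit 0 is the most significant bit of the 64-bit word, matching the
hardware documentation's numbering. A few worked values:

	vxge_mBIT(3)                            == 0x1000000000000000ULL
	vxge_vBIT(0x5, 0, 4)                    == 0x5000000000000000ULL
	vxge_bVALn(0x5000000000000000ULL, 0, 4) == 0x5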
-
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
- vxge_bVALn(bits, 56, 8)
-
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
- vxge_bVALn(bits, 3, 5)
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
- vxge_bVALn(bits, 5, 3)
-#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
-
-#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
-#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
-
-#define VXGE_HW_FW_API_GET_EPROM_REV 31
-
-#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
-#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
-#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
-#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
-
-#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
-#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
-
-#define VXGE_HW_FW_API_GET_FUNC_MODE 29
-#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
-
-#define VXGE_HW_FW_UPGRADE_MEMO 13
-#define VXGE_HW_FW_UPGRADE_ACTION 16
-#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
-#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
-#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
-#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
-
-#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
-#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
-#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
-
-#define VXGE_HW_ASIC_MODE_RESERVED 0
-#define VXGE_HW_ASIC_MODE_NO_IOV 1
-#define VXGE_HW_ASIC_MODE_SR_IOV 2
-#define VXGE_HW_ASIC_MODE_MR_IOV 3
-
-#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
-#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
-
-#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
-
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
- vxge_bVALn(bits, 50, 14)
-
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
- vxge_bVALn(bits, 0, 17)
-
-#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
- vxge_bVALn(bits, 3, 5)
-
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
- vxge_bVALn(bits, 17, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
-
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
-
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
- vxge_bVALn(bits, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
- vxge_bVALn(bits, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
- vxge_bVALn(bits, 33, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
- vxge_vBIT(val, 49, 15)
-
-#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
-#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
-#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
-
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
-
-#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
- vxge_mBIT(54)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
- vxge_bVALn(bits, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
- vxge_vBIT(val, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
- vxge_bVALn(bits, 62, 2)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
- vxge_bVALn(bits, 7, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
- vxge_bVALn(bits, 8, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
- vxge_bVALn(bits, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
- vxge_bVALn(bits, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
- vxge_bVALn(bits, 15, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
- vxge_bVALn(bits, 19, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
- vxge_bVALn(bits, 23, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
- vxge_bVALn(bits, 27, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 31, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 35, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
- vxge_bVALn(bits, 39, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
- vxge_bVALn(bits, 43, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
- vxge_vBIT(val, 32, 32)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
- vxge_bVALn(bits, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
- vxge_bVALn(bits, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
- vxge_vBIT(val, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
- vxge_bVALn(bits, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
- vxge_bVALn(bits, 42, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
- vxge_vBIT(val, 42, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
- vxge_bVALn(bits, 0, 64)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
- vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
- vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
- vxge_bVALn(bits, 40, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
- vxge_vBIT(val, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
- vxge_bVALn(bits, 56, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 57, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
- vxge_vBIT(val, 57, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
-							vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
-							vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
-							vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
-							vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) \
-							vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
-							vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) \
-							vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
-							vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) \
-							vxge_vBIT(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
-
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
- vxge_bVALn(bits, 0, 18)
-
-#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
-) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define \
-VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_CONFIG_PRIV_H
-
-#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
-#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
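These four constants are one pattern under each combination of
transformation: BYTE_SWAPPED reverses the eight bytes of INITIAL_VALUE
(0x01 23 ... ef becomes 0xef cd ... 01), BIT_FLIPPED reverses the bit
order within each byte in place (0x01 becomes 0x80, 0xef becomes 0xf7),
and BYTE_SWAPPED_BIT_FLIPPED applies both, i.e. a full 64-bit reversal.
Matching the value read back from the legacy swapper register against
these constants is what lets the driver decide which of the read/write
swap and flip enables below to program.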
-
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-/*
- * The registers are memory mapped and are in native big-endian byte order.
- * Little-endian hosts are handled by enabling hardware byte swapping for
- * register and DMA operations.
- */
-struct vxge_hw_legacy_reg {
-
- u8 unused00010[0x00010];
-
-/*0x00010*/ u64 toc_swapper_fb;
-#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00018*/ u64 pifm_rd_swap_en;
-#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00020*/ u64 pifm_rd_flip_en;
-#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00028*/ u64 pifm_wr_swap_en;
-#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00030*/ u64 pifm_wr_flip_en;
-#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00038*/ u64 toc_first_pointer;
-#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00040*/ u64 host_access_en;
-#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
-
-} __packed;
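Each /*0xNNNNN*/ annotation is the register's byte offset from the start
of the structure, and the unusedNNNNN pad arrays hold every field at its
documented offset. A build-time check along these lines (placed in any
function) can verify the layout:

	BUILD_BUG_ON(offsetof(struct vxge_hw_legacy_reg,
			      toc_first_pointer) != 0x38);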
-
-struct vxge_hw_toc_reg {
-
- u8 unused00050[0x00050];
-
-/*0x00050*/ u64 toc_common_pointer;
-#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 toc_memrepair_pointer;
-#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
-#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused001e0[0x001e0-0x000e8];
-
-/*0x001e0*/ u64 toc_mrpcim_pointer;
-#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x001e8*/ u64 toc_srpcim_pointer[17];
-#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00278[0x00278-0x00270];
-
-/*0x00278*/ u64 toc_vpmgmt_pointer[17];
-#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00390[0x00390-0x00300];
-
-/*0x00390*/ u64 toc_vpath_pointer[17];
-#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused004a0[0x004a0-0x00418];
-
-/*0x004a0*/ u64 toc_kdfc;
-#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004a8*/ u64 toc_usdc;
-#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004b0*/ u64 toc_kdfc_vpath_stride;
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-/*0x004b8*/ u64 toc_kdfc_fifo_stride;
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-
-} __packed;
-
-struct vxge_hw_common_reg {
-
- u8 unused00a00[0x00a00];
-
-/*0x00a00*/ u64 prc_status1;
-#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
-/*0x00a08*/ u64 rxdcm_reset_in_progress;
-#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a10*/ u64 replicq_flush_in_progress;
-#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
-#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
-#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a28*/ u64 noffload_reset_in_progress;
-#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rd_req_in_progress;
-#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
-/*0x00a38*/ u64 rd_req_outstanding;
-#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
-/*0x00a40*/ u64 kdfc_reset_in_progress;
-#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
- u8 unused00b00[0x00b00-0x00a48];
-
-/*0x00b00*/ u64 one_cfg_vp;
-#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
-/*0x00b08*/ u64 one_common;
-#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
- u8 unused00b80[0x00b80-0x00b10];
-
-/*0x00b80*/ u64 tim_int_en;
-#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
-/*0x00b88*/ u64 tim_set_int_en;
-#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b90*/ u64 tim_clr_int_en;
-#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b98*/ u64 tim_mask_int_during_reset;
-#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
-/*0x00ba0*/ u64 tim_reset_in_progress;
-#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
-/*0x00ba8*/ u64 tim_outstanding_bmap;
-#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
- u8 unused00c00[0x00c00-0x00bb0];
-
-/*0x00c00*/ u64 msg_reset_in_progress;
-#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
-/*0x00c08*/ u64 msg_mxp_mr_ready;
-#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
-/*0x00c10*/ u64 msg_uxp_mr_ready;
-#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
-/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
-#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
-/*0x00c20*/ u64 msg_umq_rtl_bwr;
-#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
- u8 unused00d00[0x00d00-0x00c28];
-
-/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
-#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
-/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
-#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
-/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
-#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
-/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
-#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
-/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
-#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
- u8 unused00d40[0x00d40-0x00d28];
-
-/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
-#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
-/*0x00d48*/ u64 stats_cfg0;
-#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
- u8 unused00da8[0x00da8-0x00d50];
-
-/*0x00da8*/ u64 clear_msix_mask_vect[4];
-#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00dc8*/ u64 set_msix_mask_vect[4];
-#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
-/*0x00de8*/ u64 clear_msix_mask_all_vect;
-#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df0*/ u64 set_msix_mask_all_vect;
-#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df8*/ u64 mask_vector[4];
-#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x00e18*/ u64 msix_pending_vector[4];
-#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
-#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e58*/ u64 titan_asic_id;
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
-/*0x00e60*/ u64 titan_general_int_status;
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
- vxge_vBIT(val, 3, 17)
- u8 unused00e70[0x00e70-0x00e68];
-
-/*0x00e70*/ u64 titan_mask_all_int;
-#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
-#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
- u8 unused00e80[0x00e80-0x00e78];
-
-/*0x00e80*/ u64 tim_int_status0;
-#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
-/*0x00e88*/ u64 tim_int_mask0;
-#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
-/*0x00e90*/ u64 tim_int_status1;
-#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
-/*0x00e98*/ u64 tim_int_mask1;
-#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
-/*0x00ea0*/ u64 rti_int_status;
-#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
-/*0x00ea8*/ u64 rti_int_mask;
-#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
-/*0x00eb0*/ u64 adapter_status;
-#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
-#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
-#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
-#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
-#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
-#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
-#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
-#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
-#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
-/*0x00eb8*/ u64 gen_ctrl;
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
-#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
-#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
-#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
- u8 unused00ed0[0x00ed0-0x00ec0];
-
-/*0x00ed0*/ u64 adapter_ready;
-#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
-/*0x00ed8*/ u64 outstanding_read;
-#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
-/*0x00ee0*/ u64 vpath_rst_in_prog;
-#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
-/*0x00ee8*/ u64 vpath_reg_modified;
-#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
- u8 unused00fc0[0x00fc0-0x00ef0];
-
-/*0x00fc0*/ u64 cp_reset_in_progress;
-#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
- u8 unused01080[0x01080-0x00fc8];
-
-/*0x01080*/ u64 xgmac_ready;
-#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused010c0[0x010c0-0x01088];
-
-/*0x010c0*/ u64 fbif_ready;
-#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused01100[0x01100-0x010c8];
-
-/*0x01100*/ u64 vplane_assignments;
-#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
-/*0x01108*/ u64 vpath_assignments;
-#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
-/*0x01110*/ u64 resource_assignments;
-#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01118*/ u64 host_type_assignments;
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 5, 3)
- u8 unused01128[0x01128-0x01120];
-
-/*0x01128*/ u64 max_resource_assignments;
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
- vxge_vBIT(val, 11, 5)
-/*0x01130*/ u64 pf_vpath_assignments;
-#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01200[0x01200-0x01138];
-
-/*0x01200*/ u64 rts_access_icmp;
-#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01208*/ u64 rts_access_tcpsyn;
-#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01210*/ u64 rts_access_zl4pyld;
-#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01218*/ u64 rts_access_l4prtcl_tcp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01220*/ u64 rts_access_l4prtcl_udp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01228*/ u64 rts_access_l4prtcl_flex;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01230*/ u64 rts_access_ipfrag;
-#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
-
-} __packed;
-
-struct vxge_hw_memrepair_reg {
- u64 unused1;
- u64 unused2;
-} __packed;
-
-struct vxge_hw_pcicfgmgmt_reg {
-
-/*0x00000*/ u64 resource_no;
-#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
-/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00020*/ u64 msixgrp_no;
-#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
-
-} __packed;
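These register blocks are declared __packed so that each member's offset matches the hardware offset in its leading comment, letting the driver lay the struct directly over a mapped BAR. A hypothetical access sketch under that assumption (not the driver's actual code; readq() needs a 64-bit build):

#include <linux/io.h>

static u64 sketch_msix_table_size(struct vxge_hw_pcicfgmgmt_reg __iomem *r)
{
	u64 v = readq(&r->msixgrp_no);	/* member sits at offset 0x00020 */

	/* undo vxge_vBIT(val, 5, 11): 11-bit field at big-endian bit 5 */
	return (v >> (64 - 5 - 11)) & ((1ULL << 11) - 1);
}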
-
-struct vxge_hw_mrpcim_reg {
-/*0x00000*/ u64 g3fbct_int_status;
-#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x00008*/ u64 g3fbct_int_mask;
-/*0x00010*/ u64 g3fbct_err_reg;
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x00018*/ u64 g3fbct_err_mask;
-/*0x00020*/ u64 g3fbct_err_alarm;
-
- u8 unused00a00[0x00a00-0x00028];
-
-/*0x00a00*/ u64 wrdma_int_status;
-#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
-#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
-#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
-/*0x00a08*/ u64 wrdma_int_mask;
-/*0x00a10*/ u64 rc_alarm_reg;
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
-#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
-#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
-#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
-#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
-/*0x00a18*/ u64 rc_alarm_mask;
-/*0x00a20*/ u64 rc_alarm_alarm;
-/*0x00a28*/ u64 rxdrm_sm_err_reg;
-#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rxdrm_sm_err_mask;
-/*0x00a38*/ u64 rxdrm_sm_err_alarm;
-/*0x00a40*/ u64 rxdcm_sm_err_reg;
-#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a48*/ u64 rxdcm_sm_err_mask;
-/*0x00a50*/ u64 rxdcm_sm_err_alarm;
-/*0x00a58*/ u64 rxdwm_sm_err_reg;
-#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a60*/ u64 rxdwm_sm_err_mask;
-/*0x00a68*/ u64 rxdwm_sm_err_alarm;
-/*0x00a70*/ u64 rda_err_reg;
-#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
-#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
-#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
-#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
-#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
-#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
-/*0x00a78*/ u64 rda_err_mask;
-/*0x00a80*/ u64 rda_err_alarm;
-/*0x00a88*/ u64 rda_ecc_db_reg;
-#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00a90*/ u64 rda_ecc_db_mask;
-/*0x00a98*/ u64 rda_ecc_db_alarm;
-/*0x00aa0*/ u64 rda_ecc_sg_reg;
-#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00aa8*/ u64 rda_ecc_sg_mask;
-/*0x00ab0*/ u64 rda_ecc_sg_alarm;
-/*0x00ab8*/ u64 rqa_err_reg;
-#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
-/*0x00ac0*/ u64 rqa_err_mask;
-/*0x00ac8*/ u64 rqa_err_alarm;
-/*0x00ad0*/ u64 frf_alarm_reg;
-#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
-/*0x00ad8*/ u64 frf_alarm_mask;
-/*0x00ae0*/ u64 frf_alarm_alarm;
-/*0x00ae8*/ u64 rocrc_alarm_reg;
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
-/*0x00af0*/ u64 rocrc_alarm_mask;
-/*0x00af8*/ u64 rocrc_alarm_alarm;
-/*0x00b00*/ u64 wde0_alarm_reg;
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b08*/ u64 wde0_alarm_mask;
-/*0x00b10*/ u64 wde0_alarm_alarm;
-/*0x00b18*/ u64 wde1_alarm_reg;
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b20*/ u64 wde1_alarm_mask;
-/*0x00b28*/ u64 wde1_alarm_alarm;
-/*0x00b30*/ u64 wde2_alarm_reg;
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b38*/ u64 wde2_alarm_mask;
-/*0x00b40*/ u64 wde2_alarm_alarm;
-/*0x00b48*/ u64 wde3_alarm_reg;
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b50*/ u64 wde3_alarm_mask;
-/*0x00b58*/ u64 wde3_alarm_alarm;
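Each alarm source above follows the same reg/mask/alarm triple: *_reg latches the raw events, *_mask gates which of them propagate, and *_alarm reflects the gated result. A hypothetical acknowledge sketch, assuming write-1-to-clear semantics (suggested by the latched-event naming, not stated in this header):

#include <linux/io.h>

static void sketch_ack_latched_events(u64 __iomem *err_reg)
{
	u64 latched = readq(err_reg);

	if (latched)
		writeq(latched, err_reg);	/* assumed W1C acknowledge */
}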
-
- u8 unused00be8[0x00be8-0x00b60];
-
-/*0x00be8*/ u64 rx_w_round_robin_0;
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
-/*0x00bf0*/ u64 rx_w_round_robin_1;
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00bf8*/ u64 rx_w_round_robin_2;
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c00*/ u64 rx_w_round_robin_3;
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c08*/ u64 rx_w_round_robin_4;
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c10*/ u64 rx_w_round_robin_5;
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c18*/ u64 rx_w_round_robin_6;
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c20*/ u64 rx_w_round_robin_7;
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c28*/ u64 rx_w_round_robin_8;
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c30*/ u64 rx_w_round_robin_9;
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c38*/ u64 rx_w_round_robin_10;
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c40*/ u64 rx_w_round_robin_11;
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c48*/ u64 rx_w_round_robin_12;
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c50*/ u64 rx_w_round_robin_13;
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c58*/ u64 rx_w_round_robin_14;
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c60*/ u64 rx_w_round_robin_15;
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c68*/ u64 rx_w_round_robin_16;
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c70*/ u64 rx_w_round_robin_17;
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c78*/ u64 rx_w_round_robin_18;
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c80*/ u64 rx_w_round_robin_19;
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c88*/ u64 rx_w_round_robin_20;
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c90*/ u64 rx_w_round_robin_21;
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
- vxge_vBIT(val, 19, 5)
-
-#define VXGE_HW_WRR_RING_SERVICE_STATES 171
-#define VXGE_HW_WRR_RING_COUNT 22
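The two counts above are consistent with the register block they summarize: 22 rx_w_round_robin_* registers x 8 five-bit slots = 176 slots, of which only SS_0..SS_170 (171 service states) are defined. A sketch of how one such register would be packed (illustrative helper, not a driver function):

#include <linux/types.h>

static u64 sketch_pack_rr_slots(const u8 prio[8])
{
	u64 v = 0;
	int slot;

	/* 5-bit fields at big-endian bit offsets 3, 11, 19, ..., 59 */
	for (slot = 0; slot < 8; slot++)
		v |= (u64)(prio[slot] & 0x1f) << (64 - (3 + 8 * slot) - 5);
	return v;
}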
-
-/*0x00c98*/ u64 rx_queue_priority_0;
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-/*0x00ca0*/ u64 rx_queue_priority_1;
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
-/*0x00ca8*/ u64 rx_queue_priority_2;
-#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
- u8 unused00cc8[0x00cc8-0x00cb0];
-
-/*0x00cc8*/ u64 replication_queue_priority;
-#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00cd0*/ u64 rx_queue_select;
-#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
-/*0x00cd8*/ u64 rqa_vpbp_ctrl;
-#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
-#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
-#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
-/*0x00ce0*/ u64 rx_multi_cast_ctrl;
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
- vxge_vBIT(val, 2, 30)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
-/*0x00ce8*/ u64 wde_prm_ctrl;
-#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
-#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
-/*0x00cf0*/ u64 noa_ctrl;
-#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
-/*0x00cf8*/ u64 phase_cfg;
-#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
-#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
-#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
-#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
-#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
-#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
-#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
-#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
-#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
-/*0x00d00*/ u64 rcq_bypq_cfg;
-#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
- u8 unused00e00[0x00e00-0x00d08];
-
-/*0x00e00*/ u64 doorbell_int_status;
-#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
-#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
-/*0x00e08*/ u64 doorbell_int_mask;
-/*0x00e10*/ u64 kdfc_err_reg;
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
-/*0x00e18*/ u64 kdfc_err_mask;
-/*0x00e20*/ u64 kdfc_err_reg_alarm;
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
- u8 unused00e40[0x00e40-0x00e28];
-/*0x00e40*/ u64 kdfc_vp_partition_0;
-#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
-/*0x00e48*/ u64 kdfc_vp_partition_1;
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
-/*0x00e50*/ u64 kdfc_vp_partition_2;
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
-/*0x00e58*/ u64 kdfc_vp_partition_3;
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
-/*0x00e60*/ u64 kdfc_vp_partition_4;
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
-/*0x00e68*/ u64 kdfc_vp_partition_5;
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
-/*0x00e70*/ u64 kdfc_vp_partition_6;
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
-/*0x00e78*/ u64 kdfc_vp_partition_7;
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
-/*0x00e80*/ u64 kdfc_vp_partition_8;
-#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
-/*0x00e88*/ u64 kdfc_w_round_robin_0;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused00f28[0x00f28-0x00e90];
-
-/*0x00f28*/ u64 kdfc_w_round_robin_20;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
-#define VXGE_HW_WRR_FIFO_COUNT 20
-
- u8 unused00fc8[0x00fc8-0x00f30];
-
-/*0x00fc8*/ u64 kdfc_w_round_robin_40;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused01068[0x01068-0x00fd0];
-
-/*0x01068*/ u64 kdfc_entry_type_sel_0;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
-/*0x01070*/ u64 kdfc_entry_type_sel_1;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 kdfc_fifo_0_ctrl;
-#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
-#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
-
- u8 unused01100[0x01100-0x01080];
-
-/*0x01100*/ u64 kdfc_fifo_17_ctrl;
-#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-
- u8 unused01600[0x01600-0x01108];
-
-/*0x01600*/ u64 rxmac_int_status;
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
- vxge_mBIT(11)
-/*0x01608*/ u64 rxmac_int_mask;
- u8 unused01618[0x01618-0x01610];
-
-/*0x01618*/ u64 rxmac_gen_err_reg;
-/*0x01620*/ u64 rxmac_gen_err_mask;
-/*0x01628*/ u64 rxmac_gen_err_alarm;
-/*0x01630*/ u64 rxmac_ecc_err_reg;
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
- vxge_vBIT(val, 24, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
- vxge_vBIT(val, 26, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
- vxge_vBIT(val, 28, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
- vxge_vBIT(val, 30, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
- vxge_vBIT(val, 40, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
- vxge_vBIT(val, 47, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
- vxge_vBIT(val, 54, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
- vxge_vBIT(val, 57, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR vxge_mBIT(60)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR vxge_mBIT(61)
-/*0x01638*/ u64 rxmac_ecc_err_mask;
-/*0x01640*/ u64 rxmac_ecc_err_alarm;
-/*0x01648*/ u64 rxmac_various_err_reg;
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
-/*0x01650*/ u64 rxmac_various_err_mask;
-/*0x01658*/ u64 rxmac_various_err_alarm;
-/*0x01660*/ u64 rxmac_gen_cfg;
-#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
-/*0x01668*/ u64 rxmac_authorize_all_addr;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
-/*0x01670*/ u64 rxmac_authorize_all_vid;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
- u8 unused016c0[0x016c0-0x01678];
-
-/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
- u8 unused016e0[0x016e0-0x016c8];
-
-/*0x016e0*/ u64 rxmac_cfg0_port[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
- u8 unused01710[0x01710-0x016f8];
-
-/*0x01710*/ u64 rxmac_cfg2_port[3];
-#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
-/*0x01728*/ u64 rxmac_pause_cfg_port[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
- u8 unused01758[0x01758-0x01740];
-
-/*0x01758*/ u64 rxmac_red_cfg0_port[3];
-#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
-/*0x01770*/ u64 rxmac_red_cfg1_port[3];
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
-/*0x01788*/ u64 rxmac_red_cfg2_port[3];
-#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
-/*0x017a0*/ u64 rxmac_link_util_port[3];
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
- u8 unused017d0[0x017d0-0x017b8];
-
-/*0x017d0*/ u64 rxmac_status_port[3];
-#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
- u8 unused01800[0x01800-0x017e8];
-
-/*0x01800*/ u64 rxmac_rx_pa_cfg0;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x01808*/ u64 rxmac_rx_pa_cfg1;
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
- u8 unused01828[0x01828-0x01810];
-
-/*0x01828*/ u64 rts_mgr_cfg0;
-#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
-/*0x01830*/ u64 rts_mgr_cfg1;
-#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
-/*0x01838*/ u64 rts_mgr_criteria_priority;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
-/*0x01840*/ u64 rts_mgr_da_pause_cfg;
-#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
-#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01890[0x01890-0x01850];
-/*0x01890*/ u64 rts_mgr_cbasin_cfg;
- u8 unused01968[0x01968-0x01898];
-
-/*0x01968*/ u64 dbg_stat_rx_any_frms;
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused01a00[0x01a00-0x01970];
-
-/*0x01a00*/ u64 rxmac_red_rate_vp[17];
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
- u8 unused01e00[0x01e00-0x01a88];
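Each rxmac_red_rate_vp entry packs eight 4-bit RED thresholds: four current-rate values (CRATE_THR0..3 at big-endian bits 0, 4, 8, 12) and four frame-rate values (FRATE_THR0..3 at bits 16, 20, 24, 28). A packing sketch (illustrative helper only):

#include <linux/types.h>

static u64 sketch_pack_red_rate(const u8 crate[4], const u8 frate[4])
{
	u64 v = 0;
	int i;

	for (i = 0; i < 4; i++) {
		v |= (u64)(crate[i] & 0xf) << (64 - 4 * i - 4);
		v |= (u64)(frate[i] & 0xf) << (64 - (16 + 4 * i) - 4);
	}
	return v;
}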
-
-/*0x01e00*/ u64 xgmac_int_status;
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
- vxge_mBIT(7)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
-/*0x01e08*/ u64 xgmac_int_mask;
-/*0x01e10*/ u64 xmac_gen_err_reg;
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
- vxge_mBIT(11)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
- vxge_mBIT(23)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
- vxge_vBIT(val, 42, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
- vxge_vBIT(val, 44, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
- vxge_vBIT(val, 46, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
- vxge_vBIT(val, 48, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
- vxge_vBIT(val, 50, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
- vxge_vBIT(val, 52, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
- vxge_vBIT(val, 54, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
- vxge_vBIT(val, 56, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
- vxge_vBIT(val, 58, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
-/*0x01e18*/ u64 xmac_gen_err_mask;
-/*0x01e20*/ u64 xmac_gen_err_alarm;
-/*0x01e28*/ u64 xmac_link_err_port0_reg;
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
- vxge_mBIT(47)
-/*0x01e30*/ u64 xmac_link_err_port0_mask;
-/*0x01e38*/ u64 xmac_link_err_port0_alarm;
-/*0x01e40*/ u64 xmac_link_err_port1_reg;
-/*0x01e48*/ u64 xmac_link_err_port1_mask;
-/*0x01e50*/ u64 xmac_link_err_port1_alarm;
-/*0x01e58*/ u64 xgxs_gen_err_reg;
-#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
-/*0x01e60*/ u64 xgxs_gen_err_mask;
-/*0x01e68*/ u64 xgxs_gen_err_alarm;
-/*0x01e70*/ u64 asic_ntwk_err_reg;
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x01e78*/ u64 asic_ntwk_err_mask;
-/*0x01e80*/ u64 asic_ntwk_err_alarm;
-/*0x01e88*/ u64 asic_gpio_err_reg;
-#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
-/*0x01e90*/ u64 asic_gpio_err_mask;
-/*0x01e98*/ u64 asic_gpio_err_alarm;
-/*0x01ea0*/ u64 xgmac_gen_status;
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
-/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
-#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
-#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
- u8 unused01f40[0x01f40-0x01ed0];
-
-/*0x01f40*/ u64 xmac_gen_cfg;
-#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
-/*0x01f48*/ u64 xmac_timestamp;
-#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
-/*0x01f50*/ u64 xmac_stats_gen_cfg;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
-/*0x01f58*/ u64 xmac_stats_sys_cmd;
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x01f60*/ u64 xmac_stats_sys_data;
-#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused01f80[0x01f80-0x01f68];
-
-/*0x01f80*/ u64 asic_ntwk_ctrl;
-#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
-/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
-#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
-/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
-#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
-/*0x01f98*/ u64 xmac_cfg_port[3];
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
-/*0x01fb0*/ u64 xmac_station_addr_port[2];
-#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
- u8 unused02020[0x02020-0x01fc0];
-
-/*0x02020*/ u64 lag_cfg;
-#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
-#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
-#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
-/*0x02028*/ u64 lag_status;
-#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
-#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
- vxge_vBIT(val, 8, 8)
-/*0x02030*/ u64 lag_active_passive_cfg;
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
- vxge_vBIT(val, 32, 16)
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 lag_lacp_cfg;
-#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
-#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
-#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
-/*0x02048*/ u64 lag_timer_cfg_1;
-#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
-/*0x02050*/ u64 lag_timer_cfg_2;
-#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
-/*0x02058*/ u64 lag_sys_id;
-#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
-/*0x02060*/ u64 lag_sys_cfg;
-#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
- u8 unused02070[0x02070-0x02068];
-
-/*0x02070*/ u64 lag_aggr_addr_cfg[2];
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
-/*0x02080*/ u64 lag_aggr_id_cfg[2];
-#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
-/*0x02090*/ u64 lag_aggr_admin_key[2];
-#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020a0*/ u64 lag_aggr_alt_admin_key;
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
-/*0x020a8*/ u64 lag_aggr_oper_key[2];
-#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x020c8*/ u64 lag_aggr_partner_info[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
- vxge_vBIT(val, 16, 16)
-/*0x020d8*/ u64 lag_aggr_state[2];
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
- u8 unused020f0[0x020f0-0x020e8];
-
-/*0x020f0*/ u64 lag_port_cfg[2];
-#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
-/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
-/*0x02110*/ u64 lag_port_actor_admin_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x02140*/ u64 lag_port_partner_admin_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02150*/ u64 lag_port_to_aggr[2];
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
-/*0x02160*/ u64 lag_port_actor_oper_key[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x02170*/ u64 lag_port_actor_oper_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
- vxge_vBIT(val, 0, 48)
-/*0x02190*/ u64 lag_port_partner_oper_info[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x021a0*/ u64 lag_port_partner_oper_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
- vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x021b0*/ u64 lag_port_state_vars[2];
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
- vxge_mBIT(32)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
- vxge_mBIT(33)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
- vxge_vBIT(val, 41, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
- vxge_vBIT(val, 60, 4)
-/*0x021c0*/ u64 lag_port_timer_cntr[2];
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 40, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
- vxge_vBIT(val, 56, 8)
- u8 unused02208[0x02700-0x021d0];
-
-/*0x02700*/ u64 rtdma_int_status;
-#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
-#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
-#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
-#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
-/*0x02708*/ u64 rtdma_int_mask;
-/*0x02710*/ u64 pda_alarm_reg;
-#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
-/*0x02718*/ u64 pda_alarm_mask;
-/*0x02720*/ u64 pda_alarm_alarm;
-/*0x02728*/ u64 pcc_error_reg;
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
-/*0x02730*/ u64 pcc_error_mask;
-/*0x02738*/ u64 pcc_error_alarm;
-/*0x02740*/ u64 lso_error_reg;
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
-/*0x02748*/ u64 lso_error_mask;
-/*0x02750*/ u64 lso_error_alarm;
-/*0x02758*/ u64 sm_error_reg;
-#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
-/*0x02760*/ u64 sm_error_mask;
-/*0x02768*/ u64 sm_error_alarm;
-
- u8 unused027a8[0x027a8-0x02770];
-
-/*0x027a8*/ u64 txd_ownership_ctrl;
-#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
-/*0x027b0*/ u64 pcc_cfg;
-#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
-/*0x027b8*/ u64 pcc_control;
-#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
-#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
-/*0x027c0*/ u64 pda_status1;
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
-/*0x027c8*/ u64 rtdma_bw_timer;
-#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
-
- u8 unused02900[0x02900-0x027d0];
-/*0x02900*/ u64 g3cmct_int_status;
-#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x02908*/ u64 g3cmct_int_mask;
-/*0x02910*/ u64 g3cmct_err_reg;
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x02918*/ u64 g3cmct_err_mask;
-/*0x02920*/ u64 g3cmct_err_alarm;
- u8 unused03000[0x03000-0x02928];
-
-/*0x03000*/ u64 mc_int_status;
-#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
-#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
-#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
-#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
-/*0x03008*/ u64 mc_int_mask;
-/*0x03010*/ u64 mc_err_reg;
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
-#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
-/*0x03018*/ u64 mc_err_mask;
-/*0x03020*/ u64 mc_err_alarm;
-/*0x03028*/ u64 grocrc_alarm_reg;
-#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
-/*0x03030*/ u64 grocrc_alarm_mask;
-/*0x03038*/ u64 grocrc_alarm_alarm;
- u8 unused03100[0x03100-0x03040];
-
-/*0x03100*/ u64 rx_thresh_cfg_repl;
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
-#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
- u8 unused033b8[0x033b8-0x03108];
-
-/*0x033b8*/ u64 fbmc_ecc_cfg;
-#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
- u8 unused03400[0x03400-0x033c0];
-
-/*0x03400*/ u64 pcipif_int_status;
-#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
-#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
-#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
-#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
-#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
- vxge_mBIT(19)
-/*0x03408*/ u64 pcipif_int_mask;
-/*0x03410*/ u64 dbecc_err_reg;
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
-/*0x03418*/ u64 dbecc_err_mask;
-/*0x03420*/ u64 dbecc_err_alarm;
-/*0x03428*/ u64 sbecc_err_reg;
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
-/*0x03430*/ u64 sbecc_err_mask;
-/*0x03438*/ u64 sbecc_err_alarm;
-/*0x03440*/ u64 general_err_reg;
-#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
-/*0x03448*/ u64 general_err_mask;
-/*0x03450*/ u64 general_err_alarm;
-/*0x03458*/ u64 srpcim_msg_reg;
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
- vxge_mBIT(0)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
- vxge_mBIT(1)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
- vxge_mBIT(2)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
- vxge_mBIT(3)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
- vxge_mBIT(4)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
- vxge_mBIT(5)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
- vxge_mBIT(6)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
- vxge_mBIT(7)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
- vxge_mBIT(8)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
- vxge_mBIT(9)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
- vxge_mBIT(10)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
- vxge_mBIT(11)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
- vxge_mBIT(12)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
- vxge_mBIT(13)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
- vxge_mBIT(14)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
- vxge_mBIT(15)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
- vxge_mBIT(16)
-/*0x03460*/ u64 srpcim_msg_mask;
-/*0x03468*/ u64 srpcim_msg_alarm;
- u8 unused03600[0x03600-0x03470];
-
-/*0x03600*/ u64 gcmg1_int_status;
-#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
-#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
-/*0x03608*/ u64 gcmg1_int_mask;
- u8 unused03a00[0x03a00-0x03610];
-
-/*0x03a00*/ u64 pcmg1_int_status;
-#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
-/*0x03a08*/ u64 pcmg1_int_mask;
- u8 unused04000[0x04000-0x03a10];
-
-/*0x04000*/ u64 one_int_status;
-#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
- vxge_mBIT(13)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
- vxge_mBIT(14)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
-#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
-#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
-#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
-#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
-#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
-/*0x04008*/ u64 one_int_mask;
- u8 unused04818[0x04818-0x04010];
-
-/*0x04818*/ u64 noa_wct_ctrl;
-#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
-/*0x04820*/ u64 rc_cfg2;
-#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
-/*0x04828*/ u64 rc_cfg3;
-#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
-/*0x04830*/ u64 rx_multi_cast_ctrl1;
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
-/*0x04838*/ u64 rxdm_dbg_rd;
-#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
-/*0x04840*/ u64 rxdm_dbg_rd_data;
-#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x04848*/ u64 rqa_top_prty_for_vh[17];
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused04900[0x04900-0x048d0];
-
-/*0x04900*/ u64 tim_status;
-#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
-/*0x04908*/ u64 tim_ecc_enable;
-#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
-/*0x04910*/ u64 tim_bp_ctrl;
-#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
-#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
-#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
-/*0x04918*/ u64 tim_resource_assignment_vh[17];
-#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
-#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
- u8 unused04b00[0x04b00-0x04a28];
-
-/*0x04b00*/ u64 gcmg2_int_status;
-#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
-#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
-/*0x04b08*/ u64 gcmg2_int_mask;
-/*0x04b10*/ u64 gxtmc_err_reg;
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
- vxge_mBIT(21)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
- vxge_mBIT(22)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
- vxge_mBIT(24)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
- vxge_mBIT(25)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
-/*0x04b18*/ u64 gxtmc_err_mask;
-/*0x04b20*/ u64 gxtmc_err_alarm;
-/*0x04b28*/ u64 cmc_err_reg;
-#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
-/*0x04b30*/ u64 cmc_err_mask;
-/*0x04b38*/ u64 cmc_err_alarm;
-/*0x04b40*/ u64 gcp_err_reg;
-#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
-#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
-/*0x04b48*/ u64 gcp_err_mask;
-/*0x04b50*/ u64 gcp_err_alarm;
- u8 unused04f00[0x04f00-0x04b58];
-
-/*0x04f00*/ u64 pcmg2_int_status;
-#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
-/*0x04f08*/ u64 pcmg2_int_mask;
-/*0x04f10*/ u64 pxtmc_err_reg;
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
-/*0x04f18*/ u64 pxtmc_err_mask;
-/*0x04f20*/ u64 pxtmc_err_alarm;
-/*0x04f28*/ u64 cp_err_reg;
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
-#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
-#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
-#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
-#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
-#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
-#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
-#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
-#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
-#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
-/*0x04f30*/ u64 cp_err_mask;
-/*0x04f38*/ u64 cp_err_alarm;
- u8 unused04fe8[0x04f50-0x04f40];
-
-/*0x04f50*/ u64 cp_exc_reg;
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
-/*0x04f58*/ u64 cp_exc_mask;
-/*0x04f60*/ u64 cp_exc_alarm;
-/*0x04f68*/ u64 cp_exc_cause;
-#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
- u8 unused05200[0x05200-0x04f70];
-
-/*0x05200*/ u64 msg_int_status;
-#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
-#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
-/*0x05208*/ u64 msg_int_mask;
-/*0x05210*/ u64 tim_err_reg;
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
-#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
-/*0x05218*/ u64 tim_err_mask;
-/*0x05220*/ u64 tim_err_alarm;
-/*0x05228*/ u64 msg_err_reg;
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
- vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
-/*0x05230*/ u64 msg_err_mask;
-/*0x05238*/ u64 msg_err_alarm;
- u8 unused05340[0x05340-0x05240];
-
-/*0x05340*/ u64 msg_exc_reg;
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
-/*0x05348*/ u64 msg_exc_mask;
-/*0x05350*/ u64 msg_exc_alarm;
-/*0x05358*/ u64 msg_exc_cause;
-#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
- u8 unused05368[0x05380-0x05360];
-
-/*0x05380*/ u64 msg_err2_reg;
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
- vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
- vxge_mBIT(13)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
- vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
- vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
- vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
- vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
- vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
- vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
- vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
- vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
- vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
- vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
- vxge_mBIT(28)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(30)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
-/*0x05388*/ u64 msg_err2_mask;
-/*0x05390*/ u64 msg_err2_alarm;
-/*0x05398*/ u64 msg_err3_reg;
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
-/*0x053a0*/ u64 msg_err3_mask;
-/*0x053a8*/ u64 msg_err3_alarm;
- u8 unused05600[0x05600-0x053b0];
-
-/*0x05600*/ u64 fau_gen_err_reg;
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
-#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
-/*0x05608*/ u64 fau_gen_err_mask;
-/*0x05610*/ u64 fau_gen_err_alarm;
-/*0x05618*/ u64 fau_ecc_err_reg;
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 4, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 8, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 14, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 16, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
- vxge_vBIT(val, 18, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
- vxge_vBIT(val, 20, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
-/*0x05620*/ u64 fau_ecc_err_mask;
-/*0x05628*/ u64 fau_ecc_err_alarm;
- u8 unused05658[0x05658-0x05630];
-/*0x05658*/ u64 fau_pa_cfg;
-#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
- u8 unused05668[0x05668-0x05660];
-
-/*0x05668*/ u64 dbg_stats_fau_rx_path;
-#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused056c0[0x056c0-0x05670];
-
-/*0x056c0*/ u64 fau_lag_cfg;
-#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
- u8 unused05800[0x05800-0x056c8];
-
-/*0x05800*/ u64 tpa_int_status;
-#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
-#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
-#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
-/*0x05808*/ u64 tpa_int_mask;
-/*0x05810*/ u64 orp_err_reg;
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
-#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
-#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
-/*0x05818*/ u64 orp_err_mask;
-/*0x05820*/ u64 orp_err_alarm;
-/*0x05828*/ u64 ptm_alarm_reg;
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
-/*0x05830*/ u64 ptm_alarm_mask;
-/*0x05838*/ u64 ptm_alarm_alarm;
-/*0x05840*/ u64 tpa_error_reg;
-#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
-/*0x05848*/ u64 tpa_error_mask;
-/*0x05850*/ u64 tpa_error_alarm;
-/*0x05858*/ u64 tpa_global_cfg;
-#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
- u8 unused05868[0x05870-0x05860];
-
-/*0x05870*/ u64 ptm_ecc_cfg;
-#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
-/*0x05878*/ u64 ptm_phase_cfg;
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
- u8 unused05898[0x05898-0x05880];
-
-/*0x05898*/ u64 dbg_stats_tpa_tx_path;
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused05900[0x05900-0x058a0];
-
-/*0x05900*/ u64 tmac_int_status;
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
-/*0x05908*/ u64 tmac_int_mask;
-/*0x05910*/ u64 txmac_gen_err_reg;
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
-/*0x05918*/ u64 txmac_gen_err_mask;
-/*0x05920*/ u64 txmac_gen_err_alarm;
-/*0x05928*/ u64 txmac_ecc_err_reg;
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
-/*0x05930*/ u64 txmac_ecc_err_mask;
-/*0x05938*/ u64 txmac_ecc_err_alarm;
- u8 unused05978[0x05978-0x05940];
-
-/*0x05978*/ u64 dbg_stat_tx_any_frms;
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused059a0[0x059a0-0x05980];
-
-/*0x059a0*/ u64 txmac_link_util_port[3];
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
-/*0x059b8*/ u64 txmac_cfg0_port[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
-/*0x059d0*/ u64 txmac_cfg1_port[3];
-#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
-/*0x059e8*/ u64 txmac_status_port[3];
-#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
- u8 unused05a20[0x05a20-0x05a00];
-
-/*0x05a20*/ u64 lag_distrib_dest;
-#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
-/*0x05a28*/ u64 lag_marker_cfg;
-#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
-/*0x05a30*/ u64 lag_tx_cfg;
-#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
-#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
-/*0x05a38*/ u64 lag_tx_status;
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused05d48[0x05d48-0x05a40];
-
-/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
-#define \
-VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
- vxge_vBIT(val, 0, 64)
- u8 unused06420[0x06420-0x05dd0];
-
-/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
-#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];
-
-/*0x06530*/ u64 debug_stats0;
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
-/*0x06538*/ u64 debug_stats1;
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
-/*0x06540*/ u64 debug_stats2;
-#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
-/*0x06548*/ u64 debug_stats3_vplane[17];
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
-/*0x065d0*/ u64 debug_stats4_vplane[17];
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
-
- u8 unused07000[0x07000-0x06658];
-
-/*0x07000*/ u64 mrpcim_general_int_status;
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
-/*0x07008*/ u64 mrpcim_general_int_mask;
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
-/*0x07010*/ u64 mrpcim_ppif_int_status;
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
- vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
- vxge_mBIT(32)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
- vxge_mBIT(33)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
- vxge_mBIT(34)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
- vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
- vxge_mBIT(36)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
- vxge_mBIT(37)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
- vxge_mBIT(38)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
- vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
- vxge_mBIT(40)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
- vxge_mBIT(41)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
- vxge_mBIT(42)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
- vxge_mBIT(43)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
- vxge_mBIT(44)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
- vxge_mBIT(45)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
- vxge_mBIT(46)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
- vxge_mBIT(47)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
- vxge_mBIT(55)
-/*0x07018*/ u64 mrpcim_ppif_int_mask;
- u8 unused07028[0x07028-0x07020];
-
-/*0x07028*/ u64 ini_errors_reg;
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
-#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
-#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
-#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
-#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
-/*0x07030*/ u64 ini_errors_mask;
-/*0x07038*/ u64 ini_errors_alarm;
-/*0x07040*/ u64 dma_errors_reg;
-#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
-/*0x07048*/ u64 dma_errors_mask;
-/*0x07050*/ u64 dma_errors_alarm;
-/*0x07058*/ u64 tgt_errors_reg;
-#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
-/*0x07060*/ u64 tgt_errors_mask;
-/*0x07068*/ u64 tgt_errors_alarm;
-/*0x07070*/ u64 config_errors_reg;
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
-/*0x07078*/ u64 config_errors_mask;
-/*0x07080*/ u64 config_errors_alarm;
- u8 unused07090[0x07090-0x07088];
-
-/*0x07090*/ u64 crdt_errors_reg;
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
- vxge_mBIT(47)
-/*0x07098*/ u64 crdt_errors_mask;
-/*0x070a0*/ u64 crdt_errors_alarm;
- u8 unused070b0[0x070b0-0x070a8];
-
-/*0x070b0*/ u64 mrpcim_general_errors_reg;
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
- vxge_mBIT(47)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
-/*0x070b8*/ u64 mrpcim_general_errors_mask;
-/*0x070c0*/ u64 mrpcim_general_errors_alarm;
- u8 unused070d0[0x070d0-0x070c8];
-
-/*0x070d0*/ u64 pll_errors_reg;
-#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
-/*0x070d8*/ u64 pll_errors_mask;
-/*0x070e0*/ u64 pll_errors_alarm;
-/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
-/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
-/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
-#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
-/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
- u8 unused07128[0x07128-0x07118];
-
-/*0x07128*/ u64 crdt_errors_vplane_reg[17];
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
- vxge_mBIT(7)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
- vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
- vxge_mBIT(31)
-/*0x07130*/ u64 crdt_errors_vplane_mask[17];
-/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
- u8 unused072f0[0x072f0-0x072c0];
-
-/*0x072f0*/ u64 mrpcim_rst_in_prog;
-#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
-/*0x072f8*/ u64 mrpcim_reg_modified;
-#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
-
- u8 unused07378[0x07378-0x07300];
-
-/*0x07378*/ u64 write_arb_pending;
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
-/*0x07380*/ u64 read_arb_pending;
-#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
-/*0x07388*/ u64 dmaif_dmadbl_pending;
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
- vxge_vBIT(val, 13, 51)
-/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
- vxge_vBIT(val, 0, 8)
-/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
- vxge_vBIT(val, 4, 12)
- u8 unused07500[0x07500-0x074a0];
-
-/*0x07500*/ u64 mrpcim_general_cfg1;
-#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
-/*0x07508*/ u64 mrpcim_general_cfg2;
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
- vxge_vBIT(val, 47, 5)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
-/*0x07510*/ u64 mrpcim_general_cfg3;
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
- vxge_vBIT(val, 36, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
-/*0x07518*/ u64 mrpcim_stats_start_host_addr;
-#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
- vxge_vBIT(val, 0, 57)
-
- u8 unused07950[0x07950-0x07520];
-
-/*0x07950*/ u64 rdcrdtarb_cfg0;
-#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 26, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 34, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
-#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
- u8 unused07be8[0x07be8-0x07958];
-
-/*0x07be8*/ u64 bf_sw_reset;
-#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x07bf0*/ u64 sw_reset_status;
-#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
-#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
- u8 unused07d30[0x07d30-0x07bf8];
-
-/*0x07d30*/ u64 mrpcim_debug_stats0;
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
-/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed0*/ u64 mrpcim_debug_stats4;
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed8*/ u64 genstats_count01;
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
-/*0x07ee0*/ u64 genstats_count23;
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
-/*0x07ee8*/ u64 genstats_count4;
-#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
-/*0x07ef0*/ u64 genstats_count5;
-#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
-
- u8 unused07f08[0x07f08-0x07ef8];
-
-/*0x07f08*/ u64 genstats_cfg[6];
-#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
-/*0x07f38*/ u64 genstat_64bit_cfg;
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
- u8 unused08000[0x08000-0x07f40];
-/*0x08000*/ u64 gcmg3_int_status;
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
-/*0x08008*/ u64 gcmg3_int_mask;
- u8 unused09000[0x09000-0x08010];
-
-/*0x09000*/ u64 g3ifcmd_fb_int_status;
-#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09008*/ u64 g3ifcmd_fb_int_mask;
-/*0x09010*/ u64 g3ifcmd_fb_err_reg;
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09018*/ u64 g3ifcmd_fb_err_mask;
-/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
-
- u8 unused09400[0x09400-0x09028];
-
-/*0x09400*/ u64 g3ifcmd_cmu_int_status;
-#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
-/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
-/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
-
- u8 unused09800[0x09800-0x09428];
-
-/*0x09800*/ u64 g3ifcmd_cml_int_status;
-#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09808*/ u64 g3ifcmd_cml_int_mask;
-/*0x09810*/ u64 g3ifcmd_cml_err_reg;
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09818*/ u64 g3ifcmd_cml_err_mask;
-/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
- u8 unused09b00[0x09b00-0x09828];
-
-/*0x09b00*/ u64 vpath_to_vplane_map[17];
-#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
- vxge_vBIT(val, 3, 5)
- u8 unused09c30[0x09c30-0x09b88];
-
-/*0x09c30*/ u64 xgxs_cfg_port[2];
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
-/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
- vxge_vBIT(val, 16, 48)
-/*0x09c50*/ u64 xgxs_rxber_status_port[2];
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
- vxge_vBIT(val, 48, 16)
-/*0x09c60*/ u64 xgxs_status_port[2];
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR BIT(11)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
- vxge_vBIT(val, 36, 4)
-/*0x09c70*/ u64 xgxs_pma_reset_port[2];
-#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
- u8 unused09c90[0x09c90-0x09c80];
-
-/*0x09c90*/ u64 xgxs_static_cfg_port[2];
-#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
- u8 unused09d40[0x09d40-0x09ca0];
-
-/*0x09d40*/ u64 xgxs_info_port[2];
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
-/*0x09d50*/ u64 ratemgmt_cfg_port[2];
-#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
-/*0x09d60*/ u64 ratemgmt_status_port[2];
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
- u8 unused09d80[0x09d80-0x09d70];
-
-/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
-#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
-/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
- vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
-/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
- vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
- vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
-/*0x09db0*/ u64 anbe_cfg_port[2];
-#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
-#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
-/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused09de0[0x09de0-0x09dd0];
-
-/*0x09de0*/ u64 anbe_fw_mstr_port[2];
-#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
-#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
-/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
- vxge_mBIT(3)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
- vxge_mBIT(7)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
- vxge_mBIT(11)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
- vxge_mBIT(15)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
- vxge_mBIT(27)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
- vxge_mBIT(35)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
- vxge_mBIT(39)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
- vxge_mBIT(43)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
- vxge_mBIT(47)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
- vxge_mBIT(51)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
- vxge_mBIT(55)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
- vxge_vBIT(val, 60, 4)
-/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
- vxge_mBIT(32)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
- vxge_mBIT(33)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
- vxge_mBIT(40)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
- vxge_mBIT(41)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
- vxge_mBIT(42)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
- vxge_mBIT(50)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
- vxge_vBIT(val, 54, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 59, 5)
-/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused09e30[0x09e30-0x09e20];
-
-/*0x09e30*/ u64 antp_gen_cfg_port[2];
-/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
- vxge_vBIT(val, 10, 6)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
- vxge_mBIT(23)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
- vxge_mBIT(27)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
- vxge_mBIT(35)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
- vxge_mBIT(43)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
- vxge_mBIT(51)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
- vxge_mBIT(59)
-/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
- vxge_vBIT(val, 4, 7)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 11, 5)
-/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
- vxge_vBIT(val, 5, 11)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
- vxge_vBIT(val, 32, 16)
-/*0x09e70*/ u64 mdio_mgr_access_port[2];
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
- u8 unused0a200[0x0a200-0x09e80];
-/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused0a400[0x0a400-0x0a288];
-
-/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
- u8 unused0ac90[0x0ac90-0x0a488];
-} __packed;
-
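Every register word removed above is a u64 read in MSB-first bit order: position 0 is the most significant bit. The vxge_mBIT/vxge_vBIT/vxge_bVALn helpers are defined earlier in this header; the sketch below reconstructs them from their usage in this hunk and is an assumption, not part of the diff:

	/* Single flag at MSB-first position 'loc' (0 selects bit 63 of the u64). */
	#define vxge_mBIT(loc)		(0x8000000000000000ULL >> (loc))
	/* Left-align an 'sz'-bit value so its first bit lands at position 'loc'. */
	#define vxge_vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))
	/* Extract the 'n'-bit field at position 'loc' from a register word. */
	#define vxge_bVALn(bits, loc, n) \
		((((u64)(bits)) >> (64 - (loc) - (n))) & ((0x1ULL << (n)) - 1))

Under this convention vxge_vBIT(val, 0, 32) fills the upper half of the word and vxge_vBIT(val, 32, 32) the lower half, which is how the COUNT1/COUNT0 pair of genstats_count01 above share one 64-bit register.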
-/*VXGE_HW_SRPCIM_REGS_H*/
-struct vxge_hw_srpcim_reg {
-
-/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
-#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
- vxge_vBIT(val, 0, 32)
- u8 unused00100[0x00100-0x00008];
-
-/*0x00100*/ u64 srpcim_pcipif_int_status;
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
- BIT(11)
-/*0x00108*/ u64 srpcim_pcipif_int_mask;
-/*0x00110*/ u64 mrpcim_msg_reg;
-#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
-/*0x00118*/ u64 mrpcim_msg_mask;
-/*0x00120*/ u64 mrpcim_msg_alarm;
-/*0x00128*/ u64 vpath_msg_reg;
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
-/*0x00130*/ u64 vpath_msg_mask;
-/*0x00138*/ u64 vpath_msg_alarm;
- u8 unused00160[0x00160-0x00140];
-
-/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
-/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
- vxge_vBIT(val, 0, 5)
-/*0x00180*/ u64 vpath_to_srpcim_rmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
- u8 unused00200[0x00200-0x00188];
-
-/*0x00200*/ u64 srpcim_general_int_status;
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
- u8 unused00210[0x00210-0x00208];
-
-/*0x00210*/ u64 srpcim_general_int_mask;
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
- u8 unused00220[0x00220-0x00218];
-
-/*0x00220*/ u64 srpcim_ppif_int_status;
-
-/*0x00228*/ u64 srpcim_ppif_int_mask;
-/*0x00230*/ u64 srpcim_gen_errors_reg;
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
-/*0x00238*/ u64 srpcim_gen_errors_mask;
-/*0x00240*/ u64 srpcim_gen_errors_alarm;
-/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
-/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
-/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
-/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;
-
-/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
-/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
- u8 unused00280[0x00280-0x00278];
-
-/*0x00280*/ u64 pf_sw_reset;
-#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x00288*/ u64 srpcim_general_cfg1;
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
-/*0x00290*/ u64 srpcim_interrupt_cfg1;
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
- u8 unused002a8[0x002a8-0x00298];
-
-/*0x002a8*/ u64 srpcim_clear_msix_mask;
-#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
-/*0x002b0*/ u64 srpcim_set_msix_mask;
-#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
-/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
-#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
-/*0x002c0*/ u64 srpcim_rst_in_prog;
-#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
-/*0x002c8*/ u64 srpcim_reg_modified;
-#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
-/*0x002d0*/ u64 tgt_pf_illegal_access;
-#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
-/*0x002d8*/ u64 srpcim_msix_status;
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
- u8 unused00880[0x00880-0x002e0];
-
-/*0x00880*/ u64 xgmac_sr_int_status;
-#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
-/*0x00888*/ u64 xgmac_sr_int_mask;
-/*0x00890*/ u64 asic_ntwk_sr_err_reg;
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
- BIT(11)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
-/*0x00898*/ u64 asic_ntwk_sr_err_mask;
-/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
- u8 unused008c0[0x008c0-0x008a8];
-
-/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused00900[0x00900-0x008c8];
-
-/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
-#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00908*/ u64 umq_vh_data_list_empty;
-#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
- BIT(0)
-/*0x00910*/ u64 wde_cfg;
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
-#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
-#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
-#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
-#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
-#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
-#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
-#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
-#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)
-
-} __packed;
-
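The error blocks in this struct all follow the same reg/mask/alarm triple at consecutive offsets: the *_reg word latches events, *_mask gates which of them raise the alarm, and *_alarm records the gated result. A minimal polling sketch, assuming the latched word is write-one-to-clear (the helper name and the W1C acknowledge are illustrative assumptions, not taken from this hunk):

	#include <linux/io.h>

	/* Return the unmasked latched errors and acknowledge them. */
	static u64 srpcim_gen_errors_poll(struct vxge_hw_srpcim_reg __iomem *regs)
	{
		u64 latched = readq(&regs->srpcim_gen_errors_reg);
		u64 masked = readq(&regs->srpcim_gen_errors_mask);

		writeq(latched, &regs->srpcim_gen_errors_reg);	/* assumed W1C */
		return latched & ~masked;
	}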
-/*VXGE_HW_VPMGMT_REGS_H*/
-struct vxge_hw_vpmgmt_reg {
-
- u8 unused00040[0x00040-0x00000];
-
-/*0x00040*/ u64 vpath_to_func_map_cfg1;
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
- vxge_vBIT(val, 3, 5)
-/*0x00048*/ u64 vpath_is_first;
-#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
-/*0x00050*/ u64 srpcim_to_vpath_wmsg;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused00100[0x00100-0x00060];
-
-/*0x00100*/ u64 tim_vpath_assignment;
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
- u8 unused00140[0x00140-0x00108];
-
-/*0x00140*/ u64 rqa_top_prty_for_vp;
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused001c0[0x001c0-0x00148];
-
-/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
- vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
- vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
- vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
- vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
-/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
- vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
- vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
- vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
- vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
- vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
- vxge_vBIT(val, 37, 3)
-/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
- vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
- vxge_vBIT(val, 50, 14)
-/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
- vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
- vxge_mBIT(59)
- u8 unused00240[0x00240-0x00208];
-
-/*0x00240*/ u64 xmac_vsport_choices_vp;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused00260[0x00260-0x00248];
-
-/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
- vxge_mBIT(11)
-/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
- vxge_mBIT(3)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
-/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
- vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
- vxge_vBIT(val, 32, 4)
-/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
- vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
- vxge_vBIT(val, 32, 16)
-/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
-/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
- u8 unused002c0[0x002c0-0x002a8];
-
-/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
-#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
-/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
- u8 unused00300[0x00300-0x002e0];
-
-/*0x00300*/ u64 wol_mp_crc;
-#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
-/*0x00308*/ u64 wol_mp_mask_a;
-#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x00310*/ u64 wol_mp_mask_b;
-#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
- u8 unused00360[0x00360-0x00318];
-
-/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
-/*0x00368*/ u64 rx_datapath_util_vp_clone;
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
- u8 unused00380[0x00380-0x00370];
-
-/*0x00380*/ u64 tx_datapath_util_vp_clone;
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
-
-} __packed;
-
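The u8 unusedXXXXX[end-start] members are explicit padding that pins every field to the offset recorded in its /*0x....*/ comment, so the struct can be laid directly over the device's register mapping. A hypothetical compile-time guard (not present in the original) that would catch padding drift:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* wol_mp_crc is documented above at offset 0x00300 within the struct. */
	static_assert(offsetof(struct vxge_hw_vpmgmt_reg, wol_mp_crc) == 0x00300,
		      "vxge_hw_vpmgmt_reg layout drifted from documented offsets");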
-struct vxge_hw_vpath_reg {
-
- u8 unused00300[0x00300];
-
-/*0x00300*/ u64 usdc_vpath;
-#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
- u8 unused00a00[0x00a00-0x00308];
-
-/*0x00a00*/ u64 wrdma_alarm_status;
-#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
-/*0x00a08*/ u64 wrdma_alarm_mask;
- u8 unused00a30[0x00a30-0x00a10];
-
-/*0x00a30*/ u64 prc_alarm_reg;
-#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
-#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
-/*0x00a38*/ u64 prc_alarm_mask;
-/*0x00a40*/ u64 prc_alarm_alarm;
-/*0x00a48*/ u64 prc_cfg1;
-#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
-#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
-#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
-#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
- u8 unused00a60[0x00a60-0x00a50];
-
-/*0x00a60*/ u64 prc_cfg4;
-#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
-#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
-#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
-#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
-#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
-#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
-/*0x00a68*/ u64 prc_cfg5;
-#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
-/*0x00a70*/ u64 prc_cfg6;
-#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
-#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
-#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
-#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
-#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
-#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
-#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
-#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
-/*0x00a78*/ u64 prc_cfg7;
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
-#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
-#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
-#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
-/*0x00a80*/ u64 tim_dest_addr;
-#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
-/*0x00a88*/ u64 prc_rxd_doorbell;
-#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
-/*0x00a90*/ u64 rqa_prty_for_vp;
-#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
-/*0x00a98*/ u64 rxdmem_size;
-#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
-/*0x00aa0*/ u64 frm_in_progress_cnt;
-#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00aa8*/ u64 rx_multi_cast_stats;
-#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
-/*0x00ab0*/ u64 rx_frm_transferred;
-#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x00ab8*/ u64 rxd_returned;
-#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
- u8 unused00c00[0x00c00-0x00ac0];
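prc_rxd_doorbell above credits the PRC with newly posted receive descriptors, expressed as a count of 64-bit quadwords via NEW_QW_CNT. A hedged sketch of the post path (the helper name and the qwords-per-RxD parameter are illustrative assumptions):

	#include <linux/io.h>

	static void vxge_ring_doorbell_post(struct vxge_hw_vpath_reg __iomem *regs,
					    unsigned int rxds, unsigned int qw_per_rxd)
	{
		/* Tell the PRC how many quadwords of RxDs were just made valid. */
		writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(rxds * qw_per_rxd),
		       &regs->prc_rxd_doorbell);
	}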
-
-/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
-/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
-#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
-/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
-/*0x00c48*/ u64 kdfc_drbl_triplet_total;
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
- vxge_vBIT(val, 17, 15)
- u8 unused00c60[0x00c60-0x00c50];
-
-/*0x00c60*/ u64 usdc_drbl_ctrl;
-#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
-/*0x00c68*/ u64 usdc_vp_ready;
-#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
-#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
-#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
-/*0x00c70*/ u64 kdfc_status;
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
- u8 unused00c80[0x00c80-0x00c78];
-
-/*0x00c80*/ u64 xmac_rpa_vcfg;
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
-/*0x00c88*/ u64 rxmac_vcfg0;
-#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
-#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
-#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
-#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
-#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
-/*0x00c90*/ u64 rxmac_vcfg1;
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
-/*0x00c98*/ u64 rts_access_steer_ctrl;
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
-/*0x00ca0*/ u64 rts_access_steer_data0;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00ca8*/ u64 rts_access_steer_data1;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused00d00[0x00d00-0x00cb0];
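rts_access_steer_ctrl above is a command/strobe interface: software loads rts_access_steer_data0/1, programs ACTION and DATA_STRUCT_SEL, sets STROBE, then polls for the strobe to clear and checks RMACJ_STATUS, which presumably reports success. A sketch under those assumptions (timeout handling elided; the helper name is hypothetical):

	#include <linux/errno.h>
	#include <linux/io.h>

	static int rts_steer_cmd(struct vxge_hw_vpath_reg __iomem *regs,
				 u64 action, u64 sel, u64 data0, u64 data1)
	{
		u64 ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
			   VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(sel) |
			   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;

		writeq(data0, &regs->rts_access_steer_data0);
		writeq(data1, &regs->rts_access_steer_data1);
		writeq(ctrl, &regs->rts_access_steer_ctrl);

		do {	/* real code would bound this loop with a timeout */
			ctrl = readq(&regs->rts_access_steer_ctrl);
		} while (ctrl & VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE);

		return (ctrl & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) ? 0 : -EIO;
	}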
-
-/*0x00d00*/ u64 xmac_vsport_choice;
-#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
-/*0x00d08*/ u64 xmac_stats_cfg;
-/*0x00d10*/ u64 xmac_stats_access_cmd;
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x00d18*/ u64 xmac_stats_access_data;
-#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
- u8 unused00d30[0x00d30-0x00d28];
-
-/*0x00d30*/ u64 xgmac_vp_int_status;
-#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
- vxge_mBIT(3)
-/*0x00d38*/ u64 xgmac_vp_int_mask;
-/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
- vxge_mBIT(11)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
- vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
-/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
- u8 unused00d80[0x00d80-0x00d58];
-
-/*0x00d80*/ u64 rtdma_bw_ctrl;
-#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
-#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
-/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
-#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
- vxge_vBIT(val, 37, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
- vxge_vBIT(val, 61, 3)
-/*0x00d90*/ u64 pda_pcc_job_monitor;
-#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
-/*0x00d98*/ u64 tx_protocol_assist_cfg;
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
- u8 unused01000[0x01000-0x00da0];
-
-/*0x01000*/ u64 tim_cfg1_int_num[4];
-#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
-/*0x01020*/ u64 tim_cfg2_int_num[4];
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
-/*0x01040*/ u64 tim_cfg3_int_num[4];
-#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
-#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
-/*0x01060*/ u64 tim_wrkld_clc;
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
-#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
-/*0x01068*/ u64 tim_bitmap;
-#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
-/*0x01070*/ u64 tim_ring_assn;
-#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 tim_remap;
-#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
-#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
-#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
-#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
-/*0x01080*/ u64 tim_vpath_map;
-#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x01088*/ u64 tim_pci_cfg;
-#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
-#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
-#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
-#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
- u8 unused01100[0x01100-0x01090];
-
-/*0x01100*/ u64 sgrp_assign;
-#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
-/*0x01108*/ u64 sgrp_aoa_and_result;
-#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01110*/ u64 rpe_pci_cfg;
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
-#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
-#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
-/*0x01118*/ u64 rpe_lro_cfg;
-#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
-#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
-/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
-#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
-/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 32, 32)
-/*0x01130*/ u64 txpe_pci_nce_cfg;
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
- u8 unused01180[0x01180-0x01138];
-
-/*0x01180*/ u64 msg_qpad_en_cfg;
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
-#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
-/*0x01188*/ u64 msg_pci_cfg;
-#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
-#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
-#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
-#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
-/*0x01190*/ u64 umqdmq_ir_init;
-#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x01198*/ u64 dmq_ir_int;
-#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011a0*/ u64 dmq_bwr_init_add;
-#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011a8*/ u64 dmq_bwr_init_byte;
-#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011b0*/ u64 dmq_ir;
-#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
-/*0x011b8*/ u64 umq_int;
-#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
-#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
-/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
-#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
-/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
-#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
-/*0x011d8*/ u64 umq_bwr_init_add;
-#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011e0*/ u64 umq_bwr_init_byte;
-#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011e8*/ u64 gendma_int;
-/*0x011f0*/ u64 umqdmq_ir_init_notify;
-#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x011f8*/ u64 dmq_init_notify;
-#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x01200*/ u64 umq_init_notify;
-#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
- u8 unused01380[0x01380-0x01208];
-
-/*0x01380*/ u64 tpa_cfg;
-#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
-#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
-#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
- u8 unused01400[0x01400-0x01388];
-
-/*0x01400*/ u64 tx_vp_reset_discarded_frms;
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 48, 16)
- u8 unused01480[0x01480-0x01408];
-
-/*0x01480*/ u64 fau_rpa_vcfg;
-#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
-#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
-#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
- u8 unused014d0[0x014d0-0x01488];
-
-/*0x014d0*/ u64 dbg_stats_rx_mpa;
-#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
-/*0x014d8*/ u64 dbg_stats_rx_fau;
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused014f0[0x014f0-0x014e0];
-
-/*0x014f0*/ u64 fbmc_vp_rdy;
-#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
- u8 unused01e00[0x01e00-0x014f8];
-
-/*0x01e00*/ u64 vpath_pcipif_int_status;
-#define \
-VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
- vxge_mBIT(7)
-/*0x01e08*/ u64 vpath_pcipif_int_mask;
- u8 unused01e20[0x01e20-0x01e10];
-
-/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
-#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
- vxge_mBIT(3)
-/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
-/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
- u8 unused01ea0[0x01ea0-0x01e38];
-
-/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused02000[0x02000-0x01eb0];
-
-/*0x02000*/ u64 vpath_general_int_status;
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
-/*0x02008*/ u64 vpath_general_int_mask;
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
-/*0x02010*/ u64 vpath_ppif_int_status;
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
- vxge_mBIT(7)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
- vxge_mBIT(11)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(15)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(19)
-/*0x02018*/ u64 vpath_ppif_int_mask;
-/*0x02020*/ u64 kdfcctl_errors_reg;
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
-/*0x02028*/ u64 kdfcctl_errors_mask;
-/*0x02030*/ u64 kdfcctl_errors_alarm;
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 general_errors_reg;
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
-#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
-/*0x02048*/ u64 general_errors_mask;
-/*0x02050*/ u64 general_errors_alarm;
-/*0x02058*/ u64 pci_config_errors_reg;
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
-/*0x02060*/ u64 pci_config_errors_mask;
-/*0x02068*/ u64 pci_config_errors_alarm;
-/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
- vxge_mBIT(3)
-/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
-/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
-/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
-/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
- u8 unused02108[0x02108-0x020a0];
-
-/*0x02108*/ u64 kdfcctl_status;
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
-/*0x02110*/ u64 rsthdlr_status;
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
-/*0x02118*/ u64 fifo0_status;
-#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02120*/ u64 fifo1_status;
-#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02128*/ u64 fifo2_status;
-#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
- u8 unused02158[0x02158-0x02130];
-
-/*0x02158*/ u64 tgt_illegal_access;
-#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
- u8 unused02200[0x02200-0x02160];
-
-/*0x02200*/ u64 vpath_general_cfg1;
-#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
-/*0x02208*/ u64 vpath_general_cfg2;
-#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
-/*0x02210*/ u64 vpath_general_cfg3;
-#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
- u8 unused02220[0x02220-0x02218];
-
-/*0x02220*/ u64 kdfcctl_cfg0;
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
-
- u8 unused02268[0x02268-0x02228];
-
-/*0x02268*/ u64 stats_cfg;
-#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
-/*0x02270*/ u64 interrupt_cfg0;
-#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
- u8 unused02280[0x02280-0x02278];
-
-/*0x02280*/ u64 interrupt_cfg2;
-#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-/*0x02288*/ u64 one_shot_vect0_en;
-#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
-/*0x02290*/ u64 one_shot_vect1_en;
-#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
-/*0x02298*/ u64 one_shot_vect2_en;
-#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
-/*0x022a0*/ u64 one_shot_vect3_en;
-#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
- u8 unused022b0[0x022b0-0x022a8];
-
-/*0x022b0*/ u64 pci_config_access_cfg1;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
-/*0x022b8*/ u64 pci_config_access_cfg2;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
-/*0x022c0*/ u64 pci_config_access_status;
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused02300[0x02300-0x022c8];
-
-/*0x02300*/ u64 vpath_debug_stats0;
-#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02308*/ u64 vpath_debug_stats1;
-#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02310*/ u64 vpath_debug_stats2;
-#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
-/*0x02318*/ u64 vpath_debug_stats3;
-#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02320*/ u64 vpath_debug_stats4;
-#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02328*/ u64 vpath_debug_stats5;
-#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02330*/ u64 vpath_debug_stats6;
-#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02338*/ u64 vpath_genstats_count01;
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02340*/ u64 vpath_genstats_count23;
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02348*/ u64 vpath_genstats_count4;
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02350*/ u64 vpath_genstats_count5;
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused02648[0x02648-0x02358];
-} __packed;
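
A note on the block above for readers skimming the diff: the u8 unusedXXXX[b - a] members are deliberate padding that pins every register to the absolute offset named in the /*0x....*/ comments, so the __packed struct can be laid directly over the mapped BAR. A minimal stand-alone illustration of the idiom (names hypothetical, not from the driver):

#include <stddef.h>

struct demo_reg {
/*0x0000*/ unsigned long long cfg0;
/*0x0008*/ unsigned long long cfg1;
	   unsigned char unused0040[0x0040 - 0x0010]; /* skip 0x10..0x3f */
/*0x0040*/ unsigned long long status;
} __attribute__((packed));

/* Compile-time proof that the padding pins 'status' to offset 0x40;
 * this is the guarantee every unusedXXXX member above provides. */
_Static_assert(offsetof(struct demo_reg, status) == 0x40,
	       "register map drifted");
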
-
-#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
-
-/* Capability lists */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link widths. */
-#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
-
-#endif
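
The field macros throughout this header rely on Titan's big-endian bit numbering, where bit 0 is the most significant bit of a 64-bit register. The helpers below are stand-ins written to that convention so the (val, loc, sz) triples can be sanity-checked; they are consistent with how the header uses vxge_mBIT/vxge_vBIT/vxge_bVALn but are not copied from the driver's own definitions:

#include <stdint.h>
#include <assert.h>

/* Big-endian bit helpers: bit 0 = MSB of the 64-bit register.
 * Valid for 0 < n < 64. */
#define mBIT(loc)          (0x8000000000000000ULL >> (loc))
#define vBIT(val, loc, sz) (((uint64_t)(val)) << (64 - (loc) - (sz)))
#define bVALn(bits, loc, n) \
	((((uint64_t)(bits)) >> (64 - (loc) - (n))) & ((1ULL << (n)) - 1))

int main(void)
{
	/* e.g. RXMAC_VCFG0_RTS_MAX_FRM_LEN packs a 14-bit value at bit 2 */
	uint64_t v = vBIT(1514, 2, 14);

	assert(bVALn(v, 2, 14) == 1514);
	assert(mBIT(0) == (1ULL << 63));
	return 0;
}
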
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
deleted file mode 100644
index ee164970b267..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ /dev/null
@@ -1,2428 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/prefetch.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-/*
- * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Enable vpath interrupts. The function is to be executed last in the
- * vpath initialization sequence.
- *
- * See also: vxge_hw_vpath_intr_disable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_status);
-
- readq(&vp_reg->vpath_general_int_status);
-
- /* Mask unwanted interrupts */
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- /* Unmask the individual interrupts */
-
- writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
- &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
-
- if (vpath->hldev->first_vp_id != vpath->vp_id)
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
- else
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_pio_mem_write32_upper(0,
- &vp_reg->vpath_general_int_mask);
-exit:
- return status;
-
-}
-
-/*
- * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Disable vpath interrupts. This is the counterpart of
- * vxge_hw_vpath_intr_enable() and is executed when the vpath is shut down.
- *
- * See also: vxge_hw_vpath_intr_enable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_general_int_mask);
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
-
-exit:
- return status;
-}
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
- return;
-
- vp_reg = fifo->vp_reg;
- config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
-
- if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- fifo->tim_tti_cfg1_saved = val64;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg1_saved;
-
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- ring->tim_rti_cfg1_saved = val64;
- writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-}
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
-{
- u64 val64 = fifo->tim_tti_cfg3_saved;
- u64 timer = (fifo->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
-
- writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- /* tti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg3_saved;
- u64 timer = (ring->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
-
- writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- /* rti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-/**
- * vxge_hw_channel_msix_mask - Mask MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function masks the MSIX interrupt for the given msix_id.
- */
-void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->set_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function unmasks the MSIX interrupt for the given msix_id.
- */
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_clear - Clear the MSIX vector (one-shot mode).
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function clears the one-shot mask for the given msix_id, re-arming
- * the vector when the device is configured in MSIX one-shot mode.
- */
-void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
-{
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
-}
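
The three helpers above share one addressing scheme: the set/clear registers come in banks of four, indexed by msix_id % 4, while the big-endian bit inside the selected register is msix_id >> 2. A small stand-alone illustration of that split (the reg/bit arithmetic is lifted from the calls above; everything else is hypothetical):

#include <stdio.h>

/* Mirror of vxge_hw_channel_msix_mask()'s addressing:
 * set_msix_mask_vect[msix_id % 4] and vxge_mBIT(msix_id >> 2). */
static void msix_slot(int msix_id, int *reg, int *bit)
{
	*reg = msix_id % 4;  /* which of the four mask registers */
	*bit = msix_id >> 2; /* big-endian bit within that register */
}

int main(void)
{
	for (int id = 0; id < 8; id++) {
		int reg, bit;

		msix_slot(id, &reg, &bit);
		printf("vector %d -> reg %d, bit %d\n", id, reg, bit);
	}
	return 0;
}
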
-
-/**
- * vxge_hw_device_set_intr_type - Updates the configuration
- * with the new interrupt type.
- * @hldev: HW device handle.
- * @intr_mode: New interrupt type
- */
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
-{
-
- if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (intr_mode != VXGE_HW_INTR_MODE_DEF))
- intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
-
- hldev->config.intr_mode = intr_mode;
- return intr_mode;
-}
-
-/**
- * vxge_hw_device_intr_enable - Enable interrupts.
- * @hldev: HW device handle.
- *
- * Enable Titan interrupts. The function is to be executed last in the
- * Titan initialization sequence.
- *
- * See also: vxge_hw_device_intr_disable()
- */
-void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
-{
- u32 i;
- u64 val64;
- u32 val32;
-
- vxge_hw_device_mask_all(hldev);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_enable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
- val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
-
- if (val64 != 0) {
- writeq(val64, &hldev->common_reg->tim_int_status0);
-
- writeq(~val64, &hldev->common_reg->tim_int_mask0);
- }
-
- val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
-
- if (val32 != 0) {
- __vxge_hw_pio_mem_write32_upper(val32,
- &hldev->common_reg->tim_int_status1);
-
- __vxge_hw_pio_mem_write32_upper(~val32,
- &hldev->common_reg->tim_int_mask1);
- }
- }
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- vxge_hw_device_unmask_all(hldev);
-}
-
-/**
- * vxge_hw_device_intr_disable - Disable Titan interrupts.
- * @hldev: HW device handle.
- *
- * Disable Titan interrupts.
- *
- * See also: vxge_hw_device_intr_enable()
- */
-void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
-{
- u32 i;
-
- vxge_hw_device_mask_all(hldev);
-
- /* mask all the tim interrupts */
- writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
- __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
- &hldev->common_reg->tim_int_mask1);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_disable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-}
-
-/**
- * vxge_hw_device_mask_all - Mask all device interrupts.
- * @hldev: HW device handle.
- *
- * Mask all device interrupts.
- *
- * See also: vxge_hw_device_unmask_all()
- */
-void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
- VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_unmask_all - Unmask all device interrupts.
- * @hldev: HW device handle.
- *
- * Unmask all device interrupts.
- *
- * See also: vxge_hw_device_mask_all()
- */
-void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64 = 0;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_flush_io - Flush io writes.
- * @hldev: HW device handle.
- *
- * The function performs a read operation to flush posted I/O writes.
- *
- * Returns: void
- */
-void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
-{
- readl(&hldev->common_reg->titan_general_int_status);
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
- enum vxge_hw_event type)
-{
- switch (type) {
- case VXGE_HW_EVENT_UNKNOWN:
- break;
- case VXGE_HW_EVENT_RESET_START:
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- goto out;
- case VXGE_HW_EVENT_ALARM_CLEARED:
- goto out;
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- case VXGE_HW_EVENT_CRITICAL_ERR:
- case VXGE_HW_EVENT_SERR:
- break;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- break;
- default:
- vxge_assert(0);
- goto out;
- }
-
- /* notify driver */
- if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(hldev,
- type, vp_id);
-out:
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the link state is already down, there is nothing to do.
- */
- if (hldev->link_state == VXGE_HW_LINK_DOWN)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_DOWN;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_down)
- hldev->uld_callbacks->link_down(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link has been up for a programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the link state is already up, there is nothing to do.
- */
- if (hldev->link_state == VXGE_HW_LINK_UP)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_UP;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_up)
- hldev->uld_callbacks->link_up(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
-{
- u64 val64;
- u64 alarm_status;
- u64 pic_status;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
- u64 mask64;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath == NULL) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out2;
- }
-
- hldev = vpath->hldev;
- vp_reg = vpath->vp_reg;
- alarm_status = readq(&vp_reg->vpath_general_int_status);
-
- if (alarm_status == VXGE_HW_ALL_FOXES) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
- alarm_event);
- goto out;
- }
-
- sw_stats = vpath->sw_stats;
-
- if (alarm_status & ~(
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
- sw_stats->error_stats.unknown_alarms++;
-
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out;
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
- val64 = readq(&vp_reg->xgmac_vp_int_status);
-
- if (val64 &
- VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
- val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
- ))) {
- sw_stats->error_stats.network_sustained_fault++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_down_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_DOWN, alarm_event);
- }
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
- ))) {
-
- sw_stats->error_stats.network_sustained_ok++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_up_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_UP, alarm_event);
- }
-
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
- if (skip_alarms)
- return VXGE_HW_OK;
- }
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
- pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
- val64 = readq(&vp_reg->general_errors_reg);
- mask64 = readq(&vp_reg->general_errors_mask);
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
- ~mask64) {
- sw_stats->error_stats.ini_serr_det++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_SERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
- ~mask64) {
- sw_stats->error_stats.dblgen_fifo0_overflow++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
- ~mask64)
- sw_stats->error_stats.statsb_pif_chain_error++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
- ~mask64)
- sw_stats->error_stats.statsb_drop_timeout++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
- ~mask64)
- sw_stats->error_stats.target_illegal_access++;
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
- val64 = readq(&vp_reg->kdfcctl_errors_reg);
- mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->kdfcctl_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
- val64 = readq(&vp_reg->wrdma_alarm_status);
-
- if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
- val64 = readq(&vp_reg->prc_alarm_reg);
- mask64 = readq(&vp_reg->prc_alarm_mask);
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
- ~mask64)
- sw_stats->error_stats.prc_ring_bumps++;
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
- ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
- & ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
- & ~mask64) {
- sw_stats->error_stats.prc_quanta_size_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
- }
-out:
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
- if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
- (alarm_event == VXGE_HW_EVENT_UNKNOWN))
- return VXGE_HW_OK;
-
- __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
- if (alarm_event == VXGE_HW_EVENT_SERR)
- return VXGE_HW_ERR_CRITICAL;
-
- return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
- VXGE_HW_ERR_SLOT_FREEZE :
- (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
- VXGE_HW_ERR_VPATH;
-}
-
-/**
- * vxge_hw_device_begin_irq - Begin IRQ processing.
- * @hldev: HW device handle.
- * @skip_alarms: Do not clear the alarms
- * @reason: "Reason" for the interrupt, the value of Titan's
- * general_int_status register.
- *
- * The function performs two actions: it first checks whether the interrupt
- * was raised by the device (relevant on shared IRQ lines), and then masks
- * the device interrupts.
- *
- * Note:
- * vxge_hw_device_begin_irq() does not flush MMIO writes through the
- * bridge. Therefore, two back-to-back interrupts are potentially possible.
- *
- * Returns: 0, if the interrupt is not "ours" (note that in this case the
- * device remains enabled).
- * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
- * status via @reason.
- */
-enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
- u32 skip_alarms, u64 *reason)
-{
- u32 i;
- u64 val64;
- u64 adapter_status;
- u64 vpath_mask;
- enum vxge_hw_status ret = VXGE_HW_OK;
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- if (unlikely(!val64)) {
- /* not Titan interrupt */
- *reason = 0;
- ret = VXGE_HW_ERR_WRONG_IRQ;
- goto exit;
- }
-
- if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
-
- adapter_status = readq(&hldev->common_reg->adapter_status);
-
- if (adapter_status == VXGE_HW_ALL_FOXES) {
-
- __vxge_hw_device_handle_error(hldev,
- NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
- *reason = 0;
- ret = VXGE_HW_ERR_SLOT_FREEZE;
- goto exit;
- }
- }
-
- hldev->stats.sw_dev_info_stats.total_intr_cnt++;
-
- *reason = val64;
-
- vpath_mask = hldev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
-
- if (val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
- hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
-
- return VXGE_HW_OK;
- }
-
- hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
-
- if (unlikely(val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
-
- enum vxge_hw_status error_level = VXGE_HW_OK;
-
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- ret = __vxge_hw_vpath_alarm_process(
- &hldev->virtual_paths[i], skip_alarms);
-
- error_level = VXGE_HW_SET_LEVEL(ret, error_level);
-
- if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
- (ret == VXGE_HW_ERR_SLOT_FREEZE)))
- break;
- }
-
- ret = error_level;
- }
-exit:
- return ret;
-}
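
Together with vxge_hw_device_clear_tx_rx() below, the function above supports a conventional shared-IRQ top half. A hedged sketch of such a handler, using only entry points defined in this file; the dev_id plumbing and the bottom half are assumptions of the sketch, not the driver's actual code:

#include <linux/interrupt.h>
/* plus the vxge headers that declare the entry points used below */

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id; /* assumed dev_id layout */
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE; /* shared line, not our device */

	/* Ack the Tx/Rx condition; a bottom half (e.g. a NAPI poll,
	 * not shown) then drains the actual completions. */
	vxge_hw_device_clear_tx_rx(hldev);
	return IRQ_HANDLED;
}
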
-
-/**
- * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
- * condition that has caused the Tx and RX interrupt.
- * @hldev: HW device.
- *
- * Acknowledge (that is, clear) the condition that has caused
- * the Tx and Rx interrupt.
- * See also: vxge_hw_device_begin_irq(),
- * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
- */
-void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
-{
-
- if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status0);
- }
-
- if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status1);
- }
-}
-
-/*
- * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
- * @channel: Channel
- * @dtrh: Buffer to return the DTR pointer
- *
- * Allocates a dtr from the reserve array. If the reserve array is empty,
- * it swaps the reserve and free arrays.
- *
- */
-static enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
-{
- if (channel->reserve_ptr - channel->reserve_top > 0) {
-_alloc_after_swap:
- *dtrh = channel->reserve_arr[--channel->reserve_ptr];
-
- return VXGE_HW_OK;
- }
-
- /* switch between empty and full arrays */
-
- /* The idea behind this design is that keeping the free and reserve
- * arrays separate also separates the irq and non-irq parts, i.e. no
- * additional locking is needed when we free a resource. */
-
- if (channel->length - channel->free_ptr > 0) {
- swap(channel->reserve_arr, channel->free_arr);
- channel->reserve_ptr = channel->length;
- channel->reserve_top = channel->free_ptr;
- channel->free_ptr = channel->length;
-
- channel->stats->reserve_free_swaps_cnt++;
-
- goto _alloc_after_swap;
- }
-
- channel->stats->full_cnt++;
-
- *dtrh = NULL;
- return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
-}
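
The reserve/free split above is a two-stack descriptor pool: allocation pops only from the reserve array, freeing pushes only onto the free array, and the arrays are swapped only when the reserve side runs dry. A minimal user-space model of the same index discipline (hypothetical, no concurrency guards shown):

#include <stddef.h>

struct pool {
	void **reserve_arr, **free_arr;
	int length, reserve_ptr, reserve_top, free_ptr;
};

static void *pool_alloc(struct pool *p)
{
	if (p->reserve_ptr - p->reserve_top == 0) {
		void **tmp;

		if (p->length - p->free_ptr == 0)
			return NULL; /* nothing freed either: truly empty */

		/* Adopt everything freed so far, as dtr_alloc() does. */
		tmp = p->reserve_arr;
		p->reserve_arr = p->free_arr;
		p->free_arr = tmp;
		p->reserve_ptr = p->length;
		p->reserve_top = p->free_ptr;
		p->free_ptr = p->length;
	}
	return p->reserve_arr[--p->reserve_ptr];
}

static void pool_free(struct pool *p, void *dtrh)
{
	p->free_arr[--p->free_ptr] = dtrh;
}

Because the consumer only ever moves the reserve indices and the producer only the free indices, the two paths never contend on a shared hot index, which is the no-extra-lock argument made in the comment above.
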
-
-/*
- * vxge_hw_channel_dtr_post - Post a dtr to the channel
- * @channelh: Channel
- * @dtrh: DTR pointer
- *
- * Posts a dtr to the work array.
- *
- */
-static void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
-{
- vxge_assert(channel->work_arr[channel->post_index] == NULL);
-
- channel->work_arr[channel->post_index++] = dtrh;
-
- /* wrap-around */
- if (channel->post_index == channel->length)
- channel->post_index = 0;
-}
-
-/*
- * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
- * @channel: Channel
- * @dtr: Buffer to return the next completed DTR pointer
- *
- * Returns the next completed dtr without removing it from the work array.
- *
- */
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
-{
- vxge_assert(channel->compl_index < channel->length);
-
- *dtrh = channel->work_arr[channel->compl_index];
- prefetch(*dtrh);
-}
-
-/*
- * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
- * @channel: Channel handle
- *
- * Removes the next completed dtr from the work array.
- *
- */
-void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
-{
- channel->work_arr[channel->compl_index] = NULL;
-
- /* wrap-around */
- if (++channel->compl_index == channel->length)
- channel->compl_index = 0;
-
- channel->stats->total_compl_cnt++;
-}
-
-/*
- * vxge_hw_channel_dtr_free - Frees a dtr
- * @channel: Channel handle
- * @dtr: DTR pointer
- *
- * Returns the dtr to free array
- *
- */
-void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
-{
- channel->free_arr[--channel->free_ptr] = dtrh;
-}
-
-/*
- * vxge_hw_channel_dtr_count
- * @channel: Channel handle. Obtained via vxge_hw_channel_open().
- *
- * Retrieve the number of DTRs available. This function must not be called
- * from the data path; ring_initial_replenish() is the only user.
- */
-int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
-{
- return (channel->reserve_ptr - channel->reserve_top) +
- (channel->length - channel->free_ptr);
-}
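
A companion to the pool model sketched after vxge_hw_channel_dtr_alloc() above: the count is simply what is left on the reserve stack plus what has been freed but not yet swapped back in, matching the expression in the function body.

/* DTRs available = remaining reserve entries + freed-but-unswapped. */
static int pool_count(const struct pool *p)
{
	return (p->reserve_ptr - p->reserve_top) +
	       (p->length - p->free_ptr);
}
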
-
-/**
- * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- *
- * Reserve an Rx descriptor for subsequent filling-in by the driver
- * and posting on the corresponding channel via vxge_hw_ring_rxd_post().
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
- *
- */
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
- void **rxdh)
-{
- enum vxge_hw_status status;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, rxdh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_ring_rxd_1 *rxdp =
- (struct vxge_hw_ring_rxd_1 *)*rxdh;
-
- rxdp->control_0 = rxdp->control_1 = 0;
- }
-
- return status;
-}
-
-/**
- * vxge_hw_ring_rxd_free - Free descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
- * be:
- *
- * - reserved (vxge_hw_ring_rxd_reserve);
- *
- * - posted (vxge_hw_ring_rxd_post);
- *
- * - completed (vxge_hw_ring_rxd_next_completed);
- *
- * - and recycled again (vxge_hw_ring_rxd_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_free(channel, rxdh);
-
-}
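
The four lifecycle states listed above chain into the usual receive loop. A hedged sketch of one pass over the ring using only the vxge_hw_ring_rxd_* entry points in this file; buffer attachment and skb handling are elided, and VXGE_HW_RING_T_CODE_OK is assumed to be the success tcode:

static void demo_rx_cycle(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	/* Refill: reserve descriptors, (attach buffers here), post. */
	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK)
		vxge_hw_ring_rxd_post(ring, rxdh);

	/* Drain: walk completions, let tcode handling see failures,
	 * then recycle each descriptor back to the free array. */
	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_RING_T_CODE_OK)
			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
		vxge_hw_ring_rxd_free(ring, rxdh);
	}
}
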
-
-/**
- * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * This routine prepares an rxd and posts it to the channel's work array.
- */
-void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_post_post - Process rxd after post.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post
- */
-void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post - Post descriptor on the ring.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
- *
- * Post descriptor on the ring.
- * Prior to posting the descriptor should be filled in accordance with
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- wmb();
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post with memory barrier.
- */
-void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
-{
- wmb();
- vxge_hw_ring_rxd_post_post(ring, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Receive Descriptor Format. Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
- * remaining completions (the very first completion is passed by HW via
- * vxge_hw_ring_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_ring_rxd_next_completed either immediately from inside the
- * ring callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * A non-zero @t_code means failure to fill in the receive buffer(s)
- * of the descriptor.
- * For instance, a parity error was detected during the data transfer.
- * In this case Titan will complete the descriptor and indicate
- * to the host that the received data is not to be used.
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- * See also: vxge_hw_ring_callback_f{},
- * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
- */
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_ring_rxd_1 *rxdp;
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 control_0, own;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, rxdh);
-
- rxdp = *rxdh;
- if (rxdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- control_0 = rxdp->control_0;
- own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
-
- /* check whether it is not the end */
- if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
-
- vxge_assert(rxdp->host_control != 0);
-
- ++ring->cmpl_cnt;
- vxge_hw_channel_dtr_complete(channel);
-
- vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
-
- ring->stats->common_stats.usage_cnt++;
- if (ring->stats->common_stats.usage_max <
- ring->stats->common_stats.usage_cnt)
- ring->stats->common_stats.usage_max =
- ring->stats->common_stats.usage_cnt;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* Reset it, since we don't want to return
- * garbage to the driver */
- *rxdh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
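
The completion path that this function and vxge_hw_ring_handle_tcode() imply
can be sketched as follows (editor's illustration; my_deliver_frame() is
hypothetical, and the pre_post/post_post_wmb pairing mirrors the re-posting
pattern suggested by the APIs above):

static void my_drain_rx_completions(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
		    VXGE_HW_OK)
			continue;	/* invalid t_code: skip this rxd */
		my_deliver_frame(ring, rxdh);	/* hypothetical */
		/* Hand the (re-filled) descriptor back to the adapter. */
		vxge_hw_ring_rxd_pre_post(ring, rxdh);
		vxge_hw_ring_rxd_post_post_wmb(ring, rxdh);
	}
}
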
-
-/**
- * vxge_hw_ring_handle_tcode - Handle transfer code.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when encounters critical error.
- */
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- /* Drop the frame if the t_code is not supported, unless it
- * is 0x5 (an unparseable packet, e.g. one with an unknown
- * IPv6 header).
- */
-
- if (t_code == VXGE_HW_RING_T_CODE_OK ||
- t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
- status = VXGE_HW_OK;
- goto exit;
- }
-
- if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- ring->stats->rxd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * __vxge_hw_non_offload_db_post - Post non-offload doorbell
- *
- * @fifo: Fifo handle
- * @txdl_ptr: The starting location of the TxDL in host memory
- * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
- * @no_snoop: No snoop flags
- *
- * This function posts a non-offload doorbell to the doorbell FIFO
- *
- */
-static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
- u64 txdl_ptr, u32 num_txds, u32 no_snoop)
-{
- writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
- VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
- VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
- &fifo->nofl_db->control_0);
-
- writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-}
-
-/**
- * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
- * the fifo
- * @fifoh: Handle to the fifo object used for non offload send
- */
-u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
-{
- return vxge_hw_channel_dtr_count(&fifoh->channel);
-}
-
-/**
- * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- * @txdl_priv: Buffer to return the pointer to per txdl space
- *
- * Reserve a single TxDL (that is, fifo descriptor)
- * for subsequent filling-in by the driver
- * and posting on the corresponding channel
- * via vxge_hw_fifo_txdl_post().
- *
- * Note: it is the responsibility of the driver to reserve multiple
- * descriptors for a lengthy (e.g., LSO) transmit operation. A single fifo
- * descriptor carries up to a configured number (fifo.max_frags) of
- * contiguous buffers.
- *
- * Returns: VXGE_HW_OK - success;
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifo,
- void **txdlh, void **txdl_priv)
-{
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status;
- int i;
-
- channel = &fifo->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, txdlh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)*txdlh;
- struct __vxge_hw_fifo_txdl_priv *priv;
-
- priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- /* reset the TxDL's private */
- priv->align_dma_offset = 0;
- priv->align_vaddr_start = priv->align_vaddr;
- priv->align_used_frags = 0;
- priv->frags = 0;
- priv->alloc_frags = fifo->config->max_frags;
- priv->next_txdl_priv = NULL;
-
- *txdl_priv = (void *)(size_t)txdp->host_control;
-
- for (i = 0; i < fifo->config->max_frags; i++) {
- txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
- txdp->control_0 = txdp->control_1 = 0;
- }
- }
-
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
- * descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @frag_idx: Index of the data buffer in the caller's scatter-gather list
- * (of buffers).
- * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
- * @size: Size of the data buffer (in bytes).
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
- * All three APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
- void *txdlh, u32 frag_idx,
- dma_addr_t dma_pointer, u32 size)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp, *txdp_last;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
-
- if (frag_idx != 0) {
- txdp->control_0 = txdp->control_1 = 0;
- } else {
- txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
- txdp->control_1 |= fifo->interrupt_type;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
- fifo->tx_intr_num);
- if (txdl_priv->frags) {
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
- (txdl_priv->frags - 1);
- txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- }
- }
-
- vxge_assert(frag_idx < txdl_priv->alloc_frags);
-
- txdp->buffer_pointer = (u64)dma_pointer;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
- fifo->stats->total_buffers++;
- txdl_priv->frags++;
-}
-
-/**
- * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
- *
- * Post descriptor on the 'fifo' type channel for transmission.
- * Prior to posting, the descriptor should be filled in accordance with
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp_last;
- struct vxge_hw_fifo_txd *txdp_first;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp_first = txdlh;
-
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
- txdp_last->control_0 |=
- VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
-
- __vxge_hw_non_offload_db_post(fifo,
- (u64)txdl_priv->dma_addr,
- txdl_priv->frags - 1,
- fifo->no_snoop_bits);
-
- fifo->stats->total_posts++;
- fifo->stats->common_stats.usage_cnt++;
- if (fifo->stats->common_stats.usage_max <
- fifo->stats->common_stats.usage_cnt)
- fifo->stats->common_stats.usage_max =
- fifo->stats->common_stats.usage_cnt;
-}
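
Taken together, the reserve/buffer_set/post APIs above suggest a transmit
path along these lines (editor's sketch; the fragment arrays stand in for a
driver's scatter-gather state):

static enum vxge_hw_status my_xmit_frags(struct __vxge_hw_fifo *fifo,
					 dma_addr_t *dma, u32 *len,
					 u32 nfrags)
{
	void *txdlh, *txdl_priv;
	enum vxge_hw_status status;
	u32 i;

	if (vxge_hw_fifo_free_txdl_count_get(fifo) == 0)
		return VXGE_HW_INF_OUT_OF_DESCRIPTORS;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;

	/* One call per scatter-gather fragment, up to max_frags. */
	for (i = 0; i < nfrags; i++)
		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i, dma[i], len[i]);

	vxge_hw_fifo_txdl_post(fifo, txdlh);
	return VXGE_HW_OK;
}
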
-
-/**
- * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Transmit Descriptor Format.
- * Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
- * remaining completions (the very first completion is passed by HW via
- * vxge_hw_channel_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_fifo_txdl_next_completed either immediately from inside the
- * channel callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * A non-zero @t_code means failure to process the descriptor.
- * The failure could happen, for instance, when the link is
- * down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifo, void **txdlh,
- enum vxge_hw_fifo_tcode *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_fifo_txd *txdp;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, txdlh);
-
- txdp = *txdlh;
- if (txdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- /* check whether host owns it */
- if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
-
- vxge_assert(txdp->host_control != 0);
-
- vxge_hw_channel_dtr_complete(channel);
-
- *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
-
- if (fifo->stats->common_stats.usage_cnt > 0)
- fifo->stats->common_stats.usage_cnt--;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* no more completions */
- *txdlh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
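
A sketch of the corresponding reclaim loop, assuming a t_code of zero means
success (editor's illustration; my_unmap_tx_buffers() is hypothetical):

static void my_reap_tx_completions(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code)	/* 0 is assumed to be the "OK" tcode */
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		my_unmap_tx_buffers(fifo, txdlh);	/* hypothetical */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
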
-
-/**
- * vxge_hw_fifo_handle_tcode - Handle transfer code.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when encounters critical error.
- */
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- /* (t_code & 0x7) can never be negative; only the upper bound matters */
- if ((t_code & 0x7) > 0x4) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- fifo->stats->txd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_free - Free descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
- * be:
- *
- * - reserved (vxge_hw_fifo_txdl_reserve);
- *
- * - posted (vxge_hw_fifo_txdl_post);
- *
- * - completed (vxge_hw_fifo_txdl_next_completed);
- *
- * - and recycled again (vxge_hw_fifo_txdl_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_free(channel, txdlh);
-}
-
-/**
- * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be added for this vpath into the list
- * @macaddr_mask: MAC address mask for macaddr
- * @duplicate_mode: Duplicate MAC address add mode. Please see
- * enum vxge_hw_vpath_mac_addr_add_mode{}
- *
- * Adds the given mac address and mac address mask into the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- switch (duplicate_mode) {
- case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
- i = 0;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
- i = 1;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
- i = 2;
- break;
- default:
- i = 0;
- break;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
-exit:
- return status;
-}
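
Sketch of a typical call, assuming an all-ones mask requests an exact match
(the mask semantics are defined by the Titan user guide, not by this sketch):

static enum vxge_hw_status my_add_station_mac(
	struct __vxge_hw_vpath_handle *vp, const u8 *addr)
{
	u8 macaddr[ETH_ALEN], mask[ETH_ALEN];

	memcpy(macaddr, addr, ETH_ALEN);
	/* Assumed: an all-ones mask selects exact matching. */
	memset(mask, 0xFF, ETH_ALEN);

	return vxge_hw_vpath_mac_addr_add(vp, macaddr, mask,
			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}
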
-
-/**
- * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
- * @vp: Vpath handle.
- * @macaddr: First MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the first mac address entry for this vpath from MAC address table.
- * Return: the first mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
- * @vp: Vpath handle.
- * @macaddr: Next MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the next mac address entry for this vpath from MAC address table.
- * Return: the next mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-
-exit:
- return status;
-}
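
The get/get_next pair supports a simple table walk (editor's sketch):

static void my_dump_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, mask);
	while (status == VXGE_HW_OK) {
		pr_info("vpath MAC entry: %pM\n", macaddr);
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr, mask);
	}
}
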
-
-/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath from the MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be deleted for this vpath from the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Deletes the given mac address and mac address mask from the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
- * to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be deleted for this vpath from the list
- *
- * Deletes the given vlan id from the list for this vpath.
- * see also: vxge_hw_vpath_vid_add
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
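
A small wrapper showing how the add/delete pair is typically driven from a
VLAN filter update (editor's sketch):

static void my_set_vlan_filter(struct __vxge_hw_vpath_handle *vp,
			       u64 vid, bool enable)
{
	enum vxge_hw_status status;

	status = enable ? vxge_hw_vpath_vid_add(vp, vid) :
			  vxge_hw_vpath_vid_delete(vp, vid);
	if (status != VXGE_HW_OK)
		pr_warn("vid %llu update failed: %d\n", vid, status);
}
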
-
-/**
- * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Enable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_disable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- /* Enable promiscuous mode for function 0 only */
- if (!(vpath->hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
- return VXGE_HW_OK;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
-
- val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_BCAST_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Disable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_enable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_bcast_enable - Enable broadcast
- * @vp: Vpath handle.
- *
- * Enable receiving broadcasts.
- */
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
- * @vp: Vpath handle.
- *
- * Enable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK on success.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
- * @vp: Vpath handle.
- *
- * Disable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_alarm_process - Process Alarms.
- * @vp: Vpath handle.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vp,
- u32 skip_alarms)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
- * alarms
- * @vp: Virtual Path handle.
- * @tim_msix_id: MSIX vectors associated with the VXGE_HW_MAX_INTR_PER_VP
- * interrupts (vector numbers may repeat). If the fifo or ring is not
- * enabled, the corresponding MSIX vector should be set to 0.
- * @alarm_msix_id: MSIX vector for alarms.
- *
- * This API associates the given MSIX vector numbers with the four TIM
- * interrupts and the alarm interrupt.
- */
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
- int alarm_msix_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 vp_id = vp->vpath->vp_id;
-
- val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[0]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[1]);
-
- writeq(val64, &vp_reg->interrupt_cfg0);
-
- writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
- &vp_reg->interrupt_cfg2);
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
- 0, 32), &vp_reg->one_shot_vect0_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
- 0, 32), &vp_reg->one_shot_vect1_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
- 0, 32), &vp_reg->one_shot_vect2_en);
- }
-}
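
Editor's sketch of programming the vectors; the concrete vector numbers are
illustrative, and the local-to-global MSIX id translation performed by a real
driver is glossed over:

static void my_msix_setup(struct __vxge_hw_vpath_handle *vp)
{
	/* Tx on local vector 0, Rx on local vector 1; the unused
	 * EINTA/BMAP slots stay 0 as required by the comment above. */
	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};
	int alarm_msix_id = 2;	/* assumed vector reserved for alarms */

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);

	/* Unmask the vectors that were just programmed. */
	vxge_hw_vpath_msix_unmask(vp, tim_msix_id[0]);
	vxge_hw_vpath_msix_unmask(vp, tim_msix_id[1]);
	vxge_hw_vpath_msix_unmask(vp, alarm_msix_id);
}
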
-
-/**
- * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function masks the msix interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function clears the msix interrupt for the given msix_id.
- */
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
- else
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function unmasks the msix interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Mask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_unmask_tx_rx()
- */
-void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask0);
- }
-
- val64 = readl(&hldev->common_reg->tim_int_mask1);
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Unmask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_mask_tx_rx()
- */
-void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask0);
- }
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
- * descriptors and process the same.
- * @ring: Handle to the ring object used for receive
- *
- * The function polls the Rx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- *
- * See also: vxge_hw_vpath_poll_tx()
- */
-enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
-{
- u8 t_code;
- enum vxge_hw_status status = VXGE_HW_OK;
- void *first_rxdh;
- int new_count = 0;
-
- ring->cmpl_cnt = 0;
-
- status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
- if (status == VXGE_HW_OK)
- ring->callback(ring, first_rxdh,
- t_code, ring->channel.userdata);
-
- if (ring->cmpl_cnt != 0) {
- ring->doorbell_cnt += ring->cmpl_cnt;
- if (ring->doorbell_cnt >= ring->rxds_limit) {
- /*
- * Each RxD is 4 qwords; update the number of
- * qwords replenished
- */
- new_count = (ring->doorbell_cnt * 4);
-
- /* For each block add 4 more qwords */
- ring->total_db_cnt += ring->doorbell_cnt;
- if (ring->total_db_cnt >= ring->rxds_per_block) {
- new_count += 4;
- /* Reset total count */
- ring->total_db_cnt %= ring->rxds_per_block;
- }
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
- &ring->vp_reg->prc_rxd_doorbell);
- readl(&ring->common_reg->titan_general_int_status);
- ring->doorbell_cnt = 0;
- }
- }
-
- return status;
-}
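
A NAPI handler built on this poll routine might look as follows (editor's
sketch; the my_ring wrapper and its pkts_processed accounting, updated from
the completion callback, are hypothetical):

struct my_ring {
	struct napi_struct napi;
	struct __vxge_hw_ring *hw_ring;
	int pkts_processed;
};

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *r = container_of(napi, struct my_ring, napi);

	r->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(r->hw_ring);

	if (r->pkts_processed < budget) {
		napi_complete(napi);
		/* Re-enable interrupts here, e.g. via
		 * vxge_hw_vpath_msix_unmask(). */
	}
	return r->pkts_processed;
}
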
-
-/**
- * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
- * @fifo: Handle to the fifo object used for non offload send
- * @skb_ptr: pointer to skb
- * @nr_skb: number of skbs
- * @more: more is coming
- *
- * The function polls the Tx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- */
-enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
- struct sk_buff ***skb_ptr, int nr_skb,
- int *more)
-{
- enum vxge_hw_fifo_tcode t_code;
- void *first_txdlh;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- status = vxge_hw_fifo_txdl_next_completed(fifo,
- &first_txdlh, &t_code);
- if (status == VXGE_HW_OK)
- if (fifo->callback(fifo, first_txdlh, t_code,
- channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
- status = VXGE_HW_COMPLETIONS_REMAIN;
-
- return status;
-}
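
Editor's sketch of driving this from the Tx reclaim path, assuming the
completion callback advances @skb_ptr past each skb it stores:

static void my_reap_tx(struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[16];	/* arbitrary batch size */
	struct sk_buff **skb_ptr, **temp;
	int more;

	do {
		more = 0;
		skb_ptr = completed;
		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 16, &more);

		/* Free whatever the callback collected this pass. */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}
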
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
deleted file mode 100644
index ba6f833bb059..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ /dev/null
@@ -1,2290 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_TRAFFIC_H
-#define VXGE_TRAFFIC_H
-
-#include "vxge-reg.h"
-#include "vxge-version.h"
-
-#define VXGE_HW_DTR_MAX_T_CODE 16
-#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_MAX_VIRTUAL_PATHS 17
-
-#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
-
-#define VXGE_HW_DEFAULT_32 0xffffffff
-/* frames sizes */
-#define VXGE_HW_HEADER_802_2_SIZE 3
-#define VXGE_HW_HEADER_SNAP_SIZE 5
-#define VXGE_HW_HEADER_VLAN_SIZE 4
-#define VXGE_HW_MAC_HEADER_MAX_SIZE \
- (ETH_HLEN + \
- VXGE_HW_HEADER_802_2_SIZE + \
- VXGE_HW_HEADER_VLAN_SIZE + \
- VXGE_HW_HEADER_SNAP_SIZE)
-
-/* 32bit alignments */
-#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
-#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
-#define VXGE_HW_HEADER_802_2_ALIGN 3
-#define VXGE_HW_HEADER_SNAP_ALIGN 1
-
-#define VXGE_HW_L3_CKSUM_OK 0xFFFF
-#define VXGE_HW_L4_CKSUM_OK 0xFFFF
-
-/* Forward declarations */
-struct __vxge_hw_device;
-struct __vxge_hw_vpath_handle;
-struct vxge_hw_vp_config;
-struct __vxge_hw_virtualpath;
-struct __vxge_hw_channel;
-struct __vxge_hw_fifo;
-struct __vxge_hw_ring;
-struct vxge_hw_ring_attr;
-struct vxge_hw_mempool;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/*VXGE_HW_STATUS_H*/
-
-#define VXGE_HW_EVENT_BASE 0
-#define VXGE_LL_EVENT_BASE 100
-
-/**
- * enum vxge_hw_event - Enumerates slow-path HW events.
- * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
- * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
- * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
- * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
- * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
- * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
- * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
- * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
- * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. The driver tries to
- * distinguish slot-freeze from the other critical events (e.g. ECC) when
- * it is impossible to PIO read "through" the bus, i.e. when getting
- * all-foxes.
- *
- * enum vxge_hw_event enumerates slow-path HW events.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_uld_link_down_f{}.
- */
-enum vxge_hw_event {
- VXGE_HW_EVENT_UNKNOWN = 0,
- /* HW events */
- VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
- VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
- VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
- VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
- VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
- VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
- VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
- VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
- VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
- VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
- VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
- VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
- VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
- VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
-};
-
-#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
-
-/*
- * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
- * caller.
- */
-struct vxge_hw_mempool_dma {
- dma_addr_t addr;
- struct pci_dev *handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * vxge_hw_mempool_item_f - Mempool item alloc/free callback
- * @mempoolh: Memory pool handle.
- * @memblock: Address of memory block
- * @memblock_index: Index of memory block
- * @item: Item that gets allocated or freed.
- * @index: Item's index in the memory pool.
- * @is_last: True, if this item is the last one in the pool; false - otherwise.
- * @userdata: Per-pool user context.
- *
- * Memory pool allocation/deallocation callback.
- */
-
-/*
- * struct vxge_hw_mempool - Memory pool.
- */
-struct vxge_hw_mempool {
-
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-
- void *userdata;
- void **memblocks_arr;
- void **memblocks_priv_arr;
- struct vxge_hw_mempool_dma *memblocks_dma_arr;
- struct __vxge_hw_device *devh;
- u32 memblock_size;
- u32 memblocks_max;
- u32 memblocks_allocated;
- u32 item_size;
- u32 items_max;
- u32 items_initial;
- u32 items_current;
- u32 items_per_memblock;
- void **items_arr;
- u32 items_priv_size;
-};
-
-#define VXGE_HW_MAX_INTR_PER_VP 4
-#define VXGE_HW_VPATH_INTR_TX 0
-#define VXGE_HW_VPATH_INTR_RX 1
-#define VXGE_HW_VPATH_INTR_EINTA 2
-#define VXGE_HW_VPATH_INTR_BMAP 3
-
-#define VXGE_HW_BLOCK_SIZE 4096
-
-/**
- * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
- * @intr_enable: Set to 1, if interrupt is enabled.
- * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
- * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
- * asserted, other interrupt-generating entities will cancel the
- * scheduled timer interrupt.
- * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
- * When asserted, an interrupt will be generated every time the
- * boundary timer expires, even if no traffic has been transmitted
- * on this interrupt.
- * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
- * (Re-) Interrupt Enable: When asserted, an interrupt will be
- * generated the next time the timer expires, even if no traffic has
- * been transmitted on this interrupt. (This will only happen once
- * each time that this value is written to the TIM.) This bit is
- * cleared by H/W at the end of the current-timer-interval when
- * the interrupt is triggered.
- * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
- * @util_sel: Utilization Selector. Selects which of the workload approximations
- * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
- * specified utilization etc.), selects one of
- * the 17 host configured values.
- * 0-Virtual Path 0
- * 1-Virtual Path 1
- * ...
- * 16-Virtual Path 16
- * 17-Legacy Tx network utilization, provided by TPA
- * 18-Legacy Rx network utilization, provided by FAU
- * 19-Average of legacy Rx and Tx utilization calculated from link
- * utilization values.
- * 20-31-Invalid configurations
- * 32-Host utilization for Virtual Path 0
- * 33-Host utilization for Virtual Path 1
- * ...
- * 48-Host utilization for Virtual Path 16
- * 49-Legacy Tx network utilization, provided by TPA
- * 50-Legacy Rx network utilization, provided by FAU
- * 51-Average of legacy Rx and Tx utilization calculated from
- * link utilization values.
- * 52-63-Invalid configurations
- * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
- * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
- * to 1 enables counting of TxD0 returns (signalled by PCC's),
- * towards utilization event count values.
- * @urange_a: Defines the upper limit (in percent) for this utilization range
- * to be active. This range is considered active
- * if 0 <= UTIL <= URNG_A
- * and the UEC_A field (below) is non-zero.
- * @uec_a: Utilization Event Count A. If this range is active, the adapter will
- * wait until UEC_A events have occurred on the interrupt before
- * generating an interrupt.
- * @urange_b: Link utilization range B.
- * @uec_b: Utilization Event Count B.
- * @urange_c: Link utilization range C.
- * @uec_c: Utilization Event Count C.
- * @urange_d: Link utilization range D.
- * @uec_d: Utilization Event Count D.
- * Traffic Interrupt Controller Module interrupt configuration.
- */
-struct vxge_hw_tim_intr_config {
-
- u32 intr_enable;
-#define VXGE_HW_TIM_INTR_ENABLE 1
-#define VXGE_HW_TIM_INTR_DISABLE 0
-#define VXGE_HW_TIM_INTR_DEFAULT 0
-
- u32 btimer_val;
-#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
-#define VXGE_HW_USE_FLASH_DEFAULT (~0)
-
- u32 timer_ac_en;
-#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
-#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
-
- u32 timer_ci_en;
-#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
-
- u32 timer_ri_en;
-#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
-
- u32 rtimer_val;
-#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
-
- u32 util_sel;
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
-#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
-
- u32 ltimer_val;
-#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
-
- /* Line utilization interrupts */
- u32 urange_a;
-#define VXGE_HW_MIN_TIM_URANGE_A 0
-#define VXGE_HW_MAX_TIM_URANGE_A 100
-
- u32 uec_a;
-#define VXGE_HW_MIN_TIM_UEC_A 0
-#define VXGE_HW_MAX_TIM_UEC_A 65535
-
- u32 urange_b;
-#define VXGE_HW_MIN_TIM_URANGE_B 0
-#define VXGE_HW_MAX_TIM_URANGE_B 100
-
- u32 uec_b;
-#define VXGE_HW_MIN_TIM_UEC_B 0
-#define VXGE_HW_MAX_TIM_UEC_B 65535
-
- u32 urange_c;
-#define VXGE_HW_MIN_TIM_URANGE_C 0
-#define VXGE_HW_MAX_TIM_URANGE_C 100
-
- u32 uec_c;
-#define VXGE_HW_MIN_TIM_UEC_C 0
-#define VXGE_HW_MAX_TIM_UEC_C 65535
-
- u32 uec_d;
-#define VXGE_HW_MIN_TIM_UEC_D 0
-#define VXGE_HW_MAX_TIM_UEC_D 65535
-};
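
For illustration only, a plausible Tx-side initializer for this structure;
every value here is an assumption for the sketch, not a recommendation from
the original driver:

static const struct vxge_hw_tim_intr_config my_tti_cfg = {
	.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
	.btimer_val	= 250,	/* ~68 us at 272 ns per unit */
	.timer_ac_en	= VXGE_HW_TIM_TIMER_AC_ENABLE,
	.timer_ci_en	= VXGE_HW_TIM_TIMER_CI_DISABLE,
	.timer_ri_en	= VXGE_HW_TIM_TIMER_RI_DISABLE,
	.rtimer_val	= 0,
	.util_sel	= VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL,
	.ltimer_val	= 1000,	/* ~272 us latency bound */
	.urange_a	= 5,
	.uec_a		= 1,
	.urange_b	= 15,
	.uec_b		= 4,
	.urange_c	= 40,
	.uec_c		= 8,
	.uec_d		= 16,
};
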
-
-#define VXGE_HW_STATS_OP_READ 0
-#define VXGE_HW_STATS_OP_CLEAR_STAT 1
-#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
-
-#define VXGE_HW_STATS_LOC_AGGR 17
-#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
-
-#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
-#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
- vxge_bVALn(bits, 32, 32)
-
-/**
- * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
- *
- * @tx_frms: Count of data frames transmitted on this Aggregator on all
- * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * However, does include frames discarded by the Distribution
- * function.
- * @tx_data_octets: Count of data and padding octets of frames transmitted
- * on this Aggregator on all its Aggregation ports. Does not include
- * octets of LACPDUs or Marker PDUs. However, does include octets of
- * frames discarded by the Distribution function.
- * @tx_mcast_frms: Count of data frames transmitted (to a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
- * on all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
- * that are discarded by the Distribution function. This occurs when
- * conversations are allocated to different ports and have to be
- * flushed on old ports.
- * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
- * experience transmission errors on its Aggregation ports.
- * @rx_frms: Count of data frames received on this Aggregator on all its
- * Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * Also, does not include frames discarded by the Collection
- * function.
- * @rx_data_octets: Count of data and padding octets of frames received on this
- * Aggregator on all its Aggregation ports. Does not include octets
- * of LACPDUs or Marker PDUs. Also, does not include
- * octets of frames
- * discarded by the Collection function.
- * @rx_mcast_frms: Count of data frames received (from a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_discarded_frms: Count of data frames received on this Aggregator that are
- * discarded by the Collection function because the Collection
- * function was disabled on the port on which the frames were received.
- * @rx_errored_frms: Count of data frames received on this Aggregator that are
- * discarded by its Aggregation ports, or are discarded by the
- * Collection function of the Aggregator, or that are discarded by
- * the Aggregator due to detection of an illegal Slow Protocols PDU.
- * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
- * that are discarded by its Aggregation ports due to detection of
- * an unknown Slow Protocols PDU.
- *
- * Per aggregator XMAC RX statistics.
- */
-struct vxge_hw_xmac_aggr_stats {
-/*0x000*/ u64 tx_frms;
-/*0x008*/ u64 tx_data_octets;
-/*0x010*/ u64 tx_mcast_frms;
-/*0x018*/ u64 tx_bcast_frms;
-/*0x020*/ u64 tx_discarded_frms;
-/*0x028*/ u64 tx_errored_frms;
-/*0x030*/ u64 rx_frms;
-/*0x038*/ u64 rx_data_octets;
-/*0x040*/ u64 rx_mcast_frms;
-/*0x048*/ u64 rx_bcast_frms;
-/*0x050*/ u64 rx_discarded_frms;
-/*0x058*/ u64 rx_errored_frms;
-/*0x060*/ u64 rx_unknown_slow_proto_frms;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
- *
- * @tx_ttl_frms: Count of successfully transmitted MAC frames
- * @tx_ttl_octets: Count of total octets of transmitted frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of transmitted frames, including framing characters,
- * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have
- * 8 bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
- * including the preamble octets.
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
- * due to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown
- * protocol, such as a new IPv6 extension header, or an unsupported
- * Routing Type. The packet still has a checksum calculated but it
- * may be incorrect.
- * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
- * Since, the only control frames supported by this device are
- * PAUSE frames, this register is a count of all transmitted MAC
- * control frames.
- * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
- * on this Aggregation port.
- * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
- * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
- * the network. Increments because of:
- * 1) An internal processing error
- * (such as an uncorrectable ECC error). 2) A frame parsing error
- * during IP checksum calculation.
- * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
- * Aggregation port.
- * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /T/ (i.e. the terminate character), thus the statistic
- * tracks the number of transmitted Terminate characters.
- * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /S/ (i.e. the start character),
- * thus the statistic tracks
- * the number of transmitted Start characters.
- * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns transmitted at
- * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
- * set to 1, then this stat increments when COLUMN2 is found within
- * 'n' clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
- * to 0, then it means to search anywhere for COLUMN2).
- * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /I/ (i.e. a column containing all idle characters),
- * thus the statistic tracks the number of transmitted Idle columns.
- * @tx_any_err_frms: Count of transmitted frames containing any error that
- * prevents them from being passed to the network. Increments if
- * there is an ECC while reading the frame out of the transmit
- * buffer. Also increments if the transmit protocol assist (TPA)
- * block determines that the frame should not be sent.
- * @tx_drop_frms: Count of frames that could not be sent for no other reason
- * than internal MAC processing. Increments once whenever the
- * transmit buffer is flushed (due to an ECC error on a memory
- * descriptor).
- * @rx_ttl_frms: Count of total received MAC frames, including frames received
- * with frame-too-long, FCS, or length errors. This stat can be
- * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
- * everything, even "frames" as small as one byte of preamble.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of received frames, including framing characters,
- * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have 8
- * bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
- * even the preamble octets of "frames" as small as one byte of preamble.
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing
- * characters, of offloaded received frames that are passed
- * to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
- * the broadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to
- * the system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
- * is set to 1, then "more than 1518 octets" becomes "more than 1518
- * (1522 if VLAN-tagged) octets".
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS. In other
- * words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers. Note: If register
- * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
- * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
- * octets".
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 65 and 127
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 128 and 255
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 256 and 511
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding
- * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets.
- * Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of octets in received IP datagrams. Includes
- * errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_pause_count: Count of pause quanta during which the MAC has been
- * in the paused state. Recall, one pause quantum equates to 512
- * bit times.
- * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
- * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
- * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
- * this register is a count of all received MAC control frames.
- * Note: This stat may be configured to count all layer 2 errors
- * (i.e. length errors and FCS errors).
- * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
- * not include frames received with frame-too-long or
- * frame-too-short error.
- * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
- * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
- * for VLAN-tagged frames), inclusive, that does not match the
- * number of data octets (including pad) received. Also contains
- * a count of received frames with a length/type field less than
- * 46 (42 for VLAN-tagged frames) whose number of data octets
- * (including pad) received is greater than 46 (42 for VLAN-tagged
- * frames).
- * @rx_out_rng_len_err_frms: Count of received frames with length/type field
- * between 1501 and 1535 decimal, inclusive.
- * @rx_drop_frms: Count of received frames that could not be passed to the host.
- * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
- * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
- * for a list of reasons. Because the RMAC drops one frame at a time,
- * this stat also indicates the number of drop events.
- * @rx_discarded_frms: Count of received frames containing
- * any error that prevents
- * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
- * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
- * reasons.
- * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
- * port.
- * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
- * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain an unknown
- * PDU. Or frames that contain the Slow Protocols group MAC address,
- * but do not carry the Slow Protocols EtherType.
- * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
- * this Aggregation port.
- * @rx_fcs_discard: Count of received frames that are discarded because the
- * FCS check failed.
- * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain a badly
- * formed PDU. Or frames that carry the Slow Protocols EtherType,
- * but contain an illegal value of Protocol Subtype.
- * @rx_switch_discard: Count of received frames that are discarded by the
- * internal switch because they did not have an entry in the
- * Filtering Database. This includes frames that had an invalid
- * destination MAC address or VLAN ID. It also includes frames that are
- * discarded because they did not satisfy the length requirements
- * of the target VPATH.
- * @rx_len_discard: Count of received frames that are discarded because of an
- * invalid frame length (includes fragments, oversized frames and
- * mismatch between frame length and length/type field). This stat
- * can be configured
- * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
- * @rx_rpa_discard: Count of received frames that were discarded because the
- * receive protocol assist (RPA) discovered an error in the frame
- * or was unable to parse the frame.
- * @rx_l2_mgmt_discard: Count of Layer 2 management frames (e.g. pause frames,
- * Link Aggregation Control Protocol (LACP) frames, etc.) that are
- * discarded.
- * @rx_rts_discard: Count of received frames that are discarded by the receive
- * traffic steering (RTS) logic. Includes those frames discarded
- * because the SSC response contradicted the switch table, because
- * the SSC timed out, or because the target queue could not fit the
- * frame.
- * @rx_trash_discard: Count of received frames that are discarded because
- * receive traffic steering (RTS) steered the frame to the trash
- * queue.
- * @rx_buff_full_discard: Count of received frames that are discarded because
- * internal buffers are full. Includes frames discarded because the
- * RTS logic is waiting for an SSC lookup that has no timeout bound.
- * Also, includes frames that are dropped because the MAC2FAU buffer
- * is nearly full -- this can happen if the external receive buffer
- * is full and the receive path is backing up.
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
- * characters occurring between times of normal data transmission
- * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
- * incremented when either:
- * 1) The Reconciliation Sublayer (RS) is expecting one control
- * character and gets another (i.e. is expecting a Start
- * character, but gets another control character), or
- * 2) the Start control character is not in lane 0.
- * Only increments the count by one for each XGMII column.
- * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
- * during normal data transmission. If the Reconciliation Sublayer
- * (RS) receives a control character, other than a terminate control
- * character, during receipt of data octets then this register is
- * incremented. Also increments if the start frame delimiter is not
- * found in the correct location. Only increments the count by one
- * for each XGMII column.
- * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
- * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
- * Only includes symbol errors that are observed between the XGMII
- * Start Frame Delimiter and End Frame Delimiter, inclusive, and
- * only increments the count by one for each frame.
- * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time.
- * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_local_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a local fault.
- * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
- * to 1, then this stat increments when COLUMN2 is found within 'n'
- * clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
- * 0, then it means to search anywhere for COLUMN2).
- * @rx_jettison: Count of received frames that are jettisoned because internal
- * buffers are full.
- * @rx_remote_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a remote fault.
- *
- * XMAC Port Statistics.
- */
-struct vxge_hw_xmac_port_stats {
-/*0x000*/ u64 tx_ttl_frms;
-/*0x008*/ u64 tx_ttl_octets;
-/*0x010*/ u64 tx_data_octets;
-/*0x018*/ u64 tx_mcast_frms;
-/*0x020*/ u64 tx_bcast_frms;
-/*0x028*/ u64 tx_ucast_frms;
-/*0x030*/ u64 tx_tagged_frms;
-/*0x038*/ u64 tx_vld_ip;
-/*0x040*/ u64 tx_vld_ip_octets;
-/*0x048*/ u64 tx_icmp;
-/*0x050*/ u64 tx_tcp;
-/*0x058*/ u64 tx_rst_tcp;
-/*0x060*/ u64 tx_udp;
-/*0x068*/ u32 tx_parse_error;
-/*0x06c*/ u32 tx_unknown_protocol;
-/*0x070*/ u64 tx_pause_ctrl_frms;
-/*0x078*/ u32 tx_marker_pdu_frms;
-/*0x07c*/ u32 tx_lacpdu_frms;
-/*0x080*/ u32 tx_drop_ip;
-/*0x084*/ u32 tx_marker_resp_pdu_frms;
-/*0x088*/ u32 tx_xgmii_char2_match;
-/*0x08c*/ u32 tx_xgmii_char1_match;
-/*0x090*/ u32 tx_xgmii_column2_match;
-/*0x094*/ u32 tx_xgmii_column1_match;
-/*0x098*/ u32 unused1;
-/*0x09c*/ u16 tx_any_err_frms;
-/*0x09e*/ u16 tx_drop_frms;
-/*0x0a0*/ u64 rx_ttl_frms;
-/*0x0a8*/ u64 rx_vld_frms;
-/*0x0b0*/ u64 rx_offload_frms;
-/*0x0b8*/ u64 rx_ttl_octets;
-/*0x0c0*/ u64 rx_data_octets;
-/*0x0c8*/ u64 rx_offload_octets;
-/*0x0d0*/ u64 rx_vld_mcast_frms;
-/*0x0d8*/ u64 rx_vld_bcast_frms;
-/*0x0e0*/ u64 rx_accepted_ucast_frms;
-/*0x0e8*/ u64 rx_accepted_nucast_frms;
-/*0x0f0*/ u64 rx_tagged_frms;
-/*0x0f8*/ u64 rx_long_frms;
-/*0x100*/ u64 rx_usized_frms;
-/*0x108*/ u64 rx_osized_frms;
-/*0x110*/ u64 rx_frag_frms;
-/*0x118*/ u64 rx_jabber_frms;
-/*0x120*/ u64 rx_ttl_64_frms;
-/*0x128*/ u64 rx_ttl_65_127_frms;
-/*0x130*/ u64 rx_ttl_128_255_frms;
-/*0x138*/ u64 rx_ttl_256_511_frms;
-/*0x140*/ u64 rx_ttl_512_1023_frms;
-/*0x148*/ u64 rx_ttl_1024_1518_frms;
-/*0x150*/ u64 rx_ttl_1519_4095_frms;
-/*0x158*/ u64 rx_ttl_4096_8191_frms;
-/*0x160*/ u64 rx_ttl_8192_max_frms;
-/*0x168*/ u64 rx_ttl_gt_max_frms;
-/*0x170*/ u64 rx_ip;
-/*0x178*/ u64 rx_accepted_ip;
-/*0x180*/ u64 rx_ip_octets;
-/*0x188*/ u64 rx_err_ip;
-/*0x190*/ u64 rx_icmp;
-/*0x198*/ u64 rx_tcp;
-/*0x1a0*/ u64 rx_udp;
-/*0x1a8*/ u64 rx_err_tcp;
-/*0x1b0*/ u64 rx_pause_count;
-/*0x1b8*/ u64 rx_pause_ctrl_frms;
-/*0x1c0*/ u64 rx_unsup_ctrl_frms;
-/*0x1c8*/ u64 rx_fcs_err_frms;
-/*0x1d0*/ u64 rx_in_rng_len_err_frms;
-/*0x1d8*/ u64 rx_out_rng_len_err_frms;
-/*0x1e0*/ u64 rx_drop_frms;
-/*0x1e8*/ u64 rx_discarded_frms;
-/*0x1f0*/ u64 rx_drop_ip;
-/*0x1f8*/ u64 rx_drop_udp;
-/*0x200*/ u32 rx_marker_pdu_frms;
-/*0x204*/ u32 rx_lacpdu_frms;
-/*0x208*/ u32 rx_unknown_pdu_frms;
-/*0x20c*/ u32 rx_marker_resp_pdu_frms;
-/*0x210*/ u32 rx_fcs_discard;
-/*0x214*/ u32 rx_illegal_pdu_frms;
-/*0x218*/ u32 rx_switch_discard;
-/*0x21c*/ u32 rx_len_discard;
-/*0x220*/ u32 rx_rpa_discard;
-/*0x224*/ u32 rx_l2_mgmt_discard;
-/*0x228*/ u32 rx_rts_discard;
-/*0x22c*/ u32 rx_trash_discard;
-/*0x230*/ u32 rx_buff_full_discard;
-/*0x234*/ u32 rx_red_discard;
-/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
-/*0x23c*/ u32 rx_xgmii_data_err_cnt;
-/*0x240*/ u32 rx_xgmii_char1_match;
-/*0x244*/ u32 rx_xgmii_err_sym;
-/*0x248*/ u32 rx_xgmii_column1_match;
-/*0x24c*/ u32 rx_xgmii_char2_match;
-/*0x250*/ u32 rx_local_fault;
-/*0x254*/ u32 rx_xgmii_column2_match;
-/*0x258*/ u32 rx_jettison;
-/*0x25c*/ u32 rx_remote_fault;
-} __packed;
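-
-/*
- * Editorial sketch, not from the original header: the @rx_ttl_octets and
- * @rx_pause_count kernel-doc above describe two derivations (8 preamble
- * octets per frame for wire octets; 512 bit times per pause quantum).
- * Hypothetical helpers, assuming the default TTL_FRMS_HANDLING behaviour:
- */
-static inline u64 vxge_rx_wire_octets(const struct vxge_hw_xmac_port_stats *s)
-{
-	/* 8 preamble octets per counted frame, per the kernel-doc above */
-	return s->rx_ttl_octets + 8ULL * s->rx_ttl_frms;
-}
-
-static inline u64 vxge_rx_paused_ns(const struct vxge_hw_xmac_port_stats *s,
-				    u64 link_gbps)
-{
-	/* one pause quantum = 512 bit times; 512 / rate-in-Gbps gives ns */
-	return s->rx_pause_count * 512ULL / link_gbps;
-}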
-
-/**
- * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
- *
- * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
- * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
- * not including framing characters (i.e. less framing bits).
- * To determine the total octets of transmitted frames, including
- * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
- * this stat (the device always prepends 8 bytes of preamble for
- * each frame).
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
- * to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
- * such as a new IPv6 extension header, or an unsupported Routing
- * Type. The packet still has a checksum calculated but it may be
- * incorrect.
- * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
- * to the network. Increments because of: 1) An internal processing
- * error (such as an uncorrectable ECC error). 2) A frame parsing
- * error during IP checksum calculation.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted TCP segments. Does not include segments containing
- * retransmitted octets.
- * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
- * total number of segments retransmitted. Retransmitted segments
- * that are sourced by the host are counted by the host.
- * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted IP datagrams that could not be passed to the
- * network.
- *
- * XMAC Vpath TX Statistics.
- */
-struct vxge_hw_xmac_vpath_tx_stats {
- u64 tx_ttl_eth_frms;
- u64 tx_ttl_eth_octets;
- u64 tx_data_octets;
- u64 tx_mcast_frms;
- u64 tx_bcast_frms;
- u64 tx_ucast_frms;
- u64 tx_tagged_frms;
- u64 tx_vld_ip;
- u64 tx_vld_ip_octets;
- u64 tx_icmp;
- u64 tx_tcp;
- u64 tx_rst_tcp;
- u64 tx_udp;
- u32 tx_unknown_protocol;
- u32 tx_lost_ip;
- u32 unused1;
- u32 tx_parse_error;
- u64 tx_tcp_offload;
- u64 tx_retx_tcp_offload;
- u64 tx_lost_ip_offload;
-} __packed;
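-
-/*
- * Illustrative sketch only: the offload counters above allow a coarse
- * retransmission ratio for offloaded TCP sessions. The helper name is
- * hypothetical.
- */
-static inline u64
-vxge_tx_offload_retx_permille(const struct vxge_hw_xmac_vpath_tx_stats *s)
-{
-	if (!s->tx_tcp_offload)
-		return 0;
-	/* retransmitted segments per 1000 transmitted offloaded segments */
-	return s->tx_retx_tcp_offload * 1000ULL / s->tx_tcp_offload;
-}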
-
-/**
- * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
- *
- * @rx_ttl_eth_frms: Count of successfully received MAC frames.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_eth_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). Only counts octets
- * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
- * before FCS. To determine the total octets of received frames,
- * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
- * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
- * that have the required 8 bytes of preamble).
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing characters,
- * of offloaded received frames that are passed to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
- * broadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to the
- * system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed.
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS.
- * In other words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers.
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames
- * with length (including
- * FCS, but not framing bits) of between 65 and 127 octets inclusive.
- * Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 128 and 255 octets
- * inclusive. Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 256 and 511 octets
- * inclusive. Includes frames received with frame-too-long, FCS, or
- * length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
- * (+22 bytes if VLAN-tagged) octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of octets in received IP datagrams.
- * Includes errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_lost_frms: Count of received frames that could not be passed to the host.
- * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
- * for a list of reasons.
- * @rx_lost_ip: Count of received IP datagrams that could not be passed to
- * the host. See RX_LOST_FRMS for a list of reasons.
- * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of received IP datagrams that could not be passed to the host.
- * See RX_LOST_FRMS for a list of reasons.
- * @rx_various_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_sleep_discard: Count of received frames that are discarded because the
- * target VPATH is asleep (a Wake-on-LAN magic packet can be used
- * to awaken the VPATH).
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_queue_full_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
- *
- * XMAC Vpath RX Statistics.
- */
-struct vxge_hw_xmac_vpath_rx_stats {
- u64 rx_ttl_eth_frms;
- u64 rx_vld_frms;
- u64 rx_offload_frms;
- u64 rx_ttl_eth_octets;
- u64 rx_data_octets;
- u64 rx_offload_octets;
- u64 rx_vld_mcast_frms;
- u64 rx_vld_bcast_frms;
- u64 rx_accepted_ucast_frms;
- u64 rx_accepted_nucast_frms;
- u64 rx_tagged_frms;
- u64 rx_long_frms;
- u64 rx_usized_frms;
- u64 rx_osized_frms;
- u64 rx_frag_frms;
- u64 rx_jabber_frms;
- u64 rx_ttl_64_frms;
- u64 rx_ttl_65_127_frms;
- u64 rx_ttl_128_255_frms;
- u64 rx_ttl_256_511_frms;
- u64 rx_ttl_512_1023_frms;
- u64 rx_ttl_1024_1518_frms;
- u64 rx_ttl_1519_4095_frms;
- u64 rx_ttl_4096_8191_frms;
- u64 rx_ttl_8192_max_frms;
- u64 rx_ttl_gt_max_frms;
- u64 rx_ip;
- u64 rx_accepted_ip;
- u64 rx_ip_octets;
- u64 rx_err_ip;
- u64 rx_icmp;
- u64 rx_tcp;
- u64 rx_udp;
- u64 rx_err_tcp;
- u64 rx_lost_frms;
- u64 rx_lost_ip;
- u64 rx_lost_ip_offload;
- u16 rx_various_discard;
- u16 rx_sleep_discard;
- u16 rx_red_discard;
- u16 rx_queue_full_discard;
- u64 rx_mpa_ok_frms;
-} __packed;
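-
-/*
- * Illustrative sketch only: summing the four per-vpath discard counters
- * gives a single "frames not delivered" figure. Helper name is
- * hypothetical.
- */
-static inline u32
-vxge_vpath_rx_discards(const struct vxge_hw_xmac_vpath_rx_stats *s)
-{
-	/* the discard counters are u16; accumulate in a wider type */
-	return (u32)s->rx_various_discard + s->rx_sleep_discard +
-	       s->rx_red_discard + s->rx_queue_full_discard;
-}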
-
-/**
- * struct vxge_hw_xmac_stats - XMAC Statistics
- *
- * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
- * @port_stats: Statistics on ports (wire 0, wire 1, LAG)
- * @vpath_tx_stats: Per vpath XMAC TX stats
- * @vpath_rx_stats: Per vpath XMAC RX stats
- *
- * XMAC Statistics.
- */
-struct vxge_hw_xmac_stats {
- struct vxge_hw_xmac_aggr_stats
- aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
- struct vxge_hw_xmac_port_stats
- port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
- struct vxge_hw_xmac_vpath_tx_stats
- vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_xmac_vpath_rx_stats
- vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
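-
-/*
- * Illustrative sketch only: a device-wide RX frame total can be folded
- * from the per-vpath blocks above. Helper name is hypothetical.
- */
-static inline u64 vxge_total_rx_eth_frms(const struct vxge_hw_xmac_stats *st)
-{
-	u64 total = 0;
-	u32 i;
-
-	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
-		total += st->vpath_rx_stats[i].rx_ttl_eth_frms;
-	return total;
-}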
-
-/**
- * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
- * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
- * for the given VPATH
- * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
- * @ini_num_cpl_rcvd: The number of PCI read completions received by the
- * PIC block
- * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
- * block to the host
- * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
- * the PIC block
- * @wrcrdtarb_xoff: TBD
- * @rdcrdtarb_xoff: TBD
- * @vpath_genstats_count0: TBD
- * @vpath_genstats_count1: TBD
- * @vpath_genstats_count2: TBD
- * @vpath_genstats_count3: TBD
- * @vpath_genstats_count4: TBD
- * @vpath_genstats_count5: TBD
- * @tx_stats: Transmit stats
- * @rx_stats: Receive stats
- * @prog_event_vnum1: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
- * @prog_event_vnum0: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
- * @prog_event_vnum3: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
- * @prog_event_vnum2: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
- * @rx_multi_cast_frame_discard: TBD
- * @rx_frm_transferred: TBD
- * @rxd_returned: TBD
- * @rx_mpa_len_fail_frms: Count of received frames
- * that fail the MPA length check
- * @rx_mpa_mrk_fail_frms: Count of received frames
- * that fail the MPA marker check
- * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
- * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
- * frame buffer (and subsequently to the host).
- * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
- * because the VPATH is in reset
- * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
- * whenever the received frame matches the VPATH's Wake-on-LAN
- * signature(s) CRC.
- * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
- * because the VPATH is in reset. Includes frames that are discarded
- * because the current VPIN does not match the VPIN of the frame.
- *
- * Titan vpath hardware statistics.
- */
-struct vxge_hw_vpath_stats_hw_info {
-/*0x000*/ u32 ini_num_mwr_sent;
-/*0x004*/ u32 unused1;
-/*0x008*/ u32 ini_num_mrd_sent;
-/*0x00c*/ u32 unused2;
-/*0x010*/ u32 ini_num_cpl_rcvd;
-/*0x014*/ u32 unused3;
-/*0x018*/ u64 ini_num_mwr_byte_sent;
-/*0x020*/ u64 ini_num_cpl_byte_rcvd;
-/*0x028*/ u32 wrcrdtarb_xoff;
-/*0x02c*/ u32 unused4;
-/*0x030*/ u32 rdcrdtarb_xoff;
-/*0x034*/ u32 unused5;
-/*0x038*/ u32 vpath_genstats_count0;
-/*0x03c*/ u32 vpath_genstats_count1;
-/*0x040*/ u32 vpath_genstats_count2;
-/*0x044*/ u32 vpath_genstats_count3;
-/*0x048*/ u32 vpath_genstats_count4;
-/*0x04c*/ u32 unused6;
-/*0x050*/ u32 vpath_genstats_count5;
-/*0x054*/ u32 unused7;
-/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
-/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
-/*0x220*/ u64 unused9;
-/*0x228*/ u32 prog_event_vnum1;
-/*0x22c*/ u32 prog_event_vnum0;
-/*0x230*/ u32 prog_event_vnum3;
-/*0x234*/ u32 prog_event_vnum2;
-/*0x238*/ u16 rx_multi_cast_frame_discard;
-/*0x23a*/ u8 unused10[6];
-/*0x240*/ u32 rx_frm_transferred;
-/*0x244*/ u32 unused11;
-/*0x248*/ u16 rxd_returned;
-/*0x24a*/ u8 unused12[6];
-/*0x252*/ u16 rx_mpa_len_fail_frms;
-/*0x254*/ u16 rx_mpa_mrk_fail_frms;
-/*0x256*/ u16 rx_mpa_crc_fail_frms;
-/*0x258*/ u16 rx_permitted_frms;
-/*0x25c*/ u64 rx_vp_reset_discarded_frms;
-/*0x25e*/ u64 rx_wol_frms;
-/*0x260*/ u64 tx_vp_reset_discarded_frms;
-} __packed;
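-
-/*
- * Illustrative sketch only: because the structure is __packed, the 0x...
- * offset annotations can be verified at build time (assumes
- * linux/build_bug.h). The function is a hypothetical compile-time check,
- * not part of the driver API.
- */
-static inline void vxge_vpath_hw_stats_layout_check(void)
-{
-	BUILD_BUG_ON(offsetof(struct vxge_hw_vpath_stats_hw_info,
-			      tx_stats) != 0x058);
-	BUILD_BUG_ON(offsetof(struct vxge_hw_vpath_stats_hw_info,
-			      rx_stats) != 0x0e8);
-}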
-
-/**
- * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
- * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
- * by the adapter that were discarded because the VPATH is out of service
- * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
- * adapter that were discarded because the VPATH is out of service
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
- * the adapter that were discarded because the VPATH instance number does
- * not match
- * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
- * by the adapter that were discarded because the VPATH instance number
- * does not match
- * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
- * to the GENSTATS0_CFG for information on configuring this statistic
- * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
- * to the GENSTATS1_CFG for information on configuring this statistic
- * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
- * to the GENSTATS2_CFG for information on configuring this statistic
- * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
- * to the GENSTATS3_CFG for information on configuring this statistic
- * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
- * to the GENSTATS4_CFG for information on configuring this statistic
- * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
- * to the GENSTATS5_CFG for information on configuring this statistic
- * @pci.rstdrop_cpl 0x01c8 4
- * @pci.rstdrop_msg 0x01cc 4
- * @pci.rstdrop_client1 0x01d0 4
- * @pci.rstdrop_client0 0x01d4 4
- * @pci.rstdrop_client2 0x01d8 4
- * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
- * header credits were depleted
- * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
- * header credits were depleted
- * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
- * header credits were depleted
- * @pci.depl_cplh[vplane1] 0x01ea 2
- * @pci.depl_nph[vplane1] 0x01ec 2
- * @pci.depl_ph[vplane1] 0x01ee 2
- * @pci.depl_cplh[vplane2] 0x01f2 2
- * @pci.depl_nph[vplane2] 0x01f4 2
- * @pci.depl_ph[vplane2] 0x01f6 2
- * @pci.depl_cplh[vplane3] 0x01fa 2
- * @pci.depl_nph[vplane3] 0x01fc 2
- * @pci.depl_ph[vplane3] 0x01fe 2
- * @pci.depl_cplh[vplane4] 0x0202 2
- * @pci.depl_nph[vplane4] 0x0204 2
- * @pci.depl_ph[vplane4] 0x0206 2
- * @pci.depl_cplh[vplane5] 0x020a 2
- * @pci.depl_nph[vplane5] 0x020c 2
- * @pci.depl_ph[vplane5] 0x020e 2
- * @pci.depl_cplh[vplane6] 0x0212 2
- * @pci.depl_nph[vplane6] 0x0214 2
- * @pci.depl_ph[vplane6] 0x0216 2
- * @pci.depl_cplh[vplane7] 0x021a 2
- * @pci.depl_nph[vplane7] 0x021c 2
- * @pci.depl_ph[vplane7] 0x021e 2
- * @pci.depl_cplh[vplane8] 0x0222 2
- * @pci.depl_nph[vplane8] 0x0224 2
- * @pci.depl_ph[vplane8] 0x0226 2
- * @pci.depl_cplh[vplane9] 0x022a 2
- * @pci.depl_nph[vplane9] 0x022c 2
- * @pci.depl_ph[vplane9] 0x022e 2
- * @pci.depl_cplh[vplane10] 0x0232 2
- * @pci.depl_nph[vplane10] 0x0234 2
- * @pci.depl_ph[vplane10] 0x0236 2
- * @pci.depl_cplh[vplane11] 0x023a 2
- * @pci.depl_nph[vplane11] 0x023c 2
- * @pci.depl_ph[vplane11] 0x023e 2
- * @pci.depl_cplh[vplane12] 0x0242 2
- * @pci.depl_nph[vplane12] 0x0244 2
- * @pci.depl_ph[vplane12] 0x0246 2
- * @pci.depl_cplh[vplane13] 0x024a 2
- * @pci.depl_nph[vplane13] 0x024c 2
- * @pci.depl_ph[vplane13] 0x024e 2
- * @pci.depl_cplh[vplane14] 0x0252 2
- * @pci.depl_nph[vplane14] 0x0254 2
- * @pci.depl_ph[vplane14] 0x0256 2
- * @pci.depl_cplh[vplane15] 0x025a 2
- * @pci.depl_nph[vplane15] 0x025c 2
- * @pci.depl_ph[vplane15] 0x025e 2
- * @pci.depl_cplh[vplane16] 0x0262 2
- * @pci.depl_nph[vplane16] 0x0264 2
- * @pci.depl_ph[vplane16] 0x0266 2
- * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
- * credits were depleted
- * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
- * credits were depleted
- * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
- * credits were depleted
- * @pci.depl_cpld[vplane1] 0x0272 2
- * @pci.depl_npd[vplane1] 0x0274 2
- * @pci.depl_pd[vplane1] 0x0276 2
- * @pci.depl_cpld[vplane2] 0x027a 2
- * @pci.depl_npd[vplane2] 0x027c 2
- * @pci.depl_pd[vplane2] 0x027e 2
- * @pci.depl_cpld[vplane3] 0x0282 2
- * @pci.depl_npd[vplane3] 0x0284 2
- * @pci.depl_pd[vplane3] 0x0286 2
- * @pci.depl_cpld[vplane4] 0x028a 2
- * @pci.depl_npd[vplane4] 0x028c 2
- * @pci.depl_pd[vplane4] 0x028e 2
- * @pci.depl_cpld[vplane5] 0x0292 2
- * @pci.depl_npd[vplane5] 0x0294 2
- * @pci.depl_pd[vplane5] 0x0296 2
- * @pci.depl_cpld[vplane6] 0x029a 2
- * @pci.depl_npd[vplane6] 0x029c 2
- * @pci.depl_pd[vplane6] 0x029e 2
- * @pci.depl_cpld[vplane7] 0x02a2 2
- * @pci.depl_npd[vplane7] 0x02a4 2
- * @pci.depl_pd[vplane7] 0x02a6 2
- * @pci.depl_cpld[vplane8] 0x02aa 2
- * @pci.depl_npd[vplane8] 0x02ac 2
- * @pci.depl_pd[vplane8] 0x02ae 2
- * @pci.depl_cpld[vplane9] 0x02b2 2
- * @pci.depl_npd[vplane9] 0x02b4 2
- * @pci.depl_pd[vplane9] 0x02b6 2
- * @pci.depl_cpld[vplane10] 0x02ba 2
- * @pci.depl_npd[vplane10] 0x02bc 2
- * @pci.depl_pd[vplane10] 0x02be 2
- * @pci.depl_cpld[vplane11] 0x02c2 2
- * @pci.depl_npd[vplane11] 0x02c4 2
- * @pci.depl_pd[vplane11] 0x02c6 2
- * @pci.depl_cpld[vplane12] 0x02ca 2
- * @pci.depl_npd[vplane12] 0x02cc 2
- * @pci.depl_pd[vplane12] 0x02ce 2
- * @pci.depl_cpld[vplane13] 0x02d2 2
- * @pci.depl_npd[vplane13] 0x02d4 2
- * @pci.depl_pd[vplane13] 0x02d6 2
- * @pci.depl_cpld[vplane14] 0x02da 2
- * @pci.depl_npd[vplane14] 0x02dc 2
- * @pci.depl_pd[vplane14] 0x02de 2
- * @pci.depl_cpld[vplane15] 0x02e2 2
- * @pci.depl_npd[vplane15] 0x02e4 2
- * @pci.depl_pd[vplane15] 0x02e6 2
- * @pci.depl_cpld[vplane16] 0x02ea 2
- * @pci.depl_npd[vplane16] 0x02ec 2
- * @pci.depl_pd[vplane16] 0x02ee 2
- * @xgmac_port[3];
- * @xgmac_aggr[2];
- * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
- * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
- * @xgmac.orp_lro_events 0x0af8 8
- * @xgmac.orp_bs_events 0x0b00 8
- * @xgmac.orp_iwarp_events 0x0b08 8
- * @xgmac.tx_permitted_frms 0x0b14 4
- * @xgmac.port2_tx_any_frms 0x0b1d 1
- * @xgmac.port1_tx_any_frms 0x0b1e 1
- * @xgmac.port0_tx_any_frms 0x0b1f 1
- * @xgmac.port2_rx_any_frms 0x0b25 1
- * @xgmac.port1_rx_any_frms 0x0b26 1
- * @xgmac.port0_rx_any_frms 0x0b27 1
- *
- * Titan mrpcim hardware statistics.
- */
-struct vxge_hw_device_stats_mrpcim_info {
-/*0x0000*/ u32 pic_ini_rd_drop;
-/*0x0004*/ u32 pic_ini_wr_drop;
-/*0x0008*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
- /*0x0004*/ u32 unused1;
- } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
-/*0x0090*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
- /*0x0004*/ u32 unused2;
- } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
-/*0x0118*/ struct {
- /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
- /*0x0004*/ u32 unused3;
- } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
-/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
-/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
-/*0x01a8*/ u32 pic_genstats_count0;
-/*0x01ac*/ u32 pic_genstats_count1;
-/*0x01b0*/ u32 pic_genstats_count2;
-/*0x01b4*/ u32 pic_genstats_count3;
-/*0x01b8*/ u32 pic_genstats_count4;
-/*0x01bc*/ u32 unused4;
-/*0x01c0*/ u32 pic_genstats_count5;
-/*0x01c4*/ u32 unused5;
-/*0x01c8*/ u32 pci_rstdrop_cpl;
-/*0x01cc*/ u32 pci_rstdrop_msg;
-/*0x01d0*/ u32 pci_rstdrop_client1;
-/*0x01d4*/ u32 pci_rstdrop_client0;
-/*0x01d8*/ u32 pci_rstdrop_client2;
-/*0x01dc*/ u32 unused6;
-/*0x01e0*/ struct {
- /*0x0000*/ u16 unused7;
- /*0x0002*/ u16 pci_depl_cplh;
- /*0x0004*/ u16 pci_depl_nph;
- /*0x0006*/ u16 pci_depl_ph;
- } pci_depl_h_vplane[17];
-/*0x0268*/ struct {
- /*0x0000*/ u16 unused8;
- /*0x0002*/ u16 pci_depl_cpld;
- /*0x0004*/ u16 pci_depl_npd;
- /*0x0006*/ u16 pci_depl_pd;
- } pci_depl_d_vplane[17];
-/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
-/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
-/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
-/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
-/*0x0af0*/ u64 unused7;
-/*0x0af8*/ u64 unused8;
-/*0x0b00*/ u64 unused9;
-/*0x0b08*/ u64 unused10;
-/*0x0b10*/ u32 unused11;
-/*0x0b14*/ u32 xgmac_tx_permitted_frms;
-/*0x0b18*/ u32 unused12;
-/*0x0b1c*/ u8 unused13;
-/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
-/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
-/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
-/*0x0b20*/ u32 unused14;
-/*0x0b24*/ u8 unused15;
-/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
-/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
-/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
-} __packed;
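-
-/*
- * Illustrative sketch only: walking one of the 17-entry vplane arrays
- * above. The helper name and the pr_info() reporting are illustrative
- * (assumes linux/printk.h).
- */
-static inline void
-vxge_dump_ph_crdt_depleted(const struct vxge_hw_device_stats_mrpcim_info *m)
-{
-	u32 i;
-
-	for (i = 0; i < 17; i++)
-		pr_info("vplane%u: ph_crdt_depleted=%u\n", i,
-			m->pic_wrcrdtarb_ph_crdt_depleted_vplane[i]
-				.pic_wrcrdtarb_ph_crdt_depleted);
-}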
-
-/**
- * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
- * @vpath_info: VPath statistics
- * @vpath_info_sav: Vpath statistics saved
- *
- * Titan hardware statistics.
- */
-struct vxge_hw_device_stats_hw_info {
- struct vxge_hw_vpath_stats_hw_info
- *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_vpath_stats_hw_info
- vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_common_info - HW common
- * statistics for queues.
- * @full_cnt: Number of times the queue was full
- * @usage_cnt: usage count.
- * @usage_max: Maximum usage
- * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
- * @total_compl_cnt: Total descriptor completion count.
- *
- * Hw queue counters
- * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_common_info {
- u32 full_cnt;
- u32 usage_cnt;
- u32 usage_max;
- u32 reserve_free_swaps_cnt;
- u32 total_compl_cnt;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
- * @common_stats: Common counters for all queues
- * @total_posts: Total number of postings on the queue.
- * @total_buffers: Total number of buffers posted.
- * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
- * (index) in this array reflects the transfer code type, for instance
- * 0xA - "loss of link".
- * Value txd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW fifo counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_fifo_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 total_posts;
- u32 total_buffers;
- u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-};
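-
-/*
- * Illustrative sketch only: the t-code array is indexed by transfer code,
- * so reporting errors is a simple scan. Helper name is hypothetical
- * (assumes linux/printk.h).
- */
-static inline void
-vxge_report_tx_tcodes(const struct vxge_hw_vpath_stats_sw_fifo_info *f)
-{
-	u32 t;
-
-	for (t = 0; t < VXGE_HW_DTR_MAX_T_CODE; t++)
-		if (f->txd_t_code_err_cnt[t])
-			pr_warn("tx t-code 0x%x hit %u times\n",
-				t, f->txd_t_code_err_cnt[t]);
-}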
-
-/**
- * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
- * @common_stats: Common counters for all queues
- * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
- * (index) in this array reflects the transfer code type,
- * for instance
- * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
- * Value rxd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW ring counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_fifo_info{},
- */
-struct vxge_hw_vpath_stats_sw_ring_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
- * @unknown_alarms:
- * @network_sustained_fault:
- * @network_sustained_ok:
- * @kdfcctl_fifo0_overwrite:
- * @kdfcctl_fifo0_poison:
- * @kdfcctl_fifo0_dma_error:
- * @dblgen_fifo0_overflow:
- * @statsb_pif_chain_error:
- * @statsb_drop_timeout:
- * @target_illegal_access:
- * @ini_serr_det:
- * @prc_ring_bumps:
- * @prc_rxdcm_sc_err:
- * @prc_rxdcm_sc_abort:
- * @prc_quanta_size_err:
- *
- * HW vpath error statistics
- */
-struct vxge_hw_vpath_stats_sw_err {
- u32 unknown_alarms;
- u32 network_sustained_fault;
- u32 network_sustained_ok;
- u32 kdfcctl_fifo0_overwrite;
- u32 kdfcctl_fifo0_poison;
- u32 kdfcctl_fifo0_dma_error;
- u32 dblgen_fifo0_overflow;
- u32 statsb_pif_chain_error;
- u32 statsb_drop_timeout;
- u32 target_illegal_access;
- u32 ini_serr_det;
- u32 prc_ring_bumps;
- u32 prc_rxdcm_sc_err;
- u32 prc_rxdcm_sc_abort;
- u32 prc_quanta_size_err;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
- * @soft_reset_cnt: Number of times soft reset is done on this vpath.
- * @error_stats: error counters for the vpath
- * @ring_stats: counters for ring belonging to the vpath
- * @fifo_stats: counters for fifo belonging to the vpath
- *
- * HW vpath sw statistics
- * See also: struct vxge_hw_device_info{}.
- */
-struct vxge_hw_vpath_stats_sw_info {
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_err error_stats;
- struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
- struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
-};
-
-/**
- * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
- *
- * @not_traffic_intr_cnt: Number of times the host was interrupted
- * without new completions.
- * "Non-traffic interrupt counter".
- * @traffic_intr_cnt: Number of traffic interrupts for the device.
- * @total_intr_cnt: Total number of traffic interrupts for the device.
- * @total_intr_cnt == @traffic_intr_cnt +
- * @not_traffic_intr_cnt
- * @soft_reset_cnt: Number of times soft reset is done on this device.
- * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
- * HW per-device statistics.
- */
-struct vxge_hw_device_stats_sw_info {
- u32 not_traffic_intr_cnt;
- u32 traffic_intr_cnt;
- u32 total_intr_cnt;
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_info
- vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
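-
-/*
- * Illustrative sketch only: the kernel-doc above states the invariant
- * total_intr_cnt == traffic_intr_cnt + not_traffic_intr_cnt; a debug
- * build could assert it. Helper name is hypothetical.
- */
-static inline bool
-vxge_intr_counts_consistent(const struct vxge_hw_device_stats_sw_info *s)
-{
-	return s->total_intr_cnt ==
-	       s->traffic_intr_cnt + s->not_traffic_intr_cnt;
-}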
-
-/**
- * struct vxge_hw_device_stats_sw_err - HW device error statistics.
- * @vpath_alarms: Number of vpath alarms
- *
- * HW Device error stats
- */
-struct vxge_hw_device_stats_sw_err {
- u32 vpath_alarms;
-};
-
-/**
- * struct vxge_hw_device_stats - Contains HW per-device statistics,
- * including hardware statistics.
- * @devh: HW device handle.
- * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
- * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
- * space.
- * @hw_info_dma_acch: One more DMA handle used subsequently to free the
- * DMA object. Note that this and the previous handle have
- * physical meaning for Solaris; on Windows and Linux the
- * corresponding value will simply be a pointer to the PCI device.
- *
- * @hw_dev_info_stats: Titan statistics maintained by the hardware.
- * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
- * of completions per interrupt.
- * @sw_dev_err_stats: HW's "soft" device error statistics.
- *
- * Structure-container of HW per-device statistics. Note that per-channel
- * statistics are kept in separate structures under HW's fifo and ring
- * channels.
- */
-struct vxge_hw_device_stats {
- /* handles */
- struct __vxge_hw_device *devh;
-
- /* HW device hardware statistics */
- struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
-
- /* HW device "soft" stats */
- struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
- struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
-};
-
-enum vxge_hw_status vxge_hw_device_hw_stats_enable(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_hw_info *hw_stats);
-
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_sw_info *sw_stats);
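-
-/*
- * Illustrative usage sketch only: snapshotting both hardware and driver
- * statistics with the accessors declared above. The wrapper name is
- * hypothetical; VXGE_HW_OK is assumed to be the success status value.
- */
-static enum vxge_hw_status
-vxge_snapshot_stats(struct __vxge_hw_device *devh,
-		    struct vxge_hw_device_stats_hw_info *hw_stats,
-		    struct vxge_hw_device_stats_sw_info *sw_stats)
-{
-	enum vxge_hw_status status;
-
-	status = vxge_hw_device_stats_get(devh, hw_stats);
-	if (status != VXGE_HW_OK)
-		return status;
-	return vxge_hw_driver_stats_get(devh, sw_stats);
-}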
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(
- struct __vxge_hw_device *devh,
- u32 operation,
- u32 location,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
- struct vxge_hw_xmac_stats *xmac_stats);
-
-/**
- * enum vxge_hw_mgmt_reg_type - Register types.
- *
- * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
- * @vxge_hw_mgmt_reg_type_toc: TOC Registers
- * @vxge_hw_mgmt_reg_type_common: Common Registers
- * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
- * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
- * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
- * @vxge_hw_mgmt_reg_type_vpath: vpath registers
- *
- * Register type enumeration
- */
-enum vxge_hw_mgmt_reg_type {
- vxge_hw_mgmt_reg_type_legacy = 0,
- vxge_hw_mgmt_reg_type_toc = 1,
- vxge_hw_mgmt_reg_type_common = 2,
- vxge_hw_mgmt_reg_type_mrpcim = 3,
- vxge_hw_mgmt_reg_type_srpcim = 4,
- vxge_hw_mgmt_reg_type_vpmgmt = 5,
- vxge_hw_mgmt_reg_type_vpath = 6
-};
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 *value);
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 value);
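-
-/*
- * Illustrative usage sketch only: reading one 64-bit register through the
- * management interface declared above. The wrapper name is hypothetical,
- * and passing index 0 for the legacy register space is an assumption.
- */
-static enum vxge_hw_status
-vxge_read_legacy_reg(struct __vxge_hw_device *devh, u32 offset, u64 *value)
-{
-	return vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_legacy,
-				     0, offset, value);
-}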
-
-/**
- * enum vxge_hw_rxd_state - Descriptor (RXD) state.
- * @VXGE_HW_RXD_STATE_NONE: Invalid state.
- * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_rxd_state {
- VXGE_HW_RXD_STATE_NONE = 0,
- VXGE_HW_RXD_STATE_AVAIL = 1,
- VXGE_HW_RXD_STATE_POSTED = 2,
- VXGE_HW_RXD_STATE_FREED = 3
-};
-
-/**
- * struct vxge_hw_ring_rxd_info - Extended information associated with a
- * completed ring descriptor.
- * @syn_flag: SYN flag
- * @is_icmp: Is ICMP
- * @fast_path_eligible: Fast Path Eligible flag
- * @l3_cksum_valid: Set if the L3 checksum is valid
- * @l3_cksum: Result of IP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L3_CKSUM_OK would mean that
- * the checksum is correct, otherwise - the datagram is
- * corrupted.
- * @l4_cksum_valid: Set if the L4 checksum is valid
- * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L4_CKSUM_OK would mean that
- * the checksum is correct. Otherwise - the packet is
- * corrupted.
- * @frame: Zero or more of enum vxge_hw_frame_type flags.
- * See enum vxge_hw_frame_type{}.
- * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
- * various higher-layer protocols, including (but not restricted to)
- * TCP and UDP. See enum vxge_hw_frame_proto{}.
- * @is_vlan: If vlan tag is valid
- * @vlan: VLAN tag extracted from the received frame.
- * @rth_bucket: RTH bucket
- * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
- * has a matching entry in the Indirection table.
- * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
- * has a matching entry in the Socket Pair Direct Match table.
- * @rth_hash_type: RTH hash code of the function used to calculate the hash.
- * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Titan
- * hardware if RTH is enabled.
- */
-struct vxge_hw_ring_rxd_info {
- u32 syn_flag;
- u32 is_icmp;
- u32 fast_path_eligible;
- u32 l3_cksum_valid;
- u32 l3_cksum;
- u32 l4_cksum_valid;
- u32 l4_cksum;
- u32 frame;
- u32 proto;
- u32 is_vlan;
- u32 vlan;
- u32 rth_bucket;
- u32 rth_it_hit;
- u32 rth_spdm_hit;
- u32 rth_hash_type;
- u32 rth_value;
-};
-/**
- * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
- * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
- * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
- * presentation configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
- * such as unknown IPv6 header.
- * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error - frame integrity
- * error (such as FCS or ECC).
- * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error - the RxD
- * buffer(s) were not appropriately sized and data loss occurred.
- * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
- * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
- * Segment1 exceeded the capacity of Buffer1 and the remainder
- * was placed in Buffer2. Segment2 now starts in Buffer3.
- * No data loss or errors occurred.
- * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
- * assigned buffers has a size of 0 bytes.
- * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
- * VPath Reset or because of a VPIN mismatch.
- * @VXGE_HW_RING_T_CODE_UNUSED: Unused
- * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
- * transfer code condition occurred.
- *
- * Transfer codes returned by adapter.
- */
-enum vxge_hw_ring_tcode {
- VXGE_HW_RING_T_CODE_OK = 0x0,
- VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
- VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
- VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
- VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
- VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
- VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
- VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
- VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
- VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
- VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
- VXGE_HW_RING_T_CODE_UNUSED = 0xE,
- VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
-};
-
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh);
-
-void
-vxge_hw_ring_rxd_pre_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post_wmb(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void vxge_hw_ring_rxd_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh,
- u8 *t_code);
-
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u8 t_code);
-
-void vxge_hw_ring_rxd_free(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
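The receive descriptor calls above follow the reserve/post/complete/free cycle described by enum vxge_hw_rxd_state. A minimal, hypothetical completion loop built only from these declarations (my_process_frame() is an illustrative driver callback, not part of this API; VXGE_HW_OK is the header's success status):

	static void my_poll_ring(struct __vxge_hw_ring *ringh)
	{
		void *rxdh;
		u8 t_code;

		while (vxge_hw_ring_rxd_next_completed(ringh, &rxdh,
						       &t_code) == VXGE_HW_OK) {
			if (t_code != VXGE_HW_RING_T_CODE_OK)
				vxge_hw_ring_handle_tcode(ringh, rxdh, t_code);

			my_process_frame(rxdh);	/* hypothetical callback */
			vxge_hw_ring_rxd_free(ringh, rxdh);
		}
	}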
-/**
- * enum vxge_hw_frame_proto - Higher-layer Ethernet protocols.
- * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
- * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
- * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
- * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
- * @VXGE_HW_FRAME_PROTO_TCP: TCP.
- * @VXGE_HW_FRAME_PROTO_UDP: UDP.
- * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
- *
- * Higher layer ethernet protocols and options.
- */
-enum vxge_hw_frame_proto {
- VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
- VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
- VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
- VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
- VXGE_HW_FRAME_PROTO_TCP = 0x02,
- VXGE_HW_FRAME_PROTO_UDP = 0x01,
- VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
- VXGE_HW_FRAME_PROTO_UDP)
-};
-
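These protocol flags are single bits (except the composite VXGE_HW_FRAME_PROTO_TCP_OR_UDP), so the @proto field of struct vxge_hw_ring_rxd_info can be tested with plain masks; a small sketch, assuming ext_info was filled in for a completed RxD:

	if (ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP)
		; /* frame carries a TCP or UDP payload */
	if (ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG)
		; /* datagram is an IP fragment */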
-/**
- * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
- *
- * These gather codes are used to indicate the position of a TxD in a TxD list
- */
-enum vxge_hw_fifo_gather_code {
- VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
- VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
- VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
- VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
-};
-
-/**
- * enum vxge_hw_fifo_tcode - tcodes used in fifo
- * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
- * frame data) returned with corrupt data.
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
- * with no data.
- * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
- * frame or LSO MSS that was too long (>9800B).
- * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
- * Offload operation, due to improper header template,
- * unsupported protocol, etc.
- * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
- * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
- * data buffer transfer errors are encountered (see below).
- * Otherwise it is set to 0.
- *
- * These tcodes are returned in various API for TxD status
- */
-enum vxge_hw_fifo_tcode {
- VXGE_HW_FIFO_T_CODE_OK = 0x0,
- VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
- VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
- VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
- VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
- VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
- VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
-};
-
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- void **txdl_priv);
-
-void vxge_hw_fifo_txdl_buffer_set(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- u32 frag_idx,
- dma_addr_t dma_pointer,
- u32 size);
-
-void vxge_hw_fifo_txdl_post(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh);
-
-u32 vxge_hw_fifo_free_txdl_count_get(
- struct __vxge_hw_fifo *fifo_handle);
-
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- enum vxge_hw_fifo_tcode *t_code);
-
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code);
-
-void vxge_hw_fifo_txdl_free(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh);
-
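On the transmit side the same pattern applies: reserve a TxDL, attach DMA-mapped fragments, post, then reap completions via vxge_hw_fifo_txdl_next_completed(). A hedged single-fragment sketch (the fifo handle and the DMA mapping are assumed to be set up elsewhere):

	static enum vxge_hw_status
	my_xmit_one(struct __vxge_hw_fifo *fifoh, dma_addr_t dma, u32 len)
	{
		void *txdlh, *txdl_priv;
		enum vxge_hw_status status;

		status = vxge_hw_fifo_txdl_reserve(fifoh, &txdlh, &txdl_priv);
		if (status != VXGE_HW_OK)
			return status;	/* no free descriptors */

		vxge_hw_fifo_txdl_buffer_set(fifoh, txdlh, 0, dma, len);
		vxge_hw_fifo_txdl_post(fifoh, txdlh);
		return VXGE_HW_OK;
	}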
-/*
- * Device
- */
-
-#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
-#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-
-/*
- * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
- * bytes. Each memblock is contiguous DMA-able memory. Each
- * memblock contains 1 or more 4KB RxD blocks visible to the
- * Titan hardware.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: Set (1) if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-receive descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that the driver can ask
- * HW to allocate additional per-descriptor space for its own
- * (driver-specific) purposes.
- */
-struct __vxge_hw_ring_rxd_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
-#ifdef VXGE_DEBUG_ASSERT
- struct vxge_hw_mempool_dma *dma_object;
-#endif
-};
-
-struct vxge_hw_mempool_cbs {
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-};
-
-#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
- ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 *data1,
- u64 *data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 data1,
- u64 data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_enable(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-void vxge_hw_device_intr_enable(
- struct __vxge_hw_device *devh);
-
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
-
-void vxge_hw_device_intr_disable(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_mask_all(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_unmask_all(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_begin_irq(
- struct __vxge_hw_device *devh,
- u32 skip_alarms,
- u64 *reason);
-
-void vxge_hw_device_clear_tx_rx(
- struct __vxge_hw_device *devh);
-
-/*
- * Virtual Paths
- */
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
-
-u32 vxge_hw_vpath_id(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_vpath_mac_addr_add_mode {
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
- VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
- VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
-};
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
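The MAC-address calls maintain a per-vpath filter table; the get/get_next pair allows walking it. A hedged iteration sketch (vp is an open vpath handle; ETH_ALEN is the usual 6-byte MAC length):

	u8 mac[ETH_ALEN], mask[ETH_ALEN];

	if (vxge_hw_vpath_mac_addr_get(vp, mac, mask) == VXGE_HW_OK) {
		do {
			/* inspect mac/mask here */
		} while (vxge_hw_vpath_mac_addr_get_next(vp, mac, mask) ==
			 VXGE_HW_OK);
	}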
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_poll_rx(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_poll_tx(
- struct __vxge_hw_fifo *fifoh,
- struct sk_buff ***skb_ptr, int nr_skb, int *more);
-
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 skip_alarms);
-
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
- int *tim_msix_id, int alarm_msix_id);
-
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
-
-void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
-
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-enum vxge_hw_status vxge_hw_vpath_intr_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_mask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_unmask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
- void **dtrh);
-
-void
-vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
-
-void
-vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
-
-int
-vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
deleted file mode 100644
index b9efa28bab3e..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-version.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_VERSION_H
-#define VXGE_VERSION_H
-
-#define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "3"
-#define VXGE_VERSION_BUILD "22640"
-#define VXGE_VERSION_FOR "k"
-
-#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
-
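VXGE_FW_VER() packs the three version components into a single comparable integer: VXGE_FW_VER(1, 4, 4) evaluates to (1 << 16) + (4 << 8) + 4 = 0x010404, so firmware versions can be compared with plain integer operators. A hypothetical check against the known-bad version defined below:

	if (fw_version == VXGE_FW_DEAD_VER)
		; /* refuse to proceed with this firmware */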
-#define VXGE_DEAD_FW_VER_MAJOR 1
-#define VXGE_DEAD_FW_VER_MINOR 4
-#define VXGE_DEAD_FW_VER_BUILD 4
-
-#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
- VXGE_DEAD_FW_VER_MINOR, \
- VXGE_DEAD_FW_VER_BUILD)
-
-#define VXGE_EPROM_FW_VER_MAJOR 1
-#define VXGE_EPROM_FW_VER_MINOR 6
-#define VXGE_EPROM_FW_VER_BUILD 1
-
-#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
- VXGE_EPROM_FW_VER_MINOR, \
- VXGE_EPROM_FW_VER_BUILD)
-
-#define VXGE_CERT_FW_VER_MAJOR 1
-#define VXGE_CERT_FW_VER_MINOR 8
-#define VXGE_CERT_FW_VER_BUILD 1
-
-#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
- VXGE_CERT_FW_VER_MINOR, \
- VXGE_CERT_FW_VER_BUILD)
-
-#endif
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 9cff3d48acbc..9c0861d03634 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -5,6 +5,7 @@ nfp-objs := \
nfpcore/nfp6000_pcie.o \
nfpcore/nfp_cppcore.o \
nfpcore/nfp_cpplib.o \
+ nfpcore/nfp_dev.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
nfpcore/nfp_mutex.o \
@@ -19,18 +20,25 @@ nfp-objs := \
ccm_mbox.o \
devlink_param.o \
nfp_asm.o \
+ nfd3/dp.o \
+ nfd3/rings.o \
+ nfd3/xsk.o \
+ nfdk/dp.o \
+ nfdk/rings.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_dp.o \
nfp_net_ctrl.o \
nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
nfp_net_sriov.o \
+ nfp_net_xsk.o \
nfp_netvf_main.o \
nfp_port.o \
nfp_shared_buf.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e31f8fbbc696..df2ab5cbd49b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}
/* If the chain is ended by an load/store pair then this
- * could serve as the new head of the the next chain.
+ * could serve as the new head of the next chain.
*/
if (curr_pair_is_memcpy(meta1, meta2)) {
head_ld_meta = meta1;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 84d66d138c3d..f80f1a6953fa 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -289,7 +289,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
req_sz = sizeof(struct nfp_crypto_req_add_v6);
ipv6 = true;
@@ -474,6 +474,7 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
+ struct net *net = dev_net(netdev);
struct ipv6hdr *ipv6h;
struct tcphdr *th;
struct iphdr *iph;
@@ -494,13 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index a3242b36e216..2b383d92d7f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -149,7 +149,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
}
/* Pre_lag action must be first on action list.
- * If other actions already exist they need pushed forward.
+ * If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(nfp_flow->action_data + act_size,
@@ -220,7 +220,8 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
}
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES) &&
+ !(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
return -EOPNOTSUPP;
}
@@ -426,6 +427,12 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
+ if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: loaded firmware does not support tunnel flag offload");
+ return -EOPNOTSUPP;
+ }
+
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -435,7 +442,8 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
- set_tun->tun_id = ip_tun->key.tun_id;
+ if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
+ set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
@@ -473,17 +481,11 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
ip_rt_put(rt);
} else {
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
}
}
set_tun->tos = ip_tun->key.tos;
-
- if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
- return -EOPNOTSUPP;
- }
set_tun->tun_flags = ip_tun->key.tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -673,9 +675,9 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
fl_hl_mask->hop_limit;
break;
case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
- if (mask & ~IPV6_FLOW_LABEL_MASK ||
- exact & ~IPV6_FLOW_LABEL_MASK) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
+ if (mask & ~IPV6_FLOWINFO_MASK ||
+ exact & ~IPV6_FLOWINFO_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow info action");
return -EOPNOTSUPP;
}
@@ -922,6 +924,51 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
}
+static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
+{
+ size_t act_size = sizeof(struct nfp_fl_meter);
+ struct nfp_fl_meter *meter_act;
+
+ meter_act = (struct nfp_fl_meter *)act_data;
+
+ memset(meter_act, 0, act_size);
+
+ meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
+ meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ return meter_act;
+}
+
+static int
+nfp_flower_meter_action(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ struct nfp_fl_payload *nfp_fl, int *a_len,
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_fl_meter *fl_meter;
+ u32 meter_id;
+
+ if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: meter action size beyond the allowed maximum");
+ return -EOPNOTSUPP;
+ }
+
+ meter_id = action->hw_index;
+ if (!nfp_flower_search_meter_entry(app, meter_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "cannot offload flow table with unsupported police action");
+ return -EOPNOTSUPP;
+ }
+
+ fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
+ *a_len += sizeof(struct nfp_fl_meter);
+ fl_meter->meter_id = cpu_to_be32(meter_id);
+
+ return 0;
+}
+
static int
nfp_flower_output_action(struct nfp_app *app,
const struct flow_action_entry *act,
@@ -985,6 +1032,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
+ struct nfp_flower_priv *fl_priv = app->priv;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
@@ -1149,6 +1197,18 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*pkt_host = true;
break;
+ case FLOW_ACTION_POLICE:
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: unsupported police action in action list");
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
+ extack);
+ if (err)
+ return err;
+ break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 784292b16290..2df2af1da716 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -85,6 +85,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_METER 24
#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
@@ -95,8 +96,6 @@
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
-#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
-
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
@@ -260,6 +259,12 @@ struct nfp_fl_set_mpls {
__be32 lse;
};
+struct nfp_fl_meter {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 meter_id;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -723,6 +728,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_gretap(netdev))
return true;
+ if (netif_is_ip6gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index bfd7d1c35076..f693119541d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */
+#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_ct.h>
+
#include "conntrack.h"
#include "../nfp_port.h"
@@ -56,9 +59,17 @@ bool is_pre_ct_flow(struct flow_cls_offload *flow)
int i;
flow_action_for_each(i, act, &flow->rule->action) {
- if (act->id == FLOW_ACTION_CT && !act->ct.action)
- return true;
+ if (act->id == FLOW_ACTION_CT) {
+ /* A pre_ct rule may only have the ct or ct nat action; it cannot
+ * contain other ct actions, e.g. ct commit and so on.
+ */
+ if (!act->ct.action || act->ct.action == TCA_CT_ACT_NAT)
+ return true;
+ else
+ return false;
+ }
}
+
return false;
}
@@ -66,24 +77,173 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action_entry *act;
+ bool exist_ct_clear = false;
struct flow_match_ct ct;
+ int i;
+
+ /* A post ct entry cannot contain any ct action except ct_clear. */
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT) {
+ /* ignore ct clear action. */
+ if (act->ct.action == TCA_CT_ACT_CLEAR) {
+ exist_ct_clear = true;
+ continue;
+ }
+
+ return false;
+ }
+ }
if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
return true;
+ } else {
+ /* When doing nat with ct, the post ct entry ignores the ct
+ * status and matches the nat fields (sip/dip) instead. In this
+ * situation the flow chain index is non-zero and a ct clear
+ * action is present.
+ */
+ if (flow->common.chain_index && exist_ct_clear)
+ return true;
}
+
return false;
}
+/**
+ * get_mangled_key() - Mangle the key if mangle act exists
+ * @rule: rule that carries the actions
+ * @buf: pointer to key to be mangled
+ * @offset: used to adjust mangled offset in L2/L3/L4 header
+ * @key_sz: key size
+ * @htype: mangling type
+ *
+ * Return: buf, where the mangled key is stored.
+ */
+static void *get_mangled_key(struct flow_rule *rule, void *buf,
+ u32 offset, size_t key_sz,
+ enum flow_action_mangle_base htype)
+{
+ struct flow_action_entry *act;
+ u32 *val = (u32 *)buf;
+ u32 off, msk, key;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_MANGLE &&
+ act->mangle.htype == htype) {
+ off = act->mangle.offset - offset;
+ msk = act->mangle.mask;
+ key = act->mangle.val;
+
+ /* Mangling is supposed to be u32 aligned */
+ if (off % 4 || off >= key_sz)
+ continue;
+
+ val[off >> 2] &= msk;
+ val[off >> 2] |= key;
+ }
+ }
+
+ return buf;
+}
+
+/* Only tos and ttl are involved in the flow_match_ip structure, which
+ * doesn't conform to the layout of the ip/ipv6 header definition. So
+ * they need special processing here: fill them into the ip/ipv6
+ * header, so that mangling actions can work on them directly.
+ */
+#define NFP_IPV4_TOS_MASK GENMASK(23, 16)
+#define NFP_IPV4_TTL_MASK GENMASK(31, 24)
+#define NFP_IPV6_TCLASS_MASK GENMASK(27, 20)
+#define NFP_IPV6_HLIMIT_MASK GENMASK(7, 0)
+static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
+ bool is_v6)
+{
+ struct flow_match_ip match;
+ /* IPv4's ttl field is in third dword. */
+ __be32 ip_hdr[3];
+ u32 tmp, hdr_len;
+
+ flow_rule_match_ip(rule, &match);
+
+ if (is_v6) {
+ tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos);
+ ip_hdr[0] = cpu_to_be32(tmp);
+ tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl);
+ ip_hdr[1] = cpu_to_be32(tmp);
+ hdr_len = 2 * sizeof(__be32);
+ } else {
+ tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos);
+ ip_hdr[0] = cpu_to_be32(tmp);
+ tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl);
+ ip_hdr[2] = cpu_to_be32(tmp);
+ hdr_len = 3 * sizeof(__be32);
+ }
+
+ get_mangled_key(rule, ip_hdr, 0, hdr_len,
+ is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 :
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4);
+
+ match.key = buf;
+
+ if (is_v6) {
+ tmp = be32_to_cpu(ip_hdr[0]);
+ match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp);
+ tmp = be32_to_cpu(ip_hdr[1]);
+ match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp);
+ } else {
+ tmp = be32_to_cpu(ip_hdr[0]);
+ match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp);
+ tmp = be32_to_cpu(ip_hdr[2]);
+ match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp);
+ }
+
+ return buf;
+}
+
+/* Note entry1 and entry2 are not swappable: the ip and tport merge
+ * checks are only skipped for pre_ct and post_ct when pre_ct does nat.
+ */
+static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
+ struct nfp_fl_ct_flow_entry *entry2)
+{
+ /* Only pre_ct entries have the NFP_FL_ACTION_DO_NAT flag. */
+ if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
+ entry2->type == CT_TYPE_POST_CT)
+ return false;
+
+ return true;
+}
+
+/* Note entry1 and entry2 are not swappable: entry1 should be
+ * the former flow, whose mangle action (if any) needs to be
+ * taken into account, and entry2 should be the latter flow,
+ * whose actions we don't care about.
+ */
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
struct nfp_fl_ct_flow_entry *entry2)
{
unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
entry2->rule->match.dissector->used_keys;
- bool out;
+ bool out, is_v6 = false;
+ u8 ip_proto = 0;
+ /* Temporary buffer for mangling keys; 64B is enough to cover the
+ * max struct size of keys in the various fields that may be mangled.
+ * Supported fields to mangle:
+ * mac_src/mac_dst(struct flow_match_eth_addrs, 12B)
+ * nw_tos/nw_ttl(struct flow_match_ip, 2B)
+ * nw_src/nw_dst(struct flow_match_ipv4/6_addrs, 32B)
+ * tp_src/tp_dst(struct flow_match_ports, 4B)
+ */
+ char buf[64];
+
+ if (entry1->netdev && entry2->netdev &&
+ entry1->netdev != entry2->netdev)
+ return -EINVAL;
- /* check the overlapped fields one by one, the unmasked part
+ /* Check the overlapped fields one by one, the unmasked part
* should not conflict with each other.
*/
if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -101,36 +261,84 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_basic(entry1->rule, &match1);
flow_rule_match_basic(entry2->rule, &match2);
+
+ /* The n_proto field is a must in ct-related flows;
+ * it should be either ipv4 or ipv6.
+ */
+ is_v6 = match1.key->n_proto == htons(ETH_P_IPV6);
+ /* ip_proto field is a must when the port field is of interest */
+ ip_proto = match1.key->ip_proto;
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ /* If the pre ct entry does nat, the nat ip exists in the nft
+ * entry; the merge check is done when the nft and post ct
+ * entries are merged, so skip this ip merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv4_addrs match1, match2;
flow_rule_match_ipv4_addrs(entry1->rule, &match1);
flow_rule_match_ipv4_addrs(entry2->rule, &match2);
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf,
+ offsetof(struct iphdr, saddr),
+ sizeof(*match1.key),
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ /* If the pre ct entry does nat, the nat ip exists in the nft
+ * entry; the merge check is done when the nft and post ct
+ * entries are merged, so skip this ip merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv6_addrs match1, match2;
flow_rule_match_ipv6_addrs(entry1->rule, &match1);
flow_rule_match_ipv6_addrs(entry2->rule, &match2);
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf,
+ offsetof(struct ipv6hdr, saddr),
+ sizeof(*match1.key),
+ FLOW_ACT_MANGLE_HDR_TYPE_IP6);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ /* If the pre ct entry does nat, the nat tport exists in the nft
+ * entry; the merge check is done when the nft and post ct
+ * entries are merged, so skip this tport merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
+ enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
struct flow_match_ports match1, match2;
flow_rule_match_ports(entry1->rule, &match1);
flow_rule_match_ports(entry2->rule, &match2);
+
+ if (ip_proto == IPPROTO_UDP)
+ htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
+ else if (ip_proto == IPPROTO_TCP)
+ htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf, 0,
+ sizeof(*match1.key), htype);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -141,6 +349,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_eth_addrs(entry1->rule, &match1);
flow_rule_match_eth_addrs(entry2->rule, &match2);
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf, 0,
+ sizeof(*match1.key),
+ FLOW_ACT_MANGLE_HDR_TYPE_ETH);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -181,6 +395,8 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_ip(entry1->rule, &match1);
flow_rule_match_ip(entry2->rule, &match2);
+
+ match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6);
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -252,77 +468,46 @@ check_failed:
return -EINVAL;
}
-static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
- struct flow_rule *rule)
+static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
{
- enum flow_action_mangle_base htype = a_in->mangle.htype;
- u32 offset = a_in->mangle.offset;
+ struct flow_match_vlan match;
+
+ if (unlikely(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)))
+ return -EOPNOTSUPP;
+
+ /* post_ct does not match VLAN KEY, can be merged. */
+ if (likely(!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)))
+ return 0;
+
+ switch (a_in->id) {
+ /* pre_ct has a pop vlan action, so post_ct cannot match the VLAN key; cannot be merged. */
+ case FLOW_ACTION_VLAN_POP:
+ return -EOPNOTSUPP;
- switch (htype) {
- case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
+ flow_rule_match_vlan(rule, &match);
+ /* different vlan id, cannot be merged. */
+ if ((match.key->vlan_id & match.mask->vlan_id) ^
+ (a_in->vlan.vid & match.mask->vlan_id))
return -EOPNOTSUPP;
- break;
- case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_ip(rule, &match);
- if (offset == offsetof(struct iphdr, ttl) &&
- match.mask->ttl)
- return -EOPNOTSUPP;
- if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
- match.mask->tos)
- return -EOPNOTSUPP;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- struct flow_match_ipv4_addrs match;
-
- flow_rule_match_ipv4_addrs(rule, &match);
- if (offset == offsetof(struct iphdr, saddr) &&
- match.mask->src)
- return -EOPNOTSUPP;
- if (offset == offsetof(struct iphdr, daddr) &&
- match.mask->dst)
- return -EOPNOTSUPP;
- }
- break;
- case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_ip(rule, &match);
- if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
- match.mask->ttl)
- return -EOPNOTSUPP;
- /* for ipv6, tos and flow_lbl are in the same word */
- if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
- match.mask->tos)
- return -EOPNOTSUPP;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- struct flow_match_ipv6_addrs match;
-
- flow_rule_match_ipv6_addrs(rule, &match);
- if (offset >= offsetof(struct ipv6hdr, saddr) &&
- offset < offsetof(struct ipv6hdr, daddr) &&
- memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
- return -EOPNOTSUPP;
- if (offset >= offsetof(struct ipv6hdr, daddr) &&
- offset < sizeof(struct ipv6hdr) &&
- memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
- return -EOPNOTSUPP;
- }
- break;
- case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
- case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
- /* currently only can modify ports */
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+
+ /* different tpid, cannot be merged. */
+ if ((match.key->vlan_tpid & match.mask->vlan_tpid) ^
+ (a_in->vlan.proto & match.mask->vlan_tpid))
return -EOPNOTSUPP;
+
+ /* different priority, cannot be merged. */
+ if ((match.key->vlan_priority & match.mask->vlan_priority) ^
+ (a_in->vlan.prio & match.mask->vlan_priority))
+ return -EOPNOTSUPP;
+
break;
default:
- break;
+ return -EOPNOTSUPP;
}
+
return 0;
}
@@ -331,22 +516,18 @@ static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
struct nfp_fl_ct_flow_entry *nft_entry)
{
struct flow_action_entry *act;
- int err, i;
+ int i, err;
/* Check for pre_ct->action conflicts */
flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
switch (act->id) {
- case FLOW_ACTION_MANGLE:
- err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
- if (err)
- return err;
- err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
- if (err)
- return err;
- break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
case FLOW_ACTION_VLAN_MANGLE:
+ err = nfp_ct_check_vlan_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
case FLOW_ACTION_MPLS_PUSH:
case FLOW_ACTION_MPLS_POP:
case FLOW_ACTION_MPLS_MANGLE:
@@ -359,11 +540,6 @@ static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
/* Check for nft->action conflicts */
flow_action_for_each(i, act, &nft_entry->rule->action) {
switch (act->id) {
- case FLOW_ACTION_MANGLE:
- err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
- if (err)
- return err;
- break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
case FLOW_ACTION_VLAN_MANGLE:
@@ -403,6 +579,12 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
return -EINVAL;
return 0;
+ } else {
+ /* A post_ct entry with a ct clear action will not match the
+ * ct status when the nft entry is a nat entry.
+ */
+ if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
+ return 0;
}
return -EINVAL;
@@ -442,6 +624,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv6);
}
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ map[FLOW_PAY_QINQ] = key_size;
+ key_size += sizeof(struct nfp_flower_vlan);
+ }
+
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
map[FLOW_PAY_GRE] = key_size;
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
@@ -450,11 +637,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
}
- if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
- map[FLOW_PAY_QINQ] = key_size;
- key_size += sizeof(struct nfp_flower_vlan);
- }
-
if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
(in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
map[FLOW_PAY_UDP_TUN] = key_size;
@@ -472,11 +654,37 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
return key_size;
}
+/* Get the csum flag according to the ip proto and mangle action. */
+static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
+{
+ if (a_in->id != FLOW_ACTION_MANGLE)
+ return;
+
+ switch (a_in->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ *csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
+ if (ip_proto == IPPROTO_TCP)
+ *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+ else if (ip_proto == IPPROTO_UDP)
+ *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ default:
+ break;
+ }
+}
+
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
struct nfp_flower_priv *priv,
struct net_device *netdev,
struct nfp_fl_payload *flow_pay)
{
+ enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
struct flow_action_entry *a_in;
int i, j, num_actions, id;
struct flow_rule *a_rule;
@@ -486,19 +694,29 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
rules[CT_TYPE_NFT]->action.num_entries +
rules[CT_TYPE_POST_CT]->action.num_entries;
- a_rule = flow_rule_alloc(num_actions);
+ /* Add one action to make sure there is enough room to add a
+ * checksum action when doing nat.
+ */
+ a_rule = flow_rule_alloc(num_actions + 1);
if (!a_rule)
return -ENOMEM;
/* Actions need a BASIC dissector. */
a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+ /* A post_ct entry has at least one action. */
+ if (rules[CT_TYPE_POST_CT]->action.num_entries != 0)
+ tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
/* Copy actions */
for (j = 0; j < _CT_TYPE_MAX; j++) {
+ u32 csum_updated = 0;
+ u8 ip_proto = 0;
+
if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
- /* ip_proto is the only field that needed in later compile_action,
+ /* ip_proto is the only field that is needed in later compile_action,
* needed to set the correct checksum flags. It doesn't really matter
* which input rule's ip_proto field we take as the earlier merge checks
* would have made sure that they don't conflict. We do not know which
@@ -506,8 +724,10 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
* through the subflows and assign the proper subflow to a_rule
*/
flow_rule_match_basic(rules[j], &match);
- if (match.mask->ip_proto)
+ if (match.mask->ip_proto) {
a_rule->match = rules[j]->match;
+ ip_proto = match.key->ip_proto;
+ }
}
for (i = 0; i < rules[j]->action.num_entries; i++) {
@@ -524,11 +744,32 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
case FLOW_ACTION_CT_METADATA:
continue;
default:
+ /* The nft entry is generated by tc ct, whose mangle actions
+ * do not care about the stats; inherit the post entry stats
+ * to satisfy the flow_action_hw_stats_check.
+ */
+ if (j == CT_TYPE_NFT) {
+ if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
+ a_in->hw_stats = tmp_stats;
+ nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
+ }
memcpy(&a_rule->action.entries[offset++],
a_in, sizeof(struct flow_action_entry));
break;
}
}
+ /* The nft entry has mangle actions but no checksum action when
+ * doing NAT; the hardware automatically fixes the IPv4 and
+ * TCP/UDP checksums, so add a csum action to pass the csum
+ * action check.
+ */
+ if (csum_updated) {
+ struct flow_action_entry *csum_action;
+
+ csum_action = &a_rule->action.entries[offset++];
+ csum_action->id = FLOW_ACTION_CSUM;
+ csum_action->csum_flags = csum_updated;
+ csum_action->hw_stats = tmp_stats;
+ }
}
/* Some actions would have been ignored, so update the num_entries field */
@@ -693,6 +934,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+ offset = key_map[FLOW_PAY_QINQ];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+ (struct nfp_flower_vlan *)msk,
+ rules[i]);
+ }
+ }
+
if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
offset = key_map[FLOW_PAY_GRE];
key = kdata + offset;
@@ -733,17 +985,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
- offset = key_map[FLOW_PAY_QINQ];
- key = kdata + offset;
- msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
- (struct nfp_flower_vlan *)msk,
- rules[i]);
- }
- }
-
if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
offset = key_map[FLOW_PAY_UDP_TUN];
@@ -914,13 +1155,13 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
/* Check that the two tc flows are also compatible with
* the nft entry. No need to check the pre_ct and post_ct
* entries as that was already done during pre_merge.
- * The nft entry does not have a netdev or chain populated, so
+ * The nft entry does not have a chain populated, so
* skip this check.
*/
err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
if (err)
return err;
- err = nfp_ct_merge_check(post_ct_entry, nft_entry);
+ err = nfp_ct_merge_check(nft_entry, post_ct_entry);
if (err)
return err;
err = nfp_ct_check_meta(post_ct_entry, nft_entry);
@@ -948,7 +1189,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
nft_m_entry->tc_m_parent = tc_m_entry;
nft_m_entry->nft_parent = nft_entry;
nft_m_entry->tc_flower_cookie = 0;
- /* Copy the netdev from one the pre_ct entry. When the tc_m_entry was created
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
* it only combined them if the netdevs were the same, so can use any of them.
*/
nft_m_entry->netdev = pre_ct_entry->netdev;
@@ -999,15 +1240,13 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
pre_ct_entry = ct_entry2;
}
- if (post_ct_entry->netdev != pre_ct_entry->netdev)
- return -EINVAL;
/* Checks that the chain_index of the filter matches the
* chain_index of the GOTO action.
*/
if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
return -EINVAL;
- err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
+ err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
if (err)
return err;
@@ -1080,7 +1319,7 @@ nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
zt->priv = priv;
zt->nft = NULL;
- /* init the various hash tables and lists*/
+ /* init the various hash tables and lists */
INIT_LIST_HEAD(&zt->pre_ct_list);
INIT_LIST_HEAD(&zt->post_ct_list);
INIT_LIST_HEAD(&zt->nft_flows_list);
@@ -1114,6 +1353,63 @@ err_tc_merge_tb_init:
return ERR_PTR(err);
}
+static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.key->ingress_ifindex & match.mask->ingress_ifindex)
+ return __dev_get_by_index(&init_net,
+ match.key->ingress_ifindex);
+ }
+
+ return NULL;
+}
+
+static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
+{
+ if (mangle_action->id != FLOW_ACTION_MANGLE)
+ return;
+
+ switch (mangle_action->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
+ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ return;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
+ mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+ return;
+
+ default:
+ return;
+ }
+}
+
+static int nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
+ struct nfp_fl_ct_flow_entry *entry)
+{
+ switch (act->id) {
+ case FLOW_ACTION_CT:
+ if (act->ct.action == TCA_CT_ACT_NAT)
+ entry->flags |= NFP_FL_ACTION_DO_NAT;
+ break;
+
+ case FLOW_ACTION_MANGLE:
+ entry->flags |= NFP_FL_ACTION_DO_MANGLE;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
struct net_device *netdev,
@@ -1154,6 +1450,9 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
entry->rule->match.dissector = &nft_match->dissector;
entry->rule->match.mask = &nft_match->mask;
entry->rule->match.key = &nft_match->key;
+
+ if (!netdev)
+ netdev = get_netdev_from_rule(entry->rule);
} else {
entry->rule->match.dissector = flow->rule->match.dissector;
entry->rule->match.mask = flow->rule->match.mask;
@@ -1177,6 +1476,13 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
new_act = &entry->rule->action.entries[i];
memcpy(new_act, act, sizeof(struct flow_action_entry));
+ /* The nft entry mangle fields are in host byte order and need
+ * translating to network byte order.
+ */
+ if (is_nft)
+ nfp_nft_ct_translate_mangle_action(new_act);
+
+ nfp_nft_ct_set_flow_flag(new_act, entry);
/* Entunnel is a special case, need to allocate and copy
* tunnel info.
*/
@@ -1266,7 +1572,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
*/
if (is_nft_flow) {
- /* Need to iterate through list of nft_flow entries*/
+ /* Need to iterate through list of nft_flow entries */
struct nfp_fl_ct_flow_entry *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
@@ -1274,7 +1580,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
cleanup_nft_merge_entry(m_entry);
}
} else {
- /* Need to iterate through list of tc_merged_flow entries*/
+ /* Need to iterate through list of tc_merged_flow entries */
struct nfp_fl_ct_tc_merge *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index beb6cceff9d8..762c0b36e269 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -103,6 +103,10 @@ enum nfp_nfp_layer_name {
_FLOW_PAY_LAYERS_MAX
};
+/* NFP flow entry flags. */
+#define NFP_FL_ACTION_DO_NAT BIT(0)
+#define NFP_FL_ACTION_DO_MANGLE BIT(1)
+
/**
* struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
* @cookie: Flow cookie, same as original TC flow, used as key
@@ -115,6 +119,7 @@ enum nfp_nfp_layer_name {
* @rule: Reference to the original TC flow rule
* @stats: Used to cache stats for updating
* @tun_offset: Used to indicate tunnel action offset in action list
+ * @flags: Used to indicate flow flags, such as NAT, which are used by merge.
*/
struct nfp_fl_ct_flow_entry {
unsigned long cookie;
@@ -127,6 +132,7 @@ struct nfp_fl_ct_flow_entry {
struct flow_rule *rule;
struct flow_stats stats;
u8 tun_offset; // Set to NFP_FL_CT_NO_TUN if no tun
+ u8 flags;
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 63907aeb3884..e92860e20a24 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -234,7 +234,7 @@ nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
}
/* To signal the end of a batch, both the switch and last flags are set
- * and the the reserved SYNC group ID is used.
+ * and the reserved SYNC group ID is used.
*/
if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
@@ -576,7 +576,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
group->dirty = true;
group->slave_cnt = slave_count;
- /* Group may have been on queue for removal but is now offfloable. */
+ /* Group may have been on queue for removal but is now offloadable. */
group->to_remove = false;
mutex_unlock(&lag->lock);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index ac1dcfa1d179..4d960a9641b3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -266,7 +266,7 @@ nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
int i, err, count = 0;
reprs = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
if (!reprs)
return 0;
@@ -295,7 +295,7 @@ nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
if (!tot_repl)
return 0;
- lockdep_assert_held(&app->pf->lock);
+ assert_nfp_app_locked(app);
if (!wait_event_timeout(priv->reify_wait_queue,
atomic_read(replies) >= tot_repl,
NFP_FL_REPLY_TIMEOUT)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 917c450a7aad..cb799d18682d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -12,7 +12,9 @@
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <net/flow_offload.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>
@@ -48,6 +50,8 @@ struct nfp_app;
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_QOS_PPS BIT(9)
+#define NFP_FL_FEATS_QOS_METER BIT(10)
+#define NFP_FL_FEATS_DECAP_V2 BIT(11)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -63,7 +67,9 @@ struct nfp_app;
NFP_FL_FEATS_PRE_TUN_RULES | \
NFP_FL_FEATS_IPV6_TUN | \
NFP_FL_FEATS_VLAN_QINQ | \
- NFP_FL_FEATS_QOS_PPS)
+ NFP_FL_FEATS_QOS_PPS | \
+ NFP_FL_FEATS_QOS_METER | \
+ NFP_FL_FEATS_DECAP_V2)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -82,12 +88,8 @@ struct nfp_fl_stats_id {
* @offloaded_macs: Hashtable of the offloaded MAC addresses
* @ipv4_off_list: List of IPv4 addresses to offload
* @ipv6_off_list: List of IPv6 addresses to offload
- * @neigh_off_list_v4: List of IPv4 neighbour offloads
- * @neigh_off_list_v6: List of IPv6 neighbour offloads
* @ipv4_off_lock: Lock for the IPv4 address list
* @ipv6_off_lock: Lock for the IPv6 address list
- * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list
- * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list
* @mac_off_ids: IDA to manage id assignment for offloaded MACs
* @neigh_nb: Notifier to monitor neighbour state
*/
@@ -95,17 +97,95 @@ struct nfp_fl_tunnel_offloads {
struct rhashtable offloaded_macs;
struct list_head ipv4_off_list;
struct list_head ipv6_off_list;
- struct list_head neigh_off_list_v4;
- struct list_head neigh_off_list_v6;
struct mutex ipv4_off_lock;
struct mutex ipv6_off_lock;
- spinlock_t neigh_off_lock_v4;
- spinlock_t neigh_off_lock_v6;
struct ida mac_off_ids;
struct notifier_block neigh_nb;
};
/**
+ * struct nfp_tun_neigh - basic neighbour data
+ * @dst_addr: Destination MAC address
+ * @src_addr: Source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv4
+ */
+struct nfp_tun_neigh {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
+ * struct nfp_tun_neigh_ext - extended neighbour data
+ * @vlan_tpid: VLAN_TPID match field
+ * @vlan_tci: VLAN_TCI match field
+ * @host_ctx: Host context ID to be saved here
+ */
+struct nfp_tun_neigh_ext {
+ __be16 vlan_tpid;
+ __be16 vlan_tci;
+ __be32 host_ctx;
+};
+
+/**
+ * struct nfp_tun_neigh_v4 - neighbour/route entry on the NFP for IPv4
+ * @dst_ipv4: Destination IPv4 address
+ * @src_ipv4: Source IPv4 address
+ * @common: Neighbour/route common info
+ * @ext: Neighbour/route extended info
+ */
+struct nfp_tun_neigh_v4 {
+ __be32 dst_ipv4;
+ __be32 src_ipv4;
+ struct nfp_tun_neigh common;
+ struct nfp_tun_neigh_ext ext;
+};
+
+/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP for IPv6
+ * @dst_ipv6: Destination IPv6 address
+ * @src_ipv6: Source IPv6 address
+ * @common: Neighbour/route common info
+ * @ext: Neighbour/route extended info
+ */
+struct nfp_tun_neigh_v6 {
+ struct in6_addr dst_ipv6;
+ struct in6_addr src_ipv6;
+ struct nfp_tun_neigh common;
+ struct nfp_tun_neigh_ext ext;
+};
+
+/**
+ * struct nfp_neigh_entry - cached neighbour entry linked to offloaded flows
+ * @neigh_cookie: Cookie for hashtable lookup
+ * @ht_node: rhash_head entry for hashtable
+ * @list_head: Needed as member of linked_nn_entries list
+ * @payload: The neighbour info payload
+ * @flow: Linked flow rule
+ * @is_ipv6: Flag to indicate if payload is ipv6 or ipv4
+ */
+struct nfp_neigh_entry {
+ unsigned long neigh_cookie;
+ struct rhash_head ht_node;
+ struct list_head list_head;
+ char *payload;
+ struct nfp_predt_entry *flow;
+ bool is_ipv6;
+};
+
+/**
+ * struct nfp_predt_entry - offloaded pre-tunnel (decap) flow entry
+ * @list_head: List head to attach to predt_list
+ * @flow_pay: Direct link to flow_payload
+ * @nn_list: List of linked nfp_neigh_entries
+ */
+struct nfp_predt_entry {
+ struct list_head list_head;
+ struct nfp_fl_payload *flow_pay;
+ struct list_head nn_list;
+};
+
+/**
* struct nfp_mtu_conf - manage MTU setting
* @portnum: NFP port number of repr with requested MTU change
* @requested_val: MTU value requested for repr
@@ -191,11 +271,16 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
+ * @meter_stats_lock: Lock on meter stats updates
+ * @meter_table: Hash table used to store the meter table
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
* @ct_zone_table: Hash table used to store the different zones
* @ct_zone_wc: Special zone entry for wildcarded zone matches
* @ct_map_table: Hash table used to reference ct flows
+ * @predt_list: List to keep track of decap pretun flows
+ * @neigh_table: Table to keep track of neighbour entries
+ * @predt_lock: Lock to serialise predt/neigh table updates
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -228,11 +313,16 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
+ struct mutex meter_stats_lock; /* Protect the meter stats */
+ struct rhashtable meter_table;
int pre_tun_rule_cnt;
struct rhashtable merge_table;
struct rhashtable ct_zone_table;
struct nfp_fl_ct_zone_entry *ct_zone_wc;
struct rhashtable ct_map_table;
+ struct list_head predt_list;
+ struct rhashtable neigh_table;
+ spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
};
/**
@@ -336,9 +426,14 @@ struct nfp_fl_payload {
struct list_head linked_flows;
bool in_hw;
struct {
+ struct nfp_predt_entry *predt;
struct net_device *dev;
+ __be16 vlan_tpid;
__be16 vlan_tci;
__be16 port_idx;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ bool is_ipv6;
} pre_tun_rule;
};
@@ -361,6 +456,7 @@ struct nfp_fl_payload_link {
extern const struct rhashtable_params nfp_flower_table_params;
extern const struct rhashtable_params merge_table_params;
+extern const struct rhashtable_params neigh_table_params;
struct nfp_merge_info {
u64 parent_ctx;
@@ -374,6 +470,31 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
+struct nfp_meter_stats_entry {
+ u64 pkts;
+ u64 bytes;
+ u64 drops;
+};
+
+struct nfp_meter_entry {
+ struct rhash_head ht_node;
+ u32 meter_id;
+ bool bps;
+ u32 rate;
+ u32 burst;
+ u64 used;
+ struct nfp_meter_stats {
+ u64 update;
+ struct nfp_meter_stats_entry curr;
+ struct nfp_meter_stats_entry prev;
+ } stats;
+};
+
+enum nfp_meter_op {
+ NFP_METER_ADD,
+ NFP_METER_DEL,
+};
+
static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
struct net_device *netdev)
@@ -547,6 +668,10 @@ void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev);
+void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
+ struct nfp_predt_entry *predt);
+void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
+ struct nfp_predt_entry *predt);
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
@@ -569,4 +694,18 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow);
+
+int nfp_setup_tc_act_offload(struct nfp_app *app,
+ struct flow_offload_action *fl_act);
+int nfp_init_meter_table(struct nfp_app *app);
+void nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv);
+void nfp_act_stats_reply(struct nfp_app *app, void *pmsg);
+int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
+ bool pps, u32 id, u32 rate, u32 burst);
+int nfp_flower_setup_meter_entry(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ enum nfp_meter_op op,
+ u32 meter_id);
+struct nfp_meter_entry *
+nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id);
#endif
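Note on the nfp_neigh_entry added above: it carries its firmware message through a bare char *payload rather than a fixed member, because the payload is either a v4 or a v6 structure of different size. A minimal standalone sketch of the single-allocation idiom this enables (illustrative names, not driver code; the driver does the equivalent kzalloc in tunnel_conf.c):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	unsigned long cookie;  /* hashtable key, as in nfp_neigh_entry */
	char *payload;         /* points at trailing storage */
	int is_ipv6;
};

static struct entry *entry_alloc(unsigned long cookie, size_t payload_len)
{
	/* One allocation holds the entry and its message payload. */
	struct entry *e = calloc(1, sizeof(*e) + payload_len);

	if (!e)
		return NULL;
	e->cookie = cookie;
	e->payload = (char *)&e[1]; /* first byte after the struct */
	return e;
}

int main(void)
{
	struct entry *e = entry_alloc(0xabcdUL, 32);

	if (!e)
		return 1;
	memset(e->payload, 0, 32);  /* format the v4 or v6 message here */
	printf("cookie=%lx\n", e->cookie);
	free(e);                    /* one free releases entry and payload */
	return 0;
}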
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9d86eea4dc16..e01430139b6d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -98,16 +98,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
+ u8 tmp;
int i;
flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
for (i = 0; i < ETH_ALEN; i++) {
- ext->mac_dst[i] |= match.key->dst[i] &
- match.mask->dst[i];
+ tmp = match.key->dst[i] & match.mask->dst[i];
+ ext->mac_dst[i] |= tmp & (~msk->mac_dst[i]);
msk->mac_dst[i] |= match.mask->dst[i];
- ext->mac_src[i] |= match.key->src[i] &
- match.mask->src[i];
+
+ tmp = match.key->src[i] & match.mask->src[i];
+ ext->mac_src[i] |= tmp & (~msk->mac_src[i]);
msk->mac_src[i] |= match.mask->src[i];
}
}
@@ -189,11 +191,16 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
+ __be16 tmp;
flow_rule_match_ports(rule, &match);
- ext->port_src |= match.key->src & match.mask->src;
- ext->port_dst |= match.key->dst & match.mask->dst;
+
+ tmp = match.key->src & match.mask->src;
+ ext->port_src |= tmp & (~msk->port_src);
msk->port_src |= match.mask->src;
+
+ tmp = match.key->dst & match.mask->dst;
+ ext->port_dst |= tmp & (~msk->port_dst);
msk->port_dst |= match.mask->dst;
}
}
@@ -212,11 +219,16 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
+ u8 tmp;
flow_rule_match_ip(rule, &match);
- ext->tos |= match.key->tos & match.mask->tos;
- ext->ttl |= match.key->ttl & match.mask->ttl;
+
+ tmp = match.key->tos & match.mask->tos;
+ ext->tos |= tmp & (~msk->tos);
msk->tos |= match.mask->tos;
+
+ tmp = match.key->ttl & match.mask->ttl;
+ ext->ttl |= tmp & (~msk->ttl);
msk->ttl |= match.mask->ttl;
}
@@ -325,11 +337,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
+ __be32 tmp;
flow_rule_match_ipv4_addrs(rule, &match);
- ext->ipv4_src |= match.key->src & match.mask->src;
- ext->ipv4_dst |= match.key->dst & match.mask->dst;
+
+ tmp = match.key->src & match.mask->src;
+ ext->ipv4_src |= tmp & (~msk->ipv4_src);
msk->ipv4_src |= match.mask->src;
+
+ tmp = match.key->dst & match.mask->dst;
+ ext->ipv4_dst |= tmp & (~msk->ipv4_dst);
msk->ipv4_dst |= match.mask->dst;
}
@@ -342,15 +359,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
+ u8 tmp;
int i;
flow_rule_match_ipv6_addrs(rule, &match);
for (i = 0; i < sizeof(ext->ipv6_src); i++) {
- ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
- match.mask->src.s6_addr[i];
- ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
- match.mask->dst.s6_addr[i];
+ tmp = match.key->src.s6_addr[i] &
+ match.mask->src.s6_addr[i];
+ ext->ipv6_src.s6_addr[i] |= tmp &
+ (~msk->ipv6_src.s6_addr[i]);
msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
+
+ tmp = match.key->dst.s6_addr[i] &
+ match.mask->dst.s6_addr[i];
+ ext->ipv6_dst.s6_addr[i] |= tmp &
+ (~msk->ipv6_dst.s6_addr[i]);
msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
}
}
@@ -602,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
struct nfp_flower_ipv6_gre_tun *gre_match;
@@ -637,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
- (struct nfp_flower_vlan *)msk,
- rule);
- ext += sizeof(struct nfp_flower_vlan);
- msk += sizeof(struct nfp_flower_vlan);
- }
-
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
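Every compile helper in the match.c hunks above switches to the same idiom: OR the masked key into ext, but only where the accumulated mask msk has not already claimed bits, so a repeated pass over the same buffer cannot disturb previously compiled exact-match bits. A minimal standalone sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ext = 0x0a, msk = 0x0f;   /* low nibble already compiled */
	uint8_t key = 0xf5, mask = 0xf0;  /* second pass: high nibble    */
	uint8_t tmp;

	tmp = key & mask;                 /* masked key bits: 0xf0       */
	ext |= tmp & (uint8_t)~msk;       /* don't touch the low nibble  */
	msk |= mask;                      /* accumulate the mask         */

	printf("ext=0x%02x msk=0x%02x\n", ext, msk); /* ext=0xfa msk=0xff */
	return 0;
}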
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index f448c5682594..0f06ef6e24bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -339,7 +339,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
goto err_free_ctx_entry;
}
- /* Do net allocate a mask-id for pre_tun_rules. These flows are used to
+ /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
* configure the pre_tun table and are never actually sent to the
* firmware as an add-flow message. This causes the mask-id allocation
* on the firmware to get out of sync if allocated here.
@@ -502,6 +502,12 @@ const struct rhashtable_params nfp_ct_map_params = {
.automatic_shrinking = true,
};
+const struct rhashtable_params neigh_table_params = {
+ .key_offset = offsetof(struct nfp_neigh_entry, neigh_cookie),
+ .head_offset = offsetof(struct nfp_neigh_entry, ht_node),
+ .key_len = sizeof(unsigned long),
+};
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
@@ -530,6 +536,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_ct_zone_table;
+ err = rhashtable_init(&priv->neigh_table, &neigh_table_params);
+ if (err)
+ goto err_free_ct_map_table;
+
+ INIT_LIST_HEAD(&priv->predt_list);
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -537,7 +549,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_ct_map_table;
+ goto err_free_neigh_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -565,6 +577,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
goto err_free_ring_buf;
spin_lock_init(&priv->stats_lock);
+ spin_lock_init(&priv->predt_lock);
return 0;
@@ -574,6 +587,8 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_neigh_table:
+ rhashtable_destroy(&priv->neigh_table);
err_free_ct_map_table:
rhashtable_destroy(&priv->ct_map_table);
err_free_ct_zone_table:
@@ -700,6 +715,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
rhashtable_free_and_destroy(&priv->ct_map_table,
nfp_free_map_table_entry, NULL);
+ rhashtable_free_and_destroy(&priv->neigh_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
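The metadata.c init path above threads a new rhashtable into an existing goto-unwind chain: the new err_free_neigh_table label destroys the neighbour table and then falls through to the older labels, keeping teardown in strict reverse order of setup. A standalone sketch of the pattern (toy resources, not the driver's types):

#include <stdio.h>
#include <stdlib.h>

static void *table_init(const char *name)
{
	printf("init %s\n", name);
	return malloc(1); /* may return NULL on failure */
}

static void table_destroy(void *t, const char *name)
{
	printf("destroy %s\n", name);
	free(t);
}

int main(void)
{
	void *ct_map, *neigh, *buf;

	ct_map = table_init("ct_map_table");
	if (!ct_map)
		return 1;
	neigh = table_init("neigh_table");
	if (!neigh)
		goto err_free_ct_map;
	buf = table_init("mask_id_free_list");
	if (!buf)
		goto err_free_neigh; /* new label slots in above ct_map */

	table_destroy(buf, "mask_id_free_list");
	table_destroy(neigh, "neigh_table");
	table_destroy(ct_map, "ct_map_table");
	return 0;

err_free_neigh:
	table_destroy(neigh, "neigh_table");
err_free_ct_map:
	table_destroy(ct_map, "ct_map_table");
	return 1;
}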
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index f97eff5afd12..8593cafa6368 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -359,7 +359,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- /* check if GRE, which has no enc_ports */
+ /* Check if GRE, which has no enc_ports */
if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
@@ -373,10 +373,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (ipv6_tun) {
key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
key_size +=
- sizeof(struct nfp_flower_ipv6_udp_tun);
+ sizeof(struct nfp_flower_ipv6_gre_tun);
} else {
key_size +=
- sizeof(struct nfp_flower_ipv4_udp_tun);
+ sizeof(struct nfp_flower_ipv4_gre_tun);
}
if (enc_op.key) {
@@ -1016,7 +1016,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
- /* check if the two flows are already merged */
+ /* Check if the two flows are already merged */
parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
if (rhashtable_lookup_fast(&priv->merge_table,
@@ -1170,6 +1170,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
+ if (key_layer & NFP_FLOWER_LAYER_IPV6)
+ flow->pre_tun_rule.is_ipv6 = true;
+ else
+ flow->pre_tun_rule.is_ipv6 = false;
+
/* Skip fields known to exist. */
mask += sizeof(struct nfp_flower_meta_tci);
ext += sizeof(struct nfp_flower_meta_tci);
@@ -1180,13 +1185,6 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
mask += sizeof(struct nfp_flower_in_port);
ext += sizeof(struct nfp_flower_in_port);
- /* Ensure destination MAC address matches pre_tun_dev. */
- mac = (struct nfp_flower_mac_mpls *)ext;
- if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
- return -EOPNOTSUPP;
- }
-
/* Ensure destination MAC address is fully matched. */
mac = (struct nfp_flower_mac_mpls *)mask;
if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
@@ -1194,11 +1192,36 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
+ /* Ensure source MAC address is fully matched. This is only needed
+ * for firmware with the DECAP_V2 feature enabled. Don't do this
+ * for firmware without this feature to keep old behaviour.
+ */
+ if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
+ mac = (struct nfp_flower_mac_mpls *)mask;
+ if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported pre-tunnel rule: source MAC field must not be masked");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (mac->mpls_lse) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
return -EOPNOTSUPP;
}
+ /* Ensure destination MAC address matches pre_tun_dev. */
+ mac = (struct nfp_flower_mac_mpls *)ext;
+ if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
+ return -EOPNOTSUPP;
+ }
+
+ /* Save MAC addresses in pre_tun_rule entry for later use */
+ memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
+ memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);
+
mask += sizeof(struct nfp_flower_mac_mpls);
ext += sizeof(struct nfp_flower_mac_mpls);
if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
@@ -1227,17 +1250,21 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
struct nfp_flower_vlan *vlan_tags;
+ u16 vlan_tpid;
u16 vlan_tci;
vlan_tags = (struct nfp_flower_vlan *)ext;
vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
+ vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid);
vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid);
vlan = true;
} else {
flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff);
}
}
@@ -1274,9 +1301,14 @@ static bool offload_pre_check(struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_ct ct;
- if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
- return false;
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ /* Allow special case where CT match is all 0 */
+ if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
+ return false;
+ }
if (flow->common.chain_index)
return false;
@@ -1362,11 +1394,30 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_release_metadata;
}
- if (flow_pay->pre_tun_rule.dev)
- err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
- else
+ if (flow_pay->pre_tun_rule.dev) {
+ if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
+ struct nfp_predt_entry *predt;
+
+ predt = kzalloc(sizeof(*predt), GFP_KERNEL);
+ if (!predt) {
+ err = -ENOMEM;
+ goto err_remove_rhash;
+ }
+ predt->flow_pay = flow_pay;
+ INIT_LIST_HEAD(&predt->nn_list);
+ spin_lock_bh(&priv->predt_lock);
+ list_add(&predt->list_head, &priv->predt_list);
+ flow_pay->pre_tun_rule.predt = predt;
+ nfp_tun_link_and_update_nn_entries(app, predt);
+ spin_unlock_bh(&priv->predt_lock);
+ } else {
+ err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
+ }
+ } else {
err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ }
+
if (err)
goto err_remove_rhash;
@@ -1538,11 +1589,25 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_merge_flow;
}
- if (nfp_flow->pre_tun_rule.dev)
- err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
- else
+ if (nfp_flow->pre_tun_rule.dev) {
+ if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
+ struct nfp_predt_entry *predt;
+
+ predt = nfp_flow->pre_tun_rule.predt;
+ if (predt) {
+ spin_lock_bh(&priv->predt_lock);
+ nfp_tun_unlink_and_update_nn_entries(app, predt);
+ list_del(&predt->list_head);
+ spin_unlock_bh(&priv->predt_lock);
+ kfree(predt);
+ }
+ } else {
+ err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
+ }
+ } else {
err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ }
/* Fall through on error. */
err_free_merge_flow:
@@ -1861,6 +1926,20 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
return 0;
}
+static int
+nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return nfp_setup_tc_act_offload(app, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -1868,7 +1947,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return nfp_setup_tc_no_dev(cb_priv, type, data);
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
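The offload.c change above stops rejecting indirect callbacks with a NULL netdev outright: a device-less call can still carry a flow_offload_action, so it is dispatched by setup type instead. A standalone sketch of that dispatch (names and return codes are illustrative):

#include <stdio.h>

enum setup_type { SETUP_BLOCK, SETUP_ACT };

static int handle_act(void *data)
{
	(void)data;
	printf("act (meter) offload request\n");
	return 0;
}

static int setup_cb(void *netdev, enum setup_type type, void *data)
{
	if (!netdev) {
		/* No device: only action offload is meaningful here. */
		if (!data)
			return -1;
		return type == SETUP_ACT ? handle_act(data) : -1;
	}
	/* the device-bound block offload path would continue here */
	return -1;
}

int main(void)
{
	int dummy;

	printf("rc=%d\n", setup_cb(NULL, SETUP_ACT, &dummy));
	return 0;
}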
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 784c6dbf8bc4..99052a925d9e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
#include <linux/math64.h>
+#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
@@ -11,10 +15,14 @@
#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS BIT(15)
+#define NFP_FL_QOS_METER BIT(10)
struct nfp_police_cfg_head {
__be32 flags_opts;
- __be32 port;
+ union {
+ __be32 meter_id;
+ __be32 port;
+ };
};
enum NFP_FL_QOS_TYPES {
@@ -46,7 +54,15 @@ enum NFP_FL_QOS_TYPES {
* | Committed Information Rate |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* Word[0](Flag options):
- * [15] p(pps) 1 for pps ,0 for bps
+ * [15] p(pps) 1 for pps, 0 for bps
+ *
+ * Meter control message
+ *   3                   2                   1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-------------------------------+-+---+-----+-+---------+-+---+-+
+ * | Reserved |p| Y |TYPE |E|TSHFV |P| PC|R|
+ * +-------------------------------+-+---+-----+-+---------+-+---+-+
+ * | meter ID |
+ * +-------------------------------+-------------------------------+
*
*/
struct nfp_police_config {
@@ -67,6 +83,84 @@ struct nfp_police_stats_reply {
__be64 drop_pkts;
};
+int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
+ bool pps, u32 id, u32 rate, u32 burst)
+{
+ struct nfp_police_config *config;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ if (pps)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
+ if (!ingress)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);
+
+ if (ingress)
+ config->head.port = cpu_to_be32(id);
+ else
+ config->head.meter_id = cpu_to_be32(id);
+
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(app->ctrl, skb);
+
+ return 0;
+}
+
+static int nfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack,
+ bool ingress)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (ingress) {
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue or ok");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow,
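nfp_policer_validate above centralises the sanity rules for a police action: the exceed branch must drop, the conform branch must be continue/ok on the ingress path but pipe/ok for a standalone meter, and peakrate/avrate/overhead are rejected. A standalone sketch of just the branch logic (constants are stand-ins for the FLOW_ACTION_* ids):

#include <stdbool.h>
#include <stdio.h>

enum act_id { ACT_DROP, ACT_CONTINUE, ACT_ACCEPT, ACT_PIPE };

static int validate(enum act_id exceed, enum act_id conform, bool ingress)
{
	if (exceed != ACT_DROP)
		return -1; /* exceed action must be drop */
	if (ingress)
		return (conform == ACT_CONTINUE || conform == ACT_ACCEPT) ? 0 : -1;
	return (conform == ACT_PIPE || conform == ACT_ACCEPT) ? 0 : -1;
}

int main(void)
{
	printf("%d\n", validate(ACT_DROP, ACT_CONTINUE, true));  /* 0  */
	printf("%d\n", validate(ACT_DROP, ACT_PIPE, false));     /* 0  */
	printf("%d\n", validate(ACT_ACCEPT, ACT_PIPE, false));   /* -1 */
	return 0;
}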
@@ -77,15 +171,15 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *fl_priv = app->priv;
struct flow_action_entry *action = NULL;
struct nfp_flower_repr_priv *repr_priv;
- struct nfp_police_config *config;
u32 netdev_port_id, i;
struct nfp_repr *repr;
- struct sk_buff *skb;
bool pps_support;
u32 bps_num = 0;
u32 pps_num = 0;
u32 burst;
+ bool pps;
u64 rate;
+ int err;
if (!nfp_netdev_is_nfp_repr(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
@@ -132,6 +226,11 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
"unsupported offload: qos rate limit offload requires police action");
return -EOPNOTSUPP;
}
+
+ err = nfp_policer_validate(&flow->rule->action, action, extack, true);
+ if (err)
+ return err;
+
if (action->police.rate_bytes_ps > 0) {
if (bps_num++) {
NL_SET_ERR_MSG_MOD(extack,
@@ -169,23 +268,12 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
}
if (rate != 0) {
- skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
- NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- config = nfp_flower_cmsg_get_data(skb);
- memset(config, 0, sizeof(struct nfp_police_config));
+ pps = false;
if (action->police.rate_pkt_ps > 0)
- config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
- config->head.port = cpu_to_be32(netdev_port_id);
- config->bkt_tkn_p = cpu_to_be32(burst);
- config->bkt_tkn_c = cpu_to_be32(burst);
- config->pbs = cpu_to_be32(burst);
- config->cbs = cpu_to_be32(burst);
- config->pir = cpu_to_be32(rate);
- config->cir = cpu_to_be32(rate);
- nfp_ctrl_tx(repr->app->ctrl, skb);
+ pps = true;
+ nfp_flower_offload_one_police(repr->app, true,
+ pps, netdev_port_id,
+ rate, burst);
}
}
repr_priv->qos_table.netdev_port_id = netdev_port_id;
@@ -266,6 +354,9 @@ void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
u32 netdev_port_id;
msg = nfp_flower_cmsg_get_data(skb);
+ if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
+ return nfp_act_stats_reply(app, msg);
+
netdev_port_id = be32_to_cpu(msg->head.port);
rcu_read_lock();
netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
@@ -297,7 +388,7 @@ exit_unlock_rcu:
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
- u32 netdev_port_id)
+ u32 id, bool ingress)
{
struct nfp_police_cfg_head *head;
struct sk_buff *skb;
@@ -308,10 +399,15 @@ nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
GFP_ATOMIC);
if (!skb)
return;
-
head = nfp_flower_cmsg_get_data(skb);
+
memset(head, 0, sizeof(struct nfp_police_cfg_head));
- head->port = cpu_to_be32(netdev_port_id);
+ if (ingress) {
+ head->port = cpu_to_be32(id);
+ } else {
+ head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
+ head->meter_id = cpu_to_be32(id);
+ }
nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}
@@ -341,7 +437,8 @@ nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
if (!netdev_port_id)
continue;
- nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+ nfp_flower_stats_rlim_request(fl_priv,
+ netdev_port_id, true);
}
}
@@ -359,6 +456,8 @@ static void update_stats_cache(struct work_struct *work)
qos_stats_work);
nfp_flower_stats_rlim_request_all(fl_priv);
+ nfp_flower_stats_meter_request_all(fl_priv);
+
schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}
@@ -406,6 +505,9 @@ void nfp_flower_qos_init(struct nfp_app *app)
struct nfp_flower_priv *fl_priv = app->priv;
spin_lock_init(&fl_priv->qos_stats_lock);
+ mutex_init(&fl_priv->meter_stats_lock);
+ nfp_init_meter_table(app);
+
INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}
@@ -441,3 +543,339 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+/* Offload tc action, currently only for tc police */
+
+static const struct rhashtable_params stats_meter_table_params = {
+ .key_offset = offsetof(struct nfp_meter_entry, meter_id),
+ .head_offset = offsetof(struct nfp_meter_entry, ht_node),
+ .key_len = sizeof(u32),
+};
+
+struct nfp_meter_entry *
+nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
+ stats_meter_table_params);
+}
+
+static struct nfp_meter_entry *
+nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_flower_priv *priv = app->priv;
+
+ meter_entry = rhashtable_lookup_fast(&priv->meter_table,
+ &meter_id,
+ stats_meter_table_params);
+ if (meter_entry)
+ return meter_entry;
+
+ meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
+ if (!meter_entry)
+ return NULL;
+
+ meter_entry->meter_id = meter_id;
+ meter_entry->used = jiffies;
+ if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
+ stats_meter_table_params)) {
+ kfree(meter_entry);
+ return NULL;
+ }
+
+ priv->qos_rate_limiters++;
+ if (priv->qos_rate_limiters == 1)
+ schedule_delayed_work(&priv->qos_stats_work,
+ NFP_FL_QOS_UPDATE);
+
+ return meter_entry;
+}
+
+static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_flower_priv *priv = app->priv;
+
+ meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
+ stats_meter_table_params);
+ if (!meter_entry)
+ return;
+
+ rhashtable_remove_fast(&priv->meter_table,
+ &meter_entry->ht_node,
+ stats_meter_table_params);
+ kfree(meter_entry);
+ priv->qos_rate_limiters--;
+ if (!priv->qos_rate_limiters)
+ cancel_delayed_work_sync(&priv->qos_stats_work);
+}
+
+int nfp_flower_setup_meter_entry(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ enum nfp_meter_op op,
+ u32 meter_id)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ int err = 0;
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+
+ switch (op) {
+ case NFP_METER_DEL:
+ nfp_flower_del_meter_entry(app, meter_id);
+ goto exit_unlock;
+ case NFP_METER_ADD:
+ meter_entry = nfp_flower_add_meter_entry(app, meter_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
+ if (!meter_entry) {
+ err = -ENOMEM;
+ goto exit_unlock;
+ }
+
+ if (action->police.rate_bytes_ps > 0) {
+ meter_entry->bps = true;
+ meter_entry->rate = action->police.rate_bytes_ps;
+ meter_entry->burst = action->police.burst;
+ } else {
+ meter_entry->bps = false;
+ meter_entry->rate = action->police.rate_pkt_ps;
+ meter_entry->burst = action->police.burst_pkt;
+ }
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+ return err;
+}
+
+int nfp_init_meter_table(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
+}
+
+void
+nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct rhashtable_iter iter;
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+ rhashtable_walk_enter(&fl_priv->meter_table, &iter);
+ rhashtable_walk_start(&iter);
+
+ while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(meter_entry))
+ continue;
+ nfp_flower_stats_rlim_request(fl_priv,
+ meter_entry->meter_id, false);
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ mutex_unlock(&fl_priv->meter_stats_lock);
+}
+
+static int
+nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *paction = &fl_act->action.entries[0];
+ u32 action_num = fl_act->action.num_entries;
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct flow_action_entry *action = NULL;
+ u32 burst, i, meter_id;
+ bool pps_support, pps;
+ bool add = false;
+ u64 rate;
+ int err;
+
+ pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
+
+ for (i = 0 ; i < action_num; i++) {
+ /* Set qos associate data for this interface */
+ action = paction + i;
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ continue;
+ }
+
+ err = nfp_policer_validate(&fl_act->action, action, extack, false);
+ if (err)
+ return err;
+
+ if (action->police.rate_bytes_ps > 0) {
+ rate = action->police.rate_bytes_ps;
+ burst = action->police.burst;
+ } else if (action->police.rate_pkt_ps > 0 && pps_support) {
+ rate = action->police.rate_pkt_ps;
+ burst = action->police.burst_pkt;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: unsupported qos rate limit");
+ continue;
+ }
+
+ if (rate != 0) {
+ meter_id = action->hw_index;
+ if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
+ continue;
+
+ pps = false;
+ if (action->police.rate_pkt_ps > 0)
+ pps = true;
+ nfp_flower_offload_one_police(app, false, pps, meter_id,
+ rate, burst);
+ add = true;
+ }
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_police_config *config;
+ struct sk_buff *skb;
+ u32 meter_id;
+ bool pps;
+
+ /* Delete qos associate data for this interface */
+ if (fl_act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ meter_id = fl_act->index;
+ meter_entry = nfp_flower_search_meter_entry(app, meter_id);
+ if (!meter_entry) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "no meter entry when delete the action index.");
+ return -ENOENT;
+ }
+ pps = !meter_entry->bps;
+
+ skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
+ config->head.meter_id = cpu_to_be32(meter_id);
+ if (pps)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);
+
+ return 0;
+}
+
+void
+nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_police_stats_reply *msg = pmsg;
+ u32 meter_id;
+
+ meter_id = be32_to_cpu(msg->head.meter_id);
+ mutex_lock(&fl_priv->meter_stats_lock);
+
+ meter_entry = nfp_flower_search_meter_entry(app, meter_id);
+ if (!meter_entry)
+ goto exit_unlock;
+
+ meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
+ be64_to_cpu(msg->drop_pkts);
+ meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
+ be64_to_cpu(msg->drop_bytes);
+ meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
+ if (!meter_entry->stats.update) {
+ meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
+ meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
+ meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
+ }
+
+ meter_entry->stats.update = jiffies;
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+}
+
+static int
+nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ u64 diff_bytes, diff_pkts, diff_drops;
+ int err = 0;
+
+ if (fl_act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+ meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
+ if (!meter_entry) {
+ err = -ENOENT;
+ goto exit_unlock;
+ }
+ diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
+ meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
+ diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
+ meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
+ diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
+ meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;
+
+ flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
+ meter_entry->stats.update,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
+ meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
+ meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+ return err;
+}
+
+int nfp_setup_tc_act_offload(struct nfp_app *app,
+ struct flow_offload_action *fl_act)
+{
+ struct netlink_ext_ack *extack = fl_act->extack;
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
+ return -EOPNOTSUPP;
+
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return nfp_act_install_actions(app, fl_act, extack);
+ case FLOW_ACT_DESTROY:
+ return nfp_act_remove_actions(app, fl_act, extack);
+ case FLOW_ACT_STATS:
+ return nfp_act_stats_actions(app, fl_act, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
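The meter stats path above never reports raw firmware counters: the worker caches cumulative values in stats.curr, and a FLOW_ACT_STATS query reports curr minus prev (clamped at zero) before rolling prev forward, which is what FLOW_ACTION_HW_STATS_DELAYED expects. A standalone sketch with invented numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct cnt { uint64_t pkts, bytes, drops; };

static uint64_t delta(uint64_t curr, uint64_t prev)
{
	return curr > prev ? curr - prev : 0; /* guard against underflow */
}

int main(void)
{
	struct cnt prev = { 100, 64000, 3 };  /* last reported baseline  */
	struct cnt curr = { 150, 96000, 5 };  /* latest firmware totals  */

	printf("pkts=%" PRIu64 " bytes=%" PRIu64 " drops=%" PRIu64 "\n",
	       delta(curr.pkts, prev.pkts),
	       delta(curr.bytes, prev.bytes),
	       delta(curr.drops, prev.drops));

	prev = curr; /* roll the baseline forward after reporting */
	return 0;
}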
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index dfb4468fe287..52f67157bd0f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -77,38 +77,6 @@ struct nfp_tun_active_tuns_v6 {
};
/**
- * struct nfp_tun_neigh - neighbour/route entry on the NFP
- * @dst_ipv4: destination IPv4 address
- * @src_ipv4: source IPv4 address
- * @dst_addr: destination MAC address
- * @src_addr: source MAC address
- * @port_id: NFP port to output packet on - associated with source IPv4
- */
-struct nfp_tun_neigh {
- __be32 dst_ipv4;
- __be32 src_ipv4;
- u8 dst_addr[ETH_ALEN];
- u8 src_addr[ETH_ALEN];
- __be32 port_id;
-};
-
-/**
- * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
- * @dst_ipv6: destination IPv6 address
- * @src_ipv6: source IPv6 address
- * @dst_addr: destination MAC address
- * @src_addr: source MAC address
- * @port_id: NFP port to output packet on - associated with source IPv6
- */
-struct nfp_tun_neigh_v6 {
- struct in6_addr dst_ipv6;
- struct in6_addr src_ipv6;
- u8 dst_addr[ETH_ALEN];
- u8 src_addr[ETH_ALEN];
- __be32 port_id;
-};
-
-/**
* struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
* @ingress_port: ingress port of packet that signalled request
* @ipv4_addr: destination ipv4 address for route
@@ -313,9 +281,15 @@ static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
{
+ struct nfp_flower_priv *priv = app->priv;
struct sk_buff *skb;
unsigned char *msg;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
+ (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
+ plen -= sizeof(struct nfp_tun_neigh_ext);
+
skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
if (!skb)
return -ENOMEM;
@@ -327,193 +301,268 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
return 0;
}
-static bool
-__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
- void *add, int add_len)
-{
- struct nfp_offloaded_route *entry;
-
- spin_lock_bh(list_lock);
- list_for_each_entry(entry, route_list, list)
- if (!memcmp(entry->ip_add, add, add_len)) {
- spin_unlock_bh(list_lock);
- return true;
- }
- spin_unlock_bh(list_lock);
- return false;
-}
-
-static int
-__nfp_tun_add_route_to_cache(struct list_head *route_list,
- spinlock_t *list_lock, void *add, int add_len)
-{
- struct nfp_offloaded_route *entry;
-
- spin_lock_bh(list_lock);
- list_for_each_entry(entry, route_list, list)
- if (!memcmp(entry->ip_add, add, add_len)) {
- spin_unlock_bh(list_lock);
- return 0;
- }
-
- entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
- if (!entry) {
- spin_unlock_bh(list_lock);
- return -ENOMEM;
- }
-
- memcpy(entry->ip_add, add, add_len);
- list_add_tail(&entry->list, route_list);
- spin_unlock_bh(list_lock);
-
- return 0;
-}
-
static void
-__nfp_tun_del_route_from_cache(struct list_head *route_list,
- spinlock_t *list_lock, void *add, int add_len)
+nfp_tun_mutual_link(struct nfp_predt_entry *predt,
+ struct nfp_neigh_entry *neigh)
{
- struct nfp_offloaded_route *entry;
-
- spin_lock_bh(list_lock);
- list_for_each_entry(entry, route_list, list)
- if (!memcmp(entry->ip_add, add, add_len)) {
- list_del(&entry->list);
- kfree(entry);
- break;
- }
- spin_unlock_bh(list_lock);
-}
+ struct nfp_fl_payload *flow_pay = predt->flow_pay;
+ struct nfp_tun_neigh_ext *ext;
+ struct nfp_tun_neigh *common;
-static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
-{
- struct nfp_flower_priv *priv = app->priv;
+ if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
+ return;
- return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
- &priv->tun.neigh_off_lock_v4, ipv4_addr,
- sizeof(*ipv4_addr));
-}
+ /* In the case of bonding it is possible that there might already
+ * be a flow linked (as the MAC address gets shared). If a flow
+ * is already linked, just return.
+ */
+ if (neigh->flow)
+ return;
-static bool
-nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
-{
- struct nfp_flower_priv *priv = app->priv;
+ common = neigh->is_ipv6 ?
+ &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
+ &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
+ ext = neigh->is_ipv6 ?
+ &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
+ &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
+
+ if (memcmp(flow_pay->pre_tun_rule.loc_mac,
+ common->src_addr, ETH_ALEN) ||
+ memcmp(flow_pay->pre_tun_rule.rem_mac,
+ common->dst_addr, ETH_ALEN))
+ return;
- return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
- &priv->tun.neigh_off_lock_v6, ipv6_addr,
- sizeof(*ipv6_addr));
+ list_add(&neigh->list_head, &predt->nn_list);
+ neigh->flow = predt;
+ ext->host_ctx = flow_pay->meta.host_ctx_id;
+ ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
+ ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}
static void
-nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+nfp_tun_link_predt_entries(struct nfp_app *app,
+ struct nfp_neigh_entry *nn_entry)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_predt_entry *predt, *tmp;
- __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
- &priv->tun.neigh_off_lock_v4, ipv4_addr,
- sizeof(*ipv4_addr));
+ list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
+ nfp_tun_mutual_link(predt, nn_entry);
+ }
}
-static void
-nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
+ struct nfp_predt_entry *predt)
{
struct nfp_flower_priv *priv = app->priv;
-
- __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
- &priv->tun.neigh_off_lock_v6, ipv6_addr,
- sizeof(*ipv6_addr));
+ struct nfp_neigh_entry *nn_entry;
+ struct rhashtable_iter iter;
+ size_t neigh_size;
+ u8 type;
+
+ rhashtable_walk_enter(&priv->neigh_table, &iter);
+ rhashtable_walk_start(&iter);
+ while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(nn_entry))
+ continue;
+ nfp_tun_mutual_link(predt, nn_entry);
+ neigh_size = nn_entry->is_ipv6 ?
+ sizeof(struct nfp_tun_neigh_v6) :
+ sizeof(struct nfp_tun_neigh_v4);
+ type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ nfp_flower_xmit_tun_conf(app, type, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
-static void
-nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
-
- __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
- &priv->tun.neigh_off_lock_v4, ipv4_addr,
- sizeof(*ipv4_addr));
+ struct nfp_neigh_entry *neigh;
+ struct nfp_tun_neigh_ext *ext;
+ struct rhashtable_iter iter;
+ size_t neigh_size;
+ u8 type;
+
+ rhashtable_walk_enter(&priv->neigh_table, &iter);
+ rhashtable_walk_start(&iter);
+ while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(neigh))
+ continue;
+ ext = neigh->is_ipv6 ?
+ &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
+ &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
+ ext->host_ctx = cpu_to_be32(U32_MAX);
+ ext->vlan_tpid = cpu_to_be16(U16_MAX);
+ ext->vlan_tci = cpu_to_be16(U16_MAX);
+
+ neigh_size = neigh->is_ipv6 ?
+ sizeof(struct nfp_tun_neigh_v6) :
+ sizeof(struct nfp_tun_neigh_v4);
+ type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
+ GFP_ATOMIC);
+
+ rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
+ neigh_table_params);
+ if (neigh->flow)
+ list_del(&neigh->list_head);
+ kfree(neigh);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
-static void
-nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
+ struct nfp_predt_entry *predt)
{
- struct nfp_flower_priv *priv = app->priv;
-
- __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
- &priv->tun.neigh_off_lock_v6, ipv6_addr,
- sizeof(*ipv6_addr));
+ struct nfp_neigh_entry *neigh, *tmp;
+ struct nfp_tun_neigh_ext *ext;
+ size_t neigh_size;
+ u8 type;
+
+ list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
+ ext = neigh->is_ipv6 ?
+ &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
+ &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
+ neigh->flow = NULL;
+ ext->host_ctx = cpu_to_be32(U32_MAX);
+ ext->vlan_tpid = cpu_to_be16(U16_MAX);
+ ext->vlan_tci = cpu_to_be16(U16_MAX);
+ list_del(&neigh->list_head);
+ neigh_size = neigh->is_ipv6 ?
+ sizeof(struct nfp_tun_neigh_v6) :
+ sizeof(struct nfp_tun_neigh_v4);
+ type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
+ GFP_ATOMIC);
+ }
}
static void
-nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
- struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
+ void *flow, struct neighbour *neigh, bool is_ipv6,
+ bool override)
{
- struct nfp_tun_neigh payload;
+ bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
+ size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
+ sizeof(struct nfp_tun_neigh_v4);
+ unsigned long cookie = (unsigned long)neigh;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_neigh_entry *nn_entry;
u32 port_id;
+ u8 mtype;
port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
if (!port_id)
return;
- memset(&payload, 0, sizeof(struct nfp_tun_neigh));
- payload.dst_ipv4 = flow->daddr;
+ spin_lock_bh(&priv->predt_lock);
+ nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
+ neigh_table_params);
+ if (!nn_entry && !neigh_invalid) {
+ struct nfp_tun_neigh_ext *ext;
+ struct nfp_tun_neigh *common;
+
+ nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
+ GFP_ATOMIC);
+ if (!nn_entry)
+ goto err;
+
+ nn_entry->payload = (char *)&nn_entry[1];
+ nn_entry->neigh_cookie = cookie;
+ nn_entry->is_ipv6 = is_ipv6;
+ nn_entry->flow = NULL;
+ if (is_ipv6) {
+ struct flowi6 *flowi6 = (struct flowi6 *)flow;
+ struct nfp_tun_neigh_v6 *payload;
+
+ payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
+ payload->src_ipv6 = flowi6->saddr;
+ payload->dst_ipv6 = flowi6->daddr;
+ common = &payload->common;
+ ext = &payload->ext;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
+ } else {
+ struct flowi4 *flowi4 = (struct flowi4 *)flow;
+ struct nfp_tun_neigh_v4 *payload;
+
+ payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
+ payload->src_ipv4 = flowi4->saddr;
+ payload->dst_ipv4 = flowi4->daddr;
+ common = &payload->common;
+ ext = &payload->ext;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ }
+ ext->host_ctx = cpu_to_be32(U32_MAX);
+ ext->vlan_tpid = cpu_to_be16(U16_MAX);
+ ext->vlan_tci = cpu_to_be16(U16_MAX);
+ ether_addr_copy(common->src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(common->dst_addr, neigh, netdev);
+ common->port_id = cpu_to_be32(port_id);
+
+ if (rhashtable_insert_fast(&priv->neigh_table,
+ &nn_entry->ht_node,
+ neigh_table_params))
+ goto err;
+
+ nfp_tun_link_predt_entries(app, nn_entry);
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
+ } else if (nn_entry && neigh_invalid) {
+ if (is_ipv6) {
+ struct flowi6 *flowi6 = (struct flowi6 *)flow;
+ struct nfp_tun_neigh_v6 *payload;
+
+ payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
+ memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
+ payload->dst_ipv6 = flowi6->daddr;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
+ } else {
+ struct flowi4 *flowi4 = (struct flowi4 *)flow;
+ struct nfp_tun_neigh_v4 *payload;
- /* If entry has expired send dst IP with all other fields 0. */
- if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
- nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
+ payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
+ memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
+ payload->dst_ipv4 = flowi4->daddr;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ }
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
- goto send_msg;
+ rhashtable_remove_fast(&priv->neigh_table,
+ &nn_entry->ht_node,
+ neigh_table_params);
+
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
+
+ if (nn_entry->flow)
+ list_del(&nn_entry->list_head);
+ kfree(nn_entry);
+ } else if (nn_entry && !neigh_invalid && override) {
+ mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ nfp_tun_link_predt_entries(app, nn_entry);
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
}
- /* Have a valid neighbour so populate rest of entry. */
- payload.src_ipv4 = flow->saddr;
- ether_addr_copy(payload.src_addr, netdev->dev_addr);
- neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
- payload.port_id = cpu_to_be32(port_id);
- /* Add destination of new route to NFP cache. */
- nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
-
-send_msg:
- nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
- sizeof(struct nfp_tun_neigh),
- (unsigned char *)&payload, flag);
-}
-
-static void
-nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
- struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
-{
- struct nfp_tun_neigh_v6 payload;
- u32 port_id;
-
- port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
- if (!port_id)
- return;
-
- memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
- payload.dst_ipv6 = flow->daddr;
-
- /* If entry has expired send dst IP with all other fields 0. */
- if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
- nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
- /* Trigger probe to verify invalid neighbour state. */
- neigh_event_send(neigh, NULL);
- goto send_msg;
- }
+ spin_unlock_bh(&priv->predt_lock);
+ return;
- /* Have a valid neighbour so populate rest of entry. */
- payload.src_ipv6 = flow->saddr;
- ether_addr_copy(payload.src_addr, netdev->dev_addr);
- neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
- payload.port_id = cpu_to_be32(port_id);
- /* Add destination of new route to NFP cache. */
- nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
-
-send_msg:
- nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
- sizeof(struct nfp_tun_neigh_v6),
- (unsigned char *)&payload, flag);
+err:
+ kfree(nn_entry);
+ spin_unlock_bh(&priv->predt_lock);
+ nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}
static int
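The rewritten nfp_tun_write_neigh above folds the old v4/v6 helpers into one function with three outcomes, keyed by whether a cached entry exists and whether the kernel neighbour is still valid. A standalone sketch of that classification (names are illustrative; override corresponds to the route-request resend path):

#include <stdbool.h>
#include <stdio.h>

enum action { ADD, DELETE, REFRESH, NONE };

static enum action classify(bool cached, bool valid, bool override)
{
	if (!cached && valid)
		return ADD;          /* new reachable neighbour         */
	if (cached && !valid)
		return DELETE;       /* expired: send zeroed entry, free */
	if (cached && valid && override)
		return REFRESH;      /* explicit route request: re-send */
	return NONE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       classify(false, true,  false),   /* ADD     */
	       classify(true,  false, false),   /* DELETE  */
	       classify(true,  true,  true),    /* REFRESH */
	       classify(false, false, false));  /* NONE    */
	return 0;
}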
@@ -522,12 +571,9 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
{
struct nfp_flower_priv *app_priv;
struct netevent_redirect *redir;
- struct flowi4 flow4 = {};
- struct flowi6 flow6 = {};
struct neighbour *n;
struct nfp_app *app;
- struct rtable *rt;
- bool ipv6 = false;
+ bool neigh_invalid;
int err;
switch (event) {
@@ -542,13 +588,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
- if (n->tbl->family == AF_INET6)
- ipv6 = true;
-
- if (ipv6)
- flow6.daddr = *(struct in6_addr *)n->primary_key;
- else
- flow4.daddr = *(__be32 *)n->primary_key;
+ neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
@@ -557,38 +597,51 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
!nfp_flower_internal_port_can_offload(app, n->dev))
return NOTIFY_DONE;
- /* Only concerned with changes to routes already added to NFP. */
- if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
- (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
- return NOTIFY_DONE;
-
#if IS_ENABLED(CONFIG_INET)
- if (ipv6) {
+ if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
- struct dst_entry *dst;
-
- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
- &flow6, NULL);
- if (IS_ERR(dst))
- return NOTIFY_DONE;
+ struct flowi6 flow6 = {};
- dst_release(dst);
- flow6.flowi6_proto = IPPROTO_UDP;
- nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+ flow6.daddr = *(struct in6_addr *)n->primary_key;
+ if (!neigh_invalid) {
+ struct dst_entry *dst;
+ /* Use ip6_dst_lookup_flow to populate flow6->saddr
+ * and other fields. This information is only needed
+ * for new entries, lookup can be skipped when an entry
+ * gets invalidated - as only the daddr is needed for
+ * deleting.
+ */
+ dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
+ &flow6, NULL);
+ if (IS_ERR(dst))
+ return NOTIFY_DONE;
+
+ dst_release(dst);
+ }
+ nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#else
return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
} else {
- /* Do a route lookup to populate flow data. */
- rt = ip_route_output_key(dev_net(n->dev), &flow4);
- err = PTR_ERR_OR_ZERO(rt);
- if (err)
- return NOTIFY_DONE;
+ struct flowi4 flow4 = {};
- ip_rt_put(rt);
-
- flow4.flowi4_proto = IPPROTO_UDP;
- nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+ flow4.daddr = *(__be32 *)n->primary_key;
+ if (!neigh_invalid) {
+ struct rtable *rt;
+ /* Use ip_route_output_key to populate flow4->saddr and
+ * other fields. This information is only needed for
+ * new entries, lookup can be skipped when an entry
+ * gets invalidated - as only the daddr is needed for
+ * deleting.
+ */
+ rt = ip_route_output_key(dev_net(n->dev), &flow4);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
+
+ ip_rt_put(rt);
+ }
+ nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
}
#else
return NOTIFY_DONE;
@@ -631,7 +684,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
+ nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
neigh_release(n);
rcu_read_unlock();
return;
@@ -673,7 +726,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+ nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
neigh_release(n);
rcu_read_unlock();
return;
@@ -922,8 +975,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- int ida_idx = NFP_MAX_MAC_INDEX, err;
struct nfp_tun_offloaded_mac *entry;
+ int ida_idx = -1, err;
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
@@ -942,8 +995,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
if (!nfp_mac_idx) {
/* Assign a global index if non-repr or MAC is now shared. */
if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
if (ida_idx < 0)
return ida_idx;
@@ -997,8 +1050,8 @@ err_remove_hash:
err_free_entry:
kfree(entry);
err_free_ida:
- if (ida_idx != NFP_MAX_MAC_INDEX)
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ if (ida_idx != -1)
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
return err;
}
@@ -1011,6 +1064,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_repr_priv *repr_priv;
struct nfp_tun_offloaded_mac *entry;
struct nfp_repr *repr;
+ u16 nfp_mac_idx;
int ida_idx;
entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@@ -1018,7 +1072,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
entry->ref_count--;
- /* If del is part of a mod then mac_list is still in use elsewheree. */
+ /* If del is part of a mod then mac_list is still in use elsewhere. */
if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
@@ -1029,8 +1083,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
- u16 nfp_mac_idx;
-
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
@@ -1046,7 +1098,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
- u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
@@ -1063,7 +1114,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
}
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
entry->index = nfp_mac_idx;
return 0;
}
@@ -1074,10 +1125,16 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ else
+ nfp_mac_idx = entry->index;
+
/* If MAC has global ID then extract and free the ida entry. */
- if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
}
kfree(entry);
@@ -1364,10 +1421,6 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->tun.neigh_off_lock_v4);
- INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
- spin_lock_init(&priv->tun.neigh_off_lock_v6);
- INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
err = register_netevent_notifier(&priv->tun.neigh_nb);
@@ -1382,11 +1435,8 @@ int nfp_tunnel_config_start(struct nfp_app *app)
void nfp_tunnel_config_stop(struct nfp_app *app)
{
- struct nfp_offloaded_route *route_entry, *temp;
struct nfp_flower_priv *priv = app->priv;
struct nfp_ipv4_addr_entry *ip_entry;
- struct nfp_tun_neigh_v6 ipv6_route;
- struct nfp_tun_neigh ipv4_route;
struct list_head *ptr, *storage;
unregister_netevent_notifier(&priv->tun.neigh_nb);
@@ -1402,36 +1452,9 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
mutex_destroy(&priv->tun.ipv6_off_lock);
- /* Free memory in the route list and remove entries from fw cache. */
- list_for_each_entry_safe(route_entry, temp,
- &priv->tun.neigh_off_list_v4, list) {
- memset(&ipv4_route, 0, sizeof(ipv4_route));
- memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
- sizeof(ipv4_route.dst_ipv4));
- list_del(&route_entry->list);
- kfree(route_entry);
-
- nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
- sizeof(struct nfp_tun_neigh),
- (unsigned char *)&ipv4_route,
- GFP_KERNEL);
- }
-
- list_for_each_entry_safe(route_entry, temp,
- &priv->tun.neigh_off_list_v6, list) {
- memset(&ipv6_route, 0, sizeof(ipv6_route));
- memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
- sizeof(ipv6_route.dst_ipv6));
- list_del(&route_entry->list);
- kfree(route_entry);
-
- nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
- sizeof(struct nfp_tun_neigh),
- (unsigned char *)&ipv6_route,
- GFP_KERNEL);
- }
-
/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
nfp_check_rhashtable_empty, NULL);
+
+ nfp_tun_cleanup_nn_entries(app);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
new file mode 100644
index 000000000000..448c1c1afaee
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -0,0 +1,1382 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfd3.h"
+
+/* Transmit processing
+ *
+ * One queue controller peripheral queue is used for transmit. The
+ * driver en-queues packets for transmit by advancing the write
+ * pointer. The device indicates that packets have been transmitted by
+ * advancing the read pointer. The driver maintains a local copy of
+ * the read and write pointer in @struct nfp_net_tx_ring. The driver
+ * keeps @wr_p in sync with the queue controller write pointer and can
+ * determine how many packets have been transmitted by comparing its
+ * copy of the read pointer @rd_p with the read pointer maintained by
+ * the queue controller peripheral.
+ */
+
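
A minimal sketch of the pointer arithmetic this scheme assumes (power-of-two
ring size, free-running u32 pointers that may wrap). D_IDX() and
nfp_net_tx_full() are the driver's real helpers defined elsewhere; the names
below are illustrative stand-ins, not their definitions:

/* Sketch only -- mirrors what D_IDX()/nfp_net_tx_full() compute. */
static inline u32 ex_ring_idx(u32 cnt, u32 ptr)
{
	return ptr & (cnt - 1);		/* cnt must be a power of two */
}

static inline bool ex_ring_full(u32 wr_p, u32 rd_p, u32 cnt, u32 needed)
{
	/* unsigned subtraction handles pointer wrap-around naturally */
	return (wr_p - rd_p) >= (cnt - needed);
}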
+/* Wrappers for deciding when to stop and restart TX queues */
+static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+ return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
+}
+
+static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+ return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
+}
+
+/**
+ * nfp_nfd3_tx_ring_stop() - stop tx ring
+ * @nd_q: netdev queue
+ * @tx_ring: driver tx queue structure
+ *
+ * Safely stop TX ring. Remember that while we are running .start_xmit()
+ * someone else may be cleaning the TX ring completions so we need to be
+ * extra careful here.
+ */
+static void
+nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ netif_tx_stop_queue(nd_q);
+
+ /* We can race with the TX completion out of NAPI so recheck */
+ smp_mb();
+ if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
+ netif_tx_start_queue(nd_q);
+}
+
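The barrier above pairs with the smp_mb() on the completion side in
nfp_nfd3_tx_complete() further down; a sketch of the ordering argument:

/* Stop/wake pairing (both halves are visible in this file):
 *
 *   xmit path                        completion path
 *   -------------------------        -------------------------
 *   netif_tx_stop_queue()            advance tx_ring->rd_p
 *   smp_mb()                         smp_mb()
 *   recheck ring space and           recheck queue stopped and
 *   wake if space was freed          wake if xmit stopped it
 *
 * Whichever side performs its recheck last observes the other side's
 * update, so the queue cannot remain stopped while descriptors are free.
 */
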
+/**
+ * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to HW TX descriptor
+ * @skb: Pointer to SKB
+ * @md_bytes: Prepend length
+ *
+ * Set up the TX descriptor for LSO; do nothing for non-LSO skbs.
+ */
+static void
+nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
+ struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
+{
+ u32 l3_offset, l4_offset, hdrlen;
+ u16 mss;
+
+ if (!skb_is_gso(skb))
+ return;
+
+ if (!skb->encapsulation) {
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
+ hdrlen = skb_tcp_all_headers(skb);
+ } else {
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
+ }
+
+ txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
+ txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
+
+ mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
+ txd->l3_offset = l3_offset - md_bytes;
+ txd->l4_offset = l4_offset - md_bytes;
+ txd->lso_hdrlen = hdrlen - md_bytes;
+ txd->mss = cpu_to_le16(mss);
+ txd->flags |= NFD3_DESC_TX_LSO;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_lso++;
+ u64_stats_update_end(&r_vec->tx_sync);
+}
+
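For byte accounting, real_len above is grown by one extra copy of the headers
per additional segment, since hardware LSO replicates the headers into every
packet it cuts. A worked example with assumed, illustrative numbers:

/* Hypothetical helper, not part of the driver: the on-wire byte count
 * that the real_len adjustment above represents.
 */
static u32 ex_tso_wire_bytes(u32 skb_len, u32 hdrlen, u32 gso_segs)
{
	/* each of the gso_segs - 1 extra segments repeats the headers */
	return skb_len + hdrlen * (gso_segs - 1);
}
/* e.g. skb_len = 65292, hdrlen = 66, gso_segs = 46 -> 68262 wire bytes */
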
+/**
+ * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to TX descriptor
+ * @skb: Pointer to SKB
+ *
+ * This function sets the TX checksum flags in the TX descriptor based
+ * on the configuration and the protocol of the packet to be transmitted.
+ */
+static void
+nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 l4_hdr;
+
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ return;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return;
+
+ txd->flags |= NFD3_DESC_TX_CSUM;
+ if (skb->encapsulation)
+ txd->flags |= NFD3_DESC_TX_ENCAP;
+
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
+ txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+ l4_hdr = iph->protocol;
+ } else if (ipv6h->version == 6) {
+ l4_hdr = ipv6h->nexthdr;
+ } else {
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+ return;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+ break;
+ case IPPROTO_UDP:
+ txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+ break;
+ default:
+ nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
+ return;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (skb->encapsulation)
+ r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
+ else
+ r_vec->hw_csum_tx += txbuf->pkt_cnt;
+ u64_stats_update_end(&r_vec->tx_sync);
+}
+
+static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64 tls_handle)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+ bool vlan_insert;
+ u32 meta_id = 0;
+ int md_bytes;
+
+ if (unlikely(md_dst || tls_handle)) {
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
+ }
+
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
+
+ if (!(md_dst || tls_handle || vlan_insert))
+ return 0;
+
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
+
+ if (unlikely(skb_cow_head(skb, md_bytes)))
+ return -ENOMEM;
+
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= NFP_NET_META_PORTID_SIZE;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= NFP_NET_META_CONN_HANDLE_SIZE;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+		/* skb->vlan_proto is already __be16, so it can be copied
+		 * into the metadata without calling put_unaligned_be16
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
+
+ data -= sizeof(meta_id);
+ put_unaligned_be32(meta_id, data);
+
+ return md_bytes;
+}
+
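The prepend built above is read by the device front to back: a 4-byte
field-type word followed by the field payloads. A sketch of the layout for
the md_dst-plus-VLAN case, assuming 4-byte port-id and VLAN fields (constants
symbolic):

/*
 *   offset 0: meta_id = (NFP_NET_META_PORTID << NFP_NET_META_FIELD_SIZE) |
 *                        NFP_NET_META_VLAN
 *   offset 4: vlan_proto (2B) | vlan_tci (2B)
 *   offset 8: port id (4B)
 *
 * Fields are written back to front, so the type code OR-ed in last (VLAN,
 * in the low nibble that is consumed first) names the payload that
 * immediately follows the meta_id word.
 */
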
+/**
+ * nfp_nfd3_tx() - Main transmit entry point
+ * @skb: SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int f, nr_frags, wr_idx, md_bytes;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ struct netdev_queue *nd_q;
+ const skb_frag_t *frag;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ unsigned int fsize;
+ u64 tls_handle = 0;
+ u16 qidx;
+
+ dp = &nn->dp;
+ qidx = skb_get_queue_mapping(skb);
+ tx_ring = &dp->tx_rings[qidx];
+ r_vec = tx_ring->r_vec;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+ netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NETDEV_TX_BUSY;
+ }
+
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ return NETDEV_TX_OK;
+ }
+
+ md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_err;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = skb->len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
+ nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
+ nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+ if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ txd->flags |= NFD3_DESC_TX_VLAN;
+ txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ }
+
+ /* Gather DMA */
+ if (nr_frags > 0) {
+ __le64 second_half;
+
+		/* all descs must match except for DMA address, length and EOP */
+ second_half = txd->vals8[1];
+
+ for (f = 0; f < nr_frags; f++) {
+ frag = &skb_shinfo(skb)->frags[f];
+ fsize = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
+ fsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_unmap;
+
+ wr_idx = D_IDX(tx_ring, wr_idx + 1);
+ tx_ring->txbufs[wr_idx].skb = skb;
+ tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
+ tx_ring->txbufs[wr_idx].fidx = f;
+
+ txd = &tx_ring->txds[wr_idx];
+ txd->dma_len = cpu_to_le16(fsize);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
+ txd->offset_eop = md_bytes |
+ ((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
+ txd->vals8[1] = second_half;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_gather++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ skb_tx_timestamp(skb);
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
+ tx_ring->wr_p += nr_frags + 1;
+ if (nfp_nfd3_tx_ring_should_stop(tx_ring))
+ nfp_nfd3_tx_ring_stop(nd_q, tx_ring);
+
+ tx_ring->wr_ptr_add += nr_frags + 1;
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_unmap:
+ while (--f >= 0) {
+ frag = &skb_shinfo(skb)->frags[f];
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ tx_ring->txbufs[wr_idx].skb = NULL;
+ tx_ring->txbufs[wr_idx].dma_addr = 0;
+ tx_ring->txbufs[wr_idx].fidx = -2;
+ wr_idx = wr_idx - 1;
+ if (wr_idx < 0)
+ wr_idx += tx_ring->cnt;
+ }
+ dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ tx_ring->txbufs[wr_idx].skb = NULL;
+ tx_ring->txbufs[wr_idx].dma_addr = 0;
+ tx_ring->txbufs[wr_idx].fidx = -2;
+err_dma_err:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ nfp_net_tls_tx_undo(skb, tls_handle);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfd3_tx_complete() - Handle completed TX packets
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
+ */
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ struct netdev_queue *nd_q;
+ u32 qcp_rd_p;
+ int todo;
+
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ while (todo--) {
+ const skb_frag_t *frag;
+ struct nfp_nfd3_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int fidx, nr_frags;
+ int idx;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
+ tx_buf = &tx_ring->txbufs[idx];
+
+ skb = tx_buf->skb;
+ if (!skb)
+ continue;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ fidx = tx_buf->fidx;
+
+ if (fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ done_pkts += tx_buf->pkt_cnt;
+ done_bytes += tx_buf->real_len;
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ /* check for last gather fragment */
+ if (fidx == nr_frags - 1)
+ napi_consume_skb(skb, budget);
+
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
+ }
+
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ if (!dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+ if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
+ /* Make sure TX thread will see updated tx_ring->rd_p */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(nd_q)))
+ netif_tx_wake_queue(nd_q);
+ }
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ bool done_all;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+ done_pkts = todo;
+ while (todo--) {
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_ring->rd_p++;
+
+ done_bytes += tx_ring->txbufs[idx].real_len;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+/* Receive processing
+ */
+
+static void *
+nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
+ } else {
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_nfd3_rx_give_one() - Put a mapped buffer on the software and hardware rings
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring structure
+ * @frag: page fragment buffer
+ * @dma_addr:	DMA address of the buffer mapping
+ */
+static void
+nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
+ void *frag, dma_addr_t dma_addr)
+{
+ unsigned int wr_idx;
+
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+ /* Stash SKB and DMA address away */
+ rx_ring->rxbufs[wr_idx].frag = frag;
+ rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+ /* Fill freelist descriptor */
+ rx_ring->rxds[wr_idx].fld.reserved = 0;
+ rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+ /* DMA address is expanded to 48-bit width in freelist for NFP3800,
+	 * so the *_48b macro is used accordingly; it is also OK to fill
+	 * in a 40-bit address since the top 8 bits get set to 0.
+ */
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
+
+ rx_ring->wr_p++;
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+ /* Update write pointer of the freelist queue. Make
+ * sure all writes are flushed before telling the hardware.
+ */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+ }
+}
+
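The batching above trades per-buffer doorbells for one per NFP_NET_FL_BATCH;
a sketch of the cost model (illustrative note, not driver code):

/* a refill writes one freelist descriptor per buffer, but only
 * ceil(n / NFP_NET_FL_BATCH) MMIO doorbells for n buffers, each preceded
 * by a wmb() so the device never sees wr_ptr ahead of the descriptors.
 */
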
+/**
+ * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfd3_rx_csum_has_errors() - group check if rxd has any csum errors
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfd3_rx_csum_has_errors(u16 flags)
+{
+ u16 csum_all_checked, csum_all_ok;
+
+ csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+ csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
+ return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
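The single comparison works because, per protocol, the descriptor's
"checksum OK" bit sits exactly PCIE_DESC_RX_CSUM_OK_SHIFT positions below
its "checksum checked" bit; a minimal sketch under that assumption (names
illustrative):

static bool ex_any_csum_error(u16 flags, u16 checked_mask, u16 ok_mask,
			      unsigned int ok_shift)
{
	/* every "checked" bit must line up with a set "ok" partner bit */
	return (flags & checked_mask) != ((flags & ok_mask) << ok_shift);
}
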
+/**
+ * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_complete++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_error++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ /* Assume that the firmware will never report inner CSUM_OK unless outer
+ * L4 headers were successfully parsed. FW will always report zero UDP
+ * checksum as CSUM_OK.
+ */
+ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_inner_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+}
+
+static void
+nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
+{
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (type) {
+ case NFP_NET_RSS_IPV4:
+ case NFP_NET_RSS_IPV6:
+ case NFP_NET_RSS_IPV6_EX:
+ meta->hash_type = PKT_HASH_TYPE_L3;
+ break;
+ default:
+ meta->hash_type = PKT_HASH_TYPE_L4;
+ break;
+ }
+
+ meta->hash = get_unaligned_be32(hash);
+}
+
+static void
+nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash = data;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+ u32 meta_info, vlan_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_nfd3_set_hash(netdev, meta,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ meta->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
+ default:
+ return true;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data != pkt;
+}
+
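An illustrative decode of the chained-metadata word for a prepend carrying
an RSS hash followed by a mark (constants symbolic, values assumed):

/*
 *   meta_info = (NFP_NET_META_MARK << 2 * NFP_NET_META_FIELD_SIZE) |
 *               (<hash type>       <<     NFP_NET_META_FIELD_SIZE) |
 *               NFP_NET_META_HASH
 *
 * The HASH case consumes two field-size chunks of meta_info (its own type
 * code plus the hash type) and a 4-byte hash value from @data; the MARK
 * case then consumes one chunk and a 4-byte mark. The closing
 * "data != pkt" check rejects a prepend that parsed to nothing.
 */
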
+static void
+nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ /* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+	/* skb is built around the frag; free_skb() would free the frag,
+	 * so to be able to reuse it we need an extra ref.
+ */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool
+nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+ unsigned int pkt_len, bool *completed)
+{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ int wr_idx;
+
+	/* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ if (!*completed) {
+ nfp_nfd3_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
+ }
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
+
+ txbuf->frag = rxbuf->frag;
+ txbuf->dma_addr = rxbuf->dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = pkt_len;
+
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+ pkt_len, DMA_BIDIRECTIONAL);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ return true;
+}
+
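The XDP_TX path above never allocates in the hot path; it rotates page
fragments between the RX free list and the XDP TX ring (the TX-side
fragments are pre-populated by nfp_nfd3_tx_ring_bufs_alloc() later in this
patch):

/* Fragment rotation on XDP_TX, as implemented above:
 *
 *   1. the fragment previously parked in this TX slot is handed back to
 *      the RX free list via nfp_nfd3_rx_give_one();
 *   2. the just-received fragment takes its place in the TX slot and is
 *      transmitted from there;
 *   3. when the slot is next reused, step 1 recycles that fragment back
 *      to RX in turn.
 */
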
+/**
+ * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring: RX ring to receive from
+ * @budget: NAPI budget
+ *
+ * Note, this function is separated out from the napi poll function to
+ * keep the packet receive path distinct from the other bookkeeping work
+ * performed in the napi poll function.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ struct xdp_buff xdp;
+ int idx;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ bool redir_egress = false;
+ struct net_device *netdev;
+ dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
+ void *new_frag;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (!dp->chained_metadata_format) {
+ nfp_nfd3_set_hash_desc(dp->netdev, &meta,
+ rxbuf->frag + meta_off, rxd);
+ } else if (meta_len) {
+ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
+ if (xdp_prog && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
+ unsigned int dma_off;
+ int act;
+
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len = xdp.data_end - xdp.data;
+ pkt_off += xdp.data - orig_data;
+
+ switch (act) {
+ case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
+ break;
+ case XDP_TX:
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
+ tx_ring,
+ rxbuf,
+ dma_off,
+ pkt_len,
+ &xdp_tx_cmpl)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
+ if (unlikely(!netdev)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+#ifdef CONFIG_TLS_DEVICE
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+ skb->decrypted = true;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_tls_rx++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+#endif
+
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
+
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
+ }
+
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_nfd3_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
+
+ return pkts_polled;
+}
+
+/**
+ * nfp_nfd3_poll() - napi poll function
+ * @napi: NAPI structure
+ * @budget: NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfd3_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled = 0;
+
+ if (r_vec->tx_ring)
+ nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+ if (r_vec->rx_ring)
+ pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);
+
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
+ return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ unsigned int real_len = skb->len, meta_len = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return true;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ meta_len = 8;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+ }
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_warn;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = real_len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return false;
+
+err_dma_warn:
+ nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
+
+ return budget;
+}
+
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
+{
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+ spin_lock(&r_vec->lock);
+ nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock(&r_vec->lock);
+
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
new file mode 100644
index 000000000000..7a0df9e6c3c4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFD3_H_
+#define _NFP_DP_NFD3_H_
+
+struct sk_buff;
+struct net_device;
+
+/* TX descriptor format */
+
+#define NFD3_DESC_TX_EOP BIT(7)
+#define NFD3_DESC_TX_OFFSET_MASK GENMASK(6, 0)
+#define NFD3_DESC_TX_MSS_MASK GENMASK(13, 0)
+
+/* Flags in the host TX descriptor */
+#define NFD3_DESC_TX_CSUM BIT(7)
+#define NFD3_DESC_TX_IP4_CSUM BIT(6)
+#define NFD3_DESC_TX_TCP_CSUM BIT(5)
+#define NFD3_DESC_TX_UDP_CSUM BIT(4)
+#define NFD3_DESC_TX_VLAN BIT(3)
+#define NFD3_DESC_TX_LSO BIT(2)
+#define NFD3_DESC_TX_ENCAP BIT(1)
+#define NFD3_DESC_TX_O_IP4_CSUM BIT(0)
+
+struct nfp_nfd3_tx_desc {
+ union {
+ struct {
+ u8 dma_addr_hi; /* High bits of host buf address */
+ __le16 dma_len; /* Length to DMA for this desc */
+ u8 offset_eop; /* Offset in buf where pkt starts +
+ * highest bit is eop flag.
+ */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+
+ __le16 mss; /* MSS to be used for LSO */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
+ u8 flags; /* TX Flags, see @NFD3_DESC_TX_* */
+ union {
+ struct {
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
+ __le16 data_len; /* Length of frame + meta data */
+ } __packed;
+ __le32 vals[4];
+ __le64 vals8[2];
+ };
+};
+
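A hypothetical packing helper mirroring the 8-bit-high / 32-bit-low address
split above; the driver's real nfp_desc_set_dma_addr_40b() lives in common
code, so this sketch is not its definition:

static inline void ex_set_dma_addr_40b(struct nfp_nfd3_tx_desc *txd,
				       dma_addr_t dma_addr)
{
	txd->dma_addr_hi = (dma_addr >> 32) & 0xff;		/* bits 39:32 */
	txd->dma_addr_lo = cpu_to_le32(dma_addr & 0xffffffff);	/* bits 31:0 */
}
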
+/**
+ * struct nfp_nfd3_tx_buf - software TX buffer descriptor
+ * @skb: normal ring, sk_buff associated with this buffer
+ * @frag: XDP ring, page frag associated with this buffer
+ * @xdp: XSK buffer pool handle (for AF_XDP)
+ * @dma_addr: DMA mapping address of the buffer
+ * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
+ * @pkt_cnt: Number of packets to be produced out of the skb associated
+ * with this buffer (valid only on the head's buffer).
+ * Will be 1 for all non-TSO packets.
+ * @is_xsk_tx:	Flag if buffer is an RX buffer after an XDP_TX action and not a
+ *		buffer from the TX queue (for AF_XDP).
+ * @real_len:	Number of bytes to be produced out of the skb (valid only
+ *		on the head's buffer). Equal to skb->len for non-TSO packets.
+ */
+struct nfp_nfd3_tx_buf {
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ struct xdp_buff *xdp;
+ };
+ dma_addr_t dma_addr;
+ union {
+ struct {
+ short int fidx;
+ u16 pkt_cnt;
+ };
+ struct {
+ bool is_xsk_tx;
+ };
+ };
+ u32 real_len;
+};
+
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta, struct sk_buff *skb);
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len);
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
+int nfp_nfd3_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
new file mode 100644
index 000000000000..a03190c9313c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_nfd3_tx_buf *txbuf;
+ unsigned int idx;
+
+ while (tx_ring->rd_p != tx_ring->wr_p) {
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ txbuf = &tx_ring->txbufs[idx];
+
+ txbuf->real_len = 0;
+
+ tx_ring->qcp_rd_p++;
+ tx_ring->rd_p++;
+
+ if (tx_ring->r_vec->xsk_pool) {
+ if (txbuf->is_xsk_tx)
+ nfp_nfd3_xsk_tx_free(txbuf);
+
+ xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
+ }
+ }
+}
+
+/**
+ * nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @dp: NFP Net data path struct
+ * @tx_ring: TX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+static void
+nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct netdev_queue *nd_q;
+ const skb_frag_t *frag;
+
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+ struct nfp_nfd3_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int idx, nr_frags;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_buf = &tx_ring->txbufs[idx];
+
+ skb = tx_ring->txbufs[idx].skb;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
+
+ tx_ring->qcp_rd_p++;
+ tx_ring->rd_p++;
+ }
+
+ if (tx_ring->is_xdp)
+ nfp_nfd3_xsk_tx_bufs_free(tx_ring);
+
+ memset(tx_ring->txds, 0, tx_ring->size);
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
+ if (tx_ring->is_xdp || !dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_reset_queue(nd_q);
+}
+
+/**
+ * nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
+ * @tx_ring: TX ring to free
+ */
+static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ kvfree(tx_ring->txbufs);
+
+ if (tx_ring->txds)
+ dma_free_coherent(dp->dev, tx_ring->size,
+ tx_ring->txds, tx_ring->dma);
+
+ tx_ring->cnt = 0;
+ tx_ring->txbufs = NULL;
+ tx_ring->txds = NULL;
+ tx_ring->dma = 0;
+ tx_ring->size = 0;
+}
+
+/**
+ * nfp_nfd3_tx_ring_alloc() - Allocate resource for a TX ring
+ * @dp: NFP Net data path struct
+ * @tx_ring: TX Ring structure to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+ tx_ring->cnt = dp->txd_cnt;
+
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->txds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
+ goto err_alloc;
+ }
+
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
+ if (!tx_ring->txbufs)
+ goto err_alloc;
+
+ if (!tx_ring->is_xdp && dp->netdev)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
+
+ return 0;
+
+err_alloc:
+ nfp_nfd3_tx_ring_free(tx_ring);
+ return -ENOMEM;
+}
+
+static void
+nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ unsigned int i;
+
+ if (!tx_ring->is_xdp)
+ return;
+
+ for (i = 0; i < tx_ring->cnt; i++) {
+ if (!tx_ring->txbufs[i].frag)
+ return;
+
+ nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
+ __free_page(virt_to_page(tx_ring->txbufs[i].frag));
+ }
+}
+
+static int
+nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
+ unsigned int i;
+
+ if (!tx_ring->is_xdp)
+ return 0;
+
+ for (i = 0; i < tx_ring->cnt; i++) {
+ txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
+ if (!txbufs[i].frag) {
+ nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nfp_nfd3_print_tx_descs(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ struct nfp_nfd3_tx_desc *txd;
+ u32 txd_cnt = tx_ring->cnt;
+ int i;
+
+ for (i = 0; i < txd_cnt; i++) {
+ struct xdp_buff *xdp;
+ struct sk_buff *skb;
+
+ txd = &tx_ring->txds[i];
+ seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
+ txd->vals[0], txd->vals[1],
+ txd->vals[2], txd->vals[3]);
+
+ if (!tx_ring->is_xdp) {
+ skb = READ_ONCE(tx_ring->txbufs[i].skb);
+ if (skb)
+ seq_printf(file, " skb->head=%p skb->data=%p",
+ skb->head, skb->data);
+ } else {
+ xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
+ if (xdp)
+ seq_printf(file, " xdp->data=%p", xdp->data);
+ }
+
+ if (tx_ring->txbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &tx_ring->txbufs[i].dma_addr);
+
+ if (i == tx_ring->rd_p % txd_cnt)
+ seq_puts(file, " H_RD");
+ if (i == tx_ring->wr_p % txd_cnt)
+ seq_puts(file, " H_WR");
+ if (i == d_rd_p % txd_cnt)
+ seq_puts(file, " D_RD");
+ if (i == d_wr_p % txd_cnt)
+ seq_puts(file, " D_WR");
+
+ seq_putc(file, '\n');
+ }
+}
+
+#define NFP_NFD3_CFG_CTRL_SUPPORTED \
+ (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
+ NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
+ NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
+ NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
+ NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
+ NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
+ NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VEPA | \
+ NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
+ NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
+ NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
+ NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfd3_ops = {
+ .version = NFP_NFD_VER_NFD3,
+ .tx_min_desc_per_pkt = 1,
+ .cap_mask = NFP_NFD3_CFG_CTRL_SUPPORTED,
+ .dma_mask = DMA_BIT_MASK(40),
+ .poll = nfp_nfd3_poll,
+ .xsk_poll = nfp_nfd3_xsk_poll,
+ .ctrl_poll = nfp_nfd3_ctrl_poll,
+ .xmit = nfp_nfd3_tx,
+ .ctrl_tx_one = nfp_nfd3_ctrl_tx_one,
+ .rx_ring_fill_freelist = nfp_nfd3_rx_ring_fill_freelist,
+ .tx_ring_alloc = nfp_nfd3_tx_ring_alloc,
+ .tx_ring_reset = nfp_nfd3_tx_ring_reset,
+ .tx_ring_free = nfp_nfd3_tx_ring_free,
+ .tx_ring_bufs_alloc = nfp_nfd3_tx_ring_bufs_alloc,
+ .tx_ring_bufs_free = nfp_nfd3_tx_ring_bufs_free,
+ .print_tx_descs = nfp_nfd3_print_tx_descs
+};
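
A sketch of how common datapath code is expected to dispatch through this
table; the nn->dp.ops indirection and helper name below are illustrative
assumptions, not code from this patch:

static netdev_tx_t ex_start_xmit(struct sk_buff *skb,
				 struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* common code stays agnostic of the descriptor format; for NFD3
	 * this resolves to nfp_nfd3_tx() via nfp_nfd3_ops above
	 */
	return nn->dp.ops->xmit(skb, netdev);
}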
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
new file mode 100644
index 000000000000..5d9db8c2a5b4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static bool
+nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
+ int pkt_off)
+{
+ struct xsk_buff_pool *pool = r_vec->xsk_pool;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ unsigned int wr_idx;
+
+ if (nfp_net_tx_space(tx_ring) < 1)
+ return false;
+
+ xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
+ pkt_len);
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->xdp = xrxbuf->xdp;
+ txbuf->real_len = pkt_len;
+ txbuf->is_xsk_tx = true;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr_40b(txd, xrxbuf->dma_addr + pkt_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_ptr_add++;
+ tx_ring->wr_p++;
+
+ return true;
+}
+
+static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
+ const struct nfp_net_rx_desc *rxd,
+ struct nfp_net_xsk_rx_buf *xrxbuf,
+ const struct nfp_meta_parsed *meta,
+ unsigned int pkt_len,
+ bool meta_xdp,
+ unsigned int *skbs_polled)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+
+ if (likely(!meta->portid)) {
+ netdev = dp->netdev;
+ } else {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
+ if (unlikely(!netdev)) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = napi_alloc_skb(&r_vec->napi, pkt_len);
+ if (!skb) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+ skb_put_data(skb, xrxbuf->xdp->data, pkt_len);
+
+ skb->mark = meta->mark;
+ skb_set_hash(skb, meta->hash, meta->hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
+
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, meta))) {
+ dev_kfree_skb_any(skb);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+
+ if (meta_xdp)
+ skb_metadata_set(skb,
+ xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
+
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+
+ nfp_net_xsk_rx_free(xrxbuf);
+
+ (*skbs_polled)++;
+}
+
+static unsigned int
+nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
+ unsigned int *skbs_polled)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_redir = false;
+ int pkts_polled = 0;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, pkt_len, pkt_off;
+ struct nfp_net_xsk_rx_buf *xrxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ int idx, act;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ xrxbuf = &rx_ring->xsk_rxbufs[idx];
+
+ /* If starved of buffers "drop" it and scream. */
+ if (rx_ring->rd_p >= rx_ring->wr_p) {
+ nn_dp_warn(dp, "Starved of RX buffers\n");
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ break;
+ }
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ /* Only supporting AF_XDP with dynamic metadata so buffer layout
+ * is always:
+ *
+ * ---------------------------------------------------------
+ * | off | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
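+ /* Worked example (illustrative values): meta_len = 8 and
+ * data_len = 1522 give pkt_len = 1514, with the 8 metadata bytes
+ * sitting immediately before the packet in the same buffer.
+ */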
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
+ nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ continue;
+ }
+
+ /* Stats update. */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ xrxbuf->xdp->data += meta_len;
+ xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
+ xdp_set_data_meta_invalid(xrxbuf->xdp);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ net_prefetch(xrxbuf->xdp->data);
+
+ if (meta_len) {
+ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+ xrxbuf->xdp->data -
+ meta_len,
+ xrxbuf->xdp->data,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "Invalid RX packet metadata\n");
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ continue;
+ }
+
+ if (unlikely(meta.portid)) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ if (meta.portid != NFP_META_PORT_ID_CTRL) {
+ nfp_nfd3_xsk_rx_skb(rx_ring, rxd,
+ xrxbuf, &meta,
+ pkt_len, false,
+ skbs_polled);
+ continue;
+ }
+
+ nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
+ pkt_len);
+ nfp_net_xsk_rx_free(xrxbuf);
+ continue;
+ }
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);
+
+ pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
+ pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;
+
+ switch (act) {
+ case XDP_PASS:
+ nfp_nfd3_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
+ true, skbs_polled);
+ break;
+ case XDP_TX:
+ if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
+ xrxbuf, pkt_len, pkt_off))
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ else
+ nfp_net_xsk_rx_unstash(xrxbuf);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ } else {
+ nfp_net_xsk_rx_unstash(xrxbuf);
+ xdp_redir = true;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ break;
+ }
+ }
+
+ nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
+
+ if (xdp_redir)
+ xdp_do_flush_map();
+
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return pkts_polled;
+}
+
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf)
+{
+ xsk_buff_free(txbuf->xdp);
+
+ txbuf->dma_addr = 0;
+ txbuf->xdp = NULL;
+}
+
+static bool nfp_nfd3_xsk_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ u32 done_pkts = 0, done_bytes = 0, reused = 0;
+ bool done_all;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return true;
+
+ /* Work out how many descriptors have been transmitted. */
+ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+ done_pkts = todo;
+ while (todo--) {
+ struct nfp_nfd3_tx_buf *txbuf;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_ring->rd_p++;
+
+ txbuf = &tx_ring->txbufs[idx];
+ if (unlikely(!txbuf->real_len))
+ continue;
+
+ done_bytes += txbuf->real_len;
+ txbuf->real_len = 0;
+
+ if (txbuf->is_xsk_tx) {
+ nfp_nfd3_xsk_tx_free(txbuf);
+ reused++;
+ }
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
+ struct xsk_buff_pool *xsk_pool;
+ struct nfp_nfd3_tx_desc *txd;
+ u32 pkts = 0, wr_idx;
+ u32 i, got;
+
+ xsk_pool = r_vec->xsk_pool;
+
+ while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
+ for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
+ if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
+ break;
+ got = i;
+ if (!got)
+ break;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+ prefetchw(&tx_ring->txds[wr_idx]);
+
+ for (i = 0; i < got; i++)
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
+ desc[i].len);
+
+ for (i = 0; i < got; i++) {
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+
+ tx_ring->txbufs[wr_idx].real_len = desc[i].len;
+ tx_ring->txbufs[wr_idx].is_xsk_tx = false;
+
+ /* Build TX descriptor. */
+ txd = &tx_ring->txds[wr_idx];
+ nfp_desc_set_dma_addr_40b(txd,
+ xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(desc[i].len);
+ txd->data_len = cpu_to_le16(desc[i].len);
+ }
+
+ tx_ring->wr_p += got;
+ pkts += got;
+ }
+
+ if (!pkts)
+ return;
+
+ xsk_tx_release(xsk_pool);
+ /* Ensure all records are visible before incrementing write counter. */
+ wmb();
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
+}
+
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled, skbs = 0;
+
+ pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);
+
+ if (pkts_polled < budget) {
+ if (r_vec->tx_ring)
+ nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+
+ if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
+ pkts_polled = budget;
+
+ nfp_nfd3_xsk_tx(r_vec->xdp_ring);
+
+ if (pkts_polled < budget && napi_complete_done(napi, skbs))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ }
+
+ return pkts_polled;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
new file mode 100644
index 000000000000..2b427d8ccb2f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -0,0 +1,1539 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+#include <linux/overflow.h>
+#include <linux/sizes.h>
+#include <linux/bitfield.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfdk.h"
+
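+/* Wake only once at least twice the stop threshold is free, adding
+ * hysteresis so the queue does not flap between stopped and started
+ * (editorial note on the apparent intent).
+ */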
+static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+ return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
+}
+
+static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+ return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
+}
+
+static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ netif_tx_stop_queue(nd_q);
+
+ /* We can race with the TX completion out of NAPI so recheck */
+ smp_mb();
+ if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
+ netif_tx_start_queue(nd_q);
+}
+
+static __le64
+nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
+ struct sk_buff *skb)
+{
+ u32 segs, hdrlen, l3_offset, l4_offset;
+ struct nfp_nfdk_tx_desc txd;
+ u16 mss;
+
+ if (!skb->encapsulation) {
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
+ hdrlen = skb_tcp_all_headers(skb);
+ } else {
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
+ }
+
+ segs = skb_shinfo(skb)->gso_segs;
+ mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
+
+ txd.l3_offset = l3_offset;
+ txd.l4_offset = l4_offset;
+ txd.lso_meta_res = 0;
+ txd.mss = cpu_to_le16(mss);
+ txd.lso_hdrlen = hdrlen;
+ txd.lso_totsegs = segs;
+
+ txbuf->pkt_cnt = segs;
+ txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);
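+ /* Illustrative accounting (assumed sizes): skb->len = 9000 with
+ * hdrlen = 54 and gso_segs = 7 gives real_len = 9000 + 54 * 6 = 9324,
+ * since headers are replicated on the wire for every segment after
+ * the first.
+ */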
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_lso++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ return txd.raw;
+}
+
+static u8
+nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ return flags;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return flags;
+
+ flags |= NFDK_DESC_TX_L4_CSUM;
+
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ /* L3 checksum offloading flag is not required for ipv6 */
+ if (iph->version == 4) {
+ flags |= NFDK_DESC_TX_L3_CSUM;
+ } else if (ipv6h->version != 6) {
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+ return flags;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (!skb->encapsulation) {
+ r_vec->hw_csum_tx += pkt_cnt;
+ } else {
+ flags |= NFDK_DESC_TX_ENCAP;
+ r_vec->hw_csum_tx_inner += pkt_cnt;
+ }
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ return flags;
+}
+
+static int
+nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ unsigned int n_descs, wr_p, nop_slots;
+ const skb_frag_t *frag, *fend;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int nr_frags;
+ unsigned int wr_idx;
+ int err;
+
+recount_descs:
+ n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+ for (; frag < fend; frag++)
+ n_descs += DIV_ROUND_UP(skb_frag_size(frag),
+ NFDK_TX_MAX_DATA_PER_DESC);
+
+ if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
+ if (skb_is_nonlinear(skb)) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ goto recount_descs;
+ }
+ return -EINVAL;
+ }
+
+ /* Undercount by 1 (don't count the meta descriptor) so the round down works out */
+ n_descs += !!skb_is_gso(skb);
+
+ if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
+ goto close_block;
+
+ if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
+ goto close_block;
+
+ return 0;
+
+close_block:
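+ /* Pad the remainder of the current descriptor block with no-op
+ * descriptors so the next packet starts block-aligned; see
+ * D_BLOCK_CPL() in nfdk.h for the arithmetic.
+ */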
+ wr_p = tx_ring->wr_p;
+ nop_slots = D_BLOCK_CPL(wr_p);
+
+ wr_idx = D_IDX(tx_ring, wr_p);
+ tx_ring->ktxbufs[wr_idx].skb = NULL;
+ txd = &tx_ring->ktxds[wr_idx];
+
+ memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p += nop_slots;
+ tx_ring->wr_ptr_add += nop_slots;
+
+ return 0;
+}
+
+static int
+nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
+ struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+ bool vlan_insert;
+ u32 meta_id = 0;
+ int md_bytes;
+
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
+
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
+
+ if (!(md_dst || vlan_insert))
+ return 0;
+
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
+
+ if (unlikely(skb_cow_head(skb, md_bytes)))
+ return -ENOMEM;
+
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= NFP_NET_META_PORTID_SIZE;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+ /* skb->vlan_proto is already __be16, so memcpy() fills the
+ * metadata without needing put_unaligned_be16()
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
+
+ meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
+ FIELD_PREP(NFDK_META_FIELDS, meta_id);
+
+ data -= sizeof(meta_id);
+ put_unaligned_be32(meta_id, data);
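+ /* Illustrative result with both fields present: md_bytes = 12, and
+ * the word just written encodes the length plus VLAN in its lowest
+ * field nibble and PORTID above it, mirroring the back-to-front
+ * order the payloads were pushed (a sketch, not a spec quote).
+ */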
+
+ return NFDK_DESC_TX_CHAIN_META;
+}
+
+/**
+ * nfp_nfdk_tx() - Main transmit entry point
+ * @skb: SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
+ u32 cnt, tmp_dlen, dlen_type = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ const skb_frag_t *frag, *fend;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int real_len, qidx;
+ unsigned int dma_len, type;
+ struct netdev_queue *nd_q;
+ struct nfp_net_dp *dp;
+ int nr_frags, wr_idx;
+ dma_addr_t dma_addr;
+ u64 metadata;
+
+ dp = &nn->dp;
+ qidx = skb_get_queue_mapping(skb);
+ tx_ring = &dp->tx_rings[qidx];
+ r_vec = tx_ring->r_vec;
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+
+ /* Don't bother counting frags, assume the worst */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
+ netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NETDEV_TX_BUSY;
+ }
+
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
+ if (unlikely((int)metadata < 0))
+ goto err_flush;
+
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
+ goto err_flush;
+
+ /* nr_frags may change if skb_linearize() ran, so only read it
+ * after nfp_nfdk_tx_maybe_close_block()
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ /* DMA map all */
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ dma_len = skb_headlen(skb);
+ if (skb_is_gso(skb))
+ type = NFDK_DESC_TX_TYPE_TSO;
+ else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_warn_dma;
+
+ txbuf->skb = skb;
+ txbuf++;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ /* FIELD_PREP() implicitly truncates to chunk */
+ dma_len -= 1;
+
+ /* We will do our best to pass as much data as we can in the
+ * descriptor, and we need to make sure the first descriptor
+ * includes the whole head since there is a limitation on the
+ * firmware side. Note that dma_len bitwise ANDed with
+ * NFDK_DESC_TX_DMA_LEN_HEAD can be less than headlen.
+ */
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
+
+ /* starts at bit 0 */
+ BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
+
+ /* Preserve the original dlen_type so the EOP logic below can
+ * reuse it.
+ */
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
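+ /* Worked example (assumed sizes): headlen = 6000 becomes
+ * dma_len = 5999 after the -1 length bias; the head field caps at
+ * 4095, so this descriptor covers 4096 bytes and 1904 bytes are left
+ * for the gather loop below.
+ */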
+
+ /* The rest of the data (if any) will be in larger DMA descriptors
+ * and is handled with the fragment loop.
+ */
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+
+ while (true) {
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ if (frag >= fend)
+ break;
+
+ dma_len = skb_frag_size(frag);
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_unmap;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ frag++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ if (!skb_is_gso(skb)) {
+ real_len = skb->len;
+ /* Metadata desc */
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+ txd->raw = cpu_to_le64(metadata);
+ txd++;
+ } else {
+ /* lso desc should be placed after metadata desc */
+ (txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
+ real_len = txbuf->real_len;
+ /* Metadata desc */
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+ txd->raw = cpu_to_le64(metadata);
+ txd += 2;
+ txbuf++;
+ }
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+ goto err_warn_overflow;
+
+ skb_tx_timestamp(skb);
+
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += skb->len;
+ else
+ tx_ring->data_pending = 0;
+
+ if (nfp_nfdk_tx_ring_should_stop(tx_ring))
+ nfp_nfdk_tx_ring_stop(nd_q, tx_ring);
+
+ tx_ring->wr_ptr_add += cnt;
+ if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_warn_overflow:
+ WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+ wr_idx, skb_headlen(skb), nr_frags, cnt);
+ if (skb_is_gso(skb))
+ txbuf--;
+err_unmap:
+ /* txbuf pointed to the next-to-use */
+ etxbuf = txbuf;
+ /* first txbuf holds the skb */
+ txbuf = &tx_ring->ktxbufs[wr_idx + 1];
+ if (txbuf < etxbuf) {
+ dma_unmap_single(dp->dev, txbuf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+ txbuf++;
+ }
+ frag = skb_shinfo(skb)->frags;
+ while (txbuf < etxbuf) {
+ dma_unmap_page(dp->dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+ frag++;
+ txbuf++;
+ }
+err_warn_dma:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfdk_tx_complete() - Handle completed TX packets
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
+ */
+static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ struct nfp_nfdk_tx_buf *ktxbufs;
+ struct device *dev = dp->dev;
+ struct netdev_queue *nd_q;
+ u32 rd_p, qcp_rd_p;
+ int todo;
+
+ rd_p = tx_ring->rd_p;
+ if (tx_ring->wr_p == rd_p)
+ return;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+ ktxbufs = tx_ring->ktxbufs;
+
+ while (todo > 0) {
+ const skb_frag_t *frag, *fend;
+ unsigned int size, n_descs = 1;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct sk_buff *skb;
+
+ txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];
+ skb = txbuf->skb;
+ txbuf++;
+
+ /* Closed block */
+ if (!skb) {
+ n_descs = D_BLOCK_CPL(rd_p);
+ goto next;
+ }
+
+ /* Unmap head */
+ size = skb_headlen(skb);
+ n_descs += nfp_nfdk_headlen_to_segs(size);
+ dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+ txbuf++;
+
+ /* Unmap frags */
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + skb_shinfo(skb)->nr_frags;
+ for (; frag < fend; frag++) {
+ size = skb_frag_size(frag);
+ n_descs += DIV_ROUND_UP(size,
+ NFDK_TX_MAX_DATA_PER_DESC);
+ dma_unmap_page(dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ txbuf++;
+ }
+
+ if (!skb_is_gso(skb)) {
+ done_bytes += skb->len;
+ done_pkts++;
+ } else {
+ done_bytes += txbuf->real_len;
+ done_pkts += txbuf->pkt_cnt;
+ n_descs++;
+ }
+
+ napi_consume_skb(skb, budget);
+next:
+ rd_p += n_descs;
+ todo -= n_descs;
+ }
+
+ tx_ring->rd_p = rd_p;
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ if (!dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+ if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
+ /* Make sure TX thread will see updated tx_ring->rd_p */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(nd_q)))
+ netif_tx_wake_queue(nd_q);
+ }
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+/* Receive processing */
+static void *
+nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
+ } else {
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_nfdk_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring structure
+ * @frag: page fragment buffer
+ * @dma_addr: DMA address of skb mapping
+ */
+static void
+nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
+ void *frag, dma_addr_t dma_addr)
+{
+ unsigned int wr_idx;
+
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+ /* Stash SKB and DMA address away */
+ rx_ring->rxbufs[wr_idx].frag = frag;
+ rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+ /* Fill freelist descriptor */
+ rx_ring->rxds[wr_idx].fld.reserved = 0;
+ rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
+
+ rx_ring->wr_p++;
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+ /* Update write pointer of the freelist queue. Make
+ * sure all writes are flushed before telling the hardware.
+ */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+ }
+}
+
+/**
+ * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfdk_rx_csum_has_errors() - group check if rxd has any csum errors
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfdk_rx_csum_has_errors(u16 flags)
+{
+ u16 csum_all_checked, csum_all_ok;
+
+ csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+ csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
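+ /* The OK bits sit PCIE_DESC_RX_CSUM_OK_SHIFT positions below their
+ * "checked" counterparts, so shifting them up aligns the two masks;
+ * the compare below then fails iff some checked checksum lacks its
+ * OK bit (a sketch of the intent, bit layout per the descriptor
+ * definitions).
+ */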
+
+ return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
+/**
+ * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+static void
+nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_complete++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_error++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ /* Assume that the firmware will never report inner CSUM_OK unless outer
+ * L4 headers were successfully parsed. FW will always report zero UDP
+ * checksum as CSUM_OK.
+ */
+ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_inner_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+}
+
+static void
+nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
+{
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (type) {
+ case NFP_NET_RSS_IPV4:
+ case NFP_NET_RSS_IPV6:
+ case NFP_NET_RSS_IPV6_EX:
+ meta->hash_type = PKT_HASH_TYPE_L3;
+ break;
+ default:
+ meta->hash_type = PKT_HASH_TYPE_L4;
+ break;
+ }
+
+ meta->hash = get_unaligned_be32(hash);
+}
+
+static bool
+nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+ u32 meta_info, vlan_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
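+ /* The field word packs 4-bit type tags, consumed lowest nibble
+ * first, each tag's payload following in the same order in the data
+ * area; HASH additionally consumes a second nibble for the hash type
+ * (informal sketch of the format, not a spec quote).
+ */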
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_nfdk_set_hash(netdev, meta,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ meta->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
+ default:
+ return true;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data != pkt;
+}
+
+static void
+nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ /* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ /* The skb is built around the frag, so freeing the skb would free
+ * the frag; take an extra ref to be able to reuse it.
+ */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_rx_ring *rx_ring;
+ u32 qcp_rd_p, done = 0;
+ bool done_all;
+ int todo;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ rx_ring = r_vec->rx_ring;
+ while (todo > 0) {
+ int idx = D_IDX(tx_ring, tx_ring->rd_p + done);
+ struct nfp_nfdk_tx_buf *txbuf;
+ unsigned int step = 1;
+
+ txbuf = &tx_ring->ktxbufs[idx];
+ if (!txbuf->raw)
+ goto next;
+
+ if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) {
+ WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n");
+ goto next;
+ }
+
+ /* Two successive txbufs are used to stash virtual and dma
+ * address respectively, recycle and clean them here.
+ */
+ nfp_nfdk_rx_give_one(dp, rx_ring,
+ (void *)NFDK_TX_BUF_PTR(txbuf[0].val),
+ txbuf[1].dma_addr);
+ txbuf[0].raw = 0;
+ txbuf[1].raw = 0;
+ step = 2;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ /* Note: tx_bytes not accumulated. */
+ r_vec->tx_pkts++;
+ u64_stats_update_end(&r_vec->tx_sync);
+next:
+ todo -= step;
+ done += step;
+ }
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done);
+ tx_ring->rd_p += done;
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+static bool
+nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+ unsigned int pkt_len, bool *completed)
+{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+ unsigned int dma_len, type, cnt, dlen_type, tmp_dlen;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int n_descs;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
+ /* Make sure there's still at least one block available after
+ * aligning to block boundary, so that the txds used below
+ * won't wrap around the tx_ring.
+ */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ if (!*completed) {
+ nfp_nfdk_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
+ }
+
+ /* Check if the packet would cross a block boundary */
+ n_descs = nfp_nfdk_headlen_to_segs(pkt_len);
+ if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) ||
+ ((u32)tx_ring->data_pending + pkt_len >
+ NFDK_TX_MAX_DATA_PER_BLOCK)) {
+ unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p);
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ memset(txd, 0,
+ array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p += nop_slots;
+ tx_ring->wr_ptr_add += nop_slots;
+ }
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP;
+ txbuf[1].dma_addr = rxbuf->dma_addr;
+ /* Note: pkt len not stored */
+
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+ pkt_len, DMA_BIDIRECTIONAL);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->ktxds[wr_idx];
+ dma_len = pkt_len;
+ dma_addr = rxbuf->dma_addr + dma_off;
+
+ if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ /* FIELD_PREP() implicitly truncates to chunk */
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
+
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ /* Metadata desc */
+ txd->raw = 0;
+ txd++;
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += pkt_len;
+ else
+ tx_ring->data_pending = 0;
+
+ tx_ring->wr_ptr_add += cnt;
+ return true;
+}
+
+/**
+ * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring: RX ring to receive from
+ * @budget: NAPI budget
+ *
+ * Note, this function is separated out from the napi poll function to
+ * more cleanly separate packet receive code from other bookkeeping
+ * functions performed in the napi poll function.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ struct xdp_buff xdp;
+ int idx;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ bool redir_egress = false;
+ struct net_device *netdev;
+ dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
+ void *new_frag;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
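+ /* Worked example (assumed config): rx_dma_off = 0, rx_offset = 64
+ * and meta_len = 8 give pkt_off = NFP_NET_RX_BUF_HEADROOM + 64 and
+ * meta_off = pkt_off - 8; with the dynamic rx_offset, metadata
+ * instead starts right at the headroom boundary.
+ */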
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (meta_len) {
+ if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
+ if (xdp_prog && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
+ unsigned int dma_off;
+ int act;
+
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len = xdp.data_end - xdp.data;
+ pkt_off += xdp.data - orig_data;
+
+ switch (act) {
+ case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
+ break;
+ case XDP_TX:
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring,
+ tx_ring,
+ rxbuf,
+ dma_off,
+ pkt_len,
+ &xdp_tx_cmpl)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
+ if (unlikely(!netdev)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
+
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
+ }
+
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_nfdk_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
+
+ return pkts_polled;
+}
+
+/**
+ * nfp_nfdk_poll() - napi poll function
+ * @napi: NAPI structure
+ * @budget: NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfdk_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled = 0;
+
+ if (r_vec->tx_ring)
+ nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
+ if (r_vec->rx_ring)
+ pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);
+
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
+ return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ u32 cnt, tmp_dlen, dlen_type = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int dma_len, type;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ u64 metadata = 0;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ /* Don't bother counting frags, assume the worst */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ metadata = NFDK_DESC_TX_CHAIN_META;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
+ FIELD_PREP(NFDK_META_FIELDS,
+ NFP_NET_META_PORTID),
+ skb_push(skb, 4));
+ }
+
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
+ goto err_free;
+
+ /* DMA map all */
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ dma_len = skb_headlen(skb);
+ if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_warn_dma;
+
+ txbuf->skb = skb;
+ txbuf++;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
+
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ /* Metadata desc */
+ txd->raw = cpu_to_le64(metadata);
+ txd++;
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+ goto err_warn_overflow;
+
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += skb->len;
+ else
+ tx_ring->data_pending = 0;
+
+ tx_ring->wr_ptr_add += cnt;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_warn_overflow:
+ WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+ wr_idx, skb_headlen(skb), 0, cnt);
+ txbuf--;
+ dma_unmap_single(dp->dev, txbuf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+err_warn_dma:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
+
+ return budget;
+}
+
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
+{
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+ spin_lock(&r_vec->lock);
+ nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock(&r_vec->lock);
+
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
new file mode 100644
index 000000000000..0ea51d9f2325
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFDK_H_
+#define _NFP_DP_NFDK_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#define NFDK_TX_DESC_PER_SIMPLE_PKT 2
+
+#define NFDK_TX_MAX_DATA_PER_HEAD SZ_4K
+#define NFDK_TX_MAX_DATA_PER_DESC SZ_16K
+#define NFDK_TX_DESC_BLOCK_SZ 256
+#define NFDK_TX_DESC_BLOCK_CNT (NFDK_TX_DESC_BLOCK_SZ / \
+ sizeof(struct nfp_nfdk_tx_desc))
+#define NFDK_TX_DESC_STOP_CNT (NFDK_TX_DESC_BLOCK_CNT * \
+ NFDK_TX_DESC_PER_SIMPLE_PKT)
+#define NFDK_TX_MAX_DATA_PER_BLOCK SZ_64K
+#define NFDK_TX_DESC_GATHER_MAX 17
+
+/* TX descriptor format */
+
+#define NFDK_DESC_TX_MSS_MASK GENMASK(13, 0)
+
+#define NFDK_DESC_TX_CHAIN_META BIT(3)
+#define NFDK_DESC_TX_ENCAP BIT(2)
+#define NFDK_DESC_TX_L4_CSUM BIT(1)
+#define NFDK_DESC_TX_L3_CSUM BIT(0)
+
+#define NFDK_DESC_TX_DMA_LEN_HEAD GENMASK(11, 0)
+#define NFDK_DESC_TX_TYPE_HEAD GENMASK(15, 12)
+#define NFDK_DESC_TX_DMA_LEN GENMASK(13, 0)
+#define NFDK_DESC_TX_TYPE_NOP 0
+#define NFDK_DESC_TX_TYPE_GATHER 1
+#define NFDK_DESC_TX_TYPE_TSO 2
+#define NFDK_DESC_TX_TYPE_SIMPLE 8
+#define NFDK_DESC_TX_EOP BIT(14)
+
+#define NFDK_META_LEN GENMASK(7, 0)
+#define NFDK_META_FIELDS GENMASK(31, 8)
+
+#define D_BLOCK_CPL(idx) (NFDK_TX_DESC_BLOCK_CNT - \
+ (idx) % NFDK_TX_DESC_BLOCK_CNT)
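+/* e.g. with the 8-byte descriptors below a block holds 256 / 8 = 32 of
+ * them, so D_BLOCK_CPL(60) = 32 - (60 % 32) = 4 no-op slots remain to
+ * the next block boundary (illustrative arithmetic).
+ */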
+
+struct nfp_nfdk_tx_desc {
+ union {
+ struct {
+ __le16 dma_addr_hi; /* High bits of host buf address */
+ __le16 dma_len_type; /* Length to DMA for this desc */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+ };
+
+ struct {
+ __le16 mss; /* MSS to be used for LSO */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
+ u8 lso_totsegs; /* LSO, total segments */
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ __le16 lso_meta_res; /* Rsvd bits in TSO metadata */
+ };
+
+ struct {
+ u8 flags; /* TX Flags, see @NFDK_DESC_TX_* */
+ u8 reserved[7]; /* meta byte placeholder */
+ };
+
+ __le32 vals[2];
+ __le64 raw;
+ };
+};
+
+/* The device doesn't make use of the 2 or 3 least significant bits of the address
+ * due to alignment constraints. The driver can make use of those bits to carry
+ * information about the buffer before giving it to the device.
+ *
+ * NOTE: The driver must clear the lower bits before handing the buffer to the
+ * device.
+ *
+ * - NFDK_TX_BUF_INFO_SOP - Start of a packet
+ * Mark the buffer as a start of a packet. This is used in the XDP TX process
+ * to stash virtual and DMA address so that they can be recycled when the TX
+ * operation is completed.
+ */
+#define NFDK_TX_BUF_PTR(val) ((val) & ~(sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO(val) ((val) & (sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO_SOP BIT(0)
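+
+/* Usage sketch (frag pointers are assumed at least pointer-aligned):
+ * val = (unsigned long)frag | NFDK_TX_BUF_INFO_SOP;
+ * NFDK_TX_BUF_PTR(val) recovers the frag address
+ * NFDK_TX_BUF_INFO(val) recovers NFDK_TX_BUF_INFO_SOP
+ */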
+
+struct nfp_nfdk_tx_buf {
+ union {
+ /* First slot */
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ unsigned long val;
+ };
+
+ /* 1 + nr_frags next slots */
+ dma_addr_t dma_addr;
+
+ /* TSO (optional) */
+ struct {
+ u32 pkt_cnt;
+ u32 real_len;
+ };
+
+ u64 raw;
+ };
+};
+
+static inline int nfp_nfdk_headlen_to_segs(unsigned int headlen)
+{
+ /* First descriptor fits less data, so adjust for that */
+ return DIV_ROUND_UP(headlen +
+ NFDK_TX_MAX_DATA_PER_DESC -
+ NFDK_TX_MAX_DATA_PER_HEAD,
+ NFDK_TX_MAX_DATA_PER_DESC);
+}
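+
+/* e.g. (illustrative) headlen = 6000:
+ * DIV_ROUND_UP(6000 + 16384 - 4096, 16384) = 2 descriptors, i.e. a
+ * 4 KiB-max first chunk plus one 16 KiB-max gather descriptor.
+ */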
+
+int nfp_nfdk_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
new file mode 100644
index 000000000000..fdb8144a63e0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "nfdk.h"
+
+static void
+nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct device *dev = dp->dev;
+ struct netdev_queue *nd_q;
+
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+ const skb_frag_t *frag, *fend;
+ unsigned int size, n_descs = 1;
+ struct nfp_nfdk_tx_buf *txbuf;
+ int nr_frags, rd_idx;
+ struct sk_buff *skb;
+
+ rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
+ txbuf = &tx_ring->ktxbufs[rd_idx];
+
+ skb = txbuf->skb;
+ if (!skb) {
+ n_descs = D_BLOCK_CPL(tx_ring->rd_p);
+ goto next;
+ }
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ txbuf++;
+
+ /* Unmap head */
+ size = skb_headlen(skb);
+ dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+ n_descs += nfp_nfdk_headlen_to_segs(size);
+ txbuf++;
+
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+ for (; frag < fend; frag++) {
+ size = skb_frag_size(frag);
+ dma_unmap_page(dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ n_descs += DIV_ROUND_UP(size,
+ NFDK_TX_MAX_DATA_PER_DESC);
+ txbuf++;
+ }
+
+ if (skb_is_gso(skb))
+ n_descs++;
+
+ dev_kfree_skb_any(skb);
+next:
+ tx_ring->rd_p += n_descs;
+ }
+
+ memset(tx_ring->txds, 0, tx_ring->size);
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
+ if (tx_ring->is_xdp || !dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_reset_queue(nd_q);
+}
+
+static void nfp_nfdk_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ kvfree(tx_ring->ktxbufs);
+
+ if (tx_ring->ktxds)
+ dma_free_coherent(dp->dev, tx_ring->size,
+ tx_ring->ktxds, tx_ring->dma);
+
+ tx_ring->cnt = 0;
+ tx_ring->txbufs = NULL;
+ tx_ring->txds = NULL;
+ tx_ring->dma = 0;
+ tx_ring->size = 0;
+}
+
+static int
+nfp_nfdk_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+ tx_ring->cnt = dp->txd_cnt * NFDK_TX_DESC_PER_SIMPLE_PKT;
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->ktxds));
+ tx_ring->ktxds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->ktxds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
+ goto err_alloc;
+ }
+
+ tx_ring->ktxbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->ktxbufs),
+ GFP_KERNEL);
+ if (!tx_ring->ktxbufs)
+ goto err_alloc;
+
+ if (!tx_ring->is_xdp && dp->netdev)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
+
+ return 0;
+
+err_alloc:
+ nfp_nfdk_tx_ring_free(tx_ring);
+ return -ENOMEM;
+}
+
+static void
+nfp_nfdk_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+}
+
+static int
+nfp_nfdk_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ return 0;
+}
+
+static void
+nfp_nfdk_print_tx_descs(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ struct nfp_nfdk_tx_desc *txd;
+ u32 txd_cnt = tx_ring->cnt;
+ int i;
+
+ for (i = 0; i < txd_cnt; i++) {
+ txd = &tx_ring->ktxds[i];
+
+ seq_printf(file, "%04d: 0x%08x 0x%08x 0x%016llx", i,
+ txd->vals[0], txd->vals[1], tx_ring->ktxbufs[i].raw);
+
+ if (i == tx_ring->rd_p % txd_cnt)
+ seq_puts(file, " H_RD");
+ if (i == tx_ring->wr_p % txd_cnt)
+ seq_puts(file, " H_WR");
+ if (i == d_rd_p % txd_cnt)
+ seq_puts(file, " D_RD");
+ if (i == d_wr_p % txd_cnt)
+ seq_puts(file, " D_WR");
+
+ seq_putc(file, '\n');
+ }
+}
+
+#define NFP_NFDK_CFG_CTRL_SUPPORTED \
+ (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
+ NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
+ NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
+ NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
+ NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
+ NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
+ NFP_NET_CFG_CTRL_TXRWB | NFP_NET_CFG_CTRL_VEPA | \
+ NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
+ NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
+ NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
+ NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfdk_ops = {
+ .version = NFP_NFD_VER_NFDK,
+ .tx_min_desc_per_pkt = NFDK_TX_DESC_PER_SIMPLE_PKT,
+ .cap_mask = NFP_NFDK_CFG_CTRL_SUPPORTED,
+ .dma_mask = DMA_BIT_MASK(48),
+ .poll = nfp_nfdk_poll,
+ .ctrl_poll = nfp_nfdk_ctrl_poll,
+ .xmit = nfp_nfdk_tx,
+ .ctrl_tx_one = nfp_nfdk_ctrl_tx_one,
+ .rx_ring_fill_freelist = nfp_nfdk_rx_ring_fill_freelist,
+ .tx_ring_alloc = nfp_nfdk_tx_ring_alloc,
+ .tx_ring_reset = nfp_nfdk_tx_ring_reset,
+ .tx_ring_free = nfp_nfdk_tx_ring_free,
+ .tx_ring_bufs_alloc = nfp_nfdk_tx_ring_bufs_alloc,
+ .tx_ring_bufs_free = nfp_nfdk_tx_ring_bufs_free,
+ .print_tx_descs = nfp_nfdk_print_tx_descs
+};
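[Editorial sketch, not part of the patch] nfp_nfdk_ops is one instance of a per-datapath vtable: the core code calls through nfp_dp_ops function pointers, and probe picks NFD3 or NFDK ops for the vNIC. A self-contained model of that dispatch pattern, with invented names (dp_ops, nfd3_xmit, ...):

	#include <stdio.h>

	struct dp_ops {
		const char *name;
		int (*xmit)(int pkt_len);
	};

	static int nfd3_xmit(int pkt_len) { printf("nfd3 xmit %d\n", pkt_len); return 0; }
	static int nfdk_xmit(int pkt_len) { printf("nfdk xmit %d\n", pkt_len); return 0; }

	static const struct dp_ops nfd3_ops = { .name = "nfd3", .xmit = nfd3_xmit };
	static const struct dp_ops nfdk_ops = { .name = "nfdk", .xmit = nfdk_xmit };

	int main(void)
	{
		/* in the driver the choice comes from the firmware version;
		 * here it is simply hard-coded
		 */
		int fw_uses_nfdk = 1;
		const struct dp_ops *ops = fw_uses_nfdk ? &nfdk_ops : &nfd3_ops;

		printf("using %s datapath\n", ops->name);
		return ops->xmit(64);
	}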
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 3a973282b2bb..bb3f46c74f77 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -121,7 +121,7 @@ struct nfp_reprs *
nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
{
return rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
}
struct nfp_reprs *
@@ -230,7 +230,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02x, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 3e9baff07100..dd56207df246 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -75,7 +75,7 @@ extern const struct nfp_app_type app_abm;
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
- * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
+ * @eswitch_mode_set: set SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
* @dev_get: get representor or internal port representing netdev
@@ -174,6 +174,16 @@ struct nfp_app {
void *priv;
};
+static inline void assert_nfp_app_locked(struct nfp_app *app)
+{
+ devl_assert_locked(priv_to_devlink(app->pf));
+}
+
+static inline bool nfp_app_is_locked(struct nfp_app *app)
+{
+ return devl_lock_is_held(priv_to_devlink(app->pf));
+}
+
void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
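[Editorial sketch, not part of the patch] The two helpers added above replace lockdep_is_held(&pf->lock) checks with checks against the devlink instance lock. The assert/is-locked pair can be modelled in plain C with a pthread mutex and a held flag standing in for devl_lock_is_held():

	#include <assert.h>
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t instance_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool instance_locked;		/* stand-in for devl_lock_is_held() */

	static bool app_is_locked(void)    { return instance_locked; }
	static void assert_app_locked(void) { assert(app_is_locked()); }

	static void do_protected_work(void)
	{
		assert_app_locked();		/* like devl_assert_locked() */
		printf("reprs table access is safe here\n");
	}

	int main(void)
	{
		pthread_mutex_lock(&instance_lock);
		instance_locked = true;

		do_protected_work();

		instance_locked = false;
		pthread_mutex_unlock(&instance_lock);
		return 0;
	}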
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index bea978df7713..405786c00334 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -26,12 +26,11 @@ nfp_devlink_fill_eth_port(struct nfp_port *port,
}
static int
-nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf,
+ struct devlink_port *dl_port,
struct nfp_eth_table_port *copy)
{
- struct nfp_port *port;
-
- port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+ struct nfp_port *port = container_of(dl_port, struct nfp_port, dl_port);
return nfp_devlink_fill_eth_port(port, copy);
}
@@ -62,7 +61,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
}
static int
-nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
unsigned int count, struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -70,33 +69,25 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- mutex_lock(&pf->lock);
-
rtnl_lock();
- ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (eth_port.port_lanes % count) {
- ret = -EINVAL;
- goto out;
- }
+ if (eth_port.port_lanes % count)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
if (eth_port.lanes == 10 && count == 2)
lanes = 8 / count;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- mutex_unlock(&pf->lock);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
-nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -104,29 +95,21 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- mutex_lock(&pf->lock);
-
rtnl_lock();
- ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (!eth_port.is_split) {
- ret = -EINVAL;
- goto out;
- }
+ if (!eth_port.is_split)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
if (eth_port.port_lanes == 8)
lanes = 10;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- mutex_unlock(&pf->lock);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
@@ -161,13 +144,8 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- ret = nfp_app_eswitch_mode_set(pf->app, mode);
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_set(pf->app, mode);
}
static const struct nfp_devlink_versions_simple {
@@ -375,12 +353,12 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
devlink = priv_to_devlink(app->pf);
- return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+ return devl_port_register(devlink, &port->dl_port, port->eth_id);
}
void nfp_devlink_port_unregister(struct nfp_port *port)
{
- devlink_port_unregister(&port->dl_port);
+ devl_port_unregister(&port->dl_port);
}
void nfp_devlink_port_type_eth_set(struct nfp_port *port)
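[Editorial sketch, not part of the patch] The switch from devlink_port_register() to devl_port_register() follows the devlink locked-API convention: devl_*() entry points run with the instance lock already held, which is why the pf->lock wrapping above can be dropped entirely. A standalone model of why a single instance lock makes the per-driver mutex redundant:

	#include <pthread.h>
	#include <stdio.h>

	struct devlink_model {
		pthread_mutex_t lock;		/* the one instance lock */
		int ports_registered;
	};

	/* locked variant: caller must hold dl->lock, like devl_port_register() */
	static void devl_port_register_model(struct devlink_model *dl)
	{
		dl->ports_registered++;
	}

	int main(void)
	{
		struct devlink_model dl = { PTHREAD_MUTEX_INITIALIZER, 0 };

		pthread_mutex_lock(&dl.lock);	/* like devl_lock() */
		devl_port_register_model(&dl);	/* no extra driver mutex needed */
		pthread_mutex_unlock(&dl.lock);	/* like devl_unlock() */

		printf("ports registered: %d\n", dl.ports_registered);
		return 0;
	}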
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index bb3b8a7f6c5d..71301dbd8fb5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -19,6 +19,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
@@ -32,17 +33,37 @@
static const char nfp_driver_name[] = "nfp";
static const struct pci_device_id nfp_pci_device_ids[] = {
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800,
},
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP4000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP5000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
+ },
+ { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800,
+ PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800,
+ },
+ { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP4000,
+ PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
+ },
+ { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP5000,
+ PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
+ },
+ { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000,
+ PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
{ 0, } /* Required last entry. */
};
@@ -222,6 +243,7 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
int err;
if (num_vfs > pf->limit_vfs) {
@@ -236,7 +258,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
- mutex_lock(&pf->lock);
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
@@ -250,11 +273,11 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
return num_vfs;
err_sriov_disable:
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
pci_disable_sriov(pdev);
return err;
#endif
@@ -265,8 +288,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
- mutex_lock(&pf->lock);
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
@@ -274,7 +299,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
return -EPERM;
}
@@ -282,7 +307,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
@@ -367,7 +392,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
/* First try to find a firmware image specific for this device */
interface = nfp_cpp_interface(pf->cpp);
nfp_cpp_serial(pf->cpp, &serial);
- sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw",
+ sprintf(fw_name, "netronome/serial-%pMF-%02x-%02x.nffw",
serial, interface >> 8, interface & 0xff);
fw = nfp_net_fw_request(pdev, pf, fw_name);
if (fw)
@@ -385,7 +410,9 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
return NULL;
}
- fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "nffw.partno");
+ if (!fw_model)
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
if (!fw_model) {
dev_err(&pdev->dev, "Error: can't read part number\n");
return NULL;
@@ -664,25 +691,91 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
return 0;
}
+int nfp_net_pf_get_app_id(struct nfp_pf *pf)
+{
+ return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
+ NFP_APP_CORE_NIC);
+}
+
+static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
+{
+ char name[32];
+ int err = 0;
+ u64 val;
+
+ snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp));
+
+ val = nfp_rtsym_read_le(pf->rtbl, name, &err);
+ if (err) {
+ if (err != -ENOENT)
+ nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
+
+ return 0;
+ }
+
+ return val;
+}
+
+static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf)
+{
+ struct nfp_nsp *nsp;
+ char hwinfo[32];
+ bool sp_indiff;
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp))
+ return;
+
+ if (!nfp_nsp_has_hwinfo_set(nsp))
+ goto end;
+
+ sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
+ (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
+
+ /* No need to clear `sp_indiff` in the driver; the management firmware
+ * will do it when the application firmware is unloaded.
+ */
+ snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ /* Not a fatal error; no need to return an error and stop the driver from loading */
+ if (err) {
+ nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err);
+ } else {
+ /* Need to reinit eth_tbl since the eth table state may change
+ * after sp_indiff is configured.
+ */
+ kfree(pf->eth_tbl);
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ }
+
+end:
+ nfp_nsp_close(nsp);
+}
+
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ const struct nfp_dev_info *dev_info;
struct devlink *devlink;
struct nfp_pf *pf;
int err;
- if (pdev->vendor == PCI_VENDOR_ID_NETRONOME &&
- pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
+ if ((pdev->vendor == PCI_VENDOR_ID_NETRONOME ||
+ pdev->vendor == PCI_VENDOR_ID_CORIGINE) &&
+ (pdev->device == PCI_DEVICE_ID_NFP3800_VF ||
+ pdev->device == PCI_DEVICE_ID_NFP6000_VF))
dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
+ dev_info = &nfp_dev_info[pci_id->driver_data];
+
err = pci_enable_device(pdev);
if (err < 0)
return err;
pci_set_master(pdev);
- err = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
if (err)
goto err_pci_disable;
@@ -700,9 +793,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf = devlink_priv(devlink);
INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
- mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
+ pf->dev_info = dev_info;
pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
if (!pf->wq) {
@@ -710,7 +803,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_pci_priv_unset;
}
- pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+ pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info);
if (IS_ERR(pf->cpp)) {
err = PTR_ERR(pf->cpp);
goto err_disable_msix;
@@ -760,6 +853,8 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_fw_unload;
}
+ nfp_pf_cfg_hwinfo(pf);
+
err = nfp_net_pci_probe(pf);
if (err)
goto err_fw_unload;
@@ -790,7 +885,6 @@ err_disable_msix:
destroy_workqueue(pf->wq);
err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
- mutex_destroy(&pf->lock);
devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
@@ -827,7 +921,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
kfree(pf->eth_tbl);
kfree(pf->nspi);
- mutex_destroy(&pf->lock);
devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -856,7 +949,9 @@ static int __init nfp_main_init(void)
{
int err;
- pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n",
+ pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2020 Netronome Systems\n",
+ nfp_driver_name);
+ pr_info("%s: NFP PCIe Driver, Copyright (C) 2021-2022 Corigine Inc.\n",
nfp_driver_name);
nfp_net_debugfs_create();
@@ -900,6 +995,6 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");
-MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
+MODULE_AUTHOR("Corigine, Inc. <oss-drivers@corigine.com>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
+MODULE_DESCRIPTION("The Network Flow Processor (NFP) driver.");
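[Editorial sketch, not part of the patch] The rewritten PCI ID table above relies on the driver_data field: each entry carries an index (NFP_DEV_NFP3800 or NFP_DEV_NFP6000) into a per-ASIC parameter array, so probe() can pick chip parameters such as the DMA mask per device. A standalone model of the pattern, with invented names and illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	enum chip { DEV_NFP3800, DEV_NFP6000 };

	struct chip_info {
		const char *name;
		int dma_bits;			/* illustrative values */
	};

	static const struct chip_info chip_info[] = {
		[DEV_NFP3800] = { "NFP3800", 48 },
		[DEV_NFP6000] = { "NFP6000", 40 },
	};

	struct id_entry {
		uint16_t vendor, device;
		unsigned long driver_data;	/* index into chip_info[] */
	};

	static const struct id_entry ids[] = {
		{ 0x19ee, 0x3800, DEV_NFP3800 },	/* IDs illustrative */
		{ 0x19ee, 0x6000, DEV_NFP6000 },
	};

	int main(void)
	{
		const struct chip_info *info = &chip_info[ids[0].driver_data];

		printf("%s: %d-bit DMA mask\n", info->name, info->dma_bits);
		return 0;
	}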
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index a7dede946a33..afd3edfa2428 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -13,7 +13,6 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
@@ -48,6 +47,7 @@ struct nfp_dumpspec {
/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
+ * @dev_info: NFP ASIC params
* @cpp: Pointer to the CPP handle
* @app: Pointer to the APP handle
* @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
@@ -84,10 +84,12 @@ struct nfp_dumpspec {
* @port_refresh_work: Work entry for taking netdevs out
* @shared_bufs: Array of shared buffer structures if FW has any SBs
* @num_shared_bufs: Number of elements in @shared_bufs
- * @lock: Protects all fields which may change after probe
+ *
+ * Fields which may change after probe are protected by the devlink instance lock.
*/
struct nfp_pf {
struct pci_dev *pdev;
+ const struct nfp_dev_info *dev_info;
struct nfp_cpp *cpp;
@@ -139,8 +141,6 @@ struct nfp_pf {
struct nfp_shared_buf *shared_bufs;
unsigned int num_shared_bufs;
-
- struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
@@ -161,6 +161,7 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
unsigned int default_val);
+int nfp_net_pf_get_app_id(struct nfp_pf *pf);
u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area);
@@ -190,4 +191,7 @@ int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
int nfp_devlink_params_register(struct nfp_pf *pf);
void nfp_devlink_params_unregister(struct nfp_pf *pf);
+
+unsigned int nfp_net_lr2speed(unsigned int linkrate);
+unsigned int nfp_net_speed2lr(unsigned int speed);
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 0b1865e9f0b5..a101ff30a1ae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -63,9 +63,6 @@
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4 /* OBSOLETE */
-/* Max bits in DMA address */
-#define NFP_NET_MAX_DMA_BITS 40
-
/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU 1500U
@@ -85,11 +82,6 @@
NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
-#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */
-#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */
-#define NFP_NET_MAX_TX_DESCS (256 * 1024) /* Max. # of Tx descs per ring */
-#define NFP_NET_MAX_RX_DESCS (256 * 1024) /* Max. # of Rx descs per ring */
-
#define NFP_NET_TX_DESCS_DEFAULT 4096 /* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */
@@ -105,104 +97,61 @@
/* Forward declarations */
struct nfp_cpp;
+struct nfp_dev_info;
+struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
+struct xsk_buff_pool;
+
+struct nfp_nfd3_tx_desc;
+struct nfp_nfd3_tx_buf;
+
+struct nfp_nfdk_tx_desc;
+struct nfp_nfdk_tx_buf;
/* Convenience macro for wrapping descriptor index on ring size */
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
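[Editorial sketch, not part of the patch] D_IDX() wraps a free-running pointer onto a ring slot; because the ring size is a power of two, masking by (cnt - 1) replaces a modulo. A tiny worked example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cnt = 8;		/* ring size, power of two */
		unsigned int idx = 13;		/* free-running write pointer */

		printf("slot = %u\n", idx & (cnt - 1));	/* 13 % 8 == 5 */
		return 0;
	}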
/* Convenience macro for writing dma address into RX/TX descriptors */
-#define nfp_desc_set_dma_addr(desc, dma_addr) \
+#define nfp_desc_set_dma_addr_40b(desc, dma_addr) \
do { \
- __typeof(desc) __d = (desc); \
+ __typeof__(desc) __d = (desc); \
dma_addr_t __addr = (dma_addr); \
\
__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
-/* TX descriptor format */
-
-#define PCIE_DESC_TX_EOP BIT(7)
-#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0)
-#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0)
-
-/* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM BIT(7)
-#define PCIE_DESC_TX_IP4_CSUM BIT(6)
-#define PCIE_DESC_TX_TCP_CSUM BIT(5)
-#define PCIE_DESC_TX_UDP_CSUM BIT(4)
-#define PCIE_DESC_TX_VLAN BIT(3)
-#define PCIE_DESC_TX_LSO BIT(2)
-#define PCIE_DESC_TX_ENCAP BIT(1)
-#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)
-
-struct nfp_net_tx_desc {
- union {
- struct {
- u8 dma_addr_hi; /* High bits of host buf address */
- __le16 dma_len; /* Length to DMA for this desc */
- u8 offset_eop; /* Offset in buf where pkt starts +
- * highest bit is eop flag.
- */
- __le32 dma_addr_lo; /* Low 32bit of host buf addr */
-
- __le16 mss; /* MSS to be used for LSO */
- u8 lso_hdrlen; /* LSO, TCP payload offset */
- u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
- union {
- struct {
- u8 l3_offset; /* L3 header offset */
- u8 l4_offset; /* L4 header offset */
- };
- __le16 vlan; /* VLAN tag to add if indicated */
- };
- __le16 data_len; /* Length of frame + meta data */
- } __packed;
- __le32 vals[4];
- __le64 vals8[2];
- };
-};
-
-/**
- * struct nfp_net_tx_buf - software TX buffer descriptor
- * @skb: normal ring, sk_buff associated with this buffer
- * @frag: XDP ring, page frag associated with this buffer
- * @dma_addr: DMA mapping address of the buffer
- * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
- * @pkt_cnt: Number of packets to be produced out of the skb associated
- * with this buffer (valid only on the head's buffer).
- * Will be 1 for all non-TSO packets.
- * @real_len: Number of bytes to be produced out of the skb (valid only
- * on the head's buffer). Equal to skb->len for non-TSO packets.
- */
-struct nfp_net_tx_buf {
- union {
- struct sk_buff *skb;
- void *frag;
- };
- dma_addr_t dma_addr;
- short int fidx;
- u16 pkt_cnt;
- u32 real_len;
-};
+#define nfp_desc_set_dma_addr_48b(desc, dma_addr) \
+ do { \
+ __typeof__(desc) __d = (desc); \
+ dma_addr_t __addr = (dma_addr); \
+ \
+ __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr)); \
+ __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
+ } while (0)
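[Editorial sketch, not part of the patch] The renamed 40-bit macro and the new 48-bit macro differ only in how much of the upper address half survives: one byte versus a full 16-bit field. A standalone worked example of the split, with local stand-ins for the kernel helpers:

	#include <stdint.h>
	#include <stdio.h>

	#define lower_32(a) ((uint32_t)(a))
	#define upper_32(a) ((uint32_t)((a) >> 32))

	int main(void)
	{
		uint64_t addr = 0x0000a1b2c3d4e5f6ULL;	/* a 48-bit DMA address */

		/* 40-bit descriptor: high part masked down to 8 bits */
		uint8_t  hi40 = upper_32(addr) & 0xff;
		uint32_t lo   = lower_32(addr);

		/* 48-bit descriptor: high part keeps all 16 bits */
		uint16_t hi48 = (uint16_t)upper_32(addr);

		printf("40b: hi=0x%02x lo=0x%08x\n", hi40, lo);
		printf("48b: hi=0x%04x lo=0x%08x\n", hi48, lo);
		return 0;
	}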
/**
* struct nfp_net_tx_ring - TX ring structure
* @r_vec: Back pointer to ring vector structure
* @idx: Ring index from Linux's perspective
- * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
+ * @data_pending: number of bytes added to current block (NFDK only)
* @qcp_q: Pointer to base of the QCP TX queue
+ * @txrwb: TX pointer write back area
* @cnt: Size of the queue in number of descriptors
* @wr_p: TX ring write pointer (free running)
* @rd_p: TX ring read pointer (free running)
* @qcp_rd_p: Local copy of QCP TX queue read pointer
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for .xmit_more delayed kick)
- * @txbufs: Array of transmitted TX buffers, to free on transmit
- * @txds: Virtual address of TX ring in host memory
+ * @txbufs: Array of transmitted TX buffers, to free on transmit (NFD3)
+ * @ktxbufs: Array of transmitted TX buffers, to free on transmit (NFDK)
+ * @txds: Virtual address of TX ring in host memory (NFD3)
+ * @ktxds: Virtual address of TX ring in host memory (NFDK)
+ *
+ * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
* @dma: DMA address of the TX ring
* @size: Size, in bytes, of the TX ring (needed to free)
* @is_xdp: Is this a XDP TX ring?
@@ -210,9 +159,10 @@ struct nfp_net_tx_buf {
struct nfp_net_tx_ring {
struct nfp_net_r_vector *r_vec;
- u32 idx;
- int qcidx;
+ u16 idx;
+ u16 data_pending;
u8 __iomem *qcp_q;
+ u64 *txrwb;
u32 cnt;
u32 wr_p;
@@ -221,8 +171,17 @@ struct nfp_net_tx_ring {
u32 wr_ptr_add;
- struct nfp_net_tx_buf *txbufs;
- struct nfp_net_tx_desc *txds;
+ union {
+ struct nfp_nfd3_tx_buf *txbufs;
+ struct nfp_nfdk_tx_buf *ktxbufs;
+ };
+ union {
+ struct nfp_nfd3_tx_desc *txds;
+ struct nfp_nfdk_tx_desc *ktxds;
+ };
+
+ /* Cold data follows */
+ int qcidx;
dma_addr_t dma;
size_t size;
@@ -266,8 +225,8 @@ struct nfp_net_tx_ring {
struct nfp_net_rx_desc {
union {
struct {
- u8 dma_addr_hi; /* High bits of the buf address */
- __le16 reserved; /* Must be zero */
+ __le16 dma_addr_hi; /* High bits of the buf address */
+ u8 reserved; /* Must be zero */
u8 meta_len_dd; /* Must be zero */
__le32 dma_addr_lo; /* Low bits of the buffer address */
@@ -289,6 +248,8 @@ struct nfp_net_rx_desc {
};
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+#define NFP_NET_VLAN_CTAG 0
+#define NFP_NET_VLAN_STAG 1
struct nfp_meta_parsed {
u8 hash_type;
@@ -297,6 +258,11 @@ struct nfp_meta_parsed {
u32 mark;
u32 portid;
__wsum csum;
+ struct {
+ bool stripped;
+ u8 tpid;
+ u16 tci;
+ } vlan;
};
struct nfp_net_rx_hash {
@@ -315,6 +281,16 @@ struct nfp_net_rx_buf {
};
/**
+ * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
+ * @dma_addr: DMA mapping address of the buffer
+ * @xdp: XSK buffer pool handle (for AF_XDP)
+ */
+struct nfp_net_xsk_rx_buf {
+ dma_addr_t dma_addr;
+ struct xdp_buff *xdp;
+};
+
+/**
* struct nfp_net_rx_ring - RX ring structure
* @r_vec: Back pointer to ring vector structure
* @cnt: Size of the queue in number of descriptors
@@ -324,6 +300,7 @@ struct nfp_net_rx_buf {
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of transmitted FL/RX buffers
+ * @xsk_rxbufs: Array of transmitted FL/RX buffers (for AF_XDP)
* @rxds: Virtual address of FL/RX ring in host memory
* @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
@@ -342,6 +319,7 @@ struct nfp_net_rx_ring {
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
+ struct nfp_net_xsk_rx_buf *xsk_rxbufs;
struct nfp_net_rx_desc *rxds;
struct xdp_rxq_info xdp_rxq;
@@ -360,6 +338,7 @@ struct nfp_net_rx_ring {
* @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
+ * @xsk_pool: XSK buffer pool active on vector queue pair (for AF_XDP)
* @irq_entry: MSI-X table entry (use for talking to the device)
* @event_ctr: Number of interrupt
* @rx_dim: Dynamic interrupt moderation structure for RX
@@ -431,6 +410,7 @@ struct nfp_net_r_vector {
u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
@@ -460,13 +440,17 @@ struct nfp_net_fw_version {
u8 minor;
u8 major;
u8 class;
- u8 resv;
+
+ /* This byte is available for future use; currently,
+ * BIT0: dp type, BIT[7:1]: reserved
+ */
+ u8 extend;
} __packed;
static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
- u8 resv, u8 class, u8 major, u8 minor)
+ u8 extend, u8 class, u8 major, u8 minor)
{
- return fw_ver->resv == resv &&
+ return fw_ver->extend == extend &&
fw_ver->class == class &&
fw_ver->major == major &&
fw_ver->minor == minor;
@@ -494,13 +478,17 @@ struct nfp_stat_pair {
* @rx_rings: Array of pre-allocated RX ring structures
* @ctrl_bar: Pointer to mapped control BAR
*
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @ops: Callbacks and parameters for this vNIC's NFD version
+ * @txrwb: TX pointer write back area (indexed by queue id)
+ * @txrwb_dma: TX pointer write back area DMA address
+ * @txd_cnt: Size of the TX ring in number of min size packets
+ * @rxd_cnt: Size of the RX ring in number of min size packets
* @num_r_vecs: Number of used ring vectors
* @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings
* @mtu: Device MTU
+ * @xsk_pools: XSK buffer pools, @max_r_vecs in size (for AF_XDP).
*/
struct nfp_net_dp {
struct device *dev;
@@ -527,6 +515,11 @@ struct nfp_net_dp {
/* Cold data follows */
+ const struct nfp_dp_ops *ops;
+
+ u64 *txrwb;
+ dma_addr_t txrwb_dma;
+
unsigned int txd_cnt;
unsigned int rxd_cnt;
@@ -537,11 +530,14 @@ struct nfp_net_dp {
unsigned int num_rx_rings;
unsigned int mtu;
+
+ struct xsk_buff_pool **xsk_pools;
};
/**
* struct nfp_net - NFP network device structure
* @dp: Datapath structure
+ * @dev_info: NFP ASIC params
* @id: vNIC id within the PF (0 for VFs)
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
@@ -615,6 +611,7 @@ struct nfp_net_dp {
struct nfp_net {
struct nfp_net_dp dp;
+ const struct nfp_dev_info *dev_info;
struct nfp_net_fw_version fw_ver;
u32 id;
@@ -767,7 +764,6 @@ static inline void nn_pci_flush(struct nfp_net *nn)
* either add to a pointer or to read the pointer value.
*/
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
-#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
@@ -776,50 +772,21 @@ static inline void nn_pci_flush(struct nfp_net *nn)
#define NFP_QCP_QUEUE_STS_HI 0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff
-/* The offset of a QCP queues in the PCIe Target */
-#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
-
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
NFP_QCP_READ_PTR = 0,
NFP_QCP_WRITE_PTR
};
-/* There appear to be an *undocumented* upper limit on the value which
- * one can add to a queue and that value is either 0x3f or 0x7f. We
- * go with 0x3f as a conservative measure.
- */
-#define NFP_QCP_MAX_ADD 0x3f
-
-static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
- enum nfp_qcp_ptr ptr, u32 val)
-{
- u32 off;
-
- if (ptr == NFP_QCP_READ_PTR)
- off = NFP_QCP_QUEUE_ADD_RPTR;
- else
- off = NFP_QCP_QUEUE_ADD_WPTR;
-
- while (val > NFP_QCP_MAX_ADD) {
- writel(NFP_QCP_MAX_ADD, q + off);
- val -= NFP_QCP_MAX_ADD;
- }
-
- writel(val, q + off);
-}
-
/**
* nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
*
* @q: Base address for queue structure
* @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
*/
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
- _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
+ writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
}
/**
@@ -827,12 +794,10 @@ static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
*
* @q: Base address for queue structure
* @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
*/
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
- _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
+ writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
}
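[Editorial sketch, not part of the patch] The removed _nfp_qcp_ptr_add() existed because older hardware capped a single pointer add at an undocumented limit (taken as 0x3f), so larger values had to be split across writes; the new code issues one write. A standalone model contrasting the two behaviours:

	#include <stdio.h>

	#define MAX_ADD 0x3f

	static void ptr_add_chunked(unsigned int *reg, unsigned int val)
	{
		while (val > MAX_ADD) {		/* old behaviour */
			*reg += MAX_ADD;
			val -= MAX_ADD;
		}
		*reg += val;
	}

	int main(void)
	{
		unsigned int chunked = 0, direct = 0;

		ptr_add_chunked(&chunked, 200);	/* 3 writes of 0x3f + remainder */
		direct += 200;			/* new hardware: one write */

		printf("chunked=%u direct=%u\n", chunked, direct);
		return 0;
	}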
static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
@@ -875,6 +840,8 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);
+
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
WARN_ON_ONCE(!nn->dp.netdev && nn->port);
@@ -921,11 +888,13 @@ static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
/* Globals */
extern const char nfp_driver_version[];
-extern const struct net_device_ops nfp_net_netdev_ops;
+extern const struct net_device_ops nfp_nfd3_netdev_ops;
+extern const struct net_device_ops nfp_nfdk_netdev_ops;
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
- return netdev->netdev_ops == &nfp_net_netdev_ops;
+ return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
+ netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
@@ -941,7 +910,8 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+ void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
@@ -972,6 +942,10 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
+struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 79257ec41987..27f4786ace4f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
/*
* nfp_net_common.c
@@ -13,7 +13,6 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -32,19 +31,24 @@
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>
#include <net/tls.h>
#include <net/vxlan.h>
+#include <net/xdp_sock_drv.h>
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_net_dp.h"
#include "nfp_net_sriov.h"
+#include "nfp_net_xsk.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"
@@ -63,33 +67,10 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
-static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
{
- return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void
-nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
- dma_sync_single_for_device(dp->dev, dma_addr,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir);
-}
-
-static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
- dma_unmap_single_attrs(dp->dev, dma_addr,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
- unsigned int len)
-{
- dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
- len, dp->rx_dma_dir);
+ queue &= dev_info->qc_idx_mask;
+ return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
}
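[Editorial sketch, not part of the patch] nfp_qcp_queue_offset() replaces the fixed NFP_PCIE_QUEUE() macro with per-ASIC parameters. A worked example using the values the removed macro hard-coded (base 0x80000, index mask 0xff):

	#include <stdint.h>
	#include <stdio.h>

	#define QUEUE_ADDR_SZ 0x800u

	struct dev_info_model {
		uint32_t qc_idx_mask;		/* 0xff in the old macro */
		uint32_t qc_addr_offset;	/* 0x80000 in the old macro */
	};

	static uint32_t qcp_queue_offset(const struct dev_info_model *di, uint16_t q)
	{
		return di->qc_addr_offset + QUEUE_ADDR_SZ * (q & di->qc_idx_mask);
	}

	int main(void)
	{
		struct dev_info_model di = { 0xff, 0x80000 };

		/* queue 3 -> 0x80000 + 0x800 * 3 = 0x81800 */
		printf("queue 3 offset: 0x%x\n", qcp_queue_offset(&di, 3));
		return 0;
	}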
/* Firmware reconfig
@@ -375,19 +356,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
*/
/**
- * nfp_net_irq_unmask() - Unmask automasked interrupt
- * @nn: NFP Network structure
- * @entry_nr: MSI-X table entry
- *
- * Clear the ICR for the IRQ entry.
- */
-static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
-{
- nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
- nn_pci_flush(nn);
-}
-
-/**
* nfp_net_irqs_alloc() - allocates MSI-X irqs
* @pdev: PCI device structure
* @irq_entries: Array to be initialized and used to hold the irq entries
@@ -506,19 +474,22 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
{
unsigned long flags;
bool link_up;
- u32 sts;
+ u16 sts;
spin_lock_irqsave(&nn->link_status_lock, flags);
- sts = nn_readl(nn, NFP_NET_CFG_STS);
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
link_up = !!(sts & NFP_NET_CFG_STS_LINK);
if (nn->link_up == link_up)
goto out;
nn->link_up = link_up;
- if (nn->port)
+ if (nn->port) {
set_bit(NFP_PORT_CHANGED, &nn->port->flags);
+ if (nn->port->link_cb)
+ nn->port->link_cb(nn->port);
+ }
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
@@ -569,49 +540,6 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
}
/**
- * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
- * @tx_ring: TX ring structure
- * @r_vec: IRQ vector servicing this ring
- * @idx: Ring index
- * @is_xdp: Is this an XDP TX ring?
- */
-static void
-nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx,
- bool is_xdp)
-{
- struct nfp_net *nn = r_vec->nfp_net;
-
- tx_ring->idx = idx;
- tx_ring->r_vec = r_vec;
- tx_ring->is_xdp = is_xdp;
- u64_stats_init(&tx_ring->r_vec->tx_sync);
-
- tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
- tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
-}
-
-/**
- * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
- * @rx_ring: RX ring structure
- * @r_vec: IRQ vector servicing this ring
- * @idx: Ring index
- */
-static void
-nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx)
-{
- struct nfp_net *nn = r_vec->nfp_net;
-
- rx_ring->idx = idx;
- rx_ring->r_vec = r_vec;
- u64_stats_init(&rx_ring->r_vec->rx_sync);
-
- rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
- rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
-}
-
-/**
* nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
* @nn: NFP Network structure
* @ctrl_offset: Control BAR offset where IRQ configuration should be written
@@ -658,178 +586,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
-/* Transmit
- *
- * One queue controller peripheral queue is used for transmit. The
- * driver en-queues packets for transmit by advancing the write
- * pointer. The device indicates that packets have transmitted by
- * advancing the read pointer. The driver maintains a local copy of
- * the read and write pointer in @struct nfp_net_tx_ring. The driver
- * keeps @wr_p in sync with the queue controller write pointer and can
- * determine how many packets have been transmitted by comparing its
- * copy of the read pointer @rd_p with the read pointer maintained by
- * the queue controller peripheral.
- */
-
-/**
- * nfp_net_tx_full() - Check if the TX ring is full
- * @tx_ring: TX ring to check
- * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
- *
- * This function checks, based on the *host copy* of read/write
- * pointer if a given TX ring is full. The real TX queue may have
- * some newly made available slots.
- *
- * Return: True if the ring is full.
- */
-static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
-{
- return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
-}
-
-/* Wrappers for deciding when to stop and restart TX queues */
-static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
-{
- return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
-}
-
-static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
-{
- return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
-}
-
-/**
- * nfp_net_tx_ring_stop() - stop tx ring
- * @nd_q: netdev queue
- * @tx_ring: driver tx queue structure
- *
- * Safely stop TX ring. Remember that while we are running .start_xmit()
- * someone else may be cleaning the TX ring completions so we need to be
- * extra careful here.
- */
-static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
- struct nfp_net_tx_ring *tx_ring)
-{
- netif_tx_stop_queue(nd_q);
-
- /* We can race with the TX completion out of NAPI so recheck */
- smp_mb();
- if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
- netif_tx_start_queue(nd_q);
-}
-
-/**
- * nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to HW TX descriptor
- * @skb: Pointer to SKB
- * @md_bytes: Prepend length
- *
- * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
- * Return error on packet header greater than maximum supported LSO header size.
- */
-static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
- struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb,
- u32 md_bytes)
-{
- u32 l3_offset, l4_offset, hdrlen;
- u16 mss;
-
- if (!skb_is_gso(skb))
- return;
-
- if (!skb->encapsulation) {
- l3_offset = skb_network_offset(skb);
- l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- } else {
- l3_offset = skb_inner_network_offset(skb);
- l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
- }
-
- txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
- txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
-
- mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->l3_offset = l3_offset - md_bytes;
- txd->l4_offset = l4_offset - md_bytes;
- txd->lso_hdrlen = hdrlen - md_bytes;
- txd->mss = cpu_to_le16(mss);
- txd->flags |= PCIE_DESC_TX_LSO;
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_lso++;
- u64_stats_update_end(&r_vec->tx_sync);
-}
-
-/**
- * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @dp: NFP Net data path struct
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to TX descriptor
- * @skb: Pointer to SKB
- *
- * This function sets the TX checksum flags in the TX descriptor based
- * on the configuration and the protocol of the packet to be transmitted.
- */
-static void nfp_net_tx_csum(struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec,
- struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb)
-{
- struct ipv6hdr *ipv6h;
- struct iphdr *iph;
- u8 l4_hdr;
-
- if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
- return;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return;
-
- txd->flags |= PCIE_DESC_TX_CSUM;
- if (skb->encapsulation)
- txd->flags |= PCIE_DESC_TX_ENCAP;
-
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
- if (iph->version == 4) {
- txd->flags |= PCIE_DESC_TX_IP4_CSUM;
- l4_hdr = iph->protocol;
- } else if (ipv6h->version == 6) {
- l4_hdr = ipv6h->nexthdr;
- } else {
- nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
- return;
- }
-
- switch (l4_hdr) {
- case IPPROTO_TCP:
- txd->flags |= PCIE_DESC_TX_TCP_CSUM;
- break;
- case IPPROTO_UDP:
- txd->flags |= PCIE_DESC_TX_UDP_CSUM;
- break;
- default:
- nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
- return;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- if (skb->encapsulation)
- r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
- else
- r_vec->hw_csum_tx += txbuf->pkt_cnt;
- u64_stats_update_end(&r_vec->tx_sync);
-}
-
-static struct sk_buff *
+struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
@@ -844,7 +601,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return skb;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
resync_pending = tls_offload_tx_resync_pending(skb->sk);
@@ -901,7 +658,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
return skb;
}
-static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
struct nfp_net_tls_offload_ctx *ntls;
@@ -912,7 +669,7 @@ static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
return;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
@@ -923,411 +680,6 @@ static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
#endif
}
-static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
-{
- wmb();
- nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
- tx_ring->wr_ptr_add = 0;
-}
-
-static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
-{
- struct metadata_dst *md_dst = skb_metadata_dst(skb);
- unsigned char *data;
- u32 meta_id = 0;
- int md_bytes;
-
- if (likely(!md_dst && !tls_handle))
- return 0;
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
- if (!tls_handle)
- return 0;
- md_dst = NULL;
- }
-
- md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
-
- if (unlikely(skb_cow_head(skb, md_bytes)))
- return -ENOMEM;
-
- meta_id = 0;
- data = skb_push(skb, md_bytes) + md_bytes;
- if (md_dst) {
- data -= 4;
- put_unaligned_be32(md_dst->u.port_info.port_id, data);
- meta_id = NFP_NET_META_PORTID;
- }
- if (tls_handle) {
- /* conn handle is opaque, we just use u64 to be able to quickly
- * compare it to zero
- */
- data -= 8;
- memcpy(data, &tls_handle, sizeof(tls_handle));
- meta_id <<= NFP_NET_META_FIELD_SIZE;
- meta_id |= NFP_NET_META_CONN_HANDLE;
- }
-
- data -= 4;
- put_unaligned_be32(meta_id, data);
-
- return md_bytes;
-}
-
-/**
- * nfp_net_tx() - Main transmit entry point
- * @skb: SKB to transmit
- * @netdev: netdev structure
- *
- * Return: NETDEV_TX_OK on success.
- */
-static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- const skb_frag_t *frag;
- int f, nr_frags, wr_idx, md_bytes;
- struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_r_vector *r_vec;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- struct netdev_queue *nd_q;
- struct nfp_net_dp *dp;
- dma_addr_t dma_addr;
- unsigned int fsize;
- u64 tls_handle = 0;
- u16 qidx;
-
- dp = &nn->dp;
- qidx = skb_get_queue_mapping(skb);
- tx_ring = &dp->tx_rings[qidx];
- r_vec = tx_ring->r_vec;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
- nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
- qidx, tx_ring->wr_p, tx_ring->rd_p);
- nd_q = netdev_get_tx_queue(dp->netdev, qidx);
- netif_tx_stop_queue(nd_q);
- nfp_net_tx_xmit_more_flush(tx_ring);
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_busy++;
- u64_stats_update_end(&r_vec->tx_sync);
- return NETDEV_TX_BUSY;
- }
-
- skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
- if (unlikely(!skb)) {
- nfp_net_tx_xmit_more_flush(tx_ring);
- return NETDEV_TX_OK;
- }
-
- md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
- if (unlikely(md_bytes < 0))
- goto err_flush;
-
- /* Start with the head skbuf */
- dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_dma_err;
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
- txbuf->skb = skb;
- txbuf->dma_addr = dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = skb->len;
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
- txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->data_len = cpu_to_le16(skb->len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
- nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
- nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
- if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
- txd->flags |= PCIE_DESC_TX_VLAN;
- txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
- }
-
- /* Gather DMA */
- if (nr_frags > 0) {
- __le64 second_half;
-
- /* all descs must match except for in addr, length and eop */
- second_half = txd->vals8[1];
-
- for (f = 0; f < nr_frags; f++) {
- frag = &skb_shinfo(skb)->frags[f];
- fsize = skb_frag_size(frag);
-
- dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
- fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_unmap;
-
- wr_idx = D_IDX(tx_ring, wr_idx + 1);
- tx_ring->txbufs[wr_idx].skb = skb;
- tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
- tx_ring->txbufs[wr_idx].fidx = f;
-
- txd = &tx_ring->txds[wr_idx];
- txd->dma_len = cpu_to_le16(fsize);
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop = md_bytes |
- ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
- txd->vals8[1] = second_half;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_gather++;
- u64_stats_update_end(&r_vec->tx_sync);
- }
-
- skb_tx_timestamp(skb);
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
-
- tx_ring->wr_p += nr_frags + 1;
- if (nfp_net_tx_ring_should_stop(tx_ring))
- nfp_net_tx_ring_stop(nd_q, tx_ring);
-
- tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
- nfp_net_tx_xmit_more_flush(tx_ring);
-
- return NETDEV_TX_OK;
-
-err_unmap:
- while (--f >= 0) {
- frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- tx_ring->txbufs[wr_idx].skb = NULL;
- tx_ring->txbufs[wr_idx].dma_addr = 0;
- tx_ring->txbufs[wr_idx].fidx = -2;
- wr_idx = wr_idx - 1;
- if (wr_idx < 0)
- wr_idx += tx_ring->cnt;
- }
- dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- tx_ring->txbufs[wr_idx].skb = NULL;
- tx_ring->txbufs[wr_idx].dma_addr = 0;
- tx_ring->txbufs[wr_idx].fidx = -2;
-err_dma_err:
- nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
-err_flush:
- nfp_net_tx_xmit_more_flush(tx_ring);
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_errors++;
- u64_stats_update_end(&r_vec->tx_sync);
- nfp_net_tls_tx_undo(skb, tls_handle);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-/**
- * nfp_net_tx_complete() - Handled completed TX packets
- * @tx_ring: TX ring structure
- * @budget: NAPI budget (only used as bool to determine if in NAPI context)
- */
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- struct netdev_queue *nd_q;
- u32 done_pkts = 0, done_bytes = 0;
- u32 qcp_rd_p;
- int todo;
-
- if (tx_ring->wr_p == tx_ring->rd_p)
- return;
-
- /* Work out how many descriptors have been transmitted */
- qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
- if (qcp_rd_p == tx_ring->qcp_rd_p)
- return;
-
- todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
- while (todo--) {
- const skb_frag_t *frag;
- struct nfp_net_tx_buf *tx_buf;
- struct sk_buff *skb;
- int fidx, nr_frags;
- int idx;
-
- idx = D_IDX(tx_ring, tx_ring->rd_p++);
- tx_buf = &tx_ring->txbufs[idx];
-
- skb = tx_buf->skb;
- if (!skb)
- continue;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_buf->fidx;
-
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(dp->dev, tx_buf->dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
-
- done_pkts += tx_buf->pkt_cnt;
- done_bytes += tx_buf->real_len;
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(dp->dev, tx_buf->dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- napi_consume_skb(skb, budget);
-
- tx_buf->dma_addr = 0;
- tx_buf->skb = NULL;
- tx_buf->fidx = -2;
- }
-
- tx_ring->qcp_rd_p = qcp_rd_p;
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_bytes += done_bytes;
- r_vec->tx_pkts += done_pkts;
- u64_stats_update_end(&r_vec->tx_sync);
-
- if (!dp->netdev)
- return;
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
- netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
- if (nfp_net_tx_ring_should_wake(tx_ring)) {
- /* Make sure TX thread will see updated tx_ring->rd_p */
- smp_mb();
-
- if (unlikely(netif_tx_queue_stopped(nd_q)))
- netif_tx_wake_queue(nd_q);
- }
-
- WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
- tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-}
-
-static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- u32 done_pkts = 0, done_bytes = 0;
- bool done_all;
- int idx, todo;
- u32 qcp_rd_p;
-
- /* Work out how many descriptors have been transmitted */
- qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
- if (qcp_rd_p == tx_ring->qcp_rd_p)
- return true;
-
- todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
- done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
- todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
-
- tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
-
- done_pkts = todo;
- while (todo--) {
- idx = D_IDX(tx_ring, tx_ring->rd_p);
- tx_ring->rd_p++;
-
- done_bytes += tx_ring->txbufs[idx].real_len;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_bytes += done_bytes;
- r_vec->tx_pkts += done_pkts;
- u64_stats_update_end(&r_vec->tx_sync);
-
- WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
- tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-
- return done_all;
-}
-
-/**
- * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @dp: NFP Net data path struct
- * @tx_ring: TX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void
-nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
- const skb_frag_t *frag;
- struct netdev_queue *nd_q;
-
- while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
- struct nfp_net_tx_buf *tx_buf;
- struct sk_buff *skb;
- int idx, nr_frags;
-
- idx = D_IDX(tx_ring, tx_ring->rd_p);
- tx_buf = &tx_ring->txbufs[idx];
-
- skb = tx_ring->txbufs[idx].skb;
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (tx_buf->fidx == -1) {
- /* unmap head */
- dma_unmap_single(dp->dev, tx_buf->dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(dp->dev, tx_buf->dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (tx_buf->fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
-
- tx_buf->dma_addr = 0;
- tx_buf->skb = NULL;
- tx_buf->fidx = -2;
-
- tx_ring->qcp_rd_p++;
- tx_ring->rd_p++;
- }
-
- memset(tx_ring->txds, 0, tx_ring->size);
- tx_ring->wr_p = 0;
- tx_ring->rd_p = 0;
- tx_ring->qcp_rd_p = 0;
- tx_ring->wr_ptr_add = 0;
-
- if (tx_ring->is_xdp || !dp->netdev)
- return;
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
- netdev_tx_reset_queue(nd_q);
-}
-
static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -1335,1008 +687,43 @@ static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
-/* Receive processing
- */
+/* Receive processing */
static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
+nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
{
- unsigned int fl_bufsz;
+ unsigned int fl_bufsz = 0;
- fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
- fl_bufsz += dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
fl_bufsz += NFP_NET_MAX_PREPEND;
else
fl_bufsz += dp->rx_offset;
fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
- fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
- fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
return fl_bufsz;
}
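[Editorial sketch, not part of the patch] The data-portion arithmetic above can be followed with concrete numbers; NFP_NET_MAX_PREPEND is assumed to be 64 here for the dynamic RX offset case:

	#include <stdio.h>

	#define ETH_HLEN	14
	#define VLAN_HLEN	4
	#define MAX_PREPEND	64	/* assumed NFP_NET_MAX_PREPEND */

	int main(void)
	{
		unsigned int mtu = 1500;
		unsigned int fl_bufsz = 0;

		fl_bufsz += MAX_PREPEND;	/* dynamic RX offset case */
		fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;

		/* 64 + 14 + 8 + 1500 = 1586 bytes of data space */
		printf("fl_bufsz(data) = %u\n", fl_bufsz);
		return 0;
	}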
-static void
-nfp_net_free_frag(void *frag, bool xdp)
-{
- if (!xdp)
- skb_free_frag(frag);
- else
- __free_page(virt_to_page(frag));
-}
-
-/**
- * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
- * @dp: NFP Net data path struct
- * @dma_addr: Pointer to storage for DMA address (output param)
- *
- * This function will allocate a new page frag and map it for DMA.
- *
- * Return: allocated page frag or NULL on failure.
- */
-static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
- void *frag;
-
- if (!dp->xdp_prog) {
- frag = netdev_alloc_frag(dp->fl_bufsz);
- } else {
- struct page *page;
-
- page = alloc_page(GFP_KERNEL);
- frag = page ? page_address(page) : NULL;
- }
- if (!frag) {
- nn_dp_warn(dp, "Failed to alloc receive page frag\n");
- return NULL;
- }
-
- *dma_addr = nfp_net_dma_map_rx(dp, frag);
- if (dma_mapping_error(dp->dev, *dma_addr)) {
- nfp_net_free_frag(frag, dp->xdp_prog);
- nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
- return NULL;
- }
-
- return frag;
-}
-
-static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
- void *frag;
-
- if (!dp->xdp_prog) {
- frag = napi_alloc_frag(dp->fl_bufsz);
- if (unlikely(!frag))
- return NULL;
- } else {
- struct page *page;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return NULL;
- frag = page_address(page);
- }
-
- *dma_addr = nfp_net_dma_map_rx(dp, frag);
- if (dma_mapping_error(dp->dev, *dma_addr)) {
- nfp_net_free_frag(frag, dp->xdp_prog);
- nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
- return NULL;
- }
-
- return frag;
-}
-
-/**
- * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring structure
- * @frag: page fragment buffer
- * @dma_addr: DMA address of skb mapping
- */
-static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring,
- void *frag, dma_addr_t dma_addr)
-{
- unsigned int wr_idx;
-
- wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
-
- nfp_net_dma_sync_dev_rx(dp, dma_addr);
-
- /* Stash SKB and DMA address away */
- rx_ring->rxbufs[wr_idx].frag = frag;
- rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
-
- /* Fill freelist descriptor */
- rx_ring->rxds[wr_idx].fld.reserved = 0;
- rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
-
- rx_ring->wr_p++;
- if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
- /* Update write pointer of the freelist queue. Make
- * sure all writes are flushed before telling the hardware.
- */
- wmb();
- nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
- }
-}
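
nfp_net_rx_give_one() (and the rest of the datapath) indexes rings through D_IDX(), which turns the free-running wr_p/rd_p counters into slot numbers with a mask; that only works because the descriptor counts are powers of two. The macro body below is a sketch inferred from how the pointers are used here, not copied from the driver headers.

/* Free-running pointer -> ring slot; valid only for power-of-2 counts. */
#define D_IDX(ring, ptr)	((ptr) & ((ring)->cnt - 1))

/* e.g. cnt == 512: wr_p == 513 lands in slot 1, while wr_p - rd_p still
 * counts in-flight descriptors with no explicit wrap handling.
 */
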
-
-/**
- * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
- * @rx_ring: RX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int wr_idx, last_idx;
-
- /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
- * kept at cnt - 1 FL bufs.
- */
- if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
- return;
-
- /* Move the empty entry to the end of the list */
- wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
- last_idx = rx_ring->cnt - 1;
- rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
- rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
- rx_ring->rxbufs[last_idx].dma_addr = 0;
- rx_ring->rxbufs[last_idx].frag = NULL;
-
- memset(rx_ring->rxds, 0, rx_ring->size);
- rx_ring->wr_p = 0;
- rx_ring->rd_p = 0;
-}
-
-/**
- * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to remove buffers from
- *
- * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
- * entries. After device is disabled nfp_net_rx_ring_reset() must be called
- * to restore required ring geometry.
- */
-static void
-nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int i;
-
- for (i = 0; i < rx_ring->cnt - 1; i++) {
- /* A NULL frag can only happen when the initial filling of the
- * ring fails to allocate enough buffers and we call here to free
- * the ones already allocated.
- */
- if (!rx_ring->rxbufs[i].frag)
- continue;
-
- nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
- nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
- rx_ring->rxbufs[i].dma_addr = 0;
- rx_ring->rxbufs[i].frag = NULL;
- }
-}
-
-/**
- * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to allocate buffers for
- */
-static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- struct nfp_net_rx_buf *rxbufs;
- unsigned int i;
-
- rxbufs = rx_ring->rxbufs;
-
- for (i = 0; i < rx_ring->cnt - 1; i++) {
- rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
- if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(dp, rx_ring);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
- * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to fill
- */
-static void
-nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int i;
-
- for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
- rx_ring->rxbufs[i].dma_addr);
-}
-
-/**
- * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
- * @flags: RX descriptor flags field in CPU byte order
- */
-static int nfp_net_rx_csum_has_errors(u16 flags)
-{
- u16 csum_all_checked, csum_all_ok;
-
- csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
- csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
-
- return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
-}
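
The one-line group check works because every *_CSUM_OK flag sits a fixed distance (PCIE_DESC_RX_CSUM_OK_SHIFT) from its matching "checksum was checked" flag; shifting the OK group aligns it with the checked group, so a single comparison covers TCP and UDP, inner and outer, at once. A toy example with invented bit positions:

	/* Toy layout, invented for illustration (shift == 1):
	 *   bit 1 TCP_CSUM    bit 0 TCP_CSUM_OK
	 *   bit 3 UDP_CSUM    bit 2 UDP_CSUM_OK
	 */
	u16 flags = BIT(1) | BIT(3) | BIT(0);	/* both checked, only TCP ok */
	u16 checked = flags & (BIT(1) | BIT(3));	/* 0b1010 */
	u16 ok = flags & (BIT(0) | BIT(2));		/* 0b0001 */

	bool has_errors = checked != (ok << 1);	/* 0b1010 != 0b0010: error */
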
-
-/**
- * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @dp: NFP Net data path struct
- * @r_vec: per-ring structure
- * @rxd: Pointer to RX descriptor
- * @meta: Parsed metadata prepend
- * @skb: Pointer to SKB
- */
-static void nfp_net_rx_csum(struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_desc *rxd,
- struct nfp_meta_parsed *meta, struct sk_buff *skb)
-{
- skb_checksum_none_assert(skb);
-
- if (!(dp->netdev->features & NETIF_F_RXCSUM))
- return;
-
- if (meta->csum_type) {
- skb->ip_summed = meta->csum_type;
- skb->csum = meta->csum;
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_complete++;
- u64_stats_update_end(&r_vec->rx_sync);
- return;
- }
-
- if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_error++;
- u64_stats_update_end(&r_vec->rx_sync);
- return;
- }
-
- /* Assume that the firmware will never report inner CSUM_OK unless outer
- * L4 headers were successfully parsed. FW will always report zero UDP
- * checksum as CSUM_OK.
- */
- if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
- rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
- __skb_incr_checksum_unnecessary(skb);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_ok++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-
- if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
- rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
- __skb_incr_checksum_unnecessary(skb);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_inner_ok++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-}
-
-static void
-nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
- unsigned int type, __be32 *hash)
-{
- if (!(netdev->features & NETIF_F_RXHASH))
- return;
-
- switch (type) {
- case NFP_NET_RSS_IPV4:
- case NFP_NET_RSS_IPV6:
- case NFP_NET_RSS_IPV6_EX:
- meta->hash_type = PKT_HASH_TYPE_L3;
- break;
- default:
- meta->hash_type = PKT_HASH_TYPE_L4;
- break;
- }
-
- meta->hash = get_unaligned_be32(hash);
-}
-
-static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, struct nfp_net_rx_desc *rxd)
-{
- struct nfp_net_rx_hash *rx_hash = data;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
- return;
-
- nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
- &rx_hash->hash);
-}
-
-static bool
-nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, void *pkt, unsigned int pkt_len, int meta_len)
-{
- u32 meta_info;
-
- meta_info = get_unaligned_be32(data);
- data += 4;
-
- while (meta_info) {
- switch (meta_info & NFP_NET_META_FIELD_MASK) {
- case NFP_NET_META_HASH:
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- nfp_net_set_hash(netdev, meta,
- meta_info & NFP_NET_META_FIELD_MASK,
- (__be32 *)data);
- data += 4;
- break;
- case NFP_NET_META_MARK:
- meta->mark = get_unaligned_be32(data);
- data += 4;
- break;
- case NFP_NET_META_PORTID:
- meta->portid = get_unaligned_be32(data);
- data += 4;
- break;
- case NFP_NET_META_CSUM:
- meta->csum_type = CHECKSUM_COMPLETE;
- meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
- data += 4;
- break;
- case NFP_NET_META_RESYNC_INFO:
- if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
- pkt_len))
- return false;
- data += sizeof(struct nfp_net_tls_resync_req);
- break;
- default:
- return true;
- }
-
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- }
-
- return data != pkt;
-}
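
The chained metadata prepend is self-describing: the first 32-bit word is a chain of 4-bit field-type nibbles, consumed LSB-first, and each recognized type then consumes its bytes from the data pointer. Note the NFP_NET_META_HASH case eats a second nibble, because the hash type itself travels in the type word. A stripped-down sketch of the same walk (local names are stand-ins):

	u32 types = get_unaligned_be32(data);	/* chain of 4-bit type nibbles */

	data += 4;
	while (types) {
		switch (types & NFP_NET_META_FIELD_MASK) {	/* low nibble */
		case NFP_NET_META_MARK:
			mark = get_unaligned_be32(data);
			data += 4;
			break;
		/* ... one case per known type, each consuming its bytes ... */
		default:
			return true;	/* unknown field: size unknown, bail */
		}
		types >>= NFP_NET_META_FIELD_SIZE;	/* next nibble */
	}
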
-
-static void
-nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
- struct sk_buff *skb)
-{
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_drops++;
- /* If we have both skb and rxbuf the replacement buffer allocation
- * must have failed, count this as an alloc failure.
- */
- if (skb && rxbuf)
- r_vec->rx_replace_buf_alloc_fail++;
- u64_stats_update_end(&r_vec->rx_sync);
-
- /* The skb is built on top of the frag; freeing the skb would also
- * free the frag, so take an extra ref to be able to reuse it.
- */
- if (skb && rxbuf && skb->head == rxbuf->frag)
- page_ref_inc(virt_to_head_page(rxbuf->frag));
- if (rxbuf)
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
- if (skb)
- dev_kfree_skb_any(skb);
-}
-
-static bool
-nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
- unsigned int pkt_len, bool *completed)
-{
- unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- int wr_idx;
-
- /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
- if (pkt_len + dma_off > dma_map_sz)
- return false;
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- if (!*completed) {
- nfp_net_xdp_complete(tx_ring);
- *completed = true;
- }
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
- NULL);
- return false;
- }
- }
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
-
- nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
-
- txbuf->frag = rxbuf->frag;
- txbuf->dma_addr = rxbuf->dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = pkt_len;
-
- dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
- pkt_len, DMA_BIDIRECTIONAL);
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = PCIE_DESC_TX_EOP;
- txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
- txd->data_len = cpu_to_le16(pkt_len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- tx_ring->wr_p++;
- tx_ring->wr_ptr_add++;
- return true;
-}
-
-/**
- * nfp_net_rx() - receive up to @budget packets on @rx_ring
- * @rx_ring: RX ring to receive from
- * @budget: NAPI budget
- *
- * Note, this function is split out from the napi poll function to keep
- * the packet receive path cleanly separated from the other bookkeeping
- * the poll function performs.
- *
- * Return: Number of packets received.
- */
-static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
-{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- struct nfp_net_tx_ring *tx_ring;
- struct bpf_prog *xdp_prog;
- bool xdp_tx_cmpl = false;
- unsigned int true_bufsz;
- struct sk_buff *skb;
- int pkts_polled = 0;
- struct xdp_buff xdp;
- int idx;
-
- xdp_prog = READ_ONCE(dp->xdp_prog);
- true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
- &rx_ring->xdp_rxq);
- tx_ring = r_vec->xdp_ring;
-
- while (pkts_polled < budget) {
- unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- struct nfp_meta_parsed meta;
- bool redir_egress = false;
- struct net_device *netdev;
- dma_addr_t new_dma_addr;
- u32 meta_len_xdp = 0;
- void *new_frag;
-
- idx = D_IDX(rx_ring, rx_ring->rd_p);
-
- rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
- break;
-
- /* Memory barrier to ensure that we won't do other reads
- * before the DD bit.
- */
- dma_rmb();
-
- memset(&meta, 0, sizeof(meta));
-
- rx_ring->rd_p++;
- pkts_polled++;
-
- rxbuf = &rx_ring->rxbufs[idx];
- /* < meta_len >
- * <-- [rx_offset] -->
- * ---------------------------------------------------------
- * | [XX] | metadata | packet | XXXX |
- * ---------------------------------------------------------
- * <---------------- data_len --------------->
- *
- * The rx_offset is fixed for all packets, while the meta_len can vary
- * on a packet-by-packet basis. If rx_offset is set to zero
- * (_RX_OFFSET_DYNAMIC) the metadata starts at the beginning of the
- * buffer and is immediately followed by the packet (no [XX]).
- */
- meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
- data_len = le16_to_cpu(rxd->rxd.data_len);
- pkt_len = data_len - meta_len;
-
- pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
- if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off += meta_len;
- else
- pkt_off += dp->rx_offset;
- meta_off = pkt_off - meta_len;
-
- /* Stats update */
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_pkts++;
- r_vec->rx_bytes += pkt_len;
- u64_stats_update_end(&r_vec->rx_sync);
-
- if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
- (dp->rx_offset && meta_len > dp->rx_offset))) {
- nn_dp_warn(dp, "oversized RX packet metadata %u\n",
- meta_len);
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
-
- nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
- data_len);
-
- if (!dp->chained_metadata_format) {
- nfp_net_set_hash_desc(dp->netdev, &meta,
- rxbuf->frag + meta_off, rxd);
- } else if (meta_len) {
- if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- rxbuf->frag + pkt_off,
- pkt_len, meta_len))) {
- nn_dp_warn(dp, "invalid RX packet metadata\n");
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
- NULL);
- continue;
- }
- }
-
- if (xdp_prog && !meta.portid) {
- void *orig_data = rxbuf->frag + pkt_off;
- unsigned int dma_off;
- int act;
-
- xdp_prepare_buff(&xdp,
- rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
- pkt_off - NFP_NET_RX_BUF_HEADROOM,
- pkt_len, true);
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- pkt_len = xdp.data_end - xdp.data;
- pkt_off += xdp.data - orig_data;
-
- switch (act) {
- case XDP_PASS:
- meta_len_xdp = xdp.data - xdp.data_meta;
- break;
- case XDP_TX:
- dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
- if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
- tx_ring, rxbuf,
- dma_off,
- pkt_len,
- &xdp_tx_cmpl)))
- trace_xdp_exception(dp->netdev,
- xdp_prog, act);
- continue;
- default:
- bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
- fallthrough;
- case XDP_ABORTED:
- trace_xdp_exception(dp->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
- rxbuf->dma_addr);
- continue;
- }
- }
-
- if (likely(!meta.portid)) {
- netdev = dp->netdev;
- } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
- struct nfp_net *nn = netdev_priv(dp->netdev);
-
- nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
- pkt_len);
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
- rxbuf->dma_addr);
- continue;
- } else {
- struct nfp_net *nn;
-
- nn = netdev_priv(dp->netdev);
- netdev = nfp_app_dev_get(nn->app, meta.portid,
- &redir_egress);
- if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
- NULL);
- continue;
- }
-
- if (nfp_netdev_is_nfp_repr(netdev))
- nfp_repr_inc_rx_stats(netdev, pkt_len);
- }
-
- skb = build_skb(rxbuf->frag, true_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
- new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
- continue;
- }
-
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
- skb_reserve(skb, pkt_off);
- skb_put(skb, pkt_len);
-
- skb->mark = meta.mark;
- skb_set_hash(skb, meta.hash, meta.hash_type);
-
- skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, netdev);
-
- nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
-
-#ifdef CONFIG_TLS_DEVICE
- if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
- skb->decrypted = true;
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_tls_rx++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-#endif
-
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
- if (meta_len_xdp)
- skb_metadata_set(skb, meta_len_xdp);
-
- if (likely(!redir_egress)) {
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
- } else {
- skb->dev = netdev;
- skb_reset_network_header(skb);
- __skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
- }
- }
-
- if (xdp_prog) {
- if (tx_ring->wr_ptr_add)
- nfp_net_tx_xmit_more_flush(tx_ring);
- else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
- !xdp_tx_cmpl)
- if (!nfp_net_xdp_complete(tx_ring))
- pkts_polled = budget;
- }
-
- return pkts_polled;
-}
-
-/**
- * nfp_net_poll() - napi poll function
- * @napi: NAPI structure
- * @budget: NAPI budget
- *
- * Return: number of packets polled.
- */
-static int nfp_net_poll(struct napi_struct *napi, int budget)
-{
- struct nfp_net_r_vector *r_vec =
- container_of(napi, struct nfp_net_r_vector, napi);
- unsigned int pkts_polled = 0;
-
- if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring, budget);
- if (r_vec->rx_ring)
- pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
-
- if (pkts_polled < budget)
- if (napi_complete_done(napi, pkts_polled))
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-
- if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
- struct dim_sample dim_sample = {};
- unsigned int start;
- u64 pkts, bytes;
-
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
-
- dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
- }
-
- if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
- struct dim_sample dim_sample = {};
- unsigned int start;
- u64 pkts, bytes;
-
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
-
- dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
- }
-
- return pkts_polled;
-}
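
The DIM sampling above uses the kernel's u64_stats pattern: the datapath is the only writer per ring, so updates are cheap, and readers simply retry if a write raced with them (which matters on 32-bit, where a 64-bit load can tear). The two halves side by side, as a sketch (@len is a stand-in):

	/* Writer (datapath, single writer per ring): */
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_pkts++;
	r_vec->rx_bytes += len;
	u64_stats_update_end(&r_vec->rx_sync);

	/* Reader retries until it sees a consistent snapshot: */
	do {
		start = u64_stats_fetch_begin(&r_vec->rx_sync);
		pkts  = r_vec->rx_pkts;
		bytes = r_vec->rx_bytes;
	} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
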
-
-/* Control device data path
- */
-
-static bool
-nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- struct sk_buff *skb, bool old)
-{
- unsigned int real_len = skb->len, meta_len = 0;
- struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- struct nfp_net_dp *dp;
- dma_addr_t dma_addr;
- int wr_idx;
-
- dp = &r_vec->nfp_net->dp;
- tx_ring = r_vec->tx_ring;
-
- if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
- nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
- goto err_free;
- }
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_busy++;
- u64_stats_update_end(&r_vec->tx_sync);
- if (!old)
- __skb_queue_tail(&r_vec->queue, skb);
- else
- __skb_queue_head(&r_vec->queue, skb);
- return true;
- }
-
- if (nfp_app_ctrl_has_meta(nn->app)) {
- if (unlikely(skb_headroom(skb) < 8)) {
- nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
- goto err_free;
- }
- meta_len = 8;
- put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
- put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
- }
-
- /* Start with the skb's head (linear) buffer */
- dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_dma_warn;
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
- txbuf->skb = skb;
- txbuf->dma_addr = dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = real_len;
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
- txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->data_len = cpu_to_le16(skb->len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- tx_ring->wr_p++;
- tx_ring->wr_ptr_add++;
- nfp_net_tx_xmit_more_flush(tx_ring);
-
- return false;
-
-err_dma_warn:
- nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
-err_free:
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_errors++;
- u64_stats_update_end(&r_vec->tx_sync);
- dev_kfree_skb_any(skb);
- return false;
-}
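
The two skb_push() calls in nfp_ctrl_tx_one() build the 8-byte control prepend back to front; nfp_ctrl_meta_ok() below accepts exactly this layout on the RX side. Spelled out:

	put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4)); /* bytes 4-7 */
	put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));   /* bytes 0-3 */

	/* Resulting frame:
	 *   bytes 0-3  NFP_NET_META_PORTID    (field-type word, big endian)
	 *   bytes 4-7  NFP_META_PORT_ID_CTRL  (~0U, marks a control message)
	 *   bytes 8-   control message payload
	 */
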
-
-bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
- struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
-
- return nfp_ctrl_tx_one(nn, r_vec, skb, false);
-}
-
-bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
- struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
- bool ret;
-
- spin_lock_bh(&r_vec->lock);
- ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
- spin_unlock_bh(&r_vec->lock);
-
- return ret;
-}
-
-static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&r_vec->queue)))
- if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
- return;
-}
-
-static bool
-nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
-{
- u32 meta_type, meta_tag;
-
- if (!nfp_app_ctrl_has_meta(nn->app))
- return !meta_len;
-
- if (meta_len != 8)
- return false;
-
- meta_type = get_unaligned_be32(data);
- meta_tag = get_unaligned_be32(data + 4);
-
- return (meta_type == NFP_NET_META_PORTID &&
- meta_tag == NFP_META_PORT_ID_CTRL);
-}
-
-static bool
-nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- dma_addr_t new_dma_addr;
- struct sk_buff *skb;
- void *new_frag;
- int idx;
-
- idx = D_IDX(rx_ring, rx_ring->rd_p);
-
- rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
- return false;
-
- /* Memory barrier to ensure that we won't do other reads
- * before the DD bit.
- */
- dma_rmb();
-
- rx_ring->rd_p++;
-
- rxbuf = &rx_ring->rxbufs[idx];
- meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
- data_len = le16_to_cpu(rxd->rxd.data_len);
- pkt_len = data_len - meta_len;
-
- pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
- if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off += meta_len;
- else
- pkt_off += dp->rx_offset;
- meta_off = pkt_off - meta_len;
-
- /* Stats update */
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_pkts++;
- r_vec->rx_bytes += pkt_len;
- u64_stats_update_end(&r_vec->rx_sync);
-
- nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
-
- if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
- nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
- meta_len);
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- return true;
- }
-
- skb = build_skb(rxbuf->frag, dp->fl_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- return true;
- }
- new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
- return true;
- }
-
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
- skb_reserve(skb, pkt_off);
- skb_put(skb, pkt_len);
-
- nfp_app_ctrl_rx(nn->app, skb);
-
- return true;
-}
+ unsigned int fl_bufsz;
-static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
-{
- struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
- struct nfp_net *nn = r_vec->nfp_net;
- struct nfp_net_dp *dp = &nn->dp;
- unsigned int budget = 512;
+ fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
+ fl_bufsz += dp->rx_dma_off;
+ fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
- continue;
+ fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
+ fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- return budget;
+ return fl_bufsz;
}
-static void nfp_ctrl_poll(struct tasklet_struct *t)
+static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
{
- struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+ unsigned int fl_bufsz;
- spin_lock(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring, 0);
- __nfp_ctrl_tx_queued(r_vec);
- spin_unlock(&r_vec->lock);
+ fl_bufsz = XDP_PACKET_HEADROOM;
+ fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
- if (nfp_ctrl_rx(r_vec)) {
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- } else {
- tasklet_schedule(&r_vec->tasklet);
- nn_dp_warn(&r_vec->nfp_net->dp,
- "control message budget exceeded!\n");
- }
+ return fl_bufsz;
}
/* Setup and Configuration
@@ -2371,7 +758,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
- tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
+ tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
@@ -2379,263 +766,23 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
}
}
-/**
- * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
- * @tx_ring: TX ring to free
- */
-static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
- kvfree(tx_ring->txbufs);
-
- if (tx_ring->txds)
- dma_free_coherent(dp->dev, tx_ring->size,
- tx_ring->txds, tx_ring->dma);
-
- tx_ring->cnt = 0;
- tx_ring->txbufs = NULL;
- tx_ring->txds = NULL;
- tx_ring->dma = 0;
- tx_ring->size = 0;
-}
-
-/**
- * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
- * @dp: NFP Net data path struct
- * @tx_ring: TX Ring structure to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-
- tx_ring->cnt = dp->txd_cnt;
-
- tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
- tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
- if (!tx_ring->txds) {
- netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
- tx_ring->cnt);
- goto err_alloc;
- }
-
- tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
- GFP_KERNEL);
- if (!tx_ring->txbufs)
- goto err_alloc;
-
- if (!tx_ring->is_xdp && dp->netdev)
- netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
- tx_ring->idx);
-
- return 0;
-
-err_alloc:
- nfp_net_tx_ring_free(tx_ring);
- return -ENOMEM;
-}
-
static void
-nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
- struct nfp_net_tx_ring *tx_ring)
-{
- unsigned int i;
-
- if (!tx_ring->is_xdp)
- return;
-
- for (i = 0; i < tx_ring->cnt; i++) {
- if (!tx_ring->txbufs[i].frag)
- return;
-
- nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
- __free_page(virt_to_page(tx_ring->txbufs[i].frag));
- }
-}
-
-static int
-nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
- struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
- unsigned int i;
-
- if (!tx_ring->is_xdp)
- return 0;
-
- for (i = 0; i < tx_ring->cnt; i++) {
- txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
- if (!txbufs[i].frag) {
- nfp_net_tx_ring_bufs_free(dp, tx_ring);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
- GFP_KERNEL);
- if (!dp->tx_rings)
- return -ENOMEM;
-
- for (r = 0; r < dp->num_tx_rings; r++) {
- int bias = 0;
-
- if (r >= dp->num_stack_tx_rings)
- bias = dp->num_stack_tx_rings;
-
- nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
- r, bias);
-
- if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
- goto err_free_prev;
-
- if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
- goto err_free_ring;
- }
-
- return 0;
-
-err_free_prev:
- while (r--) {
- nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
-err_free_ring:
- nfp_net_tx_ring_free(&dp->tx_rings[r]);
- }
- kfree(dp->tx_rings);
- return -ENOMEM;
-}
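
The unwind in nfp_net_tx_rings_prepare() is a compact kernel idiom worth calling out: `err_free_ring` jumps *into* the `while (r--)` body, so the ring whose buffer allocation failed gets its descriptor memory freed exactly once, and the loop then tears down all earlier, fully built rings. The shape, reduced to a sketch with hypothetical helpers:

	for (r = 0; r < n; r++) {
		if (alloc_ring(r))		/* hypothetical helpers */
			goto err_free_prev;	/* ring r not allocated */
		if (alloc_bufs(r))
			goto err_free_ring;	/* ring r allocated, bufs not */
	}
	return 0;

err_free_prev:
	while (r--) {
		free_bufs(r);
err_free_ring:				/* entered with ring r allocated */
		free_ring(r);
	}
	return -ENOMEM;
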
-
-static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
+nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
- unsigned int r;
-
- for (r = 0; r < dp->num_tx_rings; r++) {
- nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
- nfp_net_tx_ring_free(&dp->tx_rings[r]);
- }
-
- kfree(dp->tx_rings);
-}
-
-/**
- * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
- * @rx_ring: RX ring to free
- */
-static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
-{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
if (dp->netdev)
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kvfree(rx_ring->rxbufs);
-
- if (rx_ring->rxds)
- dma_free_coherent(dp->dev, rx_ring->size,
- rx_ring->rxds, rx_ring->dma);
-
- rx_ring->cnt = 0;
- rx_ring->rxbufs = NULL;
- rx_ring->rxds = NULL;
- rx_ring->dma = 0;
- rx_ring->size = 0;
-}
-
-/**
- * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
-{
- int err;
-
- if (dp->netdev) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
- rx_ring->idx, rx_ring->r_vec->napi.napi_id);
- if (err < 0)
- return err;
- }
-
- rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
- rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
- if (!rx_ring->rxds) {
- netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
- rx_ring->cnt);
- goto err_alloc;
- }
-
- rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
- GFP_KERNEL);
- if (!rx_ring->rxbufs)
- goto err_alloc;
-
- return 0;
-
-err_alloc:
- nfp_net_rx_ring_free(rx_ring);
- return -ENOMEM;
-}
-
-static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
- GFP_KERNEL);
- if (!dp->rx_rings)
- return -ENOMEM;
-
- for (r = 0; r < dp->num_rx_rings; r++) {
- nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
-
- if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
- goto err_free_prev;
-
- if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
- goto err_free_ring;
- }
-
- return 0;
-
-err_free_prev:
- while (r--) {
- nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
-err_free_ring:
- nfp_net_rx_ring_free(&dp->rx_rings[r]);
- }
- kfree(dp->rx_rings);
- return -ENOMEM;
+ netif_napi_add(dp->netdev, &r_vec->napi,
+ nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
+ else
+ tasklet_enable(&r_vec->tasklet);
}
-static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+static void
+nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
{
- unsigned int r;
-
- for (r = 0; r < dp->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
- nfp_net_rx_ring_free(&dp->rx_rings[r]);
- }
-
- kfree(dp->rx_rings);
+ if (dp->netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
}
static void
@@ -2648,6 +795,17 @@ nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
+
+ if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
+ r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;
+
+ if (r_vec->xsk_pool)
+ xsk_pool_set_rxq_info(r_vec->xsk_pool,
+ &r_vec->rx_ring->xdp_rxq);
+
+ nfp_net_napi_del(dp, r_vec);
+ nfp_net_napi_add(dp, r_vec, idx);
+ }
}
static int
@@ -2656,23 +814,14 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
{
int err;
- /* Setup NAPI */
- if (nn->dp.netdev)
- netif_napi_add(nn->dp.netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
- else
- tasklet_enable(&r_vec->tasklet);
+ nfp_net_napi_add(&nn->dp, r_vec, idx);
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
- if (nn->dp.netdev)
- netif_napi_del(&r_vec->napi);
- else
- tasklet_disable(&r_vec->tasklet);
-
+ nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
@@ -2690,11 +839,7 @@ static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
irq_set_affinity_hint(r_vec->irq_vector, NULL);
- if (nn->dp.netdev)
- netif_napi_del(&r_vec->napi);
- else
- tasklet_disable(&r_vec->tasklet);
-
+ nfp_net_napi_del(&nn->dp, r_vec);
free_irq(r_vec->irq_vector, r_vec);
}
@@ -2768,17 +913,6 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
-static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
-{
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
-
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
-}
-
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -2808,8 +942,11 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->dp.num_rx_rings; r++)
+ for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+ if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
+ nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
+ }
for (r = 0; r < nn->dp.num_tx_rings; r++)
nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
for (r = 0; r < nn->dp.num_r_vecs; r++)
@@ -2818,25 +955,6 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn->dp.ctrl = new_ctrl;
}
-static void
-nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
- struct nfp_net_rx_ring *rx_ring, unsigned int idx)
-{
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
-}
-
-static void
-nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
- struct nfp_net_tx_ring *tx_ring, unsigned int idx)
-{
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
-}
-
/**
* nfp_net_set_config_and_enable() - Write control BAR and enable NFP
* @nn: NFP Net device to reconfigure
@@ -2866,11 +984,11 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
for (r = 0; r < nn->dp.num_rx_rings; r++)
nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
+ U64_MAX >> (64 - nn->dp.num_tx_rings));
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
+ U64_MAX >> (64 - nn->dp.num_rx_rings));
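
The new mask expression removes the 64-ring special case: shifting U64_MAX right by (64 - n) leaves exactly the n low bits set, whereas the old `((u64)1 << n) - 1` form needed the ternary because a 64-bit shift by 64 is undefined behaviour in C. This holds for 1 <= n <= 64, which the ring counts guarantee:

	u64 ring_mask = U64_MAX >> (64 - n);

	/* n == 3:  U64_MAX >> 61 == 0x7                (rings 0-2 enabled) */
	/* n == 64: U64_MAX >> 0  == 0xffffffffffffffff (all rings, no
	 *          special case needed)
	 */
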
if (nn->dp.netdev)
nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
@@ -3296,20 +1414,39 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
*new = nn->dp;
+ new->xsk_pools = kmemdup(new->xsk_pools,
+ array_size(nn->max_r_vecs,
+ sizeof(new->xsk_pools)),
+ GFP_KERNEL);
+ if (!new->xsk_pools) {
+ kfree(new);
+ return NULL;
+ }
+
/* Clear things which need to be recomputed */
new->fl_bufsz = 0;
new->tx_rings = NULL;
new->rx_rings = NULL;
new->num_r_vecs = 0;
new->num_stack_tx_rings = 0;
+ new->txrwb = NULL;
+ new->txrwb_dma = 0;
return new;
}
+static void nfp_net_free_dp(struct nfp_net_dp *dp)
+{
+ kfree(dp->xsk_pools);
+ kfree(dp);
+}
+
static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
struct netlink_ext_ack *extack)
{
+ unsigned int r, xsk_min_fl_bufsz;
+
/* XDP-enabled tests */
if (!dp->xdp_prog)
return 0;
@@ -3322,6 +1459,18 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
return -EINVAL;
}
+ xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
+ for (r = 0; r < nn->max_r_vecs; r++) {
+ if (!dp->xsk_pools[r])
+ continue;
+
+ if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XSK buffer pool chunk size too small");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -3389,7 +1538,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
nfp_net_open_stack(nn);
exit_free_dp:
- kfree(dp);
+ nfp_net_free_dp(dp);
return err;
@@ -3398,7 +1547,7 @@ err_free_rx:
err_cleanup_vecs:
for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(dp);
+ nfp_net_free_dp(dp);
return err;
}
@@ -3482,21 +1631,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
@@ -3547,16 +1696,18 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
if (features & NETIF_F_HW_VLAN_CTAG_TX)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
}
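
Both branches above lean on the GNU `?:` extension: `a ?: b` yields `a` when `a` is non-zero, else `b`. Since `nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2` is either the V2 bit or zero, the expression picks the V2 control bit on capable firmware and falls back to the V1 bit otherwise. Long-hand equivalent for the RX case:

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2)
		new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
	else
		new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
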
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
@@ -3566,6 +1717,13 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (changed & NETIF_F_HW_VLAN_STAG_RX) {
+ if (features & NETIF_F_HW_VLAN_STAG_RX)
+ new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -3595,6 +1753,27 @@ static int nfp_net_set_features(struct net_device *netdev,
}
static netdev_features_t
+nfp_net_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (features & NETIF_F_HW_VLAN_STAG_RX)) {
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
+ } else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
+ }
+ }
+ return features;
+}
+
+static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
@@ -3610,8 +1789,7 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
@@ -3716,6 +1894,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
return nfp_net_xdp_setup_hw(nn, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
@@ -3742,7 +1923,70 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
-const struct net_device_ops nfp_net_netdev_ops = {
+static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
+ nlflags, filter_mask, NULL);
+}
+
+static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem, err;
+ u32 new_ctrl;
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
+ new_ctrl = nn->dp.ctrl;
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA)
+ new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
+ else if (mode == BRIDGE_MODE_VEB)
+ new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
+ else
+ return -EOPNOTSUPP;
+
+ if (new_ctrl == nn->dp.ctrl)
+ return 0;
+
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (!err)
+ nn->dp.ctrl = new_ctrl;
+
+ return err;
+ }
+
+ return -EINVAL;
+}
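
nfp_net_bridge_setlink() hooks the generic ndo_bridge_setlink path, which iproute2 drives with `bridge link set dev <ifname> hwmode {vepa|veb}`. All the handler ultimately does is map the netlink mode onto one control bit and issue a general reconfig:

	/* BRIDGE_MODE_VEPA: hairpin VM-to-VM traffic through the external
	 * switch; BRIDGE_MODE_VEB: switch it locally in the NIC.
	 */
	if (mode == BRIDGE_MODE_VEPA)
		new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
	else if (mode == BRIDGE_MODE_VEB)
		new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
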
+
+const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
@@ -3753,6 +1997,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_rate = nfp_app_set_vf_rate,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
@@ -3763,10 +2008,45 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
+ .ndo_xsk_wakeup = nfp_net_xsk_wakeup,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
+};
+
+const struct net_device_ops nfp_nfdk_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
+ .ndo_open = nfp_net_netdev_open,
+ .ndo_stop = nfp_net_netdev_close,
+ .ndo_start_xmit = nfp_net_tx,
+ .ndo_get_stats64 = nfp_net_stat64,
+ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = nfp_app_set_vf_mac,
+ .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_rate = nfp_app_set_vf_rate,
+ .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
+ .ndo_get_vf_config = nfp_app_get_vf_config,
+ .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
+ .ndo_setup_tc = nfp_port_setup_tc,
+ .ndo_tx_timeout = nfp_net_tx_timeout,
+ .ndo_set_rx_mode = nfp_net_set_rx_mode,
+ .ndo_change_mtu = nfp_net_change_mtu,
+ .ndo_set_mac_address = nfp_net_set_mac_address,
+ .ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
+ .ndo_features_check = nfp_net_features_check,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+ .ndo_bpf = nfp_net_xdp,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
@@ -3806,15 +2086,15 @@ static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
*/
void nfp_net_info(struct nfp_net *nn)
{
- nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
+ nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
nn->dp.is_vf ? "VF " : "",
nn->dp.num_tx_rings, nn->max_tx_rings,
nn->dp.num_rx_rings, nn->max_rx_rings);
nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
- nn->fw_ver.resv, nn->fw_ver.class,
+ nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3823,6 +2103,9 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXQINQ ? "RXQINQ " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
@@ -3832,6 +2115,8 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
+ nn->cap & NFP_NET_CFG_CTRL_VEPA ? "VEPA " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -3843,6 +2128,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @dev_info: NFP ASIC params
* @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
@@ -3855,9 +2141,11 @@ void nfp_net_info(struct nfp_net *nn)
* Return: NFP Net device structure, or ERR_PTR on error.
*/
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+ void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings)
{
+ u64 dma_mask = dma_get_mask(&pdev->dev);
struct nfp_net *nn;
int err;
@@ -3880,7 +2168,36 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.dev = &pdev->dev;
nn->dp.ctrl_bar = ctrl_bar;
+ nn->dev_info = dev_info;
nn->pdev = pdev;
+ nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
+
+ switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
+ case NFP_NET_CFG_VERSION_DP_NFD3:
+ nn->dp.ops = &nfp_nfd3_ops;
+ break;
+ case NFP_NET_CFG_VERSION_DP_NFDK:
+ if (nn->fw_ver.major < 5) {
+ dev_err(&pdev->dev,
+ "NFDK must use ABI 5 or newer, found: %d\n",
+ nn->fw_ver.major);
+ err = -EINVAL;
+ goto err_free_nn;
+ }
+ nn->dp.ops = &nfp_nfdk_ops;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_free_nn;
+ }
+
+ if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
+ dev_err(&pdev->dev,
+ "DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
+ nn->dp.ops->dma_mask, dma_mask);
+ err = -EINVAL;
+ goto err_free_nn;
+ }
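
The mask test reads as "every address the device may already hand out must be addressable by the chosen datapath": ANDing the device mask with the datapath's capability must give the device mask back, i.e. the device mask is a subset. A toy example (the bit widths are invented, not NFD3/NFDK's real limits):

	u64 dev_mask = DMA_BIT_MASK(48);	/* what the device was configured with */
	u64 dp_mask  = DMA_BIT_MASK(40);	/* what this datapath can address */

	/* The AND drops bits 40-47, so the result no longer equals
	 * dev_mask and the pairing is rejected with -EINVAL.
	 */
	bool ok = (dev_mask & dp_mask) == dev_mask;	/* false here */
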
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
@@ -3893,6 +2210,14 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
nn->dp.num_r_vecs = min_t(unsigned int,
nn->dp.num_r_vecs, num_online_cpus());
+ nn->max_r_vecs = nn->dp.num_r_vecs;
+
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ GFP_KERNEL);
+ if (!nn->dp.xsk_pools) {
+ err = -ENOMEM;
+ goto err_free_nn;
+ }
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
@@ -3932,6 +2257,7 @@ void nfp_net_free(struct nfp_net *nn)
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
nfp_ccm_mbox_free(nn);
+ kfree(nn->dp.xsk_pools);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -4048,8 +2374,12 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
- if (nn->cap & NFP_NET_CFG_CTRL_LSO)
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
}
@@ -4063,41 +2393,57 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->vlan_features = netdev->hw_features;
- if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
}
- if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
} else {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
}
}
if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ }
netdev->features = netdev->hw_features;
if (nfp_app_has_tc(nn->app) && nn->port)
netdev->hw_features |= NETIF_F_HW_TC;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
/* Finalise the netdev setup */
- netdev->netdev_ops = &nfp_net_netdev_ops;
+ switch (nn->dp.ops->version) {
+ case NFP_NFD_VER_NFD3:
+ netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ break;
+ case NFP_NFD_VER_NFDK:
+ netdev->netdev_ops = &nfp_nfdk_netdev_ops;
+ break;
+ }
+
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
- netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
+ netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netif_carrier_off(netdev);
@@ -4138,6 +2484,9 @@ static int nfp_net_read_caps(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* Mask out NFD-version-specific features */
+ nn->cap &= nn->dp.ops->cap_mask;
+
/* For control vNICs mask out the capabilities app doesn't want. */
if (!nn->dp.netdev)
nn->cap &= nn->app->type->ctrl_cap_mask;
@@ -4190,6 +2539,10 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ /* Enable TX pointer writeback, if supported */
+ if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3d61a8cb60b0..6714d5e8fdab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,8 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
-/*
- * nfp_net_ctrl.h
+/* nfp_net_ctrl.h
* Netronome network device driver: Control BAR layout
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
@@ -15,33 +14,36 @@
#include <linux/types.h>
-/**
- * Configuration BAR size.
+/* 64-bit per app capabilities */
+#define NFP_NET_APP_CAP_SP_INDIFF BIT_ULL(0) /* indifferent to port speed */
+
+/* Configuration BAR size.
*
* The configuration BAR is 8K in size, but due to
* THB-350, 32k needs to be reserved.
*/
#define NFP_NET_CFG_BAR_SZ (32 * 1024)
-/**
- * Offset in Freelist buffer where packet starts on RX
- */
+/* Offset in Freelist buffer where packet starts on RX */
#define NFP_NET_RX_OFFSET 32
-/**
- * LSO parameters
+/* LSO parameters
* %NFP_NET_LSO_MAX_HDR_SZ: Maximum header size supported for LSO frames
* %NFP_NET_LSO_MAX_SEGS: Maximum number of segments LSO frame can produce
*/
#define NFP_NET_LSO_MAX_HDR_SZ 255
#define NFP_NET_LSO_MAX_SEGS 64
-/**
- * Prepend field types
- */
+/* Working with the metadata VLAN API (NFD version >= 2.0) */
+#define NFP_NET_META_VLAN_STRIP BIT(31)
+#define NFP_NET_META_VLAN_TPID_MASK GENMASK(19, 16)
+#define NFP_NET_META_VLAN_TCI_MASK GENMASK(15, 0)
+
+/* Prepend field types */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_VLAN 4 /* ctag or stag type */
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
@@ -49,9 +51,11 @@
#define NFP_META_PORT_ID_CTRL ~0U
-/**
- * Hash type pre-pended when a RSS hash was computed
- */
+/* Prepend field sizes */
+#define NFP_NET_META_VLAN_SIZE 4
+#define NFP_NET_META_PORTID_SIZE 4
+#define NFP_NET_META_CONN_HANDLE_SIZE 8
+/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
#define NFP_NET_RSS_IPV6 2
@@ -63,16 +67,14 @@
#define NFP_NET_RSS_IPV6_UDP 8
#define NFP_NET_RSS_IPV6_EX_UDP 9
-/**
- * Ring counts
+/* Ring counts
* %NFP_NET_TXR_MAX: Maximum number of TX rings
* %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
-/**
- * Read/Write config words (0x0000 - 0x002c)
+/* Read/Write config words (0x0000 - 0x002c)
* %NFP_NET_CFG_CTRL: Global control
* %NFP_NET_CFG_UPDATE: Indicate which fields are updated
* %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
@@ -100,12 +102,15 @@
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
+#define NFP_NET_CFG_CTRL_RXQINQ (0x1 << 13) /* Enable S-tag strip */
+#define NFP_NET_CFG_CTRL_RXVLAN_V2 (0x1 << 15) /* Enable C-tag strip */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
-#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_VEPA (0x1 << 22) /* Enable VEPA mode */
+#define NFP_NET_CFG_CTRL_TXVLAN_V2 (0x1 << 23) /* Enable VLAN C-tag insert*/
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -122,6 +127,10 @@
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_RXVLAN_ANY (NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2)
+#define NFP_NET_CFG_CTRL_TXVLAN_ANY (NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2)
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
@@ -147,8 +156,7 @@
#define NFP_NET_CFG_LSC 0x0020
#define NFP_NET_CFG_MACADDR 0x0024
-/**
- * Read-only words (0x0030 - 0x0050):
+/* Read-only words (0x0030 - 0x0050):
* %NFP_NET_CFG_VERSION: Firmware version number
* %NFP_NET_CFG_STS: Status
* %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
@@ -162,7 +170,10 @@
* - define more STS bits
*/
#define NFP_NET_CFG_VERSION 0x0030
-#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24)
+#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xfe << 24)
+#define NFP_NET_CFG_VERSION_DP_NFD3 0
+#define NFP_NET_CFG_VERSION_DP_NFDK 1
+#define NFP_NET_CFG_VERSION_DP_MASK 1
#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16)
#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0
@@ -185,6 +196,10 @@
#define NFP_NET_CFG_STS_LINK_RATE_40G 5
#define NFP_NET_CFG_STS_LINK_RATE_50G 6
#define NFP_NET_CFG_STS_LINK_RATE_100G 7
+/* The NSP link rate is a 16-bit word. It is determined by the NSP
+ * and written to the CFG BAR by the NFP driver.
+ */
+#define NFP_NET_CFG_STS_NSP_LINK_RATE 0x0036
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
@@ -193,36 +208,31 @@
#define NFP_NET_CFG_START_TXQ 0x0048
#define NFP_NET_CFG_START_RXQ 0x004c
-/**
- * Prepend configuration
+/* Prepend configuration
*/
#define NFP_NET_CFG_RX_OFFSET 0x0050
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
-/**
- * RSS capabilities
+/* RSS capabilities
* %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
* %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
-/**
- * TLV area start
+/* TLV area start
* %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
*/
#define NFP_NET_CFG_TLV_BASE 0x0058
-/**
- * VXLAN/UDP encap configuration
+/* VXLAN/UDP encap configuration
* %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
-/**
- * BPF section
+/* BPF section
* %NFP_NET_CFG_BPF_ABI: BPF ABI version
* %NFP_NET_CFG_BPF_CAP: BPF capabilities
* %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
@@ -247,14 +257,12 @@
#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
-/**
- * 40B reserved for future use (0x0098 - 0x00c0)
+/* 40B reserved for future use (0x0098 - 0x00c0)
*/
#define NFP_NET_CFG_RESERVED 0x0098
#define NFP_NET_CFG_RESERVED_SZ 0x0028
-/**
- * RSS configuration (0x0100 - 0x01ac):
+/* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
* %NFP_NET_CFG_RSS_CFG: RSS configuration word
* %NFP_NET_CFG_RSS_KEY: RSS "secret" key
@@ -281,8 +289,7 @@
NFP_NET_CFG_RSS_KEY_SZ)
#define NFP_NET_CFG_RSS_ITBL_SZ 0x80
-/**
- * TX ring configuration (0x200 - 0x800)
+/* TX ring configuration (0x200 - 0x800)
* %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
* %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
* %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
@@ -301,8 +308,7 @@
#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \
((_x) * 0x4))
-/**
- * RX ring configuration (0x0800 - 0x0c00)
+/* RX ring configuration (0x0800 - 0x0c00)
* %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
* %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
* %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
@@ -318,8 +324,7 @@
#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \
((_x) * 0x4))
-/**
- * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
+/* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
 * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is indexed
 * by MSI-X entry and entries are 1B in size. If an entry is zero, the
@@ -334,8 +339,7 @@
#define NFP_NET_CFG_ICR_RXTX 0x1
#define NFP_NET_CFG_ICR_LSC 0x2
-/**
- * General device stats (0x0d00 - 0x0d90)
+/* General device stats (0x0d00 - 0x0d90)
 * All counters are 64-bit.
*/
#define NFP_NET_CFG_STATS_BASE 0x0d00
@@ -368,8 +372,7 @@
#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
-/**
- * Per ring stats (0x1000 - 0x1800)
+/* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
* %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
* %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
@@ -381,8 +384,7 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
-/**
- * General use mailbox area (0x1800 - 0x19ff)
+/* General use mailbox area (0x1800 - 0x19ff)
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
@@ -399,8 +401,7 @@
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
-/**
- * VLAN filtering using general use mailbox
+/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
* %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
@@ -411,8 +412,7 @@
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
-/**
- * TLV capabilities
+/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
* %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
@@ -438,8 +438,7 @@
#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
-/**
- * Capability TLV types
+/* Capability TLV types
*
* %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
* Special TLV type to catch bugs, should never be encountered. Drivers should
@@ -512,8 +511,7 @@
struct device;
-/**
- * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+/* struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
* @me_freq_mhz: ME clock_freq (MHz)
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 553c708694e8..d8b735ccf899 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include "nfp_net.h"
+#include "nfp_net_dp.h"
static struct dentry *nfp_dir;
@@ -42,13 +43,19 @@ static int nfp_rx_q_show(struct seq_file *file, void *data)
seq_printf(file, "%04d: 0x%08x 0x%08x", i,
rxd->vals[0], rxd->vals[1]);
- frag = READ_ONCE(rx_ring->rxbufs[i].frag);
- if (frag)
- seq_printf(file, " frag=%p", frag);
+ if (!r_vec->xsk_pool) {
+ frag = READ_ONCE(rx_ring->rxbufs[i].frag);
+ if (frag)
+ seq_printf(file, " frag=%p", frag);
- if (rx_ring->rxbufs[i].dma_addr)
- seq_printf(file, " dma_addr=%pad",
- &rx_ring->rxbufs[i].dma_addr);
+ if (rx_ring->rxbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &rx_ring->rxbufs[i].dma_addr);
+ } else {
+ if (rx_ring->xsk_rxbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &rx_ring->xsk_rxbufs[i].dma_addr);
+ }
if (i == rx_ring->rd_p % rxd_cnt)
seq_puts(file, " H_RD ");
@@ -74,10 +81,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_tx_desc *txd;
- int d_rd_p, d_wr_p, txd_cnt;
struct nfp_net *nn;
- int i;
+ int d_rd_p, d_wr_p;
rtnl_lock();
@@ -91,49 +96,20 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
if (!nfp_net_running(nn))
goto out;
- txd_cnt = tx_ring->cnt;
-
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u",
tx_ring->idx, tx_ring->qcidx,
tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+ if (tx_ring->txrwb)
+ seq_printf(file, " TXRWB=%llu", *tx_ring->txrwb);
+ seq_putc(file, '\n');
- for (i = 0; i < txd_cnt; i++) {
- txd = &tx_ring->txds[i];
- seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
- txd->vals[0], txd->vals[1],
- txd->vals[2], txd->vals[3]);
-
- if (tx_ring == r_vec->tx_ring) {
- struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);
-
- if (skb)
- seq_printf(file, " skb->head=%p skb->data=%p",
- skb->head, skb->data);
- } else {
- seq_printf(file, " frag=%p",
- READ_ONCE(tx_ring->txbufs[i].frag));
- }
-
- if (tx_ring->txbufs[i].dma_addr)
- seq_printf(file, " dma_addr=%pad",
- &tx_ring->txbufs[i].dma_addr);
-
- if (i == tx_ring->rd_p % txd_cnt)
- seq_puts(file, " H_RD");
- if (i == tx_ring->wr_p % txd_cnt)
- seq_puts(file, " H_WR");
- if (i == d_rd_p % txd_cnt)
- seq_puts(file, " D_RD");
- if (i == d_wr_p % txd_cnt)
- seq_puts(file, " D_WR");
-
- seq_putc(file, '\n');
- }
+ nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring,
+ d_rd_p, d_wr_p);
out:
rtnl_unlock();
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
new file mode 100644
index 000000000000..550df83b798c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include "nfp_app.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+/**
+ * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
+ * @dp: NFP Net data path struct
+ * @dma_addr: Pointer to storage for DMA address (output param)
+ *
+ * This function will allocate a new page frag and map it for DMA.
+ *
+ * Return: allocated page frag or NULL on failure.
+ */
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = netdev_alloc_frag(dp->fl_bufsz);
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL);
+ frag = page ? page_address(page) : NULL;
+ }
+ if (!frag) {
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
+ return NULL;
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
+ * @tx_ring: TX ring structure
+ * @dp: NFP Net data path struct
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
+ * @is_xdp: Is this an XDP TX ring?
+ */
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, unsigned int idx,
+ bool is_xdp)
+{
+ struct nfp_net *nn = r_vec->nfp_net;
+
+ tx_ring->idx = idx;
+ tx_ring->r_vec = r_vec;
+ tx_ring->is_xdp = is_xdp;
+ u64_stats_init(&tx_ring->r_vec->tx_sync);
+
+ tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
+ tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
+ tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
+ * @rx_ring: RX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
+ */
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
+{
+ struct nfp_net *nn = r_vec->nfp_net;
+
+ rx_ring->idx = idx;
+ rx_ring->r_vec = r_vec;
+ u64_stats_init(&rx_ring->r_vec->rx_sync);
+
+ rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
+ rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
+ * @rx_ring: RX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int wr_idx, last_idx;
+
+ /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
+ * kept at cnt - 1 FL bufs.
+ */
+ if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+ return;
+
+ /* Move the empty entry to the end of the list */
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+ last_idx = rx_ring->cnt - 1;
+ if (rx_ring->r_vec->xsk_pool) {
+ rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
+ memset(&rx_ring->xsk_rxbufs[last_idx], 0,
+ sizeof(*rx_ring->xsk_rxbufs));
+ } else {
+ rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
+ memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
+ }
+
+ memset(rx_ring->rxds, 0, rx_ring->size);
+ rx_ring->wr_p = 0;
+ rx_ring->rd_p = 0;
+}
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
+ * entries. After device is disabled nfp_net_rx_ring_reset() must be called
+ * to restore required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ /* A NULL frag can only happen when initial filling of the ring
+ * fails to allocate enough buffers and calls here to free
+ * already allocated ones.
+ */
+ if (!rx_ring->rxbufs[i].frag)
+ continue;
+
+ nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
+ rx_ring->rxbufs[i].dma_addr = 0;
+ rx_ring->rxbufs[i].frag = NULL;
+ }
+}
+
+/**
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to allocate buffers for
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_rx_buf *rxbufs;
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return 0;
+
+ rxbufs = rx_ring->rxbufs;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
+ if (!rxbufs[i].frag) {
+ nfp_net_rx_ring_bufs_free(dp, rx_ring);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+ GFP_KERNEL);
+ if (!dp->tx_rings)
+ return -ENOMEM;
+
+ if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
+ dp->txrwb = dma_alloc_coherent(dp->dev,
+ dp->num_tx_rings * sizeof(u64),
+ &dp->txrwb_dma, GFP_KERNEL);
+ if (!dp->txrwb)
+ goto err_free_rings;
+ }
+
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ int bias = 0;
+
+ if (r >= dp->num_stack_tx_rings)
+ bias = dp->num_stack_tx_rings;
+
+ nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
+ &nn->r_vecs[r - bias], r, bias);
+
+ if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+err_free_ring:
+ nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+ }
+ if (dp->txrwb)
+ dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+ dp->txrwb, dp->txrwb_dma);
+err_free_rings:
+ kfree(dp->tx_rings);
+ return -ENOMEM;
+}
+
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+ nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+ }
+
+ if (dp->txrwb)
+ dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+ dp->txrwb, dp->txrwb_dma);
+ kfree(dp->tx_rings);
+}
+
+/**
+ * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
+ * @rx_ring: RX ring to free
+ */
+static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ kvfree(rx_ring->xsk_rxbufs);
+ else
+ kvfree(rx_ring->rxbufs);
+
+ if (rx_ring->rxds)
+ dma_free_coherent(dp->dev, rx_ring->size,
+ rx_ring->rxds, rx_ring->dma);
+
+ rx_ring->cnt = 0;
+ rx_ring->rxbufs = NULL;
+ rx_ring->xsk_rxbufs = NULL;
+ rx_ring->rxds = NULL;
+ rx_ring->dma = 0;
+ rx_ring->size = 0;
+}
+
+/**
+ * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
+{
+ enum xdp_mem_type mem_type;
+ size_t rxbuf_sw_desc_sz;
+ int err;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+ mem_type = MEM_TYPE_XSK_BUFF_POOL;
+ rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
+ } else {
+ mem_type = MEM_TYPE_PAGE_ORDER0;
+ rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
+ }
+
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx, rx_ring->r_vec->napi.napi_id);
+ if (err < 0)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
+ if (err)
+ goto err_alloc;
+ }
+
+ rx_ring->cnt = dp->rxd_cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rx_ring->rxds) {
+ netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ rx_ring->cnt);
+ goto err_alloc;
+ }
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+ rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+ GFP_KERNEL);
+ if (!rx_ring->xsk_rxbufs)
+ goto err_alloc;
+ } else {
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+ GFP_KERNEL);
+ if (!rx_ring->rxbufs)
+ goto err_alloc;
+ }
+
+ return 0;
+
+err_alloc:
+ nfp_net_rx_ring_free(rx_ring);
+ return -ENOMEM;
+}
+
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+ GFP_KERNEL);
+ if (!dp->rx_rings)
+ return -ENOMEM;
+
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
+
+ if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+err_free_ring:
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
+ }
+ kfree(dp->rx_rings);
+ return -ENOMEM;
+}
+
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
+ }
+
+ kfree(dp->rx_rings);
+}
+
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx)
+{
+ /* Write the DMA address, size and MSI-X info to the device */
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
+}
+
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+ if (tx_ring->txrwb) {
+ *tx_ring->txrwb = 0;
+ nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
+ nn->dp.txrwb_dma + idx * sizeof(u64));
+ }
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
+}
+
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+ nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nn->dp.ops->xmit(skb, netdev);
+}
+
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+ bool ret;
+
+ spin_lock_bh(&r_vec->lock);
+ ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+ spin_unlock_bh(&r_vec->lock);
+
+ return ret;
+}
+
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta)
+{
+ u16 tpid = 0, tci = 0;
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
+ tpid = ETH_P_8021Q;
+ tci = le16_to_cpu(rxd->rxd.vlan);
+ } else if (meta->vlan.stripped) {
+ if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
+ tpid = ETH_P_8021Q;
+ else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
+ tpid = ETH_P_8021AD;
+ else
+ return false;
+
+ tci = meta->vlan.tci;
+ }
+ if (tpid)
+ __vlan_hwaccel_put_tag(skb, htons(tpid), tci);
+
+ return true;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
new file mode 100644
index 000000000000..831c83ce0d3d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_NET_DP_
+#define _NFP_NET_DP_
+
+#include "nfp_net.h"
+
+static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+{
+ return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void
+nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
+{
+ dma_sync_single_for_device(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
+}
+
+static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
+ dma_addr_t dma_addr)
+{
+ dma_unmap_single_attrs(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
+ dma_addr_t dma_addr,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
+ len, dp->rx_dma_dir);
+}
+
+/**
+ * nfp_net_tx_full() - check if the TX ring is full
+ * @tx_ring: TX ring to check
+ * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
+ *
+ * This function checks, based on the *host copy* of the read/write
+ * pointers, whether a given TX ring is full. The real TX queue may have
+ * some newly made available slots.
+ *
+ * Return: True if the ring is full.
+ */
+static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
+{
+ return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
+}
+
+static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
+{
+ wmb(); /* drain writebuffer */
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
+ tx_ring->wr_ptr_add = 0;
+}
+
+static inline u32
+nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
+{
+ if (tx_ring->txrwb)
+ return *tx_ring->txrwb;
+ return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+}
+
+static inline void nfp_net_free_frag(void *frag, bool xdp)
+{
+ if (!xdp)
+ skb_free_frag(frag);
+ else
+ __free_page(virt_to_page(frag));
+}
+
+/**
+ * nfp_net_irq_unmask() - Unmask automasked interrupt
+ * @nn: NFP Network structure
+ * @entry_nr: MSI-X table entry
+ *
+ * Clear the ICR for the IRQ entry.
+ */
+static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
+{
+ nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
+ nn_pci_flush(nn);
+}
+
+struct seq_file;
+
+/* Common */
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx);
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx);
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);
+
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta);
+
+enum nfp_nfd_version {
+ NFP_NFD_VER_NFD3,
+ NFP_NFD_VER_NFDK,
+};
+
+/**
+ * struct nfp_dp_ops - Hooks to abstract the different data path implementations
+ * @version: Data path type (NFD3 or NFDK)
+ * @tx_min_desc_per_pkt: Minimum number of TX descriptors needed per packet
+ * @cap_mask: Mask of supported features
+ * @dma_mask: DMA addressing capability
+ * @poll: Napi poll for normal rx/tx
+ * @xsk_poll: Napi poll when xsk is enabled
+ * @ctrl_poll: Tasklet poll for ctrl rx/tx
+ * @xmit: Xmit for normal path
+ * @ctrl_tx_one: Xmit for ctrl path
+ * @rx_ring_fill_freelist: Give buffers from the ring to FW
+ * @tx_ring_alloc: Allocate resource for a TX ring
+ * @tx_ring_reset: Free any untransmitted buffers and reset pointers
+ * @tx_ring_free: Free resources allocated to a TX ring
+ * @tx_ring_bufs_alloc: Allocate resource for each TX buffer
+ * @tx_ring_bufs_free: Free resources allocated to each TX buffer
+ * @print_tx_descs: Show TX ring's info for debugging purposes
+ */
+struct nfp_dp_ops {
+ enum nfp_nfd_version version;
+ unsigned int tx_min_desc_per_pkt;
+ u32 cap_mask;
+ u64 dma_mask;
+
+ int (*poll)(struct napi_struct *napi, int budget);
+ int (*xsk_poll)(struct napi_struct *napi, int budget);
+ void (*ctrl_poll)(struct tasklet_struct *t);
+ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
+ bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+ void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+ int (*tx_ring_alloc)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_reset)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
+ int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+
+ void (*print_tx_descs)(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p);
+};
+
+static inline void
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_reset(dp, tx_ring);
+}
+
+static inline void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ dp->ops->rx_ring_fill_freelist(dp, rx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ dp->ops->tx_ring_free(tx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ dp->ops->tx_ring_bufs_free(dp, tx_ring);
+}
+
+static inline void
+nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
+}
+
+extern const struct nfp_dp_ops nfp_nfd3_ops;
+extern const struct nfp_dp_ops nfp_nfdk_ops;
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _NFP_NET_DP_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e0c27471bcdb..22a5d2419084 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -21,12 +21,15 @@
#include <linux/sfp.h>
#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
+#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
+#include "nfpcore/nfp_cpp.h"
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
@@ -202,7 +205,7 @@ nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
{
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
- strlcpy(drvinfo->driver, dev_driver_string(&pdev->dev),
+ strscpy(drvinfo->driver, dev_driver_string(&pdev->dev),
sizeof(drvinfo->driver));
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -217,20 +220,51 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct nfp_net *nn = netdev_priv(netdev);
snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
- nn->fw_ver.resv, nn->fw_ver.class,
+ nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor);
- strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
+ strscpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}
+static int
+nfp_net_nway_reset(struct net_device *netdev)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ err = nfp_eth_set_configured(port->app->cpp, eth_port->index, false);
+ if (err) {
+ netdev_info(netdev, "Link down failed: %d\n", err);
+ return err;
+ }
+
+ err = nfp_eth_set_configured(port->app->cpp, eth_port->index, true);
+ if (err) {
+ netdev_info(netdev, "Link up failed: %d\n", err);
+ return err;
+ }
+
+ netdev_info(netdev, "Link reset succeeded\n");
+ return 0;
+}
+
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
- strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ strscpy(drvinfo->bus_info, pci_name(app->pdev),
sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -270,25 +304,14 @@ static int
nfp_net_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- static const u32 ls_to_ethtool[] = {
- [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
- [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
- [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
- [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
- [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
- [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
- [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
- [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
- };
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
struct nfp_net *nn;
- u32 sts, ls;
+ unsigned int speed;
+ u16 sts;
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
@@ -296,8 +319,15 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
if (eth_port) {
- cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
- AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ if (eth_port->supp_aneg) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (eth_port->aneg == NFP_ANEG_AUTO) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ }
+ }
nfp_net_set_fec_link_mode(eth_port, cmd);
}
@@ -316,18 +346,15 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
nn = netdev_priv(netdev);
- sts = nn_readl(nn, NFP_NET_CFG_STS);
-
- ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
- if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
+ speed = nfp_net_lr2speed(FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts));
+ if (!speed)
return -EOPNOTSUPP;
- if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
- ls >= ARRAY_SIZE(ls_to_ethtool))
- return 0;
-
- cmd->base.speed = ls_to_ethtool[ls];
- cmd->base.duplex = DUPLEX_FULL;
+ if (speed != SPEED_UNKNOWN) {
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ }
return 0;
}
@@ -336,6 +363,7 @@ static int
nfp_net_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ bool req_aneg = (cmd->base.autoneg == AUTONEG_ENABLE);
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
struct nfp_nsp *nsp;
@@ -355,13 +383,25 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (IS_ERR(nsp))
return PTR_ERR(nsp);
- err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
- NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
+ if (req_aneg && !eth_port->supp_aneg) {
+ netdev_warn(netdev, "Autoneg is not supported.\n");
+ err = -EOPNOTSUPP;
+ goto err_bad_set;
+ }
+
+ err = __nfp_eth_set_aneg(nsp, req_aneg ? NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
if (err)
goto err_bad_set;
+
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ if (req_aneg) {
+ netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
+ err = -EINVAL;
+ goto err_bad_set;
+ }
+
err = __nfp_eth_set_speed(nsp, speed);
if (err)
goto err_bad_set;
@@ -386,9 +426,10 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
+ u32 qc_max = nn->dev_info->max_qc_size;
- ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
- ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
+ ring->rx_max_pending = qc_max;
+ ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
ring->rx_pending = nn->dp.rxd_cnt;
ring->tx_pending = nn->dp.txd_cnt;
}
@@ -412,19 +453,22 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
+ u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
struct nfp_net *nn = netdev_priv(netdev);
- u32 rxd_cnt, txd_cnt;
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ qc_min = nn->dev_info->min_qc_size;
+ qc_max = nn->dev_info->max_qc_size;
+ tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
/* Round up to supported values */
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
- txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+ if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
+ txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
return -EINVAL;
if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
@@ -436,6 +480,160 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
+static int nfp_test_link(struct net_device *netdev)
+{
+ if (!netif_carrier_ok(netdev) || !(netdev->flags & IFF_UP))
+ return 1;
+
+ return 0;
+}
+
+static int nfp_test_nsp(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_nsp_identify *nspi;
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_info(netdev, "NSP Test: failed to access the NSP: %d\n", err);
+ goto exit;
+ }
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15) {
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
+ if (!nspi) {
+ err = -ENOMEM;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_identify(nsp, nspi, sizeof(*nspi));
+ if (err < 0)
+ netdev_info(netdev, "NSP Test: reading bsp version failed %d\n", err);
+
+ kfree(nspi);
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+exit:
+ return err;
+}
+
+static int nfp_test_fw(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ netdev_info(netdev, "FW Test: update failed %d\n", err);
+
+ return err;
+}
+
+static int nfp_test_reg(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_cpp *cpp = app->cpp;
+ u32 model = nfp_cpp_model(cpp);
+ u32 value;
+ int err;
+
+ err = nfp_cpp_model_autodetect(cpp, &value);
+ if (err < 0) {
+ netdev_info(netdev, "REG Test: NFP model detection failed %d\n", err);
+ return err;
+ }
+
+ return (value == model) ? 0 : 1;
+}
+
+static bool link_test_supported(struct net_device *netdev)
+{
+ return true;
+}
+
+static bool nsp_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static bool fw_test_supported(struct net_device *netdev)
+{
+ if (nfp_netdev_is_nfp_net(netdev))
+ return true;
+
+ return false;
+}
+
+static bool reg_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static struct nfp_self_test_item {
+ char name[ETH_GSTRING_LEN];
+ bool (*is_supported)(struct net_device *dev);
+ int (*func)(struct net_device *dev);
+} nfp_self_test[] = {
+ {"Link Test", link_test_supported, nfp_test_link},
+ {"NSP Test", nsp_test_supported, nfp_test_nsp},
+ {"Firmware Test", fw_test_supported, nfp_test_fw},
+ {"Register Test", reg_test_supported, nfp_test_reg}
+};
+
+#define NFP_TEST_TOTAL_NUM ARRAY_SIZE(nfp_self_test)
+
+static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ ethtool_sprintf(&data, nfp_self_test[i].name);
+}
+
+static int nfp_get_self_test_count(struct net_device *netdev)
+{
+ int i, count = 0;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ count++;
+
+ return count;
+}
+
+static void nfp_net_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ int i, ret, count = 0;
+
+ netdev_info(netdev, "Start self test\n");
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++) {
+ if (nfp_self_test[i].is_supported(netdev)) {
+ ret = nfp_self_test[i].func(netdev);
+ if (ret)
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[count++] = ret;
+ }
+ }
+
+ netdev_info(netdev, "Test end\n");
+}
+
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -488,7 +686,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -496,10 +694,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -509,7 +707,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -699,6 +897,9 @@ static void nfp_net_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -733,6 +934,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
cnt += nfp_mac_get_stats_count(netdev);
cnt += nfp_app_port_get_stats_count(nn->port);
return cnt;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -751,6 +954,9 @@ static void nfp_port_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -780,6 +986,8 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
return count;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -837,7 +1045,7 @@ nfp_port_get_fecparam(struct net_device *netdev,
return 0;
param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
- param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
+ param->active_fec = nfp_port_fec_nsp_to_ethtool(BIT(eth_port->act_fec));
return 0;
}
@@ -1224,6 +1432,8 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ /* update port state to get latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
@@ -1454,14 +1664,219 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static void nfp_port_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return;
+
+ /* Currently pause frame support is fixed */
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+}
+
+static int nfp_net_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Control LED to blink */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 1);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Control LED to normal mode */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+#define NFP_EEPROM_LEN ETH_ALEN
+
+static int
+nfp_net_get_eeprom_len(struct net_device *netdev)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return 0;
+
+ return NFP_EEPROM_LEN;
+}
+
+static int
+nfp_net_get_nsp_hwindex(struct net_device *netdev,
+ struct nfp_nsp **nspptr,
+ u32 *index)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
+ netdev_err(netdev, "NSP doesn't support PF MAC generation\n");
+ nfp_nsp_close(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ *nspptr = nsp;
+ *index = eth_port->eth_index;
+
+ return 0;
+}
+
+static int
+nfp_net_get_port_mac_by_hwinfo(struct net_device *netdev,
+ u8 *mac_addr)
+{
+ char hwinfo[32] = {};
+ struct nfp_nsp *nsp;
+ u32 index;
+ int err;
+
+ err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
+ if (err)
+ return err;
+
+ snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index);
+ err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ netdev_err(netdev, "Reading persistent MAC address failed: %d\n",
+ err);
+ return -EOPNOTSUPP;
+ }
+
+ if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &mac_addr[0], &mac_addr[1], &mac_addr[2],
+ &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
+ netdev_err(netdev, "Can't parse persistent MAC address (%s)\n",
+ hwinfo);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_set_port_mac_by_hwinfo(struct net_device *netdev,
+ u8 *mac_addr)
+{
+ char hwinfo[32] = {};
+ struct nfp_nsp *nsp;
+ u32 index;
+ int err;
+
+ err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
+ if (err)
+ return err;
+
+ snprintf(hwinfo, sizeof(hwinfo),
+ "eth%u.mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ index, mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5]);
+
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ netdev_err(netdev, "HWinfo set failed: %d, hwinfo: %s\n",
+ err, hwinfo);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ u8 buf[NFP_EEPROM_LEN] = {};
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ eeprom->magic = nn->pdev->vendor | (nn->pdev->device << 16);
+ memcpy(bytes, buf + eeprom->offset, eeprom->len);
+
+ return 0;
+}
+
+static int
+nfp_net_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ u8 buf[NFP_EEPROM_LEN] = {};
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (nn->pdev->vendor | nn->pdev->device << 16))
+ return -EINVAL;
+
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ memcpy(buf + eeprom->offset, bytes, eeprom->len);
+ if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = nfp_net_get_drvinfo,
+ .nway_reset = nfp_net_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
+ .self_test = nfp_net_self_test,
.get_strings = nfp_net_get_strings,
.get_ethtool_stats = nfp_net_get_stats,
.get_sset_count = nfp_net_get_sset_count,
@@ -1476,6 +1891,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_eeprom_len = nfp_net_get_eeprom_len,
+ .get_eeprom = nfp_net_get_eeprom,
+ .set_eeprom = nfp_net_set_eeprom,
.get_module_info = nfp_port_get_module_info,
.get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
@@ -1486,13 +1904,17 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
.get_drvinfo = nfp_app_get_drvinfo,
+ .nway_reset = nfp_net_nway_reset,
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
+ .self_test = nfp_net_self_test,
.get_sset_count = nfp_port_get_sset_count,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
@@ -1503,6 +1925,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 751f76cd4f79..3bae92dc899e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -22,6 +22,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
@@ -76,12 +77,6 @@ static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
-static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
-{
- return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
- NFP_APP_CORE_NIC);
-}
-
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
if (nfp_net_is_data_vnic(nn))
@@ -116,13 +111,12 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
- nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
@@ -202,6 +196,9 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
goto err_free_prev;
}
+ if (nn->port)
+ nn->port->link_cb = nfp_net_refresh_port_table;
+
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
/* Kill the vNIC if app init marked it as invalid */
@@ -307,6 +304,7 @@ err_prev_deinit:
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
+ struct devlink *devlink = priv_to_devlink(pf);
u8 __iomem *ctrl_bar;
int err;
@@ -314,9 +312,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
err = nfp_app_init(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
if (err)
goto err_free;
@@ -343,9 +341,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_app_clean(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -354,14 +352,16 @@ err_free:
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
+
if (pf->ctrl_vnic) {
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_app_clean(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_app_free(pf->app);
pf->app = NULL;
@@ -495,8 +495,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
}
cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
- mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
- NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
+ mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
+ nfp_qcp_queue_offset(pf->dev_info, 0),
+ pf->dev_info->qc_area_sz, &pf->qc_area);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(mem);
@@ -519,6 +520,57 @@ err_unmap_ctrl:
return err;
}
+static const unsigned int lr_to_speed[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
+};
+
+unsigned int nfp_net_lr2speed(unsigned int linkrate)
+{
+ if (linkrate < ARRAY_SIZE(lr_to_speed))
+ return lr_to_speed[linkrate];
+
+ return SPEED_UNKNOWN;
+}
+
+unsigned int nfp_net_speed2lr(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
+ if (speed == lr_to_speed[i])
+ return i;
+ }
+
+ return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
+}
+
+static void nfp_net_notify_port_speed(struct nfp_port *port)
+{
+ struct net_device *netdev = port->netdev;
+ struct nfp_net *nn;
+ u16 sts;
+
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return;
+
+ nn = netdev_priv(netdev);
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
+
+ if (!(sts & NFP_NET_CFG_STS_LINK)) {
+ nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
+ return;
+ }
+
+ nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
+}
+
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -540,18 +592,20 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
}
memcpy(port->eth_port, eth_port, sizeof(*eth_port));
+ nfp_net_notify_port_speed(port);
return 0;
}
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
struct nfp_port *port;
int err;
- lockdep_assert_held(&pf->lock);
+ devl_assert_locked(devlink);
/* Check for nfp_net_pci_remove() racing against us */
if (list_empty(&pf->vnics))
@@ -600,10 +654,11 @@ static void nfp_net_refresh_vnics(struct work_struct *work)
{
struct nfp_pf *pf = container_of(work, struct nfp_pf,
port_refresh_work);
+ struct devlink *devlink = priv_to_devlink(pf);
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_net_refresh_port_table_sync(pf);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
}
void nfp_net_refresh_port_table(struct nfp_port *port)
@@ -672,9 +727,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+ fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ fw_ver.extend, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_unmap;
}
@@ -690,7 +747,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
break;
default:
nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class,
+ fw_ver.extend, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_unmap;
@@ -709,7 +766,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_shared_buf_unreg;
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -729,7 +786,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -742,7 +799,7 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
@@ -756,10 +813,11 @@ err_unmap:
void nfp_net_pci_remove(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net *nn, *next;
devlink_unregister(priv_to_devlink(pf));
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
continue;
@@ -771,7 +829,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 181ac8e789a3..8b77582bdfa0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -20,7 +20,7 @@ struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
return rcu_dereference_protected(set->reprs[id],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
}
static void
@@ -286,8 +286,7 @@ nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
if (repr->dst->u.port_info.lower_dev != lower)
return;
- netif_set_gso_max_size(netdev, lower->gso_max_size);
- netif_set_gso_max_segs(netdev, lower->gso_max_segs);
+ netif_inherit_tso_max(netdev, lower);
netdev_update_features(netdev);
}
@@ -366,9 +365,9 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->vlan_features = netdev->hw_features;
- if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
+ if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN_ANY)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
else
@@ -376,12 +375,16 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
}
if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (repr_cap & NFP_NET_CFG_CTRL_RXQINQ)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
netdev->features = netdev->hw_features;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
@@ -476,7 +479,7 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
int i;
reprs = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
if (!reprs)
return;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 3fdaaf8ed2ba..6eeeb0fda91f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -15,7 +15,7 @@
#include "nfp_net_sriov.h"
static int
-nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
+nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn)
{
u16 cap_vf;
@@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP);
if ((cap_vf & cap) != cap) {
- nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
+ if (warn)
+ nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
return -EOPNOTSUPP;
}
if (vf < 0 || vf >= app->pf->num_vfs) {
- nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
+ if (warn)
+ nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
return -EINVAL;
}
@@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
unsigned int vf_offset;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true);
if (err)
return err;
@@ -95,15 +97,17 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ u16 update = NFP_NET_VF_CFG_MB_UPD_VLAN;
+ bool is_proto_sup = true;
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true);
if (err)
return err;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (!eth_type_vlan(vlan_proto))
return -EOPNOTSUPP;
if (vlan > 4095 || qos > 7) {
@@ -112,14 +116,63 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
return -EINVAL;
}
+ /* Check whether the firmware supports the VLAN protocol attribute */
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true);
+ if (err)
+ is_proto_sup = false;
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ if (!is_proto_sup)
+ return -EOPNOTSUPP;
+ update |= NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO;
+ }
+
/* Write VLAN tag to VF entry in VF config symbol */
- vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
+ vlan_tag = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos);
+
+ /* A vlan_tag of 0 means the configuration should be cleared; in
+ * that case the TPID has no meaning when configuring the firmware.
+ */
+ if (vlan_tag && is_proto_sup)
+ vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto));
+
vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;
- writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
- return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN,
- "vlan");
+ return nfp_net_sriov_update(app, vf, update, "vlan");
+}
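
The 32-bit word written above packs VID, QOS and, when the tag is non-zero and the firmware supports it, the TPID into a single register. A minimal userspace sketch of that packing, reusing the field masks from the nfp_net_sriov.h hunk later in this patch; field_prep() is a stand-in for the kernel's FIELD_PREP(), not driver code:

#include <stdint.h>
#include <stdio.h>

#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000
#define NFP_NET_VF_CFG_VLAN_QOS  0x0000e000
#define NFP_NET_VF_CFG_VLAN_VID  0x00000fff

/* stand-in for the kernel's FIELD_PREP(); uses a GCC/Clang builtin */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	/* example values: VID 100, priority 3, S-Tag TPID 0x88a8 */
	uint32_t vlan_tag = field_prep(NFP_NET_VF_CFG_VLAN_VID, 100) |
			    field_prep(NFP_NET_VF_CFG_VLAN_QOS, 3);

	/* the TPID is only written for a non-zero tag, as above */
	if (vlan_tag)
		vlan_tag |= field_prep(NFP_NET_VF_CFG_VLAN_PROT, 0x88a8);

	printf("vlan_tag = 0x%08x\n", vlan_tag); /* prints 0x88a86064 */
	return 0;
}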
+
+int nfp_app_set_vf_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ u32 vf_offset, ratevalue;
+ int err;
+
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true);
+ if (err)
+ return err;
+
+ if (max_tx_rate >= NFP_NET_VF_RATE_MAX ||
+ min_tx_rate >= NFP_NET_VF_RATE_MAX) {
+ nfp_warn(app->cpp, "tx-rate exceeds %d.\n",
+ NFP_NET_VF_RATE_MAX);
+ return -EINVAL;
+ }
+
+ vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;
+ ratevalue = FIELD_PREP(NFP_NET_VF_CFG_MAX_RATE,
+ max_tx_rate ? max_tx_rate :
+ NFP_NET_VF_RATE_MAX) |
+ FIELD_PREP(NFP_NET_VF_CFG_MIN_RATE, min_tx_rate);
+
+ writel(ratevalue,
+ app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_RATE);
+
+ return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_RATE,
+ "rate");
}
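
A zero max_tx_rate means "no limit" and is stored as the NFP_NET_VF_RATE_MAX sentinel; the read path in nfp_app_get_vf_config() below maps the sentinel back to 0. A small round-trip sketch using the masks from the header hunk further down (field_prep()/field_get() are stand-ins for the kernel macros):

#include <assert.h>
#include <stdint.h>

#define NFP_NET_VF_CFG_MIN_RATE 0x0000ffff
#define NFP_NET_VF_CFG_MAX_RATE 0xffff0000
#define NFP_NET_VF_RATE_MAX     0xffff

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t max_tx_rate = 0, min_tx_rate = 100; /* 0 = no limit */
	uint32_t reg, max;

	/* encode, as in nfp_app_set_vf_rate() */
	reg = field_prep(NFP_NET_VF_CFG_MAX_RATE,
			 max_tx_rate ? max_tx_rate : NFP_NET_VF_RATE_MAX) |
	      field_prep(NFP_NET_VF_CFG_MIN_RATE, min_tx_rate);

	/* decode, as in nfp_app_get_vf_config() */
	max = field_get(NFP_NET_VF_CFG_MAX_RATE, reg);
	if (max == NFP_NET_VF_RATE_MAX)
		max = 0;

	assert(max == 0);
	assert(field_get(NFP_NET_VF_CFG_MIN_RATE, reg) == 100);
	return 0;
}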
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
@@ -130,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF,
- "spoofchk");
+ "spoofchk", true);
if (err)
return err;
@@ -154,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
- "trust");
+ "trust", true);
if (err)
return err;
@@ -179,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE,
- "link_state");
+ "link_state", true);
if (err)
return err;
@@ -208,14 +261,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
- unsigned int vf_offset;
- u16 vlan_tci;
- u32 mac_hi;
+ u32 vf_offset, mac_hi, rate;
+ u32 vlan_tag;
u16 mac_lo;
u8 flags;
int err;
- err = nfp_net_sriov_check(app, vf, 0, "");
+ err = nfp_net_sriov_check(app, vf, 0, "", true);
if (err)
return err;
@@ -225,7 +277,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO);
flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL);
- vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ vlan_tag = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vf;
@@ -233,12 +285,27 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
put_unaligned_be32(mac_hi, &ivi->mac[0]);
put_unaligned_be16(mac_lo, &ivi->mac[4]);
- ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci);
- ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
-
+ ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
+ ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
+ if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false))
+ ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false);
+ if (!err) {
+ rate = readl(app->pf->vfcfg_tbl2 + vf_offset +
+ NFP_NET_VF_CFG_RATE);
+
+ ivi->max_tx_rate = FIELD_GET(NFP_NET_VF_CFG_MAX_RATE, rate);
+ ivi->min_tx_rate = FIELD_GET(NFP_NET_VF_CFG_MIN_RATE, rate);
+
+ if (ivi->max_tx_rate == NFP_NET_VF_RATE_MAX)
+ ivi->max_tx_rate = 0;
+ if (ivi->min_tx_rate == NFP_NET_VF_RATE_MAX)
+ ivi->min_tx_rate = 0;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index a3db0cbf6425..2d445fa199dc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -4,8 +4,7 @@
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
-/**
- * SRIOV VF configuration.
+/* SRIOV VF configuration.
* The configuration memory begins with a mailbox region for communication with
* the firmware followed by individual VF entries.
*/
@@ -20,6 +19,8 @@
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5)
+#define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
@@ -27,6 +28,8 @@
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5)
+#define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -44,12 +47,20 @@
#define NFP_NET_VF_CFG_LS_MODE_ENABLE 1
#define NFP_NET_VF_CFG_LS_MODE_DISABLE 2
#define NFP_NET_VF_CFG_VLAN 0x8
+#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000
#define NFP_NET_VF_CFG_VLAN_QOS 0xe000
#define NFP_NET_VF_CFG_VLAN_VID 0x0fff
+#define NFP_NET_VF_CFG_RATE 0xc
+#define NFP_NET_VF_CFG_MIN_RATE 0x0000ffff
+#define NFP_NET_VF_CFG_MAX_RATE 0xffff0000
+
+#define NFP_NET_VF_RATE_MAX 0xffff
int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
+int nfp_app_set_vf_rate(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate);
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
new file mode 100644
index 000000000000..aea507aed49d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <net/xdp_sock_drv.h>
+#include <trace/events/xdp.h>
+
+#include "nfp_app.h"
+#include "nfp_net.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+static void
+nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
+ struct xdp_buff *xdp)
+{
+ unsigned int headroom;
+
+ headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);
+
+ rx_ring->rxds[idx].fld.reserved = 0;
+ rx_ring->rxds[idx].fld.meta_len_dd = 0;
+
+ rx_ring->xsk_rxbufs[idx].xdp = xdp;
+ rx_ring->xsk_rxbufs[idx].dma_addr =
+ xsk_buff_xdp_get_frame_dma(xdp) + headroom;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
+{
+ rxbuf->dma_addr = 0;
+ rxbuf->xdp = NULL;
+}
+
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
+{
+ if (rxbuf->xdp)
+ xsk_buff_free(rxbuf->xdp);
+
+ nfp_net_xsk_rx_unstash(rxbuf);
+}
+
+void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (!rx_ring->cnt)
+ return;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
+}
+
+void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct xsk_buff_pool *pool = r_vec->xsk_pool;
+ unsigned int wr_idx, wr_ptr_add = 0;
+ struct xdp_buff *xdp;
+
+ while (nfp_net_rx_space(rx_ring)) {
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ xdp = xsk_buff_alloc(pool);
+ if (!xdp)
+ break;
+
+ nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);
+
+ /* The freelist descriptor carries a 48-bit wide DMA address on the
+ * NFP3800, so the *_48b macro is used accordingly. It is also safe
+ * to fill in a 40-bit address, since the top 8 bits are set to 0.
+ */
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ rx_ring->xsk_rxbufs[wr_idx].dma_addr);
+
+ rx_ring->wr_p++;
+ wr_ptr_add++;
+ }
+
+ /* Ensure all records are visible before incrementing write counter. */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
+}
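
The wmb() above enforces the usual producer ordering: all descriptor stores must be visible before the device can observe the advanced write pointer. A generic C11 sketch of the same publish pattern, illustrative only (the kernel path uses wmb() plus an MMIO doorbell write, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint64_t desc[256];
	_Atomic uint32_t wr_ptr; /* polled by the consumer/device */
};

static void ring_publish(struct ring *r, uint32_t idx, uint64_t d)
{
	r->desc[idx & 255] = d;                    /* 1. fill descriptor */
	atomic_thread_fence(memory_order_release); /* 2. order the stores */
	atomic_store_explicit(&r->wr_ptr, idx + 1, /* 3. ring the doorbell */
			      memory_order_relaxed);
}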
+
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+ struct nfp_net_xsk_rx_buf *xrxbuf)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_xsk_rx_free(xrxbuf);
+}
+
+static void nfp_net_xsk_pool_unmap(struct device *dev,
+ struct xsk_buff_pool *pool)
+{
+ return xsk_pool_dma_unmap(pool, 0);
+}
+
+static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
+{
+ return xsk_pool_dma_map(pool, dev, 0);
+}
+
+int nfp_net_xsk_setup_pool(struct net_device *netdev,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ struct xsk_buff_pool *prev_pool;
+ struct nfp_net_dp *dp;
+ int err;
+
+ /* NFDK doesn't implement xsk yet. */
+ if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
+ return -EOPNOTSUPP;
+
+ /* Reject on old FWs so we can drop some checks on the datapath. */
+ if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ return -EOPNOTSUPP;
+ if (!nn->dp.chained_metadata_format)
+ return -EOPNOTSUPP;
+
+ /* Install */
+ if (pool) {
+ err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
+ if (err)
+ return err;
+ }
+
+ /* Reconfig/swap */
+ dp = nfp_net_clone_dp(nn);
+ if (!dp) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ prev_pool = dp->xsk_pools[queue_id];
+ dp->xsk_pools[queue_id] = pool;
+
+ err = nfp_net_ring_reconfig(nn, dp, NULL);
+ if (err)
+ goto err_unmap;
+
+ /* Uninstall */
+ if (prev_pool)
+ nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);
+
+ return 0;
+err_unmap:
+ if (pool)
+ nfp_net_xsk_pool_unmap(nn->dp.dev, pool);
+
+ return err;
+}
+
+int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* queue_id comes from a zero-copy socket installed via
+ * XDP_SETUP_XSK_POOL, so it must be within our vector range.
+ * Moreover, our napi structs are statically allocated, so we can
+ * always kick them without worrying whether a reconfig is in
+ * progress or the interface is down.
+ */
+ napi_schedule(&nn->r_vecs[queue_id].napi);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
new file mode 100644
index 000000000000..6d281eb2fc1c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#ifndef _NFP_XSK_H_
+#define _NFP_XSK_H_
+
+#include <net/xdp_sock_drv.h>
+
+#define NFP_NET_XSK_TX_BATCH 16 /* XSK TX transmission batch size. */
+
+static inline bool nfp_net_has_xsk_pool_slow(struct nfp_net_dp *dp,
+ unsigned int qid)
+{
+ return dp->xdp_prog && dp->xsk_pools[qid];
+}
+
+static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
+{
+ return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1;
+}
+
+static inline int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring)
+{
+ return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1;
+}
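
wr_p and rd_p are free-running counters, so cnt - wr_p + rd_p - 1 remains correct across unsigned wrap-around. A quick numeric check, with an illustrative 512-entry ring:

#include <assert.h>
#include <stdint.h>

static int ring_space(uint32_t cnt, uint32_t wr_p, uint32_t rd_p)
{
	/* 32-bit unsigned arithmetic wraps, keeping the result exact */
	return (int)(cnt - wr_p + rd_p - 1);
}

int main(void)
{
	assert(ring_space(512, 0, 0) == 511);   /* empty ring */
	assert(ring_space(512, 10, 4) == 505);  /* 6 descriptors in flight */
	/* wr_p has wrapped past rd_p; still 6 in flight */
	assert(ring_space(512, 2, UINT32_MAX - 3) == 505);
	return 0;
}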
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+ struct nfp_net_xsk_rx_buf *xrxbuf);
+int nfp_net_xsk_setup_pool(struct net_device *netdev, struct xsk_buff_pool *pool,
+ u16 queue_id);
+
+void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring);
+
+void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring);
+
+int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+
+#endif /* _NFP_XSK_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 87f2268b16d6..e19bb0150cb5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/etherdevice.h>
+#include "nfpcore/nfp_dev.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
@@ -36,11 +37,22 @@ struct nfp_net_vf {
static const char nfp_net_driver_name[] = "nfp_netvf";
-#define PCI_DEVICE_NFP6000VF 0x6003
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800_VF,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000_VF,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
+ },
+ { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800_VF,
+ PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
+ },
+ { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000_VF,
+ PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
},
{ 0, } /* Required last entry. */
};
@@ -65,6 +77,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ const struct nfp_dev_info *dev_info;
struct nfp_net_fw_version fw_ver;
int max_tx_rings, max_rx_rings;
u32 tx_bar_off, rx_bar_off;
@@ -78,6 +91,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
int stride;
int err;
+ dev_info = &nfp_dev_info[pci_id->driver_data];
+
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf)
return -ENOMEM;
@@ -95,8 +110,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- err = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
if (err)
goto err_pci_regions;
@@ -116,9 +130,11 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+ fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ fw_ver.extend, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
}
@@ -138,7 +154,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
break;
default:
dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class,
+ fw_ver.extend, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
@@ -167,19 +183,19 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tx_bar_off = NFP_PCIE_QUEUE(startq);
+ tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- rx_bar_off = NFP_PCIE_QUEUE(startq);
+ rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
+ max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
}
vf->nn = nn;
- nn->fw_ver = fw_ver;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 93c5bfc0510b..4f2308570dcf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -75,23 +75,6 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
-{
- struct nfp_port *port;
-
- lockdep_assert_held(&pf->lock);
-
- if (type != NFP_PORT_PHYS_PORT)
- return NULL;
-
- list_for_each_entry(port, &pf->ports, port_list)
- if (port->eth_id == id)
- return port;
-
- return NULL;
-}
-
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
{
if (!port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index ae4da189d955..6793cdf9ff11 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -46,6 +46,7 @@ enum nfp_port_flags {
* @tc_offload_cnt: number of active TC offloads, how offloads are counted
* is not defined, use as a boolean
* @app: backpointer to the app structure
+ * @link_cb: callback invoked when the link status changes
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
@@ -66,6 +67,7 @@ struct nfp_port {
unsigned long tc_offload_cnt;
struct nfp_app *app;
+ void (*link_cb)(struct nfp_port *port);
struct devlink_port dl_port;
@@ -106,8 +108,6 @@ nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
int nfp_port_get_port_parent_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid);
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
@@ -132,8 +132,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
void nfp_devlink_port_type_eth_set(struct nfp_port *port);
void nfp_devlink_port_type_clear(struct nfp_port *port);
-/**
- * Mac stats (0x0000 - 0x0200)
+/* Mac stats (0x0000 - 0x0200)
* all counters are 64bit.
*/
#define NFP_MAC_STATS_BASE 0x0000
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
index afab6f0fc564..6ad43c7cefe6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -4,7 +4,6 @@
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
-#include <linux/kernel.h>
#include <linux/crc32.h>
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 252fe06f58aa..33b4c2856316 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include "nfp_cpp.h"
+#include "nfp_dev.h"
#include "nfp6000/nfp6000.h"
@@ -100,11 +101,7 @@
#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
- (0x400 + ((bar) * 8 + (slot)) * 4)
-
-#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
- (((bar) * 8 + (slot)) * 4)
+#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index) ((bar_index) * 4)
/* The number of explicit BARs to reserve.
* Minimum is 0, maximum is 4 on the NFP6000.
@@ -145,6 +142,7 @@ struct nfp_bar {
struct nfp6000_pcie {
struct pci_dev *pdev;
struct device *dev;
+ const struct nfp_dev_info *dev_info;
/* PCI BAR management */
spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */
@@ -269,19 +267,16 @@ compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
static int
nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
{
- int base, slot;
- int xbar;
+ unsigned int xbar;
- base = bar->index >> 3;
- slot = bar->index & 7;
+ xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index);
if (nfp->iomem.csr) {
- xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
writel(newcfg, nfp->iomem.csr + xbar);
/* Readback to ensure BAR is flushed */
readl(nfp->iomem.csr + xbar);
} else {
- xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+ xbar += nfp->dev_info->pcie_cfg_expbar_offset;
pci_write_config_dword(nfp->pdev, xbar, newcfg);
}
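
The two macros removed above differed only in the 0x400 config-space base, which now comes from dev_info->pcie_cfg_expbar_offset (0x0400 for the NFP4000/5000/6000 and 0x0a00 for the NFP3800, per the nfp_dev.c hunk below). Since base * 8 + slot reconstructs the flat BAR index, both forms compute identical offsets; a quick check of the NFP6000 case:

#include <assert.h>

#define OLD_CFG_EXPBAR(bar, slot) (0x400 + ((bar) * 8 + (slot)) * 4)
#define NEW_EXPBAR_OFFSET(idx)    ((idx) * 4)

int main(void)
{
	int index;

	for (index = 0; index < 24; index++) {
		int base = index >> 3, slot = index & 7;

		/* new flat offset plus the per-chip base == old macro */
		assert(NEW_EXPBAR_OFFSET(index) + 0x0400 ==
		       OLD_CFG_EXPBAR(base, slot));
	}
	return 0;
}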
@@ -622,16 +617,17 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
nfp6000_bar_write(nfp, bar, barcfg_msix_general);
- nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+ nfp->expl.data = bar->iomem + NFP_PCIE_SRAM +
+ nfp->dev_info->pcie_expl_offset;
switch (nfp->pdev->device) {
- case PCI_DEVICE_ID_NETRONOME_NFP3800:
+ case PCI_DEVICE_ID_NFP3800:
pf = nfp->pdev->devfn & 7;
nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
break;
- case PCI_DEVICE_ID_NETRONOME_NFP4000:
- case PCI_DEVICE_ID_NETRONOME_NFP5000:
- case PCI_DEVICE_ID_NETRONOME_NFP6000:
+ case PCI_DEVICE_ID_NFP4000:
+ case PCI_DEVICE_ID_NFP5000:
+ case PCI_DEVICE_ID_NFP6000:
nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
break;
default:
@@ -644,12 +640,12 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
}
switch (nfp->pdev->device) {
- case PCI_DEVICE_ID_NETRONOME_NFP3800:
+ case PCI_DEVICE_ID_NFP3800:
expl_groups = 1;
break;
- case PCI_DEVICE_ID_NETRONOME_NFP4000:
- case PCI_DEVICE_ID_NETRONOME_NFP5000:
- case PCI_DEVICE_ID_NETRONOME_NFP6000:
+ case PCI_DEVICE_ID_NFP4000:
+ case PCI_DEVICE_ID_NFP5000:
+ case PCI_DEVICE_ID_NFP6000:
expl_groups = 4;
break;
default:
@@ -1306,18 +1302,20 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = {
/**
* nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
* @pdev: NFP6000 PCI device
+ * @dev_info: NFP ASIC params
*
* Return: NFP CPP handle
*/
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info)
{
struct nfp6000_pcie *nfp;
u16 interface;
int err;
/* Finished with card initialization. */
- dev_info(&pdev->dev,
- "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n");
+ dev_info(&pdev->dev, "Network Flow Processor %s PCIe Card Probe\n",
+ dev_info->chip_names);
pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
@@ -1328,6 +1326,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
nfp->dev = &pdev->dev;
nfp->pdev = pdev;
+ nfp->dev_info = dev_info;
init_waitqueue_head(&nfp->bar_waiters);
spin_lock_init(&nfp->bar_lock);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
index 6d1bffa6eac6..097660b673db 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -11,6 +11,7 @@
#include "nfp_cpp.h"
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info);
#endif /* NFP6000_PCIE_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 2dd0f5842873..3d379e937184 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -32,10 +32,6 @@
#define PCI_64BIT_BAR_COUNT 3
-/* NFP hardware vendor/device ids.
- */
-#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
-
#define NFP_CPP_NUM_TARGETS 16
/* Max size of area it should be safe to request */
#define NFP_CPP_SAFE_AREA_SIZE SZ_2M
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 34c0d2ddf9ef..a8286d0032d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
}
/* Adjust the start address to be cache size aligned */
- cache->id = id;
cache->addr = addr & ~(u64)(cache->size - 1);
/* Re-init to the new ID and address */
@@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
return NULL;
}
+ cache->id = id;
+
exit:
/* Adjust offset */
*offset = addr - cache->addr;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 85734c6badf5..508ae6b571ca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -22,6 +22,7 @@
#include "nfp6000/nfp_xpb.h"
/* NFP6000 PL */
+#define NFP_PL_DEVICE_PART_NFP6000 0x6200
#define NFP_PL_DEVICE_ID 0x00000004
#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0)
#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16)
@@ -130,8 +131,12 @@ int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
return err;
*model = reg & NFP_PL_DEVICE_MODEL_MASK;
- if (*model & NFP_PL_DEVICE_ID_MASK)
- *model -= 0x10;
+ /* Disambiguate the NFP4000/NFP5000/NFP6000 chips */
+ if (FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) ==
+ NFP_PL_DEVICE_PART_NFP6000) {
+ if (*model & NFP_PL_DEVICE_ID_MASK)
+ *model -= 0x10;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
new file mode 100644
index 000000000000..0725b51c2a95
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
+#include "nfp_dev.h"
+
+const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
+ [NFP_DEV_NFP3800] = {
+ .dma_mask = DMA_BIT_MASK(48),
+ .qc_idx_mask = GENMASK(8, 0),
+ .qc_addr_offset = 0x400000,
+ .min_qc_size = 512,
+ .max_qc_size = SZ_64K,
+
+ .chip_names = "NFP3800",
+ .pcie_cfg_expbar_offset = 0x0a00,
+ .pcie_expl_offset = 0xd000,
+ .qc_area_sz = 0x100000,
+ },
+ [NFP_DEV_NFP3800_VF] = {
+ .dma_mask = DMA_BIT_MASK(48),
+ .qc_idx_mask = GENMASK(8, 0),
+ .qc_addr_offset = 0,
+ .min_qc_size = 512,
+ .max_qc_size = SZ_64K,
+ },
+ [NFP_DEV_NFP6000] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(7, 0),
+ .qc_addr_offset = 0x80000,
+ .min_qc_size = 256,
+ .max_qc_size = SZ_256K,
+
+ .chip_names = "NFP4000/NFP5000/NFP6000",
+ .pcie_cfg_expbar_offset = 0x0400,
+ .pcie_expl_offset = 0x1000,
+ .qc_area_sz = 0x80000,
+ },
+ [NFP_DEV_NFP6000_VF] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(7, 0),
+ .qc_addr_offset = 0,
+ .min_qc_size = 256,
+ .max_qc_size = SZ_256K,
+ },
+};
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
new file mode 100644
index 000000000000..e4d38178de0f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DEV_H_
+#define _NFP_DEV_H_
+
+#include <linux/types.h>
+
+#define PCI_VENDOR_ID_CORIGINE 0x1da8
+#define PCI_DEVICE_ID_NFP3800 0x3800
+#define PCI_DEVICE_ID_NFP4000 0x4000
+#define PCI_DEVICE_ID_NFP5000 0x5000
+#define PCI_DEVICE_ID_NFP6000 0x6000
+#define PCI_DEVICE_ID_NFP3800_VF 0x3803
+#define PCI_DEVICE_ID_NFP6000_VF 0x6003
+
+enum nfp_dev_id {
+ NFP_DEV_NFP3800,
+ NFP_DEV_NFP3800_VF,
+ NFP_DEV_NFP6000,
+ NFP_DEV_NFP6000_VF,
+ NFP_DEV_CNT,
+};
+
+struct nfp_dev_info {
+ /* Required fields */
+ u64 dma_mask;
+ u32 qc_idx_mask;
+ u32 qc_addr_offset;
+ u32 min_qc_size;
+ u32 max_qc_size;
+
+ /* PF-only fields */
+ const char *chip_names;
+ u32 pcie_cfg_expbar_offset;
+ u32 pcie_expl_offset;
+ u32 qc_area_sz;
+};
+
+extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT];
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 10e7d8b21c46..730fea214b8a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -513,7 +513,7 @@ nfp_nsp_command_buf_dma_sg(struct nfp_nsp *nsp,
dma_size = BIT_ULL(dma_order);
nseg = DIV_ROUND_UP(max_size, chunk_size);
- chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);
+ chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);
if (!chunks)
return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index f5360bae6f75..992d72ac98d3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -132,6 +132,7 @@ enum nfp_eth_fec {
* @ports.interface: interface (module) plugged in
* @ports.media: media type of the @interface
* @ports.fec: forward error correction mode
+ * @ports.act_fec: active forward error correction mode
* @ports.aneg: auto negotiation mode
* @ports.mac_addr: interface MAC address
* @ports.label_port: port id
@@ -162,6 +163,7 @@ struct nfp_eth_table {
enum nfp_eth_media media;
enum nfp_eth_fec fec;
+ enum nfp_eth_fec act_fec;
enum nfp_eth_aneg aneg;
u8 mac_addr[ETH_ALEN];
@@ -172,6 +174,7 @@ struct nfp_eth_table {
bool enabled;
bool tx_enabled;
bool rx_enabled;
+ bool supp_aneg;
bool override_changed;
@@ -196,6 +199,8 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
return !!eth_port->fec_modes_supported;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 311a5be25acb..bb64efec4c46 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -27,6 +27,7 @@
#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
+#define NSP_ETH_PORT_SUPP_ANEG BIT_ULL(63)
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
@@ -40,6 +41,7 @@
#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
+#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -49,6 +51,7 @@
#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -169,7 +172,14 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
if (dst->fec_modes_supported)
dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
- dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
+ dst->fec = FIELD_GET(NSP_ETH_STATE_FEC, state);
+ dst->act_fec = dst->fec;
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 33)
+ return;
+
+ dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state);
+ dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port);
}
static void
@@ -492,6 +502,36 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
return 0;
}
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ /* The set id mode operation was added in ABI 0.32 */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, state);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
({ \
__BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 07a00dd9cfe0..19d043b593cc 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -324,8 +324,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
+ sizeof(*priv->rx_bd_v) *
((i + 1) % RX_BD_NUM));
- skb = netdev_alloc_skb_ip_align(ndev,
- NIXGE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
@@ -899,6 +900,7 @@ static int nixge_open(struct net_device *ndev)
err_rx_irq:
free_irq(priv->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&priv->napi);
phy_stop(phy);
phy_disconnect(phy);
tasklet_kill(&priv->dma_err_tasklet);
@@ -989,8 +991,8 @@ static const struct net_device_ops nixge_netdev_ops = {
static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, "nixge", sizeof(ed->driver));
- strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
+ strscpy(ed->driver, "nixge", sizeof(ed->driver));
+ strscpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int
@@ -1293,7 +1295,7 @@ static int nixge_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->dev = &pdev->dev;
- netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, nixge_poll);
err = nixge_of_get_resources(pdev);
if (err)
goto free_netdev;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 660013f716d4..daa028729d44 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -56,8 +56,8 @@
#include <asm/irq.h>
-#define TX_WORK_PER_LOOP 64
-#define RX_WORK_PER_LOOP 64
+#define TX_WORK_PER_LOOP NAPI_POLL_WEIGHT
+#define RX_WORK_PER_LOOP NAPI_POLL_WEIGHT
/*
* Hardware access:
@@ -4291,9 +4291,9 @@ static void nv_do_stats_poll(struct timer_list *t)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -5876,7 +5876,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
else
dev->netdev_ops = &nv_netdev_ops_optimized;
- netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
+ netif_napi_add(dev, &np->napi, nv_napi_poll);
dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index bc39558fe82b..1a4a272f4c5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1184,9 +1184,9 @@ static int lpc_eth_open(struct net_device *ndev)
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
@@ -1373,7 +1373,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->duplex = DUPLEX_FULL;
__lpc_params_setup(pldat);
- netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
ret = register_netdev(ndev);
if (ret) {
@@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct netdata_local *pldat;
+ int ret;
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(ndev->irq);
@@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
pldat = netdev_priv(ndev);
/* Enable interface clock */
- clk_enable(pldat->clk);
+ ret = clk_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Reset and initialize */
__lpc_eth_reset(pldat);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 84cc79e928c8..541b8bcd3223 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -169,9 +169,9 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 1dc40c537281..3f2c30184752 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -32,8 +32,6 @@
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
-#define PCH_GBE_TX_WEIGHT 64
-#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
/* Initialize the wake-on-LAN settings */
@@ -1469,7 +1467,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc->gbec_status, tx_desc->dma_status);
unused = PCH_GBE_DESC_UNUSED(tx_ring);
- thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
+ thresh = tx_ring->count - NAPI_POLL_WEIGHT;
if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
{ /* current marked clean, tx queue filling up, do extra clean */
int j, k;
@@ -1482,13 +1480,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
/* current marked clean, scan for more that need cleaning. */
k = i;
- for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
+ for (j = 0; j < NAPI_POLL_WEIGHT; j++)
{
tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
if (++k >= tx_ring->count) k = 0; /*increment, wrap*/
}
- if (j < PCH_GBE_TX_WEIGHT) {
+ if (j < NAPI_POLL_WEIGHT) {
netdev_dbg(adapter->netdev,
"clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
unused, j, i, k, tx_ring->next_to_use,
@@ -1547,7 +1545,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ if (cleaned_count++ == NAPI_POLL_WEIGHT) {
cleaned = false;
break;
}
@@ -2518,8 +2516,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
- netif_napi_add(netdev, &adapter->napi,
- pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
+ netif_napi_add(netdev, &adapter->napi, pch_gbe_napi_poll);
netdev->hw_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features = netdev->hw_features;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 9c408328be0d..1cc001087193 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1819,9 +1819,9 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct hamachi_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 12105f62cbdd..640ac01689fb 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -191,7 +191,7 @@ IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
-and an AlphaStation to verifty the Alpha port!
+and an AlphaStation to verify the Alpha port!
IVb. References
@@ -1340,9 +1340,9 @@ static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
struct yellowfin_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index f0ace3a0e85c..aaab590ef548 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1697,7 +1697,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mac->pdev = pdev;
mac->netdev = dev;
- netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
+ netif_napi_add(dev, &mac->napi, pasemi_mac_poll);
dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_GSO;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 5e25411ff02f..602f4d45d529 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,7 +18,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define DEVCMD_TIMEOUT 10
+#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
@@ -78,6 +78,9 @@ void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
+int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
+void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
+ int err);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
@@ -89,4 +92,6 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
+const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr);
+
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 7e296fa71b36..ce436e97324a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -109,8 +109,8 @@ void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
static void ionic_vf_dealloc_locked(struct ionic *ionic)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
struct ionic_vf *v;
- dma_addr_t dma = 0;
int i;
if (!ionic->vfs)
@@ -120,9 +120,8 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
v = &ionic->vfs[i];
if (v->stats_pa) {
- (void)ionic_set_vf_config(ionic, i,
- IONIC_VF_ATTR_STATSADDR,
- (u8 *)&dma);
+ vfc.stats_pa = 0;
+ (void)ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
@@ -143,6 +142,7 @@ static void ionic_vf_dealloc(struct ionic *ionic)
static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
struct ionic_vf *v;
int err = 0;
int i;
@@ -166,9 +166,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
}
ionic->num_vfs++;
+
/* ignore failures from older FW, we just won't get stats */
- (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
- (u8 *)&v->stats_pa);
+ vfc.stats_pa = cpu_to_le64(v->stats_pa);
+ (void)ionic_set_vf_config(ionic, i, &vfc);
}
out:
@@ -255,7 +256,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = ionic_map_bars(ionic);
if (err)
- goto err_out_pci_disable_device;
+ goto err_out_pci_release_regions;
/* Configure the device */
err = ionic_setup(ionic);
@@ -319,22 +320,25 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
- err = ionic_lif_register(ionic->lif);
+ err = ionic_devlink_register(ionic);
if (err) {
- dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
+ dev_err(dev, "Cannot register devlink: %d\n", err);
goto err_out_deinit_lifs;
}
- err = ionic_devlink_register(ionic);
+ err = ionic_lif_register(ionic->lif);
if (err) {
- dev_err(dev, "Cannot register devlink: %d\n", err);
- goto err_out_deregister_lifs;
+ dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
+ goto err_out_deregister_devlink;
}
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
return 0;
-err_out_deregister_lifs:
- ionic_lif_unregister(ionic->lif);
+err_out_deregister_devlink:
+ ionic_devlink_unregister(ionic);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
ionic_lif_deinit(ionic->lif);
@@ -348,7 +352,6 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
- del_timer_sync(&ionic->watchdog_timer);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
@@ -357,6 +360,7 @@ err_out_teardown:
err_out_unmap_bars:
ionic_unmap_bars(ionic);
+err_out_pci_release_regions:
pci_release_regions(pdev);
err_out_pci_disable_device:
pci_disable_device(pdev);
@@ -376,8 +380,8 @@ static void ionic_remove(struct pci_dev *pdev)
del_timer_sync(&ionic->watchdog_timer);
if (ionic->lif) {
- ionic_devlink_unregister(ionic);
ionic_lif_unregister(ionic->lif);
+ ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d57e80d44c9d..9d0514cfeb5c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -33,7 +33,8 @@ static void ionic_watchdog_cb(struct timer_list *t)
!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
- if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state)) {
+ if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state) &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "rxmode change dropped\n");
@@ -46,6 +47,24 @@ static void ionic_watchdog_cb(struct timer_list *t)
}
}
+static void ionic_watchdog_init(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+
+ timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
+ ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+
+ /* set times to ensure the first check will proceed */
+ atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
+ idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
+ /* init as ready, so no transition if the first check succeeds */
+ idev->last_fw_hb = 0;
+ idev->fw_hb_ready = true;
+ idev->fw_status_ready = true;
+ idev->fw_generation = IONIC_FW_STS_F_GENERATION &
+ ioread8(&idev->dev_info_regs->fw_status);
+}
+
void ionic_init_devinfo(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -109,21 +128,7 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
- ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
-
- /* set times to ensure the first check will proceed */
- atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
- idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
- /* init as ready, so no transition if the first check succeeds */
- idev->last_fw_hb = 0;
- idev->fw_hb_ready = true;
- idev->fw_status_ready = true;
- idev->fw_generation = IONIC_FW_STS_F_GENERATION &
- ioread8(&idev->dev_info_regs->fw_status);
-
- mod_timer(&ionic->watchdog_timer,
- round_jiffies(jiffies + ionic->watchdog_period));
+ ionic_watchdog_init(ionic);
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -132,10 +137,21 @@ int ionic_dev_setup(struct ionic *ionic)
}
/* Devcmd Interface */
+bool ionic_is_fw_running(struct ionic_dev *idev)
+{
+ u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
+
+ /* firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
+}
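
A surprise-removed or wedged PCI device reads back all-ones, so 0xff must be treated as "not running" even though the RUNNING bit would appear set. A tiny check of that predicate; the bit value of IONIC_FW_STS_F_RUNNING is assumed here for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define IONIC_FW_STS_F_RUNNING 0x01 /* assumed bit position */

static bool fw_running(uint8_t fw_status)
{
	return fw_status != 0xff && (fw_status & IONIC_FW_STS_F_RUNNING);
}

int main(void)
{
	assert(fw_running(0x01));  /* running */
	assert(!fw_running(0x00)); /* stopped */
	assert(!fw_running(0xff)); /* bad PCI read looks "running" */
	return 0;
}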
+
int ionic_heartbeat_check(struct ionic *ionic)
{
- struct ionic_dev *idev = &ionic->idev;
unsigned long check_time, last_check_time;
+ struct ionic_dev *idev = &ionic->idev;
+ struct ionic_lif *lif = ionic->lif;
bool fw_status_ready = true;
bool fw_hb_ready;
u8 fw_generation;
@@ -155,13 +171,10 @@ do_check_time:
goto do_check_time;
}
- /* firmware is useful only if the running bit is set and
- * fw_status != 0xff (bad PCI read)
- * If fw_status is not ready don't bother with the generation.
- */
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
+ /* If fw_status is not ready, don't bother with the generation */
+ if (!ionic_is_fw_running(idev)) {
fw_status_ready = false;
} else {
fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
@@ -176,26 +189,40 @@ do_check_time:
* the down, the next watchdog will see the fw is up
* and the generation value stable, so will trigger
* the fw-up activity.
+ *
+ * If we had already moved to FW_RESET from a RESET event,
+ * it is possible that we never saw the fw_status go to 0,
+ * so we fake the current idev->fw_status_ready here to
+ * force the transition and get FW up again.
*/
- fw_status_ready = false;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ idev->fw_status_ready = false; /* go to running */
+ else
+ fw_status_ready = false; /* go to down */
}
}
+ dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n",
+ fw_status, fw_status_ready, idev->fw_status_ready,
+ idev->last_fw_hb, lif->state[0]);
+
/* is this a transition? */
- if (fw_status_ready != idev->fw_status_ready) {
- struct ionic_lif *lif = ionic->lif;
+ if (fw_status_ready != idev->fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
bool trigger = false;
idev->fw_status_ready = fw_status_ready;
- if (!fw_status_ready) {
- dev_info(ionic->dev, "FW stopped %u\n", fw_status);
- if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- trigger = true;
- } else {
- dev_info(ionic->dev, "FW running %u\n", fw_status);
- if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- trigger = true;
+ if (!fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status);
+ trigger = true;
+
+ } else if (fw_status_ready &&
+ test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ dev_info(ionic->dev, "FW running 0x%02x\n", fw_status);
+ trigger = true;
}
if (trigger) {
@@ -210,12 +237,14 @@ do_check_time:
}
}
- if (!fw_status_ready)
+ if (!idev->fw_status_ready)
return -ENXIO;
- /* wait at least one watchdog period since the last heartbeat */
+ /* Because of some variability in the actual FW heartbeat, we
+ * wait longer than the DEVCMD_TIMEOUT before checking again.
+ */
last_check_time = idev->last_hb_time;
- if (time_before(check_time, last_check_time + ionic->watchdog_period))
+ if (time_before(check_time, last_check_time + DEVCMD_TIMEOUT * 2 * HZ))
return 0;
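For scale, assuming DEVCMD_TIMEOUT is expressed in seconds (its multiplication by HZ here implies that) and taking hypothetical values DEVCMD_TIMEOUT == 10 and HZ == 250:

    /* window = DEVCMD_TIMEOUT * 2 * HZ = 10 * 2 * 250 = 5000 jiffies,
     * i.e. the FW heartbeat word is re-read at most every 20 seconds
     */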
fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
@@ -392,60 +421,63 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
}
/* VF commands */
-int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
+int ionic_set_vf_config(struct ionic *ionic, int vf,
+ struct ionic_vf_setattr_cmd *vfc)
{
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
- .vf_setattr.attr = attr,
+ .vf_setattr.attr = vfc->attr,
.vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
+ memcpy(cmd.vf_setattr.pad, vfc->pad, sizeof(vfc->pad));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
+ struct ionic_vf_getattr_comp *comp)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_getattr.opcode = IONIC_CMD_VF_GETATTR,
+ .vf_getattr.attr = attr,
+ .vf_getattr.vf_index = cpu_to_le16(vf),
+ };
+ int err;
+
+ if (vf >= ionic->num_vfs)
+ return -EINVAL;
+
switch (attr) {
case IONIC_VF_ATTR_SPOOFCHK:
- cmd.vf_setattr.spoofchk = *data;
- dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_TRUST:
- cmd.vf_setattr.trust = *data;
- dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_LINKSTATE:
- cmd.vf_setattr.linkstate = *data;
- dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_MAC:
- ether_addr_copy(cmd.vf_setattr.macaddr, data);
- dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
- __func__, vf, data);
- break;
case IONIC_VF_ATTR_VLAN:
- cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
- dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
- __func__, vf, *(u16 *)data);
- break;
case IONIC_VF_ATTR_RATE:
- cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
- dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
- __func__, vf, *(u32 *)data);
break;
case IONIC_VF_ATTR_STATSADDR:
- cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
- dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
- __func__, vf, *(u64 *)data);
- break;
default:
return -EINVAL;
}
mutex_lock(&ionic->dev_cmd_lock);
ionic_dev_cmd_go(&ionic->idev, &cmd);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ err = ionic_dev_cmd_wait_nomsg(ionic, DEVCMD_TIMEOUT);
+ memcpy_fromio(comp, &ionic->idev.dev_cmd_regs->comp.vf_getattr,
+ sizeof(*comp));
mutex_unlock(&ionic->dev_cmd_lock);
+ if (err && comp->status != IONIC_RC_ENOSUPP)
+ ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode,
+ comp->status, err);
+
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index e5acf3bd62b2..563c302eb033 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -318,7 +318,10 @@ void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
-int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+int ionic_set_vf_config(struct ionic *ionic, int vf,
+ struct ionic_vf_setattr_cmd *vfc);
+int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
+ struct ionic_vf_getattr_comp *comp);
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
@@ -353,5 +356,6 @@ void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
int ionic_heartbeat_check(struct ionic *ionic);
+bool ionic_is_fw_running(struct ionic_dev *idev);
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 386a5cf1e224..01c22701482d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -74,10 +74,10 @@ static void ionic_get_drvinfo(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
+ strscpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
+ strscpy(drvinfo->bus_info, ionic_bus_info(ionic),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 278610ed7227..4a90f611c611 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -759,7 +759,7 @@ enum ionic_txq_desc_opcode {
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
- * updated. Similarly, if @csum_l4 is set the the L4
+ * updated. Similarly, if @csum_l4 is set the L4
* checksum is updated. If @encap is set then encap header
* checksums are also updated.
*
@@ -1368,9 +1368,9 @@ union ionic_port_config {
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
- * @link_down_count: number of times link went from from up to down
+ * @link_down_count: number of times link went from up to down
* @fec_type: fec type (enum ionic_port_fec_type)
- * @xcvr: tranceiver status
+ * @xcvr: transceiver status
*/
struct ionic_port_status {
__le32 id;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 2ff7be17e5af..19d4848df17d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
+#include <linux/vmalloc.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -393,11 +394,11 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_qcq_intr_free(lif, qcq);
if (qcq->cq.info) {
- devm_kfree(dev, qcq->cq.info);
+ vfree(qcq->cq.info);
qcq->cq.info = NULL;
}
if (qcq->q.info) {
- devm_kfree(dev, qcq->q.info);
+ vfree(qcq->q.info);
qcq->q.info = NULL;
}
}
@@ -528,8 +529,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev;
new->flags = flags;
- new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
- GFP_KERNEL);
+ new->q.info = vzalloc(num_descs * sizeof(*new->q.info));
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
@@ -550,8 +550,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (err)
goto err_out;
- new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
- GFP_KERNEL);
+ new->cq.info = vzalloc(num_descs * sizeof(*new->cq.info));
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
@@ -640,14 +639,14 @@ err_out_free_cq:
err_out_free_q:
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
- devm_kfree(dev, new->cq.info);
+ vfree(new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
ionic_intr_free(lif->ionic, new->intr.index);
}
err_out_free_q_info:
- devm_kfree(dev, new->q.info);
+ vfree(new->q.info);
err_out_free_qcq:
devm_kfree(dev, new);
err_out:
@@ -775,8 +774,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -831,11 +829,9 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
- netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -1112,12 +1108,17 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
break;
case IONIC_EVENT_RESET:
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "Reset event dropped\n");
- } else {
- work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ if (lif->ionic->idev.fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "Reset event dropped\n");
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
}
break;
default:
@@ -1433,7 +1434,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
- if ((vlan_flags & features) &&
+ if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
@@ -1560,8 +1561,67 @@ static int ionic_set_features(struct net_device *netdev,
return err;
}
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+
+ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+ return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+ u8 get_mac[ETH_ALEN];
+ int err;
+
+ err = ionic_set_attr_mac(lif, mac);
+ if (err)
+ return err;
+
+ err = ionic_get_attr_mac(lif, get_mac);
+ if (err)
+ return err;
+
+ /* Deal with older firmware that silently ignores the set attr mac:
+ * it doesn't actually change the mac and doesn't return an error, so
+ * we do the get attr to verify whether the set actually happened.
+ */
+ if (!ether_addr_equal(get_mac, mac))
+ return 1;
+
+ return 0;
+}
+
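The tri-state return is the point of the helper: negative for a transport error, 0 when the readback matches, positive when old firmware silently ignored the set. A sketch of a caller, mirroring ionic_set_mac_address() below:

    err = ionic_program_mac(lif, mac);
    if (err < 0)
        return err;             /* adminq/transport failure */
    if (err > 0)                /* old FW ignored the set */
        netdev_dbg(netdev, "FW did not take the new MAC\n");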
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
+ struct ionic_lif *lif = netdev_priv(netdev);
struct sockaddr *addr = sa;
u8 *mac;
int err;
@@ -1570,6 +1630,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
if (ether_addr_equal(netdev->dev_addr, mac))
return 0;
+ err = ionic_program_mac(lif, mac);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+ netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
+ __func__);
+
err = eth_prepare_mac_addr_change(netdev, addr);
if (err)
return err;
@@ -1782,7 +1850,7 @@ static void ionic_lif_quiesce(struct ionic_lif *lif)
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
- netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
+ netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
}
static void ionic_txrx_disable(struct ionic_lif *lif)
@@ -2152,6 +2220,76 @@ static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd
}
}
+static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
+{
+ struct ionic_vf_getattr_comp comp = { 0 };
+ int err;
+ u8 attr;
+
+ attr = IONIC_VF_ATTR_VLAN;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].vlanid = comp.vlanid;
+
+ attr = IONIC_VF_ATTR_SPOOFCHK;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].spoofchk = comp.spoofchk;
+
+ attr = IONIC_VF_ATTR_LINKSTATE;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err) {
+ switch (comp.linkstate) {
+ case IONIC_VF_LINK_STATUS_UP:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ break;
+ case IONIC_VF_LINK_STATUS_DOWN:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ break;
+ case IONIC_VF_LINK_STATUS_AUTO:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_AUTO;
+ break;
+ default:
+ dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
+ break;
+ }
+ }
+
+ attr = IONIC_VF_ATTR_RATE;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].maxrate = comp.maxrate;
+
+ attr = IONIC_VF_ATTR_TRUST;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].trusted = comp.trust;
+
+ attr = IONIC_VF_ATTR_MAC;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ether_addr_copy(ionic->vfs[vf].macaddr, comp.macaddr);
+
+err_out:
+ if (err)
+ dev_err(ionic->dev, "Failed to get %s for VF %d\n",
+ ionic_vf_attr_to_str(attr), vf);
+
+ return err;
+}
+
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
@@ -2167,14 +2305,18 @@ static int ionic_get_vf_config(struct net_device *netdev,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ivf->vf = vf;
- ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
- ivf->qos = 0;
- ivf->spoofchk = ionic->vfs[vf].spoofchk;
- ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
- ivf->trusted = ionic->vfs[vf].trusted;
- ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ ivf->vf = vf;
+ ivf->qos = 0;
+
+ ret = ionic_update_cached_vf_config(ionic, vf);
+ if (!ret) {
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
+ ivf->spoofchk = ionic->vfs[vf].spoofchk;
+ ivf->linkstate = ionic->vfs[vf].linkstate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
+ ivf->trusted = ionic->vfs[vf].trusted;
+ ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ }
}
up_read(&ionic->vf_op_lock);
@@ -2220,6 +2362,7 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2235,7 +2378,11 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
+ ether_addr_copy(vfc.macaddr, mac);
+ dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
+ __func__, vf, vfc.macaddr);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ether_addr_copy(ionic->vfs[vf].macaddr, mac);
}
@@ -2247,6 +2394,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 proto)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2269,8 +2417,11 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
+ vfc.vlanid = cpu_to_le16(vlan);
+ dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
+ __func__, vf, le16_to_cpu(vfc.vlanid));
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
@@ -2282,6 +2433,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int ionic_set_vf_rate(struct net_device *netdev, int vf,
int tx_min, int tx_max)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2298,8 +2450,11 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
+ vfc.maxrate = cpu_to_le32(tx_max);
+ dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
+ __func__, vf, le32_to_cpu(vfc.maxrate));
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
@@ -2310,9 +2465,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data = set; /* convert to u8 for config */
int ret;
if (!netif_device_present(netdev))
@@ -2323,10 +2478,13 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_SPOOFCHK, &data);
+ vfc.spoofchk = set;
+ dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
+ __func__, vf, vfc.spoofchk);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- ionic->vfs[vf].spoofchk = data;
+ ionic->vfs[vf].spoofchk = set;
}
up_write(&ionic->vf_op_lock);
@@ -2335,9 +2493,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data = set; /* convert to u8 for config */
int ret;
if (!netif_device_present(netdev))
@@ -2348,10 +2506,13 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_TRUST, &data);
+ vfc.trust = set;
+ dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
+ __func__, vf, vfc.trust);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- ionic->vfs[vf].trusted = data;
+ ionic->vfs[vf].trusted = set;
}
up_write(&ionic->vf_op_lock);
@@ -2360,20 +2521,21 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data;
+ u8 vfls;
int ret;
switch (set) {
case IFLA_VF_LINK_STATE_ENABLE:
- data = IONIC_VF_LINK_STATUS_UP;
+ vfls = IONIC_VF_LINK_STATUS_UP;
break;
case IFLA_VF_LINK_STATE_DISABLE:
- data = IONIC_VF_LINK_STATUS_DOWN;
+ vfls = IONIC_VF_LINK_STATUS_DOWN;
break;
case IFLA_VF_LINK_STATE_AUTO:
- data = IONIC_VF_LINK_STATUS_AUTO;
+ vfls = IONIC_VF_LINK_STATUS_AUTO;
break;
default:
return -EINVAL;
@@ -2387,8 +2549,11 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_LINKSTATE, &data);
+ vfc.linkstate = vfls;
+ dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
+ __func__, vf, vfc.linkstate);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ionic->vfs[vf].linkstate = set;
}
@@ -2652,11 +2817,15 @@ err_out:
* than the full array, but leave the qcq shells in place
*/
for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
- lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->txqcqs[i]);
+ if (lif->txqcqs && lif->txqcqs[i]) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ }
- lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->rxqcqs[i]);
+ if (lif->rxqcqs && lif->rxqcqs[i]) {
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
}
if (err)
@@ -2835,6 +3004,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
mutex_unlock(&lif->queue_lock);
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -2861,6 +3031,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
err = ionic_qcqs_alloc(lif);
if (err)
goto err_unlock;
@@ -2934,8 +3107,6 @@ void ionic_lif_free(struct ionic_lif *lif)
/* unmap doorbell page */
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
lif->kern_dbpage = NULL;
- kfree(lif->dbid_inuse);
- lif->dbid_inuse = NULL;
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
@@ -2995,8 +3166,7 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
- netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
napi_enable(&qcq->napi);
@@ -3069,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
.attr = IONIC_LIF_ATTR_MAC,
},
};
+ u8 mac_address[ETH_ALEN];
struct sockaddr addr;
int err;
@@ -3077,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
return err;
netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
ctx.comp.lif_getattr.mac);
- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
- return 0;
+ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+ if (is_zero_ether_addr(mac_address)) {
+ eth_hw_addr_random(netdev);
+ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
+ ether_addr_copy(mac_address, netdev->dev_addr);
+
+ err = ionic_program_mac(lif, mac_address);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+ netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
+ __func__);
+ return 0;
+ }
+ }
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
@@ -3086,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
- netdev->dev_addr))
+ if (!ether_addr_equal(mac_address, netdev->dev_addr))
ionic_lif_addr_add(lif, netdev->dev_addr);
} else {
/* Update the netdev mac with the device's mac */
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ ether_addr_copy(addr.sa_data, mac_address);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
@@ -3135,22 +3320,12 @@ int ionic_lif_init(struct ionic_lif *lif)
return -EINVAL;
}
- lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
- if (!lif->dbid_inuse) {
- dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
- return -ENOMEM;
- }
-
- /* first doorbell id reserved for kernel (dbid aka pid == zero) */
- set_bit(0, lif->dbid_inuse);
lif->kern_pid = 0;
-
dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
if (!lif->kern_dbpage) {
dev_err(dev, "Cannot map dbpage, aborting\n");
- err = -ENOMEM;
- goto err_out_free_dbid;
+ return -ENOMEM;
}
err = ionic_lif_adminq_init(lif);
@@ -3186,15 +3361,13 @@ int ionic_lif_init(struct ionic_lif *lif)
return 0;
err_out_notifyq_deinit:
+ napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
ionic_lif_qcq_deinit(lif, lif->adminqcq);
ionic_lif_reset(lif);
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
lif->kern_dbpage = NULL;
-err_out_free_dbid:
- kfree(lif->dbid_inuse);
- lif->dbid_inuse = NULL;
return err;
}
@@ -3214,7 +3387,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
},
};
- strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
sizeof(ctx.cmd.lif_setattr.name));
ionic_adminq_post_wait(lif, &ctx);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9f7ab2f17f93..a53984bf3544 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -135,6 +135,7 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
IONIC_LIF_F_FW_RESET,
+ IONIC_LIF_F_FW_STOPPING,
IONIC_LIF_F_SPLIT_INTR,
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
@@ -213,7 +214,6 @@ struct ionic_lif {
u32 rx_coalesce_hw; /* what the hw is using */
u32 tx_coalesce_usecs; /* what the user asked for */
u32 tx_coalesce_hw; /* what the hw is using */
- unsigned long *dbid_inuse;
unsigned int dbid_count;
struct ionic_phc *phc;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 875f4ec42efe..56f93b030551 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -188,6 +188,28 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
}
}
+const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr)
+{
+ switch (attr) {
+ case IONIC_VF_ATTR_SPOOFCHK:
+ return "IONIC_VF_ATTR_SPOOFCHK";
+ case IONIC_VF_ATTR_TRUST:
+ return "IONIC_VF_ATTR_TRUST";
+ case IONIC_VF_ATTR_LINKSTATE:
+ return "IONIC_VF_ATTR_LINKSTATE";
+ case IONIC_VF_ATTR_MAC:
+ return "IONIC_VF_ATTR_MAC";
+ case IONIC_VF_ATTR_VLAN:
+ return "IONIC_VF_ATTR_VLAN";
+ case IONIC_VF_ATTR_RATE:
+ return "IONIC_VF_ATTR_RATE";
+ case IONIC_VF_ATTR_STATSADDR:
+ return "IONIC_VF_ATTR_STATSADDR";
+ default:
+ return "IONIC_VF_ATTR_UNKNOWN";
+ }
+}
+
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_desc_info *desc_info;
@@ -215,9 +237,13 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err)
{
+ const char *stat_str;
+
+ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
+ ionic_error_to_str(status);
+
netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(status), err);
+ ionic_opcode_to_str(opcode), opcode, stat_str, err);
}
static int ionic_adminq_check_err(struct ionic_lif *lif,
@@ -318,6 +344,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
+ ctx->comp.comp.status = IONIC_RC_ERROR;
return err;
}
@@ -331,11 +358,15 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
if (remaining)
break;
- /* interrupt the wait if FW stopped */
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ /* force a check of FW status and break out if FW reset */
+ (void)ionic_heartbeat_check(lif->ionic);
+ if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !lif->ionic->idev.fw_status_ready) ||
+ test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
if (do_msg)
- netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
- name, ctx->cmd.cmd.opcode);
+ netdev_warn(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ ctx->comp.comp.status = IONIC_RC_ERROR;
return -ENXIO;
}
@@ -370,21 +401,34 @@ int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ struct ionic_dev *idev = &ionic->idev;
- iowrite32(0, &regs->doorbell);
- memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+ iowrite32(0, &idev->dev_cmd_regs->doorbell);
+ memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
-int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
+ int err)
+{
+ const char *stat_str;
+
+ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
+ ionic_error_to_str(status);
+
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode, stat_str, err);
+}
+
+static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ const bool do_msg)
{
struct ionic_dev *idev = &ionic->idev;
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
+ int done = 0;
+ bool fw_up;
int opcode;
- int hb = 0;
- int done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -394,31 +438,24 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
try_again:
opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
- do {
+ for (fw_up = ionic_is_fw_running(idev);
+ !done && fw_up && time_before(jiffies, max_wait);
+ fw_up = ionic_is_fw_running(idev)) {
done = ionic_dev_cmd_done(idev);
if (done)
break;
usleep_range(100, 200);
-
- /* Don't check the heartbeat on FW_CONTROL commands as they are
- * notorious for interrupting the firmware's heartbeat update.
- */
- if (opcode != IONIC_CMD_FW_CONTROL)
- hb = ionic_heartbeat_check(ionic);
- } while (!done && !hb && time_before(jiffies, max_wait));
+ }
duration = jiffies - start_time;
dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
- if (!done && hb) {
- /* It is possible (but unlikely) that FW was busy and missed a
- * heartbeat check but is still alive and will process this
- * request, so don't clean the dev_cmd in this case.
- */
- dev_dbg(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
- ionic_opcode_to_str(opcode), opcode);
+ if (!done && !fw_up) {
+ ionic_dev_cmd_clean(ionic);
+ dev_warn(ionic->dev, "DEVCMD %s (%d) interrupted - FW is down\n",
+ ionic_opcode_to_str(opcode), opcode);
return -ENXIO;
}
@@ -437,23 +474,35 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
if (!(opcode == IONIC_CMD_FW_CONTROL && err == IONIC_RC_EAGAIN))
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(err), err);
+ if (do_msg)
+ ionic_dev_cmd_dev_err_print(ionic, opcode, err,
+ ionic_error_to_errno(err));
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
+int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+{
+ return __ionic_dev_cmd_wait(ionic, max_seconds, true);
+}
+
+int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_seconds)
+{
+ return __ionic_dev_cmd_wait(ionic, max_seconds, false);
+}
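Both wrappers assume the caller holds dev_cmd_lock around the go/wait pair; the quiet variant suits callers that print their own diagnostics, as the VF getattr path earlier in this patch does:

    mutex_lock(&ionic->dev_cmd_lock);
    ionic_dev_cmd_go(&ionic->idev, &cmd);
    err = ionic_dev_cmd_wait_nomsg(ionic, DEVCMD_TIMEOUT);
    mutex_unlock(&ionic->dev_cmd_lock);
    if (err && comp->status != IONIC_RC_ENOSUPP)
        ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode,
                                    comp->status, err);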
+
int ionic_setup(struct ionic *ionic)
{
int err;
@@ -540,6 +589,9 @@ int ionic_reset(struct ionic *ionic)
struct ionic_dev *idev = &ionic->idev;
int err;
+ if (!ionic_is_fw_running(idev))
+ return 0;
+
mutex_lock(&ionic->dev_cmd_lock);
ionic_dev_cmd_reset(idev);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
@@ -612,15 +664,17 @@ int ionic_port_init(struct ionic *ionic)
int ionic_port_reset(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
- int err;
+ int err = 0;
if (!idev->port_info)
return 0;
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_port_reset(idev);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
- mutex_unlock(&ionic->dev_cmd_lock);
+ if (ionic_is_fw_running(idev)) {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_reset(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ }
dma_free_coherent(ionic->dev, idev->port_info_sz,
idev->port_info, idev->port_info_pa);
@@ -628,9 +682,6 @@ int ionic_port_reset(struct ionic *ionic)
idev->port_info = NULL;
idev->port_info_pa = 0;
- if (err)
- dev_err(ionic->dev, "Failed to reset port\n");
-
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index f6e785f949f9..b7363376dfc8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -376,10 +376,24 @@ static int ionic_lif_filter_add(struct ionic_lif *lif,
spin_unlock_bh(&lif->rx_filters.lock);
- if (err == -ENOSPC) {
- if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
- lif->max_vlans = lif->nvlans;
+ /* store the max_vlans limit that we found */
+ if (err == -ENOSPC &&
+ le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+
+ /* Prevent unnecessary error messages on recoverable
+ * errors as the filter will get retried on the next
+ * sync attempt.
+ */
+ switch (err) {
+ case -ENOSPC:
+ case -ENXIO:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ case -EBUSY:
return 0;
+ default:
+ break;
}
ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
@@ -494,9 +508,22 @@ static int ionic_lif_filter_del(struct ionic_lif *lif,
spin_unlock_bh(&lif->rx_filters.lock);
if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ switch (err) {
+ /* ignore these errors */
+ case -EEXIST:
+ case -ENXIO:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ case -EBUSY:
+ case 0:
+ break;
+ default:
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index fd6806b4a1b9..9859a4432985 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,7 +151,6 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 94384f5d2a22..c03986bf2628 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,7 +10,6 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
@@ -669,27 +668,37 @@ dma_fail:
return -EIO;
}
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info)
+{
+ struct ionic_buf_info *buf_info = desc_info->bufs;
+ struct device *dev = q->dev;
+ unsigned int i;
+
+ if (!desc_info->nbufs)
+ return;
+
+ dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ buf_info++;
+ for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+ dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+
+ desc_info->nbufs = 0;
+}
+
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info,
void *cb_arg)
{
- struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
struct sk_buff *skb = cb_arg;
- struct device *dev = q->dev;
- unsigned int i;
u16 qi;
- if (desc_info->nbufs) {
- dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- buf_info++;
- for (i = 1; i < desc_info->nbufs; i++, buf_info++)
- dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- }
+ ionic_tx_desc_unmap_bufs(q, desc_info);
if (!skb)
return;
@@ -931,14 +940,16 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (err)
+ if (err) {
+ /* clean up mapping from ionic_tx_map_skb */
+ ionic_tx_desc_unmap_bufs(q, desc_info);
return err;
+ }
if (encap)
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
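The new helpers fold the open-coded header arithmetic; the equivalences implied by the replaced expressions (the helpers live in <linux/tcp.h>) are:

    static inline int skb_tcp_all_headers(const struct sk_buff *skb)
    {
        return skb_transport_offset(skb) + tcp_hdrlen(skb);
    }

    static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
    {
        return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
    }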
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
@@ -1003,8 +1014,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
}
-static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_desc *desc = desc_info->txq_desc;
struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1038,12 +1049,10 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
stats->crc32_csum++;
else
stats->csum++;
-
- return 0;
}
-static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_desc *desc = desc_info->txq_desc;
struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1074,12 +1083,10 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_offset = 0;
stats->csum_none++;
-
- return 0;
}
-static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1];
@@ -1093,31 +1100,24 @@ static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
}
stats->frags += skb_shinfo(skb)->nr_frags;
-
- return 0;
}
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_desc_info *desc_info = &q->info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- int err;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
- err = ionic_tx_calc_csum(q, skb, desc_info);
+ ionic_tx_calc_csum(q, skb, desc_info);
else
- err = ionic_tx_calc_no_csum(q, skb, desc_info);
- if (err)
- return err;
+ ionic_tx_calc_no_csum(q, skb, desc_info);
/* add frags */
- err = ionic_tx_skb_frags(q, skb, desc_info);
- if (err)
- return err;
+ ionic_tx_skb_frags(q, skb, desc_info);
skb_tx_timestamp(skb);
stats->pkts++;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3c4a84ea6321..8c4cb910e09b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -65,9 +65,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ strscpy(drvinfo->driver, netxen_nic_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ strscpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
@@ -75,7 +75,7 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 07dd3c3b1771..de8d54b23f73 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -173,8 +173,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_add(netdev, &sds_ring->napi,
- netxen_nic_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &sds_ring->napi, netxen_nic_poll);
}
return 0;
@@ -1877,7 +1876,7 @@ netxen_tso_check(struct net_device *netdev,
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 0d9c2fe0245d..3d2098f21bb7 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -30,8 +30,7 @@ qed-$(CONFIG_QED_OOO) += qed_ooo.o
qed-$(CONFIG_QED_NVMETCP) += \
qed_nvmetcp.o \
- qed_nvmetcp_fw_funcs.o \
- qed_nvmetcp_ip_services.o
+ qed_nvmetcp_fw_funcs.o
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index 9d5a0c9e1ca0..f6cd1b3efdfd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1282,7 +1282,7 @@ void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
* @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
* results.
*
- * Return: Rrror if the parsing fails, ok otherwise.
+ * Return: Error if the parsing fails, ok otherwise.
*/
enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index e3edca187ddf..5250d1d1e49c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -489,7 +489,7 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 11
+#define NUM_COMMON_GLOBAL_PARAMS 10
#define MAX_RECURSION_DEPTH 10
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index cc4ec2bb36db..d61cd32ec3b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -412,7 +412,7 @@ static int qed_llh_alloc(struct qed_dev *cdev)
continue;
p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
- DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n",
p_llh_info->num_ppfid, i);
p_llh_info->num_ppfid++;
}
@@ -626,7 +626,7 @@ static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
if (ppfid >= p_llh_info->num_ppfid) {
DP_NOTICE(cdev,
- "ppfid %d is not valid, available indices are 0..%hhd\n",
+ "ppfid %d is not valid, available indices are 0..%d\n",
ppfid, p_llh_info->num_ppfid - 1);
*p_abs_ppfid = 0;
return -EINVAL;
@@ -3098,6 +3098,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
continue;
}
+ /* Some flows may keep variable set */
+ p_hwfn->mcp_info->mcp_handling_status = 0;
+
rc = qed_calc_hw_mode(p_hwfn);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0ce37f2460a4..407029a36fa1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1835,8 +1835,6 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
if (!allocated_mem)
return NULL;
- memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
-
/* For each Storm, set physical address in RAM */
while (buf_offset < buf_size) {
struct phys_mem_desc *storm_mem_desc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 82e74f62b677..2661c483c67e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1110,7 +1110,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
bit_len);
/* Some bits represent more than a
- * a single interrupt. Correctly print
+ * single interrupt. Correctly print
* their name.
*/
if (ATTENTION_LENGTH(flags) > 2 ||
@@ -1119,7 +1119,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
snprintf(bit_name, 30,
p_aeu->bit_name, num);
else
- strlcpy(bit_name,
+ strscpy(bit_name,
p_aeu->bit_name, 30);
/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c5003fa1a25e..c91898be7c03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -823,7 +823,6 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
if (!cdev->hwfns[i].b_int_requested)
break;
- synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
&cdev->hwfns[i].sp_dpc);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index da1eadabcb41..9fb1fa479d4b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -140,7 +140,7 @@ static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
- struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp;
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
@@ -249,6 +249,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Initialize the MFW spinlock */
spin_lock_init(&p_info->cmd_lock);
spin_lock_init(&p_info->link_lock);
+ spin_lock_init(&p_info->unload_lock);
INIT_LIST_HEAD(&p_info->cmd_list);
@@ -614,12 +615,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
usecs);
}
-int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ bool can_sleep)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -627,6 +629,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
+ mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
@@ -638,6 +641,28 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, true));
+}
+
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, false));
+}
+
static int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1071,10 +1096,15 @@ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+#define MFW_COMPLETION_MAX_ITER 5000
+#define MFW_COMPLETION_INTERVAL_MS 1
+
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
+ u32 cnt = MFW_COMPLETION_MAX_ITER;
u32 wol_param;
+ int rc;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
@@ -1097,7 +1127,23 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
mb_params.param = wol_param;
mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
- return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ set_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ while (test_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status) && --cnt)
+ msleep(MFW_COMPLETION_INTERVAL_MS);
+
+ if (!cnt)
+ DP_NOTICE(p_hwfn,
+ "Failed to wait MFW event completion after %d msec\n",
+ MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS);
+
+ return rc;
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1728,8 +1774,8 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
/* Acknowledge the MFW */
- qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
- &param);
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1766,8 +1812,8 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
- qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
- &resp, &param);
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
}
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
@@ -1997,6 +2043,19 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
"Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ if (test_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status)) {
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+ DP_INFO(p_hwfn,
+ "Msg [%d] is bypassed on unload flow\n", i);
+ continue;
+ }
+
+ set_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
switch (i) {
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
@@ -2050,6 +2109,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
}
+
+ clear_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
}
/* ACK everything */
@@ -3675,8 +3737,8 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
{
int rc;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
- p_mcp_resp, p_mcp_param);
+ rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD,
+ param, p_mcp_resp, p_mcp_param);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 369e1892450a..9bd0565fe8ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -393,11 +393,12 @@ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * qed_mcp_cmd(): General function for sending commands to the MCP
+ * qed_mcp_cmd(): Sleepable function for sending commands to the MCP
* mailbox. It acquires a mutex lock for the entire
* operation, from sending the request until the MCP
* response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * to 5 seconds every 10ms. Should not be called from atomic
+ * context.
*
* @p_hwfn: HW device data.
* @p_ptt: PTT required for register access.
@@ -417,6 +418,31 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
/**
+ * qed_mcp_cmd_nosleep(): Function for sending commands to the MCP
+ * mailbox. It acquire mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response. Waiting for MCP response will be checked up
+ * to 5 seconds every 10us. Should be called when sleep
+ * is not allowed.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: command to be sent to the MCP.
+ * @param: Optional param
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
+ * response
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
+
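A rule of thumb from the call sites converted in this patch: acknowledgements sent from the MFW event handler, which may run in a non-sleepable context, use the nosleep variant, e.g. in qed_mcp_update_bw() earlier in this patch:

    qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0,
                        &resp, &param);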
+/**
* qed_mcp_drain(): drains the nig, allowing completion to pass in
* case of pauses.
* (Should be called only from sleepable context)
@@ -762,6 +788,14 @@ struct qed_mcp_info {
/* S/N for debug data mailbox commands */
atomic_t dbg_data_seq;
+
+ /* Spinlock used to sync the flag mcp_handling_status with
+ * the mfw events handler
+ */
+ spinlock_t unload_lock;
+ unsigned long mcp_handling_status;
+#define QED_MCP_BYPASS_PROC_BIT 0
+#define QED_MCP_IN_PROCESSING_BIT 1
};
struct qed_mcp_mb_params {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
index b70ee8200e15..6459dd3feb37 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -2470,6 +2470,6 @@ struct nvm_meta_bin_t {
u32 version;
#define NVM_META_BIN_VERSION 1
u32 num_options;
- u32 options[0];
+ u32 options[];
};
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
deleted file mode 100644
index 96a2077fd315..000000000000
--- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-/*
- * Copyright 2021 Marvell. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/param.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-
-#include <net/tcp.h>
-
-#include <linux/qed/qed_nvmetcp_ip_services_if.h>
-
-#define QED_IP_RESOL_TIMEOUT 4
-
-int qed_route_ipv4(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- __be32 *loc_ip, *rem_ip;
- struct rtable *rt;
- int rc = -ENXIO;
- int retry;
-
- loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
- rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
- *ndev = NULL;
- rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
- if (IS_ERR(rt)) {
- pr_err("lookup route failed\n");
- rc = PTR_ERR(rt);
- goto return_err;
- }
-
- neigh = dst_neigh_lookup(&rt->dst, rem_ip);
- if (!neigh) {
- rc = -ENOMEM;
- ip_rt_put(rt);
- goto return_err;
- }
-
- *ndev = rt->dst.dev;
- ip_rt_put(rt);
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- /* copy resolved MAC address */
- neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
- if (!(*loc_ip)) {
- *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
- local_addr->ss_family = AF_INET;
- }
-
-return_err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv4);
-
-int qed_route_ipv6(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- struct dst_entry *dst;
- struct flowi6 fl6;
- int rc = -ENXIO;
- int retry;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
- fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst || dst->error) {
- if (dst) {
- dst_release(dst);
- pr_err("lookup route failed %d\n", dst->error);
- }
-
- goto out;
- }
-
- neigh = dst_neigh_lookup(dst, &fl6.daddr);
- if (neigh) {
- *ndev = ip6_dst_idev(dst)->dev;
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- neigh_ha_snapshot((u8 *)hardware_address->sa_data,
- neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
-
- if (ipv6_addr_any(&fl6.saddr)) {
- if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
- &fl6.daddr, 0, &fl6.saddr)) {
- pr_err("Unable to find source IP address\n");
- goto out;
- }
-
- local_addr->ss_family = AF_INET6;
- ((struct sockaddr_in6 *)local_addr)->sin6_addr =
- fl6.saddr;
- }
- }
-
- dst_release(dst);
-
-out:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv6);
-
-void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
-{
- if (is_vlan_dev(*ndev)) {
- *vlan_id = vlan_dev_vlan_id(*ndev);
- *ndev = vlan_dev_real_dev(*ndev);
- }
-}
-EXPORT_SYMBOL(qed_vlan_get_ndev);
-
-struct pci_dev *qed_validate_ndev(struct net_device *ndev)
-{
- struct pci_dev *pdev = NULL;
- struct net_device *upper;
-
- for_each_pci_dev(pdev) {
- if (pdev && pdev->driver &&
- !strcmp(pdev->driver->name, "qede")) {
- upper = pci_get_drvdata(pdev);
- if (upper->ifindex == ndev->ifindex)
- return pdev;
- }
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(qed_validate_ndev);
-
-__be16 qed_get_in_port(struct sockaddr_storage *sa)
-{
- return sa->ss_family == AF_INET
- ? ((struct sockaddr_in *)sa)->sin_port
- : ((struct sockaddr_in6 *)sa)->sin6_port;
-}
-EXPORT_SYMBOL(qed_get_in_port);
-
-int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
- struct socket **sock, u16 *port)
-{
- struct sockaddr_storage sa;
- int rc = 0;
-
- rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
- sock);
- if (rc) {
- pr_warn("failed to create socket: %d\n", rc);
- goto err;
- }
-
- (*sock)->sk->sk_allocation = GFP_KERNEL;
- sk_set_memalloc((*sock)->sk);
-
- rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
- sizeof(local_ip_addr));
-
- if (rc) {
- pr_warn("failed to bind socket: %d\n", rc);
- goto err_sock;
- }
-
- rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
- if (rc < 0) {
- pr_warn("getsockname() failed: %d\n", rc);
- goto err_sock;
- }
-
- *port = ntohs(qed_get_in_port(&sa));
-
- return 0;
-
-err_sock:
- sock_release(*sock);
- sock = NULL;
-err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_fetch_tcp_port);
-
-void qed_return_tcp_port(struct socket *sock)
-{
- if (sock && sock->sk) {
- tcp_set_state(sock->sk, TCP_CLOSE);
- sock_release(sock);
- }
-}
-EXPORT_SYMBOL(qed_return_tcp_port);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 23b668de4640..5a5dbbb8d8aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -42,8 +42,7 @@ int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
bmap->max_count = max_count;
- bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
- GFP_KERNEL);
+ bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL);
if (!bmap->bitmap)
return -ENOMEM;
@@ -107,7 +106,7 @@ int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
- return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+ return bitmap_empty(bmap->bitmap, bmap->max_count);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -319,48 +318,31 @@ free_rdma_dev:
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, bool check)
{
- int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
- int last_line = bmap->max_count / (64 * 8);
- int last_item = last_line * 8 +
- DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
- u64 *pmap = (u64 *)bmap->bitmap;
- int line, item, offset;
- u8 str_last_line[200] = { 0 };
-
- if (!weight || !check)
+ unsigned int bit, weight, nbits;
+ unsigned long *b;
+
+ if (!check)
+ goto end;
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ if (!weight)
goto end;
DP_NOTICE(p_hwfn,
"%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
bmap->name, bmap->max_count, weight);
- /* print aligned non-zero lines, if any */
- for (item = 0, line = 0; line < last_line; line++, item += 8)
- if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ for (bit = 0; bit < bmap->max_count; bit += 512) {
+ b = bmap->bitmap + BITS_TO_LONGS(bit);
+ nbits = min(bmap->max_count - bit, 512U);
+
+ if (!bitmap_empty(b, nbits))
DP_NOTICE(p_hwfn,
- "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- line,
- pmap[item],
- pmap[item + 1],
- pmap[item + 2],
- pmap[item + 3],
- pmap[item + 4],
- pmap[item + 5],
- pmap[item + 6], pmap[item + 7]);
-
- /* print last unaligned non-zero line, if any */
- if ((bmap->max_count % (64 * 8)) &&
- (bitmap_weight((unsigned long *)&pmap[item],
- bmap->max_count - item * 64))) {
- offset = sprintf(str_last_line, "line 0x%04x: ", line);
- for (; item < last_item; item++)
- offset += sprintf(str_last_line + offset,
- "0x%016llx ", pmap[item]);
- DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ "line 0x%04x: %*pb\n", bit / 512, nbits, b);
}
end:
- kfree(bmap->bitmap);
+ bitmap_free(bmap->bitmap);
bmap->bitmap = NULL;
}
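The qed_rdma.c hunks above replace open-coded bitmap handling with the dedicated helpers: bitmap_zalloc()/bitmap_free() pair allocation with release, bitmap_empty() replaces the find_first_bit() and bitmap_weight() idioms, and the %*pb printk specifier dumps a bitmap without manual formatting. A minimal sketch of the resulting lifecycle (illustrative only):

    unsigned long *map;

    map = bitmap_zalloc(max_count, GFP_KERNEL);
    if (!map)
        return -ENOMEM;

    set_bit(3, map);

    if (!bitmap_empty(map, max_count))
        pr_info("in use: %*pb\n", max_count, map); /* hex dump of the bitmap */

    bitmap_free(map);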
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 071b4aeaddf2..134ecfca96a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -76,7 +76,7 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* We delay for a short while if an async destroy QP is still expected.
* Beyond the added delay we clear the bitmap anyway.
*/
- while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
/* If the HW device is during recovery, all resources are
* immediately reset without receiving a per-cid indication
* from HW. In this case we don't expect the cid bitmap to be
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 8ac38828ba45..0848b5529d48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2984,12 +2984,16 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
struct qed_filter_accept_flags *flags = &params->accept_flags;
struct qed_public_vf_info *vf_info;
+ u16 tlv_mask;
+
+ tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
+ BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
/* Untrusted VFs can't even be trusted to know that fact.
* Simply indicate everything is configured fine, and trace
* configuration 'behind their back'.
*/
- if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+ if (!(*tlvs & tlv_mask))
return 0;
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
@@ -3006,6 +3010,13 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
flags->tx_accept_filter &= ~mask;
}
+ if (params->update_accept_any_vlan_flg) {
+ vf_info->accept_any_vlan = params->accept_any_vlan;
+
+ if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
+ params->accept_any_vlan = false;
+ }
+
return 0;
}
@@ -3806,11 +3817,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
return found;
}
-static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
- u16 vfid,
- struct qed_mcp_link_params *p_params,
- struct qed_mcp_link_state *p_link,
- struct qed_mcp_link_capabilities *p_caps)
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
@@ -3818,7 +3829,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
- return;
+ return -EINVAL;
p_bulletin = p_vf->bulletin.p_virt;
@@ -3828,6 +3839,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
if (p_caps)
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ return 0;
}
static int
@@ -4686,6 +4698,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
struct qed_public_vf_info *vf_info;
struct qed_mcp_link_state link;
u32 tx_rate;
+ int ret;
/* Sanitize request */
if (IS_VF(cdev))
@@ -4699,7 +4712,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,
vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
- qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ if (ret)
+ return ret;
/* Fill information about VF */
ivi->vf = vf_id;
@@ -4715,6 +4730,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
tx_rate = vf_info->tx_rate;
ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+ ivi->trusted = vf_info->is_trusted_request;
return 0;
}
@@ -5145,6 +5161,12 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
params.update_ctl_frame_check = 1;
params.mac_chk_en = !vf_info->is_trusted_configured;
+ params.update_accept_any_vlan_flg = 0;
+
+ if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = vf_info->accept_any_vlan;
+ }
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
@@ -5160,13 +5182,20 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
if (!vf_info->is_trusted_configured) {
flags->rx_accept_filter &= ~mask;
flags->tx_accept_filter &= ~mask;
+ params.accept_any_vlan = false;
}
if (flags->update_rx_mode_config ||
flags->update_tx_mode_config ||
- params.update_ctl_frame_check)
+ params.update_ctl_frame_check ||
+ params.update_accept_any_vlan_flg) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
+ vf_info->is_trusted_configured ? "trusted" : "untrusted",
+ vf->abs_vf_id, vf->relative_vf_id);
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
+ }
}
}
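The qed_iov_get_link() change above is the standard void-to-int conversion: the helper used to bail out silently on a bad VF id, leaving the caller's qed_mcp_link_state uninitialized. Reduced to its shape (all names hypothetical):

    /* Before: failure is invisible, 'out' may be left uninitialized. */
    static void get_link_void(struct vf *vf, struct link_state *out)
    {
        if (!vf)
            return;
        *out = vf->link;
    }

    /* After: the caller can check and propagate the error. */
    static int get_link(struct vf *vf, struct link_state *out)
    {
        if (!vf)
            return -EINVAL;
        *out = vf->link;
        return 0;
    }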
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index f448e3dd6c8b..6ee2493de164 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -62,6 +62,7 @@ struct qed_public_vf_info {
bool is_trusted_request;
u8 rx_accept_mode;
u8 tx_accept_mode;
+ bool accept_any_vlan;
};
struct qed_iov_vf_init_params {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 597cd9cd57b5..7b0e390c0b07 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
p_iov->bulletin.size,
&p_iov->bulletin.phys,
GFP_KERNEL);
+ if (!p_iov->bulletin.p_virt)
+ goto free_pf2vf_reply;
+
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt,
@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
return rc;
+free_pf2vf_reply:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
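The new free_pf2vf_reply label in qed_vf_hw_prepare() keeps the error unwind in strict reverse order of allocation, the usual goto-ladder shape. A sketch with hypothetical allocators:

    a = alloc_a();
    if (!a)
        goto err;

    b = alloc_b();
    if (!b)
        goto free_a;

    c = alloc_c();              /* the bulletin board in the real code */
    if (!c)
        goto free_b;            /* the newly added unwind step */

    return 0;

    free_b:
        free_b_res(b);
    free_a:
        free_a_res(a);
    err:
        return -ENOMEM;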
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 306b5f4bc632..2bd51a41ce8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -225,7 +225,7 @@ struct pfvf_start_queue_resp_tlv {
};
/* Extended queue information - additional index for reference inside qzone.
- * If commmunicated between VF/PF, each TLV relating to queues should be
+ * If communicated between VF/PF, each TLV relating to queues should be
* extended by one such [or have a future base TLV that already contains info].
*/
struct vfpf_qid_tlv {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 97a7ab0826ed..8034d812d5a0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -624,7 +624,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
struct qede_dev *edev = netdev_priv(ndev);
char mbi[ETHTOOL_FWVERS_LEN];
- strlcpy(info->driver, "qede", sizeof(info->driver));
+ strscpy(info->driver, "qede", sizeof(info->driver));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -661,7 +661,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
"mfw %s", mfw);
}
- strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
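On the strlcpy() to strscpy() conversions throughout this series: strlcpy() returns strlen(src), which forces a read of the whole source string and gives no direct truncation signal; strscpy() always NUL-terminates and returns the number of bytes copied, or -E2BIG on truncation. Sketch:

    char buf[8];
    ssize_t n;

    n = strscpy(buf, name, sizeof(buf));
    if (n == -E2BIG)
        pr_debug("name truncated to '%s'\n", buf); /* buf is still terminated */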
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index b242000a77fd..7c2af482192d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -260,11 +260,9 @@ static int map_frag_to_bd(struct qede_tx_queue *txq,
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
@@ -748,6 +746,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
buf = page_address(bd->data) + bd->page_offset;
skb = build_skb(buf, rxq->rx_buf_seg_size);
+ if (unlikely(!skb))
+ return NULL;
+
skb_reserve(skb, pad);
skb_put(skb, len);
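skb_tcp_all_headers() and skb_inner_tcp_all_headers() are straight replacements for the open-coded header-length arithmetic removed from qede_get_skb_hlen(); conceptually:

    /* Equivalent of the removed open-coded expressions (sketch): */
    hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);              /* skb_tcp_all_headers() */
    hdr_len = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);  /* skb_inner_tcp_all_headers() */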
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b4e5a15e308b..953f304b8588 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1214,7 +1214,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* Start the Slowpath-process */
memset(&sp_params, 0, sizeof(sp_params));
sp_params.int_mode = QED_INT_MODE_MSIX;
- strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
@@ -1904,8 +1904,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
/* Add NAPI objects */
for_each_queue(i) {
- netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
- qede_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
napi_enable(&edev->fp_array[i].napi);
}
}
@@ -1916,7 +1915,6 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
for (i = 0; i < edev->int_info.used_cnt; i++) {
if (edev->int_info.msix_cnt) {
- synchronize_irq(edev->int_info.msix[i].vector);
free_irq(edev->int_info.msix[i].vector,
&edev->fp_array[i]);
} else {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 39176e765767..c9c8225f04d6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -496,19 +496,19 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
&edev->flags)) {
- DP_ERR(edev, "Timestamping in progress\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
edev->ptp_skip_txts++;
return;
}
if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
- DP_ERR(edev,
- "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Tx timestamping was not enabled, this pkt will not be timestamped\n");
clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
edev->ptp_skip_txts++;
} else if (unlikely(ptp->tx_skb)) {
- DP_ERR(edev,
- "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Device supports a single outstanding pkt to ts, It will not be ts\n");
clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
edev->ptp_skip_txts++;
} else {
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b30589a135c2..76072f8c3d2f 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,10 +1736,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ql3xxx_driver_version,
+ strscpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ql3xxx_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
}
@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;
- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*
@@ -3812,7 +3813,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
- netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
+ netif_napi_add(ndev, &qdev->napi, ql_poll);
ndev->irq = pdev->irq;
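The ql_reset_work() fix above addresses a classic test_bit() misuse: the function takes a bit number, not a mask, so OR-ing two bit numbers tests a single, unrelated bit. With hypothetical values:

    #define QL_RESET_PER_SCSI  4   /* hypothetical bit numbers for illustration */
    #define QL_RESET_START     6

    /* Buggy: 4 | 6 == 6, so only QL_RESET_START was ever tested. */
    if (test_bit(QL_RESET_PER_SCSI | QL_RESET_START, &qdev->flags))
        ...

    /* Fixed: each flag tested by its own bit number. */
    if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
        test_bit(QL_RESET_START, &qdev->flags))
        ...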
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 5d79ee4370bc..7519773eaca6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->get_hw_capability)
return dcb->ops->get_hw_capability(dcb);
- return 0;
+ return -EOPNOTSUPP;
}
static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
@@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->attach)
return dcb->ops->attach(dcb);
- return 0;
+ return -EOPNOTSUPP;
}
static inline int
@@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
if (dcb && dcb->ops->query_hw_capability)
return dcb->ops->query_hw_capability(dcb, buf);
- return 0;
+ return -EOPNOTSUPP;
}
static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
@@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
if (dcb && dcb->ops->query_cee_param)
return dcb->ops->query_cee_param(dcb, buf, type);
- return 0;
+ return -EOPNOTSUPP;
}
static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
@@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->get_cee_cfg)
return dcb->ops->get_cee_cfg(dcb);
- return 0;
+ return -EOPNOTSUPP;
}
static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
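Returning -EOPNOTSUPP from the qlcnic DCB stubs lets callers tell "DCB op absent" apart from "op ran and succeeded", which the blanket return 0 hid. The shape of the fix (some_op is illustrative):

    static inline int dcb_op(struct qlcnic_dcb *dcb)
    {
        if (dcb && dcb->ops->some_op)
            return dcb->ops->some_op(dcb);

        return -EOPNOTSUPP;        /* was: return 0; */
    }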
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index e10fe071a40f..1ee491f78c6b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -277,10 +277,10 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ strscpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
}
@@ -1355,7 +1355,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
memset(data, 0, stats->n_stats * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
tx_ring = &adapter->tx_ring[ring];
data = qlcnic_fill_tx_queue_stats(data, tx_ring);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 29cdcb2285b1..92930a055cbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>
+#include <linux/jiffies.h>
#include "qlcnic.h"
@@ -332,7 +333,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
tmp_fil->vlan_id == vlan_id) {
- if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
@@ -496,7 +497,7 @@ set_flags:
}
opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->hdr_length = hdr_len;
opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
@@ -1585,17 +1586,15 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll);
} else {
if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_poll);
else
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_rx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_rx_poll);
}
}
@@ -1607,8 +1606,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(netdev, &tx_ring->napi,
+ qlcnic_tx_poll);
}
}
@@ -2114,17 +2113,14 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_rx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_rx_poll);
else
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_msix_sriov_vf_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_msix_sriov_vf_poll);
} else {
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_poll);
}
}
@@ -2137,9 +2133,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- netif_tx_napi_add(netdev, &tx_ring->napi,
- qlcnic_83xx_msix_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll);
}
}
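time_is_before_jiffies() in the qlcnic_send_filter() hunk is the wraparound-safe form of the removed "jiffies > deadline" comparison; it expands to time_after(jiffies, deadline), which stays correct when jiffies overflows. Sketch, reusing the names from the hunk above:

    unsigned long deadline = tmp_fil->ftime + QLCNIC_READD_AGE * HZ;

    /* True once 'deadline' lies in the past, even across a jiffies wrap. */
    if (time_is_before_jiffies(deadline))
        qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);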
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d320567b2cca..28476b982bab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -368,7 +368,8 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e90fa97c0ae6..8dd7aa08ecfb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1869,8 +1869,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
if (!min_tx_rate)
min_tx_rate = QLC_VF_MIN_TX_RATE;
- if (max_tx_rate &&
- (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
+ if (max_tx_rate && max_tx_rate >= 10000) {
netdev_err(netdev,
"Invalid max Tx rate, allowed range is [%d - %d]",
min_tx_rate, QLC_VF_MAX_TX_RATE);
@@ -1880,8 +1879,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
if (!max_tx_rate)
max_tx_rate = 10000;
- if (min_tx_rate &&
- (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ if (min_tx_rate && min_tx_rate < QLC_VF_MIN_TX_RATE) {
netdev_err(netdev,
"Invalid min Tx rate, allowed range is [%d - %d]",
QLC_VF_MIN_TX_RATE, max_tx_rate);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 06104d2ff5b3..0d80447d4d3b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1264,7 +1264,7 @@ static int emac_tso_csum(struct emac_adapter *adpt,
pskb_trim(skb, pkt_len);
}
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* we only need to do csum */
netif_warn(adpt, tx_err, adpt->netdev,
@@ -1339,7 +1339,7 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
/* if Large Segment Offload is set (TPD_LSO flag in the TPD descriptor) */
if (TPD_LSO(tpd)) {
- mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mapped_len = skb_tcp_all_headers(skb);
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = mapped_len;
@@ -1465,7 +1465,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
/* Make sure there are enough free descriptors to hold one
* maximum-sized SKB. We need one desc for each fragment,
* one for the checksum (emac_tso_csum), one for TSO, and
- * and one for the SKB header.
+ * one for the SKB header.
*/
if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
netif_stop_queue(adpt->netdev);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index a55c52696d49..3115b2c12898 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -684,8 +684,7 @@ static int emac_probe(struct platform_device *pdev)
/* Initialize queues */
emac_mac_rx_tx_ring_init_all(pdev, adpt);
- netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx);
ret = register_netdev(netdev);
if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 792ce9a323cd..f62c39544e08 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -164,10 +164,10 @@ qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p)
{
struct qcaspi *qca = netdev_priv(dev);
- strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
- strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
- strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
- strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev),
+ strscpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
+ strscpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
+ strscpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
+ strscpy(p->bus_info, dev_name(&qca->spi_dev->dev),
sizeof(p->bus_info));
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 955cce644392..c865a4be05ee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -435,7 +435,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
skb_checksum_none_assert(qca->rx_skb);
- netif_rx_ni(qca->rx_skb);
+ netif_rx(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
@@ -1001,7 +1001,7 @@ qca_spi_probe(struct spi_device *spi)
return 0;
}
-static int
+static void
qca_spi_remove(struct spi_device *spi)
{
struct net_device *qcaspi_devs = spi_get_drvdata(spi);
@@ -1011,8 +1011,6 @@ qca_spi_remove(struct spi_device *spi)
unregister_netdev(qcaspi_devs);
free_netdev(qcaspi_devs);
-
- return 0;
}
static const struct spi_device_id qca_spi_id[] = {
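qca_spi_remove() now returns void because the SPI core stopped acting on remove() return values; the remove path simply performs its teardown. Sketch:

    static void example_spi_remove(struct spi_device *spi)
    {
        struct net_device *ndev = spi_get_drvdata(spi);

        unregister_netdev(ndev);
        free_netdev(ndev);
        /* nothing to return: an error here could not be handled anyway */
    }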
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 27c4f43176aa..26646cb6a20a 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -108,7 +108,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
skb_checksum_none_assert(qca->rx_skb);
- netif_rx_ni(qca->rx_skb);
+ netif_rx(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
netdev->mtu +
VLAN_ETH_HLEN);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index bfbd7847f946..a313242a762e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -207,7 +207,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
if (unlikely(!port)) {
- atomic_long_inc(&skb->dev->rx_nohandler);
+ dev_core_stats_rx_nohandler_inc(skb->dev);
kfree_skb(skb);
goto done;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index e5a0b38f7dbe..2b033060fc20 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -19,7 +19,7 @@ struct rmnet_map_control_command {
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
} __aligned(1);
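A flexible array member may not be the sole member of a struct, and inside a union it needs the same trick: DECLARE_FLEX_ARRAY() (from <linux/stddef.h>) wraps the member in an anonymous struct so the declaration stays legal C. Illustrative use on a hypothetical structure:

    struct example_msg {
        __be16 type;
        union {
            __be32 word;
            DECLARE_FLEX_ARRAY(u8, data);   /* where a bare 'u8 data[];' is invalid */
        };
    };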
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 3676976c875b..ba194698cc14 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -298,7 +298,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
- u8 *padbytes;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
@@ -323,8 +322,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
if (skb_tailroom(skb) < padding)
return NULL;
- padbytes = (u8 *)skb_put(skb, padding);
- memset(padbytes, 0, padding);
+ skb_put_zero(skb, padding);
done:
map_header->pkt_len = htons(map_datalen + padding);
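skb_put_zero() folds the skb_put()+memset() pair into one call, removing the temporary pointer:

    /* Before (sketch): */
    u8 *padbytes = skb_put(skb, padding);
    memset(padbytes, 0, padding);

    /* After: */
    skb_put_zero(skb, padding);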
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index a6bf7d505178..eecd52ed1ed2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -939,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct r6040_private *rp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1127,7 +1127,7 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, r6040_poll, 64);
+ netif_napi_add(dev, &lp->napi, r6040_poll);
lp->mii_bus = mdiobus_alloc();
if (!lp->mii_bus) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index ad7b9e9d7f95..f5786d78ed23 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1382,9 +1382,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
@@ -1986,7 +1986,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
eth_hw_addr_set(dev, (u8 *)addr);
dev->netdev_ops = &cp_netdev_ops;
- netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
+ netif_napi_add_weight(dev, &cp->napi, cp_rx_poll, 16);
dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
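netif_napi_add() dropped its weight argument and now always uses the default NAPI_POLL_WEIGHT (64). Most conversions in this series just delete the argument; drivers with a deliberate non-default weight, like 8139cp's 16 above, move to netif_napi_add_weight(), and Tx-only instances use netif_napi_add_tx(). The poll callbacks below are hypothetical:

    netif_napi_add(dev, &priv->napi, example_poll);             /* default weight 64 */
    netif_napi_add_weight(dev, &priv->napi, example_poll, 16);  /* explicit weight */
    netif_napi_add_tx(dev, &priv->napi_tx, example_tx_poll);    /* Tx-only NAPI */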
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 15b40fd93cd2..469e2e229c6e 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1002,7 +1002,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
dev->netdev_ops = &rtl8139_netdev_ops;
dev->ethtool_ops = &rtl8139_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
+ netif_napi_add(dev, &tp->napi, rtl8139_poll);
/* note: the hardware is not capable of sg/csum/highdma, however
* through the use of skb_copy_and_csum_dev we enable these
@@ -2380,9 +2380,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
static int rtl8139_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 63f0d2d0e87b..b202184eddd4 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -255,10 +255,6 @@ static inline void write_word_mode0(short ioaddr, unsigned short value)
#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
-/* Delay between EEPROM clock transitions. */
-#define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
-
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 8da4b66b71b5..55ef8251feb5 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,10 +23,10 @@ enum mac_version {
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
RTL_GIGA_MAC_VER_11,
- RTL_GIGA_MAC_VER_12,
- RTL_GIGA_MAC_VER_13,
+ /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
+ /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
- RTL_GIGA_MAC_VER_16,
+ /* RTL_GIGA_MAC_VER_16 was merged with VER_10 */
RTL_GIGA_MAC_VER_17,
RTL_GIGA_MAC_VER_18,
RTL_GIGA_MAC_VER_19,
@@ -51,20 +51,20 @@ enum mac_version {
RTL_GIGA_MAC_VER_38,
RTL_GIGA_MAC_VER_39,
RTL_GIGA_MAC_VER_40,
- RTL_GIGA_MAC_VER_41,
+ /* support for RTL_GIGA_MAC_VER_41 has been removed */
RTL_GIGA_MAC_VER_42,
RTL_GIGA_MAC_VER_43,
RTL_GIGA_MAC_VER_44,
- RTL_GIGA_MAC_VER_45,
+ /* support for RTL_GIGA_MAC_VER_45 has been removed */
RTL_GIGA_MAC_VER_46,
- RTL_GIGA_MAC_VER_47,
+ /* support for RTL_GIGA_MAC_VER_47 has been removed */
RTL_GIGA_MAC_VER_48,
- RTL_GIGA_MAC_VER_49,
- RTL_GIGA_MAC_VER_50,
+ /* support for RTL_GIGA_MAC_VER_49 has been removed */
+ /* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
RTL_GIGA_MAC_VER_53,
- RTL_GIGA_MAC_VER_60,
+ /* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_NONE
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 19e2621e0645..a73d061d9fcb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -49,10 +49,8 @@
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
-#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
-#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
@@ -102,12 +100,9 @@ static const struct {
[RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" },
[RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
@@ -131,20 +126,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
[RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
[RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
[RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
[RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
[RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_60] = {"RTL8125A" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
@@ -658,10 +647,8 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
-MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8168FP_3);
-MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
@@ -689,7 +676,7 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
static bool rtl_is_8125(struct rtl8169_private *tp)
{
- return tp->mac_version >= RTL_GIGA_MAC_VER_60;
+ return tp->mac_version >= RTL_GIGA_MAC_VER_61;
}
static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
@@ -892,8 +879,6 @@ static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
if (value & BMCR_RESET || !(value & BMCR_PDOWN))
rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
else
@@ -1207,7 +1192,7 @@ static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
return RTL_DASH_NONE;
@@ -1397,8 +1382,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
- rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
+
+ if (tp->dash_type == RTL_DASH_NONE) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1420,11 +1408,11 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (rtl_fw)
- strlcpy(info->fw_version, rtl_fw->version,
+ strscpy(info->fw_version, rtl_fw->version,
sizeof(info->fw_version));
}
@@ -2008,7 +1996,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168F family. */
{ 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ */
{ 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
@@ -2038,7 +2029,6 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
- { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
{ 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
@@ -2051,19 +2041,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
- { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
- /* FIXME: where did these entries come from ? -- FR
- * Not even r8101 vendor driver knows these id's,
- * so let's disable detection for now. -- HK
- * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
- * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
- */
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
/* 8110 family. */
{ 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
@@ -2085,8 +2066,6 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
if (ver != RTL_GIGA_MAC_NONE && !gmii) {
if (ver == RTL_GIGA_MAC_VER_42)
ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_45)
- ver = RTL_GIGA_MAC_VER_47;
else if (ver == RTL_GIGA_MAC_VER_46)
ver = RTL_GIGA_MAC_VER_48;
}
@@ -2268,7 +2247,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
default:
@@ -2335,7 +2314,6 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
readrq = 512;
@@ -2452,7 +2430,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_63:
@@ -2465,6 +2443,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
}
}
+static void rtl_disable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+}
+
static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
{
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
@@ -2667,10 +2650,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
- rtl_eri_set_bits(tp, 0xd4, 0x1f80);
- break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2678,13 +2658,48 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
}
}
+static void rtl_disable_exit_l1(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
/* Don't enable ASPM in the chip if OS can't control ASPM */
if (enable && tp->aspm_manageable) {
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ /* reset ephy tx/rx disable timer */
+ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
+ /* chip can trigger L1.2 */
+ r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2));
+ break;
+ default:
+ break;
+ }
} else {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
+ break;
+ default:
+ break;
+ }
+
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
}
@@ -2950,7 +2965,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3188,7 +3203,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3239,7 +3254,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3253,45 +3268,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
}
-static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8168ep_1[] = {
- { 0x00, 0xffff, 0x10ab },
- { 0x06, 0xffff, 0xf030 },
- { 0x08, 0xffff, 0x2006 },
- { 0x0d, 0xffff, 0x1666 },
- { 0x0c, 0x3ff0, 0x0000 }
- };
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_1);
-
- rtl_hw_start_8168ep(tp);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
-}
-
-static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8168ep_2[] = {
- { 0x00, 0xffff, 0x10a3 },
- { 0x19, 0xffff, 0xfc00 },
- { 0x1e, 0xffff, 0x20ea }
- };
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_2);
-
- rtl_hw_start_8168ep(tp);
-
- RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
- RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
-}
-
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168ep_3[] = {
@@ -3342,7 +3318,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3586,48 +3562,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
else
rtl8125a_config_eee_mac(tp);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
- udelay(10);
-}
-
-static void rtl_hw_start_8125a_1(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8125a_1[] = {
- { 0x01, 0xffff, 0xa812 },
- { 0x09, 0xffff, 0x520c },
- { 0x04, 0xffff, 0xd000 },
- { 0x0d, 0xffff, 0xf702 },
- { 0x0a, 0xffff, 0x8653 },
- { 0x06, 0xffff, 0x001e },
- { 0x08, 0xffff, 0x3595 },
- { 0x20, 0xffff, 0x9455 },
- { 0x21, 0xffff, 0x99ff },
- { 0x02, 0xffff, 0x6046 },
- { 0x29, 0xffff, 0xfe00 },
- { 0x23, 0xffff, 0xab62 },
-
- { 0x41, 0xffff, 0xa80c },
- { 0x49, 0xffff, 0x520c },
- { 0x44, 0xffff, 0xd000 },
- { 0x4d, 0xffff, 0xf702 },
- { 0x4a, 0xffff, 0x8653 },
- { 0x46, 0xffff, 0x001e },
- { 0x48, 0xffff, 0x3595 },
- { 0x60, 0xffff, 0x9455 },
- { 0x61, 0xffff, 0x99ff },
- { 0x42, 0xffff, 0x6046 },
- { 0x69, 0xffff, 0xfe00 },
- { 0x63, 0xffff, 0xab62 },
- };
-
- rtl_set_def_aspm_entry_latency(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8125a_1);
-
- rtl_hw_start_8125_common(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
+ rtl_disable_rxdvgate(tp);
}
static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
@@ -3686,10 +3621,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
- [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
- [RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
- [RTL_GIGA_MAC_VER_16] = NULL,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
@@ -3713,20 +3645,14 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
- [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
- [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
- [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
- [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
- [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
};
@@ -4121,7 +4047,6 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
padto = max_t(unsigned int, padto, ETH_ZLEN);
@@ -4155,7 +4080,6 @@ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
- u32 transport_offset = (u32)skb_transport_offset(skb);
struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 mss = shinfo->gso_size;
@@ -4172,7 +4096,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
WARN_ON_ONCE(1);
}
- opts[0] |= transport_offset << GTTCPHO_SHIFT;
+ opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
opts[1] |= mss << TD1_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
@@ -4200,7 +4124,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
else
WARN_ON_ONCE(1);
- opts[1] |= transport_offset << TCPHO_SHIFT;
+ opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
} else {
unsigned int padto = rtl_quirk_packet_padto(tp, skb);
@@ -4367,14 +4291,13 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- int transport_offset = skb_transport_offset(skb);
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
features = rtl8168evl_fix_tso(skb, features);
- if (transport_offset > GTTCPHO_MAX &&
+ if (skb_transport_offset(skb) > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -4385,7 +4308,7 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
if (rtl_quirk_packet_padto(tp, skb))
features &= ~NETIF_F_CSUM_MASK;
- if (transport_offset > TCPHO_MAX &&
+ if (skb_transport_offset(skb) > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
}
@@ -4644,8 +4567,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
pm_runtime_idle(&tp->pci_dev->dev);
}
- if (net_ratelimit())
- phy_print_status(tp->phydev);
+ phy_print_status(tp->phydev);
}
static int r8169_phy_connect(struct rtl8169_private *tp)
@@ -4683,7 +4605,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl_pci_commit(tp);
rtl8169_cleanup(tp, true);
-
+ rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
}
@@ -4843,8 +4765,6 @@ static void rtl8169_net_suspend(struct rtl8169_private *tp)
rtl8169_down(tp);
}
-#ifdef CONFIG_PM
-
static int rtl8169_runtime_resume(struct device *dev)
{
struct rtl8169_private *tp = dev_get_drvdata(dev);
@@ -4860,7 +4780,7 @@ static int rtl8169_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused rtl8169_suspend(struct device *device)
+static int rtl8169_suspend(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4873,7 +4793,7 @@ static int __maybe_unused rtl8169_suspend(struct device *device)
return 0;
}
-static int __maybe_unused rtl8169_resume(struct device *device)
+static int rtl8169_resume(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4908,6 +4828,9 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
+ if (tp->dash_type != RTL_DASH_NONE)
+ return -EBUSY;
+
if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
pm_schedule_suspend(device, 10000);
@@ -4915,30 +4838,11 @@ static int rtl8169_runtime_idle(struct device *device)
}
static const struct dev_pm_ops rtl8169_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
- SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
- rtl8169_runtime_idle)
+ SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
+ RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
+ rtl8169_runtime_idle)
};
-#endif /* CONFIG_PM */
-
-static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
-{
- /* WoL fails with 8168b when the receiver is disabled. */
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pci_clear_master(tp->pci_dev);
-
- RTL_W8(tp, ChipCmd, CmdRxEnb);
- rtl_pci_commit(tp);
- break;
- default:
- break;
- }
-}
-
static void rtl_shutdown(struct pci_dev *pdev)
{
struct rtl8169_private *tp = pci_get_drvdata(pdev);
@@ -4950,10 +4854,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF) {
- if (tp->saved_wolopts)
- rtl_wol_shutdown_quirk(tp);
-
+ if (system_state == SYSTEM_POWER_OFF &&
+ tp->dash_type == RTL_DASH_NONE) {
pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -5161,13 +5063,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
rtl_hw_init_8125(tp);
break;
default:
@@ -5187,7 +5089,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
return JUMBO_7K;
/* RTL8168b */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
@@ -5198,37 +5099,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
}
}
-static void rtl_disable_clk(void *data)
-{
- clk_disable_unprepare(data);
-}
-
-static int rtl_get_ether_clk(struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
- struct clk *clk;
- int rc;
-
- clk = devm_clk_get(d, "ether_clk");
- if (IS_ERR(clk)) {
- rc = PTR_ERR(clk);
- if (rc == -ENOENT)
- /* clk-core allows NULL (for suspend / resume) */
- rc = 0;
- else
- dev_err_probe(d, rc, "failed to get clk\n");
- } else {
- tp->clk = clk;
- rc = clk_prepare_enable(clk);
- if (rc)
- dev_err(d, "failed to enable clk: %d\n", rc);
- else
- rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
- }
-
- return rc;
-}
-
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
u8 mac_addr[ETH_ALEN] __aligned(2) = {};
@@ -5255,6 +5125,16 @@ done:
rtl_rar_set(tp, mac_addr);
}
+/* register is set if system vendor successfully tested ASPM 1.2 */
+static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
+{
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
+ return true;
+
+ return false;
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5282,9 +5162,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
/* Get the *optional* external "ether_clk" used on some boards */
- rc = rtl_get_ether_clk(tp);
- if (rc)
- return rc;
+ tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
+ if (IS_ERR(tp->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n");
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -5303,12 +5183,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- /* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
- return -ENODEV;
- }
-
rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
if (rc < 0) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
@@ -5333,7 +5207,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* Chips from RTL8168h partially have issues with L1.2, but seem
* to work fine with L1 and L1.1.
*/
- if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ if (rtl_aspm_is_safe(tp))
+ rc = 0;
+ else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
@@ -5368,7 +5244,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &rtl8169_ethtool_ops;
- netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &tp->napi, rtl8169_poll);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
@@ -5395,12 +5271,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
- netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
- netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
+ netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
+ netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
} else {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
- netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
+ netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
+ netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
}
dev->hw_features |= NETIF_F_RXALL;
@@ -5409,7 +5285,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
- rtl_set_d3_pll_down(tp, true);
+ if (tp->dash_type == RTL_DASH_NONE) {
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+ dev->wol_enabled = 1;
+ }
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
@@ -5460,9 +5341,7 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
-#ifdef CONFIG_PM
- .driver.pm = &rtl8169_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&rtl8169_pm_ops),
};
module_pci_driver(rtl8169_pci_driver);
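The r8169 hunks above retire two chunks of boilerplate at once: the open-coded
rtl_get_ether_clk() helper collapses into a single devm_clk_get_optional_enabled()
call, and the CONFIG_PM ifdeffery plus __maybe_unused annotations give way to the
newer PM-ops macros combined with pm_ptr(). A minimal sketch of the resulting
shape, with a hypothetical "foo" PCI driver standing in for r8169:

#include <linux/clk.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev) { return 0; }    /* quiesce hw */
static int foo_resume(struct device *dev) { return 0; }     /* re-init hw */
static int foo_rt_suspend(struct device *dev) { return 0; }
static int foo_rt_resume(struct device *dev) { return 0; }
static int foo_rt_idle(struct device *dev) { return 0; }

/* Unlike the SET_* variants, these macros always reference the callbacks,
 * so no __maybe_unused is needed; pm_ptr() evaluates to NULL when
 * CONFIG_PM is off and the unreferenced ops are discarded at link time.
 */
static const struct dev_pm_ops foo_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        RUNTIME_PM_OPS(foo_rt_suspend, foo_rt_resume, foo_rt_idle)
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct clk *clk;

        /* optional clock: NULL (not an error) when absent; enabling and
         * the disable-on-unbind cleanup are both managed by devres
         */
        clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "failed to get ether_clk\n");
        return 0;
}

static struct pci_driver foo_driver = {
        .name      = "foo",
        .probe     = foo_probe,
        .driver.pm = pm_ptr(&foo_pm_ops),
};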
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index f7ad5487879b..930496cd34ed 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -429,15 +429,6 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
{ 0x0d, 0xf880 }
};
-static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
-};
-
static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
struct phy_device *phydev,
u16 val)
@@ -455,6 +446,29 @@ static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
r8169_apply_firmware(tp);
}
+static void rtl8168d_1_common(struct phy_device *phydev)
+{
+ u16 val;
+
+ phy_write_paged(phydev, 0x0002, 0x05, 0x669a);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x669a);
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val = phy_read(phydev, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u16 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+}
+
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -469,25 +483,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
phy_modify(phydev, 0x0c, 0x5d00, 0xa200);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
-
- val = phy_read(phydev, 0x0d);
-
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- phy_write(phydev, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- phy_write(phydev, 0x0d, val | set[i]);
- }
+ rtl8168d_1_common(phydev);
} else {
phy_write_paged(phydev, 0x0002, 0x05, 0x6662);
r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662);
@@ -513,24 +509,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
-
- val = phy_read(phydev, 0x0d);
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- phy_write(phydev, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- phy_write(phydev, 0x0d, val | set[i]);
- }
+ rtl8168d_1_common(phydev);
} else {
phy_write_paged(phydev, 0x0002, 0x05, 0x2642);
r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642);
@@ -814,71 +793,6 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_config_eee_phy(phydev);
}
-static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- u16 dout_tapbin;
- u32 data;
-
- r8169_apply_firmware(tp);
-
- /* CHN EST parameters adjust - giga master */
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
- r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
-
- /* CHN EST parameters adjust - giga slave */
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
- r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
-
- /* enable R-tune & PGA-retune function */
- dout_tapbin = 0;
- data = phy_read_paged(phydev, 0x0a46, 0x13);
- data &= 3;
- data <<= 2;
- dout_tapbin |= data;
- data = phy_read_paged(phydev, 0x0a46, 0x12);
- data &= 0xc000;
- data >>= 14;
- dout_tapbin |= data;
- dout_tapbin = ~(dout_tapbin ^ 0x08);
- dout_tapbin <<= 12;
- dout_tapbin &= 0xf000;
-
- r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- rtl8168g_enable_gphy_10m(phydev);
-
- /* SAR ADC performance */
- phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
-
- r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
-
- /* disable phy pfm mode */
- phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(phydev);
- rtl8168h_config_eee_phy(phydev);
-}
-
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -916,27 +830,6 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_config_eee_phy(phydev);
}
-static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- /* Enable PHY auto speed down */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(phydev);
-
- /* Enable EEE auto-fallback function */
- phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* set rg_sel_sdm_rate */
- phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- rtl8168g_disable_aldps(phydev);
- rtl8168g_config_eee_phy(phydev);
-}
-
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1102,44 +995,6 @@ static void rtl8125_legacy_force_mode(struct phy_device *phydev)
phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0);
}
-static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
- phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
- phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
- phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
- phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
-
- r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
- r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
- r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
- r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
- r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
- r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
- r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
- r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
- r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
- r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
- r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
-
- phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
-
- phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
- phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
- phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- rtl8168g_enable_gphy_10m(phydev);
-
- rtl8125a_config_eee_phy(phydev);
-}
-
static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1260,10 +1115,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
[RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
- [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
- [RTL_GIGA_MAC_VER_16] = NULL,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
@@ -1287,20 +1139,14 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
[RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
[RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_41] = NULL,
[RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
[RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
[RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
[RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
};
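The r8169_phy_config.c portion folds two identical RTL8168d init sequences into
rtl8168d_1_common() and rewrites the old { reg, value } batch tables as phylib
paged accesses. The paged helpers bracket each access with a page switch and
restore the previous page afterwards; a short illustrative use (page and
register numbers invented for the example):

#include <linux/phy.h>

static void foo_phy_tweak(struct phy_device *phydev)
{
        /* write reg 0x05 on page 0x0002, then restore the old page */
        phy_write_paged(phydev, 0x0002, 0x05, 0x669a);

        /* read-modify-write within a page: clear bit 7 of reg 0x11 */
        phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
}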
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 08062d73df10..e0f8276cffed 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -189,6 +189,7 @@ enum ravb_reg {
PSR = 0x0528,
PIPR = 0x052c,
CXR31 = 0x0530, /* RZ/G2L only */
+ CXR35 = 0x0540, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
@@ -965,6 +966,13 @@ enum CXR31_BIT {
CXR31_SEL_LINK1 = 0x00000008,
};
+enum CXR35_BIT {
+ CXR35_SEL_XMII = 0x00000003,
+ CXR35_SEL_XMII_RGMII = 0x00000000,
+ CXR35_SEL_XMII_MII = 0x00000002,
+ CXR35_HALFCYC_CLKSW = 0xffff0000,
+};
+
enum CSR0_BIT {
CSR0_TPE = 0x00000010,
CSR0_RPE = 0x00000020,
@@ -1027,8 +1035,11 @@ struct ravb_hw_info {
unsigned tx_counters:1; /* E-MAC has TX counters */
unsigned carrier_counters:1; /* E-MAC has carrier counters */
unsigned multi_irqs:1; /* AVB-DMAC and E-MAC have multiple irqs */
+ unsigned irq_en_dis:1; /* Has separate irq enable and disable regs */
+ unsigned err_mgmt_irqs:1; /* Line1 (Err) and Line2 (Mgmt) irqs are separate */
unsigned gptp:1; /* AVB-DMAC has gPTP support */
unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned gptp_ref_clk:1; /* gPTP has separate reference clock */
unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */
unsigned magic_pkt:1; /* E-MAC supports magic packet detection */
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
@@ -1040,6 +1051,7 @@ struct ravb_private {
void __iomem *addr;
struct clk *clk;
struct clk *refclk;
+ struct clk *gptp_clk;
struct mdiobb_ctrl mdiobb;
u32 num_rx_ring[NUM_RX_QUEUE];
u32 num_tx_ring[NUM_TX_QUEUE];
@@ -1077,6 +1089,8 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
+ int erra_irq;
+ int mgmta_irq;
int rx_irqs[NUM_RX_QUEUE];
int tx_irqs[NUM_TX_QUEUE];
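The new ravb_hw_info members above follow the driver's established match-data
pattern: each per-SoC capability is a one-bit flag in a const struct attached
to the compatible entry, and code paths branch on the flag rather than on the
compatible string. A schematic example (struct, registers and offsets are
invented):

#include <linux/io.h>

struct foo_hw_info {
        unsigned irq_en_dis:1;          /* separate irq enable/disable regs */
        unsigned gptp_ref_clk:1;        /* dedicated gPTP reference clock */
};

static const struct foo_hw_info foo_rzv2m_info = {
        .irq_en_dis = 1,
        .gptp_ref_clk = 1,
};

static void foo_mask_rx_irq(const struct foo_hw_info *info,
                            void __iomem *base, u32 bit)
{
        if (info->irq_en_dis)
                writel(bit, base + 0x58);       /* write-only disable reg */
        else
                writel(readl(base + 0x50) & ~bit, base + 0x50);
}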
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b215cde68e10..36324126db6d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -475,7 +475,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -540,7 +540,13 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+ } else {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+ CXR31_SEL_LINK0);
+ }
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@@ -1124,7 +1130,7 @@ static bool ravb_queue_interrupt(struct net_device *ndev, int q)
if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
if (napi_schedule_prep(&priv->napi[q])) {
/* Mask RX and TX interrupts */
- if (!info->multi_irqs) {
+ if (!info->irq_en_dis) {
ravb_write(ndev, ric0 & ~BIT(q), RIC0);
ravb_write(ndev, tic & ~BIT(q), TIC);
} else {
@@ -1306,7 +1312,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- if (!info->multi_irqs) {
+ if (!info->irq_en_dis) {
ravb_modify(ndev, RIC0, mask, mask);
ravb_modify(ndev, TIC, mask, mask);
} else {
@@ -1432,11 +1438,7 @@ static int ravb_phy_init(struct net_device *ndev)
* at this time.
*/
if (soc_device_match(r8a7795es10)) {
- err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
- goto err_phy_disconnect;
- }
+ phy_set_max_speed(phydev, SPEED_100);
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
@@ -1453,12 +1455,12 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
-err_phy_disconnect:
- phy_disconnect(phydev);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
@@ -1804,12 +1806,23 @@ static int ravb_open(struct net_device *ndev)
ndev, dev, "ch19:tx_nc");
if (error)
goto out_free_irq_nc_rx;
+
+ if (info->err_mgmt_irqs) {
+ error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
+ ndev, dev, "err_a");
+ if (error)
+ goto out_free_irq_nc_tx;
+ error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
+ ndev, dev, "mgmt_a");
+ if (error)
+ goto out_free_irq_erra;
+ }
}
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_nc_tx;
+ goto out_free_irq_mgmta;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
@@ -1829,9 +1842,15 @@ out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
-out_free_irq_nc_tx:
+out_free_irq_mgmta:
if (!info->multi_irqs)
goto out_free_irq;
+ if (info->err_mgmt_irqs)
+ free_irq(priv->mgmta_irq, ndev);
+out_free_irq_erra:
+ if (info->err_mgmt_irqs)
+ free_irq(priv->erra_irq, ndev);
+out_free_irq_nc_tx:
free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -2172,6 +2191,10 @@ static int ravb_close(struct net_device *ndev)
free_irq(priv->tx_irqs[RAVB_BE], ndev);
free_irq(priv->rx_irqs[RAVB_BE], ndev);
free_irq(priv->emac_irq, ndev);
+ if (info->err_mgmt_irqs) {
+ free_irq(priv->erra_irq, ndev);
+ free_irq(priv->mgmta_irq, ndev);
+ }
}
free_irq(ndev->irq, ndev);
@@ -2416,6 +2439,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
+ .irq_en_dis = 1,
.ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
@@ -2444,6 +2468,31 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.magic_pkt = 1,
};
+static const struct ravb_hw_info ravb_rzv2m_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
+ .multi_irqs = 1,
+ .err_mgmt_irqs = 1,
+ .gptp = 1,
+ .gptp_ref_clk = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info gbeth_hw_info = {
.rx_ring_free = ravb_rx_ring_free_gbeth,
.rx_ring_format = ravb_rx_ring_format_gbeth,
@@ -2471,6 +2520,8 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
@@ -2479,11 +2530,15 @@ MODULE_DEVICE_TABLE(of, ravb_match_table);
static int ravb_set_gti(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct device *dev = ndev->dev.parent;
unsigned long rate;
uint64_t inc;
- rate = clk_get_rate(priv->clk);
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
if (!rate)
return -EINVAL;
@@ -2600,10 +2655,14 @@ static int ravb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- if (info->multi_irqs)
- irq = platform_get_irq_byname(pdev, "ch22");
- else
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+ irq = platform_get_irq_byname(pdev, "dia");
+ else
+ irq = platform_get_irq_byname(pdev, "ch22");
+ } else {
irq = platform_get_irq(pdev, 0);
+ }
if (irq < 0) {
error = irq;
goto out_release;
@@ -2645,7 +2704,10 @@ static int ravb_probe(struct platform_device *pdev)
of_property_read_bool(np, "renesas,ether-link-active-low");
if (info->multi_irqs) {
- irq = platform_get_irq_byname(pdev, "ch24");
+ if (info->err_mgmt_irqs)
+ irq = platform_get_irq_byname(pdev, "line3");
+ else
+ irq = platform_get_irq_byname(pdev, "ch24");
if (irq < 0) {
error = irq;
goto out_release;
@@ -2667,6 +2729,22 @@ static int ravb_probe(struct platform_device *pdev)
}
priv->tx_irqs[i] = irq;
}
+
+ if (info->err_mgmt_irqs) {
+ irq = platform_get_irq_byname(pdev, "err_a");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->erra_irq = irq;
+
+ irq = platform_get_irq_byname(pdev, "mgmt_a");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->mgmta_irq = irq;
+ }
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2682,6 +2760,15 @@ static int ravb_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->refclk);
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_disable_refclk;
+ }
+ clk_prepare_enable(priv->gptp_clk);
+ }
+
ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
@@ -2703,7 +2790,7 @@ static int ravb_probe(struct platform_device *pdev)
/* Set GTI value */
error = ravb_set_gti(ndev);
if (error)
- goto out_disable_refclk;
+ goto out_disable_gptp_clk;
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
@@ -2723,7 +2810,7 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_refclk;
+ goto out_disable_gptp_clk;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
@@ -2754,9 +2841,9 @@ static int ravb_probe(struct platform_device *pdev)
goto out_dma_free;
}
- netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
/* Network device register */
error = register_netdev(ndev);
@@ -2786,6 +2873,8 @@ out_dma_free:
/* Stop PTP Clock driver */
if (info->ccc_gac)
ravb_ptp_stop(ndev);
+out_disable_gptp_clk:
+ clk_disable_unprepare(priv->gptp_clk);
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
out_release:
@@ -2807,6 +2896,7 @@ static int ravb_remove(struct platform_device *pdev)
if (info->ccc_gac)
ravb_ptp_stop(ndev);
+ clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
@@ -2854,7 +2944,6 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int ret;
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2863,9 +2952,7 @@ static int ravb_wol_restore(struct net_device *ndev)
/* Disable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
- ret = ravb_close(ndev);
- if (ret < 0)
- return ret;
+ ravb_close(ndev);
return disable_irq_wake(priv->emac_irq);
}
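ravb_main.c wires up the two extra RZ/V2M interrupts by name. The lookup
pattern is the standard one: platform_get_irq_byname() yields either a usable
IRQ number or a negative errno (possibly -EPROBE_DEFER) that must be passed up
unchanged. A minimal sketch with hypothetical storage:

#include <linux/platform_device.h>

static int foo_get_named_irqs(struct platform_device *pdev,
                              int *err_irq, int *mgmt_irq)
{
        int irq;

        irq = platform_get_irq_byname(pdev, "err_a");
        if (irq < 0)
                return irq;     /* may be -EPROBE_DEFER; propagate as-is */
        *err_irq = irq;

        irq = platform_get_irq_byname(pdev, "mgmt_a");
        if (irq < 0)
                return irq;
        *mgmt_irq = irq;
        return 0;
}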
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index c099656dd75b..87c4306d66ec 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -198,7 +198,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- if (!info->multi_irqs)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
else if (on)
ravb_write(ndev, GIE_PTCS, GIE);
@@ -254,7 +254,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- if (!info->multi_irqs)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
else
ravb_write(ndev, GIE_PTMS0, GIE);
@@ -266,7 +266,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- if (!info->multi_irqs)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTME, 0);
else
ravb_write(ndev, GID_PTMD0, GID);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d947a628e166..71a499113308 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2026,15 +2026,11 @@ static int sh_eth_phy_init(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
- int err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
- phy_disconnect(phydev);
- return err;
- }
- }
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ phy_set_max_speed(phydev, SPEED_100);
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -3372,7 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
- netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
+ netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
/* network device register */
ret = register_netdev(ndev);
@@ -3450,9 +3446,7 @@ static int sh_eth_wol_restore(struct net_device *ndev)
* both be reset and all registers restored. This is what
* happens during suspend and resume without WoL enabled.
*/
- ret = sh_eth_close(ndev);
- if (ret < 0)
- return ret;
+ sh_eth_close(ndev);
ret = sh_eth_open(ndev);
if (ret < 0)
return ret;
@@ -3464,7 +3458,7 @@ static int sh_eth_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
@@ -3483,7 +3477,7 @@ static int sh_eth_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
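The sh_eth change mirrors the ravb one above: the error check around
phy_set_max_speed() is dropped (capping a just-connected PHY at SPEED_100 is
not expected to fail), and the MAC claims responsibility for PHY power
management so phylib's mdio_bus_phy_suspend()/resume() stay out of the way.
Sketch, with "foo" as a placeholder:

#include <linux/phy.h>

static int foo_phy_init(struct net_device *ndev, struct phy_device *phydev)
{
        phy_set_max_speed(phydev, SPEED_100);   /* return value unused */

        /* the MAC driver suspends/resumes the PHY itself */
        phydev->mac_managed_pm = true;

        phy_attached_info(phydev);
        return 0;
}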
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3fcea211716c..9e59669a93dd 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -129,7 +129,7 @@ static int rocker_reg_test(const struct rocker *rocker)
u64 test_reg;
u64 rnd;
- rnd = prandom_u32();
+ rnd = get_random_u32();
rnd >>= 1;
rocker_write32(rocker, TEST_REG, rnd);
test_reg = rocker_read32(rocker, TEST_REG);
@@ -139,9 +139,9 @@ static int rocker_reg_test(const struct rocker *rocker)
return -EIO;
}
- rnd = prandom_u32();
+ rnd = get_random_u32();
rnd <<= 31;
- rnd |= prandom_u32();
+ rnd |= get_random_u32();
rocker_write64(rocker, TEST_REG64, rnd);
test_reg = rocker_read64(rocker, TEST_REG64);
if (test_reg != rnd * 2) {
@@ -224,7 +224,7 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
if (err)
goto unmap;
- prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+ get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
expect[i] = ~buf[i];
err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
@@ -2226,8 +2226,8 @@ rocker_port_set_link_ksettings(struct net_device *dev,
static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
@@ -2573,10 +2573,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
- netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
- NAPI_POLL_WEIGHT);
- netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx);
+ netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
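rocker's self-tests move from the prandom_* helpers to the core get_random_*
API; the prandom names are now reserved for seeded, reproducible sequences,
while get_random_u32()/get_random_bytes() are the drop-in replacements for
plain entropy uses like these. A register test in the same shape as
rocker_reg_test(), with a hypothetical echo-doubling test register:

#include <linux/io.h>
#include <linux/random.h>

static int foo_reg_test(void __iomem *test_reg)
{
        u32 rnd = get_random_u32() >> 1;        /* keep rnd * 2 in 32 bits */

        writel(rnd, test_reg);
        if (readl(test_reg) != rnd * 2)         /* hw echoes value doubled */
                return -EIO;
        return 0;
}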
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bc70c6abd6a5..58cf7cc54f40 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
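The one-line rocker_ofdpa.c fix swaps GFP_KERNEL for GFP_ATOMIC:
ofdpa_port_ipv4_neigh() appears to be reachable from the neighbour-event path,
which can run in atomic context where a sleeping allocation is not allowed.
Schematic version:

#include <linux/list.h>
#include <linux/slab.h>

struct foo_neigh_entry {
        struct list_head node;
        __be32 ip_addr;
};

static struct foo_neigh_entry *foo_neigh_alloc(__be32 ip_addr)
{
        struct foo_neigh_entry *entry;

        /* may run in softirq / under a spinlock: must not sleep */
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry)
                entry->ip_addr = ip_addr;
        return entry;
}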
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 98edb01024f0..8ba017ec9849 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -175,8 +175,8 @@ static int sxgbe_set_eee(struct net_device *dev,
static void sxgbe_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 sxgbe_getmsglevel(struct net_device *dev)
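Both rocker and sxgbe above take part in the treewide strlcpy() -> strscpy()
conversion: strscpy() reads at most the destination size from the source (so
an unterminated source cannot be overread) and signals truncation with -E2BIG
instead of returning the source length. Typical drvinfo usage, with a
placeholder version string:

#include <linux/ethtool.h>
#include <linux/string.h>

static void foo_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        /* bounded copies; the -E2BIG truncation result is unused here */
        strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strscpy(info->version, "0.1", sizeof(info->version));
}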
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 32161a56726c..9664f029fa16 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -89,7 +89,7 @@ static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
- /* Exit and disable EEE in case of we are are in LPI state. */
+ /* Exit and disable EEE in case we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
@@ -127,7 +127,7 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
- if (phy_init_eee(ndev->phydev, 1))
+ if (phy_init_eee(ndev->phydev, true))
return false;
priv->eee_active = 1;
@@ -2143,7 +2143,7 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
}
- netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+ netif_napi_add(ndev, &priv->napi, sxgbe_poll);
spin_lock_init(&priv->stats_lock);
@@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion\n", __func__);
- return -EINVAL;
+ return 1;
}
__setup("sxgbeeth=", sxgbe_cmdline_opt);
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 16a4cbae9326..c672f92d65e9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -749,6 +749,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
+ u8 addr[ETH_ALEN];
ether3_banner();
@@ -776,7 +777,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
priv(dev)->seeq = priv(dev)->base + data->base_offset;
dev->irq = ec->irq;
- ether3_addr(dev->dev_addr, ec);
+ ether3_addr(addr, ec);
+ eth_hw_addr_set(dev, addr);
priv(dev)->dev = dev;
timer_setup(&priv(dev)->timer, ether3_ledoff, 0);
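ether3's two-line conversion follows netdev->dev_addr becoming const: drivers
assemble the address in a local buffer and install it with eth_hw_addr_set()
instead of writing dev->dev_addr in place. Sketch with a hypothetical ROM
reader:

#include <linux/etherdevice.h>

static void foo_set_mac_from_rom(struct net_device *dev, const u8 *rom)
{
        u8 addr[ETH_ALEN];

        memcpy(addr, rom, ETH_ALEN);    /* e.g. copied out of card ROM */
        eth_hw_addr_set(dev, addr);     /* the sanctioned way to write it */
}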
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 97ce64079855..0950e6b0508f 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -17,14 +17,14 @@ config NET_VENDOR_SOLARFLARE
if NET_VENDOR_SOLARFLARE
config SFC
- tristate "Solarflare SFC9000/SFC9100/EF100-family support"
+ tristate "Solarflare SFC9100/EF100-family support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
select CRC32
help
This driver supports 10/40-gigabit Ethernet cards based on
- the Solarflare SFC9000-family and SFC9100-family controllers.
+ the Solarflare SFC9100-family controllers.
It also supports 10/25/40/100-gigabit Ethernet cards based
on the Solarflare EF100 networking IP in Xilinx FPGAs.
@@ -32,7 +32,7 @@ config SFC
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare SFC9000/SFC9100-family MTD support"
+ bool "Solarflare SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
help
@@ -40,22 +40,22 @@ config SFC_MTD
(e.g. /dev/mtd1). This is required to update the firmware or
the boot configuration under Linux.
config SFC_MCDI_MON
- bool "Solarflare SFC9000/SFC9100-family hwmon support"
+ bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
default y
help
This exposes the on-board firmware-managed sensors as a
hardware monitor device.
config SFC_SRIOV
- bool "Solarflare SFC9000-family SR-IOV support"
+ bool "Solarflare SFC9100-family SR-IOV support"
depends on SFC && PCI_IOV
default y
help
- This enables support for the SFC9000 I/O Virtualization
+ This enables support for the Single Root I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
config SFC_MCDI_LOGGING
- bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+ bool "Solarflare SFC9100-family MCDI logging support"
depends on SFC
default y
help
@@ -65,5 +65,6 @@ config SFC_MCDI_LOGGING
a sysfs file 'mcdi_logging' under the PCI device.
source "drivers/net/ethernet/sfc/falcon/Kconfig"
+source "drivers/net/ethernet/sfc/siena/Kconfig"
endif # NET_VENDOR_SOLARFLARE
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8bd01c429f91..b5e45fc6337e 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
sfc-y += efx.o efx_common.o efx_channels.o nic.o \
- farch.o siena.o ef10.o \
+ ef10.o \
tx.o tx_common.o tx_tso.o rx.o rx_common.o \
selftest.o ethtool.o ethtool_common.o ptp.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
@@ -8,8 +8,10 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
+ mae.o tc.o tc_bindings.o
obj-$(CONFIG_SFC) += sfc.o
obj-$(CONFIG_SFC_FALCON) += falcon/
+obj-$(CONFIG_SFC_SIENA) += siena/
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index cf366ed2557c..7022fb2005a2 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1932,7 +1932,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
efx_update_sw_stats(efx, stats);
out:
+ /* releasing a DMA coherent buffer with BH disabled can panic */
+ spin_unlock_bh(&efx->stats_lock);
efx_nic_free_buffer(efx, &stats_buf);
+ spin_lock_bh(&efx->stats_lock);
return rc;
}
@@ -2256,7 +2259,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
@@ -2535,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
if (rc)
return rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
if (rc)
- return rc;
+ goto out_unlock;
list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
if (rc)
goto fail_add_vlan;
}
- return 0;
+ goto out_unlock;
fail_add_vlan:
efx_mcdi_filter_table_remove(efx);
+out_unlock:
+ up_write(&efx->filter_sem);
return rc;
}
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3208,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
@@ -3240,9 +3251,7 @@ restore_vadaptor:
if (rc2)
goto reset_nic;
restore_filters:
- down_write(&efx->filter_sem);
rc2 = efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3268,12 +3277,35 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
bool was_enabled = efx->port_enabled;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ /* If this function is a VF and we have access to the parent PF,
+ * then use the PF control path to attempt to change the VF MAC address.
+ */
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac[ETH_ALEN];
+
+ /* net_dev->dev_addr can be zeroed by efx_net_stop in
+ * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+ */
+ ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+ if (!rc)
+ return 0;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Updating VF mac via PF failed (%d), setting directly\n",
+ rc);
+ }
+#endif
+
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
+ efx_ef10_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -3283,47 +3315,12 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (was_enabled)
efx_net_open(efx->net_dev);
efx_device_attach_if_not_resetting(efx);
-#ifdef CONFIG_SFC_SRIOV
- if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
- if (rc == -EPERM) {
- struct efx_nic *efx_pf;
-
- /* Switch to PF and change MAC address on vport */
- efx_pf = pci_get_drvdata(pci_dev_pf);
-
- rc = efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr);
- } else if (!rc) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
- struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
- unsigned int i;
-
- /* MAC address successfully changed by VF (with MAC
- * spoofing) so update the parent PF if possible.
- */
- for (i = 0; i < efx_pf->vf_count; ++i) {
- struct ef10_vf *vf = nic_data->vf + i;
-
- if (vf->efx == efx) {
- ether_addr_copy(vf->mac,
- efx->net_dev->dev_addr);
- return 0;
- }
- }
- }
- } else
-#endif
if (rc == -EPERM) {
netif_err(efx, drv, efx->net_dev,
"Cannot change MAC address; use sfboot to enable"
@@ -3579,6 +3576,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
n_parts++;
}
+ if (!n_parts) {
+ kfree(parts);
+ return 0;
+ }
+
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
@@ -3869,7 +3871,7 @@ static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int efx_tunnel_type, rc;
@@ -3929,7 +3931,7 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int rc;
@@ -3990,6 +3992,30 @@ static unsigned int ef10_check_caps(const struct efx_nic *efx,
}
}
+static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
+{
+ unsigned int ret = EFX_RECYCLE_RING_SIZE_10G;
+
+ /* There is no difference between PFs and VFs. The size is based on
+ * the maximum link speed of a given NIC.
+ */
+ switch (efx->pci_dev->device & 0xfff) {
+ case 0x0903: /* Farmingdale can do up to 10G */
+ break;
+ case 0x0923: /* Greenport can do up to 40G */
+ case 0x0a03: /* Medford can do up to 40G */
+ ret *= 4;
+ break;
+ default: /* Medford2 can do up to 100G */
+ ret *= 10;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC64))
+ ret *= 4;
+
+ return ret;
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4060,7 +4086,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
@@ -4106,6 +4132,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4176,7 +4203,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
@@ -4243,4 +4270,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
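Two locking details in the ef10.c changes are worth spelling out: the filter
table probe/remove paths now take efx->filter_sem themselves rather than
trusting every caller to, and the VF stats path drops stats_lock around
efx_nic_free_buffer() because, as the new comment says, freeing a DMA-coherent
buffer with BH disabled can panic. The wrapper shape, schematically:

#include <linux/rwsem.h>

struct foo_nic;
void foo_do_filter_table_remove(struct foo_nic *nic);   /* hypothetical */

static DECLARE_RWSEM(foo_filter_sem);

static void foo_filter_table_remove(struct foo_nic *nic)
{
        /* centralise the write lock so no caller can forget it */
        down_write(&foo_filter_sem);
        foo_do_filter_table_remove(nic);
        up_write(&foo_filter_sem);
}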
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index ffdb36715a49..71aab3d0480f 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -17,6 +17,7 @@
#include "io.h"
#include "ef100_nic.h"
#include "ef100_netdev.h"
+#include "ef100_sriov.h"
#include "ef100_regs.h"
#include "ef100.h"
@@ -422,61 +423,61 @@ static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
*/
static void ef100_pci_remove(struct pci_dev *pci_dev)
{
- struct efx_nic *efx;
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ struct efx_probe_data *probe_data;
- efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
- rtnl_lock();
- dev_close(efx->net_dev);
- rtnl_unlock();
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ ef100_remove_netdev(probe_data);
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_struct_tc(efx);
+#endif
- /* Unregistering our netdev notifier triggers unbinding of TC indirect
- * blocks, so we have to do it before PCI removal.
- */
- unregister_netdevice_notifier(&efx->netdev_notifier);
ef100_remove(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
- pci_set_drvdata(pci_dev, NULL);
- efx_fini_struct(efx);
- free_netdev(efx->net_dev);
+ pci_dbg(pci_dev, "shutdown successful\n");
pci_disable_pcie_error_reporting(pci_dev);
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ kfree(probe_data);
};
static int ef100_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
struct ef100_func_ctl_window fcw = { 0 };
- struct net_device *net_dev;
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
- if (!net_dev)
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
efx->type = (const struct efx_nic_type *)entry->driver_data;
+ efx->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, efx);
- SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail;
efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
- netif_info(efx, probe, efx->net_dev,
- "Solarflare EF100 NIC detected\n");
+ pci_info(pci_dev, "Solarflare EF100 NIC detected\n");
rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Error looking for ef100 function control window, rc=%d\n",
- rc);
+ pci_err(pci_dev,
+ "Error looking for ef100 function control window, rc=%d\n",
+ rc);
goto fail;
}
@@ -488,8 +489,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
}
if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
- netif_err(efx, probe, efx->net_dev,
- "Func control window overruns BAR\n");
+ pci_err(pci_dev, "Func control window overruns BAR\n");
rc = -EIO;
goto fail;
}
@@ -503,19 +503,16 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
efx->reg_base = fcw.offset;
- efx->netdev_notifier.notifier_call = ef100_netdev_event;
- rc = register_netdevice_notifier(&efx->netdev_notifier);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Failed to register netdevice notifier, rc=%d\n", rc);
+ rc = efx->type->probe(efx);
+ if (rc)
goto fail;
- }
- rc = efx->type->probe(efx);
+ efx->state = STATE_PROBED;
+ rc = ef100_probe_netdev(probe_data);
if (rc)
goto fail;
- netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+ pci_dbg(pci_dev, "initialisation successful\n");
return 0;
@@ -524,6 +521,23 @@ fail:
return rc;
}
+#ifdef CONFIG_SFC_SRIOV
+static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct efx_nic *efx = pci_get_drvdata(dev);
+ int rc;
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ }
+ return -ENOENT;
+}
+#endif
+
/* PCI device ID table */
static const struct pci_device_id ef100_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */
@@ -538,6 +552,9 @@ struct pci_driver ef100_pci_driver = {
.id_table = ef100_pci_table,
.probe = ef100_pci_probe,
.remove = ef100_pci_remove,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = ef100_pci_sriov_configure,
+#endif
.err_handler = &efx_err_handlers,
};
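The ef100.c rework decouples driver state from the net_device: a struct
efx_probe_data is kzalloc'd at PCI probe time, and when a netdev is created
later its priv area stores only a pointer back to that data (note that
alloc_etherdev_mq(sizeof(probe_data), ...) sizes the priv area as one
pointer). A port-less PCI function can then probe successfully with no netdev
at all. Schematic version:

#include <linux/etherdevice.h>
#include <linux/pci.h>

struct foo_probe_data {
        struct pci_dev *pci_dev;
};

static int foo_create_netdev(struct foo_probe_data *pd)
{
        struct foo_probe_data **priv;
        struct net_device *net_dev;

        /* priv area holds a single pointer, not the whole driver state */
        net_dev = alloc_etherdev_mq(sizeof(pd), 8);
        if (!net_dev)
                return -ENOMEM;

        priv = netdev_priv(net_dev);
        *priv = pd;     /* later: pd = *(struct foo_probe_data **)netdev_priv(dev) */
        return 0;
}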
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 5dba4125d953..135ece2f1375 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -26,7 +26,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
@@ -43,6 +43,8 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_sset_count = efx_ethtool_get_sset_count,
+ .get_priv_flags = efx_ethtool_get_priv_flags,
+ .set_priv_flags = efx_ethtool_set_priv_flags,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.get_link_ksettings = efx_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 67fe44db6b61..88fa29572e23 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -22,6 +22,8 @@
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
+#include "ef100_sriov.h"
+#include "tc_bindings.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -79,11 +81,12 @@ static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
*/
static int ef100_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
+ efx_detach_reps(efx);
netif_stop_queue(net_dev);
efx_stop_all(efx);
efx_mcdi_mac_fini_stats(efx);
@@ -96,13 +99,15 @@ static int ef100_net_stop(struct net_device *net_dev)
efx_mcdi_free_vis(efx);
efx_remove_interrupts(efx);
+ efx->state = STATE_NET_DOWN;
+
return 0;
}
/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int allocated_vis;
int rc;
@@ -172,6 +177,10 @@ static int ef100_net_open(struct net_device *net_dev)
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
+ efx->state = STATE_NET_UP;
+ if (netif_running(efx->net_dev))
+ efx_attach_reps(efx);
+
return 0;
fail:
@@ -189,7 +198,16 @@ fail:
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
+}
+
+netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
+ struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct efx_rep *efv)
+{
struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
int rc;
@@ -204,7 +222,7 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
}
tx_queue = &channel->tx_queue[0];
- rc = ef100_enqueue_skb(tx_queue, skb);
+ rc = __ef100_enqueue_skb(tx_queue, skb, efv);
if (rc == 0)
return NETDEV_TX_OK;
@@ -229,6 +247,9 @@ static const struct net_device_ops ef100_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
+#ifdef CONFIG_SFC_SRIOV
+ .ndo_setup_tc = efx_tc_setup,
+#endif
};
/* Netdev registration
@@ -239,13 +260,14 @@ int ef100_netdev_event(struct notifier_block *this,
struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
+ if (efx->net_dev == net_dev &&
+ (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
ef100_update_name(efx);
return NOTIFY_DONE;
}
-int ef100_register_netdev(struct efx_nic *efx)
+static int ef100_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
int rc;
@@ -271,7 +293,7 @@ int ef100_register_netdev(struct efx_nic *efx)
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
- efx->state = STATE_READY;
+ efx->state = STATE_NET_DOWN;
rtnl_unlock();
efx_init_mcdi_logging(efx);
@@ -283,11 +305,123 @@ fail_locked:
return rc;
}
-void ef100_unregister_netdev(struct efx_nic *efx)
+static void ef100_unregister_netdev(struct efx_nic *efx)
{
if (efx_dev_registered(efx)) {
efx_fini_mcdi_logging(efx);
- efx->state = STATE_UNINIT;
+ efx->state = STATE_PROBED;
unregister_netdev(efx->net_dev);
}
}
+
+void ef100_remove_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+
+ if (!efx->net_dev)
+ return;
+
+ rtnl_lock();
+ dev_close(efx->net_dev);
+ rtnl_unlock();
+
+ unregister_netdevice_notifier(&efx->netdev_notifier);
+#if defined(CONFIG_SFC_SRIOV)
+ if (!efx->type->is_vf)
+ efx_ef100_pci_sriov_disable(efx, true);
+#endif
+
+ ef100_unregister_netdev(efx);
+
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_tc(efx);
+#endif
+
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+ efx_fini_channels(efx);
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+
+ free_netdev(efx->net_dev);
+ efx->net_dev = NULL;
+ efx->state = STATE_PROBED;
+}
+
+int ef100_probe_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+ struct efx_probe_data **probe_ptr;
+ struct net_device *net_dev;
+ int rc;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+ pci_info(efx->pci_dev, "No network port on this PCI function\n");
+ return 0;
+ }
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
+ SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
+
+ net_dev->features |= efx->type->offload_features;
+ net_dev->hw_features |= efx->type->offload_features;
+ net_dev->hw_enc_features |= efx->type->offload_features;
+ net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ netif_set_tso_max_segs(net_dev,
+ ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+ efx->mdio.dev = net_dev;
+
+ rc = efx_ef100_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail;
+
+ rc = ef100_phy_probe(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ down_write(&efx->filter_sem);
+ rc = ef100_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ goto fail;
+
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+
+ /* Don't fail init if RSS setup doesn't work. */
+ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+
+ rc = ef100_register_netdev(efx);
+ if (rc)
+ goto fail;
+
+ if (!efx->type->is_vf) {
+ rc = ef100_probe_netdev_pf(efx);
+ if (rc)
+ goto fail;
+ }
+
+ efx->netdev_notifier.notifier_call = ef100_netdev_event;
+ rc = register_netdevice_notifier(&efx->netdev_notifier);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to register netdevice notifier, rc=%d\n", rc);
+ goto fail;
+ }
+
+fail:
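+ /* also reached on success, when rc is 0 */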
+ return rc;
+}
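A note on the netdev_priv() -> efx_netdev_priv() conversions throughout this patch: ef100_probe_netdev() above now stores a pointer to the efx_probe_data in the netdev private area (probe_ptr = netdev_priv(net_dev); *probe_ptr = probe_data;), so the private area no longer holds struct efx_nic itself. The accessor is defined outside this patch; a sketch consistent with the probe code above, offered as an assumption rather than the actual helper:

	/* Sketch implied by ef100_probe_netdev(): chase the stored
	 * probe_data pointer to reach the embedded efx_nic.
	 */
	static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
	{
		struct efx_probe_data **probe_ptr = netdev_priv(dev);
		struct efx_probe_data *probe_data = *probe_ptr;

		return &probe_data->efx;
	}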
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h
index d40abb7cc086..86bf985e0951 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.h
+++ b/drivers/net/ethernet/sfc/ef100_netdev.h
@@ -10,8 +10,13 @@
*/
#include <linux/netdevice.h>
+#include "ef100_rep.h"
+netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
+ struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct efx_rep *efv);
int ef100_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
-int ef100_register_netdev(struct efx_nic *efx);
-void ef100_unregister_netdev(struct efx_nic *efx);
+int ef100_probe_netdev(struct efx_probe_data *probe_data);
+void ef100_remove_netdev(struct efx_probe_data *probe_data);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index f79b14a119ae..ad686c671ab8 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -22,7 +22,11 @@
#include "mcdi_filters.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
+#include "ef100_sriov.h"
#include "ef100_netdev.h"
+#include "tc.h"
+#include "mae.h"
+#include "rx_common.h"
#define EF100_MAX_VIS 4096
#define EF100_NUM_MCDI_BUFFERS 1
@@ -146,7 +150,7 @@ static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
-static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
+int efx_ef100_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -325,7 +329,7 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ef100_phy_probe(struct efx_nic *efx)
+int ef100_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
int rc;
@@ -363,7 +367,7 @@ static int ef100_phy_probe(struct efx_nic *efx)
return 0;
}
-static int ef100_filter_table_probe(struct efx_nic *efx)
+int ef100_filter_table_probe(struct efx_nic *efx)
{
return efx_mcdi_filter_table_probe(efx, true);
}
@@ -372,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
{
int rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
- if (rc) {
- efx_mcdi_filter_table_down(efx);
- return rc;
- }
+ if (rc)
+ goto fail_unspec;
rc = efx_mcdi_filter_add_vlan(efx, 0);
- if (rc) {
- efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
- efx_mcdi_filter_table_down(efx);
- }
+ if (rc)
+ goto fail_vlan0;
+ /* Drop the lock: we've finished altering table existence, and
+ * filter insertion will need to take the lock for read.
+ */
+ up_write(&efx->filter_sem);
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_tc_insert_rep_filters(efx);
+ /* Rep filter failure is nonfatal */
+ if (rc)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to insert representor filters, rc %d\n",
+ rc);
+#endif
+ return 0;
+fail_vlan0:
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+fail_unspec:
+ efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
return rc;
}
static void ef100_filter_table_down(struct efx_nic *efx)
{
+#ifdef CONFIG_SFC_SRIOV
+ efx_tc_remove_rep_filters(efx);
+#endif
+ down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
}
/* Other
@@ -696,173 +720,37 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
}
}
-/* NIC level access functions
- */
-#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
- NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
- NETIF_F_HW_VLAN_CTAG_TX)
-
-const struct efx_nic_type ef100_pf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = false,
- .probe = ef100_probe_pf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
-
- .check_caps = ef100_check_caps,
-
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
-#endif
-
- .get_phys_port_id = efx_ef100_get_phys_port_id,
-
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
+{
+ /* Maximum link speed for Riverhead is 100G, so use ten times
+ * the 10G recycle ring size.
+ */
+ return 10 * EFX_RECYCLE_RING_SIZE_10G;
+}
- .reconfigure_mac = ef100_reconfigure_mac,
- .reconfigure_port = efx_mcdi_port_reconfigure,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef100_get_base_mport(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ u32 selector, id;
+ int rc;
- /* Per-type bar/size configuration not used on ef100. Location of
- * registers is defined by extended capabilities.
+ /* Construct mport selector for "physical network port" */
+ efx_mae_mport_wire(efx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &id);
+ if (rc)
+ return rc;
+ /* The ID should always fit in 16 bits, because that's how wide the
+ * corresponding fields in the RX prefix & TX override descriptor are
*/
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
-const struct efx_nic_type ef100_vf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = true,
- .probe = ef100_probe_vf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
- .check_caps = ef100_check_caps,
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+ if (id >> 16)
+ netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
+ id);
+ nic_data->base_mport = id;
+ nic_data->have_mport = true;
+ return 0;
+}
#endif
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
-
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
static int compare_versions(const char *a, const char *b)
{
int a_major, a_minor, a_point, a_patch;
@@ -995,12 +883,15 @@ static int ef100_process_design_param(struct efx_nic *efx,
}
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
- nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
- netif_set_gso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
+ nic_data->tso_max_payload_len = min_t(u64, reader->value,
+ GSO_LEGACY_MAX_SIZE);
+ netif_set_tso_max_size(efx->net_dev,
+ nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- netif_set_gso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
+ netif_set_tso_max_segs(efx->net_dev,
+ nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
@@ -1061,8 +952,7 @@ static int ef100_check_design_params(struct efx_nic *efx)
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
- netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
- total_len);
+ pci_dbg(efx->pci_dev, "%u bytes of design parameters\n", total_len);
while (offset < total_len) {
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
@@ -1101,9 +991,9 @@ out:
static int ef100_probe_main(struct efx_nic *efx)
{
unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
- struct net_device *net_dev = efx->net_dev;
struct ef100_nic_data *nic_data;
char fw_version[32];
+ u32 priv_mask = 0;
int i, rc;
if (WARN_ON(bar_size == 0))
@@ -1114,23 +1004,18 @@ static int ef100_probe_main(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
nic_data->efx = efx;
- net_dev->features |= efx->type->offload_features;
- net_dev->hw_features |= efx->type->offload_features;
- net_dev->hw_enc_features |= efx->type->offload_features;
- net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ efx->max_vis = EF100_MAX_VIS;
/* Populate design-parameter defaults */
nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- netif_set_gso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unsupported design parameters\n");
+ pci_err(efx->pci_dev, "Unsupported design parameters\n");
goto fail;
}
@@ -1167,12 +1052,6 @@ static int ef100_probe_main(struct efx_nic *efx)
/* Post-IO section. */
rc = efx_mcdi_init(efx);
- if (!rc && efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
- netif_info(efx, probe, efx->net_dev,
- "No network port on this PCI function");
- rc = -ENODEV;
- }
if (rc)
goto fail;
/* Reset (most) configuration for this function */
@@ -1188,67 +1067,43 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_ef100_init_datapath_caps(efx);
- if (rc < 0)
- goto fail;
-
- efx->max_vis = EF100_MAX_VIS;
-
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail;
efx->port_num = rc;
efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
- netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);
+ pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version);
+
+ rc = efx_mcdi_get_privilege_mask(efx, &priv_mask);
+ if (rc) /* non-fatal, and priv_mask will still be 0 */
+ pci_info(efx->pci_dev,
+ "Failed to get privilege mask from FW, rc %d\n", rc);
+ nic_data->grp_mae = !!(priv_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE);
if (compare_versions(fw_version, "1.1.0.1000") < 0) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
+ pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
rc = -EINVAL;
goto fail;
}
if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
+ pci_info(efx->pci_dev, "Firmware uses unsolicited-event credits\n");
rc = -EINVAL;
goto fail;
}
- rc = ef100_phy_probe(efx);
- if (rc)
- goto fail;
-
- down_write(&efx->filter_sem);
- rc = ef100_filter_table_probe(efx);
- up_write(&efx->filter_sem);
- if (rc)
- goto fail;
-
- netdev_rss_key_fill(efx->rss_context.rx_hash_key,
- sizeof(efx->rss_context.rx_hash_key));
-
- /* Don't fail init if RSS setup doesn't work. */
- efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
-
- rc = ef100_register_netdev(efx);
- if (rc)
- goto fail;
-
return 0;
fail:
return rc;
}
-int ef100_probe_pf(struct efx_nic *efx)
+int ef100_probe_netdev_pf(struct efx_nic *efx)
{
+ struct ef100_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
- struct ef100_nic_data *nic_data;
- int rc = ef100_probe_main(efx);
-
- if (rc)
- goto fail;
+ int rc;
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr);
if (rc)
goto fail;
@@ -1256,6 +1111,37 @@ int ef100_probe_pf(struct efx_nic *efx)
eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
+ if (!nic_data->grp_mae)
+ return 0;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_init_struct_tc(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef100_get_base_mport(efx);
+ if (rc) {
+ netif_warn(efx, probe, net_dev,
+ "Failed to probe base mport rc %d; representors will not function\n",
+ rc);
+ }
+
+ rc = efx_init_tc(efx);
+ if (rc) {
+ /* Either we don't have an MAE at all (i.e. legacy v-switching),
+ * or we do but we failed to probe it. In the latter case, we
+ * may not have set up default rules, in which case we won't be
+ * able to pass any traffic. However, we don't fail the probe,
+ * because the user might need to use the netdevice to apply
+ * configuration changes to fix whatever's wrong with the MAE.
+ */
+ netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
+ rc);
+ } else {
+ net_dev->features |= NETIF_F_HW_TC;
+ efx->fixed_features |= NETIF_F_HW_TC;
+ }
+#endif
return 0;
fail:
@@ -1271,14 +1157,6 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
- ef100_unregister_netdev(efx);
-
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
- efx_fini_channels(efx);
- kfree(efx->phy_data);
- efx->phy_data = NULL;
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1286,3 +1164,175 @@ void ef100_remove(struct efx_nic *efx)
kfree(nic_data);
efx->nic_data = NULL;
}
+
+/* NIC level access functions
+ */
+#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
+ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
+ NETIF_F_HW_VLAN_CTAG_TX)
+
+const struct efx_nic_type ef100_pf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = false,
+ .probe = ef100_probe_main,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+
+ .check_caps = ef100_check_caps,
+
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .get_phys_port_id = efx_ef100_get_phys_port_id,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef100_sriov_configure,
+#endif
+
+ /* Per-type bar/size configuration not used on ef100. Location of
+ * registers is defined by extended capabilities.
+ */
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+const struct efx_nic_type ef100_vf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = true,
+ .probe = ef100_probe_vf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+ .check_caps = ef100_check_caps,
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
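The filter-table rework in this file also changes where filter_sem is taken: ef100_filter_table_up()/_down() now hold it for write across changes to the table's existence, and drop it before filter insertion, which (per the comment at the up_write() call) takes the lock for read. This is the usual rw_semaphore split between rare structural changes and the concurrent fast path; a generic sketch of the pattern, not sfc code:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_table_sem);

	/* Structural change: exclusive access while the table is
	 * created or destroyed.
	 */
	static void example_table_replace(void)
	{
		down_write(&example_table_sem);
		/* ... allocate or free the table ... */
		up_write(&example_table_sem);
	}

	/* Fast path: many concurrent readers; the read lock only
	 * guarantees the table continues to exist.
	 */
	static void example_table_insert(void)
	{
		down_read(&example_table_sem);
		/* ... insert an entry into the table ... */
		up_read(&example_table_sem);
	}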
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index e799688d5264..0295933145fa 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -8,6 +8,8 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
+#ifndef EFX_EF100_NIC_H
+#define EFX_EF100_NIC_H
#include "net_driver.h"
#include "nic_common.h"
@@ -15,7 +17,7 @@
extern const struct efx_nic_type ef100_pf_nic_type;
extern const struct efx_nic_type ef100_vf_nic_type;
-int ef100_probe_pf(struct efx_nic *efx);
+int ef100_probe_netdev_pf(struct efx_nic *efx);
int ef100_probe_vf(struct efx_nic *efx);
void ef100_remove(struct efx_nic *efx);
@@ -70,6 +72,9 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT];
+ u32 base_mport;
+ bool have_mport; /* base_mport was populated successfully */
+ bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
u16 tso_max_frames;
@@ -78,3 +83,9 @@ struct ef100_nic_data {
#define efx_ef100_has_cap(caps, flag) \
(!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
+
+int efx_ef100_init_datapath_caps(struct efx_nic *efx);
+int ef100_phy_probe(struct efx_nic *efx);
+int ef100_filter_table_probe(struct efx_nic *efx);
+
+#endif /* EFX_EF100_NIC_H */
diff --git a/drivers/net/ethernet/sfc/ef100_regs.h b/drivers/net/ethernet/sfc/ef100_regs.h
index 710bbdb19885..982b6ab1eb62 100644
--- a/drivers/net/ethernet/sfc/ef100_regs.h
+++ b/drivers/net/ethernet/sfc/ef100_regs.h
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -181,12 +181,6 @@
/* RHEAD_BASE_EVENT */
#define ESF_GZ_E_TYPE_LBN 60
#define ESF_GZ_E_TYPE_WIDTH 4
-#define ESE_GZ_EF100_EV_DRIVER 5
-#define ESE_GZ_EF100_EV_MCDI 4
-#define ESE_GZ_EF100_EV_CONTROL 3
-#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2
-#define ESE_GZ_EF100_EV_TX_COMPLETION 1
-#define ESE_GZ_EF100_EV_RX_PKTS 0
#define ESF_GZ_EV_EVQ_PHASE_LBN 59
#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1
#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64
@@ -369,14 +363,18 @@
#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16
-#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128
-#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16
+#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN 128
+#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96
#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32
#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64
#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32
-#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32
-#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 34
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 30
+#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_LBN 33
+#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_WIDTH 1
+#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN 32
+#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_WIDTH 1
#define ESF_GZ_RX_PREFIX_CLASS_LBN 16
#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15
@@ -454,12 +452,8 @@
#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_M2M_RSVD_LBN 120
#define ESF_GZ_M2M_RSVD_WIDTH 2
-#define ESF_GZ_M2M_ADDR_SPC_LBN 108
-#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12
-#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86
-#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22
-#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84
-#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_M2M_ADDR_SPC_ID_LBN 84
+#define ESF_GZ_M2M_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64
#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20
#define ESF_GZ_M2M_ADDR_LBN 0
@@ -492,12 +486,8 @@
#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_TX_SEG_RSVD2_LBN 120
#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2
-#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108
-#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12
-#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86
-#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22
-#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84
-#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_TX_SEG_ADDR_SPC_ID_LBN 84
+#define ESF_GZ_TX_SEG_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_TX_SEG_RSVD_LBN 80
#define ESF_GZ_TX_SEG_RSVD_WIDTH 4
#define ESF_GZ_TX_SEG_LEN_LBN 64
@@ -583,6 +573,12 @@
#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124
+/* Enum D2VIO_MSG_OP */
+#define ESE_GZ_QUE_JBDNE 3
+#define ESE_GZ_QUE_EVICT 2
+#define ESE_GZ_QUE_EMPTY 1
+#define ESE_GZ_NOP 0
+
/* Enum DESIGN_PARAMS */
#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17
#define ESE_EF100_DP_GZ_VI_STRIDES 16
@@ -630,6 +626,19 @@
#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256
#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4
+/* Enum RH_DSC_TYPE */
+#define ESE_GZ_TX_TOMB 0xF
+#define ESE_GZ_TX_VIO 0xE
+#define ESE_GZ_TX_TSO_OVRRD 0x8
+#define ESE_GZ_TX_D2CMP 0x7
+#define ESE_GZ_TX_DATA 0x6
+#define ESE_GZ_TX_D2M 0x5
+#define ESE_GZ_TX_M2M 0x4
+#define ESE_GZ_TX_SEG 0x3
+#define ESE_GZ_TX_TSO 0x2
+#define ESE_GZ_TX_OVRRD 0x1
+#define ESE_GZ_TX_SEND 0x0
+
/* Enum RH_HCLASS_L2_CLASS */
#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1
#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0
@@ -666,6 +675,25 @@
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0
+/* Enum SF_CTL_EVENT_SUBTYPE */
+#define ESE_GZ_EF100_CTL_EV_EVQ_TIMEOUT 0x3
+#define ESE_GZ_EF100_CTL_EV_FLUSH 0x2
+#define ESE_GZ_EF100_CTL_EV_TIME_SYNC 0x1
+#define ESE_GZ_EF100_CTL_EV_UNSOL_OVERFLOW 0x0
+
+/* Enum SF_EVENT_TYPE */
+#define ESE_GZ_EF100_EV_DRIVER 0x5
+#define ESE_GZ_EF100_EV_MCDI 0x4
+#define ESE_GZ_EF100_EV_CONTROL 0x3
+#define ESE_GZ_EF100_EV_TX_TIMESTAMP 0x2
+#define ESE_GZ_EF100_EV_TX_COMPLETION 0x1
+#define ESE_GZ_EF100_EV_RX_PKTS 0x0
+
+/* Enum SF_EW_EVENT_TYPE */
+#define ESE_GZ_EF100_EWEV_VIRTQ_DESC 0x2
+#define ESE_GZ_EF100_EWEV_TXQ_DESC 0x1
+#define ESE_GZ_EF100_EWEV_64BIT 0x0
+
/* Enum TX_DESC_CSO_PARTIAL_EN */
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1
@@ -681,6 +709,15 @@
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1
#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0
+
+/* Enum VIRTIO_NET_HDR_F */
+#define ESE_GZ_NEEDS_CSUM 0x1
+
+/* Enum VIRTIO_NET_HDR_GSO */
+#define ESE_GZ_TCPV6 0x4
+#define ESE_GZ_UDP 0x3
+#define ESE_GZ_TCPV4 0x1
+#define ESE_GZ_NONE 0x0
/**************************************************************************/
#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44
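Fields in this header are described by LBN (lowest bit number) and WIDTH pairs within little-endian structures; the INGRESS_MPORT rename above, for instance, names a 16-bit field at bits 128..143 of the RX packet prefix. Below is an illustrative extraction helper; it is an assumption-laden sketch, not the driver's PREFIX_FIELD machinery, and it requires the field to sit within one 32-bit word (true for the fields shown here):

	/* Illustration only: extract an LBN/WIDTH field (width < 32 and
	 * contained in a single dword) from a little-endian prefix.
	 */
	static u32 example_prefix_field(const __le32 *prefix,
					unsigned int lbn, unsigned int width)
	{
		u32 dword = le32_to_cpu(prefix[lbn / 32]);

		return (dword >> (lbn % 32)) & ((1U << width) - 1);
	}

	/* e.g. example_prefix_field(prefix,
	 *			     ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN,
	 *			     ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH)
	 */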
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
new file mode 100644
index 000000000000..81ab22c74635
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_rep.h"
+#include "ef100_netdev.h"
+#include "ef100_nic.h"
+#include "mae.h"
+#include "rx_common.h"
+#include "tc_bindings.h"
+
+#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
+
+#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
+static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
+ unsigned int i)
+{
+ efv->parent = efx;
+ efv->idx = i;
+ INIT_LIST_HEAD(&efv->list);
+ efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efv->dflt.acts.list);
+ INIT_LIST_HEAD(&efv->rx_list);
+ spin_lock_init(&efv->rx_lock);
+ efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW;
+ return 0;
+}
+
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll);
+ napi_enable(&efv->napi);
+ return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ napi_disable(&efv->napi);
+ netif_napi_del(&efv->napi);
+ return 0;
+}
+
+static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ netdev_tx_t rc;
+
+ /* __ef100_hard_start_xmit() will always return success even in the
+ * case of TX drops, where it will increment efx's tx_dropped. The
+ * efv stats really only count attempted TX, not success/failure.
+ */
+ atomic64_inc(&efv->stats.tx_packets);
+ atomic64_add(skb->len, &efv->stats.tx_bytes);
+ netif_tx_lock(efx->net_dev);
+ rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
+ netif_tx_unlock(efx->net_dev);
+ return rc;
+}
+
+static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ struct ef100_nic_data *nic_data;
+
+ nic_data = efx->nic_data;
+ /* nic_data->port_id is a u8[] */
+ ppid->id_len = sizeof(nic_data->port_id);
+ memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
+ return 0;
+}
+
+static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ struct ef100_nic_data *nic_data;
+ int ret;
+
+ nic_data = efx->nic_data;
+ ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
+ nic_data->pf_index, efv->idx);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+ struct efx_nic *efx = efv->parent;
+
+ if (type == TC_SETUP_CLSFLOWER)
+ return efx_tc_flower(efx, net_dev, type_data, efv);
+ if (type == TC_SETUP_BLOCK)
+ return efx_tc_setup_block(net_dev, efx, type_data, efv);
+
+ return -EOPNOTSUPP;
+}
+
+static void efx_ef100_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+
+ stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
+ stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
+ stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
+ stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
+ stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
+ stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
+}
+
+const struct net_device_ops efx_ef100_rep_netdev_ops = {
+ .ndo_open = efx_ef100_rep_open,
+ .ndo_stop = efx_ef100_rep_close,
+ .ndo_start_xmit = efx_ef100_rep_xmit,
+ .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
+ .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
+ .ndo_get_stats64 = efx_ef100_rep_get_stats64,
+ .ndo_setup_tc = efx_ef100_rep_setup_tc,
+};
+
+static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
+}
+
+static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ return efv->msg_enable;
+}
+
+static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
+ u32 msg_enable)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ efv->msg_enable = msg_enable;
+}
+
+static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ ring->rx_max_pending = U32_MAX;
+ ring->rx_pending = efv->rx_pring_size;
+}
+
+static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
+ return -EINVAL;
+
+ efv->rx_pring_size = ring->rx_pending;
+ return 0;
+}
+
+static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
+ .get_drvinfo = efx_ef100_rep_get_drvinfo,
+ .get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
+ .set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
+ .get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
+ .set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
+};
+
+static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
+ unsigned int i)
+{
+ struct net_device *net_dev;
+ struct efx_rep *efv;
+ int rc;
+
+ net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
+ if (!net_dev)
+ return ERR_PTR(-ENOMEM);
+
+ efv = netdev_priv(net_dev);
+ rc = efx_ef100_rep_init_struct(efx, efv, i);
+ if (rc)
+ goto fail1;
+ efv->net_dev = net_dev;
+ rtnl_lock();
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_add_tail(&efv->list, &efx->vf_reps);
+ spin_unlock_bh(&efx->vf_reps_lock);
+ if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
+ netif_device_attach(net_dev);
+ netif_carrier_on(net_dev);
+ } else {
+ netif_carrier_off(net_dev);
+ netif_tx_stop_all_queues(net_dev);
+ }
+ rtnl_unlock();
+
+ net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
+ net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
+ net_dev->features |= NETIF_F_LLTX;
+ net_dev->hw_features |= NETIF_F_LLTX;
+ return efv;
+fail1:
+ free_netdev(net_dev);
+ return ERR_PTR(rc);
+}
+
+static int efx_ef100_configure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+ u32 selector;
+ int rc;
+
+ efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
+ /* Construct mport selector for corresponding VF */
+ efx_mae_mport_vf(efx, efv->idx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
+ /* mport label should fit in 16 bits */
+ WARN_ON(efv->mport >> 16);
+
+ return efx_tc_configure_default_rule_rep(efv);
+}
+
+static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ efx_tc_deconfigure_default_rule(efx, &efv->dflt);
+}
+
+static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ rtnl_lock();
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_del(&efv->list);
+ spin_unlock_bh(&efx->vf_reps_lock);
+ rtnl_unlock();
+ synchronize_rcu();
+ free_netdev(efv->net_dev);
+}
+
+int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
+{
+ struct efx_rep *efv;
+ int rc;
+
+ efv = efx_ef100_rep_create_netdev(efx, i);
+ if (IS_ERR(efv)) {
+ rc = PTR_ERR(efv);
+ pci_err(efx->pci_dev,
+ "Failed to create representor for VF %d, rc %d\n", i,
+ rc);
+ return rc;
+ }
+ rc = efx_ef100_configure_rep(efv);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "Failed to configure representor for VF %d, rc %d\n",
+ i, rc);
+ goto fail1;
+ }
+ rc = register_netdev(efv->net_dev);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "Failed to register representor for VF %d, rc %d\n",
+ i, rc);
+ goto fail2;
+ }
+ pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
+ efv->net_dev->name);
+ return 0;
+fail2:
+ efx_ef100_deconfigure_rep(efv);
+fail1:
+ efx_ef100_rep_destroy_netdev(efv);
+ return rc;
+}
+
+void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
+{
+ struct net_device *rep_dev;
+
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ return;
+ netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
+ unregister_netdev(rep_dev);
+ efx_ef100_deconfigure_rep(efv);
+ efx_ef100_rep_destroy_netdev(efv);
+}
+
+void efx_ef100_fini_vfreps(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_rep *efv, *next;
+
+ if (!nic_data->grp_mae)
+ return;
+
+ list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
+ efx_ef100_vfrep_destroy(efx, efv);
+}
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+ struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+ unsigned int read_index;
+ struct list_head head;
+ struct sk_buff *skb;
+ bool need_resched;
+ int spent = 0;
+
+ INIT_LIST_HEAD(&head);
+ /* Grab up to 'weight' pending SKBs */
+ spin_lock_bh(&efv->rx_lock);
+ read_index = efv->write_index;
+ while (spent < weight && !list_empty(&efv->rx_list)) {
+ skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+ list_del(&skb->list);
+ list_add_tail(&skb->list, &head);
+ spent++;
+ }
+ spin_unlock_bh(&efv->rx_lock);
+ /* Receive them */
+ netif_receive_skb_list(&head);
+ if (spent < weight)
+ if (napi_complete_done(napi, spent)) {
+ spin_lock_bh(&efv->rx_lock);
+ efv->read_index = read_index;
+ /* If write_index advanced while we were doing the
+ * RX, then storing our read_index won't re-prime the
+ * fake-interrupt. In that case, we need to schedule
+ * NAPI again to consume the additional packet(s).
+ */
+ need_resched = efv->write_index != read_index;
+ spin_unlock_bh(&efv->rx_lock);
+ if (need_resched)
+ napi_schedule(&efv->napi);
+ }
+ return spent;
+}
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
+{
+ u8 *eh = efx_rx_buf_va(rx_buf);
+ struct sk_buff *skb;
+ bool primed;
+
+ /* Don't allow too many queued SKBs to build up, as they consume
+ * GFP_ATOMIC memory. If we overrun, just start dropping.
+ */
+ if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "nodesc-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+
+ skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
+ if (!skb) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "noskb-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+ memcpy(skb->data, eh, rx_buf->len);
+ __skb_put(skb, rx_buf->len);
+
+ skb_record_rx_queue(skb, 0); /* rep is single-queue */
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efv->net_dev);
+
+ skb_checksum_none_assert(skb);
+
+ atomic64_inc(&efv->stats.rx_packets);
+ atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
+
+ /* Add it to the rx list */
+ spin_lock_bh(&efv->rx_lock);
+ primed = efv->read_index == efv->write_index;
+ list_add_tail(&skb->list, &efv->rx_list);
+ efv->write_index++;
+ spin_unlock_bh(&efv->rx_lock);
+ /* Trigger rx work */
+ if (primed)
+ napi_schedule(&efv->napi);
+}
+
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
+{
+ struct efx_rep *efv, *out = NULL;
+
+ /* spinlock guards against list mutation while we're walking it;
+ * but caller must also hold rcu_read_lock() to ensure the netdev
+ * isn't freed after we drop the spinlock.
+ */
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_for_each_entry(efv, &efx->vf_reps, list)
+ if (efv->mport == mport) {
+ out = efv;
+ break;
+ }
+ spin_unlock_bh(&efx->vf_reps_lock);
+ return out;
+}
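The rx_list handling above amounts to a software doorbell: efx_ef100_rep_rx_packet() schedules NAPI only when it sees the consumer has drained the list (read_index == write_index), and efx_ef100_rep_poll() re-arms by publishing its read_index, rescheduling itself if the producer advanced in the window before napi_complete_done() — that recheck is what avoids a lost wakeup. Condensed to its essentials (an illustration, not a replacement for the code above), the producer side is:

	/* Condensed restatement of the producer half of the handshake */
	static void example_rep_enqueue(struct efx_rep *efv,
					struct sk_buff *skb)
	{
		bool primed;

		spin_lock_bh(&efv->rx_lock);
		/* the consumer is idle iff it has caught up with us */
		primed = efv->read_index == efv->write_index;
		list_add_tail(&skb->list, &efv->rx_list);
		efv->write_index++;
		spin_unlock_bh(&efv->rx_lock);
		if (primed) /* one kick per burst; poll re-arms itself */
			napi_schedule(&efv->napi);
	}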
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
new file mode 100644
index 000000000000..c21bc716f847
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Handling for ef100 representor netdevs */
+#ifndef EF100_REP_H
+#define EF100_REP_H
+
+#include "net_driver.h"
+#include "tc.h"
+
+struct efx_rep_sw_stats {
+ atomic64_t rx_packets, tx_packets;
+ atomic64_t rx_bytes, tx_bytes;
+ atomic64_t rx_dropped, tx_errors;
+};
+
+/**
+ * struct efx_rep - Private data for an Efx representor
+ *
+ * @parent: the efx PF which manages this representor
+ * @net_dev: representor netdevice
+ * @msg_enable: log message enable flags
+ * @mport: m-port ID of corresponding VF
+ * @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
+ * @dflt: default-rule for MAE switching
+ * @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
+ * @stats: software traffic counters for netdev stats
+ */
+struct efx_rep {
+ struct efx_nic *parent;
+ struct net_device *net_dev;
+ u32 msg_enable;
+ u32 mport;
+ unsigned int idx;
+ unsigned int write_index, read_index;
+ unsigned int rx_pring_size;
+ struct efx_tc_flow_rule dflt;
+ struct list_head list;
+ struct list_head rx_list;
+ spinlock_t rx_lock;
+ struct napi_struct napi;
+ struct efx_rep_sw_stats stats;
+};
+
+int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
+void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
+void efx_ef100_fini_vfreps(struct efx_nic *efx);
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
+/* Returns the representor corresponding to a VF m-port, or NULL
+ * @mport is an m-port label, *not* an m-port ID!
+ * Caller must hold rcu_read_lock().
+ */
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
+extern const struct net_device_ops efx_ef100_rep_netdev_ops;
+#endif /* EF100_REP_H */
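As the comment above says, the spinlock in efx_ef100_find_rep_by_mport() only protects the list walk itself; it is the caller's rcu_read_lock() that keeps the returned representor's netdev alive after the spinlock is dropped. A usage sketch matching the RX path elsewhere in this patch:

	/* The RCU read lock must span both the lookup and any use of
	 * the returned representor.
	 */
	static void example_deliver_to_rep(struct efx_nic *efx, u16 mport,
					   struct efx_rx_buffer *rx_buf)
	{
		struct efx_rep *efv;

		rcu_read_lock();
		efv = efx_ef100_find_rep_by_mport(efx, mport);
		if (efv)
			efx_ef100_rep_rx_packet(efv, rx_buf);
		rcu_read_unlock();
	}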
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 85207acf7dee..65bbe37753e6 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
void __ef100_rx_packet(struct efx_channel *channel)
{
- struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
+ channel->rx_pkt_index);
struct efx_nic *efx = channel->efx;
+ struct ef100_nic_data *nic_data;
u8 *eh = efx_rx_buf_va(rx_buf);
__wsum csum = 0;
+ u16 ing_port;
u32 *prefix;
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto out;
}
+ ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));
+
+ nic_data = efx->nic_data;
+
+ if (nic_data->have_mport && ing_port != nic_data->base_mport) {
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_rep *efv;
+
+ rcu_read_lock();
+ efv = efx_ef100_find_rep_by_mport(efx, ing_port);
+ if (efv) {
+ if (efv->net_dev->flags & IFF_UP)
+ efx_ef100_rep_rx_packet(efv, rx_buf);
+ rcu_read_unlock();
+ /* Representor Rx doesn't care about PF Rx buffer
+ * ownership; it just makes a copy. So we are done
+ * with the Rx buffer from the PF's point of view and
+ * should free it.
+ */
+ goto free_rx_buffer;
+ }
+ rcu_read_unlock();
+#endif
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Unrecognised ing_port %04x (base %04x), dropping\n",
+ ing_port, nic_data->base_mport);
+ channel->n_rx_mport_bad++;
+ goto free_rx_buffer;
+ }
+
if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
++channel->n_rx_ip_hdr_chksum_err;
@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
}
if (channel->type->receive_skb) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
/* no support for special channels yet, so just discard */
WARN_ON_ONCE(1);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- goto out;
+ goto free_rx_buffer;
}
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+ goto out;
+free_rx_buffer:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
channel->rx_pkt_n_frags = 0;
}
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c
new file mode 100644
index 000000000000..94bdbfcb47e8
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_sriov.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_sriov.h"
+#include "ef100_nic.h"
+#include "ef100_rep.h"
+
+static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct pci_dev *dev = efx->pci_dev;
+ struct efx_rep *efv, *next;
+ int rc, i;
+
+ efx->vf_count = num_vfs;
+ rc = pci_enable_sriov(dev, num_vfs);
+ if (rc)
+ goto fail1;
+
+ if (!nic_data->grp_mae)
+ return 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ rc = efx_ef100_vfrep_create(efx, i);
+ if (rc)
+ goto fail2;
+ }
+ return 0;
+
+fail2:
+ list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
+ efx_ef100_vfrep_destroy(efx, efv);
+ pci_disable_sriov(dev);
+fail1:
+ netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
+ efx->vf_count = 0;
+ return rc;
+}
+
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+ if (vfs_assigned && !force) {
+ netif_info(efx, drv, efx->net_dev,
+ "VFs are assigned to guests; please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ efx_ef100_fini_vfreps(efx);
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
+ return 0;
+}
+
+int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ if (num_vfs == 0)
+ return efx_ef100_pci_sriov_disable(efx, false);
+ else
+ return efx_ef100_pci_sriov_enable(efx, num_vfs);
+}
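efx_ef100_sriov_configure() is installed as .sriov_configure in ef100_pf_nic_type in ef100_nic.c above; the PCI core drives the corresponding pci_driver-level hook when sriov_numvfs is written in sysfs (e.g. echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs, and 0 to disable). The glue from pci_driver down to this function is outside this patch; a plausible sketch, assuming the usual drvdata arrangement:

	/* Assumed wiring (not shown in this patch): dispatch the PCI
	 * core's sriov_configure callback to the per-NIC-type handler.
	 */
	static int example_pci_sriov_configure(struct pci_dev *pci_dev,
					       int num_vfs)
	{
		struct efx_nic *efx = pci_get_drvdata(pci_dev);

		if (!efx->type->sriov_configure)
			return -EOPNOTSUPP;
		return efx->type->sriov_configure(efx, num_vfs);
	}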
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h
new file mode 100644
index 000000000000..8ffdf464dd1d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_sriov.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include "net_driver.h"
+
+int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 26ef51d6b542..102ddc7e206a 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -254,7 +254,8 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb,
- unsigned int segment_count)
+ unsigned int segment_count,
+ struct efx_rep *efv)
{
unsigned int old_write_count = tx_queue->write_count;
unsigned int new_write_count = old_write_count;
@@ -272,6 +273,20 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
else
next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
+ if (unlikely(efv)) {
+ /* Create TX override descriptor */
+ write_ptr = new_write_count & tx_queue->ptr_mask;
+ txd = ef100_tx_desc(tx_queue, write_ptr);
+ ++new_write_count;
+
+ tx_queue->packet_write_count = new_write_count;
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1);
+ nr_descs--;
+ }
+
/* if it's a raw write (such as XDP) then always SEND single frames */
if (!skb)
nr_descs = 1;
@@ -306,6 +321,9 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
/* if it's a raw write (such as XDP) then always SEND */
next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
ESE_GZ_TX_DESC_TYPE_SEND;
+ /* mark as an EFV buffer if applicable */
+ if (unlikely(efv))
+ buffer->flags |= EFX_TX_BUF_EFV;
} while (new_write_count != tx_queue->insert_count);
@@ -324,7 +342,7 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
- ef100_tx_make_descriptors(tx_queue, NULL, 0);
+ ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
ef100_tx_push_buffers(tx_queue);
}
@@ -351,6 +369,12 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
*/
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
+ return __ef100_enqueue_skb(tx_queue, skb, NULL);
+}
+
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv)
+{
unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;
bool xmit_more = netdev_xmit_more();
@@ -376,16 +400,64 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return 0;
}
+ if (unlikely(efv)) {
+ struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ /* Drop representor packets if the queue is stopped.
+ * We currently don't assert backoff to representors, so this is
+ * to make sure representor traffic can't starve the main
+ * net device.
+ * Also drop if there are no TX descriptors left at all.
+ */
+ if (netif_tx_queue_stopped(tx_queue->core_txq) ||
+ unlikely(efx_tx_buffer_in_use(buffer))) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ /* Also drop representor traffic if it could cause us to
+ * stop the queue. If we assert backoff and we haven't
+ * received traffic on the main net device recently then the
+ * TX watchdog can go off erroneously.
+ */
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ struct efx_tx_queue *txq2;
+
+ /* Refresh cached fill level and re-check */
+ efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV;
+ tx_queue->insert_count++;
+ }
+
/* Map for DMA and create descriptors */
rc = efx_tx_map_data(tx_queue, skb, segments);
if (rc)
goto err;
- ef100_tx_make_descriptors(tx_queue, skb, segments);
+ ef100_tx_make_descriptors(tx_queue, skb, segments, efv);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
struct efx_tx_queue *txq2;
+ /* Because of checks above, representor traffic should
+ * not be able to stop the queue.
+ */
+ WARN_ON(efv);
+
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
@@ -404,8 +476,12 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* If xmit_more then we don't need to push the doorbell, unless there
* are 256 descriptors already queued in which case we have to push to
* ensure we never push more than 256 at once.
+ *
+ * Always push for representor traffic, and don't account it to the
+ * parent PF netdevice's BQL.
*/
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
+ if (unlikely(efv) ||
+ __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
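The representor path above deliberately drops packets rather than stopping the shared PF queue, and it evaluates the fill level twice because old_read_count is only a cached snapshot of the completion path's read_count. A reduced single-queue sketch of that check-refresh-recheck pattern (the real code aggregates every TX queue on the channel via efx_channel_tx_old_fill_level()):

/* Simplified sketch using the efx_tx_queue counters seen in the hunk
 * above; not a drop-in replacement for the channel-wide check.
 */
static bool example_would_overfill(struct efx_tx_queue *txq,
				   unsigned int max_descs,
				   unsigned int stop_thresh)
{
	unsigned int fill = txq->insert_count - txq->old_read_count;

	if (fill + max_descs <= stop_thresh)
		return false;

	/* The cached snapshot may be stale; refresh it from the
	 * completion path and check once more before deciding to drop.
	 */
	txq->old_read_count = READ_ONCE(txq->read_count);
	fill = txq->insert_count - txq->old_read_count;
	return fill + max_descs > stop_thresh;
}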
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index ddc4b98fa6db..e9e11540fcde 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -13,6 +13,7 @@
#define EFX_EF100_TX_H
#include "net_driver.h"
+#include "ef100_rep.h"
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
@@ -22,4 +23,6 @@ unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv);
#endif
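ef100_enqueue_skb() keeps its existing behaviour by passing a NULL efv; the caller that passes a real struct efx_rep is the representor datapath in ef100_rep.c, which is not part of this hunk. A hypothetical caller sketch, in which everything except __ef100_enqueue_skb() is an assumption:

/* Hypothetical representor xmit sketch; the priv layout, the parent
 * backpointer and the queue selection are assumptions for illustration.
 */
static netdev_tx_t example_rep_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);		/* assumed priv layout */
	struct efx_nic *efx = efv->parent;		/* assumed backpointer */
	struct efx_tx_queue *txq = &efx->channel[0]->tx_queue[0]; /* assumed */

	/* Errors are counted in efv->stats (see the tx_errors increments
	 * above) rather than propagated back to the stack.
	 */
	__ef100_enqueue_skb(txq, skb, efv);
	return NETDEV_TX_OK;
}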
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 7f5aa4a8c451..9aae0d8b713f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -408,8 +408,9 @@ fail1:
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int vfs_assigned = pci_vfs_assigned(dev);
- int rc = 0;
+ int i, rc = 0;
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
return -EBUSY;
}
- if (!vfs_assigned)
+ if (!vfs_assigned) {
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].pci_dev = NULL;
pci_disable_sriov(dev);
- else
+ } else {
rc = -EBUSY;
+ }
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
@@ -497,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
}
rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
@@ -535,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
if (vf->efx) {
/* VF cannot use the vport_id that the PF created */
rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
vf->efx->type->filter_table_probe(vf->efx);
- up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev);
efx_device_attach_if_not_resetting(vf->efx);
}
@@ -576,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_net_stop(vf->efx->net_dev);
mutex_lock(&vf->efx->mac_lock);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
@@ -650,7 +647,6 @@ restore_filters:
if (rc2)
goto reset_nic_up_write;
- up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock);
rc2 = efx_net_open(vf->efx->net_dev);
@@ -662,10 +658,8 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx) {
- up_write(&vf->efx->filter_sem);
+ if (vf->efx)
mutex_unlock(&vf->efx->mac_lock);
- }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 302dc835ac3d..0556542d7a6b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,14 +106,6 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
- } while (0)
-
/**************************************************************************
*
* Port handling
@@ -378,6 +370,8 @@ static int efx_probe_all(struct efx_nic *efx)
if (rc)
goto fail5;
+ efx->state = STATE_NET_DOWN;
+
return 0;
fail5:
@@ -498,7 +492,7 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
@@ -523,7 +517,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
@@ -544,6 +538,9 @@ int efx_net_open(struct net_device *net_dev)
efx_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev);
+ else
+ efx->state = STATE_NET_UP;
+
efx_selftest_async_start(efx);
return 0;
}
@@ -554,7 +551,7 @@ int efx_net_open(struct net_device *net_dev)
*/
int efx_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
@@ -567,7 +564,7 @@ int efx_net_stop(struct net_device *net_dev)
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_add_vid)
return efx->type->vlan_rx_add_vid(efx, proto, vid);
@@ -577,7 +574,7 @@ static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid
static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_kill_vid)
return efx->type->vlan_rx_kill_vid(efx, proto, vid);
@@ -646,7 +643,7 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -659,7 +656,7 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -681,7 +678,7 @@ static int efx_netdev_event(struct notifier_block *this,
if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
- efx_update_name(netdev_priv(net_dev));
+ efx_update_name(efx_netdev_priv(net_dev));
return NOTIFY_DONE;
}
@@ -710,7 +707,7 @@ static int efx_register_netdev(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
- netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
+ netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
@@ -720,8 +717,6 @@ static int efx_register_netdev(struct efx_nic *efx)
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
- efx->state = STATE_READY;
- smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
rc = -EIO;
@@ -748,6 +743,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_associate(efx);
+ efx->state = STATE_NET_DOWN;
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -777,10 +774,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (!efx->net_dev)
return;
- BUG_ON(netdev_priv(efx->net_dev) != efx);
+ if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
+ return;
if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
@@ -795,10 +793,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
- .driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
- .driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
@@ -849,7 +843,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
- BUG_ON(efx->state == STATE_READY);
+ WARN_ON(efx_net_active(efx->state));
efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
@@ -867,6 +861,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
@@ -891,10 +886,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+ pci_dbg(efx->pci_dev, "shutdown successful\n");
efx_fini_struct(efx);
free_netdev(efx->net_dev);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ kfree(probe_data);
pci_disable_pcie_error_reporting(pci_dev);
};
@@ -1048,24 +1045,36 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
static int efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
+ struct efx_probe_data *probe_data, **probe_ptr;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
- EFX_MAX_RX_QUEUES);
- if (!net_dev)
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
+ efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
@@ -1125,6 +1134,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
+ fail0:
+ kfree(probe_data);
return rc;
}
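From this hunk onward, netdev_priv() no longer holds the efx_nic itself, only a pointer to the separately allocated efx_probe_data; that is why every netdev_priv() call in this patch becomes efx_netdev_priv(). The accessor's definition is outside this section; a shape consistent with the probe code above would be:

/* Assumed accessor shape, inferred from how probe_ptr is stored in
 * efx_pci_probe() above; the real definition is not part of this diff.
 */
static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
{
	struct efx_probe_data **probe_ptr = netdev_priv(dev);

	return &(*probe_ptr)->efx;
}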
@@ -1154,13 +1165,13 @@ static int efx_pm_freeze(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
- efx->state = STATE_UNINIT;
-
+ if (efx_net_active(efx->state)) {
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_disable_interrupts(efx);
+
+ efx->state = efx_freeze(efx->state);
}
rtnl_unlock();
@@ -1168,6 +1179,17 @@ static int efx_pm_freeze(struct device *dev)
return 0;
}
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
static int efx_pm_thaw(struct device *dev)
{
int rc;
@@ -1175,7 +1197,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
+ if (efx_frozen(efx->state)) {
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
@@ -1188,7 +1210,7 @@ static int efx_pm_thaw(struct device *dev)
efx_device_attach_if_not_resetting(efx);
- efx->state = STATE_READY;
+ efx->state = efx_thaw(efx->state);
efx->type->resume_wol(efx);
}
@@ -1272,6 +1294,7 @@ static struct pci_driver efx_pci_driver = {
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
.err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure,
@@ -1294,12 +1317,6 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
-#ifdef CONFIG_SFC_SRIOV
- rc = efx_init_sriov();
- if (rc)
- goto err_sriov;
-#endif
-
rc = efx_create_reset_workqueue();
if (rc)
goto err_reset;
@@ -1319,10 +1336,6 @@ static int __init efx_init_module(void)
err_pci:
efx_destroy_reset_workqueue();
err_reset:
-#ifdef CONFIG_SFC_SRIOV
- efx_fini_sriov();
- err_sriov:
-#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -1335,9 +1348,6 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&ef100_pci_driver);
pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue();
-#ifdef CONFIG_SFC_SRIOV
- efx_fini_sriov();
-#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
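The STATE_READY checks above are replaced by a richer state machine queried with efx_net_active()/efx_frozen()/efx_recovering() and transformed with efx_freeze()/efx_thaw()/efx_recover()/efx_recovered(). Those helpers are defined outside this section; one shape consistent with all the call sites here treats suspend and recovery as flag bits layered over the base STATE_NET_DOWN/STATE_NET_UP states:

/* Assumed helper shapes only, inferred from the call sites in this
 * patch; the real definitions may differ in detail.
 */
enum example_state {
	EXAMPLE_STATE_NET_DOWN	= 1,
	EXAMPLE_STATE_NET_UP	= 2,
	EXAMPLE_STATE_RECOVERY	= 0x100,	/* flag: PCI error recovery */
	EXAMPLE_STATE_FROZEN	= 0x200,	/* flag: suspended */
};

static inline bool example_net_active(unsigned int state)
{
	return state == EXAMPLE_STATE_NET_DOWN || state == EXAMPLE_STATE_NET_UP;
}

static inline bool example_frozen(unsigned int state)
{
	return state & EXAMPLE_STATE_FROZEN;
}

static inline unsigned int example_freeze(unsigned int state)
{
	return state | EXAMPLE_STATE_FROZEN;	/* example_thaw() clears it */
}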
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index daf0c00c1242..4239c7ece123 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -12,6 +12,7 @@
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
+#include "efx_common.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -28,7 +29,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct
ef100_enqueue_skb, __efx_enqueue_skb,
tx_queue, skb);
}
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
@@ -207,6 +207,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
{
struct net_device *dev = efx->net_dev;
+ /* We must stop reps (which use our TX) before we stop ourselves. */
+ efx_detach_reps(efx);
+
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
@@ -218,8 +221,11 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
- if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+ if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
netif_device_attach(efx->net_dev);
+ if (efx->state == STATE_NET_UP)
+ efx_attach_reps(efx);
+ }
}
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index ead550ae2709..aaa381743bca 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -46,63 +46,52 @@ module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
+static const struct efx_channel_type efx_default_channel_type;
-/***************
- * Housekeeping
- ***************/
+/*************
+ * INTERRUPTS
+ *************/
-int efx_channel_dummy_op_int(struct efx_channel *channel)
+static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
- return 0;
-}
+ cpumask_var_t filter_mask;
+ unsigned int count;
+ int cpu;
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
+ if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
+ cpumask_copy(filter_mask, cpu_online_mask);
+ if (local_node)
+ cpumask_and(filter_mask, filter_mask,
+ cpumask_of_pcibus(efx->pci_dev->bus));
-/*************
- * INTERRUPTS
- *************/
+ count = 0;
+ for_each_cpu(cpu, filter_mask) {
+ ++count;
+ cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
+ }
+
+ free_cpumask_var(filter_mask);
+
+ return count;
+}
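count_online_cores() counts one CPU per physical core by removing each counted CPU's SMT siblings from the mask as it goes, optionally restricted to the NIC's NUMA node. A worked example on a hypothetical topology:

/* Hypothetical topology: 2 nodes x 4 cores x 2 SMT threads = 16 CPUs,
 * all online, NIC attached to node 0.
 *   count_online_cores(efx, true)  -> 4  (one per physical core, node 0)
 *   count_online_cores(efx, false) -> 8  (one per physical core overall)
 * So efx_wanted_parallelism() now defaults to one channel per local core
 * rather than one per hyperthread.
 */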
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
- cpumask_var_t thread_mask;
unsigned int count;
- int cpu;
if (rss_cpus) {
count = rss_cpus;
} else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
+ count = count_online_cores(efx, true);
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
+ /* If no online CPUs in local node, fall back to any online CPUs */
+ if (count == 0)
+ count = count_online_cores(efx, false);
}
if (count > EFX_MAX_RX_QUEUES) {
@@ -309,6 +298,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
@@ -329,6 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -369,12 +360,19 @@ int efx_probe_interrupts(struct efx_nic *efx)
#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
+ const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
struct efx_channel *channel;
unsigned int cpu;
+ /* If no online CPUs in local node, fall back to any online CPU */
+ if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
+ numa_mask = cpu_online_mask;
+
+ cpu = -1;
efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
+ cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first_and(cpu_online_mask, numa_mask);
irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
}
}
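Instead of cpumask_local_spread(), the loop now walks the node-local online CPUs directly and wraps around with cpumask_first_and() once channels outnumber CPUs. For a hypothetical NIC on node 0 whose online CPUs are 0-3:

/* Hypothetical spread of 6 channels over node-local CPUs {0,1,2,3}:
 *   channel 0 -> CPU 0, channel 1 -> CPU 1, channel 2 -> CPU 2,
 *   channel 3 -> CPU 3, channel 4 -> CPU 0 (wrap), channel 5 -> CPU 1
 */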
@@ -674,7 +672,8 @@ fail:
return rc;
}
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+ size_t len)
{
struct efx_nic *efx = channel->efx;
const char *type;
@@ -764,9 +763,90 @@ void efx_remove_channels(struct efx_nic *efx)
kfree(efx->xdp_tx_queues);
}
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ unsigned int next_queue = 0;
+ int xdp_queue_number = 0;
+ int rc;
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->tx_channel_offset)
+ continue;
+
+ if (efx_channel_is_xdp_tx(channel)) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ } else {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is HW %u\n",
+ channel->channel, tx_queue->label,
+ tx_queue->queue);
+ }
+
+ /* If XDP is borrowing queues from net stack, it must
+ * use the queue with no csum offload, which is the
+ * first one of the channel
+ * (note: tx_queue_by_type is not initialized yet)
+ */
+ if (efx->xdp_txq_queues_mode ==
+ EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ }
+ }
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+ /* If we have more CPUs than assigned XDP TX queues, reuse the
+ * already-existing queues for the extra CPUs
+ */
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+}
+
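efx_set_xdp_channels() is the queue-numbering logic hoisted out of efx_set_channels() so that efx_realloc_channels() can rerun it after reallocating channels (see the call added below). The mapping it produces, for a small hypothetical configuration:

/* Hypothetical layout: 4 channels, tx_channel_offset = 2, one TX queue
 * per channel, one dedicated XDP channel (EFX_XDP_TX_QUEUES_DEDICATED):
 *   channels 0-1: RX only, skipped by the offset check
 *   channel 2:    net-stack TX -> HW queue 0
 *   channel 3:    XDP TX       -> HW queue 1 = xdp_tx_queues[0]
 * In EFX_XDP_TX_QUEUES_BORROWED mode there are no XDP channels; each
 * xdp_tx_queues[] slot instead aliases a channel's first queue, the one
 * without checksum offload.
 */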
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+ *ptp_channel = efx_ptp_channel(efx);
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
unsigned int i, next_buffer_table = 0;
u32 old_rxq_entries, old_txq_entries;
int rc, rc2;
@@ -835,7 +915,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_init_napi_channel(efx->channel[i]);
}
+ efx_set_xdp_channels(efx);
out:
+ efx->ptp_data = NULL;
/* Destroy unused channel structures */
for (i = 0; i < efx->n_channels; i++) {
channel = other_channel[i];
@@ -846,6 +928,7 @@ out:
}
}
+ efx->ptp_data = ptp_data;
rc2 = efx_soft_enable_interrupts(efx);
if (rc2) {
rc = rc ? rc : rc2;
@@ -864,35 +947,15 @@ rollback:
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++)
swap(efx->channel[i], other_channel[i]);
+ efx_ptp_update_channel(efx, ptp_channel);
goto out;
}
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
- struct efx_tx_queue *tx_queue)
-{
- if (xdp_queue_number >= efx->xdp_tx_queue_count)
- return -EINVAL;
-
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- tx_queue->channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- return 0;
-}
-
int efx_set_channels(struct efx_nic *efx)
{
- struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
- unsigned int next_queue = 0;
- int xdp_queue_number;
int rc;
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
if (efx->xdp_tx_queue_count) {
EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
@@ -904,61 +967,14 @@ int efx_set_channels(struct efx_nic *efx)
return -ENOMEM;
}
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
efx_for_each_channel(channel, efx) {
if (channel->channel < efx->n_rx_channels)
channel->rx_queue.core_index = channel->channel;
else
channel->rx_queue.core_index = -1;
-
- if (channel->channel >= efx->tx_channel_offset) {
- if (efx_channel_is_xdp_tx(channel)) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue = next_queue++;
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
- } else {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
- channel->channel, tx_queue->label,
- tx_queue->queue);
- }
-
- /* If XDP is borrowing queues from net stack, it must use the queue
- * with no csum offload, which is the first one of the channel
- * (note: channel->tx_queue_by_type is not initialized yet)
- */
- if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
- tx_queue = &channel->tx_queue[0];
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
- }
- }
}
- WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
- xdp_queue_number != efx->xdp_tx_queue_count);
- WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
- xdp_queue_number > efx->xdp_tx_queue_count);
- /* If we have more CPUs than assigned XDP TX queues, assign the already
- * existing queues to the exceeding CPUs
- */
- next_queue = 0;
- while (xdp_queue_number < efx->xdp_tx_queue_count) {
- tx_queue = efx->xdp_tx_queues[next_queue++];
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
+ efx_set_xdp_channels(efx);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
@@ -966,7 +982,7 @@ int efx_set_channels(struct efx_nic *efx)
return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
-bool efx_default_channel_want_txqs(struct efx_channel *channel)
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
return channel->channel - channel->efx->tx_channel_offset <
channel->efx->n_tx_channels;
@@ -1102,7 +1118,7 @@ void efx_start_channels(struct efx_nic *efx)
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rev(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
atomic_inc(&efx->active_queues);
@@ -1297,8 +1313,7 @@ void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}
void efx_init_napi(struct efx_nic *efx)
@@ -1324,3 +1339,26 @@ void efx_fini_napi(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index d77ec1f77fb1..46b702648721 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -32,12 +32,10 @@ void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx);
-bool efx_default_channel_want_txqs(struct efx_channel *channel);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
@@ -50,7 +48,6 @@ void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);
-int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index af37c990217e..c2224e41a694 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -24,6 +24,7 @@
#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"
+#include "ef100_rep.h"
static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
@@ -51,8 +52,8 @@ static unsigned int efx_monitor_interval = 1 * HZ;
/* Default stats update time */
#define STATS_PERIOD_MS_DEFAULT 1000
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
@@ -167,7 +168,7 @@ static void efx_mac_work(struct work_struct *data)
int efx_set_mac_address(struct net_device *net_dev, void *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
u8 old_addr[6];
@@ -202,7 +203,7 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -211,7 +212,7 @@ void efx_set_rx_mode(struct net_device *net_dev)
int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
@@ -285,7 +286,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx_check_disabled(efx);
@@ -600,7 +601,7 @@ void efx_stop_all(struct efx_nic *efx)
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx_nic_update_stats_atomic(efx, NULL, stats);
@@ -723,7 +724,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
@@ -898,7 +899,7 @@ static void efx_reset_work(struct work_struct *data)
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
- if (efx->state == STATE_READY)
+ if (efx_net_active(efx->state))
(void)efx_reset(efx, method);
rtnl_unlock();
@@ -908,7 +909,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
- if (efx->state == STATE_RECOVERY) {
+ if (efx_recovering(efx->state)) {
netif_dbg(efx, drv, efx->net_dev,
"recovering: skip scheduling %s reset\n",
RESET_TYPE(type));
@@ -943,7 +944,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (READ_ONCE(efx->state) != STATE_READY)
+ if (!efx_net_active(READ_ONCE(efx->state)))
return;
/* efx_process_channel() will no longer read events once a
@@ -978,8 +979,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {}
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
{
int rc = -ENOMEM;
@@ -996,9 +996,8 @@ int efx_init_struct(struct efx_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
- efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_ip_align =
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
@@ -1023,7 +1022,8 @@ int efx_init_struct(struct efx_nic *efx,
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
- efx->mdio.dev = net_dev;
+ spin_lock_init(&efx->vf_reps_lock);
+ INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@@ -1077,13 +1077,11 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
int rc;
efx->mem_bar = UINT_MAX;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
+ pci_dbg(pci_dev, "initialising I/O bar=%d\n", bar);
rc = pci_enable_device(pci_dev);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
+ pci_err(pci_dev, "failed to enable PCI device\n");
goto fail1;
}
@@ -1091,42 +1089,40 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
+ pci_err(efx->pci_dev, "could not find a suitable DMA mask\n");
goto fail2;
}
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long)dma_mask);
+ pci_dbg(efx->pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask);
efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
if (!efx->membase_phys) {
- netif_err(efx, probe, efx->net_dev,
- "ERROR: No BAR%d mapping from the BIOS. "
- "Try pci=realloc on the kernel command line\n", bar);
+ pci_err(efx->pci_dev,
+ "ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n",
+ bar);
rc = -ENODEV;
goto fail3;
}
rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR[%d] failed\n", bar);
+ pci_err(efx->pci_dev,
+ "request for memory BAR[%d] failed\n", bar);
rc = -EIO;
goto fail3;
}
efx->mem_bar = bar;
efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR[%d] at %llx+%x\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size);
+ pci_err(efx->pci_dev,
+ "could not map memory BAR[%d] at %llx+%x\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
+ pci_dbg(efx->pci_dev,
+ "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1142,7 +1138,7 @@ fail1:
void efx_fini_io(struct efx_nic *efx)
{
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+ pci_dbg(efx->pci_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
@@ -1217,13 +1213,15 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx->state = STATE_RECOVERY;
+ efx->state = efx_recover(efx->state);
efx->reset_pending = 0;
efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ if (efx_net_active(efx->state)) {
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+ }
status = PCI_ERS_RESULT_NEED_RESET;
} else {
@@ -1271,7 +1269,7 @@ static void efx_io_resume(struct pci_dev *pdev)
netif_err(efx, hw, efx->net_dev,
"efx_reset failed after PCI error (%d)\n", rc);
} else {
- efx->state = STATE_READY;
+ efx->state = efx_recovered(efx->state);
netif_dbg(efx, hw, efx->net_dev,
"Done resetting and resuming IO after PCI error.\n");
}
@@ -1357,7 +1355,7 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (skb->encapsulation) {
if (features & NETIF_F_GSO_MASK)
@@ -1378,7 +1376,7 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_phys_port_id)
return efx->type->get_phys_port_id(efx, ppid);
@@ -1388,9 +1386,44 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (snprintf(name, len, "p%u", efx->port_num) >= len)
return -EINVAL;
return 0;
}
+
+void efx_detach_reps(struct efx_nic *efx)
+{
+ struct net_device *rep_dev;
+ struct efx_rep *efv;
+
+ ASSERT_RTNL();
+ netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n");
+ list_for_each_entry(efv, &efx->vf_reps, list) {
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ continue;
+ netif_carrier_off(rep_dev);
+ /* See efx_device_detach_sync() */
+ netif_tx_lock_bh(rep_dev);
+ netif_tx_stop_all_queues(rep_dev);
+ netif_tx_unlock_bh(rep_dev);
+ }
+}
+
+void efx_attach_reps(struct efx_nic *efx)
+{
+ struct net_device *rep_dev;
+ struct efx_rep *efv;
+
+ ASSERT_RTNL();
+ netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n");
+ list_for_each_entry(efv, &efx->vf_reps, list) {
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ continue;
+ netif_tx_wake_all_queues(rep_dev);
+ netif_carrier_on(rep_dev);
+ }
+}
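efx_detach_reps() relies on the trick referenced by its "See efx_device_detach_sync()" comment: stopping the queues while holding the TX lock guarantees that no representor ndo_start_xmit (which transmits on the PF's queues) is still in flight when the PF goes on to quiesce those queues. The idiom in isolation:

/* Minimal sketch of the stop-and-synchronize idiom used above: once
 * netif_tx_unlock_bh() returns, no CPU is inside dev's xmit path,
 * because xmit runs under the same per-queue TX locks.
 */
static void example_quiesce_tx(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);
}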
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 65513fd0cf6c..2c54dac3e662 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -14,8 +14,7 @@
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size);
void efx_fini_io(struct efx_nic *efx);
-int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
- struct net_device *net_dev);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev);
void efx_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -43,12 +42,11 @@ void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
int efx_reconfigure_port(struct efx_nic *efx);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx)->state != STATE_UNINIT && \
+ (efx)->state != STATE_PROBED) \
+ ASSERT_RTNL(); \
} while (0)
int efx_try_recovery(struct efx_nic *efx);
@@ -64,7 +62,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
@@ -113,4 +111,7 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len);
+
+void efx_detach_reps(struct efx_nic *efx);
+void efx_attach_reps(struct efx_nic *efx);
#endif
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 48506373721a..364323599f7b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -33,7 +33,7 @@
static int efx_ethtool_phys_id(struct net_device *net_dev,
enum ethtool_phys_id_state state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
enum efx_led_mode mode = EFX_LED_DEFAULT;
switch (state) {
@@ -55,13 +55,13 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
- return efx_nic_get_regs_len(netdev_priv(net_dev));
+ return efx_nic_get_regs_len(efx_netdev_priv(net_dev));
}
static void efx_ethtool_get_regs(struct net_device *net_dev,
struct ethtool_regs *regs, void *buf)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
regs->version = efx->type->revision;
efx_nic_get_regs(efx, buf);
@@ -101,7 +101,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
@@ -121,7 +121,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_channel *channel;
unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx;
@@ -163,7 +163,7 @@ efx_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
@@ -177,7 +177,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
@@ -204,7 +204,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->get_wol(efx, wol);
}
@@ -212,14 +212,14 @@ static void efx_ethtool_get_wol(struct net_device *net_dev,
static int efx_ethtool_set_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->set_wol(efx, wol->wolopts);
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
struct ethtool_fec_stats *fec_stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_fec_stats)
efx->type->get_fec_stats(efx, fec_stats);
@@ -228,7 +228,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
struct ethtool_ts_info *ts_info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Software capabilities */
ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bd552c7dffcb..6649a2327d03 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
@@ -100,27 +101,35 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "log-tc-errors",
+};
+
+#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0)
+
+#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings)
+
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->msg_enable;
}
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
efx->msg_enable = msg_enable;
}
@@ -128,7 +137,7 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
bool already_up;
int rc = -ENOMEM;
@@ -137,7 +146,7 @@ void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests)
goto fail;
- if (efx->state != STATE_READY) {
+ if (!efx_net_active(efx->state)) {
rc = -EBUSY;
goto out;
}
@@ -176,7 +185,7 @@ fail:
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
@@ -186,7 +195,7 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev,
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
int rc = 0;
@@ -441,7 +450,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
switch (string_set) {
case ETH_SS_STATS:
@@ -451,6 +460,8 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ case ETH_SS_PRIV_FLAGS:
+ return EFX_ETHTOOL_PRIV_FLAGS_COUNT;
default:
return -EINVAL;
}
@@ -459,7 +470,7 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int i;
switch (string_set) {
@@ -467,7 +478,7 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (efx_describe_per_queue_stats(efx, strings) *
@@ -477,17 +488,44 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++)
+ strscpy(strings + i * ETH_GSTRING_LEN,
+ efx_ethtool_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ break;
default:
/* No other string sets */
break;
}
}
+u32 efx_ethtool_get_priv_flags(struct net_device *net_dev)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 ret_flags = 0;
+
+ if (efx->log_tc_errs)
+ ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS;
+
+ return ret_flags;
+}
+
+int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ efx->log_tc_errs =
+ !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS);
+
+ return 0;
+}
+
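These two callbacks implement ethtool's generic private-flags interface; once hooked into the driver's ethtool_ops (the hookup is outside this hunk), the flag can be inspected and toggled from user space. Usage sketch with a hypothetical interface name:

# ethtool --show-priv-flags ens1f0
Private flags for ens1f0:
log-tc-errors: off
# ethtool --set-priv-flags ens1f0 log-tc-errors on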
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
@@ -561,7 +599,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
mutex_lock(&efx->mac_lock);
@@ -584,7 +622,7 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
@@ -604,7 +642,7 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
int efx_ethtool_get_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -617,7 +655,7 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev,
int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -809,7 +847,7 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 rss_context = 0;
s32 rc = 0;
@@ -1127,7 +1165,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
@@ -1148,7 +1186,7 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->n_rx_channels == 1)
return 0;
@@ -1157,7 +1195,7 @@ u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->rx_hash_key_size;
}
@@ -1165,7 +1203,7 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->rx_pull_rss_config(efx);
@@ -1186,7 +1224,7 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
@@ -1205,7 +1243,7 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
int rc = 0;
@@ -1238,7 +1276,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
const u8 hfunc, u32 *rss_context,
bool delete)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
bool allocated = false;
int rc;
@@ -1300,7 +1338,7 @@ out_unlock:
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->map_reset_flags(flags);
@@ -1314,7 +1352,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
@@ -1327,7 +1365,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
int efx_ethtool_get_module_info(struct net_device *net_dev,
struct ethtool_modinfo *modinfo)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 659491932101..0afc74021a5e 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -27,6 +27,8 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
+u32 efx_ethtool_get_priv_flags(struct net_device *net_dev);
+int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags);
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __attribute__ ((unused)),
u64 *data);
diff --git a/drivers/net/ethernet/sfc/falcon/bitfield.h b/drivers/net/ethernet/sfc/falcon/bitfield.h
index 5eb178d0c149..78537a53009e 100644
--- a/drivers/net/ethernet/sfc/falcon/bitfield.h
+++ b/drivers/net/ethernet/sfc/falcon/bitfield.h
@@ -117,7 +117,7 @@ typedef union ef4_oword {
*
* ( element ) << 4
*
- * The result will contain the relevant bits filled in in the range
+ * The result will contain the relevant bits filled in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 60c595ef7589..e151b0957751 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -112,11 +112,6 @@ module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
/* This is the time (in jiffies) between invocations of the hardware
* monitor.
* On Falcon-based NICs, this will:
@@ -2017,8 +2012,7 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
struct ef4_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- ef4_poll, napi_weight);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
}
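[netif_napi_add() dropped its NAPI weight parameter in this kernel generation; pollers now get the default NAPI_POLL_WEIGHT (64), which is why the driver-local napi_weight above is deleted rather than carried over. A minimal sketch of the new call shape, with a hypothetical poll callback:

	/* weight argument is gone; NAPI_POLL_WEIGHT (64) is implied */
	netif_napi_add(net_dev, &napi, my_poll);
]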
static void ef4_init_napi(struct ef4_nic *efx)
@@ -2267,7 +2261,7 @@ static int ef4_register_netdev(struct ef4_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &ef4_netdev_ops;
net_dev->ethtool_ops = &ef4_ethtool_ops;
- netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
+ netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
net_dev->min_mtu = EF4_MIN_MTU;
net_dev->max_mtu = EF4_MAX_MTU;
@@ -2335,7 +2329,7 @@ static void ef4_unregister_netdev(struct ef4_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
if (ef4_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
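[The strlcpy() to strscpy() conversions throughout this patch are drop-in for these call sites; what changes is the return value and source handling. A minimal sketch:

	char dst[8];
	const char *src = "longer than eight";
	ssize_t n = strscpy(dst, src, sizeof(dst));
	/* n == -E2BIG here (truncated); on success n is the copied
	 * length. strlcpy() would return strlen(src) and read all of it.
	 */
]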
@@ -2646,7 +2640,7 @@ static int ef4_init_struct(struct ef4_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 907254b36663..3976a333f7e3 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -162,9 +162,9 @@ static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct ef4_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
@@ -412,7 +412,7 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (ef4_describe_per_queue_stats(efx, strings) *
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 3324a6219a09..7a1c9337081b 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -2387,7 +2387,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
board->i2c_data.data = efx;
board->i2c_adap.algo_data = &board->i2c_data;
board->i2c_adap.dev.parent = &efx->pci_dev->dev;
- strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
+ strscpy(board->i2c_adap.name, "SFC4000 GPIO",
sizeof(board->i2c_adap.name));
rc = i2c_bit_add_bus(&board->i2c_adap);
if (rc)
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 2c91792cec01..c64623c2e80c 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -2711,7 +2711,7 @@ void ef4_farch_filter_table_remove(struct ef4_nic *efx)
enum ef4_farch_filter_table_id table_id;
for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2740,9 +2740,7 @@ int ef4_farch_filter_table_probe(struct ef4_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
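[bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(size), sizeof(unsigned long), ...) pattern one-for-one; the two must stay paired, but the usual bitops keep working. Sketch, where 'size' and 'id' stand in for the driver's values:

	unsigned long *used = bitmap_zalloc(size, GFP_KERNEL);

	if (!used)
		return -ENOMEM;
	set_bit(id, used);	/* normal bitops still apply */
	bitmap_free(used);	/* must pair with bitmap_zalloc() */
]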
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a381cf9ec4f3..a2c7139f2b32 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -679,7 +679,7 @@ union ef4_multicast_hash {
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
* @rx_ip_align: RX DMA address offset to have IP header aligned in
- * in accordance with NET_IP_ALIGN
+ * accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index 156da315ec89..78c851b5a56f 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -452,7 +452,7 @@ size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 0c6cc2191369..6bbdb5d2eebf 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -718,12 +718,14 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
struct ef4_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct iommu_domain __maybe_unused *domain;
/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
- if (iommu_present(&pci_bus_type))
+ domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
+ if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
else
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index f7306e93a8b8..b9369483758c 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -98,7 +98,8 @@ unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EF4_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
+ DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+ EF4_PAGE_SIZE));
return max_descs;
}
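[GSO_MAX_SIZE stopped meaning 64KB when the core grew BIG TCP support, so drivers sized around the old ceiling switch to GSO_LEGACY_MAX_SIZE, which stays 65536. Worked example for the code above, assuming EF4_PAGE_SIZE is 4096:

	DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE, EF4_PAGE_SIZE)
		== DIV_ROUND_UP(65536, 4096) == 16 extra descriptors
]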
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 40b2af8bfb81..5f201a547e5b 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/in6.h>
#include <asm/byteorder.h>
/**
@@ -88,6 +89,7 @@ enum efx_filter_priority {
* the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
+ * @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
@@ -95,6 +97,7 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
+ EFX_FILTER_FLAG_VPORT_ID = 0x20,
};
/** enum efx_encap_type - types of encapsulation
@@ -127,6 +130,9 @@ enum efx_encap_type {
* MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
+ * @vport_id: Virtual port ID associated with RX queue, for adapter switching,
+ * if %EFX_FILTER_FLAG_VPORT_ID is set. This is an MCFW vport_id, or on
+ * EF100 an mport selector.
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
@@ -157,7 +163,8 @@ struct efx_filter_spec {
u32 flags:6;
u32 dmaq_id:12;
u32 rss_context;
- __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ u32 vport_id;
+ __be16 outer_vid;
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
@@ -218,6 +225,27 @@ efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
}
/**
+ * efx_filter_set_ipv6_local - specify IPv6 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv6_local(struct efx_filter_spec *spec, u8 proto,
+ const struct in6_addr *host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IPV6);
+ spec->ip_proto = proto;
+ memcpy(spec->loc_host, host, sizeof(spec->loc_host));
+ spec->loc_port = port;
+ return 0;
+}
+
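[A minimal usage sketch for the new IPv6 helper, driven the same way as the existing IPv4 ones; rxq_index and the matched address/port are assumptions:

	struct efx_filter_spec spec;
	struct in6_addr ip = IN6ADDR_LOOPBACK_INIT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
	efx_filter_set_ipv6_local(&spec, IPPROTO_TCP, &ip, htons(22));
]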
+/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
@@ -292,6 +320,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}
+/**
+ * efx_filter_set_vport_id - override virtual port id relating to filter
+ * @spec: Specification to initialise
+ * @vport_id: firmware ID of the virtual port
+ */
+static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
+ u32 vport_id)
+{
+ spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
+ spec->vport_id = vport_id;
+}
+
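[efx_filter_set_vport_id() only records an override; it takes effect because the MCDI push code (see the mcdi_filters.c hunk later in this patch) substitutes spec->vport_id for efx->vport_id when EFX_FILTER_FLAG_VPORT_ID is set. Continuing the sketch above, with a hypothetical vport:

	efx_filter_set_vport_id(&spec, my_vport_id);
]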
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type)
{
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
new file mode 100644
index 000000000000..874c765b2465
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mae.h"
+#include "mcdi.h"
+#include "mcdi_pcol_mae.h"
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ if (WARN_ON_ONCE(!id))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!label))
+ return -EINVAL;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+ MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+ MAE_MPORT_SELECTOR_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+ *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+ return 0;
+}
+
+int efx_mae_free_mport(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
+ return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
+ MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+/* Constructs an mport selector from an mport ID, because they're not the same */
+void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
+ MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+/* id is really only 24 bits wide */
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
+ return 0;
+}
+
+static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_MAE_GET_CAPS_IN_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_GET_CAPS, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ caps->match_field_count = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
+ caps->action_prios = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ACTION_PRIOS);
+ return 0;
+}
+
+static int efx_mae_get_rule_fields(struct efx_nic *efx, u32 cmd,
+ u8 *field_support)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(MAE_NUM_FIELDS));
+ MCDI_DECLARE_STRUCT_PTR(caps);
+ unsigned int count;
+ size_t outlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_IN_LEN);
+
+ rc = efx_mcdi_rpc(efx, cmd, NULL, 0, outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ count = MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_COUNT);
+ memset(field_support, MAE_FIELD_UNSUPPORTED, MAE_NUM_FIELDS);
+ caps = _MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_FIELD_FLAGS);
+ /* We're only interested in the support status enum, not any other
+ * flags, so just extract that from each entry.
+ */
+ for (i = 0; i < count; i++)
+ if (i * sizeof(*outbuf) + MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST < outlen)
+ field_support[i] = EFX_DWORD_FIELD(caps[i], MAE_FIELD_FLAGS_SUPPORT_STATUS);
+ return 0;
+}
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps)
+{
+ int rc;
+
+ rc = efx_mae_get_basic_caps(efx, caps);
+ if (rc)
+ return rc;
+ return efx_mae_get_rule_fields(efx, MC_CMD_MAE_GET_AR_CAPS,
+ caps->action_rule_fields);
+}
+
+/* Bit twiddling:
+ * Prefix: 1...110...0
+ * ~: 0...001...1
+ * + 1: 0...010...0 is power of two
+ * so (~x) & ((~x) + 1) == 0. Converse holds also.
+ */
+#define is_prefix_byte(_x) !(((_x) ^ 0xff) & (((_x) ^ 0xff) + 1))
+
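[Worked example of the prefix test: for mask byte 0xF0, (0xF0 ^ 0xff) == 0x0F and 0x0F & 0x10 == 0, so it is a prefix byte; for 0xA5, (0xA5 ^ 0xff) == 0x5A and 0x5A & 0x5B == 0x5A != 0, so it is not.]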
+enum mask_type { MASK_ONES, MASK_ZEROES, MASK_PREFIX, MASK_OTHER };
+
+static const char *mask_type_name(enum mask_type typ)
+{
+ switch (typ) {
+ case MASK_ONES:
+ return "all-1s";
+ case MASK_ZEROES:
+ return "all-0s";
+ case MASK_PREFIX:
+ return "prefix";
+ case MASK_OTHER:
+ return "arbitrary";
+ default: /* can't happen */
+ return "unknown";
+ }
+}
+
+/* Checks a (big-endian) bytestring is a bit prefix */
+static enum mask_type classify_mask(const u8 *mask, size_t len)
+{
+ bool zeroes = true; /* All bits seen so far are zeroes */
+ bool ones = true; /* All bits seen so far are ones */
+ bool prefix = true; /* Valid prefix so far */
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (ones) {
+ if (!is_prefix_byte(mask[i]))
+ prefix = false;
+ } else if (mask[i]) {
+ prefix = false;
+ }
+ if (mask[i] != 0xff)
+ ones = false;
+ if (mask[i])
+ zeroes = false;
+ }
+ if (ones)
+ return MASK_ONES;
+ if (zeroes)
+ return MASK_ZEROES;
+ if (prefix)
+ return MASK_PREFIX;
+ return MASK_OTHER;
+}
+
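[Example classifications from classify_mask(): {0xff, 0xff} is MASK_ONES; {0x00, 0x00} is MASK_ZEROES; {0xff, 0xc0} is MASK_PREFIX (a /10 on 16 bits); {0xff, 0x0f} is MASK_OTHER, since the run of ones is broken.]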
+static int efx_mae_match_check_cap_typ(u8 support, enum mask_type typ)
+{
+ switch (support) {
+ case MAE_FIELD_UNSUPPORTED:
+ case MAE_FIELD_SUPPORTED_MATCH_NEVER:
+ if (typ == MASK_ZEROES)
+ return 0;
+ return -EOPNOTSUPP;
+ case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
+ if (typ == MASK_ZEROES)
+ return 0;
+ fallthrough;
+ case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
+ if (typ == MASK_ONES)
+ return 0;
+ return -EINVAL;
+ case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
+ if (typ == MASK_OTHER)
+ return -EOPNOTSUPP;
+ return 0;
+ case MAE_FIELD_SUPPORTED_MATCH_MASK:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
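[In tabular form, efx_mae_match_check_cap_typ() accepts:

	support status			masks accepted
	UNSUPPORTED / MATCH_NEVER	all-0s only
	MATCH_ALWAYS			all-1s only
	MATCH_OPTIONAL			all-0s or all-1s
	MATCH_PREFIX			all-0s, all-1s or a prefix
	MATCH_MASK			anything
]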
+int efx_mae_match_check_caps(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *supported_fields = efx->tc->caps->action_rule_fields;
+ __be32 ingress_port = cpu_to_be32(mask->ingress_port);
+ enum mask_type ingress_port_mask_type;
+ int rc;
+
+ /* Check for _PREFIX assumes big-endian, so we need to convert */
+ ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
+ sizeof(ingress_port));
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
+ ingress_port_mask_type);
+ if (rc) {
+ efx_tc_err(efx, "No support for %s mask in field ingress_port\n",
+ mask_type_name(ingress_port_mask_type));
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port");
+ return rc;
+ }
+ return 0;
+}
+
+static bool efx_mae_asl_id(u32 id)
+{
+ return !!(id & BIT(31));
+}
+
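[Bit 31 is a discriminator, so a single u32 fw_id can name either an action set (AS, bit 31 clear) or an action-set list (ASL, bit 31 set): 0x00000007 would be an AS ID, 0x80000007 an ASL ID. This is what lets efx_mae_alloc_action_set_list() below hand back a bare AS ID for single-entry lists, and efx_mae_insert_rule() dispatch on efx_mae_asl_id(acts_id).]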
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+ MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
+ MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
+ MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
+ if (act->deliver)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
+ act->dest_mport);
+ BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
+ /* We rely on the high bit of AS IDs always being clear.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ return -EIO;
+ }
+ return 0;
+}
+
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-sets exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
+ return -EIO;
+ return 0;
+}
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+ struct efx_tc_action_set *act;
+ size_t inlen, outlen, i = 0;
+ efx_dword_t *inbuf;
+ int rc;
+
+ list_for_each_entry(act, &acts->list, list)
+ i++;
+ if (i == 0)
+ return -EINVAL;
+ if (i == 1) {
+ /* Don't wrap an ASL around a single AS, just use the AS_ID
+ * directly. ASLs are a more limited resource.
+ */
+ act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
+ acts->fw_id = act->fw_id;
+ return 0;
+ }
+ if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
+ return -EOPNOTSUPP; /* Too many actions */
+ inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+ i = 0;
+ list_for_each_entry(act, &acts->list, list) {
+ MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
+ i, act->fw_id);
+ i++;
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out_free;
+ if (outlen < sizeof(outbuf)) {
+ rc = -EIO;
+ goto out_free;
+ }
+ acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+ /* We rely on the high bit of ASL IDs always being set.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
+ efx_mae_free_action_set_list(efx, acts);
+ rc = -EIO;
+ }
+out_free:
+ kfree(inbuf);
+ return rc;
+}
+
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ /* If this is just an AS_ID with no ASL wrapper, then there is
+ * nothing for us to free. (The AS will be freed later.)
+ */
+ if (efx_mae_asl_id(acts->fw_id)) {
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
+ acts->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-set-lists exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
+ return -EIO;
+ }
+ /* We're probably about to free @acts, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
+ return 0;
+}
+
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match)
+{
+ if (match->mask.ingress_port) {
+ if (~match->mask.ingress_port)
+ return -EOPNOTSUPP;
+ MCDI_STRUCT_SET_DWORD(match_crit,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
+ match->value.ingress_port);
+ }
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
+ match->mask.ingress_port);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
+ match->value.recirc_id);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
+ match->mask.recirc_id);
+ return 0;
+}
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ MCDI_DECLARE_STRUCT_PTR(response);
+ size_t outlen;
+ int rc;
+
+ if (!id)
+ return -EINVAL;
+
+ match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+ response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+ if (efx_mae_asl_id(acts_id)) {
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+ MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+ } else {
+ /* We only had one AS, so we didn't wrap it in an ASL */
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+ rc = efx_mae_populate_match_criteria(match_crit, match);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+ return 0;
+}
+
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what rules exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
+ return -EIO;
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
new file mode 100644
index 000000000000..3e0cd238d523
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF100_MAE_H
+#define EF100_MAE_H
+/* MCDI interface for the ef100 Match-Action Engine */
+
+#include "net_driver.h"
+#include "tc.h"
+#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
+int efx_mae_free_mport(struct efx_nic *efx, u32 id);
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
+void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
+
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+
+#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
+
+struct mae_caps {
+ u32 match_field_count;
+ u32 action_prios;
+ u8 action_rule_fields[MAE_NUM_FIELDS];
+};
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
+
+int efx_mae_match_check_caps(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack);
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id);
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+
+#endif /* EF100_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index be6bfd6b7ec7..af338208eae9 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -99,14 +99,12 @@ int efx_mcdi_init(struct efx_nic *efx)
*/
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
+ pci_err(efx->pci_dev, "Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
+ pci_err(efx->pci_dev, "Host already registered with MCPU\n");
if (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
@@ -163,9 +161,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
spin_lock_bh(&mcdi->iface_lock);
++mcdi->seqno;
+ seqno = mcdi->seqno & SEQ_MASK;
spin_unlock_bh(&mcdi->iface_lock);
- seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
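[The seqno hunk above is an ordering fix: taking the masked copy after dropping iface_lock could race with a concurrent ++mcdi->seqno from another issuer. Illustrative interleaving under the old code, for two threads A and B:

	A: ++seqno (now 1); unlock
	B: ++seqno (now 2); unlock
	A: seqno & SEQ_MASK	/* reads 2: A stamps B's sequence number */
]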
@@ -1261,7 +1259,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
}
/* The MC is going down in to BIST mode. set the BIST flag to block
- * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
* (which doesn't actually execute a reset, it waits for the controlling
* function to reset it).
*/
@@ -1447,7 +1445,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
return;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
buf[0] = 0;
}
@@ -1471,8 +1469,9 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* care what firmware we get.
*/
if (rc == -EPERM) {
- netif_dbg(efx, probe, efx->net_dev,
- "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ pci_dbg(efx->pci_dev,
+ "%s with fw-variant setting failed EPERM, trying without it\n",
+ __func__);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
MC_CMD_FW_DONT_CARE);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
@@ -1514,7 +1513,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
return 0;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -2130,6 +2129,52 @@ fail:
return rc;
}
+/* Failure to read a privilege mask is never fatal, because we can always
+ * carry on as though we didn't have the privilege we were interested in.
+ * So use efx_mcdi_rpc_quiet().
+ */
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
+{
+ MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+ MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+ size_t outlen;
+ u16 pf, vf;
+ int rc;
+
+ if (!efx || !mask)
+ return -EINVAL;
+
+ /* Get our function number */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
+ fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
+ &outlen);
+ if (rc != 0)
+ return rc;
+ if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
+ return -EIO;
+
+ pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
+ vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);
+
+ MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ *mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return 0;
+}
+
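[A minimal caller sketch for the new helper; per the comment above, a failure is treated as "no extra privileges" rather than an error. The group bits to test in the returned mask are the MC_CMD_PRIVILEGE_MASK definitions in mcdi_pcol.h (not shown here):

	u32 mask = 0;

	if (efx_mcdi_get_privilege_mask(efx, &mask))
		mask = 0;	/* non-fatal: assume minimal privileges */
	pci_dbg(efx->pci_dev, "privilege mask %#x\n", mask);
]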
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 69c2924a147c..1f18e9dc62e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -201,11 +201,23 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+/* Use MCDI_STRUCT_ functions to access members of MCDI structuredefs.
+ * _buf should point to the start of the structure, typically obtained with
+ * MCDI_DECLARE_STRUCT_PTR(structure) = _MCDI_DWORD(mcdi_buf, FIELD_WHICH_IS_STRUCT);
+ */
+#define MCDI_STRUCT_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, _field ## _OFST)
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define _MCDI_STRUCT_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
+#define MCDI_STRUCT_SET_BYTE(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 1); \
+ *(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
+ } while (0)
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
@@ -214,6 +226,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
@@ -366,6 +380,7 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
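[Putting the new MCDI_STRUCT_* accessors together, the pattern used by mae.c earlier in this patch is: obtain a pointer to the structuredef inside the message with _MCDI_DWORD(), then set members by their un-prefixed offsets, as in efx_mae_populate_match_criteria():

	MCDI_DECLARE_STRUCT_PTR(match_crit);

	match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
	MCDI_STRUCT_SET_DWORD(match_crit,
			      MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
			      match->mask.ingress_port);
]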
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 1523be77b9db..4ff6586116ee 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
+ if (flags & EFX_FILTER_FLAG_VPORT_ID)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
+ else
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
+ saved_spec->vport_id = spec->vport_id;
}
} else if (!replacing) {
kfree(saved_spec);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 06426aa9f2f3..c0d6558b9fd2 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
*/
bool mc_chaining;
bool vlan_filter;
+ /* Entries on the vlan_list are added/removed under filter_sem */
struct list_head vlan_list;
};
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 5954fcfee2b1..f5128db7c7e7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -285,7 +285,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
- strlcpy(attr->name, name, sizeof(attr->name));
+ strscpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index d3fcbf930dba..cd297e19cddc 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -73,8 +73,8 @@
* \------------------------------ Resync (always set)
*
 * The client writes its request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writting
- * back into shared memory, or by writting out an event.
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
@@ -165,138 +165,8 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
-/* Operation not permitted. */
-#define MC_CMD_ERR_EPERM 1
-/* Non-existent command target */
-#define MC_CMD_ERR_ENOENT 2
-/* assert() has killed the MC */
-#define MC_CMD_ERR_EINTR 4
-/* I/O failure */
-#define MC_CMD_ERR_EIO 5
-/* Already exists */
-#define MC_CMD_ERR_EEXIST 6
-/* Try again */
-#define MC_CMD_ERR_EAGAIN 11
-/* Out of memory */
-#define MC_CMD_ERR_ENOMEM 12
-/* Caller does not hold required locks */
-#define MC_CMD_ERR_EACCES 13
-/* Resource is currently unavailable (e.g. lock contention) */
-#define MC_CMD_ERR_EBUSY 16
-/* No such device */
-#define MC_CMD_ERR_ENODEV 19
-/* Invalid argument to target */
-#define MC_CMD_ERR_EINVAL 22
-/* Broken pipe */
-#define MC_CMD_ERR_EPIPE 32
-/* Read-only */
-#define MC_CMD_ERR_EROFS 30
-/* Out of range */
-#define MC_CMD_ERR_ERANGE 34
-/* Non-recursive resource is already acquired */
-#define MC_CMD_ERR_EDEADLK 35
-/* Operation not implemented */
-#define MC_CMD_ERR_ENOSYS 38
-/* Operation timed out */
-#define MC_CMD_ERR_ETIME 62
-/* Link has been severed */
-#define MC_CMD_ERR_ENOLINK 67
-/* Protocol error */
-#define MC_CMD_ERR_EPROTO 71
-/* Operation not supported */
-#define MC_CMD_ERR_ENOTSUP 95
-/* Address not available */
-#define MC_CMD_ERR_EADDRNOTAVAIL 99
-/* Not connected */
-#define MC_CMD_ERR_ENOTCONN 107
-/* Operation already in progress */
-#define MC_CMD_ERR_EALREADY 114
-
-/* Resource allocation failed. */
-#define MC_CMD_ERR_ALLOC_FAIL 0x1000
-/* V-adaptor not found. */
-#define MC_CMD_ERR_NO_VADAPTOR 0x1001
-/* EVB port not found. */
-#define MC_CMD_ERR_NO_EVB_PORT 0x1002
-/* V-switch not found. */
-#define MC_CMD_ERR_NO_VSWITCH 0x1003
-/* Too many VLAN tags. */
-#define MC_CMD_ERR_VLAN_LIMIT 0x1004
-/* Bad PCI function number. */
-#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
-/* Invalid VLAN mode. */
-#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
-/* Invalid v-switch type. */
-#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
-/* Invalid v-port type. */
-#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
-/* MAC address exists. */
-#define MC_CMD_ERR_MAC_EXIST 0x1009
-/* Slave core not present */
-#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
-/* The datapath is disabled. */
-#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
-/* The requesting client is not a function */
-#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
-/* The requested operation might require the
- command to be passed between MCs, and the
- transport doesn't support that. Should
- only ever been seen over the UART. */
-#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
-/* VLAN tag(s) exists */
-#define MC_CMD_ERR_VLAN_EXIST 0x100e
-/* No MAC address assigned to an EVB port */
-#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
-/* Notifies the driver that the request has been relayed
- * to an admin function for authorization. The driver should
- * wait for a PROXY_RESPONSE event and then resend its request.
- * This error code is followed by a 32-bit handle that
- * helps matching it with the respective PROXY_RESPONSE event. */
-#define MC_CMD_ERR_PROXY_PENDING 0x1010
-#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
-/* The request cannot be passed for authorization because
- * another request from the same function is currently being
- * authorized. The drvier should try again later. */
-#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
-/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
- * that has enabled proxying or BLOCK_INDEX points to a function that
- * doesn't await an authorization. */
-#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
-/* This code is currently only used internally in FW. Its meaning is that
- * an operation failed due to lack of SR-IOV privilege.
- * Normally it is translated to EPERM by send_cmd_err(),
- * but it may also be used to trigger some special mechanism
- * for handling such case, e.g. to relay the failed request
- * to a designated admin function for authorization. */
-#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
-/* Workaround 26807 could not be turned on/off because some functions
- * have already installed filters. See the comment at
- * MC_CMD_WORKAROUND_BUG26807.
- * May also returned for other operations such as sub-variant switching. */
-#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
- * doesn't exist on this NIC */
-#define MC_CMD_ERR_NO_CLOCK 0x1015
-/* Returned by MC_CMD_TESTASSERT if the action that should
- * have caused an assertion failed to do so. */
-#define MC_CMD_ERR_UNREACHABLE 0x1016
-/* This command needs to be processed in the background but there were no
- * resources to do so. Send it again after a command has completed. */
-#define MC_CMD_ERR_QUEUE_FULL 0x1017
-/* The operation could not be completed because the PCIe link has gone
- * away. This error code is never expected to be returned over the TLP
- * transport. */
-#define MC_CMD_ERR_NO_PCIE 0x1018
-/* The operation could not be completed because the datapath has gone
- * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
- * datapath absence may be temporary*/
-#define MC_CMD_ERR_NO_DATAPATH 0x1019
-/* The operation could not complete because some VIs are allocated */
-#define MC_CMD_ERR_VIS_PRESENT 0x101a
-/* The operation could not complete because some PIO buffers are allocated */
-#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-
#define MC_CMD_ERR_CODE_OFST 0
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
/* We define 8 "escape" commands to allow
for command number space extension */
@@ -365,10 +235,857 @@
*/
#define MC_CMD_ERR_ARG_OFST 4
-/* No space */
-#define MC_CMD_ERR_ENOSPC 28
-
-/* MCDI_EVENT structuredef */
+/* MC_CMD_ERR enum: Public MCDI error codes. Error codes that correspond to
+ * POSIX errnos should use the same numeric values that linux does. Error codes
+ * specific to Solarflare firmware should use values in the range 0x1000 -
+ * 0x10ff. The range 0x2000 - 0x20ff is reserved for private error codes (see
+ * MC_CMD_ERR_PRIV below).
+ */
+/* enum: Operation not permitted. */
+#define MC_CMD_ERR_EPERM 0x1
+/* enum: Non-existent command target */
+#define MC_CMD_ERR_ENOENT 0x2
+/* enum: assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 0x4
+/* enum: I/O failure */
+#define MC_CMD_ERR_EIO 0x5
+/* enum: Already exists */
+#define MC_CMD_ERR_EEXIST 0x6
+/* enum: Try again */
+#define MC_CMD_ERR_EAGAIN 0xb
+/* enum: Out of memory */
+#define MC_CMD_ERR_ENOMEM 0xc
+/* enum: Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 0xd
+/* enum: Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 0x10
+/* enum: No such device */
+#define MC_CMD_ERR_ENODEV 0x13
+/* enum: Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 0x16
+/* enum: No space */
+#define MC_CMD_ERR_ENOSPC 0x1c
+/* enum: Read-only */
+#define MC_CMD_ERR_EROFS 0x1e
+/* enum: Broken pipe */
+#define MC_CMD_ERR_EPIPE 0x20
+/* enum: Out of range */
+#define MC_CMD_ERR_ERANGE 0x22
+/* enum: Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 0x23
+/* enum: Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 0x26
+/* enum: Operation timed out */
+#define MC_CMD_ERR_ETIME 0x3e
+/* enum: Link has been severed */
+#define MC_CMD_ERR_ENOLINK 0x43
+/* enum: Protocol error */
+#define MC_CMD_ERR_EPROTO 0x47
+/* enum: Bad message */
+#define MC_CMD_ERR_EBADMSG 0x4a
+/* enum: Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 0x5f
+/* enum: Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 0x63
+/* enum: Not connected */
+#define MC_CMD_ERR_ENOTCONN 0x6b
+/* enum: Operation already in progress */
+#define MC_CMD_ERR_EALREADY 0x72
+/* enum: Stale handle. The handle references a resource that no longer exists.
+ */
+#define MC_CMD_ERR_ESTALE 0x74
+/* enum: Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* enum: V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* enum: EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* enum: V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* enum: Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* enum: Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* enum: Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* enum: Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* enum: Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* enum: MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* enum: Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* enum: The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* enum: The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* enum: The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. Should only ever be seen over
+ * the UART.
+ */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* enum: VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* enum: No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* enum: Notifies the driver that the request has been relayed to an admin
+ * function for authorization. The driver should wait for a PROXY_RESPONSE
+ * event and then resend its request. This error code is followed by a 32-bit
+ * handle that helps matching it with the respective PROXY_RESPONSE event.
+ */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+/* enum: The request cannot be passed for authorization because another request
+ * from the same function is currently being authorized. The driver should try
+ * again later.
+ */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* enum: Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that doesn't
+ * await an authorization.
+ */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* enum: This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege. Normally it is
+ * translated to EPERM by send_cmd_err(), but it may also be used to trigger
+ * some special mechanism for handling such case, e.g. to relay the failed
+ * request to a designated admin function for authorization.
+ */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* enum: Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. May also be returned for other operations such
+ * as sub-variant switching.
+ */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
+ * this NIC
+ */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* enum: Returned by MC_CMD_TESTASSERT if the action that should have caused an
+ * assertion failed to do so.
+ */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* enum: This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed.
+ */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* enum: The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport.
+ */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* enum: The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary
+ */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* enum: The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* enum: The operation could not complete because some PIO buffers are
+ * allocated
+ */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+
+/* MC_CMD_FPGA_FLASH_INDEX enum */
+#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
+#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
+
+/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
+/* enum: Legacy mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
+/* enum: Switchdev mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
+/* enum: Bootstrap mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
+/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
+
+/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
+ * interfaces. There is a need to refer to interfaces explicitly from drivers
+ * (for example, a management driver on one interface administering a function
+ * on another interface). This enumeration provides stable identifiers to all
+ * interfaces present on a product. Product documentation will specify which
+ * interfaces exist and their associated identifier. In general, drivers,
+ * should not assign special meanings to specific values. Instead, behaviour
+ * should be determined by NIC configuration, which will identify interfaces
+ * where appropriate.
+ */
+/* enum: Primary host interfaces. Typically (i.e. for all known SFC products)
+ * the interface exposed on the edge connector (or form factor equivalent).
+ */
+#define PCIE_INTERFACE_HOST_PRIMARY 0x0
+/* enum: Riverhead and keystone products have a second PCIe interface to which
+ * an on-NIC ARM module is expected to be connected.
+ */
+#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: For MCDI commands issued over a PCIe interface, this value is
+ * translated into the interface over which the command was issued. Not
+ * meaningful for other MCDI transports.
+ */
+#define PCIE_INTERFACE_CALLER 0xffffffff
+
+/* MC_CLIENT_ID_SPECIFIER enum */
+/* enum: Equivalent to the caller's client ID */
+#define MC_CMD_CLIENT_ID_SELF 0xffffffff
+
+/* MAE_FIELD_SUPPORT_STATUS enum */
+/* enum: The NIC does not support this field. The driver must ensure that any
+ * mask associated with this field in a match rule is zeroed. The NIC may
+ * either reject requests with an invalid mask for such a field, or may assume
+ * that the mask is zero. (This category only exists to describe behaviour for
+ * fields that a newer driver might know about but that older firmware does
+ * not. It is recommended that firmware report MAE_FIELD_SUPPORTED_MATCH_NEVER
+ * for all match fields defined at the time of its compilation. If a driver
+ * sees a field support status value that it does not recognise, it must treat
+ * that field as though it was reported as MAE_FIELD_SUPPORTED_MATCH_NEVER,
+ * and must never set a non-zero mask value for this field.)
+ */
+#define MAE_FIELD_UNSUPPORTED 0x0
+/* enum: The NIC supports this field, but cannot use it in a match rule. The
+ * driver must ensure that any mask for such a field in a match rule is zeroed.
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_NEVER 0x1
+/* enum: The NIC supports this field, and must use it in all match rules. The
+ * driver must ensure that any mask for such a field is all ones. The NIC will
+ * reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_ALWAYS 0x2
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or all ones. The NIC will reject requests with an invalid mask for such a
+ * field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_OPTIONAL 0x3
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or a consecutive set of ones followed by all zeroes (starting from MSB).
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_PREFIX 0x4
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver may provide an arbitrary mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_MASK 0x5
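+
+/* A minimal illustrative sketch (not part of the MCDI protocol definitions)
+ * of the mask validation rules above, assuming a 32-bit field for
+ * simplicity; real MAE fields vary in width.
+ */
+#if 0 /* illustration only */
+static bool mae_match_mask_valid(unsigned int support, u32 mask)
+{
+	switch (support) {
+	case MAE_FIELD_UNSUPPORTED:
+	case MAE_FIELD_SUPPORTED_MATCH_NEVER:
+		return mask == 0;		/* mask must be zeroed */
+	case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
+		return mask == ~0u;		/* mask must be all ones */
+	case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
+		return mask == 0 || mask == ~0u;
+	case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
+		/* ones from the MSB down, then zeroes: ~mask must be of
+		 * the form 2^n - 1 (this also accepts 0 and all-ones)
+		 */
+		return (~mask & (~mask + 1)) == 0;
+	case MAE_FIELD_SUPPORTED_MATCH_MASK:
+		return true;			/* arbitrary mask allowed */
+	default:
+		/* unrecognised status: treat as SUPPORTED_MATCH_NEVER */
+		return mask == 0;
+	}
+}
+#endif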
+
+/* MAE_CT_VNI_MODE enum: Controls the layout of the VNI input to the conntrack
+ * lookup. (Values are not arbitrary - constrained by table access ABI.)
+ */
+/* enum: The VNI input to the conntrack lookup will be zero. */
+#define MAE_CT_VNI_MODE_ZERO 0x0
+/* enum: The VNI input to the conntrack lookup will be the VNI (VXLAN/Geneve)
+ * or VSID (NVGRE) field from the packet.
+ */
+#define MAE_CT_VNI_MODE_VNI 0x1
+/* enum: The VNI input to the conntrack lookup will be the VLAN ID from the
+ * outermost VLAN tag (in bottom 12 bits; top 12 bits zero).
+ */
+#define MAE_CT_VNI_MODE_1VLAN 0x2
+/* enum: The VNI input to the conntrack lookup will be the VLAN IDs from both
+ * VLAN tags (outermost in bottom 12 bits, innermost in top 12 bits).
+ */
+#define MAE_CT_VNI_MODE_2VLAN 0x3
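+
+/* A minimal sketch (not part of the MCDI protocol definitions) of the 24-bit
+ * conntrack VNI input for each mode; parameter names are hypothetical.
+ */
+#if 0 /* illustration only */
+static u32 mae_ct_vni_input(unsigned int mode, u32 vnet_id,
+			    u16 ovlan_vid, u16 ivlan_vid)
+{
+	switch (mode) {
+	case MAE_CT_VNI_MODE_ZERO:
+		return 0;
+	case MAE_CT_VNI_MODE_VNI:
+		return vnet_id & 0xffffff;	/* VNI/VSID from the packet */
+	case MAE_CT_VNI_MODE_1VLAN:
+		return ovlan_vid & 0xfff;	/* top 12 bits zero */
+	case MAE_CT_VNI_MODE_2VLAN:		/* outermost low, innermost high */
+		return (ovlan_vid & 0xfff) | ((u32)(ivlan_vid & 0xfff) << 12);
+	}
+	return 0;
+}
+#endif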
+
+/* MAE_FIELD enum: NB: this enum shares namespace with the support status enum.
+ */
+/* enum: Source mport upon entering the MAE. */
+#define MAE_FIELD_INGRESS_PORT 0x0
+#define MAE_FIELD_MARK 0x1 /* enum */
+/* enum: Table ID used in action rule. Initially zero, can be changed in action
+ * rule response.
+ */
+#define MAE_FIELD_RECIRC_ID 0x2
+#define MAE_FIELD_IS_IP_FRAG 0x3 /* enum */
+#define MAE_FIELD_DO_CT 0x4 /* enum */
+#define MAE_FIELD_CT_HIT 0x5 /* enum */
+/* enum: Undefined unless CT_HIT=1. */
+#define MAE_FIELD_CT_MARK 0x6
+/* enum: Undefined unless DO_CT=1. */
+#define MAE_FIELD_CT_DOMAIN 0x7
+/* enum: Undefined unless CT_HIT=1. */
+#define MAE_FIELD_CT_PRIVATE_FLAGS 0x8
+/* enum: 1 if the packet ingressed the NIC from one of the MACs, else 0. */
+#define MAE_FIELD_IS_FROM_NETWORK 0x9
+/* enum: 1 if the packet has 1 or more VLAN tags, else 0. */
+#define MAE_FIELD_HAS_OVLAN 0xa
+/* enum: 1 if the packet has 2 or more VLAN tags, else 0. */
+#define MAE_FIELD_HAS_IVLAN 0xb
+/* enum: 1 if the outer packet has 1 or more VLAN tags, else 0; only present
+ * when encap
+ */
+#define MAE_FIELD_ENC_HAS_OVLAN 0xc
+/* enum: 1 if the outer packet has 2 or more VLAN tags, else 0; only present
+ * when encap
+ */
+#define MAE_FIELD_ENC_HAS_IVLAN 0xd
+/* enum: Packet is IP fragment */
+#define MAE_FIELD_ENC_IP_FRAG 0xe
+#define MAE_FIELD_ETHER_TYPE 0x21 /* enum */
+#define MAE_FIELD_VLAN0_TCI 0x22 /* enum */
+#define MAE_FIELD_VLAN0_PROTO 0x23 /* enum */
+#define MAE_FIELD_VLAN1_TCI 0x24 /* enum */
+#define MAE_FIELD_VLAN1_PROTO 0x25 /* enum */
+/* enum: Inner when encap */
+#define MAE_FIELD_ETH_SADDR 0x28
+/* enum: Inner when encap */
+#define MAE_FIELD_ETH_DADDR 0x29
+/* enum: Inner when encap. NB: IPv4 and IPv6 fields are mutually exclusive. */
+#define MAE_FIELD_SRC_IP4 0x2a
+/* enum: Inner when encap */
+#define MAE_FIELD_SRC_IP6 0x2b
+/* enum: Inner when encap */
+#define MAE_FIELD_DST_IP4 0x2c
+/* enum: Inner when encap */
+#define MAE_FIELD_DST_IP6 0x2d
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_PROTO 0x2e
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_TOS 0x2f
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_TTL 0x30
+/* enum: Inner when encap. TODO: how is this defined? The raw flags +
+ * frag_offset from the packet, or some derived value more amenable to ternary
+ * matching? TODO: there was a proposal for driver-allocated fields. The
+ * driver would provide some instruction for how to extract given field
+ * values, and would be given a field id in return. It could then use that
+ * field id in its matches. This feels like it would be extremely hard to
+ * implement in hardware, but I mention it for completeness.
+ */
+#define MAE_FIELD_IP_FLAGS 0x31
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_SPORT 0x32
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_DPORT 0x33
+/* enum: Inner when encap */
+#define MAE_FIELD_TCP_FLAGS 0x34
+/* enum: TCP packet with any of SYN, FIN or RST flag set */
+#define MAE_FIELD_TCP_SYN_FIN_RST 0x35
+/* enum: Packet is IP fragment with fragment offset 0 */
+#define MAE_FIELD_IP_FIRST_FRAG 0x36
+/* enum: The type of encapsulation used for this packet. Value as per
+ * ENCAP_TYPE_*.
+ */
+#define MAE_FIELD_ENCAP_TYPE 0x3f
+/* enum: The ID of the outer rule that marked this packet as encapsulated.
+ * Useful for implicitly matching on outer fields.
+ */
+#define MAE_FIELD_OUTER_RULE_ID 0x40
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETHER_TYPE 0x41
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_TCI 0x42
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_PROTO 0x43
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_TCI 0x44
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_PROTO 0x45
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_SADDR 0x48
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_DADDR 0x49
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP4 0x4a
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP6 0x4b
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP4 0x4c
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP6 0x4d
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_PROTO 0x4e
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TOS 0x4f
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TTL 0x50
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_FLAGS 0x51
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_SPORT 0x52
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_DPORT 0x53
+/* enum: VNI (when VXLAN or GENEVE), VSID (when NVGRE), or bottom 24 bits of
+ * Key (when L2GRE). Outer; only present when encap
+ */
+#define MAE_FIELD_ENC_VNET_ID 0x54
+
+/* MAE_MCDI_ENCAP_TYPE enum: Encapsulation type. Defines how the payload will
+ * be parsed to an inner frame. Other values are reserved. Unknown values
+ * should be treated the same as NONE. (Values are not arbitrary - constrained
+ * by table access ABI.)
+ */
+#define MAE_MCDI_ENCAP_TYPE_NONE 0x0 /* enum */
+/* enum: Don't assume enum aligns with support bitmask... */
+#define MAE_MCDI_ENCAP_TYPE_VXLAN 0x1
+#define MAE_MCDI_ENCAP_TYPE_NVGRE 0x2 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_GENEVE 0x3 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_L2GRE 0x4 /* enum */
+
+/* MAE_MPORT_END enum: Selects which end of the logical link identified by an
+ * MPORT_SELECTOR is targeted by an operation.
+ */
+/* enum: Selects the port on the MAE virtual switch */
+#define MAE_MPORT_END_MAE 0x1
+/* enum: Selects the virtual NIC plugged into the MAE switch */
+#define MAE_MPORT_END_VNIC 0x2
+
+/* MAE_COUNTER_TYPE enum: The datapath maintains several sets of counters, each
+ * being associated with a different table. Note that the same counter ID may
+ * be allocated by different counter blocks, so e.g. AR counter 42 is different
+ * from CT counter 42. Generation counts are also type-specific. This value is
+ * also present in the header of streaming counter packets, in the IDENTIFIER
+ * field (see packetiser packet format definitions).
+ */
+/* enum: Action Rule counters - can be referenced in AR response. */
+#define MAE_COUNTER_TYPE_AR 0x0
+/* enum: Conntrack counters - can be referenced in CT response. */
+#define MAE_COUNTER_TYPE_CT 0x1
+/* enum: Outer Rule counters - can be referenced in OR response. */
+#define MAE_COUNTER_TYPE_OR 0x2
+
+/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
+ * structured with bits [31:24] reserved (0), [23:16] indicating which major
+ * block the table belongs to (0=VNIC TX, none currently; 1=MAE; 2=VNIC RX),
+ * [15:8] a unique ID within the block, and [7:0] reserved for future
+ * variations of the same table. (All of the tables currently defined within
+ * the streaming engines are listed here, but this does not imply that they are
+ * all supported - MC_CMD_TABLE_LIST returns the list of actually supported
+ * tables.)
+ */
+/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_TABLE 0x10000
+/* enum: Outer_Rule_No_CT_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_NO_CT_TABLE 0x10100
+/* enum: Mgmt_Filter_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGMT_FILTER_TABLE 0x10200
+/* enum: Conntrack_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_CONNTRACK_TABLE 0x10300
+/* enum: Action_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ACTION_RULE_TABLE 0x10400
+/* enum: Mgroup_Default_Action_Set_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGROUP_DEFAULT_ACTION_SET_TABLE 0x10500
+/* enum: Encap_Hdr_Part1_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART1_TABLE 0x10600
+/* enum: Encap_Hdr_Part2_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART2_TABLE 0x10700
+/* enum: Replace_Src_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_SRC_MAC_TABLE 0x10800
+/* enum: Replace_Dst_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_DST_MAC_TABLE 0x10900
+/* enum: Dst_Mport_VC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_VC_TABLE 0x10a00
+/* enum: LACP_LAG_Config_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_LAG_CONFIG_TABLE 0x10b00
+/* enum: LACP_Balance_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_BALANCE_TABLE 0x10c00
+/* enum: Dst_Mport_Host_Chan_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_HOST_CHAN_TABLE 0x10d00
+/* enum: VNIC_Rx_Encap_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_VNIC_RX_ENCAP_TABLE 0x20000
+/* enum: Steering_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_STEERING_TABLE 0x20100
+/* enum: RSS_Context_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
+/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_INDIRECTION_TABLE 0x20300
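+
+/* A minimal sketch (not part of the MCDI protocol definitions) decoding the
+ * TABLE_ID structure described above; macro names are hypothetical.
+ */
+#if 0 /* illustration only */
+#define TABLE_ID_BLOCK(id)	(((id) >> 16) & 0xff)	/* 0=VNIC TX, 1=MAE, 2=VNIC RX */
+#define TABLE_ID_INDEX(id)	(((id) >> 8) & 0xff)	/* unique ID within the block */
+#define TABLE_ID_VARIANT(id)	((id) & 0xff)		/* reserved, currently 0 */
+/* e.g. TABLE_ID_CONNTRACK_TABLE (0x10300): block 1 (MAE), index 3, variant 0 */
+#endif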
+
+/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
+ * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
+ * (ether_type_msb & 0x3);
+ */
+#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
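+
+/* A minimal sketch (not part of the MCDI protocol definitions) applying the
+ * compression formula above; each result matches the enum values.
+ */
+#if 0 /* illustration only */
+static u8 table_compress_tpid(u16 ether_type)
+{
+	u8 msb = ether_type >> 8;
+
+	/* 0x8100 -> 0x5, 0x88A8 -> 0x4, 0x9100 -> 0x1,
+	 * 0x9200 -> 0x2, 0x9300 -> 0x3
+	 */
+	return (((msb >> 2) & 0x4) ^ 0x4) | (msb & 0x3);
+}
+#endif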
+
+/* TABLE_NAT_DIR enum: NAT direction. */
+#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
+#define TABLE_NAT_DIR_DEST 0x1 /* enum */
+
+/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
+ * is constructed as a concatenation (indicated here by "++") of packet header
+ * fields.
+ */
+/* enum: IP src addr ++ IP dst addr */
+#define TABLE_RSS_KEY_MODE_SA_DA 0x0
+/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
+#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
+/* enum: IP src addr */
+#define TABLE_RSS_KEY_MODE_SA 0x2
+/* enum: IP dst addr */
+#define TABLE_RSS_KEY_MODE_DA 0x3
+/* enum: IP src addr ++ TCP/UDP src port */
+#define TABLE_RSS_KEY_MODE_SA_SP 0x4
+/* enum: IP dest addr ++ TCP dest port */
+#define TABLE_RSS_KEY_MODE_DA_DP 0x5
+/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
+#define TABLE_RSS_KEY_MODE_NONE 0x7
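+
+/* A minimal sketch (not part of the MCDI protocol definitions) of the "++"
+ * concatenation described above; parameter names are hypothetical and all
+ * inputs are assumed to be in network order.
+ */
+#if 0 /* illustration only */
+static size_t rss_hash_input(unsigned int mode, u8 *buf, const u8 *sa,
+			     const u8 *da, size_t alen, const u8 *sp,
+			     const u8 *dp)
+{
+	size_t n = 0;
+
+	switch (mode) {
+	case TABLE_RSS_KEY_MODE_SA_DA:
+	case TABLE_RSS_KEY_MODE_SA_DA_SP_DP:
+		memcpy(buf, sa, alen); n = alen;
+		memcpy(buf + n, da, alen); n += alen;
+		if (mode == TABLE_RSS_KEY_MODE_SA_DA_SP_DP) {
+			memcpy(buf + n, sp, 2); n += 2;
+			memcpy(buf + n, dp, 2); n += 2;
+		}
+		break;
+	case TABLE_RSS_KEY_MODE_SA:
+	case TABLE_RSS_KEY_MODE_SA_SP:
+		memcpy(buf, sa, alen); n = alen;
+		if (mode == TABLE_RSS_KEY_MODE_SA_SP) {
+			memcpy(buf + n, sp, 2); n += 2;
+		}
+		break;
+	case TABLE_RSS_KEY_MODE_DA:
+	case TABLE_RSS_KEY_MODE_DA_DP:
+		memcpy(buf, da, alen); n = alen;
+		if (mode == TABLE_RSS_KEY_MODE_DA_DP) {
+			memcpy(buf + n, dp, 2); n += 2;
+		}
+		break;
+	case TABLE_RSS_KEY_MODE_NONE:
+		break;			/* empty input => output hash of 0 */
+	}
+	return n;
+}
+#endif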
+
+/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
+/* enum: RSS uses Indirection_Table lookup. */
+#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
+/* enum: RSS uses even spreading calculation. */
+#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+
+/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
+ * loosely grouped together into blocks with gaps for expansion, but the values
+ * are arbitrary. Field IDs are not specific to particular tables, and in some
+ * cases this sharing means that they are not used with the exact names of the
+ * corresponding table definitions in SF-123102-TC; however, the mapping should
+ * still be clear. The intent is that a list of fields, with their associated
+ * bit widths and semantics version code, unambiguously defines the semantics
+ * of the fields in a key or response. (Again, this list includes all of the
+ * fields currently defined within the streaming engines, but only a subset may
+ * actually be used by the supported list of tables.)
+ */
+/* enum: May appear multiple times within a key or response, and indicates that
+ * the field is unused and should be set to 0 (or masked out if permitted by
+ * the MASK_VALUE for this field).
+ */
+#define TABLE_FIELD_ID_UNUSED 0x0
+/* enum: Source m-port (a full m-port label). */
+#define TABLE_FIELD_ID_SRC_MPORT 0x1
+/* enum: Destination m-port (a full m-port label). */
+#define TABLE_FIELD_ID_DST_MPORT 0x2
+/* enum: Source m-group ID. */
+#define TABLE_FIELD_ID_SRC_MGROUP_ID 0x3
+/* enum: Physical network port ID (or m-port ID; same thing, for physical
+ * network ports).
+ */
+#define TABLE_FIELD_ID_NETWORK_PORT_ID 0x4
+/* enum: True if packet arrived via network port, false if it arrived via host.
+ */
+#define TABLE_FIELD_ID_IS_FROM_NETWORK 0x5
+/* enum: Full virtual channel from capsule header. */
+#define TABLE_FIELD_ID_CH_VC 0x6
+/* enum: Low bits of virtual channel from capsule header. */
+#define TABLE_FIELD_ID_CH_VC_LOW 0x7
+/* enum: User mark value in metadata and packet prefix. */
+#define TABLE_FIELD_ID_USER_MARK 0x8
+/* enum: User flag value in metadata and packet prefix. */
+#define TABLE_FIELD_ID_USER_FLAG 0x9
+/* enum: Counter ID associated with a response. All-bits-1 is a null value to
+ * suppress counting.
+ */
+#define TABLE_FIELD_ID_COUNTER_ID 0xa
+/* enum: Discriminator which may be set by plugins in some lookup keys; this
+ * allows plugins to make a reinterpretation of packet fields in these keys
+ * without clashing with the normal interpretation.
+ */
+#define TABLE_FIELD_ID_DISCRIM 0xb
+/* enum: Destination MAC address. The mapping from bytes in a frame to the
+ * 48-bit value for this field is in network order, i.e. a MAC address of
+ * AA:BB:CC:DD:EE:FF becomes a 48-bit value of 0xAABBCCDDEEFF.
+ */
+#define TABLE_FIELD_ID_DST_MAC 0x14
+/* enum: Source MAC address (see notes for DST_MAC). */
+#define TABLE_FIELD_ID_SRC_MAC 0x15
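+
+/* A minimal sketch (not part of the MCDI protocol definitions) of the
+ * network-order byte mapping described for DST_MAC/SRC_MAC.
+ */
+#if 0 /* illustration only */
+static u64 table_mac_to_u64(const u8 mac[6])
+{
+	u64 v = 0;
+	int i;
+
+	for (i = 0; i < 6; i++)			/* AA:BB:CC:DD:EE:FF */
+		v = (v << 8) | mac[i];		/*  -> 0xAABBCCDDEEFF */
+	return v;
+}
+#endif
+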
+/* enum: Outer VLAN tag TPID, compressed to an enumeration. */
+#define TABLE_FIELD_ID_OVLAN_TPID_COMPRESSED 0x16
+/* enum: Full outer VLAN tag TCI (16 bits). */
+#define TABLE_FIELD_ID_OVLAN 0x17
+/* enum: Outer VLAN ID (least significant 12 bits of full 16-bit TCI) only. */
+#define TABLE_FIELD_ID_OVLAN_VID 0x18
+/* enum: Inner VLAN tag TPID, compressed to an enumeration. */
+#define TABLE_FIELD_ID_IVLAN_TPID_COMPRESSED 0x19
+/* enum: Full inner VLAN tag TCI (16 bits). */
+#define TABLE_FIELD_ID_IVLAN 0x1a
+/* enum: Inner VLAN ID (least significant 12 bits of full 16-bit TCI) only. */
+#define TABLE_FIELD_ID_IVLAN_VID 0x1b
+/* enum: Ethertype. */
+#define TABLE_FIELD_ID_ETHER_TYPE 0x1c
+/* enum: Source IP address, either IPv4 or IPv6. The mapping from bytes in a
+ * frame to the 128-bit value for this field is in network order, with IPv4
+ * addresses assumed to have 12 bytes of trailing zeroes. i.e. the IPv6 address
+ * [2345::6789:ABCD] is 0x2345000000000000000000006789ABCD; the IPv4 address
+ * 192.168.1.2 is 0xC0A80102000000000000000000000000.
+ */
+#define TABLE_FIELD_ID_SRC_IP 0x1d
+/* enum: Destination IP address (see notes for SRC_IP). */
+#define TABLE_FIELD_ID_DST_IP 0x1e
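+
+/* A minimal sketch (not part of the MCDI protocol definitions) of the
+ * 128-bit mapping described for SRC_IP/DST_IP, padding IPv4 with 12 trailing
+ * zero bytes.
+ */
+#if 0 /* illustration only */
+static void table_ip_to_key(const u8 *addr, size_t addr_len, u8 key[16])
+{
+	memset(key, 0, 16);
+	memcpy(key, addr, addr_len);	/* addr_len is 4 (IPv4) or 16 (IPv6) */
+	/* 192.168.1.2       -> C0 A8 01 02 00 00 ... 00
+	 * [2345::6789:ABCD] -> 23 45 00 00 ... 67 89 AB CD
+	 */
+}
+#endif
+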
+/* enum: IPv4 Type-of-Service or IPv6 Traffic Class field. */
+#define TABLE_FIELD_ID_IP_TOS 0x1f
+/* enum: IP Protocol. */
+#define TABLE_FIELD_ID_IP_PROTO 0x20
+/* enum: Layer 4 source port. */
+#define TABLE_FIELD_ID_SRC_PORT 0x21
+/* enum: Layer 4 destination port. */
+#define TABLE_FIELD_ID_DST_PORT 0x22
+/* enum: TCP flags. */
+#define TABLE_FIELD_ID_TCP_FLAGS 0x23
+/* enum: Virtual Network Identifier (VXLAN) or Virtual Session ID (NVGRE). */
+#define TABLE_FIELD_ID_VNI 0x24
+/* enum: True if packet has any tunnel encapsulation header. */
+#define TABLE_FIELD_ID_HAS_ENCAP 0x32
+/* enum: True if encap header has an outer VLAN tag. */
+#define TABLE_FIELD_ID_HAS_ENC_OVLAN 0x33
+/* enum: True if encap header has an inner VLAN tag. */
+#define TABLE_FIELD_ID_HAS_ENC_IVLAN 0x34
+/* enum: True if encap header is some sort of IP. */
+#define TABLE_FIELD_ID_HAS_ENC_IP 0x35
+/* enum: True if encap header is specifically IPv4. */
+#define TABLE_FIELD_ID_HAS_ENC_IP4 0x36
+/* enum: True if encap header is UDP. */
+#define TABLE_FIELD_ID_HAS_ENC_UDP 0x37
+/* enum: True if only/inner frame has an outer VLAN tag. */
+#define TABLE_FIELD_ID_HAS_OVLAN 0x38
+/* enum: True if only/inner frame has an inner VLAN tag. */
+#define TABLE_FIELD_ID_HAS_IVLAN 0x39
+/* enum: True if only/inner frame is some sort of IP. */
+#define TABLE_FIELD_ID_HAS_IP 0x3a
+/* enum: True if only/inner frame has a recognised L4 IP protocol (TCP or UDP).
+ */
+#define TABLE_FIELD_ID_HAS_L4 0x3b
+/* enum: True if only/inner frame is an IP fragment. */
+#define TABLE_FIELD_ID_IP_FRAG 0x3c
+/* enum: True if only/inner frame is the first IP fragment (fragment offset 0).
+ */
+#define TABLE_FIELD_ID_IP_FIRST_FRAG 0x3d
+/* enum: True if only/inner frame has an IP Time-To-Live of <= 1. (Note: the
+ * implementation calls this "ip_ttl_is_one" but does in fact match packets
+ * with TTL=0 - which we shouldn't be seeing! - as well.)
+ */
+#define TABLE_FIELD_ID_IP_TTL_LE_ONE 0x3e
+/* enum: True if only/inner frame has any of TCP SYN, FIN or RST flags set. */
+#define TABLE_FIELD_ID_TCP_INTERESTING_FLAGS 0x3f
+/* enum: Plugin channel selection. */
+#define TABLE_FIELD_ID_RDP_PL_CHAN 0x50
+/* enum: Enable update of CH_ROUTE_RDP_C_PL route bit. */
+#define TABLE_FIELD_ID_RDP_C_PL_EN 0x51
+/* enum: New value of CH_ROUTE_RDP_C_PL route bit. */
+#define TABLE_FIELD_ID_RDP_C_PL 0x52
+/* enum: Enable update of CH_ROUTE_RDP_D_PL route bit. */
+#define TABLE_FIELD_ID_RDP_D_PL_EN 0x53
+/* enum: New value of CH_ROUTE_RDP_D_PL route bit. */
+#define TABLE_FIELD_ID_RDP_D_PL 0x54
+/* enum: Enable update of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */
+#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN_EN 0x55
+/* enum: New value of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */
+#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN 0x56
+/* enum: Recirculation ID for lookup sequences with two action rule lookups. */
+#define TABLE_FIELD_ID_RECIRC_ID 0x64
+/* enum: Domain ID passed to conntrack and action rule lookups. */
+#define TABLE_FIELD_ID_DOMAIN 0x65
+/* enum: Construction mode for encap_tunnel_id - see MAE_CT_VNI_MODE enum. */
+#define TABLE_FIELD_ID_CT_VNI_MODE 0x66
+/* enum: True to inhibit conntrack lookup if TCP SYN, FIN or RST flag is set.
+ */
+#define TABLE_FIELD_ID_CT_TCP_FLAGS_INHIBIT 0x67
+/* enum: True to do conntrack lookups for IPv4 TCP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP4_TCP 0x68
+/* enum: True to do conntrack lookups for IPv4 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP4_UDP 0x69
+/* enum: True to do conntrack lookups for IPv6 TCP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_TCP 0x6a
+/* enum: True to do conntrack lookups for IPv6 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_UDP 0x6b
+/* enum: Outer rule identifier. */
+#define TABLE_FIELD_ID_OUTER_RULE_ID 0x6c
+/* enum: Encapsulation type - see MAE_MCDI_ENCAP_TYPE enum. */
+#define TABLE_FIELD_ID_ENCAP_TYPE 0x6d
+/* enum: Encap tunnel ID for conntrack lookups from VNI, VLAN tag(s), or 0,
+ * depending on CT_VNI_MODE.
+ */
+#define TABLE_FIELD_ID_ENCAP_TUNNEL_ID 0x78
+/* enum: A conntrack entry identifier, passed to plugins. */
+#define TABLE_FIELD_ID_CT_ENTRY_ID 0x79
+/* enum: Either source or destination NAT replacement port. */
+#define TABLE_FIELD_ID_NAT_PORT 0x7a
+/* enum: Either source or destination NAT replacement IPv4 address. Note that
+ * this is specifically an IPv4 address (IPv6 is not supported for NAT), with
+ * bytes mapped to a 32-bit value in network order, i.e. the IPv4 address
+ * 192.168.1.2 is the value 0xC0A80102.
+ */
+#define TABLE_FIELD_ID_NAT_IP 0x7b
+/* enum: NAT direction: 0=>source, 1=>destination. */
+#define TABLE_FIELD_ID_NAT_DIR 0x7c
+/* enum: Conntrack mark value, passed to action rule lookup. Note that this is
+ * not related to the "user mark" in the metadata / packet prefix.
+ */
+#define TABLE_FIELD_ID_CT_MARK 0x7d
+/* enum: Private flags for conntrack, passed to action rule lookup. */
+#define TABLE_FIELD_ID_CT_PRIV_FLAGS 0x7e
+/* enum: True if the conntrack lookup resulted in a hit. */
+#define TABLE_FIELD_ID_CT_HIT 0x7f
+/* enum: True to suppress delivery when source and destination m-ports match.
+ */
+#define TABLE_FIELD_ID_SUPPRESS_SELF_DELIVERY 0x8c
+/* enum: True to perform tunnel decapsulation. */
+#define TABLE_FIELD_ID_DO_DECAP 0x8d
+/* enum: True to copy outer frame DSCP to inner on decap. */
+#define TABLE_FIELD_ID_DECAP_DSCP_COPY 0x8e
+/* enum: True to map outer frame ECN to inner on decap, by RFC 6040 rules. */
+#define TABLE_FIELD_ID_DECAP_ECN_RFC6040 0x8f
+/* enum: True to replace DSCP field. */
+#define TABLE_FIELD_ID_DO_REPLACE_DSCP 0x90
+/* enum: True to replace ECN field. */
+#define TABLE_FIELD_ID_DO_REPLACE_ECN 0x91
+/* enum: True to decrement IP Time-To-Live. */
+#define TABLE_FIELD_ID_DO_DECR_IP_TTL 0x92
+/* enum: True to replace source MAC address. */
+#define TABLE_FIELD_ID_DO_SRC_MAC 0x93
+/* enum: True to replace destination MAC address. */
+#define TABLE_FIELD_ID_DO_DST_MAC 0x94
+/* enum: Number of VLAN tags to pop. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_POP 0x95
+/* enum: Number of VLAN tags to push. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_PUSH 0x96
+/* enum: True to count this packet. */
+#define TABLE_FIELD_ID_DO_COUNT 0x97
+/* enum: True to perform tunnel encapsulation. */
+#define TABLE_FIELD_ID_DO_ENCAP 0x98
+/* enum: True to copy inner frame DSCP to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_DSCP_COPY 0x99
+/* enum: True to copy inner frame ECN to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_ECN_COPY 0x9a
+/* enum: True to deliver the packet (otherwise it is dropped). */
+#define TABLE_FIELD_ID_DO_DELIVER 0x9b
+/* enum: True to set the user flag in the metadata. */
+#define TABLE_FIELD_ID_DO_FLAG 0x9c
+/* enum: True to update the user mark in the metadata. */
+#define TABLE_FIELD_ID_DO_MARK 0x9d
+/* enum: True to override the capsule virtual channel for network deliveries.
+ */
+#define TABLE_FIELD_ID_DO_SET_NET_CHAN 0x9e
+/* enum: True to override the reported source m-port for host deliveries. */
+#define TABLE_FIELD_ID_DO_SET_SRC_MPORT 0x9f
+/* enum: Encap header ID for DO_ENCAP, indexing Encap_Hdr_Part1/2_Table. */
+#define TABLE_FIELD_ID_ENCAP_HDR_ID 0xaa
+/* enum: New DSCP value for DO_REPLACE_DSCP. */
+#define TABLE_FIELD_ID_DSCP_VALUE 0xab
+/* enum: If DO_REPLACE_ECN is set, the new value for the ECN field. If
+ * DO_REPLACE_ECN is not set, ECN_CONTROL[0] and ECN_CONTROL[1] are set to
+ * request remapping of ECT0 and ECT1 ECN codepoints respectively to CE.
+ */
+#define TABLE_FIELD_ID_ECN_CONTROL 0xac
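+
+/* A minimal sketch (not part of the MCDI protocol definitions) of the
+ * ECN_CONTROL semantics above; ECN codepoints: ECT(1)=0x1, ECT(0)=0x2,
+ * CE=0x3.
+ */
+#if 0 /* illustration only */
+static u8 apply_ecn_control(bool do_replace_ecn, u8 ecn_control, u8 pkt_ecn)
+{
+	if (do_replace_ecn)
+		return ecn_control;		/* new ECN field value */
+	if ((pkt_ecn == 0x2 && (ecn_control & 0x1)) ||	/* remap ECT(0) */
+	    (pkt_ecn == 0x1 && (ecn_control & 0x2)))	/* remap ECT(1) */
+		return 0x3;			/* to CE */
+	return pkt_ecn;
+}
+#endif
+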
+/* enum: Source MAC ID for DO_SRC_MAC, indexing Replace_Src_MAC_Table. */
+#define TABLE_FIELD_ID_SRC_MAC_ID 0xad
+/* enum: Destination MAC ID for DO_DST_MAC, indexing Replace_Dst_MAC_Table. */
+#define TABLE_FIELD_ID_DST_MAC_ID 0xae
+/* enum: Parameter for either DO_SET_NET_CHAN (only bottom 6 bits used in this
+ * case) or DO_SET_SRC_MPORT.
+ */
+#define TABLE_FIELD_ID_REPORTED_SRC_MPORT_OR_NET_CHAN 0xaf
+/* enum: 64-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK64 0xb4
+/* enum: 32-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK32 0xb5
+/* enum: 16-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK16 0xb6
+/* enum: 8-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK8 0xb7
+/* enum: 4-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK4 0xb8
+/* enum: 2-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK2 0xb9
+/* enum: Added encapsulation header length in words. */
+#define TABLE_FIELD_ID_HDR_LEN_W 0xba
+/* enum: Static value for layer 2/3 LACP hash of the encapsulation header. */
+#define TABLE_FIELD_ID_ENC_LACP_HASH_L23 0xbb
+/* enum: Static value for layer 4 LACP hash of the encapsulation header. */
+#define TABLE_FIELD_ID_ENC_LACP_HASH_L4 0xbc
+/* enum: True to use the static ENC_LACP_HASH values for the encap header
+ * instead of the calculated values for the inner frame when delivering a newly
+ * encapsulated packet to a LAG m-port.
+ */
+#define TABLE_FIELD_ID_USE_ENC_LACP_HASHES 0xbd
+/* enum: True to trigger conntrack from first action rule lookup (AR=>CT=>AR
+ * sequence).
+ */
+#define TABLE_FIELD_ID_DO_CT 0xc8
+/* enum: True to perform NAT using parameters from conntrack lookup response.
+ */
+#define TABLE_FIELD_ID_DO_NAT 0xc9
+/* enum: True to trigger recirculated action rule lookup (AR=>AR sequence). */
+#define TABLE_FIELD_ID_DO_RECIRC 0xca
+/* enum: Next action set payload ID for replay. The null value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_NEXT_ACTION_SET_PAYLOAD 0xcb
+/* enum: Next action set row ID for replay. The null value is all-1-bits. */
+#define TABLE_FIELD_ID_NEXT_ACTION_SET_ROW 0xcc
+/* enum: Action set payload ID for additional delivery to management CPU. The
+ * null value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_MC_ACTION_SET_PAYLOAD 0xcd
+/* enum: Action set row ID for additional delivery to management CPU. The null
+ * value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_MC_ACTION_SET_ROW 0xce
+/* enum: True to include layer 4 in LACP hash on delivery to a LAG m-port. */
+#define TABLE_FIELD_ID_LACP_INC_L4 0xdc
+/* enum: True to request that LACP is performed by a plugin. */
+#define TABLE_FIELD_ID_LACP_PLUGIN 0xdd
+/* enum: LACP_Balance_Table base address divided by 64. */
+#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
+/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
+#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
+ * other encapsulation types.
+ */
+#define TABLE_FIELD_ID_UDP_PORT 0xe6
+/* enum: True to perform RSS based on outer fields rather than inner fields. */
+#define TABLE_FIELD_ID_RSS_ON_OUTER 0xe7
+/* enum: True to perform steering table lookup on outer fields rather than
+ * inner fields.
+ */
+#define TABLE_FIELD_ID_STEER_ON_OUTER 0xe8
+/* enum: Destination queue ID for host delivery. */
+#define TABLE_FIELD_ID_DST_QID 0xf0
+/* enum: True to drop this packet. */
+#define TABLE_FIELD_ID_DROP 0xf1
+/* enum: True to strip outer VLAN tag from this packet. */
+#define TABLE_FIELD_ID_VLAN_STRIP 0xf2
+/* enum: True to override the user mark field with the supplied USER_MARK, or
+ * false to bitwise-OR the USER_MARK into it.
+ */
+#define TABLE_FIELD_ID_MARK_OVERRIDE 0xf3
+/* enum: True to override the user flag field with the supplied USER_FLAG, or
+ * false to bitwise-OR the USER_FLAG into it.
+ */
+#define TABLE_FIELD_ID_FLAG_OVERRIDE 0xf4
+/* enum: RSS context ID, indexing the RSS_Context_Table. */
+#define TABLE_FIELD_ID_RSS_CTX_ID 0xfa
+/* enum: True to enable RSS. */
+#define TABLE_FIELD_ID_RSS_EN 0xfb
+/* enum: Toeplitz hash key. */
+#define TABLE_FIELD_ID_KEY 0xfc
+/* enum: Key mode for IPv4 TCP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_TCP_V4_KEY_MODE 0xfd
+/* enum: Key mode for IPv6 TCP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_TCP_V6_KEY_MODE 0xfe
+/* enum: Key mode for IPv4 UDP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_UDP_V4_KEY_MODE 0xff
+/* enum: Key mode for IPv6 UDP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_UDP_V6_KEY_MODE 0x100
+/* enum: Key mode for other IPv4 packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_OTHER_V4_KEY_MODE 0x101
+/* enum: Key mode for other IPv6 packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_OTHER_V6_KEY_MODE 0x102
+/* enum: Spreading mode - 0=>indirection; 1=>even. */
+#define TABLE_FIELD_ID_SPREAD_MODE 0x103
+/* enum: For indirection spreading mode, the base address of a region within
+ * the Indirection_Table. For even spreading mode, the number of queues to
+ * spread across (only values 1-255 are valid for this mode).
+ */
+#define TABLE_FIELD_ID_INDIR_TBL_BASE 0x104
+/* enum: For indirection spreading mode, identifies the length of a region
+ * within the Indirection_Table, where length = 32 << len_id. Must be set to 0
+ * for even spreading mode.
+ */
+#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
+/* enum: An offset to be applied to the base destination queue ID. */
+#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
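+
+/* A minimal sketch (not part of the MCDI protocol definitions) of queue
+ * selection under the two spreading modes; the use of the low hash bits and
+ * how INDIR_OFFSET combines with the base destination queue are assumptions
+ * of this illustration.
+ */
+#if 0 /* illustration only */
+static u32 rss_select_queue(u32 hash, u32 spread_mode, u32 indir_tbl_base,
+			    u32 indir_tbl_len_id, const u8 *indir_tbl,
+			    u32 indir_offset)
+{
+	u32 q;
+
+	if (spread_mode == TABLE_RSS_SPREAD_MODE_EVEN)
+		q = hash % indir_tbl_base;	/* base = queue count, 1-255 */
+	else					/* region length = 32 << len_id */
+		q = indir_tbl[indir_tbl_base +
+			      (hash & ((32u << indir_tbl_len_id) - 1))];
+	return q + indir_offset;
+}
+#endif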
+
+/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
+ * platforms
+ */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
@@ -447,17 +1164,21 @@
#define MCDI_EVENT_TX_ERR_TYPE_OFST 0
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-/* enum: Descriptor loader reported failure */
+/* enum: Descriptor loader reported failure. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
-/* enum: Descriptor ring empty and no EOP seen for packet */
+/* enum: Descriptor ring empty and no EOP seen for packet. Specific to
+ * EF10-family NICs.
+ */
#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
-/* enum: Overlength packet */
+/* enum: Overlength packet. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_2BIG 0x3
-/* enum: Malformed option descriptor */
+/* enum: Malformed option descriptor. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
-/* enum: Option descriptor part way through a packet */
+/* enum: Option descriptor part way through a packet. Specific to EF10-family
+ * NICs.
+ */
#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
-/* enum: DMA or PIO data access error */
+/* enum: DMA or PIO data access error. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_OFST 0
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
@@ -773,6 +1494,12 @@
* SF-122927-TC for details.
*/
#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
+/* enum: Notification that the mport journal has changed since it was last read
+ * and updates can be read using the MC_CMD_MAE_MPORT_READ_JOURNAL command. The
+ * firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1070,7 +1797,13 @@
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
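+
+/* A minimal sketch (not part of the MCDI protocol definitions): 64-bit MCDI
+ * fields such as the timestamps above are carried as little-endian LO/HI
+ * 32-bit halves at the _LO_OFST/_HI_OFST byte offsets (LBN = 8 * OFST).
+ */
+#if 0 /* illustration only */
+static u64 mcdi_read_u64(const u8 *payload, unsigned int ofst)
+{
+	u64 lo = get_unaligned_le32(payload + ofst);		/* ..._LO */
+	u64 hi = get_unaligned_le32(payload + ofst + 4);	/* ..._HI */
+
+	return lo | (hi << 32);
+}
+#endif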
@@ -1482,12 +2215,24 @@
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LBN 2080
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_WIDTH 32
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LBN 2112
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_WIDTH 32
/* MC firmware version number */
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LBN 2144
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_WIDTH 32
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LBN 2176
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_WIDTH 32
/* MC firmware security level */
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
@@ -1571,7 +2316,13 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_WIDTH 32
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
@@ -1587,7 +2338,13 @@
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_WIDTH 32
/* extra info */
#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
@@ -1611,7 +2368,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_WIDTH 32
/* extra info */
#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
@@ -1633,6 +2396,33 @@
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
/* MC firmware unique build ID (as binary SHA-1 value) */
#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
@@ -1650,7 +2440,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
/* The ID of the SUC chip. This is specific to the platform but typically
* indicates family, memory sizes etc. See SF-116728-SW for further details.
*/
@@ -1664,7 +2460,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
/* FPGA version as three numbers. On Riverhead based systems this field uses
* the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
* FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
@@ -1686,6 +2488,489 @@
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
+/* MC_CMD_GET_VERSION_V3_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter).
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V3_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V3_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_NUM 3
+
+/* MC_CMD_GET_VERSION_V4_OUT msgresponse: Extended response providing SoC
+ * version information
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V4_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V4_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_NUM 3
+/* The SOC boot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_OFST 328
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_NUM 4
+/* The SOC uboot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_OFST 344
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_NUM 4
+/* The SOC main rootfs version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4
+/* The SOC recovery buildroot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4
+
+/* MC_CMD_GET_VERSION_V5_OUT msgresponse: Extended response providing bundle
+ * and board version information
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V5_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V5_OUT_PCOL_LEN 4
+/* 128-bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x}
+ * FPGA_VERSION[1]: Revision letter (0 => A, 1 => B, ...)
+ * FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_NUM 3
+/* The SOC boot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_OFST 328
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_NUM 4
+/* The SOC uboot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_OFST 344
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_NUM 4
+/* The SOC main rootfs version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4
+/* The SOC recovery buildroot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4
+/* Board version as four numbers - a.b.c.d. BOARD_VERSION[0] duplicates the
+ * BOARD_REVISION field
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_OFST 392
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_NUM 4
+/* Bundle version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_OFST 408
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_NUM 4
+
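+/* A minimal consumer-side sketch (illustrative only, not part of the MCDI
+ * definitions): extended fields in the V5 response are only meaningful when
+ * the matching PRESENT bit in FLAGS is set, so a caller is expected to test
+ * the flag before reading a field. Assumes a little-endian response buffer
+ * `resp` of at least MC_CMD_GET_VERSION_V5_OUT_LEN bytes, and le32_to_cpu()
+ * plus the u8/u32/__le32 types from <linux/types.h>. The helper name is
+ * hypothetical.
+ */
+static inline bool mc_cmd_get_version_v5_bundle_present(const u8 *resp)
+{
+	/* FLAGS is a single dword at offset 48 of the response */
+	u32 flags = le32_to_cpu(*(const __le32 *)
+				(resp + MC_CMD_GET_VERSION_V5_OUT_FLAGS_OFST));
+
+	/* Bit 13 indicates the BUNDLE_VERSION field carries valid data */
+	return flags &
+	       (1u << MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN);
+}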
/***********************************/
/* MC_CMD_PTP
@@ -1789,7 +3074,9 @@
#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
-/* Not used. Events are always sent to function relative queue 0. */
+/* Not used; initialize to 0. Events are always sent to function relative
+ * queue 0.
+ */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
/* PTP timestamping mode. Not used from Huntington onwards. */
@@ -1866,7 +3153,13 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_WIDTH 32
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
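/* Worked example (illustrative): with ADJUST_BITS = 0x28 (40 fractional
 * bits) FREQ is a signed 64-bit fixed-point ns-per-ns ratio (see the
 * FREQ_ADJ_MIN/MAX attribute defaults below, where -0.1 ns corresponds to
 * -10%), so a +1 ppm adjustment would be encoded as
 * round(1e-6 * 2^40) = 1099512 (0x10C6F8).
 */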
/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
@@ -1897,7 +3190,13 @@
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_WIDTH 32
/* enum: Number of fractional bits in frequency adjustment */
/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
@@ -1936,7 +3235,13 @@
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LEN 4
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LBN 96
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_WIDTH 32
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LEN 4
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LBN 128
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_WIDTH 32
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
@@ -2052,7 +3357,13 @@
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
@@ -2083,7 +3394,13 @@
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LBN 96
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_WIDTH 32
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LBN 128
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_WIDTH 32
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
@@ -2130,7 +3447,9 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Not used. Events are always sent to function relative queue 0. */
+/* Not used; initialize to 0. Events are always sent to function relative
+ * queue 0.
+ */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
@@ -2492,6 +3811,87 @@
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_LEN 40
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this
+ * operation is not supported (older firmware), a format of seconds and
+ * nanoseconds should be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_LEN 4
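+/* For example (illustrative): if the host samples start = 1000 ns and end =
+ * 1900 ns around the MC reading, and the MC waited 400 ns between reading
+ * its clock and detecting the host end, the corrected sync window is
+ * (1900 - 1000) - 400 = 500 ns, which is only acceptable if it is at least
+ * SYNC_WINDOW_MIN.
+ */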
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_LEN 4
+/* Minimum supported value for the FREQ field in
+ * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and
+ * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message
+ * response is not supported, a value of -0.1 ns should be assumed, which is
+ * equivalent to a -10% adjustment.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_OFST 24
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_OFST 24
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LBN 192
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_WIDTH 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_OFST 28
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LBN 224
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_WIDTH 32
+/* Maximum supported value for the FREQ field in
+ * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and
+ * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message
+ * response is not supported, a value of 0.1 ns should be assumed, which is
+ * equivalent to a +10% adjustment.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_OFST 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_OFST 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LBN 256
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_WIDTH 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_OFST 36
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LBN 288
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_WIDTH 32
+
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
@@ -2634,7 +4034,13 @@
#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
/* The requested update interval, in seconds. (Or the sub-command if ADDR is
* NULL.)
*/
@@ -3039,7 +4445,13 @@
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
@@ -3643,6 +5055,8 @@
#define MC_CMD_MEDIA_BASE_T 0x6
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+/* enum: DSFP. */
+#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
@@ -3912,7 +5326,13 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -3995,28 +5415,52 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
@@ -4028,7 +5472,13 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -4111,49 +5561,91 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 50G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
@@ -4524,7 +6016,13 @@
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_IN_ADDR_LO_WIDTH 32
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_IN_ADDR_HI_WIDTH 32
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
@@ -4565,7 +6063,13 @@
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_WIDTH 32
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_WIDTH 32
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
@@ -4616,6 +6120,129 @@
#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+/* MC_CMD_SET_MAC_V3_IN msgrequest */
+#define MC_CMD_SET_MAC_V3_IN_LEN 40
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_V3_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_V3_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_V3_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_V3_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_V3_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_V3_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
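+/* A minimal usage sketch (illustrative only): with the SET_MAC_ENHANCED
+ * capability, a client that wants to change just the MTU sets only the
+ * CFG_MTU control bit and leaves the other parameters untouched, e.g.
+ * (assuming a zeroed little-endian request buffer `req` of
+ * MC_CMD_SET_MAC_V3_IN_LEN bytes and cpu_to_le32() from the kernel):
+ *
+ *	*(__le32 *)(req + MC_CMD_SET_MAC_V3_IN_MTU_OFST) = cpu_to_le32(mtu);
+ *	*(__le32 *)(req + MC_CMD_SET_MAC_V3_IN_CONTROL_OFST) =
+ *		cpu_to_le32(1u << MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN);
+ */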
+/* Identifies the MAC to update by specifying the end of a logical MAE link.
+ * Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
+ * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
+ * circumstances:
+ * 1. Some will always work (e.g. a VF can always address its logical MAC
+ *    using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g.
+ *    attempting to address the VNIC end of a link to a physical port),
+ * 3. Some are meaningful but require the MCDI client to have the required
+ *    permission and fail with EPERM otherwise (e.g. trying to set the MAC on
+ *    a VF the caller cannot administer), and
+ * 4. Some could be implementation-specific and fail with ENOTSUP if not
+ *    available (no examples exist right now).
+ * See SF-123581-TC section 4.3 for more details.
+ */
+#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 35
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 276
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 272
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 34
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LBN 288
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_WIDTH 32
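+/* For example (illustrative): a VF updating the MAC on its own VNIC would
+ * use the ASSIGNED m-port selector together with LINK_END = VNIC, per case
+ * 1 in the TARGET description above.
+ */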
+
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -4649,7 +6276,13 @@
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_WIDTH 32
/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
@@ -4731,7 +6364,13 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_OFST 8
@@ -4774,7 +6413,13 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
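/* Each statistic above is a 64-bit quantity returned as LO/HI dword pairs;
 * an illustrative reassembly, assuming both dwords have already been
 * converted to host byte order:
 *
 *	u64 stat = ((u64)hi << 32) | lo;
 */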
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
@@ -4930,7 +6575,13 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
@@ -4963,7 +6614,13 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
@@ -5037,7 +6694,13 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
@@ -5097,7 +6760,13 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
@@ -5108,7 +6777,13 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
@@ -5201,7 +6876,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LBN 64
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_WIDTH 32
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LBN 96
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_WIDTH 32
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
@@ -5706,6 +7387,9 @@
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_LBN 3
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_WIDTH 1
/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
* response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
@@ -6180,7 +7864,13 @@
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LO_LEN 4
+#define MC_CMD_SENSOR_ENTRY_LO_LBN 32
+#define MC_CMD_SENSOR_ENTRY_LO_WIDTH 32
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_HI_LEN 4
+#define MC_CMD_SENSOR_ENTRY_HI_LBN 64
+#define MC_CMD_SENSOR_ENTRY_HI_WIDTH 32
#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
@@ -6202,7 +7892,13 @@
/* MC_CMD_SENSOR_ENTRY_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LO_LEN 4 */
+/* MC_CMD_SENSOR_ENTRY_LO_LBN 32 */
+/* MC_CMD_SENSOR_ENTRY_LO_WIDTH 32 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_HI_LEN 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_LBN 64 */
+/* MC_CMD_SENSOR_ENTRY_HI_WIDTH 32 */
/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
@@ -6259,7 +7955,13 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_WIDTH 32
/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
@@ -6271,7 +7973,13 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_WIDTH 32
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
@@ -6286,7 +7994,13 @@
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_WIDTH 32
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
@@ -6583,11 +8297,16 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
* Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
- * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
- * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
- * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * SFP+ PHYs). The "media type" can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid "page number" input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+, PAGE=0 or 1
* returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
- * Anything else: currently undefined. Locks required: None. Return code: 0.
+ * For QSFP, PAGE=-1 is the lower (unbanked) page. PAGE=2 is the EEPROM and
+ * PAGE=3 is the module limits. For DSFP, module addressing requires a
+ * "BANK:PAGE". Not every bank has the same number of pages. See the Common
+ * Management Interface Specification (CMIS) for further details. A BANK:PAGE
+ * of "0xffff:0xffff" retrieves the lower (unbanked) page. Locks required -
+ * None. Return code - 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
#undef MC_CMD_0x4b_PRIVILEGE_CTG
@@ -6598,6 +8317,12 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
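+/* A minimal request-side sketch (illustrative only): for DSFP modules the
+ * PAGE dword carries a 16-bit page number in its low half and a 16-bit bank
+ * number in its high half, so bank 2, page 0x11 would be encoded as:
+ *
+ *	u32 page = (2u << MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN) |
+ *		   (0x11u << MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN);
+ *
+ * while page = 0xffffffff (a BANK:PAGE of 0xffff:0xffff) selects the lower,
+ * unbanked page.
+ */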
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -7404,7 +9129,13 @@
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
@@ -7589,7 +9320,13 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
@@ -7782,7 +9519,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +9564,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +9613,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -7997,7 +9734,13 @@
#define BUFTBL_ENTRY_RAWADDR_OFST 4
#define BUFTBL_ENTRY_RAWADDR_LEN 8
#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
+#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
#define BUFTBL_ENTRY_RAWADDR_LBN 32
#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
@@ -8007,14 +9750,25 @@
#define NVRAM_PARTITION_TYPE_ID_LEN 2
/* enum: Primary MC firmware partition */
#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: NMC firmware partition (this is intentionally an alias of MC_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_FIRMWARE 0x100
/* enum: Secondary MC firmware partition */
#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
/* enum: Expansion ROM partition */
#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
/* enum: Static configuration TLV partition */
#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Factory configuration TLV partition (this is intentionally an alias of
+ * STATIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_FACTORY_CONFIG 0x400
/* enum: Dynamic configuration TLV partition */
#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: User configuration TLV partition (this is intentionally an alias of
+ * DYNAMIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_USER_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
@@ -8027,10 +9781,16 @@
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output partition for NMC firmware (this is
+ * intentionally an alias of LOG)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Crash log partition for NMC firmware */
+#define NVRAM_PARTITION_TYPE_NMC_CRASH_LOG 0x801
/* enum: Application license key storage partition */
#define NVRAM_PARTITION_TYPE_LICENSE 0x900
/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
@@ -8047,6 +9807,22 @@
#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
/* enum: Non-volatile log output partition for FC */
#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: FPGA Stage 1 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_STAGE1 0xb05
+/* enum: FPGA Stage 2 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_STAGE2 0xb06
+/* enum: FPGA User XCLBIN / Programmable Region 0 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_REGION0 0xb07
+/* enum: FPGA User XCLBIN (this is intentionally an alias of FPGA_REGION0) */
+#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_USER 0xb07
+/* enum: FPGA jump instruction (a.k.a. boot) partition to select Stage1
+ * bitstream
+ */
+#define NVRAM_PARTITION_TYPE_FPGA_JUMP 0xb08
+/* enum: FPGA Validate XCLBIN */
+#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_VALIDATE 0xb09
+/* enum: FPGA XOCL Configuration information */
+#define NVRAM_PARTITION_TYPE_FPGA_XOCL_CONFIG 0xb0a
/* enum: MUM firmware partition */
#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
/* enum: SUC firmware partition (this is intentionally an alias of
@@ -8055,6 +9831,10 @@
#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: SUC Non-volatile log output partition (this is intentionally an alias
+ * of MUM_LOG).
+ */
+#define NVRAM_PARTITION_TYPE_SUC_LOG 0xc01
/* enum: MUM Application table partition. */
#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
/* enum: MUM boot rom partition. */
@@ -8069,6 +9849,10 @@
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
/* enum: Used by the expansion ROM for logging */
#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Non-volatile log output partition for Expansion ROM (this is
+ * intentionally an alias of PXE_LOG).
+ */
+#define NVRAM_PARTITION_TYPE_EXPROM_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
@@ -8077,6 +9861,10 @@
* between XJTAG and Manftest.
*/
#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Deployment configuration TLV partition (this is intentionally an alias
+ * of MANUFACTURING)
+ */
+#define NVRAM_PARTITION_TYPE_DEPLOYMENT_CONFIG 0x1300
/* enum: Spare partition 4 */
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
@@ -8112,14 +9900,45 @@
#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
+/* enum: Partition to store ASN.1 format Bundle Signature for checking. */
+#define NVRAM_PARTITION_TYPE_BUNDLE_SIGNATURE 0x1e04
+/* enum: Test partition on SmartNIC system microcontroller (SUC) */
+#define NVRAM_PARTITION_TYPE_SUC_TEST 0x1f00
+/* enum: System microcontroller access to primary FPGA flash. */
+#define NVRAM_PARTITION_TYPE_SUC_FPGA_PRIMARY 0x1f01
+/* enum: System microcontroller access to secondary FPGA flash (if present) */
+#define NVRAM_PARTITION_TYPE_SUC_FPGA_SECONDARY 0x1f02
+/* enum: System microcontroller access to primary System-on-Chip flash */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_PRIMARY 0x1f03
+/* enum: System microcontroller access to secondary System-on-Chip flash (if
+ * present)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_SECONDARY 0x1f04
+/* enum: System microcontroller critical failure logs. Contains structured
+ * details of sensors leading up to a critical failure (where the board is shut
+ * down).
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FAILURE_LOG 0x1f05
+/* enum: System-on-Chip configuration information (see XN-200467-PS). */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
+/* enum: System-on-Chip update information. */
+#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
/* enum: Recovery partition map (provided if real map is missing or corrupt) */
#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Recovery Flash Partition Table, see SF-122606-TC. (this is
+ * intentionally an alias of RECOVERY_MAP)
+ */
+#define NVRAM_PARTITION_TYPE_RECOVERY_FPT 0xfffe
/* enum: Partition map (real map as stored in flash) */
#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+/* enum: Flash Partition Table, see SF-122606-TC. (this is intentionally an
+ * alias of PARTITION_MAP)
+ */
+#define NVRAM_PARTITION_TYPE_FPT 0xffff
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
@@ -8168,7 +9987,13 @@
#define LICENSED_FEATURES_MASK_OFST 0
#define LICENSED_FEATURES_MASK_LEN 8
#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_LO_LEN 4
+#define LICENSED_FEATURES_MASK_LO_LBN 0
+#define LICENSED_FEATURES_MASK_LO_WIDTH 32
#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_MASK_HI_LEN 4
+#define LICENSED_FEATURES_MASK_HI_LBN 32
+#define LICENSED_FEATURES_MASK_HI_WIDTH 32
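
/* The _LO/_HI triplets above illustrate the convention used throughout this
 * header for 64-bit MCDI fields: two little-endian 32-bit words at _LO_OFST
 * and _HI_OFST. A minimal sketch of recomposing such a field from a raw
 * response buffer follows; mcdi_read_u32() and mcdi_read_u64() are
 * illustrative helper names, not part of this header.
 */
#include <stdint.h>

static uint32_t mcdi_read_u32(const uint8_t *buf, unsigned int ofst)
{
	/* MCDI payloads are little-endian regardless of host endianness. */
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static uint64_t mcdi_read_u64(const uint8_t *buf, unsigned int lo_ofst,
			      unsigned int hi_ofst)
{
	return (uint64_t)mcdi_read_u32(buf, lo_ofst) |
	       ((uint64_t)mcdi_read_u32(buf, hi_ofst) << 32);
}

/* e.g. uint64_t mask = mcdi_read_u64(resp, LICENSED_FEATURES_MASK_LO_OFST,
 *                                    LICENSED_FEATURES_MASK_HI_OFST);
 */
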
#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
@@ -8208,7 +10033,13 @@
#define LICENSED_V3_APPS_MASK_OFST 0
#define LICENSED_V3_APPS_MASK_LEN 8
#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_LO_LEN 4
+#define LICENSED_V3_APPS_MASK_LO_LBN 0
+#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_MASK_HI_LEN 4
+#define LICENSED_V3_APPS_MASK_HI_LBN 32
+#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
#define LICENSED_V3_APPS_ONLOAD_OFST 0
#define LICENSED_V3_APPS_ONLOAD_LBN 0
#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
@@ -8266,7 +10097,13 @@
#define LICENSED_V3_FEATURES_MASK_OFST 0
#define LICENSED_V3_FEATURES_MASK_LEN 8
#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LO_LEN 4
+#define LICENSED_V3_FEATURES_MASK_LO_LBN 0
+#define LICENSED_V3_FEATURES_MASK_LO_WIDTH 32
#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_MASK_HI_LEN 4
+#define LICENSED_V3_FEATURES_MASK_HI_LBN 32
+#define LICENSED_V3_FEATURES_MASK_HI_WIDTH 32
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
@@ -8421,7 +10258,8 @@
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
@@ -8493,7 +10331,13 @@
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
@@ -8514,7 +10358,8 @@
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
@@ -8611,7 +10456,13 @@
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
@@ -8637,6 +10488,158 @@
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+/* MC_CMD_INIT_EVQ_V3_IN msgrequest: Extended request to specify per-queue
+ * event merge timeouts.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_LEN 556
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V3_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_LEN 4
+/* Flags for this event queue; the individual flag bits are defined below. */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for the list of affected
+ * flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for the list of affected
+ * flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be overridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for the list of affected
+ * flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events only */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events only */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of a 4KiB, 4KiB-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Receive event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_RX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_OFST 548
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_LEN 4
+/* Transmit event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_TX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_OFST 552
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_LEN 4
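
/* A minimal sketch of filling in the two fields that V3 adds over V2,
 * assuming the first 548 bytes of the request have already been populated as
 * for MC_CMD_INIT_EVQ_V2. mcdi_write_u32() is an illustrative little-endian
 * store helper, not part of this header.
 */
#include <stdint.h>

static void mcdi_write_u32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >> 0);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

static void init_evq_v3_set_merge_timeouts(uint8_t *req, uint32_t rx_ns,
					   uint32_t tx_ns)
{
	/* 0 selects the firmware default; the values are ignored unless the
	 * corresponding FLAG_RX_MERGE/FLAG_TX_MERGE bit is set in FLAGS.
	 */
	mcdi_write_u32(req, MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_OFST,
		       rx_ns);
	mcdi_write_u32(req, MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_OFST,
		       tx_ns);
}
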
+
+/* MC_CMD_INIT_EVQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V3_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
/* QUEUE_CRC_MODE structuredef */
#define QUEUE_CRC_MODE_LEN 1
#define QUEUE_CRC_MODE_MODE_LBN 0
@@ -8687,7 +10690,8 @@
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
@@ -8728,7 +10732,13 @@
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
@@ -8752,7 +10762,8 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
@@ -8826,8 +10837,16 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
@@ -8849,7 +10868,8 @@
#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
@@ -8923,8 +10943,16 @@
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
@@ -8975,7 +11003,8 @@
#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
@@ -9049,8 +11078,16 @@
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
@@ -9114,7 +11151,8 @@
#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
@@ -9188,8 +11226,16 @@
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
@@ -9285,7 +11331,8 @@
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
@@ -9329,7 +11376,13 @@
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
@@ -9350,7 +11403,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
@@ -9399,6 +11453,9 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_LBN 17
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
@@ -9409,8 +11466,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 0
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Flags related to Qbb flow control mode. */
@@ -9507,7 +11570,13 @@
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LBN 32
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_WIDTH 32
#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LBN 64
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_WIDTH 32
/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
@@ -9606,7 +11675,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
@@ -9616,7 +11691,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
@@ -9627,7 +11708,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
@@ -9651,7 +11738,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
@@ -9661,7 +11754,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
@@ -9672,7 +11771,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
@@ -9788,7 +11893,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
@@ -9844,7 +11955,13 @@
#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
@@ -9888,6 +12005,9 @@
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10000,7 +12120,13 @@
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
@@ -10086,6 +12212,9 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10263,9 +12392,10 @@
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
- * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
- * its rte_flow API. This extension is only useful with the sfc_efx driver
- * included as part of DPDK, used in conjunction with the dpdk datapath
+ * filter actions for EF100. Some of these actions are also supported on EF10,
+ * for Intel's DPDK (Data Plane Development Kit, dpdk.org) via its rte_flow
+ * API. In the latter case, this extension is only useful with the sfc_efx
+ * driver included as part of DPDK, used in conjunction with the dpdk datapath
* firmware variant.
*/
#define MC_CMD_FILTER_OP_V3_IN_LEN 180
@@ -10278,7 +12408,13 @@
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
@@ -10364,6 +12500,9 @@
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10539,11 +12678,42 @@
*/
#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
-/* Set an action for all packets matching this filter. The DPDK driver and dpdk
- * f/w variant use their own specific delivery structures, which are documented
- * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
- * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
- * will cause the filter insertion to fail with ENOTSUP.
+/* Flags controlling mutations of the packet and/or metadata when the filter
+ * is matched. The user_mark and user_flag fields' logic is as follows:
+ *
+ *   if (req.MATCH_BITOR_FLAG == 1)
+ *     user_flag = req.MATCH_SET_FLAG bit_or user_flag;
+ *   else
+ *     user_flag = req.MATCH_SET_FLAG;
+ *
+ *   if (req.MATCH_SET_MARK == 0)
+ *     user_mark = 0;
+ *   else if (req.MATCH_BITOR_MARK == 1)
+ *     user_mark = req.MATCH_SET_MARK bit_or user_mark;
+ *   else
+ *     user_mark = req.MATCH_SET_MARK;
+ *
+ * N.B. These flags overlap with the MATCH_ACTION field, which is deprecated
+ * in favour of this field. For the cases where these flags induce a valid
+ * encoding of the MATCH_ACTION field, the semantics agree. A sketch of this
+ * logic in plain C follows the bit definitions below.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_WIDTH 1
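
/* A sketch transcribing the pseudocode from the comment above one-for-one
 * into plain C. The parameter names mirror the request bits verbatim; in
 * particular set_mark stands for req.MATCH_SET_MARK exactly as the comment
 * uses it.
 */
#include <stdbool.h>
#include <stdint.h>

static void apply_match_action_flags(bool set_flag, bool bitor_flag,
				     uint32_t set_mark, bool bitor_mark,
				     bool *user_flag, uint32_t *user_mark)
{
	if (bitor_flag)
		*user_flag = set_flag | *user_flag;
	else
		*user_flag = set_flag;

	if (set_mark == 0)
		*user_mark = 0;
	else if (bitor_mark)
		*user_mark = set_mark | *user_mark;
	else
		*user_mark = set_mark;
}
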
+/* Deprecated: the overlapping MATCH_ACTION_FLAGS field exposes all of the
+ * functionality of this field in an ABI-backwards-compatible manner, and
+ * should be used instead. Any future extensions should be made to the
+ * MATCH_ACTION_FLAGS field, and not to this field. Set an action for all
+ * packets matching this filter. The DPDK driver and (on EF10) dpdk f/w variant
+ * use their own specific delivery structures, which are documented in the DPDK
+ * Firmware Driver Interface (SF-119419-TC). Requesting anything other than
+ * MATCH_ACTION_NONE on an EF10 NIC running another f/w variant will cause the
+ * filter insertion to fail with ENOTSUP.
*/
#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
@@ -10580,7 +12750,13 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_WIDTH 32
/* enum: guaranteed invalid filter handle (low 32 bits) */
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
/* enum: guaranteed invalid filter handle (high 32 bits) */
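
/* A returned handle is usable only if it differs from the guaranteed-invalid
 * pattern, i.e. both 32-bit halves equal to 0xffffffff. A minimal sketch,
 * assuming the matching MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID definition
 * that pairs with the comment above:
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool filter_handle_is_valid(uint32_t lo, uint32_t hi)
{
	return !(lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
		 hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
}
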
@@ -10600,7 +12776,13 @@
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_OUT/HANDLE */
@@ -10638,6 +12820,8 @@
* rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+/* enum: Read the supported encapsulation types for the VNIC */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -10704,6 +12888,30 @@
#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+/* MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT msgresponse: Returns
+ * the supported encapsulation types for the VNIC
+ */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_LEN 8
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_TYPES is returned */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
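
/* A sketch of testing one of the ENCAP_TYPES_SUPPORTED bits via the
 * OFST/LBN/WIDTH convention; mcdi_read_u32() is the illustrative
 * little-endian accessor sketched earlier.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t mcdi_read_u32(const uint8_t *buf, unsigned int ofst);

static bool vnic_encap_supports_vxlan(const uint8_t *resp)
{
	uint32_t types = mcdi_read_u32(resp,
		MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_OFST);

	return (types >> MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN) & 1;
}
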
+
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -10849,9 +13057,15 @@
/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
-/* Identifies the port assignment for this function. */
+/* Identifies the port assignment for this function. On EF100, it is possible
+ * for the function to have no network port assigned (either because it is not
+ * yet configured, or because a network port makes no sense for the function's
+ * personality, e.g. virtio-blk), in which case the return value is NULL_PORT.
+ */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+/* enum: Special value to indicate no port is assigned to a function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT 0xffffffff
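
/* Since PORT may legitimately be NULL_PORT on EF100, a sketch of the check a
 * caller should make before treating the result as a real port index;
 * mcdi_read_u32() is the illustrative accessor sketched earlier.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t mcdi_read_u32(const uint8_t *buf, unsigned int ofst);

static bool port_assignment_is_valid(const uint8_t *resp)
{
	return mcdi_read_u32(resp, MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST) !=
	       MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT;
}
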
/***********************************/
@@ -11009,7 +13223,8 @@
/***********************************/
/* MC_CMD_GET_VI_ALLOC_INFO
* Get information about number of VI's and base VI number allocated to this
- * function.
+ * function. This message is not available to dynamic clients created by
+ * MC_CMD_CLIENT_ALLOC.
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
#undef MC_CMD_0x8d_PRIVILEGE_CTG
@@ -11036,7 +13251,9 @@
/***********************************/
/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ * For CmdClient use. Dump pertinent information on a specific absolute VI. The
+ * VI must be owned by the calling client or one of its ancestors; usership of
+ * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
#undef MC_CMD_0x8e_PRIVILEGE_CTG
@@ -11050,7 +13267,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
/* The PF part of the function owning this VI. */
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
@@ -11073,12 +13290,24 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
/* Raw evq timer table data. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
@@ -11095,22 +13324,46 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
@@ -11130,22 +13383,46 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
/* RXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
/* Reserved, currently 0. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
@@ -11158,6 +13435,9 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+/* Current user, as assigned by MC_CMD_SET_VI_USER. */
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
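
/* USER_CLIENT_ID extends the response from 96 to 100 bytes, so a driver that
 * may talk to older firmware should check the actual response length before
 * reading it. A minimal sketch, where outlen is assumed to be the byte count
 * returned by the MCDI transport:
 */
#include <stdbool.h>
#include <stddef.h>

static bool dump_vi_state_has_user_client_id(size_t outlen)
{
	return outlen >= MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST +
			 MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN;
}
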
/***********************************/
@@ -11200,7 +13480,9 @@
/***********************************/
/* MC_CMD_GET_VI_TLP_PROCESSING
- * Get TLP steering and ordering information for a VI.
+ * Get TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
#undef MC_CMD_0xb0_PRIVILEGE_CTG
@@ -11239,7 +13521,9 @@
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
- * Set TLP steering and ordering information for a VI.
+ * Set TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
@@ -14497,6 +16781,24 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
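
/* The capability bits added above live in the dword at offset 148 and follow
 * the usual OFST/LBN/WIDTH convention. A sketch of testing one of them;
 * mcdi_read_u32() is the illustrative accessor sketched earlier.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t mcdi_read_u32(const uint8_t *buf, unsigned int ofst);

static bool caps_v7_nvram_update_abort_supported(const uint8_t *resp)
{
	uint32_t flags = mcdi_read_u32(resp,
		MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST);

	return (flags >> MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN) & 1;
}
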
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -14983,6 +17285,24 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -14990,7 +17310,13 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_WIDTH 32
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_WIDTH 32
/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
@@ -15477,6 +17803,24 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -15484,7 +17828,13 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_WIDTH 32
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_WIDTH 32
/* The minimum size (in table entries) of indirection table to be allocated
* from the pool for an RSS context. Note that the table size used must be a
* power of 2.
@@ -15521,6 +17871,573 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V10_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LEN 192
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_WIDTH 1
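/* Informative sketch: for the flag bits above, OFST names the containing
 * dword (FLAGS1 at offset 0) and LBN is the bit position within that dword,
 * so a capability test reads the dword and shifts, e.g. (resp being a
 * hypothetical response buffer, using mcdi_read_dword() from the sketch
 * above):
 *
 *      uint32_t flags1 = mcdi_read_dword(resp,
 *                      MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST);
 *      int has_evb = (flags1 >> MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN) & 1;
 */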
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
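/* Informative sketch: fields documented as "not present on older firmware"
 * must be length-guarded before use; resp_len (hypothetical) is the actual
 * response length in bytes reported by the MCDI transport.
 */
static inline int mcdi_field_present(size_t resp_len, size_t ofst, size_t len)
{
        return resp_len >= ofst + len;
}
/* e.g. only read TX_TSO_V2_N_CONTEXTS if
 * mcdi_field_present(resp_len,
 *              MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST,
 *              MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
 */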
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
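/* Informative sketch: decoding one PFS_TO_PORTS_ASSIGNMENT byte per the
 * enum values above; as noted, a current driver treats
 * INCOMPATIBLE_ASSIGNMENT like PF_NOT_ASSIGNED.
 */
static inline int mcdi_pf_external_port(uint8_t assignment)
{
        switch (assignment) {
        case 0xff:                      /* ACCESS_NOT_PERMITTED */
        case 0xfe:                      /* PF_NOT_PRESENT */
                return -1;              /* no information available */
        case 0xfd:                      /* PF_NOT_ASSIGNED */
        case 0xfc:                      /* INCOMPATIBLE_ASSIGNMENT */
                return -2;              /* present but not usable here */
        default:
                return assignment;      /* external port number */
        }
}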
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_LEN 1
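/* Informative sketch: the two cache sizes above are base-2 logarithms, so
 * the actual entry counts are simple shifts:
 *
 *      unsigned int rx_entries = 1u << rx_desc_cache_size;
 *      unsigned int tx_entries = 1u << tx_desc_cache_size;
 */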
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K 0x2
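/* Informative sketch: mapping VI_WINDOW_MODE to a per-VI window stride in
 * bytes, following the enum values above.
 */
static inline uint32_t mcdi_vi_window_bytes(uint8_t mode)
{
        switch (mode) {
        case 0x0: return 8 * 1024;      /* VI_WINDOW_MODE_8K */
        case 0x1: return 16 * 1024;     /* VI_WINDOW_MODE_16K */
        case 0x2: return 64 * 1024;     /* VI_WINDOW_MODE_64K */
        default: return 0;              /* unknown mode: fail the probe */
        }
}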
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_LEN 2
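/* Informative sketch: sizing a MAC stats DMA buffer from the field above;
 * per its comment, one 64-bit value per stat including the final
 * GENERATION_END entry:
 *
 *      size_t mac_stats_dma_len = (size_t)mac_stats_num_stats * 8;
 */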
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
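/* Informative sketch: counting the meaningful (nonzero) entries of
 * GUARANTEED_RX_BUFFER_SIZES, reusing mcdi_read_dword() from the sketch
 * above.
 */
static inline unsigned int
mcdi_num_guaranteed_rx_buffer_sizes(const uint8_t *resp)
{
        unsigned int i, n = 0;

        for (i = 0;
             i < MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM;
             i++)
                if (mcdi_read_dword(resp,
                    MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST +
                    4 * i) != 0)
                        n++;
        return n;
}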
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
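/* Informative sketch: testing the two queue-size bitmaps above. Bit N set
 * means 2**N entries is a valid size (SUPPORTED) or always creatable
 * (GUARANTEED).
 */
static inline int mcdi_queue_size_in_bitmap(uint32_t bitmap,
                                            unsigned int log2_entries)
{
        return log2_entries < 32 && (bitmap & (1u << log2_entries)) != 0;
}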
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -15729,7 +18646,7 @@
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
-/* Function Local Instance (VI) number. */
+/* Function Local Instance (VI) number which has a TxQ allocated to it. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
@@ -17303,7 +20220,13 @@
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
@@ -17503,6 +20426,18 @@
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+/* MC_CMD_GET_FUNCTION_INFO_OUT_V2 msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN 12
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_LEN 4
+/* Values from PCIE_INTERFACE enumeration. For NICs with a single interface, or
+ * in the case of a V1 response, this should be HOST_PRIMARY.
+ */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_OFST 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_LEN 4
+
/***********************************/
/* MC_CMD_ENABLE_OFFLINE_BIST
@@ -18570,7 +21505,13 @@
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LBN 192
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LBN 224
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_WIDTH 32
/* reserved for future use */
#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
@@ -18578,7 +21519,13 @@
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LBN 448
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LBN 480
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_WIDTH 32
/* reserved for future use */
#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
@@ -18681,7 +21628,13 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
@@ -18713,7 +21666,13 @@
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
@@ -18721,7 +21680,13 @@
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
/***********************************/
@@ -18826,7 +21791,13 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
@@ -18876,7 +21847,13 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
@@ -18956,7 +21933,13 @@
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
/***********************************/
@@ -19322,7 +22305,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
@@ -19490,6 +22473,22 @@
* SF-117064-DG for background).
*/
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Control the Match-Action Engine if present. See mcdi_mae.yml. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE 0x10000
+/* enum: This Function/client may call MC_CMD_CLIENT_ALLOC to create new
+ * dynamic client children of itself.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALLOC_CLIENT 0x20000
+/* enum: A dynamic client with this privilege may perform all the same DMA
+ * operations as the function client from which it is descended.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_FUNC_DMA 0x40000
+/* enum: A client with this privilege may perform DMA as any PCIe function on
+ * the device and to on-device DDR. It allows clients to use TX-DESC2CMPT-DESC
+ * descriptors, and to use TX-SEG-DESC and TX-MEM2MEM-DESC with an address
+ * space override (i.e. with the ADDR_SPC_EN bit set).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ARBITRARY_DMA 0x80000
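/* Informative sketch: the privilege groups are single bits, so a requested
 * mask is just their bitwise OR, e.g.:
 *
 *      uint32_t new_mask = MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE |
 *                          MC_CMD_PRIVILEGE_MASK_IN_GRP_ALLOC_CLIENT;
 */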
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -20277,7 +23276,8 @@
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
@@ -20499,7 +23499,13 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
@@ -20521,6 +23527,9 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
@@ -20530,6 +23539,12 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
@@ -20575,9 +23590,12 @@
#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
+#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
@@ -20814,6 +23832,21 @@
#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
+ * requests of the device and that can own resources managed by the device.
+ * Examples of clients include PCIe functions and dynamic clients. A client
+ * handle is a 32b opaque value used to refer to a client. Further details can
+ * be found within XN-200418-TC.
+ */
+#define CLIENT_HANDLE_LEN 4
+#define CLIENT_HANDLE_OPAQUE_OFST 0
+#define CLIENT_HANDLE_OPAQUE_LEN 4
+/* enum: A client handle guaranteed never to refer to a real client. */
+#define CLIENT_HANDLE_NULL 0xffffffff
+/* enum: Used to refer to the calling client. */
+#define CLIENT_HANDLE_SELF 0xfffffffe
+#define CLIENT_HANDLE_OPAQUE_LBN 0
+#define CLIENT_HANDLE_OPAQUE_WIDTH 32
/* CLOCK_INFO structuredef: Information about a single hardware clock */
#define CLOCK_INFO_LEN 28
@@ -20848,7 +23881,13 @@
#define CLOCK_INFO_FREQUENCY_OFST 4
#define CLOCK_INFO_FREQUENCY_LEN 8
#define CLOCK_INFO_FREQUENCY_LO_OFST 4
+#define CLOCK_INFO_FREQUENCY_LO_LEN 4
+#define CLOCK_INFO_FREQUENCY_LO_LBN 32
+#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
#define CLOCK_INFO_FREQUENCY_HI_OFST 8
+#define CLOCK_INFO_FREQUENCY_HI_LEN 4
+#define CLOCK_INFO_FREQUENCY_HI_LBN 64
+#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
#define CLOCK_INFO_FREQUENCY_LBN 32
#define CLOCK_INFO_FREQUENCY_WIDTH 64
/* Human-readable ASCII name for clock, with NUL termination */
@@ -20858,6 +23897,57 @@
#define CLOCK_INFO_NAME_LBN 96
#define CLOCK_INFO_NAME_WIDTH 8
+/* SCHED_CREDIT_CHECK_RESULT structuredef */
+#define SCHED_CREDIT_CHECK_RESULT_LEN 16
+/* The instance of the scheduler. Refer to XN-200389-AW for the location of
+ * these schedulers in the hardware.
+ */
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_A 0x0 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_A 0x1 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_B 0x2 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_C 0x3 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_TX 0x4 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_D 0x5 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_REPLAY 0x6 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
+/* The type of node that this result refers to. */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_OFST 1
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LEN 1
+/* enum: Destination node */
+#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
+/* enum: Source node */
+#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
+/* Level of node in scheduler hierarchy (level 0 is the bottom of the
+ * hierarchy, increasing towards the root node).
+ */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_OFST 2
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LEN 2
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LBN 16
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_WIDTH 16
+/* Node index */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_OFST 4
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LBN 32
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_WIDTH 32
+/* The number of credits the node is expected to have. */
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_OFST 8
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LBN 64
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_WIDTH 32
+/* The number of credits the node actually had. */
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_OFST 12
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LBN 96
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_WIDTH 32
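/* Informative sketch: a host-side mirror of SCHED_CREDIT_CHECK_RESULT using
 * the offsets above; on big-endian hosts the multi-byte fields must still be
 * read as little-endian (e.g. with mcdi_read_dword() from the earlier
 * sketch).
 */
struct sched_credit_check_result {
        uint8_t sched_instance;         /* OFST 0: HUB_HOST_A etc. */
        uint8_t node_type;              /* OFST 1: DEST or SOURCE */
        uint16_t node_level;            /* OFST 2 */
        uint32_t node_index;            /* OFST 4 */
        uint32_t expected_credits;      /* OFST 8 */
        uint32_t actual_credits;        /* OFST 12 */
};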
+
/***********************************/
/* MC_CMD_GET_CLOCKS_INFO
@@ -20887,7 +23977,19 @@
/***********************************/
/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full.
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
*/
#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
#undef MC_CMD_0x16d_PRIVILEGE_CTG
@@ -20951,6 +24053,12 @@
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
@@ -20967,7 +24075,9 @@
/***********************************/
/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ * Remove a VNIC encapsulation rule. Packets which would have previously
+ * matched the rule will then be considered as unencapsulated. Returns EALREADY
+ * if the input HANDLE doesn't correspond to an existing rule.
*/
#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
#undef MC_CMD_0x16e_PRIVILEGE_CTG
@@ -20983,6 +24093,964 @@
/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
+ * the endianness specified by the RFC; users should ignore the broken-out
+ * fields and instead do straight memory copies to ensure correct ordering.
+ */
+#define UUID_LEN 16
+#define UUID_TIME_LOW_OFST 0
+#define UUID_TIME_LOW_LEN 4
+#define UUID_TIME_LOW_LBN 0
+#define UUID_TIME_LOW_WIDTH 32
+#define UUID_TIME_MID_OFST 4
+#define UUID_TIME_MID_LEN 2
+#define UUID_TIME_MID_LBN 32
+#define UUID_TIME_MID_WIDTH 16
+#define UUID_TIME_HI_LBN 52
+#define UUID_TIME_HI_WIDTH 12
+#define UUID_VERSION_LBN 48
+#define UUID_VERSION_WIDTH 4
+#define UUID_RESERVED_LBN 64
+#define UUID_RESERVED_WIDTH 2
+#define UUID_CLK_SEQ_LBN 66
+#define UUID_CLK_SEQ_WIDTH 14
+#define UUID_NODE_OFST 10
+#define UUID_NODE_LEN 6
+#define UUID_NODE_LBN 80
+#define UUID_NODE_WIDTH 48
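/* Informative sketch: per the note above, UUID values are moved with
 * straight 16-byte copies rather than via the broken-out fields.
 */
#include <string.h>

static inline void mcdi_uuid_copy(uint8_t dst[16], const uint8_t src[16])
{
        memcpy(dst, src, 16);   /* UUID_LEN bytes, RFC 4122 byte order */
}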
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_ALLOC
+ * Create a handle to a datapath plugin's extension. This involves finding a
+ * currently-loaded plugin offering the given functionality (as identified by
+ * the UUID) and allocating a handle to track the usage of it. Plugin
+ * functionality is identified by 'extension' rather than any other identifier
+ * so that a single plugin bitfile may offer more than one piece of independent
+ * functionality. If two bitfiles are loaded which both offer the same
+ * extension, then the metadata is interrogated further to determine which is
+ * the newest and that is the one opened. See SF-123625-SW for architectural
+ * detail on datapath plugins.
+ */
+#define MC_CMD_PLUGIN_ALLOC 0x1ad
+#undef MC_CMD_0x1ad_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
+#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
+/* The functionality requested of the plugin, as a UUID structure */
+#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
+#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
+/* Additional options for opening the handle */
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
+/* Load the extension only if it is in the specified administrative group.
+ * Specify ANY to load the extension wherever it is found (if there are
+ * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
+ * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
+ * administrative groups.
+ */
+#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
+#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
+/* enum: Load the extension from any ADMIN_GROUP. */
+#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
+/* Reserved */
+#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
+#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
+
+/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
+#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
+/* Unique identifier of this usage */
+#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
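
/*
 * A minimal sketch of building an MC_CMD_PLUGIN_ALLOC request from the
 * definitions above. mcdi_rpc() stands in for whatever MCDI transport the
 * caller has (its name and signature are assumptions, not part of this
 * header); all fields are little-endian per MCDI convention.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen);		/* assumed transport */

static void put_le16(uint8_t *buf, unsigned int ofst, uint16_t v)
{
	buf[ofst] = v & 0xff;
	buf[ofst + 1] = v >> 8;
}

static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return buf[ofst] | buf[ofst + 1] << 8 | buf[ofst + 2] << 16 |
	       (uint32_t)buf[ofst + 3] << 24;
}

static int plugin_alloc(const uint8_t uuid[UUID_LEN], uint32_t *handle)
{
	uint8_t in[MC_CMD_PLUGIN_ALLOC_IN_LEN] = { 0 };
	uint8_t out[MC_CMD_PLUGIN_ALLOC_OUT_LEN];
	int rc;

	memcpy(in + MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST, uuid,
	       MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN);
	/* FLAGS stay zero; accept the extension from any admin group. */
	put_le16(in, MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST,
		 MC_CMD_PLUGIN_ALLOC_IN_ANY);
	rc = mcdi_rpc(MC_CMD_PLUGIN_ALLOC, in, sizeof(in), out, sizeof(out));
	if (!rc)
		*handle = get_le32(out, MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST);
	return rc;
}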
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_FREE
+ * Delete a handle to a plugin's extension.
+ */
+#define MC_CMD_PLUGIN_FREE 0x1ae
+#undef MC_CMD_0x1ae_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_FREE_IN msgrequest */
+#define MC_CMD_PLUGIN_FREE_IN_LEN 4
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
+
+/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
+#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_GLOBAL
+ * Returns the global metadata applying to the whole plugin extension. See the
+ * other metadata calls for subtypes of data.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
+#undef MC_CMD_0x1af_PRIVILEGE_CTG
+
+#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
+/* Unique identifier of this plugin extension. This is identical to the value
+ * which was requested when the handle was allocated.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
+/* semver sub-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
+/* semver micro-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
+/* Number of different messages which can be sent to this extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
+/* Byte offset within the VI window of the plugin's mapped CSR window. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
+/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
+ * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
+ * MAPPED_CSR_FLAGS are ignored).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
+/* Flags indicating how to perform the CSR window mapping. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
+/* Identifier of the set of extensions which all change state together.
+ * Extensions having the same ADMIN_GROUP will always load and unload at the
+ * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
+ * generation number as an implementation detail to ensure that they're not
+ * reused rapidly).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
+/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
+ * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
+ * access to this extension.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
+/* Reserved */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER
+ * Returns metadata supplied by the plugin author which describes this
+ * extension in a human-readable way. Contrast with
+ * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
+ * to operate.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
+#undef MC_CMD_0x1b0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
+/* Category of data to return */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
+/* enum: Top-level information about the extension. The returned data is an
+ * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
+ * the extension. The data is a back-to-back list of zero-terminated strings;
+ * the even-numbered fields (0,2,4,...) are keys and their following odd-
+ * numbered fields are the corresponding values. Both keys and values are
+ * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
+ * times. Note that all information (including the key/value structure itself
+ * and the UTF-8 encoding) may have been provided by the plugin author, so
+ * callers must be cautious about parsing it. Callers should parse only the
+ * top-level structure to separate out the keys and values; the contents of
+ * the values are not expected to be machine-readable.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
+/* Byte position of the data to be returned within the full data block of the
+ * given SUBTYPE.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Full length of the data block of the requested SUBTYPE, in bytes. */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
+/* The information requested by SUBTYPE. */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
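
/*
 * A minimal sketch of walking an EXTENSION_KVS data block as described
 * above: back-to-back NUL-terminated strings, alternating key and value.
 * Since the content is author-supplied, the walk is bounds-checked and
 * stops at any unterminated tail rather than trusting the data. kv_cb() is
 * an assumed caller-provided callback.
 */
#include <stddef.h>
#include <string.h>

static void walk_extension_kvs(const char *data, size_t len,
			       void (*kv_cb)(const char *key, const char *val))
{
	size_t pos = 0;

	while (pos < len) {
		const char *key = data + pos;
		const char *key_end = memchr(key, '\0', len - pos);
		const char *val, *val_end;

		if (!key_end)
			return;	/* unterminated key: stop parsing */
		pos = (key_end - data) + 1;
		if (pos >= len)
			return;	/* key with no value: stop parsing */
		val = data + pos;
		val_end = memchr(val, '\0', len - pos);
		if (!val_end)
			return;	/* unterminated value: stop parsing */
		pos = (val_end - data) + 1;
		kv_cb(key, val);
	}
}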
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_MSG
+ * Returns the simple metadata for a specific plugin request message. This
+ * supplies information necessary for the host to know how to build an
+ * MC_CMD_PLUGIN_REQ request.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
+#undef MC_CMD_0x1b1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
+/* Unique message ID to obtain */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
+/* Unique message ID. This is the same value as the input parameter; it exists
+ * to allow future MCDI extensions which enumerate all messages.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
+/* Packed index number of this message, assigned by the MC to give each message
+ * a unique ID in an array to allow for more efficient storage/management.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
+/* Short human-readable codename for this message. This is conventionally
+ * formatted as a C identifier in the basic ASCII character set with any spare
+ * bytes at the end set to 0; however, this convention is not enforced by the
+ * MC, so consumers must check for all potential malformations before using it
+ * for a trusted purpose.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
+/* Number of bytes of data which must be passed from the host kernel to the MC
+ * for this message's payload, and which are passed back again in the response.
+ * The MC's plugin metadata loader will have validated that the number of bytes
+ * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
+ * message.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
+
+/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
+ * an individual extension.
+ */
+#define PLUGIN_EXTENSION_LEN 20
+#define PLUGIN_EXTENSION_UUID_OFST 0
+#define PLUGIN_EXTENSION_UUID_LEN 16
+#define PLUGIN_EXTENSION_UUID_LBN 0
+#define PLUGIN_EXTENSION_UUID_WIDTH 128
+#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
+#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
+#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
+#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
+#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
+#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
+#define PLUGIN_EXTENSION_RESERVED_LBN 137
+#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_ALL
+ * Returns a list of all plugin extensions currently loaded and available. The
+ * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
+ * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
+ * ADMIN_GROUP field indicates how extensions are grouped into units which are
+ * loaded/unloaded together; extensions with the same value are in the same
+ * group.
+ */
+#define MC_CMD_PLUGIN_GET_ALL 0x1b2
+#undef MC_CMD_0x1b2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
+/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
+ * nor FLAG_INCLUDE_DISABLED are specified then the result set will be empty.
+ */
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
+
+/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
+/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
+ * structs.
+ */
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
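
/*
 * A minimal sketch of decoding the EXTENSIONS array: each 20-byte
 * PLUGIN_EXTENSION entry carries the UUID at bytes 0-15, the ADMIN_GROUP at
 * byte 16, and the ENABLED flag as bit 136 of the structure (bit 0 of byte
 * 17). The struct below is a host-side convenience, not a wire format.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct plugin_ext {
	uint8_t uuid[PLUGIN_EXTENSION_UUID_LEN];
	uint8_t admin_group;
	bool enabled;
};

static unsigned int parse_plugin_exts(const uint8_t *out, size_t outlen,
				      struct plugin_ext *exts,
				      unsigned int max)
{
	unsigned int i, n = MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(outlen);

	if (n > max)
		n = max;
	for (i = 0; i < n; i++) {
		const uint8_t *e = out +
			MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST +
			i * PLUGIN_EXTENSION_LEN;

		memcpy(exts[i].uuid, e + PLUGIN_EXTENSION_UUID_OFST,
		       PLUGIN_EXTENSION_UUID_LEN);
		exts[i].admin_group = e[PLUGIN_EXTENSION_ADMIN_GROUP_OFST];
		exts[i].enabled = e[PLUGIN_EXTENSION_FLAG_ENABLED_LBN / 8] &
				  1 << (PLUGIN_EXTENSION_FLAG_ENABLED_LBN % 8);
	}
	return n;
}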
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_REQ
+ * Send a command to a plugin. A plugin may define an arbitrary number of
+ * 'messages' which it allows applications on the host system to send, each
+ * identified by a 32-bit ID.
+ */
+#define MC_CMD_PLUGIN_REQ 0x1b3
+#undef MC_CMD_0x1b3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_REQ_IN msgrequest */
+#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
+#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
+#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
+#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
+/* Message ID defined by the plugin author */
+#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
+#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
+/* Data blob being the parameter to the message. This must be of the length
+ * specified by MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE.
+ */
+#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
+#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
+#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
+#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
+#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
+/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
+ * the same size as the input DATA parameter.
+ */
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
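
/*
 * A minimal sketch of issuing MC_CMD_PLUGIN_REQ: the payload must be exactly
 * the DATA_SIZE advertised by MC_CMD_PLUGIN_GET_META_MSG for the chosen
 * message ID, and the response carries the same number of bytes back.
 * mcdi_rpc() and put_le32() are assumed helpers as in the earlier sketches.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen);		/* assumed transport */
void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v); /* as earlier */

static int plugin_req(uint32_t handle, uint32_t msg_id,
		      uint8_t *data, uint32_t data_size)
{
	uint8_t in[MC_CMD_PLUGIN_REQ_IN_LENMAX] = { 0 };

	if (data_size > MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM)
		return -1;	/* larger payloads need MCDI2 framing */
	put_le32(in, MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST, handle);
	put_le32(in, MC_CMD_PLUGIN_REQ_IN_ID_OFST, msg_id);
	memcpy(in + MC_CMD_PLUGIN_REQ_IN_DATA_OFST, data, data_size);
	/* The transformed payload overwrites the caller's buffer. */
	return mcdi_rpc(MC_CMD_PLUGIN_REQ, in,
			MC_CMD_PLUGIN_REQ_IN_LEN(data_size),
			data, data_size);
}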
+
+/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
+ * space that maps to a contiguous region of TRGT_ADDR space. Addresses
+ * DESC_ADDR in the range [DESC_ADDR_BASE, DESC_ADDR_BASE + (1 <<
+ * WINDOW_SIZE_LOG2)) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
+ * TRGT_ADDR_BASE.
+ */
+#define DESC_ADDR_REGION_LEN 32
+/* The start of the region in DESC_ADDR space. */
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
+/* The start of the region in TRGT_ADDR space. Drivers can set this via
+ * MC_CMD_SET_DESC_ADDR_REGIONS.
+ */
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
+/* The size of the region. */
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
+/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
+ * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
+ */
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_OFST 24
+#define DESC_ADDR_REGION_RSVD_LEN 8
+#define DESC_ADDR_REGION_RSVD_LO_OFST 24
+#define DESC_ADDR_REGION_RSVD_LO_LEN 4
+#define DESC_ADDR_REGION_RSVD_LO_LBN 192
+#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_HI_OFST 28
+#define DESC_ADDR_REGION_RSVD_HI_LEN 4
+#define DESC_ADDR_REGION_RSVD_HI_LBN 224
+#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_LBN 192
+#define DESC_ADDR_REGION_RSVD_WIDTH 64
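
/*
 * A minimal sketch of the address mapping described above: a DESC_ADDR in
 * [DESC_ADDR_BASE, DESC_ADDR_BASE + (1 << WINDOW_SIZE_LOG2)) translates to
 * TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE + TRGT_ADDR_BASE. The struct is a
 * host-side convenience mirroring DESC_ADDR_REGION, not a wire format.
 */
#include <stdbool.h>
#include <stdint.h>

struct desc_addr_region {
	uint64_t desc_addr_base;
	uint64_t trgt_addr_base;
	uint32_t window_size_log2;
};

static bool desc_to_trgt(const struct desc_addr_region *r,
			 uint64_t desc_addr, uint64_t *trgt_addr)
{
	uint64_t window = (uint64_t)1 << r->window_size_log2;

	if (desc_addr < r->desc_addr_base ||
	    desc_addr - r->desc_addr_base >= window)
		return false;	/* not covered by this region */
	*trgt_addr = desc_addr - r->desc_addr_base + r->trgt_addr_base;
	return true;
}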
+
+
+/***********************************/
+/* MC_CMD_GET_DESC_ADDR_INFO
+ * Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO 0x1b7
+#undef MC_CMD_0x1b7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_DESC_ADDR_INFO_IN msgrequest */
+#define MC_CMD_GET_DESC_ADDR_INFO_IN_LEN 0
+
+/* MC_CMD_GET_DESC_ADDR_INFO_OUT msgresponse */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN 4
+/* The type of mapping; see SF-nnnnnn-xx (EF100 driver writer's guide, once
+ * written) for details of each type.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE_OFST 0
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE_LEN 4
+/* enum: TRGT_ADDR = DESC_ADDR */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT 0x0
+/* enum: DESC_ADDR has one or more regions that map into TRGT_ADDR. The base
+ * TRGT_ADDR for each region is programmable via MCDI.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_REGIONED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_DESC_ADDR_REGIONS
+ * Returns a list of the DESC_ADDR regions for the calling function's address space. Only valid if that function's address space has the REGIONED mapping from DESC_ADDR to TRGT_ADDR.
+ */
+#define MC_CMD_GET_DESC_ADDR_REGIONS 0x1b8
+#undef MC_CMD_0x1b8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_DESC_ADDR_REGIONS_IN msgrequest */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN 0
+
+/* MC_CMD_GET_DESC_ADDR_REGIONS_OUT msgresponse */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMIN 32
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX 224
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2 992
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LEN(num) (0+32*(num))
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM(len) (((len)-0)/32)
+/* An array of DESC_ADDR_REGION structures. The number of entries in the array
+ * indicates the number of available regions.
+ */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_OFST 0
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_LEN 32
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MINNUM 1
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM 7
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM_MCDI2 31
+
+
+/***********************************/
+/* MC_CMD_SET_DESC_ADDR_REGIONS
+ * Set the base TRGT_ADDR for a set of DESC_ADDR regions for the calling function's address space. Only valid if that function's address space has the REGIONED mapping from DESC_ADDR to TRGT_ADDR.
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS 0x1b9
+#undef MC_CMD_0x1b9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_DESC_ADDR_REGIONS_IN msgrequest */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMIN 16
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX 248
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX_MCDI2 1016
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(num) (8+8*(num))
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_NUM(len) (((len)-8)/8)
+/* A bitmask indicating which regions should have their base TRGT_ADDR updated.
+ * To update the base TRGT_ADDR for a DESC_ADDR region, the corresponding bit
+ * should be set to 1.
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_OFST 0
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_LEN 4
+/* Reserved field; must be set to zero. */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_RSVD_OFST 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_RSVD_LEN 4
+/* An array of values used to update the base TRGT_ADDR for DESC_ADDR regions.
+ * Array indices correspond to region numbers (i.e. the array is sparse, and
+ * includes entries for regions even if the corresponding SET_REGION_MASK bit
+ * is zero).
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_OFST 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_OFST 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_LEN 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_LBN 64
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_WIDTH 32
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_OFST 12
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_LEN 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_LBN 96
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_WIDTH 32
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MINNUM 1
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM 30
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM_MCDI2 126
+
+/* MC_CMD_SET_DESC_ADDR_REGIONS_OUT msgresponse */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN 0
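
/*
 * A minimal sketch of building MC_CMD_SET_DESC_ADDR_REGIONS: the
 * TRGT_ADDR_BASE array is indexed directly by region number, so entries must
 * be present even for regions whose SET_REGION_MASK bit is clear (their
 * values are ignored). mcdi_rpc() is an assumed transport helper.
 */
#include <stddef.h>
#include <stdint.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen);		/* assumed transport */

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

static void put_le64(uint8_t *buf, unsigned int ofst, uint64_t v)
{
	put_le32(buf, ofst, (uint32_t)v);
	put_le32(buf, ofst + 4, (uint32_t)(v >> 32));
}

static int set_desc_addr_regions(uint32_t mask, const uint64_t *bases,
				 unsigned int nregions)
{
	uint8_t in[MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX] = { 0 };
	unsigned int i;

	if (nregions > MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM)
		return -1;
	put_le32(in, MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_OFST,
		 mask);
	/* RSVD at offset 4 stays zero. */
	for (i = 0; i < nregions; i++)
		put_le64(in,
			 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_OFST +
			 i * MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN,
			 bases[i]);
	return mcdi_rpc(MC_CMD_SET_DESC_ADDR_REGIONS, in,
			MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(nregions),
			NULL, 0);
}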
+
+
+/***********************************/
+/* MC_CMD_CLIENT_CMD
+ * Execute an arbitrary MCDI command on behalf of a different client. The
+ * consequences of the command (e.g. ownership of any resources created) apply
+ * to the indicated client rather than the function client which actually sent
+ * this command. All inherent permission checks are also performed on the
+ * indicated client. The given client must be a descendant of the requestor.
+ * The command to be proxied follows immediately afterward in the host buffer
+ * (or on the UART). Chaining multiple MC_CMD_CLIENT_CMD is unnecessary and not
+ * supported. New dynamic clients may be created with MC_CMD_CLIENT_ALLOC.
+ */
+#define MC_CMD_CLIENT_CMD 0x1ba
+#undef MC_CMD_0x1ba_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_CLIENT_CMD_IN msgrequest */
+#define MC_CMD_CLIENT_CMD_IN_LEN 4
+/* The client as which to execute the following command. */
+#define MC_CMD_CLIENT_CMD_IN_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_CMD_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_CLIENT_CMD_OUT msgresponse */
+#define MC_CMD_CLIENT_CMD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CLIENT_ALLOC
+ * Create a new client object. Clients are a system for delineating NIC
+ * resource ownership, such that groups of resources may be torn down as a
+ * unit. See also MC_CMD_CLIENT_CMD. See XN-200265-TC for background, concepts
+ * and a glossary. Clients created by this command are known as "dynamic
+ * clients". The newly-created client is a child of the client which sent this
+ * command. The caller must have the GRP_ALLOC_CLIENT privilege. The new client
+ * initially has no permission to do anything; see
+ * MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY.
+ */
+#define MC_CMD_CLIENT_ALLOC 0x1bb
+#undef MC_CMD_0x1bb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bb_PRIVILEGE_CTG SRIOV_CTG_ALLOC_CLIENT
+
+/* MC_CMD_CLIENT_ALLOC_IN msgrequest */
+#define MC_CMD_CLIENT_ALLOC_IN_LEN 0
+
+/* MC_CMD_CLIENT_ALLOC_OUT msgresponse */
+#define MC_CMD_CLIENT_ALLOC_OUT_LEN 4
+/* The ID of the new client object which has been created. */
+#define MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CLIENT_FREE
+ * Destroy and release an existing client object. All resources owned by that
+ * client (including its child clients, and thus all resources owned by the
+ * entire family tree) are freed.
+ */
+#define MC_CMD_CLIENT_FREE 0x1bc
+#undef MC_CMD_0x1bc_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_CLIENT_FREE_IN msgrequest */
+#define MC_CMD_CLIENT_FREE_IN_LEN 4
+/* The ID of the client to be freed. This client must be a descendant of the
+ * requestor. A client cannot free itself.
+ */
+#define MC_CMD_CLIENT_FREE_IN_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_FREE_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_CLIENT_FREE_OUT msgresponse */
+#define MC_CMD_CLIENT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_USER
+ * Assign partial rights over this VI to another client. VIs have an 'owner'
+ * and a 'user'. The owner is the client which allocated the VI
+ * (MC_CMD_ALLOC_VIS) and cannot be changed. The user is the client which has
+ * permission to create queues and other resources on that VI. Initially
+ * user==owner, but the user can be changed by this command; the resources thus
+ * created are then owned by the user-client. Only the VI owner can call this
+ * command, and the request will fail if there are any outstanding child
+ * resources (e.g. queues) currently allocated from this VI.
+ */
+#define MC_CMD_SET_VI_USER 0x1be
+#undef MC_CMD_0x1be_PRIVILEGE_CTG
+
+#define MC_CMD_0x1be_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_USER_IN msgrequest */
+#define MC_CMD_SET_VI_USER_IN_LEN 8
+/* Function-relative VI number to modify. */
+#define MC_CMD_SET_VI_USER_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_USER_IN_INSTANCE_LEN 4
+/* Client ID to become the new user. This must be a descendant of the owning
+ * client, the owning client itself, or the special value MC_CMD_CLIENT_ID_SELF
+ * which is synonymous with the owning client.
+ */
+#define MC_CMD_SET_VI_USER_IN_CLIENT_ID_OFST 4
+#define MC_CMD_SET_VI_USER_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_SET_VI_USER_OUT msgresponse */
+#define MC_CMD_SET_VI_USER_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES
+ * A device reports a set of MAC addresses for each client to use, known as the
+ * "permanent MAC addresses". Those MAC addresses are provided by the client's
+ * administrator, e.g. via MC_CMD_SET_CLIENT_MAC_ADDRESSES, and are intended as
+ * a hint to that client which MAC address its administrator would like to use
+ * to identify itself. This API exists solely to allow communication of MAC
+ * addresses from administrator to administree, and has no inherent interaction
+ * with switching within the device. There is no guarantee that a client will
+ * be able to send traffic with a source MAC address taken from the list of MAC
+ * addresses reported, nor is there a guarantee that a client will be able to
+ * receive traffic with a destination MAC taken from the list of MAC
+ * addresses. Likewise, there is no guarantee that a client will not be able to
+ * use a MAC address not present in the list. Restrictions on switching are
+ * controlled either through the EVB API if operating in EVB mode, or via MAE
+ * rules if host software is directly managing the MAE. In order to allow
+ * tenants to use this API whilst a provider is using the EVB API, the MAC
+ * addresses reported by MC_CMD_GET_CLIENT_MAC_ADDRESSES will be augmented with
+ * any MAC addresses associated with the vPort assigned to the caller. In order
+ * to allow tenants to use the EVB API whilst a provider is using this API, if
+ * a client queries the MAC addresses for a vPort using the host_evb_port_id
+ * EVB_PORT_ASSIGNED, that list of MAC addresses will be augmented with the MAC
+ * addresses assigned to the calling client. This query can either be explicit
+ * (i.e. MC_CMD_VPORT_GET_MAC_ADDRESSES) or implicit (e.g. creation of a
+ * vAdaptor with a NULL/automatic MAC address). Changing the MAC address on a
+ * vAdaptor only affects VNIC steering filters; it has no effect on the MAC
+ * addresses assigned to the vAdaptor's owner. VirtIO clients behave as EVB
+ * clients. On VirtIO device reset, a vAdaptor is created with an automatic MAC
+ * address. Querying the VirtIO device's MAC address queries the underlying
+ * vAdaptor's MAC address. Setting the VirtIO device's MAC address sets the
+ * underlying vAdaptor's MAC addresses.
+ */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES 0x1c4
+#undef MC_CMD_0x1c4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN 4
+/* A handle for the client for whom MAC addresses should be obtained. Use
+ * CLIENT_HANDLE_SELF to obtain the MAC addresses assigned to the calling
+ * client.
+ */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_LEN 4
+
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMIN 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX 252
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(num) (0+6*(num))
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_NUM(len) (((len)-0)/6)
+/* An array of MAC addresses assigned to the client. */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_OFST 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_LEN 6
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MINNUM 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MAXNUM 42
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MAXNUM_MCDI2 170
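
/*
 * A minimal sketch of walking the response above: MAC_ADDRS is a packed
 * array of 6-byte addresses, with the count recovered from the response
 * length via the _NUM() helper. mac_cb() is an assumed caller callback.
 */
#include <stddef.h>
#include <stdint.h>

static void for_each_client_mac(const uint8_t *out, size_t outlen,
				void (*mac_cb)(const uint8_t mac[6]))
{
	unsigned int i;
	unsigned int n = MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_NUM(outlen);

	for (i = 0; i < n; i++)
		mac_cb(out +
		       MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_OFST +
		       i * MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_LEN);
}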
+
+
+/***********************************/
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES
+ * Set the permanent MAC addresses for a client. The caller must be an
+ * administrator of the target client. See MC_CMD_GET_CLIENT_MAC_ADDRESSES for
+ * additional detail.
+ */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES 0x1c5
+#undef MC_CMD_0x1c5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMIN 4
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMAX 250
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMAX_MCDI2 1018
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(num) (4+6*(num))
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_NUM(len) (((len)-4)/6)
+/* A handle for the client for whom MAC addresses should be set */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST 0
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_LEN 4
+/* An array of MAC addresses to assign to the client. */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_OFST 4
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_LEN 6
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MINNUM 0
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MAXNUM 41
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MAXNUM_MCDI2 169
+
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_ATTR
+ * Retrieve physical build-level board attributes as configured at
+ * manufacturing stage. Fields originate from EEPROM and per-platform constants
+ * in firmware. Fields are used in development to identify/differentiate
+ * boards based on build levels/parameters, and also in manufacturing to
+ * cross-check that "what was programmed in manufacturing" is the same as
+ * "what firmware thinks has been programmed", as there are two layers of
+ * translation within firmware before the attributes reach this MCDI handler.
+ * Some parameters are
+ * retrieved as part of other commands and therefore not replicated here. See
+ * GET_VERSION_OUT.
+ */
+#define MC_CMD_GET_BOARD_ATTR 0x1c6
+#undef MC_CMD_0x1c6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
+#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
+#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
+/* Defines board capabilities and validity of attributes returned in this
+ * response-message.
+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
+/* enum: The FPGA voltage on the adapter can be set to low */
+#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
+/* enum: The FPGA voltage on the adapter can be set to regular */
+#define MC_CMD_FPGA_VOLTAGE_REG 0x1
+/* enum: The FPGA voltage on the adapter can be set to high */
+#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
+/* An array of cage types on the board */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
+/* enum: The cages are not known */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
+/* enum: The cages are SFP/SFP+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
+/* enum: The cages are QSFP/QSFP+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
+
+
+/***********************************/
+/* MC_CMD_GET_SOC_STATE
+ * Retrieve current state of the System-on-Chip. This command is valid when
+ * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
+ */
+#define MC_CMD_GET_SOC_STATE 0x1c7
+#undef MC_CMD_0x1c7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SOC_STATE_IN msgrequest */
+#define MC_CMD_GET_SOC_STATE_IN_LEN 0
+
+/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
+#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
+/* Status flags for the SoC */
+#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
+/* Status fields for the SoC */
+#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
+#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
+/* enum: Power on (set by SUC on power up) */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
+/* enum: Running bootloader */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
+/* enum: Bootloader has started OS. OS is booting */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
+/* enum: OS is running */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
+/* enum: Maintenance OS is running */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
+/* Number of SoC resets since power on */
+#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
+#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CHECK_SCHEDULER_CREDITS
+ * For debugging purposes. For each source and destination node in the hardware
+ * schedulers, check whether the number of credits is as it should be. This
+ * should only be used when the NIC is idle, because collection is not atomic
+ * and because the expected credit counts are only meaningful when no traffic
+ * is flowing.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS 0x1c8
+#undef MC_CMD_0x1c8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_IN msgrequest */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN 8
+/* Flags for the request */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_FLAGS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_FLAGS_LEN 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_LBN 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_WIDTH 1
+/* If there are too many results to fit into an MCDI response, they're split
+ * into pages. This field specifies which (0-indexed) page to request. A
+ * request with PAGE=0 will snapshot the results, and subsequent requests with
+ * PAGE>0 will return data from the most recent snapshot. The GENERATION field
+ * in the response allows callers to verify that all responses correspond to
+ * the same snapshot.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_LEN 4
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_OUT msgresponse */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMIN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX 240
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX_MCDI2 1008
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LEN(num) (16+16*(num))
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_NUM(len) (((len)-16)/16)
+/* The total number of results (across all pages). */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_LEN 4
+/* The number of pages that the response is split across. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_LEN 4
+/* The number of results in this response. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_OFST 8
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_LEN 4
+/* Result generation count. Incremented any time a request is made with PAGE=0.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST 12
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_LEN 4
+/* The results, as an array of SCHED_CREDIT_CHECK_RESULT structures. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_OFST 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_LEN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MINNUM 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM 14
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM_MCDI2 62
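
/*
 * A minimal sketch of the paged collection described above: PAGE=0 takes the
 * snapshot, and later pages are cross-checked against the GENERATION of the
 * first response so a superseded snapshot (another caller re-snapshotting in
 * between) is detected. mcdi_rpc(), put_le32() and get_le32() are assumed
 * helpers as in the earlier sketches; FLAGS are left zero (REPORT_ALL clear).
 */
#include <stddef.h>
#include <stdint.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen);		/* assumed transport */
void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v); /* as earlier */
uint32_t get_le32(const uint8_t *buf, unsigned int ofst);   /* as earlier */

static int collect_credit_results(void)
{
	uint8_t in[MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN] = { 0 };
	uint8_t out[MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX];
	uint32_t page = 0, num_pages, generation = 0, gen;
	int rc;

	do {
		put_le32(in, MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_OFST,
			 page);
		rc = mcdi_rpc(MC_CMD_CHECK_SCHEDULER_CREDITS, in, sizeof(in),
			      out, sizeof(out));
		if (rc)
			return rc;
		gen = get_le32(out,
			MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST);
		if (page == 0)
			generation = gen;
		else if (gen != generation)
			return -1;	/* snapshot superseded; retry */
		num_pages = get_le32(out,
			MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST);
		/* ... consume RESULTS_THIS_PAGE entries at RESULTS_OFST ... */
	} while (++page < num_pages);
	return 0;
}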
+
+
+/***********************************/
+/* MC_CMD_TXQ_STATS
+ * Query per-TXQ statistics.
+ */
+#define MC_CMD_TXQ_STATS 0x1d5
+#undef MC_CMD_0x1d5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TXQ_STATS_IN msgrequest */
+#define MC_CMD_TXQ_STATS_IN_LEN 8
+/* Instance of TXQ to retrieve statistics for */
+#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
+#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
+/* Flags for the request */
+#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
+#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
+#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
+#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
+#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
+
+/* MC_CMD_TXQ_STATS_OUT msgresponse */
+#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
+#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
+#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
+#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
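
/*
 * A minimal sketch of reading one 64-bit statistic from the response above:
 * each entry is 8 bytes, indexed by the enum values (currently only
 * CTPIO_MAX_FILL). get_le32() is an assumed little-endian accessor as in the
 * earlier sketches.
 */
#include <stddef.h>
#include <stdint.h>

uint32_t get_le32(const uint8_t *buf, unsigned int ofst);   /* as earlier */

static uint64_t txq_stat(const uint8_t *out, size_t outlen, unsigned int idx)
{
	unsigned int ofst = MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST +
			    idx * MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN;

	if (idx >= MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(outlen))
		return 0;	/* statistic not reported by this firmware */
	return (uint64_t)get_le32(out, ofst + 4) << 32 | get_le32(out, ofst);
}

/* e.g. txq_stat(out, outlen, MC_CMD_TXQ_STATS_CTPIO_MAX_FILL) */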
+
/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
* defined in SF-120734-TC with more information in SF-122717-TC.
*/
@@ -21044,7 +25112,13 @@
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LBN 0
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LBN 32
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_WIDTH 32
/***********************************/
@@ -21075,13 +25149,50 @@
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LBN 64
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LBN 96
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_WIDTH 32
/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */
#define MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0
/***********************************/
+/* MC_CMD_VIRTIO_GET_CAPABILITIES
+ * Get virtio capabilities supported by the device. Returns general virtio
+ * capabilities and limitations of the hardware / firmware implementation
+ * (hardware device as a whole), rather than those of individual configured
+ * virtio devices. At present, only the absolute maximum number of queues
+ * allowed on multi-queue devices is returned. Response is expected to be
+ * extended as necessary in the future.
+ */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
+#undef MC_CMD_0x1d3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
+/* Type of device to get capabilities for. Matches the device id as defined by
+ * the virtio spec.
+ */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+
+/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
+/* Maximum number of queues supported for a single device instance */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
+
+
+/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -21133,17 +25244,35 @@
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LBN 128
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LBN 160
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_WIDTH 32
/* Address of the available ring in the virtqueue. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LBN 192
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LBN 224
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_WIDTH 32
/* Address of the used ring in the virtqueue. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LBN 256
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LBN 288
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_WIDTH 32
/* PASID to use on PCIe transactions involving this queue. Ignored if the
* USE_PASID flag is not set.
*/
@@ -21167,21 +25296,35 @@
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LBN 384
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LBN 416
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */
-/* The inital producer index for this queue's used ring. If this queue is being
- * created to be migrated into, this should be the FINAL_PIDX value returned by
- * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. Otherwise, it
+/* The initial available index for this virtqueue. If this queue is being
+ * created to be migrated into, this should be the FINAL_AVAIL_IDX value
+ * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or
+ * equivalent if the original queue was on a thirdparty device). Otherwise, it
* should be zero.
*/
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST 56
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_LEN 4
+/* Alias of INITIAL_AVAIL_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4
-/* The inital consumer index for this queue's available ring. If this queue is
- * being created to be migrated into, this should be the FINAL_CIDX value
- * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from.
- * Otherwise, it should be zero.
- */
+/* The initial used index for this virtqueue. If this queue is being created to
+ * be migrated into, this should be the FINAL_USED_IDX value returned by
+ * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or equivalent if
+ * the original queue was on a thirdparty device). Otherwise, it should be
+ * zero.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST 60
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_LEN 4
+/* Alias of INITIAL_USED_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4
/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated
@@ -21226,10 +25369,16 @@
/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8
-/* The producer index of the used ring when the queue was stopped. */
+/* The available index of the virtqueue when the queue was stopped. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST 0
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_LEN 4
+/* Alias of FINAL_AVAIL_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4
-/* The consumer index of the available ring when the queue was stopped. */
+/* The used index of the virtqueue when the queue was stopped. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST 4
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_LEN 4
+/* Alias of FINAL_USED_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4
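
/*
 * A minimal sketch of the queue migration handoff described above: the
 * FINAL_AVAIL_IDX/FINAL_USED_IDX pair returned by MC_CMD_VIRTIO_FINI_QUEUE
 * on the source seeds INITIAL_AVAIL_IDX/INITIAL_USED_IDX in the
 * destination's MC_CMD_VIRTIO_INIT_QUEUE request; for a brand-new queue both
 * values are simply zero. get_le32()/put_le32() are assumed helpers as in
 * the earlier sketches.
 */
#include <stdint.h>

uint32_t get_le32(const uint8_t *buf, unsigned int ofst);   /* as earlier */
void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v); /* as earlier */

struct vq_migration_state {
	uint32_t avail_idx;
	uint32_t used_idx;
};

/* Extract the final indices from a FINI_QUEUE response... */
static void vq_save_state(const uint8_t *fini_resp,
			  struct vq_migration_state *st)
{
	st->avail_idx = get_le32(fini_resp,
			MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST);
	st->used_idx = get_le32(fini_resp,
			MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST);
}

/* ...and plant them in an INIT_QUEUE request for the destination queue. */
static void vq_restore_state(uint8_t *init_req,
			     const struct vq_migration_state *st)
{
	put_le32(init_req,
		 MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST,
		 st->avail_idx);
	put_le32(init_req,
		 MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST,
		 st->used_idx);
}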
@@ -21309,16 +25458,40 @@
#define PCIE_FUNCTION_VF_NULL 0xffff
#define PCIE_FUNCTION_VF_LBN 16
#define PCIE_FUNCTION_VF_WIDTH 16
-/* PCIe interface of the function */
+/* PCIe interface of the function. Values should be taken from the
+ * PCIE_INTERFACE enum
+ */
#define PCIE_FUNCTION_INTF_OFST 4
#define PCIE_FUNCTION_INTF_LEN 4
-/* enum: Host PCIe interface */
+/* enum: Host PCIe interface. (Alias for HOST_PRIMARY, provided for backwards
+ * compatibility)
+ */
#define PCIE_FUNCTION_INTF_HOST 0x0
-/* enum: Application Processor interface */
+/* enum: Application Processor interface (alias for NIC_EMBEDDED, provided for
+ * backwards compatibility)
+ */
#define PCIE_FUNCTION_INTF_AP 0x1
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
+/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
+ * (absolute VI number + VI relative queue number). On Keystone, a VI can
+ * contain multiple queues (at present, up to 2), each with separate controls
+ * for direction. This structure is required to uniquely identify the absolute
+ * source queue for descriptor proxy functions.
+ */
+#define QUEUE_ID_LEN 4
+/* Absolute VI number */
+#define QUEUE_ID_ABS_VI_OFST 0
+#define QUEUE_ID_ABS_VI_LEN 2
+#define QUEUE_ID_ABS_VI_LBN 0
+#define QUEUE_ID_ABS_VI_WIDTH 16
+/* Relative queue number within the VI */
+#define QUEUE_ID_REL_QUEUE_LBN 16
+#define QUEUE_ID_REL_QUEUE_WIDTH 1
+#define QUEUE_ID_RESERVED_LBN 17
+#define QUEUE_ID_RESERVED_WIDTH 15
+
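/*
 * A minimal sketch of composing a QUEUE_ID from the fields above: absolute
 * VI number in bits 0..15, relative queue number in bit 16, reserved bits
 * left zero.
 */
#include <stdint.h>

static uint32_t queue_id(uint16_t abs_vi, unsigned int rel_queue)
{
	return (uint32_t)abs_vi << QUEUE_ID_ABS_VI_LBN |
	       (rel_queue & ((1u << QUEUE_ID_REL_QUEUE_WIDTH) - 1))
		       << QUEUE_ID_REL_QUEUE_LBN;
}
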
/***********************************/
/* MC_CMD_DESC_PROXY_FUNC_CREATE
@@ -21347,7 +25520,19 @@
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
/* The personality to set. The meanings of the personalities are defined in
* SF-120734-TC with more information in SF-122717-TC. At present, we only
* support proxying for VIRTIO_BLK
@@ -21371,7 +25556,19 @@
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
/***********************************/
@@ -21412,7 +25609,13 @@
#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
@@ -21485,7 +25688,13 @@
#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
@@ -21720,7 +25929,19 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
/* Function personality */
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
@@ -21733,6 +25954,10 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
/* enum: Function configuration is pending reset */
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
+/* enum: Function configuration is missing (created, but no configuration
+ * committed)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
/* Generation count to be delivered in an event once the configuration becomes
* live (if status is "pending")
*/
@@ -21742,7 +25967,7 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG
+ * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
*/
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
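
For orientation, a minimal sketch (in the style of the sfc driver's MCDI
accessors from drivers/net/ethernet/sfc/mcdi.h) of checking the status before
reading CONFIG above; the 'outbuf' response buffer and the STATUS field
define, which sits outside this hunk, are assumed:

	u32 status = MCDI_DWORD(outbuf, DESC_PROXY_FUNC_OPEN_OUT_STATUS);

	switch (status) {
	case MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE:
		break;	/* CONFIG is valid and currently applied */
	case MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING:
		break;	/* CONFIG valid; wait for the generation-count event */
	case MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED:
		break;	/* no configuration committed; do not read CONFIG */
	}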
@@ -21780,9 +26005,27 @@
#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
/* Function personality */
#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
@@ -21840,7 +26083,11 @@
* Enable descriptor proxying for function into target event queue. Returns VI
* allocation info for the proxy source function, so that the caller can map
* absolute VI IDs from descriptor proxy events back to the originating
- * function.
+ * function. This is a legacy function that only supports single-queue proxy
+ * devices. It is also limited in that it can only be called after host driver
+ * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
+ * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
+ * supports multi-queue devices and has no dependency on host driver attach.
*/
#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
#undef MC_CMD_0x178_PRIVILEGE_CTG
@@ -21871,8 +26118,44 @@
/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
+ * Enable descriptor proxying for a source queue on a host function into target
+ * event queue. Source queue number is a relative virtqueue number on the
+ * source function (0 to max_virtqueues-1). For a multi-queue device, the
+ * caller must enable all source queues individually. To retrieve absolute VI
+ * information for the source function (so that VI IDs from descriptor proxy
+ * events can be mapped back to source function / queue) see
+ * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
+#undef MC_CMD_0x1d0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
+/* Source relative queue number to enable proxying on */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
+/* Descriptor proxy sink queue (caller function relative). Must be an
+ * extended-width event queue.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
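
A minimal sketch of issuing this command with the sfc driver's MCDI helpers
(MCDI_DECLARE_BUF, MCDI_SET_DWORD and efx_mcdi_rpc); the efx context, the
handle from MC_CMD_DESC_PROXY_FUNC_OPEN and the queue numbers are assumed to
come from the caller:

	static int desc_proxy_enable_queue(struct efx_nic *efx, u32 handle,
					   u32 source_queue, u32 target_evq)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN);

		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE,
			       handle);
		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE,
			       source_queue);
		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ,
			       target_evq);
		/* no response payload: OUT_LEN is 0 */
		return efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}

For a multi-queue device, this would be called once per source queue, as the
comment above requires.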
+
+
+/***********************************/
/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function
+ * Disable descriptor proxying for function. For multi-queue functions,
+ * disables all queues.
*/
#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
#undef MC_CMD_0x179_PRIVILEGE_CTG
@@ -21892,6 +26175,75 @@
/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
+ * Disable descriptor proxying for a specific source queue on a function.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
+#undef MC_CMD_0x1d1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
+/* Source relative queue number to disable proxying on */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_GET_VI_INFO
+ * Returns absolute VI allocation information for the descriptor proxy source
+ * function referenced by HANDLE, so that the caller can map absolute VI IDs
+ * from descriptor proxy events back to the originating function and queue. The
+ * call is only valid after the host driver for the source function has
+ * attached (after receiving a driver attach event for the descriptor proxy
+ * function) and will fail with ENOTCONN otherwise.
+ */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
+#undef MC_CMD_0x1d2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
+/* VI information (VI ID + VI relative queue number) for each of the source
+ * queues (in order from 0 to max_virtqueues-1), as an array of QUEUE_ID
+ * structures.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
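
A sketch of walking the VI_MAP array, assuming the sfc MCDI_ARRAY_DWORD
accessor and a successful RPC that returned 'outlen' bytes; the bit layout
follows the ABS_VI/REL_QUEUE defines above:

	static void vi_map_walk(const efx_dword_t *outbuf, size_t outlen)
	{
		size_t n = MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(outlen);
		size_t i;

		for (i = 0; i < n; i++) {
			u32 entry = MCDI_ARRAY_DWORD(outbuf,
					DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP, i);
			u16 abs_vi = entry & 0xffff;	/* ABS_VI: bits 0-15 */
			bool rel_q = entry & BIT(16);	/* REL_QUEUE: bit 16 */

			/* source relative queue i lives at (abs_vi, rel_q) */
		}
	}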
+
+
+/***********************************/
/* MC_CMD_GET_ADDR_SPC_ID
* Get Address space identifier for use in mem2mem descriptors for a given
* target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
@@ -21942,7 +26294,19 @@
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
@@ -21962,7 +26326,3381 @@
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
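
The 64-bit ID is split into LO/HI dwords as above; with the sfc MCDI_QWORD
helper it can be read back in one step (a sketch, 'outbuf' assumed):

	u64 addr_spc_id = MCDI_QWORD(outbuf, GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID);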
+
+
+/***********************************/
+/* MC_CMD_GET_CLIENT_HANDLE
+ * Obtain a handle for a client given a description of that client. N.B. this
+ * command is subject to change given the open discussion about how PCIe
+ * functions should be referenced on an iEP (integrated endpoint: functions
+ * span multiple buses) or a multihost (multiple PCIe interfaces) system.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE 0x1c3
+#undef MC_CMD_0x1c3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLIENT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_LEN 12
+/* Type of client to get a client handle for */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_LEN 4
+/* enum: Obtain a client handle for a PCIe function-type client. */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC 0x0
+/* PCIe Function ID (as struct PCIE_FUNCTION). Valid when TYPE==FUNC. Use:
+ * - INTF=CALLER, PF=PF_NULL, VF=VF_NULL to refer to the calling function
+ * - INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF
+ *   or a sibling VF of the calling VF
+ * - INTF=CALLER, PF=..., VF=VF_NULL to refer to a PF on the calling interface
+ * - INTF=CALLER, PF=..., VF=... to refer to a VF on the calling interface
+ * - INTF=..., PF=..., VF=VF_NULL to refer to a PF on a named interface
+ * - INTF=..., PF=..., VF=... to refer to a VF on a named interface
+ * where ... refers to a small integer for the VF/PF fields, and to values
+ * from the PCIE_INTERFACE enum for the INTF field. It's only meaningful to
+ * use INTF=CALLER within a structure that's an argument to
+ * MC_CMD_DEVEL_GET_CLIENT_HANDLE.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LEN 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LBN 32
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_WIDTH 32
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_OFST 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LBN 64
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_WIDTH 32
+/* enum: NULL value for the INTF field of struct PCIE_FUNCTION. Provided for
+ * backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_LEN 2
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_OFST 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_LEN 4
+
+/* MC_CMD_GET_CLIENT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_OFST 0
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_LEN 4
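
A sketch of the request/response round trip using the sfc MCDI helpers,
addressing VF 'vf' of PF 'pf' on the caller's own interface via the
backwards-compatible INTF_NULL value above; efx and the helper names are
assumed from drivers/net/ethernet/sfc/mcdi.h:

	static int get_client_handle_vf(struct efx_nic *efx, u16 pf, u16 vf,
					u32 *handle)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_HANDLE_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
		size_t outlen;
		int rc;

		MCDI_SET_DWORD(inbuf, GET_CLIENT_HANDLE_IN_TYPE,
			       MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
		/* PCIE_FUNCTION packing: PF bits 0-15, VF 16-31, INTF 32-63 */
		MCDI_SET_QWORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC,
			       (u64)pf | (u64)vf << 16 |
			       (u64)MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL << 32);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_HANDLE, inbuf,
				  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_CLIENT_HANDLE_OUT_LEN)
			return -EIO;
		*handle = MCDI_DWORD(outbuf, GET_CLIENT_HANDLE_OUT_HANDLE);
		return 0;
	}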
+
+/* MAE_FIELD_FLAGS structuredef */
+#define MAE_FIELD_FLAGS_LEN 4
+#define MAE_FIELD_FLAGS_FLAT_OFST 0
+#define MAE_FIELD_FLAGS_FLAT_LEN 4
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_OFST 0
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_LBN 0
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_WIDTH 6
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_OFST 0
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_LBN 6
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_WIDTH 1
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_OFST 0
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_LBN 7
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_WIDTH 1
+#define MAE_FIELD_FLAGS_FLAT_LBN 0
+#define MAE_FIELD_FLAGS_FLAT_WIDTH 32
+
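A sketch of decoding one MAE_FIELD_FLAGS word using the LBN/WIDTH defines
above (the word is assumed to be in CPU byte order already):

	static void mae_field_flags_decode(u32 flat)
	{
		u32 support = (flat >> MAE_FIELD_FLAGS_SUPPORT_STATUS_LBN) &
			      ((1u << MAE_FIELD_FLAGS_SUPPORT_STATUS_WIDTH) - 1);
		bool mask_affects =
			flat & BIT(MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_LBN);
		bool match_affects =
			flat & BIT(MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_LBN);

		/* interpret 'support' against the support-status enum defined
		 * elsewhere in this header
		 */
	}
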
+/* MAE_ENC_FIELD_PAIRS structuredef: Mask and value pairs for all fields that
+ * it makes sense to use to determine the encapsulation type of a packet. Its
+ * intended use is to keep a common packing of fields across multiple MCDI
+ * commands, keeping things inherently synchronised and allowing code to be
+ * shared. To use it in an MCDI command, the command should end with a
+ * variable-length byte array populated with this structure. Do not extend
+ * this structure. Instead, create _Vx versions with the necessary fields
+ * appended. That way, the existing semantics for extending MCDI commands are
+ * preserved.
+ */
+#define MAE_ENC_FIELD_PAIRS_LEN 156
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST 8
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LBN 64
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 10
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 80
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_OFST 12
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LBN 96
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 14
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 112
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_OFST 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LBN 128
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 18
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 144
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_OFST 20
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LBN 160
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 22
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 176
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_OFST 24
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LBN 192
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 26
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 208
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_OFST 28
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LBN 224
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 34
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 272
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_OFST 40
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LBN 320
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 46
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 368
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_OFST 52
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LBN 416
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 56
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 448
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_OFST 60
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LBN 480
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 76
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 608
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_OFST 92
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LBN 736
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_OFST 96
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LBN 768
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_OFST 100
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LBN 800
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_OFST 116
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LBN 928
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_OFST 132
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LBN 1056
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_OFST 133
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LBN 1064
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_OFST 134
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LBN 1072
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_OFST 135
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LBN 1080
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_OFST 136
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LBN 1088
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_OFST 137
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LBN 1096
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_WIDTH 8
+/* Deprecated in favour of ENC_FLAGS alias. */
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_LBN 0
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_LBN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_LBN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LBN 1104
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_WIDTH 8
+/* More generic alias for ENC_VLAN_FLAGS. */
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LBN 1104
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_WIDTH 8
+/* Deprecated in favour of ENC_FLAGS_MASK alias. */
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_LBN 0
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_LBN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_LBN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LBN 1112
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_WIDTH 8
+/* More generic alias for ENC_FLAGS_MASK. */
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LBN 1112
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_OFST 140
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LBN 1120
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 144
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 1152
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_OFST 148
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LBN 1184
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 150
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 1200
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_OFST 152
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LBN 1216
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 154
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 1232
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16
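
Since the _OFST values above are byte offsets and the _BE fields are
big-endian, a pair can be populated with plain memcpy; a hypothetical sketch
matching the encapsulated EtherType exactly:

	static void enc_pairs_match_ether_type(u8 *pairs, __be16 ether_type)
	{
		__be16 mask = cpu_to_be16(0xffff);	/* exact match */

		memcpy(pairs + MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST,
		       &ether_type, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LEN);
		memcpy(pairs + MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST,
		       &mask, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN);
	}

The 'pairs' buffer is MAE_ENC_FIELD_PAIRS_LEN (156) bytes appended to the
MCDI request, per the structuredef comment above.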
+
+/* MAE_FIELD_MASK_VALUE_PAIRS structuredef: Mask and value pairs for all fields
+ * currently defined. Same semantics as MAE_ENC_FIELD_PAIRS.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_LEN 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_OFST 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LBN 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_OFST 12
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LBN 96
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_OFST 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LBN 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_OFST 18
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LBN 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_OFST 20
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LBN 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_OFST 22
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LBN 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_OFST 24
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LBN 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_OFST 26
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LBN 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_OFST 28
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LBN 224
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_OFST 30
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LBN 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_OFST 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LBN 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_OFST 34
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LBN 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_OFST 36
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LBN 288
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_OFST 42
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LBN 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_OFST 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LBN 384
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_OFST 54
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LBN 432
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_OFST 60
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LBN 480
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_OFST 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LBN 512
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_OFST 68
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LBN 544
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_OFST 84
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LBN 672
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_OFST 100
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LBN 800
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_OFST 104
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LBN 832
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_OFST 108
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LBN 864
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_OFST 124
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LBN 992
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_OFST 140
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LBN 1120
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_OFST 141
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LBN 1128
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_OFST 142
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LBN 1136
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_OFST 143
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LBN 1144
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_WIDTH 8
+/* Due to hardware limitations, firmware may return
+ * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value
+ * other than 1.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_OFST 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LBN 1152
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_OFST 145
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LBN 1160
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_OFST 148
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LBN 1184
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_OFST 152
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LBN 1216
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_OFST 156
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LBN 1248
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_OFST 158
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LBN 1264
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_OFST 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LBN 1280
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_OFST 162
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LBN 1296
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_OFST 164
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LBN 1312
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_OFST 166
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LBN 1328
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_OFST 168
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LBN 1344
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_OFST 172
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LBN 1376
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_OFST 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LBN 1408
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_OFST 180
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LBN 1440
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_OFST 184
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LBN 1472
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 188
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 1504
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_OFST 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LBN 1536
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 194
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 1552
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_OFST 196
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LBN 1568
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 198
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 1584
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_OFST 200
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LBN 1600
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 202
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 1616
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_OFST 204
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LBN 1632
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 206
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 1648
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_OFST 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LBN 1664
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 214
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 1712
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_OFST 220
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LBN 1760
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 226
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 1808
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_OFST 232
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LBN 1856
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 236
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 1888
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_OFST 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LBN 1920
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 2048
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_OFST 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LBN 2176
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_OFST 276
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LBN 2208
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_OFST 280
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LBN 2240
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_OFST 296
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LBN 2368
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_OFST 312
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LBN 2496
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_OFST 313
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LBN 2504
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_OFST 314
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LBN 2512
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_OFST 315
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LBN 2520
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_OFST 316
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LBN 2528
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_OFST 317
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LBN 2536
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_OFST 320
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LBN 2560
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 324
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 2592
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_OFST 328
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LBN 2624
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 330
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 2640
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_OFST 332
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LBN 2656
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 334
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 2672
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_OFST 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LBN 2688
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_OFST 340
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LBN 2720
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_WIDTH 32
+
+/* MAE_FIELD_MASK_VALUE_PAIRS_V2 structuredef */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN 372
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_OFST 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LBN 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_OFST 12
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LBN 96
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_OFST 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LBN 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_OFST 18
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LBN 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_OFST 20
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LBN 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_OFST 22
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LBN 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_OFST 24
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LBN 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_OFST 26
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LBN 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_OFST 28
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LBN 224
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_OFST 30
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LBN 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_OFST 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LBN 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_OFST 34
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LBN 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_OFST 36
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LBN 288
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_OFST 42
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LBN 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_OFST 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LBN 384
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_OFST 54
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LBN 432
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_OFST 60
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LBN 480
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_OFST 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LBN 512
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_OFST 68
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LBN 544
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_OFST 84
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LBN 672
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_OFST 100
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LBN 800
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_OFST 104
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LBN 832
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_OFST 108
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LBN 864
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_OFST 124
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LBN 992
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_OFST 140
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LBN 1120
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_OFST 141
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LBN 1128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_OFST 142
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LBN 1136
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_OFST 143
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LBN 1144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_WIDTH 8
+/* Due to hardware limitations, firmware may return
+ * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value
+ * other than 1.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_OFST 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LBN 1152
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_OFST 145
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LBN 1160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_OFST 148
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LBN 1184
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_OFST 152
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LBN 1216
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_OFST 156
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LBN 1248
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_OFST 158
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LBN 1264
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_OFST 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LBN 1280
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_OFST 162
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LBN 1296
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_OFST 164
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LBN 1312
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_OFST 166
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LBN 1328
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_OFST 168
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LBN 1344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_OFST 172
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LBN 1376
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_OFST 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LBN 1408
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_OFST 180
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LBN 1440
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_OFST 184
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LBN 1472
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_OFST 188
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LBN 1504
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_OFST 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LBN 1536
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_OFST 194
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LBN 1552
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_OFST 196
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LBN 1568
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_OFST 198
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LBN 1584
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_OFST 200
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LBN 1600
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_OFST 202
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LBN 1616
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_OFST 204
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LBN 1632
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_OFST 206
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LBN 1648
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_OFST 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LBN 1664
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_OFST 214
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LBN 1712
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_OFST 220
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LBN 1760
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_OFST 226
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LBN 1808
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_OFST 232
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LBN 1856
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_OFST 236
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LBN 1888
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_OFST 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LBN 1920
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_OFST 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LBN 2048
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_OFST 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LBN 2176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_OFST 276
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LBN 2208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_OFST 280
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LBN 2240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_OFST 296
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LBN 2368
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_OFST 312
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LBN 2496
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_OFST 313
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LBN 2504
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_OFST 314
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LBN 2512
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_OFST 315
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LBN 2520
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_OFST 316
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LBN 2528
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_OFST 317
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LBN 2536
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_OFST 320
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LBN 2560
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_OFST 324
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LBN 2592
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_OFST 328
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LBN 2624
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_OFST 330
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LBN 2640
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_OFST 332
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LBN 2656
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_OFST 334
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LBN 2672
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_OFST 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LBN 2688
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_OFST 340
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LBN 2720
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_LBN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_LBN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_LBN 3
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_LBN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_LBN 5
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_LBN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_LBN 7
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_LBN 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_LBN 9
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LBN 2752
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_OFST 348
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LBN 2784
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_OFST 352
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LBN 2816
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_OFST 354
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LBN 2832
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_OFST 356
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LBN 2848
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_OFST 360
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LBN 2880
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_OFST 364
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LBN 2912
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_OFST 365
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LBN 2920
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_OFST 366
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LBN 2928
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_OFST 367
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LBN 2936
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_OFST 368
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LBN 2944
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_OFST 369
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LBN 2952
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_OFST 370
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LBN 2960
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_OFST 371
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LBN 2968
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_WIDTH 8
+
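
Every match field above is emitted as the same four-define pattern: _OFST (byte offset into the structure), _LEN (byte length), _LBN (low bit number within the whole structure, always 8*_OFST here) and _WIDTH (bit width), with the corresponding _MASK_* defines locating the field's match mask. A minimal sketch of how a client might use these, assuming a raw byte buffer and a hand-rolled big-endian writer rather than any particular driver's MCDI accessors:

#include <stdint.h>

/* Illustrative only: store a 16-bit value big-endian at a byte offset
 * inside a raw MCDI structure buffer.
 */
static void put_be16(uint8_t *buf, unsigned int ofst, uint16_t val)
{
	buf[ofst] = val >> 8;
	buf[ofst + 1] = val & 0xff;
}

/* Match exactly on the encapsulated EtherType: write the value and an
 * all-ones mask at the generated offsets.
 */
static void set_enc_ether_type(uint8_t *mvp, uint16_t ether_type)
{
	put_be16(mvp, MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_OFST,
		 ether_type);
	put_be16(mvp, MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_OFST,
		 0xffff);
}
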
+/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
+ * integer value (mport_id) that is guaranteed to be representable within
+ * 32-bits or within any NIC interface field that needs to store the value
+ * (whichever is narrower). This selector structure provides a stable way to
+ * refer to m-ports.
+ */
+#define MAE_MPORT_SELECTOR_LEN 4
+/* Used to force the tools to output bitfield-style defines for this structure.
+ */
+#define MAE_MPORT_SELECTOR_FLAT_OFST 0
+#define MAE_MPORT_SELECTOR_FLAT_LEN 4
+/* enum: An m-port selector value that is guaranteed never to represent a real
+ * mport
+ */
+#define MAE_MPORT_SELECTOR_NULL 0x0
+/* enum: The m-port assigned to the calling client. */
+#define MAE_MPORT_SELECTOR_ASSIGNED 0x1000000
+#define MAE_MPORT_SELECTOR_TYPE_OFST 0
+#define MAE_MPORT_SELECTOR_TYPE_LBN 24
+#define MAE_MPORT_SELECTOR_TYPE_WIDTH 8
+/* enum: The MPORT connected to a given physical port */
+#define MAE_MPORT_SELECTOR_TYPE_PPORT 0x2
+/* enum: The MPORT assigned to a given PCIe function. Deprecated in favour of
+ * MH_FUNC.
+ */
+#define MAE_MPORT_SELECTOR_TYPE_FUNC 0x3
+/* enum: An mport_id */
+#define MAE_MPORT_SELECTOR_TYPE_MPORT_ID 0x4
+/* enum: The MPORT assigned to a given PCIe function (see also FWRIVERHD-1108)
+ */
+#define MAE_MPORT_SELECTOR_TYPE_MH_FUNC 0x5
+/* enum: This is guaranteed never to be a valid selector type */
+#define MAE_MPORT_SELECTOR_TYPE_INVALID 0xff
+#define MAE_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MAE_MPORT_SELECTOR_MPORT_ID_LBN 0
+#define MAE_MPORT_SELECTOR_MPORT_ID_WIDTH 24
+#define MAE_MPORT_SELECTOR_PPORT_ID_OFST 0
+#define MAE_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MAE_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_HOST_PRIMARY 0x1 /* enum */
+#define MAE_MPORT_SELECTOR_NIC_EMBEDDED 0x2 /* enum */
+/* enum: Deprecated, use CALLER_INTF instead. */
+#define MAE_MPORT_SELECTOR_CALLER 0xf
+#define MAE_MPORT_SELECTOR_CALLER_INTF 0xf /* enum */
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_LBN 16
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_WIDTH 8
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_LBN 0
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_WIDTH 16
+/* enum: Used for VF_ID to indicate a physical function. */
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL 0xffff
+/* enum: Used for PF_ID to indicate the physical function of the calling
+ * client.
+ * - When used by a PF with VF_ID == VF_ID_NULL, the mport selector relates
+ *   to the calling function. (For clarity, it is recommended that clients
+ *   use ASSIGNED to achieve this behaviour.)
+ * - When used by a PF with VF_ID != VF_ID_NULL, the mport selector relates
+ *   to a VF child of the calling function.
+ * - When used by a VF with VF_ID == VF_ID_NULL, the mport selector relates
+ *   to the PF owning the calling function.
+ * - When used by a VF with VF_ID != VF_ID_NULL, the mport selector relates
+ *   to a sibling VF of the calling function.
+ * - Not meaningful when used by a client that is not a PCIe function.
+ */
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER 0xff
+/* enum: Same as PF_ID_CALLER, but for use in the smaller MH_PF_ID field. Only
+ * valid if FUNC_INTF_ID is CALLER.
+ */
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_CALLER 0xf
+#define MAE_MPORT_SELECTOR_FLAT_LBN 0
+#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
+
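Since TYPE occupies bits 31:24 and the ID subfields occupy the low bits, selectors can be composed with plain shifts; note that the ASSIGNED value above (0x1000000) is simply TYPE 0x1 with a zero ID. A hedged sketch of composing selectors from the subfield defines (not the driver's own helpers):

#include <stdint.h>

/* Illustrative sketches, not the driver's own helpers. */
static uint32_t mae_mport_selector_pport(uint32_t pport_id)
{
	/* Physical port: TYPE in bits 31:24, PPORT_ID in bits 3:0. */
	return ((uint32_t)MAE_MPORT_SELECTOR_TYPE_PPORT <<
		MAE_MPORT_SELECTOR_TYPE_LBN) |
	       (pport_id << MAE_MPORT_SELECTOR_PPORT_ID_LBN);
}

static uint32_t mae_mport_selector_by_id(uint32_t mport_id)
{
	/* Opaque mport_id: guaranteed to fit in the 24-bit ID field. */
	return ((uint32_t)MAE_MPORT_SELECTOR_TYPE_MPORT_ID <<
		MAE_MPORT_SELECTOR_TYPE_LBN) |
	       (mport_id & ((1u << MAE_MPORT_SELECTOR_MPORT_ID_WIDTH) - 1));
}
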
+/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
+ * virtual network port by MAE port and link end
+ */
+#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
+/* The MAE MPORT of interest */
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_WIDTH 32
+/* Which end of the link identified by MPORT to consider */
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_OFST 4
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MPORT_END */
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LBN 32
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_WIDTH 32
+/* A field for accessing the endpoint selector as a collection of bits */
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LEN 8
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_WIDTH 32
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_OFST 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LBN 32
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_WIDTH 32
+/* enum: Set FLAT to this value to obtain backward-compatible behaviour in
+ * commands that have been extended to take a MAE_LINK_ENDPOINT_SELECTOR
+ * argument. New commands that are designed to take such an argument from the
+ * start will not support this.
+ */
+#define MAE_LINK_ENDPOINT_SELECTOR_MAE_LINK_ENDPOINT_COMPAT 0x0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_WIDTH 64
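
A small sketch of filling the 8-byte endpoint selector from its two dword fields, assuming a little-endian host in place of proper endian-safe MCDI accessors:

#include <stdint.h>
#include <string.h>

/* Assumes a little-endian host; a real driver would use endian-safe
 * accessors. link_end takes a value from the MAE_MPORT_END enum.
 */
static void mae_link_endpoint_fill(uint8_t *buf, uint32_t mport_selector,
				   uint32_t link_end)
{
	memset(buf, 0, MAE_LINK_ENDPOINT_SELECTOR_LEN);
	memcpy(buf + MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST,
	       &mport_selector, MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN);
	memcpy(buf + MAE_LINK_ENDPOINT_SELECTOR_LINK_END_OFST,
	       &link_end, MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LEN);
}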
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_CAPS
+ * Describes capabilities of the MAE (Match-Action Engine)
+ */
+#define MC_CMD_MAE_GET_CAPS 0x140
+#undef MC_CMD_0x140_PRIVILEGE_CTG
+
+#define MC_CMD_0x140_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_GET_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_OUT_LEN 52
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response no longer than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_LEN 4
+
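The length-based rule above generalises across MCDI: a field is present only if the response is long enough to contain it, and an absent trailing field reads as zero. A sketch under that assumption, with get_le32() as a hypothetical little-endian reader:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical little-endian reader. */
static uint32_t get_le32(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

static uint32_t mae_caps_api_ver(const uint8_t *resp, size_t resp_len)
{
	if (resp_len < MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST +
		       MC_CMD_MAE_GET_CAPS_OUT_API_VER_LEN)
		return 0;	/* field absent: older firmware, version 0 */
	return get_le32(resp, MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST);
}
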
+/* MC_CMD_MAE_GET_CAPS_V2_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_LEN 60
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response no longer than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_LEN 4
+/* Mask of supported counter types. Each bit position corresponds to a value of
+ * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response),
+ * clients must assume that only AR counters are supported (i.e.
+ * COUNTER_TYPES_SUPPORTED==0x1). See also
+ * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_OFST 52
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_LEN 4
+/* The total number of conntrack counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_OFST 56
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_LEN 4
+
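Since each bit position of COUNTER_TYPES_SUPPORTED corresponds to an MAE_COUNTER_TYPE enum value (defined elsewhere in this header; AR is 0, so a V1-only NIC is equivalent to a mask of 0x1), a support check is a single bit test, e.g.:

#include <stdbool.h>
#include <stdint.h>

static bool mae_counter_type_supported(uint32_t types_supported,
				       unsigned int counter_type)
{
	return types_supported & (1u << counter_type);
}
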
+/* MC_CMD_MAE_GET_CAPS_V3_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_LEN 64
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response no longer than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_LEN 4
+/* Mask of supported counter types. Each bit position corresponds to a value of
+ * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response),
+ * clients must assume that only AR counters are supported (i.e.
+ * COUNTER_TYPES_SUPPORTED==0x1). See also
+ * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_OFST 52
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_LEN 4
+/* The total number of conntrack counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_OFST 56
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_LEN 4
+/* The total number of Outer Rule counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_OFST 60
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_AR_CAPS
+ * Get the level of support for match fields when used in match-action rules.
+ */
+#define MC_CMD_MAE_GET_AR_CAPS 0x141
+#undef MC_CMD_0x141_PRIVILEGE_CTG
+
+#define MC_CMD_0x141_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_GET_AR_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_AR_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_AR_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMIN 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX 252
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4)
+/* Number of fields actually returned in FIELD_FLAGS. */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_OFST 0
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_LEN 4
+/* Array of values indicating the NIC's support for a given field, indexed by
+ * field id. The driver must ensure space for
+ * MC_CMD_MAE_GET_CAPS.MATCH_FIELD_COUNT entries in the array.
+ */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_LEN 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MINNUM 0
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254
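
The _NUM(len) helper inverts the _LEN(num) formula, so the number of returned flag words falls directly out of the response length. A sketch of walking the array, reusing the hypothetical get_le32() reader from the earlier sketch:

#include <stddef.h>
#include <stdint.h>

uint32_t get_le32(const uint8_t *buf, size_t ofst);	/* as sketched above */

static void mae_walk_ar_field_flags(const uint8_t *resp, size_t resp_len)
{
	size_t i, num = MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_NUM(resp_len);

	for (i = 0; i < num; i++) {
		/* Entry i describes the support level for field ID i. */
		uint32_t flags = get_le32(resp,
			MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST +
			i * MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_LEN);
		(void)flags;	/* a real driver would cache these */
	}
}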
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_OR_CAPS
+ * Get the level of support for fields used in outer rule keys.
+ */
+#define MC_CMD_MAE_GET_OR_CAPS 0x142
+#undef MC_CMD_0x142_PRIVILEGE_CTG
+
+#define MC_CMD_0x142_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_GET_OR_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_OR_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_OR_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMIN 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX 252
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4)
+/* Number of fields actually returned in FIELD_FLAGS. */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_OFST 0
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_LEN 4
+/* Same semantics as MC_CMD_MAE_GET_AR_CAPS.FIELD_FLAGS */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_OFST 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_LEN 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MINNUM 0
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTER_ALLOC
+ * Allocate match-action-engine counters, which can be referenced in various
+ * tables.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC 0x143
+#undef MC_CMD_0x143_PRIVILEGE_CTG
+
+#define MC_CMD_0x143_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTER_ALLOC_IN msgrequest: Using this is equivalent to using V2
+ * with COUNTER_TYPE=AR.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_LEN 4
+/* The number of counters that the driver would like allocated */
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_LEN 4
+
+/* MC_CMD_MAE_COUNTER_ALLOC_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN 8
+/* The number of counters that the driver would like allocated */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_LEN 4
+/* Which type of counter to allocate. */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_OFST 4
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_TYPE */
+
+/* MC_CMD_MAE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMIN 12
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(len) (((len)-8)/4)
+/* Generation count. Packets with generation count >= GENERATION_COUNT will
+ * contain valid counter values for counter IDs allocated in this call, unless
+ * the counter values are zero and zero squash is enabled. Note that there is
+ * an independent GENERATION_COUNT object per counter type, and that generation
+ * counts wrap from 0xffffffff to 1.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_LEN 4
+/* enum: Generation counter 0 is reserved and unused. */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_INVALID 0x0
+/* The number of counter IDs that the NIC allocated. It is never less than 1;
+ * failure to allocate a single counter will cause an error to be returned. It
+ * is never greater than REQUESTED_COUNT, but may be less.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_OFST 4
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_LEN 4
+/* An array containing the IDs for the counters allocated. */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 8
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 61
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
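
A hedged end-to-end sketch of the allocation flow: build the V2 request, issue it over an unspecified transport (mcdi_rpc() here is a stand-in, not a real API), and pull out the allocated IDs. A real driver would also record GENERATION_COUNT so it can tell when streamed values for these IDs become valid.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helpers: little-endian accessors and an MCDI transport. */
void put_le32(uint8_t *buf, size_t ofst, uint32_t val);
uint32_t get_le32(const uint8_t *buf, size_t ofst);
int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen_max, size_t *outlen);

static int mae_counter_alloc(uint32_t n_req, uint32_t counter_type,
			     uint32_t *ids, uint32_t *n_alloced)
{
	uint8_t inbuf[MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN] = { 0 };
	uint8_t outbuf[MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX];
	size_t outlen;
	uint32_t i;
	int rc;

	put_le32(inbuf, MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_OFST,
		 n_req);
	put_le32(inbuf, MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_OFST,
		 counter_type);
	rc = mcdi_rpc(MC_CMD_MAE_COUNTER_ALLOC, inbuf, sizeof(inbuf),
		      outbuf, sizeof(outbuf), &outlen);
	if (rc || outlen < MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMIN)
		return rc ? rc : -1;
	/* May be fewer than requested, but never zero on success. */
	*n_alloced = get_le32(outbuf,
			      MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_OFST);
	for (i = 0; i < *n_alloced; i++)
		ids[i] = get_le32(outbuf,
			MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST +
			i * MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN);
	return 0;
}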
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTER_FREE
+ * Free match-action-engine counters
+ */
+#define MC_CMD_MAE_COUNTER_FREE 0x144
+#undef MC_CMD_0x144_PRIVILEGE_CTG
+
+#define MC_CMD_0x144_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTER_FREE_IN msgrequest: Using this is equivalent to using V2
+ * with COUNTER_TYPE=AR.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMIN 8
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX 132
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX_MCDI2 132
+#define MC_CMD_MAE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* The number of counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_LEN 4
+/* An array containing the counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_COUNTER_FREE_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN 136
+/* The number of counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_LEN 4
+/* An array containing the counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32
+/* Which type of counter to free. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_OFST 132
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_TYPE */
+
+/* MC_CMD_MAE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMIN 12
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX 136
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX_MCDI2 136
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_NUM(len) (((len)-8)/4)
+/* Generation count. A packet with generation count == GENERATION_COUNT will
+ * contain the final values for these counter IDs, unless the counter values
+ * are zero and zero squash is enabled. Note that the GENERATION_COUNT value is
+ * specific to the COUNTER_TYPE (IDENTIFIER field in packet header). Receiving
+ * a packet with generation count > GENERATION_COUNT guarantees that no more
+ * values will be written for these counters. If values for these counter IDs
+ * are present, the counter ID has been reallocated. A counter ID will not be
+ * reallocated within a single read cycle as this would merge increments from
+ * the 'old' and 'new' counters. GENERATION_COUNT_INVALID is reserved and
+ * unused.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_LEN 4
+/* The number of counter IDs actually freed. It is never less than 1; failure
+ * to free a single counter will cause an error to be returned. It is never
+ * greater than the number that were requested to be freed, but may be less if
+ * counters could not be freed.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_LEN 4
+/* An array containing the IDs of the counters that were freed. Note that
+ * failure to free a counter can only be due to incorrect driver behaviour, so
+ * asserting that the expected counters were freed is reasonable. When
+ * debugging, attempting to free a single counter at a time will provide a
+ * reason for the failure to free said counter.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_OFST 8
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_START
+ * Start streaming counter values, specifying an RxQ to deliver packets to.
+ * Counters allocated to the calling function will be written in round-robin
+ * order at a fixed cycle rate, assuming sufficient credits are available. The
+ * driver may cause the counter values to be written at a slower rate by
+ * constraining the availability of credits. Note that if the driver wishes to
+ * deliver packets to a different queue, it must call MAE_COUNTERS_STREAM_STOP
+ * to stop delivering packets to the current queue first.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START 0x151
+#undef MC_CMD_0x151_PRIVILEGE_CTG
+
+#define MC_CMD_0x151_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_IN msgrequest: Using V1 is equivalent to V2
+ * with COUNTER_TYPES_MASK=0x1 (i.e. AR counters only).
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_LEN 8
+/* The RxQ to write packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_LEN 2
+/* Maximum size in bytes of packets that may be written to the RxQ. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_OFST 2
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_LEN 2
+/* Optional flags. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_WIDTH 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_LBN 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_WIDTH 1
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN 12
+/* The RxQ to write packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_LEN 2
+/* Maximum size in bytes of packets that may be written to the RxQ. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST 2
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_LEN 2
+/* Optional flags. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_WIDTH 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_LBN 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_WIDTH 1
+/* Mask of which counter types should be reported. Each bit position
+ * corresponds to a value of the MAE_COUNTER_TYPE enum. For example a value of
+ * 0x3 requests both AR and CT counters. A value of zero is invalid. Counter
+ * types not selected by the mask value won't be included in the stream. If a
+ * client wishes to change which counter types are reported, it must first call
+ * MAE_COUNTERS_STREAM_STOP, then restart it with the new mask value.
+ * Requesting a counter type which isn't supported by firmware (reported in
+ * MC_CMD_MAE_GET_CAPS/COUNTER_TYPES_SUPPORTED) will result in ENOTSUP.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST 8
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_WIDTH 1
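
A sketch of filling the V2 request, again with hypothetical little-endian writers; leaving FLAGS at zero keeps zero-squash enabled and counter stall disabled:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical little-endian writers. */
void put_le16(uint8_t *buf, size_t ofst, uint16_t val);
void put_le32(uint8_t *buf, size_t ofst, uint32_t val);

static void mae_stream_start_v2_fill(uint8_t *inbuf, uint16_t rxq,
				     uint16_t pkt_size, uint32_t types_mask)
{
	memset(inbuf, 0, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN);
	put_le16(inbuf, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST, rxq);
	put_le16(inbuf,
		 MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST,
		 pkt_size);
	/* FLAGS left at zero: zero-squash enabled, no counter stall. */
	put_le32(inbuf,
		 MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST,
		 types_mask);	/* e.g. 0x3 for AR and CT counters */
}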
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP
+ * Stop streaming counter values to the specified RxQ.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP 0x152
+#undef MC_CMD_0x152_PRIVILEGE_CTG
+
+#define MC_CMD_0x152_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN 2
+/* The RxQ to stop writing packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_LEN 2
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_LEN 4
+/* Generation count for AR counters. The final set of AR counter values will be
+ * written out in packets with count == GENERATION_COUNT. An empty packet with
+ * count > GENERATION_COUNT indicates that no more counter values of this type
+ * will be written to this stream. GENERATION_COUNT_INVALID is reserved and
+ * unused.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMIN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX 32
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX_MCDI2 32
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_NUM(len) (((len)-0)/4)
+/* Array of generation counts, indexed by MAE_COUNTER_TYPE. Note that since
+ * MAE_COUNTER_TYPE_AR==0, this response is backwards-compatible with V1. The
+ * final set of counter values will be written out in packets with count ==
+ * GENERATION_COUNT. An empty packet with count > GENERATION_COUNT indicates
+ * that no more counter values of this type will be written to this stream.
+ * GENERATION_COUNT_INVALID is reserved and unused.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MINNUM 1
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM 8
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM_MCDI2 8
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS
+ * Give a number of credits to the packetiser. Each credit received allows the
+ * MC to write one packet to the RxQ; therefore, for each credit the driver
+ * must have written sufficient descriptors for a packet of length
+ * MAE_COUNTERS_STREAM_START/PACKET_SIZE and rung the doorbell.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS 0x153
+#undef MC_CMD_0x153_PRIVILEGE_CTG
+
+#define MC_CMD_0x153_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN 4
+/* Number of credits to give to the packetiser. */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT_LEN 0
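
The credit rule above means the driver must never grant more credits than it has posted RX descriptors, or the MC could write a packet the ring cannot absorb. An illustrative accounting sketch (mae_give_credits() stands in for issuing this command):

#include <stdint.h>

int mae_give_credits(uint32_t n);	/* stand-in for issuing this command */

struct counter_stream {
	uint32_t descs_posted;	/* RX descriptors pushed, doorbell rung */
	uint32_t credits_given;	/* credits already granted to the MC */
};

/* Grant only as many credits as descriptors actually posted, so the
 * MC never writes a packet the ring cannot absorb.
 */
static int counter_stream_refill(struct counter_stream *cs)
{
	uint32_t spare = cs->descs_posted - cs->credits_given;

	if (!spare)
		return 0;
	cs->credits_given += spare;
	return mae_give_credits(spare);
}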
+
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC
+ * Allocate an encapsulation header to be used in an Action Rule response. The
+ * header must be constructed as a valid packet with 0-length payload.
+ * Specifically, the L3/L4 lengths & checksums will only be incrementally fixed
+ * by the NIC, rather than recomputed entirely. Currently only IPv4, IPv6 and
+ * UDP are supported. If the maximum number of headers has already been
+ * allocated then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC 0x148
+#undef MC_CMD_0x148_PRIVILEGE_CTG
+
+#define MC_CMD_0x148_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX 252
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(num) (4+1*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_NUM(len) (((len)-4)/1)
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_LEN 1
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MINNUM 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM 248
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_LEN 4
+/* enum: An encap metadata ID that is guaranteed never to represent real encap
+ * metadata
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL 0xffffffff
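
A sketch of wrapping a prebuilt header template (e.g. Ethernet/IPv4/UDP with its length and checksum fields zeroed for the NIC to fix up incrementally) into the variable-length request; the encap-type enum values live elsewhere in this header, and put_le32() is the hypothetical writer used above:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void put_le32(uint8_t *buf, size_t ofst, uint32_t val);	/* hypothetical */

/* Returns the total request length for the given header blob, using
 * the generated _LEN(num) macro.
 */
static size_t mae_encap_hdr_alloc_fill(uint8_t *inbuf, uint32_t encap_type,
				       const uint8_t *hdr, size_t hdr_len)
{
	put_le32(inbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_OFST,
		 encap_type);
	memcpy(inbuf + MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST,
	       hdr, hdr_len);
	return MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(hdr_len);
}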
+
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE
+ * Update encap action metadata. See comments for MAE_ENCAP_HEADER_ALLOC.
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE 0x149
+#undef MC_CMD_0x149_PRIVILEGE_CTG
+
+#define MC_CMD_0x149_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMIN 8
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX 252
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_OFST 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_OFST 8
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_LEN 1
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MINNUM 0
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM 244
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_FREE
+ * Free encap action metadata
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE 0x14a
+#undef MC_CMD_0x14a_PRIVILEGE_CTG
+
+#define MC_CMD_0x14a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_FREE_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MINNUM 1
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM 32
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ENCAP_HEADER_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MINNUM 1
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM 32
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MAC_ADDR_ALLOC
+ * Allocate a MAC address. Hardware implementations have MAC addresses
+ * programmed into an indirection table, and clients should take care not to
+ * allocate the same MAC address twice (but instead reuse its ID). If the
+ * maximum number of MAC addresses has already been allocated then the command
+ * will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC 0x15e
+#undef MC_CMD_0x15e_PRIVILEGE_CTG
+
+#define MC_CMD_0x15e_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MAC_ADDR_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN 6
+/* MAC address as bytes in network order. */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN 6
+
+/* MC_CMD_MAE_MAC_ADDR_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_LEN 4
+/* enum: A MAC address ID that is guaranteed never to represent a real MAC
+ * address.
+ */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL 0xffffffff
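
The dedup guidance above implies the client should keep its own address-to-ID map with a refcount, reusing an existing MAC_ID rather than allocating the same address twice. An illustrative sketch with a fixed-size table standing in for a real lookup structure:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct mae_mac_entry {
	uint8_t addr[MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN];
	uint32_t mac_id;
	unsigned int refcnt;	/* 0 => slot free */
};

/* Return an existing ID (bumping its refcount) or MAC_ID_NULL to tell
 * the caller it must issue MC_CMD_MAE_MAC_ADDR_ALLOC.
 */
static uint32_t mae_mac_lookup(struct mae_mac_entry *tbl, size_t n,
			       const uint8_t *addr)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].refcnt &&
		    !memcmp(tbl[i].addr, addr, sizeof(tbl[i].addr))) {
			tbl[i].refcnt++;
			return tbl[i].mac_id;
		}
	}
	return MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL;
}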
+
+
+/***********************************/
+/* MC_CMD_MAE_MAC_ADDR_FREE
+ * Free a MAC address.
+ */
+#define MC_CMD_MAE_MAC_ADDR_FREE 0x15f
+#undef MC_CMD_0x15f_PRIVILEGE_CTG
+
+#define MC_CMD_0x15f_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MAC_ADDR_FREE_IN msgrequest */
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MINNUM 1
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM 32
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_MAC_ADDR_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MINNUM 1
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM 32
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_ALLOC
+ * Allocate an action set, which can be referenced either in response to an
+ * Action Rule, or as part of an Action Set List. If the maximum number of
+ * action sets has already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC 0x14d
+#undef MC_CMD_0x14d_PRIVILEGE_CTG
+
+#define MC_CMD_0x14d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * COUNTER_LIST_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* If a driver wishes to update only one counter within this action set, it
+ * can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
+ * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_LEN 4
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V2_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V7_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LEN 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * COUNTER_LIST_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* If a driver wishes to update only one counter within this action set, it
+ * can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
+ * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
+/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
+ * ACTION_SET_ID_NULL. This allows an AS_ID to be distinguished from an ASL_ID
+ * returned from MC_CMD_MAE_ACTION_SET_LIST_ALLOC.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_LEN 4
+/* enum: An action set ID that is guaranteed never to represent an action set
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL 0xffffffff
+
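+/* Example (informational): a minimal action set that pops one VLAN tag and
+ * delivers to the caller's own m-port. A sketch assuming the sfc MCDI
+ * helpers; MAE_MPORT_SELECTOR_ASSIGNED and the various *_NULL constants are
+ * defined elsewhere in this file, and the remaining ID fields should
+ * likewise be set to their respective *_NULL values.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
+ *
+ *	MCDI_POPULATE_DWORD_1(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+ *			      MAE_ACTION_SET_ALLOC_IN_VLAN_POP, 1);
+ *	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
+ *		       MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
+ *	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
+ *		       MAE_MPORT_SELECTOR_ASSIGNED);
+ *	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ *		       MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ *	...
+ */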
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_FREE
+ */
+#define MC_CMD_MAE_ACTION_SET_FREE 0x14e
+#undef MC_CMD_0x14e_PRIVILEGE_CTG
+
+#define MC_CMD_0x14e_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_FREE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_SET_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC
+ * Allocate an action set list (ASL) that can be referenced by an ID. The ASL
+ * ID can be used when inserting an action rule, so that for each packet
+ * matching the rule every action set in the list is applied. If the maximum
+ * number of ASLs has already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC 0x14f
+#undef MC_CMD_0x14f_PRIVILEGE_CTG
+
+#define MC_CMD_0x14f_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMIN 8
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_NUM(len) (((len)-4)/4)
+/* Number of elements in the AS_IDS field. */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_LEN 4
+/* The IDs of the action sets in this list. The last element of this list may
+ * be the ID of an already allocated ASL. In this case the action sets from the
+ * already allocated ASL will be applied after the action sets supplied by this
+ * request. This mechanism can be used to reduce resource usage in the case
+ * where one ASL is a sublist of another ASL. The sublist should be allocated
+ * first, then the superlist should be allocated by supplying all required
+ * action set IDs that are not in the sublist followed by the ID of the
+ * sublist. One sublist can be referenced by multiple superlists.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_OFST 4
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM 62
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2 254
+
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN 4
+/* The MSB of the ASL_ID is guaranteed to be set. This allows an ASL_ID to be
+ * distinguished from an AS_ID returned from MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN 4
+/* enum: An action set list ID that is guaranteed never to represent an action
+ * set list
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL 0xffffffff
+
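+/* Example (informational): allocating a two-element ASL from two previously
+ * allocated action set IDs, as a sketch using the sfc MCDI helpers. Writing
+ * the first array element with MCDI_SET_DWORD works because its offset
+ * equals the field offset; later elements are written directly.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(2));
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+ *	__le32 *ids = (__le32 *)MCDI_PTR(inbuf,
+ *					 MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS);
+ *
+ *	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, 2);
+ *	ids[0] = cpu_to_le32(as_id0);
+ *	ids[1] = cpu_to_le32(as_id1);
+ */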
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE
+ * Free action set lists (ASLs).
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE 0x150
+#undef MC_CMD_0x150_PRIVILEGE_CTG
+
+#define MC_CMD_0x150_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_INSERT
+ * Inserts an Outer Rule, which controls encapsulation parsing, and may
+ * influence the Lookup Sequence. If the maximum number of rules has already
+ * been inserted then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT 0x15a
+#undef MC_CMD_0x15a_PRIVILEGE_CTG
+
+#define MC_CMD_0x15a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMIN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(num) (16+1*(num))
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_NUM(len) (((len)-16)/1)
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* Match priority. Lower values have higher priority. Must be less than
+ * MC_CMD_MAE_GET_CAPS_OUT.ENCAP_PRIOS. If a packet matches two filters with
+ * equal priority then it is unspecified which takes priority.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_LEN 4
+/* Deprecated alias for ACTION_CONTROL. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_WIDTH 16
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_LEN 4
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_LEN 4
+/* Structure of the format MAE_ENC_FIELD_PAIRS. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_OFST 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM 236
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_LEN 4
+/* enum: An outer match ID that is guaranteed never to represent an outer match
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL 0xffffffff
+
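+/* Example (informational): inserting an Outer Rule given a caller-built
+ * MAE_ENC_FIELD_PAIRS match-criteria buffer (crit/crit_len are hypothetical
+ * names; MAE_ENC_FIELD_PAIRS and MAE_MCDI_ENCAP_TYPE_VXLAN are defined
+ * elsewhere in this file). A sketch using the sfc MCDI helpers:
+ *
+ *	MCDI_DECLARE_BUF(inbuf,
+ *		MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN));
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
+ *		       MAE_MCDI_ENCAP_TYPE_VXLAN);
+ *	MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio);
+ *	memcpy(MCDI_PTR(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA),
+ *	       crit, crit_len);
+ */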
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_REMOVE
+ */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE 0x15b
+#undef MC_CMD_0x15b_PRIVILEGE_CTG
+
+#define MC_CMD_0x15b_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_REMOVE_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMIN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MINNUM 1
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM 32
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_OUTER_RULE_REMOVE_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMIN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MINNUM 1
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_UPDATE
+ * Atomically change the response of an Outer Rule.
+ */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
+#undef MC_CMD_0x17d_PRIVILEGE_CTG
+
+#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
+/* ID of outer rule to update */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
+
+/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
+
+/* MAE_ACTION_RULE_RESPONSE structuredef */
+#define MAE_ACTION_RULE_RESPONSE_LEN 16
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LBN 0
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_WIDTH 32
+/* Only one of ASL_ID or AS_ID may have a non-NULL value. */
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_OFST 4
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_LBN 32
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_WIDTH 32
+/* Controls lookup flow when this rule is hit. See sub-fields for details. More
+ * info on the lookup sequence can be found in SF-122976-TC. It is an error to
+ * set both DO_CT and DO_RECIRC.
+ */
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_LBN 0
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_WIDTH 1
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_LBN 1
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_WIDTH 1
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_LBN 2
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_LBN 8
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_WIDTH 8
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_LBN 16
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_WIDTH 16
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LBN 64
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_WIDTH 32
+/* Counter ID to increment if DO_CT or DO_RECIRC is set. Must be set to
+ * COUNTER_ID_NULL otherwise. Counter ID must have been allocated with
+ * COUNTER_TYPE=AR.
+ */
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_OFST 12
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LBN 96
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_INSERT
+ * Insert a rule specifying that packets matching a filter be processed
+ * according to a previously allocated action. Masks can be set as indicated
+ * by MC_CMD_MAE_GET_MATCH_FIELD_CAPABILITIES. If the maximum number of rules has
+ * already been inserted then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT 0x15c
+#undef MC_CMD_0x15c_PRIVILEGE_CTG
+
+#define MC_CMD_0x15c_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMIN 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(num) (28+1*(num))
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_NUM(len) (((len)-28)/1)
+/* See MC_CMD_MAE_OUTER_RULE_INSERT_IN/PRIO. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_LEN 20
+/* Reserved for future use. Must be set to zero. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_OFST 24
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_LEN 4
+/* Structure of the format MAE_FIELD_MASK_VALUE_PAIRS */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_OFST 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM 224
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM_MCDI2 992
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_LEN 4
+/* enum: An action rule ID that is guaranteed never to represent an action rule
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL 0xffffffff
+
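+/* Example (informational): inserting an action rule whose response points at
+ * a single action set (AS_ID set, ASL_ID left NULL). A sketch assuming the
+ * sfc MCDI_STRUCT_* helpers for embedded structures and a caller-built
+ * MAE_FIELD_MASK_VALUE_PAIRS_V2 buffer (match/match_len are hypothetical
+ * names; the structure is defined elsewhere in this file):
+ *
+ *	MCDI_DECLARE_BUF(inbuf,
+ *		MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+ *	efx_dword_t *resp = (efx_dword_t *)MCDI_PTR(inbuf,
+ *					MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+ *
+ *	MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+ *	MCDI_STRUCT_SET_DWORD(resp, MAE_ACTION_RULE_RESPONSE_AS_ID, as_id);
+ *	MCDI_STRUCT_SET_DWORD(resp, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+ *		MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ *	memcpy(MCDI_PTR(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA),
+ *	       match, match_len);
+ */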
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_UPDATE
+ * Atomically change the response of an action rule. Firmware may return
+ * ENOTSUP, in which case the driver should DELETE/INSERT.
+ */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE 0x15d
+#undef MC_CMD_0x15d_PRIVILEGE_CTG
+
+#define MC_CMD_0x15d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_LEN 24
+/* ID of action rule to update */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_LEN 20
+
+/* MC_CMD_MAE_ACTION_RULE_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_DELETE
+ */
+#define MC_CMD_MAE_ACTION_RULE_DELETE 0x155
+#undef MC_CMD_0x155_PRIVILEGE_CTG
+
+#define MC_CMD_0x155_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_DELETE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_RULE_DELETE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_LOOKUP
+ * Return the m-port corresponding to a selector.
+ */
+#define MC_CMD_MAE_MPORT_LOOKUP 0x160
+#undef MC_CMD_0x160_PRIVILEGE_CTG
+
+#define MC_CMD_0x160_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_LOOKUP_IN msgrequest */
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_LEN 4
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_LEN 4
+
+/* MC_CMD_MAE_MPORT_LOOKUP_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN 4
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_LEN 4
+
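+/* Example (informational): resolving the caller's own m-port ID. A sketch
+ * using the sfc MCDI helpers (MAE_MPORT_SELECTOR_ASSIGNED is defined
+ * elsewhere in this file):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR,
+ *		       MAE_MPORT_SELECTOR_ASSIGNED);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN)
+ *		mport_id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
+ */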
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ALLOC
+ * Allocates an m-port, which can subsequently be used in action rules as a
+ * match or delivery argument.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC 0x163
+#undef MC_CMD_0x163_PRIVILEGE_CTG
+
+#define MC_CMD_0x163_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN 24
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_LEN 16
+/* An m-port selector identifying the VNIC to which traffic should be
+ * delivered. This must currently be set to MAE_MPORT_SELECTOR_ASSIGNED (i.e.
+ * the m-port assigned to the calling client).
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN 24
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_LEN 4
+/* A value that will appear in the packet metadata for any packets delivered
+ * using an alias type m-port. This value is guaranteed unique on the VNIC
+ * being delivered to, and is guaranteed not to exceed the range of values
+ * representable in the relevant metadata field.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_LEN 4
+
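+/* Example (informational): allocating an alias m-port that delivers to the
+ * caller's own VNIC, then reading back both the new MPORT_ID and the LABEL
+ * that will tag packets delivered via the alias. A sketch using the sfc
+ * MCDI helpers; the efx_mcdi_rpc() call is elided.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+ *		       MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+ *	MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+ *		       MAE_MPORT_SELECTOR_ASSIGNED);
+ *	... efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, ...) ...
+ *	mport_id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+ *	label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+ */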
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_FREE
+ * Free an m-port which was previously allocated by the driver.
+ */
+#define MC_CMD_MAE_MPORT_FREE 0x164
+#undef MC_CMD_0x164_PRIVILEGE_CTG
+
+#define MC_CMD_0x164_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_FREE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_FREE_IN_LEN 4
+/* MPORT_ID as returned by MC_CMD_MAE_MPORT_ALLOC. */
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_FREE_OUT_LEN 0
+
+/* MAE_MPORT_DESC structuredef */
+#define MAE_MPORT_DESC_LEN 52
+#define MAE_MPORT_DESC_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_FLAGS_OFST 4
+#define MAE_MPORT_DESC_FLAGS_LEN 4
+#define MAE_MPORT_DESC_FLAGS_LBN 32
+#define MAE_MPORT_DESC_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification. */
+#define MAE_MPORT_DESC_UUID_OFST 16
+#define MAE_MPORT_DESC_UUID_LEN 16
+#define MAE_MPORT_DESC_UUID_LBN 128
+#define MAE_MPORT_DESC_UUID_WIDTH 128
+/* Big wadge of space reserved for other common properties */
+#define MAE_MPORT_DESC_RESERVED_OFST 32
+#define MAE_MPORT_DESC_RESERVED_LEN 8
+#define MAE_MPORT_DESC_RESERVED_LO_OFST 32
+#define MAE_MPORT_DESC_RESERVED_LO_LEN 4
+#define MAE_MPORT_DESC_RESERVED_LO_LBN 256
+#define MAE_MPORT_DESC_RESERVED_LO_WIDTH 32
+#define MAE_MPORT_DESC_RESERVED_HI_OFST 36
+#define MAE_MPORT_DESC_RESERVED_HI_LEN 4
+#define MAE_MPORT_DESC_RESERVED_HI_LBN 288
+#define MAE_MPORT_DESC_RESERVED_HI_WIDTH 32
+#define MAE_MPORT_DESC_RESERVED_LBN 256
+#define MAE_MPORT_DESC_RESERVED_WIDTH 64
+/* Logical port index. Only valid when MPORT_TYPE is NET_PORT. */
+#define MAE_MPORT_DESC_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
+
+/* MAE_MPORT_DESC_V2 structuredef */
+#define MAE_MPORT_DESC_V2_LEN 56
+#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
+#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
+#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
+#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification. */
+#define MAE_MPORT_DESC_V2_UUID_OFST 16
+#define MAE_MPORT_DESC_V2_UUID_LEN 16
+#define MAE_MPORT_DESC_V2_UUID_LBN 128
+#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
+/* Big wadge of space reserved for other common properties */
+#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
+#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
+#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
+#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
+#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
+#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
+#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
+#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
+#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
+#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
+#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
+#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
+/* Logical port index. Only valid when MPORT_TYPE is NET_PORT. */
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
+/* A client handle for the VNIC's owner. Only valid for type VNIC. */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ENUMERATE
+ * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
+ * will be removed at some future point.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
+#undef MC_CMD_0x17c_PRIVILEGE_CTG
+
+#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_READ_JOURNAL
+ * Firmware maintains a per-client journal of m-port creations and deletions.
+ * This journal is clear-on-read, i.e. repeated calls of this command will
+ * drain the buffer. Whenever the caller resets its function via FLR or
+ * MC_CMD_ENTITY_RESET, the journal is regenerated from a blank start.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL 0x147
+#undef MC_CMD_0x147_PRIVILEGE_CTG
+
+#define MC_CMD_0x147_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_IN msgrequest */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN 4
+/* Any unused flags are reserved and must be set to zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_LEN 4
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMIN 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_WIDTH 1
+/* The number of MAE_MPORT_DESC structures in MPORT_DESC_DATA. May be zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_OFST 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_OFST 8
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM 240
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1008
+
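+/* Example (informational): draining the journal. Descriptors must be walked
+ * using the reported SIZEOF_MPORT_DESC stride rather than the size of any
+ * driver-side structure, and the command repeated while the MORE flag is
+ * set. A sketch using the sfc MCDI helpers:
+ *
+ *	do {
+ *		... efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_READ_JOURNAL, ...) ...
+ *		count = MCDI_DWORD(outbuf,
+ *				   MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT);
+ *		stride = MCDI_DWORD(outbuf,
+ *				    MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC);
+ *		desc = MCDI_PTR(outbuf,
+ *				MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA);
+ *		for (i = 0; i < count; i++, desc += stride)
+ *			... parse one MAE_MPORT_DESC at desc ...
+ *		more = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_FLAGS) &
+ *		       BIT(MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN);
+ *	} while (more);
+ */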
+/* TABLE_FIELD_DESCR structuredef: An individual table field descriptor. This
+ * describes the location and properties of one N-bit field within a wider
+ * M-bit key/mask/response value.
+ */
+#define TABLE_FIELD_DESCR_LEN 8
+/* Identifier for this field. */
+#define TABLE_FIELD_DESCR_FIELD_ID_OFST 0
+#define TABLE_FIELD_DESCR_FIELD_ID_LEN 2
+/* Enum values, see field(s): */
+/* TABLE_FIELD_ID */
+#define TABLE_FIELD_DESCR_FIELD_ID_LBN 0
+#define TABLE_FIELD_DESCR_FIELD_ID_WIDTH 16
+/* Lowest (least significant) bit number of the bits of this field. */
+#define TABLE_FIELD_DESCR_LBN_OFST 2
+#define TABLE_FIELD_DESCR_LBN_LEN 2
+#define TABLE_FIELD_DESCR_LBN_LBN 16
+#define TABLE_FIELD_DESCR_LBN_WIDTH 16
+/* Width of this field in bits. */
+#define TABLE_FIELD_DESCR_WIDTH_OFST 4
+#define TABLE_FIELD_DESCR_WIDTH_LEN 2
+#define TABLE_FIELD_DESCR_WIDTH_LBN 32
+#define TABLE_FIELD_DESCR_WIDTH_WIDTH 16
+/* The mask type for this field. (Note that masking is relevant to keys; fields
+ * of responses are always reported with the EXACT type.)
+ */
+#define TABLE_FIELD_DESCR_MASK_TYPE_OFST 6
+#define TABLE_FIELD_DESCR_MASK_TYPE_LEN 1
+/* enum: Field must never be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_NEVER 0x0
+/* enum: Exact match: field must always be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_EXACT 0x1
+/* enum: Ternary match: arbitrary mask bits are allowed. */
+#define TABLE_FIELD_DESCR_MASK_TERNARY 0x2
+/* enum: Whole field match: mask must be all 1 bits, or all 0 bits. */
+#define TABLE_FIELD_DESCR_MASK_WHOLE_FIELD 0x3
+/* enum: Longest prefix match: mask must be a contiguous run of 1 bits
+ * followed by 0 bits.
+ */
+#define TABLE_FIELD_DESCR_MASK_LPM 0x4
+#define TABLE_FIELD_DESCR_MASK_TYPE_LBN 48
+#define TABLE_FIELD_DESCR_MASK_TYPE_WIDTH 8
+/* A version code that allows field semantics to be extended. All fields
+ * currently use version 0.
+ */
+#define TABLE_FIELD_DESCR_SCHEME_OFST 7
+#define TABLE_FIELD_DESCR_SCHEME_LEN 1
+#define TABLE_FIELD_DESCR_SCHEME_LBN 56
+#define TABLE_FIELD_DESCR_SCHEME_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_TABLE_LIST
+ * Return the list of tables which may be accessed via this table API.
+ */
+#define MC_CMD_TABLE_LIST 0x1c9
+#undef MC_CMD_0x1c9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_LIST_IN msgrequest */
+#define MC_CMD_TABLE_LIST_IN_LEN 4
+/* Index of the first item to be returned in the TABLE_ID sequence. (Set to 0
+ * for the first call; further calls are only required if the whole sequence
+ * does not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_OFST 0
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_LEN 4
+
+/* MC_CMD_TABLE_LIST_OUT msgresponse */
+#define MC_CMD_TABLE_LIST_OUT_LENMIN 4
+#define MC_CMD_TABLE_LIST_OUT_LENMAX 252
+#define MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_LIST_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(len) (((len)-4)/4)
+/* The total number of tables. */
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_OFST 0
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_LEN 4
+/* A sequence of table identifiers. If all N_TABLES items do not fit, further
+ * items can be obtained by repeating the call with a non-zero
+ * FIRST_TABLE_ID_INDEX.
+ */
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_OFST 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_LEN 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MINNUM 0
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM 62
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM_MCDI2 254
+/* Enum values, see field(s): */
+/* TABLE_ID */
+
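+/* Example (informational): fetching the full table list across multiple
+ * calls. Each call returns as many IDs as fit in the response, and
+ * FIRST_TABLE_ID_INDEX advances by the number received. A sketch using the
+ * sfc MCDI helpers; buffer declarations and error handling are elided.
+ *
+ *	index = 0;
+ *	do {
+ *		MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX,
+ *			       index);
+ *		... efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, ...) ...
+ *		n_tables = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES);
+ *		got = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(outlen);
+ *		... copy got IDs from MCDI_PTR(outbuf, TABLE_LIST_OUT_TABLE_ID) ...
+ *		index += got;
+ *	} while (index < n_tables);
+ */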
+
+/***********************************/
+/* MC_CMD_TABLE_DESCRIPTOR
+ * Request the table descriptor for a particular table. This describes
+ * properties of the table and the format of the key and response. May return
+ * EINVAL for unknown table ID.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR 0x1ca
+#undef MC_CMD_0x1ca_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ca_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_LEN 8
+/* Identifier for the table. */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Index of the first item to be returned in the FIELDS sequence. (Set to 0 for
+ * the first call; further calls are only required if the whole sequence does
+ * not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_LEN 4
+
+/* MC_CMD_TABLE_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_NUM(len) (((len)-20)/8)
+/* Maximum number of entries in this table. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_LEN 4
+/* The type of table. (This is really just informational; the important
+ * properties of a table that affect programming can be deduced from other
+ * items in the table or field descriptor.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_LEN 2
+/* enum: Direct table (essentially just an array). Behaves like a BCAM for
+ * programming purposes, where the fact that the key is actually used as an
+ * array index is really just an implementation detail.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_DIRECT 0x1
+/* enum: BCAM (binary CAM) table: exact match on all key fields. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_BCAM 0x2
+/* enum: TCAM (ternary CAM) table: matches fields with a mask. Each entry may
+ * have its own different mask.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_TCAM 0x3
+/* enum: STCAM (semi-TCAM) table: like a TCAM but entries share a limited
+ * number of unique masks.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_STCAM 0x4
+/* Width of key (and corresponding mask, for TCAM or STCAM) in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_OFST 6
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_LEN 2
+/* Width of response in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_LEN 2
+/* The total number of fields in the key. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_OFST 10
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_LEN 2
+/* The total number of fields in the response. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_OFST 12
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_LEN 2
+/* Number of priorities for STCAM or TCAM; otherwise 0. The priority of a table
+ * entry (relevant when more than one masked entry matches) ranges from
+ * 0=highest to N_PRIORITIES-1=lowest.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_OFST 14
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_LEN 2
+/* Maximum number of masks for STCAM; otherwise 0. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_OFST 16
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_LEN 2
+/* Flags. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_OFST 18
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_LEN 1
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_OFST 18
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_LBN 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_WIDTH 1
+/* Access scheme version code, allowing the method of accessing table entries
+ * to change semantics in future. A client which does not understand the value
+ * of this field should assume that it cannot program this table. Currently
+ * always set to 0 indicating the original MC_CMD_TABLE_INSERT/UPDATE/DELETE
+ * semantics.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_OFST 19
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_LEN 1
+/* A sequence of TABLE_FIELD_DESCR structures: N_KEY_FIELDS items describing
+ * the key, followed by N_RESP_FIELDS items describing the response. If not
+ * all N_KEY_FIELDS+N_RESP_FIELDS items fit, further items can be obtained
+ * by repeating the call with a non-zero FIRST_FIELDS_INDEX.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_OFST 20
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LEN 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_OFST 20
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LEN 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LBN 160
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_WIDTH 32
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_OFST 24
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LEN 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LBN 192
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_WIDTH 32
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MINNUM 1
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM 29
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM_MCDI2 125
+
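
(Editorial aside, not part of the patch.) Each FIELDS element is an 8-byte
TABLE_FIELD_DESCR structure whose members are themselves located by
LBN/WIDTH pairs; only the SCHEME member (LBN 56, width 8) is visible at the
top of this excerpt. A hedged sketch of extracting one such member from a
little-endian descriptor qword:

/* Sketch: pull a sub-field located by LBN/WIDTH out of one __le64
 * TABLE_FIELD_DESCR element. Assumes lbn + width <= 64, which holds
 * for all members of this 64-bit structure.
 */
static u64 table_field_descr_extract(__le64 descr, unsigned int lbn,
				     unsigned int width)
{
	u64 v = le64_to_cpu(descr) >> lbn;

	return width < 64 ? v & ((1ULL << width) - 1) : v;
}

/* e.g.: scheme = table_field_descr_extract(fields[i],
 *		TABLE_FIELD_DESCR_SCHEME_LBN,
 *		TABLE_FIELD_DESCR_SCHEME_WIDTH);
 */
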
+
+/***********************************/
+/* MC_CMD_TABLE_INSERT
+ * Insert a new entry into a table. The entry must not currently exist. May
+ * return EINVAL for unknown table ID or other bad request parameters, EEXIST
+ * if the entry already exists, ENOSPC if there is no space or EPERM if the
+ * operation is not permitted. In case of an error, the additional MCDI error
+ * argument field returns the raw error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_INSERT 0x1cd
+#undef MC_CMD_0x1cd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_INSERT_IN msgrequest */
+#define MC_CMD_TABLE_INSERT_IN_LENMIN 16
+#define MC_CMD_TABLE_INSERT_IN_LENMAX 252
+#define MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_INSERT_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_INSERT_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_INSERT_IN_DATA_OFST 12
+#define MC_CMD_TABLE_INSERT_IN_DATA_LEN 4
+#define MC_CMD_TABLE_INSERT_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_INSERT_OUT msgresponse */
+#define MC_CMD_TABLE_INSERT_OUT_LEN 0
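
(Editorial aside, not part of the patch.) The DATA layout described above
applies the same rule to each of the key, mask and response: treat it as one
wide value, zero-pad at the MSB end to a multiple of 32 bits, then emit
32-bit words least-significant first. A minimal packing sketch, assuming the
wide value already sits in a little-endian byte buffer; the helper name is
hypothetical:

/* Sketch: append one wide N-bit value (width_bits, little-endian byte
 * buffer) to the DATA array as 32-bit words, bits [31:0] first. MSB-end
 * zero padding falls out naturally because trailing bytes of the last
 * word read as zero.
 */
static unsigned int table_op_pack(__le32 *data, unsigned int word_idx,
				  const u8 *value, unsigned int width_bits)
{
	unsigned int nwords = DIV_ROUND_UP(width_bits, 32);
	unsigned int i, j;

	for (i = 0; i < nwords; i++) {
		u32 w = 0;

		for (j = 0; j < 4; j++) {
			unsigned int byte = i * 4 + j;

			if (byte * 8 < width_bits)
				w |= (u32)value[byte] << (8 * j);
		}
		data[word_idx++] = cpu_to_le32(w);
	}
	return word_idx;	/* next free word: key, then mask, then resp */
}
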
+
+
+/***********************************/
+/* MC_CMD_TABLE_UPDATE
+ * Update an existing entry in a table with a new response value. May return
+ * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
+ * entry does not already exist, or EPERM if the operation is not permitted. In
+ * case of an error, the additional MCDI error argument field returns the raw
+ * error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_UPDATE 0x1ce
+#undef MC_CMD_0x1ce_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_UPDATE_IN msgrequest */
+#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
+#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TABLE_DELETE
+ * Delete an existing entry in a table. May return EINVAL for unknown table ID
+ * or other bad request parameters, ENOENT if the entry does not exist, or
+ * EPERM if the operation is not permitted. In case of an error, the additional
+ * MCDI error argument field returns the raw error code from the underlying CAM
+ * driver.
+ */
+#define MC_CMD_TABLE_DELETE 0x1cf
+#undef MC_CMD_0x1cf_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DELETE_IN msgrequest */
+#define MC_CMD_TABLE_DELETE_IN_LENMIN 16
+#define MC_CMD_TABLE_DELETE_IN_LENMAX 252
+#define MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DELETE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_DELETE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_DELETE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_DELETE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_DELETE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_DELETE_OUT msgresponse */
+#define MC_CMD_TABLE_DELETE_OUT_LEN 0
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
new file mode 100644
index 000000000000..ff6d80c8e486
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2022 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef MCDI_PCOL_MAE_H
+#define MCDI_PCOL_MAE_H
+/* MCDI definitions for Match-Action Engine functionality, that are
+ * missing from the main mcdi_pcol.h
+ */
+
+/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
+ * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
+
+#endif /* MCDI_PCOL_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 94c6a345c0b1..ad4694fa3dda 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -20,7 +20,7 @@
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
@@ -46,7 +46,7 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
static int efx_mcdi_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cc15ee8812d9..2e9ba0cfe848 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -178,6 +178,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
+#define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -477,6 +478,8 @@ enum efx_sync_events_state {
* @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
+ * not recognised
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -539,6 +542,7 @@ struct efx_channel {
unsigned int n_rx_xdp_bad_drops;
unsigned int n_rx_xdp_tx;
unsigned int n_rx_xdp_redirect;
+ unsigned int n_rx_mport_bad;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -612,11 +616,6 @@ extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
-extern const char *const efx_reset_type_names[];
-extern const unsigned int efx_reset_type_max;
-#define RESET_TYPE(type) \
- STRING_TABLE_LOOKUP(type, efx_reset_type)
-
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
@@ -627,12 +626,55 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state {
- STATE_UNINIT = 0, /* device being probed/removed or is frozen */
- STATE_READY = 1, /* hardware ready and netdev registered */
- STATE_DISABLED = 2, /* device disabled due to hardware errors */
- STATE_RECOVERY = 3, /* device recovering from PCI error */
+ STATE_UNINIT = 0, /* device being probed/removed */
+ STATE_PROBED, /* hardware probed */
+ STATE_NET_DOWN, /* netdev registered */
+ STATE_NET_UP, /* ready for traffic */
+ STATE_DISABLED, /* device disabled due to hardware errors */
+
+ STATE_RECOVERY = 0x100,/* recovering from PCI error */
+ STATE_FROZEN = 0x200, /* frozen by power management */
};
+static inline bool efx_net_active(enum nic_state state)
+{
+ return state == STATE_NET_DOWN || state == STATE_NET_UP;
+}
+
+static inline bool efx_frozen(enum nic_state state)
+{
+ return state & STATE_FROZEN;
+}
+
+static inline bool efx_recovering(enum nic_state state)
+{
+ return state & STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_freeze(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_FROZEN;
+}
+
+static inline enum nic_state efx_thaw(enum nic_state state)
+{
+ WARN_ON(!efx_frozen(state));
+ return state & ~STATE_FROZEN;
+}
+
+static inline enum nic_state efx_recover(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_recovered(enum nic_state state)
+{
+ WARN_ON(!efx_recovering(state));
+ return state & ~STATE_RECOVERY;
+}
+
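
(Editorial aside, not part of the patch.) STATE_RECOVERY and STATE_FROZEN
are now flag bits OR'd onto a base state rather than states of their own, so
freezing a NET_UP device and thawing it later restores exactly NET_UP. A
hedged sketch of how power-management hooks might use the helpers (function
names hypothetical; efx->state holds the enum nic_state):

/* Sketch: freeze on suspend, restore the prior state on resume. Since
 * efx_freeze()/efx_thaw() only toggle the FROZEN bit, the NET_DOWN vs
 * NET_UP distinction survives the round trip.
 */
static int efx_pm_freeze_sketch(struct efx_nic *efx)
{
	if (efx_net_active(efx->state))
		efx->state = efx_freeze(efx->state); /* NET_UP -> NET_UP|FROZEN */
	return 0;
}

static int efx_pm_thaw_sketch(struct efx_nic *efx)
{
	if (efx_frozen(efx->state))
		efx->state = efx_thaw(efx->state); /* back to NET_UP or NET_DOWN */
	return 0;
}
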
/* Forward declaration */
struct efx_nic;
@@ -813,6 +855,7 @@ enum efx_xdp_tx_queues_mode {
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irqs_hooked: Channel interrupts are hooked
+ * @log_tc_errs: Error logging for TC filter insertion is enabled
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -928,12 +971,15 @@ enum efx_xdp_tx_queues_mode {
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
+ * @vf_reps_lock: Protects vf_reps list
+ * @vf_reps: local VF reps
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
+ * @tc: state for TC offload (EF100).
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
@@ -972,6 +1018,7 @@ struct efx_nic {
unsigned int timer_max_ns;
bool irq_rx_adaptive;
bool irqs_hooked;
+ bool log_tc_errs;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
@@ -1107,6 +1154,8 @@ struct efx_nic {
unsigned vf_init_count;
unsigned vi_scale;
#endif
+ spinlock_t vf_reps_lock;
+ struct list_head vf_reps;
struct efx_ptp_data *ptp_data;
bool ptp_warned;
@@ -1115,6 +1164,7 @@ struct efx_nic {
bool xdp_rxq_info_failed;
struct notifier_block netdev_notifier;
+ struct efx_tc_state *tc;
unsigned int mem_bar;
u32 reg_base;
@@ -1128,6 +1178,24 @@ struct efx_nic {
atomic_t n_rx_noskb_drops;
};
+/**
+ * struct efx_probe_data - State after hardware probe
+ * @pci_dev: The PCI device
+ * @efx: Efx NIC details
+ */
+struct efx_probe_data {
+ struct pci_dev *pci_dev;
+ struct efx_nic efx;
+};
+
+static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
+{
+ struct efx_probe_data **probe_ptr = netdev_priv(dev);
+ struct efx_probe_data *probe_data = *probe_ptr;
+
+ return &probe_data->efx;
+}
+
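
(Editorial aside, not part of the patch.) efx_netdev_priv() relies on
whoever creates the netdev storing a pointer to the efx_probe_data in the
private area; the private area holds only that one pointer, so the efx_nic
can be allocated and freed independently of the netdev. A minimal sketch of
the producer side; the function name and queue count are placeholders:

static int efx_register_netdev_sketch(struct efx_probe_data *probe_data)
{
	struct efx_probe_data **priv;
	struct net_device *net_dev;

	/* private area is sized for a single pointer */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), 16 /* placeholder */);
	if (!net_dev)
		return -ENOMEM;
	priv = netdev_priv(net_dev);
	*priv = probe_data;		/* makes efx_netdev_priv() valid */
	probe_data->efx.net_dev = net_dev;
	return 0;
}
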
static inline int efx_dev_registered(struct efx_nic *efx)
{
return efx->net_dev->reg_state == NETREG_REGISTERED;
@@ -1282,6 +1350,7 @@ struct efx_udp_tunnel {
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @print_additional_fwver: Dump NIC-specific additional FW version info
* @sensor_event: Handle a sensor event from MCDI
+ * @rx_recycle_ring_size: Size of the RX recycle ring
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1460,6 +1529,7 @@ struct efx_nic_type {
size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
size_t len);
void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
+ unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1533,7 +1603,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return true;
+ return channel && channel->channel >= channel->efx->tx_channel_offset;
}
static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 22fbb0ae77fb..63e2394382bb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -465,7 +465,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 5c2fe3ce3f4d..251868235ae4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -301,10 +301,6 @@ struct efx_ef10_nic_data {
int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
-int efx_init_sriov(void);
-void efx_fini_sriov(void);
-
-extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index b9cafe9cd568..0cef35c0c559 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -195,6 +195,11 @@ static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
efx->type->sensor_event(efx, ev);
}
+static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
+{
+ return efx->type->rx_recycle_ring_size(efx);
+}
+
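
(Editorial aside, not part of the patch.) Each NIC type now supplies the
recycle ring size through this method; the heuristic the common code used to
hard-code (see the rx_common.c hunk below) can simply move into an
implementation. A hedged sketch reusing the constants from the removed code:

/* Sketch: per-NIC-type recycle ring sizing, mirroring the heuristic
 * that used to live in efx_init_rx_recycle_ring().
 */
static unsigned int efx_example_rx_recycle_ring_size(const struct efx_nic *efx)
{
	/* more buffers when an IOMMU makes page remapping expensive */
	if (IS_ENABLED(CONFIG_PPC64) || iommu_present(&pci_bus_type))
		return 4096;
	return 2 * EFX_RX_PREFERRED_BATCH;
}
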
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index f0ef515e2ade..eaef4a15008a 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -45,6 +45,7 @@
#include "farch_regs.h"
#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
+#include "efx_channels.h"
/* Maximum number of events expected to make up a PTP event */
#define MAX_EVENT_FRAGS 3
@@ -117,9 +118,14 @@
#define PTP_MIN_LENGTH 63
-#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_RXFILTERS_LEN 5
+
+#define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */
+#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0x01, 0x81} /* ff0e::181 */
#define PTP_EVENT_PORT 319
#define PTP_GENERAL_PORT 320
+#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */
/* Annoyingly the format of the version numbers are different between
* versions 1 and 2 so it isn't possible to simply look for 1 or 2.
@@ -223,9 +229,8 @@ struct efx_ptp_timeset {
* @work: Work task
* @reset_required: A serious error has occurred and the PTP task needs to be
* reset (disable, enable).
- * @rxfilter_event: Receive filter when operating
- * @rxfilter_general: Receive filter when operating
- * @rxfilter_installed: Receive filter installed
+ * @rxfilters: Receive filters when operating
+ * @rxfilters_count: Number of installed rxfilters; should equal PTP_RXFILTERS_LEN
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
@@ -294,9 +299,8 @@ struct efx_ptp_data {
struct workqueue_struct *workwq;
struct work_struct work;
bool reset_required;
- u32 rxfilter_event;
- u32 rxfilter_general;
- bool rxfilter_installed;
+ u32 rxfilters[PTP_RXFILTERS_LEN];
+ size_t rxfilters_count;
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
@@ -541,6 +545,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
return efx->ptp_data ? efx->ptp_data->channel : NULL;
}
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
+{
+ if (efx->ptp_data)
+ efx->ptp_data->channel = channel;
+}
+
static u32 last_sync_timestamp_major(struct efx_nic *efx)
{
struct efx_channel *channel = efx_ptp_channel(efx);
@@ -1093,7 +1103,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
+ /* This code invokes normal driver TX code which is always
+ * protected from softirqs when called from generic TX code,
+ * which in turn disables preemption. Look at __dev_queue_xmit
+ * which uses rcu_read_lock_bh disabling preemption for RCU
+ * plus disabling softirqs. We do not need RCU reader
+ * protection here.
+ *
+		 * Although it is theoretically safe for the current PTP TX/RX
+		 * code to run without disabling softirqs, there are three good
+		 * reasons for doing so:
+ *
+ * 1) The code invoked is mainly implemented for non-PTP
+ * packets and it is always executed with softirqs
+ * disabled.
+ * 2) This being a single PTP packet, better to not
+ * interrupt its processing by softirqs which can lead
+ * to high latencies.
+ * 3) netdev_xmit_more checks preemption is disabled and
+ * triggers a BUG_ON if not.
+ */
+ local_bh_disable();
efx_enqueue_skb(tx_queue, skb);
+ local_bh_enable();
} else {
WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
dev_kfree_skb_any(skb);
@@ -1261,61 +1293,108 @@ static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- if (ptp->rxfilter_installed) {
+ while (ptp->rxfilters_count) {
+ ptp->rxfilters_count--;
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
+ ptp->rxfilters[ptp->rxfilters_count]);
}
}
-static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+static void efx_ptp_init_filter(struct efx_nic *efx,
+ struct efx_filter_spec *rxfilter)
+{
+ struct efx_channel *channel = efx->ptp_data->channel;
+ struct efx_rx_queue *queue = efx_channel_get_rx_queue(channel);
+
+ efx_filter_init_rx(rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(queue));
+}
+
+static int efx_ptp_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *rxfilter)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+
+ int rc = efx_filter_insert_filter(efx, rxfilter, true);
+ if (rc < 0)
+ return rc;
+ ptp->rxfilters[ptp->rxfilters_count] = rc;
+ ptp->rxfilters_count++;
+ return 0;
+}
+
+static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx, u16 port)
+{
+ struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, htonl(PTP_ADDR_IPV4),
+ htons(port));
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, u16 port)
+{
+ const struct in6_addr addr = {{PTP_ADDR_IPV6}};
+ struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_ipv6_local(&rxfilter, IPPROTO_UDP, &addr, htons(port));
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_eth_filter(struct efx_nic *efx)
+{
+ const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER;
struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_eth_local(&rxfilter, EFX_FILTER_VID_UNSPEC, addr);
+ rxfilter.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ rxfilter.ether_type = htons(ETH_P_1588);
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
int rc;
- if (!ptp->channel || ptp->rxfilter_installed)
+ if (!ptp->channel || ptp->rxfilters_count)
return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
*/
- efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
- efx_rx_queue_index(
- efx_channel_get_rx_queue(ptp->channel)));
- rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
- htonl(PTP_ADDRESS),
- htons(PTP_EVENT_PORT));
- if (rc != 0)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ rc = efx_ptp_insert_ipv4_filter(efx, PTP_EVENT_PORT);
if (rc < 0)
- return rc;
- ptp->rxfilter_event = rc;
-
- efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
- efx_rx_queue_index(
- efx_channel_get_rx_queue(ptp->channel)));
- rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
- htonl(PTP_ADDRESS),
- htons(PTP_GENERAL_PORT));
- if (rc != 0)
goto fail;
- rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ rc = efx_ptp_insert_ipv4_filter(efx, PTP_GENERAL_PORT);
if (rc < 0)
goto fail;
- ptp->rxfilter_general = rc;
- ptp->rxfilter_installed = true;
+	/* If the NIC supports hardware timestamping in the MAC, we can
+	 * also support PTP over IPv6 and Ethernet.
+	 */
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ rc = efx_ptp_insert_ipv6_filter(efx, PTP_EVENT_PORT);
+ if (rc < 0)
+ goto fail;
+
+ rc = efx_ptp_insert_ipv6_filter(efx, PTP_GENERAL_PORT);
+ if (rc < 0)
+ goto fail;
+
+ rc = efx_ptp_insert_eth_filter(efx);
+ if (rc < 0)
+ goto fail;
+ }
+
return 0;
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
@@ -1443,6 +1522,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
int rc = 0;
unsigned int pos;
+ if (efx->ptp_data) {
+ efx->ptp_data->channel = channel;
+ return 0;
+ }
+
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
@@ -2176,7 +2260,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.pre_probe = efx_ptp_probe_channel,
.post_remove = efx_ptp_remove_channel,
.get_name = efx_ptp_get_channel_name,
- /* no copy operation; there is no need to reallocate this channel */
+ .copy = efx_copy_channel,
.receive_skb = efx_ptp_rx,
.want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 9855e8c9e544..7b1ef7002b3f 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -16,6 +16,7 @@ struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 633ca77a26fd..9220afeddee8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -23,13 +23,6 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
-/* Number of RX buffers to recycle pages for. When creating the RX page recycle
- * ring, this number is divided by the number of buffers per page to calculate
- * the number of pages to store in the RX page recycle ring.
- */
-#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
-#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
-
/* RX maximum head room required.
*
* This must be at least 1 to prevent overflow, plus one packet-worth
@@ -141,16 +134,7 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
unsigned int bufs_in_recycle_ring, page_ring_size;
struct efx_nic *efx = rx_queue->efx;
- /* Set the RX recycle ring size */
-#ifdef CONFIG_PPC64
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
-#else
- if (iommu_present(&pci_bus_type))
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
- else
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
-#endif /* CONFIG_PPC64 */
-
+ bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
@@ -166,6 +150,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
int i;
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
/* Unmap and release the pages in the recycle ring. Remove the ring. */
for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
struct page *page = rx_queue->page_ring[i];
@@ -673,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;
- return memcmp(&left->outer_vid, &right->outer_vid,
+ return memcmp(&left->vport_id, &right->vport_id,
sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
+ offsetof(struct efx_filter_spec, vport_id)) == 0;
}
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+ return jhash2((const u32 *)&spec->vport_id,
(sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ offsetof(struct efx_filter_spec, vport_id)) / 4,
0);
}
@@ -806,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
int rc;
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
goto out_unlock;
@@ -843,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
}
#endif
out_unlock:
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -859,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
channel->rps_flow_id = NULL;
}
#endif
- down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
}
#ifdef CONFIG_RFS_ACCEL
@@ -870,7 +853,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
- struct efx_nic *efx = netdev_priv(req->net_dev);
+ struct efx_nic *efx = efx_netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
int slot_idx = req - efx->rps_slot;
struct efx_arfs_rule *rule;
@@ -955,7 +938,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
struct efx_arfs_rule *rule;
struct flow_keys fk;
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
index 207ccd8ba062..fbd2769307f9 100644
--- a/drivers/net/ethernet/sfc/rx_common.h
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -18,6 +18,12 @@
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_10G 256
+
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig
new file mode 100644
index 000000000000..c6ea09769873
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/Kconfig
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SFC_SIENA
+ tristate "Solarflare SFC9000 support"
+ depends on PCI
+ depends on PTP_1588_CLOCK
+ select MDIO
+ select CRC32
+ help
+ This driver supports 10-gigabit Ethernet cards based on
+ the Solarflare SFC9000 controller.
+
+ To compile this driver as a module, choose M here. The module
+ will be called sfc-siena.
+config SFC_SIENA_MTD
+ bool "Solarflare SFC9000-family MTD support"
+ depends on SFC_SIENA && MTD && !(SFC_SIENA=y && MTD=m)
+ default y
+ help
+ This exposes the on-board flash and/or EEPROM as MTD devices
+ (e.g. /dev/mtd1). This is required to update the firmware or
+ the boot configuration under Linux.
+config SFC_SIENA_MCDI_MON
+ bool "Solarflare SFC9000-family hwmon support"
+ depends on SFC_SIENA && HWMON && !(SFC_SIENA=y && HWMON=m)
+ default y
+ help
+ This exposes the on-board firmware-managed sensors as a
+ hardware monitor device.
+config SFC_SIENA_SRIOV
+ bool "Solarflare SFC9000-family SR-IOV support"
+ depends on SFC_SIENA && PCI_IOV
+ default n
+ help
+ This enables support for the Single Root I/O Virtualization
+ features, allowing accelerated network performance in
+ virtualized environments.
+config SFC_SIENA_MCDI_LOGGING
+ bool "Solarflare SFC9000-family MCDI logging support"
+ depends on SFC_SIENA
+ default y
+ help
+ This enables support for tracing of MCDI (Management-Controller-to-
+ Driver-Interface) commands and responses, allowing debugging of
+ driver/firmware interaction. The tracing is actually enabled by
+ a sysfs file 'mcdi_logging' under the PCI device, or via module
+ parameter mcdi_logging_default.
diff --git a/drivers/net/ethernet/sfc/siena/Makefile b/drivers/net/ethernet/sfc/siena/Makefile
new file mode 100644
index 000000000000..f7384299667c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+sfc-siena-y += farch.o siena.o \
+ efx.o efx_common.o efx_channels.o nic.o \
+ tx.o tx_common.o rx.o rx_common.o \
+ selftest.o ethtool.o ethtool_common.o ptp.o \
+ mcdi.o mcdi_port.o mcdi_port_common.o \
+ mcdi_mon.o
+sfc-siena-$(CONFIG_SFC_SIENA_MTD) += mtd.o
+sfc-siena-$(CONFIG_SFC_SIENA_SRIOV) += siena_sriov.o
+
+obj-$(CONFIG_SFC_SIENA) += sfc-siena.o
diff --git a/drivers/net/ethernet/sfc/siena/bitfield.h b/drivers/net/ethernet/sfc/siena/bitfield.h
new file mode 100644
index 000000000000..1f981dfe4bdc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/bitfield.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_BITFIELD_H
+#define EFX_BITFIELD_H
+
+/*
+ * Efx bitfield access
+ *
+ * Efx NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and
+ * efx_dword_t) to be little-endian.
+ */
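
(Editorial aside, not part of the patch.) As a quick orientation to the API
defined below: values are built from (field, value) pairs with the
EFX_POPULATE_*() macros and read back, endian-safely, with the *_FIELD()
accessors. A small usage sketch using the generic field names defined just
below; real callers use register-specific fields:

efx_qword_t event;
u32 low;

EFX_POPULATE_QWORD_2(event,
		     EFX_DWORD_0, 0x12345678,
		     EFX_DWORD_1, 0x9abcdef0);
/* extracts 0x12345678 regardless of host endianness */
low = EFX_QWORD_FIELD(event, EFX_DWORD_0);
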
+
+/* Lowest bit numbers and widths */
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+#define EFX_QWORD_0_LBN 0
+#define EFX_QWORD_0_WIDTH 64
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define EFX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 64 bits.
+ */
+#define EFX_MASK64(width) \
+ ((width) == 64 ? ~((u64) 0) : \
+ (((((u64) 1) << (width))) - 1))
+
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 32 bits. Use
+ * EFX_MASK64 for higher width fields.
+ */
+#define EFX_MASK32(width) \
+ ((width) == 32 ? ~((u32) 0) : \
+ (((((u32) 1) << (width))) - 1))
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+typedef union efx_dword {
+ __le32 u32[1];
+} efx_dword_t;
+
+/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
+typedef union efx_qword {
+ __le64 u64[1];
+ __le32 u32[2];
+ efx_dword_t dword[2];
+} efx_qword_t;
+
+/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
+typedef union efx_oword {
+ __le64 u64[2];
+ efx_qword_t qword[2];
+ __le32 u32[4];
+ efx_dword_t dword[4];
+} efx_oword_t;
+
+/* Format string and value expanders for printk */
+#define EFX_DWORD_FMT "%08x"
+#define EFX_QWORD_FMT "%08x:%08x"
+#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
+#define EFX_DWORD_VAL(dword) \
+ ((unsigned int) le32_to_cpu((dword).u32[0]))
+#define EFX_QWORD_VAL(qword) \
+ ((unsigned int) le32_to_cpu((qword).u32[1])), \
+ ((unsigned int) le32_to_cpu((qword).u32[0]))
+#define EFX_OWORD_VAL(oword) \
+ ((unsigned int) le32_to_cpu((oword).u32[3])), \
+ ((unsigned int) le32_to_cpu((oword).u32[2])), \
+ ((unsigned int) le32_to_cpu((oword).u32[1])), \
+ ((unsigned int) le32_to_cpu((oword).u32[0]))
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
+ *
+ * ( element ) << 4
+ *
+ * The result will contain the relevant bits filled in over the range
+ * [0,high-low], with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
+ ((low) > (max) || (high) < (min) ? 0 : \
+ (low) > (min) ? \
+ (native_element) >> ((low) - (min)) : \
+ (native_element) << ((min) - (low)))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(element, min, max, low, high) \
+ EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(element, min, max, low, high) \
+ EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
+
+#define EFX_EXTRACT_OWORD64(oword, low, high) \
+ ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
+ EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
+ EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_EXTRACT_QWORD64(qword, low, high) \
+ (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
+ EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_EXTRACT_OWORD32(oword, low, high) \
+ ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
+ EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
+ EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
+ EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_EXTRACT_QWORD32(qword, low, high) \
+ ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
+ EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_EXTRACT_DWORD(dword, low, high) \
+ (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
+ EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_OWORD_FIELD64(oword, field) \
+ EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD64(qword, field) \
+ EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_FIELD32(oword, field) \
+ EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD32(qword, field) \
+ EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_DWORD_FIELD(dword, field) \
+ EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_IS_ZERO64(oword) \
+ (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
+
+#define EFX_QWORD_IS_ZERO64(qword) \
+ (((qword).u64[0]) == (__force __le64) 0)
+
+#define EFX_OWORD_IS_ZERO32(oword) \
+ (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
+ == (__force __le32) 0)
+
+#define EFX_QWORD_IS_ZERO32(qword) \
+ (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
+
+#define EFX_DWORD_IS_ZERO(dword) \
+ (((dword).u32[0]) == (__force __le32) 0)
+
+#define EFX_OWORD_IS_ALL_ONES64(oword) \
+ (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
+
+#define EFX_QWORD_IS_ALL_ONES64(qword) \
+ ((qword).u64[0] == ~((__force __le64) 0))
+
+#define EFX_OWORD_IS_ALL_ONES32(oword) \
+ (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
+ == ~((__force __le32) 0))
+
+#define EFX_QWORD_IS_ALL_ONES32(qword) \
+ (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
+
+#define EFX_DWORD_IS_ALL_ONES(dword) \
+ ((dword).u32[0] == ~((__force __le32) 0))
+
+#if BITS_PER_LONG == 64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
+#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
+#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
+#endif
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u64) (value)) << (low - min)) : \
+ (((u64) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u32) (value)) << (low - min)) : \
+ (((u32) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE(min, max, low, high, value) \
+ ((((max - min) >= 32) || ((high - low) >= 32)) ? \
+ EFX_INSERT_NATIVE64(min, max, low, high, value) : \
+ EFX_INSERT_NATIVE32(min, max, low, high, value))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
+ EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS_NATIVE(min, max, \
+ field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7, \
+ field8, value8, \
+ field9, value9, \
+ field10, value10, \
+ field11, value11, \
+ field12, value12, \
+ field13, value13, \
+ field14, value14, \
+ field15, value15, \
+ field16, value16, \
+ field17, value17, \
+ field18, value18, \
+ field19, value19) \
+ (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19)))
+
+#define EFX_INSERT_FIELDS64(...) \
+ cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_INSERT_FIELDS32(...) \
+ cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_POPULATE_OWORD64(oword, ...) do { \
+ (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_QWORD64(qword, ...) do { \
+ (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_OWORD32(oword, ...) do { \
+ (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
+ (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_QWORD32(qword, ...) do { \
+ (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_DWORD(dword, ...) do { \
+ (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ } while (0)
+
+#if BITS_PER_LONG == 64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#else
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#endif
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_18(oword, ...) \
+ EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_17(oword, ...) \
+ EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_16(oword, ...) \
+ EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_15(oword, ...) \
+ EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_14(oword, ...) \
+ EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_13(oword, ...) \
+ EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_12(oword, ...) \
+ EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_11(oword, ...) \
+ EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_10(oword, ...) \
+ EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_9(oword, ...) \
+ EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_8(oword, ...) \
+ EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_7(oword, ...) \
+ EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_6(oword, ...) \
+ EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_5(oword, ...) \
+ EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_4(oword, ...) \
+ EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_3(oword, ...) \
+ EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_2(oword, ...) \
+ EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_1(oword, ...) \
+ EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_OWORD(oword) \
+ EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_OWORD(oword) \
+ EFX_POPULATE_OWORD_4(oword, \
+ EFX_DWORD_0, 0xffffffff, \
+ EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, \
+ EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_18(qword, ...) \
+ EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_17(qword, ...) \
+ EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_16(qword, ...) \
+ EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_15(qword, ...) \
+ EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_14(qword, ...) \
+ EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_13(qword, ...) \
+ EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_12(qword, ...) \
+ EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_11(qword, ...) \
+ EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_10(qword, ...) \
+ EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_9(qword, ...) \
+ EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_8(qword, ...) \
+ EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_7(qword, ...) \
+ EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_6(qword, ...) \
+ EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_5(qword, ...) \
+ EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_4(qword, ...) \
+ EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_3(qword, ...) \
+ EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_2(qword, ...) \
+ EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_1(qword, ...) \
+ EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_QWORD(qword) \
+ EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_QWORD(qword) \
+ EFX_POPULATE_QWORD_2(qword, \
+ EFX_DWORD_0, 0xffffffff, \
+ EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_18(dword, ...) \
+ EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_17(dword, ...) \
+ EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_16(dword, ...) \
+ EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_15(dword, ...) \
+ EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_14(dword, ...) \
+ EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_13(dword, ...) \
+ EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_12(dword, ...) \
+ EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_11(dword, ...) \
+ EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_10(dword, ...) \
+ EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_9(dword, ...) \
+ EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_8(dword, ...) \
+ EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_7(dword, ...) \
+ EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_6(dword, ...) \
+ EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_5(dword, ...) \
+ EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_4(dword, ...) \
+ EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_3(dword, ...) \
+ EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_2(dword, ...) \
+ EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_1(dword, ...) \
+ EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_DWORD(dword) \
+ EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_DWORD(dword) \
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
+#define EFX_INVERT_OWORD(oword) do { \
+ (oword).u64[0] = ~((oword).u64[0]); \
+ (oword).u64[1] = ~((oword).u64[1]); \
+ } while (0)
+
+#define EFX_AND_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
+ } while (0)
+
+#define EFX_AND_QWORD(qword, from, mask) \
+ do { \
+ (qword).u64[0] = (from).u64[0] & (mask).u64[0]; \
+ } while (0)
+
+#define EFX_OR_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
+ } while (0)
+
+#define EFX_INSERT64(min, max, low, high, value) \
+ cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INSERT32(min, max, low, high, value) \
+ cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INPLACE_MASK64(min, max, low, high) \
+ EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_INPLACE_MASK32(min, max, low, high) \
+ EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_SET_OWORD64(oword, low, high, value) do { \
+ (oword).u64[0] = (((oword).u64[0] \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
+ (oword).u64[1] = (((oword).u64[1] \
+ & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
+ | EFX_INSERT64(64, 127, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_QWORD64(qword, low, high, value) do { \
+ (qword).u64[0] = (((qword).u64[0] \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_OWORD32(oword, low, high, value) do { \
+ (oword).u32[0] = (((oword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ (oword).u32[1] = (((oword).u32[1] \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
+ (oword).u32[2] = (((oword).u32[2] \
+ & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
+ | EFX_INSERT32(64, 95, low, high, value)); \
+ (oword).u32[3] = (((oword).u32[3] \
+ & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
+ | EFX_INSERT32(96, 127, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_QWORD32(qword, low, high, value) do { \
+ (qword).u32[0] = (((qword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ (qword).u32[1] = (((qword).u32[1] \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_DWORD32(dword, low, high, value) do { \
+ (dword).u32[0] = (((dword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ } while (0)
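
To make the read-modify-write pattern concrete, here is a hedged sketch of one expansion (the field positions and value are chosen purely for illustration):

/* Illustrative expansion: EFX_SET_DWORD32(reg, 8, 11, 0x5) becomes roughly
 *
 *   reg.u32[0] = (reg.u32[0] & ~cpu_to_le32(0xf << 8))
 *              | cpu_to_le32(0x5 << 8);
 *
 * i.e. clear bits 8..11 of the little-endian dword, insert the new value,
 * and leave every other bit untouched.
 */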
+
+#define EFX_SET_OWORD_FIELD64(oword, field, value) \
+ EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value) \
+ EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value) \
+ EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value) \
+ EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value) \
+ EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#if BITS_PER_LONG == 64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#else
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#endif
+
+/* Used to avoid compiler warnings about shift range exceeding width
+ * of the data types when dma_addr_t is only 32 bits wide.
+ */
+#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
+#define EFX_DMA_TYPE_WIDTH(width) \
+ (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
+
+
+/* Static initialiser */
+#define EFX_OWORD32(a, b, c, d) \
+ { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
+ cpu_to_le32(c), cpu_to_le32(d) } }
+
+#endif /* EFX_BITFIELD_H */
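
The EFX_POPULATE_*_N cascades above pad unused (field, value) slots with EFX_DUMMY_FIELD so that a single variadic implementation serves every arity. A hedged sketch of how a two-field populate unwinds (the FRF_AZ_* field names are illustrative register fields, not a prescribed usage):

efx_oword_t reg;

EFX_POPULATE_OWORD_2(reg,
		     FRF_AZ_TX_DESCQ_EN, 1,
		     FRF_AZ_TX_DESCQ_SIZE, 3);

/* One cascade step rewrites this as:
 *   EFX_POPULATE_OWORD_3(reg, EFX_DUMMY_FIELD, 0,
 *			  FRF_AZ_TX_DESCQ_EN, 1,
 *			  FRF_AZ_TX_DESCQ_SIZE, 3);
 * and so on until all 19 slots of the underlying EFX_POPULATE_OWORD
 * macro are filled; each EFX_DUMMY_FIELD/0 pair contributes nothing.
 */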
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
new file mode 100644
index 000000000000..60e5b7c8ccf9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -0,0 +1,1337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/filter.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/aer.h>
+#include <linux/interrupt.h>
+#include "net_driver.h"
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "selftest.h"
+#include "sriov.h"
+#ifdef CONFIG_SFC_SIENA_SRIOV
+#include "siena_sriov.h"
+#endif
+
+#include "mcdi_port_common.h"
+#include "mcdi_pcol.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+module_param_named(interrupt_mode, efx_siena_interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+module_param_named(rss_cpus, efx_siena_rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
+ *
+ * This is only used in MSI-X interrupt mode
+ */
+bool efx_siena_separate_tx_channels;
+module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels,
+ bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
+ "Use separate channels for TX and RX");
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full. A queue is
+ * restarted when it drops below half full. The time this takes (assuming
+ * worst case 3 descriptors per packet and 1024 descriptors) is
+ * 512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+static bool phy_flash_cfg;
+module_param(phy_flash_cfg, bool, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+
+static void efx_remove_port(struct efx_nic *efx);
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+static void efx_fini_port(struct efx_nic *efx);
+
+static int efx_probe_port(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
+
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
+ /* Connect up MAC/PHY operations table */
+ rc = efx->type->probe_port(efx);
+ if (rc)
+ return rc;
+
+ /* Initialise MAC address to permanent address */
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
+
+ return 0;
+}
+
+static int efx_init_port(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
+
+ mutex_lock(&efx->mac_lock);
+
+ efx->port_initialized = true;
+
+ /* Ensure the PHY advertises the correct flow control settings */
+ rc = efx_siena_mcdi_port_reconfigure(efx);
+ if (rc && rc != -EPERM)
+ goto fail;
+
+ mutex_unlock(&efx->mac_lock);
+ return 0;
+
+fail:
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_fini_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
+
+ if (!efx->port_initialized)
+ return;
+
+ efx->port_initialized = false;
+
+ efx->link_state.up = false;
+ efx_siena_link_status_changed(efx);
+}
+
+static void efx_remove_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
+
+ efx->type->remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
+static int efx_probe_nic(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
+
+ /* Carry out hardware-type specific initialisation */
+ rc = efx->type->probe(efx);
+ if (rc)
+ return rc;
+
+ do {
+ if (!efx->max_channels || !efx->max_tx_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources to allocate"
+ " any channels\n");
+ rc = -ENOSPC;
+ goto fail1;
+ }
+
+ /* Determine the number of channels and queues by trying
+ * to hook in MSI-X interrupts.
+ */
+ rc = efx_siena_probe_interrupts(efx);
+ if (rc)
+ goto fail1;
+
+ rc = efx_siena_set_channels(efx);
+ if (rc)
+ goto fail1;
+
+ /* dimension_resources can fail with EAGAIN */
+ rc = efx->type->dimension_resources(efx);
+ if (rc != 0 && rc != -EAGAIN)
+ goto fail2;
+
+ if (rc == -EAGAIN)
+ /* try again with new max_channels */
+ efx_siena_remove_interrupts(efx);
+
+ } while (rc == -EAGAIN);
+
+ if (efx->n_channels > 1)
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+ efx_siena_set_default_rx_indir_table(efx, &efx->rss_context);
+
+ /* Initialise the interrupt moderation settings */
+ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
+ efx_siena_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec,
+ true, true);
+
+ return 0;
+
+fail2:
+ efx_siena_remove_interrupts(efx);
+fail1:
+ efx->type->remove(efx);
+ return rc;
+}
+
+static void efx_remove_nic(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
+
+ efx_siena_remove_interrupts(efx);
+ efx->type->remove(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int efx_probe_all(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_probe_nic(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
+ goto fail1;
+ }
+
+ rc = efx_probe_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
+ goto fail2;
+ }
+
+ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+ rc = -EINVAL;
+ goto fail3;
+ }
+
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ rc = efx->type->vswitching_probe(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to setup vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ rc = efx_siena_probe_filters(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create filter tables\n");
+ goto fail4;
+ }
+
+ rc = efx_siena_probe_channels(efx);
+ if (rc)
+ goto fail5;
+
+ return 0;
+
+ fail5:
+ efx_siena_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
+ fail3:
+ efx_remove_port(efx);
+ fail2:
+ efx_remove_nic(efx);
+ fail1:
+ return rc;
+}
+
+static void efx_remove_all(struct efx_nic *efx)
+{
+ rtnl_lock();
+ efx_xdp_setup_prog(efx, NULL);
+ rtnl_unlock();
+
+ efx_siena_remove_channels(efx);
+ efx_siena_remove_filters(efx);
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
+ efx_remove_port(efx);
+ efx_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
+{
+ if (usecs == 0)
+ return 0;
+ if (usecs * 1000 < efx->timer_quantum_ns)
+ return 1; /* never round down to 0 */
+ return usecs * 1000 / efx->timer_quantum_ns;
+}
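
A worked example of the conversion above (the quantum value is illustrative; the driver derives timer_quantum_ns from the hardware during probe):

/* With timer_quantum_ns = 6144:
 *   efx_siena_usecs_to_ticks(efx, 60) -> 60 * 1000 / 6144 = 9 ticks
 *   efx_siena_usecs_to_ticks(efx, 3)  -> 3000 < 6144, so 1 tick
 *   efx_siena_usecs_to_ticks(efx, 0)  -> 0 (moderation disabled)
 */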
+
+/* Set interrupt moderation parameters */
+int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
+{
+ struct efx_channel *channel;
+ unsigned int timer_max_us;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ timer_max_us = efx->timer_max_ns / 1000;
+
+ if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
+ return -EINVAL;
+
+ if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
+ !rx_may_override_tx) {
+ netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+ "RX and TX IRQ moderation must be equal\n");
+ return -EINVAL;
+ }
+
+ efx->irq_rx_adaptive = rx_adaptive;
+ efx->irq_rx_moderation_us = rx_usecs;
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel))
+ channel->irq_moderation_us = rx_usecs;
+ else if (efx_channel_has_tx_queues(channel))
+ channel->irq_moderation_us = tx_usecs;
+ else if (efx_channel_is_xdp_tx(channel))
+ channel->irq_moderation_us = tx_usecs;
+ }
+
+ return 0;
+}
+
+void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
+{
+ *rx_adaptive = efx->irq_rx_adaptive;
+ *rx_usecs = efx->irq_rx_moderation_us;
+
+ /* If channels are shared between RX and TX, so is IRQ
+ * moderation. Otherwise, IRQ moderation is the same for all
+ * TX channels and is not adaptive.
+ */
+ if (efx->tx_channel_offset == 0) {
+ *tx_usecs = *rx_usecs;
+ } else {
+ struct efx_channel *tx_channel;
+
+ tx_channel = efx->channel[efx->tx_channel_offset];
+ *tx_usecs = tx_channel->irq_moderation_us;
+ }
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (cmd == SIOCSHWTSTAMP)
+ return efx_siena_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_siena_ptp_get_ts_config(efx, ifr);
+
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
+}
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+static int efx_net_open(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ return -EBUSY;
+ if (efx_siena_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL))
+ return -EIO;
+
+ /* Notify the kernel of the link state polled during driver load,
+ * before the monitor starts running */
+ efx_siena_link_status_changed(efx);
+
+ efx_siena_start_all(efx);
+ if (efx->state == STATE_DISABLED || efx->reset_pending)
+ netif_device_detach(efx->net_dev);
+ efx_siena_selftest_async_start(efx);
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be void.
+ */
+static int efx_net_stop(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ /* Stop the device and flush all the channels */
+ efx_siena_stop_all(efx);
+
+ return 0;
+}
+
+static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_add_vid)
+ return efx->type->vlan_rx_add_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_kill_vid)
+ return efx->type->vlan_rx_kill_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
+static const struct net_device_ops efx_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_siena_net_stats,
+ .ndo_tx_timeout = efx_siena_watchdog,
+ .ndo_start_xmit = efx_siena_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_siena_change_mtu,
+ .ndo_set_mac_address = efx_siena_set_mac_address,
+ .ndo_set_rx_mode = efx_siena_set_rx_mode,
+ .ndo_set_features = efx_siena_set_features,
+ .ndo_features_check = efx_siena_features_check,
+ .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ .ndo_set_vf_mac = efx_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+#endif
+ .ndo_get_phys_port_id = efx_siena_get_phys_port_id,
+ .ndo_get_phys_port_name = efx_siena_get_phys_port_name,
+ .ndo_setup_tc = efx_siena_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_siena_filter_rfs,
+#endif
+ .ndo_xdp_xmit = efx_xdp_xmit,
+ .ndo_bpf = efx_xdp
+};
+
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+
+ if (efx->xdp_rxq_info_failed) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to bind XDP program due to previous failure of rxq_info\n");
+ return -EINVAL;
+ }
+
+ if (prog && efx->net_dev->mtu > efx_siena_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to configure XDP with MTU of %d (max: %d)\n",
+ efx->net_dev->mtu, efx_siena_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ old_prog = rtnl_dereference(efx->xdp_prog);
+ rcu_assign_pointer(efx->xdp_prog, prog);
+ /* Release the reference that was originally passed by the caller. */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return efx_xdp_setup_prog(efx, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return efx_siena_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
+static void efx_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+ efx_siena_mtd_rename(efx);
+ efx_siena_set_channel_names(efx);
+}
+
+static int efx_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+
+ if ((net_dev->netdev_ops == &efx_netdev_ops) &&
+ event == NETDEV_CHANGENAME)
+ efx_update_name(netdev_priv(net_dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efx_netdev_notifier = {
+ .notifier_call = efx_netdev_event,
+};
+
+static ssize_t phy_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR_RO(phy_type);
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_channel *channel;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ net_dev->priv_flags |= IFF_UNICAST_FLT;
+ net_dev->ethtool_ops = &efx_siena_ethtool_ops;
+ netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
+
+ rtnl_lock();
+
+ /* Enable resets to be scheduled and check whether any were
+ * already requested. If so, the NIC is probably hosed so we
+ * abort.
+ */
+ efx->state = STATE_READY;
+ smp_mb(); /* ensure we change state before checking reset_pending */
+ if (efx->reset_pending) {
+ pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
+ rc = -EIO;
+ goto fail_locked;
+ }
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ efx_update_name(efx);
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(net_dev);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ efx_for_each_channel(channel, efx) {
+ struct efx_tx_queue *tx_queue;
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_siena_init_tx_queue_core_txq(tx_queue);
+ }
+
+ efx_associate(efx);
+
+ rtnl_unlock();
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_registered;
+ }
+
+ efx_siena_init_mcdi_logging(efx);
+
+ return 0;
+
+fail_registered:
+ rtnl_lock();
+ efx_dissociate(efx);
+ unregister_netdevice(net_dev);
+fail_locked:
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+ if (!efx->net_dev)
+ return;
+
+ BUG_ON(netdev_priv(efx->net_dev) != efx);
+
+ if (efx_dev_registered(efx)) {
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ efx_siena_fini_mcdi_logging(efx);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
+}
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static const struct pci_device_id efx_pci_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
+ .driver_data = (unsigned long)&siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
+ .driver_data = (unsigned long)&siena_a0_nic_type},
+ {0} /* end of list */
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats)
+{
+ u64 n_rx_nodesc_trunc = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
+ stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
+ stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove_main(struct efx_nic *efx)
+{
+ /* Flush reset_work. It can no longer be scheduled since we
+ * are not READY.
+ */
+ BUG_ON(efx->state == STATE_READY);
+ efx_siena_flush_reset_workqueue(efx);
+
+ efx_siena_disable_interrupts(efx);
+ efx_siena_clear_interrupt_affinity(efx);
+ efx_siena_fini_interrupt(efx);
+ efx_fini_port(efx);
+ efx->type->fini(efx);
+ efx_siena_fini_napi(efx);
+ efx_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
+ */
+static void efx_pci_remove(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ /* Mark the NIC as fini, then stop the interface */
+ rtnl_lock();
+ efx_dissociate(efx);
+ dev_close(efx->net_dev);
+ efx_siena_disable_interrupts(efx);
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
+
+ if (efx->type->sriov_fini)
+ efx->type->sriov_fini(efx);
+
+ efx_unregister_netdev(efx);
+
+ efx_siena_mtd_remove(efx);
+
+ efx_pci_remove_main(efx);
+
+ efx_siena_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ efx_siena_fini_struct(efx);
+ free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
+}
+
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC.
+ */
+static void efx_probe_vpd_strings(struct efx_nic *efx)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
+
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
+ return;
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
+
+ kfree(vpd_data);
+}
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int efx_pci_probe_main(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Do start-of-day initialisation */
+ rc = efx_probe_all(efx);
+ if (rc)
+ goto fail1;
+
+ efx_siena_init_napi(efx);
+
+ down_write(&efx->filter_sem);
+ rc = efx->type->init(efx);
+ up_write(&efx->filter_sem);
+ if (rc) {
+ pci_err(efx->pci_dev, "failed to initialise NIC\n");
+ goto fail3;
+ }
+
+ rc = efx_init_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
+ goto fail4;
+ }
+
+ rc = efx_siena_init_interrupt(efx);
+ if (rc)
+ goto fail5;
+
+ efx_siena_set_interrupt_affinity(efx);
+ rc = efx_siena_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
+
+ return 0;
+
+ fail6:
+ efx_siena_clear_interrupt_affinity(efx);
+ efx_siena_fini_interrupt(efx);
+ fail5:
+ efx_fini_port(efx);
+ fail4:
+ efx->type->fini(efx);
+ fail3:
+ efx_siena_fini_napi(efx);
+ efx_remove_all(efx);
+ fail1:
+ return rc;
+}
+
+static int efx_pci_probe_post_io(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ int rc = efx_pci_probe_main(efx);
+
+ if (rc)
+ return rc;
+
+ if (efx->type->sriov_init) {
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
+ rc);
+ }
+
+ /* Determine netdevice features */
+ net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ net_dev->features |= NETIF_F_TSO6;
+ /* Check whether device supports TSO */
+ if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+ net_dev->features &= ~NETIF_F_ALL_TSO;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+
+ net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
+
+ /* Disable receiving frames with bad FCS, by default. */
+ net_dev->features &= ~NETIF_F_RXALL;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
+ rc = efx_register_netdev(efx);
+ if (!rc)
+ return 0;
+
+ efx_pci_remove_main(efx);
+ return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically). It sets up PCI mappings, resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine. It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. efx_net_open).
+ */
+static int efx_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ struct net_device *net_dev;
+ struct efx_nic *efx;
+ int rc;
+
+ /* Allocate and initialise a struct net_device and struct efx_nic */
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ efx = netdev_priv(net_dev);
+ efx->type = (const struct efx_nic_type *) entry->driver_data;
+ efx->fixed_features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = efx_siena_init_struct(efx, pci_dev, net_dev);
+ if (rc)
+ goto fail1;
+
+ pci_info(pci_dev, "Solarflare NIC detected\n");
+
+ if (!efx->type->is_vf)
+ efx_probe_vpd_strings(efx);
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = efx_siena_init_io(efx, efx->type->mem_bar(efx),
+ efx->type->max_dma_mask,
+ efx->type->mem_map_size(efx));
+ if (rc)
+ goto fail2;
+
+ rc = efx_pci_probe_post_io(efx);
+ if (rc) {
+ /* On failure, retry once immediately.
+ * If we aborted probe due to a scheduled reset, dismiss it.
+ */
+ efx->reset_pending = 0;
+ rc = efx_pci_probe_post_io(efx);
+ if (rc) {
+ /* On another failure, retry once more
+ * after a 50-305ms delay.
+ */
+ unsigned char r;
+
+ get_random_bytes(&r, 1);
+ msleep((unsigned int)r + 50);
+ efx->reset_pending = 0;
+ rc = efx_pci_probe_post_io(efx);
+ }
+ }
+ if (rc)
+ goto fail3;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ /* Try to create MTDs, but allow this to fail */
+ rtnl_lock();
+ rc = efx_mtd_probe(efx);
+ rtnl_unlock();
+ if (rc && rc != -EPERM)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to create MTDs (%d)\n", rc);
+
+ (void)pci_enable_pcie_error_reporting(pci_dev);
+
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
+ return 0;
+
+ fail3:
+ efx_siena_fini_io(efx);
+ fail2:
+ efx_siena_fini_struct(efx);
+ fail1:
+ WARN_ON(rc > 0);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
+ free_netdev(net_dev);
+ return rc;
+}
+
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SIENA_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ int rc;
+ struct efx_nic *efx = pci_get_drvdata(dev);
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ } else
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int efx_pm_freeze(struct device *dev)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_UNINIT;
+
+ efx_device_detach_sync(efx);
+
+ efx_siena_stop_all(efx);
+ efx_siena_disable_interrupts(efx);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
+static int efx_pm_thaw(struct device *dev)
+{
+ int rc;
+ struct efx_nic *efx = dev_get_drvdata(dev);
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ rc = efx_siena_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ mutex_lock(&efx->mac_lock);
+ efx_siena_mcdi_port_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_siena_start_all(efx);
+
+ efx_device_attach_if_not_resetting(efx);
+
+ efx->state = STATE_READY;
+
+ efx->type->resume_wol(efx);
+ }
+
+ rtnl_unlock();
+
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ efx_siena_queue_reset_work(efx);
+
+ return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
+}
+
+static int efx_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ efx->type->fini(efx);
+
+ efx->reset_pending = 0;
+
+ pci_save_state(pci_dev);
+ return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int efx_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ int rc;
+
+ rc = pci_set_power_state(pci_dev, PCI_D0);
+ if (rc)
+ return rc;
+ pci_restore_state(pci_dev);
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+ pci_set_master(efx->pci_dev);
+ rc = efx->type->reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ return rc;
+ down_write(&efx->filter_sem);
+ rc = efx->type->init(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ return rc;
+ rc = efx_pm_thaw(dev);
+ return rc;
+}
+
+static int efx_pm_suspend(struct device *dev)
+{
+ int rc;
+
+ efx_pm_freeze(dev);
+ rc = efx_pm_poweroff(dev);
+ if (rc)
+ efx_pm_resume(dev);
+ return rc;
+}
+
+static const struct dev_pm_ops efx_pm_ops = {
+ .suspend = efx_pm_suspend,
+ .resume = efx_pm_resume,
+ .freeze = efx_pm_freeze,
+ .thaw = efx_pm_thaw,
+ .poweroff = efx_pm_poweroff,
+ .restore = efx_pm_resume,
+};
+
+static struct pci_driver efx_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = efx_pci_table,
+ .probe = efx_pci_probe,
+ .remove = efx_pci_remove,
+ .driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
+ .err_handler = &efx_siena_err_handlers,
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ .sriov_configure = efx_pci_sriov_configure,
+#endif
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+static int __init efx_init_module(void)
+{
+ int rc;
+
+ pr_info("Solarflare Siena driver\n");
+
+ rc = register_netdevice_notifier(&efx_netdev_notifier);
+ if (rc)
+ goto err_notifier;
+
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ rc = efx_init_sriov();
+ if (rc)
+ goto err_sriov;
+#endif
+
+ rc = efx_siena_create_reset_workqueue();
+ if (rc)
+ goto err_reset;
+
+ rc = pci_register_driver(&efx_pci_driver);
+ if (rc < 0)
+ goto err_pci;
+
+ return 0;
+
+ err_pci:
+ efx_siena_destroy_reset_workqueue();
+ err_reset:
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ efx_fini_sriov();
+ err_sriov:
+#endif
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+ err_notifier:
+ return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+ pr_info("Solarflare Siena driver unloading\n");
+
+ pci_unregister_driver(&efx_pci_driver);
+ efx_siena_destroy_reset_workqueue();
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ efx_fini_sriov();
+#endif
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+ "Michael Brown <mbrown@fensystems.co.uk>");
+MODULE_DESCRIPTION("Solarflare Siena network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h
new file mode 100644
index 000000000000..27d1d3f19cae
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_EFX_H
+#define EFX_EFX_H
+
+#include <linux/indirect_call_wrapper.h>
+#include "net_driver.h"
+#include "filter.h"
+
+/* TX */
+void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb);
+static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
+ __efx_siena_enqueue_skb, tx_queue, skb);
+}
+int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data);
+
+/* RX */
+void __efx_siena_rx_packet(struct efx_channel *channel);
+void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
+static inline void efx_rx_flush_packet(struct efx_channel *channel)
+{
+ if (channel->rx_pkt_n_frags)
+ __efx_siena_rx_packet(channel);
+}
+
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx))
+
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \
+ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
+static inline bool efx_rss_enabled(struct efx_nic *efx)
+{
+ return efx->rss_spread > 1;
+}
+
+/* Filters */
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
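
A hedged usage sketch of the insert path. The efx_filter_init_rx() and efx_filter_set_eth_local() helpers live in filter.h; the wrapper function name, priority choice, flags and error handling here are illustrative, not a prescribed recipe:

/* Hypothetical helper: steer frames for one MAC address to a given queue */
static int example_steer_mac_to_queue(struct efx_nic *efx,
				      const u8 *mac_addr,
				      unsigned int rx_queue_index)
{
	struct efx_filter_spec spec;
	s32 filter_id;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rx_queue_index);
	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, mac_addr);

	filter_id = efx_filter_insert_filter(efx, &spec, false);
	if (filter_id < 0)
		return filter_id; /* e.g. -EPERM or -EEXIST, as documented above */
	return 0;
}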
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
+
+/* RSS contexts */
+static inline bool efx_rss_active(struct efx_rss_context *ctx)
+{
+ return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
+}
+
+/* Ethtool support */
+extern const struct ethtool_ops efx_siena_ethtool_ops;
+
+/* Global */
+unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
+int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
+
+/* Update the generic software stats in the passed stats array */
+void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats);
+
+/* MTD */
+#ifdef CONFIG_SFC_SIENA_MTD
+int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
+void efx_siena_mtd_rename(struct efx_nic *efx);
+void efx_siena_mtd_remove(struct efx_nic *efx);
+#else
+static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_siena_mtd_rename(struct efx_nic *efx) {}
+static inline void efx_siena_mtd_remove(struct efx_nic *efx) {}
+#endif
+
+#ifdef CONFIG_SFC_SIENA_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+#endif
+
+static inline void efx_device_detach_sync(struct efx_nic *efx)
+{
+ struct net_device *dev = efx->net_dev;
+
+ /* Lock/freeze all TX queues so that we can be sure the
+ * TX scheduler is stopped when we're done and before
+ * netif_device_present() becomes false.
+ */
+ netif_tx_lock_bh(dev);
+ netif_device_detach(dev);
+ netif_tx_unlock_bh(dev);
+}
+
+static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
+{
+ if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+ netif_device_attach(efx->net_dev);
+}
+
+static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
+{
+ if (WARN_ON(down_read_trylock(sem))) {
+ up_read(sem);
+ return false;
+ }
+ return true;
+}
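
The helper above asserts write ownership by the only means a plain rw_semaphore offers: if down_read_trylock() succeeds, no writer could have held the lock, so the WARN fires. A hedged caller sketch (the function name is illustrative):

static void example_filter_table_update(struct efx_nic *efx)
{
	/* Caller is expected to hold efx->filter_sem for write */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;	/* WARN already fired; don't touch shared state */
	/* ... safe to mutate state protected by filter_sem ... */
}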
+
+int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n,
+ struct xdp_frame **xdpfs, bool flush);
+
+#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
new file mode 100644
index 000000000000..06ed74994e36
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -0,0 +1,1368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/filter.h>
+#include "efx_channels.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "nic.h"
+#include "sriov.h"
+#include "workarounds.h"
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+unsigned int efx_siena_rss_cpus;
+
+static unsigned int irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned int irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+static const struct efx_channel_type efx_default_channel_type;
+
+/*************
+ * INTERRUPTS
+ *************/
+
+static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
+{
+ cpumask_var_t filter_mask;
+ unsigned int count;
+ int cpu;
+
+ if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ cpumask_copy(filter_mask, cpu_online_mask);
+ if (local_node)
+ cpumask_and(filter_mask, filter_mask,
+ cpumask_of_pcibus(efx->pci_dev->bus));
+
+ count = 0;
+ for_each_cpu(cpu, filter_mask) {
+ ++count;
+ cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
+ }
+
+ free_cpumask_var(filter_mask);
+
+ return count;
+}
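
In effect this counts one CPU per physical core rather than per hardware thread. A worked example (the topology is illustrative):

/* Example: 8 online CPUs arranged as 4 cores x 2 SMT threads, all local.
 * Each iteration counts one CPU and removes its whole sibling set from
 * filter_mask, so the loop runs 4 times and returns 4: one RSS channel
 * per physical core instead of one per hardware thread.
 */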
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ unsigned int count;
+
+ if (efx_siena_rss_cpus) {
+ count = efx_siena_rss_cpus;
+ } else {
+ count = count_online_cores(efx, true);
+
+ /* If no online CPUs in the local node, fall back to any online CPUs */
+ if (count == 0)
+ count = count_online_cores(efx, false);
+ }
+
+ if (count > EFX_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
+ warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EFX_MAX_RX_QUEUES);
+ count = EFX_MAX_RX_QUEUES;
+ }
+
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
+ }
+#endif
+
+ return count;
+}
+
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int tx_per_ev;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_siena_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+ tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+
+ max_channels = min_t(unsigned int, vec_count, max_channels);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels >= max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+ } else if (n_channels + n_xdp_tx > efx->max_vis) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+ n_xdp_tx, n_channels, efx->max_vis);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+ } else if (n_channels + n_xdp_ev > max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+
+ n_xdp_ev = max_channels - n_channels;
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+ DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
+ } else {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+ }
+
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = tx_per_ev;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_ev * tx_per_ev, n_xdp_ev);
+ } else {
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ }
+
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ efx->n_channels = n_channels;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_siena_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+ efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+ efx->xdp_channel_offset = n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
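
The XDP sizing at the top of this function is easier to follow with numbers (all values illustrative; the real limits come from the NIC type):

/* Suppose EFX_MAX_EVQ_SIZE = 4096 and EFX_TXQ_MAX_ENT(efx) = 1024:
 *   tx_per_ev = 4096 / 1024 = 4, capped at EFX_MAX_TXQ_PER_CHANNEL.
 * With 16 possible CPUs:
 *   n_xdp_tx = 16, n_xdp_ev = DIV_ROUND_UP(16, 4) = 4,
 * i.e. four extra event queues carry the sixteen per-CPU XDP TX queues,
 * provided enough MSI-X vectors and VIs remain after the normal channels.
 */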
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+int efx_siena_probe_interrupts(struct efx_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int rss_spread;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ else
+ return rc;
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ for (i = 0; i < efx->n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 0;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ else
+ return rc;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible, before XDP channels */
+ efx->n_extra_tx_channels = 0;
+ j = efx->xdp_channel_offset;
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
+ }
+ }
+
+ rss_spread = efx->n_rx_channels;
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((rss_spread > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ rss_spread : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = rss_spread;
+
+ return 0;
+}
+
+#if defined(CONFIG_SMP)
+void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
+{
+ const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+	/* If no online CPUs in local node, fall back to any online CPU */
+ if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
+ numa_mask = cpu_online_mask;
+
+ cpu = -1;
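+	/* Spread channels round-robin over the NUMA-local online CPUs,
+	 * wrapping back to the first local CPU once the mask is exhausted.
+	 */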
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first_and(cpu_online_mask, numa_mask);
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+void
+efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
+{
+}
+
+void
+efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
+{
+}
+#endif /* CONFIG_SMP */
+
+void efx_siena_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+/***************
+ * EVENT QUEUES
+ ***************/
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+static int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions.
+ */
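+	/* For example, 512 RX + 512 TX + 128 extra = 1152 events,
+	 * rounded up to a 2048-entry queue.
+	 */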
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+static int efx_init_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_siena_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_siena_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ channel->enabled = false;
+}
+
+static void efx_fini_eventq(struct efx_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+static void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_RFS_ACCEL
+static void efx_filter_rfs_expire(struct work_struct *data)
+{
+ struct delayed_work *dwork = to_delayed_work(data);
+ struct efx_channel *channel;
+ unsigned int time, quota;
+
+ channel = container_of(dwork, struct efx_channel, filter_work);
+ time = jiffies - channel->rfs_last_expiry;
+ quota = channel->rfs_filter_count * time / (30 * HZ);
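+	/* e.g. 200 filters idle for 3*HZ jiffies yield a quota of 20,
+	 * just enough to trigger an expiry scan of up to 20 filters.
+	 */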
+ if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
+ min(channel->rfs_filter_count, quota)))
+ channel->rfs_last_expiry += time;
+ /* Ensure we do more work eventually even if NAPI poll is not happening */
+ schedule_delayed_work(dwork, 30 * HZ);
+}
+#endif
+
+/* Allocate and initialise a channel structure. */
+static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
+
+ for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = -1;
+ tx_queue->label = j;
+ tx_queue->channel = channel;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
+
+ return channel;
+}
+
+int efx_siena_init_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i);
+ if (!efx->channel[i])
+ return -ENOMEM;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+ efx_siena_interrupt_mode);
+
+ efx->max_channels = EFX_MAX_CHANNELS;
+ efx->max_tx_channels = EFX_MAX_CHANNELS;
+
+ return 0;
+}
+
+void efx_siena_fini_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ if (efx->channel[i]) {
+ kfree(efx->channel[i]);
+ efx->channel[i] = NULL;
+ }
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+static
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ tx_queue->cb_page = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_siena_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_siena_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ channel->rx_list = NULL;
+
+ return 0;
+
+fail:
+ efx_siena_remove_channel(channel);
+ return rc;
+}
+
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+ size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
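+	/* Produces names like "eth2-3", "eth2-rx-0" or "eth2-xdp-1"
+	 * depending on how channels are partitioned, assuming the
+	 * interface is named eth2.
+	 */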
+ number = channel->channel;
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (number < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
+void efx_siena_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+int efx_siena_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_siena_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_siena_remove_channels(efx);
+ return rc;
+}
+
+void efx_siena_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_siena_remove_rx_queue(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_siena_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+void efx_siena_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_siena_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
+}
+
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ unsigned int next_queue = 0;
+ int xdp_queue_number = 0;
+ int rc;
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->tx_channel_offset)
+ continue;
+
+ if (efx_channel_is_xdp_tx(channel)) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ } else {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is HW %u\n",
+ channel->channel, tx_queue->label,
+ tx_queue->queue);
+ }
+
+			/* If XDP is borrowing queues from the net stack, it
+			 * must use the queue with no csum offload, which is
+			 * the first queue of the channel
+			 * (note: tx_queue_by_type is not initialised yet)
+			 */
+ if (efx->xdp_txq_queues_mode ==
+ EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ }
+ }
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If we have more CPUs than assigned XDP TX queues, map the
+	 * remaining CPUs onto the queues that already exist.
+	 */
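+	/* e.g. 8 CPUs but only 6 XDP TX queues: XDP slots 6 and 7 wrap
+	 * back to queues 0 and 1, so those queues end up shared by two
+	 * CPUs each.
+	 */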
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+}
+
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
+static void efx_init_napi_channel(struct efx_channel *channel);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+
+int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
+ u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ unsigned int i, next_buffer_table = 0;
+ u32 old_rxq_entries, old_txq_entries;
+ int rc, rc2;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ efx_device_detach_sync(efx);
+ efx_siena_stop_all(efx);
+ efx_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
+ efx_set_xdp_channels(efx);
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_siena_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_siena_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
+ goto out;
+}
+
+int efx_siena_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+ }
+
+ efx_set_xdp_channels(efx);
+
+ rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ if (rc)
+ return rc;
+ return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+}
+
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+/*************
+ * START/STOP
+ *************/
+
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ efx_siena_start_eventq(channel);
+ }
+
+ efx_siena_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_siena_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
+ efx_siena_mcdi_mode_poll(efx);
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_siena_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_siena_mcdi_flush_async(efx);
+}
+
+int efx_siena_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ /* TODO: Is this really a bug? */
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+void efx_siena_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+void efx_siena_start_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ efx_for_each_channel_rev(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_siena_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ efx_siena_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ efx_siena_stop_eventq(channel);
+ efx_siena_fast_push_rx_descriptors(rx_queue, false);
+ efx_siena_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+}
+
+void efx_siena_stop_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+ int rc = 0;
+
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_siena_stop_eventq(channel);
+ efx_siena_start_eventq(channel);
+ }
+ }
+
+ if (efx->type->fini_dmaq)
+ rc = efx->type->fini_dmaq(efx);
+
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_siena_fini_rx_queue(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_siena_fini_tx_queue(tx_queue);
+ }
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ efx_rx_flush_packet(channel);
+ efx_siena_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl,
+ tx_queue->bytes_compl);
+ }
+ }
+
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
+ return spent;
+}
+
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
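+	/* Adaptive moderation: a low score (few events per interrupt)
+	 * shortens the moderation delay to favour latency; a high score
+	 * lengthens it, capped near the configured RX maximum, to favour
+	 * throughput.
+	 */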
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int time;
+#endif
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ xdp_do_flush_map();
+
+ if (spent < budget) {
+ if (efx_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ efx_update_irq_mod(efx, channel);
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ /* Perhaps expire some ARFS filters */
+ time = jiffies - channel->rfs_last_expiry;
+ /* Would our quota be >= 20? */
+ if (channel->rfs_filter_count * time >= 600 * HZ)
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
+#endif
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ if (napi_complete_done(napi, spent))
+ efx_nic_eventq_read_ack(channel);
+ }
+
+ return spent;
+}
+
+static void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
+}
+
+void efx_siena_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+
+ channel->napi_dev = NULL;
+}
+
+void efx_siena_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_siena_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h
new file mode 100644
index 000000000000..c4b95a2d770f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SIENA_CHANNELS_H
+#define EFX_SIENA_CHANNELS_H
+
+extern unsigned int efx_siena_interrupt_mode;
+extern unsigned int efx_siena_rss_cpus;
+
+int efx_siena_probe_interrupts(struct efx_nic *efx);
+void efx_siena_remove_interrupts(struct efx_nic *efx);
+int efx_siena_enable_interrupts(struct efx_nic *efx);
+void efx_siena_disable_interrupts(struct efx_nic *efx);
+
+void efx_siena_set_interrupt_affinity(struct efx_nic *efx);
+void efx_siena_clear_interrupt_affinity(struct efx_nic *efx);
+
+void efx_siena_start_eventq(struct efx_channel *channel);
+void efx_siena_stop_eventq(struct efx_channel *channel);
+
+int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
+ u32 txq_entries);
+void efx_siena_set_channel_names(struct efx_nic *efx);
+int efx_siena_init_channels(struct efx_nic *efx);
+int efx_siena_probe_channels(struct efx_nic *efx);
+int efx_siena_set_channels(struct efx_nic *efx);
+void efx_siena_remove_channel(struct efx_channel *channel);
+void efx_siena_remove_channels(struct efx_nic *efx);
+void efx_siena_fini_channels(struct efx_nic *efx);
+void efx_siena_start_channels(struct efx_nic *efx);
+void efx_siena_stop_channels(struct efx_nic *efx);
+
+void efx_siena_init_napi(struct efx_nic *efx);
+void efx_siena_fini_napi(struct efx_nic *efx);
+
+void efx_siena_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
new file mode 100644
index 000000000000..1fd396b00bfb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -0,0 +1,1408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/filter.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/gre.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "mcdi_port_common.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for POWER systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
+};
+
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_siena_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_siena_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+int efx_siena_create_reset_workqueue(void)
+{
+ reset_workqueue = create_singlethread_workqueue("sfc_siena_reset");
+ if (!reset_workqueue) {
+ printk(KERN_ERR "Failed to create reset workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void efx_siena_queue_reset_work(struct efx_nic *efx)
+{
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+void efx_siena_flush_reset_workqueue(struct efx_nic *efx)
+{
+ cancel_work_sync(&efx->reset_work);
+}
+
+void efx_siena_destroy_reset_workqueue(void)
+{
+ if (reset_workqueue) {
+ destroy_workqueue(reset_workqueue);
+ reset_workqueue = NULL;
+ }
+}
+
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
+{
+ if (efx->type->reconfigure_mac) {
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx, mtu_only);
+ up_read(&efx->filter_sem);
+ }
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly.
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx_siena_mac_reconfigure(efx, false);
+ mutex_unlock(&efx->mac_lock);
+}
+
+int efx_siena_set_mac_address(struct net_device *net_dev, void *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct sockaddr *addr = data;
+ u8 *new_addr = addr->sa_data;
+ u8 old_addr[6];
+ int rc;
+
+ if (!is_valid_ether_addr(new_addr)) {
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
+ return -EADDRNOTAVAIL;
+ }
+
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
+ eth_hw_addr_set(net_dev, new_addr);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ eth_hw_addr_set(net_dev, old_addr);
+ return rc;
+ }
+ }
+
+ /* Reconfigure the MAC */
+ mutex_lock(&efx->mac_lock);
+ efx_siena_mac_reconfigure(efx, false);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+void efx_siena_set_rx_mode(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->port_enabled)
+ queue_work(efx->workqueue, &efx->mac_work);
+ /* Otherwise efx_start_port() will do this */
+}
+
+int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+ rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (rc)
+ return rc;
+ }
+
+ /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
+ * If rx-fcs is changed, mac_reconfigure updates that too.
+ */
+ if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXFCS)) {
+ /* efx_siena_set_rx_mode() will schedule MAC work to update filters
+		 * when the new features are finally set in net_dev.
+ */
+ efx_siena_set_rx_mode(net_dev);
+ }
+
+ return 0;
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the port's
+ * TX queue stopped while the link is down.
+ */
+void efx_siena_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN
+	 * can only be generated between NETDEV_UP and NETDEV_DOWN.
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom + tailroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
+
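+	/* With a 4 KiB page this leaves an MTU somewhat below 4096 bytes;
+	 * the exact figure depends on the NIC's RX prefix and padding.
+	 */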
+ return PAGE_SIZE - overhead;
+}
+
+/* Context: process, rtnl_lock() held. */
+int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_siena_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_siena_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_device_detach_sync(efx);
+ efx_siena_stop_all(efx);
+
+ mutex_lock(&efx->mac_lock);
+ net_dev->mtu = new_mtu;
+ efx_siena_mac_reconfigure(efx, true);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_siena_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then it is likely a port
+ * reconfiguration is already in place, which will likely do
+ * most of the work of monitor() anyway.
+ */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled && efx->type->monitor)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx_siena_start_monitor(efx);
+}
+
+void efx_siena_start_monitor(struct efx_nic *efx)
+{
+ if (efx->type->monitor)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (MTU, checksum offload), or
+ * to clear hardware error conditions.
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+ netdev_features_t old_features = efx->net_dev->features;
+ bool old_rx_scatter = efx->rx_scatter;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);
+
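+	/* Three outcomes: the buffer fits in one page (scatter only if the
+	 * hardware always scatters), it does not fit but the NIC can
+	 * scatter (split the frame across page-sized buffers), or we must
+	 * fall back to a higher-order allocation.
+	 */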
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_siena_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
+ /* RX filters may also have scatter-enabled flags */
+ if ((efx->rx_scatter != old_rx_scatter) &&
+ efx->type->filter_update_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+	 * the ring completely. We wake it when halfway back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
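+	/* For example, with 1024 TX entries and a worst-case skb needing
+	 * N descriptors (N depends on the NIC's TSO limits), we stop the
+	 * queue once 1024 - N entries are in use and wake it at half that.
+	 */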
+
+ /* Initialise the channels */
+ efx_siena_start_channels(efx);
+
+ efx_siena_ptp_start_datapath(efx);
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
+
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ efx_siena_ptp_stop_datapath(efx);
+
+ efx_siena_stop_channels(efx);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* Equivalent to efx_siena_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_siena_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+}
+
+void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+ efx->wanted_fc = wanted_fc;
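+	/* The resulting advertisement encodes flow control as: RX+TX ->
+	 * Pause, RX only -> Pause|Asym_Pause, TX only -> Asym_Pause; the
+	 * XOR below flips Asym_Pause relative to the RX setting.
+	 */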
+ if (efx->link_advertising[0]) {
+ if (wanted_fc & EFX_FC_RX)
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ if (wanted_fc & EFX_FC_TX)
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
+ }
+}
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ efx_siena_mac_reconfigure(efx, false);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_siena_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+void efx_siena_start_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock
+ */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ efx_start_port(efx);
+ efx_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ efx_siena_start_monitor(efx);
+
+ /* Link state detection is normally event-driven; we have
+	 * to poll now because we could have missed a change.
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx_siena_mcdi_phy_poll(efx))
+ efx_siena_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (efx->type->start_stats) {
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ }
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+void efx_siena_stop_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ if (efx->type->update_stats) {
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ }
+
+ efx_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+}
+
+static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx->type->update_stats_atomic)
+ return efx->type->update_stats_atomic(efx, full_stats, core_stats);
+ return efx->type->update_stats(efx, full_stats, core_stats);
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+void efx_siena_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx_siena_update_stats_atomic(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_siena_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc = 0;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ if (efx->type->reconfigure_port)
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled.
+ */
+int efx_siena_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_siena_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_siena_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_siena_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+
+	if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset.
+ */
+void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
+ efx_siena_stop_all(efx);
+ efx_siena_disable_interrupts(efx);
+
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ mutex_lock(&efx->rss_lock);
+ efx->type->fini(efx);
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
+
+ efx_siena_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_siena_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE.
+ */
+int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
+ rc = efx_siena_mcdi_port_reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = efx_siena_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ if (efx->type->rx_restore_rss_contexts)
+ efx->type->rx_restore_rss_contexts(efx);
+ mutex_unlock(&efx->rss_lock);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_siena_start_all(efx);
+
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_siena_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc, rc2 = 0;
+ bool disabled;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ efx_device_detach_sync(efx);
+ /* efx_siena_reset_down() grabs locks that prevent recovery on EF100.
+ * EF100 reset is handled in the efx_nic_type callback below.
+ */
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ efx_siena_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
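+	/* The mask -(1 << (method + 1)) has only the bits above 'method'
+	 * set, so a completed RESET_TYPE_ALL, for example, also clears a
+	 * pending RESET_TYPE_INVISIBLE that it subsumes.
+	 */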
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+ * can respond to requests.
+ */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ if (efx_nic_rev(efx) != EFX_REV_EF100)
+ rc2 = efx_siena_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = READ_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
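+	/* fls() selects the highest-numbered, i.e. widest-scope, reset
+	 * type of those pending.
+	 */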
+
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_siena_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in efx_siena_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_siena_reset(efx, method);
+
+ rtnl_unlock();
+}
+
+void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
+ case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (READ_ONCE(efx->state) != STATE_READY)
+ return;
+
+ /* efx_process_channel() will no longer read events once a
+	 * reset is scheduled. So switch back to polled MCDI completions.
+ */
+ efx_siena_mcdi_mode_poll(efx);
+
+ efx_siena_queue_reset_work(efx);
+}
+
+/**************************************************************************
+ *
+ * Dummy NIC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_siena_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+
+void efx_siena_port_dummy_op_void(struct efx_nic *efx) {}
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+int efx_siena_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int rc = -ENOMEM;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_SIENA_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ efx_siena_selftest_async_init(efx);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
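+	/* rx_ip_align is the extra head padding needed so that, together
+	 * with the hardware RX prefix, received packets keep the alignment
+	 * NET_IP_ALIGN asks for (modulo 4).
+	 */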
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ INIT_LIST_HEAD(&efx->rss_context.list);
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ mutex_init(&efx->rss_lock);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
+ spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ mutex_init(&efx->mac_lock);
+ init_rwsem(&efx->filter_sem);
+#ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
+#endif
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ efx->tx_queues_per_channel = 1;
+ efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+
+ efx->mem_bar = UINT_MAX;
+
+ rc = efx_siena_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_siena_fini_struct(efx);
+ return rc;
+}
+
+void efx_siena_fini_struct(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
+ efx_siena_fini_channels(efx);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ int rc;
+
+ efx->mem_bar = UINT_MAX;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long)dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ if (!efx->membase_phys) {
+ netif_err(efx, probe, efx->net_dev,
+ "ERROR: No BAR%d mapping from the BIOS. "
+ "Try pci=realloc on the kernel command line\n", bar);
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = pci_request_region(pci_dev, bar, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR[%d] failed\n", bar);
+ rc = -EIO;
+ goto fail3;
+ }
+ efx->mem_bar = bar;
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR[%d] at %llx+%x\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+fail4:
+ pci_release_region(efx->pci_dev, bar);
+fail3:
+ efx->membase_phys = 0;
+fail2:
+ pci_disable_device(efx->pci_dev);
+fail1:
+ return rc;
+}
+
+void efx_siena_fini_io(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, efx->mem_bar);
+ efx->membase_phys = 0;
+ efx->mem_bar = UINT_MAX;
+ }
+
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+static ssize_t mcdi_logging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+
+static ssize_t mcdi_logging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+
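+/* Exposed via sysfs on the PCI device, e.g.
+ * /sys/bus/pci/devices/<bdf>/mcdi_logging; any written value whose
+ * first character is not '0' enables logging.
+ */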
+static DEVICE_ATTR_RW(mcdi_logging);
+
+void efx_siena_init_mcdi_logging(struct efx_nic *efx)
+{
+ int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ }
+}
+
+void efx_siena_fini_mcdi_logging(struct efx_nic *efx)
+{
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+}
+#endif
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ efx_device_detach_sync(efx);
+
+ efx_siena_stop_all(efx);
+ efx_siena_disable_interrupts(efx);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = efx_siena_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "efx_siena_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callbacks unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+const struct pci_error_handlers efx_siena_err_handlers = {
+ .error_detected = efx_io_error_detected,
+ .slot_reset = efx_io_slot_reset,
+ .resume = efx_io_resume,
+};
+
+/* Determine whether the NIC will be able to handle TX offloads for a given
+ * encapsulated packet.
+ */
+static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct gre_base_hdr *greh;
+ __be16 dst_port;
+ u8 ipproto;
+
+ /* Does the NIC support encap offloads?
+ * If not, we should never get here, because we shouldn't have
+ * advertised encap offload feature flags in the first place.
+ */
+ if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
+ return false;
+
+ /* Determine encapsulation protocol in use */
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ipproto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ /* If there are extension headers, this will cause us to
+ * think we can't offload something that we maybe could have.
+ */
+ ipproto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ /* Not IP, so can't offload it */
+ return false;
+ }
+ switch (ipproto) {
+ case IPPROTO_GRE:
+ /* We support NVGRE but not IP over GRE or random gretaps.
+ * Specifically, the NIC will accept GRE as encapsulated if
+ * the inner protocol is Ethernet, but only handle it
+ * correctly if the GRE header is 8 bytes long. Moreover,
+ * it will not update the Checksum or Sequence Number fields
+ * if they are present. (The Routing Present flag,
+ * GRE_ROUTING, cannot be set else the header would be more
+ * than 8 bytes long; so we don't have to worry about it.)
+ */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return false;
+ if (ntohs(skb->inner_protocol) != ETH_P_TEB)
+ return false;
+ if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
+ return false;
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ return !(greh->flags & (GRE_CSUM | GRE_SEQ));
+ case IPPROTO_UDP:
+ /* If the port is registered for a UDP tunnel, we assume the
+ * packet is for that tunnel, and the NIC will handle it as
+ * such. If not, the NIC won't know what to do with it.
+ */
+ dst_port = udp_hdr(skb)->dest;
+ return efx->type->udp_tnl_has_port(efx, dst_port);
+ default:
+ return false;
+ }
+}
+
+netdev_features_t efx_siena_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (skb->encapsulation) {
+ if (features & NETIF_F_GSO_MASK)
+ /* Hardware can only do TSO with at most 208 bytes
+ * of headers.
+ */
+ if (skb_inner_transport_offset(skb) >
+ EFX_TSO2_MAX_HDRLEN)
+ features &= ~(NETIF_F_GSO_MASK);
+ if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
+ if (!efx_can_encap_offloads(efx, skb))
+ features &= ~(NETIF_F_GSO_MASK |
+ NETIF_F_CSUM_MASK);
+ }
+ return features;
+}
+
+int efx_siena_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->get_phys_port_id)
+ return efx->type->get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_siena_get_phys_port_name(struct net_device *net_dev,
+ char *name, size_t len)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
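+	/* snprintf() returns the length the full string would have needed,
+	 * so >= len means the port name was truncated.
+	 */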
+ if (snprintf(name, len, "p%u", efx->port_num) >= len)
+ return -EINVAL;
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.h b/drivers/net/ethernet/sfc/siena/efx_common.h
new file mode 100644
index 000000000000..aeb92f4e34b7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_common.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size);
+void efx_siena_fini_io(struct efx_nic *efx);
+int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+ struct net_device *net_dev);
+void efx_siena_fini_struct(struct efx_nic *efx);
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+void efx_siena_link_clear_advertising(struct efx_nic *efx);
+void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc);
+
+void efx_siena_start_all(struct efx_nic *efx);
+void efx_siena_stop_all(struct efx_nic *efx);
+
+void efx_siena_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats);
+
+int efx_siena_create_reset_workqueue(void);
+void efx_siena_queue_reset_work(struct efx_nic *efx);
+void efx_siena_flush_reset_workqueue(struct efx_nic *efx);
+void efx_siena_destroy_reset_workqueue(void);
+
+void efx_siena_start_monitor(struct efx_nic *efx);
+
+int __efx_siena_reconfigure_port(struct efx_nic *efx);
+int efx_siena_reconfigure_port(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+int efx_siena_try_recovery(struct efx_nic *efx);
+void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method);
+void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue);
+int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_siena_reset(struct efx_nic *efx, enum reset_type method);
+void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type);
+
+/* Dummy PHY ops for PHY drivers */
+int efx_siena_port_dummy_op_int(struct efx_nic *efx);
+void efx_siena_port_dummy_op_void(struct efx_nic *efx);
+
+static inline int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static inline void efx_schedule_channel(struct efx_channel *channel)
+{
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d scheduling NAPI poll on CPU%d\n",
+ channel->channel, raw_smp_processor_id());
+
+ napi_schedule(&channel->napi_str);
+}
+
+static inline void efx_schedule_channel_irq(struct efx_channel *channel)
+{
+ channel->event_test_cpu = raw_smp_processor_id();
+ efx_schedule_channel(channel);
+}
+
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+void efx_siena_init_mcdi_logging(struct efx_nic *efx);
+void efx_siena_fini_mcdi_logging(struct efx_nic *efx);
+#else
+static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {}
+#endif
+
+void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
+int efx_siena_set_mac_address(struct net_device *net_dev, void *data);
+void efx_siena_set_rx_mode(struct net_device *net_dev);
+int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data);
+void efx_siena_link_status_changed(struct efx_nic *efx);
+unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx);
+int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu);
+
+extern const struct pci_error_handlers efx_siena_err_handlers;
+
+netdev_features_t efx_siena_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+
+int efx_siena_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
+
+int efx_siena_get_phys_port_name(struct net_device *net_dev,
+ char *name, size_t len);
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/enum.h b/drivers/net/ethernet/sfc/siena/enum.h
new file mode 100644
index 000000000000..25b28b3969d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/enum.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_ENUM_H
+#define EFX_ENUM_H
+
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_DATA: data path loopback
+ * @LOOPBACK_GMAC: loopback within GMAC
+ * @LOOPBACK_XGMII: loopback after XMAC
+ * @LOOPBACK_XGXS: loopback within BPX after XGXS
+ * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
+ * @LOOPBACK_GMII: loopback within BPX after GMAC
+ * @LOOPBACK_SGMII: loopback within BPX within SGMII
+ * @LOOPBACK_XGBR: loopback within BPX within XGBR
+ * @LOOPBACK_XFI: loopback within BPX before XFI serdes
+ * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
+ * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
+ * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
+ * @LOOPBACK_XFI_FAR: loopback after XFI serdes
+ * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
+ * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
+ * @LOOPBACK_XPORT: cross port loopback
+ * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
+ * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
+ * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
+ * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
+ * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
+ * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
+ * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
+ * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
+ */
+/* Please keep up-to-date w.r.t. the following two #defines */
+enum efx_loopback_mode {
+ LOOPBACK_NONE = 0,
+ LOOPBACK_DATA = 1,
+ LOOPBACK_GMAC = 2,
+ LOOPBACK_XGMII = 3,
+ LOOPBACK_XGXS = 4,
+ LOOPBACK_XAUI = 5,
+ LOOPBACK_GMII = 6,
+ LOOPBACK_SGMII = 7,
+ LOOPBACK_XGBR = 8,
+ LOOPBACK_XFI = 9,
+ LOOPBACK_XAUI_FAR = 10,
+ LOOPBACK_GMII_FAR = 11,
+ LOOPBACK_SGMII_FAR = 12,
+ LOOPBACK_XFI_FAR = 13,
+ LOOPBACK_GPHY = 14,
+ LOOPBACK_PHYXS = 15,
+ LOOPBACK_PCS = 16,
+ LOOPBACK_PMAPMD = 17,
+ LOOPBACK_XPORT = 18,
+ LOOPBACK_XGMII_WS = 19,
+ LOOPBACK_XAUI_WS = 20,
+ LOOPBACK_XAUI_WS_FAR = 21,
+ LOOPBACK_XAUI_WS_NEAR = 22,
+ LOOPBACK_GMII_WS = 23,
+ LOOPBACK_XFI_WS = 24,
+ LOOPBACK_XFI_WS_FAR = 25,
+ LOOPBACK_PHYXS_WS = 26,
+ LOOPBACK_MAX
+};
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
+ (1 << LOOPBACK_GMAC) | \
+ (1 << LOOPBACK_XGMII)| \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI) | \
+ (1 << LOOPBACK_GMII) | \
+ (1 << LOOPBACK_SGMII) | \
+ (1 << LOOPBACK_XGBR) | \
+ (1 << LOOPBACK_XFI) | \
+ (1 << LOOPBACK_XAUI_FAR) | \
+ (1 << LOOPBACK_GMII_FAR) | \
+ (1 << LOOPBACK_SGMII_FAR) | \
+ (1 << LOOPBACK_XFI_FAR) | \
+ (1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx) \
+ ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
+ ~(1 << LOOPBACK_NONE))
+
+#define LOOPBACK_MASK(_efx) \
+ (1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+ (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
+
+#define LOOPBACK_EXTERNAL(_efx) \
+ (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
+#define LOOPBACK_CHANGED(_from, _to, _mask) \
+ (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
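+/* e.g. LOOPBACK_OUT_OF(from, to, LOOPBACKS_INTERNAL) is true when
+ * 'from' is in an internal loopback mode and 'to' is not.
+ */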
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
+ * other values specify reasons, which efx_siena_schedule_reset() will
+ * choose a method for.
+ *
+ * Reset methods are numbered in order of increasing scope.
+ *
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
+ * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_DMA_ERROR: DMA error
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
+ */
+enum reset_type {
+ RESET_TYPE_INVISIBLE,
+ RESET_TYPE_RECOVER_OR_ALL,
+ RESET_TYPE_ALL,
+ RESET_TYPE_WORLD,
+ RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_DATAPATH,
+ RESET_TYPE_MC_BIST,
+ RESET_TYPE_DISABLE,
+ RESET_TYPE_MAX_METHOD,
+ RESET_TYPE_TX_WATCHDOG,
+ RESET_TYPE_INT_ERROR,
+ RESET_TYPE_DMA_ERROR,
+ RESET_TYPE_TX_SKIP,
+ RESET_TYPE_MC_FAILURE,
+ /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+ * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+ * We encode this by having its enum value be greater than
+ * RESET_TYPE_MAX_METHOD.
+ */
+ RESET_TYPE_MCDI_TIMEOUT,
+ RESET_TYPE_MAX,
+};
+
+#endif /* EFX_ENUM_H */
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
new file mode 100644
index 000000000000..e4ec589216c1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
+#include "filter.h"
+#include "nic.h"
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+ enum ethtool_phys_id_state state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ enum efx_led_mode mode = EFX_LED_DEFAULT;
+
+ switch (state) {
+ case ETHTOOL_ID_ON:
+ mode = EFX_LED_ON;
+ break;
+ case ETHTOOL_ID_OFF:
+ mode = EFX_LED_OFF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ mode = EFX_LED_DEFAULT;
+ break;
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ }
+
+ return efx_siena_mcdi_set_id_led(efx, mode);
+}
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return efx_siena_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ efx_siena_get_regs(efx, buf);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event). Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions. In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
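+ * For example, with shared channels 'ethtool -C ethX rx-usecs 60'
+ * also moves TX moderation to 60us, while requesting different RX and
+ * TX values in a single call fails.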
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields. We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields. However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields. To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields. If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
+ */
+
+static int efx_ethtool_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ unsigned int tx_usecs, rx_usecs;
+ bool rx_adaptive;
+
+ efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+
+ coalesce->tx_coalesce_usecs = tx_usecs;
+ coalesce->tx_coalesce_usecs_irq = tx_usecs;
+ coalesce->rx_coalesce_usecs = rx_usecs;
+ coalesce->rx_coalesce_usecs_irq = rx_usecs;
+ coalesce->use_adaptive_rx_coalesce = rx_adaptive;
+
+ return 0;
+}
+
+static int efx_ethtool_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ unsigned int tx_usecs, rx_usecs;
+ bool adaptive, rx_may_override_tx;
+ int rc;
+
+ efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+
+ if (coalesce->rx_coalesce_usecs != rx_usecs)
+ rx_usecs = coalesce->rx_coalesce_usecs;
+ else
+ rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+ adaptive = coalesce->use_adaptive_rx_coalesce;
+
+ /* If channels are shared, TX IRQ moderation can be quietly
+ * overridden unless it is changed from its old value.
+ */
+ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
+ coalesce->tx_coalesce_usecs_irq == tx_usecs);
+ if (coalesce->tx_coalesce_usecs != tx_usecs)
+ tx_usecs = coalesce->tx_coalesce_usecs;
+ else
+ tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+ rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+ rx_may_override_tx);
+ if (rc != 0)
+ return rc;
+
+ efx_for_each_channel(channel, efx)
+ efx->type->push_irq_moderation(channel);
+
+ return 0;
+}
+
+static void
+efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+}
+
+static int
+efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 txq_entries;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+ netif_err(efx, drv, efx->net_dev,
+ "RX queues cannot be smaller than %u\n",
+ EFX_RXQ_MIN_ENT);
+ return -EINVAL;
+ }
+
+ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+ if (txq_entries != ring->tx_pending)
+ netif_warn(efx, drv, efx->net_dev,
+ "increasing TX queue size to minimum of %u\n",
+ txq_entries);
+
+ return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries);
+}
+
+static void efx_ethtool_get_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->get_wol(efx, wol);
+}
+
+static int efx_ethtool_set_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->set_wol(efx, wol->wolopts);
+}
+
+static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->get_fec_stats)
+ efx->type->get_fec_stats(efx, fec_stats);
+}
+
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* Software capabilities */
+ ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE);
+ ts_info->phc_index = -1;
+
+ efx_siena_ptp_get_ts_info(efx, ts_info);
+ return 0;
+}
+
+const struct ethtool_ops efx_siena_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_drvinfo = efx_siena_ethtool_get_drvinfo,
+ .get_regs_len = efx_ethtool_get_regs_len,
+ .get_regs = efx_ethtool_get_regs,
+ .get_msglevel = efx_siena_ethtool_get_msglevel,
+ .set_msglevel = efx_siena_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = efx_ethtool_get_coalesce,
+ .set_coalesce = efx_ethtool_set_coalesce,
+ .get_ringparam = efx_ethtool_get_ringparam,
+ .set_ringparam = efx_ethtool_set_ringparam,
+ .get_pauseparam = efx_siena_ethtool_get_pauseparam,
+ .set_pauseparam = efx_siena_ethtool_set_pauseparam,
+ .get_sset_count = efx_siena_ethtool_get_sset_count,
+ .self_test = efx_siena_ethtool_self_test,
+ .get_strings = efx_siena_ethtool_get_strings,
+ .set_phys_id = efx_ethtool_phys_id,
+ .get_ethtool_stats = efx_siena_ethtool_get_stats,
+ .get_wol = efx_ethtool_get_wol,
+ .set_wol = efx_ethtool_set_wol,
+ .reset = efx_siena_ethtool_reset,
+ .get_rxnfc = efx_siena_ethtool_get_rxnfc,
+ .set_rxnfc = efx_siena_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size,
+ .get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
+ .get_rxfh = efx_siena_ethtool_get_rxfh,
+ .set_rxfh = efx_siena_ethtool_set_rxfh,
+ .get_rxfh_context = efx_siena_ethtool_get_rxfh_context,
+ .set_rxfh_context = efx_siena_ethtool_set_rxfh_context,
+ .get_ts_info = efx_ethtool_get_ts_info,
+ .get_module_info = efx_siena_ethtool_get_module_info,
+ .get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
+ .get_link_ksettings = efx_siena_ethtool_get_link_ksettings,
+ .set_link_ksettings = efx_siena_ethtool_set_link_ksettings,
+ .get_fec_stats = efx_ethtool_get_fec_stats,
+ .get_fecparam = efx_siena_ethtool_get_fecparam,
+ .set_fecparam = efx_siena_ethtool_set_fecparam,
+};
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
new file mode 100644
index 000000000000..f590e87e5a23
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -0,0 +1,1340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "ethtool_common.h"
+#include "mcdi_port_common.h"
+
+struct efx_sw_stat_desc {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned int offset;
+ u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
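+/* Both arms of the ?: in .offset are the same offsetof(); the pointer
+ * comparison exists only so the compiler warns when 'field' does not
+ * have type 'field_type'.
+ */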
+
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+ EFX_ETHTOOL_STAT(field, channel, field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+ EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ efx_siena_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->msg_enable;
+}
+
+void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ efx->msg_enable = msg_enable;
+}
+
+void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u8 wanted_fc, old_fc;
+ u32 old_adv;
+ int rc = 0;
+
+ mutex_lock(&efx->mac_lock);
+
+ wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+ (pause->tx_pause ? EFX_FC_TX : 0) |
+ (pause->autoneg ? EFX_FC_AUTO : 0));
+
+ if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
+
+ old_adv = efx->link_advertising[0];
+ old_fc = efx->wanted_fc;
+ efx_siena_link_set_wanted_fc(efx, wanted_fc);
+ if (efx->link_advertising[0] != old_adv ||
+ (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+ rc = efx_siena_mcdi_port_reconfigure(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
+ goto out;
+ }
+ }
+
+	/* Reconfigure the MAC. The PHY *may* generate a link state change
+	 * event if the user just changed the advertised capabilities, but
+	 * there's no harm in doing this twice.
+	 */
+ efx_siena_mac_reconfigure(efx, false);
+
+out:
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str, sizeof(unit_str),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
+ }
+}
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_siena_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->label],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->label],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Get the number of self-test strings, the strings themselves, and/or
+ * the test results. Return the number of strings (== number of test
+ * results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx_siena_mcdi_phy_test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL);
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+void efx_siena_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_self_tests *efx_tests;
+ bool already_up;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
+ if (efx->state != STATE_READY) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev, NULL);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
+ goto out;
+ }
+ }
+
+ rc = efx_siena_selftest(efx, efx_tests, test->flags);
+
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+out:
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+ size_t n_stats = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
+
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "rx-%d.rx_packets", channel->channel);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
+ return n_stats;
+}
+
+int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_describe_per_queue_stats(efx, NULL) +
+ efx_siena_ptp_describe_stats(efx, NULL);
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+void efx_siena_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strscpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ strings += (efx_describe_per_queue_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ efx_siena_ptp_describe_stats(efx, strings);
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+void efx_siena_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ const struct efx_sw_stat_desc *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int i;
+
+ spin_lock_bh(&efx->stats_lock);
+
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
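+	/* update_stats() returns how many u64 values it wrote, so 'data'
+	 * now points just past the hardware stats.
+	 */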
+
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ *data = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ *data += tx_queue->tx_packets;
+ }
+ data++;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ *data = 0;
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ *data += rx_queue->rx_packets;
+ }
+ data++;
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
+
+ efx_siena_ptp_update_stats(efx, data);
+}
+
+/* This must be called with rtnl_lock held. */
+int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_link_state *link_state = &efx->link_state;
+
+ mutex_lock(&efx->mac_lock);
+ efx_siena_mcdi_phy_get_link_ksettings(efx, cmd);
+ mutex_unlock(&efx->mac_lock);
+
+ /* Both MACs support pause frames (bidirectional and respond-only) */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+
+ if (LOOPBACK_INTERNAL(efx)) {
+ cmd->base.speed = link_state->speed;
+ cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+int
+efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* GMAC does not support 1000Mbps HD */
+ if ((cmd->base.speed == SPEED_1000) &&
+ (cmd->base.duplex != DUPLEX_FULL)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx_siena_mcdi_phy_set_link_ksettings(efx, cmd);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+int efx_siena_ethtool_get_fecparam(struct net_device *net_dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx_siena_mcdi_phy_get_fecparam(efx, fecparam);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+int efx_siena_ethtool_set_fecparam(struct net_device *net_dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx_siena_mcdi_phy_set_fecparam(efx, fecparam);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
+
+#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
+#define IP_PROTO_FULL_MASK 0xFF
+#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static inline void ip6_fill_mask(__be32 *mask)
+{
+ mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
+}
+
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule,
+ u32 *rss_context)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+ struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct efx_filter_spec spec;
+ int rc;
+
+ rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ rule->location, &spec);
+ if (rc)
+ return rc;
+
+ if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ rule->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ rule->ring_cookie = spec.dmaq_id;
+
+ if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IPV6) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V6_FLOW : UDP_V6_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ memcpy(ip6_entry->ip6dst, spec.loc_host,
+ sizeof(ip6_entry->ip6dst));
+ ip6_fill_mask(ip6_mask->ip6dst);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ memcpy(ip6_entry->ip6src, spec.rem_host,
+ sizeof(ip6_entry->ip6src));
+ ip6_fill_mask(ip6_mask->ip6src);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip6_entry->pdst = spec.loc_port;
+ ip6_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip6_entry->psrc = spec.rem_port;
+ ip6_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_OUTER_VID))) {
+ rule->flow_type = ETHER_FLOW;
+ if (spec.match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+ ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+ eth_broadcast_addr(mac_mask->h_dest);
+ else
+ ether_addr_copy(mac_mask->h_dest,
+ mac_addr_ig_mask);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+ ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+ eth_broadcast_addr(mac_mask->h_source);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO))) {
+ rule->flow_type = IPV4_USER_FLOW;
+ uip_entry->ip_ver = ETH_RX_NFC_IP4;
+ if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+ uip_mask->proto = IP_PROTO_FULL_MASK;
+ uip_entry->proto = spec.ip_proto;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ uip_entry->ip4dst = spec.loc_host[0];
+ uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ uip_entry->ip4src = spec.rem_host[0];
+ uip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ spec.ether_type == htons(ETH_P_IPV6) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO))) {
+ rule->flow_type = IPV6_USER_FLOW;
+ if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+ uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
+ uip6_entry->l4_proto = spec.ip_proto;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ memcpy(uip6_entry->ip6dst, spec.loc_host,
+ sizeof(uip6_entry->ip6dst));
+ ip6_fill_mask(uip6_mask->ip6dst);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ memcpy(uip6_entry->ip6src, spec.rem_host,
+ sizeof(uip6_entry->ip6src));
+ ip6_fill_mask(uip6_mask->ip6src);
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
+ }
+
+ if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
+ rule->flow_type |= FLOW_RSS;
+ *rss_context = spec.rss_context;
+ }
+
+ return rc;
+}
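
The classification above leans on one idiom: !(spec.match_flags & ~(ALLOWED)) is true exactly when every set flag falls inside the ALLOWED set, so each branch claims a spec only if it requests nothing that branch cannot express. A minimal standalone sketch of that subset test, with hypothetical flag values standing in for the EFX_FILTER_MATCH_* bits:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the EFX_FILTER_MATCH_* bits. */
#define MATCH_ETHER_TYPE 0x0001
#define MATCH_LOC_HOST   0x0002
#define MATCH_LOC_PORT   0x0004
#define MATCH_OUTER_VID  0x0008

/* True iff every bit set in flags is also set in allowed. */
static bool flags_subset_of(unsigned int flags, unsigned int allowed)
{
	return !(flags & ~allowed);
}

int main(void)
{
	unsigned int allowed = MATCH_ETHER_TYPE | MATCH_LOC_HOST | MATCH_LOC_PORT;

	printf("%d\n", flags_subset_of(MATCH_ETHER_TYPE | MATCH_LOC_PORT, allowed));	/* 1 */
	printf("%d\n", flags_subset_of(MATCH_ETHER_TYPE | MATCH_OUTER_VID, allowed));	/* 0 */
	return 0;
}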
+
+int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 rss_context = 0;
+ s32 rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ struct efx_rss_context *ctx = &efx->rss_context;
+ __u64 data;
+
+ mutex_lock(&efx->rss_lock);
+ if (info->flow_type & FLOW_RSS && info->rss_context) {
+ ctx = efx_siena_find_rss_context_entry(efx,
+ info->rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ }
+
+ data = 0;
+ if (!efx_rss_active(ctx)) /* No RSS */
+ goto out_setdata_unlock;
+
+ switch (info->flow_type & ~FLOW_RSS) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (ctx->rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata_unlock:
+ info->data = data;
+out_unlock:
+ mutex_unlock(&efx->rss_lock);
+ return rc;
+ }
+
+ case ETHTOOL_GRXCLSRLCNT:
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ info->data |= RX_CLS_LOC_SPECIAL;
+ info->rule_cnt =
+ efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+ return 0;
+
+ case ETHTOOL_GRXCLSRULE:
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+ rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
+ if (rc < 0)
+ return rc;
+ if (info->fs.flow_type & FLOW_RSS)
+ info->rss_context = rss_context;
+ return 0;
+
+ case ETHTOOL_GRXCLSRLALL:
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+ rule_locs, info->rule_cnt);
+ if (rc < 0)
+ return rc;
+ info->rule_cnt = rc;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
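
For reference, a minimal userspace sketch of the ETHTOOL_GRXFH query served by the handler above, roughly what "ethtool -n <dev> rx-flow-hash tcp4" issues. The interface name is an assumption and error handling is kept minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,	/* ask which fields feed the hash */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed port name */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("tcp4 hash fields: %#llx\n", (unsigned long long)nfc.data);
	else
		perror("ETHTOOL_GRXFH");
	close(fd);
	return 0;
}

For an active RSS context the handler above reports RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 for this flow type.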
+
+static inline bool ip6_mask_is_full(__be32 mask[4])
+{
+ return !~(mask[0] & mask[1] & mask[2] & mask[3]);
+}
+
+static inline bool ip6_mask_is_empty(__be32 mask[4])
+{
+ return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
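
These helpers work because !~x is true only for an all-ones word and !x only for zero, so ANDing (or ORing) the four 32-bit words first reduces the 128-bit question to a single test; byte order does not matter, since all-ones and all-zeroes look the same in either endianness. A standalone sketch, with plain uint32_t in place of __be32:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* All 128 bits set: AND the words, then test for all-ones. */
static bool mask128_is_full(const uint32_t m[4])
{
	return !~(m[0] & m[1] & m[2] & m[3]);
}

/* No bits set: OR the words, then test for zero. */
static bool mask128_is_empty(const uint32_t m[4])
{
	return !(m[0] | m[1] | m[2] | m[3]);
}

int main(void)
{
	uint32_t full[4] = { ~0u, ~0u, ~0u, ~0u };
	uint32_t none[4] = { 0, 0, 0, 0 };
	uint32_t part[4] = { ~0u, ~0u, 0, 0 };	/* e.g. a /64 prefix mask */

	printf("%d %d %d\n", mask128_is_full(full), mask128_is_empty(none),
	       mask128_is_full(part));	/* 1 1 0 */
	return 0;
}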
+
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule,
+ u32 rss_context)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+ struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+ u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ enum efx_filter_flags flags = 0;
+ struct efx_filter_spec spec;
+ int rc;
+
+ /* Check that user wants us to choose the location */
+ if (rule->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ /* Range-check ring_cookie */
+ if (rule->ring_cookie >= efx->n_rx_channels &&
+ rule->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
+ rule->m_ext.data[1]))
+ return -EINVAL;
+
+ if (efx->rx_scatter)
+ flags |= EFX_FILTER_FLAG_RX_SCATTER;
+ if (rule->flow_type & FLOW_RSS)
+ flags |= EFX_FILTER_FLAG_RX_RSS;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
+ (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+ EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
+
+ if (rule->flow_type & FLOW_RSS)
+ spec.rss_context = rss_context;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
+ : IPPROTO_UDP;
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
+ return -EINVAL;
+ break;
+
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IPV6);
+ spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
+ : IPPROTO_UDP;
+ if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
+ if (!ip6_mask_is_full(ip6_mask->ip6dst))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
+ }
+ if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
+ if (!ip6_mask_is_full(ip6_mask->ip6src))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
+ }
+ if (ip6_mask->pdst) {
+ if (ip6_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip6_entry->pdst;
+ }
+ if (ip6_mask->psrc) {
+ if (ip6_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip6_entry->psrc;
+ }
+ if (ip6_mask->tclass)
+ return -EINVAL;
+ break;
+
+ case IPV4_USER_FLOW:
+ if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
+ uip_entry->ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = htons(ETH_P_IP);
+ if (uip_mask->ip4dst) {
+ if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = uip_entry->ip4dst;
+ }
+ if (uip_mask->ip4src) {
+ if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = uip_entry->ip4src;
+ }
+ if (uip_mask->proto) {
+ if (uip_mask->proto != IP_PROTO_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ spec.ip_proto = uip_entry->proto;
+ }
+ break;
+
+ case IPV6_USER_FLOW:
+ if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
+ return -EINVAL;
+ spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = htons(ETH_P_IPV6);
+ if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
+ if (!ip6_mask_is_full(uip6_mask->ip6dst))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
+ }
+ if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
+ if (!ip6_mask_is_full(uip6_mask->ip6src))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
+ }
+ if (uip6_mask->l4_proto) {
+ if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ spec.ip_proto = uip6_entry->l4_proto;
+ }
+ break;
+
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ else
+ return -EINVAL;
+ ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
+ }
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ ether_addr_copy(spec.rem_mac, mac_entry->h_source);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
+
+ rule->location = rc;
+ return 0;
+}
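
A sketch of the request body a caller would build for the ETHTOOL_SRXCLSRLINS path above: a TCP/IPv4 rule keyed on destination port, steered to RX queue 1, with the driver picking the table slot via RX_CLS_LOC_ANY. Values are illustrative, and submission goes through SIOCETHTOOL exactly as in the ETHTOOL_GRXFH sketch earlier; it is roughly what "ethtool -U <dev> flow-type tcp4 dst-port 4242 action 1" submits:

#include <arpa/inet.h>
#include <string.h>
#include <linux/ethtool.h>

static void build_tcp4_rule(struct ethtool_rxnfc *nfc)
{
	memset(nfc, 0, sizeof(*nfc));
	nfc->cmd = ETHTOOL_SRXCLSRLINS;
	nfc->fs.flow_type = TCP_V4_FLOW;
	nfc->fs.h_u.tcp_ip4_spec.pdst = htons(4242);	/* match dst port */
	nfc->fs.m_u.tcp_ip4_spec.pdst = 0xffff;		/* full port mask */
	nfc->fs.ring_cookie = 1;			/* target RX queue */
	nfc->fs.location = RX_CLS_LOC_ANY;		/* driver chooses slot */
}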
+
+int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return efx_ethtool_set_class_rule(efx, &info->fs,
+ info->rss_context);
+
+ case ETHTOOL_SRXCLSRLDEL:
+ return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+ info->fs.location);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->n_rx_channels == 1)
+ return 0;
+ return ARRAY_SIZE(efx->rss_context.rx_indir_table);
+}
+
+u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->type->rx_hash_key_size;
+}
+
+int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (indir)
+ memcpy(indir, efx->rss_context.rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (key)
+ memcpy(key, efx->rss_context.rx_hash_key,
+ efx->type->rx_hash_key_size);
+ return 0;
+}
+
+int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* Hash function is Toeplitz, cannot be changed */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+ if (!indir && !key)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+ if (!indir)
+ indir = efx->rss_context.rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
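
The indirection table pushed here follows the standard RSS dispatch convention: the NIC Toeplitz-hashes the flow key, reduces the hash modulo the table size, and the selected entry names the destination RX queue. A sketch of that lookup, using an illustrative table size rather than the sfc hardware one:

#include <stdint.h>

#define INDIR_SIZE 128	/* illustrative; not the sfc table size */

/* Map a packet's Toeplitz hash to an RX queue via the indirection table. */
static unsigned int rss_pick_queue(uint32_t toeplitz_hash,
				   const uint32_t indir[INDIR_SIZE])
{
	return indir[toeplitz_hash % INDIR_SIZE];
}

int main(void)
{
	uint32_t indir[INDIR_SIZE];
	unsigned int i;

	/* Round-robin spread across 4 assumed RX queues, which is
	 * conceptually what the default indirection table does.
	 */
	for (i = 0; i < INDIR_SIZE; i++)
		indir[i] = i % 4;
	return (int)rss_pick_queue(0xdeadbeefu, indir);
}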
+
+int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_rss_context *ctx;
+ int rc = 0;
+
+ if (!efx->type->rx_pull_rss_context_config)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->rss_lock);
+ ctx = efx_siena_find_rss_context_entry(efx, rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ rc = efx->type->rx_pull_rss_context_config(efx, ctx);
+ if (rc)
+ goto out_unlock;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (indir)
+ memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
+ if (key)
+ memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+out_unlock:
+ mutex_unlock(&efx->rss_lock);
+ return rc;
+}
+
+int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc, u32 *rss_context,
+ bool delete)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_rss_context *ctx;
+ bool allocated = false;
+ int rc;
+
+ if (!efx->type->rx_push_rss_context_config)
+ return -EOPNOTSUPP;
+ /* Hash function is Toeplitz, cannot be changed */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->rss_lock);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ if (delete) {
+ /* alloc + delete == Nothing to do */
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ ctx = efx_siena_alloc_rss_context_entry(efx);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ /* Initialise indir table and key to defaults */
+ efx_siena_set_default_rx_indir_table(efx, ctx);
+ netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
+ allocated = true;
+ } else {
+ ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ }
+
+ if (delete) {
+ /* delete this context */
+ rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
+ if (!rc)
+ efx_siena_free_rss_context_entry(ctx);
+ goto out_unlock;
+ }
+
+ if (!key)
+ key = ctx->rx_hash_key;
+ if (!indir)
+ indir = ctx->rx_indir_table;
+
+ rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
+ if (rc && allocated)
+ efx_siena_free_rss_context_entry(ctx);
+ else
+ *rss_context = ctx->user_id;
+out_unlock:
+ mutex_unlock(&efx->rss_lock);
+ return rc;
+}
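
From the caller's side, the rss_context argument encodes a small lifecycle: pass ETH_RXFH_CONTEXT_ALLOC to create a context (the driver writes the assigned ID back through the pointer), pass an existing ID to reconfigure it in place, and set delete to tear it down; alloc plus delete in one call is rejected above with -EINVAL. A comment-level sketch of that protocol:

#include <linux/ethtool.h>

/* Sketch only: ETH_RXFH_CONTEXT_ALLOC is uapi; the step comments map
 * onto efx_siena_ethtool_set_rxfh_context() above.
 */
static void rss_context_lifecycle(void)
{
	__u32 ctx = ETH_RXFH_CONTEXT_ALLOC;	/* 1. request allocation */

	/* 2. set_rxfh_context(..., &ctx, delete=false): on success ctx
	 *    now holds the driver-assigned user_id of the new context.
	 * 3. set_rxfh_context(..., &ctx, delete=false) again: updates the
	 *    same context's hash key / indirection table in place.
	 * 4. set_rxfh_context(..., &ctx, delete=true): frees the context.
	 */
	(void)ctx;
}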
+
+int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->map_reset_flags(flags);
+ if (rc < 0)
+ return rc;
+
+ return efx_siena_reset(efx, rc);
+}
+
+int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int ret;
+
+ mutex_lock(&efx->mac_lock);
+ ret = efx_siena_mcdi_phy_get_module_eeprom(efx, ee, data);
+ mutex_unlock(&efx->mac_lock);
+
+ return ret;
+}
+
+int efx_siena_ethtool_get_module_info(struct net_device *net_dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int ret;
+
+ mutex_lock(&efx->mac_lock);
+ ret = efx_siena_mcdi_phy_get_module_info(efx, modinfo);
+ mutex_unlock(&efx->mac_lock);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
new file mode 100644
index 000000000000..04b375dc6800
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ETHTOOL_COMMON_H
+#define EFX_ETHTOOL_COMMON_H
+
+void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info);
+u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev);
+void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
+void efx_siena_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data);
+void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause);
+int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause);
+int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
+void efx_siena_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
+ u8 *strings);
+void efx_siena_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats __always_unused,
+ u64 *data);
+int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *out);
+int efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *settings);
+int efx_siena_ethtool_get_fecparam(struct net_device *net_dev,
+ struct ethtool_fecparam *fecparam);
+int efx_siena_ethtool_set_fecparam(struct net_device *net_dev,
+ struct ethtool_fecparam *fecparam);
+int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info);
+u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
+u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev);
+int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+ u8 *hfunc);
+int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
+ const u32 *indir, const u8 *key, const u8 hfunc);
+int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context);
+int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc, u32 *rss_context,
+ bool delete);
+int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
+int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *ee,
+ u8 *data);
+int efx_siena_ethtool_get_module_info(struct net_device *net_dev,
+ struct ethtool_modinfo *modinfo);
+#endif /* EFX_ETHTOOL_COMMON_H */
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index 148dcd48b58d..89ccd65c978b 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -16,6 +16,7 @@
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
@@ -227,12 +228,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
unsigned int len)
{
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
struct siena_nic_data *nic_data = efx->nic_data;
#endif
len = ALIGN(len, EFX_BUF_SIZE);
- if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
return -ENOMEM;
buffer->entries = len / EFX_BUF_SIZE;
BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
@@ -240,7 +241,7 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
BUG_ON(efx_siena_sriov_enabled(efx) &&
nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif
@@ -268,7 +269,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
(u64)buffer->buf.dma_addr, buffer->buf.len,
buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
- efx_nic_free_buffer(efx, &buffer->buf);
+ efx_siena_free_buffer(efx, &buffer->buf);
buffer->entries = 0;
}
@@ -666,7 +667,7 @@ static int efx_farch_do_flush(struct efx_nic *efx)
* completion). If that fails, fall back to the old scheme.
*/
if (efx_siena_sriov_enabled(efx)) {
- rc = efx_mcdi_flush_rxqs(efx);
+ rc = efx_siena_mcdi_flush_rxqs(efx);
if (!rc)
goto wait;
}
@@ -746,12 +747,13 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
* completion events. This means that efx->rxq_flush_outstanding remained at 4
* after the FLR; also, efx->active_queues was non-zero (as no flush completion
* events were received, and we didn't go through efx_check_tx_flush_complete())
- * If we don't fix this up, on the next call to efx_realloc_channels() we won't
- * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
- * for batched flush requests; and the efx->active_queues gets messed up because
- * we keep incrementing for the newly initialised queues, but it never went to
- * zero previously. Then we get a timeout every time we try to restart the
- * queues, as it doesn't go back to zero when we should be flushing the queues.
+ * If we don't fix this up, on the next call to efx_siena_realloc_channels() we
+ * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
+ * of 4 for batched flush requests; and the efx->active_queues gets messed up
+ * because we keep incrementing for the newly initialised queues, but it never
+ * went to zero previously. Then we get a timeout every time we try to restart
+ * the queues, as it doesn't go back to zero when we should be flushing the
+ * queues.
*/
void efx_farch_finish_flr(struct efx_nic *efx)
{
@@ -837,7 +839,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = channel->tx_queue +
(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
@@ -848,7 +850,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
efx_farch_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else {
netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event "
@@ -955,7 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
return false;
}
@@ -1000,7 +1002,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
/* Discard all pending fragments */
if (rx_queue->scatter_n) {
- efx_rx_packet(
+ efx_siena_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
@@ -1014,7 +1016,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
/* Discard new fragment if not SOP */
if (!rx_ev_sop) {
- efx_rx_packet(
+ efx_siena_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD);
@@ -1066,9 +1068,9 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
channel->irq_mod_score += 2;
/* Handle received packet */
- efx_rx_packet(rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ efx_siena_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
@@ -1158,7 +1160,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue, true);
+ efx_siena_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -1185,7 +1187,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
efx_siena_sriov_tx_flush_done(efx, event);
#endif
break;
@@ -1193,7 +1195,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
efx_siena_sriov_rx_flush_done(efx, event);
#endif
break;
@@ -1221,7 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1229,9 +1231,9 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
}
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
@@ -1242,9 +1244,9 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
}
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
@@ -1305,13 +1307,13 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
case FSE_CZ_EV_CODE_USER_EV:
efx_siena_sriov_event(channel, &event);
break;
#endif
case FSE_CZ_EV_CODE_MCDI_EV:
- efx_mcdi_process_event(channel, &event);
+ efx_siena_mcdi_process_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
if (efx->type->handle_global_event &&
@@ -1495,12 +1497,12 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n");
- efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
@@ -1528,7 +1530,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
* code. Disable them earlier.
* If an EEH error occurred, the read will have returned all ones.
*/
- if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true;
@@ -1669,7 +1671,7 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
unsigned vi_count, total_tx_channels;
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
struct siena_nic_data *nic_data;
unsigned buftbl_min;
#endif
@@ -1677,7 +1679,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
nic_data = efx->nic_data;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
@@ -2776,7 +2778,7 @@ void efx_farch_filter_table_remove(struct efx_nic *efx)
enum efx_farch_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2820,9 +2822,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
@@ -2923,13 +2923,14 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
*/
arfs_id = 0;
} else {
- rule = efx_rps_hash_find(efx, &spec);
+ rule = efx_siena_rps_hash_find(efx, &spec);
if (!rule) {
/* ARFS table doesn't know of this filter, remove it */
force = true;
} else {
arfs_id = rule->arfs_id;
- if (!efx_rps_check_rule(rule, index, &force))
+ if (!efx_siena_rps_check_rule(rule, index,
+ &force))
goto out_unlock;
}
}
@@ -2937,7 +2938,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
flow_id, arfs_id)) {
if (rule)
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
- efx_rps_hash_del(efx, &spec);
+ efx_siena_rps_hash_del(efx, &spec);
efx_farch_filter_table_clear_entry(efx, table, index);
ret = true;
}
diff --git a/drivers/net/ethernet/sfc/siena/farch_regs.h b/drivers/net/ethernet/sfc/siena/farch_regs.h
new file mode 100644
index 000000000000..d138be423e63
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/farch_regs.h
@@ -0,0 +1,2929 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register MC register Host memory structure
+ * -------------------------------------------------------------
+ * Address R MCR
+ * Bitfield RF MCRF SF
+ * Enumerator FE MCFE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * A: Falcon A1 (SFC4000AB)
+ * B: Falcon B0 (SFC4000BA)
+ * C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
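
A sketch of how the _LBN/_WIDTH pairs below are consumed: LBN is a field's least-significant bit number within the register image, WIDTH its size in bits. The driver proper uses the EFX_*_FIELD macros from bitfield.h, which also cope with fields wider than a machine word; this standalone 64-bit extractor (valid for WIDTH < 64) just illustrates the convention:

#include <stdint.h>
#include <stdio.h>

/* Extract a WIDTH-bit field starting at bit LBN; WIDTH must be < 64. */
#define FIELD_GET64(val, lbn, width) \
	(((val) >> (lbn)) & (((uint64_t)1 << (width)) - 1))

int main(void)
{
	/* FRF_AZ_ADR_REGION1 below: LBN 32, WIDTH 18. */
	uint64_t reg = (uint64_t)0x2a5a5 << 32;

	printf("%#llx\n", (unsigned long long)FIELD_GET64(reg, 32, 18));
	/* prints 0x2a5a5 */
	return 0;
}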
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define FR_AZ_ADR_REGION 0x00000000
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define FR_AZ_INT_EN_KER 0x00000010
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define FR_BZ_INT_EN_CHAR 0x00000020
+#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_BZ_CHAR_INT_KER_LBN 3
+#define FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define FR_AZ_INT_ADR_KER 0x00000030
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define FR_BZ_INT_ADR_CHAR 0x00000040
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_BZ_INT_ADR_CHAR_LBN 0
+#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define FR_AA_INT_ACK_KER 0x00000050
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define FR_BZ_INT_ISR0 0x00000090
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define FR_AZ_HW_INIT 0x000000c0
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AB_TRGT_MASK_ALL_LBN 100
+#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AA_FC_BLOCKING_EN_LBN 45
+#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define FRF_BZ_B2B_REQ_EN_LBN 45
+#define FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AA_B2B_REQ_EN_LBN 44
+#define FRF_AA_B2B_REQ_EN_WIDTH 1
+#define FRF_BB_FC_BLOCKING_EN_LBN 44
+#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define FR_AB_EE_SPI_HCMD 0x00000100
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define FR_CZ_USR_EV_CFG 0x00000100
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define FR_AB_EE_SPI_HADR 0x00000110
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define FR_AB_EE_SPI_HDATA 0x00000120
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define FR_AB_EE_BASE_PAGE 0x00000130
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define FR_AB_EE_VPD_CFG0 0x00000140
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define FR_AB_EE_VPD_SW_DATA 0x00000160
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define FR_AB_NIC_STAT 0x00000200
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define FR_AB_GPIO_CTL 0x00000210
+#define FRF_AB_GPIO_OUT3_LBN 112
+#define FRF_AB_GPIO_OUT3_WIDTH 16
+#define FRF_AB_GPIO_IN3_LBN 104
+#define FRF_AB_GPIO_IN3_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define FRF_AB_GPIO_OUT2_LBN 80
+#define FRF_AB_GPIO_OUT2_WIDTH 16
+#define FRF_AB_GPIO_IN2_LBN 72
+#define FRF_AB_GPIO_IN2_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_CLK156_OUT_EN_LBN 31
+#define FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define FRF_AB_USE_NIC_CLK_LBN 30
+#define FRF_AB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO7_OUT_LBN 23
+#define FRF_AB_GPIO7_OUT_WIDTH 1
+#define FRF_AB_GPIO6_OUT_LBN 22
+#define FRF_AB_GPIO6_OUT_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO7_IN_LBN 15
+#define FRF_AB_GPIO7_IN_WIDTH 1
+#define FRF_AB_GPIO6_IN_LBN 14
+#define FRF_AB_GPIO6_IN_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define FR_AB_GLB_CTL 0x00000220
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FR_AZ_FATAL_INTR_KER 0x00000230
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define FR_BZ_DP_CTRL 0x00000250
+#define FRF_BZ_FLS_EVQ_ID_LBN 0
+#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define FR_AZ_MEM_STAT 0x00000260
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define FR_AZ_CS_DEBUG 0x00000270
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define FR_AZ_DRIVER 0x00000280
+#define FR_AZ_DRIVER_STEP 16
+#define FR_AZ_DRIVER_ROWS 8
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+/* ALTERA_BUILD_REG: Altera build register */
+#define FR_AZ_ALTERA_BUILD 0x00000300
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define FR_AZ_CSR_SPARE 0x00000310
+#define FRF_AB_MEM_PERR_EN_LBN 64
+#define FRF_AB_MEM_PERR_EN_WIDTH 38
+#define FRF_CZ_MEM_PERR_EN_LBN 64
+#define FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define FR_AB_PCIE_SD_CTL0123 0x00000320
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define FR_AB_PCIE_SD_CTL45 0x00000330
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define FR_BB_DEBUG_DATA_OUT 0x00000350
+#define FRF_BB_DEBUG2_PORT_LBN 25
+#define FRF_BB_DEBUG2_PORT_WIDTH 15
+#define FRF_BB_DEBUG1_PORT_LBN 0
+#define FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR_P0 0x00000400
+#define FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define FR_AA_EVQ_RPTR_KER 0x00011b00
+#define FR_AA_EVQ_RPTR_KER_STEP 4
+#define FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR 0x00fa0000
+#define FR_BZ_EVQ_RPTR_STEP 16
+#define FR_BB_EVQ_RPTR_ROWS 4096
+#define FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define FR_BB_EVQ_RPTR_P123 0x01000400
+#define FR_BB_EVQ_RPTR_P123_STEP 8192
+#define FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
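+/*
+ * Note: definitions with _STEP and _ROWS describe register arrays; entry
+ * n (0 <= n < ROWS) lives at the base address plus n * STEP bytes.  A
+ * hypothetical helper for the table above:
+ *
+ *	static inline unsigned int evq_rptr_offset(unsigned int n)
+ *	{
+ *		return FR_BZ_EVQ_RPTR + n * FR_BZ_EVQ_RPTR_STEP;
+ *	}
+ */
+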
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define FR_AA_TIMER_COMMAND_KER 0x00000420
+#define FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define FR_BB_TIMER_COMMAND_P123 0x01000420
+#define FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+/* DRV_EV_REG: Driver generated event register */
+#define FR_AZ_DRV_EV 0x00000440
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define FR_AZ_EVQ_CTL 0x00000450
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define FR_AZ_EVQ_CNT1 0x00000460
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define FR_AZ_EVQ_CNT2 0x00000470
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define FR_CZ_USR_EV 0x00000540
+#define FR_CZ_USR_EV_STEP 8192
+#define FR_CZ_USR_EV_ROWS 1024
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define FR_AZ_BUF_TBL_CFG 0x00000600
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define FR_AZ_SRM_CFG 0x00000630
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define FR_AZ_BUF_TBL_UPD 0x00000650
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: Buffer table update event queue register */
+#define FR_AZ_SRM_UPD_EVQ 0x00000660
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define FR_AZ_SRAM_PARITY 0x00000670
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define FR_AZ_RX_CFG 0x00000800
+#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control register */
+#define FR_BZ_RX_FILTER_CTL 0x00000810
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_NUM_KER_LBN 24
+#define FRF_BZ_NUM_KER_WIDTH 2
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define FR_AA_RX_DESC_UPD_KER 0x00000830
+#define FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define FR_BB_RX_DESC_UPD_P123 0x01000830
+#define FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+
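+/*
+ * Note: software advances a receive queue by writing the new write
+ * pointer into FRF_AZ_RX_DESC_WPTR; FRF_AZ_RX_DESC_PUSH_CMD optionally
+ * pushes a descriptor in the same access.  A sketch assuming the sfc
+ * driver's 128-bit efx_oword_t helpers (defined elsewhere, not in this
+ * file); "wptr" and "queue" are illustrative names:
+ *
+ *	efx_oword_t reg;
+ *	EFX_POPULATE_OWORD_2(reg,
+ *			     FRF_AZ_RX_DESC_WPTR, wptr,
+ *			     FRF_AZ_RX_DESC_PUSH_CMD, 0);
+ *	efx_writeo_page(efx, &reg, FR_BZ_RX_DESC_UPD_P0, queue);
+ */
+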
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define FR_AZ_RX_DC_CFG 0x00000840
+#define FRF_AB_RX_MAX_PF_LBN 2
+#define FRF_AB_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define FR_AZ_RX_DC_PF_WM 0x00000850
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define FR_BZ_RX_RSS_TKEY 0x00000860
+#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define FR_AZ_RX_NODESC_DROP 0x00000880
+#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define FR_AA_RX_SELF_RST 0x00000890
+#define FRF_AA_RX_ISCSI_DIS_LBN 17
+#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AA_RX_SW_RST_REG_LBN 16
+#define FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define FRF_AA_RX_SELF_RST_EN_LBN 8
+#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define FR_AZ_RX_DEBUG 0x000008a0
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define FR_AZ_RX_PUSH_DROP 0x000008b0
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define FR_AZ_TX_DC_CFG 0x00000a20
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define FR_AA_TX_CHKSM_CFG 0x00000a30
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
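+/*
+ * Note: the four 32-bit fields above together form a 128-bit per-queue
+ * checksum-disable bitmap on Falcon A.  Assuming the register has been
+ * read into an array of four u32s (an illustration, not driver code):
+ *
+ *	bool csum_disabled = bitmap[n / 32] & (1U << (n % 32));
+ */
+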
+/* TX_CFG_REG: Transmit configuration register */
+#define FR_AZ_TX_CFG 0x00000a50
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define FR_AZ_TX_PUSH_DROP 0x00000a60
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Transmit reserved configuration register */
+#define FR_AZ_TX_RESERVED 0x00000a80
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define FR_BZ_TX_PACE 0x00000a90
+#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: Transmit pace drop queue ID counter */
+#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define FR_BB_TX_VLAN 0x00000ae0
+#define FRF_BB_TX_VLAN_EN_LBN 127
+#define FRF_BB_TX_VLAN_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_LBN 112
+#define FRF_BB_TX_VLAN7_WIDTH 12
+#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_LBN 96
+#define FRF_BB_TX_VLAN6_WIDTH 12
+#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_LBN 80
+#define FRF_BB_TX_VLAN5_WIDTH 12
+#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_LBN 64
+#define FRF_BB_TX_VLAN4_WIDTH 12
+#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_LBN 48
+#define FRF_BB_TX_VLAN3_WIDTH 12
+#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_LBN 32
+#define FRF_BB_TX_VLAN2_WIDTH 12
+#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_LBN 16
+#define FRF_BB_TX_VLAN1_WIDTH 12
+#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_LBN 0
+#define FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define FR_BB_TX_IPFIL_TBL 0x00000b00
+#define FR_BB_TX_IPFIL_TBL_STEP 16
+#define FR_BB_TX_IPFIL_TBL_ROWS 16
+#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define FR_AB_MD_TXD 0x00000c00
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define FR_AB_MD_RXD 0x00000c10
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define FR_AB_MD_CS 0x00000c20
+#define FRF_AB_MD_RD_EN_CMD_LBN 15
+#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define FRF_AB_MD_WR_EN_CMD_LBN 14
+#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define FR_AB_MD_PHY_ADR 0x00000c30
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define FR_AB_MD_ID 0x00000c40
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define FR_AB_MD_STAT 0x00000c50
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define FR_AB_MAC_STAT_DMA 0x00000c60
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define FR_AB_MAC_CTRL 0x00000c80
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FFE_AB_MAC_SPEED_10G 3
+#define FFE_AB_MAC_SPEED_1G 2
+#define FFE_AB_MAC_SPEED_100M 1
+#define FFE_AB_MAC_SPEED_10M 0
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define FR_BB_GEN_MODE 0x00000c90
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define FR_AB_GM_CFG1 0x00000e00
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define FR_AB_GM_CFG2 0x00000e10
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FFE_AB_IF_MODE_BYTE_MODE 2
+#define FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define FR_AB_GM_IPG 0x00000e20
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define FR_AB_GM_HD 0x00000e30
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define FR_AB_GM_MAX_FLEN 0x00000e40
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define FR_AB_GM_TEST 0x00000e70
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define FR_AB_GM_ADR1 0x00000f00
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define FR_AB_GM_ADR2 0x00000f10
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
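+/*
+ * Note: GM_ADR_B0..B5 across GM_ADR1/GM_ADR2 hold the six octets of the
+ * station MAC address.  Packing sketch; which octet goes in B0 is not
+ * determined by this header, so mac[0]-in-B0 below is an assumption:
+ *
+ *	u32 adr1 = ((u32)mac[0] << FRF_AB_GM_ADR_B0_LBN) |
+ *		   ((u32)mac[1] << FRF_AB_GM_ADR_B1_LBN) |
+ *		   ((u32)mac[2] << FRF_AB_GM_ADR_B2_LBN) |
+ *		   ((u32)mac[3] << FRF_AB_GM_ADR_B3_LBN);
+ *	u32 adr2 = ((u32)mac[4] << FRF_AB_GM_ADR_B4_LBN) |
+ *		   ((u32)mac[5] << FRF_AB_GM_ADR_B5_LBN);
+ */
+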
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define FR_AB_GMF_CFG0 0x00000f20
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define FR_AB_GMF_CFG1 0x00000f30
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define FR_AB_GMF_CFG2 0x00000f40
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define FR_AB_GMF_CFG3 0x00000f50
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FR_AB_GMF_CFG4 0x00000f60
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FR_AB_GMF_CFG5 0x00000f70
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
+#define FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define FR_AB_XM_ADR_LO 0x00001200
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define FR_AB_XM_ADR_HI 0x00001210
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define FR_AB_XM_GLB_CFG 0x00001220
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define FR_AB_XM_TX_CFG 0x00001230
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define FR_AB_XM_RX_CFG 0x00001240
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: XGMAC management interrupt mask register */
+#define FR_AB_XM_MGT_INT_MASK 0x00001250
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define FR_AB_XM_FC 0x00001270
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define FR_AB_XM_PAUSE_TIME 0x00001290
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FR_AB_XM_TX_PARAM 0x000012d0
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FR_AB_XM_RX_PARAM 0x000012e0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt status register */
+#define FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define FR_AB_XX_PWR_RST 0x00001300
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define FR_AB_XX_SD_CTL 0x00001310
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define FR_AB_XX_TXDRV_CTL 0x00001320
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: XAUI SerDes PRBS control register */
+#define FR_AB_XX_PRBS_CTL 0x00001330
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: XAUI SerDes PRBS checker status register */
+#define FR_AB_XX_PRBS_CHK 0x00001340
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: XAUI SerDes PRBS error count register */
+#define FR_AB_XX_PRBS_ERR 0x00001350
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define FR_AB_XX_CORE_STAT 0x00001360
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AA_RX_RESET_LBN 89
+#define FRF_AA_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
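+
+/* Illustrative sketch, not part of this register map: each FRF_*_LBN /
+ * FRF_*_WIDTH pair above names a bitfield for the EFX_POPULATE_OWORD_*()
+ * macros in bitfield.h.  A hypothetical queue initialisation (parameter
+ * names here are placeholders) might look like:
+ *
+ *	efx_oword_t rx_desc_ptr;
+ *
+ *	EFX_POPULATE_OWORD_5(rx_desc_ptr,
+ *			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buf_base_id,
+ *			     FRF_AZ_RX_DESCQ_EVQ_ID, evq_id,
+ *			     FRF_AZ_RX_DESCQ_LABEL, label,
+ *			     FRF_AZ_RX_DESCQ_SIZE, FFE_AZ_RX_DESCQ_SIZE_512,
+ *			     FRF_AZ_RX_DESCQ_EN, 1);
+ *	efx_writeo_table(efx, &rx_desc_ptr, FR_BZ_RX_DESC_PTR_TBL, queue);
+ */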
+
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
+#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
+#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define FR_BZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode, direct access by driver */
+#define FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half buffer table mode, direct access by driver */
+#define FR_BZ_BUF_HALF_TBL 0x00800000
+#define FR_BZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_BB_BUF_HALF_TBL_ROWS 524288
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode, direct access by driver */
+#define FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full buffer table mode, direct access by driver */
+#define FR_BZ_BUF_FULL_TBL 0x00800000
+#define FR_BZ_BUF_FULL_TBL_STEP 8
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_BB_BUF_FULL_TBL_ROWS 917504
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define FR_BZ_RX_FILTER_TBL0_STEP 32
+#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define FR_BB_RX_FILTER_TBL1 0x00f00010
+#define FR_BB_RX_FILTER_TBL1_STEP 32
+#define FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_BZ_TCP_UDP_LBN 108
+#define FRF_BZ_TCP_UDP_WIDTH 1
+#define FRF_BZ_RXQ_ID_LBN 96
+#define FRF_BZ_RXQ_ID_WIDTH 12
+#define FRF_BZ_DEST_IP_LBN 64
+#define FRF_BZ_DEST_IP_WIDTH 32
+#define FRF_BZ_DEST_PORT_TCP_LBN 48
+#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_BZ_SRC_IP_LBN 16
+#define FRF_BZ_SRC_IP_WIDTH 32
+#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define FR_BZ_TIMER_TBL 0x00f70000
+#define FR_BZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_BB_TIMER_TBL_ROWS 4096
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_BB_TIMER_MODE_LBN 12
+#define FRF_BB_TIMER_MODE_WIDTH 2
+#define FFE_BB_TIMER_MODE_INT_HLDOFF 3
+#define FFE_BB_TIMER_MODE_TRIG_START 2
+#define FFE_BB_TIMER_MODE_IMMED_START 1
+#define FFE_BB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_BB_TIMER_VAL_LBN 0
+#define FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define FR_BZ_TX_PACE_TBL 0x00f80000
+#define FR_BZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+#define FRF_BZ_TX_PACE_LBN 0
+#define FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define FR_BZ_SRM_DBG 0x03000000
+#define FR_BZ_SRM_DBG_STEP 8
+#define FR_CZ_SRM_DBG_ROWS 262144
+#define FR_BB_SRM_DBG_ROWS 2097152
+#define FRF_BZ_SRM_DBG_LBN 0
+#define FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_BZ_TX_DSC_ERROR_EV 15
+#define FSE_BZ_RX_DSC_ERROR_EV 14
+#define FSE_AA_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AB_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_CZ_EV_CODE_MCDI_EV 12
+#define FSE_CZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
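+
+/* Sketch, not part of this patch: an event-queue entry is decoded by
+ * reading the 4-bit code and then the code-specific payload, e.g. with
+ * "event" pointing at an efx_qword_t read from the queue:
+ *
+ *	int ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
+ *
+ *	if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV)
+ *		ev_sub_code = EFX_QWORD_FIELD(*event,
+ *					      FSF_AZ_DRIVER_EV_SUBCODE);
+ */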
+
+/* GLOBAL_EV */
+#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define FSF_CZ_MC_XFRC_MODE_LBN 416
+#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define FSF_CZ_MC_XFRC_HASH_LBN 384
+#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define FRF_AZ_SRM_NB_SZ_WIDTH \
+ (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+ FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
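+/* Sketch (not part of this patch) of the dword-only write described
+ * above, as used when notifying hardware of newly posted RX descriptors
+ * ("channel" is a placeholder for the queue's page number):
+ *
+ *	efx_dword_t reg;
+ *
+ *	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, wptr);
+ *	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, channel);
+ *
+ * Writing only the high dword relies on the BIU special case described
+ * in io.h: the low 96 bits are written as zero, which is harmless here.
+ */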
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+ FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define FRF_AB_XX_COMMA_DET_WIDTH 4
+#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define FRF_AB_XX_DISPERR_WIDTH 4
+#define FFE_AB_XX_STAT_ALL_LANES 0xf
+#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
+
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
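+/* Sketch (assumption, not part of this patch): with the 16-byte prefix
+ * laid down immediately before the Ethernet header "eh", the RSS hash
+ * can be recovered as:
+ *
+ *	hash = le32_to_cpu(*(const __le32 *)(eh - FS_BZ_RX_PREFIX_SIZE +
+ *					     FS_BZ_RX_PREFIX_HASH_OFST));
+ */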
+
+#endif /* EFX_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/siena/filter.h b/drivers/net/ethernet/sfc/siena/filter.h
new file mode 100644
index 000000000000..40b2af8bfb81
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/filter.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/**
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ *	Used for RX default unicast and multicast/broadcast filters.
+ * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
+ *
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
+ */
+enum efx_filter_match_flags {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001,
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+ EFX_FILTER_MATCH_REM_MAC = 0x0004,
+ EFX_FILTER_MATCH_REM_PORT = 0x0008,
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EFX_FILTER_MATCH_INNER_VID = 0x0080,
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+ EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ * networking and SR-IOV)
+ */
+enum efx_filter_priority {
+ EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_PRI_MANUAL,
+ EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ * By default, matching packets will be delivered only to the
+ * specified queue. If this flag is set, they will be delivered
+ * to a range of queues offset from the specified queue number
+ * according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ * queue.
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each type. A removal request will restore
+ * the automatic filter in its place.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
+ * @EFX_FILTER_FLAG_TX: Filter is for TX
+ */
+enum efx_filter_flags {
+ EFX_FILTER_FLAG_RX_RSS = 0x01,
+ EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
+ EFX_FILTER_FLAG_RX = 0x08,
+ EFX_FILTER_FLAG_TX = 0x10,
+};
+
+/**
+ * enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+ EFX_ENCAP_TYPE_NONE = 0,
+ EFX_ENCAP_TYPE_VXLAN = 1,
+ EFX_ENCAP_TYPE_NVGRE = 2,
+ EFX_ENCAP_TYPE_GENEVE = 3,
+
+ EFX_ENCAP_TYPES_MASK = 7,
+ EFX_ENCAP_FLAG_IPV6 = 8,
+};
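+
+/* For example (sketch): given a value "encap" of this enum,
+ *
+ *	enum efx_encap_type base = encap & EFX_ENCAP_TYPES_MASK;
+ *	bool outer_ipv6 = !!(encap & EFX_ENCAP_FLAG_IPV6);
+ */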
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This
+ * is a user_id (with 0 meaning the driver/default RSS context), not an
+ * MCFW context_id.
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ * %EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
+ * %EFX_FILTER_MATCH_ENCAP_TYPE is set
+ *
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on which fields are matched.
+ */
+struct efx_filter_spec {
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ u32 encap_type:4;
+ /* total 65 bytes */
+};
+
+enum {
+ EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
+};
+
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+ enum efx_filter_priority priority,
+ enum efx_filter_flags flags,
+				      unsigned int rxq_id)
+{
+ memset(spec, 0, sizeof(*spec));
+ spec->priority = priority;
+ spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->rss_context = 0;
+ spec->dmaq_id = rxq_id;
+}
+
+static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
+				      unsigned int txq_id)
+{
+ memset(spec, 0, sizeof(*spec));
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = EFX_FILTER_FLAG_TX;
+ spec->dmaq_id = txq_id;
+}
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
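+
+/* Usage sketch, not part of this header: to steer local TCP port 80
+ * traffic for address "ip" (network byte order) to queue "rxq_id", a
+ * caller might do the following (the insertion entry point named here
+ * is an assumption; it is provided by the driver core, not this file):
+ *
+ *	struct efx_filter_spec spec;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_id);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip, htons(80));
+ *	rc = efx_filter_insert_filter(efx, &spec, false);
+ */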
+
+enum {
+ EFX_FILTER_VID_UNSPEC = 0xffff,
+};
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ ether_addr_copy(spec->loc_mac, addr);
+ }
+ return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->loc_mac[0] = 1;
+ return 0;
+}
+
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+ enum efx_encap_type encap_type)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+ const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+ return spec->encap_type;
+ return EFX_ENCAP_TYPE_NONE;
+}
+#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/siena/io.h b/drivers/net/ethernet/sfc/siena/io.h
new file mode 100644
index 000000000000..30439cc83a89
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/io.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy for the Falcon architecture:
+ *
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits. Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written. A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
+ *
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes. We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock. This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector and the current write together do not
+ * provide values for all 128 bits of the register, the low 96 bits
+ * will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
+ */
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+/* A hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by the hardware.  It's not strictly necessary to restrict
+ * this to the x86_64 arch, but it is done for safety, since unusual
+ * write-combining behaviour can break PIO.
+ */
+#ifdef CONFIG_X86_64
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+#endif
+
+static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
+{
+ return efx->reg_base + reg;
+}
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+ unsigned int reg)
+{
+ __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+ unsigned int reg)
+{
+ __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+ const efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+ addr, EFX_QWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ __raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+ __raw_writel((__force u32)value->u32[0], membase + addr);
+ __raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg)
+{
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+
+ /* No lock required */
+ _efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read a 128-bit CSR, locking as appropriate. */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ value->u32[0] = _efx_readd(efx, reg + 0);
+ value->u32[1] = _efx_readd(efx, reg + 4);
+ value->u32[2] = _efx_readd(efx, reg + 8);
+ value->u32[3] = _efx_readd(efx, reg + 12);
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+}
+
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+ efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+ value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+ addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read a 32-bit CSR or SRAM */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg)
+{
+ value->u32[0] = _efx_readd(efx, reg);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write a 128-bit CSR forming part of a table */
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 128-bit CSR forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* default VI stride (step between per-VI registers) is 8K on EF10 and
+ * 64K on EF100
+ */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
+#define EF100_DEFAULT_VI_STRIDE 0x10000
+
+/* Calculate offset to page-mapped register */
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+ unsigned int reg)
+{
+ return page * efx->vi_stride + reg;
+}
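+
+/* For example, with the default EF10 stride of 0x2000, page 2 of a
+ * register at offset 0x400 maps to 2 * 0x2000 + 0x400 = 0x4400.
+ */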
+
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ reg = efx_paged_reg(efx, page, reg);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+}
+#define efx_writeo_page(efx, value, reg, page) \
+ _efx_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+}
+#define efx_writed_page(efx, value, reg, page) \
+ _efx_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x180 && \
+ (reg) != 0x200 && \
+ (reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+ const efx_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ if (page == 0) {
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+ } else {
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+ }
+}
+#define efx_writed_page_locked(efx, value, reg, page) \
+ _efx_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
+
+#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
new file mode 100644
index 000000000000..3f7899daa86a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -0,0 +1,2260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/atomic.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+
+/* A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms, then wait 250ms for the status word to be set.
+ */
+#define MCDI_STATUS_DELAY_US 100
+#define MCDI_STATUS_DELAY_COUNT 2500
+#define MCDI_STATUS_SLEEP_MS \
+ (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
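+/* i.e. 100 us * 2500 iterations = 250 ms, matching the comment above */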
+
+#define SEQ_MASK \
+ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+
+struct efx_mcdi_async_param {
+ struct list_head list;
+ unsigned int cmd;
+ size_t inlen;
+ size_t outlen;
+ bool quiet;
+ efx_mcdi_async_completer *complete;
+ unsigned long cookie;
+ /* followed by request/response buffer */
+};
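+
+/* The request/response buffer is allocated together with the structure;
+ * a sketch (assumption, mirroring the layout comment above):
+ *
+ *	async = kmalloc(sizeof(*async) + ALIGN(inlen, 4) + outlen,
+ *			GFP_ATOMIC);
+ */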
+
+static void efx_mcdi_timeout_async(struct timer_list *t);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
+static void efx_mcdi_abandon(struct efx_nic *efx);
+
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+static bool efx_siena_mcdi_logging_default;
+module_param_named(mcdi_logging_default, efx_siena_mcdi_logging_default,
+ bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+ "Enable MCDI logging on newly-probed functions");
+#endif
+
+int efx_siena_mcdi_init(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+ bool already_attached;
+ int rc = -ENOMEM;
+
+ efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+ if (!efx->mcdi)
+ goto fail;
+
+ mcdi = efx_mcdi(efx);
+ mcdi->efx = efx;
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ /* consuming code assumes buffer is page-sized */
+ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail1;
+ mcdi->logging_enabled = efx_siena_mcdi_logging_default;
+#endif
+ init_waitqueue_head(&mcdi->wq);
+ init_waitqueue_head(&mcdi->proxy_rx_wq);
+ spin_lock_init(&mcdi->iface_lock);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ mcdi->mode = MCDI_MODE_POLL;
+ spin_lock_init(&mcdi->async_lock);
+ INIT_LIST_HEAD(&mcdi->async_list);
+ timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
+
+ (void)efx_siena_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_siena_mcdi_handle_assertion(efx);
+ if (rc)
+ goto fail2;
+
+ /* Let the MC (and BMC, if this is a LOM) know that the driver
+ * is loaded. We should do this before we reset the NIC.
+ */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ goto fail2;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
+ return 0;
+fail2:
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+ kfree(efx->mcdi);
+ efx->mcdi = NULL;
+fail:
+ return rc;
+}
+
+void efx_siena_mcdi_detach(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+ BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+ /* Relinquish the device (back to the BMC, if this is a LOM) */
+ efx_mcdi_drv_attach(efx, false, NULL);
+}
+
+void efx_siena_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
+ kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ u32 xflags, seqno;
+
+ BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ seqno = mcdi->seqno & SEQ_MASK;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ xflags = 0;
+ if (mcdi->mode == MCDI_MODE_EVENTS)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ if (efx->type->mcdi_max_ver == 1) {
+ /* MCDI v1 */
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ hdr_len = 4;
+ } else {
+ /* MCDI v2 */
+ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ hdr_len = 8;
+ }
+
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ int bytes = 0;
+ int i;
+ /* Lengths should always be a whole number of dwords, so scream
+ * if they're not.
+ */
+ WARN_ON_ONCE(hdr_len % 4);
+ WARN_ON_ONCE(inlen % 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x",
+ le32_to_cpu(hdr[i].u32[0]));
+
+ for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x",
+ le32_to_cpu(inbuf[i].u32[0]));
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+
+ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
+
+ mcdi->new_epoch = false;
+}
+
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ return 0;
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ return -name;
+ TRANSLATE_ERROR(EPERM);
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EAGAIN);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+ TRANSLATE_ERROR(EALREADY);
+ TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EPROTO;
+ }
+}
+
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
+ efx_dword_t hdr;
+
+ efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+ respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
+
+ if (respcmd != MC_CMD_V2_EXTN) {
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+ } else {
+ efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+ mcdi->resp_hdr_len = 8;
+ mcdi->resp_data_len =
+ EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ size_t hdr_len, data_len;
+ int bytes = 0;
+ int i;
+
+ WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+ hdr_len = mcdi->resp_hdr_len / 4;
+ /* MCDI_DECLARE_BUF ensures that underlying buffer is padded
+ * to dword size, and the MCDI buffer is always dword size
+ */
+ data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr,
+ mcdi->resp_hdr_len + (i * 4), 4);
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+ }
+#endif
+
+ mcdi->resprc_raw = 0;
+ if (error && mcdi->resp_data_len == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ mcdi->resprc = -EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ mcdi->resprc = -EIO;
+ } else if (error) {
+ efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+ mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
+ mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
+ } else {
+ mcdi->resprc = 0;
+ }
+}
+
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ rmb();
+ if (!efx->type->mcdi_poll_response(efx))
+ return false;
+
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ return true;
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned long time, finish;
+ unsigned int spins;
+ int rc;
+
+ /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+ rc = efx_siena_mcdi_poll_reboot(efx);
+ if (rc) {
+ spin_lock_bh(&mcdi->iface_lock);
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ spin_unlock_bh(&mcdi->iface_lock);
+ return 0;
+ }
+
+	/* Poll for completion. Poll quickly (once per microsecond) for the
+	 * first jiffy, because MCDI responses are generally fast. After
+	 * that, back off and poll approximately once per jiffy.
+	 */
+ spins = USER_TICK_USEC;
+ finish = jiffies + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+ --spins;
+ udelay(1);
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ time = jiffies;
+
+ if (efx_mcdi_poll_once(efx))
+ break;
+
+ if (time_after(time, finish))
+ return -ETIMEDOUT;
+ }
+
+ /* Return rc=0 like wait_event_timeout() */
+ return 0;
+}
+
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
+int efx_siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return 0;
+
+ return efx->type->mcdi_poll_reboot(efx);
+}
+
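+/* Try to take ownership of the interface for an asynchronous request
+ * without blocking: a lock-free QUIESCENT -> RUNNING_ASYNC transition
+ * that succeeds only if no other request is in flight.
+ */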
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+ return cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+ MCDI_STATE_QUIESCENT;
+}
+
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
+{
+ /* Wait until the interface becomes QUIESCENT and we win the race
+ * to mark it RUNNING_SYNC.
+ */
+ wait_event(mcdi->wq,
+ cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+ MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+ MCDI_RPC_TIMEOUT) == 0)
+ return -ETIMEDOUT;
+
+ /* Check if efx_mcdi_set_mode() switched us back to polled completions.
+ * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
+ * completed the request first, then we'll just end up completing the
+ * request again, which is safe.
+ *
+	 * We need an smp_rmb() to synchronise with efx_siena_mcdi_mode_poll(),
+	 * which wait_event_timeout() implicitly provides.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL)
+ return efx_mcdi_poll(efx);
+
+ return 0;
+}
+
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
+{
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+ MCDI_STATE_RUNNING_SYNC) {
+ wake_up(&mcdi->wq);
+ return true;
+ }
+
+ return false;
+}
+
+static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
+{
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ struct efx_mcdi_async_param *async;
+ struct efx_nic *efx = mcdi->efx;
+
+ /* Process the asynchronous request queue */
+ spin_lock_bh(&mcdi->async_lock);
+ async = list_first_entry_or_null(
+ &mcdi->async_list, struct efx_mcdi_async_param, list);
+ if (async) {
+ mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+ efx_mcdi_send_request(efx, async->cmd,
+ (const efx_dword_t *)(async + 1),
+ async->inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ spin_unlock_bh(&mcdi->async_lock);
+
+ if (async)
+ return;
+ }
+
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ wake_up(&mcdi->wq);
+}
+
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+ struct efx_nic *efx = mcdi->efx;
+ struct efx_mcdi_async_param *async;
+ size_t hdr_len, data_len, err_len;
+ efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_ERR(errbuf);
+ int rc;
+
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+ MCDI_STATE_RUNNING_ASYNC)
+ return false;
+
+ spin_lock(&mcdi->iface_lock);
+ if (timeout) {
+ /* Ensure that if the completion event arrives later,
+ * the seqno check in efx_mcdi_ev_cpl() will fail
+ */
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ rc = -ETIMEDOUT;
+ hdr_len = 0;
+ data_len = 0;
+ } else {
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ }
+ spin_unlock(&mcdi->iface_lock);
+
+ /* Stop the timer. In case the timer function is running, we
+ * must wait for it to return so that there is no possibility
+ * of it aborting the next request.
+ */
+ if (!timeout)
+ del_timer_sync(&mcdi->async_timer);
+
+ spin_lock(&mcdi->async_lock);
+ async = list_first_entry(&mcdi->async_list,
+ struct efx_mcdi_async_param, list);
+ list_del(&async->list);
+ spin_unlock(&mcdi->async_lock);
+
+ outbuf = (efx_dword_t *)(async + 1);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_siena_mcdi_display_error(efx, async->cmd, async->inlen,
+ errbuf, err_len, rc);
+ }
+
+ if (async->complete)
+ async->complete(efx, async->cookie, rc, outbuf,
+ min(async->outlen, data_len));
+ kfree(async);
+
+ efx_mcdi_release(mcdi);
+
+ return true;
+}
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+ unsigned int datalen, unsigned int mcdi_err)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool wake = false;
+
+ spin_lock(&mcdi->iface_lock);
+
+ if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
+ if (mcdi->credits)
+ /* The request has been cancelled */
+ --mcdi->credits;
+ else
+ netif_err(efx, hw, efx->net_dev,
+				  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+				  seqno, mcdi->seqno);
+ } else {
+ if (efx->type->mcdi_max_ver >= 2) {
+ /* MCDI v2 responses don't fit in an event */
+ efx_mcdi_read_response_header(efx);
+ } else {
+ mcdi->resprc = efx_mcdi_errno(mcdi_err);
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = datalen;
+ }
+
+ wake = true;
+ }
+
+ spin_unlock(&mcdi->iface_lock);
+
+ if (wake) {
+ if (!efx_mcdi_complete_async(mcdi, false))
+ (void) efx_mcdi_complete_sync(mcdi);
+
+ /* If the interface isn't RUNNING_ASYNC or
+ * RUNNING_SYNC then we've received a duplicate
+ * completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment
+ * seqno, so would have failed the seqno check].
+ */
+ }
+}
+
+static void efx_mcdi_timeout_async(struct timer_list *t)
+{
+ struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+
+ efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+ if (efx->type->mcdi_max_ver < 0 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+ return -EINVAL;
+
+ if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
+ size_t hdr_len, size_t data_len,
+ u32 *proxy_handle)
+{
+ MCDI_DECLARE_BUF_ERR(testbuf);
+ const size_t buflen = sizeof(testbuf);
+
+ if (!proxy_handle || data_len < buflen)
+ return false;
+
+ efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
+ if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
+ *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
+ return true;
+ }
+
+ return false;
+}
+
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet,
+ u32 *proxy_handle, int *raw_rc)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_ERR(errbuf);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ efx_mcdi_abandon(efx);
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (proxy_handle)
+ *proxy_handle = 0;
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+		 * acquiring the iface_lock.
+		 */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ if (raw_rc)
+ *raw_rc = mcdi->resprc_raw;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
+ netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
+ cmd, -rc);
+ if (efx->type->mcdi_reboot_detected)
+ efx->type->mcdi_reboot_detected(efx);
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (proxy_handle && (rc == -EPROTO) &&
+ efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
+ proxy_handle)) {
+ mcdi->proxy_rx_status = 0;
+ mcdi->proxy_rx_handle = 0;
+ mcdi->state = MCDI_STATE_PROXY_WAIT;
+ } else if (rc && !quiet) {
+ efx_siena_mcdi_display_error(efx, cmd, inlen, errbuf,
+ err_len, rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_siena_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ if (!proxy_handle || !*proxy_handle)
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
+{
+ if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
+ /* Interrupt the proxy wait. */
+ mcdi->proxy_rx_status = -EINTR;
+ wake_up(&mcdi->proxy_rx_wq);
+ }
+}
+
+static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
+ u32 handle, int status)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
+
+ mcdi->proxy_rx_status = efx_mcdi_errno(status);
+ /* Ensure the status is written before we update the handle, since the
+ * latter is used to check if we've finished.
+ */
+ wmb();
+ mcdi->proxy_rx_handle = handle;
+ wake_up(&mcdi->proxy_rx_wq);
+}
+
+static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+
+ /* Wait for a proxy event, or timeout. */
+ rc = wait_event_timeout(mcdi->proxy_rx_wq,
+ mcdi->proxy_rx_handle != 0 ||
+ mcdi->proxy_rx_status == -EINTR,
+ MCDI_RPC_TIMEOUT);
+
+ if (rc <= 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI proxy timeout %d\n", handle);
+ return -ETIMEDOUT;
+ } else if (mcdi->proxy_rx_handle != handle) {
+ netif_warn(efx, hw, efx->net_dev,
+ "MCDI proxy unexpected handle %d (expected %d)\n",
+ mcdi->proxy_rx_handle, handle);
+ return -EINVAL;
+ }
+
+ return mcdi->proxy_rx_status;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet, int *raw_rc)
+{
+ u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
+ int rc;
+
+ if (inbuf && inlen && (inbuf == outbuf)) {
+ /* The input buffer can't be aliased with the output. */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rc = efx_siena_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc)
+ return rc;
+
+ rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet, &proxy_handle, raw_rc);
+
+ if (proxy_handle) {
+ /* Handle proxy authorisation. This allows approval of MCDI
+ * operations to be delegated to the admin function, allowing
+ * fine control over (eg) multicast subscriptions.
+ */
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI waiting for proxy auth %d\n",
+ proxy_handle);
+ rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
+
+ if (rc == 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "MCDI proxy retry %d\n", proxy_handle);
+
+ /* We now retry the original request. */
+ mcdi->state = MCDI_STATE_RUNNING_SYNC;
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+
+ rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
+ outbuf, outlen, outlen_actual,
+ quiet, NULL, raw_rc);
+ } else {
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+ "MC command 0x%x failed after proxy auth rc=%d\n",
+ cmd, rc);
+
+ if (rc == -EINTR || rc == -EIO)
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ efx_mcdi_release(mcdi);
+ }
+ }
+
+ return rc;
+}
+
+static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int raw_rc = 0;
+ int rc;
+
+ rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+ outbuf, outlen, outlen_actual, true, &raw_rc);
+
+ if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+ efx->type->is_vf) {
+ /* If the EVB port isn't available within a VF this may
+ * mean the PF is still bringing the switch up. We should
+ * retry our request shortly.
+ */
+ unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
+ unsigned int delay_us = 10000;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "%s: NO_EVB_PORT; will retry request\n",
+ __func__);
+
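+		/* Exponential backoff: start at 10ms and keep doubling the
+		 * delay while it is under 100ms, giving up once abort_time
+		 * passes.
+		 */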
+ do {
+ usleep_range(delay_us, delay_us + 10000);
+ rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+ outbuf, outlen, outlen_actual,
+ true, &raw_rc);
+ if (delay_us < 100000)
+ delay_us <<= 1;
+ } while ((rc == -EPROTO) &&
+ (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+ time_before(jiffies, abort_time));
+ }
+
+ if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
+ efx_siena_mcdi_display_error(efx, cmd, inlen,
+ outbuf, outlen, rc);
+
+ return rc;
+}
+
+/**
+ * efx_siena_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ * response is longer than @outlen & ~3, it will be truncated
+ * to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ * length. May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in an appropriate
+ * context.
+ *
+ * Return: A negative error code, or zero if successful. The error
+ * code may come from the MCDI response or may indicate a failure
+ * to communicate with the MC. In the former case, the response
+ * will still be copied to @outbuf and *@outlen_actual will be
+ * set accordingly. In the latter case, *@outlen_actual will be
+ * set to zero.
+ */
+int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
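+
+/* Illustrative use (a sketch mirroring callers later in this file; any
+ * command with no input parameters follows the same pattern):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *				outbuf, sizeof(outbuf), &outlen);
+ *	if (rc)
+ *		return rc;
+ */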
+
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_siena_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_siena_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_siena_mcdi_display_error
+ * as needed.
+ */
+int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
+
+int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
+ if (mcdi->mode == MCDI_MODE_FAIL)
+ return -ENETDOWN;
+
+ efx_mcdi_acquire_sync(mcdi);
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ return 0;
+}
+
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ struct efx_mcdi_async_param *async;
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
+ async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+ GFP_ATOMIC);
+ if (!async)
+ return -ENOMEM;
+
+ async->cmd = cmd;
+ async->inlen = inlen;
+ async->outlen = outlen;
+ async->quiet = quiet;
+ async->complete = complete;
+ async->cookie = cookie;
+ memcpy(async + 1, inbuf, inlen);
+
+ spin_lock_bh(&mcdi->async_lock);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ list_add_tail(&async->list, &mcdi->async_list);
+
+ /* If this is at the front of the queue, try to start it
+ * immediately
+ */
+ if (mcdi->async_list.next == &async->list &&
+ efx_mcdi_acquire_async(mcdi)) {
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ } else {
+ kfree(async);
+ rc = -ENETDOWN;
+ }
+
+ spin_unlock_bh(&mcdi->async_lock);
+
+ return rc;
+}
+
+/**
+ * efx_siena_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times out (in timer context)
+ */
+int
+efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
+
+int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
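+
+/* Illustrative async use (a sketch; the completer and its cookie
+ * handling are hypothetical):
+ *
+ *	static void my_complete(struct efx_nic *efx, unsigned long cookie,
+ *				int rc, efx_dword_t *outbuf,
+ *				size_t outlen_actual)
+ *	{
+ *		if (rc == 0) {
+ *			... parse outbuf, using cookie to find our state ...
+ *		}
+ *	}
+ *
+ *	rc = efx_siena_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen,
+ *				      my_complete, 0);
+ */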
+
+int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen, efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false, NULL, NULL);
+}
+
+int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true, NULL, NULL);
+}
+
+void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
+
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+ "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+ cmd, inlen, rc, code, err_arg);
+}
+
+/* Switch to polled MCDI completions. This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
+void efx_siena_mcdi_mode_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ /* If already in polling mode, nothing to do.
+ * If in fail-fast state, don't switch to polled completion.
+ * FLR recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
+ return;
+
+ /* We can switch from event completion to polled completion, because
+ * mcdi requests are always completed in shared memory. We do this by
+	 * switching the mode to polled, then completing the request.
+ * efx_mcdi_await_completion() will then call efx_mcdi_poll().
+ *
+ * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
+ * which efx_mcdi_complete_sync() provides for us.
+ */
+ mcdi->mode = MCDI_MODE_POLL;
+
+ efx_mcdi_complete_sync(mcdi);
+}
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_siena_mcdi_flush_async(struct efx_nic *efx)
+{
+ struct efx_mcdi_async_param *async, *next;
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ /* We must be in poll or fail mode so no more requests can be queued */
+ BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
+
+ del_timer_sync(&mcdi->async_timer);
+
+ /* If a request is still running, make sure we give the MC
+ * time to complete it so that the response won't overwrite our
+ * next request.
+ */
+ if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+ efx_mcdi_poll(efx);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ }
+
+ /* Nothing else will access the async list now, so it is safe
+ * to walk it without holding async_lock. If we hold it while
+ * calling a completer then lockdep may warn that we have
+ * acquired locks in the wrong order.
+ */
+ list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+ if (async->complete)
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ list_del(&async->list);
+ kfree(async);
+ }
+}
+
+void efx_siena_mcdi_mode_event(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ /* If already in event completion mode, nothing to do.
+ * If in fail-fast state, don't switch to event completion. FLR
+ * recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
+ return;
+
+ /* We can't switch from polled to event completion in the middle of a
+ * request, because the completion method is specified in the request.
+ * So acquire the interface to serialise the requestors. We don't need
+ * to acquire the iface_lock to change the mode here, but we do need a
+	 * write memory barrier to ensure that efx_siena_mcdi_rpc() sees it,
+	 * which efx_mcdi_acquire_sync() provides.
+ */
+ efx_mcdi_acquire_sync(mcdi);
+ mcdi->mode = MCDI_MODE_EVENTS;
+ efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ /* If there is an outstanding MCDI request, it has been terminated
+ * either by a BADASSERT or REBOOT event. If the mcdi interface is
+ * in polled mode, then do nothing because the MC reboot handler will
+ * set the header correctly. However, if the mcdi interface is waiting
+ * for a CMDDONE event it won't receive it [and since all MCDI events
+ * are sent to the same queue, we can't be racing with
+ * efx_mcdi_ev_cpl()]
+ *
+ * If there is an outstanding asynchronous request, we can't
+	 * complete it now (efx_mcdi_complete_async() would deadlock). The
+ * reset process will take care of this.
+ *
+ * There's a race here with efx_mcdi_send_request(), because
+ * we might receive a REBOOT event *before* the request has
+ * been copied out. In polled mode (during startup) this is
+ * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+ * event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI
+	 * request. Did the MC reboot before or after the copyout? The
+	 * best we can do in either case is just to return failure.
+ *
+ * If there is an outstanding proxy response expected it is not going
+ * to arrive. We should thus abort it.
+ */
+ spin_lock(&mcdi->iface_lock);
+ efx_mcdi_proxy_abort(mcdi);
+
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ } else {
+ int count;
+
+ /* Consume the status word since efx_siena_mcdi_rpc_finish() won't */
+ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
+ rc = efx_siena_mcdi_poll_reboot(efx);
+ if (rc)
+ break;
+ udelay(MCDI_STATUS_DELAY_US);
+ }
+
+ /* On EF10, a CODE_MC_REBOOT event can be received without the
+ * reboot detection in efx_siena_mcdi_poll_reboot() being triggered.
+ * If zero was returned from the final call to
+ * efx_siena_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+ * MC has definitely rebooted so prepare for the reset.
+ */
+ if (!rc && efx->type->mcdi_reboot_detected)
+ efx->type->mcdi_reboot_detected(efx);
+
+ mcdi->new_epoch = true;
+
+ /* Nobody was waiting for an MCDI request, so trigger a reset */
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ }
+
+ spin_unlock(&mcdi->iface_lock);
+}
+
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ efx_mcdi_proxy_abort(mcdi);
+
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
+/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+static void efx_mcdi_abandon(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+ return; /* it had already been done */
+ netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+ efx_siena_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
+static void efx_handle_drain_event(struct efx_nic *efx)
+{
+ if (atomic_dec_and_test(&efx->active_queues))
+ wake_up(&efx->flush_wq);
+
+ WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
+void efx_siena_mcdi_process_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+ u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ netif_err(efx, hw, efx->net_dev,
+ "MC watchdog or assertion failure at 0x%x\n", data);
+ efx_mcdi_ev_death(efx, -EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_PMNOTICE:
+ netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(efx,
+ MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+ MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+ MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE:
+ efx_siena_mcdi_process_link_change(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SENSOREVT:
+ efx_sensor_event(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SCHEDERR:
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
+ break;
+ case MCDI_EVENT_CODE_REBOOT:
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
+ efx_mcdi_ev_death(efx, -EIO);
+ break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+		/* MAC stats are gathered lazily. We can ignore this. */
+ break;
+ case MCDI_EVENT_CODE_FLR:
+ if (efx->type->sriov_flr)
+ efx->type->sriov_flr(efx,
+ MCDI_EVENT_FIELD(*event, FLR_VF));
+ break;
+ case MCDI_EVENT_CODE_PTP_RX:
+ case MCDI_EVENT_CODE_PTP_FAULT:
+ case MCDI_EVENT_CODE_PTP_PPS:
+ efx_siena_ptp_event(efx, event);
+ break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_siena_time_sync_event(channel, event);
+ break;
+ case MCDI_EVENT_CODE_TX_FLUSH:
+ case MCDI_EVENT_CODE_RX_FLUSH:
+ /* Two flush events will be sent: one to the same event
+ * queue as completions, and one to event queue 0.
+ * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+ * flag will be set, and we should ignore the event
+ * because we want to wait for all completions.
+ */
+ BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+ MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+ if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+ efx_handle_drain_event(efx);
+ break;
+ case MCDI_EVENT_CODE_TX_ERR:
+ case MCDI_EVENT_CODE_RX_ERR:
+ netif_err(efx, hw, efx->net_dev,
+ "%s DMA error (event: "EFX_QWORD_FMT")\n",
+ code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+ EFX_QWORD_VAL(*event));
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ efx_mcdi_ev_proxy_response(efx,
+ MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
+ MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "Unknown MCDI event " EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+/**************************************************************************
+ *
+ * Specific request functions
+ *
+ **************************************************************************
+ */
+
+void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
+ size_t outlength;
+ const __le16 *ver_words;
+ size_t offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ goto fail;
+ if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
+ offset = scnprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]),
+ le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]),
+ le16_to_cpu(ver_words[3]));
+
+ if (efx->type->print_additional_fwver)
+ offset += efx->type->print_additional_fwver(efx, buf + offset,
+ len - offset);
+
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
+
+ return;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ buf[0] = 0;
+}
+
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
+ driver_operating ? 1 : 0);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
+
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+ * specified will fail with EPERM, and we have to tell the MC we don't
+ * care what firmware we get.
+ */
+ if (rc == -EPERM) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+ MC_CMD_FW_DONT_CARE);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+ sizeof(inbuf), outbuf,
+ sizeof(outbuf), &outlen);
+ }
+ if (rc) {
+ efx_siena_mcdi_display_error(efx, MC_CMD_DRV_ATTACH,
+ sizeof(inbuf), outbuf, outlen, rc);
+ goto fail;
+ }
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
+ /* We currently assume we have control of the external link
+ * and are completely trusted by firmware. Abort probing
+ * if that's not true for this function.
+ */
+
+ if (was_attached != NULL)
+ *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+ return 0;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+ size_t outlen, i;
+ int port_num = efx_port_num(efx);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+ /* we need __aligned(2) for ether_addr_copy */
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (mac_address)
+ ether_addr_copy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
+ if (fw_subtype_list) {
+ for (i = 0;
+ i < MCDI_VAR_ARRAY_LEN(outlen,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ i++)
+ fw_subtype_list[i] = MCDI_ARRAY_WORD(
+ outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+ for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+ fw_subtype_list[i] = 0;
+ }
+ if (capabilities) {
+ if (port_num)
+ *capabilities = MCDI_DWORD(outbuf,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ else
+ *capabilities = MCDI_DWORD(outbuf,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+ __func__, rc, (int)outlen);
+
+ return rc;
+}
+
+int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
+ u32 dest_evq)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
+ u32 dest = 0;
+ int rc;
+
+ if (uart)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
+ if (evq)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
+
+ BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+ *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_siena_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ goto fail1;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ goto fail2;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+
+fail2:
+ netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+ __func__, type);
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ unsigned int flags, index;
+ const char *reason;
+ size_t outlen;
+ int retry;
+ int rc;
+
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice, once
+ * because a boot-time assertion might cause this command to fail
+ * with EINTR. And once again because GET_ASSERTS can race with
+	 * MC_CMD_REBOOT running on the other port.
+	 */
+ retry = 2;
+ do {
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc == -EPERM)
+ return 0;
+ } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
+
+ if (rc) {
+ efx_siena_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
+ return rc;
+ }
+ if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
+ return -EIO;
+
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return 0;
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : "unknown assertion";
+ netif_err(efx, hw, efx->net_dev,
+ "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers */
+ for (index = 0;
+ index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++)
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+ 1 + index,
+ MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+ index));
+
+ return 1;
+}
+
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
+
+ /* If the MC is running debug firmware, it might now be
+ * waiting for a debugger to attach, but we just want it to
+ * reboot. We set a flag that makes the command a no-op if it
+ * has already done so.
+ * The MCDI will thus return either 0 or -EIO.
+ */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf,
+ MC_CMD_REBOOT_IN_LEN, NULL, 0, NULL);
+ if (rc == -EIO)
+ rc = 0;
+ if (rc)
+ efx_siena_mcdi_display_error(efx, MC_CMD_REBOOT,
+ MC_CMD_REBOOT_IN_LEN, NULL, 0, rc);
+ return rc;
+}
+
+int efx_siena_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc <= 0)
+ return rc;
+
+ return efx_mcdi_exit_assertion(efx);
+}
+
+int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
+
+ BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
+ BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
+ BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
+
+ BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
+
+ return efx_siena_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_mcdi_reset_func(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+ MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+	/* White is black, and up is down: the MC reboots before it can
+	 * complete the command, so -EIO means the reboot happened and
+	 * 0 means it did not.
+	 */
+ if (rc == -EIO)
+ return 0;
+ if (rc == 0)
+ rc = -EIO;
+ return rc;
+}
+
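+/* Siena maps every requested reset reason to a full recoverable reset. */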
+enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* If MCDI is down, we can't handle_assertion */
+ if (method == RESET_TYPE_MCDI_TIMEOUT) {
+ rc = pci_reset_function(efx->pci_dev);
+ if (rc)
+ return rc;
+ /* Re-enable polled MCDI completion */
+ if (efx->mcdi) {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ mcdi->mode = MCDI_MODE_POLL;
+ }
+ return 0;
+ }
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_siena_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_DATAPATH)
+ return 0;
+ else if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_func(efx);
+}
+
+static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+ const u8 *mac, int *id_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+ int *id_out)
+{
+ return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
+}
+
+int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ return rc;
+}
+
+int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
+ int rc, count;
+
+ BUILD_BUG_ON(EFX_MAX_CHANNELS >
+ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
+ count = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ count, efx_rx_queue_index(rx_queue));
+ count++;
+ }
+ }
+ }
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count),
+ NULL, 0, NULL);
+ WARN_ON(rc < 0);
+
+ return rc;
+}
+
+int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0,
+ NULL, 0, NULL);
+ return rc;
+}
+
+#ifdef CONFIG_SFC_SIENA_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+ MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+ 1);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
+ MCDI_DECLARE_BUF(outbuf,
+ MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
+ size_t outlen;
+ int rc, rc2;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+ /* Always set this flag. Old firmware ignores it */
+ MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+ 1);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ netif_err(efx, drv, efx->net_dev,
+ "NVRAM update failed verification with code 0x%x\n",
+ rc2);
+ switch (rc2) {
+ case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
+ rc = -EIO;
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
+ case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
+ rc = -EINVAL;
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
+ case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
+ case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ rc = -EPERM;
+ break;
+ default:
+ netif_err(efx, drv, efx->net_dev,
+ "Unknown response to NVRAM_UPDATE_FINISH\n");
+ rc = -EIO;
+ }
+ }
+
+ return rc;
+}
+
+int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
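+	/* Read in chunks of at most EFX_MCDI_NVRAM_LEN_MAX bytes, matching
+	 * the response buffer size used by efx_mcdi_nvram_read().
+	 */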
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->common.mtd.erasesize;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+	/* The MCDI interface can in fact erase multiple blocks at once,
+	 * but erasing may be slow, so we make multiple calls here to
+	 * avoid tripping the MCDI RPC timeout.
+	 */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc = 0;
+
+ if (part->updating) {
+ part->updating = false;
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ }
+
+ return rc;
+}
+
+void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_mcdi_mtd_partition *mcdi_part =
+ container_of(part, struct efx_mcdi_mtd_partition, common);
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+ efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_SIENA_MTD */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h
new file mode 100644
index 000000000000..06f38e5e6832
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+/**
+ * enum efx_mcdi_state - MCDI request handling state
+ * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
+ * mcdi @iface_lock then they are able to move to
+ * %MCDI_STATE_RUNNING_SYNC or %MCDI_STATE_RUNNING_ASYNC
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
+ * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
+ * indicates we must wait for a proxy "try again" message.
+ * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
+ * has not yet consumed the result. For all other threads, equivalent to
+ * one of the running states.
+ */
+enum efx_mcdi_state {
+ MCDI_STATE_QUIESCENT,
+ MCDI_STATE_RUNNING_SYNC,
+ MCDI_STATE_RUNNING_ASYNC,
+ MCDI_STATE_PROXY_WAIT,
+ MCDI_STATE_COMPLETED,
+};
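+
+/* A sketch of the typical transitions, per the handling in mcdi.c:
+ *
+ *	QUIESCENT -> RUNNING_SYNC/RUNNING_ASYNC -> COMPLETED -> QUIESCENT
+ *
+ * with a detour COMPLETED -> PROXY_WAIT -> RUNNING_SYNC when the MC
+ * asks us to retry a request via proxy authorisation.
+ */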
+
+/**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum efx_mcdi_mode {
+ MCDI_MODE_POLL,
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+/**
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event.
+ * @wq: Wait queue for threads waiting for @state to become
+ *	%MCDI_STATE_QUIESCENT or %MCDI_STATE_COMPLETED
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
+ * @seqno: The next sequence number to use for mcdi requests.
+ * @credits: Number of spurious MCDI completion events allowed before we
+ * trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resprc_raw: Response error/success code (MCDI numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ * enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
+ * @proxy_rx_handle: Most recently received proxy authorisation handle
+ * @proxy_rx_status: Status of most recent proxy authorisation
+ * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
+ */
+struct efx_mcdi_iface {
+ struct efx_nic *efx;
+ enum efx_mcdi_state state;
+ enum efx_mcdi_mode mode;
+ wait_queue_head_t wq;
+ spinlock_t iface_lock;
+ bool new_epoch;
+ unsigned int credits;
+ unsigned int seqno;
+ int resprc;
+ int resprc_raw;
+ size_t resp_hdr_len;
+ size_t resp_data_len;
+ spinlock_t async_lock;
+ struct list_head async_list;
+ struct timer_list async_timer;
+#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
+ char *logging_buffer;
+ bool logging_enabled;
+#endif
+ unsigned int proxy_rx_handle;
+ int proxy_rx_status;
+ wait_queue_head_t proxy_rx_wq;
+};
+
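+/* Hardware monitor state: a DMA buffer that the MC fills with sensor
+ * readings, plus the dynamically built sysfs attribute group exposed
+ * via hwmon (see mcdi_mon.c).
+ */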
+struct efx_mcdi_mon {
+ struct efx_buffer dma_buf;
+ struct mutex update_lock;
+ unsigned long last_update;
+ struct device *device;
+ struct efx_mcdi_mon_attribute *attrs;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ unsigned int n_attrs;
+};
+
+struct efx_mcdi_mtd_partition {
+ struct efx_mtd_partition common;
+ bool updating;
+ u16 nvram_type;
+ u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct efx_mcdi_data {
+ struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_SIENA_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
+ u32 fn_flags;
+};
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+ EFX_WARN_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
+}
+
+#ifdef CONFIG_SFC_SIENA_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ EFX_WARN_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->hwmon;
+}
+#endif
+
+int efx_siena_mcdi_init(struct efx_nic *efx);
+void efx_siena_mcdi_detach(struct efx_nic *efx);
+void efx_siena_mcdi_fini(struct efx_nic *efx);
+
+int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
+int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen);
+int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen, efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
+
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+ unsigned long cookie, int rc,
+ efx_dword_t *outbuf,
+ size_t outlen_actual);
+int efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
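+/* Note that the asynchronous completer is invoked from the MCDI
+ * completion path (typically atomic context), so it must not sleep.
+ */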
+
+void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
+
+int efx_siena_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_siena_mcdi_mode_poll(struct efx_nic *efx);
+void efx_siena_mcdi_mode_event(struct efx_nic *efx);
+void efx_siena_mcdi_flush_async(struct efx_nic *efx);
+
+void efx_siena_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned. Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
+#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
+#define _MCDI_DECLARE_BUF(_name, _len) \
+ efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF(_name, _len) \
+ _MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name) \
+ MCDI_DECLARE_BUF(_name, 8)
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
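+/* Typical usage of the buffer helpers (an illustrative sketch, not a
+ * definition used by the protocol):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ32_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ32_OUT_LENMAX);
+ *
+ *	MCDI_SET_DWORD(inbuf, READ32_IN_ADDR, addr);
+ *	MCDI_SET_DWORD(inbuf, READ32_IN_NUMWORDS, 1);
+ *	rc = efx_siena_mcdi_rpc(efx, MC_CMD_READ32, inbuf, sizeof(inbuf),
+ *				outbuf, sizeof(outbuf), &outlen);
+ */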
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
+ _name2, _value2) \
+ EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3) \
+ EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4) \
+ EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5) \
+ EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6) \
+ EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6, _name7, _value7) \
+ EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6, \
+ MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
+ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field) \
+ EFX_EXTRACT_DWORD( \
+ *(efx_dword_t *) \
+ _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+ MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
+ (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name) \
+ efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
+ ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field) \
+ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
+ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *) \
+ _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
+ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
+ EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
+ do { \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
+ _type ## _TYPEDEF, _field2)
+
+#define MCDI_EVENT_FIELD(_ev, _field) \
+ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+
+#define MCDI_CAPABILITY(field) \
+ MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN
+
+#define MCDI_CAPABILITY_OFST(field) \
+ MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST
+
+#define efx_has_cap(efx, field) \
+ efx->type->check_caps(efx, \
+ MCDI_CAPABILITY(field), \
+ MCDI_CAPABILITY_OFST(field))
+
+void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities);
+int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
+ u32 dest_evq);
+int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out);
+int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_siena_mcdi_handle_assertion(struct efx_nic *efx);
+int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+ int *id_out);
+int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx);
+void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx);
+enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason);
+int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+
+#ifdef CONFIG_SFC_SIENA_MCDI_MON
+int efx_siena_mcdi_mon_probe(struct efx_nic *efx);
+void efx_siena_mcdi_mon_remove(struct efx_nic *efx);
+#else
+static inline int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_siena_mcdi_mon_remove(struct efx_nic *efx) {}
+#endif
+
+#ifdef CONFIG_SFC_SIENA_MTD
+int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
+#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
new file mode 100644
index 000000000000..56a9c56ed9e3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/stat.h>
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+
+enum efx_hwmon_type {
+ EFX_HWMON_UNKNOWN,
+ EFX_HWMON_TEMP, /* temperature */
+ EFX_HWMON_COOL, /* cooling device, probably a heatsink */
+ EFX_HWMON_IN, /* voltage */
+ EFX_HWMON_CURR, /* current */
+ EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
+};
+
+static const struct {
+ const char *label;
+ enum efx_hwmon_type hwmon_type;
+ int port;
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
+ SENSOR(IN_VAOE, "AOE input supply", IN, -1),
+ SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
+ SENSOR(IN_IAOE, "AOE input current", CURR, -1),
+ SENSOR(NIC_POWER, "Board power use", POWER, -1),
+ SENSOR(IN_0V9, "0.9V supply", IN, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
+ SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
+ SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT_EXTADC,
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+ "Controller die temp. (ext. ADC)", TEMP, -1),
+ SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
+ SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
+#undef SENSOR
+};
+
+static const char *const sensor_status_names[] = {
+ [MC_CMD_SENSOR_STATE_OK] = "OK",
+ [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+ [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+ [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+ [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
+};
+
+void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ unsigned int type, state, value;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
+
+ type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+ state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+ value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+
+	/* Deal gracefully with the board having more sensors than we
+	 * know about, but do not expect new sensor states. */
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
+ if (!name)
+ name = "No sensor name available";
+ EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+ state_txt = sensor_status_names[state];
+ EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
+
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
+}
+
+#ifdef CONFIG_SFC_SIENA_MCDI_MON
+
+struct efx_mcdi_mon_attribute {
+ struct device_attribute dev_attr;
+ unsigned int index;
+ unsigned int type;
+ enum efx_hwmon_type hwmon_type;
+ unsigned int limit_value;
+ char name[12];
+};
+
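+/* Ask the MC to DMA a fresh set of sensor readings into hwmon->dma_buf;
+ * readers then pull individual SENSOR_VALUE_ENTRY structures out of
+ * that buffer.
+ */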
+static int efx_mcdi_mon_update(struct efx_nic *efx)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
+ int rc;
+
+ MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+ hwmon->dma_buf.dma_addr);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+ if (rc == 0)
+ hwmon->last_update = jiffies;
+ return rc;
+}
+
+static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
+ efx_dword_t *entry)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev->parent);
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
+
+ mutex_lock(&hwmon->update_lock);
+
+ /* Use cached value if last update was < 1 s ago */
+ if (time_before(jiffies, hwmon->last_update + HZ))
+ rc = 0;
+ else
+ rc = efx_mcdi_mon_update(efx);
+
+ /* Copy out the requested entry */
+ *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
+
+ mutex_unlock(&hwmon->update_lock);
+
+ return rc;
+}
+
+static ssize_t efx_mcdi_mon_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ efx_dword_t entry;
+ unsigned int value, state;
+ int rc;
+
+ rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+ if (rc)
+ return rc;
+
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ if (state == MC_CMD_SENSOR_STATE_NO_READING)
+ return -EBUSY;
+
+ value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
+ value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ unsigned int value;
+
+ value = mon_attr->limit_value;
+
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
+ value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ efx_dword_t entry;
+ int state;
+ int rc;
+
+ rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+ if (rc)
+ return rc;
+
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
+}
+
+static ssize_t efx_mcdi_mon_show_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ return sprintf(buf, "%s\n",
+ efx_mcdi_sensor_type[mon_attr->type].label);
+}
+
+static void
+efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
+ ssize_t (*reader)(struct device *,
+ struct device_attribute *, char *),
+ unsigned int index, unsigned int type,
+ unsigned int limit_value)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
+
+ strscpy(attr->name, name, sizeof(attr->name));
+ attr->index = index;
+ attr->type = type;
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ else
+ attr->hwmon_type = EFX_HWMON_UNKNOWN;
+ attr->limit_value = limit_value;
+ sysfs_attr_init(&attr->dev_attr.attr);
+ attr->dev_attr.attr.name = attr->name;
+ attr->dev_attr.attr.mode = 0444;
+ attr->dev_attr.show = reader;
+ hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
+}
+
+int efx_siena_mcdi_mon_probe(struct efx_nic *efx)
+{
+ unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ unsigned int n_pages, n_sensors, n_attrs, page;
+ size_t outlen;
+ char name[12];
+ u32 mask;
+ int rc, i, j, type;
+
+ /* Find out how many sensors are present */
+ n_sensors = 0;
+ page = 0;
+ do {
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ ++page;
+ } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ n_pages = page;
+
+ /* Don't create a device if there are none */
+ if (n_sensors == 0)
+ return 0;
+
+ rc = efx_siena_alloc_buffer(efx, &hwmon->dma_buf,
+ n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ mutex_init(&hwmon->update_lock);
+ efx_mcdi_mon_update(efx);
+
+ /* Allocate space for the maximum possible number of
+ * attributes for this set of sensors:
+ * value, min, max, crit, alarm and label for each sensor.
+ */
+ n_attrs = 6 * n_sensors;
+ hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
+ if (!hwmon->attrs) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!hwmon->group.attrs) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
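+	/* Walk the sensor mask pages again to build the attributes: i
+	 * indexes readings in the DMA buffer, type is the global sensor
+	 * number (32 per SENSOR_INFO page) and j indexes entries within
+	 * the current page's response.
+	 */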
+ for (i = 0, j = -1, type = -1; ; i++) {
+ enum efx_hwmon_type hwmon_type;
+ const char *hwmon_prefix;
+ unsigned hwmon_index;
+ u16 min1, max1, min2, max2;
+
+ /* Find next sensor type or exit if there is none */
+ do {
+ type++;
+
+ if ((type % 32) == 0) {
+ page = type / 32;
+ j = -1;
+ if (page == n_pages)
+ goto hwmon_register;
+
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+ page);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ mask = (MCDI_DWORD(outbuf,
+ SENSOR_INFO_OUT_MASK) &
+ ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ /* Check again for short response */
+ if (outlen <
+ MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ } while (!(mask & (1 << type % 32)));
+ j++;
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+ /* Skip sensors specific to a different port */
+ if (hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port !=
+ efx_port_num(efx))
+ continue;
+ } else {
+ hwmon_type = EFX_HWMON_UNKNOWN;
+ }
+
+ switch (hwmon_type) {
+ case EFX_HWMON_TEMP:
+ hwmon_prefix = "temp";
+ hwmon_index = ++n_temp; /* 1-based */
+ break;
+ case EFX_HWMON_COOL:
+ /* This is likely to be a heatsink, but there
+ * is no convention for representing cooling
+ * devices other than fans.
+ */
+ hwmon_prefix = "fan";
+ hwmon_index = ++n_cool; /* 1-based */
+ break;
+ default:
+ hwmon_prefix = "in";
+ hwmon_index = n_in++; /* 0-based */
+ break;
+ case EFX_HWMON_CURR:
+ hwmon_prefix = "curr";
+ hwmon_index = ++n_curr; /* 1-based */
+ break;
+ case EFX_HWMON_POWER:
+ hwmon_prefix = "power";
+ hwmon_index = ++n_power; /* 1-based */
+ break;
+ }
+
+ min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MIN1);
+ max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MAX1);
+ min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MIN2);
+ max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MAX2);
+
+ if (min1 != max1) {
+ snprintf(name, sizeof(name), "%s%u_input",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_value, i, type, 0);
+
+ if (hwmon_type != EFX_HWMON_POWER) {
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ }
+
+ snprintf(name, sizeof(name), "%s%u_max",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, max1);
+
+ if (min2 != max2) {
+ /* Assume max2 is critical value.
+ * But we have no good way to expose min2.
+ */
+ snprintf(name, sizeof(name), "%s%u_crit",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, max2);
+ }
+ }
+
+ snprintf(name, sizeof(name), "%s%u_alarm",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+ efx_mcdi_sensor_type[type].label) {
+ snprintf(name, sizeof(name), "%s%u_label",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_label, i, type, 0);
+ }
+ }
+
+hwmon_register:
+ hwmon->groups[0] = &hwmon->group;
+ hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+ KBUILD_MODNAME, NULL,
+ hwmon->groups);
+ if (IS_ERR(hwmon->device)) {
+ rc = PTR_ERR(hwmon->device);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_siena_mcdi_mon_remove(efx);
+ return rc;
+}
+
+void efx_siena_mcdi_mon_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+
+ if (hwmon->device)
+ hwmon_device_unregister(hwmon->device);
+ kfree(hwmon->attrs);
+ kfree(hwmon->group.attrs);
+ efx_siena_free_buffer(efx, &hwmon->dma_buf);
+}
+
+#endif /* CONFIG_SFC_SIENA_MCDI_MON */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
new file mode 100644
index 000000000000..a3cc8b7ec732
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -0,0 +1,17204 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
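+/* e.g. a reported version of 0xb0070001 is a bootloader build, since its
+ * top 16 bits are 0xb007.
+ */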
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 2
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory and rings the
+ * doorbell. Each request is completed either by the MC writing back
+ * into shared memory or by the MC writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
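+/* As an illustrative sketch (not part of the protocol definition), a V1
+ * header for MC_CMD_GET_VERSION with an empty SDU and sequence number 3
+ * would be assembled as
+ *
+ *	(MC_CMD_GET_VERSION << MCDI_HEADER_CODE_LBN) |
+ *	(1 << MCDI_HEADER_RESYNC_LBN) |
+ *	(0 << MCDI_HEADER_DATALEN_LBN) |
+ *	(3 << MCDI_HEADER_SEQ_LBN)
+ */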
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
+
+/* The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte, which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * provided bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
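+/* Drivers decode these events with the MCDI_EVENT_* field definitions
+ * below, e.g. (illustrative sketch):
+ *
+ *	code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+ *	if (code == MCDI_EVENT_CODE_CMDDONE)
+ *		errno = EFX_QWORD_FIELD(*event, MCDI_EVENT_CMDDONE_ERRNO);
+ */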
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. Should only ever be seen
+ * over the UART.
+ */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also be returned for other operations such as sub-variant switching. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow for command number space
+ * extension.
+ */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
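+/* e.g. a (hypothetical) port ID of 8 on stack 3 would be encoded as
+ * (8 | EVB_STACK_ID(3)).
+ */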
+
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
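+/* So, for example, a V2 response rejecting its third argument with EINVAL
+ * carries 22 (MC_CMD_ERR_EINVAL) at MC_CMD_ERR_CODE_OFST and 2 at
+ * MC_CMD_ERR_ARG_OFST.
+ */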
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
+#define MCDI_EVENT_CMDDONE_SEQ_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_OFST 0
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_OFST 0
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_OFST 0
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
+/* enum: 100Mb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
+#define MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_OFST 0
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_OFST 0
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_OFST 0
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_OFST 0
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_OFST 0
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_OFST 0
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_OFST 0
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_OFST 0
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_OFST 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_OFST 0
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_OFST 0
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that an invalid flash type was detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
+#define MCDI_EVENT_AOE_ERR_DATA_OFST 0
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the
+ * driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_OFST 0
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_OFST 0
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_OFST 0
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_OFST 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_OFST 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_OFST 0
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_OFST 0
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_OFST 0
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_OFST 0
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_OFST 0
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
+#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24
+#define MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24
+#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4
+/* Enum values, see field(s): */
+/* MCDI_EVENT/LINKCHANGE_SPEED */
+#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28
+#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1
+#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29
+#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0
+#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0
+#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30
+#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
+#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
+#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+/* Alias for PTP_DATA. */
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+/* Data associated with PTP events which doesn't fit into the main DATA field
+ */
+#define MCDI_EVENT_PTP_DATA_LBN 36
+#define MCDI_EVENT_PTP_DATA_WIDTH 8
+/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the
+ * event ring
+ */
+#define MCDI_EVENT_EV_EVQ_PHASE_LBN 59
+#define MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function awaiting authorization that its request has been
+ * processed and the command may now be resent
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
+/* enum: Link change. This event is sent instead of LINKCHANGE if
+ * WANT_V2_LINKCHANGES was set on driver attach.
+ */
+#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20
+/* enum: This event is sent when the local device capabilities change, if
+ * WANT_V2_LINKCHANGES was set on driver attach. This will usually correspond
+ * to a module change.
+ */
+#define MCDI_EVENT_CODE_MODULECHANGE 0x21
+/* enum: Notification that the sensors have been added and/or removed from the
+ * sensor table. This event includes the new sensor table generation count, if
+ * this does not match the driver's local copy it is expected to call
+ * DYNAMIC_SENSORS_LIST to refresh it.
+ */
+#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22
+/* enum: Notification that a sensor has changed state as a result of a reading
+ * crossing a threshold. This is sent as two events, the first event contains
+ * the handle and the sensor's state (in the SRC field), and the second
+ * contains the value.
+ */
+#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23
+/* enum: Notification that a descriptor proxy function configuration has been
+ * pushed to "live" status (visible to host). SRC field contains the handle of
+ * the affected descriptor proxy function. DATA field contains the generation
+ * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET /
+ * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24
+/* enum: Notification that a descriptor proxy function has been reset. SRC
+ * field contains the handle of the affected descriptor proxy function. See
+ * SF-122927-TC for details.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25
+/* enum: Notification that a driver attached to a descriptor proxy function.
+ * SRC field contains the handle of the affected descriptor proxy function. For
+ * Virtio proxy functions this message consists of two MCDI events, where the
+ * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0
+ * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy
+ * functions event length and meaning of DATA field is not yet defined. See
+ * SF-122927-TC for details.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32
+#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32
+/* The new generation count after a sensor has been added or deleted. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32
+/* The handle of a dynamic sensor. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32
+/* The current values of a sensor. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0
+#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32
+/* The current state of a sensor. */
+#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36
+#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8
+#define MCDI_EVENT_DESC_PROXY_DATA_OFST 0
+#define MCDI_EVENT_DESC_PROXY_DATA_LEN 4
+#define MCDI_EVENT_DESC_PROXY_DATA_LBN 0
+#define MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32
+/* Generation count of applied configuration set */
+#define MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0
+#define MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4
+#define MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0
+#define MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32
+/* Virtio features negotiated with the host driver. First event (CONT=1)
+ * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63.
+ */
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
+
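The LBN/WIDTH pairs above give the least-significant bit number and the width, in bits, of each field within the 64-bit event word. A minimal decoding sketch on that basis, assuming these defines are in scope via the header this diff modifies; the in-tree driver uses its own accessor macros rather than open-coded shifts like these:

```c
#include <stdint.h>
#include <stdio.h>

/* Extract the field starting at bit 'lbn' with 'width' bits from a 64-bit
 * event word.
 */
static uint64_t ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (ev >> lbn) & mask;
}

int main(void)
{
	uint64_t ev = 0;	/* hypothetical event word read from the queue */
	uint64_t code = ev_field(ev, MCDI_EVENT_CODE_LBN, MCDI_EVENT_CODE_WIDTH);

	if (code == MCDI_EVENT_CODE_CMDDONE) {
		uint64_t data = ev_field(ev, MCDI_EVENT_DATA_LBN,
					 MCDI_EVENT_DATA_WIDTH);

		printf("CMDDONE, data=%#llx\n", (unsigned long long)data);
	}
	return 0;
}
```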
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event such
+ * that bits 32-63, containing the event code, level, source etc., remain the
+ * same. In this case the data field of the header is defined to be the number
+ * of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
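The LEN(num) and TIMESTAMPS_NUM(len) macros above convert between a timestamp count and the total structure length in bytes, and they are exact inverses for valid lengths (LENMIN 16 corresponds to a single timestamp record). A small sanity-check sketch, assuming these defines are in scope:

```c
#include <assert.h>

int main(void)
{
	unsigned int num = 5;	/* hypothetical number of timestamp records */
	unsigned int len = FCDI_EXTENDED_EVENT_PPS_LEN(num);	/* 8 + 8*5 = 48 */

	assert(len >= FCDI_EXTENDED_EVENT_PPS_LENMIN);
	assert(FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) == num);
	return 0;
}
```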
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
+#define MUM_EVENT_SENSOR_ID_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_OFST 0
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_OFST 0
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory. Note - this command really
+ * belongs to the INSECURE category but is required by shmboot. The command
+ * handler has additional checks to reject insecure calls.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
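The _OFST/_LEN defines give the byte offset and size of each field in the little-endian MCDI payload. A standalone sketch of laying out an MC_CMD_READ32 request on that basis; the address value is purely hypothetical, and the in-tree driver fills payloads with its own helper macros rather than by hand:

```c
#include <stdint.h>

/* Store a 32-bit value little-endian at the given byte offset. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

int main(void)
{
	uint8_t req[MC_CMD_READ32_IN_LEN];

	put_le32(req, MC_CMD_READ32_IN_ADDR_OFST, 0x10000); /* hypothetical MC address */
	put_le32(req, MC_CMD_READ32_IN_NUMWORDS_OFST, 4);   /* read four 32-bit words */
	/* req[] is now an 8-byte payload for command 0x1 (MC_CMD_READ32) */
	return 0;
}
```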
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4)
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump. Note - this command really
+ * belongs to the INSECURE category but is required by shmboot. The command
+ * handler has additional checks to reject insecure calls.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
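The BOOT_MAGIC_* bits are fields of the SRC_ADDR dword itself, which is what makes the source address "magic". A sketch of composing such an address from the _LBN defines above; which bits a given boot actually needs is firmware-specific, so treat this particular combination as an arbitrary example:

```c
#include <stdint.h>

int main(void)
{
	uint32_t src_addr = 0;

	src_addr |= 1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN;
	src_addr |= 1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN;
	/* src_addr would be stored at SRC_ADDR_OFST in the request payload */
	(void)src_addr;
	return 0;
}
```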
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* Offset from which the MC booted, or BOOT_OFFSET_NULL (see below) */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
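Note that the flag fields above carry an _OFST of 4, so their _LBN values are relative to the FLAGS dword at byte offset 4 rather than to the start of the response. A sketch of testing the WATCHDOG flag on that basis; the response buffer here is a hypothetical stand-in for a real completion:

```c
#include <stdbool.h>
#include <stdint.h>

/* Read a 32-bit little-endian value at the given byte offset. */
static uint32_t get_le32(const uint8_t *p, unsigned int ofst)
{
	p += ofst;
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	uint8_t resp[MC_CMD_GET_BOOT_STATUS_OUT_LEN] = { 0 }; /* hypothetical */
	uint32_t flags = get_le32(resp, MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST);
	bool watchdog =
		(flags >> MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN) & 1;

	(void)watchdog;
	return 0;
}
```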
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
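As the command description says, only GLOBAL_FLAGS is guaranteed; the rest of the payload is meaningful only when an assertion actually fired, and individual registers may carry the REG_NO_DATA sentinel. A sketch of walking the 31-entry GP register array accordingly, with get_le32() as in the previous sketch and a caller-supplied response buffer:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p, unsigned int ofst)
{
	p += ofst;
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

void print_asserts(const uint8_t *resp)
{
	uint32_t flags = get_le32(resp, MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST);
	unsigned int i;

	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return;	/* nothing else in the payload is meaningful */

	printf("PC = %#x\n",
	       get_le32(resp, MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST));
	for (i = 0; i < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; i++) {
		uint32_t reg = get_le32(resp,
			MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST + 4 * i);

		if (reg != MC_CMD_GET_ASSERTS_REG_NO_DATA)
			printf("GP[%u] = %#x\n", i, reg);
	}
}
```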
+
+/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs
+ * found on Riverhead designs
+ */
+#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (huntington and later) */
+/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26
+
+/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted
+ * firmware version information
+ */
+#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (huntington and later) */
+/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20
+/* MC firmware build date (as Unix timestamp) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+/* MC firmware version number */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+/* MC firmware security level */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
+/* MC firmware extra version info (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about adapter components.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
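Throughout this header, 64-bit fields are described both as a whole (_OFST/_LEN 8) and as _LO/_HI dword halves. A sketch of reassembling the VERSION field from the extended response on that basis; since MCDI payloads are little-endian, LO is the less significant half:

```c
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p, unsigned int ofst)
{
	p += ofst;
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Reassemble the 64-bit VERSION field from its LO/HI dword halves. */
uint64_t get_version(const uint8_t *resp)
{
	uint64_t lo = get_le32(resp, MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST);
	uint64_t hi = get_le32(resp, MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST);

	return lo | hi << 32;
}
```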
+
+/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter)
+ */
+#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x}
+ * FPGA_VERSION[1]: revision letter (0 => A, 1 => B, ...)
+ * FPGA_VERSION[2]: sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded down to a whole number of MHz (e.g. 666000000 for 666666666).
+ */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register. Siena PTP adapters only. */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency correction applied to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. Not implemented. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE: this is an extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT.
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to
+ * PPS input on the same NIC. Siena PTP adapters only.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Values above this are reserved for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1)
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
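ADJUST_BITS (0x28 = 40) and ADJUST_BITS_FP44 (0x2c = 44) give the number of fractional bits in the FREQ field, so under that reading the encoded value is the fractional frequency offset scaled by 2^40 (or 2^44). A sketch of encoding a parts-per-billion correction on that basis; the rounding behaviour and accepted range are assumptions, and the intermediate product overflows a signed 64-bit value for |ppb| above roughly 8 million in the 40-bit case:

```c
#include <stdint.h>

/* Encode a frequency correction in parts per billion as the fixed-point
 * FREQ value: (ppb / 1e9) * 2^MC_CMD_PTP_IN_ADJUST_BITS, kept in integer
 * arithmetic. Valid while ppb * 2^40 fits in int64_t.
 */
static int64_t ptp_encode_freq_ppb(int64_t ppb)
{
	return ppb * (1LL << MC_CMD_PTP_IN_ADJUST_BITS) / 1000000000LL;
}

int main(void)
{
	int64_t freq = ptp_encode_freq_ppb(100000); /* hypothetical +100 ppm */

	(void)freq;
	return 0;
}
```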
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
+/* Host address at which to write the "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1)
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32 bits of the major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
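+
+/* Editor's sketch (hypothetical helper): V2 widens the major adjustment to
+ * 64 bits by adding MAJOR_HI, so a signed 64-bit seconds value is split
+ * across the two dwords:
+ */
+static inline void ptp_offset_adjust_v2_split(s64 major, u32 *major_lo,
+					      u32 *major_hi)
+{
+	*major_lo = (u32)major;		/* MAJOR,    OFST 8  */
+	*major_hi = (u32)(major >> 32);	/* MAJOR_HI, OFST 16 */
+}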
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment, as a 40-bit fixed-point value in ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
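+
+/* Editor's sketch: FREQ is a 64-bit quantity with 40 fractional bits,
+ * assumed here to express ns of adjustment per ns (i.e. a dimensionless
+ * rate). Converting parts-per-billion under that assumption (div64_s64 is
+ * from linux/math64.h; |ppb| must stay below 2^23 to avoid overflowing the
+ * shift, which realistic PTP adjustments do by a wide margin):
+ */
+static inline s64 ptp_ppb_to_fp40(s64 ppb)
+{
+	return div64_s64(ppb << 40, 1000000000LL);
+}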
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset the value of the timer register. */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Not used. Events are always sent to function relative queue 0. */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
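+
+/* Editor's sketch (hypothetical helper; the driver proper would use the
+ * MCDI bitfield macros): QUEUE packs the event queue ID in bits 0..15 and
+ * the REPORT_SYNC_STATUS flag in bit 31 of the same dword:
+ */
+static inline u32 ptp_subscribe_queue(u16 qid, bool report_sync)
+{
+	return ((u32)qid << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN) |
+	       ((u32)report_sync <<
+		MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);
+}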
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32 bits of the major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20)
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1)
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed. Note this enum is deprecated; do not add to it. Use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
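+
+/* Editor's sketch: converting a SECONDS_27FRACTION minor value to
+ * nanoseconds, i.e. ns = minor * 10^9 / 2^27. The 64-bit intermediate
+ * keeps the multiply from overflowing (minor < 2^27, so the product stays
+ * below 2^57):
+ */
+static inline u32 ptp_s27_minor_to_ns(u32 minor)
+{
+	return (u32)(((u64)minor * 1000000000ULL) >> 27);
+}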
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
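+
+/* Editor's sketch of the corrected-window arithmetic described for
+ * SYNC_WINDOW_MIN above, applied to one MC_CMD_PTP_OUT_SYNCHRONIZE TIMESET
+ * sample (hypothetical struct; the driver proper reads the fields straight
+ * out of the MCDI response buffer):
+ */
+struct ptp_timeset_sample {
+	u32 hoststart;	/* host time before the NIC clock read     */
+	u32 hostend;	/* host time after the NIC clock read      */
+	u32 waitns;	/* ns the MC waited after reading the time */
+};
+
+static inline bool ptp_sample_usable(const struct ptp_timeset_sample *s,
+				     u32 sync_window_min)
+{
+	u32 window = s->hostend - s->hoststart - s->waitns;
+
+	return window >= sync_window_min;
+}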
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32-bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
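+
+/* Editor's sketch: the CSR_READ32 response BUFFER carries the values read
+ * followed by one trailing status dword, so a response of len bytes holds
+ * MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) - 1 values (hypothetical parse,
+ * assuming a host-order dword array):
+ */
+static inline u32 csr_read32_status(const u32 *buf, size_t len)
+{
+	size_t num = MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len);
+
+	return num ? buf[num - 1] : 0;
+}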
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32-bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When the address is not NULL, request activation of
+ * OCSD at the specified address with the specified interval. When the address
+ * is NULL, INTERVAL is interpreted as a command: 0: stop OCSD / 1: report
+ * the current OCSD state / 2: (debug) show the temperature reported by one
+ * of the supported sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address of the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
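+
+/* Editor's sketch (hypothetical helper, assuming a host-order dword
+ * buffer): filling the 16-byte HP request. When addr is zero, the INTERVAL
+ * field carries the command code (0 stop, 1 report, 2 debug) instead of an
+ * update interval:
+ */
+static inline void hp_ocsd_request(u32 *buf, u64 addr, u32 interval_or_cmd)
+{
+	buf[0] = MC_CMD_HP_IN_OCSD_SUBCMD;	/* SUBCMD,   OFST 0  */
+	buf[1] = (u32)addr;			/* ADDR_LO,  OFST 4  */
+	buf[2] = (u32)(addr >> 32);		/* ADDR_HI,  OFST 8  */
+	buf[3] = interval_or_cmd;		/* INTERVAL, OFST 12 */
+}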
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
+/* (thread ptr, stack size, free space) for each thread in the system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
+/* enum: By default all MCDI MDIO operations use clause 45 mode. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
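+
+/* Editor's sketch (hypothetical host-order buffer helpers): filling an
+ * MDIO_READ request and checking the raw status. Pass MC_CMD_MDIO_CLAUSE22
+ * as devad for a clause 22 access; a good transaction returns exactly
+ * MC_CMD_MDIO_STATUS_GOOD, i.e. only the DONE bit set:
+ */
+static inline void mdio_read_request(u32 *buf, u32 bus, u32 prtad,
+				     u32 devad, u32 addr)
+{
+	buf[0] = bus;	/* BUS,   OFST 0  */
+	buf[1] = prtad;	/* PRTAD, OFST 4  */
+	buf[2] = devad;	/* DEVAD, OFST 8  */
+	buf[3] = addr;	/* ADDR,  OFST 12 */
+}
+
+static inline bool mdio_status_ok(u32 status)
+{
+	return status == MC_CMD_MDIO_STATUS_GOOD;
+}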
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
+/* enum: By default all MCDI MDIO operations use clause 45 mode. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
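+
+/* Editor's sketch (hypothetical helper): packing the PARMS dword of a DBI
+ * write op. The VF_NUM/VF_ACTIVE/CS2 LBN values here are bit positions
+ * within the dword at their OFST, so they can be used directly as shifts:
+ */
+static inline u32 dbiwrop_parms(u16 vf_num, bool vf_active, bool cs2)
+{
+	return ((u32)vf_num << MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN) |
+	       ((u32)vf_active << MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN) |
+	       ((u32)cs2 << MC_CMD_DBIWROP_TYPEDEF_CS2_LBN);
+}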
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2)
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
+/* Each read op consists of an address (offset 0) and parameters (VF/CS2,
+ * offset 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16-byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
+#define MC_CMD_DRV_ATTACH_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_OFST 0
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4
+#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Prefer to use firmware with additional DPDK support */
+#define MC_CMD_FW_DPDK 0x6
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+#define MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
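+
+/* Editor's sketch (hypothetical host-order buffer helper): NEW_STATE is a
+ * bitmask built from the *_LBN flag positions above; UPDATE=1 applies it,
+ * UPDATE=0 merely reads back the existing state:
+ */
+static inline void drv_attach_request(u32 *buf, bool attach, bool preboot,
+				      u32 fw_id)
+{
+	buf[0] = ((u32)attach << MC_CMD_DRV_ATTACH_IN_ATTACH_LBN) |
+		 ((u32)preboot << MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN);
+	buf[1] = 1;	/* UPDATE: set the new state */
+	buf[2] = fw_id;	/* e.g. MC_CMD_FW_DONT_CARE  */
+}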
+
+/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver
+ * version
+ */
+#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4
+/* MC_CMD_DRV_ATTACH_OFST 0 */
+/* MC_CMD_DRV_ATTACH_LBN 0 */
+/* MC_CMD_DRV_ATTACH_WIDTH 1 */
+#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1
+/* MC_CMD_DRV_PREBOOT_OFST 0 */
+/* MC_CMD_DRV_PREBOOT_LBN 1 */
+/* MC_CMD_DRV_PREBOOT_WIDTH 1 */
+#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5
+#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+/* MC_CMD_FW_FULL_FEATURED 0x0 */
+/* enum: Prefer to use firmware with fewer features but lower latency */
+/* MC_CMD_FW_LOW_LATENCY 0x1 */
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+/* MC_CMD_FW_PACKED_STREAM 0x2 */
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+/* MC_CMD_FW_HIGH_TX_RATE 0x3 */
+/* enum: Reserved value */
+/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+/* MC_CMD_FW_RULES_ENGINE 0x5 */
+/* enum: Prefer to use firmware with additional DPDK support */
+/* MC_CMD_FW_DPDK 0x6 */
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+/* MC_CMD_FW_L3XUDP 0x7 */
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */
+/* enum: Only this option is allowed for non-admin functions */
+/* MC_CMD_FW_DONT_CARE 0xffffffff */
+/* Version of the driver to be reported by management protocols (e.g. NC-SI)
+ * handled by the NIC. This is a zero-terminated ASCII string.
+ */
+#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12
+#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+/* enum: If set, indicates that VI spreading is currently enabled. Will always
+ * indicate the current state, regardless of the value in the WANT_VI_SPREADING
+ * input.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
+/* enum: Used during development only. Should no longer be used. */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5
+/* enum: If set, indicates that TX only spreading is enabled. Even-numbered
+ * TXQs will use one engine, and odd-numbered TXQs will use the other. This
+ * also has the effect that only even-numbered RXQs will receive traffic.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to a circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* Poll period; 0 disables polling */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
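
The response packs eight 16-bit counters back to back; a sketch of decoding them into a struct, under the same little-endian assumption and with this header included:

#include <stdint.h>

/* Illustrative helper (not part of this header): read a little-endian
 * 16-bit field from an MCDI response.
 */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

struct pcie_credits {
	uint16_t cur_p_hdr, cur_p_data, cur_np_hdr, cur_np_data;
	uint16_t min_p_hdr, min_p_data, min_np_hdr, min_np_data;
};

static void decode_pcie_credits(const uint8_t *r, struct pcie_credits *c)
{
	c->cur_p_hdr   = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST);
	c->cur_p_data  = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST);
	c->cur_np_hdr  = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST);
	c->cur_np_data = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST);
	c->min_p_hdr   = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST);
	c->min_p_data  = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST);
	c->min_np_hdr  = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST);
	c->min_np_data = get_le16(r + MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST);
}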
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1)
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
+#define MC_CMD_PUTS_IN_UART_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_OFST 0
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
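
PUTS is a variable-length request: LENMIN/LENMAX bound the total size and the LEN(num) macro gives the payload size for a num-byte string. A sketch of encoding one, assuming the header is included and (an assumption, since the definitions above do not say) that the string carries its NUL terminator:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: build a PUTS request routing 'str' to the UART. Returns the
 * request length for the MCDI transport, or 0 if the string will not
 * fit in a standard (non-MCDI2) request.
 */
static size_t build_puts_uart(uint8_t *buf, size_t bufsz, const char *str)
{
	size_t num = strlen(str) + 1;	/* assumed to include the NUL */
	size_t len = MC_CMD_PUTS_IN_LEN(num);

	if (num > MC_CMD_PUTS_IN_STRING_MAXNUM || len > bufsz)
		return 0;
	memset(buf, 0, len);
	/* DEST dword: UART destination is bit 0, network PORT is bit 1. */
	buf[MC_CMD_PUTS_IN_DEST_OFST] = 1u << MC_CMD_PUTS_IN_UART_LBN;
	memcpy(buf + MC_CMD_PUTS_IN_STRING_OFST, str, num);
	return len;
}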
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This is guaranteed to succeed even if the PHY is
+ * in a 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
+#define MC_CMD_PHY_CAP_10HDX_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_OFST 8
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_OFST 8
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_OFST 8
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_OFST 8
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_OFST 8
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_OFST 8
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_OFST 8
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_OFST 8
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_OFST 8
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_OFST 8
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_OFST 8
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_OFST 8
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_OFST 8
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_OFST 8
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_OFST 8
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_OFST 8
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
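
All of the capability bits above live in the SUPPORTED_CAP dword at offset 8 and differ only in their LBN. A generic extractor shows the pattern; mcdi_get_field() is illustrative, assumes little-endian payloads and field widths below 32, and relies on this header for the constants:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper (not part of this header): extract a bitfield
 * from a little-endian MCDI buffer; assumes width < 32.
 */
static uint32_t mcdi_get_field(const uint8_t *buf, unsigned int ofst,
			       unsigned int lbn, unsigned int width)
{
	uint32_t dword = buf[ofst] | (buf[ofst + 1] << 8) |
			 (buf[ofst + 2] << 16) |
			 ((uint32_t)buf[ofst + 3] << 24);

	return (dword >> lbn) & ((1u << width) - 1);
}

/* Does the PHY advertise 10G full duplex and autonegotiation? */
static bool phy_supports_10g_an(const uint8_t *resp)
{
	return mcdi_get_field(resp, MC_CMD_PHY_CAP_10000FDX_OFST,
			      MC_CMD_PHY_CAP_10000FDX_LBN,
			      MC_CMD_PHY_CAP_10000FDX_WIDTH) &&
	       mcdi_get_field(resp, MC_CMD_PHY_CAP_AN_OFST,
			      MC_CMD_PHY_CAP_AN_LBN,
			      MC_CMD_PHY_CAP_AN_WIDTH);
}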
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
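
The intended flow is START_BIST followed by polling POLL_BIST until RESULT leaves the RUNNING state. A sketch of that loop; mcdi_rpc() is a hypothetical stand-in for the caller's MCDI transport (not part of this header), and the RESULT read takes the low byte of the little-endian dword since every result code fits there:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical transport: issue MCDI command 'cmd' with the given
 * request and response buffers; returns 0 on success.
 */
int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen);

/* Sketch: run the PHY's standard BIST and poll it to completion. */
static int run_phy_bist(void)
{
	uint8_t in[MC_CMD_START_BIST_IN_LEN] = { 0 };
	uint8_t out[MC_CMD_POLL_BIST_OUT_LEN] = { 0 };
	uint32_t result;
	int rc;

	in[MC_CMD_START_BIST_IN_TYPE_OFST] = MC_CMD_PHY_BIST;
	rc = mcdi_rpc(MC_CMD_START_BIST, in, sizeof(in), NULL, 0);
	if (rc)
		return rc;

	do {
		rc = mcdi_rpc(MC_CMD_POLL_BIST, NULL, 0, out, sizeof(out));
		if (rc)
			return rc;
		result = out[MC_CMD_POLL_BIST_OUT_RESULT_OFST];
	} while (result == MC_CMD_POLL_BIST_RUNNING);

	return result == MC_CMD_POLL_BIST_PASSED ? 0 : -1;
}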
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
+/* Status of each channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation rather than by writing
+ * FLUSH_CMD directly.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
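
This is another variable-length request, here an array of 32-bit queue IDs; a sketch of encoding it under the same little-endian assumption, with the header included:

#include <stddef.h>
#include <stdint.h>

/* Sketch: encode a FLUSH_RX_QUEUES request for 'count' queue IDs.
 * Returns the request length, or 0 if count is out of range for a
 * standard (non-MCDI2) request.
 */
static size_t build_flush_rxq(uint8_t *buf, size_t bufsz,
			      const uint32_t *qids, unsigned int count)
{
	size_t len = MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count);
	unsigned int i;

	if (count < MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM ||
	    count > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM || len > bufsz)
		return 0;
	for (i = 0; i < count; i++) {
		unsigned int ofst = MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST +
				    i * MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN;

		buf[ofst + 0] = qids[i] & 0xff;
		buf[ofst + 1] = (qids[i] >> 8) & 0xff;
		buf[ofst + 2] = (qids[i] >> 16) & 0xff;
		buf[ofst + 3] = (qids[i] >> 24) & 0xff;
	}
	return len;
}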
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside Near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
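
Each per-speed mask above is a 64-bit value split into LO/HI dwords. A sketch of reassembling one and testing a mode, assuming (as the sfc driver does) that each loopback mode's enum value is its bit position in the mask:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper (not part of this header): read a little-endian
 * 32-bit field from an MCDI response.
 */
static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Is PCS loopback available at 10G? */
static bool supports_10g_pcs_loopback(const uint8_t *resp)
{
	uint64_t modes =
		(uint64_t)get_le32(resp + MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST) |
		((uint64_t)get_le32(resp + MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST) << 32);

	return (modes >> MC_CMD_LOOPBACK_PCS) & 1;
}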
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside Near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define AN_TYPE_LEN 4
+#define AN_TYPE_TYPE_OFST 0
+#define AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define MC_CMD_AN_CLAUSE73 0x3
+#define AN_TYPE_TYPE_LBN 0
+#define AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define FEC_TYPE_LEN 4
+#define FEC_TYPE_TYPE_OFST 0
+#define FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+#define MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define MC_CMD_FEC_RS 0x2
+#define FEC_TYPE_TYPE_LBN 0
+#define FEC_TYPE_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9
+#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
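
A sketch of pulling the basics out of a GET_LINK response, reusing the illustrative mcdi_get_field() and get_le32() helpers from the sketches above (both are layout assumptions, not part of this header):

#include <stdbool.h>
#include <stdint.h>

struct link_state {
	bool up;
	bool full_duplex;
	uint32_t speed_mbps;	/* may be non-zero even when the link is down */
};

static void decode_get_link(const uint8_t *resp, struct link_state *st)
{
	st->up = mcdi_get_field(resp, MC_CMD_GET_LINK_OUT_LINK_UP_OFST,
				MC_CMD_GET_LINK_OUT_LINK_UP_LBN,
				MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH);
	st->full_duplex = mcdi_get_field(resp,
				MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST,
				MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN,
				MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH);
	st->speed_mbps = get_le32(resp + MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST);
}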
+
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9
+#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9
+#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
+
+/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence
+ * number to ensure this SET_LINK command corresponds to the latest
+ * MODULECHANGE event.
+ */
+#define MC_CMD_SET_LINK_IN_V2_LEN 17
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
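
A sketch of building the V2 request, reusing the illustrative mcdi_put_dword() helper from above; it carries the 7-bit sequence from the latest MODULECHANGE event so a stale SET_LINK can be rejected, and leaves the IGNORE bit clear:

#include <stdint.h>
#include <string.h>

/* Sketch: advertise 'cap' with no loopback, tagged with 'module_seq'. */
static void build_set_link_v2(uint8_t *buf, uint32_t cap, uint8_t module_seq)
{
	memset(buf, 0, MC_CMD_SET_LINK_IN_V2_LEN);
	mcdi_put_dword(buf, MC_CMD_SET_LINK_IN_V2_CAP_OFST, cap);
	mcdi_put_dword(buf, MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST,
		       MC_CMD_LOOPBACK_NONE);
	/* 7-bit sequence number; bit 7 (MODULE_SEQ_IGNORE) stays clear. */
	buf[MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST] = module_seq & 0x7f;
}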
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
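
The CONTROL mask makes selective updates possible: only parameters whose CFG_* bit is set are touched. A sketch of an MTU-only change (valid only when the SET_MAC_ENHANCED capability is present, per the comment above), again reusing the illustrative mcdi_put_dword():

#include <stdint.h>
#include <string.h>

/* Sketch: change only the MTU, leaving drain/address/reject/fcntl/FCS
 * settings untouched.
 */
static void build_set_mtu_only(uint8_t *buf, uint32_t mtu)
{
	memset(buf, 0, MC_CMD_SET_MAC_EXT_IN_LEN);
	mcdi_put_dword(buf, MC_CMD_SET_MAC_EXT_IN_MTU_OFST, mtu);
	mcdi_put_dword(buf, MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST,
		       1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
}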
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration). Each value is
+ * represented by a 32-bit number. If DMA_ADDR is 0, no DMA is performed and
+ * the statistics may be read from the message response. If DMA_ADDR != 0, the
+ * statistics are DMAed to that (page-aligned) location. Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If DMA_ADDR is 0, no DMA is performed and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, the
+ * statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
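
A sketch of a one-shot DMA request, following the DMA_LEN rule in the comment above; num_stats is MAC_STATS_NUM_STATS from MC_CMD_GET_CAPABILITIES_V4_OUT (or MC_CMD_MAC_NSTATS on legacy firmware), and mcdi_put_dword() is the illustrative helper from earlier:

#include <stdint.h>
#include <string.h>

/* Sketch: DMA the full statistics block once to 'dma_addr'
 * (page-aligned), with no periodic reporting.
 */
static void build_mac_stats_dma(uint8_t *buf, uint64_t dma_addr,
				unsigned int num_stats, uint32_t port_id)
{
	memset(buf, 0, MC_CMD_MAC_STATS_IN_LEN);
	mcdi_put_dword(buf, MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST,
		       (uint32_t)dma_addr);
	mcdi_put_dword(buf, MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST,
		       (uint32_t)(dma_addr >> 32));
	/* CMD dword: DMA flag is bit 0. */
	mcdi_put_dword(buf, MC_CMD_MAC_STATS_IN_CMD_OFST,
		       1u << MC_CMD_MAC_STATS_IN_DMA_LBN);
	/* DMA length = number of stats * sizeof(uint64_t), per the rule above. */
	mcdi_put_dword(buf, MC_CMD_MAC_STATS_IN_DMA_LEN_OFST,
		       num_stats * (uint32_t)sizeof(uint64_t));
	mcdi_put_dword(buf, MC_CMD_MAC_STATS_IN_PORT_ID_OFST, port_id);
}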
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
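+
+/* Illustrative sketch (not part of the generated MCDI definitions): the
+ * GENERATION_START/GENERATION_END consistency check described above, for the
+ * legacy layout where DMA_LEN == MC_CMD_MAC_NSTATS * sizeof(uint64_t) and the
+ * GENERATION_END entry is therefore the last word of the buffer. The helper
+ * is an assumption for the example, as is the placement of the read barrier;
+ * the includes below are shared by the later example sketches.
+ */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Copy a consistent snapshot out of the stats DMA buffer: sample
+ * GENERATION_END before the copy and require that GENERATION_START still
+ * matches afterwards, since the MC writes START first and END last when it
+ * updates the buffer. Caller retries on failure.
+ */
+static bool mac_stats_snapshot(const volatile uint64_t *dma_buf,
+			       uint64_t snap[MC_CMD_MAC_NSTATS])
+{
+	uint64_t gen_end = dma_buf[MC_CMD_MAC_GENERATION_END];
+	int i;
+
+	for (i = 0; i < MC_CMD_MAC_NSTATS; i++)
+		snap[i] = dma_buf[i];
+	/* a read memory barrier belongs here before the re-check */
+	return dma_buf[MC_CMD_MAC_GENERATION_START] == gen_end;
+}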
+
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124 which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V4 0x7d
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * NUM_RECORDS field, which is ignored in all but the first record).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
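+
+/* Illustrative sketch (not part of the generated definitions): building a
+ * one-record MC_CMD_MEMCPY payload with the source data embedded inline, as
+ * described above (FROM_RID = RID_INLINE, FROM_ADDR_HI = 0, FROM_ADDR_LO =
+ * offset of the data from the start of the payload). put_le32() is a
+ * hypothetical little-endian store helper; MCDI fields are little-endian.
+ */
+static void put_le32(uint8_t *p, uint32_t v)
+{
+	p[0] = (uint8_t)v;
+	p[1] = (uint8_t)(v >> 8);
+	p[2] = (uint8_t)(v >> 16);
+	p[3] = (uint8_t)(v >> 24);
+}
+
+/* Returns the total payload length: one 32-byte record plus the data. */
+static size_t memcpy_req_inline(uint8_t *buf, uint32_t to_rid,
+				uint64_t to_addr, const void *data,
+				uint32_t len)
+{
+	uint32_t data_ofst = MC_CMD_MEMCPY_IN_LEN(1);
+
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, 1);
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST, to_rid);
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST,
+		 (uint32_t)to_addr);
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST,
+		 (uint32_t)(to_addr >> 32));
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST,
+		 MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST,
+		 data_ofst);	/* ADDR_LO carries the inline data offset */
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST, 0);
+	put_le32(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST, len);
+	memcpy(buf + data_ofst, data, len);	/* embedded source bytes */
+	return data_ofst + len;
+}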
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
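+
+/* Illustrative sketch: installing a magic-packet WoL filter using the layout
+ * defined above. mcdi_rpc() is a hypothetical stand-in for the host's MCDI
+ * transport (returns 0 or a negative error); get_le32() mirrors the
+ * put_le32() helper from the MC_CMD_MEMCPY sketch above.
+ */
+int mcdi_rpc(unsigned int cmd, const void *in, size_t inlen,
+	     void *out, size_t outlen);
+
+static uint32_t get_le32(const uint8_t *p)
+{
+	return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 |
+	       (uint32_t)p[3] << 24;
+}
+
+static int wol_set_magic(const uint8_t mac[6], uint32_t *filter_id)
+{
+	uint8_t in[MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN] = { 0 };
+	uint8_t out[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+	int rc;
+
+	put_le32(in + MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST,
+		 MC_CMD_FILTER_MODE_SIMPLE);
+	put_le32(in + MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST,
+		 MC_CMD_WOL_TYPE_MAGIC);
+	/* MAC occupies the low six bytes of the 64-bit MAGIC_MAC field */
+	memcpy(in + MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST, mac, 6);
+	rc = mcdi_rpc(MC_CMD_WOL_FILTER_SET, in, sizeof(in),
+		      out, sizeof(out));
+	if (rc == 0)
+		*filter_id = get_le32(out +
+				MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST);
+	return rc;
+}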
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3
+#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock, in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1)
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020
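+
+/* Illustrative sketch: one step of the read-modify-write-verify flow
+ * described for MC_CMD_NVRAM_READ_IN_V2 above, e.g. MODE = TARGET_BACKUP to
+ * re-read and verify freshly written data while still holding the update
+ * lock. Reuses the hypothetical mcdi_rpc()/put_le32() helpers from the
+ * sketches above; len must not exceed MC_CMD_NVRAM_READ_OUT_LENMAX here.
+ */
+static int nvram_read_v2(uint32_t type, uint32_t offset, uint32_t mode,
+			 void *dst, uint32_t len)
+{
+	uint8_t in[MC_CMD_NVRAM_READ_IN_V2_LEN];
+	uint8_t out[MC_CMD_NVRAM_READ_OUT_LENMAX];
+	int rc;
+
+	put_le32(in + MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
+	put_le32(in + MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
+	put_le32(in + MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, len);
+	put_le32(in + MC_CMD_NVRAM_READ_IN_V2_MODE_OFST, mode);
+	rc = mcdi_rpc(MC_CMD_NVRAM_READ, in, sizeof(in), out, len);
+	if (rc == 0)
+		memcpy(dst, out + MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST,
+		       len);
+	return rc;
+}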
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1)
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
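+
+/* Illustrative sketch: one erase-then-write pass over a region, chunked to
+ * the per-command write buffer limit, using the definitions above. Assumes
+ * MC_CMD_NVRAM_UPDATE_START has already been issued for this partition and
+ * that offset/len are aligned to the partition's ERASESIZE (see
+ * MC_CMD_NVRAM_INFO). Reuses the hypothetical mcdi_rpc()/put_le32() helpers
+ * from the sketches above.
+ */
+static int nvram_erase_write(uint32_t type, uint32_t offset,
+			     const uint8_t *src, uint32_t len)
+{
+	uint8_t in[MC_CMD_NVRAM_WRITE_IN_LENMAX];
+	uint32_t chunk, done = 0;
+	int rc;
+
+	put_le32(in + MC_CMD_NVRAM_ERASE_IN_TYPE_OFST, type);
+	put_le32(in + MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST, offset);
+	put_le32(in + MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST, len);
+	rc = mcdi_rpc(MC_CMD_NVRAM_ERASE, in, MC_CMD_NVRAM_ERASE_IN_LEN,
+		      NULL, 0);
+	while (rc == 0 && done < len) {
+		chunk = len - done;
+		if (chunk > MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM)
+			chunk = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+		put_le32(in + MC_CMD_NVRAM_WRITE_IN_TYPE_OFST, type);
+		put_le32(in + MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST,
+			 offset + done);
+		put_le32(in + MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST, chunk);
+		memcpy(in + MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST,
+		       src + done, chunk);
+		rc = mcdi_rpc(MC_CMD_NVRAM_WRITE, in,
+			      MC_CMD_NVRAM_WRITE_IN_LEN(chunk), NULL, 0);
+		done += chunk;
+	}
+	return rc;
+}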
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/
+ * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to
+ * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of
+ * partition types i.e. static config, dynamic config and expansion ROM config.
+ * Attempting to perform this operation on a restricted partition will return
+ * the error EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for Medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to take
+ * longer than the MCDI timeout. Signature verification is therefore initiated
+ * when MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, and the
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of NVRAM update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe
+/* enum: Internal-error. The bundle header is invalid. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12
+/* enum: Internal-error. The number of components in a bundle does not match
+ * the number of components advertised by the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16
+/* enum: Internal-error. Component hash calculation failed. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
+/* enum: The update operation is in-progress. */
+#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
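+
+/* Illustrative sketch: finishing an update with background verification as
+ * described above, then polling with FLAG_POLL_VERIFY_RESULT until the MC
+ * reports something other than MC_CMD_NVRAM_VERIFY_RC_PENDING. sleep_ms()
+ * is a stand-in for the host's delay primitive; mcdi_rpc()/put_le32()/
+ * get_le32() are the hypothetical helpers from the sketches above. The
+ * caller checks *verify_rc against MC_CMD_NVRAM_VERIFY_RC_SUCCESS.
+ */
+void sleep_ms(unsigned int ms);
+
+static int nvram_finish_and_verify(uint32_t type, uint32_t *verify_rc)
+{
+	uint8_t in[MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN] = { 0 };
+	uint8_t out[MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN];
+	int rc;
+
+	put_le32(in + MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST, type);
+	put_le32(in + MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST,
+		 1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN |
+		 1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN);
+	for (;;) {
+		rc = mcdi_rpc(MC_CMD_NVRAM_UPDATE_FINISH, in, sizeof(in),
+			      out, sizeof(out));
+		if (rc)
+			return rc;
+		*verify_rc = get_le32(out +
+			MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST);
+		if (*verify_rc != MC_CMD_NVRAM_VERIFY_RC_PENDING)
+			return 0;
+		sleep_ms(100);
+		/* subsequent calls only poll for the background result */
+		put_le32(in + MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST,
+			 1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN);
+	}
+}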
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production MC firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16-bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32-bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for
+ * use as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None. Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
+
+/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4
+/* Flags controlling information retrieved */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Engineering sensor 1 */
+#define MC_CMD_SENSOR_ENGINEERING_1 0x57
+/* enum: Engineering sensor 2 */
+#define MC_CMD_SENSOR_ENGINEERING_2 0x58
+/* enum: Engineering sensor 3 */
+#define MC_CMD_SENSOR_ENGINEERING_3 0x59
+/* enum: Engineering sensor 4 */
+#define MC_CMD_SENSOR_ENGINEERING_4 0x5a
+/* enum: Engineering sensor 5 */
+#define MC_CMD_SENSOR_ENGINEERING_5 0x5b
+/* enum: Engineering sensor 6 */
+#define MC_CMD_SENSOR_ENGINEERING_6 0x5c
+/* enum: Engineering sensor 7 */
+#define MC_CMD_SENSOR_ENGINEERING_7 0x5d
+/* enum: Engineering sensor 8 */
+#define MC_CMD_SENSOR_ENGINEERING_8 0x5e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
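
Taken together, the MASK word and the 8-byte limit entries define a simple
paged discovery format: bit 31 of MASK flags that another page follows, and
each implemented sensor contributes one MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF.
A minimal decoding sketch (not driver code) follows; it assumes this header
is included, a little-endian host, and that len is the actual response
length.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void decode_sensor_info_page(const uint8_t *buf, size_t len)
{
	uint32_t mask;
	size_t i, nentries;

	memcpy(&mask, buf + MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST, 4);
	if (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN))
		printf("another sensor page follows\n");

	nentries = MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len);
	for (i = 0; i < nentries; i++) {
		const uint8_t *e = buf + MC_CMD_SENSOR_ENTRY_OFST +
				   MC_CMD_SENSOR_ENTRY_LEN * i;
		uint16_t min1, max1, min2, max2;

		memcpy(&min1, e + 0, 2);	/* MIN1, bits 0-15 */
		memcpy(&max1, e + 2, 2);	/* MAX1, bits 16-31 */
		memcpy(&min2, e + 4, 2);	/* MIN2, bits 32-47 */
		memcpy(&max2, e + 6, 2);	/* MAX2, bits 48-63 */
		printf("entry %zu: limits [%u,%u] [%u,%u]\n",
		       i, min1, max1, min2, max2);
	}
}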
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than LENGTH allows, EINVAL is returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
+/* Flags controlling information retrieved */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
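
Each reading DMAd by MC_CMD_READ_SENSORS above is one such dword. A minimal
sketch of unpacking it with the LBN/WIDTH values defined here, assuming this
header is included:

#include <stdint.h>

struct sensor_reading {
	uint16_t value;	/* sensor-specific units */
	uint8_t state;	/* MC_CMD_SENSOR_STATE_* */
	uint8_t type;	/* MC_CMD_SENSOR_* */
};

static struct sensor_reading unpack_sensor_value(uint32_t dword)
{
	struct sensor_reading r = {
		.value = (dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN) & 0xffff,
		.state = (dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN) & 0xff,
		.type  = (dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN) & 0xff,
	};
	return r;
}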
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. Eight Tx queues map to priorities 0-7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround; that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
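
As a concrete illustration of the BUG26807 special case, a hedged sketch of
enabling that workaround and checking the FLR_DONE flag follows. mcdi_rpc()
is a hypothetical transport helper, not part of this header: it sends the
request and fills the response buffer. Little-endian host assumed.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
		    void *outbuf, size_t outlen);	/* hypothetical */

static int enable_bug26807(void)
{
	uint8_t inbuf[MC_CMD_WORKAROUND_IN_LEN] = { 0 };
	uint8_t outbuf[MC_CMD_WORKAROUND_EXT_OUT_LEN] = { 0 };
	uint32_t type = MC_CMD_WORKAROUND_BUG26807, enabled = 1, flags;
	int rc;

	memcpy(inbuf + MC_CMD_WORKAROUND_IN_TYPE_OFST, &type, 4);
	memcpy(inbuf + MC_CMD_WORKAROUND_IN_ENABLED_OFST, &enabled, 4);
	rc = mcdi_rpc(MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
		      outbuf, sizeof(outbuf));
	if (rc)		/* EINVAL: the MC doesn't know this workaround */
		return rc;

	memcpy(&flags, outbuf + MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST, 4);
	if (flags & (1u << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
		/* Other functions holding filters were FLRed. */
	}
	return 0;
}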
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016
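
Putting the above together for the SFP+ case (PAGE 0 and 1 map to module I2C
address 0xA0, offsets 0 and 0x80): a hedged sketch, again using the
hypothetical mcdi_rpc() transport helper on a little-endian host.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
		    void *outbuf, size_t outlen);	/* hypothetical */

static int read_sfp_page(uint32_t page, uint8_t data[128])
{
	uint8_t inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN] = { 0 };
	uint8_t outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX] = { 0 };
	uint32_t datalen;
	int rc;

	memcpy(inbuf + MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST, &page, 4);
	rc = mcdi_rpc(MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
		      outbuf, sizeof(outbuf));
	if (rc)
		return rc;
	memcpy(&datalen, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST, 4);
	if (datalen < 128)	/* expect a full 128-byte block for PAGE 0/1 */
		return -1;
	memcpy(data, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, 128);
	return 0;
}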
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
+/* 0-8 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4)
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254
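
The response is self-describing: NUM_PARTITIONS counts the TYPE_ID words that
follow, and the _NUM(len) helper recovers how many actually fit in the
response. A minimal sketch of walking the list (header included,
little-endian host, len being the actual response length):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void list_nvram_partitions(const uint8_t *outbuf, size_t len)
{
	uint32_t num, type;
	size_t i, avail;

	memcpy(&num, outbuf + MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST, 4);
	avail = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len);
	if (num > avail)	/* never trust the count past the buffer */
		num = avail;
	for (i = 0; i < num; i++) {
		memcpy(&type, outbuf + MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
			      4 * i, 4);
		printf("partition type 0x%08x\n", type);
	}
}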
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1)
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000
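
A minimal sketch of consuming this response: each metadata field is only
meaningful when its *_VALID flag is set. It assumes this header is included,
a little-endian host, and that outbuf/len came from a successful
MC_CMD_NVRAM_METADATA exchange.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void print_nvram_metadata(const uint8_t *outbuf, size_t len)
{
	uint32_t flags;
	uint16_t w, x, y, z;

	memcpy(&flags, outbuf + MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST, 4);
	if (flags & (1u << MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)) {
		memcpy(&w, outbuf + MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST, 2);
		memcpy(&x, outbuf + MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST, 2);
		memcpy(&y, outbuf + MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST, 2);
		memcpy(&z, outbuf + MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST, 2);
		printf("version %u.%u.%u.%u\n", w, x, y, z);
	}
	if ((flags & (1u << MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN)) &&
	    len > MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST)
		printf("description: %.*s\n",
		       (int)(len - MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST),
		       (const char *)outbuf +
		       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST);
}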
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
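
Since the response encodes an allocation rather than a list, the Nth address
is derived arithmetically from BASE and STRIDE. A small sketch; treating the
low three octets as a 24-bit counter is an assumption about how the values
are allocated, not something this header guarantees.

#include <stdint.h>
#include <string.h>

static int nth_mac(const uint8_t base[6], uint32_t count, uint32_t stride,
		   uint32_t n, uint8_t mac[6])
{
	uint32_t low;

	if (n >= count)
		return -1;
	low = ((uint32_t)base[3] << 16) | ((uint32_t)base[4] << 8) | base[5];
	low += n * stride;		/* assumes no carry into the OUI */
	memcpy(mac, base, 3);
	mac[3] = (low >> 16) & 0xff;
	mac[4] = (low >> 8) & 0xff;
	mac[5] = low & 0xff;
	return 0;
}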
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation; see SF-110495-PS for details of CLP
+ * processing. This command has been extended to accommodate the requirements
+ * of different manufacturers, which are to be found in SF-119187-TC,
+ * SF-119186-TC, SF-120509-TC and SF-117282-PS.
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
+#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
+#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
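
A short sketch of the SET_MAC sub-operation: the OP dword selects the
sub-command, and the all-zero MAC restores the factory-programmed address,
per the field comment above. mcdi_rpc() is again a hypothetical transport
helper on a little-endian host.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const void *inbuf, size_t inlen,
		    void *outbuf, size_t outlen);	/* hypothetical */

static int clp_set_mac(const uint8_t mac[6])
{
	uint8_t inbuf[MC_CMD_CLP_IN_SET_MAC_LEN] = { 0 };
	uint32_t op = MC_CMD_CLP_OP_SET_MAC;

	memcpy(inbuf + MC_CMD_CLP_IN_OP_OFST, &op, 4);
	memcpy(inbuf + MC_CMD_CLP_IN_SET_MAC_ADDR_OFST, mac, 6);
	/* RESERVED padding at offset 10 stays zero */
	return mcdi_rpc(MC_CMD_CLP, inbuf, sizeof(inbuf), NULL, 0);
}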
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_OP_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of the device connected to the MUM whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of the device connected to the MUM whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
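
The GPIO_OP sub-commands above all share one packed header dword: the OPCODE
in bits 0-7, the bitwise operation in bits 8-15, the GPIO number in bits
16-23, and a per-operation argument (WRITEBIT/CFG/ENABLEBIT) in bits 24-31.
A minimal packing sketch, assuming this header is included:

#include <stdint.h>

static uint32_t mum_gpio_op_hdr(uint8_t bitwise_op, uint8_t gpio, uint8_t arg)
{
	uint32_t hdr = 0;

	hdr |= (uint32_t)MC_CMD_MUM_IN_GPIO_OP <<
	       MC_CMD_MUM_IN_GPIO_OPCODE_LBN;		/* GPIO opcode 0x5 */
	hdr |= (uint32_t)bitwise_op <<
	       MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN;	/* e.g. OUT_WRITE */
	hdr |= (uint32_t)gpio <<
	       MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN;
	hdr |= (uint32_t)arg << 24;	/* WRITEBIT/CFG/ENABLEBIT, LBN 24 */
	return hdr;
}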
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V. Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: A module may or may not be present, but contact cannot be
+ * established over I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
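+
+/* Example: unpacking one 64-bit SODIMM_INFO_RECORD (an illustrative sketch
+ * only). The record is carried as little-endian LO/HI dwords at offsets 8
+ * and 12; field positions follow the LBN/WIDTH definitions above:
+ *
+ *	u64 rec = ((u64)hi << 32) | lo;
+ *	u8 bank_id = rec & 0xff;		// BANK1, BANK2
+ *	u8 voltage = (rec >> 20) & 0xf;		// NOT_POWERED, 1V25, ...
+ *	u16 speed = (rec >> 32) & 0xffff;
+ *	u8 state = (rec >> 48) & 0xf;		// ABSENT, PRESENT_POWERED, ...
+ */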
+
+/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
+ * should match the equivalent structure in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24
+/* A value below this will trigger a warning event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32
+/* A value below this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32
+/* A value below this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32
+/* A value above this will trigger a warning event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32
+/* A value above this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32
+/* A value above this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32
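+
+/* For reference, the 24-byte layout above is equivalent to six consecutive
+ * little-endian 32-bit limits (a hypothetical host-side mirror for
+ * illustration only; drivers normally access the fields through the _OFST
+ * macros instead):
+ *
+ *	struct dynamic_sensors_limits {		// hypothetical name
+ *		__le32 lo_warning, lo_critical, lo_fatal;
+ *		__le32 hi_warning, hi_critical, hi_fatal;
+ *	};
+ */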
+
+/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64
+/* The handle used to identify the sensor in calls to
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32
+/* A human-readable name for the sensor (zero terminated string, max 32 bytes)
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256
+/* The type of the sensor device, and by implication the unit in which the
+ * values will be reported
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4
+/* enum: A voltage sensor. Unit is mV */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0
+/* enum: A current sensor. Unit is mA */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1
+/* enum: A power sensor. Unit is mW */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2
+/* enum: A temperature sensor. Unit is Celsius */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3
+/* enum: A cooling fan sensor. Unit is RPM */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32
+/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192
+
+/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12
+/* The handle used to identify the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32
+/* The current value of the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32
+/* The sensor's condition, e.g. good, broken or removed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4
+/* enum: Sensor working normally within limits */
+#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0
+/* enum: Warning threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1
+/* enum: Critical threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2
+/* enum: Fatal threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3
+/* enum: Sensor not working */
+#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4
+/* enum: Sensor working but no reading available */
+#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5
+/* enum: Sensor initialization failed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC.
+ *
+ * Sensor handles are persistent for the lifetime of the sensor and are used to
+ * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
+ *
+ * The generation count is maintained by the MC, is persistent across reboots
+ * and will be incremented each time the sensor table is modified. When the
+ * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
+ * containing the new generation count. The driver should compare this against
+ * the current generation count, and if it is different, call
+ * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table.
+ *
+ * The sensor count is provided to allow a future path to supporting more than
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
+ * the maximum number that will fit in a single response. As this is a fairly
+ * large number (253) it is not anticipated that this will be needed in the
+ * near future, so it can currently be ignored.
+ *
+ * On Riverhead this command is implemented as a wrapper for `list` in the
+ * sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
+#undef MC_CMD_0x66_PRIVILEGE_CTG
+
+#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4)
+/* Generation count, which will be updated each time a sensor is added to or
+ * removed from the MC sensor table.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4
+/* Number of sensors managed by the MC. Note that in principle, this can be
+ * larger than the size of the HANDLES array.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253
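+
+/* Example: issuing MC_CMD_DYNAMIC_SENSORS_LIST (an illustrative sketch,
+ * assuming the sfc driver's MCDI helpers from mcdi.h - MCDI_DECLARE_BUF,
+ * efx_mcdi_rpc, MCDI_DWORD and MCDI_ARRAY_DWORD - which are not defined in
+ * this file, and a struct efx_nic *efx in scope):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX);
+ *	u32 gen, handles[MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM];
+ *	size_t outlen;
+ *	unsigned int i, n;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_DYNAMIC_SENSORS_LIST, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0) {
+ *		gen = MCDI_DWORD(outbuf, DYNAMIC_SENSORS_LIST_OUT_GENERATION);
+ *		n = MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(outlen);
+ *		for (i = 0; i < n; i++)
+ *			handles[i] = MCDI_ARRAY_DWORD(outbuf,
+ *					DYNAMIC_SENSORS_LIST_OUT_HANDLES, i);
+ *	}
+ */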
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
+ * Get descriptions for a set of sensors, specified as an array of sensor
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for
+ * `get_descriptions` in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
+#undef MC_CMD_0x67_PRIVILEGE_CTG
+
+#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64)
+/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15
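+
+/* Example: fetching descriptions for n previously-listed handles (an
+ * illustrative sketch, same assumed MCDI helpers and context as above, plus
+ * MCDI_SET_ARRAY_DWORD):
+ *
+ *	MCDI_DECLARE_BUF(inbuf,
+ *			 MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX);
+ *	MCDI_DECLARE_BUF(outbuf,
+ *			 MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX);
+ *
+ *	for (i = 0; i < n; i++)
+ *		MCDI_SET_ARRAY_DWORD(inbuf,
+ *			DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES, i,
+ *			handles[i]);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS,
+ *			  inbuf,
+ *			  MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(n),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */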
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
+ * Read the state and value for a set of sensors, specified as an array of
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
+ *
+ * In the case of a broken sensor, the state of the response's
+ * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
+ * provided should be treated as erroneous.
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
+ * in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
+#undef MC_CMD_0x68_PRIVILEGE_CTG
+
+#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12)
+/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
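+
+/* Example: walking the returned readings (an illustrative sketch; each VALUES
+ * entry is a 12-byte MC_CMD_DYNAMIC_SENSORS_READING laid out as defined
+ * above, with all fields little-endian):
+ *
+ *	n = MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(outlen);
+ *	for (i = 0; i < n; i++) {
+ *		const u8 *p = (const u8 *)outbuf +
+ *			      i * MC_CMD_DYNAMIC_SENSORS_READING_LEN;
+ *		u32 handle = le32_to_cpup((const __le32 *)(p + 0));
+ *		u32 value = le32_to_cpup((const __le32 *)(p + 4));
+ *		u32 state = le32_to_cpup((const __le32 *)(p + 8));
+ *
+ *		if (state == MC_CMD_DYNAMIC_SENSORS_READING_BROKEN)
+ *			continue;	// value is erroneous, per the note above
+ *	}
+ */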
+
+
+/***********************************/
+/* MC_CMD_EVENT_CTRL
+ * Configure which categories of unsolicited events the driver expects to
+ * receive (Riverhead).
+ */
+#define MC_CMD_EVENT_CTRL 0x69
+#undef MC_CMD_0x69_PRIVILEGE_CTG
+
+#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVENT_CTRL_IN msgrequest */
+#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
+#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
+#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
+#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
+/* Array of event categories for which the driver wishes to receive events. */
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
+#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
+/* enum: Driver wishes to receive LINKCHANGE events. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
+/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
+ */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
+/* enum: Driver wishes to receive RX (receive) errors. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
+/* enum: Driver wishes to receive transmit errors. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
+/* enum: Driver wishes to receive firmware alerts. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
+/* enum: Driver wishes to receive reboot events. */
+#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
+
+/* MC_CMD_EVENT_CTRL_OUT msgresponse */
+#define MC_CMD_EVENT_CTRL_OUT_LEN 0
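+
+/* Example: subscribing to link-change and sensor events only (an illustrative
+ * sketch, same assumed MCDI helpers and context as above):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_EVENT_CTRL_IN_LEN(2));
+ *
+ *	MCDI_SET_ARRAY_DWORD(inbuf, EVENT_CTRL_IN_EVENT_TYPE, 0,
+ *			     MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE);
+ *	MCDI_SET_ARRAY_DWORD(inbuf, EVENT_CTRL_IN_EVENT_TYPE, 1,
+ *			     MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_EVENT_CTRL, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */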
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
+/* enum: Bundle image partition */
+#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00
+/* enum: Bundle metadata partition that holds additional information related to
+ * a bundle update in TLV format
+ */
+#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01
+/* enum: Bundle update non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
+/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_OFST 0
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_OFST 0
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_OFST 0
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_OFST 0
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
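+
+/* Example: testing a single feature bit (an illustrative sketch). The mask is
+ * carried as little-endian LO/HI dwords at offsets 0 and 4, and each _LBN
+ * value above is a bit position within the combined 64-bit mask:
+ *
+ *	u64 mask = ((u64)hi << 32) | lo;
+ *	bool has_pio = mask & (1ULL << LICENSED_FEATURES_PIO_LBN);
+ */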
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_OFST 0
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_OFST 0
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_OFST 0
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_OFST 0
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_OFST 0
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_OFST 0
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_OFST 0
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_OFST 0
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
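+
+/* Example: reassembling a TX timestamp (an illustrative sketch). A timestamp
+ * arrives as two events, TX_EV_TSTAMP_LO then TX_EV_TSTAMP_HI, each carrying
+ * 16 bits of timestamp data, so the full 32-bit value is:
+ *
+ *	u32 tstamp = ((u32)tstamp_data_hi << 16) | tstamp_data_lo;
+ */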
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15, which
+ * can be considered as 4 bits selecting which fields are included in the
+ * hash. (A value of 0 effectively disables RSS spreading for the packet
+ * type.) The YAML generation tools require this structure to be a whole
+ * number of bytes wide, but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_OFST 0
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_OFST 0
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_OFST 0
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_OFST 0
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
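+
+/* Example: selecting a 4-tuple hash (an illustrative sketch). Hashing on
+ * source/destination address and source/destination port corresponds to
+ * setting all four selector bits:
+ *
+ *	u8 mode = (1 << RSS_MODE_HASH_SRC_ADDR_LBN) |
+ *		  (1 << RSS_MODE_HASH_DST_ADDR_LBN) |
+ *		  (1 << RSS_MODE_HASH_SRC_PORT_LBN) |
+ *		  (1 << RSS_MODE_HASH_DST_PORT_LBN);	// == 0xf
+ */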
+
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
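+
+/* Example: checking the validity mask before trusting a register value (an
+ * illustrative sketch, assuming LSB-first bit numbering within the 16-byte
+ * MASK bitmap):
+ *
+ *	const u8 *mask = (const u8 *)outbuf + MC_CMD_READ_REGS_OUT_MASK_OFST;
+ *	const __le32 *regs = (const __le32 *)
+ *		((const u8 *)outbuf + MC_CMD_READ_REGS_OUT_REGS_OFST);
+ *
+ *	if (mask[i / 8] & (1 << (i % 8)))
+ *		val = le32_to_cpu(regs[i]);	// i < 73, see REGS_NUM
+ */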
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of each 4 KiB, 4 KiB-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
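+
+/* Example: building an MC_CMD_INIT_EVQ request for a queue backed by eight
+ * 4 KiB buffers (an illustrative sketch, assuming the sfc MCDI helpers
+ * MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_POPULATE_DWORD_2 and
+ * MCDI_SET_ARRAY_QWORD from mcdi.h):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_EVQ_IN_LEN(8));
+ *
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, nentries);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, evq_index);
+ *	MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_IN_FLAGS,
+ *			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ *			      INIT_EVQ_IN_FLAG_USE_TIMER, 1);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ *		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ *		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ *	for (i = 0; i < 8; i++)
+ *		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i,
+ *				     dma_addr + i * 4096);
+ */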
+
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * overridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be overridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of each 4 KiB, 4 KiB-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet (FCoE). */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of each 4 KiB, 4 KiB-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
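+
+/* Example: a minimal legacy MC_CMD_INIT_RXQ request backed by a single 4 KiB
+ * buffer in physical addressing mode (an illustrative sketch, same assumed
+ * MCDI helpers and context as above):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_IN_LEN(1));
+ *
+ *	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, 512);
+ *	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, evq_index);
+ *	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, label);
+ *	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, rxq_index);
+ *	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);	// physical mode
+ *	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ *	MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, 0, dma_addr);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */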
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
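
/*
 * Illustrative sketch (editor's example, not part of this patch): how the
 * _OFST/_LBN/_WIDTH triples above are typically consumed. Each named field
 * lives in the 32-bit dword starting at _OFST; _LBN is the field's
 * least-significant bit within that dword and _WIDTH its width in bits
 * (all widths here are well under 32, so the shifts below are defined).
 * A little-endian host is assumed for brevity; a real driver would convert
 * with cpu_to_le32()-style helpers.
 */
#include <stdint.h>
#include <string.h>

static void mcdi_set_field(uint8_t *buf, unsigned int ofst,
			   unsigned int lbn, unsigned int width, uint32_t val)
{
	uint32_t mask = ((1u << width) - 1u) << lbn;
	uint32_t dword;

	memcpy(&dword, buf + ofst, sizeof(dword));
	dword = (dword & ~mask) | ((val << lbn) & mask);
	memcpy(buf + ofst, &dword, sizeof(dword));
}

/* e.g. selecting packed-stream DMA with 256K buffers in an EXT_IN request */
static void select_packed_stream_256k(uint8_t *req)
{
	mcdi_set_field(req, MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST,
		       MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN,
		       MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH,
		       MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM);
	mcdi_set_field(req, MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST,
		       MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN,
		       MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH,
		       MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K);
}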
+
+/* MC_CMD_INIT_RXQ_V3_IN msgrequest */
+#define MC_CMD_INIT_RXQ_V3_IN_LEN 560
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
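
/*
 * Illustrative sketch (editor's example, not part of this patch): a
 * consistency check for the equal-stride parameters above, based only on
 * the field descriptions. Each bucket holds ES_PACKET_BUFFERS_PER_BUCKET
 * buffers of ES_PACKET_STRIDE bytes, and the adapter writes at most
 * ES_MAX_DMA_LEN bytes into each buffer; bucket_bytes is the driver-chosen
 * bucket size (an assumed parameter, not a field of this message).
 */
#include <stdbool.h>
#include <stdint.h>

static bool es_geometry_ok(uint32_t buffers_per_bucket, uint32_t packet_stride,
			   uint32_t max_dma_len, uint64_t bucket_bytes)
{
	return max_dma_len <= packet_stride &&
	       (uint64_t)buffers_per_bucket * packet_stride <= bucket_bytes;
}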
+
+/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required
+ * for systems with a QDMA (currently, Riverhead)
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_LEN 564
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently
+ * Riverhead imposes a global limit of eight different buffer sizes across all
+ * active queues. 2KB and 4KB buffers are guaranteed to be available, but a
+ * request for a different buffer size will fail if eight other buffer sizes
+ * are already in use. In future Riverhead versions this limit will go away
+ * and any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4
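
/*
 * Illustrative sketch (editor's example, not part of this patch): since
 * current Riverhead caps the system at eight distinct buffer sizes and
 * only guarantees 2KB and 4KB, a driver requesting an unusual
 * BUFFER_SIZE_BYTES may want a fallback path. issue_init_rxq_v4() is a
 * hypothetical helper standing in for the real MCDI transport, and
 * struct nic is a hypothetical driver context.
 */
#include <stdint.h>

struct nic;
extern int issue_init_rxq_v4(struct nic *nic, uint8_t *req, uint32_t bytes);

static int init_rxq_v4_with_fallback(struct nic *nic, uint8_t *req,
				     uint32_t wanted_bytes)
{
	int rc = issue_init_rxq_v4(nic, req, wanted_bytes);

	if (rc == 0 || wanted_bytes == 4096)
		return rc;
	return issue_init_rxq_v4(nic, req, 4096);	/* guaranteed size */
}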
+
+/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a
+ * different RX packet prefix
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_LEN 568
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently
+ * Riverhead imposes a global limit of eight different buffer sizes across all
+ * active queues. 2KB and 4KB buffers are guaranteed to be available, but a
+ * request for a different buffer size will fail if eight other buffer sizes
+ * are already in use. In future Riverhead versions this limit will go away
+ * and any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4
+/* Prefix id for the RX prefix format to use on packets delivered to this
+ * queue. Zero is always a valid prefix id and means the default prefix format
+ * documented for the platform. Other prefix ids can be obtained by calling
+ * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564
+#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4
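
/*
 * Illustrative sketch (editor's example, not part of this patch):
 * assembling a minimal MC_CMD_INIT_RXQ_V5 request using only fields
 * defined above. put32() stores a host value as a little-endian dword
 * (little-endian host assumed for brevity). Leaving the rest of the
 * zeroed message untouched selects default flags, and RX_PREFIX_ID 0
 * selects the platform's documented default RX prefix.
 */
#include <stdint.h>
#include <string.h>

static void put32(uint8_t *p, uint32_t v)
{
	memcpy(p, &v, sizeof(v));
}

static void build_init_rxq_v5(uint8_t req[MC_CMD_INIT_RXQ_V5_IN_LEN],
			      uint32_t size_entries, uint32_t evq,
			      uint32_t instance, uint64_t ring_dma_addr)
{
	memset(req, 0, MC_CMD_INIT_RXQ_V5_IN_LEN);
	put32(req + MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST, size_entries);
	put32(req + MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST, evq);
	put32(req + MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST, instance);
	/* 64-bit buffer address split into LO/HI dwords */
	put32(req + MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST,
	      (uint32_t)ring_dma_addr);
	put32(req + MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST,
	      (uint32_t)(ring_dma_addr >> 32));
	put32(req + MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST, 0);
}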
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
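
/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * LEN(num)/_NUM(len) macro pair above is self-inverse, which is how a
 * request carrying `num` descriptor-ring pages is sized and how `num` is
 * recovered from a wire length.
 */
#include <assert.h>

static void init_txq_in_len_roundtrip(void)
{
	unsigned int n;

	for (n = MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM;
	     n <= MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM; n++)
		assert(MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(MC_CMD_INIT_TXQ_IN_LEN(n)) == n);

	/* 28 + 8 * 28 == 252 */
	assert(MC_CMD_INIT_TXQ_IN_LEN(MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) ==
	       MC_CMD_INIT_TXQ_IN_LENMAX);
}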
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
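
/*
 * Illustrative sketch (editor's example, not part of this patch): packing
 * the two Qbb bitfields above into the dword at offset 540 (little-endian
 * host assumed, as in the earlier sketches).
 */
#include <stdint.h>
#include <string.h>

static void enable_qbb(uint8_t *req, uint32_t priority)
{
	uint32_t qbb = (1u << MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN) |
		       ((priority & 0x7u) << MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN);

	memcpy(req + MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST, &qbb, sizeof(qbb));
}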
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Tear down an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ being torn down must themselves be
+ * torn down first, or the operation will fail with EBUSY.
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ.
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Tear down an RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Tear down a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
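
/*
 * Illustrative sketch (editor's example, not part of this patch): per the
 * MC_CMD_FINI_EVQ description above, queues must be torn down leaf-first,
 * i.e. every TXQ and RXQ pointing at an EVQ before the EVQ itself, or the
 * firmware returns EBUSY. mcdi_rpc_u32() is a hypothetical helper issuing
 * a command whose request body is a single INSTANCE dword; struct nic is
 * a hypothetical driver context.
 */
#include <stdint.h>

struct nic;
extern int mcdi_rpc_u32(struct nic *nic, unsigned int cmd, uint32_t instance);

static int teardown_channel(struct nic *nic, uint32_t txq, uint32_t rxq,
			    uint32_t evq)
{
	int rc;

	rc = mcdi_rpc_u32(nic, MC_CMD_FINI_TXQ, txq);
	if (rc)
		return rc;
	rc = mcdi_rpc_u32(nic, MC_CMD_FINI_RXQ, rxq);
	if (rc)
		return rc;
	return mcdi_rpc_u32(nic, MC_CMD_FINI_EVQ, evq);	/* last, else EBUSY */
}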
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
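
/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * 64-bit event payload travels as two dwords, low half first, matching the
 * DATA_LO/DATA_HI offsets above (little-endian host assumed for the
 * memcpy()s, as in the earlier sketches).
 */
#include <stdint.h>
#include <string.h>

static void build_driver_event(uint8_t req[MC_CMD_DRIVER_EVENT_IN_LEN],
			       uint32_t evq, uint64_t data)
{
	uint32_t lo = (uint32_t)data;
	uint32_t hi = (uint32_t)(data >> 32);

	memcpy(req + MC_CMD_DRIVER_EVENT_IN_EVQ_OFST, &evq, sizeof(evq));
	memcpy(req + MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST, &lo, sizeof(lo));
	memcpy(req + MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST, &hi, sizeof(hi));
}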
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
+/* ID of the first buffer table entry to program */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Number of buffer table entries to program */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
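
/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * alloc/program/free lifecycle of a buffer table chunk. A single
 * PROGRAM_BUFTBL_ENTRIES request carries at most ENTRY_MAXNUM (32)
 * addresses, so programming a larger chunk is batched.
 * mcdi_program_buftbl() is a hypothetical wrapper around
 * MC_CMD_PROGRAM_BUFTBL_ENTRIES, and struct nic a hypothetical driver
 * context.
 */
#include <stdint.h>

struct nic;
extern int mcdi_program_buftbl(struct nic *nic, uint32_t handle,
			       uint32_t first_id, uint32_t n,
			       const uint64_t *addrs);

static int program_chunk(struct nic *nic, uint32_t handle, uint32_t base_id,
			 const uint64_t *addrs, uint32_t count)
{
	uint32_t done = 0;

	while (done < count) {
		uint32_t n = count - done;
		int rc;

		if (n > MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM)
			n = MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM;
		rc = mcdi_program_buftbl(nic, handle, base_id + done, n,
					 addrs + done);
		if (rc)
			return rc;
		done += n;
	}
	return 0;
}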
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
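
/*
 * Illustrative sketch (editor's example, not part of this patch): a
 * single-recipient INSERT matching destination MAC plus Ethernet type,
 * delivered to one host RX queue. put32() stores a host value as a
 * little-endian dword (little-endian host assumed); dst_mac and
 * ether_type_be are supplied in network byte order, as the field comments
 * above require.
 */
#include <stdint.h>
#include <string.h>

static void put32(uint8_t *p, uint32_t v)
{
	memcpy(p, &v, sizeof(v));
}

static void build_mac_filter(uint8_t req[MC_CMD_FILTER_OP_IN_LEN],
			     uint32_t port_id, uint32_t rxq,
			     const uint8_t dst_mac[6], uint16_t ether_type_be)
{
	memset(req, 0, MC_CMD_FILTER_OP_IN_LEN);
	put32(req + MC_CMD_FILTER_OP_IN_OP_OFST,
	      MC_CMD_FILTER_OP_IN_OP_INSERT);
	put32(req + MC_CMD_FILTER_OP_IN_PORT_ID_OFST, port_id);
	/* match on DST_MAC (bit 4) and ETHER_TYPE (bit 6) */
	put32(req + MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST,
	      (1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN) |
	      (1u << MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN));
	put32(req + MC_CMD_FILTER_OP_IN_RX_DEST_OFST,
	      MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	put32(req + MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST, rxq);
	put32(req + MC_CMD_FILTER_OP_IN_RX_MODE_OFST,
	      MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	put32(req + MC_CMD_FILTER_OP_IN_TX_DEST_OFST,
	      MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	memcpy(req + MC_CMD_FILTER_OP_IN_DST_MAC_OFST, dst_mac, 6);
	memcpy(req + MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST, &ether_type_be, 2);
}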
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
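
/*
 * Illustrative sketch (editor's example, not part of this patch):
 * following the VNI_OR_VSID field comment below literally, the three
 * identifier bytes go down in network order with the type selector in the
 * final byte (0 for VXLAN/NVGRE, 1 for Geneve).
 */
#include <stdint.h>
#include <string.h>

static void set_vxlan_vni(uint8_t *req, const uint8_t vni_net[3])
{
	memcpy(req + MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST, vni_net, 3);
	req[MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST + 3] =
		MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
}
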
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
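The bit-level sub-fields in these structures all follow one OFST/LBN/WIDTH
convention: OFST is the byte offset of the containing dword in the message
buffer, LBN the starting bit within it, and WIDTH the field's bit width. A
minimal setter sketch for that convention, assuming little-endian MCDI dword
packing and relying on the structure's _LEN define from earlier in this header
(mcdi_set_field and inbuf are illustrative, not part of this header); it is
shown filling in the VNI_OR_VSID dword defined above for Geneve:

        #include <stdint.h>
        #include <string.h>

        /* Write a WIDTH-bit value starting at bit LBN of the dword at byte
         * offset OFST; an LBN of 32 or more (used by some 64-bit fields in
         * this header) spills into the following dword.
         */
        static void mcdi_set_field(uint8_t *buf, unsigned int ofst,
                                   unsigned int lbn, unsigned int width,
                                   uint32_t value)
        {
                uint32_t mask = width == 32 ? 0xffffffffu : (1u << width) - 1;
                uint32_t dword;

                ofst += (lbn / 32) * 4;
                lbn %= 32;
                memcpy(&dword, buf + ofst, 4);  /* little-endian host assumed */
                dword &= ~(mask << lbn);
                dword |= (value & mask) << lbn;
                memcpy(buf + ofst, &dword, 4);
        }

        uint8_t inbuf[MC_CMD_FILTER_OP_EXT_IN_LEN] = { 0 };

        /* match Geneve traffic carrying VNI 0x1234 */
        mcdi_set_field(inbuf, MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST,
                       MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN,
                       MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH, 0x1234);
        mcdi_set_field(inbuf, MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST,
                       MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN,
                       MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH,
                       MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE);
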
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
+ * its rte_flow API. This extension is only useful with the sfc_efx driver
+ * included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Set an action for all packets matching this filter. The DPDK driver and dpdk
+ * f/w variant use their own specific delivery structures, which are documented
+ * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
+ * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
+ * will cause the filter insertion to fail with ENOTSUP.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
+
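A sketch of the two DPDK action fields at the tail of the V3 request, under
the same little-endian assumption as the earlier helper (mcdi_put_dword and
inbuf are illustrative, not part of this header):

        static inline void mcdi_put_dword(uint8_t *buf, unsigned int ofst,
                                          uint32_t value)
        {
                memcpy(buf + ofst, &value, 4);  /* little-endian host assumed */
        }

        uint8_t inbuf[MC_CMD_FILTER_OP_V3_IN_LEN] = { 0 };

        /* request the rte_flow MARK action with mark value 42; the value
         * must not exceed FILTER_ACTION_MARK_MAX or insertion fails EINVAL
         */
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST,
                       MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
        mcdi_put_dword(inbuf, MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST, 42);
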
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
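Since 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle, a
returned handle can be sanity-checked against the INVALID enums; a sketch,
with mcdi_get_dword as an illustrative counterpart to the setters above and
outbuf holding the response buffer:

        #include <stdbool.h>

        static inline uint32_t mcdi_get_dword(const uint8_t *buf,
                                              unsigned int ofst)
        {
                uint32_t value;

                memcpy(&value, buf + ofst, 4);  /* little-endian host assumed */
                return value;
        }

        /* the handle is valid unless both halves read back as all-ones */
        bool valid =
                mcdi_get_dword(outbuf, MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST) !=
                        MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID ||
                mcdi_get_dword(outbuf, MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST) !=
                        MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID;
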
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+/* enum: read the list of supported matches for the encapsulation detection
+ * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
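The response is variable length: LEN(num) = 8 + 4*num bytes for num match
types, and NUM(len) inverts that, so the 252-byte MCDI v1 maximum carries
(252 - 8) / 4 = 61 entries (matching MAXNUM) and the 1020-byte MCDI v2
maximum carries 253. A sketch of walking the array, where resp_len is the
illustrative actual response length in bytes:

        unsigned int i;
        unsigned int num =
                MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(resp_len);

        for (i = 0; i < num; i++) {
                uint32_t match = mcdi_get_dword(outbuf,
                        MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST +
                        4 * i);
                /* entries are sorted in decreasing priority order */
        }
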
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
+ * returned if an MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
+ * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
+ * supported match types that can be used in the encapsulation detection rules
+ * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD.
+ */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FLAGS values for
+ * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: response to a Huntington-compatible
+ * VI_ALLOC request. Use the extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
+
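The MIN/MAX pair lets the MC grant any count in the requested range rather
than succeeding only on an exact figure; a sketch of a request accepting
between 4 and 32 VIs, reusing the illustrative mcdi_put_dword helper:

        uint8_t inbuf[MC_CMD_ALLOC_VIS_IN_LEN] = { 0 };

        mcdi_put_dword(inbuf, MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST, 4);
        mcdi_put_dword(inbuf, MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST, 32);

Whichever response form comes back, VI_BASE is needed to interpret wakeup
events; the extended form additionally reports the port's VI_SHIFT.
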
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
+/* Max number of VFs before the SRIOV stride and offset may need to change. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
+
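VF_OFFSET and VF_STRIDE describe the PCI routing-ID (RID) layout of the VFs
relative to the PF, so a given VF's RID follows by simple arithmetic; a
sketch, where pf_rid and n are illustrative inputs:

        /* RID of VF n, given the PF's own routing ID pf_rid */
        uint32_t vf_offset = mcdi_get_dword(outbuf,
                MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST);
        uint32_t vf_stride = mcdi_get_dword(outbuf,
                MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST);
        uint32_t vf_rid = pf_rid + vf_offset + n * vf_stride;
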
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
+/* Max number of VFs before the SRIOV stride and offset may need to change. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
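A sketch of enabling 8 VFs (with headroom for 16) while leaving the RID
layout untouched, reusing the illustrative helpers from earlier:

        uint8_t inbuf[MC_CMD_SET_SRIOV_CFG_IN_LEN] = { 0 };

        mcdi_put_dword(inbuf, MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST, 8);
        mcdi_put_dword(inbuf, MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST, 16);
        mcdi_set_field(inbuf, MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST,
                       MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN,
                       MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH, 1);
        /* VF_OFFSET and VF_STRIDE are left at 0, i.e. "no change" */
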
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about number of VI's and base VI number allocated to this
+ * function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
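The combined metadata fields above are 64 bits wide, so sub-fields such as
WAITCOUNT carry LBN values of 32 or more that land in the second dword; a
dword-spanning getter sketch, under the same little-endian assumption as the
earlier illustrative helpers:

        static uint32_t mcdi_get_field(const uint8_t *buf, unsigned int ofst,
                                       unsigned int lbn, unsigned int width)
        {
                uint32_t mask = width == 32 ? 0xffffffffu : (1u << width) - 1;
                uint32_t dword;

                ofst += (lbn / 32) * 4;         /* LBN >= 32: next dword */
                lbn %= 32;
                memcpy(&dword, buf + ofst, 4);  /* little-endian host assumed */
                return (dword >> lbn) & mask;
        }

        uint32_t qstate = mcdi_get_field(outbuf,
                MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST,
                MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN,
                MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH);
        uint32_t waitcount = mcdi_get_field(outbuf,
                MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST,
                MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN,
                MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH);
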
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and is intended
+ * to describe inherent device capabilities, as opposed to the current NVRAM
+ * config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate an alternative
+ * interpretation of the REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate an alternative
+ * interpretation of the REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
+
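Individual FLAGS1 capability bits, and the packed REV/TYPE sub-fields of the
PD firmware version words, decode with the same illustrative mcdi_get_field
helper; a sketch checking for RX timestamping support and classifying the RX
PD firmware variant:

        bool rx_ts = mcdi_get_field(outbuf,
                MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST,
                MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN,
                MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH);
        uint32_t rxpd_type = mcdi_get_field(outbuf,
                MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST,
                MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN,
                MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH);
        bool full_featured = rxpd_type ==
                MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED;
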
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
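+/* Editorial sketch, not part of the generated MCDI definitions: every flag
+ * above is described by an _OFST/_LBN/_WIDTH triple, i.e. the byte offset of
+ * a little-endian 32-bit word in the response, the least significant bit
+ * number of the field within that word, and the field width in bits. A
+ * hypothetical helper (names assumed here) to extract one such field from a
+ * response buffer could look like:
+ *
+ *	static inline u32 mcdi_cap_field(const u8 *outbuf, unsigned int ofst,
+ *					 unsigned int lbn, unsigned int width)
+ *	{
+ *		u32 dword = get_unaligned_le32(outbuf + ofst);
+ *
+ *		return (dword >> lbn) & (BIT(width) - 1);
+ *	}
+ *
+ * so, for example, TX TSO v2 support would be read as
+ *
+ *	mcdi_cap_field(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST,
+ *		       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN,
+ *		       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH);
+ */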
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
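+/* Editorial sketch, not generated: because this field is absent on older
+ * firmware, a caller (assumed here to hold the response in outbuf with
+ * outlen valid bytes) would guard the read on the response length:
+ *
+ *	u16 n_tso_contexts = 0;
+ *
+ *	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST +
+ *		      MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
+ *		n_tso_contexts = get_unaligned_le16(outbuf +
+ *			MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST);
+ */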
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
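+/* Editorial sketch, not generated: a caller could translate this array and
+ * the special values above into a PF-to-port table like so (outbuf assumed
+ * to hold the response, port_of_pf[] a caller-side array):
+ *
+ *	unsigned int pf;
+ *
+ *	for (pf = 0;
+ *	     pf < MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM;
+ *	     pf++) {
+ *		u8 port = outbuf[MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];
+ *
+ *		if (port == MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED ||
+ *		    port == MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT ||
+ *		    port == MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED ||
+ *		    port == MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT)
+ *			continue;
+ *		port_of_pf[pf] = port;
+ *	}
+ */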
+/* One byte per PF containing the number of its VFs, indexed by PF number.
+ * Special values indicate that a PF is not present or that the caller is not
+ * permitted to access its information.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
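+/* Editorial note, not generated: "2 ^ N" in the two comments above denotes
+ * exponentiation, not C's XOR operator; in code the actual cache size is
+ *
+ *	unsigned int entries = 1u << desc_cache_size;
+ *
+ * so a reported value of 6, for example, means a 64-entry descriptor cache.
+ */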
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number.
+ * Special values indicate that a PF is not present or that the caller is not
+ * permitted to access its information.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
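+/* Editorial sketch, not generated: the window mode fixes the stride between
+ * consecutive VI apertures in the BAR, so the base offset of VI number
+ * vi_index follows directly (shifts derived from the sizes named above:
+ * 8k = 1 << 13, 16k = 1 << 14, 64k = 1 << 16):
+ *
+ *	unsigned int shift;
+ *
+ *	switch (vi_window_mode) {
+ *	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ *		shift = 13;
+ *		break;
+ *	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ *		shift = 14;
+ *		break;
+ *	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ *		shift = 16;
+ *		break;
+ *	default:
+ *		return -EINVAL;
+ *	}
+ *	vi_offset = (unsigned long)vi_index << shift;
+ */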
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
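The RXPD_FW_VERSION word packs a 12-bit REV and a 4-bit TYPE (drawn from the enum list above) into 16 bits. A decoding sketch, assuming these definitions are in scope and 'outbuf' is an assumed name for the raw little-endian response buffer:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: split the 16-bit RXPD firmware version word into its REV
     * (LBN 0, WIDTH 12) and TYPE (LBN 12, WIDTH 4) sub-fields.
     */
    static void decode_rxpd_version(const uint8_t *outbuf,
                                    unsigned int *rev, unsigned int *type)
    {
            uint16_t ver;

            memcpy(&ver, outbuf + MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST,
                   sizeof(ver));
            *rev  = ver & 0xfff;            /* 12-bit revision */
            *type = (ver >> 12) & 0xf;      /* 4-bit type from the enum above */
    }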
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
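As the FLAGS2 comment notes, everything from offset 20 onwards may be absent on older firmware, so a driver must length-check before reading. A sketch of that pattern for the FATSOv2 context count; 'outbuf'/'outlen' are assumed names for the raw MCDI response and its length:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: read TX_TSO_V2_N_CONTEXTS only when the response is long
     * enough to contain it; older firmware returns a shorter payload.
     */
    static uint16_t get_tso_v2_contexts(const uint8_t *outbuf, size_t outlen)
    {
            uint16_t n = 0;    /* safe default when the field is absent */

            if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST +
                          MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
                    memcpy(&n,
                           outbuf + MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST,
                           sizeof(n));  /* fields are little-endian on the wire */
            return n;
    }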
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
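A sketch of walking this per-PF array and folding the future-proofing value into PF_NOT_ASSIGNED, as the comment above instructs; record_pf_port() is a hypothetical bookkeeping hook:

    #include <stdint.h>

    void record_pf_port(unsigned int pf, uint8_t port);  /* hypothetical hook */

    static void scan_pf_ports(const uint8_t *outbuf)
    {
            unsigned int pf;

            for (pf = 0;
                 pf < MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM;
                 pf++) {
                    uint8_t port =
                        outbuf[MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];

                    if (port == MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED ||
                        port == MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT)
                            continue;   /* nothing to record for this PF */
                    if (port == MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT)
                            port = MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED;
                    record_pf_port(pf, port);
            }
    }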
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
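Because the cache sizes are stored as binary logarithms, the actual entry counts fall out of a shift; a sketch, with 'outbuf' assumed as before:

    #include <stdint.h>

    /* Sketch: both one-byte fields hold log2 of the cache size. */
    static void desc_cache_sizes(const uint8_t *outbuf,
                                 unsigned int *rx, unsigned int *tx)
    {
            *rx = 1u << outbuf[MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST];
            *tx = 1u << outbuf[MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST];
    }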
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
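A sketch of turning the window-mode byte into a per-VI stride plus CTPIO availability, following the offsets described above; the return convention is an assumption of this sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: PIO sits at offset 4k in every mode; CTPIO sits at offset
     * 12k except in 8k mode, where it is not mapped at all.
     */
    static int vi_window_stride(uint8_t mode, size_t *stride, bool *have_ctpio)
    {
            switch (mode) {
            case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K:
                    *stride = 8192;
                    *have_ctpio = false;
                    return 0;
            case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K:
                    *stride = 16384;
                    *have_ctpio = true;
                    return 0;
            case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K:
                    *stride = 65536;
                    *have_ctpio = true;
                    return 0;
            default:
                    return -1;  /* unknown mode: fail rather than guess */
            }
    }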
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
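A sketch of the buffer-sizing rule spelled out above: one 64-bit value per entry, with the final GENERATION_END already counted. Allocation style and names are illustrative:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch: size the MAC stats DMA buffer from MAC_STATS_NUM_STATS so
     * all entries fit.  Assumes a little-endian host.
     */
    static uint64_t *alloc_mac_stats(const uint8_t *outbuf, uint16_t *nstats)
    {
            memcpy(nstats,
                   outbuf + MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST,
                   sizeof(*nstats));
            return calloc(*nstats, sizeof(uint64_t));  /* 8 bytes per entry */
    }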
+
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
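Since FILTER_ACTION_MARK_MAX is only meaningful when the FILTER_ACTION_MARK flag (bit 20 of FLAGS2) is set, a driver would gate the read on that flag; a sketch, assuming a little-endian host and that the caller has already verified the response is at least V5_OUT_LEN bytes:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: return the maximum MATCH_MARK_VALUE, or 0 when the
     * FILTER_ACTION_MARK capability is not advertised.
     */
    static uint32_t filter_mark_max(const uint8_t *outbuf)
    {
            uint32_t flags2, mark_max = 0;

            memcpy(&flags2, outbuf + MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST,
                   sizeof(flags2));
            if (flags2 & (1u << MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN))
                    memcpy(&mark_max,
                           outbuf + MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST,
                           sizeof(mark_max));
            return mark_max;
    }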
+
+/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4
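+/* Editorial sketch, not part of the generated definitions: FLAGS2 may be
+ * absent on older firmware, so a driver should gate on the response length
+ * before reading it. "outlen" is a hypothetical variable holding the MCDI
+ * response length in bytes.
+ */
+static inline int mcdi_v6_out_has_flags2(unsigned int outlen)
+{
+	return outlen >= MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST +
+			 MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN;
+}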
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
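+/* Editorial sketch (hypothetical helper, not generated): deciding whether a
+ * PFS_TO_PORTS_ASSIGNMENT byte names a usable external port. Per the
+ * comments above, INCOMPATIBLE_ASSIGNMENT must be treated exactly like
+ * PF_NOT_ASSIGNED by current drivers.
+ */
+static inline int mcdi_v6_pf_port_usable(unsigned char assignment)
+{
+	return assignment != MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED &&
+	       assignment != MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT &&
+	       assignment != MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED &&
+	       assignment != MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT;
+}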
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1
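+/* Editorial sketch (hypothetical helper, not generated): both cache-size
+ * fields above are binary logarithms, so the entry count is recovered by a
+ * shift; e.g. a stored value of 6 means a 64-entry cache.
+ */
+static inline unsigned int mcdi_desc_cache_entries(unsigned char log2_size)
+{
+	return 1U << log2_size;	/* 2 ^ {RX,TX}_DESC_CACHE_SIZE */
+}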
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2
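+/* Editorial sketch (hypothetical helper, not generated): the total PIO
+ * space available is simply the product of the two fields above.
+ */
+static inline unsigned int mcdi_total_pio_bytes(unsigned int num_buffs,
+						unsigned int buff_size)
+{
+	return num_buffs * buff_size;
+}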
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2
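+/* Editorial sketch (hypothetical helper, not generated): translating the
+ * VI_WINDOW_MODE enum above into a per-VI stride in bytes. As noted above,
+ * CTPIO users must additionally reject the 8k mode.
+ */
+static inline unsigned int mcdi_v6_vi_window_bytes(unsigned char mode)
+{
+	switch (mode) {
+	case MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K:
+		return 8192;
+	case MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K:
+		return 16384;
+	case MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K:
+		return 65536;
+	default:
+		return 0;	/* unknown mode: caller should fail */
+	}
+}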
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2
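+/* Editorial sketch (hypothetical helper, not generated): the minimum DMA
+ * buffer size, in bytes, needed to receive the full stats array described
+ * above, at 8 bytes per 64-bit stat.
+ */
+static inline unsigned int mcdi_mac_stats_dma_len(unsigned int num_stats)
+{
+	return num_stats * 8;	/* one 64-bit value per entry */
+}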
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4
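+/* Editorial sketch (hypothetical helper, not generated): validating a
+ * requested mark against the limit above. Because the limit is zero when
+ * FILTER_ACTION_MARK is unset, the same test also rejects marks on firmware
+ * without the feature.
+ */
+static inline int mcdi_v6_mark_ok(unsigned int mark, unsigned int mark_max)
+{
+	return mark_max != 0 && mark <= mark_max;
+}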
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
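+/* Editorial sketch (hypothetical helper, not generated): checking a
+ * requested RX buffer size against the guaranteed-sizes list above. An
+ * all-zero (empty) list means no restriction, per the comment above.
+ */
+static inline int mcdi_v6_rx_buf_size_ok(const unsigned int *sizes,
+					 unsigned int want)
+{
+	unsigned int i, listed = 0;
+
+	for (i = 0;
+	     i < MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM;
+	     i++) {
+		if (!sizes[i])
+			continue;
+		listed = 1;
+		if (sizes[i] == want)
+			return 1;
+	}
+	return !listed;	/* empty list: no limitation */
+}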
+
+/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate an alternative
+ * interpretation of the REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate an alternative
+ * interpretation of the REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+
+/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate an alternative
+ * interpretation of the REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate an alternative
+ * interpretation of the REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
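+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper; kernel bool/u8 assumed): a driver reading a
+ * PFS_TO_PORTS_ASSIGNMENT byte could treat all four special values as
+ * "no usable port", per the INCOMPATIBLE_ASSIGNMENT note above.
+ */
+static inline bool mcdi_pf_has_port(u8 assignment)
+{
+	switch (assignment) {
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED:
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT:
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED:
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT:
+		return false;
+	default:
+		return true;	/* byte is an external port number */
+	}
+}
+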
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2
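+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper): mapping the VI_WINDOW_MODE enum to the per-VI
+ * window size in bytes. As described above, PIO sits at offset 4k in all
+ * modes, while CTPIO is mapped (at 12k) only in the 16k and 64k modes.
+ */
+static inline unsigned int mcdi_vi_window_size(unsigned int mode)
+{
+	switch (mode) {
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K:
+		return 8192;
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K:
+		return 16384;
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K:
+		return 65536;
+	default:
+		return 0;	/* unknown mode: caller must not map VIs */
+	}
+}
+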
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2
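+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper; kernel u16/u64/size_t assumed): per the comment
+ * above, a MAC stats DMA buffer that is to receive all available stats
+ * needs MAC_STATS_NUM_STATS 64-bit slots, including the trailing
+ * GENERATION_END entry.
+ */
+static inline size_t mcdi_mac_stats_buf_len(u16 num_stats)
+{
+	return (size_t)num_stats * sizeof(u64);	/* one u64 per stat */
+}
+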
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
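+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper): nonzero entries in the GUARANTEED_RX_BUFFER_SIZES
+ * array are the buffer sizes guaranteed to be usable; an all-zero array
+ * means no restriction, as described above.
+ */
+static inline bool mcdi_rx_buf_size_guaranteed(const u32 *sizes, u32 want)
+{
+	unsigned int i;
+	bool any = false;
+
+	for (i = 0; i < MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
+		if (!sizes[i])
+			continue;
+		any = true;
+		if (sizes[i] == want)
+			return true;
+	}
+	return !any;	/* empty list: any size is acceptable */
+}
+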
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+
+/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
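+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper): a requested RSS indirection table size must be a
+ * power of two and lie within the advertised min/max bounds above.
+ * is_power_of_2() is the standard kernel helper from linux/log2.h.
+ */
+static inline bool mcdi_rss_table_size_ok(u32 size, u32 min, u32 max)
+{
+	return size >= min && size <= max && is_power_of_2(size);
+}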
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
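+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper): building the 32-bit MC_CMD_V2_EXTN request word
+ * from the LBN/WIDTH fields above, with the extended command in bits
+ * [14:0], the actual payload length in bits [25:16] and the message type
+ * in bits [31:28]. The caller must keep each value within its field width.
+ */
+static inline u32 mcdi_v2_extn_hdr(unsigned int cmd, unsigned int len,
+				   unsigned int type)
+{
+	return ((u32)cmd << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
+	       ((u32)len << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN) |
+	       ((u32)type << MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN);
+}
+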
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read back some configuration of a v-switch. For now this command is an
+ * empty placeholder. It may be used to check whether a v-switch is connected
+ * to a given EVB port (if not, the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives traffic which doesn't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
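+
+/* Illustrative sketch, not part of the canonical MCDI definitions
+ * (hypothetical helper): VLAN_TAG_0 and VLAN_TAG_1 share the single
+ * VLAN_TAGS dword, at bit offsets 0 and 16 respectively.
+ */
+static inline u32 mcdi_vport_vlan_tags(u16 tag0, u16 tag1)
+{
+	return ((u32)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
+	       ((u32)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);
+}
+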
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to which the v-adaptor is to be connected. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
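+
+/* Example (illustrative sketch): setting a v-adaptor MAC address with the
+ * sfc MCDI helpers; MCDI_PTR yields a pointer to the field inside the
+ * request buffer, and "efx", "port_id" and "mac" are assumed caller
+ * context:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, port_id);
+ *	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), mac);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */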
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
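+
+/* Example (illustrative sketch): the write mask is a single byte at offset
+ * 16 whose low four bits select which regions the command writes; with the
+ * bits clear the command only reads. A hypothetical raw request buffer
+ * "buf" (MC_CMD_RDWR_A64_REGIONS_IN_LEN bytes) could be set up as:
+ *
+ *	u8 *mask = buf + MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST;
+ *
+ *	*mask = 0;	(all write bits clear: read back all four regions)
+ *	*mask = 1 << 0;	(bit 0 set: write REGION0, read back the others)
+ */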
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4
+/* Size of indirection table to be allocated to this context from the pool.
+ * Must be a power of 2. The minimum and maximum table size can be queried
+ * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in
+ * the common pool to allocate the requested table size, due to allocating
+ * table space to other RSS contexts, then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
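+
+/* Example (illustrative sketch): allocating an exclusive RSS context with
+ * the sfc MCDI helpers; "efx" and "n_queues" are assumed caller context,
+ * and EVB_PORT_ID_ASSIGNED is assumed to be the port-ID alias defined
+ * elsewhere in this header. The INVALID value above is guaranteed never to
+ * be a real handle, so it can double as a "no context" sentinel:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ *		       EVB_PORT_ID_ASSIGNED);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ *		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, n_queues);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0 && outlen >= MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ *		ctx_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ */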
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
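+
+/* Example (illustrative sketch): a common way to fill the fixed 128-entry,
+ * one-byte-per-entry table is round-robin over the RX queues; "efx",
+ * "ctx_id" and "n_queues" are assumed caller context:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ *	u8 *table = MCDI_PTR(inbuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+ *	unsigned int i;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, ctx_id);
+ *	for (i = 0;
+ *	     i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
+ *		table[i] = i % n_queues;
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */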
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
+ * Write a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
+#undef MC_CMD_0x13e_PRIVILEGE_CTG
+
+#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array of index-value pairs to be written to the table. Structure is
+ * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
+ */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
+/* The index of the table entry to be written. */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
+/* The value to write into the table entry. */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
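+
+/* Example (illustrative sketch): each ENTRY packs a 16-bit table index and
+ * a 16-bit queue value into one little-endian 32-bit word, per the
+ * LBN/WIDTH definitions above; packing entry "i" -> queue "q" in plain C:
+ *
+ *	u32 entry = ((u32)(i & 0xffff)
+ *		     << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN) |
+ *		    ((u32)(q & 0xffff)
+ *		     << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN);
+ *
+ * The packed word is then stored little-endian in the ENTRIES array of
+ * MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN, up to ENTRIES_MAXNUM per request.
+ */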
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_READ_TABLE
+ * Read a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
+#undef MC_CMD_0x13f_PRIVILEGE_CTG
+
+#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array containing the indices of the entries to be read. */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
+/* A buffer containing the requested entries read from the table. */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
+#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
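+
+/* Example (illustrative sketch): per the note above, a driver must check
+ * the ADDITIONAL_RSS_MODES capability before touching the _MODE fields;
+ * "has_additional_rss_modes" is an assumed flag derived from
+ * MC_CMD_GET_CAPABILITIES, and the value 0xf is assumed to select hashing
+ * over source/destination address and port per the RSS_MODE structure
+ * defined elsewhere in this header:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, ctx_id);
+ *	if (has_additional_rss_modes)
+ *		MCDI_POPULATE_DWORD_2(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ *			RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE, 0xf,
+ *			RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE, 0xf);
+ *	else
+ *		MCDI_POPULATE_DWORD_2(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ *			RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, 1,
+ *			RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */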
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
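+
+/* Example (illustrative sketch): the response is variable-length, and the
+ * _NUM(len) helper above converts the actual response length to an entry
+ * count; "outbuf" and "outlen" from a successful efx_mcdi_rpc() call are
+ * assumed:
+ *
+ *	unsigned int count, i;
+ *	const u8 *macs;
+ *
+ *	count = MCDI_DWORD(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+ *	count = min(count, (unsigned int)
+ *		    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(outlen));
+ *	macs = MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+ *	for (i = 0; i < count; i++)
+ *		... use the 6-byte address at
+ *		    macs + i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN ...
+ */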
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses.
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
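+
+/* Example (illustrative sketch): reads larger than DATA_MAXNUM bytes per
+ * response must be chunked; a caller reading "total" bytes from OTP offset
+ * 0 into "dest" might loop as follows ("efx" assumed, error handling
+ * elided):
+ *
+ *	size_t done = 0;
+ *
+ *	while (done < total) {
+ *		size_t chunk = min_t(size_t, total - done,
+ *				     MC_CMD_READ_FUSES_OUT_DATA_MAXNUM);
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
+ *
+ *		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, done);
+ *		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, chunk);
+ *		... issue MC_CMD_READ_FUSES, check LENGTH in the response,
+ *		    copy DATA to dest + done ...
+ *		done += chunk;
+ *	}
+ */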
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
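+
+/* Example (illustrative sketch): 64-bit fields such as FEATURES and STATES
+ * are split into _LO/_HI dwords above; the sfc helpers MCDI_SET_QWORD and
+ * MCDI_QWORD (assumed available) hide the split, so a query reduces to:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN);
+ *	size_t outlen;
+ *	u64 states;
+ *	int rc;
+ *
+ *	MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES,
+ *		       feature_mask);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_V3_FEATURE_STATES, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0)
+ *		states = MCDI_QWORD(outbuf,
+ *				    GET_LICENSED_V3_FEATURE_STATES_OUT_STATES);
+ */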
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless which function is calling, effectively this
+ * is PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adaptor
+ * exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive a MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
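
A minimal usage sketch (not from this patch): because the SET opcode is
asynchronous, a driver would poll with the STATUS opcode until validation
completes. This assumes the MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
efx_siena_mcdi_rpc) used by the driver sources later in this diff; the
example_ function name is hypothetical.

static int example_temp_license_status(struct efx_nic *efx, u32 *status)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN);
	size_t outlen;
	int rc;

	/* Select the STATUS opcode to query the asynchronous SET operation */
	MCDI_SET_DWORD(inbuf, LICENSING_V3_TEMPORARY_IN_STATUS_OP,
		       MC_CMD_LICENSING_V3_TEMPORARY_STATUS);
	rc = efx_siena_mcdi_rpc(efx, MC_CMD_LICENSING_V3_TEMPORARY,
				inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN)
		return -EIO;

	/* Caller retries while this reads back ..._STATUS_IN_PROGRESS */
	*status = MCDI_DWORD(outbuf, LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS);
	return 0;
}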
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4)
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
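
In the common case of a single-dword VALUE, the request is
MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1) == 12 bytes. A sketch of issuing such
a request, assuming the MCDI macros used by the driver code later in this
diff (the helper name is hypothetical):

static int example_set_pd_config(struct efx_nic *efx, u32 type,
				 u32 entity, u32 value)
{
	/* One-element VALUE array: LEN(1) == 12 bytes */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

	BUILD_BUG_ON(MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, entity);
	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, value);

	return efx_siena_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);
}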
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
+
+/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
+/* Bitmask of engineering port modes available on the board (indexed by
+ * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
+ * contains all modes implemented in firmware for a particular board. Modes
+ * listed in MODES are considered production modes and should be exposed in
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
+ * should be considered hidden (not to be exposed in userland tools) and for
+ * engineering use only. There are no other semantic differences and any mode
+ * listed in either MODES or ENGINEERING_MODES can be set on the board.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
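
A sketch of reading the port mode bitmask (hypothetical helper name): the
V2-sized buffer also accepts the shorter 12-byte response from older
firmware, with outlen distinguishing the two layouts, which share their
first three fields.

static int example_get_port_modes(struct efx_nic *efx, u32 *modes)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_MODES_OUT_V2_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_PORT_MODES_IN_LEN != 0);

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PORT_MODES, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* Accept either the 12-byte or the 16-byte (V2) response */
	if (outlen < MC_CMD_GET_PORT_MODES_OUT_LEN)
		return -EIO;

	*modes = MCDI_DWORD(outbuf, GET_PORT_MODES_OUT_MODES);
	return 0;
}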
+
+
+/***********************************/
+/* MC_CMD_OVERRIDE_PORT_MODE
+ * Override flash config port mode for subsequent MC reboot(s). Override data
+ * is stored in the persistent data section of DMEM and activated on next MC
+ * warm reboot. A cold reboot resets the override. It is assumed that a
+ * sufficient number of PFs are available and that port mapping is valid for
+ * the new port mode, as the override does not affect PF configuration.
+ */
+#define MC_CMD_OVERRIDE_PORT_MODE 0x137
+#undef MC_CMD_0x137_PRIVILEGE_CTG
+
+#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
+/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
+#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
+
+/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
+#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
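
Since each workaround is a single bit in IMPLEMENTED/ENABLED, a caller can
test one as follows. A sketch under the same assumptions as the earlier
examples (hypothetical helper name; MCDI macros from this diff's driver
sources):

static bool example_workaround_enabled(struct efx_nic *efx, u32 workaround)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);
	if (rc || outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
		return false;

	/* e.g. workaround == MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 */
	return MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED) & workaround;
}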
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the TX packets' source MAC address to be set to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
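
Because the mask is only written when DO_CHANGE (the MSB of NEW_MASK) is set,
a read-modify-write takes two calls. A sketch granting an extra privilege
group to a function (hypothetical helper name; FUNCTION encoded as described
above):

static int example_add_privilege(struct efx_nic *efx, u32 function, u32 grp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
	size_t outlen;
	int rc;

	/* Read the current mask: leave DO_CHANGE clear in NEW_MASK */
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_FUNCTION, function);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK, 0);
	rc = efx_siena_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
				inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
		return -EIO;

	/* Write it back with the extra group and DO_CHANGE set */
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK,
		       MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK) | grp |
		       MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE);
	return efx_siena_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
}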
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
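
The DO_NOT_CHANGE value makes a pure read possible with the same command.
Sketch (hypothetical helper name, same MCDI macro assumptions as above):

static int example_get_vf_link_state(struct efx_nic *efx, u32 function,
				     u32 *mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_FUNCTION, function);
	/* Read-only query: request no change to the mode */
	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
		       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
	rc = efx_siena_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE,
				inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
		return -EIO;

	*mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
	return 0;
}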
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
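
Each entry packs into one 32-bit word per the LBN/WIDTH values above. A
sketch of a hypothetical packing helper (the value would be stored
little-endian in the MCDI request buffer):

static inline u32 example_encap_entry(u16 udp_port, u16 protocol)
{
	/* UDP_PORT occupies bits 0-15, PROTOCOL bits 16-31 */
	return ((u32)udp_port << TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN) |
	       ((u32)protocol << TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
}

/* e.g. example_encap_entry(TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT,
 *			    TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN)
 */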
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_ADD
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
+#undef MC_CMD_0x16d_PRIVILEGE_CTG
+
+#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
+/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
+/* Any non-zero bits other than the ones named below or an unsupported
+ * combination will cause the NIC to return EOPNOTSUPP. In the future more
+ * flags may be added.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
+/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
+ * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
+/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
+ * (Deprecated)
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
+/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
+/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
+ * case of IPv4, the IP should be in the first 4 bytes and all other bytes
+ * should be zero.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
+/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
+/* Actions that should be applied to packets that match the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
+/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
+/* Handle to inserted rule. Used for removing the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
+ * Remove a VNIC encapsulation rule. Packets which would have previously
+ * matched the rule will then be considered as unencapsulated. Returns
+ * EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
+#undef MC_CMD_0x16e_PRIVILEGE_CTG
+
+#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
+/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
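
A sketch of tearing down a rule by the handle returned from
MC_CMD_VNIC_ENCAP_RULE_ADD (hypothetical helper name, same MCDI macro
assumptions as the earlier examples):

static int example_vnic_encap_rule_remove(struct efx_nic *efx, u32 handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN);

	BUILD_BUG_ON(MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_REMOVE_IN_HANDLE, handle);
	return efx_siena_mcdi_rpc(efx, MC_CMD_VNIC_ENCAP_RULE_REMOVE,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);
}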
+
+/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
+ * defined in SF-120734-TC with more information in SF-122717-TC.
+ */
+#define FUNCTION_PERSONALITY_LEN 4
+#define FUNCTION_PERSONALITY_ID_OFST 0
+#define FUNCTION_PERSONALITY_ID_LEN 4
+/* enum: Function has no assigned personality */
+#define FUNCTION_PERSONALITY_NULL 0x0
+/* enum: Function has an EF100-style function control window and VI windows
+ * with both EF100 and vDPA doorbells.
+ */
+#define FUNCTION_PERSONALITY_EF100 0x1
+/* enum: Function has virtio net device configuration registers and doorbells
+ * for virtio queue pairs.
+ */
+#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
+/* enum: Function has virtio block device configuration registers and a
+ * doorbell for a single virtqueue.
+ */
+#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
+/* enum: Function is a Xilinx acceleration device - management function */
+#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
+/* enum: Function is a Xilinx acceleration device - user function */
+#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
+#define FUNCTION_PERSONALITY_ID_LBN 0
+#define FUNCTION_PERSONALITY_ID_WIDTH 32
+
+/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID
+ * (interface/PF/VF tuple)
+ */
+#define PCIE_FUNCTION_LEN 8
+/* PCIe PF function number */
+#define PCIE_FUNCTION_PF_OFST 0
+#define PCIE_FUNCTION_PF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define PCIE_FUNCTION_PF_ANY 0xfffe
+/* enum: Value representing invalid (null) function */
+#define PCIE_FUNCTION_PF_NULL 0xffff
+#define PCIE_FUNCTION_PF_LBN 0
+#define PCIE_FUNCTION_PF_WIDTH 16
+/* PCIe VF Function number (PF relative) */
+#define PCIE_FUNCTION_VF_OFST 2
+#define PCIE_FUNCTION_VF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define PCIE_FUNCTION_VF_ANY 0xfffe
+/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF ==
+ * PF_NULL)
+ */
+#define PCIE_FUNCTION_VF_NULL 0xffff
+#define PCIE_FUNCTION_VF_LBN 16
+#define PCIE_FUNCTION_VF_WIDTH 16
+/* PCIe interface of the function */
+#define PCIE_FUNCTION_INTF_OFST 4
+#define PCIE_FUNCTION_INTF_LEN 4
+/* enum: Host PCIe interface */
+#define PCIE_FUNCTION_INTF_HOST 0x0
+/* enum: Application Processor interface */
+#define PCIE_FUNCTION_INTF_AP 0x1
+#define PCIE_FUNCTION_INTF_LBN 32
+#define PCIE_FUNCTION_INTF_WIDTH 32
+
+#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.c b/drivers/net/ethernet/sfc/siena/mcdi_port.c
new file mode 100644
index 000000000000..93b8b2338f11
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include <linux/slab.h>
+#include "efx.h"
+#include "mcdi_port.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "selftest.h"
+#include "mcdi_port_common.h"
+
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+}
+
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ return true;
+
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+int efx_siena_mcdi_port_probe(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = efx_mcdi_mdio_read;
+ efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx_siena_mcdi_phy_probe(efx);
+ if (rc != 0)
+ return rc;
+
+ return efx_siena_mcdi_mac_init_stats(efx);
+}
+
+void efx_siena_mcdi_port_remove(struct efx_nic *efx)
+{
+ efx_siena_mcdi_phy_remove(efx);
+ efx_siena_mcdi_mac_fini_stats(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.h b/drivers/net/ethernet/sfc/siena/mcdi_port.h
new file mode 100644
index 000000000000..7b4ae250b51f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_MCDI_PORT_H
+#define EFX_MCDI_PORT_H
+
+#include "net_driver.h"
+
+bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx);
+int efx_siena_mcdi_port_probe(struct efx_nic *efx);
+void efx_siena_mcdi_port_remove(struct efx_nic *efx);
+
+#endif /* EFX_MCDI_PORT_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c
new file mode 100644
index 000000000000..067fe0f4393a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c
@@ -0,0 +1,1282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mcdi_port_common.h"
+#include "efx_common.h"
+#include "nic.h"
+
+static int efx_mcdi_get_phy_cfg(struct efx_nic *efx,
+ struct efx_mcdi_phy_data *cfg)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+ cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+ cfg->supported_cap =
+ MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+ cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+ cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+ memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+ sizeof(cfg->name));
+ cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+ memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+ sizeof(cfg->revision));
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void efx_siena_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
+{
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+ return efx_siena_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
+{
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ switch (media) {
+ case MC_CMD_MEDIA_KX4:
+ SET_BIT(Backplane);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseKX_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseKX4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseKR4_Full);
+ break;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ SET_BIT(FIBRE);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
+ SET_BIT(1000baseT_Full);
+ SET_BIT(1000baseX_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+ SET_BIT(10000baseCR_Full);
+ SET_BIT(10000baseLR_Full);
+ SET_BIT(10000baseSR_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
+ SET_BIT(40000baseCR4_Full);
+ SET_BIT(40000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
+ SET_BIT(100000baseCR4_Full);
+ SET_BIT(100000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
+ SET_BIT(25000baseCR_Full);
+ SET_BIT(25000baseSR_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
+ break;
+
+ case MC_CMD_MEDIA_BASE_T:
+ SET_BIT(TP);
+ if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ SET_BIT(10baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ SET_BIT(10baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ SET_BIT(100baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ SET_BIT(100baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ SET_BIT(1000baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ break;
+ }
+
+ if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ SET_BIT(Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ SET_BIT(Asym_Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ SET_BIT(Autoneg);
+
+ #undef SET_BIT
+}
+
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
+{
+ u32 result = 0;
+
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+ if (TEST_BIT(10baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+ if (TEST_BIT(100baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+ if (TEST_BIT(100baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+ if (TEST_BIT(1000baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+ TEST_BIT(1000baseX_Full))
+ result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+ TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+ TEST_BIT(10000baseSR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+ TEST_BIT(40000baseSR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
+ if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
+ result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+ if (TEST_BIT(Asym_Pause))
+ result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+ if (TEST_BIT(Autoneg))
+ result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+ #undef TEST_BIT
+
+ return result;
+}
+
+static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ enum efx_phy_mode mode, supported;
+ u32 flags;
+
+ /* TODO: Advertise the capabilities supported by this PHY */
+ supported = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
+ supported |= PHY_MODE_TX_DISABLED;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
+ supported |= PHY_MODE_LOW_POWER;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
+ supported |= PHY_MODE_OFF;
+
+ mode = efx->phy_mode & supported;
+
+ flags = 0;
+ if (mode & PHY_MODE_TX_DISABLED)
+ flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
+ if (mode & PHY_MODE_LOW_POWER)
+ flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
+ if (mode & PHY_MODE_OFF)
+ flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
+
+ return flags;
+}
+
+static u8 mcdi_to_ethtool_media(u32 media)
+{
+ switch (media) {
+ case MC_CMD_MEDIA_XAUI:
+ case MC_CMD_MEDIA_CX4:
+ case MC_CMD_MEDIA_KX4:
+ return PORT_OTHER;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ return PORT_FIBRE;
+
+ case MC_CMD_MEDIA_BASE_T:
+ return PORT_TP;
+
+ default:
+ return PORT_OTHER;
+ }
+}
+
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
+/* The semantics of the ethtool FEC mode bitmask are not well defined,
+ * particularly the meaning of combinations of bits. Which means we get to
+ * define our own semantics, as follows:
+ * OFF overrides any other bits, and means "disable all FEC" (with the
+ * exception of 25G KR4/CR4, where it is not possible to reject it if AN
+ * partner requests it).
+ * AUTO on its own means use cable requirements and link partner autoneg with
+ * fw-default preferences for the cable type.
+ * AUTO and either RS or BASER means use the specified FEC type if cable and
+ * link partner support it, otherwise autoneg/fw-default.
+ * RS or BASER alone means use the specified FEC type if cable and link partner
+ * support it and either requests it, otherwise no FEC.
+ * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
+ * partner support it, preferring RS to BASER.
+ */
+static u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap)
+{
+ u32 ret = 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_AUTO)
+ ret |= ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) & supported_cap;
+ if (ethtool_cap & ETHTOOL_FEC_RS &&
+ supported_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_BASER) {
+ if (supported_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+ if (supported_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ }
+ return ret;
+}
+
+/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that the
+ * function can never produce, (baser xor rs) and neither req; the
+ * implementation below maps both of those to AUTO. This should never matter,
+ * and it's not clear what a better mapping would be anyway.
+ */
+static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+{
+ bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
+ rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
+ baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
+ baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+
+ if (!baser && !rs)
+ return ETHTOOL_FEC_OFF;
+ return (rs_req ? ETHTOOL_FEC_RS : 0) |
+ (baser_req ? ETHTOOL_FEC_BASER : 0) |
+ (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
+}
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case
+ */
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 rmtadv;
+
+ /* The link partner capabilities are only relevant if the
+ * link supports flow control autonegotiation
+ */
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ return;
+
+ /* If flow control autoneg is supported and enabled, then fine */
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ return;
+
+ rmtadv = 0;
+ if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ rmtadv |= ADVERTISED_Pause;
+ if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ rmtadv |= ADVERTISED_Asym_Pause;
+
+ if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
+}
+
+bool efx_siena_mcdi_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ efx->link_state.up = false;
+ else
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+int efx_siena_mcdi_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ u32 caps;
+ int rc;
+
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+ if (rc != 0)
+ goto fail;
+
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio.mode_support = 0;
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
+ else
+ phy_data->forced_cap = caps;
+
+ /* Assert that we can map efx -> mcdi loopback modes */
+ BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+ BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+ BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+ BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+ BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+ BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+ BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+ BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+ BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+ BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+ BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+ BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+ BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+ BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+ BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+ BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+ rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+ if (rc != 0)
+ goto fail;
+ /* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+ * but by convention we don't use it
+ */
+ efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Record the initial FEC configuration (or nearest approximation
+ * representable in the ethtool configuration space)
+ */
+ efx->fec_config = mcdi_fec_caps_to_ethtool(caps,
+ efx->link_state.speed == 25000 ||
+ efx->link_state.speed == 50000);
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_siena_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ return 0;
+
+fail:
+ kfree(phy_data);
+ return rc;
+}
+
+void efx_siena_mcdi_phy_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ efx->phy_data = NULL;
+ kfree(phy_data);
+}
+
+void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ cmd->base.speed = efx->link_state.speed;
+ cmd->base.duplex = efx->link_state.fd;
+ cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+ cmd->base.phy_address = phy_cfg->port;
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
+ cmd->base.mdio_support = (efx->mdio.mode_support &
+ (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return;
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
+}
+
+int
+efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ if (cmd->base.autoneg) {
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ } else if (cmd->base.duplex) {
+ switch (cmd->base.speed) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
+ }
+ } else {
+ switch (cmd->base.speed) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
+ }
+ }
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
+
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ if (cmd->base.autoneg) {
+ efx_siena_link_set_advertising(efx, cmd->link_modes.advertising);
+ phy_cfg->forced_cap = 0;
+ } else {
+ efx_siena_link_clear_advertising(efx);
+ phy_cfg->forced_cap = caps;
+ }
+ return 0;
+}
+
+int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
+ u32 caps, active, speed; /* MCDI format */
+ bool is_25g = false;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
+ return -EOPNOTSUPP;
+
+ /* behaviour for 25G/50G links depends on 25G BASER bit */
+ speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
+ is_25g = speed == 25000 || speed == 50000;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
+ fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
+ /* BASER is never supported on 100G */
+ if (speed == 100000)
+ fec->fec &= ~ETHTOOL_FEC_BASER;
+
+ active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
+ switch (active) {
+ case MC_CMD_FEC_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case MC_CMD_FEC_BASER:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ case MC_CMD_FEC_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ default:
+ netif_warn(efx, hw, efx->net_dev,
+ "Firmware reports unrecognised FEC_TYPE %u\n",
+ active);
+ /* We don't know what firmware has picked. AUTO is as good a
+ * "can't happen" value as any other.
+ */
+ fec->active_fec = ETHTOOL_FEC_AUTO;
+ break;
+ }
+
+ return 0;
+}
+
+/* Basic validation to ensure that the caps we are going to attempt to set are
+ * in fact supported by the adapter. Note that 'no FEC' is always supported.
+ */
+static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap)
+{
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap &&
+ !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap))
+ return -EINVAL;
+ return 0;
+}
+
+int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx,
+ const struct ethtool_fecparam *fec)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ rc = ethtool_fec_supported(phy_cfg->supported_cap, fec->fec);
+ if (rc)
+ return rc;
+
+ /* Work out what efx_siena_mcdi_phy_set_link_ksettings() would produce from
+ * saved advertising bits
+ */
+ if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising))
+ caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ else
+ caps = phy_cfg->forced_cap;
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, fec->fec);
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ /* Record the new FEC setting for subsequent set_link calls */
+ efx->fec_config = fec->fec;
+ return 0;
+}
+
+int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps = (efx->link_advertising[0] ?
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
+ phy_cfg->forced_cap);
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
+
+ return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+}
+
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf,
+ MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
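+ /* The first result slot reports overall BIST pass/fail */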
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
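+ /* Always account for all eight diagnostic slots so the results
+ * stay aligned with the test names, even if the read failed
+ */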
+ count += 8;
+ }
+ rc = count;
+
+out:
+ return rc;
+}
+
+int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned int flags)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If we support both LONG and SHORT, run LONG for offline tests
+ * (which may break the link) and SHORT otherwise. If only one is
+ * supported, run that one
+ */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx,
+ unsigned int index)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
+#define SFP_PAGE_SIZE 128
+#define SFF_DIAG_TYPE_OFFSET 92
+#define SFF_DIAG_ADDR_CHANGE BIT(2)
+#define SFF_8079_NUM_PAGES 2
+#define SFF_8472_NUM_PAGES 4
+#define SFF_8436_NUM_PAGES 5
+#define SFF_DMT_LEVEL_OFFSET 94
+
+/**
+ * efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
+ * @efx: NIC context
+ * @page: EEPROM page number
+ * @data: Destination data pointer
+ * @offset: Offset in page to copy from in to data
+ * @space: Space available in data
+ *
+ * Return:
+ * >=0 - amount of data copied
+ * <0 - error
+ */
+static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
+ unsigned int page,
+ u8 *data, ssize_t offset,
+ ssize_t space)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+ unsigned int payload_len;
+ unsigned int to_copy;
+ size_t outlen;
+ int rc;
+
+ if (offset > SFP_PAGE_SIZE)
+ return -EINVAL;
+
+ to_copy = min(space, SFP_PAGE_SIZE - offset);
+
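+ /* The MC returns a whole SFP_PAGE_SIZE page; copy out just the
+ * requested window
+ */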
+ MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+
+ if (rc)
+ return rc;
+
+ if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+ SFP_PAGE_SIZE))
+ return -EIO;
+
+ payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
+ if (payload_len != SFP_PAGE_SIZE)
+ return -EIO;
+
+ memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ to_copy);
+
+ return to_copy;
+}
+
+static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
+ unsigned int page,
+ u8 byte)
+{
+ u8 data;
+ int rc;
+
+ rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
+ if (rc == 1)
+ return data;
+
+ return rc;
+}
+
+static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
+{
+ /* Page zero of the EEPROM includes the diagnostic type at byte 92. */
+ return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+ SFF_DIAG_TYPE_OFFSET);
+}
+
+static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
+{
+ /* Page zero of the EEPROM includes the DMT level at byte 94. */
+ return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+ SFF_DMT_LEVEL_OFFSET);
+}
+
+static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
+ return phy_data->media;
+
+ /* A QSFP+ NIC may actually have an SFP+ module attached.
+ * The ID is page 0, byte 0.
+ */
+ switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+ case 0x3:
+ return MC_CMD_MEDIA_SFP_PLUS;
+ case 0xc:
+ case 0xd:
+ return MC_CMD_MEDIA_QSFP_PLUS;
+ default:
+ return 0;
+ }
+}
+
+int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ int rc;
+ ssize_t space_remaining = ee->len;
+ unsigned int page_off;
+ bool ignore_missing;
+ int num_pages;
+ int page;
+
+ switch (efx_mcdi_phy_module_type(efx)) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
+ SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
+ page = 0;
+ ignore_missing = false;
+ break;
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ num_pages = SFF_8436_NUM_PAGES;
+ page = -1; /* We obtain the lower page by asking for -1. */
+ ignore_missing = true; /* Ignore missing pages after page 0. */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
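+ /* ee->offset is linear across the concatenated 128-byte pages */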
+ page_off = ee->offset % SFP_PAGE_SIZE;
+ page += ee->offset / SFP_PAGE_SIZE;
+
+ while (space_remaining && (page < num_pages)) {
+ rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
+ data, page_off,
+ space_remaining);
+
+ if (rc > 0) {
+ space_remaining -= rc;
+ data += rc;
+ page_off = 0;
+ page++;
+ } else if (rc == 0) {
+ space_remaining = 0;
+ } else if (ignore_missing && (page > 0)) {
+ int intended_size = SFP_PAGE_SIZE - page_off;
+
+ space_remaining -= intended_size;
+ if (space_remaining < 0) {
+ space_remaining = 0;
+ } else {
+ memset(data, 0, intended_size);
+ data += intended_size;
+ page_off = 0;
+ page++;
+ rc = 0;
+ }
+ } else {
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo)
+{
+ int sff_8472_level;
+ int diag_type;
+
+ switch (efx_mcdi_phy_module_type(efx)) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
+
+ /* If we can't read the diagnostics level we have none. */
+ if (sff_8472_level < 0)
+ return -EOPNOTSUPP;
+
+ /* Check if this module requires the (unsupported) address
+ * change operation.
+ */
+ diag_type = efx_mcdi_phy_diag_type(efx);
+
+ if (sff_8472_level == 0 ||
+ (diag_type & SFF_DIAG_ADDR_CHANGE)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static unsigned int efx_calc_mac_mtu(struct efx_nic *efx)
+{
+ return EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+}
+
+int efx_siena_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ /* This has no effect on EF10 */
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS,
+ !!(efx->net_dev->features & NETIF_F_RXFCS));
+
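+ /* Map the wanted flow-control flags onto the MCDI FCNTL encoding;
+ * the AUTO and fc_disable overrides are applied after the switch
+ */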
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes,
+ sizeof(cmdbytes), NULL, 0, NULL);
+}
+
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ int rc;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ efx->num_mac_stats * sizeof(u64) : 0;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, !!enable,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
+ MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MAC_STATS_IN_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
+
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+ efx_siena_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+ sizeof(inbuf), NULL, 0, rc);
+ return rc;
+}
+
+void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
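+ /* The final quadword of the stats buffer is a generation count
+ * written by firmware; invalidate it so we can tell when a fresh
+ * DMA snapshot has arrived
+ */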
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
+}
+
+void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
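+ /* Busy-wait up to ~1ms (10 x 100us) for firmware to write the
+ * pulled snapshot
+ */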
+ while (dma_stats[efx->num_mac_stats - 1] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
+}
+
+int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx)
+{
+ int rc;
+
+ if (!efx->num_mac_stats)
+ return 0;
+
+ /* Allocate buffer for stats */
+ rc = efx_siena_alloc_buffer(efx, &efx->stats_buffer,
+ efx->num_mac_stats * sizeof(u64),
+ GFP_KERNEL);
+ if (rc) {
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to allocate DMA buffer: %d\n", rc);
+ return rc;
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64) efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64) virt_to_phys(efx->stats_buffer.addr));
+
+ return 0;
+}
+
+void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx)
+{
+ efx_siena_free_buffer(efx, &efx->stats_buffer);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
+};
+
+void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_siena_link_status_changed(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.h b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h
new file mode 100644
index 000000000000..7a6de13d9ce6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_PORT_COMMON_H
+#define EFX_MCDI_PORT_COMMON_H
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+void efx_siena_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+bool efx_siena_mcdi_phy_poll(struct efx_nic *efx);
+int efx_siena_mcdi_phy_probe(struct efx_nic *efx);
+void efx_siena_mcdi_phy_remove(struct efx_nic *efx);
+void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd);
+int efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd);
+int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec);
+int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx,
+ const struct ethtool_fecparam *fec);
+int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned int flags);
+const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx,
+ unsigned int index);
+int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
+ struct ethtool_eeprom *ee, u8 *data);
+int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx,
+ struct ethtool_modinfo *modinfo);
+int efx_siena_mcdi_set_mac(struct efx_nic *efx);
+int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx);
+void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/mtd.c b/drivers/net/ethernet/sfc/siena/mtd.c
new file mode 100644
index 000000000000..12a624247f44
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mtd.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "efx.h"
+
+#define to_efx_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mtd_partition, mtd)
+
+/* MTD interface */
+
+static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct efx_nic *efx = mtd->priv;
+
+ return efx->type->mtd_erase(mtd, erase->addr, erase->len);
+}
+
+static void efx_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc;
+
+ rc = efx->type->mtd_sync(mtd);
+ if (rc)
+ pr_err("%s: %s sync failed (%d)\n",
+ part->name, part->dev_type_name, rc);
+}
+
+static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part)
+{
+ int rc;
+
+ for (;;) {
+ rc = mtd_device_unregister(&part->mtd);
+ if (rc != -EBUSY)
+ break;
+ ssleep(1);
+ }
+ WARN_ON(rc);
+ list_del(&part->node);
+}
+
+int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
+{
+ struct efx_mtd_partition *part;
+ size_t i;
+
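+ /* @parts is an array of caller-defined structures embedding
+ * struct efx_mtd_partition, so step by @sizeof_part rather than
+ * sizeof(*part)
+ */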
+ for (i = 0; i < n_parts; i++) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
+
+ part->mtd.writesize = 1;
+
+ if (!(part->mtd.flags & MTD_NO_ERASE))
+ part->mtd.flags |= MTD_WRITEABLE;
+
+ part->mtd.owner = THIS_MODULE;
+ part->mtd.priv = efx;
+ part->mtd.name = part->name;
+ part->mtd._erase = efx_mtd_erase;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
+ part->mtd._sync = efx_mtd_sync;
+
+ efx->type->mtd_rename(part);
+
+ if (mtd_device_register(&part->mtd, NULL, 0))
+ goto fail;
+
+ /* Add to list in order - efx_siena_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
+ }
+
+ return 0;
+
+fail:
+ while (i--) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
+ efx_siena_mtd_remove_partition(part);
+ }
+ /* Failure is unlikely here, but probably means we're out of memory */
+ return -ENOMEM;
+}
+
+void efx_siena_mtd_remove(struct efx_nic *efx)
+{
+ struct efx_mtd_partition *parts, *part, *next;
+
+ WARN_ON(efx_dev_registered(efx));
+
+ if (list_empty(&efx->mtd_list))
+ return;
+
+ parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+ node);
+
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ efx_siena_mtd_remove_partition(part);
+
+ kfree(parts);
+}
+
+void efx_siena_mtd_rename(struct efx_nic *efx)
+{
+ struct efx_mtd_partition *part;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
+}
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
new file mode 100644
index 000000000000..ff7bbc325952
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -0,0 +1,1715 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
+#include <net/xdp.h>
+
+#include "enum.h"
+#include "bitfield.h"
+#include "filter.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#ifdef DEBUG
+#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EFX_MAX_CHANNELS 32U
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_EXTRA_CHANNEL_IOV 0
+#define EFX_EXTRA_CHANNEL_PTP 1
+#define EFX_MAX_EXTRA_CHANNELS 2U
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */
+#define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */
+#define EFX_TXQ_TYPE_HIGHPRI 4 /* High-priority (for TC) */
+#define EFX_TXQ_TYPES 8
+/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */
+#define EFX_MAX_TXQ_PER_CHANNEL 4
+#define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS)
+
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Minimum MTU, from RFC791 (IP) */
+#define EFX_MIN_MTU 68
+
+/* Maximum total header length for TSOv2 */
+#define EFX_TSO2_MAX_HDRLEN 208
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer. Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT 4
+#endif
+
+/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and
+ * still fit two standard MTU size packets into a single 4K page.
+ */
+#define EFX_XDP_HEADROOM 128
+#define EFX_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+struct hwtstamp_config;
+
+struct efx_self_tests;
+
+/**
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct efx_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+ struct efx_buffer buf;
+ unsigned int index;
+ unsigned int entries;
+};
+
+/**
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ * freed when descriptor completes
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ * member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ * descriptor.
+ * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
+ * @len: Length of this fragment.
+ * This field is zero when the queue slot is empty.
+ * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
+ */
+struct efx_tx_buffer {
+ union {
+ const struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+ union {
+ efx_qword_t option; /* EF10 */
+ dma_addr_t dma_addr;
+ };
+ unsigned short flags;
+ unsigned short len;
+ unsigned short unmap_len;
+ unsigned short dma_offset;
+};
+#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
+#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
+#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path. There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @label: Label for TX completion events.
+ * Is our index within @channel->tx_queue array.
+ * @type: configuration type of this TX queue. A bitmask of %EFX_TXQ_TYPE_* flags.
+ * @tso_version: Version of TSO in use for this queue.
+ * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series.
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @cb_page: Array of pages of copy buffers. Carved up according to
+ * %EFX_TX_CB_ORDER into %EFX_TX_CB_SIZE-sized chunks.
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
+ * @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
+ * @read_count: Current read pointer.
+ * This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @merge_events: Number of TX merged completion events
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
+ * @insert_count: Current insert pointer
+ * This is the number of buffers that have been added to the
+ * software ring.
+ * @write_count: Current write pointer
+ * This is the number of buffers that have been added to the
+ * hardware ring.
+ * @packet_write_count: Completable write pointer
+ * This is the write pointer of the last packet written.
+ * Normally this will equal @write_count, but as option descriptors
+ * don't produce completion events, they won't update this.
+ * Filled in iff @efx->type->option_descriptors; only used for PIO.
+ * Thus, this is written and used on EF10, and neither on farch.
+ * @old_read_count: The value of read_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of read_count if this
+ * variable indicates that the queue is full. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ * blocks
+ * @tso_packets: Number of packets via the TSO xmit path
+ * @tso_fallbacks: Number of times TSO fallback used
+ * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_pending: Are any packets waiting to be pushed to the NIC
+ * @cb_packets: Number of times the TX copybreak feature has been used
+ * @notify_count: Count of notified descriptors to the NIC
+ * @empty_read_count: If the completion path has seen the queue as empty
+ * and the transmission path has not yet checked this, the value of
+ * @read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
+ */
+struct efx_tx_queue {
+ /* Members which don't change on the fast path */
+ struct efx_nic *efx ____cacheline_aligned_in_smp;
+ unsigned int queue;
+ unsigned int label;
+ unsigned int type;
+ unsigned int tso_version;
+ bool tso_encap;
+ struct efx_channel *channel;
+ struct netdev_queue *core_txq;
+ struct efx_tx_buffer *buffer;
+ struct efx_buffer *cb_page;
+ struct efx_special_buffer txd;
+ unsigned int ptr_mask;
+ void __iomem *piobuf;
+ unsigned int piobuf_offset;
+ bool initialised;
+ bool timestamping;
+ bool xdp_tx;
+
+ /* Members used mainly on the completion path */
+ unsigned int read_count ____cacheline_aligned_in_smp;
+ unsigned int old_write_count;
+ unsigned int merge_events;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
+ u32 completed_timestamp_major;
+ u32 completed_timestamp_minor;
+
+ /* Members used only on the xmit path */
+ unsigned int insert_count ____cacheline_aligned_in_smp;
+ unsigned int write_count;
+ unsigned int packet_write_count;
+ unsigned int old_read_count;
+ unsigned int tso_bursts;
+ unsigned int tso_long_headers;
+ unsigned int tso_packets;
+ unsigned int tso_fallbacks;
+ unsigned int pushes;
+ unsigned int pio_packets;
+ bool xmit_pending;
+ unsigned int cb_packets;
+ unsigned int notify_count;
+ /* Statistics to supplement MAC stats */
+ unsigned long tx_packets;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
+ atomic_t flush_outstanding;
+};
+
+#define EFX_TX_CB_ORDER 7
+#define EFX_TX_CB_SIZE ((1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN)
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @page: The associated page buffer.
+ * Will be %NULL if the buffer slot is currently free.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ * If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
+ */
+struct efx_rx_buffer {
+ dma_addr_t dma_addr;
+ struct page *page;
+ u16 page_offset;
+ u16 len;
+ u16 flags;
+};
+#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
+#define EFX_RX_PKT_CSUMMED 0x0002
+#define EFX_RX_PKT_DISCARD 0x0004
+#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
+#define EFX_RX_PKT_CSUM_LEVEL 0x0200
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[] ____cacheline_aligned;
+};
+
+/**
+ * struct efx_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @core_index: Index of network core RX queue. Will be >= 0 iff this
+ * is associated with a real RX queue.
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @refill_enabled: Enable refill whenever fill level is low
+ * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
+ * @rxq_flush_pending.
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ * the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ * recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ * (<= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ * This records the minimum fill level observed when a ring
+ * refill was triggered.
+ * @recycle_count: RX buffer recycle counter.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
+ */
+struct efx_rx_queue {
+ struct efx_nic *efx;
+ int core_index;
+ struct efx_rx_buffer *buffer;
+ struct efx_special_buffer rxd;
+ unsigned int ptr_mask;
+ bool refill_enabled;
+ bool flush_pending;
+
+ unsigned int added_count;
+ unsigned int notified_count;
+ unsigned int removed_count;
+ unsigned int scatter_n;
+ unsigned int scatter_len;
+ struct page **page_ring;
+ unsigned int page_add;
+ unsigned int page_remove;
+ unsigned int page_recycle_count;
+ unsigned int page_recycle_failed;
+ unsigned int page_recycle_full;
+ unsigned int page_ptr_mask;
+ unsigned int max_fill;
+ unsigned int fast_fill_trigger;
+ unsigned int min_fill;
+ unsigned int min_overfill;
+ unsigned int recycle_count;
+ struct timer_list slow_fill;
+ unsigned int slow_fill_count;
+ /* Statistics to supplement MAC stats */
+ unsigned long rx_packets;
+ struct xdp_rxq_info xdp_rxq_info;
+ bool xdp_rxq_info_valid;
+};
+
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
+};
+
+/**
+ * struct efx_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @channel: Channel instance number
+ * @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @irq_moderation_us: IRQ moderation value (in microseconds)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
+ * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
+ * @eventq_read_ptr: Event queue read pointer
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
+ * @rfs_filter_count: number of accelerated RFS filters currently in place;
+ * equals the count of @rps_flow_id slots filled
+ * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters
+ * were checked for expiry
+ * @rfs_expire_index: next accelerated RFS filter ID to check for expiry
+ * @n_rfs_succeeded: number of successful accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
+ * @filter_work: Work item for efx_filter_rfs_expire()
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ * lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ * __efx_siena_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ * by __efx_siena_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
+ * @rx_queue: RX queue for this channel
+ * @tx_queue: TX queues for this channel
+ * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
+ */
+struct efx_channel {
+ struct efx_nic *efx;
+ int channel;
+ const struct efx_channel_type *type;
+ bool eventq_init;
+ bool enabled;
+ int irq;
+ unsigned int irq_moderation_us;
+ struct net_device *napi_dev;
+ struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long busy_poll_state;
+#endif
+ struct efx_special_buffer eventq;
+ unsigned int eventq_mask;
+ unsigned int eventq_read_ptr;
+ int event_test_cpu;
+
+ unsigned int irq_count;
+ unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rfs_filter_count;
+ unsigned int rfs_last_expiry;
+ unsigned int rfs_expire_index;
+ unsigned int n_rfs_succeeded;
+ unsigned int n_rfs_failed;
+ struct delayed_work filter_work;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
+#endif
+
+ unsigned int n_rx_tobe_disc;
+ unsigned int n_rx_ip_hdr_chksum_err;
+ unsigned int n_rx_tcp_udp_chksum_err;
+ unsigned int n_rx_outer_ip_hdr_chksum_err;
+ unsigned int n_rx_outer_tcp_udp_chksum_err;
+ unsigned int n_rx_inner_ip_hdr_chksum_err;
+ unsigned int n_rx_inner_tcp_udp_chksum_err;
+ unsigned int n_rx_eth_crc_err;
+ unsigned int n_rx_mcast_mismatch;
+ unsigned int n_rx_frm_trunc;
+ unsigned int n_rx_overlength;
+ unsigned int n_skbuff_leaks;
+ unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
+ unsigned int n_rx_xdp_drops;
+ unsigned int n_rx_xdp_bad_drops;
+ unsigned int n_rx_xdp_tx;
+ unsigned int n_rx_xdp_redirect;
+
+ unsigned int rx_pkt_n_frags;
+ unsigned int rx_pkt_index;
+
+ struct list_head *rx_list;
+
+ struct efx_rx_queue rx_queue;
+ struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];
+ struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
+};
+
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
+
+/**
+ * struct efx_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ * May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @copy: Copy the channel state prior to reallocation. May be %NULL if
+ * reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ * created. If %NULL, TX queues are not created.
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ * while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ * channel's TX queues.
+ */
+struct efx_channel_type {
+ void (*handle_no_channel)(struct efx_nic *);
+ int (*pre_probe)(struct efx_channel *);
+ void (*post_remove)(struct efx_channel *);
+ void (*get_name)(struct efx_channel *, char *buf, size_t len);
+ struct efx_channel *(*copy)(const struct efx_channel *);
+ bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*want_txqs)(struct efx_channel *);
+ bool keep_eventq;
+ bool want_pio;
+};
+
+enum efx_led_mode {
+ EFX_LED_OFF = 0,
+ EFX_LED_ON = 1,
+ EFX_LED_DEFAULT = 2
+};
+
+#define STRING_TABLE_LOOKUP(val, member) \
+ (((val) < member ## _max) ? member ## _names[val] : "(invalid)")
+
+extern const char *const efx_siena_loopback_mode_names[];
+extern const unsigned int efx_siena_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_siena_loopback_mode)
+
+enum efx_int_mode {
+ /* Be careful if altering to correct macro below */
+ EFX_INT_MODE_MSIX = 0,
+ EFX_INT_MODE_MSI = 1,
+ EFX_INT_MODE_LEGACY = 2,
+ EFX_INT_MODE_MAX /* Insert any new items before this */
+};
+#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
+
+enum nic_state {
+ STATE_UNINIT = 0, /* device being probed/removed or is frozen */
+ STATE_READY = 1, /* hardware ready and netdev registered */
+ STATE_DISABLED = 2, /* device disabled due to hardware errors */
+ STATE_RECOVERY = 3, /* device recovering from PCI error */
+};
+
+/* Forward declaration */
+struct efx_nic;
+
+/* Pseudo bit-mask flow control field */
+#define EFX_FC_RX FLOW_CTRL_RX
+#define EFX_FC_TX FLOW_CTRL_TX
+#define EFX_FC_AUTO 4
+
+/**
+ * struct efx_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct efx_link_state {
+ bool up;
+ bool fd;
+ u8 fc;
+ unsigned int speed;
+};
+
+static inline bool efx_link_state_equal(const struct efx_link_state *left,
+ const struct efx_link_state *right)
+{
+ return left->up == right->up && left->fd == right->fd &&
+ left->fc == right->fc && left->speed == right->speed;
+}
+
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_LOW_POWER: set to low power through MDIO
+ * @PHY_MODE_OFF: switched off through external control
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+ PHY_MODE_NORMAL = 0,
+ PHY_MODE_TX_DISABLED = 1,
+ PHY_MODE_LOW_POWER = 2,
+ PHY_MODE_OFF = 4,
+ PHY_MODE_SPECIAL = 8,
+};
+
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+ return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
+
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
+ */
+struct efx_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EFX_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union efx_multicast_hash {
+ u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+ efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
+};
+
+struct vfdi_status;
+
+/* The reserved RSS context value */
+#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
+/**
+ * struct efx_rss_context - A user-defined RSS context for filtering
+ * @list: node of linked list on which this struct is stored
+ * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
+ * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
+ * @user_id: the rss_context ID exposed to userspace over ethtool.
+ * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
+ * @rx_hash_key: Toeplitz hash key for this RSS context
+ * @indir_table: Indirection table for this RSS context
+ */
+struct efx_rss_context {
+ struct list_head list;
+ u32 context_id;
+ u32 user_id;
+ bool rx_hash_udp_4tuple;
+ u8 rx_hash_key[40];
+ u32 rx_indir_table[128];
+};
+
+#ifdef CONFIG_RFS_ACCEL
+/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
+ * is used to test if filter does or will exist.
+ */
+#define EFX_ARFS_FILTER_ID_PENDING -1
+#define EFX_ARFS_FILTER_ID_ERROR -2
+#define EFX_ARFS_FILTER_ID_REMOVING -3
+/**
+ * struct efx_arfs_rule - record of an ARFS filter and its IDs
+ * @node: linkage into hash table
+ * @spec: details of the filter (used as key for hash table). Use efx->type to
+ * determine which member to use.
+ * @rxq_index: channel to which the filter will steer traffic.
+ * @arfs_id: filter ID which was returned to ARFS
+ * @filter_id: index in software filter table. May be
+ * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
+ * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
+ * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
+ */
+struct efx_arfs_rule {
+ struct hlist_node node;
+ struct efx_filter_spec spec;
+ u16 rxq_index;
+ u16 arfs_id;
+ s32 filter_id;
+};
+
+/* Size chosen so that the table is one page (4kB) */
+#define EFX_ARFS_HASH_TABLE_SIZE 512
+
+/**
+ * struct efx_async_filter_insertion - Request to asynchronously insert a filter
+ * @net_dev: Reference to the netdevice
+ * @spec: The filter to insert
+ * @work: Workitem for this request
+ * @rxq_index: Identifies the channel for which this request was made
+ * @flow_id: Identifies the kernel-side flow for which this request was made
+ */
+struct efx_async_filter_insertion {
+ struct net_device *net_dev;
+ struct efx_filter_spec spec;
+ struct work_struct work;
+ u16 rxq_index;
+ u32 flow_id;
+};
+
+/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
+#define EFX_RPS_MAX_IN_FLIGHT 8
+#endif /* CONFIG_RFS_ACCEL */
+
+enum efx_xdp_tx_queues_mode {
+ EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */
+ EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */
+ EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
+};
+
+/**
+ * struct efx_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
+ * @type: Controller type attributes
+ * @legacy_irq: IRQ number
+ * @workqueue: Workqueue for port reconfigures and the HW monitor.
+ * Work items do not hold and must not acquire RTNL.
+ * @workqueue_name: Name of workqueue
+ * @reset_work: Scheduled reset workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
+ * @interrupt_mode: Interrupt mode
+ * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irqs_hooked: Channel interrupts are hooked
+ * @irq_mod_step_us: Step size for IRQ moderation for RX event queues
+ * @irq_rx_moderation_us: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
+ * @reset_pending: Bitmask for pending resets
+ * @tx_queue: TX DMA queues
+ * @rx_queue: RX DMA queues
+ * @channel: Channels
+ * @msi_context: Context for each MSI
+ * @extra_channel_type: Types of extra (non-traffic) channels that
+ * should be allocated for this NIC
+ * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
+ * @next_buffer_table: First available buffer table id
+ * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
+ * @n_extra_tx_channels: Number of extra channels with TX queues
+ * @tx_queues_per_channel: number of TX queues probed on each channel
+ * @n_xdp_channels: Number of channels used for XDP TX
+ * @xdp_channel_offset: Offset of zeroth channel used for XDP TX.
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ * accordance with NET_IP_ALIGN
+ * @rx_dma_len: Current maximum RX DMA length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
+ * @rx_scatter: Scatter mode enabled for receives
+ * @rss_context: Main RSS context. Its @list member is the head of the list of
+ * RSS contexts created by user requests
+ * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
+ * @irq_status: Interrupt status buffer
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
+ * @mtd_list: List of MTDs attached to the NIC
+ * @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ * efx_monitor() and efx_siena_reconfigure_port()
+ * @port_enabled: Port enabled indicator.
+ * Serialises efx_siena_stop_all(), efx_siena_start_all(),
+ * efx_monitor() and efx_mac_work() with kernel interfaces.
+ * Safe to read under any one of the rtnl_lock, mac_lock, or netif_tx_lock,
+ * but all three must be held to modify it.
+ * @port_initialized: Port initialized?
+ * @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
+ * @stats_buffer: DMA buffer for statistics
+ * @phy_type: PHY type
+ * @phy_data: PHY private data (including PHY-specific stats)
+ * @mdio: PHY MDIO interface
+ * @mdio_bus: PHY MDIO bus ID (only used by Siena)
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
+ * @link_advertising: Autonegotiation advertising flags
+ * @fec_config: Forward Error Correction configuration flags. For bit positions
+ * see &enum ethtool_fec_config_bits.
+ * @link_state: Current state of the link
+ * @n_link_state_changes: Number of times the link has changed state
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
+ * @wanted_fc: Wanted flow control flags
+ * @fc_disable: When non-zero flow control is disabled. Typically used to
+ * ensure that network back pressure doesn't delay dma queue flushes.
+ * Serialised by the rtnl lock.
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
+ * @xdp_prog: Current XDP programme for this interface
+ * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_mutex: Protects RPS state of all channels
+ * @rps_slot_map: bitmap of in-flight entries in @rps_slot
+ * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
+ * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
+ * @rps_next_id).
+ * @rps_hash_table: Mapping between ARFS filters and their various IDs
+ * @rps_next_id: next arfs_id for an ARFS filter
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
+ * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
+ * Decremented when efx_flush_rx_queue() is called.
+ * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
+ * completed (either success or failure). Not used when MCDI is used to
+ * flush receive queues.
+ * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
+ * @vf_count: Number of VFs intended to be enabled.
+ * @vf_init_count: Number of VFs that have been fully initialised.
+ * @vi_scale: log2 number of vnics per VF.
+ * @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
+ * @vpd_sn: Serial number read from VPD
+ * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
+ * xdp_rxq_info structures?
+ * @netdev_notifier: Netdevice notifier.
+ * @mem_bar: The BAR that is mapped into membase.
+ * @reg_base: Offset from the start of the bar to the function control window.
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
+ * field is used by efx_test_interrupts() to verify that an
+ * interrupt has occurred.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * efx_nic_type::{update,start,stop}_stats.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ *
+ * This is stored in the private area of the &struct net_device.
+ */
+struct efx_nic {
+ /* The following fields should be written very rarely */
+
+ char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
+ struct pci_dev *pci_dev;
+ unsigned int port_num;
+ const struct efx_nic_type *type;
+ int legacy_irq;
+ bool eeh_disabled_legacy_irq;
+ struct workqueue_struct *workqueue;
+ char workqueue_name[16];
+ struct work_struct reset_work;
+ resource_size_t membase_phys;
+ void __iomem *membase;
+
+ unsigned int vi_stride;
+
+ enum efx_int_mode interrupt_mode;
+ unsigned int timer_quantum_ns;
+ unsigned int timer_max_ns;
+ bool irq_rx_adaptive;
+ bool irqs_hooked;
+ unsigned int irq_mod_step_us;
+ unsigned int irq_rx_moderation_us;
+ u32 msg_enable;
+
+ enum nic_state state;
+ unsigned long reset_pending;
+
+ struct efx_channel *channel[EFX_MAX_CHANNELS];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
+ const struct efx_channel_type *
+ extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+
+ unsigned int xdp_tx_queue_count;
+ struct efx_tx_queue **xdp_tx_queues;
+ enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
+
+ unsigned rxq_entries;
+ unsigned txq_entries;
+ unsigned int txq_stop_thresh;
+ unsigned int txq_wake_thresh;
+
+ unsigned tx_dc_base;
+ unsigned rx_dc_base;
+ unsigned sram_lim_qw;
+ unsigned next_buffer_table;
+
+ unsigned int max_channels;
+ unsigned int max_vis;
+ unsigned int max_tx_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned rss_spread;
+ unsigned tx_channel_offset;
+ unsigned n_tx_channels;
+ unsigned n_extra_tx_channels;
+ unsigned int tx_queues_per_channel;
+ unsigned int n_xdp_channels;
+ unsigned int xdp_channel_offset;
+ unsigned int xdp_tx_per_channel;
+ unsigned int rx_ip_align;
+ unsigned int rx_dma_len;
+ unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+ unsigned int rx_page_buf_step;
+ unsigned int rx_bufs_per_page;
+ unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
+ int rx_packet_ts_offset;
+ bool rx_scatter;
+ struct efx_rss_context rss_context;
+ struct mutex rss_lock;
+ u32 vport_id;
+
+ unsigned int_error_count;
+ unsigned long int_error_expire;
+
+ bool must_realloc_vis;
+ bool irq_soft_enabled;
+ struct efx_buffer irq_status;
+ unsigned irq_zero_count;
+ unsigned irq_level;
+ struct delayed_work selftest_work;
+
+#ifdef CONFIG_SFC_SIENA_MTD
+ struct list_head mtd_list;
+#endif
+
+ void *nic_data;
+ struct efx_mcdi_data *mcdi;
+
+ struct mutex mac_lock;
+ struct work_struct mac_work;
+ bool port_enabled;
+
+ bool mc_bist_for_other_fn;
+ bool port_initialized;
+ struct net_device *net_dev;
+
+ netdev_features_t fixed_features;
+
+ u16 num_mac_stats;
+ struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
+
+ unsigned int phy_type;
+ void *phy_data;
+ struct mdio_if_info mdio;
+ unsigned int mdio_bus;
+ enum efx_phy_mode phy_mode;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
+ u32 fec_config;
+ struct efx_link_state link_state;
+ unsigned int n_link_state_changes;
+
+ bool unicast_filter;
+ union efx_multicast_hash multicast_hash;
+ u8 wanted_fc;
+ unsigned fc_disable;
+
+ atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+ u64 loopback_modes;
+
+ void *loopback_selftest;
+ /* We access loopback_selftest immediately before running XDP,
+ * so we want them next to each other.
+ */
+ struct bpf_prog __rcu *xdp_prog;
+
+ struct rw_semaphore filter_sem;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ struct mutex rps_mutex;
+ unsigned long rps_slot_map;
+ struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
+ spinlock_t rps_hash_lock;
+ struct hlist_head *rps_hash_table;
+ u32 rps_next_id;
+#endif
+
+ atomic_t active_queues;
+ atomic_t rxq_flush_pending;
+ atomic_t rxq_flush_outstanding;
+ wait_queue_head_t flush_wq;
+
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ unsigned vf_count;
+ unsigned vf_init_count;
+ unsigned vi_scale;
+#endif
+
+ struct efx_ptp_data *ptp_data;
+ bool ptp_warned;
+
+ char *vpd_sn;
+ bool xdp_rxq_info_failed;
+
+ struct notifier_block netdev_notifier;
+
+ unsigned int mem_bar;
+ u32 reg_base;
+
+ /* The following fields may be written more often */
+
+ struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+ spinlock_t biu_lock;
+ int last_irq_cpu;
+ spinlock_t stats_lock;
+ atomic_t n_rx_noskb_drops;
+};
+
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+ return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+static inline unsigned int efx_port_num(struct efx_nic *efx)
+{
+ return efx->port_num;
+}
+
+struct efx_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
+
+struct efx_udp_tunnel {
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID 0xffff
+ u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */
+ __be16 port;
+};
+
+/**
+ * struct efx_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
+ * @mem_map_size: Get memory BAR mapped size
+ * @probe: Probe the controller
+ * @remove: Free resources allocated by probe()
+ * @init: Initialise the controller
+ * @dimension_resources: Dimension controller resources (buffer table,
+ * and VIs once the available interrupt resources are clear)
+ * @fini: Shut down the controller
+ * @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
+ * @reset: Reset the controller hardware and possibly the PHY. This will
+ * be called while the controller is uninitialised.
+ * @probe_port: Probe the MAC and PHY
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
+ * @update_stats_atomic: Update statistics while in atomic context, if that
+ * is more limiting than @update_stats. Otherwise, leave %NULL and
+ * the driver core will call @update_stats.
+ * @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
+ * @stop_stats: Stop the regular fetching of statistics
+ * @push_irq_moderation: Apply interrupt moderation value
+ * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
+ * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
+ * to the hardware. Serialised by the mac_lock.
+ * @check_mac_fault: Check MAC fault state. True if fault present.
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @get_fec_stats: Get standard FEC statistics.
+ * @test_chip: Test registers. May use efx_farch_test_registers(), and is
+ * expected to reset the NIC.
+ * @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ * The SDU length may be any value from 0 up to the protocol-
+ * defined maximum, but its buffer will be padded to a multiple
+ * of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU. The offset will
+ * be a multiple of 4. The length may not be, but the buffer
+ * will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
+ * return an appropriate error code for aborting any current
+ * request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue (and select TXQ type)
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @tx_enqueue: Add an SKB to TX queue
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
+ * @rx_push_rss_context_config: Write RSS hash key and indirection table for
+ * user RSS context to the NIC
+ * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user
+ * RSS context back from the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: add or replace a filter
+ * @filter_remove_safe: remove a filter by ID, carefully
+ * @filter_get_safe: retrieve a filter by ID, carefully
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EFX_FILTER_PRI_AUTO
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using efx_siena_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ * must validate and update rx_filter.
+ * @get_phys_port_id: Get the underlying physical port id.
+ * @set_mac_address: Set the MAC address of the device
+ * @tso_versions: Returns mask of firmware-assisted TSO versions supported.
+ * If %NULL, then device does not support any TSO version.
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
+ * @udp_tnl_has_port: Check if a port has been added as UDP tunnel
+ * @print_additional_fwver: Dump NIC-specific additional FW version info
+ * @sensor_event: Handle a sensor event from MCDI
+ * @rx_recycle_ring_size: Size of the RX recycle ring
+ * @revision: Hardware architecture revision
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @max_dma_mask: Maximum possible DMA mask
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @option_descriptors: NIC supports TX option descriptors
+ * @min_interrupt_mode: Lowest capability interrupt mode supported
+ * from &enum efx_int_mode.
+ * @timer_period_max: Maximum period of interrupt timer (in ticks)
+ * @offload_features: net_device feature flags for protocol offload
+ * features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
+ */
+struct efx_nic_type {
+ bool is_vf;
+ unsigned int (*mem_bar)(struct efx_nic *efx);
+ unsigned int (*mem_map_size)(struct efx_nic *efx);
+ int (*probe)(struct efx_nic *efx);
+ void (*remove)(struct efx_nic *efx);
+ int (*init)(struct efx_nic *efx);
+ int (*dimension_resources)(struct efx_nic *efx);
+ void (*fini)(struct efx_nic *efx);
+ void (*monitor)(struct efx_nic *efx);
+ enum reset_type (*map_reset_reason)(enum reset_type reason);
+ int (*map_reset_flags)(u32 *flags);
+ int (*reset)(struct efx_nic *efx, enum reset_type method);
+ int (*probe_port)(struct efx_nic *efx);
+ void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ int (*fini_dmaq)(struct efx_nic *efx);
+ void (*prepare_flush)(struct efx_nic *efx);
+ void (*finish_flush)(struct efx_nic *efx);
+ void (*prepare_flr)(struct efx_nic *efx);
+ void (*finish_flr)(struct efx_nic *efx);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
+ size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
+ void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
+ void (*stop_stats)(struct efx_nic *efx);
+ void (*push_irq_moderation)(struct efx_channel *channel);
+ int (*reconfigure_port)(struct efx_nic *efx);
+ void (*prepare_enable_fc_tx)(struct efx_nic *efx);
+ int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only);
+ bool (*check_mac_fault)(struct efx_nic *efx);
+ void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct efx_nic *efx, u32 type);
+ void (*resume_wol)(struct efx_nic *efx);
+ void (*get_fec_stats)(struct efx_nic *efx,
+ struct ethtool_fec_stats *fec_stats);
+ unsigned int (*check_caps)(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset);
+ int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
+ int (*test_nvram)(struct efx_nic *efx);
+ void (*mcdi_request)(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len);
+ bool (*mcdi_poll_response)(struct efx_nic *efx);
+ void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+ size_t pdu_offset, size_t pdu_len);
+ int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*mcdi_reboot_detected)(struct efx_nic *efx);
+ void (*irq_enable_master)(struct efx_nic *efx);
+ int (*irq_test_generate)(struct efx_nic *efx);
+ void (*irq_disable_non_ev)(struct efx_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct efx_tx_queue *tx_queue);
+ void (*tx_init)(struct efx_tx_queue *tx_queue);
+ void (*tx_remove)(struct efx_tx_queue *tx_queue);
+ void (*tx_write)(struct efx_tx_queue *tx_queue);
+ netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+ unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
+ int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table, const u8 *key);
+ int (*rx_pull_rss_config)(struct efx_nic *efx);
+ int (*rx_push_rss_context_config)(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key);
+ int (*rx_pull_rss_context_config)(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+ void (*rx_restore_rss_contexts)(struct efx_nic *efx);
+ int (*rx_probe)(struct efx_rx_queue *rx_queue);
+ void (*rx_init)(struct efx_rx_queue *rx_queue);
+ void (*rx_remove)(struct efx_rx_queue *rx_queue);
+ void (*rx_write)(struct efx_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ void (*rx_packet)(struct efx_channel *channel);
+ bool (*rx_buf_hash_valid)(const u8 *prefix);
+ int (*ev_probe)(struct efx_channel *channel);
+ int (*ev_init)(struct efx_channel *channel);
+ void (*ev_fini)(struct efx_channel *channel);
+ void (*ev_remove)(struct efx_channel *channel);
+ int (*ev_process)(struct efx_channel *channel, int quota);
+ void (*ev_read_ack)(struct efx_channel *channel);
+ void (*ev_test_generate)(struct efx_channel *channel);
+ int (*filter_table_probe)(struct efx_nic *efx);
+ void (*filter_table_restore)(struct efx_nic *efx);
+ void (*filter_table_remove)(struct efx_nic *efx);
+ void (*filter_update_rx_scatter)(struct efx_nic *efx);
+ s32 (*filter_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+ int (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+ s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_SIENA_MTD
+ int (*mtd_probe)(struct efx_nic *efx);
+ void (*mtd_rename)(struct efx_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
+ int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
+ int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+ int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+ int (*get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+ int (*sriov_init)(struct efx_nic *efx);
+ void (*sriov_fini)(struct efx_nic *efx);
+ bool (*sriov_wanted)(struct efx_nic *efx);
+ void (*sriov_reset)(struct efx_nic *efx);
+ void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac);
+ int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos);
+ int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+ bool spoofchk);
+ int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivi);
+ int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+ int link_state);
+ int (*vswitching_probe)(struct efx_nic *efx);
+ int (*vswitching_restore)(struct efx_nic *efx);
+ void (*vswitching_remove)(struct efx_nic *efx);
+ int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+ int (*set_mac_address)(struct efx_nic *efx);
+ u32 (*tso_versions)(struct efx_nic *efx);
+ int (*udp_tnl_push_ports)(struct efx_nic *efx);
+ bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
+ size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
+ size_t len);
+ void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
+ unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx);
+
+ int revision;
+ unsigned int txd_ptr_tbl_base;
+ unsigned int rxd_ptr_tbl_base;
+ unsigned int buf_tbl_base;
+ unsigned int evq_ptr_tbl_base;
+ unsigned int evq_rptr_tbl_base;
+ u64 max_dma_mask;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
+ unsigned int rx_buffer_padding;
+ bool can_rx_scatter;
+ bool always_rx_scatter;
+ bool option_descriptors;
+ unsigned int min_interrupt_mode;
+ unsigned int timer_period_max;
+ netdev_features_t offload_features;
+ int mcdi_max_ver;
+ unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
+ unsigned int rx_hash_key_size;
+};
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_channels);
+ return efx->channel[index];
+}
+
+/* Iterate over all used channels */
+#define efx_for_each_channel(_channel, _efx) \
+ for (_channel = (_efx)->channel[0]; \
+ _channel; \
+ _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
+ (_efx)->channel[_channel->channel + 1] : NULL)
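+/* Usage sketch (illustrative only, not part of the driver):
+ *
+ *	struct efx_channel *channel;
+ *
+ *	efx_for_each_channel(channel, efx)
+ *		netif_dbg(efx, drv, efx->net_dev, "channel %d uses IRQ %d\n",
+ *			  channel->channel, channel->irq);
+ */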
+
+/* Iterate over all used channels in reverse */
+#define efx_for_each_channel_rev(_channel, _efx) \
+ for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
+ _channel; \
+ _channel = _channel->channel ? \
+ (_efx)->channel[_channel->channel - 1] : NULL)
+
+static inline struct efx_channel *
+efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels);
+ return efx->channel[efx->tx_channel_offset + index];
+}
+
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+ return efx->channel[efx->xdp_channel_offset + index];
+}
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->xdp_channel_offset <
+ channel->efx->n_xdp_channels;
+}
+
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+ return channel && channel->channel >= channel->efx->tx_channel_offset;
+}
+
+static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
+{
+ if (efx_channel_is_xdp_tx(channel))
+ return channel->efx->xdp_tx_per_channel;
+ return channel->efx->tx_queues_per_channel;
+}
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type)
+{
+ EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES);
+ return channel->tx_queue_by_type[type];
+}
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type)
+{
+ struct efx_channel *channel = efx_get_tx_channel(efx, index);
+
+ return efx_channel_get_tx_queue(channel, type);
+}
+
+/* Iterate over all TX queues belonging to a channel */
+#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + \
+ efx_channel_num_tx_queues(_channel); \
+ _tx_queue++)
+
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+ return channel->rx_queue.core_index >= 0;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+ EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
+}
+
+/* Iterate over all RX queues belonging to a channel */
+#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
+ if (!efx_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+ return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+ return efx_rx_queue_channel(rx_queue)->channel;
+}
+
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
+ unsigned int index)
+{
+ return &rx_queue->buffer[index];
+}
+
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
+/**
+ * EFX_MAX_FRAME_LEN - calculate maximum frame length
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU. The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding. This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle). If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether. We work around
+ * this by adding a further 16 bytes.
+ */
+#define EFX_FRAME_PAD 16
+#define EFX_MAX_FRAME_LEN(mtu) \
+ (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8))
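+/* Worked example (illustrative): for the default MTU of 1500,
+ * 1500 + ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4) + 16 = 1538,
+ * which ALIGN() rounds up to the next multiple of 8, i.e. 1544.
+ */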
+
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+/* Get the max fill level of the TX queues on this channel */
+static inline unsigned int
+efx_channel_tx_fill_level(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ unsigned int fill_level = 0;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ fill_level = max(fill_level,
+ tx_queue->insert_count - tx_queue->read_count);
+
+ return fill_level;
+}
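+/* Note (illustrative): insert_count and read_count are free-running
+ * unsigned counters, so the subtraction above stays correct across
+ * wrap-around; e.g. insert_count = 3 (after wrapping) and read_count =
+ * 0xfffffffe give 3 - 0xfffffffe = 5 outstanding descriptors.
+ */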
+
+/* Conservative approximation of efx_channel_tx_fill_level using cached value */
+static inline unsigned int
+efx_channel_tx_old_fill_level(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ unsigned int fill_level = 0;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ fill_level = max(fill_level,
+ tx_queue->insert_count - tx_queue->old_read_count);
+
+ return fill_level;
+}
+
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but is
+ * always present in features.
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+ const struct net_device *net_dev = efx->net_dev;
+
+ return net_dev->features | net_dev->hw_features;
+}
+
+/* Get the current TX queue insert index. */
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
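+/* Note (illustrative): the ring size is a power of two and ptr_mask is
+ * size - 1, so the AND above reduces the free-running insert_count to a
+ * ring index; e.g. with a 1024-entry ring, insert_count 1030 maps to
+ * slot 6.
+ */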
+
+/* Get a TX buffer. */
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+/* Get a TX buffer, checking it's not currently in use. */
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer =
+ __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ EFX_WARN_ON_ONCE_PARANOID(buffer->len);
+ EFX_WARN_ON_ONCE_PARANOID(buffer->flags);
+ EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
new file mode 100644
index 000000000000..0ea0433a6230
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status, MAC stats, etc.
+ *
+ **************************************************************************/
+
+int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len, gfp_t gfp_flags)
+{
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ return 0;
+}
+
+void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+ if (buffer->addr) {
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
+ buffer->addr = NULL;
+ }
+}
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_siena_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
+
+void efx_siena_event_test_start(struct efx_channel *channel)
+{
+ channel->event_test_cpu = -1;
+ smp_wmb();
+ channel->efx->type->ev_test_generate(channel);
+}
+
+int efx_siena_irq_test_start(struct efx_nic *efx)
+{
+ efx->last_irq_cpu = -1;
+ smp_wmb();
+ return efx->type->irq_test_generate(efx);
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_siena_init_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int n_irqs;
+ int rc;
+
+ if (!EFX_INT_MODE_USE_MSI(efx)) {
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
+ efx->name, efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
+ goto fail1;
+ }
+ efx->irqs_hooked = true;
+ return 0;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ efx->net_dev->rx_cpu_rmap =
+ alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+ }
+#endif
+
+ /* Hook MSI or MSI-X interrupt */
+ n_irqs = 0;
+ efx_for_each_channel(channel, efx) {
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
+ IRQF_PROBE_SHARED, /* Not shared */
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
+ goto fail2;
+ }
+ ++n_irqs;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
+ channel->channel < efx->n_rx_channels) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ channel->irq);
+ if (rc)
+ goto fail2;
+ }
+#endif
+ }
+
+ efx->irqs_hooked = true;
+ return 0;
+
+ fail2:
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+ efx_for_each_channel(channel, efx) {
+ if (n_irqs-- == 0)
+ break;
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
+ }
+ fail1:
+ return rc;
+}
+
+void efx_siena_fini_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+
+ if (!efx->irqs_hooked)
+ return;
+ if (EFX_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
+ free_irq(efx->legacy_irq, efx);
+ }
+ efx->irqs_hooked = false;
+}
+
+/* Register dump */
+
+#define REGISTER_REVISION_FA 1
+#define REGISTER_REVISION_FB 2
+#define REGISTER_REVISION_FC 3
+#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
+#define REGISTER_REVISION_ED 4
+#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
+
+struct efx_nic_reg {
+ u32 offset:24;
+ u32 min_revision:3, max_revision:3;
+};
+
+#define REGISTER(name, arch, min_rev, max_rev) { \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
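+/* Expansion example (illustrative): REGISTER_AB(GPIO_CTL) becomes
+ *	{ FR_AB_GPIO_CTL, REGISTER_REVISION_FA, REGISTER_REVISION_FB }
+ * i.e. { FR_AB_GPIO_CTL, 1, 2 }, a register present on revisions A and B.
+ */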
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+};
+
+struct efx_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:3, max_revision:3;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ arch, min_rev, max_rev, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* We can't reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ F, A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ F, B, Z, 8, 1024),
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+ /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+};
+
+size_t efx_siena_get_regs_len(struct efx_nic *efx)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(efx_oword_t);
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
+
+ return len;
+}
+
+void efx_siena_get_regs(struct efx_nic *efx, void *buf)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+ buf += sizeof(efx_oword_t);
+ }
+ }
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit SRAM */
+ efx_readd(efx, buf, table->offset + 4 * i);
+ break;
+ case 8: /* 64-bit SRAM */
+ efx_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit-readable register */
+ efx_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ efx_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
+
+/**
+ * efx_siena_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strscpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
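+/* Usage sketch (illustrative): an ethtool implementation would typically
+ * call this twice: once with names == NULL to size the string table, then
+ * again with a buffer of at least (count * ETH_GSTRING_LEN) bytes:
+ *
+ *	n_stats = efx_siena_describe_stats(desc, count, mask, NULL);
+ *	...
+ *	efx_siena_describe_stats(desc, count, mask, strings);
+ */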
+
+/**
+ * efx_siena_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least @count.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ stats[index] += val;
+ else
+ stats[index] = val;
+ }
+ }
+}
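+/* Worked example (illustrative): given a hypothetical descriptor
+ *	{ .name = "rx_bytes", .dma_width = 32, .offset = 8 }
+ * whose bit is set in the mask, this reads a __le32 at dma_buf + 8,
+ * converts it to CPU order, and stores or accumulates it into
+ * stats[index].
+ */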
+
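+/* Worked example (illustrative, assuming nothing was previously accounted
+ * while down): if the hardware total reads 100 when the interface goes down
+ * and 130 on the first update after it comes back up, the extra 30 is added
+ * to rx_nodesc_drops_while_down and the reported value stays at 100.
+ */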
+void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
diff --git a/drivers/net/ethernet/sfc/siena/nic.h b/drivers/net/ethernet/sfc/siena/nic.h
new file mode 100644
index 000000000000..6def31070edb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_NIC_H
+#define EFX_NIC_H
+
+#include "nic_common.h"
+#include "efx.h"
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
+
+enum {
+ PHY_TYPE_NONE = 0,
+ PHY_TYPE_TXC43128 = 1,
+ PHY_TYPE_88E1111 = 2,
+ PHY_TYPE_SFX7101 = 3,
+ PHY_TYPE_QT2022C2 = 4,
+ PHY_TYPE_PM8358 = 6,
+ PHY_TYPE_SFT9001A = 8,
+ PHY_TYPE_QT2025C = 9,
+ PHY_TYPE_SFT9001B = 10,
+};
+
+enum {
+ SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
+ SIENA_STAT_tx_good_bytes,
+ SIENA_STAT_tx_bad_bytes,
+ SIENA_STAT_tx_packets,
+ SIENA_STAT_tx_bad,
+ SIENA_STAT_tx_pause,
+ SIENA_STAT_tx_control,
+ SIENA_STAT_tx_unicast,
+ SIENA_STAT_tx_multicast,
+ SIENA_STAT_tx_broadcast,
+ SIENA_STAT_tx_lt64,
+ SIENA_STAT_tx_64,
+ SIENA_STAT_tx_65_to_127,
+ SIENA_STAT_tx_128_to_255,
+ SIENA_STAT_tx_256_to_511,
+ SIENA_STAT_tx_512_to_1023,
+ SIENA_STAT_tx_1024_to_15xx,
+ SIENA_STAT_tx_15xx_to_jumbo,
+ SIENA_STAT_tx_gtjumbo,
+ SIENA_STAT_tx_collision,
+ SIENA_STAT_tx_single_collision,
+ SIENA_STAT_tx_multiple_collision,
+ SIENA_STAT_tx_excessive_collision,
+ SIENA_STAT_tx_deferred,
+ SIENA_STAT_tx_late_collision,
+ SIENA_STAT_tx_excessive_deferred,
+ SIENA_STAT_tx_non_tcpudp,
+ SIENA_STAT_tx_mac_src_error,
+ SIENA_STAT_tx_ip_src_error,
+ SIENA_STAT_rx_bytes,
+ SIENA_STAT_rx_good_bytes,
+ SIENA_STAT_rx_bad_bytes,
+ SIENA_STAT_rx_packets,
+ SIENA_STAT_rx_good,
+ SIENA_STAT_rx_bad,
+ SIENA_STAT_rx_pause,
+ SIENA_STAT_rx_control,
+ SIENA_STAT_rx_unicast,
+ SIENA_STAT_rx_multicast,
+ SIENA_STAT_rx_broadcast,
+ SIENA_STAT_rx_lt64,
+ SIENA_STAT_rx_64,
+ SIENA_STAT_rx_65_to_127,
+ SIENA_STAT_rx_128_to_255,
+ SIENA_STAT_rx_256_to_511,
+ SIENA_STAT_rx_512_to_1023,
+ SIENA_STAT_rx_1024_to_15xx,
+ SIENA_STAT_rx_15xx_to_jumbo,
+ SIENA_STAT_rx_gtjumbo,
+ SIENA_STAT_rx_bad_gtjumbo,
+ SIENA_STAT_rx_overflow,
+ SIENA_STAT_rx_false_carrier,
+ SIENA_STAT_rx_symbol_error,
+ SIENA_STAT_rx_align_error,
+ SIENA_STAT_rx_length_error,
+ SIENA_STAT_rx_internal_error,
+ SIENA_STAT_rx_nodesc_drop_cnt,
+ SIENA_STAT_COUNT
+};
+
+/**
+ * struct siena_nic_data - Siena NIC state
+ * @efx: Pointer back to main interface structure
+ * @wol_filter_id: Wake-on-LAN packet filter id
+ * @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
+ * @local_addr_list: List of local addresses. Protected by %local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ * %local_addr_list. Protected by %local_lock.
+ * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
+ */
+struct siena_nic_data {
+ struct efx_nic *efx;
+ int wol_filter_id;
+ u64 stats[SIENA_STAT_COUNT];
+#ifdef CONFIG_SFC_SIENA_SRIOV
+ struct siena_vf *vf;
+ struct efx_channel *vfdi_channel;
+ unsigned vf_buftbl_base;
+ struct efx_buffer vfdi_status;
+ struct list_head local_addr_list;
+ struct list_head local_page_list;
+ struct mutex local_lock;
+ struct work_struct peer_work;
+#endif
+};
+
+extern const struct efx_nic_type siena_a0_nic_type;
+
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
+/* Falcon/Siena queue operations */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 filter_id,
+ struct efx_filter_spec *);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 *buf,
+ u32 size);
+#ifdef CONFIG_RFS_ACCEL
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+int efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+
+/* Global Resources */
+void efx_siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void efx_farch_finish_flr(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
+
+/* Tests */
+struct efx_farch_register_test {
+ unsigned address;
+ efx_oword_t mask;
+};
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
+
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
+
+#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h
new file mode 100644
index 000000000000..3af0405eeaa4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic_common.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_NIC_COMMON_H
+#define EFX_NIC_COMMON_H
+
+#include "net_driver.h"
+#include "efx_common.h"
+#include "mcdi.h"
+#include "ptp.h"
+
+enum {
+ /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
+ * They are not supported by this driver but these revision numbers
+ * form part of the ethtool API for register dumping.
+ */
+ EFX_REV_SIENA_A0 = 3,
+ EFX_REV_HUNT_A0 = 4,
+ EFX_REV_EF100 = 5,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+ return efx->type->revision;
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
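+ * For example, if the DMA write lands low dword first, the high dword
+ * still reads as all ones (the cleared pattern), so the half-written
+ * event is treated as not yet present; a single 64-bit "not all ones"
+ * compare could instead accept it prematurely.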
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Report whether this TX queue would be empty for the given write_count.
+ * May return false negative.
+ */
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ * We use the write_count used for the last doorbell push, to get the
+ * NIC's view of the tx queue.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
+
+ tx_queue->empty_read_count = 0;
+ return was_empty && tx_queue->write_count - write_count == 1;
+}
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE 4096
+/* Size and alignment of buffer table entries (same) */
+#define EFX_BUF_SIZE EFX_PAGE_SIZE
+
+/* NIC-generic software stats */
+enum {
+ GENERIC_STAT_rx_noskb_drops,
+ GENERIC_STAT_rx_nodesc_trunc,
+ GENERIC_STAT_COUNT
+};
+
+#define EFX_GENERIC_SW_STAT(ext_name) \
+ [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
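+/* For instance, EFX_GENERIC_SW_STAT(rx_noskb_drops) expands to
+ * [GENERIC_STAT_rx_noskb_drops] = { "rx_noskb_drops", 0, 0 },
+ * i.e. a named entry with zero DMA width and offset (a software-only
+ * statistic).
+ */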
+
+/* TX data path */
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ if (tx_queue->efx->type->tx_remove)
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
+
+/* RX data path */
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
+
+/* Event data path */
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+
+void efx_siena_event_test_start(struct efx_channel *channel);
+
+bool efx_siena_event_present(struct efx_channel *channel);
+
+static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ if (efx->type->sensor_event)
+ efx->type->sensor_event(efx, ev);
+}
+
+static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
+{
+ return efx->type->rx_recycle_ring_size(efx);
+}
+
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
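+ * Illustrative case: if *stat is 100 and a racy readout computes
+ * diff = 98, then (s64)(98 - 100) < 0 and the stale value is ignored;
+ * a later diff of 103 is stored. The stat therefore never decreases.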
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+ if ((s64)(diff - *stat) > 0)
+ *stat = diff;
+}
+
+/* Interrupts */
+int efx_siena_init_interrupt(struct efx_nic *efx);
+int efx_siena_irq_test_start(struct efx_nic *efx);
+void efx_siena_fini_interrupt(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+ return READ_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+ return READ_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len, gfp_t gfp_flags);
+void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+size_t efx_siena_get_regs_len(struct efx_nic *efx);
+void efx_siena_get_regs(struct efx_nic *efx, void *buf);
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u64 *stats,
+ const void *dma_buf, bool accumulate);
+void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+
+#define EFX_MAX_FLUSH_TIME 5000
+
+#endif /* EFX_NIC_COMMON_H */
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
new file mode 100644
index 000000000000..7c46752e6eae
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -0,0 +1,2201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities. Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would block
+ * NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ * The event contains the packet's UUID and sequence number, together
+ * with the hardware timestamp. The PTP receive packet queue is searched
+ * for this UUID/sequence number and, if found, put on a pending queue.
+ * Packets not matching are delivered without timestamps (MCDI events will
+ * always arrive after the actual packet).
+ * It is important for the operation of the PTP protocol that the ordering
+ * of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ * If work is waiting, synchronise host and hardware time
+ *
+ * Transmit: send packet through MC, which returns the transmission time
+ * that is converted to an appropriate timestamp.
+ *
+ * Receive: the packet's reception time is converted to an appropriate
+ * timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "tx.h"
+#include "nic.h" /* indirectly includes ptp.h */
+
+/* Maximum number of MCDI event fragments expected to make up a PTP event */
+#define MAX_EVENT_FRAGS 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define MAX_SYNCHRONISE_WAIT_MS 2
+
+/* How long, at most, to spend synchronising */
+#define SYNCHRONISE_PERIOD_NS 250000
+
+/* How often to update the shared memory time */
+#define SYNCHRONISATION_GRANULARITY_NS 200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define MAX_SYNCHRONISATION_NS 1000
+
+/* How many (MC) receive events can be queued */
+#define MAX_RECEIVE_EVENTS 8
+
+/* Length of (modified) moving average. */
+#define AVERAGE_LENGTH 16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS 10
+
+/* Offsets into PTP packet for identification. These offsets are from the
+ * start of the IP header, not the MAC header. Note that neither PTP V1 nor
+ * PTP V2 permits the use of IPv4 options.
+ */
+#define PTP_DPORT_OFFSET 22
+
+#define PTP_V1_VERSION_LENGTH 2
+#define PTP_V1_VERSION_OFFSET 28
+
+#define PTP_V1_UUID_LENGTH 6
+#define PTP_V1_UUID_OFFSET 50
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V1_MIN_LENGTH 64
+
+#define PTP_V2_VERSION_LENGTH 1
+#define PTP_V2_VERSION_OFFSET 29
+
+#define PTP_V2_UUID_LENGTH 8
+#define PTP_V2_UUID_OFFSET 48
+
+/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and a PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard. The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH 6
+#define PTP_V2_MC_UUID_OFFSET 50
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V2_MIN_LENGTH 63
+
+#define PTP_MIN_LENGTH 63
+
+#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_EVENT_PORT 319
+#define PTP_GENERAL_PORT 320
+
+/* Annoyingly the format of the version numbers differs between
+ * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
+ */
+#define PTP_VERSION_V1 1
+
+#define PTP_VERSION_V2 2
+#define PTP_VERSION_V2_MASK 0x0f
+
+enum ptp_packet_state {
+ PTP_PACKET_STATE_UNMATCHED = 0,
+ PTP_PACKET_STATE_MATCHED,
+ PTP_PACKET_STATE_TIMED_OUT,
+ PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* The NIC is synchronised with a single word of time only, comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30, so 2 bits remain for seconds.
+ */
+#define MC_NANOSECOND_BITS 30
+#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
+#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
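+/* Illustratively, a host time of 5.000000123s packs as
+ * ((5 & MC_SECOND_MASK) << MC_NANOSECOND_BITS) | 123 - only the low
+ * two bits of the seconds survive in the single 32-bit word.
+ */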
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB 1000000
+
+/* Precalculate scale word to avoid long long division at runtime */
+/* This is equivalent to 2^66 / 10^9. */
+#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL)
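+/* Derivation: 10^9 = 2^9 * 5^9 and 5^9 = 1953125, so
+ * 2^66 / 10^9 = 2^57 / 1953125, which fits in a 64-bit constant.
+ */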
+
+/* How much to shift down after scaling to convert to FP40 */
+#define PPB_SHIFT_FP40 26
+/* ... and FP44. */
+#define PPB_SHIFT_FP44 22
+
+#define PTP_SYNC_ATTEMPTS 4
+
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @words: UUID and (partial) sequence number
+ * @expiry: Time after which the packet should be delivered irrespective of
+ * event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ * whether that is of no interest.
+ */
+struct efx_ptp_match {
+ u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
+ unsigned long expiry;
+ enum ptp_packet_state state;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @link: list of events
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ * @expiry: Time at which the packet arrived
+ */
+struct efx_ptp_event_rx {
+ struct list_head link;
+ u32 seq0;
+ u32 seq1;
+ ktime_t hwtimestamp;
+ unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
+ * host end time being seen
+ * @window: Difference of host_end and host_start
+ */
+struct efx_ptp_timeset {
+ u32 host_start;
+ u32 major;
+ u32 minor;
+ u32 host_end;
+ u32 wait;
+ u32 window; /* Derived: end - start, allowing for wrap */
+};
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ * reset (disable, enable).
+ * @rxfilter_event: Receive filter for the PTP event port (319)
+ * @rxfilter_general: Receive filter for the PTP general port (320)
+ * @rxfilter_installed: Receive filter installed
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled
+ * @mode: Mode in which PTP operating (PTP version)
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time: contains time details
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much earlier than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much later than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ * field in MCDI time sync event.
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections: contains corrections details
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ * timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ * timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ * transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ * receive timestamps
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion
+ * frequency adjustment into a fixed point fractional nanosecond format.
+ * @current_adjfreq: Current ppb adjustment.
+ * @phc_clock: Pointer to registered phc device (if primary function)
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ * allocations in main data path).
+ * @good_syncs: Number of successful synchronisations.
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @bad_syncs: Number of failed synchronisations.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
+ * @timeset: Last set of synchronisation statistics.
+ * @xmit_skb: Transmit SKB function.
+ */
+struct efx_ptp_data {
+ struct efx_nic *efx;
+ struct efx_channel *channel;
+ bool rx_ts_inline;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct list_head evt_list;
+ struct list_head evt_free_list;
+ spinlock_t evt_lock;
+ struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+ struct workqueue_struct *workwq;
+ struct work_struct work;
+ bool reset_required;
+ u32 rxfilter_event;
+ u32 rxfilter_general;
+ bool rxfilter_installed;
+ struct hwtstamp_config config;
+ bool enabled;
+ unsigned int mode;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ struct {
+ u32 minor_max;
+ u32 sync_event_diff_min;
+ u32 sync_event_diff_max;
+ unsigned int sync_event_minor_shift;
+ } nic_time;
+ unsigned int min_synchronisation_ns;
+ unsigned int capabilities;
+ struct {
+ s32 ptp_tx;
+ s32 ptp_rx;
+ s32 pps_out;
+ s32 pps_in;
+ s32 general_tx;
+ s32 general_rx;
+ } ts_corrections;
+ efx_qword_t evt_frags[MAX_EVENT_FRAGS];
+ int evt_frag_idx;
+ int evt_code;
+ struct efx_buffer start;
+ struct pps_event_time host_time_pps;
+ unsigned int adjfreq_ppb_shift;
+ s64 current_adjfreq;
+ struct ptp_clock *phc_clock;
+ struct ptp_clock_info phc_clock_info;
+ struct work_struct pps_work;
+ struct workqueue_struct *pps_workwq;
+ bool nic_ts_enabled;
+ efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)];
+
+ unsigned int good_syncs;
+ unsigned int fast_syncs;
+ unsigned int bad_syncs;
+ unsigned int sync_timeouts;
+ unsigned int no_time_syncs;
+ unsigned int invalid_sync_windows;
+ unsigned int undersize_sync_windows;
+ unsigned int oversize_sync_windows;
+ unsigned int rx_no_timestamp;
+ struct efx_ptp_timeset
+ timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+ void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
+};
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *e_ts);
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on);
+
+bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
+{
+ return efx_has_cap(efx, TX_MAC_TIMESTAMPING);
+}
+
+/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
+ * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit.
+ */
+static bool efx_ptp_want_txqs(struct efx_channel *channel)
+{
+ return efx_siena_ptp_use_mac_tx_timestamps(channel->efx);
+}
+
+#define PTP_SW_STAT(ext_name, field_name) \
+ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name) \
+ { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+ PTP_SW_STAT(ptp_good_syncs, good_syncs),
+ PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+ PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+ PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+ PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+ PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+ PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+ PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+ PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+ PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+ PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+ PTP_MC_STAT(ptp_timestamp_packets, TS),
+ PTP_MC_STAT(ptp_filter_matches, FM),
+ PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+ if (!efx->ptp_data)
+ return 0;
+
+ return efx_siena_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask, strings);
+}
+
+size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+ size_t i;
+ int rc;
+
+ if (!efx->ptp_data)
+ return 0;
+
+ /* Copy software statistics */
+ for (i = 0; i < PTP_STAT_COUNT; i++) {
+ if (efx_ptp_stat_desc[i].dma_width)
+ continue;
+ stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+ efx_ptp_stat_desc[i].offset);
+ }
+
+ /* Fetch MC statistics. We *must* fill in all statistics or
+ * risk leaking kernel memory to userland, so if the MCDI
+ * request fails we pretend we got zeroes.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ memset(outbuf, 0, sizeof(outbuf));
+ efx_siena_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask,
+ stats, _MCDI_PTR(outbuf, 0), false);
+
+ return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is s and ns */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec64 ts = ns_to_timespec64(ns);
+ *nic_major = (u32)ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
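+/* Worked example (illustrative): 0.5s = 500000000ns scales to
+ * (500000000 * NS_TO_S27_MULT) >> NS_TO_S27_SHIFT = 2^26 = half of
+ * S27_MINOR_MAX, and the reverse shift in efx_ptp_s27_to_ktime()
+ * recovers exactly 500000000ns.
+ */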
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec64 ts = ns_to_timespec64(ns);
+ u32 maj = (u32)ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ *nic_major = (u32)ts.tv_sec;
+ *nic_minor = ts.tv_nsec * 4;
+}
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt;
+
+ nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+ correction = DIV_ROUND_CLOSEST(correction, 4);
+
+ kt = ktime_set(nic_major, nic_minor);
+
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx)
+{
+ return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+ struct efx_channel *channel = efx_siena_ptp_channel(efx);
+ u32 major = 0;
+
+ if (channel)
+ major = channel->sync_timestamp_major;
+ return major;
+}
+
+/* The 8000 series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+ struct efx_ptp_data *ptp,
+ u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ u32 sync_timestamp;
+ ktime_t kt = { 0 };
+ s16 delta;
+
+ if (!(nic_major & 0x80000000)) {
+ WARN_ON_ONCE(nic_major >> 16);
+
+ /* Medford provides 48 bits of timestamp, so we must get the top
+ * 16 bits from the timesync event state.
+ *
+ * We only have the lower 16 bits of the time now, but we do
+ * have a full resolution timestamp from some point in the past. As
+ * long as the difference between the (real) now and the sync
+ * is less than 2^15, then we can reconstruct the difference
+ * between those two numbers using only the lower 16 bits of
+ * each.
+ *
+ * Put another way
+ *
+ * a - b = ((a mod k) - b) mod k
+ *
+ * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
+ * (a mod k) and b, so can calculate the delta, a - b.
+ *
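+ * Illustrative numbers: with a sync major time of 0x0001fff0 and a
+ * truncated nic_major of 0x0005, delta = (s16)(0x0005 - 0xfff0) =
+ * 0x15 = 21, so the reconstructed major is 0x0001fff0 + 21 =
+ * 0x00020005.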
+ */
+ sync_timestamp = last_sync_timestamp_major(efx);
+
+ /* Because delta is s16 this does an implicit mask down to
+ * 16 bits which is what we need, assuming
+ * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
+ * we can deal with the (unlikely) case of sync timestamps
+ * arriving from the future.
+ */
+ delta = nic_major - sync_timestamp;
+
+ /* Recover the fully specified time now, by applying the offset
+ * to the (fully specified) sync time.
+ */
+ nic_major = sync_timestamp + delta;
+
+ kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+ correction);
+ }
+ return kt;
+}
+
+ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ ktime_t kt;
+
+ if (efx_siena_ptp_use_mac_tx_timestamps(efx))
+ kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ else
+ kt = ptp->nic_to_kernel_time(
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ return kt;
+}
+
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation we
+ * use the default format for compatibility with older NICs i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0) {
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ } else if (rc == -EINVAL) {
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ } else if (rc == -EPERM) {
+ pci_info(efx->pci_dev, "no PTP support\n");
+ return rc;
+ } else {
+ efx_siena_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+ outbuf, sizeof(outbuf), rc);
+ return rc;
+ }
+
+ switch (fmt) {
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ ptp->nic_time.minor_max = 1 << 27;
+ ptp->nic_time.sync_event_minor_shift = 19;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ ptp->nic_time.minor_max = 1000000000;
+ ptp->nic_time.sync_event_minor_shift = 22;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+ ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+ ptp->nic_time.minor_max = 4000000000UL;
+ ptp->nic_time.sync_event_minor_shift = 24;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Precalculate acceptable difference between the minor time in the
+ * packet prefix and the last MCDI time sync event. We expect the
+ * packet prefix timestamp to be after the sync event by up to one
+ * sync event interval (0.25s) but we allow it to exceed this by a
+ * fuzz factor of 0.1s.
+ */
+ ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+ - (ptp->nic_time.minor_max / 10);
+ ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+ + (ptp->nic_time.minor_max / 10);
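+	/* For the s27 minor format (minor_max = 2^27 = 134217728) this
+	 * works out, illustratively, to sync_event_diff_min = 120795956
+	 * and sync_event_diff_max = 46976204 minor ticks.
+	 */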
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function may now return
+	 * a value to use for the minimum acceptable corrected synchronization
+	 * window and may return further capabilities.
+	 * If we have the extra information, store it. For older firmware that
+	 * does not implement the extended command, use the default value.
+ */
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->capabilities = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+ else
+ ptp->capabilities = 0;
+
+ /* Set up the shift for conversion between frequency
+ * adjustments in parts-per-billion and the fixed-point
+ * fractional ns format that the adapter uses.
+ */
+ if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+ else
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
+ return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
+ int rc;
+ size_t out_len;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+
+ if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) {
+ efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX);
+ efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX);
+ } else {
+ efx->ptp_data->ts_corrections.general_tx =
+ efx->ptp_data->ts_corrections.ptp_tx;
+ efx->ptp_data->ts_corrections.general_rx =
+ efx->ptp_data->ts_corrections.ptp_rx;
+ }
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.ptp_tx = 0;
+ efx->ptp_data->ts_corrections.ptp_rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ efx->ptp_data->ts_corrections.general_tx = 0;
+ efx->ptp_data->ts_corrections.general_rx = 0;
+ } else {
+ efx_siena_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+ outbuf, sizeof(outbuf), rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_siena_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
+}
+
+/* Disable MCDI PTP support.
+ *
+ * Note that this function should never rely on the presence of ptp_data -
+ * may be called before that exists.
+ */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function
+ * should only have been called during probe.
+ */
+ if (rc == -ENOSYS || rc == -EPERM)
+ pci_info(efx->pci_dev, "no PTP support\n");
+ else if (rc)
+ efx_siena_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(q))) {
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+ netif_err(efx, drv, efx->net_dev,
+ "ERROR: PTP requires MSI-X and 1 additional interrupt"
+ "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+ struct pps_event_time *last_time)
+{
+ struct pps_event_time now;
+ struct timespec64 limit;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int *mc_running = ptp->start.addr;
+
+ pps_get_ts(&now);
+ limit = now.ts_real;
+ timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+ /* Write host time for specified period or until MC is done */
+ while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
+ READ_ONCE(*mc_running)) {
+ struct timespec64 update_time;
+ unsigned int host_time;
+
+ /* Don't update continuously to avoid saturating the PCIe bus */
+ update_time = now.ts_real;
+ timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+ do {
+ pps_get_ts(&now);
+ } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
+ READ_ONCE(*mc_running));
+
+ /* Synchronise NIC with single word of time only */
+ host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+ now.ts_real.tv_nsec);
+ /* Update host time in NIC memory */
+ efx->type->ptp_write_host_time(efx, host_time);
+ }
+ *last_time = now;
+}
+
+/* Read a timeset from the MC's results and partial process. */
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+ struct efx_ptp_timeset *timeset)
+{
+ unsigned start_ns, end_ns;
+
+ timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
+	timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+ /* Ignore seconds */
+ start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+ end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+ /* Allow for rollover */
+ if (end_ns < start_ns)
+ end_ns += NSEC_PER_SEC;
+ /* Determine duration of operation */
+ timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen. The minimum value represents the "best" possible time and events
+ * too much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+ size_t response_length,
+ const struct pps_event_time *last_time)
+{
+ unsigned number_readings =
+ MCDI_VAR_ARRAY_LEN(response_length,
+ PTP_OUT_SYNCHRONIZE_TIMESET);
+ unsigned i;
+ unsigned ngood = 0;
+ unsigned last_good = 0;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ u32 last_sec;
+ u32 start_sec;
+ struct timespec64 delta;
+ ktime_t mc_time;
+
+ if (number_readings == 0)
+ return -EAGAIN;
+
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC times when it finishes reading the
+ * host time so the corrected window time should be fairly constant
+ * for a given platform. Increment stats for any results that appear
+ * to be erroneous.
+ */
+ for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec64 wait;
+
+ efx_ptp_read_timeset(
+ MCDI_ARRAY_STRUCT_PTR(synch_buf,
+ PTP_OUT_SYNCHRONIZE_TIMESET, i),
+ &ptp->timeset[i]);
+
+ wait = ktime_to_timespec64(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+	 * times. If it is smaller than this, it is most likely to be
+	 * a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window < SYNCHRONISATION_GRANULARITY_NS) {
+ ++ptp->invalid_sync_windows;
+ } else if (corrected >= MAX_SYNCHRONISATION_NS) {
+ ++ptp->oversize_sync_windows;
+ } else if (corrected < ptp->min_synchronisation_ns) {
+ ++ptp->undersize_sync_windows;
+ } else {
+ ngood++;
+ last_good = i;
+ }
+ }
+
+ if (ngood == 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "PTP no suitable synchronisations\n");
+ return -EAGAIN;
+ }
+
+ /* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
+ * the start reading and the last value written by the host. The
+ * timescales are such that a gap of more than one second is never
+ * expected. delta is *not* normalised.
+ */
+ start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+ last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+ if (start_sec != last_sec &&
+ ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
+ }
+ delta.tv_sec = (last_sec - start_sec) & 1;
+ delta.tv_nsec =
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
+
+ /* Calculate delay from NIC top of second to last_time */
+ delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;
+
+ /* Set PPS timestamp to match NIC top of second */
+ ptp->host_time_pps = *last_time;
+ pps_sub_ts(&ptp->host_time_pps, delta);
+
+ return 0;
+}
+
+/* Synchronize times between the host and the MC */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
+ size_t response_length;
+ int rc;
+ unsigned long timeout;
+ struct pps_event_time last_time = {};
+ unsigned int loops = 0;
+ int *start = ptp->start.addr;
+
+ MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+ num_readings);
+ MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+ ptp->start.dma_addr);
+
+ /* Clear flag that signals MC ready */
+ WRITE_ONCE(*start, 0);
+ rc = efx_siena_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ EFX_WARN_ON_ONCE_PARANOID(rc);
+
+ /* Wait for start from MCDI (or timeout) */
+ timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+ while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
+		udelay(20);	/* The MC usually starts execution quickly */
+ loops++;
+ }
+
+ if (loops <= 1)
+ ++ptp->fast_syncs;
+ if (!time_before(jiffies, timeout))
+ ++ptp->sync_timeouts;
+
+ if (READ_ONCE(*start))
+ efx_ptp_send_times(efx, &last_time);
+
+ /* Collect results */
+ rc = efx_siena_mcdi_rpc_finish(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+ synch_buf, sizeof(synch_buf),
+ &response_length);
+ if (rc == 0) {
+ rc = efx_ptp_process_times(efx, synch_buf, response_length,
+ &last_time);
+ if (rc == 0)
+ ++ptp->good_syncs;
+ else
+ ++ptp->no_time_syncs;
+ }
+
+ /* Increment the bad syncs counter if the synchronize fails, whatever
+ * the reason.
+ */
+ if (rc != 0)
+ ++ptp->bad_syncs;
+
+ return rc;
+}
+
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ u8 type = efx_tx_csum_type_skb(skb);
+ struct efx_tx_queue *tx_queue;
+
+ tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
+ if (tx_queue && tx_queue->timestamping) {
+ efx_enqueue_skb(tx_queue, skb);
+ } else {
+ WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire. */
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ struct skb_shared_hwtstamps timestamps;
+ int rc = -EIO;
+ MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+ size_t len;
+
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ rc = skb_linearize(skb);
+ if (rc != 0)
+ goto fail;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ rc = skb_checksum_help(skb);
+ if (rc != 0)
+ goto fail;
+ }
+ skb_copy_from_linear_data(skb,
+ MCDI_PTR(ptp_data->txbuf,
+ PTP_IN_TRANSMIT_PACKET),
+ skb->len);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, ptp_data->txbuf,
+ MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), txtime,
+ sizeof(txtime), &len);
+ if (rc != 0)
+ goto fail;
+
+ memset(&timestamps, 0, sizeof(timestamps));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.ptp_tx);
+
+ skb_tstamp_tx(skb, &timestamps);
+
+ rc = 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct list_head *cursor;
+ struct list_head *next;
+
+ if (ptp->rx_ts_inline)
+ return;
+
+ /* Drop time-expired events */
+ spin_lock_bh(&ptp->evt_lock);
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx,
+ link);
+ if (time_after(jiffies, evt->expiry)) {
+ list_move(&evt->link, &ptp->evt_free_list);
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP rx event dropped\n");
+ }
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+ struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ bool evts_waiting;
+ struct list_head *cursor;
+ struct list_head *next;
+ struct efx_ptp_match *match;
+ enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
+ spin_lock_bh(&ptp->evt_lock);
+ evts_waiting = !list_empty(&ptp->evt_list);
+ spin_unlock_bh(&ptp->evt_lock);
+
+ if (!evts_waiting)
+ return PTP_PACKET_STATE_UNMATCHED;
+
+ match = (struct efx_ptp_match *)skb->cb;
+ /* Look for a matching timestamp in the event queue */
+ spin_lock_bh(&ptp->evt_lock);
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+ if ((evt->seq0 == match->words[0]) &&
+ (evt->seq1 == match->words[1])) {
+ struct skb_shared_hwtstamps *timestamps;
+
+ /* Match - add in hardware timestamp */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp = evt->hwtimestamp;
+
+ match->state = PTP_PACKET_STATE_MATCHED;
+ rc = PTP_PACKET_STATE_MATCHED;
+ list_move(&evt->link, &ptp->evt_free_list);
+ break;
+ }
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+
+ return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ */
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ptp->rxq))) {
+ struct efx_ptp_match *match;
+
+ match = (struct efx_ptp_match *)skb->cb;
+ if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
+ __skb_queue_tail(q, skb);
+ } else if (efx_ptp_match_rx(efx, skb) ==
+ PTP_PACKET_STATE_MATCHED) {
+ __skb_queue_tail(q, skb);
+ } else if (time_after(jiffies, match->expiry)) {
+ match->state = PTP_PACKET_STATE_TIMED_OUT;
+ ++ptp->rx_no_timestamp;
+ __skb_queue_tail(q, skb);
+ } else {
+ /* Replace unprocessed entry and stop */
+ skb_queue_head(&ptp->rxq, skb);
+ break;
+ }
+ }
+}
+
+/* Complete processing of a received packet */
+static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+}
+
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_filter_spec rxfilter;
+ int rc;
+
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
+
+ /* Must filter on both event and general ports to ensure
+ * that there is no packet re-ordering.
+ */
+ efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(
+ efx_channel_get_rx_queue(ptp->channel)));
+ rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+ htonl(PTP_ADDRESS),
+ htons(PTP_EVENT_PORT));
+ if (rc != 0)
+ return rc;
+
+ rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ if (rc < 0)
+ return rc;
+ ptp->rxfilter_event = rc;
+
+ efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(
+ efx_channel_get_rx_queue(ptp->channel)));
+ rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+ htonl(PTP_ADDRESS),
+ htons(PTP_GENERAL_PORT));
+ if (rc != 0)
+ goto fail;
+
+ rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ if (rc < 0)
+ goto fail;
+ ptp->rxfilter_general = rc;
+
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ptp_enable(efx);
+ if (rc != 0)
+ goto fail;
+
+ ptp->evt_frag_idx = 0;
+ ptp->current_adjfreq = 0;
+
+ return 0;
+
+fail:
+ efx_ptp_remove_multicast_filters(efx);
+ return rc;
+}
+
+static int efx_ptp_stop(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct list_head *cursor;
+ struct list_head *next;
+ int rc;
+
+ if (ptp == NULL)
+ return 0;
+
+ rc = efx_ptp_disable(efx);
+
+ efx_ptp_remove_multicast_filters(efx);
+
+ /* Make sure RX packets are really delivered */
+ efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
+ skb_queue_purge(&efx->ptp_data->txq);
+
+ /* Drop any pending receive events */
+ spin_lock_bh(&efx->ptp_data->evt_lock);
+ list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
+ list_move(cursor, &efx->ptp_data->evt_free_list);
+ }
+ spin_unlock_bh(&efx->ptp_data->evt_lock);
+
+ return rc;
+}
+
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+ if (efx->ptp_data && efx->ptp_data->enabled)
+ return efx_ptp_start(efx);
+ return 0;
+}
+
+static void efx_ptp_pps_worker(struct work_struct *work)
+{
+ struct efx_ptp_data *ptp =
+ container_of(work, struct efx_ptp_data, pps_work);
+ struct efx_nic *efx = ptp->efx;
+ struct ptp_clock_event ptp_evt;
+
+ if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
+ return;
+
+ ptp_evt.type = PTP_CLOCK_PPSUSR;
+ ptp_evt.pps_times = ptp->host_time_pps;
+ ptp_clock_event(ptp->phc_clock, &ptp_evt);
+}
+
+static void efx_ptp_worker(struct work_struct *work)
+{
+ struct efx_ptp_data *ptp_data =
+ container_of(work, struct efx_ptp_data, work);
+ struct efx_nic *efx = ptp_data->efx;
+ struct sk_buff *skb;
+ struct sk_buff_head tempq;
+
+ if (ptp_data->reset_required) {
+ efx_ptp_stop(efx);
+ efx_ptp_start(efx);
+ return;
+ }
+
+ efx_ptp_drop_time_expired_events(efx);
+
+ __skb_queue_head_init(&tempq);
+ efx_ptp_process_events(efx, &tempq);
+
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ ptp_data->xmit_skb(efx, skb);
+
+ while ((skb = __skb_dequeue(&tempq)))
+ efx_ptp_process_rx(efx, skb);
+}
+
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc_siena",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime64 = efx_phc_gettime,
+ .settime64 = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+static int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
+{
+ struct efx_ptp_data *ptp;
+ int rc = 0;
+ unsigned int pos;
+
+ ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+ efx->ptp_data = ptp;
+ if (!efx->ptp_data)
+ return -ENOMEM;
+
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
+ rc = efx_siena_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
+ if (rc != 0)
+ goto fail1;
+
+ skb_queue_head_init(&ptp->rxq);
+ skb_queue_head_init(&ptp->txq);
+ ptp->workwq = create_singlethread_workqueue("sfc_siena_ptp");
+ if (!ptp->workwq) {
+ rc = -ENOMEM;
+ goto fail2;
+ }
+
+ if (efx_siena_ptp_use_mac_tx_timestamps(efx)) {
+ ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+ /* Request sync events on this channel. */
+ channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+ } else {
+ ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+ }
+
+ INIT_WORK(&ptp->work, efx_ptp_worker);
+ ptp->config.flags = 0;
+ ptp->config.tx_type = HWTSTAMP_TX_OFF;
+ ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+ INIT_LIST_HEAD(&ptp->evt_list);
+ INIT_LIST_HEAD(&ptp->evt_free_list);
+ spin_lock_init(&ptp->evt_lock);
+ for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+ list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
+ goto fail3;
+
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ } else if (ptp->phc_clock) {
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq =
+ create_singlethread_workqueue("sfc_siena_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ }
+ }
+ ptp->nic_ts_enabled = false;
+
+ return 0;
+fail4:
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+fail3:
+ destroy_workqueue(efx->ptp_data->workwq);
+
+fail2:
+ efx_siena_free_buffer(efx, &ptp->start);
+
+fail1:
+ kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
+
+ return rc;
+}
+
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised; this does
+ * not clash with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ channel->irq_moderation_us = 0;
+ channel->rx_queue.core_index = 0;
+
+ rc = efx_ptp_probe(efx, channel);
+ /* Failure to probe PTP is not fatal; this channel will just not be
+ * used for anything.
+ * In the case of EPERM, efx_ptp_probe will print its own message (in
+ * efx_ptp_get_attributes()), so we don't need to.
+ */
+ if (rc && rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to probe PTP, rc=%d\n", rc);
+ return 0;
+}
+
+static void efx_ptp_remove(struct efx_nic *efx)
+{
+ if (!efx->ptp_data)
+ return;
+
+ (void)efx_ptp_disable(efx);
+
+ cancel_work_sync(&efx->ptp_data->work);
+ if (efx->ptp_data->pps_workwq)
+ cancel_work_sync(&efx->ptp_data->pps_work);
+
+ skb_queue_purge(&efx->ptp_data->rxq);
+ skb_queue_purge(&efx->ptp_data->txq);
+
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
+
+ destroy_workqueue(efx->ptp_data->workwq);
+
+ efx_siena_free_buffer(efx, &efx->ptp_data->start);
+ kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
+}
+
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
+static void efx_ptp_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
+{
+ snprintf(buf, len, "%s-ptp", channel->efx->name);
+}
+
+/* Determine whether this packet should be processed by the PTP module
+ * or transmitted conventionally.
+ */
+bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ return efx->ptp_data &&
+ efx->ptp_data->enabled &&
+ skb->len >= PTP_MIN_LENGTH &&
+ skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+ likely(skb->protocol == htons(ETH_P_IP)) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) >= sizeof(struct iphdr) &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ skb_headlen(skb) >=
+ skb_transport_offset(skb) + sizeof(struct udphdr) &&
+ udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+}
+
+/* Receive a PTP packet. Packets are queued until the receive timestamp
+ * arrives from the MC; this will usually be after the packet itself
+ * arrives, because of the processing in the MC.
+ */
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+ u8 *match_data_012, *match_data_345;
+ unsigned int version;
+ u8 *data;
+
+ match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+
+ /* Correct version? */
+ if (ptp->mode == MC_CMD_PTP_MODE_V1) {
+ if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH))
+ return false;
+ data = skb->data;
+ version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
+ if (version != PTP_VERSION_V1)
+ return false;
+
+ /* PTP V1 uses all six bytes of the UUID to match the packet
+ * to the timestamp
+ */
+ match_data_012 = data + PTP_V1_UUID_OFFSET;
+ match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
+ } else {
+ if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH))
+ return false;
+ data = skb->data;
+ version = data[PTP_V2_VERSION_OFFSET];
+ if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2)
+ return false;
+
+ /* The original V2 implementation uses bytes 2-7 of
+ * the UUID to match the packet to the timestamp. This
+ * discards two of the bytes of the MAC address used
+ * to create the UUID (SF bug 33070). The PTP V2
+ * enhanced mode fixes this issue and uses bytes 0-2
+ * and byte 5-7 of the UUID.
+ */
+ match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
+ if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
+ } else {
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
+ BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
+ }
+ }
+
+ /* Does this packet require timestamping? */
+ if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ match->state = PTP_PACKET_STATE_UNMATCHED;
+
+ /* We expect the sequence number to be in the same position in
+ * the packet for PTP V1 and V2
+ */
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
+ /* Extract UUID/Sequence information */
+ match->words[0] = (match_data_012[0] |
+ (match_data_012[1] << 8) |
+ (match_data_012[2] << 16) |
+ (match_data_345[0] << 24));
+ match->words[1] = (match_data_345[1] |
+ (match_data_345[2] << 8) |
+ (data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
+ 16));
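+ /* Purely illustrative: in V1 mode, with UUID bytes u0..u5 and
+ * final sequence byte s, this packs
+ * words[0] = u0 | u1 << 8 | u2 << 16 | u3 << 24 and
+ * words[1] = u4 | u5 << 8 | s << 16, i.e. a 56-bit key for
+ * matching the packet against its timestamp event from the MC.
+ */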
+ } else {
+ match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
+ }
+
+ skb_queue_tail(&ptp->rxq, skb);
+ queue_work(ptp->workwq, &ptp->work);
+
+ return true;
+}
+
+/* Transmit a PTP packet. This has to be transmitted by the MC
+ * itself, through an MCDI call. MCDI calls aren't permitted
+ * in the transmit path so defer the actual transmission to a suitable worker.
+ */
+int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ skb_queue_tail(&ptp->txq, skb);
+
+ if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+ (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+ efx_xmit_hwtstamp_pending(skb);
+ queue_work(ptp->workwq, &ptp->work);
+
+ return NETDEV_TX_OK;
+}
+
+int efx_siena_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
+{
+ if ((enable_wanted != efx->ptp_data->enabled) ||
+ (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+ int rc = 0;
+
+ if (enable_wanted) {
+ /* Change of mode requires disable */
+ if (efx->ptp_data->enabled &&
+ (efx->ptp_data->mode != new_mode)) {
+ efx->ptp_data->enabled = false;
+ rc = efx_ptp_stop(efx);
+ if (rc != 0)
+ return rc;
+ }
+
+ /* Set new operating mode and establish
+ * baseline synchronisation, which must
+ * succeed.
+ */
+ efx->ptp_data->mode = new_mode;
+ if (netif_running(efx->net_dev))
+ rc = efx_ptp_start(efx);
+ if (rc == 0) {
+ rc = efx_ptp_synchronize(efx,
+ PTP_SYNC_ATTEMPTS * 2);
+ if (rc != 0)
+ efx_ptp_stop(efx);
+ }
+ } else {
+ rc = efx_ptp_stop(efx);
+ }
+
+ if (rc != 0)
+ return rc;
+
+ efx->ptp_data->enabled = enable_wanted;
+ }
+
+ return 0;
+}
+
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+ int rc;
+
+ if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+ (init->tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
+ return rc;
+
+ efx->ptp_data->config = *init;
+ return 0;
+}
+
+void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
+ struct ethtool_ts_info *ts_info)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
+
+ if (!ptp)
+ return;
+
+ ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
+ ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
+}
+
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int rc;
+
+ /* Not a PTP enabled port */
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ rc = efx_ptp_ts_init(efx, &config);
+ if (rc != 0)
+ return rc;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+ ? -EFAULT : 0;
+}
+
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+ sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ netif_err(efx, hw, efx->net_dev,
+ "PTP unexpected event length: got %d expected %d\n",
+ ptp->evt_frag_idx, expected_frag_len);
+ ptp->reset_required = true;
+ queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event. Put it on the event queue and
+ * start the worker thread. This is required because events and their
+ * corresponding packets may arrive in either order.
+ */
+static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ struct efx_ptp_event_rx *evt = NULL;
+
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
+ if (ptp->evt_frag_idx != 3) {
+ ptp_event_failure(efx, 3);
+ return;
+ }
+
+ spin_lock_bh(&ptp->evt_lock);
+ if (!list_empty(&ptp->evt_free_list)) {
+ evt = list_first_entry(&ptp->evt_free_list,
+ struct efx_ptp_event_rx, link);
+ list_del(&evt->link);
+
+ evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
+ evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
+ MCDI_EVENT_SRC) |
+ (EFX_QWORD_FIELD(ptp->evt_frags[1],
+ MCDI_EVENT_SRC) << 8) |
+ (EFX_QWORD_FIELD(ptp->evt_frags[0],
+ MCDI_EVENT_SRC) << 16));
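+ /* seq0/seq1 are reassembled LSB-first so that they can be
+ * compared against the words[] key that efx_ptp_rx() extracts
+ * from the corresponding packet.
+ */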
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
+ EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.ptp_rx);
+ evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+ list_add_tail(&evt->link, &ptp->evt_list);
+
+ queue_work(ptp->workwq, &ptp->work);
+ } else if (net_ratelimit()) {
+ /* Log a rate-limited warning message. */
+ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+}
+
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+
+ if (ptp->evt_frag_idx != 1) {
+ ptp_event_failure(efx, 1);
+ return;
+ }
+
+ netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ if (ptp->nic_ts_enabled)
+ queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+ if (!ptp) {
+ if (!efx->ptp_warned) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Received PTP event but PTP not set up\n");
+ efx->ptp_warned = true;
+ }
+ return;
+ }
+
+ if (!ptp->enabled)
+ return;
+
+ if (ptp->evt_frag_idx == 0) {
+ ptp->evt_code = code;
+ } else if (ptp->evt_code != code) {
+ netif_err(efx, hw, efx->net_dev,
+ "PTP out of sequence event %d\n", code);
+ ptp->evt_frag_idx = 0;
+ }
+
+ ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
+ if (!MCDI_EVENT_FIELD(*ev, CONT)) {
+ /* Process resulting event */
+ switch (code) {
+ case MCDI_EVENT_CODE_PTP_RX:
+ ptp_event_rx(efx, ptp);
+ break;
+ case MCDI_EVENT_CODE_PTP_FAULT:
+ ptp_event_fault(efx, ptp);
+ break;
+ case MCDI_EVENT_CODE_PTP_PPS:
+ ptp_event_pps(efx, ptp);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "PTP unknown event %d\n", code);
+ break;
+ }
+ ptp->evt_frag_idx = 0;
+ } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
+ netif_err(efx, hw, efx->net_dev,
+ "PTP too many event fragments\n");
+ ptp->evt_frag_idx = 0;
+ }
+}
+
+void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ /* When extracting the sync timestamp minor value, we should discard
+ * the least significant two bits. These are not required in order
+ * to reconstruct full-range timestamps and they are optionally used
+ * to report status depending on the options supplied when subscribing
+ * for sync events.
+ */
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC)
+ << ptp->nic_time.sync_event_minor_shift;
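+ /* e.g. (purely illustrative) an event byte of 0xA7 contributes
+ * 0xA4 after masking with 0xFC, before the shift restores its
+ * weight within the minor timestamp.
+ */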
+
+ /* If sync events have been disabled, we want to silently ignore this
+ * event, so throw away the result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ if (channel->sync_events_state != SYNC_EVENTS_VALID)
+ return;
+
+ pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = pkt_timestamp_minor - channel->sync_timestamp_minor;
+ if (pkt_timestamp_minor < channel->sync_timestamp_minor)
+ diff += ptp->nic_time.minor_max;
+
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ?
+ 1 : 0;
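+ /* Worked example with purely illustrative numbers: if minor_max
+ * were 1000, the sync minor 990 and the packet minor 10, then
+ * diff = 10 - 990 + 1000 = 20 and carry = 1 because
+ * 990 >= 1000 - 20: the packet crossed a one-second boundary
+ * after the sync event.
+ */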
+
+ if (diff <= ptp->nic_time.sync_event_diff_max) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= ptp->nic_time.sync_event_diff_min) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* it's outside tolerance in both directions. this might be
+ * indicative of us missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ ptp->nic_to_kernel_time(pkt_timestamp_major,
+ pkt_timestamp_minor,
+ ptp->ts_corrections.general_rx);
+}
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->efx;
+ MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
+ s64 adjustment_ns;
+ int rc;
+
+ if (delta > MAX_PPB)
+ delta = MAX_PPB;
+ else if (delta < -MAX_PPB)
+ delta = -MAX_PPB;
+
+ /* Convert ppb to fixed point ns taking care to round correctly. */
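+ /* The "+ (1 << (shift - 1))" term rounds to nearest rather than
+ * truncating: purely as an illustration, for x = 7 and shift = 2,
+ * (7 + 2) >> 2 = 2, whereas plain truncation gives 7 >> 2 = 1.
+ */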
+ adjustment_ns = ((s64)delta * PPB_SCALE_WORD +
+ (1 << (ptp_data->adjfreq_ppb_shift - 1))) >>
+ ptp_data->adjfreq_ppb_shift;
+
+ MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
+ NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ ptp_data->current_adjfreq = adjustment_ns;
+ return 0;
+}
+
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ u32 nic_major, nic_minor;
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->efx;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
+ return efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->efx;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
+ int rc;
+ ktime_t kt;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc != 0)
+ return rc;
+
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec64(kt);
+ return 0;
+}
+
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *e_ts)
+{
+ /* Get the current NIC time via efx_phc_gettime(), subtract it from
+ * the desired time to get the offset, then call efx_phc_adjtime()
+ * with that offset.
+ */
+ int rc;
+ struct timespec64 time_now;
+ struct timespec64 delta;
+
+ rc = efx_phc_gettime(ptp, &time_now);
+ if (rc != 0)
+ return rc;
+
+ delta = timespec64_sub(*e_ts, time_now);
+
+ rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta));
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request,
+ int enable)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ if (request->type != PTP_CLK_REQ_PPS)
+ return -EOPNOTSUPP;
+
+ ptp_data->nic_ts_enabled = !!enable;
+ return 0;
+}
+
+static const struct efx_channel_type efx_ptp_channel_type = {
+ .handle_no_channel = efx_ptp_handle_no_channel,
+ .pre_probe = efx_ptp_probe_channel,
+ .post_remove = efx_ptp_remove_channel,
+ .get_name = efx_ptp_get_channel_name,
+ /* no copy operation; there is no need to reallocate this channel */
+ .receive_skb = efx_ptp_rx,
+ .want_txqs = efx_ptp_want_txqs,
+ .keep_eventq = false,
+};
+
+void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx)
+{
+ /* Check whether PTP is implemented on this NIC. The DISABLE
+ * operation will succeed if and only if it is implemented.
+ */
+ if (efx_ptp_disable(efx) == 0)
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+ &efx_ptp_channel_type;
+}
+
+void efx_siena_ptp_start_datapath(struct efx_nic *efx)
+{
+ if (efx_ptp_restart(efx))
+ netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
+}
+
+void efx_siena_ptp_stop_datapath(struct efx_nic *efx)
+{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
+ efx_ptp_stop(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
new file mode 100644
index 000000000000..4172f90e9f6f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_PTP_H
+#define EFX_PTP_H
+
+#include <linux/net_tstamp.h>
+#include "net_driver.h"
+
+struct ethtool_ts_info;
+void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
+ struct ethtool_ts_info *ts_info);
+bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_siena_ptp_get_mode(struct efx_nic *efx);
+int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
+int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_siena_rx_skb_attach_timestamp(channel, skb);
+}
+
+void efx_siena_ptp_start_datapath(struct efx_nic *efx);
+void efx_siena_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
+
+#endif /* EFX_PTP_H */
diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c
new file mode 100644
index 000000000000..98d3c0743c0f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "rx_common.h"
+#include "filter.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
+/* Size of buffer allocated for skb header area. */
+#define EFX_SKB_HEADERS 128u
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ int len)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+ if (likely(len <= max_len))
+ return;
+
+ /* The packet must be discarded, but this is only a fatal error
+ * if the caller indicated it was
+ */
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX queue %d overlength RX event (%#x > %#x)\n",
+ efx_rx_queue_index(rx_queue), len, max_len);
+
+ efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
+}
+
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags,
+ u8 *eh, int hdr_len)
+{
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ /* Allocate an SKB to store the headers */
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
+ if (unlikely(skb == NULL)) {
+ atomic_inc(&efx->n_rx_noskb_drops);
+ return NULL;
+ }
+
+ EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
+
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
+
+ /* Append the remaining page(s) onto the frag list */
+ if (rx_buf->len > hdr_len) {
+ rx_buf->page_offset += hdr_len;
+ rx_buf->len -= hdr_len;
+
+ for (;;) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len, efx->rx_buffer_truesize);
+ rx_buf->page = NULL;
+
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ } else {
+ __free_pages(rx_buf->page, efx->rx_buffer_order);
+ rx_buf->page = NULL;
+ n_frags = 0;
+ }
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ skb_mark_napi_id(skb, &channel->napi_str);
+
+ return skb;
+}
+
+void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_rx_buffer *rx_buf;
+
+ rx_queue->rx_packets++;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->flags |= flags;
+
+ /* Validate the number of fragments and completed length */
+ if (n_frags == 1) {
+ if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
+ unlikely(!efx->rx_scatter)) {
+ /* If this isn't an explicit discard request, either
+ * the hardware or the driver is broken.
+ */
+ WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+ }
+
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received ids %x-%x len %d %s%s\n",
+ efx_rx_queue_index(rx_queue), index,
+ (index + n_frags - 1) & rx_queue->ptr_mask, len,
+ (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+ (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
+
+ /* Discard packet, if instructed to do so. Process the
+ * previous receive first.
+ */
+ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
+ efx_rx_flush_packet(channel);
+ efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
+ return;
+ }
+
+ if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
+ rx_buf->len = len;
+
+ /* Release and/or sync the DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue.
+ */
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
+ */
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
+
+ if (n_frags > 1) {
+ /* Release/sync DMA mapping for additional fragments.
+ * Fix length for last fragment.
+ */
+ unsigned int tail_frags = n_frags - 1;
+
+ for (;;) {
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ if (--tail_frags == 0)
+ break;
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
+ }
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+ }
+
+ /* All fragments have been DMA-synced, so recycle pages. */
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ /* Pipeline receives so that we give time for packet headers to be
+ * prefetched into cache.
+ */
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = n_frags;
+ channel->rx_pkt_index = index;
+}
+
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct sk_buff *skb;
+ u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
+
+ skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+ if (unlikely(skb == NULL)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ /* Set the SKB flags */
+ skb_checksum_none_assert(skb);
+ if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+ }
+
+ efx_rx_skb_attach_timestamp(channel, skb);
+
+ if (channel->type->receive_skb)
+ if (channel->type->receive_skb(channel, skb))
+ return;
+
+ /* Pass the packet up */
+ if (channel->rx_list != NULL)
+ /* Add to list, will pass up later */
+ list_add_tail(&skb->list, channel->rx_list);
+ else
+ /* No list, so pass it up now */
+ netif_receive_skb(skb);
+}
+
+/* efx_do_xdp - perform XDP processing on a received packet
+ *
+ * Returns true if the packet should still be delivered.
+ */
+static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+ u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+ struct efx_rx_queue *rx_queue;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 xdp_act;
+ s16 offset;
+ int err;
+
+ xdp_prog = rcu_dereference_bh(efx->xdp_prog);
+ if (!xdp_prog)
+ return true;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(channel->rx_pkt_n_frags > 1)) {
+ /* We can't do XDP on fragmented packets - drop. */
+ efx_siena_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP is not possible with multiple receive fragments (%d)\n",
+ channel->rx_pkt_n_frags);
+ channel->n_rx_xdp_bad_drops++;
+ return false;
+ }
+
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
+
+ /* Save the rx prefix. */
+ EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+ memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+ efx->rx_prefix_size);
+
+ xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
+ /* No support yet for XDP metadata */
+ xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+ rx_buf->len, false);
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ offset = (u8 *)xdp.data - *ehp;
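+ /* A positive offset means the program consumed headers via
+ * bpf_xdp_adjust_head(); e.g. (illustrative) stripping a 4-byte
+ * tag gives offset == 4, so on XDP_PASS the buffer view shrinks
+ * by four bytes and the rx prefix is copied back in front of the
+ * new start.
+ */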
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ /* Fix up rx prefix. */
+ if (offset) {
+ *ehp += offset;
+ rx_buf->page_offset += offset;
+ rx_buf->len -= offset;
+ memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+ efx->rx_prefix_size);
+ }
+ break;
+
+ case XDP_TX:
+ /* Buffer ownership passes to tx on success. */
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
+ if (unlikely(err != 1)) {
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP TX failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ } else {
+ channel->n_rx_xdp_tx++;
+ }
+ break;
+
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
+ if (unlikely(err)) {
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP redirect failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ } else {
+ channel->n_rx_xdp_redirect++;
+ }
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_bad_drops++;
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_drops++;
+ break;
+ }
+
+ return xdp_act == XDP_PASS;
+}
+
+/* Handle a received packet. Second half: Touches packet payload. */
+void __efx_siena_rx_packet(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_buffer *rx_buf =
+ efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ u8 *eh = efx_rx_buf_va(rx_buf);
+
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
+ /* If we're in loopback test, then pass the packet directly to the
+ * loopback layer, and free the rx_buf here
+ */
+ if (unlikely(efx->loopback_selftest)) {
+ struct efx_rx_queue *rx_queue;
+
+ efx_siena_loopback_rx_packet(efx, eh, rx_buf->len);
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ goto out;
+ }
+
+ if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+ goto out;
+
+ if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+ rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
+
+ if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
+ efx_siena_rx_packet_gro(channel, rx_buf,
+ channel->rx_pkt_n_frags, eh, 0);
+ else
+ efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+ channel->rx_pkt_n_frags = 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
new file mode 100644
index 000000000000..4579f43484c3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -0,0 +1,1094 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf);
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ unsigned int index;
+ struct page *page;
+
+ if (unlikely(!rx_queue->page_ring))
+ return NULL;
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
+
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
+ }
+
+ return NULL;
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ struct page *page = rx_buf->page;
+ unsigned int index;
+
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
+ return;
+
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned int read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
+
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct efx_nic *efx = rx_queue->efx;
+
+ bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
+static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ int i;
+
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ unsigned int max_fill, trigger, max_trigger;
+ struct efx_nic *efx = rx_queue->efx;
+ int rc = 0;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index, 0);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
+
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_rx_buffer *rx_buf;
+ int i;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned int index = i & rx_queue->ptr_mask;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ efx_fini_rx_recycle_ring(rx_queue);
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
+}
+
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+void efx_siena_rx_slow_fill(struct timer_list *t)
+{
+ struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
+
+static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ unsigned int page_offset, index, count;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ struct efx_rx_buffer *rx_buf;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
+
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+ EFX_XDP_HEADROOM;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align +
+ EFX_XDP_HEADROOM;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
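+ /* Mark only the final buffer carved from this page, so that the
+ * recycle path (efx_recycle_rx_page()) considers the page exactly
+ * once.
+ */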
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
+
+void efx_siena_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+ EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
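+ /* Illustrative 4KiB-page example with rx_buffer_order == 0: if
+ * rx_page_buf_step worked out to 2048, a page holds
+ * (4096 - sizeof(struct efx_rx_page_state)) / 2048 = 1 buffer,
+ * so a batch of EFX_RX_PREFERRED_BATCH (8) buffers needs 8 pages.
+ */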
+}
+
+/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+ bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+ do {
+ rc = efx_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+void
+efx_siena_rx_packet_gro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh, __wsum csum)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
+ if (csum) {
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else {
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ }
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ napi_gro_frags(napi);
+}
+
+/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
+ * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
+ */
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx, *new;
+ u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ /* Search for first gap in the numbering */
+ list_for_each_entry(ctx, head, list) {
+ if (ctx->user_id != id)
+ break;
+ id++;
+ /* Check for wrap. If this happens, we have nearly 2^32
+ * allocated RSS contexts, which seems unlikely.
+ */
+ if (WARN_ON_ONCE(!id))
+ return NULL;
+ }
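+ /* First-gap allocation, e.g. (illustrative): with user_ids 1, 2
+ * and 5 already present, the loop stops at the entry with
+ * user_id 5 and the new context takes id 3, inserted just before
+ * it by the list_add_tail() below.
+ */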
+
+ /* Create the new entry */
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+ new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ new->rx_hash_udp_4tuple = false;
+
+ /* Insert the new entry into the gap */
+ new->user_id = id;
+ list_add_tail(&new->list, &ctx->list);
+ return new;
+}
+
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+ u32 id)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ list_for_each_entry(ctx, head, list)
+ if (ctx->user_id == id)
+ return ctx;
+ return NULL;
+}
+
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
+{
+ list_del(&ctx->list);
+ kfree(ctx);
+}
+
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+}
+
+/**
+ * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
+
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
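+ /* Hash everything from outer_vid to the end of the spec in one
+ * pass; the BUILD_BUG_ON above ensures that region starts 32-bit
+ * aligned, so jhash2() can treat it as an array of u32.
+ */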
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+ unsigned int filter_idx, bool *force)
+{
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+ /* ARFS is currently updating this entry, leave it */
+ return false;
+ }
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+ /* ARFS tried and failed to update this, so it's probably out
+ * of date. Remove the filter and the ARFS rule entry.
+ */
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ *force = true;
+ return true;
+ } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+ /* ARFS has moved on, so old filter is not needed. Since we did
+ * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+ * not be removed by efx_siena_rps_hash_del() subsequently.
+ */
+ *force = true;
+ return true;
+ }
+ /* Remove it iff ARFS wants to. */
+ return true;
+}
+
+static
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ u32 hash = efx_siena_filter_spec_hash(spec);
+
+ lockdep_assert_held(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table)
+ return NULL;
+ return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_siena_filter_spec_equal(spec, &rule->spec))
+ return rule;
+ }
+ return NULL;
+}
+
+static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
+ *new = false;
+ return rule;
+ }
+ }
+ rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+ *new = true;
+ if (rule) {
+ memcpy(&rule->spec, spec, sizeof(rule->spec));
+ hlist_add_head(&rule->node, head);
+ }
+ return rule;
+}
+
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (WARN_ON(!head))
+ return;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
+ /* Someone may already have reused the entry. We know that if
+ * this check doesn't fire (i.e. filter_id == REMOVING)
+ * then the REMOVING mark was put there by our caller,
+ * because caller is holding a lock on filter table and
+ * only holders of that lock set REMOVING.
+ */
+ if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+ return;
+ hlist_del(node);
+ kfree(rule);
+ return;
+ }
+ }
+ /* We didn't find it. */
+ WARN_ON(1);
+}
+#endif
+
+int efx_siena_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ goto out_unlock;
+
+#ifdef CONFIG_RFS_ACCEL
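+ /* Allocate a per-channel table mapping hardware filter index to RPS
+ * flow ID, used by __efx_siena_filter_rfs_expire() to decide which
+ * accelerated RFS filters may be aged out.
+ */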
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ channel->rfs_expire_index = 0;
+ channel->rfs_filter_count = 0;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
+ efx->type->filter_table_remove(efx);
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+#endif
+out_unlock:
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+void efx_siena_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ cancel_delayed_work_sync(&channel->filter_work);
+ kfree(channel->rps_flow_id);
+ channel->rps_flow_id = NULL;
+ }
+#endif
+ down_write(&efx->filter_sem);
+ efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
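+/* Work item servicing one deferred ARFS request: insert the filter,
+ * record the resulting filter ID against the flow and log the outcome.
+ * On failure, a chunk of expiry work is done immediately to free up
+ * space in the overloaded filter table.
+ */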
+static void efx_filter_rfs_work(struct work_struct *data)
+{
+ struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
+ work);
+ struct efx_nic *efx = netdev_priv(req->net_dev);
+ struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
+ int slot_idx = req - efx->rps_slot;
+ struct efx_arfs_rule *rule;
+ u16 arfs_id = 0;
+ int rc;
+
+ rc = efx->type->filter_insert(efx, &req->spec, true);
+ if (rc >= 0)
+ /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
+ rc %= efx->type->max_rx_ip_filters;
+ if (efx->rps_hash_table) {
+ spin_lock_bh(&efx->rps_hash_lock);
+ rule = efx_siena_rps_hash_find(efx, &req->spec);
+ /* The rule might have already gone, if someone else's request
+ * for the same spec was already worked and then expired before
+ * we got around to our work. In that case we have nothing
+ * tying us to an arfs_id, meaning that as soon as the filter
+ * is considered for expiry it will be removed.
+ */
+ if (rule) {
+ if (rc < 0)
+ rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+ else
+ rule->filter_id = rc;
+ arfs_id = rule->arfs_id;
+ }
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
+ if (rc >= 0) {
+ /* Remember this so we can check whether to expire the filter
+ * later.
+ */
+ mutex_lock(&efx->rps_mutex);
+ if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
+ channel->rfs_filter_count++;
+ channel->rps_flow_id[rc] = req->flow_id;
+ mutex_unlock(&efx->rps_mutex);
+
+ if (req->spec.ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ channel->n_rfs_succeeded++;
+ } else {
+ if (req->spec.ether_type == htons(ETH_P_IP))
+ netif_dbg(efx, rx_status, efx->net_dev,
+ "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ else
+ netif_dbg(efx, rx_status, efx->net_dev,
+ "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc, arfs_id);
+ channel->n_rfs_failed++;
+ /* We're overloading the NIC's filter tables, so let's do a
+ * chunk of extra expiry work.
+ */
+ __efx_siena_filter_rfs_expire(channel,
+ min(channel->rfs_filter_count,
+ 100u));
+ }
+
+ /* Release references */
+ clear_bit(slot_idx, &efx->rps_slot_map);
+ dev_put(req->net_dev);
+}
+
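+/* Entry point for the kernel's accelerated RFS (ndo_rx_flow_steer) path.
+ * This runs in atomic context, so the actual filter insertion is
+ * deferred to efx_filter_rfs_work() via one of EFX_RPS_MAX_IN_FLIGHT
+ * preallocated request slots.
+ */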
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_async_filter_insertion *req;
+ struct efx_arfs_rule *rule;
+ struct flow_keys fk;
+ int slot_idx;
+ bool new;
+ int rc;
+
+ /* find a free slot */
+ for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
+ if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
+ break;
+ if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
+ return -EBUSY;
+
+ if (flow_id == RPS_FLOW_ID_INVALID) {
+ rc = -EINVAL;
+ goto out_clear;
+ }
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+
+ req = efx->rps_slot + slot_idx;
+ efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ req->spec.match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ req->spec.ether_type = fk.basic.n_proto;
+ req->spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ req->spec.rem_host[0] = fk.addrs.v4addrs.src;
+ req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
+ } else {
+ memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+ memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+ }
+
+ req->spec.rem_port = fk.ports.src;
+ req->spec.loc_port = fk.ports.dst;
+
+ if (efx->rps_hash_table) {
+ /* Add it to ARFS hash table */
+ spin_lock(&efx->rps_hash_lock);
+ rule = efx_rps_hash_add(efx, &req->spec, &new);
+ if (!rule) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
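+ /* Assign ids modulo RPS_NO_FILTER so that the reserved "no
+ * filter" value is never handed out.
+ */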
+ if (new)
+ rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+ rc = rule->arfs_id;
+ /* Skip if existing or pending filter already does the right thing */
+ if (!new && rule->rxq_index == rxq_index &&
+ rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+ goto out_unlock;
+ rule->rxq_index = rxq_index;
+ rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+ spin_unlock(&efx->rps_hash_lock);
+ } else {
+ /* Without an ARFS hash table, we just use arfs_id 0 for all
+ * filters. This means if multiple flows hash to the same
+ * flow_id, all but the most recently touched will be eligible
+ * for expiry.
+ */
+ rc = 0;
+ }
+
+ /* Queue the request */
+ dev_hold(req->net_dev = net_dev);
+ INIT_WORK(&req->work, efx_filter_rfs_work);
+ req->rxq_index = rxq_index;
+ req->flow_id = flow_id;
+ schedule_work(&req->work);
+ return rc;
+out_unlock:
+ spin_unlock(&efx->rps_hash_lock);
+out_clear:
+ clear_bit(slot_idx, &efx->rps_slot_map);
+ return rc;
+}
+
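+/* Scan up to @quota entries of the channel's flow table, starting where
+ * the previous scan stopped, and expire any filters that are no longer
+ * wanted. Returns false (no work done) if another expiry pass already
+ * holds rps_mutex.
+ */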
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+ unsigned int quota)
+{
+ bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+ struct efx_nic *efx = channel->efx;
+ unsigned int index, size, start;
+ u32 flow_id;
+
+ if (!mutex_trylock(&efx->rps_mutex))
+ return false;
+ expire_one = efx->type->filter_rfs_expire_one;
+ index = channel->rfs_expire_index;
+ start = index;
+ size = efx->type->max_rx_ip_filters;
+ while (quota) {
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID) {
+ quota--;
+ if (expire_one(efx, flow_id, index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [channel %u flow %u]\n",
+ index, channel->channel, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ channel->rfs_filter_count--;
+ }
+ }
+ if (++index == size)
+ index = 0;
+ /* If we were called with a quota that exceeds the total number
+ * of filters in the table (which shouldn't happen, but could
+ * if two callers race), ensure that we don't loop forever -
+ * stop when we've examined every row of the table.
+ */
+ if (index == start)
+ break;
+ }
+
+ channel->rfs_expire_index = index;
+ mutex_unlock(&efx->rps_mutex);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h
new file mode 100644
index 000000000000..6b37f83ecb30
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx_common.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_COMMON_H
+#define EFX_RX_COMMON_H
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_10G 256
+
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
+{
+ return page_address(buf->page) + buf->page_offset;
+}
+
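+/* The NIC writes the RX flow hash little-endian into the packet prefix;
+ * read it with a single unaligned load where that is cheap, otherwise
+ * assemble it byte by byte.
+ */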
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_hash_offset;
+
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void efx_siena_rx_slow_fill(struct timer_list *t);
+
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+}
+
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs);
+
+void efx_siena_rx_config_page_split(struct efx_nic *efx);
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+ bool atomic);
+
+void
+efx_siena_rx_packet_gro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh, __wsum csum);
+
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+ u32 id);
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+ unsigned int filter_idx, bool *force);
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+ unsigned int quota);
+#endif
+
+int efx_siena_probe_filters(struct efx_nic *efx);
+void efx_siena_remove_filters(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c
new file mode 100644
index 000000000000..07715a3d6bea
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/selftest.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "nic.h"
+#include "mcdi_port_common.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ * slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ * tasklet or normal task to be given higher priority than our IRQ
+ * threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT (HZ / 10)
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+ struct ethhdr header;
+ struct iphdr ip;
+ struct udphdr udp;
+ __be16 iteration;
+ char msg[64];
+} __packed;
+
+/* Loopback test source MAC address */
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
+ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char payload_msg[] =
+ "Hello world! This is an Efx loopback test in progress!";
+
+/* Interrupt mode names */
+static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
+static const char *const efx_siena_interrupt_mode_names[] = {
+ [EFX_INT_MODE_MSIX] = "MSI-X",
+ [EFX_INT_MODE_MSI] = "MSI",
+ [EFX_INT_MODE_LEGACY] = "legacy",
+};
+#define INT_MODE(efx) \
+ STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)
+
+/**
+ * struct efx_loopback_state - persistent state during a loopback selftest
+ * @flush: Drop all packets in efx_siena_loopback_rx_packet
+ * @packet_count: Number of packets being used in this test
+ * @skbs: An array of skbs transmitted
+ * @offload_csum: Checksums are being offloaded
+ * @rx_good: RX good packet count
+ * @rx_bad: RX bad packet count
+ * @payload: Payload used in tests
+ */
+struct efx_loopback_state {
+ bool flush;
+ int packet_count;
+ struct sk_buff **skbs;
+ bool offload_csum;
+ atomic_t rx_good;
+ atomic_t rx_bad;
+ struct efx_loopback_payload payload;
+};
+
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
+/**************************************************************************
+ *
+ * MII, NVRAM and register tests
+ *
+ **************************************************************************/
+
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ rc = efx_siena_mcdi_phy_test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
+
+ return rc;
+}
+
+static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->type->test_nvram) {
+ rc = efx->type->test_nvram(efx);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ tests->nvram = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ unsigned long timeout, wait;
+ int cpu;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
+ tests->interrupt = -1;
+
+ rc = efx_siena_irq_test_start(efx);
+ if (rc == -ENOTSUPP) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "direct interrupt testing not supported\n");
+ tests->interrupt = 0;
+ return 0;
+ }
+
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of test interrupt. */
+ netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
+ do {
+ schedule_timeout_uninterruptible(wait);
+ cpu = efx_nic_irq_test_irq_cpu(efx);
+ if (cpu >= 0)
+ goto success;
+ wait *= 2;
+ } while (time_before(jiffies, timeout));
+
+ netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
+ return -ETIMEDOUT;
+
+ success:
+ netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+ INT_MODE(efx), cpu);
+ tests->interrupt = 1;
+ return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ struct efx_channel *channel;
+ unsigned int read_ptr[EFX_MAX_CHANNELS];
+ unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+ unsigned long timeout, wait;
+
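+ /* Track one bit per channel: which channels ran NAPI, and which
+ * still have a DMA event or an interrupt pending.
+ */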
+ BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
+
+ efx_for_each_channel(channel, efx) {
+ read_ptr[channel->channel] = channel->eventq_read_ptr;
+ set_bit(channel->channel, &dma_pend);
+ set_bit(channel->channel, &int_pend);
+ efx_siena_event_test_start(channel);
+ }
+
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of interrupts. NAPI processing may or may
+ * not complete in time, but we can cope in any case.
+ */
+ do {
+ schedule_timeout_uninterruptible(wait);
+
+ efx_for_each_channel(channel, efx) {
+ efx_siena_stop_eventq(channel);
+ if (channel->eventq_read_ptr !=
+ read_ptr[channel->channel]) {
+ set_bit(channel->channel, &napi_ran);
+ clear_bit(channel->channel, &dma_pend);
+ clear_bit(channel->channel, &int_pend);
+ } else {
+ if (efx_siena_event_present(channel))
+ clear_bit(channel->channel, &dma_pend);
+ if (efx_nic_event_test_irq_cpu(channel) >= 0)
+ clear_bit(channel->channel, &int_pend);
+ }
+ efx_siena_start_eventq(channel);
+ }
+
+ wait *= 2;
+ } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
+
+ efx_for_each_channel(channel, efx) {
+ bool dma_seen = !test_bit(channel->channel, &dma_pend);
+ bool int_seen = !test_bit(channel->channel, &int_pend);
+
+ tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+ tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+ if (dma_seen && int_seen) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "channel %d event queue passed (with%s NAPI)\n",
+ channel->channel,
+ test_bit(channel->channel, &napi_ran) ?
+ "" : "out");
+ } else {
+ /* Report failure and whether either interrupt or DMA
+ * worked
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
+ if (int_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt "
+ "during event queue test\n",
+ channel->channel);
+ if (dma_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n",
+ channel->channel);
+ }
+ }
+
+ return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
+}
+
+static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags)
+{
+ int rc;
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
+ mutex_unlock(&efx->mac_lock);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ netif_info(efx, drv, efx->net_dev,
+ "%s phy selftest\n", rc ? "Failed" : "Passed");
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_siena_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *received;
+ struct efx_loopback_payload *payload;
+
+ BUG_ON(!buf_ptr);
+
+ /* If we are just flushing, then drop the packet */
+ if ((state == NULL) || state->flush)
+ return;
+
+ payload = &state->payload;
+
+ received = (struct efx_loopback_payload *) buf_ptr;
+ received->ip.saddr = payload->ip.saddr;
+ if (state->offload_csum)
+ received->ip.check = payload->ip.check;
+
+ /* Check that header exists */
+ if (pkt_len < sizeof(received->header)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the ethernet header exists */
+ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check packet length */
+ if (pkt_len != sizeof(*payload)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that IP header matches */
+ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that msg and padding matches */
+ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that iteration matches */
+ if (received->iteration != payload->iteration) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Increase correct RX count */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+
+ atomic_inc(&state->rx_good);
+ return;
+
+ err:
+#ifdef DEBUG
+ if (atomic_read(&state->rx_bad) == 0) {
+ netif_err(efx, drv, efx->net_dev, "received packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ buf_ptr, pkt_len, 0);
+ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ &state->payload, sizeof(state->payload), 0);
+ }
+#endif
+ atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_siena_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_loopback_payload *payload = &state->payload;
+
+ /* Initialise the layer II (Ethernet) header */
+ ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+ ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
+ payload->header.h_proto = htons(ETH_P_IP);
+
+ /* saddr set later and used as incrementing count */
+ payload->ip.daddr = htonl(INADDR_LOOPBACK);
+ payload->ip.ihl = 5;
+ payload->ip.check = (__force __sum16) htons(0xdead);
+ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+ payload->ip.version = IPVERSION;
+ payload->ip.protocol = IPPROTO_UDP;
+
+ /* Initialise udp header */
+ payload->udp.source = 0;
+ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+ sizeof(struct iphdr));
+ payload->udp.check = 0; /* checksum ignored */
+
+ /* Fill out payload */
+ payload->iteration = htons(ntohs(payload->iteration) + 1);
+ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+ /* Fill out remaining state members */
+ atomic_set(&state->rx_good, 0);
+ atomic_set(&state->rx_bad, 0);
+ smp_wmb();
+}
+
+static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *payload;
+ struct sk_buff *skb;
+ int i;
+ netdev_tx_t rc;
+
+ /* Transmit N copies of buffer */
+ for (i = 0; i < state->packet_count; i++) {
+ /* Allocate an skb, holding an extra reference for
+ * transmit completion counting */
+ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ state->skbs[i] = skb;
+ skb_get(skb);
+
+ /* Copy the payload in, incrementing the source address to
+ * exercise the rss vectors */
+ payload = skb_put(skb, sizeof(state->payload));
+ memcpy(payload, &state->payload, sizeof(state->payload));
+ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+ /* Ensure everything we've written is visible to the
+ * interrupt handler. */
+ smp_wmb();
+
+ netif_tx_lock_bh(efx->net_dev);
+ rc = efx_enqueue_skb(tx_queue, skb);
+ netif_tx_unlock_bh(efx->net_dev);
+
+ if (rc != NETDEV_TX_OK) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d could not transmit packet %d of "
+ "%d in %s loopback test\n", tx_queue->label,
+ i + 1, state->packet_count,
+ LOOPBACK_MODE(efx));
+
+ /* Defer cleaning up the other skbs for the caller */
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_poll_loopback(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+
+ return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct sk_buff *skb;
+ int tx_done = 0, rx_good, rx_bad;
+ int i, rc = 0;
+
+ netif_tx_lock_bh(efx->net_dev);
+
+ /* Count the number of TX completions and decrement the refcnt; an skb
+ * that is no longer shared has completed transmission. Any skbs not
+ * already completed will be freed when the queue is flushed. */
+ for (i = 0; i < state->packet_count; i++) {
+ skb = state->skbs[i];
+ if (skb && !skb_shared(skb))
+ ++tx_done;
+ dev_kfree_skb(skb);
+ }
+
+ netif_tx_unlock_bh(efx->net_dev);
+
+ /* Check TX completion and received packet counts */
+ rx_good = atomic_read(&state->rx_good);
+ rx_bad = atomic_read(&state->rx_bad);
+ if (tx_done != state->packet_count) {
+ /* Don't free the skbs; they will be picked up on TX
+ * overflow or channel teardown.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->label, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through so we see the RX errors as well */
+ }
+
+ /* We may always be up to a flush away from our desired packet total */
+ if (rx_good != state->packet_count) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->label, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through */
+ }
+
+ /* Update loopback test structure */
+ lb_tests->tx_sent[tx_queue->label] += state->packet_count;
+ lb_tests->tx_done[tx_queue->label] += tx_done;
+ lb_tests->rx_good += rx_good;
+ lb_tests->rx_bad += rx_bad;
+
+ return rc;
+}
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ int i, begin_rc, end_rc;
+
+ for (i = 0; i < 3; i++) {
+ /* Determine how many packets to send: bursts of 1, 16 and 256
+ * packets, capped at a third of the TX ring.
+ */
+ state->packet_count = efx->txq_entries / 3;
+ state->packet_count = min(1 << (i << 2), state->packet_count);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
+ if (!state->skbs)
+ return -ENOMEM;
+ state->flush = false;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d (hw %d) testing %s loopback with %d packets\n",
+ tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ efx_iterate_state(efx);
+ begin_rc = efx_begin_loopback(tx_queue);
+
+ /* This will normally complete very quickly, but be
+ * prepared to wait much longer. */
+ msleep(1);
+ if (!efx_poll_loopback(efx)) {
+ msleep(LOOPBACK_TIMEOUT_MS);
+ efx_poll_loopback(efx);
+ }
+
+ end_rc = efx_end_loopback(tx_queue, lb_tests);
+ kfree(state->skbs);
+
+ if (begin_rc || end_rc) {
+ /* Wait a while to ensure there are no packets
+ * floating around after a failure. */
+ schedule_timeout_uninterruptible(HZ / 10);
+ return begin_rc ? begin_rc : end_rc;
+ }
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ return 0;
+}
+
+/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
+ * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
+ * to delay and retry. Therefore, it's safer to just poll directly. Wait
+ * for link up and any faults to dissipate. */
+static int efx_wait_for_link(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+ int count, link_up_count = 0;
+ bool link_up;
+
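+ /* Poll for up to ~4 seconds; only trust the link once it has been
+ * up, with no MAC fault, on two consecutive polls.
+ */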
+ for (count = 0; count < 40; count++) {
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ if (efx->type->monitor != NULL) {
+ mutex_lock(&efx->mac_lock);
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ mutex_lock(&efx->mac_lock);
+ link_up = link_state->up;
+ if (link_up)
+ link_up = !efx->type->check_mac_fault(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned int loopback_modes)
+{
+ enum efx_loopback_mode mode;
+ struct efx_loopback_state *state;
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ /* Set the port loopback_selftest member. From this point on
+ * all received packets will be dropped. Mark the state as
+ * "flushing" so all inflight packets are dropped */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+ BUG_ON(efx->loopback_selftest);
+ state->flush = true;
+ efx->loopback_selftest = state;
+
+ /* Test all supported loopback modes */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(loopback_modes & (1 << mode)))
+ continue;
+
+ /* Move the port into the specified loopback mode. */
+ state->flush = true;
+ mutex_lock(&efx->mac_lock);
+ efx->loopback_mode = mode;
+ rc = __efx_siena_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to move into %s loopback\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ rc = efx_wait_for_link(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ /* Test all enabled types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ state->offload_csum = (tx_queue->type &
+ EFX_TXQ_TYPE_OUTER_CSUM);
+ rc = efx_test_loopback(tx_queue,
+ &tests->loopback[mode]);
+ if (rc)
+ goto out;
+ }
+ }
+
+ out:
+ /* Remove the flush. The caller will remove the loopback setting */
+ state->flush = true;
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
+
+ if (rc == -EPERM)
+ rc = 0;
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry point
+ *
+ *************************************************************************/
+
+int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned int flags)
+{
+ enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+ int phy_mode = efx->phy_mode;
+ int rc_test = 0, rc_reset, rc;
+
+ efx_siena_selftest_async_cancel(efx);
+
+ /* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+
+ rc = efx_test_phy_alive(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_nvram(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_interrupts(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_eventq_irq(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ if (rc_test)
+ return rc_test;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return efx_test_phy(efx, tests, flags);
+
+ /* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+ efx_device_detach_sync(efx);
+
+ if (efx->type->test_chip) {
+ rc_reset = efx->type->test_chip(efx, tests);
+ if (rc_reset) {
+ netif_err(efx, hw, efx->net_dev,
+ "Unable to recover from chip test\n");
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
+
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
+ rc_test = -EIO;
+ }
+
+ /* Ensure that the phy is powered and out of loopback
+ * for the bist and loopback tests */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode &= ~PHY_MODE_LOW_POWER;
+ efx->loopback_mode = LOOPBACK_NONE;
+ __efx_siena_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ rc = efx_test_phy(efx, tests, flags);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ /* restore the PHY to the previous state */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode = phy_mode;
+ efx->loopback_mode = loopback_mode;
+ __efx_siena_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_device_attach_if_not_resetting(efx);
+
+ return rc_test;
+}
+
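+/* Start an event-queue interrupt test on every channel; the results are
+ * checked by efx_siena_selftest_async_work() after IRQ_TIMEOUT.
+ */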
+void efx_siena_selftest_async_start(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_siena_event_test_start(channel);
+ schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void efx_siena_selftest_async_cancel(struct efx_nic *efx)
+{
+ cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+static void efx_siena_selftest_async_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ selftest_work.work);
+ struct efx_channel *channel;
+ int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = efx_nic_event_test_irq_cpu(channel);
+ if (cpu < 0)
+ netif_err(efx, ifup, efx->net_dev,
+ "channel %d failed to trigger an interrupt\n",
+ channel->channel);
+ else
+ netif_dbg(efx, ifup, efx->net_dev,
+ "channel %d triggered interrupt on CPU %d\n",
+ channel->channel, cpu);
+ }
+}
+
+void efx_siena_selftest_async_init(struct efx_nic *efx)
+{
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work);
+}
diff --git a/drivers/net/ethernet/sfc/siena/selftest.h b/drivers/net/ethernet/sfc/siena/selftest.h
new file mode 100644
index 000000000000..6af6e7fbfcee
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/selftest.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+ int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
+ int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
+ int rx_good;
+ int rx_bad;
+};
+
+#define EFX_MAX_PHY_TESTS 20
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure; 0 indicates test could not be run.
+ */
+struct efx_self_tests {
+ /* online tests */
+ int phy_alive;
+ int nvram;
+ int interrupt;
+ int eventq_dma[EFX_MAX_CHANNELS];
+ int eventq_int[EFX_MAX_CHANNELS];
+ /* offline tests */
+ int memory;
+ int registers;
+ int phy_ext[EFX_MAX_PHY_TESTS];
+ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
+};
+
+void efx_siena_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+ int pkt_len);
+int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned int flags);
+void efx_siena_selftest_async_init(struct efx_nic *efx);
+void efx_siena_selftest_async_start(struct efx_nic *efx);
+void efx_siena_selftest_async_cancel(struct efx_nic *efx);
+
+#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
index 16347a6d0c47..a44c8fa25748 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -25,6 +25,7 @@
#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
+#include "rx_common.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -39,7 +40,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
if (channel->irq_moderation_us) {
unsigned int ticks;
- ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
+ ticks = efx_siena_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF,
@@ -55,16 +56,16 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
-void siena_prepare_flush(struct efx_nic *efx)
+void efx_siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
- efx_mcdi_set_mac(efx);
+ efx_siena_mcdi_set_mac(efx);
}
void siena_finish_flush(struct efx_nic *efx)
{
if (--efx->fc_disable == 0)
- efx_mcdi_set_mac(efx);
+ efx_siena_mcdi_set_mac(efx);
}
static const struct efx_farch_register_test siena_register_tests[] = {
@@ -101,12 +102,12 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
enum reset_type reset_method = RESET_TYPE_ALL;
int rc, rc2;
- efx_reset_down(efx, reset_method);
+ efx_siena_reset_down(efx, reset_method);
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
*/
- rc = efx_mcdi_reset(efx, reset_method);
+ rc = efx_siena_mcdi_reset(efx, reset_method);
if (rc)
goto out;
@@ -115,9 +116,9 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
ARRAY_SIZE(siena_register_tests))
? -1 : 1;
- rc = efx_mcdi_reset(efx, reset_method);
+ rc = efx_siena_mcdi_reset(efx, reset_method);
out:
- rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ rc2 = efx_siena_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
}
@@ -142,27 +143,28 @@ static int siena_ptp_set_ts_config(struct efx_nic *efx,
switch (init->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* if TX timestamping is still requested then leave PTP on */
- return efx_ptp_change_mode(efx,
- init->tx_type != HWTSTAMP_TX_OFF,
- efx_ptp_get_mode(efx));
+ return efx_siena_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_siena_ptp_get_mode(efx));
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ return efx_siena_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- rc = efx_ptp_change_mode(efx, true,
- MC_CMD_PTP_MODE_V2_ENHANCED);
+ rc = efx_siena_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
/* bug 33070 - old versions of the firmware do not support the
* improved UUID filtering option. Similarly old versions of the
* application do not expect it to be enabled. If the firmware
* does not accept the enhanced mode, fall back to the standard
* PTP v2 UUID filtering. */
if (rc != 0)
- rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ rc = efx_siena_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2);
return rc;
default:
return -ERANGE;
@@ -221,7 +223,8 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
u32 caps = 0;
int rc;
- rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
+ rc = efx_siena_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL,
+ &caps);
efx->timer_quantum_ns =
(caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
@@ -284,12 +287,12 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- rc = efx_mcdi_init(efx);
+ rc = efx_siena_mcdi_init(efx);
if (rc)
goto fail1;
/* Now we can reset the NIC */
- rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ rc = efx_siena_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -298,8 +301,8 @@ static int siena_probe_nic(struct efx_nic *efx)
siena_init_wol(efx);
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
- GFP_KERNEL);
+ rc = efx_siena_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -321,23 +324,23 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
}
- rc = efx_mcdi_mon_probe(efx);
+ rc = efx_siena_mcdi_mon_probe(efx);
if (rc)
goto fail5;
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
efx_siena_sriov_probe(efx);
#endif
- efx_ptp_defer_probe_with_channel(efx);
+ efx_siena_ptp_defer_probe_with_channel(efx);
return 0;
fail5:
- efx_nic_free_buffer(efx, &efx->irq_status);
+ efx_siena_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
- efx_mcdi_detach(efx);
- efx_mcdi_fini(efx);
+ efx_siena_mcdi_detach(efx);
+ efx_siena_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
return rc;
@@ -404,7 +407,7 @@ static int siena_init_nic(struct efx_nic *efx)
int rc;
/* Recover from a failed assertion post-reset */
- rc = efx_mcdi_handle_assertion(efx);
+ rc = efx_siena_mcdi_handle_assertion(efx);
if (rc)
return rc;
@@ -438,7 +441,7 @@ static int siena_init_nic(struct efx_nic *efx)
efx->rss_context.context_id = 0; /* indicates RSS is active */
/* Enable event logging */
- rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ rc = efx_siena_mcdi_log_ctrl(efx, true, false, 0);
if (rc)
return rc;
@@ -455,14 +458,14 @@ static int siena_init_nic(struct efx_nic *efx)
static void siena_remove_nic(struct efx_nic *efx)
{
- efx_mcdi_mon_remove(efx);
+ efx_siena_mcdi_mon_remove(efx);
- efx_nic_free_buffer(efx, &efx->irq_status);
+ efx_siena_free_buffer(efx, &efx->irq_status);
- efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ efx_siena_mcdi_reset(efx, RESET_TYPE_ALL);
- efx_mcdi_detach(efx);
- efx_mcdi_fini(efx);
+ efx_siena_mcdi_detach(efx);
+ efx_siena_mcdi_fini(efx);
/* Tear down the private nic state */
kfree(efx->nic_data);
@@ -544,8 +547,8 @@ static const unsigned long siena_stat_mask[] = {
static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
{
- return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
- siena_stat_mask, names);
+ return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
}
static int siena_try_update_nic_stats(struct efx_nic *efx)
@@ -561,16 +564,16 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
- efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
- stats, efx->stats_buffer.addr, false);
+ efx_siena_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
/* Update derived statistics */
- efx_nic_fix_nodesc_drop_stat(efx,
- &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
+ efx_siena_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
@@ -582,7 +585,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
stats[SIENA_STAT_rx_bytes] -
stats[SIENA_STAT_rx_bad_bytes]);
- efx_update_sw_stats(efx, stats);
+ efx_siena_update_sw_stats(efx, stats);
return 0;
}
@@ -647,14 +650,14 @@ static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unu
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- rc = efx_mcdi_set_mac(efx);
+ rc = efx_siena_mcdi_set_mac(efx);
if (rc != 0)
return rc;
memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
efx->multicast_hash.byte, sizeof(efx->multicast_hash));
- return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
- inbuf, sizeof(inbuf), NULL, 0, NULL);
+ return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
}
/**************************************************************************
@@ -687,16 +690,17 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
if (type & WAKE_MAGIC) {
if (nic_data->wol_filter_id != -1)
- efx_mcdi_wol_filter_remove(efx,
- nic_data->wol_filter_id);
- rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
- &nic_data->wol_filter_id);
+ efx_siena_mcdi_wol_filter_remove(efx,
+ nic_data->wol_filter_id);
+ rc = efx_siena_mcdi_wol_filter_set_magic(efx,
+ efx->net_dev->dev_addr,
+ &nic_data->wol_filter_id);
if (rc)
goto fail;
pci_wake_from_d3(efx->pci_dev, true);
} else {
- rc = efx_mcdi_wol_filter_reset(efx);
+ rc = efx_siena_mcdi_wol_filter_reset(efx);
nic_data->wol_filter_id = -1;
pci_wake_from_d3(efx->pci_dev, false);
if (rc)
@@ -716,12 +720,12 @@ static void siena_init_wol(struct efx_nic *efx)
struct siena_nic_data *nic_data = efx->nic_data;
int rc;
- rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
+ rc = efx_siena_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
if (rc != 0) {
/* If it failed, attempt to get into a synchronised
* state with MC by resetting any set WoL filters */
- efx_mcdi_wol_filter_reset(efx);
+ efx_siena_mcdi_wol_filter_reset(efx);
nic_data->wol_filter_id = -1;
} else if (nic_data->wol_filter_id != -1) {
pci_wake_from_d3(efx->pci_dev, true);
@@ -826,7 +830,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx)
**************************************************************************
*/
-#ifdef CONFIG_SFC_MTD
+#ifdef CONFIG_SFC_SIENA_MTD
struct siena_nvram_type_info {
int port;
@@ -867,7 +871,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_siena_mcdi_nvram_info(efx, type, &size, &erase_size,
+ &protected);
if (rc)
return rc;
if (protected)
@@ -894,7 +899,7 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
size_t i;
int rc;
- rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
+ rc = efx_siena_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
if (rc)
return rc;
@@ -914,7 +919,7 @@ static int siena_mtd_probe(struct efx_nic *efx)
ASSERT_RTNL();
- rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ rc = efx_siena_mcdi_nvram_types(efx, &nvram_types);
if (rc)
return rc;
@@ -942,14 +947,14 @@ static int siena_mtd_probe(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ rc = efx_siena_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
kfree(parts);
return rc;
}
-#endif /* CONFIG_SFC_MTD */
+#endif /* CONFIG_SFC_SIENA_MTD */
static unsigned int siena_check_caps(const struct efx_nic *efx,
u8 flag, u32 offset)
@@ -958,6 +963,12 @@ static unsigned int siena_check_caps(const struct efx_nic *efx,
return 0;
}
+static unsigned int efx_siena_recycle_ring_size(const struct efx_nic *efx)
+{
+ /* Maximum link speed is 10G */
+ return EFX_RECYCLE_RING_SIZE_10G;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -973,36 +984,36 @@ const struct efx_nic_type siena_a0_nic_type = {
.remove = siena_remove_nic,
.init = siena_init_nic,
.dimension_resources = siena_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = efx_siena_port_dummy_op_void,
#ifdef CONFIG_EEH
.monitor = siena_monitor,
#else
.monitor = NULL,
#endif
- .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_reason = efx_siena_mcdi_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
- .reset = efx_mcdi_reset,
- .probe_port = efx_mcdi_port_probe,
- .remove_port = efx_mcdi_port_remove,
+ .reset = efx_siena_mcdi_reset,
+ .probe_port = efx_siena_mcdi_port_probe,
+ .remove_port = efx_siena_mcdi_port_remove,
.fini_dmaq = efx_farch_fini_dmaq,
- .prepare_flush = siena_prepare_flush,
+ .prepare_flush = efx_siena_prepare_flush,
.finish_flush = siena_finish_flush,
- .prepare_flr = efx_port_dummy_op_void,
+ .prepare_flr = efx_siena_port_dummy_op_void,
.finish_flr = efx_farch_finish_flr,
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
+ .start_stats = efx_siena_mcdi_mac_start_stats,
+ .pull_stats = efx_siena_mcdi_mac_pull_stats,
+ .stop_stats = efx_siena_mcdi_mac_stop_stats,
.push_irq_moderation = siena_push_irq_moderation,
.reconfigure_mac = siena_mac_reconfigure,
- .check_mac_fault = efx_mcdi_mac_check_fault,
- .reconfigure_port = efx_mcdi_port_reconfigure,
+ .check_mac_fault = efx_siena_mcdi_mac_check_fault,
+ .reconfigure_port = efx_siena_mcdi_port_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_chip = siena_test_chip,
- .test_nvram = efx_mcdi_nvram_test_all,
+ .test_nvram = efx_siena_mcdi_nvram_test_all,
.mcdi_request = siena_mcdi_request,
.mcdi_poll_response = siena_mcdi_poll_response,
.mcdi_read_response = siena_mcdi_read_response,
@@ -1017,7 +1028,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
- .tx_enqueue = __efx_enqueue_skb,
+ .tx_enqueue = __efx_siena_enqueue_skb,
.rx_push_rss_config = siena_rx_push_rss_config,
.rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe,
@@ -1025,7 +1036,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
- .rx_packet = __efx_rx_packet,
+ .rx_packet = __efx_siena_rx_packet,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
@@ -1047,17 +1058,17 @@ const struct efx_nic_type siena_a0_nic_type = {
#ifdef CONFIG_RFS_ACCEL
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
-#ifdef CONFIG_SFC_MTD
+#ifdef CONFIG_SFC_SIENA_MTD
.mtd_probe = siena_mtd_probe,
- .mtd_rename = efx_mcdi_mtd_rename,
- .mtd_read = efx_mcdi_mtd_read,
- .mtd_erase = efx_mcdi_mtd_erase,
- .mtd_write = efx_mcdi_mtd_write,
- .mtd_sync = efx_mcdi_mtd_sync,
+ .mtd_rename = efx_siena_mcdi_mtd_rename,
+ .mtd_read = efx_siena_mcdi_mtd_read,
+ .mtd_erase = efx_siena_mcdi_mtd_erase,
+ .mtd_write = efx_siena_mcdi_mtd_write,
+ .mtd_sync = efx_siena_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
.sriov_configure = efx_siena_sriov_configure,
.sriov_init = efx_siena_sriov_init,
.sriov_fini = efx_siena_sriov_fini,
@@ -1068,9 +1079,9 @@ const struct efx_nic_type siena_a0_nic_type = {
.sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
.sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
.sriov_get_vf_config = efx_siena_sriov_get_vf_config,
- .vswitching_probe = efx_port_dummy_op_int,
- .vswitching_restore = efx_port_dummy_op_int,
- .vswitching_remove = efx_port_dummy_op_void,
+ .vswitching_probe = efx_siena_port_dummy_op_int,
+ .vswitching_restore = efx_siena_port_dummy_op_int,
+ .vswitching_remove = efx_siena_port_dummy_op_void,
.set_mac_address = efx_siena_sriov_mac_address_changed,
#endif
@@ -1097,5 +1108,6 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
.check_caps = siena_check_caps,
- .sensor_event = efx_mcdi_sensor_event,
+ .sensor_event = efx_siena_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_siena_recycle_ring_size,
};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c
index f12851a527d9..8353c15dc233 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c
@@ -206,8 +206,9 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
- outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf,
+ MC_CMD_SRIOV_IN_LEN, outbuf,
+ MC_CMD_SRIOV_OUT_LEN, &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -288,7 +289,7 @@ static int efx_siena_sriov_memcpy(struct efx_nic *efx,
++req;
}
- rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
mb(); /* Don't write source/read dest before DMA is complete */
@@ -689,7 +690,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
rtnl_lock();
- siena_prepare_flush(efx);
+ efx_siena_prepare_flush(efx);
rtnl_unlock();
/* Flush all the initialized queues */
@@ -712,7 +713,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
atomic_set(&vf->rxq_retry_count, 0);
while (timeout && (vf->rxq_count || vf->txq_count)) {
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
NULL, 0, NULL);
WARN_ON(rc < 0);
@@ -1011,9 +1012,9 @@ static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
- if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
+ if (!efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
efx_siena_sriov_reset_vf(vf, &buf);
- efx_nic_free_buffer(efx, &buf);
+ efx_siena_free_buffer(efx, &buf);
}
}
@@ -1043,7 +1044,7 @@ efx_siena_sriov_get_channel_name(struct efx_channel *channel,
static const struct efx_channel_type efx_siena_sriov_channel_type = {
.handle_no_channel = efx_siena_sriov_handle_no_channel,
.pre_probe = efx_siena_sriov_probe_channel,
- .post_remove = efx_channel_dummy_op_void,
+ .post_remove = efx_siena_channel_dummy_op_void,
.get_name = efx_siena_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */
.keep_eventq = true,
@@ -1228,7 +1229,7 @@ static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = nic_data->vf + pos;
- efx_nic_free_buffer(efx, &vf->buf);
+ efx_siena_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
vf->peer_page_addrs = NULL;
vf->peer_page_count = 0;
@@ -1268,8 +1269,8 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn));
- rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
- GFP_KERNEL);
+ rc = efx_siena_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+ GFP_KERNEL);
if (rc)
goto fail;
@@ -1302,8 +1303,8 @@ int efx_siena_sriov_init(struct efx_nic *efx)
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
- sizeof(*vfdi_status), GFP_KERNEL);
+ rc = efx_siena_alloc_buffer(efx, &nic_data->vfdi_status,
+ sizeof(*vfdi_status), GFP_KERNEL);
if (rc)
goto fail_status;
vfdi_status = nic_data->vfdi_status.addr;
@@ -1358,7 +1359,7 @@ fail_vfs:
efx_siena_sriov_free_local(efx);
kfree(nic_data->vf);
fail_alloc:
- efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+ efx_siena_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
efx_siena_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
@@ -1395,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
kfree(nic_data->vf);
- efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+ efx_siena_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
@@ -1563,7 +1564,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
efx_siena_sriov_usrev(efx, true);
(void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
- if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
+ if (efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
@@ -1571,7 +1572,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
efx_siena_sriov_reset_vf(vf, &buf);
}
- efx_nic_free_buffer(efx, &buf);
+ efx_siena_free_buffer(efx, &buf);
}
int efx_init_sriov(void)
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena/siena_sriov.h
index e548c4daf189..50f6e924495e 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.h
+++ b/drivers/net/ethernet/sfc/siena/siena_sriov.h
@@ -54,18 +54,21 @@ int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
struct ifla_vf_info *ivf);
-#ifdef CONFIG_SFC_SRIOV
+#ifdef CONFIG_SFC_SIENA_SRIOV
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return efx->vf_init_count != 0;
}
-#else /* !CONFIG_SFC_SRIOV */
+
+int efx_init_sriov(void);
+void efx_fini_sriov(void);
+#else /* !CONFIG_SFC_SIENA_SRIOV */
static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return false;
}
-#endif /* CONFIG_SFC_SRIOV */
+#endif /* CONFIG_SFC_SIENA_SRIOV */
void efx_siena_sriov_probe(struct efx_nic *efx);
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
diff --git a/drivers/net/ethernet/sfc/siena/sriov.h b/drivers/net/ethernet/sfc/siena/sriov.h
new file mode 100644
index 000000000000..a6981bad7621
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/sriov.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SIENA_SRIOV
+
+static inline
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_mac)
+ return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+ else
+ return -EOPNOTSUPP;
+}
+
+static inline
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos, __be16 vlan_proto)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_vlan) {
+ if ((vlan & ~VLAN_VID_MASK) ||
+ (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_spoofchk)
+ return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+ else
+ return -EOPNOTSUPP;
+}
+
+static inline
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_vf_config)
+ return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+ else
+ return -EOPNOTSUPP;
+}
+
+static inline
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_link_state)
+ return efx->type->sriov_set_vf_link_state(efx, vf_i,
+ link_state);
+ else
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_SFC_SIENA_SRIOV */
+
+#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
new file mode 100644
index 000000000000..91e87594ed1e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/cache.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "io.h"
+#include "nic.h"
+#include "tx.h"
+#include "tx_common.h"
+#include "workarounds.h"
+
+static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
+{
+ unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
+ struct efx_buffer *page_buf =
+ &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
+ unsigned int offset =
+ ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
+
+ if (unlikely(!page_buf->addr) &&
+ efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
+ return NULL;
+ buffer->dma_addr = page_buf->dma_addr + offset;
+ buffer->unmap_len = 0;
+ return (u8 *)page_buf->addr + offset;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+ /* We need to consider all queues that the net core sees as one */
+ struct efx_nic *efx = txq1->efx;
+ struct efx_tx_queue *txq2;
+ unsigned int fill_level;
+
+ fill_level = efx_channel_tx_old_fill_level(txq1->channel);
+ if (likely(fill_level < efx->txq_stop_thresh))
+ return;
+
+ /* We used the stale old_read_count above, which gives us a
+ * pessimistic estimate of the fill level (which may even
+ * validly be >= efx->txq_entries). Now try again using
+ * read_count (more likely to be a cache miss).
+ *
+ * If we read read_count and then conditionally stop the
+ * queue, it is possible for the completion path to race with
+ * us and complete all outstanding descriptors in the middle,
+ * after which there will be no more completions to wake it.
+ * Therefore we stop the queue first, then read read_count
+ * (with a memory barrier to ensure the ordering), then
+ * restart the queue if the fill level turns out to be low
+ * enough.
+ */
+ netif_tx_stop_queue(txq1->core_txq);
+ smp_mb();
+ efx_for_each_channel_tx_queue(txq2, txq1->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+ fill_level = efx_channel_tx_old_fill_level(txq1->channel);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
+ if (likely(fill_level < efx->txq_stop_thresh)) {
+ smp_mb();
+ if (likely(!efx->loopback_selftest))
+ netif_tx_start_queue(txq1->core_txq);
+ }
+}
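The comment above is the load-bearing part of this function; reduced to its essentials, the stop-then-recheck pattern is the sketch below. The netif_tx_* calls and smp_mb() are the real kernel API, while the fill-level helper and threshold are illustrative stand-ins for the driver's own fields:

    netif_tx_stop_queue(txq);           /* stop before re-reading read_count */
    smp_mb();                           /* order the stop against the re-read */
    fill = recompute_fill_level(txq);   /* stand-in: re-read the consumer index */
    if (fill < stop_thresh)             /* a completion raced with us, */
            netif_tx_start_queue(txq);  /* so undo the stop */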
+
+static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ unsigned int copy_len = skb->len;
+ struct efx_tx_buffer *buffer;
+ u8 *copy_buffer;
+ int rc;
+
+ EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
+
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+ copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
+ if (unlikely(!copy_buffer))
+ return -ENOMEM;
+
+ rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
+ EFX_WARN_ON_PARANOID(rc);
+ buffer->len = copy_len;
+
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB;
+
+ ++tx_queue->insert_count;
+ return rc;
+}
+
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+ struct efx_tx_queue *q;
+
+ efx_for_each_channel_tx_queue(q, channel) {
+ if (q->xmit_pending)
+ efx_nic_push_buffers(q);
+ }
+}
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue. The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped, and
+ * the queue's insert pointer will be restored to its original value.
+ *
+ * This function is split out from efx_siena_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
+ * Returns NETDEV_TX_OK.
+ * You must hold netif_tx_lock() to call this function.
+ */
+netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ unsigned int old_insert_count = tx_queue->insert_count;
+ bool xmit_more = netdev_xmit_more();
+ bool data_mapped = false;
+ unsigned int segments;
+ unsigned int skb_len;
+ int rc;
+
+ skb_len = skb->len;
+ segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
+ if (segments == 1)
+ segments = 0; /* Don't use TSO for a single segment. */
+
+ /* Handle TSO first - it's *possible* (although unlikely) that we might
+ * be passed a packet to segment that's smaller than the copybreak/PIO
+ * size limit.
+ */
+ if (segments) {
+ rc = efx_siena_tx_tso_fallback(tx_queue, skb);
+ tx_queue->tso_fallbacks++;
+ if (rc == 0)
+ return 0;
+ goto err;
+ } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
+ /* Pad short packets or coalesce short fragmented packets. */
+ if (efx_enqueue_skb_copy(tx_queue, skb))
+ goto err;
+ tx_queue->cb_packets++;
+ data_mapped = true;
+ }
+
+ /* Map for DMA and create descriptors if we haven't done so already. */
+ if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
+ goto err;
+
+ efx_tx_maybe_stop_queue(tx_queue);
+
+ tx_queue->xmit_pending = true;
+
+ /* Pass off to hardware */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+ efx_tx_send_pending(tx_queue->channel);
+
+ tx_queue->tx_packets++;
+ return NETDEV_TX_OK;
+
+err:
+ efx_siena_enqueue_unwind(tx_queue, old_insert_count);
+ dev_kfree_skb_any(skb);
+
+ /* If we're not expecting another transmit and we had something to push
+ * on this queue or a partner queue then we need to push here to get the
+ * previous packets out.
+ */
+ if (!xmit_more)
+ efx_tx_send_pending(tx_queue->channel);
+
+ return NETDEV_TX_OK;
+}
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns the number of packets sent on success, or an error code otherwise.
+ * Runs in NAPI context, either in our own NAPI poll (for XDP TX) or in
+ * another NIC's NAPI poll (for XDP redirect).
+ */
+int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush)
+{
+ struct efx_tx_buffer *tx_buffer;
+ struct efx_tx_queue *tx_queue;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ unsigned int len;
+ int space;
+ int cpu;
+ int i = 0;
+
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+ if (unlikely(!n))
+ return 0;
+
+ cpu = raw_smp_processor_id();
+ if (unlikely(cpu >= efx->xdp_tx_queue_count))
+ return -EINVAL;
+
+ tx_queue = efx->xdp_tx_queues[cpu];
+ if (unlikely(!tx_queue))
+ return -EINVAL;
+
+ if (!tx_queue->initialised)
+ return -EINVAL;
+
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
+
+ /* If we're borrowing net stack queues we have to handle stop-restart,
+ * or we might block the queue and it will be considered frozen.
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ if (netif_tx_queue_stopped(tx_queue->core_txq))
+ goto unlock;
+ efx_tx_maybe_stop_queue(tx_queue);
+ }
+
+ /* Check for available space. We should never need multiple
+ * descriptors per frame.
+ */
+ space = efx->txq_entries +
+ tx_queue->read_count - tx_queue->insert_count;
+
+ for (i = 0; i < n; i++) {
+ xdpf = xdpfs[i];
+
+ if (i >= space)
+ break;
+
+ /* We'll want a descriptor for this tx. */
+ prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+ len = xdpf->len;
+
+ /* Map for DMA. */
+ dma_addr = dma_map_single(&efx->pci_dev->dev,
+ xdpf->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+ break;
+
+ /* Create descriptor and set up for unmapping DMA. */
+ tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
+ tx_buffer->xdpf = xdpf;
+ tx_buffer->flags = EFX_TX_BUF_XDP |
+ EFX_TX_BUF_MAP_SINGLE;
+ tx_buffer->dma_offset = 0;
+ tx_buffer->unmap_len = len;
+ tx_queue->tx_packets++;
+ }
+
+ /* Pass mapped frames to hardware. */
+ if (flush && i > 0)
+ efx_nic_push_buffers(tx_queue);
+
+unlock:
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
+ return i == 0 ? -EIO : i;
+}
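A sketch of the caller's side of this contract: the return value is the number of frames actually queued (or -EIO if none were), and frames beyond that count remain the caller's responsibility. The loop shown is illustrative, not the driver's actual ndo_xdp_xmit handler:

    int sent = efx_siena_xdp_tx_buffers(efx, n, frames, true);

    if (sent < 0)
            sent = 0;                             /* nothing was queued */
    for (i = sent; i < n; i++)
            xdp_return_frame_rx_napi(frames[i]);  /* drop the unqueued tail */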
+
+/* Initiate a packet transmission. We use one channel per CPU
+ * (sharing when we have more CPUs than channels).
+ *
+ * Context: non-blocking.
+ * Should always return NETDEV_TX_OK and consume the skb.
+ */
+netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ unsigned index, type;
+
+ EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+
+ index = skb_get_queue_mapping(skb);
+ type = efx_tx_csum_type_skb(skb);
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+
+ /* PTP "event" packet */
+ if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+ ((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
+ unlikely(efx_siena_ptp_is_ptp_tx(efx, skb)))) {
+ /* There may be existing transmits on the channel that are
+ * waiting for this packet to trigger the doorbell write.
+ * We need to send the packets at this point.
+ */
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return efx_siena_ptp_tx(efx, skb);
+ }
+
+ tx_queue = efx_get_tx_queue(efx, index, type);
+ if (WARN_ON_ONCE(!tx_queue)) {
+ /* We don't have a TXQ of the right type.
+ * This should never happen, as we don't advertise offload
+ * features unless we can support them.
+ */
+ dev_kfree_skb_any(skb);
+ /* If we're not expecting another transmit and we had something to push
+ * on this queue or a partner queue then we need to push here to get the
+ * previous packets out.
+ */
+ if (!netdev_xmit_more())
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return NETDEV_TX_OK;
+ }
+
+ return __efx_siena_enqueue_skb(tx_queue, skb);
+}
+
+void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->channel->channel +
+ ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
+
+int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ unsigned tc, num_tc;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ /* Only Siena supported highpri queues */
+ if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
+ return -EOPNOTSUPP;
+
+ num_tc = mqprio->num_tc;
+
+ if (num_tc > EFX_MAX_TX_TC)
+ return -EINVAL;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ net_dev->num_tc = num_tc;
+
+ return netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+}
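A worked example of the mapping this sets up, assuming efx->n_tx_channels == 4 and an mqprio request for two traffic classes:

    /* tc_to_txq[0] = { .offset = 0, .count = 4 }  core queues 0..3 (normal)
     * tc_to_txq[1] = { .offset = 4, .count = 4 }  core queues 4..7 (highpri)
     * netif_set_real_num_tx_queues(net_dev, 8);
     *
     * efx_siena_hard_start_xmit() inverts this: an skb on core queue 5
     * becomes index 1 with EFX_TXQ_TYPE_HIGHPRI set.
     */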
diff --git a/drivers/net/ethernet/sfc/siena/tx.h b/drivers/net/ethernet/sfc/siena/tx.h
new file mode 100644
index 000000000000..ee801950c909
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2015 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_TX_H
+#define EFX_TX_H
+
+#include <linux/types.h>
+
+/* Driver internal tx-path related declarations. */
+/* What TXQ type will satisfy the checksum offloads required for this skb? */
+static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0; /* no checksum offload */
+
+ if (skb->encapsulation &&
+ skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
+ /* we only advertise features for IPv4 and IPv6 checksums on
+ * encapsulated packets, so if the checksum is for the inner
+ * packet, it must be one of them; no further checking required.
+ */
+
+ /* Do we also need to offload the outer header checksum? */
+ if (skb_shinfo(skb)->gso_segs > 1 &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
+ return EFX_TXQ_TYPE_INNER_CSUM;
+ }
+
+ /* similarly, we only advertise features for IPv4 and IPv6 checksums,
+ * so it must be one of them. No need for further checks.
+ */
+ return EFX_TXQ_TYPE_OUTER_CSUM;
+}
+#endif /* EFX_TX_H */
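The return value is a bitmask of EFX_TXQ_TYPE_* flags that selects which per-channel queue an skb lands on. A minimal sketch of the intended use, mirroring the lookup in tx.c above, with the four possible results enumerated in the comment:

    unsigned int type = efx_tx_csum_type_skb(skb);
    /* 0                                                  no checksum offload
     * EFX_TXQ_TYPE_OUTER_CSUM                            plain TCP/UDP
     * EFX_TXQ_TYPE_INNER_CSUM                            tunnelled, inner only
     * EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM  tunnelled TSO
     */
    tx_queue = efx_get_tx_queue(efx, index, type);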
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
new file mode 100644
index 000000000000..93a32d61944f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic_common.h"
+#include "tx_common.h"
+
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
+ PAGE_SIZE >> EFX_TX_CB_ORDER);
+}
+
+int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Allocate hardware ring, determine TXQ type */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
+ return 0;
+
+fail2:
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
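For example, a requested txq_entries of 700 (assuming EFX_MIN_DMAQ_SIZE is no larger than 1024):

    entries  = max(roundup_pow_of_two(700), EFX_MIN_DMAQ_SIZE);  /* 1024 */
    ptr_mask = entries - 1;                                      /* 0x3ff */
    /* every ring index below then wraps with a cheap AND: */
    slot = tx_queue->insert_count & tx_queue->ptr_mask;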
+
+void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->notify_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_pending = false;
+ tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_siena_ptp_channel(efx));
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+ tx_queue->tso_version = 0;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ if (tx_queue->cb_page) {
+ for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
+ efx_siena_free_buffer(tx_queue->efx,
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
+}
+
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_siena_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
+void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_pending = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!efx_tx_buffer_in_use(buffer)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %d\n",
+ tx_queue->queue, read_ptr);
+ efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
+void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ /* Ensure that read_count is flushed. */
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
+void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ struct efx_nic *efx = tx_queue->efx;
+
+ EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
+
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * efx_dequeue_buffers()) before reading the queue status.
+ */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ if (fill_level <= efx->txq_wake_thresh)
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+
+ efx_siena_xmit_done_check_empty(tx_queue);
+}
+
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
+ */
+void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
+{
+ struct efx_tx_buffer *buffer;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts_compl = 0;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != insert_count) {
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ }
+}
+
+struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len)
+{
+ const struct efx_nic_type *nic_type = tx_queue->efx->type;
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+ if (nic_type->tx_limit_len)
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+ else
+ dma_len = len;
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
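For example, a 9000-byte fragment against a hypothetical 4096-byte per-descriptor limit from ->tx_limit_len():

    /* iteration 1: descriptor for 4096 bytes, EFX_TX_BUF_CONT
     * iteration 2: descriptor for 4096 bytes, EFX_TX_BUF_CONT
     * iteration 3: descriptor for  808 bytes; this final buffer is
     *              returned so the caller can fix up unmap_len,
     *              dma_offset and flags on it
     */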
+
+static int efx_tx_tso_header_length(struct sk_buff *skb)
+{
+ size_t header_len;
+
+ if (skb->encapsulation)
+ header_len = skb_inner_transport_header(skb) -
+ skb->data +
+ (inner_tcp_hdr(skb)->doff << 2u);
+ else
+ header_len = skb_transport_header(skb) - skb->data +
+ (tcp_hdr(skb)->doff << 2u);
+ return header_len;
+}
+
+/* Map all data from an SKB for DMA and create descriptors on the queue. */
+int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
+ unsigned short dma_flags;
+ size_t len, unmap_len;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
+
+ /* Map header data. */
+ len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+
+ if (segment_count) {
+ /* For TSO we need to put the header into a separate
+ * descriptor. Map this separately if necessary.
+ */
+ size_t header_len = efx_tx_tso_header_length(skb);
+
+ if (header_len != len) {
+ tx_queue->tso_long_headers++;
+ efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
+ len -= header_len;
+ dma_addr += header_len;
+ }
+ }
+
+ /* Add descriptors for each fragment. */
+ do {
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
+
+ buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
+
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ dma_flags = 0;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
+unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for option descriptors */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
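Plugging in typical values (EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS == 17 are the usual definitions, but both should be checked against the headers for the build in question):

    /* base:               100 * 2 + 17 = 217 descriptors
     * EF10 option descs:  + 100        -> 317
     * PAGE_SIZE == EFX_PAGE_SIZE on most builds, so no further term
     */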
+
+/*
+ * Fallback to software TSO.
+ *
+ * This is used if we are unable to send a GSO packet through hardware TSO.
+ * This should only ever happen due to per-queue restrictions - unsupported
+ * packets should first be filtered by the feature flags.
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ struct sk_buff *segments, *next;
+
+ segments = skb_gso_segment(skb, 0);
+ if (IS_ERR(segments))
+ return PTR_ERR(segments);
+
+ dev_consume_skb_any(skb);
+
+ skb_list_walk_safe(segments, skb, next) {
+ skb_mark_not_on_list(skb);
+ efx_enqueue_skb(tx_queue, skb);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.h b/drivers/net/ethernet/sfc/siena/tx_common.h
new file mode 100644
index 000000000000..31ca52a25015
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx_common.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_COMMON_H
+#define EFX_TX_COMMON_H
+
+int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
+
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+ return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
+void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+
+void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count);
+
+struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len);
+int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count);
+
+unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx);
+int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+
+extern bool efx_siena_separate_tx_channels;
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/vfdi.h b/drivers/net/ethernet/sfc/siena/vfdi.h
new file mode 100644
index 000000000000..480b872eb4d1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/vfdi.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
+ */
+#ifndef _VFDI_H
+#define _VFDI_H
+
+/**
+ * DOC: Virtual Function Driver Interface
+ *
+ * This file contains software structures used to form a two way
+ * communication channel between the VF driver and the PF driver,
+ * named Virtual Function Driver Interface (VFDI).
+ *
+ * For the purposes of VFDI, a page is a memory region with size and
+ * alignment of 4K. All addresses are DMA addresses to be used within
+ * the domain of the relevant VF.
+ *
+ * The only hardware-defined channels for a VF driver to communicate
+ * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
+ * registers). Writing to these registers generates an event with
+ * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
+ * and USER_EV_REG_VALUE set to the value written. The PF driver may
+ * direct or disable delivery of these events by setting
+ * %FR_CZ_USR_EV_CFG.
+ *
+ * The PF driver can send arbitrary events to arbitrary event queues.
+ * However, for consistency, VFDI events from the PF are defined to
+ * follow the same form and be sent to the first event queue assigned
+ * to the VF while that queue is enabled by the VF driver.
+ *
+ * The general form of the variable bits of VFDI events is:
+ *
+ *     0               16                        24      31
+ *    | DATA          | TYPE                    | SEQ    |
+ *
+ * SEQ is a sequence number which should be incremented by 1 (modulo
+ * 256) for each event. The sequence numbers used in each direction
+ * are independent.
+ *
+ * The VF submits requests of type &struct vfdi_req by sending the
+ * address of the request (ADDR) in a series of 4 events:
+ *
+ *     0               16                        24      31
+ *    | ADDR[0:15]    | VFDI_EV_TYPE_REQ_WORD0  | SEQ    |
+ *    | ADDR[16:31]   | VFDI_EV_TYPE_REQ_WORD1  | SEQ+1  |
+ *    | ADDR[32:47]   | VFDI_EV_TYPE_REQ_WORD2  | SEQ+2  |
+ *    | ADDR[48:63]   | VFDI_EV_TYPE_REQ_WORD3  | SEQ+3  |
+ *
+ * The address must be page-aligned. After receiving such a valid
+ * series of events, the PF driver will attempt to read the request
+ * and write a response to the same address. In case of an invalid
+ * sequence of events or a DMA error, there will be no response.
+ *
+ * The VF driver may request that the PF driver writes status
+ * information into its domain asynchronously. After writing the
+ * status, the PF driver will send an event of the form:
+ *
+ *     0               16                        24      31
+ *    | reserved      | VFDI_EV_TYPE_STATUS     | SEQ    |
+ *
+ * In case the VF must be reset for any reason, the PF driver will
+ * send an event of the form:
+ *
+ *     0               16                        24      31
+ *    | reserved      | VFDI_EV_TYPE_RESET      | SEQ    |
+ *
+ * It is then the responsibility of the VF driver to request
+ * reinitialisation of its queues.
+ */
+#define VFDI_EV_SEQ_LBN 24
+#define VFDI_EV_SEQ_WIDTH 8
+#define VFDI_EV_TYPE_LBN 16
+#define VFDI_EV_TYPE_WIDTH 8
+#define VFDI_EV_TYPE_REQ_WORD0 0
+#define VFDI_EV_TYPE_REQ_WORD1 1
+#define VFDI_EV_TYPE_REQ_WORD2 2
+#define VFDI_EV_TYPE_REQ_WORD3 3
+#define VFDI_EV_TYPE_STATUS 4
+#define VFDI_EV_TYPE_RESET 5
+#define VFDI_EV_DATA_LBN 0
+#define VFDI_EV_DATA_WIDTH 16
+
+struct vfdi_endpoint {
+ u8 mac_addr[ETH_ALEN];
+ __be16 tci;
+};
+
+/**
+ * enum vfdi_op - VFDI operation enumeration
+ * @VFDI_OP_RESPONSE: Indicates a response to the request.
+ * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
+ * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
+ * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
+ * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
+ * finalize the SRAM entries.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
+ * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
+ * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
+ * from PF and write the initial status.
+ * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
+ * updates from PF.
+ */
+enum vfdi_op {
+ VFDI_OP_RESPONSE = 0,
+ VFDI_OP_INIT_EVQ = 1,
+ VFDI_OP_INIT_RXQ = 2,
+ VFDI_OP_INIT_TXQ = 3,
+ VFDI_OP_FINI_ALL_QUEUES = 4,
+ VFDI_OP_INSERT_FILTER = 5,
+ VFDI_OP_REMOVE_ALL_FILTERS = 6,
+ VFDI_OP_SET_STATUS_PAGE = 7,
+ VFDI_OP_CLEAR_STATUS_PAGE = 8,
+ VFDI_OP_LIMIT,
+};
+
+/* Response codes for VFDI operations. Other values may be used in future. */
+#define VFDI_RC_SUCCESS 0
+#define VFDI_RC_ENOMEM (-12)
+#define VFDI_RC_EINVAL (-22)
+#define VFDI_RC_EOPNOTSUPP (-95)
+#define VFDI_RC_ETIMEDOUT (-110)
+
+/**
+ * struct vfdi_req - Request from VF driver to PF driver
+ * @op: Operation code or response indicator, taken from &enum vfdi_op.
+ * @rc: Response code. Set to 0 on success or a negative error code on failure.
+ * @u.init_evq.index: Index of event queue to create.
+ * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
+ * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
+ * address of each page backing the event queue.
+ * @u.init_rxq.index: Index of receive queue to create.
+ * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
+ * @u.init_rxq.evq: Instance of event queue to target receive events at.
+ * @u.init_rxq.label: Label used in receive events.
+ * @u.init_rxq.flags: Unused.
+ * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
+ * address of each page backing the receive queue.
+ * @u.init_txq.index: Index of transmit queue to create.
+ * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
+ * @u.init_txq.evq: Instance of event queue to target transmit completion
+ * events at.
+ * @u.init_txq.label: Label used in transmit completion events.
+ * @u.init_txq.flags: Checksum offload flags.
+ * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
+ * address of each page backing the transmit queue.
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
+ * all traffic at this receive queue.
+ * @u.mac_filter.flags: MAC filter flags.
+ * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
+ * This address must be page-aligned and the PF may write up to a
+ * whole page (allowing for extension of the structure).
+ * @u.set_status_page.peer_page_count: Number of additional pages the VF
+ * has provided into which peer addresses may be DMAd.
+ * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
+ * If the number of peers exceeds 256, then the VF must provide
+ * additional pages in this array. The PF will then DMA up to
+ * 512 vfdi_endpoint structures into each page. These addresses
+ * must be page-aligned.
+ */
+struct vfdi_req {
+ u32 op;
+ u32 reserved1;
+ s32 rc;
+ u32 reserved2;
+ union {
+ struct {
+ u32 index;
+ u32 buf_count;
+ u64 addr[];
+ } init_evq;
+ struct {
+ u32 index;
+ u32 buf_count;
+ u32 evq;
+ u32 label;
+ u32 flags;
+#define VFDI_RXQ_FLAG_SCATTER_EN 1
+ u32 reserved;
+ u64 addr[];
+ } init_rxq;
+ struct {
+ u32 index;
+ u32 buf_count;
+ u32 evq;
+ u32 label;
+ u32 flags;
+#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
+#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
+ u32 reserved;
+ u64 addr[];
+ } init_txq;
+ struct {
+ u32 rxq;
+ u32 flags;
+#define VFDI_MAC_FILTER_FLAG_RSS 1
+#define VFDI_MAC_FILTER_FLAG_SCATTER 2
+ } mac_filter;
+ struct {
+ u64 dma_addr;
+ u64 peer_page_count;
+ u64 peer_page_addr[];
+ } set_status_page;
+ } u;
+};
+
+/**
+ * struct vfdi_status - Status provided by PF driver to VF driver
+ * @generation_start: A generation count DMA'd to VF *before* the
+ * rest of the structure.
+ * @generation_end: A generation count DMA'd to VF *after* the
+ * rest of the structure.
+ * @version: Version of this structure; currently set to 1. Later
+ * versions must either be layout-compatible or only be sent to VFs
+ * that specifically request them.
+ * @length: Total length of this structure including embedded tables
+ * @vi_scale: log2 of the number of VIs available on this VF. This quantity
+ * is used by the hardware for register decoding.
+ * @max_tx_channels: The maximum number of transmit queues the VF can use.
+ * @rss_rxq_count: The number of receive queues present in the shared RSS
+ * indirection table.
+ * @peer_count: Total number of peers in the complete peer list. If larger
+ * than ARRAY_SIZE(%peers), then the VF must provide sufficient
+ * additional pages each of which is filled with vfdi_endpoint structures.
+ * @local: The MAC address and outer VLAN tag of *this* VF
+ * @peers: Table of peer addresses. The @tci fields in these structures
+ * are currently unused and must be ignored. Additional peers are
+ * written into any additional pages provided by the VF.
+ * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
+ * for interrupt moderation timers, in nanoseconds. This member is only
+ * present if @length is sufficiently large.
+ */
+struct vfdi_status {
+ u32 generation_start;
+ u32 generation_end;
+ u32 version;
+ u32 length;
+ u8 vi_scale;
+ u8 max_tx_channels;
+ u8 rss_rxq_count;
+ u8 reserved1;
+ u16 peer_count;
+ u16 reserved2;
+ struct vfdi_endpoint local;
+ struct vfdi_endpoint peers[256];
+
+ /* Members below here extend version 1 of this structure */
+ u32 timer_quantum_ns;
+};
+
+#endif
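As an illustration of the event encoding documented above, a VF driver could post the DMA address of a request as the four-event sequence like this. This is a sketch only: the mailbox type and write helper are hypothetical, while the VFDI_EV_* field layout is the one defined in this header.

    static void vfdi_post_request(struct vf_mailbox *mbox, u64 addr, u8 *seq)
    {
            unsigned int i;

            for (i = 0; i < 4; i++) {
                    u32 ev = ((u32)(*seq)++ << VFDI_EV_SEQ_LBN) |
                             ((VFDI_EV_TYPE_REQ_WORD0 + i) << VFDI_EV_TYPE_LBN) |
                             (((u32)(addr >> (16 * i))) & 0xffff); /* VFDI_EV_DATA */

                    vf_mailbox_write(mbox, ev);  /* hypothetical helper */
            }
    }

The u8 sequence counter wraps naturally, matching the modulo-256 rule above.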
diff --git a/drivers/net/ethernet/sfc/siena/workarounds.h b/drivers/net/ethernet/sfc/siena/workarounds.h
new file mode 100644
index 000000000000..42fb143a94ab
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/workarounds.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+#define EFX_WORKAROUND_10G(efx) 1
+
+/* Bit-bashed I2C reads cause performance drop */
+#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
+/* Legacy interrupt storm when interrupt fifo fills */
+#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+
+/* Moderation timer access must go through MCDI */
+#define EFX_EF10_WORKAROUND_61265(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
+
+#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 3f241e6c881a..fc9f0189f285 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -10,7 +10,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_mac)
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
@@ -21,7 +21,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos, __be16 vlan_proto)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_vlan) {
if ((vlan & ~VLAN_VID_MASK) ||
@@ -40,7 +40,7 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_spoofchk)
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
@@ -51,7 +51,7 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_get_vf_config)
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
@@ -62,7 +62,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_link_state)
return efx->type->sriov_set_vf_link_state(efx, vf_i,
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
new file mode 100644
index 000000000000..3478860d4023
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <net/pkt_cls.h>
+#include "tc.h"
+#include "tc_bindings.h"
+#include "mae.h"
+#include "ef100_rep.h"
+#include "efx.h"
+
+#define EFX_EFV_PF NULL
+/* Look up the representor information (efv) for a device.
+ * May return NULL for the PF (us), or an error pointer for a device that
+ * isn't supported as a TC offload endpoint.
+ */
+static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
+ struct net_device *dev)
+{
+ struct efx_rep *efv;
+
+ if (!dev)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it us (the PF)? */
+ if (dev == efx->net_dev)
+ return EFX_EFV_PF;
+ /* Is it an efx vfrep at all? */
+ if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it ours? We don't support TC rules that include another
+ * EF100's netdevices (not even on another port of the same NIC).
+ */
+ efv = netdev_priv(dev);
+ if (efv->parent != efx)
+ return ERR_PTR(-EOPNOTSUPP);
+ return efv;
+}
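The return is effectively tri-state, so callers need both checks; handle_pf() and handle_rep() below are hypothetical stand-ins for whatever the caller does next:

    struct efx_rep *efv = efx_tc_flower_lookup_efv(efx, dev);

    if (IS_ERR(efv))
            return PTR_ERR(efv);  /* foreign device, not offloadable */
    if (efv == EFX_EFV_PF)        /* NULL means the PF itself */
            handle_pf(efx);
    else
            handle_rep(efv);      /* one of our representors */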
+
+/* Convert a driver-internal vport ID into an external device (wire or VF) */
+static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
+{
+ u32 mport;
+
+ if (IS_ERR(efv))
+ return PTR_ERR(efv);
+ if (!efv) /* device is PF (us) */
+ efx_mae_mport_wire(efx, &mport);
+ else /* device is repr */
+ efx_mae_mport_mport(efx, efv->mport, &mport);
+ return mport;
+}
+
+static const struct rhashtable_params efx_tc_match_action_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct efx_tc_flow_rule, cookie),
+ .head_offset = offsetof(struct efx_tc_flow_rule, linkage),
+};
+
+static void efx_tc_free_action_set(struct efx_nic *efx,
+ struct efx_tc_action_set *act, bool in_hw)
+{
+ /* Failure paths call this on the 'running action' with in_hw=false,
+ * because if the alloc had succeeded we would have put it in acts.list
+ * and it would no longer be in act.
+ */
+ if (in_hw) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ /* in_hw is true iff we are on an acts.list; make sure to
+ * remove ourselves from that list before we are freed.
+ */
+ list_del(&act->list);
+ }
+ kfree(act);
+}
+
+static void efx_tc_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts,
+ bool in_hw)
+{
+ struct efx_tc_action_set *act, *next;
+
+ /* Failure paths set in_hw=false, because usually the acts didn't get
+ * to efx_mae_alloc_action_set_list(); if they did, the failure tree
+ * has a separate efx_mae_free_action_set_list() before calling us.
+ */
+ if (in_hw)
+ efx_mae_free_action_set_list(efx, acts);
+ /* Any act that's on the list will be in_hw even if the list isn't */
+ list_for_each_entry_safe(act, next, &acts->list, list)
+ efx_tc_free_action_set(efx, act, true);
+ /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
+}
+
+static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
+{
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static void efx_tc_flow_free(void *ptr, void *arg)
+{
+ struct efx_tc_flow_rule *rule = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc rule %lx still present at teardown, removing\n",
+ rule->cookie);
+
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+
+ kfree(rule);
+}
+
+static int efx_tc_flower_parse_match(struct efx_nic *efx,
+ struct flow_rule *rule,
+ struct efx_tc_match *match,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control fm;
+
+ flow_rule_match_control(rule, &fm);
+
+ if (fm.mask->flags) {
+ efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
+ fm.mask->flags);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC))) {
+ efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic fm;
+
+ flow_rule_match_basic(rule, &fm);
+ if (fm.mask->n_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ if (fm.mask->ip_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_tc_flower_replace(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc,
+ struct efx_rep *efv)
+{
+ struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_action_set *act = NULL;
+ const struct flow_action_entry *fa;
+ struct efx_rep *from_efv, *to_efv;
+ struct efx_tc_match match;
+ s64 rc;
+ int i;
+
+ if (!tc_can_offload_extack(efx->net_dev, extack))
+ return -EOPNOTSUPP;
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+ if (WARN_ON(!efx->tc->up))
+ return -ENETDOWN;
+
+ from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
+ if (IS_ERR(from_efv)) {
+ /* Might be a tunnel decap rule from an indirect block.
+ * Support for those not implemented yet.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ if (efv != from_efv) {
+ /* can't happen */
+ efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
+ netdev_name(net_dev), efv ? "non-" : "",
+ from_efv ? "non-" : "");
+ if (efv)
+ NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
+ else
+ NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
+ return -EINVAL;
+ }
+
+ /* Parse match */
+ memset(&match, 0, sizeof(match));
+ rc = efx_tc_flower_external_mport(efx, from_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
+ return rc;
+ }
+ match.value.ingress_port = rc;
+ match.mask.ingress_port = ~0;
+ rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
+ if (rc)
+ return rc;
+
+ if (tc->common.chain_index) {
+ EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
+ return -EOPNOTSUPP;
+ }
+ match.mask.recirc_id = 0xff;
+
+ rc = efx_mae_match_check_caps(efx, &match.mask, extack);
+ if (rc)
+ return rc;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&rule->acts.list);
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+
+ flow_action_for_each(i, fa, &fr->action) {
+ struct efx_tc_action_set save;
+
+ if (!act) {
+ /* more actions after a non-pipe action */
+ EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
+ rc = -EINVAL;
+ goto release;
+ }
+
+ switch (fa->id) {
+ case FLOW_ACTION_DROP:
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* end of the line */
+ break;
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED:
+ save = *act;
+ to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
+ if (IS_ERR(to_efv)) {
+ EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
+ rc = PTR_ERR(to_efv);
+ goto release;
+ }
+ rc = efx_tc_flower_external_mport(efx, to_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
+ goto release;
+ }
+ act->dest_mport = rc;
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL;
+ if (fa->id == FLOW_ACTION_REDIRECT)
+ break; /* end of the line */
+ /* Mirror, so continue on with saved act */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+ *act = save;
+ break;
+ default:
+ efx_tc_err(efx, "Unhandled action %u\n", fa->id);
+ rc = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ goto release;
+ }
+ }
+
+ if (act) {
+ /* Not shot/redirected, so deliver to default dest */
+ if (from_efv == EFX_EFV_PF)
+ /* Rule applies to traffic from the wire,
+ * and default dest is thus the PF
+ */
+ efx_mae_mport_uplink(efx, &act->dest_mport);
+ else
+ /* Representor, so rule applies to traffic from
+ * representee, and default dest is thus the rep.
+ * All reps use the same mport for delivery
+ */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &act->dest_mport);
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* Prevent double-free in error path */
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed filter (cookie %lx)\n",
+ tc->cookie);
+
+ rule->match = match;
+
+ rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
+ goto release;
+ }
+ rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
+ rule->acts.fw_id, &rule->fw_id);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
+ goto release_acts;
+ }
+ return 0;
+
+release_acts:
+ efx_mae_free_action_set_list(efx, &rule->acts);
+release:
+ /* We failed to insert the rule, so free up any entries we created in
+ * subsidiary tables.
+ */
+ if (act)
+ efx_tc_free_action_set(efx, act, false);
+ if (rule) {
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ efx_tc_free_action_set_list(efx, &rule->acts, false);
+ }
+ kfree(rule);
+ return rc;
+}
+
+static int efx_tc_flower_destroy(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule;
+
+ rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
+ efx_tc_match_action_ht_params);
+ if (!rule) {
+ /* Only log a message if we're the ingress device. Otherwise
+ * it's a foreign filter and we might just not have been
+ * interested (e.g. we might not have been the egress device
+ * either).
+ */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found to remove\n", tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+
+ /* Remove it from HW */
+ efx_tc_delete_rule(efx, rule);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
+ efx_tc_match_action_ht_params);
+ netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
+ kfree(rule);
+ return 0;
+}
+
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv)
+{
+ int rc;
+
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->tc->mutex);
+ switch (tc->command) {
+ case FLOW_CLS_REPLACE:
+ rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
+ break;
+ case FLOW_CLS_DESTROY:
+ rc = efx_tc_flower_destroy(efx, net_dev, tc);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&efx->tc->mutex);
+ return rc;
+}
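+
+/* Call-flow overview (illustrative): efx_tc_setup() and the block
+ * callbacks in tc_bindings.c both funnel TC_SETUP_CLSFLOWER requests
+ * into efx_tc_flower() above, which dispatches on tc->command:
+ *
+ * FLOW_CLS_REPLACE -> efx_tc_flower_replace()
+ * FLOW_CLS_DESTROY -> efx_tc_flower_destroy()
+ *
+ * all under efx->tc->mutex, which serialises hashtable and MAE access.
+ */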
+
+static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
+ u32 eg_port, struct efx_tc_flow_rule *rule)
+{
+ struct efx_tc_action_set_list *acts = &rule->acts;
+ struct efx_tc_match *match = &rule->match;
+ struct efx_tc_action_set *act;
+ int rc;
+
+ match->value.ingress_port = ing_port;
+ match->mask.ingress_port = ~0;
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (!act)
+ return -ENOMEM;
+ act->deliver = 1;
+ act->dest_mport = eg_port;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc)
+ goto fail1;
+ EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
+ list_add_tail(&act->list, &acts->list);
+ rc = efx_mae_alloc_action_set_list(efx, acts);
+ if (rc)
+ goto fail2;
+ rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
+ acts->fw_id, &rule->fw_id);
+ if (rc)
+ goto fail3;
+ return 0;
+fail3:
+ efx_mae_free_action_set_list(efx, acts);
+fail2:
+ list_del(&act->list);
+ efx_mae_free_action_set(efx, act->fw_id);
+fail1:
+ kfree(act);
+ return rc;
+}
+
+static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_uplink(efx, &ing_port);
+ efx_mae_mport_wire(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_wire(efx, &ing_port);
+ efx_mae_mport_uplink(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
+{
+ struct efx_tc_flow_rule *rule = &efv->dflt;
+ struct efx_nic *efx = efv->parent;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_mport(efx, efv->mport, &ing_port);
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
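+
+/* Default-switching summary (illustrative): the three constructors above
+ * install the EFX_TC_PRIO_DFLT rules, named by ingress port:
+ * PF rule: ingress = PF uplink mport, egress = wire
+ * wire rule: ingress = wire, egress = PF uplink mport
+ * rep rule: ingress = representee mport, egress = shared reps mport
+ * so with no TC rules offloaded, traffic still flows PF <-> wire and
+ * representee <-> representor.
+ */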
+
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule)
+{
+ if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
+ efx_tc_delete_rule(efx, rule);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+{
+ u32 rep_mport_label;
+ int rc;
+
+ rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
+ efx->tc->reps_mport_id, rep_mport_label);
+ /* Use mport *selector* as vport ID */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &efx->tc->reps_mport_vport_id);
+ return 0;
+}
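+
+/* Note: the vport ID derived from the mport selector above is what
+ * efx_tc_insert_rep_filters() plugs into its promisc/allmulti filters,
+ * so representee traffic delivered to the reps mport reaches the
+ * representor RX path.
+ */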
+
+static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
+{
+ efx_mae_free_mport(efx, efx->tc->reps_mport_id);
+ efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
+}
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx)
+{
+ struct efx_filter_spec promisc, allmulti;
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+ if (!efx->tc)
+ return 0;
+ efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_uc_def(&promisc);
+ efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &promisc, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_uc = rc;
+ efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_mc_def(&allmulti);
+ efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &allmulti, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_mc = rc;
+ return 0;
+}
+
+void efx_tc_remove_rep_filters(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return;
+ if (!efx->tc)
+ return;
+ if (efx->tc->reps_filter_mc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
+ efx->tc->reps_filter_mc = -1;
+ if (efx->tc->reps_filter_uc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
+ efx->tc->reps_filter_uc = -1;
+}
+
+int efx_init_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mae_get_caps(efx, efx->tc->caps);
+ if (rc)
+ return rc;
+ if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
+ /* Firmware supports some match fields the driver doesn't know
+ * about. Not fatal, unless one of those unknown fields is required
+ * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS), in which case we have no way
+ * to detect that.
+ */
+ netif_warn(efx, probe, efx->net_dev,
+ "FW reports additional match fields %u\n",
+ efx->tc->caps->match_field_count);
+ if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
+ netif_err(efx, probe, efx->net_dev,
+ "Too few action prios supported (have %u, need %u)\n",
+ efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
+ return -EIO;
+ }
+ rc = efx_tc_configure_default_rule_pf(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_default_rule_wire(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_rep_mport(efx);
+ if (rc)
+ return rc;
+ efx->tc->up = true;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+void efx_fini_tc(struct efx_nic *efx)
+{
+ /* We can get called even if efx_init_struct_tc() failed */
+ if (!efx->tc)
+ return;
+ if (efx->tc->up)
+ flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
+ efx_tc_deconfigure_rep_mport(efx);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+ efx->tc->up = false;
+}
+
+int efx_init_struct_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
+ if (!efx->tc)
+ return -ENOMEM;
+ efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
+ if (!efx->tc->caps) {
+ rc = -ENOMEM;
+ goto fail_alloc_caps;
+ }
+ INIT_LIST_HEAD(&efx->tc->block_list);
+
+ mutex_init(&efx->tc->mutex);
+ rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
+ if (rc < 0)
+ goto fail_match_action_ht;
+ efx->tc->reps_filter_uc = -1;
+ efx->tc->reps_filter_mc = -1;
+ INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
+ efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+ efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ return 0;
+fail_match_action_ht:
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
+fail_alloc_caps:
+ kfree(efx->tc);
+ efx->tc = NULL;
+ return rc;
+}
+
+void efx_fini_struct_tc(struct efx_nic *efx)
+{
+ if (!efx->tc)
+ return;
+
+ mutex_lock(&efx->tc->mutex);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
+ efx);
+ mutex_unlock(&efx->tc->mutex);
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
+ kfree(efx->tc);
+ efx->tc = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
new file mode 100644
index 000000000000..196fd74ed973
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_H
+#define EFX_TC_H
+#include <net/flow_offload.h>
+#include <linux/rhashtable.h>
+#include "net_driver.h"
+
+/* Error reporting: convenience macros. For indicating why a given filter
+ * insertion is not supported; errors in internal operation or in the
+ * hardware should be netif_err()s instead.
+ */
+/* Used when error message is constant. */
+#define EFX_TC_ERR_MSG(efx, extack, message) do { \
+ NL_SET_ERR_MSG_MOD(extack, message); \
+ if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, "%s\n", message); \
+} while (0)
+/* Used when error message is not constant; caller should also supply a
+ * constant extack message with NL_SET_ERR_MSG_MOD().
+ */
+#define efx_tc_err(efx, fmt, args...) do { \
+ if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, fmt, ##args); \
+} while (0)
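+
+/* Usage sketch (illustrative): a constant message needs only
+ *
+ * EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
+ *
+ * while a formatted message pairs efx_tc_err() with a constant extack
+ * message:
+ *
+ * efx_tc_err(efx, "Unhandled action %u\n", fa->id);
+ * NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ */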
+
+struct efx_tc_action_set {
+ u16 deliver:1;
+ u32 dest_mport;
+ u32 fw_id; /* index of this entry in firmware actions table */
+ struct list_head list;
+};
+
+struct efx_tc_match_fields {
+ /* L1 */
+ u32 ingress_port;
+ u8 recirc_id;
+};
+
+struct efx_tc_match {
+ struct efx_tc_match_fields value;
+ struct efx_tc_match_fields mask;
+};
+
+struct efx_tc_action_set_list {
+ struct list_head list;
+ u32 fw_id;
+};
+
+struct efx_tc_flow_rule {
+ unsigned long cookie;
+ struct rhash_head linkage;
+ struct efx_tc_match match;
+ struct efx_tc_action_set_list acts;
+ u32 fw_id;
+};
+
+enum efx_tc_rule_prios {
+ EFX_TC_PRIO_TC, /* Rule inserted by TC */
+ EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
+ EFX_TC_PRIO__NUM
+};
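+
+/* Assumed MAE semantics: numerically lower values take precedence, so
+ * TC rules at EFX_TC_PRIO_TC override the EFX_TC_PRIO_DFLT switching
+ * rules; efx_init_tc() checks that the firmware supports at least
+ * EFX_TC_PRIO__NUM distinct priorities.
+ */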
+
+/**
+ * struct efx_tc_state - control plane data for TC offload
+ *
+ * @caps: MAE capabilities reported by MCDI
+ * @block_list: List of &struct efx_tc_block_binding
+ * @mutex: Used to serialise operations on TC hashtables
+ * @match_action_ht: Hashtable of TC match-action rules
+ * @reps_mport_id: MAE port allocated for representor RX
+ * @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
+ * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
+ * @reps_mport_vport_id: vport_id for representor RX filters
+ * @dflt: Match-action rules for default switching; at priority
+ * %EFX_TC_PRIO_DFLT. Named by *ingress* port
+ * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
+ * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ * @up: have TC datastructures been set up?
+ */
+struct efx_tc_state {
+ struct mae_caps *caps;
+ struct list_head block_list;
+ struct mutex mutex;
+ struct rhashtable match_action_ht;
+ u32 reps_mport_id, reps_mport_vport_id;
+ s32 reps_filter_uc, reps_filter_mc;
+ struct {
+ struct efx_tc_flow_rule pf;
+ struct efx_tc_flow_rule wire;
+ } dflt;
+ bool up;
+};
+
+struct efx_rep;
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule);
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv);
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx);
+void efx_tc_remove_rep_filters(struct efx_nic *efx);
+
+int efx_init_tc(struct efx_nic *efx);
+void efx_fini_tc(struct efx_nic *efx);
+
+int efx_init_struct_tc(struct efx_nic *efx);
+void efx_fini_struct_tc(struct efx_nic *efx);
+
+#endif /* EFX_TC_H */
diff --git a/drivers/net/ethernet/sfc/tc_bindings.c b/drivers/net/ethernet/sfc/tc_bindings.c
new file mode 100644
index 000000000000..c18d64519c2d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_bindings.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc_bindings.h"
+#include "tc.h"
+
+struct efx_tc_block_binding {
+ struct list_head list;
+ struct efx_nic *efx;
+ struct efx_rep *efv;
+ struct net_device *otherdev; /* may actually be us */
+ struct flow_block *block;
+};
+
+static struct efx_tc_block_binding *efx_tc_find_binding(struct efx_nic *efx,
+ struct net_device *otherdev)
+{
+ struct efx_tc_block_binding *binding;
+
+ ASSERT_RTNL();
+ list_for_each_entry(binding, &efx->tc->block_list, list)
+ if (binding->otherdev == otherdev)
+ return binding;
+ return NULL;
+}
+
+static int efx_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct efx_tc_block_binding *binding = cb_priv;
+ struct flow_cls_offload *tcf = type_data;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return efx_tc_flower(binding->efx, binding->otherdev,
+ tcf, binding->efv);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void efx_tc_block_unbind(void *cb_priv)
+{
+ struct efx_tc_block_binding *binding = cb_priv;
+
+ list_del(&binding->list);
+ kfree(binding);
+}
+
+static struct efx_tc_block_binding *efx_tc_create_binding(
+ struct efx_nic *efx, struct efx_rep *efv,
+ struct net_device *otherdev, struct flow_block *block)
+{
+ struct efx_tc_block_binding *binding = kmalloc(sizeof(*binding), GFP_KERNEL);
+
+ if (!binding)
+ return ERR_PTR(-ENOMEM);
+ binding->efx = efx;
+ binding->efv = efv;
+ binding->otherdev = otherdev;
+ binding->block = block;
+ list_add(&binding->list, &efx->tc->block_list);
+ return binding;
+}
+
+int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
+ struct flow_block_offload *tcb, struct efx_rep *efv)
+{
+ struct efx_tc_block_binding *binding;
+ struct flow_block_cb *block_cb;
+ int rc;
+
+ if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+
+ switch (tcb->command) {
+ case FLOW_BLOCK_BIND:
+ binding = efx_tc_create_binding(efx, efv, net_dev, tcb->block);
+ if (IS_ERR(binding))
+ return PTR_ERR(binding);
+ block_cb = flow_block_cb_alloc(efx_tc_block_cb, binding,
+ binding, efx_tc_block_unbind);
+ rc = PTR_ERR_OR_ZERO(block_cb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "bind %sdirect block for device %s, rc %d\n",
+ net_dev == efx->net_dev ? "" :
+ efv ? "semi" : "in",
+ net_dev ? net_dev->name : NULL, rc);
+ if (rc) {
+ list_del(&binding->list);
+ kfree(binding);
+ } else {
+ flow_block_cb_add(block_cb, tcb);
+ }
+ return rc;
+ case FLOW_BLOCK_UNBIND:
+ binding = efx_tc_find_binding(efx, net_dev);
+ if (binding) {
+ block_cb = flow_block_cb_lookup(tcb->block,
+ efx_tc_block_cb,
+ binding);
+ if (block_cb) {
+ flow_block_cb_remove(block_cb, tcb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "unbound %sdirect block for device %s\n",
+ net_dev == efx->net_dev ? "" :
+ binding->efv ? "semi" : "in",
+ net_dev ? net_dev->name : NULL);
+ return 0;
+ }
+ }
+ /* If we're in driver teardown, then we expect to have
+ * already unbound all our blocks (we did it early while
+ * we still had MCDI to remove the filters), so getting
+ * unbind callbacks now isn't a problem.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ !efx->tc->up, warn,
+ "%sdirect block unbind for device %s, was never bound\n",
+ net_dev == efx->net_dev ? "" : "in",
+ net_dev ? net_dev->name : NULL);
+ return -ENOENT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type,
+ void *type_data, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct flow_block_offload *tcb = type_data;
+ struct efx_tc_block_binding *binding;
+ struct flow_block_cb *block_cb;
+ struct efx_nic *efx = cb_priv;
+ bool is_ovs_int_port;
+ int rc;
+
+ if (!net_dev)
+ return -EOPNOTSUPP;
+
+ if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ is_ovs_int_port = netif_is_ovs_master(net_dev);
+ if (tcb->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ !is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ if (is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ switch (tcb->command) {
+ case FLOW_BLOCK_BIND:
+ binding = efx_tc_create_binding(efx, NULL, net_dev, tcb->block);
+ if (IS_ERR(binding))
+ return PTR_ERR(binding);
+ block_cb = flow_indr_block_cb_alloc(efx_tc_block_cb, binding,
+ binding, efx_tc_block_unbind,
+ tcb, net_dev, sch, data, binding,
+ cleanup);
+ rc = PTR_ERR_OR_ZERO(block_cb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "bind indr block for device %s, rc %d\n",
+ net_dev ? net_dev->name : NULL, rc);
+ if (rc) {
+ list_del(&binding->list);
+ kfree(binding);
+ } else {
+ flow_block_cb_add(block_cb, tcb);
+ }
+ return rc;
+ case FLOW_BLOCK_UNBIND:
+ binding = efx_tc_find_binding(efx, net_dev);
+ if (!binding)
+ return -ENOENT;
+ block_cb = flow_block_cb_lookup(tcb->block,
+ efx_tc_block_cb,
+ binding);
+ if (!block_cb)
+ return -ENOENT;
+ flow_indr_block_cb_remove(block_cb, tcb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "unbind indr block for device %s\n",
+ net_dev ? net_dev->name : NULL);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* .ndo_setup_tc implementation
+ * Entry point for flower block and filter management.
+ */
+int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ if (efx->type->is_vf)
+ return -EOPNOTSUPP;
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ if (type == TC_SETUP_CLSFLOWER)
+ return efx_tc_flower(efx, net_dev, type_data, NULL);
+ if (type == TC_SETUP_BLOCK)
+ return efx_tc_setup_block(net_dev, efx, type_data, NULL);
+
+ return -EOPNOTSUPP;
+}
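+
+/* Illustrative usage from userspace (assumed netdev names: PF eth0,
+ * representor eth0r; standard iproute2 flower syntax):
+ *
+ * tc qdisc add dev eth0r ingress
+ * tc filter add dev eth0r ingress protocol ip flower skip_sw \
+ * action mirred egress redirect dev eth0
+ *
+ * The resulting flow_cls_offload arrives via efx_tc_block_cb() and is
+ * handled by efx_tc_flower().
+ */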
diff --git a/drivers/net/ethernet/sfc/tc_bindings.h b/drivers/net/ethernet/sfc/tc_bindings.h
new file mode 100644
index 000000000000..c210bb09150e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_bindings.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_BINDINGS_H
+#define EFX_TC_BINDINGS_H
+#include "net_driver.h"
+
+#include <net/sch_generic.h>
+
+struct efx_rep;
+
+void efx_tc_block_unbind(void *cb_priv);
+int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
+ struct flow_block_offload *tcb, struct efx_rep *efv);
+int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data);
+
+int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type,
+ void *type_data, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+#endif /* EFX_TC_BINDINGS_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index d16e031e95f4..c5f88f7a7a04 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (unlikely(!tx_queue))
return -EINVAL;
+ if (!tx_queue->initialised)
+ return -EINVAL;
+
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
@@ -509,7 +512,7 @@ unlock:
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
@@ -524,7 +527,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
- unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
+ unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
/* There may be existing transmits on the channel that are
* waiting for this packet to trigger the doorbell write.
* We need to send the packets at this point.
@@ -545,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
@@ -555,6 +559,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
bool finished = false;
@@ -576,7 +581,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
/* Need to check the flag before dequeueing. */
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -585,7 +591,7 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
- EFX_WARN_ON_PARANOID(pkts_compl != 1);
+ EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
efx_xmit_done_check_empty(tx_queue);
}
@@ -605,7 +611,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data;
unsigned tc, num_tc;
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index d530cde2b864..67e789b96c43 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -101,15 +101,19 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
+ tx_queue->initialised = false;
+
if (!tx_queue->buffer)
return;
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
++tx_queue->read_count;
}
@@ -144,7 +148,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -162,9 +167,15 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->flags & EFX_TX_BUF_SKB) {
struct sk_buff *skb = (struct sk_buff *)buffer->skb;
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
+ if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
+ EFX_WARN_ON_PARANOID(!efv_pkts_compl);
+ (*efv_pkts_compl)++;
+ } else {
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
+
if (tx_queue->timestamping &&
(tx_queue->completed_timestamp_major ||
tx_queue->completed_timestamp_minor)) {
@@ -197,7 +208,8 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -216,7 +228,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
+ efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -239,15 +252,17 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
- if (pkts_compl > 1)
+ if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
@@ -272,6 +287,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
+ unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
@@ -280,7 +296,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
}
}
@@ -414,7 +431,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EFX_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+ DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+ EFX_PAGE_SIZE));
return max_descs;
}
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index bbab7f248250..d87aecbc7bf1 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -19,7 +19,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
- unsigned int *bytes_compl);
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index e2d009866a7b..8fc3f5272fa7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1158,9 +1158,9 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
static void ioc3_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
- strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(info->driver, IOC3_NAME, sizeof(info->driver));
+ strscpy(info->version, IOC3_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 216bb2d34d7c..dda4e488c77a 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1769,9 +1769,9 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(tp->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 23a336c5096e..cb7fec226cab 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2027,9 +2027,9 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ strscpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sis_priv->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 72e42a868346..2524c907f386 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -8,7 +8,7 @@ config NET_VENDOR_SMSC
default y
depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
ISA || MAC || MIPS || NIOS2 || PCI || \
- PCMCIA || SUPERH || XTENSA || H8300 || COMPILE_TEST
+ PCMCIA || SUPERH || XTENSA || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -40,7 +40,7 @@ config SMC91X
select MII
depends on !OF || GPIOLIB
depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
- MIPS || NIOS2 || SUPERH || XTENSA || H8300 || COMPILE_TEST
+ MIPS || NIOS2 || SUPERH || XTENSA || COMPILE_TEST
help
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index a0654e88444c..013e90d69182 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -482,7 +482,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &epic_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &ep->napi, epic_poll, 64);
+ netif_napi_add(dev, &ep->napi, epic_poll);
ret = register_netdev(dev);
if (ret < 0)
@@ -1392,9 +1392,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1515,14 +1515,14 @@ static void epic_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
+ unregister_netdev(dev);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
ep->tx_ring_dma);
dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
ep->rx_ring_dma);
- unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
- pci_release_regions(pdev);
free_netdev(dev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
/* pci_power_off(pdev, -1); */
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index dd6f69ced4ee..52ecfb461c41 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -140,7 +140,7 @@ static void PRINT_PKT(u_char *buf, int length)
pr_cont("\n");
}
#else
-#define PRINT_PKT(x...) do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
#endif
@@ -430,7 +430,7 @@ static inline void smc911x_rcv(struct net_device *dev)
SMC_PULL_DATA(lp, data, pkt_len+2+3);
DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
- PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+ PRINT_PKT(data, min(pkt_len - 4, 64U));
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
@@ -480,7 +480,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
SMC_SET_TX_FIFO(lp, cmdB);
DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
- PRINT_PKT(buf, len <= 64 ? len : 64);
+ PRINT_PKT(buf, min(len, 64U));
/* Send pkt via PIO or DMA */
#ifdef SMC_USE_DMA
@@ -1509,9 +1509,9 @@ smc911x_ethtool_set_link_ksettings(struct net_device *dev,
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, version, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->version, version, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -1648,7 +1648,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev,
return ret;
if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
return ret;
- }
+ }
memcpy(data, eebuf+eeprom->offset, eeprom->len);
return 0;
}
@@ -1667,11 +1667,11 @@ static int smc911x_ethtool_seteeprom(struct net_device *dev,
return ret;
/* write byte */
if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
- return ret;
+ return ret;
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
return ret;
- }
- return 0;
+ }
+ return 0;
}
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 37c822e27207..29bb19f42de9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index a31c159e96ea..35e99bf0c401 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1588,9 +1588,9 @@ smc_ethtool_set_link_ksettings(struct net_device *dev,
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, version, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->version, version, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 387539a8094b..c521ea8f94f2 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -182,17 +182,6 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
#define SMC_IRQ_FLAGS 0
-#elif defined(CONFIG_H8300)
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 0
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 0
-
-#define SMC_inb(a, r) ioread8((a) + (r))
-#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
-#define SMC_insb(a, r, p, l) ioread8_rep((a) + (r), p, l)
-#define SMC_outsb(a, r, p, l) iowrite8_rep((a) + (r), p, l)
-
#else
/*
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 7a50ba00f8ae..a2e511912e6a 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1037,6 +1037,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@@ -1953,9 +1955,9 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
- strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
+ strscpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -2304,7 +2306,8 @@ static int smsc911x_init(struct net_device *dev)
return -ENODEV;
dev->flags |= IFF_MULTICAST;
- netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
+ netif_napi_add_weight(dev, &pdata->napi, smsc911x_poll,
+ SMSC_NAPI_WEIGHT);
dev->netdev_ops = &smsc911x_netdev_ops;
dev->ethtool_ops = &smsc911x_ethtool_ops;
@@ -2431,7 +2434,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (irq == -EPROBE_DEFER) {
retval = -EPROBE_DEFER;
goto out_0;
- } else if (irq <= 0) {
+ } else if (irq < 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
@@ -2586,6 +2589,8 @@ static int smsc911x_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ if (!device_may_wakeup(dev))
+ phy_stop(ndev->phydev);
}
/* enable wake on LAN, energy detection and the external PME
@@ -2627,6 +2632,8 @@ static int smsc911x_resume(struct device *dev)
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
+ if (!device_may_wakeup(dev))
+ phy_start(ndev->phydev);
}
return 0;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d937af18973e..71fbb358bb7d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -215,10 +215,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(pd->pdev),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
@@ -1585,7 +1585,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->netdev_ops = &smsc9420_netdev_ops;
dev->ethtool_ops = &smsc9420_ethtool_ops;
- netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &pd->napi, smsc9420_rx_poll);
result = register_netdev(dev);
if (result) {
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index 409e82b2018a..876410a256c6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -15,7 +15,6 @@
/* interrupt deassertion in multiples of 10us */
#define INT_DEAS_TIME (50)
-#define NAPI_WEIGHT (64)
#define SMSC_BAR (3)
#ifdef __BIG_ENDIAN
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 556bd353dd42..9b46579b5a10 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -526,8 +526,8 @@ static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
static void netsec_et_get_drvinfo(struct net_device *net_device,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "netsec", sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ strscpy(info->driver, "netsec", sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(net_device->dev.parent),
sizeof(info->bus_info));
}
@@ -1044,7 +1044,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
"rx failed to build skb\n");
break;
}
- page_pool_release_page(dring->page_pool, page);
+ skb_mark_for_recycle(skb);
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
@@ -1961,11 +1961,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
ret = PTR_ERR(priv->phydev);
dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
priv->phydev = NULL;
+ mdiobus_unregister(bus);
return -ENODEV;
}
ret = phy_device_register(priv->phydev);
if (ret) {
+ phy_device_free(priv->phydev);
mdiobus_unregister(bus);
dev_err(priv->dev,
"phy_device_register err(%d)\n", ret);
@@ -2093,7 +2095,7 @@ static int netsec_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision %d.%d\n",
hw_ver >> 16, hw_ver & 0xffff);
- netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll);
ndev->netdev_ops = &netsec_netdev_ops;
ndev->ethtool_ops = &netsec_ethtool_ops;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 2c48f8b8ab71..d2c6a5dfdc0e 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -395,8 +395,8 @@ static void ave_ethtool_get_drvinfo(struct net_device *ndev,
{
struct device *dev = ndev->dev.parent;
- strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+ strscpy(info->driver, dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
@@ -1229,6 +1229,8 @@ static int ave_init(struct net_device *ndev)
phy_support_asym_pause(phydev);
+ phydev->mac_managed_pm = true;
+
phy_attached_info(phydev);
return 0;
@@ -1687,10 +1689,8 @@ static int ave_probe(struct platform_device *pdev)
pdev->name, pdev->id);
/* Register as a NAPI supported driver */
- netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
- NAPI_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
+ netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);
platform_set_drvdata(pdev, ndev);
@@ -1758,6 +1758,10 @@ static int ave_resume(struct device *dev)
ave_global_reset(ndev);
+ ret = phy_init_hw(ndev->phydev);
+ if (ret)
+ return ret;
+
ave_ethtool_get_wol(ndev, &wol);
wol.wolopts = priv->wolopts;
__ave_ethtool_set_wol(ndev, &wol);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 929cfc22cd0c..31ff35174034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -91,6 +91,9 @@ config DWMAC_IPQ806X
acceleration features available on this SoC. Network devices
will behave like standard non-accelerated ethernet interfaces.
+ Select the QCOM_SOCINFO config flag to enable specific dwmac
+ fixup based on the ipq806x SoC revision.
+
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index cd478d2cd871..00f6d347eaf7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-#define SGMII_ADAPTER_ENABLE 0x0000
-
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
- void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
- writew(SGMII_ADAPTER_ENABLE,
- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
index 442812c0a4bd..694ac25ef426 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_ENABLE 0x0000
+#define SGMII_ADAPTER_DISABLE 0x0001
+
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d2cdc02d9f94..2e8744ac6b91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- priv->dma_rx_size) *
+ priv->dma_conf.dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
((tx_q->dirty_tx + 1) %
- priv->dma_tx_size))
+ priv->dma_conf.dma_tx_size))
* sizeof(struct dma_desc)));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bc91fd867dcd..80efdeeb0b59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -361,6 +361,7 @@ bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
+ data->sph_disable = 1;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
@@ -444,9 +445,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to probe subdriver: %d\n",
- ret);
+ dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
goto remove_config;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 84651207a1de..bd52fb7cf486 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -197,9 +197,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp")) {
- /* Binding doc describes the propety:
+ /* Binding doc describes the property:
is required by i.MX8MP.
- is optinoal for i.MX8DXL.
+ is optional for i.MX8DXL.
*/
dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
if (IS_ERR(dwmac->intf_regmap))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 9a6d819b84ae..378b4dd826bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->tx_delay = tx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
}
@@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->rx_delay = rx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 8e8778cfbbad..7deb1f817dac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
priv->plat->mdio_bus_data->xpcs_an_inband = false;
} else {
priv->plat->max_speed = 1000;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
priv->plat->mdio_bus_data->xpcs_an_inband = true;
}
}
@@ -298,6 +297,11 @@ static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
*art_time = ns;
}
+static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
+{
+ return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
+}
+
static int intel_crosststamp(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
@@ -313,8 +317,6 @@ static int intel_crosststamp(ktime_t *device,
u32 num_snapshot;
u32 gpio_value;
u32 acr_value;
- int ret;
- u32 v;
int i;
if (!boot_cpu_has(X86_FEATURE_ART))
@@ -328,6 +330,8 @@ static int intel_crosststamp(ktime_t *device,
if (priv->plat->ext_snapshot_en)
return -EBUSY;
+ priv->plat->int_snapshot_en = 1;
+
mutex_lock(&priv->aux_ts_lock);
/* Enable Internal snapshot trigger */
acr_value = readl(ptpaddr + PTP_ACR);
@@ -347,6 +351,7 @@ static int intel_crosststamp(ktime_t *device,
break;
default:
mutex_unlock(&priv->aux_ts_lock);
+ priv->plat->int_snapshot_en = 0;
return -EINVAL;
}
writel(acr_value, ptpaddr + PTP_ACR);
@@ -368,13 +373,12 @@ static int intel_crosststamp(ktime_t *device,
gpio_value |= GMAC_GPO1;
writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
- /* Poll for time sync operation done */
- ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
- (v & GMAC_INT_TSIE), 100, 10000);
-
- if (ret == -ETIMEDOUT) {
- pr_err("%s: Wait for time sync operation timeout\n", __func__);
- return ret;
+ /* Time sync done Indication - Interrupt method */
+ if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
+ stmmac_cross_ts_isr(priv),
+ HZ / 100)) {
+ priv->plat->int_snapshot_en = 0;
+ return -ETIMEDOUT;
}
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
@@ -383,15 +387,16 @@ static int intel_crosststamp(ktime_t *device,
/* Repeat until the timestamps are from the FIFO last segment */
for (i = 0; i < num_snapshot; i++) {
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
*device = ns_to_ktime(ptp_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
*system = convert_art_to_tsc(art_time);
}
system->cycles *= intel_priv->crossts_adj;
+ priv->plat->int_snapshot_en = 0;
return 0;
}
@@ -443,6 +448,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct fwnode_handle *fwnode;
char clk_name[20];
int ret;
int i;
@@ -454,6 +460,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
plat->tso_en = 1;
+ plat->sph_disable = 1;
/* Multiplying factor to the clk_eee_i clock time
* period to make it closer to 100 ns. This value
@@ -560,12 +567,42 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Use the last Rx queue */
plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+ /* For fixed-link setup, we allow phy-mode setting */
+ fwnode = dev_fwnode(&pdev->dev);
+ if (fwnode) {
+ int phy_mode;
+
+ /* "phy-mode" setting is optional. If it is set,
+ * we allow either sgmii or 1000base-x for now.
+ */
+ phy_mode = fwnode_get_phy_mode(fwnode);
+ if (phy_mode >= 0) {
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+ plat->phy_interface = phy_mode;
+ else
+ dev_warn(&pdev->dev, "Invalid phy-mode\n");
+ }
+ }
+
/* Intel mgbe SGMII interface uses pcs-xcps */
- if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
plat->mdio_bus_data->has_xpcs = true;
plat->mdio_bus_data->xpcs_an_inband = true;
}
+ /* For fixed-link setup, we clear xpcs_an_inband */
+ if (fwnode) {
+ struct fwnode_handle *fixed_node;
+
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node)
+ plat->mdio_bus_data->xpcs_an_inband = false;
+
+ fwnode_handle_put(fixed_node);
+ }
+
/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
@@ -573,8 +610,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
plat->ext_snapshot_num = AUX_SNAPSHOT0;
- plat->has_crossts = true;
plat->crosststamp = intel_crosststamp;
+ plat->int_snapshot_en = 0;
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
@@ -592,7 +629,6 @@ static int ehl_common_data(struct pci_dev *pdev,
{
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
- plat->clk_ptp_rate = 200000000;
plat->use_phy_wol = 1;
plat->safety_feat_cfg->tsoee = 1;
@@ -617,6 +653,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->clk_ptp_rate = 204800000;
+
return ehl_common_data(pdev, plat);
}
@@ -630,6 +668,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+ plat->clk_ptp_rate = 204800000;
+
return ehl_common_data(pdev, plat);
}
@@ -646,6 +686,8 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
plat->bus_id = 2;
plat->addr64 = 32;
+ plat->clk_ptp_rate = 200000000;
+
intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
return ehl_common_data(pdev, plat);
@@ -685,6 +727,8 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
plat->bus_id = 3;
plat->addr64 = 32;
+ plat->clk_ptp_rate = 200000000;
+
intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
return ehl_common_data(pdev, plat);
@@ -720,7 +764,8 @@ static int tgl_common_data(struct pci_dev *pdev,
{
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
- plat->clk_ptp_rate = 200000000;
+ plat->clk_ptp_rate = 204800000;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -740,7 +785,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -755,7 +799,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
{
plat->bus_id = 2;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -1072,13 +1115,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
- goto err_dvr_probe;
+ goto err_alloc_irq;
}
return 0;
-err_dvr_probe:
- pci_free_irq_vectors(pdev);
err_alloc_irq:
clk_disable_unprepare(plat->stmmac_clk);
clk_unregister_fixed_rate(plat->stmmac_clk);
@@ -1099,9 +1140,8 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
- pcim_iounmap_regions(pdev, BIT(0));
}
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
@@ -1160,6 +1200,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
+#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac
+#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G 0x51ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
@@ -1177,6 +1219,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index f7dc8458cde8..e888c8a9c830 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -27,6 +27,8 @@
#include <linux/stmmac.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
#include "stmmac_platform.h"
@@ -64,6 +66,17 @@
#define NSS_COMMON_CLK_DIV_SGMII_100 4
#define NSS_COMMON_CLK_DIV_SGMII_10 49
+#define QSGMII_PCS_ALL_CH_CTL 0x80
+#define QSGMII_PCS_CH_SPEED_FORCE BIT(1)
+#define QSGMII_PCS_CH_SPEED_10 0x0
+#define QSGMII_PCS_CH_SPEED_100 BIT(2)
+#define QSGMII_PCS_CH_SPEED_1000 BIT(3)
+#define QSGMII_PCS_CH_SPEED_MASK (QSGMII_PCS_CH_SPEED_FORCE | \
+ QSGMII_PCS_CH_SPEED_10 | \
+ QSGMII_PCS_CH_SPEED_100 | \
+ QSGMII_PCS_CH_SPEED_1000)
+#define QSGMII_PCS_CH_SPEED_SHIFT(x) ((x) * 4)
+
#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
@@ -75,11 +88,20 @@
#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
#define QSGMII_PHY_QSGMII_EN BIT(7)
-#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
-#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
-#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
-#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
-#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_DEEMPHASIS_LVL_MASK GENMASK(11, 10)
+#define QSGMII_PHY_DEEMPHASIS_LVL(x) FIELD_PREP(QSGMII_PHY_DEEMPHASIS_LVL_MASK, (x))
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK GENMASK(14, 12)
+#define QSGMII_PHY_PHASE_LOOP_GAIN(x) FIELD_PREP(QSGMII_PHY_PHASE_LOOP_GAIN_MASK, (x))
+#define QSGMII_PHY_RX_DC_BIAS_MASK GENMASK(19, 18)
+#define QSGMII_PHY_RX_DC_BIAS(x) FIELD_PREP(QSGMII_PHY_RX_DC_BIAS_MASK, (x))
+#define QSGMII_PHY_RX_INPUT_EQU_MASK GENMASK(21, 20)
+#define QSGMII_PHY_RX_INPUT_EQU(x) FIELD_PREP(QSGMII_PHY_RX_INPUT_EQU_MASK, (x))
+#define QSGMII_PHY_CDR_PI_SLEW_MASK GENMASK(23, 22)
+#define QSGMII_PHY_CDR_PI_SLEW(x) FIELD_PREP(QSGMII_PHY_CDR_PI_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_SLEW_MASK GENMASK(27, 26)
+#define QSGMII_PHY_TX_SLEW(x) FIELD_PREP(QSGMII_PHY_TX_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_DRV_AMP_MASK GENMASK(31, 28)
+#define QSGMII_PHY_TX_DRV_AMP(x) FIELD_PREP(QSGMII_PHY_TX_DRV_AMP_MASK, (x))
struct ipq806x_gmac {
struct platform_device *pdev;
@@ -242,6 +264,113 @@ static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
ipq806x_gmac_set_speed(gmac, speed);
}
+static int
+ipq806x_gmac_configure_qsgmii_pcs_speed(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn;
+ int link_speed;
+ int val = 0;
+ int ret;
+
+ /* Some bootloaders may apply a wrong configuration and leave the
+ * port non-functional. If no fixed link is set,
+ * clear the force-speed bit.
+ */
+ if (!of_phy_is_fixed_link(pdev->dev.of_node))
+ goto write;
+
+ dn = of_get_child_by_name(pdev->dev.of_node, "fixed-link");
+ ret = of_property_read_u32(dn, "speed", &link_speed);
+ of_node_put(dn);
+ if (ret) {
+ dev_err(dev, "found fixed-link node with no speed");
+ return ret;
+ }
+
+ val = QSGMII_PCS_CH_SPEED_FORCE;
+
+ switch (link_speed) {
+ case SPEED_1000:
+ val |= QSGMII_PCS_CH_SPEED_1000;
+ break;
+ case SPEED_100:
+ val |= QSGMII_PCS_CH_SPEED_100;
+ break;
+ case SPEED_10:
+ val |= QSGMII_PCS_CH_SPEED_10;
+ break;
+ }
+
+write:
+ regmap_update_bits(gmac->qsgmii_csr, QSGMII_PCS_ALL_CH_CTL,
+ QSGMII_PCS_CH_SPEED_MASK <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id),
+ val <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id));
+
+ return 0;
+}
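+
+/* [Editor's note] Each QSGMII channel owns a 4-bit slice of
+ * QSGMII_PCS_ALL_CH_CTL, which is why both the mask and the value are
+ * shifted by QSGMII_PCS_CH_SPEED_SHIFT() before the update. A worked
+ * example (hypothetical values: forcing 100 Mbit/s on channel 2):
+ *
+ *   u32 mask = QSGMII_PCS_CH_SPEED_MASK << QSGMII_PCS_CH_SPEED_SHIFT(2); // 0xe00
+ *   u32 val = (QSGMII_PCS_CH_SPEED_FORCE | QSGMII_PCS_CH_SPEED_100) <<
+ *             QSGMII_PCS_CH_SPEED_SHIFT(2);                             // 0x600
+ *
+ *   regmap_update_bits(gmac->qsgmii_csr, QSGMII_PCS_ALL_CH_CTL, mask, val);
+ */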
+
+static const struct soc_device_attribute ipq806x_gmac_soc_v1[] = {
+ {
+ .revision = "1.*",
+ },
+ {
+ /* sentinel */
+ }
+};
+
+static int
+ipq806x_gmac_configure_qsgmii_params(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ u32 qsgmii_param;
+
+ switch (gmac->id) {
+ case 1:
+ soc = soc_device_match(ipq806x_gmac_soc_v1);
+
+ if (soc)
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xc) |
+ QSGMII_PHY_TX_SLEW(0x2) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x2);
+ else
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xd) |
+ QSGMII_PHY_TX_SLEW(0x0) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x0);
+
+ qsgmii_param |= QSGMII_PHY_RX_DC_BIAS(0x2);
+ break;
+ case 2:
+ case 3:
+ qsgmii_param = QSGMII_PHY_RX_DC_BIAS(0x3) |
+ QSGMII_PHY_TX_DRV_AMP(0xc);
+ break;
+ default: /* gmac 0 can't be set in SGMII mode */
+ dev_err(dev, "gmac id %d can't be in SGMII mode", gmac->id);
+ return -EINVAL;
+ }
+
+ /* Common params across all gmac id */
+ qsgmii_param |= QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ QSGMII_PHY_PHASE_LOOP_GAIN(0x4) |
+ QSGMII_PHY_RX_INPUT_EQU(0x1) |
+ QSGMII_PHY_CDR_PI_SLEW(0x2);
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ qsgmii_param);
+
+ return 0;
+}
+
static int ipq806x_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -328,17 +457,13 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
- regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
- QSGMII_PHY_CDR_EN |
- QSGMII_PHY_RX_FRONT_EN |
- QSGMII_PHY_RX_SIGNAL_DETECT_EN |
- QSGMII_PHY_TX_DRIVER_EN |
- QSGMII_PHY_QSGMII_EN |
- 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
- 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
- 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
- 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
- 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ err = ipq806x_gmac_configure_qsgmii_params(gmac);
+ if (err)
+ goto err_remove_config_dt;
+
+ err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac);
+ if (err)
+ goto err_remove_config_dt;
}
plat_dat->has_gmac = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index ecf759ee1c9f..a25c187d3185 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -51,7 +51,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
struct stmmac_resources res;
struct device_node *np;
int ret, i, phy_mode;
- bool mdio = false;
np = dev_of_node(&pdev->dev);
@@ -69,29 +68,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (!plat)
return -ENOMEM;
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
if (plat->mdio_node) {
- dev_err(&pdev->dev, "Found MDIO subnode\n");
- mdio = true;
- }
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
- if (mdio) {
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
- if (!plat->mdio_bus_data)
- return -ENOMEM;
+ if (!plat->mdio_bus_data) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
plat->mdio_bus_data->needs_reset = true;
}
plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
- if (!plat->dma_cfg)
- return -ENOMEM;
+ if (!plat->dma_cfg) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
- return ret;
+ goto err_put_node;
}
/* Get the base address of device */
@@ -100,7 +101,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
continue;
ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
if (ret)
- return ret;
+ goto err_disable_device;
break;
}
@@ -111,7 +112,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
phy_mode = device_get_phy_mode(&pdev->dev);
if (phy_mode < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
- return phy_mode;
+ ret = phy_mode;
+ goto err_disable_device;
}
plat->phy_interface = phy_mode;
@@ -128,6 +130,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (res.irq < 0) {
dev_err(&pdev->dev, "IRQ macirq not found\n");
ret = -ENODEV;
+ goto err_disable_msi;
}
res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
@@ -140,15 +143,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (res.lpi_irq < 0) {
dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
ret = -ENODEV;
+ goto err_disable_msi;
}
- return stmmac_dvr_probe(&pdev->dev, plat, &res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret)
+ goto err_disable_msi;
+
+ return ret;
+
+err_disable_msi:
+ pci_disable_msi(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_put_node:
+ of_node_put(plat->mdio_node);
+ return ret;
}
static void loongson_dwmac_remove(struct pci_dev *pdev)
{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int i;
+ of_node_put(priv->plat->mdio_node);
stmmac_dvr_remove(&pdev->dev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -158,6 +177,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
break;
}
+ pci_disable_msi(pdev);
pci_disable_device(pdev);
}
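
[Editor's note] The probe rework above replaces scattered early returns with a single unwind ladder, so each failure point undoes exactly what succeeded before it. A minimal sketch of the idiom, with hypothetical helper names:

    #include <linux/pci.h>

    static int foo_probe(struct pci_dev *pdev)      /* hypothetical driver */
    {
            int ret;

            ret = pci_enable_device(pdev);
            if (ret)
                    return ret;

            ret = foo_setup_irq(pdev);              /* hypothetical helper */
            if (ret)
                    goto err_disable_device;

            ret = foo_register(pdev);               /* hypothetical helper */
            if (ret)
                    goto err_free_irq;

            return 0;

    err_free_irq:
            foo_teardown_irq(pdev);                 /* undo in reverse order of setup */
    err_disable_device:
            pci_disable_device(pdev);
            return ret;
    }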
@@ -205,7 +225,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
-struct pci_driver loongson_dwmac_driver = {
+static struct pci_driver loongson_dwmac_driver = {
.name = "dwmac-loongson-pci",
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 58c0feaa8131..d42e1afb6521 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -9,7 +9,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -40,6 +39,33 @@
#define ETH_FINE_DLY_GTXC BIT(1)
#define ETH_FINE_DLY_RXC BIT(0)
+/* Peri Configuration register for mt8195 */
+#define MT8195_PERI_ETH_CTRL0 0xFD0
+#define MT8195_RMII_CLK_SRC_INTERNAL BIT(28)
+#define MT8195_RMII_CLK_SRC_RXC BIT(27)
+#define MT8195_ETH_INTF_SEL GENMASK(26, 24)
+#define MT8195_RGMII_TXC_PHASE_CTRL BIT(22)
+#define MT8195_EXT_PHY_MODE BIT(21)
+#define MT8195_DLY_GTXC_INV BIT(12)
+#define MT8195_DLY_GTXC_ENABLE BIT(5)
+#define MT8195_DLY_GTXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL1 0xFD4
+#define MT8195_DLY_RXC_INV BIT(25)
+#define MT8195_DLY_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_TXC_INV BIT(12)
+#define MT8195_DLY_TXC_ENABLE BIT(5)
+#define MT8195_DLY_TXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL2 0xFD8
+#define MT8195_DLY_RMII_RXC_INV BIT(25)
+#define MT8195_DLY_RMII_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RMII_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_RMII_TXC_INV BIT(12)
+#define MT8195_DLY_RMII_TXC_ENABLE BIT(5)
+#define MT8195_DLY_RMII_TXC_STAGES GENMASK(4, 0)
+
struct mac_delay_struct {
u32 tx_delay;
u32 rx_delay;
@@ -50,19 +76,21 @@ struct mac_delay_struct {
struct mediatek_dwmac_plat_data {
const struct mediatek_dwmac_variant *variant;
struct mac_delay_struct mac_delay;
+ struct clk *rmii_internal_clk;
struct clk_bulk_data *clks;
- struct device_node *np;
struct regmap *peri_regmap;
+ struct device_node *np;
struct device *dev;
phy_interface_t phy_mode;
- int num_clks_to_config;
bool rmii_clk_from_mac;
bool rmii_rxc;
+ bool mac_wol;
};
struct mediatek_dwmac_variant {
int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+ void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
/* clock ids to be requested */
const char * const *clk_list;
@@ -75,7 +103,11 @@ struct mediatek_dwmac_variant {
/* list of clocks required for mac */
static const char * const mt2712_dwmac_clk_l[] = {
- "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
+ "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static const char * const mt8195_dwmac_clk_l[] = {
+ "axi", "apb", "mac_cg", "mac_main", "ptp_ref"
};
static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
@@ -84,23 +116,12 @@ static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
u32 intf_val = 0;
- /* The clock labeled as "rmii_internal" in mt2712_dwmac_clk_l is needed
- * only in RMII(when MAC provides the reference clock), and useless for
- * RGMII/MII/RMII(when PHY provides the reference clock).
- * num_clks_to_config indicates the real number of clocks should be
- * configured, equals to (plat->variant->num_clks - 1) in default for all the case,
- * then +1 for rmii_clk_from_mac case.
- */
- plat->num_clks_to_config = plat->variant->num_clks - 1;
-
/* select phy interface in top control domain */
switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII:
intf_val |= PHY_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- if (plat->rmii_clk_from_mac)
- plat->num_clks_to_config++;
intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -268,6 +289,193 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
.tx_delay_max = 17600,
};
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
+ int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (rmii_rxc | rmii_clk_from_mac);
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ /* MT8195 only supports an external PHY */
+ intf_val |= MT8195_EXT_PHY_MODE;
+
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL0, intf_val);
+
+ return 0;
+}
+
+static void mt8195_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay /= 290;
+ mac_delay->rx_delay /= 290;
+}
+
+static void mt8195_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay *= 290;
+ mac_delay->rx_delay *= 290;
+}
+
+static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 gtxc_delay_val = 0, delay_val = 0, rmii_delay_val = 0;
+
+ mt8195_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (plat->rmii_clk_from_mac) {
+ /* case 1: the MAC provides the RMII reference clock,
+ * and the clock is output on the TXC pin.
+ * The egress timing can be adjusted by the RMII_TXC delay macro circuit.
+ * The ingress timing can be adjusted by the RMII_RXC delay macro circuit.
+ */
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_ENABLE,
+ !!mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_STAGES,
+ mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_INV,
+ mac_delay->tx_inv);
+
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_STAGES,
+ mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+ /* case 2: the RMII reference clock comes from an external PHY,
+ * and the "rmii_rxc" property indicates which pin (TXC/RXC)
+ * the reference clock is connected to. The reference clock is a
+ * received signal, so rx_delay/rx_inv describe the timing
+ * adjustment applied to it.
+ */
+ if (plat->rmii_rxc) {
+ /* the external RMII reference clock is connected to
+ * the RXC pin, so the reference clock is adjusted by
+ * the RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+ /* the external RMII reference clock is connected to
+ * the TXC pin, so the reference clock is adjusted by
+ * the TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV,
+ mac_delay->rx_inv);
+ }
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_STAGES,
+ gtxc_delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL1, delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL2, rmii_delay_val);
+
+ mt8195_delay_stage2ps(plat);
+
+ return 0;
+}
+
+static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = priv;
+
+ if (phy_interface_mode_is_rgmii(priv_plat->phy_mode)) {
+ /* Prefer the 2ns fixed delay controlled by TXC_PHASE_CTRL when
+ * the link speed is 1Gbps on an RGMII interface; fall back to
+ * the delay macro circuit for 10/100Mbps link speeds.
+ */
+ if (speed == SPEED_1000)
+ regmap_update_bits(priv_plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_STAGES,
+ MT8195_RGMII_TXC_PHASE_CTRL);
+ else
+ mt8195_set_delay(priv_plat);
+ }
+}
+
+static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
+ .dwmac_set_phy_interface = mt8195_set_interface,
+ .dwmac_set_delay = mt8195_set_delay,
+ .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
+ .clk_list = mt8195_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
+ .dma_bit_mask = 35,
+ .rx_delay_max = 9280,
+ .tx_delay_max = 9280,
+};
+
static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -308,6 +516,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
+ plat->mac_wol = of_property_read_bool(plat->np, "mediatek,mac-wol");
return 0;
}
@@ -315,18 +524,34 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
{
const struct mediatek_dwmac_variant *variant = plat->variant;
- int i, num = variant->num_clks;
+ int i, ret;
- plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL);
+ plat->clks = devm_kcalloc(plat->dev, variant->num_clks, sizeof(*plat->clks), GFP_KERNEL);
if (!plat->clks)
return -ENOMEM;
- for (i = 0; i < num; i++)
+ for (i = 0; i < variant->num_clks; i++)
plat->clks[i].id = variant->clk_list[i];
- plat->num_clks_to_config = variant->num_clks;
+ ret = devm_clk_bulk_get(plat->dev, variant->num_clks, plat->clks);
+ if (ret)
+ return ret;
+
+ /* The clock labeled "rmii_internal" is needed only in RMII mode
+ * when the MAC provides the reference clock, and is useless for
+ * RGMII/MII or for RMII when the PHY provides the reference clock.
+ * So the "rmii_internal" clock is requested and configured only
+ * when the RMII reference clock comes from the MAC.
+ */
+ if (plat->rmii_clk_from_mac) {
+ plat->rmii_internal_clk = devm_clk_get(plat->dev, "rmii_internal");
+ if (IS_ERR(plat->rmii_internal_clk))
+ ret = PTR_ERR(plat->rmii_internal_clk);
+ } else {
+ plat->rmii_internal_clk = NULL;
+ }
- return devm_clk_bulk_get(plat->dev, num, plat->clks);
+ return ret;
}
static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
@@ -335,44 +560,91 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
const struct mediatek_dwmac_variant *variant = plat->variant;
int ret;
- ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask));
- if (ret) {
- dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret);
- return ret;
+ if (variant->dwmac_set_phy_interface) {
+ ret = variant->dwmac_set_phy_interface(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ return ret;
+ }
}
- ret = variant->dwmac_set_phy_interface(plat);
- if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
- return ret;
+ if (variant->dwmac_set_delay) {
+ ret = variant->dwmac_set_delay(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ return ret;
+ }
}
- ret = variant->dwmac_set_delay(plat);
- if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
- return ret;
- }
+ return 0;
+}
- ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
- if (ret) {
- dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
- return ret;
- }
+static int mediatek_dwmac_clks_config(void *priv, bool enabled)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+ return ret;
+ }
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ ret = clk_prepare_enable(plat->rmii_internal_clk);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(plat->rmii_internal_clk);
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ }
- return 0;
+ return ret;
}
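
[Editor's note] One detail worth noting in the callback above: if the rmii_internal clock fails to enable, the bulk clocks enabled just before it stay on. A minimal sketch of the enable path with rollback, reusing the driver's own fields (clk_prepare_enable() accepts the NULL clock used when the PHY provides the reference clock):

    static int mediatek_clks_enable(struct mediatek_dwmac_plat_data *plat) /* sketch */
    {
            const struct mediatek_dwmac_variant *variant = plat->variant;
            int ret;

            ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(plat->rmii_internal_clk);
            if (ret)        /* roll back the bulk clocks on partial failure */
                    clk_bulk_disable_unprepare(variant->num_clks, plat->clks);

            return ret;
    }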
-static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
+static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct mediatek_dwmac_plat_data *priv_plat)
{
- struct mediatek_dwmac_plat_data *plat = priv;
+ int i;
+
+ plat->interface = priv_plat->phy_mode;
+ plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ plat->riwt_off = 1;
+ plat->maxmtu = ETH_DATA_LEN;
+ plat->addr64 = priv_plat->variant->dma_bit_mask;
+ plat->bsp_priv = priv_plat;
+ plat->init = mediatek_dwmac_init;
+ plat->clks_config = mediatek_dwmac_clks_config;
+ if (priv_plat->variant->dwmac_fix_mac_speed)
+ plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
+
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
- clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ /* Default TX Q0 to use TSO; the remaining TX queues use TBS */
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
+ }
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ return 0;
}
static int mediatek_dwmac_probe(struct platform_device *pdev)
@@ -411,29 +683,43 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->interface = priv_plat->phy_mode;
- plat_dat->has_gmac4 = 1;
- plat_dat->has_gmac = 0;
- plat_dat->pmt = 0;
- plat_dat->riwt_off = 1;
- plat_dat->maxmtu = ETH_DATA_LEN;
- plat_dat->bsp_priv = priv_plat;
- plat_dat->init = mediatek_dwmac_init;
- plat_dat->exit = mediatek_dwmac_exit;
+ mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
mediatek_dwmac_init(pdev, priv_plat);
+ ret = mediatek_dwmac_clks_config(priv_plat, true);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- stmmac_remove_config_dt(pdev, plat_dat);
- return ret;
- }
+ if (ret)
+ goto err_drv_probe;
return 0;
+
+err_drv_probe:
+ mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int mediatek_dwmac_remove(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ mediatek_dwmac_clks_config(priv_plat, false);
+
+ return ret;
}
static const struct of_device_id mediatek_dwmac_match[] = {
{ .compatible = "mediatek,mt2712-gmac",
.data = &mt2712_gmac_variant },
+ { .compatible = "mediatek,mt8195-gmac",
+ .data = &mt8195_gmac_variant },
{ }
};
@@ -441,7 +727,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
static struct platform_driver mediatek_dwmac_driver = {
.probe = mediatek_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = mediatek_dwmac_remove,
.driver = {
.name = "dwmac-mediatek",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index c7a6588d9398..e8b507f88fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -272,11 +272,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
if (ret)
return ret;
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
-
- return 0;
+ return devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
}
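
[Editor's note] The fix above registers the cleanup against the clock that was actually enabled and propagates the registration result; devm_add_action_or_reset() runs the action immediately if registration fails, so no extra error path is needed. A sketch of the pattern (hypothetical wrapper):

    #include <linux/clk.h>
    #include <linux/device.h>

    static int example_clk_get_enabled(struct device *dev, struct clk *clk)
    {
            int ret = clk_prepare_enable(clk);

            if (ret)
                    return ret;

            /* disables the clock on driver detach, or right away on failure */
            return devm_add_action_or_reset(dev,
                                            (void (*)(void *))clk_disable_unprepare,
                                            clk);
    }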
static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index adfeb8d3293d..62a69a91ab22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -48,46 +49,60 @@
#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
+struct oxnas_dwmac;
+
+struct oxnas_dwmac_data {
+ int (*setup)(struct oxnas_dwmac *dwmac);
+};
+
struct oxnas_dwmac {
struct device *dev;
struct clk *clk;
struct regmap *regmap;
+ const struct oxnas_dwmac_data *data;
};
-static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac)
{
- struct oxnas_dwmac *dwmac = priv;
unsigned int value;
int ret;
- /* Reset HW here before changing the glue configuration */
- ret = device_reset(dwmac->dev);
- if (ret)
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0)
return ret;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+ /* Use simple mux for 25/125 Mhz clock switching */
+ BIT(DWMAC_SIMPLE_MUX);
+
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
- if (ret < 0) {
- clk_disable_unprepare(dwmac->clk);
+ if (ret < 0)
return ret;
- }
/* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
value |= BIT(DWMAC_CKEN_GTX) |
/* Use simple mux for 25/125 Mhz clock switching */
- BIT(DWMAC_SIMPLE_MUX) |
- /* set auto switch tx clock source */
- BIT(DWMAC_AUTO_TX_SOURCE) |
- /* enable tx & rx vardelay */
- BIT(DWMAC_CKEN_TX_OUT) |
- BIT(DWMAC_CKEN_TXN_OUT) |
- BIT(DWMAC_CKEN_TX_IN) |
- BIT(DWMAC_CKEN_RX_OUT) |
- BIT(DWMAC_CKEN_RXN_OUT) |
- BIT(DWMAC_CKEN_RX_IN);
+ BIT(DWMAC_SIMPLE_MUX) |
+ /* set auto switch tx clock source */
+ BIT(DWMAC_AUTO_TX_SOURCE) |
+ /* enable tx & rx vardelay */
+ BIT(DWMAC_CKEN_TX_OUT) |
+ BIT(DWMAC_CKEN_TXN_OUT) |
+ BIT(DWMAC_CKEN_TX_IN) |
+ BIT(DWMAC_CKEN_RX_OUT) |
+ BIT(DWMAC_CKEN_RXN_OUT) |
+ BIT(DWMAC_CKEN_RX_IN);
regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
/* set tx & rx vardelay */
@@ -100,6 +115,27 @@ static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+ int ret;
+
+ /* Reset HW here before changing the glue configuration */
+ ret = device_reset(dwmac->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = dwmac->data->setup(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct oxnas_dwmac *dwmac = priv;
@@ -128,6 +164,12 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data) {
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->init = oxnas_dwmac_init;
@@ -166,8 +208,23 @@ err_remove_config_dt:
return ret;
}
+static const struct oxnas_dwmac_data ox810se_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox810se,
+};
+
+static const struct oxnas_dwmac_data ox820_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox820,
+};
+
static const struct of_device_id oxnas_dwmac_match[] = {
- { .compatible = "oxsemi,ox820-dwmac" },
+ {
+ .compatible = "oxsemi,ox810se-dwmac",
+ .data = &ox810se_dwmac_data,
+ },
+ {
+ .compatible = "oxsemi,ox820-dwmac",
+ .data = &ox820_dwmac_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
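
[Editor's note] With the setup hook carried in match data, adding another OXNAS variant only needs a new setup routine and a table entry. A sketch using a hypothetical ox815:

    static int oxnas_dwmac_setup_ox815(struct oxnas_dwmac *dwmac)   /* hypothetical */
    {
            /* program OXNAS_DWMAC_CTRL_REGOFFSET as the ox815 glue would require */
            return 0;
    }

    static const struct oxnas_dwmac_data ox815_dwmac_data = {
            .setup = oxnas_dwmac_setup_ox815,
    };

    /* plus one oxnas_dwmac_match[] entry:
     * { .compatible = "oxsemi,ox815-dwmac", .data = &ox815_dwmac_data },
     */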
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 2ffa0a11eea5..835caa15d55f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -78,6 +78,7 @@ struct ethqos_emac_por {
struct ethqos_emac_driver_data {
const struct ethqos_emac_por *por;
unsigned int num_por;
+ bool rgmii_config_loopback_en;
};
struct qcom_ethqos {
@@ -90,6 +91,7 @@ struct qcom_ethqos {
const struct ethqos_emac_por *por;
unsigned int num_por;
+ bool rgmii_config_loopback_en;
};
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
@@ -181,6 +183,22 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
.por = emac_v2_3_0_por,
.num_por = ARRAY_SIZE(emac_v2_3_0_por),
+ .rgmii_config_loopback_en = true,
+};
+
+static const struct ethqos_emac_por emac_v2_1_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x40C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_driver_data emac_v2_1_0_data = {
+ .por = emac_v2_1_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_1_0_por),
+ .rgmii_config_loopback_en = false,
};
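
[Editor's note] The new flag gates the RGMII loopback writes in the macro-init hunks below; since the same conditional appears twice there, a small helper could consolidate it (hypothetical sketch, not part of the patch):

    static void ethqos_set_loopback(struct qcom_ethqos *ethqos)
    {
            rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
                          ethqos->rgmii_config_loopback_en ?
                          RGMII_CONFIG_LOOPBACK_EN : 0,
                          RGMII_IO_MACRO_CONFIG);
    }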
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
@@ -297,8 +315,12 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->rgmii_config_loopback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
@@ -331,8 +353,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->rgmii_config_loopback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+
break;
case SPEED_10:
@@ -460,6 +487,13 @@ static int ethqos_clks_config(void *priv, bool enabled)
dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
return ret;
}
+
+ /* Enable the functional clock to prevent the DMA reset from
+ * timing out due to a missing PHY clock after the hardware
+ * block has been power cycled. The actual configuration is
+ * adjusted once ethqos_fix_mac_speed() is invoked.
+ */
+ ethqos_set_func_clk_en(ethqos);
} else {
clk_disable_unprepare(ethqos->rgmii_clk);
}
@@ -504,6 +538,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
+ ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -558,6 +593,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
static const struct of_device_id qcom_ethqos_match[] = {
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
+ { .compatible = "qcom,sm8150-ethqos", .data = &emac_v2_1_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index c469abc91fa1..6656d76b6766 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -32,6 +32,8 @@ struct rk_gmac_ops {
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
+ bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
bool regs_valid;
u32 regs[];
@@ -66,6 +68,7 @@ struct rk_priv_data {
int rx_delay;
struct regmap *grf;
+ struct regmap *php_grf;
};
#define HIWORD_UPDATE(val, mask, shift) \
@@ -1101,6 +1104,153 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* sys_grf */
+#define RK3588_GRF_GMAC_CON7 0X031c
+#define RK3588_GRF_GMAC_CON8 0X0320
+#define RK3588_GRF_GMAC_CON9 0X0324
+
+#define RK3588_GMAC_RXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 3)
+#define RK3588_GMAC_RXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 3)
+#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
+#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+/* php_grf */
+#define RK3588_GRF_GMAC_CON0 0X0008
+#define RK3588_GRF_CLK_CON1 0X0070
+
+#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
+ (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
+ (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+
+#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
+#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
+
+#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+
+#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
+#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
+ (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+
+#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
+#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
+
+static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 offset_con, id = bsp_priv->id;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
+ RK3588_GRF_GMAC_CON8;
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RGMII_MODE(id));
+
+ regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
+ RK3588_GMAC_RXCLK_DLY_ENABLE(id) |
+ RK3588_GMAC_TXCLK_DLY_ENABLE(id));
+
+ regmap_write(bsp_priv->grf, offset_con,
+ RK3588_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3588_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
+}
+
+static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, id = bsp_priv->id;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMA_CLK_RMII_DIV20(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV50(id);
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMA_CLK_RMII_DIV2(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV5(id);
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMAC_CLK_RGMII_DIV1(id);
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+
+ return;
+err:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+}
+
+static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
+
+ val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
+ RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+}
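+
+/* [Editor's note] All of these GRF helpers rely on Rockchip's hi-word
+ * write-enable convention: the upper 16 bits of a write select which of
+ * the lower 16 bits actually change, so no read-modify-write is needed.
+ * A worked example with the delay-line macro above (assuming
+ * HIWORD_UPDATE(val, mask, shift) expands to
+ * ((val) << (shift)) | ((mask) << ((shift) + 16))):
+ *
+ *   RK3588_GMAC_CLK_RX_DL_CFG(0x10)
+ *           == HIWORD_UPDATE(0x10, 0xFF, 8)
+ *           == (0x10 << 8) | (0xFF << 24)
+ *           == 0xff001000   // enable bits 15:8 for writing, RX delay = 0x10
+ */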
+
+static const struct rk_gmac_ops rk3588_ops = {
+ .set_to_rgmii = rk3588_set_to_rgmii,
+ .set_to_rmii = rk3588_set_to_rmii,
+ .set_rgmii_speed = rk3588_set_gmac_speed,
+ .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_clock_selection = rk3588_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0xfe1b0000, /* gmac0 */
+ 0xfe1c0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
@@ -1153,6 +1303,130 @@ static const struct rk_gmac_ops rv1108_ops = {
.set_rmii_speed = rv1108_set_rmii_speed,
};
+#define RV1126_GRF_GMAC_CON0 0X0070
+#define RV1126_GRF_GMAC_CON1 0X0074
+#define RV1126_GRF_GMAC_CON2 0X0078
+
+/* RV1126_GRF_GMAC_CON0 */
+#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
+#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
+#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RV1126_GMAC_M0_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+#define RV1126_GMAC_M0_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RV1126_GMAC_M0_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RV1126_GMAC_M1_RXCLK_DLY_ENABLE GRF_BIT(3)
+#define RV1126_GMAC_M1_RXCLK_DLY_DISABLE GRF_CLR_BIT(3)
+#define RV1126_GMAC_M1_TXCLK_DLY_ENABLE GRF_BIT(2)
+#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
+
+/* RV1126_GRF_GMAC_CON1 */
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+/* RV1126_GRF_GMAC_CON2 */
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON1,
+ RV1126_GMAC_M0_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M0_CLK_TX_DL_CFG(tx_delay));
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON2,
+ RV1126_GMAC_M1_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M1_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ case 1000:
+ rate = 125000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static const struct rk_gmac_ops rv1126_ops = {
+ .set_to_rgmii = rv1126_set_to_rgmii,
+ .set_to_rmii = rv1126_set_to_rmii,
+ .set_rgmii_speed = rv1126_set_rgmii_speed,
+ .set_rmii_speed = rv1126_set_rmii_speed,
+};
+
#define RK_GRF_MACPHY_CON0 0xb00
#define RK_GRF_MACPHY_CON1 0xb04
#define RK_GRF_MACPHY_CON2 0xb08
@@ -1304,6 +1578,10 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->clk_mac_speed))
clk_prepare_enable(bsp_priv->clk_mac_speed);
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, true);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1330,6 +1608,10 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->mac_clk_tx);
clk_disable_unprepare(bsp_priv->clk_mac_speed);
+
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1444,6 +1726,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
+ bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1680,7 +1964,9 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
+ { .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index b7c2579c963b..6b447d8f0bd8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,14 +59,13 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
- void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if ((tse_pcs_base) && (sgmii_adapter_base))
+ if (sgmii_adapter_base)
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
@@ -93,8 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (tse_pcs_base && sgmii_adapter_base)
+ if (phy_dev && sgmii_adapter_base) {
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+ }
}
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 617d0e4c6495..f834472599f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -16,6 +16,7 @@
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -57,7 +58,6 @@ struct emac_variant {
};
/* struct sunxi_priv_data - hold all sunxi private data
- * @tx_clk: reference to MAC TX clock
* @ephy_clk: reference to the optional EPHY clock for the internal PHY
* @regulator: reference to the optional regulator
* @rst_ephy: reference to the optional EPHY reset for the internal PHY
@@ -68,7 +68,6 @@ struct emac_variant {
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
- struct clk *tx_clk;
struct clk *ephy_clk;
struct regulator *regulator;
struct reset_control *rst_ephy;
@@ -579,22 +578,14 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
}
}
- ret = clk_prepare_enable(gmac->tx_clk);
- if (ret) {
- dev_err(&pdev->dev, "Could not enable AHB clock\n");
- goto err_disable_regulator;
- }
-
if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
if (ret)
- goto err_disable_clk;
+ goto err_disable_regulator;
}
return 0;
-err_disable_clk:
- clk_disable_unprepare(gmac->tx_clk);
err_disable_regulator:
if (gmac->regulator)
regulator_disable(gmac->regulator);
@@ -756,7 +747,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
if (err) {
dev_err(priv->device, "EMAC reset timeout\n");
- return -EFAULT;
+ return err;
}
return 0;
}
@@ -916,6 +907,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
&gmac->mux_handle, priv, priv->mii);
+ of_node_put(mdio_mux);
return ret;
}
@@ -1043,8 +1035,6 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
if (gmac->variant->soc_has_internal_phy)
sun8i_dwmac_unpower_internal_phy(gmac);
- clk_disable_unprepare(gmac->tx_clk);
-
if (gmac->regulator)
regulator_disable(gmac->regulator);
}
@@ -1167,12 +1157,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return -EINVAL;
}
- gmac->tx_clk = devm_clk_get(dev, "stmmaceth");
- if (IS_ERR(gmac->tx_clk)) {
- dev_err(dev, "Could not get TX clock\n");
- return PTR_ERR(gmac->tx_clk);
- }
-
/* Optional regulator for PHY */
gmac->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(gmac->regulator)) {
@@ -1254,6 +1238,12 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
+ /* The MAC is runtime suspended after stmmac_dvr_probe(), so we
+ * need to ensure it is resumed before performing other operations
+ * such as reset.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
/* The mux must be registered after parent MDIO
* so after stmmac_dvr_probe()
*/
@@ -1272,12 +1262,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
goto dwmac_remove;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
dwmac_mux:
reset_control_put(gmac->rst_ephy);
clk_put(gmac->ephy_clk);
dwmac_remove:
+ pm_runtime_put_noidle(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
sun8i_dwmac_exit(pdev, gmac);
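
[Editor's note] The runtime-PM calls added above bracket the post-probe MDIO-mux and EPHY work: stmmac_dvr_probe() leaves the MAC runtime-suspended, so it must be resumed before reset/MDIO access and released again once setup is done. A condensed sketch of that bracket (hypothetical helper name):

    pm_runtime_get_sync(&pdev->dev);        /* MAC must be active for reset/MDIO */

    ret = sun8i_post_probe_setup(pdev);     /* hypothetical stand-in */
    if (ret) {
            pm_runtime_put_noidle(&pdev->dev);  /* drop ref without re-suspending */
            return ret;
    }

    pm_runtime_put(&pdev->dev);             /* balance; the MAC may suspend again */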
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index e2e0f977875d..c3f10a92b62b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -22,21 +22,21 @@
#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
-#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_DIV_SEL_20 0
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
-#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
-#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
-#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
-#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
@@ -49,13 +49,15 @@ struct visconti_eth {
void __iomem *reg;
u32 phy_intf_sel;
struct clk *phy_ref_clk;
+ struct device *dev;
spinlock_t lock; /* lock to protect register update */
};
static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
{
struct visconti_eth *dwmac = priv;
- unsigned int val, clk_sel_val;
+ struct net_device *netdev = dev_get_drvdata(dwmac->dev);
+ unsigned int val, clk_sel_val = 0;
unsigned long flags;
spin_lock_irqsave(&dwmac->lock, flags);
@@ -85,7 +87,9 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
break;
default:
/* No bit control */
- break;
+ netdev_err(netdev, "Unsupported speed request (%d)\n", speed);
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+ return;
}
writel(val, dwmac->reg + MAC_CTRL_REG);
@@ -96,31 +100,41 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
val |= ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
switch (dwmac->phy_intf_sel) {
case ETHER_CONFIG_INTF_RGMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_RMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_RST;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_MII:
default:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_EN;
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
}
- /* Start clock */
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
- val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
spin_unlock_irqrestore(&dwmac->lock, flags);
}
@@ -219,6 +233,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
+ dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
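The reworked fix_mac_speed above commits each clock-mux step with its own write, so the mux select, clock enable, and output-enable changes take effect in a defined order. A minimal sketch of that staged-write idiom under the register spinlock; the bit names are placeholders, not the Visconti register map:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/spinlock.h>

	#define EXAMPLE_CLK_EN	BIT(0)	/* hypothetical clock-enable bit */
	#define EXAMPLE_OUT_DIS	BIT(1)	/* hypothetical output-disable bit */

	static void example_set_clk_mux(void __iomem *reg, u32 sel,
					spinlock_t *lock)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(lock, flags);

		val = sel | EXAMPLE_OUT_DIS;	/* 1) select source, output off */
		writel(val, reg);

		val |= EXAMPLE_CLK_EN;		/* 2) then start the clock */
		writel(val, reg);

		val &= ~EXAMPLE_OUT_DIS;	/* 3) finally drive the output */
		writel(val, reg);

		spin_unlock_irqrestore(lock, flags);
	}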
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 35ab8d0bdce7..7ab791c8d355 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -56,7 +56,7 @@
#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
-#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+#define MAC_CORE_INIT (MAC_CONTROL_HBD)
/* MAC FLOW CTRL defines */
#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 3c73453725f9..4296ddda8aaa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,7 +126,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
-#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \
GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 76edb9b72675..0e00dd83d027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -15,7 +15,6 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
@@ -24,7 +23,6 @@
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
- struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
@@ -32,13 +30,6 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
/* Configure GMAC core */
value |= GMAC_CORE_INIT;
- /* Clear ACS bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
- value &= ~GMAC_CONTROL_ACS;
-
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 75071a7d551a..a6e8d7bd9588 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -15,7 +15,6 @@
*******************************************************************************/
#include <linux/crc32.h>
-#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac.h"
#include "dwmac100.h"
@@ -28,13 +27,6 @@ static void dwmac100_core_init(struct mac_device_info *hw,
value |= MAC_CORE_INIT;
- /* Clear ASTP bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev))
- value &= ~MAC_CONTROL_ASTP;
-
writel(value, ioaddr + MAC_CONTROL);
#ifdef STMMAC_VLAN_TAG_USED
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 462ca7ed095a..71dad409f78b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -150,7 +150,8 @@
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
-#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
+#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
+ GMAC_INT_TSIE)
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index fd41db65fe1d..c25bfecb4a2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
-#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
@@ -23,6 +22,7 @@
static void dwmac4_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -58,6 +58,9 @@ static void dwmac4_core_init(struct mac_device_info *hw,
value |= GMAC_INT_FPE_EN;
writel(value, ioaddr + GMAC_INT_EN);
+
+ if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
+ init_waitqueue_head(&priv->tstamp_busy_wait);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
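The waitqueue initialized above is paired with the wake_up() added to timestamp_interrupt() later in this series. A hedged sketch of that wait/wake pairing, with hypothetical names (ex_snapshot_ready stands in for whatever readiness check the waiter actually needs):

	#include <linux/jiffies.h>
	#include <linux/wait.h>

	struct ex_priv {
		wait_queue_head_t tstamp_busy_wait;
	};

	/* hypothetical: the condition the waiter is actually waiting on */
	static bool ex_snapshot_ready(struct ex_priv *priv)
	{
		return true;
	}

	static long ex_wait_for_snapshot(struct ex_priv *priv)
	{
		/* sleep until the timestamp IRQ calls wake_up(), or ~100 ms */
		return wait_event_interruptible_timeout(priv->tstamp_busy_wait,
							ex_snapshot_ready(priv),
							HZ / 10);
	}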
@@ -219,6 +222,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
if (queue == 0 || queue == 4) {
value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else if (queue > 4) {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
} else {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
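For illustration, the queue-to-DMA-channel fix above amounts to: queues 0 and 4 share one dedicated field, and queues above 4 wrap their field index to (queue - 4). A sketch with made-up mask geometry, not the real MTL register layout:

	#include <linux/bits.h>
	#include <linux/types.h>

	/* hypothetical 3-bit channel field per queue, 8 bits apart */
	#define EX_QXMDMACH_SHIFT(q)	((q) * 8)
	#define EX_QXMDMACH_MASK(q)	GENMASK(EX_QXMDMACH_SHIFT(q) + 2, \
						EX_QXMDMACH_SHIFT(q))

	static u32 ex_map_queue_to_chan(u32 value, u32 queue, u32 chan)
	{
		/* queues above 4 reuse the field index of (queue - 4) */
		u32 q = (queue > 4) ? queue - 4 : queue;

		value &= ~EX_QXMDMACH_MASK(q);
		value |= chan << EX_QXMDMACH_SHIFT(q);
		return value;
	}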
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index d3b4765c1a5b..8cc80b1db4cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -462,11 +462,6 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
-static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des0);
-}
-
static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des0 = cpu_to_le32(lower_32_bits(addr));
@@ -575,7 +570,6 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.init_tx_desc = dwmac4_rd_init_tx_desc,
.display_ring = dwmac4_display_ring,
.set_mss = dwmac4_set_mss_ctxt,
- .get_addr = dwmac4_get_addr,
.set_addr = dwmac4_set_addr,
.clear = dwmac4_clear,
.set_sarc = dwmac4_set_sarc,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 1914ad698cab..acd70b9a3173 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -150,6 +150,7 @@
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
+#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index caa4bfc4c1d6..9b6138b11776 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable/disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
if (enable)
value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
}
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
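The stmmac_set_mac() change above is an instance of the read-modify-compare-write idiom: skip the MMIO write entirely when the register value would not change. A generic sketch:

	#include <linux/io.h>

	static void ex_update_bits(void __iomem *reg, u32 mask, u32 set)
	{
		u32 old_val = readl(reg);
		u32 val = (old_val & ~mask) | set;

		/* avoid needless MMIO traffic when nothing would change */
		if (val != old_val)
			writel(val, reg);
	}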
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index ccfb0102dde4..b1f0c3984a09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -239,11 +239,6 @@ static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
}
-static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des0);
-}
-
static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des0 = cpu_to_le32(lower_32_bits(addr));
@@ -366,7 +361,6 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.init_rx_desc = dwxgmac2_init_rx_desc,
.init_tx_desc = dwxgmac2_init_tx_desc,
.set_mss = dwxgmac2_set_mss,
- .get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
.get_rx_hash = dwxgmac2_get_rx_hash,
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 6650edfab5bc..1bcbbd724fb5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -440,11 +440,6 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
pr_info("\n");
}
-static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des2);
-}
-
static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des2 = cpu_to_le32(addr);
@@ -475,7 +470,6 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
.display_ring = enh_desc_display_ring,
- .get_addr = enh_desc_get_addr,
.set_addr = enh_desc_set_addr,
.clear = enh_desc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index f7dc447f05a0..592b4067f9b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -82,8 +82,6 @@ struct stmmac_desc_ops {
dma_addr_t dma_rx_phy, unsigned int desc_size);
/* set MSS via context descriptor */
void (*set_mss)(struct dma_desc *p, unsigned int mss);
- /* get descriptor skbuff address */
- void (*get_addr)(struct dma_desc *p, unsigned int *addr);
/* set descriptor skbuff address */
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
@@ -142,8 +140,6 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, display_ring, __args)
#define stmmac_set_mss(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_mss, __args)
-#define stmmac_get_desc_addr(__priv, __args...) \
- stmmac_do_void_callback(__priv, desc, get_addr, __args)
#define stmmac_set_desc_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a57b0fa815ab..ea4910ae0921 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -197,7 +197,7 @@ static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
MMC_CNTRL, value);
}
-/* To mask all all interrupts.*/
+/* To mask all interrupts.*/
static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 98ef43f35802..e3da4da242ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -292,11 +292,6 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx,
pr_info("\n");
}
-static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des2);
-}
-
static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des2 = cpu_to_le32(addr);
@@ -326,7 +321,6 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
.display_ring = ndesc_display_ring,
- .get_addr = ndesc_get_addr,
.set_addr = ndesc_set_addr,
.clear = ndesc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 8ad900949dc8..2b5b17d8b8a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 40b5ed94cb54..bdbf86cb102a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -188,13 +188,24 @@ struct stmmac_rfs_entry {
int tc;
};
+struct stmmac_dma_conf {
+ unsigned int dma_buf_sz;
+
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
- int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
@@ -202,7 +213,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int dma_buf_sz;
unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -214,13 +224,7 @@ struct stmmac_priv {
int (*hwif_quirks)(struct stmmac_priv *priv);
struct mutex lock;
- /* RX Queue */
- struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
- unsigned int dma_rx_size;
-
- /* TX Queue */
- struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- unsigned int dma_tx_size;
+ struct stmmac_dma_conf dma_conf;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
@@ -229,7 +233,6 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_irq[PHY_MAX_ADDR];
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -265,9 +268,10 @@ struct stmmac_priv {
u32 adv_ts;
int use_riwt;
int irq_wake;
- spinlock_t ptp_lock;
+ rwlock_t ptp_lock;
/* Protects auxiliary snapshot registers from concurrent access. */
struct mutex aux_ts_lock;
+ wait_queue_head_t tstamp_busy_wait;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
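Grouping the ring state into struct stmmac_dma_conf lets a reconfiguration (an MTU or ring-size change, say) build a complete new DMA configuration off to the side and commit it in one assignment instead of mutating live fields one by one. A simplified sketch with stand-in types, not the driver's actual structures:

	struct ex_dma_conf {
		unsigned int dma_buf_sz;
		unsigned int dma_rx_size;
		unsigned int dma_tx_size;
	};

	struct ex_priv {
		struct ex_dma_conf dma_conf;	/* the live configuration */
	};

	static void ex_commit_dma_conf(struct ex_priv *priv,
				       const struct ex_dma_conf *new_conf)
	{
		/* resources behind *new_conf are assumed already allocated */
		priv->dma_conf = *new_conf;	/* one whole-struct commit */
	}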
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 164dff5ec32e..f453b0d09366 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -21,10 +21,18 @@
#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
+#define GMAC4_REG_SPACE_SIZE 0x116C
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
#define XGMAC_ETHTOOL_NAME "st_xgmac"
+/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
+ *
+ * It is duplicated here because dwmac_dma.h and dwmac4_dma.h cannot be
+ * included at the same time due to conflicting macro names.
+ */
+#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
+
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
@@ -279,15 +287,15 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac || priv->plat->has_gmac4)
- strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else if (priv->plat->has_xgmac)
- strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ strscpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
if (priv->plat->pdev) {
- strlcpy(info->bus_info, pci_name(priv->plat->pdev),
+ strscpy(info->bus_info, pci_name(priv->plat->pdev),
sizeof(info->bus_info));
}
}
@@ -434,6 +442,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
if (priv->plat->has_xgmac)
return XGMAC_REGSIZE * 4;
+ else if (priv->plat->has_gmac4)
+ return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
@@ -446,8 +456,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- if (!priv->plat->has_xgmac) {
- /* Copy DMA registers to where ethtool expects them */
+ /* Copy DMA registers to where ethtool expects them */
+ if (priv->plat->has_gmac4) {
+ /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
+ NUM_DWMAC4_DMA_REGS * 4);
+ } else if (!priv->plat->has_xgmac) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
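The GMAC4 branch above relies on ethtool's dump buffer being a flat array of 32-bit words: a register block at byte offset OFF starts at word index OFF / 4, so the whole block can be relocated with one memcpy. An illustrative sketch with made-up offsets, not the GMAC4 values:

	#include <linux/string.h>
	#include <linux/types.h>

	#define EX_DMA_REGS_BYTE_OFF	0x1100	/* where the dump placed them */
	#define EX_ETHTOOL_DMA_INDEX	55	/* word index ethtool expects */
	#define EX_NUM_DMA_REGS		27

	static void ex_relocate_dma_regs(u32 *reg_space)
	{
		/* flat u32 array: byte offset OFF maps to index OFF / 4 */
		memcpy(&reg_space[EX_ETHTOOL_DMA_INDEX],
		       &reg_space[EX_DMA_REGS_BYTE_OFF / 4],
		       EX_NUM_DMA_REGS * 4);
	}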
@@ -470,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = DMA_MAX_RX_SIZE;
ring->tx_max_pending = DMA_MAX_TX_SIZE;
- ring->rx_pending = priv->dma_rx_size;
- ring->tx_pending = priv->dma_tx_size;
+ ring->rx_pending = priv->dma_conf.dma_rx_size;
+ ring->tx_pending = priv->dma_conf.dma_tx_size;
}
static int stmmac_set_ringparam(struct net_device *netdev,
@@ -788,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
netdev_warn(priv->dev,
"Setting EEE tx-lpi is not supported\n");
- if (priv->hw->xpcs) {
- ret = xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- edata->eee_enabled);
- if (ret)
- return ret;
- }
-
if (!edata->eee_enabled)
stmmac_disable_eee_mode(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 074e2cdfb0fa..764832f4dae1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for the present system time initialization to complete */
- return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
!(value & PTP_TCR_TSINIT),
- 10000, 100000);
+ 10, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
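The init_systime() change above swaps the sleeping poll helper for its atomic variant, which busy-waits with udelay() and is therefore safe in atomic context (e.g. with the PTP lock held). A minimal sketch with a hypothetical busy flag:

	#include <linux/bits.h>
	#include <linux/iopoll.h>

	#define EX_BUSY_BIT	BIT(2)	/* hypothetical self-clearing flag */

	static int ex_wait_not_busy(void __iomem *reg)
	{
		u32 value;

		/* poll every 10 us, give up after 100 ms, never sleep */
		return readl_poll_timeout_atomic(reg, value,
						 !(value & EX_BUSY_BIT),
						 10, 100000);
	}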
@@ -145,15 +145,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
static void get_systime(void __iomem *ioaddr, u64 *systime)
{
- u64 ns;
-
- /* Get the TSSS value */
- ns = readl(ioaddr + PTP_STNSR);
- /* Get the TSS and convert sec time value to nanosecond */
- ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+ u64 ns, sec0, sec1;
+
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ do {
+ sec0 = sec1;
+ /* Get the TSSS value */
+ ns = readl_relaxed(ioaddr + PTP_STNSR);
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ } while (sec0 != sec1);
if (systime)
- *systime = ns;
+ *systime = ns + (sec1 * 1000000000ULL);
}
static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
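The get_systime() loop above is the classic rollover-safe two-register read: sample seconds, then nanoseconds, then seconds again, and retry if the two seconds reads disagree (a nanoseconds rollover happened in between). A standalone sketch with placeholder register offsets:

	#include <linux/io.h>

	#define EX_SEC_REG	0x08	/* hypothetical seconds register */
	#define EX_NSEC_REG	0x0c	/* hypothetical nanoseconds register */

	static u64 ex_read_clock(void __iomem *base)
	{
		u64 sec0, sec1, ns;

		sec1 = readl_relaxed(base + EX_SEC_REG);
		do {
			sec0 = sec1;
			ns = readl_relaxed(base + EX_NSEC_REG);
			sec1 = readl_relaxed(base + EX_SEC_REG);
		} while (sec0 != sec1);	/* retry until a consistent pair */

		return ns + sec1 * 1000000000ULL;
	}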
@@ -174,6 +179,11 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
u64 ptp_time;
int i;
+ if (priv->plat->int_snapshot_en) {
+ wake_up(&priv->tstamp_busy_wait);
+ return;
+ }
+
tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
if (!tsync_int)
@@ -191,9 +201,9 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
GMAC_TIMESTAMP_ATSNS_SHIFT;
for (i = 0; i < num_snapshot; i++) {
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ptp_time;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 63ff2dad8c85..8273e6a175c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,8 +74,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -130,6 +130,9 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
@@ -231,7 +234,7 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
/* synchronize_rcu() needed for pending XDP buffers to drain */
for (queue = 0; queue < rx_queues_cnt; queue++) {
- rx_q = &priv->rx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
if (rx_q->xsk_pool) {
synchronize_rcu();
break;
@@ -357,13 +360,13 @@ static void print_pkt(unsigned char *buf, int len)
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 avail;
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -375,13 +378,13 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
*/
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 dirty;
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -402,23 +405,24 @@ static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
* Description: this function is to verify and enter in LPI mode in case of
* EEE.
*/
-static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
/* check if all TX queues have the work finished */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return; /* still unfinished work */
+ return -EBUSY; /* still unfinished work */
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->en_tx_lpi_clockgating);
+ return 0;
}
/**
@@ -450,8 +454,8 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
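With stmmac_enable_eee_mode() now returning -EBUSY while TX work is pending, the timer above re-arms itself only in that case and stops once LPI entry succeeds. A hedged sketch of that conditional re-arm, with hypothetical names and a stubbed-out LPI check:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct ex_priv {
		struct timer_list eee_timer;
	};

	/* hypothetical: returns -EBUSY while any TX queue still has work */
	static int ex_enable_eee_mode(struct ex_priv *priv)
	{
		return 0;
	}

	static void ex_eee_timer(struct timer_list *t)
	{
		struct ex_priv *priv = from_timer(priv, t, eee_timer);

		/* re-arm only while LPI entry keeps failing with -EBUSY */
		if (ex_enable_eee_mode(priv))
			mod_timer(&priv->eee_timer,
				  jiffies + msecs_to_jiffies(1));
	}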
@@ -833,19 +837,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0) {
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -889,6 +884,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
if (ret)
return ret;
@@ -911,8 +909,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- stmmac_ptp_register(priv);
-
return 0;
}
@@ -936,105 +932,15 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
-static void stmmac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- int tx_cnt = priv->plat->tx_queues_to_use;
- int max_speed = priv->plat->max_speed;
- phylink_set(mac_supported, 10baseT_Half);
- phylink_set(mac_supported, 10baseT_Full);
- phylink_set(mac_supported, 100baseT_Half);
- phylink_set(mac_supported, 100baseT_Full);
- phylink_set(mac_supported, 1000baseT_Half);
- phylink_set(mac_supported, 1000baseT_Full);
- phylink_set(mac_supported, 1000baseKX_Full);
-
- phylink_set(mac_supported, Autoneg);
- phylink_set(mac_supported, Pause);
- phylink_set(mac_supported, Asym_Pause);
- phylink_set_port_modes(mac_supported);
-
- /* Cut down 1G if asked to */
- if ((max_speed > 0) && (max_speed < 1000)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- } else if (priv->plat->has_gmac4) {
- if (!max_speed || max_speed >= 2500) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- } else if (priv->plat->has_xgmac) {
- if (!max_speed || (max_speed >= 2500)) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- if (!max_speed || (max_speed >= 5000)) {
- phylink_set(mac_supported, 5000baseT_Full);
- }
- if (!max_speed || (max_speed >= 10000)) {
- phylink_set(mac_supported, 10000baseSR_Full);
- phylink_set(mac_supported, 10000baseLR_Full);
- phylink_set(mac_supported, 10000baseER_Full);
- phylink_set(mac_supported, 10000baseLRM_Full);
- phylink_set(mac_supported, 10000baseT_Full);
- phylink_set(mac_supported, 10000baseKX4_Full);
- phylink_set(mac_supported, 10000baseKR_Full);
- }
- if (!max_speed || (max_speed >= 25000)) {
- phylink_set(mac_supported, 25000baseCR_Full);
- phylink_set(mac_supported, 25000baseKR_Full);
- phylink_set(mac_supported, 25000baseSR_Full);
- }
- if (!max_speed || (max_speed >= 40000)) {
- phylink_set(mac_supported, 40000baseKR4_Full);
- phylink_set(mac_supported, 40000baseCR4_Full);
- phylink_set(mac_supported, 40000baseSR4_Full);
- phylink_set(mac_supported, 40000baseLR4_Full);
- }
- if (!max_speed || (max_speed >= 50000)) {
- phylink_set(mac_supported, 50000baseCR2_Full);
- phylink_set(mac_supported, 50000baseKR2_Full);
- phylink_set(mac_supported, 50000baseSR2_Full);
- phylink_set(mac_supported, 50000baseKR_Full);
- phylink_set(mac_supported, 50000baseSR_Full);
- phylink_set(mac_supported, 50000baseCR_Full);
- phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
- phylink_set(mac_supported, 50000baseDR_Full);
- }
- if (!max_speed || (max_speed >= 100000)) {
- phylink_set(mac_supported, 100000baseKR4_Full);
- phylink_set(mac_supported, 100000baseSR4_Full);
- phylink_set(mac_supported, 100000baseCR4_Full);
- phylink_set(mac_supported, 100000baseLR4_ER4_Full);
- phylink_set(mac_supported, 100000baseKR2_Full);
- phylink_set(mac_supported, 100000baseSR2_Full);
- phylink_set(mac_supported, 100000baseCR2_Full);
- phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
- phylink_set(mac_supported, 100000baseDR2_Full);
- }
- }
-
- /* Half-Duplex can only work with single queue */
- if (tx_cnt > 1) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- }
-
- linkmode_and(supported, supported, mac_supported);
- linkmode_andnot(supported, supported, mask);
-
- linkmode_and(state->advertising, state->advertising, mac_supported);
- linkmode_andnot(state->advertising, state->advertising, mask);
+ if (!priv->hw->xpcs)
+ return NULL;
- /* If PCS is supported, check which modes it supports. */
- if (priv->hw->xpcs)
- xpcs_validate(priv->hw->xpcs, supported, state);
+ return &priv->hw->xpcs->pcs;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1080,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1158,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
@@ -1173,7 +1080,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
- .validate = stmmac_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
@@ -1215,18 +1123,20 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct device_node *node;
+ struct fwnode_handle *fwnode;
int ret;
- node = priv->plat->phylink_node;
+ fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
- if (node)
- ret = phylink_of_phy_connect(priv->phylink, node, 0);
+ if (fwnode)
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
/* Some DT bindings do not set up the PHY handle. Let's try to
* manually parse it
*/
- if (!node || ret) {
+ if (!fwnode || ret) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1253,12 +1163,12 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int max_speed = priv->plat->max_speed;
int mode = priv->plat->phy_interface;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.pcs_poll = true;
if (priv->plat->mdio_bus_data)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
@@ -1266,19 +1176,57 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!fwnode)
fwnode = dev_fwnode(priv->device);
+ /* Set the platform/firmware specified interface mode */
+ __set_bit(mode, priv->phylink_config.supported_interfaces);
+
+ /* If we have an xpcs, it defines which PHY interfaces are supported. */
+ if (priv->hw->xpcs)
+ xpcs_get_interfaces(priv->hw->xpcs,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ if (!max_speed || max_speed >= 1000)
+ priv->phylink_config.mac_capabilities |= MAC_1000;
+
+ if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (!max_speed || max_speed >= 5000)
+ priv->phylink_config.mac_capabilities |= MAC_5000FD;
+ if (!max_speed || max_speed >= 10000)
+ priv->phylink_config.mac_capabilities |= MAC_10000FD;
+ if (!max_speed || max_speed >= 25000)
+ priv->phylink_config.mac_capabilities |= MAC_25000FD;
+ if (!max_speed || max_speed >= 40000)
+ priv->phylink_config.mac_capabilities |= MAC_40000FD;
+ if (!max_speed || max_speed >= 50000)
+ priv->phylink_config.mac_capabilities |= MAC_50000FD;
+ if (!max_speed || max_speed >= 100000)
+ priv->phylink_config.mac_capabilities |= MAC_100000FD;
+ }
+
+ /* Half-Duplex can only work with single queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ priv->phylink_config.mac_managed_pm = true;
+
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
- if (priv->hw->xpcs)
- phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
-
priv->phylink = phylink;
return 0;
}
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int desc_size;
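The phy_setup conversion above replaces the hand-rolled validate callback with phylink_generic_validate(): the driver declares supported_interfaces and mac_capabilities once, and phylink derives the supported link modes from them. A minimal sketch of the capability filling (an illustrative subset, not the full stmmac logic):

	#include <linux/phylink.h>

	static void ex_fill_phylink_caps(struct phylink_config *cfg,
					 int max_speed)
	{
		cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					MAC_10 | MAC_100;

		/* cap advertised modes at the platform's max_speed, if set */
		if (!max_speed || max_speed >= 1000)
			cfg->mac_capabilities |= MAC_1000;
	}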
@@ -1287,7 +1235,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue);
@@ -1300,12 +1248,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
}
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
}
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
unsigned int desc_size;
@@ -1314,7 +1263,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue);
@@ -1329,18 +1278,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
}
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Display RX ring */
- stmmac_display_rx_rings(priv);
+ stmmac_display_rx_rings(priv, dma_conf);
/* Display TX ring */
- stmmac_display_tx_rings(priv);
+ stmmac_display_tx_rings(priv, dma_conf);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1364,44 +1314,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
/**
* stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: this function is called to clear the RX descriptors,
* whether basic or extended descriptors are in use.
*/
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
/* Clear the RX descriptors */
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
}
/**
* stmmac_clear_tx_descriptors - clear tx descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index.
* Description: this function is called to clear the TX descriptors,
* whether basic or extended descriptors are in use.
*/
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
/* Clear the TX descriptors */
- for (i = 0; i < priv->dma_tx_size; i++) {
- int last = (i == (priv->dma_tx_size - 1));
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1418,10 +1374,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/**
* stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* Description: this function is called to clear the TX and RX descriptors,
* whether basic or extended descriptors are in use.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1429,16 +1387,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
/* Clear the RX descriptors */
for (queue = 0; queue < rx_queue_cnt; queue++)
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
/* Clear the TX descriptors */
for (queue = 0; queue < tx_queue_cnt; queue++)
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @p: descriptor pointer
* @i: descriptor index
* @flags: gfp flag
@@ -1446,10 +1405,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -1478,7 +1439,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
return 0;
@@ -1487,12 +1448,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_free_rx_buffer - free RX dma buffers
* @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
* @i: buffer index.
*/
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+ struct stmmac_rx_queue *rx_q,
+ int i)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
@@ -1507,12 +1469,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* stmmac_free_tx_buffer - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
* @i: buffer index.
*/
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, int i)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1551,23 +1516,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, rx_q, i);
}
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct dma_desc *p;
int ret;
@@ -1576,7 +1546,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
else
p = rx_q->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
queue);
if (ret)
return ret;
@@ -1590,14 +1560,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
/**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
@@ -1608,12 +1581,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
}
}
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
@@ -1648,22 +1623,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
/**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
@@ -1690,36 +1668,35 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
/* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only.
*/
- stmmac_alloc_rx_buffers_zc(priv, queue);
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
} else {
- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
if (ret < 0)
return -ENOMEM;
}
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
-
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
+ dma_conf->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
+ dma_conf->dma_rx_size, 0);
}
return 0;
}
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
+ int queue;
int ret;
/* RX INITIALIZATION */
@@ -1727,7 +1704,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"SKB addresses:\nskb\t\tskb data\tdma data\n");
for (queue = 0; queue < rx_count; queue++) {
- ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
if (ret)
goto err_init_rx_buffers;
}
@@ -1736,19 +1713,16 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
err_init_rx_buffers:
while (queue >= 0) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
- if (queue == 0)
- break;
-
queue--;
}
@@ -1758,14 +1732,17 @@ err_init_rx_buffers:
/**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
netif_dbg(priv, probe, priv->dev,
@@ -1777,16 +1754,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
+ dma_conf->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
+ dma_conf->dma_tx_size, 0);
}
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1805,16 +1782,11 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff[i] = NULL;
}
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
-
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
-
return 0;
}
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt;
@@ -1823,7 +1795,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++)
- __init_dma_tx_desc_rings(priv, queue);
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
return 0;
}
@@ -1831,26 +1803,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
+ * @dma_conf: structure to take the dma data
* @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_rx_desc_rings(dev, flags);
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
if (ret)
return ret;
- ret = init_dma_tx_desc_rings(dev);
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, dma_conf);
if (netif_msg_hw(priv))
- stmmac_display_rings(priv);
+ stmmac_display_rings(priv, dma_conf);
return ret;
}
@@ -1858,17 +1833,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
tx_q->xsk_frames_done = 0;
- for (i = 0; i < priv->dma_tx_size; i++)
- stmmac_free_tx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1887,34 +1865,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++)
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
}
/**
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1926,29 +1907,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
page_pool_destroy(rx_q->page_pool);
}
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++)
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
}
/**
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, dma_conf, queue);
if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc);
@@ -1961,7 +1946,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
addr = tx_q->dma_tx;
}
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1969,28 +1954,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
kfree(tx_q->tx_skbuff);
}
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++)
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
}
/**
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: depending on which descriptor type is in use (extended or
* basic), this function allocates the resources for the TX and RX paths.
* For reception, for example, it pre-allocates the RX socket buffers to
* allow the zero-copy mechanism.
*/
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
@@ -2002,8 +1991,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = priv->dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
@@ -2018,7 +2007,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return ret;
}
- rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
@@ -2026,7 +2015,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2035,7 +2024,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2060,7 +2049,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
@@ -2068,7 +2058,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2076,7 +2066,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2084,28 +2074,31 @@ err_dma:
/**
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure holding the DMA configuration
* @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocates the RX socket buffer in order to
* allow the zero-copy mechanism.
*/
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -2118,7 +2111,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
else
size = sizeof(struct dma_desc);
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2135,7 +2128,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2143,7 +2137,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2151,27 +2145,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
/**
* alloc_dma_desc_resources - alloc TX/RX resources.
* @priv: private structure
+ * @dma_conf: structure holding the DMA configuration
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocates the RX socket buffer in order to
* allow the zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* RX Allocation */
- int ret = alloc_dma_rx_desc_resources(priv);
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
if (ret)
return ret;
- ret = alloc_dma_tx_desc_resources(priv);
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2179,16 +2175,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
/**
* free_dma_desc_resources - free dma desc resources
* @priv: private structure
+ * @dma_conf: structure holding the DMA configuration
*/
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Release the DMA TX socket buffers */
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
/* Release the DMA RX socket buffers later
* to ensure all pending XDP_TX buffers are returned.
*/
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
}
/**
@@ -2260,6 +2258,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
stmmac_stop_tx(priv, priv->ioaddr, chan);
}
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ u32 chan;
+
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
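A condensed sketch of the ordering this helper enforces on the open path (calls abbreviated from hunks later in this patch; stmmac_init_dma_engine() is reached via stmmac_hw_setup() and now leaves the per-channel DMA interrupts masked):

	stmmac_hw_setup(dev, true);		/* DMA channels initialized, IRQs masked */
	stmmac_enable_all_queues(priv);		/* NAPI instances are live from here */
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);	/* only now unmask RX/TX DMA interrupts */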
/**
* stmmac_start_all_dma - start all RX and TX DMA channels
* @priv: driver private structure
@@ -2345,7 +2360,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
@@ -2360,7 +2375,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
chan);
}
}
@@ -2376,7 +2391,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2451,7 +2466,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
@@ -2492,7 +2507,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
*/
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
@@ -2505,7 +2520,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
entry = tx_q->dirty_tx;
/* Try to clean all TX complete frame in 1 shot */
- while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
struct xdp_frame *xdpf;
struct sk_buff *skb;
struct dma_desc *p;
@@ -2607,7 +2622,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2647,8 +2662,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
@@ -2672,17 +2687,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
*/
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
- dma_free_tx_skbufs(priv, chan);
- stmmac_clear_tx_descriptors(priv, chan);
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
+ stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
@@ -2742,8 +2754,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir);
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
@@ -2902,12 +2914,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -2921,7 +2935,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -2936,7 +2950,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
@@ -2986,7 +3000,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
@@ -3008,12 +3022,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (priv->dma_tx_size - 1), chan);
+ (priv->dma_conf.dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (priv->dma_rx_size - 1), chan);
+ (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
@@ -3238,7 +3252,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @init_ptp: initialize PTP if set
+ * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3248,7 +3262,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3305,14 +3319,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
- if (init_ptp) {
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
+ if (ptp_register) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
}
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
+
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
/* Convert the timer from msec to usec */
@@ -3340,7 +3362,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
/* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3362,7 +3384,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
@@ -3406,7 +3428,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL);
- free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
}
}
irq_idx = priv->plat->rx_queues_to_use;
@@ -3415,7 +3437,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL);
- free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
}
}
@@ -3550,7 +3572,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
- 0, int_name, &priv->rx_queue[i]);
+ 0, int_name, &priv->dma_conf.rx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
@@ -3575,7 +3597,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
- 0, int_name, &priv->tx_queue[i]);
+ 0, int_name, &priv->dma_conf.tx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
@@ -3662,27 +3684,99 @@ static int stmmac_request_irq(struct net_device *dev)
}
/**
- * stmmac_open - open entry point of the driver
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate the DMA queues
+ * @priv: driver private structure
+ * @mtu: MTU to set up the DMA queues and buffers with
+ * Description: Allocate and populate a dma_conf based on the provided MTU.
+ * Allocate the Tx/Rx DMA queues and initialize them.
+ * Return value:
+ * the allocated dma_conf struct on success and an appropriate ERR_PTR() on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+ struct stmmac_dma_conf *dma_conf;
+ int chan, bfsize, ret;
+
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+ if (!dma_conf) {
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(mtu, 0);
+
+ dma_conf->dma_buf_sz = bfsize;
+ /* Choose the tx/rx size from the one already defined in the
+ * priv struct, if set.
+ */
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+ if (!dma_conf->dma_tx_size)
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!dma_conf->dma_rx_size)
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv, dma_conf);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto alloc_error;
+ }
+
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ return dma_conf;
+
+init_error:
+ free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+ kfree(dma_conf);
+ return ERR_PTR(ret);
+}
+
+/**
+ * __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
+ * @dma_conf: structure holding the DMA configuration
* Description:
* This function is the open entry point of the driver.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_open(struct net_device *dev)
+static int __stmmac_open(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
- int bfsize = 0;
u32 chan;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
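This is the first of several conversions in this patch from the open-coded pm_runtime_get_sync()/pm_runtime_put_noidle() error dance to pm_runtime_resume_and_get(), which performs the balancing put internally on failure. The two forms are equivalent, sketched side by side:

	/* before: the caller must drop the usage count itself on failure */
	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	/* after: the helper has already balanced the count on failure */
	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;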
@@ -3701,44 +3795,20 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- if (bfsize < 0)
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
- priv->dma_buf_sz = bfsize;
- buf_sz = bfsize;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- if (!priv->dma_tx_size)
- priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- if (!priv->dma_rx_size)
- priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+ buf_sz = dma_conf->dma_buf_sz;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
- /* Earlier check for TBS */
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
-
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- }
-
- ret = alloc_dma_desc_resources(priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- __func__);
- goto dma_desc_error;
- }
+ stmmac_reset_queues_param(priv);
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- __func__);
- goto init_error;
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
}
ret = stmmac_hw_setup(dev, true);
@@ -3759,6 +3829,7 @@ static int stmmac_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -3766,18 +3837,32 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
-dma_desc_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
return ret;
}
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_dma_conf *dma_conf;
+ int ret;
+
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+ if (IS_ERR(dma_conf))
+ return PTR_ERR(dma_conf);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ return ret;
+}
+
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
@@ -3799,8 +3884,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- netif_tx_disable(dev);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -3810,7 +3893,9 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ netif_tx_disable(dev);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -3824,11 +3909,15 @@ static int stmmac_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+ /* Power down the SerDes if present */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
@@ -3868,7 +3957,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true;
}
@@ -3886,7 +3975,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
@@ -3897,7 +3986,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3925,7 +4014,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int desc_size;
if (likely(priv->extend_desc))
@@ -3987,7 +4076,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des;
int i;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -3995,7 +4084,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
hdr = sizeof(struct udphdr);
} else {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
hdr = tcp_hdrlen(skb);
}
@@ -4027,7 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -4139,7 +4228,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4227,7 +4316,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, first_tx;
dma_addr_t des;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4290,7 +4379,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -4361,7 +4450,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -4476,7 +4565,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -4530,7 +4619,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -4558,12 +4647,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
/* First descriptor and last descriptor and not split header */
- return min_t(unsigned int, priv->dma_buf_sz, plen);
+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
@@ -4579,7 +4668,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
/* Not last descriptor */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
@@ -4590,7 +4679,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4653,7 +4742,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
@@ -4827,7 +4916,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
@@ -4870,7 +4959,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
if (rx_desc) {
@@ -4885,7 +4974,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
@@ -4907,7 +4996,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -4954,7 +5043,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5001,16 +5090,8 @@ read_again:
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
buf1_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
}
@@ -5075,7 +5156,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
@@ -5088,7 +5169,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5102,7 +5183,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -5146,7 +5227,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5187,16 +5268,8 @@ read_again:
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
len += buf2_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
if (buf2_len) {
buf2_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
@@ -5281,7 +5354,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
@@ -5293,7 +5366,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
@@ -5477,18 +5550,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ struct stmmac_dma_conf *dma_conf;
const int mtu = new_mtu;
+ int ret;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use;
- if (netif_running(dev)) {
- netdev_err(priv->dev, "must be stopped to change its MTU\n");
- return -EBUSY;
- }
-
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
return -EINVAL;
@@ -5500,8 +5570,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = mtu;
+ if (netif_running(dev)) {
+ netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
+ /* Try to allocate the new DMA conf with the new mtu */
+ dma_conf = stmmac_setup_dma_desc(priv, mtu);
+ if (IS_ERR(dma_conf)) {
+ netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
+ mtu);
+ return PTR_ERR(dma_conf);
+ }
+
+ stmmac_release(dev);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ if (ret) {
+ netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
+ return ret;
+ }
+ stmmac_set_rx_mode(dev);
+ }
+
+ dev->mtu = mtu;
netdev_update_features(dev);
return 0;
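The ordering in this hunk is the payoff of the dma_conf split: the replacement rings are staged while the old ones are still live, so an allocation failure leaves the interface up with its previous MTU, and teardown only starts once the new conf exists. Condensed from the code above, with the failure semantics spelled out:

	dma_conf = stmmac_setup_dma_desc(priv, mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);	/* still running with the old MTU */

	stmmac_release(dev);			/* old rings are freed here */
	ret = __stmmac_open(dev, dma_conf);	/* adopt the staged rings */
	kfree(dma_conf);			/* priv->dma_conf now holds a copy */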
@@ -5735,11 +5826,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = tx_q->queue_index;
struct stmmac_priv *priv;
int status;
- priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5765,10 +5858,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = rx_q->queue_index;
struct stmmac_priv *priv;
- priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
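Both MSI handlers receive their queue as the IRQ cookie, and with the queues now embedded in priv->dma_conf the private structure is recovered in two container_of() steps instead of one. Trimmed to the fields that matter (full struct definitions elided):

	struct stmmac_dma_conf {
		/* ... */
		struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
		struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
	};

	struct stmmac_priv {
		/* ... */
		struct stmmac_dma_conf dma_conf;	/* embedded by value */
	};

	/* queue -> conf: rx_q is element 'chan' of dma_conf->rx_queue[] */
	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
	/* conf -> priv: dma_conf is embedded, so a second hop reaches priv */
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);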
@@ -5799,10 +5894,10 @@ static void stmmac_poll_controller(struct net_device *dev)
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
@@ -5920,11 +6015,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
ret = eth_mac_addr(ndev, addr);
if (ret)
@@ -5983,34 +6076,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
return 0;
for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
seq_printf(seq, "RX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
}
}
for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
seq_printf(seq, "TX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
}
}
@@ -6254,11 +6347,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6346,31 +6437,32 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
int ret;
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}
- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) {
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
return;
}
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_reset_rx_queue(priv, queue);
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
@@ -6387,7 +6479,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6408,30 +6500,31 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}
- ret = __init_dma_tx_desc_rings(priv, queue);
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) {
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_reset_tx_queue(priv, queue);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
@@ -6459,7 +6552,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -6468,7 +6561,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6493,14 +6586,14 @@ int stmmac_xdp_open(struct net_device *dev)
u32 chan;
int ret;
- ret = alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__);
@@ -6508,15 +6601,17 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* Adjust Split header */
sph_en = (priv->hw->rx_csum > 0) && priv->sph;
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -6534,7 +6629,7 @@ int stmmac_xdp_open(struct net_device *dev)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6543,7 +6638,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -6570,16 +6665,17 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
return ret;
}
@@ -6596,18 +6692,18 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return -ENETDOWN;
if (!stmmac_xdp_is_enabled(priv))
- return -ENXIO;
+ return -EINVAL;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- rx_q = &priv->rx_queue[queue];
- tx_q = &priv->tx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
@@ -6792,19 +6888,16 @@ static void stmmac_napi_add(struct net_device *dev)
spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
}
if (queue < priv->plat->tx_queues_to_use) {
- netif_tx_napi_add(dev, &ch->tx_napi,
- stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx);
}
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_add(dev, &ch->rxtx_napi,
- stmmac_napi_poll_rxtx,
- NAPI_POLL_WEIGHT);
+ stmmac_napi_poll_rxtx);
}
}
}
@@ -6859,8 +6952,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
if (netif_running(dev))
stmmac_release(dev);
- priv->dma_rx_size = rx_size;
- priv->dma_tx_size = tx_size;
+ priv->dma_conf.dma_rx_size = rx_size;
+ priv->dma_conf.dma_tx_size = tx_size;
if (netif_running(dev))
ret = stmmac_open(dev);
@@ -7052,7 +7145,7 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen) {
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph_cap = true;
priv->sph = priv->sph_cap;
@@ -7159,16 +7252,17 @@ int stmmac_dvr_probe(struct device *device,
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- dev_err(priv->device,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err_probe(priv->device, ret,
+ "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
@@ -7195,14 +7289,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@@ -7217,8 +7303,6 @@ int stmmac_dvr_probe(struct device *device,
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_xpcs_setup:
@@ -7249,6 +7333,8 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
+ pm_runtime_get_sync(dev);
+
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
@@ -7267,8 +7353,6 @@ int stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
@@ -7276,6 +7360,9 @@ int stmmac_dvr_remove(struct device *dev)
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
@@ -7303,7 +7390,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -7352,6 +7439,25 @@ int stmmac_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+}
+
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
/**
* stmmac_reset_queues_param - reset queue parameters
* @priv: device pointer
@@ -7362,22 +7468,11 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
- for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
- }
-
- for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-
- tx_q->cur_tx = 0;
- tx_q->dirty_tx = 0;
- tx_q->mss = 0;
+ for (queue = 0; queue < rx_cnt; queue++)
+ stmmac_reset_rx_queue(priv, queue);
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
- }
+ for (queue = 0; queue < tx_cnt; queue++)
+ stmmac_reset_tx_queue(priv, queue);
}
/**
@@ -7437,7 +7532,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
@@ -7446,6 +7541,7 @@ int stmmac_resume(struct device *dev)
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
@@ -7462,7 +7558,7 @@ static int __init stmmac_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
if (kstrtoint(opt + 6, 0, &debug))
@@ -7493,11 +7589,11 @@ static int __init stmmac_cmdline_opt(char *str)
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion", __func__);
- return -EINVAL;
+ return 1;
}
__setup("stmmaceth=", stmmac_cmdline_opt);
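The switch from 0/-EINVAL to 1 follows the __setup() convention: a handler returns 1 when it has consumed the option, while any other value makes the early-param code treat the string as unrecognized and pass it along to init, so even a malformed stmmaceth= string should be reported as handled. A minimal illustrative handler (example_opt and "example=" are hypothetical names, not part of this driver):

	static int __init example_opt(char *str)
	{
		/* parse str here; even on a parse error, return 1 so the
		 * option is not forwarded to init as an unknown argument
		 */
		return 1;
	}
	__setup("example=", example_opt);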
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a5d150c5f3d8..5f177ea80725 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -88,11 +88,9 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -156,11 +154,9 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -229,11 +225,9 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data = 0;
u32 v;
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
+ data = pm_runtime_resume_and_get(priv->device);
+ if (data < 0)
return data;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -297,11 +291,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
u32 value = MII_BUSY;
u32 v;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -442,9 +434,11 @@ int stmmac_mdio_register(struct net_device *ndev)
int err = 0;
struct mii_bus *new_bus;
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
+ struct fwnode_handle *fixed_node;
int addr, found, max_addr;
if (!mdio_bus_data)
@@ -490,7 +484,7 @@ int stmmac_mdio_register(struct net_device *ndev)
err = of_mdiobus_register(new_bus, mdio_node);
if (err != 0) {
- dev_err(dev, "Cannot register the MDIO bus\n");
+ dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
@@ -498,6 +492,18 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->plat->has_xgmac)
stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+ /* If fixed-link is set, skip PHY scanning */
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ if (fwnode) {
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node) {
+ fwnode_handle_put(fixed_node);
+ goto bus_register_done;
+ }
+ }
+
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fcf17d8a0494..644bb54f5f02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}
-
- pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5d29f336315b..50f6b4a14be4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->phylink_node = np;
/* Get max speed of operation from device tree */
- if (of_property_read_u32(np, "max-speed", &plat->max_speed))
- plat->max_speed = -1;
+ of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
@@ -441,11 +440,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
/* Default to phy auto-detection */
plat->phy_addr = -1;
- /* Default to get clk_csr from stmmac_clk_crs_set(),
+ /* Default to get clk_csr from stmmac_clk_csr_set(),
* or get clk_csr from device tree.
*/
plat->clk_csr = -1;
- of_property_read_u32(np, "clk_csr", &plat->clk_csr);
+ if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
+ of_property_read_u32(np, "clk_csr", &plat->clk_csr);
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
@@ -816,7 +816,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (ret)
return ret;
- stmmac_init_tstamp_counter(priv, priv->systime_flags);
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return 0;
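/* The clk-csr hunk above leans on a property of of_property_read_u32():
 * it returns nonzero and leaves the output untouched when the property
 * is missing, so a preferred-name/legacy-name fallback chains cleanly.
 * A sketch with an assumed helper name:
 */
static u32 example_read_clk_csr(struct device_node *np, u32 def)
{
	u32 val = def;

	if (of_property_read_u32(np, "snps,clk-csr", &val))
		of_property_read_u32(np, "clk_csr", &val);
	return val;
}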
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0d24ebd37873..4d11980dcd64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -39,9 +39,9 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
diff = div_u64(adj, 1000000000ULL);
addend = neg_adj ? (addend - diff) : (addend + diff);
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_config_addend(priv, priv->ptpaddr, addend);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -86,9 +86,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
mutex_unlock(&priv->plat->est->lock);
}
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
/* Calculate new basetime and re-configure EST after PTP time adjust. */
if (est_rst) {
@@ -137,9 +137,9 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
unsigned long flags;
u64 ns = 0;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &ns);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -162,9 +162,9 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -175,11 +175,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
void __iomem *ptpaddr = priv->ptpaddr;
- void __iomem *ioaddr = priv->hw->pcsr;
struct stmmac_pps_cfg *cfg;
- u32 intr_value, acr_value;
int ret = -EOPNOTSUPP;
unsigned long flags;
+ u32 acr_value;
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
@@ -194,12 +193,12 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
ret = stmmac_flex_pps_config(priv, priv->ioaddr,
rq->perout.index, cfg, on,
priv->sub_second_inc,
priv->systime_flags);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
case PTP_CLK_REQ_EXTTS:
priv->plat->ext_snapshot_en = on;
@@ -213,19 +212,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
priv->plat->ext_snapshot_num >>
PTP_ACR_ATSEN_SHIFT);
- /* Enable Timestamp Interrupt */
- intr_value = readl(ioaddr + GMAC_INT_EN);
- intr_value |= GMAC_INT_TSIE;
- writel(intr_value, ioaddr + GMAC_INT_EN);
-
} else {
netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
priv->plat->ext_snapshot_num >>
PTP_ACR_ATSEN_SHIFT);
- /* Disable Timestamp Interrupt */
- intr_value = readl(ioaddr + GMAC_INT_EN);
- intr_value &= ~GMAC_INT_TSIE;
- writel(intr_value, ioaddr + GMAC_INT_EN);
}
writel(acr_value, ptpaddr + PTP_ACR);
mutex_unlock(&priv->aux_ts_lock);
@@ -297,9 +287,6 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
{
int i;
- if (priv->plat->ptp_clk_freq_config)
- priv->plat->ptp_clk_freq_config(priv);
-
for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
if (i >= STMMAC_PPS_MAX)
break;
@@ -317,7 +304,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
- spin_lock_init(&priv->ptp_lock);
+ rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
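/* A reduced sketch of the rwlock split introduced in this file: hot
 * timestamp reads may now run concurrently, while time and frequency
 * adjustments remain exclusive. hw_get_systime()/hw_config_addend()
 * are hypothetical stand-ins for the stmmac register accessors.
 */
static DEFINE_RWLOCK(example_ptp_lock);

static u64 example_gettime(void)
{
	unsigned long flags;
	u64 ns;

	read_lock_irqsave(&example_ptp_lock, flags);	/* shared */
	ns = hw_get_systime();
	read_unlock_irqrestore(&example_ptp_lock, flags);
	return ns;
}

static void example_set_addend(u32 addend)
{
	unsigned long flags;

	write_lock_irqsave(&example_ptp_lock, flags);	/* exclusive */
	hw_config_addend(addend);
	write_unlock_irqrestore(&example_ptp_lock, flags);
}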
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index be3cb63675a5..49af7e78b7f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
struct stmmac_channel *ch = &priv->channel[i];
u32 tail;
- tail = priv->rx_queue[i].dma_rx_phy +
- (priv->dma_rx_size * sizeof(struct dma_desc));
+ tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
+ (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
@@ -1084,8 +1084,9 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
struct tc_cls_u32_offload cls_u32 = { };
struct stmmac_packet_attrs attr = { };
- struct tc_action **actions, *act;
+ struct tc_action **actions;
struct tc_u32_sel *sel;
+ struct tcf_gact *gact;
struct tcf_exts *exts;
int ret, i, nk = 1;
@@ -1110,8 +1111,8 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
goto cleanup_exts;
}
- act = kcalloc(nk, sizeof(*act), GFP_KERNEL);
- if (!act) {
+ gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL);
+ if (!gact) {
ret = -ENOMEM;
goto cleanup_actions;
}
@@ -1126,9 +1127,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
exts->nr_actions = nk;
exts->actions = actions;
for (i = 0; i < nk; i++) {
- struct tcf_gact *gact = to_gact(&act[i]);
-
- actions[i] = &act[i];
+ actions[i] = (struct tc_action *)&gact[i];
gact->tcf_action = TC_ACT_SHOT;
}
@@ -1152,7 +1151,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
cleanup_act:
- kfree(act);
+ kfree(gact);
cleanup_actions:
kfree(actions);
cleanup_exts:
@@ -1681,7 +1680,7 @@ cleanup:
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
struct stmmac_packet_attrs attr = { };
- int size = priv->dma_buf_sz;
+ int size = priv->dma_conf.dma_buf_sz;
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
@@ -1764,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
/* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break;
if (i >= priv->plat->tx_queues_to_use)
@@ -1777,9 +1776,9 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
if (ret)
return ret;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
if (!curr_time) {
ret = -EOPNOTSUPP;
@@ -1799,9 +1798,9 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
goto fail_disable;
/* Check if expected time has elapsed */
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
ret = -EINVAL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index d61766eeac6d..773e415cc2de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL;
if (qopt->enable)
- priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else
- priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index dba9f12efa1c..0aca193d9550 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -29,7 +29,7 @@
* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles the
- * the page.
+ * page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
@@ -88,6 +88,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
#define cas_page_map(x) kmap_atomic((x))
#define cas_page_unmap(x) kunmap_atomic((x))
@@ -1234,19 +1235,6 @@ static void cas_init_rx_dma(struct cas *cp)
*/
readl(cp->regs + REG_INTR_STATUS_ALIAS);
writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
- if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- for (i = 1; i < N_RX_COMP_RINGS; i++)
- readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
-
- /* 2 is different from 3 and 4 */
- if (N_RX_COMP_RINGS > 1)
- writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
- cp->regs + REG_PLUS_ALIASN_CLEAR(1));
-
- for (i = 2; i < N_RX_COMP_RINGS; i++)
- writel(INTR_RX_DONE_ALT,
- cp->regs + REG_PLUS_ALIASN_CLEAR(i));
- }
/* set up pause thresholds */
val = CAS_BASE(RX_PAUSE_THRESH_OFF,
@@ -1325,7 +1313,7 @@ static void cas_init_rx_dma(struct cas *cp)
writel(val, cp->regs + REG_RX_PAGE_SIZE);
/* enable the header parser if desired */
- if (CAS_HP_FIRMWARE == cas_prog_null)
+ if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
return;
val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
@@ -3508,9 +3496,6 @@ enable_rx_done:
if (N_RX_DESC_RINGS > 1)
writel(RX_DESC_RINGN_SIZE(1) - 4,
cp->regs + REG_PLUS_RX_KICK1);
-
- for (i = 1; i < N_RX_COMP_RINGS; i++)
- writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
}
}
@@ -3795,7 +3780,7 @@ static void cas_reset(struct cas *cp, int blkflag)
/* program header parser */
if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
- (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+ (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
cas_load_firmware(cp, CAS_HP_FIRMWARE);
} else {
cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
@@ -4063,8 +4048,8 @@ static void cas_link_timer(struct timer_list *t)
if (link_transition_timeout != 0 &&
cp->link_transition_jiffies_valid &&
- ((jiffies - cp->link_transition_jiffies) >
- (link_transition_timeout))) {
+ time_is_before_jiffies(cp->link_transition_jiffies +
+ link_transition_timeout)) {
/* One-second counter so link-down workaround doesn't
* cause resets to occur so fast as to fool the switch
* into thinking the link is down.
@@ -4499,9 +4484,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static int cas_get_link_ksettings(struct net_device *dev,
@@ -4679,7 +4664,7 @@ static void cas_set_msglevel(struct net_device *dev, u32 value)
static int cas_get_regs_len(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
- return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+ return min_t(int, cp->casreg_len, CAS_MAX_REGS);
}
static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -5065,7 +5050,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->watchdog_timeo = CAS_TX_TIMEOUT;
#ifdef USE_NAPI
- netif_napi_add(dev, &cp->napi, cas_poll, 64);
+ netif_napi_add(dev, &cp->napi, cas_poll);
#endif
dev->irq = pdev->irq;
dev->dma = 0;
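/* A sketch of the timeout idiom adopted in cas_link_timer() above:
 * time_is_before_jiffies(x) expands to time_after(jiffies, x), which
 * compares via a signed delta and therefore stays correct across
 * jiffies wraparound, while also reading as plain English.
 */
static bool example_link_timed_out(unsigned long then, unsigned long timeout)
{
	return time_is_before_jiffies(then + timeout);
}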
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index ae5f05f03f88..2d91f4936d52 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -764,7 +764,7 @@
* PAUSE thresholds defined in terms of FIFO occupancy and may be translated
* into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
* when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
- * value is is 0x6F.
+ * value is 0x6F.
* DEFAULT: 0x00078
*/
#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 6b59b14e74b1..8addee6d04bd 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -63,8 +63,8 @@ static struct vio_version vsw_versions[] = {
static void vsw_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vsw_get_msglevel(struct net_device *dev)
@@ -335,7 +335,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->tsolen = 0;
/* Mark the port as belonging to ldmvsw which directs the
- * the common code to use the net_device in the vnet_port
+ * common code to use the net_device in the vnet_port
* rather than the net_device in the vnet (which is used
* by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE
* macro.
@@ -354,8 +354,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
dev_set_drvdata(&vdev->dev, port);
- netif_napi_add(dev, &port->napi, sunvnet_poll_common,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, sunvnet_poll_common);
spin_lock_irqsave(&vp->lock, flags);
list_add_rcu(&port->list, &vp->port_list);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ba8ad76313a9..e6144d963eaa 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -35,6 +35,25 @@
#include "niu.h"
+/* This driver wants to store a link to a "next page" within the
+ * page struct itself by overloading the content of the "mapping"
+ * member. This is not expected by the page API, but does currently
+ * work. However, the randstruct plugin gets very bothered by this
+ * case because "mapping" (struct address_space) is randomized, so
+ * casts to/from it trigger warnings. Hide this by way of a union,
+ * to create a typed alias of "mapping", since that's how it is
+ * actually being used here.
+ */
+union niu_page {
+ struct page page;
+ struct {
+ unsigned long __flags; /* unused alias of "flags" */
+ struct list_head __lru; /* unused alias of "lru" */
+ struct page *next; /* alias of "mapping" */
+ };
+};
+#define niu_next_page(p) container_of(p, union niu_page, page)->next
+
#define DRV_MODULE_NAME "niu"
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "Apr 22, 2010"
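/* A self-contained illustration of the union-alias technique above,
 * with invented types: give an opaque member a typed alias at the same
 * offset, and let the build fail if the layout ever drifts (the niu
 * patch does the same with BUILD_BUG_ON() in niu_init()).
 */
#include <stddef.h>

struct obj {
	unsigned long flags;
	void *opaque;			/* randomized in the real case */
};

union obj_alias {
	struct obj obj;
	struct {
		unsigned long __flags;	/* unused alias of "flags" */
		struct obj *next;	/* typed alias of "opaque" */
	};
};

_Static_assert(offsetof(struct obj, opaque) ==
	       offsetof(union obj_alias, next), "alias misaligned");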
@@ -3283,7 +3302,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
- for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
+ for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
if (p->index == addr) {
*link = pp;
goto found;
@@ -3300,7 +3319,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
unsigned int h = niu_hash_rxaddr(rp, base);
page->index = base;
- page->mapping = (struct address_space *) rp->rxhash[h];
+ niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
@@ -3382,11 +3401,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
if ((page->index + PAGE_SIZE) - rcr_size == addr) {
- *link = (struct page *) page->mapping;
+ *link = niu_next_page(page);
np->ops->unmap_page(np->device, page->index,
PAGE_SIZE, DMA_FROM_DEVICE);
page->index = 0;
- page->mapping = NULL;
+ niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
}
@@ -3451,11 +3470,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
- *link = (struct page *) page->mapping;
+ *link = niu_next_page(page);
np->ops->unmap_page(np->device, page->index,
PAGE_SIZE, DMA_FROM_DEVICE);
page->index = 0;
- page->mapping = NULL;
+ niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
get_page(page);
@@ -3518,13 +3537,13 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
- struct page *next = (struct page *) page->mapping;
+ struct page *next = niu_next_page(page);
u64 base = page->index;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
page->index = 0;
- page->mapping = NULL;
+ niu_next_page(page) = NULL;
__free_page(page);
@@ -6440,8 +6459,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
- struct page *next =
- (struct page *) page->mapping;
+ struct page *next = niu_next_page(page);
u64 base = page->index;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
@@ -6780,12 +6798,12 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strlcpy(info->bus_info, pci_name(np->pdev),
+ strscpy(info->bus_info, pci_name(np->pdev),
sizeof(info->bus_info));
}
@@ -7909,7 +7927,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
@@ -9097,7 +9115,7 @@ static int niu_ldg_init(struct niu *np)
for (i = 0; i < np->num_ldg; i++) {
struct niu_ldg *lp = &np->ldg[i];
- netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
+ netif_napi_add(np->dev, &lp->napi, niu_poll);
lp->np = np;
lp->ldg_num = ldg_num_map[i];
@@ -10176,6 +10194,9 @@ static int __init niu_init(void)
BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
+ BUILD_BUG_ON(offsetof(struct page, mapping) !=
+ offsetof(union niu_page, next));
+
niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
#ifdef CONFIG_SPARC64
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 531a6f449afa..34b94153bf0c 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1038,8 +1038,8 @@ static void bigmac_set_multicast(struct net_device *dev)
/* Ethtool support... */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunbmac", sizeof(info->driver));
- strlcpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->driver, "sunbmac", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
}
static u32 bigmac_get_link(struct net_device *dev)
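/* On the strlcpy() -> strscpy() conversions running through these
 * drivers: strscpy() always NUL-terminates, never reads the source
 * past what it copies, and returns -E2BIG on truncation rather than
 * the would-be source length. Checking the result is optional for
 * fixed strings like the ones above; a sketch with assumed names:
 */
static void example_fill_field(char *dst, size_t len)
{
	if (strscpy(dst, "some-driver-name", len) < 0)	/* -E2BIG */
		pr_warn("drvinfo field truncated\n");
}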
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 036856102c50..4154e68639ac 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -52,7 +52,6 @@
#endif
#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif
@@ -1089,7 +1088,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
/* netif_stop_queue() must be done before checking
- * checking tx index in TX_BUFFS_AVAIL() below, because
+ * tx index in TX_BUFFS_AVAIL() below, because
* in gem_tx(), we update tx_old before checking for
* netif_queue_stopped().
*/
@@ -2522,9 +2521,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_link_ksettings(struct net_device *dev,
@@ -2981,7 +2980,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_consistent;
dev->netdev_ops = &gem_netdev_ops;
- netif_napi_add(dev, &gp->napi, gem_poll, 64);
+ netif_napi_add(dev, &gp->napi, gem_poll);
dev->ethtool_ops = &gem_ethtool_ops;
dev->watchdog_timeo = 5 * HZ;
dev->dma = 0;
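/* The netif_napi_add() calls in these drivers drop the trailing weight
 * argument: the API now applies the default NAPI weight (64, i.e.
 * NAPI_POLL_WEIGHT) internally, so callers that passed 64 convert
 * mechanically. Sketch using the names from the hunk above:
 */
static void example_setup_napi(struct net_device *dev, struct gem *gp)
{
	/* was: netif_napi_add(dev, &gp->napi, gem_poll, 64); */
	netif_napi_add(dev, &gp->napi, gem_poll);
}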
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ad9029ae6848..1c16548415cd 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -61,15 +61,8 @@
#include "sunhme.h"
#define DRV_NAME "sunhme"
-#define DRV_VERSION "3.10"
-#define DRV_RELDATE "August 26, 2008"
-#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
-static char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
MODULE_LICENSE("GPL");
@@ -87,13 +80,17 @@ static struct quattro *qfe_sbus_list;
static struct quattro *qfe_pci_list;
#endif
-#undef HMEDEBUG
-#undef SXDEBUG
-#undef RXDEBUG
-#undef TXDEBUG
-#undef TXLOGGING
+#define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
+#define HMD hme_debug
+
+/* "Auto Switch Debug" aka phy debug */
+#if 1
+#define ASD hme_debug
+#else
+#define ASD(...)
+#endif
-#ifdef TXLOGGING
+#if 0
struct hme_tx_logent {
unsigned int tstamp;
int tx_new, tx_old;
@@ -128,46 +125,16 @@ static __inline__ void tx_dump_log(void)
this = txlog_cur_entry;
for (i = 0; i < TX_LOG_LEN; i++) {
- printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
+ pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
tx_log[this].tstamp,
tx_log[this].tx_new, tx_log[this].tx_old,
tx_log[this].action, tx_log[this].status);
this = (this + 1) & (TX_LOG_LEN - 1);
}
}
-static __inline__ void tx_dump_ring(struct happy_meal *hp)
-{
- struct hmeal_init_block *hb = hp->happy_block;
- struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
- int i;
-
- for (i = 0; i < TX_RING_SIZE; i+=4) {
- printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
- i, i + 4,
- le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
- le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
- le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
- le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
- }
-}
-#else
-#define tx_add_log(hp, a, s) do { } while(0)
-#define tx_dump_log() do { } while(0)
-#define tx_dump_ring(hp) do { } while(0)
-#endif
-
-#ifdef HMEDEBUG
-#define HMD(x) printk x
#else
-#define HMD(x)
-#endif
-
-/* #define AUTO_SWITCH_DEBUG */
-
-#ifdef AUTO_SWITCH_DEBUG
-#define ASD(x) printk x
-#else
-#define ASD(x)
+#define tx_add_log(hp, a, s)
+#define tx_dump_log()
#endif
#define DEFAULT_IPG0 16 /* For lance-mode only */
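/* A condensed before/after of the debug-macro rework above: the old
 * wrappers needed double parentheses because they handed a complete
 * argument list to printk; the variadic form takes printf-style
 * arguments directly and prefixes __func__ automatically.
 */
#define OLD_HMD(x)	printk x	/* call: OLD_HMD(("v=%d\n", v)); */
#define hme_debug(fmt, ...) \
	pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
					/* call: hme_debug("v=%d\n", v); */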
@@ -343,8 +310,6 @@ static int happy_meal_bb_read(struct happy_meal *hp,
int retval = 0;
int i;
- ASD(("happy_meal_bb_read: reg=%d ", reg));
-
/* Enable the MIF BitBang outputs. */
hme_write32(hp, tregs + TCVR_BBOENAB, 1);
@@ -378,7 +343,7 @@ static int happy_meal_bb_read(struct happy_meal *hp,
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
- ASD(("value=%x\n", retval));
+ ASD("reg=%d value=%x\n", reg, retval);
return retval;
}
@@ -389,7 +354,7 @@ static void happy_meal_bb_write(struct happy_meal *hp,
u32 tmp;
int i;
- ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
+ ASD("reg=%d value=%x\n", reg, value);
/* Enable the MIF BitBang outputs. */
hme_write32(hp, tregs + TCVR_BBOENAB, 1);
@@ -433,14 +398,13 @@ static int happy_meal_tcvr_read(struct happy_meal *hp,
int tries = TCVR_READ_TRIES;
int retval;
- ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
if (hp->tcvr_type == none) {
- ASD(("no transceiver, value=TCVR_FAILURE\n"));
+ ASD("no transceiver, value=TCVR_FAILURE\n");
return TCVR_FAILURE;
}
if (!(hp->happy_flags & HFLAG_FENABLE)) {
- ASD(("doing bit bang\n"));
+ ASD("doing bit bang\n");
return happy_meal_bb_read(hp, tregs, reg);
}
@@ -449,11 +413,11 @@ static int happy_meal_tcvr_read(struct happy_meal *hp,
while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
udelay(20);
if (!tries) {
- printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
+ netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
return TCVR_FAILURE;
}
retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
- ASD(("value=%04x\n", retval));
+ ASD("reg=0x%02x value=%04x\n", reg, retval);
return retval;
}
@@ -465,7 +429,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
{
int tries = TCVR_WRITE_TRIES;
- ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
+ ASD("reg=0x%02x value=%04x\n", reg, value);
/* Welcome to Sun Microsystems, can I take your order please? */
if (!(hp->happy_flags & HFLAG_FENABLE)) {
@@ -482,7 +446,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
/* Anything else? */
if (!tries)
- printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
+ netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
/* Fifty-two cents is your change, have a nice day. */
}
@@ -545,43 +509,24 @@ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
- printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
- if (hp->tcvr_type == external)
- printk("external ");
- else
- printk("internal ");
- printk("transceiver at ");
hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
- if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
- if (hp->sw_lpa & LPA_100FULL)
- printk("100Mb/s, Full Duplex.\n");
- else
- printk("100Mb/s, Half Duplex.\n");
- } else {
- if (hp->sw_lpa & LPA_10FULL)
- printk("10Mb/s, Full Duplex.\n");
- else
- printk("10Mb/s, Half Duplex.\n");
- }
+
+ netdev_info(hp->dev,
+ "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
+ hp->tcvr_type == external ? "external" : "internal",
+ hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
+ hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
}
static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
- printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
- if (hp->tcvr_type == external)
- printk("external ");
- else
- printk("internal ");
- printk("transceiver at ");
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
- if (hp->sw_bmcr & BMCR_SPEED100)
- printk("100Mb/s, ");
- else
- printk("10Mb/s, ");
- if (hp->sw_bmcr & BMCR_FULLDPLX)
- printk("Full Duplex.\n");
- else
- printk("Half Duplex.\n");
+
+ netdev_info(hp->dev,
+ "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
+ hp->tcvr_type == external ? "external" : "internal",
+ hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
+ hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
}
static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
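/* The display_link_mode()/display_forced_link_mode() rewrites above
 * fold chains of bare printk() fragments into one netdev_info() call
 * with ternary selectors; a single call also keeps the line atomic
 * when several CPUs log concurrently. Reduced sketch, assumed flags:
 */
static void example_report_link(struct net_device *dev, bool speed100,
				bool full)
{
	netdev_info(dev, "Link is up at %dMb/s, %s Duplex.\n",
		    speed100 ? 100 : 10, full ? "Full" : "Half");
}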
@@ -679,8 +624,8 @@ static void happy_meal_timer(struct timer_list *t)
/* Enter force mode. */
do_force_mode:
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
- printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
- hp->dev->name);
+ netdev_notice(hp->dev,
+ "Auto-Negotiation unsuccessful, trying force link mode\n");
hp->sw_bmcr = BMCR_SPEED100;
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -739,8 +684,8 @@ static void happy_meal_timer(struct timer_list *t)
restart_timer = 0;
} else {
if (hp->timer_ticks >= 10) {
- printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
- "not completely up.\n", hp->dev->name);
+ netdev_notice(hp->dev,
+ "Auto negotiation successful, link still not completely up.\n");
hp->timer_ticks = 0;
restart_timer = 1;
} else {
@@ -795,14 +740,14 @@ static void happy_meal_timer(struct timer_list *t)
*/
/* Let the user know... */
- printk(KERN_NOTICE "%s: Link down, cable problem?\n",
- hp->dev->name);
+ netdev_notice(hp->dev,
+ "Link down, cable problem?\n");
ret = happy_meal_init(hp);
if (ret) {
/* ho hum... */
- printk(KERN_ERR "%s: Error, cannot re-init the "
- "Happy Meal.\n", hp->dev->name);
+ netdev_err(hp->dev,
+ "Error, cannot re-init the Happy Meal.\n");
}
goto out;
}
@@ -824,8 +769,8 @@ static void happy_meal_timer(struct timer_list *t)
case asleep:
default:
/* Can't happen.... */
- printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Aieee, link timer is asleep but we got one anyways!\n");
restart_timer = 0;
hp->timer_ticks = 0;
hp->timer_state = asleep; /* foo on you */
@@ -849,7 +794,7 @@ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
{
int tries = TX_RESET_TRIES;
- HMD(("happy_meal_tx_reset: reset, "));
+ HMD("reset...\n");
/* Would you like to try our SMCC Delux? */
hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
@@ -858,10 +803,10 @@ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
/* Lettuce, tomato, buggy hardware (no extra charge)? */
if (!tries)
- printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
+ netdev_err(hp->dev, "Transceiver BigMac ATTACK!");
/* Take care. */
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -869,7 +814,7 @@ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
{
int tries = RX_RESET_TRIES;
- HMD(("happy_meal_rx_reset: reset, "));
+ HMD("reset...\n");
/* We have a special on GNU/Viking hardware bugs today. */
hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
@@ -878,10 +823,10 @@ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
/* Will that be all? */
if (!tries)
- printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
+ netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
/* Don't forget your vik_1137125_wa. Have a nice day. */
- HMD(("done\n"));
+ HMD("done\n");
}
#define STOP_TRIES 16
@@ -891,7 +836,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
{
int tries = STOP_TRIES;
- HMD(("happy_meal_stop: reset, "));
+ HMD("reset...\n");
/* We're consolidating our STB products, it's your lucky day. */
hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
@@ -900,10 +845,10 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
/* Come back next week when we are "Sun Microelectronics". */
if (!tries)
- printk(KERN_ERR "happy meal: Fry guys.");
+ netdev_err(hp->dev, "Fry guys.\n");
/* Remember: "Different name, same old buggy as shit hardware." */
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -932,21 +877,18 @@ static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
/* hp->happy_lock must be held */
static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
{
- ASD(("happy_meal_poll_stop: "));
-
/* If polling disabled or not polling already, nothing to do. */
if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
(HFLAG_POLLENABLE | HFLAG_POLL)) {
- HMD(("not polling, return\n"));
+ ASD("not polling, return\n");
return;
}
/* Shut up the MIF. */
- ASD(("were polling, mif ints off, "));
+ ASD("were polling, mif ints off, polling off\n");
hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
/* Turn off polling. */
- ASD(("polling off, "));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
@@ -955,7 +897,7 @@ static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
/* Let the bits set. */
udelay(200);
- ASD(("done\n"));
+ ASD("done\n");
}
/* Only Sun can take such nice parts and fuck up the programming interface
@@ -971,44 +913,40 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
int result, tries = TCVR_RESET_TRIES;
tconfig = hme_read32(hp, tregs + TCVR_CFG);
- ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
+ ASD("tcfg=%08x\n", tconfig);
if (hp->tcvr_type == external) {
- ASD(("external<"));
hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
hp->tcvr_type = internal;
hp->paddr = TCV_PADDR_ITX;
- ASD(("ISOLATE,"));
happy_meal_tcvr_write(hp, tregs, MII_BMCR,
(BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
if (result == TCVR_FAILURE) {
- ASD(("phyread_fail>\n"));
+ ASD("phyread_fail\n");
return -1;
}
- ASD(("phyread_ok,PSELECT>"));
+ ASD("external: ISOLATE, phyread_ok, PSELECT\n");
hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
hp->tcvr_type = external;
hp->paddr = TCV_PADDR_ETX;
} else {
if (tconfig & TCV_CFG_MDIO1) {
- ASD(("internal<PSELECT,"));
hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
- ASD(("ISOLATE,"));
happy_meal_tcvr_write(hp, tregs, MII_BMCR,
(BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
if (result == TCVR_FAILURE) {
- ASD(("phyread_fail>\n"));
+ ASD("phyread_fail>\n");
return -1;
}
- ASD(("phyread_ok,~PSELECT>"));
+ ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
hp->tcvr_type = internal;
hp->paddr = TCV_PADDR_ITX;
}
}
- ASD(("BMCR_RESET "));
+ ASD("BMCR_RESET...\n");
happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
while (--tries) {
@@ -1021,10 +959,10 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
udelay(20);
}
if (!tries) {
- ASD(("BMCR RESET FAILED!\n"));
+ ASD("BMCR RESET FAILED!\n");
return -1;
}
- ASD(("RESET_OK\n"));
+ ASD("RESET_OK\n");
/* Get fresh copies of the PHY registers. */
hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
@@ -1032,7 +970,7 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
- ASD(("UNISOLATE"));
+ ASD("UNISOLATE...\n");
hp->sw_bmcr &= ~(BMCR_ISOLATE);
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1046,10 +984,10 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
udelay(20);
}
if (!tries) {
- ASD((" FAILED!\n"));
+ ASD("UNISOLATE FAILED!\n");
return -1;
}
- ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
+ ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
if (!is_lucent_phy(hp)) {
result = happy_meal_tcvr_read(hp, tregs,
DP83840_CSCONFIG);
@@ -1067,60 +1005,55 @@ static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tr
{
unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
- ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
+ ASD("tcfg=%08lx\n", tconfig);
if (hp->happy_flags & HFLAG_POLL) {
/* If we are polling, we must stop to get the transceiver type. */
- ASD(("<polling> "));
if (hp->tcvr_type == internal) {
if (tconfig & TCV_CFG_MDIO1) {
- ASD(("<internal> <poll stop> "));
happy_meal_poll_stop(hp, tregs);
hp->paddr = TCV_PADDR_ETX;
hp->tcvr_type = external;
- ASD(("<external>\n"));
tconfig &= ~(TCV_CFG_PENABLE);
tconfig |= TCV_CFG_PSELECT;
hme_write32(hp, tregs + TCVR_CFG, tconfig);
+ ASD("poll stop, internal->external\n");
}
} else {
if (hp->tcvr_type == external) {
- ASD(("<external> "));
if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
- ASD(("<poll stop> "));
happy_meal_poll_stop(hp, tregs);
hp->paddr = TCV_PADDR_ITX;
hp->tcvr_type = internal;
- ASD(("<internal>\n"));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) &
~(TCV_CFG_PSELECT));
+ ASD("poll stop, external->internal\n");
}
- ASD(("\n"));
} else {
- ASD(("<none>\n"));
+ ASD("polling, none\n");
}
}
} else {
u32 reread = hme_read32(hp, tregs + TCVR_CFG);
/* Else we can just work off of the MDIO bits. */
- ASD(("<not polling> "));
if (reread & TCV_CFG_MDIO1) {
hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
hp->paddr = TCV_PADDR_ETX;
hp->tcvr_type = external;
- ASD(("<external>\n"));
+ ASD("not polling, external\n");
} else {
if (reread & TCV_CFG_MDIO0) {
hme_write32(hp, tregs + TCVR_CFG,
tconfig & ~(TCV_CFG_PSELECT));
hp->paddr = TCV_PADDR_ITX;
hp->tcvr_type = internal;
- ASD(("<internal>\n"));
+ ASD("not polling, internal\n");
} else {
- printk(KERN_ERR "happy meal: Transceiver and a coke please.");
+ netdev_err(hp->dev,
+ "Transceiver and a coke please.");
hp->tcvr_type = none; /* Grrr... */
- ASD(("<none>\n"));
+ ASD("not polling, none\n");
}
}
}
@@ -1227,15 +1160,14 @@ static void happy_meal_init_rings(struct happy_meal *hp)
struct hmeal_init_block *hb = hp->happy_block;
int i;
- HMD(("happy_meal_init_rings: counters to zero, "));
+ HMD("counters to zero\n");
hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
/* Free any skippy bufs left around in the rings. */
- HMD(("clean, "));
happy_meal_clean_rings(hp);
/* Now get new skippy bufs for the receive ring. */
- HMD(("init rxring, "));
+ HMD("init rxring\n");
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
u32 mapping;
@@ -1262,11 +1194,11 @@ static void happy_meal_init_rings(struct happy_meal *hp)
skb_reserve(skb, RX_OFFSET);
}
- HMD(("init txring, "));
+ HMD("init txring\n");
for (i = 0; i < TX_RING_SIZE; i++)
hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -1313,17 +1245,11 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp,
* XXX so I completely skip checking for it in the BMSR for now.
*/
-#ifdef AUTO_SWITCH_DEBUG
- ASD(("%s: Advertising [ ", hp->dev->name));
- if (hp->sw_advertise & ADVERTISE_10HALF)
- ASD(("10H "));
- if (hp->sw_advertise & ADVERTISE_10FULL)
- ASD(("10F "));
- if (hp->sw_advertise & ADVERTISE_100HALF)
- ASD(("100H "));
- if (hp->sw_advertise & ADVERTISE_100FULL)
- ASD(("100F "));
-#endif
+ ASD("Advertising [ %s%s%s%s]\n",
+ hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
+ hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
+ hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
+ hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
/* Enable Auto-Negotiation, this is usually on already... */
hp->sw_bmcr |= BMCR_ANENABLE;
@@ -1343,10 +1269,11 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp,
udelay(10);
}
if (!timeout) {
- printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
- "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
- printk(KERN_NOTICE "%s: Performing force link detection.\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
+ hp->sw_bmcr);
+ netdev_notice(hp->dev,
+ "Performing force link detection.\n");
goto force_link;
} else {
hp->timer_state = arbwait;
@@ -1401,70 +1328,69 @@ static int happy_meal_init(struct happy_meal *hp)
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
+ const char *bursts = "64";
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
del_timer(&hp->happy_timer);
- HMD(("happy_meal_init: happy_flags[%08x] ",
- hp->happy_flags));
+ HMD("happy_flags[%08x]\n", hp->happy_flags);
if (!(hp->happy_flags & HFLAG_INIT)) {
- HMD(("set HFLAG_INIT, "));
+ HMD("set HFLAG_INIT\n");
hp->happy_flags |= HFLAG_INIT;
happy_meal_get_counters(hp, bregs);
}
/* Stop polling. */
- HMD(("to happy_meal_poll_stop\n"));
+ HMD("to happy_meal_poll_stop\n");
happy_meal_poll_stop(hp, tregs);
/* Stop transmitter and receiver. */
- HMD(("happy_meal_init: to happy_meal_stop\n"));
+ HMD("to happy_meal_stop\n");
happy_meal_stop(hp, gregs);
/* Alloc and reset the tx/rx descriptor chains. */
- HMD(("happy_meal_init: to happy_meal_init_rings\n"));
+ HMD("to happy_meal_init_rings\n");
happy_meal_init_rings(hp);
/* Shut up the MIF. */
- HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
- hme_read32(hp, tregs + TCVR_IMASK)));
+ HMD("Disable all MIF irqs (old[%08x])\n",
+ hme_read32(hp, tregs + TCVR_IMASK));
hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
/* See if we can enable the MIF frame on this card to speak to the DP83840. */
if (hp->happy_flags & HFLAG_FENABLE) {
- HMD(("use frame old[%08x], ",
- hme_read32(hp, tregs + TCVR_CFG)));
+ HMD("use frame old[%08x]\n",
+ hme_read32(hp, tregs + TCVR_CFG));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
} else {
- HMD(("use bitbang old[%08x], ",
- hme_read32(hp, tregs + TCVR_CFG)));
+ HMD("use bitbang old[%08x]\n",
+ hme_read32(hp, tregs + TCVR_CFG));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
}
/* Check the state of the transceiver. */
- HMD(("to happy_meal_transceiver_check\n"));
+ HMD("to happy_meal_transceiver_check\n");
happy_meal_transceiver_check(hp, tregs);
/* Put the Big Mac into a sane state. */
- HMD(("happy_meal_init: "));
switch(hp->tcvr_type) {
case none:
/* Cannot operate if we don't know the transceiver type! */
- HMD(("AAIEEE no transceiver type, EAGAIN"));
+ HMD("AAIEEE no transceiver type, EAGAIN\n");
return -EAGAIN;
case internal:
/* Using the MII buffers. */
- HMD(("internal, using MII, "));
+ HMD("internal, using MII\n");
hme_write32(hp, bregs + BMAC_XIFCFG, 0);
break;
case external:
/* Not using the MII, disable it. */
- HMD(("external, disable MII, "));
+ HMD("external, disable MII\n");
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
}
@@ -1473,18 +1399,16 @@ static int happy_meal_init(struct happy_meal *hp)
return -EAGAIN;
/* Reset the Happy Meal Big Mac transceiver and the receiver. */
- HMD(("tx/rx reset, "));
+ HMD("tx/rx reset\n");
happy_meal_tx_reset(hp, bregs);
happy_meal_rx_reset(hp, bregs);
/* Set jam size and inter-packet gaps to reasonable defaults. */
- HMD(("jsize/ipg1/ipg2, "));
hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
/* Load up the MAC address and random seed. */
- HMD(("rseed/macaddr, "));
/* The docs recommend to use the 10LSB of our MAC here. */
hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
@@ -1493,7 +1417,6 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
- HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
@@ -1523,9 +1446,9 @@ static int happy_meal_init(struct happy_meal *hp)
}
/* Set the RX and TX ring ptrs. */
- HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
- ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
- ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
+ HMD("ring ptrs rxr[%08x] txr[%08x]\n",
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
hme_write32(hp, erxregs + ERX_RING,
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
hme_write32(hp, etxregs + ETX_RING,
@@ -1543,9 +1466,6 @@ static int happy_meal_init(struct happy_meal *hp)
| 0x4);
/* Set the supported burst sizes. */
- HMD(("happy_meal_init: old[%08x] bursts<",
- hme_read32(hp, gregs + GREG_CFG)));
-
#ifndef CONFIG_SPARC
/* It is always PCI and can handle 64byte bursts. */
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
@@ -1573,34 +1493,35 @@ static int happy_meal_init(struct happy_meal *hp)
}
#endif
- HMD(("64>"));
+ bursts = "64";
hme_write32(hp, gregs + GREG_CFG, gcfg);
} else if (hp->happy_bursts & DMA_BURST32) {
- HMD(("32>"));
+ bursts = "32";
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
} else if (hp->happy_bursts & DMA_BURST16) {
- HMD(("16>"));
+ bursts = "16";
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
} else {
- HMD(("XXX>"));
+ bursts = "XXX";
hme_write32(hp, gregs + GREG_CFG, 0);
}
#endif /* CONFIG_SPARC */
+ HMD("old[%08x] bursts<%s>\n",
+ hme_read32(hp, gregs + GREG_CFG), bursts);
+
/* Turn off interrupts we do not want to hear. */
- HMD((", enable global interrupts, "));
hme_write32(hp, gregs + GREG_IMASK,
(GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
/* Set the transmit ring buffer size. */
- HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
- hme_read32(hp, etxregs + ETX_RSIZE)));
+ HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
+ hme_read32(hp, etxregs + ETX_RSIZE));
hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
/* Enable transmitter DVMA. */
- HMD(("tx dma enable old[%08x], ",
- hme_read32(hp, etxregs + ETX_CFG)));
+ HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
hme_write32(hp, etxregs + ETX_CFG,
hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
@@ -1609,21 +1530,23 @@ static int happy_meal_init(struct happy_meal *hp)
* properly. I cannot think of a sane way to provide complete
* coverage for this hardware bug yet.
*/
- HMD(("erx regs bug old[%08x]\n",
- hme_read32(hp, erxregs + ERX_CFG)));
+ HMD("erx regs bug old[%08x]\n",
+ hme_read32(hp, erxregs + ERX_CFG));
hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
regtmp = hme_read32(hp, erxregs + ERX_CFG);
hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
- printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
- printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
- ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
+ netdev_err(hp->dev,
+ "Eieee, rx config register gets greasy fries.\n");
+ netdev_err(hp->dev,
+ "Trying to set %08x, reread gives %08x\n",
+ ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
/* XXX Should return failure here... */
}
/* Enable Big Mac hash table filter. */
- HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
- hme_read32(hp, bregs + BMAC_RXCFG)));
+ HMD("enable hash rx_cfg_old[%08x]\n",
+ hme_read32(hp, bregs + BMAC_RXCFG));
rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
if (hp->dev->flags & IFF_PROMISC)
rxcfg |= BIGMAC_RXCFG_PMISC;
@@ -1633,7 +1556,7 @@ static int happy_meal_init(struct happy_meal *hp)
udelay(10);
/* Ok, configure the Big Mac transmitter. */
- HMD(("BIGMAC init, "));
+ HMD("BIGMAC init\n");
regtmp = 0;
if (hp->happy_flags & HFLAG_FULL)
regtmp |= BIGMAC_TXCFG_FULLDPLX;
@@ -1657,14 +1580,13 @@ static int happy_meal_init(struct happy_meal *hp)
if (hp->tcvr_type == external)
regtmp |= BIGMAC_XCFG_MIIDISAB;
- HMD(("XIF config old[%08x], ",
- hme_read32(hp, bregs + BMAC_XIFCFG)));
+ HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
/* Start things up. */
- HMD(("tx old[%08x] and rx [%08x] ON!\n",
- hme_read32(hp, bregs + BMAC_TXCFG),
- hme_read32(hp, bregs + BMAC_RXCFG)));
+ HMD("tx old[%08x] and rx [%08x] ON!\n",
+ hme_read32(hp, bregs + BMAC_TXCFG),
+ hme_read32(hp, bregs + BMAC_RXCFG));
/* Set larger TX/RX size to allow for 802.1q */
hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
@@ -1754,25 +1676,26 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
GREG_STAT_SLVPERR))
- printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
- hp->dev->name, status);
+ netdev_err(hp->dev,
+ "Error interrupt for happy meal, status = %08x\n",
+ status);
if (status & GREG_STAT_RFIFOVF) {
/* Receive FIFO overflow is harmless and the hardware will take
care of it, just some packets are lost. Who cares. */
- printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
+ netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
}
if (status & GREG_STAT_STSTERR) {
/* BigMAC SQE link test failed. */
- printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
reset = 1;
}
if (status & GREG_STAT_TFIFO_UND) {
/* Transmit FIFO underrun, again DMA error likely. */
- printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Happy Meal transmitter FIFO underrun, DMA error.\n");
reset = 1;
}
@@ -1780,7 +1703,7 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Driver error, tried to transmit something larger
* than ethernet max mtu.
*/
- printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
reset = 1;
}
@@ -1790,21 +1713,16 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
* faster than the interrupt handler could keep up
* with.
*/
- printk(KERN_INFO "%s: Happy Meal out of receive "
- "descriptors, packet dropped.\n",
- hp->dev->name);
+ netdev_info(hp->dev,
+ "Happy Meal out of receive descriptors, packet dropped.\n");
}
if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
/* All sorts of DMA receive errors. */
- printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
- if (status & GREG_STAT_RXERR)
- printk("GenericError ");
- if (status & GREG_STAT_RXPERR)
- printk("ParityError ");
- if (status & GREG_STAT_RXTERR)
- printk("RxTagBotch ");
- printk("]\n");
+ netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
+ status & GREG_STAT_RXERR ? "GenericError " : "",
+ status & GREG_STAT_RXPERR ? "ParityError " : "",
+ status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
reset = 1;
}
@@ -1812,29 +1730,24 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Driver bug, didn't set EOP bit in tx descriptor given
* to the happy meal.
*/
- printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "EOP not set in happy meal transmit descriptor!\n");
reset = 1;
}
if (status & GREG_STAT_MIFIRQ) {
/* MIF signalled an interrupt, were we polling it? */
- printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
}
if (status &
(GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
/* All sorts of transmit DMA errors. */
- printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
- if (status & GREG_STAT_TXEACK)
- printk("GenericError ");
- if (status & GREG_STAT_TXLERR)
- printk("LateError ");
- if (status & GREG_STAT_TXPERR)
- printk("ParityError ");
- if (status & GREG_STAT_TXTERR)
- printk("TagBotch ");
- printk("]\n");
+ netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
+ status & GREG_STAT_TXEACK ? "GenericError " : "",
+ status & GREG_STAT_TXLERR ? "LateError " : "",
+ status & GREG_STAT_TXPERR ? "ParityError " : "",
+ status & GREG_STAT_TXTERR ? "TagBotch " : "");
reset = 1;
}
@@ -1842,14 +1755,14 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Bus or parity error when cpu accessed happy meal registers
* or it's internal FIFO's. Should never see this.
*/
- printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
- hp->dev->name,
- (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
+ netdev_err(hp->dev,
+ "Happy Meal register access SBUS slave (%s) error.\n",
+ (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
reset = 1;
}
if (reset) {
- printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
+ netdev_notice(hp->dev, "Resetting...\n");
happy_meal_init(hp);
return 1;
}
@@ -1861,22 +1774,22 @@ static void happy_meal_mif_interrupt(struct happy_meal *hp)
{
void __iomem *tregs = hp->tcvregs;
- printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
+ netdev_info(hp->dev, "Link status change.\n");
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
/* Use the fastest transmission protocol possible. */
if (hp->sw_lpa & LPA_100FULL) {
- printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 100Mbps at full duplex.\n");
hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
} else if (hp->sw_lpa & LPA_100HALF) {
- printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 100MBps at half duplex.\n");
hp->sw_bmcr |= BMCR_SPEED100;
} else if (hp->sw_lpa & LPA_10FULL) {
- printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 10MBps at full duplex.\n");
hp->sw_bmcr |= BMCR_FULLDPLX;
} else {
- printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Using 10Mbps at half duplex.\n");
}
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1884,12 +1797,6 @@ static void happy_meal_mif_interrupt(struct happy_meal *hp)
happy_meal_poll_stop(hp, tregs);
}
-#ifdef TXDEBUG
-#define TXD(x) printk x
-#else
-#define TXD(x)
-#endif
-
/* hp->happy_lock must be held */
static void happy_meal_tx(struct happy_meal *hp)
{
@@ -1899,13 +1806,12 @@ static void happy_meal_tx(struct happy_meal *hp)
int elem;
elem = hp->tx_old;
- TXD(("TX<"));
while (elem != hp->tx_new) {
struct sk_buff *skb;
u32 flags, dma_addr, dma_len;
int frag;
- TXD(("[%d]", elem));
+ netdev_vdbg(hp->dev, "TX[%d]\n", elem);
this = &txbase[elem];
flags = hme_read_desc32(hp, &this->tx_flags);
if (flags & TXFLAG_OWN)
@@ -1941,19 +1847,12 @@ static void happy_meal_tx(struct happy_meal *hp)
dev->stats.tx_packets++;
}
hp->tx_old = elem;
- TXD((">"));
if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(dev);
}
-#ifdef RXDEBUG
-#define RXD(x) printk x
-#else
-#define RXD(x)
-#endif
-
/* Originally I used to handle the allocation failure by just giving back
* that one ring buffer to the happy meal. Problem is that usually when that
* condition is triggered, the happy meal expects you to do something reasonable
@@ -1970,7 +1869,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
int elem = hp->rx_new, drops = 0;
u32 flags;
- RXD(("RX<"));
this = &rxbase[elem];
while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
struct sk_buff *skb;
@@ -1978,11 +1876,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
u16 csum = flags & RXFLAG_CSUM;
u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
- RXD(("[%d ", elem));
-
/* Check for errors. */
if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
- RXD(("ERR(%08x)]", flags));
+ netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
dev->stats.rx_errors++;
if (len < ETH_ZLEN)
dev->stats.rx_length_errors++;
@@ -2039,9 +1935,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
@@ -2054,7 +1950,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb->csum = csum_unfold(~(__force __sum16)htons(csum));
skb->ip_summed = CHECKSUM_COMPLETE;
- RXD(("len=%d csum=%4x]", len, csum));
+ netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -2066,8 +1962,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
hp->rx_new = elem;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
- RXD((">"));
+ netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
}
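
The widened sync window in the hunk above (len + 2 rather than len) is easy to miss: the receive buffers are DMA-mapped starting at the 2-byte pad that word-aligns the IP header, so syncing only len bytes from the start of the mapping leaves the last two bytes of the frame stale. A minimal sketch of this copybreak pattern, with hypothetical names (rx_copybreak, RX_PAD) standing in for the driver's own:

/* Sketch only: copy a small frame out of a ring buffer whose DMA mapping
 * includes a 2-byte alignment pad, then leave the buffer with the NIC.
 */
#define RX_PAD 2

static struct sk_buff *rx_copybreak(struct device *dmadev, dma_addr_t mapping,
				    const u8 *ringbuf, unsigned int len)
{
	struct sk_buff *copy = netdev_alloc_skb(NULL, len + RX_PAD);

	if (!copy)
		return NULL;

	skb_reserve(copy, RX_PAD);		/* word-align the IP header */
	skb_put(copy, len);

	/* The frame starts RX_PAD bytes into the mapping, so the sync
	 * must cover len + RX_PAD bytes, not just len.
	 */
	dma_sync_single_for_cpu(dmadev, mapping, len + RX_PAD, DMA_FROM_DEVICE);
	memcpy(copy->data, ringbuf + RX_PAD, len);
	dma_sync_single_for_device(dmadev, mapping, len + RX_PAD, DMA_FROM_DEVICE);

	return copy;	/* the original ring buffer stays with the NIC */
}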
static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
@@ -2076,32 +1971,25 @@ static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
struct happy_meal *hp = netdev_priv(dev);
u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
- HMD(("happy_meal_interrupt: status=%08x ", happy_status));
+ HMD("status=%08x\n", happy_status);
spin_lock(&hp->happy_lock);
if (happy_status & GREG_STAT_ERRORS) {
- HMD(("ERRORS "));
if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
goto out;
}
- if (happy_status & GREG_STAT_MIFIRQ) {
- HMD(("MIFIRQ "));
+ if (happy_status & GREG_STAT_MIFIRQ)
happy_meal_mif_interrupt(hp);
- }
- if (happy_status & GREG_STAT_TXALL) {
- HMD(("TXALL "));
+ if (happy_status & GREG_STAT_TXALL)
happy_meal_tx(hp);
- }
- if (happy_status & GREG_STAT_RXTOHOST) {
- HMD(("RXTOHOST "));
+ if (happy_status & GREG_STAT_RXTOHOST)
happy_meal_rx(hp, dev);
- }
- HMD(("done\n"));
+ HMD("done\n");
out:
spin_unlock(&hp->happy_lock);
@@ -2119,7 +2007,7 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
struct happy_meal *hp = netdev_priv(dev);
u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
- HMD(("quattro_interrupt: status=%08x ", happy_status));
+ HMD("status=%08x\n", happy_status);
if (!(happy_status & (GREG_STAT_ERRORS |
GREG_STAT_MIFIRQ |
@@ -2129,31 +2017,23 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
spin_lock(&hp->happy_lock);
- if (happy_status & GREG_STAT_ERRORS) {
- HMD(("ERRORS "));
+ if (happy_status & GREG_STAT_ERRORS)
if (happy_meal_is_not_so_happy(hp, happy_status))
goto next;
- }
- if (happy_status & GREG_STAT_MIFIRQ) {
- HMD(("MIFIRQ "));
+ if (happy_status & GREG_STAT_MIFIRQ)
happy_meal_mif_interrupt(hp);
- }
- if (happy_status & GREG_STAT_TXALL) {
- HMD(("TXALL "));
+ if (happy_status & GREG_STAT_TXALL)
happy_meal_tx(hp);
- }
- if (happy_status & GREG_STAT_RXTOHOST) {
- HMD(("RXTOHOST "));
+ if (happy_status & GREG_STAT_RXTOHOST)
happy_meal_rx(hp, dev);
- }
next:
spin_unlock(&hp->happy_lock);
}
- HMD(("done\n"));
+ HMD("done\n");
return IRQ_HANDLED;
}
@@ -2164,8 +2044,6 @@ static int happy_meal_open(struct net_device *dev)
struct happy_meal *hp = netdev_priv(dev);
int res;
- HMD(("happy_meal_open: "));
-
/* On SBUS Quattro QFE cards, all hme interrupts are concentrated
* into a single source which we register handling at probe time.
*/
@@ -2173,15 +2051,14 @@ static int happy_meal_open(struct net_device *dev)
res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
dev->name, dev);
if (res) {
- HMD(("EAGAIN\n"));
- printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
- hp->irq);
+ HMD("EAGAIN\n");
+ netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
return -EAGAIN;
}
}
- HMD(("to happy_meal_init\n"));
+ HMD("to happy_meal_init\n");
spin_lock_irq(&hp->happy_lock);
res = happy_meal_init(hp);
@@ -2215,22 +2092,16 @@ static int happy_meal_close(struct net_device *dev)
return 0;
}
-#ifdef SXDEBUG
-#define SXD(x) printk x
-#else
-#define SXD(x)
-#endif
-
static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct happy_meal *hp = netdev_priv(dev);
- printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
tx_dump_log();
- printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
- hme_read32(hp, hp->gregs + GREG_STAT),
- hme_read32(hp, hp->etxregs + ETX_CFG),
- hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
+ netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
+ hme_read32(hp, hp->gregs + GREG_STAT),
+ hme_read32(hp, hp->etxregs + ETX_CFG),
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
spin_lock_irq(&hp->happy_lock);
happy_meal_init(hp);
@@ -2280,13 +2151,12 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irq(&hp->happy_lock);
- printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
entry = hp->tx_new;
- SXD(("SX<l[%d]e[%d]>", len, entry));
+ netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
hp->tx_skbs[entry] = skb;
if (skb_shinfo(skb)->nr_frags == 0) {
@@ -2486,11 +2356,10 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strlcpy(info->driver, "sunhme", sizeof(info->driver));
- strlcpy(info->version, "2.02", sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2523,8 +2392,6 @@ static const struct ethtool_ops hme_ethtool_ops = {
.set_link_ksettings = hme_set_link_ksettings,
};
-static int hme_version_printed;
-
#ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find its quattro parent.
* If none exist, allocate and return a new one.
@@ -2542,19 +2409,15 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
if (qp)
return qp;
- qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
- if (qp != NULL) {
- int i;
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
- for (i = 0; i < 4; i++)
- qp->happy_meals[i] = NULL;
+ qp->quattro_dev = child;
+ qp->next = qfe_sbus_list;
+ qfe_sbus_list = qp;
- qp->quattro_dev = child;
- qp->next = qfe_sbus_list;
- qfe_sbus_list = qp;
-
- platform_set_drvdata(op, qp);
- }
+ platform_set_drvdata(op, qp);
return qp;
}
@@ -2582,8 +2445,9 @@ static int __init quattro_sbus_register_irqs(void)
IRQF_SHARED, "Quattro",
qp);
if (err != 0) {
- printk(KERN_ERR "Quattro HME: IRQ registration "
- "error %d.\n", err);
+ dev_err(&op->dev,
+ "Quattro HME: IRQ registration error %d.\n",
+ err);
return err;
}
}
@@ -2614,30 +2478,33 @@ static void quattro_sbus_free_irqs(void)
#ifdef CONFIG_PCI
static struct quattro *quattro_pci_find(struct pci_dev *pdev)
{
+ int i;
struct pci_dev *bdev = pdev->bus->self;
struct quattro *qp;
- if (!bdev) return NULL;
+ if (!bdev)
+ return ERR_PTR(-ENODEV);
+
for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
struct pci_dev *qpdev = qp->quattro_dev;
if (qpdev == bdev)
return qp;
}
+
qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
- if (qp != NULL) {
- int i;
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
- for (i = 0; i < 4; i++)
- qp->happy_meals[i] = NULL;
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
- qp->quattro_dev = bdev;
- qp->next = qfe_pci_list;
- qfe_pci_list = qp;
+ qp->quattro_dev = bdev;
+ qp->next = qfe_pci_list;
+ qfe_pci_list = qp;
- /* No range tricks necessary on PCI. */
- qp->nranges = 0;
- }
+ /* No range tricks necessary on PCI. */
+ qp->nranges = 0;
return qp;
}
#endif /* CONFIG_PCI */
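
quattro_pci_find() now reports why it failed by returning ERR_PTR() codes instead of a bare NULL, and the probe path below propagates the code with IS_ERR()/PTR_ERR(). The idiom in isolation, as a sketch (find_widget() and struct widget are illustrative, not from this driver):

#include <linux/err.h>

static struct widget *find_widget(struct device *parent)
{
	struct widget *w;

	if (!parent)
		return ERR_PTR(-ENODEV);	/* "no parent" is distinguishable... */

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);	/* ...from "out of memory" */

	return w;
}

/* Caller side: */
	w = find_widget(parent);
	if (IS_ERR(w))
		return PTR_ERR(w);	/* pass the precise errno upward */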
@@ -2687,9 +2554,6 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out;
SET_NETDEV_DEV(dev, &op->dev);
- if (hme_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
/* If user did not specify a MAC address specifically, use
* the Quattro local-mac-address property...
*/
@@ -2731,35 +2595,35 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
hp->gregs = of_ioremap(&op->resource[0], 0,
GREG_REG_SIZE, "HME Global Regs");
if (!hp->gregs) {
- printk(KERN_ERR "happymeal: Cannot map global registers.\n");
+ dev_err(&op->dev, "Cannot map global registers.\n");
goto err_out_free_netdev;
}
hp->etxregs = of_ioremap(&op->resource[1], 0,
ETX_REG_SIZE, "HME TX Regs");
if (!hp->etxregs) {
- printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
+ dev_err(&op->dev, "Cannot map MAC TX registers.\n");
goto err_out_iounmap;
}
hp->erxregs = of_ioremap(&op->resource[2], 0,
ERX_REG_SIZE, "HME RX Regs");
if (!hp->erxregs) {
- printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
+ dev_err(&op->dev, "Cannot map MAC RX registers.\n");
goto err_out_iounmap;
}
hp->bigmacregs = of_ioremap(&op->resource[3], 0,
BMAC_REG_SIZE, "HME BIGMAC Regs");
if (!hp->bigmacregs) {
- printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
+ dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
goto err_out_iounmap;
}
hp->tcvregs = of_ioremap(&op->resource[4], 0,
TCVR_REG_SIZE, "HME Tranceiver Regs");
if (!hp->tcvregs) {
- printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
+ dev_err(&op->dev, "Cannot map TCVR registers.\n");
goto err_out_iounmap;
}
@@ -2826,21 +2690,19 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
err = register_netdev(hp->dev);
if (err) {
- printk(KERN_ERR "happymeal: Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting.\n");
goto err_out_free_coherent;
}
platform_set_drvdata(op, hp);
if (qfe_slot != -1)
- printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
- dev->name, qfe_slot);
+ netdev_info(dev,
+ "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
+ qfe_slot, dev->dev_addr);
else
- printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
- dev->name);
-
- printk("%pM\n", dev->dev_addr);
+ netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
@@ -2968,7 +2830,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
struct happy_meal *hp;
struct net_device *dev;
void __iomem *hpreg_base;
- unsigned long hpreg_res;
+ struct resource *hpreg_res;
int i, qfe_slot = -1;
char prom_name[64];
u8 addr[ETH_ALEN];
@@ -2985,32 +2847,33 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
strcpy(prom_name, "SUNW,hme");
#endif
- err = -ENODEV;
-
- if (pci_enable_device(pdev))
+ err = pcim_enable_device(pdev);
+ if (err)
goto err_out;
pci_set_master(pdev);
if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
qp = quattro_pci_find(pdev);
- if (qp == NULL)
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
goto err_out;
+ }
+
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
- if (qp->happy_meals[qfe_slot] == NULL)
+ if (!qp->happy_meals[qfe_slot])
break;
+
if (qfe_slot == 4)
goto err_out;
}
- dev = alloc_etherdev(sizeof(struct happy_meal));
- err = -ENOMEM;
- if (!dev)
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
+ if (!dev) {
+ err = -ENOMEM;
goto err_out;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
- if (hme_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
hp = netdev_priv(dev);
hp->happy_dev = pdev;
@@ -3024,21 +2887,26 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
qp->happy_meals[qfe_slot] = dev;
}
- hpreg_res = pci_resource_start(pdev, 0);
- err = -ENODEV;
+ err = -EINVAL;
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
- printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
+ dev_err(&pdev->dev,
+ "Cannot find proper PCI device base address.\n");
goto err_out_clear_quattro;
}
- if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
- "aborting.\n");
+
+ hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), DRV_NAME);
+ if (!hpreg_res) {
+ err = -EBUSY;
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_clear_quattro;
}
- if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
- printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
- goto err_out_free_res;
+ hpreg_base = pcim_iomap(pdev, 0, 0x8000);
+ if (!hpreg_base) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Unable to remap card memory.\n");
+ goto err_out_clear_quattro;
}
for (i = 0; i < 6; i++) {
@@ -3104,11 +2972,12 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS;
#endif
- hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
- &hp->hblock_dvma, GFP_KERNEL);
- err = -ENODEV;
- if (!hp->happy_block)
- goto err_out_iounmap;
+ hp->happy_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &hp->hblock_dvma, GFP_KERNEL);
+ if (!hp->happy_block) {
+ err = -ENOMEM;
+ goto err_out_clear_quattro;
+ }
hp->linkcheck = 0;
hp->timer_state = asleep;
@@ -3142,11 +3011,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- err = register_netdev(hp->dev);
+ err = devm_register_netdev(&pdev->dev, dev);
if (err) {
- printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
- "aborting.\n");
- goto err_out_iounmap;
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clear_quattro;
}
pci_set_drvdata(pdev, hp);
@@ -3159,57 +3027,30 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
int i = simple_strtoul(dev->name + 3, NULL, 10);
sprintf(prom_name, "-%d", i + 3);
}
- printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
- if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
- qpdev->device == PCI_DEVICE_ID_DEC_21153)
- printk("DEC 21153 PCI Bridge\n");
- else
- printk("unknown bridge %04x.%04x\n",
- qpdev->vendor, qpdev->device);
+ netdev_info(dev,
+ "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
+ prom_name, qpdev->vendor, qpdev->device);
}
if (qfe_slot != -1)
- printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
- dev->name, qfe_slot);
+ netdev_info(dev,
+ "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
+ qfe_slot, dev->dev_addr);
else
- printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
- dev->name);
-
- printk("%pM\n", dev->dev_addr);
+ netdev_info(dev,
+ "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
-err_out_iounmap:
- iounmap(hp->gregs);
-
-err_out_free_res:
- pci_release_regions(pdev);
-
err_out_clear_quattro:
if (qp != NULL)
qp->happy_meals[qfe_slot] = NULL;
- free_netdev(dev);
-
err_out:
return err;
}
-static void happy_meal_pci_remove(struct pci_dev *pdev)
-{
- struct happy_meal *hp = pci_get_drvdata(pdev);
- struct net_device *net_dev = hp->dev;
-
- unregister_netdev(net_dev);
-
- dma_free_coherent(hp->dma_dev, PAGE_SIZE,
- hp->happy_block, hp->hblock_dvma);
- iounmap(hp->gregs);
- pci_release_regions(hp->happy_dev);
-
- free_netdev(net_dev);
-}
-
static const struct pci_device_id happymeal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
@@ -3221,7 +3062,6 @@ static struct pci_driver hme_pci_driver = {
.name = "hme",
.id_table = happymeal_pci_ids,
.probe = happy_meal_pci_probe,
- .remove = happy_meal_pci_remove,
};
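
The probe path above is now fully managed: pcim_enable_device(), devm_alloc_etherdev(), devm_request_region(), pcim_iomap(), dmam_alloc_coherent() and devm_register_netdev() all register cleanup actions with the device, which the core runs in reverse order on unbind. That is why happy_meal_pci_remove() and the error-path iounmap()/pci_release_regions()/free_netdev() calls could be deleted wholesale. The skeleton of such a probe, as a sketch (struct demo_priv is illustrative):

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	void __iomem *base;
	int err;

	err = pcim_enable_device(pdev);		/* disabled on unbind */
	if (err)
		return err;

	base = pcim_iomap(pdev, 0, 0);		/* unmapped on unbind */
	if (!base)
		return -ENOMEM;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct demo_priv));
	if (!dev)
		return -ENOMEM;			/* freed on unbind */
	SET_NETDEV_DEV(dev, &pdev->dev);

	return devm_register_netdev(&pdev->dev, dev);
}
/* No .remove callback: devres tears everything down in reverse order. */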
static int __init happy_meal_pci_init(void)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index efe0d33f6024..6418fcc3139f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -684,8 +684,8 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
- strlcpy(info->driver, "sunqe", sizeof(info->driver));
- strlcpy(info->version, "3.0", sizeof(info->version));
+ strscpy(info->driver, "sunqe", sizeof(info->driver));
+ strscpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index da8119625cf3..acda6cbd0238 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -60,8 +60,8 @@ static struct vio_version vnet_versions[] = {
static void vnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
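
The strlcpy() -> strscpy() conversions in this and the previous hunk are drop-in for these fixed-size ethtool strings; what changes is the return contract. strlcpy() returned strlen(src), which forces a read of the whole (possibly unbounded) source even on truncation; strscpy() returns the number of bytes copied, or -E2BIG when the destination was too small, and always NUL-terminates. Illustratively:

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "a long driver name", sizeof(buf));
	/* n == -E2BIG: truncated, but buf is still NUL-terminated.
	 * strlcpy() would have returned 18, leaving the caller to
	 * compare against sizeof(buf) to detect the truncation.
	 */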
static u32 vnet_get_msglevel(struct net_device *dev)
@@ -467,8 +467,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
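
The dropped fourth argument reflects the API change where netif_napi_add() applies the default weight (NAPI_POLL_WEIGHT, 64) itself; drivers that genuinely need a custom value use netif_napi_add_weight(). Side by side (my_poll and priv are illustrative names):

	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT); /* old */
	netif_napi_add(dev, &priv->napi, my_poll);                   /* new */
	netif_napi_add_weight(dev, &priv->napi, my_poll, 16);        /* custom */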
diff --git a/drivers/net/ethernet/sunplus/Kconfig b/drivers/net/ethernet/sunplus/Kconfig
new file mode 100644
index 000000000000..be50a6b723eb
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Sunplus network device configuration
+#
+
+config NET_VENDOR_SUNPLUS
+ bool "Sunplus devices"
+ default y
+ depends on ARCH_SUNPLUS || COMPILE_TEST
+ help
+ If you have a network (Ethernet) card belonging to this
+ class, say Y here.
+
+ Note that the answer to this question doesn't directly
+ affect the kernel: saying N will just cause the configurator
+ to skip all the questions about Sunplus cards. If you say Y,
+ you will be asked for your specific card in the following
+ questions.
+
+if NET_VENDOR_SUNPLUS
+
+config SP7021_EMAC
+ tristate "Sunplus Dual 10M/100M Ethernet devices"
+ depends on SOC_SP7021 || COMPILE_TEST
+ select PHYLIB
+ help
+ If you have Sunplus dual 10M/100M Ethernet devices, say Y.
+	  The driver creates two net-device interfaces.
+ To compile this driver as a module, choose M here. The
+ module will be called sp7021_emac.
+
+endif # NET_VENDOR_SUNPLUS
diff --git a/drivers/net/ethernet/sunplus/Makefile b/drivers/net/ethernet/sunplus/Makefile
new file mode 100644
index 000000000000..ef7d7d0a7e71
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Sunplus network device drivers.
+#
+obj-$(CONFIG_SP7021_EMAC) += sp7021_emac.o
+sp7021_emac-objs := spl2sw_driver.o spl2sw_int.o spl2sw_desc.o spl2sw_mac.o spl2sw_mdio.o spl2sw_phy.o
diff --git a/drivers/net/ethernet/sunplus/spl2sw_define.h b/drivers/net/ethernet/sunplus/spl2sw_define.h
new file mode 100644
index 000000000000..acc6c1228ebc
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_define.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_DEFINE_H__
+#define __SPL2SW_DEFINE_H__
+
+#define MAX_NETDEV_NUM 2 /* Maximum # of net-device */
+
+/* Interrupt status */
+#define MAC_INT_DAISY_MODE_CHG BIT(31) /* Daisy Mode Change */
+#define MAC_INT_IP_CHKSUM_ERR BIT(23) /* IP Checksum Append Error */
+#define MAC_INT_WDOG_TIMER1_EXP BIT(22) /* Watchdog Timer1 Expired */
+#define MAC_INT_WDOG_TIMER0_EXP BIT(21) /* Watchdog Timer0 Expired */
+#define MAC_INT_INTRUDER_ALERT BIT(20) /* Intruder Alert */
+#define MAC_INT_PORT_ST_CHG BIT(19) /* Port Status Change */
+#define MAC_INT_BC_STORM BIT(18) /* Broad Cast Storm */
+#define MAC_INT_MUST_DROP_LAN BIT(17) /* Global Queue Exhausted */
+#define MAC_INT_GLOBAL_QUE_FULL BIT(16) /* Global Queue Full */
+#define MAC_INT_TX_SOC_PAUSE_ON BIT(15) /* Soc Port TX Pause On */
+#define MAC_INT_RX_SOC_QUE_FULL BIT(14) /* Soc Port Out Queue Full */
+#define MAC_INT_TX_LAN1_QUE_FULL BIT(9) /* Port 1 Out Queue Full */
+#define MAC_INT_TX_LAN0_QUE_FULL BIT(8) /* Port 0 Out Queue Full */
+#define MAC_INT_RX_L_DESCF BIT(7) /* Low Priority Descriptor Full */
+#define MAC_INT_RX_H_DESCF BIT(6) /* High Priority Descriptor Full */
+#define MAC_INT_RX_DONE_L BIT(5) /* RX Low Priority Done */
+#define MAC_INT_RX_DONE_H BIT(4) /* RX High Priority Done */
+#define MAC_INT_TX_DONE_L BIT(3) /* TX Low Priority Done */
+#define MAC_INT_TX_DONE_H BIT(2) /* TX High Priority Done */
+#define MAC_INT_TX_DES_ERR BIT(1) /* TX Descriptor Error */
+#define MAC_INT_RX_DES_ERR BIT(0) /* Rx Descriptor Error */
+
+#define MAC_INT_RX (MAC_INT_RX_DONE_H | MAC_INT_RX_DONE_L | \
+ MAC_INT_RX_DES_ERR)
+#define MAC_INT_TX (MAC_INT_TX_DONE_L | MAC_INT_TX_DONE_H | \
+ MAC_INT_TX_DES_ERR)
+#define MAC_INT_MASK_DEF (MAC_INT_DAISY_MODE_CHG | MAC_INT_IP_CHKSUM_ERR | \
+ MAC_INT_WDOG_TIMER1_EXP | MAC_INT_WDOG_TIMER0_EXP | \
+ MAC_INT_INTRUDER_ALERT | MAC_INT_PORT_ST_CHG | \
+ MAC_INT_BC_STORM | MAC_INT_MUST_DROP_LAN | \
+ MAC_INT_GLOBAL_QUE_FULL | MAC_INT_TX_SOC_PAUSE_ON | \
+ MAC_INT_RX_SOC_QUE_FULL | MAC_INT_TX_LAN1_QUE_FULL | \
+ MAC_INT_TX_LAN0_QUE_FULL | MAC_INT_RX_L_DESCF | \
+ MAC_INT_RX_H_DESCF)
+
+/* Address table search */
+#define MAC_ADDR_LOOKUP_IDLE BIT(2)
+#define MAC_SEARCH_NEXT_ADDR BIT(1)
+#define MAC_BEGIN_SEARCH_ADDR BIT(0)
+
+/* Address table status */
+#define MAC_HASH_LOOKUP_ADDR GENMASK(31, 22)
+#define MAC_R_PORT_MAP GENMASK(13, 12)
+#define MAC_R_CPU_PORT GENMASK(11, 10)
+#define MAC_R_VID GENMASK(9, 7)
+#define MAC_R_AGE GENMASK(6, 4)
+#define MAC_R_PROXY BIT(3)
+#define MAC_R_MC_INGRESS BIT(2)
+#define MAC_AT_TABLE_END BIT(1)
+#define MAC_AT_DATA_READY BIT(0)
+
+/* Wt mac ad0 */
+#define MAC_W_PORT_MAP GENMASK(13, 12)
+#define MAC_W_LAN_PORT_1 BIT(13)
+#define MAC_W_LAN_PORT_0 BIT(12)
+#define MAC_W_CPU_PORT GENMASK(11, 10)
+#define MAC_W_CPU_PORT_1 BIT(11)
+#define MAC_W_CPU_PORT_0 BIT(10)
+#define MAC_W_VID GENMASK(9, 7)
+#define MAC_W_AGE GENMASK(6, 4)
+#define MAC_W_PROXY BIT(3)
+#define MAC_W_MC_INGRESS BIT(2)
+#define MAC_W_MAC_DONE BIT(1)
+#define MAC_W_MAC_CMD BIT(0)
+
+/* W mac 15_0 bus */
+#define MAC_W_MAC_15_0 GENMASK(15, 0)
+
+/* W mac 47_16 bus */
+#define MAC_W_MAC_47_16 GENMASK(31, 0)
+
+/* PVID config 0 */
+#define MAC_P1_PVID GENMASK(6, 4)
+#define MAC_P0_PVID GENMASK(2, 0)
+
+/* VLAN member config 0 */
+#define MAC_VLAN_MEMSET_3 GENMASK(27, 24)
+#define MAC_VLAN_MEMSET_2 GENMASK(19, 16)
+#define MAC_VLAN_MEMSET_1 GENMASK(11, 8)
+#define MAC_VLAN_MEMSET_0 GENMASK(3, 0)
+
+/* VLAN member config 1 */
+#define MAC_VLAN_MEMSET_5 GENMASK(11, 8)
+#define MAC_VLAN_MEMSET_4 GENMASK(3, 0)
+
+/* Port ability */
+#define MAC_PORT_ABILITY_LINK_ST GENMASK(25, 24)
+
+/* CPU control */
+#define MAC_EN_SOC1_AGING BIT(15)
+#define MAC_EN_SOC0_AGING BIT(14)
+#define MAC_DIS_LRN_SOC1 BIT(13)
+#define MAC_DIS_LRN_SOC0 BIT(12)
+#define MAC_EN_CRC_SOC1 BIT(9)
+#define MAC_EN_CRC_SOC0 BIT(8)
+#define MAC_DIS_SOC1_CPU BIT(7)
+#define MAC_DIS_SOC0_CPU BIT(6)
+#define MAC_DIS_BC2CPU_P1 BIT(5)
+#define MAC_DIS_BC2CPU_P0 BIT(4)
+#define MAC_DIS_MC2CPU GENMASK(3, 2)
+#define MAC_DIS_MC2CPU_P1 BIT(3)
+#define MAC_DIS_MC2CPU_P0 BIT(2)
+#define MAC_DIS_UN2CPU GENMASK(1, 0)
+
+/* Port control 0 */
+#define MAC_DIS_PORT GENMASK(25, 24)
+#define MAC_DIS_PORT1 BIT(25)
+#define MAC_DIS_PORT0 BIT(24)
+#define MAC_DIS_RMC2CPU_P1 BIT(17)
+#define MAC_DIS_RMC2CPU_P0 BIT(16)
+#define MAC_EN_FLOW_CTL_P1 BIT(9)
+#define MAC_EN_FLOW_CTL_P0 BIT(8)
+#define MAC_EN_BACK_PRESS_P1 BIT(1)
+#define MAC_EN_BACK_PRESS_P0 BIT(0)
+
+/* Port control 1 */
+#define MAC_DIS_SA_LRN_P1 BIT(9)
+#define MAC_DIS_SA_LRN_P0 BIT(8)
+
+/* Port control 2 */
+#define MAC_EN_AGING_P1 BIT(9)
+#define MAC_EN_AGING_P0 BIT(8)
+
+/* Switch Global control */
+#define MAC_RMC_TB_FAULT_RULE GENMASK(26, 25)
+#define MAC_LED_FLASH_TIME GENMASK(24, 23)
+#define MAC_BC_STORM_PREV GENMASK(5, 4)
+
+/* LED port 0 */
+#define MAC_LED_ACT_HI BIT(28)
+
+/* PHY control register 0 */
+#define MAC_CPU_PHY_WT_DATA GENMASK(31, 16)
+#define MAC_CPU_PHY_CMD GENMASK(14, 13)
+#define MAC_CPU_PHY_REG_ADDR GENMASK(12, 8)
+#define MAC_CPU_PHY_ADDR GENMASK(4, 0)
+
+/* PHY control register 1 */
+#define MAC_CPU_PHY_RD_DATA GENMASK(31, 16)
+#define MAC_PHY_RD_RDY BIT(1)
+#define MAC_PHY_WT_DONE BIT(0)
+
+/* MAC force mode */
+#define MAC_EXT_PHY1_ADDR GENMASK(28, 24)
+#define MAC_EXT_PHY0_ADDR GENMASK(20, 16)
+#define MAC_FORCE_RMII_LINK GENMASK(9, 8)
+#define MAC_FORCE_RMII_EN_1 BIT(7)
+#define MAC_FORCE_RMII_EN_0 BIT(6)
+#define MAC_FORCE_RMII_FC GENMASK(5, 4)
+#define MAC_FORCE_RMII_DPX GENMASK(3, 2)
+#define MAC_FORCE_RMII_SPD GENMASK(1, 0)
+
+/* CPU transmit trigger */
+#define MAC_TRIG_L_SOC0 BIT(1)
+#define MAC_TRIG_H_SOC0 BIT(0)
+
+/* Config descriptor queue */
+#define TX_DESC_NUM 16 /* # of descriptors in TX queue */
+#define MAC_GUARD_DESC_NUM 2 /* # of descriptors of gap 0 */
+#define RX_QUEUE0_DESC_NUM 16 /* # of descriptors in RX queue 0 */
+#define RX_QUEUE1_DESC_NUM 16 /* # of descriptors in RX queue 1 */
+#define TX_DESC_QUEUE_NUM 1 /* # of TX queue */
+#define RX_DESC_QUEUE_NUM 2 /* # of RX queue */
+
+#define MAC_RX_LEN_MAX 2047 /* Size of RX buffer */
+
+/* Tx descriptor */
+/* cmd1 */
+#define TXD_OWN BIT(31)
+#define TXD_ERR_CODE GENMASK(29, 26)
+#define TXD_SOP BIT(25) /* start of a packet */
+#define TXD_EOP BIT(24) /* end of a packet */
+#define TXD_VLAN GENMASK(17, 12)
+#define TXD_PKT_LEN GENMASK(10, 0) /* packet length */
+/* cmd2 */
+#define TXD_EOR BIT(31) /* end of ring */
+#define TXD_BUF_LEN2 GENMASK(22, 12)
+#define TXD_BUF_LEN1 GENMASK(10, 0)
+
+/* Rx descriptor */
+/* cmd1 */
+#define RXD_OWN BIT(31)
+#define RXD_ERR_CODE GENMASK(29, 26)
+#define RXD_TCP_UDP_CHKSUM BIT(23)
+#define RXD_PROXY BIT(22)
+#define RXD_PROTOCOL GENMASK(21, 20)
+#define RXD_VLAN_TAG BIT(19)
+#define RXD_IP_CHKSUM BIT(18)
+#define RXD_ROUTE_TYPE GENMASK(17, 16)
+#define RXD_PKT_SP GENMASK(14, 12) /* packet source port */
+#define RXD_PKT_LEN GENMASK(10, 0) /* packet length */
+/* cmd2 */
+#define RXD_EOR BIT(31) /* end of ring */
+#define RXD_BUF_LEN2 GENMASK(22, 12)
+#define RXD_BUF_LEN1 GENMASK(10, 0)
+
+/* structure of descriptor */
+struct spl2sw_mac_desc {
+ u32 cmd1;
+ u32 cmd2;
+ u32 addr1;
+ u32 addr2;
+};
+
+struct spl2sw_skb_info {
+ struct sk_buff *skb;
+ u32 mapping;
+ u32 len;
+};
+
+struct spl2sw_common {
+ void __iomem *l2sw_reg_base;
+
+ struct platform_device *pdev;
+ struct reset_control *rstc;
+ struct clk *clk;
+
+ void *desc_base;
+ dma_addr_t desc_dma;
+ s32 desc_size;
+ struct spl2sw_mac_desc *rx_desc[RX_DESC_QUEUE_NUM];
+ struct spl2sw_skb_info *rx_skb_info[RX_DESC_QUEUE_NUM];
+ u32 rx_pos[RX_DESC_QUEUE_NUM];
+ u32 rx_desc_num[RX_DESC_QUEUE_NUM];
+ u32 rx_desc_buff_size;
+
+ struct spl2sw_mac_desc *tx_desc;
+ struct spl2sw_skb_info tx_temp_skb_info[TX_DESC_NUM];
+ u32 tx_done_pos;
+ u32 tx_pos;
+ u32 tx_desc_full;
+
+ struct net_device *ndev[MAX_NETDEV_NUM];
+ struct mii_bus *mii_bus;
+
+ struct napi_struct rx_napi;
+ struct napi_struct tx_napi;
+
+ spinlock_t tx_lock; /* spinlock for accessing tx buffer */
+ spinlock_t mdio_lock; /* spinlock for mdio commands */
+ spinlock_t int_mask_lock; /* spinlock for accessing int mask reg. */
+
+ u8 enable;
+};
+
+struct spl2sw_mac {
+ struct net_device *ndev;
+ struct spl2sw_common *comm;
+
+ u8 mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
+ struct device_node *phy_node;
+
+ u8 lan_port;
+ u8 to_vlan;
+ u8 vlan_id;
+};
+
+#endif
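
All of the GENMASK()-based layouts above are meant to be used through FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask at compile time, so the driver needs no hand-written shifts. For instance, decoding an RX descriptor and composing a PHY command word might look like this (a sketch; the 0x2 command code is an assumed value, not taken from the datasheet):

	#include <linux/bitfield.h>

	u32 cmd1 = desc->cmd1;
	unsigned int port = FIELD_GET(RXD_PKT_SP, cmd1);   /* bits 14:12 */
	unsigned int plen = FIELD_GET(RXD_PKT_LEN, cmd1);  /* bits 10:0  */

	u32 phycmd = FIELD_PREP(MAC_CPU_PHY_CMD, 0x2) |
		     FIELD_PREP(MAC_CPU_PHY_REG_ADDR, reg) |
		     FIELD_PREP(MAC_CPU_PHY_ADDR, addr);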
diff --git a/drivers/net/ethernet/sunplus/spl2sw_desc.c b/drivers/net/ethernet/sunplus/spl2sw_desc.c
new file mode 100644
index 000000000000..3f0d9f78b37d
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_desc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+
+#include "spl2sw_define.h"
+#include "spl2sw_desc.h"
+
+void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
+{
+ struct spl2sw_skb_info *rx_skbinfo;
+ struct spl2sw_mac_desc *rx_desc;
+ u32 i, j;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
+ rx_desc = comm->rx_desc[i];
+ rx_skbinfo = comm->rx_skb_info[i];
+ for (j = 0; j < comm->rx_desc_num[i]; j++) {
+ rx_desc[j].addr1 = rx_skbinfo[j].mapping;
+ rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
+ RXD_EOR | comm->rx_desc_buff_size :
+ comm->rx_desc_buff_size;
+ wmb(); /* Set RXD_OWN after other fields are ready. */
+ rx_desc[j].cmd1 = RXD_OWN;
+ }
+ }
+}
+
+void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
+{
+ u32 i;
+
+ if (!comm->tx_desc)
+ return;
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ comm->tx_desc[i].cmd1 = 0;
+ wmb(); /* Clear TXD_OWN and then set other fields. */
+ comm->tx_desc[i].cmd2 = 0;
+ comm->tx_desc[i].addr1 = 0;
+ comm->tx_desc[i].addr2 = 0;
+
+ if (comm->tx_temp_skb_info[i].mapping) {
+ dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
+ comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE);
+ comm->tx_temp_skb_info[i].mapping = 0;
+ }
+
+ if (comm->tx_temp_skb_info[i].skb) {
+ dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb);
+ comm->tx_temp_skb_info[i].skb = NULL;
+ }
+ }
+}
+
+void spl2sw_rx_descs_clean(struct spl2sw_common *comm)
+{
+ struct spl2sw_skb_info *rx_skbinfo;
+ struct spl2sw_mac_desc *rx_desc;
+ u32 i, j;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
+ if (!comm->rx_skb_info[i])
+ continue;
+
+ rx_desc = comm->rx_desc[i];
+ rx_skbinfo = comm->rx_skb_info[i];
+ for (j = 0; j < comm->rx_desc_num[i]; j++) {
+ rx_desc[j].cmd1 = 0;
+ wmb(); /* Clear RXD_OWN and then set other fields. */
+ rx_desc[j].cmd2 = 0;
+ rx_desc[j].addr1 = 0;
+
+ if (rx_skbinfo[j].skb) {
+ dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
+ comm->rx_desc_buff_size, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_skbinfo[j].skb);
+ rx_skbinfo[j].skb = NULL;
+ rx_skbinfo[j].mapping = 0;
+ }
+ }
+
+ kfree(rx_skbinfo);
+ comm->rx_skb_info[i] = NULL;
+ }
+}
+
+void spl2sw_descs_clean(struct spl2sw_common *comm)
+{
+ spl2sw_rx_descs_clean(comm);
+ spl2sw_tx_descs_clean(comm);
+}
+
+void spl2sw_descs_free(struct spl2sw_common *comm)
+{
+ u32 i;
+
+ spl2sw_descs_clean(comm);
+ comm->tx_desc = NULL;
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
+ comm->rx_desc[i] = NULL;
+
+ /* Free descriptor area */
+ if (comm->desc_base) {
+ dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base,
+ comm->desc_dma);
+ comm->desc_base = NULL;
+ comm->desc_dma = 0;
+ comm->desc_size = 0;
+ }
+}
+
+void spl2sw_tx_descs_init(struct spl2sw_common *comm)
+{
+ memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) *
+ (TX_DESC_NUM + MAC_GUARD_DESC_NUM));
+}
+
+int spl2sw_rx_descs_init(struct spl2sw_common *comm)
+{
+ struct spl2sw_skb_info *rx_skbinfo;
+ struct spl2sw_mac_desc *rx_desc;
+ struct sk_buff *skb;
+ u32 mapping;
+ u32 i, j;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
+ comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo),
+ GFP_KERNEL | GFP_DMA);
+ if (!comm->rx_skb_info[i])
+ goto mem_alloc_fail;
+
+ rx_skbinfo = comm->rx_skb_info[i];
+ rx_desc = comm->rx_desc[i];
+ for (j = 0; j < comm->rx_desc_num[i]; j++) {
+ skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
+ if (!skb)
+ goto mem_alloc_fail;
+
+ rx_skbinfo[j].skb = skb;
+ mapping = dma_map_single(&comm->pdev->dev, skb->data,
+ comm->rx_desc_buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&comm->pdev->dev, mapping))
+ goto mem_alloc_fail;
+
+ rx_skbinfo[j].mapping = mapping;
+ rx_desc[j].addr1 = mapping;
+ rx_desc[j].addr2 = 0;
+ rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
+ RXD_EOR | comm->rx_desc_buff_size :
+ comm->rx_desc_buff_size;
+ wmb(); /* Set RXD_OWN after other fields are effective. */
+ rx_desc[j].cmd1 = RXD_OWN;
+ }
+ }
+
+ return 0;
+
+mem_alloc_fail:
+ spl2sw_rx_descs_clean(comm);
+ return -ENOMEM;
+}
+
+int spl2sw_descs_alloc(struct spl2sw_common *comm)
+{
+ s32 desc_size;
+ u32 i;
+
+ /* Alloc descriptor area */
+ desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc);
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
+ desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc);
+
+ comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma,
+ GFP_KERNEL);
+ if (!comm->desc_base)
+ return -ENOMEM;
+
+ comm->desc_size = desc_size;
+
+ /* Setup Tx descriptor */
+ comm->tx_desc = comm->desc_base;
+
+ /* Setup Rx descriptor */
+ comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM];
+ for (i = 1; i < RX_DESC_QUEUE_NUM; i++)
+ comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1];
+
+ return 0;
+}
+
+int spl2sw_descs_init(struct spl2sw_common *comm)
+{
+ u32 i, ret;
+
+ /* Initialize rx descriptor's data */
+ comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM;
+ comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
+ comm->rx_desc[i] = NULL;
+ comm->rx_skb_info[i] = NULL;
+ comm->rx_pos[i] = 0;
+ }
+ comm->rx_desc_buff_size = MAC_RX_LEN_MAX;
+
+ /* Initialize tx descriptor's data */
+ comm->tx_done_pos = 0;
+ comm->tx_desc = NULL;
+ comm->tx_pos = 0;
+ comm->tx_desc_full = 0;
+ for (i = 0; i < TX_DESC_NUM; i++)
+ comm->tx_temp_skb_info[i].skb = NULL;
+
+ /* Allocate tx & rx descriptors. */
+ ret = spl2sw_descs_alloc(comm);
+ if (ret)
+ return ret;
+
+ spl2sw_tx_descs_init(comm);
+
+ return spl2sw_rx_descs_init(comm);
+}
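
Every routine in this file follows the same ownership discipline: write the address and length fields first, issue wmb(), and only then set the OWN bit, so the DMA engine can never observe a descriptor that is owned but half-initialized. Distilled into one helper (a sketch, using the spl2sw_mac_desc layout defined earlier; the helper name is illustrative):

/* Hand one RX descriptor back to the hardware (sketch). */
static void demo_desc_give_to_hw(struct spl2sw_mac_desc *d, dma_addr_t buf,
				 u32 size, bool last)
{
	d->addr1 = buf;
	d->cmd2  = last ? (RXD_EOR | size) : size;
	wmb();			/* the writes above must land first */
	d->cmd1  = RXD_OWN;	/* now the NIC may DMA into the buffer */
}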
diff --git a/drivers/net/ethernet/sunplus/spl2sw_desc.h b/drivers/net/ethernet/sunplus/spl2sw_desc.h
new file mode 100644
index 000000000000..f04e2d85c3b3
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_desc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_DESC_H__
+#define __SPL2SW_DESC_H__
+
+void spl2sw_rx_descs_flush(struct spl2sw_common *comm);
+void spl2sw_tx_descs_clean(struct spl2sw_common *comm);
+void spl2sw_rx_descs_clean(struct spl2sw_common *comm);
+void spl2sw_descs_clean(struct spl2sw_common *comm);
+void spl2sw_descs_free(struct spl2sw_common *comm);
+void spl2sw_tx_descs_init(struct spl2sw_common *comm);
+int spl2sw_rx_descs_init(struct spl2sw_common *comm);
+int spl2sw_descs_alloc(struct spl2sw_common *comm);
+int spl2sw_descs_init(struct spl2sw_common *comm);
+
+#endif
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
new file mode 100644
index 000000000000..c499a14314f1
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/of_net.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_desc.h"
+#include "spl2sw_mdio.h"
+#include "spl2sw_phy.h"
+#include "spl2sw_int.h"
+#include "spl2sw_mac.h"
+
+/* net device operations */
+static int spl2sw_ethernet_open(struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+ u32 mask;
+
+ netdev_dbg(ndev, "Open port = %x\n", mac->lan_port);
+
+ comm->enable |= mac->lan_port;
+
+ spl2sw_mac_hw_start(comm);
+
+ /* Enable TX and RX interrupts */
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask &= ~(MAC_INT_TX | MAC_INT_RX);
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int spl2sw_ethernet_stop(struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+
+ netif_stop_queue(ndev);
+
+ comm->enable &= ~mac->lan_port;
+
+ phy_stop(ndev->phydev);
+
+ spl2sw_mac_hw_stop(comm);
+
+ return 0;
+}
+
+static netdev_tx_t spl2sw_ethernet_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+ struct spl2sw_skb_info *skbinfo;
+ struct spl2sw_mac_desc *txdesc;
+ unsigned long flags;
+ u32 mapping;
+ u32 tx_pos;
+ u32 cmd1;
+ u32 cmd2;
+
+ if (unlikely(comm->tx_desc_full == 1)) {
+ /* No TX descriptors left. Wait for tx interrupt. */
+ netdev_dbg(ndev, "TX descriptor queue full when xmit!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* If skb size is shorter than ETH_ZLEN (60), pad it with 0. */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
+ mapping = dma_map_single(&comm->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&comm->pdev->dev, mapping)) {
+ ndev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&comm->tx_lock, flags);
+
+ tx_pos = comm->tx_pos;
+ txdesc = &comm->tx_desc[tx_pos];
+ skbinfo = &comm->tx_temp_skb_info[tx_pos];
+ skbinfo->mapping = mapping;
+ skbinfo->len = skb->len;
+ skbinfo->skb = skb;
+
+ /* Set up a TX descriptor */
+ cmd1 = TXD_OWN | TXD_SOP | TXD_EOP | (mac->to_vlan << 12) |
+ (skb->len & TXD_PKT_LEN);
+ cmd2 = skb->len & TXD_BUF_LEN1;
+
+ if (tx_pos == (TX_DESC_NUM - 1))
+ cmd2 |= TXD_EOR;
+
+ txdesc->addr1 = skbinfo->mapping;
+ txdesc->cmd2 = cmd2;
+ wmb(); /* Set TXD_OWN after other fields are effective. */
+ txdesc->cmd1 = cmd1;
+
+ /* Move tx_pos to next position */
+ tx_pos = ((tx_pos + 1) == TX_DESC_NUM) ? 0 : tx_pos + 1;
+
+ if (unlikely(tx_pos == comm->tx_done_pos)) {
+ netif_stop_queue(ndev);
+ comm->tx_desc_full = 1;
+ }
+ comm->tx_pos = tx_pos;
+ wmb(); /* make sure settings are effective. */
+
+ /* Trigger mac to transmit */
+ writel(MAC_TRIG_L_SOC0, comm->l2sw_reg_base + L2SW_CPU_TX_TRIG);
+
+ spin_unlock_irqrestore(&comm->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
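
The full-queue bookkeeping above deserves a note: with producer and consumer indices wrapping modulo TX_DESC_NUM, tx_pos == tx_done_pos is ambiguous between empty and full, so rather than waste a slot the driver records fullness explicitly in tx_desc_full when the advanced tx_pos lands on tx_done_pos. Schematically:

	/* Producer side (sketch of the logic above): */
	tx_pos = (tx_pos + 1 == TX_DESC_NUM) ? 0 : tx_pos + 1;
	if (tx_pos == comm->tx_done_pos) {	/* caught up with completion */
		netif_stop_queue(ndev);
		comm->tx_desc_full = 1;		/* full, not empty */
	}
	/* spl2sw_tx_poll() clears tx_desc_full and wakes the queue. */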
+
+static void spl2sw_ethernet_set_rx_mode(struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+
+ spl2sw_mac_rx_mode_set(mac);
+}
+
+static int spl2sw_ethernet_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ int err;
+
+ err = eth_mac_addr(ndev, addr);
+ if (err)
+ return err;
+
+ /* Delete the old MAC address */
+ netdev_dbg(ndev, "Old Ethernet (MAC) address = %pM\n", mac->mac_addr);
+ if (is_valid_ether_addr(mac->mac_addr)) {
+ err = spl2sw_mac_addr_del(mac);
+ if (err)
+ return err;
+ }
+
+ /* Set the MAC address */
+ ether_addr_copy(mac->mac_addr, ndev->dev_addr);
+ return spl2sw_mac_addr_add(mac);
+}
+
+static void spl2sw_ethernet_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct spl2sw_common *comm = mac->comm;
+ unsigned long flags;
+ int i;
+
+ netdev_err(ndev, "TX timed out!\n");
+ ndev->stats.tx_errors++;
+
+ spin_lock_irqsave(&comm->tx_lock, flags);
+
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i])
+ netif_stop_queue(comm->ndev[i]);
+
+ spl2sw_mac_soft_reset(comm);
+
+ /* Accept TX packets again. */
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i]) {
+ netif_trans_update(comm->ndev[i]);
+ netif_wake_queue(comm->ndev[i]);
+ }
+
+ spin_unlock_irqrestore(&comm->tx_lock, flags);
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = spl2sw_ethernet_open,
+ .ndo_stop = spl2sw_ethernet_stop,
+ .ndo_start_xmit = spl2sw_ethernet_start_xmit,
+ .ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
+ .ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
+ .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
+};
+
+static void spl2sw_check_mac_vendor_id_and_convert(u8 *mac_addr)
+{
+	/* Byte order of MAC address of some samples is reversed.
+ * Check vendor id and convert byte order if it is wrong.
+ * OUI of Sunplus: fc:4b:bc
+ */
+ if (mac_addr[5] == 0xfc && mac_addr[4] == 0x4b && mac_addr[3] == 0xbc &&
+ (mac_addr[0] != 0xfc || mac_addr[1] != 0x4b || mac_addr[2] != 0xbc)) {
+
+ swap(mac_addr[0], mac_addr[5]);
+ swap(mac_addr[1], mac_addr[4]);
+ swap(mac_addr[2], mac_addr[3]);
+ }
+}
+
+static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *np,
+ void *addrbuf)
+{
+ struct nvmem_cell *cell;
+ ssize_t len;
+ u8 *mac;
+
+ /* Get nvmem cell of mac-address from dts. */
+ cell = of_nvmem_cell_get(np, "mac-address");
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ /* Read mac address from nvmem cell. */
+ mac = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(mac))
+ return PTR_ERR(mac);
+
+ if (len != ETH_ALEN) {
+ kfree(mac);
+ dev_info(dev, "Invalid length of mac address in nvmem!\n");
+ return -EINVAL;
+ }
+
+	/* Byte order of some samples is reversed.
+ * Convert byte order here.
+ */
+ spl2sw_check_mac_vendor_id_and_convert(mac);
+
+ /* Check if mac address is valid */
+ if (!is_valid_ether_addr(mac)) {
+ dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
+ kfree(mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(addrbuf, mac);
+ kfree(mac);
+ return 0;
+}
+
+static int spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr,
+ struct net_device **r_ndev)
+{
+ struct net_device *ndev;
+ struct spl2sw_mac *mac;
+ int ret;
+
+ /* Allocate the devices, and also allocate spl2sw_mac,
+ * we can get it by netdev_priv().
+ */
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*mac));
+ if (!ndev) {
+ *r_ndev = NULL;
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &netdev_ops;
+ mac = netdev_priv(ndev);
+ mac->ndev = ndev;
+ ether_addr_copy(mac->mac_addr, mac_addr);
+
+ eth_hw_addr_set(ndev, mac_addr);
+ dev_info(&pdev->dev, "Ethernet (MAC) address = %pM\n", mac_addr);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n",
+ ndev->name);
+ *r_ndev = NULL;
+ return ret;
+ }
+ netdev_dbg(ndev, "Registered net device \"%s\" successfully.\n", ndev->name);
+
+ *r_ndev = ndev;
+ return 0;
+}
+
+static struct device_node *spl2sw_get_eth_child_node(struct device_node *ether_np, int id)
+{
+ struct device_node *port_np;
+	u32 port_id;
+
+ for_each_child_of_node(ether_np, port_np) {
+ /* It is not a 'port' node, continue. */
+ if (strcmp(port_np->name, "port"))
+ continue;
+
+ if (of_property_read_u32(port_np, "reg", &port_id) < 0)
+ continue;
+
+ if (port_id == id)
+ return port_np;
+ }
+
+ /* Not found! */
+ return NULL;
+}
+
+static int spl2sw_probe(struct platform_device *pdev)
+{
+ struct device_node *eth_ports_np;
+ struct device_node *port_np;
+ struct spl2sw_common *comm;
+ struct device_node *phy_np;
+ phy_interface_t phy_mode;
+ struct net_device *ndev;
+ struct spl2sw_mac *mac;
+ u8 mac_addr[ETH_ALEN];
+ int irq, i, ret;
+
+ if (platform_get_drvdata(pdev))
+ return -ENODEV;
+
+ /* Allocate memory for 'spl2sw_common' area. */
+ comm = devm_kzalloc(&pdev->dev, sizeof(*comm), GFP_KERNEL);
+ if (!comm)
+ return -ENOMEM;
+
+ comm->pdev = pdev;
+ platform_set_drvdata(pdev, comm);
+
+ spin_lock_init(&comm->tx_lock);
+ spin_lock_init(&comm->mdio_lock);
+ spin_lock_init(&comm->int_mask_lock);
+
+ /* Get memory resource 0 from dts. */
+ comm->l2sw_reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(comm->l2sw_reg_base))
+ return PTR_ERR(comm->l2sw_reg_base);
+
+ /* Get irq resource from dts. */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ irq = ret;
+
+ /* Get clock controller. */
+ comm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(comm->clk)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(comm->clk),
+ "Failed to retrieve clock controller!\n");
+ return PTR_ERR(comm->clk);
+ }
+
+ /* Get reset controller. */
+ comm->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(comm->rstc)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(comm->rstc),
+ "Failed to retrieve reset controller!\n");
+ return PTR_ERR(comm->rstc);
+ }
+
+ /* Enable clock. */
+ ret = clk_prepare_enable(comm->clk);
+ if (ret)
+ return ret;
+ udelay(1);
+
+ /* Reset MAC */
+ reset_control_assert(comm->rstc);
+ udelay(1);
+ reset_control_deassert(comm->rstc);
+ usleep_range(1000, 2000);
+
+ /* Request irq. */
+ ret = devm_request_irq(&pdev->dev, irq, spl2sw_ethernet_interrupt, 0,
+ dev_name(&pdev->dev), comm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq #%d!\n", irq);
+ goto out_clk_disable;
+ }
+
+ /* Initialize TX and RX descriptors. */
+ ret = spl2sw_descs_init(comm);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to initialize mac descriptors!\n");
+ spl2sw_descs_free(comm);
+ goto out_clk_disable;
+ }
+
+ /* Initialize MAC. */
+ spl2sw_mac_init(comm);
+
+ /* Initialize mdio bus */
+ ret = spl2sw_mdio_init(comm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize mdio bus!\n");
+ goto out_clk_disable;
+ }
+
+ /* Get child node ethernet-ports. */
+ eth_ports_np = of_get_child_by_name(pdev->dev.of_node, "ethernet-ports");
+ if (!eth_ports_np) {
+ dev_err(&pdev->dev, "No ethernet-ports child node found!\n");
+ ret = -ENODEV;
+ goto out_free_mdio;
+ }
+
+ for (i = 0; i < MAX_NETDEV_NUM; i++) {
+ /* Get port@i of node ethernet-ports. */
+ port_np = spl2sw_get_eth_child_node(eth_ports_np, i);
+ if (!port_np)
+ continue;
+
+ /* Get phy-mode. */
+ if (of_get_phy_mode(port_np, &phy_mode)) {
+ dev_err(&pdev->dev, "Failed to get phy-mode property of port@%d!\n",
+ i);
+ continue;
+ }
+
+ /* Get phy-handle. */
+ phy_np = of_parse_phandle(port_np, "phy-handle", 0);
+ if (!phy_np) {
+ dev_err(&pdev->dev, "Failed to get phy-handle property of port@%d!\n",
+ i);
+ continue;
+ }
+
+ /* Get mac-address from nvmem. */
+ ret = spl2sw_nvmem_get_mac_address(&pdev->dev, port_np, mac_addr);
+ if (ret == -EPROBE_DEFER) {
+ goto out_unregister_dev;
+ } else if (ret) {
+ dev_info(&pdev->dev, "Generate a random mac address!\n");
+ eth_random_addr(mac_addr);
+ }
+
+ /* Initialize the net device. */
+ ret = spl2sw_init_netdev(pdev, mac_addr, &ndev);
+ if (ret)
+ goto out_unregister_dev;
+
+ ndev->irq = irq;
+ comm->ndev[i] = ndev;
+ mac = netdev_priv(ndev);
+ mac->phy_node = phy_np;
+ mac->phy_mode = phy_mode;
+ mac->comm = comm;
+
+ mac->lan_port = 0x1 << i; /* forward to port i */
+ mac->to_vlan = 0x1 << i; /* vlan group: i */
+ mac->vlan_id = i; /* vlan group: i */
+
+ /* Set MAC address */
+ ret = spl2sw_mac_addr_add(mac);
+ if (ret)
+ goto out_unregister_dev;
+
+ spl2sw_mac_rx_mode_set(mac);
+ }
+
+ /* Find first valid net device. */
+ for (i = 0; i < MAX_NETDEV_NUM; i++) {
+ if (comm->ndev[i])
+ break;
+ }
+ if (i >= MAX_NETDEV_NUM) {
+ dev_err(&pdev->dev, "No valid ethernet port!\n");
+ ret = -ENODEV;
+ goto out_free_mdio;
+ }
+
+ /* Save first valid net device */
+ ndev = comm->ndev[i];
+
+ ret = spl2sw_phy_connect(comm);
+ if (ret) {
+ netdev_err(ndev, "Failed to connect phy!\n");
+ goto out_unregister_dev;
+ }
+
+ /* Add and enable napi. */
+ netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll);
+ napi_enable(&comm->rx_napi);
+ netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
+ napi_enable(&comm->tx_napi);
+ return 0;
+
+out_unregister_dev:
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i])
+ unregister_netdev(comm->ndev[i]);
+
+out_free_mdio:
+ spl2sw_mdio_remove(comm);
+
+out_clk_disable:
+ clk_disable_unprepare(comm->clk);
+ return ret;
+}
+
+static int spl2sw_remove(struct platform_device *pdev)
+{
+ struct spl2sw_common *comm;
+ int i;
+
+ comm = platform_get_drvdata(pdev);
+
+ spl2sw_phy_remove(comm);
+
+ /* Unregister and free net device. */
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i])
+ unregister_netdev(comm->ndev[i]);
+
+ comm->enable = 0;
+ spl2sw_mac_hw_stop(comm);
+ spl2sw_descs_free(comm);
+
+ /* Disable and delete napi. */
+ napi_disable(&comm->rx_napi);
+ netif_napi_del(&comm->rx_napi);
+ napi_disable(&comm->tx_napi);
+ netif_napi_del(&comm->tx_napi);
+
+ spl2sw_mdio_remove(comm);
+
+ clk_disable_unprepare(comm->clk);
+
+ return 0;
+}
+
+static const struct of_device_id spl2sw_of_match[] = {
+ {.compatible = "sunplus,sp7021-emac"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, spl2sw_of_match);
+
+static struct platform_driver spl2sw_driver = {
+ .probe = spl2sw_probe,
+ .remove = spl2sw_remove,
+ .driver = {
+ .name = "sp7021_emac",
+ .of_match_table = spl2sw_of_match,
+ },
+};
+
+module_platform_driver(spl2sw_driver);
+
+MODULE_AUTHOR("Wells Lu <wellslutw@gmail.com>");
+MODULE_DESCRIPTION("Sunplus Dual 10M/100M Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.c b/drivers/net/ethernet/sunplus/spl2sw_int.c
new file mode 100644
index 000000000000..a37c9a4c281f
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_int.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/spinlock.h>
+#include <linux/of_mdio.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_int.h"
+
+int spl2sw_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
+ struct spl2sw_mac_desc *desc, *h_desc;
+ struct net_device_stats *stats;
+ struct sk_buff *skb, *new_skb;
+ struct spl2sw_skb_info *sinfo;
+ int budget_left = budget;
+ unsigned long flags;
+ u32 rx_pos, pkg_len;
+ u32 num, rx_count;
+ s32 queue;
+ u32 mask;
+ int port;
+ u32 cmd;
+ u32 len;
+
+ /* Process high-priority queue and then low-priority queue. */
+ for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
+ rx_pos = comm->rx_pos[queue];
+ rx_count = comm->rx_desc_num[queue];
+
+ for (num = 0; num < rx_count && budget_left; num++) {
+ sinfo = comm->rx_skb_info[queue] + rx_pos;
+ desc = comm->rx_desc[queue] + rx_pos;
+ cmd = desc->cmd1;
+
+ if (cmd & RXD_OWN)
+ break;
+
+ port = FIELD_GET(RXD_PKT_SP, cmd);
+ if (port < MAX_NETDEV_NUM && comm->ndev[port])
+ stats = &comm->ndev[port]->stats;
+ else
+ goto spl2sw_rx_poll_rec_err;
+
+ pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
+ if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
+ stats->rx_length_errors++;
+ stats->rx_dropped++;
+ goto spl2sw_rx_poll_rec_err;
+ }
+
+ dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
+ comm->rx_desc_buff_size, DMA_FROM_DEVICE);
+
+ skb = sinfo->skb;
+ skb_put(skb, pkg_len - 4); /* Minus FCS */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, comm->ndev[port]);
+ len = skb->len;
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ /* Allocate a new skb for receiving. */
+ new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
+ if (unlikely(!new_skb)) {
+ desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
+ RXD_EOR : 0;
+ sinfo->skb = NULL;
+ sinfo->mapping = 0;
+ desc->addr1 = 0;
+ goto spl2sw_rx_poll_alloc_err;
+ }
+
+ sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
+ comm->rx_desc_buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
+ dev_kfree_skb_irq(new_skb);
+ desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
+ RXD_EOR : 0;
+ sinfo->skb = NULL;
+ sinfo->mapping = 0;
+ desc->addr1 = 0;
+ goto spl2sw_rx_poll_alloc_err;
+ }
+
+ sinfo->skb = new_skb;
+ desc->addr1 = sinfo->mapping;
+
+spl2sw_rx_poll_rec_err:
+ desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
+ RXD_EOR | comm->rx_desc_buff_size :
+ comm->rx_desc_buff_size;
+
+ wmb(); /* Set RXD_OWN after other fields are effective. */
+ desc->cmd1 = RXD_OWN;
+
+spl2sw_rx_poll_alloc_err:
+ /* Move rx_pos to next position */
+ rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;
+
+ budget_left--;
+
+ /* If there are packets in high-priority queue,
+ * stop processing low-priority queue.
+ */
+ if (queue == 1 && !(h_desc->cmd1 & RXD_OWN))
+ break;
+ }
+
+ comm->rx_pos[queue] = rx_pos;
+
+ /* Save pointer to last rx descriptor of high-priority queue. */
+ if (queue == 0)
+ h_desc = comm->rx_desc[queue] + rx_pos;
+ }
+
+ spin_lock_irqsave(&comm->int_mask_lock, flags);
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask &= ~MAC_INT_RX;
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ spin_unlock_irqrestore(&comm->int_mask_lock, flags);
+
+ napi_complete(napi);
+ return budget - budget_left;
+}
+
+int spl2sw_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
+ struct spl2sw_skb_info *skbinfo;
+ struct net_device_stats *stats;
+ int budget_left = budget;
+ unsigned long flags;
+ u32 tx_done_pos;
+ u32 mask;
+ u32 cmd;
+ int i;
+
+ spin_lock(&comm->tx_lock);
+
+ tx_done_pos = comm->tx_done_pos;
+ while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
+ cmd = comm->tx_desc[tx_done_pos].cmd1;
+ if (cmd & TXD_OWN)
+ break;
+
+ skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
+ if (unlikely(!skbinfo->skb))
+ goto spl2sw_tx_poll_next;
+
+ i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1;
+ if (i < MAX_NETDEV_NUM && comm->ndev[i])
+ stats = &comm->ndev[i]->stats;
+ else
+ goto spl2sw_tx_poll_unmap;
+
+ if (unlikely(cmd & (TXD_ERR_CODE))) {
+ stats->tx_errors++;
+ } else {
+ stats->tx_packets++;
+ stats->tx_bytes += skbinfo->len;
+ }
+
+spl2sw_tx_poll_unmap:
+ dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
+ DMA_TO_DEVICE);
+ skbinfo->mapping = 0;
+ dev_kfree_skb_irq(skbinfo->skb);
+ skbinfo->skb = NULL;
+
+spl2sw_tx_poll_next:
+ /* Move tx_done_pos to next position */
+ tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 0 : tx_done_pos + 1;
+
+ if (comm->tx_desc_full == 1)
+ comm->tx_desc_full = 0;
+
+ budget_left--;
+ }
+
+ comm->tx_done_pos = tx_done_pos;
+ if (!comm->tx_desc_full)
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i])
+ if (netif_queue_stopped(comm->ndev[i]))
+ netif_wake_queue(comm->ndev[i]);
+
+ spin_unlock(&comm->tx_lock);
+
+ spin_lock_irqsave(&comm->int_mask_lock, flags);
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask &= ~MAC_INT_TX;
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ spin_unlock_irqrestore(&comm->int_mask_lock, flags);
+
+ napi_complete(napi);
+ return budget - budget_left;
+}
+
+irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id)
+{
+ struct spl2sw_common *comm = (struct spl2sw_common *)dev_id;
+ u32 status;
+ u32 mask;
+ int i;
+
+ status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
+ if (unlikely(!status)) {
+ dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n");
+ goto spl2sw_ethernet_int_out;
+ }
+ writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
+
+ if (status & MAC_INT_RX) {
+ /* Disable RX interrupts. */
+ spin_lock(&comm->int_mask_lock);
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask |= MAC_INT_RX;
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ spin_unlock(&comm->int_mask_lock);
+
+ if (unlikely(status & MAC_INT_RX_DES_ERR)) {
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i]) {
+ comm->ndev[i]->stats.rx_fifo_errors++;
+ break;
+ }
+ dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
+ }
+
+ napi_schedule(&comm->rx_napi);
+ }
+
+ if (status & MAC_INT_TX) {
+ /* Disable TX interrupts. */
+ spin_lock(&comm->int_mask_lock);
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask |= MAC_INT_TX;
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ spin_unlock(&comm->int_mask_lock);
+
+ if (unlikely(status & MAC_INT_TX_DES_ERR)) {
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i]) {
+ comm->ndev[i]->stats.tx_fifo_errors++;
+ break;
+ }
+ dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");
+
+ spin_lock(&comm->int_mask_lock);
+ mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ mask &= ~MAC_INT_TX;
+ writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ spin_unlock(&comm->int_mask_lock);
+ } else {
+ napi_schedule(&comm->tx_napi);
+ }
+ }
+
+spl2sw_ethernet_int_out:
+ return IRQ_HANDLED;
+}
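
The handler above and the two poll routines implement the usual masked-interrupt NAPI handshake: the hard IRQ acknowledges the status, masks the TX/RX sources, and schedules NAPI; the poll routine drains its ring under budget and unmasks the source again on completion. Reduced to the canonical shape (all names hypothetical; note the canonical form completes only when under budget, whereas the poll functions above complete unconditionally):

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo *d = dev_id;
	u32 status = readl(d->base + INT_STATUS);

	writel(status, d->base + INT_STATUS);			/* ack */
	if (status & INT_RX) {
		writel(readl(d->base + INT_MASK) | INT_RX,
		       d->base + INT_MASK);			/* mask RX */
		napi_schedule(&d->rx_napi);
	}
	return IRQ_HANDLED;
}

static int demo_rx_poll(struct napi_struct *napi, int budget)
{
	struct demo *d = container_of(napi, struct demo, rx_napi);
	int done = demo_rx_drain(d, budget);	/* hypothetical helper */

	if (done < budget) {
		napi_complete_done(napi, done);
		writel(readl(d->base + INT_MASK) & ~INT_RX,
		       d->base + INT_MASK);			/* unmask RX */
	}
	return done;
}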
diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.h b/drivers/net/ethernet/sunplus/spl2sw_int.h
new file mode 100644
index 000000000000..64f6f2572fe3
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_int.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_INT_H__
+#define __SPL2SW_INT_H__
+
+int spl2sw_rx_poll(struct napi_struct *napi, int budget);
+int spl2sw_tx_poll(struct napi_struct *napi, int budget);
+irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id);
+
+#endif
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mac.c b/drivers/net/ethernet/sunplus/spl2sw_mac.c
new file mode 100644
index 000000000000..57e431dfc467
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_mac.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/of_mdio.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_desc.h"
+#include "spl2sw_mac.h"
+
+void spl2sw_mac_hw_stop(struct spl2sw_common *comm)
+{
+ u32 reg;
+
+ if (comm->enable == 0) {
+ /* Mask and clear all interrupts. */
+ writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+ writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
+
+		/* Disable CPU port 0 and CPU port 1. */
+ reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
+ reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU;
+ writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
+ }
+
+ /* Disable LAN ports. */
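+	/* Each MAC_DIS_PORT bit is an active-high disable, so writing
+	 * ~comm->enable disables every port that is not enabled.
+	 */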
+ reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
+ reg |= FIELD_PREP(MAC_DIS_PORT, ~comm->enable);
+ writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
+}
+
+void spl2sw_mac_hw_start(struct spl2sw_common *comm)
+{
+ u32 reg;
+
+ /* Enable cpu port 0 (6) & CRC padding (8) */
+ reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
+ reg &= ~MAC_DIS_SOC0_CPU;
+ reg |= MAC_EN_CRC_SOC0;
+ writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
+
+ /* Enable port 0 & port 1 */
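+	/* Clear the disable bit of each enabled port; bits outside
+	 * MAC_DIS_PORT are left untouched.
+	 */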
+ reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
+ reg &= FIELD_PREP(MAC_DIS_PORT, ~comm->enable) | ~MAC_DIS_PORT;
+ writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
+}
+
+int spl2sw_mac_addr_add(struct spl2sw_mac *mac)
+{
+ struct spl2sw_common *comm = mac->comm;
+ u32 reg;
+ int ret;
+
+ /* Write 6-octet MAC address. */
+ writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8),
+ comm->l2sw_reg_base + L2SW_W_MAC_15_0);
+ writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) +
+ (mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24),
+ comm->l2sw_reg_base + L2SW_W_MAC_47_16);
+
+ /* Set learn port = cpu_port, aging = 1 */
+ reg = MAC_W_CPU_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) |
+ FIELD_PREP(MAC_W_AGE, 1) | MAC_W_MAC_CMD;
+ writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
+
+	/* Wait for completion. */
+ ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true,
+ comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
+ if (ret) {
+ netdev_err(mac->ndev, "Failed to add address to table!\n");
+ return ret;
+ }
+
+ netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n",
+ readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0),
+ (u32)FIELD_GET(MAC_W_MAC_47_16,
+ readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)),
+ (u32)FIELD_GET(MAC_W_MAC_15_0,
+ readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0)));
+ return 0;
+}
+
+int spl2sw_mac_addr_del(struct spl2sw_mac *mac)
+{
+ struct spl2sw_common *comm = mac->comm;
+ u32 reg;
+ int ret;
+
+ /* Write 6-octet MAC address. */
+ writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8),
+ comm->l2sw_reg_base + L2SW_W_MAC_15_0);
+ writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) +
+ (mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24),
+ comm->l2sw_reg_base + L2SW_W_MAC_47_16);
+
+ /* Set learn port = lan_port0 and aging = 0
+ * to wipe (age) out the entry.
+ */
+ reg = MAC_W_LAN_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | MAC_W_MAC_CMD;
+ writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
+
+	/* Wait for completion. */
+ ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true,
+ comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
+ if (ret) {
+ netdev_err(mac->ndev, "Failed to delete address from table!\n");
+ return ret;
+ }
+
+ netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n",
+ readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0),
+ (u32)FIELD_GET(MAC_W_MAC_47_16,
+ readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)),
+ (u32)FIELD_GET(MAC_W_MAC_15_0,
+ readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0)));
+ return 0;
+}
+
+void spl2sw_mac_hw_init(struct spl2sw_common *comm)
+{
+ u32 reg;
+
+	/* Disable CPU port 0 and CPU port 1. */
+ reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
+ reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU;
+ writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
+
+ /* Set base addresses of TX and RX queues. */
+ writel(comm->desc_dma, comm->l2sw_reg_base + L2SW_TX_LBASE_ADDR_0);
+ writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * TX_DESC_NUM,
+ comm->l2sw_reg_base + L2SW_TX_HBASE_ADDR_0);
+ writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM +
+ MAC_GUARD_DESC_NUM), comm->l2sw_reg_base + L2SW_RX_HBASE_ADDR_0);
+ writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM +
+ MAC_GUARD_DESC_NUM + RX_QUEUE0_DESC_NUM),
+ comm->l2sw_reg_base + L2SW_RX_LBASE_ADDR_0);
+
+ /* Fc_rls_th=0x4a, Fc_set_th=0x3a, Drop_rls_th=0x2d, Drop_set_th=0x1d */
+ writel(0x4a3a2d1d, comm->l2sw_reg_base + L2SW_FL_CNTL_TH);
+
+ /* Cpu_rls_th=0x4a, Cpu_set_th=0x3a, Cpu_th=0x12, Port_th=0x12 */
+ writel(0x4a3a1212, comm->l2sw_reg_base + L2SW_CPU_FL_CNTL_TH);
+
+ /* mtcc_lmt=0xf, Pri_th_l=6, Pri_th_h=6, weigh_8x_en=1 */
+ writel(0xf6680000, comm->l2sw_reg_base + L2SW_PRI_FL_CNTL);
+
+ /* High-active LED */
+ reg = readl(comm->l2sw_reg_base + L2SW_LED_PORT0);
+ reg |= MAC_LED_ACT_HI;
+ writel(reg, comm->l2sw_reg_base + L2SW_LED_PORT0);
+
+	/* Disable aging of CPU port 0 & 1.
+	 * Disable SA learning of CPU port 0 & 1.
+	 * Enable BC and MC packets to CPU for LAN port 0 & 1.
+	 */
+ reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
+ reg &= ~(MAC_EN_SOC1_AGING | MAC_EN_SOC0_AGING |
+ MAC_DIS_BC2CPU_P1 | MAC_DIS_BC2CPU_P0 |
+ MAC_DIS_MC2CPU_P1 | MAC_DIS_MC2CPU_P0);
+ reg |= MAC_DIS_LRN_SOC1 | MAC_DIS_LRN_SOC0;
+ writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
+
+ /* Enable RMC2CPU for port 0 & 1
+ * Enable Flow control for port 0 & 1
+ * Enable Back pressure for port 0 & 1
+ */
+ reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
+ reg &= ~(MAC_DIS_RMC2CPU_P1 | MAC_DIS_RMC2CPU_P0);
+ reg |= MAC_EN_FLOW_CTL_P1 | MAC_EN_FLOW_CTL_P0 |
+ MAC_EN_BACK_PRESS_P1 | MAC_EN_BACK_PRESS_P0;
+ writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
+
+ /* Disable LAN port SA learning. */
+ reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL1);
+ reg |= MAC_DIS_SA_LRN_P1 | MAC_DIS_SA_LRN_P0;
+ writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL1);
+
+	/* Enable RMII force mode and set both external
+	 * PHY addresses to 31.
+	 */
+ reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+ reg &= ~(MAC_EXT_PHY1_ADDR | MAC_EXT_PHY0_ADDR);
+ reg |= FIELD_PREP(MAC_EXT_PHY1_ADDR, 31) | FIELD_PREP(MAC_EXT_PHY0_ADDR, 31);
+ reg |= MAC_FORCE_RMII_EN_1 | MAC_FORCE_RMII_EN_0;
+ writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+
+ /* Port 0: VLAN group 0
+ * Port 1: VLAN group 1
+ */
+ reg = FIELD_PREP(MAC_P1_PVID, 1) | FIELD_PREP(MAC_P0_PVID, 0);
+ writel(reg, comm->l2sw_reg_base + L2SW_PVID_CONFIG0);
+
+ /* VLAN group 0: cpu0 (bit3) + port0 (bit0) = 1001 = 0x9
+ * VLAN group 1: cpu0 (bit3) + port1 (bit1) = 1010 = 0xa
+ */
+	reg = FIELD_PREP(MAC_VLAN_MEMSET_1, 0xa) | FIELD_PREP(MAC_VLAN_MEMSET_0, 0x9);
+ writel(reg, comm->l2sw_reg_base + L2SW_VLAN_MEMSET_CONFIG0);
+
+ /* RMC forward: to_cpu (1)
+	 * LED: 60 ms (1)
+ * BC storm prev: 31 BC (1)
+ */
+ reg = readl(comm->l2sw_reg_base + L2SW_SW_GLB_CNTL);
+ reg &= ~(MAC_RMC_TB_FAULT_RULE | MAC_LED_FLASH_TIME | MAC_BC_STORM_PREV);
+ reg |= FIELD_PREP(MAC_RMC_TB_FAULT_RULE, 1) |
+ FIELD_PREP(MAC_LED_FLASH_TIME, 1) |
+ FIELD_PREP(MAC_BC_STORM_PREV, 1);
+ writel(reg, comm->l2sw_reg_base + L2SW_SW_GLB_CNTL);
+
+ writel(MAC_INT_MASK_DEF, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
+}
+
+void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac)
+{
+ struct spl2sw_common *comm = mac->comm;
+ struct net_device *ndev = mac->ndev;
+ u32 mask, reg, rx_mode;
+
+ netdev_dbg(ndev, "ndev->flags = %08x\n", ndev->flags);
+ mask = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) |
+ FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port);
+ reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Allow MC and unknown UC packets */
+ rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) |
+ FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port);
+ } else if ((!netdev_mc_empty(ndev) && (ndev->flags & IFF_MULTICAST)) ||
+ (ndev->flags & IFF_ALLMULTI)) {
+ /* Allow MC packets */
+ rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port);
+ } else {
+ /* Disable MC and unknown UC packets */
+ rx_mode = 0;
+ }
+
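+	/* The MAC_DIS_* bits are active-high disables, so the classes
+	 * allowed in rx_mode are inverted before being written into the
+	 * masked field; all other register bits are preserved.
+	 */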
+ writel((reg & (~mask)) | ((~rx_mode) & mask), comm->l2sw_reg_base + L2SW_CPU_CNTL);
+ netdev_dbg(ndev, "cpu_cntl = %08x\n", readl(comm->l2sw_reg_base + L2SW_CPU_CNTL));
+}
+
+void spl2sw_mac_init(struct spl2sw_common *comm)
+{
+ u32 i;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
+ comm->rx_pos[i] = 0;
+	mb();	/* Make sure the settings take effect. */
+
+ spl2sw_mac_hw_init(comm);
+}
+
+void spl2sw_mac_soft_reset(struct spl2sw_common *comm)
+{
+ u32 i;
+
+ spl2sw_mac_hw_stop(comm);
+
+ spl2sw_rx_descs_flush(comm);
+ comm->tx_pos = 0;
+ comm->tx_done_pos = 0;
+ comm->tx_desc_full = 0;
+
+ for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
+ comm->rx_pos[i] = 0;
+	mb();	/* Make sure the settings take effect. */
+
+ spl2sw_mac_hw_init(comm);
+ spl2sw_mac_hw_start(comm);
+}
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mac.h b/drivers/net/ethernet/sunplus/spl2sw_mac.h
new file mode 100644
index 000000000000..41a929b3a380
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_mac.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_MAC_H__
+#define __SPL2SW_MAC_H__
+
+void spl2sw_mac_hw_stop(struct spl2sw_common *comm);
+void spl2sw_mac_hw_start(struct spl2sw_common *comm);
+int spl2sw_mac_addr_add(struct spl2sw_mac *mac);
+int spl2sw_mac_addr_del(struct spl2sw_mac *mac);
+void spl2sw_mac_hw_init(struct spl2sw_common *comm);
+void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac);
+void spl2sw_mac_init(struct spl2sw_common *comm);
+void spl2sw_mac_soft_reset(struct spl2sw_common *comm);
+
+#endif
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
new file mode 100644
index 000000000000..733ae1704269
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/of_mdio.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_mdio.h"
+
+#define SPL2SW_MDIO_READ_CMD 0x02
+#define SPL2SW_MDIO_WRITE_CMD 0x01
+
+static int spl2sw_mdio_access(struct spl2sw_common *comm, u8 cmd, u8 addr, u8 regnum, u16 wdata)
+{
+ u32 reg, reg2;
+ u32 val;
+ int ret;
+
+	/* Note that the PHY address (addr) must match either ext_phy0_addr
+	 * or ext_phy1_addr; otherwise, the MDIO command won't be sent out.
+	 */
+ reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+ reg &= ~MAC_EXT_PHY0_ADDR;
+ reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, addr);
+
+ reg2 = FIELD_PREP(MAC_CPU_PHY_WT_DATA, wdata) | FIELD_PREP(MAC_CPU_PHY_CMD, cmd) |
+ FIELD_PREP(MAC_CPU_PHY_REG_ADDR, regnum) | FIELD_PREP(MAC_CPU_PHY_ADDR, addr);
+
+ /* Set ext_phy0_addr and then issue mdio command.
+ * No interrupt is allowed in between.
+ */
+ spin_lock_irq(&comm->mdio_lock);
+ writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+ writel(reg2, comm->l2sw_reg_base + L2SW_PHY_CNTL_REG0);
+ spin_unlock_irq(&comm->mdio_lock);
+
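+	/* Poll until the hardware sets the done flag that mirrors the
+	 * issued command bit.
+	 */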
+ ret = read_poll_timeout(readl, val, val & cmd, 1, 1000, true,
+ comm->l2sw_reg_base + L2SW_PHY_CNTL_REG1);
+
+	/* Set ext_phy0_addr back to 31 to keep the hardware auto-MDIO
+	 * function from sending MDIO commands to the PHY.
+	 */
+ reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+ reg &= ~MAC_EXT_PHY0_ADDR;
+ reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, 31);
+ writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+
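+	/* On success, the read data is in the upper 16 bits of the polled register. */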
+	if (ret)
+		return ret;
+
+	return val >> 16;
+}
+
+static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct spl2sw_common *comm = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0);
+}
+
+static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct spl2sw_common *comm = bus->priv;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int spl2sw_mdio_init(struct spl2sw_common *comm)
+{
+ struct device_node *mdio_np;
+ struct mii_bus *mii_bus;
+ int ret;
+
+ /* Get mdio child node. */
+ mdio_np = of_get_child_by_name(comm->pdev->dev.of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(&comm->pdev->dev, "No mdio child node found!\n");
+ return -ENODEV;
+ }
+
+ /* Allocate and register mdio bus. */
+ mii_bus = devm_mdiobus_alloc(&comm->pdev->dev);
+ if (!mii_bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mii_bus->name = "sunplus_mii_bus";
+ mii_bus->parent = &comm->pdev->dev;
+ mii_bus->priv = comm;
+ mii_bus->read = spl2sw_mii_read;
+ mii_bus->write = spl2sw_mii_write;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&comm->pdev->dev));
+
+ ret = of_mdiobus_register(mii_bus, mdio_np);
+ if (ret) {
+ dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n");
+ goto out;
+ }
+
+ comm->mii_bus = mii_bus;
+
+out:
+ of_node_put(mdio_np);
+ return ret;
+}
+
+void spl2sw_mdio_remove(struct spl2sw_common *comm)
+{
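+	/* The bus is devm-allocated; only the registration must be undone here. */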
+ if (comm->mii_bus) {
+ mdiobus_unregister(comm->mii_bus);
+ comm->mii_bus = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.h b/drivers/net/ethernet/sunplus/spl2sw_mdio.h
new file mode 100644
index 000000000000..8a24c9caeb6f
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_MDIO_H__
+#define __SPL2SW_MDIO_H__
+
+int spl2sw_mdio_init(struct spl2sw_common *comm);
+void spl2sw_mdio_remove(struct spl2sw_common *comm);
+
+#endif
diff --git a/drivers/net/ethernet/sunplus/spl2sw_phy.c b/drivers/net/ethernet/sunplus/spl2sw_phy.c
new file mode 100644
index 000000000000..404f508a54d4
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_phy.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/of_mdio.h>
+
+#include "spl2sw_register.h"
+#include "spl2sw_define.h"
+#include "spl2sw_phy.h"
+
+static void spl2sw_mii_link_change(struct net_device *ndev)
+{
+ struct spl2sw_mac *mac = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ struct spl2sw_common *comm = mac->comm;
+ u32 reg;
+
+ reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+
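+	/* Each MAC_FORCE_RMII_* field has one bit per LAN port; the
+	 * FIELD_PREP(..., ~mac->lan_port) | ~MAC_FORCE_RMII_* pattern
+	 * below clears only this port's bit and keeps the other port's.
+	 */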
+ if (phydev->link) {
+ reg |= FIELD_PREP(MAC_FORCE_RMII_LINK, mac->lan_port);
+
+ if (phydev->speed == 100) {
+ reg |= FIELD_PREP(MAC_FORCE_RMII_SPD, mac->lan_port);
+ } else {
+ reg &= FIELD_PREP(MAC_FORCE_RMII_SPD, ~mac->lan_port) |
+ ~MAC_FORCE_RMII_SPD;
+ }
+
+ if (phydev->duplex) {
+ reg |= FIELD_PREP(MAC_FORCE_RMII_DPX, mac->lan_port);
+ } else {
+ reg &= FIELD_PREP(MAC_FORCE_RMII_DPX, ~mac->lan_port) |
+ ~MAC_FORCE_RMII_DPX;
+ }
+
+ if (phydev->pause) {
+ reg |= FIELD_PREP(MAC_FORCE_RMII_FC, mac->lan_port);
+ } else {
+ reg &= FIELD_PREP(MAC_FORCE_RMII_FC, ~mac->lan_port) |
+ ~MAC_FORCE_RMII_FC;
+ }
+ } else {
+ reg &= FIELD_PREP(MAC_FORCE_RMII_LINK, ~mac->lan_port) |
+ ~MAC_FORCE_RMII_LINK;
+ }
+
+ writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
+
+ phy_print_status(phydev);
+}
+
+int spl2sw_phy_connect(struct spl2sw_common *comm)
+{
+ struct phy_device *phydev;
+ struct net_device *ndev;
+ struct spl2sw_mac *mac;
+ int i;
+
+ for (i = 0; i < MAX_NETDEV_NUM; i++)
+ if (comm->ndev[i]) {
+ ndev = comm->ndev[i];
+ mac = netdev_priv(ndev);
+ phydev = of_phy_connect(ndev, mac->phy_node, spl2sw_mii_link_change,
+ 0, mac->phy_mode);
+ if (!phydev)
+ return -ENODEV;
+
+ phy_support_asym_pause(phydev);
+ phy_attached_info(phydev);
+ }
+
+ return 0;
+}
+
+void spl2sw_phy_remove(struct spl2sw_common *comm)
+{
+ struct net_device *ndev;
+ int i;
+
+	for (i = 0; i < MAX_NETDEV_NUM; i++) {
+		ndev = comm->ndev[i];
+		if (ndev) {
+			phy_disconnect(ndev->phydev);
+			ndev->phydev = NULL;
+		}
+	}
+}
diff --git a/drivers/net/ethernet/sunplus/spl2sw_phy.h b/drivers/net/ethernet/sunplus/spl2sw_phy.h
new file mode 100644
index 000000000000..3d051a2548c8
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_phy.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_PHY_H__
+#define __SPL2SW_PHY_H__
+
+int spl2sw_phy_connect(struct spl2sw_common *comm);
+void spl2sw_phy_remove(struct spl2sw_common *comm);
+
+#endif
diff --git a/drivers/net/ethernet/sunplus/spl2sw_register.h b/drivers/net/ethernet/sunplus/spl2sw_register.h
new file mode 100644
index 000000000000..9718e2ee2b6c
--- /dev/null
+++ b/drivers/net/ethernet/sunplus/spl2sw_register.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef __SPL2SW_REGISTER_H__
+#define __SPL2SW_REGISTER_H__
+
+/* Register L2SW */
+#define L2SW_SW_INT_STATUS_0 0x0
+#define L2SW_SW_INT_MASK_0 0x4
+#define L2SW_FL_CNTL_TH 0x8
+#define L2SW_CPU_FL_CNTL_TH 0xc
+#define L2SW_PRI_FL_CNTL 0x10
+#define L2SW_VLAN_PRI_TH 0x14
+#define L2SW_EN_TOS_BUS 0x18
+#define L2SW_TOS_MAP0 0x1c
+#define L2SW_TOS_MAP1 0x20
+#define L2SW_TOS_MAP2 0x24
+#define L2SW_TOS_MAP3 0x28
+#define L2SW_TOS_MAP4 0x2c
+#define L2SW_TOS_MAP5 0x30
+#define L2SW_TOS_MAP6 0x34
+#define L2SW_TOS_MAP7 0x38
+#define L2SW_GLOBAL_QUE_STATUS 0x3c
+#define L2SW_ADDR_TBL_SRCH 0x40
+#define L2SW_ADDR_TBL_ST 0x44
+#define L2SW_MAC_AD_SER0 0x48
+#define L2SW_MAC_AD_SER1 0x4c
+#define L2SW_WT_MAC_AD0 0x50
+#define L2SW_W_MAC_15_0 0x54
+#define L2SW_W_MAC_47_16 0x58
+#define L2SW_PVID_CONFIG0 0x5c
+#define L2SW_PVID_CONFIG1 0x60
+#define L2SW_VLAN_MEMSET_CONFIG0 0x64
+#define L2SW_VLAN_MEMSET_CONFIG1 0x68
+#define L2SW_PORT_ABILITY 0x6c
+#define L2SW_PORT_ST 0x70
+#define L2SW_CPU_CNTL 0x74
+#define L2SW_PORT_CNTL0 0x78
+#define L2SW_PORT_CNTL1 0x7c
+#define L2SW_PORT_CNTL2 0x80
+#define L2SW_SW_GLB_CNTL 0x84
+#define L2SW_L2SW_SW_RESET 0x88
+#define L2SW_LED_PORT0 0x8c
+#define L2SW_LED_PORT1 0x90
+#define L2SW_LED_PORT2 0x94
+#define L2SW_LED_PORT3 0x98
+#define L2SW_LED_PORT4 0x9c
+#define L2SW_WATCH_DOG_TRIG_RST 0xa0
+#define L2SW_WATCH_DOG_STOP_CPU 0xa4
+#define L2SW_PHY_CNTL_REG0 0xa8
+#define L2SW_PHY_CNTL_REG1 0xac
+#define L2SW_MAC_FORCE_MODE 0xb0
+#define L2SW_VLAN_GROUP_CONFIG0 0xb4
+#define L2SW_VLAN_GROUP_CONFIG1 0xb8
+#define L2SW_FLOW_CTRL_TH3 0xbc
+#define L2SW_QUEUE_STATUS_0 0xc0
+#define L2SW_DEBUG_CNTL 0xc4
+#define L2SW_RESERVED_1 0xc8
+#define L2SW_MEM_TEST_INFO 0xcc
+#define L2SW_SW_INT_STATUS_1 0xd0
+#define L2SW_SW_INT_MASK_1 0xd4
+#define L2SW_SW_GLOBAL_SIGNAL 0xd8
+
+#define L2SW_CPU_TX_TRIG 0x208
+#define L2SW_TX_HBASE_ADDR_0 0x20c
+#define L2SW_TX_LBASE_ADDR_0 0x210
+#define L2SW_RX_HBASE_ADDR_0 0x214
+#define L2SW_RX_LBASE_ADDR_0 0x218
+#define L2SW_TX_HW_ADDR_0 0x21c
+#define L2SW_TX_LW_ADDR_0 0x220
+#define L2SW_RX_HW_ADDR_0 0x224
+#define L2SW_RX_LW_ADDR_0 0x228
+#define L2SW_CPU_PORT_CNTL_REG_0 0x22c
+#define L2SW_TX_HBASE_ADDR_1 0x230
+#define L2SW_TX_LBASE_ADDR_1 0x234
+#define L2SW_RX_HBASE_ADDR_1 0x238
+#define L2SW_RX_LBASE_ADDR_1 0x23c
+#define L2SW_TX_HW_ADDR_1 0x240
+#define L2SW_TX_LW_ADDR_1 0x244
+#define L2SW_RX_HW_ADDR_1 0x248
+#define L2SW_RX_LW_ADDR_1 0x24c
+#define L2SW_CPU_PORT_CNTL_REG_1 0x250
+
+#endif
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index 5c9b6c90942b..f8e133604146 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -54,8 +54,8 @@ static void xlgmac_default_config(struct xlgmac_pdata *pdata)
pdata->phy_speed = SPEED_25000;
pdata->sysclk_rate = XLGMAC_SYSCLOCK;
- strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
- strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+ strscpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+ strscpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
}
static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
index 49f8c6be9459..e794da727fe0 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -102,9 +102,9 @@ static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev,
u32 ver = pdata->hw_feat.version;
u32 snpsver, devid, userver;
- strlcpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
/* S|SNPSVER: Synopsys-defined Version
* D|DEVID: Indicates the Device family
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index d435519236e4..36b948820c1e 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -81,7 +81,7 @@ static int xlgmac_prep_tso(struct sk_buff *skb,
if (ret)
return ret;
- pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->header_len = skb_tcp_all_headers(skb);
pkt_info->tcp_header_len = tcp_hdrlen(skb);
pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
pkt_info->mss = skb_shinfo(skb)->gso_size;
@@ -419,15 +419,14 @@ static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xlgmac_one_poll,
- NAPI_POLL_WEIGHT);
+ xlgmac_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xlgmac_all_poll, NAPI_POLL_WEIGHT);
+ xlgmac_all_poll);
napi_enable(&pdata->napi);
}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index 98e3a271e017..a848e10f3ea4 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -38,7 +38,8 @@
#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+#define XLGMAC_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for a SKB */
#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 985073eba3bd..ca409515ead5 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1994,7 +1994,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->nic = nic;
priv->msg_enable = BDX_DEF_MSG_ENABLE;
- netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
+ netif_napi_add(ndev, &priv->napi, bdx_poll);
if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
DBG("HW statistics not supported\n");
@@ -2133,10 +2133,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct bdx_priv *priv = netdev_priv(netdev);
- strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
+ strscpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index affcf92cd3aa..fce06663e1e1 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -33,6 +33,7 @@ config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select PHYLIB
+ select MDIO_BITBANG
help
This driver supports TI's DaVinci MDIO module.
@@ -94,6 +95,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
+ select PHYLINK
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index d45b6bb86f0b..c51e2af91f69 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -6,7 +6,7 @@
*/
#include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -380,11 +380,9 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
int ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
dev_err(common->dev, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(common->dev);
- }
return ret;
}
@@ -404,9 +402,9 @@ static void am65_cpsw_get_drvinfo(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- strlcpy(info->driver, dev_driver_string(common->dev),
+ strscpy(info->driver, dev_driver_string(common->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
}
static u32 am65_cpsw_get_msglevel(struct net_device *ndev)
@@ -471,9 +469,7 @@ static void am65_cpsw_get_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = salve->rx_pause ? true : false;
- pause->tx_pause = salve->tx_pause ? true : false;
+ phylink_ethtool_get_pauseparam(salve->phylink, pause);
}
static int am65_cpsw_set_pauseparam(struct net_device *ndev,
@@ -481,18 +477,7 @@ static int am65_cpsw_set_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EINVAL;
-
- if (!phy_validate_pause(salve->phy, pause))
- return -EINVAL;
-
- salve->rx_pause = pause->rx_pause ? true : false;
- salve->tx_pause = pause->tx_pause ? true : false;
-
- phy_set_asym_pause(salve->phy, salve->rx_pause, salve->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(salve->phylink, pause);
}
static void am65_cpsw_get_wol(struct net_device *ndev,
@@ -500,11 +485,7 @@ static void am65_cpsw_get_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (salve->phy)
- phy_ethtool_get_wol(salve->phy, wol);
+ phylink_ethtool_get_wol(salve->phylink, wol);
}
static int am65_cpsw_set_wol(struct net_device *ndev,
@@ -512,10 +493,7 @@ static int am65_cpsw_set_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_wol(salve->phy, wol);
+ return phylink_ethtool_set_wol(salve->phylink, wol);
}
static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
@@ -523,11 +501,7 @@ static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- phy_ethtool_ksettings_get(salve->phy, ecmd);
- return 0;
+ return phylink_ethtool_ksettings_get(salve->phylink, ecmd);
}
static int
@@ -536,40 +510,28 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_ksettings_set(salve->phy, ecmd);
+ return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_get_eee(salve->phy, edata);
+ return phylink_ethtool_get_eee(salve->phylink, edata);
}
static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_eee(salve->phy, edata);
+ return phylink_ethtool_set_eee(salve->phylink, edata);
}
static int am65_cpsw_nway_reset(struct net_device *ndev)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_restart_aneg(salve->phy);
+ return phylink_ethtool_nway_reset(salve->phylink);
}
static int am65_cpsw_get_regs_len(struct net_device *ndev)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 8251d7eb001b..c50b137f92d7 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
@@ -18,7 +19,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -73,6 +74,9 @@
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+#define AM65_CPSW_SGMII_CONTROL_REG 0x010
+#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
+
#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
@@ -159,69 +163,6 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
common->pdata.quirks);
}
-void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct phy_device *phy = port->slave.phy;
- u32 mac_control = 0;
-
- if (!phy)
- return;
-
- if (phy->link) {
- mac_control = CPSW_SL_CTL_GMII_EN;
-
- if (phy->speed == 1000)
- mac_control |= CPSW_SL_CTL_GIG;
- if (phy->speed == 10 && phy_interface_is_rgmii(phy))
- /* Can be used with in band mode only */
- mac_control |= CPSW_SL_CTL_EXT_EN;
- if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
- mac_control |= CPSW_SL_CTL_IFCTL_A;
- if (phy->duplex)
- mac_control |= CPSW_SL_CTL_FULLDUPLEX;
-
- /* RGMII speed is 100M if !CPSW_SL_CTL_GIG*/
-
- /* rx_pause/tx_pause */
- if (port->slave.rx_pause)
- mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
-
- if (port->slave.tx_pause)
- mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
-
- cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
-
- /* enable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
-
- am65_cpsw_qos_link_up(ndev, phy->speed);
- netif_tx_wake_all_queues(ndev);
- } else {
- int tmo;
-
- /* disable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
-
- cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
-
- tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
- dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
- cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
- tmo);
-
- cpsw_sl_ctl_reset(port->slave.mac_sl);
-
- am65_cpsw_qos_link_down(ndev);
- netif_tx_stop_all_queues(ndev);
- }
-
- phy_print_status(phy);
-}
-
static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
@@ -236,11 +177,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
if (!vid)
@@ -266,11 +205,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(common->ale, vid,
@@ -426,8 +363,7 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
-static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
- netdev_features_t features)
+static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
int port_idx, i, ret;
@@ -589,15 +525,11 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
- if (port->slave.phy)
- phy_stop(port->slave.phy);
+ phylink_stop(port->slave.phylink);
netif_tx_stop_all_queues(ndev);
- if (port->slave.phy) {
- phy_disconnect(port->slave.phy);
- port->slave.phy = NULL;
- }
+ phylink_disconnect_phy(port->slave.phylink);
ret = am65_cpsw_nuss_common_stop(common);
if (ret)
@@ -624,11 +556,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret, i;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
@@ -646,7 +576,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
for (i = 0; i < common->tx_ch_num; i++)
netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
- ret = am65_cpsw_nuss_common_open(common, ndev->features);
+ ret = am65_cpsw_nuss_common_open(common);
if (ret)
return ret;
@@ -662,30 +592,14 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
- port->slave.phy_if);
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
if (ret)
goto error_cleanup;
- if (port->slave.phy_node) {
- port->slave.phy = of_phy_connect(ndev,
- port->slave.phy_node,
- &am65_cpsw_nuss_adjust_link,
- 0, port->slave.phy_if);
- if (!port->slave.phy) {
- dev_err(common->dev, "phy %pOF not found on slave %d\n",
- port->slave.phy_node,
- port->port_id);
- ret = -ENODEV;
- goto error_cleanup;
- }
- }
-
/* restore vlan configurations */
vlan_for_each(ndev, cpsw_restore_vlans, port);
- phy_attached_info(port->slave.phy);
- phy_start(port->slave.phy);
+ phylink_start(port->slave.phylink);
return 0;
@@ -1292,11 +1206,9 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
HOST_PORT_NUM, 0, 0);
@@ -1431,10 +1343,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
return am65_cpsw_nuss_hwtstamp_get(ndev, req);
}
- if (!port->slave.phy)
- return -EOPNOTSUPP;
-
- return phy_mii_ioctl(port->slave.phy, req, cmd);
+ return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}
static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
@@ -1494,6 +1403,88 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
+static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+
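+	/* Only interface modes flagged in pdata.extra_modes (QSGMII on
+	 * J7200) need the port's SGMII control register programmed.
+	 */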
+ if (common->pdata.extra_modes & BIT(state->interface))
+ writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
+ port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
+}
+
+static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ struct net_device *ndev = port->ndev;
+ int tmo;
+
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ am65_cpsw_qos_link_down(ndev);
+ netif_tx_stop_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ u32 mac_control = CPSW_SL_CTL_GMII_EN;
+ struct net_device *ndev = port->ndev;
+
+ if (speed == SPEED_1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+ /* Can be used with in band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ if (duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* rx_pause/tx_pause */
+ if (rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ am65_cpsw_qos_link_up(ndev, speed);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = am65_cpsw_nuss_mac_config,
+ .mac_link_down = am65_cpsw_nuss_mac_link_down,
+ .mac_link_up = am65_cpsw_nuss_mac_link_up,
+};
+
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
struct am65_cpsw_common *common = port->common;
@@ -1802,6 +1793,7 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
if (IS_ERR(cpts)) {
int ret = PTR_ERR(cpts);
+ of_node_put(node);
if (ret == -EOPNOTSUPP) {
dev_info(dev, "cpts disabled\n");
return 0;
@@ -1859,6 +1851,8 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->common = common;
port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ if (common->pdata.extra_modes)
+ port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
@@ -1890,27 +1884,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- if (of_phy_is_fixed_link(port_np)) {
- ret = of_phy_register_fixed_link(port_np);
- if (ret) {
- ret = dev_err_probe(dev, ret,
- "failed to register fixed-link phy %pOF\n",
- port_np);
- goto of_node_put;
- }
- port->slave.phy_node = of_node_get(port_np);
- } else {
- port->slave.phy_node =
- of_parse_phandle(port_np, "phy-handle", 0);
- }
-
- if (!port->slave.phy_node) {
- dev_err(dev,
- "slave[%d] no phy found\n", port_id);
- ret = -ENODEV;
- goto of_node_put;
- }
-
+ port->slave.phy_node = port_np;
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -1918,6 +1892,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
}
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ if (ret)
+ goto of_node_put;
+
ret = of_get_mac_address(port_np, port->slave.mac_addr);
if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
@@ -1952,12 +1930,25 @@ static void am65_cpsw_pcpu_stats_free(void *data)
free_percpu(stats);
}
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->slave.phylink)
+ phylink_destroy(port->slave.phylink);
+ }
+}
+
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
+ struct phylink *phylink;
int ret;
port = &common->ports[port_idx];
@@ -1995,6 +1986,33 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+	/* Configure phylink */
+ port->slave.phylink_config.dev = &port->ndev->dev;
+ port->slave.phylink_config.type = PHYLINK_NETDEV;
+ port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ } else if (port->slave.phy_if == PHY_INTERFACE_MODE_RMII) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else {
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ phylink = phylink_create(&port->slave.phylink_config,
+ of_node_to_fwnode(port->slave.phy_node),
+ port->slave.phy_if,
+ &am65_cpsw_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ port->slave.phylink = phylink;
+
/* Disable TX checksum offload by default due to HW bug */
if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
port->ndev->features &= ~NETIF_F_HW_CSUM;
@@ -2026,7 +2044,7 @@ static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
}
netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+ am65_cpsw_nuss_rx_poll);
return ret;
}
@@ -2039,8 +2057,8 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2458,7 +2476,10 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ if (port->ndev)
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ else
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
attrs.phys.port_number = port->port_id;
attrs.switch_id.id_len = sizeof(resource_size_t);
memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
@@ -2470,7 +2491,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port->port_id, ret);
goto dl_port_unreg;
}
- devlink_port_type_eth_set(dl_port, port->ndev);
}
devlink_register(common->devlink);
return ret;
@@ -2514,6 +2534,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2530,6 +2551,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
}
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ return ret;
+
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -2542,25 +2567,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
i, ret);
goto err_cleanup_ndev;
}
+
+ dl_port = &port->devlink_port;
+ devlink_port_type_eth_set(dl_port, port->ndev);
}
ret = am65_cpsw_register_notifiers(common);
if (ret)
goto err_cleanup_ndev;
- ret = am65_cpsw_nuss_register_devlink(common);
- if (ret)
- goto clean_unregister_notifiers;
-
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
return 0;
-clean_unregister_notifiers:
- am65_cpsw_unregister_notifiers(common);
+
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
return ret;
}
@@ -2611,10 +2635,18 @@ static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};
+static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
+ { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2669,9 +2701,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (!node)
return -ENOENT;
common->port_num = of_get_child_count(node);
+ of_node_put(node);
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
- of_node_put(node);
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
@@ -2691,9 +2723,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2761,15 +2792,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
pm_runtime_put(dev);
return 0;
+err_free_phylink:
+ am65_cpsw_nuss_phylink_cleanup(common);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@@ -2786,11 +2819,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
common = dev_get_drvdata(dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
@@ -2799,6 +2830,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
* dma_deconfigure(dev) before devres_release_all(dev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_nuss_phylink_cleanup(common);
of_platform_device_destroy(common->mdio_dev, NULL);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 048ed10143c1..2c9850fdfcb6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
@@ -30,13 +30,14 @@ struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
struct device_node *phy_node;
- struct phy_device *phy;
phy_interface_t phy_if;
struct phy *ifphy;
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
int port_vlan;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
struct am65_cpsw_port {
@@ -45,6 +46,7 @@ struct am65_cpsw_port {
const char *name;
u32 port_id;
void __iomem *port_base;
+ void __iomem *sgmii_base;
void __iomem *stat_base;
void __iomem *fetch_ram_base;
bool disabled;
@@ -87,6 +89,7 @@ struct am65_cpsw_rx_chn {
struct am65_cpsw_pdata {
u32 quirks;
+ u64 extra_modes;
enum k3_ring_mode fdqring_mode;
const char *ale_dev_id;
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index ebcc6386cc34..e162771893af 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -8,10 +8,12 @@
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#include <net/pkt_cls.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
+#include "cpsw_ale.h"
#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_PN_REG_CTL 0x004
@@ -164,8 +166,7 @@ static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- if (port->qos.est_oper)
- devm_kfree(&ndev->dev, port->qos.est_oper);
+ devm_kfree(&ndev->dev, port->qos.est_oper);
port->qos.est_oper = port->qos.est_admin;
port->qos.est_admin = NULL;
@@ -432,11 +433,8 @@ static void am65_cpsw_purge_est(struct net_device *ndev)
am65_cpsw_stop_est(ndev);
- if (port->qos.est_admin)
- devm_kfree(&ndev->dev, port->qos.est_admin);
-
- if (port->qos.est_oper)
- devm_kfree(&ndev->dev, port->qos.est_oper);
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_oper);
port->qos.est_oper = NULL;
port->qos.est_admin = NULL;
@@ -522,8 +520,7 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
ret = am65_cpsw_configure_taprio(ndev, est_new);
if (!ret) {
if (taprio->enable) {
- if (port->qos.est_admin)
- devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_admin);
port->qos.est_admin = est_new;
} else {
@@ -588,12 +585,190 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
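+	/* 01:00:00:00:00:00 as both key and mask matches any multicast MAC. */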
+ struct am65_cpsw_qos *qos = &port->qos;
+ struct flow_match_eth_addrs match;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_bc_ratelimit.cookie = cls->cookie;
+ qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_mc_ratelimit.cookie = cls->cookie;
+ qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
+{
+ struct am65_cpsw_qos *qos = &port->qos;
+
+ if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
+ qos->ale_bc_ratelimit.cookie = 0;
+ qos->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
+ }
+
+ if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
+ qos->ale_mc_ratelimit.cookie = 0;
+ qos->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return am65_cpsw_qos_configure_clsflower(port, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return am65_cpsw_qos_delete_clsflower(port, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct am65_cpsw_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(am65_cpsw_qos_block_cb_list);
+
+static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
+ am65_cpsw_qos_setup_tc_block_cb,
+ port, port, true);
+}
+
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
default:
return -EOPNOTSUPP;
}
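
Taken together, the checks in am65_cpsw_qos_clsflower_policer_validate() accept only a pure packet-rate policer: the exceed action must be drop, the conform action must be pipe or ok, ok is only valid as the last action, and no byte-rate parameters may be set. A standalone sketch of the same decision, using illustrative types rather than the kernel's struct flow_action_entry:

#include <stdbool.h>
#include <stdint.h>

enum act_id { ACT_DROP, ACT_PIPE, ACT_ACCEPT };

struct police_sketch {
        enum act_id exceed;
        enum act_id notexceed;
        bool is_last_entry;
        uint64_t rate_bytes_ps, peakrate_bytes_ps, avrate, overhead;
};

bool police_offloadable(const struct police_sketch *p)
{
        if (p->exceed != ACT_DROP)
                return false;           /* exceed action must be drop */
        if (p->notexceed != ACT_PIPE && p->notexceed != ACT_ACCEPT)
                return false;           /* conform action must be pipe or ok */
        if (p->notexceed == ACT_ACCEPT && !p->is_last_entry)
                return false;           /* ok only allowed as the last action */
        /* only a packets-per-second rate can be programmed into the ALE */
        return !(p->rate_bytes_ps || p->peakrate_bytes_ps ||
                 p->avrate || p->overhead);
}

Only rate_pkt_ps survives into am65_cpsw_qos_clsflower_add_policer(), since the ALE limiter counts packets, not bytes.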
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index e8f1b6b59e93..fb223b43b196 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -14,11 +14,19 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct am65_cpsw_qos {
struct am65_cpsw_est *est_admin;
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+
+ struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
};
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 599708a3e81d..d4c56da98a6a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -237,15 +237,11 @@ static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
port->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c30a6e510aa3..e2f0fb286143 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -943,9 +943,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->irq = of_irq_get_byname(node, "cpts");
if (cpts->irq <= 0) {
ret = cpts->irq ?: -ENXIO;
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get IRQ number (err = %d)\n",
- ret);
+ dev_err_probe(dev, ret, "Failed to get IRQ number\n");
return ERR_PTR(ret);
}
@@ -965,8 +963,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
if (IS_ERR(cpts->refclk)) {
ret = PTR_ERR(cpts->refclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get refclk %d\n", ret);
+ dev_err_probe(dev, ret, "Failed to get refclk\n");
return ERR_PTR(ret);
}
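
The dev_err_probe() conversions above fold the open-coded -EPROBE_DEFER test into a single call: it logs at error level for real failures, quietly records the deferral reason for -EPROBE_DEFER, and returns the error unchanged. A minimal standalone sketch of the logging rule each call site used to open-code (the EPROBE_DEFER value is the kernel-internal errno, reproduced here only for illustration):

#include <stdio.h>

#define EPROBE_DEFER 517        /* kernel-internal errno */

int err_probe_sketch(int err, const char *msg)
{
        if (err != -EPROBE_DEFER)       /* deferral is not worth a log line */
                fprintf(stderr, "%s (err = %d)\n", msg, err);
        return err;
}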
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index bef5e68dac31..80eeeb463c4f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -851,8 +851,8 @@ static int cpmac_set_ringparam(struct net_device *dev,
static void cpmac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "cpmac", sizeof(info->driver));
- strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ strscpy(info->driver, "cpmac", sizeof(info->driver));
+ strscpy(info->version, CPMAC_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}
@@ -1109,7 +1109,7 @@ static int cpmac_probe(struct platform_device *pdev)
dev->netdev_ops = &cpmac_netdev_ops;
dev->ethtool_ops = &cpmac_ethtool_ops;
- netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+ netif_napi_add(dev, &priv->napi, cpmac_poll);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->rx_lock);
@@ -1169,7 +1169,7 @@ static struct platform_driver cpmac_driver = {
.remove = cpmac_remove,
};
-int cpmac_init(void)
+int __init cpmac_init(void)
{
u32 mask;
int i, res;
@@ -1239,7 +1239,7 @@ fail_alloc:
return res;
}
-void cpmac_exit(void)
+void __exit cpmac_exit(void)
{
platform_driver_unregister(&cpmac_driver);
mdiobus_unregister(cpmac_mii);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 33142d505fc8..13c9c2d6b79b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -335,7 +335,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -349,7 +349,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
int pkt_size = cpsw->rx_packet_max;
int ret = 0, port, ch = xmeta->ch;
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
struct cpsw_priv *priv;
struct page_pool *pool;
@@ -392,7 +392,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -442,7 +442,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
@@ -756,11 +756,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
int ret;
u32 reg;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
netif_carrier_off(ndev);
@@ -856,6 +854,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
err_cleanup:
if (!cpsw->usage_count) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
cpdma_ctlr_stop(cpsw->dma);
cpsw_destroy_xdp_rxqs(cpsw);
}
@@ -968,11 +968,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
@@ -1052,11 +1050,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
@@ -1090,11 +1086,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
int i;
@@ -1180,9 +1174,9 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
struct platform_device *pdev = to_platform_device(cpsw->dev);
- strlcpy(info->driver, "cpsw", sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, "cpsw", sizeof(info->driver));
+ strscpy(info->version, "1.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int cpsw_set_pauseparam(struct net_device *ndev,
@@ -1327,8 +1321,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
*/
ret = of_phy_register_fixed_link(slave_node);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n");
goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
@@ -1567,11 +1560,9 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto clean_runtime_disable_ret;
- }
ret = cpsw_probe_dt(&cpsw->data, pdev);
if (ret)
@@ -1648,11 +1639,9 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &cpsw->napi_rx,
- cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
- CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &cpsw->napi_tx,
- cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
- CPSW_POLL_WEIGHT);
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
+ netif_napi_add_tx(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll);
/* register the network device */
SET_NETDEV_DEV(ndev, dev);
@@ -1734,11 +1723,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int i, ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 1ef0aaef5c61..231370e9a801 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -50,6 +50,8 @@
/* ALE_AGING_TIMER */
#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+#define ALE_RATE_LIMIT_MIN_PPS 1000
+
/**
* struct ale_entry_fld - The ALE tbl entry field description
* @start_bit: field start bit
@@ -1136,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
return tmp & BITMASK(info->bits);
}
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d MC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
static void cpsw_ale_timer(struct timer_list *t)
{
struct cpsw_ale *ale = from_timer(ale, t, timer);
@@ -1199,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ unsigned long ale_prescale;
+
+ /* configure Broadcast and Multicast Rate Limit
+ * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT
+ * ALE_PRESCALE width is 19bit and min value 0x10
+ * port.BCAST/MCAST_LIMIT is 8bit
+ *
+ * To support multi-port configurations, ALE_PRESCALE is set to a 1ms interval,
+ * which allows port.BCAST/MCAST_LIMIT to be configured per port, achieving:
+ * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1
+ * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF
+ */
+ ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS;
+ writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE);
+
+ /* Allow MC/BC rate limiting globally.
+ * The actual rate limit is enabled per port via port.BCAST/MCAST_LIMIT
+ */
+ cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1);
+
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
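
Given the 1ms prescale programmed above, the 8-bit port.BCAST/MCAST_LIMIT field counts in units of 1000 packets per second, which is why cpsw_ale_rx_ratelimit_bc()/_mc() round requested rates down and reject anything below 1000pps. A runnable sketch of that quantization; the 125 MHz bus frequency in the comment is only an example:

#include <stdio.h>

#define ALE_RATE_LIMIT_MIN_PPS 1000     /* 1ms prescale => 1000pps granularity */

unsigned int ale_effective_pps(unsigned int requested_pps)
{
        /* 8-bit limit field, rounded down */
        unsigned int val = requested_pps / ALE_RATE_LIMIT_MIN_PPS;

        return val * ALE_RATE_LIMIT_MIN_PPS;
}

int main(void)
{
        /* e.g. bus_freq 125 MHz -> ALE_PRESCALE = 125000000 / 1000 = 125000 */
        printf("%u\n", ale_effective_pps(2500));        /* 2000: remainder dropped */
        printf("%u\n", ale_effective_pps(900));         /* 0: below minimum, rejected */
        printf("%u\n", ale_effective_pps(255000));      /* 255000: 0xFF * 1000, the maximum */
        return 0;
}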
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 13fe47687fde..aba4572cfa3b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -120,6 +120,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index aa42141be3c0..a557a477d039 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -364,11 +364,9 @@ int cpsw_ethtool_op_begin(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
int ret;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(cpsw->dev);
- }
return ret;
}
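
This is one instance of a conversion repeated throughout the series: the pm_runtime_get_sync() plus put_noidle-on-failure pair becomes pm_runtime_resume_and_get(), which drops the usage count internally on error and collapses the positive "already active" return to 0. A standalone sketch of the semantics with stand-in helpers (not the kernel's PM API):

struct dev_sketch { int usage; int broken; };

int get_sync_sketch(struct dev_sketch *d)
{
        d->usage++;                     /* usage count is bumped even on failure */
        return d->broken ? -13 : 1;     /* error, or positive "already resumed" */
}

void put_noidle_sketch(struct dev_sketch *d)
{
        d->usage--;                     /* drop the count without idling */
}

/* pm_runtime_resume_and_get() semantics, mirrored standalone */
int resume_and_get_sketch(struct dev_sketch *d)
{
        int ret = get_sync_sketch(d);

        if (ret < 0) {
                put_noidle_sketch(d);
                return ret;
        }
        return 0;
}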
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 279e261e4720..83596ec0c7cb 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -273,7 +273,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -283,7 +283,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
void *pa = page_address(page);
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct cpsw_meta_xdp *xmeta;
struct cpsw_common *cpsw;
struct net_device *ndev;
@@ -336,7 +336,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -386,7 +386,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
@@ -449,11 +449,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -498,6 +496,8 @@ static void cpsw_restore(struct cpsw_priv *priv)
/* restore CBS offload */
cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ cpsw_qos_clsflower_resume(priv);
}
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
@@ -829,11 +829,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
dev_info(priv->dev, "starting ndev. mode: %s\n",
cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
@@ -985,11 +983,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
vid = cpsw->slaves[slave_no].port_vlan;
flags = ALE_VLAN | ALE_SECURE;
@@ -1024,11 +1020,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* reset the return code as pm_runtime_get_sync() can return
* non zero values as well.
@@ -1152,9 +1146,9 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev;
pdev = to_platform_device(cpsw->dev);
- strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
- strlcpy(info->version, "2.0", sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int cpsw_set_pauseparam(struct net_device *ndev,
@@ -1246,8 +1240,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
sizeof(struct cpsw_slave_data),
GFP_KERNEL);
- if (!data->slave_data)
+ if (!data->slave_data) {
+ of_node_put(tmp_node);
return -ENOMEM;
+ }
/* Populate all the child nodes here...
*/
@@ -1292,9 +1288,8 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
- port_np, ret);
+ dev_err_probe(dev, ret, "%pOF failed to register fixed-link phy\n",
+ port_np);
goto err_node_put;
}
slave_data->phy_node = of_node_get(port_np);
@@ -1341,6 +1336,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
err_node_put:
of_node_put(port_np);
+ of_node_put(tmp_node);
return ret;
}
@@ -1407,7 +1403,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1420,13 +1416,10 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
* accordingly.
*/
netif_napi_add(ndev, &cpsw->napi_rx,
- cpsw->quirk_irq ?
- cpsw_rx_poll : cpsw_rx_mq_poll,
- CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
+ netif_napi_add_tx(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ?
- cpsw_tx_poll : cpsw_tx_mq_poll,
- CPSW_POLL_WEIGHT);
+ cpsw_tx_poll : cpsw_tx_mq_poll);
}
napi_ndev = ndev;
@@ -1918,9 +1911,8 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2045,11 +2037,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
cpsw_unregister_notifiers(cpsw);
cpsw_unregister_devlink(cpsw);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 3537502e5e8b..758295c898ac 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -364,7 +364,7 @@ void cpsw_split_res(struct cpsw_common *cpsw)
if (cpsw->tx_ch_num == rlim_ch_num) {
max_rate = consumed_rate;
} else if (!rlim_ch_num) {
- ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
+ ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
bigest_rate = 0;
max_rate = consumed_rate;
} else {
@@ -379,19 +379,19 @@ void cpsw_split_res(struct cpsw_common *cpsw)
if (max_rate < consumed_rate)
max_rate *= 10;
- ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
- ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
+ ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
+ ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
(cpsw->tx_ch_num - rlim_ch_num);
bigest_rate = (max_rate - consumed_rate) /
(cpsw->tx_ch_num - rlim_ch_num);
}
/* split tx weight/budget */
- budget = CPSW_POLL_WEIGHT;
+ budget = NAPI_POLL_WEIGHT;
for (i = 0; i < cpsw->tx_ch_num; i++) {
ch_rate = cpdma_chan_get_rate(txv[i].ch);
if (ch_rate) {
- txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
+ txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
if (!txv[i].budget)
txv[i].budget++;
if (ch_rate > bigest_rate) {
@@ -417,7 +417,7 @@ void cpsw_split_res(struct cpsw_common *cpsw)
txv[bigest_rate_ch].budget += budget;
/* split rx budget */
- budget = CPSW_POLL_WEIGHT;
+ budget = NAPI_POLL_WEIGHT;
ch_budget = budget / cpsw->rx_ch_num;
for (i = 0; i < cpsw->rx_ch_num; i++) {
cpsw->rxv[i].budget = ch_budget;
@@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ageout = ale_ageout;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
ale_params.dev_id = "cpsw";
+ ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
@@ -754,11 +755,9 @@ int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return -EINVAL;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
pm_runtime_put(cpsw->dev);
@@ -970,11 +969,9 @@ static int cpsw_set_cbs(struct net_device *ndev,
return -1;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
bw = qopt->enable ? qopt->idleslope : 0;
ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
@@ -1008,11 +1005,9 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
if (mqprio->mode != TC_MQPRIO_MODE_DCB)
return -EINVAL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (num_tc) {
for (i = 0; i < 8; i++) {
@@ -1048,6 +1043,8 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
+
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
@@ -1058,6 +1055,9 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
case TC_SETUP_QDISC_MQPRIO:
return cpsw_set_mqprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return cpsw_qos_setup_tc_block(ndev, type_data);
+
default:
return -EOPNOTSUPP;
}
@@ -1122,7 +1122,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
xmeta->ndev = priv->ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
page, dma,
cpsw->rx_packet_max,
@@ -1146,7 +1146,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
int size)
{
- struct page_pool_params pp_params;
+ struct page_pool_params pp_params = {};
struct page_pool *pool;
pp_params.order = 0;
@@ -1381,3 +1381,202 @@ drop:
page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
+
+static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+ u32 port_id;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_bc_ratelimit.cookie = cls->cookie;
+ priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_mc_ratelimit.cookie = cls->cookie;
+ priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return cpsw_qos_clsflower_add_policer(priv, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
+ priv->ale_bc_ratelimit.cookie = 0;
+ priv->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
+ }
+
+ if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
+ priv->ale_mc_ratelimit.cookie = 0;
+ priv->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return cpsw_qos_configure_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return cpsw_qos_delete_clsflower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct cpsw_priv *priv = cb_priv;
+ int ret;
+
+ if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ pm_runtime_put(priv->dev);
+ return ret;
+}
+
+static LIST_HEAD(cpsw_qos_block_cb_list);
+
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
+ cpsw_qos_setup_tc_block_cb,
+ priv, priv, true);
+}
+
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (priv->ale_bc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
+ priv->ale_bc_ratelimit.rate_packet_ps);
+
+ if (priv->ale_mc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
+ priv->ale_mc_ratelimit.rate_packet_ps);
+}
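
For reference, the dst-MAC patterns cpsw_qos_clsflower_add_policer() accepts reduce to two cases: an all-ones key and mask selects broadcast, while a key and mask of 01:00:00:00:00:00 (just the group bit) selects all multicast traffic. A standalone sketch with illustrative names:

#include <stdint.h>
#include <string.h>

enum limiter { LIMIT_NONE, LIMIT_BC, LIMIT_MC };

static const uint8_t bc_mac[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const uint8_t mc_mac[6] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

enum limiter classify_dst(const uint8_t key[6], const uint8_t mask[6])
{
        if (!memcmp(key, bc_mac, 6) && !memcmp(mask, bc_mac, 6))
                return LIMIT_BC;        /* exact broadcast address */
        if (!memcmp(key, mc_mac, 6) && !memcmp(mask, mc_mac, 6))
                return LIMIT_MC;        /* only the multicast/group bit */
        return LIMIT_NONE;              /* anything else stays in software */
}

The cookie saved on FLOW_CLS_REPLACE is what cpsw_qos_delete_clsflower() and cpsw_qos_clsflower_resume() later key on, so each port tracks at most one broadcast and one multicast limiter.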
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 74555970730c..34230145ca0b 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -89,7 +89,6 @@ do { \
#define CPDMA_TXCP 0x40
#define CPDMA_RXCP 0x60
-#define CPSW_POLL_WEIGHT 64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN)
#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN)
@@ -364,6 +363,11 @@ struct cpsw_common {
u8 base_mac[ETH_ALEN];
};
+struct cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct cpsw_priv {
struct net_device *ndev;
struct device *dev;
@@ -384,6 +388,8 @@ struct cpsw_priv {
struct cpsw_common *cpsw;
int offload_fwd_mark;
u32 tx_packet_min;
+ struct cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct cpsw_ale_ratelimit ale_mc_ratelimit;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
@@ -411,7 +417,6 @@ struct __aligned(sizeof(long)) cpsw_meta_xdp {
/* The buf includes headroom compatible with both skb and xdpf */
#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
static inline int cpsw_is_xdpf_handle(void *handle)
{
@@ -462,6 +467,7 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
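
With CPSW_HEADROOM gone, every receive buffer reserves the unaligned CPSW_HEADROOM_NA, which the comment above notes is sized for both skb and xdp_frame use. A quick standalone check of the macro's value; XDP_PACKET_HEADROOM is 256, while NET_SKB_PAD and NET_IP_ALIGN are arch- and config-dependent, so the figures below are illustrative defaults:

#include <stdio.h>

#define XDP_PACKET_HEADROOM 256
#define NET_SKB_PAD 64          /* max(32, L1_CACHE_BYTES) on a 64-byte-cache system */
#define NET_IP_ALIGN 2          /* 0 on some architectures */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* CPSW_HEADROOM_NA as defined above */
        printf("%d\n", MAX(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN); /* 258 */
        return 0;
}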
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index a7d97d429e06..ce85f7610273 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -252,15 +252,11 @@ static int cpsw_port_vlans_add(struct cpsw_priv *priv,
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
priv->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index dc70a6bfaa6a..92ca739fac01 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts)
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- clk_enable(cpts->refclk);
+ err = clk_enable(cpts->refclk);
+ if (err)
+ return err;
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 31df3267a01a..2eb9d5a32588 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -113,7 +113,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_RX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
-#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
/* Buffer descriptor parameters */
#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */
@@ -375,8 +374,8 @@ static char *emac_rxhost_errcodes[16] = {
static void emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, emac_version_string, sizeof(info->driver));
- strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, emac_version_string, sizeof(info->driver));
+ strscpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
}
/**
@@ -950,7 +949,7 @@ static void emac_tx_handler(void *token, int len, int status)
*
* Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
*/
-static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
@@ -1422,9 +1421,8 @@ static int emac_dev_open(struct net_device *ndev)
struct phy_device *phydev = NULL;
struct device *phy = NULL;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_resume_and_get(&priv->pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
return ret;
@@ -1604,6 +1602,7 @@ static int emac_dev_stop(struct net_device *ndev)
int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
+ int ret = 0;
/* inform the upper layers. */
netif_stop_queue(ndev);
@@ -1618,17 +1617,31 @@ static int emac_dev_stop(struct net_device *ndev)
phy_disconnect(ndev->phydev);
/* Free IRQ */
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
- for (irq_num = res->start; irq_num <= res->end; irq_num++)
- free_irq(irq_num, priv->ndev);
- i++;
+ if (dev_of_node(&priv->pdev->dev)) {
+ do {
+ ret = platform_get_irq_optional(priv->pdev, i);
+ if (ret < 0 && ret != -ENXIO)
+ break;
+ if (ret > 0) {
+ free_irq(ret, priv->ndev);
+ } else {
+ ret = 0;
+ break;
+ }
+ } while (++i);
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
}
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
pm_runtime_put(&priv->pdev->dev);
- return 0;
+ return ret;
}
/**
@@ -1646,9 +1659,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
u32 stats_clear_mask;
int err;
- err = pm_runtime_get_sync(&priv->pdev->dev);
+ err = pm_runtime_resume_and_get(&priv->pdev->dev);
if (err < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, err);
return &ndev->stats;
@@ -1936,12 +1948,11 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &ethtool_ops;
- netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, emac_poll);
pm_runtime_enable(&pdev->dev);
- rc = pm_runtime_get_sync(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
if (rc < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, rc);
goto err_napi_del;
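
The reworked emac_dev_stop() in this file frees IRQs two ways: on OF platforms it walks platform_get_irq_optional() until the list ends, treating -ENXIO as a clean stop and any other negative value as a real error to propagate, while legacy board files keep the old resource walk. A standalone sketch of the OF-side termination logic (get_irq() and the IRQ numbers are stand-ins):

#include <stdio.h>

#define ENXIO 6

static int fake_irqs[] = { 23, 24 };

static int get_irq(int i)
{
        return i < 2 ? fake_irqs[i] : -ENXIO;   /* -ENXIO: no more IRQs */
}

int main(void)
{
        int i = 0, ret = 0;

        do {
                ret = get_irq(i);
                if (ret < 0 && ret != -ENXIO)
                        break;                  /* real error: propagate */
                if (ret > 0) {
                        printf("free_irq(%d)\n", ret);
                } else {
                        ret = 0;                /* clean end of list */
                        break;
                }
        } while (++i);

        return ret;
}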
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a4efd5e35158..946b9753ccfb 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -26,6 +26,8 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/sys_soc.h>
/*
* This timeout definition is a worst-case ultra defensive measure against
@@ -41,6 +43,7 @@
struct davinci_mdio_of_param {
int autosuspend_delay_ms;
+ bool manual_mode;
};
struct davinci_mdio_regs {
@@ -49,6 +52,15 @@ struct davinci_mdio_regs {
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
#define CONTROL_MAX_DIV (0xffff)
+#define CONTROL_CLKDIV GENMASK(15, 0)
+
+#define MDIO_MAN_MDCLK_O BIT(2)
+#define MDIO_MAN_OE BIT(1)
+#define MDIO_MAN_PIN BIT(0)
+#define MDIO_MANUALMODE BIT(31)
+
+#define MDIO_PIN 0
+
u32 alive;
u32 link;
@@ -59,7 +71,9 @@ struct davinci_mdio_regs {
u32 userintmasked;
u32 userintmaskset;
u32 userintmaskclr;
- u32 __reserved_1[20];
+ u32 manualif;
+ u32 poll;
+ u32 __reserved_1[18];
struct {
u32 access;
@@ -70,7 +84,7 @@ struct davinci_mdio_regs {
#define USERACCESS_DATA (0xffff)
u32 physel;
- } user[0];
+ } user[];
};
static const struct mdio_platform_data default_pdata = {
@@ -79,6 +93,7 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
+ struct mdiobb_ctrl bb_ctrl;
struct davinci_mdio_regs __iomem *regs;
struct clk *clk;
struct device *dev;
@@ -90,6 +105,7 @@ struct davinci_mdio_data {
*/
bool skip_scan;
u32 clk_div;
+ bool manual_mode;
};
static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
@@ -128,16 +144,132 @@ static void davinci_mdio_enable(struct davinci_mdio_data *data)
writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
-static int davinci_mdio_reset(struct mii_bus *bus)
+static void davinci_mdio_disable(struct davinci_mdio_data *data)
+{
+ u32 reg;
+
+ /* Disable MDIO state machine */
+ reg = readl(&data->regs->control);
+
+ reg &= ~CONTROL_CLKDIV;
+ reg |= data->clk_div;
+
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &data->regs->control);
+}
+
+static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
+{
+ u32 reg;
+ /* set manual mode */
+ reg = readl(&data->regs->poll);
+ reg |= MDIO_MANUALMODE;
+ writel(reg, &data->regs->poll);
+}
+
+static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (level)
+ reg |= MDIO_MAN_MDCLK_O;
+ else
+ reg &= ~MDIO_MAN_MDCLK_O;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (output)
+ reg |= MDIO_MAN_OE;
+ else
+ reg &= ~MDIO_MAN_OE;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (value)
+ reg |= MDIO_MAN_PIN;
+ else
+ reg &= ~MDIO_MAN_PIN;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct davinci_mdio_data *data;
+ unsigned long reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+ return test_bit(MDIO_PIN, &reg);
+}
+
+static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_read(bus, phy, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_write(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
{
- struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
int ret;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
+
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
}
/* wait for scan logic to settle */
@@ -173,6 +305,23 @@ done:
return 0;
}
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+
+ return davinci_mdio_common_reset(data);
+}
+
+static int davinci_mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ struct davinci_mdio_data *data;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+
+ return davinci_mdio_common_reset(data);
+}
+
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
@@ -232,11 +381,9 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
(phy_id << 16));
@@ -276,11 +423,9 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
(phy_id << 16) | (phy_data & USERACCESS_DATA));
@@ -324,6 +469,28 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return 0;
}
+struct k3_mdio_soc_data {
+ bool manual_mode;
+};
+
+static const struct k3_mdio_soc_data am65_mdio_soc_data = {
+ .manual_mode = true,
+};
+
+static const struct soc_device_attribute k3_mdio_socinfo[] = {
+ { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { /* sentinel */ },
+};
+
#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
.autosuspend_delay_ms = 100,
@@ -337,6 +504,14 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif
+static const struct mdiobb_ops davinci_mdiobb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = davinci_set_mdc,
+ .set_mdio_dir = davinci_set_mdio_dir,
+ .set_mdio_data = davinci_set_mdio_data,
+ .get_mdio_data = davinci_get_mdio_data,
+};
+
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -351,7 +526,26 @@ static int davinci_mdio_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->bus = devm_mdiobus_alloc(dev);
+ data->manual_mode = false;
+ data->bb_ctrl.ops = &davinci_mdiobb_ops;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ const struct soc_device_attribute *soc_match_data;
+
+ soc_match_data = soc_device_match(k3_mdio_socinfo);
+ if (soc_match_data && soc_match_data->data) {
+ const struct k3_mdio_soc_data *socdata =
+ soc_match_data->data;
+
+ data->manual_mode = socdata->manual_mode;
+ }
+ }
+
+ if (data->manual_mode)
+ data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
+ else
+ data->bus = devm_mdiobus_alloc(dev);
+
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
return -ENOMEM;
@@ -377,11 +571,20 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
data->bus->name = dev_name(dev);
- data->bus->read = davinci_mdio_read;
- data->bus->write = davinci_mdio_write;
- data->bus->reset = davinci_mdio_reset;
+
+ if (data->manual_mode) {
+ data->bus->read = davinci_mdiobb_read;
+ data->bus->write = davinci_mdiobb_write;
+ data->bus->reset = davinci_mdiobb_reset;
+
+ dev_info(dev, "Configuring MDIO in manual mode\n");
+ } else {
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->priv = data;
+ }
data->bus->parent = dev;
- data->bus->priv = data;
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
@@ -439,9 +642,13 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus)
+ if (data->bus) {
mdiobus_unregister(data->bus);
+ if (data->manual_mode)
+ free_mdio_bitbang(data->bus);
+ }
+
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -458,7 +665,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
writel(ctrl, &data->regs->control);
- wait_for_idle(data);
+
+ if (!data->manual_mode)
+ wait_for_idle(data);
return 0;
}
@@ -467,7 +676,12 @@ static int davinci_mdio_runtime_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- davinci_mdio_enable(data);
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ } else {
+ davinci_mdio_enable(data);
+ }
return 0;
}
#endif
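
In manual mode the MDIO state machine is disabled and the bus is bit-banged through the four mdiobb_ops above, each of which toggles or samples one bit in the manualif register. A standalone sketch of how a bitbang core drives such callbacks to move bits on the wire; the types here are illustrative, and the real core (drivers/net/mdio/mdio-bitbang.c) additionally inserts timing delays and handles the frame preamble and turnaround:

struct bb_ops_sketch {
        void (*set_mdc)(void *ctx, int level);
        void (*set_mdio_dir)(void *ctx, int output);
        void (*set_mdio_data)(void *ctx, int value);
        int (*get_mdio_data)(void *ctx);
};

void bb_send_bit(const struct bb_ops_sketch *ops, void *ctx, int val)
{
        ops->set_mdio_data(ctx, val);   /* drive the data line */
        ops->set_mdc(ctx, 1);           /* the PHY latches on the rising edge */
        ops->set_mdc(ctx, 0);
}

int bb_recv_bit(const struct bb_ops_sketch *ops, void *ctx)
{
        ops->set_mdc(ctx, 1);           /* clock one cycle ... */
        ops->set_mdc(ctx, 0);
        return ops->get_mdio_data(ctx); /* ... then sample the pin */
}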
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index b818e4579f6f..aba70bef4894 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -24,7 +24,6 @@
#include "netcp.h"
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
-#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
@@ -2082,7 +2081,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->tx_pool_region_id = temp[1];
if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
- dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+ dev_err(dev, "tx-pool size too small, must be at least %ld\n",
MAX_SKB_FRAGS);
ret = -ENODEV;
goto quit;
@@ -2096,8 +2095,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
}
/* NAPI register */
- netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
- netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
+ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
+ netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);
/* Register the network device */
ndev->dev_id = 0;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 741c42c6a417..b3da76efa8f5 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -762,12 +762,12 @@ static void tlan_get_drvinfo(struct net_device *dev,
{
struct tlan_priv *priv = netdev_priv(dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
if (priv->pci_dev)
- strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
else
- strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
+ strscpy(info->bus_info, "EISA", sizeof(info->bus_info));
}
static int tlan_get_eeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3dbfb1b20649..cf8de8a7a8a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1187,8 +1187,8 @@ int gelic_net_open(struct net_device *netdev)
void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int gelic_ether_get_link_ksettings(struct net_device *netdev,
@@ -1441,7 +1441,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
{
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */
- netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, napi, gelic_net_poll);
netdev->ethtool_ops = &gelic_ether_ethtool_ops;
netdev->netdev_ops = &gelic_netdevice_ops;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f47b8358669d..50d7eacfec58 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -35,6 +35,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
+#include <linux/of.h>
#include <net/checksum.h>
#include "spider_net.h"
@@ -2269,8 +2270,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->aneg_count = 0;
timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
- netif_napi_add(netdev, &card->napi,
- spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
+ netif_napi_add(netdev, &card->napi, spider_net_poll);
spider_net_setup_netdev_ops(netdev);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 05b1a0736835..51948e2b3a34 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -44,7 +44,6 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_RX_CSUM_DEFAULT 1
#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-#define SPIDER_NET_NAPI_WEIGHT 64
#define SPIDER_NET_FIRMWARE_SEQS 6
#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 93110dba0bfa..fef9fd127b5e 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -63,12 +63,12 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
card = netdev_priv(netdev);
/* clear and fill out info */
- strlcpy(drvinfo->driver, spider_net_driver_name,
+ strscpy(drvinfo->driver, spider_net_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "no information",
+ strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "no information",
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(card->pdev),
+ strscpy(drvinfo->bus_info, pci_name(card->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index ce38f7515225..b50be67b398b 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -804,7 +804,7 @@ static int tc35815_init_one(struct pci_dev *pdev,
dev->netdev_ops = &tc35815_netdev_ops;
dev->ethtool_ops = &tc35815_ethtool_ops;
dev->watchdog_timeo = TC35815_TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
+ netif_napi_add_weight(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
dev->irq = pdev->irq;
dev->base_addr = (unsigned long)ioaddr;
@@ -1956,9 +1956,9 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct tc35815_local *lp = netdev_priv(dev);
- strlcpy(info->driver, MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
static u32 tc35815_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index cf0917b29e30..d09d352e1c0a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -59,9 +59,6 @@
/* Check the phy status every half a second. */
#define CHECK_PHY_INTERVAL (HZ/2)
-static int tsi108_init_one(struct platform_device *pdev);
-static int tsi108_ether_remove(struct platform_device *pdev);
-
struct tsi108_prv_data {
void __iomem *regs; /* Base of normal regs */
void __iomem *phyregs; /* Base of register bank used for PHY access */
@@ -144,16 +141,6 @@ struct tsi108_prv_data {
struct platform_device *pdev;
};
-/* Structure for a device driver */
-
-static struct platform_driver tsi_eth_driver = {
- .probe = tsi108_init_one,
- .remove = tsi108_ether_remove,
- .driver = {
- .name = "tsi-ethernet",
- },
-};
-
static void tsi108_timed_checker(struct timer_list *t);
#ifdef DEBUG
@@ -1091,20 +1078,22 @@ static int tsi108_get_mac(struct net_device *dev)
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+ u8 addr[ETH_ALEN];
/* Note that the octets are reversed from what the manual says,
* producing an even weirder ordering...
*/
if (word2 == 0 && word1 == 0) {
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x06;
- dev->dev_addr[2] = 0xd2;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x00;
+ addr[0] = 0x00;
+ addr[1] = 0x06;
+ addr[2] = 0xd2;
+ addr[3] = 0x00;
+ addr[4] = 0x00;
if (0x8 == data->phy)
- dev->dev_addr[5] = 0x01;
+ addr[5] = 0x01;
else
- dev->dev_addr[5] = 0x02;
+ addr[5] = 0x02;
+ eth_hw_addr_set(dev, addr);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
@@ -1114,12 +1103,13 @@ static int tsi108_get_mac(struct net_device *dev)
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
} else {
- dev->dev_addr[0] = (word2 >> 16) & 0xff;
- dev->dev_addr[1] = (word2 >> 24) & 0xff;
- dev->dev_addr[2] = (word1 >> 0) & 0xff;
- dev->dev_addr[3] = (word1 >> 8) & 0xff;
- dev->dev_addr[4] = (word1 >> 16) & 0xff;
- dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ addr[0] = (word2 >> 16) & 0xff;
+ addr[1] = (word2 >> 24) & 0xff;
+ addr[2] = (word1 >> 0) & 0xff;
+ addr[3] = (word1 >> 8) & 0xff;
+ addr[4] = (word1 >> 16) & 0xff;
+ addr[5] = (word1 >> 24) & 0xff;
+ eth_hw_addr_set(dev, addr);
}
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -1136,14 +1126,12 @@ static int tsi108_set_mac(struct net_device *dev, void *addr)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1, word2;
- int i;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (i = 0; i < 6; i++)
- /* +2 is for the offset of the HW addr type */
- dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+ /* +2 is for the offset of the HW addr type */
+ eth_hw_addr_set(dev, ((unsigned char *)addr) + 2);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
@@ -1302,12 +1290,15 @@ static int tsi108_open(struct net_device *dev)
data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
&data->rxdma, GFP_KERNEL);
- if (!data->rxring)
+ if (!data->rxring) {
+ free_irq(data->irq_num, dev);
return -ENOMEM;
+ }
data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
&data->txdma, GFP_KERNEL);
if (!data->txring) {
+ free_irq(data->irq_num, dev);
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
return -ENOMEM;
@@ -1597,7 +1588,7 @@ tsi108_init_one(struct platform_device *pdev)
data->phy_type = einfo->phy_type;
data->irq_num = einfo->irq_num;
data->id = pdev->id;
- netif_napi_add(dev, &data->napi, tsi108_poll, 64);
+ netif_napi_add(dev, &data->napi, tsi108_poll);
dev->netdev_ops = &tsi108_netdev_ops;
dev->ethtool_ops = &tsi108_ethtool_ops;
@@ -1682,6 +1673,16 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+ .probe = tsi108_init_one,
+ .remove = tsi108_ether_remove,
+ .driver = {
+ .name = "tsi-ethernet",
+ },
+};
module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig
index 6e2cf062ddba..4184a635fe01 100644
--- a/drivers/net/ethernet/vertexcom/Kconfig
+++ b/drivers/net/ethernet/vertexcom/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_VERTEXCOM
bool "Vertexcom devices"
- default n
+ default y
help
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 89a31783fbb4..aeed2a093e34 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -362,7 +362,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
mse102x_dump_packet(__func__, skb->len, skb->data);
skb->protocol = eth_type_trans(skb, mse->ndev);
- netif_rx_ni(skb);
+ netif_rx(skb);
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
@@ -731,7 +731,7 @@ static int mse102x_probe_spi(struct spi_device *spi)
return 0;
}
-static int mse102x_remove_spi(struct spi_device *spi)
+static void mse102x_remove_spi(struct spi_device *spi)
{
struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
@@ -741,8 +741,6 @@ static int mse102x_remove_spi(struct spi_device *spi)
mse102x_remove_device_debugfs(mses);
unregister_netdev(mse->ndev);
-
- return 0;
}
static const struct of_device_id mse102x_match_table[] = {
@@ -752,6 +750,13 @@ static const struct of_device_id mse102x_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mse102x_match_table);
+static const struct spi_device_id mse102x_ids[] = {
+ { "mse1021" },
+ { "mse1022" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mse102x_ids);
+
static struct spi_driver mse102x_driver = {
.driver = {
.name = DRV_NAME,
@@ -760,10 +765,11 @@ static struct spi_driver mse102x_driver = {
},
.probe = mse102x_probe_spi,
.remove = mse102x_remove_spi,
+ .id_table = mse102x_ids,
};
module_spi_driver(mse102x_driver);
MODULE_DESCRIPTION("MSE102x Network driver");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@chargebyte.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 509c5e9b29df..0fb15a17b547 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -965,7 +965,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
+ netif_napi_add(dev, &rp->napi, rhine_napipoll);
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
@@ -2281,8 +2281,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct device *hwdev = dev->dev.parent;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
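strscpy() is the preferred replacement for strlcpy() throughout these drivers: it never reads past a source that lacks a NUL terminator and reports truncation as -E2BIG instead of returning the would-be source length. A small, purely illustrative sketch of checking the result:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_copy_name(char *dst, size_t len, const char *src)
{
	/* strscpy() returns bytes copied, or -E2BIG on truncation */
	if (strscpy(dst, src, len) == -E2BIG)
		pr_debug("name \"%s\" truncated\n", src);
}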
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index be2b992f24d9..a502812ac418 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2846,8 +2846,7 @@ static int velocity_probe(struct device *dev, int irq,
netdev->netdev_ops = &velocity_netdev_ops;
netdev->ethtool_ops = &velocity_ethtool_ops;
- netif_napi_add(netdev, &vptr->napi, velocity_poll,
- VELOCITY_NAPI_WEIGHT);
+ netif_napi_add(netdev, &vptr->napi, velocity_poll);
netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX;
@@ -3420,13 +3419,13 @@ static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
struct velocity_info *vptr = netdev_priv(dev);
- strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
- strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strscpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strscpy(info->version, VELOCITY_VERSION, sizeof(info->version));
if (vptr->pdev)
- strlcpy(info->bus_info, pci_name(vptr->pdev),
+ strscpy(info->bus_info, pci_name(vptr->pdev),
sizeof(info->bus_info));
else
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index d3f960cc7c6e..ffdac6fac054 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -23,7 +23,6 @@
#define VELOCITY_VERSION "1.15"
#define VELOCITY_IO_SIZE 256
-#define VELOCITY_NAPI_WEIGHT 64
#define PKT_BUF_SZ 1540
@@ -939,7 +938,7 @@ enum velocity_owner {
#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR
ignore MIBFI,RACEI to
reduce intr. frequency
- NOTE.... do not enable NoBuf int mask at driver driver
+ NOTE.... do not enable NoBuf int mask at driver
when (1) NoBuf -> RxThreshold = SF
(2) OK -> RxThreshold = original value
*/
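The velocity change is another instance of the netif_napi_add() weight-argument removal: the explicit 64 (VELOCITY_NAPI_WEIGHT) was simply the stack-wide NAPI_POLL_WEIGHT default, so both the argument and the define go away. A sketch of the two registration forms after the change:

#include <linux/netdevice.h>

static void example_register_napi(struct net_device *ndev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int))
{
	/* default weight (NAPI_POLL_WEIGHT == 64) is now implied */
	netif_napi_add(ndev, napi, poll);
	/* drivers that really need another weight say so explicitly:
	 * netif_napi_add_weight(ndev, napi, poll, 16);
	 */
}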
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
new file mode 100644
index 000000000000..f5d43d8c9629
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wangxun network device configuration
+#
+
+config NET_VENDOR_WANGXUN
+ bool "Wangxun devices"
+ default y
+ help
+ If you have a network (Ethernet) card from Wangxun(R), say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Wangxun(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_WANGXUN
+
+config NGBE
+ tristate "Wangxun(R) GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ngbe.
+
+config TXGBE
+ tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) 10GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called txgbe.
+
+endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
new file mode 100644
index 000000000000..ac3fb06b233c
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Wangxun network device drivers.
+#
+
+obj-$(CONFIG_TXGBE) += txgbe/
+obj-$(CONFIG_NGBE) += ngbe/
diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile
new file mode 100644
index 000000000000..0baf75907496
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_NGBE) += ngbe.o
+
+ngbe-objs := ngbe_main.o
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
new file mode 100644
index 000000000000..f5fa6e5238cc
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_H_
+#define _NGBE_H_
+
+#include "ngbe_type.h"
+
+#define NGBE_MAX_FDIR_INDICES 7
+
+#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct ngbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char ngbe_driver_name[];
+
+#endif /* _NGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
new file mode 100644
index 000000000000..7674cb6e5700
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "ngbe.h"
+char ngbe_driver_name[] = "ngbe";
+
+/* ngbe_pci_tbl - PCI Device ID Table
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ngbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void ngbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ ngbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * ngbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ngbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ngbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int ngbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct ngbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ ngbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct ngbe_adapter),
+ NGBE_MAX_TX_QUEUES,
+ NGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ngbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ngbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void ngbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ngbe_driver = {
+ .name = ngbe_driver_name,
+ .id_table = ngbe_pci_tbl,
+ .probe = ngbe_probe,
+ .remove = ngbe_remove,
+ .shutdown = ngbe_shutdown,
+};
+
+module_pci_driver(ngbe_driver);
+
+MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@net-swift.com>");
+MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
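ngbe_probe() above mixes device-managed allocations (devm_alloc_etherdev_mqs() and devm_ioremap(), released automatically on driver detach) with resources that still need manual unwinding, so the goto ladder only covers the latter. A condensed sketch of that control flow, error labels hypothetical:

#include <linux/io.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	void __iomem *io;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   "example");
	if (err)
		goto err_disable_dev;

	/* device-managed: no matching unmap in the error path */
	io = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
			  pci_resource_len(pdev, 0));
	if (!io) {
		err = -EIO;
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_disable_dev:
	pci_disable_device(pdev);
	return err;
}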
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
new file mode 100644
index 000000000000..26e776c3539a
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_TYPE_H_
+#define _NGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ NGBE_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100
+#define NGBE_DEV_ID_EM_WX1860A2 0x0101
+#define NGBE_DEV_ID_EM_WX1860A2S 0x0102
+#define NGBE_DEV_ID_EM_WX1860A4 0x0103
+#define NGBE_DEV_ID_EM_WX1860A4S 0x0104
+#define NGBE_DEV_ID_EM_WX1860AL2 0x0105
+#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106
+#define NGBE_DEV_ID_EM_WX1860AL4 0x0107
+#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108
+#define NGBE_DEV_ID_EM_WX1860LC 0x0109
+#define NGBE_DEV_ID_EM_WX1860A1 0x010a
+#define NGBE_DEV_ID_EM_WX1860A1L 0x010b
+
+/* Subsystem ID */
+#define NGBE_SUBID_M88E1512_SFP 0x0003
+#define NGBE_SUBID_OCP_CARD 0x0040
+#define NGBE_SUBID_LY_M88E1512_SFP 0x0050
+#define NGBE_SUBID_M88E1512_RJ45 0x0051
+#define NGBE_SUBID_M88E1512_MIX 0x0052
+#define NGBE_SUBID_YT8521S_SFP 0x0060
+#define NGBE_SUBID_INTERNAL_YT8521S_SFP 0x0061
+#define NGBE_SUBID_YT8521S_SFP_GPIO 0x0062
+#define NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO 0x0064
+#define NGBE_SUBID_LY_YT8521S_SFP 0x0070
+#define NGBE_SUBID_RGMII_FPGA 0x0080
+
+#define NGBE_OEM_MASK 0x00FF
+
+#define NGBE_NCSI_SUP 0x8000
+#define NGBE_NCSI_MASK 0x8000
+#define NGBE_WOL_SUP 0x4000
+#define NGBE_WOL_MASK 0x4000
+
+#endif /* _NGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
new file mode 100644
index 000000000000..431303ca75b4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_TXGBE) += txgbe.o
+
+txgbe-objs := txgbe_main.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
new file mode 100644
index 000000000000..38ddbde0ed0f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_H_
+#define _TXGBE_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_MAX_FDIR_INDICES 63
+
+#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct txgbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char txgbe_driver_name[];
+
+#endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
new file mode 100644
index 000000000000..d3b9f73ecba4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "txgbe.h"
+
+char txgbe_driver_name[] = "txgbe";
+
+/* txgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void txgbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ txgbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * txgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct txgbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ txgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct txgbe_adapter),
+ TXGBE_MAX_TX_QUEUES,
+ TXGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver txgbe_driver = {
+ .name = txgbe_driver_name,
+ .id_table = txgbe_pci_tbl,
+ .probe = txgbe_probe,
+ .remove = txgbe_remove,
+ .shutdown = txgbe_shutdown,
+};
+
+module_pci_driver(txgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
new file mode 100644
index 000000000000..b2e329f50bae
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_TYPE_H_
+#define _TXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ txgbe_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define TXGBE_DEV_ID_SP1000 0x1001
+#define TXGBE_DEV_ID_WX1820 0x2001
+
+/* Subsystem IDs */
+/* SFP */
+#define TXGBE_ID_SP1000_SFP 0x0000
+#define TXGBE_ID_WX1820_SFP 0x2000
+#define TXGBE_ID_SFP 0x00
+
+/* copper */
+#define TXGBE_ID_SP1000_XAUI 0x1010
+#define TXGBE_ID_WX1820_XAUI 0x2010
+#define TXGBE_ID_XAUI 0x10
+#define TXGBE_ID_SP1000_SGMII 0x1020
+#define TXGBE_ID_WX1820_SGMII 0x2020
+#define TXGBE_ID_SGMII 0x20
+/* backplane */
+#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030
+#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030
+#define TXGBE_ID_KR_KX_KX4 0x30
+/* MAC Interface */
+#define TXGBE_ID_SP1000_MAC_XAUI 0x1040
+#define TXGBE_ID_WX1820_MAC_XAUI 0x2040
+#define TXGBE_ID_MAC_XAUI 0x40
+#define TXGBE_ID_SP1000_MAC_SGMII 0x1060
+#define TXGBE_ID_WX1820_MAC_SGMII 0x2060
+#define TXGBE_ID_MAC_SGMII 0x60
+
+#define TXGBE_NCSI_SUP 0x8000
+#define TXGBE_NCSI_MASK 0x8000
+#define TXGBE_WOL_SUP 0x4000
+#define TXGBE_WOL_MASK 0x4000
+#define TXGBE_DEV_MASK 0xf0
+
+/* Combined interface */
+#define TXGBE_ID_SFI_XAUI 0x50
+
+/* Revision ID */
+#define TXGBE_SP_MPW 1
+
+#endif /* _TXGBE_TYPE_H_ */
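The masked subsystem IDs above (TXGBE_ID_SFP, TXGBE_ID_XAUI, ...) exist because the media type lives in the low bits of the PCI subsystem device ID, with TXGBE_DEV_MASK selecting them. A hypothetical decode helper, not part of the patch:

#include <linux/pci.h>
/* assumes "txgbe_type.h" is in scope for the defines above */

static bool example_media_is_sfp(struct pci_dev *pdev)
{
	u16 media = pdev->subsystem_device & TXGBE_DEV_MASK;

	return media == TXGBE_ID_SFP || media == TXGBE_ID_SFI_XAUI;
}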
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 7779a36da3c8..7c52796273a4 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -461,11 +461,9 @@ static int w5100_spi_probe(struct spi_device *spi)
return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
}
-static int w5100_spi_remove(struct spi_device *spi)
+static void w5100_spi_remove(struct spi_device *spi)
{
w5100_remove(&spi->dev);
-
- return 0;
}
static const struct spi_device_id w5100_spi_ids[] = {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index ae24d6b86803..634946e87e5f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -719,9 +719,9 @@ static void w5100_hw_close(struct w5100_priv *priv)
static void w5100_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
@@ -883,7 +883,7 @@ static void w5100_rx_work(struct work_struct *work)
struct sk_buff *skb;
while ((skb = w5100_rx_skb(priv->ndev)))
- netif_rx_ni(skb);
+ netif_rx(skb);
w5100_enable_intr(priv);
}
@@ -1133,7 +1133,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
ndev->netdev_ops = &w5100_netdev_ops;
ndev->ethtool_ops = &w5100_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
+ netif_napi_add_weight(ndev, &priv->napi, w5100_napi_poll, 16);
/* This chip doesn't support VLAN packets with normal MTU,
* so disable VLAN for this device.
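The netif_rx_ni() callers in these drivers convert 1:1 to netif_rx(): since 5.18, netif_rx() checks the calling context itself and disables bottom halves when invoked from process context, which is exactly the w5100 rx work item case above. A sketch of delivering a frame from a workqueue after the change:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_deliver(struct net_device *ndev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);		/* safe in process context since 5.18 */
}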
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 402d5036f266..b0958fe8111e 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -282,9 +282,9 @@ static void w5300_hw_close(struct w5300_priv *priv)
static void w5300_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
@@ -603,7 +603,7 @@ static int w5300_probe(struct platform_device *pdev)
ndev->netdev_ops = &w5300_netdev_ops;
ndev->ethtool_ops = &w5300_ethtool_ops;
ndev->watchdog_timeo = HZ;
- netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);
+ netif_napi_add_weight(ndev, &priv->napi, w5300_napi_poll, 16);
/* This chip doesn't support VLAN packets with normal MTU,
* so disable VLAN for this device.
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 911b5ef9e680..0014729b8865 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 4a73127e10a6..6668d1b760d8 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -21,36 +21,45 @@
/* Configuration options */
/* Accept all incoming packets.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_PROMISC (1 << 0)
/* Jumbo frame support for Tx & Rx.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_JUMBO (1 << 1)
/* VLAN Rx & Tx frame support.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_VLAN (1 << 2)
/* Enable recognition of flow control frames on Rx
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames.
* Note: PAD from VLAN frames is not stripped.
- * This option defaults to disabled (set) */
+ * This option defaults to disabled (set)
+ */
#define XTE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
-set, the MAC will filter frames that have a mismatched type/length field
-and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
-types of frames are encountered. When this option is cleared, the MAC will
-allow these types of frames to be received.
-This option defaults to enabled (set) */
+ * set, the MAC will filter frames that have a mismatched type/length field
+ * and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
+ * types of frames are encountered. When this option is cleared, the MAC will
+ * allow these types of frames to be received.
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_TXEN (1 << 11)
/* Enable the receiver
-* This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_RXEN (1 << 12)
/* Default options set when device is initialized or reset */
@@ -68,18 +77,18 @@ This option defaults to enabled (set) */
#define TX_TAILDESC_PTR 0x04 /* rw */
#define TX_CHNL_CTRL 0x05 /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
-*/
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
+ */
#define CHNL_CTRL_IRQ_IOE (1 << 9)
#define CHNL_CTRL_IRQ_EN (1 << 7)
#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
@@ -87,35 +96,35 @@ This option defaults to enabled (set) */
#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
#define TX_IRQ_REG 0x06 /* rw */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
- 29 2 ErrIrq
- 30 1 DlyIrq
- 31 0 CoalIrq
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3:7 Reserved
+ * 29 2 ErrIrq
+ * 30 1 DlyIrq
+ * 31 0 CoalIrq
*/
#define TX_CHNL_STS 0x07 /* r */
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define RX_NXTDESC_PTR 0x08 /* r */
#define RX_CURBUF_ADDR 0x09 /* r */
@@ -124,17 +133,17 @@ This option defaults to enabled (set) */
#define RX_TAILDESC_PTR 0x0c /* rw */
#define RX_CHNL_CTRL 0x0d /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
*/
#define RX_IRQ_REG 0x0e /* rw */
#define IRQ_COAL (1 << 0)
@@ -142,13 +151,13 @@ This option defaults to enabled (set) */
#define IRQ_ERR (1 << 2)
#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
-*/
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3:7 Reserved
+ */
#define RX_CHNL_STS 0x0f /* r */
#define CHNL_STS_ENGBUSY (1 << 1)
#define CHNL_STS_EOP (1 << 2)
@@ -165,23 +174,23 @@ This option defaults to enabled (set) */
#define CHNL_STS_CMPERR (1 << 20)
#define CHNL_STS_TAILERR (1 << 21)
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define DMA_CONTROL_REG 0x10 /* rw */
#define DMA_CONTROL_RST (1 << 0)
@@ -271,7 +280,7 @@ This option defaults to enabled (set) */
#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
-/** MII Mamagement Control register (MGTCR) */
+/* MII Management Control register (MGTCR) */
#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
@@ -283,7 +292,7 @@ This option defaults to enabled (set) */
#define STS_CTRL_APP0_ERR (1 << 31)
#define STS_CTRL_APP0_IRQONEND (1 << 30)
-/* undoccumented */
+/* undocumented */
#define STS_CTRL_APP0_STOPONEND (1 << 29)
#define STS_CTRL_APP0_CMPLT (1 << 28)
#define STS_CTRL_APP0_SOP (1 << 27)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b900ab5aef2a..1066420d6a83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -117,8 +117,8 @@ int temac_indirect_busywait(struct temac_local *lp)
spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
if (WARN_ON(!hard_acs_rdy(lp)))
return -ETIMEDOUT;
- else
- return 0;
+
+ return 0;
}
/*
@@ -261,7 +261,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
* I/O functions
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
unsigned int dcrs;
@@ -286,7 +286,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
* such as with MicroBlaze and x86
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
return -1;
}
@@ -307,11 +307,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
- else {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(lp->rx_skb[i]);
- }
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skb[i]);
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
@@ -361,8 +359,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
- skb = netdev_alloc_skb_ip_align(ndev,
- XTE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
@@ -429,7 +428,8 @@ static void temac_do_set_mac_address(struct net_device *ndev)
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1
- * so don't affect them Set MAC bits [47:32] in EUAW1 */
+ * so don't affect them. Set MAC bits [47:32] in EUAW1
+ */
temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
@@ -529,66 +529,66 @@ static struct temac_option {
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXJMBO_MASK,
+ .m_or = XTE_RXC1_RXJMBO_MASK,
},
/* Turn on VLAN packet support for both Rx and Tx */
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXVLAN_MASK,
+ .m_or = XTE_TXC_TXVLAN_MASK,
},
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXVLAN_MASK,
+ .m_or = XTE_RXC1_RXVLAN_MASK,
},
/* Turn on FCS stripping on receive packets */
{
.opt = XTE_OPTION_FCS_STRIP,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXFCS_MASK,
+ .m_or = XTE_RXC1_RXFCS_MASK,
},
/* Turn on FCS insertion on transmit packets */
{
.opt = XTE_OPTION_FCS_INSERT,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXFCS_MASK,
+ .m_or = XTE_TXC_TXFCS_MASK,
},
/* Turn on length/type field checking on receive packets */
{
.opt = XTE_OPTION_LENTYPE_ERR,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXLT_MASK,
+ .m_or = XTE_RXC1_RXLT_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_RXFLO_MASK,
+ .m_or = XTE_FCC_RXFLO_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_TXFLO_MASK,
+ .m_or = XTE_FCC_TXFLO_MASK,
},
/* Turn on promiscuous frame filtering (all frames are received ) */
{
.opt = XTE_OPTION_PROMISC,
.reg = XTE_AFM_OFFSET,
- .m_or =XTE_AFM_EPPRM_MASK,
+ .m_or = XTE_AFM_EPPRM_MASK,
},
/* Enable transmitter if not already enabled */
{
.opt = XTE_OPTION_TXEN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXEN_MASK,
+ .m_or = XTE_TXC_TXEN_MASK,
},
/* Enable receiver? */
{
.opt = XTE_OPTION_RXEN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXEN_MASK,
+ .m_or = XTE_RXC1_RXEN_MASK,
},
{}
};
@@ -640,7 +640,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset RX reset timeout!!\n");
+ "%s RX reset timeout!!\n", __func__);
break;
}
}
@@ -652,7 +652,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset TX reset timeout!!\n");
+ "%s TX reset timeout!!\n", __func__);
break;
}
}
@@ -671,7 +671,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset DMA reset timeout!!\n");
+ "%s DMA reset timeout!!\n", __func__);
break;
}
}
@@ -679,7 +679,7 @@ static void temac_device_reset(struct net_device *ndev)
if (temac_dma_bd_init(ndev)) {
dev_err(&ndev->dev,
- "temac_device_reset descriptor allocation failed\n");
+ "%s descriptor allocation failed\n", __func__);
}
spin_lock_irqsave(lp->indirect_lock, flags);
@@ -690,7 +690,8 @@ static void temac_device_reset(struct net_device *ndev)
spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
- * but leave receiver and transmitter disabled. */
+ * but leave receiver and transmitter disabled.
+ */
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
@@ -722,9 +723,15 @@ static void temac_adjust_link(struct net_device *ndev)
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
- case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
- case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
- case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+ case SPEED_1000:
+ mii_speed |= XTE_EMCFG_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ mii_speed |= XTE_EMCFG_LINKSPD_100;
+ break;
+ case SPEED_10:
+ mii_speed |= XTE_EMCFG_LINKSPD_10;
+ break;
}
/* Write new speed setting out to TEMAC */
@@ -1006,9 +1013,8 @@ static void ll_temac_recv(struct net_device *ndev)
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) {
-
/* Convert from device endianness (be32) to cpu
- * endiannes, and if necessary swap the bytes
+ * endianness, and if necessary swap the bytes
* (back) for proper IP checksum byte order
* (be16).
*/
@@ -1433,6 +1439,8 @@ static int temac_probe(struct platform_device *pdev)
lp->indirect_lock = devm_kmalloc(&pdev->dev,
sizeof(*lp->indirect_lock),
GFP_KERNEL);
+ if (!lp->indirect_lock)
+ return -ENOMEM;
spin_lock_init(lp->indirect_lock);
}
@@ -1512,7 +1520,7 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
return PTR_ERR(lp->sdma_regs);
}
- if (of_get_property(dma_np, "little-endian", NULL)) {
+ if (of_property_read_bool(dma_np, "little-endian")) {
lp->dma_in = temac_dma_in32_le;
lp->dma_out = temac_dma_out32_le;
} else {
@@ -1560,16 +1568,12 @@ static int temac_probe(struct platform_device *pdev)
}
/* Error handle returned DMA RX and TX interrupts */
- if (lp->rx_irq < 0) {
- if (lp->rx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA RX irq\n");
- return lp->rx_irq;
- }
- if (lp->tx_irq < 0) {
- if (lp->tx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA TX irq\n");
- return lp->tx_irq;
- }
+ if (lp->rx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->rx_irq,
+ "could not get DMA RX irq\n");
+ if (lp->tx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->tx_irq,
+ "could not get DMA TX irq\n");
if (temac_np) {
/* Retrieve the MAC address */
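The dev_err_probe() conversion above folds the open-coded -EPROBE_DEFER special case into a single call: it logs only for real errors, records deferred probes in debugfs (devices_deferred), and hands the error back so it can be used directly in a return statement. A minimal sketch:

#include <linux/device.h>

static int example_request_irq(struct device *dev, int irq)
{
	if (irq < 0)
		/* silent for -EPROBE_DEFER, dev_err() otherwise */
		return dev_err_probe(dev, irq, "could not get IRQ\n");

	return irq;
}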
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 6fd2dea4e60f..2371c072b53f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -29,7 +29,8 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
- * in the LSW0 register */
+ * in the LSW0 register
+ */
spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
@@ -88,7 +89,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
}
/* Enable the MDIO bus by asserting the enable bit and writing
- * in the clock config */
+ * in the clock config
+ */
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
bus = devm_mdiobus_alloc(&pdev->dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 5b4d153b1492..6370c447ac5c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -119,11 +119,11 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
-/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
-#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
-#define XAXIDMA_DFT_RX_WAITBOUND 254
+#define XAXIDMA_DFT_TX_USEC 50
+#define XAXIDMA_DFT_RX_THRESHOLD 1
+#define XAXIDMA_DFT_RX_USEC 50
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -386,6 +386,7 @@ struct axidma_bd {
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
* @pcs_phy: Reference to PCS/PMA PHY if used
+ * @pcs: phylink pcs structure for PCS PHY
* @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
* @axi_clk: AXI4-Lite bus clock
* @misc_clks: Misc ethernet clocks (AXI4-Stream, Ref, MGT clocks)
@@ -394,6 +395,28 @@ struct axidma_bd {
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
+ * @napi_rx: NAPI RX control structure
+ * @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address (start address) of the RX buffer descr. ring
+ * @rx_bd_num: Size of RX buffer descriptor ring
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @rx_packets: RX packet count for statistics
+ * @rx_bytes: RX byte count for statistics
+ * @rx_stat_sync: Synchronization object for RX stats
+ * @napi_tx: NAPI TX control structure
+ * @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address (start address) of the TX buffer descr. ring
+ * @tx_bd_num: Size of TX buffer descriptor ring
+ * @tx_bd_ci: Stores the next Tx buffer descriptor in the ring that may be
+ * complete. Only updated at runtime by TX NAPI poll.
+ * @tx_bd_tail: Stores the index of the next Tx buffer descriptor in the ring
+ * to be populated.
+ * @tx_packets: TX packet count for statistics
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -401,19 +424,6 @@ struct axidma_bd {
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @features: Stores the extended features supported by the axienet hw
- * @tx_bd_v: Virtual address of the TX buffer descriptor ring
- * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
- * @tx_bd_num: Size of TX buffer descriptor ring
- * @rx_bd_v: Virtual address of the RX buffer descriptor ring
- * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
- * @rx_bd_num: Size of RX buffer descriptor ring
- * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while alloc. BDs before a TX starts
- * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while processing BDs after the TX
- * completed.
- * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
- * accessed currently.
* @max_frm_size: Stores the maximum size of the frame that can be that
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
@@ -422,18 +432,19 @@ struct axidma_bd {
* @csum_offload_on_tx_path: Stores the checksum selection on TX side.
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
+ * @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @coalesce_usec_tx: IRQ coalesce delay for TX
*/
struct axienet_local {
struct net_device *ndev;
struct device *dev;
- struct device_node *phy_node;
-
struct phylink *phylink;
struct phylink_config phylink_config;
struct mdio_device *pcs_phy;
+ struct phylink_pcs pcs;
bool switch_x_sgmii;
@@ -447,6 +458,27 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
+ struct napi_struct napi_rx;
+ u32 rx_dma_cr;
+ struct axidma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
+ u32 rx_bd_ci;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ struct u64_stats_sync rx_stat_sync;
+
+ struct napi_struct napi_tx;
+ u32 tx_dma_cr;
+ struct axidma_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync tx_stat_sync;
+
struct work_struct dma_err_task;
int tx_irq;
@@ -457,16 +489,6 @@ struct axienet_local {
u32 options;
u32 features;
- struct axidma_bd *tx_bd_v;
- dma_addr_t tx_bd_p;
- u32 tx_bd_num;
- struct axidma_bd *rx_bd_v;
- dma_addr_t rx_bd_p;
- u32 rx_bd_num;
- u32 tx_bd_ci;
- u32 tx_bd_tail;
- u32 rx_bd_ci;
-
u32 max_frm_size;
u32 rxmem;
@@ -474,7 +496,9 @@ struct axienet_local {
int csum_offload_on_rx_path;
u32 coalesce_count_rx;
+ u32 coalesce_usec_rx;
u32 coalesce_count_tx;
+ u32 coalesce_usec_tx;
};
/**
@@ -535,6 +559,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
iowrite32(value, lp->regs + offset);
}
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+
+static inline void axienet_dma_out32(struct axienet_local *lp,
+ off_t reg, u32 value)
+{
+ iowrite32(value, lp->dma_regs + reg);
+}
+
+#if defined(CONFIG_64BIT) && defined(iowrite64)
+/**
+ * axienet_dma_out64 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out64(struct axienet_local *lp,
+ off_t reg, u64 value)
+{
+ iowrite64(value, lp->dma_regs + reg);
+}
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out64(lp, reg, addr);
+ else
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#else /* CONFIG_64BIT */
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#endif /* CONFIG_64BIT */
+
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
int axienet_mdio_enable(struct axienet_local *lp);
void axienet_mdio_disable(struct axienet_local *lp);
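The new rx_*/tx_* counters in axienet_local are u64_stats_t guarded by a u64_stats_sync, which keeps 64-bit statistics tear-free on 32-bit SMP kernels: writers bracket their updates, readers loop until they observe a consistent snapshot. A minimal sketch of both sides, container struct invented:

#include <linux/u64_stats_sync.h>

struct example_stats {			/* hypothetical container */
	u64_stats_t packets;
	struct u64_stats_sync sync;	/* u64_stats_init() at setup time */
};

/* writer side, e.g. from NAPI poll */
static void example_count(struct example_stats *s, unsigned long n)
{
	u64_stats_update_begin(&s->sync);
	u64_stats_add(&s->packets, n);
	u64_stats_update_end(&s->sync);
}

/* reader side, e.g. from ndo_get_stats64 */
static u64 example_read(struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->sync);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->sync, start));

	return packets;
}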
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 23ac353b35fe..d1d772580da9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,7 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
- * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
+ * Copyright (c) 2019 - 2022 Calian Advanced Technologies
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -33,7 +33,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
+#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -41,8 +41,9 @@
#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT 64
+#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
@@ -132,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
return ioread32(lp->dma_regs + reg);
}
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
-{
- iowrite32(value, lp->dma_regs + reg);
-}
-
-static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
- dma_addr_t addr)
-{
- axienet_dma_out32(lp, reg, lower_32_bits(addr));
-
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
-}
-
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
struct axidma_bd *desc)
{
@@ -189,7 +166,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
/* If we end up here, tx_bd_v must have been DMA allocated. */
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
@@ -214,18 +191,88 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
if (lp->rx_bd_v[i].cntrl) {
phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
}
}
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
/**
+ * axienet_usec_to_timer - Calculate IRQ delay timer value
+ * @lp: Pointer to the axienet_local structure
+ * @coalesce_usec: Microseconds to convert into timer value
+ */
+static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+{
+ u32 result;
+ u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+
+ if (lp->axi_clk)
+ clk_rate = clk_get_rate(lp->axi_clk);
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
+ (u64)125000000);
+ if (result > 255)
+ result = 255;
+
+ return result;
+}
+
+/**
+ * axienet_dma_start - Set up DMA registers and start DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_start(struct axienet_local *lp)
+{
+ /* Start updating the Rx channel control register */
+ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_rx > 1)
+ lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_tx > 1)
+ lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+}
+
+/**
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
@@ -237,7 +284,6 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
int i;
struct sk_buff *skb;
struct axienet_local *lp = netdev_priv(ndev);
@@ -248,13 +294,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->tx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
return -ENOMEM;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->rx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
@@ -284,9 +330,9 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
lp->rx_bd_v[i].skb = skb;
- addr = dma_map_single(ndev->dev.parent, skb->data,
+ addr = dma_map_single(lp->dev, skb->data,
lp->max_frm_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, addr)) {
+ if (dma_mapping_error(lp->dev, addr)) {
netdev_err(ndev, "DMA mapping error\n");
goto out;
}
@@ -295,50 +341,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
return 0;
out:
@@ -496,7 +499,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
@@ -506,28 +510,74 @@ static int __axienet_device_reset(struct axienet_local *lp)
* they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
- XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- return -ETIMEDOUT;
- }
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
+ }
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
}
return 0;
}
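
read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before, args...) repeatedly evaluates val = op(args...) until cond holds or timeout_us microseconds elapse, returning 0 or -ETIMEDOUT; it replaces the hand-rolled udelay() loop deleted above. A sketch of the open-coded equivalent of the first call, for illustration only (the real macro's sleep granularity differs slightly):

	u32 value;
	unsigned long deadline = jiffies + usecs_to_jiffies(50000);

	for (;;) {
		value = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		if (!(value & XAXIDMA_CR_RESET_MASK))
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(DELAY_OF_ONE_MILLISEC,
			     DELAY_OF_ONE_MILLISEC * 2);
	}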
/**
+ * axienet_dma_stop - Stop DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_stop(struct axienet_local *lp)
+{
+ int count;
+ u32 cr, sr;
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ synchronize_irq(lp->rx_irq);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ synchronize_irq(lp->tx_irq);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ axienet_lock_mii(lp);
+ __axienet_device_reset(lp);
+ axienet_unlock_mii(lp);
+}
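
The stop sequence masks the channel interrupts first and uses synchronize_irq() so no in-flight ISR can re-arm or schedule NAPI after the control registers are cleared, then polls for a graceful halt before falling back to a hard reset. The halt wait could equally be expressed with the same read_poll_timeout() helper used in __axienet_device_reset(); a sketch, not part of this patch:

	u32 sr;

	/* up to ~100 ms, sleeping 20 ms per poll, as in the loops above */
	read_poll_timeout(axienet_dma_in32, sr, sr & XAXIDMA_SR_HALT_MASK,
			  20 * USEC_PER_MSEC, 100 * USEC_PER_MSEC, false,
			  lp, XAXIDMA_RX_SR_OFFSET);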
+
+/**
* axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
* @ndev: Pointer to the net_device structure
*
* This function is called to reset and initialize the Axi Ethernet core. This
* is typically called during initialization. It does a reset of the Axi DMA
* Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
- * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+ * are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
* Returns 0 on success or a negative error number otherwise.
@@ -547,7 +597,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -590,54 +640,55 @@ static int axienet_device_reset(struct net_device *ndev)
/**
* axienet_free_tx_chain - Clean up a series of linked TX descriptors.
- * @ndev: Pointer to the net_device structure
+ * @lp: Pointer to the axienet_local structure
* @first_bd: Index of first descriptor to clean up
- * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @nr_bds: Max number of descriptors to clean up
+ * @force: Whether to clean descriptors even if not complete
* @sizep: Pointer to a u32 filled with the total sum of all bytes
- * in all cleaned-up descriptors. Ignored if NULL.
+ * in all cleaned-up descriptors. Ignored if NULL.
+ * @budget: NAPI budget (use 0 when not called from NAPI poll)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
* Returns the number of descriptors handled.
*/
-static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
- int nr_bds, u32 *sizep)
+static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ int nr_bds, bool force, u32 *sizep, int budget)
{
- struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- int max_bds = nr_bds;
unsigned int status;
dma_addr_t phys;
int i;
- if (max_bds == -1)
- max_bds = lp->tx_bd_num;
-
- for (i = 0; i < max_bds; i++) {
+ for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
status = cur_p->status;
- /* If no number is given, clean up *all* descriptors that have
- * been completed by the MAC.
+ /* If force is not specified, clean up only descriptors
+ * that have been completed by the MAC.
*/
- if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
- dev_consume_skb_irq(cur_p->skb);
+ napi_consume_skb(cur_p->skb, budget);
- cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
- cur_p->status = 0;
cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -647,38 +698,6 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
- * @ndev: Pointer to the net_device structure
- *
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
- */
-static void axienet_start_xmit_done(struct net_device *ndev)
-{
- struct axienet_local *lp = netdev_priv(ndev);
- u32 packets = 0;
- u32 size = 0;
-
- packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
-
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci -= lp->tx_bd_num;
-
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
-
- /* Matches barrier in axienet_start_xmit */
- smp_mb();
-
- netif_wake_queue(ndev);
-}
-
-/**
* axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
* @lp: Pointer to the axienet_local structure
* @num_frag: The number of BDs to check for
@@ -689,19 +708,73 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* This function is invoked before BDs are allocated and transmission starts.
* This function returns 0 if a BD or group of BDs can be allocated for
* transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
+ * returns a busy status.
*/
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+
+ /* Ensure we see all descriptor updates from device or TX polling */
+ rmb();
+ cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+ lp->tx_bd_num];
+ if (cur_p->cntrl)
return NETDEV_TX_BUSY;
return 0;
}
/**
+ * axienet_tx_poll - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of TX packets to process.
+ *
+ * Return: Number of TX packets processed.
+ *
+ * This function is invoked from the NAPI processing to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static int axienet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
+ struct net_device *ndev = lp->ndev;
+ u32 size = 0;
+ int packets;
+
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+
+ if (packets) {
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+ u64_stats_update_end(&lp->tx_stat_sync);
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable TX completion interrupts. This should
+ * cause an immediate interrupt if any TX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ }
+ return packets;
+}
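
Both poll functions follow the standard NAPI interrupt-mitigation handshake: the ISR masks completion interrupts and schedules NAPI, and poll re-arms them only once napi_complete_done() confirms NAPI really went idle. A generic sketch of that shape (process_one() and unmask_completion_irqs() are hypothetical placeholders):

	static bool process_one(void);			/* hypothetical: reap one completion */
	static void unmask_completion_irqs(void);	/* hypothetical: CR write */

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		while (done < budget && process_one())
			done++;

		if (done < budget && napi_complete_done(napi, done))
			unmask_completion_irqs();

		return done;	/* returning budget keeps NAPI scheduled */
	}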
+
+/**
* axienet_start_xmit - Starts the transmission.
* @skb: sk_buff pointer that contains data to be Txed.
* @ndev: Pointer to net_device structure.
@@ -723,27 +796,25 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_index_off;
skb_frag_t *frag;
dma_addr_t tail_p, phys;
+ u32 orig_tail_ptr, new_tail_ptr;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- u32 orig_tail_ptr = lp->tx_bd_tail;
- num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ orig_tail_ptr = lp->tx_bd_tail;
+ new_tail_ptr = orig_tail_ptr;
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
+ num_frag = skb_shinfo(skb)->nr_frags;
+ cur_p = &lp->tx_bd_v[orig_tail_ptr];
+ if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -761,9 +832,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- phys = dma_map_single(ndev->dev.parent, skb->data,
+ phys = dma_map_single(lp->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -773,22 +844,20 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ cur_p = &lp->tx_bd_v[new_tail_ptr];
frag = &skb_shinfo(skb)->frags[ii];
- phys = dma_map_single(ndev->dev.parent,
+ phys = dma_map_single(lp->dev,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
- axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
- NULL);
- lp->tx_bd_tail = orig_tail_ptr;
-
+ axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
+ true, NULL, 0);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -798,87 +867,108 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
cur_p->skb = skb;
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
+
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_tx_poll */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
return NETDEV_TX_OK;
}
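
The queue-stop logic moves from the old "return busy when full" scheme to the conventional lock-free producer/consumer protocol: the transmit path stops the queue as soon as fewer than MAX_SKB_FRAGS + 1 descriptors remain, and its smp_mb() pairs with the one in axienet_tx_poll() so a wake-up cannot be lost between the stop and the re-check. In outline (commentary, not driver code):

	/*
	 *  start_xmit (producer)            tx_poll (consumer)
	 *  ---------------------            ------------------
	 *  queue descriptors                free descriptors
	 *  if (!enough_space) {             smp_mb();
	 *          netif_stop_queue();      if (enough_space)
	 *          smp_mb();                        netif_wake_queue();
	 *          if (enough_space)
	 *                  netif_wake_queue();
	 *  }
	 */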
/**
- * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
- * BD processing.
- * @ndev: Pointer to net_device structure.
+ * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of RX packets to process.
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * Return: Number of RX packets processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
u32 size = 0;
- u32 packets = 0;
+ int packets = 0;
dma_addr_t tail_p = 0;
- struct axienet_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
+ struct sk_buff *skb, *new_skb;
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
dma_addr_t phys;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
- DMA_FROM_DEVICE);
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
-
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(lp->dev, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
- size += length;
- packets++;
+ size += length;
+ packets++;
+ }
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ new_skb = napi_alloc_skb(napi, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
- phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ phys = dma_map_single(lp->dev, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
- netdev_err(ndev, "RX DMA mapping error\n");
+ netdev_err(lp->ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
- return;
+ break;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -886,16 +976,32 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = new_skb;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- ndev->stats.rx_packets += packets;
- ndev->stats.rx_bytes += size;
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, packets);
+ u64_stats_add(&lp->rx_bytes, size);
+ u64_stats_update_end(&lp->rx_stat_sync);
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable RX completion interrupts. This should
+ * cause an immediate interrupt if any RX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ }
+ return packets;
}
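
Switching to napi_alloc_skb() also drops the explicit IP alignment: unlike netdev_alloc_skb_ip_align(), the NAPI allocator already reserves NET_SKB_PAD + NET_IP_ALIGN of headroom and may serve the buffer from a per-CPU page-fragment cache, which is why no skb_reserve() appears in the refill path. Sketch of the call it replaces:

	/* before: */
	skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
	/* after: GFP_ATOMIC allocation, headroom already reserved */
	skb = napi_alloc_skb(napi, lp->max_frm_size);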
/**
@@ -905,46 +1011,40 @@ static void axienet_recv(struct net_device *ndev)
*
* Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
+ * TX BD processing.
*/
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ } else {
+ /* Disable further TX completion interrupts and schedule
+ * NAPI to handle the completions.
+ */
+ u32 cr = lp->tx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi_tx);
}
-out:
+
return IRQ_HANDLED;
}
@@ -955,46 +1055,40 @@ out:
*
* Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
* processing.
*/
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ } else {
+ /* Disable further RX completion interrupts and schedule
+ * NAPI receive.
+ */
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi_rx);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1068,6 +1162,9 @@ static int axienet_open(struct net_device *ndev)
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
ndev->name, ndev);
@@ -1093,6 +1190,8 @@ err_eth_irq:
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
@@ -1112,46 +1211,23 @@ err_tx_irq:
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr, sr;
- int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_stop(lp);
axienet_iow(lp, XAE_IE_OFFSET, 0);
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
-
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
-
- /* Do a reset to ensure DMA is really stopped */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
-
cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
@@ -1220,10 +1296,32 @@ static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(lp->phylink, rq, cmd);
}
+static void
+axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
+ stats->rx_packets = u64_stats_read(&lp->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
+ stats->tx_packets = u64_stats_read(&lp->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
+}
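
The reader loops retry whenever a writer in the poll functions interleaved, which prevents torn 64-bit reads on 32-bit machines. A sketch of the fields this assumes were added to struct axienet_local earlier in the series:

	/* inside struct axienet_local (assumed layout): */
	struct u64_stats_sync rx_stat_sync;	/* seqcount for the pair below */
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	struct u64_stats_sync tx_stat_sync;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;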
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
.ndo_start_xmit = axienet_start_xmit,
+ .ndo_get_stats64 = axienet_get_stats64,
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
@@ -1245,8 +1343,8 @@ static const struct net_device_ops axienet_netdev_ops = {
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
- strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
/**
@@ -1277,7 +1375,7 @@ static int axienet_ethtools_get_regs_len(struct net_device *ndev)
static void axienet_ethtools_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *ret)
{
- u32 *data = (u32 *) ret;
+ u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N;
struct axienet_local *lp = netdev_priv(ndev);
@@ -1352,7 +1450,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev,
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1421,14 +1520,12 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+
+ ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
+ ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
return 0;
}
@@ -1461,8 +1558,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->rx_coalesce_usecs)
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
if (ecoalesce->tx_max_coalesced_frames)
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ if (ecoalesce->tx_coalesce_usecs)
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
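
Note the handler only overwrites parameters the user passed as non-zero, so frame counts and usecs can be set independently. For example, a userspace request such as `ethtool -C ethX rx-frames 16 rx-usecs 50` arrives here as (sketch):

	struct ethtool_coalesce ec = {
		.rx_max_coalesced_frames = 16,	/* -> lp->coalesce_count_rx */
		.rx_coalesce_usecs	 = 50,	/* -> lp->coalesce_usec_rx */
		/* tx_* left 0: existing TX settings are preserved */
	};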
@@ -1493,7 +1594,8 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
}
static const struct ethtool_ops axienet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1509,78 +1611,78 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ return container_of(pcs, struct axienet_local, pcs);
+}
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
- break;
- default:
- break;
- }
+static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+
+ phylink_mii_c22_pcs_get_state(pcs_phy, state);
}
-static void axienet_mac_an_restart(struct phylink_config *config)
+static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
+ phylink_mii_c22_pcs_an_restart(pcs_phy);
}
-static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t iface)
+static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+ struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
struct axienet_local *lp = netdev_priv(ndev);
int ret;
- switch (iface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (!lp->switch_x_sgmii)
- return 0;
-
- ret = mdiobus_write(lp->pcs_phy->bus,
- lp->pcs_phy->addr,
- XLNX_MII_STD_SELECT_REG,
- iface == PHY_INTERFACE_MODE_SGMII ?
+ if (lp->switch_x_sgmii) {
+ ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
+ interface == PHY_INTERFACE_MODE_SGMII ?
XLNX_MII_STD_SELECT_SGMII : 0);
- if (ret < 0)
- netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
+ if (ret < 0) {
+ netdev_warn(ndev,
+ "Failed to switch PHY interface: %d\n",
ret);
- return ret;
- default:
- return 0;
+ return ret;
+ }
}
+
+ ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
+
+ return ret;
}
-static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static const struct phylink_pcs_ops axienet_pcs_ops = {
+ .pcs_get_state = axienet_pcs_get_state,
+ .pcs_config = axienet_pcs_config,
+ .pcs_an_restart = axienet_pcs_an_restart,
+};
+
+static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
- int ret;
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
- state->interface,
- state->advertising);
- if (ret < 0)
- netdev_warn(ndev, "Failed to configure PCS: %d\n",
- ret);
- break;
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return &lp->pcs;
- default:
- break;
- }
+ return NULL;
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* nothing meaningful to do */
}
static void axienet_mac_link_down(struct phylink_config *config,
@@ -1635,9 +1737,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = phylink_generic_validate,
- .mac_pcs_get_state = axienet_mac_pcs_get_state,
- .mac_an_restart = axienet_mac_an_restart,
- .mac_prepare = axienet_mac_prepare,
+ .mac_select_pcs = axienet_mac_select_pcs,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
@@ -1652,29 +1752,27 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
*/
static void axienet_dma_err_handler(struct work_struct *work)
{
+ u32 i;
u32 axienet_status;
- u32 cr, i;
+ struct axidma_bd *cur_p;
struct axienet_local *lp = container_of(work, struct axienet_local,
dma_err_task);
struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
+
+ axienet_dma_stop(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->cntrl) {
dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, addr,
+ dma_unmap_single(lp->dev, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
@@ -1707,50 +1805,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
axienet_status &= ~XAE_RCW1_RX_MASK;
@@ -1771,6 +1826,8 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
}
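
The error handler now quiesces in a strict order so nothing touches the rings while they are rebuilt (outline, matching the calls above):

	/*
	 * napi_disable(tx, rx)   - no poller runs concurrently
	 * axienet_dma_stop()     - mask IRQs, halt channels, hard reset
	 * unmap/reset all BDs    - rings back to a clean state
	 * axienet_dma_start()    - re-arm using the cached CR values
	 * napi_enable(rx, tx)    - resume processing
	 */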
/**
@@ -1819,6 +1876,12 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+ u64_stats_init(&lp->rx_stat_sync);
+ u64_stats_init(&lp->tx_stat_sync);
+
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2003,6 +2066,11 @@ static int axienet_probe(struct platform_device *pdev)
iowrite32(0x0, desc);
}
}
+ if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
+ }
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
@@ -2025,33 +2093,48 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (lp->phy_node) {
- ret = axienet_mdio_setup(lp);
- if (ret)
- dev_warn(&pdev->dev,
- "error registering MDIO bus: %d\n", ret);
- }
if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
- if (!lp->phy_node) {
- dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
+ if (!np) {
+ /* Deprecated: Always use "pcs-handle" for pcs_phy.
+ * Falling back to "phy-handle" here is only for
+ * backward compatibility with old device trees.
+ */
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ }
+ if (!np) {
+ dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
ret = -EINVAL;
goto cleanup_mdio;
}
- lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ lp->pcs_phy = of_mdio_find_device(np);
if (!lp->pcs_phy) {
ret = -EPROBE_DEFER;
+ of_node_put(np);
goto cleanup_mdio;
}
- lp->phylink_config.pcs_poll = true;
+ of_node_put(np);
+ lp->pcs.ops = &axienet_pcs_ops;
+ lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
- lp->phylink_config.legacy_pre_march2020 = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
@@ -2088,8 +2171,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
- of_node_put(lp->phy_node);
-
cleanup_clk:
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
clk_disable_unprepare(lp->axi_clk);
@@ -2118,9 +2199,6 @@ static int axienet_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
clk_disable_unprepare(lp->axi_clk);
- of_node_put(lp->phy_node);
- lp->phy_node = NULL;
-
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 48f544f6c999..0b3b6935c558 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -106,7 +106,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
- * into MWD register. The the MCR register is then appropriately setup
+ * into MWD register. The MCR register is then appropriately setup
* to finish the write operation.
*/
static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
@@ -126,7 +126,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
return ret;
}
- axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
XAE_MDIO_MCR_PHYAD_MASK) |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 519599480b15..a3967f8de417 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
*
- * 2007 - 2013 (c) Xilinx, Inc.
+ * Copyright (c) 2007 - 2013 Xilinx, Inc.
*/
#include <linux/module.h>
@@ -91,13 +90,7 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
-#define ALIGNMENT 4
-
-/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -115,7 +108,7 @@
* @next_tx_buf_to_use: next Tx buffer to write to
* @next_rx_buf_to_use: next Rx buffer to read from
* @base_addr: base address of the Emaclite device
- * @reset_lock: lock used for synchronization
+ * @reset_lock: lock to serialize xmit and tx_timeout execution
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
* @phy_dev: pointer to the PHY device
@@ -124,7 +117,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +125,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +136,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -207,7 +198,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
const u16 *from_u16_ptr;
u32 align_buffer;
@@ -265,7 +256,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -330,7 +321,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -346,8 +336,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -423,7 +414,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -433,23 +423,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -498,7 +490,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
* @dev: Pointer to the network device instance
* @address: Void pointer to the sockaddr structure
*
- * This function copies the HW address from the sockaddr strucutre to the
+ * This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
*
* Return: Error if the net device is busy or 0 if the addr is set
@@ -599,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
- unsigned int align;
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
- skb = netdev_alloc_skb(dev, len + ALIGNMENT);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
@@ -611,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /* A new skb should have the data halfword aligned, but this code is
- * here just in case that isn't true. Calculate how many
- * bytes we should reserve to get the data to start on a word
- * boundary
- */
- align = BUFFER_ALIGN(skb->data);
- if (align)
- skb_reserve(skb, align);
-
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
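
NET_IP_ALIGN (2 on most architectures) offsets the 14-byte Ethernet header so that the IP header which follows starts on a 4-byte boundary, which is all the old ALIGNMENT/BUFFER_ALIGN machinery plus the extra skb_reserve(skb, 2) was trying to achieve. Layout sketch:

	/* skb->data after skb_reserve(skb, NET_IP_ALIGN):
	 *
	 *   [2-byte pad][14-byte Ethernet header][IP header, 4-byte aligned]
	 */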
@@ -671,8 +653,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -682,8 +663,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the second buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -823,10 +803,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
struct mii_bus *bus;
- int rc;
struct resource res;
struct device_node *np = of_get_parent(lp->phy_node);
struct device_node *npp;
+ int rc, ret;
/* Don't register the MDIO bus if the phy_node or its parent node
* can't be found.
@@ -836,16 +816,24 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
return -ENODEV;
}
npp = of_get_parent(np);
-
- of_address_to_resource(npp, 0, &res);
+ ret = of_address_to_resource(npp, 0, &res);
+ of_node_put(npp);
+ if (ret) {
+ dev_err(dev, "%s resource error!\n",
+ dev->of_node->full_name);
+ of_node_put(np);
+ return ret;
+ }
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
"MDIO of the phy is not registered yet\n");
else
put_device(&phydev->mdio.dev);
+ of_node_put(np);
return 0;
}
@@ -858,6 +846,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus = mdiobus_alloc();
if (!bus) {
dev_err(dev, "Failed to allocate mdiobus\n");
+ of_node_put(np);
return -ENOMEM;
}
@@ -870,6 +859,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus->parent = dev;
rc = of_mdiobus_register(bus, np);
+ of_node_put(np);
if (rc) {
dev_err(dev, "Failed to register mdio bus.\n");
goto err_register;
@@ -926,8 +916,6 @@ static int xemaclite_open(struct net_device *dev)
xemaclite_disable_interrupts(lp);
if (lp->phy_node) {
- u32 bmcr;
-
lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
xemaclite_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
@@ -938,19 +926,6 @@ static int xemaclite_open(struct net_device *dev)
/* EmacLite doesn't support giga-bit speeds */
phy_set_max_speed(lp->phy_dev, SPEED_100);
-
- /* Don't advertise 1000BASE-T Full/Half duplex speeds */
- phy_write(lp->phy_dev, MII_CTRL1000, 0);
-
- /* Advertise only 10 and 100mbps full/half duplex speeds */
- phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
- ADVERTISE_CSMA);
-
- /* Restart auto negotiation */
- bmcr = phy_read(lp->phy_dev, MII_BMCR);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- phy_write(lp->phy_dev, MII_BMCR, bmcr);
-
phy_start(lp->phy_dev);
}
@@ -1085,7 +1060,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
}
static const struct ethtool_ops xemaclite_ethtool_ops = {
@@ -1183,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (rc) {
dev_err(dev,
"Cannot register network device, aborting\n");
- goto error;
+ goto put_node;
}
dev_info(dev,
@@ -1191,6 +1166,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
(unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
return 0;
+put_node:
+ of_node_put(lp->phy_node);
error:
free_netdev(ndev);
return rc;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index f9587e55b842..894e92ef415b 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1402,7 +1402,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
+ strscpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 0e878fa6e322..b33f64c54b0e 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -20,9 +20,9 @@ if NET_VENDOR_XSCALE
config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
- depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR && OF
select PHYLIB
- select OF_MDIO if OF
+ select OF_MDIO
select NET_PTP_CLASSIFY
help
Say Y here if you want to use built-in Ethernet ports
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index df77a22d1b81..3b0c5f177447 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,8 +29,8 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
-#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
@@ -38,6 +38,11 @@
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
+#include <linux/types.h>
+
+#define IXP4XX_ETH_NPEA 0x00
+#define IXP4XX_ETH_NPEB 0x10
+#define IXP4XX_ETH_NPEC 0x20
#include "ixp46x_ts.h"
@@ -147,6 +152,16 @@ typedef void buffer_t;
#define free_buffer_irq kfree
#endif
+/* Information about built-in Ethernet MAC interfaces */
+struct eth_plat_info {
+ u8 phy; /* MII PHY ID, 0 - 31 */
+ u8 rxq; /* configurable, currently 0 - 31 only */
+ u8 txreadyq;
+ u8 hwaddr[ETH_ALEN];
+ u8 npe; /* NPE instance used by this interface */
+ bool has_mdio; /* If this instance has an MDIO bus */
+};
+
struct eth_regs {
u32 tx_control[2], __res1[2]; /* 000 */
u32 rx_control[2], __res2[2]; /* 010 */
@@ -826,7 +841,7 @@ static void eth_txdone_irq(void *unused)
}
}
-static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = netdev_priv(dev);
unsigned int txreadyq = port->plat->txreadyq;
@@ -984,11 +999,11 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
{
struct port *port = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
- strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strscpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_ts_info(struct net_device *dev,
@@ -1366,7 +1381,6 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-#ifdef CONFIG_OF
static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
{
struct device_node *np = dev->of_node;
@@ -1374,6 +1388,7 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
struct of_phandle_args npe_spec;
struct device_node *mdio_np;
struct eth_plat_info *plat;
+ u8 mac[ETH_ALEN];
int ret;
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
@@ -1415,14 +1430,14 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
}
plat->txreadyq = queue_spec.args[0];
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(plat->hwaddr, mac, ETH_ALEN);
+ }
+
return plat;
}
-#else
-static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
-{
- return NULL;
-}
-#endif
static int ixp4xx_eth_probe(struct platform_device *pdev)
{
@@ -1434,49 +1449,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
struct port *port;
int err;
- if (np) {
- plat = ixp4xx_of_get_platdata(dev);
- if (!plat)
- return -ENODEV;
- } else {
- plat = dev_get_platdata(dev);
- if (!plat)
- return -ENODEV;
- plat->npe = pdev->id;
- switch (plat->npe) {
- case IXP4XX_ETH_NPEA:
- /* If the MDIO bus is not up yet, defer probe */
- break;
- case IXP4XX_ETH_NPEB:
- /* On all except IXP43x, NPE-B is used for the MDIO bus.
- * If there is no NPE-B in the feature set, bail out,
- * else we have the MDIO bus here.
- */
- if (!cpu_is_ixp43x()) {
- if (!(ixp4xx_read_feature_bits() &
- IXP4XX_FEATURE_NPEB_ETH0))
- return -ENODEV;
- /* Else register the MDIO bus on NPE-B */
- plat->has_mdio = true;
- }
- break;
- case IXP4XX_ETH_NPEC:
- /* IXP43x lacks NPE-B and uses NPE-C for the MDIO bus
- * access, if there is no NPE-C, no bus, nothing works,
- * so bail out.
- */
- if (cpu_is_ixp43x()) {
- if (!(ixp4xx_read_feature_bits() &
- IXP4XX_FEATURE_NPEC_ETH))
- return -ENODEV;
- /* Else register the MDIO bus on NPE-B */
- plat->has_mdio = true;
- }
- break;
- default:
- return -ENODEV;
- }
- }
+ plat = ixp4xx_of_get_platdata(dev);
+ if (!plat)
+ return -ENODEV;
if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
return -ENOMEM;
@@ -1513,14 +1488,17 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
ndev->dev.dma_mask = dev->dma_mask;
ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
- netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
if (!(port->npe = npe_request(NPE_ID(port->id))))
return -EIO;
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- eth_hw_addr_set(ndev, plat->hwaddr);
+ if (is_valid_ether_addr(plat->hwaddr))
+ eth_hw_addr_set(ndev, plat->hwaddr);
+ else
+ eth_hw_addr_random(ndev);
platform_set_drvdata(pdev, ndev);
@@ -1530,21 +1508,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- if (np) {
- phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
- } else {
- phydev = mdiobus_get_phy(mdio_bus, plat->phy);
- if (!phydev) {
- err = -ENODEV;
- dev_err(dev, "could not connect phydev (%d)\n", err);
- goto err_free_mem;
- }
- err = phy_connect_direct(ndev, phydev, ixp4xx_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (err)
- goto err_free_mem;
-
- }
+ phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
if (!phydev) {
err = -ENODEV;
dev_err(dev, "no phydev\n");
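
Two API updates in this probe hunk are worth a note. netif_napi_add_weight() preserves the driver's explicit NAPI_WEIGHT now that plain netif_napi_add() no longer takes a weight argument, and the MAC address is only taken from platform data when it passes validation. A sketch of the address fallback using the same etherdevice.h helpers (example_set_mac is a hypothetical wrapper):

    #include <linux/etherdevice.h>

    static void example_set_mac(struct net_device *ndev, const u8 *hwaddr)
    {
            if (is_valid_ether_addr(hwaddr))
                    eth_hw_addr_set(ndev, hwaddr);  /* DT/platform-provided */
            else
                    eth_hw_addr_random(ndev);       /* random locally administered address */
    }
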
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 39234852e01b..9abbdb71e629 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -16,7 +16,6 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/cpu.h>
-#include <mach/ixp4xx-regs.h>
#include "ixp46x_ts.h"
@@ -272,7 +271,7 @@ static int ptp_ixp_probe(struct platform_device *pdev)
ixp_clock.master_irq = platform_get_irq(pdev, 0);
ixp_clock.slave_irq = platform_get_irq(pdev, 1);
if (IS_ERR(ixp_clock.regs) ||
- !ixp_clock.master_irq || !ixp_clock.slave_irq)
+ ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0)
return -ENXIO;
ixp_clock.caps = ptp_ixp_caps;
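
The one-line fix above matters because platform_get_irq() reports failure as a negative errno; testing for zero both misses real errors and defeats probe deferral. The canonical pattern looks like this:

    int irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;     /* propagates -EPROBE_DEFER, -ENXIO, ... */
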
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index 4cbb145c74ab..036062376c06 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -1314,7 +1314,7 @@ void mac_set_rx_mode(struct s_smc *smc, int mode)
o Connect a UPPS ISA or EISA station to the network.
o Give the FORMAC of UPPS station the command to send
restricted tokens until the ring becomes unstable.
- o Now connect your test test client.
+ o Now connect your test client.
o The restricted token monitor should detect the restricted token,
and your break point will be reached.
o You can observe how the station will clean the ring.
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific before it left the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 72c31f0013ad..dd15af4e98c2 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -747,7 +747,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
#endif
#ifdef SBA
- DB_SBAN(2,"SBA: RAF frame received\n",0,0) ;
+ DB_SBAN(2, "SBA: RAF frame received") ;
sba_raf_received_pack(smc,sm,fs) ;
#endif
break ;
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 746736c83873..19c99529566b 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -151,11 +151,11 @@ static void fjes_get_drvinfo(struct net_device *netdev,
plat_dev = adapter->plat_dev;
- strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, fjes_driver_version,
+ strscpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, fjes_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
"platform:%s", plat_dev->name);
}
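
The strlcpy() -> strscpy() conversions in this file (and in the xscale and geneve hunks) swap in a copy that always NUL-terminates and signals truncation: strscpy() returns the number of characters copied, or -E2BIG when the destination is too small, and it never reads the source beyond what fits. A small hedged sketch:

    char buf[8];
    ssize_t n = strscpy(buf, "a-long-driver-name", sizeof(buf));
    if (n == -E2BIG)
            pr_warn("truncated to \"%s\"\n", buf);  /* buf is still NUL-terminated */
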
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index ebd287039a54..1eff202f6a1f 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -32,68 +32,12 @@ MODULE_VERSION(DRV_VERSION);
#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
-static int fjes_request_irq(struct fjes_adapter *);
-static void fjes_free_irq(struct fjes_adapter *);
-
-static int fjes_open(struct net_device *);
-static int fjes_close(struct net_device *);
-static int fjes_setup_resources(struct fjes_adapter *);
-static void fjes_free_resources(struct fjes_adapter *);
-static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
-static void fjes_raise_intr_rxdata_task(struct work_struct *);
-static void fjes_tx_stall_task(struct work_struct *);
-static void fjes_force_close_task(struct work_struct *);
-static irqreturn_t fjes_intr(int, void*);
-static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
-static int fjes_change_mtu(struct net_device *, int);
-static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
-static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
-static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
-
-static int fjes_acpi_add(struct acpi_device *);
-static int fjes_acpi_remove(struct acpi_device *);
-static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
-
-static int fjes_probe(struct platform_device *);
-static int fjes_remove(struct platform_device *);
-
-static int fjes_sw_init(struct fjes_adapter *);
-static void fjes_netdev_setup(struct net_device *);
-static void fjes_irq_watch_task(struct work_struct *);
-static void fjes_watch_unshare_task(struct work_struct *);
-static void fjes_rx_irq(struct fjes_adapter *, int);
-static int fjes_poll(struct napi_struct *, int);
-
static const struct acpi_device_id fjes_acpi_ids[] = {
{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
-static struct acpi_driver fjes_acpi_driver = {
- .name = DRV_NAME,
- .class = DRV_NAME,
- .owner = THIS_MODULE,
- .ids = fjes_acpi_ids,
- .ops = {
- .add = fjes_acpi_add,
- .remove = fjes_acpi_remove,
- },
-};
-
-static struct platform_driver fjes_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = fjes_probe,
- .remove = fjes_remove,
-};
-
-static struct resource fjes_resource[] = {
- DEFINE_RES_MEM(0, 1),
- DEFINE_RES_IRQ(0)
-};
-
static bool is_extended_socket_device(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
@@ -139,43 +83,6 @@ static int acpi_check_extended_socket_status(struct acpi_device *device)
return 0;
}
-static int fjes_acpi_add(struct acpi_device *device)
-{
- struct platform_device *plat_dev;
- acpi_status status;
-
- if (!is_extended_socket_device(device))
- return -ENODEV;
-
- if (acpi_check_extended_socket_status(device))
- return -ENODEV;
-
- status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- fjes_get_acpi_resource, fjes_resource);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* create platform_device */
- plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
- ARRAY_SIZE(fjes_resource));
- if (IS_ERR(plat_dev))
- return PTR_ERR(plat_dev);
-
- device->driver_data = plat_dev;
-
- return 0;
-}
-
-static int fjes_acpi_remove(struct acpi_device *device)
-{
- struct platform_device *plat_dev;
-
- plat_dev = (struct platform_device *)acpi_driver_data(device);
- platform_device_unregister(plat_dev);
-
- return 0;
-}
-
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -206,143 +113,59 @@ fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
-static int fjes_request_irq(struct fjes_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int result = -1;
-
- adapter->interrupt_watch_enable = true;
- if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
- queue_delayed_work(adapter->control_wq,
- &adapter->interrupt_watch_task,
- FJES_IRQ_WATCH_DELAY);
- }
-
- if (!adapter->irq_registered) {
- result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
- IRQF_SHARED, netdev->name, adapter);
- if (result)
- adapter->irq_registered = false;
- else
- adapter->irq_registered = true;
- }
-
- return result;
-}
-
-static void fjes_free_irq(struct fjes_adapter *adapter)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- adapter->interrupt_watch_enable = false;
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
-
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
-
- if (adapter->irq_registered) {
- free_irq(adapter->hw.hw_res.irq, adapter);
- adapter->irq_registered = false;
- }
-}
-
-static const struct net_device_ops fjes_netdev_ops = {
- .ndo_open = fjes_open,
- .ndo_stop = fjes_close,
- .ndo_start_xmit = fjes_xmit_frame,
- .ndo_get_stats64 = fjes_get_stats64,
- .ndo_change_mtu = fjes_change_mtu,
- .ndo_tx_timeout = fjes_tx_retry,
- .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+static struct resource fjes_resource[] = {
+ DEFINE_RES_MEM(0, 1),
+ DEFINE_RES_IRQ(0)
};
-/* fjes_open - Called when a network interface is made active */
-static int fjes_open(struct net_device *netdev)
+static int fjes_acpi_add(struct acpi_device *device)
{
- struct fjes_adapter *adapter = netdev_priv(netdev);
- struct fjes_hw *hw = &adapter->hw;
- int result;
-
- if (adapter->open_guard)
- return -ENXIO;
-
- result = fjes_setup_resources(adapter);
- if (result)
- goto err_setup_res;
-
- hw->txrx_stop_req_bit = 0;
- hw->epstop_req_bit = 0;
+ struct platform_device *plat_dev;
+ acpi_status status;
- napi_enable(&adapter->napi);
+ if (!is_extended_socket_device(device))
+ return -ENODEV;
- fjes_hw_capture_interrupt_status(hw);
+ if (acpi_check_extended_socket_status(device))
+ return -ENODEV;
- result = fjes_request_irq(adapter);
- if (result)
- goto err_req_irq;
+ status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ fjes_get_acpi_resource, fjes_resource);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
+ /* create platform_device */
+ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+ ARRAY_SIZE(fjes_resource));
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
- netif_tx_start_all_queues(netdev);
- netif_carrier_on(netdev);
+ device->driver_data = plat_dev;
return 0;
-
-err_req_irq:
- fjes_free_irq(adapter);
- napi_disable(&adapter->napi);
-
-err_setup_res:
- fjes_free_resources(adapter);
- return result;
}
-/* fjes_close - Disables a network interface */
-static int fjes_close(struct net_device *netdev)
+static int fjes_acpi_remove(struct acpi_device *device)
{
- struct fjes_adapter *adapter = netdev_priv(netdev);
- struct fjes_hw *hw = &adapter->hw;
- unsigned long flags;
- int epidx;
-
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
-
- fjes_hw_raise_epstop(hw);
-
- napi_disable(&adapter->napi);
-
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
- continue;
-
- if (fjes_hw_get_partner_ep_status(hw, epidx) ==
- EP_PARTNER_SHARED)
- adapter->hw.ep_shm_info[epidx]
- .tx.info->v1i.rx_status &=
- ~FJES_RX_POLL_WORK;
- }
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
-
- fjes_free_irq(adapter);
-
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
- cancel_work_sync(&adapter->unshare_watch_task);
- adapter->unshare_watch_bitmask = 0;
- cancel_work_sync(&adapter->raise_intr_rxdata_task);
- cancel_work_sync(&adapter->tx_stall_task);
-
- cancel_work_sync(&hw->update_zone_task);
- cancel_work_sync(&hw->epstop_task);
-
- fjes_hw_wait_epstop(hw);
+ struct platform_device *plat_dev;
- fjes_free_resources(adapter);
+ plat_dev = (struct platform_device *)acpi_driver_data(device);
+ platform_device_unregister(plat_dev);
return 0;
}
+static struct acpi_driver fjes_acpi_driver = {
+ .name = DRV_NAME,
+ .class = DRV_NAME,
+ .owner = THIS_MODULE,
+ .ids = fjes_acpi_ids,
+ .ops = {
+ .add = fjes_acpi_add,
+ .remove = fjes_acpi_remove,
+ },
+};
+
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -421,6 +244,188 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
return 0;
}
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+ adapter->unset_rx_last = true;
+ napi_schedule(&adapter->napi);
+}
+
+static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+ unsigned long flags;
+
+ set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_stop_req_irq_pre(hw, src_epid, status);
+ switch (status) {
+ case EP_PARTNER_WAITING:
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ fallthrough;
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ break;
+ case EP_PARTNER_SHARED:
+ set_bit(src_epid, &hw->epstop_req_bit);
+
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq, &hw->epstop_task);
+ break;
+ }
+ trace_fjes_stop_req_irq_post(hw, src_epid);
+}
+
+static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+ unsigned long flags;
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
+ switch (status) {
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ break;
+ case EP_PARTNER_WAITING:
+ if (src_epid < hw->my_epid) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ }
+ break;
+ case EP_PARTNER_SHARED:
+ if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
+ FJES_RX_STOP_REQ_REQUEST) {
+ set_bit(src_epid, &hw->epstop_req_bit);
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq,
+ &hw->epstop_task);
+ }
+ break;
+ }
+ trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
+}
+
+static void fjes_update_zone_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ if (!work_pending(&hw->update_zone_task))
+ queue_work(adapter->control_wq, &hw->update_zone_task);
+}
+
+static irqreturn_t fjes_intr(int irq, void *data)
+{
+ struct fjes_adapter *adapter = data;
+ struct fjes_hw *hw = &adapter->hw;
+ irqreturn_t ret;
+ u32 icr;
+
+ icr = fjes_hw_capture_interrupt_status(hw);
+
+ if (icr & REG_IS_MASK_IS_ASSERT) {
+ if (icr & REG_ICTL_MASK_RX_DATA) {
+ fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_rx += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
+ fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_stop += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
+ fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_unshare += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
+ fjes_hw_set_irqmask(hw,
+ REG_ICTL_MASK_TXRX_STOP_DONE, true);
+
+ if (icr & REG_ICTL_MASK_INFO_UPDATE) {
+ fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_zoneupdate += 1;
+ }
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ return ret;
+}
+
+static int fjes_request_irq(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int result = -1;
+
+ adapter->interrupt_watch_enable = true;
+ if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+
+ if (!adapter->irq_registered) {
+ result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
+ IRQF_SHARED, netdev->name, adapter);
+ if (result)
+ adapter->irq_registered = false;
+ else
+ adapter->irq_registered = true;
+ }
+
+ return result;
+}
+
+static void fjes_free_irq(struct fjes_adapter *adapter)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ adapter->interrupt_watch_enable = false;
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+ if (adapter->irq_registered) {
+ free_irq(adapter->hw.hw_res.irq, adapter);
+ adapter->irq_registered = false;
+ }
+}
+
static void fjes_free_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -477,121 +482,91 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
}
}
-static void fjes_tx_stall_task(struct work_struct *work)
+/* fjes_open - Called when a network interface is made active */
+static int fjes_open(struct net_device *netdev)
{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, tx_stall_task);
- struct net_device *netdev = adapter->netdev;
+ struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
- int all_queue_available, sendable;
- enum ep_partner_status pstatus;
- int max_epid, my_epid, epid;
- union ep_buffer_info *info;
- int i;
-
- if (((long)jiffies -
- dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
- netif_wake_queue(netdev);
- return;
- }
-
- my_epid = hw->my_epid;
- max_epid = hw->max_epid;
+ int result;
- for (i = 0; i < 5; i++) {
- all_queue_available = 1;
+ if (adapter->open_guard)
+ return -ENXIO;
- for (epid = 0; epid < max_epid; epid++) {
- if (my_epid == epid)
- continue;
+ result = fjes_setup_resources(adapter);
+ if (result)
+ goto err_setup_res;
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- sendable = (pstatus == EP_PARTNER_SHARED);
- if (!sendable)
- continue;
+ hw->txrx_stop_req_bit = 0;
+ hw->epstop_req_bit = 0;
- info = adapter->hw.ep_shm_info[epid].tx.info;
+ napi_enable(&adapter->napi);
- if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
- return;
+ fjes_hw_capture_interrupt_status(hw);
- if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
- info->v1i.count_max)) {
- all_queue_available = 0;
- break;
- }
- }
+ result = fjes_request_irq(adapter);
+ if (result)
+ goto err_req_irq;
- if (all_queue_available) {
- netif_wake_queue(netdev);
- return;
- }
- }
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
- usleep_range(50, 100);
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
- queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
-}
+ return 0;
-static void fjes_force_close_task(struct work_struct *work)
-{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, force_close_task);
- struct net_device *netdev = adapter->netdev;
+err_req_irq:
+ fjes_free_irq(adapter);
+ napi_disable(&adapter->napi);
- rtnl_lock();
- dev_close(netdev);
- rtnl_unlock();
+err_setup_res:
+ fjes_free_resources(adapter);
+ return result;
}
-static void fjes_raise_intr_rxdata_task(struct work_struct *work)
+/* fjes_close - Disables a network interface */
+static int fjes_close(struct net_device *netdev)
{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, raise_intr_rxdata_task);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status pstatus;
- int max_epid, my_epid, epid;
+ unsigned long flags;
+ int epidx;
- my_epid = hw->my_epid;
- max_epid = hw->max_epid;
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
- for (epid = 0; epid < max_epid; epid++)
- hw->ep_shm_info[epid].tx_status_work = 0;
+ fjes_hw_raise_epstop(hw);
- for (epid = 0; epid < max_epid; epid++) {
- if (epid == my_epid)
- continue;
+ napi_disable(&adapter->napi);
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- if (pstatus == EP_PARTNER_SHARED) {
- hw->ep_shm_info[epid].tx_status_work =
- hw->ep_shm_info[epid].tx.info->v1i.tx_status;
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
- if (hw->ep_shm_info[epid].tx_status_work ==
- FJES_TX_DELAY_SEND_PENDING) {
- hw->ep_shm_info[epid].tx.info->v1i.tx_status =
- FJES_TX_DELAY_SEND_NONE;
- }
- }
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
- for (epid = 0; epid < max_epid; epid++) {
- if (epid == my_epid)
- continue;
+ fjes_free_irq(adapter);
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- if ((hw->ep_shm_info[epid].tx_status_work ==
- FJES_TX_DELAY_SEND_PENDING) &&
- (pstatus == EP_PARTNER_SHARED) &&
- !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
- FJES_RX_POLL_WORK)) {
- fjes_hw_raise_interrupt(hw, epid,
- REG_ICTL_MASK_RX_DATA);
- hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
- }
- }
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ adapter->unshare_watch_bitmask = 0;
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
- usleep_range(500, 1000);
+ cancel_work_sync(&hw->update_zone_task);
+ cancel_work_sync(&hw->epstop_task);
+
+ fjes_hw_wait_epstop(hw);
+
+ fjes_free_resources(adapter);
+
+ return 0;
}
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
@@ -787,13 +762,6 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
-static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
-{
- struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
-
- netif_tx_wake_queue(queue);
-}
-
static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
@@ -871,6 +839,13 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
+{
+ struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
+
+ netif_tx_wake_queue(queue);
+}
+
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
@@ -907,137 +882,29 @@ static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
- int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status status;
- unsigned long flags;
-
- status = fjes_hw_get_partner_ep_status(hw, src_epid);
- trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
- switch (status) {
- case EP_PARTNER_UNSHARE:
- case EP_PARTNER_COMPLETE:
- default:
- break;
- case EP_PARTNER_WAITING:
- if (src_epid < hw->my_epid) {
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
- FJES_RX_STOP_REQ_DONE;
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
-
- clear_bit(src_epid, &hw->txrx_stop_req_bit);
- set_bit(src_epid, &adapter->unshare_watch_bitmask);
-
- if (!work_pending(&adapter->unshare_watch_task))
- queue_work(adapter->control_wq,
- &adapter->unshare_watch_task);
- }
- break;
- case EP_PARTNER_SHARED:
- if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
- FJES_RX_STOP_REQ_REQUEST) {
- set_bit(src_epid, &hw->epstop_req_bit);
- if (!work_pending(&hw->epstop_task))
- queue_work(adapter->control_wq,
- &hw->epstop_task);
- }
- break;
- }
- trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
-}
-
-static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status status;
- unsigned long flags;
-
- set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
-
- status = fjes_hw_get_partner_ep_status(hw, src_epid);
- trace_fjes_stop_req_irq_pre(hw, src_epid, status);
- switch (status) {
- case EP_PARTNER_WAITING:
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
- FJES_RX_STOP_REQ_DONE;
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
- clear_bit(src_epid, &hw->txrx_stop_req_bit);
- fallthrough;
- case EP_PARTNER_UNSHARE:
- case EP_PARTNER_COMPLETE:
- default:
- set_bit(src_epid, &adapter->unshare_watch_bitmask);
- if (!work_pending(&adapter->unshare_watch_task))
- queue_work(adapter->control_wq,
- &adapter->unshare_watch_task);
- break;
- case EP_PARTNER_SHARED:
- set_bit(src_epid, &hw->epstop_req_bit);
-
- if (!work_pending(&hw->epstop_task))
- queue_work(adapter->control_wq, &hw->epstop_task);
- break;
- }
- trace_fjes_stop_req_irq_post(hw, src_epid);
-}
-
-static void fjes_update_zone_irq(struct fjes_adapter *adapter,
- int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- if (!work_pending(&hw->update_zone_task))
- queue_work(adapter->control_wq, &hw->update_zone_task);
-}
+static const struct net_device_ops fjes_netdev_ops = {
+ .ndo_open = fjes_open,
+ .ndo_stop = fjes_close,
+ .ndo_start_xmit = fjes_xmit_frame,
+ .ndo_get_stats64 = fjes_get_stats64,
+ .ndo_change_mtu = fjes_change_mtu,
+ .ndo_tx_timeout = fjes_tx_retry,
+ .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+};
-static irqreturn_t fjes_intr(int irq, void *data)
+/* fjes_netdev_setup - netdevice initialization routine */
+static void fjes_netdev_setup(struct net_device *netdev)
{
- struct fjes_adapter *adapter = data;
- struct fjes_hw *hw = &adapter->hw;
- irqreturn_t ret;
- u32 icr;
-
- icr = fjes_hw_capture_interrupt_status(hw);
-
- if (icr & REG_IS_MASK_IS_ASSERT) {
- if (icr & REG_ICTL_MASK_RX_DATA) {
- fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_rx += 1;
- }
-
- if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
- fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_stop += 1;
- }
-
- if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
- fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_unshare += 1;
- }
-
- if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
- fjes_hw_set_irqmask(hw,
- REG_ICTL_MASK_TXRX_STOP_DONE, true);
-
- if (icr & REG_ICTL_MASK_INFO_UPDATE) {
- fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_zoneupdate += 1;
- }
-
- ret = IRQ_HANDLED;
- } else {
- ret = IRQ_NONE;
- }
+ ether_setup(netdev);
- return ret;
+ netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
+ netdev->netdev_ops = &fjes_netdev_ops;
+ fjes_set_ethtool_ops(netdev);
+ netdev->mtu = fjes_support_mtu[3];
+ netdev->min_mtu = fjes_support_mtu[0];
+ netdev->max_mtu = fjes_support_mtu[3];
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
@@ -1087,16 +954,6 @@ static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
-static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
-
- adapter->unset_rx_last = true;
- napi_schedule(&adapter->napi);
-}
-
static int fjes_poll(struct napi_struct *napi, int budget)
{
struct fjes_adapter *adapter =
@@ -1196,182 +1053,130 @@ static int fjes_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* fjes_probe - Device Initialization Routine */
-static int fjes_probe(struct platform_device *plat_dev)
+static int fjes_sw_init(struct fjes_adapter *adapter)
{
- struct fjes_adapter *adapter;
- struct net_device *netdev;
- struct resource *res;
- struct fjes_hw *hw;
- u8 addr[ETH_ALEN];
- int err;
-
- err = -ENOMEM;
- netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
- NET_NAME_UNKNOWN, fjes_netdev_setup,
- FJES_MAX_QUEUES);
-
- if (!netdev)
- goto err_out;
+ struct net_device *netdev = adapter->netdev;
- SET_NETDEV_DEV(netdev, &plat_dev->dev);
+ netif_napi_add(netdev, &adapter->napi, fjes_poll);
- dev_set_drvdata(&plat_dev->dev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->plat_dev = plat_dev;
- hw = &adapter->hw;
- hw->back = adapter;
+ return 0;
+}
- /* setup the private structure */
- err = fjes_sw_init(adapter);
- if (err)
- goto err_free_netdev;
+static void fjes_force_close_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, force_close_task);
+ struct net_device *netdev = adapter->netdev;
- INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
- adapter->force_reset = false;
- adapter->open_guard = false;
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+}
- adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
- if (unlikely(!adapter->txrx_wq)) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
+static void fjes_tx_stall_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, tx_stall_task);
+ struct net_device *netdev = adapter->netdev;
+ struct fjes_hw *hw = &adapter->hw;
+ int all_queue_available, sendable;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
+ union ep_buffer_info *info;
+ int i;
- adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
- WQ_MEM_RECLAIM, 0);
- if (unlikely(!adapter->control_wq)) {
- err = -ENOMEM;
- goto err_free_txrx_wq;
+ if (((long)jiffies -
+ dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
+ netif_wake_queue(netdev);
+ return;
}
- INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
- INIT_WORK(&adapter->raise_intr_rxdata_task,
- fjes_raise_intr_rxdata_task);
- INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
- adapter->unshare_watch_bitmask = 0;
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
- INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
- adapter->interrupt_watch_enable = false;
+ for (i = 0; i < 5; i++) {
+ all_queue_available = 1;
- res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- goto err_free_control_wq;
- }
- hw->hw_res.start = res->start;
- hw->hw_res.size = resource_size(res);
- hw->hw_res.irq = platform_get_irq(plat_dev, 0);
- if (hw->hw_res.irq < 0) {
- err = hw->hw_res.irq;
- goto err_free_control_wq;
- }
+ for (epid = 0; epid < max_epid; epid++) {
+ if (my_epid == epid)
+ continue;
- err = fjes_hw_init(&adapter->hw);
- if (err)
- goto err_free_control_wq;
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ sendable = (pstatus == EP_PARTNER_SHARED);
+ if (!sendable)
+ continue;
- /* setup MAC address (02:00:00:00:00:[epid])*/
- addr[0] = 2;
- addr[1] = 0;
- addr[2] = 0;
- addr[3] = 0;
- addr[4] = 0;
- addr[5] = hw->my_epid; /* EPID */
- eth_hw_addr_set(netdev, addr);
+ info = adapter->hw.ep_shm_info[epid].tx.info;
- err = register_netdev(netdev);
- if (err)
- goto err_hw_exit;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return;
- netif_carrier_off(netdev);
+ if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
+ info->v1i.count_max)) {
+ all_queue_available = 0;
+ break;
+ }
+ }
- fjes_dbg_adapter_init(adapter);
+ if (all_queue_available) {
+ netif_wake_queue(netdev);
+ return;
+ }
+ }
- return 0;
+ usleep_range(50, 100);
-err_hw_exit:
- fjes_hw_exit(&adapter->hw);
-err_free_control_wq:
- destroy_workqueue(adapter->control_wq);
-err_free_txrx_wq:
- destroy_workqueue(adapter->txrx_wq);
-err_free_netdev:
- free_netdev(netdev);
-err_out:
- return err;
+ queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
-/* fjes_remove - Device Removal Routine */
-static int fjes_remove(struct platform_device *plat_dev)
+static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
- struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
- struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, raise_intr_rxdata_task);
struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
- fjes_dbg_adapter_exit(adapter);
-
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
- cancel_work_sync(&adapter->unshare_watch_task);
- cancel_work_sync(&adapter->raise_intr_rxdata_task);
- cancel_work_sync(&adapter->tx_stall_task);
- if (adapter->control_wq)
- destroy_workqueue(adapter->control_wq);
- if (adapter->txrx_wq)
- destroy_workqueue(adapter->txrx_wq);
-
- unregister_netdev(netdev);
-
- fjes_hw_exit(hw);
-
- netif_napi_del(&adapter->napi);
-
- free_netdev(netdev);
-
- return 0;
-}
-
-static int fjes_sw_init(struct fjes_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
-
- return 0;
-}
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
-/* fjes_netdev_setup - netdevice initialization routine */
-static void fjes_netdev_setup(struct net_device *netdev)
-{
- ether_setup(netdev);
+ for (epid = 0; epid < max_epid; epid++)
+ hw->ep_shm_info[epid].tx_status_work = 0;
- netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
- netdev->netdev_ops = &fjes_netdev_ops;
- fjes_set_ethtool_ops(netdev);
- netdev->mtu = fjes_support_mtu[3];
- netdev->min_mtu = fjes_support_mtu[0];
- netdev->max_mtu = fjes_support_mtu[3];
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-}
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
-static void fjes_irq_watch_task(struct work_struct *work)
-{
- struct fjes_adapter *adapter = container_of(to_delayed_work(work),
- struct fjes_adapter, interrupt_watch_task);
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if (pstatus == EP_PARTNER_SHARED) {
+ hw->ep_shm_info[epid].tx_status_work =
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status;
- local_irq_disable();
- fjes_intr(adapter->hw.hw_res.irq, adapter);
- local_irq_enable();
+ if (hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) {
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status =
+ FJES_TX_DELAY_SEND_NONE;
+ }
+ }
+ }
- if (fjes_rxframe_search_exist(adapter, 0) >= 0)
- napi_schedule(&adapter->napi);
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
- if (adapter->interrupt_watch_enable) {
- if (!delayed_work_pending(&adapter->interrupt_watch_task))
- queue_delayed_work(adapter->control_wq,
- &adapter->interrupt_watch_task,
- FJES_IRQ_WATCH_DELAY);
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if ((hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) &&
+ (pstatus == EP_PARTNER_SHARED) &&
+ !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+ FJES_RX_POLL_WORK)) {
+ fjes_hw_raise_interrupt(hw, epid,
+ REG_ICTL_MASK_RX_DATA);
+ hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
+ }
}
+
+ usleep_range(500, 1000);
}
static void fjes_watch_unshare_task(struct work_struct *work)
@@ -1508,16 +1313,178 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
}
+static void fjes_irq_watch_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(to_delayed_work(work),
+ struct fjes_adapter, interrupt_watch_task);
+
+ local_irq_disable();
+ fjes_intr(adapter->hw.hw_res.irq, adapter);
+ local_irq_enable();
+
+ if (fjes_rxframe_search_exist(adapter, 0) >= 0)
+ napi_schedule(&adapter->napi);
+
+ if (adapter->interrupt_watch_enable) {
+ if (!delayed_work_pending(&adapter->interrupt_watch_task))
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+}
+
+/* fjes_probe - Device Initialization Routine */
+static int fjes_probe(struct platform_device *plat_dev)
+{
+ struct fjes_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *res;
+ struct fjes_hw *hw;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = -ENOMEM;
+ netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
+ NET_NAME_UNKNOWN, fjes_netdev_setup,
+ FJES_MAX_QUEUES);
+
+ if (!netdev)
+ goto err_out;
+
+ SET_NETDEV_DEV(netdev, &plat_dev->dev);
+
+ dev_set_drvdata(&plat_dev->dev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->plat_dev = plat_dev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ /* setup the private structure */
+ err = fjes_sw_init(adapter);
+ if (err)
+ goto err_free_netdev;
+
+ INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
+ adapter->force_reset = false;
+ adapter->open_guard = false;
+
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->txrx_wq)) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
+ WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->control_wq)) {
+ err = -ENOMEM;
+ goto err_free_txrx_wq;
+ }
+
+ INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
+ INIT_WORK(&adapter->raise_intr_rxdata_task,
+ fjes_raise_intr_rxdata_task);
+ INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
+ adapter->unshare_watch_bitmask = 0;
+
+ INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
+ adapter->interrupt_watch_enable = false;
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_control_wq;
+ }
+ hw->hw_res.start = res->start;
+ hw->hw_res.size = resource_size(res);
+ hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+ if (hw->hw_res.irq < 0) {
+ err = hw->hw_res.irq;
+ goto err_free_control_wq;
+ }
+
+ err = fjes_hw_init(&adapter->hw);
+ if (err)
+ goto err_free_control_wq;
+
+ /* setup MAC address (02:00:00:00:00:[epid]) */
+ addr[0] = 2;
+ addr[1] = 0;
+ addr[2] = 0;
+ addr[3] = 0;
+ addr[4] = 0;
+ addr[5] = hw->my_epid; /* EPID */
+ eth_hw_addr_set(netdev, addr);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_hw_exit;
+
+ netif_carrier_off(netdev);
+
+ fjes_dbg_adapter_init(adapter);
+
+ return 0;
+
+err_hw_exit:
+ fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+ destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+ destroy_workqueue(adapter->txrx_wq);
+err_free_netdev:
+ free_netdev(netdev);
+err_out:
+ return err;
+}
+
+/* fjes_remove - Device Removal Routine */
+static int fjes_remove(struct platform_device *plat_dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_dbg_adapter_exit(adapter);
+
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
+ if (adapter->control_wq)
+ destroy_workqueue(adapter->control_wq);
+ if (adapter->txrx_wq)
+ destroy_workqueue(adapter->txrx_wq);
+
+ unregister_netdev(netdev);
+
+ fjes_hw_exit(hw);
+
+ netif_napi_del(&adapter->napi);
+
+ free_netdev(netdev);
+
+ return 0;
+}
+
+static struct platform_driver fjes_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = fjes_probe,
+ .remove = fjes_remove,
+};
+
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
void *context, void **return_value)
{
struct acpi_device *device;
bool *found = context;
- int result;
- result = acpi_bus_get_device(obj_handle, &device);
- if (result)
+ device = acpi_fetch_acpi_dev(obj_handle);
+ if (!device)
return AE_OK;
if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
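
The final fjes hunk replaces acpi_bus_get_device(), which returned an int status through an out-parameter, with acpi_fetch_acpi_dev(), which returns the struct acpi_device * directly or NULL when the handle has no companion device. The walk callback keeps iterating in the NULL case:

    struct acpi_device *adev = acpi_fetch_acpi_dev(obj_handle);
    if (!adev)
            return AE_OK;   /* no ACPI device for this handle; continue the namespace walk */
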
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c1fdd721a730..f393e454f45c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -56,6 +56,7 @@ struct geneve_config {
bool use_udp6_rx_checksums;
bool ttl_inherit;
enum ifla_geneve_df df;
+ bool inner_proto_inherit;
};
/* Pseudo network device */
@@ -251,17 +252,24 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
}
- skb_reset_mac_header(skb);
- skb->protocol = eth_type_trans(skb, geneve->dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
if (tun_dst)
skb_dst_set(skb, &tun_dst->dst);
- /* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
- geneve->dev->stats.rx_errors++;
- goto drop;
+ if (gnvh->proto_type == htons(ETH_P_TEB)) {
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, geneve->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source,
+ geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
+ goto drop;
+ }
+ } else {
+ skb_reset_mac_header(skb);
+ skb->dev = geneve->dev;
+ skb->pkt_type = PACKET_HOST;
}
oiph = skb_network_header(skb);
@@ -345,6 +353,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct genevehdr *geneveh;
struct geneve_dev *geneve;
struct geneve_sock *gs;
+ __be16 inner_proto;
int opts_len;
/* Need UDP and Geneve header to be present */
@@ -356,7 +365,11 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(geneveh->ver != GENEVE_VER))
goto drop;
- if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+ inner_proto = geneveh->proto_type;
+
+ if (unlikely((inner_proto != htons(ETH_P_TEB) &&
+ inner_proto != htons(ETH_P_IP) &&
+ inner_proto != htons(ETH_P_IPV6))))
goto drop;
gs = rcu_dereference_sk_user_data(sk);
@@ -367,9 +380,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!geneve)
goto drop;
+ if (unlikely((!geneve->cfg.inner_proto_inherit &&
+ inner_proto != htons(ETH_P_TEB)))) {
+ geneve->dev->stats.rx_dropped++;
+ goto drop;
+ }
+
opts_len = geneveh->opt_len * 4;
- if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
- htons(ETH_P_TEB),
+ if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
geneve->dev->stats.rx_dropped++;
goto drop;
@@ -485,12 +503,9 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
off_gnv = skb_gro_offset(skb);
hlen = off_gnv + sizeof(*gh);
- gh = skb_gro_header_fast(skb, off_gnv);
- if (skb_gro_header_hard(skb, hlen)) {
- gh = skb_gro_header_slow(skb, hlen, off_gnv);
- if (unlikely(!gh))
- goto out;
- }
+ gh = skb_gro_header(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
if (gh->ver != GENEVE_VER || gh->oam)
goto out;
@@ -515,14 +530,16 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
}
}
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
type = gh->proto_type;
+ if (likely(type == htons(ETH_P_TEB)))
+ return call_gro_receive(eth_gro_receive, head, skb);
ptype = gro_find_receive_by_type(type);
if (!ptype)
goto out;
- skb_gro_pull(skb, gh_len);
- skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
@@ -545,6 +562,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh_len = geneve_hlen(gh);
type = gh->proto_type;
+ /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */
+ if (likely(type == htons(ETH_P_TEB)))
+ return eth_gro_complete(skb, nhoff + gh_len);
+
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
@@ -717,7 +738,8 @@ static int geneve_stop(struct net_device *dev)
}
static void geneve_build_header(struct genevehdr *geneveh,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 inner_proto)
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
@@ -725,7 +747,7 @@ static void geneve_build_header(struct genevehdr *geneveh,
geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
- geneveh->proto_type = htons(ETH_P_TEB);
+ geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
@@ -734,10 +756,12 @@ static void geneve_build_header(struct genevehdr *geneveh,
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
const struct ip_tunnel_info *info,
- bool xnet, int ip_hdr_len)
+ bool xnet, int ip_hdr_len,
+ bool inner_proto_inherit)
{
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
struct genevehdr *gnvh;
+ __be16 inner_proto;
int min_headroom;
int err;
@@ -755,8 +779,9 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
goto free_dst;
gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
- geneve_build_header(gnvh, info);
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB);
+ geneve_build_header(gnvh, info, inner_proto);
+ skb_set_inner_protocol(skb, inner_proto);
return 0;
free_dst:
@@ -769,7 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ __u8 *full_tos)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -787,6 +813,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
fl4->saddr = info->key.u.ipv4.src;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_flags = info->key.flow_flags;
tos = info->key.tos;
if ((tos == 1) && !geneve->cfg.collect_md) {
@@ -794,6 +821,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
+ if (full_tos)
+ *full_tos = tos;
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
@@ -847,8 +876,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
@@ -882,6 +910,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
+ __u8 full_tos;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
@@ -892,7 +921,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, &full_tos);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -925,7 +954,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(&rt->dst);
return -EMSGSIZE;
}
@@ -936,7 +965,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -959,7 +988,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
}
- err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
+ err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+ geneve->cfg.inner_proto_inherit);
if (unlikely(err))
return err;
@@ -1021,7 +1051,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(dst);
return -EMSGSIZE;
}
@@ -1038,7 +1068,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = key->ttl;
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
- err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
+ err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+ geneve->cfg.inner_proto_inherit);
if (unlikely(err))
return err;
@@ -1118,7 +1149,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -1166,8 +1197,8 @@ static const struct net_device_ops geneve_netdev_ops = {
static void geneve_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
- strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}
static const struct ethtool_ops geneve_ethtool_ops = {
@@ -1238,6 +1269,7 @@ static void geneve_setup(struct net_device *dev)
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+ [IFLA_GENEVE_UNSPEC] = { .strict_start_type = IFLA_GENEVE_INNER_PROTO_INHERIT },
[IFLA_GENEVE_ID] = { .type = NLA_U32 },
[IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) },
@@ -1251,6 +1283,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
+ [IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1388,6 +1421,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
dst_cache_reset(&geneve->cfg.info.dst_cache);
memcpy(&geneve->cfg, cfg, sizeof(*cfg));
+ if (geneve->cfg.inner_proto_inherit) {
+ dev->header_ops = NULL;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_NOARP;
+ }
+
err = register_netdevice(dev);
if (err)
return err;
@@ -1561,10 +1602,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
#endif
}
+ if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) {
+ if (changelink) {
+ attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT;
+ goto change_notsup;
+ }
+ cfg->inner_proto_inherit = true;
+ }
+
return 0;
change_notsup:
NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
- "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
+ "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported");
return -EOPNOTSUPP;
}
@@ -1740,6 +1789,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
+ nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
0;
}
@@ -1799,6 +1849,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
goto nla_put_failure;
+ if (geneve->cfg.inner_proto_inherit &&
+ nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
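
IFLA_GENEVE_INNER_PROTO_INHERIT above is a new NLA_FLAG attribute, and the .strict_start_type entry added to the policy's UNSPEC slot requests strict validation for that attribute type and everything after it, even through the deprecated parsing entry points. A minimal sketch of the idiom, with hypothetical EXAMPLE_* names:

    static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
            [EXAMPLE_ATTR_UNSPEC]   = { .strict_start_type = EXAMPLE_ATTR_NEW_FLAG },
            [EXAMPLE_ATTR_ID]       = { .type = NLA_U32 },
            [EXAMPLE_ATTR_NEW_FLAG] = { .type = NLA_FLAG }, /* strictly validated from here on */
    };
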
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 24e5c54d06c1..15c7dc82107f 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -66,13 +66,23 @@ struct gtp_dev {
struct sock *sk0;
struct sock *sk1u;
+ u8 sk_created;
struct net_device *dev;
+ struct net *net;
unsigned int role;
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
+
+ u8 restart_count;
+};
+
+struct echo_info {
+ struct in_addr ms_addr_ip4;
+ struct in_addr peer_addr_ip4;
+ u8 gtp_version;
};
static unsigned int gtp_net_id __read_mostly;
@@ -83,6 +93,16 @@ struct gtp_net {
static u32 gtp_h_initval;
+static struct genl_family gtp_genl_family;
+
+enum gtp_multicast_groups {
+ GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+ [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
static void pdp_context_delete(struct pdp_ctx *pctx);
static inline u32 gtp0_hashfn(u64 tid)
@@ -207,7 +227,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
dev_sw_netstats_rx_add(pctx->dev, skb->len);
- netif_rx(skb);
+ __netif_rx(skb);
return 0;
err:
@@ -215,6 +235,174 @@ err:
return -1;
}
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+ const struct sock *sk,
+ __be32 daddr, __be32 saddr)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = sk->sk_bound_dev_if;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = RT_CONN_FLAGS(sk);
+ fl4->flowi4_proto = sk->sk_protocol;
+
+ return ip_route_output_key(sock_net(sk), fl4);
+}
+
+/* GSM TS 09.60. 7.3
+ * In all Path Management messages:
+ * - TID: is not used and shall be set to 0.
+ * - Flow Label is not used and shall be set to 0
+ * In signalling messages:
+ * - number: this field is not yet used in signalling messages.
+ * It shall be set to 255 by the sender and shall be ignored
+ * by the receiver
+ * Returns true if the echo req was correct, false otherwise.
+ */
+static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
+{
+ return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
+ gtp0->number != 0xff || gtp0->flow);
+}
+
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
+{
+ int len_pkt, len_hdr;
+
+ hdr->flags = 0x1e; /* v0, GTP-non-prime. */
+ hdr->type = msg_type;
+ /* GSM TS 09.60. 7.3: in all Path Management messages, Flow Label
+ * and TID are not used and shall be set to 0.
+ */
+ hdr->flow = 0;
+ hdr->tid = 0;
+ hdr->number = 0xff;
+ hdr->spare[0] = 0xff;
+ hdr->spare[1] = 0xff;
+ hdr->spare[2] = 0xff;
+
+ len_pkt = sizeof(struct gtp0_packet);
+ len_hdr = sizeof(struct gtp0_header);
+
+ if (msg_type == GTP_ECHO_RSP)
+ hdr->length = htons(len_pkt - len_hdr);
+ else
+ hdr->length = 0;
+}
+
+static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp0_packet *gtp_pkt;
+ struct gtp0_header *gtp0;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+ __be16 seq;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if (!gtp0_validate_echo_hdr(gtp0))
+ return -1;
+
+ seq = gtp0->seq;
+
+ /* pull GTP and UDP headers */
+ skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
+
+ gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
+ memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
+
+ gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
+
+ /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
+ * message shall be copied from the signalling request message
+ * that the GSN is replying to.
+ */
+ gtp_pkt->gtp0_h.seq = seq;
+
+ gtp_pkt->ie.tag = GTPIE_RECOVERY;
+ gtp_pkt->ie.val = gtp->restart_count;
+
+ iph = ip_hdr(skb);
+
+ /* find route to the sender,
+ * src address becomes dst address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP0_PORT), htons(GTP0_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
+static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+ int flags, u32 type, struct echo_info echo)
+{
+ void *genlh;
+
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
+ type);
+ if (!genlh)
+ goto failure;
+
+ if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
+ nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
+ goto failure;
+
+ genlmsg_end(skb, genlh);
+ return 0;
+
+failure:
+ genlmsg_cancel(skb, genlh);
+ return -EMSGSIZE;
+}
+
+static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp0_header *gtp0;
+ struct echo_info echo;
+ struct sk_buff *msg;
+ struct iphdr *iph;
+ int ret;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if (!gtp0_validate_echo_hdr(gtp0))
+ return -1;
+
+ iph = ip_hdr(skb);
+ echo.ms_addr_ip4.s_addr = iph->daddr;
+ echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.gtp_version = GTP_V0;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+ msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
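
Illustrative aside (not part of the patch): a userspace daemon can observe
these notifications by joining the family's multicast group. A minimal
libnl-3 sketch, assuming the group name GTP_GENL_MCGRP_NAME resolves to
"gtp" inside the "gtp" generic netlink family:

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <stdio.h>

	static int on_msg(struct nl_msg *msg, void *arg)
	{
		(void)msg; (void)arg;
		/* a real daemon would parse GTPA_VERSION/GTPA_PEER_ADDRESS/
		 * GTPA_MS_ADDRESS attributes here */
		printf("GTP echo notification received\n");
		return NL_OK;
	}

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		if (!sk || genl_connect(sk))
			return 1;

		grp = genl_ctrl_resolve_grp(sk, "gtp", "gtp");
		if (grp < 0 || nl_socket_add_membership(sk, grp))
			return 1;

		nl_socket_disable_seq_check(sk);	/* multicast has no seq */
		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_msg, NULL);
		for (;;)
			nl_recvmsgs_default(sk);	/* blocks */
		return 0;
	}
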
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
@@ -231,6 +419,16 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp0->flags >> 5) != GTP_V0)
return 1;
+ /* If the sockets were created in the kernel, there is no
+ * daemon running in userspace that would handle the echo
+ * request.
+ */
+ if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
+ return gtp0_send_echo_resp(gtp, skb);
+
+ if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
+ return gtp0_handle_echo_resp(gtp, skb);
+
if (gtp0->type != GTP_TPDU)
return 1;
@@ -243,6 +441,131 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
+{
+ int len_pkt, len_hdr;
+
+ /* S flag must be set to 1 */
+ hdr->flags = 0x32; /* v1, GTP-non-prime. */
+ hdr->type = msg_type;
+ /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
+ hdr->tid = 0;
+
+ /* seq, npdu and next are counted in the length of the GTP packet,
+ * which is why the size of gtp1_header is subtracted here,
+ * not the size of gtp1_header_long.
+ */
+
+ len_hdr = sizeof(struct gtp1_header);
+
+ if (msg_type == GTP_ECHO_RSP) {
+ len_pkt = sizeof(struct gtp1u_packet);
+ hdr->length = htons(len_pkt - len_hdr);
+ } else {
+ /* GTP_ECHO_REQ does not carry a GTP Information Element,
+ * which is why gtp1_header_long is used here.
+ */
+ len_pkt = sizeof(struct gtp1_header_long);
+ hdr->length = htons(len_pkt - len_hdr);
+ }
+}
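
Illustrative aside (not part of the patch): in GTPv1 the Length field counts
everything after the mandatory 8-byte header, so the optional seq/npdu/next
fields are included. The sizes below are assumptions taken from the driver's
structs.

	#include <assert.h>

	enum {
		GTP1_HDR_LEN      = 8,	/* assumed sizeof(struct gtp1_header) */
		GTP1_HDR_LONG_LEN = 12,	/* assumed sizeof(struct gtp1_header_long) */
		GTP1U_PKT_LEN     = 14,	/* assumed: long header + 2-byte recovery IE */
	};

	int main(void)
	{
		assert(GTP1U_PKT_LEN - GTP1_HDR_LEN == 6);	/* echo response */
		assert(GTP1_HDR_LONG_LEN - GTP1_HDR_LEN == 4);	/* echo request */
		return 0;
	}
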
+
+static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp1_header_long *gtp1u;
+ struct gtp1u_packet *gtp_pkt;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+
+ gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+ /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+ * Error Indication and Supported Extension Headers Notification
+ * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+ */
+ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+ return -1;
+
+ /* pull GTP and UDP headers */
+ skb_pull_data(skb,
+ sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
+
+ gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
+ memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
+
+ gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
+
+ /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
+ * Recovery information element shall not be used, i.e. it shall
+ * be set to zero by the sender and shall be ignored by the receiver.
+ * The Recovery information element is mandatory due to backwards
+ * compatibility reasons.
+ */
+ gtp_pkt->ie.tag = GTPIE_RECOVERY;
+ gtp_pkt->ie.val = 0;
+
+ iph = ip_hdr(skb);
+
+ /* find route to the sender,
+ * src address becomes dst address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP1U_PORT), htons(GTP1U_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
+static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp1_header_long *gtp1u;
+ struct echo_info echo;
+ struct sk_buff *msg;
+ struct iphdr *iph;
+ int ret;
+
+ gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+ /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+ * Error Indication and Supported Extension Headers Notification
+ * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+ */
+ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+ return -1;
+
+ iph = ip_hdr(skb);
+ echo.ms_addr_ip4.s_addr = iph->daddr;
+ echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.gtp_version = GTP_V1;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+ msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
+
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
@@ -258,6 +581,16 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp1->flags >> 5) != GTP_V1)
return 1;
+ /* If the sockets were created in the kernel, there is no
+ * daemon running in userspace that would handle the echo
+ * request.
+ */
+ if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
+ return gtp1u_send_echo_resp(gtp, skb);
+
+ if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
+ return gtp1u_handle_echo_resp(gtp, skb);
+
if (gtp1->type != GTP_TPDU)
return 1;
@@ -320,8 +653,16 @@ static void gtp_encap_disable_sock(struct sock *sk)
static void gtp_encap_disable(struct gtp_dev *gtp)
{
- gtp_encap_disable_sock(gtp->sk0);
- gtp_encap_disable_sock(gtp->sk1u);
+ if (gtp->sk_created) {
+ udp_tunnel_sock_release(gtp->sk0->sk_socket);
+ udp_tunnel_sock_release(gtp->sk1u->sk_socket);
+ gtp->sk_created = false;
+ gtp->sk0 = NULL;
+ gtp->sk1u = NULL;
+ } else {
+ gtp_encap_disable_sock(gtp->sk0);
+ gtp_encap_disable_sock(gtp->sk1u);
+ }
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -388,20 +729,6 @@ static void gtp_dev_uninit(struct net_device *dev)
free_percpu(dev->tstats);
}
-static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
- const struct sock *sk,
- __be32 daddr)
-{
- memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = sk->sk_bound_dev_if;
- fl4->daddr = daddr;
- fl4->saddr = inet_sk(sk)->inet_saddr;
- fl4->flowi4_tos = RT_CONN_FLAGS(sk);
- fl4->flowi4_proto = sk->sk_protocol;
-
- return ip_route_output_key(sock_net(sk), fl4);
-}
-
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
int payload_len = skb->len;
@@ -507,7 +834,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
}
netdev_dbg(dev, "found PDP context %p\n", pctx);
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
+ inet_sk(pctx->sk)->inet_saddr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SGSN %pI4\n",
&pctx->peer_addr_ip4.s_addr);
@@ -656,17 +984,69 @@ static void gtp_destructor(struct net_device *dev)
kfree(gtp->tid_hash);
}
+static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {};
+ struct udp_port_cfg udp_conf = {
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .family = AF_INET,
+ };
+ struct net *net = gtp->net;
+ struct socket *sock;
+ int err;
+
+ if (type == UDP_ENCAP_GTP0)
+ udp_conf.local_udp_port = htons(GTP0_PORT);
+ else if (type == UDP_ENCAP_GTP1U)
+ udp_conf.local_udp_port = htons(GTP1U_PORT);
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err)
+ return ERR_PTR(err);
+
+ tuncfg.sk_user_data = gtp;
+ tuncfg.encap_type = type;
+ tuncfg.encap_rcv = gtp_encap_recv;
+ tuncfg.encap_destroy = NULL;
+
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+ return sock->sk;
+}
+
+static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
+{
+ struct sock *sk1u = NULL;
+ struct sock *sk0 = NULL;
+
+ sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
+ if (IS_ERR(sk0))
+ return PTR_ERR(sk0);
+
+ sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
+ if (IS_ERR(sk1u)) {
+ udp_tunnel_sock_release(sk0->sk_socket);
+ return PTR_ERR(sk1u);
+ }
+
+ gtp->sk_created = true;
+ gtp->sk0 = sk0;
+ gtp->sk1u = sk1u;
+
+ return 0;
+}
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
int hashsize, err;
- if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
- return -EINVAL;
-
gtp = netdev_priv(dev);
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
@@ -677,11 +1057,28 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
hashsize = 1024;
}
+ if (data[IFLA_GTP_ROLE]) {
+ role = nla_get_u32(data[IFLA_GTP_ROLE]);
+ if (role > GTP_ROLE_SGSN)
+ return -EINVAL;
+ }
+ gtp->role = role;
+
+ if (!data[IFLA_GTP_RESTART_COUNT])
+ gtp->restart_count = 0;
+ else
+ gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
+
+ gtp->net = src_net;
+
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
return err;
- err = gtp_encap_enable(gtp, data);
+ if (data[IFLA_GTP_CREATE_SOCKETS])
+ err = gtp_create_sockets(gtp, data);
+ else
+ err = gtp_encap_enable(gtp, data);
if (err < 0)
goto out_hashtable;
@@ -726,6 +1123,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
+ [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
+ [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -740,7 +1139,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
static size_t gtp_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
- nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
+ nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
+ nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -751,6 +1151,8 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
+ goto nla_put_failure;
return 0;
@@ -848,7 +1250,9 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
- unsigned int role = GTP_ROLE_GGSN;
+
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
+ return -EINVAL;
if (data[IFLA_GTP_FD0]) {
u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
@@ -868,18 +1272,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
}
}
- if (data[IFLA_GTP_ROLE]) {
- role = nla_get_u32(data[IFLA_GTP_ROLE]);
- if (role > GTP_ROLE_SGSN) {
- gtp_encap_disable_sock(sk0);
- gtp_encap_disable_sock(sk1u);
- return -EINVAL;
- }
- }
-
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
- gtp->role = role;
return 0;
}
@@ -1183,16 +1577,6 @@ out_unlock:
return err;
}
-static struct genl_family gtp_genl_family;
-
-enum gtp_multicast_groups {
- GTP_GENL_MCGRP,
-};
-
-static const struct genl_multicast_group gtp_genl_mcgrps[] = {
- [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
-};
-
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
@@ -1336,6 +1720,95 @@ out:
return skb->len;
}
+static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *skb_to_send;
+ __be32 src_ip, dst_ip;
+ unsigned int version;
+ struct gtp_dev *gtp;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct sock *sk;
+ __be16 port;
+ int len;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK] ||
+ !info->attrs[GTPA_PEER_ADDRESS] ||
+ !info->attrs[GTPA_MS_ADDRESS])
+ return -EINVAL;
+
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
+ src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+ if (!gtp)
+ return -ENODEV;
+
+ if (!gtp->sk_created)
+ return -EOPNOTSUPP;
+ if (!(gtp->dev->flags & IFF_UP))
+ return -ENETDOWN;
+
+ if (version == GTP_V0) {
+ struct gtp0_header *gtp0_h;
+
+ len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+
+ skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+ if (!skb_to_send)
+ return -ENOMEM;
+
+ sk = gtp->sk0;
+ port = htons(GTP0_PORT);
+
+ gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
+ memset(gtp0_h, 0, sizeof(struct gtp0_header));
+ gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
+ } else if (version == GTP_V1) {
+ struct gtp1_header_long *gtp1u_h;
+
+ len = LL_RESERVED_SPACE(gtp->dev) +
+ sizeof(struct gtp1_header_long) +
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+
+ skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+ if (!skb_to_send)
+ return -ENOMEM;
+
+ sk = gtp->sk1u;
+ port = htons(GTP1U_PORT);
+
+ gtp1u_h = skb_push(skb_to_send,
+ sizeof(struct gtp1_header_long));
+ memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
+ gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
+ } else {
+ return -ENODEV;
+ }
+
+ rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
+ &dst_ip);
+ kfree_skb(skb_to_send);
+ return -ENODEV;
+ }
+
+ udp_tunnel_xmit_skb(rt, sk, skb_to_send,
+ fl4.saddr, fl4.daddr,
+ fl4.flowi4_tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ port, port,
+ !net_eq(sock_net(sk),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
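
Illustrative aside (not part of the patch): with in-kernel sockets enabled,
the new doit handler can be driven from userspace over generic netlink. A
hedged libnl-3 sketch, assuming <linux/gtp.h> from this series (providing
GTP_CMD_ECHOREQ and the GTPA_* attributes) and a device named "gtp0";
GENL_ADMIN_PERM means it must run as root:

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/gtp.h>
	#include <net/if.h>
	#include <arpa/inet.h>

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg;
		int fam;

		if (!sk || genl_connect(sk))
			return 1;
		fam = genl_ctrl_resolve(sk, "gtp");
		if (fam < 0)
			return 1;

		msg = nlmsg_alloc();
		if (!msg || !genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam,
					 0, 0, GTP_CMD_ECHOREQ, 0))
			return 1;
		nla_put_u32(msg, GTPA_VERSION, 1);	/* GTP_V1 */
		nla_put_u32(msg, GTPA_LINK, if_nametoindex("gtp0"));
		/* inet_addr() already yields big-endian values, matching the
		 * nla_get_be32() reads in the handler above */
		nla_put_u32(msg, GTPA_PEER_ADDRESS, inet_addr("192.0.2.1"));
		nla_put_u32(msg, GTPA_MS_ADDRESS, inet_addr("198.51.100.1"));

		if (nl_send_auto(sk, msg) < 0)
			return 1;
		nlmsg_free(msg);
		nl_socket_free(sk);
		return 0;
	}
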
+
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
@@ -1368,6 +1841,12 @@ static const struct genl_small_ops gtp_genl_ops[] = {
.dumpit = gtp_genl_dump_pdp,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = GTP_CMD_ECHOREQ,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = gtp_genl_send_echo_req,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family gtp_genl_family __ro_after_init = {
@@ -1380,6 +1859,7 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = gtp_genl_ops,
.n_small_ops = ARRAY_SIZE(gtp_genl_ops),
+ .resv_start_op = GTP_CMD_ECHOREQ + 1,
.mcgrps = gtp_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 8a19a06b505d..9fb567524220 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -99,6 +99,7 @@ struct sixpack {
unsigned int rx_count;
unsigned int rx_count_cooked;
+ spinlock_t rxlock;
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
@@ -565,6 +566,7 @@ static int sixpack_open(struct tty_struct *tty)
sp->dev = dev;
spin_lock_init(&sp->lock);
+ spin_lock_init(&sp->rxlock);
refcount_set(&sp->refcnt, 1);
init_completion(&sp->dead);
@@ -668,11 +670,11 @@ static void sixpack_close(struct tty_struct *tty)
*/
netif_stop_queue(sp->dev);
+ unregister_netdev(sp->dev);
+
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
- unregister_netdev(sp->dev);
-
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
@@ -681,8 +683,8 @@ static void sixpack_close(struct tty_struct *tty)
}
/* Perform I/O control on an active 6pack channel. */
-static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct sixpack *sp = sp_get(tty);
struct net_device *dev;
@@ -913,6 +915,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
sp->led_state = 0x60;
/* fill trailing bytes with zeroes */
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
+ spin_lock_bh(&sp->rxlock);
rest = sp->rx_count;
if (rest != 0)
for (i = rest; i <= 3; i++)
@@ -930,6 +933,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
sp_bump(sp, 0);
}
sp->rx_count_cooked = 0;
+ spin_unlock_bh(&sp->rxlock);
}
break;
case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
@@ -959,8 +963,11 @@ sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
decode_prio_command(sp, inbyte);
else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
decode_std_command(sp, inbyte);
- else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
+ else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) {
+ spin_lock_bh(&sp->rxlock);
decode_data(sp, inbyte);
+ spin_unlock_bh(&sp->rxlock);
+ }
}
}
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 441da03c23ee..a9c44f08199d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -45,40 +45,6 @@ config BPQETHER
useful if some other computer on your local network has a direct
amateur radio connection.
-config DMASCC
- tristate "High-speed (DMA) SCC driver for AX.25"
- depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
- depends on VIRT_TO_BUS
- help
- This is a driver for high-speed SCC boards, i.e. those supporting
- DMA on one port. You usually use those boards to connect your
- computer to an amateur radio modem (such as the WA4DSY 56kbps
- modem), in order to send and receive AX.25 packet radio network
- traffic.
-
- Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis
- PackeTwin, and S5SCC/DMA boards. They are detected automatically.
- If you have one of these cards, say Y here and read the AX25-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- This driver can operate multiple boards simultaneously. If you
- compile it as a module (by saying M instead of Y), it will be called
- dmascc. If you don't pass any parameter to the driver, all
- possible I/O addresses are probed. This could irritate other devices
- that are currently not in use. You may specify the list of addresses
- to be probed by "dmascc.io=addr1,addr2,..." (when compiled into the
- kernel image) or "io=addr1,addr2,..." (when loaded as a module). The
- network interfaces will be called dmascc0 and dmascc1 for the board
- detected first, dmascc2 and dmascc3 for the second one, and so on.
-
- Before you configure each interface with ifconfig, you MUST set
- certain parameters, such as channel access timing, clock mode, and
- DMA channel. This is accomplished with a small utility program,
- dmascc_cfg, available at
- <http://www.linux-ax25.org/wiki/Ax25-tools>. Please be sure to
- get at least version 1.27 of dmascc_cfg, as older versions will not
- work with the current driver.
-
config SCC
tristate "Z8530 SCC driver"
depends on ISA && AX25 && ISA_DMA_API
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
index 7a1518d763e3..25fc400369ba 100644
--- a/drivers/net/hamradio/Makefile
+++ b/drivers/net/hamradio/Makefile
@@ -11,7 +11,6 @@
# Christoph Hellwig <hch@infradead.org>
#
-obj-$(CONFIG_DMASCC) += dmascc.o
obj-$(CONFIG_SCC) += scc.o
obj-$(CONFIG_MKISS) += mkiss.o
obj-$(CONFIG_6PACK) += 6pack.o
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a03d0b474641..791b4a53d69f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -438,7 +438,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
if ((--bc->hdlctx.slotcnt) > 0)
return 0;
bc->hdlctx.slotcnt = bc->ch_params.slottime;
- if ((prandom_u32() % 256) > bc->ch_params.ppersist)
+ if (get_random_u8() > bc->ch_params.ppersist)
return 0;
}
}
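
Illustrative aside (not part of the patch): both prandom_u32() % 256 and
get_random_u8() yield a uniform byte, so the p-persistence behaviour is
unchanged: the station transmits when the byte does not exceed ppersist,
i.e. with probability (ppersist + 1) / 256. The identical substitutions in
hdlcdrv.c and yam.c below preserve the same distribution. A userspace sketch
of the math, with rand() as a stand-in for get_random_u8():

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const unsigned ppersist = 63;	/* ~25% channel access probability */
		unsigned tx = 0, trials = 1000000;

		srand(1);
		for (unsigned i = 0; i < trials; i++) {
			uint8_t r = (uint8_t)(rand() & 0xff);
			if (r <= ppersist)	/* kernel defers when r > ppersist */
				tx++;
		}
		printf("tx rate: %.3f (expected %.3f)\n",
		       (double)tx / trials, (ppersist + 1) / 256.0);
		return 0;
	}
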
@@ -982,10 +982,10 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
bc->cfg.extmodem = 0;
if (strstr(modestr,"extmodem"))
bc->cfg.extmodem = 1;
- if (strstr(modestr,"noloopback"))
- bc->cfg.loopback = 0;
if (strstr(modestr,"loopback"))
bc->cfg.loopback = 1;
+ if (strstr(modestr, "noloopback"))
+ bc->cfg.loopback = 0;
if ((cp = strstr(modestr,"fclk="))) {
bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0);
if (bc->cfg.fclk < 1000000)
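
Illustrative aside (not part of the patch): the reorder above matters because
strstr() finds "loopback" inside "noloopback". With the old order, the mode
string "noloopback" first cleared the flag and then immediately set it again;
checking "loopback" first lets the "noloopback" override win. A minimal demo:

	#include <stdio.h>
	#include <string.h>

	static int parse(const char *modestr)
	{
		int loopback = 0;

		if (strstr(modestr, "loopback"))
			loopback = 1;
		if (strstr(modestr, "noloopback"))	/* must come after */
			loopback = 0;
		return loopback;
	}

	int main(void)
	{
		printf("loopback -> %d\n", parse("loopback"));	/* 1 */
		printf("noloopback -> %d\n", parse("noloopback"));	/* 0 */
		return 0;
	}
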
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 30af0081e2be..83a16d10eedb 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -533,7 +533,7 @@ static int bpq_device_event(struct notifier_block *this,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
deleted file mode 100644
index 7e527499d3ad..000000000000
--- a/drivers/net/hamradio/dmascc.c
+++ /dev/null
@@ -1,1449 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for high-speed SCC boards (those with DMA support)
- * Copyright (C) 1997-2000 Klaus Kudielka
- *
- * S5SCC/DMA support by Janko Koleznik S52HI
- */
-
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
-#include <linux/sockios.h>
-#include <linux/workqueue.h>
-#include <linux/atomic.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <net/ax25.h>
-#include "z8530.h"
-
-
-/* Number of buffers per channel */
-
-#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
-#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
-#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
-
-
-/* Cards supported */
-
-#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
- 0, 8, 1843200, 3686400 }
-#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
- 0, 8, 3686400, 7372800 }
-#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
- 0, 4, 6144000, 6144000 }
-#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
- 0, 8, 4915200, 9830400 }
-
-#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
-
-#define TMR_0_HZ 25600 /* Frequency of timer 0 */
-
-#define TYPE_PI 0
-#define TYPE_PI2 1
-#define TYPE_TWIN 2
-#define TYPE_S5 3
-#define NUM_TYPES 4
-
-#define MAX_NUM_DEVS 32
-
-
-/* SCC chips supported */
-
-#define Z8530 0
-#define Z85C30 1
-#define Z85230 2
-
-#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
-
-
-/* I/O registers */
-
-/* 8530 registers relative to card base */
-#define SCCB_CMD 0x00
-#define SCCB_DATA 0x01
-#define SCCA_CMD 0x02
-#define SCCA_DATA 0x03
-
-/* 8253/8254 registers relative to card base */
-#define TMR_CNT0 0x00
-#define TMR_CNT1 0x01
-#define TMR_CNT2 0x02
-#define TMR_CTRL 0x03
-
-/* Additional PI/PI2 registers relative to card base */
-#define PI_DREQ_MASK 0x04
-
-/* Additional PackeTwin registers relative to card base */
-#define TWIN_INT_REG 0x08
-#define TWIN_CLR_TMR1 0x09
-#define TWIN_CLR_TMR2 0x0a
-#define TWIN_SPARE_1 0x0b
-#define TWIN_DMA_CFG 0x08
-#define TWIN_SERIAL_CFG 0x09
-#define TWIN_DMA_CLR_FF 0x0a
-#define TWIN_SPARE_2 0x0b
-
-
-/* PackeTwin I/O register values */
-
-/* INT_REG */
-#define TWIN_SCC_MSK 0x01
-#define TWIN_TMR1_MSK 0x02
-#define TWIN_TMR2_MSK 0x04
-#define TWIN_INT_MSK 0x07
-
-/* SERIAL_CFG */
-#define TWIN_DTRA_ON 0x01
-#define TWIN_DTRB_ON 0x02
-#define TWIN_EXTCLKA 0x04
-#define TWIN_EXTCLKB 0x08
-#define TWIN_LOOPA_ON 0x10
-#define TWIN_LOOPB_ON 0x20
-#define TWIN_EI 0x80
-
-/* DMA_CFG */
-#define TWIN_DMA_HDX_T1 0x08
-#define TWIN_DMA_HDX_R1 0x0a
-#define TWIN_DMA_HDX_T3 0x14
-#define TWIN_DMA_HDX_R3 0x16
-#define TWIN_DMA_FDX_T3R1 0x1b
-#define TWIN_DMA_FDX_T1R3 0x1d
-
-
-/* Status values */
-
-#define IDLE 0
-#define TX_HEAD 1
-#define TX_DATA 2
-#define TX_PAUSE 3
-#define TX_TAIL 4
-#define RTS_OFF 5
-#define WAIT 6
-#define DCD_ON 7
-#define RX_ON 8
-#define DCD_OFF 9
-
-
-/* Ioctls */
-
-#define SIOCGSCCPARAM SIOCDEVPRIVATE
-#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
-
-
-/* Data types */
-
-struct scc_param {
- int pclk_hz; /* frequency of BRG input (don't change) */
- int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
- int nrzi; /* 0 (nrz), 1 (nrzi) */
- int clocks; /* see dmascc_cfg documentation */
- int txdelay; /* [1/TMR_0_HZ] */
- int txtimeout; /* [1/HZ] */
- int txtail; /* [1/TMR_0_HZ] */
- int waittime; /* [1/TMR_0_HZ] */
- int slottime; /* [1/TMR_0_HZ] */
- int persist; /* 1 ... 256 */
- int dma; /* -1 (disable), 0, 1, 3 */
- int txpause; /* [1/TMR_0_HZ] */
- int rtsoff; /* [1/TMR_0_HZ] */
- int dcdon; /* [1/TMR_0_HZ] */
- int dcdoff; /* [1/TMR_0_HZ] */
-};
-
-struct scc_hardware {
- char *name;
- int io_region;
- int io_delta;
- int io_size;
- int num_devs;
- int scc_offset;
- int tmr_offset;
- int tmr_hz;
- int pclk_hz;
-};
-
-struct scc_priv {
- int type;
- int chip;
- struct net_device *dev;
- struct scc_info *info;
-
- int channel;
- int card_base, scc_cmd, scc_data;
- int tmr_cnt, tmr_ctrl, tmr_mode;
- struct scc_param param;
- char rx_buf[NUM_RX_BUF][BUF_SIZE];
- int rx_len[NUM_RX_BUF];
- int rx_ptr;
- struct work_struct rx_work;
- int rx_head, rx_tail, rx_count;
- int rx_over;
- char tx_buf[NUM_TX_BUF][BUF_SIZE];
- int tx_len[NUM_TX_BUF];
- int tx_ptr;
- int tx_head, tx_tail, tx_count;
- int state;
- unsigned long tx_start;
- int rr0;
- spinlock_t *register_lock; /* Per scc_info */
- spinlock_t ring_lock;
-};
-
-struct scc_info {
- int irq_used;
- int twin_serial_cfg;
- struct net_device *dev[2];
- struct scc_priv priv[2];
- struct scc_info *next;
- spinlock_t register_lock; /* Per device register lock */
-};
-
-
-/* Function declarations */
-static int setup_adapter(int card_base, int type, int n) __init;
-
-static void write_scc(struct scc_priv *priv, int reg, int val);
-static void write_scc_data(struct scc_priv *priv, int val, int fast);
-static int read_scc(struct scc_priv *priv, int reg);
-static int read_scc_data(struct scc_priv *priv);
-
-static int scc_open(struct net_device *dev);
-static int scc_close(struct net_device *dev);
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
-static int scc_set_mac_address(struct net_device *dev, void *sa);
-
-static inline void tx_on(struct scc_priv *priv);
-static inline void rx_on(struct scc_priv *priv);
-static inline void rx_off(struct scc_priv *priv);
-static void start_timer(struct scc_priv *priv, int t, int r15);
-static inline unsigned char random(void);
-
-static inline void z8530_isr(struct scc_info *info);
-static irqreturn_t scc_isr(int irq, void *dev_id);
-static void rx_isr(struct scc_priv *priv);
-static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(struct work_struct *);
-static void tx_isr(struct scc_priv *priv);
-static void es_isr(struct scc_priv *priv);
-static void tm_isr(struct scc_priv *priv);
-
-
-/* Initialization variables */
-
-static int io[MAX_NUM_DEVS] __initdata = { 0, };
-
-/* Beware! hw[] is also used in dmascc_exit(). */
-static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
-
-
-/* Global variables */
-
-static struct scc_info *first;
-static unsigned long rand;
-
-
-MODULE_AUTHOR("Klaus Kudielka");
-MODULE_DESCRIPTION("Driver for high-speed SCC boards");
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_LICENSE("GPL");
-
-static void __exit dmascc_exit(void)
-{
- int i;
- struct scc_info *info;
-
- while (first) {
- info = first;
-
- /* Unregister devices */
- for (i = 0; i < 2; i++)
- unregister_netdev(info->dev[i]);
-
- /* Reset board */
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- release_region(info->dev[0]->base_addr,
- hw[info->priv[0].type].io_size);
-
- for (i = 0; i < 2; i++)
- free_netdev(info->dev[i]);
-
- /* Free memory */
- first = info->next;
- kfree(info);
- }
-}
-
-static int __init dmascc_init(void)
-{
- int h, i, j, n;
- int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
- t1[MAX_NUM_DEVS];
- unsigned t_val;
- unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
- counting[MAX_NUM_DEVS];
-
- /* Initialize random number generator */
- rand = jiffies;
- /* Cards found = 0 */
- n = 0;
- /* Warning message */
- if (!io[0])
- printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
-
- /* Run autodetection for each card type */
- for (h = 0; h < NUM_TYPES; h++) {
-
- if (io[0]) {
- /* User-specified I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- base[i] = 0;
- for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
- j = (io[i] -
- hw[h].io_region) / hw[h].io_delta;
- if (j >= 0 && j < hw[h].num_devs &&
- hw[h].io_region +
- j * hw[h].io_delta == io[i]) {
- base[j] = io[i];
- }
- }
- } else {
- /* Default I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++) {
- base[i] =
- hw[h].io_region + i * hw[h].io_delta;
- }
- }
-
- /* Check valid I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if (!request_region
- (base[i], hw[h].io_size, "dmascc"))
- base[i] = 0;
- else {
- tcmd[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CTRL;
- t0[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT0;
- t1[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT1;
- }
- }
-
- /* Start timers */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
- outb(0x36, tcmd[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
- t0[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
- t0[i]);
- /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
- outb(0x70, tcmd[i]);
- outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
- outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
- start[i] = jiffies;
- delay[i] = 0;
- counting[i] = 1;
- /* Timer 2: LSB+MSB, Mode 0 */
- outb(0xb0, tcmd[i]);
- }
- time = jiffies;
- /* Wait until counter registers are loaded */
- udelay(2000000 / TMR_0_HZ);
-
- /* Timing loop */
- while (jiffies - time < 13) {
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i] && counting[i]) {
- /* Read back Timer 1: latch; read LSB; read MSB */
- outb(0x40, tcmd[i]);
- t_val =
- inb(t1[i]) + (inb(t1[i]) << 8);
- /* Also check whether counter did wrap */
- if (t_val == 0 ||
- t_val > TMR_0_HZ / HZ * 10)
- counting[i] = 0;
- delay[i] = jiffies - start[i];
- }
- }
-
- /* Evaluate measurements */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if ((delay[i] >= 9 && delay[i] <= 11) &&
- /* Ok, we have found an adapter */
- (setup_adapter(base[i], h, n) == 0))
- n++;
- else
- release_region(base[i],
- hw[h].io_size);
- }
-
- } /* NUM_TYPES */
-
- /* If any adapter was successfully initialized, return ok */
- if (n)
- return 0;
-
- /* If no adapter found, return error */
- printk(KERN_INFO "dmascc: no adapters found\n");
- return -EIO;
-}
-
-module_init(dmascc_init);
-module_exit(dmascc_exit);
-
-static void __init dev_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_AX25;
- dev->hard_header_len = AX25_MAX_HEADER_LEN;
- dev->mtu = 1500;
- dev->addr_len = AX25_ADDR_LEN;
- dev->tx_queue_len = 64;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- dev_addr_set(dev, (u8 *)&ax25_defaddr);
-}
-
-static const struct net_device_ops scc_netdev_ops = {
- .ndo_open = scc_open,
- .ndo_stop = scc_close,
- .ndo_start_xmit = scc_send_packet,
- .ndo_siocdevprivate = scc_siocdevprivate,
- .ndo_set_mac_address = scc_set_mac_address,
-};
-
-static int __init setup_adapter(int card_base, int type, int n)
-{
- int i, irq, chip, err;
- struct scc_info *info;
- struct net_device *dev;
- struct scc_priv *priv;
- unsigned long time;
- unsigned int irqs;
- int tmr_base = card_base + hw[type].tmr_offset;
- int scc_base = card_base + hw[type].scc_offset;
- char *chipnames[] = CHIPNAMES;
-
- /* Initialize what is necessary for write_scc and write_scc_data */
- info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[0]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out1;
- }
-
- info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[1]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out2;
- }
- spin_lock_init(&info->register_lock);
-
- priv = &info->priv[0];
- priv->type = type;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + SCCA_CMD;
- priv->scc_data = scc_base + SCCA_DATA;
- priv->register_lock = &info->register_lock;
-
- /* Reset SCC */
- write_scc(priv, R9, FHWRES | MIE | NV);
-
- /* Determine type of chip by enabling SDLC/HDLC enhancements */
- write_scc(priv, R15, SHDLCE);
- if (!read_scc(priv, R15)) {
- /* WR7' not present. This is an ordinary Z8530 SCC. */
- chip = Z8530;
- } else {
- /* Put one character in TX FIFO */
- write_scc_data(priv, 0, 0);
- if (read_scc(priv, R0) & Tx_BUF_EMP) {
- /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
- chip = Z85230;
- } else {
- /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
- chip = Z85C30;
- }
- }
- write_scc(priv, R15, 0);
-
- /* Start IRQ auto-detection */
- irqs = probe_irq_on();
-
- /* Enable interrupts */
- if (type == TYPE_TWIN) {
- outb(0, card_base + TWIN_DMA_CFG);
- inb(card_base + TWIN_CLR_TMR1);
- inb(card_base + TWIN_CLR_TMR2);
- info->twin_serial_cfg = TWIN_EI;
- outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
- } else {
- write_scc(priv, R15, CTSIE);
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R1, EXT_INT_ENAB);
- }
-
- /* Start timer */
- outb(1, tmr_base + TMR_CNT1);
- outb(0, tmr_base + TMR_CNT1);
-
- /* Wait and detect IRQ */
- time = jiffies;
- while (jiffies - time < 2 + HZ / TMR_0_HZ);
- irq = probe_irq_off(irqs);
-
- /* Clear pending interrupt, disable interrupts */
- if (type == TYPE_TWIN) {
- inb(card_base + TWIN_CLR_TMR1);
- } else {
- write_scc(priv, R1, 0);
- write_scc(priv, R15, 0);
- write_scc(priv, R0, RES_EXT_INT);
- }
-
- if (irq <= 0) {
- printk(KERN_ERR
- "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
- hw[type].name, card_base, irq);
- err = -ENODEV;
- goto out3;
- }
-
- /* Set up data structures */
- for (i = 0; i < 2; i++) {
- dev = info->dev[i];
- priv = &info->priv[i];
- priv->type = type;
- priv->chip = chip;
- priv->dev = dev;
- priv->info = info;
- priv->channel = i;
- spin_lock_init(&priv->ring_lock);
- priv->register_lock = &info->register_lock;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
- priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
- priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
- priv->tmr_ctrl = tmr_base + TMR_CTRL;
- priv->tmr_mode = i ? 0xb0 : 0x70;
- priv->param.pclk_hz = hw[type].pclk_hz;
- priv->param.brg_tc = -1;
- priv->param.clocks = TCTRxCP | RCRTxCP;
- priv->param.persist = 256;
- priv->param.dma = -1;
- INIT_WORK(&priv->rx_work, rx_bh);
- dev->ml_priv = priv;
- snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
- dev->base_addr = card_base;
- dev->irq = irq;
- dev->netdev_ops = &scc_netdev_ops;
- dev->header_ops = &ax25_header_ops;
- }
- if (register_netdev(info->dev[0])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[0]->name);
- err = -ENODEV;
- goto out3;
- }
- if (register_netdev(info->dev[1])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[1]->name);
- err = -ENODEV;
- goto out4;
- }
-
-
- info->next = first;
- first = info;
- printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
- hw[type].name, chipnames[chip], card_base, irq);
- return 0;
-
- out4:
- unregister_netdev(info->dev[0]);
- out3:
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- free_netdev(info->dev[1]);
- out2:
- free_netdev(info->dev[0]);
- out1:
- kfree(info);
- out:
- return err;
-}
-
-
-/* Driver functions */
-
-static void write_scc(struct scc_priv *priv, int reg, int val)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- outb(val, priv->scc_cmd);
- return;
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- return;
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return;
- }
-}
-
-
-static void write_scc_data(struct scc_priv *priv, int val, int fast)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- outb(val, priv->scc_data);
- return;
- case TYPE_TWIN:
- outb_p(val, priv->scc_data);
- return;
- default:
- if (fast)
- outb_p(val, priv->scc_data);
- else {
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- outb_p(val, priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- }
- return;
- }
-}
-
-
-static int read_scc(struct scc_priv *priv, int reg)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- return inb(priv->scc_cmd);
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- return inb_p(priv->scc_cmd);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- rc = inb_p(priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int read_scc_data(struct scc_priv *priv)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- return inb(priv->scc_data);
- case TYPE_TWIN:
- return inb_p(priv->scc_data);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- rc = inb_p(priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int scc_open(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- /* Request IRQ if not already used by other channel */
- if (!info->irq_used) {
- if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
- return -EAGAIN;
- }
- }
- info->irq_used++;
-
- /* Request DMA if required */
- if (priv->param.dma >= 0) {
- if (request_dma(priv->param.dma, "dmascc")) {
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
- return -EAGAIN;
- } else {
- unsigned long flags = claim_dma_lock();
- clear_dma_ff(priv->param.dma);
- release_dma_lock(flags);
- }
- }
-
- /* Initialize local variables */
- priv->rx_ptr = 0;
- priv->rx_over = 0;
- priv->rx_head = priv->rx_tail = priv->rx_count = 0;
- priv->state = IDLE;
- priv->tx_head = priv->tx_tail = priv->tx_count = 0;
- priv->tx_ptr = 0;
-
- /* Reset channel */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- /* X1 clock, SDLC mode */
- write_scc(priv, R4, SDLC | X1CLK);
- /* DMA */
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* 8 bit RX char, RX disable */
- write_scc(priv, R3, Rx8);
- /* 8 bit TX char, TX disable */
- write_scc(priv, R5, Tx8);
- /* SDLC address field */
- write_scc(priv, R6, 0);
- /* SDLC flag */
- write_scc(priv, R7, FLAG);
- switch (priv->chip) {
- case Z85C30:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* Auto EOM reset */
- write_scc(priv, R7, AUTOEOM);
- write_scc(priv, R15, 0);
- break;
- case Z85230:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* The following bits are set (see 2.5.2.1):
- - Automatic EOM reset
- - Interrupt request if RX FIFO is half full
- This bit should be ignored in DMA mode (according to the
- documentation), but actually isn't. The receiver doesn't work if
- it is set. Thus, we have to clear it in DMA mode.
- - Interrupt/DMA request if TX FIFO is completely empty
- a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
- compatibility).
- b) If cleared, DMA requests may follow each other very quickly,
- filling up the TX FIFO.
- Advantage: TX works even in case of high bus latency.
- Disadvantage: Edge-triggered DMA request circuitry may miss
- a request. No more data is delivered, resulting
- in a TX FIFO underrun.
- Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
- The PackeTwin doesn't. I don't know about the PI, but let's
- assume it behaves like the PI2.
- */
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- write_scc(priv, R7, AUTOEOM | TXFIFOE);
- else
- write_scc(priv, R7, AUTOEOM);
- } else {
- write_scc(priv, R7, AUTOEOM | RXFIFOH);
- }
- write_scc(priv, R15, 0);
- break;
- }
- /* Preset CRC, NRZ(I) encoding */
- write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
-
- /* Configure baud rate generator */
- if (priv->param.brg_tc >= 0) {
- /* Program BR generator */
- write_scc(priv, R12, priv->param.brg_tc & 0xFF);
- write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
- /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
- PackeTwin, not connected on the PI2); set DPLL source to BRG */
- write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
- /* Enable DPLL */
- write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
- } else {
- /* Disable BR generator */
- write_scc(priv, R14, DTRREQ | BRSRC);
- }
-
- /* Configure clocks */
- if (priv->type == TYPE_TWIN) {
- /* Disable external TX clock receiver */
- outb((info->twin_serial_cfg &=
- ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
- write_scc(priv, R11, priv->param.clocks);
- if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
- /* Enable external TX clock receiver */
- outb((info->twin_serial_cfg |=
- (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Configure PackeTwin */
- if (priv->type == TYPE_TWIN) {
- /* Assert DTR, enable interrupts */
- outb((info->twin_serial_cfg |= TWIN_EI |
- (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Read current status */
- priv->rr0 = read_scc(priv, R0);
- /* Enable DCD interrupt */
- write_scc(priv, R15, DCDIE);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-
-static int scc_close(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- netif_stop_queue(dev);
-
- if (priv->type == TYPE_TWIN) {
- /* Drop DTR */
- outb((info->twin_serial_cfg &=
- (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Reset channel, free DMA and IRQ */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- outb(0, card_base + TWIN_DMA_CFG);
- free_dma(priv->param.dma);
- }
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
-
- return 0;
-}
-
-
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
-{
- struct scc_priv *priv = dev->ml_priv;
-
- switch (cmd) {
- case SIOCGSCCPARAM:
- if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- case SIOCSSCCPARAM:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (netif_running(dev))
- return -EAGAIN;
- if (copy_from_user(&priv->param, data,
- sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- unsigned long flags;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP))
- return ax25_ip_xmit(skb);
-
- /* Temporarily stop the scheduler feeding us packets */
- netif_stop_queue(dev);
-
- /* Transfer data to DMA buffer */
- i = priv->tx_head;
- skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
- priv->tx_len[i] = skb->len - 1;
-
- /* Clear interrupts while we touch our circular buffers */
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move the ring buffer's head */
- priv->tx_head = (i + 1) % NUM_TX_BUF;
- priv->tx_count++;
-
- /* If we just filled up the last buffer, leave queue stopped.
- The higher layers must wait until we have a DMA buffer
- to accept the data. */
- if (priv->tx_count < NUM_TX_BUF)
- netif_wake_queue(dev);
-
- /* Set new TX state */
- if (priv->state == IDLE) {
- /* Assert RTS, start timer */
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- }
-
- /* Turn interrupts back on and free buffer */
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-
-static int scc_set_mac_address(struct net_device *dev, void *sa)
-{
- dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
- return 0;
-}
-
-
-static inline void tx_on(struct scc_priv *priv)
-{
- int i, n;
- unsigned long flags;
-
- if (priv->param.dma >= 0) {
- n = (priv->chip == Z85230) ? 3 : 1;
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
- set_dma_count(priv->param.dma,
- priv->tx_len[priv->tx_tail] - n);
- release_dma_lock(flags);
- /* Enable TX underrun interrupt */
- write_scc(priv, R15, TxUIE);
- /* Configure DREQ */
- if (priv->type == TYPE_TWIN)
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
- priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN |
- WT_RDY_ENAB);
- /* Write first byte(s) */
- spin_lock_irqsave(priv->register_lock, flags);
- for (i = 0; i < n; i++)
- write_scc_data(priv,
- priv->tx_buf[priv->tx_tail][i], 1);
- enable_dma(priv->param.dma);
- spin_unlock_irqrestore(priv->register_lock, flags);
- } else {
- write_scc(priv, R15, TxUIE);
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
- tx_isr(priv);
- }
- /* Reset EOM latch if we do not have the AUTOEOM feature */
- if (priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-}
-
-
-static inline void rx_on(struct scc_priv *priv)
-{
- unsigned long flags;
-
- /* Clear RX FIFO */
- while (read_scc(priv, R0) & Rx_CH_AV)
- read_scc_data(priv);
- priv->rx_over = 0;
- if (priv->param.dma >= 0) {
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_READ);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- enable_dma(priv->param.dma);
- /* Configure PackeTwin DMA */
- if (priv->type == TYPE_TWIN) {
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
- priv->card_base + TWIN_DMA_CFG);
- }
- /* Sp. cond. intr. only, ext int enable, RX DMA enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
- WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
- } else {
- /* Reset current frame */
- priv->rx_ptr = 0;
- /* Intr. on all Rx characters and Sp. cond., ext int enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
- WT_FN_RDYFN);
- }
- write_scc(priv, R0, ERR_RES);
- write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
-}
-
-
-static inline void rx_off(struct scc_priv *priv)
-{
- /* Disable receiver */
- write_scc(priv, R3, Rx8);
- /* Disable DREQ / RX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* Disable DMA */
- if (priv->param.dma >= 0)
- disable_dma(priv->param.dma);
-}
-
-
-static void start_timer(struct scc_priv *priv, int t, int r15)
-{
- outb(priv->tmr_mode, priv->tmr_ctrl);
- if (t == 0) {
- tm_isr(priv);
- } else if (t > 0) {
- outb(t & 0xFF, priv->tmr_cnt);
- outb((t >> 8) & 0xFF, priv->tmr_cnt);
- if (priv->type != TYPE_TWIN) {
- write_scc(priv, R15, r15 | CTSIE);
- priv->rr0 |= CTS;
- }
- }
-}
-
-
-static inline unsigned char random(void)
-{
- /* See "Numerical Recipes in C", second edition, p. 284 */
- rand = rand * 1664525L + 1013904223L;
- return (unsigned char) (rand >> 24);
-}
-
-static inline void z8530_isr(struct scc_info *info)
-{
- int is, i = 100;
-
- while ((is = read_scc(&info->priv[0], R3)) && i--) {
- if (is & CHARxIP) {
- rx_isr(&info->priv[0]);
- } else if (is & CHATxIP) {
- tx_isr(&info->priv[0]);
- } else if (is & CHAEXT) {
- es_isr(&info->priv[0]);
- } else if (is & CHBRxIP) {
- rx_isr(&info->priv[1]);
- } else if (is & CHBTxIP) {
- tx_isr(&info->priv[1]);
- } else {
- es_isr(&info->priv[1]);
- }
- write_scc(&info->priv[0], R0, RES_H_IUS);
- i++;
- }
- if (i < 0) {
- printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
- is);
- }
- /* Ok, no interrupts pending from this 8530. The INT line should
- be inactive now. */
-}
-
-
-static irqreturn_t scc_isr(int irq, void *dev_id)
-{
- struct scc_info *info = dev_id;
-
- spin_lock(info->priv[0].register_lock);
- /* At this point interrupts are enabled, and the interrupt under service
- is already acknowledged, but masked off.
-
- Interrupt processing: We loop until we know that the IRQ line is
- low. If another positive edge occurs afterwards during the ISR,
- another interrupt will be triggered by the interrupt controller
- as soon as the IRQ level is enabled again (see asm/irq.h).
-
- Bottom-half handlers will be processed after scc_isr(). This is
- important, since we only have small ringbuffers and want new data
- to be fetched/delivered immediately. */
-
- if (info->priv[0].type == TYPE_TWIN) {
- int is, card_base = info->priv[0].card_base;
- while ((is = ~inb(card_base + TWIN_INT_REG)) &
- TWIN_INT_MSK) {
- if (is & TWIN_SCC_MSK) {
- z8530_isr(info);
- } else if (is & TWIN_TMR1_MSK) {
- inb(card_base + TWIN_CLR_TMR1);
- tm_isr(&info->priv[0]);
- } else {
- inb(card_base + TWIN_CLR_TMR2);
- tm_isr(&info->priv[1]);
- }
- }
- } else
- z8530_isr(info);
- spin_unlock(info->priv[0].register_lock);
- return IRQ_HANDLED;
-}
-
-
-static void rx_isr(struct scc_priv *priv)
-{
- if (priv->param.dma >= 0) {
- /* Check special condition and perform error reset. See 2.4.7.5. */
- special_condition(priv, read_scc(priv, R1));
- write_scc(priv, R0, ERR_RES);
- } else {
- /* Check special condition for each character. Error reset not necessary.
- Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
- int rc;
- while (read_scc(priv, R0) & Rx_CH_AV) {
- rc = read_scc(priv, R1);
- if (priv->rx_ptr < BUF_SIZE)
- priv->rx_buf[priv->rx_head][priv->
- rx_ptr++] =
- read_scc_data(priv);
- else {
- priv->rx_over = 2;
- read_scc_data(priv);
- }
- special_condition(priv, rc);
- }
- }
-}
-
-
-static void special_condition(struct scc_priv *priv, int rc)
-{
- int cb;
- unsigned long flags;
-
- /* See Figure 2-15. Only overrun and EOF need to be checked. */
-
- if (rc & Rx_OVR) {
- /* Receiver overrun */
- priv->rx_over = 1;
- if (priv->param.dma < 0)
- write_scc(priv, R0, ERR_RES);
- } else if (rc & END_FR) {
- /* End of frame. Get byte count */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
- 2;
- release_dma_lock(flags);
- } else {
- cb = priv->rx_ptr - 2;
- }
- if (priv->rx_over) {
- /* We had an overrun */
- priv->dev->stats.rx_errors++;
- if (priv->rx_over == 2)
- priv->dev->stats.rx_length_errors++;
- else
- priv->dev->stats.rx_fifo_errors++;
- priv->rx_over = 0;
- } else if (rc & CRC_ERR) {
- /* Count invalid CRC only if packet length >= minimum */
- if (cb >= 15) {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_crc_errors++;
- }
- } else {
- if (cb >= 15) {
- if (priv->rx_count < NUM_RX_BUF - 1) {
- /* Put good frame in FIFO */
- priv->rx_len[priv->rx_head] = cb;
- priv->rx_head =
- (priv->rx_head +
- 1) % NUM_RX_BUF;
- priv->rx_count++;
- schedule_work(&priv->rx_work);
- } else {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_over_errors++;
- }
- }
- }
- /* Get ready for new frame */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- } else {
- priv->rx_ptr = 0;
- }
- }
-}
-
-
-static void rx_bh(struct work_struct *ugli_api)
-{
- struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
- int i = priv->rx_tail;
- int cb;
- unsigned long flags;
- struct sk_buff *skb;
- unsigned char *data;
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- while (priv->rx_count) {
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- cb = priv->rx_len[i];
- /* Allocate buffer */
- skb = dev_alloc_skb(cb + 1);
- if (skb == NULL) {
- /* Drop packet */
- priv->dev->stats.rx_dropped++;
- } else {
- /* Fill buffer */
- data = skb_put(skb, cb + 1);
- data[0] = 0;
- memcpy(&data[1], priv->rx_buf[i], cb);
- skb->protocol = ax25_type_trans(skb, priv->dev);
- netif_rx(skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += cb;
- }
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move tail */
- priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
- priv->rx_count--;
- }
- spin_unlock_irqrestore(&priv->ring_lock, flags);
-}
-
-
-static void tx_isr(struct scc_priv *priv)
-{
- int i = priv->tx_tail, p = priv->tx_ptr;
-
- /* Suspend TX interrupts if we don't want to send anything.
- See Figure 2-22. */
- if (p == priv->tx_len[i]) {
- write_scc(priv, R0, RES_Tx_P);
- return;
- }
-
- /* Write characters */
- while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
- write_scc_data(priv, priv->tx_buf[i][p++], 0);
- }
-
- /* Reset EOM latch of Z8530 */
- if (!priv->tx_ptr && p && priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-
- priv->tx_ptr = p;
-}
-
-
-static void es_isr(struct scc_priv *priv)
-{
- int i, rr0, drr0, res;
- unsigned long flags;
-
- /* Read status, reset interrupt bit (open latches) */
- rr0 = read_scc(priv, R0);
- write_scc(priv, R0, RES_EXT_INT);
- drr0 = priv->rr0 ^ rr0;
- priv->rr0 = rr0;
-
- /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
- it might have already been cleared again by AUTOEOM. */
- if (priv->state == TX_DATA) {
- /* Get remaining bytes */
- i = priv->tx_tail;
- if (priv->param.dma >= 0) {
- disable_dma(priv->param.dma);
- flags = claim_dma_lock();
- res = get_dma_residue(priv->param.dma);
- release_dma_lock(flags);
- } else {
- res = priv->tx_len[i] - priv->tx_ptr;
- priv->tx_ptr = 0;
- }
- /* Disable DREQ / TX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- if (res) {
- /* Update packet statistics */
- priv->dev->stats.tx_errors++;
- priv->dev->stats.tx_fifo_errors++;
- /* Other underrun interrupts may already be waiting */
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R0, RES_EXT_INT);
- } else {
- /* Update packet statistics */
- priv->dev->stats.tx_packets++;
- priv->dev->stats.tx_bytes += priv->tx_len[i];
- /* Remove frame from FIFO */
- priv->tx_tail = (i + 1) % NUM_TX_BUF;
- priv->tx_count--;
- /* Inform upper layers */
- netif_wake_queue(priv->dev);
- }
- /* Switch state */
- write_scc(priv, R15, 0);
- if (priv->tx_count &&
- (jiffies - priv->tx_start) < priv->param.txtimeout) {
- priv->state = TX_PAUSE;
- start_timer(priv, priv->param.txpause, 0);
- } else {
- priv->state = TX_TAIL;
- start_timer(priv, priv->param.txtail, 0);
- }
- }
-
- /* DCD transition */
- if (drr0 & DCD) {
- if (rr0 & DCD) {
- switch (priv->state) {
- case IDLE:
- case WAIT:
- priv->state = DCD_ON;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdon, 0);
- }
- } else {
- switch (priv->state) {
- case RX_ON:
- rx_off(priv);
- priv->state = DCD_OFF;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdoff, 0);
- }
- }
- }
-
- /* CTS transition */
- if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
- tm_isr(priv);
-
-}
-
-
-static void tm_isr(struct scc_priv *priv)
-{
- switch (priv->state) {
- case TX_HEAD:
- case TX_PAUSE:
- tx_on(priv);
- priv->state = TX_DATA;
- break;
- case TX_TAIL:
- write_scc(priv, R5, TxCRC_ENAB | Tx8);
- priv->state = RTS_OFF;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.rtsoff, 0);
- break;
- case RTS_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- priv->dev->stats.collisions++;
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv, priv->param.waittime, DCDIE);
- }
- break;
- case WAIT:
- if (priv->tx_count) {
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5,
- TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- } else {
- priv->state = IDLE;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, DCDIE);
- }
- break;
- case DCD_ON:
- case DCD_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv,
- random() / priv->param.persist *
- priv->param.slottime, DCDIE);
- }
- break;
- }
-}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8297411e87ea..2263029d1a20 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -377,7 +377,7 @@ void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
if ((--s->hdlctx.slotcnt) > 0)
return;
s->hdlctx.slotcnt = s->ch_params.slottime;
- if ((prandom_u32() % 256) > s->ch_params.ppersist)
+ if (get_random_u8() > s->ch_params.ppersist)
return;
start_tx(dev, s);
}
@@ -600,7 +600,7 @@ static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_DRIVERNAME:
if (s->ops && s->ops->drvname) {
- strlcpy(bi.data.drivername, s->ops->drvname,
+ strscpy(bi.data.drivername, s->ops->drvname,
sizeof(bi.data.drivername));
break;
}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index edde9c3ae12b..c251e04ae047 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -806,8 +806,8 @@ static void mkiss_close(struct tty_struct *tty)
}
/* Perform I/O control on an active ax25 channel. */
-static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct mkiss *ax = mkiss_get(tty);
struct net_device *dev;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6376b8485976..2ed2f836f09a 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -626,7 +626,7 @@ static void yam_arbitrate(struct net_device *dev)
yp->slotcnt = yp->slot / 10;
/* is random > persist ? */
- if ((prandom_u32() % 256) > yp->pers)
+ if (get_random_u8() > yp->pers)
return;
yam_start_tx(dev, yp);
@@ -950,9 +950,7 @@ static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __
ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
- if (ym->cmd != SIOCYAMSMCS)
- return -EINVAL;
- if (ym->bitrate > YAM_MAXBITRATE) {
+ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
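
The hdlcdrv and yam hunks above both replace prandom_u32() % 256 with get_random_u8() in their p-persistence CSMA arbitration: the two draws are identically distributed over 0..255, but get_random_u8() takes bytes from the kernel's batched CSPRNG output and drops the modulo. A minimal sketch of the decision step; the helper name is illustrative, both drivers inline this test:

#include <linux/random.h>

/*
 * p-persistence arbitration as in hdlcdrv_arbitrate()/yam_arbitrate():
 * on each free slot, transmit with probability (ppersist + 1) / 256.
 */
static bool ppersist_may_transmit(u8 ppersist)
{
	/* get_random_u8() returns a uniform value in 0..255 */
	return get_random_u8() <= ppersist;
}
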
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 16105292b140..aa8f828a0ae7 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
pci_release_regions(pdev);
+ pci_disable_device(pdev);
out2:
free_netdev(dev);
out3:
@@ -1355,7 +1356,9 @@ static int rr_close(struct net_device *dev)
rrpriv->fw_running = 0;
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
del_timer_sync(&rrpriv->timer);
+ spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
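
The rr_close() hunk drops rrpriv->lock around del_timer_sync() because the timer callback can take the same lock: del_timer_sync() waits for a running callback to finish, so calling it with the lock held can deadlock. A hedged sketch of the pattern, with illustrative names rather than the rrunner driver's:

#include <linux/spinlock.h>
#include <linux/timer.h>

/* Illustrative private struct; not the rrunner driver's. */
struct ex_priv {
	spinlock_t lock;
	struct timer_list timer;
};

static void ex_close(struct ex_priv *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	/* ... teardown that needs the lock ... */
	spin_unlock_irqrestore(&p->lock, flags);
	del_timer_sync(&p->timer);	/* callback may take p->lock */
	spin_lock_irqsave(&p->lock, flags);
	/* ... teardown that needs the lock again ... */
	spin_unlock_irqrestore(&p->lock, flags);
}
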
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 315278a7cf88..dd5919ec408b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/hyperv.h>
#include <linux/rndis.h>
+#include <linux/jhash.h>
/* RSS related */
#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
@@ -164,6 +165,7 @@ struct hv_netvsc_packet {
u32 total_bytes;
u32 send_buf_index;
u32 total_data_buflen;
+ struct hv_dma_range *dma_range;
};
#define NETVSC_HASH_KEYLEN 40
@@ -236,6 +238,7 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev);
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp);
unsigned int netvsc_xdp_fraglen(unsigned int len);
@@ -245,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netvsc_device *nvdev);
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags);
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
@@ -941,12 +946,21 @@ struct nvsc_rsc {
#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
-struct netvsc_stats {
+struct netvsc_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_stats_rx {
u64 packets;
u64 bytes;
u64 broadcast;
u64 multicast;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
struct u64_stats_sync syncp;
};
@@ -1037,7 +1051,8 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
-
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
/* Is the current data path through the VF NIC? */
bool data_path_is_vf;
@@ -1045,6 +1060,55 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb,
+ const struct net_device_context *ndc)
+{
+ struct flow_keys flow;
+ u32 hash, pkt_proto = 0;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ return 0;
+
+ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+}
+
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
@@ -1059,9 +1123,10 @@ struct netvsc_channel {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ bool xdp_flush;
- struct netvsc_stats tx_stats;
- struct netvsc_stats rx_stats;
+ struct netvsc_stats_tx tx_stats;
+ struct netvsc_stats_rx rx_stats;
};
/* Per netvsc device */
@@ -1074,6 +1139,7 @@ struct netvsc_device {
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
+ void *recv_original_buf;
u32 recv_buf_size; /* allocated bytes */
struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
@@ -1082,6 +1148,7 @@ struct netvsc_device {
/* Send buffer allocated by us */
void *send_buf;
+ void *send_original_buf;
u32 send_buf_size;
struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
@@ -1731,4 +1798,6 @@ struct rndis_message {
#define RETRY_US_HI 10000
#define RETRY_MAX 2000 /* >10 sec */
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet);
#endif /* _HYPERV_NET_H */
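
Splitting netvsc_stats into netvsc_stats_tx and netvsc_stats_rx lets each direction carry only its own XDP counters while keeping the usual u64_stats_sync discipline. A minimal writer-side sketch, with an illustrative struct mirroring netvsc_stats_tx:

#include <linux/u64_stats_sync.h>

/* Illustrative counter block mirroring netvsc_stats_tx above. */
struct ex_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	struct u64_stats_sync syncp;
};

static void ex_stats_tx_add(struct ex_stats_tx *s, u64 len)
{
	u64_stats_update_begin(&s->syncp);	/* open writer section */
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}
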
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5086cd07d1ed..9352dad58996 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/filter.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
@@ -153,8 +154,17 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (nvdev->recv_original_buf)
+ vfree(nvdev->recv_original_buf);
+ else
+ vfree(nvdev->recv_buf);
+
+ if (nvdev->send_original_buf)
+ vfree(nvdev->send_original_buf);
+ else
+ vfree(nvdev->send_buf);
+
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
@@ -337,6 +347,7 @@ static int netvsc_init_buf(struct hv_device *device,
struct nvsp_message *init_packet;
unsigned int buf_size;
int i, ret = 0;
+ void *vaddr;
/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -372,6 +383,17 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->recv_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->recv_original_buf = net_device->recv_buf;
+ net_device->recv_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -475,6 +497,17 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->send_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->send_original_buf = net_device->send_buf;
+ net_device->send_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -729,6 +762,12 @@ void netvsc_device_remove(struct hv_device *device)
netvsc_teardown_send_gpadl(device, net_device, ndev);
}
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
/* Release all resources */
free_netvsc_device_rcu(net_device);
}
@@ -754,9 +793,9 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
int queue_sends;
u64 cmd_rqst;
- cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
+ cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
- netdev_err(ndev, "Incorrect transaction id\n");
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
@@ -764,10 +803,10 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
/* Notify the layer above us */
if (likely(skb)) {
- const struct hv_netvsc_packet *packet
+ struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
- struct netvsc_stats *tx_stats;
+ struct netvsc_stats_tx *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
@@ -780,6 +819,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
napi_consume_skb(skb, budget);
}
@@ -815,9 +855,9 @@ static void netvsc_send_completion(struct net_device *ndev,
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
- (u64)desc->trans_id);
+ desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
- netdev_err(ndev, "Invalid transaction id\n");
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
@@ -944,6 +984,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
memset(dest, 0, padding);
}
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return;
+
+ if (!packet->dma_range)
+ return;
+
+ for (i = 0; i < page_count; i++)
+ dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+ packet->dma_range[i].mapping_size,
+ DMA_TO_DEVICE);
+
+ kfree(packet->dma_range);
+}
+
+/* netvsc_dma_map - Map the data pages of a packet sent by
+ * vmbus_sendpacket_pagebuffer() through the swiotlb bounce buffer
+ * in an Isolation VM.
+ *
+ * In an Isolation VM, the netvsc send buffer has been marked visible
+ * to the host, so data copied into the send buffer needs no bounce
+ * buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
+ * may not be copied to the send buffer, so those pages must be
+ * mapped through the swiotlb bounce buffer; netvsc_dma_map() does
+ * that. The pfns in struct hv_page_buffer must be converted to the
+ * bounce buffer's pfns. The loop is necessary because the entries in
+ * the page buffer array are not necessarily full pages of data: each
+ * entry has its own offset and len that may be non-zero even for
+ * entries in the middle of the array, and the entries are not
+ * physically contiguous. Each entry must therefore be mapped
+ * individually rather than as one contiguous unit, which is why
+ * dma_map_sg() is not used here.
+ */
+static int netvsc_dma_map(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet,
+ struct hv_page_buffer *pb)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ dma_addr_t dma;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return 0;
+
+ packet->dma_range = kcalloc(page_count,
+ sizeof(*packet->dma_range),
+ GFP_KERNEL);
+ if (!packet->dma_range)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ + pb[i].offset);
+ u32 len = pb[i].len;
+
+ dma = dma_map_single(&hv_dev->device, src, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hv_dev->device, dma)) {
+ kfree(packet->dma_range);
+ return -ENOMEM;
+ }
+
+ /* pb[].offset and pb[].len are not changed by the DMA mapping,
+ * so they are not reassigned.
+ */
+ packet->dma_range[i].dma = dma;
+ packet->dma_range[i].mapping_size = len;
+ pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
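
The hunk below wires netvsc_dma_map() into the send path. A condensed sketch of the resulting map/unmap contract; names follow the patch, but this is not the driver's literal code:

/*
 * Map just before sending, unmap on immediate failure; on success the
 * unmap happens later, in netvsc_send_tx_complete().
 */
static int ex_send_mapped(struct hv_device *hv_dev,
			  struct vmbus_channel *chan,
			  struct hv_netvsc_packet *packet,
			  struct hv_page_buffer *pb,
			  void *msg, u32 msglen, u64 req_id)
{
	int ret = netvsc_dma_map(hv_dev, packet, pb);

	if (ret)
		return -EAGAIN;		/* the stack will retry the skb */

	ret = vmbus_sendpacket_pagebuffer(chan, pb, packet->page_buf_cnt,
					  msg, msglen, req_id);
	if (ret)			/* host never saw the pages */
		netvsc_dma_unmap(hv_dev, packet);

	return ret;
}
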
@@ -984,14 +1106,24 @@ static inline int netvsc_send_pkt(
trace_nvsp_send_pkt(ndev, out_channel, rpkt);
+ packet->dma_range = NULL;
if (packet->page_buf_cnt) {
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
+ ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+ if (ret) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);
+
+ if (ret)
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
ret = vmbus_sendpacket(out_channel,
&nvmsg, sizeof(nvmsg),
@@ -999,6 +1131,7 @@ static inline int netvsc_send_pkt(
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
+exit:
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
@@ -1447,6 +1580,10 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
@@ -1498,7 +1635,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
case VM_PKT_DATA_USING_XFER_PAGES:
return netvsc_receive(ndev, net_device, nvchan, desc);
- break;
case VM_PKT_DATA_INBAND:
netvsc_receive_inband(ndev, net_device, desc);
@@ -1539,12 +1675,17 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (!nvchan->desc)
nvchan->desc = hv_pkt_iter_first(channel);
+ nvchan->xdp_flush = false;
+
while (nvchan->desc && work_done < budget) {
work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
+ if (nvchan->xdp_flush)
+ xdp_do_flush();
+
/* Send any pending receive completions */
ret = send_recv_completions(ndev, net_device, nvchan);
@@ -1642,8 +1783,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
}
/* Enable NAPI handler before init callbacks */
- netif_napi_add(ndev, &net_device->chan_table[0].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1691,6 +1831,12 @@ cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 7856905414eb..4a9522689fa4 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
@@ -23,11 +24,13 @@
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
void *data = nvchan->rsc.data[0];
u32 len = nvchan->rsc.len[0];
struct page *page = NULL;
struct bpf_prog *prog;
u32 act = XDP_PASS;
+ bool drop = true;
xdp->data_hard_start = NULL;
@@ -60,9 +63,34 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
switch (act) {
case XDP_PASS:
case XDP_TX:
+ drop = false;
+ break;
+
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ if (!xdp_do_redirect(ndev, xdp, prog)) {
+ nvchan->xdp_flush = true;
+ drop = false;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+
+ rx_stats->xdp_redirect++;
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ } else {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
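
The redirect arm pairs with the netvsc_poll() hunk earlier: xdp_do_redirect() only queues the frame, while xdp_do_flush() actually kicks the redirect maps, and running it once per NAPI poll amortizes that cost over the whole batch. The two halves of the recipe, condensed (fragment, assuming the names used in the patch):

	/* rx path, per packet: queue the frame and note a flush is owed */
	if (!xdp_do_redirect(ndev, xdp, prog))
		nvchan->xdp_flush = true;

	/* napi poll, per batch: flush once after the packet loop */
	if (nvchan->xdp_flush)
		xdp_do_flush();
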
@@ -74,7 +102,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
out:
rcu_read_unlock();
- if (page && act != XDP_PASS && act != XDP_TX) {
+ if (page && drop) {
__free_page(page);
xdp->data_hard_start = NULL;
}
@@ -137,7 +165,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
- bpf_op_t ndo_bpf;
int ret;
ASSERT_RTNL();
@@ -145,8 +172,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
if (!vf_netdev)
return 0;
- ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
- if (!ndo_bpf)
+ if (!vf_netdev->netdev_ops->ndo_bpf)
return 0;
memset(&xdp, 0, sizeof(xdp));
@@ -157,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = ndo_bpf(vf_netdev, &xdp);
+ ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
@@ -199,3 +225,68 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return -EINVAL;
}
}
+
+static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
+ struct xdp_frame *frame, u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ netvsc_get_hash(skb, netdev_priv(ndev));
+
+ skb_record_rx_queue(skb, q_idx);
+
+ netvsc_xdp_xmit(skb, ndev);
+
+ return 0;
+}
+
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ const struct net_device_ops *vf_ops;
+ struct netvsc_stats_tx *tx_stats;
+ struct netvsc_device *nvsc_dev;
+ struct net_device *vf_netdev;
+ int i, count = 0;
+ u16 q_idx;
+
+ /* Don't transmit if netvsc_device is gone */
+ nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
+ if (unlikely(!nvsc_dev || nvsc_dev->destroy))
+ return 0;
+
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+ * If netpoll is in use, then the VF cannot be used either.

+ */
+ vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
+ vf_netdev->netdev_ops->ndo_xdp_xmit &&
+ ndev_ctx->data_path_is_vf) {
+ vf_ops = vf_netdev->netdev_ops;
+ return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
+ }
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
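
netvsc_ndoxdp_xmit() implements the usual .ndo_xdp_xmit contract: it either hands the whole batch to the VF's own ndo_xdp_xmit when the data path is the VF, or returns how many of the n frames it consumed; frames past the returned count stay owned by the caller. A small caller-side sketch under that contract (fragment, not kernel core code):

	/* caller-side view of .ndo_xdp_xmit; unsent frames are returned */
	int i, sent;

	sent = ndev->netdev_ops->ndo_xdp_xmit(ndev, n, frames, flags);
	if (sent < 0)
		sent = 0;			/* error: nothing consumed */
	for (i = sent; i < n; i++)
		xdp_return_frame(frames[i]);	/* caller frees the rest */
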
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index efa963b7af54..89eb4f179a3c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -242,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg,
return ppi + 1;
}
-/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
- * packets. We can use ethtool to change UDP hash level when necessary.
- */
-static inline u32 netvsc_get_hash(
- struct sk_buff *skb,
- const struct net_device_context *ndc)
-{
- struct flow_keys flow;
- u32 hash, pkt_proto = 0;
- static u32 hashrnd __read_mostly;
-
- net_get_random_once(&hashrnd, sizeof(hashrnd));
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
- return 0;
-
- switch (flow.basic.ip_proto) {
- case IPPROTO_TCP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_TCP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_TCP6_L4HASH;
-
- break;
-
- case IPPROTO_UDP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_UDP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_UDP6_L4HASH;
-
- break;
- }
-
- if (pkt_proto & ndc->l4_hash) {
- return skb_get_hash(skb);
- } else {
- if (flow.basic.n_proto == htons(ETH_P_IP))
- hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
- else
- return 0;
-
- __skb_set_sw_hash(skb, hash, false);
- }
-
- return hash;
-}
-
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
{
@@ -804,7 +754,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
}
/* This function should only be called after skb_record_rx_queue() */
-static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -925,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
struct xdp_buff xdp;
u32 act;
@@ -934,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net,
act = netvsc_run_xdp(net, nvchan, &xdp);
+ if (act == XDP_REDIRECT)
+ return NVSP_STAT_SUCCESS;
+
if (act != XDP_PASS && act != XDP_TX) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
@@ -958,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net,
* statistics will not work correctly.
*/
u64_stats_update_begin(&rx_stats->syncp);
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -979,8 +935,8 @@ int netvsc_recv_callback(struct net_device *net,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void netvsc_get_channels(struct net_device *net,
@@ -1353,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
u64 packets, bytes;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
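
The readers above keep the begin/retry loop so the 64-bit counters are snapshotted consistently even on 32-bit kernels, where a u64 load is not atomic; with the split structs, each direction now retries against its own syncp. The generic shape of the loop (fragment, stats standing in for either direction):

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		packets = stats->packets;	/* consistent snapshot */
		bytes = stats->bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
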
@@ -1406,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net,
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
u64 packets, bytes, multicast;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- multicast = stats->multicast + stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ multicast = rx_stats->multicast + rx_stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
@@ -1515,8 +1473,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
+/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1543,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
- const struct netvsc_stats *qstats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ u64 xdp_xmit;
int i, j, cpu;
if (!nvdev)
@@ -1562,31 +1524,40 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
- qstats = &nvdev->chan_table[j].tx_stats;
+ tx_stats = &nvdev->chan_table[j].tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
- qstats = &nvdev->chan_table[j].rx_stats;
+ rx_stats = &nvdev->chan_table[j].rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- xdp_drop = qstats->xdp_drop;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_redirect = rx_stats->xdp_redirect;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
+ data[i++] = xdp_redirect;
+ data[i++] = xdp_tx;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
@@ -1619,9 +1590,12 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
for (i = 0; i < nvdev->num_chn; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
ethtool_sprintf(&p, "rx_queue_%u_packets", i);
ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
}
for_each_present_cpu(cpu) {
@@ -2054,6 +2028,7 @@ static const struct net_device_ops device_ops = {
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
.ndo_bpf = netvsc_bpf,
+ .ndo_xdp_xmit = netvsc_ndoxdp_xmit,
};
/*
@@ -2338,6 +2313,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
}
+ /* Fallback path: match the VF to its synthetic NIC by
+ * comparing permanent MAC addresses.
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+ netdev_notice(vf_netdev,
+ "falling back to mac addr based matching\n");
+ return ndev;
+ }
+ }
+
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
return NULL;
@@ -2434,6 +2421,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
ret = netvsc_switch_datapath(ndev, vf_is_up);
if (ret) {
@@ -2465,6 +2457,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netvsc_vf_setxdp(vf_netdev, NULL);
+ reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2504,6 +2497,7 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+ init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
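
The vf_add completion gives the VF-up notifier something to block on until the host has actually announced the VF. Its lifecycle across the hunks above, condensed (ctx stands for the net_device_context; not the driver's literal code):

	init_completion(&ctx->vf_add);		/* netvsc_probe() */

	if (ctx->vf_alloc)			/* NVSP VF-association msg */
		complete(&ctx->vf_add);

	if (vf_is_up && !ctx->vf_alloc)		/* netvsc_vf_changed() */
		wait_for_completion(&ctx->vf_add);

	reinit_completion(&ctx->vf_add);	/* netvsc_unregister_vf() */
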
@@ -2516,6 +2510,7 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
+ dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
@@ -2667,7 +2662,10 @@ static int netvsc_suspend(struct hv_device *dev)
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
-
+ if (!ndev_ctx->saved_netvsc_dev_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f6c9c2a670f9..eea777ec2541 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
+#include <linux/string.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
@@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
- memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
data + RNDIS_HEADER_SIZE + sizeof(*req_id),
- resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
+ "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
@@ -361,6 +363,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
}
}
+ netvsc_dma_unmap(((struct net_device_context *)
+ netdev_priv(ndev))->device_ctx, &request->pkt);
complete(&request->wait_event);
} else {
netdev_err(ndev,
@@ -1347,7 +1351,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_MAX_SIZE;
+ unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
/* Find HW offload capabilities */
@@ -1429,7 +1433,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
- netif_set_gso_max_size(net, gso_max_size);
+ netif_set_tso_max_size(net, gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
@@ -1573,7 +1577,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
for (i = 1; i < net_device->num_chn; i++)
netif_napi_add(net, &net_device->chan_table[i].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netvsc_poll);
return net_device;
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 0f7c6dc2ed15..95da876c5613 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -33,13 +33,6 @@ config IEEE802154_AT86RF230
This driver can also be built as a module. To do so, say M here.
the module will be called 'at86rf230'.
-config IEEE802154_AT86RF230_DEBUGFS
- depends on IEEE802154_AT86RF230
- bool "AT86RF230 debugfs interface"
- depends on DEBUG_FS
- help
- This option compiles debugfs code for the at86rf230 driver.
-
config IEEE802154_MRF24J40
tristate "Microchip MRF24J40 transceiver driver"
depends on IEEE802154_DRIVERS && MAC802154
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 7db9cbd0f5de..5cf218c674a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1304,20 +1304,19 @@ err_alloc_wq:
return ret;
}
-static int adf7242_remove(struct spi_device *spi)
+static void adf7242_remove(struct spi_device *spi)
{
struct adf7242_local *lp = spi_get_drvdata(spi);
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
-
- return 0;
}
static const struct of_device_id adf7242_of_match[] = {
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 7d67f41387f5..15f283b26721 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,7 +23,6 @@
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
-#include <linux/debugfs.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
@@ -72,19 +71,11 @@ struct at86rf230_state_change {
void (*complete)(void *context);
u8 from_state;
u8 to_state;
+ int trac;
bool free;
};
-struct at86rf230_trac {
- u64 success;
- u64 success_data_pending;
- u64 success_wait_for_ack;
- u64 channel_access_failure;
- u64 no_ack;
- u64 invalid;
-};
-
struct at86rf230_local {
struct spi_device *spi;
@@ -100,11 +91,10 @@ struct at86rf230_local {
unsigned long cal_timeout;
bool is_tx;
bool is_tx_from_off;
+ bool was_tx;
u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
-
- struct at86rf230_trac trac;
};
#define AT86RF2XX_NUMREGS 0x3F
@@ -343,7 +333,10 @@ at86rf230_async_error_recover_complete(void *context)
if (ctx->free)
kfree(ctx);
- ieee802154_wake_queue(lp->hw);
+ if (lp->was_tx) {
+ lp->was_tx = 0;
+ ieee802154_xmit_hw_error(lp->hw, lp->tx_skb);
+ }
}
static void
@@ -352,7 +345,11 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- lp->is_tx = 0;
+ if (lp->is_tx) {
+ lp->was_tx = 1;
+ lp->is_tx = 0;
+ }
+
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_async_error_recover_complete);
}
@@ -644,7 +641,11 @@ at86rf230_tx_complete(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+ if (ctx->trac == IEEE802154_SUCCESS)
+ ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+ else
+ ieee802154_xmit_error(lp->hw, lp->tx_skb, ctx->trac);
+
kfree(ctx);
}
@@ -663,30 +664,21 @@ at86rf230_tx_trac_check(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
+ u8 trac = TRAC_MASK(ctx->buf[1]);
- if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
- u8 trac = TRAC_MASK(ctx->buf[1]);
-
- switch (trac) {
- case TRAC_SUCCESS:
- lp->trac.success++;
- break;
- case TRAC_SUCCESS_DATA_PENDING:
- lp->trac.success_data_pending++;
- break;
- case TRAC_CHANNEL_ACCESS_FAILURE:
- lp->trac.channel_access_failure++;
- break;
- case TRAC_NO_ACK:
- lp->trac.no_ack++;
- break;
- case TRAC_INVALID:
- lp->trac.invalid++;
- break;
- default:
- WARN_ONCE(1, "received tx trac status %d\n", trac);
- break;
- }
+ switch (trac) {
+ case TRAC_SUCCESS:
+ case TRAC_SUCCESS_DATA_PENDING:
+ ctx->trac = IEEE802154_SUCCESS;
+ break;
+ case TRAC_CHANNEL_ACCESS_FAILURE:
+ ctx->trac = IEEE802154_CHANNEL_ACCESS_FAILURE;
+ break;
+ case TRAC_NO_ACK:
+ ctx->trac = IEEE802154_NO_ACK;
+ break;
+ default:
+ ctx->trac = IEEE802154_SYSTEM_ERROR;
}
at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on);
@@ -728,25 +720,6 @@ at86rf230_rx_trac_check(void *context)
u8 *buf = ctx->buf;
int rc;
- if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
- u8 trac = TRAC_MASK(buf[1]);
-
- switch (trac) {
- case TRAC_SUCCESS:
- lp->trac.success++;
- break;
- case TRAC_SUCCESS_WAIT_FOR_ACK:
- lp->trac.success_wait_for_ack++;
- break;
- case TRAC_INVALID:
- lp->trac.invalid++;
- break;
- default:
- WARN_ONCE(1, "received rx trac status %d\n", trac);
- break;
- }
- }
-
buf[0] = CMD_FB;
ctx->trx.len = AT86RF2XX_MAX_BUF;
ctx->msg.complete = at86rf230_rx_read_frame_complete;
@@ -942,10 +915,6 @@ at86rf230_start(struct ieee802154_hw *hw)
{
struct at86rf230_local *lp = hw->priv;
- /* reset trac stats on start */
- if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS))
- memset(&lp->trac, 0, sizeof(struct at86rf230_trac));
-
at86rf230_awake(lp);
enable_irq(lp->spi->irq);
@@ -1055,36 +1024,6 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
if (rc < 0)
return rc;
- /* This sets the symbol_duration according frequency on the 212.
- * TODO move this handling while set channel and page in cfg802154.
- * We can do that, this timings are according 802.15.4 standard.
- * If we do that in cfg802154, this is a more generic calculation.
- *
- * This should also protected from ifs_timer. Means cancel timer and
- * init with a new value. For now, this is okay.
- */
- if (channel == 0) {
- if (page == 0) {
- /* SUB:0 and BPSK:0 -> BPSK-20 */
- lp->hw->phy->symbol_duration = 50;
- } else {
- /* SUB:1 and BPSK:0 -> BPSK-40 */
- lp->hw->phy->symbol_duration = 25;
- }
- } else {
- if (page == 0)
- /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
- lp->hw->phy->symbol_duration = 40;
- else
- /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
- lp->hw->phy->symbol_duration = 16;
- }
-
- lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
- lp->hw->phy->symbol_duration;
- lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
- lp->hw->phy->symbol_duration;
-
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
@@ -1560,7 +1499,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->data = &at86rf231_data;
lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 11;
- lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf231_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels;
@@ -1573,7 +1511,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->hw->phy->supported.channels[0] = 0x00007FF;
lp->hw->phy->supported.channels[2] = 0x00007FF;
lp->hw->phy->current_channel = 5;
- lp->hw->phy->symbol_duration = 25;
lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
lp->hw->phy->supported.tx_powers = at86rf212_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
@@ -1585,7 +1522,6 @@ at86rf230_detect_device(struct at86rf230_local *lp)
lp->data = &at86rf233_data;
lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 13;
- lp->hw->phy->symbol_duration = 16;
lp->hw->phy->supported.tx_powers = at86rf233_powers;
lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels;
@@ -1606,47 +1542,6 @@ not_supp:
return rc;
}
-#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
-static struct dentry *at86rf230_debugfs_root;
-
-static int at86rf230_stats_show(struct seq_file *file, void *offset)
-{
- struct at86rf230_local *lp = file->private;
-
- seq_printf(file, "SUCCESS:\t\t%8llu\n", lp->trac.success);
- seq_printf(file, "SUCCESS_DATA_PENDING:\t%8llu\n",
- lp->trac.success_data_pending);
- seq_printf(file, "SUCCESS_WAIT_FOR_ACK:\t%8llu\n",
- lp->trac.success_wait_for_ack);
- seq_printf(file, "CHANNEL_ACCESS_FAILURE:\t%8llu\n",
- lp->trac.channel_access_failure);
- seq_printf(file, "NO_ACK:\t\t\t%8llu\n", lp->trac.no_ack);
- seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(at86rf230_stats);
-
-static void at86rf230_debugfs_init(struct at86rf230_local *lp)
-{
- char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-";
-
- strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
-
- at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
-
- debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp,
- &at86rf230_stats_fops);
-}
-
-static void at86rf230_debugfs_remove(void)
-{
- debugfs_remove_recursive(at86rf230_debugfs_root);
-}
-#else
-static void at86rf230_debugfs_init(struct at86rf230_local *lp) { }
-static void at86rf230_debugfs_remove(void) { }
-#endif
-
static int at86rf230_probe(struct spi_device *spi)
{
struct ieee802154_hw *hw;
@@ -1743,23 +1638,19 @@ static int at86rf230_probe(struct spi_device *spi)
/* going into sleep by default */
at86rf230_sleep(lp);
- at86rf230_debugfs_init(lp);
-
rc = ieee802154_register_hw(lp->hw);
if (rc)
- goto free_debugfs;
+ goto free_dev;
return rc;
-free_debugfs:
- at86rf230_debugfs_remove();
free_dev:
ieee802154_free_hw(lp->hw);
return rc;
}
-static int at86rf230_remove(struct spi_device *spi)
+static void at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -1767,10 +1658,7 @@ static int at86rf230_remove(struct spi_device *spi)
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_hw(lp->hw);
ieee802154_free_hw(lp->hw);
- at86rf230_debugfs_remove();
dev_dbg(&spi->dev, "unregistered at86rf230\n");
-
- return 0;
}
static const struct of_device_id at86rf230_of_match[] = {
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 2f5e7b31032a..2c338783893d 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -74,81 +74,6 @@ struct atusb_chip_data {
int (*set_txpower)(struct ieee802154_hw*, s32);
};
-/* ----- USB commands without data ----------------------------------------- */
-
-/* To reduce the number of error checks in the code, we record the first error
- * in atusb->err and reject all subsequent requests until the error is cleared.
- */
-
-static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
- __u8 request, __u8 requesttype,
- __u16 value, __u16 index,
- void *data, __u16 size, int timeout)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
- int ret;
-
- if (atusb->err)
- return atusb->err;
-
- ret = usb_control_msg(usb_dev, pipe, request, requesttype,
- value, index, data, size, timeout);
- if (ret < size) {
- ret = ret < 0 ? ret : -ENODATA;
-
- atusb->err = ret;
- dev_err(&usb_dev->dev,
- "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
- __func__, request, value, index, ret);
- }
- return ret;
-}
-
-static int atusb_command(struct atusb *atusb, u8 cmd, u8 arg)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
-
- dev_dbg(&usb_dev->dev, "%s: cmd = 0x%x\n", __func__, cmd);
- return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
- cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
-}
-
-static int atusb_write_reg(struct atusb *atusb, u8 reg, u8 value)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
-
- dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
- return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
- ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
- value, reg, NULL, 0, 1000);
-}
-
-static int atusb_read_reg(struct atusb *atusb, u8 reg)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
- int ret;
- u8 *buffer;
- u8 value;
-
- buffer = kmalloc(1, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- dev_dbg(&usb_dev->dev, "%s: reg = 0x%x\n", __func__, reg);
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, buffer, 1, 1000);
-
- if (ret >= 0) {
- value = buffer[0];
- kfree(buffer);
- return value;
- } else {
- kfree(buffer);
- return ret;
- }
-}
-
static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
u8 shift, u8 value)
{
@@ -158,7 +83,10 @@ static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
- orig = atusb_read_reg(atusb, reg);
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, reg, &orig, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
/* Write the value only into that part of the register which is allowed
* by the mask. All other bits stay as before.
@@ -167,7 +95,8 @@ static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
tmp |= (value << shift) & mask;
if (tmp != orig)
- ret = atusb_write_reg(atusb, reg, tmp);
+ ret = usb_control_msg_send(usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ tmp, reg, NULL, 0, 1000, GFP_KERNEL);
return ret;
}
@@ -176,12 +105,16 @@ static int atusb_read_subreg(struct atusb *lp,
unsigned int addr, unsigned int mask,
unsigned int shift)
{
- int rc;
+ int reg, ret;
- rc = atusb_read_reg(lp, addr);
- rc = (rc & mask) >> shift;
+ ret = usb_control_msg_recv(lp->usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, addr, &reg, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- return rc;
+ reg = (reg & mask) >> shift;
+
+ return reg;
}
static int atusb_get_and_clear_error(struct atusb *atusb)
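
The atusb conversion replaces the driver's atusb_control_msg() wrappers with the USB core's usb_control_msg_send()/usb_control_msg_recv() helpers, which accept data on the stack (they bounce through an internal heap buffer), treat short reads as errors (-EREMOTEIO), and return 0 or a negative errno instead of a byte count. A minimal register read/write sketch; ATUSB_REG_READ and friends come from the driver's header:

#include <linux/usb.h>

static int ex_read_reg(struct usb_device *udev, u8 reg, u8 *val)
{
	/* val may live on the caller's stack; the core bounces it */
	return usb_control_msg_recv(udev, 0, ATUSB_REG_READ,
				    ATUSB_REQ_FROM_DEV, 0, reg,
				    val, 1, 1000, GFP_KERNEL);
}

static int ex_write_reg(struct usb_device *udev, u8 reg, u8 val)
{
	return usb_control_msg_send(udev, 0, ATUSB_REG_WRITE,
				    ATUSB_REQ_TO_DEV, val, reg,
				    NULL, 0, 1000, GFP_KERNEL);
}
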
@@ -273,9 +206,7 @@ static void atusb_tx_done(struct atusb *atusb, u8 seq)
* unlikely case now that seq == expect is then true, but can
* happen and fail with a tx_skb = NULL;
*/
- ieee802154_wake_queue(atusb->hw);
- if (atusb->tx_skb)
- dev_kfree_skb_irq(atusb->tx_skb);
+ ieee802154_xmit_hw_error(atusb->hw, atusb->tx_skb);
}
}
@@ -419,16 +350,22 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(dev, "%s called for saddr\n", __func__);
- atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
- atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr, RG_SHORT_ADDR_0, NULL, 0, 1000, GFP_KERNEL);
+
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr >> 8, RG_SHORT_ADDR_1, NULL, 0, 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(dev, "%s called for pan id\n", __func__);
- atusb_write_reg(atusb, RG_PAN_ID_0, pan);
- atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ pan, RG_PAN_ID_0, NULL, 0, 1000, GFP_KERNEL);
+
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ pan >> 8, RG_PAN_ID_1, NULL, 0, 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
@@ -437,7 +374,9 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
dev_vdbg(dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
- atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr[i], RG_IEEE_ADDR_0 + i, NULL, 0,
+ 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
@@ -459,7 +398,8 @@ static int atusb_start(struct ieee802154_hw *hw)
dev_dbg(&usb_dev->dev, "%s\n", __func__);
schedule_delayed_work(&atusb->work, 0);
- atusb_command(atusb, ATUSB_RX_MODE, 1);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 1, 0,
+ NULL, 0, 1000, GFP_KERNEL);
ret = atusb_get_and_clear_error(atusb);
if (ret < 0)
usb_kill_anchored_urbs(&atusb->idle_urbs);
@@ -473,7 +413,8 @@ static void atusb_stop(struct ieee802154_hw *hw)
dev_dbg(&usb_dev->dev, "%s\n", __func__);
usb_kill_anchored_urbs(&atusb->idle_urbs);
- atusb_command(atusb, ATUSB_RX_MODE, 0);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 0, 0,
+ NULL, 0, 1000, GFP_KERNEL);
atusb_get_and_clear_error(atusb);
}
@@ -580,9 +521,11 @@ atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val)
{
- unsigned int cca_ed_thres;
+ int cca_ed_thres;
cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES);
+ if (cca_ed_thres < 0)
+ return cca_ed_thres;
switch (rssi_base_val) {
case -98:
@@ -669,36 +612,6 @@ static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
if (rc < 0)
return rc;
- /* This sets the symbol_duration according frequency on the 212.
- * TODO move this handling while set channel and page in cfg802154.
- * We can do that, this timings are according 802.15.4 standard.
- * If we do that in cfg802154, this is a more generic calculation.
- *
- * This should also protected from ifs_timer. Means cancel timer and
- * init with a new value. For now, this is okay.
- */
- if (channel == 0) {
- if (page == 0) {
- /* SUB:0 and BPSK:0 -> BPSK-20 */
- lp->hw->phy->symbol_duration = 50;
- } else {
- /* SUB:1 and BPSK:0 -> BPSK-40 */
- lp->hw->phy->symbol_duration = 25;
- }
- } else {
- if (page == 0)
- /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
- lp->hw->phy->symbol_duration = 40;
- else
- /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
- lp->hw->phy->symbol_duration = 16;
- }
-
- lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
- lp->hw->phy->symbol_duration;
- lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
- lp->hw->phy->symbol_duration;
-
return atusb_write_subreg(lp, SR_CHANNEL, channel);
}
@@ -799,18 +712,13 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char *hw_name;
- unsigned char *buffer;
+ unsigned char buffer[3];
int ret;
- buffer = kmalloc(3, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* Get a couple of the ATMega Firmware values */
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
- buffer, 3, 1000);
- if (ret >= 0) {
+ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, 3, 1000, GFP_KERNEL);
+ if (!ret) {
atusb->fw_ver_maj = buffer[0];
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
@@ -849,7 +757,6 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
- kfree(buffer);
return ret;
}
@@ -863,7 +770,6 @@ static int atusb_get_and_show_build(struct atusb *atusb)
if (!build)
return -ENOMEM;
- /* We cannot call atusb_control_msg() here, since this request may read various length data */
ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
if (ret >= 0) {
@@ -881,14 +787,27 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
u8 man_id_0, man_id_1, part_num, version_num;
const char *chip;
struct ieee802154_hw *hw = atusb->hw;
+ int ret;
- man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
- man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
- part_num = atusb_read_reg(atusb, RG_PART_NUM);
- version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_MAN_ID_0, &man_id_0, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- if (atusb->err)
- return atusb->err;
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_MAN_ID_1, &man_id_1, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_PART_NUM, &part_num, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_VERSION_NUM, &version_num, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
@@ -918,7 +837,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
chip = "AT86RF230";
atusb->hw->phy->supported.channels[0] = 0x7FFF800;
atusb->hw->phy->current_channel = 11; /* reset default */
- atusb->hw->phy->symbol_duration = 16;
atusb->hw->phy->supported.tx_powers = atusb_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->supported.cca_ed_levels = atusb_ed_levels;
@@ -928,7 +846,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
chip = "AT86RF231";
atusb->hw->phy->supported.channels[0] = 0x7FFF800;
atusb->hw->phy->current_channel = 11; /* reset default */
- atusb->hw->phy->symbol_duration = 16;
atusb->hw->phy->supported.tx_powers = atusb_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->supported.cca_ed_levels = atusb_ed_levels;
@@ -940,7 +857,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
atusb->hw->phy->supported.channels[0] = 0x00007FF;
atusb->hw->phy->supported.channels[2] = 0x00007FF;
atusb->hw->phy->current_channel = 5;
- atusb->hw->phy->symbol_duration = 25;
atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
atusb->hw->phy->supported.tx_powers = at86rf212_powers;
atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
@@ -969,7 +885,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char *buffer;
+ unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
__le64 extended_addr;
u64 addr;
int ret;
@@ -982,18 +898,12 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
- buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* Firmware is new enough so we fetch the address from EEPROM */
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
- buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
+ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000, GFP_KERNEL);
if (ret < 0) {
dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
- kfree(buffer);
return ret;
}
@@ -1009,7 +919,6 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
- kfree(buffer);
return ret;
}
@@ -1051,7 +960,8 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
- atusb_command(atusb, ATUSB_RF_RESET, 0);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RF_RESET, ATUSB_REQ_TO_DEV, 0, 0,
+ NULL, 0, 1000, GFP_KERNEL);
atusb_get_and_conf_chip(atusb);
atusb_get_and_show_revision(atusb);
atusb_get_and_show_build(atusb);
@@ -1076,7 +986,9 @@ static int atusb_probe(struct usb_interface *interface,
* explicitly. Any resets after that will send us straight to TRX_OFF,
* making the command below redundant.
*/
- atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ STATE_FORCE_TRX_OFF, RG_TRX_STATE, NULL, 0, 1000, GFP_KERNEL);
+
msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */
#if 0
@@ -1104,7 +1016,8 @@ static int atusb_probe(struct usb_interface *interface,
atusb_write_subreg(atusb, SR_RX_SAFE_MODE, 1);
#endif
- atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ 0xff, RG_IRQ_MASK, NULL, 0, 1000, GFP_KERNEL);
ret = atusb_get_and_clear_error(atusb);
if (!ret)
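
For illustration, the atusb conversions above lean on usb_control_msg_recv(), which bounces data through an internally allocated buffer and returns 0 on success (a negative errno on failure) rather than a byte count, so callers may pass stack storage and drop the kmalloc()/kfree() pair. A minimal sketch of the idiom follows; the request constant and register argument are placeholders, not symbols from this driver.

#include <linux/usb.h>

/* Hypothetical one-byte vendor register read built on the core helper;
 * returns 0 on success or a negative errno, never a short count.
 */
static int example_read_reg(struct usb_device *udev, u8 reg, u8 *val)
{
	/* A stack buffer is safe: the helper copies via a bounce buffer */
	return usb_control_msg_recv(udev, 0 /* ep0 */, 0x01 /* assumed req */,
				    USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				    0, reg, val, 1, 1000, GFP_KERNEL);
}
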
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index ece6ff6049f6..450b16ad40a4 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -89,48 +89,6 @@
#define CA8210_TEST_INT_FILE_NAME "ca8210_test"
#define CA8210_TEST_INT_FIFO_SIZE 256
-/* MAC status enumerations */
-#define MAC_SUCCESS (0x00)
-#define MAC_ERROR (0x01)
-#define MAC_CANCELLED (0x02)
-#define MAC_READY_FOR_POLL (0x03)
-#define MAC_COUNTER_ERROR (0xDB)
-#define MAC_IMPROPER_KEY_TYPE (0xDC)
-#define MAC_IMPROPER_SECURITY_LEVEL (0xDD)
-#define MAC_UNSUPPORTED_LEGACY (0xDE)
-#define MAC_UNSUPPORTED_SECURITY (0xDF)
-#define MAC_BEACON_LOST (0xE0)
-#define MAC_CHANNEL_ACCESS_FAILURE (0xE1)
-#define MAC_DENIED (0xE2)
-#define MAC_DISABLE_TRX_FAILURE (0xE3)
-#define MAC_SECURITY_ERROR (0xE4)
-#define MAC_FRAME_TOO_LONG (0xE5)
-#define MAC_INVALID_GTS (0xE6)
-#define MAC_INVALID_HANDLE (0xE7)
-#define MAC_INVALID_PARAMETER (0xE8)
-#define MAC_NO_ACK (0xE9)
-#define MAC_NO_BEACON (0xEA)
-#define MAC_NO_DATA (0xEB)
-#define MAC_NO_SHORT_ADDRESS (0xEC)
-#define MAC_OUT_OF_CAP (0xED)
-#define MAC_PAN_ID_CONFLICT (0xEE)
-#define MAC_REALIGNMENT (0xEF)
-#define MAC_TRANSACTION_EXPIRED (0xF0)
-#define MAC_TRANSACTION_OVERFLOW (0xF1)
-#define MAC_TX_ACTIVE (0xF2)
-#define MAC_UNAVAILABLE_KEY (0xF3)
-#define MAC_UNSUPPORTED_ATTRIBUTE (0xF4)
-#define MAC_INVALID_ADDRESS (0xF5)
-#define MAC_ON_TIME_TOO_LONG (0xF6)
-#define MAC_PAST_TIME (0xF7)
-#define MAC_TRACKING_OFF (0xF8)
-#define MAC_INVALID_INDEX (0xF9)
-#define MAC_LIMIT_REACHED (0xFA)
-#define MAC_READ_ONLY (0xFB)
-#define MAC_SCAN_IN_PROGRESS (0xFC)
-#define MAC_SUPERFRAME_OVERLAP (0xFD)
-#define MAC_SYSTEM_ERROR (0xFF)
-
/* HWME attribute IDs */
#define HWME_EDTHRESHOLD (0x04)
#define HWME_EDVALUE (0x06)
@@ -551,58 +509,58 @@ static int link_to_linux_err(int link_status)
return link_status;
}
switch (link_status) {
- case MAC_SUCCESS:
- case MAC_REALIGNMENT:
+ case IEEE802154_SUCCESS:
+ case IEEE802154_REALIGNMENT:
return 0;
- case MAC_IMPROPER_KEY_TYPE:
+ case IEEE802154_IMPROPER_KEY_TYPE:
return -EKEYREJECTED;
- case MAC_IMPROPER_SECURITY_LEVEL:
- case MAC_UNSUPPORTED_LEGACY:
- case MAC_DENIED:
+ case IEEE802154_IMPROPER_SECURITY_LEVEL:
+ case IEEE802154_UNSUPPORTED_LEGACY:
+ case IEEE802154_DENIED:
return -EACCES;
- case MAC_BEACON_LOST:
- case MAC_NO_ACK:
- case MAC_NO_BEACON:
+ case IEEE802154_BEACON_LOST:
+ case IEEE802154_NO_ACK:
+ case IEEE802154_NO_BEACON:
return -ENETUNREACH;
- case MAC_CHANNEL_ACCESS_FAILURE:
- case MAC_TX_ACTIVE:
- case MAC_SCAN_IN_PROGRESS:
+ case IEEE802154_CHANNEL_ACCESS_FAILURE:
+ case IEEE802154_TX_ACTIVE:
+ case IEEE802154_SCAN_IN_PROGRESS:
return -EBUSY;
- case MAC_DISABLE_TRX_FAILURE:
- case MAC_OUT_OF_CAP:
+ case IEEE802154_DISABLE_TRX_FAILURE:
+ case IEEE802154_OUT_OF_CAP:
return -EAGAIN;
- case MAC_FRAME_TOO_LONG:
+ case IEEE802154_FRAME_TOO_LONG:
return -EMSGSIZE;
- case MAC_INVALID_GTS:
- case MAC_PAST_TIME:
+ case IEEE802154_INVALID_GTS:
+ case IEEE802154_PAST_TIME:
return -EBADSLT;
- case MAC_INVALID_HANDLE:
+ case IEEE802154_INVALID_HANDLE:
return -EBADMSG;
- case MAC_INVALID_PARAMETER:
- case MAC_UNSUPPORTED_ATTRIBUTE:
- case MAC_ON_TIME_TOO_LONG:
- case MAC_INVALID_INDEX:
+ case IEEE802154_INVALID_PARAMETER:
+ case IEEE802154_UNSUPPORTED_ATTRIBUTE:
+ case IEEE802154_ON_TIME_TOO_LONG:
+ case IEEE802154_INVALID_INDEX:
return -EINVAL;
- case MAC_NO_DATA:
+ case IEEE802154_NO_DATA:
return -ENODATA;
- case MAC_NO_SHORT_ADDRESS:
+ case IEEE802154_NO_SHORT_ADDRESS:
return -EFAULT;
- case MAC_PAN_ID_CONFLICT:
+ case IEEE802154_PAN_ID_CONFLICT:
return -EADDRINUSE;
- case MAC_TRANSACTION_EXPIRED:
+ case IEEE802154_TRANSACTION_EXPIRED:
return -ETIME;
- case MAC_TRANSACTION_OVERFLOW:
+ case IEEE802154_TRANSACTION_OVERFLOW:
return -ENOBUFS;
- case MAC_UNAVAILABLE_KEY:
+ case IEEE802154_UNAVAILABLE_KEY:
return -ENOKEY;
- case MAC_INVALID_ADDRESS:
+ case IEEE802154_INVALID_ADDRESS:
return -ENXIO;
- case MAC_TRACKING_OFF:
- case MAC_SUPERFRAME_OVERLAP:
+ case IEEE802154_TRACKING_OFF:
+ case IEEE802154_SUPERFRAME_OVERLAP:
return -EREMOTEIO;
- case MAC_LIMIT_REACHED:
+ case IEEE802154_LIMIT_REACHED:
return -EDQUOT;
- case MAC_READ_ONLY:
+ case IEEE802154_READ_ONLY:
return -EROFS;
default:
return -EPROTO;
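
The switch above now maps the shared IEEE802154_* status values onto Linux errnos. A hedged usage sketch, assuming a confirm message whose status byte feeds link_to_linux_err() as it does elsewhere in this driver:

#include <linux/printk.h>

/* Sketch only: 'status' stands in for a confirm message's status byte */
static int example_handle_confirm(u8 status)
{
	int err = link_to_linux_err(status);

	if (err)
		pr_debug("MAC status %#04x -> errno %d\n", status, err);
	return err;
}
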
@@ -754,7 +712,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
ca8210_net_rx(priv->hw, buf, len);
if (buf[0] == SPI_MCPS_DATA_CONFIRM) {
- if (buf[3] == MAC_TRANSACTION_OVERFLOW) {
+ if (buf[3] == IEEE802154_TRANSACTION_OVERFLOW) {
dev_info(
&priv->spi->dev,
"Waiting for transaction overflow to stabilise...\n");
@@ -831,7 +789,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
finish:;
}
-static int ca8210_remove(struct spi_device *spi_device);
+static void ca8210_remove(struct spi_device *spi_device);
/**
* ca8210_spi_transfer_complete() - Called when a single spi transfer has
@@ -1128,7 +1086,7 @@ static u8 tdme_setsfr_request_sync(
);
if (ret) {
dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret);
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_TDME_SETSFR_CONFIRM) {
@@ -1137,7 +1095,7 @@ static u8 tdme_setsfr_request_sync(
"sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n",
response.command_id
);
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
return response.pdata.tdme_set_sfr_cnf.status;
@@ -1151,7 +1109,7 @@ static u8 tdme_setsfr_request_sync(
*/
static u8 tdme_chipinit(void *device_ref)
{
- u8 status = MAC_SUCCESS;
+ u8 status = IEEE802154_SUCCESS;
u8 sfr_address;
struct spi_device *spi = device_ref;
struct preamble_cfg_sfr pre_cfg_value = {
@@ -1220,7 +1178,7 @@ static u8 tdme_chipinit(void *device_ref)
goto finish;
finish:
- if (status != MAC_SUCCESS) {
+ if (status != IEEE802154_SUCCESS) {
dev_err(
&spi->dev,
"failed to set sfr at %#03x, status = %#03x\n",
@@ -1287,7 +1245,7 @@ static u8 tdme_checkpibattribute(
const void *pib_attribute_value
)
{
- u8 status = MAC_SUCCESS;
+ u8 status = IEEE802154_SUCCESS;
u8 value;
value = *((u8 *)pib_attribute_value);
@@ -1296,52 +1254,52 @@ static u8 tdme_checkpibattribute(
/* PHY */
case PHY_TRANSMIT_POWER:
if (value > 0x3F)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case PHY_CCA_MODE:
if (value > 0x03)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
/* MAC */
case MAC_BATT_LIFE_EXT_PERIODS:
if (value < 6 || value > 41)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_BEACON_PAYLOAD:
if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_BEACON_PAYLOAD_LENGTH:
if (value > MAX_BEACON_PAYLOAD_LENGTH)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_BEACON_ORDER:
if (value > 15)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MAX_BE:
if (value < 3 || value > 8)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MAX_CSMA_BACKOFFS:
if (value > 5)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MAX_FRAME_RETRIES:
if (value > 7)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_MIN_BE:
if (value > 8)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_RESPONSE_WAIT_TIME:
if (value < 2 || value > 64)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_SUPERFRAME_ORDER:
if (value > 15)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
/* boolean */
case MAC_ASSOCIATED_PAN_COORD:
@@ -1353,16 +1311,16 @@ static u8 tdme_checkpibattribute(
case MAC_RX_ON_WHEN_IDLE:
case MAC_SECURITY_ENABLED:
if (value > 1)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
/* MAC SEC */
case MAC_AUTO_REQUEST_SECURITY_LEVEL:
if (value > 7)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
case MAC_AUTO_REQUEST_KEY_ID_MODE:
if (value > 3)
- status = MAC_INVALID_PARAMETER;
+ status = IEEE802154_INVALID_PARAMETER;
break;
default:
break;
@@ -1522,9 +1480,9 @@ static u8 mcps_data_request(
if (ca8210_spi_transfer(device_ref, &command.command_id,
command.length + 2))
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
- return MAC_SUCCESS;
+ return IEEE802154_SUCCESS;
}
/**
@@ -1553,11 +1511,11 @@ static u8 mlme_reset_request_sync(
&response.command_id,
device_ref)) {
dev_err(&spi->dev, "cascoda_api_downstream failed\n");
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_MLME_RESET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
status = response.pdata.status;
@@ -1600,7 +1558,7 @@ static u8 mlme_set_request_sync(
*/
if (tdme_checkpibattribute(
pib_attribute, pib_attribute_length, pib_attribute_value)) {
- return MAC_INVALID_PARAMETER;
+ return IEEE802154_INVALID_PARAMETER;
}
if (pib_attribute == PHY_CURRENT_CHANNEL) {
@@ -1636,11 +1594,11 @@ static u8 mlme_set_request_sync(
command.length + 2,
&response.command_id,
device_ref)) {
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_MLME_SET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
return response.pdata.status;
}
@@ -1678,11 +1636,11 @@ static u8 hwme_set_request_sync(
command.length + 2,
&response.command_id,
device_ref)) {
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_HWME_SET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
return response.pdata.hwme_set_cnf.status;
}
@@ -1714,13 +1672,13 @@ static u8 hwme_get_request_sync(
command.length + 2,
&response.command_id,
device_ref)) {
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
}
if (response.command_id != SPI_HWME_GET_CONFIRM)
- return MAC_SYSTEM_ERROR;
+ return IEEE802154_SYSTEM_ERROR;
- if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) {
+ if (response.pdata.hwme_get_cnf.status == IEEE802154_SUCCESS) {
*hw_attribute_length =
response.pdata.hwme_get_cnf.hw_attribute_length;
memcpy(
@@ -1770,8 +1728,8 @@ static int ca8210_async_xmit_complete(
"Link transmission unsuccessful, status = %d\n",
status
);
- if (status != MAC_TRANSACTION_OVERFLOW) {
- ieee802154_wake_queue(priv->hw);
+ if (status != IEEE802154_TRANSACTION_OVERFLOW) {
+ ieee802154_xmit_error(priv->hw, priv->tx_skb, status);
return 0;
}
}
@@ -2335,7 +2293,7 @@ static int ca8210_set_csma_params(
* @retries: Number of retries
*
* Sets the number of times to retry a transmission if no acknowledgment was
- * was received from the other end when one was requested.
+ * received from the other end when one was requested.
*
* Return: 0 or linux error code
*/
@@ -2435,7 +2393,7 @@ static int ca8210_test_check_upstream(u8 *buf, void *device_ref)
if (ret) {
response[0] = SPI_MLME_SET_CONFIRM;
response[1] = 3;
- response[2] = MAC_INVALID_PARAMETER;
+ response[2] = IEEE802154_INVALID_PARAMETER;
response[3] = buf[2];
response[4] = buf[3];
if (cascoda_api_upstream)
@@ -2974,8 +2932,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
ca8210_hw->phy->cca_ed_level = -9800;
ca8210_hw->phy->symbol_duration = 16;
- ca8210_hw->phy->lifs_period = 40;
- ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
ca8210_hw->flags =
IEEE802154_HW_AFILT |
IEEE802154_HW_OMIT_CKSUM |
@@ -3048,7 +3006,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
*
* Return: 0 or linux error code
*/
-static int ca8210_remove(struct spi_device *spi_device)
+static void ca8210_remove(struct spi_device *spi_device)
{
struct ca8210_priv *priv;
struct ca8210_platform_data *pdata;
@@ -3088,8 +3046,6 @@ static int ca8210_remove(struct spi_device *spi_device)
if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS))
ca8210_test_interface_clear(priv);
}
-
- return 0;
}
/**
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 89c046b204e0..c69b87d3837d 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
@@ -1213,7 +1214,7 @@ err_hw_init:
return ret;
}
-static int cc2520_remove(struct spi_device *spi)
+static void cc2520_remove(struct spi_device *spi)
{
struct cc2520_private *priv = spi_get_drvdata(spi);
@@ -1222,8 +1223,6 @@ static int cc2520_remove(struct spi_device *spi)
ieee802154_unregister_hw(priv->hw);
ieee802154_free_hw(priv->hw);
-
- return 0;
}
static const struct spi_device_id cc2520_ids[] = {
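
The remove() conversions in this patch follow the SPI core change that made struct spi_driver::remove return void; the core ignored the returned value anyway, so drivers now log failures instead of returning them. A minimal sketch of the new callback shape, with placeholder names (my_priv and my_teardown are not from these drivers):

#include <linux/spi/spi.h>

struct my_priv { int dummy; };			/* placeholder state */

static void my_teardown(struct my_priv *priv) { }	/* placeholder */

static void my_remove(struct spi_device *spi)
{
	struct my_priv *priv = spi_get_drvdata(spi);

	/* Failures can only be logged here; nothing is returned */
	my_teardown(priv);
}
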
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 8caa61ec718f..2f0544dd7c2a 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -630,6 +630,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = hwsim_nl_ops,
.n_small_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .resv_start_op = MAC802154_HWSIM_CMD_NEW_EDGE + 1,
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
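
The new resv_start_op member tells the generic netlink core where reserved command numbers begin: commands at or above it get strict validation of reserved header fields. The convention, followed above, is one past the family's highest existing command. A sketch under assumed names (DEMO_* values are illustrative only):

#include <linux/module.h>
#include <net/genetlink.h>

/* DEMO_CMD_MAX stands in for a family's highest command value */
#define DEMO_CMD_MAX	3

static struct genl_family demo_family = {
	.name		= "demo_family",
	.version	= 1,
	.maxattr	= 2,
	.module		= THIS_MODULE,
	/* commands >= this get strict reserved-field validation */
	.resv_start_op	= DEMO_CMD_MAX + 1,
};
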
@@ -786,11 +787,12 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
goto err_pib;
}
+ pib->channel = 13;
rcu_assign_pointer(phy->pib, pib);
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
- hw->flags = IEEE802154_HW_PROMISCUOUS;
+ hw->flags = IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_RX_DROP_BAD_CKSUM;
hw->parent = dev;
err = ieee802154_register_hw(hw);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 8dc04e2590b1..2fe0e4a0a0c4 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -975,10 +975,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
dev_dbg(printdev(lp), "%s\n", __func__);
- phy->symbol_duration = 16;
- phy->lifs_period = 40;
- phy->sifs_period = 12;
-
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS;
@@ -1006,7 +1002,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
phy->current_page = 0;
/* MCR20A default reset value */
phy->current_channel = 20;
- phy->symbol_duration = 16;
phy->supported.tx_powers = mcr20a_powers;
phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
phy->cca_ed_level = phy->supported.cca_ed_levels[75];
@@ -1335,7 +1330,7 @@ free_dev:
return ret;
}
-static int mcr20a_remove(struct spi_device *spi)
+static void mcr20a_remove(struct spi_device *spi)
{
struct mcr20a_local *lp = spi_get_drvdata(spi);
@@ -1343,8 +1338,6 @@ static int mcr20a_remove(struct spi_device *spi)
ieee802154_unregister_hw(lp->hw);
ieee802154_free_hw(lp->hw);
-
- return 0;
}
static const struct of_device_id mcr20a_of_match[] = {
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ff83e00b77af..ee4cfbf2c5cc 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -1356,7 +1356,7 @@ err_ret:
return ret;
}
-static int mrf24j40_remove(struct spi_device *spi)
+static void mrf24j40_remove(struct spi_device *spi)
{
struct mrf24j40 *devrec = spi_get_drvdata(spi);
@@ -1366,8 +1366,6 @@ static int mrf24j40_remove(struct spi_device *spi)
ieee802154_free_hw(devrec->hw);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
* complete? */
-
- return 0;
}
static const struct of_device_id mrf24j40_of_match[] = {
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index d037682fb7ad..6782c2cbf542 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -2,7 +2,9 @@ config QCOM_IPA
tristate "Qualcomm IPA support"
depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST
+ depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index bdfb2430ab2c..48255fc4b25c 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Qualcomm IPA driver.
+
+IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.9 4.11
+
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
@@ -7,6 +13,6 @@ ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
-ipa-y += ipa_data-v3.1.o ipa_data-v3.5.1.o \
- ipa_data-v4.2.o ipa_data-v4.5.o \
- ipa_data-v4.9.o ipa_data-v4.11.o
+ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
+
+ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 06ddb85f39b2..e0d71f609272 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
@@ -101,7 +101,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -148,6 +150,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
@@ -522,7 +526,7 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
.version = IPA_VERSION_3_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 760c22bbdf70..42f2c88a92d4 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
@@ -92,7 +92,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -140,6 +142,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
@@ -175,10 +179,10 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
- .min = 1, .max = 255,
+ .min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
- .min = 1, .max = 255,
+ .min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 63,
@@ -403,11 +407,11 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
- BCR_TX_NOT_USING_BRESP_FMASK |
- BCR_SUSPEND_L2_IRQ_FMASK |
- BCR_HOLB_DROP_L2_IRQ_FMASK |
- BCR_DUAL_TX_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
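
These initializers now build the backward-compatibility word from register bit positions combined with BIT(), rather than pre-shifted *_FMASK constants. A tiny sketch under assumed enum positions (the DEMO_* names are illustrative only):

#include <linux/bits.h>
#include <linux/types.h>

enum demo_bcr_bit { DEMO_BCR_FIRST, DEMO_BCR_SECOND, DEMO_BCR_THIRD };

/* BIT(0) | BIT(2) == 0x5 */
static const u32 demo_backward_compat = BIT(DEMO_BCR_FIRST) |
					BIT(DEMO_BCR_THIRD);
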
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index fea91451a0c3..a204e439c23d 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
@@ -86,7 +86,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -133,6 +135,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 32768,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 2a231e79d5e1..04f574fe006f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
@@ -82,7 +82,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -130,6 +132,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 2da2c4194f2e..684239e71f46 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
@@ -95,7 +95,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -142,6 +144,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 2421b5abb5d4..2333e15f9533 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
@@ -87,7 +87,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -134,6 +136,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index bc981043cc80..bea2da1c4c51 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -56,9 +56,9 @@
* element can also contain an immediate command, requesting the IPA perform
* actions other than data transfer.
*
- * Each TRE refers to a block of data--also located DRAM. After writing one
- * or more TREs to a channel, the writer (either the IPA or an EE) writes a
- * doorbell register to inform the receiving side how many elements have
+ * Each TRE refers to a block of data--also located in DRAM. After writing
+ * one or more TREs to a channel, the writer (either the IPA or an EE) writes
+ * a doorbell register to inform the receiving side how many elements have
* been written.
*
* Each channel has a GSI "event ring" associated with it. An event ring
@@ -665,7 +665,8 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ struct gsi_ring *ring = &evt_ring->ring;
+ size_t size;
u32 val;
/* We program all event rings as GPI type/protocol */
@@ -674,6 +675,7 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ size = ring->count * GSI_RING_ELEMENT_SIZE;
val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
@@ -681,9 +683,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = lower_32_bits(evt_ring->ring.addr);
+ val = lower_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
- val = upper_32_bits(evt_ring->ring.addr);
+ val = upper_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
@@ -700,48 +702,40 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
- /* Finally, tell the hardware we've completed event 0 (arbitrary) */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+ /* Finally, tell the hardware our "last processed" event (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}
/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- const struct list_head *list;
+ u32 pending_id = trans_info->pending_id;
struct gsi_trans *trans;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* There is a small chance a TX transaction got allocated just
- * before we disabled transmits, so check for that.
- */
- if (channel->toward_ipa) {
- list = &trans_info->alloc;
- if (!list_empty(list))
- goto done;
- list = &trans_info->pending;
- if (!list_empty(list))
- goto done;
+ u16 trans_id;
+
+ if (channel->toward_ipa && pending_id != trans_info->free_id) {
+ /* There is a small chance a TX transaction got allocated
+ * just before we disabled transmits, so check for that.
+ * The last allocated, committed, or pending transaction
+ * precedes the first free transaction.
+ */
+ trans_id = trans_info->free_id - 1;
+ } else if (trans_info->polled_id != pending_id) {
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ *
+ * The last completed or polled transaction precedes the
+ * first pending transaction.
+ */
+ trans_id = pending_id - 1;
+ } else {
+ return NULL;
}
- /* Otherwise (TX or RX) we want to wait for anything that
- * has completed, or has been polled but not released yet.
- */
- list = &trans_info->complete;
- if (!list_empty(list))
- goto done;
- list = &trans_info->polled;
- if (list_empty(list))
- list = NULL;
-done:
- trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
-
/* Caller will wait for this, so take a reference */
- if (trans)
- refcount_inc(&trans->refcount);
-
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ refcount_inc(&trans->refcount);
return trans;
}
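
The rework above replaces the spinlock-protected transaction lists with free-running 16-bit IDs into a fixed array: a transaction's slot is its ID modulo the ring size, and its state is encoded by which ID window it falls in, so no lock or list walk is needed to find the last busy entry. A simplified model, assuming the same window ordering as the function above:

#include <linux/errno.h>
#include <linux/types.h>

struct demo_trans_info {
	u16 free_id;	/* first free (not yet allocated) transaction ID */
	u16 pending_id;	/* first transaction awaiting completion */
	u16 polled_id;	/* first transaction polled but not released */
};

/* Slot of the newest in-flight transaction, or -ENOENT if none */
static int demo_last_busy_slot(struct demo_trans_info *ti, u16 ring_size)
{
	/* last allocated/committed/pending precedes the first free ID */
	if (ti->pending_id != ti->free_id)
		return (u16)(ti->free_id - 1) % ring_size;
	/* last completed/polled precedes the first pending ID */
	if (ti->polled_id != ti->pending_id)
		return (u16)(ti->pending_id - 1) % ring_size;
	return -ENOENT;
}
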
@@ -770,9 +764,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
u32 wrr_weight = 0;
u32 val;
- /* Arbitrarily pick TRE 0 as the first channel element to use */
- channel->tre_ring.index = 0;
-
/* We program all channels as GPI type/protocol */
val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
@@ -823,7 +814,7 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
- gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ gpi->max_outstanding_tre = channel->trans_tre_max *
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
@@ -949,6 +940,8 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
+ /* Hardware assumes this is 0 following reset */
+ channel->tre_ring.index = 0;
gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
@@ -991,75 +984,66 @@ void gsi_resume(struct gsi *gsi)
enable_irq(gsi->irq);
}
-/**
- * gsi_channel_tx_queued() - Report queued TX transfers for a channel
- * @channel: Channel for which to report
- *
- * Report to the network stack the number of bytes and transactions that
- * have been queued to hardware since last call. This and the next function
- * supply information used by the network stack for throttling.
- *
- * For each channel we track the number of transactions used and bytes of
- * data those transactions represent. We also track what those values are
- * each time this function is called. Subtracting the two tells us
- * the number of bytes and transactions that have been added between
- * successive calls.
- *
- * Calling this each time we ring the channel doorbell allows us to
- * provide accurate information to the network stack about how much
- * work we've given the hardware at any point in time.
- */
-void gsi_channel_tx_queued(struct gsi_channel *channel)
+void gsi_trans_tx_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
+ channel->trans_count++;
+ channel->byte_count += trans->len;
+
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+}
+
+void gsi_trans_tx_queued(struct gsi_trans *trans)
{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
+ channel = &gsi->channel[channel_id];
+
byte_count = channel->byte_count - channel->queued_byte_count;
trans_count = channel->trans_count - channel->queued_trans_count;
channel->queued_byte_count = channel->byte_count;
channel->queued_trans_count = channel->trans_count;
- ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
/**
- * gsi_channel_tx_update() - Report completed TX transfers
- * @channel: Channel that has completed transmitting packets
- * @trans: Last transation known to be complete
+ * gsi_trans_tx_completed() - Report completed TX transactions
+ * @trans: TX channel transaction that has completed
*
- * Compute the number of transactions and bytes that have been transferred
- * over a TX channel since the given transaction was committed. Report this
- * information to the network stack.
+ * Report that a transaction on a TX channel has completed. At the time a
+ * transaction is committed, we record *in the transaction* its channel's
+ * committed transaction and byte counts. Transactions are completed in
+ * order, and the difference between the channel's byte/transaction count
+ * when the transaction was committed and when it completes tells us
+ * exactly how much data has been transferred while the transaction was
+ * pending.
*
- * At the time a transaction is committed, we record its channel's
- * committed transaction and byte counts *in the transaction*.
- * Completions are signaled by the hardware with an interrupt, and
- * we can determine the latest completed transaction at that time.
- *
- * The difference between the byte/transaction count recorded in
- * the transaction and the count last time we recorded a completion
- * tells us exactly how much data has been transferred between
- * completions.
- *
- * Calling this each time we learn of a newly-completed transaction
- * allows us to provide accurate information to the network stack
- * about how much work has been completed by the hardware at a given
- * point in time.
+ * We report this information to the network stack, which uses it to manage
+ * the rate at which data is sent to hardware.
*/
-static void
-gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
- u64 byte_count = trans->byte_count + trans->len;
- u64 trans_count = trans->trans_count + 1;
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+ trans_count = trans->trans_count - channel->compl_trans_count;
+ byte_count = trans->byte_count - channel->compl_byte_count;
- byte_count -= channel->compl_byte_count;
- channel->compl_byte_count += byte_count;
- trans_count -= channel->compl_trans_count;
channel->compl_trans_count += trans_count;
+ channel->compl_byte_count += byte_count;
- ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}
/* Channel control interrupt handler */
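
The statistics scheme above snapshots the channel's cumulative transaction and byte counts into each transaction at commit time; at completion, subtracting the channel's last-reported totals from the snapshot yields exactly what finished in between. For example, if a channel has committed 10 transactions and 4096 bytes but last reported 7 and 3072, the completing transaction reports 3 transactions and 1024 bytes. A simplified model of the two halves:

#include <linux/types.h>

struct demo_channel {
	u64 trans_count;	/* transactions committed so far */
	u64 byte_count;
	u64 compl_trans_count;	/* last totals reported as complete */
	u64 compl_byte_count;
};

struct demo_trans {
	u32 len;		/* bytes in this transaction */
	u64 trans_count;	/* channel snapshot taken at commit */
	u64 byte_count;
};

static void demo_commit(struct demo_channel *ch, struct demo_trans *t)
{
	ch->trans_count++;
	ch->byte_count += t->len;
	t->trans_count = ch->trans_count;	/* record the snapshot */
	t->byte_count = ch->byte_count;
}

/* On completion, the deltas are exactly what finished in between */
static void demo_complete(struct demo_channel *ch, struct demo_trans *t)
{
	u64 trans = t->trans_count - ch->compl_trans_count;
	u64 bytes = t->byte_count - ch->compl_byte_count;

	ch->compl_trans_count += trans;
	ch->compl_byte_count += bytes;
	/* report (trans, bytes) up the stack here */
}
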
@@ -1179,15 +1163,15 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
* Similarly, we could get an error back when updating flow control
* on a channel because it's not in the proper state.
*
- * In either case, we silently ignore a CHANNEL_NOT_RUNNING error
- * if we receive it.
+ * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
+ * error if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
switch (result) {
case GENERIC_EE_SUCCESS:
- case GENERIC_EE_CHANNEL_NOT_RUNNING:
+ case GENERIC_EE_INCORRECT_CHANNEL_STATE:
gsi->result = 0;
break;
@@ -1327,60 +1311,73 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
}
/* Return the transaction associated with a transfer completion event */
-static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
- struct gsi_event *event)
+static struct gsi_trans *
+gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
+ u32 channel_id = event->chid;
+ struct gsi_channel *channel;
+ struct gsi_trans *trans;
u32 tre_offset;
u32 tre_index;
+ channel = &gsi->channel[channel_id];
+ if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
+ return NULL;
+
/* Event xfer_ptr records the TRE it's associated with */
tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
- return gsi_channel_trans_mapped(channel, tre_index);
+ trans = gsi_channel_trans_mapped(channel, tre_index);
+
+ if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
+ return NULL;
+
+ return trans;
}
/**
- * gsi_evt_ring_rx_update() - Record lengths of received data
- * @evt_ring: Event ring associated with channel that received packets
- * @index: Event index in ring reported by hardware
+ * gsi_evt_ring_update() - Update transaction state from hardware
+ * @gsi: GSI pointer
+ * @evt_ring_id: Event ring ID
+ * @index: Event index in ring reported by hardware
*
* Events for RX channels contain the actual number of bytes received into
* the buffer. Every event has a transaction associated with it, and here
* we update transactions to record their actual received lengths.
*
+ * When an event for a TX channel arrives we use information in the
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
+ *
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
* the first entry in need of processing. The index provided is the
* first *unfilled* event in the ring (following the last filled one).
*
* Events are sequential within the event ring, and transactions are
- * sequential within the transaction pool.
+ * sequential within the transaction array.
*
* Note that @index always refers to an element *within* the event ring.
*/
-static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
- struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- struct gsi_trans_info *trans_info;
struct gsi_event *event_done;
struct gsi_event *event;
- struct gsi_trans *trans;
- u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
- trans_info = &channel->trans_info;
-
- /* We'll start with the oldest un-processed event. RX channels
- * replenish receive buffers in single-TRE transactions, so we
- * can just map that event to its transaction. Transactions
- * associated with completion events are consecutive.
+ /* Starting with the oldest un-processed event, determine which
+ * transaction (and which channel) is associated with the event.
+ * For RX channels, update each completed transaction with the
+ * number of bytes that were actually received. For TX channels
+ * associated with a network device, report to the network stack
+ * the number of transfers and bytes this completion represents.
*/
old_index = ring->index;
event = gsi_ring_virt(ring, old_index);
- trans = gsi_event_trans(channel, event);
/* Compute the number of events to process before we wrap,
* and determine when we'll be done processing events.
@@ -1388,20 +1385,28 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
event_avail = ring->count - old_index % ring->count;
event_done = gsi_ring_virt(ring, index);
do {
- trans->len = __le16_to_cpu(event->len);
- byte_count += trans->len;
+ struct gsi_trans *trans;
+
+ trans = gsi_event_trans(gsi, event);
+ if (!trans)
+ return;
+
+ if (trans->direction == DMA_FROM_DEVICE)
+ trans->len = __le16_to_cpu(event->len);
+ else
+ gsi_trans_tx_completed(trans);
+
+ gsi_trans_move_complete(trans);
/* Move on to the next event and transaction */
if (--event_avail)
event++;
else
event = gsi_ring_virt(ring, 0);
- trans = gsi_trans_pool_next(&trans_info->pool, trans);
} while (event != event_done);
- /* We record RX bytes when they are received */
- channel->byte_count += byte_count;
- channel->trans_count++;
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
/* Initialize a ring, including allocating DMA memory for its entries */
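
The event-update loop above walks entries from the ring's current index up to the hardware-reported index, wrapping at most once per pass because event_avail counts the entries remaining before the end of the ring. A wrap-safe walk in the same shape, with the per-event work elided and all DEMO types assumed:

#include <linux/types.h>

struct demo_event { __le16 len; u8 pad[14]; };	/* 16-byte element */

struct demo_ring {
	struct demo_event *virt;	/* ring base address */
	u32 count;			/* entries (power of 2) */
	u32 index;			/* first unprocessed entry */
};

/* Process entries from ring->index up to, not including, done_index */
static void demo_ring_walk(struct demo_ring *ring, u32 done_index)
{
	u32 avail = ring->count - ring->index % ring->count;
	struct demo_event *ev = &ring->virt[ring->index % ring->count];
	struct demo_event *done = &ring->virt[done_index % ring->count];

	while (ev != done) {
		/* ...handle one event at ev here... */
		if (--avail)
			ev++;			/* room before the end */
		else
			ev = ring->virt;	/* wrap to the start */
	}
	ring->index = done_index;
}
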
@@ -1421,6 +1426,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
ring->addr = addr;
ring->count = count;
+ ring->index = 0;
return 0;
}
@@ -1468,8 +1474,8 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
-/* Consult hardware, move any newly completed transactions to completed list */
-static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
+/* Consult hardware, move newly completed transactions to completed state */
+void gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1488,33 +1494,19 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return NULL;
+ return;
- /* Get the transaction for the latest completed event. Take a
- * reference to keep it from completing before we give the events
- * for this and previous transactions back to the hardware.
- */
- trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
- refcount_inc(&trans->refcount);
+ /* Get the transaction for the latest completed event. */
+ trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
+ if (!trans)
+ return;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
* the number of transactions and bytes this completion represents
* up the network stack.
*/
- if (channel->toward_ipa)
- gsi_channel_tx_update(channel, trans);
- else
- gsi_evt_ring_rx_update(evt_ring, index);
-
- gsi_trans_move_complete(trans);
-
- /* Tell the hardware we've handled these events */
- gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
-
- gsi_trans_free(trans);
-
- return gsi_channel_trans_complete(channel);
+ gsi_evt_ring_update(gsi, evt_ring_id, index);
}
/**
@@ -1523,21 +1515,18 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
*
* Return: Transaction pointer, or null if none are available
*
- * This function returns the first entry on a channel's completed transaction
- * list. If that list is empty, the hardware is consulted to determine
- * whether any new transactions have completed. If so, they're moved to the
- * completed list and the new first entry is returned. If there are no more
- * completed transactions, a null pointer is returned.
+ * This function returns the first of a channel's completed transactions.
+ * If no transactions are in completed state, the hardware is consulted to
+ * determine whether any new transactions have completed. If so, they're
+ * moved to completed state and the first such transaction is returned.
+ * If there are no more completed transactions, a null pointer is returned.
*/
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
struct gsi_trans *trans;
- /* Get the first transaction from the completed list */
+ /* Get the first completed transaction */
trans = gsi_channel_trans_complete(channel);
- if (!trans) /* List is empty; see if there's more to do */
- trans = gsi_channel_update(channel);
-
if (trans)
gsi_trans_move_polled(trans);
@@ -1614,11 +1603,11 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_program(channel, true);
if (channel->toward_ipa)
- netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll);
else
netif_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ gsi_channel_poll);
return 0;
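
The NAPI registration change above uses the interface as it appears in this diff: netif_napi_add() without an explicit weight argument (the default is NAPI_POLL_WEIGHT) and a dedicated netif_napi_add_tx() helper for TX-completion polling. A minimal sketch with a stub poll function:

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	/* real code: napi_complete_done(napi, work) when work < budget */
	napi_complete(napi);
	return 0;
}

static void demo_register_napi(struct net_device *dev,
			       struct napi_struct *rx_napi,
			       struct napi_struct *tx_napi)
{
	/* RX (or combined) NAPI: the explicit weight argument is gone */
	netif_napi_add(dev, rx_napi, demo_poll);
	/* TX-completion NAPI now has its own helper */
	netif_napi_add_tx(dev, tx_napi, demo_poll);
}
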
@@ -2005,9 +1994,10 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-static bool gsi_channel_data_valid(struct gsi *gsi,
+static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
const struct ipa_gsi_endpoint_data *data)
{
+ const struct gsi_channel_data *channel_data;
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2023,10 +2013,24 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
return false;
}
- if (!data->channel.tlv_count ||
- data->channel.tlv_count > GSI_TLV_MAX) {
+ if (command && !data->toward_ipa) {
+ dev_err(dev, "command channel %u is not TX\n", channel_id);
+ return false;
+ }
+
+ channel_data = &data->channel;
+
+ if (!channel_data->tlv_count ||
+ channel_data->tlv_count > GSI_TLV_MAX) {
dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
- channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ channel_id, channel_data->tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
+ dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
+ channel_id, IPA_COMMAND_TRANS_TRE_MAX,
+ channel_data->tlv_count);
return false;
}
@@ -2035,22 +2039,22 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
* gsi_channel_tre_max() is computed, tre_count has to be almost
* twice the TLV FIFO size to satisfy this requirement.
*/
- if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
- channel_id, data->channel.tlv_count,
- data->channel.tre_count);
+ channel_id, channel_data->tlv_count,
+ channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.tre_count)) {
+ if (!is_power_of_2(channel_data->tre_count)) {
dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
- channel_id, data->channel.tre_count);
+ channel_id, channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.event_count)) {
+ if (!is_power_of_2(channel_data->event_count)) {
dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
- channel_id, data->channel.event_count);
+ channel_id, channel_data->event_count);
return false;
}
@@ -2066,7 +2070,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
u32 tre_count;
int ret;
- if (!gsi_channel_data_valid(gsi, data))
+ if (!gsi_channel_data_valid(gsi, command, data))
return -EINVAL;
/* Worst case we need an event for every outstanding TRE */
@@ -2084,7 +2088,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->tlv_count = data->channel.tlv_count;
+ channel->trans_tre_max = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -2299,13 +2303,5 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
/* Hardware limit is channel->tre_count - 1 */
- return channel->tre_count - (channel->tlv_count - 1);
-}
-
-/* Returns the maximum number of TREs in a single transaction for a channel */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
-{
- struct gsi_channel *channel = &gsi->channel[channel_id];
-
- return channel->tlv_count;
+ return channel->tre_count - (channel->trans_tre_max - 1);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 9cc657658811..49dcadba4e0b 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -31,14 +31,6 @@ struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;
-/* Execution environment IDs */
-enum gsi_ee_id {
- GSI_EE_AP = 0x0,
- GSI_EE_MODEM = 0x1,
- GSI_EE_UC = 0x2,
- GSI_EE_TZ = 0x3,
-};
-
struct gsi_ring {
void *virt; /* ring array base address */
dma_addr_t addr; /* primarily low 32 bits used */
@@ -48,12 +40,13 @@ struct gsi_ring {
*
* A channel ring consists of TRE entries filled by the AP and passed
* to the hardware for processing. For a channel ring, the ring index
- * identifies the next unused entry to be filled by the AP.
+ * identifies the next unused entry to be filled by the AP. In this
+ * case the initial value is assumed by hardware to be 0.
*
* An event ring consists of event structures filled by the hardware
* and passed to the AP. For event rings, the ring index identifies
* the next ring entry that is not known to have been filled by the
- * hardware.
+ * hardware. The initial value used is arbitrary (so we use 0).
*/
u32 index;
};
@@ -81,17 +74,18 @@ struct gsi_trans_pool {
struct gsi_trans_info {
atomic_t tre_avail; /* TREs available for allocation */
- struct gsi_trans_pool pool; /* transaction pool */
- struct gsi_trans_pool sg_pool; /* scatterlist pool */
- struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
- struct gsi_trans_pool info_pool;/* command information pool */
+
+ u16 free_id; /* first free trans in array */
+ u16 allocated_id; /* first allocated transaction */
+ u16 committed_id; /* first committed transaction */
+ u16 pending_id; /* first pending transaction */
+ u16 completed_id; /* first completed transaction */
+ u16 polled_id; /* first polled transaction */
+ struct gsi_trans *trans; /* transaction array */
struct gsi_trans **map; /* TRE -> transaction map */
- spinlock_t spinlock; /* protects updates to the lists */
- struct list_head alloc; /* allocated, not committed */
- struct list_head pending; /* committed, awaiting completion */
- struct list_head complete; /* completed, awaiting poll */
- struct list_head polled; /* returned by gsi_channel_poll_one() */
+ struct gsi_trans_pool sg_pool; /* scatterlist pool */
+ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
};
/* Hardware values signifying the state of a channel */
@@ -111,16 +105,16 @@ struct gsi_channel {
bool toward_ipa;
bool command; /* AP command TX channel or not */
- u8 tlv_count; /* # entries in TLV FIFO */
+ u8 trans_tre_max; /* max TREs in a transaction */
u16 tre_count;
u16 event_count;
struct gsi_ring tre_ring;
u32 evt_ring_id;
+ /* The following counts are used only for TX endpoints */
u64 byte_count; /* total # bytes transferred */
u64 trans_count; /* total # transactions */
- /* The following counts are used only for TX endpoints */
u64 queued_byte_count; /* last reported queued byte count */
u64 queued_trans_count; /* ...and queued trans count */
u64 compl_byte_count; /* last reported completed byte count */
@@ -185,20 +179,11 @@ void gsi_teardown(struct gsi *gsi);
* @gsi: GSI pointer
* @channel_id: Channel whose limit is to be returned
*
- * Return: The maximum number of TREs oustanding on the channel
+ * Return: The maximum number of TREs outstanding on the channel
*/
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
/**
- * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
- * @gsi: GSI pointer
- * @channel_id: Channel whose limit is to be returned
- *
- * Return: The maximum TRE count per transaction on the channel
- */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
-
-/**
* gsi_channel_start() - Start an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to start
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index ea333a244cf5..c65f7c5cdc8d 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_PRIVATE_H_
#define _GSI_PRIVATE_H_
@@ -16,18 +16,15 @@ struct gsi_channel;
#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
-/* Return the entry that follows one provided in a transaction pool */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
-
/**
* gsi_trans_move_complete() - Mark a GSI transaction completed
- * @trans: Transaction to commit
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_complete(struct gsi_trans *trans);
/**
* gsi_trans_move_polled() - Mark a transaction polled
- * @trans: Transaction to update
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_polled(struct gsi_trans *trans);
@@ -97,6 +94,14 @@ void gsi_channel_trans_exit(struct gsi_channel *channel);
*/
void gsi_channel_doorbell(struct gsi_channel *channel);
+/* gsi_channel_update() - Update knowledge of channel hardware state
+ * @channel: Channel to be updated
+ *
+ * Consult hardware, change the state of any newly-completed transactions
+ * on a channel.
+ */
+void gsi_channel_update(struct gsi_channel *channel);
+
/**
* gsi_ring_virt() - Return virtual address for a ring entry
* @ring: Ring whose address is to be translated
@@ -105,14 +110,21 @@ void gsi_channel_doorbell(struct gsi_channel *channel);
void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
/**
- * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
- * @channel: Channel whose bytes have been queued
+ * gsi_trans_tx_committed() - Record bytes committed for transmit
+ * @trans: TX endpoint transaction being committed
+ *
+ * Report that a TX transaction has been committed. This updates the
+ * channel byte and transaction counts used to manage transmit rates.
+ */
+void gsi_trans_tx_committed(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_tx_queued() - Report a queued TX channel transaction
+ * @trans: Transaction being passed to hardware
*
- * This arranges for the the number of transactions and bytes for
- * transfer that have been queued to hardware to be reported. It
- * passes this information up the network stack so it can be used to
- * throttle transmissions.
+ * Report to the network stack that a TX transaction is being supplied
+ * to the hardware.
*/
-void gsi_channel_tx_queued(struct gsi_channel *channel);
+void gsi_trans_tx_queued(struct gsi_trans *trans);
#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 8906f4381032..3763359f208f 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
@@ -55,14 +55,10 @@
/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c020 + 0x1000 * (ee))
+ (0x0000c020 + 0x1000 * GSI_EE_AP)
#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c024 + 0x1000 * (ee))
+ (0x0000c024 + 0x1000 * GSI_EE_AP)
/* All other register offsets are relative to gsi->virt */
@@ -81,9 +77,7 @@ enum gsi_channel_type {
};
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
- (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
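Every remaining per-EE macro in this header is collapsed the same way: the (ee) parameter disappears and the constant GSI_EE_AP is substituted, since the AP driver only ever addresses its own execution environment's registers. A sketch of the effect, assuming GSI_EE_AP is 0 (its value in the driver's gsi.h):

	/* Before: the caller names the EE explicitly */
	offset = GSI_EE_N_CH_C_CNTXT_0_OFFSET(3, GSI_EE_AP);
	/* After: the EE is baked into the single remaining macro */
	offset = GSI_CH_C_CNTXT_0_OFFSET(3);
	/* Both expand to 0x0001c000 + 0x4000 * 0 + 0x80 * 3 = 0x1c180 */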
@@ -112,9 +106,7 @@ chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
}
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
- (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
static inline u32 r_length_encoded(enum ipa_version version, u32 length)
@@ -125,19 +117,13 @@ static inline u32 r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
- (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
- (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_QOS_OFFSET(ch) \
- GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
- (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
@@ -158,29 +144,19 @@ enum gsi_prefetch_mode {
};
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
- (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
- (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
- (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
- (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
- (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define EV_CHTYPE_FMASK GENMASK(3, 0)
#define EV_EE_FMASK GENMASK(7, 4)
@@ -190,9 +166,7 @@ enum gsi_prefetch_mode {
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
- (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */
static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
{
@@ -202,83 +176,53 @@ static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
- (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
- (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
- (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
- (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define MODT_FMASK GENMASK(15, 0)
#define MODC_FMASK GENMASK(23, 16)
#define MOD_CNT_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
- (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
- (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
- (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
- (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
- (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
- (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
- (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
- GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
- (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+ (0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
- (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+ (0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
#define GSI_GSI_STATUS_OFFSET \
- GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
- (0x0001f000 + 0x4000 * (ee))
+ (0x0001f000 + 0x4000 * GSI_EE_AP)
#define ENABLED_FMASK GENMASK(0, 0)
#define GSI_CH_CMD_OFFSET \
- GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CH_CMD_OFFSET(ee) \
- (0x0001f008 + 0x4000 * (ee))
+ (0x0001f008 + 0x4000 * GSI_EE_AP)
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
@@ -293,9 +237,7 @@ enum gsi_ch_cmd_opcode {
};
#define GSI_EV_CH_CMD_OFFSET \
- GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
- (0x0001f010 + 0x4000 * (ee))
+ (0x0001f010 + 0x4000 * GSI_EE_AP)
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
@@ -307,9 +249,7 @@ enum gsi_evt_cmd_opcode {
};
#define GSI_GENERIC_CMD_OFFSET \
- GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
- (0x0001f018 + 0x4000 * (ee))
+ (0x0001f018 + 0x4000 * GSI_EE_AP)
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
@@ -326,9 +266,7 @@ enum gsi_generic_cmd_opcode {
/* The next register is present for IPA v3.5.1 and above */
#define GSI_GSI_HW_PARAM_2_OFFSET \
- GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
- (0x0001f040 + 0x4000 * (ee))
+ (0x0001f040 + 0x4000 * GSI_EE_AP)
#define IRAM_SIZE_FMASK GENMASK(2, 0)
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
@@ -357,13 +295,9 @@ enum gsi_iram_size {
/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
- (0x0001f080 + 0x4000 * (ee))
+ (0x0001f080 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
- (0x0001f088 + 0x4000 * (ee))
+ (0x0001f088 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
enum gsi_irq_type_id {
@@ -377,62 +311,38 @@ enum gsi_irq_type_id {
};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
- (0x0001f090 + 0x4000 * (ee))
+ (0x0001f090 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
- (0x0001f094 + 0x4000 * (ee))
+ (0x0001f094 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f098 + 0x4000 * (ee))
+ (0x0001f098 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f09c + 0x4000 * (ee))
+ (0x0001f09c + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a0 + 0x4000 * (ee))
+ (0x0001f0a0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a4 + 0x4000 * (ee))
+ (0x0001f0a4 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
- (0x0001f0b0 + 0x4000 * (ee))
+ (0x0001f0b0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
- (0x0001f0b8 + 0x4000 * (ee))
+ (0x0001f0b8 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f0c0 + 0x4000 * (ee))
+ (0x0001f0c0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
- (0x0001f100 + 0x4000 * (ee))
+ (0x0001f100 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
- (0x0001f108 + 0x4000 * (ee))
+ (0x0001f108 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f110 + 0x4000 * (ee))
+ (0x0001f110 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the GLOB_IRQ_* registers */
enum gsi_global_irq_id {
ERROR_INT = 0x0,
@@ -442,17 +352,11 @@ enum gsi_global_irq_id {
};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
- (0x0001f118 + 0x4000 * (ee))
+ (0x0001f118 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
- (0x0001f120 + 0x4000 * (ee))
+ (0x0001f120 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
- (0x0001f128 + 0x4000 * (ee))
+ (0x0001f128 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the (general) GSI_IRQ_* registers */
enum gsi_general_id {
BREAK_POINT = 0x0,
@@ -462,15 +366,11 @@ enum gsi_general_id {
};
#define GSI_CNTXT_INTSET_OFFSET \
- GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
- (0x0001f180 + 0x4000 * (ee))
+ (0x0001f180 + 0x4000 * GSI_EE_AP)
#define INTYPE_FMASK GENMASK(0, 0)
#define GSI_ERROR_LOG_OFFSET \
- GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
- (0x0001f200 + 0x4000 * (ee))
+ (0x0001f200 + 0x4000 * GSI_EE_AP)
/* Fields below are present for IPA v3.5.1 and above */
#define ERR_ARG3_FMASK GENMASK(3, 0)
@@ -501,21 +401,17 @@ enum gsi_err_type {
};
#define GSI_ERROR_LOG_CLR_OFFSET \
- GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
- (0x0001f210 + 0x4000 * (ee))
+ (0x0001f210 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SCRATCH_0_OFFSET \
- GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
- (0x0001f400 + 0x4000 * (ee))
+ (0x0001f400 + 0x4000 * GSI_EE_AP)
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
enum gsi_generic_ee_result {
GENERIC_EE_SUCCESS = 0x1,
- GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
+ GENERIC_EE_INCORRECT_CHANNEL_STATE = 0x2,
GENERIC_EE_INCORRECT_DIRECTION = 0x3,
GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
GENERIC_EE_INCORRECT_CHANNEL = 0x5,
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 1544564bc283..26b7f683a3e1 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -22,37 +22,36 @@
* DOC: GSI Transactions
*
* A GSI transaction abstracts the behavior of a GSI channel by representing
- * everything about a related group of IPA commands in a single structure.
- * (A "command" in this sense is either a data transfer or an IPA immediate
+ * everything about a related group of IPA operations in a single structure.
+ * (An "operation" in this sense is either a data transfer or an IPA immediate
* command.) Most details of interaction with the GSI hardware are managed
- * by the GSI transaction core, allowing users to simply describe commands
+ * by the GSI transaction core, allowing users to simply describe operations
* to be performed. When a transaction has completed a callback function
* (dependent on the type of endpoint associated with the channel) allows
* cleanup of resources associated with the transaction.
*
- * To perform a command (or set of them), a user of the GSI transaction
+ * To perform an operation (or set of them), a user of the GSI transaction
* interface allocates a transaction, indicating the number of TREs required
- * (one per command). If sufficient TREs are available, they are reserved
+ * (one per operation). If sufficient TREs are available, they are reserved
* for use in the transaction and the allocation succeeds. This way
- * exhaustion of the available TREs in a channel ring is detected
- * as early as possible. All resources required to complete a transaction
- * are allocated at transaction allocation time.
+ * exhaustion of the available TREs in a channel ring is detected as early
+ * as possible. Any other resources that might be needed to complete a
+ * transaction are also allocated when the transaction is allocated.
*
- * Commands performed as part of a transaction are represented in an array
- * of Linux scatterlist structures. This array is allocated with the
- * transaction, and its entries are initialized using standard scatterlist
- * functions (such as sg_set_buf() or skb_to_sgvec()).
+ * Operations performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures, allocated with the transaction. These
+ * scatterlist structures are initialized by "adding" operations to the
+ * transaction. If a buffer in an operation must be mapped for DMA, this is
+ * done at the time it is added to the transaction. It is possible for a
+ * mapping error to occur when an operation is added. In this case the
+ * transaction should simply be freed; this correctly releases resources
+ * associated with the transaction.
*
- * Once a transaction's scatterlist structures have been initialized, the
- * transaction is committed. The caller is responsible for mapping buffers
- * for DMA if necessary, and this should be done *before* allocating
- * the transaction. Between a successful allocation and commit of a
- * transaction no errors should occur.
- *
- * Committing transfers ownership of the entire transaction to the GSI
- * transaction core. The GSI transaction code formats the content of
- * the scatterlist array into the channel ring buffer and informs the
- * hardware that new TREs are available to process.
+ * Once all operations have been successfully added to a transaction, the
+ * transaction is committed. Committing transfers ownership of the entire
+ * transaction to the GSI transaction core. The GSI transaction code
+ * formats the content of the scatterlist array into the channel ring
+ * buffer and informs the hardware that new TREs are available to process.
*
* The last TRE in each transaction is marked to interrupt the AP when the
* GSI hardware has completed it. Because transfers described by TREs are
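To make the lifecycle described in this comment concrete, here is a minimal caller-side sketch using the interfaces declared in gsi_trans.h; the gsi, channel_id, page, size, and offset values are assumed to come from the surrounding endpoint code:

	struct gsi_trans *trans;

	/* Reserve one TRE; returns NULL if the ring is exhausted */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Add one operation; the buffer is DMA-mapped here */
	if (gsi_trans_page_add(trans, page, size, offset)) {
		gsi_trans_free(trans);	/* releases the reserved TRE */
		return -ENOMEM;
	}

	/* Hand ownership to the transaction core; ring the doorbell */
	gsi_trans_commit(trans, true);
	return 0;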
@@ -125,11 +124,10 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
memset(pool, 0, sizeof(*pool));
}
-/* Allocate the requested number of (zeroed) entries from the pool */
-/* Home-grown DMA pool. This way we can preallocate and use the tre_count
- * to guarantee allocations will succeed. Even though we specify max_alloc
- * (and it can be more than one), we only allow allocation of a single
- * element from a DMA pool.
+/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
+ * allocations will succeed. The immediate commands in a transaction can
+ * require up to max_alloc elements from the pool. But we only allow
+ * allocation of a single element from a DMA pool at a time.
*/
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
size_t size, u32 count, u32 max_alloc)
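A sketch of how the command path uses this pool (the element counts are illustrative; the real call in ipa_cmd.c passes tre_max and the channel's trans_tre_max): with up to count single-element allocations outstanding, and one transaction able to hold up to max_alloc elements before committing, sizing the pool for both keeps every allocation infallible.

	/* Up to 256 payloads outstanding; one transaction may hold 8 */
	ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				      sizeof(union ipa_cmd_payload),
				      256, 8);
	if (ret)
		return ret;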
@@ -214,26 +212,14 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
return pool->base + offset;
}
-/* Return the pool element that immediately follows the one given.
- * This only works done if elements are allocated one at a time.
- */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+/* Map a TRE ring entry index to the transaction it is associated with */
+static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
- void *end = pool->base + pool->count * pool->size;
-
- WARN_ON(element < pool->base);
- WARN_ON(element >= end);
- WARN_ON(pool->max_alloc != 1);
-
- element += pool->size;
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- return element < end ? element : pool->base;
-}
+ /* The completion event will indicate the last TRE used */
+ index += trans->used_count - 1;
-/* Map a given ring entry index to the transaction associated with it */
-static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
- struct gsi_trans *trans)
-{
/* Note: index *must* be used modulo the ring count here */
channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
@@ -249,52 +235,63 @@ gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
- return list_first_entry_or_null(&channel->trans_info.complete,
- struct gsi_trans, links);
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_id = trans_info->completed_id;
+
+ if (trans_id == trans_info->pending_id) {
+ gsi_channel_update(channel);
+ if (trans_id == trans_info->pending_id)
+ return NULL;
+ }
+
+ return &trans_info->trans[trans_id %= channel->tre_count];
}
-/* Move a transaction from the allocated list to the pending list */
-static void gsi_trans_move_pending(struct gsi_trans *trans)
+/* Move a transaction from allocated to committed state */
+static void gsi_trans_move_committed(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
+ /* This allocated transaction is now committed */
+ trans_info->allocated_id++;
+}
- list_move_tail(&trans->links, &trans_info->pending);
+/* Move committed transactions to pending state */
+static void gsi_trans_move_pending(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_index = trans - &trans_info->trans[0];
+ u16 delta;
- spin_unlock_bh(&trans_info->spinlock);
+ /* These committed transactions are now pending */
+ delta = trans_index - trans_info->committed_id + 1;
+ trans_info->committed_id += delta % channel->tre_count;
}
-/* Move a transaction and all of its predecessors from the pending list
- * to the completed list.
- */
+/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct list_head list;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* Move this transaction and all predecessors to completed list */
- list_cut_position(&list, &trans_info->pending, &trans->links);
- list_splice_tail(&list, &trans_info->complete);
+ u16 trans_index = trans - trans_info->trans;
+ u16 delta;
- spin_unlock_bh(&trans_info->spinlock);
+ /* These pending transactions are now completed */
+ delta = trans_index - trans_info->pending_id + 1;
+ delta %= channel->tre_count;
+ trans_info->pending_id += delta;
}
-/* Move a transaction from the completed list to the polled list */
+/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
-
- list_move_tail(&trans->links, &trans_info->polled);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* This completed transaction is now polled */
+ trans_info->completed_id++;
}
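Together, the six IDs in gsi_trans_info replace the old state lists with positions in a conceptual ring. The IDs only ever advance (they are u16 values that wrap), and at all times:

	polled_id <= completed_id <= pending_id <= committed_id
		  <= allocated_id <= free_id

so a transaction's state can be derived from where its ID falls. A hypothetical helper, not part of the driver, illustrating the "pending" test in wrap-safe arithmetic:

	static bool gsi_trans_is_pending(struct gsi_trans_info *trans_info,
					 u16 trans_id)
	{
		/* True if trans_id is in [pending_id, committed_id) */
		return (u16)(trans_id - trans_info->pending_id) <
		       (u16)(trans_info->committed_id - trans_info->pending_id);
	}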
/* Reserve some number of TREs on a channel. Returns true if successful */
@@ -320,6 +317,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
atomic_add(tre_count, &trans_info->tre_avail);
}
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+ u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+ struct gsi_trans_info *trans_info;
+
+ trans_info = &gsi->channel[channel_id].trans_info;
+
+ return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
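A hedged usage sketch: because the test is simply "all TREs available", a caller that needs a quiesced channel can poll this predicate. The loop below is hypothetical; the real callers and their back-off policy live in the endpoint code.

	/* Wait for every outstanding transaction on the channel to be freed */
	while (!gsi_channel_trans_idle(gsi, channel_id))
		usleep_range(1000, 2000);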
/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
u32 tre_count,
@@ -328,85 +336,78 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_channel *channel = &gsi->channel[channel_id];
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
+ u16 trans_index;
- if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
+ if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
- /* We reserve the TREs now, but consume them at commit time.
- * If there aren't enough available, we're done.
- */
+ /* If we can't reserve the TREs for the transaction, we're done */
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the the transaction */
- trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans_index = trans_info->free_id % channel->tre_count;
+ trans = &trans_info->trans[trans_index];
+ memset(trans, 0, sizeof(*trans));
+
+ /* Initialize non-zero fields in the transaction */
trans->gsi = gsi;
trans->channel_id = channel_id;
- trans->tre_count = tre_count;
+ trans->rsvd_count = tre_count;
init_completion(&trans->completion);
- /* Allocate the scatterlist and (if requested) info entries. */
+ /* Allocate the scatterlist */
trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
sg_init_marker(trans->sgl, tre_count);
trans->direction = direction;
-
- spin_lock_bh(&trans_info->spinlock);
-
- list_add_tail(&trans->links, &trans_info->alloc);
-
- spin_unlock_bh(&trans_info->spinlock);
-
refcount_set(&trans->refcount, 1);
+ /* This free transaction is now allocated */
+ trans_info->free_id++;
+
return trans;
}
/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
- refcount_t *refcount = &trans->refcount;
struct gsi_trans_info *trans_info;
- bool last;
- /* We must hold the lock to release the last reference */
- if (refcount_dec_not_one(refcount))
+ if (!refcount_dec_and_test(&trans->refcount))
return;
+ /* Unused transactions are allocated but never committed, pending,
+ * completed, or polled.
+ */
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+ if (!trans->used_count) {
+ trans_info->allocated_id++;
+ trans_info->committed_id++;
+ trans_info->pending_id++;
+ trans_info->completed_id++;
+ } else {
+ ipa_gsi_trans_release(trans);
+ }
- spin_lock_bh(&trans_info->spinlock);
-
- /* Reference might have been added before we got the lock */
- last = refcount_dec_and_test(refcount);
- if (last)
- list_del(&trans->links);
-
- spin_unlock_bh(&trans_info->spinlock);
-
- if (!last)
- return;
-
- ipa_gsi_trans_release(trans);
+ /* This transaction is now free */
+ trans_info->polled_id++;
-	/* Releasing the reserved TREs implicitly frees the sgl[] and
-	 * (if present) info[] arrays, plus the transaction itself.
+	/* Releasing the reserved TREs implicitly frees the sgl[]
+	 * array, plus the transaction itself.
*/
- gsi_trans_tre_release(trans_info, trans->tre_count);
+ gsi_trans_tre_release(trans_info, trans->rsvd_count);
}
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
- dma_addr_t addr, enum dma_data_direction direction,
- enum ipa_cmd_opcode opcode)
+ dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
- struct ipa_cmd_info *info;
- u32 which = trans->used++;
+ u32 which = trans->used_count++;
struct scatterlist *sg;
- WARN_ON(which >= trans->tre_count);
+ WARN_ON(which >= trans->rsvd_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -427,9 +428,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- info = &trans->info[which];
- info->opcode = opcode;
- info->direction = direction;
+ trans->cmd_opcode[which] = opcode;
}
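With the ipa_cmd_info array gone, the only per-TRE state recorded at add time is the opcode, kept in the transaction's small cmd_opcode[] array (safe to overlay @data, since command transactions never use it). A sketch of a caller adding two immediate commands; the payload pointers and DMA addresses are assumed to come from the command payload pool:

	gsi_trans_cmd_add(trans, payload0, sizeof(*payload0), addr0,
			  IPA_CMD_HDR_INIT_LOCAL);
	gsi_trans_cmd_add(trans, payload1, sizeof(*payload1), addr1,
			  IPA_CMD_DMA_SHARED_MEM);
	/* cmd_opcode[0] and cmd_opcode[1] now hold the opcodes, and
	 * __gsi_trans_commit() will copy them into the TREs it builds.
	 */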
/* Add a page transfer to a transaction. It will fill the only TRE. */
@@ -439,9 +438,9 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
sg_set_page(sg, page, size, offset);
@@ -449,7 +448,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
if (!ret)
return -ENOMEM;
- trans->used++; /* Transaction now owns the (DMA mapped) page */
+ trans->used_count++; /* Transaction now owns the (DMA mapped) page */
return 0;
}
@@ -458,25 +457,26 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
struct scatterlist *sg = &trans->sgl[0];
- u32 used;
+ u32 used_count;
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (ret < 0)
return ret;
- used = ret;
+ used_count = ret;
- ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
if (!ret)
return -ENOMEM;
- trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+ /* Transaction now owns the (DMA mapped) skb */
+ trans->used_count += used_count;
return 0;
}
@@ -535,68 +535,64 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
*
* Formats channel ring TRE entries based on the content of the scatterlist.
* Maps a transaction pointer to the last ring entry used for the transaction,
- * so it can be recovered when it completes. Moves the transaction to the
- * pending list. Finally, updates the channel ring pointer and optionally
+ * so it can be recovered when it completes. Moves the transaction to
+ * committed state (then to pending when the doorbell is rung). Finally,
+ * updates the channel ring pointer and optionally
* rings the doorbell.
*/
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
- struct ipa_cmd_info *info;
struct gsi_tre *dest_tre;
struct scatterlist *sg;
u32 byte_count = 0;
+ u8 *cmd_opcode;
u32 avail;
u32 i;
- WARN_ON(!trans->used);
+ WARN_ON(!trans->used_count);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
-	 * If there is no info array we're doing a simple data
-	 * transfer request, whose opcode is IPA_CMD_NONE.
+	 * If there is no command opcode array we're doing a simple
+	 * data transfer request, whose opcode is IPA_CMD_NONE.
*/
- info = trans->info ? &trans->info[0] : NULL;
- avail = ring->count - ring->index % ring->count;
- dest_tre = gsi_ring_virt(ring, ring->index);
- for_each_sg(trans->sgl, sg, trans->used, i) {
- bool last_tre = i == trans->used - 1;
+ cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
+ avail = tre_ring->count - tre_ring->index % tre_ring->count;
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
+ for_each_sg(trans->sgl, sg, trans->used_count, i) {
+ bool last_tre = i == trans->used_count - 1;
dma_addr_t addr = sg_dma_address(sg);
u32 len = sg_dma_len(sg);
byte_count += len;
if (!avail--)
- dest_tre = gsi_ring_virt(ring, 0);
- if (info)
- opcode = info++->opcode;
+ dest_tre = gsi_ring_virt(tre_ring, 0);
+ if (cmd_opcode)
+ opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
}
- ring->index += trans->used;
-
- if (channel->toward_ipa) {
- /* We record TX bytes when they are sent */
- trans->len = byte_count;
- trans->trans_count = channel->trans_count;
- trans->byte_count = channel->byte_count;
- channel->trans_count++;
- channel->byte_count += byte_count;
- }
+ /* Associate the TRE with the transaction */
+ gsi_trans_map(trans, tre_ring->index);
+
+ tre_ring->index += trans->used_count;
- /* Associate the last TRE with the transaction */
- gsi_channel_trans_map(channel, ring->index - 1, trans);
+ trans->len = byte_count;
+ if (channel->toward_ipa)
+ gsi_trans_tx_committed(trans);
- gsi_trans_move_pending(trans);
+ gsi_trans_move_committed(trans);
/* Ring doorbell if requested, or if all TREs are allocated */
if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
/* Report what we're handing off to hardware for TX channels */
if (channel->toward_ipa)
- gsi_channel_tx_queued(channel);
+ gsi_trans_tx_queued(trans);
+ gsi_trans_move_pending(trans);
gsi_channel_doorbell(channel);
}
}
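The ring_db argument lets a caller batch several transactions behind a single doorbell, which an RX replenish path might use roughly as follows (a hypothetical sketch; pages[] and count are assumed to be supplied by the caller):

	for (i = 0; i < count; i++) {
		struct gsi_trans *trans;

		trans = gsi_channel_trans_alloc(gsi, channel_id, 1,
						DMA_FROM_DEVICE);
		if (!trans)
			break;		/* TRE ring exhausted */

		if (gsi_trans_page_add(trans, pages[i], PAGE_SIZE, 0)) {
			gsi_trans_free(trans);
			break;
		}

		/* Ring the doorbell only for the last transaction */
		gsi_trans_commit(trans, i == count - 1);
	}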
@@ -604,7 +600,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
- if (trans->used)
+ if (trans->used_count)
__gsi_trans_commit(trans, ring_db);
else
gsi_trans_free(trans);
@@ -613,7 +609,7 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
- if (!trans->used)
+ if (!trans->used_count)
goto out_trans_free;
refcount_inc(&trans->refcount);
@@ -626,34 +622,12 @@ out_trans_free:
gsi_trans_free(trans);
}
-/* Commit a GSI transaction and wait for it to complete, with timeout */
-int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
- unsigned long timeout)
-{
- unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
- unsigned long remaining = 1; /* In case of empty transaction */
-
- if (!trans->used)
- goto out_trans_free;
-
- refcount_inc(&trans->refcount);
-
- __gsi_trans_commit(trans, true);
-
- remaining = wait_for_completion_timeout(&trans->completion,
- timeout_jiffies);
-out_trans_free:
- gsi_trans_free(trans);
-
- return remaining ? 0 : -ETIMEDOUT;
-}
-
/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
/* If the entire SGL was mapped when added, unmap it now */
if (trans->direction != DMA_NONE)
- dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
trans->direction);
ipa_gsi_trans_complete(trans);
@@ -667,30 +641,34 @@ void gsi_trans_complete(struct gsi_trans *trans)
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct gsi_trans *trans;
- bool cancelled;
+ u16 trans_id = trans_info->pending_id;
/* channel->gsi->mutex is held by caller */
- spin_lock_bh(&trans_info->spinlock);
- cancelled = !list_empty(&trans_info->pending);
- list_for_each_entry(trans, &trans_info->pending, links)
- trans->cancelled = true;
+ /* If there are no pending transactions, we're done */
+ if (trans_id == trans_info->committed_id)
+ return;
- list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+ /* Mark all pending transactions cancelled */
+ do {
+ struct gsi_trans *trans;
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ trans->cancelled = true;
+ } while (++trans_id != trans_info->committed_id);
+
+ /* All pending transactions are now completed */
+ trans_info->pending_id = trans_info->committed_id;
/* Schedule NAPI polling to complete the cancelled transactions */
- if (cancelled)
- napi_schedule(&channel->napi);
+ napi_schedule(&channel->napi);
}
/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
struct gsi_trans_info *trans_info;
struct gsi_tre *dest_tre;
@@ -700,12 +678,12 @@ int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
if (!gsi_trans_tre_reserve(trans_info, 1))
return -EBUSY;
- /* Now fill the the reserved TRE and tell the hardware */
+ /* Now fill the reserved TRE and tell the hardware */
- dest_tre = gsi_ring_virt(ring, ring->index);
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
- ring->index++;
+ tre_ring->index++;
gsi_channel_doorbell(channel);
return 0;
@@ -723,6 +701,7 @@ void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 tre_count = channel->tre_count;
struct gsi_trans_info *trans_info;
u32 tre_max;
int ret;
@@ -730,68 +709,66 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
/* Ensure the size of a channel element is what's expected */
BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
- /* The map array is used to determine what transaction is associated
- * with a TRE that the hardware reports has completed. We need one
- * map entry per TRE.
- */
trans_info = &channel->trans_info;
- trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
- GFP_KERNEL);
- if (!trans_info->map)
- return -ENOMEM;
- /* We can't use more TREs than there are available in the ring.
- * This limits the number of transactions that can be oustanding.
- * Worst case is one TRE per transaction (but we actually limit
- * it to something a little less than that). We allocate resources
- * for transactions (including transaction structures) based on
- * this maximum number.
+ /* The tre_avail field is what ultimately limits the number of
+ * outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient
+ * for what the transaction might need.
*/
tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+ atomic_set(&trans_info->tre_avail, tre_max);
- /* Transactions are allocated one at a time. */
- ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
- tre_max, 1);
- if (ret)
- goto err_kfree;
+ /* We can't use more TREs than the number available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). By allocating a
+ * power-of-two number of transactions we can use an index
+ * modulo that number to determine the next one that's free.
+ * Transactions are allocated one at a time.
+ */
+ trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
+ GFP_KERNEL);
+ if (!trans_info->trans)
+ return -ENOMEM;
+ trans_info->free_id = 0; /* all modulo channel->tre_count */
+ trans_info->allocated_id = 0;
+ trans_info->committed_id = 0;
+ trans_info->pending_id = 0;
+ trans_info->completed_id = 0;
+ trans_info->polled_id = 0;
+
+ /* A completion event contains a pointer to the TRE that caused
+ * the event (which will be the last one used by the transaction).
+ * Each entry in this map records the transaction associated
+ * with a corresponding completed TRE.
+ */
+ trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map) {
+ ret = -ENOMEM;
+ goto err_trans_free;
+ }
/* A transaction uses a scatterlist array to represent the data
* transfers implemented by the transaction. Each scatterlist
* element is used to fill a single TRE when the transaction is
* committed. So we need as many scatterlist elements as the
* maximum number of TREs that can be outstanding.
- *
- * All TREs in a transaction must fit within the channel's TLV FIFO.
- * A transaction on a channel can allocate as many TREs as that but
- * no more.
*/
ret = gsi_trans_pool_init(&trans_info->sg_pool,
sizeof(struct scatterlist),
- tre_max, channel->tlv_count);
+ tre_max, channel->trans_tre_max);
if (ret)
- goto err_trans_pool_exit;
-
- /* Finally, the tre_avail field is what ultimately limits the number
- * of outstanding transactions and their resources. A transaction
- * allocation succeeds only if the TREs available are sufficient for
- * what the transaction might need. Transaction resource pools are
- * sized based on the maximum number of outstanding TREs, so there
- * will always be resources available if there are TREs available.
- */
- atomic_set(&trans_info->tre_avail, tre_max);
+ goto err_map_free;
- spin_lock_init(&trans_info->spinlock);
- INIT_LIST_HEAD(&trans_info->alloc);
- INIT_LIST_HEAD(&trans_info->pending);
- INIT_LIST_HEAD(&trans_info->complete);
- INIT_LIST_HEAD(&trans_info->polled);
return 0;
-err_trans_pool_exit:
- gsi_trans_pool_exit(&trans_info->pool);
-err_kfree:
+err_map_free:
kfree(trans_info->map);
+err_trans_free:
+ kfree(trans_info->trans);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
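One subtlety worth spelling out: the state IDs are u16 values that wrap at 65536, while indexing uses id % tre_count. The two stay consistent only because tre_count is a power of two (so it divides 65536); with tre_count = 256, ID 65535 maps to index 255 and the wrapped ID 0 to index 0, exactly as an unbounded counter would. A hypothetical guard making the assumption explicit (not in the driver; is_power_of_2() is from <linux/log2.h>):

	WARN_ON(!is_power_of_2(channel->tre_count));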
@@ -805,6 +782,6 @@ void gsi_channel_trans_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
gsi_trans_pool_exit(&trans_info->sg_pool);
- gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->trans);
kfree(trans_info->map);
}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 17fd1822d8a9..30c1c2dc77c6 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _GSI_TRANS_H_
#define _GSI_TRANS_H_
@@ -22,44 +22,47 @@ struct gsi;
struct gsi_trans;
struct gsi_trans_pool;
+/* Maximum number of TREs in an IPA immediate command transaction */
+#define IPA_COMMAND_TRANS_TRE_MAX 8
+
/**
* struct gsi_trans - a GSI transaction
*
 * Most fields in this structure are for internal use by the transaction core code:
- * @links: Links for channel transaction lists by state
* @gsi: GSI pointer
* @channel_id: Channel number transaction is associated with
* @cancelled: If set by the core code, transaction was cancelled
- * @tre_count: Number of TREs reserved for this transaction
- * @used: Number of TREs *used* (could be less than tre_count)
- * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @rsvd_count: Number of TREs reserved for this transaction
+ * @used_count: Number of TREs *used* (could be less than rsvd_count)
+ * @len: Number of bytes sent or received by the transaction
* @data: Preserved but not touched by the core transaction code
+ * @cmd_opcode: Array of command opcodes (command channel only)
* @sgl: An array of scatter/gather entries managed by core code
- * @info: Array of command information structures (command channel)
* @direction: DMA transfer direction (DMA_NONE for commands)
* @refcount: Reference count used for destruction
* @completion: Completed when the transaction completes
* @byte_count: TX channel byte count recorded when transaction committed
* @trans_count: Channel transaction count when committed (for BQL accounting)
*
- * The size used for some fields in this structure were chosen to ensure
- * the full structure size is no larger than 128 bytes.
+ * The @len field is set when the transaction is committed. For RX
+ * transactions it is updated later to reflect the actual number of bytes
+ * received.
*/
struct gsi_trans {
- struct list_head links; /* gsi_channel lists */
-
struct gsi *gsi;
u8 channel_id;
bool cancelled; /* true if transaction was cancelled */
- u8 tre_count; /* # TREs requested */
- u8 used; /* # entries used in sgl[] */
+ u8 rsvd_count; /* # TREs requested */
+ u8 used_count; /* # entries used in sgl[] */
u32 len; /* total # bytes across sgl[] */
- void *data;
+ union {
+ void *data;
+ u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX];
+ };
struct scatterlist *sgl;
- struct ipa_cmd_info *info; /* array of entries, or null */
enum dma_data_direction direction;
refcount_t refcount;
@@ -71,7 +74,7 @@ struct gsi_trans {
/**
* gsi_trans_pool_init() - Initialize a pool of structures for transactions
- * @pool: GSI transaction poll pointer
+ * @pool: GSI transaction pool pointer
* @size: Size of elements in the pool
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
@@ -130,6 +133,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi: GSI pointer
+ * @channel_id: Channel to check for allocated transactions
+ *
+ * Return: True if no transactions are allocated, false otherwise
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
+/**
* gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
* @gsi: GSI pointer
* @channel_id: Channel the transaction is associated with
@@ -155,12 +168,10 @@ void gsi_trans_free(struct gsi_trans *trans);
* @buf: Buffer pointer for command payload
* @size: Number of bytes in buffer
* @addr: DMA address for payload
- * @direction: Direction of DMA transfer (or DMA_NONE if none required)
* @opcode: IPA immediate command opcode
*/
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
- dma_addr_t addr, enum dma_data_direction direction,
- enum ipa_cmd_opcode opcode);
+ dma_addr_t addr, enum ipa_cmd_opcode opcode);
/**
* gsi_trans_page_add() - Add a page transfer to a transaction
@@ -196,15 +207,6 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
void gsi_trans_commit_wait(struct gsi_trans *trans);
/**
- * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
- * it to complete, with timeout
- * @trans: Transaction to commit
- * @timeout: Timeout period (in milliseconds)
- */
-int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
- unsigned long timeout);
-
-/**
* gsi_trans_read_byte() - Issue a single byte read TRE on a channel
* @gsi: GSI pointer
* @channel_id: Channel on which to read a byte
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 9fc880eb7e3a..09ead433ec38 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_H_
#define _IPA_H_
@@ -44,6 +44,7 @@ struct ipa_interrupt;
* @uc_loaded: true after microcontroller has reported it's ready
* @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
+ * @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
@@ -62,6 +63,7 @@ struct ipa_interrupt;
* @initialized: Bit mask indicating endpoints initialized
* @set_up: Bit mask indicating endpoints set up
* @enabled: Bit mask indicating endpoints enabled
+ * @modem_tx_count: Number of defined modem TX endpoints
* @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint
* @name_map: Mapping of IPA endpoint name to IPA endpoint
@@ -89,6 +91,7 @@ struct ipa {
dma_addr_t reg_addr;
void __iomem *reg_virt;
+ const struct ipa_regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
@@ -114,6 +117,7 @@ struct ipa {
u32 set_up;
u32 enabled;
+ u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d57472ea077f..26c3db9f52b1 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -26,14 +26,13 @@
* other than data transfer to another endpoint.
*
* Immediate commands are represented by GSI transactions just like other
- * transfer requests, represented by a single GSI TRE. Each immediate
- * command has a well-defined format, having a payload of a known length.
- * This allows the transfer element's length field to be used to hold an
- * immediate command's opcode. The payload for a command resides in DRAM
- * and is described by a single scatterlist entry in its transaction.
- * Commands do not require a transaction completion callback. To commit
- * an immediate command transaction, either gsi_trans_commit_wait() or
- * gsi_trans_commit_wait_timeout() is used.
+ * transfer requests, and use a single GSI TRE. Each immediate command
+ * has a well-defined format, having a payload of a known length. This
+ * allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in AP
+ * memory and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback, and are
+ * always issued using gsi_trans_commit_wait().
*/
/* Some commands can wait until indicated pipeline stages are clear */
@@ -306,6 +305,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
const char *name;
u32 offset;
@@ -313,7 +313,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -326,7 +327,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
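Here and throughout, the fixed offset macros give way to a table-driven lookup: ipa_reg() fetches the version-specific register definition and ipa_reg_offset()/ipa_reg_n_offset() compute the (possibly parameterized) offset from it. A sketch of the resulting access pattern; the write itself is illustrative:

	const struct ipa_reg *reg;
	u32 offset;

	reg = ipa_reg(ipa, ENDP_STATUS);	/* per-version definition */
	offset = ipa_reg_n_offset(reg, endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);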
@@ -350,29 +352,17 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- int ret;
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
- /* Even though command payloads are allocated one at a time,
- * a single transaction can require up to tlv_count of them,
- * so we treat them as if that many can be allocated at once.
+ /* Command payloads are allocated one at a time, but a single
+ * transaction can require up to the maximum supported by the
+ * channel; treat them as if they were allocated all at once.
*/
- ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
- sizeof(union ipa_cmd_payload),
- tre_max, channel->tlv_count);
- if (ret)
- return ret;
-
- /* Each TRE needs a command info structure */
- ret = gsi_trans_pool_init(&trans_info->info_pool,
- sizeof(struct ipa_cmd_info),
- tre_max, channel->tlv_count);
- if (ret)
- gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
-
- return ret;
+ return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->trans_tre_max);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
@@ -380,7 +370,6 @@ void ipa_cmd_pool_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- gsi_trans_pool_exit(&trans_info->info_pool);
gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}
@@ -403,7 +392,6 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
dma_addr_t hash_addr)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_ip_fltrt_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -434,7 +422,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
payload->nhash_rules_addr = cpu_to_le64(addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Initialize header space in IPA-local memory */
@@ -443,7 +431,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_hdr_init_local *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -465,7 +452,7 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le32(flags);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
@@ -522,7 +509,7 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
payload->clear_options = cpu_to_le32(options);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- DMA_NONE, opcode);
+ opcode);
}
/* Skip IP packet processing on the next data transfer on a TX channel */
@@ -530,7 +517,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -542,7 +528,7 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */
@@ -553,7 +539,6 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
struct ipa_cmd_hw_dma_mem_mem *payload;
union ipa_cmd_payload *cmd_payload;
- enum dma_data_direction direction;
dma_addr_t payload_addr;
u16 flags;
@@ -584,17 +569,14 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le16(flags);
payload->system_addr = cpu_to_le64(addr);
- direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_tag_status *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -605,14 +587,13 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum dma_data_direction direction = DMA_TO_DEVICE;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
@@ -621,7 +602,7 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans)
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Add immediate commands to a transaction to clear the hardware pipeline */
@@ -661,28 +642,16 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
wait_for_completion(&ipa->completion);
}
-static struct ipa_cmd_info *
-ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
-{
- struct gsi_channel *channel;
-
- channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
-
- return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
-}
-
/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
struct ipa_endpoint *endpoint;
- struct gsi_trans *trans;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
+ return NULL;
- trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
- tre_count, DMA_NONE);
- if (trans)
- trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
- return trans;
+ return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
}
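
The hunks above drop the per-command DMA direction: immediate-command payloads all come from the same per-channel DMA pool, so gsi_trans_cmd_add() no longer needs an enum dma_data_direction argument, and struct ipa_cmd_info goes away with it. In short, every caller changes the same way (a sketch of the two signatures as they appear in the hunks above):

	/* Before this patch: */
	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);

	/* After this patch -- direction is implied by the pool mapping: */
	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
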
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 05ed7e42e184..8e4243c1f0bb 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_CMD_H_
#define _IPA_CMD_H_
@@ -47,17 +47,6 @@ enum ipa_cmd_opcode {
};
/**
- * struct ipa_cmd_info - information needed for an IPA immediate command
- *
- * @opcode: The command opcode.
- * @direction: Direction of data transfer for DMA commands
- */
-struct ipa_cmd_info {
- enum ipa_cmd_opcode opcode;
- enum dma_data_direction direction;
-};
-
-/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 6d329e9ce5d2..e5a6ce75c7dd 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
@@ -31,7 +31,7 @@
* communication path between the IPA and a particular execution environment
* (EE), such as the AP or Modem. Each EE has a set of channels associated
* with it, and each channel has an ID unique for that EE. For the most part
- * the only GSI channels of concern to this driver belong to the AP
+ * the only GSI channels of concern to this driver belong to the AP.
*
* An endpoint is an IPA construct representing a single channel anywhere
* in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
@@ -96,69 +96,9 @@ struct gsi_channel_data {
};
/**
- * struct ipa_endpoint_tx_data - configuration data for TX endpoints
- * @seq_type: primary packet processing sequencer type
- * @seq_rep_type: sequencer type for replication processing
- * @status_endpoint: endpoint to which status elements are sent
- *
- * The @status_endpoint is only valid if the endpoint's @status_enable
- * flag is set.
- */
-struct ipa_endpoint_tx_data {
- enum ipa_seq_type seq_type;
- enum ipa_seq_rep_type seq_rep_type;
- enum ipa_endpoint_name status_endpoint;
-};
-
-/**
- * struct ipa_endpoint_rx_data - configuration data for RX endpoints
- * @pad_align: power-of-2 boundary to which packet payload is aligned
- * @aggr_close_eof: whether aggregation closes on end-of-frame
- *
- * With each packet it transfers, the IPA hardware can perform certain
- * transformations of its packet data. One of these is adding pad bytes
- * to the end of the packet data so the result ends on a power-of-2 boundary.
- *
- * It is also able to aggregate multiple packets into a single receive buffer.
- * Aggregation is "open" while a buffer is being filled, and "closes" when
- * certain criteria are met. One of those criteria is the sender indicating
- * a "frame" consisting of several transfers has ended.
- */
-struct ipa_endpoint_rx_data {
- u32 pad_align;
- bool aggr_close_eof;
-};
-
-/**
- * struct ipa_endpoint_config_data - IPA endpoint hardware configuration
- * @resource_group: resource group to assign endpoint to
- * @checksum: whether checksum offload is enabled
- * @qmap: whether endpoint uses QMAP protocol
- * @aggregation: whether endpoint supports aggregation
- * @status_enable: whether endpoint uses status elements
- * @dma_mode: whether endpoint operates in DMA mode
- * @dma_endpoint: peer endpoint, if operating in DMA mode
- * @tx: TX-specific endpoint information (see above)
- * @rx: RX-specific endpoint information (see above)
- */
-struct ipa_endpoint_config_data {
- u32 resource_group;
- bool checksum;
- bool qmap;
- bool aggregation;
- bool status_enable;
- bool dma_mode;
- enum ipa_endpoint_name dma_endpoint;
- union {
- struct ipa_endpoint_tx_data tx;
- struct ipa_endpoint_rx_data rx;
- };
-};
-
-/**
* struct ipa_endpoint_data - IPA endpoint configuration data
* @filter_support: whether endpoint supports filtering
- * @config: hardware configuration (see above)
+ * @config: hardware configuration
*
* Not all endpoints support the IPA filtering capability. A filter table
* defines the filters to apply for those endpoints that support it. The
@@ -166,12 +106,12 @@ struct ipa_endpoint_config_data {
* for non-AP endpoints. For this reason we define *all* endpoints used
* in the system, and indicate whether they support filtering.
*
- * The remaining endpoint configuration data applies only to AP endpoints.
+ * The remaining endpoint configuration data specifies default hardware
+ * configuration values that apply only to AP endpoints.
*/
struct ipa_endpoint_data {
bool filter_support;
- /* Everything else is specified only for AP endpoints */
- struct ipa_endpoint_config_data config;
+ struct ipa_endpoint_config config;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 49d9a077d037..093e11ec7c2d 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -23,12 +23,8 @@
#include "ipa_gsi.h"
#include "ipa_power.h"
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-
-#define IPA_REPLENISH_BATCH 16
-
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
@@ -37,7 +33,6 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -75,6 +70,24 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
+{
+ /* A hard aggregation limit will not be crossed; aggregation closes
+ * if saving incoming data would cross the hard byte limit boundary.
+ *
+ * With a soft limit, aggregation closes *after* the size boundary
+ * has been crossed. In that case the buffer must leave enough space
+ * after the limit to receive a full MTU of data plus overhead.
+ */
+ if (!aggr_hard_limit)
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ /* The byte limit is encoded as a number of kilobytes */
+
+ return rx_buffer_size / SZ_1K;
+}
+
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
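
The new ipa_aggr_size_kb() is used twice below — once when validating endpoint data and once when programming ENDP_INIT_AGGR — and in both cases the caller first subtracts the skb headroom. A small usage sketch, with a hypothetical 8192-byte receive buffer:

	u32 size = 8192 - NET_SKB_PAD;	/* hypothetical buffer size */

	u32 hard_kb = ipa_aggr_size_kb(size, true);
	u32 soft_kb = ipa_aggr_size_kb(size, false);

	/* hard_kb spans the whole usable buffer; soft_kb is smaller,
	 * because an MTU plus overhead must still fit in the buffer
	 * after the soft limit has been crossed.
	 */
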
@@ -87,6 +100,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
if (!data->toward_ipa) {
+ const struct ipa_endpoint_rx *rx_config;
+ const struct ipa_reg *reg;
+ u32 buffer_size;
+ u32 aggr_size;
+ u32 limit;
+
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
"RX endpoint %u\n",
@@ -94,9 +113,77 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return false;
}
+ /* Nothing more to check for non-AP RX */
+ if (data->ee_id != GSI_EE_AP)
+ return true;
+
+ rx_config = &data->endpoint.config.rx;
+
+ /* The buffer size must hold an MTU plus overhead */
+ buffer_size = rx_config->buffer_size;
+ limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (buffer_size < limit) {
+ dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+ data->endpoint_id, buffer_size, limit);
+ return false;
+ }
+
+ if (!data->endpoint.config.aggregation) {
+ bool result = true;
+
+ /* No aggregation; check for bogus aggregation data */
+ if (rx_config->aggr_time_limit) {
+ dev_err(dev,
+ "time limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_hard_limit) {
+ dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_close_eof) {
+ dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ return result; /* Nothing more to check */
+ }
+
+ /* For an endpoint supporting receive aggregation, the byte
+ * limit defines the point at which aggregation closes. This
+ * check ensures the receive buffer size doesn't result in a
+ * limit that exceeds what's representable in the aggregation
+ * byte limit field.
+ */
+ aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+
+ limit = ipa_reg_field_max(reg, BYTE_LIMIT);
+ if (aggr_size > limit) {
+ dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
+ data->endpoint_id, aggr_size, limit);
+
+ return false;
+ }
+
return true; /* Nothing more to check for RX */
}
+ /* Starting with IPA v4.5 sequencer replication is obsolete */
+ if (ipa->version >= IPA_VERSION_4_5) {
+ if (data->endpoint.config.tx.seq_rep_type) {
+ dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+ }
+
if (data->endpoint.config.status_enable) {
other_name = data->endpoint.config.tx.status_endpoint;
if (other_name >= count) {
@@ -156,21 +243,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_5)
- return field_max(aggr_byte_limit_fmask(true));
-
- return field_max(aggr_byte_limit_fmask(false));
-}
-
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
- u32 limit;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +256,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return false;
}
- /* The aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, which means that we need to supply
- * enough space in a receive buffer to hold a complete MTU
- * plus normal skb overhead *after* that aggregation byte
- * limit has been crossed.
- *
- * This check ensures we don't define a receive buffer size
- * that would exceed what we can represent in the field that
- * is used to program its size.
- */
- limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
- limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
- if (limit < IPA_RX_BUFFER_SIZE) {
- dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
- IPA_RX_BUFFER_SIZE, limit);
- return false;
- }
-
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
@@ -243,8 +301,10 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
- u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 field_id;
+ u32 offset;
bool state;
u32 mask;
u32 val;
@@ -254,9 +314,13 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
- mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
-
+ reg = ipa_reg(ipa, ENDP_INIT_CTRL);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
+
+ field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
+ mask = ipa_reg_bit(reg, field_id);
+
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
@@ -283,13 +347,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
WARN_ON(!(mask & ipa->available));
- offset = ipa_reg_state_aggr_active_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
return !!(val & mask);
}
@@ -298,10 +362,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
WARN_ON(!(mask & ipa->available));
- iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+ reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
+ iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -317,7 +383,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- if (!endpoint->data->aggregation)
+ if (!endpoint->config.aggregation)
return;
/* Nothing to do if the endpoint doesn't have aggregation open */
@@ -386,12 +452,10 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
struct gsi_trans *trans;
u32 count;
- /* We need one command per modem TX endpoint. We can get an upper
- * bound on that by assuming all initialized endpoints are modem->IPA.
- * That won't happen, and we could be more precise, but this is fine
- * for now. End the transaction with commands to clear the pipeline.
+ /* We need one command per modem TX endpoint, plus the commands
+ * that clear the pipeline.
*/
- count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
+ count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -402,6 +466,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
while (initialized) {
u32 endpoint_id = __ffs(initialized);
struct ipa_endpoint *endpoint;
+ const struct ipa_reg *reg;
u32 offset;
initialized ^= BIT(endpoint_id);
@@ -411,7 +476,8 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -422,7 +488,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_pipeline_clear_add(trans);
- /* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
ipa_cmd_pipeline_clear_wait(ipa);
@@ -432,22 +497,23 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_CFG);
/* FRAG_OFFLOAD_EN is 0 */
- if (endpoint->data->checksum) {
- enum ipa_version version = endpoint->ipa->version;
+ if (endpoint->config.checksum) {
+ enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
- u32 checksum_offset;
+ u32 off;
/* Checksum header offset is in 4-byte units */
- checksum_offset = sizeof(struct rmnet_map_header);
- checksum_offset /= sizeof(u32);
- val |= u32_encode_bits(checksum_offset,
- CS_METADATA_HDR_OFFSET_FMASK);
+ off = sizeof(struct rmnet_map_header) / sizeof(u32);
+ val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -460,24 +526,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
+ val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
- u32 offset;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
- offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
- val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
+ reg = ipa_reg(ipa, ENDP_INIT_NAT);
+ val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static u32
@@ -486,7 +554,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
u32 header_size = sizeof(struct rmnet_map_header);
/* Without checksum offload, we just have the MAP header */
- if (!endpoint->data->checksum)
+ if (!endpoint->config.checksum)
return header_size;
if (version < IPA_VERSION_4_5) {
@@ -501,6 +569,50 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
return header_size;
}
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static u32 ipa_header_size_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 header_size)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(header_size > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ header_size >>= hweight32(field_max);
+ WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
+ val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+
+ return val;
+}
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static u32 ipa_metadata_offset_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 offset)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(offset > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(field_max);
+ WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+
+ return val;
+}
+
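Both encode helpers above use the same split-field idiom: the low-order bits land in the original field, and IPA v4.5+ carries the remainder in a *_MSB companion field. A standalone sketch of the idiom, with hypothetical field widths and positions (6 low bits, 2 high bits):

	#include <stdint.h>
	#include <assert.h>

	static uint32_t encode_split(uint32_t value)
	{
		const uint32_t lo_max = 0x3f;	/* 6-bit base field */

		uint32_t lo = value & lo_max;	/* bits 5..0 */
		uint32_t hi = value >> 6;	/* the remainder */

		assert(hi <= 0x3);		/* 2-bit MSB field */

		/* Field positions here are hypothetical */
		return (hi << 16) | lo;
	}
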
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -524,36 +636,38 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- if (endpoint->data->qmap) {
+ reg = ipa_reg(ipa, ENDP_INIT_HDR);
+ if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
header_size = ipa_qmap_header_size(version, endpoint);
- val = ipa_header_size_encoded(version, header_size);
+ val = ipa_header_size_encode(version, reg, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 offset; /* Field offset within header */
+ u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
- offset = offsetof(struct rmnet_map_header, mux_id);
- val |= ipa_metadata_offset_encoded(version, offset);
+ off = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encode(version, reg, off);
/* Where IPA will write the length */
- offset = offsetof(struct rmnet_map_header, pkt_len);
+ off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
+ off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
- val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
+ val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -561,228 +675,211 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
- u32 pad_align = endpoint->data->rx.pad_align;
+ u32 pad_align = endpoint->config.rx.pad_align;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- val |= HDR_ENDIANNESS_FMASK; /* big endian */
-
- /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
- * driver assumes this field is meaningful in packets it receives,
- * and assumes the header's payload length includes that padding.
- * The RMNet driver does *not* pad packets it sends, however, so
- * the pad field (although 0) should be ignored.
- */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
- val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
+ if (endpoint->config.qmap) {
+ /* We have a header, so we must specify its endianness */
+ val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+
+ /* A QMAP header contains a 6 bit pad field at offset 0.
+ * The RMNet driver assumes this field is meaningful in
+ * packets it receives, and assumes the header's payload
+ * length includes that padding. The RMNet driver does
+ * *not* pad packets it sends, however, so the pad field
+ * (although 0) should be ignored.
+ */
+ if (!endpoint->toward_ipa) {
+ val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ }
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+ val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
*/
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
- u32 offset;
-
- offset = offsetof(struct rmnet_map_header, pkt_len);
- offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
- val |= u32_encode_bits(offset,
- HDR_OFST_PKT_SIZE_MSB_FMASK);
+ if (endpoint->config.qmap && !endpoint->toward_ipa) {
+ u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 off; /* Field offset within header */
+
+ off = offsetof(struct rmnet_map_header, pkt_len);
+ /* Low bits are in the ENDP_INIT_HDR register */
+ off >>= hweight32(mask);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + offset);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
- offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
- if (endpoint->data->qmap)
+ if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- if (endpoint->data->dma_mode) {
- enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
- u32 dma_endpoint_id;
+ reg = ipa_reg(ipa, ENDP_INIT_MODE);
+ if (endpoint->config.dma_mode) {
+ enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
+ u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
-
- val = u32_encode_bits(IPA_DMA, MODE_FMASK);
- val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
}
-/* Compute the aggregation size value to use for a given buffer size */
-static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
+/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
+ * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
+ * they're configured to have granularity 100 usec and 1 msec, respectively.
+ *
+ * The return value is the positive or negative Qtime value to use to
+ * express the (microsecond) time provided. A positive return value
+ * means pulse generator 0 can be used; otherwise use pulse generator 1.
+ */
+static int ipa_qtime_val(u32 microseconds, u32 max)
{
- /* We don't use "hard byte limit" aggregation, so we define the
- * aggregation limit such that our buffer has enough space *after*
- * that limit to receive a full MTU of data, plus overhead.
- */
- rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ u32 val;
- return rx_buffer_size / SZ_1K;
-}
+ /* Use 100 microsecond granularity if possible */
+ val = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (val <= max)
+ return (int)val;
-/* Encoded values for AGGR endpoint register fields */
-static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
-{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ val = DIV_ROUND_CLOSEST(microseconds, 1000);
+ WARN_ON(val > max);
- return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
+ return (int)-val;
}
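
Two worked examples of ipa_qtime_val() as defined above, assuming a hypothetical 5-bit limit field (max = 31):

	/*  2500 usec: DIV_ROUND_CLOSEST(2500, 100)  = 25 <= 31
	 *             -> returns 25: pulse generator 0, 100 usec units
	 *
	 * 25000 usec: DIV_ROUND_CLOSEST(25000, 100) = 250 > 31
	 *             DIV_ROUND_CLOSEST(25000, 1000) = 25
	 *             -> returns -25: pulse generator 1, 1 msec units
	 */
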
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
- u32 gran_sel;
- u32 fmask;
+ u32 max;
u32 val;
- if (version < IPA_VERSION_4_5) {
- /* We set aggregation granularity in ipa_hardware_config() */
- limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+ if (!microseconds)
+ return 0; /* Nothing to compute if time limit is 0 */
- return u32_encode_bits(limit, aggr_time_limit_fmask(true));
- }
+ max = ipa_reg_field_max(reg, TIME_LIMIT);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
- /* IPA v4.5 expresses the time limit using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- fmask = aggr_time_limit_fmask(false);
- val = DIV_ROUND_CLOSEST(limit, 100);
- if (val > field_max(fmask)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = AGGR_GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(limit, 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
+ return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
}
- return gran_sel | u32_encode_bits(val, fmask);
-}
-
-static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
-{
- u32 val = enabled ? 1 : 0;
-
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
+ /* We program aggregation granularity in ipa_hardware_config() */
+ val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ microseconds, max * IPA_AGGR_GRANULARITY);
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
+ return ipa_reg_encode(reg, TIME_LIMIT, val);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
- enum ipa_version version = endpoint->ipa->version;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- if (endpoint->data->aggregation) {
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+ if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
- bool close_eof;
+ const struct ipa_endpoint_rx *rx_config;
+ u32 buffer_size;
u32 limit;
- val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ rx_config = &endpoint->config.rx;
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
- val |= aggr_byte_limit_encoded(version, limit);
+ buffer_size = rx_config->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
- limit = IPA_AGGR_TIME_LIMIT;
- val |= aggr_time_limit_encoded(version, limit);
+ limit = rx_config->aggr_time_limit;
+ val |= aggr_time_limit_encode(ipa, reg, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = endpoint->data->rx.aggr_close_eof;
- val |= aggr_sw_eof_active_encoded(version, close_eof);
-
- /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
+ if (rx_config->aggr_close_eof)
+ val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
- AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
-}
-
-/* Return the Qtime-based head-of-line blocking timer value that
- * represents the given number of microseconds. The result
- * includes both the timer value and the selected timer granularity.
- */
-static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
-{
- u32 gran_sel;
- u32 val;
-
- /* IPA v4.5 expresses time limits using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val > field_max(TIME_LIMIT_FMASK)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
- }
-
- return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -790,12 +887,11 @@ static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
* derived from the 19.2 MHz SoC XO clock. For older IPA versions
* each tick represents 128 cycles of the IPA core clock.
*
- * Return the encoded value that should be written to that register
- * that represents the timeout period provided. For IPA v4.2 this
- * encodes a base and scale value, while for earlier versions the
- * value is a simple tick count.
+ * Return the encoded value representing the timeout period provided
+ * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
u32 width;
u32 scale;
@@ -807,18 +903,34 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
- if (ipa->version >= IPA_VERSION_4_5)
- return hol_block_timer_qtime_val(ipa, microseconds);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
+
+ return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ }
- /* Use 64 bit arithmetic to avoid overflow... */
+ /* Use 64 bit arithmetic to avoid overflow */
rate = ipa_core_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
- /* ...but we still need to fit into a 32-bit register */
- WARN_ON(ticks > U32_MAX);
+
+ /* We still need the result to fit into the field */
+ WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return (u32)ticks;
+ return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -828,8 +940,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
* count, and extract the number of bits in the base field
* such that high bit is included.
*/
- high = fls(ticks); /* 1..32 */
- width = HWEIGHT32(BASE_VALUE_FMASK);
+ high = fls(ticks); /* 1..32 (or warning above) */
+ width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -839,8 +951,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
scale++;
}
- val = u32_encode_bits(scale, SCALE_FMASK);
- val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
+ val = ipa_reg_encode(reg, TIMER_SCALE, scale);
+ val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
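
For IPA v4.2 the tick count is effectively stored as base << scale. A worked example of the computation above, assuming a hypothetical 5-bit TIMER_BASE_VALUE field:

	/* ticks = 1000: fls(1000) = 10, width = 5
	 *               scale = 10 - 5 = 5
	 *               base  = 1000 >> 5 = 31 (fits in 5 bits)
	 *               hardware period ~= 31 << 5 = 992 ticks;
	 *               the rounding step above nudges this closer.
	 */
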
@@ -851,28 +963,34 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
- val = hol_block_timer_val(ipa, microseconds);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
+ val = hol_block_timer_encode(ipa, reg, microseconds);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- val = enable ? HOL_BLOCK_EN_FMASK : 0;
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
+ val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+
+ iowrite32(val, ipa->reg_virt + offset);
+
/* When enabling, the register must be written twice for IPA v4.5+ */
- if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ if (enable && ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
@@ -905,46 +1023,58 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ u32 resource_group = endpoint->config.resource_group;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
- val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
+ val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
- u32 val = 0;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_SEQ);
+
/* Low-order byte configures primary packet processing */
- val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
+ val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
- /* Second byte configures replicated packet processing */
- val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
- SEQ_REP_TYPE_FMASK);
+ /* Second byte (if supported) configures replicated packet processing */
+ if (ipa->version < IPA_VERSION_4_5)
+ val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/**
@@ -965,7 +1095,7 @@ int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
* If not, see if we can linearize it before giving up.
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (nr_frags > endpoint->skb_frag_max) {
if (skb_linearize(skb))
return -E2BIG;
nr_frags = 0;
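
The new skb_frag_max is set during endpoint setup (later in this patch) to the channel's trans_tre_max minus one, reserving one TRE for the skb's linear data. A short example of the check above under that assumption:

	/* Hypothetical channel with trans_tre_max = 8:
	 *   endpoint->skb_frag_max = 8 - 1 = 7
	 *   nr_frags = 9 -> 9 > 7, so linearize (or fail with -E2BIG)
	 *   nr_frags = 7 -> sent as one linear TRE plus 7 fragments
	 */
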
@@ -994,149 +1124,123 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
- if (endpoint->data->status_enable) {
- val |= STATUS_EN_FMASK;
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ if (endpoint->config.status_enable) {
+ val |= ipa_reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
- name = endpoint->data->tx.status_endpoint;
+ name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= u32_encode_bits(status_endpoint_id,
- STATUS_ENDP_FMASK);
+ val |= ipa_reg_encode(reg, STATUS_ENDP,
+ status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning status element precedes
- * packet (not present for IPA v4.5)
+ * packet (not present for IPA v4.5+)
*/
- /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
-static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
- struct gsi_trans *trans;
- bool doorbell = false;
struct page *page;
+ u32 buffer_size;
u32 offset;
u32 len;
int ret;
- page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+ buffer_size = endpoint->config.rx.buffer_size;
+ page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
- trans = ipa_endpoint_trans_alloc(endpoint, 1);
- if (!trans)
- goto err_free_pages;
-
/* Offset the buffer to make space for skb headroom */
offset = NET_SKB_PAD;
- len = IPA_RX_BUFFER_SIZE - offset;
+ len = buffer_size - offset;
ret = gsi_trans_page_add(trans, page, len, offset);
if (ret)
- goto err_trans_free;
- trans->data = page; /* transaction owns page now */
-
- if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
- doorbell = true;
- endpoint->replenish_ready = 0;
- }
-
- gsi_trans_commit(trans, doorbell);
-
- return 0;
-
-err_trans_free:
- gsi_trans_free(trans);
-err_free_pages:
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
+ else
+ trans->data = page; /* transaction owns page now */
- return -ENOMEM;
+ return ret;
}
/**
* ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @add_one: Whether this is replacing a just-consumed buffer
*
* The IPA hardware can hold a fixed number of receive buffers for an RX
* endpoint, based on the number of entries in the underlying channel ring
* buffer. If an endpoint's "backlog" is non-zero, it indicates how many
* more receive buffers can be supplied to the hardware. Replenishing for
- * an endpoint can be disabled, in which case requests to replenish a
- * buffer are "saved", and transferred to the backlog once it is re-enabled
- * again.
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi;
- u32 backlog;
+ struct gsi_trans *trans;
- if (!endpoint->replenish_enabled) {
- if (add_one)
- atomic_inc(&endpoint->replenish_saved);
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
return;
- }
- while (atomic_dec_not_zero(&endpoint->replenish_backlog))
- if (ipa_endpoint_replenish_one(endpoint))
+ /* Skip it if it's already active */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
+ return;
+
+ while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+ bool doorbell;
+
+ if (ipa_endpoint_replenish_one(endpoint, trans))
goto try_again_later;
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+
+ /* Ring the doorbell if we've got a full batch */
+ doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+ gsi_trans_commit(trans, doorbell);
+ }
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
return;
try_again_later:
- /* The last one didn't succeed, so fix the backlog */
- backlog = atomic_inc_return(&endpoint->replenish_backlog);
-
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ gsi_trans_free(trans);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
* one buffer, nothing will trigger another replenish attempt.
- * Receive buffer transactions use one TRE, so schedule work to
- * try replenishing again if our backlog is *all* available TREs.
+ * If the hardware has no receive buffers queued, schedule work to
+ * try replenishing again.
*/
- gsi = &endpoint->ipa->gsi;
- if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
schedule_delayed_work(&endpoint->replenish_work,
msecs_to_jiffies(1));
}
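
The rewritten replenish path replaces the atomic backlog/saved counters with two flag bits; IPA_REPLENISH_ACTIVE acts as a re-entry latch so only one caller refills buffers at a time. A standalone sketch of that latch, using C11 atomics in place of the kernel's test_and_set_bit() helpers:

	#include <stdatomic.h>

	enum { ENABLED_BIT = 1u << 0, ACTIVE_BIT = 1u << 1 };

	static _Atomic unsigned int flags;

	static void replenish(void)
	{
		if (!(atomic_load(&flags) & ENABLED_BIT))
			return;

		/* Like test_and_set_bit(): skip if already active */
		if (atomic_fetch_or(&flags, ACTIVE_BIT) & ACTIVE_BIT)
			return;

		/* ... allocate and commit transactions until an
		 * allocation fails ...
		 */

		atomic_fetch_and(&flags, ~ACTIVE_BIT);
	}
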
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi = &endpoint->ipa->gsi;
- u32 max_backlog;
- u32 saved;
-
- endpoint->replenish_enabled = true;
- while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
- atomic_add(saved, &endpoint->replenish_backlog);
+ set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
/* Start replenishing if hardware currently has no buffers */
- max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
- if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, false);
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
- u32 backlog;
-
- endpoint->replenish_enabled = false;
- while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
- atomic_add(backlog, &endpoint->replenish_saved);
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
@@ -1146,7 +1250,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, false);
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1158,13 +1262,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
return;
skb = __dev_alloc_skb(len, GFP_ATOMIC);
- if (!skb)
- return;
-
- /* Copy the data into the socket buffer and receive it */
- skb_put(skb, len);
- memcpy(skb->data, data, len);
- skb->truesize += extra;
+ if (skb) {
+ /* Copy the data into the socket buffer and receive it */
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
ipa_modem_skb_rx(endpoint->netdev, skb);
}
@@ -1172,15 +1275,16 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
+ u32 buffer_size = endpoint->config.rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
if (!endpoint->netdev)
return false;
- WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+ WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
- skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ skb = build_skb(page_address(page), buffer_size);
if (skb) {
/* Reserve the headroom and account for the data */
skb_reserve(skb, NET_SKB_PAD);
@@ -1278,8 +1382,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
+ u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
- u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 unused = buffer_size - total_len;
u32 resid = total_len;
while (resid) {
@@ -1307,10 +1412,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
- align = endpoint->data->rx.pad_align ? : 1;
+ align = endpoint->config.rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
len = sizeof(*status) + ALIGN(len, align);
- if (endpoint->data->checksum)
+ if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
if (!ipa_endpoint_status_drop(endpoint, status)) {
@@ -1337,38 +1442,25 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
}
}
-/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
-static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
-}
-
-/* Complete transaction initiated in ipa_endpoint_replenish_one() */
-static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
struct page *page;
- ipa_endpoint_replenish(endpoint, true);
+ if (endpoint->toward_ipa)
+ return;
if (trans->cancelled)
- return;
+ goto done;
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
- if (endpoint->data->status_enable)
+ if (endpoint->config.status_enable)
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
-}
-
-void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
- if (endpoint->toward_ipa)
- ipa_endpoint_tx_complete(endpoint, trans);
- else
- ipa_endpoint_rx_complete(endpoint, trans);
+done:
+ ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
@@ -1388,22 +1480,24 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct page *page = trans->data;
if (page)
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
+ const struct ipa_reg *reg;
u32 val;
+ reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_HDR_TABLE_FMASK;
- val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
- val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+ val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ /* ROUTE_DEF_HDR_OFST is 0 */
+ val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1519,7 +1613,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
* All other cases just need to reset the underlying GSI channel.
*/
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
- endpoint->data->aggregation;
+ endpoint->config.aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
@@ -1553,8 +1647,12 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
- if (!endpoint->toward_ipa)
- ipa_endpoint_init_hol_block_disable(endpoint);
+ if (!endpoint->toward_ipa) {
+ if (endpoint->config.rx.holb_drop)
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
+ else
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ }
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
@@ -1686,15 +1784,13 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
if (endpoint->ee_id != GSI_EE_AP)
return;
- endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
if (!endpoint->toward_ipa) {
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
*/
- endpoint->replenish_enabled = false;
- atomic_set(&endpoint->replenish_saved,
- gsi_channel_tre_max(gsi, endpoint->channel_id));
- atomic_set(&endpoint->replenish_backlog, 0);
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
INIT_DELAYED_WORK(&endpoint->replenish_work,
ipa_endpoint_replenish_work);
}
@@ -1745,6 +1841,7 @@ void ipa_endpoint_teardown(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
u32 initialized;
u32 rx_base;
u32 rx_mask;
@@ -1771,11 +1868,12 @@ int ipa_endpoint_config(struct ipa *ipa)
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
- val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+ reg = ipa_reg(ipa, FLAVOR_0);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
+ rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1784,7 +1882,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
+ max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
@@ -1836,7 +1934,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
- endpoint->data = &data->endpoint.config;
+ endpoint->config = data->endpoint.config;
ipa->initialized |= BIT(endpoint->endpoint_id);
}
@@ -1870,6 +1968,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 filter_map;
+ BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
if (!ipa_endpoint_data_valid(ipa, count, data))
return 0; /* Error */
@@ -1884,6 +1984,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
if (data->endpoint.filter_support)
filter_map |= BIT(data->endpoint_id);
+ if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
+ ipa->modem_tx_count++;
}
if (!ipa_filter_map_valid(ipa, filter_map))
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0a859d10312d..d8dfa24f5214 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
@@ -41,20 +41,112 @@ enum ipa_endpoint_name {
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
+ * struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
+ * @seq_type: primary packet processing sequencer type
+ * @seq_rep_type: sequencer type for replication processing
+ * @status_endpoint: endpoint to which status elements are sent
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx {
+ enum ipa_seq_type seq_type;
+ enum ipa_seq_rep_type seq_rep_type;
+ enum ipa_endpoint_name status_endpoint;
+};
+
+/**
+ * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_time_limit: time before aggregation closes (microseconds)
+ * @aggr_hard_limit: whether aggregation closes before or after boundary
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ * @holb_drop: whether to drop packets to avoid head-of-line blocking
+ *
+ * The actual size of the receive buffer is rounded up if necessary
+ * to be a power-of-2 number of pages.
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met.
+ *
+ * A time limit can be specified to close aggregation. Aggregation will be
+ * closed if this period passes after data is first written into a receive
+ * buffer. If not specified, no time limit is imposed.
+ *
+ * Insufficient space available in the receive buffer can close aggregation.
+ * The aggregation byte limit defines the point (in units of 1024 bytes) in
+ * the buffer where aggregation closes. With a "soft" aggregation limit,
+ * aggregation closes when a packet written to the buffer *crosses* that
+ * aggregation limit. With a "hard" aggregation limit, aggregation will
+ * close *before* writing a packet that would cross that boundary.
+ */
+struct ipa_endpoint_rx {
+ u32 buffer_size;
+ u32 pad_align;
+ u32 aggr_time_limit;
+ bool aggr_hard_limit;
+ bool aggr_close_eof;
+ bool holb_drop;
+};
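
As a sketch of how these fields fit together (the values below are
illustrative only, not taken from this patch):

	static const struct ipa_endpoint_rx example_rx = {
		.buffer_size	 = 8192,	/* rounded up to power-of-2 pages */
		.pad_align	 = 0,		/* no payload padding */
		.aggr_time_limit = 500,		/* close aggregation after 500 usec */
		.aggr_hard_limit = true,	/* close *before* crossing the limit */
		.aggr_close_eof	 = false,
		.holb_drop	 = false,
	};

With .aggr_hard_limit set, a packet that would cross the aggregation
byte limit forces the buffer to close first; with it clear, the packet
is written and aggregation closes afterward.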
+
+/**
+ * struct ipa_endpoint_config - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config {
+ u32 resource_group;
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx tx;
+ struct ipa_endpoint_rx rx;
+ };
+};
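
Note that @tx and @rx share a union, so only the member matching the
endpoint's direction is meaningful; code such as the
endpoint->config.rx.holb_drop test added earlier in this patch must
only be reached for RX endpoints.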
+
+/**
+ * enum ipa_replenish_flag: RX buffer replenish flags
+ *
+ * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT: Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+ IPA_REPLENISH_ENABLED,
+ IPA_REPLENISH_ACTIVE,
+ IPA_REPLENISH_COUNT, /* Number of flags (must be last) */
+};
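
These two flags are used together through the replenish_flags bitmap
declared in struct ipa_endpoint below. A minimal sketch of the
intended pattern (not the driver's exact code; assumes
<linux/bitops.h>):

	static void example_replenish(struct ipa_endpoint *endpoint)
	{
		if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
			return;		/* replenishing currently disabled */

		/* Only one replenish pass may run at a time */
		if (test_and_set_bit(IPA_REPLENISH_ACTIVE,
				     endpoint->replenish_flags))
			return;		/* another caller is already at it */

		/* ... queue receive buffers to the hardware ... */

		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
	}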
+
+/**
* struct ipa_endpoint - IPA endpoint information
* @ipa: IPA pointer
* @ee_id: Execution environment the endpoint is associated with
* @channel_id: GSI channel used by the endpoint
* @endpoint_id: IPA endpoint number
* @toward_ipa: Endpoint direction (true = TX, false = RX)
- * @data: Endpoint configuration data
- * @trans_tre_max: Maximum number of TRE descriptors per transaction
+ * @config: Default endpoint configuration
+ * @skb_frag_max: Maximum allowed number of TX SKB fragments
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
- * @replenish_enabled: Whether receive buffer replenishing is enabled
- * @replenish_ready: Number of replenish transactions without doorbell
- * @replenish_saved: Replenish requests held while disabled
- * @replenish_backlog: Number of buffers needed to fill hardware queue
+ * @replenish_flags: Replenishing state flags
+ * @replenish_count: Total number of replenish transactions committed
* @replenish_work: Work item used for repeated replenish failures
*/
struct ipa_endpoint {
@@ -63,19 +155,17 @@ struct ipa_endpoint {
u32 channel_id;
u32 endpoint_id;
bool toward_ipa;
- const struct ipa_endpoint_config_data *data;
+ struct ipa_endpoint_config config;
- u32 trans_tre_max;
+ u32 skb_frag_max; /* Used for netdev TX only */
u32 evt_ring_id;
/* Net device this endpoint is associated with, if any */
struct net_device *netdev;
/* Receive buffer replenishing for RX endpoints */
- bool replenish_enabled;
- u32 replenish_ready;
- atomic_t replenish_saved;
- atomic_t replenish_backlog;
+ DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
+ u64 replenish_count;
struct delayed_work replenish_work; /* global wq */
};
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index b35170a93b0f..c269432f9c2e 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
/* DOC: IPA Interrupts
@@ -53,13 +53,15 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
+ const struct ipa_reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
if (uc_irq)
iowrite32(mask, ipa->reg_virt + offset);
@@ -80,6 +82,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ const struct ipa_reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -95,7 +98,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- offset = ipa_reg_irq_stts_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_STTS);
+ offset = ipa_reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -112,7 +116,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
iowrite32(pending, ipa->reg_virt + offset);
}
out_power_put:
@@ -128,6 +133,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id);
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
@@ -137,7 +143,8 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_en_offset(ipa->version);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
+ offset = ipa_reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
@@ -164,18 +171,18 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- offset = ipa_reg_irq_suspend_info_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
@@ -189,16 +196,18 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
- WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+ if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
+ return;
interrupt->handler[ipa_irq] = handler;
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Remove the handler for an IPA interrupt type */
@@ -206,14 +215,16 @@ void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
- WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+ if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
+ return;
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
interrupt->handler[ipa_irq] = NULL;
}
@@ -223,8 +234,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
+ const struct ipa_reg *reg;
unsigned int irq;
- u32 offset;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
@@ -242,8 +253,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(0, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 231390cea52a..f31fd9965fdc 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_INTERRUPT_H_
#define _IPA_INTERRUPT_H_
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 3757ce3de2c5..49537fccf6ad 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -183,31 +183,97 @@ static void ipa_teardown(struct ipa *ipa)
gsi_teardown(&ipa->gsi);
}
+static void
+ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
+{
+ const struct ipa_reg *reg;
+ u32 val;
+
+ /* IPA v4.5+ has no backward compatibility register */
+ if (ipa->version >= IPA_VERSION_4_5)
+ return;
+
+ reg = ipa_reg(ipa, IPA_BCR);
+ val = data->backward_compat;
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_tx(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
+ return;
+
+ /* Disable PA mask to allow HOLB drop */
+ reg = ipa_reg(ipa, IPA_TX_CFG);
+ offset = ipa_reg_offset(reg);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_hardware_config_clkon(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 val;
+
+ if (version >= IPA_VERSION_4_5)
+ return;
+
+ if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
+ return;
+
+ /* Implement some hardware workarounds */
+ reg = ipa_reg(ipa, CLKON_CFG);
+ if (version == IPA_VERSION_3_1) {
+ /* Disable MISC clock gating */
+ val = ipa_reg_bit(reg, CLKON_MISC);
+ } else { /* IPA v4.0+ */
+ /* Enable open global clocks in the CLKON configuration */
+ val = ipa_reg_bit(reg, CLKON_GLOBAL);
+ val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ }
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Nothing to configure prior to IPA v4.0 */
if (ipa->version < IPA_VERSION_4_0)
return;
- val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ reg = ipa_reg(ipa, COMP_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
- val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
- val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
- /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
+ /* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
- val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
- iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Configure DDR and (possibly) PCIe max read/write QSB values */
@@ -216,6 +282,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
+ const struct ipa_reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -224,25 +291,31 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
/* Max outstanding write accesses for QSB masters */
- val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_WRITES);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= u32_encode_bits(data1->max_writes,
- GEN_QMB_1_MAX_WRITES_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
+ data1->max_writes);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
- val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_READS);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data0->max_reads_beats,
- GEN_QMB_0_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val |= u32_encode_bits(data1->max_reads,
- GEN_QMB_1_MAX_READS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
+ data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data1->max_reads_beats,
- GEN_QMB_1_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -278,48 +351,99 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
- iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
+ val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+ val |= ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
- val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
+ val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
- val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+
+ iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ val |= ipa_reg_bit(reg, DIV_ENABLE);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Before IPA v4.5 timing is controlled by a counter register */
+static void ipa_hardware_config_counter(struct ipa *ipa)
+{
+ u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ const struct ipa_reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, COUNTER_CFG);
+ /* If defined, EOT_COAL_GRANULARITY is 0 */
+ val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
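
To make the computation concrete: assuming the internal timer used
here runs at 32 kHz and IPA_AGGR_GRANULARITY is 500 microseconds (both
are assumptions; the actual values are defined elsewhere in the
driver), the field value would be
DIV_ROUND_CLOSEST(500 * 32, 1000) - 1 = 15.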
+
+static void ipa_hardware_config_timing(struct ipa *ipa)
+{
+ if (ipa->version < IPA_VERSION_4_5)
+ ipa_hardware_config_counter(ipa);
+ else
+ ipa_qtime_config(ipa);
+}
+
+static void ipa_hardware_config_hashing(struct ipa *ipa)
+{
+ const struct ipa_reg *reg;
+
+ if (ipa->version != IPA_VERSION_4_2)
+ return;
+
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
+
+ /* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
+ * IPV4_FILTER_HASH are all zero.
+ */
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- val = u32_encode_bits(enter_idle_debounce_thresh,
- ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ if (ipa->version < IPA_VERSION_3_5_1)
+ return;
+
+ reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
+ val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= CONST_NON_IDLE_ENABLE_FMASK;
+ val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
- offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -349,55 +473,13 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
- enum ipa_version version = ipa->version;
- u32 granularity;
- u32 val;
-
- /* IPA v4.5+ has no backward compatibility register */
- if (version < IPA_VERSION_4_5) {
- val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
- }
-
- /* Implement some hardware workarounds */
- if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
- /* Disable PA mask to allow HOLB drop */
- val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
-
- /* Enable open global clocks in the CLKON configuration */
- val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
- } else if (version == IPA_VERSION_3_1) {
- val = MISC_FMASK; /* Disable MISC clock gating */
- } else {
- val = 0; /* No CLKON configuration needed */
- }
- if (val)
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
+ ipa_hardware_config_bcr(ipa, data);
+ ipa_hardware_config_tx(ipa);
+ ipa_hardware_config_clkon(ipa);
ipa_hardware_config_comp(ipa);
-
- /* Configure system bus limits */
ipa_hardware_config_qsb(ipa, data);
-
- if (version < IPA_VERSION_4_5) {
- /* Configure aggregation timer granularity */
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- } else {
- ipa_qtime_config(ipa);
- }
-
- /* IPA v4.2 does not support hashed tables, so disable them */
- if (version == IPA_VERSION_4_2) {
- u32 offset = ipa_reg_filt_rout_hash_en_offset(version);
-
- iowrite32(0, ipa->reg_virt + offset);
- }
-
- /* Enable dynamic clock division */
+ ipa_hardware_config_timing(ipa);
+ ipa_hardware_config_hashing(ipa);
ipa_hardware_dcd_config(ipa);
}
@@ -612,29 +694,6 @@ static void ipa_validate_build(void)
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
- BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY_FMASK));
-}
-
-static bool ipa_version_valid(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_0:
- case IPA_VERSION_3_1:
- case IPA_VERSION_3_5:
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- case IPA_VERSION_4_2:
- case IPA_VERSION_4_5:
- case IPA_VERSION_4_7:
- case IPA_VERSION_4_9:
- case IPA_VERSION_4_11:
- return true;
-
- default:
- return false;
- }
}
/**
@@ -678,8 +737,8 @@ static int ipa_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!ipa_version_valid(data->version)) {
- dev_err(dev, "invalid IPA version\n");
+ if (!ipa_version_supported(data->version)) {
+ dev_err(dev, "unsupported IPA version %u\n", data->version);
return -EINVAL;
}
@@ -836,6 +895,8 @@ out_power_put:
kfree(ipa);
ipa_power_exit(power);
+ dev_info(dev, "IPA driver removed");
+
return 0;
}
@@ -851,6 +912,7 @@ static void ipa_shutdown(struct platform_device *pdev)
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,
+ &ipa_endpoint_id_attribute_group,
&ipa_modem_attribute_group,
NULL,
};
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1e9eae208e44..f84c6830495a 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -112,8 +113,10 @@ int ipa_mem_setup(struct ipa *ipa)
/* Tell the hardware where the processing context area is located */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
- val = proc_cntxt_base_addr_encoded(ipa->version, offset);
- iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
+
+ reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
+ val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
return 0;
}
@@ -306,6 +309,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
@@ -314,12 +318,14 @@ int ipa_mem_config(struct ipa *ipa)
u32 i;
/* Check the advertised location and size of the shared memory area */
- val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+ reg = ipa_reg(ipa, SHARED_MEM_SIZE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+ mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
@@ -568,7 +574,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 27d87097433f..423422a2a445 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/errno.h>
@@ -9,6 +9,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
+#include <linux/etherdevice.h>
+#include <net/pkt_sched.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc/qcom_rproc.h>
@@ -127,7 +129,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
goto err_drop_skb;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
- if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
+ if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
/* The hardware must be powered for us to transmit */
@@ -203,15 +205,20 @@ static const struct net_device_ops ipa_modem_ops = {
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
netdev->netdev_ops = &ipa_modem_ops;
- ether_setup(netdev);
- /* No header ops (override value set by ether_setup()) */
+
netdev->header_ops = NULL;
netdev->type = ARPHRD_RAWIP;
netdev->hard_header_len = 0;
+ netdev->min_header_len = ETH_HLEN;
+ netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IPA_MTU;
netdev->mtu = netdev->max_mtu;
netdev->addr_len = 0;
+ netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ netdev->priv_flags |= IFF_TX_SKB_SHARING;
+ eth_broadcast_addr(netdev->broadcast);
+
/* The endpoint is configured for QMAP */
netdev->needed_headroom = sizeof(struct rmnet_map_header);
netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index e64ccc2402e9..d85718db9a57 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_MODEM_H_
#define _IPA_MODEM_H_
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index b1c6c0fcb654..8420f93128a2 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/clk.h>
@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
+#include "linux/soc/qcom/qcom_aoss.h"
+
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
@@ -33,18 +35,6 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * struct ipa_interconnect - IPA interconnect information
- * @path: Interconnect path
- * @average_bandwidth: Average interconnect bandwidth (KB/second)
- * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
- */
-struct ipa_interconnect {
- struct icc_path *path;
- u32 average_bandwidth;
- u32 peak_bandwidth;
-};
-
-/**
* enum ipa_power_flag - IPA power flags
* @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
@@ -64,6 +54,7 @@ enum ipa_power_flag {
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
+ * @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
@@ -72,167 +63,82 @@ enum ipa_power_flag {
struct ipa_power {
struct device *dev;
struct clk *core;
+ struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
- struct ipa_interconnect *interconnect;
+ struct icc_bulk_data interconnect[];
};
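
The flexible interconnect[] array at the end of the structure is why
ipa_power_init() below switches from a plain kzalloc() to
struct_size() when computing the allocation size.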
-static int ipa_interconnect_init_one(struct device *dev,
- struct ipa_interconnect *interconnect,
- const struct ipa_interconnect_data *data)
-{
- struct icc_path *path;
-
- path = of_icc_get(dev, data->name);
- if (IS_ERR(path)) {
- int ret = PTR_ERR(path);
-
- dev_err_probe(dev, ret, "error getting %s interconnect\n",
- data->name);
-
- return ret;
- }
-
- interconnect->path = path;
- interconnect->average_bandwidth = data->average_bandwidth;
- interconnect->peak_bandwidth = data->peak_bandwidth;
-
- return 0;
-}
-
-static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
-{
- icc_put(interconnect->path);
- memset(interconnect, 0, sizeof(*interconnect));
-}
-
/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
+static int ipa_interconnect_init(struct ipa_power *power,
const struct ipa_interconnect_data *data)
{
- struct ipa_interconnect *interconnect;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
- if (!interconnect)
- return -ENOMEM;
- power->interconnect = interconnect;
-
- while (count--) {
- ret = ipa_interconnect_init_one(dev, interconnect, data++);
- if (ret)
- goto out_unwind;
- interconnect++;
- }
-
- return 0;
-
-out_unwind:
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-
- return ret;
-}
-
-/* Inverse of ipa_interconnect_init() */
-static void ipa_interconnect_exit(struct ipa_power *power)
-{
- struct ipa_interconnect *interconnect;
-
- interconnect = power->interconnect + power->interconnect_count;
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-}
-
-/* Currently we only use one bandwidth level, so just "enable" interconnects */
-static int ipa_interconnect_enable(struct ipa *ipa)
-{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
+ struct icc_bulk_data *interconnect;
int ret;
u32 i;
- interconnect = power->interconnect;
+ /* Initialize our interconnect data array for bulk operations */
+ interconnect = &power->interconnect[0];
for (i = 0; i < power->interconnect_count; i++) {
- ret = icc_set_bw(interconnect->path,
- interconnect->average_bandwidth,
- interconnect->peak_bandwidth);
- if (ret) {
- dev_err(&ipa->pdev->dev,
- "error %d enabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- goto out_unwind;
- }
+ /* interconnect->path is filled in by of_icc_bulk_get() */
+ interconnect->name = data->name;
+ interconnect->avg_bw = data->average_bandwidth;
+ interconnect->peak_bw = data->peak_bandwidth;
+ data++;
interconnect++;
}
- return 0;
+ ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+ power->interconnect);
+ if (ret)
+ return ret;
-out_unwind:
- while (interconnect-- > power->interconnect)
- (void)icc_set_bw(interconnect->path, 0, 0);
+ /* All interconnects are initially disabled */
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+ /* Set the bandwidth values to be used when enabled */
+ ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+ if (ret)
+ icc_bulk_put(power->interconnect_count, power->interconnect);
return ret;
}
-/* To disable an interconnect, we just its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
- struct device *dev = &ipa->pdev->dev;
- int result = 0;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = power->interconnect + count;
- while (count--) {
- interconnect--;
- ret = icc_set_bw(interconnect->path, 0, 0);
- if (ret) {
- dev_err(dev, "error %d disabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- /* Try to disable all; record only the first error */
- if (!result)
- result = ret;
- }
- }
-
- return result;
+ icc_bulk_put(power->interconnect_count, power->interconnect);
}
/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
+ struct ipa_power *power = ipa->power;
int ret;
- ret = ipa_interconnect_enable(ipa);
+ ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
if (ret)
return ret;
- ret = clk_prepare_enable(ipa->power->core);
+ ret = clk_prepare_enable(power->core);
if (ret) {
- dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
- (void)ipa_interconnect_disable(ipa);
+ dev_err(power->dev, "error %d enabling core clock\n", ret);
+ icc_bulk_disable(power->interconnect_count,
+ power->interconnect);
}
return ret;
}
/* Inverse of ipa_power_enable() */
-static int ipa_power_disable(struct ipa *ipa)
+static void ipa_power_disable(struct ipa *ipa)
{
- clk_disable_unprepare(ipa->power->core);
+ struct ipa_power *power = ipa->power;
- return ipa_interconnect_disable(ipa);
+ clk_disable_unprepare(power->core);
+
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
}
static int ipa_runtime_suspend(struct device *dev)
@@ -246,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev)
gsi_suspend(&ipa->gsi);
}
- return ipa_power_disable(ipa);
+ ipa_power_disable(ipa);
+
+ return 0;
}
static int ipa_runtime_resume(struct device *dev)
@@ -382,6 +290,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa)
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
+static int ipa_power_retention_init(struct ipa_power *power)
+{
+ struct qmp *qmp = qmp_get(power->dev);
+
+ if (IS_ERR(qmp)) {
+ if (PTR_ERR(qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We assume any other error means it's not defined/needed */
+ qmp = NULL;
+ }
+ power->qmp = qmp;
+
+ return 0;
+}
+
+static void ipa_power_retention_exit(struct ipa_power *power)
+{
+ qmp_put(power->qmp);
+ power->qmp = NULL;
+}
+
+/* Control register retention on power collapse */
+void ipa_power_retention(struct ipa *ipa, bool enable)
+{
+ static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
+ struct ipa_power *power = ipa->power;
+ char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */
+ int ret;
+
+ if (!power->qmp)
+ return; /* Not needed on this platform */
+
+ (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');
+
+ ret = qmp_send(power->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(power->dev, "error %d sending QMP %sable request\n",
+ ret, enable ? "en" : "dis");
+}
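
For reference, an enable request sends all 36 bytes of buf: the
35-character message "{ class: bcm, res: ipa_pc, val: 1 }" plus its
terminating NUL. A disable request is identical except for val: 0.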
+
int ipa_power_setup(struct ipa *ipa)
{
int ret;
@@ -408,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
struct ipa_power *power;
struct clk *clk;
+ size_t size;
int ret;
clk = clk_get(dev, "core");
@@ -424,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
goto err_clk_put;
}
- power = kzalloc(sizeof(*power), GFP_KERNEL);
+ size = struct_size(power, interconnect, data->interconnect_count);
+ power = kzalloc(size, GFP_KERNEL);
if (!power) {
ret = -ENOMEM;
goto err_clk_put;
@@ -434,16 +385,22 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(power, dev, data->interconnect_data);
+ ret = ipa_interconnect_init(power, data->interconnect_data);
if (ret)
goto err_kfree;
+ ret = ipa_power_retention_init(power);
+ if (ret)
+ goto err_interconnect_exit;
+
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
+err_interconnect_exit:
+ ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
@@ -460,6 +417,7 @@ void ipa_power_exit(struct ipa_power *power)
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
+ ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 2151805d7fbb..896f052e51a1 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_POWER_H_
#define _IPA_POWER_H_
@@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa);
void ipa_power_modem_queue_active(struct ipa *ipa);
/**
+ * ipa_power_retention() - Control register retention on power collapse
+ * @ipa: IPA pointer
+ * @enable: Whether retention should be enabled or disabled
+ */
+void ipa_power_retention(struct ipa *ipa, bool enable);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 90f3aec55b36..8295fd4b70d1 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
*/
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{
- struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct ipa *ipa;
int ret;
/* We aren't ready until the modem and microcontroller are */
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
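
The change from a count to an end value in these four fields is a
semantic fix: end holds the highest route table *index* the modem may
use, not a table size in bytes. If IPA_ROUTE_MODEM_COUNT were 8 (the
constant is defined elsewhere in the driver; 8 is illustrative), each
field would be set to 7.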
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 856ef629ccc8..1c236826c17a 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_H_
#define _IPA_QMI_H_
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 6838e8065072..97c0befe8d86 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/stddef.h>
#include <linux/soc/qcom/qmi.h>
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 3233d145fd87..e29663965f43 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_MSG_H_
#define _IPA_QMI_MSG_H_
@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes; however, it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
@@ -214,7 +219,7 @@ struct ipa_init_modem_driver_req {
/* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
* QMI response, but contains other information as well. Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
* ignore any other data that might be returned.
*/
struct ipa_init_modem_driver_rsp {
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index e6147a1cd787..22f067741d9b 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/io.h>
@@ -9,11 +9,105 @@
#include "ipa.h"
#include "ipa_reg.h"
+/* Is this register valid and defined for the current IPA version? */
+static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ enum ipa_version version = ipa->version;
+ bool valid;
+
+ /* Check for bogus (out of range) register IDs */
+ if ((u32)reg_id >= ipa->regs->reg_count)
+ return false;
+
+ switch (reg_id) {
+ case IPA_BCR:
+ case COUNTER_CFG:
+ valid = version < IPA_VERSION_4_5;
+ break;
+
+ case IPA_TX_CFG:
+ case FLAVOR_0:
+ case IDLE_INDICATION_CFG:
+ valid = version >= IPA_VERSION_3_5;
+ break;
+
+ case QTIME_TIMESTAMP_CFG:
+ case TIMERS_XO_CLK_DIV_CFG:
+ case TIMERS_PULSE_GRAN_CFG:
+ valid = version >= IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_45_RSRC_TYPE:
+ case DST_RSRC_GRP_45_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_67_RSRC_TYPE:
+ case DST_RSRC_GRP_67_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1;
+ break;
+
+ case ENDP_FILTER_ROUTER_HSH_CFG:
+ valid = version != IPA_VERSION_4_2;
+ break;
+
+ case IRQ_SUSPEND_EN:
+ case IRQ_SUSPEND_CLR:
+ valid = version >= IPA_VERSION_3_1;
+ break;
+
+ default:
+ valid = true; /* Others should be defined for all versions */
+ break;
+ }
+
+ /* To be valid, it must be defined */
+
+ return valid && ipa->regs->reg[reg_id];
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ return NULL;
+
+ return ipa->regs->reg[reg_id];
+}
+
+static const struct ipa_regs *ipa_regs(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ return &ipa_regs_v3_1;
+ case IPA_VERSION_3_5_1:
+ return &ipa_regs_v3_5_1;
+ case IPA_VERSION_4_2:
+ return &ipa_regs_v4_2;
+ case IPA_VERSION_4_5:
+ return &ipa_regs_v4_5;
+ case IPA_VERSION_4_9:
+ return &ipa_regs_v4_9;
+ case IPA_VERSION_4_11:
+ return &ipa_regs_v4_11;
+ default:
+ return NULL;
+ }
+}
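
Note this table is narrower than the ipa_version_valid() list removed
from ipa_main.c above: a version with no register definitions (for
example v4.0, v4.1, or v4.7, assuming no ipa_regs_v* array exists for
them) now makes ipa_reg_init() fail with -EINVAL instead of merely
passing a version check.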
+
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_regs *regs;
struct resource *res;
+ regs = ipa_regs(ipa->version);
+ if (!regs)
+ return -EINVAL;
+
+ if (WARN_ON(regs->reg_count > IPA_REG_ID_COUNT))
+ return -EINVAL;
+
/* Setup IPA register memory */
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-reg");
@@ -28,6 +122,7 @@ int ipa_reg_init(struct ipa *ipa)
return -ENOMEM;
}
ipa->reg_addr = res->start;
+ ipa->regs = regs;
return 0;
}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..7bf70f70f63f 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
#include <linux/bitfield.h>
+#include <linux/bug.h>
#include "ipa_version.h"
@@ -16,304 +17,325 @@ struct ipa;
* DOC: IPA Registers
*
* IPA registers are located within the "ipa-reg" address space defined by
- * Device Tree. The offset of each register within that space is specified
- * by symbols defined below. The address space is mapped to virtual memory
- * space in ipa_mem_init(). All IPA registers are 32 bits wide.
+ * Device Tree. Each register has a specified offset within that space,
+ * which is mapped into virtual memory space in ipa_mem_init(). Each
+ * has a unique identifier, taken from the ipa_reg_id enumerated type.
+ * All IPA registers are 32 bits wide.
*
- * Certain register types are duplicated for a number of instances of
- * something. For example, each IPA endpoint has an set of registers
- * defining its configuration. The offset to an endpoint's set of registers
- * is computed based on an "base" offset, plus an endpoint's ID multiplied
- * and a "stride" value for the register. For such registers, the offset is
- * computed by a function-like macro that takes a parameter used in the
- * computation.
+ * Certain "parameterized" register types are duplicated for a number of
+ * instances of something. For example, each IPA endpoint has a set of
+ * registers defining its configuration. The offset to an endpoint's set
+ * of registers is computed based on a "base" offset, plus the endpoint's
+ * ID multiplied by a "stride" value for the register. Similarly, some
+ * registers have an offset that depends on execution environment. In
+ * this case, the stride is multiplied by a member of the gsi_ee_id
+ * enumerated type.
*
- * Some register offsets depend on execution environment. For these an "ee"
- * parameter is supplied to the offset macro. The "ee" value is a member of
- * the gsi_ee enumerated type.
+ * Each version of IPA implements an array of ipa_reg structures indexed
+ * by register ID. Each entry in the array specifies the base offset and
+ * (for parameterized registers) a non-zero stride value. Not all versions
+ * of IPA define all registers. The offset for a register is returned by
+ * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * zero is returned for an undefined register (this should never happen).
*
- * The offset of a register dependent on endpoint ID is computed by a macro
- * that is supplied a parameter "ep", "txep", or "rxep". A register with an
- * "ep" parameter is valid for any endpoint; a register with a "txep" or
- * "rxep" parameter is valid only for TX or RX endpoints, respectively. The
- * "*ep" value is assumed to be less than the maximum valid endpoint ID
- * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
- *
- * The offset of registers related to filter and route tables is computed
- * by a macro that is supplied a parameter "er". The "er" represents an
- * endpoint ID for filters, or a route ID for routes. For filters, the
- * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
- * because not all endpoints support filtering. For routes, the route ID
- * must be less than IPA_ROUTE_MAX.
- *
- * The offset of registers related to resource types is computed by a macro
- * that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
- * source endpoint resources or the ipa_resource_type_dst enumerated type
- * for destination endpoint resources.
- *
- * Some registers encode multiple fields within them. For these, each field
- * has a symbol below defining a field mask that encodes both the position
- * and width of the field within its register.
- *
- * In some cases, different versions of IPA hardware use different offset or
- * field mask values. In such cases an inline_function(ipa) is used rather
- * than a MACRO to define the offset or field mask to use.
- *
- * Finally, some registers hold bitmasks representing endpoints. In such
- * cases the @available field in the @ipa structure defines the "full" set
- * of valid bits for the register.
+ * Some registers encode multiple fields within them. Each field in
+ * such a register has a unique identifier (from an enumerated type).
+ * The position and width of the fields in a register are defined by
+ * an array of field masks, indexed by field ID. Two functions are
+ * used to access register fields; both take an ipa_reg structure as
+ * argument. To encode a value to be represented in a register field,
+ * the value and field ID are passed to ipa_reg_encode(). To extract
+ * a value encoded in a register field, the field ID is passed to
+ * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * can be used to either encode the bit value, or to generate a mask
+ * used to extract the bit value.
*/
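
In sketch form, the access pattern this describes (the register and
field IDs here are only examples):

	const struct ipa_reg *reg;
	u32 max;
	u32 val;

	reg = ipa_reg(ipa, QSB_MAX_WRITES);	/* look up the descriptor */

	/* Encode a field value and write the register */
	val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, 8);
	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));

	/* Read it back and extract the same field */
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
	max = ipa_reg_decode(reg, GEN_QMB_0_MAX_WRITES, val);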
-#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
-/* The next field is not supported for IPA v4.0+, not present for IPA v4.5+ */
-#define ENABLE_FMASK GENMASK(0, 0)
-/* The next field is present for IPA v4.7+ */
-#define RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS_FMASK GENMASK(0, 0)
-#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
-#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
-#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
-/* The next field is not present for IPA v4.5+ */
-#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
-/* The next twelve fields are present for IPA v4.0+ */
-#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
-#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
-#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
-#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
-#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
-#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
-#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
-#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
-#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
-#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
-#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
-#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
-/* The next five fields are present for IPA v4.9+ */
-#define QMB_RAM_RD_CACHE_DISABLE_FMASK GENMASK(19, 19)
-#define GENQMB_AOOOWR_FMASK GENMASK(20, 20)
-#define IF_OUT_OF_BUF_STOP_RESET_MASK_EN_FMASK GENMASK(21, 21)
-#define GEN_QMB_1_DYNAMIC_ASIZE_FMASK GENMASK(30, 30)
-#define GEN_QMB_0_DYNAMIC_ASIZE_FMASK GENMASK(31, 31)
-
-/* Encoded value for COMP_CFG register ATOMIC_FETCHER_ARB_LOCK_DIS field */
-static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
- u32 mask)
-{
- WARN_ON(version < IPA_VERSION_4_0);
+/* enum ipa_reg_id - IPA register IDs */
+enum ipa_reg_id {
+ COMP_CFG,
+ CLKON_CFG,
+ ROUTE,
+ SHARED_MEM_SIZE,
+ QSB_MAX_WRITES,
+ QSB_MAX_READS,
+ FILT_ROUT_HASH_EN,
+ FILT_ROUT_HASH_FLUSH,
+ STATE_AGGR_ACTIVE,
+ IPA_BCR, /* Not IPA v4.5+ */
+ LOCAL_PKT_PROC_CNTXT,
+ AGGR_FORCE_CLOSE,
+ COUNTER_CFG, /* Not IPA v4.5+ */
+ IPA_TX_CFG, /* IPA v3.5+ */
+ FLAVOR_0, /* IPA v3.5+ */
+ IDLE_INDICATION_CFG, /* IPA v3.5+ */
+ QTIME_TIMESTAMP_CFG, /* IPA v4.5+ */
+ TIMERS_XO_CLK_DIV_CFG, /* IPA v4.5+ */
+ TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
+ SRC_RSRC_GRP_01_RSRC_TYPE,
+ SRC_RSRC_GRP_23_RSRC_TYPE,
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ DST_RSRC_GRP_01_RSRC_TYPE,
+ DST_RSRC_GRP_23_RSRC_TYPE,
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
+ ENDP_INIT_CFG,
+ ENDP_INIT_NAT, /* TX only */
+ ENDP_INIT_HDR,
+ ENDP_INIT_HDR_EXT,
+ ENDP_INIT_HDR_METADATA_MASK, /* RX only */
+ ENDP_INIT_MODE, /* TX only */
+ ENDP_INIT_AGGR,
+ ENDP_INIT_HOL_BLOCK_EN, /* RX only */
+ ENDP_INIT_HOL_BLOCK_TIMER, /* RX only */
+ ENDP_INIT_DEAGGR, /* TX only */
+ ENDP_INIT_RSRC_GRP,
+ ENDP_INIT_SEQ, /* TX only */
+ ENDP_STATUS,
+ ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
+ /* The IRQ registers are only used for GSI_EE_AP */
+ IPA_IRQ_STTS,
+ IPA_IRQ_EN,
+ IPA_IRQ_CLR,
+ IPA_IRQ_UC,
+ IRQ_SUSPEND_INFO,
+ IRQ_SUSPEND_EN, /* IPA v3.1+ */
+ IRQ_SUSPEND_CLR, /* IPA v3.1+ */
+ IPA_REG_ID_COUNT, /* Last; not an ID */
+};
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(20, 17));
+/**
+ * struct ipa_reg - An IPA register descriptor
+ * @offset: Register offset relative to base of the "ipa-reg" memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the IPA register
+ */
+struct ipa_reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
- if (version == IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(24, 22));
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define IPA_REG(__NAME, __reg_id, __offset) \
+ IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
- return u32_encode_bits(mask, GENMASK(23, 22));
-}
+/* Helper macro for defining parameterized registers, specifying stride */
+#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
-/* Encoded value for COMP_CFG register FULL_FLUSH_WAIT_RS_CLOSURE_EN field */
-static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
- bool enable)
-{
- u32 val = enable ? 1 : 0;
+#define IPA_REG_FIELDS(__NAME, __name, __offset) \
+ IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
- WARN_ON(version < IPA_VERSION_4_5);
+#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
+ .fmask = ipa_reg_ ## __name ## _fmask, \
+ }
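For reference, an invocation such as IPA_REG_FIELDS(ROUTE, route, 0x00000048) (as used in the per-version files below) expands to approximately:

	static const struct ipa_reg ipa_reg_route = {
		.name	= "ROUTE",
		.offset	= 0x00000048,
		.stride	= 0,
		.fcount	= ARRAY_SIZE(ipa_reg_route_fmask),
		.fmask	= ipa_reg_route_fmask,
	};

so each descriptor picks up the ipa_reg_<name>_fmask[] array defined just above it.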
- if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
- return u32_encode_bits(val, GENMASK(21, 21));
+/**
+ * struct ipa_regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct ipa_regs {
+ u32 reg_count;
+ const struct ipa_reg **reg;
+};
- return u32_encode_bits(val, GENMASK(17, 17));
-}
+/* COMP_CFG register */
+enum ipa_reg_comp_cfg_field_id {
+ COMP_CFG_ENABLE, /* Not IPA v4.0+ */
+ RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS, /* IPA v4.7+ */
+ GSI_SNOC_BYPASS_DIS,
+ GEN_QMB_0_SNOC_BYPASS_DIS,
+ GEN_QMB_1_SNOC_BYPASS_DIS,
+ IPA_DCMP_FAST_CLK_EN, /* Not IPA v4.5+ */
+ IPA_QMB_SELECT_CONS_EN, /* IPA v4.0+ */
+ IPA_QMB_SELECT_PROD_EN, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS, /* IPA v4.0+ */
+ GSI_SNOC_CNOC_LOOP_PROT_DISABLE, /* IPA v4.0+ */
+ GSI_MULTI_AXI_MASTERS_DIS, /* IPA v4.0+ */
+ IPA_QMB_SELECT_GLOBAL_EN, /* IPA v4.0+ */
+ QMB_RAM_RD_CACHE_DISABLE, /* IPA v4.9+ */
+ GENQMB_AOOOWR, /* IPA v4.9+ */
+ IF_OUT_OF_BUF_STOP_RESET_MASK_EN, /* IPA v4.9+ */
+ GEN_QMB_1_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ GEN_QMB_0_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ ATOMIC_FETCHER_ARB_LOCK_DIS, /* IPA v4.0+ */
+ FULL_FLUSH_WAIT_RS_CLOSURE_EN, /* IPA v4.5+ */
+};
-#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
-#define RX_FMASK GENMASK(0, 0)
-#define PROC_FMASK GENMASK(1, 1)
-#define TX_WRAPPER_FMASK GENMASK(2, 2)
-#define MISC_FMASK GENMASK(3, 3)
-#define RAM_ARB_FMASK GENMASK(4, 4)
-#define FTCH_HPS_FMASK GENMASK(5, 5)
-#define FTCH_DPS_FMASK GENMASK(6, 6)
-#define HPS_FMASK GENMASK(7, 7)
-#define DPS_FMASK GENMASK(8, 8)
-#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
-#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
-#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
-#define RSRC_MNGR_FMASK GENMASK(12, 12)
-#define CTX_HANDLER_FMASK GENMASK(13, 13)
-#define ACK_MNGR_FMASK GENMASK(14, 14)
-#define D_DCPH_FMASK GENMASK(15, 15)
-#define H_DCPH_FMASK GENMASK(16, 16)
-/* The next field is not present for IPA v4.5+ */
-#define DCMP_FMASK GENMASK(17, 17)
-/* The next three fields are present for IPA v3.5+ */
-#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
-#define TX_0_FMASK GENMASK(19, 19)
-#define TX_1_FMASK GENMASK(20, 20)
-/* The next field is present for IPA v3.5.1+ */
-#define FNR_FMASK GENMASK(21, 21)
-/* The next eight fields are present for IPA v4.0+ */
-#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
-#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
-#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
-#define QMB_FMASK GENMASK(25, 25)
-#define WEIGHT_ARB_FMASK GENMASK(26, 26)
-#define GSI_IF_FMASK GENMASK(27, 27)
-#define GLOBAL_FMASK GENMASK(28, 28)
-#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
-/* The next field is present for IPA v4.5+ */
-#define DPL_FIFO_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.7+ */
-#define DRBIP_FMASK GENMASK(31, 31)
-
-#define IPA_REG_ROUTE_OFFSET 0x00000048
-#define ROUTE_DIS_FMASK GENMASK(0, 0)
-#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
-#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
-#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
-#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
-#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
-
-#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
-#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
-#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
-
-#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
-#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
-
-#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
-#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0+ */
-#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
-#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-
-static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x000008c;
+/* CLKON_CFG register */
+enum ipa_reg_clkon_cfg_field_id {
+ CLKON_RX,
+ CLKON_PROC,
+ TX_WRAPPER,
+ CLKON_MISC,
+ RAM_ARB,
+ FTCH_HPS,
+ FTCH_DPS,
+ CLKON_HPS,
+ CLKON_DPS,
+ RX_HPS_CMDQS,
+ HPS_DPS_CMDQS,
+ DPS_TX_CMDQS,
+ RSRC_MNGR,
+ CTX_HANDLER,
+ ACK_MNGR,
+ D_DCPH,
+ H_DCPH,
+ CLKON_DCMP, /* Not IPA v4.5+ */
+ NTF_TX_CMDQS, /* IPA v3.5+ */
+ CLKON_TX_0, /* IPA v3.5+ */
+ CLKON_TX_1, /* IPA v3.5+ */
+ CLKON_FNR, /* IPA v3.5.1+ */
+ QSB2AXI_CMDQ_L, /* IPA v4.0+ */
+ AGGR_WRAPPER, /* IPA v4.0+ */
+ RAM_SLAVEWAY, /* IPA v4.0+ */
+ CLKON_QMB, /* IPA v4.0+ */
+ WEIGHT_ARB, /* IPA v4.0+ */
+ GSI_IF, /* IPA v4.0+ */
+ CLKON_GLOBAL, /* IPA v4.0+ */
+ GLOBAL_2X_CLK, /* IPA v4.0+ */
+ DPL_FIFO, /* IPA v4.5+ */
+ DRBIP, /* IPA v4.7+ */
+};
- return 0x0000148;
-}
+/* ROUTE register */
+enum ipa_reg_route_field_id {
+ ROUTE_DIS,
+ ROUTE_DEF_PIPE,
+ ROUTE_DEF_HDR_TABLE,
+ ROUTE_DEF_HDR_OFST,
+ ROUTE_FRAG_DEF_PIPE,
+ ROUTE_DEF_RETAIN_HDR,
+};
-static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000090;
+/* SHARED_MEM_SIZE register */
+enum ipa_reg_shared_mem_size_field_id {
+ MEM_SIZE,
+ MEM_BADDR,
+};
- return 0x000014c;
-}
+/* QSB_MAX_WRITES register */
+enum ipa_reg_qsb_max_writes_field_id {
+ GEN_QMB_0_MAX_WRITES,
+ GEN_QMB_1_MAX_WRITES,
+};
-/* The next four fields are used for the hash enable and flush registers */
-#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+/* QSB_MAX_READS register */
+enum ipa_reg_qsb_max_reads_field_id {
+ GEN_QMB_0_MAX_READS,
+ GEN_QMB_1_MAX_READS,
+ GEN_QMB_0_MAX_READS_BEATS, /* IPA v4.0+ */
+ GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
+};
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000010c;
+/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
+enum ipa_reg_rout_hash_field_id {
+ IPV6_ROUTER_HASH,
+ IPV6_FILTER_HASH,
+ IPV4_ROUTER_HASH,
+ IPV4_FILTER_HASH,
+};
- return 0x000000b4;
-}
+/* BCR register */
+enum ipa_bcr_compat {
+ BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
+ BCR_TX_NOT_USING_BRESP = 0x1, /* Not IPA v4.2+ */
+ BCR_TX_SUSPEND_IRQ_ASSERT_ONCE = 0x2, /* Not IPA v4.0+ */
+ BCR_SUSPEND_L2_IRQ = 0x3, /* Not IPA v4.2+ */
+ BCR_HOLB_DROP_L2_IRQ = 0x4, /* Not IPA v4.2+ */
+ BCR_DUAL_TX = 0x5, /* IPA v3.5+ */
+ BCR_ENABLE_FILTER_DATA_CACHE = 0x6, /* IPA v3.5+ */
+ BCR_NOTIF_PRIORITY_OVER_ZLT = 0x7, /* IPA v3.5+ */
+ BCR_FILTER_PREFETCH_EN = 0x8, /* IPA v3.5+ */
+ BCR_ROUTER_PREFETCH_EN = 0x9, /* IPA v3.5+ */
+};
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_BCR_OFFSET 0x000001d0
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
-#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
-/* The next field is invalid for IPA v4.0+ */
-#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
-#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
-/* The next five fields are present for IPA v3.5+ */
-#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
-#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
-#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
-#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
-#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
-
-/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */
-#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8
-
-/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */
-static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version,
- u32 addr)
-{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(addr, GENMASK(16, 0));
+/* LOCAL_PKT_PROC_CNTXT register */
+enum ipa_reg_local_pkt_proc_cntxt_field_id {
+ IPA_BASE_ADDR,
+};
- return u32_encode_bits(addr, GENMASK(17, 0));
-}
+/* COUNTER_CFG register */
+enum ipa_reg_counter_cfg_field_id {
+ EOT_COAL_GRANULARITY, /* Not v3.5+ */
+ AGGR_GRANULARITY,
+};
-/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
-
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-/* The next field is not present for IPA v3.5+ */
-#define EOT_COAL_GRANULARITY GENMASK(3, 0)
-#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_TX_CFG_OFFSET 0x000001fc
-/* The next three fields are not present for IPA v4.0+ */
-#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
-/* The next six fields are present for IPA v4.0+ */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
-#define PA_MASK_EN_FMASK GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
-/* The next field is present for IPA v4.5+ */
-#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
-/* The next field is present for IPA v4.2+, but not IPA v4.5 */
-#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
-/* The next field is present for IPA v4.2 only */
-#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
-#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
-
-/* The next register is present for IPA v3.5+ */
-static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
-{
- if (version >= IPA_VERSION_4_2)
- return 0x00000240;
+/* IPA_TX_CFG register */
+enum ipa_reg_ipa_tx_cfg_field_id {
+ TX0_PREFETCH_DISABLE, /* Not v4.0+ */
+ TX1_PREFETCH_DISABLE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_THRESHOLD, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_EN, /* v4.0+ */
+ DMAW_MAX_BEATS_256_DIS, /* v4.0+ */
+ PA_MASK_EN, /* v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* v4.0+ */
+ DUAL_TX_ENABLE, /* v4.5+ */
+ SSPND_PA_NO_START_STATE, /* v4.2+, not v4.5 */
+ SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+};
- return 0x00000220;
-}
+/* FLAVOR_0 register */
+enum ipa_reg_flavor_0_field_id {
+ MAX_PIPES,
+ MAX_CONS_PIPES,
+ MAX_PROD_PIPES,
+ PROD_LOWEST,
+};
+
+/* IDLE_INDICATION_CFG register */
+enum ipa_reg_idle_indication_cfg_field_id {
+ ENTER_IDLE_DEBOUNCE_THRESH,
+ CONST_NON_IDLE_ENABLE,
+};
+
+/* QTIME_TIMESTAMP_CFG register */
+enum ipa_reg_qtime_timestamp_cfg_field_id {
+ DPL_TIMESTAMP_LSB,
+ DPL_TIMESTAMP_SEL,
+ TAG_TIMESTAMP_LSB,
+ NAT_TIMESTAMP_LSB,
+};
-#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
-#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
-#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
-#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
-#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
-#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
-#define DIV_VALUE_FMASK GENMASK(8, 0)
-#define DIV_ENABLE_FMASK GENMASK(31, 31)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
-#define GRAN_0_FMASK GENMASK(2, 0)
-#define GRAN_1_FMASK GENMASK(5, 3)
-#define GRAN_2_FMASK GENMASK(8, 6)
-/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+/* TIMERS_XO_CLK_DIV_CFG register */
+enum ipa_reg_timers_xo_clk_div_cfg_field_id {
+ DIV_VALUE,
+ DIV_ENABLE,
+};
+
+/* TIMERS_PULSE_GRAN_CFG register */
+enum ipa_reg_timers_pulse_gran_cfg_field_id {
+ PULSE_GRAN_0,
+ PULSE_GRAN_1,
+ PULSE_GRAN_2,
+};
+
+/* Values for PULSE_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
enum ipa_pulse_gran {
IPA_GRAN_10_US = 0x0,
IPA_GRAN_20_US = 0x1,
@@ -325,267 +347,160 @@ enum ipa_pulse_gran {
IPA_GRAN_655350_US = 0x7,
};
-/* Not all of the following are present (depends on IPA version) */
-#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000400 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000404 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000408 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000040c + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000500 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000504 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000508 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000050c + 0x0020 * (rt))
-/* The next four fields are used for all resource group registers */
-#define X_MIN_LIM_FMASK GENMASK(5, 0)
-#define X_MAX_LIM_FMASK GENMASK(13, 8)
-/* The next two fields are not always present (if resource count is odd) */
-#define Y_MIN_LIM_FMASK GENMASK(21, 16)
-#define Y_MAX_LIM_FMASK GENMASK(29, 24)
-
-#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
- (0x00000800 + 0x0070 * (ep))
-/* Valid only for RX (IPA producer) endpoints (do not use for IPA v4.0+) */
-#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
-/* Valid only for TX (IPA consumer) endpoints */
-#define ENDP_DELAY_FMASK GENMASK(1, 1)
-
-#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
- (0x00000808 + 0x0070 * (ep))
-#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
-#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
-#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
-#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/* {SRC,DST}_RSRC_GRP_{01,23,45,67}_RSRC_TYPE registers */
+enum ipa_reg_rsrc_grp_rsrc_type_field_id {
+ X_MIN_LIM,
+ X_MAX_LIM,
+ Y_MIN_LIM,
+ Y_MAX_LIM,
+};
+
+/* ENDP_INIT_CTRL register */
+enum ipa_reg_endp_init_ctrl_field_id {
+ ENDP_SUSPEND, /* Not v4.0+ */
+ ENDP_DELAY, /* Not v4.2+ */
+};
+
+/* ENDP_INIT_CFG register */
+enum ipa_reg_endp_init_cfg_field_id {
+ FRAG_OFFLOAD_EN,
+ CS_OFFLOAD_EN,
+ CS_METADATA_HDR_OFFSET,
+ CS_GEN_QMB_MASTER_SEL,
+};
/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0x0,
- IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
- IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
- IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL /* TX */ = 0x1, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_DL /* RX */ = 0x2, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_INLINE /* TX and RX */ = 0x1, /* IPA v4.5+ */
};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \
- (0x0000080c + 0x0070 * (ep))
-#define NAT_EN_FMASK GENMASK(1, 0)
+/* ENDP_INIT_NAT register */
+enum ipa_reg_endp_init_nat_field_id {
+ NAT_EN,
+};
/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
enum ipa_nat_en {
- IPA_NAT_BYPASS = 0x0,
- IPA_NAT_SRC = 0x1,
- IPA_NAT_DST = 0x2,
-};
-
-#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
- (0x00000810 + 0x0070 * (ep))
-#define HDR_LEN_FMASK GENMASK(5, 0)
-#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
-#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
-#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
-#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
-#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
-/* The next field is not present for IPA v4.9+ */
-#define HDR_A5_MUX_FMASK GENMASK(26, 26)
-#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
-/* The next two fields are present for IPA v4.5+ */
-#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
-#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
-
-/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
-static inline u32 ipa_header_size_encoded(enum ipa_version version,
- u32 header_size)
-{
- u32 size = header_size & field_mask(HDR_LEN_FMASK);
- u32 val;
-
- val = u32_encode_bits(size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(header_size != size);
- return val;
- }
-
- /* IPA v4.5 adds a few more most-significant bits */
- size = header_size >> hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
-
- return val;
-}
-
-/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
-static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
- u32 offset)
-{
- u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
- u32 val;
-
- val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(offset != off);
- return val;
- }
+ IPA_NAT_BYPASS = 0x0,
+ IPA_NAT_SRC = 0x1,
+ IPA_NAT_DST = 0x2,
+};
- /* IPA v4.5 adds a few more most-significant bits */
- off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
+/* ENDP_INIT_HDR register */
+enum ipa_reg_endp_init_hdr_field_id {
+ HDR_LEN,
+ HDR_OFST_METADATA_VALID,
+ HDR_OFST_METADATA,
+ HDR_ADDITIONAL_CONST_LEN,
+ HDR_OFST_PKT_SIZE_VALID,
+ HDR_OFST_PKT_SIZE,
+ HDR_A5_MUX, /* Not v4.9+ */
+ HDR_LEN_INC_DEAGG_HDR,
+ HDR_METADATA_REG_VALID, /* Not v4.5+ */
+ HDR_LEN_MSB, /* v4.5+ */
+ HDR_OFST_METADATA_MSB, /* v4.5+ */
+};
- return val;
-}
+/* ENDP_INIT_HDR_EXT register */
+enum ipa_reg_endp_init_hdr_ext_field_id {
+ HDR_ENDIANNESS,
+ HDR_TOTAL_LEN_OR_PAD_VALID,
+ HDR_TOTAL_LEN_OR_PAD,
+ HDR_PAYLOAD_LEN_INC_PADDING,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET,
+ HDR_PAD_TO_ALIGNMENT,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
+ HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
+ HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+};
-#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
- (0x00000814 + 0x0070 * (ep))
-#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
-#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
-#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
-#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
-#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-/* The next three fields are present for IPA v4.5+ */
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
-#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
-#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
- (0x00000818 + 0x0070 * (rxep))
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
- (0x00000820 + 0x0070 * (txep))
-#define MODE_FMASK GENMASK(2, 0)
-/* The next field is present for IPA v4.5+ */
-#define DCPH_ENABLE_FMASK GENMASK(3, 3)
-#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
-#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
-#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
-#define PAD_EN_FMASK GENMASK(29, 29)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.9+ */
-#define DRBIP_ACL_ENABLE GENMASK(30, 30)
+/* ENDP_INIT_MODE register */
+enum ipa_reg_endp_init_mode_field_id {
+ ENDP_MODE,
+ DCPH_ENABLE, /* v4.5+ */
+ DEST_PIPE_INDEX,
+ BYTE_THRESHOLD,
+ PIPE_REPLICATION_EN,
+ PAD_EN,
+ HDR_FTCH_DISABLE, /* Not v4.5+ */
+ DRBIP_ACL_ENABLE, /* v4.9+ */
+};
/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
enum ipa_mode {
- IPA_BASIC = 0x0,
- IPA_ENABLE_FRAMING_HDLC = 0x1,
- IPA_ENABLE_DEFRAMING_HDLC = 0x2,
- IPA_DMA = 0x3,
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
};
-#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
- (0x00000824 + 0x0070 * (ep))
-#define AGGR_EN_FMASK GENMASK(1, 0)
-#define AGGR_TYPE_FMASK GENMASK(4, 2)
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_byte_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_time_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_pkt_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_sw_eof_active_fmask(bool legacy)
-{
- return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_force_close_fmask(bool legacy)
-{
- return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
-{
- return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
-}
-
-/* The next field is present for IPA v4.5+ */
-#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
+/* ENDP_INIT_AGGR register */
+enum ipa_reg_endp_init_aggr_field_id {
+ AGGR_EN,
+ AGGR_TYPE,
+ BYTE_LIMIT,
+ TIME_LIMIT,
+ PKT_LIMIT,
+ SW_EOF_ACTIVE,
+ FORCE_CLOSE,
+ HARD_BYTE_LIMIT_EN,
+ AGGR_GRAN_SEL,
+};
/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0x0, /* (TX, RX) */
- IPA_ENABLE_AGGR = 0x1, /* (RX) */
- IPA_ENABLE_DEAGGR = 0x2, /* (TX) */
+ IPA_BYPASS_AGGR /* TX and RX */ = 0x0,
+ IPA_ENABLE_AGGR /* RX */ = 0x1,
+ IPA_ENABLE_DEAGGR /* TX */ = 0x2,
};
/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */
enum ipa_aggr_type {
- IPA_MBIM_16 = 0x0,
- IPA_HDLC = 0x1,
- IPA_TLP = 0x2,
- IPA_RNDIS = 0x3,
- IPA_GENERIC = 0x4,
- IPA_COALESCE = 0x5,
- IPA_QCMAP = 0x6,
-};
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
- (0x0000082c + 0x0070 * (rxep))
-#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
- (0x00000830 + 0x0070 * (rxep))
-/* The next two fields are present for IPA v4.2 only */
-#define BASE_VALUE_FMASK GENMASK(4, 0)
-#define SCALE_FMASK GENMASK(12, 8)
-/* The next two fields are present for IPA v4.5 */
-#define TIME_LIMIT_FMASK GENMASK(4, 0)
-#define GRAN_SEL_FMASK GENMASK(8, 8)
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
- (0x00000834 + 0x0070 * (txep))
-#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
-#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
-#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
-#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
-#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
-#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
-
-#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
- (0x00000838 + 0x0070 * (ep))
-/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
-static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
-{
- if (version < IPA_VERSION_3_5 || version == IPA_VERSION_4_5)
- return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
- if (version == IPA_VERSION_4_2 || version == IPA_VERSION_4_7)
- return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+/* ENDP_INIT_HOL_BLOCK_EN register */
+enum ipa_reg_endp_init_hol_block_en_field_id {
+ HOL_BLOCK_EN,
+};
- return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
-}
+/* ENDP_INIT_HOL_BLOCK_TIMER register */
+enum ipa_reg_endp_init_hol_block_timer_field_id {
+ TIMER_BASE_VALUE, /* Not v4.5+ */
+ TIMER_SCALE, /* v4.2 only */
+ TIMER_LIMIT, /* v4.5+ */
+ TIMER_GRAN_SEL, /* v4.5+ */
+};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
- (0x0000083c + 0x0070 * (txep))
-#define SEQ_TYPE_FMASK GENMASK(7, 0)
-#define SEQ_REP_TYPE_FMASK GENMASK(15, 8)
+/* ENDP_INIT_DEAGGR register */
+enum ipa_reg_endp_init_deaggr_field_id {
+ DEAGGR_HDR_LEN,
+ SYSPIPE_ERR_DETECTION,
+ PACKET_OFFSET_VALID,
+ PACKET_OFFSET_LOCATION,
+ IGNORE_MIN_PKT_ERR,
+ MAX_PACKET_LEN,
+};
+
+/* ENDP_INIT_RSRC_GRP register */
+enum ipa_reg_endp_init_rsrc_grp_field_id {
+ ENDP_RSRC_GRP,
+};
+
+/* ENDP_INIT_SEQ register */
+enum ipa_reg_endp_init_seq_field_id {
+ SEQ_TYPE,
+ SEQ_REP_TYPE, /* Not v4.5+ */
+};
/**
* enum ipa_seq_type - HPS and DPS sequencer type
@@ -629,76 +544,36 @@ enum ipa_seq_rep_type {
IPA_SEQ_REP_DMA_PARSER = 0x08,
};
-#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
- (0x00000840 + 0x0070 * (ep))
-#define STATUS_EN_FMASK GENMASK(0, 0)
-#define STATUS_ENDP_FMASK GENMASK(5, 1)
-/* The next field is not present for IPA v4.5+ */
-#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0+ */
-#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-
-/* The next register is not present for IPA v4.2 (which has no hashing support) */
-#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
- (0x0000085c + 0x0070 * (er))
-#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
-#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
-#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
-#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
-#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
-#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
-#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
-#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
-
-#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
-#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
-#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
-#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
-#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
-#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
-#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
-#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
-
-static inline u32 ipa_reg_irq_stts_ee_n_offset(enum ipa_version version,
- u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003008 + 0x1000 * ee;
-
- return 0x00004008 + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_stts_offset(enum ipa_version version)
-{
- return ipa_reg_irq_stts_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000300c + 0x1000 * ee;
-
- return 0x0000400c + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_en_offset(enum ipa_version version)
-{
- return ipa_reg_irq_en_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_clr_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003010 + 0x1000 * ee;
-
- return 0x00004010 + 0x1000 * ee;
-}
+/* ENDP_STATUS register */
+enum ipa_reg_endp_status_field_id {
+ STATUS_EN,
+ STATUS_ENDP,
+ STATUS_LOCATION, /* Not v4.5+ */
+ STATUS_PKT_SUPPRESS, /* v4.0+ */
+};
-static inline u32 ipa_reg_irq_clr_offset(enum ipa_version version)
-{
- return ipa_reg_irq_clr_ee_n_offset(version, GSI_EE_AP);
-}
+/* ENDP_FILTER_ROUTER_HSH_CFG register */
+enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
+ FILTER_HASH_MSK_SRC_ID,
+ FILTER_HASH_MSK_SRC_IP,
+ FILTER_HASH_MSK_DST_IP,
+ FILTER_HASH_MSK_SRC_PORT,
+ FILTER_HASH_MSK_DST_PORT,
+ FILTER_HASH_MSK_PROTOCOL,
+ FILTER_HASH_MSK_METADATA,
+ FILTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+
+ ROUTER_HASH_MSK_SRC_ID,
+ ROUTER_HASH_MSK_SRC_IP,
+ ROUTER_HASH_MSK_DST_IP,
+ ROUTER_HASH_MSK_SRC_PORT,
+ ROUTER_HASH_MSK_DST_PORT,
+ ROUTER_HASH_MSK_PROTOCOL,
+ ROUTER_HASH_MSK_METADATA,
+ ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+};
+/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
* @IPA_IRQ_UC_0: Microcontroller event interrupt
@@ -774,74 +649,82 @@ enum ipa_irq_id {
IPA_IRQ_COUNT, /* Last; not an id */
};
-static inline u32 ipa_reg_irq_uc_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000301c + 0x1000 * ee;
+/* IPA_IRQ_UC register */
+enum ipa_reg_ipa_irq_uc_field_id {
+ UC_INTR,
+};
- return 0x0000401c + 0x1000 * ee;
-}
+extern const struct ipa_regs ipa_regs_v3_1;
+extern const struct ipa_regs ipa_regs_v3_5_1;
+extern const struct ipa_regs ipa_regs_v4_2;
+extern const struct ipa_regs ipa_regs_v4_5;
+extern const struct ipa_regs ipa_regs_v4_9;
+extern const struct ipa_regs ipa_regs_v4_11;
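These per-version descriptor tables feed the ipa_reg() lookup declared further below; its body lives in a source file outside this hunk. As a rough sketch only — assuming ipa_reg_init() stores one of the tables above in a hypothetical ipa->regs pointer — the lookup might look like:

	/* Hypothetical sketch; the real ipa_reg() may differ */
	const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
	{
		const struct ipa_regs *regs = ipa->regs;	/* assumed member */

		if (WARN_ON(reg_id >= regs->reg_count))
			return NULL;

		return regs->reg[reg_id];
	}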
-static inline u32 ipa_reg_irq_uc_offset(enum ipa_version version)
+/* Return the field mask for a field in a register */
+static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
{
- return ipa_reg_irq_uc_ee_n_offset(version, GSI_EE_AP);
-}
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
-#define UC_INTR_FMASK GENMASK(0, 0)
+ return reg->fmask[field_id];
+}
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-static inline u32
-ipa_reg_irq_suspend_info_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the mask for a single-bit field in a register */
+static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
{
- if (version == IPA_VERSION_3_0)
- return 0x00003098 + 0x1000 * ee;
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003030 + 0x1000 * ee;
+ WARN_ON(!is_power_of_2(fmask));
- return 0x00004030 + 0x1000 * ee;
+ return fmask;
}
+/* Encode a value into the given field of a register */
static inline u32
-ipa_reg_irq_suspend_info_offset(enum ipa_version version)
+ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_info_ee_n_offset(version, GSI_EE_AP);
-}
+ u32 fmask = ipa_reg_fmask(reg, field_id);
-/* ipa->available defines the valid bits in the SUSPEND_EN register */
-static inline u32
-ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- WARN_ON(version == IPA_VERSION_3_0);
+ if (!fmask)
+ return 0;
- if (version < IPA_VERSION_4_9)
- return 0x00003034 + 0x1000 * ee;
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
- return 0x00004034 + 0x1000 * ee;
+ return val;
}
+/* Given a register value, decode (extract) the value in the given field */
static inline u32
-ipa_reg_irq_suspend_en_offset(enum ipa_version version)
+ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_en_ee_n_offset(version, GSI_EE_AP);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
}
-/* ipa->available defines the valid bits in the SUSPEND_CLR register */
-static inline u32
-ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
{
- WARN_ON(version == IPA_VERSION_3_0);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003038 + 0x1000 * ee;
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
- return 0x00004038 + 0x1000 * ee;
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
+{
+ return reg ? reg->offset : 0;
}
-static inline u32
-ipa_reg_irq_suspend_clr_offset(enum ipa_version version)
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
- return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP);
+ return reg ? reg->offset + n * reg->stride : 0;
}
int ipa_reg_init(struct ipa *ipa);
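Putting the pieces together, a read-modify-write of a single-bit field in a per-endpoint register looks like the following sketch (not part of the patch; ENDP_STATUS and STATUS_EN are the IDs defined earlier in this header, and ipa_reg_field_max() would similarly bound-check a multi-bit value before encoding it):

	static void example_status_enable(struct ipa *ipa, u32 endpoint_id,
					  bool enable)
	{
		const struct ipa_reg *reg = ipa_reg(ipa, ENDP_STATUS);
		u32 offset = ipa_reg_n_offset(reg, endpoint_id);
		u32 val;

		val = ioread32(ipa->reg_virt + offset);
		val &= ~ipa_reg_bit(reg, STATUS_EN);	/* clear the bit... */
		if (enable)
			val |= ipa_reg_bit(reg, STATUS_EN);	/* ...or set it */
		iowrite32(val, ipa->reg_virt + offset);
	}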
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 06cec7199382..a257f0e5e361 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -69,20 +69,21 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
}
static void
-ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
+ const struct ipa_reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
- val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -91,34 +92,35 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_src[resource_type];
- offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
@@ -127,34 +129,35 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_dst[resource_type];
- offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
/* Configure resources; there is no ipa_resource_deconfig() */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 211233612039..5620dc271fac 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 59cee31a7383..9b969b03d1a4 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SMP2P_H_
#define _IPA_SMP2P_H_
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index ff61dbdd70d8..5cbc15a971f9 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2022 Linaro Ltd. */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -96,38 +96,71 @@ const struct attribute_group ipa_feature_attribute_group = {
.attrs = ipa_feature_attrs,
};
-static ssize_t
-ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+static umode_t ipa_endpoint_id_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+ struct ipa *ipa = dev_get_drvdata(kobj_to_dev(kobj));
+ struct device_attribute *dev_attr;
+ struct dev_ext_attribute *ea;
+ bool visible;
+
+ /* An endpoint id attribute is only visible if it's defined */
+ dev_attr = container_of(attr, struct device_attribute, attr);
+ ea = container_of(dev_attr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+ visible = !!ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
+
+ return visible ? attr->mode : 0;
}
-static ssize_t rx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t endpoint_id_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ struct ipa_endpoint *endpoint;
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(attr, struct dev_ext_attribute, attr);
+ endpoint = ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+ return sysfs_emit(buf, "%u\n", endpoint->endpoint_id);
}
-static DEVICE_ATTR_RO(rx_endpoint_id);
+#define ENDPOINT_ID_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_endpoint_id_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
-static ssize_t tx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ipa *ipa = dev_get_drvdata(dev);
+ENDPOINT_ID_ATTR(modem_rx, IPA_ENDPOINT_AP_MODEM_RX);
+ENDPOINT_ID_ATTR(modem_tx, IPA_ENDPOINT_AP_MODEM_TX);
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
-}
+static struct attribute *ipa_endpoint_id_attrs[] = {
+ &dev_attr_endpoint_id_modem_rx.attr.attr,
+ &dev_attr_endpoint_id_modem_tx.attr.attr,
+ NULL
+};
-static DEVICE_ATTR_RO(tx_endpoint_id);
+const struct attribute_group ipa_endpoint_id_attribute_group = {
+ .name = "endpoint_id",
+ .is_visible = ipa_endpoint_id_is_visible,
+ .attrs = ipa_endpoint_id_attrs,
+};
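The dev_ext_attribute pattern above lets one show function serve every endpoint ID file: each attribute carries its enum ipa_endpoint_name through the ->var pointer, and the is_visible callback suppresses files for endpoints the current configuration does not define. Assuming the standard sysfs layout, the new group would surface as endpoint_id/modem_rx and endpoint_id/modem_tx under the IPA device's sysfs directory.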
+
+/* Reuse endpoint ID attributes for the legacy modem endpoint IDs */
+#define MODEM_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_modem_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
+
+MODEM_ATTR(rx_endpoint_id, IPA_ENDPOINT_AP_MODEM_RX);
+MODEM_ATTR(tx_endpoint_id, IPA_ENDPOINT_AP_MODEM_TX);
static struct attribute *ipa_modem_attrs[] = {
- &dev_attr_rx_endpoint_id.attr,
- &dev_attr_tx_endpoint_id.attr,
- NULL
+ &dev_attr_modem_rx_endpoint_id.attr.attr,
+ &dev_attr_modem_tx_endpoint_id.attr.attr,
+ NULL,
};
const struct attribute_group ipa_modem_attribute_group = {
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index b34e5650bf8c..58ba22810bab 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SYSFS_H_
#define _IPA_SYSFS_H_
@@ -10,6 +10,7 @@ struct attribute_group;
extern const struct attribute_group ipa_attribute_group;
extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_endpoint_id_attribute_group;
extern const struct attribute_group ipa_modem_attribute_group;
#endif /* _IPA_SYSFS_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2f5a58bfc529..510ff2dc8999 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
@@ -386,8 +384,9 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ const struct ipa_reg *reg;
struct gsi_trans *trans;
+ u32 offset;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -399,8 +398,13 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
- val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
+
+ val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
ipa_cmd_register_write_add(trans, offset, val, val, false);
@@ -518,15 +522,18 @@ int ipa_table_setup(struct ipa *ipa)
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -567,13 +574,17 @@ static bool ipa_route_id_modem(u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, route_id);
+
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
iowrite32(val, ipa->reg_virt + offset);
}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index b6a9a0d79d68..395189f75d78 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 856e55a080a7..f0ee47281015 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -11,6 +11,7 @@
#include "ipa.h"
#include "ipa_uc.h"
+#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
@@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
+ ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
@@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (ipa->uc_loaded)
+ ipa_power_retention(ipa, false);
+
if (!ipa->uc_powered)
return;
@@ -217,7 +222,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* Fill in the command data */
@@ -228,9 +233,10 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
shared->response_param = 0;
/* Use an interrupt to tell the microcontroller the command is ready */
- val = u32_encode_bits(1, UC_INTR_FMASK);
- offset = ipa_reg_irq_uc_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_UC);
+ val = ipa_reg_bit(reg, UC_INTR);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 23847f934d64..8514096e6f36 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_UC_H_
#define _IPA_UC_H_
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 6c16c895d842..7870e0cc3d7c 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_VERSION_H_
#define _IPA_VERSION_H_
@@ -19,10 +19,10 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
- * Please update ipa_version_valid() and ipa_version_string() whenever a
- * new version is added.
+ * Please update ipa_version_string() whenever a new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,
@@ -36,6 +36,30 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
+ IPA_VERSION_COUNT, /* Last; not a version */
+};
+
+static inline bool ipa_version_supported(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
new file mode 100644
index 000000000000..0d002c3c38a2
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ [EOT_COAL_GRANULARITY] = GENMASK(3, 0),
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
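Each *_fmask[] array above maps a per-register field ID to its BIT()/GENMASK() mask, and IPA_REG_FIELDS()/IPA_REG_STRIDE_FIELDS() bind the array to a register offset (and stride). The patch's real accessors live in ipa_reg.h, which is not shown in this hunk; assuming the standard <linux/bitfield.h> helpers, encoding a field value reduces to a sketch like this:

	#include <linux/bitfield.h>

	/* Illustrative only: build a v3.1 ROUTE register value using the
	 * mask declared above (ROUTE_DEF_PIPE occupies bits 5:1).
	 */
	static u32 route_val_for_pipe(u32 pipe)
	{
		return u32_encode_bits(pipe, GENMASK(5, 1));
	}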
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
new file mode 100644
index 000000000000..6e2f939b18f1
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x00000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ [TX0_PREFETCH_DISABLE] = BIT(0),
+ [TX1_PREFETCH_DISABLE] = BIT(1),
+ [PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
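With one exported `const struct ipa_regs ipa_regs_vX_Y` per file, selecting the active table is a simple switch on the hardware version. The actual selection code is outside these hunks; a hedged sketch of the shape it likely takes, using only names defined by this patch:

	/* Hypothetical dispatch; only the externs are from the patch */
	static const struct ipa_regs *ipa_regs_lookup(enum ipa_version version)
	{
		switch (version) {
		case IPA_VERSION_3_1:
			return &ipa_regs_v3_1;
		case IPA_VERSION_3_5_1:
			return &ipa_regs_v3_5_1;
		default:
			return NULL;	/* see ipa_version_supported() */
		}
	}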
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
new file mode 100644
index 000000000000..8fd36569bb9f
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(23, 22),
+ /* Bits 24-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x00000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
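The 0x0070 stride passed to IPA_REG_STRIDE_FIELDS() spaces each endpoint's copy of a register, so ENDP_STATUS for endpoint N sits at 0x840 + N * 0x70. A standalone sketch of that arithmetic; the driver's real offset helper is defined in ipa_reg.h, not in this diff:

	/* Offset of the n'th ENDP_STATUS register on IPA v4.11 */
	static u32 endp_status_offset(u32 endpoint_id)
	{
		return 0x00000840 + endpoint_id * 0x0070;
	}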
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
new file mode 100644
index 000000000000..f8e78e1907c8
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x00000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ /* Bit 17 reserved */
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ [SSPND_PA_NO_BQ_STATE] = BIT(19),
+ /* Bits 20-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_SCALE] = GENMASK(12, 8),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
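
Each fmask[] table above maps a symbolic field ID to the bit mask it occupies within a 32-bit register, and the IPA_REG*() macros bind a table to its offset (and stride, for per-endpoint registers). A minimal sketch of how such a table can be consumed, using only the generic <linux/bitfield.h> helpers; the driver itself wraps this in its own ipa_reg accessors, so names here are illustrative:

/* Illustrative only: pack a value into the ROUTE_DEF_PIPE field
 * (bits 5-1 of the ROUTE register) via its fmask entry.
 * u32_encode_bits() accepts a runtime mask, unlike FIELD_PREP(),
 * which is what makes table-driven field definitions like these work.
 */
#include <linux/bitfield.h>

static u32 route_encode_def_pipe(u32 pipe)
{
	return u32_encode_bits(pipe, ipa_reg_route_fmask[ROUTE_DEF_PIPE]);
}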
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
new file mode 100644
index 000000000000..d32b805abb11
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x00000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ /* Bits 18-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
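
The IPA_REG_STRIDE_FIELDS() entries describe per-endpoint registers spaced at a fixed stride (0x0070 bytes in this generation). A sketch of the offset arithmetic, with a hypothetical helper name:

/* Hypothetical helper: the n-th endpoint's copy of a strided register.
 * E.g. ENDP_INIT_CFG: endpoint 0 at 0x00000808, endpoint 1 at
 * 0x00000878, endpoint 2 at 0x000008e8, and so on.
 */
static u32 endp_reg_offset(u32 base, u32 stride, u32 endpoint_id)
{
	return base + stride * endpoint_id;
}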
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
new file mode 100644
index 000000000000..eabbc5451937
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(24, 22),
+ /* Bits 25-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x00000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
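
Note that IPA v4.9 moves the interrupt registers from the 0x3000 range used by v4.2/v4.5 above to 0x4000, while keeping the per-execution-environment layout: each GSI EE owns a 0x1000-byte window. A sketch of that arithmetic (treating GSI_EE_AP as 0, which holds for the platforms this driver targets but is an assumption worth checking against gsi.h):

/* Hypothetical helper: IRQ register offset for a given execution
 * environment.  With ee_id == 0 (the AP), v4.9's IPA_IRQ_STTS lands
 * at 0x00004008; the modem EE's copy would sit 0x1000 higher.
 */
static u32 ipa_irq_reg_offset(u32 base, u32 ee_id)
{
	return base + 0x1000 * ee_id;
}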
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 3837c897832e..de94921cbef9 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -47,11 +47,11 @@ typedef enum {
} ipvl_hdr_type;
struct ipvl_pcpu_stats {
- u64 rx_pkts;
- u64 rx_bytes;
- u64 rx_mcast;
- u64 tx_pkts;
- u64 tx_bytes;
+ u64_stats_t rx_pkts;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_mcast;
+ u64_stats_t tx_pkts;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errs;
u32 tx_drps;
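
The switch from plain u64 counters to u64_stats_t matters on 32-bit machines, where a 64-bit load can tear; u64_stats_t pairs with the u64_stats_sync seqcount so readers can detect a concurrent update. A minimal sketch of the writer side, assuming the ipvl_pcpu_stats layout above:

#include <linux/u64_stats_sync.h>

/* Writer side: essentially free on 64-bit (the seqcount ops compile
 * away there), a seqcount bump on 32-bit.  Readers loop on
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq(), as
 * ipvlan_get_stats64() does below.
 */
static void ipvl_count_rx_sketch(struct ipvl_pcpu_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_pkts);
	u64_stats_add(&s->rx_bytes, len);
	u64_stats_update_end(&s->syncp);
}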
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c613900c3811..bb1c298c1e78 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -19,10 +19,10 @@ void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
u64_stats_update_begin(&pcptr->syncp);
- pcptr->rx_pkts++;
- pcptr->rx_bytes += len;
+ u64_stats_inc(&pcptr->rx_pkts);
+ u64_stats_add(&pcptr->rx_bytes, len);
if (mcast)
- pcptr->rx_mcast++;
+ u64_stats_inc(&pcptr->rx_mcast);
u64_stats_update_end(&pcptr->syncp);
} else {
this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we don't care about
* multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -555,7 +556,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port,
schedule_work(&port->wq);
} else {
spin_unlock(&port->backlog.lock);
- atomic_long_inc(&skb->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb(skb);
}
}
@@ -589,7 +590,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
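
Two related fixes above hinge on where the Ethernet header actually is on the transmit path; both helpers are existing <linux/if_ether.h> API:

/* eth_hdr(skb)     reads skb->head + skb->mac_header, which is only
 *                  reliable once the mac header offset has been set;
 * skb_eth_hdr(skb) reads skb->data, the start of the frame being sent.
 * Hence ipvlan_xmit_mode_l2() now uses skb_eth_hdr(), and the multicast
 * path calls skb_reset_mac_header() before enqueueing, so the deferred
 * worker's later eth_hdr() lookups see a valid offset.
 */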
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 696e245f6d00..54c94a69c2bb 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -139,8 +139,7 @@ static int ipvlan_init(struct net_device *dev)
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
- netif_set_gso_max_size(dev, phy_dev->gso_max_size);
- netif_set_gso_max_segs(dev, phy_dev->gso_max_segs);
+ netif_inherit_tso_max(dev, phy_dev);
dev->hard_header_len = phy_dev->hard_header_len;
netdev_lockdep_set_classes(dev);
@@ -225,8 +224,8 @@ static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
u64_stats_update_begin(&pcptr->syncp);
- pcptr->tx_pkts++;
- pcptr->tx_bytes += skblen;
+ u64_stats_inc(&pcptr->tx_pkts);
+ u64_stats_add(&pcptr->tx_bytes, skblen);
u64_stats_update_end(&pcptr->syncp);
} else {
this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
@@ -301,11 +300,11 @@ static void ipvlan_get_stats64(struct net_device *dev,
pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
do {
strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
- rx_pkts = pcptr->rx_pkts;
- rx_bytes = pcptr->rx_bytes;
- rx_mcast = pcptr->rx_mcast;
- tx_pkts = pcptr->tx_pkts;
- tx_bytes = pcptr->tx_bytes;
+ rx_pkts = u64_stats_read(&pcptr->rx_pkts);
+ rx_bytes = u64_stats_read(&pcptr->rx_bytes);
+ rx_mcast = u64_stats_read(&pcptr->rx_mcast);
+ tx_pkts = u64_stats_read(&pcptr->tx_pkts);
+ tx_bytes = u64_stats_read(&pcptr->tx_bytes);
} while (u64_stats_fetch_retry_irq(&pcptr->syncp,
strt));
@@ -316,8 +315,8 @@ static void ipvlan_get_stats64(struct net_device *dev,
s->tx_bytes += tx_bytes;
/* u32 values are updated without syncp protection. */
- rx_errs += pcptr->rx_errs;
- tx_drps += pcptr->tx_drps;
+ rx_errs += READ_ONCE(pcptr->rx_errs);
+ tx_drps += READ_ONCE(pcptr->tx_drps);
}
s->rx_errors = rx_errs;
s->rx_dropped = rx_errs;
@@ -409,8 +408,8 @@ static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
@@ -762,8 +761,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- netif_set_gso_max_size(ipvlan->dev, dev->gso_max_size);
- netif_set_gso_max_segs(ipvlan->dev, dev->gso_max_segs);
+ netif_inherit_tso_max(ipvlan->dev, dev);
netdev_update_features(ipvlan->dev);
}
break;
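
netif_inherit_tso_max() replaces the netif_set_gso_max_size()/netif_set_gso_max_segs() pair used here before:

/* netif_inherit_tso_max(upper, lower) copies lower->tso_max_size and
 * lower->tso_max_segs to the upper device in one step, so the two TSO
 * limits cannot be inherited inconsistently.
 */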
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index ef02f2cf5ce1..cbabca167a07 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
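
The added __init/__exit annotations let the kernel discard this code once it can no longer run; the usual pairing, as a minimal sketch:

#include <linux/module.h>

static int __init example_init(void)	/* placed in .init.text, freed after init */
{
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)	/* dropped entirely when built-in */
{
}
module_exit(example_exit);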
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ed0edf5884ef..14e8d04cb434 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -74,11 +74,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* do not fool net_timestamp_check() with various clock bases */
- skb->tstamp = 0;
+ skb_clear_tstamp(skb);
skb_orphan(skb);
- /* Before queueing this packet to netif_rx(),
+ /* Before queueing this packet to __netif_rx(),
* make sure dst is refcounted.
*/
skb_dst_force(skb);
@@ -86,7 +86,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
dev_lstats_add(dev, len);
return NETDEV_TX_OK;
@@ -191,6 +191,8 @@ static void gen_lo_setup(struct net_device *dev,
dev->netdev_ops = dev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = dev_destructor;
+
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}
/* The loopback device is special. There is only one instance
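
The netif_rx() to __netif_rx() conversions here (and in macsec below) rely on the caller's context:

/* Since the 5.18-era rework, netif_rx() is essentially __netif_rx()
 * bracketed by local_bh_disable()/local_bh_enable().  The
 * ndo_start_xmit path already runs with bottom halves disabled, so
 * loopback_xmit() can call __netif_rx() directly and skip the
 * redundant BH bookkeeping.
 */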
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 16aa3a478e9e..85376d2f24ca 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -18,14 +18,13 @@
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
+#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
#include <uapi/linux/if_macsec.h>
-#define MACSEC_SCI_LEN 8
-
/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
@@ -46,20 +45,10 @@ struct macsec_eth_header {
u8 secure_channel_id[8]; /* optional */
} __packed;
-#define MACSEC_TCI_VERSION 0x80
-#define MACSEC_TCI_ES 0x40 /* end station */
-#define MACSEC_TCI_SC 0x20 /* SCI present */
-#define MACSEC_TCI_SCB 0x10 /* epon */
-#define MACSEC_TCI_E 0x08 /* encryption */
-#define MACSEC_TCI_C 0x04 /* changed text */
-#define MACSEC_AN_MASK 0x03 /* association number */
-#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
-
/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48
#define GCM_AES_IV_LEN 12
-#define DEFAULT_ICV_LEN 16
#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
@@ -99,6 +88,7 @@ struct pcpu_secy_stats {
* struct macsec_dev - private data
* @secy: SecY config
* @real_dev: pointer to underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
* @gro_cells: pointer to the Generic Receive Offload cell
@@ -107,6 +97,7 @@ struct pcpu_secy_stats {
struct macsec_dev {
struct macsec_secy secy;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
@@ -160,6 +151,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+ struct macsec_rx_sa *sa = NULL;
+ int an;
+
+ for (an = 0; an < MACSEC_NUM_AN; an++) {
+ sa = macsec_rxsa_get(rx_sc->sa[an]);
+ if (sa)
+ break;
+ }
+ return sa;
+}
+
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -228,7 +232,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
return (struct macsec_cb *)skb->cb;
}
-#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
@@ -241,14 +244,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
-
-static bool send_sci(const struct macsec_secy *secy)
-{
- const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
-
- return tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
-}
+#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)
static sci_t make_sci(const u8 *addr, __be16 port)
{
@@ -314,7 +310,7 @@ static void macsec_fill_sectag(struct macsec_eth_header *h,
/* with GCM, C/E clear for !encrypt, both set for encrypt */
if (tx_sc->encrypt)
h->tci_an |= MACSEC_TCI_CONFID;
- else if (secy->icv_len != DEFAULT_ICV_LEN)
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
h->tci_an |= MACSEC_TCI_C;
h->tci_an |= tx_sc->encoding_sa;
@@ -446,11 +442,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -497,18 +488,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+ struct macsec_dev *macsec = macsec_priv(skb->dev);
+ struct macsec_secy *secy = &macsec->secy;
+ bool sci_present = macsec_skb_cb(skb)->has_sci;
+
+ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
+
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+ unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
- txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
- txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -521,8 +522,8 @@ static void count_tx(struct net_device *dev, int ret, int len)
struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, len);
u64_stats_update_end(&stats->syncp);
}
}
@@ -538,9 +539,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
- macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
- len = skb->len;
+ /* packet is encrypted/protected so tx_bytes must be calculated */
+ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+ macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -632,7 +634,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- sci_present = send_sci(secy);
+ sci_present = macsec_send_sci(secy);
hh = skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
@@ -699,6 +701,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+ macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -740,15 +743,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_dropped++;
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
- rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
- rxsc_stats->stats.InOctetsValidated += skb->len;
+ rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -761,6 +766,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+ this_cpu_inc(rx_sa->stats->InPktsNotValid);
+ secy->netdev->stats.rx_errors++;
return false;
}
@@ -823,8 +830,8 @@ static void count_rx(struct net_device *dev, int len)
struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
}
@@ -853,9 +860,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
- len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -997,11 +1004,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
struct ethhdr *hdr = eth_hdr(skb);
+ struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ md_dst = skb_metadata_dst(skb);
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1012,6 +1021,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+ if (md_dst && md_dst->type == METADATA_MACSEC &&
+ (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
+ continue;
+
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1033,7 +1046,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
else
nskb->pkt_type = PACKET_MULTICAST;
- netif_rx(nskb);
+ __netif_rx(nskb);
}
continue;
}
@@ -1046,6 +1059,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
continue;
}
@@ -1056,7 +1070,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
nskb->dev = ndev;
- if (netif_rx(nskb) == NET_RX_SUCCESS) {
+ if (__netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1155,6 +1169,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+ secy->netdev->stats.rx_errors++;
goto drop_nosa;
}
@@ -1165,11 +1180,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
+
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_errors++;
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1179,6 +1198,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1199,6 +1220,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
goto drop;
}
}
@@ -1227,6 +1249,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1234,7 +1257,6 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
- len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1260,7 +1282,7 @@ nosci:
/* 10.6.1 if the SC is not found */
cbit = !!(hdr->tci_an & MACSEC_TCI_C);
if (!cbit)
- macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
+ macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
@@ -1276,6 +1298,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_errors++;
continue;
}
@@ -1288,7 +1311,7 @@ nosci:
macsec_reset_skb(nskb, macsec->secy.netdev);
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
if (ret == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUnknownSCI++;
@@ -1390,7 +1413,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
return NULL;
}
-static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
+static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
+ bool active)
{
struct macsec_rx_sc *rx_sc;
struct macsec_dev *macsec;
@@ -1414,7 +1438,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
}
rx_sc->sci = sci;
- rx_sc->active = true;
+ rx_sc->active = active;
refcount_set(&rx_sc->refcnt, 1);
secy = &macsec_priv(dev)->secy;
@@ -1640,22 +1664,8 @@ static int macsec_offload(int (* const func)(struct macsec_context *),
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_lock(&ctx->phydev->lock);
- /* Phase I: prepare. The drive should fail here if there are going to be
- * issues in the commit phase.
- */
- ctx->prepare = true;
ret = (*func)(ctx);
- if (ret)
- goto phy_unlock;
- /* Phase II: commit. This step cannot fail. */
- ctx->prepare = false;
- ret = (*func)(ctx);
- /* This should never happen: commit is not allowed to fail */
- if (unlikely(ret))
- WARN(1, "MACsec offloading commit failed (%d)\n", ret);
-
-phy_unlock:
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_unlock(&ctx->phydev->lock);
@@ -1695,7 +1705,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
return false;
if (attrs[MACSEC_SA_ATTR_PN] &&
- *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1751,7 +1761,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}
pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
- if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ if (tb_sa[MACSEC_SA_ATTR_PN] &&
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
rtnl_unlock();
@@ -1767,7 +1778,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -1804,6 +1815,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rx_sa->sc = rx_sc;
+ if (secy->xpn) {
+ rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@@ -1822,16 +1839,11 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
secy->key_len);
err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ memzero_explicit(ctx.sa.key, secy->key_len);
if (err)
goto cleanup;
}
- if (secy->xpn) {
- rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
- nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
- MACSEC_SALT_LEN);
- }
-
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@@ -1840,7 +1852,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return 0;
cleanup:
- kfree(rx_sa);
+ macsec_rxsa_put(rx_sa);
rtnl_unlock();
return err;
}
@@ -1866,7 +1878,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct macsec_secy *secy;
- bool was_active;
+ bool active = true;
int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1888,16 +1900,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
- rx_sc = create_rx_sc(dev, sci);
+ if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+ active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+ rx_sc = create_rx_sc(dev, sci, active);
if (IS_ERR(rx_sc)) {
rtnl_unlock();
return PTR_ERR(rx_sc);
}
- was_active = rx_sc->active;
- if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
- rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
-
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
struct macsec_context ctx;
@@ -1921,7 +1932,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return 0;
cleanup:
- rx_sc->active = was_active;
+ del_rx_sc(secy, sci);
+ free_rx_sc(rx_sc);
rtnl_unlock();
return ret;
}
@@ -1937,7 +1949,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2009,7 +2021,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -2046,6 +2058,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
+ if (secy->xpn) {
+ tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@@ -2064,16 +2082,11 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
secy->key_len);
err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ memzero_explicit(ctx.sa.key, secy->key_len);
if (err)
goto cleanup;
}
- if (secy->xpn) {
- tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
- nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
- MACSEC_SALT_LEN);
- }
-
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
@@ -2083,7 +2096,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
secy->operational = was_operational;
- kfree(tx_sa);
+ macsec_txsa_put(tx_sa);
rtnl_unlock();
return err;
}
@@ -2291,7 +2304,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2560,7 +2573,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
int i;
- if (secy->n_rx_sc > 0)
+ if (secy->rx_sc)
return true;
for (i = 0; i < MACSEC_NUM_AN; i++)
@@ -2644,11 +2657,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
if (ret)
goto rollback;
- /* Force features update, since they are different for SW MACSec and
- * HW offloading cases.
- */
- netdev_update_features(dev);
-
rtnl_unlock();
return 0;
@@ -3366,6 +3374,7 @@ static struct genl_family macsec_fam __ro_after_init = {
.module = THIS_MODULE,
.small_ops = macsec_genl_ops,
.n_small_ops = ARRAY_SIZE(macsec_genl_ops),
+ .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
@@ -3377,6 +3386,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
int ret, len;
if (macsec_is_offloaded(netdev_priv(dev))) {
+ struct metadata_dst *md_dst = secy->tx_sc.md_dst;
+
+ skb_dst_drop(skb);
+ dst_hold(&md_dst->dst);
+ skb_dst_set(skb, &md_dst->dst);
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
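The metadata dst attached here is how an offloading driver can later tell which SecY a transmitted skb belongs to. A hypothetical consumer-side sketch, using the macsec_info field that this patch populates further down (the helper name is made up):

#include <net/dst_metadata.h>

static sci_t example_tx_sci(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (md_dst && md_dst->type == METADATA_MACSEC)
		return md_dst->u.macsec_info.sci;
	return 0;
}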
@@ -3400,6 +3414,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
@@ -3410,22 +3425,14 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
- len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
}
-#define SW_MACSEC_FEATURES \
+#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-/* If h/w offloading is enabled, use real device features save for
- * VLAN_FEATURES - they require additional ops
- * HW_MACSEC - no reason to report it
- */
-#define REAL_DEV_FEATURES(dev) \
- ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
-
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3442,12 +3449,8 @@ static int macsec_dev_init(struct net_device *dev)
return err;
}
- if (macsec_is_offloaded(macsec)) {
- dev->features = REAL_DEV_FEATURES(real_dev);
- } else {
- dev->features = real_dev->features & SW_MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
- }
+ dev->features = real_dev->features & MACSEC_FEATURES;
+ dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
@@ -3459,6 +3462,9 @@ static int macsec_dev_init(struct net_device *dev)
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+ /* Get macsec's reference to real_dev */
+ netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
+
return 0;
}
@@ -3476,10 +3482,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
- if (macsec_is_offloaded(macsec))
- return REAL_DEV_FEATURES(real_dev);
-
- features &= (real_dev->features & SW_MACSEC_FEATURES) |
+ features &= (real_dev->features & MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
@@ -3615,7 +3618,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
eth_hw_addr_set(dev, addr->sa_data);
- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -3655,6 +3657,7 @@ static void macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
+ s->rx_errors = dev->stats.rx_errors;
}
static int macsec_get_iflink(const struct net_device *dev)
@@ -3701,9 +3704,13 @@ static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ if (macsec->secy.tx_sc.md_dst)
+ metadata_dst_free(macsec->secy.tx_sc.md_dst);
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
+ /* Get rid of the macsec's reference to real_dev */
+ netdev_put(macsec->real_dev, &macsec->dev_tracker);
}
static void macsec_setup(struct net_device *dev)
@@ -3738,9 +3745,6 @@ static int macsec_changelink_common(struct net_device *dev,
secy->operational = tx_sa && tx_sa->active;
}
- if (data[IFLA_MACSEC_WINDOW])
- secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
if (data[IFLA_MACSEC_ENCRYPT])
tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
@@ -3786,6 +3790,16 @@ static int macsec_changelink_common(struct net_device *dev,
}
}
+ if (data[IFLA_MACSEC_WINDOW]) {
+ secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+ /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+ * for XPN cipher suites */
+ if (secy->xpn &&
+ secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+ return -EINVAL;
+ }
+
return 0;
}
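For reference, the cap enforced above comes from IEEE 802.1AEbw-2013 clause 10.7.8, which bounds the replay window to 2^30 for extended-packet-number cipher suites. The corresponding definition, assumed to live elsewhere in this file, would be:

#define MACSEC_XPN_MAX_REPLAY_WINDOW (1 << 30)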
@@ -3815,7 +3829,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
ret = macsec_changelink_common(dev, data);
if (ret)
- return ret;
+ goto cleanup;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -3870,6 +3884,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
macsec_del_dev(macsec);
@@ -3884,18 +3910,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
- /* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
- const struct macsec_ops *ops;
- struct macsec_context ctx;
-
- ops = macsec_get_ops(netdev_priv(dev), &ctx);
- if (ops) {
- ctx.secy = &macsec->secy;
- macsec_offload(ops->mdo_del_secy, &ctx);
- }
- }
-
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -3944,6 +3958,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
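dev_to_sci() builds on make_sci(), which packs the 64-bit Secure Channel Identifier as the 6-byte MAC address followed by the 16-bit port number (IEEE 802.1AE). A sketch of that layout follows; it mirrors, but is not, the kernel helper:

#include <linux/etherdevice.h>

/* Illustrative: sci = MAC (6 bytes) || port (2 bytes, big endian) */
static sci_t example_make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
	return sci;
}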
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3959,6 +3978,13 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
return -ENOMEM;
}
+ secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!secy->tx_sc.md_dst) {
+ free_percpu(secy->tx_sc.stats);
+ free_percpu(macsec->stats);
+ return -ENOMEM;
+ }
+
if (sci == MACSEC_UNDEF_SCI)
sci = dev_to_sci(dev, MACSEC_PORT_ES);
@@ -3972,6 +3998,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
secy->xpn = DEFAULT_XPN;
secy->sci = sci;
+ secy->tx_sc.md_dst->u.macsec_info.sci = sci;
secy->tx_sc.active = true;
secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
@@ -3990,7 +4017,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
{
struct macsec_dev *macsec = macsec_priv(dev);
rx_handler_func_t *rx_handler;
- u8 icv_len = DEFAULT_ICV_LEN;
+ u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
int err, mtu;
sci_t sci;
@@ -4018,6 +4045,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
!macsec_check_offload(macsec->offload, macsec))
return -EOPNOTSUPP;
+ /* send_sci must be set to true when the transmit SCI is set explicitly */
+ if ((data && data[IFLA_MACSEC_SCI]) &&
+ (data && data[IFLA_MACSEC_INC_SCI])) {
+ u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
+
+ if (!send_sci)
+ return -EINVAL;
+ }
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
@@ -4105,7 +4141,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
u64 csid = MACSEC_DEFAULT_CIPHER_ID;
- u8 icv_len = DEFAULT_ICV_LEN;
+ u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
int flag;
bool es, scb, sci;
@@ -4117,7 +4153,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
- if (icv_len != DEFAULT_ICV_LEN) {
+ if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
char dummy_key[DEFAULT_SAK_LEN] = { 0 };
struct crypto_aead *dummy_tfm;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6ef5f77be4d0..578897aaada0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -285,7 +285,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (likely(nskb))
err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE) ?:
- netif_rx_ni(nskb);
+ netif_rx(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, true);
}
@@ -361,7 +361,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
- schedule_work(&port->bc_work);
+ queue_work(system_unbound_wq, &port->bc_work);
if (err)
goto free_nskb;
@@ -371,7 +371,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
free_nskb:
kfree_skb(nskb);
err:
- atomic_long_inc(&skb->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(skb->dev);
}
static void macvlan_flush_sources(struct macvlan_port *port,
@@ -410,7 +410,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
nskb->pkt_type = PACKET_HOST;
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
}
@@ -460,15 +460,17 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
*pskb = skb;
eth = eth_hdr(skb);
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
src = macvlan_hash_lookup(port, eth->h_source);
if (src && src->mode != MACVLAN_MODE_VEPA &&
src->mode != MACVLAN_MODE_BRIDGE) {
/* forward to original port. */
vlan = src;
ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
- netif_rx(skb);
+ __netif_rx(skb);
handle_res = RX_HANDLER_CONSUMED;
goto out;
}
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
@@ -571,8 +575,8 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->tx_packets++;
- pcpu_stats->tx_bytes += len;
+ u64_stats_inc(&pcpu_stats->tx_packets);
+ u64_stats_add(&pcpu_stats->tx_bytes, len);
u64_stats_update_end(&pcpu_stats->syncp);
} else {
this_cpu_inc(vlan->pcpu_stats->tx_dropped);
@@ -889,7 +893,7 @@ static void macvlan_set_lockdep_class(struct net_device *dev)
static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- const struct net_device *lowerdev = vlan->lowerdev;
+ struct net_device *lowerdev = vlan->lowerdev;
struct macvlan_port *port = vlan->port;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
@@ -900,8 +904,7 @@ static int macvlan_init(struct net_device *dev)
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->vlan_features |= ALWAYS_ON_OFFLOADS;
dev->hw_enc_features |= dev->features;
- netif_set_gso_max_size(dev, lowerdev->gso_max_size);
- netif_set_gso_max_segs(dev, lowerdev->gso_max_segs);
+ netif_inherit_tso_max(dev, lowerdev);
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -911,6 +914,9 @@ static int macvlan_init(struct net_device *dev)
port->count += 1;
+ /* Get macvlan's reference to lowerdev */
+ netdev_hold(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
+
return 0;
}
@@ -943,11 +949,11 @@ static void macvlan_dev_get_stats64(struct net_device *dev,
p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
- rx_packets = p->rx_packets;
- rx_bytes = p->rx_bytes;
- rx_multicast = p->rx_multicast;
- tx_packets = p->tx_packets;
- tx_bytes = p->tx_bytes;
+ rx_packets = u64_stats_read(&p->rx_packets);
+ rx_bytes = u64_stats_read(&p->rx_bytes);
+ rx_multicast = u64_stats_read(&p->rx_multicast);
+ tx_packets = u64_stats_read(&p->tx_packets);
+ tx_bytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
@@ -958,8 +964,8 @@ static void macvlan_dev_get_stats64(struct net_device *dev,
/* rx_errors & tx_dropped are u32, updated
* without syncp protection.
*/
- rx_errors += p->rx_errors;
- tx_dropped += p->tx_dropped;
+ rx_errors += READ_ONCE(p->rx_errors);
+ tx_dropped += READ_ONCE(p->tx_dropped);
}
stats->rx_errors = rx_errors;
stats->rx_dropped = rx_errors;
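This reader loop pairs with the writer-side conversion earlier in the patch; both sides must go through the u64_stats helpers so that 32-bit readers observe consistent 64-bit values. A minimal writer sketch, assuming the u64_stats_t fields this conversion introduces:

#include <linux/if_vlan.h>

static void example_count_tx(struct vlan_pcpu_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->tx_packets);
	u64_stats_add(&stats->tx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}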
@@ -1014,7 +1020,8 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
@@ -1036,8 +1043,8 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
@@ -1173,17 +1180,26 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_features_check = passthru_features_check,
};
+static void macvlan_dev_free(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ /* Get rid of the macvlan's reference to lowerdev */
+ netdev_put(vlan->lowerdev, &vlan->dev_tracker);
+}
+
void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->min_mtu = 0;
+ /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
+ dev->priv_destructor = macvlan_dev_free;
dev->header_ops = &macvlan_hard_header_ops;
dev->ethtool_ops = &macvlan_ethtool_ops;
}
@@ -1517,8 +1533,10 @@ destroy_macvlan_port:
/* the macvlan port may be freed by macvlan_uninit when fail to register.
* so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(lowerdev))
+ if (create && macvlan_port_get_rtnl(lowerdev)) {
+ macvlan_flush_sources(port, vlan);
macvlan_port_destroy(port->dev);
+ }
return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
@@ -1747,8 +1765,7 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- netif_set_gso_max_size(vlan->dev, dev->gso_max_size);
- netif_set_gso_max_segs(vlan->dev, dev->gso_max_segs);
+ netif_inherit_tso_max(vlan->dev, dev);
netdev_update_features(vlan->dev);
}
break;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6b12902a803f..d1f435788e90 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev)
dev->tx_queue_len = TUN_READQ_SIZE;
}
+static struct net *macvtap_link_net(const struct net_device *dev)
+{
+ return dev_net(macvlan_dev_real_dev(dev));
+}
+
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
.kind = "macvtap",
.setup = macvtap_setup,
.newlink = macvtap_newlink,
.dellink = macvtap_dellink,
+ .get_link_net = macvtap_link_net,
.priv_size = sizeof(struct macvtap_dev),
};
@@ -201,7 +207,7 @@ static struct notifier_block macvtap_notifier_block __read_mostly = {
.notifier_call = macvtap_device_event,
};
-static int macvtap_init(void)
+static int __init macvtap_init(void)
{
int err;
@@ -235,7 +241,7 @@ out1:
}
module_init(macvtap_init);
-static void macvtap_exit(void)
+static void __exit macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
unregister_netdevice_notifier(&macvtap_notifier_block);
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index 2929471395ae..dc71657d9184 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -21,6 +21,18 @@ config MCTP_SERIAL
Say y here if you need to connect to MCTP endpoints over serial. To
compile as a module, use m; the module will be called mctp-serial.
+config MCTP_TRANSPORT_I2C
+ tristate "MCTP SMBus/I2C transport"
+ # i2c-mux is optional, but we must build as a module if i2c-mux is a module
+ depends on I2C_MUX || !I2C_MUX
+ depends on I2C
+ depends on I2C_SLAVE
+ select MCTP_FLOWS
+ help
+ Provides a driver to access MCTP devices over SMBus/I2C transport,
+ from DMTF specification DSP0237. An MCTP protocol network device is
+ created for each I2C bus that has been assigned an mctp-i2c device.
+
endmenu
endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index d32622613ce4..1ca3e6028f77 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
+obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
new file mode 100644
index 000000000000..0762c735dd8a
--- /dev/null
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -0,0 +1,1080 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Controller Transport Protocol (MCTP)
+ * Implements DMTF specification
+ * "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C
+ * Transport Binding"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf
+ *
+ * A netdev is created for each I2C bus that handles MCTP. In the case of an I2C
+ * mux topology, a single I2C client is attached to the root of the mux topology,
+ * shared between all mux I2C busses underneath. For non-mux cases an I2C client
+ * is attached per netdev.
+ *
+ * The mctp-i2c-controller.yml devicetree binding has further details.
+ *
+ * Copyright (c) 2022 Code Construct
+ * Copyright (c) 2022 Google
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/if_arp.h>
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+
+/* byte_count is limited to u8 */
+#define MCTP_I2C_MAXBLOCK 255
+/* One byte is taken by source_slave */
+#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1)
+#define MCTP_I2C_MINMTU (64 + 4)
+/* Allow space for dest_address, command, byte_count, data, PEC */
+#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1)
+#define MCTP_I2C_MINLEN 8
+#define MCTP_I2C_COMMANDCODE 0x0f
+#define MCTP_I2C_TX_WORK_LEN 100
+/* Sufficient for 64kB at min mtu */
+#define MCTP_I2C_TX_QUEUE_LEN 1100
+
+#define MCTP_I2C_OF_PROP "mctp-controller"
+
+enum {
+ MCTP_I2C_FLOW_STATE_NEW = 0,
+ MCTP_I2C_FLOW_STATE_ACTIVE,
+};
+
+/* List of all struct mctp_i2c_client
+ * Lock protects driver_clients and also prevents adding/removing adapters
+ * during mctp_i2c_client probe/remove.
+ */
+static DEFINE_MUTEX(driver_clients_lock);
+static LIST_HEAD(driver_clients);
+
+struct mctp_i2c_client;
+
+/* The netdev structure. One of these per I2C adapter. */
+struct mctp_i2c_dev {
+ struct net_device *ndev;
+ struct i2c_adapter *adapter;
+ struct mctp_i2c_client *client;
+ struct list_head list; /* For mctp_i2c_client.devs */
+
+ size_t rx_pos;
+ u8 rx_buffer[MCTP_I2C_BUFSZ];
+ struct completion rx_done;
+
+ struct task_struct *tx_thread;
+ wait_queue_head_t tx_wq;
+ struct sk_buff_head tx_queue;
+ u8 tx_scratch[MCTP_I2C_BUFSZ];
+
+ /* A fake entry in our tx queue to perform an unlock operation */
+ struct sk_buff unlock_marker;
+
+ /* Spinlock protects i2c_lock_count, release_count, allow_rx */
+ spinlock_t lock;
+ int i2c_lock_count;
+ int release_count;
+ /* Indicates that the netif is ready to receive incoming packets */
+ bool allow_rx;
+
+};
+
+/* The i2c client structure. One per hardware i2c bus at the top of the
+ * mux tree, shared by multiple netdevs
+ */
+struct mctp_i2c_client {
+ struct i2c_client *client;
+ u8 lladdr;
+
+ struct mctp_i2c_dev *sel;
+ struct list_head devs;
+ spinlock_t sel_lock; /* Protects sel and devs */
+
+ struct list_head list; /* For driver_clients */
+};
+
+/* Header on the wire. */
+struct mctp_i2c_hdr {
+ u8 dest_slave;
+ u8 command;
+ /* Count of bytes following byte_count, excluding PEC */
+ u8 byte_count;
+ u8 source_slave;
+};
+
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev);
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+static void mctp_i2c_ndo_uninit(struct net_device *dev);
+static int mctp_i2c_ndo_open(struct net_device *dev);
+
+static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap)
+{
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ return i2c_root_adapter(&adap->dev);
+#else
+ /* In non-mux config all i2c adapters are root adapters */
+ return adap;
+#endif
+}
+
+/* Creates a new i2c slave device attached to the root adapter.
+ * Sets up the slave callback.
+ * Must be called with a client on a root adapter.
+ */
+static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ struct i2c_adapter *root = NULL;
+ int rc;
+
+ if (client->flags & I2C_CLIENT_TEN) {
+ dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n",
+ client->addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ root = mux_root_adapter(client->adapter);
+ if (!root) {
+ dev_err(&client->dev, "failed to find root adapter\n");
+ rc = -ENOENT;
+ goto err;
+ }
+ if (root != client->adapter) {
+ dev_err(&client->dev,
+ "A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n"
+ " It should be placed on the mux tree root adapter\n"
+ " then set mctp-controller property on adapters to attach\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ mcli = kzalloc(sizeof(*mcli), GFP_KERNEL);
+ if (!mcli) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ spin_lock_init(&mcli->sel_lock);
+ INIT_LIST_HEAD(&mcli->devs);
+ INIT_LIST_HEAD(&mcli->list);
+ mcli->lladdr = client->addr & 0xff;
+ mcli->client = client;
+ i2c_set_clientdata(client, mcli);
+
+ rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb);
+ if (rc < 0) {
+ dev_err(&client->dev, "i2c register failed %d\n", rc);
+ mcli->client = NULL;
+ i2c_set_clientdata(client, NULL);
+ goto err;
+ }
+
+ return mcli;
+err:
+ if (mcli) {
+ if (mcli->client)
+ i2c_unregister_device(mcli->client);
+ kfree(mcli);
+ }
+ return ERR_PTR(rc);
+}
+
+static void mctp_i2c_free_client(struct mctp_i2c_client *mcli)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ WARN_ON(!list_empty(&mcli->devs));
+ WARN_ON(mcli->sel); /* sanity check, no locking */
+
+ rc = i2c_slave_unregister(mcli->client);
+ /* Leak if it fails, we can't propagate errors upwards */
+ if (rc < 0)
+ dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc);
+ else
+ kfree(mcli);
+}
+
+/* Switch the mctp i2c device to receive responses.
+ * Call with sel_lock held
+ */
+static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ assert_spin_locked(&mcli->sel_lock);
+ if (midev)
+ dev_hold(midev->ndev);
+ if (mcli->sel)
+ dev_put(mcli->sel->ndev);
+ mcli->sel = midev;
+}
+
+/* Switch the mctp i2c device to receive responses */
+static void mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+}
+
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ midev = mcli->sel;
+ if (midev)
+ dev_hold(midev->ndev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (!midev)
+ return 0;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (midev->rx_pos < MCTP_I2C_BUFSZ) {
+ midev->rx_buffer[midev->rx_pos] = *val;
+ midev->rx_pos++;
+ } else {
+ midev->ndev->stats.rx_over_errors++;
+ }
+
+ break;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ /* dest_slave as first byte */
+ midev->rx_buffer[0] = mcli->lladdr << 1;
+ midev->rx_pos = 1;
+ break;
+ case I2C_SLAVE_STOP:
+ rc = mctp_i2c_recv(midev);
+ break;
+ default:
+ break;
+ }
+
+ dev_put(midev->ndev);
+ return rc;
+}
+
+/* Processes incoming data that has been accumulated by the slave cb */
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
+{
+ struct net_device *ndev = midev->ndev;
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u8 pec, calc_pec;
+ size_t recvlen;
+ int status;
+
+ /* + 1 for the PEC */
+ if (midev->rx_pos < MCTP_I2C_MINLEN + 1) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+ /* recvlen excludes PEC */
+ recvlen = midev->rx_pos - 1;
+
+ hdr = (void *)midev->rx_buffer;
+ if (hdr->command != MCTP_I2C_COMMANDCODE) {
+ ndev->stats.rx_dropped++;
+ return -EINVAL;
+ }
+
+ if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ pec = midev->rx_buffer[midev->rx_pos - 1];
+ calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen);
+ if (pec != calc_pec) {
+ ndev->stats.rx_crc_errors++;
+ return -EINVAL;
+ }
+
+ skb = netdev_alloc_skb(ndev, recvlen);
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, midev->rx_buffer, recvlen);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 1;
+ cb->haddr[0] = hdr->source_slave >> 1;
+
+ /* We need to ensure that the netif is not used once netdev
+ * unregister occurs
+ */
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->allow_rx) {
+ reinit_completion(&midev->rx_done);
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ status = netif_rx(skb);
+ complete(&midev->rx_done);
+ } else {
+ status = NET_RX_DROP;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ }
+
+ if (status == NET_RX_SUCCESS) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += recvlen;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+ return 0;
+}
+
+enum mctp_i2c_flow_state {
+ MCTP_I2C_TX_FLOW_INVALID,
+ MCTP_I2C_TX_FLOW_NONE,
+ MCTP_I2C_TX_FLOW_NEW,
+ MCTP_I2C_TX_FLOW_EXISTING,
+};
+
+static enum mctp_i2c_flow_state
+mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ enum mctp_i2c_flow_state state;
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ key = flow->key;
+ if (!key)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ spin_lock_irqsave(&key->lock, flags);
+ /* If the key is present but invalid, we're unlikely to be able
+ * to handle the flow at all; just drop now
+ */
+ if (!key->valid) {
+ state = MCTP_I2C_TX_FLOW_INVALID;
+
+ } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) {
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
+ state = MCTP_I2C_TX_FLOW_NEW;
+ } else {
+ state = MCTP_I2C_TX_FLOW_EXISTING;
+ }
+
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ return state;
+}
+
+/* We're not contending with ourselves here; we only need to exclude other
+ * i2c clients from using the bus. refcounts are simply to prevent
+ * recursive locking.
+ */
+static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool lock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ lock = midev->i2c_lock_count == 0;
+ midev->i2c_lock_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (lock)
+ i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!"))
+ midev->i2c_lock_count--;
+ unlock = midev->i2c_lock_count == 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+/* Unlocks the bus if was previously locked, used for cleanup */
+static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ unlock = midev->i2c_lock_count > 0;
+ midev->i2c_lock_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &midev->ndev->stats;
+ enum mctp_i2c_flow_state fs;
+ struct mctp_i2c_hdr *hdr;
+ struct i2c_msg msg = {0};
+ u8 *pecp;
+ int rc;
+
+ fs = mctp_i2c_get_tx_flow_state(midev, skb);
+
+ hdr = (void *)skb_mac_header(skb);
+ /* Sanity check that packet contents match skb length,
+ * and can't exceed MCTP_I2C_BUFSZ
+ */
+ if (skb->len != hdr->byte_count + 3) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "Bad tx length %d vs skb %u\n",
+ hdr->byte_count + 3, skb->len);
+ return;
+ }
+
+ if (skb_tailroom(skb) >= 1) {
+ /* Linear case with space, we can just append the PEC */
+ skb_put(skb, 1);
+ } else {
+ /* Otherwise need to copy the buffer */
+ skb_copy_bits(skb, 0, midev->tx_scratch, skb->len);
+ hdr = (void *)midev->tx_scratch;
+ }
+
+ pecp = (void *)&hdr->source_slave + hdr->byte_count;
+ *pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3);
+ msg.buf = (void *)&hdr->command;
+ /* command, bytecount, data, pec */
+ msg.len = 2 + hdr->byte_count + 1;
+ msg.addr = hdr->dest_slave >> 1;
+
+ switch (fs) {
+ case MCTP_I2C_TX_FLOW_NONE:
+ /* no flow: full lock & unlock */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ mctp_i2c_unlock_nest(midev);
+ break;
+
+ case MCTP_I2C_TX_FLOW_NEW:
+ /* new flow: lock, tx, but don't unlock; that will happen
+ * on flow release
+ */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ fallthrough;
+
+ case MCTP_I2C_TX_FLOW_EXISTING:
+ /* existing flow: we already have the lock; just tx */
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ break;
+
+ case MCTP_I2C_TX_FLOW_INVALID:
+ return;
+ }
+
+ if (rc < 0) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "__i2c_transfer failed %d\n", rc);
+ stats->tx_errors++;
+ } else {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ }
+}
+
+static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->release_count > midev->i2c_lock_count) {
+ WARN_ONCE(1, "release count overflow");
+ midev->release_count = midev->i2c_lock_count;
+ }
+
+ midev->i2c_lock_count -= midev->release_count;
+ unlock = midev->i2c_lock_count == 0 && midev->release_count > 0;
+ midev->release_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_hdr *mhdr;
+ u8 lldst, llsrc;
+
+ if (len > MCTP_I2C_MAXMTU)
+ return -EMSGSIZE;
+
+ lldst = *((u8 *)daddr);
+ llsrc = *((u8 *)saddr);
+
+ skb_push(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_mac_header(skb);
+ hdr = (void *)skb_mac_header(skb);
+ mhdr = mctp_hdr(skb);
+ hdr->dest_slave = (lldst << 1) & 0xff;
+ hdr->command = MCTP_I2C_COMMANDCODE;
+ hdr->byte_count = len + 1;
+ hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
+ mhdr->ver = 0x01;
+
+ return sizeof(struct mctp_i2c_hdr);
+}
+
+static int mctp_i2c_tx_thread(void *data)
+{
+ struct mctp_i2c_dev *midev = data;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ for (;;) {
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ skb = __skb_dequeue(&midev->tx_queue);
+ if (netif_queue_stopped(midev->ndev))
+ netif_wake_queue(midev->ndev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ if (skb == &midev->unlock_marker) {
+ mctp_i2c_flow_release(midev);
+
+ } else if (skb) {
+ mctp_i2c_xmit(midev, skb);
+ kfree_skb(skb);
+
+ } else {
+ wait_event_idle(midev->tx_wq,
+ !skb_queue_empty(&midev->tx_queue) ||
+ kthread_should_stop());
+ }
+ }
+
+ return 0;
+}
+
+static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ __skb_queue_tail(&midev->tx_queue, skb);
+ if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN)
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ wake_up(&midev->tx_wq);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_i2c_release_flow(struct mctp_dev *mdev,
+ struct mctp_sk_key *key)
+
+{
+ struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->release_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ /* Ensure we have a release operation queued, through the fake
+ * marker skb
+ */
+ spin_lock(&midev->tx_queue.lock);
+ if (!midev->unlock_marker.next)
+ __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker);
+ spin_unlock(&midev->tx_queue.lock);
+
+ wake_up(&midev->tx_wq);
+}
+
+static const struct net_device_ops mctp_i2c_ops = {
+ .ndo_start_xmit = mctp_i2c_start_xmit,
+ .ndo_uninit = mctp_i2c_ndo_uninit,
+ .ndo_open = mctp_i2c_ndo_open,
+};
+
+static const struct header_ops mctp_i2c_headops = {
+ .create = mctp_i2c_header_create,
+};
+
+static const struct mctp_netdev_ops mctp_i2c_mctp_ops = {
+ .release_flow = mctp_i2c_release_flow,
+};
+
+static void mctp_i2c_net_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_I2C_MAXMTU;
+ dev->min_mtu = MCTP_I2C_MINMTU;
+ dev->max_mtu = MCTP_I2C_MAXMTU;
+ dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN;
+
+ dev->hard_header_len = sizeof(struct mctp_i2c_hdr);
+ dev->addr_len = 1;
+
+ dev->netdev_ops = &mctp_i2c_ops;
+ dev->header_ops = &mctp_i2c_headops;
+}
+
+/* Populates the mctp_i2c_dev priv struct for a netdev.
+ * Returns an error pointer on failure.
+ */
+static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev,
+ struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev,
+ "%s/tx", dev->name);
+ if (IS_ERR(midev->tx_thread))
+ return ERR_CAST(midev->tx_thread);
+
+ midev->ndev = dev;
+ get_device(&adap->dev);
+ midev->adapter = adap;
+ get_device(&mcli->client->dev);
+ midev->client = mcli;
+ INIT_LIST_HEAD(&midev->list);
+ spin_lock_init(&midev->lock);
+ midev->i2c_lock_count = 0;
+ midev->release_count = 0;
+ init_completion(&midev->rx_done);
+ complete(&midev->rx_done);
+ init_waitqueue_head(&midev->tx_wq);
+ skb_queue_head_init(&midev->tx_queue);
+
+ /* Add to the parent mcli */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_add(&midev->list, &mcli->devs);
+ /* Select a device by default */
+ if (!mcli->sel)
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ /* Start the worker thread */
+ wake_up_process(midev->tx_thread);
+
+ return midev;
+}
+
+/* Counterpart of mctp_i2c_midev_init */
+static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev)
+{
+ struct mctp_i2c_client *mcli = midev->client;
+ unsigned long flags;
+
+ if (midev->tx_thread) {
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+ }
+
+ /* Unconditionally unlock on close */
+ mctp_i2c_unlock_reset(midev);
+
+ /* Remove the netdev from the parent i2c client. */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_del(&midev->list);
+ if (mcli->sel == midev) {
+ struct mctp_i2c_dev *first;
+
+ first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list);
+ __mctp_i2c_device_select(mcli, first);
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ skb_queue_purge(&midev->tx_queue);
+ put_device(&midev->adapter->dev);
+ put_device(&mcli->client->dev);
+}
+
+/* Stops, unregisters, and frees midev */
+static void mctp_i2c_unregister(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ /* Stop tx thread prior to unregister, it uses netif_() functions */
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+
+ /* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ wait_for_completion(&midev->rx_done);
+
+ mctp_unregister_netdev(midev->ndev);
+ /* midev has been freed now by mctp_i2c_ndo_uninit callback */
+
+ free_netdev(midev->ndev);
+}
+
+static void mctp_i2c_ndo_uninit(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+
+ /* Perform cleanup here to ensure that mcli->sel isn't holding
+ * a reference that would prevent unregister_netdevice()
+ * from completing.
+ */
+ mctp_i2c_midev_free(midev);
+}
+
+static int mctp_i2c_ndo_open(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ /* i2c rx handler can only pass packets once the netdev is registered */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = true;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+}
+
+static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL;
+ struct net_device *ndev = NULL;
+ struct i2c_adapter *root;
+ unsigned long flags;
+ char namebuf[30];
+ int rc;
+
+ root = mux_root_adapter(adap);
+ if (root != mcli->client->adapter) {
+ dev_err(&mcli->client->dev,
+ "I2C adapter %s is not a child bus of %s\n",
+ mcli->client->adapter->name, root->name);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr);
+ ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup);
+ if (!ndev) {
+ dev_err(&mcli->client->dev, "alloc netdev failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ dev_net_set(ndev, current->nsproxy->net_ns);
+ SET_NETDEV_DEV(ndev, &adap->dev);
+ dev_addr_set(ndev, &mcli->lladdr);
+
+ midev = mctp_i2c_midev_init(ndev, mcli, adap);
+ if (IS_ERR(midev)) {
+ rc = PTR_ERR(midev);
+ midev = NULL;
+ goto err;
+ }
+
+ rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops);
+ if (rc < 0) {
+ dev_err(&mcli->client->dev,
+ "register netdev \"%s\" failed %d\n",
+ ndev->name, rc);
+ goto err;
+ }
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+err:
+ if (midev)
+ mctp_i2c_midev_free(midev);
+ if (ndev)
+ free_netdev(ndev);
+ return rc;
+}
+
+/* Removes any netdev for adap. mcli is the parent root i2c client */
+static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL, *m = NULL;
+ unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ /* List size is limited by number of MCTP netdevs on a single hardware bus */
+ list_for_each_entry(m, &mcli->devs, list)
+ if (m->adapter == adap) {
+ midev = m;
+ break;
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (midev)
+ mctp_i2c_unregister(midev);
+}
+
+/* Determines whether a device is an i2c adapter.
+ * Optionally returns the root i2c_adapter
+ */
+static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev,
+ struct i2c_adapter **ret_root)
+{
+ struct i2c_adapter *root, *adap;
+
+ if (dev->type != &i2c_adapter_type)
+ return NULL;
+ adap = to_i2c_adapter(dev);
+ root = mux_root_adapter(adap);
+ WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n",
+ dev_name(dev));
+ if (!root)
+ return NULL;
+ if (ret_root)
+ *ret_root = root;
+ return adap;
+}
+
+/* Determines whether a device is an i2c adapter with the "mctp-controller"
+ * devicetree property set. If adap is not an OF node, returns match_no_of
+ */
+static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of)
+{
+ if (!adap->dev.of_node)
+ return match_no_of;
+ return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP);
+}
+
+/* Called for each existing i2c device (adapter or client) when a
+ * new mctp-i2c client is probed.
+ */
+static int mctp_i2c_client_try_attach(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap = NULL, *root = NULL;
+ struct mctp_i2c_client *mcli = data;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return 0;
+ if (mcli->client->adapter != root)
+ return 0;
+ /* Must either have mctp-controller property on the adapter, or
+ * be a root adapter if it's non-devicetree
+ */
+ if (!mctp_i2c_adapter_match(adap, adap == root))
+ return 0;
+
+ return mctp_i2c_add_netdev(mcli, adap);
+}
+
+static void mctp_i2c_notify_add(struct device *dev)
+{
+ struct mctp_i2c_client *mcli = NULL, *m = NULL;
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ int rc;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+ /* Check for mctp-controller property on the adapter */
+ if (!mctp_i2c_adapter_match(adap, false))
+ return;
+
+ /* Find an existing mcli for adap's root */
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(m, &driver_clients, list) {
+ if (m->client->adapter == root) {
+ mcli = m;
+ break;
+ }
+ }
+
+ if (mcli) {
+ rc = mctp_i2c_add_netdev(mcli, adap);
+ if (rc < 0)
+ dev_warn(dev, "Failed adding mctp-i2c net device\n");
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static void mctp_i2c_notify_del(struct device *dev)
+{
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ struct mctp_i2c_client *mcli = NULL;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(mcli, &driver_clients, list) {
+ if (mcli->client->adapter == root) {
+ mctp_i2c_remove_netdev(mcli, adap);
+ break;
+ }
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static int mctp_i2c_probe(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ int rc;
+
+ mutex_lock(&driver_clients_lock);
+ mcli = mctp_i2c_new_client(client);
+ if (IS_ERR(mcli)) {
+ rc = PTR_ERR(mcli);
+ mcli = NULL;
+ goto out;
+ } else {
+ list_add(&mcli->list, &driver_clients);
+ }
+
+ /* Add a netdev for adapters that have a 'mctp-controller' property */
+ i2c_for_each_dev(mcli, mctp_i2c_client_try_attach);
+ rc = 0;
+out:
+ mutex_unlock(&driver_clients_lock);
+ return rc;
+}
+
+static void mctp_i2c_remove(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
+
+ mutex_lock(&driver_clients_lock);
+ list_del(&mcli->list);
+ /* Remove all child adapter netdevs */
+ list_for_each_entry_safe(midev, tmp, &mcli->devs, list)
+ mctp_i2c_unregister(midev);
+
+ mctp_i2c_free_client(mcli);
+ mutex_unlock(&driver_clients_lock);
+}
+
+/* We look for a 'mctp-controller' property on I2C busses as they are
+ * added/deleted, creating/removing netdevs as required.
+ */
+static int mctp_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ mctp_i2c_notify_add(dev);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ mctp_i2c_notify_del(dev);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mctp_i2c_notifier = {
+ .notifier_call = mctp_i2c_notifier_call,
+};
+
+static const struct i2c_device_id mctp_i2c_id[] = {
+ { "mctp-i2c-interface", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
+
+static const struct of_device_id mctp_i2c_of_match[] = {
+ { .compatible = "mctp-i2c-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mctp_i2c_of_match);
+
+static struct i2c_driver mctp_i2c_driver = {
+ .driver = {
+ .name = "mctp-i2c-interface",
+ .of_match_table = mctp_i2c_of_match,
+ },
+ .probe_new = mctp_i2c_probe,
+ .remove = mctp_i2c_remove,
+ .id_table = mctp_i2c_id,
+};
+
+static __init int mctp_i2c_mod_init(void)
+{
+ int rc;
+
+ pr_info("MCTP I2C interface driver\n");
+ rc = i2c_add_driver(&mctp_i2c_driver);
+ if (rc < 0)
+ return rc;
+ rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0) {
+ i2c_del_driver(&mctp_i2c_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static __exit void mctp_i2c_mod_exit(void)
+{
+ int rc;
+
+ rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0)
+ pr_warn("MCTP I2C could not unregister notifier, %d\n", rc);
+ i2c_del_driver(&mctp_i2c_driver);
+}
+
+module_init(mctp_i2c_mod_init);
+module_exit(mctp_i2c_mod_exit);
+
+MODULE_DESCRIPTION("MCTP I2C device");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>");
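A standalone illustration of the PEC arithmetic used by mctp_i2c_xmit() and mctp_i2c_recv() above: i2c_smbus_pec() computes CRC-8 with polynomial x^8 + x^2 + x + 1 (0x07), initial value 0, over every byte on the wire including the destination slave address. The frame below mirrors struct mctp_i2c_hdr; the addresses and payload are invented for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t crc8(uint8_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
	}
	return crc;
}

int main(void)
{
	/* dest_slave, command (0x0f), byte_count = 5, source_slave,
	 * then a 4-byte MCTP payload starting with version 0x01
	 */
	uint8_t frame[] = { 0x20, 0x0f, 0x05, 0x21, 0x01, 0x00, 0x00, 0x00 };
	uint8_t pec = crc8(0, frame, sizeof(frame));

	printf("PEC byte appended on the wire: 0x%02x\n", pec);
	return 0;
}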
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index eaa6fb3224bc..7cd103fd34ef 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -286,7 +286,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
cb = __mctp_cb(skb);
cb->halen = 0;
- netif_rx_ni(skb);
+ netif_rx(skb);
dev->netdev->stats.rx_packets++;
dev->netdev->stats.rx_bytes += dev->rxlen;
}
@@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
mctp_serial_push(dev, c[i]);
}
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
@@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty)
int idx = dev->idx;
unregister_netdev(dev->netdev);
- cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 1becb1a731f6..689e728345ce 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -10,10 +10,31 @@
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/pse-pd/pse.h>
MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
MODULE_LICENSE("GPL");
+static struct pse_control *
+fwnode_find_pse_control(struct fwnode_handle *fwnode)
+{
+ struct pse_control *psec;
+ struct device_node *np;
+
+ if (!IS_ENABLED(CONFIG_PSE_CONTROLLER))
+ return NULL;
+
+ np = to_of_node(fwnode);
+ if (!np)
+ return NULL;
+
+ psec = of_pse_control_get(np);
+ if (PTR_ERR(psec) == -ENOENT)
+ return NULL;
+
+ return psec;
+}
+
static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
@@ -43,6 +64,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
int rc;
rc = fwnode_irq_get(child, 0);
+ /* Don't wait forever if the IRQ provider doesn't become available,
+ * just fall back to poll mode
+ */
+ if (rc == -EPROBE_DEFER)
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
if (rc == -EPROBE_DEFER)
return rc;
@@ -86,14 +112,21 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
struct fwnode_handle *child, u32 addr)
{
struct mii_timestamper *mii_ts = NULL;
+ struct pse_control *psec = NULL;
struct phy_device *phy;
bool is_c45 = false;
u32 phy_id;
int rc;
+ psec = fwnode_find_pse_control(child);
+ if (IS_ERR(psec))
+ return PTR_ERR(psec);
+
mii_ts = fwnode_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
+ if (IS_ERR(mii_ts)) {
+ rc = PTR_ERR(mii_ts);
+ goto clean_pse;
+ }
rc = fwnode_property_match_string(child, "compatible",
"ethernet-phy-ieee802.3-c45");
@@ -105,8 +138,8 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
else
phy = phy_device_create(bus, addr, phy_id, 0, NULL);
if (IS_ERR(phy)) {
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
+ rc = PTR_ERR(phy);
+ goto clean_mii_ts;
}
if (is_acpi_node(child)) {
@@ -120,25 +153,33 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
/* All data is now stored in the phy struct, so register it */
rc = phy_device_register(phy);
if (rc) {
- phy_device_free(phy);
fwnode_handle_put(phy->mdio.dev.fwnode);
- return rc;
+ goto clean_phy;
}
} else if (is_of_node(child)) {
rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr);
- if (rc) {
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
- return rc;
- }
+ if (rc)
+ goto clean_phy;
}
+ phy->psec = psec;
+
/* phy->mii_ts may already be defined by the PHY driver. A
* mii_timestamper probed via the device tree will still have
* precedence.
*/
if (mii_ts)
phy->mii_ts = mii_ts;
+
return 0;
+
+clean_phy:
+ phy_device_free(phy);
+clean_mii_ts:
+ unregister_mii_timestamper(mii_ts);
+clean_pse:
+ pse_control_put(psec);
+
+ return rc;
}
EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
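The reworked error handling above follows the kernel's usual goto-unwind shape: acquire resources in order, release in reverse, one label per resource, every failure path funnelling through the same exit. A self-contained sketch of the pattern; the names and the injected failure are illustrative:

#include <stdio.h>

static int get_pse(void)  { puts("get pse");  return 0; }
static void put_pse(void) { puts("put pse"); }
static int get_ts(void)   { puts("get ts");   return -1; } /* injected failure */

static int register_phy_like(void)
{
	int rc;

	rc = get_pse();
	if (rc)
		return rc;

	rc = get_ts();
	if (rc)
		goto clean_pse;

	return 0;

clean_pse:
	put_pse();	/* released in reverse acquisition order */
	return rc;
}

int main(void)
{
	return register_phy_like() ? 1 : 0;
}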
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 966c3b4ad59d..944d005d2bd1 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -3,6 +3,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/reset.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
@@ -21,6 +22,10 @@
#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26)
#define MDIO_C22_OP_WRITE 0b01
#define MDIO_C22_OP_READ 0b10
+#define MDIO_C45_OP_ADDR 0b00
+#define MDIO_C45_OP_WRITE 0b01
+#define MDIO_C45_OP_PREAD 0b10
+#define MDIO_C45_OP_READ 0b11
#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21)
#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16)
#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0)
@@ -37,36 +42,38 @@
struct aspeed_mdio {
void __iomem *base;
+ struct reset_control *reset;
};
-static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
+ u16 data)
{
struct aspeed_mdio *ctx = bus->priv;
u32 ctrl;
- u32 data;
- int rc;
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
- regnum);
-
- /* Just clause 22 for the moment */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
+ dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n",
+ __func__, st, op, phyad, regad, data);
ctrl = ASPEED_MDIO_CTRL_FIRE
- | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
- | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ)
- | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
- | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum);
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, st)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, op)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad)
+ | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data);
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
- rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
!(ctrl & ASPEED_MDIO_CTRL_FIRE),
ASPEED_MDIO_INTERVAL_US,
ASPEED_MDIO_TIMEOUT_US);
- if (rc < 0)
- return rc;
+}
+
+static int aspeed_mdio_get_data(struct mii_bus *bus)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 data;
+ int rc;
rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
data & ASPEED_MDIO_DATA_IDLE,
@@ -78,31 +85,80 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);
}
-static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum)
{
- struct aspeed_mdio *ctx = bus->priv;
- u32 ctrl;
+ int rc;
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
- __func__, addr, regnum, val);
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ,
+ addr, regnum, 0);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_get_data(bus);
+}
+
+static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE,
+ addr, regnum, val);
+}
+
+static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum)
+{
+ u8 c45_dev = (regnum >> 16) & 0x1F;
+ u16 c45_addr = regnum & 0xFFFF;
+ int rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
+ addr, c45_dev, c45_addr);
+ if (rc < 0)
+ return rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ,
+ addr, c45_dev, 0);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_get_data(bus);
+}
+
+static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ u8 c45_dev = (regnum >> 16) & 0x1F;
+ u16 c45_addr = regnum & 0xFFFF;
+ int rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
+ addr, c45_dev, c45_addr);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE,
+ addr, c45_dev, val);
+}
+
+static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
+ regnum);
- /* Just clause 22 for the moment */
if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
+ return aspeed_mdio_read_c45(bus, addr, regnum);
- ctrl = ASPEED_MDIO_CTRL_FIRE
- | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
- | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE)
- | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
- | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum)
- | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val);
+ return aspeed_mdio_read_c22(bus, addr, regnum);
+}
- iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
+ __func__, addr, regnum, val);
- return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
- !(ctrl & ASPEED_MDIO_CTRL_FIRE),
- ASPEED_MDIO_INTERVAL_US,
- ASPEED_MDIO_TIMEOUT_US);
+ if (regnum & MII_ADDR_C45)
+ return aspeed_mdio_write_c45(bus, addr, regnum, val);
+
+ return aspeed_mdio_write_c22(bus, addr, regnum, val);
}
static int aspeed_mdio_probe(struct platform_device *pdev)
@@ -120,15 +176,23 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
+ ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(ctx->reset))
+ return PTR_ERR(ctx->reset);
+
+ reset_control_deassert(ctx->reset);
+
bus->name = DRV_NAME;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
bus->read = aspeed_mdio_read;
bus->write = aspeed_mdio_write;
+ bus->probe_capabilities = MDIOBUS_C22_C45;
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ reset_control_assert(ctx->reset);
return rc;
}
@@ -139,7 +203,11 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
static int aspeed_mdio_remove(struct platform_device *pdev)
{
- mdiobus_unregister(platform_get_drvdata(pdev));
+ struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev);
+ struct aspeed_mdio *ctx = bus->priv;
+
+ reset_control_assert(ctx->reset);
+ mdiobus_unregister(bus);
return 0;
}
@@ -148,6 +216,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = {
{ .compatible = "aspeed,ast2600-mdio", },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
static struct platform_driver aspeed_mdio_driver = {
.driver = {
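Before the per-bus C45 callback split landed, a driver advertised MDIOBUS_C22_C45 and decoded both access types from a single regnum: bit 30 (MII_ADDR_C45) flags a Clause 45 access, bits 20:16 carry the MMD device address, and bits 15:0 the register, which is exactly what aspeed_mdio_read_c45() extracts above. A small sketch of that decoding (helper names are illustrative):

/* Sketch: decoding a combined Clause 22/45 regnum; the masks match
 * the driver code above.
 */
static inline bool regnum_is_c45(int regnum)
{
	return regnum & MII_ADDR_C45;		/* bit 30 in this era */
}

static inline u8 regnum_c45_devad(int regnum)
{
	return (regnum >> 16) & 0x1f;		/* MMD device address */
}

static inline u16 regnum_c45_regad(int regnum)
{
	return regnum & 0xffff;			/* register within the MMD */
}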
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 09200a70b315..bf8bf5e20faf 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -3,6 +3,7 @@
* MDIO I2C bridge
*
* Copyright (C) 2015-2016 Russell King
+ * Copyright (C) 2021 Marek Behun
*
 * Network PHYs can appear on I2C buses when they are part of an SFP module.
* This driver exposes these PHYs to the networking PHY code, allowing
@@ -12,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
@@ -28,7 +30,7 @@ static unsigned int i2c_mii_phy_addr(int phy_id)
return phy_id + 0x40;
}
-static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
@@ -62,7 +64,8 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
return data[0] << 8 | data[1];
}
-static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
@@ -91,9 +94,288 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
return ret < 0 ? ret : 0;
}
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c)
+/* On RollBall SFPs the internal PHY is not accessed via I2C address 0x56,
+ * but via address 0x51, with the SFP page set to 0x03 and the password
+ * set to 0xffffffff.
+ *
+ * address size contents description
+ * ------- ---- -------- -----------
+ * 0x80 1 CMD 0x01/0x02/0x04 for write/read/done
+ * 0x81 1 DEV Clause 45 device
+ * 0x82 2 REG Clause 45 register
+ * 0x84 2 VAL Register value
+ */
+#define ROLLBALL_PHY_I2C_ADDR 0x51
+
+#define ROLLBALL_PASSWORD (SFP_VSL + 3)
+
+#define ROLLBALL_CMD_ADDR 0x80
+#define ROLLBALL_DATA_ADDR 0x81
+
+#define ROLLBALL_CMD_WRITE 0x01
+#define ROLLBALL_CMD_READ 0x02
+#define ROLLBALL_CMD_DONE 0x04
+
+#define SFP_PAGE_ROLLBALL_MDIO 3
+
+static int __i2c_transfer_err(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+
+ ret = __i2c_transfer(i2c, msgs, num);
+ if (ret < 0)
+ return ret;
+ else if (ret != num)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int __i2c_rollball_get_page(struct i2c_adapter *i2c, int bus_addr,
+ u8 *page)
+{
+ struct i2c_msg msgs[2];
+ u8 addr = SFP_PAGE;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &addr;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = page;
+
+ return __i2c_transfer_err(i2c, msgs, 2);
+}
+
+static int __i2c_rollball_set_page(struct i2c_adapter *i2c, int bus_addr,
+ u8 page)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = SFP_PAGE;
+ buf[1] = page;
+
+ msg.addr = bus_addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buf;
+
+ return __i2c_transfer_err(i2c, &msg, 1);
+}
+
+/* To avoid interfering with other SFP code (which may manipulate SFP_PAGE),
+ * we do the following for every transfer:
+ * 1. lock the bus
+ * 2. save the content of SFP_PAGE
+ * 3. set SFP_PAGE to 3
+ * 4. do the transfer
+ * 5. restore the original SFP_PAGE
+ * 6. unlock the bus
+ * Note that one might think steps 2 to 5 could theoretically be done in a
+ * single call to i2c_transfer() (by constructing the msgs array that way),
+ * but tests show that this unfortunately does not work :-( A changed
+ * SFP_PAGE is not taken into account until i2c_transfer() is done.
+ */
+static int i2c_transfer_rollball(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ int ret, main_err = 0;
+ u8 saved_page;
+
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ /* save original page */
+ ret = __i2c_rollball_get_page(i2c, msgs->addr, &saved_page);
+ if (ret)
+ goto unlock;
+
+ /* change to RollBall MDIO page */
+ ret = __i2c_rollball_set_page(i2c, msgs->addr, SFP_PAGE_ROLLBALL_MDIO);
+ if (ret)
+ goto unlock;
+
+ /* do the transfer; we try to restore original page if this fails */
+ ret = __i2c_transfer_err(i2c, msgs, num);
+ if (ret)
+ main_err = ret;
+
+ /* restore original page */
+ ret = __i2c_rollball_set_page(i2c, msgs->addr, saved_page);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ return main_err ? : ret;
+}
+
+static int i2c_rollball_mii_poll(struct mii_bus *bus, int bus_addr, u8 *buf,
+ size_t len)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+ u8 cmd_addr, tmp, *res;
+ int i, ret;
+
+ cmd_addr = ROLLBALL_CMD_ADDR;
+
+ res = buf ? buf : &tmp;
+ len = buf ? len : 1;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &cmd_addr;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = res;
+
+	/* Experimentally it takes up to 70 ms to access a register on these
+	 * SFPs. Sleep 20 ms between iterations and try up to 10 times.
+	 */
+ i = 10;
+ do {
+ msleep(20);
+
+ ret = i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
+ if (ret)
+ return ret;
+
+ if (*res == ROLLBALL_CMD_DONE)
+ return 0;
+ } while (i-- > 0);
+
+ dev_dbg(&bus->dev, "poll timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
+ u8 *data, size_t len)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+ u8 cmdbuf[2];
+
+ cmdbuf[0] = ROLLBALL_CMD_ADDR;
+ cmdbuf[1] = cmd;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = 0;
+ msgs[1].len = sizeof(cmdbuf);
+ msgs[1].buf = cmdbuf;
+
+ return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
+}
+
+static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
+{
+ u8 buf[4], res[6];
+ int bus_addr, ret;
+ u16 val;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
+ return 0xffff;
+
+ buf[0] = ROLLBALL_DATA_ADDR;
+ buf[1] = (reg >> 16) & 0x1f;
+ buf[2] = (reg >> 8) & 0xff;
+ buf[3] = reg & 0xff;
+
+ ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_READ, buf,
+ sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_rollball_mii_poll(bus, bus_addr, res, sizeof(res));
+ if (ret == -ETIMEDOUT)
+ return 0xffff;
+ else if (ret < 0)
+ return ret;
+
+ val = res[4] << 8 | res[5];
+
+ return val;
+}
+
+static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ int bus_addr, ret;
+ u8 buf[6];
+
+ if (!(reg & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
+ return 0;
+
+ buf[0] = ROLLBALL_DATA_ADDR;
+ buf[1] = (reg >> 16) & 0x1f;
+ buf[2] = (reg >> 8) & 0xff;
+ buf[3] = reg & 0xff;
+ buf[4] = val >> 8;
+ buf[5] = val & 0xff;
+
+ ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_WRITE, buf,
+ sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_rollball_mii_poll(bus, bus_addr, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int i2c_mii_init_rollball(struct i2c_adapter *i2c)
+{
+ struct i2c_msg msg;
+ u8 pw[5];
+ int ret;
+
+ pw[0] = ROLLBALL_PASSWORD;
+ pw[1] = 0xff;
+ pw[2] = 0xff;
+ pw[3] = 0xff;
+ pw[4] = 0xff;
+
+ msg.addr = ROLLBALL_PHY_I2C_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(pw);
+ msg.buf = pw;
+
+ ret = i2c_transfer(i2c, &msg, 1);
+ if (ret < 0)
+ return ret;
+ else if (ret != 1)
+ return -EIO;
+ else
+ return 0;
+}
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol)
{
struct mii_bus *mii;
+ int ret;
if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
return ERR_PTR(-EINVAL);
@@ -104,10 +386,28 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c)
snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent));
mii->parent = parent;
- mii->read = i2c_mii_read;
- mii->write = i2c_mii_write;
mii->priv = i2c;
+ switch (protocol) {
+ case MDIO_I2C_ROLLBALL:
+ ret = i2c_mii_init_rollball(i2c);
+ if (ret < 0) {
+ dev_err(parent,
+ "Cannot initialize RollBall MDIO I2C protocol: %d\n",
+ ret);
+ mdiobus_free(mii);
+ return ERR_PTR(ret);
+ }
+
+ mii->read = i2c_mii_read_rollball;
+ mii->write = i2c_mii_write_rollball;
+ break;
+ default:
+ mii->read = i2c_mii_read_default;
+ mii->write = i2c_mii_write_default;
+ break;
+ }
+
return mii;
}
EXPORT_SYMBOL_GPL(mdio_i2c_alloc);
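With the new protocol argument, callers of mdio_i2c_alloc() (in practice the SFP code) pick the access method at allocation time. A hedged caller-side sketch: only MDIO_I2C_ROLLBALL appears in the diff above, and the surrounding variable names are illustrative.

/* Sketch: allocating an MDIO-over-I2C bus for a RollBall module. */
struct mii_bus *mii;

mii = mdio_i2c_alloc(dev, i2c, MDIO_I2C_ROLLBALL);
if (IS_ERR(mii))
	return PTR_ERR(mii);	/* RollBall init may have failed */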
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 5f4cd24a0241..4eba5a91075c 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
if (ret)
return ret;
- return clk_prepare_enable(priv->mdio_clk);
+ ret = clk_prepare_enable(priv->mdio_clk);
+ if (ret == 0)
+ mdelay(10);
+
+ return ret;
}
static int ipq4019_mdio_probe(struct platform_device *pdev)
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 7d2abaf2b2c9..51f68daac152 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -7,14 +7,17 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/mfd/ocelot.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#define MSCC_MIIM_REG_STATUS 0x0
@@ -29,6 +32,8 @@
#define MSCC_MIIM_CMD_VLD BIT(31)
#define MSCC_MIIM_REG_DATA 0xC
#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
+#define MSCC_MIIM_REG_CFG 0x10
+#define MSCC_MIIM_CFG_PRESCALE_MASK GENMASK(7, 0)
#define MSCC_PHY_REG_PHY_CFG 0x0
#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
@@ -36,11 +41,21 @@
#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
#define MSCC_PHY_REG_PHY_STATUS 0x4
+#define LAN966X_CUPHY_COMMON_CFG 0x0
+#define CUPHY_COMMON_CFG_RESET_N BIT(0)
+
+struct mscc_miim_info {
+ unsigned int phy_reset_offset;
+ unsigned int phy_reset_bits;
+};
+
struct mscc_miim_dev {
struct regmap *regs;
int mii_status_offset;
struct regmap *phy_regs;
- int phy_reset_offset;
+ const struct mscc_miim_info *info;
+ struct clk *clk;
+ u32 bus_freq;
};
/* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -93,6 +108,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
u32 val;
int ret;
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
ret = mscc_miim_wait_pending(bus);
if (ret)
goto out;
@@ -136,6 +154,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
struct mscc_miim_dev *miim = bus->priv;
int ret;
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
ret = mscc_miim_wait_pending(bus);
if (ret < 0)
goto out;
@@ -157,27 +178,29 @@ out:
static int mscc_miim_reset(struct mii_bus *bus)
{
struct mscc_miim_dev *miim = bus->priv;
- int offset = miim->phy_reset_offset;
+ unsigned int offset, bits;
int ret;
- if (miim->phy_regs) {
- ret = regmap_write(miim->phy_regs,
- MSCC_PHY_REG_PHY_CFG + offset, 0);
- if (ret < 0) {
- WARN_ONCE(1, "mscc reset set error %d\n", ret);
- return ret;
- }
-
- ret = regmap_write(miim->phy_regs,
- MSCC_PHY_REG_PHY_CFG + offset, 0x1ff);
- if (ret < 0) {
- WARN_ONCE(1, "mscc reset clear error %d\n", ret);
- return ret;
- }
-
- mdelay(500);
+ if (!miim->phy_regs)
+ return 0;
+
+ offset = miim->info->phy_reset_offset;
+ bits = miim->info->phy_reset_bits;
+
+ ret = regmap_update_bits(miim->phy_regs, offset, bits, 0);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset set error %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(miim->phy_regs, offset, bits, bits);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset clear error %d\n", ret);
+ return ret;
}
+ mdelay(500);
+
return 0;
}
@@ -187,6 +210,13 @@ static const struct regmap_config mscc_miim_regmap_config = {
.reg_stride = 4,
};
+static const struct regmap_config mscc_miim_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "phy",
+};
+
int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
struct regmap *mii_regmap, int status_offset)
{
@@ -217,78 +247,127 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
}
EXPORT_SYMBOL(mscc_miim_setup);
+static int mscc_miim_clk_set(struct mii_bus *bus)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ unsigned long rate;
+ u32 div;
+
+ /* Keep the current settings */
+ if (!miim->bus_freq)
+ return 0;
+
+ rate = clk_get_rate(miim->clk);
+
+ div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1;
+ if (div == 0 || div & ~MSCC_MIIM_CFG_PRESCALE_MASK) {
+ dev_err(&bus->dev, "Incorrect MDIO clock frequency\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG,
+ MSCC_MIIM_CFG_PRESCALE_MASK, div);
+}
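mscc_miim_clk_set() derives the MIIM prescaler from the bus clock; the computation implies an MDC frequency of roughly rate / (2 * (div + 1)). As a worked example (the 100 MHz rate is an assumption, not a value from the driver): with clock-frequency = 2500000 in the device tree,

	div = DIV_ROUND_UP(100000000, 2 * 2500000) - 1
	    = DIV_ROUND_UP(100000000, 5000000) - 1
	    = 20 - 1
	    = 19

which fits MSCC_MIIM_CFG_PRESCALE_MASK (GENMASK(7, 0)), so 19 is written to the prescaler field. A div of 0, or one that overflows the 8-bit field, is rejected with -EINVAL.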
+
static int mscc_miim_probe(struct platform_device *pdev)
{
- struct regmap *mii_regmap, *phy_regmap = NULL;
- void __iomem *regs, *phy_regs;
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *mii_regmap, *phy_regmap;
+ struct device *dev = &pdev->dev;
struct mscc_miim_dev *miim;
- struct resource *res;
struct mii_bus *bus;
int ret;
- regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(regs)) {
- dev_err(&pdev->dev, "Unable to map MIIM registers\n");
- return PTR_ERR(regs);
- }
-
- mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
- &mscc_miim_regmap_config);
-
- if (IS_ERR(mii_regmap)) {
- dev_err(&pdev->dev, "Unable to create MIIM regmap\n");
- return PTR_ERR(mii_regmap);
- }
+ mii_regmap = ocelot_regmap_from_resource(pdev, 0,
+ &mscc_miim_regmap_config);
+ if (IS_ERR(mii_regmap))
+ return dev_err_probe(dev, PTR_ERR(mii_regmap),
+ "Unable to create MIIM regmap\n");
/* This resource is optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- phy_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(phy_regs)) {
- dev_err(&pdev->dev, "Unable to map internal phy registers\n");
- return PTR_ERR(phy_regs);
- }
-
- phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
- &mscc_miim_regmap_config);
- if (IS_ERR(phy_regmap)) {
- dev_err(&pdev->dev, "Unable to create phy register regmap\n");
- return PTR_ERR(phy_regmap);
- }
- }
+ phy_regmap = ocelot_regmap_from_resource_optional(pdev, 1,
+ &mscc_miim_phy_regmap_config);
+ if (IS_ERR(phy_regmap))
+ return dev_err_probe(dev, PTR_ERR(phy_regmap),
+ "Unable to create phy register regmap\n");
- ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0);
+ ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "Unable to setup the MDIO bus\n");
+ dev_err(dev, "Unable to setup the MDIO bus\n");
return ret;
}
miim = bus->priv;
miim->phy_regs = phy_regmap;
- miim->phy_reset_offset = 0;
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ miim->info = device_get_match_data(dev);
+ if (!miim->info)
+ return -EINVAL;
+
+ miim->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(miim->clk))
+ return PTR_ERR(miim->clk);
+
+ of_property_read_u32(np, "clock-frequency", &miim->bus_freq);
+
+ if (miim->bus_freq && !miim->clk) {
+ dev_err(dev, "cannot use clock-frequency without a clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(miim->clk);
+ if (ret)
return ret;
+
+ ret = mscc_miim_clk_set(bus);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0) {
+ dev_err(dev, "Cannot register MDIO bus (%d)\n", ret);
+ goto out_disable_clk;
}
platform_set_drvdata(pdev, bus);
return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(miim->clk);
+ return ret;
}
static int mscc_miim_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct mscc_miim_dev *miim = bus->priv;
+ clk_disable_unprepare(miim->clk);
mdiobus_unregister(bus);
return 0;
}
+static const struct mscc_miim_info mscc_ocelot_miim_info = {
+ .phy_reset_offset = MSCC_PHY_REG_PHY_CFG,
+ .phy_reset_bits = PHY_CFG_PHY_ENA | PHY_CFG_PHY_COMMON_RESET |
+ PHY_CFG_PHY_RESET,
+};
+
+static const struct mscc_miim_info microchip_lan966x_miim_info = {
+ .phy_reset_offset = LAN966X_CUPHY_COMMON_CFG,
+ .phy_reset_bits = CUPHY_COMMON_CFG_RESET_N,
+};
+
static const struct of_device_id mscc_miim_match[] = {
- { .compatible = "mscc,ocelot-miim" },
+ {
+ .compatible = "mscc,ocelot-miim",
+ .data = &mscc_ocelot_miim_info
+ }, {
+ .compatible = "microchip,lan966x-miim",
+ .data = &microchip_lan966x_miim_info
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mscc_miim_match);
diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c
index 6dcbf987d61b..8b444a8eb6b5 100644
--- a/drivers/net/mdio/mdio-mux-bcm6368.c
+++ b/drivers/net/mdio/mdio-mux-bcm6368.c
@@ -115,7 +115,7 @@ static int bcm6368_mdiomux_probe(struct platform_device *pdev)
md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
- return ENOMEM;
+ return -ENOMEM;
}
bus = md->mii_bus;
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index b8866bc3f2e8..4a2e94faf57e 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -233,11 +233,9 @@ static int g12a_ephy_glue_clk_register(struct device *dev)
snprintf(in_name, sizeof(in_name), "clkin%d", i);
clk = devm_clk_get(dev, in_name);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "Missing clock %s\n", in_name);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Missing clock %s\n", in_name);
parent_names[i] = __clk_get_name(clk);
}
@@ -317,12 +315,9 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get peripheral clock\n");
- return ret;
- }
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(dev, PTR_ERR(priv->pclk),
+ "failed to get peripheral clock\n");
/* Make sure the device registers are clocked */
ret = clk_prepare_enable(priv->pclk);
@@ -339,8 +334,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
&priv->mux_handle, dev, NULL);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "mdio multiplexer init failed: %d", ret);
+ dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
goto err;
}
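The conversions above collapse the open-coded "don't log -EPROBE_DEFER" dance into dev_err_probe(), which returns the error, logs it via dev_err() for real failures, and for -EPROBE_DEFER records the message as the deferred-probe reason instead. A minimal before/after sketch (clk and the message text are illustrative):

/* Before: */
clk = devm_clk_get(dev, "pclk");
if (IS_ERR(clk)) {
	if (PTR_ERR(clk) != -EPROBE_DEFER)
		dev_err(dev, "failed to get clock\n");
	return PTR_ERR(clk);
}

/* After: */
clk = devm_clk_get(dev, "pclk");
if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");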
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index c02fb2a067ee..c02c9c660016 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -159,12 +159,9 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
mdio_mux_mmioreg_switch_fn,
&s->mux_handle, s, NULL);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to register mdio-mux bus %pOF\n", np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register mdio-mux bus %pOF\n", np);
pdev->dev.platform_data = s;
diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index 527acfc3c045..bfa5af577b0a 100644
--- a/drivers/net/mdio/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
@@ -72,12 +72,9 @@ static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
return -ENOMEM;
s->muxc = devm_mux_control_get(dev, NULL);
- if (IS_ERR(s->muxc)) {
- ret = PTR_ERR(s->muxc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(s->muxc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(s->muxc),
+ "Failed to get mux\n");
platform_set_drvdata(pdev, s);
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index ebd001f0eece..a881e3523328 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -168,8 +168,8 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->priv = cb;
cb->mii_bus->name = "mdio_mux";
- snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
- pb->parent_id, v);
+ snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
+ cb->mii_bus->name, pb->parent_id, v);
cb->mii_bus->parent = dev;
cb->mii_bus->read = mdio_mux_read;
cb->mii_bus->write = mdio_mux_write;
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 7ab4e26db08c..7aafc221b5cf 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -285,7 +285,8 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
const union acpi_object *obj;
u32 phy_addr;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
return AE_OK;
if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj))
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 9e3c815a070f..796e9c7857d0 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
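The of_mdio.c fix above plugs a reference leak: for_each_available_child_of_node() holds a reference on the current child, and a break or goto out of the loop must drop it explicitly. The general pattern (do_something() is hypothetical):

struct device_node *child;
int rc;

for_each_available_child_of_node(np, child) {
	rc = do_something(child);
	if (rc) {
		of_node_put(child);	/* drop the iterator's reference */
		return rc;
	}
}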
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index aaa628f859fd..0b1b6f650104 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -225,7 +225,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
u64_stats_inc(&mhi_netdev->stats.rx_packets);
u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- netif_rx(skb);
+ __netif_rx(skb);
}
/* Refill if RX buffers queue becomes low */
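netif_rx() was reworked this cycle so it may be called from any context; __netif_rx() is the lighter variant for callers that already run with bottom halves disabled, which the MHI download-completion callback evidently satisfies here. Conceptually (a sketch of the relationship, not the exact core implementation):

/* netif_rx() is roughly equivalent to: */
local_bh_disable();
__netif_rx(skb);
local_bh_enable();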
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 86ec5aae4289..7a28e082436e 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -89,7 +89,7 @@ static int net_failover_close(struct net_device *dev)
static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -324,8 +324,8 @@ static const struct net_device_ops failover_dev_ops = {
static void nfo_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
}
static int nfo_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ab8cd5551020..bdff9ac5056d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(oops_only, "Only log oops messages");
#ifndef MODULE
static int __init option_setup(char *opt)
{
- strlcpy(config, opt, MAX_PARAM_LENGTH);
+ strscpy(config, opt, MAX_PARAM_LENGTH);
return 1;
}
__setup("netconsole=", option_setup);
@@ -178,7 +178,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
goto fail;
nt->np.name = "netconsole";
- strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
+ strscpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
@@ -414,7 +414,7 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
return -EINVAL;
}
- strlcpy(nt->np.dev_name, buf, IFNAMSIZ);
+ strscpy(nt->np.dev_name, buf, IFNAMSIZ);
/* Get rid of possible trailing newline from echo(1) */
len = strnlen(nt->np.dev_name, IFNAMSIZ);
@@ -630,7 +630,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
return ERR_PTR(-ENOMEM);
nt->np.name = "netconsole";
- strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
+ strscpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
@@ -708,7 +708,7 @@ restart:
if (nt->np.dev == dev) {
switch (event) {
case NETDEV_CHANGENAME:
- strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
+ strscpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_RELEASE:
case NETDEV_JOIN:
@@ -721,7 +721,7 @@ restart:
__netpoll_cleanup(&nt->np);
spin_lock_irqsave(&target_list_lock, flags);
- dev_put_track(nt->np.dev, &nt->np.dev_tracker);
+ netdev_put(nt->np.dev, &nt->np.dev_tracker);
nt->np.dev = NULL;
nt->enabled = false;
stopped = true;
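The netconsole (and net_failover) hunks are part of the tree-wide strlcpy() to strscpy() migration: strlcpy() returns strlen(src) and therefore must traverse the whole source even when it does not fit, while strscpy() stops at the destination size, always NUL-terminates, and reports truncation directly. A usage sketch (buf and src are illustrative):

char buf[IFNAMSIZ];
ssize_t n;

n = strscpy(buf, src, sizeof(buf));
if (n == -E2BIG)
	pr_warn("name truncated\n");	/* copy happened, but was cut short */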
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index a1cbfa44a1e1..5735e5b1a2cb 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
+ netdev.o dev.o ethtool.o fib.o bus.o health.o hwstats.o udp_tunnels.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index a43820212932..50854265864d 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -351,10 +351,12 @@ nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
- nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].key)
return -ENOMEM;
- nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
@@ -496,7 +498,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
if (offmap->map.map_flags)
return -EINVAL;
- nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
if (!nmap)
return -ENOMEM;
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 25cb2e600d53..0052968e881e 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -72,16 +72,7 @@ new_port_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
- return -EBUSY;
-
- if (nsim_bus_dev->in_reload) {
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
- return -EBUSY;
- }
-
ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -102,16 +93,7 @@ del_port_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
- return -EBUSY;
-
- if (nsim_bus_dev->in_reload) {
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
- return -EBUSY;
- }
-
ret = nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -135,6 +117,10 @@ static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
static void nsim_bus_dev_release(struct device *dev)
{
+ struct nsim_bus_dev *nsim_bus_dev;
+
+ nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev);
+ kfree(nsim_bus_dev);
}
static struct device_type nsim_bus_dev_type = {
@@ -298,7 +284,6 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu
nsim_bus_dev->num_queues = num_queues;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
- mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
@@ -310,6 +295,8 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu
err_nsim_bus_dev_id_free:
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ put_device(&nsim_bus_dev->dev);
+ nsim_bus_dev = NULL;
err_nsim_bus_dev_free:
kfree(nsim_bus_dev);
return ERR_PTR(err);
@@ -319,9 +306,8 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
{
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
- device_unregister(&nsim_bus_dev->dev);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
- kfree(nsim_bus_dev);
+ device_unregister(&nsim_bus_dev->dev);
}
static struct device_driver nsim_driver = {
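The bus.c changes above fix the lifetime handling of struct nsim_bus_dev: once a struct device has been initialized, the memory embedding it must be freed from its ->release() callback, reached via the final put_device() or device_unregister(), never by a direct kfree(). The general shape:

/* Sketch: release-callback ownership for an embedded struct device. */
static void foo_release(struct device *dev)
{
	struct foo *foo = container_of(dev, struct foo, dev);

	kfree(foo);	/* the only place foo is freed after initialization */
}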
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 08d7b465a0de..a7880c7ce94c 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -59,7 +59,7 @@ static struct dentry *nsim_dev_ddir;
unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev)
{
WARN_ON(!lockdep_rtnl_is_held() &&
- !lockdep_is_held(&nsim_dev->vfs_lock));
+ !devl_lock_is_held(priv_to_devlink(nsim_dev)));
return nsim_dev->nsim_bus_dev->num_vfs;
}
@@ -275,7 +275,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
return -ENOMEM;
nsim_dev = file->private_data;
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
/* Reject if VFs are configured */
if (nsim_dev_get_vfs(nsim_dev)) {
ret = -EBUSY;
@@ -285,7 +285,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
*ppos += count;
ret = count;
}
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
kfree(vfconfigs);
return ret;
@@ -309,8 +309,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
if (IS_ERR(nsim_dev->ddir))
return PTR_ERR(nsim_dev->ddir);
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
- if (IS_ERR(nsim_dev->ports_ddir))
- return PTR_ERR(nsim_dev->ports_ddir);
+ if (IS_ERR(nsim_dev->ports_ddir)) {
+ err = PTR_ERR(nsim_dev->ports_ddir);
+ goto err_ddir;
+ }
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
@@ -339,13 +341,14 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
+	/* caution: the max_vfs write handler takes the devlink lock */
debugfs_create_file("max_vfs", 0600, nsim_dev->ddir,
nsim_dev, &nsim_dev_max_vfs_fops);
nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
if (IS_ERR(nsim_dev->nodes_ddir)) {
err = PTR_ERR(nsim_dev->nodes_ddir);
- goto err_out;
+ goto err_ports_ddir;
}
debugfs_create_bool("fail_trap_drop_counter_get", 0600,
nsim_dev->ddir,
@@ -353,8 +356,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
nsim_udp_tunnels_debugfs_create(nsim_dev);
return 0;
-err_out:
+err_ports_ddir:
debugfs_remove_recursive(nsim_dev->ports_ddir);
+err_ddir:
debugfs_remove_recursive(nsim_dev->ddir);
return err;
}
@@ -435,64 +439,70 @@ static int nsim_dev_resources_register(struct devlink *devlink)
int err;
/* Resources for IPv4 */
- err = devlink_resource_register(devlink, "IPv4", (u64)-1,
- NSIM_RESOURCE_IPV4,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
+ err = devl_resource_register(devlink, "IPv4", (u64)-1,
+ NSIM_RESOURCE_IPV4,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
if (err) {
pr_err("Failed to register IPv4 top resource\n");
- goto out;
+ goto err_out;
}
- err = devlink_resource_register(devlink, "fib", (u64)-1,
- NSIM_RESOURCE_IPV4_FIB,
- NSIM_RESOURCE_IPV4, &params);
+ err = devl_resource_register(devlink, "fib", (u64)-1,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB resource\n");
- return err;
+ goto err_out;
}
- err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV4, &params);
+ err = devl_resource_register(devlink, "fib-rules", (u64)-1,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB rules resource\n");
- return err;
+ goto err_out;
}
/* Resources for IPv6 */
- err = devlink_resource_register(devlink, "IPv6", (u64)-1,
- NSIM_RESOURCE_IPV6,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
+ err = devl_resource_register(devlink, "IPv6", (u64)-1,
+ NSIM_RESOURCE_IPV6,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
if (err) {
pr_err("Failed to register IPv6 top resource\n");
- goto out;
+ goto err_out;
}
- err = devlink_resource_register(devlink, "fib", (u64)-1,
- NSIM_RESOURCE_IPV6_FIB,
- NSIM_RESOURCE_IPV6, &params);
+ err = devl_resource_register(devlink, "fib", (u64)-1,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB resource\n");
- return err;
+ goto err_out;
}
- err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- NSIM_RESOURCE_IPV6, &params);
+ err = devl_resource_register(devlink, "fib-rules", (u64)-1,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB rules resource\n");
- return err;
+ goto err_out;
}
/* Resources for nexthops */
- err = devlink_resource_register(devlink, "nexthops", (u64)-1,
- NSIM_RESOURCE_NEXTHOPS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
+ err = devl_resource_register(devlink, "nexthops", (u64)-1,
+ NSIM_RESOURCE_NEXTHOPS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register NEXTHOPS resource\n");
+ goto err_out;
+ }
+ return 0;
-out:
+err_out:
+ devl_resources_unregister(devlink);
return err;
}
@@ -556,17 +566,20 @@ static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
struct devlink *devlink)
{
nsim_dev->dummy_region =
- devlink_region_create(devlink, &dummy_region_ops,
- NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
- NSIM_DEV_DUMMY_REGION_SIZE);
+ devl_region_create(devlink, &dummy_region_ops,
+ NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
+ NSIM_DEV_DUMMY_REGION_SIZE);
return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
}
static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
{
- devlink_region_destroy(nsim_dev->dummy_region);
+ devl_region_destroy(nsim_dev->dummy_region);
}
+static int
+__nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
+ unsigned int port_index);
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
@@ -575,12 +588,10 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
struct devlink *devlink = priv_to_devlink(nsim_dev);
struct nsim_dev_port *nsim_dev_port, *tmp;
- devlink_rate_nodes_destroy(devlink);
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_rate_nodes_destroy(devlink);
list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
if (nsim_dev_port_is_vf(nsim_dev_port))
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
return 0;
}
@@ -588,11 +599,11 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack)
{
- struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct nsim_dev_port *nsim_dev_port, *tmp;
int i, err;
for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
- err = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
@@ -603,8 +614,9 @@ static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
return 0;
err_port_add_vfs:
- for (i--; i >= 0; i--)
- nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ __nsim_dev_port_del(nsim_dev_port);
return err;
}
@@ -612,22 +624,16 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- int err = 0;
- mutex_lock(&nsim_dev->vfs_lock);
if (mode == nsim_dev->esw_mode)
- goto unlock;
+ return 0;
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- err = nsim_esw_legacy_enable(nsim_dev, extack);
- else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- err = nsim_esw_switchdev_enable(nsim_dev, extack);
- else
- err = -EINVAL;
+ return nsim_esw_legacy_enable(nsim_dev, extack);
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return nsim_esw_switchdev_enable(nsim_dev, extack);
-unlock:
- mutex_unlock(&nsim_dev->vfs_lock);
- return err;
+ return -EINVAL;
}
static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
@@ -835,14 +841,18 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
/* For each running port and enabled packet trap, generate a UDP
* packet with a random 5-tuple and report it.
*/
- mutex_lock(&nsim_dev->port_list_lock);
+ if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
+ return;
+ }
+
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
nsim_dev_trap_report(nsim_dev_port);
}
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
@@ -883,18 +893,18 @@ static int nsim_dev_traps_init(struct devlink *devlink)
nsim_trap_data->nsim_dev = nsim_dev;
nsim_dev->trap_data = nsim_trap_data;
- err = devlink_trap_policers_register(devlink, nsim_trap_policers_arr,
- policers_count);
+ err = devl_trap_policers_register(devlink, nsim_trap_policers_arr,
+ policers_count);
if (err)
goto err_trap_policers_cnt_free;
- err = devlink_trap_groups_register(devlink, nsim_trap_groups_arr,
- ARRAY_SIZE(nsim_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
if (err)
goto err_trap_policers_unregister;
- err = devlink_traps_register(devlink, nsim_traps_arr,
- ARRAY_SIZE(nsim_traps_arr), NULL);
+ err = devl_traps_register(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr), NULL);
if (err)
goto err_trap_groups_unregister;
@@ -906,11 +916,11 @@ static int nsim_dev_traps_init(struct devlink *devlink)
return 0;
err_trap_groups_unregister:
- devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
- ARRAY_SIZE(nsim_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
err_trap_policers_unregister:
- devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
- ARRAY_SIZE(nsim_trap_policers_arr));
+ devl_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
err_trap_policers_cnt_free:
kfree(nsim_trap_data->trap_policers_cnt_arr);
err_trap_items_free:
@@ -924,13 +934,14 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
+	/* caution: the trap report work takes the devlink lock */
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
- devlink_traps_unregister(devlink, nsim_traps_arr,
- ARRAY_SIZE(nsim_traps_arr));
- devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
- ARRAY_SIZE(nsim_trap_groups_arr));
- devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
- ARRAY_SIZE(nsim_trap_policers_arr));
+ devl_traps_unregister(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr));
+ devl_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ devl_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
kfree(nsim_dev->trap_data->trap_policers_cnt_arr);
kfree(nsim_dev->trap_data->trap_items_arr);
kfree(nsim_dev->trap_data);
@@ -945,24 +956,16 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- struct nsim_bus_dev *nsim_bus_dev;
-
- nsim_bus_dev = nsim_dev->nsim_bus_dev;
- if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
- return -EOPNOTSUPP;
if (nsim_dev->dont_allow_reload) {
/* For testing purposes, user set debugfs dont_allow_reload
* value to true. So forbid it.
*/
NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return -EOPNOTSUPP;
}
- nsim_bus_dev->in_reload = true;
nsim_dev_reload_destroy(nsim_dev);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return 0;
}
@@ -971,33 +974,35 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- struct nsim_bus_dev *nsim_bus_dev;
- int ret;
-
- nsim_bus_dev = nsim_dev->nsim_bus_dev;
- mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
- nsim_bus_dev->in_reload = false;
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
* value to true. Fail right away.
*/
NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return -EINVAL;
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- ret = nsim_dev_reload_create(nsim_dev, extack);
- mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
- return ret;
+
+ return nsim_dev_reload_create(nsim_dev, extack);
}
static int nsim_dev_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
- return devlink_info_driver_name_put(req, DRV_NAME);
+ int err;
+
+ err = devlink_info_driver_name_put(req, DRV_NAME);
+ if (err)
+ return err;
+ err = devlink_info_version_stored_put_ext(req, "fw.mgmt", "10.20.30",
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
+ if (err)
+ return err;
+ return devlink_info_version_running_put_ext(req, "fw.mgmt", "10.20.30",
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
#define NSIM_DEV_FLASH_SIZE 500000
@@ -1325,8 +1330,7 @@ nsim_dev_devlink_trap_drop_counter_get(struct devlink *devlink,
static const struct devlink_ops nsim_dev_devlink_ops = {
.eswitch_mode_set = nsim_devlink_eswitch_mode_set,
.eswitch_mode_get = nsim_devlink_eswitch_mode_get,
- .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
- DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
@@ -1380,8 +1384,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
devlink_port_attrs_set(devlink_port, &attrs);
- err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
- nsim_dev_port->port_index);
+ err = devl_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ nsim_dev_port->port_index);
if (err)
goto err_port_free;
@@ -1396,8 +1400,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
}
if (nsim_dev_port_is_vf(nsim_dev_port)) {
- err = devlink_rate_leaf_create(&nsim_dev_port->devlink_port,
- nsim_dev_port);
+ err = devl_rate_leaf_create(&nsim_dev_port->devlink_port,
+ nsim_dev_port);
if (err)
goto err_nsim_destroy;
}
@@ -1412,7 +1416,7 @@ err_nsim_destroy:
err_port_debugfs_exit:
nsim_dev_port_debugfs_exit(nsim_dev_port);
err_dl_port_unregister:
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
err_port_free:
kfree(nsim_dev_port);
return err;
@@ -1424,11 +1428,11 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
list_del(&nsim_dev_port->list);
if (nsim_dev_port_is_vf(nsim_dev_port))
- devlink_rate_leaf_destroy(&nsim_dev_port->devlink_port);
+ devl_rate_leaf_destroy(&nsim_dev_port->devlink_port);
devlink_port_type_clear(devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
kfree(nsim_dev_port);
}
@@ -1436,11 +1440,9 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
{
struct nsim_dev_port *nsim_dev_port, *tmp;
- mutex_lock(&nsim_dev->port_list_lock);
list_for_each_entry_safe(nsim_dev_port, tmp,
&nsim_dev->port_list, list)
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
}
static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
@@ -1470,7 +1472,6 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
devlink = priv_to_devlink(nsim_dev);
nsim_dev = devlink_priv(devlink);
INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
@@ -1498,10 +1499,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
if (err)
goto err_health_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_hwstats_exit;
+
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
@@ -1509,6 +1514,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
&nsim_dev_take_snapshot_fops);
return 0;
+err_hwstats_exit:
+ nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_health_exit:
@@ -1532,13 +1539,12 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_bus_dev->initial_net, &nsim_bus_dev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->vfs_lock);
- mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
@@ -1552,7 +1558,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
GFP_KERNEL | __GFP_NOWARN);
if (!nsim_dev->vfconfigs) {
err = -ENOMEM;
- goto err_devlink_free;
+ goto err_devlink_unlock;
}
err = nsim_dev_resources_register(devlink);
@@ -1595,15 +1601,22 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_bpf_dev_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_hwstats_exit;
+
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
+err_hwstats_exit:
+ nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_bpf_dev_exit:
@@ -1622,10 +1635,11 @@ err_params_unregister:
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
err_vfc_free:
kfree(nsim_dev->vfconfigs);
-err_devlink_free:
+err_devlink_unlock:
+ devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
return err;
@@ -1639,21 +1653,19 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
return;
debugfs_remove(nsim_dev->take_snapshot);
- mutex_lock(&nsim_dev->vfs_lock);
if (nsim_dev_get_vfs(nsim_dev)) {
nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
if (nsim_esw_mode_is_switchdev(nsim_dev))
nsim_esw_legacy_enable(nsim_dev, NULL);
}
- mutex_unlock(&nsim_dev->vfs_lock);
nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_hwstats_exit(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_fib_destroy(devlink, nsim_dev->fib_data);
nsim_dev_traps_exit(devlink);
nsim_dev_dummy_region_exit(nsim_dev);
- mutex_destroy(&nsim_dev->port_list_lock);
}
void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
@@ -1662,14 +1674,16 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
struct devlink *devlink = priv_to_devlink(nsim_dev);
devlink_unregister(devlink);
+ devl_lock(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
kfree(nsim_dev->vfconfigs);
+ devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
}
@@ -1693,12 +1707,12 @@ int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
err = __nsim_dev_port_add(nsim_dev, type, port_index);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
@@ -1709,13 +1723,13 @@ int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
struct nsim_dev_port *nsim_dev_port;
int err = 0;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
if (!nsim_dev_port)
err = -ENOENT;
else
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
@@ -1723,9 +1737,10 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
unsigned int num_vfs)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
int ret = 0;
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(devlink);
if (nsim_bus_dev->num_vfs == num_vfs)
goto exit_unlock;
if (nsim_bus_dev->num_vfs && num_vfs) {
@@ -1751,7 +1766,7 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
}
exit_unlock:
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(devlink);
return ret;
}
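The netdevsim/dev.c conversion replaces the driver-private vfs_lock and port_list_lock mutexes with the devlink instance lock: driver-initiated paths take it explicitly with devl_lock()/devl_unlock() and call the lock-assuming devl_* helpers (devl_port_register(), devl_resource_register(), and so on), while devlink-callback paths such as eswitch_mode_set and reload rely on the core already holding it. A sketch of the driver-initiated side (nsim_foo() is hypothetical):

static void nsim_foo(struct nsim_dev *nsim_dev)
{
	struct devlink *devlink = priv_to_devlink(nsim_dev);

	devl_lock(devlink);
	/* safe to walk nsim_dev->port_list and call devl_* helpers here */
	devl_unlock(devlink);
}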
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 4300261e2f9e..a1f91ff8ec56 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -22,6 +22,7 @@
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <net/fib_notifier.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
@@ -53,6 +54,7 @@ struct nsim_fib_data {
struct rhashtable nexthop_ht;
struct devlink *devlink;
struct work_struct fib_event_work;
+ struct work_struct fib_flush_work;
struct list_head fib_event_queue;
spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
struct mutex nh_lock; /* Protects NH HT */
@@ -60,6 +62,7 @@ struct nsim_fib_data {
bool fail_route_offload;
bool fail_res_nexthop_group_replace;
bool fail_nexthop_bucket_replace;
+ bool fail_route_delete;
};
struct nsim_fib_rt_key {
@@ -78,7 +81,7 @@ struct nsim_fib_rt {
struct nsim_fib4_rt {
struct nsim_fib_rt common;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -283,7 +286,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data,
fib4_rt->fi = fen_info->fi;
fib_info_hold(fib4_rt->fi);
- fib4_rt->tos = fen_info->tos;
+ fib4_rt->dscp = fen_info->dscp;
fib4_rt->type = fen_info->type;
return fib4_rt;
@@ -322,7 +325,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -342,7 +345,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net,
fri.tb_id = fib4_rt->common.key.tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_rt->tos;
+ fri.dscp = fib4_rt->dscp;
fri.type = fib4_rt->type;
fri.offload = false;
fri.trap = trap;
@@ -623,14 +626,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
if (err)
goto err_fib6_rt_nh_del;
- fib6_event->rt_arr[i]->trap = true;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
- fib6_event->rt_arr[i]->trap = false;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
@@ -913,6 +916,10 @@ static int nsim_fib4_prepare_event(struct fib_notifier_info *info,
}
break;
case FIB_EVENT_ENTRY_DEL:
+ if (data->fail_route_delete) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
+ return -EINVAL;
+ }
nsim_fib_account(&data->ipv4.fib, false);
break;
}
@@ -951,6 +958,11 @@ static int nsim_fib6_prepare_event(struct fib_notifier_info *info,
}
break;
case FIB_EVENT_ENTRY_DEL:
+ if (data->fail_route_delete) {
+ err = -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
+ goto err_fib6_event_fini;
+ }
nsim_fib_account(&data->ipv6.fib, false);
break;
}
@@ -977,7 +989,7 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
if (!fib_event)
- return NOTIFY_BAD;
+ goto err_fib_event_alloc;
fib_event->data = data;
fib_event->event = event;
@@ -1005,6 +1017,9 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
err_fib_prepare_event:
kfree(fib_event);
+err_fib_event_alloc:
+ if (event == FIB_EVENT_ENTRY_DEL)
+ schedule_work(&data->fib_flush_work);
return NOTIFY_BAD;
}
@@ -1452,7 +1467,7 @@ static void nsim_fib_set_max_all(struct nsim_fib_data *data,
int err;
u64 val;
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ err = devl_resource_size_get(devlink, res_ids[i], &val);
if (err)
val = (u64) -1;
nsim_fib_set_max(data, res_ids[i], val);
@@ -1482,6 +1497,24 @@ static void nsim_fib_event_work(struct work_struct *work)
mutex_unlock(&data->fib_lock);
}
+static void nsim_fib_flush_work(struct work_struct *work)
+{
+ struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
+ fib_flush_work);
+ struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+ /* Process pending work. */
+ flush_work(&data->fib_event_work);
+
+ mutex_lock(&data->fib_lock);
+ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_rt_free(fib_rt, data);
+ }
+ mutex_unlock(&data->fib_lock);
+}
+
static int
nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
{
@@ -1503,6 +1536,10 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir,
data, &nsim_nexthop_bucket_activity_fops);
+
+ data->fail_route_delete = false;
+ debugfs_create_bool("fail_route_delete", 0600, data->ddir,
+ &data->fail_route_delete);
return 0;
}
@@ -1540,6 +1577,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
goto err_rhashtable_nexthop_destroy;
INIT_WORK(&data->fib_event_work, nsim_fib_event_work);
+ INIT_WORK(&data->fib_flush_work, nsim_fib_flush_work);
INIT_LIST_HEAD(&data->fib_event_queue);
spin_lock_init(&data->fib_event_queue_lock);
@@ -1561,31 +1599,32 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
goto err_nexthop_nb_unregister;
}
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_fib_ipv4_resource_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_fib_ipv4_rules_res_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_fib_ipv6_resource_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_fib_ipv6_rules_res_occ_get,
- data);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_NEXTHOPS,
- nsim_fib_nexthops_res_occ_get,
- data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_fib_ipv4_resource_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_fib_ipv4_rules_res_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_fib_ipv6_resource_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_fib_ipv6_rules_res_occ_get,
+ data);
+ devl_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_NEXTHOPS,
+ nsim_fib_nexthops_res_occ_get,
+ data);
return data;
err_nexthop_nb_unregister:
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
err_rhashtable_fib_destroy:
+ cancel_work_sync(&data->fib_flush_work);
flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
@@ -1603,18 +1642,19 @@ err_data_free:
void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
{
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_NEXTHOPS);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV6_FIB);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES);
- devlink_resource_occ_get_unregister(devlink,
- NSIM_RESOURCE_IPV4_FIB);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_NEXTHOPS);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES);
+ devl_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
+ cancel_work_sync(&data->fib_flush_work);
flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
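A note on the design choice in the fib.c change above: a route-deletion notification must never be dropped silently, since the simulated hardware table would then keep a stale entry forever. When the GFP_ATOMIC allocation of the event fails, the driver therefore schedules fib_flush_work and discards the whole mirrored table. A hedged sketch of that recovery pattern; the my_-prefixed names are hypothetical, while kzalloc(), schedule_work() and the FIB_EVENT_*/NOTIFY_* constants are real kernel APIs:

	/* Sketch, not the driver's literal code. */
	static int my_fib_event(struct my_fib_data *data, unsigned long event)
	{
		struct my_fib_event *e = kzalloc(sizeof(*e), GFP_ATOMIC);

		if (!e) {
			/* Losing a deletion would leave stale state behind,
			 * so fall back to flushing the entire mirrored FIB.
			 */
			if (event == FIB_EVENT_ENTRY_DEL)
				schedule_work(&data->flush_work);
			return NOTIFY_BAD;
		}

		/* ... otherwise queue 'e' for the ordered event worker ... */
		return NOTIFY_DONE;
	}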
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
new file mode 100644
index 000000000000..0e58aa7f0374
--- /dev/null
+++ b/drivers/net/netdevsim/hwstats.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include "netdevsim.h"
+
+#define NSIM_DEV_HWSTATS_TRAFFIC_MS 100
+
+static struct list_head *
+nsim_dev_hwstats_get_list_head(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ switch (type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ return &hwstats->l3_list;
+ }
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void nsim_dev_hwstats_traffic_bump(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ list_for_each_entry(hwsdev, hwsdev_list, list) {
+ if (hwsdev->enabled) {
+ hwsdev->stats.rx_packets += 1;
+ hwsdev->stats.tx_packets += 2;
+ hwsdev->stats.rx_bytes += 100;
+ hwsdev->stats.tx_bytes += 300;
+ }
+ }
+}
+
+static void nsim_dev_hwstats_traffic_work(struct work_struct *work)
+{
+ struct nsim_dev_hwstats *hwstats;
+
+ hwstats = container_of(work, struct nsim_dev_hwstats, traffic_dw.work);
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ nsim_dev_hwstats_traffic_bump(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ schedule_delayed_work(&hwstats->traffic_dw,
+ msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+}
+
+static struct nsim_dev_hwstats_netdev *
+nsim_dev_hwslist_find_hwsdev(struct list_head *hwsdev_list,
+ int ifindex)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+
+ list_for_each_entry(hwsdev, hwsdev_list, list) {
+ if (hwsdev->netdev->ifindex == ifindex)
+ return hwsdev;
+ }
+
+ return NULL;
+}
+
+static int nsim_dev_hwsdev_enable(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netlink_ext_ack *extack)
+{
+ if (hwsdev->fail_enable) {
+ hwsdev->fail_enable = false;
+ NL_SET_ERR_MSG_MOD(extack, "Stats enablement set to fail");
+ return -ECANCELED;
+ }
+
+ hwsdev->enabled = true;
+ return 0;
+}
+
+static void nsim_dev_hwsdev_disable(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+ hwsdev->enabled = false;
+ memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+}
+
+static int
+nsim_dev_hwsdev_report_delta(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ netdev_offload_xstats_report_delta(info->report_delta, &hwsdev->stats);
+ memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+ return 0;
+}
+
+static void
+nsim_dev_hwsdev_report_used(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (hwsdev->enabled)
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int nsim_dev_hwstats_event_off_xstats(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_offload_xstats_info *info;
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+ int err = 0;
+
+ info = ptr;
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, info->type);
+ if (!hwsdev_list)
+ return 0;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+ if (!hwsdev)
+ goto out;
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ err = nsim_dev_hwsdev_enable(hwsdev, info->info.extack);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ nsim_dev_hwsdev_disable(hwsdev);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ nsim_dev_hwsdev_report_used(hwsdev, info);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ err = nsim_dev_hwsdev_report_delta(hwsdev, info);
+ break;
+ }
+
+out:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+static void nsim_dev_hwsdev_fini(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+ dev_put(hwsdev->netdev);
+ kfree(hwsdev);
+}
+
+static void
+__nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+ if (!hwsdev)
+ return;
+
+ list_del(&hwsdev->list);
+ nsim_dev_hwsdev_fini(hwsdev);
+}
+
+static void nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev)
+{
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ __nsim_dev_hwstats_event_unregister(hwstats, dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+static int nsim_dev_hwstats_event(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return nsim_dev_hwstats_event_off_xstats(hwstats, dev,
+ event, ptr);
+ case NETDEV_UNREGISTER:
+ nsim_dev_hwstats_event_unregister(hwstats, dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int nsim_dev_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nsim_dev_hwstats *hwstats;
+ int err = 0;
+
+ hwstats = container_of(nb, struct nsim_dev_hwstats, netdevice_nb);
+ err = nsim_dev_hwstats_event(hwstats, dev, event, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
+ return NOTIFY_OK;
+}
+
+static int
+nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct nsim_dev *nsim_dev;
+ struct net_device *netdev;
+ bool notify = false;
+ struct net *net;
+ int err = 0;
+
+ nsim_dev = container_of(hwstats, struct nsim_dev, hwstats);
+ net = nsim_dev_net(nsim_dev);
+
+ rtnl_lock();
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (hwsdev)
+ goto out_unlock_list;
+
+ netdev = dev_get_by_index(net, ifindex);
+ if (!netdev) {
+ err = -ENODEV;
+ goto out_unlock_list;
+ }
+
+ hwsdev = kzalloc(sizeof(*hwsdev), GFP_KERNEL);
+ if (!hwsdev) {
+ err = -ENOMEM;
+ goto out_put_netdev;
+ }
+
+ hwsdev->netdev = netdev;
+ list_add_tail(&hwsdev->list, hwsdev_list);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ if (netdev_offload_xstats_enabled(netdev, type)) {
+ nsim_dev_hwsdev_enable(hwsdev, NULL);
+ notify = true;
+ }
+
+ if (notify)
+ rtnl_offload_xstats_notify(netdev);
+ rtnl_unlock();
+ return err;
+
+out_put_netdev:
+ dev_put(netdev);
+out_unlock_list:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ rtnl_unlock();
+ return err;
+}
+
+static int
+nsim_dev_hwstats_disable_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ int err = 0;
+
+ rtnl_lock();
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (hwsdev)
+ list_del(&hwsdev->list);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ if (!hwsdev) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (netdev_offload_xstats_enabled(hwsdev->netdev, type)) {
+ netdev_offload_xstats_push_delta(hwsdev->netdev, type,
+ &hwsdev->stats);
+ rtnl_offload_xstats_notify(hwsdev->netdev);
+ }
+ nsim_dev_hwsdev_fini(hwsdev);
+
+unlock_out:
+ rtnl_unlock();
+ return err;
+}
+
+static int
+nsim_dev_hwstats_fail_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ int err = 0;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (!hwsdev) {
+ err = -ENOENT;
+ goto err_hwsdev_list_unlock;
+ }
+
+ hwsdev->fail_enable = true;
+
+err_hwsdev_list_unlock:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+enum nsim_dev_hwstats_do {
+ NSIM_DEV_HWSTATS_DO_DISABLE,
+ NSIM_DEV_HWSTATS_DO_ENABLE,
+ NSIM_DEV_HWSTATS_DO_FAIL,
+};
+
+struct nsim_dev_hwstats_fops {
+ const struct file_operations fops;
+ enum nsim_dev_hwstats_do action;
+ enum netdev_offload_xstats_type type;
+};
+
+static ssize_t
+nsim_dev_hwstats_do_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_hwstats *hwstats = file->private_data;
+ struct nsim_dev_hwstats_fops *hwsfops;
+ struct list_head *hwsdev_list;
+ int ifindex;
+ int err;
+
+ hwsfops = container_of(debugfs_real_fops(file),
+ struct nsim_dev_hwstats_fops, fops);
+
+ err = kstrtoint_from_user(data, count, 0, &ifindex);
+ if (err)
+ return err;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, hwsfops->type);
+ if (WARN_ON(!hwsdev_list))
+ return -EINVAL;
+
+ switch (hwsfops->action) {
+ case NSIM_DEV_HWSTATS_DO_DISABLE:
+ err = nsim_dev_hwstats_disable_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ case NSIM_DEV_HWSTATS_DO_ENABLE:
+ err = nsim_dev_hwstats_enable_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ case NSIM_DEV_HWSTATS_DO_FAIL:
+ err = nsim_dev_hwstats_fail_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ }
+ if (err)
+ return err;
+
+ return count;
+}
+
+#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
+ { \
+ .fops = { \
+ .open = simple_open, \
+ .write = nsim_dev_hwstats_do_write, \
+ .llseek = generic_file_llseek, \
+ .owner = THIS_MODULE, \
+ }, \
+ .action = ACTION, \
+ .type = TYPE, \
+ }
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_disable_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_enable_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_fail_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_FAIL,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+#undef NSIM_DEV_HWSTATS_FOPS
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+ struct net *net = nsim_dev_net(nsim_dev);
+ int err;
+
+ mutex_init(&hwstats->hwsdev_list_lock);
+ INIT_LIST_HEAD(&hwstats->l3_list);
+
+ hwstats->netdevice_nb.notifier_call = nsim_dev_netdevice_event;
+ err = register_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+ if (err)
+ goto err_mutex_destroy;
+
+ hwstats->ddir = debugfs_create_dir("hwstats", nsim_dev->ddir);
+ if (IS_ERR(hwstats->ddir)) {
+ err = PTR_ERR(hwstats->ddir);
+ goto err_unregister_notifier;
+ }
+
+ hwstats->l3_ddir = debugfs_create_dir("l3", hwstats->ddir);
+ if (IS_ERR(hwstats->l3_ddir)) {
+ err = PTR_ERR(hwstats->l3_ddir);
+ goto err_remove_hwstats_recursive;
+ }
+
+ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops.fops);
+ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops.fops);
+ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops.fops);
+
+ INIT_DELAYED_WORK(&hwstats->traffic_dw,
+ &nsim_dev_hwstats_traffic_work);
+ schedule_delayed_work(&hwstats->traffic_dw,
+ msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+ return 0;
+
+err_remove_hwstats_recursive:
+ debugfs_remove_recursive(hwstats->ddir);
+err_unregister_notifier:
+ unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+err_mutex_destroy:
+ mutex_destroy(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+static void nsim_dev_hwsdev_list_wipe(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev, *tmp;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ list_for_each_entry_safe(hwsdev, tmp, hwsdev_list, list) {
+ list_del(&hwsdev->list);
+ nsim_dev_hwsdev_fini(hwsdev);
+ }
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+ struct net *net = nsim_dev_net(nsim_dev);
+
+ cancel_delayed_work_sync(&hwstats->traffic_dw);
+ debugfs_remove_recursive(hwstats->ddir);
+ unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+ nsim_dev_hwsdev_list_wipe(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_destroy(&hwstats->hwsdev_list_lock);
+}
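hwstats.c above attaches per-file metadata (action, stats type) to otherwise identical debugfs write handlers by embedding the file_operations in a wrapper struct and recovering the wrapper with container_of(). A minimal sketch of the same trick, with my_-prefixed names as stand-ins; debugfs_real_fops() is the real debugfs helper:

	struct my_dbg_fops {
		const struct file_operations fops;	/* must stay embedded */
		int action;				/* per-file metadata */
	};

	static ssize_t my_dbg_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
	{
		struct my_dbg_fops *mf;

		/* debugfs_real_fops() returns the fops the file was created
		 * with, so container_of() recovers the enclosing wrapper.
		 */
		mf = container_of(debugfs_real_fops(file),
				  struct my_dbg_fops, fops);

		/* ... dispatch on mf->action ... */
		return count;
	}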
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index b80ed2ffd45e..386336a38f34 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -171,7 +171,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
return ret;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa.rx = true;
if (xs->props.family == AF_INET6)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e470e3398abc..9a1a5b203624 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&ns->syncp);
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
}
static int
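The netdev.c hunk above switches the stats reader to the _irq flavour of the u64_stats helpers: on 32-bit hosts the seqcount writer may run from (soft)irq context, and a reader interrupted on the same CPU could otherwise spin forever. A sketch of the read-retry loop as it looks after this change; my_priv is a stand-in for the driver's private struct, and the u64_stats_* helpers are the real API at this point in the kernel's history (later kernels folded the _irq variants back into the plain ones):

	/* Sketch: reads a 64-bit counter consistently on 32-bit SMP. */
	static u64 my_read_tx_bytes(struct my_priv *p)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			val = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		return val;
	}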
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index c49771f27f17..7d8ed8d8df5c 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -184,6 +184,28 @@ struct nsim_dev_health {
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+struct nsim_dev_hwstats_netdev {
+ struct list_head list;
+ struct net_device *netdev;
+ struct rtnl_hw_stats64 stats;
+ bool enabled;
+ bool fail_enable;
+};
+
+struct nsim_dev_hwstats {
+ struct dentry *ddir;
+ struct dentry *l3_ddir;
+
+ struct mutex hwsdev_list_lock; /* protects hwsdev list(s) */
+ struct list_head l3_list;
+
+ struct notifier_block netdevice_nb;
+ struct delayed_work traffic_dw;
+};
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev);
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev);
+
#if IS_ENABLED(CONFIG_PSAMPLE)
int nsim_dev_psample_init(struct nsim_dev *nsim_dev);
void nsim_dev_psample_exit(struct nsim_dev *nsim_dev);
@@ -239,7 +261,6 @@ struct nsim_dev {
struct dentry *take_snapshot;
struct dentry *nodes_ddir;
- struct mutex vfs_lock; /* Protects vfconfigs */
struct nsim_vf_config *vfconfigs;
struct bpf_offload_dev *bpf_dev;
@@ -252,7 +273,6 @@ struct nsim_dev {
struct list_head bpf_bound_maps;
struct netdev_phys_item_id switch_id;
struct list_head port_list;
- struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
u32 fw_update_overwrite_mask;
u32 max_macs;
@@ -261,6 +281,7 @@ struct nsim_dev {
bool fail_reload;
struct devlink_region *dummy_region;
struct nsim_dev_health health;
+ struct nsim_dev_hwstats hwstats;
struct flow_action_cookie *fa_cookie;
spinlock_t fa_cookie_lock; /* protects fa_cookie */
bool fail_trap_group_set;
@@ -355,9 +376,6 @@ struct nsim_bus_dev {
*/
unsigned int max_vfs;
unsigned int num_vfs;
- /* Lock for devlink->reload_enabled in netdevsim module */
- struct mutex nsim_bus_reload_lock;
- bool in_reload;
bool init;
};
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 98ca6b18415e..464d88ca8ab0 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
- if (netif_rx(skb) == NET_RX_DROP) {
+ if (__netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
@@ -364,9 +364,9 @@ static void ntb_get_drvinfo(struct net_device *ndev,
{
struct ntb_netdev *dev = netdev_priv(ndev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
static int ntb_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 22ba7b0b476d..6e7e6c346a3e 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -6,8 +6,8 @@
menu "PCS device drivers"
config PCS_XPCS
- tristate "Synopsys DesignWare XPCS controller"
- depends on MDIO_DEVICE && MDIO_BUS
+ tristate
+ select PHYLINK
help
This module provides helper functions for Synopsys DesignWare XPCS
controllers.
@@ -18,4 +18,18 @@ config PCS_LYNX
This module provides helpers to phylink for managing the Lynx PCS
which is part of the Layerscape and QorIQ Ethernet SERDES.
+config PCS_RZN1_MIIC
+ tristate "Renesas RZ/N1 MII converter"
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ help
+ This module provides a driver for the MII converter that is available
+ on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
+ pass-through mode for MII.
+
+config PCS_ALTERA_TSE
+ tristate
+ help
+ This module provides helper functions for the Altera Triple Speed
+	  Ethernet SGMII PCS, which can be found on the Intel SoCFPGA family.
+
endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index 0603d469bd57..4c780d8f2e98 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -5,3 +5,5 @@ pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
+obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
+obj-$(CONFIG_PCS_ALTERA_TSE) += pcs-altera-tse.o
diff --git a/drivers/net/pcs/pcs-altera-tse.c b/drivers/net/pcs/pcs-altera-tse.c
new file mode 100644
index 000000000000..97a7cabff962
--- /dev/null
+++ b/drivers/net/pcs/pcs-altera-tse.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Bootlin
+ *
+ * Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/pcs-altera-tse.h>
+
+/* SGMII PCS register addresses
+ */
+#define SGMII_PCS_SCRATCH 0x10
+#define SGMII_PCS_REV 0x11
+#define SGMII_PCS_LINK_TIMER_0 0x12
+#define SGMII_PCS_LINK_TIMER_REG(x) (0x12 + (x))
+#define SGMII_PCS_LINK_TIMER_1 0x13
+#define SGMII_PCS_IF_MODE 0x14
+#define PCS_IF_MODE_SGMII_ENA BIT(0)
+#define PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define PCS_IF_MODE_SGMI_SPEED_MASK GENMASK(3, 2)
+#define PCS_IF_MODE_SGMI_SPEED_10 (0 << 2)
+#define PCS_IF_MODE_SGMI_SPEED_100 (1 << 2)
+#define PCS_IF_MODE_SGMI_SPEED_1000 (2 << 2)
+#define PCS_IF_MODE_SGMI_HALF_DUPLEX BIT(4)
+#define PCS_IF_MODE_SGMI_PHY_AN BIT(5)
+#define SGMII_PCS_DIS_READ_TO 0x15
+#define SGMII_PCS_READ_TO 0x16
+#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
+
+struct altera_tse_pcs {
+ struct phylink_pcs pcs;
+ void __iomem *base;
+ int reg_width;
+};
+
+static struct altera_tse_pcs *phylink_pcs_to_tse_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct altera_tse_pcs, pcs);
+}
+
+static u16 tse_pcs_read(struct altera_tse_pcs *tse_pcs, int regnum)
+{
+ if (tse_pcs->reg_width == 4)
+ return readl(tse_pcs->base + regnum * 4);
+ else
+ return readw(tse_pcs->base + regnum * 2);
+}
+
+static void tse_pcs_write(struct altera_tse_pcs *tse_pcs, int regnum,
+ u16 value)
+{
+ if (tse_pcs->reg_width == 4)
+ writel(value, tse_pcs->base + regnum * 4);
+ else
+ writew(value, tse_pcs->base + regnum * 2);
+}
+
+static int tse_pcs_reset(struct altera_tse_pcs *tse_pcs)
+{
+ int i = 0;
+ u16 bmcr;
+
+ /* Reset PCS block */
+ bmcr = tse_pcs_read(tse_pcs, MII_BMCR);
+ bmcr |= BMCR_RESET;
+ tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
+
+ for (i = 0; i < SGMII_PCS_SW_RESET_TIMEOUT; i++) {
+ if (!(tse_pcs_read(tse_pcs, MII_BMCR) & BMCR_RESET))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int alt_tse_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ return 1;
+
+ return -EINVAL;
+}
+
+static int alt_tse_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u32 ctrl, if_mode;
+
+ ctrl = tse_pcs_read(tse_pcs, MII_BMCR);
+ if_mode = tse_pcs_read(tse_pcs, SGMII_PCS_IF_MODE);
+
+ /* Set link timer to 1.6ms, as per the MegaCore Function User Guide */
+ tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_0, 0x0D40);
+ tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_1, 0x03);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ if_mode |= PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ if_mode &= ~(PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA);
+ if_mode |= PCS_IF_MODE_SGMI_SPEED_1000;
+ }
+
+ ctrl |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
+
+ tse_pcs_write(tse_pcs, MII_BMCR, ctrl);
+ tse_pcs_write(tse_pcs, SGMII_PCS_IF_MODE, if_mode);
+
+ return tse_pcs_reset(tse_pcs);
+}
+
+static void alt_tse_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u16 bmsr, lpa;
+
+ bmsr = tse_pcs_read(tse_pcs, MII_BMSR);
+ lpa = tse_pcs_read(tse_pcs, MII_LPA);
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+}
+
+static void alt_tse_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u16 bmcr;
+
+ bmcr = tse_pcs_read(tse_pcs, MII_BMCR);
+ bmcr |= BMCR_ANRESTART;
+ tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
+
+ /* This PCS seems to require a soft reset to re-sync the AN logic */
+ tse_pcs_reset(tse_pcs);
+}
+
+static const struct phylink_pcs_ops alt_tse_pcs_ops = {
+ .pcs_validate = alt_tse_pcs_validate,
+ .pcs_get_state = alt_tse_pcs_get_state,
+ .pcs_config = alt_tse_pcs_config,
+ .pcs_an_restart = alt_tse_pcs_an_restart,
+};
+
+struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
+ void __iomem *pcs_base, int reg_width)
+{
+ struct altera_tse_pcs *tse_pcs;
+
+ if (reg_width != 4 && reg_width != 2)
+ return ERR_PTR(-EINVAL);
+
+ tse_pcs = devm_kzalloc(&ndev->dev, sizeof(*tse_pcs), GFP_KERNEL);
+ if (!tse_pcs)
+ return ERR_PTR(-ENOMEM);
+
+ tse_pcs->pcs.ops = &alt_tse_pcs_ops;
+ tse_pcs->base = pcs_base;
+ tse_pcs->reg_width = reg_width;
+
+ return &tse_pcs->pcs;
+}
+EXPORT_SYMBOL_GPL(alt_tse_pcs_create);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Altera TSE PCS driver");
+MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index fd3445374955..7d5fc7f54b2f 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -71,12 +71,10 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
struct phylink_link_state *state)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
int bmsr, lpa;
- bmsr = mdiobus_read(bus, addr, MII_BMSR);
- lpa = mdiobus_read(bus, addr, MII_LPA);
+ bmsr = mdiodev_read(pcs, MII_BMSR);
+ lpa = mdiodev_read(pcs, MII_LPA);
if (bmsr < 0 || lpa < 0) {
state->link = false;
return;
@@ -124,57 +122,39 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
state->link, state->an_enabled, state->an_complete);
}
-static int lynx_pcs_config_1000basex(struct mdio_device *pcs,
- unsigned int mode,
- const unsigned long *advertising)
+static int lynx_pcs_config_giga(struct mdio_device *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u32 link_timer;
- int err;
-
- link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
- mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
- mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
-
- err = mdiobus_modify(bus, addr, IF_MODE,
- IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
- 0);
- if (err)
- return err;
-
- return phylink_mii_c22_pcs_config(pcs, mode,
- PHY_INTERFACE_MODE_1000BASEX,
- advertising);
-}
-
-static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
- const unsigned long *advertising)
-{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u16 if_mode;
int err;
- if_mode = IF_MODE_SGMII_EN;
- if (mode == MLO_AN_INBAND) {
- u32 link_timer;
-
- if_mode |= IF_MODE_USE_SGMII_AN;
-
- /* Adjust link timer for SGMII */
- link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
- mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
- mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+ if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
+ mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+
+ if_mode = 0;
+ } else {
+ if_mode = IF_MODE_SGMII_EN;
+ if (mode == MLO_AN_INBAND) {
+ if_mode |= IF_MODE_USE_SGMII_AN;
+
+ /* Adjust link timer for SGMII */
+ link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
+ mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+ }
}
- err = mdiobus_modify(bus, addr, IF_MODE,
+
+ err = mdiodev_modify(pcs, IF_MODE,
IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
if_mode);
if (err)
return err;
- return phylink_mii_c22_pcs_config(pcs, mode, PHY_INTERFACE_MODE_SGMII,
- advertising);
+ return phylink_mii_c22_pcs_config(pcs, mode, interface, advertising);
}
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, unsigned int mode,
@@ -204,10 +184,10 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
switch (ifmode) {
case PHY_INTERFACE_MODE_1000BASEX:
- return lynx_pcs_config_1000basex(lynx->mdio, mode, advertising);
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
+ return lynx_pcs_config_giga(lynx->mdio, mode, ifmode,
+ advertising);
case PHY_INTERFACE_MODE_2500BASEX:
if (phylink_autoneg_inband(mode)) {
dev_err(&lynx->mdio->dev,
@@ -237,9 +217,7 @@ static void lynx_pcs_an_restart(struct phylink_pcs *pcs)
static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
int speed, int duplex)
{
- struct mii_bus *bus = pcs->bus;
u16 if_mode = 0, sgmii_speed;
- int addr = pcs->addr;
/* The PCS needs to be configured manually only
* when not operating on in-band mode
@@ -269,7 +247,7 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
}
if_mode |= IF_MODE_SPEED(sgmii_speed);
- mdiobus_modify(bus, addr, IF_MODE,
+ mdiodev_modify(pcs, IF_MODE,
IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
if_mode);
}
@@ -294,8 +272,6 @@ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
unsigned int mode,
int speed, int duplex)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
u16 if_mode = 0;
if (mode == MLO_AN_INBAND) {
@@ -307,7 +283,7 @@ static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
if_mode |= IF_MODE_HALF_DUPLEX;
if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
- mdiobus_modify(bus, addr, IF_MODE,
+ mdiodev_modify(pcs, IF_MODE,
IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
if_mode);
}
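The pcs-lynx.c conversion above replaces open-coded mdiobus_read/write/modify(bus, addr, ...) pairs with the mdiodev_*() accessors, which carry the bus and address inside the mdio_device. A one-function sketch of the shape this gives call sites; my_set_sgmii_an is hypothetical, IF_MODE/IF_MODE_USE_SGMII_AN are the driver's own register definitions, and mdiodev_modify() is the real helper:

	/* Sketch: toggle in-band AN via the mdio_device-based accessor. */
	static int my_set_sgmii_an(struct mdio_device *pcs, bool enable)
	{
		return mdiodev_modify(pcs, IF_MODE, IF_MODE_USE_SGMII_AN,
				      enable ? IF_MODE_USE_SGMII_AN : 0);
	}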
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
new file mode 100644
index 000000000000..c1424119e821
--- /dev/null
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mdio.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/pm_runtime.h>
+#include <dt-bindings/net/pcs-rzn1-miic.h>
+
+#define MIIC_PRCMD 0x0
+#define MIIC_ESID_CODE 0x4
+
+#define MIIC_MODCTRL 0x20
+#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
+
+#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
+
+#define MIIC_CONVCTRL_CONV_SPEED GENMASK(1, 0)
+#define CONV_MODE_10MBPS 0
+#define CONV_MODE_100MBPS 1
+#define CONV_MODE_1000MBPS 2
+
+#define MIIC_CONVCTRL_CONV_MODE GENMASK(3, 2)
+#define CONV_MODE_MII 0
+#define CONV_MODE_RMII 1
+#define CONV_MODE_RGMII 2
+
+#define MIIC_CONVCTRL_FULLD BIT(8)
+#define MIIC_CONVCTRL_RGMII_LINK BIT(12)
+#define MIIC_CONVCTRL_RGMII_DUPLEX BIT(13)
+#define MIIC_CONVCTRL_RGMII_SPEED GENMASK(15, 14)
+
+#define MIIC_CONVRST 0x114
+#define MIIC_CONVRST_PHYIF_RST(port) BIT(port)
+#define MIIC_CONVRST_PHYIF_RST_MASK GENMASK(4, 0)
+
+#define MIIC_SWCTRL 0x304
+#define MIIC_SWDUPC 0x308
+
+#define MIIC_MAX_NR_PORTS 5
+
+#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_NONE -1
+
+/**
+ * struct modctrl_match - Matching table entry for convctrl configuration
+ * See section 8.2.1 of the manual.
+ * @mode_cfg: Configuration value for convctrl
+ * @conv: Configuration of the Ethernet port muxes. First index is SWITCH_PORTIN,
+ *	  then indexes 1-5 are CONV1-CONV5.
+ */
+struct modctrl_match {
+ u32 mode_cfg;
+ u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+};
+
+static struct modctrl_match modctrl_match_table[] = {
+ {0x0, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x1, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x2, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x3, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+ {0x8, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x9, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0xA, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0xB, {MIIC_RTOS_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}},
+
+ {0x10, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SERCOS_PORTB, MIIC_SERCOS_PORTA}},
+ {0x11, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x12, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_ETHERCAT_PORTC, MIIC_ETHERCAT_PORTB, MIIC_ETHERCAT_PORTA}},
+ {0x13, {MIIC_GMAC2_PORT, MIIC_GMAC1_PORT, MIIC_SWITCH_PORTD,
+ MIIC_SWITCH_PORTC, MIIC_SWITCH_PORTB, MIIC_SWITCH_PORTA}}
+};
+
+static const char * const conf_to_string[] = {
+ [MIIC_GMAC1_PORT] = "GMAC1_PORT",
+ [MIIC_GMAC2_PORT] = "GMAC2_PORT",
+ [MIIC_RTOS_PORT] = "RTOS_PORT",
+ [MIIC_SERCOS_PORTA] = "SERCOS_PORTA",
+ [MIIC_SERCOS_PORTB] = "SERCOS_PORTB",
+ [MIIC_ETHERCAT_PORTA] = "ETHERCAT_PORTA",
+ [MIIC_ETHERCAT_PORTB] = "ETHERCAT_PORTB",
+ [MIIC_ETHERCAT_PORTC] = "ETHERCAT_PORTC",
+ [MIIC_SWITCH_PORTA] = "SWITCH_PORTA",
+ [MIIC_SWITCH_PORTB] = "SWITCH_PORTB",
+ [MIIC_SWITCH_PORTC] = "SWITCH_PORTC",
+ [MIIC_SWITCH_PORTD] = "SWITCH_PORTD",
+ [MIIC_HSR_PORTA] = "HSR_PORTA",
+ [MIIC_HSR_PORTB] = "HSR_PORTB",
+};
+
+static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+ "SWITCH_PORTIN",
+ "CONV1",
+ "CONV2",
+ "CONV3",
+ "CONV4",
+ "CONV5",
+};
+
+/**
+ * struct miic - MII converter structure
+ * @base: base address of the MII converter
+ * @dev: Device associated with the MII converter
+ * @clks: Clocks used for this device
+ * @nclk: Number of clocks
+ * @lock: Lock used for read-modify-write access
+ */
+struct miic {
+ void __iomem *base;
+ struct device *dev;
+ struct clk_bulk_data *clks;
+ int nclk;
+ spinlock_t lock;
+};
+
+/**
+ * struct miic_port - Per port MII converter struct
+ * @miic: backlink to the MII converter structure
+ * @pcs: PCS structure associated with the port
+ * @port: port number
+ * @interface: interface mode of the port
+ */
+struct miic_port {
+ struct miic *miic;
+ struct phylink_pcs pcs;
+ int port;
+ phy_interface_t interface;
+};
+
+static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct miic_port, pcs);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+ writel(value, miic->base + offset);
+}
+
+static u32 miic_reg_readl(struct miic *miic, int offset)
+{
+ return readl(miic->base + offset);
+}
+
+static void miic_reg_rmw(struct miic *miic, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&miic->lock);
+
+ reg = miic_reg_readl(miic, offset);
+ reg &= ~mask;
+ reg |= val;
+ miic_reg_writel(miic, offset, reg);
+
+ spin_unlock(&miic->lock);
+}
+
+static void miic_converter_enable(struct miic *miic, int port, int enable)
+{
+ u32 val = 0;
+
+ if (enable)
+ val = MIIC_CONVRST_PHYIF_RST(port);
+
+ miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
+}
+
+static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising, bool permit)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 speed, conv_mode, val, mask;
+ int port = miic_port->port;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ conv_mode = CONV_MODE_RMII;
+ speed = CONV_MODE_100MBPS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ conv_mode = CONV_MODE_RGMII;
+ speed = CONV_MODE_1000MBPS;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ conv_mode = CONV_MODE_MII;
+ /* When in MII mode, speed should be set to 0 (which is actually
+ * CONV_MODE_10MBPS)
+ */
+ speed = CONV_MODE_10MBPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, conv_mode);
+ mask = MIIC_CONVCTRL_CONV_MODE;
+
+	/* Update the speed only if the interface is about to change, because
+	 * the link might already be up and changing the speed would then
+	 * break it.
+	 */
+ if (interface != miic_port->interface) {
+ val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, speed);
+ mask |= MIIC_CONVCTRL_CONV_SPEED;
+ miic_port->interface = interface;
+ }
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
+ miic_converter_enable(miic_port->miic, miic_port->port, 1);
+
+ return 0;
+}
+
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 conv_speed = 0, val = 0;
+ int port = miic_port->port;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MIIC_CONVCTRL_FULLD;
+
+ /* No speed in MII through-mode */
+ if (interface != PHY_INTERFACE_MODE_MII) {
+ switch (speed) {
+ case SPEED_1000:
+ conv_speed = CONV_MODE_1000MBPS;
+ break;
+ case SPEED_100:
+ conv_speed = CONV_MODE_100MBPS;
+ break;
+ case SPEED_10:
+ conv_speed = CONV_MODE_10MBPS;
+ break;
+ default:
+ return;
+ }
+ }
+
+ val |= FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, conv_speed);
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(port),
+ (MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
+}
+
+static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ if (phy_interface_mode_is_rgmii(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_RMII ||
+ state->interface == PHY_INTERFACE_MODE_MII)
+ return 1;
+
+ return -EINVAL;
+}
+
+static const struct phylink_pcs_ops miic_phylink_ops = {
+ .pcs_validate = miic_validate,
+ .pcs_config = miic_config,
+ .pcs_link_up = miic_link_up,
+};
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct miic_port *miic_port;
+ struct device_node *pcs_np;
+ struct miic *miic;
+ u32 port;
+
+ if (!of_device_is_available(np))
+ return ERR_PTR(-ENODEV);
+
+ if (of_property_read_u32(np, "reg", &port))
+ return ERR_PTR(-EINVAL);
+
+ if (port > MIIC_MAX_NR_PORTS || port < 1)
+ return ERR_PTR(-EINVAL);
+
+ /* The PCS pdev is attached to the parent node */
+ pcs_np = of_get_parent(np);
+ if (!pcs_np)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_device_is_available(pcs_np)) {
+ of_node_put(pcs_np);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(pcs_np);
+ of_node_put(pcs_np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
+ if (!miic_port)
+ return ERR_PTR(-ENOMEM);
+
+ miic = platform_get_drvdata(pdev);
+ device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ miic_port->miic = miic;
+ miic_port->port = port - 1;
+ miic_port->pcs.ops = &miic_phylink_ops;
+
+ return &miic_port->pcs;
+}
+EXPORT_SYMBOL(miic_create);
+
+void miic_destroy(struct phylink_pcs *pcs)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+
+ miic_converter_enable(miic_port->miic, miic_port->port, 0);
+ kfree(miic_port);
+}
+EXPORT_SYMBOL(miic_destroy);
+
+static int miic_init_hw(struct miic *miic, u32 cfg_mode)
+{
+ int port;
+
+	/* Unlock write access to accessory registers (cf. datasheet). If this
+	 * is going to be used in conjunction with the Cortex-M3, this
+	 * sequence will have to be moved into the register write path.
+	 */
+ miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
+ miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+ miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
+ miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+
+ miic_reg_writel(miic, MIIC_MODCTRL,
+ FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
+
+ for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
+ miic_converter_enable(miic, port, 0);
+		/* Disable speed/duplex control from these registers; the
+		 * datasheet says the switch registers should be used to set
+		 * up the switch port speed and duplex.
+		 */
+ miic_reg_writel(miic, MIIC_SWCTRL, 0x0);
+ miic_reg_writel(miic, MIIC_SWDUPC, 0x0);
+ }
+
+ return 0;
+}
+
+static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
+{
+ int i;
+
+ for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
+ continue;
+
+ if (dt_val[i] != table_val[i])
+ return false;
+ }
+
+ return true;
+}
+
+static void miic_dump_conf(struct device *dev,
+ s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
+{
+ const char *conf_name;
+ int i;
+
+ for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ if (conf[i] != MIIC_MODCTRL_CONF_NONE)
+ conf_name = conf_to_string[conf[i]];
+ else
+ conf_name = "NONE";
+
+ dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
+ }
+}
+
+static int miic_match_dt_conf(struct device *dev,
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
+ u32 *mode_cfg)
+{
+ struct modctrl_match *table_entry;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
+ table_entry = &modctrl_match_table[i];
+
+ if (miic_modctrl_match(table_entry->conv, dt_val)) {
+ *mode_cfg = table_entry->mode_cfg;
+ return 0;
+ }
+ }
+
+ dev_err(dev, "Failed to apply requested configuration\n");
+ miic_dump_conf(dev, dt_val);
+
+ return -EINVAL;
+}
+
+static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
+{
+ s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
+ struct device_node *np = dev->of_node;
+ struct device_node *conv;
+ u32 conf;
+ int port;
+
+ memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
+
+ if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
+ dt_val[0] = conf;
+
+ for_each_child_of_node(np, conv) {
+ if (of_property_read_u32(conv, "reg", &port))
+ continue;
+
+ if (!of_device_is_available(conv))
+ continue;
+
+ if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
+ dt_val[port] = conf;
+ }
+
+ return miic_match_dt_conf(dev, dt_val, mode_cfg);
+}
+
+static int miic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct miic *miic;
+ u32 mode_cfg;
+ int ret;
+
+ ret = miic_parse_dt(dev, &mode_cfg);
+ if (ret < 0)
+ return ret;
+
+ miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
+ if (!miic)
+ return -ENOMEM;
+
+ spin_lock_init(&miic->lock);
+ miic->dev = dev;
+ miic->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(miic->base))
+ return PTR_ERR(miic->base);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = miic_init_hw(miic, mode_cfg);
+ if (ret)
+ goto disable_runtime_pm;
+
+	/* miic_create() relies on the fact that data are attached to the
+	 * platform device to determine if the driver is ready, so this needs
+	 * to be the last thing done after everything else is initialized
+	 * properly.
+	 */
+ platform_set_drvdata(pdev, miic);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int miic_remove(struct platform_device *pdev)
+{
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id miic_of_mtable[] = {
+ { .compatible = "renesas,rzn1-miic" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, miic_of_mtable);
+
+static struct platform_driver miic_driver = {
+ .driver = {
+ .name = "rzn1_miic",
+ .suppress_bind_attrs = true,
+ .of_match_table = miic_of_mtable,
+ },
+ .probe = miic_probe,
+ .remove = miic_remove,
+};
+module_platform_driver(miic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas MII converter PCS driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index cd6742e6ba8b..70f88eae2a9e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -77,6 +77,14 @@ static const int xpcs_sgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_1000basex_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_2500basex_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -102,6 +110,10 @@ static const phy_interface_t xpcs_sgmii_interfaces[] = {
PHY_INTERFACE_MODE_SGMII,
};
+static const phy_interface_t xpcs_1000basex_interfaces[] = {
+ PHY_INTERFACE_MODE_1000BASEX,
+};
+
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_MAX,
@@ -112,6 +124,7 @@ enum {
DW_XPCS_10GKR,
DW_XPCS_XLGMII,
DW_XPCS_SGMII,
+ DW_XPCS_1000BASEX,
DW_XPCS_2500BASEX,
DW_XPCS_INTERFACE_MAX,
};
@@ -175,20 +188,26 @@ static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg)
{
- u32 reg_addr = mdiobus_c45_addr(dev, reg);
struct mii_bus *bus = xpcs->mdiodev->bus;
int addr = xpcs->mdiodev->addr;
- return mdiobus_read(bus, addr, reg_addr);
+ return mdiobus_c45_read(bus, addr, dev, reg);
}
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
{
- u32 reg_addr = mdiobus_c45_addr(dev, reg);
struct mii_bus *bus = xpcs->mdiodev->bus;
int addr = xpcs->mdiodev->addr;
- return mdiobus_write(bus, addr, reg_addr, val);
+ return mdiobus_c45_write(bus, addr, dev, reg, val);
+}
+
+static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg,
+ u16 mask, u16 set)
+{
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+
+ return mdiodev_modify_changed(xpcs->mdiodev, reg_addr, mask, set);
}
static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
@@ -239,6 +258,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
break;
case DW_AN_C37_SGMII:
case DW_2500BASEX:
+ case DW_AN_C37_1000BASEX:
dev = MDIO_MMD_VEND2;
break;
default:
@@ -632,35 +652,43 @@ static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
}
}
-void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
- struct phylink_link_state *state)
+static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, };
const struct xpcs_compat *compat;
+ struct dw_xpcs *xpcs;
int i;
- /* phylink expects us to report all supported modes with
- * PHY_INTERFACE_MODE_NA, just don't limit the supported and
- * advertising masks and exit.
- */
- if (state->interface == PHY_INTERFACE_MODE_NA)
- return;
-
- linkmode_zero(xpcs_supported);
-
+ xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
- /* Populate the supported link modes for this
- * PHY interface type
+ /* Populate the supported link modes for this PHY interface type.
+ * FIXME: what about the port modes and autoneg bit? This masks
+ * all those away.
*/
if (compat)
for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
- linkmode_and(state->advertising, state->advertising, xpcs_supported);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(xpcs_validate);
+
+void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+{
+ int i, j;
+
+ for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
+ const struct xpcs_compat *compat = &xpcs->id->compat[i];
+
+ for (j = 0; j < compat->num_interfaces; j++)
+ if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
+ __set_bit(compat->interface[j], interfaces);
+ }
+}
+EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
@@ -766,6 +794,68 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
return ret;
}
+static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ phy_interface_t interface = PHY_INTERFACE_MODE_1000BASEX;
+ int ret, mdio_ctrl, adv;
+	bool changed = false;
+
+ /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must
+	 * be disabled first:
+ * 1) VR_MII_MMD_CTRL Bit(12)[AN_ENABLE] = 0b
+ * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 00b (1000BASE-X C37)
+ */
+ mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+ if (mdio_ctrl < 0)
+ return mdio_ctrl;
+
+ if (mdio_ctrl & AN_CL37_EN) {
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+ mdio_ctrl & ~AN_CL37_EN);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_VR_MII_PCS_MODE_MASK;
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Check for advertising changes and update the C45 MII ADV
+ * register accordingly.
+ */
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface,
+ advertising);
+ if (adv >= 0) {
+ ret = xpcs_modify_changed(xpcs, MDIO_MMD_VEND2,
+ MII_ADVERTISE, 0xffff, adv);
+ if (ret < 0)
+ return ret;
+
+ changed = ret;
+ }
+
+ /* Clear CL37 AN complete status */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0);
+ if (ret < 0)
+ return ret;
+
+ if (phylink_autoneg_inband(mode) &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+ mdio_ctrl | AN_CL37_EN);
+ if (ret < 0)
+ return ret;
+ }
+
+ return changed;
+}
+
static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
{
int ret;
@@ -789,7 +879,7 @@ static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
}
int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
- unsigned int mode)
+ unsigned int mode, const unsigned long *advertising)
{
const struct xpcs_compat *compat;
int ret;
@@ -811,6 +901,12 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
if (ret)
return ret;
break;
+ case DW_AN_C37_1000BASEX:
+ ret = xpcs_config_aneg_c37_1000basex(xpcs, mode,
+ advertising);
+ if (ret)
+ return ret;
+ break;
case DW_2500BASEX:
ret = xpcs_config_2500basex(xpcs);
if (ret)
@@ -837,7 +933,7 @@ static int xpcs_config(struct phylink_pcs *pcs, unsigned int mode,
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
- return xpcs_do_config(xpcs, interface, mode);
+ return xpcs_do_config(xpcs, interface, mode, advertising);
}
static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
@@ -858,7 +954,7 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
state->link = 0;
- return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND);
+ return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND, NULL);
}
if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state, compat)) {
@@ -890,7 +986,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
*/
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
if (ret < 0)
- return false;
+ return ret;
if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
int speed_value;
@@ -915,6 +1011,29 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
return 0;
}
+static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state)
+{
+ int lpa, bmsr;
+
+ if (state->an_enabled) {
+ /* Reset link state */
+ state->link = false;
+
+ lpa = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0 || lpa & LPA_RFAULT)
+ return lpa;
+
+ bmsr = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ }
+
+ return 0;
+}
+
static void xpcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
@@ -942,6 +1061,13 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
ERR_PTR(ret));
}
break;
+ case DW_AN_C37_1000BASEX:
+ ret = xpcs_get_state_c37_1000basex(xpcs, state);
+ if (ret) {
+ pr_err("xpcs_get_state_c37_1000basex returned %pe\n",
+ ERR_PTR(ret));
+ }
+ break;
default:
return;
}
@@ -955,22 +1081,35 @@ static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int mode,
if (phylink_autoneg_inband(mode))
return;
+ val = mii_bmcr_encode_fixed(speed, duplex);
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
+ if (ret)
+ pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));
+}
+
+static void xpcs_link_up_1000basex(struct dw_xpcs *xpcs, unsigned int mode,
+ int speed, int duplex)
+{
+ int val, ret;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
switch (speed) {
case SPEED_1000:
val = BMCR_SPEED1000;
break;
case SPEED_100:
- val = BMCR_SPEED100;
- break;
case SPEED_10:
- val = BMCR_SPEED10;
- break;
default:
+ pr_err("%s: speed = %d\n", __func__, speed);
return;
}
if (duplex == DUPLEX_FULL)
val |= BMCR_FULLDPLX;
+ else
+ pr_err("%s: half duplex not supported\n", __func__);
ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
if (ret)
@@ -986,9 +1125,23 @@ void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
return xpcs_config_usxgmii(xpcs, speed);
if (interface == PHY_INTERFACE_MODE_SGMII)
return xpcs_link_up_sgmii(xpcs, mode, speed, duplex);
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
+ return xpcs_link_up_1000basex(xpcs, mode, speed, duplex);
}
EXPORT_SYMBOL_GPL(xpcs_link_up);
+static void xpcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
+ if (ret >= 0) {
+ ret |= BMCR_ANRESTART;
+ xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
+ }
+}
+
static u32 xpcs_get_id(struct dw_xpcs *xpcs)
{
int ret;
@@ -1054,6 +1207,12 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
.num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
.an_mode = DW_AN_C37_SGMII,
},
+ [DW_XPCS_1000BASEX] = {
+ .supported = xpcs_1000basex_features,
+ .interface = xpcs_1000basex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_1000basex_interfaces),
+ .an_mode = DW_AN_C37_1000BASEX,
+ },
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
@@ -1106,8 +1265,10 @@ static const struct xpcs_id xpcs_id_list[] = {
};
static const struct phylink_pcs_ops xpcs_phylink_ops = {
+ .pcs_validate = xpcs_validate,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
+ .pcs_an_restart = xpcs_an_restart,
.pcs_link_up = xpcs_link_up,
};
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index 35651d32a224..770df50323a0 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -109,7 +109,6 @@
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
-
int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs);
int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 902495afcb38..c57a0262fb64 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -83,6 +83,13 @@ config ADIN_PHY
- ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit
Ethernet PHY
+config ADIN1100_PHY
+ tristate "Analog Devices Industrial Ethernet T1L PHYs"
+ help
+ Adds support for the Analog Devices Industrial T1L Ethernet PHYs.
+ Currently supports the:
+ - ADIN1100 - Robust, Industrial, Low Power 10BASE-T1L Ethernet PHY
+
config AQUANTIA_PHY
tristate "Aquantia PHYs"
help
@@ -97,6 +104,8 @@ config AX88796B_PHY
config BROADCOM_PHY
tristate "Broadcom 54XX PHYs"
select BCM_NET_PHYLIB
+ select BCM_NET_PHYPTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
@@ -153,6 +162,9 @@ config BCM_CYGNUS_PHY
config BCM_NET_PHYLIB
tristate
+config BCM_NET_PHYPTP
+ tristate
+
config CICADA_PHY
tristate "Cicada PHYs"
help
@@ -209,6 +221,8 @@ config MARVELL_88X2222_PHY
config MAXLINEAR_GPHY
tristate "Maxlinear Ethernet PHYs"
+ select POLYNOMIAL if HWMON
+ depends on HWMON || HWMON=n
help
Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
GPY241, GPY245 PHYs.
@@ -220,6 +234,7 @@ config MEDIATEK_GE_PHY
config MICREL_PHY
tristate "Micrel PHYs"
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Supports the KSZ9021, VSC8201, KS8001 PHYs.
@@ -334,6 +349,12 @@ config DP83869_PHY
Currently supports the DP83869 PHY. This PHY supports copper and
fiber connections.
+config DP83TD510_PHY
+ tristate "Texas Instruments DP83TD510 Ethernet 10Base-T1L PHY"
+ help
+ Support for the DP83TD510 Ethernet 10Base-T1L PHY. This PHY supports
+ a 10M single-pair Ethernet connection over cables of up to 1000 meters.
+
config VITESSE_PHY
tristate "Vitesse PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b2728d00fc9a..f7138d3c896b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -31,6 +31,7 @@ sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_ADIN_PHY) += adin.o
+obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
aquantia-objs += aquantia_main.o
ifdef CONFIG_HWMON
@@ -46,6 +47,7 @@ obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
+obj-$(CONFIG_BCM_NET_PHYPTP) += bcm-phy-ptp.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
@@ -56,6 +58,7 @@ obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_DP83869_PHY) += dp83869.o
obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
+obj-$(CONFIG_DP83TD510_PHY) += dp83td510.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 5ce6da62cc8e..134637584a83 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -99,6 +99,15 @@
#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
#define ADIN1300_GE_SOFT_RESET BIT(0)
+#define ADIN1300_GE_CLK_CFG_REG 0xff1f
+#define ADIN1300_GE_CLK_CFG_MASK GENMASK(5, 0)
+#define ADIN1300_GE_CLK_CFG_RCVR_125 BIT(5)
+#define ADIN1300_GE_CLK_CFG_FREE_125 BIT(4)
+#define ADIN1300_GE_CLK_CFG_REF_EN BIT(3)
+#define ADIN1300_GE_CLK_CFG_HRT_RCVR BIT(2)
+#define ADIN1300_GE_CLK_CFG_HRT_FREE BIT(1)
+#define ADIN1300_GE_CLK_CFG_25 BIT(0)
+
#define ADIN1300_GE_RGMII_CFG_REG 0xff23
#define ADIN1300_GE_RGMII_RX_MSK GENMASK(8, 6)
#define ADIN1300_GE_RGMII_RX_SEL(x) \
@@ -433,6 +442,33 @@ static int adin_set_tunable(struct phy_device *phydev,
}
}
+static int adin_config_clk_out(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const char *val = NULL;
+ u8 sel = 0;
+
+ device_property_read_string(dev, "adi,phy-output-clock", &val);
+ if (!val) {
+ /* property not present, do not enable GP_CLK pin */
+ } else if (strcmp(val, "25mhz-reference") == 0) {
+ sel |= ADIN1300_GE_CLK_CFG_25;
+ } else if (strcmp(val, "125mhz-free-running") == 0) {
+ sel |= ADIN1300_GE_CLK_CFG_FREE_125;
+ } else if (strcmp(val, "adaptive-free-running") == 0) {
+ sel |= ADIN1300_GE_CLK_CFG_HRT_FREE;
+ } else {
+ phydev_err(phydev, "invalid adi,phy-output-clock\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(dev, "adi,phy-output-reference-clock"))
+ sel |= ADIN1300_GE_CLK_CFG_REF_EN;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_CLK_CFG_REG,
+ ADIN1300_GE_CLK_CFG_MASK, sel);
+}
+
static int adin_config_init(struct phy_device *phydev)
{
int rc;
@@ -455,6 +491,10 @@ static int adin_config_init(struct phy_device *phydev)
if (rc < 0)
return rc;
+ rc = adin_config_clk_out(phydev);
+ if (rc < 0)
+ return rc;
+
phydev_dbg(phydev, "PHY is using mode '%s'\n",
phy_modes(phydev->interface));
@@ -709,7 +749,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) {
- strlcpy(&data[i * ETH_GSTRING_LEN],
+ strscpy(&data[i * ETH_GSTRING_LEN],
adin_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
new file mode 100644
index 000000000000..7619d6185801
--- /dev/null
+++ b/drivers/net/phy/adin1100.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Driver for Analog Devices Industrial Ethernet T1L PHYs
+ *
+ * Copyright 2020 Analog Devices Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+
+#define PHY_ID_ADIN1100 0x0283bc81
+#define PHY_ID_ADIN1110 0x0283bc91
+#define PHY_ID_ADIN2111 0x0283bca1
+
+#define ADIN_FORCED_MODE 0x8000
+#define ADIN_FORCED_MODE_EN BIT(0)
+
+#define ADIN_CRSM_SFT_RST 0x8810
+#define ADIN_CRSM_SFT_RST_EN BIT(0)
+
+#define ADIN_CRSM_SFT_PD_CNTRL 0x8812
+#define ADIN_CRSM_SFT_PD_CNTRL_EN BIT(0)
+
+#define ADIN_AN_PHY_INST_STATUS 0x8030
+#define ADIN_IS_CFG_SLV BIT(2)
+#define ADIN_IS_CFG_MST BIT(3)
+
+#define ADIN_CRSM_STAT 0x8818
+#define ADIN_CRSM_SFT_PD_RDY BIT(1)
+#define ADIN_CRSM_SYS_RDY BIT(0)
+
+#define ADIN_MSE_VAL 0x830B
+
+#define ADIN_SQI_MAX 7
+
+struct adin_mse_sqi_range {
+ u16 start;
+ u16 end;
+};
+
+static const struct adin_mse_sqi_range adin_mse_sqi_map[] = {
+ { 0x0A74, 0xFFFF },
+ { 0x084E, 0x0A74 },
+ { 0x0698, 0x084E },
+ { 0x053D, 0x0698 },
+ { 0x0429, 0x053D },
+ { 0x034E, 0x0429 },
+ { 0x02A0, 0x034E },
+ { 0x0000, 0x02A0 },
+};
+
+/**
+ * struct adin_priv - ADIN PHY driver private data
+ * @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L)
+ * @tx_level_2v4: set if the PHY requests 2.4V TX levels (10BASE-T1L)
+ * @tx_level_prop_present: set if the TX level is specified in DT
+ */
+struct adin_priv {
+ unsigned int tx_level_2v4_able:1;
+ unsigned int tx_level_2v4:1;
+ unsigned int tx_level_prop_present:1;
+};
+
+static int adin_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_read_status(phydev);
+ if (ret)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, ADIN_AN_PHY_INST_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ADIN_IS_CFG_SLV)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ if (ret & ADIN_IS_CFG_MST)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+
+ return 0;
+}
+
+static int adin_config_aneg(struct phy_device *phydev)
+{
+ struct adin_priv *priv = phydev->priv;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (priv->tx_level_prop_present && priv->tx_level_2v4)
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL,
+ MDIO_PMA_10T1L_CTRL_2V4_EN);
+ else
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL,
+ MDIO_PMA_10T1L_CTRL_2V4_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Force PHY to use above configurations */
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN);
+ }
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Request increased transmit level from LP. */
+ if (priv->tx_level_prop_present && priv->tx_level_2v4) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H,
+ MDIO_AN_T1_ADV_H_10L_TX_HI |
+ MDIO_AN_T1_ADV_H_10L_TX_HI_REQ);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Disable 2.4 Vpp transmit level. */
+ if ((priv->tx_level_prop_present && !priv->tx_level_2v4) || !priv->tx_level_2v4_able) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H,
+ MDIO_AN_T1_ADV_H_10L_TX_HI |
+ MDIO_AN_T1_ADV_H_10L_TX_HI_REQ);
+ if (ret < 0)
+ return ret;
+ }
+
+ return genphy_c45_config_aneg(phydev);
+}
+
+static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
+{
+ int ret;
+ int val;
+
+ val = en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN_CRSM_SFT_PD_CNTRL, val);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
+ (ret & ADIN_CRSM_SFT_PD_RDY) == val,
+ 1000, 30000, true);
+}
+
+static int adin_suspend(struct phy_device *phydev)
+{
+ return adin_set_powerdown_mode(phydev, true);
+}
+
+static int adin_resume(struct phy_device *phydev)
+{
+ return adin_set_powerdown_mode(phydev, false);
+}
+
+static int adin_set_loopback(struct phy_device *phydev, bool enable)
+{
+ if (enable)
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
+ BMCR_LOOPBACK);
+
+ /* PCS loopback (according to 10BASE-T1L spec) */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
+ BMCR_LOOPBACK);
+}
+
+static int adin_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN_CRSM_SFT_RST, ADIN_CRSM_SFT_RST_EN);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
+ (ret & ADIN_CRSM_SYS_RDY),
+ 10000, 30000, true);
+}
+
+static int adin_get_features(struct phy_device *phydev)
+{
+ struct adin_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+ u8 val;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
+ if (ret < 0)
+ return ret;
+
+ /* This depends on the voltage level from the power source */
+ priv->tx_level_2v4_able = !!(ret & MDIO_PMA_10T1L_STAT_2V4_ABLE);
+
+ phydev_dbg(phydev, "PHY supports 2.4V TX level: %s\n",
+ priv->tx_level_2v4_able ? "yes" : "no");
+
+ priv->tx_level_prop_present = device_property_present(dev, "phy-10base-t1l-2.4vpp");
+ if (priv->tx_level_prop_present) {
+ ret = device_property_read_u8(dev, "phy-10base-t1l-2.4vpp", &val);
+ if (ret < 0)
+ return ret;
+
+ priv->tx_level_2v4 = val;
+ if (!priv->tx_level_2v4 && priv->tx_level_2v4_able)
+ phydev_info(phydev,
+ "PHY supports 2.4V TX level, but disabled via config\n");
+ }
+
+ linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int adin_get_sqi(struct phy_device *phydev)
+{
+ u16 mse_val;
+ int sqi;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+ else if (!(ret & MDIO_STAT1_LSTATUS))
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, ADIN_MSE_VAL);
+ if (ret < 0)
+ return ret;
+
+ mse_val = 0xFFFF & ret;
+ for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++) {
+ if (mse_val >= adin_mse_sqi_map[sqi].start && mse_val <= adin_mse_sqi_map[sqi].end)
+ return sqi;
+ }
+
+ return -EINVAL;
+}
+
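The SQI reported to ethtool is a table lookup on the mean-square-error register: a higher MSE means a worse signal, so the highest band maps to SQI 0 and the lowest to SQI 7, with first-match-wins at the shared boundaries. A standalone check of the mapping follows; the table values are copied from adin_mse_sqi_map above, and mse_to_sqi() is a hypothetical helper, not part of the driver.

/* Standalone sketch of the MSE -> SQI lookup used by adin_get_sqi().
 * Table values copied from adin_mse_sqi_map; mse_to_sqi() is a
 * hypothetical helper.
 */
#include <stdio.h>

struct range { unsigned short start, end; };

static const struct range mse_sqi_map[] = {
	{ 0x0A74, 0xFFFF },	/* SQI 0: worst signal */
	{ 0x084E, 0x0A74 },
	{ 0x0698, 0x084E },
	{ 0x053D, 0x0698 },
	{ 0x0429, 0x053D },
	{ 0x034E, 0x0429 },
	{ 0x02A0, 0x034E },
	{ 0x0000, 0x02A0 },	/* SQI 7: best signal */
};

static int mse_to_sqi(unsigned short mse)
{
	unsigned int sqi;

	for (sqi = 0; sqi < sizeof(mse_sqi_map) / sizeof(mse_sqi_map[0]); sqi++)
		if (mse >= mse_sqi_map[sqi].start && mse <= mse_sqi_map[sqi].end)
			return sqi;	/* first match wins on boundaries */
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", mse_to_sqi(0xFFFF), mse_to_sqi(0x0500),
	       mse_to_sqi(0x0100));	/* prints 0 4 7 */
	return 0;
}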
+static int adin_get_sqi_max(struct phy_device *phydev)
+{
+ return ADIN_SQI_MAX;
+}
+
+static int adin_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct adin_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static struct phy_driver adin_driver[] = {
+ {
+ .phy_id = PHY_ID_ADIN1100,
+ .phy_id_mask = 0xffffffcf,
+ .name = "ADIN1100",
+ .get_features = adin_get_features,
+ .soft_reset = adin_soft_reset,
+ .probe = adin_probe,
+ .config_aneg = adin_config_aneg,
+ .read_status = adin_read_status,
+ .set_loopback = adin_set_loopback,
+ .suspend = adin_suspend,
+ .resume = adin_resume,
+ .get_sqi = adin_get_sqi,
+ .get_sqi_max = adin_get_sqi_max,
+ },
+};
+
+module_phy_driver(adin_driver);
+
+static struct mdio_device_id __maybe_unused adin_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, adin_tbl);
+MODULE_DESCRIPTION("Analog Devices Industrial Ethernet T1L PHY driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 968dd43a2b1e..47a76df36b74 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -22,18 +22,24 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR113C 0x31c31c12
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX 1
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII 3
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI 4
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI 7
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
+#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
@@ -88,6 +94,22 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M 0x0310
+#define VEND1_GLOBAL_CFG_100M 0x031b
+#define VEND1_GLOBAL_CFG_1G 0x031c
+#define VEND1_GLOBAL_CFG_2_5G 0x031d
+#define VEND1_GLOBAL_CFG_5G 0x031e
+#define VEND1_GLOBAL_CFG_10G 0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -122,6 +144,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP 1000
+#define AQR107_OP_IN_PROG_TIMEOUT 100000
+
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -231,9 +259,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
phydev->advertising))
reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+ /* Handle the case when the 2.5G and 5G speeds are not advertised */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
MDIO_AN_VEND_PROV_1000BASET_HALF |
- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -321,40 +360,57 @@ static int aqr_read_status(struct phy_device *phydev)
static int aqr107_read_rate(struct phy_device *phydev)
{
+ u32 config_reg;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
if (val < 0)
return val;
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
case MDIO_AN_TX_VEND_STATUS1_10BASET:
phydev->speed = SPEED_10;
+ config_reg = VEND1_GLOBAL_CFG_10M;
break;
case MDIO_AN_TX_VEND_STATUS1_100BASETX:
phydev->speed = SPEED_100;
+ config_reg = VEND1_GLOBAL_CFG_100M;
break;
case MDIO_AN_TX_VEND_STATUS1_1000BASET:
phydev->speed = SPEED_1000;
+ config_reg = VEND1_GLOBAL_CFG_1G;
break;
case MDIO_AN_TX_VEND_STATUS1_2500BASET:
phydev->speed = SPEED_2500;
+ config_reg = VEND1_GLOBAL_CFG_2_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_5000BASET:
phydev->speed = SPEED_5000;
+ config_reg = VEND1_GLOBAL_CFG_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_10GBASET:
phydev->speed = SPEED_10000;
+ config_reg = VEND1_GLOBAL_CFG_10G;
break;
default:
phydev->speed = SPEED_UNKNOWN;
- break;
+ return 0;
}
- if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
+ VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
else
- phydev->duplex = DUPLEX_HALF;
+ phydev->rate_matching = RATE_MATCH_NONE;
return 0;
}
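The per-speed VEND1_GLOBAL_CFG_* registers share one layout, and the driver pulls the two-bit rate-adaptation method out with FIELD_GET(). A minimal sketch of that field extraction, with the mask and field values mirrored from the definitions above and a plain shift standing in for FIELD_GET():

/* Standalone sketch: extract the rate-adaptation method from a
 * VEND1_GLOBAL_CFG_* register value, as aqr107_read_rate() does with
 * FIELD_GET(); masks mirror the definitions above.
 */
#include <stdio.h>

#define CFG_RATE_ADAPT_MASK	0x0180	/* GENMASK(8, 7) */
#define CFG_RATE_ADAPT_SHIFT	7
#define RATE_ADAPT_NONE		0
#define RATE_ADAPT_USX		1
#define RATE_ADAPT_PAUSE	2

static unsigned int rate_adapt(unsigned int reg)
{
	return (reg & CFG_RATE_ADAPT_MASK) >> CFG_RATE_ADAPT_SHIFT;
}

int main(void)
{
	/* bits 8:7 = 0b10 -> pause-based rate adaptation */
	unsigned int reg = 2u << CFG_RATE_ADAPT_SHIFT;

	printf("%s\n", rate_adapt(reg) == RATE_ADAPT_PAUSE ?
	       "RATE_MATCH_PAUSE" : "RATE_MATCH_NONE");
	return 0;
}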
@@ -378,15 +434,24 @@ static int aqr107_read_status(struct phy_device *phydev)
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+ phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+ phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+ phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
break;
@@ -499,11 +564,14 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
- phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
+ phydev->interface != PHY_INTERFACE_MODE_XAUI &&
+ phydev->interface != PHY_INTERFACE_MODE_RXAUI)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
@@ -533,9 +601,7 @@ static int aqcs109_config_init(struct phy_device *phydev)
* PMA speed ability bits are the same for all members of the family,
* AQCS109 however supports speeds up to 2.5G only.
*/
- ret = phy_set_max_speed(phydev, SPEED_2500);
- if (ret)
- return ret;
+ phy_set_max_speed(phydev, SPEED_2500);
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
@@ -585,16 +651,62 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+ int val, err;
+
+ /* The datasheet says to wait at least 1 ms after issuing a
+ * processor-intensive operation before checking its status.
+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+ * because that just determines the maximum time slept, not the minimum.
+ */
+ usleep_range(1000, 5000);
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_GEN_STAT2, val,
+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (err) {
+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int aqr107_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ if (iface == PHY_INTERFACE_MODE_10GBASER ||
+ iface == PHY_INTERFACE_MODE_2500BASEX ||
+ iface == PHY_INTERFACE_MODE_NA)
+ return RATE_MATCH_PAUSE;
+ return RATE_MATCH_NONE;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
@@ -646,6 +758,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
@@ -664,6 +777,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
@@ -686,6 +800,25 @@ static struct phy_driver aqr_driver[] = {
.handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
+ .name = "Aquantia AQR113C",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -698,6 +831,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ }
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index dae95d9a07e8..349b7b1dbbf2 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -19,6 +19,8 @@
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/consumer.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
#include <dt-bindings/net/qca-ar803x.h>
#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
@@ -51,6 +53,8 @@
#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
#define AT803X_INTR_ENABLE_WOL BIT(0)
@@ -85,7 +89,17 @@
#define AT803X_DEBUG_DATA 0x1E
#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_SGMII 0x01
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
@@ -101,6 +115,7 @@
#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
#define AT803X_DEBUG_REG_3C 0x3C
@@ -178,6 +193,9 @@
#define AT803X_KEEP_PLL_ENABLED BIT(0)
#define AT803X_DISABLE_SMARTEEE BIT(1)
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
/* ADC threshold */
#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
@@ -283,6 +301,8 @@ struct at803x_priv {
u16 clk_25m_mask;
u8 smarteee_lpi_tw_1g;
u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
@@ -417,20 +437,21 @@ static void at803x_context_restore(struct phy_device *phydev,
static int at803x_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- struct net_device *ndev = phydev->attached_dev;
- const u8 *mac;
int ret, irq_enabled;
- unsigned int i;
- const unsigned int offsets[] = {
- AT803X_LOC_MAC_ADDR_32_47_OFFSET,
- AT803X_LOC_MAC_ADDR_16_31_OFFSET,
- AT803X_LOC_MAC_ADDR_0_15_OFFSET,
- };
-
- if (!ndev)
- return -ENODEV;
if (wol->wolopts & WAKE_MAGIC) {
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i;
+ static const unsigned int offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return -ENODEV;
+
mac = (const u8 *) ndev->dev_addr;
if (!is_valid_ether_addr(mac))
@@ -650,6 +671,56 @@ static int at8031_register_regulators(struct phy_device *phydev)
return 0;
}
+static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules support 10G modes in addition to the modes we support;
+ * mask out the unsupported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at803x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at803x_sfp_insert,
+};
+
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -664,6 +735,9 @@ static int at803x_parse_dt(struct phy_device *phydev)
if (of_property_read_bool(node, "qca,disable-smarteee"))
priv->flags |= AT803X_DISABLE_SMARTEEE;
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
if (!tw || tw > 255) {
phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
@@ -757,6 +831,11 @@ static int at803x_parse_dt(struct phy_device *phydev)
phydev_err(phydev, "failed to get VDDIO regulator\n");
return PTR_ERR(priv->vddio);
}
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -784,16 +863,34 @@ static int at803x_probe(struct phy_device *phydev)
return ret;
}
- /* Some bootloaders leave the fiber page selected.
- * Switch to the copper page, as otherwise we read
- * the PHY capabilities from the fiber side.
- */
if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
+ int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ int mode_cfg;
+ struct ethtool_wolinfo wol = {
+ .wolopts = 0,
+ };
+
+ if (ccr < 0)
+ goto err;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
+ /* Disable WOL by default */
+ ret = at803x_set_wol(phydev, &wol);
+ if (ret < 0) {
+ phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
goto err;
+ }
}
return 0;
@@ -815,6 +912,7 @@ static void at803x_remove(struct phy_device *phydev)
static int at803x_get_features(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err;
err = genphy_read_abilities(phydev);
@@ -841,12 +939,13 @@ static int at803x_get_features(struct phy_device *phydev)
* As a result of that, ESTATUS_1000_XFULL is set
* to 1 even when operating in copper TP mode.
*
- * Remove this mode from the supported link modes,
- * as this driver currently only supports copper
- * operation.
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
return 0;
}
@@ -908,10 +1007,43 @@ static int at8031_pll_config(struct phy_device *phydev)
AT803X_DEBUG_PLL_ON, 0);
}
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* Hibernation mode is enabled by default after a hardware reset. After
+ * a software reset, the previous value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int ret;
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
@@ -941,11 +1073,9 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
- }
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
/* Ar803x extended next page bit is enabled by default. Cisco
* multigig switches read this bit and attempt to negotiate 10Gbps
@@ -967,6 +1097,7 @@ static int at803x_ack_interrupt(struct phy_device *phydev)
static int at803x_config_intr(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err;
int value;
@@ -983,6 +1114,10 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
value |= AT803X_INTR_ENABLE_LINK_FAIL;
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+ if (priv->is_fiber) {
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+ }
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
} else {
@@ -1115,8 +1250,12 @@ static int at803x_read_specific_status(struct phy_device *phydev)
static int at803x_read_status(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err, old_link = phydev->link;
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev);
+
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
@@ -1170,6 +1309,7 @@ static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
static int at803x_config_aneg(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int ret;
ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
@@ -1186,6 +1326,9 @@ static int at803x_config_aneg(struct phy_device *phydev)
return ret;
}
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
/* Set ret to 0 by default so that auto-negotiation is not restarted
* when __genphy_config_aneg is called later.
*/
@@ -1615,7 +1758,7 @@ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
{
- u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE);
+ u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE);
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
QCA808X_MASTER_SLAVE_SEED_CFG,
@@ -1688,19 +1831,19 @@ static int qca808x_read_status(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->link && phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SMII;
-
- /* generate seed as a lower random value to make PHY linked as SLAVE easily,
- * except for master/slave configuration fault detected.
- * the reason for not putting this code into the function link_change_notify is
- * the corner case where the link partner is also the qca8081 PHY and the seed
- * value is configured as the same value, the link can't be up and no link change
- * occurs.
- */
- if (!phydev->link) {
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+ /* Generate a lower random seed value so the PHY links as SLAVE
+ * more easily, except when a master/slave configuration fault is
+ * detected. This code is not placed in link_change_notify because
+ * of the corner case where the link partner is also a qca8081 PHY:
+ * if both ends are configured with the same seed value, the link
+ * cannot come up and no link change ever occurs.
+ */
if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) {
qca808x_phy_ms_seed_enable(phydev, false);
} else {
@@ -1955,6 +2098,8 @@ static struct phy_driver at803x_driver[] = {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
.name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.suspend = at803x_suspend,
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
@@ -1970,6 +2115,8 @@ static struct phy_driver at803x_driver[] = {
/* Qualcomm Atheros QCA9561 */
PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
.name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.suspend = at803x_suspend,
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
@@ -2034,6 +2181,8 @@ static struct phy_driver at803x_driver[] = {
PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
.name = "Qualcomm QCA8081",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 457896337505..0f1e617a26c9 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -88,8 +88,10 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
/* Reset PHY, otherwise MII_LPA will provide outdated information.
* This issue is reproducible only with some link partner PHYs
*/
- if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
- phydev->drv->soft_reset(phydev);
+ if (phydev->state == PHY_NOLINK) {
+ phy_init_hw(phydev);
+ phy_start_aneg(phydev);
+ }
}
static struct phy_driver asix_driver[] = {
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 287cccf8f7f4..b2c0baa51f39 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -519,7 +519,7 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index c3842f87c33b..9902fb182099 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -87,4 +87,23 @@ int bcm_phy_cable_test_start_rdb(struct phy_device *phydev);
int bcm_phy_cable_test_start(struct phy_device *phydev);
int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished);
+#if IS_ENABLED(CONFIG_BCM_NET_PHYPTP)
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev);
+void bcm_ptp_config_init(struct phy_device *phydev);
+void bcm_ptp_stop(struct bcm_ptp_private *priv);
+#else
+static inline struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+ return NULL;
+}
+
+static inline void bcm_ptp_config_init(struct phy_device *phydev)
+{
+}
+
+static inline void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+}
+#endif
+
#endif /* _LINUX_BCM_PHY_LIB_H */
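The #if IS_ENABLED() block above is the usual kernel stub pattern: callers (here broadcom.c) invoke the PTP hooks unconditionally, and when CONFIG_BCM_NET_PHYPTP is off the static inline stubs compile away and bcm_ptp_probe() yields NULL. A minimal sketch of the same pattern outside the kernel, with hypothetical names and a plain #define toggle standing in for IS_ENABLED():

/* Sketch of the IS_ENABLED() stub pattern, with hypothetical names;
 * a plain #define approximates the Kconfig toggle.
 */
#include <stdio.h>

#define CONFIG_FEATURE_PTP 0	/* flip to 1 to "enable" the feature */

#if CONFIG_FEATURE_PTP
struct ptp_priv { int dummy; };
static struct ptp_priv real_priv;

static struct ptp_priv *feature_probe(void) { return &real_priv; }
static void feature_start(struct ptp_priv *p) { (void)p; printf("PTP running\n"); }
#else
struct ptp_priv;	/* opaque; never dereferenced when disabled */

static struct ptp_priv *feature_probe(void) { return NULL; }
static void feature_start(struct ptp_priv *p) { (void)p; }
#endif

int main(void)
{
	/* caller code stays identical in both configurations */
	struct ptp_priv *priv = feature_probe();

	feature_start(priv);	/* no-op stub when the feature is off */
	return 0;
}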
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
new file mode 100644
index 000000000000..ef00d6163061
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Meta Platforms Inc.
+ * Copyright (C) 2022 Jonathan Lemon <jonathan.lemon@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+
+#include "bcm-phy-lib.h"
+
+/* IEEE 1588 Expansion registers */
+#define SLICE_CTRL 0x0810
+#define SLICE_TX_EN BIT(0)
+#define SLICE_RX_EN BIT(8)
+#define TX_EVENT_MODE 0x0811
+#define MODE_TX_UPDATE_CF BIT(0)
+#define MODE_TX_REPLACE_TS_CF BIT(1)
+#define MODE_TX_REPLACE_TS GENMASK(1, 0)
+#define RX_EVENT_MODE 0x0819
+#define MODE_RX_UPDATE_CF BIT(0)
+#define MODE_RX_INSERT_TS_48 BIT(1)
+#define MODE_RX_INSERT_TS_64 GENMASK(1, 0)
+
+#define MODE_EVT_SHIFT_SYNC 0
+#define MODE_EVT_SHIFT_DELAY_REQ 2
+#define MODE_EVT_SHIFT_PDELAY_REQ 4
+#define MODE_EVT_SHIFT_PDELAY_RESP 6
+
+#define MODE_SEL_SHIFT_PORT 0
+#define MODE_SEL_SHIFT_CPU 8
+
+#define RX_MODE_SEL(sel, evt, act) \
+ (((MODE_RX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
+
+#define TX_MODE_SEL(sel, evt, act) \
+ (((MODE_TX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
+
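The RX/TX_MODE_SEL macros compose a per-event action value, shift it into the event's slot, then into the port-vs-CPU selector slot. Two expansions worked out as a standalone compile-time check; the macro and bit definitions are copied from the driver above, with GENMASK(1, 0) written as 0x3.

/* Standalone compile-time check of the mode-select macro expansions
 * above; definitions copied from the driver, GENMASK(1, 0) = 0x3.
 */
#include <assert.h>

#define MODE_RX_INSERT_TS_64		0x3	/* GENMASK(1, 0) */
#define MODE_TX_REPLACE_TS		0x3	/* GENMASK(1, 0) */
#define MODE_EVT_SHIFT_SYNC		0
#define MODE_EVT_SHIFT_DELAY_REQ	2
#define MODE_SEL_SHIFT_PORT		0
#define MODE_SEL_SHIFT_CPU		8

#define RX_MODE_SEL(sel, evt, act) \
	(((MODE_RX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))
#define TX_MODE_SEL(sel, evt, act) \
	(((MODE_TX_##act) << (MODE_EVT_SHIFT_##evt)) << (MODE_SEL_SHIFT_##sel))

/* insert a 64-bit RX timestamp into SYNC frames on the line-side port */
static_assert(RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) == 0x0003, "");
/* replace the TX timestamp in DELAY_REQ frames on the CPU side */
static_assert(TX_MODE_SEL(CPU, DELAY_REQ, REPLACE_TS) == 0x0c00, "");

int main(void) { return 0; }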
+/* needs global TS capture first */
+#define TX_TS_CAPTURE 0x0821
+#define TX_TS_CAP_EN BIT(0)
+#define RX_TS_CAPTURE 0x0822
+#define RX_TS_CAP_EN BIT(0)
+
+#define TIME_CODE_0 0x0854
+#define TIME_CODE_1 0x0855
+#define TIME_CODE_2 0x0856
+#define TIME_CODE_3 0x0857
+#define TIME_CODE_4 0x0858
+
+#define DPLL_SELECT 0x085b
+#define DPLL_HB_MODE2 BIT(6)
+
+#define SHADOW_CTRL 0x085c
+#define SHADOW_LOAD 0x085d
+#define TIME_CODE_LOAD BIT(10)
+#define SYNC_OUT_LOAD BIT(9)
+#define NCO_TIME_LOAD BIT(7)
+#define FREQ_LOAD BIT(6)
+#define INTR_MASK 0x085e
+#define INTR_STATUS 0x085f
+#define INTC_FSYNC BIT(0)
+#define INTC_SOP BIT(1)
+
+#define NCO_FREQ_LSB 0x0873
+#define NCO_FREQ_MSB 0x0874
+
+#define NCO_TIME_0 0x0875
+#define NCO_TIME_1 0x0876
+#define NCO_TIME_2_CTRL 0x0877
+#define FREQ_MDIO_SEL BIT(14)
+
+#define SYNC_OUT_0 0x0878
+#define SYNC_OUT_1 0x0879
+#define SYNC_OUT_2 0x087a
+
+#define SYNC_IN_DIVIDER 0x087b
+
+#define SYNOUT_TS_0 0x087c
+#define SYNOUT_TS_1 0x087d
+#define SYNOUT_TS_2 0x087e
+
+#define NSE_CTRL 0x087f
+#define NSE_GMODE_EN GENMASK(15, 14)
+#define NSE_CAPTURE_EN BIT(13)
+#define NSE_INIT BIT(12)
+#define NSE_CPU_FRAMESYNC BIT(5)
+#define NSE_SYNC1_FRAMESYNC BIT(3)
+#define NSE_FRAMESYNC_MASK GENMASK(5, 2)
+#define NSE_PEROUT_EN BIT(1)
+#define NSE_ONESHOT_EN BIT(0)
+#define NSE_SYNC_OUT_MASK GENMASK(1, 0)
+
+#define TS_READ_CTRL 0x0885
+#define TS_READ_START BIT(0)
+#define TS_READ_END BIT(1)
+
+#define HB_REG_0 0x0886
+#define HB_REG_1 0x0887
+#define HB_REG_2 0x0888
+#define HB_REG_3 0x08ec
+#define HB_REG_4 0x08ed
+#define HB_STAT_CTRL 0x088e
+#define HB_READ_START BIT(10)
+#define HB_READ_END BIT(11)
+#define HB_READ_MASK GENMASK(11, 10)
+
+#define TS_REG_0 0x0889
+#define TS_REG_1 0x088a
+#define TS_REG_2 0x088b
+#define TS_REG_3 0x08c4
+
+#define TS_INFO_0 0x088c
+#define TS_INFO_1 0x088d
+
+#define TIMECODE_CTRL 0x08c3
+#define TX_TIMECODE_SEL GENMASK(7, 0)
+#define RX_TIMECODE_SEL GENMASK(15, 8)
+
+#define TIME_SYNC 0x0ff5
+#define TIME_SYNC_EN BIT(0)
+
+struct bcm_ptp_private {
+ struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct ptp_pin_desc pin;
+ struct mutex mutex;
+ struct sk_buff_head tx_queue;
+ int tx_type;
+ bool hwts_rx;
+ u16 nse_ctrl;
+ bool pin_active;
+ struct delayed_work pin_work;
+};
+
+struct bcm_ptp_skb_cb {
+ unsigned long timeout;
+ u16 seq_id;
+ u8 msgtype;
+ bool discard;
+};
+
+struct bcm_ptp_capture {
+ ktime_t hwtstamp;
+ u16 seq_id;
+ u8 msgtype;
+ bool tx_dir;
+};
+
+#define BCM_SKB_CB(skb) ((struct bcm_ptp_skb_cb *)(skb)->cb)
+#define SKB_TS_TIMEOUT 10 /* jiffies */
+
+#define BCM_MAX_PULSE_8NS ((1U << 9) - 1)
+#define BCM_MAX_PERIOD_8NS ((1U << 30) - 1)
+
+#define BRCM_PHY_MODEL(phydev) \
+ ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
+
+static struct bcm_ptp_private *mii2priv(struct mii_timestamper *mii_ts)
+{
+ return container_of(mii_ts, struct bcm_ptp_private, mii_ts);
+}
+
+static struct bcm_ptp_private *ptp2priv(struct ptp_clock_info *info)
+{
+ return container_of(info, struct bcm_ptp_private, ptp_info);
+}
+
+static void bcm_ptp_get_framesync_ts(struct phy_device *phydev,
+ struct timespec64 *ts)
+{
+ u16 hb[4];
+
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_START);
+
+ hb[0] = bcm_phy_read_exp(phydev, HB_REG_0);
+ hb[1] = bcm_phy_read_exp(phydev, HB_REG_1);
+ hb[2] = bcm_phy_read_exp(phydev, HB_REG_2);
+ hb[3] = bcm_phy_read_exp(phydev, HB_REG_3);
+
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, HB_READ_END);
+ bcm_phy_write_exp(phydev, HB_STAT_CTRL, 0);
+
+ ts->tv_sec = (hb[3] << 16) | hb[2];
+ ts->tv_nsec = (hb[1] << 16) | hb[0];
+}
+
+static u16 bcm_ptp_framesync_disable(struct phy_device *phydev, u16 orig_ctrl)
+{
+ u16 ctrl = orig_ctrl & ~(NSE_FRAMESYNC_MASK | NSE_CAPTURE_EN);
+
+ bcm_phy_write_exp(phydev, NSE_CTRL, ctrl);
+
+ return ctrl;
+}
+
+static void bcm_ptp_framesync_restore(struct phy_device *phydev, u16 orig_ctrl)
+{
+ if (orig_ctrl & NSE_FRAMESYNC_MASK)
+ bcm_phy_write_exp(phydev, NSE_CTRL, orig_ctrl);
+}
+
+static void bcm_ptp_framesync(struct phy_device *phydev, u16 ctrl)
+{
+ /* trigger framesync - must have 0->1 transition. */
+ bcm_phy_write_exp(phydev, NSE_CTRL, ctrl | NSE_CPU_FRAMESYNC);
+}
+
+static int bcm_ptp_framesync_ts(struct phy_device *phydev,
+ struct ptp_system_timestamp *sts,
+ struct timespec64 *ts,
+ u16 orig_ctrl)
+{
+ u16 ctrl, reg;
+ int i;
+
+ ctrl = bcm_ptp_framesync_disable(phydev, orig_ctrl);
+
+ ptp_read_system_prets(sts);
+
+ /* trigger framesync + capture */
+ bcm_ptp_framesync(phydev, ctrl | NSE_CAPTURE_EN);
+
+ ptp_read_system_postts(sts);
+
+ /* poll for FSYNC interrupt from TS capture */
+ for (i = 0; i < 10; i++) {
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if (reg & INTC_FSYNC) {
+ bcm_ptp_get_framesync_ts(phydev, ts);
+ break;
+ }
+ }
+
+ bcm_ptp_framesync_restore(phydev, orig_ctrl);
+
+ return reg & INTC_FSYNC ? 0 : -ETIMEDOUT;
+}
+
+static int bcm_ptp_gettimex(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_framesync_ts(priv->phydev, sts, ts, priv->nse_ctrl);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_settime_locked(struct bcm_ptp_private *priv,
+ const struct timespec64 *ts)
+{
+ struct phy_device *phydev = priv->phydev;
+ u16 ctrl;
+ u64 ns;
+
+ ctrl = bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
+
+ /* set up time code */
+ bcm_phy_write_exp(phydev, TIME_CODE_0, ts->tv_nsec);
+ bcm_phy_write_exp(phydev, TIME_CODE_1, ts->tv_nsec >> 16);
+ bcm_phy_write_exp(phydev, TIME_CODE_2, ts->tv_sec);
+ bcm_phy_write_exp(phydev, TIME_CODE_3, ts->tv_sec >> 16);
+ bcm_phy_write_exp(phydev, TIME_CODE_4, ts->tv_sec >> 32);
+
+ /* set NCO counter to match */
+ ns = timespec64_to_ns(ts);
+ bcm_phy_write_exp(phydev, NCO_TIME_0, ns >> 4);
+ bcm_phy_write_exp(phydev, NCO_TIME_1, ns >> 20);
+ bcm_phy_write_exp(phydev, NCO_TIME_2_CTRL, (ns >> 36) & 0xfff);
+
+ /* set up load on next frame sync (auto-clears due to NSE_INIT) */
+ bcm_phy_write_exp(phydev, SHADOW_LOAD, TIME_CODE_LOAD | NCO_TIME_LOAD);
+
+ /* must have NSE_INIT in order to write time code */
+ bcm_ptp_framesync(phydev, ctrl | NSE_INIT);
+
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ return 0;
+}
+
+static int bcm_ptp_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_settime_locked(priv, ts);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_adjtime_locked(struct bcm_ptp_private *priv,
+ s64 delta_ns)
+{
+ struct timespec64 ts;
+ int err;
+ s64 ns;
+
+ err = bcm_ptp_framesync_ts(priv->phydev, NULL, &ts, priv->nse_ctrl);
+ if (!err) {
+ ns = timespec64_to_ns(&ts) + delta_ns;
+ ts = ns_to_timespec64(ns);
+ err = bcm_ptp_settime_locked(priv, &ts);
+ }
+ return err;
+}
+
+static int bcm_ptp_adjtime(struct ptp_clock_info *info, s64 delta_ns)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err;
+
+ mutex_lock(&priv->mutex);
+ err = bcm_ptp_adjtime_locked(priv, delta_ns);
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+/* A 125 MHz clock should adjust by 8 ns per pulse.
+ * The frequency adjustment base is 0x8000 0000, i.e. 8 * 2^28.
+ *
+ * The frequency adjustment is
+ *   adj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * which simplifies to:
+ *   adj = scaled_ppm * 2^9 / 5^6
+ * (a worked check follows the function below).
+ */
+static int bcm_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int neg_adj = 0;
+ u32 diff, freq;
+ u16 ctrl;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = scaled_ppm << 9;
+ diff = div_u64(adj, 15625);
+ freq = (8 << 28) + (neg_adj ? -diff : diff);
+
+ mutex_lock(&priv->mutex);
+
+ ctrl = bcm_ptp_framesync_disable(priv->phydev, priv->nse_ctrl);
+
+ bcm_phy_write_exp(priv->phydev, NCO_FREQ_LSB, freq);
+ bcm_phy_write_exp(priv->phydev, NCO_FREQ_MSB, freq >> 16);
+
+ bcm_phy_write_exp(priv->phydev, NCO_TIME_2_CTRL, FREQ_MDIO_SEL);
+
+ /* load on next framesync */
+ bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, FREQ_LOAD);
+
+ bcm_ptp_framesync(priv->phydev, ctrl);
+
+ /* clear load */
+ bcm_phy_write_exp(priv->phydev, SHADOW_LOAD, 0);
+
+ bcm_ptp_framesync_restore(priv->phydev, priv->nse_ctrl);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
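A standalone arithmetic check of the adjfine scaling above: for a +1 ppm request (scaled_ppm = 2^16 in 16.16 fixed point), the computed delta should come out to roughly one millionth of the 0x8000 0000 base.

/* Standalone check of the adjfine arithmetic: adj = scaled_ppm * 2^9
 * / 5^6, applied to the 0x80000000 (8 * 2^28) frequency base.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long scaled_ppm = 1L << 16;		/* +1 ppm in 16.16 fixed point */
	uint64_t adj = (uint64_t)scaled_ppm << 9;
	uint32_t diff = (uint32_t)(adj / 15625);	/* 5^6 = 15625 */
	uint32_t freq = (8u << 28) + diff;

	/* diff = 2147, i.e. ~1e-6 of the 2^31 base -> +1 ppm */
	printf("diff=%u freq=0x%08x\n", diff, freq);
	return 0;
}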
+static bool bcm_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct skb_shared_hwtstamps *hwts;
+ struct ptp_header *header;
+ u32 sec, nsec;
+ u8 *data;
+ int off;
+
+ if (!priv->hwts_rx)
+ return false;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return false;
+
+ data = (u8 *)(header + 1);
+ sec = get_unaligned_be32(data);
+ nsec = get_unaligned_be32(data + 4);
+
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = ktime_set(sec, nsec);
+
+ off = data - skb->data + 8;
+ if (off < skb->len) {
+ memmove(data, data + 8, skb->len - off);
+ __pskb_trim(skb, skb->len - 8);
+ }
+
+ return false;
+}
+
+static bool bcm_ptp_get_tstamp(struct bcm_ptp_private *priv,
+ struct bcm_ptp_capture *capts)
+{
+ struct phy_device *phydev = priv->phydev;
+ u16 ts[4], reg;
+ u32 sec, nsec;
+
+ mutex_lock(&priv->mutex);
+
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if ((reg & INTC_SOP) == 0) {
+ mutex_unlock(&priv->mutex);
+ return false;
+ }
+
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_START);
+
+ ts[0] = bcm_phy_read_exp(phydev, TS_REG_0);
+ ts[1] = bcm_phy_read_exp(phydev, TS_REG_1);
+ ts[2] = bcm_phy_read_exp(phydev, TS_REG_2);
+ ts[3] = bcm_phy_read_exp(phydev, TS_REG_3);
+
+ /* not in be32 format for some reason */
+ capts->seq_id = bcm_phy_read_exp(priv->phydev, TS_INFO_0);
+
+ reg = bcm_phy_read_exp(phydev, TS_INFO_1);
+ capts->msgtype = reg >> 12;
+ capts->tx_dir = !!(reg & BIT(11));
+
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, TS_READ_END);
+ bcm_phy_write_exp(phydev, TS_READ_CTRL, 0);
+
+ mutex_unlock(&priv->mutex);
+
+ sec = (ts[3] << 16) | ts[2];
+ nsec = (ts[1] << 16) | ts[0];
+ capts->hwtstamp = ktime_set(sec, nsec);
+
+ return true;
+}
+
+static void bcm_ptp_match_tstamp(struct bcm_ptp_private *priv,
+ struct bcm_ptp_capture *capts)
+{
+ struct skb_shared_hwtstamps hwts;
+ struct sk_buff *skb, *ts_skb;
+ unsigned long flags;
+ bool first = false;
+
+ ts_skb = NULL;
+ spin_lock_irqsave(&priv->tx_queue.lock, flags);
+ skb_queue_walk(&priv->tx_queue, skb) {
+ if (BCM_SKB_CB(skb)->seq_id == capts->seq_id &&
+ BCM_SKB_CB(skb)->msgtype == capts->msgtype) {
+ first = skb_queue_is_first(&priv->tx_queue, skb);
+ __skb_unlink(skb, &priv->tx_queue);
+ ts_skb = skb;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
+
+ /* TX captures one-step packets, discard them if needed. */
+ if (ts_skb) {
+ if (BCM_SKB_CB(ts_skb)->discard) {
+ kfree_skb(ts_skb);
+ } else {
+ memset(&hwts, 0, sizeof(hwts));
+ hwts.hwtstamp = capts->hwtstamp;
+ skb_complete_tx_timestamp(ts_skb, &hwts);
+ }
+ }
+
+ /* not the first match; try to expire stale entries */
+ if (!first) {
+ while ((skb = skb_dequeue(&priv->tx_queue))) {
+ if (!time_after(jiffies, BCM_SKB_CB(skb)->timeout)) {
+ skb_queue_head(&priv->tx_queue, skb);
+ break;
+ }
+ kfree_skb(skb);
+ }
+ }
+}
+
+static long bcm_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ struct bcm_ptp_capture capts;
+ bool reschedule = false;
+
+ while (!skb_queue_empty_lockless(&priv->tx_queue)) {
+ if (!bcm_ptp_get_tstamp(priv, &capts)) {
+ reschedule = true;
+ break;
+ }
+ bcm_ptp_match_tstamp(priv, &capts);
+ }
+
+ return reschedule ? 1 : -1;
+}
+
+static int bcm_ptp_cancel_func(struct bcm_ptp_private *priv)
+{
+ if (!priv->pin_active)
+ return 0;
+
+ priv->pin_active = false;
+
+ priv->nse_ctrl &= ~(NSE_SYNC_OUT_MASK | NSE_SYNC1_FRAMESYNC |
+ NSE_CAPTURE_EN);
+ bcm_phy_write_exp(priv->phydev, NSE_CTRL, priv->nse_ctrl);
+
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ return 0;
+}
+
+static void bcm_ptp_perout_work(struct work_struct *pin_work)
+{
+ struct bcm_ptp_private *priv =
+ container_of(pin_work, struct bcm_ptp_private, pin_work.work);
+ struct phy_device *phydev = priv->phydev;
+ struct timespec64 ts;
+ u64 ns, next;
+ u16 ctrl;
+
+ mutex_lock(&priv->mutex);
+
+ /* no longer running */
+ if (!priv->pin_active) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ bcm_ptp_framesync_ts(phydev, NULL, &ts, priv->nse_ctrl);
+
+ /* this is 1PPS only */
+ next = NSEC_PER_SEC - ts.tv_nsec;
+ ts.tv_sec += next < NSEC_PER_MSEC ? 2 : 1;
+ ts.tv_nsec = 0;
+
+ ns = timespec64_to_ns(&ts);
+
+ /* force 0->1 transition for ONESHOT */
+ ctrl = bcm_ptp_framesync_disable(phydev,
+ priv->nse_ctrl & ~NSE_ONESHOT_EN);
+
+ bcm_phy_write_exp(phydev, SYNOUT_TS_0, ns & 0xfff0);
+ bcm_phy_write_exp(phydev, SYNOUT_TS_1, ns >> 16);
+ bcm_phy_write_exp(phydev, SYNOUT_TS_2, ns >> 32);
+
+ /* load values on next framesync */
+ bcm_phy_write_exp(phydev, SHADOW_LOAD, SYNC_OUT_LOAD);
+
+ bcm_ptp_framesync(phydev, ctrl | NSE_ONESHOT_EN | NSE_INIT);
+
+ priv->nse_ctrl |= NSE_ONESHOT_EN;
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ mutex_unlock(&priv->mutex);
+
+ next = next + NSEC_PER_MSEC;
+ schedule_delayed_work(&priv->pin_work, nsecs_to_jiffies(next));
+}
+
+static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
+ struct ptp_perout_request *req, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+ u64 period, pulse;
+ u16 val;
+
+ if (!on)
+ return bcm_ptp_cancel_func(priv);
+
+ /* 1PPS */
+ if (req->period.sec != 1 || req->period.nsec != 0)
+ return -EINVAL;
+
+ period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
+
+ if (req->flags & PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
+ if (req->flags & PTP_PEROUT_DUTY_CYCLE)
+ pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
+ else
+ pulse = (u64)BCM_MAX_PULSE_8NS << 3;
+
+ /* convert to 8ns units */
+ pulse >>= 3;
+
+ if (!pulse || pulse > period || pulse > BCM_MAX_PULSE_8NS)
+ return -EINVAL;
+
+ bcm_phy_write_exp(phydev, SYNC_OUT_0, period);
+
+ val = ((pulse & 0x3) << 14) | ((period >> 16) & 0x3fff);
+ bcm_phy_write_exp(phydev, SYNC_OUT_1, val);
+
+ val = ((pulse >> 2) & 0x7f) | (pulse << 7);
+ bcm_phy_write_exp(phydev, SYNC_OUT_2, val);
+
+ if (priv->pin_active)
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ priv->pin_active = true;
+ INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_perout_work);
+ schedule_delayed_work(&priv->pin_work, 0);
+
+ return 0;
+}
+
+static void bcm_ptp_extts_work(struct work_struct *pin_work)
+{
+ struct bcm_ptp_private *priv =
+ container_of(pin_work, struct bcm_ptp_private, pin_work.work);
+ struct phy_device *phydev = priv->phydev;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ u16 reg;
+
+ mutex_lock(&priv->mutex);
+
+ /* no longer running */
+ if (!priv->pin_active) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ reg = bcm_phy_read_exp(phydev, INTR_STATUS);
+ if ((reg & INTC_FSYNC) == 0)
+ goto out;
+
+ bcm_ptp_get_framesync_ts(phydev, &ts);
+
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = timespec64_to_ns(&ts);
+ ptp_clock_event(priv->ptp_clock, &event);
+
+out:
+ mutex_unlock(&priv->mutex);
+ schedule_delayed_work(&priv->pin_work, HZ / 4);
+}
+
+static int bcm_ptp_extts_locked(struct bcm_ptp_private *priv, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!on)
+ return bcm_ptp_cancel_func(priv);
+
+ if (priv->pin_active)
+ cancel_delayed_work_sync(&priv->pin_work);
+
+ bcm_ptp_framesync_disable(phydev, priv->nse_ctrl);
+
+ priv->nse_ctrl |= NSE_SYNC1_FRAMESYNC | NSE_CAPTURE_EN;
+
+ bcm_ptp_framesync_restore(phydev, priv->nse_ctrl);
+
+ priv->pin_active = true;
+ INIT_DELAYED_WORK(&priv->pin_work, bcm_ptp_extts_work);
+ schedule_delayed_work(&priv->pin_work, 0);
+
+ return 0;
+}
+
+static int bcm_ptp_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bcm_ptp_private *priv = ptp2priv(info);
+ int err = -EBUSY;
+
+ mutex_lock(&priv->mutex);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (priv->pin.func == PTP_PF_PEROUT)
+ err = bcm_ptp_perout_locked(priv, &rq->perout, on);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (priv->pin.func == PTP_PF_EXTTS)
+ err = bcm_ptp_extts_locked(priv, on);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int bcm_ptp_verify(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static const struct ptp_clock_info bcm_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .max_adj = 100000000,
+ .gettimex64 = bcm_ptp_gettimex,
+ .settime64 = bcm_ptp_settime,
+ .adjtime = bcm_ptp_adjtime,
+ .adjfine = bcm_ptp_adjfine,
+ .enable = bcm_ptp_enable,
+ .verify = bcm_ptp_verify,
+ .do_aux_work = bcm_ptp_do_aux_work,
+ .n_pins = 1,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+};
+
+static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct ptp_header *hdr;
+ bool discard = false;
+ int msgtype;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ goto out;
+ msgtype = ptp_get_msgtype(hdr, type);
+
+ switch (priv->tx_type) {
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ if (msgtype == PTP_MSGTYPE_PDELAY_RESP)
+ discard = true;
+ fallthrough;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ discard = true;
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ BCM_SKB_CB(skb)->timeout = jiffies + SKB_TS_TIMEOUT;
+ BCM_SKB_CB(skb)->seq_id = be16_to_cpu(hdr->sequence_id);
+ BCM_SKB_CB(skb)->msgtype = msgtype;
+ BCM_SKB_CB(skb)->discard = discard;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&priv->tx_queue, skb);
+ ptp_schedule_worker(priv->ptp_clock, 0);
+ return;
+ default:
+ break;
+ }
+
+out:
+ kfree_skb(skb);
+}
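+
+/* Summary of the one-step cases above: HWTSTAMP_TX_ONESTEP_P2P marks
+ * both PDELAY_RESP and (via the fallthrough) SYNC for discard,
+ * HWTSTAMP_TX_ONESTEP_SYNC marks only SYNC, and HWTSTAMP_TX_ON marks
+ * nothing. Marked skbs are still queued so the hardware timestamp can
+ * be matched and consumed; presumably the match path then drops them
+ * without reporting, since the hardware inserts the timestamp into the
+ * frame itself in one-step modes.
+ */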
+
+static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
+ struct ifreq *ifr)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+ struct hwtstamp_config cfg;
+ u16 mode, ctrl;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ priv->hwts_rx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_type = cfg.tx_type;
+
+ ctrl = priv->hwts_rx ? SLICE_RX_EN : 0;
+ ctrl |= priv->tx_type != HWTSTAMP_TX_OFF ? SLICE_TX_EN : 0;
+
+ mode = TX_MODE_SEL(PORT, SYNC, REPLACE_TS) |
+ TX_MODE_SEL(PORT, DELAY_REQ, REPLACE_TS) |
+ TX_MODE_SEL(PORT, PDELAY_REQ, REPLACE_TS) |
+ TX_MODE_SEL(PORT, PDELAY_RESP, REPLACE_TS);
+
+ bcm_phy_write_exp(priv->phydev, TX_EVENT_MODE, mode);
+
+ mode = RX_MODE_SEL(PORT, SYNC, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, DELAY_REQ, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, PDELAY_REQ, INSERT_TS_64) |
+ RX_MODE_SEL(PORT, PDELAY_RESP, INSERT_TS_64);
+
+ bcm_phy_write_exp(priv->phydev, RX_EVENT_MODE, mode);
+
+ bcm_phy_write_exp(priv->phydev, SLICE_CTRL, ctrl);
+
+ if (ctrl & SLICE_TX_EN)
+ bcm_phy_write_exp(priv->phydev, TX_TS_CAPTURE, TX_TS_CAP_EN);
+ else
+ ptp_cancel_worker_sync(priv->ptp_clock);
+
+ /* purge existing data */
+ skb_queue_purge(&priv->tx_queue);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+
+ ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
+ ts_info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ ts_info->tx_types =
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC) |
+ BIT(HWTSTAMP_TX_ONESTEP_P2P);
+ ts_info->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+void bcm_ptp_stop(struct bcm_ptp_private *priv)
+{
+ ptp_cancel_worker_sync(priv->ptp_clock);
+ bcm_ptp_cancel_func(priv);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_stop);
+
+void bcm_ptp_config_init(struct phy_device *phydev)
+{
+ /* init network sync engine */
+ bcm_phy_write_exp(phydev, NSE_CTRL, NSE_GMODE_EN | NSE_INIT);
+
+ /* enable time sync (TX/RX SOP capture) */
+ bcm_phy_write_exp(phydev, TIME_SYNC, TIME_SYNC_EN);
+
+ /* use sec.nsec heartbeat capture */
+ bcm_phy_write_exp(phydev, DPLL_SELECT, DPLL_HB_MODE2);
+
+ /* use 64 bit timecode for TX */
+ bcm_phy_write_exp(phydev, TIMECODE_CTRL, TX_TIMECODE_SEL);
+
+ /* always allow FREQ_LOAD on framesync */
+ bcm_phy_write_exp(phydev, SHADOW_CTRL, FREQ_LOAD);
+
+ bcm_phy_write_exp(phydev, SYNC_IN_DIVIDER, 1);
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_config_init);
+
+static void bcm_ptp_init(struct bcm_ptp_private *priv)
+{
+ priv->nse_ctrl = NSE_GMODE_EN;
+
+ mutex_init(&priv->mutex);
+ skb_queue_head_init(&priv->tx_queue);
+
+ priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
+ priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
+ priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
+ priv->mii_ts.ts_info = bcm_ptp_ts_info;
+
+ priv->phydev->mii_ts = &priv->mii_ts;
+}
+
+struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
+{
+ struct bcm_ptp_private *priv;
+ struct ptp_clock *clock;
+
+ switch (BRCM_PHY_MODEL(phydev)) {
+ case PHY_ID_BCM54210E:
+ break;
+ default:
+ return NULL;
+ }
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ptp_info = bcm_ptp_clock_info;
+
+ snprintf(priv->pin.name, sizeof(priv->pin.name), "SYNC_OUT");
+ priv->ptp_info.pin_config = &priv->pin;
+
+ clock = ptp_clock_register(&priv->ptp_info, &phydev->mdio.dev);
+ if (IS_ERR(clock))
+ return ERR_CAST(clock);
+ priv->ptp_clock = clock;
+
+ priv->phydev = phydev;
+ bcm_ptp_init(priv);
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(bcm_ptp_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 313563482690..cc2858107668 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -10,12 +10,12 @@
#define PHY_ID_BCM8706 0x0143bdc1
#define PHY_ID_BCM8727 0x0143bff0
-#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
-#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
-#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
+#define BCM87XX_PMD_RX_SIGNAL_DETECT 0x000a
+#define BCM87XX_10GBASER_PCS_STATUS 0x0020
+#define BCM87XX_XGXS_LANE_STATUS 0x0018
-#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
-#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
+#define BCM87XX_LASI_CONTROL 0x9002
+#define BCM87XX_LASI_STATUS 0x9005
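+
+/* In the old MII_ADDR_C45 encoding the MMD device address lived in bits
+ * 20:16 of the register number, so e.g. 0x1000a meant devad 1
+ * (MDIO_MMD_PMAPMD), register 0x000a. The accesses below now pass the
+ * devad explicitly through the phy_{read,write}_mmd() helpers instead.
+ */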
#if IS_ENABLED(CONFIG_OF_MDIO)
/* Set and/or override some configuration registers based on the
@@ -54,11 +54,10 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
u16 reg = be32_to_cpup(paddr++);
u16 mask = be32_to_cpup(paddr++);
u16 val_bits = be32_to_cpup(paddr++);
- u32 regnum = mdiobus_c45_addr(devid, reg);
int val = 0;
if (mask) {
- val = phy_read(phydev, regnum);
+ val = phy_read_mmd(phydev, devid, reg);
if (val < 0) {
ret = val;
goto err;
@@ -67,7 +66,7 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
}
val |= val_bits;
- ret = phy_write(phydev, regnum, val);
+ ret = phy_write_mmd(phydev, devid, reg, val);
if (ret < 0)
goto err;
}
@@ -104,21 +103,24 @@ static int bcm87xx_read_status(struct phy_device *phydev)
int pcs_status;
int xgxs_lane_status;
- rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
+ rx_signal_detect = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ BCM87XX_PMD_RX_SIGNAL_DETECT);
if (rx_signal_detect < 0)
return rx_signal_detect;
if ((rx_signal_detect & 1) == 0)
goto no_link;
- pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
+ pcs_status = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ BCM87XX_10GBASER_PCS_STATUS);
if (pcs_status < 0)
return pcs_status;
if ((pcs_status & 1) == 0)
goto no_link;
- xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
+ xgxs_lane_status = phy_read_mmd(phydev, MDIO_MMD_PHYXS,
+ BCM87XX_XGXS_LANE_STATUS);
if (xgxs_lane_status < 0)
return xgxs_lane_status;
@@ -139,25 +141,27 @@ static int bcm87xx_config_intr(struct phy_device *phydev)
{
int reg, err;
- reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
+ reg = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_CONTROL);
if (reg < 0)
return reg;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS);
if (err)
return err;
reg |= 1;
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ err = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ BCM87XX_LASI_CONTROL, reg);
} else {
reg &= ~1;
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ err = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ BCM87XX_LASI_CONTROL, reg);
if (err)
return err;
- err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS);
}
return err;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index bb5104ae4610..ad71c88c87e7 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -11,6 +11,7 @@
*/
#include "bcm-phy-lib.h"
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
@@ -26,6 +27,11 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+struct bcm54xx_phy_priv {
+ u64 *stats;
+ struct bcm_ptp_private *ptp;
+};
+
static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@@ -312,6 +318,22 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
}
+static void bcm54xx_ptp_stop(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->ptp)
+ bcm_ptp_stop(priv->ptp);
+}
+
+static void bcm54xx_ptp_config_init(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->ptp)
+ bcm_ptp_config_init(phydev);
+}
+
static int bcm54xx_config_init(struct phy_device *phydev)
{
int reg, err, val;
@@ -389,6 +411,8 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
}
+ bcm54xx_ptp_config_init(phydev);
+
return 0;
}
@@ -417,6 +441,8 @@ static int bcm54xx_suspend(struct phy_device *phydev)
{
int ret;
+ bcm54xx_ptp_stop(phydev);
+
/* We cannot use a read/modify/write here otherwise the PHY gets into
* a bad state where its LEDs keep flashing, thus defeating the purpose
* of low power mode.
@@ -602,6 +628,26 @@ static int brcm_fet_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ /* The datasheet indicates the PHY needs up to 1us to complete a reset,
+ * so build in some slack here.
+ */
+ usleep_range(1000, 2000);
+
+ /* The PHY requires 65 MDC clock cycles to complete a write operation
+ * and turn the line around properly.
+ *
+ * We ignore -EIO here as the MDIO controller (e.g. mdio-bcm-unimac)
+ * may flag the lack of turn-around as a read failure. This is
+ * particularly true with this combination since the MDIO controller
+ * only uses 64 MDC cycles. This is not a critical failure in this
+ * specific case and it has no functional impact otherwise, so we let
+ * that one go through. If there is a genuine bus error, the next read
+ * of MII_BRCM_FET_INTREG will error out.
+ */
+ err = phy_read(phydev, MII_BMCR);
+ if (err < 0 && err != -EIO)
+ return err;
+
reg = phy_read(phydev, MII_BRCM_FET_INTREG);
if (reg < 0)
return reg;
@@ -720,9 +766,40 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-struct bcm54xx_phy_priv {
- u64 *stats;
-};
+static int brcm_fet_suspend(struct phy_device *phydev)
+{
+ int reg, err, err2, brcmtest;
+
+ /* We cannot use a read/modify/write here, otherwise the PHY continues
+ * to drive the LEDs, which defeats the purpose of low power mode.
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_PDOWN);
+ if (err < 0)
+ return err;
+
+ /* Enable shadow register access */
+ brcmtest = phy_read(phydev, MII_BRCM_FET_BRCMTEST);
+ if (brcmtest < 0)
+ return brcmtest;
+
+ reg = brcmtest | MII_BRCM_FET_BT_SRE;
+
+ err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg);
+ if (err < 0)
+ return err;
+
+ /* Set standby mode */
+ err = phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4,
+ MII_BRCM_FET_SHDW_AM4_STANDBY,
+ MII_BRCM_FET_SHDW_AM4_STANDBY);
+
+ /* Disable shadow register access */
+ err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest);
+ if (!err)
+ err = err2;
+
+ return err;
+}
static int bcm54xx_phy_probe(struct phy_device *phydev)
{
@@ -740,6 +817,10 @@ static int bcm54xx_phy_probe(struct phy_device *phydev)
if (!priv->stats)
return -ENOMEM;
+ priv->ptp = bcm_ptp_probe(phydev);
+ if (IS_ERR(priv->ptp))
+ return PTR_ERR(priv->ptp);
+
return 0;
}
@@ -854,6 +935,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
+ .soft_reset = genphy_soft_reset,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.config_intr = bcm_phy_config_intr,
@@ -986,6 +1068,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = brcm_fet_config_init,
.config_intr = brcm_fet_config_intr,
.handle_interrupt = brcm_fet_handle_interrupt,
+ .suspend = brcm_fet_suspend,
+ .resume = brcm_fet_config_init,
}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
@@ -994,6 +1078,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = brcm_fet_config_init,
.config_intr = brcm_fet_config_intr,
.handle_interrupt = brcm_fet_handle_interrupt,
+ .suspend = brcm_fet_suspend,
+ .resume = brcm_fet_config_init,
}, {
.phy_id = PHY_ID_BCM5395,
.phy_id_mask = 0xfffffff0,
@@ -1020,6 +1106,20 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
+ .phy_id = PHY_ID_BCM53128,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM53128",
+ .flags = PHY_IS_INTERNAL,
+ /* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
+ .config_init = bcm54xx_config_init,
+ .config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
@@ -1055,6 +1155,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5241, 0xfffffff0 },
{ PHY_ID_BCM5395, 0xfffffff0 },
{ PHY_ID_BCM53125, 0xfffffff0 },
+ { PHY_ID_BCM53128, 0xfffffff0 },
{ PHY_ID_BCM89610, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index c2d1a85ec559..ef8b14135133 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -886,7 +886,7 @@ out:
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
if (shhwtstamps)
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void decode_txts(struct dp83640_private *dp83640,
@@ -970,17 +970,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
-static int is_sync(struct sk_buff *skb, int type)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(skb, type);
- if (!hdr)
- return 0;
-
- return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
-}
-
static void dp83640_free_clocks(void)
{
struct dp83640_clock *clock;
@@ -1329,7 +1318,7 @@ static void rx_timestamp_work(struct work_struct *work)
break;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (!skb_queue_empty(&dp83640->rx_queue))
@@ -1380,7 +1369,7 @@ static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
} else {
- netif_rx_ni(skb);
+ netif_rx(skb);
}
return true;
@@ -1396,7 +1385,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
switch (dp83640->hwts_tx_en) {
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (is_sync(skb, type)) {
+ if (ptp_msg_is_sync(skb, type)) {
kfree_skb(skb);
return;
}
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 211b5476a6f5..b60db8b6f477 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -94,7 +94,8 @@
#define DP83822_WOL_INDICATION_SEL BIT(8)
#define DP83822_WOL_CLR_INDICATION BIT(11)
-/* RSCR bits */
+/* RCSR bits */
+#define DP83822_RGMII_MODE_EN BIT(9)
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
@@ -228,9 +229,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (misr_status < 0)
return misr_status;
- misr_status |= (DP83822_RX_ERR_HF_INT_EN |
- DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_LINK_STAT_INT_EN |
+ misr_status |= (DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
@@ -255,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_EEE_ERROR_CHANGE_INT_EN);
if (!dp83822->fx_enabled)
- misr_status |= DP83822_MDI_XOVER_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
+ misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
@@ -274,7 +272,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_DP83822_MISR1, 0);
+ err = phy_write(phydev, MII_DP83822_MISR2, 0);
if (err < 0)
return err;
@@ -408,6 +406,12 @@ static int dp83822_config_init(struct phy_device *phydev)
if (err)
return err;
}
+
+ phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ } else {
+ phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
}
if (dp83822->fx_enabled) {
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8561f2d4443b..417527f8bbf5 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
+#include <linux/nvmem-consumer.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -137,6 +138,7 @@
#define DP83867_DOWNSHIFT_2_COUNT 2
#define DP83867_DOWNSHIFT_4_COUNT 4
#define DP83867_DOWNSHIFT_8_COUNT 8
+#define DP83867_SGMII_AUTONEG_EN BIT(7)
/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
@@ -521,6 +523,51 @@ static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
}
#if IS_ENABLED(CONFIG_OF_MDIO)
+static int dp83867_of_init_io_impedance(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ struct nvmem_cell *cell;
+ u8 *buf, val;
+ int ret;
+
+ cell = of_nvmem_cell_get(of_node, "io_impedance_ctrl");
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ if (ret != -ENOENT && ret != -EOPNOTSUPP)
+ return phydev_err_probe(phydev, ret,
+ "failed to get nvmem cell io_impedance_ctrl\n");
+
+ /* If no nvmem cell, check for the boolean properties. */
+ if (of_property_read_bool(of_node, "ti,max-output-impedance"))
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
+ else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+ else
+ dp83867->io_impedance = -1; /* leave at default */
+
+ return 0;
+ }
+
+ buf = nvmem_cell_read(cell, NULL);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ val = *buf;
+ kfree(buf);
+
+ if ((val & DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK) != val) {
+ phydev_err(phydev, "nvmem cell 'io_impedance_ctrl' contents out of range\n");
+ return -ERANGE;
+ }
+ dp83867->io_impedance = val;
+
+ return 0;
+}
+
static int dp83867_of_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
@@ -548,12 +595,9 @@ static int dp83867_of_init(struct phy_device *phydev)
}
}
- if (of_property_read_bool(of_node, "ti,max-output-impedance"))
- dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
- else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
- dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
- else
- dp83867->io_impedance = -1; /* leave at default */
+ ret = dp83867_of_init_io_impedance(phydev);
+ if (ret)
+ return ret;
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
"ti,dp83867-rxctrl-strap-quirk");
@@ -809,6 +853,14 @@ static int dp83867_config_init(struct phy_device *phydev)
else
val &= ~DP83867_SGMII_TYPE;
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+ /* This is a SW workaround for link instability if RX_CTRL is
+ * not strapped to mode 3 or 4 in HW. This is required for SGMII
+ * in addition to clearing bit 7, handled above.
+ */
+ if (dp83867->rxctrl_strap_quirk)
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(8));
}
val = phy_read(phydev, DP83867_CFG3);
@@ -855,6 +907,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+ /* There is a limitation in the DP83867 PHY device where SGMII AN is
+ * only triggered once after the device is booted up. Even after the
+ * PHY TPI is down and up again, SGMII AN is not triggered and hence
+ * no new in-band message is sent from the PHY to the MAC side SGMII.
+ * This can cause an issue during power up, when the PHY comes up
+ * before the MAC. In that condition, once the MAC side SGMII is up,
+ * it won't receive a new in-band message from the TI PHY with the
+ * correct link status, speed and duplex info.
+ * A SW solution is therefore implemented here to retrigger SGMII
+ * Auto-Neg whenever there is a link change.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ int val = 0;
+
+ val = phy_clear_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ if (val < 0)
+ return;
+
+ phy_set_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ }
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -879,6 +957,8 @@ static struct phy_driver dp83867_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
+
+ .link_change_notify = dp83867_link_change_notify,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
new file mode 100644
index 000000000000..3cd9a77f9532
--- /dev/null
+++ b/drivers/net/phy/dp83td510.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the Texas Instruments DP83TD510 PHY
+ * Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83TD510E_PHY_ID 0x20000181
+
+/* MDIO_MMD_VEND2 registers */
+#define DP83TD510E_PHY_STS 0x10
+#define DP83TD510E_STS_MII_INT BIT(7)
+#define DP83TD510E_LINK_STATUS BIT(0)
+
+#define DP83TD510E_GEN_CFG 0x11
+#define DP83TD510E_GENCFG_INT_POLARITY BIT(3)
+#define DP83TD510E_GENCFG_INT_EN BIT(1)
+#define DP83TD510E_GENCFG_INT_OE BIT(0)
+
+#define DP83TD510E_INTERRUPT_REG_1 0x12
+#define DP83TD510E_INT1_LINK BIT(13)
+#define DP83TD510E_INT1_LINK_EN BIT(5)
+
+#define DP83TD510E_AN_STAT_1 0x60c
+#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
+
+#define DP83TD510E_MSE_DETECT 0xa85
+
+#define DP83TD510_SQI_MAX 7
+
+/* Register values are converted to SNR(dB) as suggested by
+ * "Application Report - DP83TD510E Cable Diagnostics Toolkit":
+ * SNR(dB) = -10 * log10 (VAL/2^17) - 1.76 dB.
+ * SQI ranges are implemented according to "OPEN ALLIANCE - Advanced diagnostic
+ * features for 100BASE-T1 automotive Ethernet PHYs"
+ */
+static const u16 dp83td510_mse_sqi_map[] = {
+ 0x0569, /* < 18dB */
+ 0x044c, /* 18dB =< SNR < 19dB */
+ 0x0369, /* 19dB =< SNR < 20dB */
+ 0x02b6, /* 20dB =< SNR < 21dB */
+ 0x0227, /* 21dB =< SNR < 22dB */
+ 0x01b6, /* 22dB =< SNR < 23dB */
+ 0x015b, /* 23dB =< SNR < 24dB */
+ 0x0000 /* 24dB =< SNR */
+};
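+
+/* Illustrative arithmetic for the thresholds above: 0x0569 = 1385 and
+ * -10 * log10(1385 / 2^17) - 1.76 ~= 18.0 dB, while 0x044c = 1100
+ * gives ~19.0 dB. A reading of e.g. 1200 is below 0x0569 but at least
+ * 0x044c, so the lookup in dp83td510_get_sqi() returns SQI 1
+ * (18dB =< SNR < 19dB).
+ */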
+
+static int dp83td510_config_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
+ 0x0);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_INTERRUPT_REG_1,
+ DP83TD510E_INT1_LINK_EN);
+ if (ret)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_GEN_CFG,
+ DP83TD510E_GENCFG_INT_POLARITY |
+ DP83TD510E_GENCFG_INT_EN |
+ DP83TD510E_GENCFG_INT_OE);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_INTERRUPT_REG_1, 0x0);
+ if (ret)
+ return ret;
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_GEN_CFG,
+ DP83TD510E_GENCFG_INT_EN);
+ if (ret)
+ return ret;
+
+ /* Clear any pending interrupts */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
+ 0x0);
+ }
+
+ return ret;
+}
+
+static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS);
+ if (ret < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ } else if (!(ret & DP83TD510E_STS_MII_INT)) {
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1);
+ if (ret < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ } else if (!(ret & DP83TD510E_INT1_LINK_EN) ||
+ !(ret & DP83TD510E_INT1_LINK)) {
+ return IRQ_NONE;
+ }
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int dp83td510_read_status(struct phy_device *phydev)
+{
+ u16 phy_sts;
+ int ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ linkmode_zero(phydev->lp_advertising);
+
+ phy_sts = phy_read(phydev, DP83TD510E_PHY_STS);
+
+ phydev->link = !!(phy_sts & DP83TD510E_LINK_STATUS);
+ if (phydev->link) {
+ /* This PHY supports only one link mode: 10BaseT1L_Full */
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_10;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+ }
+ }
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_AN_STAT_1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DP83TD510E_MASTER_SLAVE_RESOL_FAIL)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_ERR;
+ } else {
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
+ }
+
+ return 0;
+}
+
+static int dp83td510_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret;
+
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_an_disable_aneg(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int dp83td510_get_sqi(struct phy_device *phydev)
+{
+ int sqi, ret;
+ u16 mse_val;
+
+ if (!phydev->link)
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT);
+ if (ret < 0)
+ return ret;
+
+ mse_val = 0xFFFF & ret;
+ for (sqi = 0; sqi < ARRAY_SIZE(dp83td510_mse_sqi_map); sqi++) {
+ if (mse_val >= dp83td510_mse_sqi_map[sqi])
+ return sqi;
+ }
+
+ return -EINVAL;
+}
+
+static int dp83td510_get_sqi_max(struct phy_device *phydev)
+{
+ return DP83TD510_SQI_MAX;
+}
+
+static int dp83td510_get_features(struct phy_device *phydev)
+{
+ /* This PHY can't respond on the MDIO bus if no RMII clock is enabled.
+ * In case RMII mode is used (the most meaningful mode for this PHY),
+ * the PHY does not have its own XTAL, and the clock-providing MAC is
+ * not probed yet, we won't be able to read all the needed ability
+ * registers. So provide them manually.
+ */
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static struct phy_driver dp83td510_driver[] = {
+{
+ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
+ .name = "TI DP83TD510E",
+
+ .config_aneg = dp83td510_config_aneg,
+ .read_status = dp83td510_read_status,
+ .get_features = dp83td510_get_features,
+ .config_intr = dp83td510_config_intr,
+ .handle_interrupt = dp83td510_handle_interrupt,
+ .get_sqi = dp83td510_get_sqi,
+ .get_sqi_max = dp83td510_get_sqi_max,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+} };
+module_phy_driver(dp83td510_driver);
+
+static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
+ { PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83td510_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83TD510E PHY driver");
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c65fb5f5d2dc..aef739c20ac4 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -180,7 +180,7 @@ static void fixed_phy_del(int phy_addr)
if (fp->link_gpiod)
gpiod_put(fp->link_gpiod);
kfree(fp);
- ida_simple_remove(&phy_fixed_ida, phy_addr);
+ ida_free(&phy_fixed_ida, phy_addr);
return;
}
}
@@ -244,13 +244,13 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
}
/* Get the next available PHY address, up to PHY_MAX_ADDR */
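+ /* (ida_alloc_max() takes an inclusive maximum, hence PHY_MAX_ADDR - 1
+ * below, where ida_simple_get() took an exclusive upper bound.)
+ */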
- phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
if (ret < 0) {
- ida_simple_remove(&phy_fixed_ida, phy_addr);
+ ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
}
@@ -353,6 +353,7 @@ static int __init fixed_mdio_bus_init(void)
fmb->mii_bus->parent = &pdev->dev;
fmb->mii_bus->read = &fixed_mdio_read;
fmb->mii_bus->write = &fixed_mdio_write;
+ fmb->mii_bus->phy_mask = ~0;
ret = mdiobus_register(fmb->mii_bus);
if (ret)
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index d8b31d4d2a73..fd9ad4820192 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -478,6 +478,7 @@ static int mv2222_config_init(struct phy_device *phydev)
static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
phy_interface_t sfp_interface;
struct mv2222_data *priv;
@@ -489,7 +490,8 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
priv = (struct mv2222_data *)phydev->priv;
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
+ phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
@@ -526,6 +528,7 @@ static void mv2222_sfp_remove(void *upstream)
priv->line_interface = PHY_INTERFACE_MODE_NA;
linkmode_zero(priv->supported);
+ phydev->port = PORT_NONE;
}
static void mv2222_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 739859c0dfb1..2810f4f9da0c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -189,6 +189,8 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+#define MII_88E1510_MSCR_2 0x15
+
#define MII_VCT5_TX_RX_MDI0_COUPLING 0x10
#define MII_VCT5_TX_RX_MDI1_COUPLING 0x11
#define MII_VCT5_TX_RX_MDI2_COUPLING 0x12
@@ -551,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
else
mscr = 0;
- return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1121_PHY_MSCR_REG,
- MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
+ return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -567,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
+ changed = err;
+
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- changed = err;
+ changed |= err;
err = genphy_config_aneg(phydev);
if (err < 0)
@@ -957,7 +961,21 @@ static int m88e1111_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- return genphy_soft_reset(phydev);
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* If the HWCFG_MODE was changed from another mode (such as
+ * 1000BaseX) to SGMII, the state of the support bits may have
+ * also changed now that the PHY has been reset.
+ * Update the PHY abilities accordingly.
+ */
+ err = genphy_read_abilities(phydev);
+ linkmode_or(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ }
+ return err;
}
static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
@@ -1173,7 +1191,44 @@ static int m88e1318_config_init(struct phy_device *phydev)
static int m88e1510_config_init(struct phy_device *phydev)
{
+ static const struct {
+ u16 reg17, reg16;
+ } errata_vals[] = {
+ { 0x214b, 0x2144 },
+ { 0x0c28, 0x2146 },
+ { 0xb233, 0x214d },
+ { 0xcc0c, 0x2159 },
+ };
int err;
+ int i;
+
+ /* As per Marvell Release Notes - Alaska 88E1510/88E1518/88E1512/
+ * 88E1514 Rev A0, Errata Section 5.1:
+ * If EEE is intended to be used, the following register writes
+ * must be done once after every hardware reset.
+ */
+ err = marvell_set_page(phydev, 0x00FF);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(errata_vals); ++i) {
+ err = phy_write(phydev, 17, errata_vals[i].reg17);
+ if (err)
+ return err;
+ err = phy_write(phydev, 16, errata_vals[i].reg16);
+ if (err)
+ return err;
+ }
+
+ err = marvell_set_page(phydev, 0x00FB);
+ if (err < 0)
+ return err;
+ err = phy_write(phydev, 07, 0xC00D);
+ if (err < 0)
+ return err;
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ return err;
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
@@ -1211,16 +1266,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
{
int err;
- err = genphy_soft_reset(phydev);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ err = genphy_config_aneg(phydev);
if (err < 0)
return err;
- err = genphy_config_aneg(phydev);
- return 0;
+ return genphy_soft_reset(phydev);
}
static int m88e1118_config_init(struct phy_device *phydev)
@@ -1684,8 +1738,8 @@ static int marvell_suspend(struct phy_device *phydev)
int err;
/* Suspend the fiber mode first */
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
- phydev->supported)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1719,8 +1773,8 @@ static int marvell_resume(struct phy_device *phydev)
int err;
/* Resume the fiber mode first */
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
- phydev->supported)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1898,7 +1952,7 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < count; i++) {
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -1932,6 +1986,52 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static int m88e1510_loopback(struct phy_device *phydev, bool enable)
+{
+ int err;
+
+ if (enable) {
+ u16 bmcr_ctl, mscr2_ctl = 0;
+
+ bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+
+ err = phy_write(phydev, MII_BMCR, bmcr_ctl);
+ if (err < 0)
+ return err;
+
+ if (phydev->speed == SPEED_1000)
+ mscr2_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ mscr2_ctl = BMCR_SPEED100;
+
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1510_MSCR_2, BMCR_SPEED1000 |
+ BMCR_SPEED100, mscr2_ctl);
+ if (err < 0)
+ return err;
+
+ /* Need a soft reset for the speed configuration to take effect */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ /* FIXME: Based on trial and error testing, it seems 1G needs a
+ * delay between soft reset and loopback enablement.
+ */
+ if (phydev->speed == SPEED_1000)
+ msleep(1000);
+
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
+ } else {
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
+ if (err < 0)
+ return err;
+
+ return phy_config_aneg(phydev);
+ }
+}
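+
+/* mii_bmcr_encode_fixed() above builds the forced-speed BMCR value,
+ * e.g. mii_bmcr_encode_fixed(SPEED_1000, DUPLEX_FULL) yields
+ * BMCR_SPEED1000 | BMCR_FULLDPLX with autonegotiation disabled, which
+ * lets the loopback run at the currently resolved speed.
+ */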
+
static int marvell_vct5_wait_complete(struct phy_device *phydev)
{
int i;
@@ -2745,6 +2845,7 @@ static int marvell_probe(struct phy_device *phydev)
static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
phy_interface_t interface;
struct device *dev;
@@ -2756,7 +2857,7 @@ static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, supported);
+ sfp_parse_support(phydev->sfp_bus, id, supported, interfaces);
interface = sfp_select_interface(phydev->sfp_bus, supported);
dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
@@ -3078,7 +3179,7 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
- .set_loopback = genphy_loopback,
+ .set_loopback = m88e1510_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
.cable_test_start = marvell_vct7_cable_test_start,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index b6fea119fe13..383a9c9f36e5 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -96,6 +96,11 @@ enum {
MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
+ /* SerDes reinitialization 88E21X0 */
+ MV_AN_21X0_SERDES_CTRL2 = 0x800f,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS = BIT(13),
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT = BIT(15),
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@@ -117,16 +122,16 @@ enum {
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5,
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7,
- MV_V2_PORT_INTR_STS = 0xf040,
- MV_V2_PORT_INTR_MASK = 0xf043,
- MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
- MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
- MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
- MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
+ MV_V2_PORT_INTR_STS = 0xf040,
+ MV_V2_PORT_INTR_MASK = 0xf043,
+ MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
+ MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
+ MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
+ MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
/* Wake on LAN registers */
- MV_V2_WOL_CTRL = 0xf06e,
- MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
- MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
+ MV_V2_WOL_CTRL = 0xf06e,
+ MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
/* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
@@ -140,6 +145,8 @@ struct mv3310_chip {
bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
+ int (*set_mactype)(struct phy_device *phydev, int mactype);
+ int (*select_mactype)(unsigned long *interfaces);
int (*init_interface)(struct phy_device *phydev, int mactype);
#ifdef CONFIG_HWMON
@@ -466,9 +473,10 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
phy_interface_t iface;
- sfp_parse_support(phydev->sfp_bus, id, support);
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
iface = sfp_select_interface(phydev->sfp_bus, support);
if (iface != PHY_INTERFACE_MODE_10GBASER) {
@@ -593,6 +601,49 @@ static int mv2110_get_mactype(struct phy_device *phydev)
return mactype & MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
}
+static int mv2110_set_mactype(struct phy_device *phydev, int mactype)
+{
+ int err, val;
+
+ mactype &= MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
+ err = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_21X0_PORT_CTRL,
+ MV_PMA_21X0_PORT_CTRL_SWRST |
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK,
+ MV_PMA_21X0_PORT_CTRL_SWRST | mactype);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS |
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT);
+ if (err)
+ return err;
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_AN,
+ MV_AN_21X0_SERDES_CTRL2, val,
+ !(val &
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT),
+ 5000, 100000, true);
+ if (err)
+ return err;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS);
+}
+
+static int mv2110_select_mactype(unsigned long *interfaces)
+{
+ if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ !test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH;
+ else
+ return -1;
+}
+
static int mv3310_get_mactype(struct phy_device *phydev)
{
int mactype;
@@ -604,6 +655,46 @@ static int mv3310_get_mactype(struct phy_device *phydev)
return mactype & MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
}
+static int mv3310_set_mactype(struct phy_device *phydev, int mactype)
+{
+ int ret;
+
+ mactype &= MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_MASK,
+ mactype);
+ if (ret <= 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_33X0_PORT_CTRL_SWRST);
+}
+
+static int mv3310_select_mactype(unsigned long *interfaces)
+{
+ if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_XAUI, interfaces))
+ return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_XAUI, interfaces))
+ return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER;
+ else
+ return -1;
+}
+
static int mv2110_init_interface(struct phy_device *phydev, int mactype)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
@@ -687,6 +778,20 @@ static int mv3310_config_init(struct phy_device *phydev)
if (err)
return err;
+ /* If host provided host supported interface modes, try to select the
+ * best one
+ */
+ if (!phy_interface_empty(phydev->host_interfaces)) {
+ mactype = chip->select_mactype(phydev->host_interfaces);
+ if (mactype >= 0) {
+ phydev_info(phydev, "Changing MACTYPE to %i\n",
+ mactype);
+ err = chip->set_mactype(phydev, mactype);
+ if (err)
+ return err;
+ }
+ }
+
mactype = chip->get_mactype(phydev);
if (mactype < 0)
return mactype;
@@ -880,7 +985,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
if (cssr1 < 0)
- return val;
+ return cssr1;
/* If the link settings are not resolved, mark the link down */
if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
@@ -1049,6 +1154,8 @@ static const struct mv3310_chip mv3310_type = {
.has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3310_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
+ .set_mactype = mv3310_set_mactype,
+ .select_mactype = mv3310_select_mactype,
.init_interface = mv3310_init_interface,
#ifdef CONFIG_HWMON
@@ -1060,6 +1167,8 @@ static const struct mv3310_chip mv3340_type = {
.has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3340_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
+ .set_mactype = mv3310_set_mactype,
+ .select_mactype = mv3310_select_mactype,
.init_interface = mv3340_init_interface,
#ifdef CONFIG_HWMON
@@ -1070,6 +1179,8 @@ static const struct mv3310_chip mv3340_type = {
static const struct mv3310_chip mv2110_type = {
.init_supported_interfaces = mv2110_init_supported_interfaces,
.get_mactype = mv2110_get_mactype,
+ .set_mactype = mv2110_set_mactype,
+ .select_mactype = mv2110_select_mactype,
.init_interface = mv2110_init_interface,
#ifdef CONFIG_HWMON
@@ -1080,6 +1191,8 @@ static const struct mv3310_chip mv2110_type = {
static const struct mv3310_chip mv2111_type = {
.init_supported_interfaces = mv2111_init_supported_interfaces,
.get_mactype = mv2110_get_mactype,
+ .set_mactype = mv2110_set_mactype,
+ .select_mactype = mv2110_select_mactype,
.init_interface = mv2110_init_interface,
#ifdef CONFIG_HWMON
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 58d602985877..1cd604cd1fa1 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -232,7 +232,7 @@ static ssize_t mdio_bus_stat_field_show(struct device *dev,
val = mdio_bus_get_stat(&bus->stats[sattr->addr],
sattr->field_offset);
- return sprintf(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
@@ -251,7 +251,7 @@ static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset);
- return sprintf(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
#define MDIO_BUS_STATS_ATTR_DECL(field, file) \
@@ -583,7 +583,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
}
for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & (1 << i)) == 0) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
struct phy_device *phydev;
phydev = mdiobus_scan(bus, i);
@@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void)
return ret;
}
-EXPORT_SYMBOL_GPL(mdio_bus_init);
#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index b7a5ae20edd5..68ee434f9dea 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
static int mt7531_phy_config_init(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
-
mtk_gephy_config_init(phydev);
/* PHY link down power saving enable */
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 7e7904fee1d9..c49062ad72c6 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -30,8 +30,12 @@
#define INTSRC_LINK_DOWN BIT(4)
#define INTSRC_REMOTE_FAULT BIT(5)
#define INTSRC_ANEG_COMPLETE BIT(6)
+#define INTSRC_ENERGY_DETECT BIT(7)
#define INTSRC_MASK 30
+#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \
+ INTSRC_ENERGY_DETECT)
+
#define BANK_ANALOG_DSP 0
#define BANK_WOL 1
#define BANK_BIST 3
@@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
static int meson_gxl_config_intr(struct phy_device *phydev)
{
- u16 val;
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
if (ret)
return ret;
- val = INTSRC_ANEG_PR
- | INTSRC_PARALLEL_FAULT
- | INTSRC_ANEG_LP_ACK
- | INTSRC_LINK_DOWN
- | INTSRC_REMOTE_FAULT
- | INTSRC_ANEG_COMPLETE;
- ret = phy_write(phydev, INTSRC_MASK, val);
+ ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES);
} else {
- val = 0;
- ret = phy_write(phydev, INTSRC_MASK, val);
+ ret = phy_write(phydev, INTSRC_MASK, 0);
/* Ack any pending IRQ */
ret = meson_gxl_ack_interrupt(phydev);
@@ -237,9 +233,16 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
+ irq_status &= INT_SOURCES;
+
if (irq_status == 0)
return IRQ_NONE;
+ /* Aneg-complete interrupt is used for link-up detection */
+ if (phydev->autoneg == AUTONEG_ENABLE &&
+ irq_status == INTSRC_ENERGY_DETECT)
+ return IRQ_HANDLED;
+
phy_trigger_machine(phydev);
return IRQ_HANDLED;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 4570cb9535b7..54a17b576eac 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,11 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/gpio/consumer.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -66,6 +71,36 @@
#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+#define KSZ9x31_LMD 0x12
+#define KSZ9x31_LMD_VCT_EN BIT(15)
+#define KSZ9x31_LMD_VCT_DIS_TX BIT(14)
+#define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12)
+#define KSZ9x31_LMD_VCT_SEL_RESULT 0
+#define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10)
+#define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11)
+#define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10)
+#define KSZ9x31_LMD_VCT_ST_NORMAL 0
+#define KSZ9x31_LMD_VCT_ST_OPEN 1
+#define KSZ9x31_LMD_VCT_ST_SHORT 2
+#define KSZ9x31_LMD_VCT_ST_FAIL 3
+#define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8)
+#define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7)
+#define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6)
+#define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5)
+#define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4)
+#define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2)
+#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0)
+#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+
+#define KSZPHY_WIRE_PAIR_MASK 0x3
+
+#define LAN8814_CABLE_DIAG 0x12
+#define LAN8814_CABLE_DIAG_STAT_MASK GENMASK(9, 8)
+#define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0)
+#define LAN8814_PAIR_BIT_SHIFT 12
+
+#define LAN8814_WIRE_PAIR_MASK 0xF
+
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
@@ -79,6 +114,113 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+/* Represents a 1ppm adjustment in 2^32 format, where each
+ * nsec contains 4 clock cycles.
+ * The value is calculated as follows: (1/1000000) / ((2^-32)/4)
+ */
+#define LAN8814_1PPM_FORMAT 17179
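+/* Worked out: (1/1000000) / ((2^-32)/4) = 2^34 / 10^6 = 17179.87,
+ * truncated to 17179, i.e. this many register LSBs per 1ppm of
+ * frequency offset.
+ */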
+
+#define PTP_RX_MOD 0x024F
+#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+#define PTP_RX_TIMESTAMP_EN 0x024D
+#define PTP_TX_TIMESTAMP_EN 0x028D
+
+#define PTP_TIMESTAMP_EN_SYNC_ BIT(0)
+#define PTP_TIMESTAMP_EN_DREQ_ BIT(1)
+#define PTP_TIMESTAMP_EN_PDREQ_ BIT(2)
+#define PTP_TIMESTAMP_EN_PDRES_ BIT(3)
+
+#define PTP_TX_PARSE_L2_ADDR_EN 0x0284
+#define PTP_RX_PARSE_L2_ADDR_EN 0x0244
+
+#define PTP_TX_PARSE_IP_ADDR_EN 0x0285
+#define PTP_RX_PARSE_IP_ADDR_EN 0x0245
+#define LTC_HARD_RESET 0x023F
+#define LTC_HARD_RESET_ BIT(0)
+
+#define TSU_HARD_RESET 0x02C1
+#define TSU_HARD_RESET_ BIT(0)
+
+#define PTP_CMD_CTL 0x0200
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(0)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+
+#define PTP_CLOCK_SET_SEC_MID 0x0206
+#define PTP_CLOCK_SET_SEC_LO 0x0207
+#define PTP_CLOCK_SET_NS_HI 0x0208
+#define PTP_CLOCK_SET_NS_LO 0x0209
+
+#define PTP_CLOCK_READ_SEC_MID 0x022A
+#define PTP_CLOCK_READ_SEC_LO 0x022B
+#define PTP_CLOCK_READ_NS_HI 0x022C
+#define PTP_CLOCK_READ_NS_LO 0x022D
+
+#define PTP_OPERATING_MODE 0x0241
+#define PTP_OPERATING_MODE_STANDALONE_ BIT(0)
+
+#define PTP_TX_MOD 0x028F
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ BIT(12)
+#define PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+
+#define PTP_RX_PARSE_CONFIG 0x0242
+#define PTP_RX_PARSE_CONFIG_LAYER2_EN_ BIT(0)
+#define PTP_RX_PARSE_CONFIG_IPV4_EN_ BIT(1)
+#define PTP_RX_PARSE_CONFIG_IPV6_EN_ BIT(2)
+
+#define PTP_TX_PARSE_CONFIG 0x0282
+#define PTP_TX_PARSE_CONFIG_LAYER2_EN_ BIT(0)
+#define PTP_TX_PARSE_CONFIG_IPV4_EN_ BIT(1)
+#define PTP_TX_PARSE_CONFIG_IPV6_EN_ BIT(2)
+
+#define PTP_CLOCK_RATE_ADJ_HI 0x020C
+#define PTP_CLOCK_RATE_ADJ_LO 0x020D
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(15)
+
+#define PTP_LTC_STEP_ADJ_HI 0x0212
+#define PTP_LTC_STEP_ADJ_LO 0x0213
+#define PTP_LTC_STEP_ADJ_DIR_ BIT(15)
+
+#define LAN8814_INTR_STS_REG 0x0033
+#define LAN8814_INTR_STS_REG_1588_TSU0_ BIT(0)
+#define LAN8814_INTR_STS_REG_1588_TSU1_ BIT(1)
+#define LAN8814_INTR_STS_REG_1588_TSU2_ BIT(2)
+#define LAN8814_INTR_STS_REG_1588_TSU3_ BIT(3)
+
+#define PTP_CAP_INFO 0x022A
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x0f00) >> 8)
+#define PTP_CAP_INFO_RX_TS_CNT_GET_(reg_val) ((reg_val) & 0x000f)
+
+#define PTP_TX_EGRESS_SEC_HI 0x0296
+#define PTP_TX_EGRESS_SEC_LO 0x0297
+#define PTP_TX_EGRESS_NS_HI 0x0294
+#define PTP_TX_EGRESS_NS_LO 0x0295
+#define PTP_TX_MSG_HEADER2 0x0299
+
+#define PTP_RX_INGRESS_SEC_HI 0x0256
+#define PTP_RX_INGRESS_SEC_LO 0x0257
+#define PTP_RX_INGRESS_NS_HI 0x0254
+#define PTP_RX_INGRESS_NS_LO 0x0255
+#define PTP_RX_MSG_HEADER2 0x0259
+
+#define PTP_TSU_INT_EN 0x0200
+#define PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ BIT(3)
+#define PTP_TSU_INT_EN_PTP_TX_TS_EN_ BIT(2)
+#define PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_ BIT(1)
+#define PTP_TSU_INT_EN_PTP_RX_TS_EN_ BIT(0)
+
+#define PTP_TSU_INT_STS 0x0201
+#define PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_ BIT(3)
+#define PTP_TSU_INT_STS_PTP_TX_TS_EN_ BIT(2)
+#define PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_ BIT(1)
+#define PTP_TSU_INT_STS_PTP_RX_TS_EN_ BIT(0)
+
+#define LAN8814_LED_CTRL_1 0x0
+#define LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_ BIT(6)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -108,6 +250,7 @@
#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
#define PS_TO_REG 200
+#define FIFO_SIZE 8
struct kszphy_hw_stat {
const char *string;
@@ -123,19 +266,73 @@ static struct kszphy_hw_stat kszphy_hw_stats[] = {
struct kszphy_type {
u32 led_mode_reg;
u16 interrupt_level_mask;
+ u16 cable_diag_reg;
+ unsigned long pair_mask;
bool has_broadcast_disable;
bool has_nand_tree_disable;
bool has_rmii_ref_clk_sel;
};
+/* Shared structure between the PHYs of the same package. */
+struct lan8814_shared_priv {
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+
+	/* Reference count of the ports in the package that have
+	 * timestamping enabled
+	 */
+ u8 ref;
+
+ /* Lock for ptp_clock and ref */
+ struct mutex shared_lock;
+};
+
+struct lan8814_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+struct kszphy_ptp_priv {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+
+ struct list_head rx_ts_list;
+	/* Lock for the Rx timestamp list */
+ spinlock_t rx_ts_lock;
+
+ int hwts_tx_type;
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+};
+
struct kszphy_priv {
+ struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
int led_mode;
+ u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
+static const struct kszphy_type lan8814_type = {
+ .led_mode_reg = ~LAN8814_LED_CTRL_1,
+ .cable_diag_reg = LAN8814_CABLE_DIAG,
+ .pair_mask = LAN8814_WIRE_PAIR_MASK,
+};
+
+static const struct kszphy_type ksz886x_type = {
+ .cable_diag_reg = KSZ8081_LMD,
+ .pair_mask = KSZPHY_WIRE_PAIR_MASK,
+};
+
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
.has_broadcast_disable = true,
@@ -348,7 +545,7 @@ static int kszphy_config_reset(struct phy_device *phydev)
}
}
- if (priv->led_mode >= 0)
+ if (priv->type && priv->led_mode >= 0)
kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
return 0;
@@ -364,10 +561,10 @@ static int kszphy_config_init(struct phy_device *phydev)
type = priv->type;
- if (type->has_broadcast_disable)
+ if (type && type->has_broadcast_disable)
kszphy_broadcast_disable(phydev);
- if (type->has_nand_tree_disable)
+ if (type && type->has_nand_tree_disable)
kszphy_nand_tree_disable(phydev);
return kszphy_config_reset(phydev);
@@ -1177,6 +1374,199 @@ static int ksz9031_read_status(struct phy_device *phydev)
return 0;
}
+static int ksz9x31_cable_test_start(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * Prior to running the cable diagnostics, Auto-negotiation should
+ * be disabled, full duplex set and the link speed set to 1000Mbps
+ * via the Basic Control Register.
+ */
+ ret = phy_modify(phydev, MII_BMCR,
+ BMCR_SPEED1000 | BMCR_FULLDPLX |
+ BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (ret)
+ return ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * The Master-Slave configuration should be set to Slave by writing
+ * a value of 0x1000 to the Auto-Negotiation Master Slave Control
+ * Register.
+ */
+ ret = phy_read(phydev, MII_CTRL1000);
+ if (ret < 0)
+ return ret;
+
+ /* Cache these bits, they need to be restored once LinkMD finishes. */
+ priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret |= CTL1000_ENABLE_MASTER;
+
+ return phy_write(phydev, MII_CTRL1000, ret);
+}
+
+static int ksz9x31_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case KSZ9x31_LMD_VCT_ST_FAIL:
+ fallthrough;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool ksz9x31_cable_test_failed(u16 status)
+{
+ int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status);
+
+ return stat == KSZ9x31_LMD_VCT_ST_FAIL;
+}
+
+static bool ksz9x31_cable_test_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ fallthrough;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat)
+{
+ int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat);
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ *
+ * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity
+ */
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131)
+ dt = clamp(dt - 22, 0, 255);
+
+ return (dt * 400) / 10;
+}
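(Editor's note: the conversion above follows the datasheet formula quoted in the comment; returning (dt * 400) / 10 yields centimeters, the unit ethnl_cable_test_fault_length() reports. A self-contained sketch of the same arithmetic, with a hypothetical VCT_DATA readout:

    #include <stdio.h>

    /* Distance to fault is (VCT_DATA - 22) * 0.4 m on the KSZ9131,
     * reported in centimeters.
     */
    static int ksz9131_fault_length_cm(int vct_data)
    {
            int dt = vct_data - 22;

            if (dt < 0)
                    dt = 0;
            return (dt * 400) / 10;
    }

    int main(void)
    {
            /* hypothetical VCT_DATA of 72 -> (72 - 22) * 0.4 m = 20 m */
            printf("%d cm\n", ksz9131_fault_length_cm(72));
            return 0;
    }
)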
+
+static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val,
+ !(val & KSZ9x31_LMD_VCT_EN),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ksz9x31_cable_test_get_pair(int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+
+ return ethtool_pair[pair];
+}
+
+static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ int ret, val;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * To test each individual cable pair, set the cable pair in the Cable
+ * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable
+ * Diagnostic Register, along with setting the Cable Diagnostics Test
+ * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit
+ * will self clear when the test is concluded.
+ */
+ ret = phy_write(phydev, KSZ9x31_LMD,
+ KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair));
+ if (ret)
+ return ret;
+
+ ret = ksz9x31_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, KSZ9x31_LMD);
+ if (val < 0)
+ return val;
+
+ if (ksz9x31_cable_test_failed(val))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_result_trans(val));
+ if (ret)
+ return ret;
+
+ if (!ksz9x31_cable_test_fault_length_valid(val))
+ return 0;
+
+ return ethnl_cable_test_fault_length(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_fault_length(phydev, val));
+}
+
+static int ksz9x31_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ unsigned long pair_mask = 0xf;
+ int retries = 20;
+ int pair, ret, rv;
+
+ *finished = false;
+
+ /* Try harder if link partner is active */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ksz9x31_cable_test_one_pair(phydev, pair);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+ clear_bit(pair, &pair_mask);
+ }
+ /* If link partner is in autonegotiation mode it will send 2ms
+ * of FLPs with at least 6ms of silence.
+ * Add 2ms sleep to have better chances to hit this silence.
+ */
+ if (pair_mask)
+ usleep_range(2000, 3000);
+ }
+
+	/* Report the remaining unfinished pairs as unknown. */
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ *finished = true;
+
+ /* Restore cached bits from before LinkMD got started. */
+ rv = phy_modify(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER,
+ priv->vct_ctrl1000);
+ if (rv)
+ return rv;
+
+ return ret;
+}
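(Editor's note: the loop above tracks unfinished pairs in a bitmask and retries only the pairs that returned -EAGAIN, sleeping between rounds to dodge the link partner's FLP bursts. A standalone sketch of the retry pattern, with a hypothetical per-pair test in place of the hardware access:

    #include <stdio.h>

    #define SIM_EAGAIN 11

    /* Hypothetical per-pair test: pairs A and C need one extra attempt. */
    static int test_one_pair(int pair, int attempt)
    {
            if ((pair == 0 || pair == 2) && attempt == 0)
                    return -SIM_EAGAIN;
            return 0;
    }

    int main(void)
    {
            unsigned long pair_mask = 0xf; /* all four pairs pending */
            int retries = 20, attempt = 0, pair;

            while (pair_mask && retries--) {
                    for (pair = 0; pair < 4; pair++) {
                            if (!(pair_mask & (1UL << pair)))
                                    continue;
                            if (test_one_pair(pair, attempt) == -SIM_EAGAIN)
                                    continue;            /* busy, retry later */
                            pair_mask &= ~(1UL << pair); /* pair finished */
                    }
                    attempt++;
            }
            printf("unfinished pairs: 0x%lx\n", pair_mask);
            return 0;
    }
)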
+
static int ksz8873mll_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -1278,7 +1668,7 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -1323,6 +1713,30 @@ static int kszphy_suspend(struct phy_device *phydev)
return genphy_suspend(phydev);
}
+static void kszphy_parse_led_mode(struct phy_device *phydev)
+{
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ if (type && type->led_mode_reg) {
+ ret = of_property_read_u32(np, "micrel,led-mode",
+ &priv->led_mode);
+
+ if (ret)
+ priv->led_mode = -1;
+
+ if (priv->led_mode > 3) {
+ phydev_err(phydev, "invalid led mode: 0x%02x\n",
+ priv->led_mode);
+ priv->led_mode = -1;
+ }
+ } else {
+ priv->led_mode = -1;
+ }
+}
+
static int kszphy_resume(struct phy_device *phydev)
{
int ret;
@@ -1355,7 +1769,6 @@ static int kszphy_probe(struct phy_device *phydev)
const struct device_node *np = phydev->mdio.dev.of_node;
struct kszphy_priv *priv;
struct clk *clk;
- int ret;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1365,20 +1778,7 @@ static int kszphy_probe(struct phy_device *phydev)
priv->type = type;
- if (type->led_mode_reg) {
- ret = of_property_read_u32(np, "micrel,led-mode",
- &priv->led_mode);
- if (ret)
- priv->led_mode = -1;
-
- if (priv->led_mode > 3) {
- phydev_err(phydev, "invalid led mode: 0x%02x\n",
- priv->led_mode);
- priv->led_mode = -1;
- }
- } else {
- priv->led_mode = -1;
- }
+ kszphy_parse_led_mode(phydev);
clk = devm_clk_get(&phydev->mdio.dev, "rmii-ref");
/* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
@@ -1386,7 +1786,8 @@ static int kszphy_probe(struct phy_device *phydev)
unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
- priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+ if (type)
+ priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
"micrel,rmii-reference-clock-select-25-mhz");
@@ -1413,6 +1814,17 @@ static int kszphy_probe(struct phy_device *phydev)
return 0;
}
+static int lan8814_cable_test_start(struct phy_device *phydev)
+{
+	/* If autoneg is enabled, we won't be able to test cross-pair
+	 * shorts. In this case, the PHY will "detect" a link and
+	 * confuse the internal state machine - disable autoneg here.
+	 * Set the speed to 1000 Mbps and full duplex.
+	 */
+ return phy_modify(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+}
+
static int ksz886x_cable_test_start(struct phy_device *phydev)
{
if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA)
@@ -1426,9 +1838,9 @@ static int ksz886x_cable_test_start(struct phy_device *phydev)
return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
}
-static int ksz886x_cable_test_result_trans(u16 status)
+static __always_inline int ksz886x_cable_test_result_trans(u16 status, u16 mask)
{
- switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_NORMAL:
return ETHTOOL_A_CABLE_RESULT_CODE_OK;
case KSZ8081_LMD_STAT_SHORT:
@@ -1442,15 +1854,15 @@ static int ksz886x_cable_test_result_trans(u16 status)
}
}
-static bool ksz886x_cable_test_failed(u16 status)
+static __always_inline bool ksz886x_cable_test_failed(u16 status, u16 mask)
{
- return FIELD_GET(KSZ8081_LMD_STAT_MASK, status) ==
+ return FIELD_GET(mask, status) ==
KSZ8081_LMD_STAT_FAIL;
}
-static bool ksz886x_cable_test_fault_length_valid(u16 status)
+static __always_inline bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
{
- switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_OPEN:
fallthrough;
case KSZ8081_LMD_STAT_SHORT:
@@ -1459,29 +1871,80 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status)
return false;
}
-static int ksz886x_cable_test_fault_length(u16 status)
+static __always_inline int ksz886x_cable_test_fault_length(struct phy_device *phydev,
+ u16 status, u16 data_mask)
{
int dt;
/* According to the data sheet the distance to the fault is
- * DELTA_TIME * 0.4 meters.
+	 * DELTA_TIME * 0.4 meters for KSZ PHYs and
+	 * (DELTA_TIME - 22) * 0.8 meters for the LAN8814 PHY.
*/
- dt = FIELD_GET(KSZ8081_LMD_DELTA_TIME_MASK, status);
+ dt = FIELD_GET(data_mask, status);
- return (dt * 400) / 10;
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_LAN8814)
+ return ((dt - 22) * 800) / 10;
+ else
+ return (dt * 400) / 10;
}
static int ksz886x_cable_test_wait_for_completion(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
int val, ret;
- ret = phy_read_poll_timeout(phydev, KSZ8081_LMD, val,
+ ret = phy_read_poll_timeout(phydev, type->cable_diag_reg, val,
!(val & KSZ8081_LMD_ENABLE_TEST),
30000, 100000, true);
return ret < 0 ? ret : 0;
}
+static int lan8814_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = { ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ u32 fault_length;
+ int ret;
+ int val;
+
+ val = KSZ8081_LMD_ENABLE_TEST;
+ val = val | (pair << LAN8814_PAIR_BIT_SHIFT);
+
+ ret = phy_write(phydev, LAN8814_CABLE_DIAG, val);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz886x_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, LAN8814_CABLE_DIAG);
+ if (val < 0)
+ return val;
+
+ if (ksz886x_cable_test_failed(val, LAN8814_CABLE_DIAG_STAT_MASK))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_result_trans(val,
+ LAN8814_CABLE_DIAG_STAT_MASK
+ ));
+ if (ret)
+ return ret;
+
+ if (!ksz886x_cable_test_fault_length_valid(val, LAN8814_CABLE_DIAG_STAT_MASK))
+ return 0;
+
+ fault_length = ksz886x_cable_test_fault_length(phydev, val,
+ LAN8814_CABLE_DIAG_VCT_DATA_MASK);
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length);
+}
+
static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
{
static const int ethtool_pair[] = {
@@ -1489,6 +1952,7 @@ static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
ETHTOOL_A_CABLE_PAIR_B,
};
int ret, val, mdix;
+ u32 fault_length;
	/* There is no way to choose the pair, as we do on the ksz9031.
	 * We can work around this limitation by using the MDI-X functionality.
@@ -1527,25 +1991,27 @@ static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
if (val < 0)
return val;
- if (ksz886x_cable_test_failed(val))
+ if (ksz886x_cable_test_failed(val, KSZ8081_LMD_STAT_MASK))
return -EAGAIN;
ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
- ksz886x_cable_test_result_trans(val));
+ ksz886x_cable_test_result_trans(val, KSZ8081_LMD_STAT_MASK));
if (ret)
return ret;
- if (!ksz886x_cable_test_fault_length_valid(val))
+ if (!ksz886x_cable_test_fault_length_valid(val, KSZ8081_LMD_STAT_MASK))
return 0;
- return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- ksz886x_cable_test_fault_length(val));
+ fault_length = ksz886x_cable_test_fault_length(phydev, val, KSZ8081_LMD_DELTA_TIME_MASK);
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length);
}
static int ksz886x_cable_test_get_status(struct phy_device *phydev,
bool *finished)
{
- unsigned long pair_mask = 0x3;
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ unsigned long pair_mask = type->pair_mask;
int retries = 20;
int pair, ret;
@@ -1554,7 +2020,10 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
/* Try harder if link partner is active */
while (pair_mask && retries--) {
for_each_set_bit(pair, &pair_mask, 4) {
- ret = ksz886x_cable_test_one_pair(phydev, pair);
+ if (type->cable_diag_reg == LAN8814_CABLE_DIAG)
+ ret = lan8814_cable_test_one_pair(phydev, pair);
+ else
+ ret = ksz886x_cable_test_one_pair(phydev, pair);
if (ret == -EAGAIN)
continue;
if (ret < 0)
@@ -1594,13 +2063,15 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
{
- u32 data;
+ int data;
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
- data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ data = __phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+ phy_unlock_mdio_bus(phydev);
return data;
}
@@ -1608,43 +2079,670 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
u16 val)
{
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC);
- val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
- if (val) {
+ val = __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
+ if (val != 0)
phydev_err(phydev, "Error: phy_write has returned error %d\n",
val);
- return val;
+ phy_unlock_mdio_bus(phydev);
+ return val;
+}
+
+static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = PTP_TSU_INT_EN_PTP_TX_TS_EN_ |
+ PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ |
+ PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
+ PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;
+
+ return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
+}
+
+static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds, u16 *seq_id)
+{
+ *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
+ *seconds = (*seconds << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
+
+ *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
+}
+
+static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds, u16 *seq_id)
+{
+ *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
+ *seconds = *seconds << 16 |
+ lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
+
+ *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
+}
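(Editor's note: the TSU exposes the 32-bit seconds and the sub-second nanoseconds through paired 16-bit registers; the helpers above stitch the HI/LO halves together, masking the nanosecond HI half with 0x3fff since only 14 of its bits are valid. A small sketch with hypothetical register values:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t combine_sec(uint16_t hi, uint16_t lo)
    {
            return ((uint32_t)hi << 16) | lo;
    }

    static uint32_t combine_ns(uint16_t hi, uint16_t lo)
    {
            /* only the low 14 bits of the HI half carry nanoseconds */
            return (((uint32_t)hi & 0x3fff) << 16) | lo;
    }

    int main(void)
    {
            /* hypothetical reads: 100000 s, 500000000 ns */
            printf("sec = %u\n", combine_sec(0x0001, 0x86a0));
            printf("ns  = %u\n", combine_ns(0x1dcd, 0x6500));
            return 0;
    }
)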
+
+static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(shared->ptp_clock);
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
+{
+ int i;
+
+ for (i = 0; i < FIFO_SIZE; ++i)
+ lanphy_read_page_reg(phydev, 5,
+ egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
+
+ /* Read to clear overflow status bit */
+ lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+}
+
+static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ struct hwtstamp_config config;
+ int txcfg = 0, rxcfg = 0;
+ int pkt_ts_enable;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ptp_priv->hwts_tx_type = config.tx_type;
+ ptp_priv->rx_filter = config.rx_filter;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp_priv->layer = 0;
+ ptp_priv->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (ptp_priv->layer & PTP_CLASS_L2) {
+ rxcfg = PTP_RX_PARSE_CONFIG_LAYER2_EN_;
+ txcfg = PTP_TX_PARSE_CONFIG_LAYER2_EN_;
+ } else if (ptp_priv->layer & PTP_CLASS_L4) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
+ }
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
+
+ pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
+ PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ lan8814_config_ts_intr(ptp_priv->phydev, true);
+ else
+ lan8814_config_ts_intr(ptp_priv->phydev, false);
+
+ mutex_lock(&shared->shared_lock);
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ shared->ref++;
+ else
+ shared->ref--;
+
+ if (shared->ref)
+ lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_ENABLE_);
+ else
+ lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_DISABLE_);
+ mutex_unlock(&shared->shared_lock);
+
+	/* In case of multiple starts and stops, these need to be cleared */
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ skb_queue_purge(&ptp_priv->rx_queue);
+ skb_queue_purge(&ptp_priv->tx_queue);
+
+ lan8814_flush_fifo(ptp_priv->phydev, false);
+ lan8814_flush_fifo(ptp_priv->phydev, true);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
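(Editor's note: lan8814_hwtstamp() is the PHY-side sink of the SIOCSHWTSTAMP ioctl. Reaching it from user space looks roughly like the sketch below; the interface name is a placeholder and error handling is trimmed:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical port */
            ifr.ifr_data = (void *)&cfg;

            if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                    perror("SIOCSHWTSTAMP");
            close(fd);
            return 0;
    }
)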
+
+static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ switch (ptp_priv->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&ptp_priv->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ u32 type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+}
+
+static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ unsigned long flags;
+ bool ret = false;
+ u16 skb_sig;
+
+ lan8814_get_sig_rx(skb, &skb_sig);
+
+	/* Iterate over all RX timestamps and match them against the received skb */
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds,
+ rx_ts->nsec);
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+
+ if (ret)
+ netif_rx(skb);
+ return ret;
+}
+
+static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb, int type)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ if (ptp_priv->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & ptp_priv->version) == 0 || (type & ptp_priv->layer) == 0)
+ return false;
+
+	/* If we failed to match, add the skb to the queue until the
+	 * timestamp arrives
+	 */
+ if (!lan8814_match_rx_ts(ptp_priv, skb))
+ skb_queue_tail(&ptp_priv->rx_queue, skb);
+
+ return true;
+}
+
+static void lan8814_ptp_clock_set(struct phy_device *phydev,
+ u32 seconds, u32 nano_seconds)
+{
+ u32 sec_low, sec_high, nsec_low, nsec_high;
+
+ sec_low = seconds & 0xffff;
+ sec_high = (seconds >> 16) & 0xffff;
+ nsec_low = nano_seconds & 0xffff;
+ nsec_high = (nano_seconds >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+}
+
+static void lan8814_ptp_clock_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds)
+{
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *seconds = (*seconds << 16) |
+ lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+}
+
+static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ u32 nano_seconds;
+ u32 seconds;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
+ mutex_unlock(&shared->shared_lock);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
+ return 0;
+}
+
+static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_set(phydev, ts->tv_sec, ts->tv_nsec);
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
+static void lan8814_ptp_clock_step(struct phy_device *phydev,
+ s64 time_step_ns)
+{
+ u32 nano_seconds_step;
+ u64 abs_time_step_ns;
+ u32 unsigned_seconds;
+ u32 nano_seconds;
+ u32 remainder;
+ s32 seconds;
+
+ if (time_step_ns > 15000000000LL) {
+ /* convert to clock set */
+ lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* convert to clock set */
+ time_step_ns = -time_step_ns;
+
+ lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan8814_ptp_clock_set(phydev, unsigned_seconds,
+ nano_seconds);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)time_step_ns;
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = remainder;
+ if (nano_seconds > 0) {
+			/* subtracting nanoseconds is not allowed;
+			 * convert to subtracting from seconds
+			 * and adding to nanoseconds
+			 */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
}
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
+
+ while (seconds) {
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+ u16 adjustment_value_lo, adjustment_value_hi;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+
+ adjustment_value_lo = adjustment_value & 0xffff;
+ adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ adjustment_value_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ PTP_LTC_STEP_ADJ_DIR_ |
+ adjustment_value_hi);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+ u16 adjustment_value_lo, adjustment_value_hi;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+
+ adjustment_value_lo = adjustment_value & 0xffff;
+ adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ adjustment_value_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ adjustment_value_hi);
+ seconds += ((s32)adjustment_value);
+ }
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
+ }
+ if (nano_seconds) {
+ u16 nano_seconds_lo;
+ u16 nano_seconds_hi;
+
+ nano_seconds_lo = nano_seconds & 0xffff;
+ nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ nano_seconds_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ PTP_LTC_STEP_ADJ_DIR_ |
+ nano_seconds_hi);
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
+ }
+}
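(Editor's note: for deltas within +/-15 s the driver steps the LTC in hardware; the fiddly part is keeping the nanosecond term non-negative when the delta is negative. A standalone sketch of that normalization — the driver additionally biases nanoseconds by 8 ns to cover the clock's own increment, omitted here:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000LL

    static void split_step(int64_t step_ns, int32_t *sec, uint32_t *nsec)
    {
            int64_t rem;

            *sec = (int32_t)(step_ns / NSEC_PER_SEC);
            rem = step_ns % NSEC_PER_SEC;
            if (rem < 0) {
                    /* borrow one second so nanoseconds stay non-negative */
                    (*sec)--;
                    rem += NSEC_PER_SEC;
            }
            *nsec = (uint32_t)rem;
    }

    int main(void)
    {
            int32_t sec;
            uint32_t nsec;

            split_step(-1500000000LL, &sec, &nsec);
            printf("sec=%d nsec=%u\n", sec, nsec); /* sec=-2 nsec=500000000 */
            return 0;
    }
)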
+
+static int lan8814_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_step(phydev, delta);
+ mutex_unlock(&shared->shared_lock);
+
return 0;
}
-static int lan8814_config_init(struct phy_device *phydev)
+static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
{
- int val;
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ u16 kszphy_rate_adj_lo, kszphy_rate_adj_hi;
+ bool positive = true;
+ u32 kszphy_rate_adj;
- /* Reset the PHY */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
- val |= LAN8814_QSGMII_SOFT_RESET_BIT;
- lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ positive = false;
+ }
- /* Disable ANEG with QSGMII PCS Host side */
- val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
- val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
- lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+ kszphy_rate_adj = LAN8814_1PPM_FORMAT * (scaled_ppm >> 16);
+ kszphy_rate_adj += (LAN8814_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
- /* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
- val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8814_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+ kszphy_rate_adj_lo = kszphy_rate_adj & 0xffff;
+ kszphy_rate_adj_hi = (kszphy_rate_adj >> 16) & 0x3fff;
+
+ if (positive)
+ kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ mutex_lock(&shared->shared_lock);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
+ mutex_unlock(&shared->shared_lock);
return 0;
}
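(Editor's note: scaled_ppm encodes parts-per-million with a 16-bit binary fraction, so adjfine multiplies the integer and fractional parts by the per-ppm hardware step separately to avoid overflow; the sign is handled by the caller via PTP_CLOCK_RATE_ADJ_DIR_. A sketch of the conversion — ONE_PPM_FORMAT stands in for LAN8814_1PPM_FORMAT, defined earlier in micrel.c, and its value here is assumed for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    #define ONE_PPM_FORMAT 17179 /* assumed stand-in for LAN8814_1PPM_FORMAT */

    static uint32_t scaled_ppm_to_rate_adj(long scaled_ppm)
    {
            /* integer ppm part first, then the 16-bit binary fraction */
            uint32_t adj = ONE_PPM_FORMAT * (uint32_t)(scaled_ppm >> 16);

            adj += ((uint32_t)ONE_PPM_FORMAT * (scaled_ppm & 0xffff)) >> 16;
            return adj;
    }

    int main(void)
    {
            long scaled_ppm = (12L << 16) + 0x8000; /* 12.5 ppm */

            printf("rate adj word: %u\n", scaled_ppm_to_rate_adj(scaled_ppm));
            return 0;
    }
)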
+static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ u32 type;
+
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+}
+
+static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ u32 seconds, nsec;
+ bool ret = false;
+ u16 skb_sig;
+ u16 seq_id;
+
+ lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
+
+ spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
+ lan8814_get_sig_tx(skb, &skb_sig);
+
+ if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
+ continue;
+
+ __skb_unlink(skb, &ptp_priv->tx_queue);
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->tx_queue.lock, flags);
+
+ if (ret) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ u32 reg;
+
+ do {
+ lan8814_dequeue_tx_skb(ptp_priv);
+
+ /* If other timestamps are available in the FIFO,
+ * process them.
+ */
+ reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ } while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
+}
+
+static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
+ struct lan8814_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool ret = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
+ lan8814_get_sig_rx(skb, &skb_sig);
+
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+
+ __skb_unlink(skb, &ptp_priv->rx_queue);
+
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->rx_queue.lock, flags);
+
+ if (ret) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return ret;
+}
+
+static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts;
+ unsigned long flags;
+ u32 reg;
+
+ do {
+ rx_ts = kzalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return;
+
+ lan8814_ptp_rx_ts_get(phydev, &rx_ts->seconds, &rx_ts->nsec,
+ &rx_ts->seq_id);
+
+		/* If we failed to match an skb, add the timestamp to the
+		 * list until the frame arrives
+		 */
+ if (!lan8814_match_skb(ptp_priv, rx_ts)) {
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+
+ /* If other timestamps are available in the FIFO,
+ * process them.
+ */
+ reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ } while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
+}
+
+static void lan8814_handle_ptp_interrupt(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u16 status;
+
+ status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
+ lan8814_get_tx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_)
+ lan8814_get_rx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+}
+
static int lan8804_config_init(struct phy_device *phydev)
{
int val;
@@ -1664,20 +2762,99 @@ static int lan8804_config_init(struct phy_device *phydev)
return 0;
}
+static irqreturn_t lan8804_handle_interrupt(struct phy_device *phydev)
+{
+ int status;
+
+ status = phy_read(phydev, LAN8814_INTS);
+ if (status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (status > 0)
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+#define LAN8804_OUTPUT_CONTROL 25
+#define LAN8804_OUTPUT_CONTROL_INTR_BUFFER BIT(14)
+#define LAN8804_CONTROL 31
+#define LAN8804_CONTROL_INTR_POLARITY BIT(14)
+
+static int lan8804_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+	/* This is an internal PHY of the lan966x, and it is not possible to
+	 * change the interrupt polarity on the GIC found in the lan966x;
+	 * therefore change the polarity of the interrupt in the PHY from
+	 * active low to active high.
+	 */
+ phy_write(phydev, LAN8804_CONTROL, LAN8804_CONTROL_INTR_POLARITY);
+
+	/* By default the interrupt buffer is open-drain, in which case the
+	 * interrupt can only be active low. Therefore change the interrupt
+	 * buffer to push-pull so the interrupt polarity can be changed.
+	 */
+ phy_write(phydev, LAN8804_OUTPUT_CONTROL,
+ LAN8804_OUTPUT_CONTROL_INTR_BUFFER);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ if (err)
+ return err;
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
- int irq_status;
+ int irq_status, tsu_irq_status;
+ int ret = IRQ_NONE;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status < 0)
+ if (irq_status < 0) {
+ phy_error(phydev);
return IRQ_NONE;
+ }
- if (!(irq_status & LAN8814_INT_LINK))
- return IRQ_NONE;
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
- phy_trigger_machine(phydev);
+ while (1) {
+ tsu_irq_status = lanphy_read_page_reg(phydev, 4,
+ LAN8814_INTR_STS_REG);
+
+ if (tsu_irq_status > 0 &&
+ (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
+ LAN8814_INTR_STS_REG_1588_TSU1_ |
+ LAN8814_INTR_STS_REG_1588_TSU2_ |
+ LAN8814_INTR_STS_REG_1588_TSU3_))) {
+ lan8814_handle_ptp_interrupt(phydev);
+ ret = IRQ_HANDLED;
+ } else {
+ break;
+ }
+ }
- return IRQ_HANDLED;
+ return ret;
}
static int lan8814_ack_interrupt(struct phy_device *phydev)
@@ -1704,9 +2881,9 @@ static int lan8814_config_intr(struct phy_device *phydev)
if (err)
return err;
- err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
} else {
- err = phy_write(phydev, LAN8814_INTC, 0);
+ err = phy_write(phydev, LAN8814_INTC, 0);
if (err)
return err;
@@ -1716,6 +2893,198 @@ static int lan8814_config_intr(struct phy_device *phydev)
return err;
}
+static void lan8814_ptp_init(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u32 temp;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return;
+
+ lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
+
+ temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
+ temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
+ lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
+
+ temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
+ temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
+ lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
+
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
+
+	/* Remove the default register configs related to L2 and IP */
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+ spin_lock_init(&ptp_priv->rx_ts_lock);
+
+ ptp_priv->phydev = phydev;
+
+ ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
+ ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
+ ptp_priv->mii_ts.hwtstamp = lan8814_hwtstamp;
+ ptp_priv->mii_ts.ts_info = lan8814_ts_info;
+
+ phydev->mii_ts = &ptp_priv->mii_ts;
+}
+
+static int lan8814_ptp_probe_once(struct phy_device *phydev)
+{
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return 0;
+
+	/* Initialise the shared lock for the clock */
+ mutex_init(&shared->shared_lock);
+
+ shared->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name);
+ shared->ptp_clock_info.max_adj = 31249999;
+ shared->ptp_clock_info.n_alarm = 0;
+ shared->ptp_clock_info.n_ext_ts = 0;
+ shared->ptp_clock_info.n_pins = 0;
+ shared->ptp_clock_info.pps = 0;
+ shared->ptp_clock_info.pin_config = NULL;
+ shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
+ shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime;
+ shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64;
+ shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64;
+ shared->ptp_clock_info.getcrosststamp = NULL;
+
+ shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
+ &phydev->mdio.dev);
+ if (IS_ERR_OR_NULL(shared->ptp_clock)) {
+ phydev_err(phydev, "ptp_clock_register failed %lu\n",
+ PTR_ERR(shared->ptp_clock));
+ return -EINVAL;
+ }
+
+ phydev_dbg(phydev, "successfully registered ptp clock\n");
+
+ shared->phydev = phydev;
+
+	/* EP.4 is shared between all the PHYs in the package and can be
+	 * accessed by any of them
+	 */
+ lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
+ lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
+ PTP_OPERATING_MODE_STANDALONE_);
+
+ return 0;
+}
+
+static void lan8814_setup_led(struct phy_device *phydev, int val)
+{
+ int temp;
+
+ temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
+
+ if (val)
+ temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
+ else
+ temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
+
+ lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
+}
+
+static int lan8814_config_init(struct phy_device *phydev)
+{
+ struct kszphy_priv *lan8814 = phydev->priv;
+ int val;
+
+ /* Reset the PHY */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
+ val |= LAN8814_QSGMII_SOFT_RESET_BIT;
+ lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+
+ /* Disable ANEG with QSGMII PCS Host side */
+ val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
+ val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
+ lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+
+ /* MDI-X setting for swap A,B transmit */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
+ val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8814_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+
+ if (lan8814->led_mode >= 0)
+ lan8814_setup_led(phydev, lan8814->led_mode);
+
+ return 0;
+}
+
+/* No 'lan8814_take_coma_mode' function is expected to be called in suspend,
+ * because the GPIO line can be shared: if one of the PHYs went back into
+ * coma mode, all the other PHYs would follow, which is wrong.
+ */
+static int lan8814_release_coma_mode(struct phy_device *phydev)
+{
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
+ GPIOD_OUT_HIGH_OPEN_DRAIN |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod_set_consumer_name(gpiod, "LAN8814 coma mode");
+ gpiod_set_value_cansleep(gpiod, 0);
+
+ return 0;
+}
+
+static int lan8814_probe(struct phy_device *phydev)
+{
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ struct kszphy_priv *priv;
+ u16 addr;
+ int err;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->type = type;
+
+ kszphy_parse_led_mode(phydev);
+
+	/* Strap-in value for the PHY address; the register read below gives
+	 * the starting PHY address value
+	 */
+ addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ addr, sizeof(struct lan8814_shared_priv));
+
+ if (phy_package_init_once(phydev)) {
+ err = lan8814_release_coma_mode(phydev);
+ if (err)
+ return err;
+
+ err = lan8814_ptp_probe_once(phydev);
+ if (err)
+ return err;
+ }
+
+ lan8814_ptp_init(phydev);
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1723,11 +3092,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
+ .probe = kszphy_probe,
.config_init = kszphy_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
@@ -1741,8 +3111,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8031,
.phy_id_mask = 0x00ffffff,
@@ -1756,8 +3126,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1788,8 +3158,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.name = "Micrel KSZ8051",
/* PHY_BASIC_FEATURES */
@@ -1802,8 +3172,8 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.match_phy_device = ksz8051_match_phy_device,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
@@ -1817,8 +3187,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
@@ -1845,11 +3215,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
/* PHY_BASIC_FEATURES */
+ .probe = kszphy_probe,
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -1864,14 +3235,15 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.get_features = ksz9031_get_features,
@@ -1883,15 +3255,18 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = lan8814_config_init,
- .driver_data = &ksz9021_type,
- .probe = kszphy_probe,
+ .driver_data = &lan8814_type,
+ .probe = lan8814_probe,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
.get_sset_count = kszphy_get_sset_count,
@@ -1901,6 +3276,8 @@ static struct phy_driver ksphy_driver[] = {
.resume = kszphy_resume,
.config_intr = lan8814_config_intr,
.handle_interrupt = lan8814_handle_interrupt,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8804,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1915,11 +3292,14 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = kszphy_resume,
+ .config_intr = lan8804_config_intr,
+ .handle_interrupt = lan8804_handle_interrupt,
}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -1928,8 +3308,10 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1944,6 +3326,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
+ .driver_data = &ksz886x_type,
/* PHY_BASIC_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.config_init = kszphy_config_init,
@@ -1966,6 +3349,8 @@ static struct phy_driver ksphy_driver[] = {
.name = "Microchip KSZ9477",
/* PHY_GBIT_FEATURES */
.config_init = kszphy_config_init,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 9f1f2b6c97d4..ccecee2524ce 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -344,8 +344,12 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
static struct phy_driver microchip_phy_driver[] = {
{
- .phy_id = 0x0007c130,
- .phy_id_mask = 0xfffffff0,
+ .phy_id = 0x0007c132,
+ /* This mask (0xfffffff2) is to differentiate from
+ * LAN8742 (phy_id 0x0007c130 and 0x0007c131)
+ * and allows future phy_id revisions.
+ */
+ .phy_id_mask = 0xfffffff2,
.name = "Microchip LAN88xx",
/* PHY_GBIT_FEATURES */
@@ -369,7 +373,7 @@ static struct phy_driver microchip_phy_driver[] = {
module_phy_driver(microchip_phy_driver);
static struct mdio_device_id __maybe_unused microchip_tbl[] = {
- { 0x0007c130, 0xfffffff0 },
+ { 0x0007c132, 0xfffffff2 },
{ }
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index bc50224d43dd..8569a545e0a3 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -8,11 +8,17 @@
#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+
+#define PHY_ID_LAN87XX 0x0007c150
+#define PHY_ID_LAN937X 0x0007c180
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
+#define LAN87XX_REG_BANK_SEL_MASK GENMASK(10, 8)
+#define LAN87XX_REG_ADDR_MASK GENMASK(7, 0)
/* External Register Read Data Register */
#define LAN87XX_EXT_REG_RD_DATA (0x15)
@@ -22,12 +28,16 @@
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08)
/* Interrupt Mask Register */
#define LAN87XX_INTERRUPT_MASK (0x19)
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+#define LAN87XX_INTERRUPT_MASK_2 (0x09)
+#define LAN87XX_MASK_COMM_RDY BIT(10)
+
/* MISC Control 1 Register */
#define LAN87XX_CTRL_1 (0x11)
#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
@@ -37,6 +47,7 @@
#define PHYACC_ATTR_MODE_READ 0
#define PHYACC_ATTR_MODE_WRITE 1
#define PHYACC_ATTR_MODE_MODIFY 2
+#define PHYACC_ATTR_MODE_POLL 3
#define PHYACC_ATTR_BANK_SMI 0
#define PHYACC_ATTR_BANK_MISC 1
@@ -50,8 +61,41 @@
#define LAN87XX_CABLE_TEST_OPEN 1
#define LAN87XX_CABLE_TEST_SAME_SHORT 2
+/* T1 Registers */
+#define T1_AFE_PORT_CFG1_REG 0x0B
+#define T1_POWER_DOWN_CONTROL_REG 0x1A
+#define T1_SLV_FD_MULT_CFG_REG 0x18
+#define T1_CDR_CFG_PRE_LOCK_REG 0x05
+#define T1_CDR_CFG_POST_LOCK_REG 0x06
+#define T1_LCK_STG2_MUFACT_CFG_REG 0x1A
+#define T1_LCK_STG3_MUFACT_CFG_REG 0x1B
+#define T1_POST_LCK_MUFACT_CFG_REG 0x1C
+#define T1_TX_RX_FIFO_CFG_REG 0x02
+#define T1_TX_LPF_FIR_CFG_REG 0x55
+#define T1_COEF_CLK_PWR_DN_CFG 0x04
+#define T1_COEF_RW_CTL_CFG 0x0D
+#define T1_SQI_CONFIG_REG 0x2E
+#define T1_SQI_CONFIG2_REG 0x4A
+#define T1_DCQ_SQI_REG 0xC3
+#define T1_DCQ_SQI_MSK GENMASK(3, 1)
+#define T1_MDIO_CONTROL2_REG 0x10
+#define T1_INTERRUPT_SOURCE_REG 0x18
+#define T1_INTERRUPT2_SOURCE_REG 0x08
+#define T1_EQ_FD_STG1_FRZ_CFG 0x69
+#define T1_EQ_FD_STG2_FRZ_CFG 0x6A
+#define T1_EQ_FD_STG3_FRZ_CFG 0x6B
+#define T1_EQ_FD_STG4_FRZ_CFG 0x6C
+#define T1_EQ_WT_FD_LCK_FRZ_CFG 0x6D
+#define T1_PST_EQ_LCK_STG1_FRZ_CFG 0x6E
+
+#define T1_MODE_STAT_REG 0x11
+#define T1_LINK_UP_MSK BIT(0)
+
+/* SQI defines */
+#define LAN87XX_MAX_SQI 0x07
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
-#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
+#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
struct access_ereg_val {
u8 mode;
@@ -61,6 +105,37 @@ struct access_ereg_val {
u16 mask;
};
+static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
+{
+ u8 prev_bank;
+ int rc = 0;
+ u16 val;
+
+ mutex_lock(&phydev->lock);
+	/* Read the previously selected bank */
+ rc = phy_read(phydev, LAN87XX_EXT_REG_CTL);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* store the prev_bank */
+ prev_bank = FIELD_GET(LAN87XX_REG_BANK_SEL_MASK, rc);
+
+ if (bank != prev_bank && bank == PHYACC_ATTR_BANK_DSP) {
+ val = ereg & ~LAN87XX_REG_ADDR_MASK;
+
+ val &= ~LAN87XX_EXT_REG_CTL_WR_CTL;
+ val |= LAN87XX_EXT_REG_CTL_RD_CTL;
+
+		/* access twice for a DSP bank change; the first is a dummy access */
+ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, val);
+ }
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
u8 offset, u16 val)
{
@@ -89,6 +164,13 @@ static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
ereg |= (bank << 8) | offset;
+ /* DSP bank access workaround for lan937x */
+ if (phydev->phy_id == PHY_ID_LAN937X) {
+ rc = lan937x_dsp_workaround(phydev, ereg, bank);
+ if (rc < 0)
+ return rc;
+ }
+
rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
if (rc < 0)
return rc;
@@ -117,6 +199,15 @@ static int access_ereg_modify_changed(struct phy_device *phydev,
return rc;
}
+static int access_smi_poll_timeout(struct phy_device *phydev,
+ u8 offset, u16 mask, u16 clr)
+{
+ int val;
+
+ return phy_read_poll_timeout(phydev, offset, val, (val & mask) == clr,
+ 150, 30000, true);
+}
+
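
For reference, the POLL entries in the init table below pass the table's val field as the bit mask and its mask field as the expected value, so the { POLL, SMI, T1_MDIO_CONTROL2_REG, 0x0080, 0 } entry waits for bit 0x0080 to self-clear. A minimal open-coded sketch of what the phy_read_poll_timeout() call above amounts to, assuming only the 150 us poll interval and 30 ms timeout shown (the real macro also sleeps once before the first read):

        /* Illustrative open-coded equivalent of access_smi_poll_timeout();
         * the actual helper delegates to phy_read_poll_timeout().
         */
        static int access_smi_poll_open_coded(struct phy_device *phydev,
                                              u8 offset, u16 mask, u16 clr)
        {
                unsigned long timeout = jiffies + usecs_to_jiffies(30000);
                int val;

                for (;;) {
                        val = phy_read(phydev, offset);
                        if (val < 0)
                                return val;
                        if ((val & mask) == clr)
                                return 0;
                        if (time_after(jiffies, timeout))
                                return -ETIMEDOUT;
                        usleep_range(150, 300);
                }
        }
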
static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
{
int rc;
@@ -157,68 +248,170 @@ static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
static int lan87xx_phy_init(struct phy_device *phydev)
{
static const struct access_ereg_val init[] = {
- /* TX Amplitude = 5 */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
- 0x000A, 0x001E},
- /* Clear SMI interrupts */
- {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
- 0, 0},
- /* Clear MISC interrupts */
- {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
- 0, 0},
- /* Turn on TC10 Ring Oscillator (ROSC) */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
- 0x0020, 0x0020},
- /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
- 0x283C, 0},
- /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
- 0x274F, 0},
- /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
- * and Wake_In to wake PHY
- */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
- 0x80A7, 0},
- /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
- * to 128 uS
- */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
- 0xF110, 0},
- /* Enable HW Init */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
- 0x0100, 0x0100},
+ /* TXPD/TXAMP6 Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
+ T1_AFE_PORT_CFG1_REG, 0x002D, 0 },
+ /* HW_Init Hi and Force_ED */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0308, 0 },
+ /* Equalizer Full Duplex Freeze - T1 Slave */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG1_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG2_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG3_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG4_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_WT_FD_LCK_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_PST_EQ_LCK_STG1_FRZ_CFG, 0x0002, 0 },
+ /* Slave Full Duplex Multi Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SLV_FD_MULT_CFG_REG, 0x0D53, 0 },
+ /* CDR Pre and Post Lock Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_CDR_CFG_PRE_LOCK_REG, 0x0AB2, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_CDR_CFG_POST_LOCK_REG, 0x0AB3, 0 },
+ /* Lock Stage 2-3 Multi Factor Config */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_LCK_STG2_MUFACT_CFG_REG, 0x0AEA, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_LCK_STG3_MUFACT_CFG_REG, 0x0AEB, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_POST_LCK_MUFACT_CFG_REG, 0x0AEB, 0 },
+ /* Pointer delay */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_RX_FIFO_CFG_REG, 0x1C00, 0 },
+		/* TX low-pass FIR filter coefficient edits */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1861, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1061, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1922, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1122, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1983, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1183, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1944, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1144, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x18c5, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x10c5, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1846, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1046, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1807, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1007, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1808, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1008, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1809, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1009, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180A, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100A, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180B, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100B, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180C, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100C, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180D, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100D, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180E, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100E, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180F, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100F, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1810, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1010, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1811, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+		/* Set up SQI measurement */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_CLK_PWR_DN_CFG, 0x16d6, 0 },
+ /* SQI enable */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG_REG, 0x9572, 0 },
+ /* SQI select mode 5 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG2_REG, 0x0001, 0 },
+		/* Throw away the first SQI reading */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_RW_CTL_CFG, 0x0301, 0 },
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP,
+ T1_DCQ_SQI_REG, 0, 0 },
+ /* Flag LPS and WUR as idle errors */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0014, 0 },
+ /* HW_Init toggle, undo force ED, TXPD off */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0200, 0 },
+ /* Reset PCS to trigger hardware initialization */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0094, 0 },
+		/* Poll until hardware is initialized */
+ { PHYACC_ATTR_MODE_POLL, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0080, 0 },
+ /* Tx AMP - 0x06 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
+ T1_AFE_PORT_CFG1_REG, 0x000C, 0 },
+ /* Read INTERRUPT_SOURCE Register */
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI,
+ T1_INTERRUPT_SOURCE_REG, 0, 0 },
+		/* Read INTERRUPT2_SOURCE Register (MISC bank) */
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC,
+ T1_INTERRUPT2_SOURCE_REG, 0, 0 },
+ /* HW_Init Hi */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0300, 0 },
};
int rc, i;
- /* Start manual initialization procedures in Managed Mode */
- rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
- 0x1a, 0x0000, 0x0100);
- if (rc < 0)
- return rc;
-
- /* Soft Reset the SMI block */
- rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
- 0x00, 0x8000, 0x8000);
- if (rc < 0)
- return rc;
-
- /* Check to see if the self-clearing bit is cleared */
- usleep_range(1000, 2000);
- rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
- PHYACC_ATTR_BANK_SMI, 0x00, 0);
+	/* PHY soft reset */
+ rc = genphy_soft_reset(phydev);
if (rc < 0)
return rc;
- if ((rc & 0x8000) != 0)
- return -ETIMEDOUT;
/* PHY Initialization */
for (i = 0; i < ARRAY_SIZE(init); i++) {
- if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
- rc = access_ereg_modify_changed(phydev, init[i].bank,
- init[i].offset,
- init[i].val,
- init[i].mask);
+ if (init[i].mode == PHYACC_ATTR_MODE_POLL &&
+ init[i].bank == PHYACC_ATTR_BANK_SMI) {
+ rc = access_smi_poll_timeout(phydev,
+ init[i].offset,
+ init[i].val,
+ init[i].mask);
} else {
rc = access_ereg(phydev, init[i].mode, init[i].bank,
init[i].offset, init[i].val);
@@ -235,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
int rc, val = 0;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
+		/* mask all interrupts and clear pending sources */
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (rc < 0)
+ return rc;
+
+		/* enable the link-down and comm-ready interrupts */
+ val = LAN87XX_MASK_LINK_DOWN;
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ val = LAN87XX_MASK_COMM_RDY;
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
} else {
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
- if (rc)
+ if (rc < 0)
return rc;
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
}
return rc < 0 ? rc : 0;
@@ -255,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
if (irq_status < 0) {
phy_error(phydev);
@@ -504,22 +743,113 @@ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int lan87xx_read_status(struct phy_device *phydev)
+{
+ int rc = 0;
+
+ rc = phy_read(phydev, T1_MODE_STAT_REG);
+ if (rc < 0)
+ return rc;
+
+ if (rc & T1_LINK_UP_MSK)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ rc = genphy_read_master_slave(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = genphy_read_status_fixed(phydev);
+ if (rc < 0)
+ return rc;
+
+ return rc;
+}
+
+static int lan87xx_config_aneg(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+}
+
+static int lan87xx_get_sqi(struct phy_device *phydev)
+{
+ u8 sqi_value = 0;
+ int rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_DSP, T1_COEF_RW_CTL_CFG, 0x0301);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, T1_DCQ_SQI_REG, 0x0);
+ if (rc < 0)
+ return rc;
+
+ sqi_value = FIELD_GET(T1_DCQ_SQI_MSK, rc);
+
+ return sqi_value;
+}
+
+static int lan87xx_get_sqi_max(struct phy_device *phydev)
+{
+ return LAN87XX_MAX_SQI;
+}
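
A worked example of the field decode above (illustrative values, not taken from hardware): T1_DCQ_SQI_MSK is GENMASK(3, 1) == 0x000E, so FIELD_GET() shifts the masked field down by one bit, and a raw T1_DCQ_SQI_REG reading of 0x000E yields the maximum SQI of 7:

        /* Hypothetical decode helper; mirrors the FIELD_GET() in
         * lan87xx_get_sqi() above.
         */
        #include <linux/bitfield.h>

        static u8 example_decode_sqi(u16 raw)
        {
                /* raw = 0x000E -> (0x000E & 0x000E) >> 1 = 7 = LAN87XX_MAX_SQI */
                return FIELD_GET(T1_DCQ_SQI_MSK, raw);
        }
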
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
- .phy_id = 0x0007c150,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
.name = "Microchip LAN87xx T1",
.flags = PHY_POLL_CABLE_TEST,
-
.features = PHY_BASIC_T1_FEATURES,
-
.config_init = lan87xx_config_init,
-
.config_intr = lan87xx_phy_config_intr,
.handle_interrupt = lan87xx_handle_interrupt,
-
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .config_aneg = lan87xx_config_aneg,
+ .read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
+ .cable_test_start = lan87xx_cable_test_start,
+ .cable_test_get_status = lan87xx_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
+ .name = "Microchip LAN937x T1",
+ .flags = PHY_POLL_CABLE_TEST,
+ .features = PHY_BASIC_T1_FEATURES,
+ .config_init = lan87xx_config_init,
+ .config_intr = lan87xx_phy_config_intr,
+ .handle_interrupt = lan87xx_handle_interrupt,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = lan87xx_config_aneg,
+ .read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
}
@@ -528,7 +858,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
module_phy_driver(microchip_t1_phy_driver);
static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
- { 0x0007c150, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b7b2521c73fb..f81b077618f4 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -632,6 +632,7 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
list_del(&flow->list);
clear_bit(flow->index, bitmap);
+ memzero_explicit(flow->key, sizeof(flow->key));
kfree(flow);
}
@@ -706,14 +707,6 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
struct phy_device *phydev = ctx->phydev;
struct vsc8531_private *priv = phydev->priv;
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->rx_sa = ctx->sa.rx_sa;
@@ -730,24 +723,13 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
struct macsec_flow *flow, bool update)
{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->tx_sa = ctx->sa.tx_sa;
/* Always match untagged packets on egress */
flow->match.untagged = 1;
- return vsc8584_macsec_add_flow(phydev, flow, update);
+ return vsc8584_macsec_add_flow(ctx->phydev, flow, update);
}
static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
@@ -755,10 +737,6 @@ static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_enable(ctx->phydev, flow);
@@ -770,10 +748,6 @@ static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_disable(ctx->phydev, flow);
@@ -785,12 +759,8 @@ static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_secy *secy = ctx->secy;
- if (ctx->prepare) {
- if (priv->secy)
- return -EEXIST;
-
- return 0;
- }
+ if (priv->secy)
+ return -EEXIST;
priv->secy = secy;
@@ -807,10 +777,6 @@ static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_del_flow(ctx->phydev, flow);
@@ -823,10 +789,6 @@ static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
{
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
vsc8584_macsec_del_secy(ctx);
return vsc8584_macsec_add_secy(ctx);
}
@@ -847,10 +809,6 @@ static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
if (flow->bank == MACSEC_INGR && flow->rx_sa &&
flow->rx_sa->sc->sci == ctx->rx_sc->sci)
@@ -862,33 +820,40 @@ static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_rxsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
@@ -899,11 +864,8 @@ static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
@@ -911,33 +873,40 @@ static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_txsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_txsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
@@ -948,11 +917,8 @@ static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index ebfeeb3c67c1..8a13b1ad9a33 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -136,7 +136,7 @@ static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
return;
for (i = 0; i < priv->nstats; i++)
- strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
+ strscpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
ETH_GSTRING_LEN);
}
@@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
MODULE_AUTHOR("Nagaraju Lakkaraju");
MODULE_LICENSE("Dual MIT/GPL");
+
+MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW);
+MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW);
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 34f829845d06..cf728bfd83e2 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1212,7 +1212,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
ts.tv_sec--;
shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
return true;
}
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 5ce1bf03bbd7..24bae27eedef 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -8,7 +8,9 @@
#include <linux/module.h>
#include <linux/bitfield.h>
+#include <linux/hwmon.h>
#include <linux/phy.h>
+#include <linux/polynomial.h>
#include <linux/netdevice.h>
/* PHY ID */
@@ -54,7 +56,7 @@
PHY_IMASK_ANC)
#define PHY_FWV_REL_MASK BIT(15)
-#define PHY_FWV_TYPE_MASK GENMASK(11, 8)
+#define PHY_FWV_MAJOR_MASK GENMASK(11, 8)
#define PHY_FWV_MINOR_MASK GENMASK(7, 0)
/* SGMII */
@@ -64,6 +66,10 @@
#define VSPEC1_SGMII_ANEN_ANRS (VSPEC1_SGMII_CTRL_ANEN | \
VSPEC1_SGMII_CTRL_ANRS)
+/* Temperature sensor */
+#define VPSPEC1_TEMP_STA 0x0E
+#define VPSPEC1_TEMP_STA_DATA GENMASK(9, 0)
+
/* WoL */
#define VPSPEC2_WOL_CTL 0x0E06
#define VPSPEC2_WOL_AD01 0x0E08
@@ -71,8 +77,13 @@
#define VPSPEC2_WOL_AD45 0x0E0A
#define WOL_EN BIT(0)
+struct gpy_priv {
+ u8 fw_major;
+ u8 fw_minor;
+};
+
static const struct {
- int type;
+ int major;
int minor;
} ver_need_sgmii_reaneg[] = {
{7, 0x6D},
@@ -80,6 +91,102 @@ static const struct {
{9, 0x73},
};
+#if IS_ENABLED(CONFIG_HWMON)
+/* The original translation formula for the temperature (in degrees Celsius)
+ * is as follows:
+ *
+ * T = -2.5761e-11*(N^4) + 9.7332e-8*(N^3) + -1.9165e-4*(N^2) +
+ *     3.0762e-1*(N^1) + -5.2156e1
+ *
+ * where T = [-52.156, 137.961]C and N = [0, 1023].
+ *
+ * It must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are arranged so that every intermediate
+ * result stays within the integer limits. In addition, the formula is
+ * translated to yield millidegrees Celsius. Here is what it looks like after
+ * the alterations:
+ *
+ * T = -25761e-12*(N^4) + 97332e-9*(N^3) + -191650e-6*(N^2) +
+ *     307620e-3*(N^1) + -52156
+ *
+ * where T = [-52156, 137961]mC and N = [0, 1023].
+ */
+static const struct polynomial poly_N_to_temp = {
+ .terms = {
+ {4, -25761, 1000, 1},
+ {3, 97332, 1000, 1},
+ {2, -191650, 1000, 1},
+ {1, 307620, 1000, 1},
+ {0, -52156, 1, 1}
+ }
+};
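
As a sanity check of the millidegree form (plain arithmetic, ignoring polynomial_calc()'s internal rounding): for N = 512 the terms are roughly -1770 + 13064 - 50240 + 157501 - 52156 ≈ 66399 mC, i.e. about 66.4 degrees C. A minimal standalone sketch of the same evaluation with 64-bit integer math, assuming div64_s64() from linux/math64.h; the driver itself uses the generic polynomial_calc() helper with the terms above:

        /* Illustrative standalone evaluation only, not the driver path. */
        #include <linux/math64.h>

        static s64 example_n_to_millicelsius(u32 n)
        {
                s64 n2 = (s64)n * n, n3 = n2 * n, n4 = n3 * n;

                return div64_s64(-25761 * n4, 1000000000000LL) +
                       div64_s64(97332 * n3, 1000000000LL) +
                       div64_s64(-191650 * n2, 1000000LL) +
                       div64_s64(307620 * (s64)n, 1000LL) -
                       52156;
        }
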
+
+static int gpy_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VPSPEC1_TEMP_STA);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENODATA;
+
+ *value = polynomial_calc(&poly_N_to_temp,
+ FIELD_GET(VPSPEC1_TEMP_STA_DATA, ret));
+
+ return 0;
+}
+
+static umode_t gpy_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static const struct hwmon_channel_info *gpy_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops gpy_hwmon_hwmon_ops = {
+ .is_visible = gpy_hwmon_is_visible,
+ .read = gpy_hwmon_read,
+};
+
+static const struct hwmon_chip_info gpy_hwmon_chip_info = {
+ .ops = &gpy_hwmon_hwmon_ops,
+ .info = gpy_hwmon_info,
+};
+
+static int gpy_hwmon_register(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon_dev;
+ char *hwmon_name;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
+ phydev,
+ &gpy_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+#else
+static int gpy_hwmon_register(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int gpy_config_init(struct phy_device *phydev)
{
int ret;
@@ -96,6 +203,9 @@ static int gpy_config_init(struct phy_device *phydev)
static int gpy_probe(struct phy_device *phydev)
{
+ struct device *dev = &phydev->mdio.dev;
+ struct gpy_priv *priv;
+ int fw_version;
int ret;
if (!phydev->is_c45) {
@@ -104,33 +214,38 @@ static int gpy_probe(struct phy_device *phydev)
return ret;
}
- /* Show GPY PHY FW version in dmesg */
- ret = phy_read(phydev, PHY_FWV);
- if (ret < 0)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ fw_version = phy_read(phydev, PHY_FWV);
+ if (fw_version < 0)
+ return fw_version;
+ priv->fw_major = FIELD_GET(PHY_FWV_MAJOR_MASK, fw_version);
+ priv->fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_version);
+
+ ret = gpy_hwmon_register(phydev);
+ if (ret)
return ret;
- phydev_info(phydev, "Firmware Version: 0x%04X (%s)\n", ret,
- (ret & PHY_FWV_REL_MASK) ? "release" : "test");
+ /* Show GPY PHY FW version in dmesg */
+ phydev_info(phydev, "Firmware Version: %d.%d (0x%04X%s)\n",
+ priv->fw_major, priv->fw_minor, fw_version,
+ fw_version & PHY_FWV_REL_MASK ? "" : " test version");
return 0;
}
static bool gpy_sgmii_need_reaneg(struct phy_device *phydev)
{
- int fw_ver, fw_type, fw_minor;
+ struct gpy_priv *priv = phydev->priv;
size_t i;
- fw_ver = phy_read(phydev, PHY_FWV);
- if (fw_ver < 0)
- return true;
-
- fw_type = FIELD_GET(PHY_FWV_TYPE_MASK, fw_ver);
- fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_ver);
-
for (i = 0; i < ARRAY_SIZE(ver_need_sgmii_reaneg); i++) {
- if (fw_type != ver_need_sgmii_reaneg[i].type)
+ if (priv->fw_major != ver_need_sgmii_reaneg[i].major)
continue;
- if (fw_minor < ver_need_sgmii_reaneg[i].minor)
+ if (priv->fw_minor < ver_need_sgmii_reaneg[i].minor)
return true;
break;
}
@@ -295,6 +410,9 @@ static void gpy_update_interface(struct phy_device *phydev)
ret);
break;
}
+
+ if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000)
+ genphy_read_master_slave(phydev);
}
static int gpy_read_status(struct phy_device *phydev)
@@ -495,18 +613,12 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
static int gpy115_loopback(struct phy_device *phydev, bool enable)
{
- int ret;
- int fw_minor;
+ struct gpy_priv *priv = phydev->priv;
if (enable)
return gpy_loopback(phydev, enable);
- ret = phy_read(phydev, PHY_FWV);
- if (ret < 0)
- return ret;
-
- fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret);
- if (fw_minor > 0x0076)
+ if (priv->fw_minor > 0x76)
return gpy_loopback(phydev, 0);
return genphy_soft_reset(phydev);
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 06fdbae509a7..047c581457e3 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -478,7 +478,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
shhwtstamps_rx = skb_hwtstamps(skb);
shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (priv->extts) {
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 9944cc501806..ec91e671f8aa 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -10,6 +10,7 @@
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/hwmon.h>
#include <linux/bitfield.h>
@@ -34,6 +35,11 @@
#define MII_CFG1 18
#define MII_CFG1_MASTER_SLAVE BIT(15)
#define MII_CFG1_AUTO_OP BIT(14)
+#define MII_CFG1_INTERFACE_MODE_MASK GENMASK(9, 8)
+#define MII_CFG1_MII_MODE (0x0 << 8)
+#define MII_CFG1_RMII_MODE_REFCLK_IN BIT(8)
+#define MII_CFG1_RMII_MODE_REFCLK_OUT BIT(9)
+#define MII_CFG1_REVMII_MODE GENMASK(9, 8)
#define MII_CFG1_SLEEP_CONFIRM BIT(6)
#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
#define MII_CFG1_LED_MODE_LINKUP 0
@@ -72,11 +78,15 @@
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
+/* Configure REF_CLK as input in RMII mode */
+#define TJA110X_RMII_MODE_REFCLK_IN BIT(0)
+
struct tja11xx_priv {
char *hwmon_name;
struct device *hwmon_dev;
struct phy_device *phydev;
struct work_struct phy_register_work;
+ u32 flags;
};
struct tja11xx_phy_stats {
@@ -251,8 +261,34 @@ do_test:
return __genphy_config_aneg(phydev, changed);
}
+static int tja11xx_get_interface_mode(struct phy_device *phydev)
+{
+ struct tja11xx_priv *priv = phydev->priv;
+ int mii_mode;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ mii_mode = MII_CFG1_MII_MODE;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ mii_mode = MII_CFG1_REVMII_MODE;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (priv->flags & TJA110X_RMII_MODE_REFCLK_IN)
+ mii_mode = MII_CFG1_RMII_MODE_REFCLK_IN;
+ else
+ mii_mode = MII_CFG1_RMII_MODE_REFCLK_OUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mii_mode;
+}
+
static int tja11xx_config_init(struct phy_device *phydev)
{
+ u16 reg_mask, reg_val;
int ret;
ret = tja11xx_enable_reg_write(phydev);
@@ -265,15 +301,32 @@ static int tja11xx_config_init(struct phy_device *phydev)
switch (phydev->phy_id & PHY_ID_MASK) {
case PHY_ID_TJA1100:
- ret = phy_modify(phydev, MII_CFG1,
- MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
- MII_CFG1_LED_ENABLE,
- MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
- MII_CFG1_LED_ENABLE);
+ reg_mask = MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
+ MII_CFG1_LED_ENABLE;
+ reg_val = MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
+ MII_CFG1_LED_ENABLE;
+
+ reg_mask |= MII_CFG1_INTERFACE_MODE_MASK;
+ ret = tja11xx_get_interface_mode(phydev);
+ if (ret < 0)
+ return ret;
+
+ reg_val |= (ret & 0xffff);
+ ret = phy_modify(phydev, MII_CFG1, reg_mask, reg_val);
if (ret)
return ret;
break;
case PHY_ID_TJA1101:
+ reg_mask = MII_CFG1_INTERFACE_MODE_MASK;
+ ret = tja11xx_get_interface_mode(phydev);
+ if (ret < 0)
+ return ret;
+
+ reg_val = ret & 0xffff;
+ ret = phy_modify(phydev, MII_CFG1, reg_mask, reg_val);
+ if (ret)
+ return ret;
+ fallthrough;
case PHY_ID_TJA1102:
ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
if (ret)
@@ -444,15 +497,10 @@ static int tja11xx_hwmon_register(struct phy_device *phydev,
struct tja11xx_priv *priv)
{
struct device *dev = &phydev->mdio.dev;
- int i;
- priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!priv->hwmon_name)
- return -ENOMEM;
-
- for (i = 0; priv->hwmon_name[i]; i++)
- if (hwmon_is_bad_char(priv->hwmon_name[i]))
- priv->hwmon_name[i] = '_';
+ priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(priv->hwmon_name))
+ return PTR_ERR(priv->hwmon_name);
priv->hwmon_dev =
devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
@@ -463,16 +511,36 @@ static int tja11xx_hwmon_register(struct phy_device *phydev,
return PTR_ERR_OR_ZERO(priv->hwmon_dev);
}
+static int tja11xx_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct tja11xx_priv *priv = phydev->priv;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "nxp,rmii-refclk-in"))
+ priv->flags |= TJA110X_RMII_MODE_REFCLK_IN;
+
+ return 0;
+}
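
A hypothetical device tree fragment wiring up the new property; the nxp,rmii-refclk-in name comes from the code above, while the surrounding node layout is illustrative only:

        /* Example PHY node on an MDIO bus (illustrative):
         *
         *	ethernet-phy@4 {
         *		reg = <4>;
         *		nxp,rmii-refclk-in;
         *	};
         *
         * With the property present, tja11xx_get_interface_mode() selects
         * MII_CFG1_RMII_MODE_REFCLK_IN for PHY_INTERFACE_MODE_RMII;
         * without it, the PHY drives the reference clock (REFCLK_OUT).
         */
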
+
static int tja11xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct tja11xx_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->phydev = phydev;
+ phydev->priv = priv;
+
+ ret = tja11xx_parse_dt(phydev);
+ if (ret)
+ return ret;
return tja11xx_hwmon_register(phydev, priv);
}
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index db709d30bf84..a87a4b3ffce4 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,6 +9,25 @@
#include <linux/phy.h>
/**
+ * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
+ * @phydev: target phy_device struct
+ */
+static bool genphy_c45_baset1_able(struct phy_device *phydev)
+{
+ int val;
+
+ if (phydev->pma_extable == -ENODATA) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ if (val < 0)
+ return false;
+
+ phydev->pma_extable = val;
+ }
+
+ return !!(phydev->pma_extable & MDIO_PMA_EXTABLE_BT1);
+}
+
+/**
 * genphy_c45_pma_can_sleep - checks if the PMA has sleep support
* @phydev: target phy_device struct
*/
@@ -52,6 +71,36 @@ int genphy_c45_pma_suspend(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(genphy_c45_pma_suspend);
/**
+ * genphy_c45_pma_baset1_setup_master_slave - configures forced master/slave
+ * role of BASE-T1 devices.
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev)
+{
+ int ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_setup_master_slave);
+
+/**
* genphy_c45_pma_setup_forced - configures a forced speed
* @phydev: target phy_device struct
*/
@@ -80,7 +129,10 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
switch (phydev->speed) {
case SPEED_10:
- ctrl2 |= MDIO_PMA_CTRL2_10BT;
+ if (genphy_c45_baset1_able(phydev))
+ ctrl2 |= MDIO_PMA_CTRL2_BASET1;
+ else
+ ctrl2 |= MDIO_PMA_CTRL2_10BT;
break;
case SPEED_100:
ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
@@ -118,10 +170,79 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
if (ret < 0)
return ret;
+ if (genphy_c45_baset1_able(phydev)) {
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
return genphy_c45_an_disable_aneg(phydev);
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
+/* Sets the master/slave preference and the supported technologies.
+ * The preference is set in BIT(4) of the BASE-T1 AN
+ * advertisement register 7.515; whether that preference is
+ * forced is set in BIT(12) of the BASE-T1 AN
+ * advertisement register 7.514.
+ * Also sets the 10BASE-T1L ability, BIT(14), in the BASE-T1
+ * autonegotiation advertisement register [31:16] if supported.
+ */
+static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
+{
+ u16 adv_l_mask, adv_l = 0;
+ u16 adv_m_mask, adv_m = 0;
+ int changed = 0;
+ int ret;
+
+ adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
+ MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+ adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS;
+ break;
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ /* if master/slave role is not specified, do not overwrite it */
+ adv_l_mask &= ~MDIO_AN_T1_ADV_L_FORCE_MS;
+ adv_m_mask &= ~MDIO_AN_T1_ADV_M_MST;
+ break;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
+ adv_l_mask, adv_l);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = 1;
+
+ adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M,
+ adv_m_mask, adv_m);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = 1;
+
+ return changed;
+}
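
For quick reference, the advertisement bits the helper above programs, derived directly from its switch statement (FORCE_MS lives in register 7.514, MST in 7.515):

        master_slave_set        ADV_L FORCE_MS (7.514.12)   ADV_M MST (7.515.4)
        MASTER_FORCE            set                         set
        SLAVE_FORCE             set                         clear
        MASTER_PREFERRED        clear                       set
        SLAVE_PREFERRED         clear                       clear
        UNKNOWN/UNSUPPORTED     not touched                 not touched
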
+
/**
* genphy_c45_an_config_aneg - configure advertisement registers
* @phydev: target phy_device struct
@@ -141,6 +262,9 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev)
changed = genphy_config_eee_advert(phydev);
+ if (genphy_c45_baset1_able(phydev))
+ return genphy_c45_baset1_an_config_aneg(phydev);
+
adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
@@ -178,8 +302,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg);
*/
int genphy_c45_an_disable_aneg(struct phy_device *phydev)
{
+ u16 reg = MDIO_CTRL1;
- return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_CTRL;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
@@ -194,7 +322,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
*/
int genphy_c45_restart_aneg(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ u16 reg = MDIO_CTRL1;
+
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_CTRL;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, reg,
MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
@@ -210,11 +343,15 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
*/
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
{
+ u16 reg = MDIO_CTRL1;
int ret;
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_CTRL;
+
if (!restart) {
/* Configure and restart aneg if it wasn't set before */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
if (ret < 0)
return ret;
@@ -242,7 +379,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg);
*/
int genphy_c45_aneg_done(struct phy_device *phydev)
{
- int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ int reg = MDIO_STAT1;
+ int val;
+
+ if (genphy_c45_baset1_able(phydev))
+ reg = MDIO_AN_T1_STAT;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 1 : 0;
}
@@ -307,6 +450,49 @@ int genphy_c45_read_link(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(genphy_c45_read_link);
+/* Read the Clause 45 defined BASE-T1 AN (7.513) status register to check
+ * if autoneg is complete. If so, read the BASE-T1 Autonegotiation
+ * Advertisement registers, filling in the link partner advertisement,
+ * pause and asym_pause members in phydev.
+ */
+static int genphy_c45_baset1_read_lpa(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT);
+ if (val < 0)
+ return val;
+
+ if (!(val & MDIO_AN_STAT1_COMPLETE)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising);
+ mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0);
+ mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0);
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+ }
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, 1);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_L);
+ if (val < 0)
+ return val;
+
+ mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val);
+ phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M);
+ if (val < 0)
+ return val;
+
+ mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, val);
+
+ return 0;
+}
+
/**
* genphy_c45_read_lpa - read the link partner advertisement and pause
* @phydev: target phy_device struct
@@ -321,6 +507,9 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
{
int val;
+ if (genphy_c45_baset1_able(phydev))
+ return genphy_c45_baset1_read_lpa(phydev);
+
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
if (val < 0)
return val;
@@ -360,6 +549,34 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(genphy_c45_read_lpa);
/**
+ * genphy_c45_pma_baset1_read_master_slave - read forced master/slave
+ * configuration
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev)
+{
+ int val;
+
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST) {
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
+ } else {
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_read_master_slave);
+
+/**
* genphy_c45_read_pma - read link speed etc from PMA
* @phydev: target phy_device struct
*/
@@ -399,6 +616,12 @@ int genphy_c45_read_pma(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
+ if (genphy_c45_baset1_able(phydev)) {
+ val = genphy_c45_pma_baset1_read_master_slave(phydev);
+ if (val < 0)
+ return val;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
@@ -530,12 +753,68 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
phydev->supported,
val & MDIO_PMA_NG_EXTABLE_5GBT);
}
+
+ if (val & MDIO_PMA_EXTABLE_BT1) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_PMD_BT1_B10L_ABLE);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported,
+ val & MDIO_AN_STAT1_ABLE);
+ }
}
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities);
+/* Read the master/slave preference from the registers.
+ * The preference is read from BIT(4) of the BASE-T1 AN
+ * advertisement register 7.515; whether that preference is
+ * forced is read from BIT(12) of the BASE-T1 AN
+ * advertisement register 7.514.
+ */
+int genphy_c45_baset1_read_status(struct phy_device *phydev)
+{
+ int ret;
+ int cfg;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L);
+ if (ret < 0)
+ return ret;
+
+ cfg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M);
+ if (cfg < 0)
+ return cfg;
+
+ if (ret & MDIO_AN_T1_ADV_L_FORCE_MS) {
+ if (cfg & MDIO_AN_T1_ADV_M_MST)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ } else {
+ if (cfg & MDIO_AN_T1_ADV_M_MST)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_PREFERRED;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_PREFERRED;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_baset1_read_status);
+
/**
* genphy_c45_read_status - read PHY status
* @phydev: target phy_device struct
@@ -560,6 +839,12 @@ int genphy_c45_read_status(struct phy_device *phydev)
if (ret)
return ret;
+ if (genphy_c45_baset1_able(phydev)) {
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
phy_resolve_aneg_linkmode(phydev);
} else {
ret = genphy_c45_read_pma(phydev);
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 271fc01f7f7f..2c8bf438ea61 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -74,6 +74,80 @@ const char *phy_duplex_to_str(unsigned int duplex)
}
EXPORT_SYMBOL_GPL(phy_duplex_to_str);
+/**
+ * phy_rate_matching_to_str - Return a string describing the rate matching
+ *
+ * @rate_matching: Type of rate matching to describe
+ */
+const char *phy_rate_matching_to_str(int rate_matching)
+{
+ switch (rate_matching) {
+ case RATE_MATCH_NONE:
+ return "none";
+ case RATE_MATCH_PAUSE:
+ return "pause";
+ case RATE_MATCH_CRS:
+ return "crs";
+ case RATE_MATCH_OPEN_LOOP:
+ return "open-loop";
+ }
+ return "Unsupported (update phy-core.c)";
+}
+EXPORT_SYMBOL_GPL(phy_rate_matching_to_str);
+
+/**
+ * phy_interface_num_ports - Return the number of links that can be carried by
+ * a given MAC-PHY physical link. Returns the number of
+ * links, or 0 if this is unknown.
+ *
+ * @interface: The interface mode we want to get the number of ports
+ */
+int phy_interface_num_ports(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XLGMII:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return 1;
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return 4;
+ case PHY_INTERFACE_MODE_MAX:
+ WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode");
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_interface_num_ports);
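
A minimal usage sketch (hypothetical caller, not part of this patch), showing how a MAC or switch driver might reject interface modes that multiplex several ports:

        /* Hypothetical validation helper built on phy_interface_num_ports() */
        #include <linux/phy.h>

        static int example_validate_single_port(phy_interface_t interface)
        {
                int ports = phy_interface_num_ports(interface);

                if (ports == 0)
                        return -EINVAL;		/* unknown interface mode */
                if (ports != 1)
                        return -EOPNOTSUPP;	/* e.g. QSGMII/QUSGMII carry 4 */

                return 0;
        }
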
+
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed.
@@ -176,6 +250,7 @@ static const struct phy_setting settings[] = {
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
+ PHY_SETTING( 10, FULL, 10baseT1L_Full ),
};
#undef PHY_SETTING
@@ -243,7 +318,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
-static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
+static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
{
const struct phy_setting *p;
int i;
@@ -254,13 +329,11 @@ static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
else
break;
}
-
- return 0;
}
-static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- return __set_linkmode_max_speed(max_speed, phydev->supported);
+ __set_linkmode_max_speed(max_speed, phydev->supported);
}
/**
@@ -273,17 +346,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
* is connected to a 1G PHY. This function allows the MAC to indicate its
* maximum speed, and so limit what the PHY will advertise.
*/
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
- int err;
-
- err = __set_phy_supported(phydev, max_speed);
- if (err)
- return err;
+ __set_phy_supported(phydev, max_speed);
phy_advertise_supported(phydev);
-
- return 0;
}
EXPORT_SYMBOL(phy_set_max_speed);
@@ -440,7 +507,9 @@ int phy_speed_down_core(struct phy_device *phydev)
if (min_common_speed == SPEED_UNKNOWN)
return -EINVAL;
- return __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+ __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+
+ return 0;
}
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index beb2b66da132..e741d8aebffe 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/suspend.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
@@ -114,6 +115,33 @@ void phy_print_status(struct phy_device *phydev)
EXPORT_SYMBOL(phy_print_status);
/**
+ * phy_get_rate_matching - determine if rate matching is supported
+ * @phydev: The phy device to return rate matching for
+ * @iface: The interface mode to use
+ *
+ * This determines the type of rate matching (if any) that @phydev supports
+ * using @iface. @iface may be %PHY_INTERFACE_MODE_NA to determine if any
+ * interface supports rate matching.
+ *
+ * Return: The type of rate matching @phydev supports for @iface, or
+ * %RATE_MATCH_NONE.
+ */
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int ret = RATE_MATCH_NONE;
+
+ if (phydev->drv->get_rate_matching) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_rate_matching(phydev, iface);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_get_rate_matching);
+
+/**
* phy_config_interrupt - configure the PHY device for the requested interrupts
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
@@ -255,6 +283,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.duplex = phydev->duplex;
cmd->base.master_slave_cfg = phydev->master_slave_get;
cmd->base.master_slave_state = phydev->master_slave_state;
+ cmd->base.rate_matching = phydev->rate_matching;
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
@@ -295,20 +324,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = mdiobus_c45_addr(devad, mii_data->reg_num);
+ mii_data->val_out = mdiobus_c45_read(
+ phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num);
} else {
- prtad = mii_data->phy_id;
- devad = mii_data->reg_num;
+ mii_data->val_out = mdiobus_read(
+ phydev->mdio.bus, mii_data->phy_id,
+ mii_data->reg_num);
}
- mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad,
- devad);
return 0;
case SIOCSMIIREG:
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = mdiobus_c45_addr(devad, mii_data->reg_num);
} else {
prtad = mii_data->phy_id;
devad = mii_data->reg_num;
@@ -351,7 +380,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
}
- mdiobus_write(phydev->mdio.bus, prtad, devad, val);
+ if (mdio_phy_id_is_c45(mii_data->phy_id))
+ mdiobus_c45_write(phydev->mdio.bus, prtad, devad,
+ mii_data->reg_num, val);
+ else
+ mdiobus_write(phydev->mdio.bus, prtad, devad, val);
if (prtad == phydev->mdio.addr &&
devad == MII_BMCR &&
@@ -970,8 +1003,35 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
struct phy_driver *drv = phydev->drv;
+ irqreturn_t ret;
+
+ /* Wakeup interrupts may occur during a system sleep transition.
+ * Postpone handling until the PHY has resumed.
+ */
+ if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) {
+ struct net_device *netdev = phydev->attached_dev;
+
+ if (netdev) {
+ struct device *parent = netdev->dev.parent;
+
+ if (netdev->wol_enabled)
+ pm_system_wakeup();
+ else if (device_may_wakeup(&netdev->dev))
+ pm_wakeup_dev_event(&netdev->dev, 0, true);
+ else if (parent && device_may_wakeup(parent))
+ pm_wakeup_dev_event(parent, 0, true);
+ }
- return drv->handle_interrupt(phydev);
+ phydev->irq_rerun = 1;
+ disable_irq_nosync(irq);
+ return IRQ_HANDLED;
+ }
+
+ mutex_lock(&phydev->lock);
+ ret = drv->handle_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
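
Taken together with the mdio_bus_phy_suspend()/mdio_bus_phy_resume() changes further down, the intended sleep-transition sequence, summarized from the code in this patch, is:

  1. suspend sets phydev->irq_suspended and uses synchronize_irq() to let any in-flight handler finish;
  2. a wakeup interrupt arriving while suspended only notifies the PM core, sets irq_rerun and disables the IRQ, instead of touching the now-inaccessible PHY;
  3. resume clears irq_suspended, then re-enables the IRQ and uses irq_wake_thread() to re-run the postponed handler.
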
/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 74d8e1dc125f..57849ac0384e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -90,8 +91,9 @@ const int phy_10_100_features_array[4] = {
};
EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-const int phy_basic_t1_features_array[2] = {
+const int phy_basic_t1_features_array[3] = {
ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
@@ -277,6 +279,15 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
if (phydev->mac_managed_pm)
return 0;
+ /* Wakeup interrupts may occur during the system sleep transition when
+ * the PHY is inaccessible. Set flag to postpone handling until the PHY
+ * has resumed. Wait for concurrent interrupt handler to complete.
+ */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->irq_suspended = 1;
+ synchronize_irq(phydev->irq);
+ }
+
/* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
@@ -306,6 +317,14 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
+ /* If we managed to get here with the PHY state machine in a state
+	 * other than PHY_HALTED, PHY_READY or PHY_UP, this is an indication
+ * that something went wrong and we should most likely be using
+ * MAC managed PM, but we are not.
+ */
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+ phydev->state != PHY_UP);
+
ret = phy_init_hw(phydev);
if (ret < 0)
return ret;
@@ -314,6 +333,20 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
if (ret < 0)
return ret;
no_resume:
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->irq_suspended = 0;
+ synchronize_irq(phydev->irq);
+
+ /* Rerun interrupts which were postponed by phy_interrupt()
+ * because they occurred during the system sleep transition.
+ */
+ if (phydev->irq_rerun) {
+ phydev->irq_rerun = 0;
+ enable_irq(phydev->irq);
+ irq_wake_thread(phydev->irq, phydev);
+ }
+ }
+
if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev);
@@ -340,7 +373,7 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
if (!fixup)
return -ENOMEM;
- strlcpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
+ strscpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
fixup->phy_uid = phy_uid;
fixup->phy_uid_mask = phy_uid_mask;
fixup->run = run;
@@ -490,7 +523,7 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
+ return sysfs_emit(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
}
static DEVICE_ATTR_RO(phy_id);
@@ -505,7 +538,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
else
mode = phy_modes(phydev->interface);
- return sprintf(buf, "%s\n", mode);
+ return sysfs_emit(buf, "%s\n", mode);
}
static DEVICE_ATTR_RO(phy_interface);
@@ -515,7 +548,7 @@ phy_has_fixups_show(struct device *dev, struct device_attribute *attr,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "%d\n", phydev->has_fixups);
+ return sysfs_emit(buf, "%d\n", phydev->has_fixups);
}
static DEVICE_ATTR_RO(phy_has_fixups);
@@ -525,7 +558,7 @@ static ssize_t phy_dev_flags_show(struct device *dev,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "0x%08x\n", phydev->dev_flags);
+ return sysfs_emit(buf, "0x%08x\n", phydev->dev_flags);
}
static DEVICE_ATTR_RO(phy_dev_flags);
@@ -599,6 +632,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
dev->autoneg = AUTONEG_ENABLE;
+ dev->pma_extable = -ENODATA;
dev->is_c45 = is_c45;
dev->phy_id = phy_id;
if (c45_ids)
@@ -958,6 +992,7 @@ EXPORT_SYMBOL(phy_device_register);
void phy_device_remove(struct phy_device *phydev)
{
unregister_mii_timestamper(phydev->mii_ts);
+ pse_control_put(phydev->psec);
device_del(&phydev->mdio.dev);
@@ -1279,7 +1314,7 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "%d\n", !phydev->attached_dev);
+ return sysfs_emit(buf, "%d\n", !phydev->attached_dev);
}
static DEVICE_ATTR_RO(phy_standalone);
@@ -1449,6 +1484,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->state = PHY_READY;
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1471,10 +1508,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
goto error;
- err = phy_disable_interrupts(phydev);
- if (err)
- return err;
-
phy_resume(phydev);
phy_led_triggers_register(phydev);
@@ -1746,6 +1779,9 @@ void phy_detach(struct phy_device *phydev)
phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
/*
* The phydev might go away on the put_device() below, so avoid
* a use-after-free bug by reading the underlying bus first.
@@ -1757,9 +1793,6 @@ void phy_detach(struct phy_device *phydev)
ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
-
- /* Assert the reset signal */
- phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);
@@ -2001,18 +2034,12 @@ EXPORT_SYMBOL(genphy_config_eee_advert);
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- u16 ctl = 0;
+ u16 ctl;
phydev->pause = 0;
phydev->asym_pause = 0;
- if (SPEED_1000 == phydev->speed)
- ctl |= BMCR_SPEED1000;
- else if (SPEED_100 == phydev->speed)
- ctl |= BMCR_SPEED100;
-
- if (DUPLEX_FULL == phydev->duplex)
- ctl |= BMCR_FULLDPLX;
+ ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
return phy_modify(phydev, MII_BMCR,
~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
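Both this hunk and the genphy_loopback() one further down replace the open-coded BMCR speed/duplex encoding with mii_bmcr_encode_fixed() from <linux/mii.h>. A sketch of what that helper does, assuming the usual definition (not quoted from this patch):

static inline u16 mii_bmcr_encode_fixed(int speed, int duplex)
{
	u16 bmcr;

	switch (speed) {
	case SPEED_1000:
		bmcr = BMCR_SPEED1000;
		break;
	case SPEED_100:
		bmcr = BMCR_SPEED100;
		break;
	default:
		bmcr = 0;	/* SPEED_10 and unknown: BMCR_SPEED10 == 0 */
		break;
	}

	if (duplex == DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;

	return bmcr;
}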
@@ -2051,17 +2078,11 @@ static int genphy_setup_master_slave(struct phy_device *phydev)
CTL1000_PREFER_MASTER), ctl);
}
-static int genphy_read_master_slave(struct phy_device *phydev)
+int genphy_read_master_slave(struct phy_device *phydev)
{
int cfg, state;
int val;
- if (!phydev->is_gigabit_capable) {
- phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
- phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
- return 0;
- }
-
phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
@@ -2102,6 +2123,7 @@ static int genphy_read_master_slave(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL(genphy_read_master_slave);
/**
* genphy_restart_aneg - Enable and Restart Autonegotiation
@@ -2396,14 +2418,18 @@ int genphy_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
- err = genphy_read_master_slave(phydev);
- if (err < 0)
- return err;
+ if (phydev->is_gigabit_capable) {
+ err = genphy_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+ }
err = genphy_read_lpa(phydev);
if (err < 0)
@@ -2615,13 +2641,7 @@ int genphy_loopback(struct phy_device *phydev, bool enable)
u16 val, ctl = BMCR_LOOPBACK;
int ret;
- if (phydev->speed == SPEED_1000)
- ctl |= BMCR_SPEED1000;
- else if (phydev->speed == SPEED_100)
- ctl |= BMCR_SPEED100;
-
- if (phydev->duplex == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
+ ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
phy_modify(phydev, MII_BMCR, ~0, ctl);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 420201858564..6547b6cc6cbe 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -43,7 +43,6 @@ struct phylink {
/* private: */
struct net_device *netdev;
const struct phylink_mac_ops *mac_ops;
- const struct phylink_pcs_ops *pcs_ops;
struct phylink_config *config;
struct phylink_pcs *pcs;
struct device *dev;
@@ -74,9 +73,11 @@ struct phylink {
struct work_struct resolve;
bool mac_link_dropped;
+ bool using_mac_select_pcs;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
+ DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
u8 sfp_port;
};
@@ -132,17 +133,6 @@ void phylink_set_port_modes(unsigned long *mask)
}
EXPORT_SYMBOL_GPL(phylink_set_port_modes);
-void phylink_set_10g_modes(unsigned long *mask)
-{
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
-}
-EXPORT_SYMBOL_GPL(phylink_set_10g_modes);
-
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };
@@ -166,8 +156,84 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
-static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
- unsigned long caps)
+/**
+ * phylink_interface_max_speed() - get the maximum speed of a phy interface
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ *
+ * Determine the maximum speed of a phy interface. This is intended to help
+ * determine the correct speed to pass to the MAC when the phy is performing
+ * rate matching.
+ *
+ * Return: The maximum speed of @interface
+ */
+static int phylink_interface_max_speed(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ return SPEED_100;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ return SPEED_1000;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return SPEED_2500;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ return SPEED_5000;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return SPEED_10000;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ return SPEED_25000;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ return SPEED_40000;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ /* No idea! Garbage in, unknown out */
+ return SPEED_UNKNOWN;
+ }
+
+ /* If we get here, someone forgot to add an interface mode above */
+ WARN_ON_ONCE(1);
+ return SPEED_UNKNOWN;
+}
+
+/**
+ * phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @caps: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that are
+ * supported by the @caps. @linkmodes must have been initialised previously.
+ */
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps)
{
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
@@ -178,8 +244,10 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
if (caps & MAC_10HD)
__set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
- if (caps & MAC_10FD)
+ if (caps & MAC_10FD) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
+ }
if (caps & MAC_100HD) {
__set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
@@ -304,21 +372,72 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
__set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
}
}
+EXPORT_SYMBOL_GPL(phylink_caps_to_linkmodes);
+
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF },
+};
/**
- * phylink_get_linkmodes() - get acceptable link modes
- * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * phylink_cap_from_speed_duplex() - Get mac capability from speed/duplex
+ * @speed: the speed to search for
+ * @duplex: the duplex to search for
+ *
+ * Find the mac capability for a given speed and duplex.
+ *
+ * Return: A mask with the mac capability matching @speed and @duplex, or 0 if
+ * there were no matches.
+ */
+static unsigned long phylink_cap_from_speed_duplex(int speed,
+ unsigned int duplex)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++) {
+ if (speed == phylink_caps_params[i].speed &&
+ duplex == phylink_caps_params[i].duplex)
+ return phylink_caps_params[i].mask;
+ }
+
+ return 0;
+}
+
+/**
+ * phylink_get_capabilities() - get capabilities for a given MAC
* @interface: phy interface mode defined by &typedef phy_interface_t
* @mac_capabilities: bitmask of MAC capabilities
+ * @rate_matching: type of rate matching being performed
*
- * Set all possible pause, speed and duplex linkmodes in @linkmodes that
- * are supported by the @interface mode and @mac_capabilities. @linkmodes
- * must have been initialised previously.
+ * Get the MAC capabilities that are supported by the @interface mode and
+ * @mac_capabilities.
*/
-void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
- unsigned long mac_capabilities)
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching)
{
+ int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ unsigned long matched_caps = 0;
switch (interface) {
case PHY_INTERFACE_MODE_USXGMII:
@@ -330,6 +449,7 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_GMII:
caps |= MAC_1000HD | MAC_1000FD;
@@ -353,6 +473,7 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
case PHY_INTERFACE_MODE_1000BASEX:
caps |= MAC_1000HD;
fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEKX:
case PHY_INTERFACE_MODE_TRGMII:
caps |= MAC_1000FD;
break;
@@ -390,9 +511,55 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
break;
}
- phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+ switch (rate_matching) {
+ case RATE_MATCH_OPEN_LOOP:
+ /* TODO */
+ fallthrough;
+ case RATE_MATCH_NONE:
+ matched_caps = 0;
+ break;
+ case RATE_MATCH_PAUSE: {
+ /* The MAC must support asymmetric pause towards the local
+ * device for this. We could allow just symmetric pause, but
+ * then we might have to renegotiate if the link partner
+ * doesn't support pause. This is because there's no way to
+ * accept pause frames without transmitting them if we only
+ * support symmetric pause.
+ */
+ if (!(mac_capabilities & MAC_SYM_PAUSE) ||
+ !(mac_capabilities & MAC_ASYM_PAUSE))
+ break;
+
+ /* We can't adapt if the MAC doesn't support the interface's
+ * max speed at full duplex.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_FULL)) {
+ /* Although a duplex-matching phy might exist, we
+ * conservatively remove these modes because the MAC
+ * will not be aware of the half-duplex nature of the
+ * link.
+ */
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
+ }
+ break;
+ }
+ case RATE_MATCH_CRS:
+ /* The MAC must support half duplex at the interface's max
+ * speed.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_HALF)) {
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= mac_capabilities;
+ }
+ break;
+ }
+
+ return (caps & mac_capabilities) | matched_caps;
}
-EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
+EXPORT_SYMBOL_GPL(phylink_get_capabilities);
/**
* phylink_generic_validate() - generic validate() callback implementation
@@ -409,10 +576,14 @@ void phylink_generic_validate(struct phylink_config *config,
struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ unsigned long caps;
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
- phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+ caps = phylink_get_capabilities(state->interface,
+ config->mac_capabilities,
+ state->rate_matching);
+ phylink_caps_to_linkmodes(mask, caps);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
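A MAC driver consuming this API normally just describes itself and delegates validation; a minimal sketch under that assumption (the phylink_config fields and MAC_* masks are real, the foo driver is hypothetical):

/* setup, e.g. in the driver's probe */
static void foo_setup_phylink_config(struct phylink_config *cfg)
{
	cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				MAC_10 | MAC_100 | MAC_1000FD;
	__set_bit(PHY_INTERFACE_MODE_SGMII, cfg->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII, cfg->supported_interfaces);
}

/* .validate can then simply forward to the generic implementation,
 * which now routes through phylink_get_capabilities() as shown above.
 */
static void foo_mac_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	phylink_generic_validate(config, supported, state);
}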
@@ -427,7 +598,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
int ret;
/* Get the PCS for this interface mode */
- if (pl->mac_ops->mac_select_pcs) {
+ if (pl->using_mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs))
return PTR_ERR(pcs);
@@ -467,8 +638,9 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
-static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
- struct phylink_link_state *state)
+static int phylink_validate_mask(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state,
+ const unsigned long *interfaces)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, };
@@ -477,7 +649,7 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
int intf;
for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
- if (test_bit(intf, pl->config->supported_interfaces)) {
+ if (test_bit(intf, interfaces)) {
linkmode_copy(s, supported);
t = *state;
@@ -498,12 +670,14 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- if (!phy_interface_empty(pl->config->supported_interfaces)) {
+ const unsigned long *interfaces = pl->config->supported_interfaces;
+
+ if (!phy_interface_empty(interfaces)) {
if (state->interface == PHY_INTERFACE_MODE_NA)
- return phylink_validate_any(pl, supported, state);
+ return phylink_validate_mask(pl, supported, state,
+ interfaces);
- if (!test_bit(state->interface,
- pl->config->supported_interfaces))
+ if (!test_bit(state->interface, interfaces))
return -EINVAL;
}
@@ -641,6 +815,12 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RTBI:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -767,15 +947,28 @@ static void phylink_resolve_flow(struct phylink_link_state *state)
}
}
+static void phylink_pcs_poll_stop(struct phylink *pl)
+{
+ if (pl->cfg_link_an_mode == MLO_AN_INBAND)
+ del_timer(&pl->link_poll);
+}
+
+static void phylink_pcs_poll_start(struct phylink *pl)
+{
+ if (pl->pcs && pl->pcs->poll && pl->cfg_link_an_mode == MLO_AN_INBAND)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+}
+
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
phylink_dbg(pl,
- "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+ "%s: mode=%s/%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
__func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
+ phy_rate_matching_to_str(state->rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
@@ -787,8 +980,8 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl)
if (pl->link_config.an_enabled &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
phylink_autoneg_inband(pl->cur_link_an_mode)) {
- if (pl->pcs_ops)
- pl->pcs_ops->pcs_an_restart(pl->pcs);
+ if (pl->pcs)
+ pl->pcs->ops->pcs_an_restart(pl->pcs);
else if (pl->config->legacy_pre_march2020)
pl->mac_ops->mac_an_restart(pl->config);
}
@@ -798,11 +991,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
const struct phylink_link_state *state)
{
struct phylink_pcs *pcs = NULL;
+ bool pcs_changed = false;
int err;
phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
- if (pl->mac_ops->mac_select_pcs) {
+ if (pl->using_mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
@@ -810,8 +1004,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
pcs);
return;
}
+
+ pcs_changed = pcs && pl->pcs != pcs;
}
+ phylink_pcs_poll_stop(pl);
+
if (pl->mac_ops->mac_prepare) {
err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
state->interface);
@@ -825,17 +1023,17 @@ static void phylink_major_config(struct phylink *pl, bool restart,
/* If we have a new PCS, switch to the new PCS after preparing the MAC
* for the change.
*/
- if (pcs)
- phylink_set_pcs(pl, pcs);
+ if (pcs_changed)
+ pl->pcs = pcs;
phylink_mac_config(pl, state);
- if (pl->pcs_ops) {
- err = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
- state->interface,
- state->advertising,
- !!(pl->link_config.pause &
- MLO_PAUSE_AN));
+ if (pl->pcs) {
+ err = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+ state->interface,
+ state->advertising,
+ !!(pl->link_config.pause &
+ MLO_PAUSE_AN));
if (err < 0)
phylink_err(pl, "pcs_config failed: %pe\n",
ERR_PTR(err));
@@ -852,6 +1050,8 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
}
+
+ phylink_pcs_poll_start(pl);
}
/*
@@ -867,7 +1067,7 @@ static int phylink_change_inband_advert(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return 0;
- if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
+ if (!pl->pcs && pl->config->legacy_pre_march2020) {
/* Legacy method */
phylink_mac_config(pl, &pl->link_config);
phylink_mac_pcs_an_restart(pl);
@@ -884,10 +1084,11 @@ static int phylink_change_inband_advert(struct phylink *pl)
* restart negotiation if the pcs_config() helper indicates that
* the programmed advertisement has changed.
*/
- ret = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising,
- !!(pl->link_config.pause & MLO_PAUSE_AN));
+ ret = pl->pcs->ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+ pl->link_config.interface,
+ pl->link_config.advertising,
+ !!(pl->link_config.pause &
+ MLO_PAUSE_AN));
if (ret < 0)
return ret;
@@ -904,7 +1105,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- if (state->an_enabled) {
+ state->rate_matching = pl->link_config.rate_matching;
+ if (state->an_enabled) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -916,8 +1118,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- if (pl->pcs_ops)
- pl->pcs_ops->pcs_get_state(pl->pcs, state);
+ if (pl->pcs)
+ pl->pcs->ops->pcs_get_state(pl->pcs, state);
else if (pl->mac_ops->mac_pcs_get_state &&
pl->config->legacy_pre_march2020)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
@@ -987,19 +1189,43 @@ static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
+ int speed, duplex;
+ bool rx_pause;
+
+ speed = link_state.speed;
+ duplex = link_state.duplex;
+ rx_pause = !!(link_state.pause & MLO_PAUSE_RX);
+
+ switch (link_state.rate_matching) {
+ case RATE_MATCH_PAUSE:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will send
+ * pause frames to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_FULL;
+ rx_pause = true;
+ break;
+
+ case RATE_MATCH_CRS:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will cause
+ * collisions to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_HALF;
+ break;
+ }
pl->cur_interface = link_state.interface;
- if (pl->pcs_ops && pl->pcs_ops->pcs_link_up)
- pl->pcs_ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
- pl->cur_interface,
- link_state.speed, link_state.duplex);
+ if (pl->pcs && pl->pcs->ops->pcs_link_up)
+ pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
+ pl->cur_interface, speed, duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev,
- pl->cur_link_an_mode, pl->cur_interface,
- link_state.speed, link_state.duplex,
- !!(link_state.pause & MLO_PAUSE_TX),
- !!(link_state.pause & MLO_PAUSE_RX));
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->cur_interface, speed, duplex,
+ !!(link_state.pause & MLO_PAUSE_TX), rx_pause);
if (ndev)
netif_carrier_on(ndev);
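As a worked example of the RATE_MATCH_PAUSE branch (the pairing is illustrative): a PHY on a 10GBASE-R host interface that negotiated 1000 Mb/s on the media side reports link_state.speed == SPEED_1000, but the MAC is brought up as:

	speed    = phylink_interface_max_speed(PHY_INTERFACE_MODE_10GBASER);
					/* SPEED_10000: MAC stays at wire rate */
	duplex   = DUPLEX_FULL;		/* PHY hides the media-side duplex */
	rx_pause = true;		/* PHY paces the MAC with pause frames */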
@@ -1091,6 +1317,17 @@ static void phylink_resolve(struct work_struct *w)
}
link_state.interface = pl->phy_state.interface;
+ /* If we are doing rate matching, then the
+ * link speed/duplex comes from the PHY
+ */
+ if (pl->phy_state.rate_matching) {
+ link_state.rate_matching =
+ pl->phy_state.rate_matching;
+ link_state.speed = pl->phy_state.speed;
+ link_state.duplex =
+ pl->phy_state.duplex;
+ }
+
/* If we have a PHY, we need to update with
* the PHY flow control bits.
*/
@@ -1113,7 +1350,7 @@ static void phylink_resolve(struct work_struct *w)
}
phylink_major_config(pl, false, &link_state);
pl->link_config.interface = link_state.interface;
- } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
+ } else if (!pl->pcs && pl->config->legacy_pre_march2020) {
/* The interface remains unchanged, only the speed,
* duplex or pause settings have changed. Call the
* old mac_config() method to configure the MAC/PCS
@@ -1182,9 +1419,8 @@ static int phylink_register_sfp(struct phylink *pl,
bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
- ret = PTR_ERR(bus);
- phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
- return ret;
+ phylink_err(pl, "unable to attach SFP bus: %pe\n", bus);
+ return PTR_ERR(bus);
}
pl->sfp_bus = bus;
@@ -1216,11 +1452,17 @@ struct phylink *phylink_create(struct phylink_config *config,
phy_interface_t iface,
const struct phylink_mac_ops *mac_ops)
{
+ bool using_mac_select_pcs = false;
struct phylink *pl;
int ret;
- /* Validate the supplied configuration */
if (mac_ops->mac_select_pcs &&
+ mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) !=
+ ERR_PTR(-EOPNOTSUPP))
+ using_mac_select_pcs = true;
+
+ /* Validate the supplied configuration */
+ if (using_mac_select_pcs &&
phy_interface_empty(config->supported_interfaces)) {
dev_err(config->dev,
"phylink: error: empty supported_interfaces but mac_select_pcs() method present\n");
@@ -1244,6 +1486,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
+ pl->using_mac_select_pcs = using_mac_select_pcs;
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -1290,36 +1533,6 @@ struct phylink *phylink_create(struct phylink_config *config,
EXPORT_SYMBOL_GPL(phylink_create);
/**
- * phylink_set_pcs() - set the current PCS for phylink to use
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @pcs: a pointer to the &struct phylink_pcs
- *
- * Bind the MAC PCS to phylink. This may be called after phylink_create().
- * If it is desired to dynamically change the PCS, then the preferred method
- * is to use mac_select_pcs(), but it may also be called in mac_prepare()
- * or mac_config().
- *
- * Please note that there are behavioural changes with the mac_config()
- * callback if a PCS is present (denoting a newer setup) so removing a PCS
- * is not supported, and if a PCS is going to be used, it must be registered
- * by calling phylink_set_pcs() at the latest in the first mac_config() call.
- */
-void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs)
-{
- pl->pcs = pcs;
- pl->pcs_ops = pcs->ops;
-
- if (!pl->phylink_disable_state &&
- pl->cfg_link_an_mode == MLO_AN_INBAND) {
- if (pl->config->pcs_poll || pcs->poll)
- mod_timer(&pl->link_poll, jiffies + HZ);
- else
- del_timer(&pl->link_poll);
- }
-}
-EXPORT_SYMBOL_GPL(phylink_set_pcs);
-
-/**
* phylink_destroy() - cleanup and destroy the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -1349,6 +1562,7 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
mutex_lock(&pl->state_mutex);
pl->phy_state.speed = phydev->speed;
pl->phy_state.duplex = phydev->duplex;
+ pl->phy_state.rate_matching = phydev->rate_matching;
pl->phy_state.pause = MLO_PAUSE_NONE;
if (tx_pause)
pl->phy_state.pause |= MLO_PAUSE_TX;
@@ -1360,10 +1574,11 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
+ phy_rate_matching_to_str(phydev->rate_matching),
phylink_pause_to_str(pl->phy_state.pause));
}
@@ -1400,14 +1615,15 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
config.interface = PHY_INTERFACE_MODE_NA;
else
config.interface = interface;
+ config.rate_matching = phy_get_rate_matching(phy, config.interface);
ret = phylink_validate(pl, supported, &config);
if (ret) {
- phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+ phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %pe\n",
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
- ret);
+ ERR_PTR(ret));
return ret;
}
@@ -1427,6 +1643,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
pl->phy_state.pause = MLO_PAUSE_NONE;
pl->phy_state.speed = SPEED_UNKNOWN;
pl->phy_state.duplex = DUPLEX_UNKNOWN;
+ pl->phy_state.rate_matching = RATE_MATCH_NONE;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -1444,6 +1661,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
if (phy_interrupt_is_valid(phy))
phy_request_interrupt(phy);
+ if (pl->config->mac_managed_pm)
+ phy->mac_managed_pm = true;
+
return 0;
}
@@ -1452,7 +1672,7 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
{
if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(interface))))
+ phy_interface_mode_is_8023z(interface) && !pl->sfp_bus)))
return -EINVAL;
if (pl->phydev)
@@ -1684,7 +1904,6 @@ void phylink_start(struct phylink *pl)
poll |= pl->config->poll_fixed_state;
break;
case MLO_AN_INBAND:
- poll |= pl->config->pcs_poll;
if (pl->pcs)
poll |= pl->pcs->poll;
break;
@@ -1870,8 +2089,10 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
{
phylink_merge_link_mode(kset->link_modes.advertising, state->advertising);
linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising);
- kset->base.speed = state->speed;
- kset->base.duplex = state->duplex;
+ if (kset->base.rate_matching == RATE_MATCH_NONE) {
+ kset->base.speed = state->speed;
+ kset->base.duplex = state->duplex;
+ }
kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE :
AUTONEG_DISABLE;
}
@@ -2326,8 +2547,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
if (mdio_phy_id_is_c45(phy_id)) {
prtad = mdio_phy_id_prtad(phy_id);
devad = mdio_phy_id_devad(phy_id);
- devad = mdiobus_c45_addr(devad, reg);
- } else if (phydev->is_c45) {
+ return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad,
+ reg);
+ }
+
+ if (phydev->is_c45) {
switch (reg) {
case MII_BMCR:
case MII_BMSR:
@@ -2349,12 +2573,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
return -EINVAL;
}
prtad = phy_id;
- devad = mdiobus_c45_addr(devad, reg);
- } else {
- prtad = phy_id;
- devad = reg;
+ return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad,
+ reg);
}
- return mdiobus_read(pl->phydev->mdio.bus, prtad, devad);
+
+ return mdiobus_read(pl->phydev->mdio.bus, phy_id, reg);
}
static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
@@ -2366,8 +2589,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
if (mdio_phy_id_is_c45(phy_id)) {
prtad = mdio_phy_id_prtad(phy_id);
devad = mdio_phy_id_devad(phy_id);
- devad = mdiobus_c45_addr(devad, reg);
- } else if (phydev->is_c45) {
+ return mdiobus_c45_write(pl->phydev->mdio.bus, prtad, devad,
+ reg, val);
+ }
+
+ if (phydev->is_c45) {
switch (reg) {
case MII_BMCR:
case MII_BMSR:
@@ -2388,14 +2614,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
default:
return -EINVAL;
}
- prtad = phy_id;
- devad = mdiobus_c45_addr(devad, reg);
- } else {
- prtad = phy_id;
- devad = reg;
+ return mdiobus_c45_write(pl->phydev->mdio.bus, phy_id, devad,
+ reg, val);
}
- return mdiobus_write(phydev->mdio.bus, prtad, devad, val);
+ return mdiobus_write(phydev->mdio.bus, phy_id, reg, val);
}
static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
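The conversion above moves from packing the Clause 45 device address into the register argument to the dedicated C45 accessors. Roughly, as a sketch of the two calling conventions:

	/* old: devad and regnum packed into one value */
	devad = mdiobus_c45_addr(devad, reg);	/* ~ MII_ADDR_C45 | devad << 16 | reg */
	ret = mdiobus_read(bus, prtad, devad);

	/* new: explicit port address, device and register */
	ret = mdiobus_c45_read(bus, prtad, devad, reg);
	ret = mdiobus_c45_write(bus, prtad, devad, reg, val);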
@@ -2583,21 +2806,85 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static int phylink_sfp_config(struct phylink *pl, u8 mode,
- const unsigned long *supported,
- const unsigned long *advertising)
+static const phy_interface_t phylink_sfp_interface_preference[] = {
+ PHY_INTERFACE_MODE_25GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_5GBASER,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_100BASEX,
+};
+
+static DECLARE_PHY_INTERFACE_MASK(phylink_sfp_interfaces);
+
+static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
+ const unsigned long *intf)
+{
+ phy_interface_t interface;
+ size_t i;
+
+ interface = PHY_INTERFACE_MODE_NA;
+ for (i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); i++)
+ if (test_bit(phylink_sfp_interface_preference[i], intf)) {
+ interface = phylink_sfp_interface_preference[i];
+ break;
+ }
+
+ return interface;
+}
+
+static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ bool changed = false;
+
+ phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
+ phylink_an_mode_str(mode), phy_modes(state->interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+
+ if (!linkmode_equal(pl->supported, supported)) {
+ linkmode_copy(pl->supported, supported);
+ changed = true;
+ }
+
+ if (!linkmode_equal(pl->link_config.advertising, state->advertising)) {
+ linkmode_copy(pl->link_config.advertising, state->advertising);
+ changed = true;
+ }
+
+ if (pl->cur_link_an_mode != mode ||
+ pl->link_config.interface != state->interface) {
+ pl->cur_link_an_mode = mode;
+ pl->link_config.interface = state->interface;
+
+ changed = true;
+
+ phylink_info(pl, "switched to %s/%s link mode\n",
+ phylink_an_mode_str(mode),
+ phy_modes(state->interface));
+ }
+
+ if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state))
+ phylink_mac_initial_config(pl, false);
+}
+
+static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
+ struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
phy_interface_t iface;
- bool changed;
int ret;
- linkmode_copy(support, supported);
+ linkmode_copy(support, phy->supported);
memset(&config, 0, sizeof(config));
- linkmode_copy(config.advertising, advertising);
+ linkmode_copy(config.advertising, phy->advertising);
config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
@@ -2607,8 +2894,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
/* Ignore errors if we're expecting a PHY to attach later */
ret = phylink_validate(pl, support, &config);
if (ret) {
- phylink_err(pl, "validation with support %*pb failed: %d\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation with support %*pb failed: %pe\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
return ret;
}
@@ -2624,67 +2912,109 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
linkmode_copy(support1, support);
ret = phylink_validate(pl, support1, &config);
if (ret) {
- phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
+ phylink_err(pl,
+ "validation of %s/%s with support %*pb failed: %pe\n",
phylink_an_mode_str(mode),
phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
return ret;
}
- phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ pl->link_port = pl->sfp_port;
+
+ phylink_sfp_set_config(pl, mode, support, &config);
+
+ return 0;
+}
- if (phy_interface_mode_is_8023z(iface) && pl->phydev)
+static int phylink_sfp_config_optical(struct phylink *pl)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phylink_link_state config;
+ phy_interface_t interface;
+ int ret;
+
+ phylink_dbg(pl, "optical SFP: interfaces=[mac=%*pbl, sfp=%*pbl]\n",
+ (int)PHY_INTERFACE_MODE_MAX,
+ pl->config->supported_interfaces,
+ (int)PHY_INTERFACE_MODE_MAX,
+ pl->sfp_interfaces);
+
+ /* Find the union of the supported interfaces by the PCS/MAC and
+ * the SFP module.
+ */
+ phy_interface_and(interfaces, pl->config->supported_interfaces,
+ pl->sfp_interfaces);
+ if (phy_interface_empty(interfaces)) {
+ phylink_err(pl, "unsupported SFP module: no common interface modes\n");
return -EINVAL;
+ }
- changed = !linkmode_equal(pl->supported, support) ||
- !linkmode_equal(pl->link_config.advertising,
- config.advertising);
- if (changed) {
- linkmode_copy(pl->supported, support);
- linkmode_copy(pl->link_config.advertising, config.advertising);
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(support, pl->sfp_support);
+ linkmode_copy(config.advertising, pl->sfp_support);
+ config.speed = SPEED_UNKNOWN;
+ config.duplex = DUPLEX_UNKNOWN;
+ config.pause = MLO_PAUSE_AN;
+ config.an_enabled = true;
+
+ /* For all the interfaces that are supported, reduce the sfp_support
+ * mask to only those link modes that can be supported.
+ */
+ ret = phylink_validate_mask(pl, pl->sfp_support, &config, interfaces);
+ if (ret) {
+ phylink_err(pl, "unsupported SFP module: validation with support %*pb failed\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ return ret;
}
- if (pl->cur_link_an_mode != mode ||
- pl->link_config.interface != config.interface) {
- pl->link_config.interface = config.interface;
- pl->cur_link_an_mode = mode;
+ interface = phylink_choose_sfp_interface(pl, interfaces);
+ if (interface == PHY_INTERFACE_MODE_NA) {
+ phylink_err(pl, "failed to select SFP interface\n");
+ return -EINVAL;
+ }
- changed = true;
+ phylink_dbg(pl, "optical SFP: chosen %s interface\n",
+ phy_modes(interface));
- phylink_info(pl, "switched to %s/%s link mode\n",
- phylink_an_mode_str(mode),
- phy_modes(config.interface));
+ config.interface = interface;
+
+ /* Ignore errors if we're expecting a PHY to attach later */
+ ret = phylink_validate(pl, support, &config);
+ if (ret) {
+ phylink_err(pl, "validation with support %*pb failed: %pe\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
+ return ret;
}
pl->link_port = pl->sfp_port;
- if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
- &pl->phylink_disable_state))
- phylink_mac_initial_config(pl, false);
+ phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
- return ret;
+ return 0;
}
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
struct phylink *pl = upstream;
- unsigned long *support = pl->sfp_support;
ASSERT_RTNL();
- linkmode_zero(support);
- sfp_parse_support(pl->sfp_bus, id, support);
- pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+ linkmode_zero(pl->sfp_support);
+ phy_interface_zero(pl->sfp_interfaces);
+ sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces);
+ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support);
/* If this module may have a PHY connecting later, defer until later */
pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
if (pl->sfp_may_have_phy)
return 0;
- return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+ return phylink_sfp_config_optical(pl);
}
static int phylink_sfp_module_start(void *upstream)
@@ -2703,8 +3033,7 @@ static int phylink_sfp_module_start(void *upstream)
if (!pl->sfp_may_have_phy)
return 0;
- return phylink_sfp_config(pl, MLO_AN_INBAND,
- pl->sfp_support, pl->sfp_support);
+ return phylink_sfp_config_optical(pl);
}
static void phylink_sfp_module_stop(void *upstream)
@@ -2764,8 +3093,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
else
mode = MLO_AN_INBAND;
+ /* Set the PHY's host supported interfaces */
+ phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
+ pl->config->supported_interfaces);
+
/* Do the initial configuration */
- ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising);
+ ret = phylink_sfp_config_phy(pl, mode, phy);
if (ret < 0)
return ret;
@@ -2800,34 +3133,6 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
/* Helpers for MAC drivers */
-/**
- * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper
- * @state: a pointer to a &struct phylink_link_state
- *
- * Inspect the interface mode, advertising mask or forced speed and
- * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching
- * the interface mode to suit. @state->interface is appropriately
- * updated, and the advertising mask has the "other" baseX_Full flag
- * cleared.
- */
-void phylink_helper_basex_speed(struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_8023z(state->interface)) {
- bool want_2500 = state->an_enabled ?
- phylink_test(state->advertising, 2500baseX_Full) :
- state->speed == SPEED_2500;
-
- if (want_2500) {
- phylink_clear(state->advertising, 1000baseX_Full);
- state->interface = PHY_INTERFACE_MODE_2500BASEX;
- } else {
- phylink_clear(state->advertising, 2500baseX_Full);
- state->interface = PHY_INTERFACE_MODE_1000BASEX;
- }
- }
-}
-EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
-
static void phylink_decode_c37_word(struct phylink_link_state *state,
uint16_t config_reg, int speed)
{
@@ -2966,6 +3271,7 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
@@ -3037,6 +3343,7 @@ int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
adv |= ADVERTISE_1000XPSE_ASYM;
return adv;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
return 0x0001;
default:
/* Nothing to do for other modes */
@@ -3076,7 +3383,9 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
/* Ensure ISOLATE bit is disabled */
if (mode == MLO_AN_INBAND &&
- linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)))
bmcr = BMCR_ANENABLE;
else
bmcr = 0;
@@ -3141,4 +3450,15 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
}
EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
+static int __init phylink_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i)
+ __set_bit(phylink_sfp_interface_preference[i],
+ phylink_sfp_interfaces);
+
+ return 0;
+}
+
+module_init(phylink_init);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a5671ab896b3..3d99fd6664d7 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -70,6 +70,7 @@
#define RTLGEN_SPEED_MASK 0x0630
#define RTL_GENERIC_PHYID 0x001cc800
+#define RTL_8211FVD_PHYID 0x001cc878
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -78,6 +79,7 @@ MODULE_LICENSE("GPL");
struct rtl821x_priv {
u16 phycr1;
u16 phycr2;
+ bool has_phycr2;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -94,6 +96,7 @@ static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct rtl821x_priv *priv;
+ u32 phy_id = phydev->drv->phy_id;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -108,13 +111,16 @@ static int rtl821x_probe(struct phy_device *phydev)
if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
- if (ret < 0)
- return ret;
+ priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
+ if (priv->has_phycr2) {
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ if (ret < 0)
+ return ret;
- priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
- if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
- priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
+ if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
+ priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ }
phydev->priv = priv;
@@ -400,12 +406,14 @@ static int rtl8211f_config_init(struct phy_device *phydev)
val_rxdly ? "enabled" : "disabled");
}
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
- RTL8211F_CLKOUT_EN, priv->phycr2);
- if (ret < 0) {
- dev_err(dev, "clkout configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
+ if (priv->has_phycr2) {
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_CLKOUT_EN, priv->phycr2);
+ if (ret < 0) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return genphy_soft_reset(phydev);
@@ -924,6 +932,18 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID),
+ .name = "RTL8211F-VD Gigabit Ethernet",
+ .probe = rtl821x_probe,
+ .config_init = &rtl8211f_config_init,
+ .read_status = rtlgen_read_status,
+ .config_intr = &rtl8211f_config_intr,
+ .handle_interrupt = rtl8211f_handle_interrupt,
+ .suspend = genphy_suspend,
+ .resume = rtl821x_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
.name = "Generic FE-GE Realtek PHY",
.match_phy_device = rtlgen_match_phy_device,
.read_status = rtlgen_read_status,
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 0c6c0d1843bc..daac293e8ede 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -10,12 +10,6 @@
#include "sfp.h"
-struct sfp_quirk {
- const char *vendor;
- const char *part;
- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes);
-};
-
/**
* struct sfp_bus - internal representation of a sfp bus
*/
@@ -38,87 +32,6 @@ struct sfp_bus {
bool started;
};
-static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
- unsigned long *modes)
-{
- phylink_set(modes, 2500baseX_Full);
-}
-
-static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
- unsigned long *modes)
-{
- /* Ubiquiti U-Fiber Instant module claims that support all transceiver
- * types including 10G Ethernet which is not truth. So clear all claimed
- * modes and set only one mode which module supports: 1000baseX_Full.
- */
- phylink_zero(modes);
- phylink_set(modes, 1000baseX_Full);
-}
-
-static const struct sfp_quirk sfp_quirks[] = {
- {
- // Alcatel Lucent G-010S-P can operate at 2500base-X, but
- // incorrectly report 2500MBd NRZ in their EEPROM
- .vendor = "ALCATELLUCENT",
- .part = "G010SP",
- .modes = sfp_quirk_2500basex,
- }, {
- // Alcatel Lucent G-010S-A can operate at 2500base-X, but
- // report 3.2GBd NRZ in their EEPROM
- .vendor = "ALCATELLUCENT",
- .part = "3FE46541AA",
- .modes = sfp_quirk_2500basex,
- }, {
- // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd
- // NRZ in their EEPROM
- .vendor = "HUAWEI",
- .part = "MA5671A",
- .modes = sfp_quirk_2500basex,
- }, {
- .vendor = "UBNT",
- .part = "UF-INSTANT",
- .modes = sfp_quirk_ubnt_uf_instant,
- },
-};
-
-static size_t sfp_strlen(const char *str, size_t maxlen)
-{
- size_t size, i;
-
- /* Trailing characters should be filled with space chars */
- for (i = 0, size = 0; i < maxlen; i++)
- if (str[i] != ' ')
- size = i + 1;
-
- return size;
-}
-
-static bool sfp_match(const char *qs, const char *str, size_t len)
-{
- if (!qs)
- return true;
- if (strlen(qs) != len)
- return false;
- return !strncmp(qs, str, len);
-}
-
-static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
-{
- const struct sfp_quirk *q;
- unsigned int i;
- size_t vs, ps;
-
- vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
- ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
-
- for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
- if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
- sfp_match(q->part, id->base.vendor_pn, ps))
- return q;
-
- return NULL;
-}
-
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -226,12 +139,14 @@ EXPORT_SYMBOL_GPL(sfp_may_have_phy);
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
* @support: pointer to an array of unsigned long for the ethtool support mask
+ * @interfaces: pointer to an array of unsigned long for phy interface modes
+ * mask
*
* Parse the EEPROM identification information and derive the supported
* ethtool link modes for the module.
*/
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support)
+ unsigned long *support, unsigned long *interfaces)
{
unsigned int br_min, br_nom, br_max;
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
@@ -258,54 +173,81 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
/* Set ethtool support from the compliance fields. */
- if (id->base.e10g_base_sr)
+ if (id->base.e10g_base_sr) {
phylink_set(modes, 10000baseSR_Full);
- if (id->base.e10g_base_lr)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_lr) {
phylink_set(modes, 10000baseLR_Full);
- if (id->base.e10g_base_lrm)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_lrm) {
phylink_set(modes, 10000baseLRM_Full);
- if (id->base.e10g_base_er)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_er) {
phylink_set(modes, 10000baseER_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
if (id->base.e1000_base_sx ||
id->base.e1000_base_lx ||
- id->base.e1000_base_cx)
+ id->base.e1000_base_cx) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
if (id->base.e1000_base_t) {
phylink_set(modes, 1000baseT_Half);
phylink_set(modes, 1000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
}
/* 1000Base-PX or 1000Base-BX10 */
if ((id->base.e_base_px || id->base.e_base_bx10) &&
- br_min <= 1300 && br_max >= 1200)
+ br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
/* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */
- if (id->base.e100_base_fx || id->base.e100_base_lx)
+ if (id->base.e100_base_fx || id->base.e100_base_lx) {
phylink_set(modes, 100baseFX_Full);
- if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100)
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces);
+ }
+ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100) {
phylink_set(modes, 100baseFX_Full);
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces);
+ }
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
*/
if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
/* This may look odd, but some manufacturers use 12000MBd */
- if (br_min <= 12000 && br_max >= 10300)
+ if (br_min <= 12000 && br_max >= 10300) {
phylink_set(modes, 10000baseCR_Full);
- if (br_min <= 3200 && br_max >= 3100)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (br_min <= 3200 && br_max >= 3100) {
phylink_set(modes, 2500baseX_Full);
- if (br_min <= 1300 && br_max >= 1200)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
+ if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
}
if (id->base.sfp_ct_passive) {
- if (id->base.passive.sff8431_app_e)
+ if (id->base.passive.sff8431_app_e) {
phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
}
if (id->base.sfp_ct_active) {
if (id->base.active.sff8431_app_e ||
id->base.active.sff8431_lim) {
phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
}
}
@@ -315,6 +257,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
phylink_set(modes, 100000baseSR4_Full);
phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
break;
case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
@@ -326,16 +269,20 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_25GBASE_CR_S:
case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
break;
case SFF8024_ECC_10GBASE_T_SFI:
case SFF8024_ECC_10GBASE_T_SR:
phylink_set(modes, 10000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
break;
case SFF8024_ECC_5GBASE_T:
phylink_set(modes, 5000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, interfaces);
break;
case SFF8024_ECC_2_5GBASE_T:
phylink_set(modes, 2500baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
break;
default:
dev_warn(bus->sfp_dev,
@@ -348,10 +295,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (id->base.fc_speed_100 ||
id->base.fc_speed_200 ||
id->base.fc_speed_400) {
- if (id->base.br_nominal >= 31)
+ if (id->base.br_nominal >= 31) {
phylink_set(modes, 2500baseX_Full);
- if (id->base.br_nominal >= 12)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
+ if (id->base.br_nominal >= 12) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
}
/* If we haven't discovered any modes that this module supports, try
@@ -364,14 +315,18 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* 2500BASE-X, so we allow some slack here.
*/
if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
- if (br_min <= 1300 && br_max >= 1200)
+ if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
- if (br_min <= 3200 && br_max >= 2500)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
+ if (br_min <= 3200 && br_max >= 2500) {
phylink_set(modes, 2500baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
}
- if (bus->sfp_quirk)
- bus->sfp_quirk->modes(id, modes);
+ if (bus->sfp_quirk && bus->sfp_quirk->modes)
+ bus->sfp_quirk->modes(id, modes, interfaces);
linkmode_or(support, support, modes);
@@ -651,6 +606,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
else if (ret < 0)
return ERR_PTR(ret);
+ if (!fwnode_device_is_available(ref.fwnode)) {
+ fwnode_handle_put(ref.fwnode);
+ return NULL;
+ }
+
bus = sfp_bus_get(ref.fwnode);
fwnode_handle_put(ref.fwnode);
if (!bus)
@@ -775,12 +735,13 @@ void sfp_link_down(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_link_down);
-int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk)
{
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
- bus->sfp_quirk = sfp_lookup_quirk(id);
+ bus->sfp_quirk = quirk;
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
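With this change sfp-bus.c no longer resolves quirks itself; the quirk table and lookup move to sfp.c (the table appears in the sfp.c diff below), and the caller passes the result in. The sfp.c side presumably ends up as something like this sketch (sfp_lookup_quirk()'s new form is not visible in the quoted hunks):

	/* in sfp.c, once a module is identified (assumed) */
	sfp->quirk = sfp_lookup_quirk(&sfp->id);
	...
	err = sfp_module_insert(sfp->sfp_bus, &sfp->id, sfp->quirk);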
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index ab77a9f439ef..40c9a64c5e30 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -166,6 +166,7 @@ static const enum gpiod_flags gpio_flags[] = {
* on board (for a copper SFP) time to initialise.
*/
#define T_WAIT msecs_to_jiffies(50)
+#define T_WAIT_ROLLBALL msecs_to_jiffies(25000)
#define T_START_UP msecs_to_jiffies(300)
#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
@@ -205,8 +206,11 @@ static const enum gpiod_flags gpio_flags[] = {
/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
+ * RollBall SFPs access the PHY via the SFP Enhanced Digital Diagnostic
+ * Interface at address 0x51 (mdio-i2c will use the RollBall protocol on
+ * this address).
*/
-#define SFP_PHY_ADDR 22
+#define SFP_PHY_ADDR 22
+#define SFP_PHY_ADDR_ROLLBALL 17
struct sff_data {
unsigned int gpios;
@@ -218,6 +222,7 @@ struct sfp {
struct i2c_adapter *i2c;
struct mii_bus *i2c_mii;
struct sfp_bus *sfp_bus;
+ enum mdio_i2c_proto mdio_protocol;
struct phy_device *mod_phy;
const struct sff_data *type;
size_t i2c_block_size;
@@ -234,6 +239,7 @@ struct sfp {
bool need_poll;
struct mutex st_mutex; /* Protects state */
+ unsigned int state_hw_mask;
unsigned int state_soft_mask;
unsigned int state;
struct delayed_work poll;
@@ -250,6 +256,10 @@ struct sfp {
struct sfp_eeprom_id id;
unsigned int module_power_mW;
unsigned int module_t_start_up;
+ unsigned int module_t_wait;
+ bool tx_fault_ignore;
+
+ const struct sfp_quirk *quirk;
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
@@ -307,6 +317,136 @@ static const struct of_device_id sfp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sfp_of_match);
+static void sfp_fixup_long_startup(struct sfp *sfp)
+{
+ sfp->module_t_start_up = T_START_UP_BAD_GPON;
+}
+
+static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
+{
+ sfp->tx_fault_ignore = true;
+}
+
+static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+{
+ /* Ignore the TX_FAULT and LOS signals on this module.
+ * These are possibly used for other purposes on this
+ * module, e.g. a serial port.
+ */
+ sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
+}
+
+static void sfp_fixup_rollball(struct sfp *sfp)
+{
+ sfp->mdio_protocol = MDIO_I2C_ROLLBALL;
+ sfp->module_t_wait = T_WAIT_ROLLBALL;
+}
+
+static void sfp_fixup_rollball_cc(struct sfp *sfp)
+{
+ sfp_fixup_rollball(sfp);
+
+ /* Some RollBall SFPs may have wrong (zero) extended compliance code
+ * burned in EEPROM. For PHY probing we need the correct one.
+ */
+ sfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SFI;
+}
+
+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ unsigned long *modes,
+ unsigned long *interfaces)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+}
+
+static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
+ unsigned long *modes,
+ unsigned long *interfaces)
+{
+ /* The Ubiquiti U-Fiber Instant module claims to support all transceiver
+ * types, including 10G Ethernet, which is not true. So clear all the
+ * claimed modes and set the only mode the module supports:
+ * 1000baseX_Full.
+ */
+ linkmode_zero(modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
+}
+
+#define SFP_QUIRK(_v, _p, _m, _f) \
+ { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, }
+#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL)
+#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f)
+
+static const struct sfp_quirk sfp_quirks[] = {
+ // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly
+ // reports 2500MBd NRZ in its EEPROM
+ SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex),
+
+ // Alcatel Lucent G-010S-A can operate at 2500base-X, but reports 3.2GBd
+ // NRZ in its EEPROM
+ SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
+ sfp_fixup_long_startup),
+
+ SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+
+ // Huawei MA5671A can operate at 2500base-X, but reports 1.2GBd NRZ in
+ // its EEPROM
+ SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
+ // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly
+ // reports 2500MBd NRZ in its EEPROM
+ SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+
+ SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
+
+ SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball),
+ SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball),
+};
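
With the table and helper macros in place, covering another misbehaving
module is a one-line change. A hypothetical entry (vendor and part
strings invented for illustration), combining a mode override with a
fixup:

	// Hypothetical: module reporting the wrong bitrate that also
	// needs the long start-up delay
	SFP_QUIRK("EXAMPLECORP", "EX-GPON-1", sfp_quirk_2500basex,
		  sfp_fixup_long_startup),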
+
+static size_t sfp_strlen(const char *str, size_t maxlen)
+{
+ size_t size, i;
+
+ /* Trailing characters should be filled with space chars, but
+ * some manufacturers can't read SFF-8472 and use NUL.
+ */
+ for (i = 0, size = 0; i < maxlen; i++)
+ if (str[i] != ' ' && str[i] != '\0')
+ size = i + 1;
+
+ return size;
+}
+
+static bool sfp_match(const char *qs, const char *str, size_t len)
+{
+ if (!qs)
+ return true;
+ if (strlen(qs) != len)
+ return false;
+ return !strncmp(qs, str, len);
+}
+
+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
+{
+ const struct sfp_quirk *q;
+ unsigned int i;
+ size_t vs, ps;
+
+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
+
+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
+ if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
+ sfp_match(q->part, id->base.vendor_pn, ps))
+ return q;
+
+ return NULL;
+}
+
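A note on the matching semantics: SFF-8472 vendor fields are fixed width,
space padded and not NUL-terminated, so quirk strings are compared against
the trimmed length instead of with strcmp(). An illustration with assumed
EEPROM contents:

	/* id->base.vendor_name = "HUAWEI          " (16 bytes, space padded)
	 * sfp_strlen() returns 6, so a quirk with .vendor = "HUAWEI" matches
	 * because strlen("HUAWEI") == 6 && !strncmp("HUAWEI", name, 6).
	 * A NULL .vendor or .part acts as a wildcard in sfp_match().
	 */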
static unsigned long poll_jiffies;
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -418,9 +558,6 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
{
- struct mii_bus *i2c_mii;
- int ret;
-
if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
return -EINVAL;
@@ -428,7 +565,15 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
sfp->read = sfp_i2c_read;
sfp->write = sfp_i2c_write;
- i2c_mii = mdio_i2c_alloc(sfp->dev, i2c);
+ return 0;
+}
+
+static int sfp_i2c_mdiobus_create(struct sfp *sfp)
+{
+ struct mii_bus *i2c_mii;
+ int ret;
+
+ i2c_mii = mdio_i2c_alloc(sfp->dev, sfp->i2c, sfp->mdio_protocol);
if (IS_ERR(i2c_mii))
return PTR_ERR(i2c_mii);
@@ -446,6 +591,12 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
return 0;
}
+static void sfp_i2c_mdiobus_destroy(struct sfp *sfp)
+{
+ mdiobus_unregister(sfp->i2c_mii);
+ sfp->i2c_mii = NULL;
+}
+
/* Interface */
static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
@@ -471,8 +622,8 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
state |= SFP_F_TX_FAULT;
} else {
dev_err_ratelimited(sfp->dev,
- "failed to read SFP soft status: %d\n",
- ret);
+ "failed to read SFP soft status: %pe\n",
+ ERR_PTR(ret));
/* Preserve the current state */
state = sfp->state;
}
@@ -498,17 +649,18 @@ static void sfp_soft_set_state(struct sfp *sfp, unsigned int state)
static void sfp_soft_start_poll(struct sfp *sfp)
{
const struct sfp_eeprom_id *id = &sfp->id;
+ unsigned int mask = 0;
sfp->state_soft_mask = 0;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE &&
- !sfp->gpio[GPIO_TX_DISABLE])
- sfp->state_soft_mask |= SFP_F_TX_DISABLE;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT &&
- !sfp->gpio[GPIO_TX_FAULT])
- sfp->state_soft_mask |= SFP_F_TX_FAULT;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS &&
- !sfp->gpio[GPIO_LOS])
- sfp->state_soft_mask |= SFP_F_LOS;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE)
+ mask |= SFP_F_TX_DISABLE;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT)
+ mask |= SFP_F_TX_FAULT;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS)
+ mask |= SFP_F_LOS;
+
+ // Poll the soft state for hardware pins we want to ignore
+ sfp->state_soft_mask = ~sfp->state_hw_mask & mask;
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
!sfp->need_poll)
@@ -522,10 +674,11 @@ static void sfp_soft_stop_poll(struct sfp *sfp)
static unsigned int sfp_get_state(struct sfp *sfp)
{
- unsigned int state = sfp->get_state(sfp);
+ unsigned int soft = sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT);
+ unsigned int state;
- if (state & SFP_F_PRESENT &&
- sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT))
+ state = sfp->get_state(sfp) & sfp->state_hw_mask;
+ if (state & SFP_F_PRESENT && soft)
state |= sfp_soft_get_state(sfp);
return state;
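
A worked mask example (module and board assumed for illustration): say the
module advertises soft TX_DISABLE and soft RX_LOS in its enhanced options,
and the board wires up MOD-DEF0 and LOS GPIOs but no TX_FAULT pin:

	mask            = SFP_F_TX_DISABLE | SFP_F_LOS
	state_hw_mask   = SFP_F_PRESENT | SFP_F_LOS
	state_soft_mask = ~state_hw_mask & mask = SFP_F_TX_DISABLE

So LOS keeps coming from the GPIO in sfp_get_state(), while TX_DISABLE is
driven through the A2h soft state; signals a quirk strips from
state_hw_mask (as in sfp_fixup_halny_gsfp()) are handled the same way.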
@@ -1194,90 +1347,45 @@ static const struct hwmon_ops sfp_hwmon_ops = {
.read_string = sfp_hwmon_read_string,
};
-static u32 sfp_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_chip = {
- .type = hwmon_chip,
- .config = sfp_hwmon_chip_config,
-};
-
-static u32 sfp_hwmon_temp_config[] = {
- HWMON_T_INPUT |
- HWMON_T_MAX | HWMON_T_MIN |
- HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
- HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
- HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = {
- .type = hwmon_temp,
- .config = sfp_hwmon_temp_config,
-};
-
-static u32 sfp_hwmon_vcc_config[] = {
- HWMON_I_INPUT |
- HWMON_I_MAX | HWMON_I_MIN |
- HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
- HWMON_I_CRIT | HWMON_I_LCRIT |
- HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
- HWMON_I_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = {
- .type = hwmon_in,
- .config = sfp_hwmon_vcc_config,
-};
-
-static u32 sfp_hwmon_bias_config[] = {
- HWMON_C_INPUT |
- HWMON_C_MAX | HWMON_C_MIN |
- HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
- HWMON_C_CRIT | HWMON_C_LCRIT |
- HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
- HWMON_C_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = {
- .type = hwmon_curr,
- .config = sfp_hwmon_bias_config,
-};
-
-static u32 sfp_hwmon_power_config[] = {
- /* Transmit power */
- HWMON_P_INPUT |
- HWMON_P_MAX | HWMON_P_MIN |
- HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
- HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
- HWMON_P_LABEL,
- /* Receive power */
- HWMON_P_INPUT |
- HWMON_P_MAX | HWMON_P_MIN |
- HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
- HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
- HWMON_P_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_power_channel_info = {
- .type = hwmon_power,
- .config = sfp_hwmon_power_config,
-};
-
static const struct hwmon_channel_info *sfp_hwmon_info[] = {
- &sfp_hwmon_chip,
- &sfp_hwmon_vcc_channel_info,
- &sfp_hwmon_temp_channel_info,
- &sfp_hwmon_bias_channel_info,
- &sfp_hwmon_power_channel_info,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT |
+ HWMON_I_MAX | HWMON_I_MIN |
+ HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
+ HWMON_I_CRIT | HWMON_I_LCRIT |
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
+ HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT |
+ HWMON_C_MAX | HWMON_C_MIN |
+ HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
+ HWMON_C_CRIT | HWMON_C_LCRIT |
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ /* Transmit power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
+ /* Receive power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL),
NULL,
};
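
The HWMON_CHANNEL_INFO() helper packs each channel's type and config array
into an anonymous compound literal; roughly (a simplified sketch, see
<linux/hwmon.h> for the real definition):

	#define HWMON_CHANNEL_INFO(stype, ...)		\
		(&(struct hwmon_channel_info) {		\
			.type = hwmon_##stype,		\
			.config = (u32 []) {		\
				__VA_ARGS__, 0		\
			}				\
		})

The trailing 0 terminates each config array, replacing the explicit 0
terminators in the static arrays deleted above.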
@@ -1289,7 +1397,7 @@ static const struct hwmon_chip_info sfp_hwmon_chip_info = {
static void sfp_hwmon_probe(struct work_struct *work)
{
struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
- int err, i;
+ int err;
/* hwmon interface needs to access 16bit registers in atomic way to
* guarantee coherency of the diagnostic monitoring data. If it is not
@@ -1311,21 +1419,18 @@ static void sfp_hwmon_probe(struct work_struct *work)
mod_delayed_work(system_wq, &sfp->hwmon_probe,
T_PROBE_RETRY_SLOW);
} else {
- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+ ERR_PTR(err));
}
return;
}
- sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
- if (!sfp->hwmon_name) {
+ sfp->hwmon_name = hwmon_sanitize_name(dev_name(sfp->dev));
+ if (IS_ERR(sfp->hwmon_name)) {
dev_err(sfp->dev, "out of memory for hwmon name\n");
return;
}
- for (i = 0; sfp->hwmon_name[i]; i++)
- if (hwmon_is_bad_char(sfp->hwmon_name[i]))
- sfp->hwmon_name[i] = '_';
-
sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev,
sfp->hwmon_name, sfp,
&sfp_hwmon_chip_info,
@@ -1507,23 +1612,24 @@ static void sfp_sm_phy_detach(struct sfp *sfp)
sfp->mod_phy = NULL;
}
-static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
+static int sfp_sm_probe_phy(struct sfp *sfp, int addr, bool is_c45)
{
struct phy_device *phy;
int err;
- phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ phy = get_phy_device(sfp->i2c_mii, addr, is_c45);
if (phy == ERR_PTR(-ENODEV))
return PTR_ERR(phy);
if (IS_ERR(phy)) {
- dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+ dev_err(sfp->dev, "mdiobus scan returned %pe\n", phy);
return PTR_ERR(phy);
}
err = phy_device_register(phy);
if (err) {
phy_device_free(phy);
- dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+ dev_err(sfp->dev, "phy_device_register failed: %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -1531,7 +1637,7 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
if (err) {
phy_device_remove(phy);
phy_device_free(phy);
- dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
+ dev_err(sfp->dev, "sfp_add_phy failed: %pe\n", ERR_PTR(err));
return err;
}
@@ -1607,6 +1713,14 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
}
}
+static int sfp_sm_add_mdio_bus(struct sfp *sfp)
+{
+ if (sfp->mdio_protocol != MDIO_I2C_NONE)
+ return sfp_i2c_mdiobus_create(sfp);
+
+ return 0;
+}
+
/* Probe an SFP for a PHY device if the module supports copper - the PHY
* normally sits at I2C bus address 0x56, and may either be a clause 22
* or clause 45 PHY.
@@ -1622,36 +1736,43 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp)
{
int err = 0;
- switch (sfp->id.base.extended_cc) {
- case SFF8024_ECC_10GBASE_T_SFI:
- case SFF8024_ECC_10GBASE_T_SR:
- case SFF8024_ECC_5GBASE_T:
- case SFF8024_ECC_2_5GBASE_T:
- err = sfp_sm_probe_phy(sfp, true);
+ switch (sfp->mdio_protocol) {
+ case MDIO_I2C_NONE:
break;
- default:
- if (sfp->id.base.e1000_base_t)
- err = sfp_sm_probe_phy(sfp, false);
+ case MDIO_I2C_MARVELL_C22:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, false);
+ break;
+
+ case MDIO_I2C_C45:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, true);
+ break;
+
+ case MDIO_I2C_ROLLBALL:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR_ROLLBALL, true);
break;
}
+
return err;
}
static int sfp_module_parse_power(struct sfp *sfp)
{
u32 power_mW = 1000;
+ bool supports_a2;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
power_mW = 1500;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
power_mW = 2000;
+ supports_a2 = sfp->id.ext.sff8472_compliance !=
+ SFP_SFF8472_COMPLIANCE_NONE ||
+ sfp->id.ext.diagmon & SFP_DIAGMON_DDM;
+
if (power_mW > sfp->max_power_mW) {
/* Module power specification exceeds the allowed maximum. */
- if (sfp->id.ext.sff8472_compliance ==
- SFP_SFF8472_COMPLIANCE_NONE &&
- !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) {
+ if (!supports_a2) {
/* The module appears not to implement bus address
* 0xa2, so assume that the module powers up in the
* indicated mode.
@@ -1668,11 +1789,25 @@ static int sfp_module_parse_power(struct sfp *sfp)
}
}
+ if (power_mW <= 1000) {
+ /* Modules below 1W do not require a power change sequence */
+ sfp->module_power_mW = power_mW;
+ return 0;
+ }
+
+ if (!supports_a2) {
+ /* The module power level is below the host maximum and the
+ * module appears not to implement bus address 0xa2, so assume
+ * that the module powers up in the indicated mode.
+ */
+ return 0;
+ }
+
/* If the module requires a higher power mode, but also requires
* an address change sequence, warn the user that the module may
* not be functional.
*/
- if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) {
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) {
dev_warn(sfp->dev,
"Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n",
power_mW / 1000, (power_mW / 100) % 10);
@@ -1691,7 +1826,7 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
+ dev_err(sfp->dev, "Failed to read EEPROM: %pe\n", ERR_PTR(err));
return -EAGAIN;
}
@@ -1709,7 +1844,8 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
+ dev_err(sfp->dev, "Failed to write EEPROM: %pe\n",
+ ERR_PTR(err));
return -EAGAIN;
}
@@ -1761,7 +1897,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
id->base.connector = SFF8024_CONNECTOR_LC;
err = sfp_write(sfp, false, SFP_PHYS_ID, &id->base, 3);
if (err != 3) {
- dev_err(sfp->dev, "Failed to rewrite module EEPROM: %d\n", err);
+ dev_err(sfp->dev,
+ "Failed to rewrite module EEPROM: %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -1772,7 +1910,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
check = sfp_check(&id->base, sizeof(id->base) - 1);
err = sfp_write(sfp, false, SFP_CC_BASE, &check, 1);
if (err != 1) {
- dev_err(sfp->dev, "Failed to update base structure checksum in fiber module EEPROM: %d\n", err);
+ dev_err(sfp->dev,
+ "Failed to update base structure checksum in fiber module EEPROM: %pe\n",
+ ERR_PTR(err));
return err;
}
}
@@ -1797,12 +1937,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ dev_err(sfp->dev, "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.base)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
return -EAGAIN;
}
@@ -1822,13 +1963,15 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n",
- ret);
+ dev_err(sfp->dev,
+ "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.base)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
}
@@ -1870,12 +2013,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ dev_err(sfp->dev, "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.ext)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
return -EAGAIN;
}
@@ -1922,11 +2066,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
if (ret < 0)
return ret;
- if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
- !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
- sfp->module_t_start_up = T_START_UP_BAD_GPON;
+ /* Initialise state bits to use from hardware */
+ sfp->state_hw_mask = SFP_F_PRESENT;
+ if (sfp->gpio[GPIO_TX_DISABLE])
+ sfp->state_hw_mask |= SFP_F_TX_DISABLE;
+ if (sfp->gpio[GPIO_TX_FAULT])
+ sfp->state_hw_mask |= SFP_F_TX_FAULT;
+ if (sfp->gpio[GPIO_LOS])
+ sfp->state_hw_mask |= SFP_F_LOS;
+
+ sfp->module_t_start_up = T_START_UP;
+ sfp->module_t_wait = T_WAIT;
+
+ sfp->tx_fault_ignore = false;
+
+ if (sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SFI ||
+ sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SR ||
+ sfp->id.base.extended_cc == SFF8024_ECC_5GBASE_T ||
+ sfp->id.base.extended_cc == SFF8024_ECC_2_5GBASE_T)
+ sfp->mdio_protocol = MDIO_I2C_C45;
+ else if (sfp->id.base.e1000_base_t)
+ sfp->mdio_protocol = MDIO_I2C_MARVELL_C22;
else
- sfp->module_t_start_up = T_START_UP;
+ sfp->mdio_protocol = MDIO_I2C_NONE;
+
+ sfp->quirk = sfp_lookup_quirk(&id);
+ if (sfp->quirk && sfp->quirk->fixup)
+ sfp->quirk->fixup(sfp);
return 0;
}
@@ -2029,7 +2195,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
err = sfp_hwmon_insert(sfp);
if (err)
- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+ ERR_PTR(err));
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
fallthrough;
@@ -2039,7 +2206,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
break;
/* Report the module insertion to the upstream device */
- err = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id,
+ sfp->quirk);
if (err < 0) {
sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
break;
@@ -2098,6 +2266,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp_module_stop(sfp->sfp_bus);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
+ if (sfp->i2c_mii)
+ sfp_i2c_mdiobus_destroy(sfp);
sfp_module_tx_disable(sfp);
sfp_soft_stop_poll(sfp);
sfp_sm_next(sfp, SFP_S_DOWN, 0);
@@ -2121,9 +2291,10 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
/* We need to check the TX_FAULT state, which is not defined
* while TX_DISABLE is asserted. The earliest we want to do
- * anything (such as probe for a PHY) is 50ms.
+ * anything (such as probe for a PHY) is 50ms (or more on
+ * specific modules).
*/
- sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT);
+ sfp_sm_next(sfp, SFP_S_WAIT, sfp->module_t_wait);
break;
case SFP_S_WAIT:
@@ -2137,8 +2308,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
* deasserting.
*/
timeout = sfp->module_t_start_up;
- if (timeout > T_WAIT)
- timeout -= T_WAIT;
+ if (timeout > sfp->module_t_wait)
+ timeout -= sfp->module_t_wait;
else
timeout = 1;
@@ -2160,6 +2331,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp->sm_fault_retries == N_FAULT_INIT);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
init_done:
+ /* Create mdiobus and start trying for PHY */
+ ret = sfp_sm_add_mdio_bus(sfp);
+ if (ret < 0) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
+ }
sfp->sm_phy_retries = R_PHY_RETRY;
goto phy_probe;
}
@@ -2380,7 +2557,10 @@ static void sfp_check_state(struct sfp *sfp)
mutex_lock(&sfp->st_mutex);
state = sfp_get_state(sfp);
changed = state ^ sfp->state;
- changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
+ if (sfp->tx_fault_ignore)
+ changed &= SFP_F_PRESENT | SFP_F_LOS;
+ else
+ changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
for (i = 0; i < GPIO_MAX; i++)
if (changed & BIT(i))
@@ -2477,7 +2657,7 @@ static int sfp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
- err = devm_add_action(sfp->dev, sfp_cleanup, sfp);
+ err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
if (err < 0)
return err;
@@ -2538,6 +2718,8 @@ static int sfp_probe(struct platform_device *pdev)
return PTR_ERR(sfp->gpio[i]);
}
+ sfp->state_hw_mask = SFP_F_PRESENT;
+
sfp->get_state = sfp_gpio_get_state;
sfp->set_state = sfp_gpio_set_state;
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 27226535c72b..6cf1643214d3 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -6,6 +6,14 @@
struct sfp;
+struct sfp_quirk {
+ const char *vendor;
+ const char *part;
+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes,
+ unsigned long *interfaces);
+ void (*fixup)(struct sfp *sfp);
+};
+
struct sfp_socket_ops {
void (*attach)(struct sfp *sfp);
void (*detach)(struct sfp *sfp);
@@ -23,7 +31,8 @@ int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev);
void sfp_remove_phy(struct sfp_bus *bus);
void sfp_link_up(struct sfp_bus *bus);
void sfp_link_down(struct sfp_bus *bus);
-int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk);
void sfp_module_remove(struct sfp_bus *bus);
int sfp_module_start(struct sfp_bus *bus);
void sfp_module_stop(struct sfp_bus *bus);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index d8cac02a79b9..ac7481ce2fc1 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -44,8 +44,8 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
};
struct smsc_phy_priv {
+ u16 intmask;
bool energy_enable;
- struct clk *refclk;
};
static int smsc_phy_ack_interrupt(struct phy_device *phydev)
@@ -58,7 +58,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_intr(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
- u16 intmask = 0;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -66,12 +65,15 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
if (rc)
return rc;
- intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
+ priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
if (priv->energy_enable)
- intmask |= MII_LAN83C185_ISF_INT7;
- rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ priv->intmask |= MII_LAN83C185_ISF_INT7;
+
+ rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
} else {
- rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ priv->intmask = 0;
+
+ rc = phy_write(phydev, MII_LAN83C185_IM, 0);
if (rc)
return rc;
@@ -83,21 +85,18 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
{
- int irq_status, irq_enabled;
-
- irq_enabled = phy_read(phydev, MII_LAN83C185_IM);
- if (irq_enabled < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
+ struct smsc_phy_priv *priv = phydev->priv;
+ int irq_status;
irq_status = phy_read(phydev, MII_LAN83C185_ISF);
if (irq_status < 0) {
- phy_error(phydev);
+ if (irq_status != -ENODEV)
+ phy_error(phydev);
+
return IRQ_NONE;
}
- if (!(irq_status & irq_enabled))
+ if (!(irq_status & priv->intmask))
return IRQ_NONE;
phy_trigger_machine(phydev);
@@ -110,7 +109,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
struct smsc_phy_priv *priv = phydev->priv;
int rc;
- if (!priv->energy_enable)
+ if (!priv->energy_enable || phydev->irq != PHY_POLL)
return 0;
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -121,10 +120,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
-
- return smsc_phy_ack_interrupt(phydev);
+ return rc;
}
static int smsc_phy_reset(struct phy_device *phydev)
@@ -146,11 +142,6 @@ static int smsc_phy_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static int lan911x_config_init(struct phy_device *phydev)
-{
- return smsc_phy_ack_interrupt(phydev);
-}
-
static int lan87xx_config_aneg(struct phy_device *phydev)
{
int rc;
@@ -210,6 +201,8 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
* response on link pulses to detect presence of plugged Ethernet cable.
* The Energy Detect Power-Down mode is enabled again in the end of procedure to
* save approximately 220 mW of power if cable is unplugged.
+ * The workaround is only applicable to poll mode. Energy Detect Power-Down may
+ * not be used in interrupt mode lest link change detection becomes unreliable.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
@@ -217,7 +210,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
int err = genphy_read_status(phydev);
- if (!phydev->link && priv->energy_enable) {
+ if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
/* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
@@ -291,20 +284,12 @@ static void smsc_get_stats(struct phy_device *phydev,
data[i] = smsc_get_stat(phydev, i);
}
-static void smsc_phy_remove(struct phy_device *phydev)
-{
- struct smsc_phy_priv *priv = phydev->priv;
-
- clk_disable_unprepare(priv->refclk);
- clk_put(priv->refclk);
-}
-
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
struct smsc_phy_priv *priv;
- int ret;
+ struct clk *refclk;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -318,22 +303,12 @@ static int smsc_phy_probe(struct phy_device *phydev)
phydev->priv = priv;
/* Make clk optional to keep DTB backward compatibility. */
- priv->refclk = clk_get_optional(dev, NULL);
- if (IS_ERR(priv->refclk))
- return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ refclk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(refclk))
+ return dev_err_probe(dev, PTR_ERR(refclk),
"Failed to request clock\n");
- ret = clk_prepare_enable(priv->refclk);
- if (ret)
- return ret;
-
- ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
- if (ret) {
- clk_disable_unprepare(priv->refclk);
- return ret;
- }
-
- return 0;
+ return clk_set_rate(refclk, 50 * 1000 * 1000);
}
static struct phy_driver smsc_phy_driver[] = {
@@ -418,9 +393,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
- /* basic functions */
- .config_init = lan911x_config_init,
-
/* IRQ related */
.config_intr = smsc_phy_config_intr,
.handle_interrupt = smsc_phy_handle_interrupt,
@@ -438,7 +410,6 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
- .remove = smsc_phy_remove,
/* basic functions */
.read_status = lan87xx_read_status,
@@ -483,6 +454,36 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = 0x0007c130, /* 0x0007c130 and 0x0007c131 */
+ /* This mask (0xfffffff2) is to differentiate from
+ * LAN88xx (phy_id 0x0007c132)
+ * and allows future phy_id revisions.
+ */
+ .phy_id_mask = 0xfffffff2,
+ .name = "Microchip LAN8742",
+
+ /* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN,
+
+ .probe = smsc_phy_probe,
+
+ /* basic functions */
+ .read_status = lan87xx_read_status,
+ .config_init = smsc_phy_config_init,
+ .soft_reset = smsc_phy_reset,
+
+ /* IRQ related */
+ .config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
+
+ /* Statistics */
+ .get_sset_count = smsc_get_sset_count,
+ .get_strings = smsc_get_strings,
+ .get_stats = smsc_get_stats,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(smsc_phy_driver);
@@ -498,6 +499,7 @@ static struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0d0, 0xfffffff0 },
{ 0x0007c0f0, 0xfffffff0 },
{ 0x0007c110, 0xfffffff0 },
+ { 0x0007c130, 0xfffffff2 },
{ }
};
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 8b5445a724ce..d4202d40d47a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
@@ -137,15 +136,10 @@ static const struct ks8995_chip_params ks8995_chip[] = {
},
};
-struct ks8995_pdata {
- int reset_gpio;
- enum of_gpio_flags reset_gpio_flags;
-};
-
struct ks8995_switch {
struct spi_device *spi;
struct mutex lock;
- struct ks8995_pdata *pdata;
+ struct gpio_desc *reset_gpio;
struct bin_attribute regs_attr;
const struct ks8995_chip_params *chip;
int revision_id;
@@ -401,24 +395,6 @@ err_out:
return err;
}
-/* ks8995_parse_dt - setup platform data from devicetree
- * @ks: pointer to switch instance
- *
- * Parses supported DT properties and sets up platform data
- * accordingly.
- */
-static void ks8995_parse_dt(struct ks8995_switch *ks)
-{
- struct device_node *np = ks->spi->dev.of_node;
- struct ks8995_pdata *pdata = ks->pdata;
-
- if (!np)
- return;
-
- pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0,
- &pdata->reset_gpio_flags);
-}
-
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
@@ -449,38 +425,22 @@ static int ks8995_probe(struct spi_device *spi)
ks->spi = spi;
ks->chip = &ks8995_chip[variant];
- if (ks->spi->dev.of_node) {
- ks->pdata = devm_kzalloc(&spi->dev, sizeof(*ks->pdata),
- GFP_KERNEL);
- if (!ks->pdata)
- return -ENOMEM;
-
- ks->pdata->reset_gpio = -1;
-
- ks8995_parse_dt(ks);
+ ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(ks->reset_gpio);
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset gpio: %d\n", err);
+ return err;
}
- if (!ks->pdata)
- ks->pdata = spi->dev.platform_data;
+ err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
+ if (err)
+ return err;
/* de-assert switch reset */
- if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) {
- unsigned long flags;
-
- flags = (ks->pdata->reset_gpio_flags == OF_GPIO_ACTIVE_LOW ?
- GPIOF_ACTIVE_LOW : 0);
-
- err = devm_gpio_request_one(&spi->dev,
- ks->pdata->reset_gpio,
- flags, "switch-reset");
- if (err) {
- dev_err(&spi->dev,
- "failed to get reset-gpios: %d\n", err);
- return -EIO;
- }
-
- gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 0);
- }
+ /* FIXME: this likely requires a delay */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
spi_set_drvdata(spi, ks);
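
The descriptor-based API makes the optional reset handling self-checking:
devm_gpiod_get_optional() returns NULL when no reset-gpios property
exists, every gpiod_* call treats a NULL descriptor as a no-op, and the
active-low polarity now comes from the device tree flags instead of
driver code. The pattern, condensed:

	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);		/* e.g. -EPROBE_DEFER */

	/* requested asserted (logical 1); deassert to release the device */
	gpiod_set_value_cansleep(reset, 0);	/* no-op when reset == NULL */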
@@ -517,17 +477,14 @@ static int ks8995_probe(struct spi_device *spi)
return 0;
}
-static int ks8995_remove(struct spi_device *spi)
+static void ks8995_remove(struct spi_device *spi)
{
struct ks8995_switch *ks = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
/* assert reset */
- if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio))
- gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1);
-
- return 0;
+ gpiod_set_value_cansleep(ks->reset_gpio, 1);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 0d491b4d6667..c8791e9b451d 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -676,7 +676,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
- netif_rx_ni(rcv->skb);
+ netif_rx(rcv->skb);
dev->stats.rx_bytes += rcv->length.h;
dev->stats.rx_packets++;
rcv->skb = NULL;
@@ -1111,7 +1111,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index f4429b93a9c8..15a179631903 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -281,8 +281,7 @@ ppp_asynctty_write(struct tty_struct *tty, struct file *file,
*/
static int
-ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = ap_get(tty);
int err, val;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4a365f15533e..9206c660a72e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2968,7 +2968,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
chan->ppp = NULL;
/*
- * This ensures that we have returned from any calls into the
+ * This ensures that we have returned from any calls into
* the channel's start_xmit or ioctl routine before we proceed.
*/
down_write(&pch->chan_sem);
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index b3a71b409a80..18283b7b94bc 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -274,8 +274,7 @@ ppp_sync_write(struct tty_struct *tty, struct file *file,
}
static int
-ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct syncppp *ap = sp_get(tty);
int __user *p = (int __user *)arg;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3619520340b7..ce2cbb5903d7 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;
@@ -1011,8 +1012,7 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
goto end;
}
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &error);
+ skb = skb_recv_datagram(sk, flags, &error);
if (error < 0)
goto end;
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
new file mode 100644
index 000000000000..687dec49c1e1
--- /dev/null
+++ b/drivers/net/pse-pd/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Ethernet Power Sourcing Equipment drivers
+#
+
+menuconfig PSE_CONTROLLER
+ bool "Ethernet Power Sourcing Equipment Support"
+ help
+ Generic Power Sourcing Equipment Controller support.
+
+ If unsure, say no.
+
+if PSE_CONTROLLER
+
+config PSE_REGULATOR
+ tristate "Regulator based PSE controller"
+ depends on REGULATOR || COMPILE_TEST
+ help
+ This module provides support for simple regulator based Ethernet Power
+ Sourcing Equipment without automatic classification support, for
+ example a basic implementation of the PoDL (802.3bu) specification.
+
+endif
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
new file mode 100644
index 000000000000..1b8aa4c70f0b
--- /dev/null
+++ b/drivers/net/pse-pd/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for Linux PSE drivers
+
+obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
+
+obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
new file mode 100644
index 000000000000..146b81f08a89
--- /dev/null
+++ b/drivers/net/pse-pd/pse_core.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Framework for Ethernet Power Sourcing Equipment
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/pse-pd/pse.h>
+
+static DEFINE_MUTEX(pse_list_mutex);
+static LIST_HEAD(pse_controller_list);
+
+/**
+ * struct pse_control - a PSE control
+ * @pcdev: a pointer to the PSE controller device
+ * this PSE control belongs to
+ * @list: list entry for the pcdev's PSE controller list
+ * @id: ID of the PSE line in the PSE controller device
+ * @refcnt: Number of gets of this pse_control
+ */
+struct pse_control {
+ struct pse_controller_dev *pcdev;
+ struct list_head list;
+ unsigned int id;
+ struct kref refcnt;
+};
+
+/**
+ * of_pse_zero_xlate - dummy function for controllers with only one control
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
+ * controllers with #pse-cells = <0>.
+ */
+static int of_pse_zero_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ return 0;
+}
+
+/**
+ * of_pse_simple_xlate - translate pse_spec to the PSE line number
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
+ * controllers with 1:1 mapping, where PSE lines can be indexed by number
+ * without gaps.
+ */
+static int of_pse_simple_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ if (pse_spec->args[0] >= pcdev->nr_lines)
+ return -EINVAL;
+
+ return pse_spec->args[0];
+}
+
+/**
+ * pse_controller_register - register a PSE controller device
+ * @pcdev: a pointer to the initialized PSE controller device
+ */
+int pse_controller_register(struct pse_controller_dev *pcdev)
+{
+ if (!pcdev->of_xlate) {
+ if (pcdev->of_pse_n_cells == 0)
+ pcdev->of_xlate = of_pse_zero_xlate;
+ else if (pcdev->of_pse_n_cells == 1)
+ pcdev->of_xlate = of_pse_simple_xlate;
+ }
+
+ mutex_init(&pcdev->lock);
+ INIT_LIST_HEAD(&pcdev->pse_control_head);
+
+ mutex_lock(&pse_list_mutex);
+ list_add(&pcdev->list, &pse_controller_list);
+ mutex_unlock(&pse_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pse_controller_register);
+
+/**
+ * pse_controller_unregister - unregister a PSE controller device
+ * @pcdev: a pointer to the PSE controller device
+ */
+void pse_controller_unregister(struct pse_controller_dev *pcdev)
+{
+ mutex_lock(&pse_list_mutex);
+ list_del(&pcdev->list);
+ mutex_unlock(&pse_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pse_controller_unregister);
+
+static void devm_pse_controller_release(struct device *dev, void *res)
+{
+ pse_controller_unregister(*(struct pse_controller_dev **)res);
+}
+
+/**
+ * devm_pse_controller_register - resource managed pse_controller_register()
+ * @dev: device that is registering this PSE controller
+ * @pcdev: a pointer to the initialized PSE controller device
+ *
+ * Managed pse_controller_register(). For PSE controllers registered by
+ * this function, pse_controller_unregister() is automatically called on
+ * driver detach. See pse_controller_register() for more information.
+ */
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev)
+{
+ struct pse_controller_dev **pcdevp;
+ int ret;
+
+ pcdevp = devres_alloc(devm_pse_controller_release, sizeof(*pcdevp),
+ GFP_KERNEL);
+ if (!pcdevp)
+ return -ENOMEM;
+
+ ret = pse_controller_register(pcdev);
+ if (ret) {
+ devres_free(pcdevp);
+ return ret;
+ }
+
+ *pcdevp = pcdev;
+ devres_add(dev, pcdevp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_pse_controller_register);
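
Provider side, sketched with only the fields visible in this patch (the
ops table and line count are hypothetical):

	static const struct pse_controller_ops example_pse_ops = {
		.ethtool_get_status = example_get_status,	/* hypothetical */
		.ethtool_set_config = example_set_config,	/* hypothetical */
	};

	priv->pcdev.owner = THIS_MODULE;
	priv->pcdev.ops = &example_pse_ops;
	priv->pcdev.dev = dev;
	priv->pcdev.nr_lines = 4;		/* four PSE ports */
	priv->pcdev.of_pse_n_cells = 1;		/* DT uses #pse-cells = <1> */
	ret = devm_pse_controller_register(dev, &priv->pcdev);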
+
+/* PSE control section */
+
+static void __pse_control_release(struct kref *kref)
+{
+ struct pse_control *psec = container_of(kref, struct pse_control,
+ refcnt);
+
+ lockdep_assert_held(&pse_list_mutex);
+
+ module_put(psec->pcdev->owner);
+
+ list_del(&psec->list);
+ kfree(psec);
+}
+
+static void __pse_control_put_internal(struct pse_control *psec)
+{
+ lockdep_assert_held(&pse_list_mutex);
+
+ kref_put(&psec->refcnt, __pse_control_release);
+}
+
+/**
+ * pse_control_put - free the PSE control
+ * @psec: PSE control pointer
+ */
+void pse_control_put(struct pse_control *psec)
+{
+ if (IS_ERR_OR_NULL(psec))
+ return;
+
+ mutex_lock(&pse_list_mutex);
+ __pse_control_put_internal(psec);
+ mutex_unlock(&pse_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pse_control_put);
+
+static struct pse_control *
+pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
+{
+ struct pse_control *psec;
+
+ lockdep_assert_held(&pse_list_mutex);
+
+ list_for_each_entry(psec, &pcdev->pse_control_head, list) {
+ if (psec->id == index) {
+ kref_get(&psec->refcnt);
+ return psec;
+ }
+ }
+
+ psec = kzalloc(sizeof(*psec), GFP_KERNEL);
+ if (!psec)
+ return ERR_PTR(-ENOMEM);
+
+ if (!try_module_get(pcdev->owner)) {
+ kfree(psec);
+ return ERR_PTR(-ENODEV);
+ }
+
+ psec->pcdev = pcdev;
+ list_add(&psec->list, &pcdev->pse_control_head);
+ psec->id = index;
+ kref_init(&psec->refcnt);
+
+ return psec;
+}
+
+struct pse_control *
+of_pse_control_get(struct device_node *node)
+{
+ struct pse_controller_dev *r, *pcdev;
+ struct of_phandle_args args;
+ struct pse_control *psec;
+ int psec_id;
+ int ret;
+
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_parse_phandle_with_args(node, "pses", "#pse-cells", 0, &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_lock(&pse_list_mutex);
+ pcdev = NULL;
+ list_for_each_entry(r, &pse_controller_list, list) {
+ if (args.np == r->dev->of_node) {
+ pcdev = r;
+ break;
+ }
+ }
+
+ if (!pcdev) {
+ psec = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ if (WARN_ON(args.args_count != pcdev->of_pse_n_cells)) {
+ psec = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ psec_id = pcdev->of_xlate(pcdev, &args);
+ if (psec_id < 0) {
+ psec = ERR_PTR(psec_id);
+ goto out;
+ }
+
+ /* pse_list_mutex also protects the pcdev's pse_control list */
+ psec = pse_control_get_internal(pcdev, psec_id);
+
+out:
+ mutex_unlock(&pse_list_mutex);
+ of_node_put(args.np);
+
+ return psec;
+}
+EXPORT_SYMBOL_GPL(of_pse_control_get);
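
Consumer side, sketched (the real call sites arrive in later patches,
outside this diff):

	struct pse_control *psec;

	psec = of_pse_control_get(dev->of_node);	/* parses "pses" */
	if (IS_ERR(psec))
		return PTR_ERR(psec);	/* -EPROBE_DEFER until provider binds */

	/* ... pse_ethtool_get_status() / pse_ethtool_set_config() ... */

	pse_control_put(psec);	/* drops the kref; freed on the last put */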
+
+/**
+ * pse_ethtool_get_status - get status of PSE control
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @status: struct to store PSE status
+ */
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ const struct pse_controller_ops *ops;
+ int err;
+
+ ops = psec->pcdev->ops;
+
+ if (!ops->ethtool_get_status) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not support status report");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&psec->pcdev->lock);
+ err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status);
+ mutex_unlock(&psec->pcdev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
+
+/**
+ * pse_ethtool_set_config - set PSE control configuration
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @config: Configuration of the test to run
+ */
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ const struct pse_controller_ops *ops;
+ int err;
+
+ ops = psec->pcdev->ops;
+
+ if (!ops->ethtool_set_config) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not configuration");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&psec->pcdev->lock);
+ err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config);
+ mutex_unlock(&psec->pcdev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
new file mode 100644
index 000000000000..e2bf8306ca90
--- /dev/null
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Driver for the regulator based Ethernet Power Sourcing Equipment, without
+// auto classification support.
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+#include <linux/regulator/consumer.h>
+
+struct pse_reg_priv {
+ struct pse_controller_dev pcdev;
+ struct regulator *ps; /* power source */
+ enum ethtool_podl_pse_admin_state admin_state;
+};
+
+static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct pse_reg_priv, pcdev);
+}
+
+static int
+pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+
+ if (priv->admin_state == config->admin_cotrol)
+ return 0;
+
+ switch (config->admin_cotrol) {
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
+ ret = regulator_enable(priv->ps);
+ break;
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
+ ret = regulator_disable(priv->ps);
+ break;
+ default:
+ dev_err(pcdev->dev, "Unknown admin state %i\n",
+ config->admin_cotrol);
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ priv->admin_state = config->admin_cotrol;
+
+ return 0;
+}
+
+static int
+pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+
+ ret = regulator_is_enabled(priv->ps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ else
+ status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
+
+ status->podl_admin_state = priv->admin_state;
+
+ return 0;
+}
+
+static const struct pse_controller_ops pse_reg_ops = {
+ .ethtool_get_status = pse_reg_ethtool_get_status,
+ .ethtool_set_config = pse_reg_ethtool_set_config,
+};
+
+static int
+pse_reg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pse_reg_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!pdev->dev.of_node)
+ return -ENOENT;
+
+ priv->ps = devm_regulator_get_exclusive(dev, "pse");
+ if (IS_ERR(priv->ps))
+ return dev_err_probe(dev, PTR_ERR(priv->ps),
+ "failed to get PSE regulator.\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = regulator_is_enabled(priv->ps);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ else
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
+
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &pse_reg_ops;
+ priv->pcdev.dev = dev;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ dev_err(dev, "failed to register PSE controller (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id pse_reg_of_match[] = {
+ { .compatible = "podl-pse-regulator", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pse_reg_of_match);
+
+static struct platform_driver pse_reg_driver = {
+ .probe = pse_reg_probe,
+ .driver = {
+ .name = "PSE regulator",
+ .of_match_table = of_match_ptr(pse_reg_of_match),
+ },
+};
+module_platform_driver(pse_reg_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("regulator based Ethernet Power Sourcing Equipment");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pse-regulator");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 1a95f3beb784..fbcb9d05da64 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -109,7 +109,7 @@ static int rionet_rx_clean(struct net_device *ndev)
skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
rnet->rx_skb[i]->protocol =
eth_type_trans(rnet->rx_skb[i], ndev);
- error = netif_rx(rnet->rx_skb[i]);
+ error = __netif_rx(rnet->rx_skb[i]);
if (error == NET_RX_DROP) {
ndev->stats.rx_dropped++;
@@ -443,10 +443,10 @@ static void rionet_get_drvinfo(struct net_device *ndev,
{
struct rionet_private *rnet = netdev_priv(ndev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
- strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "n/a", sizeof(info->fw_version));
+ strscpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}
static u32 rionet_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 57a6d598467b..c3f8020571ad 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -872,7 +872,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3
/* datagram completed: send to upper level */
skb_trim(skb, dlen);
- netif_rx(skb);
+ __netif_rx(skb);
stats->rx_bytes+=dlen;
stats->rx_packets++;
lp->rx_skb[ns] = NULL;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 9f3b4c1aa5ce..6865d32270e5 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -368,7 +368,7 @@ static void sl_bump(struct slip *sl)
skb_put_data(skb, sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
- netif_rx_ni(skb);
+ netif_rx(skb);
dev->stats.rx_packets++;
}
@@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
- if (!netif_running(dev))
+ if (!netif_running(dev) || !sl->tty)
goto out;
/* May be we must check transmitter timeout here ?
@@ -1072,8 +1072,8 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
#endif /* CONFIG_SLIP_MODE_SLIP6 */
/* Perform I/O control on an active SLIP channel. */
-static int slip_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct slip *sl = tty->disc_data;
unsigned int tmp;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 4daac5fda073..36803d932dff 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -29,11 +29,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
-
+#include <linux/of.h>
#include <linux/sungem_phy.h>
/* Link modes of the BCM5400 PHY */
@@ -454,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0;
+ of_node_put(np);
if (can_low_power) {
/* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 8e3a28ba6b28..9e75ed3f08ce 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -322,6 +322,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct tap_dev *tap;
struct tap_queue *q;
netdev_features_t features = TAP_FEATURES;
+ enum skb_drop_reason drop_reason;
tap = tap_dev_get_rcu(dev);
if (!tap)
@@ -343,12 +344,16 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
struct sk_buff *next;
- if (IS_ERR(segs))
+ if (IS_ERR(segs)) {
+ drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
goto drop;
+ }
if (!segs) {
- if (ptr_ring_produce(&q->ring, skb))
+ if (ptr_ring_produce(&q->ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
goto wake_up;
}
@@ -356,8 +361,9 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
skb_list_walk_safe(segs, skb, next) {
skb_mark_not_on_list(skb);
if (ptr_ring_produce(&q->ring, skb)) {
- kfree_skb(skb);
- kfree_skb_list(next);
+ drop_reason = SKB_DROP_REASON_FULL_RING;
+ kfree_skb_reason(skb, drop_reason);
+ kfree_skb_list_reason(next, drop_reason);
break;
}
}
@@ -369,10 +375,14 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
*/
if (skb->ip_summed == CHECKSUM_PARTIAL &&
!(features & NETIF_F_CSUM_MASK) &&
- skb_checksum_help(skb))
+ skb_checksum_help(skb)) {
+ drop_reason = SKB_DROP_REASON_SKB_CSUM;
goto drop;
- if (ptr_ring_produce(&q->ring, skb))
+ }
+ if (ptr_ring_produce(&q->ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
}
wake_up:
@@ -383,7 +393,7 @@ drop:
/* Count errors/drops only here, thus don't care about args. */
if (tap->count_rx_dropped)
tap->count_rx_dropped(tap);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);
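
The drop-reason plumbing above follows the standard pattern: record a
reason at each failure point, then report it once via kfree_skb_reason()
so the kfree_skb tracepoint and drop monitor can attribute the loss.
Condensed:

	enum skb_drop_reason drop_reason;

	if (ptr_ring_produce(&q->ring, skb)) {
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}
	return RX_HANDLER_CONSUMED;

drop:
	kfree_skb_reason(skb, drop_reason);	/* reason visible to tracing */
	return RX_HANDLER_CONSUMED;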
@@ -632,6 +642,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
int depth;
bool zerocopy = false;
size_t linear;
+ enum skb_drop_reason drop_reason;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
@@ -696,18 +707,32 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
else
err = skb_copy_datagram_from_iter(skb, 0, from, len);
- if (err)
+ if (err) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto err_kfree;
+ }
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (!tap) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return total_len;
+ }
+ skb->dev = tap->dev;
+
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
- if (err)
+ if (err) {
+ rcu_read_unlock();
+ drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
+ }
}
skb_probe_transport_header(skb);
@@ -717,8 +742,6 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
- rcu_read_lock();
- tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_zcopy_init(skb, msg_control);
@@ -727,18 +750,12 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
uarg->callback(NULL, uarg, false);
}
- if (tap) {
- skb->dev = tap->dev;
- dev_queue_xmit(skb);
- } else {
- kfree_skb(skb);
- }
+ dev_queue_xmit(skb);
rcu_read_unlock();
-
return total_len;
err_kfree:
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
err:
rcu_read_lock();
@@ -1198,7 +1215,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m,
struct xdp_buff *xdp;
int i;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
for (i = 0; i < ctl->num; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
tap_get_user_xdp(q, xdp);
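/* Editor's note (not part of the diff): the tap.c hunks above thread a
 * single enum skb_drop_reason through every failure path so that the one
 * kfree_skb_reason() call at the drop label reports why the packet was
 * dropped. A minimal sketch of the pattern, using declarations from
 * <linux/skbuff.h>; my_ring_produce() is a hypothetical helper standing
 * in for ptr_ring_produce():
 */
static int my_handle(struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason;

	if (my_ring_produce(skb)) {
		/* record the cause at the point of failure ... */
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}
	return 0;

drop:
	/* ... and report it exactly once, at the shared drop label */
	kfree_skb_reason(skb, drop_reason);
	return -1;
}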
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8b2adc56b92a..62ade69295a9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -734,6 +734,11 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
port = team_port_get_rcu(skb->dev);
team = port->team;
if (!team_port_enabled(port)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ /* link-local packets are mostly useful when the stack receives
+ * them on the link they arrive on.
+ */
+ return RX_HANDLER_PASS;
/* allow exact match delivery for disabled ports */
res = RX_HANDLER_EXACT;
} else {
@@ -744,10 +749,10 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
pcpu_stats = this_cpu_ptr(team->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += skb->len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, skb->len);
if (skb->pkt_type == PACKET_MULTICAST)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
skb->dev = team->dev;
@@ -1270,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1344,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1695,6 +1704,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
@@ -1715,8 +1732,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
pcpu_stats = this_cpu_ptr(team->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->tx_packets++;
- pcpu_stats->tx_bytes += len;
+ u64_stats_inc(&pcpu_stats->tx_packets);
+ u64_stats_add(&pcpu_stats->tx_bytes, len);
u64_stats_update_end(&pcpu_stats->syncp);
} else {
this_cpu_inc(team->pcpu_stats->tx_dropped);
@@ -1849,11 +1866,11 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
p = per_cpu_ptr(team->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
- rx_packets = p->rx_packets;
- rx_bytes = p->rx_bytes;
- rx_multicast = p->rx_multicast;
- tx_packets = p->tx_packets;
- tx_bytes = p->tx_bytes;
+ rx_packets = u64_stats_read(&p->rx_packets);
+ rx_bytes = u64_stats_read(&p->rx_bytes);
+ rx_multicast = u64_stats_read(&p->rx_multicast);
+ tx_packets = u64_stats_read(&p->tx_packets);
+ tx_bytes = u64_stats_read(&p->tx_bytes);
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
@@ -1865,9 +1882,9 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
* rx_dropped, tx_dropped & rx_nohandler are u32,
* updated without syncp protection.
*/
- rx_dropped += p->rx_dropped;
- tx_dropped += p->tx_dropped;
- rx_nohandler += p->rx_nohandler;
+ rx_dropped += READ_ONCE(p->rx_dropped);
+ tx_dropped += READ_ONCE(p->tx_dropped);
+ rx_nohandler += READ_ONCE(p->rx_nohandler);
}
stats->rx_dropped = rx_dropped;
stats->tx_dropped = tx_dropped;
@@ -2065,8 +2082,8 @@ static const struct net_device_ops team_netdev_ops = {
static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
@@ -2835,6 +2852,7 @@ static struct genl_family team_nl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = team_nl_ops,
.n_small_ops = ARRAY_SIZE(team_nl_ops),
+ .resv_start_op = TEAM_CMD_PORT_LIST_GET + 1,
.mcgrps = team_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};
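/* Editor's note (not part of the diff): the team.c hunks above move the
 * per-CPU counters to the u64_stats_t API from <linux/u64_stats_sync.h>.
 * Writers bump counters with u64_stats_inc()/u64_stats_add() inside
 * u64_stats_update_begin()/end(); readers use a fetch/retry loop, which
 * keeps 64-bit counters coherent on 32-bit SMP. A reduced sketch:
 */
struct my_pcpu_stats {
	u64_stats_t rx_packets;
	struct u64_stats_sync syncp;
};

static void my_rx_account(struct my_pcpu_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_packets);
	u64_stats_update_end(&s->syncp);
}

static u64 my_rx_read(struct my_pcpu_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		packets = u64_stats_read(&s->rx_packets);
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return packets;
}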
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ff5d0e98a088..83fcaeb2ac5e 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
+ * Networking over Thunderbolt/USB4 cables using USB4NET protocol
+ * (formerly Apple ThunderboltIP).
*
* Copyright (C) 2017, Intel Corporation
* Authors: Amir Levy <amir.jer.levy@intel.com>
@@ -30,6 +31,7 @@
#define TBNET_RING_SIZE 256
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 10
+#define TBNET_E2E BIT(0)
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
@@ -209,6 +211,10 @@ static const uuid_t tbnet_svc_uuid =
static struct tb_property_dir *tbnet_dir;
+static bool tbnet_e2e = true;
+module_param_named(e2e, tbnet_e2e, bool, 0444);
+MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
+
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
enum thunderbolt_ip_type type, size_t size, u32 command_id)
@@ -612,18 +618,13 @@ static void tbnet_connected_work(struct work_struct *work)
return;
}
- /* Both logins successful so enable the high-speed DMA paths and
- * start the network device queue.
+ /* Both logins successful so enable the rings, high-speed DMA
+ * paths and start the network device queue.
+ *
+ * Note we enable the DMA paths last to make sure we have primed
+ * the Rx ring before any incoming packets are allowed to
+ * arrive.
*/
- ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
- net->rx_ring.ring->hop,
- net->remote_transmit_path,
- net->tx_ring.ring->hop);
- if (ret) {
- netdev_err(net->dev, "failed to enable DMA paths\n");
- return;
- }
-
tb_ring_start(net->tx_ring.ring);
tb_ring_start(net->rx_ring.ring);
@@ -635,10 +636,21 @@ static void tbnet_connected_work(struct work_struct *work)
if (ret)
goto err_free_rx_buffers;
+ ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
+ net->rx_ring.ring->hop,
+ net->remote_transmit_path,
+ net->tx_ring.ring->hop);
+ if (ret) {
+ netdev_err(net->dev, "failed to enable DMA paths\n");
+ goto err_free_tx_buffers;
+ }
+
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
return;
+err_free_tx_buffers:
+ tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
@@ -867,6 +879,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
+ unsigned int flags;
int hopid;
netif_carrier_off(dev);
@@ -891,9 +904,14 @@ static int tbnet_open(struct net_device *dev)
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
- ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
- RING_FLAG_FRAME, 0, sof_mask, eof_mask,
- tbnet_start_poll, net);
+ flags = RING_FLAG_FRAME;
+ /* Only enable full E2E if the other end supports it too */
+ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+ flags |= RING_FLAG_E2E;
+
+ ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+ net->tx_ring.ring->hop, sof_mask,
+ eof_mask, tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");
tb_ring_free(net->tx_ring.ring);
@@ -1264,7 +1282,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
dev->features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
- netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &net->napi, tbnet_poll);
/* MTU range: 68 - 65522 */
dev->min_mtu = ETH_MIN_MTU;
@@ -1356,6 +1374,7 @@ static struct tb_service_driver tbnet_driver = {
static int __init tbnet_init(void)
{
+ unsigned int flags;
int ret;
tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1365,12 +1384,11 @@ static int __init tbnet_init(void)
tb_property_add_immediate(tbnet_dir, "prtcid", 1);
tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
- /* Currently only announce support for match frags ID (bit 1). Bit 0
- * is reserved for full E2E flow control which we do not support at
- * the moment.
- */
- tb_property_add_immediate(tbnet_dir, "prtcstns",
- TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+ flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+ if (tbnet_e2e)
+ flags |= TBNET_E2E;
+ tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret) {
@@ -1393,5 +1411,5 @@ module_exit(tbnet_exit);
MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_DESCRIPTION("Thunderbolt network driver");
+MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");
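/* Editor's note (not part of the diff): the tbnet_connected_work() hunk
 * above reorders link bring-up so the DMA paths are enabled only after
 * the rings are started and the Rx buffers are allocated; otherwise the
 * peer could transmit into an unprimed Rx ring. The generic shape of the
 * fix, with hypothetical my_*() helpers and the usual unwind-in-reverse
 * error handling:
 */
static int my_link_up(struct my_net *net)
{
	int ret;

	my_ring_start(net);			/* 1. start the rings */
	ret = my_alloc_rx_buffers(net);		/* 2. prime the Rx ring */
	if (ret)
		goto err_stop_rings;
	ret = my_enable_dma_paths(net);		/* 3. open the paths last */
	if (ret)
		goto err_free_buffers;
	return 0;

err_free_buffers:
	my_free_rx_buffers(net);
err_stop_rings:
	my_ring_stop(net);
	return ret;
}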
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fed85447701a..7a3ab3427369 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -268,12 +268,17 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
tfile->napi_enabled = napi_en;
tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
- netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
napi_enable(&tfile->napi);
}
}
+static void tun_napi_enable(struct tun_file *tfile)
+{
+ if (tfile->napi_enabled)
+ napi_enable(&tfile->napi);
+}
+
static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
@@ -635,7 +640,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tfile);
+ if (!tfile->detached)
+ tun_napi_disable(tfile);
tun_napi_del(tfile);
}
@@ -654,8 +660,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (clean) {
RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
- } else
+ } else {
tun_disable_queue(tun, tfile);
+ tun_napi_disable(tfile);
+ }
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
@@ -728,6 +736,7 @@ static void tun_detach_all(struct net_device *dev)
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_napi_del(tfile);
tun_enable_queue(tfile);
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -808,6 +817,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (tfile->detached) {
tun_enable_queue(tfile);
+ tun_napi_enable(tfile);
} else {
sock_hold(&tfile->sk);
tun_napi_init(tun, tfile, napi, napi_frags);
@@ -1058,6 +1068,7 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun,
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ enum skb_drop_reason drop_reason;
int txq = skb->queue_mapping;
struct netdev_queue *queue;
struct tun_file *tfile;
@@ -1067,8 +1078,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (!tfile)
+ if (!tfile) {
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
+ }
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
@@ -1078,19 +1091,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
* Filter can be enabled only for the TAP devices. */
- if (!check_filter(&tun->txflt, skb))
+ if (!check_filter(&tun->txflt, skb)) {
+ drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
goto drop;
+ }
if (tfile->socket.sk->sk_filter &&
- sk_filter(tfile->socket.sk, skb))
+ sk_filter(tfile->socket.sk, skb)) {
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
goto drop;
+ }
len = run_ebpf_filter(tun, skb, len);
- if (len == 0 || pskb_trim(skb, len))
+ if (len == 0) {
+ drop_reason = SKB_DROP_REASON_TAP_FILTER;
+ goto drop;
+ }
+
+ if (pskb_trim(skb, len)) {
+ drop_reason = SKB_DROP_REASON_NOMEM;
goto drop;
+ }
- if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+ if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto drop;
+ }
skb_tx_timestamp(skb);
@@ -1101,12 +1127,14 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
- if (ptr_ring_produce(&tfile->tx_ring, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
/* NETIF_F_LLTX requires to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
- queue->trans_start = jiffies;
+ txq_trans_cond_update(queue);
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
@@ -1117,9 +1145,9 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
skb_tx_error(skb);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
rcu_read_unlock();
return NET_XMIT_DROP;
}
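/* Editor's note (not part of the diff): atomic_long_inc(&dev->tx_dropped)
 * becomes dev_core_stats_tx_dropped_inc() from <linux/netdevice.h>, which
 * bumps a lazily allocated per-CPU counter instead of a single shared
 * atomic, avoiding cacheline bouncing on hot drop paths. The combined
 * drop idiom now reads roughly:
 */
static netdev_tx_t my_xmit_drop(struct sk_buff *skb, struct net_device *dev)
{
	dev_core_stats_tx_dropped_inc(dev);	/* per-CPU, no shared atomic */
	kfree_skb_reason(skb, SKB_DROP_REASON_FULL_RING);
	return NET_XMIT_DROP;
}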
@@ -1273,7 +1301,7 @@ resample:
void *frame = tun_xdp_to_ptr(xdp);
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
break;
}
nxmit++;
@@ -1431,7 +1459,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int err;
int i;
- if (it->nr_segs > MAX_SKB_FRAGS + 1)
+ if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
+ len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
return ERR_PTR(-EMSGSIZE);
local_bh_disable();
@@ -1608,7 +1637,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
trace_xdp_exception(tun->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
break;
}
@@ -1717,6 +1746,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
+ enum skb_drop_reason drop_reason;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
@@ -1778,7 +1808,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
if (IS_ERR(skb)) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
return PTR_ERR(skb);
}
if (!skb)
@@ -1807,7 +1837,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
if (frags)
mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
@@ -1820,9 +1850,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
err = -EFAULT;
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
drop:
- atomic_long_inc(&tun->dev->rx_dropped);
- kfree_skb(skb);
+ dev_core_stats_rx_dropped_inc(tun->dev);
+ kfree_skb_reason(skb, drop_reason);
if (frags) {
tfile->napi.skb = NULL;
mutex_unlock(&tfile->napi_mutex);
@@ -1856,7 +1887,7 @@ drop:
pi.proto = htons(ETH_P_IPV6);
break;
default:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
kfree_skb(skb);
return -EINVAL;
}
@@ -1869,6 +1900,7 @@ drop:
case IFF_TAP:
if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
err = -ENOMEM;
+ drop_reason = SKB_DROP_REASON_HDR_TRUNC;
goto drop;
}
skb->protocol = eth_type_trans(skb, tun->dev);
@@ -1922,6 +1954,7 @@ drop:
if (unlikely(!(tun->dev->flags & IFF_UP))) {
err = -EIO;
rcu_read_unlock();
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
@@ -1934,17 +1967,25 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ WARN_ON_ONCE(1);
+ err = -ENOMEM;
+ dev_core_stats_rx_dropped_inc(tun->dev);
+napi_busy:
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
- WARN_ON(1);
- return -ENOMEM;
+ return err;
}
- local_bh_disable();
- napi_gro_frags(&tfile->napi);
- local_bh_enable();
+ if (likely(napi_schedule_prep(&tfile->napi))) {
+ local_bh_disable();
+ napi_gro_frags(&tfile->napi);
+ napi_complete(&tfile->napi);
+ local_bh_enable();
+ } else {
+ err = -EBUSY;
+ goto napi_busy;
+ }
mutex_unlock(&tfile->napi_mutex);
} else if (tfile->napi_enabled) {
struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
@@ -1962,7 +2003,7 @@ drop:
} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
tun_rx_batched(tun, tfile, skb, more);
} else {
- netif_rx_ni(skb);
+ netif_rx(skb);
}
rcu_read_unlock();
@@ -2388,9 +2429,10 @@ static int tun_xdp_one(struct tun_struct *tun,
struct virtio_net_hdr *gso = &hdr->gso;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
+ struct sk_buff_head *queue;
u32 rxhash = 0, act;
int buflen = hdr->buflen;
- int err = 0;
+ int ret = 0;
bool skb_xdp = false;
struct page *page;
@@ -2405,13 +2447,13 @@ static int tun_xdp_one(struct tun_struct *tun,
xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- err = tun_xdp_act(tun, xdp_prog, xdp, act);
- if (err < 0) {
+ ret = tun_xdp_act(tun, xdp_prog, xdp, act);
+ if (ret < 0) {
put_page(virt_to_head_page(xdp->data));
- return err;
+ return ret;
}
- switch (err) {
+ switch (ret) {
case XDP_REDIRECT:
*flush = true;
fallthrough;
@@ -2435,7 +2477,7 @@ static int tun_xdp_one(struct tun_struct *tun,
build:
skb = build_skb(xdp->data_hard_start, buflen);
if (!skb) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -2445,7 +2487,7 @@ build:
if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -2455,16 +2497,27 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- err = do_xdp_generic(xdp_prog, skb);
- if (err != XDP_PASS)
+ ret = do_xdp_generic(xdp_prog, skb);
+ if (ret != XDP_PASS) {
+ ret = 0;
goto out;
+ }
}
if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
- netif_receive_skb(skb);
+ if (tfile->napi_enabled) {
+ queue = &tfile->sk.sk_write_queue;
+ spin_lock(&queue->lock);
+ __skb_queue_tail(queue, skb);
+ spin_unlock(&queue->lock);
+ ret = 1;
+ } else {
+ netif_receive_skb(skb);
+ ret = 0;
+ }
/* No need to disable preemption here since this function is
* always called with bh disabled
@@ -2475,7 +2528,7 @@ build:
tun_flow_update(tun, rxhash, tfile);
out:
- return err;
+ return ret;
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
@@ -2489,10 +2542,11 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (!tun)
return -EBADFD;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
struct tun_page tpage;
int n = ctl->num;
- int flush = 0;
+ int flush = 0, queued = 0;
memset(&tpage, 0, sizeof(tpage));
@@ -2501,12 +2555,17 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ if (ret > 0)
+ queued += ret;
}
if (flush)
xdp_do_flush();
+ if (tfile->napi_enabled && queued > 0)
+ napi_schedule(&tfile->napi);
+
rcu_read_unlock();
local_bh_enable();
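/* Editor's note (not part of the diff): both tun_sendmsg() above and
 * tap_sendmsg() earlier now require msg_controllen to match
 * sizeof(struct tun_msg_ctl) before trusting ctl->type, so a sendmsg()
 * caller passing unrelated control data cannot have it misinterpreted as
 * a TUN_MSG_PTR XDP batch. The validation in isolation:
 */
static bool my_ctl_is_xdp_batch(const struct msghdr *m)
{
	const struct tun_msg_ctl *ctl = m->msg_control;

	return m->msg_controllen == sizeof(struct tun_msg_ctl) &&
	       ctl && ctl->type == TUN_MSG_PTR;
}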
@@ -2614,7 +2673,7 @@ static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
- return sprintf(buf, "0x%x\n", tun_flags(tun));
+ return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
}
static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
@@ -2622,9 +2681,9 @@ static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return uid_valid(tun->owner)?
- sprintf(buf, "%u\n",
- from_kuid_munged(current_user_ns(), tun->owner)):
- sprintf(buf, "-1\n");
+ sysfs_emit(buf, "%u\n",
+ from_kuid_munged(current_user_ns(), tun->owner)) :
+ sysfs_emit(buf, "-1\n");
}
static ssize_t group_show(struct device *dev, struct device_attribute *attr,
@@ -2632,9 +2691,9 @@ static ssize_t group_show(struct device *dev, struct device_attribute *attr,
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return gid_valid(tun->group) ?
- sprintf(buf, "%u\n",
- from_kgid_munged(current_user_ns(), tun->group)):
- sprintf(buf, "-1\n");
+ sysfs_emit(buf, "%u\n",
+ from_kgid_munged(current_user_ns(), tun->group)) :
+ sysfs_emit(buf, "-1\n");
}
static DEVICE_ATTR_RO(tun_flags);
@@ -2778,7 +2837,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
rcu_assign_pointer(tfile->tun, tun);
}
- netif_carrier_on(tun->dev);
+ if (ifr->ifr_flags & IFF_NO_CARRIER)
+ netif_carrier_off(tun->dev);
+ else
+ netif_carrier_on(tun->dev);
/* Make sure persistent devices do not get stuck in
* xoff state.
@@ -3006,8 +3068,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
- return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
- (unsigned int __user*)argp);
+ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+ TUN_FEATURES, (unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE) {
return tun_set_queue(file, &ifr);
} else if (cmd == SIOCGSKNS) {
@@ -3490,15 +3552,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
- strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
+ strscpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case IFF_TAP:
- strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
+ strscpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
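/* Editor's note (not part of the diff): the tun_get_user() hunk above only
 * calls napi_gro_frags() once napi_schedule_prep() has succeeded, i.e.
 * once this context exclusively owns the NAPI instance, and hands it back
 * with napi_complete(); if another owner is active it bails out with
 * -EBUSY instead of racing. The guard in reduced form:
 */
static int my_feed_napi(struct napi_struct *napi)
{
	if (!napi_schedule_prep(napi))	/* NAPI already owned elsewhere */
		return -EBUSY;

	local_bh_disable();
	napi_gro_frags(napi);		/* safe: we are the sole owner */
	napi_complete(napi);
	local_bh_enable();
	return 0;
}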
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index b554054a7560..4402eedb3d1a 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -168,7 +168,7 @@ config USB_NET_AX8817X
tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
depends on USB_USBNET
select CRC32
- select PHYLIB
+ select PHYLINK
select AX88796B_PHY
imply NET_SELFTESTS
default y
@@ -358,6 +358,7 @@ config USB_NET_SMSC95XX
select BITREVERSE
select CRC16
select CRC32
+ imply NET_SELFTESTS
help
This option adds support for SMSC LAN95XX based USB 2.0
10/100 Ethernet adapters.
@@ -636,8 +637,9 @@ config USB_NET_AQC111
* Aquantia AQtion USB to 5GbE
config USB_RTL8153_ECM
- tristate "RTL8153 ECM support"
+ tristate
depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
+ default y
help
This option supports ECM mode for RTL8153 ethernet adapter, when
CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
supported by r8152 driver.
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index ea06d10e1c21..a017e9de2119 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -201,7 +201,7 @@ static void aqc111_get_drvinfo(struct net_device *net,
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
aqc111_data->fw_ver.major,
aqc111_data->fw_ver.minor,
@@ -735,7 +735,7 @@ static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->features |= AQ_SUPPORT_FEATURE;
dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;
- netif_set_gso_max_size(dev->net, 65535);
+ netif_set_tso_max_size(dev->net, 65535);
aqc111_read_fw_version(dev, aqc111_data);
aqc111_data->autoneg = AUTONEG_ENABLE;
@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (start_of_descs != desc_offset)
goto err;
- /* self check desc_offset from header*/
- if (desc_offset >= skb_len)
+ /* Sanity-check desc_offset from the header and make sure that the
+ * whole metadata array lies inside the SKB
+ */
+ if (pkt_count * 2 + desc_offset >= skb_len)
goto err;
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, desc_offset);
+
if (pkt_count == 0)
goto err;
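/* Editor's note (not part of the diff): the aqc111_rx_fixup() hunk above
 * checks that the per-packet metadata array (2 bytes per packet, starting
 * at desc_offset) lies entirely inside the received buffer before walking
 * it, and trims the skb so packet data cannot overlap the metadata. The
 * check in isolation, with illustrative names:
 */
static bool my_rx_meta_ok(u32 pkt_count, u32 desc_offset, u32 skb_len)
{
	/* mirrors the driver: reject an empty batch and any layout where
	 * the 2-byte-per-packet descriptors would run past the buffer end
	 */
	return pkt_count != 0 && pkt_count * 2 + desc_offset < skb_len;
}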
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 2a1e31defe71..74162190bccc 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -27,6 +27,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/selftests.h>
+#include <linux/phylink.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -126,8 +127,7 @@
AX_MEDIUM_RE)
#define AX88772_MEDIUM_DEFAULT \
- (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
- AX_MEDIUM_TFC | AX_MEDIUM_PS | \
+ (AX_MEDIUM_FD | AX_MEDIUM_PS | \
AX_MEDIUM_AC | AX_MEDIUM_RE)
/* AX88772 & AX88178 RX_CTL values */
@@ -158,6 +158,8 @@
#define AX_EEPROM_MAGIC 0xdeadbeef
#define AX_EEPROM_LEN 0x200
+#define AX_EMBD_PHY_ADDR 0x10
+
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
@@ -177,14 +179,18 @@ struct asix_rx_fixup_info {
struct asix_common_private {
void (*resume)(struct usbnet *dev);
void (*suspend)(struct usbnet *dev);
+ int (*reset)(struct usbnet *dev, int in_pm);
u16 presvd_phy_advertise;
u16 presvd_phy_bmcr;
struct asix_rx_fixup_info rx_fixup_info;
struct mii_bus *mdio;
struct phy_device *phydev;
+ struct phy_device *phydev_int;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u16 phy_addr;
- char phy_name[20];
bool embd_phy;
+ u8 chipcode;
};
extern const struct driver_info ax88172a_info;
@@ -192,8 +198,8 @@ extern const struct driver_info ax88172a_info;
/* ASIX specific flags */
#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm);
+int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm);
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data, int in_pm);
@@ -209,9 +215,6 @@ void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
-int asix_set_sw_mii(struct usbnet *dev, int in_pm);
-int asix_set_hw_mii(struct usbnet *dev, int in_pm);
-
int asix_read_phy_addr(struct usbnet *dev, bool internal);
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 71682970be58..72ffc89b477a 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -11,8 +11,8 @@
#define AX_HOST_EN_RETRIES 30
-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
@@ -27,9 +27,12 @@ int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
index, ret);
+ }
return ret;
}
@@ -65,6 +68,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static int asix_set_sw_mii(struct usbnet *dev, int in_pm)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
+
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static int asix_set_hw_mii(struct usbnet *dev, int in_pm)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
static int asix_check_host_enable(struct usbnet *dev, int in_pm)
{
int i, ret;
@@ -79,7 +103,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
0, 0, 1, &smsr, in_pm);
if (ret == -ENODEV)
break;
- else if (ret < sizeof(smsr))
+ else if (ret < 0)
continue;
else if (smsr & AX_HOST_EN)
break;
@@ -294,25 +318,6 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return skb;
}
-int asix_set_sw_mii(struct usbnet *dev, int in_pm)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
-
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable software MII access\n");
- return ret;
-}
-
-int asix_set_hw_mii(struct usbnet *dev, int in_pm)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable hardware MII access\n");
- return ret;
-}
-
int asix_read_phy_addr(struct usbnet *dev, bool internal)
{
int ret, offset;
@@ -428,6 +433,7 @@ void asix_adjust_link(struct net_device *netdev)
asix_write_medium_mode(dev, mode, 0);
phy_print_status(phydev);
+ usbnet_link_change(dev, phydev->link, 0);
}
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
@@ -488,7 +494,8 @@ void asix_set_multicast(struct net_device *net)
asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
-int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+static int __asix_mdio_read(struct net_device *netdev, int phy_id, int loc,
+ bool in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
@@ -496,18 +503,18 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
- ret = asix_check_host_enable(dev, 0);
+ ret = asix_check_host_enable(dev, in_pm);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, (__u16)loc, 2,
- &res, 0);
+ &res, in_pm);
if (ret < 0)
goto out;
- ret = asix_set_hw_mii(dev, 0);
+ ret = asix_set_hw_mii(dev, in_pm);
out:
mutex_unlock(&dev->phy_mutex);
@@ -517,8 +524,13 @@ out:
return ret < 0 ? ret : le16_to_cpu(res);
}
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ return __asix_mdio_read(netdev, phy_id, loc, false);
+}
+
static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
- int val)
+ int val, bool in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
@@ -529,16 +541,16 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
mutex_lock(&dev->phy_mutex);
- ret = asix_check_host_enable(dev, 0);
+ ret = asix_check_host_enable(dev, in_pm);
if (ret == -ENODEV)
goto out;
ret = asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2,
- &res, 0);
+ &res, in_pm);
if (ret < 0)
goto out;
- ret = asix_set_hw_mii(dev, 0);
+ ret = asix_set_hw_mii(dev, in_pm);
out:
mutex_unlock(&dev->phy_mutex);
@@ -547,7 +559,7 @@ out:
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
- __asix_mdio_write(netdev, phy_id, loc, val);
+ __asix_mdio_write(netdev, phy_id, loc, val, false);
}
/* MDIO read and write wrappers for phylib */
@@ -555,63 +567,25 @@ int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct usbnet *priv = bus->priv;
- return asix_mdio_read(priv->net, phy_id, regnum);
+ return __asix_mdio_read(priv->net, phy_id, regnum, false);
}
int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
{
struct usbnet *priv = bus->priv;
- return __asix_mdio_write(priv->net, phy_id, regnum, val);
+ return __asix_mdio_write(priv->net, phy_id, regnum, val, false);
}
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res;
- int ret;
-
- mutex_lock(&dev->phy_mutex);
-
- ret = asix_check_host_enable(dev, 1);
- if (ret == -ENODEV || ret == -ETIMEDOUT) {
- mutex_unlock(&dev->phy_mutex);
- return ret;
- }
-
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res, 1);
- asix_set_hw_mii(dev, 1);
- mutex_unlock(&dev->phy_mutex);
-
- netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, le16_to_cpu(res));
-
- return le16_to_cpu(res);
+ return __asix_mdio_read(netdev, phy_id, loc, true);
}
void
asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res = cpu_to_le16(val);
- int ret;
-
- netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
-
- mutex_lock(&dev->phy_mutex);
-
- ret = asix_check_host_enable(dev, 1);
- if (ret == -ENODEV) {
- mutex_unlock(&dev->phy_mutex);
- return;
- }
-
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
- (__u16)loc, 2, &res, 1);
- asix_set_hw_mii(dev, 1);
- mutex_unlock(&dev->phy_mutex);
+ __asix_mdio_write(netdev, phy_id, loc, val, true);
}
void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
@@ -778,8 +752,8 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
int asix_set_mac_address(struct net_device *net, void *p)
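/* Editor's note (not part of the diff): asix_read_cmd() above now treats a
 * short USB control-transfer read as a failure, mapping "fewer bytes than
 * requested" to -ENODATA so callers can never consume a partially filled
 * buffer, and __must_check forces them to inspect the result. Reduced
 * form, with a hypothetical my_usb_read() transport:
 */
static int __must_check my_read(void *buf, int size)
{
	int ret = my_usb_read(buf, size);

	if (ret < size)		/* short read, or a negative error code */
		return ret < 0 ? ret : -ENODATA;

	return ret;
}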
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 4514d35ef4c4..11f60d32be82 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -303,6 +303,24 @@ static int ax88772_ethtool_get_sset_count(struct net_device *ndev, int sset)
}
}
+static void ax88772_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct usbnet *dev = netdev_priv(ndev);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+static int ax88772_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct usbnet *dev = netdev_priv(ndev);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
static const struct ethtool_ops ax88772_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = usbnet_get_link,
@@ -319,6 +337,8 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.self_test = net_selftest,
.get_strings = ax88772_ethtool_get_strings,
.get_sset_count = ax88772_ethtool_get_sset_count,
+ .get_pauseparam = ax88772_ethtool_get_pauseparam,
+ .set_pauseparam = ax88772_ethtool_set_pauseparam,
};
static int ax88772_reset(struct usbnet *dev)
@@ -343,7 +363,7 @@ static int ax88772_reset(struct usbnet *dev)
if (ret < 0)
goto out;
- phy_start(priv->phydev);
+ phylink_start(priv->phylink);
return 0;
@@ -450,7 +470,6 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
struct asix_data *data = (struct asix_data *)&dev->data;
struct asix_common_private *priv = dev->driver_priv;
u16 rx_ctl, phy14h, phy15h, phy16h;
- u8 chipcode = 0;
int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
@@ -493,12 +512,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
goto out;
}
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0,
- 0, 1, &chipcode, in_pm);
- if (ret < 0)
- goto out;
-
- if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772B_CHIPCODE) {
+ if (priv->chipcode == AX_AX88772B_CHIPCODE) {
ret = asix_write_cmd(dev, AX_QCTCTRL, 0x8000, 0x8001,
0, NULL, in_pm);
if (ret < 0) {
@@ -506,7 +520,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
ret);
goto out;
}
- } else if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772A_CHIPCODE) {
+ } else if (priv->chipcode == AX_AX88772A_CHIPCODE) {
/* Check if the PHY registers have default settings */
phy14h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
AX88772A_PHY14H);
@@ -596,8 +610,11 @@ static void ax88772_suspend(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
u16 medium;
- if (netif_running(dev->net))
- phy_stop(priv->phydev);
+ if (netif_running(dev->net)) {
+ rtnl_lock();
+ phylink_suspend(priv->phylink, false);
+ rtnl_unlock();
+ }
/* Stop MAC operation */
medium = asix_read_medium_status(dev, 1);
@@ -625,25 +642,14 @@ static void ax88772_resume(struct usbnet *dev)
int i;
for (i = 0; i < 3; i++)
- if (!ax88772_hw_reset(dev, 1))
+ if (!priv->reset(dev, 1))
break;
- if (netif_running(dev->net))
- phy_start(priv->phydev);
-}
-
-static void ax88772a_resume(struct usbnet *dev)
-{
- struct asix_common_private *priv = dev->driver_priv;
- int i;
-
- for (i = 0; i < 3; i++) {
- if (!ax88772a_hw_reset(dev, 1))
- break;
+ if (netif_running(dev->net)) {
+ rtnl_lock();
+ phylink_resume(priv->phylink);
+ rtnl_unlock();
}
-
- if (netif_running(dev->net))
- phy_start(priv->phydev);
}
static int asix_resume(struct usb_interface *intf)
@@ -681,15 +687,15 @@ static int ax88772_init_phy(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
int ret;
- snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
+ priv->phydev = mdiobus_get_phy(priv->mdio, priv->phy_addr);
+ if (!priv->phydev) {
+ netdev_err(dev->net, "Could not find PHY\n");
+ return -ENODEV;
+ }
- priv->phydev = phy_connect(dev->net, priv->phy_name, &asix_adjust_link,
- PHY_INTERFACE_MODE_INTERNAL);
- if (IS_ERR(priv->phydev)) {
- netdev_err(dev->net, "Could not connect to PHY device %s\n",
- priv->phy_name);
- ret = PTR_ERR(priv->phydev);
+ ret = phylink_connect_phy(priv->phylink, priv->phydev);
+ if (ret) {
+ netdev_err(dev->net, "Could not connect PHY\n");
return ret;
}
@@ -698,13 +704,115 @@ static int ax88772_init_phy(struct usbnet *dev)
phy_attached_info(priv->phydev);
+ if (priv->embd_phy)
+ return 0;
+
+ /* In case the main PHY is not the embedded PHY and the MAC is the
+ * RMII clock provider, we need to suspend the embedded PHY while
+ * keeping its PLL enabled (AX_SWRESET_IPPD == 0).
+ */
+ priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
+ if (!priv->phydev_int) {
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
+ netdev_err(dev->net, "Could not find internal PHY\n");
+ return -ENODEV;
+ }
+
+ priv->phydev_int->mac_managed_pm = 1;
+ phy_suspend(priv->phydev_int);
+
+ return 0;
+}
+
+static void ax88772_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Nothing to do */
+}
+
+static void ax88772_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+
+ asix_write_medium_mode(dev, 0, 0);
+ usbnet_link_change(dev, false, false);
+}
+
+static void ax88772_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+ u16 m = AX_MEDIUM_AC | AX_MEDIUM_RE;
+
+ m |= duplex ? AX_MEDIUM_FD : 0;
+
+ switch (speed) {
+ case SPEED_100:
+ m |= AX_MEDIUM_PS;
+ break;
+ case SPEED_10:
+ break;
+ default:
+ return;
+ }
+
+ if (tx_pause)
+ m |= AX_MEDIUM_TFC;
+
+ if (rx_pause)
+ m |= AX_MEDIUM_RFC;
+
+ asix_write_medium_mode(dev, m, 0);
+ usbnet_link_change(dev, true, false);
+}
+
+static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = ax88772_mac_config,
+ .mac_link_down = ax88772_mac_link_down,
+ .mac_link_up = ax88772_mac_link_up,
+};
+
+static int ax88772_phylink_setup(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+ phy_interface_t phy_if_mode;
+ struct phylink *phylink;
+
+ priv->phylink_config.dev = &dev->net->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ priv->phylink_config.supported_interfaces);
+
+ if (priv->embd_phy)
+ phy_if_mode = PHY_INTERFACE_MODE_INTERNAL;
+ else
+ phy_if_mode = PHY_INTERFACE_MODE_RMII;
+
+ phylink = phylink_create(&priv->phylink_config, dev->net->dev.fwnode,
+ phy_if_mode, &ax88772_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phylink = phylink;
return 0;
}
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- u8 buf[ETH_ALEN] = {0}, chipcode = 0;
struct asix_common_private *priv;
+ u8 buf[ETH_ALEN] = {0};
int ret, i;
priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
@@ -753,14 +861,25 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
priv->phy_addr = ret;
- priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+ priv->embd_phy = ((priv->phy_addr & 0x1f) == AX_EMBD_PHY_ADDR);
- asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
- chipcode &= AX_CHIPCODE_MASK;
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1,
+ &priv->chipcode, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n", ret);
+ return ret;
+ }
+
+ priv->chipcode &= AX_CHIPCODE_MASK;
- ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
- ax88772a_hw_reset(dev, 0);
+ priv->resume = ax88772_resume;
+ priv->suspend = ax88772_suspend;
+ if (priv->chipcode == AX_AX88772_CHIPCODE)
+ priv->reset = ax88772_hw_reset;
+ else
+ priv->reset = ax88772a_hw_reset;
+ ret = priv->reset(dev, 0);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
return ret;
@@ -775,30 +894,27 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
priv->presvd_phy_bmcr = 0;
priv->presvd_phy_advertise = 0;
- if (chipcode == AX_AX88772_CHIPCODE) {
- priv->resume = ax88772_resume;
- priv->suspend = ax88772_suspend;
- } else {
- priv->resume = ax88772a_resume;
- priv->suspend = ax88772_suspend;
- }
ret = ax88772_init_mdio(dev);
if (ret)
return ret;
- return ax88772_init_phy(dev);
+ ret = ax88772_phylink_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = ax88772_init_phy(dev);
+ if (ret)
+ phylink_destroy(priv->phylink);
+
+ return ret;
}
static int ax88772_stop(struct usbnet *dev)
{
struct asix_common_private *priv = dev->driver_priv;
- /* On unplugged USB, we will get MDIO communication errors and the
- * PHY will be set in to PHY_HALTED state.
- */
- if (priv->phydev->state != PHY_HALTED)
- phy_stop(priv->phydev);
+ phylink_stop(priv->phylink);
return 0;
}
@@ -807,7 +923,10 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct asix_common_private *priv = dev->driver_priv;
- phy_disconnect(priv->phydev);
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
+ phylink_destroy(priv->phylink);
asix_rx_fixup_common_free(dev->driver_priv);
}
@@ -858,7 +977,6 @@ static int marvell_phy_init(struct usbnet *dev)
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
- reg &= 0xfc0f;
}
return 0;
@@ -920,11 +1038,21 @@ static int ax88178_reset(struct usbnet *dev)
int gpio0 = 0;
u32 phyid;
- asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read GPIOS: %d\n", ret);
+ return ret;
+ }
+
netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0);
- asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read EEPROM: %d\n", ret);
+ return ret;
+ }
+
asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0);
netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1a627ba4b850..aff39bf3161d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -164,11 +164,15 @@
#define GMII_PHY_PGSEL_PAGE3 0x0003
#define GMII_PHY_PGSEL_PAGE5 0x0005
+static int ax88179_reset(struct usbnet *dev);
+
struct ax88179_data {
u8 eee_enabled;
u8 eee_active;
u16 rxctl;
- u16 reserved;
+ u8 in_pm;
+ u32 wol_supported;
+ u32 wolopts;
};
struct ax88179_int_data {
@@ -185,15 +189,29 @@ static const struct {
{7, 0xcc, 0x4c, 0x18, 8},
};
+static void ax88179_set_pm_mode(struct usbnet *dev, bool pm_mode)
+{
+ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ ax179_data->in_pm = pm_mode;
+}
+
+static int ax88179_in_pm(struct usbnet *dev)
+{
+ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ return ax179_data->in_pm;
+}
+
static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+ u16 size, void *data)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!in_pm)
+ if (!ax88179_in_pm(dev))
fn = usbnet_read_cmd;
else
fn = usbnet_read_cmd_nopm;
@@ -209,14 +227,14 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, const void *data, int in_pm)
+ u16 size, const void *data)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!in_pm)
+ if (!ax88179_in_pm(dev))
fn = usbnet_write_cmd;
else
fn = usbnet_write_cmd_nopm;
@@ -249,47 +267,6 @@ static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
}
}
-static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
-{
- int ret;
-
- if (2 == size) {
- u16 buf;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
- le16_to_cpus(&buf);
- *((u16 *)data) = buf;
- } else if (4 == size) {
- u32 buf;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
- le32_to_cpus(&buf);
- *((u32 *)data) = buf;
- } else {
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
- }
-
- return ret;
-}
-
-static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, const void *data)
-{
- int ret;
-
- if (2 == size) {
- u16 buf;
- buf = *((u16 *)data);
- cpu_to_le16s(&buf);
- ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, &buf, 1);
- } else {
- ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, data, 1);
- }
-
- return ret;
-}
-
static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
@@ -297,16 +274,16 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
if (2 == size) {
u16 buf = 0;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf);
le16_to_cpus(&buf);
*((u16 *)data) = buf;
} else if (4 == size) {
u32 buf = 0;
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf);
le32_to_cpus(&buf);
*((u32 *)data) = buf;
} else {
- ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, data);
}
return ret;
@@ -322,10 +299,10 @@ static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
buf = *((u16 *)data);
cpu_to_le16s(&buf);
ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, &buf, 0);
+ size, &buf);
} else {
ret = __ax88179_write_cmd(dev, cmd, value, index,
- size, data, 0);
+ size, data);
}
return ret;
@@ -425,55 +402,63 @@ ax88179_phy_write_mmd_indirect(struct usbnet *dev, u16 prtad, u16 devad,
static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
+ struct ax88179_data *priv = dev->driver_priv;
u16 tmp16;
u8 tmp8;
+ ax88179_set_pm_mode(dev, true);
+
usbnet_suspend(intf, message);
+ /* Enable WoL */
+ if (priv->wolopts) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &tmp8);
+ if (priv->wolopts & WAKE_PHY)
+ tmp8 |= AX_MONITOR_MODE_RWLC;
+ if (priv->wolopts & WAKE_MAGIC)
+ tmp8 |= AX_MONITOR_MODE_RWMP;
+
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &tmp8);
+ }
+
/* Disable RX path */
- ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
- 2, 2, &tmp16);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
- 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
/* Force bulk-in zero length */
- ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
/* change clock */
tmp8 = 0;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
/* Configure RX control register => stop operation */
tmp16 = AX_RX_CTL_STOP;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+ ax88179_set_pm_mode(dev, false);
return 0;
}
/* This function is used to enable the autodetach function. */
/* This function is determined by offset 0x43 of EEPROM */
-static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
+static int ax88179_auto_detach(struct usbnet *dev)
{
u16 tmp16;
u8 tmp8;
- int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
- int (*fnw)(struct usbnet *, u8, u16, u16, u16, const void *);
-
- if (!in_pm) {
- fnr = ax88179_read_cmd;
- fnw = ax88179_write_cmd;
- } else {
- fnr = ax88179_read_cmd_nopm;
- fnw = ax88179_write_cmd_nopm;
- }
- if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
+ if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
return 0;
if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
@@ -481,13 +466,13 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
/* Enable Auto Detach bit */
tmp8 = 0;
- fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
tmp8 |= AX_CLK_SELECT_ULR;
- fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
- fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
tmp16 |= AX_PHYPWR_RSTCTL_AT;
- fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
return 0;
}
@@ -495,35 +480,14 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
static int ax88179_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- u16 tmp16;
- u8 tmp8;
- usbnet_link_change(dev, 0, 0);
-
- /* Power up ethernet PHY */
- tmp16 = 0;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
- udelay(1000);
-
- tmp16 = AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
- 2, 2, &tmp16);
- msleep(200);
+ ax88179_set_pm_mode(dev, true);
- /* Ethernet PHY Auto Detach*/
- ax88179_auto_detach(dev, 1);
+ usbnet_link_change(dev, 0, 0);
- /* Enable clock */
- ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
- tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
- msleep(100);
+ ax88179_reset(dev);
- /* Configure RX control register => start operation */
- tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
- AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
- ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+ ax88179_set_pm_mode(dev, false);
return usbnet_resume(intf);
}
@@ -532,40 +496,22 @@ static void
ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- u8 opt;
-
- if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
- 1, 1, &opt) < 0) {
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
- return;
- }
+ struct ax88179_data *priv = dev->driver_priv;
- wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
- wolinfo->wolopts = 0;
- if (opt & AX_MONITOR_MODE_RWLC)
- wolinfo->wolopts |= WAKE_PHY;
- if (opt & AX_MONITOR_MODE_RWMP)
- wolinfo->wolopts |= WAKE_MAGIC;
+ wolinfo->supported = priv->wol_supported;
+ wolinfo->wolopts = priv->wolopts;
}
static int
ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- u8 opt = 0;
+ struct ax88179_data *priv = dev->driver_priv;
- if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ if (wolinfo->wolopts & ~(priv->wol_supported))
return -EINVAL;
- if (wolinfo->wolopts & WAKE_PHY)
- opt |= AX_MONITOR_MODE_RWLC;
- if (wolinfo->wolopts & WAKE_MAGIC)
- opt |= AX_MONITOR_MODE_RWMP;
-
- if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
- 1, 1, &opt) < 0)
- return -EINVAL;
+ priv->wolopts = wolinfo->wolopts;
return 0;
}
@@ -599,8 +545,7 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
/* ax88179/178A returns 2 bytes from eeprom on read */
for (i = first_word; i <= last_word; i++) {
ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
- &eeprom_buff[i - first_word],
- 0);
+ &eeprom_buff[i - first_word]);
if (ret < 0) {
kfree(eeprom_buff);
return -EIO;
@@ -745,7 +690,7 @@ ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
static int ax88179_chk_eee(struct usbnet *dev)
{
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
- struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+ struct ax88179_data *priv = dev->driver_priv;
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -848,7 +793,7 @@ static void ax88179_enable_eee(struct usbnet *dev)
static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
struct usbnet *dev = netdev_priv(net);
- struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+ struct ax88179_data *priv = dev->driver_priv;
edata->eee_enabled = priv->eee_enabled;
edata->eee_active = priv->eee_active;
@@ -859,7 +804,7 @@ static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
struct usbnet *dev = netdev_priv(net);
- struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+ struct ax88179_data *priv = dev->driver_priv;
int ret;
priv->eee_enabled = edata->eee_enabled;
@@ -910,8 +855,8 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
static void ax88179_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct ax88179_data *data = (struct ax88179_data *)dev->data;
- u8 *m_filter = ((u8 *)dev->data) + 12;
+ struct ax88179_data *data = dev->driver_priv;
+ u8 *m_filter = ((u8 *)dev->data);
data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
@@ -923,7 +868,7 @@ static void ax88179_set_multicast(struct net_device *net)
} else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
- /* We use the 20 byte dev->data for our 8 byte filter buffer
+ /* We use dev->data for our 8 byte filter buffer
* to avoid allocating memory that is tricky to free later
*/
u32 crc_bits;
@@ -1069,7 +1014,7 @@ static int ax88179_check_eeprom(struct usbnet *dev)
} while (buf & EEP_BUSY);
__ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
- 2, 2, &eeprom[i * 2], 0);
+ 2, 2, &eeprom[i * 2]);
if ((i == 0) && (eeprom[0] == 0xFF))
return -EINVAL;
@@ -1322,46 +1267,15 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
- u8 buf[5];
- u16 *tmp16;
- u8 *tmp;
- struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
- struct ethtool_eee eee_data;
+ struct ax88179_data *ax179_data;
usbnet_get_endpoints(dev, intf);
- tmp16 = (u16 *)buf;
- tmp = (u8 *)buf;
-
- memset(ax179_data, 0, sizeof(*ax179_data));
-
- /* Power up ethernet PHY */
- *tmp16 = 0;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- msleep(200);
-
- *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
- msleep(100);
-
- /* Read MAC address from DTB or asix chip */
- ax88179_get_mac_addr(dev);
- memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
-
- /* RX bulk configuration */
- memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
-
- dev->rx_urb_size = 1024 * 20;
+ ax179_data = kzalloc(sizeof(*ax179_data), GFP_KERNEL);
+ if (!ax179_data)
+ return -ENOMEM;
- *tmp = 0x34;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
-
- *tmp = 0x52;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
- 1, 1, tmp);
+ dev->driver_priv = ax179_data;
dev->net->netdev_ops = &ax88179_netdev_ops;
dev->net->ethtool_ops = &ax88179_ethtool_ops;
@@ -1382,54 +1296,16 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->hw_features |= dev->net->features;
- netif_set_gso_max_size(dev->net, 16384);
-
- /* Enable checksum offload */
- *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
- AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
-
- *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
- AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
-
- /* Configure RX control register => start operation */
- *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
- AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
-
- *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
- AX_MONITOR_MODE_RWMP;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
-
- /* Configure default medium type => giga */
- *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
- AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
- AX_MEDIUM_GIGAMODE;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
- 2, 2, tmp16);
-
- ax88179_led_setting(dev);
+ netif_set_tso_max_size(dev->net, 16384);
- ax179_data->eee_enabled = 0;
- ax179_data->eee_active = 0;
-
- ax88179_disable_eee(dev);
-
- ax88179_ethtool_get_eee(dev, &eee_data);
- eee_data.advertised = 0;
- ax88179_ethtool_set_eee(dev, &eee_data);
-
- /* Restart autoneg */
- mii_nway_restart(&dev->mii);
-
- usbnet_link_change(dev, 0, 0);
+ ax88179_reset(dev);
return 0;
}
static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ struct ax88179_data *ax179_data = dev->driver_priv;
u16 tmp16;
/* Configure RX control register => stop operation */
@@ -1442,6 +1318,8 @@ static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
/* Power down ethernet PHY */
tmp16 = 0;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+
+ kfree(ax179_data);
}
static void
@@ -1468,58 +1346,119 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
- /* This check is no longer done by usbnet */
- if (skb->len < dev->net->hard_header_len)
+ /* At the end of the SKB, there's a header telling us how many packets
+ * are bundled into this buffer and where we can find an array of
+ * per-packet metadata (which contains elements encoded into u16).
+ */
+
+ /* SKB contents for current firmware:
+ * <packet 1> <padding>
+ * ...
+ * <packet N> <padding>
+ * <per-packet metadata entry 1> <dummy header>
+ * ...
+ * <per-packet metadata entry N> <dummy header>
+ * <padding2> <rx_hdr>
+ *
+ * where:
+ * <packet N> contains pkt_len bytes:
+ * 2 bytes of IP alignment pseudo header
+ * packet received
+ * <per-packet metadata entry N> contains 4 bytes:
+ * pkt_len and fields AX_RXHDR_*
+ * <padding> 0-7 bytes to terminate at
+ * 8 bytes boundary (64-bit).
+ * <padding2> 4 bytes to make rx_hdr terminate at
+ * 8 bytes boundary (64-bit)
+ * <dummy-header> contains 4 bytes:
+ * pkt_len=0 and AX_RXHDR_DROP_ERR
+ * <rx-hdr> contains 4 bytes:
+ * pkt_cnt and hdr_off (offset of
+ * <per-packet metadata entry 1>)
+ *
+	 * pkt_cnt is the number of entries in the per-packet metadata.
+	 * In current firmware there are 2 entries per packet.
+	 * The first points to the packet and the
+	 * second is a dummy header.
+	 * This was probably done to align fields to 64 bits and
+ * maintain compatibility with old firmware.
+ * This code assumes that <dummy header> and <padding2> are
+ * optional.
+ */
+
+ if (skb->len < 4)
return 0;
-
skb_trim(skb, skb->len - 4);
rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
-
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
+
+ if (pkt_cnt == 0)
+ return 0;
+
+ /* Make sure that the bounds of the metadata array are inside the SKB
+ * (and in front of the counter at the end).
+ */
+ if (pkt_cnt * 4 + hdr_off > skb->len)
+ return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
- while (pkt_cnt--) {
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, hdr_off);
+
+ for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
+ u16 pkt_len_plus_padd;
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+ pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
+
+	/* Skip dummy header used for alignment */
+ if (pkt_len == 0)
+ continue;
+
+ if (pkt_len_plus_padd > skb->len)
+ return 0;
/* Check CRC or runt packet */
- if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
- (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
+ if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
+ pkt_len < 2 + ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ skb_pull(skb, pkt_len_plus_padd);
continue;
}
- if (pkt_cnt == 0) {
- skb->len = pkt_len;
+ /* last packet */
+ if (pkt_len_plus_padd == skb->len) {
+ skb_trim(skb, pkt_len);
+
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb_set_tail_pointer(skb, skb->len);
- skb->truesize = pkt_len + sizeof(struct sk_buff);
+
+ skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
- ax_skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
- skb_set_tail_pointer(ax_skb, ax_skb->len);
- ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(ax_skb, pkt_hdr);
- usbnet_skb_return(dev, ax_skb);
- } else {
+ if (!ax_skb)
return 0;
- }
+ skb_trim(ax_skb, pkt_len);
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+
+ skb->truesize = pkt_len_plus_padd +
+ SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+
+ skb_pull(skb, pkt_len_plus_padd);
}
- return 1;
+
+ return 0;
}
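
The layout comment in the hunk above packs two 16-bit fields into the trailing rx_hdr word. A minimal userspace model of the decode plus the new bounds check — helper names are illustrative, and the buffer below is hand-built test data, not real device output:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Mirrors the early exits of ax88179_rx_fixup: 0 means drop the URB. */
static int rx_hdr_ok(const uint8_t *buf, size_t len)
{
	uint32_t rx_hdr;
	uint16_t pkt_cnt, hdr_off;

	if (len < 4)
		return 0;
	rx_hdr = get_le32(buf + len - 4);	/* trailer word */
	pkt_cnt = (uint16_t)rx_hdr;		/* low 16 bits */
	hdr_off = (uint16_t)(rx_hdr >> 16);	/* high 16 bits */
	if (pkt_cnt == 0)
		return 0;
	/* metadata array must sit inside the buffer, before the trailer */
	if ((size_t)pkt_cnt * 4 + hdr_off > len - 4)
		return 0;
	return 1;
}

int main(void)
{
	uint8_t buf[16] = { 0 };

	/* 2 metadata entries at offset 4: rx_hdr = 0x00040002 */
	buf[12] = 0x02;
	buf[14] = 0x04;
	printf("ok=%d\n", rx_hdr_ok(buf, sizeof(buf)));
	return 0;
}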
static struct sk_buff *
@@ -1557,7 +1496,7 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
static int ax88179_link_reset(struct usbnet *dev)
{
- struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+ struct ax88179_data *ax179_data = dev->driver_priv;
u8 tmp[5], link_sts;
u16 mode, tmp16, delay = HZ / 10;
u32 tmp32 = 0x40000000;
@@ -1632,7 +1571,7 @@ static int ax88179_reset(struct usbnet *dev)
u8 buf[5];
u16 *tmp16;
u8 *tmp;
- struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+ struct ax88179_data *ax179_data = dev->driver_priv;
struct ethtool_eee eee_data;
tmp16 = (u16 *)buf;
@@ -1651,10 +1590,11 @@ static int ax88179_reset(struct usbnet *dev)
msleep(100);
/* Ethernet PHY Auto Detach*/
- ax88179_auto_detach(dev, 0);
+ ax88179_auto_detach(dev);
/* Read MAC address from DTB or asix chip */
ax88179_get_mac_addr(dev);
+ memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
/* RX bulk configuration */
memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
@@ -1669,12 +1609,6 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
1, 1, tmp);
- dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
-
- dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
-
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
@@ -1700,6 +1634,12 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
2, 2, tmp16);
+ /* Check if WoL is supported */
+ ax179_data->wol_supported = 0;
+ if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &tmp) > 0)
+ ax179_data->wol_supported = WAKE_MAGIC | WAKE_PHY;
+
ax88179_led_setting(dev);
ax179_data->eee_enabled = 0;
@@ -1862,47 +1802,98 @@ static const struct driver_info mct_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info at_umc2000_info = {
+ .description = "AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info at_umc200_info = {
+ .description = "AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info at_umc2000sp_info = {
+ .description = "AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
- USB_DEVICE(0x0b95, 0x1790),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x1790, 0xff, 0xff, 0),
.driver_info = (unsigned long)&ax88179_info,
}, {
/* ASIX AX88178A 10/100/1000 */
- USB_DEVICE(0x0b95, 0x178a),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x178a, 0xff, 0xff, 0),
.driver_info = (unsigned long)&ax88178a_info,
}, {
/* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
- USB_DEVICE(0x04b4, 0x3610),
+ USB_DEVICE_AND_INTERFACE_INFO(0x04b4, 0x3610, 0xff, 0xff, 0),
.driver_info = (unsigned long)&cypress_GX3_info,
}, {
/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
- USB_DEVICE(0x2001, 0x4a00),
+ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x4a00, 0xff, 0xff, 0),
.driver_info = (unsigned long)&dlink_dub1312_info,
}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
- USB_DEVICE(0x0df6, 0x0072),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0072, 0xff, 0xff, 0),
.driver_info = (unsigned long)&sitecom_info,
}, {
/* Samsung USB Ethernet Adapter */
- USB_DEVICE(0x04e8, 0xa100),
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e8, 0xa100, 0xff, 0xff, 0),
.driver_info = (unsigned long)&samsung_info,
}, {
/* Lenovo OneLinkDock Gigabit LAN */
- USB_DEVICE(0x17ef, 0x304b),
+ USB_DEVICE_AND_INTERFACE_INFO(0x17ef, 0x304b, 0xff, 0xff, 0),
.driver_info = (unsigned long)&lenovo_info,
}, {
/* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
- USB_DEVICE(0x050d, 0x0128),
+ USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x0128, 0xff, 0xff, 0),
.driver_info = (unsigned long)&belkin_info,
}, {
/* Toshiba USB 3.0 GBit Ethernet Adapter */
- USB_DEVICE(0x0930, 0x0a13),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x0a13, 0xff, 0xff, 0),
.driver_info = (unsigned long)&toshiba_info,
}, {
/* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */
- USB_DEVICE(0x0711, 0x0179),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0711, 0x0179, 0xff, 0xff, 0),
.driver_info = (unsigned long)&mct_info,
+}, {
+ /* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
+ USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000e, 0xff, 0xff, 0),
+ .driver_info = (unsigned long)&at_umc2000_info,
+}, {
+ /* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */
+ USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000f, 0xff, 0xff, 0),
+ .driver_info = (unsigned long)&at_umc200_info,
+}, {
+ /* Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
+ USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x0010, 0xff, 0xff, 0),
+ .driver_info = (unsigned long)&at_umc2000sp_info,
},
{ },
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index e7fe9c0f63a9..ff439ef535ac 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -8,13 +8,13 @@
*
* Based on the work of
* Donald Becker
- *
+ *
* Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
* - adds support for Belkin F5U011
*/
/*
- *
+ *
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@@ -54,7 +54,7 @@ static const char driver_name[] = "catc";
/*
* Some defines.
- */
+ */
#define STATS_UPDATE (HZ) /* Time between stats updates */
#define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */
@@ -280,7 +280,7 @@ static void catc_irq_done(struct urb *urb)
struct catc *catc = urb->context;
u8 *data = urb->transfer_buffer;
int status = urb->status;
- unsigned int hasdata = 0, linksts = LinkNoChange;
+ unsigned int hasdata, linksts = LinkNoChange;
int res;
if (!catc->is_f5u011) {
@@ -332,7 +332,7 @@ static void catc_irq_done(struct urb *urb)
dev_err(&catc->usbdev->dev,
"submit(rx_urb) status %d\n", res);
}
- }
+ }
}
resubmit:
res = usb_submit_urb (urb, GFP_ATOMIC);
@@ -538,7 +538,7 @@ static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
unsigned long flags;
spin_lock_irqsave(&catc->ctrl_lock, flags);
-
+
q = catc->ctrl_queue + catc->ctrl_head;
q->dir = dir;
@@ -639,7 +639,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
memset(catc->multicast, 0xff, 64);
rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
- }
+ }
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
@@ -672,8 +672,8 @@ static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
@@ -781,7 +781,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
intf->altsetting->desc.bInterfaceNumber, 1)) {
dev_err(dev, "Can't set altsetting 1.\n");
ret = -EIO;
- goto fail_mem;;
+ goto fail_mem;
}
netdev = alloc_etherdev(sizeof(struct catc));
@@ -806,7 +806,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
- if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
+ if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
ret = -ENOMEM;
@@ -814,17 +814,17 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
- if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
+ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
dev_dbg(dev, "Testing for f5u011\n");
- catc->is_f5u011 = 1;
+ catc->is_f5u011 = 1;
atomic_set(&catc->recq_sz, 0);
pktsz = RX_PKT_SZ;
} else {
pktsz = RX_MAX_BURST * (PKT_SZ + 2);
}
-
+
usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
NULL, NULL, 0, catc_ctrl_done, catc);
@@ -854,7 +854,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
*buf = 0x87654321;
catc_write_mem(catc, 0xfa80, buf, 4);
catc_read_mem(catc, 0x7a80, buf, 4);
-
+
switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
@@ -873,32 +873,32 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
}
kfree(buf);
-
+
dev_dbg(dev, "Getting MAC from SEEROM.\n");
-
+
catc_get_mac(catc, macbuf);
eth_hw_addr_set(netdev, macbuf);
-
+
dev_dbg(dev, "Setting MAC into registers.\n");
-
+
for (i = 0; i < 6; i++)
catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
-
+
dev_dbg(dev, "Filling the multicast list.\n");
-
+
eth_broadcast_addr(broadcast);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
-
+
dev_dbg(dev, "Clearing error counters.\n");
-
+
for (i = 0; i < 8; i++)
catc_set_reg(catc, EthStats + i, 0);
catc->last_stats = jiffies;
-
+
dev_dbg(dev, "Enabling.\n");
-
+
catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
catc_set_reg(catc, LEDCtrl, LEDLink);
@@ -908,7 +908,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc_reset(catc);
catc_get_mac(catc, macbuf);
eth_hw_addr_set(netdev, macbuf);
-
+
dev_dbg(dev, "Setting RX Mode\n");
catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
catc->rxmode[1] = 0;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 359ea0d10e59..baa9b14b1644 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -218,7 +218,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(!skb2))
goto next;
skb_trim(skb2, len);
- put_unaligned_le16(BIT(15) | (1 << 11) | len,
+ put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
eem_linkcmd(dev, skb2);
break;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index eb3817d70f2b..e11f70911acc 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf)
* device MAC address has been updated). Always set MAC address to that of the
* device.
*/
-static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02))
return 1;
@@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
+EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup);
/* Ensure correct link state
*
@@ -583,6 +584,11 @@ static const struct usb_device_id products[] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
@@ -640,6 +646,13 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
/* reported with some C860 units */
.idProduct = 0x9050, /* C-860 */
ZAURUS_MASTER_INTERFACE,
@@ -764,6 +777,13 @@ static const struct usb_device_id products[] = {
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 82bb5ed94c48..c89639381eca 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,7 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ipv6_stubs.h>
+#include <net/ndisc.h>
/* alternative VLAN for IP session 0 if not untagged */
#define MBIM_IPS0_VID 4094
@@ -659,6 +660,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FN990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e303b522efb5..8d5cbda33f66 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -441,7 +441,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
* .bind which is called before usbnet sets up dev->maxpacket
*/
if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
- val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ val % usb_maxpacket(dev->udev, dev->out) == 0)
val++;
/* we might need to flush any pending tx buffers if running */
@@ -465,7 +465,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
usbnet_update_max_qlen(dev);
/* never pad more than 3 full USB packets per transfer */
- ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+ ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out),
CDC_NCM_MIN_TX_PKT, ctx->tx_max);
}
@@ -1492,19 +1492,19 @@ static void cdc_ncm_txpath_bh(struct tasklet_struct *t)
struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh);
struct usbnet *dev = ctx->dev;
- spin_lock_bh(&ctx->mtx);
+ spin_lock(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
} else if (dev->net != NULL) {
ctx->tx_reason_timeout++; /* count reason for transmitting */
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
netif_tx_unlock_bh(dev->net);
} else {
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
}
}
@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
+ unsigned int len;
int nframes;
int x;
- int offset;
+ unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
@@ -1790,8 +1790,8 @@ next_ndp:
break;
}
- /* sanity checking */
- if (((offset + len) > skb_in->len) ||
+		/* sanity checking - watch out for integer wrap */
+ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
@@ -1892,7 +1892,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
}
static const struct driver_info cdc_ncm_info = {
- .description = "CDC NCM",
+ .description = "CDC NCM (NO ZLP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
| FLAG_LINK_INTR | FLAG_ETHER,
.bind = cdc_ncm_bind,
@@ -1904,6 +1904,19 @@ static const struct driver_info cdc_ncm_info = {
.set_rx_mode = usbnet_cdc_update_filter,
};
+/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
+static const struct driver_info cdc_ncm_zlp_info = {
+ .description = "CDC NCM (SEND ZLP)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
/* Same as cdc_ncm_info, but with FLAG_WWAN */
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
@@ -2010,6 +2023,16 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
+ /* DisplayLink docking stations */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x17e9,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long)&cdc_ncm_zlp_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
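
The rx_fixup sanity-check hunk above replaces (offset + len) > skb_in->len because the sum of two unsigned values can wrap around and slip past the bound. A self-contained demonstration with deliberately extreme values:

#include <stdint.h>
#include <stdio.h>

static int frame_ok_unsafe(uint32_t offset, uint32_t len, uint32_t skb_len)
{
	/* offset + len can wrap past UINT32_MAX and pass the check */
	return offset + len <= skb_len;
}

static int frame_ok_safe(uint32_t offset, uint32_t len, uint32_t skb_len)
{
	/* each comparand is bounded before subtracting, so no wrap */
	return offset <= skb_len && len <= skb_len - offset;
}

int main(void)
{
	uint32_t skb_len = 1500, offset = 100, len = UINT32_MAX - 50;

	/* unsafe wraps to 49 <= 1500 and accepts the bogus frame */
	printf("unsafe: %d safe: %d\n",
	       frame_ok_unsafe(offset, len, skb_len),
	       frame_ok_safe(offset, len, skb_len));
	return 0;
}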
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 32637df0f4cc..f4a44f05c6ab 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -120,7 +120,7 @@ static const struct driver_info an2720_info = {
#endif /* CONFIG_USB_AN2720 */
-
+
#ifdef CONFIG_USB_BELKIN
#define HAVE_HARDWARE
@@ -140,7 +140,7 @@ static const struct driver_info belkin_info = {
#endif /* CONFIG_USB_BELKIN */
-
+
#ifdef CONFIG_USB_EPSON2888
#define HAVE_HARDWARE
@@ -167,7 +167,7 @@ static const struct driver_info epson2888_info = {
#endif /* CONFIG_USB_EPSON2888 */
-
+
/*-------------------------------------------------------------------------
*
* info from Jonathan McDowell <noodles@earth.li>
@@ -181,7 +181,7 @@ static const struct driver_info kc2190_info = {
};
#endif /* CONFIG_USB_KC2190 */
-
+
#ifdef CONFIG_USB_ARMLINUX
#define HAVE_HARDWARE
@@ -222,7 +222,7 @@ static const struct driver_info blob_info = {
#endif /* CONFIG_USB_ARMLINUX */
-
+
/*-------------------------------------------------------------------------*/
#ifndef HAVE_HARDWARE
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 13a9a83b8538..46af78caf457 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -56,7 +56,7 @@
struct gl_packet {
__le32 packet_length;
- char packet_data [1];
+ char packet_data[];
};
struct gl_header {
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f97813a4e8d1..ce1f6081d582 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1380,7 +1380,8 @@ static void hso_serial_cleanup(struct tty_struct *tty)
}
/* setup the term */
-static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void hso_serial_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct hso_serial *serial = tty->driver_data;
unsigned long flags;
@@ -2319,7 +2320,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
{
struct hso_device *hso_dev;
- hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC);
+ hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL);
if (!hso_dev)
return NULL;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index cd33955df0b6..6a769df0b421 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -146,7 +146,7 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
iphone->rx_urb->transfer_dma);
usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
iphone->tx_urb->transfer_dma);
@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
usb_fill_bulk_urb(dev->rx_urb, udev,
usb_rcvbulkpipe(udev, dev->bulk_in),
- dev->rx_buf, IPHETH_BUF_SIZE,
+ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
ipheth_rcvbulk_callback,
dev);
dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
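
The IPHETH_IP_ALIGN hunks above enlarge only the receive buffer. Assuming the usual 2-byte NET_IP_ALIGN rationale, the pad restores 4-byte alignment of the IP header behind the 14-byte Ethernet header — a pure-arithmetic sketch:

#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen = 14, ip_align = 2;

	printf("without pad: IP header at offset %u (mod 4 = %u)\n",
	       eth_hlen, eth_hlen % 4);
	printf("with pad:    IP header at offset %u (mod 4 = %u)\n",
	       ip_align + eth_hlen, (ip_align + eth_hlen) % 4);
	return 0;
}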
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 9b2bc1993ece..c9efb7df892e 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -221,7 +221,7 @@ struct kaweth_device
dma_addr_t rxbufferhandle;
__u8 *rx_buf;
-
+
struct sk_buff *tx_skb;
__u8 *firmware_buf;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b8e20a3f2b84..f18ab8e220db 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -92,8 +92,6 @@
WAKE_MCAST | WAKE_BCAST | \
WAKE_ARP | WAKE_MAGIC)
-#define LAN78XX_NAPI_WEIGHT 64
-
#define TX_URB_NUM 10
#define TX_SS_URB_NUM TX_URB_NUM
#define TX_HS_URB_NUM TX_URB_NUM
@@ -1537,11 +1535,8 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
- if (dev->domain_data.phyirq > 0) {
- local_irq_disable();
- generic_handle_irq(dev->domain_data.phyirq);
- local_irq_enable();
- }
+ if (dev->domain_data.phyirq > 0)
+ generic_handle_irq_safe(dev->domain_data.phyirq);
} else {
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
@@ -4377,9 +4372,9 @@ static int lan78xx_probe(struct usb_interface *intf,
/* MTU range: 68 - 9000 */
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
- netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
+ netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
- netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
+ netif_napi_add(netdev, &dev->napi, lan78xx_poll);
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
@@ -4426,7 +4421,7 @@ static int lan78xx_probe(struct usb_interface *intf,
goto out4;
period = ep_intr->desc.bInterval;
- maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+ maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
buf = kmalloc(maxp, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -4444,7 +4439,7 @@ static int lan78xx_probe(struct usb_interface *intf,
dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
- dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index feb247e355f7..81ca64debc5b 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -894,7 +894,7 @@ static void pegasus_get_drvinfo(struct net_device *dev,
{
pegasus_t *pegasus = netdev_priv(dev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 17c9c63b8eeb..2c82fbcaab22 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -18,7 +18,7 @@
/*
- * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
+ * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
*
* The protocol and handshaking used here should be bug-compatible
* with the Linux 2.2 "plusb" driver, by Deti Fliegl.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f510e8219470..26c34a7c21bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -190,7 +190,6 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
if (!skbn)
return 0;
- skbn->dev = net;
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
@@ -1088,6 +1087,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1316,6 +1316,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
+ {QMI_FIXED_INTF(0x19d2, 0x1485, 5)}, /* ZTE MF286D */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
@@ -1350,6 +1351,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0xc081, 8)}, /* Sierra Wireless EM7590 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1357,6 +1359,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
@@ -1364,6 +1367,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1250, 0)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
@@ -1386,6 +1390,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)}, /* Cinterion MV31 RmNet based on new baseline */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f3, 0)}, /* Cinterion MV32-W-A RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f4, 0)}, /* Cinterion MV32-W-B RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -1395,12 +1402,16 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+ {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ee41088c5251..a481a1d831e2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "12"
/* Information for net */
-#define NET_VERSION "12"
+#define NET_VERSION "13"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -770,8 +770,12 @@ enum rtl8152_flags {
RX_EPROTO,
};
+#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
+#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
+#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062
struct tally_counter {
__le64 tx_packets;
@@ -1871,7 +1875,9 @@ static void intr_callback(struct urb *urb)
"Stop submitting intr, status %d\n", status);
return;
case -EOVERFLOW:
- netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ if (net_ratelimit())
+ netif_info(tp, intr, tp->netdev,
+ "intr status -EOVERFLOW\n");
goto resubmit;
/* -EPIPE: should clear the halt */
default:
@@ -2154,7 +2160,7 @@ static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)
}
static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
- struct sk_buff *skb, u32 len, u32 transport_offset)
+ struct sk_buff *skb, u32 len)
{
u32 mss = skb_shinfo(skb)->gso_size;
u32 opts1, opts2 = 0;
@@ -2165,6 +2171,8 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
opts1 = len | TX_FS | TX_LS;
if (mss) {
+ u32 transport_offset = (u32)skb_transport_offset(skb);
+
if (transport_offset > GTTCPHO_MAX) {
netif_warn(tp, tx_err, tp->netdev,
"Invalid transport offset 0x%x for TSO\n",
@@ -2195,6 +2203,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
opts1 |= transport_offset << GTTCPHO_SHIFT;
opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u32 transport_offset = (u32)skb_transport_offset(skb);
u8 ip_protocol;
if (transport_offset > TCPHO_MAX) {
@@ -2258,7 +2267,6 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int len;
- u32 offset;
skb = __skb_dequeue(&skb_head);
if (!skb)
@@ -2274,9 +2282,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
tx_data = tx_agg_align(tx_data);
tx_desc = (struct tx_desc *)tx_data;
- offset = (u32)skb_transport_offset(skb);
-
- if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+ if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) {
r8152_csum_workaround(tp, skb, &skb_head);
continue;
}
@@ -2724,22 +2730,26 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
ocp_data |= RCR_AM | RCR_AAP;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(netdev) > multicast_filter_limit) ||
- (netdev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev->flags & IFF_MULTICAST &&
+ netdev_mc_count(netdev) > multicast_filter_limit) ||
+ (netdev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
ocp_data |= RCR_AM;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
} else {
- struct netdev_hw_addr *ha;
-
mc_filter[1] = 0;
mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- ocp_data |= RCR_AM;
+ if (netdev->flags & IFF_MULTICAST) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ ocp_data |= RCR_AM;
+ }
}
}
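
The reworked filter loop above keeps the classic 64-bin multicast hash: the top six bits of the Ethernet CRC select one bit across two 32-bit words. A sketch of just the indexing, where the CRC value is a stand-in rather than a real ether_crc() result:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mc_filter[2] = { 0, 0 };
	uint32_t crc = 0xdeadbeef;		/* placeholder CRC */
	unsigned int bit_nr = crc >> 26;	/* 0..63 */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("bit %u -> word %u, mask 0x%08x\n",
	       bit_nr, bit_nr >> 5, 1u << (bit_nr & 31));
	return 0;
}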
@@ -2757,9 +2767,9 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
{
u32 mss = skb_shinfo(skb)->gso_size;
int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
- int offset = skb_transport_offset(skb);
- if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) &&
+ skb_transport_offset(skb) > max_offset)
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
features &= ~NETIF_F_GSO_MASK;
@@ -5904,6 +5914,11 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -5915,7 +5930,8 @@ static void r8153_enter_oob(struct r8152 *tp)
wait_oob_link_list_ready(tp);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
switch (tp->version) {
case RTL_VER_03:
@@ -5951,6 +5967,10 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -6424,21 +6444,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
- switch (tp->version) {
- case RTL_VER_10:
- case RTL_VER_11:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
- break;
- case RTL_VER_12:
- case RTL_VER_13:
- case RTL_VER_15:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
- break;
- default:
- break;
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
}
static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6550,9 +6557,17 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
+
/* Clear teredo wake event. bit[15:8] is the teredo wakeup
* type. Set it to zero. bits[7:0] are the W1C bits about
* the events. Set them to all 1 to clear them.
@@ -6563,6 +6578,10 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data |= NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
rtl_rx_vlan_en(tp, true);
rxdy_gated_en(tp, false);
@@ -8590,11 +8609,11 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
{
struct r8152 *tp = netdev_priv(netdev);
- strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, MODULENAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
- strlcpy(info->fw_version, tp->rtl_fw.version,
+ strscpy(info->fw_version, tp->rtl_fw.version,
sizeof(info->fw_version));
}
@@ -9562,6 +9581,31 @@ u8 rtl8152_get_version(struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(rtl8152_get_version);
+static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
+{
+ int parent_vendor_id = le16_to_cpu(udev->parent->descriptor.idVendor);
+ int product_id = le16_to_cpu(udev->descriptor.idProduct);
+ int vendor_id = le16_to_cpu(udev->descriptor.idVendor);
+
+ if (vendor_id == VENDOR_ID_LENOVO) {
+ switch (product_id) {
+ case DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB:
+ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
+ case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
+ case DEVICE_ID_THINKPAD_USB_C_DONGLE:
+ return 1;
+ }
+ } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) {
+ switch (product_id) {
+ case 0x8153:
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -9642,13 +9686,7 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
- switch (le16_to_cpu(udev->descriptor.idProduct)) {
- case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
- case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
- tp->lenovo_macpassthru = 1;
- }
- }
+ tp->lenovo_macpassthru = rtl8152_supports_lenovo_macpassthru(udev);
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
(!strcmp(udev->serial, "000001000000") ||
@@ -9658,7 +9696,7 @@ static int rtl8152_probe(struct usb_interface *intf,
}
netdev->ethtool_ops = &ops;
- netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
+ netif_set_tso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
/* MTU range: 68 - 1500 or 9194 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -9732,10 +9770,8 @@ static int rtl8152_probe(struct usb_interface *intf,
usb_set_intfdata(intf, tp);
- if (tp->support_2500full)
- netif_napi_add(netdev, &tp->napi, r8152_poll, 256);
- else
- netif_napi_add(netdev, &tp->napi, r8152_poll, 64);
+ netif_napi_add_weight(netdev, &tp->napi, r8152_poll,
+ tp->support_2500full ? 256 : 64);
ret = register_netdev(netdev);
if (ret != 0) {
@@ -9802,6 +9838,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 247f58cb0f84..f79333fe1783 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -333,7 +333,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
net->hard_header_len += sizeof (struct rndis_data_hdr);
dev->hard_mtu = net->mtu + net->hard_header_len;
- dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
if (dev->maxpacket == 0) {
netif_dbg(dev, probe, dev->net,
"dev->maxpacket can't be 0\n");
@@ -418,10 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
- if (bp[0] & 0x02)
- eth_hw_addr_random(net);
- else
- eth_hw_addr_set(net, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
@@ -463,6 +460,16 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS);
}
+static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = rndis_bind(dev, intf);
+
+ if (!status && (dev->net->dev_addr[0] & 0x02))
+ eth_hw_addr_random(dev->net);
+
+ return status;
+}
+
void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct rndis_halt *halt;
@@ -485,10 +492,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ bool dst_mac_fixup;
+
/* This check is no longer done by usbnet */
if (skb->len < dev->net->hard_header_len)
return 0;
+ dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP);
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
@@ -523,10 +534,17 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
skb_pull(skb, msg_len - sizeof *hdr);
skb_trim(skb2, data_len);
+
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb2);
+
usbnet_skb_return(dev, skb2);
}
/* caller will usbnet_skb_return the remaining packet */
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb);
+
return 1;
}
EXPORT_SYMBOL_GPL(rndis_rx_fixup);
@@ -600,6 +618,17 @@ static const struct driver_info rndis_poll_status_info = {
.tx_fixup = rndis_tx_fixup,
};
+static const struct driver_info zte_rndis_info = {
+ .description = "ZTE RNDIS device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP,
+ .bind = zte_rndis_bind,
+ .unbind = rndis_unbind,
+ .status = rndis_status,
+ .rx_fixup = rndis_rx_fixup,
+ .tx_fixup = rndis_tx_fixup,
+};
+
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -614,6 +643,16 @@ static const struct usb_device_id products [] = {
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long)&rndis_info,
}, {
+ /* ZTE WWAN modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
+ /* ZTE WWAN modules, ACM flavour */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_info,
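
zte_rndis_bind above randomizes the MAC only when bit 1 of the first octet is set — the locally administered flag these modems are reported to set spuriously. A tiny model of the check, using made-up addresses:

#include <stdint.h>
#include <stdio.h>

static int is_local_admin(const uint8_t *mac)
{
	return mac[0] & 0x02;	/* locally administered bit */
}

int main(void)
{
	uint8_t bogus[6]  = { 0x02, 0x00, 0x00, 0x11, 0x22, 0x33 };
	uint8_t vendor[6] = { 0x00, 0x19, 0xd2, 0x11, 0x22, 0x33 };

	printf("bogus: %d vendor: %d\n",
	       is_local_admin(bogus), is_local_admin(vendor));
	return 0;
}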
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 3d2bf2acca94..97afd7335d86 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -769,8 +769,8 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
{
rtl8150_t *dev = netdev_priv(netdev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index bb4cbe8fc846..b3ae949e6f1c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -612,8 +612,8 @@ static void sierra_net_get_drvinfo(struct net_device *net,
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static u32 sierra_net_get_link(struct net_device *net)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index abe0149ed917..bfb58c91db04 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -18,8 +18,12 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/of_net.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mdio.h>
#include <linux/phy.h>
+#include <net/selftests.h>
+
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
@@ -51,6 +55,9 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define SMSC95XX_NR_IRQS (1) /* raise to 12 for GPIOs */
+#define PHY_HWIRQ (SMSC95XX_NR_IRQS - 1)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -59,24 +66,27 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ struct irq_chip irqchip;
+ struct irq_domain *irqdomain;
+ struct fwnode_handle *irqfwnode;
struct mii_bus *mdiobus;
struct phy_device *phydev;
+ struct task_struct *pm_task;
};
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data, int in_pm)
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
- BUG_ON(!dev);
-
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_read_cmd;
else
fn = usbnet_read_cmd_nopm;
@@ -84,9 +94,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
- netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
- index, ret);
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ index, ret);
return ret;
}
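
The smsc95xx rework above drops the in_pm parameter entirely: the suspend/resume path records its own task once, and each register helper compares against current to pick the _nopm accessor. A userspace analogue of the idea using pthreads (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_t pm_task;	/* set when suspend/resume starts */

/* Helpers no longer take an in_pm flag; they decide from context. */
static const char *pick_accessor(void)
{
	return pthread_equal(pthread_self(), pm_task) ?
	       "usbnet_read_cmd_nopm" : "usbnet_read_cmd";
}

int main(void)
{
	printf("normal path: %s\n", pick_accessor());
	pm_task = pthread_self();	/* pretend we entered resume() */
	printf("pm path:     %s\n", pick_accessor());
	return 0;
}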
@@ -96,16 +107,15 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
return ret;
}
-static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data, int in_pm)
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
- BUG_ON(!dev);
-
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_write_cmd;
else
fn = usbnet_write_cmd_nopm;
@@ -116,49 +126,27 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (ret < 0 && ret != -ENODEV)
netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
index, ret);
return ret;
}
-static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
- u32 *data)
-{
- return __smsc95xx_read_reg(dev, index, data, 1);
-}
-
-static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
- u32 data)
-{
- return __smsc95xx_write_reg(dev, index, data, 1);
-}
-
-static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
-{
- return __smsc95xx_read_reg(dev, index, data, 0);
-}
-
-static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
-{
- return __smsc95xx_write_reg(dev, index, data, 0);
-}
-
/* Loop until the read is completed with timeout
* called with phy_mutex held */
-static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
- int in_pm)
+static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
{
unsigned long start_time = jiffies;
u32 val;
int ret;
do {
- ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
+ ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
if (ret < 0) {
+ /* Ignore -ENODEV error during disconnect() */
+ if (ret == -ENODEV)
+ return 0;
netdev_warn(dev->net, "Error reading MII_ACCESS\n");
return ret;
}
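
The in_pm flag that used to be threaded through every register helper is replaced by recording which task runs the PM callbacks; the accessors then ask "am I the PM task?" to pick the nopm USB helpers. A minimal userspace sketch of that dispatch idea, using pthreads in place of kernel tasks (all names below are illustrative, not from the driver):

#include <pthread.h>
#include <stdio.h>

/* Thread currently running "PM" work; checked via pm_task_valid. */
static pthread_t pm_task;
static int pm_task_valid;

static int backend_normal(int reg) { printf("normal read 0x%x\n", reg); return 0; }
static int backend_nopm(int reg)   { printf("nopm   read 0x%x\n", reg); return 0; }

/* Pick the backend by comparing against the recorded PM task
 * instead of passing an in_pm flag down every call chain. */
static int read_reg(int reg)
{
	int (*fn)(int) = (pm_task_valid &&
			  pthread_equal(pthread_self(), pm_task))
			 ? backend_nopm : backend_normal;
	return fn(reg);
}

static void *do_pm_work(void *arg)
{
	pm_task = pthread_self();
	pm_task_valid = 1;
	read_reg(0x14);		/* dispatches to backend_nopm */
	pm_task_valid = 0;
	return NULL;
}

int main(void)
{
	pthread_t t;

	read_reg(0x14);		/* dispatches to backend_normal */
	pthread_create(&t, NULL, do_pm_work, NULL);
	pthread_join(&t, NULL);
	return 0;
}

Tracking the task rather than a flag is what lets the shared MDIO and register helpers below lose their duplicated _nopm variants.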
@@ -175,8 +163,7 @@ static u32 mii_address_cmd(int phy_id, int idx, u16 op)
return (phy_id & 0x1f) << 11 | (idx & 0x1f) << 6 | op;
}
-static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
- int in_pm)
+static int smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx)
{
u32 val, addr;
int ret;
@@ -184,7 +171,7 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
@@ -192,21 +179,23 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
/* set the address, index & direction (read from PHY) */
addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
- ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
goto done;
}
- ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
+ ret = smsc95xx_read_reg(dev, MII_DATA, &val);
if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_DATA\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error reading MII_DATA\n");
goto done;
}
@@ -214,11 +203,15 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
done:
mutex_unlock(&dev->phy_mutex);
+
+ /* Ignore -ENODEV error during disconnect() */
+ if (ret == -ENODEV)
+ return 0;
return ret;
}
-static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
- int idx, int regval, int in_pm)
+static void smsc95xx_mdio_write(struct usbnet *dev, int phy_id, int idx,
+ int regval)
{
u32 val, addr;
int ret;
@@ -226,28 +219,30 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
val = regval;
- ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_DATA, val);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_DATA\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_DATA\n");
goto done;
}
/* set the address, index & direction (write to PHY) */
addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
- ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ if (ret != -ENODEV)
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
goto done;
}
- ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ ret = smsc95xx_phy_wait_not_busy(dev);
if (ret < 0) {
netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
goto done;
@@ -257,25 +252,11 @@ done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_mdio_read_nopm(struct usbnet *dev, int idx)
-{
- struct smsc95xx_priv *pdata = dev->driver_priv;
-
- return __smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, idx, 1);
-}
-
-static void smsc95xx_mdio_write_nopm(struct usbnet *dev, int idx, int regval)
-{
- struct smsc95xx_priv *pdata = dev->driver_priv;
-
- __smsc95xx_mdio_write(dev, pdata->phydev->mdio.addr, idx, regval, 1);
-}
-
static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
struct usbnet *dev = bus->priv;
- return __smsc95xx_mdio_read(dev, phy_id, idx, 0);
+ return smsc95xx_mdio_read(dev, phy_id, idx);
}
static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
@@ -283,7 +264,7 @@ static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
{
struct usbnet *dev = bus->priv;
- __smsc95xx_mdio_write(dev, phy_id, idx, regval, 0);
+ smsc95xx_mdio_write(dev, phy_id, idx, regval);
return 0;
}
@@ -552,16 +533,12 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev)
return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
}
-static int smsc95xx_link_reset(struct usbnet *dev)
+static void smsc95xx_mac_update_fullduplex(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
- ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- if (ret < 0)
- return ret;
-
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (pdata->phydev->duplex != DUPLEX_FULL) {
pdata->mac_cr &= ~MAC_CR_FDPX_;
@@ -573,18 +550,22 @@ static int smsc95xx_link_reset(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ netdev_warn(dev->net,
+ "Error updating MAC full duplex mode\n");
+ return;
+ }
ret = smsc95xx_phy_update_flowcontrol(dev);
if (ret < 0)
netdev_warn(dev->net, "Error updating PHY flow control\n");
-
- return ret;
}
static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ unsigned long flags;
u32 intdata;
if (urb->actual_length != 4) {
@@ -596,11 +577,15 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata = get_unaligned_le32(urb->transfer_buffer);
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
+ local_irq_save(flags);
+
if (intdata & INT_ENP_PHY_INT_)
- usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ generic_handle_domain_irq(pdata->irqdomain, PHY_HWIRQ);
else
netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
intdata);
+
+ local_irq_restore(flags);
}
/* Enable or disable Tx & Rx checksum offload engines */
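
Instead of deferring a usbnet kevent, the status URB completion now demultiplexes the interrupt word straight into the driver's irq domain, which is why it runs under local_irq_save(). A hedged kernel-style sketch of the same demux pattern (MY_* constants and demux_status() are hypothetical; assumes linux/irqdomain.h and linux/bitops.h):

#define MY_NR_IRQS	4		/* hypothetical */
#define MY_IRQ_MASK	GENMASK(3, 0)	/* hypothetical */

/* Translate a device status word into domain hwirqs. Hard irqs
 * must be disabled around generic_handle_domain_irq() when this
 * runs in task/BH context, hence the local_irq_save(). */
static void demux_status(struct irq_domain *domain, u32 status)
{
	unsigned long flags, pending = status & MY_IRQ_MASK;
	unsigned int hwirq;

	local_irq_save(flags);
	for_each_set_bit(hwirq, &pending, MY_NR_IRQS)
		generic_handle_domain_irq(domain, hwirq);
	local_irq_restore(flags);
}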
@@ -727,6 +712,26 @@ static u32 smsc95xx_get_link(struct net_device *net)
return net->phydev->link;
}
+static void smsc95xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
+ }
+}
+
+static int smsc95xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = smsc95xx_get_link,
.nway_reset = phy_ethtool_nway_reset,
@@ -743,6 +748,9 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .self_test = net_selftest,
+ .get_strings = smsc95xx_ethtool_get_strings,
+ .get_sset_count = smsc95xx_ethtool_get_sset_count,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
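
With net_selftest() wired into .self_test and the ETH_SS_TEST string set exposed above, the generic link-layer selftests become reachable from userspace via `ethtool -t <iface> [online|offline]`.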
@@ -816,7 +824,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
}
/* Starts the Receive path */
-static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
+static int smsc95xx_start_rx_path(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
@@ -825,7 +833,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
+ return smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
}
static int smsc95xx_reset(struct usbnet *dev)
@@ -854,24 +862,6 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
}
- ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
- if (ret < 0)
- return ret;
-
- timeout = 0;
- do {
- msleep(10);
- ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
- if (ret < 0)
- return ret;
- timeout++;
- } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
-
- if (timeout >= 100) {
- netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
- return ret;
- }
-
ret = smsc95xx_set_mac_address(dev);
if (ret < 0)
return ret;
@@ -1026,7 +1016,7 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
}
- ret = smsc95xx_start_rx_path(dev, 0);
+ ret = smsc95xx_start_rx_path(dev);
if (ret < 0) {
netdev_warn(dev->net, "Failed to start RX path\n");
return ret;
@@ -1055,6 +1045,7 @@ static void smsc95xx_handle_link_change(struct net_device *net)
struct usbnet *dev = netdev_priv(net);
phy_print_status(net->phydev);
+ smsc95xx_mac_update_fullduplex(dev);
usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
@@ -1062,8 +1053,9 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata;
bool is_internal_phy;
+ char usb_path[64];
+ int ret, phy_irq;
u32 val;
- int ret;
printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
@@ -1103,10 +1095,38 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto free_pdata;
+ /* create irq domain for use by PHY driver and GPIO consumers */
+ usb_make_path(dev->udev, usb_path, sizeof(usb_path));
+ pdata->irqfwnode = irq_domain_alloc_named_fwnode(usb_path);
+ if (!pdata->irqfwnode) {
+ ret = -ENOMEM;
+ goto free_pdata;
+ }
+
+ pdata->irqdomain = irq_domain_create_linear(pdata->irqfwnode,
+ SMSC95XX_NR_IRQS,
+ &irq_domain_simple_ops,
+ pdata);
+ if (!pdata->irqdomain) {
+ ret = -ENOMEM;
+ goto free_irqfwnode;
+ }
+
+ phy_irq = irq_create_mapping(pdata->irqdomain, PHY_HWIRQ);
+ if (!phy_irq) {
+ ret = -ENOENT;
+ goto remove_irqdomain;
+ }
+
+ pdata->irqchip = dummy_irq_chip;
+ pdata->irqchip.name = SMSC_CHIPNAME;
+ irq_set_chip_and_handler_name(phy_irq, &pdata->irqchip,
+ handle_simple_irq, "phy");
+
pdata->mdiobus = mdiobus_alloc();
if (!pdata->mdiobus) {
ret = -ENOMEM;
- goto free_pdata;
+ goto dispose_irq;
}
ret = smsc95xx_read_reg(dev, HW_CFG, &val);
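
The bind path publishes a small irq domain named after the USB path so the PHY gets a real Linux interrupt instead of polling. A condensed sketch of the create/map sequence using the same kernel APIs (struct my_priv and my_irq_setup() are placeholders; the error unwinding mirrors the goto ladder in the hunk above):

struct my_priv {
	struct irq_chip chip;
	struct irq_domain *domain;
	struct fwnode_handle *fwnode;
	int virq;
};

static int my_irq_setup(struct my_priv *priv, const char *name)
{
	int virq;

	priv->fwnode = irq_domain_alloc_named_fwnode(name);
	if (!priv->fwnode)
		return -ENOMEM;

	priv->domain = irq_domain_create_linear(priv->fwnode, 1,
						&irq_domain_simple_ops, priv);
	if (!priv->domain) {
		irq_domain_free_fwnode(priv->fwnode);
		return -ENOMEM;
	}

	virq = irq_create_mapping(priv->domain, 0);
	if (!virq) {
		irq_domain_remove(priv->domain);
		irq_domain_free_fwnode(priv->fwnode);
		return -ENOENT;
	}

	/* no real mask/unmask hardware, so borrow the dummy chip */
	priv->chip = dummy_irq_chip;
	priv->chip.name = "my-chip";
	irq_set_chip_and_handler_name(virq, &priv->chip,
				      handle_simple_irq, "phy");
	priv->virq = virq;	/* consumer side: phydev->irq = virq */
	return 0;
}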
@@ -1139,6 +1159,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
goto unregister_mdio;
}
+ pdata->phydev->irq = phy_irq;
pdata->phydev->is_internal = is_internal_phy;
/* detect device revision as different features may be available */
@@ -1181,6 +1202,15 @@ unregister_mdio:
free_mdio:
mdiobus_free(pdata->mdiobus);
+dispose_irq:
+ irq_dispose_mapping(phy_irq);
+
+remove_irqdomain:
+ irq_domain_remove(pdata->irqdomain);
+
+free_irqfwnode:
+ irq_domain_free_fwnode(pdata->irqfwnode);
+
free_pdata:
kfree(pdata);
return ret;
@@ -1193,6 +1223,9 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
phy_disconnect(dev->net->phydev);
mdiobus_unregister(pdata->mdiobus);
mdiobus_free(pdata->mdiobus);
+ irq_dispose_mapping(irq_find_mapping(pdata->irqdomain, PHY_HWIRQ));
+ irq_domain_remove(pdata->irqdomain);
+ irq_domain_free_fwnode(pdata->irqfwnode);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
}
@@ -1206,8 +1239,7 @@ static int smsc95xx_start_phy(struct usbnet *dev)
static int smsc95xx_stop(struct usbnet *dev)
{
- if (dev->net->phydev)
- phy_stop(dev->net->phydev);
+ phy_stop(dev->net->phydev);
return 0;
}
@@ -1218,39 +1250,17 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
return crc << ((filter % 2) * 16);
}
-static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
-{
- int ret;
-
- netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
-
- /* read to clear */
- ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_SRC);
- if (ret < 0)
- return ret;
-
- /* enable interrupt source */
- ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_MASK);
- if (ret < 0)
- return ret;
-
- ret |= mask;
-
- smsc95xx_mdio_write_nopm(dev, PHY_INT_MASK, ret);
-
- return 0;
-}
-
-static int smsc95xx_link_ok_nopm(struct usbnet *dev)
+static int smsc95xx_link_ok(struct usbnet *dev)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
/* first, a dummy read, needed to latch some MII phys */
- ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
+ ret = smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, MII_BMSR);
if (ret < 0)
return ret;
- ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
+ ret = smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, MII_BMSR);
if (ret < 0)
return ret;
@@ -1263,14 +1273,14 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
val |= PM_CTL_SUS_MODE_0;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1282,12 +1292,12 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
if (pdata->wolopts & WAKE_PHY)
val |= PM_CTL_WUPS_ED_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
/* read back PM_CTRL */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
@@ -1299,34 +1309,34 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
static int smsc95xx_enter_suspend1(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
+ int ret, phy_id = pdata->phydev->mdio.addr;
u32 val;
- int ret;
/* reconfigure link pulse detection timing for
* compatibility with non-standard link partners
*/
if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
- smsc95xx_mdio_write_nopm(dev, PHY_EDPD_CONFIG,
- PHY_EDPD_CONFIG_DEFAULT);
+ smsc95xx_mdio_write(dev, phy_id, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
/* enable energy detect power-down mode */
- ret = smsc95xx_mdio_read_nopm(dev, PHY_MODE_CTRL_STS);
+ ret = smsc95xx_mdio_read(dev, phy_id, PHY_MODE_CTRL_STS);
if (ret < 0)
return ret;
ret |= MODE_CTRL_STS_EDPWRDOWN_;
- smsc95xx_mdio_write_nopm(dev, PHY_MODE_CTRL_STS, ret);
+ smsc95xx_mdio_write(dev, phy_id, PHY_MODE_CTRL_STS, ret);
/* enter SUSPEND1 mode */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_1;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1334,7 +1344,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
val &= ~PM_CTL_WUPS_;
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1349,14 +1359,14 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_2;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1371,7 +1381,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
u32 val;
int ret;
- ret = smsc95xx_read_reg_nopm(dev, RX_FIFO_INF, &val);
+ ret = smsc95xx_read_reg(dev, RX_FIFO_INF, &val);
if (ret < 0)
return ret;
@@ -1380,14 +1390,14 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
return -EBUSY;
}
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
return ret;
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_3 | PM_CTL_RES_CLR_WKP_STS;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1395,7 +1405,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
val &= ~PM_CTL_WUPS_;
val |= PM_CTL_WUPS_WOL_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
return ret;
@@ -1407,7 +1417,6 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
- int ret;
if (!netif_running(dev->net)) {
/* interface is ifconfig down so fully power down hw */
@@ -1426,27 +1435,10 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
}
netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
-
- /* enable PHY wakeup events for if cable is attached */
- ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
- PHY_INT_MASK_ANEG_COMP_);
- if (ret < 0) {
- netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
- return ret;
- }
-
netdev_info(dev->net, "entering SUSPEND1 mode\n");
return smsc95xx_enter_suspend1(dev);
}
- /* enable PHY wakeup events so we remote wakeup if cable is pulled */
- ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
- PHY_INT_MASK_LINK_DOWN_);
- if (ret < 0) {
- netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
- return ret;
- }
-
netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
return smsc95xx_enter_suspend3(dev);
}
@@ -1458,9 +1450,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
+ pdata->pm_task = current;
+
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
+ pdata->pm_task = NULL;
return ret;
}
@@ -1469,8 +1464,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
pdata->suspend_flags = 0;
}
- /* determine if link is up using only _nopm functions */
- link_up = smsc95xx_link_ok_nopm(dev);
+ link_up = smsc95xx_link_ok(dev);
if (message.event == PM_EVENT_AUTO_SUSPEND &&
(pdata->features & FEATURE_REMOTE_WAKEUP)) {
@@ -1487,23 +1481,23 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
netdev_info(dev->net, "entering SUSPEND2 mode\n");
/* disable energy detect (link up) & wake up events */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
goto done;
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
goto done;
@@ -1512,13 +1506,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
}
if (pdata->wolopts & WAKE_PHY) {
- ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
- (PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_));
- if (ret < 0) {
- netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
- goto done;
- }
-
/* if link is down then configure EDPD and enter SUSPEND1,
* otherwise enter SUSPEND0 below
*/
@@ -1601,7 +1588,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
}
for (i = 0; i < (wuff_filter_count * 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, filter_mask[i]);
if (ret < 0) {
kfree(filter_mask);
goto done;
@@ -1610,50 +1597,50 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
kfree(filter_mask);
for (i = 0; i < (wuff_filter_count / 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, command[i]);
if (ret < 0)
goto done;
}
for (i = 0; i < (wuff_filter_count / 4); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, offset[i]);
if (ret < 0)
goto done;
}
for (i = 0; i < (wuff_filter_count / 2); i++) {
- ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
+ ret = smsc95xx_write_reg(dev, WUFF, crc[i]);
if (ret < 0)
goto done;
}
/* clear any pending pattern match packet status */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val |= WUCSR_WUFR_;
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
}
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
val |= WUCSR_MPR_;
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
}
/* enable/disable wakeup sources */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
goto done;
@@ -1673,12 +1660,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
val &= ~WUCSR_MPEN_;
}
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
goto done;
/* enable wol wakeup source */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
goto done;
@@ -1688,12 +1675,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
if (pdata->wolopts & WAKE_PHY)
val |= PM_CTL_ED_EN_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
goto done;
/* enable receiver to enable frame reception */
- smsc95xx_start_rx_path(dev, 1);
+ smsc95xx_start_rx_path(dev);
/* some wol options are enabled, so enter SUSPEND0 */
netdev_info(dev->net, "entering SUSPEND0 mode\n");
@@ -1707,6 +1694,7 @@ done:
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+ pdata->pm_task = NULL;
return ret;
}
@@ -1727,45 +1715,53 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ pdata->pm_task = current;
+
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
- ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
- ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
if (ret < 0)
- return ret;
+ goto done;
/* clear wake-up status */
- ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
- ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
if (ret < 0)
- return ret;
+ goto done;
}
+ phy_init_hw(pdata->phydev);
+
ret = usbnet_resume(intf);
if (ret < 0)
netdev_warn(dev->net, "usbnet_resume error\n");
- phy_init_hw(pdata->phydev);
+done:
+ pdata->pm_task = NULL;
return ret;
}
static int smsc95xx_reset_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
+ pdata->pm_task = current;
ret = smsc95xx_reset(dev);
+ pdata->pm_task = NULL;
if (ret < 0)
return ret;
@@ -1961,8 +1957,8 @@ static const struct driver_info smsc95xx_info = {
.description = "smsc95xx USB 2.0 Ethernet",
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
- .link_reset = smsc95xx_link_reset,
- .reset = smsc95xx_start_phy,
+ .reset = smsc95xx_reset,
+ .check_connect = smsc95xx_start_phy,
.stop = smsc95xx_stop,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
@@ -2062,6 +2058,11 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9E08),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
+ USB_DEVICE(0x184F, 0x0051),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index b658510cc9a4..5a53e63d33a6 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
- if (len > ETH_FRAME_LEN)
+ if (len > ETH_FRAME_LEN || len > skb->len)
return 0;
/* the last packet of current skb */
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
index 18f670251275..952e6f7c0321 100644
--- a/drivers/net/usb/sr9800.h
+++ b/drivers/net/usb/sr9800.h
@@ -163,7 +163,7 @@
#define SR9800_MAX_BULKIN_24K 6
#define SR9800_MAX_BULKIN_32K 7
-struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
/* 2k */
{2048, 0x8000, 0x8001},
/* 4k */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9a6450f796dc..64a9a80b2309 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -17,9 +17,6 @@
* issues can usefully be addressed by this framework.
*/
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -229,7 +226,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
pipe = usb_rcvintpipe (dev->udev,
dev->status->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
- maxp = usb_maxpacket (dev->udev, pipe, 0);
+ maxp = usb_maxpacket(dev->udev, pipe);
/* avoid 1 msec chatter: min 8 msec poll rate */
period = max ((int) dev->status->desc.bInterval,
@@ -337,8 +334,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
skb->protocol = eth_type_trans (skb, dev->net);
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += skb->len;
+ u64_stats_inc(&stats64->rx_packets);
+ u64_stats_add(&stats64->rx_bytes, skb->len);
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
@@ -384,7 +381,7 @@ insanity:
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
-
+
/*-------------------------------------------------------------------------
*
* Network Device Driver (peer link to "Host Device", from USB host)
@@ -849,13 +846,11 @@ int usbnet_stop (struct net_device *net)
mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
- /* deferred work (task, timer, softirq) must also stop.
- * can't flush_scheduled_work() until we drop rtnl (later),
- * else workers could deadlock; so make workers a NOP.
- */
+ /* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->kevent);
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1055,9 +1050,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strlcpy (info->driver, dev->driver_name, sizeof info->driver);
- strlcpy (info->fw_version, dev->driver_info->description,
- sizeof info->fw_version);
+ strscpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strscpy(info->fw_version, dev->driver_info->description,
+ sizeof(info->fw_version));
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
@@ -1258,8 +1253,8 @@ static void tx_complete (struct urb *urb)
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
- stats64->tx_packets += entry->packets;
- stats64->tx_bytes += entry->length;
+ u64_stats_add(&stats64->tx_packets, entry->packets);
+ u64_stats_add(&stats64->tx_bytes, entry->length);
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
} else {
dev->net->stats.tx_errors++;
@@ -1603,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1616,15 +1612,17 @@ void usbnet_disconnect (struct usb_interface *intf)
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description);
- if (dev->driver_info->unbind)
- dev->driver_info->unbind(dev, intf);
-
net = dev->net;
unregister_netdev (net);
- cancel_work_sync(&dev->kevent);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
- usb_scuttle_anchored_urbs(&dev->deferred);
+ if (dev->driver_info->unbind)
+ dev->driver_info->unbind(dev, intf);
usb_kill_urb(dev->interrupt);
usb_free_urb(dev->interrupt);
@@ -1789,7 +1787,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
- dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
if (dev->maxpacket == 0) {
/* that is a broken device */
status = -ENODEV;
@@ -2004,7 +2002,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (size) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmalloc(size, GFP_NOIO);
if (!buf)
goto out;
}
@@ -2036,7 +2034,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_NOIO);
if (!buf)
goto out;
} else {
@@ -2137,7 +2135,7 @@ static void usbnet_async_cmd_cb(struct urb *urb)
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, const void *data, u16 size)
{
- struct usb_ctrlrequest *req = NULL;
+ struct usb_ctrlrequest *req;
struct urb *urb;
int err = -ENOMEM;
void *buf = NULL;
@@ -2155,7 +2153,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
if (!buf) {
netdev_err(dev->net, "Error allocating buffer"
" in %s!\n", __func__);
- goto fail_free;
+ goto fail_free_urb;
}
}
@@ -2179,14 +2177,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
if (err < 0) {
netdev_err(dev->net, "Error submitting the control"
" message: status=%d\n", err);
- goto fail_free;
+ goto fail_free_all;
}
return 0;
+fail_free_all:
+ kfree(req);
fail_free_buf:
kfree(buf);
-fail_free:
- kfree(req);
+	/*
+	 * avoid a double free: clear the flag that makes
+	 * usb_free_urb() free the buffer, since it can be
+	 * set only after the URB has been filled
+	 */
+ urb->transfer_flags = 0;
+fail_free_urb:
usb_free_urb(urb);
fail:
return err;
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 8e717a0b559b..7984f2157d22 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -315,6 +320,13 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
/* reported with some C860 units */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 354a963075c5..09682ea3354e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -128,8 +128,8 @@ static int veth_get_link_ksettings(struct net_device *dev,
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
- if (!rq->rx_notify_masked) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (!READ_ONCE(rq->rx_notify_masked) &&
+ napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
}
}
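
napi_schedule_prep() atomically test-and-sets the NAPI scheduled bit, so only one CPU wins the right to __napi_schedule(), and rx_notify_masked merely records an already-serialized decision. A runnable userspace analogy of that test-and-set gate (schedule_prep() here is a stand-in, not the kernel function):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Analogy for NAPIF_STATE_SCHED: only the winner of the
 * atomic test-and-set gets to schedule the poller. */
static atomic_bool sched_bit;

static bool schedule_prep(void)
{
	bool expected = false;
	return atomic_compare_exchange_strong(&sched_bit, &expected, true);
}

int main(void)
{
	/* First caller wins; a concurrent second caller loses. */
	printf("first : %d\n", schedule_prep());	/* prints 1 */
	printf("second: %d\n", schedule_prep());	/* prints 0 */
	return 0;
}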
@@ -286,7 +287,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
{
return __dev_forward_skb(dev, skb) ?: xdp ?
veth_xdp_rx(rq, skb) :
- netif_rx(skb);
+ __netif_rx(skb);
}
/* return true if the specified skb has chances of GRO aggregation
@@ -319,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
+ if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}
@@ -432,21 +433,6 @@ static void veth_set_multicast_list(struct net_device *dev)
{
}
-static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
- int buflen)
-{
- struct sk_buff *skb;
-
- skb = build_skb(head, buflen);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, headroom);
- skb_put(skb, len);
-
- return skb;
-}
-
static int veth_select_rxq(struct net_device *dev)
{
return smp_processor_id() % dev->real_num_rx_queues;
@@ -493,7 +479,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame *frame = frames[i];
void *ptr = veth_xdp_to_ptr(frame);
- if (unlikely(frame->len > max_len ||
+ if (unlikely(xdp_get_frame_len(frame) > max_len ||
__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
@@ -694,72 +680,143 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
}
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
- struct sk_buff *skb,
- struct veth_xdp_tx_bq *bq,
- struct veth_stats *stats)
+static void veth_xdp_get(struct xdp_buff *xdp)
{
- u32 pktlen, headroom, act, metalen, frame_sz;
- void *orig_data, *orig_data_end;
- struct bpf_prog *xdp_prog;
- int mac_len, delta, off;
- struct xdp_buff xdp;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ int i;
- skb_prepare_for_gro(skb);
+ get_page(virt_to_page(xdp->data));
+ if (likely(!xdp_buff_has_frags(xdp)))
+ return;
- rcu_read_lock();
- xdp_prog = rcu_dereference(rq->xdp_prog);
- if (unlikely(!xdp_prog)) {
- rcu_read_unlock();
- goto out;
- }
+ for (i = 0; i < sinfo->nr_frags; i++)
+ __skb_frag_ref(&sinfo->frags[i]);
+}
- mac_len = skb->data - skb_mac_header(skb);
- pktlen = skb->len + mac_len;
- headroom = skb_headroom(skb) - mac_len;
+static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ struct xdp_buff *xdp,
+ struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ u32 frame_sz;
if (skb_shared(skb) || skb_head_is_locked(skb) ||
- skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
+ skb_shinfo(skb)->nr_frags) {
+ u32 size, len, max_head_size, off;
struct sk_buff *nskb;
- int size, head_off;
- void *head, *start;
struct page *page;
+ int i, head_off;
- size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (size > PAGE_SIZE)
+		/* We need a private copy of the skb and data buffers since
+		 * the eBPF program can modify them. Segment the original skb
+		 * into order-0 pages without linearizing it.
+		 *
+		 * Make sure we have enough space for the linear and paged
+		 * areas.
+		 */
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+ VETH_XDP_HEADROOM);
+ if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
goto drop;
+ /* Allocate skb head */
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
goto drop;
- head = page_address(page);
- start = head + VETH_XDP_HEADROOM;
- if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
- page_frag_free(head);
+ nskb = build_skb(page_address(page), PAGE_SIZE);
+ if (!nskb) {
+ put_page(page);
goto drop;
}
- nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
- skb->len, PAGE_SIZE);
- if (!nskb) {
- page_frag_free(head);
+ skb_reserve(nskb, VETH_XDP_HEADROOM);
+ size = min_t(u32, skb->len, max_head_size);
+ if (skb_copy_bits(skb, 0, nskb->data, size)) {
+ consume_skb(nskb);
goto drop;
}
+ skb_put(nskb, size);
skb_copy_header(nskb, skb);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
+
+ /* Allocate paged area of new skb */
+ off = size;
+ len = skb->len - off;
+
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ size = min_t(u32, len, PAGE_SIZE);
+ skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
+ if (skb_copy_bits(skb, off, page_address(page),
+ size)) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ len -= size;
+ off += size;
+ }
+
consume_skb(skb);
skb = nskb;
+ } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+ pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+ goto drop;
}
/* SKB "head" area always have tailroom for skb_shared_info */
frame_sz = skb_end_pointer(skb) - skb->head;
frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
- xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
+ xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
+ skb_headlen(skb), true);
+
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
+ *pskb = skb;
+
+ return 0;
+drop:
+ consume_skb(skb);
+ *pskb = NULL;
+
+ return -ENOMEM;
+}
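
The conversion budget is one order-0 head page plus up to MAX_SKB_FRAGS order-0 fragment pages; anything larger is dropped. A standalone recomputation of that budget, assuming common x86_64 values for the page size, XDP headroom, and skb_shared_info overhead (all config-dependent approximations):

#include <stdio.h>

#define PAGE_SZ		4096u	/* PAGE_SIZE, assumed */
#define XDP_HEADROOM	256u	/* VETH_XDP_HEADROOM, assumed */
#define SHINFO_SZ	320u	/* aligned skb_shared_info, assumed */
#define MAX_FRAGS	17u	/* MAX_SKB_FRAGS, assumed */

int main(void)
{
	/* mirrors SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) */
	unsigned int max_head = PAGE_SZ - XDP_HEADROOM - SHINFO_SZ;
	unsigned int max_len  = max_head + PAGE_SZ * MAX_FRAGS;

	printf("max linear head     : %u bytes\n", max_head);
	printf("max convertible skb : %u bytes\n", max_len);
	return 0;
}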
+
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
+{
+ void *orig_data, *orig_data_end;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 act, metalen;
+ int off;
+
+ skb_prepare_for_gro(skb);
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (unlikely(!xdp_prog)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+ if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
+ goto drop;
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -770,7 +827,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
case XDP_PASS:
break;
case XDP_TX:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
@@ -782,7 +839,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
@@ -805,18 +862,27 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
/* check if bpf_xdp_adjust_head was used */
- delta = orig_data - xdp.data;
- off = mac_len + delta;
+ off = orig_data - xdp.data;
if (off > 0)
__skb_push(skb, off);
else if (off < 0)
__skb_pull(skb, -off);
- skb->mac_header -= delta;
+
+ skb_reset_mac_header(skb);
/* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
__skb_put(skb, off); /* positive on grow, negative on shrink */
+
+ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(&xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
@@ -832,7 +898,7 @@ xdp_drop:
return NULL;
err_xdp:
rcu_read_unlock();
- page_frag_free(xdp.data);
+ xdp_return_buff(&xdp);
xdp_xmit:
return NULL;
}
@@ -854,7 +920,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
/* ndo_xdp_xmit */
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- stats->xdp_bytes += frame->len;
+ stats->xdp_bytes += xdp_get_frame_len(frame);
frame = veth_xdp_rcv_one(rq, frame, bq, stats);
if (frame) {
/* XDP_PASS */
@@ -912,8 +978,10 @@ static int veth_poll(struct napi_struct *napi, int budget)
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
+ }
}
}
@@ -1002,7 +1070,7 @@ static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
struct veth_rq *rq = &priv->rq[i];
if (!napi_already_on)
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll);
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
if (err < 0)
goto err_rxq_reg;
@@ -1116,7 +1184,7 @@ static int veth_napi_enable_range(struct net_device *dev, int start, int end)
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll);
}
err = __veth_napi_enable_range(dev, start, end);
@@ -1307,7 +1375,7 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
+ priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
if (!priv->rq)
return -ENOMEM;
@@ -1460,9 +1528,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
- peer->hard_header_len -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+ peer->hard_header_len;
+ /* Allow increasing the max_mtu if the program supports
+ * XDP fragments.
+ */
+ if (prog->aux->xdp_has_frags)
+ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+
if (peer->mtu > max_mtu) {
NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
err = -ERANGE;
@@ -1574,6 +1647,7 @@ static void veth_setup(struct net_device *dev)
dev->hw_features = VETH_FEATURES;
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}
/*
@@ -1685,8 +1759,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
- netif_set_gso_max_size(peer, dev->gso_max_size);
- netif_set_gso_max_segs(peer, dev->gso_max_segs);
+ netif_inherit_tso_max(peer, dev);
err = register_netdevice(peer);
put_net(net);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 569eecfbc2cd..7106932c6f88 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -169,6 +172,24 @@ struct receive_queue {
struct xdp_rxq_info xdp_rxq;
};
+/* This structure can hold an RSS message with the maximum settings for the
+ * indirection table and key size. Note that the default structure describing
+ * the RSS configuration, virtio_net_rss_config, carries the same information
+ * but cannot hold the table values.
+ * In any case, the structure is passed to the virtio hw through sg_buf, split
+ * into parts, because the table sizes may differ according to the device
+ * configuration.
+ */
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
+struct virtio_net_ctrl_rss {
+ u32 hash_types;
+ u16 indirection_table_mask;
+ u16 unclassified_queue;
+ u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+ u16 max_tx_vq;
+ u8 hash_key_length;
+ u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+};
+
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
@@ -178,6 +199,7 @@ struct control_buf {
u8 allmulti;
__virtio16 vid;
__virtio64 offloads;
+ struct virtio_net_ctrl_rss rss;
};
struct virtnet_info {
@@ -203,9 +225,20 @@ struct virtnet_info {
/* I like... big packets and I cannot lie! */
bool big_packets;
+ /* number of sg entries allocated for big packets */
+ unsigned int big_packets_num_skbfrags;
+
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
+ /* Host supports rss and/or hash report */
+ bool has_rss;
+ bool has_rss_hash_report;
+ u8 rss_key_size;
+ u16 rss_indir_table_size;
+ u32 rss_hash_types_supported;
+ u32 rss_hash_types_saved;
+
/* Has control virtqueue */
bool has_cvq;
@@ -215,9 +248,15 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for refilling if we run low on memory. */
+ /* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* Is delayed refill enabled? */
+ bool refill_enabled;
+
+ /* The lock to synchronize the access to refill_enabled */
+ spinlock_t refill_lock;
+
/* Work struct for config space updates */
struct work_struct config_work;
@@ -234,6 +273,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -242,15 +287,18 @@ struct virtnet_info {
};
struct padded_vnet_hdr {
- struct virtio_net_hdr_mrg_rxbuf hdr;
+ struct virtio_net_hdr_v1_hash hdr;
/*
* hdr is in a separate sg buffer, and data sg buffer shares same page
* with this header sg. This padding makes next sg 16 byte aligned
* after the header.
*/
- char padding[4];
+ char padding[12];
};
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -321,6 +369,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = true;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = false;
+ spin_unlock_bh(&vi->refill_lock);
+}
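
The refill_enabled flag closes a shutdown race: virtnet_close() first disables delayed refill under the BH-safe spinlock, so a concurrent NAPI poll whose try_fill_recv() fails can no longer requeue the refill work after cancel_delayed_work_sync() has run.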
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -396,7 +458,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
hdr_len = vi->hdr_len;
if (vi->mergeable_rx_bufs)
- hdr_padded_len = sizeof(*hdr);
+ hdr_padded_len = hdr_len;
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
@@ -978,6 +1040,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* xdp.data_meta were adjusted
*/
len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
+
+			/* Recalculate the headroom if xdp.data or
+			 * xdp.data_meta were adjusted. Note that offset
+			 * always points to the start of the bytes reserved
+			 * for the virtio_net header, which xdp.data follows;
+			 * that means offset equals the headroom (when buf
+			 * starts at the beginning of the page, otherwise
+			 * there is a base offset inside the page), but it is
+			 * measured from a different starting point (buf
+			 * start) than xdp.data (buf start + vnet hdr size).
+			 * If the xdp prog adjusted xdp.data or data_meta,
+			 * the headroom has changed and so has the offset, so
+			 * use data_hard_start, which points at buf start +
+			 * vnet hdr size, to calculate the new headroom and
+			 * use it later to compute buf start in page_to_skb().
+			 */
+ headroom = xdp.data - xdp.data_hard_start - metasize;
+
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
@@ -985,15 +1065,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
metasize,
- VIRTIO_XDP_HEADROOM);
+ headroom);
return head_skb;
}
break;
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
@@ -1123,6 +1206,35 @@ xdp_xmit:
return NULL;
}
+static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
+ struct sk_buff *skb)
+{
+ enum pkt_hash_types rss_hash_type;
+
+ if (!hdr_hash || !skb)
+ return;
+
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
+ case VIRTIO_NET_HASH_REPORT_TCPv4:
+ case VIRTIO_NET_HASH_REPORT_UDPv4:
+ case VIRTIO_NET_HASH_REPORT_TCPv6:
+ case VIRTIO_NET_HASH_REPORT_UDPv6:
+ case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
+ case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
+ rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ case VIRTIO_NET_HASH_REPORT_IPv4:
+ case VIRTIO_NET_HASH_REPORT_IPv6:
+ case VIRTIO_NET_HASH_REPORT_IPv6_EX:
+ rss_hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VIRTIO_NET_HASH_REPORT_NONE:
+ default:
+ rss_hash_type = PKT_HASH_TYPE_NONE;
+ }
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
+}
+
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
void *buf, unsigned int len, void **ctx,
unsigned int *xdp_xmit,
@@ -1157,6 +1269,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
hdr = skb_vnet_hdr(skb);
+ if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+ virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1220,10 +1334,10 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
char *p;
int i, err, offset;
- sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
- /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
- for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
+ for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
first = get_a_page(rq, gfp);
if (!first) {
if (list)
@@ -1254,7 +1368,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -1266,7 +1380,8 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
struct ewma_pkt_len *avg_pkt_len,
unsigned int room)
{
- const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ const size_t hdr_len = vi->hdr_len;
unsigned int len;
if (room)
@@ -1450,8 +1565,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+ spin_lock(&vi->refill_lock);
+ if (vi->refill_enabled)
+ schedule_delayed_work(&vi->refill, 0);
+ spin_unlock(&vi->refill_lock);
+ }
}
u64_stats_update_begin(&rq->stats.syncp);
@@ -1524,6 +1643,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1574,6 +1698,8 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
+ enable_delayed_refill(vi);
+
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
@@ -1769,6 +1895,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+	/* 1. wait for all xmit to complete
+ * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -1956,6 +2146,8 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
+ /* Make sure NAPI doesn't schedule refill work */
+ disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
@@ -2101,7 +2293,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_next(-1, cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
@@ -2177,12 +2369,227 @@ static void virtnet_get_ringparam(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+}
+
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
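
With .set_ringparam wired up, the ring sizes become tunable at runtime (e.g. `ethtool -G eth0 rx 512 tx 512`); each queue pair is resized in place via virtqueue_resize() while its NAPI, and for TX the queue itself, is quiesced.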
+
+static bool virtnet_commit_rss_command(struct virtnet_info *vi)
+{
+ struct net_device *dev = vi->dev;
+ struct scatterlist sgs[4];
+ unsigned int sg_buf_size;
+
+ /* prepare sgs */
+ sg_init_table(sgs, 4);
+
+ sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+ sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+
+ sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
+ sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+
+ sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
+ - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
+ sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+
+ sg_buf_size = vi->rss_key_size;
+ sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+ vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
+ }
+ return true;
+}
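
The four scatterlist entries above slice up the driver's RSS control structure, declared earlier in this file (outside this hunk); its rough shape:

struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

sgs[0] covers the fields before the indirection table, sgs[1] the table itself, sgs[2] the max_tx_vq..hash_key_length span, and sgs[3] the key, matching the offsetof() arithmetic above.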
+
+static void virtnet_init_default_rss(struct virtnet_info *vi)
+{
+ u32 indir_val = 0;
+ int i = 0;
+
+ vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss_hash_types_saved = vi->rss_hash_types_supported;
+ vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+ ? vi->rss_indir_table_size - 1 : 0;
+ vi->ctrl->rss.unclassified_queue = 0;
+
+ for (; i < vi->rss_indir_table_size; ++i) {
+ indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
+ vi->ctrl->rss.indirection_table[i] = indir_val;
+ }
+
+ vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
+ vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+ netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+}
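
For reference, the ethtool helper used to seed the default table simply spreads entries round-robin over the receive queues (definition from include/linux/ethtool.h):

static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
{
	return index % n_rx_rings;
}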
+
+static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case TCP_V6_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case UDP_V4_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case IPV4_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ break;
+ case IPV6_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
}
+static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+ u32 new_hashtypes = vi->rss_hash_types_saved;
+ bool is_disable = info->data & RXH_DISCARD;
+ bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
+
+ /* supports only 'sd', 'sdfn' and 'r' */
+ if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) || is_l4 || is_disable))
+ return false;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
+ break;
+ case UDP_V4_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
+ break;
+ case IPV4_FLOW:
+ new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+ if (!is_disable)
+ new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+ break;
+ case TCP_V6_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
+ break;
+ case UDP_V6_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
+ break;
+ case IPV6_FLOW:
+ new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+ if (!is_disable)
+ new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+ break;
+ default:
+ /* unsupported flow */
+ return false;
+ }
+
+ /* if an unsupported hashtype was requested */
+ if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
+ return false;
+
+ if (new_hashtypes != vi->rss_hash_types_saved) {
+ vi->rss_hash_types_saved = new_hashtypes;
+ vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ if (vi->dev->features & NETIF_F_RXHASH)
+ return virtnet_commit_rss_command(vi);
+ }
+
+ return true;
+}
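
A worked example (interface name hypothetical): "ethtool -N eth0 rx-flow-hash udp4 sdfn" arrives here with info->flow_type == UDP_V4_FLOW and info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3), so is_l4 is true and the UDP_V4_FLOW case computes:

	new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 |
			 VIRTIO_NET_RSS_HASH_TYPE_UDPv4;

The request then fails with -EINVAL unless the device advertised both bits in rss_hash_types_supported.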
static void virtnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -2190,9 +2597,9 @@ static void virtnet_get_drvinfo(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
@@ -2342,27 +2749,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if the VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight while the interface is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2370,16 +2839,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2411,11 +2883,99 @@ static void virtnet_update_settings(struct virtnet_info *vi)
vi->duplex = duplex;
}
+static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
+{
+ return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
+}
+
+static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
+{
+ return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
+}
+
+static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ indir[i] = vi->ctrl->rss.indirection_table[i];
+ }
+
+ if (key)
+ memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ vi->ctrl->rss.indirection_table[i] = indir[i];
+ }
+ if (key)
+ memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+
+ virtnet_commit_rss_command(vi);
+
+ return 0;
+}
+
+static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = vi->curr_queue_pairs;
+ break;
+ case ETHTOOL_GRXFH:
+ virtnet_get_hashflow(vi, info);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ if (!virtnet_set_hashflow(vi, info))
+ rc = -EINVAL;
+
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -2426,12 +2986,17 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_link_ksettings = virtnet_set_link_ksettings,
.set_coalesce = virtnet_set_coalesce,
.get_coalesce = virtnet_get_coalesce,
+ .get_rxfh_key_size = virtnet_get_rxfh_key_size,
+ .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
+ .get_rxfh = virtnet_get_rxfh,
+ .set_rxfh = virtnet_set_rxfh,
+ .get_rxnfc = virtnet_get_rxnfc,
+ .set_rxnfc = virtnet_set_rxnfc,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int i;
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
@@ -2439,14 +3004,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
- }
+ if (netif_running(vi->dev))
+ virtnet_close(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -2454,7 +3013,7 @@ static int init_vqs(struct virtnet_info *vi);
static int virtnet_restore_up(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err, i;
+ int err;
err = init_vqs(vi);
if (err)
@@ -2462,16 +3021,12 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+ enable_delayed_refill(vi);
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(vi->dev)) {
+ err = virtnet_open(vi->dev);
+ if (err)
+ return err;
}
netif_tx_lock_bh(vi->dev);
@@ -2678,6 +3233,16 @@ static int virtnet_set_features(struct net_device *dev,
vi->guest_offloads = offloads;
}
+ if ((dev->features ^ features) & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ else
+ vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+
+ if (!virtnet_commit_rss_command(vi))
+ return -EINVAL;
+ }
+
return 0;
}
@@ -2804,6 +3369,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
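
The is_xdp_frame()/ptr_to_xdp() helpers live near the top of this file (outside this hunk); the driver tells skbs and XDP frames apart on the tx ring by tagging the low pointer bit, approximately:

#define VIRTIO_XDP_FLAG	BIT(0)

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}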
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2811,26 +3397,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -2851,7 +3425,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
*/
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
- const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ const unsigned int hdr_len = vi->hdr_len;
unsigned int rq_size = virtqueue_get_vring_size(vq);
unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
@@ -2966,10 +3540,11 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
- netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
- napi_weight);
- netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
- napi_tx ? napi_weight : 0);
+ netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ napi_weight);
+ netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
+ virtnet_poll_tx,
+ napi_tx ? napi_weight : 0);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
@@ -3072,6 +3647,12 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3104,21 +3685,44 @@ static int virtnet_validate(struct virtio_device *vdev)
return 0;
}
+static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
+{
+ return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO);
+}
+
+static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
+{
+ bool guest_gso = virtnet_check_guest_gso(vi);
+
+ /* If the device can receive ANY guest GSO packets, regardless of MTU,
+ * allocate buffers of the maximum size; otherwise limit them to
+ * MTU-sized packets only.
+ */
+ if (mtu > ETH_DATA_LEN || guest_gso) {
+ vi->big_packets = true;
+ vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
+ }
+}
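
Numerically, assuming a hypothetical 9000-byte MTU on 4 KiB pages with no guest GSO offered:

	/* illustration only */
	vi->big_packets_num_skbfrags = DIV_ROUND_UP(9000, 4096);	/* == 3 */

i.e. three page fragments per big packet, instead of the full MAX_SKB_FRAGS (typically 17) reserved when the device may deliver maximum-size GSO packets.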
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
- int mtu;
+ int mtu = 0;
- /* Find if host supports multiqueue virtio_net device */
- err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
- struct virtio_net_config,
- max_virtqueue_pairs, &max_queue_pairs);
+ /* Find if host supports multiqueue/rss virtio_net device */
+ max_queue_pairs = 1;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ max_queue_pairs =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
/* We need at least 2 queues */
- if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+ if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
max_queue_pairs = 1;
@@ -3195,19 +3799,45 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
-
- /* If we can receive ANY GSO packets, we must allocate large ones. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
- vi->big_packets = true;
+ spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
- virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+ vi->has_rss_hash_report = true;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ vi->has_rss = true;
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_indir_table_size =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config,
+ rss_max_indirection_table_length));
+ vi->rss_key_size =
+ virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+ vi->rss_hash_types_supported =
+ virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
+ vi->rss_hash_types_supported &=
+ ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
+ VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
+ VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
+
+ dev->hw_features |= NETIF_F_RXHASH;
+ }
+
+ if (vi->has_rss_hash_report)
+ vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
+ else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
@@ -3236,12 +3866,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->mtu = mtu;
dev->max_mtu = mtu;
-
- /* TODO: size buffers correctly in this case. */
- if (dev->mtu > ETH_DATA_LEN)
- vi->big_packets = true;
}
+ virtnet_set_big_packets(vi, mtu);
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
@@ -3274,14 +3902,23 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
- err = register_netdev(dev);
+ if (vi->has_rss || vi->has_rss_hash_report)
+ virtnet_init_default_rss(vi);
+
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+ err = register_netdevice(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
+ rtnl_unlock();
goto free_failover;
}
virtio_device_ready(vdev);
+ rtnl_unlock();
+
err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
@@ -3312,7 +3949,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
- vi->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
unregister_netdev(dev);
free_failover:
@@ -3328,7 +3965,7 @@ free:
static void remove_vq_common(struct virtnet_info *vi)
{
- vi->vdev->config->reset(vi->vdev);
+ virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
@@ -3405,7 +4042,8 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
- VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
+ VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
@@ -3449,8 +4087,7 @@ static __init int virtio_net_driver_init(void)
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
-
- ret = register_virtio_driver(&virtio_net_driver);
+ ret = register_virtio_driver(&virtio_net_driver);
if (ret)
goto err_virtio;
return 0;
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 7a38925f4165..a666a88ac1ff 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2021, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index f9f3a23d1698..41c0660a0c54 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 74d4e8bc4abc..41d6767283a6 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -40,7 +40,13 @@ enum {
VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
VMXNET3_REG_MACH = 0x30, /* MAC Address High */
VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
- VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
+ VMXNET3_REG_ECR = 0x40, /* Event Cause Register */
+ VMXNET3_REG_DCR = 0x48, /* Device capability register,
+ * from 0x48 to 0x80
+ */
+ VMXNET3_REG_PTCR = 0x88, /* Passthru capability register
+ * from 0x88 to 0xb0
+ */
};
/* BAR 0 */
@@ -51,8 +57,18 @@ enum {
VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
};
-#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
-#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+/* For the large PT BAR, the following are the offsets of the doorbell registers */
+enum {
+ VMXNET3_REG_LB_TXPROD = 0x1000, /* Tx Producer Index */
+ VMXNET3_REG_LB_RXPROD = 0x1400, /* Rx Producer Index for ring 1 */
+ VMXNET3_REG_LB_RXPROD2 = 0x1800, /* Rx Producer Index for ring 2 */
+};
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_LARGE_PT_REG_SIZE 8192 /* large PT pages */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+#define VMXNET3_LARGE_BAR0_REG_SIZE (4096 * 4096) /* LARGE BAR 0 */
+#define VMXNET3_OOB_REG_SIZE (4094 * 4096) /* OOB pages */
#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
#define VMXNET3_REG_ALIGN_MASK 0x7
@@ -83,6 +99,9 @@ enum {
VMXNET3_CMD_SET_COALESCE,
VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_SET_RSS_FIELDS,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_RESERVED5,
+ VMXNET3_CMD_SET_RING_BUFFER_SIZE,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -101,6 +120,9 @@ enum {
VMXNET3_CMD_GET_RESERVED2,
VMXNET3_CMD_GET_RESERVED3,
VMXNET3_CMD_GET_MAX_QUEUES_CONF,
+ VMXNET3_CMD_GET_RESERVED4,
+ VMXNET3_CMD_GET_MAX_CAPABILITIES,
+ VMXNET3_CMD_GET_DCR0_REG,
};
/*
@@ -126,17 +148,17 @@ struct Vmxnet3_TxDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 msscof:14; /* MSS, checksum offset, flags */
- u32 ext1:1;
+ u32 ext1:1; /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
u32 dtype:1; /* descriptor type */
- u32 oco:1;
+ u32 oco:1; /* Outer csum offload */
u32 gen:1; /* generation bit */
u32 len:14;
#else
u32 len:14;
u32 gen:1; /* generation bit */
- u32 oco:1;
+ u32 oco:1; /* Outer csum offload */
u32 dtype:1; /* descriptor type */
- u32 ext1:1;
+ u32 ext1:1; /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
u32 msscof:14; /* MSS, checksum offset, flags */
#endif /* __BIG_ENDIAN_BITFIELD */
@@ -240,11 +262,13 @@ struct Vmxnet3_RxCompDesc {
u32 rqID:10; /* rx queue/ring ID */
u32 sop:1; /* Start of Packet */
u32 eop:1; /* End of Packet */
- u32 ext1:2;
+ u32 ext1:2; /* bit 0: indicating v4/v6/.. is for inner header */
+ /* bit 1: indicating rssType is based on inner header */
u32 rxdIdx:12; /* Index of the RxDesc */
#else
u32 rxdIdx:12; /* Index of the RxDesc */
- u32 ext1:2;
+ u32 ext1:2; /* bit 0: indicating v4/v6/.. is for inner header */
+ /* bit 1: indicating rssType is based on inner header */
u32 eop:1; /* End of Packet */
u32 sop:1; /* Start of Packet */
u32 rqID:10; /* rx queue/ring ID */
@@ -378,6 +402,8 @@ union Vmxnet3_GenericDesc {
/* max # of tx descs for a non-tso pkt */
#define VMXNET3_MAX_TXD_PER_PKT 16
+/* max # of tx descs for a tso pkt */
+#define VMXNET3_MAX_TSO_TXD_PER_PKT 24
/* Max size of a single rx buffer */
#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
@@ -724,6 +750,13 @@ enum Vmxnet3_RSSField {
VMXNET3_RSS_FIELDS_ESPIP6 = 0x0020,
};
+struct Vmxnet3_RingBufferSize {
+ __le16 ring1BufSizeType0;
+ __le16 ring1BufSizeType1;
+ __le16 ring2BufSizeType1;
+ __le16 pad;
+};
+
/* If the command data <= 16 bytes, use the shared memory directly.
* otherwise, use variable length configuration descriptor.
*/
@@ -731,6 +764,7 @@ union Vmxnet3_CmdInfo {
struct Vmxnet3_VariableLenConfDesc varConf;
struct Vmxnet3_SetPolling setPolling;
enum Vmxnet3_RSSField setRssFields;
+ struct Vmxnet3_RingBufferSize ringBufSize;
__le64 data[2];
};
@@ -801,4 +835,30 @@ struct Vmxnet3_DriverShared {
#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
#define VMXNET3_LINK_DOWN 0
+#define VMXNET3_DCR_ERROR 31 /* error when bit 31 of DCR is set */
+#define VMXNET3_CAP_UDP_RSS 0 /* bit 0 of DCR 0 */
+#define VMXNET3_CAP_ESP_RSS_IPV4 1 /* bit 1 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD 2 /* bit 2 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_TSO 3 /* bit 3 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD 4 /* bit 4 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_TSO 5 /* bit 5 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD 6 /* bit 6 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD 7 /* bit 7 of DCR 0 */
+#define VMXNET3_CAP_PKT_STEERING_IPV4 8 /* bit 8 of DCR 0 */
+#define VMXNET3_CAP_VERSION_4_MAX VMXNET3_CAP_PKT_STEERING_IPV4
+#define VMXNET3_CAP_ESP_RSS_IPV6 9 /* bit 9 of DCR 0 */
+#define VMXNET3_CAP_VERSION_5_MAX VMXNET3_CAP_ESP_RSS_IPV6
+#define VMXNET3_CAP_ESP_OVER_UDP_RSS 10 /* bit 10 of DCR 0 */
+#define VMXNET3_CAP_INNER_RSS 11 /* bit 11 of DCR 0 */
+#define VMXNET3_CAP_INNER_ESP_RSS 12 /* bit 12 of DCR 0 */
+#define VMXNET3_CAP_CRC32_HASH_FUNC 13 /* bit 13 of DCR 0 */
+#define VMXNET3_CAP_VERSION_6_MAX VMXNET3_CAP_CRC32_HASH_FUNC
+#define VMXNET3_CAP_OAM_FILTER 14 /* bit 14 of DCR 0 */
+#define VMXNET3_CAP_ESP_QS 15 /* bit 15 of DCR 0 */
+#define VMXNET3_CAP_LARGE_BAR 16 /* bit 16 of DCR 0 */
+#define VMXNET3_CAP_OOORX_COMP 17 /* bit 17 of DCR 0 */
+#define VMXNET3_CAP_VERSION_7_MAX 18
+/* when new capability is introduced, update VMXNET3_CAP_MAX */
+#define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX
+
#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d9d90baac72a..d3e7b27eb933 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -130,6 +130,20 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
+/* Check whether the capability is supported by the UPT device,
+ * or whether UPT was even requested
+ */
+bool
+vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
+{
+ if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
+ cap_supported & (1UL << cap)) {
+ return true;
+ }
+
+ return false;
+}
+
/*
* Check the link state. This may start or stop the tx queue.
@@ -571,6 +585,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
rbi = rbi_base + ring->next2fill;
gd = ring->base + ring->next2fill;
+ rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
@@ -589,6 +604,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
+ rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -613,6 +629,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
+ rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -628,8 +645,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
/* Fill the last buffer but don't mark it ready, or else the
* device will think that the queue is full */
- if (num_allocated == num_to_alloc)
+ if (num_allocated == num_to_alloc) {
+ rbi->comp_state = VMXNET3_RXD_COMP_DONE;
break;
+ }
gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
@@ -1042,6 +1061,23 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
tq->stats.copy_skb_header++;
}
+ if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
+ /* tso pkts must not use more than
+ * VMXNET3_MAX_TSO_TXD_PER_PKT entries
+ */
+ if (skb_linearize(skb) != 0) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ tq->stats.linearized++;
+
+ /* recalculate the # of descriptors to use */
+ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ }
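
After skb_linearize() the entire payload sits in the skb head, hence count collapses to VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1. Assuming the helper in vmxnet3_int.h keeps its usual shape (not shown in this hunk), it just divides by the per-descriptor buffer limit:

/* sketch, assumed definition: each tx descriptor carries at most
 * VMXNET3_MAX_TX_BUF_SIZE bytes */
#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
				  VMXNET3_MAX_TX_BUF_SIZE)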
if (skb->encapsulation) {
vmxnet3_prepare_inner_tso(skb, &ctx);
} else {
@@ -1125,7 +1161,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (ctx.mss) {
if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
- gdesc->txd.om = VMXNET3_OM_ENCAP;
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.ext1 = 1;
+ } else {
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ }
gdesc->txd.msscof = ctx.mss;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
@@ -1142,8 +1183,15 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb->encapsulation) {
gdesc->txd.hlen = ctx.l4_offset +
ctx.l4_hdr_size;
- gdesc->txd.om = VMXNET3_OM_ENCAP;
- gdesc->txd.msscof = 0; /* Reserved */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.msscof = ctx.l4_offset +
+ skb->csum_offset;
+ gdesc->txd.ext1 = 1;
+ } else {
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ gdesc->txd.msscof = 0; /* Reserved */
+ }
} else {
gdesc->txd.hlen = ctx.l4_offset;
gdesc->txd.om = VMXNET3_OM_CSUM;
@@ -1191,7 +1239,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter,
- VMXNET3_REG_TXPROD + tq->qid * 8,
+ adapter->tx_prod_offset + tq->qid * 8,
tq->tx_ring.next2fill);
}
@@ -1343,14 +1391,15 @@ static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
- static const u32 rxprod_reg[2] = {
- VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
+ u32 rxprod_reg[2] = {
+ adapter->rx_prod_offset, adapter->rx_prod2_offset
};
u32 num_pkts = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
u16 segCnt = 0, mss = 0;
+ int comp_offset, fill_offset;
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxCmdDesc;
struct Vmxnet3_RxCompDesc rxComp;
@@ -1501,6 +1550,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
hash_type);
}
#endif
+ skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
if (VMXNET3_VERSION_GE_2(adapter) &&
@@ -1623,9 +1673,15 @@ not_lro:
rcd_done:
/* device may have skipped some rx descs */
- ring->next2comp = idx;
- num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring = rq->rx_ring + ring_idx;
+ rbi->comp_state = VMXNET3_RXD_COMP_DONE;
+
+ comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
+ fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
+ idx - ring->next2fill - 1;
+ if (!ring->isOutOfOrder || fill_offset >= comp_offset)
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
/* Ensure that the writes to rxd->gen bits will be observed
* after all other writes to rxd objects.
@@ -1633,18 +1689,38 @@ rcd_done:
dma_wmb();
while (num_to_alloc) {
- vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
- &rxCmdDesc);
- BUG_ON(!rxd->addr);
+ rbi = rq->buf_info[ring_idx] + ring->next2fill;
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
+ goto refill_buf;
+ if (ring_idx == 0) {
+ /* ring0 Type1 buffers can get skipped; re-fill them */
+ if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
+ goto refill_buf;
+ }
+ if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
+refill_buf:
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+ WARN_ON(!rxd->addr);
+
+ /* Recv desc is ready to be used by the device */
+ rxd->gen = ring->gen;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
+ num_to_alloc--;
+ } else {
+ /* rx completion hasn't occurred */
+ ring->isOutOfOrder = 1;
+ break;
+ }
+ }
- /* Recv desc is ready to be used by the device */
- rxd->gen = ring->gen;
- vmxnet3_cmd_ring_adv_next2fill(ring);
- num_to_alloc--;
+ if (num_to_alloc == 0) {
+ ring->isOutOfOrder = 0;
}
/* if needed, update the register */
- if (unlikely(rq->shared->updateRxProd)) {
+ if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
@@ -1666,6 +1742,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
+ /* ring has already been cleaned up */
+ if (!rq->rx_ring[0].base)
+ return;
+
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -1804,6 +1884,7 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
+ rq->rx_ring[i].isOutOfOrder = 0;
}
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
adapter) == 0) {
@@ -2621,6 +2702,23 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
}
static void
+vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_7(adapter))
+ return;
+
+ cmdInfo->ringBufSize = adapter->ringBufSize;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RING_BUFFER_SIZE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
+static void
vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
@@ -2665,6 +2763,36 @@ vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
adapter->rss_fields =
VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
} else {
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+ adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_UDP_RSS)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+ }
+
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV4)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+ }
+
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV6)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ }
cmdInfo->setRssFields = adapter->rss_fields;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_RSS_FIELDS);
@@ -2728,14 +2856,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
goto activate_err;
}
+ vmxnet3_init_bufsize(adapter);
vmxnet3_init_coalesce(adapter);
vmxnet3_init_rssfields(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
- VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
+ adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
adapter->rx_queue[i].rx_ring[0].next2fill);
- VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
+ VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
(i * VMXNET3_REG_ALIGN)),
adapter->rx_queue[i].rx_ring[1].next2fill);
}
@@ -2901,19 +3030,29 @@ static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
size_t sz, i, ring0_size, ring1_size, comp_size;
- if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
- VMXNET3_MAX_ETH_HDR_SIZE) {
- adapter->skb_buf_size = adapter->netdev->mtu +
- VMXNET3_MAX_ETH_HDR_SIZE;
- if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
- adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
-
- adapter->rx_buf_per_pkt = 1;
+ /* With version 7, ring1 will have only T0 buffers */
+ if (!VMXNET3_VERSION_GE_7(adapter)) {
+ if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
+ VMXNET3_MAX_ETH_HDR_SIZE) {
+ adapter->skb_buf_size = adapter->netdev->mtu +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
+ adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
+
+ adapter->rx_buf_per_pkt = 1;
+ } else {
+ adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
+ sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
+ }
} else {
- adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
- sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
- VMXNET3_MAX_ETH_HDR_SIZE;
- adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
+ adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
+ VMXNET3_MAX_SKB_BUF_SIZE);
+ adapter->rx_buf_per_pkt = 1;
+ adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
+ adapter->ringBufSize.ring1BufSizeType1 = 0;
+ adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
}
/*
@@ -2929,6 +3068,11 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
ring1_size = (ring1_size + sz - 1) / sz * sz;
ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
sz * sz);
+ /* For v7 and later, keep ring sizes a power of 2 for UPT */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ ring0_size = rounddown_pow_of_two(ring0_size);
+ ring1_size = rounddown_pow_of_two(ring1_size);
+ }
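
rounddown_pow_of_two() only ever shrinks a ring, so the UPT constraint costs at most half the requested size; for instance:

	/* illustration only */
	ring1_size = rounddown_pow_of_two(3000);	/* -> 2048 */
	ring0_size = rounddown_pow_of_two(4096);	/* already a power of 2, unchanged */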
comp_size = ring0_size + ring1_size;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -3179,6 +3323,54 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ }
+
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
@@ -3466,7 +3658,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_6)) {
+ if (ver & (1 << VMXNET3_REV_7)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_7);
+ adapter->version = VMXNET3_REV_7 + 1;
+ } else if (ver & (1 << VMXNET3_REV_6)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_6);
@@ -3514,6 +3711,39 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_ver;
}
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
+ adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
+ if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
+ adapter->dev_caps[0] = adapter->devcap_supported[0] &
+ (1UL << VMXNET3_CAP_LARGE_BAR);
+ }
+ if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
+ adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
+ adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
+ adapter->dev_caps[0] |= adapter->devcap_supported[0] &
+ (1UL << VMXNET3_CAP_OOORX_COMP);
+ }
+ if (adapter->dev_caps[0])
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ if (VMXNET3_VERSION_GE_7(adapter) &&
+ adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
+ adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
+ adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
+ adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
+ } else {
+ adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
+ adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
+ adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
+ }
+
if (VMXNET3_VERSION_GE_6(adapter)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -3652,11 +3882,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
netif_napi_add(adapter->netdev,
&adapter->rx_queue[i].napi,
- vmxnet3_poll_rx_only, 64);
+ vmxnet3_poll_rx_only);
}
} else {
netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
- vmxnet3_poll, 64);
+ vmxnet3_poll);
}
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 3172d46c0335..18cf7c723201 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -209,12 +209,12 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
+ strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
@@ -298,7 +298,7 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
return features;
}
-static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
+static void vmxnet3_enable_encap_offloads(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -306,8 +306,56 @@ static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ NETIF_F_LRO;
+ if (features & NETIF_F_GSO_UDP_TUNNEL)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
}
}
@@ -322,6 +370,22 @@ static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM);
}
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_GENEVE_TSO |
+ 1UL << VMXNET3_CAP_VXLAN_TSO |
+ 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
}
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
@@ -357,8 +421,8 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXVLAN;
- if ((features & tun_offload_mask) != 0 && !udp_tun_enabled) {
- vmxnet3_enable_encap_offloads(netdev);
+ if ((features & tun_offload_mask) != 0) {
+ vmxnet3_enable_encap_offloads(netdev, features);
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXINNEROFLD;
} else if ((features & tun_offload_mask) == 0 &&
@@ -462,7 +526,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
- buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_TXPROD +
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
@@ -490,9 +554,9 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
- buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD +
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset +
i * VMXNET3_REG_ALIGN);
- buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD2 +
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
@@ -660,6 +724,13 @@ vmxnet3_set_ringparam(struct net_device *netdev,
new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
VMXNET3_RX_RING2_MAX_SIZE);
+ /* For v7 and later, keep ring sizes a power of 2 for UPT */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
+ new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
+ new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
+ }
+
/* rx data ring buffer size has to be a multiple of
* VMXNET3_RXDATA_DESC_SIZE_ALIGN
*/
@@ -913,6 +984,39 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev,
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+ rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_UDP_RSS)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+ }
+ if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV4)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+ }
+ if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV6)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
+ adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter,
+ VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
spin_lock_irqsave(&adapter->cmd_lock, flags);
cmdInfo->setRssFields = rss_fields;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -1188,6 +1292,34 @@ done:
return 0;
}
+static void vmxnet3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI) && adapter->intr.type == VMXNET3_IT_MSIX) {
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ ec->combined_count = adapter->num_tx_queues;
+ } else {
+ ec->rx_count = adapter->num_rx_queues;
+ ec->tx_count =
+ adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ }
+ } else {
+ ec->combined_count = 1;
+ }
+
+ ec->other_count = 1;
+
+ /* The number of interrupts cannot be changed on the fly, so just
+ * report maximums equal to the actual values.
+ */
+ ec->max_rx = ec->rx_count;
+ ec->max_tx = ec->tx_count;
+ ec->max_combined = ec->combined_count;
+ ec->max_other = ec->other_count;
+}
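
For example (interface name and queue count hypothetical), a 4-queue MSI-X setup with buddy-shared tx/rx interrupts would report:

/* $ ethtool -l ens192
 *   Combined: 4	(one shared interrupt per tx/rx queue pair)
 *   Other:    1	(the event/config interrupt)
 * The max_* fields mirror the current counts because the interrupt
 * layout cannot be changed at runtime.
 */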
+
static const struct ethtool_ops vmxnet3_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -1213,6 +1345,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.set_rxfh = vmxnet3_set_rss,
#endif
.get_link_ksettings = vmxnet3_get_link_ksettings,
+ .get_channels = vmxnet3_get_channels,
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7027ff483fa5..3367db23aa13 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -69,18 +69,19 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.6.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01060000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01070000
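Each byte of VMXNET3_DRIVER_VERSION_NUM holds one dotted component of the version string, most significant byte first, so 0x01070000 encodes exactly "1.7.0.0". A hypothetical decoder:

    /* Hypothetical helper: 0x01070000 -> {1, 7, 0, 0} ("1.7.0.0"). */
    static void vmxnet3_decode_version(u32 num, u8 out[4])
    {
            out[0] = num >> 24;
            out[1] = (num >> 16) & 0xff;
            out[2] = (num >> 8) & 0xff;
            out[3] = num & 0xff;
    }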
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
@@ -135,6 +136,7 @@ struct vmxnet3_cmd_ring {
u32 next2fill;
u32 next2comp;
u8 gen;
+ u8 isOutOfOrder;
dma_addr_t basePA;
};
@@ -259,9 +261,13 @@ enum vmxnet3_rx_buf_type {
VMXNET3_RX_BUF_PAGE = 2
};
+#define VMXNET3_RXD_COMP_PENDING 0
+#define VMXNET3_RXD_COMP_DONE 1
+
struct vmxnet3_rx_buf_info {
enum vmxnet3_rx_buf_type buf_type;
u16 len;
+ u8 comp_state;
union {
struct sk_buff *skb;
struct page *page;
@@ -402,6 +408,13 @@ struct vmxnet3_adapter {
dma_addr_t pm_conf_pa;
dma_addr_t rss_conf_pa;
bool queuesExtEnabled;
+ struct Vmxnet3_RingBufferSize ringBufSize;
+ u32 devcap_supported[8];
+ u32 ptcap_supported[8];
+ u32 dev_caps[8];
+ u16 tx_prod_offset;
+ u16 rx_prod_offset;
+ u16 rx_prod2_offset;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -431,11 +444,13 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_5 + 1)
#define VMXNET3_VERSION_GE_6(adapter) \
(adapter->version >= VMXNET3_REV_6 + 1)
+#define VMXNET3_VERSION_GE_7(adapter) \
+ (adapter->version >= VMXNET3_REV_7 + 1)
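Note the off-by-one convention behind these macros: the VMXNET3_REV_* constants are zero-based hardware revisions (REV_7 is 6), while adapter->version stores the one-based negotiated version, hence the `+ 1` in every comparison. A compile-time check makes the convention explicit (illustrative only, not part of the driver):

    /* Illustrative: revision 7 negotiates as version 7 (one-based). */
    static_assert(VMXNET3_REV_7 + 1 == 7);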
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 1024
-#define VMXNET3_DEF_RX_RING2_SIZE 256
+#define VMXNET3_DEF_RX_RING2_SIZE 512
#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
@@ -494,6 +509,7 @@ void vmxnet3_set_ethtool_ops(struct net_device *netdev);
void vmxnet3_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
+bool vmxnet3_check_ptcapability(u32 cap_supported, u32 cap);
extern char vmxnet3_driver_name[];
#endif
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index e0b1ab99a359..badf6f09ae51 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -418,7 +418,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
this_cpu_inc(dev->dstats->rx_drps);
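__netif_rx() is the variant of netif_rx() for callers that already run with bottom halves disabled, as this xmit path does, saving a redundant local_bh_disable()/local_bh_enable() pair. A sketch of the calling convention (not vrf code):

    /* Sketch: __netif_rx() requires BHs to be disabled already;
     * netif_rx() remains the safe choice for arbitrary contexts.
     */
    static int deliver_locally(struct sk_buff *skb)
    {
            int ret;

            local_bh_disable();
            ret = __netif_rx(skb);
            local_bh_enable();
            return ret;
    }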
@@ -472,14 +472,13 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
memset(&fl6, 0, sizeof(fl6));
/* needed to match OIF rule */
- fl6.flowi6_oif = dev->ifindex;
+ fl6.flowi6_l3mdev = dev->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = iph->nexthdr;
- fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst) || dst == dst_null)
@@ -551,10 +550,10 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
memset(&fl4, 0, sizeof(fl4));
/* needed to match OIF rule */
- fl4.flowi4_oif = vrf_dev->ifindex;
+ fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
fl4.flowi4_tos = RT_TOS(ip4h->tos);
- fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
fl4.saddr = ip4h->saddr;
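flowi4_l3mdev/flowi6_l3mdev carry the VRF ifindex as a first-class lookup key, so the l3mdev FIB rule no longer has to be matched through an overloaded oif, and the FLOWI_FLAG_SKIP_NH_OIF workaround dropped above becomes unnecessary. A minimal sketch of a VRF-scoped IPv4 lookup using the new key (assumes a valid vrf_dev; not the vrf driver itself):

    /* Minimal sketch: route lookup scoped to a VRF via flowi4_l3mdev. */
    static struct rtable *lookup_in_vrf(struct net *net,
                                        struct net_device *vrf_dev,
                                        __be32 daddr)
    {
            struct flowi4 fl4 = {
                    .flowi4_l3mdev = vrf_dev->ifindex,
                    .daddr = daddr,
            };

            return ip_route_output_key(net, &fl4);
    }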
@@ -815,8 +814,8 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
*/
if (rt6) {
dst = &rt6->dst;
- dev_replace_track(dst->dev, net->loopback_dev,
- &dst->dev_tracker, GFP_KERNEL);
+ netdev_ref_replace(dst->dev, net->loopback_dev,
+ &dst->dev_tracker, GFP_KERNEL);
dst->dev = net->loopback_dev;
dst_release(dst);
}
@@ -1062,8 +1061,8 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
*/
if (rth) {
dst = &rth->dst;
- dev_replace_track(dst->dev, net->loopback_dev,
- &dst->dev_tracker, GFP_KERNEL);
+ netdev_ref_replace(dst->dev, net->loopback_dev,
+ &dst->dev_tracker, GFP_KERNEL);
dst->dev = net->loopback_dev;
dst_release(dst);
}
@@ -1078,7 +1077,7 @@ static int vrf_rtable_create(struct net_device *dev)
return -ENOMEM;
/* create a dst for routing packets out through a VRF device */
- rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1);
if (!rth)
return -ENOMEM;
@@ -1266,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
eth = (struct ethhdr *)skb->data;
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
/* we set the ethernet destination and the source addresses to the
* address of the VRF device.
@@ -1295,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
*/
static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
struct net_device *vrf_dev,
- u16 proto)
+ u16 proto, struct net_device *orig_dev)
{
- if (skb_mac_header_was_set(skb))
+ if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
return 0;
return vrf_prepare_mac_header(skb, vrf_dev, proto);
@@ -1403,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
/* if packet is NDISC then keep the ingress interface */
if (!is_ndisc) {
+ struct net_device *orig_dev = skb->dev;
+
vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1411,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
int err;
err = vrf_add_mac_header_if_unset(skb, vrf_dev,
- ETH_P_IPV6);
+ ETH_P_IPV6,
+ orig_dev);
if (likely(!err)) {
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
@@ -1441,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
+ struct net_device *orig_dev = skb->dev;
+
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
@@ -1461,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
if (!list_empty(&vrf_dev->ptype_all)) {
int err;
- err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);
+ err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
+ orig_dev);
if (likely(!err)) {
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
@@ -1535,8 +1541,8 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
static void vrf_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops vrf_ethtool_ops = {
diff --git a/drivers/net/vxlan/Makefile b/drivers/net/vxlan/Makefile
new file mode 100644
index 000000000000..d4c255499b72
--- /dev/null
+++ b/drivers/net/vxlan/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the vxlan driver
+#
+
+obj-$(CONFIG_VXLAN) += vxlan.o
+
+vxlan-objs := vxlan_core.o vxlan_multicast.o vxlan_vnifilter.o
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan/vxlan_core.c
index 359d16780dbb..6ab669dcd1c6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -34,10 +34,10 @@
#include <net/ip6_checksum.h>
#endif
+#include "vxlan_private.h"
+
#define VXLAN_VERSION "0.1"
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
@@ -53,41 +53,15 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-static unsigned int vxlan_net_id;
-static struct rtnl_link_ops vxlan_link_ops;
+unsigned int vxlan_net_id;
-static const u8 all_zeros_mac[ETH_ALEN + 2];
+const u8 all_zeros_mac[ETH_ALEN + 2];
+static struct rtnl_link_ops vxlan_link_ops;
static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
-/* per-network namespace private data for this module */
-struct vxlan_net {
- struct list_head vxlan_list;
- struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock;
- struct notifier_block nexthop_notifier_block;
-};
-
-/* Forwarding table entry */
-struct vxlan_fdb {
- struct hlist_node hlist; /* linked list of entries */
- struct rcu_head rcu;
- unsigned long updated; /* jiffies */
- unsigned long used;
- struct list_head remotes;
- u8 eth_addr[ETH_ALEN];
- u16 state; /* see ndm_state */
- __be32 vni;
- u16 flags; /* see ndm_flags and below */
- struct list_head nh_list;
- struct nexthop __rcu *nh;
- struct vxlan_dev __rcu *vdev;
-};
-
-#define NTF_VXLAN_ADDED_BY_USER 0x100
-
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -98,17 +72,6 @@ static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline
-bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
-{
- if (a->sa.sa_family != b->sa.sa_family)
- return false;
- if (a->sa.sa_family == AF_INET6)
- return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
- else
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -135,12 +98,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
#else /* !CONFIG_IPV6 */
-static inline
-bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
-{
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -161,37 +118,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
}
#endif
-/* Virtual Network hash table head */
-static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
-{
- return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
-}
-
-/* Socket hash table head */
-static inline struct hlist_head *vs_head(struct net *net, __be16 port)
-{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-
- return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* First remote destination for a forwarding entry.
- * Guaranteed to be non-NULL because remotes are never deleted.
- */
-static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
-{
- if (rcu_access_pointer(fdb->nh))
- return NULL;
- return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
-}
-
-static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
-{
- if (rcu_access_pointer(fdb->nh))
- return NULL;
- return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
-}
-
/* Find VXLAN socket based on network namespace, address family, UDP port,
* enabled unshareable flags and socket device binding (see l3mdev with
* non-default VRF).
@@ -213,18 +139,29 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
- __be32 vni)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs,
+ int ifindex, __be32 vni,
+ struct vxlan_vni_node **vninode)
{
+ struct vxlan_vni_node *vnode;
struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
- if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ if (vs->flags & VXLAN_F_COLLECT_METADATA &&
+ !(vs->flags & VXLAN_F_VNIFILTER))
vni = 0;
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
- if (node->vxlan->default_dst.remote_vni != vni)
+ if (!node->vxlan)
continue;
+ vnode = NULL;
+ if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ vnode = vxlan_vnifilter_lookup(node->vxlan, vni);
+ if (!vnode)
+ continue;
+ } else if (node->vxlan->default_dst.remote_vni != vni) {
+ continue;
+ }
if (IS_ENABLED(CONFIG_IPV6)) {
const struct vxlan_config *cfg = &node->vxlan->cfg;
@@ -234,6 +171,8 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
continue;
}
+ if (vninode)
+ *vninode = vnode;
return node->vxlan;
}
@@ -251,7 +190,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, ifindex, vni);
+ return vxlan_vs_find_vni(vs, ifindex, vni, NULL);
}
/* Fill in neighbour message in skbuff. */
@@ -493,7 +432,7 @@ static u32 eth_hash(const unsigned char *addr)
return hash_64(value, FDB_HASH_BITS);
}
-static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(addr + 2));
@@ -501,7 +440,7 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
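eth_vni_hash() loses its static qualifier because, in collect-metadata mode, one FDB holds the same MAC under many VNIs and other files in the new vxlan directory need the same bucket computation. The key reads MAC bytes 2..5 (the last OUI byte plus the whole NIC portion, where most of the entropy lives), and jhash_2words() folds in the VNI and the boot-time salt. A hypothetical illustration of the key extraction:

    /* Hypothetical: for 00:11:22:33:44:55, this reads bytes
     * 22 33 44 55 as one unaligned, native-endian 32-bit key.
     */
    static u32 mac_entropy_key(const u8 *mac)
    {
            return get_unaligned((u32 *)(mac + 2));
    }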
-static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
+u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
{
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
return eth_vni_hash(mac, vni);
@@ -712,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
- return -ENOBUFS;
+ return -ENOMEM;
}
rd->remote_ip = *ip;
@@ -774,12 +713,9 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
- vh = skb_gro_header_fast(skb, off_vx);
- if (skb_gro_header_hard(skb, hlen)) {
- vh = skb_gro_header_slow(skb, hlen, off_vx);
- if (unlikely(!vh))
- goto out;
- }
+ vh = skb_gro_header(skb, hlen, off_vx);
+ if (unlikely(!vh))
+ goto out;
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
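skb_gro_header() folds the old fast/slow two-step into a single helper: try the already-linear header first, fall back to pulling it in, and return NULL on failure. A sketch of the open-coded form it replaces, matching the deleted lines above:

    /* Sketch of what skb_gro_header(skb, hlen, off) replaces. */
    static void *gro_header_legacy(struct sk_buff *skb, unsigned int hlen,
                                   unsigned int off)
    {
            void *hdr = skb_gro_header_fast(skb, off);

            if (skb_gro_header_hard(skb, hlen))     /* not linear yet */
                    hdr = skb_gro_header_slow(skb, hlen, off);
            return hdr;                             /* may be NULL */
    }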
@@ -872,37 +808,35 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
goto err_inval;
}
- if (nh) {
- if (!nexthop_get(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
- nh = NULL;
- goto err_inval;
- }
- if (!nexthop_is_fdb(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
- goto err_inval;
- }
+ if (!nexthop_get(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+ nh = NULL;
+ goto err_inval;
+ }
+ if (!nexthop_is_fdb(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
+ goto err_inval;
+ }
+
+ if (!nexthop_is_multipath(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ goto err_inval;
+ }
- if (!nexthop_is_multipath(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ /* check nexthop group family */
+ switch (vxlan->default_dst.remote_ip.sa.sa_family) {
+ case AF_INET:
+ if (!nexthop_has_v4(nh)) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
goto err_inval;
}
-
- /* check nexthop group family */
- switch (vxlan->default_dst.remote_ip.sa.sa_family) {
- case AF_INET:
- if (!nexthop_has_v4(nh)) {
- err = -EAFNOSUPPORT;
- NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
- goto err_inval;
- }
- break;
- case AF_INET6:
- if (nexthop_has_v4(nh)) {
- err = -EAFNOSUPPORT;
- NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
- goto err_inval;
- }
+ break;
+ case AF_INET6:
+ if (nexthop_has_v4(nh)) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
+ goto err_inval;
}
}
@@ -920,12 +854,12 @@ err_inval:
return err;
}
-static int vxlan_fdb_create(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __be16 port, __be32 src_vni,
- __be32 vni, __u32 ifindex, __u16 ndm_flags,
- u32 nhid, struct vxlan_fdb **fdb,
- struct netlink_ext_ack *extack)
+int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
@@ -1150,13 +1084,13 @@ err_notify:
}
/* Add new entry to forwarding table -- assumes lock held */
-static int vxlan_fdb_update(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __u16 flags,
- __be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags, u32 nhid,
- bool swdev_notify,
- struct netlink_ext_ack *extack)
+int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
struct vxlan_fdb *f;
@@ -1192,19 +1126,24 @@ static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
- __be32 *vni, u32 *ifindex, u32 *nhid)
+ __be32 *vni, u32 *ifindex, u32 *nhid,
+ struct netlink_ext_ack *extack)
{
struct net *net = dev_net(vxlan->dev);
int err;
- if (tb[NDA_NH_ID] && (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] ||
- tb[NDA_PORT]))
+ if (tb[NDA_NH_ID] &&
+ (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || tb[NDA_PORT])) {
+ NL_SET_ERR_MSG(extack, "DST, VNI, ifindex and port are mutually exclusive with NH_ID");
return -EINVAL;
+ }
if (tb[NDA_DST]) {
err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Unsupported address family");
return err;
+ }
} else {
union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
@@ -1220,24 +1159,30 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
}
if (tb[NDA_PORT]) {
- if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
+ if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) {
+ NL_SET_ERR_MSG(extack, "Invalid vxlan port");
return -EINVAL;
+ }
*port = nla_get_be16(tb[NDA_PORT]);
} else {
*port = vxlan->cfg.dst_port;
}
if (tb[NDA_VNI]) {
- if (nla_len(tb[NDA_VNI]) != sizeof(u32))
+ if (nla_len(tb[NDA_VNI]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid vni");
return -EINVAL;
+ }
*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
} else {
*vni = vxlan->default_dst.remote_vni;
}
if (tb[NDA_SRC_VNI]) {
- if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
+ if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid src vni");
return -EINVAL;
+ }
*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
} else {
*src_vni = vxlan->default_dst.remote_vni;
@@ -1246,12 +1191,16 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
if (tb[NDA_IFINDEX]) {
struct net_device *tdev;
- if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid ifindex");
return -EINVAL;
+ }
*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
tdev = __dev_get_by_index(net, *ifindex);
- if (!tdev)
+ if (!tdev) {
+ NL_SET_ERR_MSG(extack, "Device not found");
return -EADDRNOTAVAIL;
+ }
} else {
*ifindex = 0;
}
@@ -1289,7 +1238,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex,
- &nhid);
+ &nhid, extack);
if (err)
return err;
@@ -1307,10 +1256,10 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
- const unsigned char *addr, union vxlan_addr ip,
- __be16 port, __be32 src_vni, __be32 vni,
- u32 ifindex, bool swdev_notify)
+int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, bool swdev_notify)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
@@ -1343,7 +1292,8 @@ out:
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
union vxlan_addr ip;
@@ -1354,7 +1304,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
int err;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex,
- &nhid);
+ &nhid, extack);
if (err)
return err;
@@ -1519,56 +1469,6 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
}
-/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
-{
- struct vxlan_dev *vxlan;
- struct vxlan_sock *sock4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct vxlan_sock *sock6;
-#endif
- unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
-
- sock4 = rtnl_dereference(dev->vn4_sock);
-
- /* The vxlan_sock is only used by dev, leaving group has
- * no effect on other vxlan devices.
- */
- if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
- return false;
-#if IS_ENABLED(CONFIG_IPV6)
- sock6 = rtnl_dereference(dev->vn6_sock);
- if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
- return false;
-#endif
-
- list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- if (!netif_running(vxlan->dev) || vxlan == dev)
- continue;
-
- if (family == AF_INET &&
- rtnl_dereference(vxlan->vn4_sock) != sock4)
- continue;
-#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 &&
- rtnl_dereference(vxlan->vn6_sock) != sock6)
- continue;
-#endif
-
- if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- &dev->default_dst.remote_ip))
- continue;
-
- if (vxlan->default_dst.remote_ifindex !=
- dev->default_dst.remote_ifindex)
- continue;
-
- return true;
- }
-
- return false;
-}
-
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
struct vxlan_net *vn;
@@ -1602,7 +1502,10 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
synchronize_net();
- vxlan_vs_del_dev(vxlan);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vs_del_vnigrp(vxlan);
+ else
+ vxlan_vs_del_dev(vxlan);
if (__vxlan_sock_release_prep(sock4)) {
udp_tunnel_sock_release(sock4->sock);
@@ -1617,76 +1520,6 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-/* Update multicast group membership when first VNI on
- * multicast address is brought up
- */
-static int vxlan_igmp_join(struct vxlan_dev *vxlan)
-{
- struct sock *sk;
- union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
- int ifindex = vxlan->default_dst.remote_ifindex;
- int ret = -EINVAL;
-
- if (ip->sa.sa_family == AF_INET) {
- struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
- .imr_ifindex = ifindex,
- };
-
- sk = sock4->sock->sk;
- lock_sock(sk);
- ret = ip_mc_join_group(sk, &mreq);
- release_sock(sk);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
-
- sk = sock6->sock->sk;
- lock_sock(sk);
- ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
- &ip->sin6.sin6_addr);
- release_sock(sk);
-#endif
- }
-
- return ret;
-}
-
-/* Inverse of vxlan_igmp_join when last VNI is brought down */
-static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
-{
- struct sock *sk;
- union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
- int ifindex = vxlan->default_dst.remote_ifindex;
- int ret = -EINVAL;
-
- if (ip->sa.sa_family == AF_INET) {
- struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
- .imr_ifindex = ifindex,
- };
-
- sk = sock4->sock->sk;
- lock_sock(sk);
- ret = ip_mc_leave_group(sk, &mreq);
- release_sock(sk);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
-
- sk = sock6->sock->sk;
- lock_sock(sk);
- ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
- &ip->sin6.sin6_addr);
- release_sock(sk);
-#endif
- }
-
- return ret;
-}
-
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
struct sk_buff *skb, u32 vxflags)
{
@@ -1828,6 +1661,7 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
+ struct vxlan_vni_node *vninode = NULL;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
@@ -1860,7 +1694,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan)
goto drop;
@@ -1930,6 +1764,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
}
@@ -1937,11 +1773,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- atomic_long_inc(&vxlan->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(vxlan->dev);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
goto drop;
}
dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
@@ -1975,7 +1814,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
return -ENOENT;
vni = vxlan_vni(hdr->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL);
if (!vxlan)
return -ENOENT;
@@ -2049,8 +1888,12 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
- if (netif_rx_ni(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
+
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
@@ -2204,9 +2047,11 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (reply == NULL)
goto out;
- if (netif_rx_ni(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
-
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
@@ -2395,7 +2240,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
- struct dst_cache *dst_cache,
+ __u8 flow_flags, struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
@@ -2422,6 +2267,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.saddr = *saddr;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
+ fl4.flowi4_flags = flow_flags;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
@@ -2472,7 +2318,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = *saddr;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
+ fl6.flowlabel = ip6_make_flowinfo(tos, label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
@@ -2537,18 +2383,23 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tx_packets++;
- tx_stats->tx_bytes += len;
+ u64_stats_inc(&tx_stats->tx_packets);
+ u64_stats_add(&tx_stats->tx_bytes, len);
u64_stats_update_end(&tx_stats->syncp);
+ vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += len;
+ u64_stats_inc(&rx_stats->rx_packets);
+ u64_stats_add(&rx_stats->rx_bytes, len);
u64_stats_update_end(&rx_stats->syncp);
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
+ len);
} else {
drop:
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL,
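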
+ VXLAN_VNI_STATS_RX_DROPS, 0);
}
rcu_read_unlock();
}
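The raw `++`/`+=` on the per-cpu counters is replaced with the u64_stats accessors: on 64-bit these compile to plain loads and stores, while on 32-bit they pair with the syncp seqcount so a reader never observes a torn 64-bit value. A minimal standalone writer/reader sketch of the pattern:

    /* Minimal sketch of the u64_stats pattern used above. */
    struct pkt_counters {
            u64_stats_t             packets;
            u64_stats_t             bytes;
            struct u64_stats_sync   syncp;
    };

    static void count_packet(struct pkt_counters *c, unsigned int len)
    {
            u64_stats_update_begin(&c->syncp);
            u64_stats_inc(&c->packets);
            u64_stats_add(&c->bytes, len);
            u64_stats_update_end(&c->syncp);
    }

    static u64 read_bytes(struct pkt_counters *c)
    {
            unsigned int start;
            u64 val;

            do {
                    start = u64_stats_fetch_begin(&c->syncp);
                    val = u64_stats_read(&c->bytes);
            } while (u64_stats_fetch_retry(&c->syncp, start));

            return val;
    }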
@@ -2578,6 +2429,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
return -ENOENT;
@@ -2601,15 +2454,19 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
union vxlan_addr remote_ip, local_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
- __be32 vni, label;
- __u8 tos, ttl;
+ __u8 tos, ttl, flow_flags = 0;
int ifindex;
int err;
u32 flags = vxlan->cfg.flags;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ __be32 vni = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ __be32 label;
+#endif
info = skb_tunnel_info(skb);
@@ -2647,7 +2504,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
else
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+#if IS_ENABLED(CONFIG_IPV6)
label = vxlan->cfg.label;
+#endif
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
@@ -2664,6 +2523,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
dst = &remote_ip;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+ flow_flags = info->key.flow_flags;
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
@@ -2674,7 +2534,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = info->key.ttl;
tos = info->key.tos;
+#if IS_ENABLED(CONFIG_IPV6)
label = info->key.label;
+#endif
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
@@ -2692,7 +2554,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
- dst_port, src_port,
+ dst_port, src_port, flow_flags,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -2821,12 +2683,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label, src_port, dst_port, !udp_sum);
#endif
}
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
out_unlock:
rcu_read_unlock();
return;
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2838,6 +2702,7 @@ tx_error:
dev->stats.tx_carrier_errors++;
dst_release(ndst);
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2870,6 +2735,8 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
}
@@ -2944,6 +2811,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -3044,6 +2913,9 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_init(vxlan);
+
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
@@ -3073,6 +2945,9 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_uninit(vxlan);
+
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
@@ -3090,14 +2965,10 @@ static int vxlan_open(struct net_device *dev)
if (ret < 0)
return ret;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
- ret = vxlan_igmp_join(vxlan);
- if (ret == -EADDRINUSE)
- ret = 0;
- if (ret) {
- vxlan_sock_release(vxlan);
- return ret;
- }
+ ret = vxlan_multicast_join(vxlan);
+ if (ret) {
+ vxlan_sock_release(vxlan);
+ return ret;
}
if (vxlan->cfg.age_interval)
@@ -3134,19 +3005,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- int ret = 0;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- !vxlan_group_used(vn, vxlan))
- ret = vxlan_igmp_leave(vxlan);
+ vxlan_multicast_leave(vxlan);
del_timer_sync(&vxlan->age_timer);
vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
- return ret;
+ return 0;
}
/* Stub, nothing needs to be done. */
@@ -3193,7 +3060,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, dport, sport,
- &info->dst_cache, info);
+ info->key.flow_flags, &info->dst_cache,
+ info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@@ -3369,6 +3237,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
[IFLA_VXLAN_DF] = { .type = NLA_U8 },
+ [IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -3441,8 +3310,8 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
static void vxlan_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static int vxlan_get_link_ksettings(struct net_device *dev,
@@ -3554,6 +3423,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
int l3mdev_index = 0;
@@ -3589,7 +3459,12 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
rcu_assign_pointer(vxlan->vn4_sock, vs);
node = &vxlan->hlist4;
}
- vxlan_vs_add_dev(vs, vxlan, node);
+
+ if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ vxlan_vs_add_vnigrp(vxlan, vs, ipv6);
+ else
+ vxlan_vs_add_dev(vs, vxlan, node);
+
return 0;
}
@@ -3616,13 +3491,42 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
return ret;
}
+int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ struct vxlan_config *conf, __be32 vni)
+{
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
+ struct vxlan_dev *tmp;
+
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp == vxlan)
+ continue;
+ if (tmp->cfg.flags & VXLAN_F_VNIFILTER) {
+ if (!vxlan_vnifilter_lookup(tmp, vni))
+ continue;
+ } else if (tmp->cfg.vni != vni) {
+ continue;
+ }
+ if (tmp->cfg.dst_port != conf->dst_port)
+ continue;
+ if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
+ (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
+ continue;
+
+ if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ tmp->cfg.remote_ifindex != conf->remote_ifindex)
+ continue;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
struct net_device **lower,
struct vxlan_dev *old,
struct netlink_ext_ack *extack)
{
- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *tmp;
bool use_ipv6 = false;
if (conf->flags & VXLAN_F_GPE) {
@@ -3755,22 +3659,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
if (!conf->age_interval)
conf->age_interval = FDB_AGE_DEFAULT;
- list_for_each_entry(tmp, &vn->vxlan_list, next) {
- if (tmp == old)
- continue;
-
- if (tmp->cfg.vni != conf->vni)
- continue;
- if (tmp->cfg.dst_port != conf->dst_port)
- continue;
- if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
- (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
- continue;
-
- if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
- tmp->cfg.remote_ifindex != conf->remote_ifindex)
- continue;
-
+ if (vxlan_vni_in_use(src_net, old, conf, conf->vni)) {
NL_SET_ERR_MSG(extack,
"A VXLAN device with the specified VNI already exists");
return -EEXIST;
@@ -3810,8 +3699,7 @@ static void vxlan_config_apply(struct net_device *dev,
if (lowerdev) {
dst->remote_ifindex = conf->remote_ifindex;
- netif_set_gso_max_size(dev, lowerdev->gso_max_size);
- netif_set_gso_max_segs(dev, lowerdev->gso_max_segs);
+ netif_inherit_tso_max(dev, lowerdev);
needed_headroom = lowerdev->hard_header_len;
needed_headroom += lowerdev->needed_headroom;
@@ -4226,6 +4114,21 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_VXLAN_DF])
conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+ if (data[IFLA_VXLAN_VNIFILTER]) {
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_VNIFILTER,
+ VXLAN_F_VNIFILTER, changelink, false,
+ extack);
+ if (err)
+ return err;
+
+ if ((conf->flags & VXLAN_F_VNIFILTER) &&
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_VNIFILTER],
+ "vxlan vnifilter only valid in collect metadata mode");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
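VNI filtering only makes sense on an external (collect-metadata) device, since a traditional vxlan netdev already pins a single VNI, so the combination is rejected at netlink validation time with an attribute-specific extack message. With a recent iproute2 the feature is driven roughly as follows (usage sketch; exact syntax may vary by iproute2 version):

    ip link add vxlan0 type vxlan dstport 4789 external vnifilter
    bridge vni add dev vxlan0 vni 100
    bridge vni show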
@@ -4301,6 +4204,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_ifindex,
true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+
+	/* If this is a vni filtering device, also update the fdb
+	 * entries of all vnis that were using the default remote ip
+	 */
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip,
+ &conf.remote_ip, extack);
+ if (err) {
+ netdev_adjacent_change_abort(dst->remote_dev,
+ lowerdev, dev);
+ return err;
+ }
+ }
}
if (conf.age_interval != vxlan->cfg.age_interval)
@@ -4446,6 +4362,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER &&
+ nla_put_u8(skb, IFLA_VXLAN_VNIFILTER,
+ !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -4805,6 +4726,8 @@ static int __init vxlan_init_module(void)
if (rc)
goto out4;
+ vxlan_vnifilter_init();
+
return 0;
out4:
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
@@ -4819,6 +4742,7 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
+ vxlan_vnifilter_uninit();
rtnl_link_unregister(&vxlan_link_ops);
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
unregister_netdevice_notifier(&vxlan_notifier_block);
diff --git a/drivers/net/vxlan/vxlan_multicast.c b/drivers/net/vxlan/vxlan_multicast.c
new file mode 100644
index 000000000000..a7f2d67dc61b
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_multicast.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vxlan multicast group handling
+ */
+#include <linux/kernel.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <linux/igmp.h>
+#include <net/vxlan.h>
+
+#include "vxlan_private.h"
+
+/* Update multicast group membership when the first VNI on a
+ * multicast address is brought up
+ */
+int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
+ int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
+ int ret = -EINVAL;
+ struct sock *sk;
+
+ if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ sk = sock4->sock->sk;
+ lock_sock(sk);
+ ret = ip_mc_join_group(sk, &mreq);
+ release_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
+ lock_sock(sk);
+ ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+ &ip->sin6.sin6_addr);
+ release_sock(sk);
+#endif
+ }
+
+ return ret;
+}
+
+int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
+ int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
+ int ret = -EINVAL;
+ struct sock *sk;
+
+ if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ sk = sock4->sock->sk;
+ lock_sock(sk);
+ ret = ip_mc_leave_group(sk, &mreq);
+ release_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
+ lock_sock(sk);
+ ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+ &ip->sin6.sin6_addr);
+ release_sock(sk);
+#endif
+ }
+
+ return ret;
+}
+
+static bool vxlan_group_used_match(union vxlan_addr *ip, int ifindex,
+ union vxlan_addr *rip, int rifindex)
+{
+ if (!vxlan_addr_multicast(rip))
+ return false;
+
+ if (!vxlan_addr_equal(rip, ip))
+ return false;
+
+ if (rifindex != ifindex)
+ return false;
+
+ return true;
+}
+
+static bool vxlan_group_used_by_vnifilter(struct vxlan_dev *vxlan,
+ union vxlan_addr *ip, int ifindex)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+
+ if (vxlan_group_used_match(ip, ifindex,
+ &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ return true;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+
+ if (vxlan_group_used_match(ip, ifindex,
+ &v->remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ return true;
+ }
+
+ return false;
+}
+
+/* See if the multicast group is already in use by another vxlan device */
+bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
+ __be32 vni, union vxlan_addr *rip, int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &dev->default_dst.remote_ip);
+ int ifindex = (rifindex ? : dev->default_dst.remote_ifindex);
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock *sock6;
+#endif
+ unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
+	/* The vxlan_sock is only used by dev, so leaving the group has
+	 * no effect on other vxlan devices.
+	 */
+ if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
+ return false;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
+ return false;
+#endif
+
+ list_for_each_entry(vxlan, &vn->vxlan_list, next) {
+ if (!netif_running(vxlan->dev) || vxlan == dev)
+ continue;
+
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
+ continue;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
+ continue;
+#endif
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ if (!vxlan_group_used_by_vnifilter(vxlan, ip, ifindex))
+ continue;
+ } else {
+ if (!vxlan_group_used_match(ip, ifindex,
+ &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+static int vxlan_multicast_join_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp, *vgood = NULL;
+ int ret = 0;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+ /* skip if address is same as default address */
+ if (vxlan_addr_equal(&v->remote_ip,
+ &vxlan->default_dst.remote_ip))
+ continue;
+ ret = vxlan_igmp_join(vxlan, &v->remote_ip, 0);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ goto out;
+ vgood = v;
+ }
+out:
+ if (ret) {
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+ if (vxlan_addr_equal(&v->remote_ip,
+ &vxlan->default_dst.remote_ip))
+ continue;
+ vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
+ if (v == vgood)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int vxlan_multicast_leave_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+ int last_err = 0, ret;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (vxlan_addr_multicast(&v->remote_ip) &&
+ !vxlan_group_used(vn, vxlan, v->vni, &v->remote_ip,
+ 0)) {
+ ret = vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
+ if (ret)
+ last_err = ret;
+ }
+ }
+
+ return last_err;
+}
+
+int vxlan_multicast_join(struct vxlan_dev *vxlan)
+{
+ int ret = 0;
+
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
+ ret = vxlan_igmp_join(vxlan, &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ return ret;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ return vxlan_multicast_join_vnigrp(vxlan);
+
+ return 0;
+}
+
+int vxlan_multicast_leave(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ int ret = 0;
+
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ !vxlan_group_used(vn, vxlan, 0, NULL, 0)) {
+ ret = vxlan_igmp_leave(vxlan, &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex);
+ if (ret)
+ return ret;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ return vxlan_multicast_leave_vnigrp(vxlan);
+
+ return 0;
+}
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
new file mode 100644
index 000000000000..599c3b4fdd5e
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vxlan private header file
+ */
+
+#ifndef _VXLAN_PRIVATE_H
+#define _VXLAN_PRIVATE_H
+
+#include <linux/rhashtable.h>
+
+extern unsigned int vxlan_net_id;
+extern const u8 all_zeros_mac[ETH_ALEN + 2];
+extern const struct rhashtable_params vxlan_vni_rht_params;
+
+#define PORT_HASH_BITS 8
+#define PORT_HASH_SIZE (1 << PORT_HASH_BITS)
+
+/* per-network namespace private data for this module */
+struct vxlan_net {
+ struct list_head vxlan_list;
+ struct hlist_head sock_list[PORT_HASH_SIZE];
+ spinlock_t sock_lock;
+ struct notifier_block nexthop_notifier_block;
+};
+
+/* Forwarding table entry */
+struct vxlan_fdb {
+ struct hlist_node hlist; /* linked list of entries */
+ struct rcu_head rcu;
+ unsigned long updated; /* jiffies */
+ unsigned long used;
+ struct list_head remotes;
+ u8 eth_addr[ETH_ALEN];
+ u16 state; /* see ndm_state */
+ __be32 vni;
+ u16 flags; /* see ndm_flags and below */
+ struct list_head nh_list;
+ struct nexthop __rcu *nh;
+ struct vxlan_dev __rcu *vdev;
+};
+
+#define NTF_VXLAN_ADDED_BY_USER 0x100
+
+/* Virtual Network hash table head */
+static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
+{
+ return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
+}
+
+/* Socket hash table head */
+static inline struct hlist_head *vs_head(struct net *net, __be16 port)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+}
+
+/* First remote destination for a forwarding entry.
+ * Guaranteed to be non-NULL because remotes are never deleted.
+ */
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
+{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
+ return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ if (a->sa.sa_family != b->sa.sa_family)
+ return false;
+ if (a->sa.sa_family == AF_INET6)
+ return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+ else
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+#else /* !CONFIG_IPV6 */
+
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+#endif
+
+static inline struct vxlan_vni_node *
+vxlan_vnifilter_lookup(struct vxlan_dev *vxlan, __be32 vni)
+{
+ struct vxlan_vni_group *vg;
+
+ vg = rcu_dereference_rtnl(vxlan->vnigrp);
+ if (!vg)
+ return NULL;
+
+ return rhashtable_lookup_fast(&vg->vni_hash, &vni,
+ vxlan_vni_rht_params);
+}
+
+/* vxlan_core.c */
+int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack);
+int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, bool swdev_notify);
+u32 eth_vni_hash(const unsigned char *addr, __be32 vni);
+u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni);
+int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
+ bool swdev_notify, struct netlink_ext_ack *extack);
+int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ struct vxlan_config *conf, __be32 vni);
+
+/* vxlan_vnifilter.c */
+int vxlan_vnigroup_init(struct vxlan_dev *vxlan);
+void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
+
+void vxlan_vnifilter_init(void);
+void vxlan_vnifilter_uninit(void);
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+ int type, unsigned int len);
+
+void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ bool ipv6);
+void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan);
+int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *new_remote_ip,
+ struct netlink_ext_ack *extack);
+
+/* vxlan_multicast.c */
+int vxlan_multicast_join(struct vxlan_dev *vxlan);
+int vxlan_multicast_leave(struct vxlan_dev *vxlan);
+bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
+ __be32 vni, union vxlan_addr *rip, int rifindex);
+int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex);
+int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex);
+#endif
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
new file mode 100644
index 000000000000..3e04af4c5daa
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vxlan vni filter for collect metadata mode
+ *
+ * Authors: Roopa Prabhu <roopa@nvidia.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/rhashtable.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/vxlan.h>
+
+#include "vxlan_private.h"
+
+static inline int vxlan_vni_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct vxlan_vni_node *vnode = ptr;
+ __be32 vni = *(__be32 *)arg->key;
+
+ return vnode->vni != vni;
+}
+
+const struct rhashtable_params vxlan_vni_rht_params = {
+ .head_offset = offsetof(struct vxlan_vni_node, vnode),
+ .key_offset = offsetof(struct vxlan_vni_node, vni),
+ .key_len = sizeof(__be32),
+ .nelem_hint = 3,
+ .max_size = VXLAN_N_VID,
+ .obj_cmpfn = vxlan_vni_cmp,
+ .automatic_shrinking = true,
+};
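
The rhashtable is keyed directly on the 4-byte VNI embedded in each vxlan_vni_node; obj_cmpfn follows the rhashtable convention of returning zero on a match, which is why it is written as an inequality. A hypothetical insert/lookup pair built on these params:

    /* Hypothetical usage of vxlan_vni_rht_params (error handling elided). */
    static int vni_node_add(struct vxlan_vni_group *vg,
                            struct vxlan_vni_node *v)
    {
            return rhashtable_lookup_insert_fast(&vg->vni_hash, &v->vnode,
                                                 vxlan_vni_rht_params);
    }

    static struct vxlan_vni_node *vni_node_find(struct vxlan_vni_group *vg,
                                                __be32 vni)
    {
            return rhashtable_lookup_fast(&vg->vni_hash, &vni,
                                          vxlan_vni_rht_params);
    }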
+
+static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *v,
+ bool del)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_dev_node *node;
+ struct vxlan_sock *vs;
+
+ spin_lock(&vn->sock_lock);
+ if (del) {
+ if (!hlist_unhashed(&v->hlist4.hlist))
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!hlist_unhashed(&v->hlist6.hlist))
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ goto out;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ vs = rtnl_dereference(vxlan->vn6_sock);
+ if (vs && v) {
+ node = &v->hlist6;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+#endif
+ vs = rtnl_dereference(vxlan->vn4_sock);
+ if (vs && v) {
+ node = &v->hlist4;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+out:
+ spin_unlock(&vn->sock_lock);
+}
+
+void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ bool ipv6)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+ struct vxlan_dev_node *node;
+
+ if (!vg)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ node = &v->hlist6;
+ else
+#endif
+ node = &v->hlist4;
+ node->vxlan = vxlan;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+ spin_unlock(&vn->sock_lock);
+}
+
+void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_node *v, *tmp;
+
+ if (!vg)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ }
+ spin_unlock(&vn->sock_lock);
+}
+
+static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
+ struct vxlan_vni_stats *dest)
+{
+ int i;
+
+ memset(dest, 0, sizeof(*dest));
+ for_each_possible_cpu(i) {
+ struct vxlan_vni_stats_pcpu *pstats;
+ struct vxlan_vni_stats temp;
+ unsigned int start;
+
+ pstats = per_cpu_ptr(vninode->stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&pstats->syncp);
+ memcpy(&temp, &pstats->stats, sizeof(temp));
+ } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
+
+ dest->rx_packets += temp.rx_packets;
+ dest->rx_bytes += temp.rx_bytes;
+ dest->rx_drops += temp.rx_drops;
+ dest->rx_errors += temp.rx_errors;
+ dest->tx_packets += temp.tx_packets;
+ dest->tx_bytes += temp.tx_bytes;
+ dest->tx_drops += temp.tx_drops;
+ dest->tx_errors += temp.tx_errors;
+ }
+}
+
+static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);
+
+ u64_stats_update_begin(&pstats->syncp);
+ switch (type) {
+ case VXLAN_VNI_STATS_RX:
+ pstats->stats.rx_bytes += len;
+ pstats->stats.rx_packets++;
+ break;
+ case VXLAN_VNI_STATS_RX_DROPS:
+ pstats->stats.rx_drops++;
+ break;
+ case VXLAN_VNI_STATS_RX_ERRORS:
+ pstats->stats.rx_errors++;
+ break;
+ case VXLAN_VNI_STATS_TX:
+ pstats->stats.tx_bytes += len;
+ pstats->stats.tx_packets++;
+ break;
+ case VXLAN_VNI_STATS_TX_DROPS:
+ pstats->stats.tx_drops++;
+ break;
+ case VXLAN_VNI_STATS_TX_ERRORS:
+ pstats->stats.tx_errors++;
+ break;
+ }
+ u64_stats_update_end(&pstats->syncp);
+}
+
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_node *vnode;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return;
+
+ if (vninode) {
+ vnode = vninode;
+ } else {
+ vnode = vxlan_vnifilter_lookup(vxlan, vni);
+ if (!vnode)
+ return;
+ }
+
+ vxlan_vnifilter_stats_add(vnode, type, len);
+}
+
+static u32 vnirange(struct vxlan_vni_node *vbegin,
+ struct vxlan_vni_node *vend)
+{
+ return (be32_to_cpu(vend->vni) - be32_to_cpu(vbegin->vni));
+}
+
+static size_t vxlan_vnifilter_entry_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct tunnel_msg))
+ + nla_total_size(0) /* VXLAN_VNIFILTER_ENTRY */
+ + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_START */
+ + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_END */
+ + nla_total_size(sizeof(struct in6_addr)); /* VXLAN_VNIFILTER_ENTRY_GROUP{6} */
+}
+
+static int __vnifilter_entry_fill_stats(struct sk_buff *skb,
+ const struct vxlan_vni_node *vbegin)
+{
+ struct vxlan_vni_stats vstats;
+ struct nlattr *vstats_attr;
+
+ vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS);
+ if (!vstats_attr)
+ goto out_stats_err;
+
+ vxlan_vnifilter_stats_get(vbegin, &vstats);
+ if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES,
+ vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS,
+ vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS,
+ vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES,
+ vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS,
+ vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS,
+ vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD))
+ goto out_stats_err;
+
+ nla_nest_end(skb, vstats_attr);
+
+ return 0;
+
+out_stats_err:
+ nla_nest_cancel(skb, vstats_attr);
+ return -EMSGSIZE;
+}
+
+static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb,
+ struct vxlan_vni_node *vbegin,
+ struct vxlan_vni_node *vend,
+ bool fill_stats)
+{
+ struct nlattr *ventry;
+ u32 vs = be32_to_cpu(vbegin->vni);
+ u32 ve = 0;
+
+ if (vbegin != vend)
+ ve = be32_to_cpu(vend->vni);
+
+ ventry = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY);
+ if (!ventry)
+ return false;
+
+ if (nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_START, vs))
+ goto out_err;
+
+ if (ve && nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_END, ve))
+ goto out_err;
+
+ if (!vxlan_addr_any(&vbegin->remote_ip)) {
+ if (vbegin->remote_ip.sa.sa_family == AF_INET) {
+ if (nla_put_in_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP,
+ vbegin->remote_ip.sin.sin_addr.s_addr))
+ goto out_err;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP6,
+ &vbegin->remote_ip.sin6.sin6_addr))
+ goto out_err;
+#endif
+ }
+ }
+
+ if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin))
+ goto out_err;
+
+ nla_nest_end(skb, ventry);
+
+ return true;
+
+out_err:
+ nla_nest_cancel(skb, ventry);
+
+ return false;
+}
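
vxlan_fill_vni_filter_entry() above follows the standard netlink nesting
discipline: open the nest, attempt each put, and on any failure cancel
back to the nest start so a half-built attribute never reaches userspace.
A sketch of that mark-and-rollback idea over a plain byte buffer; all
names here are hypothetical stand-ins, not the kernel's nla_* API:

#include <stdio.h>
#include <string.h>

/* Toy message buffer: 'len' is the write cursor. */
struct msg { unsigned char buf[64]; size_t len; };

/* Stand-ins for nla_nest_start()/nla_nest_cancel(): start returns the
 * cursor to roll back to, cancel truncates the message to that mark. */
static size_t nest_start(struct msg *m) { return m->len; }
static void nest_cancel(struct msg *m, size_t mark) { m->len = mark; }

/* Append, failing when the buffer would overflow (think -EMSGSIZE). */
static int put_bytes(struct msg *m, const void *p, size_t n)
{
	if (m->len + n > sizeof(m->buf))
		return -1;
	memcpy(m->buf + m->len, p, n);
	m->len += n;
	return 0;
}

static int fill_entry(struct msg *m, unsigned int start, unsigned int end)
{
	size_t mark = nest_start(m);

	if (put_bytes(m, &start, sizeof(start)) ||
	    put_bytes(m, &end, sizeof(end))) {
		nest_cancel(m, mark);	/* drop the partial entry */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct msg m = { .len = 0 };

	while (fill_entry(&m, 100, 200) == 0)
		;	/* fill until an entry no longer fits */
	printf("%zu bytes used; the overflowing entry was rolled back\n",
	       m.len);
	return 0;
}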
+
+static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode, int cmd)
+{
+ struct tunnel_msg *tmsg;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct net *net = dev_net(vxlan->dev);
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(vxlan_vnifilter_entry_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ goto out_err;
+
+ err = -EMSGSIZE;
+ nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*tmsg), 0);
+ if (!nlh)
+ goto out_err;
+ tmsg = nlmsg_data(nlh);
+ memset(tmsg, 0, sizeof(*tmsg));
+ tmsg->family = AF_BRIDGE;
+ tmsg->ifindex = vxlan->dev->ifindex;
+
+ if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false))
+ goto out_err;
+
+ nlmsg_end(skb, nlh);
+ rtnl_notify(skb, net, 0, RTNLGRP_TUNNEL, NULL, GFP_KERNEL);
+
+ return;
+
+out_err:
+ rtnl_set_sk_err(net, RTNLGRP_TUNNEL, err);
+
+ kfree_skb(skb);
+}
+
+static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct vxlan_vni_node *tmp, *v, *vbegin = NULL, *vend = NULL;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct tunnel_msg *new_tmsg, *tmsg;
+ int idx = 0, s_idx = cb->args[1];
+ struct vxlan_vni_group *vg;
+ struct nlmsghdr *nlh;
+ bool dump_stats;
+ int err = 0;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return -EINVAL;
+
+ /* RCU needed because of the vni locking rules (rcu || rtnl) */
+ vg = rcu_dereference(vxlan->vnigrp);
+ if (!vg || !vg->num_vnis)
+ return 0;
+
+ tmsg = nlmsg_data(cb->nlh);
+ dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS);
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+ new_tmsg = nlmsg_data(nlh);
+ memset(new_tmsg, 0, sizeof(*new_tmsg));
+ new_tmsg->family = PF_BRIDGE;
+ new_tmsg->ifindex = dev->ifindex;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (idx < s_idx) {
+ idx++;
+ continue;
+ }
+ if (!vbegin) {
+ vbegin = v;
+ vend = v;
+ continue;
+ }
+ if (!dump_stats && vnirange(vend, v) == 1 &&
+ vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) {
+ goto update_end;
+ } else {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend,
+ dump_stats)) {
+ err = -EMSGSIZE;
+ break;
+ }
+ idx += vnirange(vbegin, vend) + 1;
+ vbegin = v;
+ }
+update_end:
+ vend = v;
+ }
+
+ if (!err && vbegin) {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats))
+ err = -EMSGSIZE;
+ }
+
+ cb->args[1] = err ? idx : 0;
+
+ nlmsg_end(skb, nlh);
+
+ return err;
+}
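
Unless per-VNI stats were requested, the dump loop above coalesces
consecutive VNIs that share a remote address into one START/END range, so
a device carrying vni 100-4000 dumps as a single attribute rather than
thousands. The coalescing rule in isolation, as a runnable sketch with
hypothetical types (as far as I know, iproute2's `bridge vni show` is the
consumer of this dump):

#include <stdio.h>

struct vni_ent { unsigned int vni; unsigned int group; };

/* Extend the current [begin, i-1] run while the next VNI is exactly one
 * higher and points at the same group, mirroring the
 * vnirange(vend, v) == 1 && vxlan_addr_equal() test above. */
static void dump_ranges(const struct vni_ent *e, int n)
{
	int begin = 0;

	for (int i = 1; i <= n; i++) {
		if (i < n && e[i].vni == e[i - 1].vni + 1 &&
		    e[i].group == e[begin].group)
			continue;	/* still part of the current run */
		if (begin == i - 1)
			printf("vni %u\n", e[begin].vni);
		else
			printf("vni %u-%u\n", e[begin].vni, e[i - 1].vni);
		begin = i;
	}
}

int main(void)
{
	struct vni_ent ents[] = {
		{ 100, 1 }, { 101, 1 }, { 102, 1 },	/* one range */
		{ 104, 1 },				/* gap: own entry */
		{ 105, 2 },				/* group changed */
	};

	dump_ranges(ents, 5);	/* prints 100-102, then 104, then 105 */
	return 0;
}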
+
+static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx = 0, err = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ struct tunnel_msg *tmsg;
+ struct net_device *dev;
+
+ tmsg = nlmsg_data(cb->nlh);
+
+ if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
+ NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ if (tmsg->ifindex) {
+ dev = dev_get_by_index_rcu(net, tmsg->ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ if (!netif_is_vxlan(dev)) {
+ NL_SET_ERR_MSG(cb->extack,
+ "The device is not a vxlan device");
+ err = -EINVAL;
+ goto out_err;
+ }
+ err = vxlan_vnifilter_dump_dev(dev, skb, cb);
+ /* anything other than -EMSGSIZE means the dump is complete;
+ * propagate the result (0 on success)
+ */
+ if (err != -EMSGSIZE)
+ goto out_err;
+ } else {
+ for_each_netdev_rcu(net, dev) {
+ if (!netif_is_vxlan(dev))
+ continue;
+ if (idx < s_idx)
+ goto skip;
+ err = vxlan_vnifilter_dump_dev(dev, skb, cb);
+ if (err == -EMSGSIZE)
+ break;
+skip:
+ idx++;
+ }
+ }
+ cb->args[0] = idx;
+ rcu_read_unlock();
+
+ return skb->len;
+
+out_err:
+ rcu_read_unlock();
+
+ return err;
+}
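
A dump that overflows one skb returns -EMSGSIZE and netlink re-invokes
it; cb->args[0] (device index) and cb->args[1] (per-device entry index,
set in the function above) carry the cursor between invocations. The
resume idiom on its own, modeled in plain C with hypothetical names:

#include <stdio.h>

#define PAGE 3	/* entries that fit into one "message" */

/* Emit up to PAGE entries starting at *cursor, advancing the cursor so a
 * later call resumes where this one stopped, like cb->args[] does. */
static int dump_page(const int *items, int n, int *cursor)
{
	int emitted = 0;

	for (int idx = 0; idx < n; idx++) {
		if (idx < *cursor)
			continue;	/* already sent in a prior call */
		if (emitted == PAGE)
			return -1;	/* "skb full": come back later */
		printf("entry %d\n", items[idx]);
		emitted++;
		*cursor = idx + 1;
	}
	return 0;
}

int main(void)
{
	int items[] = { 10, 11, 12, 13, 14, 15, 16 };
	int cursor = 0;

	while (dump_page(items, 7, &cursor) < 0)
		printf("-- next message --\n");
	return 0;
}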
+
+static const struct nla_policy vni_filter_entry_policy[VXLAN_VNIFILTER_ENTRY_MAX + 1] = {
+ [VXLAN_VNIFILTER_ENTRY_START] = { .type = NLA_U32 },
+ [VXLAN_VNIFILTER_ENTRY_END] = { .type = NLA_U32 },
+ [VXLAN_VNIFILTER_ENTRY_GROUP] = { .type = NLA_BINARY,
+ .len = sizeof_field(struct iphdr, daddr) },
+ [VXLAN_VNIFILTER_ENTRY_GROUP6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr) },
+};
+
+static const struct nla_policy vni_filter_policy[VXLAN_VNIFILTER_MAX + 1] = {
+ [VXLAN_VNIFILTER_ENTRY] = { .type = NLA_NESTED },
+};
+
+static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *remote_ip,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ u32 hash_index;
+ int err = 0;
+
+ hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ if (remote_ip && !vxlan_addr_any(remote_ip)) {
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_APPEND | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vni,
+ vni,
+ dst->remote_ifindex,
+ NTF_SELF, 0, true, extack);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ return err;
+ }
+ }
+
+ if (old_remote_ip && !vxlan_addr_any(old_remote_ip)) {
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ *old_remote_ip,
+ vxlan->cfg.dst_port,
+ vni, vni,
+ dst->remote_ifindex,
+ true);
+ }
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+
+ return err;
+}
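
Note the ordering under the per-bucket hash lock: the new all-zeros-MAC
default entry is installed before the stale one is removed, so a
concurrent lookup never sees a window with no default destination. A toy
model of that replace-before-delete ordering (illustrative only, not the
FDB API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int remotes[2];	/* 0 means "unused slot" */
static int nremotes;

static void add_remote(unsigned int ip)
{
	remotes[nremotes++] = ip;
}

static void del_remote(unsigned int ip)
{
	for (int i = 0; i < nremotes; i++)
		if (remotes[i] == ip)
			remotes[i] = remotes[--nremotes];
}

/* Install the replacement first, then drop the old one, all under one
 * lock, so readers always observe at least one valid remote. */
static void update_remote(unsigned int old_ip, unsigned int new_ip)
{
	pthread_mutex_lock(&lock);
	if (new_ip)
		add_remote(new_ip);
	if (old_ip)
		del_remote(old_ip);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	update_remote(0, 0x0a000001);		/* create */
	update_remote(0x0a000001, 0x0a000002);	/* replace */
	printf("%d remote(s), first 0x%x\n", nremotes, remotes[0]);
	return 0;
}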
+
+static int vxlan_vni_update_group(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode,
+ union vxlan_addr *group,
+ bool create, bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ union vxlan_addr *newrip = NULL, *oldrip = NULL;
+ union vxlan_addr old_remote_ip;
+ int ret = 0;
+
+ memcpy(&old_remote_ip, &vninode->remote_ip, sizeof(old_remote_ip));
+
+ /* If a per-VNI remote IP is not present, use the vxlan device's
+ * default destination remote IP for the FDB entry.
+ */
+ if (group && !vxlan_addr_any(group)) {
+ newrip = group;
+ } else {
+ if (!vxlan_addr_any(&dst->remote_ip))
+ newrip = &dst->remote_ip;
+ }
+
+ /* If an old remote IP exists and there is no new one,
+ * explicitly delete the old entry.
+ */
+ if (!newrip && !vxlan_addr_any(&old_remote_ip))
+ oldrip = &old_remote_ip;
+
+ if (!newrip && !oldrip)
+ return 0;
+
+ if (!create && oldrip && newrip && vxlan_addr_equal(oldrip, newrip))
+ return 0;
+
+ ret = vxlan_update_default_fdb_entry(vxlan, vninode->vni,
+ oldrip, newrip,
+ extack);
+ if (ret)
+ goto out;
+
+ if (group)
+ memcpy(&vninode->remote_ip, group, sizeof(vninode->remote_ip));
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&old_remote_ip) &&
+ !vxlan_group_used(vn, vxlan, vninode->vni,
+ &old_remote_ip,
+ vxlan->default_dst.remote_ifindex)) {
+ ret = vxlan_igmp_leave(vxlan, &old_remote_ip,
+ 0);
+ if (ret)
+ goto out;
+ }
+
+ if (vxlan_addr_multicast(&vninode->remote_ip)) {
+ ret = vxlan_igmp_join(vxlan, &vninode->remote_ip, 0);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ goto out;
+ }
+ }
+
+ *changed = true;
+
+ return 0;
+out:
+ return ret;
+}
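
The remote-IP selection above reduces to a small decision table: prefer
the per-VNI group, fall back to the device-wide default remote, and only
when neither exists schedule an explicit delete of the stale old remote.
The same logic as a standalone sketch (hypothetical names, IPv4 values
only for brevity):

#include <stdio.h>

struct decision { unsigned int add; unsigned int del; };

/* 0 plays the role of vxlan_addr_any() here. */
static struct decision pick_remote(unsigned int per_vni_group,
				   unsigned int dev_default,
				   unsigned int old_remote)
{
	struct decision d = { 0, 0 };

	if (per_vni_group)
		d.add = per_vni_group;	/* per-VNI group wins */
	else if (dev_default)
		d.add = dev_default;	/* else the device default */

	if (!d.add && old_remote)
		d.del = old_remote;	/* nothing to install: delete old */

	return d;
}

int main(void)
{
	struct decision d = pick_remote(0, 0x0a000001, 0);

	printf("add 0x%x del 0x%x\n", d.add, d.del);	/* falls back */
	d = pick_remote(0, 0, 0x0a000001);
	printf("add 0x%x del 0x%x\n", d.add, d.del);	/* deletes old */
	return 0;
}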
+
+int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *new_remote_ip,
+ struct netlink_ext_ack *extack)
+{
+ struct list_head *headp, *hpos;
+ struct vxlan_vni_group *vg;
+ struct vxlan_vni_node *vent;
+ int ret;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ headp = &vg->vni_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct vxlan_vni_node, vlist);
+ if (vxlan_addr_any(&vent->remote_ip)) {
+ ret = vxlan_update_default_fdb_entry(vxlan, vent->vni,
+ old_remote_ip,
+ new_remote_ip,
+ extack);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ /* If a per-VNI remote_ip is not present, delete the default
+ * dst remote_ip previously added for this VNI.
+ */
+ if (!vxlan_addr_any(&vninode->remote_ip) ||
+ !vxlan_addr_any(&dst->remote_ip))
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ (vxlan_addr_any(&vninode->remote_ip) ?
+ dst->remote_ip : vninode->remote_ip),
+ vxlan->cfg.dst_port,
+ vninode->vni, vninode->vni,
+ dst->remote_ifindex,
+ true);
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&vninode->remote_ip) &&
+ !vxlan_group_used(vn, vxlan, vninode->vni,
+ &vninode->remote_ip,
+ dst->remote_ifindex)) {
+ vxlan_igmp_leave(vxlan, &vninode->remote_ip, 0);
+ }
+ }
+}
+
+static int vxlan_vni_update(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ __be32 vni, union vxlan_addr *group,
+ bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ int ret;
+
+ vninode = rhashtable_lookup_fast(&vg->vni_hash, &vni,
+ vxlan_vni_rht_params);
+ if (!vninode)
+ return 0;
+
+ ret = vxlan_vni_update_group(vxlan, vninode, group, false, changed,
+ extack);
+ if (ret)
+ return ret;
+
+ if (*changed)
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
+
+ return 0;
+}
+
+static void __vxlan_vni_add_list(struct vxlan_vni_group *vg,
+ struct vxlan_vni_node *v)
+{
+ struct list_head *headp, *hpos;
+ struct vxlan_vni_node *vent;
+
+ headp = &vg->vni_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct vxlan_vni_node, vlist);
+ if (be32_to_cpu(v->vni) < be32_to_cpu(vent->vni))
+ continue;
+ else
+ break;
+ }
+ list_add_rcu(&v->vlist, hpos);
+ vg->num_vnis++;
+}
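
The VNI list is kept sorted in ascending order, and the insert scans
backwards from the tail, so the common case of VNIs added in increasing
order costs a single comparison. The same reverse-scan insert over a
plain array, standing in for the kernel's list_for_each_prev() walk:

#include <stdio.h>
#include <string.h>

static void vni_insert(unsigned int *list, int *n, unsigned int vni)
{
	int pos = *n;

	/* walk from the tail toward the head, past larger VNIs */
	while (pos > 0 && list[pos - 1] > vni)
		pos--;
	memmove(&list[pos + 1], &list[pos], (*n - pos) * sizeof(*list));
	list[pos] = vni;
	(*n)++;
}

int main(void)
{
	unsigned int list[8];
	int n = 0;

	vni_insert(list, &n, 100);
	vni_insert(list, &n, 300);
	vni_insert(list, &n, 200);	/* lands between the other two */
	for (int i = 0; i < n; i++)
		printf("%u ", list[i]);
	printf("\n");	/* 100 200 300 */
	return 0;
}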
+
+static void __vxlan_vni_del_list(struct vxlan_vni_group *vg,
+ struct vxlan_vni_node *v)
+{
+ list_del_rcu(&v->vlist);
+ vg->num_vnis--;
+}
+
+static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
+ __be32 vni)
+{
+ struct vxlan_vni_node *vninode;
+
+ vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC);
+ if (!vninode)
+ return NULL;
+ vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu);
+ if (!vninode->stats) {
+ kfree(vninode);
+ return NULL;
+ }
+ vninode->vni = vni;
+ vninode->hlist4.vxlan = vxlan;
+#if IS_ENABLED(CONFIG_IPV6)
+ vninode->hlist6.vxlan = vxlan;
+#endif
+
+ return vninode;
+}
+
+static int vxlan_vni_add(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ u32 vni, union vxlan_addr *group,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ __be32 v = cpu_to_be32(vni);
+ bool changed = false;
+ int err = 0;
+
+ if (vxlan_vnifilter_lookup(vxlan, v))
+ return vxlan_vni_update(vxlan, vg, v, group, &changed, extack);
+
+ err = vxlan_vni_in_use(vxlan->net, vxlan, &vxlan->cfg, v);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "VNI in use");
+ return err;
+ }
+
+ vninode = vxlan_vni_alloc(vxlan, v);
+ if (!vninode)
+ return -ENOMEM;
+
+ err = rhashtable_lookup_insert_fast(&vg->vni_hash,
+ &vninode->vnode,
+ vxlan_vni_rht_params);
+ if (err) {
+ kfree(vninode);
+ return err;
+ }
+
+ __vxlan_vni_add_list(vg, vninode);
+
+ if (vxlan->dev->flags & IFF_UP)
+ vxlan_vs_add_del_vninode(vxlan, vninode, false);
+
+ err = vxlan_vni_update_group(vxlan, vninode, group, true, &changed,
+ extack);
+
+ if (changed)
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
+
+ return err;
+}
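
vxlan_vni_add() is the canonical add flow: a VNI that already exists
degrades into an update; otherwise the node is allocated, inserted into
the hash (and freed again if rhashtable_lookup_insert_fast() reports a
duplicate), then linked into the sorted list and the socket hash. A
compact model of the lookup-or-insert step, with a linear-probe table
standing in for the rhashtable and every name hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define TABSZ 16

static unsigned int *tab[TABSZ];

/* Find the slot holding 'vni', or the empty slot where it would go. */
static unsigned int **slot_for(unsigned int vni)
{
	unsigned int h = vni % TABSZ;

	while (tab[h] && *tab[h] != vni)
		h = (h + 1) % TABSZ;
	return &tab[h];
}

static int vni_add(unsigned int vni)
{
	unsigned int **slot = slot_for(vni);
	unsigned int *node;

	if (*slot)
		return 0;	/* exists: take the update path instead */

	node = malloc(sizeof(*node));
	if (!node)
		return -1;	/* -ENOMEM */
	*node = vni;
	*slot = node;		/* kernel: rhashtable_lookup_insert_fast();
				 * the caller frees the node if it fails */
	return 1;
}

int main(void)
{
	printf("add 100 -> %d\n", vni_add(100));	/* 1: inserted */
	printf("add 100 -> %d\n", vni_add(100));	/* 0: update path */
	return 0;
}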
+
+static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
+{
+ struct vxlan_vni_node *v;
+
+ v = container_of(rcu, struct vxlan_vni_node, rcu);
+ free_percpu(v->stats);
+ kfree(v);
+}
+
+static int vxlan_vni_del(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ u32 vni, struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ __be32 v = cpu_to_be32(vni);
+ int err = 0;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ vninode = rhashtable_lookup_fast(&vg->vni_hash, &v,
+ vxlan_vni_rht_params);
+ if (!vninode) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ vxlan_vni_delete_group(vxlan, vninode);
+
+ err = rhashtable_remove_fast(&vg->vni_hash,
+ &vninode->vnode,
+ vxlan_vni_rht_params);
+ if (err)
+ goto out;
+
+ __vxlan_vni_del_list(vg, vninode);
+
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_DELTUNNEL);
+
+ if (vxlan->dev->flags & IFF_UP)
+ vxlan_vs_add_del_vninode(vxlan, vninode, true);
+
+ call_rcu(&vninode->rcu, vxlan_vni_node_rcu_free);
+
+ return 0;
+out:
+ return err;
+}
+
+static int vxlan_vni_add_del(struct vxlan_dev *vxlan, __u32 start_vni,
+ __u32 end_vni, union vxlan_addr *group,
+ int cmd, struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_group *vg;
+ int v, err = 0;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ for (v = start_vni; v <= end_vni; v++) {
+ switch (cmd) {
+ case RTM_NEWTUNNEL:
+ err = vxlan_vni_add(vxlan, vg, v, group, extack);
+ break;
+ case RTM_DELTUNNEL:
+ err = vxlan_vni_del(vxlan, vg, v, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ return err;
+}
+
+static int vxlan_process_vni_filter(struct vxlan_dev *vxlan,
+ struct nlattr *nlvnifilter,
+ int cmd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *vattrs[VXLAN_VNIFILTER_ENTRY_MAX + 1];
+ u32 vni_start = 0, vni_end = 0;
+ union vxlan_addr group;
+ int err;
+
+ err = nla_parse_nested(vattrs,
+ VXLAN_VNIFILTER_ENTRY_MAX,
+ nlvnifilter, vni_filter_entry_policy,
+ extack);
+ if (err)
+ return err;
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_START]) {
+ vni_start = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_START]);
+ vni_end = vni_start;
+ }
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_END])
+ vni_end = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_END]);
+
+ if (!vni_start && !vni_end) {
+ NL_SET_ERR_MSG_ATTR(extack, nlvnifilter,
+ "vni start nor end found in vni entry");
+ return -EINVAL;
+ }
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]) {
+ group.sin.sin_addr.s_addr =
+ nla_get_in_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]);
+ group.sa.sa_family = AF_INET;
+ } else if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]) {
+ group.sin6.sin6_addr =
+ nla_get_in6_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]);
+ group.sa.sa_family = AF_INET6;
+ } else {
+ memset(&group, 0, sizeof(group));
+ }
+
+ if (vxlan_addr_multicast(&group) && !vxlan->default_dst.remote_ifindex) {
+ NL_SET_ERR_MSG(extack,
+ "Local interface required for multicast remote group");
+
+ return -EINVAL;
+ }
+
+ err = vxlan_vni_add_del(vxlan, vni_start, vni_end, &group, cmd,
+ extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_node *v, *tmp;
+ struct vxlan_vni_group *vg;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ rhashtable_remove_fast(&vg->vni_hash, &v->vnode,
+ vxlan_vni_rht_params);
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ __vxlan_vni_del_list(vg, v);
+ vxlan_vnifilter_notify(vxlan, v, RTM_DELTUNNEL);
+ call_rcu(&v->rcu, vxlan_vni_node_rcu_free);
+ }
+ rhashtable_destroy(&vg->vni_hash);
+ kfree(vg);
+}
+
+int vxlan_vnigroup_init(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg;
+ int ret;
+
+ vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ if (!vg)
+ return -ENOMEM;
+ ret = rhashtable_init(&vg->vni_hash, &vxlan_vni_rht_params);
+ if (ret) {
+ kfree(vg);
+ return ret;
+ }
+ INIT_LIST_HEAD(&vg->vni_list);
+ rcu_assign_pointer(vxlan->vnigrp, vg);
+
+ return 0;
+}
+
+static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tunnel_msg *tmsg;
+ struct vxlan_dev *vxlan;
+ struct net_device *dev;
+ struct nlattr *attr;
+ int err, vnis = 0;
+ int rem;
+
+ /* this should validate the header and check for remaining bytes */
+ err = nlmsg_parse(nlh, sizeof(*tmsg), NULL, VXLAN_VNIFILTER_MAX,
+ vni_filter_policy, extack);
+ if (err < 0)
+ return err;
+
+ tmsg = nlmsg_data(nlh);
+ dev = __dev_get_by_index(net, tmsg->ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ if (!netif_is_vxlan(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "The device is not a vxlan device");
+ return -EINVAL;
+ }
+
+ vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return -EOPNOTSUPP;
+
+ nlmsg_for_each_attr(attr, nlh, sizeof(*tmsg), rem) {
+ switch (nla_type(attr)) {
+ case VXLAN_VNIFILTER_ENTRY:
+ err = vxlan_process_vni_filter(vxlan, attr,
+ nlh->nlmsg_type, extack);
+ break;
+ default:
+ continue;
+ }
+ vnis++;
+ if (err)
+ break;
+ }
+
+ if (!vnis) {
+ NL_SET_ERR_MSG_MOD(extack, "No vnis found to process");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+void vxlan_vnifilter_init(void)
+{
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
+ vxlan_vnifilter_dump, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
+ vxlan_vnifilter_process, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
+ vxlan_vnifilter_process, NULL, 0);
+}
+
+void vxlan_vnifilter_uninit(void)
+{
+ rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
+ rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
+ rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
+}
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 592a8389fc5a..dcb069dde66b 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -23,78 +23,6 @@ menuconfig WAN
if WAN
-# There is no way to detect a comtrol sv11 - force it modular for now.
-config HOSTESS_SV11
- tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- Driver for Comtrol Hostess SV-11 network card which
- operates on low speed synchronous serial links at up to
- 256Kbps, supporting PPP and Cisco HDLC.
-
- The driver will be compiled as a module: the
- module will be called hostess_sv11.
-
-# The COSA/SRP driver has not been tested as non-modular yet.
-config COSA
- tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
- help
- Driver for COSA and SRP synchronous serial boards.
-
- These boards allow you to connect synchronous serial devices (for example
- base-band modems, or any other device with the X.21, V.24, V.35 or
- V.36 interface) to your Linux box. The cards can work as the
- character device, synchronous PPP network device, or the Cisco HDLC
- network device.
-
- You will need the user-space utilities for the COSA or SRP boards to
- download the firmware to the cards and to set them up. Look at
- <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
- read the comment at the top of the <file:drivers/net/wan/cosa.c> for
- details about the cards and the driver itself.
-
- The driver will be compiled as a module: the
- module will be called cosa.
-
-#
-# Lan Media's board. Currently 1000, 1200, 5200, 5245
-#
-config LANMEDIA
- tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
- depends on PCI && VIRT_TO_BUS && HDLC
- help
- Driver for the following Lan Media family of serial boards:
-
- - LMC 1000 board allows you to connect synchronous serial devices
- (for example base-band modems, or any other device with the X.21,
- V.24, V.35 or V.36 interface) to your Linux box.
-
- - LMC 1200 with on board DSU board allows you to connect your Linux
- box directly to a T1 or E1 circuit.
-
- - LMC 5200 board provides a HSSI interface capable of running up to
- 52 Mbits per second.
-
- - LMC 5245 board connects directly to a T3 circuit saving the
- additional external hardware.
-
- To change settings such as the clock source you will need lmcctl.
- It is available at <ftp://ftp.lanmedia.com/> (broken link).
-
- To compile this driver as a module, choose M here: the
- module will be called lmc.
-
-# There is no way to detect a Sealevel board. Force it modular
-config SEALEVEL_4021
- tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
-
- The driver will be compiled as a module: the
- module will be called sealevel.
-
# Generic HDLC
config HDLC
tristate "Generic HDLC layer"
@@ -293,7 +221,8 @@ config SLIC_DS26522
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX && OF
+ select MFD_SYSCON
help
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 480bcd1f6c1c..5bec8fae47f8 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,13 +14,8 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
-obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
-obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
-obj-$(CONFIG_COSA) += cosa.o
obj-$(CONFIG_FARSYNC) += farsync.o
-obj-$(CONFIG_LANMEDIA) += lmc/
-
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_N2) += n2.o
obj-$(CONFIG_C101) += c101.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
deleted file mode 100644
index 23d2954d9747..000000000000
--- a/drivers/net/wan/cosa.c
+++ /dev/null
@@ -1,2052 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
-
-/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-/* The driver for the SRP and COSA synchronous serial cards.
- *
- * HARDWARE INFO
- *
- * Both cards are developed at the Institute of Computer Science,
- * Masaryk University (https://www.ics.muni.cz/). The hardware is
- * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
- * and the photo of both cards is available at
- * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
- * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
- * For Linux-specific utilities, see below in the "Software info" section.
- * If you want to order the card, contact Jiri Novotny.
- *
- * The SRP (serial port?, the Czech word "srp" means "sickle") card
- * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
- * with V.24 interfaces up to 80kb/s each.
- *
- * The COSA (communication serial adapter?, the Czech word "kosa" means
- * "scythe") is a next-generation sync/async board with two interfaces
- * - currently any of V.24, X.21, V.35 and V.36 can be selected.
- * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
- * The 8-channels version is in development.
- *
- * Both types have downloadable firmware and communicate via ISA DMA.
- * COSA can be also a bus-mastering device.
- *
- * SOFTWARE INFO
- *
- * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/.
- * The CVS tree of Linux driver can be viewed there, as well as the
- * firmware binaries and user-space utilities for downloading the firmware
- * into the card and setting up the card.
- *
- * The Linux driver (unlike the present *BSD drivers :-) can work even
- * for the COSA and SRP in one computer and allows each channel to work
- * in one of the two modes (character or network device).
- *
- * AUTHOR
- *
- * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
- *
- * You can mail me bugfixes and even success reports. I am especially
- * interested in the SMP and/or multi-channel success/failure reports
- * (I wonder if I did the locking properly :-).
- *
- * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
- *
- * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
- * The skeleton.c by Donald Becker
- * The SDL Riscom/N2 driver by Mike Natale
- * The Comtrol Hostess SV11 driver by Alan Cox
- * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-
-#undef COSA_SLOW_IO /* for testing purposes only */
-
-#include "cosa.h"
-
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Maximum length of the channel name */
-#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1)
-
-/* Per-channel data structure */
-
-struct channel_data {
- int usage; /* Usage count; >0 for chrdev, -1 for netdev */
- int num; /* Number of the channel */
- struct cosa_data *cosa; /* Pointer to the per-card structure */
- int txsize; /* Size of transmitted data */
- char *txbuf; /* Transmit buffer */
- char name[COSA_MAX_NAME]; /* channel name */
-
- /* The HW layer interface */
- /* routine called from the RX interrupt */
- char *(*setup_rx)(struct channel_data *channel, int size);
- /* routine called when the RX is done (from the EOT interrupt) */
- int (*rx_done)(struct channel_data *channel);
- /* routine called when the TX is done (from the EOT interrupt) */
- int (*tx_done)(struct channel_data *channel, int size);
-
- /* Character device parts */
- struct mutex rlock;
- struct semaphore wsem;
- char *rxdata;
- int rxsize;
- wait_queue_head_t txwaitq, rxwaitq;
- int tx_status, rx_status;
-
- /* generic HDLC device parts */
- struct net_device *netdev;
- struct sk_buff *rx_skb, *tx_skb;
-};
-
-/* cosa->firmware_status bits */
-#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */
-#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */
-#define COSA_FW_START BIT(2) /* Is the microcode running? */
-
-struct cosa_data {
- int num; /* Card number */
- char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */
- unsigned int datareg, statusreg; /* I/O ports */
- unsigned short irq, dma; /* IRQ and DMA number */
- unsigned short startaddr; /* Firmware start address */
- unsigned short busmaster; /* Use busmastering? */
- int nchannels; /* # of channels on this card */
- int driver_status; /* For communicating with firmware */
- int firmware_status; /* Downloaded, reset, etc. */
- unsigned long rxbitmap, txbitmap;/* Bitmap of channels who are willing to send/receive data */
- unsigned long rxtx; /* RX or TX in progress? */
- int enabled;
- int usage; /* usage count */
- int txchan, txsize, rxsize;
- struct channel_data *rxchan;
- char *bouncebuf;
- char *txbuf, *rxbuf;
- struct channel_data *chan;
- spinlock_t lock; /* For exclusive operations on this structure */
- char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */
- char *type; /* card type */
-};
-
-/* Define this if you want all the possible ports to be autoprobed.
- * It is here but it probably is not a good idea to use this.
- */
-/* #define COSA_ISA_AUTOPROBE 1*/
-
-/* Character device major number. 117 was allocated for us.
- * The value of 0 means to allocate a first free one.
- */
-static DEFINE_MUTEX(cosa_chardev_mutex);
-static int cosa_major = 117;
-
-/* Encoding of the minor numbers:
- * The lowest CARD_MINOR_BITS bits means the channel on the single card,
- * the highest bits means the card number.
- */
-#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
- * for the single card
- */
-/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
- * macro doesn't like anything other than the raw number as an argument :-(
- */
-#define MAX_CARDS 16
-/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */
-
-#define DRIVER_RX_READY 0x0001
-#define DRIVER_TX_READY 0x0002
-#define DRIVER_TXMAP_SHIFT 2
-#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
-
-/* for cosa->rxtx - indicates whether either transmit or receive is
- * in progress. These values are the bit numbers used.
- */
-#define TXBIT 0
-#define RXBIT 1
-#define IRQBIT 2
-
-#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
-
-#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
-#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
-#undef DEBUG_IO //1 /* Dump the I/O traffic */
-
-#define TX_TIMEOUT (5 * HZ)
-
-/* Maybe the following should be allocated dynamically */
-static struct cosa_data cosa_cards[MAX_CARDS];
-static int nr_cards;
-
-#ifdef COSA_ISA_AUTOPROBE
-static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,};
-/* NOTE: DMA is not autoprobed!!! */
-static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,};
-#else
-static int io[MAX_CARDS + 1];
-static int dma[MAX_CARDS + 1];
-#endif
-/* IRQ can be safely autoprobed */
-static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,};
-
-/* for class stuff*/
-static struct class *cosa_class;
-
-#ifdef MODULE
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
-module_param_hw_array(dma, int, dma, NULL, 0);
-MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
-
-MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
-MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
-MODULE_LICENSE("GPL");
-#endif
-
-/* I use this mainly for testing purposes */
-#ifdef COSA_SLOW_IO
-#define cosa_outb outb_p
-#define cosa_outw outw_p
-#define cosa_inb inb_p
-#define cosa_inw inw_p
-#else
-#define cosa_outb outb
-#define cosa_outw outw
-#define cosa_inb inb
-#define cosa_inw inw
-#endif
-
-#define is_8bit(cosa) (!((cosa)->datareg & 0x08))
-
-#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg))
-#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg))
-#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg))
-#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg))
-#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg))
-#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg))
-
-/* Initialization stuff */
-static int cosa_probe(int ioaddr, int irq, int dma);
-
-/* HW interface */
-static void cosa_enable_rx(struct channel_data *chan);
-static void cosa_disable_rx(struct channel_data *chan);
-static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
-static void cosa_kick(struct cosa_data *cosa);
-static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
-
-/* Network device stuff */
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity);
-static int cosa_net_open(struct net_device *d);
-static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
-static char *cosa_net_setup_rx(struct channel_data *channel, int size);
-static int cosa_net_rx_done(struct channel_data *channel);
-static int cosa_net_tx_done(struct channel_data *channel, int size);
-
-/* Character device */
-static char *chrdev_setup_rx(struct channel_data *channel, int size);
-static int chrdev_rx_done(struct channel_data *channel);
-static int chrdev_tx_done(struct channel_data *channel, int size);
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos);
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static unsigned int cosa_poll(struct file *file, poll_table *poll);
-static int cosa_open(struct inode *inode, struct file *file);
-static int cosa_release(struct inode *inode, struct file *file);
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-#ifdef COSA_FASYNC_WORKING
-static int cosa_fasync(struct inode *inode, struct file *file, int on);
-#endif
-
-static const struct file_operations cosa_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = cosa_read,
- .write = cosa_write,
- .poll = cosa_poll,
- .unlocked_ioctl = cosa_chardev_ioctl,
- .open = cosa_open,
- .release = cosa_release,
-#ifdef COSA_FASYNC_WORKING
- .fasync = cosa_fasync,
-#endif
-};
-
-/* Ioctls */
-static int cosa_start(struct cosa_data *cosa, int address);
-static int cosa_reset(struct cosa_data *cosa);
-static int cosa_download(struct cosa_data *cosa, void __user *a);
-static int cosa_readmem(struct cosa_data *cosa, void __user *a);
-
-/* COSA/SRP ROM monitor */
-static int download(struct cosa_data *cosa, const char __user *data, int addr, int len);
-static int startmicrocode(struct cosa_data *cosa, int address);
-static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len);
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
-
-/* Auxiliary functions */
-static int get_wait_data(struct cosa_data *cosa);
-static int put_wait_data(struct cosa_data *cosa, int data);
-static int puthexnumber(struct cosa_data *cosa, int number);
-static void put_driver_status(struct cosa_data *cosa);
-static void put_driver_status_nolock(struct cosa_data *cosa);
-
-/* Interrupt handling */
-static irqreturn_t cosa_interrupt(int irq, void *cosa);
-
-/* I/O ops debugging */
-#ifdef DEBUG_IO
-static void debug_data_in(struct cosa_data *cosa, int data);
-static void debug_data_out(struct cosa_data *cosa, int data);
-static void debug_data_cmd(struct cosa_data *cosa, int data);
-static void debug_status_in(struct cosa_data *cosa, int status);
-static void debug_status_out(struct cosa_data *cosa, int status);
-#endif
-
-static inline struct channel_data *dev_to_chan(struct net_device *dev)
-{
- return (struct channel_data *)dev_to_hdlc(dev)->priv;
-}
-
-/* ---------- Initialization stuff ---------- */
-
-static int __init cosa_init(void)
-{
- int i, err = 0;
-
- if (cosa_major > 0) {
- if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
- pr_warn("unable to get major %d\n", cosa_major);
- err = -EIO;
- goto out;
- }
- } else {
- cosa_major = register_chrdev(0, "cosa", &cosa_fops);
- if (!cosa_major) {
- pr_warn("unable to register chardev\n");
- err = -EIO;
- goto out;
- }
- }
- for (i = 0; i < MAX_CARDS; i++)
- cosa_cards[i].num = -1;
- for (i = 0; io[i] != 0 && i < MAX_CARDS; i++)
- cosa_probe(io[i], irq[i], dma[i]);
- if (!nr_cards) {
- pr_warn("no devices found\n");
- unregister_chrdev(cosa_major, "cosa");
- err = -ENODEV;
- goto out;
- }
- cosa_class = class_create(THIS_MODULE, "cosa");
- if (IS_ERR(cosa_class)) {
- err = PTR_ERR(cosa_class);
- goto out_chrdev;
- }
- for (i = 0; i < nr_cards; i++)
- device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL,
- "cosa%d", i);
- err = 0;
- goto out;
-
-out_chrdev:
- unregister_chrdev(cosa_major, "cosa");
-out:
- return err;
-}
-module_init(cosa_init);
-
-static void __exit cosa_exit(void)
-{
- struct cosa_data *cosa;
- int i;
-
- for (i = 0; i < nr_cards; i++)
- device_destroy(cosa_class, MKDEV(cosa_major, i));
- class_destroy(cosa_class);
-
- for (cosa = cosa_cards; nr_cards--; cosa++) {
- /* Clean up the per-channel data */
- for (i = 0; i < cosa->nchannels; i++) {
- /* Chardev driver has no alloc'd per-channel data */
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- /* Clean up the per-card data */
- kfree(cosa->chan);
- kfree(cosa->bouncebuf);
- free_irq(cosa->irq, cosa);
- free_dma(cosa->dma);
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- }
- unregister_chrdev(cosa_major, "cosa");
-}
-module_exit(cosa_exit);
-
-static const struct net_device_ops cosa_ops = {
- .ndo_open = cosa_net_open,
- .ndo_stop = cosa_net_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_tx_timeout = cosa_net_timeout,
-};
-
-static int cosa_probe(int base, int irq, int dma)
-{
- struct cosa_data *cosa = cosa_cards + nr_cards;
- int i, err = 0;
-
- memset(cosa, 0, sizeof(struct cosa_data));
-
- /* Checking validity of parameters: */
- /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
- if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
- pr_info("invalid IRQ %d\n", irq);
- return -1;
- }
- /* I/O address should be between 0x100 and 0x3ff and should be
- * multiple of 8.
- */
- if (base < 0x100 || base > 0x3ff || base & 0x7) {
- pr_info("invalid I/O address 0x%x\n", base);
- return -1;
- }
- /* DMA should be 0,1 or 3-7 */
- if (dma < 0 || dma == 4 || dma > 7) {
- pr_info("invalid DMA %d\n", dma);
- return -1;
- }
- /* and finally, on 16-bit COSA DMA should be 4-7 and
- * I/O base should not be multiple of 0x10
- */
- if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
- pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
- base, dma);
- return -1;
- }
-
- cosa->dma = dma;
- cosa->datareg = base;
- cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2;
- spin_lock_init(&cosa->lock);
-
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa"))
- return -1;
-
- if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
- printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
- err = -1;
- goto err_out;
- }
-
- /* Test the validity of identification string */
- if (!strncmp(cosa->id_string, "SRP", 3)) {
- cosa->type = "srp";
- } else if (!strncmp(cosa->id_string, "COSA", 4)) {
- cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16";
- } else {
-/* Print a warning only if we are not autoprobing */
-#ifndef COSA_ISA_AUTOPROBE
- pr_info("valid signature not found at 0x%x\n", base);
-#endif
- err = -1;
- goto err_out;
- }
- /* Update the name of the region now we know the type of card */
- release_region(base, is_8bit(cosa) ? 2 : 4);
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) {
- printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
- return -1;
- }
-
- /* Now do IRQ autoprobe */
- if (irq < 0) {
- unsigned long irqs;
-/* pr_info("IRQ autoprobe\n"); */
- irqs = probe_irq_on();
- /* Enable interrupt on tx buffer empty (it sure is)
- * really sure ?
- * FIXME: When this code is not used as module, we should
- * probably call udelay() instead of the interruptible sleep.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(msecs_to_jiffies(300));
- irq = probe_irq_off(irqs);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
- /* Empty the received data register */
- cosa_getdata8(cosa);
-
- if (irq < 0) {
- pr_info("multiple interrupts obtained (%d, board at 0x%x)\n",
- irq, cosa->datareg);
- err = -1;
- goto err_out;
- }
- if (irq == 0) {
- pr_info("no interrupt obtained (board at 0x%x)\n",
- cosa->datareg);
- /* return -1; */
- }
- }
-
- cosa->irq = irq;
- cosa->num = nr_cards;
- cosa->usage = 0;
- cosa->nchannels = 2; /* FIXME: how to determine this? */
-
- if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
- err = -1;
- goto err_out;
- }
- if (request_dma(cosa->dma, cosa->type)) {
- err = -1;
- goto err_out1;
- }
-
- cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA);
- if (!cosa->bouncebuf) {
- err = -ENOMEM;
- goto err_out2;
- }
- sprintf(cosa->name, "cosa%d", cosa->num);
-
- /* Initialize the per-channel data */
- cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
- if (!cosa->chan) {
- err = -ENOMEM;
- goto err_out3;
- }
-
- for (i = 0; i < cosa->nchannels; i++) {
- struct channel_data *chan = &cosa->chan[i];
-
- chan->cosa = cosa;
- chan->num = i;
- sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
-
- /* Initialize the chardev data structures */
- mutex_init(&chan->rlock);
- sema_init(&chan->wsem, 1);
-
- /* Register the network interface */
- chan->netdev = alloc_hdlcdev(chan);
- if (!chan->netdev) {
- pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
- err = -ENOMEM;
- goto err_hdlcdev;
- }
- dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
- dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
- chan->netdev->netdev_ops = &cosa_ops;
- chan->netdev->watchdog_timeo = TX_TIMEOUT;
- chan->netdev->base_addr = chan->cosa->datareg;
- chan->netdev->irq = chan->cosa->irq;
- chan->netdev->dma = chan->cosa->dma;
- err = register_hdlc_device(chan->netdev);
- if (err) {
- netdev_warn(chan->netdev,
- "register_hdlc_device() failed\n");
- free_netdev(chan->netdev);
- goto err_hdlcdev;
- }
- }
-
- pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
- cosa->num, cosa->id_string, cosa->type,
- cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
-
- return nr_cards++;
-
-err_hdlcdev:
- while (i-- > 0) {
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- kfree(cosa->chan);
-err_out3:
- kfree(cosa->bouncebuf);
-err_out2:
- free_dma(cosa->dma);
-err_out1:
- free_irq(cosa->irq, cosa);
-err_out:
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- pr_notice("cosa%d: allocating resources failed\n", cosa->num);
- return err;
-}
-
-/*---------- network device ---------- */
-
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static int cosa_net_open(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- int err;
- unsigned long flags;
-
- if (!(chan->cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- chan->cosa->name, chan->cosa->firmware_status);
- return -EPERM;
- }
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->usage != 0) {
- pr_warn("%s: cosa_net_open called with usage count %d\n",
- chan->name, chan->usage);
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return -EBUSY;
- }
- chan->setup_rx = cosa_net_setup_rx;
- chan->tx_done = cosa_net_tx_done;
- chan->rx_done = cosa_net_rx_done;
- chan->usage = -1;
- chan->cosa->usage++;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
-
- err = hdlc_open(dev);
- if (err) {
- spin_lock_irqsave(&chan->cosa->lock, flags);
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return err;
- }
-
- netif_start_queue(dev);
- cosa_enable_rx(chan);
- return 0;
-}
-
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- netif_stop_queue(dev);
-
- chan->tx_skb = skb;
- cosa_start_tx(chan, skb->data, skb->len);
- return NETDEV_TX_OK;
-}
-
-static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- if (test_bit(RXBIT, &chan->cosa->rxtx)) {
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_missed_errors++;
- } else {
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- }
- cosa_kick(chan->cosa);
- if (chan->tx_skb) {
- dev_kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- netif_wake_queue(dev);
-}
-
-static int cosa_net_close(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- unsigned long flags;
-
- netif_stop_queue(dev);
- hdlc_close(dev);
- cosa_disable_rx(chan);
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->rx_skb) {
- kfree_skb(chan->rx_skb);
- chan->rx_skb = NULL;
- }
- if (chan->tx_skb) {
- kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return 0;
-}
-
-static char *cosa_net_setup_rx(struct channel_data *chan, int size)
-{
- /* We can safely fall back to non-dma-able memory, because we have
- * the cosa->bouncebuf pre-allocated.
- */
- kfree_skb(chan->rx_skb);
- chan->rx_skb = dev_alloc_skb(size);
- if (!chan->rx_skb) {
- pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
- chan->netdev->stats.rx_dropped++;
- return NULL;
- }
- netif_trans_update(chan->netdev);
- return skb_put(chan->rx_skb, size);
-}
-
-static int cosa_net_rx_done(struct channel_data *chan)
-{
- if (!chan->rx_skb) {
- pr_warn("%s: rx_done with empty skb!\n", chan->name);
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_frame_errors++;
- return 0;
- }
- chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
- chan->rx_skb->dev = chan->netdev;
- skb_reset_mac_header(chan->rx_skb);
- chan->netdev->stats.rx_packets++;
- chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
- netif_rx(chan->rx_skb);
- chan->rx_skb = NULL;
- return 0;
-}
-
-/* ARGSUSED */
-static int cosa_net_tx_done(struct channel_data *chan, int size)
-{
- if (!chan->tx_skb) {
- pr_warn("%s: tx_done with empty skb!\n", chan->name);
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- return 1;
- }
- dev_consume_skb_irq(chan->tx_skb);
- chan->tx_skb = NULL;
- chan->netdev->stats.tx_packets++;
- chan->netdev->stats.tx_bytes += size;
- netif_wake_queue(chan->netdev);
- return 1;
-}
-
-/*---------- Character device ---------- */
-
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (mutex_lock_interruptible(&chan->rlock))
- return -ERESTARTSYS;
-
- chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL);
- if (!chan->rxdata) {
- mutex_unlock(&chan->rlock);
- return -ENOMEM;
- }
-
- chan->rx_status = 0;
- cosa_enable_rx(chan);
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->rxwaitq, &wait);
- while (!chan->rx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->rx_status == 0) {
- chan->rx_status = 1;
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- kbuf = chan->rxdata;
- count = chan->rxsize;
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
-
- if (copy_to_user(buf, kbuf, count)) {
- kfree(kbuf);
- return -EFAULT;
- }
- kfree(kbuf);
- return count;
-}
-
-static char *chrdev_setup_rx(struct channel_data *chan, int size)
-{
- /* Expect size <= COSA_MTU */
- chan->rxsize = size;
- return chan->rxdata;
-}
-
-static int chrdev_rx_done(struct channel_data *chan)
-{
- if (chan->rx_status) { /* Reader has died */
- kfree(chan->rxdata);
- up(&chan->wsem);
- }
- chan->rx_status = 1;
- wake_up_interruptible(&chan->rxwaitq);
- return 1;
-}
-
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (down_interruptible(&chan->wsem))
- return -ERESTARTSYS;
-
- if (count > COSA_MTU)
- count = COSA_MTU;
-
- /* Allocate the buffer */
- kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA);
- if (!kbuf) {
- up(&chan->wsem);
- return -ENOMEM;
- }
- if (copy_from_user(kbuf, buf, count)) {
- up(&chan->wsem);
- kfree(kbuf);
- return -EFAULT;
- }
- chan->tx_status = 0;
- cosa_start_tx(chan, kbuf, count);
-
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->txwaitq, &wait);
- while (!chan->tx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->tx_status == 0) {
- chan->tx_status = 1;
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- chan->tx_status = 1;
- spin_unlock_irqrestore(&cosa->lock, flags);
- up(&chan->wsem);
- kfree(kbuf);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- up(&chan->wsem);
- spin_unlock_irqrestore(&cosa->lock, flags);
- kfree(kbuf);
- return count;
-}
-
-static int chrdev_tx_done(struct channel_data *chan, int size)
-{
- if (chan->tx_status) { /* Writer was interrupted */
- kfree(chan->txbuf);
- up(&chan->wsem);
- }
- chan->tx_status = 1;
- wake_up_interruptible(&chan->txwaitq);
- return 1;
-}
-
-static __poll_t cosa_poll(struct file *file, poll_table *poll)
-{
- pr_info("cosa_poll is here\n");
- return 0;
-}
-
-static int cosa_open(struct inode *inode, struct file *file)
-{
- struct cosa_data *cosa;
- struct channel_data *chan;
- unsigned long flags;
- int n;
- int ret = 0;
-
- mutex_lock(&cosa_chardev_mutex);
- n = iminor(file_inode(file)) >> CARD_MINOR_BITS;
- if (n >= nr_cards) {
- ret = -ENODEV;
- goto out;
- }
- cosa = cosa_cards + n;
-
- n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1);
- if (n >= cosa->nchannels) {
- ret = -ENODEV;
- goto out;
- }
- chan = cosa->chan + n;
-
- file->private_data = chan;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- if (chan->usage < 0) { /* in netdev mode */
- spin_unlock_irqrestore(&cosa->lock, flags);
- ret = -EBUSY;
- goto out;
- }
- cosa->usage++;
- chan->usage++;
-
- chan->tx_done = chrdev_tx_done;
- chan->setup_rx = chrdev_setup_rx;
- chan->rx_done = chrdev_rx_done;
- spin_unlock_irqrestore(&cosa->lock, flags);
-out:
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-static int cosa_release(struct inode *inode, struct file *file)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- unsigned long flags;
-
- cosa = channel->cosa;
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->usage--;
- channel->usage--;
- spin_unlock_irqrestore(&cosa->lock, flags);
- return 0;
-}
-
-#ifdef COSA_FASYNC_WORKING
-static struct fasync_struct *fasync[256] = { NULL, };
-
-/* To be done ... */
-static int cosa_fasync(struct inode *inode, struct file *file, int on)
-{
- int port = iminor(inode);
-
- return fasync_helper(inode, file, on, &fasync[port]);
-}
-#endif
-
-/* ---------- Ioctls ---------- */
-
-/* Ioctl subroutines can safely be made inline, because they are called
- * only from cosa_ioctl().
- */
-static inline int cosa_reset(struct cosa_data *cosa)
-{
- char idstring[COSA_MAX_ID_STRING];
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START);
- if (cosa_reset_and_read_id(cosa, idstring) < 0) {
- pr_notice("cosa%d: reset failed\n", cosa->num);
- return -EIO;
- }
- pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to download data into COSA memory. Calls download() */
-static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->name, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
- if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD);
-
- i = download(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: microcode download failed: %d\n",
- cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD;
- return 0;
-}
-
-/* High-level function to read COSA memory. Calls readmem() */
-static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~COSA_FW_RESET;
-
- i = readmem(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to start microcode. Calls startmicrocode(). */
-static inline int cosa_start(struct cosa_data *cosa, int address)
-{
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
-
- if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD))
- != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) {
- pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- cosa->firmware_status &= ~COSA_FW_RESET;
- i = startmicrocode(cosa, address);
- if (i < 0) {
- pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
- cosa->num, address, i);
- return -EIO;
- }
- pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address);
- cosa->startaddr = address;
- cosa->firmware_status |= COSA_FW_START;
- return 0;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->id_string) + 1;
-
- if (copy_to_user(string, cosa->id_string, l))
- return -EFAULT;
- return l;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->type) + 1;
-
- if (copy_to_user(string, cosa->type, l))
- return -EFAULT;
- return l;
-}
-
-static int cosa_ioctl_common(struct cosa_data *cosa,
- struct channel_data *channel, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case COSAIORSET: /* Reset the device */
- if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return cosa_reset(cosa);
- case COSAIOSTRT: /* Start the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_start(cosa, arg);
- case COSAIODOWNLD: /* Download the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
-
- return cosa_download(cosa, argp);
- case COSAIORMEM:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_readmem(cosa, argp);
- case COSAIORTYPE:
- return cosa_gettype(cosa, argp);
- case COSAIORIDSTR:
- return cosa_getidstr(cosa, argp);
- case COSAIONRCARDS:
- return nr_cards;
- case COSAIONRCHANS:
- return cosa->nchannels;
- case COSAIOBMSET:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- if (is_8bit(cosa))
- return -EINVAL;
- if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
- return -EINVAL;
- cosa->busmaster = arg;
- return 0;
- case COSAIOBMGET:
- return cosa->busmaster;
- }
- return -ENOIOCTLCMD;
-}
-
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- long ret;
-
- mutex_lock(&cosa_chardev_mutex);
- cosa = channel->cosa;
- ret = cosa_ioctl_common(cosa, channel, cmd, arg);
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
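-
-/* Illustrative user-space sketch (not part of the original driver) of
- * driving the dispatch above: reset the card, download microcode and
- * start it through the character device. The device path and firmware
- * buffer are assumptions for the example; the ioctl numbers and
- * struct cosa_download come from cosa.h, and the caller needs
- * CAP_NET_ADMIN/CAP_SYS_RAWIO:
- *
- *	#include <fcntl.h>
- *	#include <unistd.h>
- *	#include <sys/ioctl.h>
- *
- *	int load_and_start(const char *path, char *code, int len, int addr)
- *	{
- *		struct cosa_download d = {
- *			.addr = addr, .len = len, .code = code
- *		};
- *		int fd = open(path, O_RDWR);	// assumed /dev node name
- *
- *		if (fd < 0)
- *			return -1;
- *		if (ioctl(fd, COSAIORSET) < 0 ||	// reset first
- *		    ioctl(fd, COSAIODOWNLD, &d) < 0 ||	// then download
- *		    ioctl(fd, COSAIOSTRT, addr) < 0) {	// then start
- *			close(fd);
- *			return -1;
- *		}
- *		close(fd);
- *		return 0;
- *	}
- */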
-
-/*---------- HW layer interface ---------- */
-
-/* The higher layer can bind itself to the HW layer by setting the callbacks
- * in the channel_data structure and by using these routines.
- */
-static void cosa_enable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-static void cosa_disable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-/* FIXME: This routine should probably check for cosa_start_tx() being
- * called while the previous transmit is still unfinished. In that case
- * a non-zero return value should indicate to the caller that queueing
- * up the transmit has failed.
- */
-static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
-{
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
-#ifdef DEBUG_DATA
- int i;
-
- pr_info("cosa%dc%d: starting tx(0x%x)",
- chan->cosa->num, chan->num, len);
- for (i = 0; i < len; i++)
- pr_cont(" %02x", buf[i]&0xff);
- pr_cont("\n");
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- chan->txbuf = buf;
- chan->txsize = len;
- if (len > COSA_MTU)
- chan->txsize = COSA_MTU;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- /* Tell the firmware we are ready */
- set_bit(chan->num, &cosa->txbitmap);
- put_driver_status(cosa);
-
- return 0;
-}
-
-static void put_driver_status(struct cosa_data *cosa)
-{
- unsigned long flags;
- int status;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
- if (!cosa->rxtx) {
- if (cosa->rxbitmap | cosa->txbitmap) {
- if (!cosa->enabled) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- }
- } else if (cosa->enabled) {
- cosa->enabled = 0;
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
- }
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static void put_driver_status_nolock(struct cosa_data *cosa)
-{
- int status;
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
-
- if (cosa->rxbitmap | cosa->txbitmap) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- } else {
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- cosa->enabled = 0;
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
-}
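-
-/* Worked example of the driver status byte built above (illustrative,
- * not original code; assumes DRIVER_TXMAP_SHIFT == 2, matching the
- * per-channel ready bits tx_interrupt() tests below): with
- * rxbitmap == 0x1 and txbitmap == 0x1,
- *
- *	status = DRIVER_RX_READY | DRIVER_TX_READY
- *		 | (~(0x1 << 2) & DRIVER_TXMAP_MASK);
- *
- * Both ready flags are set, and the TX map is sent inverted, so a zero
- * bit marks a channel that has data to transmit.
- */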
-
-/* The "kickme" function: When the DMA times out, this is called to
- * clean up the driver status.
- * FIXME: Preliminary support, the interface is probably wrong.
- */
-static void cosa_kick(struct cosa_data *cosa)
-{
- unsigned long flags, flags1;
- char *s = "(probably) IRQ";
-
- if (test_bit(RXBIT, &cosa->rxtx))
- s = "RX DMA";
- if (test_bit(TXBIT, &cosa->rxtx))
- s = "TX DMA";
-
- pr_info("%s: %s timeout - restarting\n", cosa->name, s);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->rxtx = 0;
-
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
-
- /* FIXME: Anything else? */
- udelay(100);
- cosa_putstatus(cosa, 0);
- udelay(100);
- (void)cosa_getdata8(cosa);
- udelay(100);
- cosa_putdata8(cosa, 0);
- udelay(100);
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-/* Check if the whole buffer is DMA-able, i.e. it lies below 16MB of
- * physical memory and doesn't span a 64KB boundary. For now it seems
- * SKBs never do this, but we'll check anyway.
- */
-static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
-{
- static int count;
- unsigned long b = (unsigned long)buf;
-
- if (b + len >= MAX_DMA_ADDRESS)
- return 0;
- if ((b ^ (b + len)) & 0x10000) {
- if (count++ < 5)
- pr_info("%s: packet spanning a 64k boundary\n",
- chan->name);
- return 0;
- }
- return 1;
-}
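-
-/* Illustrative example for the check above (not original code): a
- * 0x40-byte buffer at bus address 0x0ffe0 fails, because
- * (0x0ffe0 ^ 0x10020) & 0x10000 is non-zero - the buffer crosses the
- * 64KB boundary at 0x10000, which the ISA DMA controller's 16-bit
- * address counter cannot follow. The same buffer at 0x10000 passes.
- */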
-
-/* ---------- The SRP/COSA ROM monitor functions ---------- */
-
-/* Downloading SRP microcode: send "w" to the SRP monitor; it answers
- * with "w=". The driver then sends the 4-digit hex start address of
- * the microcode, followed by a single space. The monitor replies with
- * " =". The driver then writes the 4-digit hex address of the last
- * byte, again followed by a single space, and the monitor replies with
- * a space. Now the download begins. After the download the monitor
- * replies with "\r\n." (CR LF dot).
- */
-static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
-{
- int i;
-
- if (put_wait_data(cosa, 'w') == -1)
- return -1;
- i = get_wait_data(cosa);
- if (i != 'w') {
- printk("dnld: 0x%04x\n", i);
- return -2;
- }
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -10;
- if (get_wait_data(cosa) != ' ')
- return -11;
- if (get_wait_data(cosa) != '=')
- return -12;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -13;
- if (put_wait_data(cosa, ' ') == -1)
- return -18;
- if (get_wait_data(cosa) != ' ')
- return -19;
-
- while (length--) {
- char c;
-#ifndef SRP_DOWNLOAD_AT_BOOT
- if (get_user(c, microcode))
- return -23; /* ??? */
-#else
- c = *microcode;
-#endif
- if (put_wait_data(cosa, c) == -1)
- return -20;
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
-#endif
- return 0;
-}
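-
-/* Illustrative transcript of the chat above for
- * download(cosa, code, 3, 0x4400) - driver bytes on the left, monitor
- * echoes and replies on the right:
- *
- *	'w'		->	"w="
- *	"4400" ' '	->	"4400 ="
- *	"4402" ' '	->	"4402 "
- *	3 data bytes	->	"\r\n."
- */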
-
-/* Starting microcode is done via the "g" command of the SRP monitor.
- * The chat should be the following: "g" "g=" "<addr><CR>"
- * "<CR><CR><LF><CR><LF>".
- */
-static int startmicrocode(struct cosa_data *cosa, int address)
-{
- if (put_wait_data(cosa, 'g') == -1)
- return -1;
- if (get_wait_data(cosa) != 'g')
- return -2;
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, '\r') == -1)
- return -5;
-
- if (get_wait_data(cosa) != '\r')
- return -6;
- if (get_wait_data(cosa) != '\r')
- return -7;
- if (get_wait_data(cosa) != '\n')
- return -8;
- if (get_wait_data(cosa) != '\r')
- return -9;
- if (get_wait_data(cosa) != '\n')
- return -10;
-#if 0
- printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
-#endif
- return 0;
-}
-
-/* Reading memory is done via the "r" command of the SRP monitor.
- * The chat is the following: "r" "r=" "<addr> " " =" "<last_byte> " " ".
- * Then the driver can read the data, and the conversation is finished
- * by SRP monitor sending "<CR><LF>." (dot at the end).
- *
- * This routine is not needed during the normal operation and serves
- * for debugging purposes only.
- */
-static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
-{
- if (put_wait_data(cosa, 'r') == -1)
- return -1;
- if ((get_wait_data(cosa)) != 'r')
- return -2;
- if ((get_wait_data(cosa)) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -5;
- if (get_wait_data(cosa) != ' ')
- return -6;
- if (get_wait_data(cosa) != '=')
- return -7;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -8;
- if (put_wait_data(cosa, ' ') == -1)
- return -9;
- if (get_wait_data(cosa) != ' ')
- return -10;
-
- while (length--) {
- char c;
- int i;
-
- i = get_wait_data(cosa);
- if (i == -1) {
- pr_info("0x%04x bytes remaining\n", length);
- return -11;
- }
- c = i;
-#if 1
- if (put_user(c, microcode))
- return -23; /* ??? */
-#else
- *microcode = c;
-#endif
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
-#endif
- return 0;
-}
-
-/* This function resets the device and reads the initial prompt
- * of the device's ROM monitor.
- */
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
-{
- int i = 0, id = 0, prev = 0, curr = 0;
-
- /* Reset the card ... */
- cosa_putstatus(cosa, 0);
- cosa_getdata8(cosa);
- cosa_putstatus(cosa, SR_RST);
- msleep(500);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
-
- /* Try to read the ID string. The card then prints out the
- * identification string terminated by "\n\x2e".
- *
- * The following loop is indexed through i (instead of id)
- * to avoid looping forever when for any reason
- * the port returns '\r', '\n' or '\x2e' permanently.
- */
- for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
- curr = get_wait_data(cosa);
- if (curr == -1)
- return -1;
-
- curr &= 0xff;
- if (curr != '\r' && curr != '\n' && curr != 0x2e)
- idstring[id++] = curr;
- if (curr == 0x2e && prev == '\n')
- break;
- }
- /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
- idstring[id] = '\0';
- return id;
-}
-
-/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
-
-/* This routine waits in a loop for the SR_RX_RDY bit to be set, then
- * reads the data byte from the card. It should be used in exceptional
- * cases only (for example when resetting the card or downloading the
- * firmware).
- */
-static int get_wait_data(struct cosa_data *cosa)
-{
- int retries = 1000;
-
- while (--retries) {
- /* read the data byte and return it */
- if (cosa_getstatus(cosa) & SR_RX_RDY) {
- short r;
-
- r = cosa_getdata8(cosa);
-#if 0
- pr_info("get_wait_data returning after %d retries\n",
- 999 - retries);
-#endif
- return r;
- }
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
- }
- pr_info("timeout in get_wait_data (status 0x%x)\n",
- cosa_getstatus(cosa));
- return -1;
-}
-
-/* This routine puts the data byte to the card waiting for the SR_TX_RDY
- * bit to be set in a loop. It should be used in the exceptional cases
- * only (for example when resetting the card or downloading the firmware).
- */
-static int put_wait_data(struct cosa_data *cosa, int data)
-{
- int retries = 1000;
-
- while (--retries) {
- /* write the data byte once the card is ready */
- if (cosa_getstatus(cosa) & SR_TX_RDY) {
- cosa_putdata8(cosa, data);
-#if 0
- pr_info("Putdata: %d retries\n", 999 - retries);
-#endif
- return 0;
- }
-#if 0
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
-#endif
- }
- pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n",
- cosa->num, cosa_getstatus(cosa));
- return -1;
-}
-
-/* The following routine writes the hexadecimal number to the SRP monitor
- * and verifies the proper echo of the sent bytes. Returns 0 on success,
- * a negative number on failure: (-1,-3,-5,-7) mean that put_wait_data()
- * failed, (-2,-4,-6,-8) mean that reading the echo failed.
- */
- */
-static int puthexnumber(struct cosa_data *cosa, int number)
-{
- char temp[5];
- int i;
-
- /* Well, I should probably replace this by something faster. */
- sprintf(temp, "%04X", number);
- for (i = 0; i < 4; i++) {
- if (put_wait_data(cosa, temp[i]) == -1) {
- pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
- cosa->num, i);
- return -1 - 2 * i;
- }
- if (get_wait_data(cosa) != temp[i]) {
- pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
- cosa->num, i);
- return -2 - 2 * i;
- }
- }
- return 0;
-}
-
-/* ---------- Interrupt routines ---------- */
-
-/* There are three types of interrupt:
- * at the beginning of transmit - this is handled in tx_interrupt(),
- * at the beginning of receive - it is in rx_interrupt(), and
- * at the end of transmit/receive - it is the eot_interrupt() function.
- * These functions are multiplexed by cosa_interrupt() according to the
- * COSA status byte. I have moved the rx/tx/eot interrupt handling into
- * separate functions to make it more readable. These functions are
- * inline, so there should be no function-call overhead.
- *
- * In the COSA bus-master mode, we need to tell the card the address of a
- * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
- * It's time to use the bottom half :-(
- */
-
-/* Transmit interrupt routine - called when COSA is willing to obtain
- * data from the OS. The trickiest part of the routine is the selection
- * of the channel we (the OS) want to send a packet for. For SRP we
- * should probably use a round-robin approach. The newer COSA firmwares
- * have a simple flow control - bits 2 and 3 of the status word set to 1
- * mean that channel 0 or 1, respectively, doesn't want to receive data.
- *
- * It seems there is a bug in COSA firmware (need to trace it further):
- * When the driver status says that the kernel has no more data for transmit
- * (e.g. at the end of TX DMA) and then the kernel changes its mind
- * (e.g. new packet is queued to hard_start_xmit()), the card issues
- * the TX interrupt but does not mark the channel as ready-to-transmit.
- * The fix seems to be to push the packet to COSA despite its request.
- * We first try to obey the card's opinion, and then fall back to forced TX.
- */
-static inline void tx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status);
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(TXBIT, &cosa->rxtx);
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- /* flow control, see the comment above */
- int i = 0;
-
- if (!cosa->txbitmap) {
- pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
- cosa->name);
- put_driver_status_nolock(cosa);
- clear_bit(TXBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- }
- while (1) {
- cosa->txchan++;
- i++;
- if (cosa->txchan >= cosa->nchannels)
- cosa->txchan = 0;
- if (!(cosa->txbitmap & (1 << cosa->txchan)))
- continue;
- if (~status &
- (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT)))
- break;
- /* in second pass, accept first ready-to-TX channel */
- if (i > cosa->nchannels) {
- /* Can be safely ignored */
-#ifdef DEBUG_IRQS
- printk(KERN_DEBUG "%s: Forcing TX "
- "to not-ready channel %d\n",
- cosa->name, cosa->txchan);
-#endif
- break;
- }
- }
-
- cosa->txsize = cosa->chan[cosa->txchan].txsize;
- if (cosa_dma_able(cosa->chan + cosa->txchan,
- cosa->chan[cosa->txchan].txbuf,
- cosa->txsize)) {
- cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
- } else {
- memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
- cosa->txsize);
- cosa->txbuf = cosa->bouncebuf;
- }
- }
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
- debug_data_in(cosa, cosa_getdata8(cosa));
-#else
- cosa_getdata8(cosa);
-#endif
- set_bit(IRQBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa_putstatus(cosa, 0);
- cosa_putdata8(cosa, cosa->txsize & 0xff);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
- debug_data_out(cosa, cosa->txsize & 0xff);
-#endif
- }
- } else {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000)
- | (cosa->txsize & 0x1fff));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) |
- (cosa->txsize & 0x1fff));
- debug_data_in(cosa, cosa_getdata8(cosa));
- debug_status_out(cosa, 0);
-#else
- cosa_getdata8(cosa);
-#endif
- cosa_putstatus(cosa, 0);
- }
-
- if (cosa->busmaster) {
- unsigned long addr = virt_to_bus(cosa->txbuf);
- int count = 0;
-
- pr_info("busmaster IRQ\n");
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- udelay(10);
- if (count > 1000)
- break;
- }
- pr_info("status %x\n", cosa_getstatus(cosa));
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, (addr >> 16) & 0xffff);
-
- count = 0;
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- if (count > 1000)
- break;
- udelay(10);
- }
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, addr & 0xffff);
- flags1 = claim_dma_lock();
- set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- } else {
- /* start the DMA */
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_WRITE);
- set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
- set_dma_count(cosa->dma, cosa->txsize);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- }
- cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
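-
-/* Worked example of the TX header encoding above (illustrative): for
- * channel 1 and a 0x0400-byte frame, a 16-bit card receives the single
- * word 0x2400, while an 8-bit card receives 0x24 (channel in the top
- * three bits, size bits 12..8 below them) followed by 0x00 (size bits
- * 7..0).
- */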
-
-static inline void rx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num);
-#endif
-
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(RXBIT, &cosa->rxtx);
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- set_bit(IRQBIT, &cosa->rxtx);
- put_driver_status_nolock(cosa);
- cosa->rxsize = cosa_getdata8(cosa) << 8;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize >> 8);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize & 0xff);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- } else {
- cosa->rxsize = cosa_getdata16(cosa);
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
- pr_warn("%s: rx for unknown channel (0x%04x)\n",
- cosa->name, cosa->rxsize);
- spin_unlock_irqrestore(&cosa->lock, flags);
- goto reject;
- }
- cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
- cosa->rxsize &= 0x1fff;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- cosa->rxbuf = NULL;
- if (cosa->rxchan->setup_rx)
- cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
-
- if (!cosa->rxbuf) {
-reject: /* Reject the packet */
- pr_info("cosa%d: rejecting packet on channel %d\n",
- cosa->num, cosa->rxchan->num);
- cosa->rxbuf = cosa->bouncebuf;
- }
-
- /* start the DMA */
- flags = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_READ);
- if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff))
- set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
- else
- set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
-
- set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff));
- enable_dma(cosa->dma);
- release_dma_lock(flags);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- cosa_putdata8(cosa, DRIVER_RX_READY);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- debug_data_cmd(cosa, DRIVER_RX_READY);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
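-
-/* Worked example of the RX size word decoding above (illustrative): a
- * value of 0x2400 from the card means channel (0x2400 & 0xe000) >> 13
- * == 1 and a length of 0x2400 & 0x1fff == 0x0400 bytes - the mirror
- * image of the TX header encoding.
- */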
-
-static inline void eot_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-
- spin_lock_irqsave(&cosa->lock, flags);
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
- if (test_bit(TXBIT, &cosa->rxtx)) {
- struct channel_data *chan = cosa->chan + cosa->txchan;
-
- if (chan->tx_done)
- if (chan->tx_done(chan, cosa->txsize))
- clear_bit(chan->num, &cosa->txbitmap);
- } else if (test_bit(RXBIT, &cosa->rxtx)) {
-#ifdef DEBUG_DATA
- {
- int i;
-
- pr_info("cosa%dc%d: done rx(0x%x)",
- cosa->num, cosa->rxchan->num, cosa->rxsize);
- for (i = 0; i < cosa->rxsize; i++)
- pr_cont(" %02x", cosa->rxbuf[i]&0xff);
- pr_cont("\n");
- }
-#endif
- /* Packet for unknown channel? */
- if (cosa->rxbuf == cosa->bouncebuf)
- goto out;
- if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
- memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
- if (cosa->rxchan->rx_done)
- if (cosa->rxchan->rx_done(cosa->rxchan))
- clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
- } else {
- pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
- }
- /* Clear the RXBIT, TXBIT and IRQBIT (the last of these should be
- * cleared anyway). We should do this as soon as possible so that we
- * can tell the COSA we are done and give it time to recover.
- */
-out:
- cosa->rxtx = 0;
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static irqreturn_t cosa_interrupt(int irq, void *cosa_)
-{
- unsigned status;
- int count = 0;
- struct cosa_data *cosa = cosa_;
-again:
- status = cosa_getstatus(cosa);
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff);
-#endif
-#ifdef DEBUG_IO
- debug_status_in(cosa, status);
-#endif
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_DOWN_REQUEST:
- tx_interrupt(cosa, status);
- break;
- case SR_UP_REQUEST:
- rx_interrupt(cosa, status);
- break;
- case SR_END_OF_TRANSFER:
- eot_interrupt(cosa, status);
- break;
- default:
- /* We may be too fast for SRP. Try to wait a bit more. */
- if (count++ < 100) {
- udelay(100);
- goto again;
- }
- pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
- cosa->num, status & 0xff, count);
- }
-#ifdef DEBUG_IRQS
- if (count)
- pr_info("%s: %d-times got unknown status in IRQ\n",
- cosa->name, count);
- else
- pr_info("%s: returning from IRQ\n", cosa->name);
-#endif
- return IRQ_HANDLED;
-}
-
-/* ---------- I/O debugging routines ---------- */
-/* These routines can be used to monitor COSA/SRP I/O and to printk()
- * the data being transferred on the data and status I/O port in a
- * readable way.
- */
-
-#ifdef DEBUG_IO
-static void debug_status_in(struct cosa_data *cosa, int status)
-{
- char *s;
-
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_UP_REQUEST:
- s = "RX_REQ";
- break;
- case SR_DOWN_REQUEST:
- s = "TX_REQ";
- break;
- case SR_END_OF_TRANSFER:
- s = "ET_REQ";
- break;
- default:
- s = "NO_REQ";
- break;
- }
- pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_USR_RQ ? "USR_RQ|" : "",
- status & SR_TX_RDY ? "TX_RDY|" : "",
- status & SR_RX_RDY ? "RX_RDY|" : "",
- s);
-}
-
-static void debug_status_out(struct cosa_data *cosa, int status)
-{
- pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|",
- status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|",
- status & SR_RST ? "RESET|" : "",
- status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|",
- status & SR_TX_INT_ENA ? "TXINT|" : "!txint|",
- status & SR_RX_INT_ENA ? "RXINT" : "!rxint");
-}
-
-static void debug_data_in(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_out(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_cmd(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x (%s|%s)\n",
- cosa->name, data,
- data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
- data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
-}
-#endif
-
-/* EOF -- this file has not been truncated */
diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h
deleted file mode 100644
index f57e0af9d56a..000000000000
--- a/drivers/net/wan/cosa.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
-
-/*
- * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- */
-
-#ifndef COSA_H__
-#define COSA_H__
-
-#include <linux/ioctl.h>
-
-#ifdef __KERNEL__
-/* status register - output bits */
-#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */
-#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */
-#define SR_RST 0x10 /* SRP reset */
-#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */
-#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */
-#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
-
-/* status register - input bits */
-#define SR_USR_RQ 0x20 /* user interrupt request pending */
-#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
-#define SR_RX_RDY 0x80 /* receiver data ready */
-
-#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data
- up to PC */
-#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down
- from PC to SRP */
-#define SR_END_OF_TRANSFER 0x03 /* SRP signalize end of
- transfer (up or down) */
-
-#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */
-
-/* bits in driver status byte definitions : */
-#define SR_RDY_RCV 0x01 /* ready to receive packet */
-#define SR_RDY_SND 0x02 /* ready to send packet */
-#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */
-
-/* ???? */
-#define SR_PKT_UP 0x01 /* transfer of packet up in progress */
-#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */
-
-#endif /* __KERNEL__ */
-
-#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */
-#define SR_START_ADDR 0x4400 /* SRP microcode start address */
-
-#define COSA_LOAD_ADDR 0x400 /* COSA microcode load address */
-#define COSA_MAX_FIRMWARE_SIZE 0x10000
-
-/* ioctls */
-struct cosa_download {
- int addr, len;
- char __user *code;
-};
-
-/* Reset the device */
-#define COSAIORSET _IO('C',0xf0)
-
-/* Start microcode at given address */
-#define COSAIOSTRT _IOW('C',0xf1, int)
-
-/* Read the block from the device memory */
-#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *)
- /* actually the struct cosa_download itself; this is to keep
- * the ioctl number same as in 2.4 in order to keep the user-space
- * utils compatible. */
-
-/* Write the block to the device memory (i.e. download the microcode) */
-#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *)
- /* actually the struct cosa_download itself; this is to keep
- * the ioctl number same as in 2.4 in order to keep the user-space
- * utils compatible. */
-
-/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */
-#define COSAIORTYPE _IOR('C',0xf3, char *)
-
-/* Read the device identification string */
-#define COSAIORIDSTR _IOR('C',0xf4, char *)
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Increment/decrement the module usage count :-) */
-/* #define COSAIOMINC _IO('C',0xf5) */
-/* #define COSAIOMDEC _IO('C',0xf6) */
-
-/* Get the total number of cards installed */
-#define COSAIONRCARDS _IO('C',0xf7)
-
-/* Get the number of channels on this card */
-#define COSAIONRCHANS _IO('C',0xf8)
-
-/* Set the driver for the bus-master operations */
-#define COSAIOBMSET _IOW('C', 0xf9, unsigned short)
-
-#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */
-#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */
-
-/* Gets the busmaster status */
-#define COSAIOBMGET _IO('C', 0xfa)
-
-#endif /* !COSA_H__ */
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index 5f43568a9715..63908dbbb02d 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -43,7 +43,7 @@
* This version number is incremented with each official release of the
* package and is a simplified number for normal user reference.
* Individual files are tracked by the version control system and may
- * have individual versions (or IDs) that move much faster than the
+ * have individual versions (or IDs) that move much faster than
* the release version as individual updates are tracked.
*/
#define FST_USER_VERSION "1.04"
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 5ae2d27b5da9..22edea6ca4b8 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1231,7 +1231,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
hdlc->attach = ucc_hdlc_attach;
hdlc->xmit = ucc_hdlc_tx;
- netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
+ netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
if (register_hdlc_device(dev)) {
ret = -ENOBUFS;
pr_err("ucc_hdlc: unable to register hdlc device\n");
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index b89b03a6aba7..534369ffe5de 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -173,7 +173,8 @@ static void sca_init_port(port_t *port)
sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */
sca_set_carrier(port);
- netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
+ netif_napi_add_weight(port->netdev, &port->napi, sca_poll,
+ NAPI_WEIGHT);
}
/* MSCI interrupt service */
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
deleted file mode 100644
index e985e54ba75d..000000000000
--- a/drivers/net/wan/hostess_sv11.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Comtrol SV11 card driver
- *
- * This is a slightly odd Z85230 synchronous driver. All you need to
- * know basically is
- *
- * It's a genuine Z85230
- *
- * It supports DMA using two DMA channels in SYNC mode. The driver doesn't
- * use these facilities
- *
- * The control port is at io+1, the data at io+3 and turning off the DMA
- * is done by writing 0 to io+4
- *
- * The hardware does the bus handling to avoid the need for delays between
- * touching control registers.
- *
- * Port B isn't wired (why - beats me)
- *
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-static int dma;
-
-/* Network driver support routines
- */
-
-static inline struct z8530_dev *dev_to_sv(struct net_device *dev)
-{
- return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- /* Send it to the PPP layer. We don't have time to process
- * it right now.
- */
- netif_rx(skb);
-}
-
-/* We've been placed in the UP state
- */
-
-static int hostess_open(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- int err = -1;
-
- /* Link layer up
- */
- switch (dma) {
- case 0:
- err = z8530_sync_open(d, &sv11->chanA);
- break;
- case 1:
- err = z8530_sync_dma_open(d, &sv11->chanA);
- break;
- case 2:
- err = z8530_sync_txdma_open(d, &sv11->chanA);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return err;
- }
- sv11->chanA.rx_function = hostess_input;
-
- /*
- * Go go go
- */
-
- netif_start_queue(d);
- return 0;
-}
-
-static int hostess_close(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- /* Discard new frames
- */
- sv11->chanA.rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind.
- */
-
-static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
-}
-
-static int hostess_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-/* Description block for a Comtrol Hostess SV11 card
- */
-
-static const struct net_device_ops hostess_ops = {
- .ndo_open = hostess_open,
- .ndo_stop = hostess_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static struct z8530_dev *sv11_init(int iobase, int irq)
-{
- struct z8530_dev *sv;
- struct net_device *netdev;
- /* Get the needed I/O space
- */
-
- if (!request_region(iobase, 8, "Comtrol SV11")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
- if (!sv)
- goto err_kzalloc;
-
- /* Stuff in the I/O addressing
- */
-
- sv->active = 0;
-
- sv->chanA.ctrlio = iobase + 1;
- sv->chanA.dataio = iobase + 3;
- sv->chanB.ctrlio = -1;
- sv->chanB.dataio = -1;
- sv->chanA.irqs = &z8530_nop;
- sv->chanB.irqs = &z8530_nop;
-
- outb(0, iobase + 4); /* DMA off */
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "Hostess SV11", sv) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_irq;
- }
-
- sv->irq = irq;
- sv->chanA.private = sv;
- sv->chanA.dev = sv;
- sv->chanB.dev = sv;
-
- if (dma) {
- /* You can have DMA off, or channels 1 and 3 - that's the lot
- * on the Comtrol.
- */
- sv->chanA.txdma = 3;
- sv->chanA.rxdma = 1;
- outb(0x03 | 0x08, iobase + 4); /* DMA on */
- if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
- goto err_txdma;
-
- if (dma == 1)
- if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
- goto err_rxdma;
- }
-
- /* Kill our private IRQ line: the Hostess can end up chattering
- * until the configuration is set.
- */
- disable_irq(irq);
-
- /* Begin normal initialise
- */
-
- if (z8530_init(sv)) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_dma;
- }
- z8530_channel_load(&sv->chanB, z8530_dead_port);
- if (sv->type == Z85C30)
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
- else
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
-
- enable_irq(irq);
-
- /* Now we can take the IRQ
- */
-
- sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
- if (!netdev)
- goto free_dma;
-
- dev_to_hdlc(netdev)->attach = hostess_attach;
- dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
- netdev->netdev_ops = &hostess_ops;
- netdev->base_addr = iobase;
- netdev->irq = irq;
-
- if (register_hdlc_device(netdev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(netdev);
- goto free_dma;
- }
-
- z8530_describe(sv, "I/O", iobase);
- sv->active = 1;
- return sv;
-
-free_dma:
- if (dma == 1)
- free_dma(sv->chanA.rxdma);
-err_rxdma:
- if (dma)
- free_dma(sv->chanA.txdma);
-err_txdma:
- free_irq(irq, sv);
-err_irq:
- kfree(sv);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void sv11_shutdown(struct z8530_dev *dev)
-{
- unregister_hdlc_device(dev->chanA.netdevice);
- z8530_shutdown(dev);
- free_irq(dev->irq, dev);
- if (dma) {
- if (dma == 1)
- free_dma(dev->chanA.rxdma);
- free_dma(dev->chanA.txdma);
- }
- release_region(dev->chanA.ctrlio - 1, 8);
- free_netdev(dev->chanA.netdevice);
- kfree(dev);
-}
-
-static int io = 0x200;
-static int irq = 9;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
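-
-/* Illustrative usage with the defaults shown above (io=0x200, irq=9)
- * and DMA enabled - an assumed invocation, not part of the original
- * driver:
- *
- *	modprobe hostess_sv11 io=0x200 irq=9 dma=1
- */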
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
-
-static struct z8530_dev *sv11_unit;
-
-static int sv11_module_init(void)
-{
- sv11_unit = sv11_init(io, irq);
- if (!sv11_unit)
- return -ENODEV;
- return 0;
-}
-module_init(sv11_module_init);
-
-static void sv11_module_cleanup(void)
-{
- if (sv11_unit)
- sv11_shutdown(sv11_unit);
-}
-module_exit(sv11_module_cleanup);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0b7d9f2f2b8b..e46b7f5ee49e 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -16,8 +16,10 @@
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
@@ -1389,9 +1391,28 @@ static int ixp4xx_hss_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct device_node *np;
+ struct regmap *rmap;
struct port *port;
hdlc_device *hdlc;
int err;
+ u32 val;
+
+ /*
+ * Go into the syscon and check if we have the HSS and HDLC
+ * features available; otherwise this will not work.
+ */
+ rmap = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(rmap))
+ return dev_err_probe(dev, PTR_ERR(rmap),
+ "failed to look up syscon\n");
+
+ val = cpu_ixp4xx_features(rmap);
+
+ if ((val & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
+ (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) {
+ dev_err(dev, "HDLC and HSS feature unavailable in platform\n");
+ return -ENODEV;
+ }
np = dev->of_node;
@@ -1483,7 +1504,7 @@ static int ixp4xx_hss_probe(struct platform_device *pdev)
port->clock_reg = CLK42X_SPEED_2048KHZ;
port->id = pdev->id;
port->dev = &pdev->dev;
- netif_napi_add(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
err = register_hdlc_device(ndev);
if (err)
@@ -1516,25 +1537,9 @@ static struct platform_driver ixp4xx_hss_driver = {
.probe = ixp4xx_hss_probe,
.remove = ixp4xx_hss_remove,
};
-
-static int __init hss_init_module(void)
-{
- if ((ixp4xx_read_feature_bits() &
- (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
- (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
- return -ENODEV;
-
- return platform_driver_register(&ixp4xx_hss_driver);
-}
-
-static void __exit hss_cleanup_module(void)
-{
- platform_driver_unregister(&ixp4xx_hss_driver);
-}
+module_platform_driver(ixp4xx_hss_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
-module_init(hss_init_module);
-module_exit(hss_cleanup_module);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 282192b82404..d62a904d2e42 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -325,6 +325,7 @@ static int lapbeth_open(struct net_device *dev)
err = lapb_register(dev, &lapbeth_callbacks);
if (err != LAPB_OK) {
+ napi_disable(&lapbeth->napi);
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
@@ -408,7 +409,7 @@ static int lapbeth_new_device(struct net_device *dev)
spin_lock_init(&lapbeth->up_lock);
skb_queue_head_init(&lapbeth->rx_queue);
- netif_napi_add(ndev, &lapbeth->napi, lapbeth_napi_poll, 16);
+ netif_napi_add_weight(ndev, &lapbeth->napi, lapbeth_napi_poll, 16);
rc = -EIO;
if (register_netdevice(ndev))
@@ -446,7 +447,7 @@ static int lapbeth_device_event(struct notifier_block *this,
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
deleted file mode 100644
index f00fe4491d69..000000000000
--- a/drivers/net/wan/lmc/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Lan Media 21140 based WAN cards
-# Specifically the 1000,1200,5200,5245
-#
-
-obj-$(CONFIG_LANMEDIA) += lmc.o
-
-lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
-
-# Like above except every packet gets echoed to KERN_DEBUG
-# in hex
-#
-# DBGDEF = \
-# -DDEBUG \
-# -DLMC_PACKET_LOG
-
-ccflags-y := $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
deleted file mode 100644
index d7d59b4595f9..000000000000
--- a/drivers/net/wan/lmc/lmc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_H_
-#define _LMC_H_
-
-#include "lmc_var.h"
-
-/*
- * prototypes for everyone
- */
-int lmc_probe(struct net_device * dev);
-unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
- devaddr, unsigned regno);
-void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
- unsigned regno, unsigned data);
-void lmc_led_on(lmc_softc_t * const, u32);
-void lmc_led_off(lmc_softc_t * const, u32);
-unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
-void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
-
-int lmc_ioctl(struct net_device *dev, struct if_settings *ifs);
-
-extern lmc_media_t lmc_ds3_media;
-extern lmc_media_t lmc_ssi_media;
-extern lmc_media_t lmc_t1_media;
-extern lmc_media_t lmc_hssi_media;
-
-#ifdef _DBG_EVENTLOG
-static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-#endif
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
deleted file mode 100644
index 2b6051bda3fb..000000000000
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-
-#include "lmc_debug.h"
-
-/*
- * Prints out up to 240 octets of data using printk, 20 per line
- */
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
-{
- int iNewLine = 1;
- char str[80], *pstr;
-
- sprintf(str, KERN_DEBUG "lmc: %s: ", type);
- pstr = str+strlen(str);
-
- if(iLen > 240){
- printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
- iLen = 240;
- }
- else{
- printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
- }
-
- while(iLen > 0)
- {
- sprintf(pstr, "%02x ", *ucData);
- pstr+=3;
- ucData++;
- if( !(iNewLine % 20))
- {
- sprintf(pstr, "\n");
- printk(str);
- sprintf(str, KERN_DEBUG "lmc: %s: ", type);
- pstr=str+strlen(str);
- }
- iNewLine++;
- iLen--;
- }
- sprintf(pstr, "\n");
- printk(str);
-}
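-
-/* Sample output (illustrative; assumes a caller passing type == "rx"
- * and the 3-byte buffer { 0xde, 0xad, 0x01 }):
- *
- *	lmc: Printing 3 chars
- *	lmc: rx: de ad 01
- */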
-#endif
-#endif
-
-#ifdef DEBUG
-u32 lmcEventLogIndex;
-u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
-{
- lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
- lmcEventLogBuf[lmcEventLogIndex++] = arg2;
- lmcEventLogBuf[lmcEventLogIndex++] = arg3;
- lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
-
- lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
-}
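-
-/* Illustrative note, not original code: the wrap above works because
- * LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS is a power of two - 1024 * 4 ==
- * 4096 slots, so the mask is 0xfff and the index returns to 0 after
- * the last slot.
- */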
-#endif /* DEBUG */
-
-/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
deleted file mode 100644
index cfae9eddf003..000000000000
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_DEBUG_H_
-#define _LMC_DEBUG_H_
-
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-
-
-
-/* Debug --- Event log definitions --- */
-/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
-#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */
-#define LMC_EVENTLOGARGS 4 /* number of args for each event */
-
-/* event indicators */
-#define LMC_EVENT_XMT 1
-#define LMC_EVENT_XMTEND 2
-#define LMC_EVENT_XMTINT 3
-#define LMC_EVENT_RCVINT 4
-#define LMC_EVENT_RCVEND 5
-#define LMC_EVENT_INT 6
-#define LMC_EVENT_XMTINTTMO 7
-#define LMC_EVENT_XMTPRCTMO 8
-#define LMC_EVENT_INTEND 9
-#define LMC_EVENT_RESET1 10
-#define LMC_EVENT_RESET2 11
-#define LMC_EVENT_FORCEDRESET 12
-#define LMC_EVENT_WATCHDOG 13
-#define LMC_EVENT_BADPKTSURGE 14
-#define LMC_EVENT_TBUSY0 15
-#define LMC_EVENT_TBUSY1 16
-
-
-#ifdef DEBUG
-extern u32 lmcEventLogIndex;
-extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
-#else
-#define LMC_EVENT_LOG(x,y,z)
-#endif /* end ifdef _DBG_EVENTLOG */
-
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
deleted file mode 100644
index 8c65e2176e94..000000000000
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_IOCTL_H_
-#define _LMC_IOCTL_H_
-/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */
-#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */
-#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5
-#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6
-#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7
-#define LMCIOCGETXINFO SIOCDEVPRIVATE+8
-#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9
-#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10
-#define LMCIOCRESET SIOCDEVPRIVATE+11
-#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12
-#define LMCIOCIFTYPE SIOCDEVPRIVATE+13
-#define LMCIOCXILINX SIOCDEVPRIVATE+14
-
-#define LMC_CARDTYPE_UNKNOWN -1
-#define LMC_CARDTYPE_HSSI 1 /* probed card is a HSSI card */
-#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */
-#define LMC_CARDTYPE_SSI 3 /* probed card is a SSI card */
-#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */
-
-#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */
-#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */
-#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */
-#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */
-
-#define LMC_CTL_OFF 0 /* generic OFF value */
-#define LMC_CTL_ON 1 /* generic ON value */
-
-#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */
-#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */
-
-#define LMC_CTL_CRC_LENGTH_16 16
-#define LMC_CTL_CRC_LENGTH_32 32
-#define LMC_CTL_CRC_BYTESIZE_2 2
-#define LMC_CTL_CRC_BYTESIZE_4 4
-
-
-#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */
-#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */
-
-#define LMC_CTL_CIRCUIT_TYPE_E1 0
-#define LMC_CTL_CIRCUIT_TYPE_T1 1
-
-/*
- * IFTYPE defines
- */
-#define LMC_PPP 1 /* use generic HDLC interface */
-#define LMC_NET 2 /* use direct net interface */
-#define LMC_RAW 3 /* use direct net interface */
-
-/*
- * These are not in the least IOCTL related, but I want them common.
- */
-/*
- * assignments for the GPIO register on the DEC chip (common)
- */
-#define LMC_GEP_INIT 0x01 /* 0: */
-#define LMC_GEP_RESET 0x02 /* 1: */
-#define LMC_GEP_MODE 0x10 /* 4: */
-#define LMC_GEP_DP 0x20 /* 5: */
-#define LMC_GEP_DATA 0x40 /* 6: serial out */
-#define LMC_GEP_CLK 0x80 /* 7: serial clock */
-
-/*
- * HSSI GPIO assignments
- */
-#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */
-#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */
-
-/*
- * T1 GPIO assignments
- */
-#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */
-#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */
-
-/*
- * Common MII16 bits
- */
-#define LMC_MII16_LED0 0x0080
-#define LMC_MII16_LED1 0x0100
-#define LMC_MII16_LED2 0x0200
-#define LMC_MII16_LED3 0x0400 /* Error, and the red one */
-#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */
-#define LMC_MII16_FIFO_RESET 0x0800
-
-/*
- * definitions for HSSI
- */
-#define LMC_MII16_HSSI_TA 0x0001
-#define LMC_MII16_HSSI_CA 0x0002
-#define LMC_MII16_HSSI_LA 0x0004
-#define LMC_MII16_HSSI_LB 0x0008
-#define LMC_MII16_HSSI_LC 0x0010
-#define LMC_MII16_HSSI_TM 0x0020
-#define LMC_MII16_HSSI_CRC 0x0040
-
-/*
- * assignments for the MII register 16 (DS3)
- */
-#define LMC_MII16_DS3_ZERO 0x0001
-#define LMC_MII16_DS3_TRLBK 0x0002
-#define LMC_MII16_DS3_LNLBK 0x0004
-#define LMC_MII16_DS3_RAIS 0x0008
-#define LMC_MII16_DS3_TAIS 0x0010
-#define LMC_MII16_DS3_BIST 0x0020
-#define LMC_MII16_DS3_DLOS 0x0040
-#define LMC_MII16_DS3_CRC 0x1000
-#define LMC_MII16_DS3_SCRAM 0x2000
-#define LMC_MII16_DS3_SCRAM_LARS 0x4000
-
-/* Note: 2 pairs of LEDs were swapped by mistake
- * in the Xilinx code for DS3 & DS1 adapters */
-#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */
-#define LMC_DS3_LED1 0x0080 /* bit 07 blue */
-#define LMC_DS3_LED2 0x0400 /* bit 10 green */
-#define LMC_DS3_LED3 0x0200 /* bit 09 red */
-
-/*
- * framer register 0 and 7 (7 is latched and reset on read)
- */
-#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */
-#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */
-#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */
-#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */
-#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */
-
-/*
- * Framer register 9 contains the blue alarm signal
- */
-#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */
-
-/*
- * Framer register 0x10 contains xbit error
- */
-#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */
-
-/*
- * And SSI, LMC1000
- */
-#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */
-#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */
-#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */
-#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */
-#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */
-#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */
-#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */
-
-/*
- * bits 0x0080 through 0x0800 are generic, and described
- * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
- */
-#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */
-#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */
-#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */
-#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */
-
-/*
- * Some of the MII16 bits are mirrored in the MII17 register as well,
- * but let's keep thing separate for now, and get only the cable from
- * the MII17.
- */
-#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */
-#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */
-
-/*
- * And T1, LMC1200
- */
-#define LMC_MII16_T1_UNUSED1 0x0003
-#define LMC_MII16_T1_XOE 0x0004
-#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */
-#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */
-#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */
-#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - ro */
-
-#define LMC_MII16_T1_LED0 0x0100
-#define LMC_MII16_T1_LED1 0x0080
-#define LMC_MII16_T1_LED2 0x0400
-#define LMC_MII16_T1_LED3 0x0200
-#define LMC_MII16_T1_FIFO_RESET 0x0800
-
-#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */
-#define LMC_MII16_T1_UNUSED2 0xe000
-
-
-/* 8370 framer registers */
-
-#define T1FRAMER_ALARM1_STATUS 0x47
-#define T1FRAMER_ALARM2_STATUS 0x48
-#define T1FRAMER_FERR_LSB 0x50
-#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */
-#define T1FRAMER_LCV_LSB 0x54
-#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */
-#define T1FRAMER_AERR 0x5A
-
-/* mask for the above AERR register */
-#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */
-#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */
-#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */
-
-/* 8370 framer register ALM1 (0x47) values
- * used to determine link status
- */
-
-#define T1F_SIGFRZ 0x01 /* signaling freeze */
-#define T1F_RLOF 0x02 /* receive loss of frame alignment */
-#define T1F_RLOS 0x04 /* receive loss of signal */
-#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */
-#define T1F_RAIS 0x10 /* receive alarm indication signal */
-#define T1F_UNUSED 0x20
-#define T1F_RYEL 0x40 /* receive yellow alarm */
-#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */
-
-#define LMC_T1F_WRITE 0
-#define LMC_T1F_READ 1
-
-typedef struct lmc_st1f_control {
- int command;
- int address;
- int value;
- char __user *data;
-} lmc_t1f_control;
-
-enum lmc_xilinx_c {
- lmc_xilinx_reset = 1,
- lmc_xilinx_load_prom = 2,
- lmc_xilinx_load = 3
-};
-
-struct lmc_xilinx_control {
- enum lmc_xilinx_c command;
- int len;
- char __user *data;
-};
-
-/* ------------------ end T1 defs ------------------- */
-
-#define LMC_MII_LedMask 0x0780
-#define LMC_MII_LedBitPos 7
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
deleted file mode 100644
index 6a142dc85c37..000000000000
--- a/drivers/net/wan/lmc/lmc_main.c
+++ /dev/null
@@ -1,2008 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- *
- * With Help By:
- * David Boggs
- * Ron Crane
- * Alan Cox
- *
- * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
- *
- * To control link specific options lmcctl is required.
- * It can be obtained from ftp.lanmedia.com.
- *
- * Linux driver notes:
- * Linux uses the device struct lmc_private to pass private information
- * around.
- *
- * The initialization portion of this driver consists of the lmc_reset()
- * and lmc_dec_reset() functions, as well as the LED controls and the
- * lmc_initcsrs() function.
- *
- * The watchdog function runs every second and checks to see if
- * we still have link, and that the timing source is what we expected
- * it to be. If link is lost, the interface is marked down, and
- * we can no longer transmit.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-//#include <asm/spinlock.h>
-
-#define DRIVER_MAJOR_VERSION 1
-#define DRIVER_MINOR_VERSION 34
-#define DRIVER_SUB_VERSION 0
-
-#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-#include "lmc_proto.h"
-
-static int LMC_PKT_BUF_SZ = 1542;
-
-static const struct pci_device_id lmc_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_VENDOR_ID_LMC, PCI_ANY_ID },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_VENDOR_ID_LMC },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
-MODULE_LICENSE("GPL v2");
-
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static int lmc_rx (struct net_device *dev);
-static int lmc_open(struct net_device *dev);
-static int lmc_close(struct net_device *dev);
-static struct net_device_stats *lmc_get_stats(struct net_device *dev);
-static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
-static void lmc_softreset(lmc_softc_t * const);
-static void lmc_running_reset(struct net_device *dev);
-static int lmc_ifdown(struct net_device * const);
-static void lmc_watchdog(struct timer_list *t);
-static void lmc_reset(lmc_softc_t * const sc);
-static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
-
-/*
- * linux reserves 16 device specific IOCTLs. We call them
- * LMCIOC* to control various bits of our world.
- */
-static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- lmc_ctl_t ctl;
- int ret = -EOPNOTSUPP;
- u16 regVal;
- unsigned long flags;
-
- /*
- * Most commands modify shared driver state, so disable
- * interrupts while we touch it.
- */
-
- switch (cmd) {
- /*
- * Return current driver state. Since we keep this up
- * to date internally, just copy this out to the user.
- */
- case LMCIOCGINFO: /*fold01*/
- if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
-
- case LMCIOCSINFO: /*fold01*/
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- if(dev->flags & IFF_UP){
- ret = -EBUSY;
- break;
- }
-
- if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
- ret = -EFAULT;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_media->set_status (sc, &ctl);
-
- if(ctl.crc_length != sc->ictl.crc_length) {
- sc->lmc_media->set_crc_length(sc, ctl.crc_length);
- if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
- sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
- else
- sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0;
- break;
-
- case LMCIOCIFTYPE: /*fold01*/
- {
- u16 old_type = sc->if_type;
- u16 new_type;
-
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- if (copy_from_user(&new_type, data, sizeof(u16))) {
- ret = -EFAULT;
- break;
- }
-
-
- if (new_type == old_type)
- {
- ret = 0;
- break; /* no change */
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_proto_close(sc);
-
- sc->if_type = new_type;
- lmc_proto_attach(sc);
- ret = lmc_proto_open(sc);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- break;
- }
-
- case LMCIOCGETXINFO: /*fold01*/
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
-
- sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
- sc->lmc_xinfo.PciSlotNumber = 0;
- sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
- sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
- sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
- sc->lmc_xinfo.XilinxRevisionNumber =
- lmc_mii_readreg (sc, 0, 3) & 0xf;
- sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
- sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
- sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
-
- if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
- ret = -EFAULT;
- else
- ret = 0;
-
- break;
-
- case LMCIOCGETLMCSTATS:
- spin_lock_irqsave(&sc->lmc_lock, flags);
- if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
- sc->extra_stats.framingBitErrorCount +=
- lmc_mii_readreg(sc, 0, 18) & 0xff;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
- sc->extra_stats.framingBitErrorCount +=
- (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
- sc->extra_stats.lineCodeViolationCount +=
- lmc_mii_readreg(sc, 0, 18) & 0xff;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
- sc->extra_stats.lineCodeViolationCount +=
- (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
- regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
-
- sc->extra_stats.lossOfFrameCount +=
- (regVal & T1FRAMER_LOF_MASK) >> 4;
- sc->extra_stats.changeOfFrameAlignmentCount +=
- (regVal & T1FRAMER_COFA_MASK) >> 2;
- sc->extra_stats.severelyErroredFrameCount +=
- regVal & T1FRAMER_SEF_MASK;
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- if (copy_to_user(data, &sc->lmc_device->stats,
- sizeof(sc->lmc_device->stats)) ||
- copy_to_user(data + sizeof(sc->lmc_device->stats),
- &sc->extra_stats, sizeof(sc->extra_stats)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
-
- case LMCIOCCLEARLMCSTATS:
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
- memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
- sc->extra_stats.check = STATCHECK;
- sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
- sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
- sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- ret = 0;
- break;
-
- case LMCIOCSETCIRCUIT: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- if(dev->flags & IFF_UP){
- ret = -EBUSY;
- break;
- }
-
- if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
- ret = -EFAULT;
- break;
- }
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
- sc->ictl.circuit_type = ctl.circuit_type;
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- ret = 0;
-
- break;
-
- case LMCIOCRESET: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- /* Reset driver and bring back to current state */
- printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
- lmc_running_reset (dev);
- printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
-
- LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0;
- break;
-
-#ifdef DEBUG
- case LMCIOCDUMPEVENTLOG:
- if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
- ret = -EFAULT;
- break;
- }
- if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
- sizeof(lmcEventLogBuf)))
- ret = -EFAULT;
- else
- ret = 0;
-
- break;
-#endif /* DEBUG */
- case LMCIOCT1CONTROL: /*fold01*/
- if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
- ret = -EOPNOTSUPP;
- break;
- }
- break;
- case LMCIOCXILINX: /*fold01*/
- {
- struct lmc_xilinx_control xc; /*fold02*/
-
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- /*
- * Stop the transmitter while we restart the hardware
- */
- netif_stop_queue(dev);
-
- if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
- ret = -EFAULT;
- break;
- }
- switch(xc.command){
- case lmc_xilinx_reset: /*fold02*/
- {
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_mii_readreg (sc, 0, 16);
-
- /*
- * Make all of them 0 and make input
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * make the reset output
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
- /*
- * RESET low to force configuration. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
-
- sc->lmc_gpio &= ~LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- sc->lmc_gpio |= LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /* Reset the framer hardware */
- sc->lmc_media->set_link_status (sc, 1);
- sc->lmc_media->set_status (sc, NULL);
-// lmc_softreset(sc);
-
- {
- int i;
- for(i = 0; i < 5; i++){
- lmc_led_on(sc, LMC_DS3_LED0);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED0);
- lmc_led_on(sc, LMC_DS3_LED1);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED1);
- lmc_led_on(sc, LMC_DS3_LED3);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED3);
- lmc_led_on(sc, LMC_DS3_LED2);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED2);
- }
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-
-
- ret = 0x0;
-
- }
-
- break;
- case lmc_xilinx_load_prom: /*fold02*/
- {
- int timeout = 500000;
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_mii_readreg (sc, 0, 16);
-
- /*
- * Make all of them 0 and make input
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * make the reset output
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * RESET low to force configuration. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
-
- sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- /*
- * busy wait for the chip to reset
- */
- while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
- (timeout-- > 0))
- cpu_relax();
-
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, 0xff);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0x0;
-
-
- break;
-
- }
-
- case lmc_xilinx_load: /*fold02*/
- {
- char *data; /* note: shadows this function's __user data argument */
- int pos;
- int timeout = 500000;
-
- if (!xc.data) {
- ret = -EINVAL;
- break;
- }
-
- data = memdup_user(xc.data, xc.len);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
-
- printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * Clear the Xilinx and start programming from the DEC
- */
-
- /*
- * Set output as:
- * Reset: 0 (active)
- * DP: 0 (active)
- * Mode: 1
- *
- */
- sc->lmc_gpio = 0x00;
- sc->lmc_gpio &= ~LMC_GEP_DP;
- sc->lmc_gpio &= ~LMC_GEP_RESET;
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * Wait at least 10 us; 50 to be safe
- */
- udelay(50);
-
- /*
- * Clear reset and activate programming lines
- * Reset: Input
- * DP: Input
- * Clock: Output
- * Data: Output
- * Mode: Output
- */
- lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * Set MODE, DATA and CLK to 1
- */
- sc->lmc_gpio = 0x00;
- sc->lmc_gpio |= LMC_GEP_MODE;
- sc->lmc_gpio |= LMC_GEP_DATA;
- sc->lmc_gpio |= LMC_GEP_CLK;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
-
- /*
- * busy wait for the chip to reset
- */
- while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
- (timeout-- > 0))
- cpu_relax();
-
- printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
-
- for(pos = 0; pos < xc.len; pos++){
- switch(data[pos]){
- case 0:
- sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
- break;
- case 1:
- sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
- break;
- default:
- printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
- sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
- }
- sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
- udelay(1);
-
- sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
- udelay(1);
- }
- if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
- printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
- }
- else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
- printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
- }
- else {
- printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
- }
-
- lmc_gpio_mkinput(sc, 0xff);
-
- sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- kfree(data);
-
- ret = 0;
-
- break;
- }
- default: /*fold02*/
- ret = -EBADE;
- break;
- }
-
- netif_wake_queue(dev);
- sc->lmc_txfull = 0;
-
- }
- break;
- default:
- break;
- }
-
- return ret;
-}
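-
-/*
- * For reference, a userspace tool such as lmcctl reaches the handler
- * above through the device-private ioctl range. A minimal sketch,
- * assuming LMCIOCGINFO and lmc_ctl_t from lmc_ioctl.h (the interface
- * name is illustrative and error handling is trimmed):
- *
- *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
- *   struct ifreq ifr;
- *   lmc_ctl_t ctl;
- *
- *   strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
- *   ifr.ifr_data = (char *)&ctl;
- *   if (ioctl(fd, LMCIOCGINFO, &ifr) == 0)
- *           printf("clock source: %d\n", ctl.clock_source);
- */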
-
-
-/* the watchdog process that cruises around */
-static void lmc_watchdog(struct timer_list *t) /*fold00*/
-{
- lmc_softc_t *sc = from_timer(sc, t, timer);
- struct net_device *dev = sc->lmc_device;
- int link_status;
- u32 ticks;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- if(sc->check != 0xBEAFCAFE){
- printk("LMC: Corrupt net_device struct, breaking out\n");
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- return;
- }
-
-
- /* Make sure the tx jabber and rx watchdog are off,
- * and the transmit and receive processes are running.
- */
-
- LMC_CSR_WRITE (sc, csr_15, 0x00000011);
- sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- if (sc->lmc_ok == 0)
- goto kick_timer;
-
- LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
-
- /* --- begin time out check -----------------------------------
- * check for a transmit interrupt timeout:
- * has the packets-transmitted vs packets-serviced threshold been exceeded? */
- if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd == 0)
- {
-
- /* wait for the watchdog to come around again */
- sc->tx_TimeoutInd = 1;
- }
- else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd)
- {
-
- LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
-
- sc->tx_TimeoutDisplay = 1;
- sc->extra_stats.tx_TimeoutCnt++;
-
- /* DEC chip is stuck, hit it with a RESET!!!! */
- lmc_running_reset (dev);
-
-
- /* look at receive & transmit process state to make sure they are running */
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-
- /* look at: DSR - 02 for Reg 16
- * CTS - 08
- * DCD - 10
- * RI - 20
- * for Reg 17
- */
- LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
-
- /* reset the transmit timeout detection flag */
- sc->tx_TimeoutInd = 0;
- sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
- } else {
- sc->tx_TimeoutInd = 0;
- sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
- }
-
- /* --- end time out check ----------------------------------- */
-
-
- link_status = sc->lmc_media->get_link_status (sc);
-
- /*
- * hardware level link lost, but the interface is marked as up.
- * Mark it as down.
- */
- if ((link_status == 0) && (sc->last_link_status != 0)) {
- printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
- sc->last_link_status = 0;
- /* lmc_reset (sc); Why reset??? The link can go down ok */
-
- /* Inform the world that link has been lost */
- netif_carrier_off(dev);
- }
-
- /*
- * hardware link is up, but the interface is marked as down.
- * Bring it back up again.
- */
- if (link_status != 0 && sc->last_link_status == 0) {
- printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
- sc->last_link_status = 1;
- /* lmc_reset (sc); Again why reset??? */
-
- netif_carrier_on(dev);
- }
-
- /* Call media specific watchdog functions */
- sc->lmc_media->watchdog(sc);
-
- /*
- * Poke the receive poll demand so reception never
- * stops, even if we ran out of memory for buffers
- */
- LMC_CSR_WRITE(sc, csr_rxpoll, 0);
-
- /*
- * Check for code that failed
- * and try and fix it as appropriate
- */
- if(sc->failed_ring == 1){
- /*
- * Failed to set up the recv/xmit ring;
- * try again
- */
- sc->failed_ring = 0;
- lmc_softreset(sc);
- }
- if(sc->failed_recv_alloc == 1){
- /*
- * We failed to alloc mem in the
- * interrupt handler, go through the rings
- * and rebuild them
- */
- sc->failed_recv_alloc = 0;
- lmc_softreset(sc);
- }
-
-
- /*
- * remember the timer value
- */
-kick_timer:
-
- ticks = LMC_CSR_READ (sc, csr_gp_timer);
- LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
- sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
- /*
- * restart this timer.
- */
- sc->timer.expires = jiffies + (HZ);
- add_timer (&sc->timer);
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
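-
-/*
- * The watchdog above follows the kernel's usual self-rearming timer
- * pattern; a minimal sketch of the same idea with hypothetical names:
- *
- *   static void my_watchdog(struct timer_list *t)
- *   {
- *           struct my_priv *p = from_timer(p, t, timer);
- *
- *           ...periodic work...
- *           mod_timer(&p->timer, jiffies + HZ);  // run again in 1s
- *   }
- *
- * lmc_watchdog() re-arms with an explicit expires + add_timer()
- * instead of mod_timer(); the effect is the same.
- */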
-
-static int lmc_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops lmc_ops = {
- .ndo_open = lmc_open,
- .ndo_stop = lmc_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_siocdevprivate = lmc_siocdevprivate,
- .ndo_tx_timeout = lmc_driver_timeout,
- .ndo_get_stats = lmc_get_stats,
-};
-
-static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- lmc_softc_t *sc;
- struct net_device *dev;
- u16 subdevice;
- u16 AdapModelNum;
- int err;
- static int cards_found;
-
- err = pcim_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
- return err;
- }
-
- err = pci_request_regions(pdev, "lmc");
- if (err) {
- printk(KERN_ERR "lmc: pci_request_region failed\n");
- return err;
- }
-
- /*
- * Allocate our own device structure
- */
- sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
- if (!sc)
- return -ENOMEM;
-
- dev = alloc_hdlcdev(sc);
- if (!dev) {
- printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
- return -ENOMEM;
- }
-
-
- dev->type = ARPHRD_HDLC;
- dev_to_hdlc(dev)->xmit = lmc_start_xmit;
- dev_to_hdlc(dev)->attach = lmc_attach;
- dev->netdev_ops = &lmc_ops;
- dev->watchdog_timeo = HZ; /* 1 second */
- dev->tx_queue_len = 100;
- sc->lmc_device = dev;
- sc->name = dev->name;
- sc->if_type = LMC_PPP;
- sc->check = 0xBEAFCAFE;
- dev->base_addr = pci_resource_start(pdev, 0);
- dev->irq = pdev->irq;
- pci_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /*
- * This will get the protocol layer ready and do any 1 time init's
- * Must have a valid sc and dev structure
- */
- lmc_proto_attach(sc);
-
- /* Initialize the spin lock so we can use it later */
-
- spin_lock_init(&sc->lmc_lock);
- pci_set_master(pdev);
-
- printk(KERN_INFO "hdlc: detected at %lx, irq %d\n",
- dev->base_addr, dev->irq);
-
- err = register_hdlc_device(dev);
- if (err) {
- printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
- free_netdev(dev);
- return err;
- }
-
- sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
-
- /*
- * Check either the subvendor or the subdevice; some systems reverse
- * the two settings in the BIOS, apparently depending on BIOS version
- * and architecture. Work around it by swapping the two values.
- */
- if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
- subdevice = pdev->subsystem_vendor;
-
- switch (subdevice) {
- case PCI_DEVICE_ID_LMC_HSSI:
- printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
- sc->lmc_media = &lmc_hssi_media;
- break;
- case PCI_DEVICE_ID_LMC_DS3:
- printk(KERN_INFO "%s: LMC DS3\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_DS3;
- sc->lmc_media = &lmc_ds3_media;
- break;
- case PCI_DEVICE_ID_LMC_SSI:
- printk(KERN_INFO "%s: LMC SSI\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_SSI;
- sc->lmc_media = &lmc_ssi_media;
- break;
- case PCI_DEVICE_ID_LMC_T1:
- printk(KERN_INFO "%s: LMC T1\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_T1;
- sc->lmc_media = &lmc_t1_media;
- break;
- default:
- printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
- unregister_hdlc_device(dev);
- return -EIO;
- }
-
- lmc_initcsrs (sc, dev->base_addr, 8);
-
- lmc_gpio_mkinput (sc, 0xff);
- sc->lmc_gpio = 0; /* drive no signals yet */
-
- sc->lmc_media->defaults (sc);
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
- /* verify that the PCI Sub System ID matches the Adapter Model number
- * from the MII register
- */
- AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
-
- if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
- subdevice != PCI_DEVICE_ID_LMC_T1) &&
- (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
- subdevice != PCI_DEVICE_ID_LMC_SSI) &&
- (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
- subdevice != PCI_DEVICE_ID_LMC_DS3) &&
- (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
- subdevice != PCI_DEVICE_ID_LMC_HSSI))
- printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
- " Subsystem ID = 0x%04x\n",
- dev->name, AdapModelNum, subdevice);
-
- /*
- * reset clock
- */
- LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
-
- sc->board_idx = cards_found++;
- sc->extra_stats.check = STATCHECK;
- sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
- sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
- sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
-
- sc->lmc_ok = 0;
- sc->last_link_status = 0;
-
- return 0;
-}
-
-/*
- * Called from pci when removing module.
- */
-static void lmc_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- printk(KERN_DEBUG "%s: removing...\n", dev->name);
- unregister_hdlc_device(dev);
- free_netdev(dev);
- }
-}
-
-/* After this is called, packets can be sent.
- * Does not initialize the addresses
- */
-static int lmc_open(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- int err;
-
- lmc_led_on(sc, LMC_DS3_LED0);
-
- lmc_dec_reset(sc);
- lmc_reset(sc);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
- lmc_mii_readreg(sc, 0, 17));
-
- if (sc->lmc_ok)
- return 0;
-
- lmc_softreset (sc);
-
- /* Since we have to use the PCI bus, this should work on x86, alpha, ppc */
- if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
- printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- sc->got_irq = 1;
-
- /* Assert Terminal Active */
- sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
- sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
- /*
- * reset to last state.
- */
- sc->lmc_media->set_status (sc, NULL);
-
- /* setup default bits to be used in tulip_desc_t transmit descriptor
- * -baz */
- sc->TxDescriptControlInit = (
- LMC_TDES_INTERRUPT_ON_COMPLETION
- | LMC_TDES_FIRST_SEGMENT
- | LMC_TDES_LAST_SEGMENT
- | LMC_TDES_SECOND_ADDR_CHAINED
- | LMC_TDES_DISABLE_PADDING
- );
-
- if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
- /* disable 32 bit CRC generated by ASIC */
- sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
- }
- sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
- /* Acknowledge the Terminal Active and light LEDs */
-
- /* dev->flags |= IFF_UP; */
-
- if ((err = lmc_proto_open(sc)) != 0)
- return err;
-
- netif_start_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
- /*
- * select what interrupts we want to get
- */
- sc->lmc_intrmask = 0;
- /* Should be using the default interrupt mask defined in the .h file. */
- sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
- | TULIP_STS_RXINTR
- | TULIP_STS_TXINTR
- | TULIP_STS_ABNRMLINTR
- | TULIP_STS_SYSERROR
- | TULIP_STS_TXSTOPPED
- | TULIP_STS_TXUNDERFLOW
- | TULIP_STS_RXSTOPPED
- | TULIP_STS_RXNOBUF
- );
- LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
- sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
- sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- sc->lmc_ok = 1; /* Run watchdog */
-
- /*
- * Set the if up now - pfb
- */
-
- sc->last_link_status = 1;
-
- /*
- * Set up a timer for the watchdog and start it running.
- */
- timer_setup(&sc->timer, lmc_watchdog, 0);
- sc->timer.expires = jiffies + HZ;
- add_timer (&sc->timer);
-
- return 0;
-}
-
-/* Total reset to compensate for the AdTran DSU doing bad things
- * under heavy load
- */
-
-static void lmc_running_reset (struct net_device *dev) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
-
- /* stop interrupts */
- /* Clear the interrupt mask */
- LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
- lmc_dec_reset (sc);
- lmc_reset (sc);
- lmc_softreset (sc);
- /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
- sc->lmc_media->set_link_status (sc, 1);
- sc->lmc_media->set_status (sc, NULL);
-
- netif_wake_queue(dev);
-
- sc->lmc_txfull = 0;
- sc->extra_stats.tx_tbusy0++;
-
- sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
- LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
- sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-}
-
-
-/* This is what is called when you ifconfig down a device.
- * This disables the timer for the watchdog and keepalives,
- * and disables the irq for dev.
- */
-static int lmc_close(struct net_device *dev)
-{
- /* not calling release_region() as we should */
- lmc_softc_t *sc = dev_to_sc(dev);
-
- sc->lmc_ok = 0;
- sc->lmc_media->set_link_status (sc, 0);
- del_timer (&sc->timer);
- lmc_proto_close(sc);
- lmc_ifdown (dev);
-
- return 0;
-}
-
-/* Ends the transfer of packets */
-/* When the interface goes down, this is called */
-static int lmc_ifdown (struct net_device *dev) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr6;
- int i;
-
- /* Don't let anything else go on right now */
- // dev->start = 0;
- netif_stop_queue(dev);
- sc->extra_stats.tx_tbusy1++;
-
- /* stop interrupts */
- /* Clear the interrupt mask */
- LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
- /* Stop Tx and Rx on the chip */
- csr6 = LMC_CSR_READ (sc, csr_command);
- csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
- csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
- LMC_CSR_WRITE (sc, csr_command, csr6);
-
- sc->lmc_device->stats.rx_missed_errors +=
- LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
- /* release the interrupt */
- if(sc->got_irq == 1){
- free_irq (dev->irq, dev);
- sc->got_irq = 0;
- }
-
- /* free skbuffs in the Rx queue */
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- struct sk_buff *skb = sc->lmc_rxq[i];
- sc->lmc_rxq[i] = NULL;
- sc->lmc_rxring[i].status = 0;
- sc->lmc_rxring[i].length = 0;
- sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
- if (skb != NULL)
- dev_kfree_skb(skb);
- sc->lmc_rxq[i] = NULL;
- }
-
- for (i = 0; i < LMC_TXDESCS; i++)
- {
- if (sc->lmc_txq[i] != NULL)
- dev_kfree_skb(sc->lmc_txq[i]);
- sc->lmc_txq[i] = NULL;
- }
-
- lmc_led_off (sc, LMC_MII16_LED_ALL);
-
- netif_wake_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
- return 0;
-}
-
-/* Interrupt handling routine. This will take an incoming packet, or clean
- * up after a transmit.
- */
-static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
-{
- struct net_device *dev = (struct net_device *) dev_instance;
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr, firstcsr;
- int i;
- s32 stat;
- unsigned int badtx;
- int max_work = LMC_RXDESCS;
- int handled = 0;
-
- spin_lock(&sc->lmc_lock);
-
- /*
- * Read the csr to find what interrupts we have (if any)
- */
- csr = firstcsr = LMC_CSR_READ (sc, csr_status);
-
- /*
- * Make sure this is our interrupt
- */
- if ( ! (csr & sc->lmc_intrmask)) {
- goto lmc_int_fail_out;
- }
-
- /* always go through this loop at least once */
- while (csr & sc->lmc_intrmask) {
- handled = 1;
-
- /*
- * Clear interrupt bits, we handle all cases below
- */
- LMC_CSR_WRITE (sc, csr_status, csr);
-
- /*
- * One of
- * - Transmit process timed out CSR5<1>
- * - Transmit jabber timeout CSR5<3>
- * - Transmit underflow CSR5<5>
- * - Transmit Receiver buffer unavailable CSR5<7>
- * - Receive process stopped CSR5<8>
- * - Receive watchdog timeout CSR5<9>
- * - Early transmit interrupt CSR5<10>
- *
- * Is this really right? Should we do a running reset for jabber?
- * (being a WAN card and all)
- */
- if (csr & TULIP_STS_ABNRMLINTR){
- lmc_running_reset (dev);
- break;
- }
-
- if (csr & TULIP_STS_RXINTR)
- lmc_rx (dev);
-
- if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
-
- int n_compl = 0 ;
- /* reset the transmit timeout detection flag -baz */
- sc->extra_stats.tx_NoCompleteCnt = 0;
-
- badtx = sc->lmc_taint_tx;
- i = badtx % LMC_TXDESCS;
-
- while ((badtx < sc->lmc_next_tx)) {
- stat = sc->lmc_txring[i].status;
-
- LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
- sc->lmc_txring[i].length);
- /*
- * If bit 31 is 1, the Tulip still owns it: break out of the loop
- */
- if (stat & 0x80000000)
- break;
-
- n_compl++ ; /* i.e., have an empty slot in ring */
- /*
- * If we have no skbuff or have already
- * cleared it, continue to the next buffer
- */
- if (sc->lmc_txq[i] == NULL)
- continue;
-
- /*
- * Check the total error summary to look for any errors
- */
- if (stat & 0x8000) {
- sc->lmc_device->stats.tx_errors++;
- if (stat & 0x4104)
- sc->lmc_device->stats.tx_aborted_errors++;
- if (stat & 0x0C00)
- sc->lmc_device->stats.tx_carrier_errors++;
- if (stat & 0x0200)
- sc->lmc_device->stats.tx_window_errors++;
- if (stat & 0x0002)
- sc->lmc_device->stats.tx_fifo_errors++;
- } else {
- sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
-
- sc->lmc_device->stats.tx_packets++;
- }
-
- dev_consume_skb_irq(sc->lmc_txq[i]);
- sc->lmc_txq[i] = NULL;
-
- badtx++;
- i = badtx % LMC_TXDESCS;
- }
-
- if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
- {
- printk ("%s: out of sync pointer\n", dev->name);
- badtx += LMC_TXDESCS;
- }
- LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
- sc->lmc_txfull = 0;
- netif_wake_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
-
-#ifdef DEBUG
- sc->extra_stats.dirtyTx = badtx;
- sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
- sc->extra_stats.lmc_txfull = sc->lmc_txfull;
-#endif
- sc->lmc_taint_tx = badtx;
-
- /*
- * Why was there a break here???
- */
- } /* end handle transmit interrupt */
-
- if (csr & TULIP_STS_SYSERROR) {
- u32 error;
- printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
- error = csr>>23 & 0x7;
- switch(error){
- case 0x000:
- printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
- break;
- case 0x001:
- printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
- break;
- case 0x002:
- printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
- break;
- default:
- printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
- }
- lmc_dec_reset (sc);
- lmc_reset (sc);
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
-
- }
-
-
- if(max_work-- <= 0)
- break;
-
- /*
- * Get current csr status to make sure
- * we've cleared all interrupts
- */
- csr = LMC_CSR_READ (sc, csr_status);
- } /* end interrupt loop */
- LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
-
-lmc_int_fail_out:
-
- spin_unlock(&sc->lmc_lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 flag;
- int entry;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- /* normal path, tbusy known to be zero */
-
- entry = sc->lmc_next_tx % LMC_TXDESCS;
-
- sc->lmc_txq[entry] = skb;
- sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
-
- LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
-
-#ifndef GCOM
- /* If the queue is less than half full, don't interrupt */
- if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
- {
- /* Do not interrupt on completion of this packet */
- flag = 0x60000000;
- netif_wake_queue(dev);
- }
- else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
- {
- /* This generates an interrupt on completion of this packet */
- flag = 0xe0000000;
- netif_wake_queue(dev);
- }
- else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
- {
- /* Do not interrupt on completion of this packet */
- flag = 0x60000000;
- netif_wake_queue(dev);
- }
- else
- {
- /* This generates an interrupt on completion of this packet */
- flag = 0xe0000000;
- sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- }
-#else
- flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
-
- if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
- { /* ring full, go busy */
- sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- sc->extra_stats.tx_tbusy1++;
- LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
- }
-#endif
-
-
- if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
- flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */
-
- /* don't pad small packets either */
- flag = sc->lmc_txring[entry].length = (skb->len) | flag |
- sc->TxDescriptControlInit;
-
- /* set the transmit timeout flag to be checked in
- * the watchdog timer handler. -baz
- */
-
- sc->extra_stats.tx_NoCompleteCnt++;
- sc->lmc_next_tx++;
-
- /* give ownership to the chip */
- LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
- sc->lmc_txring[entry].status = 0x80000000;
-
- /* send now! */
- LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- return NETDEV_TX_OK;
-}
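-
-/*
- * The 0x60000000 / 0xe0000000 values in lmc_start_xmit() are DC21x4
- * TDES1 control bits: bit 30 (last segment) | bit 29 (first segment),
- * with bit 31 (interrupt on completion) added in the 0xe0000000 case.
- * In effect a completion interrupt is requested only when the ring
- * reaches the half-full mark or fills completely, batching
- * transmit-complete work instead of taking one interrupt per packet.
- */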
-
-
-static int lmc_rx(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- int i;
- int rx_work_limit = LMC_RXDESCS;
- int rxIntLoopCnt; /* debug -baz */
- int localLengthErrCnt = 0;
- long stat;
- struct sk_buff *skb, *nsb;
- u16 len;
-
- lmc_led_on(sc, LMC_DS3_LED3);
-
- rxIntLoopCnt = 0; /* debug -baz */
-
- i = sc->lmc_next_rx % LMC_RXDESCS;
-
- while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
- {
- rxIntLoopCnt++; /* debug -baz */
- len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
- if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
- if ((stat & 0x0000ffff) != 0x7fff) {
- /* Oversized frame */
- sc->lmc_device->stats.rx_length_errors++;
- goto skip_packet;
- }
- }
-
- if (stat & 0x00000008) { /* Catch a dribbling bit error */
- sc->lmc_device->stats.rx_errors++;
- sc->lmc_device->stats.rx_frame_errors++;
- goto skip_packet;
- }
-
-
- if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
- sc->lmc_device->stats.rx_errors++;
- sc->lmc_device->stats.rx_crc_errors++;
- goto skip_packet;
- }
-
- if (len > LMC_PKT_BUF_SZ) {
- sc->lmc_device->stats.rx_length_errors++;
- localLengthErrCnt++;
- goto skip_packet;
- }
-
- if (len < sc->lmc_crcSize + 2) {
- sc->lmc_device->stats.rx_length_errors++;
- sc->extra_stats.rx_SmallPktCnt++;
- localLengthErrCnt++;
- goto skip_packet;
- }
-
- if(stat & 0x00004000){
- printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
- }
-
- len -= sc->lmc_crcSize;
-
- skb = sc->lmc_rxq[i];
-
- /*
- * We ran out of memory at some point;
- * just allocate an skb and continue.
- */
-
- if (!skb) {
- nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if (nsb) {
- sc->lmc_rxq[i] = nsb;
- nsb->dev = dev;
- sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
- }
- sc->failed_recv_alloc = 1;
- goto skip_packet;
- }
-
- sc->lmc_device->stats.rx_packets++;
- sc->lmc_device->stats.rx_bytes += len;
-
- LMC_CONSOLE_LOG("recv", skb->data, len);
-
- /*
- * I'm not sure of the sanity of this:
- * packets could be arriving at a constant
- * 44.21 Mbit/s and we're going to copy
- * them into a new buffer??
- */
-
- if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
- /*
- * If it's a large packet don't copy it just hand it up
- */
- give_it_anyways:
-
- sc->lmc_rxq[i] = NULL;
- sc->lmc_rxring[i].buffer1 = 0x0;
-
- skb_put (skb, len);
- skb->protocol = lmc_proto_type(sc, skb);
- skb_reset_mac_header(skb);
- /* skb_reset_network_header(skb); */
- skb->dev = dev;
- lmc_proto_netif(sc, skb);
-
- /*
- * This skb will be destroyed by the upper layers, make a new one
- */
- nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if (nsb) {
- sc->lmc_rxq[i] = nsb;
- nsb->dev = dev;
- sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
- /* Transferred to 21140 below */
- }
- else {
- /*
- * We've run out of memory; stop trying to allocate
- * memory and exit the interrupt handler.
- *
- * The chip may run out of receive buffers and stall,
- * in which case we'll try to allocate the buffer
- * again (once a second).
- */
- sc->extra_stats.rx_BuffAllocErr++;
- LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
- sc->failed_recv_alloc = 1;
- goto skip_out_of_mem;
- }
- }
- else {
- nsb = dev_alloc_skb(len);
- if(!nsb) {
- goto give_it_anyways;
- }
- skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
-
- nsb->protocol = lmc_proto_type(sc, nsb);
- skb_reset_mac_header(nsb);
- /* skb_reset_network_header(nsb); */
- nsb->dev = dev;
- lmc_proto_netif(sc, nsb);
- }
-
- skip_packet:
- LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
- sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
-
- sc->lmc_next_rx++;
- i = sc->lmc_next_rx % LMC_RXDESCS;
- rx_work_limit--;
- if (rx_work_limit < 0)
- break;
- }
-
- /* detect condition for LMC1000 where DSU cable attaches and fills
- * descriptors with bogus packets
- *
- if (localLengthErrCnt > LMC_RXDESCS - 3) {
- sc->extra_stats.rx_BadPktSurgeCnt++;
- LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
- sc->extra_stats.rx_BadPktSurgeCnt);
- } */
-
- /* save max count of receive descriptors serviced */
- if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
- sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
-
-#ifdef DEBUG
- if (rxIntLoopCnt == 0)
- {
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
- != DESC_OWNED_BY_DC21X4)
- {
- rxIntLoopCnt++;
- }
- }
- LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
- }
-#endif
-
-
- lmc_led_off(sc, LMC_DS3_LED3);
-
-skip_out_of_mem:
- return 0;
-}
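-
-/*
- * The receive path above is a classic "copybreak" scheme: frames
- * larger than ~3/4 of LMC_MTU are handed up directly and the ring
- * slot is refilled with a fresh full-size buffer, while smaller
- * frames are copied into a right-sized skb so the large ring buffer
- * can be reused immediately. Roughly:
- *
- *   if (len > (LMC_MTU - (LMC_MTU >> 2)))
- *           pass skb up; refill slot with dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
- *   else
- *           copy into dev_alloc_skb(len) and pass the copy up;
- */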
-
-static struct net_device_stats *lmc_get_stats(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- return &sc->lmc_device->stats;
-}
-
-static struct pci_driver lmc_driver = {
- .name = "lmc",
- .id_table = lmc_pci_tbl,
- .probe = lmc_init_one,
- .remove = lmc_remove_one,
-};
-
-module_pci_driver(lmc_driver);
-
-unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
-{
- int i;
- int command = (0xf6 << 10) | (devaddr << 5) | regno;
- int retval = 0;
-
- LMC_MII_SYNC (sc);
-
- for (i = 15; i >= 0; i--)
- {
- int dataval = (command & (1 << i)) ? 0x20000 : 0;
-
- LMC_CSR_WRITE (sc, csr_9, dataval);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- }
-
- for (i = 19; i > 0; i--)
- {
- LMC_CSR_WRITE (sc, csr_9, 0x40000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
- LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- }
-
- return (retval >> 1) & 0xffff;
-}
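-
-/*
- * The command word above encodes a clause-22 MDIO read frame: 0xf6
- * supplies sync bits plus the start (01) and read opcode (10),
- * followed by the 5-bit device address and 5-bit register number.
- * The frame is clocked out MSB first by bit-banging CSR9, then the
- * turnaround and 16 data bits are clocked back in; the final
- * (retval >> 1) & 0xffff drops the extra clocked bit and keeps the
- * 16 data bits.
- */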
-
-void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
-{
- int i = 32;
- int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
-
- LMC_MII_SYNC (sc);
-
- i = 31;
- while (i >= 0)
- {
- int datav;
-
- if (command & (1 << i))
- datav = 0x20000;
- else
- datav = 0x00000;
-
- LMC_CSR_WRITE (sc, csr_9, datav);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- i--;
- }
-
- i = 2;
- while (i > 0)
- {
- LMC_CSR_WRITE (sc, csr_9, 0x40000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, 0x50000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- i--;
- }
-}
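-
-/*
- * The write frame is built the same way: 0x5002 << 16 contributes
- * the start (01) and write opcode (01) bits at the top and the
- * turnaround pattern (10) just above the 16 data bits, with the
- * device address and register number shifted in between. All 32
- * bits are driven out; nothing is read back.
- */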
-
-static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
-{
- int i;
-
- /* Initialize the receive rings and buffers. */
- sc->lmc_txfull = 0;
- sc->lmc_next_rx = 0;
- sc->lmc_next_tx = 0;
- sc->lmc_taint_rx = 0;
- sc->lmc_taint_tx = 0;
-
- /*
- * Setup each one of the receiver buffers
- * allocate an skbuff for each one, setup the descriptor table
- * and point each buffer at the next one
- */
-
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- struct sk_buff *skb;
-
- if (sc->lmc_rxq[i] == NULL)
- {
- skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if(skb == NULL){
- printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
- sc->failed_ring = 1;
- break;
- }
- else{
- sc->lmc_rxq[i] = skb;
- }
- }
- else
- {
- skb = sc->lmc_rxq[i];
- }
-
- skb->dev = sc->lmc_device;
-
- /* owned by 21140 */
- sc->lmc_rxring[i].status = 0x80000000;
-
- /* used to be PKT_BUF_SZ; now taken from the skb, since headroom costs us some space */
- sc->lmc_rxring[i].length = skb_tailroom(skb);
-
- /* this used to use the tail pointer, which looks odd (why point at
- * the end of the packet?), but the skb is empty here, so tail == data
- */
- sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
-
- /* This is fair since the structure is static and we have the next address */
- sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
-
- }
-
- /*
- * Sets end of ring
- */
- if (i != 0) {
- sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
- sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
- }
- LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
-
- /* Initialize the transmit rings and buffers */
- for (i = 0; i < LMC_TXDESCS; i++)
- {
- if (sc->lmc_txq[i] != NULL){ /* have buffer */
- dev_kfree_skb(sc->lmc_txq[i]); /* free it */
- sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
- }
- sc->lmc_txq[i] = NULL;
- sc->lmc_txring[i].status = 0x00000000;
- sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
- }
- sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
- LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
-}
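-
-/*
- * Both rings use the Tulip's "second address chained" descriptor
- * mode: each descriptor's buffer2 points at the next descriptor and
- * the last one points back at the first, so the chip walks
- *
- *   desc[0] -> desc[1] -> ... -> desc[N-1] -> desc[0]
- *
- * by following buffer2 instead of assuming a contiguous array.
- */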
-
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
- sc->lmc_gpio_io &= ~bits;
- LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
- sc->lmc_gpio_io |= bits;
- LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
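-
-/*
- * The 21140's general purpose port multiplexes direction and data:
- * a write with TULIP_GP_PINSET set programs the direction mask
- * (1 = driven as output), while a write without it sets pin data.
- * These helpers therefore only maintain sc->lmc_gpio_io, the mask of
- * pins we currently drive; the data itself goes out via the plain
- * csr_gp writes elsewhere.
- */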
-
-void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
- if ((~sc->lmc_miireg16) & led) /* Already on! */
- return;
-
- sc->lmc_miireg16 &= ~led;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
- if (sc->lmc_miireg16 & led) /* Already off; don't do anything */
- return;
-
- sc->lmc_miireg16 |= led;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
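-
-/*
- * Note the inverted sense in the two helpers above: the LEDs hang
- * off MII register 16 active-low, so clearing a bit turns an LED on
- * and setting it turns it off.
- */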
-
-static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
-{
- sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- /*
- * make some of the GPIO pins be outputs
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
- /*
- * RESET low to force state reset. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
- sc->lmc_gpio &= ~(LMC_GEP_RESET);
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, LMC_GEP_RESET);
-
- /*
- * Call media specific init routine
- */
- sc->lmc_media->init(sc);
-
- sc->extra_stats.resetCount++;
-}
-
-static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
-{
- u32 val;
-
- /*
- * disable all interrupts
- */
- sc->lmc_intrmask = 0;
- LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
-
- /*
- * Reset the chip with a software reset command.
- * The spec calls for waiting 50 PCI cycles; at 33 MHz
- * that is under two microseconds, but wait a bit
- * longer anyway.
- */
- LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
- udelay(25);
-#ifdef __sparc__
- sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
- sc->lmc_busmode = 0x00100000;
- sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
- LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
-#endif
- sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
-
- /*
- * We want:
- * no ethernet address in frames we write
- * disable padding (txdesc, padding disable)
- * ignore runt frames (rdes0 bit 15)
- * no receiver watchdog or transmitter jabber timer
- * (csr15 bit 0,14 == 1)
- * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
- */
-
- sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
- | TULIP_CMD_FULLDUPLEX
- | TULIP_CMD_PASSBADPKT
- | TULIP_CMD_NOHEARTBEAT
- | TULIP_CMD_PORTSELECT
- | TULIP_CMD_RECEIVEALL
- | TULIP_CMD_MUSTBEONE
- );
- sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
- | TULIP_CMD_THRESHOLDCTL
- | TULIP_CMD_STOREFWD
- | TULIP_CMD_TXTHRSHLDCTL
- );
-
- LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
-
- /*
- * disable receiver watchdog and transmit jabber
- */
- val = LMC_CSR_READ(sc, csr_sia_general);
- val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
- LMC_CSR_WRITE(sc, csr_sia_general, val);
-}
-
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
- size_t csr_size)
-{
- sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
- sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
- sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
- sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size;
- sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size;
- sc->lmc_csrs.csr_status = csr_base + 5 * csr_size;
- sc->lmc_csrs.csr_command = csr_base + 6 * csr_size;
- sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size;
- sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
- sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size;
- sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size;
- sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size;
- sc->lmc_csrs.csr_12 = csr_base + 12 * csr_size;
- sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
- sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
- sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
-}
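-
-/*
- * csr_size is the register stride: the Tulip's 32-bit CSRs sit on
- * 8-byte boundaries in I/O space, so CSRn lives at csr_base + n * 8.
- * This matches the lmc_initcsrs(sc, dev->base_addr, 8) call in
- * lmc_init_one().
- */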
-
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr6;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- printk("%s: Xmitter busy|\n", dev->name);
-
- sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
- goto bug_out;
-
- /*
- * Chip seems to have locked up.
- * Reset it: this wipes out all our descriptor
- * tables and starts from scratch.
- */
-
- LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
- LMC_CSR_READ (sc, csr_status),
- sc->extra_stats.tx_ProcTimeout);
-
- lmc_running_reset (dev);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
-
- /* restart the tx processes */
- csr6 = LMC_CSR_READ (sc, csr_command);
- LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
- LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
-
- /* immediate transmit */
- LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
- sc->lmc_device->stats.tx_errors++;
- sc->extra_stats.tx_ProcTimeout++; /* -baz */
-
- netif_trans_update(dev); /* prevent tx timeout */
-
-bug_out:
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
deleted file mode 100644
index ec1ac7b1f3fd..000000000000
--- a/drivers/net/wan/lmc/lmc_media.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/uaccess.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-
-#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-/*
- * protocol independent method.
- */
-static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-static void lmc_ds3_init (lmc_softc_t * const);
-static void lmc_ds3_default (lmc_softc_t * const);
-static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
-static int lmc_ds3_get_link_status (lmc_softc_t * const);
-static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ds3_set_scram (lmc_softc_t * const, int);
-static void lmc_ds3_watchdog (lmc_softc_t * const);
-
-static void lmc_hssi_init (lmc_softc_t * const);
-static void lmc_hssi_default (lmc_softc_t * const);
-static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_hssi_set_clock (lmc_softc_t * const, int);
-static int lmc_hssi_get_link_status (lmc_softc_t * const);
-static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_hssi_watchdog (lmc_softc_t * const);
-
-static void lmc_ssi_init (lmc_softc_t * const);
-static void lmc_ssi_default (lmc_softc_t * const);
-static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ssi_set_clock (lmc_softc_t * const, int);
-static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_ssi_get_link_status (lmc_softc_t * const);
-static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ssi_watchdog (lmc_softc_t * const);
-
-static void lmc_t1_init (lmc_softc_t * const);
-static void lmc_t1_default (lmc_softc_t * const);
-static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_t1_get_link_status (lmc_softc_t * const);
-static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
-static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
-static void lmc_t1_set_clock (lmc_softc_t * const, int);
-static void lmc_t1_watchdog (lmc_softc_t * const);
-
-static void lmc_dummy_set_1 (lmc_softc_t * const, int);
-static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
-
-static inline void write_av9110_bit (lmc_softc_t *, int);
-static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
-
-lmc_media_t lmc_ds3_media = {
- .init = lmc_ds3_init, /* special media init stuff */
- .defaults = lmc_ds3_default, /* reset to default state */
- .set_status = lmc_ds3_set_status, /* reset status to state provided */
- .set_clock_source = lmc_dummy_set_1, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
- .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
- .get_link_status = lmc_ds3_get_link_status, /* get link status */
- .set_link_status = lmc_dummy_set_1, /* set link status */
- .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_ds3_watchdog
-};
-
-lmc_media_t lmc_hssi_media = {
- .init = lmc_hssi_init, /* special media init stuff */
- .defaults = lmc_hssi_default, /* reset to default state */
- .set_status = lmc_hssi_set_status, /* reset status to state provided */
- .set_clock_source = lmc_hssi_set_clock, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_hssi_get_link_status, /* get link status */
- .set_link_status = lmc_hssi_set_link_status, /* set link status */
- .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_hssi_watchdog
-};
-
-lmc_media_t lmc_ssi_media = {
- .init = lmc_ssi_init, /* special media init stuff */
- .defaults = lmc_ssi_default, /* reset to default state */
- .set_status = lmc_ssi_set_status, /* reset status to state provided */
- .set_clock_source = lmc_ssi_set_clock, /* set clock source */
- .set_speed = lmc_ssi_set_speed, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_ssi_get_link_status, /* get link status */
- .set_link_status = lmc_ssi_set_link_status, /* set link status */
- .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_ssi_watchdog
-};
-
-lmc_media_t lmc_t1_media = {
- .init = lmc_t1_init, /* special media init stuff */
- .defaults = lmc_t1_default, /* reset to default state */
- .set_status = lmc_t1_set_status, /* reset status to state provided */
- .set_clock_source = lmc_t1_set_clock, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_t1_get_link_status, /* get link status */
- .set_link_status = lmc_dummy_set_1, /* set link status */
- .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
- .watchdog = lmc_t1_watchdog
-};
-
-static void
-lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
-{
-}
-
-static void
-lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
-{
-}
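-
-/*
- * Each card type fills in the same lmc_media_t vtable, and the
- * board-generic code in lmc_main.c simply calls
- * sc->lmc_media->op(...) without knowing which card is installed.
- * Operations a medium does not support point at the
- * lmc_dummy_set_1 / lmc_dummy_set2_1 no-ops above rather than NULL,
- * so callers never need NULL checks.
- */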
-
-/*
- * HSSI methods
- */
-
-static void
-lmc_hssi_init (lmc_softc_t * const sc)
-{
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
-
- lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
-}
-
-static void
-lmc_hssi_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This
- * will reset the card if needed.
- */
-static void
-lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in clock source
- */
- if (ctl->clock_source && !sc->ictl.clock_source)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
- }
- else if (!ctl->clock_source && sc->ictl.clock_source)
- {
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- }
-
- lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = sc->ictl.clock_source;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_hssi_get_link_status (lmc_softc_t * const sc)
-{
- /*
- * We're using the same code as SSI since
- * they're practically the same
- */
- return lmc_ssi_get_link_status(sc);
-}
-
-static void
-lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_LINK_UP)
- sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
- else
- sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_hssi_watchdog (lmc_softc_t * const sc)
-{
- /* HSSI is blank */
-}
-
-/*
- * DS3 methods
- */
-
-/*
- * Set cable length
- */
-static void
-lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
- {
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
- sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
- }
- else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
- {
- sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
- sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
- }
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
- sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user-provided state, set ourselves up to match it. This
- * resets the card when needed.
- */
-static void
-lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
- sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in cable length setting
- */
- if (ctl->cable_length && !sc->ictl.cable_length)
- lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
- else if (!ctl->cable_length && sc->ictl.cable_length)
- lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
-
- /*
- * Check for change in scrambler setting (requires reset)
- */
- if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
- lmc_ds3_set_scram (sc, LMC_CTL_ON);
- else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
- lmc_ds3_set_scram (sc, LMC_CTL_OFF);
-
- lmc_set_protocol (sc, ctl);
-}
-
-static void
-lmc_ds3_init (lmc_softc_t * const sc)
-{
- int i;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
-
- /* writes zeros everywhere */
- for (i = 0; i < 21; i++)
- {
- lmc_mii_writereg (sc, 0, 17, i);
- lmc_mii_writereg (sc, 0, 18, 0);
- }
-
- /* set some essential bits */
- lmc_mii_writereg (sc, 0, 17, 1);
- lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */
-
- lmc_mii_writereg (sc, 0, 17, 5);
- lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */
-
- lmc_mii_writereg (sc, 0, 17, 14);
- lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */
-
- /* clear counters and latched bits */
- for (i = 0; i < 21; i++)
- {
- lmc_mii_writereg (sc, 0, 17, i);
- lmc_mii_readreg (sc, 0, 18);
- }
-}
-
-/*
- * 1 == DS3 payload scrambled, 0 == not scrambled
- */
-static void
-lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_ON)
- {
- sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
- sc->ictl.scrambler_onoff = LMC_CTL_ON;
- }
- else
- {
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
- sc->ictl.scrambler_onoff = LMC_CTL_OFF;
- }
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ds3_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status, link_status_11;
- int ret = 1;
-
- lmc_mii_writereg (sc, 0, 17, 7);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
- /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
- * led0 yellow = far-end adapter is in Red alarm condition
- * led1 blue = received an Alarm Indication signal
- * (upstream failure)
- * led2 Green = power to adapter, Gate Array loaded & driver
- * attached
- * led3 red = Loss of Signal (LOS) or out of frame (OOF)
- * conditions detected on T3 receive signal
- */
-
- lmc_led_on(sc, LMC_DS3_LED2);
-
- if ((link_status & LMC_FRAMER_REG0_DLOS) ||
- (link_status & LMC_FRAMER_REG0_OOFS)){
- ret = 0;
- if(sc->last_led_err[3] != 1){
- u16 r1;
- lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
- r1 = lmc_mii_readreg (sc, 0, 18);
- r1 &= 0xfe;
- lmc_mii_writereg(sc, 0, 18, r1);
- printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */
- sc->last_led_err[3] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED3); /* turn off red LED */
- if(sc->last_led_err[3] == 1){
- u16 r1;
- lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
- r1 = lmc_mii_readreg (sc, 0, 18);
- r1 |= 0x01;
- lmc_mii_writereg(sc, 0, 18, r1);
- }
- sc->last_led_err[3] = 0;
- }
-
- lmc_mii_writereg(sc, 0, 17, 0x10);
- link_status_11 = lmc_mii_readreg(sc, 0, 18);
- if((link_status & LMC_FRAMER_REG0_AIS) ||
- (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
- ret = 0;
- if(sc->last_led_err[0] != 1){
- printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
- printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 0;
- }
-
- lmc_mii_writereg (sc, 0, 17, 9);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
- if(link_status & LMC_FRAMER_REG9_RBLUE){
- ret = 0;
- if(sc->last_led_err[1] != 1){
- printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 0;
- }
-
- return ret;
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_watchdog (lmc_softc_t * const sc)
-{
-
-}
-
-
-/*
- * SSI methods
- */
-
-static void lmc_ssi_init(lmc_softc_t * const sc)
-{
- u16 mii17;
- int cable;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
-
- mii17 = lmc_mii_readreg(sc, 0, 17);
-
- cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
- sc->ictl.cable_type = cable;
-
- lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
-}
-
-static void
-lmc_ssi_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- /*
- * make TXCLOCK always be an output
- */
- lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_media->set_speed (sc, NULL);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user-provided state, set ourselves up to match it. This
- * resets the card when needed.
- */
-static void
-lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
- sc->lmc_media->set_speed (sc, &sc->ictl);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in clock source
- */
- if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
- && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
- }
- else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
- && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
- }
-
- if (ctl->clock_rate != sc->ictl.clock_rate)
- sc->lmc_media->set_speed (sc, ctl);
-
- lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = sc->ictl.clock_source;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(ie != old)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(ie != old)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-static void
-lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- lmc_ctl_t *ictl = &sc->ictl;
- lmc_av9110_t *av;
-
- /* The original settings for a clock rate of
-  * 100 kHz (8,25,0,0,2) were incorrect;
-  * they should have been 80,125,1,3,3.
-  * There are 17 parameter combinations that produce this frequency.
-  * For 1.5 MHz use 120,100,1,1,2 (226 parameter combinations).
- */
- if (ctl == NULL)
- {
- av = &ictl->cardspec.ssi;
- ictl->clock_rate = 1500000;
- av->f = ictl->clock_rate;
- av->n = 120;
- av->m = 100;
- av->v = 1;
- av->x = 1;
- av->r = 2;
-
- write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
- return;
- }
-
- av = &ctl->cardspec.ssi;
-
- if (av->f == 0)
- return;
-
- ictl->clock_rate = av->f; /* really, this is the rate we run at */
- ictl->cardspec.ssi = *av;
-
- write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ssi_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status;
- u32 ticks;
- int ret = 1;
- int hw_hdsk = 1;
-
- /*
- * missing CTS? Hmm. If we require CTS on, we may never get the
- * link to come up, so omit it in this test.
- *
- * Also, it seems that with a loopback cable, DCD isn't asserted,
- * so just check for things like this:
- * DSR _must_ be asserted.
- * One of DCD or CTS must be asserted.
- */
-
- /* LMC 1000 (SSI) LED definitions
- * led0 Green = power to adapter, Gate Array loaded &
- * driver attached
- * led1 Green = DSR and DTR and RTS and CTS are set
- * led2 Green = Cable detected
- * led3 red = No timing is available from the
- * cable or the on-board frequency
- * generator.
- */
-
- link_status = lmc_mii_readreg (sc, 0, 16);
-
- /* Is the transmit clock still available */
- ticks = LMC_CSR_READ (sc, csr_gp_timer);
- ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
- lmc_led_on (sc, LMC_MII16_LED0);
-
- /* ====== transmit clock determination ===== */
- if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
- lmc_led_off(sc, LMC_MII16_LED3);
- }
- else if (ticks == 0 ) { /* no clock found ? */
- ret = 0;
- if (sc->last_led_err[3] != 1) {
- sc->extra_stats.tx_lossOfClockCnt++;
- printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
- }
- sc->last_led_err[3] = 1;
- lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
- }
- else {
- if(sc->last_led_err[3] == 1)
- printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
- sc->last_led_err[3] = 0;
- lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */
- }
-
- if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
- ret = 0;
- hw_hdsk = 0;
- }
-
-#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
- if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
- ret = 0;
- hw_hdsk = 0;
- }
-#endif
-
- if(hw_hdsk == 0){
- if(sc->last_led_err[1] != 1)
- printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
- sc->last_led_err[1] = 1;
- lmc_led_off(sc, LMC_MII16_LED1);
- }
- else {
- if(sc->last_led_err[1] != 0)
- printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
- sc->last_led_err[1] = 0;
- lmc_led_on(sc, LMC_MII16_LED1);
- }
-
- if(ret == 1) {
- lmc_led_on(sc, LMC_MII16_LED2); /* Overall good status? */
- }
-
- return ret;
-}
-
-static void
-lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_LINK_UP)
- {
- sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
- printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
- printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * These are bits to program the ssi frequency generator
- */
-static inline void
-write_av9110_bit (lmc_softc_t * sc, int c)
-{
- /*
- * set the data bit as we need it.
- */
- sc->lmc_gpio &= ~(LMC_GEP_CLK);
- if (c & 0x01)
- sc->lmc_gpio |= LMC_GEP_DATA;
- else
- sc->lmc_gpio &= ~(LMC_GEP_DATA);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * set the clock to high
- */
- sc->lmc_gpio |= LMC_GEP_CLK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * set the clock to low again.
- */
- sc->lmc_gpio &= ~(LMC_GEP_CLK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-}
-
-static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
-{
- int i;
-
-#if 0
- printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
- LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
-#endif
-
- sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
- sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
- * as outputs.
- */
- lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
- | LMC_GEP_SSI_GENERATOR));
-
- sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * a shifting we will go...
- */
- for (i = 0; i < 7; i++)
- write_av9110_bit (sc, n >> i);
- for (i = 0; i < 7; i++)
- write_av9110_bit (sc, m >> i);
- for (i = 0; i < 1; i++)
- write_av9110_bit (sc, v >> i);
- for (i = 0; i < 2; i++)
- write_av9110_bit (sc, x >> i);
- for (i = 0; i < 2; i++)
- write_av9110_bit (sc, r >> i);
- for (i = 0; i < 5; i++)
- write_av9110_bit (sc, 0x17 >> i);
-
- /*
- * stop driving serial-related signals
- */
- lmc_gpio_mkinput (sc,
- (LMC_GEP_DATA | LMC_GEP_CLK
- | LMC_GEP_SSI_GENERATOR));
-}
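-
-/*
- * For reference, a sketch (not part of the original driver): the word
- * shifted out above goes LSB first in the order n[6:0], m[6:0], v,
- * x[1:0], r[1:0], then the fixed pattern 0x17 in five bits. Packing
- * it into one integer makes the wire order explicit:
- */
-static u32 __maybe_unused
-av9110_word (u32 n, u32 m, u32 v, u32 x, u32 r)
-{
-  /* low-order bits are clocked out first by write_av9110_bit() */
-  return (n & 0x7f)            /* bits  0..6  */
-       | ((m & 0x7f) << 7)     /* bits  7..13 */
-       | ((v & 0x01) << 14)    /* bit  14     */
-       | ((x & 0x03) << 15)    /* bits 15..16 */
-       | ((r & 0x03) << 17)    /* bits 17..18 */
-       | (0x17u << 19);        /* bits 19..23 */
-}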
-
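-/* A reading of the check below (assuming LMC_MII17_SSI_CABLE_SHIFT is
- * 3): (mii17 >> 3) & 7 is the same cable-type field lmc_ssi_init()
- * extracts, with 7 apparently meaning "no cable", so LED2 ("cable
- * detected") is lit only while a cable is present.
- */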
-static void lmc_ssi_watchdog(lmc_softc_t * const sc)
-{
- u16 mii17 = lmc_mii_readreg(sc, 0, 17);
- if (((mii17 >> 3) & 7) == 7)
- lmc_led_off(sc, LMC_MII16_LED2);
- else
- lmc_led_on(sc, LMC_MII16_LED2);
-}
-
-/*
- * T1 methods
- */
-
-/*
- * The framer regs are multiplexed through MII regs 17 & 18:
- * write the register address to MII reg 17 and the data to MII reg 18.
- */
-static void
-lmc_t1_write (lmc_softc_t * const sc, int a, int d)
-{
- lmc_mii_writereg (sc, 0, 17, a);
- lmc_mii_writereg (sc, 0, 18, d);
-}
-
-/* Save a warning
-static int
-lmc_t1_read (lmc_softc_t * const sc, int a)
-{
- lmc_mii_writereg (sc, 0, 17, a);
- return lmc_mii_readreg (sc, 0, 18);
-}
-*/
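-
-/* A hypothetical helper (a sketch, not from the original source):
- * read-modify-write through the same reg 17/18 multiplexing.
- */
-static void __maybe_unused
-lmc_t1_update (lmc_softc_t * const sc, int a, int set, int clear)
-{
-  int d;
-
-  lmc_mii_writereg (sc, 0, 17, a);   /* select framer register */
-  d = lmc_mii_readreg (sc, 0, 18);   /* read its current value */
-  d = (d | set) & ~clear;            /* apply the changes */
-  lmc_mii_writereg (sc, 0, 18, d);   /* write it back */
-}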
-
-
-static void
-lmc_t1_init (lmc_softc_t * const sc)
-{
- u16 mii16;
- int i;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
- mii16 = lmc_mii_readreg (sc, 0, 16);
-
- /* reset 8370 */
- mii16 &= ~LMC_MII16_T1_RST;
- lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
- lmc_mii_writereg (sc, 0, 16, mii16);
-
- /* set T1 or E1 line. Uses sc->lmc_miireg16 in this function, so keep it updated */
- sc->lmc_miireg16 = mii16;
- lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
- mii16 = sc->lmc_miireg16;
-
- lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */
- lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */
- lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */
- lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */
- lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */
- lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */
- lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */
- lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */
- lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */
- lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */
- lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */
- lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */
- lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */
- lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */
- lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */
- lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */
- lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */
- lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */
- lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */
- lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */
- lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */
- lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */
- lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */
- lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */
- lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */
- lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */
- lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */
- lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */
- for (i = 0; i < 32; i++)
- {
- lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */
- lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */
- lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */
- }
- for (i = 1; i < 25; i++)
- {
- lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */
- }
-
- mii16 |= LMC_MII16_T1_XOE;
- lmc_mii_writereg (sc, 0, 16, mii16);
- sc->lmc_miireg16 = mii16;
-}
-
-static void
-lmc_t1_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
- /* Right now we can only clock from our internal source */
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-}
-
-/*
- * Given a user-provided state, set ourselves up to match it. This
- * resets the card when needed.
- */
-static void
-lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
- /*
- * check for change in circuit type
- */
- if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
- && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
- else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
- && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
-
- lmc_set_protocol (sc, ctl);
-}
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_t1_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status;
- int ret = 1;
-
- /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
- * led0 yellow = far-end adapter is in Red alarm condition
- * led1 blue = received an Alarm Indication signal
- * (upstream failure)
- * led2 Green = power to adapter, Gate Array loaded & driver
- * attached
- * led3 red = Loss of Signal (LOS) or out of frame (OOF)
- * conditions detected on T3 receive signal
- */
- lmc_led_on(sc, LMC_DS3_LED2);
-
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
-
- if (link_status & T1F_RAIS) { /* turn on blue LED */
- ret = 0;
- if(sc->last_led_err[1] != 1){
- printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 1;
- }
- else {
- if(sc->last_led_err[1] != 0){
- printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
- }
- lmc_led_off (sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 0;
- }
-
- /*
- * Yellow Alarm is nasty evil stuff: it looks at data patterns
- * inside the channel and gets confused by HDLC framing, so we
- * ignore all plain Yellow alarms.
- *
- * Do listen to the MultiFrame Yellow alarm which, while
- * implemented in different ways, isn't carried in the channel
- * and is hence somewhat more reliable.
- */
-
- if (link_status & T1F_RMYEL) {
- ret = 0;
- if(sc->last_led_err[0] != 1){
- printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 1;
- }
- else {
- if(sc->last_led_err[0] != 0){
- printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
- }
- lmc_led_off(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 0;
- }
-
- /*
- * Loss of signal and loss of frame
- * Use the green bit to identify which one lit the led
- */
- if(link_status & T1F_RLOF){
- ret = 0;
- if(sc->last_led_err[3] != 1){
- printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3);
- sc->last_led_err[3] = 1;
-
- }
- else {
- if(sc->last_led_err[3] != 0){
- printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
- }
- if( ! (link_status & T1F_RLOS))
- lmc_led_off(sc, LMC_DS3_LED3);
- sc->last_led_err[3] = 0;
- }
-
- if(link_status & T1F_RLOS){
- ret = 0;
- if(sc->last_led_err[2] != 1){
- printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3);
- sc->last_led_err[2] = 1;
-
- }
- else {
- if(sc->last_led_err[2] != 0){
- printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
- }
- if( ! (link_status & T1F_RLOF))
- lmc_led_off(sc, LMC_DS3_LED3);
- sc->last_led_err[2] = 0;
- }
-
- sc->lmc_xinfo.t1_alarm1_status = link_status;
-
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
- sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
-
- return ret;
-}
-
-/*
- * 1 == T1 Circuit Type, 0 == E1 Circuit Type
- */
-static void
-lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
- sc->lmc_miireg16 |= LMC_MII16_T1_Z;
- sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
- printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
- }
- else {
- sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
- sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
- printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
-
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = sc->ictl.clock_source;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-static void
-lmc_t1_watchdog (lmc_softc_t * const sc)
-{
-}
-
-static void
-lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (!ctl)
- sc->ictl.keepalive_onoff = LMC_CTL_ON;
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
deleted file mode 100644
index e5487616a816..000000000000
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- *
- * With Help By:
- * David Boggs
- * Ron Crane
- * Allan Cox
- *
- * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/workqueue.h>
-#include <linux/proc_fs.h>
-#include <linux/bitops.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/smp.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_debug.h"
-#include "lmc_ioctl.h"
-#include "lmc_proto.h"
-
-// attach
-void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
-{
- if (sc->if_type == LMC_NET) {
- struct net_device *dev = sc->lmc_device;
- /*
- * They set a few basics because they don't use HDLC
- */
- dev->flags |= IFF_POINTOPOINT;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- }
-}
-
-int lmc_proto_open(lmc_softc_t *sc)
-{
- int ret = 0;
-
- if (sc->if_type == LMC_PPP) {
- ret = hdlc_open(sc->lmc_device);
- if (ret < 0)
- printk(KERN_WARNING "%s: HDLC open failed: %d\n",
- sc->name, ret);
- }
- return ret;
-}
-
-void lmc_proto_close(lmc_softc_t *sc)
-{
- if (sc->if_type == LMC_PPP)
- hdlc_close(sc->lmc_device);
-}
-
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
- switch(sc->if_type){
- case LMC_PPP:
- return hdlc_type_trans(skb, sc->lmc_device);
- case LMC_NET:
- return htons(ETH_P_802_2);
- case LMC_RAW: /* Packet type for skbuff kind of useless */
- return htons(ETH_P_802_2);
- default:
- printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
- return htons(ETH_P_802_2);
- }
-}
-
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
- switch(sc->if_type){
- case LMC_PPP:
- case LMC_NET:
- default:
- netif_rx(skb);
- break;
- case LMC_RAW:
- break;
- }
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
deleted file mode 100644
index e56e7072de44..000000000000
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_PROTO_H_
-#define _LMC_PROTO_H_
-
-#include <linux/hdlc.h>
-
-void lmc_proto_attach(lmc_softc_t *sc);
-int lmc_proto_open(lmc_softc_t *sc);
-void lmc_proto_close(lmc_softc_t *sc);
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
-
-static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
-{
- return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
-}
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
deleted file mode 100644
index 99f0aa787a35..000000000000
--- a/drivers/net/wan/lmc/lmc_var.h
+++ /dev/null
@@ -1,468 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_VAR_H_
-#define _LMC_VAR_H_
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-#include <linux/timer.h>
-
-/*
- * basic definitions used in lmc include files
- */
-
-typedef struct lmc___softc lmc_softc_t;
-typedef struct lmc___media lmc_media_t;
-typedef struct lmc___ctl lmc_ctl_t;
-
-#define lmc_csrptr_t unsigned long
-
-#define LMC_REG_RANGE 0x80
-
-#define LMC_PRINTF_FMT "%s"
-#define LMC_PRINTF_ARGS (sc->lmc_device->name)
-
-#define TX_TIMEOUT (2*HZ)
-
-#define LMC_TXDESCS 32
-#define LMC_RXDESCS 32
-
-#define LMC_LINK_UP 1
-#define LMC_LINK_DOWN 0
-
-/* These macros for generic read and write to and from the dec chip */
-#define LMC_CSR_READ(sc, csr) \
- inl((sc)->lmc_csrs.csr)
-#define LMC_CSR_WRITE(sc, reg, val) \
- outl((val), (sc)->lmc_csrs.reg)
-
-//#ifdef _LINUX_DELAY_H
-// #define SLOW_DOWN_IO udelay(2);
-// #undef __SLOW_DOWN_IO
-// #define __SLOW_DOWN_IO udelay(2);
-//#endif
-
-#define DELAY(n) SLOW_DOWN_IO
-
-#define lmc_delay() inl(sc->lmc_csrs.csr_9)
-
-/* This macro syncs up with the MII so that reads and writes can take place */
-#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
- LMC_CSR_WRITE((sc), csr_9, 0x20000); \
- lmc_delay(); \
- LMC_CSR_WRITE((sc), csr_9, 0x30000); \
- lmc_delay(); \
- n--; }} while(0)
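-
-/* (Note: with n starting at 32 and the loop running while n >= 0, the
- * sync pattern above is clocked 33 times; each pass appears to toggle
- * the MDC bit once with the MDIO bit held high - the usual MII
- * preamble of ones.)
- */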
-
-struct lmc_regfile_t {
- lmc_csrptr_t csr_busmode; /* CSR0 */
- lmc_csrptr_t csr_txpoll; /* CSR1 */
- lmc_csrptr_t csr_rxpoll; /* CSR2 */
- lmc_csrptr_t csr_rxlist; /* CSR3 */
- lmc_csrptr_t csr_txlist; /* CSR4 */
- lmc_csrptr_t csr_status; /* CSR5 */
- lmc_csrptr_t csr_command; /* CSR6 */
- lmc_csrptr_t csr_intr; /* CSR7 */
- lmc_csrptr_t csr_missed_frames; /* CSR8 */
- lmc_csrptr_t csr_9; /* CSR9 */
- lmc_csrptr_t csr_10; /* CSR10 */
- lmc_csrptr_t csr_11; /* CSR11 */
- lmc_csrptr_t csr_12; /* CSR12 */
- lmc_csrptr_t csr_13; /* CSR13 */
- lmc_csrptr_t csr_14; /* CSR14 */
- lmc_csrptr_t csr_15; /* CSR15 */
-};
-
-#define csr_enetrom csr_9 /* 21040 */
-#define csr_reserved csr_10 /* 21040 */
-#define csr_full_duplex csr_11 /* 21040 */
-#define csr_bootrom csr_10 /* 21041/21140A/?? */
-#define csr_gp csr_12 /* 21140* */
-#define csr_watchdog csr_15 /* 21140* */
-#define csr_gp_timer csr_11 /* 21041/21140* */
-#define csr_srom_mii csr_9 /* 21041/21140* */
-#define csr_sia_status csr_12 /* 2104x */
-#define csr_sia_connectivity csr_13 /* 2104x */
-#define csr_sia_tx_rx csr_14 /* 2104x */
-#define csr_sia_general csr_15 /* 2104x */
-
-/* tulip length/control transmit descriptor definitions
- * used to define bits in the second tulip_desc_t field (length)
- * for the transmit descriptor -baz */
-
-#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
-#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
-#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
-#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
-#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
-#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
-#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
-#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
-#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
-#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
-#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
-#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
-
-#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
-#define TDES_COLLISION_COUNT_BIT_NUMBER 3
-
-/* Constants for the RCV descriptor RDES */
-
-#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
-#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
-#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
-#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
-#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
-#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
-#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
-#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
-#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
-#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
-#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
-#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
-#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
-#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
-#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
-#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
-#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
-
-#define RDES_FRAME_LENGTH_BIT_NUMBER 16
-
-#define LMC_RDES_ERROR_MASK ( (u32)( \
- LMC_RDES_OVERFLOW \
- | LMC_RDES_DRIBBLING_BIT \
- | LMC_RDES_REPORT_ON_MII_ERR \
- | LMC_RDES_COLLISION_SEEN ) )
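-
-/* Usage sketch (illustrative, not from the original header): decoding
- * a completed receive descriptor status word with the RDES bits above.
- */
-static inline int lmc_rdes_frame_len (s32 status)
-{
-  if (status & LMC_RDES_OWN_BIT)     /* still owned by the chip */
-    return -1;
-  if (status & LMC_RDES_ERROR_MASK)  /* errored frame, drop it */
-    return -1;
-  return (status & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER;
-}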
-
-
-/*
- * Ioctl info
- */
-
-typedef struct {
- u32 n;
- u32 m;
- u32 v;
- u32 x;
- u32 r;
- u32 f;
- u32 exact;
-} lmc_av9110_t;
-
-/*
- * Common structure passed to the ioctl code.
- */
-struct lmc___ctl {
- u32 cardtype;
- u32 clock_source; /* HSSI, T1 */
- u32 clock_rate; /* T1 */
- u32 crc_length;
- u32 cable_length; /* DS3 */
- u32 scrambler_onoff; /* DS3 */
- u32 cable_type; /* T1 */
- u32 keepalive_onoff; /* protocol */
- u32 ticks; /* ticks/sec */
- union {
- lmc_av9110_t ssi;
- } cardspec;
- u32 circuit_type; /* T1 or E1 */
-};
-
-
-/*
- * Careful, look at the data sheet, there's more to this
- * structure than meets the eye. It should probably be:
- *
- * struct tulip_desc_t {
- * u8 own:1;
- * u32 status:31;
- * u32 control:10;
- * u32 buffer1;
- * u32 buffer2;
- * };
- * You could also expand status control to provide more bit information
- */
-
-struct tulip_desc_t {
- s32 status;
- s32 length;
- u32 buffer1;
- u32 buffer2;
-};
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-struct lmc___media {
- void (* init)(lmc_softc_t * const);
- void (* defaults)(lmc_softc_t * const);
- void (* set_status)(lmc_softc_t * const, lmc_ctl_t *);
- void (* set_clock_source)(lmc_softc_t * const, int);
- void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
- void (* set_cable_length)(lmc_softc_t * const, int);
- void (* set_scrambler)(lmc_softc_t * const, int);
- int (* get_link_status)(lmc_softc_t * const);
- void (* set_link_status)(lmc_softc_t * const, int);
- void (* set_crc_length)(lmc_softc_t * const, int);
- void (* set_circuit_type)(lmc_softc_t * const, int);
- void (* watchdog)(lmc_softc_t * const);
-};
-
-
-#define STATCHECK 0xBEEFCAFE
-
-struct lmc_extra_statistics
-{
- u32 version_size;
- u32 lmc_cardtype;
-
- u32 tx_ProcTimeout;
- u32 tx_IntTimeout;
- u32 tx_NoCompleteCnt;
- u32 tx_MaxXmtsB4Int;
- u32 tx_TimeoutCnt;
- u32 tx_OutOfSyncPtr;
- u32 tx_tbusy0;
- u32 tx_tbusy1;
- u32 tx_tbusy_calls;
- u32 resetCount;
- u32 lmc_txfull;
- u32 tbusy;
- u32 dirtyTx;
- u32 lmc_next_tx;
- u32 otherTypeCnt;
- u32 lastType;
- u32 lastTypeOK;
- u32 txLoopCnt;
- u32 usedXmtDescripCnt;
- u32 txIndexCnt;
- u32 rxIntLoopCnt;
-
- u32 rx_SmallPktCnt;
- u32 rx_BadPktSurgeCnt;
- u32 rx_BuffAllocErr;
- u32 tx_lossOfClockCnt;
-
- /* T1 error counters */
- u32 framingBitErrorCount;
- u32 lineCodeViolationCount;
-
- u32 lossOfFrameCount;
- u32 changeOfFrameAlignmentCount;
- u32 severelyErroredFrameCount;
-
- u32 check;
-};
-
-typedef struct lmc_xinfo {
- u32 Magic0; /* BEEFCAFE */
-
- u32 PciCardType;
- u32 PciSlotNumber; /* PCI slot number */
-
- u16 DriverMajorVersion;
- u16 DriverMinorVersion;
- u16 DriverSubVersion;
-
- u16 XilinxRevisionNumber;
- u16 MaxFrameSize;
-
- u16 t1_alarm1_status;
- u16 t1_alarm2_status;
-
- int link_status;
- u32 mii_reg16;
-
- u32 Magic1; /* DEADBEEF */
-} LMC_XINFO;
-
-
-/*
- * forward decl
- */
-struct lmc___softc {
- char *name;
- u8 board_idx;
- struct lmc_extra_statistics extra_stats;
- struct net_device *lmc_device;
-
- int hang, rxdesc, bad_packet, some_counter;
- u32 txgo;
- struct lmc_regfile_t lmc_csrs;
- volatile u32 lmc_txtick;
- volatile u32 lmc_rxtick;
- u32 lmc_flags;
- u32 lmc_intrmask; /* our copy of csr_intr */
- u32 lmc_cmdmode; /* our copy of csr_cmdmode */
- u32 lmc_busmode; /* our copy of csr_busmode */
- u32 lmc_gpio_io; /* state of in/out settings */
- u32 lmc_gpio; /* state of outputs */
- struct sk_buff* lmc_txq[LMC_TXDESCS];
- struct sk_buff* lmc_rxq[LMC_RXDESCS];
- volatile
- struct tulip_desc_t lmc_rxring[LMC_RXDESCS];
- volatile
- struct tulip_desc_t lmc_txring[LMC_TXDESCS];
- unsigned int lmc_next_rx, lmc_next_tx;
- volatile
- unsigned int lmc_taint_tx, lmc_taint_rx;
- int lmc_tx_start, lmc_txfull;
- int lmc_txbusy;
- u16 lmc_miireg16;
- int lmc_ok;
- int last_link_status;
- int lmc_cardtype;
- u32 last_frameerr;
- lmc_media_t *lmc_media;
- struct timer_list timer;
- lmc_ctl_t ictl;
- u32 TxDescriptControlInit;
-
- int tx_TimeoutInd; /* additional driver state */
- int tx_TimeoutDisplay;
- unsigned int lastlmc_taint_tx;
- int lasttx_packets;
- u32 tx_clockState;
- u32 lmc_crcSize;
- LMC_XINFO lmc_xinfo;
- char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
- char lmc_timing; /* for HSSI and SSI */
- int got_irq;
-
- char last_led_err[4];
-
- u32 last_int;
- u32 num_int;
-
- spinlock_t lmc_lock;
- u16 if_type; /* HDLC/PPP or NET */
-
- /* Failure cases */
- u8 failed_ring;
- u8 failed_recv_alloc;
-
- /* Structure check */
- u32 check;
-};
-
-#define LMC_PCI_TIME 1
-#define LMC_EXT_TIME 0
-
-#define PKT_BUF_SZ 1542 /* was 1536 */
-
-/* CSR5 settings */
-#define TIMER_INT 0x00000800
-#define TP_LINK_FAIL 0x00001000
-#define TP_LINK_PASS 0x00000010
-#define NORMAL_INT 0x00010000
-#define ABNORMAL_INT 0x00008000
-#define RX_JABBER_INT 0x00000200
-#define RX_DIED 0x00000100
-#define RX_NOBUFF 0x00000080
-#define RX_INT 0x00000040
-#define TX_FIFO_UNDER 0x00000020
-#define TX_JABBER 0x00000008
-#define TX_NOBUFF 0x00000004
-#define TX_DIED 0x00000002
-#define TX_INT 0x00000001
-
-/* CSR6 settings */
-#define OPERATION_MODE 0x00000200 /* Full Duplex */
-#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
-#define RECEIVE_ALL 0x40000000 /* Receive All */
-#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
-
-/* Dec control registers CSR6 as well */
-#define LMC_DEC_ST 0x00002000
-#define LMC_DEC_SR 0x00000002
-
-/* CSR15 settings */
-#define RECV_WATCHDOG_DISABLE 0x00000010
-#define JABBER_DISABLE 0x00000001
-
-/* More settings */
-/*
- * CSR6 -- Command (Operation Mode) Register
- */
-#define TULIP_CMD_RECEIVEALL 0x40000000L /* (RW) Receive all frames? */
-#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */
-#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */
-#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Forward (21140) */
-#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */
-#define TULIP_CMD_PORTSELECT 0x00040000L /* (RW) Port Select (100Mb) (21140) */
-#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */
-#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */
-#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */
-#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */
-#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */
-
-#define TULIP_GP_PINSET 0x00000100L
-#define TULIP_BUSMODE_SWRESET 0x00000001L
-#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
-#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
-
-#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */
-#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */
-#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */
-#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */
-#define TULIP_STS_GTE 0x00000800L /* (RW) General Purpose Timer Exp */
-#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */
-#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */
-#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */
-#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */
-#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */
-#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */
-#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */
-#define TULIP_STS_TXNOBUF 0x00000004L
-#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */
-#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */
-
-#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */
-
-#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */
-#define TULIP_STS_RXNOBUF 0x00000080L
-
-#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */
-#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */
-#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */
-#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */
-#define TULIP_DSTS_RxMIIERR 0x00000008
-#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR)
-
-#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \
- | TULIP_STS_RXINTR \
- | TULIP_STS_TXINTR \
- | TULIP_STS_ABNRMLINTR \
- | TULIP_STS_SYSERROR \
- | TULIP_STS_TXSTOPPED \
- | TULIP_STS_TXUNDERFLOW\
- | TULIP_STS_RXSTOPPED )
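-
-/* Usage sketch (assumed, not from the original source): arming the
- * interrupt mask while mirroring it in the softc copy:
- *
- *   sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
- *   LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
- */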
-
-#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
-#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
-
-#ifndef TULIP_CMD_RECEIVEALL
-#define TULIP_CMD_RECEIVEALL 0x40000000L
-#endif
-
-/* Adapter module number */
-#define LMC_ADAP_HSSI 2
-#define LMC_ADAP_DS3 3
-#define LMC_ADAP_SSI 4
-#define LMC_ADAP_T1 5
-
-#define LMC_MTU 1500
-
-#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
-#define LMC_CRC_LEN_32 4
-
-#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
deleted file mode 100644
index eddd20aab691..000000000000
--- a/drivers/net/wan/sealevel.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Sealevel Systems 4021 driver.
- *
- * (c) Copyright 1999, 2001 Alan Cox
- * (c) Copyright 2001 Red Hat Inc.
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-struct slvl_device {
- struct z8530_channel *chan;
- int channel;
-};
-
-struct slvl_board {
- struct slvl_device dev[2];
- struct z8530_dev board;
- int iobase;
-};
-
- /* Network driver support routines */
-
-static inline struct slvl_device *dev_to_chan(struct net_device *dev)
-{
- return (struct slvl_device *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- netif_rx(skb);
-}
-
- /* We've been placed in the UP state */
-
-static int sealevel_open(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int err = -1;
- int unit = slvl->channel;
-
- /* Link layer up. */
-
- switch (unit) {
- case 0:
- err = z8530_sync_dma_open(d, slvl->chan);
- break;
- case 1:
- err = z8530_sync_open(d, slvl->chan);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return err;
- }
-
- slvl->chan->rx_function = sealevel_input;
-
- netif_start_queue(d);
- return 0;
-}
-
-static int sealevel_close(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int unit = slvl->channel;
-
- /* Discard new frames */
-
- slvl->chan->rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind. */
-
-static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
-}
-
-static int sealevel_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops sealevel_ops = {
- .ndo_open = sealevel_open,
- .ndo_stop = sealevel_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
-{
- struct net_device *dev = alloc_hdlcdev(sv);
-
- if (!dev)
- return -1;
-
- dev_to_hdlc(dev)->attach = sealevel_attach;
- dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
- dev->netdev_ops = &sealevel_ops;
- dev->base_addr = iobase;
- dev->irq = irq;
-
- if (register_hdlc_device(dev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(dev);
- return -1;
- }
-
- sv->chan->netdevice = dev;
- return 0;
-}
-
-/* Allocate and setup Sealevel board. */
-
-static __init struct slvl_board *slvl_init(int iobase, int irq,
- int txdma, int rxdma, int slow)
-{
- struct z8530_dev *dev;
- struct slvl_board *b;
-
- /* Get the needed I/O space */
-
- if (!request_region(iobase, 8, "Sealevel 4021")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
- if (!b)
- goto err_kzalloc;
-
- b->dev[0].chan = &b->board.chanA;
- b->dev[0].channel = 0;
-
- b->dev[1].chan = &b->board.chanB;
- b->dev[1].channel = 1;
-
- dev = &b->board;
-
- /* Stuff in the I/O addressing */
-
- dev->active = 0;
-
- b->iobase = iobase;
-
- /* Select 8530 delays for the old board */
-
- if (slow)
- iobase |= Z8530_PORT_SLEEP;
-
- dev->chanA.ctrlio = iobase + 1;
- dev->chanA.dataio = iobase;
- dev->chanB.ctrlio = iobase + 3;
- dev->chanB.dataio = iobase + 2;
-
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
-
- /* Assert DTR enable DMA */
-
- outb(3 | (1 << 7), b->iobase + 4);
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "SeaLevel", dev) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_request_irq;
- }
-
- dev->irq = irq;
- dev->chanA.private = &b->dev[0];
- dev->chanB.private = &b->dev[1];
- dev->chanA.dev = dev;
- dev->chanB.dev = dev;
-
- dev->chanA.txdma = 3;
- dev->chanA.rxdma = 1;
- if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
- goto err_dma_tx;
-
- if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
- goto err_dma_rx;
-
- disable_irq(irq);
-
- /* Begin normal initialise */
-
- if (z8530_init(dev) != 0) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_hw;
- }
- if (dev->type == Z85C30) {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
- } else {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
- }
-
- /* Now we can take the IRQ */
-
- enable_irq(irq);
-
- if (slvl_setup(&b->dev[0], iobase, irq))
- goto free_hw;
- if (slvl_setup(&b->dev[1], iobase, irq))
- goto free_netdev0;
-
- z8530_describe(dev, "I/O", iobase);
- dev->active = 1;
- return b;
-
-free_netdev0:
- unregister_hdlc_device(b->dev[0].chan->netdevice);
- free_netdev(b->dev[0].chan->netdevice);
-free_hw:
- free_dma(dev->chanA.rxdma);
-err_dma_rx:
- free_dma(dev->chanA.txdma);
-err_dma_tx:
- free_irq(irq, dev);
-err_request_irq:
- kfree(b);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void __exit slvl_shutdown(struct slvl_board *b)
-{
- int u;
-
- z8530_shutdown(&b->board);
-
- for (u = 0; u < 2; u++) {
- struct net_device *d = b->dev[u].chan->netdevice;
-
- unregister_hdlc_device(d);
- free_netdev(d);
- }
-
- free_irq(b->board.irq, &b->board);
- free_dma(b->board.chanA.rxdma);
- free_dma(b->board.chanA.txdma);
- /* DMA off on the card, drop DTR */
- outb(0, b->iobase);
- release_region(b->iobase, 8);
- kfree(b);
-}
-
-static int io = 0x238;
-static int txdma = 1;
-static int rxdma = 3;
-static int irq = 5;
-static bool slow;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
-module_param_hw(txdma, int, dma, 0);
-MODULE_PARM_DESC(txdma, "Transmit DMA channel");
-module_param_hw(rxdma, int, dma, 0);
-MODULE_PARM_DESC(rxdma, "Receive DMA channel");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
-module_param(slow, bool, 0);
-MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
-
-static struct slvl_board *slvl_unit;
-
-static int __init slvl_init_module(void)
-{
- slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
-
- return slvl_unit ? 0 : -ENODEV;
-}
-
-static void __exit slvl_cleanup_module(void)
-{
- if (slvl_unit)
- slvl_shutdown(slvl_unit);
-}
-
-module_init(slvl_init_module);
-module_exit(slvl_cleanup_module);
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 8e3b1c717c10..6063552cea9b 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -194,10 +194,9 @@ static int slic_ds26522_init_configure(struct spi_device *spi)
return 0;
}
-static int slic_ds26522_remove(struct spi_device *spi)
+static void slic_ds26522_remove(struct spi_device *spi)
{
pr_info("DS26522 module uninstalled\n");
- return 0;
}
static int slic_ds26522_probe(struct spi_device *spi)
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
deleted file mode 100644
index 982a03488a00..000000000000
--- a/drivers/net/wan/z85230.c
+++ /dev/null
@@ -1,1641 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * (c) Copyright 2000, 2001 Red Hat Inc
- *
- * Development of this driver was funded by Equiinet Ltd
- * http://www.equiinet.com
- *
- * ChangeLog:
- *
- * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
- * unification of all the Z85x30 asynchronous drivers for real.
- *
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
- * boundary.
- *
- * Modified for SMP safety and SMP locking by Alan Cox
- * <alan@lxorguk.ukuu.org.uk>
- *
- * Performance
- *
- * Z85230:
- * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
- * X.25 is not unrealistic on all machines. DMA mode can in theory
- * handle T1/E1 quite nicely. In practice the limit seems to be about
- * 512Kbit->1Mbit depending on motherboard.
- *
- * Z85C30:
- * 64K will take DMA, 9600 baud X.25 should be ok.
- *
- * Z8530:
- * Synchronous mode without DMA is unlikely to pass much above 2400 baud.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#define RT_LOCK
-#define RT_UNLOCK
-#include <linux/spinlock.h>
-
-#include "z85230.h"
-
-/**
- * z8530_read_port - Architecture specific interface function
- * @p: port to read
- *
- * Provided port access methods. The Comtrol SV11 requires no delays
- * between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dread 5uS sanity delay.
- *
- * The caller must hold sufficient locks to avoid violating the horrible
- * 5uS delay rule.
- */
-
-static inline int z8530_read_port(unsigned long p)
-{
- u8 r = inb(Z8530_PORT_OF(p));
-
- if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
- udelay(5);
- return r;
-}
-
-/**
- * z8530_write_port - Architecture specific interface function
- * @p: port to write
- * @d: value to write
- *
- * Write a value to a port with delays if need be. Note that the
- * caller must hold locks to avoid read/writes from other contexts
- * violating the 5uS rule
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dread 5uS sanity delay.
- */
-
-static inline void z8530_write_port(unsigned long p, u8 d)
-{
- outb(d, Z8530_PORT_OF(p));
- if (p & Z8530_PORT_SLEEP)
- udelay(5);
-}
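-
-/* Usage note (a reading of the code, assuming the definitions in
- * z85230.h): Z8530_PORT_SLEEP is a flag OR'd into the port cookie
- * rather than a real I/O address bit, and Z8530_PORT_OF() strips it
- * before the inb()/outb(). sealevel.c sets it for old boards with
- * "iobase |= Z8530_PORT_SLEEP".
- */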
-
-static void z8530_rx_done(struct z8530_channel *c);
-static void z8530_tx_done(struct z8530_channel *c);
-
-/**
- * read_zsreg - Read a register from a Z85230
- * @c: Z8530 channel to read from (2 per chip)
- * @reg: Register to read
- * FIXME: Use a spinlock.
- *
- * Most of the Z8530 registers are indexed off the control registers.
- * A read is done by writing to the control register and reading the
- * register back. The caller must hold the lock
- */
-
-static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- return z8530_read_port(c->ctrlio);
-}
-
-/**
- * read_zsdata - Read the data port of a Z8530 channel
- * @c: The Z8530 channel to read the data port from
- *
- * The data port provides fast access to some things. We still
- * have all the 5uS delays to worry about.
- */
-
-static inline u8 read_zsdata(struct z8530_channel *c)
-{
- u8 r;
-
- r = z8530_read_port(c->dataio);
- return r;
-}
-
-/**
- * write_zsreg - Write to a Z8530 channel register
- * @c: The Z8530 channel
- * @reg: Register number
- * @val: Value to write
- *
- * Write a value to an indexed register. The caller must hold the lock
- * to honour the irritating delay rules. We know about register 0
- * being fast to access.
- *
- * Assumes c->lock is held.
- */
-static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsctrl - Write to a Z8530 control register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the control register on the Z8530
- */
-
-static inline void write_zsctrl(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsdata - Write to a Z8530 data register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the data register on the Z8530
- */
-static inline void write_zsdata(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->dataio, val);
-}
-
-/* Register loading parameters for a dead port
- */
-
-u8 z8530_dead_port[] = {
- 255
-};
-EXPORT_SYMBOL(z8530_dead_port);
-
-/* Register loading parameters for currently supported circuit types
- */
-
-/* Data clocked by telco end. This is the correct data for the UK
- * "kilostream" service, and most other similar services.
- */
-
-u8 z8530_hdlc_kilostream[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream);
-
-/* As above but for enhanced chips.
- */
-
-u8 z8530_hdlc_kilostream_85230[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 23, 3, /* Extended mode AUTO TX and EOM */
-
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
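A board driver would normally pick between these two tables after chip detection. A minimal usage sketch, assuming z8530_init() has already identified the part (the wrapper name is illustrative; the tables and z8530_channel_load() are the real exports):

	static int example_load_kilostream(struct z8530_dev *dev)
	{
		/* Only the Z85230 understands the extended-mode entries */
		u8 *rtable = (dev->type == Z85230) ? z8530_hdlc_kilostream_85230
						   : z8530_hdlc_kilostream;

		return z8530_channel_load(&dev->chanA, rtable);
	}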
-
-/**
- * z8530_flush_fifo - Flush on chip RX FIFO
- * @c: Channel to flush
- *
- * Flush the receive FIFO. There is no specific option for this, we
- * blindly read bytes and discard them. Reading when there is no data
- * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
- * All locking is handled for the caller. On return data may still be
- * present if it arrived during the flush.
- */
-
-static void z8530_flush_fifo(struct z8530_channel *c)
-{
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- if (c->dev->type == Z85230) {
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- }
-}
-
-/**
- * z8530_rtsdtr - Control the outgoing DTR/RTS line
- * @c: The Z8530 channel to control
- * @set: 1 to set, 0 to clear
- *
- * Sets or clears DTR/RTS on the requested line. All locking is handled
- * by the caller. For now we assume all boards use the actual RTS/DTR
- * on the chip. Apparently one or two don't. We'll scream about them
- * later.
- */
-
-static void z8530_rtsdtr(struct z8530_channel *c, int set)
-{
- if (set)
- c->regs[5] |= (RTS | DTR);
- else
- c->regs[5] &= ~(RTS | DTR);
- write_zsreg(c, R5, c->regs[5]);
-}
-
-/**
- * z8530_rx - Handle a PIO receive event
- * @c: Z8530 channel to process
- *
- * Receive handler for receiving in PIO mode. This is much like the
- * async one but not quite the same or as complex
- *
- * Note: It's intended that this handler can easily be separated from
- * the main code to run realtime. That'll be needed for some machines
- * (eg to ever clock 64kbits on a sparc ;)).
- *
- * The RT_LOCK macros don't do anything now. Keep the code covered
- * by them as short as possible in all circumstances - clocks cost
- * baud. The interrupt handler is assumed to be atomic w.r.t.
- * other code - this is true in the RT case too.
- *
- * We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non-DMA
- * synchronous mode is portable code. The DMA mode assumes PC-like
- * ISA DMA.
- *
- * Called with the device lock held
- */
-
-static void z8530_rx(struct z8530_channel *c)
-{
- u8 ch, stat;
-
- while (1) {
- /* FIFO empty ? */
- if (!(read_zsreg(c, R0) & Rx_CH_AV))
- break;
- ch = read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- /* Overrun ?
- */
- if (c->count < c->max) {
- *c->dptr++ = ch;
- c->count++;
- }
-
- if (stat & END_FR) {
- /* Error ?
- */
- if (stat & (Rx_OVR | CRC_ERR)) {
- /* Rewind the buffer and return */
- if (c->skb)
- c->dptr = c->skb->data;
- c->count = 0;
- if (stat & Rx_OVR) {
- pr_warn("%s: overrun\n", c->dev->name);
- c->rx_overrun++;
- }
- if (stat & CRC_ERR) {
- c->rx_crc_err++;
- /* printk("crc error\n"); */
- }
- /* Shove the frame upstream */
- } else {
- /* Drop the lock for RX processing, or
- * there are deadlocks
- */
- z8530_rx_done(c);
- write_zsctrl(c, RES_Rx_CRC);
- }
- }
- }
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx - Handle a PIO transmit event
- * @c: Z8530 channel to process
- *
- * Z8530 transmit interrupt handler for the PIO mode. The basic
- * idea is to attempt to keep the FIFO fed. We fill in as many bytes
- * as possible; it's quite possible that we won't keep up with the
- * data rate otherwise.
- */
-
-static void z8530_tx(struct z8530_channel *c)
-{
- while (c->txcount) {
- /* FIFO full ? */
- if (!(read_zsreg(c, R0) & Tx_BUF_EMP))
- return;
- c->txcount--;
- /* Shovel out the byte
- */
- write_zsreg(c, R8, *c->tx_ptr++);
- write_zsctrl(c, RES_H_IUS);
- /* We are about to underflow */
- if (c->txcount == 0) {
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- }
- }
-
- /* End of frame TX - fire another one
- */
-
- write_zsctrl(c, RES_Tx_P);
-
- z8530_tx_done(c);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status - Handle a PIO status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred in PIO synchronous mode. There are several
- * reasons the chip will bother us here. A transmit underrun means we
- * failed to feed the chip fast enough and just broke a packet. A DCD
- * change is a line up or down.
- */
-
-static void z8530_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (status & TxEOM) {
-/* printk("%s: Tx underrun.\n", chan->dev->name); */
- chan->netdevice->stats.tx_fifo_errors++;
- write_zsctrl(chan, ERR_RES);
- z8530_tx_done(chan);
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_sync = {
- .rx = z8530_rx,
- .tx = z8530_tx,
- .status = z8530_status,
-};
-EXPORT_SYMBOL(z8530_sync);
-
-/**
- * z8530_dma_rx - Handle a DMA RX event
- * @chan: Channel to handle
- *
- * Non bus mastering DMA interfaces for the Z8x30 devices. This
- * is really pretty PC specific. The DMA mode means that most receive
- * events are handled by the DMA hardware. We get a kick here only if
- * a frame ended.
- */
-
-static void z8530_dma_rx(struct z8530_channel *chan)
-{
- if (chan->rxdma_on) {
- /* Special condition check only */
- u8 status;
-
- read_zsreg(chan, R7);
- read_zsreg(chan, R6);
-
- status = read_zsreg(chan, R1);
-
- if (status & END_FR)
- z8530_rx_done(chan); /* Fire up the next one */
-
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_H_IUS);
- } else {
- /* DMA is off right now, drain the slow way */
- z8530_rx(chan);
- }
-}
-
-/**
- * z8530_dma_tx - Handle a DMA TX event
- * @chan: The Z8530 channel to handle
- *
- * We have received an interrupt while doing DMA transmissions. It
- * shouldn't happen. Scream loudly if it does.
- */
-static void z8530_dma_tx(struct z8530_channel *chan)
-{
- if (!chan->dma_tx) {
- pr_warn("Hey who turned the DMA off?\n");
- z8530_tx(chan);
- return;
- }
- /* This shouldn't occur in DMA mode */
- pr_err("DMA tx - bogus event!\n");
- z8530_tx(chan);
-}
-
-/**
- * z8530_dma_status - Handle a DMA status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred on the Z8530. We receive these for two reasons
- * when in DMA mode. Firstly if we finished a packet transfer we get one
- * and kick the next packet out. Secondly we may see a DCD change.
- *
- */
-static void z8530_dma_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (chan->dma_tx) {
- if (status & TxEOM) {
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on = 0;
- release_dma_lock(flags);
- z8530_tx_done(chan);
- }
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
-
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-static struct z8530_irqhandler z8530_dma_sync = {
- .rx = z8530_dma_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-static struct z8530_irqhandler z8530_txdma_sync = {
- .rx = z8530_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-/**
- * z8530_rx_clear - Handle RX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_rx_clear(struct z8530_channel *c)
-{
- /* Data and status bytes
- */
- u8 stat;
-
- read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- if (stat & END_FR)
- write_zsctrl(c, RES_Rx_CRC);
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx_clear - Handle TX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_tx_clear(struct z8530_channel *c)
-{
- write_zsctrl(c, RES_Tx_P);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status_clear - Handle status events from a stopped chip
- * @chan: Z8530 channel to shut up
- *
- * Status interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_status_clear(struct z8530_channel *chan)
-{
- u8 status = read_zsreg(chan, R0);
-
- if (status & TxEOM)
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_nop = {
- .rx = z8530_rx_clear,
- .tx = z8530_tx_clear,
- .status = z8530_status_clear,
-};
-EXPORT_SYMBOL(z8530_nop);
-
-/**
- * z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
- * @dev_id: The Z8530 device that is interrupting.
- *
- * A Z85[2]30 device has stuck its hand in the air for attention.
- * We scan both the channels on the chip for events and then call
- * the channel specific call backs for each channel that has events.
- * We have to use callback functions because the two channels can be
- * in different modes.
- *
- * Locking is done for the handlers. Note that locking is done
- * at the chip level (the 5uS delay issue is per chip not per
- * channel). c->lock for both channels points to dev->lock
- */
-
-irqreturn_t z8530_interrupt(int irq, void *dev_id)
-{
- struct z8530_dev *dev = dev_id;
- u8 intr;
- static volatile int locker = 0;
- int work = 0;
- struct z8530_irqhandler *irqs;
-
- if (locker) {
- pr_err("IRQ re-enter\n");
- return IRQ_NONE;
- }
- locker = 1;
-
- spin_lock(&dev->lock);
-
- while (++work < 5000) {
- intr = read_zsreg(&dev->chanA, R3);
- if (!(intr &
- (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
- break;
-
- /* This holds the IRQ status. On the 8530 you must read it
- * from chan A even though it applies to the whole chip
- */
-
- /* Now walk the chip and see what it is wanting - it may be
- * an IRQ for someone else remember
- */
-
- irqs = dev->chanA.irqs;
-
- if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
- if (intr & CHARxIP)
- irqs->rx(&dev->chanA);
- if (intr & CHATxIP)
- irqs->tx(&dev->chanA);
- if (intr & CHAEXT)
- irqs->status(&dev->chanA);
- }
-
- irqs = dev->chanB.irqs;
-
- if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
- if (intr & CHBRxIP)
- irqs->rx(&dev->chanB);
- if (intr & CHBTxIP)
- irqs->tx(&dev->chanB);
- if (intr & CHBEXT)
- irqs->status(&dev->chanB);
- }
- }
- spin_unlock(&dev->lock);
- if (work == 5000)
- pr_err("%s: interrupt jammed - abort(0x%X)!\n",
- dev->name, intr);
- /* Ok all done */
- locker = 0;
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(z8530_interrupt);
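For reference, a board driver hands this handler to request_irq() with the chip-level struct z8530_dev as dev_id so both channels can be scanned. A hedged sketch (the IRQ flags and error handling here are illustrative):

	static int example_attach_irq(struct z8530_dev *dev)
	{
		/* dev_id must be the whole chip: the handler walks chanA and chanB */
		if (request_irq(dev->irq, z8530_interrupt, IRQF_SHARED,
				dev->name, dev))
			return -EBUSY;
		return 0;
	}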
-
-static const u8 reg_init[16] = {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0x55, 0, 0, 0
-};
-
-/**
- * z8530_sync_open - Open a Z8530 channel for PIO
- * @dev: The network interface we are using
- * @c: The Z8530 channel to open in synchronous PIO mode
- *
- * Switch a Z8530 into synchronous mode without DMA assist. We
- * raise the RTS/DTR and commence network operation.
- */
-int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
- c->irqs = &z8530_sync;
-
- /* This loads the double buffer up */
- z8530_rx_done(c); /* Load the frame ring */
- z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c, 1);
- c->dma_tx = 0;
- c->regs[R1] |= TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_open);
-
-/**
- * z8530_sync_close - Close a PIO Z8530 channel
- * @dev: Network device to close
- * @c: Z8530 channel to disassociate and move to idle
- *
- * Close down a Z8530 interface and switch its interrupt handlers
- * to discard future events.
- */
-int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_close);
-
-/**
- * z8530_sync_dma_open - Open a Z8530 for DMA I/O
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA in both directions. Two
- * ISA DMA channels must be available for this to work. We assume ISA
- * DMA driven I/O and PC limits on access.
- */
-int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Load the DMA interfaces up
- */
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs a 1500 byte MTU or less on WAN links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->rx_buf[0])
- return -ENOBUFS;
- c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- return -ENOBUFS;
- }
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_used = 0;
- c->dma_tx = 1;
- c->dma_num = 0;
- c->dma_ready = 1;
-
- /* Enable DMA control mode
- */
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* TX DMA via DIR/REQ
- */
-
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* RX DMA via W/Req
- */
-
- c->regs[R1] |= WT_FN_RDYFN;
- c->regs[R1] |= WT_RDY_RT;
- c->regs[R1] |= INT_ERR_Rx;
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] |= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* DMA interrupts
- */
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
- set_dma_count(c->rxdma, c->mtu);
- enable_dma(c->rxdma);
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 1;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_open);
-
-/**
- * z8530_sync_dma_close - Close down DMA I/O
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA mode synchronous interface. Halt the DMA, and
- * free the buffers.
- */
-int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- flags = claim_dma_lock();
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
-
- c->rxdma_on = 0;
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- release_dma_lock(flags);
-
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- spin_lock_irqsave(c->lock, flags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->rx_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- }
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_close);
-
-/**
- * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA transmission. One
- * ISA DMA channel must be available for this to work. The receive
- * side is run in PIO mode, but then it has the bigger FIFO.
- */
-
-int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- printk("Opening sync interface for TX-DMA\n");
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs a 1500 byte MTU or less on WAN links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0])
- return -ENOBUFS;
-
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* Load the PIO receive ring
- */
-
- z8530_rx_done(c);
- z8530_rx_done(c);
-
- /* Load the DMA interfaces up
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- c->tx_dma_used = 0;
- c->dma_num = 0;
- c->dma_ready = 1;
- c->dma_tx = 1;
-
- /* Enable DMA control mode
- */
-
- /* TX DMA via DIR/REQ
- */
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_open);
-
-/**
- * z8530_sync_txdma_close - Close down a TX driven DMA channel
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
- * and free the buffers.
- */
-
-int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long dflags, cflags;
- u8 chk;
-
- spin_lock_irqsave(c->lock, cflags);
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- release_dma_lock(dflags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, cflags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
- * it exists...
- */
-static const char * const z8530_type_name[] = {
- "Z8530",
- "Z85C30",
- "Z85230"
-};
-
-/**
- * z8530_describe - Uniformly describe a Z8530 port
- * @dev: Z8530 device to describe
- * @mapping: string holding mapping type (eg "I/O" or "Mem")
- * @io: the port value in question
- *
- * Describe a Z8530 in a standard format. We must pass the I/O as
- * the port offset isn't predictable. The main reason for this function
- * is to try and get a common format of report.
- */
-
-void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
-{
- pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
- z8530_type_name[dev->type],
- mapping,
- Z8530_PORT_OF(io),
- dev->irq);
-}
-EXPORT_SYMBOL(z8530_describe);
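Given the format string above, a report line looks like the following (device name, port and IRQ values hypothetical):

	hostess0: Z85230 found at I/O 0x130, IRQ 5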
-
-/* Locked operation part of the z8530 init code
- */
-static inline int do_z8530_init(struct z8530_dev *dev)
-{
- /* NOP the interrupt handlers first - we might get a
- * floating IRQ transition when we reset the chip
- */
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- dev->chanA.dcdcheck = DCD;
- dev->chanB.dcdcheck = DCD;
-
- /* Reset the chip */
- write_zsreg(&dev->chanA, R9, 0xC0);
- udelay(200);
- /* Now check it's valid */
- write_zsreg(&dev->chanA, R12, 0xAA);
- if (read_zsreg(&dev->chanA, R12) != 0xAA)
- return -ENODEV;
- write_zsreg(&dev->chanA, R12, 0x55);
- if (read_zsreg(&dev->chanA, R12) != 0x55)
- return -ENODEV;
-
- dev->type = Z8530;
-
- /* See the application note.
- */
-
- write_zsreg(&dev->chanA, R15, 0x01);
-
- /* If we can set the low bit of R15 then
- * the chip is enhanced.
- */
-
- if (read_zsreg(&dev->chanA, R15) == 0x01) {
- /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
- /* Put a char in the fifo */
- write_zsreg(&dev->chanA, R8, 0);
- if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
- dev->type = Z85230; /* Has a FIFO */
- else
- dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
- }
-
- /* The code assumes R7' and friends are
- * off. Use write_zsext() for these and keep
- * this bit clear.
- */
-
- write_zsreg(&dev->chanA, R15, 0);
-
- /* At this point it looks like the chip is behaving
- */
-
- memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init, 16);
-
- return 0;
-}
-
-/**
- * z8530_init - Initialise a Z8530 device
- * @dev: Z8530 device to initialise.
- *
- * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
- * is present, identify the type and then program it to hopefully
- * keep quite and behave. This matters a lot, a Z8530 in the wrong
- * state will sometimes get into stupid modes generating 10Khz
- * interrupt streams and the like.
- *
- * We set the interrupt handler up to discard any events, in case
- * we get them during reset or setup.
- *
- * Return 0 for success, or a negative value indicating the problem
- * in errno form.
- */
-
-int z8530_init(struct z8530_dev *dev)
-{
- unsigned long flags;
- int ret;
-
- /* Set up the chip level lock */
- spin_lock_init(&dev->lock);
- dev->chanA.lock = &dev->lock;
- dev->chanB.lock = &dev->lock;
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = do_z8530_init(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(z8530_init);
-
-/**
- * z8530_shutdown - Shutdown a Z8530 device
- * @dev: The Z8530 chip to shutdown
- *
- * We set the interrupt handlers to silence any interrupts. We then
- * reset the chip and wait 100uS to be sure the reset completed. Just
- * in case the caller then tries to do stuff.
- *
- * This is called without the lock held
- */
-int z8530_shutdown(struct z8530_dev *dev)
-{
- unsigned long flags;
- /* Reset the chip */
-
- spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- write_zsreg(&dev->chanA, R9, 0xC0);
- /* We must hold the lock across the udelay, the chip is off limits here */
- udelay(100);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_shutdown);
-
-/**
- * z8530_channel_load - Load channel data
- * @c: Z8530 channel to configure
- * @rtable: table of register, value pairs
- * FIXME: ioctl to allow user uploaded tables
- *
- * Load a Z8530 channel up from the system data. We use +16 to
- * indicate the "prime" registers. The value 255 terminates the
- * table.
- */
-
-int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- while (*rtable != 255) {
- int reg = *rtable++;
-
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] | 1);
- write_zsreg(c, reg & 0x0F, *rtable);
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] & ~1);
- c->regs[reg] = *rtable++;
- }
- c->rx_function = z8530_null_rx;
- c->skb = NULL;
- c->tx_skb = NULL;
- c->tx_next_skb = NULL;
- c->mtu = 1500;
- c->max = 0;
- c->count = 0;
- c->status = read_zsreg(c, R0);
- c->sync = 1;
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_channel_load);
-
-/**
- * z8530_tx_begin - Begin packet transmission
- * @c: The Z8530 channel to kick
- *
- * This is the speed sensitive side of transmission. If we are called
- * and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
- *
- * Note: We are handling this code path in the interrupt path, keep it
- * fast or bad things will happen.
- *
- * Called with the lock held.
- */
-
-static void z8530_tx_begin(struct z8530_channel *c)
-{
- unsigned long flags;
-
- if (c->tx_skb)
- return;
-
- c->tx_skb = c->tx_next_skb;
- c->tx_next_skb = NULL;
- c->tx_ptr = c->tx_next_ptr;
-
- if (!c->tx_skb) {
- /* Idle on */
- if (c->dma_tx) {
- flags = claim_dma_lock();
- disable_dma(c->txdma);
- /* Check if we crapped out.
- */
- if (get_dma_residue(c->txdma)) {
- c->netdevice->stats.tx_dropped++;
- c->netdevice->stats.tx_fifo_errors++;
- }
- release_dma_lock(flags);
- }
- c->txcount = 0;
- } else {
- c->txcount = c->tx_skb->len;
-
- if (c->dma_tx) {
- /* FIXME. DMA is broken for the original 8530,
- * on the older parts we need to set a flag and
- * wait for a further TX interrupt to fire this
- * stage off
- */
-
- flags = claim_dma_lock();
- disable_dma(c->txdma);
-
- /* These two are needed by the 8530/85C30
- * and must be issued when idling.
- */
- if (c->dev->type != Z85230) {
- write_zsctrl(c, RES_Tx_CRC);
- write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- clear_dma_ff(c->txdma);
- set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
- set_dma_count(c->txdma, c->txcount);
- enable_dma(c->txdma);
- release_dma_lock(flags);
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5] | TxENAB);
- } else {
- /* ABUNDER off */
- write_zsreg(c, R10, c->regs[10]);
- write_zsctrl(c, RES_Tx_CRC);
-
- while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
- write_zsreg(c, R8, *c->tx_ptr++);
- c->txcount--;
- }
- }
- }
- /* Since we emptied tx_skb we can ask for more
- */
- netif_wake_queue(c->netdevice);
-}
-
-/**
- * z8530_tx_done - TX complete callback
- * @c: The channel that completed a transmit.
- *
- * This is called when we complete a packet send. We wake the queue,
- * start the next packet going and then free the buffer of the existing
- * packet. This code is fairly timing sensitive.
- *
- * Called with the register lock held.
- */
-
-static void z8530_tx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
-
- /* Actually this can happen. */
- if (!c->tx_skb)
- return;
-
- skb = c->tx_skb;
- c->tx_skb = NULL;
- z8530_tx_begin(c);
- c->netdevice->stats.tx_packets++;
- c->netdevice->stats.tx_bytes += skb->len;
- dev_consume_skb_irq(skb);
-}
-
-/**
- * z8530_null_rx - Discard a packet
- * @c: The channel the packet arrived on
- * @skb: The buffer
- *
- * We point the receive handler at this function when idle. Instead
- * of processing the frames we get to throw them away.
- */
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
-{
- dev_kfree_skb_any(skb);
-}
-EXPORT_SYMBOL(z8530_null_rx);
-
-/**
- * z8530_rx_done - Receive completion callback
- * @c: The channel that completed a receive
- *
- * A new packet is complete. Our goal here is to get back into receive
- * mode as fast as possible. On the Z85230 we could change to using
- * ESCC mode, but on the older chips we have no choice. We flip to the
- * new buffer immediately in DMA mode so that the DMA of the next
- * frame can occur while we are copying the previous buffer to an sk_buff
- *
- * Called with the lock held
- */
-static void z8530_rx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
- int ct;
-
- /* Is our receive engine in DMA mode
- */
- if (c->rxdma_on) {
- /* Save the ready state and the buffer currently
- * being used as the DMA target
- */
- int ready = c->dma_ready;
- unsigned char *rxb = c->rx_buf[c->dma_num];
- unsigned long flags;
-
- /* Complete this DMA. Necessary to find the length
- */
- flags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- c->rxdma_on = 0;
- ct = c->mtu - get_dma_residue(c->rxdma);
- if (ct < 0)
- ct = 2; /* Shit happens.. */
- c->dma_ready = 0;
-
- /* Normal case: the other slot is free, start the next DMA
- * into it immediately.
- */
-
- if (ready) {
- c->dma_num ^= 1;
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
- set_dma_count(c->rxdma, c->mtu);
- c->rxdma_on = 1;
- enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- * from passing
- */
- write_zsreg(c, R0, RES_Rx_CRC);
- } else {
- /* Can't occur as we don't re-enable the DMA irq until
- * after the flip is done
- */
- netdev_warn(c->netdevice, "DMA flip overrun!\n");
- }
-
- release_dma_lock(flags);
-
- /* Shove the old buffer into an sk_buff. We can't DMA
- * directly into one on a PC - it might be above the 16MB
- * boundary. Optimisation - we could check to see if we
- * can avoid the copy. Optimisation 2 - make the memcpy
- * a copychecksum.
- */
-
- skb = dev_alloc_skb(ct);
- if (!skb) {
- c->netdevice->stats.rx_dropped++;
- netdev_warn(c->netdevice, "Memory squeeze\n");
- } else {
- skb_put(skb, ct);
- skb_copy_to_linear_data(skb, rxb, ct);
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- c->dma_ready = 1;
- } else {
- RT_LOCK;
- skb = c->skb;
-
- /* The game we play for non DMA is similar. We want to
- * get the controller set up for the next packet as fast
- * as possible. We potentially only have one byte + the
- * fifo length for this. Thus we want to flip to the new
- * buffer and then mess around copying and allocating
- * things. For the current case it doesn't matter but
- * if you build a system where the sync irq isn't blocked
- * by the kernel IRQ disable then you need only block the
- * sync IRQ for the RT_LOCK area.
- *
- */
- ct = c->count;
-
- c->skb = c->skb2;
- c->count = 0;
- if (c->skb) {
- c->dptr = c->skb->data;
- c->max = c->mtu;
- } else {
- c->max = 0;
- }
- RT_UNLOCK;
-
- c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2)
- skb_put(c->skb2, c->mtu);
-
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- /* If we received a frame we must now process it.
- */
- if (skb) {
- skb_trim(skb, ct);
- c->rx_function(c, skb);
- } else {
- c->netdevice->stats.rx_dropped++;
- netdev_err(c->netdevice, "Lost a frame\n");
- }
-}
-
-/**
- * spans_boundary - Check a packet can be ISA DMA'd
- * @skb: The buffer to check
- *
- * Returns true if the buffer crosses a DMA boundary on a PC. The poor
- * thing can only DMA within a 64K block not across the edges of it.
- */
-
-static inline int spans_boundary(struct sk_buff *skb)
-{
- unsigned long a = (unsigned long)skb->data;
-
- a ^= (a + skb->len);
- if (a & 0x00010000) /* If the 64K bit is different.. */
- return 1;
- return 0;
-}
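A quick worked check of the XOR trick: for a 32-byte frame at bus address 0x0000FFF0, a ^ (a + len) is 0x0000FFF0 ^ 0x00010010 = 0x0001FFE0. Bit 16 is set, so the frame straddles a 64K page and z8530_queue_xmit() below must bounce it through a flip buffer.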
-
-/**
- * z8530_queue_xmit - Queue a packet
- * @c: The channel to use
- * @skb: The packet to kick down the channel
- *
- * Queue a packet for transmission. Because we have rather
- * hard to hit interrupt latencies for the Z85230 per packet
- * even in DMA mode we do the flip to DMA buffer if needed here
- * not in the IRQ.
- *
- * Called from the network code. The lock is not held at this
- * point.
- */
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
-{
- unsigned long flags;
-
- netif_stop_queue(c->netdevice);
- if (c->tx_next_skb)
- return NETDEV_TX_BUSY;
-
- /* PC SPECIFIC - DMA limits */
- /* If we will DMA the transmit and it's gone over the ISA bus
- * limit, then copy to the flip buffer
- */
-
- if (c->dma_tx &&
- ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
- 16 * 1024 * 1024 || spans_boundary(skb))) {
- /* Send the flip buffer, and flip the flippy bit.
- * We don't care which is used when just so long as
- * we never use the same buffer twice in a row. Since
- * only one buffer can be going out at a time the other
- * has to be safe.
- */
- c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used ^= 1; /* Flip temp buffer */
- skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
- } else {
- c->tx_next_ptr = skb->data;
- }
- RT_LOCK;
- c->tx_next_skb = skb;
- RT_UNLOCK;
-
- spin_lock_irqsave(c->lock, flags);
- z8530_tx_begin(c);
- spin_unlock_irqrestore(c->lock, flags);
-
- return NETDEV_TX_OK;
-}
-EXPORT_SYMBOL(z8530_queue_xmit);
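A consumer's ndo_start_xmit then reduces to a one-line forward to the bound channel. A sketch under the assumption that the driver keeps the channel pointer in its netdev private area (the lookup is illustrative, not part of this API):

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *d)
	{
		struct z8530_channel *ch = netdev_priv(d);	/* assumption */

		return z8530_queue_xmit(ch, skb);
	}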
-
-/* Module support
- */
-static const char banner[] __initconst =
- KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
-
-static int __init z85230_init_driver(void)
-{
- printk(banner);
- return 0;
-}
-module_init(z85230_init_driver);
-
-static void __exit z85230_cleanup_driver(void)
-{
-}
-module_exit(z85230_cleanup_driver);
-
-MODULE_AUTHOR("Red Hat Inc.");
-MODULE_DESCRIPTION("Z85x30 synchronous driver core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
deleted file mode 100644
index 462cb620bc5d..000000000000
--- a/drivers/net/wan/z85230.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Description of Z8530 Z85C30 and Z85230 communications chips
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- */
-
-#ifndef _Z8530_H
-#define _Z8530_H
-
-#include <linux/tty.h>
-#include <linux/interrupt.h>
-
-/* Conversion routines to/from brg time constants from/to bits
- * per second.
- */
-#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
-#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
-
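A worked example, assuming the common 4.9152 MHz PCLK feeding the baud rate generator:

	BPS_TO_BRG(9600, 4915200) = ((4915200 + 9600) / (2 * 9600)) - 2 = 254
	BRG_TO_BPS(254, 4915200)  = 4915200 / 2 / (254 + 2)            = 9600

The added bps term makes the integer division round to the nearest time constant instead of truncating.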
-/* The Zilog register set */
-
-#define FLAG 0x7e
-
-/* Write Register 0 */
-#define R0 0 /* Register selects */
-#define R1 1
-#define R2 2
-#define R3 3
-#define R4 4
-#define R5 5
-#define R6 6
-#define R7 7
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-#define RPRIME 16 /* Indicate a prime register access on 230 */
-
-#define NULLCODE 0 /* Null Code */
-#define POINT_HIGH 0x8 /* Select upper half of registers */
-#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
-#define SEND_ABORT 0x18 /* HDLC Abort */
-#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
-#define RES_Tx_P 0x28 /* Reset TxINT Pending */
-#define ERR_RES 0x30 /* Error Reset */
-#define RES_H_IUS 0x38 /* Reset highest IUS */
-
-#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
-#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
-#define RES_EOM_L 0xC0 /* Reset EOM latch */
-
-/* Write Register 1 */
-
-#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
-#define TxINT_ENAB 0x2 /* Tx Int Enable */
-#define PAR_SPEC 0x4 /* Parity is special condition */
-
-#define RxINT_DISAB 0 /* Rx Int Disable */
-#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
-#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
-#define INT_ERR_Rx 0x18 /* Int on error only */
-
-#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
-#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
-#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
-
-/* Write Register #2 (Interrupt Vector) */
-
-/* Write Register 3 */
-
-#define RxENABLE 0x1 /* Rx Enable */
-#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
-#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
-#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
-#define ENT_HM 0x10 /* Enter Hunt Mode */
-#define AUTO_ENAB 0x20 /* Auto Enables */
-#define Rx5 0x0 /* Rx 5 Bits/Character */
-#define Rx7 0x40 /* Rx 7 Bits/Character */
-#define Rx6 0x80 /* Rx 6 Bits/Character */
-#define Rx8 0xc0 /* Rx 8 Bits/Character */
-
-/* Write Register 4 */
-
-#define PAR_ENA 0x1 /* Parity Enable */
-#define PAR_EVEN 0x2 /* Parity Even/Odd* */
-
-#define SYNC_ENAB 0 /* Sync Modes Enable */
-#define SB1 0x4 /* 1 stop bit/char */
-#define SB15 0x8 /* 1.5 stop bits/char */
-#define SB2 0xc /* 2 stop bits/char */
-
-#define MONSYNC 0 /* 8 Bit Sync character */
-#define BISYNC 0x10 /* 16 bit sync character */
-#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
-#define EXTSYNC 0x30 /* External Sync Mode */
-
-#define X1CLK 0x0 /* x1 clock mode */
-#define X16CLK 0x40 /* x16 clock mode */
-#define X32CLK 0x80 /* x32 clock mode */
-#define X64CLK 0xC0 /* x64 clock mode */
-
-/* Write Register 5 */
-
-#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
-#define RTS 0x2 /* RTS */
-#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
-#define TxENAB 0x8 /* Tx Enable */
-#define SND_BRK 0x10 /* Send Break */
-#define Tx5 0x0 /* Tx 5 bits (or less)/character */
-#define Tx7 0x20 /* Tx 7 bits/character */
-#define Tx6 0x40 /* Tx 6 bits/character */
-#define Tx8 0x60 /* Tx 8 bits/character */
-#define DTR 0x80 /* DTR */
-
-/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
-
-/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
-
-/* Write Register 8 (transmit buffer) */
-
-/* Write Register 9 (Master interrupt control) */
-#define VIS 1 /* Vector Includes Status */
-#define NV 2 /* No Vector */
-#define DLC 4 /* Disable Lower Chain */
-#define MIE 8 /* Master Interrupt Enable */
-#define STATHI 0x10 /* Status high */
-#define NORESET 0 /* No reset on write to R9 */
-#define CHRB 0x40 /* Reset channel B */
-#define CHRA 0x80 /* Reset channel A */
-#define FHWRES 0xc0 /* Force hardware reset */
-
-/* Write Register 10 (misc control bits) */
-#define BIT6 1 /* 6 bit/8bit sync */
-#define LOOPMODE 2 /* SDLC Loop mode */
-#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
-#define MARKIDLE 8 /* Mark/flag on idle */
-#define GAOP 0x10 /* Go active on poll */
-#define NRZ 0 /* NRZ mode */
-#define NRZI 0x20 /* NRZI mode */
-#define FM1 0x40 /* FM1 (transition = 1) */
-#define FM0 0x60 /* FM0 (transition = 0) */
-#define CRCPS 0x80 /* CRC Preset I/O */
-
-/* Write Register 11 (Clock Mode control) */
-#define TRxCXT 0 /* TRxC = Xtal output */
-#define TRxCTC 1 /* TRxC = Transmit clock */
-#define TRxCBR 2 /* TRxC = BR Generator Output */
-#define TRxCDP 3 /* TRxC = DPLL output */
-#define TRxCOI 4 /* TRxC O/I */
-#define TCRTxCP 0 /* Transmit clock = RTxC pin */
-#define TCTRxCP 8 /* Transmit clock = TRxC pin */
-#define TCBR 0x10 /* Transmit clock = BR Generator output */
-#define TCDPLL 0x18 /* Transmit clock = DPLL output */
-#define RCRTxCP 0 /* Receive clock = RTxC pin */
-#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
-#define RCBR 0x40 /* Receive clock = BR Generator output */
-#define RCDPLL 0x60 /* Receive clock = DPLL output */
-#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
-
-/* Write Register 12 (lower byte of baud rate generator time constant) */
-
-/* Write Register 13 (upper byte of baud rate generator time constant) */
-
-/* Write Register 14 (Misc control bits) */
-#define BRENABL 1 /* Baud rate generator enable */
-#define BRSRC 2 /* Baud rate generator source */
-#define DTRREQ 4 /* DTR/Request function */
-#define AUTOECHO 8 /* Auto Echo */
-#define LOOPBAK 0x10 /* Local loopback */
-#define SEARCH 0x20 /* Enter search mode */
-#define RMC 0x40 /* Reset missing clock */
-#define DISDPLL 0x60 /* Disable DPLL */
-#define SSBR 0x80 /* Set DPLL source = BR generator */
-#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
-#define SFMM 0xc0 /* Set FM mode */
-#define SNRZI 0xe0 /* Set NRZI mode */
-
-/* Write Register 15 (external/status interrupt control) */
-#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */
-#define ZCIE 2 /* Zero count IE */
-#define FIFOE 4 /* Z85230 only */
-#define DCDIE 8 /* DCD IE */
-#define SYNCIE 0x10 /* Sync/hunt IE */
-#define CTSIE 0x20 /* CTS IE */
-#define TxUIE 0x40 /* Tx Underrun/EOM IE */
-#define BRKIE 0x80 /* Break/Abort IE */
-
-
-/* Read Register 0 */
-#define Rx_CH_AV 0x1 /* Rx Character Available */
-#define ZCOUNT 0x2 /* Zero count */
-#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
-#define DCD 0x8 /* DCD */
-#define SYNC_HUNT 0x10 /* Sync/hunt */
-#define CTS 0x20 /* CTS */
-#define TxEOM 0x40 /* Tx underrun */
-#define BRK_ABRT 0x80 /* Break/Abort */
-
-/* Read Register 1 */
-#define ALL_SNT 0x1 /* All sent */
-/* Residue Data for 8 Rx bits/char programmed */
-#define RES3 0x8 /* 0/3 */
-#define RES4 0x4 /* 0/4 */
-#define RES5 0xc /* 0/5 */
-#define RES6 0x2 /* 0/6 */
-#define RES7 0xa /* 0/7 */
-#define RES8 0x6 /* 0/8 */
-#define RES18 0xe /* 1/8 */
-#define RES28 0x0 /* 2/8 */
-/* Special Rx Condition Interrupts */
-#define PAR_ERR 0x10 /* Parity error */
-#define Rx_OVR 0x20 /* Rx Overrun Error */
-#define CRC_ERR 0x40 /* CRC/Framing Error */
-#define END_FR 0x80 /* End of Frame (SDLC) */
-
-/* Read Register 2 (channel b only) - Interrupt vector */
-
-/* Read Register 3 (interrupt pending register) ch a only */
-#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
-#define CHBTxIP 0x2 /* Channel B Tx IP */
-#define CHBRxIP 0x4 /* Channel B Rx IP */
-#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
-#define CHATxIP 0x10 /* Channel A Tx IP */
-#define CHARxIP 0x20 /* Channel A Rx IP */
-
-/* Read Register 8 (receive data register) */
-
-/* Read Register 10 (misc status bits) */
-#define ONLOOP 2 /* On loop */
-#define LOOPSEND 0x10 /* Loop sending */
-#define CLK2MIS 0x40 /* Two clocks missing */
-#define CLK1MIS 0x80 /* One clock missing */
-
-/* Read Register 12 (lower byte of baud rate generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-
-/*
- * Interrupt handling functions for this SCC
- */
-
-struct z8530_channel;
-
-struct z8530_irqhandler
-{
- void (*rx)(struct z8530_channel *);
- void (*tx)(struct z8530_channel *);
- void (*status)(struct z8530_channel *);
-};
-
-/*
- * A channel of the Z8530
- */
-
-struct z8530_channel
-{
- struct z8530_irqhandler *irqs; /* IRQ handlers */
- /*
- * Synchronous
- */
- u16 count; /* Bytes received */
- u16 max; /* Most we can receive this frame */
- u16 mtu; /* MTU of the device */
- u8 *dptr; /* Pointer into rx buffer */
- struct sk_buff *skb; /* Buffer dptr points into */
- struct sk_buff *skb2; /* Pending buffer */
- u8 status; /* Current DCD */
- u8 dcdcheck; /* which bit to check for line */
- u8 sync; /* Set if in sync mode */
-
- u8 regs[32]; /* Register map for the chip */
- u8 pendregs[32]; /* Pending register values */
-
- struct sk_buff *tx_skb; /* Buffer being transmitted */
- struct sk_buff *tx_next_skb; /* Next transmit buffer */
- u8 *tx_ptr; /* Byte pointer into the buffer */
- u8 *tx_next_ptr; /* Next pointer to use */
- u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */
- u8 tx_dma_used; /* Flip buffer usage toggler */
- u16 txcount; /* Count of bytes to transmit */
-
- void (*rx_function)(struct z8530_channel *, struct sk_buff *);
-
- /*
- * Sync DMA
- */
-
- u8 rxdma; /* DMA channels */
- u8 txdma;
- u8 rxdma_on; /* DMA active if flag set */
- u8 txdma_on;
- u8 dma_num; /* Buffer we are DMAing into */
- u8 dma_ready; /* Is the other buffer free */
- u8 dma_tx; /* TX is to use DMA */
- u8 *rx_buf[2]; /* The flip buffers */
-
- /*
- * System
- */
-
- struct z8530_dev *dev; /* Z85230 chip instance we are from */
- unsigned long ctrlio; /* I/O ports */
- unsigned long dataio;
-
- /*
- * For PC we encode this way.
- */
-#define Z8530_PORT_SLEEP 0x80000000
-#define Z8530_PORT_OF(x) ((x)&0xFFFF)
-
- u32 rx_overrun; /* Overruns - not done yet */
- u32 rx_crc_err;
-
- /*
- * Bound device pointers
- */
-
- void *private; /* For our owner */
- struct net_device *netdevice; /* Network layer device */
-
- spinlock_t *lock; /* Device lock */
-};
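The ctrlio/dataio encoding above is what the port accessors at the top of z85230.c consume. A hypothetical ISA wiring (the base address and register spacing are illustrative and board specific in practice):

	chan->ctrlio = (0x130 + 1) | Z8530_PORT_SLEEP;	/* slow port: 5uS recovery delay */
	chan->dataio = (0x130 + 3) | Z8530_PORT_SLEEP;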
-
-/*
- * Each Z853x0 device.
- */
-
-struct z8530_dev
-{
- char *name; /* Device instance name */
- struct z8530_channel chanA; /* SCC channel A */
- struct z8530_channel chanB; /* SCC channel B */
- int type;
-#define Z8530 0 /* NMOS dinosaur */
-#define Z85C30 1 /* CMOS - better */
-#define Z85230 2 /* CMOS with real FIFO */
- int irq; /* Interrupt for the device */
- int active; /* Soft interrupt enable - the Mac doesn't
- always have a hard disable on its 8530s... */
- spinlock_t lock;
-};
-
-
-/*
- * Functions
- */
-
-extern u8 z8530_dead_port[];
-extern u8 z8530_hdlc_kilostream_85230[];
-extern u8 z8530_hdlc_kilostream[];
-irqreturn_t z8530_interrupt(int, void *);
-void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-int z8530_init(struct z8530_dev *);
-int z8530_shutdown(struct z8530_dev *);
-int z8530_sync_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-int z8530_channel_load(struct z8530_channel *, u8 *);
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
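Taken together these give a board driver a lifecycle roughly along the lines the old in-tree consumers used; a rough PIO-mode sketch with error handling elided (my_rx and the net_device wiring are the driver's own, illustrative here):

	/* request_irq(dev->irq, z8530_interrupt, ...) done beforehand */
	z8530_init(dev);				/* reset, probe and identify */
	z8530_sync_open(netdev, &dev->chanA);		/* PIO mode, raises RTS/DTR */
	z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
	dev->chanA.rx_function = my_rx;			/* channel_load resets this to z8530_null_rx */
	/* ... z8530_queue_xmit() from ndo_start_xmit, z8530_interrupt on the IRQ ... */
	z8530_sync_close(netdev, &dev->chanA);
	z8530_shutdown(dev);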
-
-
-/*
- * Standard interrupt vector sets
- */
-
-extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
-
-/*
- * Asynchronous Interfacing
- */
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-
-#define SERIAL_XMIT_SIZE 4096
-#define WAKEUP_CHARS 256
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP 0
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
-#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
-#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-
-#endif /* !(_Z8530_H) */
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 9a4c8ff32d9d..5bf7822c53f1 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,6 +6,8 @@
#include "allowedips.h"
#include "peer.h"
+enum { MAX_ALLOWEDIPS_BITS = 128 };
+
static struct kmem_cache *node_cache;
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
@@ -40,7 +42,8 @@ static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
{
if (rcu_access_pointer(p)) {
- WARN_ON(IS_ENABLED(DEBUG) && *len >= 128);
+ if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
+ return;
stack[(*len)++] = rcu_dereference_raw(p);
}
}
@@ -52,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu)
static void root_free_rcu(struct rcu_head *rcu)
{
- struct allowedips_node *node, *stack[128] = {
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
container_of(rcu, struct allowedips_node, rcu) };
unsigned int len = 1;
@@ -65,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu)
static void root_remove_peer_lists(struct allowedips_node *root)
{
- struct allowedips_node *node, *stack[128] = { root };
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
unsigned int len = 1;
while (len > 0 && (node = stack[--len])) {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index a46067c38bf5..d58e9f818d3b 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -19,6 +19,7 @@
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
+#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
@@ -59,9 +60,7 @@ out:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
- void *data)
+static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
struct wg_device *wg;
struct wg_peer *peer;
@@ -70,7 +69,8 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
* its normal operation rather than as a somewhat rare event, then we
* don't actually want to clear keys.
*/
- if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) ||
+ IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP))
return 0;
if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
@@ -92,7 +92,24 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
-#endif
+
+static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct wg_device *wg;
+ struct wg_peer *peer;
+
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_noise_expire_current_peer_keypairs(peer);
+ mutex_unlock(&wg->device_update_lock);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification };
static int wg_stop(struct net_device *dev)
{
@@ -152,7 +169,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_peer;
}
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
__skb_queue_head_init(&packets);
if (!skb_is_gso(skb)) {
@@ -424,16 +441,18 @@ int __init wg_device_init(void)
{
int ret;
-#ifdef CONFIG_PM_SLEEP
ret = register_pm_notifier(&pm_notifier);
if (ret)
return ret;
-#endif
- ret = register_pernet_device(&pernet_ops);
+ ret = register_random_vmfork_notifier(&vm_notifier);
if (ret)
goto error_pm;
+ ret = register_pernet_device(&pernet_ops);
+ if (ret)
+ goto error_vm;
+
ret = rtnl_link_register(&link_ops);
if (ret)
goto error_pernet;
@@ -442,10 +461,10 @@ int __init wg_device_init(void)
error_pernet:
unregister_pernet_device(&pernet_ops);
+error_vm:
+ unregister_random_vmfork_notifier(&vm_notifier);
error_pm:
-#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
-#endif
return ret;
}
@@ -453,8 +472,7 @@ void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
unregister_pernet_device(&pernet_ops);
-#ifdef CONFIG_PM_SLEEP
+ unregister_random_vmfork_notifier(&vm_notifier);
unregister_pm_notifier(&pm_notifier);
-#endif
rcu_barrier();
}
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index d0f3b6d7f408..43c8c84e7ea8 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+ struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) &&
- addr->sa_family == AF_INET) ||
- (len == sizeof(struct sockaddr_in6) &&
- addr->sa_family == AF_INET6)) {
- struct endpoint endpoint = { { { 0 } } };
-
- memcpy(&endpoint.addr, addr, len);
+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+ endpoint.addr4 = *(struct sockaddr_in *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
@@ -621,6 +620,7 @@ static const struct genl_ops genl_ops[] = {
static struct genl_family genl_family __ro_after_init = {
.ops = genl_ops,
.n_ops = ARRAY_SIZE(genl_ops),
+ .resv_start_op = WG_CMD_SET_DEVICE + 1,
.name = WG_GENL_NAME,
.version = WG_GENL_VERSION,
.maxattr = WGDEVICE_A_MAX,
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index c0cfd9b36c0b..720952b92e78 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
static_identity->static_public, private_key);
}
+static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
+{
+ struct blake2s_state state;
+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
+ int i;
+
+ if (keylen > BLAKE2S_BLOCK_SIZE) {
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, key, keylen);
+ blake2s_final(&state, x_key);
+ } else
+ memcpy(x_key, key, keylen);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, i_hash);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x5c ^ 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&state, i_hash);
+
+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
+}
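(This open-codes standard RFC 2104 HMAC over BLAKE2s: x_key is first XORed with the 0x36 inner pad, and the later 0x5c ^ 0x36 pass converts it in place to the 0x5c outer pad.)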
+
/* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869
@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */
- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len)
goto out;
/* Expand first key: key = secret, data = 0x1 */
output[0] = 1;
- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len);
if (!second_dst || !second_len)
@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len);
if (!third_dst || !third_len)
@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len);
out:
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index 1acd00ab2fbc..1cb502a932e0 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -54,8 +54,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
skb_queue_head_init(&peer->staged_packet_queue);
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
- netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll);
napi_enable(&peer->napi);
list_add_tail(&peer->peer_list, &wg->peer_list);
INIT_LIST_HEAD(&peer->allowedips_list);
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 1de413b19e34..8084e7408c0a 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -4,6 +4,7 @@
*/
#include "queueing.h"
+#include <linux/skb_array.h>
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
@@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
free_percpu(queue->worker);
WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
- ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
+ ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
#define NEXT(skb) ((skb)->prev)
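
The ptr_ring_cleanup() change above replaces kfree_skb cast through an incompatible function-pointer type with __skb_array_destroy_skb, which already has the void (*)(void *) signature the ring expects; calling through a mismatched pointer type is undefined behaviour and traps on kernels built with control-flow integrity. A small standalone illustration of the wrapper pattern (names are illustrative):

/* calling through a wrong-typed function pointer is UB; wrap instead */
#include <stdio.h>

struct item { int v; };

static void free_item(struct item *it)
{
	printf("freeing %d\n", it->v);
}

/* thin wrapper with the generic signature the cleanup callback expects */
static void destroy_cb(void *ptr)
{
	free_item(ptr);
}

static void ring_cleanup(void **slots, int n, void (*destroy)(void *))
{
	for (int i = 0; i < n; i++)
		if (slots[i])
			destroy(slots[i]);
}

int main(void)
{
	struct item a = { 1 }, b = { 2 };
	void *slots[2] = { &a, &b };

	/* ring_cleanup(slots, 2, (void (*)(void *))free_item);  <- UB/CFI trap */
	ring_cleanup(slots, 2, destroy_cb);                    /* correct */
	return 0;
}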
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7b8df406c773..7135d51d2d87 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -19,15 +19,8 @@
/* Must be called with bh disabled. */
static void update_rx_stats(struct wg_peer *peer, size_t len)
{
- struct pcpu_sw_netstats *tstats =
- get_cpu_ptr(peer->device->dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- ++tstats->rx_packets;
- tstats->rx_bytes += len;
+ dev_sw_netstats_rx_add(peer->device->dev, len);
peer->rx_bytes += len;
- u64_stats_update_end(&tstats->syncp);
- put_cpu_ptr(tstats);
}
#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
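
dev_sw_netstats_rx_add() folds the removed open-coded sequence (per-CPU tstats lookup, u64_stats_update_begin/end, counter bumps) into one core helper. A rough single-CPU model of the update it performs; illustrative only, the real helper operates on dev->tstats per-CPU data with proper seqcount primitives:

/* userspace sketch of a seqcount-protected rx stats bump */
#include <stdint.h>
#include <stdio.h>

struct sw_netstats {
	uint64_t rx_packets;
	uint64_t rx_bytes;
	unsigned int seq; /* stand-in for u64_stats_update_begin/end */
};

static void netstats_rx_add(struct sw_netstats *st, size_t len)
{
	st->seq++;          /* writer enters critical section */
	st->rx_packets++;
	st->rx_bytes += len;
	st->seq++;          /* writer leaves; readers retry on odd seq */
}

int main(void)
{
	struct sw_netstats st = { 0 };

	netstats_rx_add(&st, 1500);
	printf("%llu pkts, %llu bytes\n",
	       (unsigned long long)st.rx_packets,
	       (unsigned long long)st.rx_bytes);
	return 0;
}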
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index e173204ae7d7..19eac00b2381 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -284,7 +284,7 @@ static __init bool randomized_test(void)
mutex_lock(&mutex);
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
- prandom_bytes(ip, 4);
+ get_random_bytes(ip, 4);
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
@@ -299,7 +299,7 @@ static __init bool randomized_test(void)
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 4);
- prandom_bytes(mutate_mask, 4);
+ get_random_bytes(mutate_mask, 4);
mutate_amount = prandom_u32_max(32);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
@@ -310,7 +310,7 @@ static __init bool randomized_test(void)
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
- prandom_u32_max(256));
+ get_random_u8());
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t,
@@ -328,7 +328,7 @@ static __init bool randomized_test(void)
}
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
- prandom_bytes(ip, 16);
+ get_random_bytes(ip, 16);
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
@@ -343,7 +343,7 @@ static __init bool randomized_test(void)
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 16);
- prandom_bytes(mutate_mask, 16);
+ get_random_bytes(mutate_mask, 16);
mutate_amount = prandom_u32_max(128);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
@@ -354,7 +354,7 @@ static __init bool randomized_test(void)
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
- prandom_u32_max(256));
+ get_random_u8());
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t,
@@ -381,13 +381,13 @@ static __init bool randomized_test(void)
for (j = 0;; ++j) {
for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 4);
+ get_random_bytes(ip, 4);
if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
pr_err("allowedips random v4 self-test: FAIL\n");
goto free;
}
- prandom_bytes(ip, 16);
+ get_random_bytes(ip, 16);
if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
pr_err("allowedips random v6 self-test: FAIL\n");
goto free;
@@ -593,10 +593,10 @@ bool __init wg_allowedips_selftest(void)
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);
- /* These will hit the WARN_ON(len >= 128) in free_node if something
- * goes wrong.
+ /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
+ * if something goes wrong.
*/
- for (i = 0; i < 128; ++i) {
+ for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
part = cpu_to_be64(~(1LLU << (i % 64)));
memset(&ip, 0xff, 16);
memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index 007cd4457c5f..d4bb40a695ab 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -167,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
++test;
#endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
@@ -176,7 +176,6 @@ bool __init wg_ratelimiter_selftest(void)
test += test_count;
goto err;
}
- msleep(500);
continue;
} else if (ret < 0) {
test += test_count;
@@ -195,7 +194,6 @@ bool __init wg_ratelimiter_selftest(void)
test += test_count;
goto err;
}
- msleep(50);
continue;
}
test += test_count;
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 6f07b949cb81..0414d7a6ce74 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -160,6 +160,7 @@ out:
rcu_read_unlock_bh();
return ret;
#else
+ kfree_skb(skb);
return -EAFNOSUPPORT;
#endif
}
@@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
endpoint->src4.s_addr = ip_hdr(skb)->daddr;
endpoint->src_if4 = skb->skb_iif;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
endpoint->addr6.sin6_family = AF_INET6;
endpoint->addr6.sin6_port = udp_hdr(skb)->source;
endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
@@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer,
peer->endpoint.addr4 = endpoint->addr4;
peer->endpoint.src4 = endpoint->src4;
peer->endpoint.src_if4 = endpoint->src_if4;
- } else if (endpoint->addr.sa_family == AF_INET6) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
peer->endpoint.addr6 = endpoint->addr6;
peer->endpoint.src6 = endpoint->src6;
} else {
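
The IS_ENABLED(CONFIG_IPV6) guards above let the compiler discard the v6 branches entirely on CONFIG_IPV6=n builds while still type-checking them. A standalone replica of the preprocessor trick behind IS_ENABLED (simplified: the kernel version also checks the _MODULE suffix; requires GNU C for the empty variadic case):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_FOO 1
/* CONFIG_BAR intentionally not defined */

int main(void)
{
	if (IS_ENABLED(CONFIG_FOO))
		printf("FOO branch kept\n");
	if (IS_ENABLED(CONFIG_BAR))
		printf("BAR branch compiled but never taken\n");
	return 0;
}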
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 7add2002ff4c..cb1c15012dd0 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -28,9 +28,11 @@ source "drivers/net/wireless/intersil/Kconfig"
source "drivers/net/wireless/marvell/Kconfig"
source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/microchip/Kconfig"
+source "drivers/net/wireless/purelifi/Kconfig"
source "drivers/net/wireless/ralink/Kconfig"
source "drivers/net/wireless/realtek/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
+source "drivers/net/wireless/silabs/Kconfig"
source "drivers/net/wireless/st/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zydas/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 80b324499786..a61cf6c90343 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -13,13 +13,15 @@ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/
+obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/
+obj-$(CONFIG_WLAN_VENDOR_SILABS) += silabs/
obj-$(CONFIG_WLAN_VENDOR_ST) += st/
obj-$(CONFIG_WLAN_VENDOR_TI) += ti/
obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/
-obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2db9c948c0fc..6bee16b207d1 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1311,7 +1311,7 @@ static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changes)
+ u64 changes)
{
struct adm8211_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 141c1b5a7b1f..6f937d2cc126 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -104,7 +104,7 @@ static void ar5523_cmd_rx_cb(struct urb *urb)
}
if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
- ar5523_err(ar, "RX USB to short.\n");
+ ar5523_err(ar, "RX USB too short.\n");
goto skip;
}
@@ -1160,7 +1160,7 @@ static int ar5523_get_wlan_mode(struct ar5523 *ar,
ar5523_info(ar, "STA not found!\n");
return WLAN_MODE_11b;
}
- sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band];
for (bit = 0; bit < band->n_bitrates; bit++) {
if (sta_rate_set & 1) {
@@ -1198,7 +1198,7 @@ static void ar5523_create_rateset(struct ar5523 *ar,
ar5523_info(ar, "STA not found. Cannot set rates\n");
sta_rate_set = bss_conf->basic_rates;
} else
- sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band];
ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
@@ -1256,14 +1256,14 @@ static int ar5523_create_connection(struct ar5523 *ar,
sizeof(create), 0);
}
-static int ar5523_write_associd(struct ar5523 *ar,
- struct ieee80211_bss_conf *bss)
+static int ar5523_write_associd(struct ar5523 *ar, struct ieee80211_vif *vif)
{
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
struct ar5523_cmd_set_associd associd;
memset(&associd, 0, sizeof(associd));
associd.defaultrateix = cpu_to_be32(0); /* XXX */
- associd.associd = cpu_to_be32(bss->aid);
+ associd.associd = cpu_to_be32(vif->cfg.aid);
associd.timoffset = cpu_to_be32(0x3b); /* XXX */
memcpy(associd.bssid, bss->bssid, ETH_ALEN);
return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
@@ -1273,7 +1273,7 @@ static int ar5523_write_associd(struct ar5523 *ar,
static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss,
- u32 changed)
+ u64 changed)
{
struct ar5523 *ar = hw->priv;
int error;
@@ -1284,7 +1284,7 @@ static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
if (!(changed & BSS_CHANGED_ASSOC))
goto out_unlock;
- if (bss->assoc) {
+ if (vif->cfg.assoc) {
error = ar5523_create_connection(ar, vif, bss);
if (error) {
ar5523_err(ar, "could not create connection\n");
@@ -1297,7 +1297,7 @@ static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
goto out_unlock;
}
- error = ar5523_write_associd(ar, bss);
+ error = ar5523_write_associd(ar, vif);
if (error) {
ar5523_err(ar, "could not set association\n");
goto out_unlock;
@@ -1500,7 +1500,7 @@ static int ar5523_load_firmware(struct usb_device *dev)
return -ENOENT;
}
- txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ txblock = kzalloc(sizeof(*txblock), GFP_KERNEL);
if (!txblock)
goto out;
@@ -1512,7 +1512,6 @@ static int ar5523_load_firmware(struct usb_device *dev)
if (!fwbuf)
goto out_free_rxblock;
- memset(txblock, 0, sizeof(struct ar5523_fwblock));
txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
txblock->total = cpu_to_be32(fw->size);
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ab8f77ae5e66..f0c615fa5614 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -728,20 +728,17 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
struct ath10k *ar;
struct ath10k_ahb *ar_ahb;
struct ath10k_pci *ar_pci;
- const struct of_device_id *of_id;
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
struct ath10k_bus_params bus_params = {};
- of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ hw_rev = (enum ath10k_hw_rev)of_device_get_match_data(&pdev->dev);
+ if (!hw_rev) {
+ dev_err(&pdev->dev, "OF data missing\n");
return -EINVAL;
}
- hw_rev = (enum ath10k_hw_rev)of_id->data;
-
size = sizeof(*ar_pci) + sizeof(*ar_ahb);
ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
hw_rev, &ath10k_ahb_hif_ops);
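
The probe simplification above relies on each of_device_id entry carrying the hardware revision in its opaque .data pointer, which of_device_get_match_data() returns directly. A userspace model of that lookup (the match table contents here are illustrative):

/* sketch of an OF match table with per-entry data, as used by the probe */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum hw_rev { HW_REV_UNSET = 0, HW_REV_QCA4019 = 1 };

struct of_match { const char *compatible; const void *data; };

static const struct of_match match_table[] = {
	{ .compatible = "qcom,ipq4019-wifi",
	  .data = (const void *)(uintptr_t)HW_REV_QCA4019 },
	{ /* sentinel */ }
};

static const void *match_data(const char *compatible)
{
	for (const struct of_match *m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	/* zero doubles as "no data", which is why the probe above can
	 * treat hw_rev == 0 as an error */
	enum hw_rev rev =
		(enum hw_rev)(uintptr_t)match_data("qcom,ipq4019-wifi");

	printf("hw_rev = %d\n", rev);
	return 0;
}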
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 4481ed375f55..af6546572df2 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -101,7 +101,7 @@ int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
/* Step 1: Read 4 bytes of the target info and check if it is
- * the special sentinal version word or the first word in the
+ * the special sentinel version word or the first word in the
* version response.
*/
resplen = sizeof(u32);
@@ -111,7 +111,7 @@ int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
return ret;
}
- /* Some SDIO boards have a special sentinal byte before the real
+ /* Some SDIO boards have a special sentinel byte before the real
* version response.
*/
if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index c45c814fd122..59926227bd49 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1323,7 +1323,7 @@ EXPORT_SYMBOL(ath10k_ce_per_engine_service);
/*
* Handler for per-engine interrupts on ALL active CEs.
* This is used in cases where the system is sharing a
- * single interrput for all CEs
+ * single interrupt for all CEs
*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 8f5b8eb368fa..400f332a7ff0 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -33,9 +33,11 @@ EXPORT_SYMBOL(ath10k_debug_mask);
static unsigned int ath10k_cryptmode_param;
static bool uart_print;
static bool skip_otp;
-static bool rawmode;
static bool fw_diag_log;
+/* frame mode values are mapped as per enum ath10k_hw_txrx_mode */
+unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+
unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
@@ -44,15 +46,16 @@ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
-module_param(rawmode, bool, 0644);
module_param(fw_diag_log, bool, 0644);
+module_param_named(frame_mode, ath10k_frame_mode, uint, 0644);
module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
-MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging");
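
For reference, the frame_mode values map onto enum ath10k_hw_txrx_mode; a sketch of the relevant part of that enum, matching the MODULE_PARM_DESC text above (see hw.h for the authoritative definition):

enum ath10k_hw_txrx_mode {
	ATH10K_HW_TXRX_RAW = 0,		/* raw 802.11 frames */
	ATH10K_HW_TXRX_NATIVE_WIFI = 1,	/* default */
	ATH10K_HW_TXRX_ETHERNET = 2,
};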
@@ -75,6 +78,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -93,6 +97,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -111,6 +117,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -129,6 +136,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -148,6 +157,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -166,6 +176,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -184,6 +196,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_sdio_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -198,6 +211,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -216,6 +231,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -234,6 +250,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -252,6 +270,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -270,6 +289,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -288,6 +309,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -306,6 +328,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -325,6 +349,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -346,6 +371,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -370,6 +397,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
@@ -388,6 +416,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -415,6 +445,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
@@ -437,6 +468,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -461,6 +494,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
@@ -483,6 +517,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -501,6 +537,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -519,6 +556,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -537,6 +576,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -557,6 +597,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -575,6 +617,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -586,6 +629,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin_workaround = true,
.credit_size_workaround = true,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -611,6 +656,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
@@ -629,6 +675,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -643,6 +691,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dir = WCN3990_HW_1_0_FW_DIR,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &wcn3990_rx_desc_ops,
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_TLV_NUM_PEERS,
@@ -658,6 +707,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
+ .hw_restart_disconnect = true,
+ .use_fw_tx_credits = false,
},
};
@@ -1201,6 +1252,7 @@ success:
static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
{
const struct firmware *fw;
+ char boardname[100];
if (bd_ie_type == ATH10K_BD_IE_BOARD) {
if (!ar->hw_params.fw.board) {
@@ -1208,9 +1260,19 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
return -EINVAL;
}
+ scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
+ boardname);
+ if (IS_ERR(ar->normal_mode_fw.board)) {
+ fw = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ ar->normal_mode_fw.board = fw;
+ }
+
if (IS_ERR(ar->normal_mode_fw.board))
return PTR_ERR(ar->normal_mode_fw.board);
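
The API-1 board fetch above now tries a device-specific name of the form "board-<bus>-<device>.bin" first and falls back to the generic per-hw board file. A tiny userspace model of that fallback; the file names and pretend firmware-directory listing are illustrative:

/* try the specific board file first, then fall back to the generic one */
#include <stdio.h>
#include <string.h>

static const char *available[] = { "board.bin" }; /* pretend fw dir listing */

static int fetch(const char *name)
{
	for (size_t i = 0; i < sizeof(available) / sizeof(available[0]); i++)
		if (!strcmp(available[i], name))
			return 0;
	return -1;
}

int main(void)
{
	char boardname[100];

	snprintf(boardname, sizeof(boardname), "board-%s-%s.bin",
		 "pci", "0000:01:00.0");
	if (fetch(boardname) && fetch("board.bin")) {
		fprintf(stderr, "no board file\n");
		return 1;
	}
	printf("board data loaded\n");
	return 0;
}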
@@ -2426,6 +2488,7 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ struct ath10k_vif *arvif;
int ret;
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
@@ -2464,6 +2527,14 @@ static void ath10k_core_restart(struct work_struct *work)
ar->state = ATH10K_STATE_RESTARTING;
ath10k_halt(ar);
ath10k_scan_finish(ar);
+ if (ar->hw_params.hw_restart_disconnect) {
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ieee80211_hw_restart_disconnect(arvif->vif);
+ }
+ }
+
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
@@ -2547,7 +2618,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
- if (rawmode) {
+ if (ath10k_frame_mode == ATH10K_HW_TXRX_RAW) {
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
fw_file->fw_features)) {
ath10k_err(ar, "rawmode = 1 requires support from firmware");
@@ -3025,7 +3096,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* enabled always.
*
* We can still enable BTCOEX if firmware has the support
- * eventhough btceox_support value is
+ * even though btcoex_support value is

* ATH10K_DT_BTCOEX_NOT_FOUND
*/
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 9f6680b3be0a..f5de8ce8fb45 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -59,9 +59,6 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
-/* NAPI poll budget */
-#define ATH10K_NAPI_BUDGET 64
-
/* SMBIOS type containing Board Data File Name Extension */
#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
@@ -79,7 +76,7 @@
/* The magic used by QCA spec */
#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
-/* Default Airtime weight multipler (Tuned for multiclient performance) */
+/* Default Airtime weight multiplier (Tuned for multiclient performance) */
#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
#define ATH10K_MAX_RETRY_COUNT 30
@@ -860,7 +857,7 @@ enum ath10k_dev_flags {
/* Disable HW crypto engine */
ATH10K_FLAG_HW_CRYPTO_DISABLED,
- /* Bluetooth coexistance enabled */
+ /* Bluetooth coexistence enabled */
ATH10K_FLAG_BTCOEX,
/* Per Station statistics service */
@@ -1317,6 +1314,7 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
return false;
}
+extern unsigned int ath10k_frame_mode;
extern unsigned long ath10k_coredump_mask;
void ath10k_core_napi_sync_disable(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index fe6b6f97a916..2d1634a890dd 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -531,7 +531,7 @@ static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = {
{0x40000, 0x400A4},
- /* SI register is skiped here.
+ /* SI register is skipped here.
* Because it will cause bus hang
*
* {0x50000, 0x50018},
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 240d70515088..437b9759f05d 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -125,7 +125,7 @@ enum ath10k_mem_region_type {
* To minimize the size of the array, the list must obey the format:
* '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
* also obey to 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
- * we may encouter error in the dump processing.
+ * we may encounter error in the dump processing.
*/
struct ath10k_mem_section {
u32 start;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 39378e3f9b2b..c861e66ef6bc 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1081,7 +1081,7 @@ exit:
* struct available..
*/
-/* This generally cooresponds to the debugfs fw_stats file */
+/* This generally corresponds to the debugfs fw_stats file */
static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 367539f2c370..87a3365330ff 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -498,7 +498,7 @@ static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i)
{
switch (i) {
case ATH10K_AMPDU_SUBFRM_NUM_10:
- return "upto 10";
+ return "up to 10";
case ATH10K_AMPDU_SUBFRM_NUM_20:
return "11-20";
case ATH10K_AMPDU_SUBFRM_NUM_30:
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index fab398046a3f..6d1784f74bea 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -947,13 +947,18 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
return -ECOMM;
}
- htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ if (ar->hw_params.use_fw_tx_credits)
+ htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ else
+ htc->total_transmit_credits = 1;
+
htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
ath10k_dbg(ar, ATH10K_DBG_HTC,
- "Target ready! transmit resources: %d size:%d\n",
+ "Target ready! transmit resources: %d size:%d actual credits:%d\n",
htc->total_transmit_credits,
- htc->target_credit_size);
+ htc->target_credit_size,
+ msg->ready.credit_count);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
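
With use_fw_tx_credits disabled, the host ignores the firmware-reported credit count and serializes transmission on a single credit. A toy model of HTC-style credit flow control (names are illustrative):

/* each in-flight message consumes a credit; completions return it */
#include <stdio.h>

struct htc_model { int credits; };

static int try_send(struct htc_model *h, int msg)
{
	if (h->credits == 0)
		return -1;	/* caller must wait for a completion */
	h->credits--;
	printf("sent msg %d (credits left %d)\n", msg, h->credits);
	return 0;
}

static void tx_complete(struct htc_model *h)
{
	h->credits++;
}

int main(void)
{
	struct htc_model h = { .credits = 1 }; /* use_fw_tx_credits = false */

	try_send(&h, 1);
	if (try_send(&h, 2) < 0)
		printf("msg 2 blocked until completion\n");
	tx_complete(&h);
	try_send(&h, 2);
	return 0;
}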
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 127b4e4980ef..907e1e13871a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -131,6 +131,159 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
HTT_T2H_MSG_TYPE_PEER_STATS,
};
+const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload)
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return !!(rx_desc->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
+}
+
+const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload),
+
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+};
+
+static void ath10k_rx_desc_wcn3990_get_offsets(struct htt_rx_ring_rx_desc_offsets *off)
+{
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v2, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static struct htt_rx_desc *
+ath10k_rx_desc_wcn3990_from_raw_buffer(void *buff)
+{
+ return &((struct htt_rx_desc_v2 *)buff)->base;
+}
+
+static struct rx_attention *
+ath10k_rx_desc_wcn3990_get_attention(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->attention;
+}
+
+static struct rx_frag_info_common *
+ath10k_rx_desc_wcn3990_get_frag_info(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->frag_info.common;
+}
+
+static struct rx_mpdu_start *
+ath10k_rx_desc_wcn3990_get_mpdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_start;
+}
+
+static struct rx_mpdu_end *
+ath10k_rx_desc_wcn3990_get_mpdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_end;
+}
+
+static struct rx_msdu_start_common *
+ath10k_rx_desc_wcn3990_get_msdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_start.common;
+}
+
+static struct rx_msdu_end_common *
+ath10k_rx_desc_wcn3990_get_msdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_end.common;
+}
+
+static struct rx_ppdu_start *
+ath10k_rx_desc_wcn3990_get_ppdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_start;
+}
+
+static struct rx_ppdu_end_common *
+ath10k_rx_desc_wcn3990_get_ppdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_end.common;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_rx_hdr_status(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->rx_hdr_status;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_msdu_payload(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->msdu_payload;
+}
+
+const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v2),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v2, msdu_payload),
+
+ .rx_desc_from_raw_buffer = ath10k_rx_desc_wcn3990_from_raw_buffer,
+ .rx_desc_get_offsets = ath10k_rx_desc_wcn3990_get_offsets,
+ .rx_desc_get_attention = ath10k_rx_desc_wcn3990_get_attention,
+ .rx_desc_get_frag_info = ath10k_rx_desc_wcn3990_get_frag_info,
+ .rx_desc_get_mpdu_start = ath10k_rx_desc_wcn3990_get_mpdu_start,
+ .rx_desc_get_mpdu_end = ath10k_rx_desc_wcn3990_get_mpdu_end,
+ .rx_desc_get_msdu_start = ath10k_rx_desc_wcn3990_get_msdu_start,
+ .rx_desc_get_msdu_end = ath10k_rx_desc_wcn3990_get_msdu_end,
+ .rx_desc_get_ppdu_start = ath10k_rx_desc_wcn3990_get_ppdu_start,
+ .rx_desc_get_ppdu_end = ath10k_rx_desc_wcn3990_get_ppdu_end,
+ .rx_desc_get_rx_hdr_status = ath10k_rx_desc_wcn3990_get_rx_hdr_status,
+ .rx_desc_get_msdu_payload = ath10k_rx_desc_wcn3990_get_msdu_payload,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 9a3a8907389b..f06cf39204e2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -240,14 +240,7 @@ enum htt_rx_ring_flags {
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
-struct htt_rx_ring_setup_ring32 {
- __le32 fw_idx_shadow_reg_paddr;
- __le32 rx_ring_base_paddr;
- __le16 rx_ring_len; /* in 4-byte words */
- __le16 rx_ring_bufsize; /* rx skb size - in bytes */
- __le16 flags; /* %HTT_RX_RING_FLAGS_ */
- __le16 fw_idx_init_val;
-
+struct htt_rx_ring_rx_desc_offsets {
/* the following offsets are in 4-byte units */
__le16 mac80211_hdr_offset;
__le16 msdu_payload_offset;
@@ -261,6 +254,17 @@ struct htt_rx_ring_setup_ring32 {
__le16 frag_info_offset;
} __packed;
+struct htt_rx_ring_setup_ring32 {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ struct htt_rx_ring_rx_desc_offsets offsets;
+} __packed;
+
struct htt_rx_ring_setup_ring64 {
__le64 fw_idx_shadow_reg_paddr;
__le64 rx_ring_base_paddr;
@@ -269,17 +273,7 @@ struct htt_rx_ring_setup_ring64 {
__le16 flags; /* %HTT_RX_RING_FLAGS_ */
__le16 fw_idx_init_val;
- /* the following offsets are in 4-byte units */
- __le16 mac80211_hdr_offset;
- __le16 msdu_payload_offset;
- __le16 ppdu_start_offset;
- __le16 ppdu_end_offset;
- __le16 mpdu_start_offset;
- __le16 mpdu_end_offset;
- __le16 msdu_start_offset;
- __le16 msdu_end_offset;
- __le16 rx_attention_offset;
- __le16 frag_info_offset;
+ struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
struct htt_rx_ring_setup_hdr {
@@ -2075,12 +2069,22 @@ static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
}
+/* the driver strongly assumes that the rx header status is 64 bytes long,
+ * so all possible rx_desc structures must respect this assumption.
+ */
#define RX_HTT_HDR_STATUS_LEN 64
-/* This structure layout is programmed via rx ring setup
+/* The rx descriptor structure layout is programmed via rx ring setup
* so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring.
+ * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly
+ * when the structure layout of the rx descriptor is modified beyond what it
+ * expects (even if it is correctly programmed during the rx ring setup).
+ * Therefore we must keep two different memory layouts, abstract the rx
+ * descriptor representation and use ath10k_rx_desc_ops to access rx
+ * descriptor data correctly.
*/
+
+/* base struct used for abstracting the rx descriptor representation */
struct htt_rx_desc {
union {
/* This field is filled on the host using the msdu buffer
@@ -2089,6 +2093,13 @@ struct htt_rx_desc {
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
+} __packed;
+
+/* rx descriptor for wcn3990, possibly extensible to newer cards.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v2 {
+ struct htt_rx_desc base;
struct {
struct rx_attention attention;
struct rx_frag_info frag_info;
@@ -2103,6 +2114,240 @@ struct htt_rx_desc {
u8 msdu_payload[];
};
+/* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0, to make sure
+ * their firmware works correctly. We keep a single rx descriptor for all
+ * three of these card families because testing showed it to be the most
+ * stable solution; e.g. keeping an rx descriptor only for QCA6174
+ * occasionally caused firmware crashes during some tests.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v1 {
+ struct htt_rx_desc base;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info_v1 frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start_v1 msdu_start;
+ struct rx_msdu_end_v1 msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end_v1 ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+};
+
+/* rx_desc abstraction */
+struct ath10k_htt_rx_desc_ops {
+ /* These fields are mandatory, they must be specified in any instance */
+
+ /* sizeof() of the rx_desc structure used by this hw */
+ size_t rx_desc_size;
+
+ /* offset of msdu_payload inside the rx_desc structure used by this hw */
+ size_t rx_desc_msdu_payload_offset;
+
+ /* These fields are optional.
+ * When a field is not provided the default implementation gets used
+ * (see the ath10k_htt_rx_desc_* helpers below for more info about the defaults)
+ */
+ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+
+ /* Safely cast from a void* buffer containing an rx descriptor
+ * to the proper rx_desc structure
+ */
+ struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
+
+ void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
+ struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
+ struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
+ struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
+ struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
+
+static inline int
+ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
+ return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
+static inline bool
+ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
+ return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
+ return false;
+}
+
+/* The default implementation of all these getters uses the old rx_desc,
+ * which makes it easier to define the ath10k_htt_rx_desc_ops instances.
+ * If new wireless cards need to be supported, though, it would probably be
+ * better to switch the default implementation to the new rx_desc, since
+ * that would make extending it easier.
+ */
+static inline struct htt_rx_desc *
+ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
+{
+ if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
+ return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
+ return &((struct htt_rx_desc_v1 *)buff)->base;
+}
+
+static inline void
+ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_rx_desc_offsets *off)
+{
+ if (hw->rx_desc_ops->rx_desc_get_offsets) {
+ hw->rx_desc_ops->rx_desc_get_offsets(off);
+ } else {
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+ }
+}
+
+static inline struct rx_attention *
+ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_attention)
+ return hw->rx_desc_ops->rx_desc_get_attention(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->attention;
+}
+
+static inline struct rx_frag_info_common *
+ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_frag_info)
+ return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->frag_info.common;
+}
+
+static inline struct rx_mpdu_start *
+ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_start;
+}
+
+static inline struct rx_mpdu_end *
+ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_end;
+}
+
+static inline struct rx_msdu_start_common *
+ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_start)
+ return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_start.common;
+}
+
+static inline struct rx_msdu_end_common *
+ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_end)
+ return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_end.common;
+}
+
+static inline struct rx_ppdu_start *
+ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_start;
+}
+
+static inline struct rx_ppdu_end_common *
+ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_end.common;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
+ return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->rx_hdr_status;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
+ return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->msdu_payload;
+}
+
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
@@ -2136,7 +2381,14 @@ struct htt_rx_chan_info {
* rounded up to a cache line size.
*/
#define HTT_RX_BUF_SIZE 2048
-#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* HTT_RX_MSDU_SIZE can no longer be computed statically, because it
+ * depends on the underlying device's rx_desc representation.
+ */
+static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
+{
+ return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
+}
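
A userspace model of the rx_desc_ops pattern introduced above: a per-hardware ops table whose optional accessors fall back to the v1 layout, so legacy chips need no callbacks at all. Struct and field names here are illustrative:

/* per-hw ops table with mandatory size fields and optional accessors */
#include <stddef.h>
#include <stdio.h>

struct attention { unsigned int flags; };
struct desc_v1 { struct attention attn; };
struct desc_v2 { int hdr; struct attention attn; };

struct rx_desc_ops {
	size_t desc_size;                              /* mandatory */
	struct attention *(*get_attention)(void *rxd); /* optional */
};

static struct attention *v2_get_attention(void *rxd)
{
	return &((struct desc_v2 *)rxd)->attn;
}

static const struct rx_desc_ops v1_ops = {
	.desc_size = sizeof(struct desc_v1),
};

static const struct rx_desc_ops v2_ops = {
	.desc_size = sizeof(struct desc_v2),
	.get_attention = v2_get_attention,
};

static struct attention *get_attention(const struct rx_desc_ops *ops,
				       void *rxd)
{
	if (ops->get_attention)
		return ops->get_attention(rxd);
	return &((struct desc_v1 *)rxd)->attn;  /* default: v1 layout */
}

int main(void)
{
	struct desc_v1 d1 = { .attn.flags = 1 };
	struct desc_v2 d2 = { .attn.flags = 2 };

	printf("%u %u\n", get_attention(&v1_ops, &d1)->flags,
	       get_attention(&v2_ops, &d2)->flags);
	return 0;
}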
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely.
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index adbaeb67eedf..e76aab973320 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -21,7 +21,10 @@
#define HTT_RX_RING_REFILL_RESCHED_MS 5
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+/* shortcut to interpret a raw memory buffer as a rx descriptor */
+#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
+
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
@@ -128,6 +131,7 @@ static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
+ struct ath10k_hw_params *hw = &htt->ar->hw_params;
struct htt_rx_desc *rx_desc;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb;
@@ -163,8 +167,8 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
skb->data);
/* Clear rx_desc attention word before posting to Rx ring */
- rx_desc = (struct htt_rx_desc *)skb->data;
- rx_desc->attention.flags = __cpu_to_le32(0);
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
+ ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
paddr = dma_map_single(htt->ar->dev, skb->data,
skb->len + skb_tailroom(skb),
@@ -297,12 +301,16 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
ath10k_htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
+ ath10k_htt_config_paddrs_ring(htt, NULL);
+
dma_free_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
htt->rx_ring.alloc_idx.vaddr,
htt->rx_ring.alloc_idx.paddr);
+ htt->rx_ring.alloc_idx.vaddr = NULL;
kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
@@ -343,9 +351,14 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+ struct rx_attention *rx_desc_attention;
+ struct rx_frag_info_common *rx_desc_frag_info_common;
+ struct rx_msdu_start_common *rx_desc_msdu_start_common;
+ struct rx_msdu_end_common *rx_desc_msdu_end_common;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -360,13 +373,18 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
__skb_queue_tail(amsdu, msdu);
- rx_desc = (struct htt_rx_desc *)msdu->data;
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
+ rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
+ rx_desc);
+ rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
+ rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
/* FIXME: we must report msdu payload since this is what caller
* expects now
*/
- skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
- skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
/*
* Sanity check - confirm the HW is finished filling in the
@@ -376,24 +394,24 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
* To prevent the case that we handle a stale Rx descriptor,
* just assert for now until we have a way to recover.
*/
- if (!(__le32_to_cpu(rx_desc->attention.flags)
+ if (!(__le32_to_cpu(rx_desc_attention->flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
__skb_queue_purge(amsdu);
return -EIO;
}
- msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
- msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
+ msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
- msdu_chained = rx_desc->frag_info.ring2_more_count;
+ msdu_chained = rx_desc_frag_info_common->ring2_more_count;
if (msdu_len_invalid)
msdu_len = 0;
skb_trim(msdu, 0);
- skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
msdu_len -= msdu->len;
/* Note: Chained buffers do not contain rx descriptor */
@@ -411,11 +429,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_chaining = 1;
}
- last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
+ last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
- sizeof(*rx_desc) - sizeof(u32));
+ /* FIXME: why are we skipping the first part of the rx_desc? */
+ trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
+ hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu)
break;
@@ -480,6 +499,7 @@ static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u32 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
@@ -488,12 +508,12 @@ static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
@@ -556,6 +576,7 @@ ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u64 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
@@ -564,12 +585,12 @@ ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
@@ -631,8 +652,10 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
@@ -667,15 +690,16 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
@@ -693,8 +717,10 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
@@ -728,15 +754,16 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
@@ -823,8 +850,10 @@ err_dma_idx:
ath10k_htt_get_rx_ring_size(htt),
vaddr_ring,
htt->rx_ring.base_paddr);
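+ /* Reset the ring bookkeeping on the error paths so a later teardown
+ * or re-init cannot free stale pointers (the apparent intent of
+ * clearing these explicitly).
+ */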
+ ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
return -ENOMEM;
}
@@ -944,16 +973,32 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct rx_mpdu_end *rxd_mpdu_end;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_ppdu_start *rxd_ppdu_start;
struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss;
+ u8 *rxd_msdu_payload;
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
u32 stbc, nsts_su;
- info1 = __le32_to_cpu(rxd->ppdu_start.info1);
- info2 = __le32_to_cpu(rxd->ppdu_start.info2);
- info3 = __le32_to_cpu(rxd->ppdu_start.info3);
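+ /* Resolve the field locations once up front; the
+ * ath10k_htt_rx_desc_get_*() helpers hide the per-generation
+ * descriptor layout behind hw->rx_desc_ops.
+ */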
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
+ rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
+
+ info1 = __le32_to_cpu(rxd_ppdu_start->info1);
+ info2 = __le32_to_cpu(rxd_ppdu_start->info2);
+ info3 = __le32_to_cpu(rxd_ppdu_start->info3);
preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
@@ -1022,24 +1067,24 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
if (mcs > 0x09) {
ath10k_warn(ar, "invalid MCS received %u\n", mcs);
ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
- __le32_to_cpu(rxd->attention.flags),
- __le32_to_cpu(rxd->mpdu_start.info0),
- __le32_to_cpu(rxd->mpdu_start.info1),
- __le32_to_cpu(rxd->msdu_start.common.info0),
- __le32_to_cpu(rxd->msdu_start.common.info1),
- rxd->ppdu_start.info0,
- __le32_to_cpu(rxd->ppdu_start.info1),
- __le32_to_cpu(rxd->ppdu_start.info2),
- __le32_to_cpu(rxd->ppdu_start.info3),
- __le32_to_cpu(rxd->ppdu_start.info4));
+ __le32_to_cpu(rxd_attention->flags),
+ __le32_to_cpu(rxd_mpdu_start->info0),
+ __le32_to_cpu(rxd_mpdu_start->info1),
+ __le32_to_cpu(rxd_msdu_start_common->info0),
+ __le32_to_cpu(rxd_msdu_start_common->info1),
+ rxd_ppdu_start->info0,
+ __le32_to_cpu(rxd_ppdu_start->info1),
+ __le32_to_cpu(rxd_ppdu_start->info2),
+ __le32_to_cpu(rxd_ppdu_start->info3),
+ __le32_to_cpu(rxd_ppdu_start->info4));
ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
- __le32_to_cpu(rxd->msdu_end.common.info0),
- __le32_to_cpu(rxd->mpdu_end.info0));
+ __le32_to_cpu(rxd_msdu_end_common->info0),
+ __le32_to_cpu(rxd_mpdu_end->info0));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"rx desc msdu payload: ",
- rxd->msdu_payload, 50);
+ rxd_msdu_payload, 50);
}
status->rate_idx = mcs;
@@ -1059,6 +1104,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
@@ -1069,15 +1118,19 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
if (!rxd)
return NULL;
- if (rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ if (rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
- if (!(rxd->msdu_end.common.info0 &
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
- peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
@@ -1167,14 +1220,16 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
int i;
for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
status->chains &= ~BIT(i);
- if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+ if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+ rxd_ppdu_start->rssi_chains[i].pri20_mhz;
status->chains |= BIT(i);
}
@@ -1182,7 +1237,7 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_comb;
+ rxd_ppdu_start->rssi_comb;
status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
@@ -1190,13 +1245,18 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_end_common *rxd_ppdu_end_common;
+
+ rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
+
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
* means all prior MSDUs in a PPDU are reported to mac80211 without the
* TSF. Is it worth holding frames until end of PPDU is known?
*
* FIXME: Can we get/compute 64bit TSF?
*/
- status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+ status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
@@ -1206,7 +1266,9 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
u32 vdev_id)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
bool is_first_ppdu;
bool is_last_ppdu;
@@ -1214,11 +1276,14 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
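+ /* first->data points at the payload; the rx descriptor sits
+ * immediately before it, so stepping back rx_desc_size recovers it.
+ */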
- is_first_ppdu = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ is_first_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
- is_last_ppdu = !!(rxd->attention.flags &
+ is_last_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
if (is_first_ppdu) {
@@ -1357,7 +1422,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
size_t hdr_len;
size_t crypto_len;
bool is_first;
@@ -1366,10 +1433,13 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
int bytes_aligned = ar->hw_params.decap_align_bytes;
u8 *qos;
- rxd = (void *)msdu->data - sizeof(*rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
@@ -1387,7 +1457,7 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
* error packets. If the limit is exceeded, hw sends all remaining MSDUs as
* a single last MSDU with this msdu limit error set.
*/
- msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
+ msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
/* If an MSDU limit error happens, don't warn on it; the partial raw MSDU
* without the first MSDU is expected in that case, and handled later here.
@@ -1479,6 +1549,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
size_t hdr_len;
@@ -1499,9 +1570,10 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
- rxd = (void *)msdu->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
@@ -1537,18 +1609,25 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ u8 *rxd_rx_hdr_status;
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
int bytes_aligned = ar->hw_params.decap_align_bytes;
- rxd = (void *)msdu->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+ hdr = (void *)rxd_rx_hdr_status;
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
@@ -1574,6 +1653,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
size_t hdr_len;
@@ -1593,8 +1673,10 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, l3_pad_bytes);
@@ -1635,6 +1717,7 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
@@ -1647,8 +1730,10 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
@@ -1673,7 +1758,9 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype,
bool is_decrypted)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
enum rx_msdu_decap_format decap;
/* First msdu's decapped header:
@@ -1687,8 +1774,11 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
* [rfc1042/llc]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
@@ -1710,17 +1800,23 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
}
}
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
u32 flags, info;
bool is_ip4, is_ip6;
bool is_tcp, is_udp;
bool ip_csum_ok, tcpudp_csum_ok;
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
- info = __le32_to_cpu(rxd->msdu_start.common.info1);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ flags = __le32_to_cpu(rxd_attention->flags);
+ info = __le32_to_cpu(rxd_msdu_start_common->info1);
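+ /* attention.flags carries the hw checksum verdicts; msdu_start
+ * info1 carries the L3/L4 protocol classification used below.
+ */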
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1741,9 +1837,10 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
-static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
+ struct sk_buff *msdu)
{
- msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
@@ -1835,7 +1932,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff *first;
struct sk_buff *last;
struct sk_buff *msdu, *temp;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
@@ -1853,18 +1954,22 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_mgmt = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ is_mgmt = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
* decapped header. It'll be used for undecapping of each MSDU.
*/
- hdr = (void *)rxd->rx_hdr_status;
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
if (rx_hdr)
@@ -1882,8 +1987,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
- rxd = (void *)last->data - sizeof(*rxd);
- attention = __le32_to_cpu(rxd->attention.flags);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)last->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ attention = __le32_to_cpu(rxd_attention->flags);
has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
@@ -1971,7 +2079,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
continue;
}
- ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
@@ -2083,12 +2191,19 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
unsigned long *unchain_cnt)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_frag_info_common *rxd_frag_info;
enum rx_msdu_decap_format decap;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
/* FIXME: Current unchaining logic can only handle simple case of raw
@@ -2097,7 +2212,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
* try re-constructing such frames - it'll be pretty much garbage.
*/
if (decap != RX_MSDU_DECAP_RAW ||
- skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu);
return;
@@ -2112,7 +2227,10 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
u8 *subframe_hdr;
struct sk_buff *first;
bool is_first, is_last;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -2120,12 +2238,16 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_first = !!(rxd->msdu_end.common.info0 &
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Return in case of non-aggregated msdu */
@@ -2136,7 +2258,7 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
if (!is_first)
return false;
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -2380,7 +2502,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem to handle that case either, so we introduce the
- * same limitiation here as well.
+ * same limitation here as well.
*/
if (num_mpdu_ranges > 1)
ath10k_warn(ar,
@@ -3028,11 +3150,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
+ struct sk_buff_head *list,
struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
if (skb_queue_empty(list))
return -ENOBUFS;
@@ -3043,15 +3167,22 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
@@ -3194,7 +3325,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
@@ -3438,7 +3569,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx tx mode switch ind info0 0x%04hx info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
+ "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
info0, info1, enable, num_records, mode, threshold);
len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
@@ -3715,7 +3846,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
switch (txrate.flags) {
case WMI_RATE_PREAMBLE_OFDM:
if (arsta->arvif && arsta->arvif->vif)
- conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
+ conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
arsta->tx_info.status.rates[0].idx = rate_idx - 4;
break;
@@ -3759,6 +3890,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_80_MHZ_WIDTH;
break;
+ case RATE_INFO_BW_160:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
}
if (peer_stats->succ_pkts) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index b793eac2cfac..bd603feb7953 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -796,47 +796,26 @@ static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring32 *ring =
(struct htt_rx_ring_setup_ring32 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
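+ /* The per-hw callback fills the same 32-bit-word offsets the old
+ * desc_offset() macro computed from struct htt_rx_desc, but for the
+ * descriptor layout this chip actually uses.
+ */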
}
-static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring64 *ring =
(struct htt_rx_ring_setup_ring64 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
@@ -896,7 +875,7 @@ static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_32(ring);
+ ath10k_htt_fill_rx_desc_offset_32(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -909,6 +888,7 @@ static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring64 *ring;
@@ -965,7 +945,7 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_64(ring);
+ ath10k_htt_fill_rx_desc_offset_64(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -1132,7 +1112,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
int len = 0;
int ret;
- /* Response IDs are echo-ed back only for host driver convienence
+ /* Response IDs are echoed back only for host driver convenience
* purposes. They aren't used for anything in the driver yet so use 0.
*/
@@ -1295,7 +1275,6 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
struct ath10k *ar = htt->ar;
int res, data_len;
struct htt_cmd_hdr *cmd_hdr;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct htt_data_tx_desc *tx_desc;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *tmp_skb;
@@ -1306,11 +1285,15 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
u16 flags1 = 0;
u16 msdu_id = 0;
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
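+ /* Hw-encapped (ethernet) frames carry no 802.11 header, so
+ * frame_control must only be inspected for native-wifi frames.
+ */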
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
data_len = msdu->len;
@@ -1407,7 +1390,6 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
@@ -1439,15 +1421,19 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
txbuf_paddr = htt->txbuf.paddr +
(sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
- } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
- txmode == ATH10K_HW_TXRX_RAW &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
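+ /* Same !is_eth guard as in ath10k_htt_tx_hl(): only frames with an
+ * 802.11 header can be classified by frame_control.
+ */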
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
@@ -1609,7 +1595,6 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
@@ -1641,15 +1626,19 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
txbuf_paddr = htt->txbuf.paddr +
(sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
- } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
- txmode == ATH10K_HW_TXRX_RAW &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 57c58af64a57..6d32b43a4da6 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -11,6 +11,7 @@
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"
+#include "rx_desc.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_soc_base_address = 0x00004000,
@@ -83,7 +84,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
- /* Note: qca99x0 supports upto 12 Copy Engines. Other than address of
+ /* Note: qca99x0 supports up to 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
* It is not really necessary to assign address for newly supported
* CEs in this address table.
@@ -119,7 +120,7 @@ const struct ath10k_hw_regs qca4019_regs = {
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
- /* qca4019 supports upto 12 copy engines. Since base address
+ /* qca4019 supports up to 12 copy engines. Since base address
* of ce8 to ce11 are not directly referred in the code,
* no need have them in separate members in this table.
* Copy Engine Address
@@ -923,7 +924,7 @@ static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
ath10k_hif_write32(ar, address, msb);
}
-/* 1. Write to memory region of target, such as IRAM adn DRAM.
+/* 1. Write to memory region of target, such as IRAM and DRAM.
* 2. Target address( 0 ~ 00100000 & 0x00400000~0x00500000)
* can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
* 3. In order to access the region other than the above,
@@ -1134,21 +1135,7 @@ const struct ath10k_hw_ops qca988x_ops = {
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
-static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
-{
- return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
- RX_MSDU_END_INFO1_L3_HDR_PAD);
-}
-
-static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
-{
- return !!(rxd->msdu_end.common.info0 &
- __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
-}
-
const struct ath10k_hw_ops qca99x0_ops = {
- .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
- .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 591ef7416b61..1b99f3a39a11 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -510,6 +510,8 @@ struct ath10k_hw_clk_params {
u32 outdiv;
};
+struct htt_rx_desc_ops;
+
struct ath10k_hw_params {
u32 id;
u16 dev_id;
@@ -562,6 +564,9 @@ struct ath10k_hw_params {
*/
bool sw_decrypt_mcast_mgmt;
+ /* Rx descriptor abstraction */
+ const struct ath10k_htt_rx_desc_ops *rx_desc_ops;
+
const struct ath10k_hw_ops *hw_ops;
/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
@@ -628,18 +633,20 @@ struct ath10k_hw_params {
bool supports_peer_stats_info;
bool dynamic_sar_support;
+
+ bool hw_restart_disconnect;
+
+ bool use_fw_tx_credits;
};
-struct htt_rx_desc;
struct htt_resp;
struct htt_data_tx_completion_ext;
+struct htt_rx_ring_rx_desc_offsets;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
- int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
- bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
};
@@ -653,24 +660,6 @@ extern const struct ath10k_hw_ops wcn3990_ops;
extern const struct ath10k_hw_clk_params qca6174_clk[];
static inline int
-ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
- return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
- return 0;
-}
-
-static inline bool
-ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_msdu_limit_error)
- return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd);
- return false;
-}
-
-static inline int
ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
struct htt_resp *htt)
{
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b11aaee8b8c0..ec8d5b29bc72 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -659,7 +659,7 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
- conf = rcu_dereference(vif->chanctx_conf);
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
@@ -864,11 +864,36 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
+{
+ int peer_id, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+}
+
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
- int peer_id;
- int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -880,25 +905,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
- for_each_set_bit(peer_id, peer->peer_ids,
- ATH10K_MAX_NUM_PEER_IDS) {
- ar->peer_map[peer_id] = NULL;
- }
-
- /* Double check that peer is properly un-referenced from
- * the peer_map
- */
- for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
- if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
- peer->addr, peer, i);
- ar->peer_map[i] = NULL;
- }
- }
-
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
spin_unlock_bh(&ar->data_lock);
}
@@ -1509,8 +1516,8 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- arg.ssid = arvif->vif->bss_conf.ssid;
- arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ arg.ssid = arvif->vif->cfg.ssid;
+ arg.ssid_len = arvif->vif->cfg.ssid_len;
}
ath10k_dbg(ar, ATH10K_DBG_MAC,
@@ -1630,7 +1637,7 @@ static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!bcn) {
ath10k_warn(ar, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1823,8 +1830,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
- struct ieee80211_bss_conf *info,
- const u8 self_peer[ETH_ALEN])
+ struct ieee80211_vif *vif)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
@@ -1832,7 +1838,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (!info->ibss_joined) {
+ if (!vif->cfg.ibss_joined) {
if (is_zero_ether_addr(arvif->bssid))
return;
@@ -2028,7 +2034,7 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return;
- if (!vif->csa_active)
+ if (!vif->bss_conf.csa_active)
return;
if (!arvif->is_up)
@@ -2163,7 +2169,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
lockdep_assert_held(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_STATION)
- aid = vif->bss_conf.aid;
+ aid = vif->cfg.aid;
else
aid = sta->aid;
@@ -2193,7 +2199,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
return;
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
- info->ssid_len ? info->ssid : NULL, info->ssid_len,
+ vif->cfg.ssid_len ? vif->cfg.ssid : NULL,
+ vif->cfg.ssid_len,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@@ -2251,7 +2258,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
band = def.chan->band;
sband = ar->hw->wiphy->bands[band];
- ratemask = sta->supp_rates[band];
+ ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -2296,7 +2303,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -2335,7 +2342,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->peer_flags |= ar->wmi.peer_flags->ldbc;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->peer_flags |= ar->wmi.peer_flags->bw40;
arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
}
@@ -2388,7 +2395,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+ arg->peer_num_spatial_streams = min(sta->deflink.rx_nss,
+ max_nss);
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -2545,7 +2553,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_hw_params *hw = &ar->hw_params;
struct cfg80211_chan_def def;
@@ -2587,10 +2595,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= ar->wmi.peer_flags->bw80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->peer_flags |= ar->wmi.peer_flags->bw160;
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -2604,7 +2612,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+ arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
arg->peer_vht_rates.rx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->peer_vht_rates.rx_mcs_set =
@@ -2684,15 +2692,17 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
struct ieee80211_sta *sta)
{
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
@@ -2703,13 +2713,13 @@ static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
}
}
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
@@ -2736,15 +2746,15 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->vht_cap.vht_supported &&
+ if (sta->deflink.vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
@@ -2759,12 +2769,12 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
/*
* Check VHT first.
*/
- if (sta->vht_cap.vht_supported &&
+ if (sta->deflink.vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
phymode = ath10k_mac_get_phymode_vht(ar, sta);
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -3079,8 +3089,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
/* ap_sta must be accessed only within rcu section which must be left
* before calling ath10k_setup_peer_smps() which might sleep.
*/
- ht_cap = ap_sta->ht_cap;
- vht_cap = ap_sta->vht_cap;
+ ht_cap = ap_sta->deflink.ht_cap;
+ vht_cap = ap_sta->deflink.vht_cap;
ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
if (ret) {
@@ -3115,11 +3125,11 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
WARN_ON(arvif->is_up);
- arvif->aid = bss_conf->aid;
+ arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath10k_wmi_pdev_set_param(ar,
@@ -3278,7 +3288,7 @@ static int ath10k_station_assoc(struct ath10k *ar,
*/
if (!reassoc) {
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap);
+ &sta->deflink.ht_cap);
if (ret) {
ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3710,6 +3720,9 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
__le16 fc = hdr->frame_control;
+ if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return ATH10K_HW_TXRX_ETHERNET;
+
if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
return ATH10K_HW_TXRX_RAW;
@@ -3870,6 +3883,12 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
bool noack = false;
cb->flags = 0;
+
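+ /* Hw-encap frames skip the hwcrypto/raw-tx classification below,
+ * which would otherwise parse an 802.11 header the frame lacks.
+ */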
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */
+ goto finish_cb_fill;
+ }
+
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
@@ -3908,6 +3927,7 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
cb->flags |= ATH10K_SKB_F_RAW_TX;
}
+finish_cb_fill:
cb->vif = vif;
cb->txq = txq;
cb->airtime_est = airtime;
@@ -4031,7 +4051,11 @@ static int ath10k_mac_tx(struct ath10k *ar,
ath10k_tx_h_seq_no(vif, skb);
break;
case ATH10K_HW_TXRX_ETHERNET:
- ath10k_tx_h_8023(skb);
+ /* Convert 802.11->802.3 header only if the frame was earlier
+ * encapsulated to 802.11 by mac80211. Otherwise pass it as is.
+ */
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ ath10k_tx_h_8023(skb);
break;
case ATH10K_HW_TXRX_RAW:
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
@@ -4118,11 +4142,10 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
- if (peer)
+ if (peer) {
ath10k_warn(ar, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
-
- if (!peer) {
+ } else {
ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
peer_addr,
WMI_PEER_TYPE_DEFAULT);
@@ -4643,12 +4666,10 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
struct ieee80211_txq *txq = NULL;
- struct ieee80211_hdr *hdr = (void *)skb->data;
enum ath10k_hw_txrx_mode txmode;
enum ath10k_mac_tx_path txpath;
bool is_htt;
bool is_mgmt;
- bool is_presp;
int ret;
u16 airtime;
@@ -4662,8 +4683,14 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
if (is_htt) {
+ bool is_presp = false;
+
spin_lock_bh(&ar->htt.tx_lock);
- is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+ }
ret = ath10k_htt_tx_inc_pending(htt);
if (ret) {
@@ -5339,13 +5366,29 @@ err:
static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 opt;
ath10k_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- if (!ar->hw_rfkill_on)
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on) {
+ /* If the current driver state is RESTARTING but not yet
+ * fully RESTARTED because of an incoming suspend event,
+ * then ath10k_halt() has already been called via
+ * ath10k_core_restart() and should not be called here.
+ */
+ if (ar->state != ATH10K_STATE_RESTARTING) {
+ ath10k_halt(ar);
+ } else {
+ /* Suspending here, because when in RESTARTING
+ * state, ath10k_core_stop() skips
+ * ath10k_wait_for_suspend().
+ */
+ opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
+ ath10k_wait_for_suspend(ar, opt);
+ }
+ }
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
@@ -5447,6 +5490,30 @@ static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
ar->wmi.vdev_param->txbf, value);
}
+static void ath10k_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k *ar = hw->priv;
+ u32 vdev_param;
+ int ret;
+
+ if (ath10k_frame_mode != ATH10K_HW_TXRX_ETHERNET ||
+ ar->wmi.vdev_param->tx_encap_type == WMI_VDEV_PARAM_UNSUPPORTED ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+ arvif->vdev_id, ret);
+ }
+}
+
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -5656,15 +5723,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->def_wep_key_idx = -1;
- vdev_param = ar->wmi.vdev_param->tx_encap_type;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- ATH10K_HW_TXRX_NATIVE_WIFI);
- /* 10.X firmware does not support this VDEV parameter. Do not warn */
- if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
- arvif->vdev_id, ret);
- goto err_vdev_delete;
- }
+ ath10k_update_vif_offload(hw, vif);
/* Configuring number of spatial stream for monitor interface is causing
* target assert in qca9888 and qca6174.
@@ -6016,7 +6075,7 @@ static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
@@ -6030,7 +6089,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (changed & BSS_CHANGED_IBSS)
- ath10k_control_ibss(arvif, info, vif->addr);
+ ath10k_control_ibss(arvif, vif);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -6095,9 +6154,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
- arvif->u.ap.ssid_len = info->ssid_len;
- if (info->ssid_len)
- memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
@@ -6174,7 +6234,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc) {
+ if (vif->cfg.assoc) {
/* Workaround: Make sure monitor vdev is not running
* when associating to prevent some firmware revisions
* (e.g. 10.1 and 10.2) from crashing.
@@ -6199,7 +6259,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_PS) {
- arvif->ps = vif->bss_conf.ps;
+ arvif->ps = vif->cfg.ps;
ret = ath10k_config_ps(ar);
if (ret)
@@ -6787,10 +6847,10 @@ static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
int ret = 0;
s16 txpwr;
- if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
- txpwr = sta->txpwr.power;
+ txpwr = sta->deflink.txpwr.power;
if (!txpwr)
return -EINVAL;
}
@@ -6910,26 +6970,29 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
struct ieee80211_sta *sta,
u32 rate_ctrl_flag, u8 nss)
{
- if (nss > sta->rx_nss) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+
+ if (nss > sta->deflink.rx_nss) {
ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
- nss, sta->rx_nss);
+ nss, sta->deflink.rx_nss);
return -EINVAL;
}
if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
- if (!sta->vht_cap.vht_supported) {
+ if (!vht_cap->vht_supported) {
ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
sta->addr);
return -EINVAL;
}
} else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
- if (!sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ if (!ht_cap->ht_supported || vht_cap->vht_supported) {
ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
sta->addr);
return -EINVAL;
}
} else {
- if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported)
+ if (ht_cap->ht_supported || vht_cap->vht_supported)
return -EINVAL;
}
@@ -7565,10 +7628,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/* Clean up the peer object as well since we
* must have failed to do this above.
*/
- list_del(&peer->list);
- ar->peer_map[i] = NULL;
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
}
spin_unlock_bh(&ar->data_lock);
@@ -7758,7 +7818,8 @@ exit:
}
static int ath10k_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath10k *ar = hw->priv;
@@ -8036,7 +8097,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* TODO: Implement this function properly
* For now it is needed to reply to Probe Requests in IBSS mode.
- * Propably we need this information from FW.
+ * Probably we need this information from FW.
*/
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
{
@@ -8272,7 +8333,7 @@ static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
u8 rate = arvif->vht_pfr;
/* skip non vht and multiple rate peers */
- if (!sta->vht_cap.vht_supported || arvif->vht_num_rates != 1)
+ if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1)
return false;
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -8313,7 +8374,7 @@ static void ath10k_mac_clr_bitrate_mask_iter(void *data,
int err;
/* clear vht peers only */
- if (arsta->arvif != arvif || !sta->vht_cap.vht_supported)
+ if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported)
return;
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@@ -8457,13 +8518,14 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_STA,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->bandwidth, sta->rx_nss,
- sta->smps_mode);
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = WMI_PEER_CHWIDTH_20MHZ;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_20:
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
@@ -8478,7 +8540,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
- sta->bandwidth, sta->addr);
+ sta->deflink.bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -8487,12 +8549,12 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->rx_nss;
+ arsta->nss = sta->deflink.rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -8505,7 +8567,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
break;
case IEEE80211_SMPS_NUM_MODES:
ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
- sta->smps_mode, sta->addr);
+ sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -8776,7 +8838,7 @@ ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath10k_mac_change_chanctx_arg *arg = data;
- if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
@@ -8789,7 +8851,7 @@ ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ath10k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
- ctx = rcu_access_pointer(vif->chanctx_conf);
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
@@ -8862,6 +8924,7 @@ unlock:
static int
ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath10k *ar = hw->priv;
@@ -8941,6 +9004,7 @@ err:
static void
ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath10k *ar = hw->priv;
@@ -9353,6 +9417,7 @@ static const struct ieee80211_ops ath10k_ops = {
.stop = ath10k_stop,
.config = ath10k_config,
.add_interface = ath10k_add_interface,
+ .update_vif_offload = ath10k_update_vif_offload,
.remove_interface = ath10k_remove_interface,
.configure_filter = ath10k_configure_filter,
.bss_info_changed = ath10k_bss_info_changed,
@@ -9621,7 +9686,7 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
},
};
-/* FIXME: This is not thouroughly tested. These combinations may over- or
+/* FIXME: This is not thoroughly tested. These combinations may over- or
* underestimate hw/fw capabilities.
*/
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
@@ -9861,7 +9926,7 @@ int ath10k_mac_register(struct ath10k *ar)
WLAN_CIPHER_SUITE_BIP_GMAC_128,
WLAN_CIPHER_SUITE_BIP_GMAC_256,
- /* Only QCA99x0 and QCA4019 varients support GCMP-128, GCMP-256
+ /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
* and CCMP-256 in hardware.
*/
WLAN_CIPHER_SUITE_GCMP,
@@ -10022,6 +10087,12 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);
+ if (ath10k_frame_mode == ATH10K_HW_TXRX_ETHERNET) {
+ if (ar->wmi.vdev_param->tx_encap_type !=
+ WMI_VDEV_PARAM_UNSUPPORTED)
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ }
+
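/* Note on the hunk above: ethernet tx encap offload is opt-in twice over.
 * The module must be loaded in ethernet frame mode and the firmware must
 * actually expose the tx_encap_type vdev parameter. A hypothetical
 * invocation (assuming the stock frame_mode module parameter, where 2
 * corresponds to ATH10K_HW_TXRX_ETHERNET):
 *
 *     modprobe ath10k_core frame_mode=2
 *
 * Without SUPPORTS_TX_ENCAP_OFFLOAD, mac80211 keeps handing the driver
 * fully built 802.11 frames.
 */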
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -10207,7 +10278,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
}
- if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+ if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
+ !ath_is_world_regd(&ar->ath_common.regulatory)) {
ret = regulatory_hint(ar->hw->wiphy,
ar->ath_common.regulatory.alpha2);
if (ret)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 4d4e2f91e15c..e56c6a6b1379 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1244,7 +1244,7 @@ static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
unsigned int nbytes, max_nbytes, nentries;
int orig_len;
- /* No need to aquire ce_lock for CE5, since this is the only place CE5
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
* is processed other than init and deinit. Before releasing CE5
* buffers, interrupts are disabled. Thus CE5 access is serialized.
*/
@@ -3215,8 +3215,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
- ATH10K_NAPI_BUDGET);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
}
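/* Note: netif_napi_add() lost its weight argument in newer kernels; the
 * old default (NAPI_POLL_WEIGHT, i.e. 64) is now implied, and callers that
 * genuinely need a custom weight are expected to use the _weighted variant,
 * roughly:
 *
 *     netif_napi_add(dev, napi, poll);                  // default weight
 *     netif_napi_add_weighted(dev, napi, poll, weight); // custom weight
 *
 * The ATH10K_NAPI_BUDGET removals here and in sdio.c, snoc.c and usb.c
 * below all follow this API change.
 */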
static int ath10k_pci_init_irq(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index cf64898b9447..480cd97ab739 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -81,7 +81,7 @@ struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ath10k_ce_pipe *ce_hdl;
- /* Our pipe number; facilitiates use of pipe_info ptrs. */
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
/* Convenience back pointer to hif_ce_state. */
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 80fcb917fe4e..66cb7a1e628a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -590,12 +590,12 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
if (resp->fw_version_info_valid) {
qmi->fw_version = resp->fw_version_info.fw_version;
- strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
+ strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
sizeof(qmi->fw_build_timestamp));
}
if (resp->fw_build_id_valid)
- strlcpy(qmi->fw_build_id, resp->fw_build_id,
+ strscpy(qmi->fw_build_id, resp->fw_build_id,
MAX_BUILD_ID_LEN + 1);
if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
@@ -792,7 +792,7 @@ static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
return;
/*
- * HACK: sleep for a while inbetween receiving the msa info response
+ * HACK: sleep for a while between receiving the msa info response
* and the XPU update to prevent SDM845 from crashing due to a security
* violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
*/
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 705b6295e466..777e53aa69dc 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -196,17 +196,31 @@ struct rx_attention {
* descriptor.
*/
-struct rx_frag_info {
+struct rx_frag_info_common {
u8 ring0_more_count;
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
+} __packed;
+
+struct rx_frag_info_wcn3990 {
u8 ring4_more_count;
u8 ring5_more_count;
u8 ring6_more_count;
u8 ring7_more_count;
} __packed;
+struct rx_frag_info {
+ struct rx_frag_info_common common;
+ union {
+ struct rx_frag_info_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_frag_info_v1 {
+ struct rx_frag_info_common common;
+} __packed;
+
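/* Illustrative sketch (not part of the patch): with the descriptor split
 * into a shared prefix plus hw-specific tails, a consumer is expected to go
 * through the common struct for fields both layouts share, e.g. a
 * hypothetical helper:
 *
 *     static u8 rx_frag_ring0_more_count(const void *desc)
 *     {
 *             const struct rx_frag_info_common *info = desc;
 *
 *             return info->ring0_more_count; // valid for both variants
 *     }
 *
 * while the wcn3990-only ring4..ring7 counters are reachable solely via
 * struct rx_frag_info, and the _v1 layout stops at the common part.
 */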
/*
* ring0_more_count
* Indicates the number of more buffers associated with RX DMA
@@ -434,7 +448,7 @@ struct rx_mpdu_end {
* - 4 bytes for WEP
* - 8 bytes for TKIP, AES
* [padding to 4 bytes]
- * c) A-MSDU subframe header (14 bytes) if appliable
+ * c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU only first frame in sequence contains (a) and (b).
@@ -474,11 +488,17 @@ struct rx_msdu_start_wcn3990 {
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
- struct rx_msdu_start_qca99x0 qca99x0;
struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_start_v1 {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
@@ -612,11 +632,17 @@ struct rx_msdu_end_wcn3990 {
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
- struct rx_msdu_end_qca99x0 qca99x0;
struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_end_v1 {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
@@ -1136,11 +1162,17 @@ struct rx_ppdu_end_wcn3990 {
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
+ struct rx_ppdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_ppdu_end_v1 {
+ struct rx_ppdu_end_common common;
+ union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
- struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 63e1c2d783c5..79e09c7a82b3 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1057,7 +1057,7 @@ static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
out:
/* An optimization to bypass reading the IRQ status registers
- * unecessarily which can re-wake the target, if upper layers
+ * unnecessarily which can re-wake the target, if upper layers
* determine that we are in a low-throughput mode, we can rely on
* taking another interrupt rather than re-checking the status
* registers which can re-wake the target.
@@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
return;
}
- ret = mmc_hw_reset(ar_sdio->func->card->host);
+ ret = mmc_hw_reset(ar_sdio->func->card);
if (ret)
ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
@@ -2531,8 +2531,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
- ATH10K_NAPI_BUDGET);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 9513ab696fff..cfcb759a87de 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1242,20 +1242,18 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
- ATH10K_NAPI_BUDGET);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
static int ath10k_snoc_request_irq(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- int irqflags = IRQF_TRIGGER_RISING;
int ret, id;
for (id = 0; id < CE_COUNT_MAX; id++) {
ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
- ath10k_snoc_per_engine_handler,
- irqflags, ce_name[id], ar);
+ ath10k_snoc_per_engine_handler, 0,
+ ce_name[id], ar);
if (ret) {
ath10k_err(ar,
"failed to register IRQ handler for CE %d: %d\n",
@@ -1306,13 +1304,10 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
}
for (i = 0; i < CE_COUNT; i++) {
- res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
- if (!res) {
- ath10k_err(ar, "failed to get IRQ%d\n", i);
- ret = -ENODEV;
- goto out;
- }
- ar_snoc->ce_irqs[i].irq_line = res->start;
+ ret = platform_get_irq(ar_snoc->dev, i);
+ if (ret < 0)
+ return ret;
+ ar_snoc->ce_irqs[i].irq_line = ret;
}
ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
@@ -1323,10 +1318,8 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
ar_snoc->xo_cal_data);
}
- ret = 0;
-out:
- return ret;
+ return 0;
}
static void ath10k_snoc_quirks_init(struct ath10k *ar)
@@ -1556,11 +1549,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
- of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
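/* Note: moving of_node_put() directly after of_address_to_resource()
 * releases the phandle reference on both the success and the error path;
 * the old ordering leaked a node refcount whenever the lookup failed. The
 * resulting canonical pattern is:
 *
 *     node = of_parse_phandle(np, "memory-region", 0);
 *     ret = of_address_to_resource(node, 0, &r);
 *     of_node_put(node);      // drop the reference regardless of ret
 *     if (ret)
 *             return ret;
 */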
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 25e0ad36ddb1..b4733b5ded34 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -17,7 +17,7 @@ struct ath10k_fw_file;
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
- u8 data[0];
+ u8 data[];
} __packed;
struct ath10k_swap_code_seg_tail {
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 36c9a1364253..cefd97323dfe 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -98,7 +98,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
- /* display in millidegree celcius */
+ /* display in millidegree celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 5fdb020f4da3..1f4de9fbf2b3 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -19,7 +19,7 @@ struct ath10k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
u32 quiet_period;
- /* temperature value in Celcius degree
+ /* temperature value in Celsius degree
* protected by data_lock
*/
int temperature;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 4714c86bb501..64e7a767d963 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -52,15 +52,12 @@ DECLARE_EVENT_CLASS(ath10k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@@ -92,16 +89,13 @@ TRACE_EVENT(ath10k_log_dbg,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 6f8b64218894..da3bc35e41aa 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -43,6 +43,7 @@ out:
int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
+ struct ieee80211_tx_status status;
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
@@ -125,10 +126,22 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
tx_done->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
- ieee80211_tx_status(htt->ar->hw, msdu);
+ memset(&status, 0, sizeof(status));
+ status.skb = msdu;
+ status.info = info;
+
+ rcu_read_lock();
+
+ if (txq)
+ status.sta = txq->sta;
+
+ ieee80211_tx_status_ext(htt->ar->hw, &status);
+
+ rcu_read_unlock();
+
/* we do not own the msdu anymore */
return 0;
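/* Note (reading of the hunk above): switching from ieee80211_tx_status()
 * to ieee80211_tx_status_ext() lets the driver attach the station owning
 * the tx queue to the report, presumably so mac80211 can credit per-sta
 * status such as the ack signal. The sta pointer is only safe to use under
 * RCU, hence the rcu_read_lock()/rcu_read_unlock() pair around the call.
 */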
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3d98f19c6ec8..b0067af685b1 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -345,6 +345,12 @@ static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
ep->ep_ops.ep_rx_complete(ar, skb);
/* The RX complete handler now owns the skb... */
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
+ napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
+
return;
out_free_skb:
@@ -387,6 +393,7 @@ static int ath10k_usb_hif_start(struct ath10k *ar)
int i;
struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ ath10k_core_napi_enable(ar);
ath10k_usb_start_recv_pipes(ar);
/* set the TX resource avail threshold for each TX pipe */
@@ -462,6 +469,7 @@ err:
static void ath10k_usb_hif_stop(struct ath10k *ar)
{
ath10k_usb_flush_all(ar);
+ ath10k_core_napi_sync_disable(ar);
}
static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id)
@@ -966,6 +974,20 @@ err:
return ret;
}
+static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget: %d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
+
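/* Note: the poll callback above follows the standard NAPI contract:
 * process at most @budget items, call napi_complete_done() only when the
 * work is drained (done < budget), and return the count actually handled.
 * Returning @budget leaves the instance scheduled for another pass.
 */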
/* ath10k usb driver registered functions */
static int ath10k_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
@@ -992,6 +1014,8 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
+
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
product_id = le16_to_cpu(dev->descriptor.idProduct);
@@ -1013,6 +1037,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with USB */
bus_params.chip_id = 0;
+ bus_params.hl_msdu_ids = true;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
@@ -1044,6 +1069,7 @@ static void ath10k_usb_remove(struct usb_interface *interface)
return;
ath10k_core_unregister(ar_usb->ar);
+ netif_napi_del(&ar_usb->ar->napi);
ath10k_usb_destroy(ar_usb->ar);
usb_put_dev(interface_to_usbdev(interface));
ath10k_core_destroy(ar_usb->ar);
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
index 34d683e8fc18..48e066ba8162 100644
--- a/drivers/net/wireless/ath/ath10k/usb.h
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -26,7 +26,7 @@
#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
-/* diagnostic command defnitions */
+/* diagnostic command definitions */
#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2
#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7efbe03fbca8..876410a47d1d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -205,7 +205,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
}
arvif = ath10k_get_arvif(ar, vdev_id);
- if (arvif && arvif->is_up && arvif->vif->csa_active)
+ if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
kfree(tb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index b39c9b78b32b..dbb48d70f2e9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1813,7 +1813,7 @@ struct wmi_tlv_pdev_get_temp_cmd {
struct wmi_tlv_pdev_temperature_event {
__le32 tlv_hdr;
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
__le32 temperature;
__le32 pdev_id;
} __packed;
@@ -2548,7 +2548,7 @@ struct nlo_channel_prediction_cfg {
/* Preconfigured stationary threshold.
* Lesser value means more conservative. Bigger value means more aggressive.
- * Maximum is 100 and mininum is 0.
+ * Maximum is 100 and minimum is 0.
*/
__le32 stationary_threshold;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 62c453a21e49..980d4124fa28 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2427,7 +2427,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
param->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status_irqsafe(ar->hw, msdu);
@@ -2611,36 +2611,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control)) {
- struct ieee80211_mgmt *mgmt = (void *)skb->data;
- enum cfg80211_bss_frame_type ftype;
- u8 *ies;
- int ies_ch;
-
+ ieee80211_is_probe_resp(hdr->frame_control))
status->boottime_ns = ktime_get_boottime_ns();
- if (!ar->scan_channel)
- goto drop;
-
- ies = mgmt->u.beacon.variable;
-
- if (ieee80211_is_beacon(mgmt->frame_control))
- ftype = CFG80211_BSS_FTYPE_BEACON;
- else
- ftype = CFG80211_BSS_FTYPE_PRESP;
-
- ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
- skb_tail_pointer(skb) - ies,
- sband->band, ftype);
-
- if (ies_ch > 0 && ies_ch != channel) {
- ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "channel mismatched ds channel %d scan channel %d\n",
- ies_ch, channel);
- goto drop;
- }
- }
-
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -2654,10 +2627,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ieee80211_rx_ni(ar->hw, skb);
return 0;
-
-drop:
- dev_kfree_skb(skb);
- return 0;
}
static int freq_to_idx(struct ath10k *ar, int freq)
@@ -3586,7 +3555,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
__le32 t;
u32 v, tim_len;
- /* When FW reports 0 in tim_len, ensure atleast first byte
+ /* When FW reports 0 in tim_len, ensure at least first byte
* in tim_bitmap is considered for pvm calculation.
*/
tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
@@ -3913,13 +3882,13 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* Once CSA counter is completed stop sending beacons until
* actual channel switch is done
*/
- if (arvif->vif->csa_active &&
+ if (arvif->vif->bss_conf.csa_active &&
ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
continue;
}
- bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
if (!bcn) {
ath10k_warn(ar, "could not get mac80211 beacon\n");
continue;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4abd12e78028..6de3cc4640a0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3170,7 +3170,7 @@ struct wmi_start_scan_common {
/* dwell time in msec on passive channels */
__le32 dwell_time_passive;
/*
- * min time in msec on the BSS channel,only valid if atleast one
+ * min time in msec on the BSS channel, only valid if at least one
* VDEV is active
*/
__le32 min_rest_time;
@@ -3196,7 +3196,7 @@ struct wmi_start_scan_common {
* and bssid_list
*/
__le32 repeat_probe_time;
- /* time in msec between 2 consequetive probe requests with in a set. */
+ /* time in msec between 2 consecutive probe requests within a set. */
__le32 probe_spacing_time;
/*
* data inactivity time in msec on bss channel that will be used by
@@ -4397,7 +4397,7 @@ struct wmi_pdev_stats_tx {
/* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev tx timeout */
__le32 pdev_tx_timeout;
/* wal pdev resets */
@@ -5240,7 +5240,7 @@ enum wmi_vdev_param {
* scheduler.
*/
WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
- /* enable/dsiable WDS for this VDEV */
+ /* enable/disable WDS for this VDEV */
WMI_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_VDEV_PARAM_ATIM_WINDOW,
@@ -5372,7 +5372,7 @@ enum wmi_10x_vdev_param {
* scheduler.
*/
WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
- /* enable/dsiable WDS for this VDEV */
+ /* enable/disable WDS for this VDEV */
WMI_10X_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_10X_VDEV_PARAM_ATIM_WINDOW,
@@ -5904,7 +5904,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
enum wmi_sta_ps_param_pspoll_count {
WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
/*
- * Values greater than 0 indicate the maximum numer of PS-Poll frames
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
@@ -6947,7 +6947,7 @@ struct wmi_echo_ev_arg {
};
struct wmi_pdev_temperature_event {
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
__le32 temperature;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 7d65c115669f..20b9aa8ddf7d 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
- new_pattern = old_pattern;
if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
- if (patterns[i].pkt_offset < ETH_HLEN)
+ if (patterns[i].pkt_offset < ETH_HLEN) {
ath10k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
- else
+ } else {
+ new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
}
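/* Note (reading of the hunk above): the unconditional new_pattern =
 * old_pattern used to run before the 802.3->802.11 conversion, seeding
 * new_pattern with the raw pattern's contents; deferring the copy to the
 * branch that keeps the raw layout ensures the output of
 * ath10k_wow_convert_8023_to_80211() is used as-is rather than mixed with
 * a stale copy.
 */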
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index c1fce4159f1f..cc47e0114595 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -17,13 +17,14 @@ ath11k-y += core.o \
peer.o \
dbring.o \
hw.o \
- wow.o
+ pcic.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
+ath11k-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 3fb0aa000825..d34a4d6325b2 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -8,10 +9,15 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include <linux/remoteproc.h>
+#include "pcic.h"
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
static const struct of_device_id ath11k_ahb_of_match[] = {
/* TODO: Should we change the compatible string to something similar
@@ -23,18 +29,14 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,ipq6018-wifi",
.data = (void *)ATH11K_HW_IPQ6018_HW10,
},
+ { .compatible = "qcom,wcn6750-wifi",
+ .data = (void *)ATH11K_HW_WCN6750_HW10,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
-static const struct ath11k_bus_params ath11k_ahb_bus_params = {
- .mhi_support = false,
- .m3_fw_support = false,
- .fixed_bdf_addr = true,
- .fixed_mem_region = true,
-};
-
#define ATH11K_IRQ_CE0_OFFSET 4
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
@@ -134,6 +136,61 @@ enum ext_irq_num {
tcl2host_status_ring,
};
+static int
+ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.msi.irqs[vector];
+}
+
+static inline u32
+ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start = 0;
+
+ /* If offset lies within DP register range, use 1st window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ATH11K_PCI_WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = 2 * ATH11K_PCI_WINDOW_START;
+
+ return window_start;
+}
+
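/* Note: the XOR test above is a compact window-membership check. Assuming
 * the base B is aligned to the window size (as the constants here appear
 * to be) and M = window size - 1, (offset ^ B) < M holds exactly when
 * offset agrees with B in every bit above the mask, i.e. offset falls
 * inside B's window. An equivalent but longer spelling:
 *
 *     if (offset >= HAL_SEQ_WCSS_UMAC_OFFSET &&
 *         offset < HAL_SEQ_WCSS_UMAC_OFFSET + ATH11K_PCI_WINDOW_RANGE_MASK)
 *             window_start = ATH11K_PCI_WINDOW_START;
 */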
+static void
+ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ u32 window_start;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+}
+
+static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start;
+ u32 val;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ return val;
+}
+
+static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+ .wakeup = NULL,
+ .release = NULL,
+ .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+ .window_write32 = ath11k_ahb_window_write32_wcn6750,
+ .window_read32 = ath11k_ahb_window_read32_wcn6750,
+};
+
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
return ioread32(ab->mem + offset);
@@ -304,6 +361,7 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
+ dev_set_threaded(&irq_grp->napi_ndev, true);
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
@@ -351,7 +409,8 @@ static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
int timeout;
if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
- ab->hw_params.cold_boot_calib == 0)
+ ab->hw_params.cold_boot_calib == 0 ||
+ ab->hw_params.cbcal_restart_fw == 0)
return 0;
ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
@@ -391,6 +450,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
}
}
@@ -399,6 +460,9 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
int irq_idx;
int i;
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_free_irq(ab);
+
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -466,7 +530,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
@@ -481,7 +545,7 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ ath11k_ahb_ext_grp_napi_poll);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
@@ -553,6 +617,9 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
int irq, irq_idx, i;
int ret;
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_config_irq(ab);
+
/* Configure CE irqs */
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
@@ -574,7 +641,7 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
}
/* Configure external interrupts */
- ret = ath11k_ahb_ext_irq_config(ab);
+ ret = ath11k_ahb_config_ext_irq(ab);
return ret;
}
@@ -622,11 +689,90 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
return 0;
}
-static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
+static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = enable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
+ return ret;
+ }
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "ahb device suspended\n");
+
+ return ret;
+}
+
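/* Note: the smp2p payload packs a 16-bit message id and a rolling 16-bit
 * sequence number into a single u32, per the GENMASK definitions added in
 * ahb.h:
 *
 *     value = u32_encode_bits(seq_no++, ATH11K_AHB_SMP2P_SMEM_SEQ_NO) |
 *             u32_encode_bits(msg, ATH11K_AHB_SMP2P_SMEM_MSG);
 *
 * so bits 15:0 carry the message and bits 31:16 the sequence number.
 */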
+static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = disable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "ahb device resumed\n");
+
+ return 0;
+}
+
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
.start = ath11k_ahb_start,
.stop = ath11k_ahb_stop,
.read32 = ath11k_ahb_read32,
.write32 = ath11k_ahb_write32,
+ .read = NULL,
.irq_enable = ath11k_ahb_ext_irq_enable,
.irq_disable = ath11k_ahb_ext_irq_disable,
.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
@@ -634,6 +780,25 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
.power_up = ath11k_ahb_power_up,
};
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
+ .start = ath11k_pcic_start,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .read = NULL,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+ .suspend = ath11k_ahb_hif_suspend,
+ .resume = ath11k_ahb_hif_resume,
+ .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
+ .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
+};
+
static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -656,12 +821,278 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ phys_addr_t msi_addr_pa;
+ dma_addr_t msi_addr_iova;
+ struct resource *res;
+ int int_prop;
+ int ret;
+ int i;
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath11k_err(ab, "failed to fetch msi_addr\n");
+ return -ENOENT;
+ }
+
+ msi_addr_pa = res->start;
+ msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(ab->dev, msi_addr_iova))
+ return -ENOMEM;
+
+ ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
+ ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
+
+ ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
+ if (ret)
+ return ret;
+
+ ab->pci.msi.ep_base_data = int_prop + 32;
+
+ for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!res)
+ return -ENODEV;
+
+ ab->pci.msi.irqs[i] = res->start;
+ }
+
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+
+ return 0;
+}
+
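/* Note (reading of the hunk above): on this hybrid bus the MSI target is a
 * plain MMIO resource, so it is mapped with dma_map_resource() and the
 * resulting IOVA is split into addr_lo/addr_hi for the device. The
 * ep_base_data arithmetic (+ 32) presumably converts the devicetree SPI
 * interrupt number into the GIC hwirq space, where SPIs start at 32.
 */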
+static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
+ &ab_ahb->smp2p_info.smem_bit);
+ if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
+ ath11k_err(ab, "failed to fetch smem state: %ld\n",
+ PTR_ERR(ab_ahb->smp2p_info.smem_state));
+ return PTR_ERR(ab_ahb->smp2p_info.smem_state);
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return;
+
+ qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
+}
+
+static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ void __iomem *mem;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_ahb_setup_msi_resources(ab);
+
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+
+ return 0;
+}
+
+static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node)
+ return -ENOENT;
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.msa_paddr = r.start;
+ ab_ahb->fw.msa_size = resource_size(&r);
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 1);
+ if (!node)
+ return -ENOENT;
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve ce fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.ce_paddr = r.start;
+ ab_ahb->fw.ce_size = resource_size(&r);
+
+ return 0;
+}
+
+static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *host_dev = ab->dev;
+ struct platform_device_info info = {0};
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ /* Chipsets not requiring MSA need not initialize
+ * MSA resources, return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
+ ret = ath11k_ahb_setup_msa_resources(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to setup msa resources\n");
+ return ret;
+ }
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ab_ahb->fw.use_tz = true;
+ return 0;
+ }
+
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath11k_err(ab, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ab_ahb->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu_dom) {
+ ath11k_err(ab, "failed to allocate iommu domain\n");
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
+ if (ret) {
+ ath11k_err(ab, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
+ ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
+ ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
+ goto err_iommu_unmap;
+ }
+
+ ab_ahb->fw.use_tz = false;
+ ab_ahb->fw.iommu_domain = iommu_dom;
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_unmap:
+ iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
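/* Note: the init path above uses the usual reverse-order unwind: each
 * acquired resource (platform device, iommu domain, device attach, MSA
 * map, CE map) has a matching error label, and
 * ath11k_ahb_fw_resource_deinit() below releases the same set on normal
 * teardown. The iommu_map() calls install identity mappings
 * (iova == paddr), so the firmware can keep using physical addresses while
 * still being confined to the two carved-out regions.
 */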
+static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ if (ab_ahb->fw.use_tz)
+ return 0;
+
+ iommu = ab_ahb->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+ if (unmapped_size != ab_ahb->fw.msa_size)
+ ath11k_err(ab, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
+ if (unmapped_size != ab_ahb->fw.ce_size)
+ ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ab_ahb->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
+
+ return 0;
+}
+
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
const struct of_device_id *of_id;
- struct resource *mem_res;
- void __iomem *mem;
+ const struct ath11k_hif_ops *hif_ops;
+ const struct ath11k_pci_ops *pci_ops;
+ enum ath11k_hw_rev hw_rev;
int ret;
of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
@@ -670,10 +1101,21 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
- if (IS_ERR(mem)) {
- dev_err(&pdev->dev, "ioremap error\n");
- return PTR_ERR(mem);
+ hw_rev = (enum ath11k_hw_rev)of_id->data;
+
+ switch (hw_rev) {
+ case ATH11K_HW_IPQ8074:
+ case ATH11K_HW_IPQ6018_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_ipq8074;
+ pci_ops = NULL;
+ break;
+ case ATH11K_HW_WCN6750_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_wcn6750;
+ pci_ops = &ath11k_ahb_pci_ops_wcn6750;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
+ return -EOPNOTSUPP;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -683,28 +1125,43 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
}
ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
- ATH11K_BUS_AHB,
- &ath11k_ahb_bus_params);
+ ATH11K_BUS_AHB);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
}
- ab->hif.ops = &ath11k_ahb_hif_ops;
+ ab->hif.ops = hif_ops;
ab->pdev = pdev;
- ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
- ab->mem = mem;
- ab->mem_len = resource_size(mem_res);
+ ab->hw_rev = hw_rev;
platform_set_drvdata(pdev, ab);
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_core_free;
+ }
+
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_core_free;
- ret = ath11k_hal_srng_init(ab);
+ ret = ath11k_ahb_setup_resources(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
+ ret = ath11k_ahb_setup_smp2p_handle(ab);
+ if (ret)
+ goto err_fw_deinit;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_release_smp2p_handle;
+
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
@@ -741,6 +1198,12 @@ err_ce_free:
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
+err_release_smp2p_handle:
+ ath11k_ahb_release_smp2p_handle(ab);
+
+err_fw_deinit:
+ ath11k_ahb_fw_resource_deinit(ab);
+
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -748,20 +1211,10 @@ err_core_free:
return ret;
}
-static int ath11k_ahb_remove(struct platform_device *pdev)
+static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
- struct ath11k_base *ab = platform_get_drvdata(pdev);
unsigned long left;
- if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
- ath11k_debugfs_soc_destroy(ab);
- ath11k_qmi_deinit_service(ab);
- goto qmi_fail;
- }
-
- reinit_completion(&ab->driver_recovery);
-
if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
left = wait_for_completion_timeout(&ab->driver_recovery,
ATH11K_AHB_RECOVERY_TIMEOUT);
@@ -771,18 +1224,61 @@ static int ath11k_ahb_remove(struct platform_device *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath11k_ahb_free_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
- ath11k_core_deinit(ab);
-qmi_fail:
ath11k_ahb_free_irq(ab);
ath11k_hal_srng_deinit(ab);
+ ath11k_ahb_release_smp2p_handle(ab);
+ ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_ahb_power_down(ab);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ ath11k_ahb_remove_prepare(ab);
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_ahb_free_resources(ab);
return 0;
}
+static void ath11k_ahb_shutdown(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ /* platform shutdown() & remove() are mutually exclusive.
+ * remove() is invoked during rmmod & shutdown() during
+ * system reboot/shutdown.
+ */
+ ath11k_ahb_remove_prepare(ab);
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
+ goto free_resources;
+
+ ath11k_core_deinit(ab);
+
+free_resources:
+ ath11k_ahb_free_resources(ab);
+}
+
static struct platform_driver ath11k_ahb_driver = {
.driver = {
.name = "ath11k",
@@ -790,6 +1286,7 @@ static struct platform_driver ath11k_ahb_driver = {
},
.probe = ath11k_ahb_probe,
.remove = ath11k_ahb_remove,
+ .shutdown = ath11k_ahb_shutdown,
};
static int ath11k_ahb_init(void)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 51e6e4a5f686..415ddfd26654 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_AHB_H
#define ATH11K_AHB_H
@@ -8,10 +9,34 @@
#include "core.h"
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH11K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH11K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH11K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+
+enum ath11k_ahb_smp2p_msg_id {
+ ATH11K_AHB_POWER_SAVE_ENTER = 1,
+ ATH11K_AHB_POWER_SAVE_EXIT,
+};
+
struct ath11k_base;
struct ath11k_ahb {
struct rproc *tgt_rproc;
+ struct {
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ dma_addr_t msa_paddr;
+ u32 msa_size;
+ dma_addr_t ce_paddr;
+ u32 ce_size;
+ bool use_tz;
+ } fw;
+ struct {
+ unsigned short seq_no;
+ unsigned int smem_bit;
+ struct qcom_smem_state *smem_state;
+ } smp2p_info;
};
static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index aaa7b05ff49d..f2da95fd4253 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
@@ -249,7 +250,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
- /* only ce4 needs shadow workaroud*/
+ /* only ce4 needs shadow workaround */
if (ce_id == 4)
return true;
return false;
@@ -918,9 +919,6 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
int i;
int ret;
- ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
- &ab->qmi.ce_cfg.shadow_reg_v2_len);
-
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
@@ -1044,7 +1042,7 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
ret = ath11k_ce_alloc_pipe(ab, i);
if (ret) {
- /* Free any parial successful allocation */
+ /* Free any partially successful allocation */
ath11k_ce_free_pipes(ab);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 8255b6cfab0c..9644ff909502 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -145,7 +145,7 @@ struct ath11k_ce_ring {
u32 hal_ring_id;
/* keep last */
- struct sk_buff *skb[0];
+ struct sk_buff *skb[];
};
struct ath11k_ce_pipe {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 293563b3f784..b99180bc8172 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -8,6 +9,7 @@
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
+
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
@@ -52,9 +54,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -71,6 +70,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
+ .fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -82,6 +82,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
+ .cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -94,9 +95,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
- .wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -119,9 +137,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -135,6 +150,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
+ .fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -146,6 +162,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
+ .cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -158,9 +175,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
- .wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "qca6390 hw2.0",
@@ -183,9 +217,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 48,
- .rfkill_cfg = 0,
- .rfkill_on_level = 1,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -199,6 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -209,21 +241,42 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
- .supports_regdb = true,
+ .supports_regdb = false,
.fix_l1ss = true,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
- .wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0171ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "qcn9074 hw1.0",
@@ -246,9 +299,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
@@ -261,6 +311,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 16,
.fft_hdr_len = 24,
.max_fft_bins = 1024,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -272,6 +323,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 2,
.num_vdevs = 8,
.num_peers = 128,
@@ -284,9 +336,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = true,
.alloc_cacheable_memory = true,
- .wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6855 hw2.0",
@@ -309,9 +378,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -325,6 +391,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -335,6 +402,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -347,9 +415,29 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
- .wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6855 hw2.1",
@@ -372,9 +460,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -388,6 +473,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -397,6 +483,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -409,23 +496,179 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
- .wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ },
+ {
+ .name = "wcn6750 hw1.0",
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ .fw = {
+ .dir = "WCN6750/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6750_ops,
+ .ring_mask = &ath11k_hw_ring_mask_wcn6750,
+ .internal_sleep_clock = false,
+ .regs = &wcn6750_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .cold_boot_calib = true,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_wcn6750,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = true,
+ .fixed_fw_mem = true,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = false,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
+ .smp2p_wow_exit = true,
},
};
+static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
+{
+ WARN_ON(!ab->hw_params.single_pdev_only);
+
+ return &ab->pdevs[0];
+}
+
+void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_init(struct ath11k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+
+ init_completion(&ar->fw_stats_complete);
+}
+
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
+{
+ ath11k_fw_stats_pdevs_free(&stats->pdevs);
+ ath11k_fw_stats_vdevs_free(&stats->vdevs);
+ ath11k_fw_stats_bcn_free(&stats->bcn);
+}
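+
+Aside: all three *_free() helpers above use the same idiom. list_for_each_entry_safe() is required rather than list_for_each_entry() because each iteration unlinks and frees the current node; the _safe variant caches the successor before the node is invalidated. A minimal self-contained sketch of the idiom, with hypothetical demo_* names:
+
+	#include <linux/list.h>
+	#include <linux/slab.h>
+
+	struct demo_node {
+		struct list_head list;
+		int payload;
+	};
+
+	/* Free every node on 'head'; 'tmp' holds the cached successor
+	 * so the walk survives kfree() of the current node.
+	 */
+	static void demo_list_free(struct list_head *head)
+	{
+		struct demo_node *n, *tmp;
+
+		list_for_each_entry_safe(n, tmp, head, list) {
+			list_del(&n->list);
+			kfree(n);
+		}
+	}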
+
int ath11k_core_suspend(struct ath11k_base *ab)
{
int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
- /* TODO: there can frames in queues so for now add delay as a hack.
- * Need to implement to handle and remove this delay.
+ /* So far, single_pdev_only chips have supports_suspend set to true,
* and only the first pdev is valid.
*/
- msleep(500);
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) {
@@ -434,6 +677,12 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_wow_enable(ab);
if (ret) {
ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
@@ -466,10 +715,20 @@ EXPORT_SYMBOL(ath11k_core_suspend);
int ath11k_core_resume(struct ath11k_base *ab)
{
int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
+ /* So far, single_pdev_only chips have supports_suspend set to true,
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
ret = ath11k_hif_resume(ab);
if (ret) {
ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
@@ -496,6 +755,97 @@ int ath11k_core_resume(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_core_resume);
+static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath11k_base *ab = data;
+ const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC;
+ struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr;
+ ssize_t copied;
+ size_t len;
+ int i;
+
+ if (ab->qmi.target.bdf_ext[0] != '\0')
+ return;
+
+ if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH11K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff;
+ ab->new_alpha2[1] = smbios->cc_code & 0xff;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH11K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot smbios worldwide regdomain\n");
+ break;
+ default:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!smbios->bdf_enabled) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ len = min_t(size_t,
+ strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
+ for (i = 0; i < len; i++) {
+ if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic prefix */
+ copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
+ sizeof(ab->qmi.target.bdf_ext));
+ if (copied < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate\n");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
+}
+
+int ath11k_core_check_smbios(struct ath11k_base *ab)
+{
+ ab->qmi.target.bdf_ext[0] = '\0';
+ dmi_walk(ath11k_core_check_cc_code_bdfext, ab);
+
+ if (ab->qmi.target.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
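+
+For context, dmi_walk() iterates over every SMBIOS/DMI structure exposed by the firmware and invokes the callback once per entry, passing the private pointer through; ath11k_core_check_cc_code_bdfext() above simply filters for its own table type. A minimal sketch of the same pattern (hypothetical names, counting entries of one type):
+
+	#include <linux/dmi.h>
+
+	struct type_counter {
+		u8 type;
+		unsigned int count;
+	};
+
+	/* Invoked by dmi_walk() once per DMI entry header. */
+	static void count_type_cb(const struct dmi_header *hdr, void *data)
+	{
+		struct type_counter *tc = data;
+
+		if (hdr->type == tc->type)
+			tc->count++;
+	}
+
+	static unsigned int count_dmi_entries(u8 type)
+	{
+		struct type_counter tc = { .type = type };
+
+		if (dmi_walk(count_type_cb, &tc) < 0)
+			return 0;	/* no DMI table available */
+
+		return tc.count;
+	}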
+
int ath11k_core_check_dt(struct ath11k_base *ab)
{
size_t max_len = sizeof(ab->qmi.target.bdf_ext);
@@ -519,13 +869,13 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
return 0;
}
-static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
- size_t name_len)
+static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len, bool with_variant)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
- if (ab->qmi.target.bdf_ext[0] != '\0')
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
@@ -555,6 +905,18 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
return 0;
}
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, true);
+}
+
+static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false);
+}
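+
+To illustrate the fallback (the exact format string lives outside this hunk, so the shape below is only indicative): with a variant set, ath11k_core_create_board_name() yields something like "bus=pci,...,variant=SOMEVENDOR", the variant coming from SMBIOS or DT as parsed above, while ath11k_core_create_fallback_board_name() produces the same name minus the ",variant=..." suffix, so a generic entry in board-2.bin can still match when no variant-specific board data is shipped.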
+
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *file)
{
@@ -589,7 +951,9 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
- int bd_ie_type)
+ int ie_id,
+ int name_id,
+ int data_id)
{
const struct ath11k_fw_ie *hdr;
bool name_match_found;
@@ -599,7 +963,7 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
name_match_found = false;
- /* go through ATH11K_BD_IE_BOARD_ elements */
+ /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath11k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
@@ -610,48 +974,50 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
- ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n",
+ ath11k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath11k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
- switch (board_ie_id) {
- case ATH11K_BD_IE_BOARD_NAME:
+ if (board_ie_id == name_id) {
ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
- break;
+ goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
- break;
+ goto next;
name_match_found = true;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
- "boot found match for name '%s'",
+ "boot found match %s for name '%s'",
+ ath11k_bd_ie_type_str(ie_id),
boardname);
- break;
- case ATH11K_BD_IE_BOARD_DATA:
+ } else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
- break;
+ goto next;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
- "boot found board data for '%s'", boardname);
+ "boot found %s for '%s'",
+ ath11k_bd_ie_type_str(ie_id),
+ boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
- default:
- ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n",
+ } else {
+ ath11k_warn(ab, "unknown %s id found: %d\n",
+ ath11k_bd_ie_type_str(ie_id),
board_ie_id);
- break;
}
-
+next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
@@ -668,7 +1034,10 @@ out:
static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
struct ath11k_board_data *bd,
- const char *boardname)
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
{
size_t len, magic_len;
const u8 *data;
@@ -733,22 +1102,23 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
goto err;
}
- switch (ie_id) {
- case ATH11K_BD_IE_BOARD:
+ if (ie_id == ie_id_match) {
ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
- ATH11K_BD_IE_BOARD);
+ ie_id_match,
+ name_id,
+ data_id);
if (ret == -ENOENT)
/* no match found, continue */
- break;
+ goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
-
+next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
@@ -758,8 +1128,9 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
out:
if (!bd->data || !bd->len) {
- ath11k_err(ab,
- "failed to fetch board data for %s from %s\n",
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath11k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
@@ -790,24 +1161,52 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
#define BOARD_NAME_SIZE 200
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
- char boardname[BOARD_NAME_SIZE];
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
int ret;
- ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ filename = ATH11K_BOARD_API2_FILE;
+
+ ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath11k_err(ab, "failed to create board name: %d", ret);
return ret;
}
ab->bd_api = 2;
- ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname);
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath11k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
ab->bd_api = 1;
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
if (ret) {
- ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath11k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params.fw.dir);
return ret;
}
@@ -819,13 +1218,32 @@ success:
int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
+ char boardname[BOARD_NAME_SIZE];
int ret;
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_REGDB,
+ ATH11K_BD_IE_REGDB_NAME,
+ ATH11K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
if (ret)
ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
+exit:
+ if (!ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n");
+
return ret;
}
@@ -890,23 +1308,23 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_mac_register(ab);
+ ret = ath11k_dp_pdev_alloc(ab);
if (ret) {
- ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
goto err_pdev_debug;
}
- ret = ath11k_dp_pdev_alloc(ab);
+ ret = ath11k_mac_register(ab);
if (ret) {
- ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ goto err_dp_pdev_free;
}
ret = ath11k_thermal_register(ab);
if (ret) {
ath11k_err(ab, "could not register thermal device: %d\n",
ret);
- goto err_dp_pdev_free;
+ goto err_mac_unregister;
}
ret = ath11k_spectral_init(ab);
@@ -919,10 +1337,10 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
err_thermal_unregister:
ath11k_thermal_unregister(ab);
-err_dp_pdev_free:
- ath11k_dp_pdev_free(ab);
err_mac_unregister:
ath11k_mac_unregister(ab);
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
err_pdev_debug:
ath11k_debugfs_pdev_destroy(ab);
@@ -939,21 +1357,14 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
ath11k_debugfs_pdev_destroy(ab);
}
-static int ath11k_core_start(struct ath11k_base *ab,
- enum ath11k_firmware_mode mode)
+static int ath11k_core_start(struct ath11k_base *ab)
{
int ret;
- ret = ath11k_qmi_firmware_start(ab, mode);
- if (ret) {
- ath11k_err(ab, "failed to attach wmi: %d\n", ret);
- return ret;
- }
-
ret = ath11k_wmi_attach(ab);
if (ret) {
ath11k_err(ab, "failed to attach wmi: %d\n", ret);
- goto err_firmware_stop;
+ return ret;
}
ret = ath11k_htc_init(ab);
@@ -1028,7 +1439,7 @@ static int ath11k_core_start(struct ath11k_base *ab,
}
/* put hardware to DBS mode */
- if (ab->hw_params.single_pdev_only) {
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
@@ -1053,28 +1464,22 @@ err_hif_stop:
ath11k_hif_stop(ab);
err_wmi_detach:
ath11k_wmi_detach(ab);
-err_firmware_stop:
- ath11k_qmi_firmware_stop(ab);
return ret;
}
-static int ath11k_core_rfkill_config(struct ath11k_base *ab)
+static int ath11k_core_start_firmware(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
{
- struct ath11k *ar;
- int ret = 0, i;
+ int ret;
- if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
- return 0;
+ ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
+ &ab->qmi.ce_cfg.shadow_reg_v2_len);
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
-
- ret = ath11k_mac_rfkill_config(ar);
- if (ret && ret != -EOPNOTSUPP) {
- ath11k_warn(ab, "failed to configure rfkill: %d", ret);
- return ret;
- }
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath11k_err(ab, "failed to send firmware start: %d\n", ret);
+ return ret;
}
return ret;
@@ -1084,16 +1489,22 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
+ ret = ath11k_core_start_firmware(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath11k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_ce_init_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to initialize CE: %d\n", ret);
- return ret;
+ goto err_firmware_stop;
}
ret = ath11k_dp_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to init DP: %d\n", ret);
- return ret;
+ goto err_firmware_stop;
}
switch (ath11k_crypto_mode) {
@@ -1114,7 +1525,7 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
mutex_lock(&ab->core_lock);
- ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ ret = ath11k_core_start(ab);
if (ret) {
ath11k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
@@ -1126,13 +1537,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_core_stop;
}
ath11k_hif_irq_enable(ab);
-
- ret = ath11k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath11k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_stop;
- }
-
mutex_unlock(&ab->core_lock);
return 0;
@@ -1143,6 +1547,9 @@ err_core_stop:
err_dp_free:
ath11k_dp_free(ab);
mutex_unlock(&ab->core_lock);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
return ret;
}
@@ -1198,7 +1605,6 @@ void ath11k_core_halt(struct ath11k *ar)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
- cancel_work_sync(&ab->rfkill_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
@@ -1206,28 +1612,6 @@ void ath11k_core_halt(struct ath11k *ar)
idr_init(&ar->txmgmt_idr);
}
-static void ath11k_rfkill_work(struct work_struct *work)
-{
- struct ath11k_base *ab = container_of(work, struct ath11k_base, rfkill_work);
- struct ath11k *ar;
- bool rfkill_radio_on;
- int i;
-
- spin_lock_bh(&ab->base_lock);
- rfkill_radio_on = ab->rfkill_radio_on;
- spin_unlock_bh(&ab->base_lock);
-
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
- continue;
-
- /* notify cfg80211 radio state change */
- ath11k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
- }
-}
-
static void ath11k_update_11d(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
@@ -1248,6 +1632,7 @@ static void ath11k_update_11d(struct work_struct *work)
pdev = &ab->pdevs[i];
ar = pdev->ar;
+ memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
@@ -1256,12 +1641,11 @@ static void ath11k_update_11d(struct work_struct *work)
}
}
-static void ath11k_core_restart(struct work_struct *work)
+static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
{
- struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
- int i, ret = 0;
+ int i;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
@@ -1275,8 +1659,11 @@ static void ath11k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar);
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@@ -1295,11 +1682,14 @@ static void ath11k_core_restart(struct work_struct *work)
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
- ret = ath11k_core_reconfigure_on_crash(ab);
- if (ret) {
- ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
- return;
- }
+ reinit_completion(&ab->driver_recovery);
+}
+
+static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
@@ -1335,6 +1725,97 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ab->driver_recovery);
}
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ int ret;
+
+ if (!ab->is_reset)
+ ath11k_core_pre_reconfigure_recovery(ab);
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ if (ab->is_reset)
+ complete_all(&ab->reconfigure_complete);
+
+ if (!ab->is_reset)
+ ath11k_core_post_reconfigure_recovery(ab);
+}
+
+static void ath11k_core_reset(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
+ int reset_count, fail_cont_count;
+ long time_left;
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) {
+ ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
+ return;
+ }
+
+ /* Sometimes a recovery fails and every subsequent recovery attempt
+ * fails as well, so bail out here to avoid looping on recoveries
+ * that can no longer succeed.
+ */
+ fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL)
+ return;
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST &&
+ time_before(jiffies, ab->reset_fail_timeout))
+ return;
+
+ reset_count = atomic_inc_return(&ab->reset_count);
+
+ if (reset_count > 1) {
+ /* Sometimes another reset worker is scheduled before the previous
+ * one has completed; the second worker would then tear down the
+ * state of the first, so the wait below avoids that.
+ */
+ ath11k_warn(ab, "already resetting count %d\n", reset_count);
+
+ reinit_completion(&ab->reset_complete);
+ time_left = wait_for_completion_timeout(&ab->reset_complete,
+ ATH11K_RESET_TIMEOUT_HZ);
+
+ if (time_left) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "to skip reset\n");
+ atomic_dec(&ab->reset_count);
+ return;
+ }
+
+ ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ;
+ /* Record the continuous recovery fail count when recovery fails */
+ atomic_inc(&ab->fail_cont_count);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n");
+
+ ab->is_reset = true;
+ atomic_set(&ab->recovery_count, 0);
+ reinit_completion(&ab->recovery_start);
+ atomic_set(&ab->recovery_start_count, 0);
+
+ ath11k_core_pre_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->reconfigure_complete);
+ ath11k_core_post_reconfigure_recovery(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n");
+
+ time_left = wait_for_completion_timeout(&ab->recovery_start,
+ ATH11K_RECOVER_START_TIMEOUT_HZ);
+
+ ath11k_hif_power_down(ab);
+ ath11k_hif_power_up(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
+}
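+
+Putting numbers on the bail-out logic above (using the constants this patch adds to core.h further down): after ATH11K_RESET_MAX_FAIL_COUNT_FIRST (3) consecutive failed recoveries, further resets are attempted only once the 20 second reset_fail_timeout window has elapsed, and after ATH11K_RESET_MAX_FAIL_COUNT_FINAL (5) consecutive failures the worker returns immediately, so no further resets are scheduled from this path.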
+
static int ath11k_init_hw_params(struct ath11k_base *ab)
{
const struct ath11k_hw_params *hw_params = NULL;
@@ -1404,13 +1885,15 @@ EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
+ destroy_workqueue(ab->workqueue_aux);
+ destroy_workqueue(ab->workqueue);
+
kfree(ab);
}
EXPORT_SYMBOL(ath11k_core_free);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus,
- const struct ath11k_bus_params *bus_params)
+ enum ath11k_bus bus)
{
struct ath11k_base *ab;
@@ -1424,9 +1907,17 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
if (!ab->workqueue)
goto err_sc_free;
+ ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq");
+ if (!ab->workqueue_aux)
+ goto err_free_wq;
+
mutex_init(&ab->core_lock);
+ mutex_init(&ab->tbl_mtx_lock);
spin_lock_init(&ab->base_lock);
mutex_init(&ab->vdev_id_11d_lock);
+ init_completion(&ab->reset_complete);
+ init_completion(&ab->reconfigure_complete);
+ init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1434,17 +1925,18 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
- INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
+ INIT_WORK(&ab->reset_work, ath11k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
- ab->bus_params = *bus_params;
ab->hif.bus = bus;
return ab;
+err_free_wq:
+ destroy_workqueue(ab->workqueue);
err_sc_free:
kfree(ab);
return NULL;
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 9e88ccca5ca7..cf2f52cc4e30 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -10,6 +11,10 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitfield.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
+#include <linux/rhashtable.h>
+#include <linux/average.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -23,6 +28,7 @@
#include "thermal.h"
#include "dbring.h"
#include "spectral.h"
+#include "wow.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -36,9 +42,26 @@
#define ATH11K_INVALID_HW_MAC_ID 0xFF
#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* The magic used by QCA spec */
+#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
extern unsigned int ath11k_frame_mode;
+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
#define ATH11K_MON_TIMER_INTERVAL 10
+#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3
+#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5
+#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
+#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
@@ -118,6 +141,7 @@ enum ath11k_hw_rev {
ATH11K_HW_QCN9074_HW10,
ATH11K_HW_WCN6855_HW20,
ATH11K_HW_WCN6855_HW21,
+ ATH11K_HW_WCN6750_HW10,
};
enum ath11k_firmware_mode {
@@ -147,6 +171,39 @@ struct ath11k_ext_irq_grp {
struct net_device napi_ndev;
};
+enum ath11k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH11K_SMBIOS_CC_DISABLE = 0,
+
+ /* set country code by ANSI country name, based on ISO3166-1 alpha2 */
+ ATH11K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH11K_SMBIOS_CC_WW = 2,
+};
+
+struct ath11k_smbios_bdf {
+ struct dmi_header hdr;
+
+ u8 features_disabled;
+
+ /* enum ath11k_smbios_cc_type */
+ u8 country_code_flag;
+
+ /* To set a specific country, first set country_code_flag to
+ * ATH11K_SMBIOS_CC_ISO, then put the ISO 3166-1 alpha2 code in
+ * cc_code: for the United States the value is 0x5553 ("US",
+ * 'U' = 0x55, 'S' = 0x53); for Indonesia it is 0x4944 ("ID",
+ * 'I' = 0x49, 'D' = 0x44). If country_code_flag is
+ * ATH11K_SMBIOS_CC_WW, the worldwide regulatory setting is used
+ * instead.
+ */
+ u16 cc_code;
+
+ u8 bdf_enabled;
+ u8 bdf_ext[];
+} __packed;
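+
+For reference, the cc_code decoding performed in ath11k_core_check_cc_code_bdfext() earlier in this patch is just two byte extractions; a small stand-alone illustration (hypothetical helper, hosted C):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	/* Decode the SMBIOS cc_code into an ISO 3166-1 alpha2 string,
+	 * mirroring the shifts used by the driver.
+	 */
+	static void decode_cc_code(uint16_t cc_code, char alpha2[3])
+	{
+		alpha2[0] = (cc_code >> 8) & 0xff;	/* 0x55 -> 'U' */
+		alpha2[1] = cc_code & 0xff;		/* 0x53 -> 'S' */
+		alpha2[2] = '\0';
+	}
+
+	int main(void)
+	{
+		char alpha2[3];
+
+		decode_cc_code(0x5553, alpha2);
+		printf("%s\n", alpha2);		/* prints "US" */
+		return 0;
+	}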
+
#define HEHANDLE_CAP_PHYINFO_SIZE 3
#define HECAP_PHYINFO_SIZE 9
#define HECAP_MACINFO_SIZE 5
@@ -189,6 +246,12 @@ enum ath11k_scan_state {
ATH11K_SCAN_ABORTING,
};
+enum ath11k_11d_state {
+ ATH11K_11D_IDLE,
+ ATH11K_11D_PREPARING,
+ ATH11K_11D_RUNNING,
+};
+
enum ath11k_dev_flags {
ATH11K_CAC_RUNNING,
ATH11K_FLAG_CORE_REGISTERED,
@@ -204,6 +267,8 @@ enum ath11k_dev_flags {
ATH11K_FLAG_CE_IRQ_ENABLED,
ATH11K_FLAG_EXT_IRQ_ENABLED,
ATH11K_FLAG_FIXED_MEM_RGN,
+ ATH11K_FLAG_DEVICE_INIT_DONE,
+ ATH11K_FLAG_MULTI_MSI_VECTORS,
};
enum ath11k_monitor_flags {
@@ -212,6 +277,30 @@ enum ath11k_monitor_flags {
ATH11K_FLAG_MONITOR_VDEV_CREATED,
};
+#define ATH11K_IPV6_UC_TYPE 0
+#define ATH11K_IPV6_AC_TYPE 1
+
+#define ATH11K_IPV6_MAX_COUNT 16
+#define ATH11K_IPV4_MAX_COUNT 2
+
+struct ath11k_arp_ns_offload {
+ u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[ATH11K_IPV6_MAX_COUNT];
+ bool ipv6_valid[ATH11K_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -263,6 +352,12 @@ struct ath11k_vif {
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
+ struct ath11k_arp_ns_offload arp_ns_offload;
+ struct ath11k_rekey_data rekey_data;
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_twt;
+#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {
@@ -370,6 +465,8 @@ struct ath11k_per_ppdu_tx_stats {
u32 retry_bytes;
};
+DECLARE_EWMA(avg_rssi, 10, 8)
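+
+The DECLARE_EWMA(avg_rssi, 10, 8) line expands (via <linux/average.h>) into struct ewma_avg_rssi plus ewma_avg_rssi_init/_add/_read helpers; the two parameters are the number of fractional precision bits (10) and the reciprocal weight (8, which must be a power of two), so each new sample contributes 1/8 of its value to the running average. A short usage sketch with a hypothetical demo name (samples are unsigned, as the accumulator is an unsigned long):
+
+	#include <linux/average.h>
+
+	/* avg = avg * 7/8 + sample * 1/8, kept with 10 fractional
+	 * bits; the first sample seeds the average directly.
+	 */
+	DECLARE_EWMA(demo_sig, 10, 8)
+
+	static unsigned long demo_ewma(void)
+	{
+		struct ewma_demo_sig avg;
+
+		ewma_demo_sig_init(&avg);
+		ewma_demo_sig_add(&avg, 40);
+		ewma_demo_sig_add(&avg, 48);
+
+		return ewma_demo_sig_read(&avg);	/* (40 * 7 + 48) / 8 = 41 */
+	}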
+
struct ath11k_sta {
struct ath11k_vif *arvif;
@@ -388,6 +485,7 @@ struct ath11k_sta {
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
s8 rssi_beacon;
s8 chain_signal[IEEE80211_MAX_CHAINS];
struct ath11k_htt_tx_stats *tx_stats;
@@ -400,6 +498,13 @@ struct ath11k_sta {
bool use_4addr_set;
u16 tcl_metadata;
+
+ /* Protected with ar->data_lock */
+ enum ath11k_wmi_peer_ps_state peer_ps_state;
+ u64 ps_start_time;
+ u64 ps_start_jiffies;
+ u64 ps_total_duration;
+ bool peer_current_ps_valid;
};
#define ATH11K_MIN_5G_FREQ 4150
@@ -441,19 +546,21 @@ struct ath11k_dbg_htt_stats {
spinlock_t lock;
};
+#define MAX_MODULE_ID_BITMAP_WORDS 16
+
struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
u32 extd_tx_stats;
- struct ath11k_fw_stats fw_stats;
- struct completion fw_stats_complete;
- bool fw_stats_done;
u32 extd_rx_stats;
u32 pktlog_filter;
u32 pktlog_mode;
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
+ u32 mem_offset;
+ u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
+ struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
};
struct ath11k_per_peer_tx_stats {
@@ -479,8 +586,6 @@ struct ath11k {
struct ath11k_pdev_wmi *wmi;
struct ath11k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
- u32 ht_cap_info;
- u32 vht_cap_info;
struct ath11k_he ar_he;
enum ath11k_state state;
bool supports_6ghz;
@@ -582,6 +687,9 @@ struct ath11k {
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
+ struct ath11k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
struct ath11k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -599,10 +707,20 @@ struct ath11k {
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
u32 vdev_id_11d_scan;
- struct completion finish_11d_scan;
- struct completion finish_11d_ch_list;
- bool pending_11d;
+ struct completion completed_11d_scan;
+ enum ath11k_11d_state state_11d;
bool regdom_set_by_user;
+ int hw_rate_code;
+ u8 twt_enabled;
+ bool nlo_enabled;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ /* protected by conf_mutex */
+ bool ps_state_enable;
+ bool ps_timekeeper_enable;
};
struct ath11k_band_cap {
@@ -644,12 +762,12 @@ struct ath11k_board_data {
size_t len;
};
-struct ath11k_bus_params {
- bool mhi_support;
- bool m3_fw_support;
- bool fixed_bdf_addr;
- bool fixed_mem_region;
- bool static_window_map;
+struct ath11k_pci_ops {
+ int (*wakeup)(struct ath11k_base *ab);
+ void (*release)(struct ath11k_base *ab);
+ int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector);
+ void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value);
+ u32 (*window_read32)(struct ath11k_base *ab, u32 offset);
};
/* IPQ8074 HW channel counters frequency value in hertz */
@@ -693,6 +811,19 @@ struct ath11k_soc_dp_stats {
struct ath11k_dp_ring_bp_stats bp_stats;
};
+struct ath11k_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath11k_msi_config {
+ int total_vectors;
+ int total_users;
+ struct ath11k_msi_user *users;
+ u16 hw_rev;
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
@@ -737,6 +868,18 @@ struct ath11k_base {
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
+
+ /* To synchronize rhash tbl write operation */
+ struct mutex tbl_mtx_lock;
+
+ /* The rhashtable containing struct ath11k_peer keyed by mac addr */
+ struct rhashtable *rhead_peer_addr;
+ struct rhashtable_params rhash_peer_addr_param;
+
+ /* The rhashtable containing struct ath11k_peer keyed by id */
+ struct rhashtable *rhead_peer_id;
+ struct rhashtable_params rhash_peer_id_param;
+
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
u8 mac_addr[ETH_ALEN];
@@ -750,13 +893,12 @@ struct ath11k_base {
int bd_api;
struct ath11k_hw_params hw_params;
- struct ath11k_bus_params bus_params;
const struct firmware *cal_file;
/* Below regd's are protected by ab->data_lock */
/* This is the regd set for every radio
- * by the firmware during initializatin
+ * by the firmware during initialization
*/
struct ieee80211_regdomain *default_regd[MAX_RADIOS];
/* This regd is set during dynamic country setting
@@ -778,6 +920,18 @@ struct ath11k_base {
struct work_struct restart_work;
struct work_struct update_11d_work;
u8 new_alpha2[3];
+ struct workqueue_struct *workqueue_aux;
+ struct work_struct reset_work;
+ atomic_t reset_count;
+ atomic_t recovery_count;
+ atomic_t recovery_start_count;
+ bool is_reset;
+ struct completion reset_complete;
+ struct completion reconfigure_complete;
+ struct completion recovery_start;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+ unsigned long reset_fail_timeout;
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -786,10 +940,6 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
- struct work_struct rfkill_work;
-
- /* true means radio is on */
- bool rfkill_radio_on;
/* To synchronize 11d scan vdev id */
struct mutex vdev_id_11d_lock;
@@ -805,8 +955,20 @@ struct ath11k_base {
u32 subsystem_device;
} id;
+ struct {
+ struct {
+ const struct ath11k_msi_config *config;
+ u32 ep_base_data;
+ u32 irqs[32];
+ u32 addr_lo;
+ u32 addr_hi;
+ } msi;
+
+ const struct ath11k_pci_ops *ops;
+ } pci;
+
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
struct ath11k_fw_stats_pdev {
@@ -961,6 +1123,12 @@ struct ath11k_fw_stats_bcn {
u32 tx_bcn_outage_cnt;
};
+void ath11k_fw_stats_init(struct ath11k *ar);
+void ath11k_fw_stats_pdevs_free(struct list_head *head);
+void ath11k_fw_stats_vdevs_free(struct list_head *head);
+void ath11k_fw_stats_bcn_free(struct list_head *head);
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats);
+
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
@@ -975,8 +1143,7 @@ int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus,
- const struct ath11k_bus_params *bus_params);
+ enum ath11k_bus bus);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
@@ -986,7 +1153,7 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
const char *name);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
-
+int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index eda67ebfc4c2..2107ec05d14f 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -37,7 +37,8 @@ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff)
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -84,6 +85,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+ ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
ath11k_hal_srng_access_end(ab, srng);
return 0;
@@ -101,7 +103,8 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
- struct ath11k_dbring *ring)
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -129,7 +132,7 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
kfree(buff);
break;
}
- ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -210,7 +213,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
- ret = ath11k_dbring_fill_bufs(ar, ring);
+ ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
return ret;
}
@@ -270,7 +273,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
- u8 pdev_idx, rbm;
+ u8 pdev_idx, rbm, module_id;
u32 cookie;
int buf_id;
int size;
@@ -278,6 +281,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
+ module_id = ev->fixed.module_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
@@ -346,6 +350,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
+ ath11k_debugfs_add_dbring_entry(ar, module_id,
+ ATH11K_DBG_DBR_EVENT_RX, srng);
+
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
@@ -357,7 +364,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
buff->paddr = 0;
memset(buff->payload, 0, size);
- ath11k_dbring_bufs_replenish(ar, ring, buff);
+ ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
}
spin_unlock_bh(&srng->lock);
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index fbbd5fe02aa8..91545640c47b 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -23,8 +23,8 @@ enum ath11k_debug_mask {
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
ATH11K_DBG_PCI = 0x00001000,
- ATH11K_DBG_DP_TX = 0x00001000,
- ATH11K_DBG_DP_RX = 0x00002000,
+ ATH11K_DBG_DP_TX = 0x00002000,
+ ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 198ade90b725..ccdf3d5ba1ab 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -14,6 +14,7 @@
#include "dp_tx.h"
#include "debugfs_htt_stats.h"
#include "peer.h"
+#include "hif.h"
static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
"REO2SW1_RING",
@@ -52,91 +53,74 @@ static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
"MONITOR_DEST_RING",
};
-static void ath11k_fw_stats_pdevs_free(struct list_head *head)
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
{
- struct ath11k_fw_stats_pdev *i, *tmp;
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_data;
+ struct ath11k_dbg_dbr_entry *entry;
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
+ if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
+ return;
-static void ath11k_fw_stats_vdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_vdev *i, *tmp;
+ dbr_debug = ar->debug.dbr_debug[id];
+ if (!dbr_debug)
+ return;
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
+ if (!dbr_debug->dbr_debug_enabled)
+ return;
-static void ath11k_fw_stats_bcn_free(struct list_head *head)
-{
- struct ath11k_fw_stats_bcn *i, *tmp;
+ dbr_data = &dbr_debug->dbr_dbg_data;
+
+ spin_lock_bh(&dbr_data->lock);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
+ if (dbr_data->entries) {
+ entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
+ entry->hp = srng->u.src_ring.hp;
+ entry->tp = *srng->u.src_ring.tp_addr;
+ entry->timestamp = jiffies;
+ entry->event = event;
+
+ dbr_data->dbr_debug_idx++;
+ if (dbr_data->dbr_debug_idx ==
+ dbr_data->num_ring_debug_entries)
+ dbr_data->dbr_debug_idx = 0;
}
+
+ spin_unlock_bh(&dbr_data->lock);
}
static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
{
spin_lock_bh(&ar->data_lock);
- ar->debug.fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
spin_unlock_bh(&ar->data_lock);
}
-void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
- struct ath11k_fw_stats stats = {};
- struct ath11k *ar;
+ struct ath11k_base *ab = ar->ab;
struct ath11k_pdev *pdev;
bool is_end;
static unsigned int num_vdev, num_bcn;
size_t total_vdevs_started = 0;
- int i, ret;
-
- INIT_LIST_HEAD(&stats.pdevs);
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
-
- ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
- if (ret) {
- ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
- goto free;
- }
-
- rcu_read_lock();
- ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
- if (!ar) {
- rcu_read_unlock();
- ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
- stats.pdev_id, ret);
- goto free;
- }
-
- spin_lock_bh(&ar->data_lock);
+ int i;
- if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
- ar->debug.fw_stats_done = true;
- goto complete;
- }
+ /* WMI_REQUEST_PDEV_STAT request has already been processed */
- if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->debug.fw_stats_done = true;
- goto complete;
+ if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ ar->fw_stats_done = true;
+ return;
}
- if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats.vdevs)) {
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
ath11k_warn(ab, "empty vdev stats");
- goto complete;
+ return;
}
/* FW sends all the active VDEV stats irrespective of PDEV,
* hence limit until the count of all VDEVs started
@@ -149,43 +133,34 @@ void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb
is_end = ((++num_vdev) == total_vdevs_started);
- list_splice_tail_init(&stats.vdevs,
- &ar->debug.fw_stats.vdevs);
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
if (is_end) {
- ar->debug.fw_stats_done = true;
+ ar->fw_stats_done = true;
num_vdev = 0;
}
- goto complete;
+ return;
}
- if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats.bcn)) {
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
- goto complete;
+ return;
}
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
is_end = ((++num_bcn) == ar->num_started_vdevs);
- list_splice_tail_init(&stats.bcn,
- &ar->debug.fw_stats.bcn);
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
if (is_end) {
- ar->debug.fw_stats_done = true;
+ ar->fw_stats_done = true;
num_bcn = 0;
}
}
-complete:
- complete(&ar->debug.fw_stats_complete);
- rcu_read_unlock();
- spin_unlock_bh(&ar->data_lock);
-
-free:
- ath11k_fw_stats_pdevs_free(&stats.pdevs);
- ath11k_fw_stats_vdevs_free(&stats.vdevs);
- ath11k_fw_stats_bcn_free(&stats.bcn);
}
static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
@@ -206,7 +181,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
ath11k_debugfs_fw_stats_reset(ar);
- reinit_completion(&ar->debug.fw_stats_complete);
+ reinit_completion(&ar->fw_stats_complete);
ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
@@ -216,9 +191,8 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
return ret;
}
- time_left =
- wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1 * HZ);
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
if (!time_left)
return -ETIMEDOUT;
@@ -227,7 +201,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
break;
spin_lock_bh(&ar->data_lock);
- if (ar->debug.fw_stats_done) {
+ if (ar->fw_stats_done) {
spin_unlock_bh(&ar->data_lock);
break;
}
@@ -299,8 +273,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
goto err_free;
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
@@ -371,8 +344,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
goto err_free;
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
@@ -449,14 +421,13 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
}
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
/* since beacon stats request is looped for all active VDEVs, saved fw
* stats is not freed for each request until done for all active VDEVs
*/
spin_lock_bh(&ar->data_lock);
- ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn);
spin_unlock_bh(&ar->data_lock);
file->private_data = buf;
@@ -557,6 +528,10 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
ret = ath11k_wmi_force_fw_hang_cmd(ar,
ATH11K_WMI_FW_HANG_ASSERT_TYPE,
ATH11K_WMI_FW_HANG_DELAY);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath11k_info(ab, "user requested hw restart\n");
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ ret = 0;
} else {
ret = -EINVAL;
goto exit;
@@ -666,6 +641,12 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
goto exit;
}
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+ goto exit;
+ }
+
if (enable) {
rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
@@ -870,6 +851,126 @@ static const struct file_operations fops_soc_dp_stats = {
.llseek = default_llseek,
};
+static ssize_t ath11k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[128] = {0};
+ struct ath11k_fw_dbglog dbglog;
+ unsigned int param, mod_id_index, is_end;
+ u64 value;
+ int ret, num;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
+
+ if (num < 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
+ param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
+ if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
+ if (!is_end) {
+ ret = count;
+ goto out;
+ }
+ } else {
+ if (num != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ dbglog.param = param;
+ dbglog.value = lower_32_bits(value);
+ ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
+ if (ret) {
+ ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .write = ath11k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
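+
+Usage sketch for the handler above (the debugfs file itself is created outside this hunk, so the path is indicative only): the input format is "<param> <value-hex> [<mod_id_index> <is_end>]". For ordinary parameters two fields suffice and the low 32 bits of the value are sent; for the bitmap parameters (WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP and the WOW variant) all four fields are required, the upper 32 bits of the value are stored at module_id_bitmap[mod_id_index], and the WMI command is only issued once a write with a non-zero is_end arrives.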
+
+static int ath11k_open_sram_dump(struct inode *inode, struct file *file)
+{
+ struct ath11k_base *ab = inode->i_private;
+ u8 *buf;
+ u32 start, end;
+ int ret;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+
+ buf = vmalloc(end - start + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath11k_hif_read(ab, buf, start, end);
+ if (ret) {
+ ath11k_warn(ab, "failed to dump sram: %d\n", ret);
+ vfree(buf);
+ return ret;
+ }
+
+ file->private_data = buf;
+ return 0;
+}
+
+static ssize_t ath11k_read_sram_dump(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->f_inode->i_private;
+ const char *buf = file->private_data;
+ int len;
+ u32 start, end;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+ len = end - start + 1;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath11k_release_sram_dump(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops_sram_dump = {
+ .open = ath11k_open_sram_dump,
+ .read = ath11k_read_sram_dump,
+ .release = ath11k_release_sram_dump,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
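+
+Once ath11k_debugfs_pdev_create() below registers the file (only when hw_params.sram_dump.start is non-zero), the whole region can be captured from user space with something like `cat /sys/kernel/debug/ath11k/<device>/sram > sram.bin` (the exact directory name depends on bus/device naming, so the path is indicative). Each open() snapshots start..end into a vmalloc() buffer, read() serves from that snapshot, and release() frees it.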
+
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@@ -885,6 +986,10 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
+ if (ab->hw_params.sram_dump.start != 0)
+ debugfs_create_file("sram", 0400, ab->debugfs_soc, ab,
+ &fops_sram_dump);
+
return 0;
}
@@ -913,7 +1018,7 @@ void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
ar->debug.debugfs_pdev);
- ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+ ar->fw_stats.debugfs_fwstats = fwstats_dir;
/* all stats debugfs files are created under the "fw_stats"
* directory created per PDEV
@@ -924,12 +1029,6 @@ void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
&fops_vdev_stats);
debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
&fops_bcn_stats);
-
- INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
-
- init_completion(&ar->debug.fw_stats_complete);
}
static ssize_t ath11k_write_pktlog_filter(struct file *file,
@@ -1107,6 +1206,356 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
+ static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
+ int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
+ char *buf;
+ int i, ret;
+ int len = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+ len += scnprintf(buf + len, size - len,
+ "| idx | hp | tp | timestamp | event |\n");
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+
+ spin_lock_bh(&dbr_dbg_data->lock);
+
+ for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
+ len += scnprintf(buf + len, size - len,
+ "|%4u|%8u|%8u|%11llu|%8s|\n", i,
+ dbr_dbg_data->entries[i].hp,
+ dbr_dbg_data->entries[i].tp,
+ dbr_dbg_data->entries[i].timestamp,
+ event_id_to_string[dbr_dbg_data->entries[i].event]);
+ }
+
+ spin_unlock_bh(&dbr_dbg_data->lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_debug_dump_dbr_entries = {
+ .read = ath11k_debug_dump_dbr_entries,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_dbg_data->entries);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[dbr_id] = NULL;
+}
+
+static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
+
+ if (ar->debug.dbr_debug[dbr_id])
+ return 0;
+
+ ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
+ GFP_KERNEL);
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return -ENOMEM;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ if (dbr_debug->dbr_debugfs)
+ return 0;
+
+ dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
+ ar->debug.debugfs_pdev);
+ if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
+ if (IS_ERR(dbr_debug->dbr_debugfs))
+ return PTR_ERR(dbr_debug->dbr_debugfs);
+ return -ENOMEM;
+ }
+
+ dbr_debug->dbr_debug_enabled = true;
+ dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
+ dbr_dbg_data->dbr_debug_idx = 0;
+ dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
+ sizeof(struct ath11k_dbg_dbr_entry),
+ GFP_KERNEL);
+ if (!dbr_dbg_data->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&dbr_dbg_data->lock);
+
+ debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
+ dbr_dbg_data, &fops_debug_dump_dbr_entries);
+
+ return 0;
+}
+
+static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32] = {0};
+ u32 dbr_id, enable;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ goto out;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%u %u", &dbr_id, &enable);
+ if (ret != 2 || dbr_id > 1 || enable > 1) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
+ goto out;
+ }
+
+ if (enable) {
+ ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
+ ret);
+ goto out;
+ }
+ } else {
+ ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_dbr_debug = {
+ .write = ath11k_debugfs_write_enable_dbr_dbg,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
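+
+/* Illustrative usage, not part of this change (paths assumed): the file
+ * takes "<dbr_id> <enable>" with dbr_id 0 = spectral and 1 = CFR, and
+ * creates or removes the per-ring dump file accordingly:
+ *
+ *   echo "0 1" > /sys/kernel/debug/ath11k/<hw>/mac0/enable_dbr_debug
+ *   cat /sys/kernel/debug/ath11k/<hw>/mac0/spectral/dump_dbr_debug
+ */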
+
+static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ ssize_t ret;
+ u8 ps_timekeeper_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ar->ps_timekeeper_enable = !!ps_timekeeper_enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_timekeeper_enable = {
+ .read = ath11k_read_ps_timekeeper_enable,
+ .write = ath11k_write_ps_timekeeper_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_reset_peer_ps_duration(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_reset_ps_duration(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+ u8 reset_ps_duration;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &reset_ps_duration))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_reset_peer_ps_duration,
+ ar);
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_ps_duration = {
+ .write = ath11k_write_reset_ps_duration,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ arsta->ps_start_time = 0;
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ps_state_enable = !!ps_state_enable;
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
+ ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable) {
+ ar->ps_timekeeper_enable = false;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_peer_ps_state_disable,
+ ar);
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath11k_read_ps_state_enable,
+ .write = ath11k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
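+
+/* Illustrative usage, not part of this change (paths assumed):
+ * ps_state_enable gates the WMI peer PS state-change reporting and
+ * must be enabled before the timekeeper can be turned on:
+ *
+ *   echo 1 > /sys/kernel/debug/ath11k/<hw>/mac0/ps_state_enable
+ *   echo 1 > /sys/kernel/debug/ath11k/<hw>/mac0/ps_timekeeper_enable
+ */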
+
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -1136,6 +1585,9 @@ int ath11k_debugfs_register(struct ath11k *ar)
debugfs_create_file("pktlog_filter", 0644,
ar->debug.debugfs_pdev, ar,
&fops_pktlog_filter);
+ debugfs_create_file("fw_dbglog_config", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_fw_dbglog);
if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
debugfs_create_file("dfs_simulate_radar", 0200,
@@ -1146,9 +1598,298 @@ int ath11k_debugfs_register(struct ath11k *ar)
&ar->dfs_block_radar_events);
}
+ if (ab->hw_params.dbr_debug_support)
+ debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_dbr_debug);
+
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar,
+ &fops_ps_state_enable);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("ps_timekeeper_enable", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_ps_timekeeper_enable);
+
+ debugfs_create_file("reset_ps_duration", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_reset_ps_duration);
+ }
+
return 0;
}
void ath11k_debugfs_unregister(struct ath11k *ar)
{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ int i;
+
+ for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
+ dbr_debug = ar->debug.dbr_debug[i];
+ if (!dbr_debug)
+ continue;
+
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+ kfree(dbr_dbg_data->entries);
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[i] = NULL;
+ }
+}
+
+static ssize_t ath11k_write_twt_add_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_add_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
+ u8 buf[128] = {0};
+ int ret;
+
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.wake_intvl_us,
+ &params.wake_intvl_mantis,
+ &params.wake_dura_us,
+ &params.sp_offset_us,
+ &params.twt_cmd,
+ &params.flag_bcast,
+ &params.flag_trigger,
+ &params.flag_flow_type,
+ &params.flag_protection);
+ if (ret != 16)
+ return -EINVAL;
+
+ /* In the case of a station vif, TWT is entirely handled by
+ * the firmware based on the input parameters of the TWT enable
+ * WMI command sent to the target during assoc.
+ * To test the TWT feature manually, first disable TWT and then
+ * send the enable command again with the TWT input parameter
+ * sta_cong_timer_ms set to 0.
+ */
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ twt_params.sta_cong_timer_ms = 0;
+
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ goto err_twt_add_dialog;
+
+ return count;
+
+err_twt_add_dialog:
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return ret;
+}
+
+static ssize_t ath11k_write_twt_del_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_del_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
+ u8 buf[64] = {0};
+ int ret;
+
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_pause_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_resume_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.sp_offset_us,
+ &params.next_twt_size);
+ if (ret != 9)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations ath11k_fops_twt_add_dialog = {
+ .write = ath11k_write_twt_add_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_del_dialog = {
+ .write = ath11k_write_twt_del_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_pause_dialog = {
+ .write = ath11k_write_twt_pause_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_resume_dialog = {
+ .write = ath11k_write_twt_resume_dialog,
+ .open = simple_open
+};
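+
+/* Illustrative usage, not part of this change (paths and values are
+ * arbitrary examples): add_dialog takes "<peer-mac> <dialog_id>
+ * <wake_intvl_us> <wake_intvl_mantis> <wake_dura_us> <sp_offset_us>
+ * <twt_cmd> <flag_bcast> <flag_trigger> <flag_flow_type>
+ * <flag_protection>"; del_dialog and pause_dialog take
+ * "<peer-mac> <dialog_id>", and resume_dialog additionally takes
+ * <sp_offset_us> and <next_twt_size>:
+ *
+ *   echo "aa:bb:cc:dd:ee:ff 1 102400 100 8192 20000 4 0 1 0 0" > \
+ *       .../netdev:wlan0/twt/add_dialog
+ *   echo "aa:bb:cc:dd:ee:ff 1" > .../netdev:wlan0/twt/del_dialog
+ */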
+
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+ struct ath11k_base *ab = arvif->ar->ab;
+
+ if (arvif->vif->type != NL80211_IFTYPE_AP &&
+ !(arvif->vif->type == NL80211_IFTYPE_STATION &&
+ test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
+ return;
+
+ arvif->debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
+
+ debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
+
+ debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
+}
+
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+ if (!arvif->debugfs_twt)
+ return;
+
+ debugfs_remove_recursive(arvif->debugfs_twt);
+ arvif->debugfs_twt = NULL;
}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 4c0740394c95..3af0169f6cf2 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -47,6 +47,36 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_NUM_EXT_STATS,
};
+#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
+
+enum ath11k_dbg_dbr_event {
+ ATH11K_DBG_DBR_EVENT_INVALID,
+ ATH11K_DBG_DBR_EVENT_RX,
+ ATH11K_DBG_DBR_EVENT_REPLENISH,
+ ATH11K_DBG_DBR_EVENT_MAX,
+};
+
+struct ath11k_dbg_dbr_entry {
+ u32 hp;
+ u32 tp;
+ u64 timestamp;
+ enum ath11k_dbg_dbr_event event;
+};
+
+struct ath11k_dbg_dbr_data {
+ /* protects ath11k_db_ring_debug data */
+ spinlock_t lock;
+ struct ath11k_dbg_dbr_entry *entries;
+ u32 dbr_debug_idx;
+ u32 num_ring_debug_entries;
+};
+
+struct ath11k_debug_dbr {
+ struct ath11k_dbg_dbr_data dbr_dbg_data;
+ struct dentry *dbr_debugfs;
+ bool dbr_debug_enabled;
+};
+
struct debug_htt_stats_req {
bool done;
u8 pdev_id;
@@ -88,6 +118,7 @@ enum ath11k_pktlog_mode {
};
enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_INVALID = 0,
ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
ATH11K_PKTLOG_TYPE_TX_STAT = 2,
ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
@@ -107,6 +138,130 @@ enum ath11k_dbg_aggr_mode {
ATH11K_DBG_AGGR_MODE_MAX,
};
+enum fw_dbglog_wlan_module_id {
+ WLAN_MODULE_ID_MIN = 0,
+ WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
+ WLAN_MODULE_WMI,
+ WLAN_MODULE_STA_PWRSAVE,
+ WLAN_MODULE_WHAL,
+ WLAN_MODULE_COEX,
+ WLAN_MODULE_ROAM,
+ WLAN_MODULE_RESMGR_CHAN_MANAGER,
+ WLAN_MODULE_RESMGR,
+ WLAN_MODULE_VDEV_MGR,
+ WLAN_MODULE_SCAN,
+ WLAN_MODULE_RATECTRL,
+ WLAN_MODULE_AP_PWRSAVE,
+ WLAN_MODULE_BLOCKACK,
+ WLAN_MODULE_MGMT_TXRX,
+ WLAN_MODULE_DATA_TXRX,
+ WLAN_MODULE_HTT,
+ WLAN_MODULE_HOST,
+ WLAN_MODULE_BEACON,
+ WLAN_MODULE_OFFLOAD,
+ WLAN_MODULE_WAL,
+ WLAN_WAL_MODULE_DE,
+ WLAN_MODULE_PCIELP,
+ WLAN_MODULE_RTT,
+ WLAN_MODULE_RESOURCE,
+ WLAN_MODULE_DCS,
+ WLAN_MODULE_CACHEMGR,
+ WLAN_MODULE_ANI,
+ WLAN_MODULE_P2P,
+ WLAN_MODULE_CSA,
+ WLAN_MODULE_NLO,
+ WLAN_MODULE_CHATTER,
+ WLAN_MODULE_WOW,
+ WLAN_MODULE_WAL_VDEV,
+ WLAN_MODULE_WAL_PDEV,
+ WLAN_MODULE_TEST,
+ WLAN_MODULE_STA_SMPS,
+ WLAN_MODULE_SWBMISS,
+ WLAN_MODULE_WMMAC,
+ WLAN_MODULE_TDLS,
+ WLAN_MODULE_HB,
+ WLAN_MODULE_TXBF,
+ WLAN_MODULE_BATCH_SCAN,
+ WLAN_MODULE_THERMAL_MGR,
+ WLAN_MODULE_PHYERR_DFS,
+ WLAN_MODULE_RMC,
+ WLAN_MODULE_STATS,
+ WLAN_MODULE_NAN,
+ WLAN_MODULE_IBSS_PWRSAVE,
+ WLAN_MODULE_HIF_UART,
+ WLAN_MODULE_LPI,
+ WLAN_MODULE_EXTSCAN,
+ WLAN_MODULE_UNIT_TEST,
+ WLAN_MODULE_MLME,
+ WLAN_MODULE_SUPPL,
+ WLAN_MODULE_ERE,
+ WLAN_MODULE_OCB,
+ WLAN_MODULE_RSSI_MONITOR,
+ WLAN_MODULE_WPM,
+ WLAN_MODULE_CSS,
+ WLAN_MODULE_PPS,
+ WLAN_MODULE_SCAN_CH_PREDICT,
+ WLAN_MODULE_MAWC,
+ WLAN_MODULE_CMC_QMIC,
+ WLAN_MODULE_EGAP,
+ WLAN_MODULE_NAN20,
+ WLAN_MODULE_QBOOST,
+ WLAN_MODULE_P2P_LISTEN_OFFLOAD,
+ WLAN_MODULE_HALPHY,
+ WLAN_WAL_MODULE_ENQ,
+ WLAN_MODULE_GNSS,
+ WLAN_MODULE_WAL_MEM,
+ WLAN_MODULE_SCHED_ALGO,
+ WLAN_MODULE_TX,
+ WLAN_MODULE_RX,
+ WLAN_MODULE_WLM,
+ WLAN_MODULE_RU_ALLOCATOR,
+ WLAN_MODULE_11K_OFFLOAD,
+ WLAN_MODULE_STA_TWT,
+ WLAN_MODULE_AP_TWT,
+ WLAN_MODULE_UL_OFDMA,
+ WLAN_MODULE_HPCS_PULSE,
+ WLAN_MODULE_DTF,
+ WLAN_MODULE_QUIET_IE,
+ WLAN_MODULE_SHMEM_MGR,
+ WLAN_MODULE_CFIR,
+ WLAN_MODULE_CODE_COVER,
+ WLAN_MODULE_SHO,
+ WLAN_MODULE_MLO_MGR,
+ WLAN_MODULE_PEER_INIT,
+ WLAN_MODULE_STA_MLO_PS,
+
+ WLAN_MODULE_ID_MAX,
+ WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
+};
+
+enum fw_dbglog_log_level {
+ ATH11K_FW_DBGLOG_ML = 0,
+ ATH11K_FW_DBGLOG_VERBOSE = 0,
+ ATH11K_FW_DBGLOG_INFO,
+ ATH11K_FW_DBGLOG_INFO_LVL_1,
+ ATH11K_FW_DBGLOG_INFO_LVL_2,
+ ATH11K_FW_DBGLOG_WARN,
+ ATH11K_FW_DBGLOG_ERR,
+ ATH11K_FW_DBGLOG_LVL_MAX
+};
+
+struct ath11k_fw_dbglog {
+ enum wmi_debug_log_param param;
+ union {
+ struct {
+ /* log_level values are given in enum fw_dbglog_log_level */
+ u16 log_level;
+ /* module_id values are given in enum fw_dbglog_wlan_module_id */
+ u16 module_id;
+ };
+ /* Depending on param, value holds either the packed log_level
+ * and module_id, a vdev_id, a vdev_id bitmap, or a bare
+ * log_level.
+ */
+ u32 value;
+ };
+};
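+
+/* Note, not part of this change: on a little-endian host the anonymous
+ * union above packs as value = (module_id << 16) | log_level, and
+ * ath11k_write_fw_dbglog() forwards lower_32_bits(value) to the WMI
+ * layer as dbglog.value.
+ */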
+
#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debugfs_soc_create(struct ath11k_base *ab);
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
@@ -114,7 +269,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
int ath11k_debugfs_register(struct ath11k *ar);
void ath11k_debugfs_unregister(struct ath11k *ar);
-void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
@@ -151,6 +306,13 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng);
+
#else
static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
@@ -179,8 +341,8 @@ static inline void ath11k_debugfs_unregister(struct ath11k *ar)
{
}
-static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k *ar,
+ struct ath11k_fw_stats *stats)
{
}
@@ -224,6 +386,21 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-#endif /* CONFIG_MAC80211_DEBUGFS*/
+static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void
+ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS*/
#endif /* _ATH11K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 4484235bcda4..b3efca6bd7dd 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -1403,6 +1404,8 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
htt_stats_buf->ax_mu_mimo_brpoll_7);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
htt_stats_buf->ax_bsr_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
@@ -1485,6 +1488,8 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
htt_stats_buf->ax_mu_mimo_brp7_err);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
htt_stats_buf->ax_bsr_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
@@ -1519,6 +1524,16 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
htt_stats_buf->mu_mimo_ppdu_posted);
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
+
len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
@@ -1535,10 +1550,34 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
- for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_sch_bar_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
+ }
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2933,6 +2972,21 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
htt_stats_buf->txbf);
+ len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u",
+ htt_stats_buf->rx_su_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs,
+ "rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u",
+ htt_stats_buf->rx_mu_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs,
+ "rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u",
+ htt_stats_buf->rx_br_poll);
+
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
"rx_legacy_cck_rate",
HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
@@ -2995,6 +3049,38 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "\n");
}
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
+ "rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
+ "rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+ "rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
+ "rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+ "rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu,
+ "rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok,
+ "rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail,
+ "rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
htt_stats_buf->per_chain_rssi_pkt_type);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index dc210c54d131..2b97cbbd28cb 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
@@ -629,7 +630,7 @@ struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
* completing the burst, we identify the txop used in the burst and
* incr the corresponding bin.
* Each bin represents 1ms & we have 10 bins in this histogram.
- * they are deined in FW using the following macros
+ * They are defined in FW using the following macros
* #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
* #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms )
*/
@@ -682,6 +683,7 @@ struct htt_tx_selfgen_ax_stats_tlv {
u32 ax_bsr_trigger;
u32 ax_mu_bar_trigger;
u32 ax_mu_rts_trigger;
+ u32 ax_ulmumimo_trigger;
};
struct htt_tx_selfgen_ac_err_stats_tlv {
@@ -712,12 +714,14 @@ struct htt_tx_selfgen_ax_err_stats_tlv {
u32 ax_bsr_trigger_err;
u32 ax_mu_bar_trigger_err;
u32 ax_mu_rts_trigger_err;
+ u32 ax_ulmumimo_trigger_err;
};
/* == TX MU STATS == */
#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS 8
struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
/* mu-mimo sw sched cmd stats */
@@ -734,6 +738,24 @@ struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+
+ /* UL MU-MIMO */
+ /* ax_ul_mumimo_basic_sch_nusers[i] is the number of basic triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ /* ax_ul_mumimo_brp_sch_nusers[i] is the number of brp triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
};
struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
@@ -1297,6 +1319,8 @@ struct htt_tx_pdev_rate_stats_tlv {
#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+#define HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
struct htt_rx_pdev_rate_stats_tlv {
u32 mac_id__word;
@@ -1375,6 +1399,21 @@ struct htt_rx_pdev_rate_stats_tlv {
u32 per_chain_rssi_pkt_type;
s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 rx_su_ndpa;
+ u32 rx_11ax_su_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_mu_ndpa;
+ u32 rx_11ax_mu_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_br_poll;
+ u32 rx_11ax_dl_ofdma_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_11ax_dl_ofdma_ru[HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+
+ u32 rx_ulmumimo_non_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_ok[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_fail[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulofdma_non_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ u32 rx_ulofdma_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
};
/* == RX PDEV/SOC STATS == */
@@ -1858,7 +1897,7 @@ struct htt_phy_counters_tlv {
u32 phytx_abort_cnt;
/* number of times rx abort initiated by phy */
u32 phyrx_abort_cnt;
- /* number of rx defered count initiated by phy */
+ /* number of rx deferred count initiated by phy */
u32 phyrx_defer_abort_cnt;
/* number of sizing events generated at LSTF */
u32 rx_gain_adj_lstf_event_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 1b1acbdf837a..9cc4ef28e751 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -751,6 +751,102 @@ static const struct file_operations fops_htt_peer_stats_reset = {
.llseek = default_llseek,
};
+static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u64 time_since_station_in_power_save;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ time_since_station_in_power_save = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies);
+ else
+ time_since_station_in_power_save = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n",
+ time_since_station_in_power_save);
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_current_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_current_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ u64 power_save_duration;
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ power_save_duration = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies)
+ + arsta->ps_total_duration;
+ else
+ power_save_duration = arsta->ps_total_duration;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_total_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_total_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -778,4 +874,15 @@ void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vi
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
+
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("current_ps_duration", 0440, dir, sta,
+ &fops_current_ps_duration);
+ debugfs_create_file("total_ps_duration", 0440, dir, sta,
+ &fops_total_ps_duration);
+ }
}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8b790ce72e5d..f5156a7fbdd7 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -131,13 +132,11 @@ static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
switch (type) {
case HAL_WBM2SW_RELEASE:
- if (ring_num < 3) {
- grp_mask = &ab->hw_params.ring_mask->tx[0];
- } else if (ring_num == 3) {
+ if (ring_num == DP_RX_RELEASE_RING_NUM) {
grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
- return -ENOENT;
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
}
break;
case HAL_REO_EXCEPTION:
@@ -371,6 +370,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
+ u8 tcl_num, wbm_num;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -396,9 +396,12 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
+ wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
+
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
- HAL_TCL_DATA, i, 0,
- DP_TCL_DATA_RING_SIZE);
+ HAL_TCL_DATA, tcl_num, 0,
+ ab->hw_params.tx_ring_size);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
@@ -406,7 +409,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
- HAL_WBM2SW_RELEASE, i, 0,
+ HAL_WBM2SW_RELEASE, wbm_num, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
@@ -431,7 +434,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
- 3, 0, DP_RX_RELEASE_RING_SIZE);
+ DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
@@ -774,9 +777,10 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int i, j;
int tot_work_done = 0;
- if (ab->hw_params.ring_mask->tx[grp_id]) {
- i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
- ath11k_dp_tx_completion_handler(ab, i);
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
+ ab->hw_params.ring_mask->tx[grp_id])
+ ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {
@@ -963,7 +967,7 @@ static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
/* When v2_map_support is true:for STA mode, enable address
* search index, tcl uses ast_hash value in the descriptor.
- * When v2_map_support is false: for STA mode, dont' enable
+ * When v2_map_support is false: for STA mode, don't enable
* address search index.
*/
switch (arvif->vdev_type) {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 409d6cc5a1d5..be9eafc872b3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -115,6 +116,8 @@ struct ath11k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
};
struct dp_full_mon_mpdu {
@@ -167,6 +170,7 @@ struct ath11k_mon_data {
struct ath11k_pdev_dp {
u32 mac_id;
+ u32 mon_dest_ring_stuck_cnt;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
@@ -200,6 +204,7 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TCL_DATA_RING_SIZE_WCN6750 2048
#define DP_TX_COMP_RING_SIZE 32768
#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
@@ -219,6 +224,8 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+#define DP_RX_RELEASE_RING_NUM 3
+
#define DP_RX_BUFFER_SIZE 2048
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
@@ -296,7 +303,7 @@ struct ath11k_dp {
#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-/* HTT tx completion is overlayed in wbm_release_ring */
+/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
@@ -463,7 +470,7 @@ enum htt_srng_ring_id {
* 3'b010: 4 usec
* 3'b011: 8 usec (default)
* 3'b100: 16 usec
- * Others: Reserverd
+ * Others: Reserved
* b'19 - response_required:
* Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
* b'20:31 - reserved: reserved for future use
@@ -990,8 +997,7 @@ struct htt_rx_ring_tlv_filter {
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
-/**
- * Enumeration for full monitor mode destination ring select
+/* Enumeration for full monitor mode destination ring select
* 0 - REO destination ring select
* 1 - FW destination ring select
* 2 - SW destination ring select
@@ -1170,12 +1176,12 @@ struct ath11k_htt_ppdu_stats_msg {
u32 ppdu_id;
u32 timestamp;
u32 rsvd;
- u8 data[0];
+ u8 data[];
} __packed;
struct htt_tlv {
u32 header;
- u8 value[0];
+ u8 value[];
} __packed;
#define HTT_TLV_TAG GENMASK(11, 0)
@@ -1362,7 +1368,7 @@ struct htt_ppdu_stats_usr_cmn_array {
* tx_ppdu_stats_info is variable length, with length =
* number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
*/
- struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[];
} __packed;
struct htt_ppdu_user_stats {
@@ -1388,8 +1394,7 @@ struct htt_ppdu_stats_info {
struct list_head list;
};
-/**
- * @brief target -> host packet log message
+/* @brief target -> host packet log message
*
* @details
* The following field definitions describe the format of the packet log
@@ -1424,11 +1429,10 @@ struct htt_ppdu_stats_info {
*/
struct htt_pktlog_msg {
u32 hdr;
- u8 payload[0];
+ u8 payload[];
};
-/**
- * @brief host -> target FW extended statistics retrieve
+/* @brief host -> target FW extended statistics retrieve
*
* @details
* The following field definitions describe the format of the HTT host
@@ -1563,8 +1567,7 @@ struct htt_ext_stats_cfg_params {
u32 cfg3;
};
-/**
- * @brief target -> host extended statistics upload
+/* @brief target -> host extended statistics upload
*
* @details
* The following field definitions describe the format of the HTT target
@@ -1645,7 +1648,7 @@ struct ath11k_htt_extd_stats_msg {
u32 info0;
u64 cookie;
u32 info1;
- u8 data[0];
+ u8 data[];
} __packed;
#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c212a789421e..c5a4c34d7749 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -43,6 +43,13 @@ static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
}
static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
+static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
@@ -828,8 +835,9 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath11k_dp_rx_tid_del_func);
if (ret) {
- ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
- tid, ret);
+ if (ret != -ESHUTDOWN)
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
@@ -2313,7 +2321,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
u8 bw;
u8 rate_mcs, nss;
u8 sgi;
- bool is_cck;
+ bool is_cck, is_ldpc;
pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
@@ -2355,6 +2363,9 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+ if (is_ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
break;
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
@@ -2488,7 +2499,7 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
/* PN for multicast packets are not validate in HW,
* so skip 802.3 rx path
- * Also, fast_rx expectes the STA to be authorized, hence
+ * Also, fast_rx expects the STA to be authorized, hence
* eapol packets are sent in slow path.
*/
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
@@ -2642,9 +2653,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
spin_lock_bh(&srng->lock);
+try_again:
ath11k_hal_srng_access_begin(ab, srng);
-try_again:
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
@@ -2755,6 +2766,9 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
if (!rx_stats)
return;
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -3080,79 +3094,6 @@ move_next:
return num_buffs_reaped;
}
-int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- enum hal_rx_mon_status hal_status;
- struct sk_buff *skb;
- struct sk_buff_head skb_list;
- struct hal_rx_mon_ppdu_info ppdu_info;
- struct ath11k_peer *peer;
- struct ath11k_sta *arsta;
- int num_buffs_reaped = 0;
- u32 rx_buf_sz;
- u16 log_type = 0;
-
- __skb_queue_head_init(&skb_list);
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
- &skb_list);
- if (!num_buffs_reaped)
- goto exit;
-
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
-
- while ((skb = __skb_dequeue(&skb_list))) {
- if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
- rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
- } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
- rx_buf_sz = DP_RX_BUFFER_SIZE;
- }
-
- if (log_type)
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
- hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
-
- if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
- hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
-
- if (!peer || !peer->sta) {
- ath11k_dbg(ab, ATH11K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info.peer_id);
- goto next_skb;
- }
-
- arsta = (struct ath11k_sta *)peer->sta->drv_priv;
- ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
-
- if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
-next_skb:
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
-
- dev_kfree_skb_any(skb);
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
- }
-exit:
- return num_buffs_reaped;
-}
-
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
@@ -4870,7 +4811,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
- u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
u8 *dest, decap_format;
@@ -4912,38 +4852,27 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
- __le16 qos_field;
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
- wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
- if (ieee80211_is_data_qos(wh->frame_control)) {
- struct ieee80211_qos_hdr *qwh =
- (struct ieee80211_qos_hdr *)hdr_desc;
-
- qos_field = qwh->qos_ctrl;
+ if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
- }
+
msdu = head_msdu;
while (msdu) {
- rx_desc = (struct hal_rx_desc *)msdu->data;
- hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
-
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
- memcpy(dest, hdr_desc, wifi_hdr_len);
- memcpy(dest + wifi_hdr_len,
- (u8 *)&qos_field, sizeof(__le16));
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
- ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
@@ -4967,8 +4896,98 @@ err_merge_fail:
return NULL;
}
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath11k_update_radiotap(struct ath11k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+ if (ppduinfo->nss)
+ rxs->nss = ppduinfo->nss;
+
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *tail_msdu,
struct napi_struct *napi)
{
@@ -5003,7 +5022,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
- rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
@@ -5022,6 +5041,12 @@ mon_deliver_fail:
return -EINVAL;
}
+/* Destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring moves 16 PPDUs. As a
+ * workaround, destination ring processing skips the current
+ * destination ring PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 quota, struct napi_struct *napi)
{
@@ -5035,6 +5060,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
+ u32 mpdu_rx_bufs_used;
if (ar->ab->hw_params.rxdma1_enable)
ring_id = dp->rxdma_mon_dst_ring.ring_id;
@@ -5064,20 +5090,44 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
head_msdu = NULL;
tail_msdu = NULL;
- rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
- &head_msdu,
- &tail_msdu,
- &npackets, &ppdu_id);
+ mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ continue;
+ }
if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "dest_rx: new ppdu_id %x != status ppdu_id %x",
- ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
break;
}
if (head_msdu && tail_msdu) {
ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
}
@@ -5106,36 +5156,93 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
}
}
-static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
- int mac_id, u32 quota,
- struct napi_struct *napi)
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
{
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- struct hal_rx_mon_ppdu_info *ppdu_info;
- struct sk_buff *status_skb;
- u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
- struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+ struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- ppdu_info = &pmon->mon_ppdu_info;
- rx_mon_stats = &pmon->rx_mon_stats;
+ __skb_queue_head_init(&skb_list);
- if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
- return;
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else {
+ log_type = ATH11K_PKTLOG_TYPE_INVALID;
+ rx_buf_sz = 0;
+ }
- while (!skb_queue_empty(&pmon->rx_status_q)) {
- status_skb = skb_dequeue(&pmon->rx_status_q);
+ if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
- tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
- status_skb);
- if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
- dev_kfree_skb_any(status_skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
}
+exit:
+ return num_buffs_reaped;
}
static u32
@@ -5352,6 +5459,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
@@ -5489,22 +5597,6 @@ reap_status_ring:
return quota;
}
-static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- int num_buffs_reaped = 0;
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
- &pmon->rx_status_q);
- if (num_buffs_reaped)
- ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
-
- return num_buffs_reaped;
-}
-
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
@@ -5514,8 +5606,6 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
ab->hw_params.full_monitor_mode)
ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
- else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
- ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 91d6244b6543..8afbba236935 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -93,7 +94,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
- u8 ring_selector = 0, ring_map = 0;
+ u32 ring_selector = 0;
+ u8 ring_map = 0;
bool tcl_ring_retry;
if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
@@ -105,19 +107,13 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
- /* Let the default ring selection be based on current processor
- * number, where one of the 3 tcl rings are selected based on
- * the smp_processor_id(). In case that ring
- * is full/busy, we resort to other available rings.
- * If all rings are full, we drop the packet.
- * //TODO Add throttling logic when all rings are full
- */
- ring_selector = smp_processor_id();
+ ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+ ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= BIT(ti.ring_id);
@@ -129,7 +125,8 @@ tcl_ring_sel:
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
- if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
+ if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+ !ab->hw_params.tcl_ring_retry) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
@@ -247,7 +244,7 @@ tcl_ring_sel:
* Restart ring selection if some rings are not checked yet.
*/
if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
- ab->hw_params.max_tx_ring > 1) {
+ ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
}
@@ -351,7 +348,8 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |=
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
@@ -426,7 +424,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate, ru_tones;
- u8 mcs, rate_idx, ofdma;
+ u8 mcs, rate_idx = 0, ofdma;
int ret;
spin_lock_bh(&ab->base_lock);
@@ -518,9 +516,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
+ struct ieee80211_tx_status status = { 0 };
+ struct ieee80211_rate_status status_rate = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -552,7 +555,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
@@ -583,12 +586,31 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
- /* NOTE: Tx rate status reporting. Tx completion status does not have
- * necessary information (for example nss) to build the tx rate.
- * Might end up reporting it out-of-band from HTT stats.
- */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ status.sta = peer->sta;
+ status.skb = msdu;
+ status.info = info;
+ rate = arsta->last_txrate;
- ieee80211_tx_status(ar->hw, msdu);
+ status_rate.rate_idx = rate;
+ status_rate.try_count = 1;
+
+ status.rates = &status_rate;
+ status.n_rates = 1;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ieee80211_tx_status_ext(ar->hw, &status);
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
@@ -730,7 +752,7 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
return 0;
/* Can this be optimized so that we keep the pending command list only
- * for tid delete command to free up the resoruce on the command status
+ * for tid delete command to free up the resource on the command status
* indication?
*/
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 2ec09ae90080..2fd224480d45 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -125,7 +126,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
},
{ /* WBM2SW_RELEASE */
.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
- .max_rings = 4,
+ .max_rings = 5,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
@@ -1082,10 +1083,10 @@ static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
srng = &hal->srng_list[ring_id];
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
- srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
else
- srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
}
@@ -1120,7 +1121,7 @@ int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_HAL,
"target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
target_reg,
- HAL_SHADOW_REG(shadow_cfg_idx),
+ HAL_SHADOW_REG(ab, shadow_cfg_idx),
shadow_cfg_idx,
ring_type, ring_num);
@@ -1163,8 +1164,8 @@ void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
{
lockdep_assert_held(&srng->lock);
- /* check whether the ring is emptry. Update the shadow
- * HP only when then ring isn't' empty.
+ /* check whether the ring is empty. Update the shadow
+ * HP only when the ring isn't empty.
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
*srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
@@ -1193,12 +1194,12 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
s = &hal->srng_config[HAL_REO_REINJECT];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
s = &hal->srng_config[HAL_REO_CMD];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
s = &hal->srng_config[HAL_REO_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index a7d9b4c551ad..6a1f78ee6eb6 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@@ -31,12 +32,12 @@ struct ath11k_base;
#define HAL_DSCP_TID_TBL_SIZE 24
/* calculate the register address from bar0 of shadow register x */
-#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr
#define HAL_SHADOW_NUM_REGS 36
#define HAL_HP_OFFSET_IN_REG_START 1
#define HAL_OFFSET_FROM_HP_TO_TP 4
-#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
+#define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x)))
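Worked example of the new parameterized macro, assuming hal_shadow_base_addr keeps the old fixed value 0x000008fc that this hunk removes (other chips can now override it via hw_params):

	/* shadow register 3: 0x000008fc + 4 * 3 = 0x00000908 from bar0 */
	u32 addr = HAL_SHADOW_REG(ab, 3);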
/* WCSS Relative address */
#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
@@ -120,7 +121,7 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
-#define HAL_REO1_MISC_CTL 0x00000630
+#define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl
#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
@@ -180,16 +181,18 @@ struct ath11k_base;
#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
/* REO CMD R0 address */
-#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
+#define HAL_REO_CMD_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_cmd_ring_base_lsb
/* REO CMD R2 address */
-#define HAL_REO_CMD_HP 0x00003020
+#define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp
/* SW2REO R0 address */
-#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
+#define HAL_SW2REO_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_sw2reo_ring_base_lsb
/* SW2REO R2 address */
-#define HAL_SW2REO_RING_HP 0x00003028
+#define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp
/* CE ring R0 address */
#define HAL_CE_DST_RING_BASE_LSB 0x00000000
@@ -240,7 +243,7 @@ struct ath11k_base;
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
-/* TCL ring feild mask and offset */
+/* TCL ring field mask and offset */
#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
@@ -265,7 +268,7 @@ struct ath11k_base;
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
-/* REO ring feild mask and offset */
+/* REO ring field mask and offset */
#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
@@ -386,6 +389,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
HAL_SRNG_RING_ID_UMAC_ID_END = 127,
HAL_SRNG_RING_ID_LMAC1_ID_START,
@@ -447,13 +451,13 @@ enum hal_ring_type {
/**
* enum hal_reo_cmd_type: Enum for REO command type
- * @CMD_GET_QUEUE_STATS: Get REO queue status/stats
- * @CMD_FLUSH_QUEUE: Flush all frames in REO queue
- * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache
- * @CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
* earlier with a 'REO_FLUSH_CACHE' command
- * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
- * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
*/
enum hal_reo_cmd_type {
HAL_REO_CMD_GET_QUEUE_STATS = 0,
@@ -632,7 +636,7 @@ struct hal_srng {
} u;
};
-/* Interrupt mitigation - Batch threshold in terms of numer of frames */
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
@@ -675,6 +679,7 @@ enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_SW1_BM,
HAL_RX_BUF_RBM_SW2_BM,
HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
};
#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
@@ -870,8 +875,7 @@ struct hal_reo_status {
} u;
};
-/**
- * HAL context to be used to access SRNG APIs (currently used by data path
+/* HAL context to be used to access SRNG APIs (currently used by data path
* and transport (CE) modules)
*/
struct ath11k_hal {
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 406767672844..d895ea878d9f 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -474,6 +474,7 @@ enum hal_tlv_tag {
#define HAL_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
#define HAL_TLV_ALIGN 4
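The new HAL_TLV_USR_ID field shares the 32-bit TLV header word with the existing tag and length fields; a minimal sketch of decoding one header, mirroring what the monitor-status parser in this patch does with FIELD_GET():

	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)ptr;
	u16 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);	/* bits 9:1 */
	u16 tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);	/* bits 25:10 */
	u32 tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);	/* bits 31:26, new */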
@@ -606,7 +607,7 @@ struct rx_msdu_desc {
*
* msdu_continuation
* When set, this MSDU buffer was not able to hold the entire MSDU.
- * The next buffer will therefor contain additional information
+ * The next buffer will therefore contain additional information
* related to this MSDU.
*
* msdu_length
@@ -642,7 +643,7 @@ struct rx_msdu_desc {
*
* da_idx_timeout
* Indicates, an unsuccessful MAC destination address search due
- * to the expiration of search timer fot this MSDU.
+ * to the expiration of search timer for this MSDU.
*/
enum hal_reo_dest_ring_buffer_type {
@@ -1677,7 +1678,7 @@ struct hal_wbm_release_ring {
* Producer: SW/TQM/RXDMA/REO/SWITCH
* Consumer: WBM/SW/FW
*
- * HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
* for software based completions.
*
* buf_addr_info
@@ -2158,7 +2159,7 @@ struct hal_reo_status_hdr {
* commands.
*
* execution_time (in us)
- * The amount of time REO took to excecute the command. Note that
+ * The amount of time REO took to execute the command. Note that
* this time does not include the duration of the command waiting
* in the command ring, before the execution started.
*
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index a3b353a4b5f7..7f39c6fb7408 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -453,10 +453,12 @@ void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
desc->info0));
ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
- ath11k_dbg(ab, ATH11K_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
- ath11k_dbg(ab, ATH11K_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
@@ -755,7 +757,7 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
/* TODO: HW queue descriptors are currently allocated for max BA
* window size for all QOS TIDs so that same descriptor can be used
- * later when ADDBA request is recevied. This should be changed to
+ * later when ADDBA request is received. This should be changed to
* allocate HW queue descriptors based on BA window size being
* negotiated (0 for non BA cases), and reallocate when BA window
* size changes and also send WMI message to FW to change the REO
@@ -802,12 +804,75 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
+#define HAL_MAX_UL_MU_USERS 37
+static inline void
+ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
+
+ rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
+}
+
+static inline void
+ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->mpdu_ok_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[6]));
+ rx_user_status->mpdu_err_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[8]));
+}
+
+static inline void
+ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+
+ ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
- u32 tlv_tag, u8 *tlv_data)
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
{
- u32 info0, info1;
+ u32 info0, info1, value;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@@ -828,6 +893,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(eu_stats->info0);
info1 = __le32_to_cpu(eu_stats->info1);
+ ppdu_info->ast_index =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
+ __le32_to_cpu(eu_stats->info2));
ppdu_info->tid =
ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
__le32_to_cpu(eu_stats->info6))) - 1;
@@ -851,6 +919,44 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->num_mpdu_fcs_err =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
info0);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats;
+
+ ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
+ __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
+ __le32_to_cpu(eu_stats->rsvd1[1]);
+
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
break;
}
case HAL_PHYRX_HT_SIG: {
@@ -949,50 +1055,151 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
else
ppdu_info->reception_type =
HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
break;
}
case HAL_PHYRX_HE_SIG_A_SU: {
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
- u32 nsts, cp_ltf, dcm;
+ ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
- ppdu_info->mcs =
- FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
- info0);
- ppdu_info->bw =
- FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
- info0);
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
- ppdu_info->is_stbc = info1 &
- HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
- ppdu_info->beamformed = info1 &
- HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
- dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
- cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
- info0);
- nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
- switch (cp_ltf) {
+ if (value == 0)
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
+ else
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
+ ppdu_info->mcs = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
+
+ he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
+ ppdu_info->dcm = he_dcm;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+ ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+ he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
+ ppdu_info->is_stbc = he_stbc;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
+
+ /* data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /* data5 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
case 1:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- break;
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
case 2:
- ppdu_info->gi = HAL_RX_GI_1_6_US;
- break;
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
case 3:
- if (dcm && ppdu_info->is_stbc)
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- else
- ppdu_info->gi = HAL_RX_GI_3_2_US;
- break;
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
}
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
+ ppdu_info->beamformed = value;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /* data6 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value++;
+ ppdu_info->nss = value;
+ ppdu_info->he_data6 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
- ppdu_info->nss = nsts + 1;
- ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@@ -1000,29 +1207,142 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
- u32 cp_ltf;
-
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
- ppdu_info->bw =
- FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
- info0);
- cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
- info0);
-
- switch (cp_ltf) {
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ /*data3*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
+ he_stbc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
+
+ /*data4*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /*data5*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
case 1:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
break;
case 2:
- ppdu_info->gi = HAL_RX_GI_1_6_US;
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
break;
case 3:
- ppdu_info->gi = HAL_RX_GI_3_2_US;
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
break;
}
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /*data6*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
+ info0);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
+ value);
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
+ value = value - 1;
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
+ value);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
@@ -1040,7 +1360,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0);
ppdu_info->ru_alloc =
ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
-
+ ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
@@ -1050,14 +1370,25 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+ ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
ppdu_info->mcs =
- FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
- info0);
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
ppdu_info->nss =
- FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
- info0) + 1;
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
- info0);
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
break;
}
case HAL_PHYRX_HE_SIG_B2_OFDMA: {
@@ -1066,17 +1397,40 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
+
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
+ he_dcm = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ /* HE-data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->beamformed =
- info0 &
- HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
- info0);
+ info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
}
@@ -1118,6 +1472,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->rx_duration =
FIELD_GET(HAL_RX_PPDU_END_DURATION,
__le32_to_cpu(ppdu_rx_duration->info0));
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_DUMMY:
@@ -1141,12 +1498,14 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
u16 tlv_tag;
u16 tlv_len;
+ u32 tlv_userid = 0;
u8 *ptr = skb->data;
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@@ -1158,7 +1517,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
- tlv_tag, ptr);
+ tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index 571054c6d7f8..f6bae07abfd3 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -65,10 +65,6 @@ enum hal_rx_reception_type {
HAL_RX_RECEPTION_TYPE_MAX,
};
-#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
-#define HAL_TLV_STATUS_PPDU_DONE 1
-#define HAL_TLV_STATUS_BUF_DONE 2
-#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
#define HAL_RX_FCS_LEN 4
enum hal_rx_mon_status {
@@ -77,6 +73,40 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ dl_ofdma_ru_start_index:7,
+ dl_ofdma_ru_width:7,
+ dl_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[8];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
+
struct hal_sw_mon_ring_entries {
dma_addr_t mon_dst_paddr;
dma_addr_t mon_status_paddr;
@@ -107,6 +137,12 @@ struct hal_rx_mon_ppdu_info {
u8 mcs;
u8 nss;
u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
u8 is_stbc;
u8 gi;
u8 ldpc;
@@ -114,10 +150,46 @@ struct hal_rx_mon_ppdu_info {
u8 rssi_comb;
u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
u8 tid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
u8 dcm;
u8 ru_alloc;
u8 reception_type;
+ u64 tsft;
u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ char rssi_chain[8][8];
+ struct hal_rx_user_status userstats;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@@ -150,6 +222,9 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
+
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
__le32 info0;
@@ -164,6 +239,16 @@ struct hal_rx_ppdu_end_user_stats {
__le32 rsvd2[11];
} __packed;
+struct hal_rx_ppdu_end_user_stats_ext {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 info5;
+ u32 info6;
+} __packed;
+
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
@@ -212,25 +297,62 @@ enum hal_rx_vht_sig_a_gi_setting {
HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
};
+#define HAL_RX_SU_MU_CODING_LDPC 0x01
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+#define HE_LTF_UNKNOWN 3
+
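For reference, the CP_LTF decode implemented by the two HE SIG-A switch statements earlier in this patch, summarized as a comment (derived from this patch's code, not from the 802.11ax spec text):

	/* SU SIG-A:  0 -> 0.8 us GI, 1x LTF
	 *            1 -> 0.8 us GI, 2x LTF
	 *            2 -> 1.6 us GI, 2x LTF
	 *            3 -> 0.8 us GI, 4x LTF if DCM && STBC,
	 *                 else 3.2 us GI, 4x LTF
	 * The MU-DL SIG-A handler maps value 0 to 0.8 us GI with 4x LTF instead.
	 */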
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
struct hal_rx_he_sig_a_su_info {
__le32 info0;
__le32 info1;
} __packed;
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
-
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
struct hal_rx_he_sig_a_mu_dl_info {
__le32 info0;
@@ -243,6 +365,7 @@ struct hal_rx_he_sig_b1_mu_info {
__le32 info0;
} __packed;
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
@@ -251,6 +374,7 @@ struct hal_rx_he_sig_b2_mu_info {
__le32 info0;
} __packed;
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
@@ -279,11 +403,14 @@ struct hal_rx_phyrx_rssi_legacy_info {
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
- __le32 rsvd1[21];
+ __le32 rsvd1[11];
+ __le32 info1;
+ __le32 rsvd2[9];
} __packed;
struct hal_rx_mpdu_info_wcn6855 {
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index c8929de8ce6c..d1b0e36e04a9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hal_desc.h"
@@ -44,8 +45,7 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |=
- FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
- (ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index 36f4f6f6cbc2..c5e88364afe5 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@@ -35,6 +36,7 @@ struct hal_tx_info {
u8 lmac_id;
u8 dscp_tid_tbl_idx;
bool enable_mesh;
+ u8 rbm_id;
};
/* TODO: Check if the actual desc macros can be used instead */
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index e9366f786fbb..659b80d2abd4 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -11,6 +11,7 @@
struct ath11k_hif_ops {
u32 (*read32)(struct ath11k_base *sc, u32 address);
void (*write32)(struct ath11k_base *sc, u32 address, u32 data);
+ int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end);
void (*irq_enable)(struct ath11k_base *sc);
void (*irq_disable)(struct ath11k_base *sc);
int (*start)(struct ath11k_base *sc);
@@ -99,6 +100,15 @@ static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 d
sc->hif.ops->write32(sc, address, data);
}
+static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf,
+ u32 start, u32 end)
+{
+ if (!ab->hif.ops->read)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->read(ab, buf, start, end);
+}
+
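A hedged usage sketch for the new optional .read op (the caller, label, and arguments are hypothetical; only the -EOPNOTSUPP contract comes from this patch):

	/* returns -EOPNOTSUPP when the underlying bus registered no .read op */
	ret = ath11k_hif_read(ab, buf, start, end);
	if (ret == -EOPNOTSUPP)
		goto skip_dump;	/* hypothetical fallback path */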
static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
@@ -134,4 +144,5 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
else
*msi_data_idx = ce_id;
}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 6913b7494b9b..ca3aedc0252d 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -258,8 +258,10 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
- if (eid >= ATH11K_HTC_EP_COUNT)
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ dev_kfree_skb_any(skb);
return;
+ }
ep = &htc->endpoint[eid];
spin_lock_bh(&htc->tx_lock);
@@ -272,6 +274,11 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
ep_tx_complete(htc->ab, skb);
}
+static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
+{
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot wakeup from suspend is received\n");
+}
+
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
@@ -376,6 +383,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
ath11k_htc_suspend_complete(ab, false);
break;
case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath11k_htc_wakeup_from_suspend(ab);
break;
default:
ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 3b0fdc1a6b3f..dbcc0c4035b6 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -273,6 +274,12 @@ static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
+static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
@@ -444,6 +451,12 @@ static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
+static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
@@ -758,10 +771,10 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
- val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL);
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab));
val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL, val);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
@@ -801,6 +814,36 @@ static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
return peer_id;
}
+static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Let the default ring selection be based on current processor
+ * number, where one of the 3 tcl rings is selected based on
+ * the smp_processor_id(). In case that ring
+ * is full/busy, we resort to other available rings.
+ * If all rings are full, we drop the packet.
+ *
+ * TODO: Add throttling logic when all rings are full
+ */
+ return smp_processor_id();
+}
+
+static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Select the TCL ring based on the flow hash of the SKB instead
+ * of CPU ID. Since applications pumping the traffic can be scheduled
+ * on multiple CPUs, there is a chance that packets of the same flow
+ * could end up on different TCL rings, which could sometimes result in
+ * an out-of-order arrival of the packets at the receiver.
+ */
+ return skb_get_hash(skb);
+}
+
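Both selector implementations return an arbitrary u32; the dp_tx.c hunk earlier in this patch reduces it modulo the ring count, so only the distribution of the value matters:

	ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
	ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;

The flow-hash variant trades per-CPU load spreading for keeping each flow on one TCL ring, avoiding the receiver-side reordering described in the comment above.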
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -815,6 +858,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -837,6 +881,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -853,6 +898,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -875,6 +921,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -891,6 +938,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -913,6 +961,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -929,6 +978,7 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
@@ -951,6 +1001,7 @@ const struct ath11k_hw_ops qcn9074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6855_ops = {
@@ -967,6 +1018,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
@@ -989,11 +1041,54 @@ const struct ath11k_hw_ops wcn6855_ops = {
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
-#define ATH11K_TX_RING_MASK_0 0x1
-#define ATH11K_TX_RING_MASK_1 0x2
-#define ATH11K_TX_RING_MASK_2 0x4
+const struct ath11k_hw_ops wcn6750_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
+};
+
+#define ATH11K_TX_RING_MASK_0 BIT(0)
+#define ATH11K_TX_RING_MASK_1 BIT(1)
+#define ATH11K_TX_RING_MASK_2 BIT(2)
+#define ATH11K_TX_RING_MASK_3 BIT(3)
+#define ATH11K_TX_RING_MASK_4 BIT(4)
#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
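Note the TX ring masks above switch from hex literals to BIT(): BIT(n) is (1UL << (n)) in the kernel, so MASK_0..MASK_2 keep their old values (0x1, 0x2, 0x4) and MASK_3/MASK_4 extend the same pattern for the extra WCN6750 rings. A standalone check of that equivalence, with BIT() redefined locally since this compiles in userspace:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))	/* local stand-in for the kernel macro */

int main(void)
{
	assert(BIT(0) == 0x1);	/* ATH11K_TX_RING_MASK_0, unchanged */
	assert(BIT(1) == 0x2);	/* ATH11K_TX_RING_MASK_1, unchanged */
	assert(BIT(2) == 0x4);	/* ATH11K_TX_RING_MASK_2, unchanged */
	printf("MASK_3 = 0x%lx, MASK_4 = 0x%lx\n", BIT(3), BIT(4));	/* 0x8, 0x10 */
	return 0;
}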
@@ -1840,6 +1935,43 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
},
};
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ 0,
+ ATH11K_TX_RING_MASK_2,
+ 0,
+ ATH11K_TX_RING_MASK_4,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
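In the ring-mask tables above, each array is indexed by external interrupt group, and bit i set in a group's entry means ring i is serviced from that group; the leading zeros in the wcn6750 .rx entry therefore steer the four RX rings onto groups 7-10. A sketch of how such a table could be walked, under the assumption that group handlers simply test bits (the group count here is illustrative, not ATH11K_EXT_IRQ_GRP_NUM_MAX):

#include <stdint.h>
#include <stdio.h>

#define NUM_GROUPS 11	/* illustrative group count */

/* Mirrors the .rx entry of ath11k_hw_ring_mask_wcn6750 above:
 * groups 7..10 each service exactly one RX ring.
 */
static const uint8_t rx_mask[NUM_GROUPS] = {
	0, 0, 0, 0, 0, 0, 0, 0x1, 0x2, 0x4, 0x8,
};

int main(void)
{
	for (int grp = 0; grp < NUM_GROUPS; grp++)
		for (int ring = 0; ring < 8; ring++)
			if (rx_mask[grp] & (1U << ring))
				printf("group %d services RX ring %d\n", grp, ring);
	return 0;
}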
@@ -1885,10 +2017,18 @@ const struct ath11k_hw_regs ipq8074_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
@@ -1909,6 +2049,12 @@ const struct ath11k_hw_regs ipq8074_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x0,
.pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in IPQ8074 */
+ .hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qca6390_regs = {
@@ -1956,10 +2102,18 @@ const struct ath11k_hw_regs qca6390_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003a4,
.hal_reo_tcl_ring_hp = 0x00003050,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x000004ac,
.hal_reo_status_hp = 0x00003068,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
@@ -1980,6 +2134,12 @@ const struct ath11k_hw_regs qca6390_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, not used in QCA6390 */
+ .hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qcn9074_regs = {
@@ -2027,10 +2187,18 @@ const struct ath11k_hw_regs qcn9074_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
@@ -2051,6 +2219,12 @@ const struct ath11k_hw_regs qcn9074_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in QCN9074 */
+ .hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs wcn6855_regs = {
@@ -2098,10 +2272,18 @@ const struct ath11k_hw_regs wcn6855_regs = {
.hal_reo_tcl_ring_base_lsb = 0x00000454,
.hal_reo_tcl_ring_hp = 0x00003060,
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x0000055c,
.hal_reo_status_hp = 0x00003078,
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
@@ -2122,12 +2304,170 @@ const struct ath11k_hw_regs wcn6855_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6855.
+ */
+ .hal_reo1_misc_ctl = 0x00000630,
+};
+
+const struct ath11k_hw_regs wcn6750_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x00000504,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6750.
+ */
+ .hal_reo1_misc_ctl = 0x000005d8,
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
+};
+
+static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
+ { .start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */
+ { .start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */
+ { .start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */
+ { .start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */
+ { .start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */
+ { .start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 ch1, ch2~ch41 */
+ { .start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */
+ { .start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */
+ { .start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */
+ { .start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */
+ { .start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */
+};
+
+const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855),
+ .freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855,
};
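The tcl2wbm_rbm_map tables turn the TCL-to-WBM completion-ring pairing into data: on IPQ8074-class chips the map is the identity, while WCN6750 reroutes TCL ring 1 to WBM ring 4 together with the matching return-buffer manager. A hypothetical lookup over a reduced copy of the WCN6750 table (struct fields and RBM values simplified to plain integers for a standalone build):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tcl2wbm_map {	/* simplified ath11k_hw_tcl2wbm_rbm_map */
	uint8_t tcl, wbm, rbm;
};

static const struct tcl2wbm_map wcn6750_map[] = {
	{ 0, 0, 0 },	/* HAL_RX_BUF_RBM_SW0_BM */
	{ 1, 4, 4 },	/* HAL_RX_BUF_RBM_SW4_BM: TCL 1 completes on WBM 4 */
	{ 2, 2, 2 },	/* HAL_RX_BUF_RBM_SW2_BM */
};

/* Resolve the WBM completion ring for a given TCL ring number. */
static int tcl_to_wbm(unsigned int tcl_ring)
{
	for (size_t i = 0; i < sizeof(wcn6750_map) / sizeof(wcn6750_map[0]); i++)
		if (wcn6750_map[i].tcl == tcl_ring)
			return wcn6750_map[i].wbm;
	return -1;	/* no mapping */
}

int main(void)
{
	printf("TCL ring 1 completes on WBM ring %d\n", tcl_to_wbm(1));	/* -> 4 */
	return 0;
}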
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 29934b36c14e..8a3f24862edc 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
@@ -121,8 +122,15 @@ struct ath11k_hw_ring_mask {
u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
+struct ath11k_hw_tcl2wbm_rbm_map {
+ u8 tcl_ring_num;
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
struct ath11k_hw_hal_params {
enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
};
struct ath11k_hw_params {
@@ -152,9 +160,6 @@ struct ath11k_hw_params {
u32 svc_to_ce_map_len;
bool single_pdev_only;
- u32 rfkill_pin;
- u32 rfkill_cfg;
- u32 rfkill_on_level;
bool rxdma1_enable;
int num_rxmda_per_pdev;
@@ -168,6 +173,7 @@ struct ath11k_hw_params {
u8 summary_pad_sz;
u8 fft_hdr_len;
u16 max_fft_bins;
+ bool fragment_160mhz;
} spectral;
u16 interface_modes;
@@ -177,6 +183,7 @@ struct ath11k_hw_params {
bool idle_ps;
bool supports_sta_ps;
bool cold_boot_calib;
+ bool cbcal_restart_fw;
int fw_mem_mode;
u32 num_vdevs;
u32 num_peers;
@@ -189,9 +196,29 @@ struct ath11k_hw_params {
const struct ath11k_hw_hal_params *hal_params;
bool supports_dynamic_smps_6ghz;
bool alloc_cacheable_memory;
- bool wakeup_mhi;
bool supports_rssi_stats;
bool fw_wmi_diag_event;
+ bool current_cc_support;
+ bool dbr_debug_support;
+ bool global_reset;
+ const struct cfg80211_sar_capa *bios_sar_capa;
+ bool m3_fw_support;
+ bool fixed_bdf_addr;
+ bool fixed_mem_region;
+ bool static_window_map;
+ bool hybrid_bus_type;
+ bool fixed_fw_mem;
+ bool support_off_channel_tx;
+ bool supports_multi_bssid;
+
+ struct {
+ u32 start;
+ u32 end;
+ } sram_dump;
+
+ bool tcl_ring_retry;
+ u32 tx_ring_size;
+ bool smp2p_wow_exit;
};
struct ath11k_hw_ops {
@@ -210,6 +237,7 @@ struct ath11k_hw_ops {
u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
@@ -233,6 +261,7 @@ struct ath11k_hw_ops {
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ u32 (*get_ring_selector)(struct sk_buff *skb);
};
extern const struct ath11k_hw_ops ipq8074_ops;
@@ -240,13 +269,16 @@ extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ops wcn6855_ops;
+extern const struct ath11k_hw_ops wcn6750_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
@@ -287,10 +319,16 @@ enum ath11k_bd_ie_board_type {
ATH11K_BD_IE_BOARD_DATA = 1,
};
+enum ath11k_bd_ie_regdb_type {
+ ATH11K_BD_IE_REGDB_NAME = 0,
+ ATH11K_BD_IE_REGDB_DATA = 1,
+};
+
enum ath11k_bd_ie_type {
/* contains sub IEs of enum ath11k_bd_ie_board_type */
ATH11K_BD_IE_BOARD = 0,
- ATH11K_BD_IE_BOARD_EXT = 1,
+ /* contains sub IEs of enum ath11k_bd_ie_regdb_type */
+ ATH11K_BD_IE_REGDB = 1,
};
struct ath11k_hw_regs {
@@ -336,6 +374,12 @@ struct ath11k_hw_regs {
u32 hal_reo_status_ring_base_lsb;
u32 hal_reo_status_hp;
+ u32 hal_reo_cmd_ring_base_lsb;
+ u32 hal_reo_cmd_ring_hp;
+
+ u32 hal_sw2reo_ring_base_lsb;
+ u32 hal_sw2reo_ring_hp;
+
u32 hal_seq_wcss_umac_ce0_src_reg;
u32 hal_seq_wcss_umac_ce0_dst_reg;
u32 hal_seq_wcss_umac_ce1_src_reg;
@@ -351,11 +395,29 @@ struct ath11k_hw_regs {
u32 pcie_qserdes_sysclk_en_sel;
u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 hal_shadow_base_addr;
+ u32 hal_reo1_misc_ctl;
};
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs;
+extern const struct ath11k_hw_regs wcn6750_regs;
+
+static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH11K_BD_IE_BOARD:
+ return "board data";
+ case ATH11K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+
+extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
#endif
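The new ath11k_bd_ie_type_str() helper uses an exhaustive switch with a fallback return, so a future enum value degrades to "unknown" in logs instead of breaking callers. The same shape in a standalone sketch (enum values mirror ath11k_bd_ie_type; the print is a hypothetical call site, not from this patch):

#include <stdio.h>

enum bd_ie_type {	/* mirrors ath11k_bd_ie_type */
	BD_IE_BOARD = 0,
	BD_IE_REGDB = 1,
};

static const char *bd_ie_type_str(enum bd_ie_type type)
{
	switch (type) {
	case BD_IE_BOARD:
		return "board data";
	case BD_IE_REGDB:
		return "regdb data";
	}

	/* unreachable for current values, reachable for future ones */
	return "unknown";
}

int main(void)
{
	printf("found IE for %s\n", bd_ie_type_str(BD_IE_REGDB));
	return 0;
}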
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 07f499d5ec92..2d1e3fd9b526 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
+#include <linux/inetdevice.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
#include "mac.h"
#include "core.h"
#include "debug.h"
@@ -16,6 +21,8 @@
#include "testmode.h"
#include "peer.h"
#include "debugfs_sta.h"
+#include "hif.h"
+#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -498,7 +505,7 @@ static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
- conf = rcu_dereference(vif->chanctx_conf);
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
@@ -868,13 +875,16 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
lockdep_assert_held(&ar->conf_mutex);
+ mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
ath11k_peer_rx_tid_cleanup(ar, peer);
+ ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
}
spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
ar->num_peers = 0;
ar->num_stations = 0;
@@ -1352,7 +1362,7 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!bcn) {
ath11k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1388,10 +1398,11 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
- if (!vif->color_change_active && !arvif->bcca_zero_sent)
+ if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent)
return;
- if (vif->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif)) {
+ if (vif->bss_conf.color_change_active &&
+ ieee80211_beacon_cntdwn_is_complete(vif)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
@@ -1399,7 +1410,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
arvif->bcca_zero_sent = false;
- if (vif->color_change_active)
+ if (vif->bss_conf.color_change_active)
ieee80211_beacon_update_cntdwn(vif);
ath11k_mac_setup_bcn_tmpl(arvif);
}
@@ -1529,7 +1540,7 @@ static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
lockdep_assert_held(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_STATION)
- aid = vif->bss_conf.aid;
+ aid = vif->cfg.aid;
else
aid = sta->aid;
@@ -1626,7 +1637,7 @@ static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
band = def.chan->band;
sband = ar->hw->wiphy->bands[band];
- ratemask = sta->supp_rates[band];
+ ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -1671,7 +1682,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -1708,7 +1719,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
@@ -1766,7 +1777,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1868,7 +1879,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
@@ -1914,17 +1925,17 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
- if (vht_nss > sta->rx_nss) {
+ if (vht_nss > sta->deflink.rx_nss) {
user_rate_valid = false;
- for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
if (vht_mcs_mask[nss_idx]) {
user_rate_valid = true;
break;
@@ -1934,14 +1945,14 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
if (!user_rate_valid) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n",
- sta->rx_nss, sta->addr);
- vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ sta->deflink.rx_nss, sta->addr);
+ vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
}
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
- for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
(2 * i) & 3;
@@ -1949,7 +1960,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -2068,9 +2079,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
{
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
enum nl80211_band band;
- u16 *he_mcs_mask;
+ u16 he_mcs_mask[NL80211_HE_NSS_MAX];
u8 max_nss, he_mcs;
u16 he_tx_mcs = 0, v = 0;
int i, he_nss, nss_idx;
@@ -2087,7 +2098,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
return;
band = def.chan->band;
- he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+ memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
+ sizeof(he_mcs_mask));
if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
return;
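The switch from a pointer into arvif->bitrate_mask to a stack copy matters because this function later writes into the mask (the user_rate_valid fixup further down assigns to he_mcs_mask[]); through a pointer, that write would silently corrupt the configured bitrate mask for every later association. A minimal illustration of the aliasing fix, with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSS_MAX 8

/* stands in for the persistent arvif->bitrate_mask configuration */
static uint16_t cfg_mask[NSS_MAX] = { 0x3ff, 0x3ff };

static void assoc_with_copy(void)
{
	uint16_t mask[NSS_MAX];

	memcpy(mask, cfg_mask, sizeof(mask));	/* what the patch does */
	mask[0] = 0;	/* per-peer fixup stays local to this association */
}

int main(void)
{
	assoc_with_copy();
	printf("cfg_mask[0] = 0x%x (unchanged)\n", cfg_mask[0]);
	return 0;
}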
@@ -2125,7 +2137,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
else
max_nss = rx_mcs_80;
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
memcpy_and_pad(&arg->peer_he_cap_macinfo,
sizeof(arg->peer_he_cap_macinfo),
@@ -2157,10 +2169,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
@@ -2203,9 +2215,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
- if (he_nss > sta->rx_nss) {
+ if (he_nss > sta->deflink.rx_nss) {
user_rate_valid = false;
- for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
if (he_mcs_mask[nss_idx]) {
user_rate_valid = true;
break;
@@ -2215,11 +2227,11 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
if (!user_rate_valid) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n",
- sta->rx_nss, sta->addr);
- he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ sta->deflink.rx_nss, sta->addr);
+ he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1];
}
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
@@ -2262,7 +2274,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
/* Calculate peer NSS capability from HE capabilities if STA
* supports HE.
*/
- for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+ for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) {
he_mcs = he_tx_mcs >> (2 * i) & 3;
/* In case of fixed rates, MCS Range in he_tx_mcs might have
@@ -2273,7 +2285,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
he_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
if (arg->peer_phymode == MODE_11AX_HE160 ||
arg->peer_phymode == MODE_11AX_HE80_80) {
@@ -2306,7 +2318,7 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 ampdu_factor;
@@ -2316,16 +2328,19 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
band = def.chan->band;
- if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
return;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
- arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa);
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
arg->peer_mpdu_density =
ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
arg->peer_he_caps_6ghz));
@@ -2351,17 +2366,17 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
- if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa)
+ if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa)
return;
if (ht_cap->ht_supported) {
smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
} else {
- smps = le16_get_bits(sta->he_6ghz_capa.capa,
+ smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_SM_PS);
}
@@ -2485,15 +2500,15 @@ err:
static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
ATH11K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
struct ieee80211_sta *sta)
{
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->vht_cap.cap &
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
@@ -2505,13 +2520,13 @@ static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
}
}
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
@@ -2520,24 +2535,24 @@ static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
struct ieee80211_sta *sta)
{
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
@@ -2566,23 +2581,23 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->he_cap.has_he &&
+ if (sta->deflink.he_cap.has_he &&
!ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
- else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
- } else if (sta->vht_cap.vht_supported &&
- !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
@@ -2595,15 +2610,15 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
- if (sta->he_cap.has_he &&
+ if (sta->deflink.he_cap.has_he &&
!ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
- } else if (sta->vht_cap.vht_supported &&
- !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
phymode = ath11k_mac_get_phymode_vht(ar, sta);
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
!ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -2726,8 +2741,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->ht_cap,
- le16_to_cpu(ap_sta->he_6ghz_capa.capa));
+ &ap_sta->deflink.ht_cap,
+ le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -2736,7 +2751,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
WARN_ON(arvif->is_up);
- arvif->aid = bss_conf->aid;
+ arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
@@ -2747,10 +2762,11 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
spin_lock_bh(&ar->ab->base_lock);
@@ -2804,6 +2820,8 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
arvif->is_up = false;
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
@@ -2862,6 +2880,11 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
if (ret)
ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ /* For WCN6855, firmware will clear this param when vdev starts, hence
+ * cache it here so that we can reconfigure it once the vdev is up.
+ */
+ ar->hw_rate_code = hw_rate_code;
+
vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
@@ -3037,7 +3060,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return ret;
}
- /* Enable all patial BSSID mask for SRG */
+ /* Enable all partial BSSID mask for SRG */
ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
@@ -3055,7 +3078,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return ret;
}
- /* Enable all patial BSSID mask for non-SRG */
+ /* Enable all partial BSSID mask for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
@@ -3070,7 +3093,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
@@ -3085,6 +3108,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
int ret = 0;
u8 rateidx;
u32 rate;
+ u32 ipv4_cnt;
mutex_lock(&ar->conf_mutex);
@@ -3128,6 +3152,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
arvif->do_not_send_tmpl = true;
else
arvif->do_not_send_tmpl = false;
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3149,9 +3187,10 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
- arvif->u.ap.ssid_len = info->ssid_len;
- if (info->ssid_len)
- memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
@@ -3163,14 +3202,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BA_MODE,
- WMI_BA_MODE_BUFFER_SIZE_256);
- if (ret)
- ath11k_warn(ar->ab,
- "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
- arvif->vdev_id);
-
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -3247,7 +3278,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc)
+ if (vif->cfg.assoc)
ath11k_bss_assoc(hw, vif, info);
else
ath11k_bss_disassoc(hw, vif);
@@ -3263,7 +3294,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params.supports_sta_ps) {
- arvif->ps = vif->bss_conf.ps;
+ arvif->ps = vif->cfg.ps;
ret = ath11k_mac_config_ps(ar);
if (ret)
@@ -3320,10 +3351,15 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
- if (info->twt_requester || info->twt_responder)
- ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
- else
+ struct wmi_twt_enable_params twt_params = {0};
+
+ if (info->twt_requester || info->twt_responder) {
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id,
+ &twt_params);
+ } else {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
@@ -3377,6 +3413,19 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info);
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
+ memcpy(arvif->arp_ns_offload.ipv4_addr,
+ vif->cfg.arp_addr_list,
+ ipv4_cnt * sizeof(u32));
+ memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
+ arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif->cfg.arp_addr_cnt,
+ vif->addr, arvif->arp_ns_offload.ipv4_addr);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
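The BSS_CHANGED_ARP_FILTER branch only snapshots the addresses mac80211 hands over, clamped to the firmware-sized cache; programming them into firmware presumably happens later from the suspend/WoW path. A sketch of the clamp-and-copy, where the value of ATH11K_IPV4_MAX_COUNT is an assumption:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IPV4_MAX_COUNT 2	/* assumed stand-in for ATH11K_IPV4_MAX_COUNT */

static uint32_t offload_addrs[IPV4_MAX_COUNT];

/* Copy at most IPV4_MAX_COUNT addresses; extra entries are dropped
 * rather than overflowing the fixed-size cache.
 */
static unsigned int cache_arp_addrs(const uint32_t *list, unsigned int cnt)
{
	unsigned int n = cnt < IPV4_MAX_COUNT ? cnt : IPV4_MAX_COUNT;	/* min() */

	memcpy(offload_addrs, list, n * sizeof(uint32_t));
	return n;
}

int main(void)
{
	uint32_t addrs[3] = { 0x0100a8c0, 0x0200a8c0, 0x0300a8c0 };

	printf("cached %u of 3 addresses\n", cache_arp_addrs(addrs, 3));
	return 0;
}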
@@ -3408,7 +3457,7 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
break;
}
}
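complete() wakes at most one thread blocked in wait_for_completion(), while complete_all() wakes every waiter and leaves the completion signalled, so the change above matters whenever more than one context can wait on scan.completed. A POSIX analogue of the distinction (the kernel completion API is not available in userspace; compile with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

/* analogue of complete(): at most one waiter resumes */
void complete_one(void)
{
	pthread_mutex_lock(&lock);
	done = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* analogue of complete_all(): every waiter resumes, state stays set */
void complete_all_waiters(void)
{
	pthread_mutex_lock(&lock);
	done = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* analogue of wait_for_completion() */
void wait_done(void)
{
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	complete_all_waiters();
	wait_done();	/* returns immediately: the state stays signalled */
	printf("all waiters released\n");
	return 0;
}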
@@ -3587,26 +3636,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- /* Currently the pending_11d=true only happened 1 time while
- * wlan interface up in ath11k_mac_11d_scan_start(), it is called by
- * ath11k_mac_op_add_interface(), after wlan interface up,
- * pending_11d=false always.
- * If remove below wait, it always happened scan fail and lead connect
- * fail while wlan interface up, because it has a 11d scan which is running
- * in firmware, and lead this scan failed.
- */
- if (ar->pending_11d) {
- long time_left;
- unsigned long timeout = 5 * HZ;
-
- if (ar->supports_6ghz)
- timeout += 5 * HZ;
-
- time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac wait 11d channel list time left %ld\n", time_left);
- }
-
memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
@@ -3672,6 +3701,10 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
return ret;
}
@@ -3983,7 +4016,7 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
}
/* Avoid updating invalid nss as fixed rate */
- if (nss > sta->rx_nss)
+ if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
@@ -4033,7 +4066,7 @@ ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
}
/* Avoid updating invalid nss as fixed rate */
- if (nss > sta->rx_nss)
+ if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
@@ -4100,12 +4133,12 @@ static int ath11k_station_assoc(struct ath11k *ar,
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
- if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
if (ret)
return ret;
- } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
band);
if (ret)
@@ -4119,7 +4152,8 @@ static int ath11k_station_assoc(struct ath11k *ar,
return 0;
ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa));
+ &sta->deflink.ht_cap,
+ le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -4281,10 +4315,10 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
* TODO: Check RATEMASK_CMDID to support auto rates selection
* across HT/VHT and for multiple VHT MCS support.
*/
- if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
- } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
band);
} else {
@@ -4454,6 +4488,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
}
+ ewma_avg_rssi_init(&arsta->avg_rssi);
return 0;
free_tx_stats:
@@ -4495,6 +4530,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
@@ -4504,34 +4540,42 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
+ vif->type == NL80211_IFTYPE_STATION;
- if (ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION)
- goto free;
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ if (!skip_peer_delete) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab,
+ ATH11K_DBG_MAC,
+ "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ }
ath11k_mac_dec_num_stations(arvif, sta);
+ mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
+ if (skip_peer_delete && peer) {
+ peer->sta = NULL;
+ } else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
peer->sta = NULL;
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
-free:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -4598,10 +4642,10 @@ static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
int ret = 0;
s16 txpwr;
- if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
- txpwr = sta->txpwr.power;
+ txpwr = sta->deflink.txpwr.power;
if (!txpwr)
return -EINVAL;
}
@@ -4662,15 +4706,16 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->bandwidth, sta->rx_nss,
- sta->smps_mode);
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
spin_lock_bh(&ar->data_lock);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = WMI_PEER_CHWIDTH_20MHZ;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_20:
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
@@ -4685,7 +4730,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
- sta->bandwidth, sta->addr);
+ sta->deflink.bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -4694,12 +4739,12 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->rx_nss;
+ arsta->nss = sta->deflink.rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -4712,7 +4757,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->smps_mode, sta->addr);
+ sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4785,7 +4830,8 @@ exit:
}
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath11k *ar = hw->priv;
@@ -4915,6 +4961,8 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -4955,7 +5003,7 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
{
bool subfer, subfee;
- int sound_dim = 0;
+ int sound_dim = 0, nsts = 0;
subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
@@ -4965,6 +5013,11 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
subfer = false;
}
+ if (ar->num_rx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ subfee = false;
+ }
+
/* If SU Beamformer is not set, then disable MU Beamformer Capability */
if (!subfer)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
@@ -4977,7 +5030,9 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
*vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
- /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+ nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
/* Enable Sounding Dimension Field only if SU BF is enabled */
if (subfer) {
@@ -4989,9 +5044,15 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
*vht_cap |= sound_dim;
}
- /* Use the STS advertised by FW unless SU Beamformee is not supported*/
- if (!subfee)
- *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ /* Enable Beamformee STS Field only if SU BF is enabled */
+ if (subfee) {
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
+
+ nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ *vht_cap |= nsts;
+ }
}
static struct ieee80211_sta_vht_cap
@@ -5517,8 +5578,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath11k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+ mutex_lock(&ar->conf_mutex);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -5536,6 +5597,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
arvif->is_started);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
+ mutex_unlock(&ar->conf_mutex);
}
}
@@ -5566,64 +5628,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
-
- return 0;
-}
-
-int ath11k_mac_rfkill_config(struct ath11k *ar)
-{
- struct ath11k_base *ab = ar->ab;
- u32 param;
- int ret;
-
- if (ab->hw_params.rfkill_pin == 0)
- return -EOPNOTSUPP;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
- ab->hw_params.rfkill_pin, ab->hw_params.rfkill_cfg,
- ab->hw_params.rfkill_on_level);
-
- param = FIELD_PREP(WMI_RFKILL_CFG_RADIO_LEVEL,
- ab->hw_params.rfkill_on_level) |
- FIELD_PREP(WMI_RFKILL_CFG_GPIO_PIN_NUM,
- ab->hw_params.rfkill_pin) |
- FIELD_PREP(WMI_RFKILL_CFG_PIN_AS_GPIO,
- ab->hw_params.rfkill_cfg);
-
- ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
- param, ar->pdev->pdev_id);
- if (ret) {
- ath11k_warn(ab,
- "failed to set rfkill config 0x%x: %d\n",
- param, ret);
- return ret;
- }
-
- return 0;
-}
-
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable)
-{
- enum wmi_rfkill_enable_radio param;
- int ret;
-
- if (enable)
- param = WMI_RFKILL_ENABLE_RADIO_ON;
- else
- param = WMI_RFKILL_ENABLE_RADIO_OFF;
-
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac %d rfkill enable %d",
- ar->pdev_idx, param);
-
- ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE,
- param, ar->pdev->pdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n",
- param, ret);
- return ret;
- }
+ queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -5713,6 +5718,27 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
return ret;
}
+static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab)
+{
+ int recovery_start_count;
+
+ if (!ab->is_reset)
+ return;
+
+ recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+ if (recovery_start_count == ab->num_radios) {
+ complete(&ab->recovery_start);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started success\n");
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting reconfigure...\n");
+
+ wait_for_completion_timeout(&ab->reconfigure_complete,
+ ATH11K_RECONFIGURE_TIMEOUT_HZ);
+}
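ath11k_mac_wait_reconfigure() above relies on a count-to-N completion: the radio whose increment reaches num_radios signals recovery_start for everyone waiting. A hedged userspace analogue of that pattern, using pthreads instead of kernel completions (all names below are invented for illustration):

/* Userspace analogue (illustrative only) of the recovery_start_count
 * pattern: the thread whose increment reaches NUM_RADIOS completes
 * the "completion" every other party is waiting on.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_RADIOS 2

static atomic_int recovery_start_count;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t recovery_start = PTHREAD_COND_INITIALIZER;
static int done;

static void *radio_restart(void *arg)
{
        (void)arg;
        if (atomic_fetch_add(&recovery_start_count, 1) + 1 == NUM_RADIOS) {
                pthread_mutex_lock(&lock);
                done = 1;                       /* complete(&ab->recovery_start) */
                pthread_cond_broadcast(&recovery_start);
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t t[NUM_RADIOS];
        int i;

        for (i = 0; i < NUM_RADIOS; i++)
                pthread_create(&t[i], NULL, radio_restart, NULL);

        pthread_mutex_lock(&lock);
        while (!done)                           /* wait_for_completion() */
                pthread_cond_wait(&recovery_start, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < NUM_RADIOS; i++)
                pthread_join(t[i], NULL);
        printf("recovery started\n");
        return 0;
}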
+
static int ath11k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath11k *ar = hw->priv;
@@ -5729,6 +5755,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
break;
case ATH11K_STATE_RESTARTING:
ar->state = ATH11K_STATE_RESTARTED;
+ ath11k_mac_wait_reconfigure(ab);
break;
case ATH11K_STATE_RESTARTED:
case ATH11K_STATE_WEDGED:
@@ -5795,7 +5822,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
/* TODO: Do we need to enable ANI? */
- ath11k_reg_update_chan_list(ar);
+ ath11k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
@@ -5860,7 +5887,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
- cancel_work_sync(&ar->ab->rfkill_work);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
@@ -6032,7 +6063,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
return false;
}
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
{
struct wmi_11d_scan_start_params param;
int ret;
@@ -6060,28 +6091,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
- if (wait)
- reinit_completion(&ar->finish_11d_scan);
-
ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
vdev_id, ret);
} else {
ar->vdev_id_11d_scan = vdev_id;
- if (wait) {
- ar->pending_11d = true;
- ret = wait_for_completion_timeout(&ar->finish_11d_scan,
- 5 * HZ);
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac 11d scan left time %d\n", ret);
-
- if (!ret)
- ar->pending_11d = false;
- }
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ar->state_11d = ATH11K_11D_RUNNING;
}
fin:
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
@@ -6100,16 +6125,24 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d vdev id %d\n",
ar->vdev_id_11d_scan);
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
vdev_id = ar->vdev_id_11d_scan;
ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
- if (ret)
+ if (ret) {
ath11k_warn(ar->ab,
"failed to stopt 11d scan vdev %d ret: %d\n",
vdev_id, ret);
- else
+ } else {
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
}
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
@@ -6162,6 +6195,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
+ /* In the case of hardware recovery, debugfs files are
+ * not deleted since ieee80211_ops.remove_interface() is
+ * not invoked. In such cases, try to delete the files.
+ * These will be re-created later.
+ */
+ ath11k_debugfs_remove_interface(arvif);
+
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@@ -6305,8 +6345,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
-
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH11K_11D_PREPARING;
+ }
break;
case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@@ -6341,28 +6383,20 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
}
+ ath11k_debugfs_add_interface(arvif);
+
mutex_unlock(&ar->conf_mutex);
return 0;
err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- reinit_completion(&ar->peer_delete_done);
-
- fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
- arvif->vdev_id);
+ fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (fbret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- arvif->vdev_id, vif->addr);
+ ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n",
+ vif->addr, arvif->vdev_id, fbret);
goto err;
}
-
- fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
- vif->addr);
- if (fbret)
- goto err;
-
- ar->num_peers--;
}
err_vdev_del:
@@ -6375,6 +6409,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
+ ath11k_debugfs_remove_interface(arvif);
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -6473,6 +6508,8 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
+ ath11k_debugfs_remove_interface(arvif);
+
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@@ -6610,12 +6647,13 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef,
+ struct ieee80211_chanctx_conf *ctx,
bool restart)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
+ const struct cfg80211_chan_def *chandef = &ctx->def;
int he_support = arvif->vif->bss_conf.he_support;
int ret = 0;
@@ -6650,8 +6688,7 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
- arg.channel.freq2_radar =
- !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ arg.channel.freq2_radar = ctx->radar_enabled;
arg.channel.passive = arg.channel.chan_radar;
@@ -6761,15 +6798,15 @@ err:
}
static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef)
+ struct ieee80211_chanctx_conf *ctx)
{
- return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+ return ath11k_mac_vdev_start_restart(arvif, ctx, false);
}
static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef)
+ struct ieee80211_chanctx_conf *ctx)
{
- return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+ return ath11k_mac_vdev_start_restart(arvif, ctx, true);
}
struct ath11k_mac_change_chanctx_arg {
@@ -6785,7 +6822,7 @@ ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath11k_mac_change_chanctx_arg *arg = data;
- if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
@@ -6798,7 +6835,7 @@ ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ath11k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
- ctx = rcu_access_pointer(vif->chanctx_conf);
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
@@ -6836,13 +6873,33 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
+ /* change_chanctx can be called even before vdev_up from
+ * ieee80211_start_ap->ieee80211_vif_use_channel->
+ * ieee80211_recalc_radar_chanctx.
+ *
+ * Firmware expects vdev_restart only if the vdev is up.
+ * If the vdev is down, it expects vdev_stop->vdev_start.
+ */
+ if (arvif->is_up) {
+ ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ } else {
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
+ if (ret)
+ ath11k_warn(ab, "failed to start vdev %d: %d\n",
+ arvif->vdev_id, ret);
- ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
- if (ret) {
- ath11k_warn(ab, "failed to restart vdev %d: %d\n",
- arvif->vdev_id, ret);
continue;
}
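The comment in the hunk above captures the firmware contract: vdev_restart is only legal on an up vdev, while a down vdev must be cycled through stop then start. A minimal sketch of that decision, with hypothetical stub functions standing in for the WMI calls:

/* Minimal sketch (hypothetical stubs) of the contract described in
 * the comment above: restart an up vdev in place, cycle a down vdev
 * through stop -> start instead.
 */
#include <stdio.h>

static int vdev_restart(int id) { printf("restart vdev %d\n", id); return 0; }
static int vdev_stop(int id)    { printf("stop vdev %d\n", id);    return 0; }
static int vdev_start(int id)   { printf("start vdev %d\n", id);   return 0; }

static int switch_chanctx(int vdev_id, int is_up)
{
        int ret;

        if (is_up)
                return vdev_restart(vdev_id);

        ret = vdev_stop(vdev_id);       /* down vdev: full stop/start cycle */
        if (ret)
                return ret;
        return vdev_start(vdev_id);
}

int main(void)
{
        switch_chanctx(0, 1);   /* up: restart in place */
        switch_chanctx(1, 0);   /* down: stop then start */
        return 0;
}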
@@ -6927,7 +6984,8 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock;
- if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR)
ath11k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6947,7 +7005,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
if (WARN_ON(arvif->is_started))
return -EBUSY;
- ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@@ -6955,6 +7013,19 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return ret;
}
+ /* Reconfigure hardware rate code since it is cleared by firmware. */
+ if (ar->hw_rate_code > 0) {
+ u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ ar->hw_rate_code);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ return ret;
+ }
+ }
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
@@ -6972,6 +7043,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
@@ -7028,7 +7100,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+ ret = ath11k_mac_vdev_start(arvif, ctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@@ -7061,11 +7133,13 @@ out:
static void
ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
int ret;
mutex_lock(&ar->conf_mutex);
@@ -7077,9 +7151,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath11k_peer_find_by_addr(ab, ar->mac_addr))
- ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ if (peer)
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_stop(ar);
@@ -7130,7 +7208,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
}
@@ -7204,31 +7282,47 @@ static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+static int ath11k_mac_flush_tx_complete(struct ath11k *ar)
{
- struct ath11k *ar = hw->priv;
long time_left;
-
- if (drop)
- return;
+ int ret = 0;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH11K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH11K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
- time_left);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "mac mgmt tx flush mgmt pending %d\n",
- atomic_read(&ar->num_pending_mgmt_tx));
+ return ret;
+}
+
+int ath11k_mac_wait_tx_complete(struct ath11k *ar)
+{
+ ath11k_mac_drain_tx(ar);
+ return ath11k_mac_flush_tx_complete(ar);
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (drop)
+ return;
+
+ ath11k_mac_flush_tx_complete(ar);
}
static int
@@ -7629,6 +7723,7 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b
bool he_fixed_rate = false, vht_fixed_rate = false;
struct ath11k_peer *peer, *tmp;
const u16 *vht_mcs_mask, *he_mcs_mask;
+ struct ieee80211_link_sta *deflink;
u8 vht_nss, he_nss;
bool ret = true;
@@ -7651,13 +7746,16 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b
spin_lock_bh(&ar->ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
if (peer->sta) {
- if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported ||
- peer->sta->rx_nss < vht_nss)) {
+ deflink = &peer->sta->deflink;
+
+ if (vht_fixed_rate && (!deflink->vht_cap.vht_supported ||
+ deflink->rx_nss < vht_nss)) {
ret = false;
goto out;
}
- if (he_fixed_rate && (!peer->sta->he_cap.has_he ||
- peer->sta->rx_nss < he_nss)) {
+
+ if (he_fixed_rate && (!deflink->he_cap.has_he ||
+ deflink->rx_nss < he_nss)) {
ret = false;
goto out;
}
@@ -7677,6 +7775,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
{
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
+ struct ath11k_pdev_cap *cap;
struct ath11k *ar = arvif->ar;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -7697,10 +7796,11 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
return -EPERM;
band = def.chan->band;
+ cap = &ar->pdev->cap;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
he_mcs_mask = mask->control[band].he_mcs;
- ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+ ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
@@ -7710,7 +7810,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
he_ltf = mask->control[band].he_ltf;
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
- * requires passing atleast one of used basic rates along with them.
+ * requires passing at least one of used basic rates along with them.
* Fixed rate setting across different preambles (legacy, HT, VHT) is
* not supported by the FW. Hence use of FIXED_RATE vdev param is not
* suitable for setting single HT/VHT rates.
@@ -7827,6 +7927,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
@@ -7838,6 +7940,30 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ar->pdev->pdev_id);
ar->state = ATH11K_STATE_ON;
ieee80211_wake_queues(ar->hw);
+
+ if (ar->ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ }
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "recovery count %d\n", recovery_count);
+ /* When there are multiple radios in an SOC,
+ * the recovery has to be done for each radio
+ */
+ if (recovery_count == ab->num_radios) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
+ }
+ }
}
mutex_unlock(&ar->conf_mutex);
@@ -8013,6 +8139,402 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
+
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
+ ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void ath11k_generate_ns_mc_addr(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload)
+{
+ int i;
+
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
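ath11k_generate_ns_mc_addr() builds the RFC 4291 solicited-node multicast address ff02::1:ffXX:XXXX from the low three bytes of each unicast address. A standalone check of the same derivation (address value chosen arbitrarily):

/* Userspace check of the solicited-node multicast derivation above:
 * ff02::1:ffXX:XXXX, where XX:XXXX are the last three bytes of the
 * unicast address (RFC 4291, section 2.7.1).
 */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        unsigned char uc[16], mc[16] = { 0 };
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8::1:2:3", uc);

        mc[0] = 0xff;                   /* ff02::/16: link-local multicast */
        mc[1] = 0x02;
        mc[11] = 0x01;                  /* ::1:ff00:0/104 solicited-node prefix */
        mc[12] = 0xff;
        mc[13] = uc[13];                /* copy low 24 bits of unicast addr */
        mc[14] = uc[14];
        mc[15] = uc[15];

        inet_ntop(AF_INET6, mc, buf, sizeof(buf));
        printf("%s\n", buf);            /* ff02::1:ff02:3 */
        return 0;
}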
+
+static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_arp_ns_offload *offload;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct list_head *p;
+ u32 count, scope;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac op ipv6 changed\n");
+
+ offload = &arvif->arp_ns_offload;
+ count = 0;
+
+ read_lock_bh(&idev->lock);
+
+ memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
+ memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr));
+ memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
+
+ /* get unicast address */
+ list_for_each(p, &idev->addr_list) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
+ }
+ }
+
+ /* get anycast address */
+ for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv scope: %d\n", scope);
+ }
+ }
+
+generate:
+ offload->ipv6_count = count;
+ read_unlock_bh(&idev->lock);
+
+ /* generate ns multicast address */
+ ath11k_generate_ns_mc_addr(ar, offload);
+}
+#endif
+
+static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant provides the replay counter in big endian, while
+ * the firmware expects it in little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
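The rekey path above converts the supplicant's big-endian replay counter with get_unaligned_be64(). A self-contained sketch of that byte-order fold, using plain shifts in place of the kernel helper:

/* Sketch of the byte-order handling above: the replay counter arrives
 * as a big-endian byte array and is folded into host order MSB-first.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | p[i];    /* big-endian: MSB first */
        return v;
}

int main(void)
{
        const uint8_t replay_ctr[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x02 };

        printf("0x%llx\n", (unsigned long long)get_be64(replay_ctr)); /* 0x102 */
        return 0;
}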
+
+static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct ath11k *ar = hw->priv;
+ const struct cfg80211_sar_sub_specs *sspec;
+ int ret, index;
+ u8 *sar_tbl;
+ u32 i;
+
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) ||
+ !ar->ab->hw_params.bios_sar_capa) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret);
+ goto exit;
+ }
+
+ sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL);
+ if (!sar_tbl) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ sspec = sar->sub_specs;
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) {
+ ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n",
+ sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1);
+ continue;
+ }
+
+ /* chain0 and chain1 share same power setting */
+ sar_tbl[sspec->freq_range_index] = sspec->power;
+ index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1);
+ sar_tbl[index] = sspec->power;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n",
+ sspec->freq_range_index, sar_tbl[sspec->freq_range_index]);
+ sspec++;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sar power: %d", ret);
+
+ kfree(sar_tbl);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
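The SAR loop above writes each power limit twice: once into the chain-0 half of the table and once at the mirrored chain-1 index. A small model of that layout; the real BIOS_SAR_TABLE_LEN is defined elsewhere in the driver, so the value below is only an assumption for illustration:

/* Layout sketch for the BIOS SAR table filled above: the first half
 * holds chain 0 limits, the second half mirrors them for chain 1
 * (both chains share one power setting per frequency range).
 */
#include <stdio.h>
#include <string.h>

#define SAR_TABLE_LEN 22        /* assumed value, for illustration only */

static void set_sar_power(unsigned char *tbl, unsigned int range,
                          unsigned char power)
{
        if (range >= SAR_TABLE_LEN / 2) /* reject out-of-range index */
                return;

        tbl[range] = power;                     /* chain 0 */
        tbl[range + SAR_TABLE_LEN / 2] = power; /* chain 1 mirror */
}

int main(void)
{
        unsigned char tbl[SAR_TABLE_LEN];

        memset(tbl, 0, sizeof(tbl));
        set_sar_power(tbl, 3, 40);
        printf("%u %u\n", tbl[3], tbl[3 + SAR_TABLE_LEN / 2]); /* 40 40 */
        return 0;
}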
+
+static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct scan_req_params arg;
+ int ret;
+ u32 scan_time_msec;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath11k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH11K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration = duration;
+
+ ret = ath11k_start_scan(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ spin_unlock_bh(&ar->data_lock);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
+ 1 * HZ);
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param = {0};
+ struct ath11k_fw_stats_pdev *pdev;
+ int ret;
+
+ /* The final Tx power is the minimum of the target power, CTL power,
+ * regulatory power and PSD EIRP power. The host only knows the
+ * regulatory power from the regulatory rules obtained; the FW knows
+ * all of these powers and programs their minimum. Hence, request the
+ * FW pdev stats, in which the FW reports the minimum channel Tx power
+ * across all vdevs.
+ */
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power is set as 2 units per dBm in FW. */
+ *dbm = pdev->chan_tx_power / 2;
+
+ spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
+ pdev->chan_tx_power, *dbm);
+ return 0;
+
+err_fallback:
+ mutex_unlock(&ar->conf_mutex);
+ /* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
}
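Per the comment in get_txpower() above, the firmware reports channel Tx power in half-dBm steps. A trivial round-trip of that conversion:

/* The firmware field decoded above is in 2-units-per-dBm steps. */
#include <stdio.h>

int main(void)
{
        int chan_tx_power = 44;                 /* firmware units */
        int dbm = chan_tx_power / 2;            /* 2 units per dBm */

        printf("%d dBm\n", dbm);                /* 22 dBm */
        return 0;
}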
static const struct ieee80211_ops ath11k_ops = {
@@ -8029,6 +8551,7 @@ static const struct ieee80211_ops ath11k_ops = {
.hw_scan = ath11k_mac_op_hw_scan,
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key,
+ .set_rekey_data = ath11k_mac_op_set_rekey_data,
.sta_state = ath11k_mac_op_sta_state,
.sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
@@ -8050,9 +8573,25 @@ static const struct ieee80211_ops ath11k_ops = {
.flush = ath11k_mac_op_flush,
.sta_statistics = ath11k_mac_op_sta_statistics,
CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath11k_wow_op_suspend,
+ .resume = ath11k_wow_op_resume,
+ .set_wakeup = ath11k_wow_op_set_wakeup,
+#endif
+
#ifdef CONFIG_ATH11K_DEBUGFS
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = ath11k_mac_op_ipv6_changed,
+#endif
+ .get_txpower = ath11k_mac_op_get_txpower,
+
+ .set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
+ .remain_on_channel = ath11k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel,
};
static void ath11k_mac_update_ch_list(struct ath11k *ar,
@@ -8305,6 +8844,8 @@ void ath11k_mac_unregister(struct ath11k_base *ab)
__ath11k_mac_unregister(ar);
}
+
+ ath11k_peer_rhash_tbl_destroy(ab);
}
static int __ath11k_mac_register(struct ath11k *ar)
@@ -8353,6 +8894,11 @@ static int __ath11k_mac_register(struct ath11k *ar)
if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+ if (ab->hw_params.supports_multi_bssid) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
@@ -8419,19 +8965,40 @@ static int __ath11k_mac_register(struct ath11k *ar)
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath11k_wow_init(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
- ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
- if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD,
+ ar->ab->wmi_ab.svc_map)) {
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_BSS_COLOR);
+ ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION);
+ }
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -8455,6 +9022,10 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
}
+ if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ ab->hw_params.bios_sar_capa)
+ ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
+
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
@@ -8476,6 +9047,17 @@ static int __ath11k_mac_register(struct ath11k *ar)
goto err_unregister_hw;
}
+ if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
+ memcpy(&ar->alpha2, ab->new_alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set cc code for mac register: %d\n", ret);
+ }
+
ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
@@ -8507,6 +9089,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i;
int ret;
+ u8 mac_addr[ETH_ALEN] = {0};
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
@@ -8515,13 +9098,22 @@ int ath11k_mac_register(struct ath11k_base *ab)
ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+ ret = ath11k_peer_rhash_tbl_init(ab);
+ if (ret)
+ return ret;
+
+ device_get_mac_address(ab->dev, mac_addr);
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ab->pdevs_macaddr_valid) {
ether_addr_copy(ar->mac_addr, pdev->mac_addr);
} else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ if (is_zero_ether_addr(mac_addr))
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, mac_addr);
ar->mac_addr[4] += i;
}
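When per-pdev MAC addresses are not provisioned, the loop above derives one address per radio by bumping byte 4 of the base address. A quick sketch of that derivation (base address made up for the example):

/* Sketch of the per-pdev MAC derivation above: byte 4 of the base
 * address is bumped by the radio index.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char base[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
        unsigned char mac[6];
        int i;

        for (i = 0; i < 3; i++) {
                memcpy(mac, base, sizeof(mac));
                mac[4] += i;            /* ar->mac_addr[4] += i */
                printf("radio %d: %02x:%02x:%02x:%02x:%02x:%02x\n",
                       i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        }
        return 0;
}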
@@ -8544,6 +9136,8 @@ err_cleanup:
__ath11k_mac_unregister(ar);
}
+ ath11k_peer_rhash_tbl_destroy(ab);
+
return ret;
}
@@ -8598,6 +9192,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
@@ -8611,8 +9206,9 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
- init_completion(&ar->finish_11d_scan);
- init_completion(&ar->finish_11d_ch_list);
+ init_completion(&ar->completed_11d_scan);
+
+ ath11k_fw_stats_init(ar);
}
return 0;
@@ -8639,3 +9235,34 @@ void ath11k_mac_destroy(struct ath11k_base *ab)
pdev->ar = NULL;
}
}
+
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath11k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0e6c870b09c8..2a0d3afb0c99 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -8,6 +8,7 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
+#include "wmi.h"
struct ath11k;
struct ath11k_base;
@@ -130,7 +131,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
#define ATH11K_SCAN_11D_INTERVAL 600000
#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_11d_scan_stop(struct ath11k *ar);
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
@@ -147,8 +148,6 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath11k_mac_scan_finish(struct ath11k *ar);
void ath11k_mac_scan_finish(struct ath11k *ar);
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable);
-int ath11k_mac_rfkill_config(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
@@ -172,4 +171,8 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
+int ath11k_mac_wait_tx_complete(struct ath11k *ar);
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index e4250ba8dfee..86995e8dc913 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
-/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#include <linux/msi.h>
#include <linux/pci.h>
@@ -11,8 +14,10 @@
#include "debug.h"
#include "mhi.h"
#include "pci.h"
+#include "pcic.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@@ -204,7 +209,7 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
{
u32 val;
- val = ath11k_pci_read32(ab, MHISTATUS);
+ val = ath11k_pcic_read32(ab, MHISTATUS);
ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val);
@@ -212,29 +217,29 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
* has SYSERR bit set and thus need to set MHICTRL_RESET
* to clear SYSERR.
*/
- ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+ ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
mdelay(10);
}
static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_TXVECDB, 0);
+ ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
}
static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
+ ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
}
static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_RXVECDB, 0);
+ ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
}
static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
+ ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
}
void ath11k_mhi_clear_vector(struct ath11k_base *ab)
@@ -253,9 +258,8 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
int *irq;
unsigned int msi_data;
- ret = ath11k_pci_get_user_msi_assignment(ab_pci,
- "MHI", &num_vectors,
- &user_base_data, &base_vector);
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
+ &user_base_data, &base_vector);
if (ret)
return ret;
@@ -269,11 +273,10 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
for (i = 0; i < num_vectors; i++) {
msi_data = base_vector;
- if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
msi_data += i;
- irq[i] = ath11k_pci_get_msi_irq(ab->dev,
- msi_data);
+ irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
}
ab_pci->mhi_ctrl->irq = irq;
@@ -291,15 +294,48 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{
}
+static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
+{
+ switch (reason) {
+ case MHI_CB_IDLE:
+ return "MHI_CB_IDLE";
+ case MHI_CB_PENDING_DATA:
+ return "MHI_CB_PENDING_DATA";
+ case MHI_CB_LPM_ENTER:
+ return "MHI_CB_LPM_ENTER";
+ case MHI_CB_LPM_EXIT:
+ return "MHI_CB_LPM_EXIT";
+ case MHI_CB_EE_RDDM:
+ return "MHI_CB_EE_RDDM";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MHI_CB_EE_MISSION_MODE";
+ case MHI_CB_SYS_ERROR:
+ return "MHI_CB_SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "MHI_CB_FATAL_ERROR";
+ case MHI_CB_BW_REQ:
+ return "MHI_CB_BW_REQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "mhi notify status reason %s\n",
+ ath11k_mhi_op_callback_to_str(cb));
+
switch (cb) {
case MHI_CB_SYS_ERROR:
ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
+ case MHI_CB_EE_RDDM:
+ if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ break;
default:
break;
}
@@ -332,6 +368,7 @@ static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
return -ENOENT;
ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (ret)
return ret;
@@ -365,22 +402,22 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
ret = ath11k_mhi_get_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to get msi for mhi\n");
- mhi_free_controller(mhi_ctrl);
- return ret;
+ goto free_controller;
}
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
if (ret < 0)
- return ret;
+ goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
+ mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@@ -402,18 +439,22 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
default:
ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
ab->hw_rev);
- mhi_free_controller(mhi_ctrl);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_controller;
}
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
- mhi_free_controller(mhi_ctrl);
- return ret;
+ goto free_controller;
}
return 0;
+
+free_controller:
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+ return ret;
}
void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
@@ -425,216 +466,62 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
mhi_free_controller(mhi_ctrl);
}
-static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state)
-{
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- return "INIT";
- case ATH11K_MHI_DEINIT:
- return "DEINIT";
- case ATH11K_MHI_POWER_ON:
- return "POWER_ON";
- case ATH11K_MHI_POWER_OFF:
- return "POWER_OFF";
- case ATH11K_MHI_FORCE_POWER_OFF:
- return "FORCE_POWER_OFF";
- case ATH11K_MHI_SUSPEND:
- return "SUSPEND";
- case ATH11K_MHI_RESUME:
- return "RESUME";
- case ATH11K_MHI_TRIGGER_RDDM:
- return "TRIGGER_RDDM";
- case ATH11K_MHI_RDDM_DONE:
- return "RDDM_DONE";
- default:
- return "UNKNOWN";
- }
-};
-
-static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci,
- enum ath11k_mhi_state mhi_state)
+int ath11k_mhi_start(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
+ int ret;
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_DEINIT:
- clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_POWER_ON:
- set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_POWER_OFF:
- case ATH11K_MHI_FORCE_POWER_OFF:
- clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
- clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
- clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_SUSPEND:
- set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_RESUME:
- clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_TRIGGER_RDDM:
- set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
- break;
- case ATH11K_MHI_RDDM_DONE:
- set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
- break;
- default:
- ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
- }
-}
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
-static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci,
- enum ath11k_mhi_state mhi_state)
-{
- struct ath11k_base *ab = ab_pci->ab;
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to prepare mhi: %d", ret);
+ return ret;
+ }
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_DEINIT:
- case ATH11K_MHI_POWER_ON:
- if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) &&
- !test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_FORCE_POWER_OFF:
- if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_POWER_OFF:
- case ATH11K_MHI_SUSPEND:
- if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
- !test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_RESUME:
- if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_TRIGGER_RDDM:
- if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
- !test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
- return 0;
- break;
- case ATH11K_MHI_RDDM_DONE:
- return 0;
- default:
- ath11k_err(ab, "unhandled mhi state: %s(%d)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state);
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to power up mhi: %d", ret);
+ return ret;
}
- ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state,
- ab_pci->mhi_state);
+ return 0;
+}
- return -EINVAL;
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+{
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
-static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
- enum ath11k_mhi_state mhi_state)
+int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
- ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state);
- if (ret)
- goto out;
-
- ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state);
-
- switch (mhi_state) {
- case ATH11K_MHI_INIT:
- ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_DEINIT:
- mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
- ret = 0;
- break;
- case ATH11K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_POWER_OFF:
- mhi_power_down(ab_pci->mhi_ctrl, true);
- ret = 0;
- break;
- case ATH11K_MHI_FORCE_POWER_OFF:
- mhi_power_down(ab_pci->mhi_ctrl, false);
- ret = 0;
- break;
- case ATH11K_MHI_SUSPEND:
- ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_RESUME:
- /* Do force MHI resume as some devices like QCA6390, WCN6855
- * are not in M3 state but they are functional. So just ignore
- * the MHI state while resuming.
- */
- ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_TRIGGER_RDDM:
- ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
- break;
- case ATH11K_MHI_RDDM_DONE:
- break;
- default:
- ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
- ret = -EINVAL;
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend mhi: %d", ret);
+ return ret;
}
- if (ret)
- goto out;
-
- ath11k_mhi_set_state_bit(ab_pci, mhi_state);
-
return 0;
-
-out:
- ath11k_err(ab, "failed to set mhi state: %s(%d)\n",
- ath11k_mhi_state_to_str(mhi_state), mhi_state);
- return ret;
}
-int ath11k_mhi_start(struct ath11k_pci *ab_pci)
+int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{
+ struct ath11k_base *ab = ab_pci->ab;
int ret;
- ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
-
- ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT);
- if (ret)
- goto out;
-
- ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON);
- if (ret)
- goto out;
+ /* Do force MHI resume as some devices like QCA6390, WCN6855
+ * are not in M3 state but they are functional. So just ignore
+ * the MHI state while resuming.
+ */
+ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume mhi: %d", ret);
+ return ret;
+ }
return 0;
-
-out:
- return ret;
-}
-
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
-{
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF);
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT);
-}
-
-void ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
-{
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_SUSPEND);
-}
-
-void ath11k_mhi_resume(struct ath11k_pci *ab_pci)
-{
- ath11k_mhi_set_state(ab_pci, ATH11K_MHI_RESUME);
}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index 488dada5d31c..8d9f852da695 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -16,19 +16,6 @@
#define MHICTRL 0x38
#define MHICTRL_RESET_MASK 0x2
-enum ath11k_mhi_state {
- ATH11K_MHI_INIT,
- ATH11K_MHI_DEINIT,
- ATH11K_MHI_POWER_ON,
- ATH11K_MHI_POWER_OFF,
- ATH11K_MHI_FORCE_POWER_OFF,
- ATH11K_MHI_SUSPEND,
- ATH11K_MHI_RESUME,
- ATH11K_MHI_TRIGGER_RDDM,
- ATH11K_MHI_RDDM,
- ATH11K_MHI_RDDM_DONE,
-};
-
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
@@ -36,7 +23,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
void ath11k_mhi_clear_vector(struct ath11k_base *ab);
-void ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
-void ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
+int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index d73b522a0081..99cf3357c66e 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -13,29 +14,15 @@
#include "hif.h"
#include "mhi.h"
#include "debug.h"
+#include "pcic.h"
#define ATH11K_PCI_BAR_NUM 0
#define ATH11K_PCI_DMA_MASK 32
-#define ATH11K_PCI_IRQ_CE0_OFFSET 3
-#define ATH11K_PCI_IRQ_DP_OFFSET 14
-
-#define WINDOW_ENABLE_BIT 0x40000000
-#define WINDOW_REG_ADDRESS 0x310c
-#define WINDOW_VALUE_MASK GENMASK(24, 19)
-#define WINDOW_START 0x80000
-#define WINDOW_RANGE_MASK GENMASK(18, 0)
-
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
-/* BAR0 + 4k is always accessible, and no
- * need to force wakeup.
- * 4K - 32 = 0xFE0
- */
-#define ACCESS_ALWAYS_OFF 0xFE0
-
#define QCA6390_DEVICE_ID 0x1101
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
@@ -49,233 +36,150 @@ static const struct pci_device_id ath11k_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
-static const struct ath11k_bus_params ath11k_pci_bus_params = {
- .mhi_support = true,
- .m3_fw_support = true,
- .fixed_bdf_addr = false,
- .fixed_mem_region = false,
-};
+static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-static const struct ath11k_msi_config ath11k_msi_config[] = {
- {
- .total_vectors = 32,
- .total_users = 4,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 10, .base_vector = 3 },
- { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
- { .name = "DP", .num_vectors = 18, .base_vector = 14 },
- },
- },
- {
- .total_vectors = 16,
- .total_users = 3,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 5, .base_vector = 3 },
- { .name = "DP", .num_vectors = 8, .base_vector = 8 },
- },
- },
-};
+ return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+}
-static const struct ath11k_msi_config msi_config_one_msi = {
- .total_vectors = 1,
- .total_users = 4,
- .users = (struct ath11k_msi_user[]) {
- { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
- { .name = "CE", .num_vectors = 1, .base_vector = 0 },
- { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
- { .name = "DP", .num_vectors = 1, .base_vector = 0 },
- },
-};
+static void ath11k_pci_bus_release(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
- "bhi",
- "mhi-er0",
- "mhi-er1",
- "ce0",
- "ce1",
- "ce2",
- "ce3",
- "ce4",
- "ce5",
- "ce6",
- "ce7",
- "ce8",
- "ce9",
- "ce10",
- "ce11",
- "host2wbm-desc-feed",
- "host2reo-re-injection",
- "host2reo-command",
- "host2rxdma-monitor-ring3",
- "host2rxdma-monitor-ring2",
- "host2rxdma-monitor-ring1",
- "reo2ost-exception",
- "wbm2host-rx-release",
- "reo2host-status",
- "reo2host-destination-ring4",
- "reo2host-destination-ring3",
- "reo2host-destination-ring2",
- "reo2host-destination-ring1",
- "rxdma2host-monitor-destination-mac3",
- "rxdma2host-monitor-destination-mac2",
- "rxdma2host-monitor-destination-mac1",
- "ppdu-end-interrupts-mac3",
- "ppdu-end-interrupts-mac2",
- "ppdu-end-interrupts-mac1",
- "rxdma2host-monitor-status-ring-mac3",
- "rxdma2host-monitor-status-ring-mac2",
- "rxdma2host-monitor-status-ring-mac1",
- "host2rxdma-host-buf-ring-mac3",
- "host2rxdma-host-buf-ring-mac2",
- "host2rxdma-host-buf-ring-mac1",
- "rxdma2host-destination-ring-mac3",
- "rxdma2host-destination-ring-mac2",
- "rxdma2host-destination-ring-mac1",
- "host2tcl-input-ring4",
- "host2tcl-input-ring3",
- "host2tcl-input-ring2",
- "host2tcl-input-ring1",
- "wbm2host-tx-completions-ring3",
- "wbm2host-tx-completions-ring2",
- "wbm2host-tx-completions-ring1",
- "tcl2host-status-ring",
-};
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
+{
+ if (!ab->hw_params.static_window_map)
+ return ATH11K_PCI_WINDOW_START;
+
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within DP register range, use 3rd window */
+ return 3 * ATH11K_PCI_WINDOW_START;
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within CE register range, use 2nd window */
+ return 2 * ATH11K_PCI_WINDOW_START;
+ else
+ return ATH11K_PCI_WINDOW_START;
+}
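ath11k_pci_get_window_start() uses (offset ^ base) < range_mask to test whether an offset falls inside a register block: the XOR clears the bits the two values share, so the result stays below the range mask only when all the high bits match. A standalone demonstration (block base chosen arbitrarily):

/* The (offset ^ base) < range_mask test above checks that offset and
 * base agree in every bit above the window range, i.e. the offset
 * lands inside that register block.
 */
#include <stdio.h>

#define WINDOW_RANGE_MASK 0x7ffff       /* GENMASK(18, 0) */

static int in_block(unsigned int offset, unsigned int base)
{
        return (offset ^ base) < WINDOW_RANGE_MASK;
}

int main(void)
{
        unsigned int base = 0x00a00000; /* illustrative block base */

        printf("%d\n", in_block(base + 0x100, base));   /* 1: inside */
        printf("%d\n", in_block(base + 0x80000, base)); /* 0: next window */
        return 0;
}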
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
struct ath11k_base *ab = ab_pci->ab;
- u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);
+ u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
lockdep_assert_held(&ab_pci->window_lock);
if (window != ab_pci->register_window) {
- iowrite32(WINDOW_ENABLE_BIT | window,
- ab->mem + WINDOW_REG_ADDRESS);
- ioread32(ab->mem + WINDOW_REG_ADDRESS);
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
ab_pci->register_window = window;
}
}
-static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
-{
- u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
- u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
- u32 window;
-
- window = (umac_window << 12) | (ce_window << 6);
-
- iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
-}
-
-static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab,
- u32 offset)
+static void
+ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 window_start;
- /* If offset lies within DP register range, use 3rd window */
- if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
- window_start = 3 * WINDOW_START;
- /* If offset lies within CE register range, use 2nd window */
- else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
- window_start = 2 * WINDOW_START;
- else
- window_start = WINDOW_START;
+ window_start = ath11k_pci_get_window_start(ab, offset);
- return window_start;
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
}
-void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
+static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 window_start;
+ u32 window_start, val;
- /* for offset beyond BAR + 4K - 32, may
- * need to wakeup MHI to access.
- */
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+ window_start = ath11k_pci_get_window_start(ab, offset);
- if (offset < WINDOW_START) {
- iowrite32(value, ab->mem + offset);
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
} else {
- if (ab->bus_params.static_window_map)
- window_start = ath11k_pci_get_window_start(ab, offset);
- else
- window_start = WINDOW_START;
-
- if (window_start == WINDOW_START) {
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
- } else {
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- }
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+ return val;
}
-u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 val, window_start;
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
- /* for offset beyond BAR + 4K - 32, may
- * need to wakeup MHI to access.
- */
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+ return pci_irq_vector(pci_dev, vector);
+}
- if (offset < WINDOW_START) {
- val = ioread32(ab->mem + offset);
- } else {
- if (ab->bus_params.static_window_map)
- window_start = ath11k_pci_get_window_start(ab, offset);
- else
- window_start = WINDOW_START;
-
- if (window_start == WINDOW_START) {
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
- } else {
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
- }
- }
+static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
+ .wakeup = ath11k_pci_bus_wake_up,
+ .release = ath11k_pci_bus_release,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
- if (ab->hw_params.wakeup_mhi &&
- test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
- offset >= ACCESS_ALWAYS_OFF)
- mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+ .wakeup = NULL,
+ .release = NULL,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
- return val;
+static const struct ath11k_msi_config msi_config_one_msi = {
+ .total_vectors = 1,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 1, .base_vector = 0 },
+ },
+};
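+
msi_config_one_msi is the degenerate layout used when only a single MSI vector can be allocated: every user is given base_vector 0, so MHI, CE, WAKE and DP all funnel into the same interrupt and the handlers discriminate via their enable flags (MHI still advertises three event-ring vectors, all aliased onto that one irq). A standalone sketch mirroring how the assignment lookup resolves under this table:

#include <stdio.h>

struct msi_user { const char *name; int num_vectors; unsigned int base_vector; };

static const struct msi_user one_msi[] = {
	{ "MHI", 3, 0 }, { "CE", 1, 0 }, { "WAKE", 1, 0 }, { "DP", 1, 0 },
};

int main(void)
{
	size_t i;

	/* Every user resolves to vector 0 under the fallback table. */
	for (i = 0; i < sizeof(one_msi) / sizeof(one_msi[0]); i++)
		printf("%-4s -> base_vector %u\n",
		       one_msi[i].name, one_msi[i].base_vector);
	return 0;
}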
+
+static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
+{
+ u32 umac_window;
+ u32 ce_window;
+ u32 window;
+
+ umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
+ ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
+ window = (umac_window << 12) | (ce_window << 6);
+
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
}
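
The static window register packs two 6-bit selectors alongside the default window: bits 17:12 carry the UMAC window and bits 11:6 the CE window, each selector being bits 24:19 of the corresponding base address (the width of ATH11K_PCI_WINDOW_VALUE_MASK). A worked standalone example with hypothetical base addresses:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* Hypothetical bases; only bits 24:19 feed the selectors. */
	uint32_t umac_base = 0x01b80000;
	uint32_t ce_base = 0x01b00000;

	uint32_t umac_window = (umac_base >> 19) & 0x3f;	/* FIELD_GET(GENMASK(24, 19), ...) */
	uint32_t ce_window = (ce_base >> 19) & 0x3f;
	uint32_t window = (umac_window << 12) | (ce_window << 6);

	printf("umac=0x%02" PRIx32 " ce=0x%02" PRIx32 " packed=0x%05" PRIx32 "\n",
	       umac_window, ce_window, window);
	return 0;
}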
static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
u32 val, delay;
- val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
val |= PCIE_SOC_GLOBAL_RESET_V;
- ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
/* TODO: exact time to sleep is uncertain */
delay = 10;
@@ -284,11 +188,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
/* Need to toggle the V bit back, otherwise the chip stays stuck in reset */
val &= ~PCIE_SOC_GLOBAL_RESET_V;
- ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
mdelay(delay);
- val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
if (val == 0xffffffff)
ath11k_warn(ab, "link down error during global reset\n");
}
@@ -298,10 +202,10 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
u32 val;
/* read cookie */
- val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
- val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* TODO: exact time to sleep is uncertain */
@@ -310,16 +214,16 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
* continuing the warm path and entering a dead loop.
*/
- ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
mdelay(10);
- val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* A read-clear register. Clear the register to prevent
* Q6 from entering the wrong code path.
*/
- val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
}
@@ -329,14 +233,14 @@ static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
u32 v;
int i;
- v = ath11k_pci_read32(ab, offset);
+ v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value)
return 0;
for (i = 0; i < 10; i++) {
- ath11k_pci_write32(ab, offset, (v & ~mask) | value);
+ ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
- v = ath11k_pci_read32(ab, offset);
+ v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value)
return 0;
@@ -397,23 +301,23 @@ static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
u32 val;
int i;
- val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
/* PCIe link seems very unstable after the Hot Reset */
for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
if (val == 0xffffffff)
mdelay(5);
- ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
- val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
}
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);
- val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
val |= GCC_GCC_PCIE_HOT_RST_VAL;
- ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
- val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
@@ -427,21 +331,21 @@ static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
* So when the SBL is downloaded again, it will enable the interrupt,
* receive the stale interrupt, and crash immediately.
*/
- ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
+ ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{
u32 val;
- val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
+ val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
- ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
+ ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
- ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
mdelay(5);
}
@@ -463,463 +367,6 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
ath11k_mhi_set_mhictrl_reset(ab);
}
-int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
-
- return pci_irq_vector(pci_dev, vector);
-}
-
-static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
- u32 *msi_addr_hi)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- struct pci_dev *pci_dev = to_pci_dev(ab->dev);
-
- pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
- msi_addr_lo);
-
- if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
- pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
- msi_addr_hi);
- } else {
- *msi_addr_hi = 0;
- }
-}
-
-int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
- int *num_vectors, u32 *user_base_data,
- u32 *base_vector)
-{
- struct ath11k_base *ab = ab_pci->ab;
- const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
- int idx;
-
- for (idx = 0; idx < msi_config->total_users; idx++) {
- if (strcmp(user_name, msi_config->users[idx].name) == 0) {
- *num_vectors = msi_config->users[idx].num_vectors;
- *base_vector = msi_config->users[idx].base_vector;
- *user_base_data = *base_vector + ab_pci->msi_ep_base_data;
-
- ath11k_dbg(ab, ATH11K_DBG_PCI,
- "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
- user_name, *num_vectors, *user_base_data,
- *base_vector);
-
- return 0;
- }
- }
-
- ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
-
- return -EINVAL;
-}
-
-static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
- u32 *msi_idx)
-{
- u32 i, msi_data_idx;
-
- for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- if (ce_id == i)
- break;
-
- msi_data_idx++;
- }
- *msi_idx = msi_data_idx;
-}
-
-static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
- int *num_vectors, u32 *user_base_data,
- u32 *base_vector)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
-
- return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
- num_vectors, user_base_data,
- base_vector);
-}
-
-static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
-{
- int i, j;
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
- for (j = 0; j < irq_grp->num_irq; j++)
- free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
-
- netif_napi_del(&irq_grp->napi);
- }
-}
-
-static void ath11k_pci_free_irq(struct ath11k_base *ab)
-{
- int i, irq_idx;
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
- free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
- }
-
- ath11k_pci_free_ext_irq(ab);
-}
-
-static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 irq_idx;
-
- /* In case of one MSI vector, we handle irq enable/disable in a
- * uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
- enable_irq(ab->irq_num[irq_idx]);
-}
-
-static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 irq_idx;
-
- /* In case of one MSI vector, we handle irq enable/disable in a
- * uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
- disable_irq_nosync(ab->irq_num[irq_idx]);
-}
-
-static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
-{
- int i;
-
- clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
- ath11k_pci_ce_irq_disable(ab, i);
- }
-}
-
-static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
-{
- int i;
- int irq_idx;
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
- synchronize_irq(ab->irq_num[irq_idx]);
- }
-}
-
-static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
-{
- struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
- int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
-
- ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
-
- enable_irq(ce_pipe->ab->irq_num[irq_idx]);
-}
-
-static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
-{
- struct ath11k_ce_pipe *ce_pipe = arg;
- struct ath11k_base *ab = ce_pipe->ab;
- int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
-
- if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
- return IRQ_HANDLED;
-
- /* last interrupt received for this CE */
- ce_pipe->timestamp = jiffies;
-
- disable_irq_nosync(ab->irq_num[irq_idx]);
-
- tasklet_schedule(&ce_pipe->intr_tq);
-
- return IRQ_HANDLED;
-}
-
-static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
- int i;
-
- /* In case of one MSI vector, we handle irq enable/disable
- * in a uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- for (i = 0; i < irq_grp->num_irq; i++)
- disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-}
-
-static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
-{
- int i;
-
- clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
-
- ath11k_pci_ext_grp_disable(irq_grp);
-
- if (irq_grp->napi_enabled) {
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
- irq_grp->napi_enabled = false;
- }
- }
-}
-
-static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
- int i;
-
- /* In case of one MSI vector, we handle irq enable/disable in a
- * uniform way since we only have one irq
- */
- if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return;
-
- for (i = 0; i < irq_grp->num_irq; i++)
- enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-}
-
-static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
-{
- int i;
-
- set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
- if (!irq_grp->napi_enabled) {
- napi_enable(&irq_grp->napi);
- irq_grp->napi_enabled = true;
- }
- ath11k_pci_ext_grp_enable(irq_grp);
- }
-}
-
-static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
-{
- int i, j, irq_idx;
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
- for (j = 0; j < irq_grp->num_irq; j++) {
- irq_idx = irq_grp->irqs[j];
- synchronize_irq(ab->irq_num[irq_idx]);
- }
- }
-}
-
-static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
-{
- __ath11k_pci_ext_irq_disable(ab);
- ath11k_pci_sync_ext_irqs(ab);
-}
-
-static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
-{
- struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
- struct ath11k_ext_irq_grp,
- napi);
- struct ath11k_base *ab = irq_grp->ab;
- int work_done;
- int i;
-
- work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- for (i = 0; i < irq_grp->num_irq; i++)
- enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
- }
-
- if (work_done > budget)
- work_done = budget;
-
- return work_done;
-}
-
-static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
-{
- struct ath11k_ext_irq_grp *irq_grp = arg;
- struct ath11k_base *ab = irq_grp->ab;
- int i;
-
- if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
- return IRQ_HANDLED;
-
- ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
-
- /* last interrupt received for this group */
- irq_grp->timestamp = jiffies;
-
- for (i = 0; i < irq_grp->num_irq; i++)
- disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-
- napi_schedule(&irq_grp->napi);
-
- return IRQ_HANDLED;
-}
-
-static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- int i, j, ret, num_vectors = 0;
- u32 user_base_data = 0, base_vector = 0;
-
- ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
- &num_vectors,
- &user_base_data,
- &base_vector);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- u32 num_irq = 0;
-
- irq_grp->ab = ab;
- irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
-
- if (ab->hw_params.ring_mask->tx[i] ||
- ab->hw_params.ring_mask->rx[i] ||
- ab->hw_params.ring_mask->rx_err[i] ||
- ab->hw_params.ring_mask->rx_wbm_rel[i] ||
- ab->hw_params.ring_mask->reo_status[i] ||
- ab->hw_params.ring_mask->rxdma2host[i] ||
- ab->hw_params.ring_mask->host2rxdma[i] ||
- ab->hw_params.ring_mask->rx_mon_status[i]) {
- num_irq = 1;
- }
-
- irq_grp->num_irq = num_irq;
- irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
-
- for (j = 0; j < irq_grp->num_irq; j++) {
- int irq_idx = irq_grp->irqs[j];
- int vector = (i % num_vectors) + base_vector;
- int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
-
- ab->irq_num[irq_idx] = irq;
-
- ath11k_dbg(ab, ATH11K_DBG_PCI,
- "irq:%d group:%d\n", irq, i);
-
- irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
- ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
- ab_pci->irq_flags,
- "DP_EXT_IRQ", irq_grp);
- if (ret) {
- ath11k_err(ab, "failed request irq %d: %d\n",
- vector, ret);
- return ret;
- }
- }
- ath11k_pci_ext_grp_disable(irq_grp);
- }
-
- return 0;
-}
-
-static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
- const struct cpumask *m)
-{
- if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
- return 0;
-
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
-}
-
-static int ath11k_pci_config_irq(struct ath11k_base *ab)
-{
- struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- struct ath11k_ce_pipe *ce_pipe;
- u32 msi_data_start;
- u32 msi_data_count, msi_data_idx;
- u32 msi_irq_start;
- unsigned int msi_data;
- int irq, i, ret, irq_idx;
-
- ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
- "CE", &msi_data_count,
- &msi_data_start, &msi_irq_start);
- if (ret)
- return ret;
-
- ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
- if (ret) {
- ath11k_err(ab, "failed to set irq affinity %d\n", ret);
- return ret;
- }
-
- /* Configure CE irqs */
- for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
- irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
- ce_pipe = &ab->ce.ce_pipe[i];
-
- irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
-
- tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
-
- ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
- ab_pci->irq_flags, irq_name[irq_idx],
- ce_pipe);
- if (ret) {
- ath11k_err(ab, "failed to request irq %d: %d\n",
- irq_idx, ret);
- goto err_irq_affinity_cleanup;
- }
-
- ab->irq_num[irq_idx] = irq;
- msi_data_idx++;
-
- ath11k_pci_ce_irq_disable(ab, i);
- }
-
- ret = ath11k_pci_ext_irq_config(ab);
- if (ret)
- goto err_irq_affinity_cleanup;
-
- return 0;
-
-err_irq_affinity_cleanup:
- ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- return ret;
-}
-
static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
@@ -935,19 +382,6 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
&cfg->shadow_reg_v2_len);
}
-static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
-{
- int i;
-
- set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
- ath11k_pci_ce_irq_enable(ab, i);
- }
-}
-
static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
{
struct pci_dev *dev = ab_pci->pdev;
@@ -976,18 +410,18 @@ static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
- const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ struct pci_dev *pci_dev = ab_pci->pdev;
struct msi_desc *msi_desc;
int num_vectors;
int ret;
- num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ num_vectors = pci_alloc_irq_vectors(pci_dev,
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
if (num_vectors == msi_config->total_vectors) {
- set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
- ab_pci->irq_flags = IRQF_SHARED;
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
} else {
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
1,
@@ -997,9 +431,8 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
ret = -EINVAL;
goto reset_msi_config;
}
- clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
- ab_pci->msi_config = &msi_config_one_msi;
- ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+ clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+ ab->pci.msi.config = &msi_config_one_msi;
ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n");
}
ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
@@ -1013,11 +446,19 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
goto free_msi_vector;
}
- ab_pci->msi_ep_base_data = msi_desc->msg.data;
- if (msi_desc->msi_attrib.is_64)
- set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
+ ab->pci.msi.ep_base_data = msi_desc->msg.data;
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ &ab->pci.msi.addr_lo);
+
+ if (msi_desc->pci.msi_attrib.is_64) {
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ &ab->pci.msi.addr_hi);
+ } else {
+ ab->pci.msi.addr_hi = 0;
+ }
- ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);
return 0;
@@ -1044,10 +485,10 @@ static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
return -EINVAL;
}
- ab_pci->msi_ep_base_data = msi_desc->msg.data;
+ ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
- ab_pci->msi_ep_base_data);
+ ab_pci->ab->pci.msi.ep_base_data);
return 0;
}
@@ -1160,7 +601,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
int ret;
ab_pci->register_window = 0;
- clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
/* Disable ASPM during firmware download due to problems switching
@@ -1176,7 +617,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return ret;
}
- if (ab->bus_params.static_window_map)
+ if (ab->hw_params.static_window_map)
ath11k_pci_select_static_window(ab_pci);
return 0;
@@ -1194,7 +635,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
ath11k_mhi_stop(ab_pci);
- clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -1202,144 +643,68 @@ static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
- ath11k_mhi_suspend(ar_pci);
-
- return 0;
+ return ath11k_mhi_suspend(ar_pci);
}
static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
- ath11k_mhi_resume(ar_pci);
-
- return 0;
-}
-
-static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
-{
- int i;
-
- for (i = 0; i < ab->hw_params.ce_count; i++) {
- struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
-
- if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
- continue;
-
- tasklet_kill(&ce_pipe->intr_tq);
- }
+ return ath11k_mhi_resume(ar_pci);
}
-static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab)
+static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
- ath11k_pci_ce_irqs_disable(ab);
- ath11k_pci_sync_ce_irqs(ab);
- ath11k_pci_kill_tasklets(ab);
+ ath11k_pcic_ce_irqs_enable(ab);
}
-static void ath11k_pci_stop(struct ath11k_base *ab)
+static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
- ath11k_pci_ce_irq_disable_sync(ab);
- ath11k_ce_cleanup_pipes(ab);
+ ath11k_pcic_ce_irq_disable_sync(ab);
}
static int ath11k_pci_start(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
-
/* TODO: for now don't restore ASPM in case of single MSI
* vector as MHI register reading in M2 causes system hang.
*/
- if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
ath11k_pci_aspm_restore(ab_pci);
else
ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
- ath11k_pci_ce_irqs_enable(ab);
- ath11k_ce_rx_post_buf(ab);
-
- return 0;
-}
-
-static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
-{
- ath11k_pci_ce_irqs_enable(ab);
-}
-
-static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
-{
- ath11k_pci_ce_irq_disable_sync(ab);
-}
-
-static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe)
-{
- const struct service_to_pipe *entry;
- bool ul_set = false, dl_set = false;
- int i;
-
- for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
- entry = &ab->hw_params.svc_to_ce_map[i];
-
- if (__le32_to_cpu(entry->service_id) != service_id)
- continue;
-
- switch (__le32_to_cpu(entry->pipedir)) {
- case PIPEDIR_NONE:
- break;
- case PIPEDIR_IN:
- WARN_ON(dl_set);
- *dl_pipe = __le32_to_cpu(entry->pipenum);
- dl_set = true;
- break;
- case PIPEDIR_OUT:
- WARN_ON(ul_set);
- *ul_pipe = __le32_to_cpu(entry->pipenum);
- ul_set = true;
- break;
- case PIPEDIR_INOUT:
- WARN_ON(dl_set);
- WARN_ON(ul_set);
- *dl_pipe = __le32_to_cpu(entry->pipenum);
- *ul_pipe = __le32_to_cpu(entry->pipenum);
- dl_set = true;
- ul_set = true;
- break;
- }
- }
-
- if (WARN_ON(!ul_set || !dl_set))
- return -ENOENT;
+ ath11k_pcic_start(ab);
return 0;
}
static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.start = ath11k_pci_start,
- .stop = ath11k_pci_stop,
- .read32 = ath11k_pci_read32,
- .write32 = ath11k_pci_write32,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .read = ath11k_pcic_read,
.power_down = ath11k_pci_power_down,
.power_up = ath11k_pci_power_up,
.suspend = ath11k_pci_hif_suspend,
.resume = ath11k_pci_hif_resume,
- .irq_enable = ath11k_pci_ext_irq_enable,
- .irq_disable = ath11k_pci_ext_irq_disable,
- .get_msi_address = ath11k_pci_get_msi_address,
- .get_user_msi_vector = ath11k_get_user_msi_assignment,
- .map_service_to_pipe = ath11k_pci_map_service_to_pipe,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
- .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx,
+ .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
{
u32 soc_hw_version;
- soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
soc_hw_version);
*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
@@ -1349,16 +714,26 @@ static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *
*major, *minor);
}
+static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
+ const struct cpumask *m)
+{
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
+ return 0;
+
+ return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+}
+
static int ath11k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ const struct ath11k_pci_ops *pci_ops;
int ret;
- ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
- &ath11k_pci_bus_params);
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
+
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
@@ -1411,11 +786,11 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
- ab_pci->msi_config = &ath11k_msi_config[0];
+
+ pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- ab_pci->msi_config = &ath11k_msi_config[1];
- ab->bus_params.static_window_map = true;
+ pci_ops = &ath11k_pci_ops_qcn9074;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
@@ -1444,7 +819,8 @@ unsupported_wcn6855_soc:
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
- ab_pci->msi_config = &ath11k_msi_config[0];
+
+ pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -1453,6 +829,18 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ret = ath11k_pci_alloc_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to enable msi: %d\n", ret);
@@ -1481,12 +869,18 @@ unsupported_wcn6855_soc:
ath11k_pci_init_qmi_ce_config(ab);
- ret = ath11k_pci_config_irq(ab);
+ ret = ath11k_pcic_config_irq(ab);
if (ret) {
ath11k_err(ab, "failed to config irq: %d\n", ret);
goto err_ce_free;
}
+ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+ if (ret) {
+ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+ goto err_free_irq;
+ }
+
/* kernel may allocate a dummy vector before request_irq and
* then allocate a real vector when request_irq is called.
* So get msi_data here again to avoid spurious interrupt
@@ -1495,18 +889,21 @@ unsupported_wcn6855_soc:
ret = ath11k_pci_config_msi_data(ab_pci);
if (ret) {
ath11k_err(ab, "failed to config msi_data: %d\n", ret);
- goto err_free_irq;
+ goto err_irq_affinity_cleanup;
}
ret = ath11k_core_init(ab);
if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret);
- goto err_free_irq;
+ goto err_irq_affinity_cleanup;
}
return 0;
+err_irq_affinity_cleanup:
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
err_free_irq:
- ath11k_pci_free_irq(ab);
+ ath11k_pcic_free_irq(ab);
err_ce_free:
ath11k_ce_free_pipes(ab);
@@ -1550,7 +947,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
qmi_fail:
ath11k_mhi_unregister(ab_pci);
- ath11k_pci_free_irq(ab);
+ ath11k_pcic_free_irq(ab);
ath11k_pci_free_msi(ab_pci);
ath11k_pci_free_region(ab_pci);
@@ -1562,7 +959,9 @@ qmi_fail:
static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pci_power_down(ab);
}
@@ -1571,6 +970,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
+ return 0;
+ }
+
ret = ath11k_core_suspend(ab);
if (ret)
ath11k_warn(ab, "failed to suspend core: %d\n", ret);
@@ -1583,6 +987,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
+ return 0;
+ }
+
ret = ath11k_core_resume(ab);
if (ret)
ath11k_warn(ab, "failed to resume core: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 61d67b20a0eb..e9a01f344ec6 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -52,23 +53,8 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
-struct ath11k_msi_user {
- char *name;
- int num_vectors;
- u32 base_vector;
-};
-
-struct ath11k_msi_config {
- int total_vectors;
- int total_users;
- struct ath11k_msi_user *users;
-};
-
enum ath11k_pci_flags {
- ATH11K_PCI_FLAG_INIT_DONE,
- ATH11K_PCI_FLAG_IS_MSI_64,
ATH11K_PCI_ASPM_RESTORE,
- ATH11K_PCI_FLAG_MULTI_MSI_VECTORS,
};
struct ath11k_pci {
@@ -76,10 +62,8 @@ struct ath11k_pci {
struct ath11k_base *ab;
u16 dev_id;
char amss_path[100];
- u32 msi_ep_base_data;
struct mhi_controller *mhi_ctrl;
const struct ath11k_msi_config *msi_config;
- unsigned long mhi_state;
u32 register_window;
/* protects register_window above */
@@ -88,8 +72,6 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
-
- unsigned long irq_flags;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
@@ -97,11 +79,5 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
return (struct ath11k_pci *)ab->drv_priv;
}
-int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name,
- int *num_vectors, u32 *user_base_data,
- u32 *base_vector);
-int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector);
-void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value);
-u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset);
-
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
new file mode 100644
index 000000000000..380f9d37b644
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "pcic.h"
+#include "debug.h"
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
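+
irq_name[] is indexed by absolute vector slot, so the three MHI entries occupy slots 0-2 and ATH11K_PCI_IRQ_CE0_OFFSET (3, defined in pcic.h below) is the bias that turns a CE id into its table slot. A quick standalone illustration with a truncated copy of the table:

#include <stdio.h>

#define CE0_OFFSET 3	/* mirrors ATH11K_PCI_IRQ_CE0_OFFSET */

static const char *names[] = { "bhi", "mhi-er0", "mhi-er1", "ce0", "ce1" };

int main(void)
{
	int ce_id = 1;

	printf("%s\n", names[CE0_OFFSET + ce_id]);	/* prints "ce1" */
	return 0;
}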
+
+static const struct ath11k_msi_config ath11k_msi_config[] = {
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ },
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ },
+ {
+ .total_vectors = 28,
+ .total_users = 2,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "CE", .num_vectors = 10, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 10 },
+ },
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ },
+};
+
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
+{
+ const struct ath11k_msi_config *msi_config;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
+ msi_config = &ath11k_msi_config[i];
+
+ if (msi_config->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath11k_msi_config)) {
+ ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->pci.msi.config = msi_config;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
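+
Each table entry's users are expected to tile the vector budget: for the QCA6390 layout, 3 (MHI) + 10 (CE) + 1 (WAKE) + 18 (DP) = 32 = total_vectors, with every base_vector starting where the previous range ends; the same holds for QCN9074 (3 + 5 + 8 = 16). A hypothetical self-check over such a table, not part of the driver (the single-MSI fallback deliberately fails it, since all of its users alias vector 0):

/* Hypothetical consistency check: do the user ranges tile
 * [0, total_vectors) exactly?
 */
static bool ath11k_msi_config_is_contiguous(const struct ath11k_msi_config *cfg)
{
	int next = 0, i;

	for (i = 0; i < cfg->total_users; i++) {
		if (cfg->users[i].base_vector != next)
			return false;
		next += cfg->users[i].num_vectors;
	}

	return next == cfg->total_vectors;
}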
+
+static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ if (offset < ATH11K_PCI_WINDOW_START)
+ iowrite32(value, ab->mem + offset);
+ else
+ ab->pci.ops->window_write32(ab, offset, value);
+}
+
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ int ret = 0;
+ bool wakeup_required;
+
+ /* For an offset beyond BAR + 4K - 32, we may
+ * need to wake up the device for access.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ __ath11k_pcic_write32(ab, offset, value);
+
+ if (wakeup_required && !ret && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_write32);
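+
The bracket around __ath11k_pcic_write32() is careful on two counts: wakeup() is attempted only once the device is initialised and the offset lies outside the always-accessible first 4K - 32 bytes, and release() runs only when wakeup() was both needed and succeeded (!ret), so a failed wakeup can never cause an unbalanced put while the write itself is still attempted as a best effort. The same guard, reduced to an illustrative standalone pattern:

#include <stdbool.h>

struct dev_ops {
	int (*wakeup)(void *dev);
	void (*release)(void *dev);
};

/* Illustrative sketch: pair an optional "get" with its "put" only
 * when the get was both required and successful.
 */
static void guarded_write(void *dev, const struct dev_ops *ops,
			  bool wakeup_required, void (*do_write)(void *dev))
{
	int ret = 0;

	if (wakeup_required && ops->wakeup)
		ret = ops->wakeup(dev);

	do_write(dev);	/* best effort even if the wakeup failed */

	if (wakeup_required && !ret && ops->release)
		ops->release(dev);
}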
+
+static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ u32 val;
+
+ if (offset < ATH11K_PCI_WINDOW_START)
+ val = ioread32(ab->mem + offset);
+ else
+ val = ab->pci.ops->window_read32(ab, offset);
+
+ return val;
+}
+
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ int ret = 0;
+ u32 val;
+ bool wakeup_required;
+
+ /* For an offset beyond BAR + 4K - 32, we may
+ * need to wake up the device for access.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ val = __ath11k_pcic_read32(ab, offset);
+
+ if (wakeup_required && !ret && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return val;
+}
+EXPORT_SYMBOL(ath11k_pcic_read32);
+
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
+{
+ int ret = 0;
+ bool wakeup_required;
+ u32 *data = buf;
+ u32 i;
+
+ /* For an offset beyond BAR + 4K - 32, we may
+ * need to wake up the device for access.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup) {
+ ret = ab->pci.ops->wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to wakeup for read from 0x%x: %d\n",
+ start, ret);
+ return ret;
+ }
+ }
+
+ for (i = start; i < end + 1; i += 4)
+ *data++ = __ath11k_pcic_read32(ab, i);
+
+ if (wakeup_required && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_read);
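+
Note that the [start, end] range is inclusive and walked in u32 steps, so the caller's buffer must hold (end - start + 4) / 4 words. A hypothetical call (register addresses invented for illustration, assuming an initialised struct ath11k_base *ab):

/* Dump four consecutive 32-bit registers:
 * (0x300c - 0x3000 + 4) / 4 = 4 words.
 */
u32 regs[4];
int ret;

ret = ath11k_pcic_read(ab, regs, 0x3000, 0x300c);
if (ret)
	ath11k_warn(ab, "failed to read register range: %d\n", ret);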
+
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ *msi_addr_lo = ab->pci.msi.addr_lo;
+ *msi_addr_hi = ab->pci.msi.addr_hi;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ int idx;
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *base_vector = msi_config->users[idx].base_vector;
+ *user_base_data = *base_vector + ab->pci.msi.ep_base_data;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
+
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
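+
The loop compresses the CE id space: pipes flagged CE_ATTR_DIS_INTR consume no MSI slot, so a CE's msi_idx is its position among interrupt-enabled pipes only. If CE2 has interrupts disabled, CE3 lands on MSI index 2, as this standalone sketch (with hypothetical flags) shows:

#include <stdio.h>

#define DIS_INTR 0x1	/* stands in for CE_ATTR_DIS_INTR */

int main(void)
{
	/* Hypothetical per-CE flags: CE2 has interrupts disabled. */
	int flags[] = { 0, 0, DIS_INTR, 0, 0 };
	int ce_id = 3, i, msi_idx = 0;

	for (i = 0; i < 5; i++) {
		if (flags[i] & DIS_INTR)
			continue;
		if (i == ce_id)
			break;
		msi_idx++;
	}

	printf("CE%d -> msi_idx %d\n", ce_id, msi_idx);	/* prints 2 */
	return 0;
}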
+
+static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+void ath11k_pcic_free_irq(struct ath11k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_pcic_free_ext_irq(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_free_irq);
+
+static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ enable_irq(ce_pipe->ab->irq_num[irq_idx]);
+}
+
+static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+ struct ath11k_base *ab = ce_pipe->ab;
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable
+ * in a uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+ ath11k_pcic_ext_grp_disable(irq_grp);
+
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ if (!irq_grp->napi_enabled) {
+ dev_set_threaded(&irq_grp->napi_ndev, true);
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath11k_pcic_ext_grp_enable(irq_grp);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
+
+static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_pcic_ext_irq_disable(ab);
+ ath11k_pcic_sync_ext_irqs(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
+
+static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+ int i;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.ops->get_msi_irq(ab, vector);
+}
+
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_pcic_ext_grp_napi_poll);
+
+ if (ab->hw_params.ring_mask->tx[i] ||
+ ab->hw_params.ring_mask->rx[i] ||
+ ab->hw_params.ring_mask->rx_err[i] ||
+ ab->hw_params.ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params.ring_mask->reo_status[i] ||
+ ab->hw_params.ring_mask->rxdma2host[i] ||
+ ab->hw_params.ring_mask->host2rxdma[i] ||
+ ab->hw_params.ring_mask->rx_mon_status[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath11k_pcic_get_msi_irq(ab, vector);
+
+ if (irq < 0)
+ return irq;
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "irq:%d group:%d\n", irq, i);
+
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
+ irq_flags, "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+ }
+ ath11k_pcic_ext_grp_disable(irq_grp);
+ }
+
+ return 0;
+}
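+
Group-to-vector assignment is a plain modulo spread, vector = (i % num_vectors) + base_vector: with the 18 DP vectors starting at base 14 in the multi-MSI tables, the ext IRQ groups map one-to-one, while the single-vector fallback (num_vectors = 1, base 0) collapses every group onto the same irq. A standalone sketch of both cases (11 groups assumed, matching the driver's ATH11K_EXT_IRQ_GRP_NUM_MAX):

#include <stdio.h>

static void show_mapping(int num_vectors, int base_vector, int groups)
{
	int i;

	printf("num_vectors=%2d:", num_vectors);
	for (i = 0; i < groups; i++)
		printf(" %d", (i % num_vectors) + base_vector);
	printf("\n");
}

int main(void)
{
	show_mapping(18, 14, 11);	/* multi-MSI DP: 14 15 ... 24 */
	show_mapping(1, 0, 11);		/* single-MSI fallback: all 0 */
	return 0;
}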
+
+int ath11k_pcic_config_irq(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ /* Configure CE irqs */
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath11k_pcic_get_msi_irq(ab, msi_data);
+ if (irq < 0)
+ return irq;
+
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
+
+ ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
+ irq_flags, irq_name[irq_idx], ce_pipe);
+ if (ret) {
+ ath11k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+
+ ret = ath11k_pcic_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_config_irq);
+
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
+
+static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irqs_disable(ab);
+ ath11k_pcic_sync_ce_irqs(ab);
+ ath11k_pcic_kill_tasklets(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
+
+void ath11k_pcic_stop(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irq_disable_sync(ab);
+ ath11k_ce_cleanup_pipes(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_stop);
+
+int ath11k_pcic_start(struct ath11k_base *ab)
+{
+ set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+
+ ath11k_pcic_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_start);
+
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
+
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops)
+{
+ if (!pci_ops)
+ return 0;
+
+ /* Return error if mandatory pci_ops callbacks are missing */
+ if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
+ !pci_ops->window_read32)
+ return -EINVAL;
+
+ ab->pci.ops = pci_ops;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
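+
Only the three callbacks every bus needs are mandatory; wakeup and release stay optional, which is exactly what lets QCN9074 (static window map, no MHI handshake) pass NULL for both while QCA6390 supplies the MHI get/put wrappers. A sketch of a hypothetical caller, where the my_* helpers are assumed, not real driver symbols:

/* Hypothetical ops table for a bus with no wakeup handshake. */
static const struct ath11k_pci_ops hypothetical_ops = {
	.wakeup = NULL,				/* optional */
	.release = NULL,			/* optional */
	.get_msi_irq = my_get_msi_irq,		/* assumed helper */
	.window_write32 = my_window_write32,	/* assumed helper */
	.window_read32 = my_window_read32,	/* assumed helper */
};

ret = ath11k_pcic_register_pci_ops(ab, &hypothetical_ops);
if (ret)
	return ret;	/* a mandatory callback was missing */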
+
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
+
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+ struct ath11k_ce_pipe *ce_pipe;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ synchronize_irq(ab->irq_num[irq_idx]);
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
new file mode 100644
index 000000000000..ac012e88bf6d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_PCI_CMN_H
+#define _ATH11K_PCI_CMN_H
+
+#include "core.h"
+
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+#define ATH11K_PCI_IRQ_DP_OFFSET 14
+
+#define ATH11K_PCI_CE_WAKE_IRQ 2
+
+#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
+#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
+#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
+#define ATH11K_PCI_WINDOW_START 0x80000
+#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0)
+
+/* The first 4K of BAR0 is always accessible, so there is
+ * no need to force a wakeup for accesses below this offset.
+ * 4K - 32 = 0xFE0
+ */
+#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0
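Taken together, these masks split a register offset into a window select field (bits 24:19) and a 512 KiB in-window offset (bits 18:0). A minimal sketch of how the windowed access helpers typically combine them (illustrative only; assumes linux/bitfield.h for FIELD_GET, with locking around the window select elided):

	static void example_window_write32(struct ath11k_base *ab,
					   u32 offset, u32 value)
	{
		u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);

		/* select the 512 KiB window containing the register */
		iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
			  ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
		/* access the register through the fixed window aperture */
		iowrite32(value, ab->mem + ATH11K_PCI_WINDOW_START +
				 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	}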
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+void ath11k_pcic_free_irq(struct ath11k_base *ab);
+int ath11k_pcic_config_irq(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
+void ath11k_pcic_stop(struct ath11k_base *ab);
+int ath11k_pcic_start(struct ath11k_base *ab);
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 85471f8b3563..1ae7af02c364 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -1,23 +1,22 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
-struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
- const u8 *addr)
+static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
+ int peer_id)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
- if (peer->vdev_id != vdev_id)
- continue;
- if (!ether_addr_equal(peer->addr, addr))
+ if (peer->peer_id != peer_id)
continue;
return peer;
@@ -26,15 +25,15 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
return NULL;
}
-static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab,
- u8 pdev_idx, const u8 *addr)
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
- if (peer->pdev_idx != pdev_idx)
+ if (peer->vdev_id != vdev_id)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
@@ -52,14 +51,13 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
- list_for_each_entry(peer, &ab->peers, list) {
- if (!ether_addr_equal(peer->addr, addr))
- continue;
+ if (!ab->rhead_peer_addr)
+ return NULL;
- return peer;
- }
+ peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
+ ab->rhash_peer_addr_param);
- return NULL;
+ return peer;
}
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
@@ -69,11 +67,13 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
- list_for_each_entry(peer, &ab->peers, list)
- if (peer_id == peer->peer_id)
- return peer;
+ if (!ab->rhead_peer_id)
+ return NULL;
- return NULL;
+ peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
+ ab->rhash_peer_id_param);
+
+ return peer;
}
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
@@ -99,7 +99,7 @@ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, peer_id);
+ peer = ath11k_peer_find_list_by_id(ab, peer_id);
if (!peer) {
ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id);
@@ -167,6 +167,76 @@ static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
return 0;
}
+static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params,
+ void *key)
+{
+ struct ath11k_peer *tmp;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
+
+ if (!tmp)
+ return 0;
+ else if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ else
+ return -EEXIST;
+}
+
+static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ ret = rhashtable_remove_fast(rtbl, rhead, *params);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ return 0;
+}
+
+static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param, &peer->peer_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param, &peer->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ goto err_clean;
+ }
+
+ return 0;
+
+err_clean:
+ ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ return ret;
+}
+
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_peer *peer, *tmp;
@@ -174,6 +244,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
lockdep_assert_held(&ar->conf_mutex);
+ mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
@@ -182,12 +253,14 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
+ ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
}
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
@@ -217,17 +290,51 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
return 0;
}
-int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
+ struct ath11k_peer *peer;
+ struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, addr);
+ /* Check if the found peer is the one we want to remove.
+ * While the sta is transitioning to another band we may
+ * have two peers with the same addr assigned to different
+ * vdev_ids. Make sure we are deleting the correct peer.
+ */
+ if (peer && peer->vdev_id == vdev_id)
+ ath11k_peer_rhash_delete(ab, peer);
+
+ /* Fall back to a peer list search if the correct peer can't be
+ * found. Skip deleting the peer from the rhash in that case: it
+ * was already removed from the rhash during peer create.
+ */
+ if (!peer)
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ ath11k_warn(ab,
+ "failed to find peer vdev_id %d addr %pM in delete\n",
+ vdev_id, addr);
+ return -EINVAL;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
- ath11k_warn(ar->ab,
+ ath11k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
@@ -237,6 +344,19 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
if (ret)
return ret;
+ return 0;
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = __ath11k_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
ar->num_peers--;
return 0;
@@ -252,7 +372,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
- int ret;
+ int ret, fbret;
lockdep_assert_held(&ar->conf_mutex);
@@ -263,10 +383,19 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
}
spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
+ peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
- spin_unlock_bh(&ar->ab->base_lock);
- return -EINVAL;
+ if (peer->vdev_id == param->vdev_id) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ return -EINVAL;
+ }
+
+ /* Assume the sta is transitioning to another band,
+ * so remove the peer from the rhash here.
+ */
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
}
spin_unlock_bh(&ar->ab->base_lock);
@@ -283,30 +412,25 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
if (ret)
return ret;
+ mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
- reinit_completion(&ar->peer_delete_done);
-
- ret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
- param->vdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- param->vdev_id, param->peer_addr);
- return ret;
- }
-
- ret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
- param->peer_addr);
- if (ret)
- return ret;
+ ret = -ENOENT;
+ goto cleanup;
+ }
- return -ENOENT;
+ ret = ath11k_peer_rhash_add(ar->ab, peer);
+ if (ret) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ goto cleanup;
}
peer->pdev_idx = ar->pdev_idx;
@@ -333,6 +457,213 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
return 0;
+
+cleanup:
+ fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
+
+ return ret;
+}
+
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_id_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_id)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_id);
+ rhash_id_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_id_tbl) {
+ ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_id_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, peer_id);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_id);
+ param->key_len = sizeof_field(struct ath11k_peer, peer_id);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_id_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_id) {
+ ab->rhead_peer_id = rhash_id_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_id_tbl);
+err_free:
+ kfree(rhash_id_tbl);
+
+ return ret;
+}
+
+static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_addr_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_addr)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_addr);
+ rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_addr_tbl) {
+ ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_addr_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, addr);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
+ param->key_len = sizeof_field(struct ath11k_peer, addr);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_addr_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_addr) {
+ ab->rhead_peer_addr = rhash_addr_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_addr_tbl);
+err_free:
+ kfree(rhash_addr_tbl);
+
+ return ret;
+}
+
+static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_id);
+ kfree(ab->rhead_peer_id);
+ ab->rhead_peer_id = NULL;
+}
+
+static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_addr)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_addr);
+ kfree(ab->rhead_peer_addr);
+ ab->rhead_peer_addr = NULL;
+}
+
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ret = ath11k_peer_rhash_id_tbl_init(ab);
+ if (ret)
+ goto out;
+
+ ret = ath11k_peer_rhash_addr_tbl_init(ab);
+ if (ret)
+ goto cleanup_tbl;
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ return 0;
+
+cleanup_tbl:
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+out:
+ mutex_unlock(&ab->tbl_mtx_lock);
+ return ret;
+}
+
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ath11k_peer_rhash_addr_tbl_destroy(ab);
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+
+ mutex_unlock(&ab->tbl_mtx_lock);
}
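Both peer tables rely on rhashtable_params describing where the key and the rhash_head linkage live inside struct ath11k_peer. A self-contained sketch of the same pattern with a hypothetical node type (not driver code) may make the parameters easier to read:

	#include <linux/rhashtable.h>

	struct example_node {
		u32 id;			/* lookup key */
		struct rhash_head node;	/* table linkage */
	};

	static const struct rhashtable_params example_params = {
		.key_offset	= offsetof(struct example_node, id),
		.head_offset	= offsetof(struct example_node, node),
		.key_len	= sizeof_field(struct example_node, id),
		.automatic_shrinking = true,
	};

	/* rhashtable_lookup_get_insert_fast() returns NULL when the entry
	 * was inserted, an existing entry on a duplicate key, or ERR_PTR()
	 * on failure - the tri-state the driver maps to 0 / -EEXIST /
	 * PTR_ERR() in ath11k_peer_rhash_insert().
	 */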
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index 63fe5665badf..6dd17bafe3a0 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_PEER_H
@@ -20,6 +21,11 @@ struct ath11k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+ /* peer id based rhashtable list pointer */
+ struct rhash_head rhash_id;
+ /* peer addr based rhashtable list pointer */
+ struct rhash_head rhash_addr;
+
/* Info used in MMIC verification of
* RX fragments
*/
@@ -47,5 +53,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
int vdev_id);
-
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab);
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab);
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 65d3c6ba35ae..51de2208b789 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -12,9 +13,14 @@
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
+#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+
+#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
bool ath11k_cold_boot_cal = 1;
EXPORT_SYMBOL(ath11k_cold_boot_cal);
@@ -745,6 +751,68 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
+static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
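Each qmi_elem_info entry describes one TLV of the wire format: its QMI data type, element size, TLV tag, and the offset of the decoded field, with QMI_EOTI terminating the table. In the response above, the optional BAR fields pair a QMI_OPT_FLAG *_valid byte with a value entry under the same tlv_type. A minimal sketch for a hypothetical message with one optional u32 (names invented for illustration):

	struct example_resp_msg_v01 {
		u8 value_valid;
		u32 value;
	};

	static struct qmi_elem_info example_resp_msg_v01_ei[] = {
		{
			.data_type	= QMI_OPT_FLAG,
			.elem_len	= 1,
			.elem_size	= sizeof(u8),
			.array_type	= NO_ARRAY,
			.tlv_type	= 0x10,
			.offset		= offsetof(struct example_resp_msg_v01,
						   value_valid),
		},
		{
			.data_type	= QMI_UNSIGNED_4_BYTE,
			.elem_len	= 1,
			.elem_size	= sizeof(u32),
			.array_type	= NO_ARRAY,
			.tlv_type	= 0x10,
			.offset		= offsetof(struct example_resp_msg_v01,
						   value),
		},
		{
			.data_type	= QMI_EOTI,
			.array_type	= NO_ARRAY,
			.tlv_type	= QMI_COMMON_TLV_TYPE,
		},
	};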
static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
@@ -1628,6 +1696,13 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
+static struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
@@ -1645,7 +1720,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.bdf_support_valid = 1;
req.bdf_support = 1;
- if (ab->bus_params.m3_fw_support) {
+ if (ab->hw_params.m3_fw_support) {
req.m3_support_valid = 1;
req.m3_support = 1;
req.m3_cache_support_valid = 1;
@@ -1674,6 +1749,9 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
}
+ if (ab->hw_params.global_reset)
+ req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n");
ret = qmi_txn_init(&ab->qmi.handle, &txn,
@@ -1728,10 +1806,6 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
req->client_id = QMI_WLANFW_CLIENT_ID;
req->fw_ready_enable_valid = 1;
req->fw_ready_enable = 1;
- req->request_mem_enable_valid = 1;
- req->request_mem_enable = 1;
- req->fw_mem_ready_enable_valid = 1;
- req->fw_mem_ready_enable = 1;
req->cal_done_enable_valid = 1;
req->cal_done_enable = 1;
req->fw_init_done_enable_valid = 1;
@@ -1740,6 +1814,17 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
req->pin_connect_result_enable_valid = 0;
req->pin_connect_result_enable = 0;
+ /* WCN6750 doesn't request DDR memory via QMI;
+ * instead it uses a fixed 12 MB reserved memory
+ * region in DDR.
+ */
+ if (!ab->hw_params.fixed_fw_mem) {
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ }
+
ret = qmi_txn_init(handle, &txn,
qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
if (ret < 0)
@@ -1794,10 +1879,10 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
/* For QCA6390 by default FW requests a block of ~4M contiguous
* DMA memory, it's hard to allocate from OS. So host returns
- * failure to FW and FW will then request mulitple blocks of small
+ * failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!(ab->bus_params.fixed_mem_region ||
+ if (!(ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem_delayed) {
delayed = true;
@@ -1867,7 +1952,7 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
- if ((ab->bus_params.fixed_mem_region ||
+ if ((ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem[i].iaddr)
iounmap(ab->qmi.target_mem[i].iaddr);
@@ -1892,6 +1977,21 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
+
+ /* The firmware reloads during cold boot and firmware
+ * recovery; in that case there is no need to allocate
+ * the memory for the FW again. Reuse the chunk only if
+ * both its type and size are unchanged.
+ */
+ if (chunk->vaddr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ continue;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->size,
+ chunk->vaddr, chunk->paddr);
+ chunk->vaddr = NULL;
+ }
+
chunk->vaddr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
@@ -1912,6 +2012,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->type);
return -EINVAL;
}
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
}
return 0;
@@ -1932,10 +2034,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get hremote_node\n");
- return ret;
+ return -ENODEV;
}
ret = of_address_to_resource(hremote_node, 0, &res);
+ of_node_put(hremote_node);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get reg from hremote\n");
@@ -2000,6 +2103,80 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
return 0;
}
+static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_device_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_device_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ void __iomem *bar_addr_va;
+ int ret;
+
+ /* device info message req is only sent for hybrid bus devices */
+ if (!ab->hw_params.hybrid_bus_type)
+ return 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlfw_device_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_DEVICE_INFO_REQ_V01,
+ QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_device_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi device info request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr_valid || !resp.bar_size_valid) {
+ ath11k_warn(ab, "qmi device info response invalid: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr ||
+ resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) {
+ ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n",
+ resp.bar_addr, resp.bar_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size);
+ if (!bar_addr_va) {
+ ath11k_warn(ab, "qmi device info ioremap failed\n");
+ ab->mem_len = 0;
+ ret = -EIO;
+ goto out;
+ }
+
+ ab->mem = bar_addr_va;
+ ab->mem_len = resp.bar_size;
+
+ return 0;
+out:
+ return ret;
+}
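The function above follows the standard QMI transaction shape: bind a txn to the expected response layout, send the request, and wait with a timeout, cancelling the txn only when the send itself fails. Stripped to its skeleton (msg_id, max_len, req_ei/resp_ei and the req/resp structs are placeholders here):

	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&ab->qmi.handle, &txn, resp_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
			       msg_id, max_len, req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);	/* unbind the txn on send failure */
		return ret;
	}

	/* completion is signalled by the matching response, or times out */
	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));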
+
static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req;
@@ -2007,6 +2184,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
struct qmi_txn txn;
int ret = 0;
int r;
+ char *fw_build_id;
+ int fw_build_id_mask_len;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@@ -2057,13 +2236,13 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
if (resp.fw_version_info_valid) {
ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
- strlcpy(ab->qmi.target.fw_build_timestamp,
+ strscpy(ab->qmi.target.fw_build_timestamp,
resp.fw_version_info.fw_build_timestamp,
sizeof(ab->qmi.target.fw_build_timestamp));
}
if (resp.fw_build_id_valid)
- strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
if (resp.eeprom_read_timeout_valid) {
@@ -2072,6 +2251,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n");
}
+ fw_build_id = ab->qmi.target.fw_build_id;
+ fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK);
+ if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len))
+ fw_build_id = fw_build_id + fw_build_id_mask_len;
+
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
@@ -2079,7 +2263,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp,
- ab->qmi.target.fw_build_id);
+ fw_build_id);
+
+ r = ath11k_core_check_smbios(ab);
+ if (r)
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
r = ath11k_core_check_dt(ab);
if (r)
@@ -2106,7 +2294,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
memset(&resp, 0, sizeof(resp));
- if (ab->bus_params.fixed_bdf_addr) {
+ if (ab->hw_params.fixed_bdf_addr) {
bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
if (!bdf_addr) {
ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
@@ -2135,7 +2323,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
req->end = 1;
}
- if (ab->bus_params.fixed_bdf_addr ||
+ if (ab->hw_params.fixed_bdf_addr ||
type == ATH11K_QMI_FILE_TYPE_EEPROM) {
req->data_valid = 0;
req->end = 1;
@@ -2144,7 +2332,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
memcpy(req->data, temp, req->data_len);
}
- if (ab->bus_params.fixed_bdf_addr) {
+ if (ab->hw_params.fixed_bdf_addr) {
if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
bdf_addr += ab->hw_params.fw.cal_offset;
@@ -2183,7 +2371,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
goto err_iounmap;
}
- if (ab->bus_params.fixed_bdf_addr ||
+ if (ab->hw_params.fixed_bdf_addr ||
type == ATH11K_QMI_FILE_TYPE_EEPROM) {
remaining = 0;
} else {
@@ -2196,7 +2384,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
}
err_iounmap:
- if (ab->bus_params.fixed_bdf_addr)
+ if (ab->hw_params.fixed_bdf_addr)
iounmap(bdf_addr);
err_free_req:
@@ -2302,9 +2490,6 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
char path[100];
int ret;
- if (m3_mem->vaddr || m3_mem->size)
- return 0;
-
fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
if (IS_ERR(fw)) {
ret = PTR_ERR(fw);
@@ -2314,6 +2499,9 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
return ret;
}
+ if (m3_mem->vaddr || m3_mem->size)
+ goto skip_m3_alloc;
+
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
fw->size, &m3_mem->paddr,
GFP_KERNEL);
@@ -2324,6 +2512,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
return -ENOMEM;
}
+skip_m3_alloc:
memcpy(m3_mem->vaddr, fw->data, fw->size);
m3_mem->size = fw->size;
release_firmware(fw);
@@ -2335,12 +2524,13 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- if (!ab->bus_params.m3_fw_support || !m3_mem->vaddr)
+ if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr)
return;
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
}
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
@@ -2354,7 +2544,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- if (ab->bus_params.m3_fw_support) {
+ if (ab->hw_params.m3_fw_support) {
ret = ath11k_qmi_m3_load(ab);
if (ret) {
ath11k_err(ab, "failed to load m3 firmware: %d", ret);
@@ -2476,7 +2666,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
memset(&resp, 0, sizeof(resp));
req->host_version_valid = 1;
- strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ strscpy(req->host_version, ATH11K_HOST_VERSION_STRING,
sizeof(req->host_version));
req->tgt_cfg_valid = 1;
@@ -2682,27 +2872,6 @@ ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
return 0;
}
-static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
-{
- struct ath11k_base *ab = qmi->ab;
- int ret;
-
- ret = ath11k_qmi_fw_ind_register_send(ab);
- if (ret < 0) {
- ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
- ret);
- return ret;
- }
-
- ret = ath11k_qmi_host_cap_send(ab);
- if (ret < 0) {
- ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
- return ret;
- }
-
- return ret;
-}
-
static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
{
struct ath11k_base *ab = qmi->ab;
@@ -2729,6 +2898,12 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
+ ret = ath11k_qmi_request_device_info(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to request qmi device info: %d\n", ret);
+ return ret;
+ }
+
if (ab->hw_params.supports_regdb)
ath11k_qmi_load_bdf_qmi(ab, true);
@@ -2738,9 +2913,33 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
- ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ return 0;
+}
+
+static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
if (ret < 0) {
- ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret);
+ ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
+ return ret;
+ }
+
+ if (!ab->hw_params.fixed_fw_mem)
+ return ret;
+
+ ret = ath11k_qmi_event_load_bdf(qmi);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret);
return ret;
}
@@ -2773,7 +2972,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- if (ab->bus_params.fixed_mem_region ||
+ if (ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_qmi_assign_target_mem_chunk(ab);
if (ret) {
@@ -2814,6 +3013,12 @@ static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
struct ath11k_base *ab = qmi->ab;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware ready\n");
+
+ if (!ab->qmi.cal_done) {
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ }
+
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
}
@@ -2831,6 +3036,19 @@ static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cold boot calibration done\n");
}
+static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware init done\n");
+}
+
static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
{
.type = QMI_INDICATION,
@@ -2861,6 +3079,14 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
.fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
},
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
+ .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_init_done_cb,
+ },
};
static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -2940,10 +3166,20 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH11K_QMI_EVENT_FW_MEM_READY:
ret = ath11k_qmi_event_load_bdf(qmi);
- if (ret < 0)
+ if (ret < 0) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab,
+ "failed to send qmi m3 info req: %d\n", ret);
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ }
+
break;
- case ATH11K_QMI_EVENT_FW_READY:
+ case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
ath11k_hal_dump_srng_stats(ab);
@@ -2958,11 +3194,31 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
- ath11k_core_qmi_firmware_ready(ab);
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
}
break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ /* For targets requiring a FW restart upon cold
+ * boot completion, there is no need to process
+ * FW ready; such targets will receive the FW init
+ * done message after the FW restart.
+ */
+ if (ab->hw_params.cbcal_restart_fw)
+ break;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ath11k_core_qmi_firmware_ready(ab);
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
break;
default:
@@ -3024,3 +3280,8 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_qmi_deinit_service);
+void ath11k_qmi_free_resource(struct ath11k_base *ab)
+{
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ath11k_qmi_m3_free(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index ba2eff4d59cb..0909d53cefeb 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@@ -20,22 +21,26 @@
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750 0x03
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
#define ATH11K_QMI_CALDB_SIZE 0x480000
#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
-#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3
+#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
-#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
-#define QMI_WLFW_FW_READY_IND_V01 0x0038
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ)
+#define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000
+
struct ath11k_base;
enum ath11k_qmi_file_type {
@@ -65,6 +70,7 @@ enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
ATH11K_QMI_EVENT_POWER_UP,
ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_FW_INIT_DONE,
ATH11K_QMI_EVENT_MAX,
};
@@ -93,6 +99,8 @@ struct ath11k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
+ u32 prev_size;
+ u32 prev_type;
dma_addr_t paddr;
u32 *vaddr;
void __iomem *iaddr;
@@ -285,10 +293,16 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
char placeholder;
};
-#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
-#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
-#define QMI_WLANFW_CAP_REQ_V01 0x0024
-#define QMI_WLANFW_CAP_RESP_V01 0x0024
+struct qmi_wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+#define QMI_WLANFW_DEVICE_INFO_REQ_V01 0x004C
+#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN 0
enum qmi_wlanfw_pipedir_enum_v01 {
QMI_WLFW_PIPEDIR_NONE_V01 = 0,
@@ -381,6 +395,18 @@ struct qmi_wlanfw_cap_req_msg_v01 {
char placeholder;
};
+struct qmi_wlanfw_device_info_req_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_device_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u64 bar_addr;
+ u32 bar_size;
+ u8 bar_addr_valid;
+ u8 bar_size_valid;
+};
+
#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
@@ -492,5 +518,6 @@ void ath11k_qmi_event_work(struct work_struct *work);
void ath11k_qmi_msg_recv_work(struct work_struct *work);
void ath11k_qmi_deinit_service(struct ath11k_base *ab);
int ath11k_qmi_init_service(struct ath11k_base *ab);
+void ath11k_qmi_free_resource(struct ath11k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index d6575feca5a2..6fae4e61ede7 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -48,6 +48,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
+ struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@@ -76,24 +77,33 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
- /* Set the country code to the firmware and wait for
+ /* Set the country code to the firmware; it will then send
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
- init_country_param.flags = ALPHA_IS_SET;
- memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
- init_country_param.cc_info.alpha2[2] = 0;
+ if (ar->ab->hw_params.current_cc_support) {
+ memcpy(&set_current_param.alpha2, request->alpha2, 2);
+ memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+ init_country_param.cc_info.alpha2[2] = 0;
- ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
- if (ret)
- ath11k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
}
-int ath11k_reg_update_chan_list(struct ath11k *ar)
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params;
@@ -102,7 +112,35 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret;
+ int i, ret, left;
+
+ if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if (wait &&
+ (ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ if (ar->state == ATH11K_STATE_RESTARTING)
+ return 0;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -184,11 +222,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
- if (ar->pending_11d) {
- complete(&ar->finish_11d_ch_list);
- ar->pending_11d = false;
- }
-
return ret;
}
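The wait logic above leans on wait_for_completion_timeout() returning the remaining jiffies, so a zero result means the timeout expired before complete() was called. A generic sketch of the pattern, independent of the driver state machine:

	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long left;

	/* ... complete(&done) is called from the event handler ... */

	left = wait_for_completion_timeout(&done, msecs_to_jiffies(5000));
	if (!left)
		pr_debug("timed out waiting for event\n");	/* left == 0 */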
@@ -254,18 +287,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
- if (ar->pending_11d)
- complete(&ar->finish_11d_scan);
-
- rtnl_lock();
- wiphy_lock(ar->hw->wiphy);
-
- if (ar->pending_11d)
- reinit_completion(&ar->finish_11d_ch_list);
-
- ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
- wiphy_unlock(ar->hw->wiphy);
- rtnl_unlock();
+ ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
kfree(regd_copy);
@@ -273,7 +295,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
if (ar->state == ATH11K_STATE_ON) {
- ret = ath11k_reg_update_chan_list(ar);
+ ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
goto err;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 5fb9dc03a74e..2f284f26378d 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
int ath11k_regd_update(struct ath11k *ar);
-int ath11k_reg_update_chan_list(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 79c50804d7dc..786d5f36f5e5 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -877,7 +877,7 @@ struct rx_msdu_start_wcn6855 {
*
* l4_offset
* Depending upon mode bit, this field either indicates the
- * L4 offset nin bytes from the start of RX_HEADER (only valid
+ * L4 offset in bytes from the start of RX_HEADER (only valid
* if either ipv4_proto or ipv6_proto is set to 1) or indicates
* the offset in bytes to the start of TCP or UDP header from
* the start of the IP header after decapsulation (Only valid if
@@ -1445,7 +1445,7 @@ struct hal_rx_desc_ipq8074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc_qcn9074 {
@@ -1464,7 +1464,7 @@ struct hal_rx_desc_qcn9074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc_wcn6855 {
@@ -1483,7 +1483,7 @@ struct hal_rx_desc_wcn6855 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc {
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 4100cc1449a2..705868198df4 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -30,6 +30,7 @@
#define ATH11K_SPECTRAL_20MHZ 20
#define ATH11K_SPECTRAL_40MHZ 40
#define ATH11K_SPECTRAL_80MHZ 80
+#define ATH11K_SPECTRAL_160MHZ 160
#define ATH11K_SPECTRAL_SIGNATURE 0xFA
@@ -107,7 +108,7 @@ struct spectral_search_fft_report {
__le32 info1;
__le32 info2;
__le32 reserve0;
- u8 bins[0];
+ u8 bins[];
} __packed;
struct ath11k_spectral_search_report {
@@ -183,6 +184,8 @@ static int ath11k_spectral_scan_trigger(struct ath11k *ar)
if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
return 0;
+ ar->spectral.is_primary = true;
+
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
@@ -212,7 +215,10 @@ static int ath11k_spectral_scan_config(struct ath11k *ar,
return -ENODEV;
arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+
+ spin_lock_bh(&ar->spectral.lock);
ar->spectral.mode = mode;
+ spin_unlock_bh(&ar->spectral.lock);
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
@@ -582,6 +588,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
u8 chan_width_mhz, bin_sz;
int ret;
u32 check_length;
+ bool fragment_sample = false;
lockdep_assert_held(&ar->spectral.lock);
@@ -636,6 +643,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
case ATH11K_SPECTRAL_80MHZ:
fft_sample->chan_width_mhz = chan_width_mhz;
break;
+ case ATH11K_SPECTRAL_160MHZ:
+ if (ab->hw_params.spectral.fragment_160mhz) {
+ chan_width_mhz /= 2;
+ fragment_sample = true;
+ }
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
default:
ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
return -EINVAL;
@@ -660,6 +674,17 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
+ /* If freq2 is available, the spectral scan results are fragmented
+ * into primary and secondary segments.
+ */
+ if (fragment_sample && freq) {
+ if (!ar->spectral.is_primary)
+ fft_sample->freq1 = cpu_to_be16(freq);
+
+ /* Toggle is_primary so the next report is handled correctly */
+ ar->spectral.is_primary = !ar->spectral.is_primary;
+ }
+
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
ab->hw_params.spectral.fft_sz);
@@ -843,9 +868,6 @@ static inline void ath11k_spectral_ring_free(struct ath11k *ar)
{
struct ath11k_spectral *sp = &ar->spectral;
- if (!sp->enabled)
- return;
-
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
}
@@ -897,15 +919,16 @@ void ath11k_spectral_deinit(struct ath11k_base *ab)
if (!sp->enabled)
continue;
- ath11k_spectral_debug_unregister(ar);
- ath11k_spectral_ring_free(ar);
+ mutex_lock(&ar->conf_mutex);
+ ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ mutex_unlock(&ar->conf_mutex);
spin_lock_bh(&sp->lock);
-
- sp->mode = ATH11K_SPECTRAL_DISABLED;
sp->enabled = false;
-
spin_unlock_bh(&sp->lock);
+
+ ath11k_spectral_debug_unregister(ar);
+ ath11k_spectral_ring_free(ar);
}
}
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
index 081744265f2a..96bfa16e18e9 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.h
+++ b/drivers/net/wireless/ath/ath11k/spectral.h
@@ -35,6 +35,7 @@ struct ath11k_spectral {
u16 count;
u8 fft_size;
bool enabled;
+ bool is_primary;
};
#ifdef CONFIG_ATH11K_SPECTRAL
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c96b26f39a25..23ed01bd44f9 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -99,7 +99,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
- /* display in millidegree celcius */
+ /* display in millidegree Celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
index f9af55f3682d..3e39675ef7f5 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.h
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -19,7 +19,7 @@ struct ath11k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
- /* temperature value in Celcius degree
+ /* temperature value in degrees Celsius
* protected by data_lock
*/
int temperature;
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index a02e54735e88..9535745fe026 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -126,15 +126,12 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
- __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH11K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH11K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@@ -308,6 +305,34 @@ TRACE_EVENT(ath11k_wmi_diag,
)
);
+TRACE_EVENT(ath11k_ps_timekeeper,
+ TP_PROTO(struct ath11k *ar, const void *peer_addr,
+ u32 peer_ps_timestamp, u8 peer_ps_state),
+ TP_ARGS(ar, peer_addr, peer_ps_timestamp, peer_ps_state),
+
+ TP_STRUCT__entry(__string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __dynamic_array(u8, peer_addr, ETH_ALEN)
+ __field(u8, peer_ps_state)
+ __field(u32, peer_ps_timestamp)
+ ),
+
+ TP_fast_assign(__assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ memcpy(__get_dynamic_array(peer_addr), peer_addr,
+ ETH_ALEN);
+ __entry->peer_ps_state = peer_ps_state;
+ __entry->peer_ps_timestamp = peer_ps_timestamp;
+ ),
+
+ TP_printk("%s %s %u %u",
+ __get_str(driver),
+ __get_str(device),
+ __entry->peer_ps_state,
+ __entry->peer_ps_timestamp
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6b68ccf65e39..fad9f8d308a2 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -128,8 +129,6 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
- [WMI_TAG_RFKILL_EVENT] = {
- .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -144,6 +143,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+ [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
#define PRIMAP(_hw_mode_) \
@@ -388,6 +389,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
ab->target_pdev_count++;
+ if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+ return -EINVAL;
+
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
* handled.
@@ -395,7 +400,9 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
- } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
@@ -405,13 +412,11 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
pdev_cap->nss_ratio_info =
WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
- } else {
- return -EINVAL;
}
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
* For example, for 4x4 capable macphys, first 4 chains can be used for first
- * mac and the remaing 4 chains can be used for the second mac or vice-versa.
+ * mac and the remaining 4 chains can be used for the second mac or vice-versa.
* In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
* will be advertised for second mac or vice-versa. Compute the shift value
* for tx/rx chainmask which will be used to advertise supported ht/vht rates to
@@ -526,8 +531,6 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
- ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
-
return 0;
}
@@ -618,10 +621,25 @@ struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
return skb;
}
+static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params.support_off_channel_tx &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_mgmt_send_cmd *cmd;
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
@@ -642,7 +660,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->desc_id = buf_id;
- cmd->chanfreq = 0;
+ cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->frame_len = frame->len;
@@ -973,9 +991,13 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
+ arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -989,6 +1011,17 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+ bss_conf = &arvif->vif->bss_conf;
+
+ if (bss_conf->nontransmitted) {
+ ether_addr_copy(cmd->trans_bssid.addr,
+ bss_conf->transmitter_bssid);
+ cmd->profile_idx = bss_conf->bssid_index;
+ cmd->profile_num = bss_conf->bssid_indicator;
+ }
+ }
+
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
@@ -1678,7 +1711,7 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- if (vif->csa_active) {
+ if (vif->bss_conf.csa_active) {
cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
}
@@ -2013,7 +2046,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
{
/* setup commonly used values */
arg->scan_req_id = 1;
- arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
@@ -3043,8 +3079,34 @@ int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
return ret;
}
-int
-ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
+{
+ twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ twt_params->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ twt_params->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ twt_params->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ twt_params->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ twt_params->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ twt_params->mbss_support = 0;
+}
+
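/* Usage sketch (assumes an ath11k context where "ar" is in scope): with this
 * split, callers fill the default parameter set and pass it explicitly, which
 * lets e.g. a debugfs knob override individual fields before enabling TWT.
 */
struct wmi_twt_enable_params twt_params = {};

ath11k_wmi_fill_default_twt_params(&twt_params);
/* optionally override individual fields here */
ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);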
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
@@ -3062,34 +3124,29 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
- cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
- cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
- cmd->congestion_thresh_teardown =
- ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
- cmd->congestion_thresh_critical =
- ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
- cmd->interference_thresh_teardown =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
- cmd->interference_thresh_setup =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
- cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
- cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
- cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
- cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
- cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
- cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
- cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
- cmd->remove_sta_slot_interval =
- ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
- /* TODO add MBSSID support */
- cmd->mbss_support = 0;
-
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_ENABLE_CMDID);
+ cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
+ cmd->default_slot_size = params->default_slot_size;
+ cmd->congestion_thresh_setup = params->congestion_thresh_setup;
+ cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
+ cmd->congestion_thresh_critical = params->congestion_thresh_critical;
+ cmd->interference_thresh_teardown = params->interference_thresh_teardown;
+ cmd->interference_thresh_setup = params->interference_thresh_setup;
+ cmd->min_no_sta_setup = params->min_no_sta_setup;
+ cmd->min_no_sta_teardown = params->min_no_sta_teardown;
+ cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
+ cmd->min_no_twt_slots = params->min_no_twt_slots;
+ cmd->max_no_sta_twt = params->max_no_sta_twt;
+ cmd->mode_check_interval = params->mode_check_interval;
+ cmd->add_sta_slot_interval = params->add_sta_slot_interval;
+ cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
+ cmd->mbss_support = params->mbss_support;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 1;
}
return ret;
}
@@ -3114,11 +3171,181 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_DISABLE_CMDID);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 0;
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_add_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->wake_intvl_us = params->wake_intvl_us;
+ cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+ cmd->wake_dura_us = params->wake_dura_us;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->flags = params->twt_cmd;
+ if (params->flag_bcast)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ if (params->flag_trigger)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ if (params->flag_flow_type)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+ if (params->flag_protection)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+ cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+ cmd->flags);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to add twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
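/* Caller-side sketch; the numeric values are placeholders and "arvif"/"sta"
 * are assumed to be in scope. twt_cmd carries the TWT negotiation type from
 * the WMI_HOST_TWT_COMMAND enum added in the wmi.h hunks below.
 */
struct wmi_twt_add_dialog_params params = {
	.vdev_id = arvif->vdev_id,
	.dialog_id = 1,
	.wake_intvl_us = 102400,	/* 102.4 ms service interval */
	.wake_intvl_mantis = 100,
	.wake_dura_us = 8192,		/* 8.192 ms wake duration */
	.twt_cmd = WMI_HOST_TWT_COMMAND_REQUEST_TWT,
	.flag_trigger = 1,
};

ether_addr_copy(params.peer_macaddr, sta->addr);
ath11k_wmi_send_twt_add_dialog_cmd(ar, &params);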
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_del_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi delete twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to delete twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_pause_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi pause twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to pause twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_resume_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->next_twt_size = params->next_twt_size;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+ cmd->next_twt_size);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to resume twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
}
return ret;
}
@@ -3626,7 +3853,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_KERNEL);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -5083,6 +5311,8 @@ static void ath11k_wmi_event_scan_started(struct ath11k *ar)
break;
case ATH11K_SCAN_STARTING:
ar->scan.state = ATH11K_SCAN_RUNNING;
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
complete(&ar->scan.started);
break;
}
@@ -5165,6 +5395,8 @@ static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
break;
}
}
@@ -5613,9 +5845,9 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
arvif->bssid,
NULL);
if (!sta) {
- ath11k_warn(ab, "not found station for bssid %pM\n",
- arvif->bssid);
- ret = -EPROTO;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
goto exit;
}
@@ -5713,8 +5945,9 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
"wmi stats vdev id %d snr %d\n",
src->vdev_id, src->beacon_snr);
} else {
- ath11k_warn(ab, "not found station for bssid %pM\n",
- arvif->bssid);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for vdev stat\n",
+ arvif->bssid);
}
}
@@ -6177,8 +6410,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const struct wmi_11d_new_cc_ev *ev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
const void **tb;
- int ret;
+ int ret, i;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -6204,6 +6439,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
kfree(tb);
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
queue_work(ab->workqueue, &ab->update_11d_work);
return 0;
@@ -6353,7 +6595,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
fallback:
/* Fallback to older reg (by sending previous country setting
- * again if fw has succeded and we failed to process here.
+ * again if fw has succeeded and we failed to process here.
* The Regdomain should be uniform across driver and fw. Since the
* FW has processed the command and sent a success status, we expect
* this function to succeed as well. If it doesn't, CTRY needs to be
@@ -6560,6 +6802,107 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
rcu_read_unlock();
}
+static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ struct ath11k_sta *arsta;
+ const void **tb;
+ enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch sta ps change ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "peer sta ps chnange ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ ev->peer_macaddr.addr, ev->peer_ps_state,
+ ev->ps_supported_bitmap, ev->peer_ps_valid,
+ ev->peer_ps_timestamp);
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+
+ if (!ar) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
+ peer->vdev_id);
+
+ goto exit;
+ }
+
+ sta = peer->sta;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!sta) {
+ ath11k_warn(ab, "failed to find station entry %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer_previous_ps_state = arsta->peer_ps_state;
+ arsta->peer_ps_state = ev->peer_ps_state;
+ arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
+ !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
+ !ev->peer_ps_valid)
+ goto out;
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_start_time = ev->peer_ps_timestamp;
+ arsta->ps_start_jiffies = jiffies;
+ } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
+ peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_total_duration = arsta->ps_total_duration +
+ (ev->peer_ps_timestamp - arsta->ps_start_time);
+ }
+
+ if (ar->ps_timekeeper_enable)
+ trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
+ ev->peer_ps_timestamp,
+ arsta->peer_ps_state);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
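/* Reader-side sketch: an ON interval still in progress is not yet folded into
 * ps_total_duration, so a consumer (e.g. debugfs) would add it under
 * data_lock. That the firmware timestamp advances in the same unit as
 * jiffies_to_msecs() returns is an assumption of this sketch.
 */
spin_lock_bh(&ar->data_lock);
total_ms = arsta->ps_total_duration;
if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
    arsta->peer_current_ps_valid)
	total_ms += jiffies_to_msecs(jiffies - arsta->ps_start_jiffies);
spin_unlock_bh(&ar->data_lock);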
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
@@ -7112,47 +7455,64 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
rcu_read_unlock();
}
-static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
{
- const void **tb;
const struct wmi_service_available_event *ev;
- int ret;
+ u32 *wmi_ext2_service_bitmap;
int i, j;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
+ switch (tag) {
+ case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+ ev = (struct wmi_service_available_event *)ptr;
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
- ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
- if (!ev) {
- ath11k_warn(ab, "failed to fetch svc available ev");
- kfree(tb);
- return;
- }
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ ev->wmi_service_segment_bitmap[0],
+ ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2],
+ ev->wmi_service_segment_bitmap[3]);
+ break;
+ case WMI_TAG_ARRAY_UINT32:
+ wmi_ext2_service_bitmap = (u32 *)ptr;
+ for (i = 0, j = WMI_MAX_EXT_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ i++) {
+ do {
+ if (wmi_ext2_service_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
- /* TODO: Use wmi_service_segment_offset information to get the service
- * especially when more services are advertised in multiple sevice
- * available events.
- */
- for (i = 0, j = WMI_MAX_SERVICE;
- i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
- i++) {
- do {
- if (ev->wmi_service_segment_bitmap[i] &
- BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
- set_bit(j, ab->wmi_ab.svc_map);
- } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+ wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+ break;
}
+ return 0;
+}
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
- ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
- ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ int ret;
- kfree(tb);
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_services_parser,
+ NULL);
+ if (ret)
+ ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
}
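/* How the walk above maps bits to service ids (illustration only): word i of
 * a segment covers 32 consecutive ids above the segment base, so for the ext2
 * segment (base WMI_MAX_EXT_SERVICE = 256) an id maps to word (id - 256) / 32
 * and bit id % 32; WMI_TLV_SERVICE_BIOS_SAR_SUPPORT (326) therefore lands in
 * word 2, bit 6.
 */
static bool ath11k_wmi_ext2_service_is_set(const u32 *bitmap, u32 id)
{
	return bitmap[(id - WMI_MAX_EXT_SERVICE) / 32] & BIT(id % 32);
}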
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -7185,7 +7545,53 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- ath11k_debugfs_fw_stats_process(ab, skb);
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ * debugfs fw stats. Therefore, processing it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats_done = true;
+ goto complete;
+ }
+
+ /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
+ * are currently requested only via debugfs fw stats. Hence, processing these
+ * in debugfs context
+ */
+ ath11k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_free(&stats);
}
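/* Requester-side sketch matching the new flow: build a stats request, issue
 * it and block on fw_stats_complete, which the handler above now signals.
 * Assumes a context where "ar" and a filled struct stats_request_params
 * "req_param" are available.
 */
reinit_completion(&ar->fw_stats_complete);

ret = ath11k_wmi_send_stats_request_cmd(ar, &req_param);
if (ret)
	return ret;

if (!wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ))
	return -ETIMEDOUT;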
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -7248,7 +7654,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
continue;
}
- if (arvif->is_up && arvif->vif->csa_active)
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(arvif->vif);
}
rcu_read_unlock();
@@ -7338,40 +7744,6 @@ exit:
kfree(tb);
}
-static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
- const struct wmi_rfkill_state_change_ev *ev;
- const void **tb;
- int ret;
-
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
-
- ev = tb[WMI_TAG_RFKILL_EVENT];
- if (!ev) {
- kfree(tb);
- return;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
- ev->gpio_pin_num,
- ev->int_type,
- ev->radio_state);
-
- spin_lock_bh(&ab->base_lock);
- ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
- spin_unlock_bh(&ab->base_lock);
-
- queue_work(ab->workqueue, &ab->rfkill_work);
- kfree(tb);
-}
-
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
@@ -7532,6 +7904,116 @@ ath11k_wmi_diag_event(struct ath11k_base *ab,
trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+ switch (status) {
+ case WMI_ADD_TWT_STATUS_OK:
+ return "ok";
+ case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+ return "twt disabled";
+ case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+ return "dialog id in use";
+ case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+ return "invalid parameters";
+ case WMI_ADD_TWT_STATUS_NOT_READY:
+ return "not ready";
+ case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+ return "resource unavailable";
+ case WMI_ADD_TWT_STATUS_NO_ACK:
+ return "no ack";
+ case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+ return "no response";
+ case WMI_ADD_TWT_STATUS_DENIED:
+ return "denied";
+ case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+ fallthrough;
+ default:
+ return "unknown error";
+ }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_add_dialog_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse wmi twt add dialog status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+ goto exit;
+ }
+
+ if (ev->status)
+ ath11k_warn(ab,
+ "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+ ev->vdev_id, ev->dialog_id,
+ ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath11k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
+ ev->refresh_cnt);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
+ NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
+
+ replay_ctr = ev->replay_ctr.word1;
+ replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
+ arvif->rekey_data.replay_ctr = replay_ctr;
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7629,11 +8111,17 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
+ case WMI_TWT_ADD_DIALOG_EVENTID:
+ ath11k_wmi_twt_add_dialog_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
+ case WMI_TWT_DEL_DIALOG_EVENTID:
+ case WMI_TWT_PAUSE_DIALOG_EVENTID:
+ case WMI_TWT_RESUME_DIALOG_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
@@ -7651,12 +8139,15 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_11D_NEW_COUNTRY_EVENTID:
ath11k_reg_11d_new_cc_event(ab, skb);
break;
- case WMI_RFKILL_STATE_CHANGE_EVENTID:
- ath11k_rfkill_state_change_event(ab, skb);
- break;
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
+ case WMI_PEER_STA_PS_STATECHG_EVENTID:
+ ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath11k_wmi_gtk_offload_status_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7798,6 +8289,59 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar)
return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_debug_log_config_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ int ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->dbg_log_param = dbglog->param;
+
+ tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+
+ switch (dbglog->param) {
+ case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ break;
+ case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+ case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ memcpy(tlv->value, module_id_bitmap,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ /* clear current config to be used for next user config */
+ memset(module_id_bitmap, 0,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DBGLOG_CFG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
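/* Debugfs-side usage sketch; the param values come from the wmi.h hunk below,
 * while the bitmap staging shown here is an assumption about how a module id
 * would be set before the call (the command clears the bitmap afterwards).
 */
struct ath11k_fw_dbglog dbglog = {
	.param = WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
	.value = 0,
};

module_id_bitmap[module_id / 32] |= BIT(module_id % 32);
ret = ath11k_wmi_fw_dbglog_cfg(ar, module_id_bitmap, &dbglog);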
int ath11k_wmi_connect(struct ath11k_base *ab)
{
u32 i;
@@ -7851,7 +8395,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
- if (ab->hw_params.single_pdev_only)
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
@@ -7873,6 +8417,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
ath11k_wmi_free_dbring_caps(ab);
}
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable;
+
+ /* Set all modes in case of disable */
+ if (cmd->enable)
+ cmd->hw_filter_bitmap = filter_bitmap;
+ else
+ cmd->hw_filter_bitmap = ((u32)~0U);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ enable, filter_bitmap);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
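/* WoW suspend-path sketch: drop broadcast/multicast data except the ARP and
 * ICMPv6 frames the offloads answer. The filter flag names are assumptions
 * modelled on the ath10k equivalents.
 */
ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
				    WMI_HW_DATA_FILTER_DROP_NON_ARP_BC |
				    WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC,
				    true);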
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{
struct wmi_wow_host_wakeup_ind *cmd;
@@ -7943,3 +8520,648 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
}
+
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->is_add = enable;
+ cmd->event_bitmap = (1 << event);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
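/* Suspend-path sketch: arm one wakeup source per vdev. The event value is
 * assumed from the ath10k-derived wmi_wow_wakeup_event list.
 */
ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id,
				      WOW_MAGIC_PKT_RECVD_EVENT, 1);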
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_ADD_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
+ bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_BITMAP_PATTERN_T) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
+ bitmap->pattern_offset = pattern_offset;
+ bitmap->pattern_len = pattern_len;
+ bitmap->bitmask_len = pattern_len;
+ bitmap->pattern_id = pattern_id;
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_DEL_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
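/* Sketch of feeding a cfg80211 WoW pattern into the new helper. cfg80211
 * stores the mask as one bit per payload byte, so "bytemask" stands for an
 * expanded byte-per-byte mask; that expansion is elided here.
 */
struct cfg80211_pkt_pattern *patt = &wowlan->patterns[i];

ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id, pattern_id++,
				 patt->pattern, bytemask,
				 patt->pattern_len, patt->pkt_offset);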
+
+static struct sk_buff *
+ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 *channel_list;
+ size_t len, nlo_list_len, channel_list_len;
+ u8 *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV place holder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV place holder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = pno->vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = pno->active_max_time;
+ cmd->passive_dwell_time = pno->passive_max_time;
+
+ if (pno->do_passive_scan)
+ cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
+
+ cmd->fast_scan_period = pno->fast_scan_period;
+ cmd->slow_scan_period = pno->slow_scan_period;
+ cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
+ cmd->delay_start_time = pno->delay_start_time;
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
+ ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = pno->uc_networks_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = (struct nlo_configured_parameters *)ptr;
+ for (i = 0; i < cmd->no_of_ssids; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
+
+ nlo_list[i].ssid.valid = true;
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ nlo_list[i].ssid.ssid.ssid_len);
+ ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
+ roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = true;
+ nlo_list[i].rssi_cond.rssi =
+ pno->a_networks[i].rssi_threshold;
+ }
+
+ nlo_list[i].bcast_nw_type.valid = true;
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ pno->a_networks[i].bcast_nw_type;
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = pno->a_networks[0].channel_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = (u32 *)ptr;
+ for (i = 0; i < cmd->num_of_channels; i++)
+ channel_list[i] = pno->a_networks[0].channels[i];
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_STOP;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
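/* Net-detect sketch: the same entry point starts and stops PNO depending on
 * pno_scan->enable; the request itself would be built from cfg80211's
 * sched-scan parameters (construction elided).
 */
pno_scan->enable = 1;
pno_scan->vdev_id = arvif->vdev_id;
ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno_scan);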
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_tuple *ns;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = (struct wmi_tlv *)buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = (struct wmi_ns_offload_tuple *)buf_ptr;
+ ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= WMI_NSOL_FLAGS_VALID;
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+ ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
+ ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+ ath11k_ce_byte_swap(ns->target_mac.addr, 8);
+
+ if (ns->target_mac.word0 != 0 ||
+ ns->target_mac.word1 != 0) {
+ ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_tuple *arp;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = (struct wmi_arp_offload_tuple *)buf_ptr;
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = WMI_ARPOL_FLAGS_VALID;
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+ ath11k_ce_byte_swap(arp->target_ipaddr, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct ath11k_arp_ns_offload *offload;
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ offload = &arvif->arp_ns_offload;
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
+ }
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->flags = 0;
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->num_ns_ext_tuples = ns_ext_tuples;
+
+ buf_ptr += sizeof(*cmd);
+
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
+ ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+ int len;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+
+ if (enable) {
+ cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
+
+ /* the length in rekey_data and cmd is equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+ ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
+ } else {
+ cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
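/* Hypothetical pairing wrapper: program the KCK/KEK before WoW sleep and
 * query the updated replay counter on wake, which fires the
 * WMI_GTK_OFFLOAD_STATUS_EVENTID handler added earlier in this patch.
 */
static int ath11k_wow_gtk_rekey(struct ath11k *ar, struct ath11k_vif *arvif,
				bool suspending)
{
	if (suspending)
		return ath11k_wmi_gtk_rekey_offload(ar, arvif, true);

	return ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
}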
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ int len;
+ struct sk_buff *skb;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, sar_len_aligned, rsvd_len_aligned;
+
+ sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sar_len_aligned +
+ TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->sar_len = BIOS_SAR_TABLE_LEN;
+ cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, rsvd_len_aligned;
+
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+}
+
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->enabled = arg->enabled;
+ cmd->interval = arg->interval;
+ cmd->method = arg->method;
+
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
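/* Usage sketch for STA vdevs. WMI_STA_KEEPALIVE_METHOD_NULL_FRAME and the
 * default interval are assumed to be defined in the accompanying wmi.h
 * changes (not shown in these hunks).
 */
struct wmi_sta_keepalive_arg arg = {
	.vdev_id = arvif->vdev_id,
	.enabled = 1,
	.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
	.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
};

ret = ath11k_wmi_sta_keepalive(ar, &arg);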
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 2f26ec1a8aa3..8f2c07d70a4a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -12,10 +12,12 @@
struct ath11k_base;
struct ath11k;
struct ath11k_fw_stats;
+struct ath11k_fw_dbglog;
+struct ath11k_vif;
#define PSOC_HOST_MAX_NUM_SS (8)
-/* defines to set Packet extension values whic can be 0 us, 8 usec or 16 usec */
+/* defines to set Packet extension values which can be 0 us, 8 usec or 16 usec */
#define MAX_HE_NSS 8
#define MAX_HE_MODULATION 8
#define MAX_HE_RU 4
@@ -283,6 +285,11 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_GET_TPC_STATS_CMDID,
+ WMI_PDEV_ENABLE_DURATION_BASED_TX_MODE_SELECTION_CMDID,
+ WMI_PDEV_GET_DPD_STATUS_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@@ -1207,7 +1214,7 @@ enum wmi_tlv_tag {
WMI_TAG_NS_OFFLOAD_TUPLE,
WMI_TAG_FTM_INTG_CMD,
WMI_TAG_STA_KEEPALIVE_CMD,
- WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE,
WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
WMI_TAG_AP_PS_PEER_CMD,
WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
@@ -1857,6 +1864,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
WMI_TAG_MAX
};
@@ -1990,6 +1999,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+ /* The first 128 bits */
WMI_MAX_SERVICE = 128,
WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
@@ -2080,9 +2090,15 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
- WMI_MAX_EXT_SERVICE
+ /* The second 128 bits */
+ WMI_MAX_EXT_SERVICE = 256,
+ WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
+
+ /* The third 128 bits */
+ WMI_MAX_EXT2_SERVICE = 384
};
enum {
@@ -3087,9 +3103,6 @@ enum scan_dwelltime_adaptive_mode {
SCAN_DWELL_MODE_STATIC = 4
};
-#define WLAN_SCAN_MAX_NUM_SSID 10
-#define WLAN_SCAN_MAX_NUM_BSSID 10
-
#define WLAN_SSID_MAX_LEN 32
struct element_info {
@@ -3104,7 +3117,6 @@ struct wlan_ssid {
#define WMI_IE_BITMAP_SIZE 8
-#define WMI_SCAN_MAX_NUM_SSID 0x0A
/* prefix used by scan requestor ids on the host */
#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
@@ -3112,10 +3124,6 @@ struct wlan_ssid {
/* host cycles through the lower 12 bits to generate ids */
#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
-#define WLAN_SCAN_PARAMS_MAX_SSID 16
-#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
-
/* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason.
*/
@@ -3311,8 +3319,8 @@ struct scan_req_params {
u32 n_probes;
u32 *chan_list;
u32 notify_scan_events;
- struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
- struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID];
struct element_info extraie;
struct element_info htcap;
struct element_info vhtcap;
@@ -4475,7 +4483,7 @@ struct wmi_pdev_radar_ev {
} __packed;
struct wmi_pdev_temperature_event {
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
s32 temp;
u32 pdev_id;
} __packed;
@@ -4701,7 +4709,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
*/
enum wmi_sta_ps_param_pspoll_count {
WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
- /* Values greater than 0 indicate the maximum numer of PS-Poll frames
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
};
@@ -4813,9 +4821,9 @@ enum wmi_rate_preamble {
/**
* enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
- * @WMI_RTS_CTS_DISABLED : RTS/CTS protection is disabled.
- * @WMI_USE_RTS_CTS : RTS/CTS Enabled.
- * @WMI_USE_CTS2SELF : CTS to self protection Enabled.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
*/
enum wmi_rtscts_prot_mode {
WMI_RTS_CTS_DISABLED = 0,
@@ -4826,13 +4834,13 @@ enum wmi_rtscts_prot_mode {
/**
* enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
* protection mode.
- * @WMI_RTSCTS_FOR_NO_RATESERIES - Neither of rate-series should use RTS-CTS
- * @WMI_RTSCTS_FOR_SECOND_RATESERIES - Only second rate-series will use RTS-CTS
- * @WMI_RTSCTS_ACROSS_SW_RETRIES - Only the second rate-series will use RTS-CTS,
- * but if there's a sw retry, both the rate
- * series will use RTS-CTS.
- * @WMI_RTSCTS_ERP - RTS/CTS used for ERP protection for every PPDU.
- * @WMI_RTSCTS_FOR_ALL_RATESERIES - Enable RTS-CTS for all rate series.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
*/
enum wmi_rtscts_profile {
WMI_RTSCTS_FOR_NO_RATESERIES = 0,
@@ -4926,6 +4934,25 @@ struct wmi_wmm_params_all_arg {
#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+struct wmi_twt_enable_params {
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+};
+
struct wmi_twt_enable_params_cmd {
u32 tlv_header;
u32 pdev_id;
@@ -4952,6 +4979,112 @@ struct wmi_twt_disable_params_cmd {
u32 pdev_id;
} __packed;
+enum WMI_HOST_TWT_COMMAND {
+ WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+ WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+ WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+ WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+ WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+ WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+ WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+ WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u8 twt_cmd;
+ u8 flag_bcast;
+ u8 flag_trigger;
+ u8 flag_flow_type;
+ u8 flag_protection;
+} __packed;
+
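For orientation, twt_cmd carries a WMI_HOST_TWT_COMMAND_* value and the flag_* fields correspond to the WMI_TWT_ADD_DIALOG_FLAG_* bits above; a hedged sketch with illustrative values (not part of the patch, dialog id and timing numbers hypothetical):

	/* Illustrative sketch, not part of this patch: request a TWT dialog */
	static int example_twt_add_dialog(struct ath11k *ar, u32 vdev_id,
					  const u8 *peer_mac)
	{
		struct wmi_twt_add_dialog_params params = {
			.vdev_id = vdev_id,
			.dialog_id = 1,			/* hypothetical id */
			.wake_intvl_us = 65280,
			.wake_intvl_mantis = 255,
			.wake_dura_us = 8192,
			.twt_cmd = WMI_HOST_TWT_COMMAND_REQUEST_TWT,
			.flag_trigger = 1,	/* -> WMI_TWT_ADD_DIALOG_FLAG_TRIGGER */
		};

		memcpy(params.peer_macaddr, peer_mac, ETH_ALEN);

		return ath11k_wmi_send_twt_add_dialog_cmd(ar, &params);
	}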
+enum wmi_twt_add_dialog_status {
+ WMI_ADD_TWT_STATUS_OK,
+ WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+ WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+ WMI_ADD_TWT_STATUS_INVALID_PARAM,
+ WMI_ADD_TWT_STATUS_NOT_READY,
+ WMI_ADD_TWT_STATUS_NO_RESOURCE,
+ WMI_ADD_TWT_STATUS_NO_ACK,
+ WMI_ADD_TWT_STATUS_NO_RESPONSE,
+ WMI_ADD_TWT_STATUS_DENIED,
+ WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
struct wmi_obss_spatial_reuse_params_cmd {
u32 tlv_header;
u32 pdev_id;
@@ -5215,29 +5348,19 @@ struct target_resource_config {
u32 twt_ap_sta_count;
};
-enum wmi_sys_cap_info_flags {
- WMI_SYS_CAP_INFO_RXTX_LED = BIT(0),
- WMI_SYS_CAP_INFO_RFKILL = BIT(1),
-};
-
-#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
-#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6)
-#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
-
-enum wmi_rfkill_enable_radio {
- WMI_RFKILL_ENABLE_RADIO_ON = 0,
- WMI_RFKILL_ENABLE_RADIO_OFF = 1,
+enum wmi_debug_log_param {
+ WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
};
-enum wmi_rfkill_radio_state {
- WMI_RFKILL_RADIO_STATE_OFF = 1,
- WMI_RFKILL_RADIO_STATE_ON = 2,
-};
-
-struct wmi_rfkill_state_change_ev {
- u32 gpio_pin_num;
- u32 int_type;
- u32 radio_state;
+struct wmi_debug_log_config_cmd_fixed_param {
+ u32 tlv_header;
+ u32 dbg_log_param;
+ u32 value;
} __packed;
#define WMI_MAX_MEM_REQS 32
@@ -5247,6 +5370,26 @@ struct wmi_rfkill_state_change_ev {
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+enum ath11k_wmi_peer_ps_state {
+ WMI_PEER_PS_STATE_OFF,
+ WMI_PEER_PS_STATE_ON,
+ WMI_PEER_PS_STATE_DISABLED,
+};
+
+enum wmi_peer_ps_supported_bitmap {
+ /* Used to indicate that power save state change is valid */
+ WMI_PEER_PS_VALID = 0x1,
+ WMI_PEER_PS_STATE_TIMESTAMP = 0x2,
+};
+
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_ps_state;
+ u32 ps_supported_bitmap;
+ u32 peer_ps_valid;
+ u32 peer_ps_timestamp;
+} __packed;
+
struct ath11k_wmi_base {
struct ath11k_base *ab;
struct ath11k_pdev_wmi wmi[MAX_RADIOS];
@@ -5255,7 +5398,7 @@ struct ath11k_wmi_base {
struct completion service_ready;
struct completion unified_ready;
- DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
wait_queue_head_t tx_credits_wq;
const struct wmi_peer_flags_map *peer_flags;
u32 num_mem_chunks;
@@ -5268,6 +5411,19 @@ struct ath11k_wmi_base {
struct ath11k_targ_cap *targ_cap;
};
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+ u32 hw_filter_bitmap;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -5412,6 +5568,45 @@ static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
#undef C2S
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
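For readers doing the arithmetic: with the 24-byte struct ieee80211_hdr_3addr, the 8-byte struct rfc1042_hdr this series adds to wow.h, and the 14-byte struct ethhdr, WOW_HDR_LEN works out to 32 bytes, and WOW_MAX_REDUCE to 32 - 14 - 4 = 14 bytes (addr1 sits at offset 4, after the 2-byte frame control and 2-byte duration); ath11k_wow_init() in wow.c subtracts this from the advertised pattern limits in native-wifi decap mode.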
+struct wmi_wow_add_del_event_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 is_add;
+ u32 event_bitmap;
+} __packed;
+
struct wmi_wow_enable_cmd {
u32 tlv_header;
u32 enable;
@@ -5424,13 +5619,353 @@ struct wmi_wow_host_wakeup_ind {
u32 reserved;
} __packed;
-struct wmi_wow_ev_arg {
+struct wmi_tlv_wow_event_info {
u32 vdev_id;
u32 flag;
- enum wmi_wow_wake_reason wake_reason;
+ u32 wake_reason;
u32 data_len;
+} __packed;
+
+struct wmi_wow_bitmap_pattern {
+ u32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ u32 pattern_offset;
+ u32 pattern_len;
+ u32 bitmask_len;
+ u32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on the dot11 declaration without extra IEs as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
};
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_param {
+ u32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ u32 valid;
+ u32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ u32 valid;
+ u32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ u32 valid;
+ u32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ u32 valid;
+ s32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len */
+ u32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 fast_scan_max_cycles;
+ u32 active_dwell_time;
+ u32 passive_dwell_time;
+ u32 probe_bundle_size;
+
+ /* ART = IRT */
+ u32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ u32 max_rest_time;
+
+ /* SBM */
+ u32 scan_backoff_multiplier;
+
+ /* SCBM */
+ u32 fast_scan_period;
+
+ /* specific to Windows */
+ u32 slow_scan_period;
+
+ u32 no_of_ssids;
+
+ u32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ u32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct wmi_mac_addr mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct wmi_mac_addr mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ u32 ie_bitmap[8];
+
+ /* Number of vendor OUIs; the OUIs themselves follow in the vendor_oui[] TLV */
+ u32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ u32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
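Since the command is followed by variable-length TLVs (see the trailing comment), the skb allocation must account for them. A hedged sketch of the length math, assuming the driver's existing TLV_HDR_SIZE and hypothetical num_networks/num_channels counts:

	/* Illustrative sketch, not part of this patch: sizing the NLO command
	 * buffer for the trailing nlo_list[] and channel_list[] TLVs.
	 */
	size_t len = sizeof(struct wmi_wow_nlo_config_cmd) +
		     TLV_HDR_SIZE +
		     num_networks * sizeof(struct nlo_configured_parameters) +
		     TLV_HDR_SIZE + num_channels * sizeof(u32);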
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_tuple ns_tuples[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_tuple ns_ext_tuples[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_replayc_cnt {
+ union {
+ u8 counter[GTK_REPLAY_COUNTER_BYTES];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_gtk_offload_status_event {
+ u32 vdev_id;
+ u32 flags;
+ u32 refresh_cnt;
+ struct wmi_replayc_cnt replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+#define BIOS_SAR_TABLE_LEN (22)
+#define BIOS_SAR_RSVD1_LEN (6)
+#define BIOS_SAR_RSVD2_LEN (18)
+
+struct wmi_pdev_set_sar_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sar_len;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_pdev_set_geo_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ u32 method;
+
+ /* in seconds */
+ u32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp {
+ u32 tlv_header;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
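A hedged usage sketch for the keepalive arg (illustrative only; ath11k_wmi_sta_keepalive() is declared at the end of this header, and dest_mac_addr presumably may stay zeroed for the NULL-frame method since the ARP fields go unused):

	/* Illustrative sketch, not part of this patch: arm a NULL-frame
	 * keepalive at the default 30 s interval on one vdev.
	 */
	static int example_sta_keepalive(struct ath11k *ar, u32 vdev_id)
	{
		struct wmi_sta_keepalive_arg arg = {
			.vdev_id = vdev_id,
			.enabled = 1,
			.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
			.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
		};

		return ath11k_wmi_sta_keepalive(ar, &arg);
	}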
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -5544,8 +6079,18 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
struct ath11k_fw_stats *fw_stats, u32 stats_id,
char *buf);
int ath11k_wmi_simulate_radar(struct ath11k *ar);
-int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params);
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params);
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params);
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
@@ -5582,4 +6127,28 @@ int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
int ath11k_wmi_wow_enable(struct ath11k *ar);
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]);
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog);
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id);
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable);
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif);
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val);
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 43c62e99dd0e..1dec23b0699c 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -1,16 +1,30 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
#include "mac.h"
+
+#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
+#include "dp_rx.h"
+
+static const struct wiphy_wowlan_support ath11k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
int ath11k_wow_enable(struct ath11k_base *ab)
{
@@ -54,6 +68,13 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
int ret;
+ /* In the case of WCN6750, WoW wakeup is done
+ * by sending an SMP2P power save exit message
+ * to the target processor.
+ */
+ if (ab->hw_params.smp2p_wow_exit)
+ return 0;
+
reinit_completion(&ab->wow.wakeup_completed);
ret = ath11k_wmi_wow_host_wakeup_ind(ar);
@@ -71,3 +92,786 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
return 0;
}
+
+static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert an 802.3 format to an 802.11 format.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
+static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ old->mask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy logic link type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+ /* Compute new pkt_offset */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+ /* Compute new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
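A concrete instance of the remapping: a pattern byte at 802.3 offset 0 (start of the destination MAC) lands at 802.11 offset 4 (addr1, after the 2-byte frame control and 2-byte duration), a byte at offset 6 (source MAC) lands at offset 16 (addr3), and the first payload byte at offset ETH_HLEN (14) lands at offset 32 (the 24-byte 3-address header plus the 8-byte RFC 1042 header).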
+static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Fill in the per-profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support a min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath11k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+ struct cfg80211_pkt_pattern old_pattern = patterns[i];
+ int j;
+
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+ /* convert bytemask to bitmask */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+ old_pattern.mask = bitmask;
+
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN) {
+ u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
+
+ memcpy(pattern_ext, old_pattern.pattern,
+ old_pattern.pattern_len);
+ old_pattern.pattern = pattern_ext;
+ ath11k_wow_convert_8023_to_80211(&new_pattern,
+ &old_pattern);
+ } else {
+ new_pattern = old_pattern;
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
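To make the bytemask-to-bitmask expansion above concrete: cfg80211 supplies one mask bit per pattern byte, so a first mask byte of 0x05 (bits 0 and 2 set) expands to bitmask[0] = bitmask[2] = 0xff and leaves bitmask[1] zero, i.e. pattern bytes 0 and 2 must match exactly while byte 1 is ignored, matching the per-byte bitmaskbuf[] layout of wmi_wow_bitmap_pattern in wmi.h.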
+static int ath11k_wow_set_wakeups(struct ath11k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
+{
+ int ret = 0;
+ struct ath11k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 bitmap;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
+ bitmap,
+ true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disabling rekey offload */
+ if (!enable) {
+ ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath11k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath11k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_keepalive(struct ath11k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_enable(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx pktlog during wow suspend: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ath11k_ce_stop_shadow_timers(ar->ab);
+ ath11k_dp_stop_shadow_timers(ar->ab);
+
+ ath11k_hif_irq_disable(ar->ab);
+ ath11k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath11k_hif_suspend(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath11k_wow_wakeup(ar->ab);
+
+cleanup:
+ ath11k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath11k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_hif_resume(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath11k_hif_ce_irq_enable(ar->ab);
+ ath11k_hif_irq_enable(ar->ab);
+
+ ret = ath11k_dp_rx_pktlog_start(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_wakeup(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH11K_STATE_OFF:
+ case ATH11K_STATE_RESTARTING:
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_wow_init(struct ath11k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath11k_wowlan_support;
+
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h
index dabc4ee63cf6..553ba850d910 100644
--- a/drivers/net/wireless/ath/ath11k/wow.h
+++ b/drivers/net/wireless/ath/ath11k/wow.h
@@ -3,8 +3,53 @@
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
*/
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath11k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
#define ATH11K_WOW_RETRY_NUM 3
#define ATH11K_WOW_RETRY_WAIT_MS 200
+#define ATH11K_WOW_PATTERNS 22
+#ifdef CONFIG_PM
+
+int ath11k_wow_init(struct ath11k *ar);
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath11k_wow_op_resume(struct ieee80211_hw *hw);
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
int ath11k_wow_enable(struct ath11k_base *ab);
int ath11k_wow_wakeup(struct ath11k_base *ab);
+
+#else
+
+static inline int ath11k_wow_init(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_enable(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_wakeup(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 234ea939d316..f595204f493d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1395,10 +1395,6 @@ struct ath5k_hw {
u32 ah_txq_imr_nofrm;
u32 ah_txq_isr_txok_all;
- u32 ah_txq_isr_txurn;
- u32 ah_txq_isr_qcborn;
- u32 ah_txq_isr_qcburn;
- u32 ah_txq_isr_qtrig;
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 66d123f48085..c59c14483177 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1946,7 +1946,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, 0);
if (!skb) {
ret = -ENOMEM;
@@ -1982,7 +1982,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
/*
* Check if the previous beacon has gone out. If
- * not, don't don't try to post another: skip this
+ * not, don't try to post another: skip this
* period and wait for the next. Missed beacons
* indicate a problem and should not occur. If we
* miss too many consecutive beacons reset the device.
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index e6c52f7c26e7..d9e376eb040e 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -650,6 +650,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+ ah->ah_txq_isr_txok_all = 0;
/* We treat TXOK,TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
@@ -670,13 +671,6 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
- /* Currently this is not much useful since we treat
- * all queues the same way if we get a TXURN (update
- * tx trigger level) but we might need it later on*/
- if (pisr & AR5K_ISR_TXURN)
- ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
- AR5K_SISR2_QCU_TXURN);
-
/* Misc Beacon related interrupts */
/* For AR5211 */
@@ -709,25 +703,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRORN);
- }
/* A queue got CBR underrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRURN);
- }
/* A queue got triggered */
- if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
+ if (unlikely(pisr & (AR5K_ISR_QTRIG)))
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
- AR5K_SISR4_QTRIG);
- }
data = pisr;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fbc2c19848f..d444b3d70ba2 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
}
}
+ if (idx == AR5K_EEPROM_N_PD_CURVES)
+ goto err_out;
+
ee->ee_pd_gains[mode] = 1;
pd = &chinfo[pier].pd_curves[idx];
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 532eeac9e83e..ed5d2160a72a 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -250,7 +250,7 @@ unlock:
static void
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changes)
+ struct ieee80211_bss_conf *bss_conf, u64 changes)
{
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct ath5k_hw *ah = hw->priv;
@@ -278,9 +278,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_ASSOC) {
- avf->assoc = bss_conf->assoc;
- if (bss_conf->assoc)
- ah->assoc = bss_conf->assoc;
+ avf->assoc = vif->cfg.assoc;
+ if (vif->cfg.assoc)
+ ah->assoc = vif->cfg.assoc;
else
ah->assoc = ath5k_any_vif_assoc(ah);
@@ -288,11 +288,11 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath5k_set_beacon_filter(hw, ah->assoc);
ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
- common->curaid = bss_conf->aid;
+ vif->cfg.aid, common->curbssid);
+ common->curaid = vif->cfg.aid;
ath5k_hw_set_bssid(ah);
/* Once ANI is available you would start it here */
}
@@ -410,7 +410,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
/* FIF_CONTROL doc says we should only pass on control frames for this
* station. This needs testing. I believe right now this
* enables *all* control frames, which is OK.. but
- * but we should see if we can improve on granularity */
+ * we should see if we can improve on granularity */
if (*new_flags & FIF_CONTROL)
rfilt |= AR5K_RX_FILTER_CONTROL;
@@ -572,7 +572,8 @@ ath5k_get_stats(struct ieee80211_hw *hw,
static int
-ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath5k_hw *ah = hw->priv;
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 00f9e347d414..5797ef9c73d7 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3136,7 +3136,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
/* Limit it to be inside pwr range */
table_size = pwr_max[pdg] - pwr_min[pdg];
- max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
+ max_idx = min(pdadc_n, table_size);
/* Fill pdadc_out table */
while (pdadc_0 < max_idx && pdadc_i < 128)
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index dc2b3b46781e..a75bfa9fd1cf 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -36,6 +36,11 @@ ath6kl_core-y += wmi.o
ath6kl_core-y += core.o
ath6kl_core-y += recovery.o
+# FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_htc_mbox.o += $(call cc-disable-warning, dangling-pointer)
+endif
+
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index bd1183830e91..a20e0aeae284 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -807,7 +807,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
cfg80211_put_bss(ar->wiphy, bss);
} else if (vif->sme_state == SME_CONNECTED) {
struct cfg80211_roam_info roam_info = {
- .bss = bss,
+ .links[0].bss = bss,
.req_ie = assoc_req_ie,
.req_ie_len = assoc_req_len,
.resp_ie = assoc_resp_ie,
@@ -1119,12 +1119,12 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->ndev, &chandef);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
mutex_unlock(&vif->wdev.mtx);
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
@@ -1249,7 +1249,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr)
{
struct ath6kl *ar = ath6kl_priv(ndev);
@@ -1279,7 +1279,7 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback) (void *cookie,
struct key_params *))
@@ -1314,7 +1314,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -2967,7 +2967,8 @@ static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev,
return ath6kl_set_ies(vif, beacon);
}
-static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -3368,6 +3369,7 @@ static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
struct net_device *dev,
+ unsigned int link_id,
const u8 *addr,
const struct cfg80211_bitrate_mask *mask)
{
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index f9d3f3a5edfe..ba16b98c872d 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -92,7 +92,7 @@ struct bus_request {
* emode - This indicates the whether the command is to be executed in a
* blocking or non-blocking fashion (HIF_SYNCHRONOUS/
* HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
- * implemented using the asynchronous mode allowing the the bus
+ * implemented using the asynchronous mode allowing the bus
* driver to indicate the completion of operation through the
* registered callback routine. The requirement primarily comes
* from the contexts these operations get called from (a driver's
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index e3874421c4c0..1963d3145481 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1538,7 +1538,7 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
queue, n_msg);
/*
- * This is due to unavailabilty of buffers to rx entire data.
+ * This is due to unavailability of buffers to rx entire data.
* Return no error so that free buffers from queue can be used
* to receive partial data.
*/
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 9b5c7d8f2b95..201e45554070 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1014,7 +1014,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
switch (ie_id) {
case ATH6KL_FW_IE_FW_VERSION:
- strlcpy(ar->wiphy->fw_version, data,
+ strscpy(ar->wiphy->fw_version, data,
min(sizeof(ar->wiphy->fw_version), ie_len+1));
ath6kl_dbg(ATH6KL_DBG_BOOT,
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 6b51a2dceadc..8a43c48ec1cf 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1185,7 +1185,7 @@ static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
* Wait for first 4 bytes to be in FIFO
* If CONSERVATIVE_BMI_READ is enabled, also wait for
* a BMI command credit, which indicates that the ENTIRE
- * response is available in the the FIFO
+ * response is available in the FIFO
*
* CASE 3: length > 128
* Wait for the first 4 bytes to be in FIFO
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index a3d3740419eb..231a94769ddb 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -253,13 +253,10 @@ DECLARE_EVENT_CLASS(ath6kl_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -284,14 +281,11 @@ TRACE_EVENT(ath6kl_log_dbg,
TP_ARGS(level, vaf),
TP_STRUCT__entry(
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index b22ed499f7ba..a56fab6232a9 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -839,7 +839,7 @@ static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index aba70f35e574..5220809841a6 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -71,6 +71,7 @@ struct ath6kl_usb {
u8 *diag_cmd_buffer;
u8 *diag_resp_buffer;
struct ath6kl *ar;
+ struct workqueue_struct *wq;
};
/* usb urb object */
@@ -478,7 +479,7 @@ static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
* Flushing any pending I/O may schedule work this call will block
* until all scheduled work runs to completion.
*/
- flush_scheduled_work();
+ flush_workqueue(ar_usb->wq);
}
static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
@@ -544,7 +545,7 @@ static void ath6kl_usb_recv_complete(struct urb *urb)
/* note: queue implements a lock */
skb_queue_tail(&pipe->io_comp_queue, skb);
- schedule_work(&pipe->io_complete_work);
+ queue_work(pipe->ar_usb->wq, &pipe->io_complete_work);
cleanup_recv_urb:
ath6kl_usb_cleanup_recv_urb(urb_context);
@@ -579,7 +580,7 @@ static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
/* note: queue implements a lock */
skb_queue_tail(&pipe->io_comp_queue, skb);
- schedule_work(&pipe->io_complete_work);
+ queue_work(pipe->ar_usb->wq, &pipe->io_complete_work);
}
static void ath6kl_usb_io_comp_work(struct work_struct *work)
@@ -619,6 +620,7 @@ static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
kfree(ar_usb->diag_cmd_buffer);
kfree(ar_usb->diag_resp_buffer);
+ destroy_workqueue(ar_usb->wq);
kfree(ar_usb);
}
@@ -631,9 +633,15 @@ static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
int status = 0;
int i;
+ /* ath6kl_usb_destroy() needs ar_usb != NULL && ar_usb->wq != NULL. */
ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
if (ar_usb == NULL)
- goto fail_ath6kl_usb_create;
+ return NULL;
+ ar_usb->wq = alloc_workqueue("ath6kl_wq", 0, 0);
+ if (!ar_usb->wq) {
+ kfree(ar_usb);
+ return NULL;
+ }
usb_set_intfdata(interface, ar_usb);
spin_lock_init(&(ar_usb->cs_lock));
@@ -1217,6 +1225,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
static const struct usb_device_id ath6kl_usb_ids[] = {
{USB_DEVICE(0x0cf3, 0x9375)},
{USB_DEVICE(0x0cf3, 0x9374)},
+ {USB_DEVICE(0x04da, 0x390d)},
{ /* Terminating entry */ },
};
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index bd1ef6334997..3787b9fb0075 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1750,7 +1750,6 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
{
- u16 ap_info_entry_size;
struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
struct wmi_ap_info_v1 *ap_info_v1;
u8 index;
@@ -1759,14 +1758,12 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->ap_list_ver != APLIST_VER1)
return -EINVAL;
- ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
ath6kl_dbg(ATH6KL_DBG_WMI,
"number of APs in aplist event: %d\n", ev->num_ap);
- if (len < (int) (sizeof(struct wmi_aplist_event) +
- (ev->num_ap - 1) * ap_info_entry_size))
+ if (len < struct_size(ev, ap_list, ev->num_ap))
return -EINVAL;
/* AP list version 1 contents */
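The struct_size() conversion deserves a note: struct_size(ev, ap_list, ev->num_ap) evaluates to sizeof(*ev) + ev->num_ap * sizeof(ev->ap_list[0]) with saturation on overflow, so the old open-coded (ev->num_ap - 1) adjustment, an artifact of declaring ap_list as a one-element array, is no longer needed; the ch_list[1] to ch_list[] flexible-array conversions later in this diff follow the same pattern.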
@@ -1959,21 +1956,15 @@ static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
{
struct sk_buff *skb;
struct wmi_start_scan_cmd *sc;
- s8 size;
int i, ret;
- size = sizeof(struct wmi_start_scan_cmd);
-
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
-
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
@@ -2008,7 +1999,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct wmi_begin_scan_cmd *sc;
- s8 size, *supp_rates;
+ s8 *supp_rates;
int i, band, ret;
struct ath6kl *ar = wmi->parent_dev;
int num_rates;
@@ -2023,18 +2014,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
num_chan, ch_list);
}
- size = sizeof(struct wmi_begin_scan_cmd);
-
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
-
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
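Both wmi.c hunks above trade open-coded trailing-array size math for struct_size() from <linux/overflow.h>. The old expressions subtracted one element because the arrays were declared with a [1] placeholder (see the wmi.h hunks below); struct_size() computes sizeof(*p) plus n trailing elements and saturates to SIZE_MAX on overflow, so a firmware-controlled count such as ev->num_ap can no longer wrap the length check. A sketch of the idiom, with a hypothetical struct:

#include <linux/overflow.h>

struct report {
	u8 num;
	__le16 list[];		/* flexible array member */
};

static bool report_len_ok(const struct report *r, size_t len)
{
	/* struct_size(r, list, n) == sizeof(*r) + n * sizeof(r->list[0]),
	 * saturating to SIZE_MAX if the multiply or add would overflow.
	 */
	return len >= struct_size(r, list, r->num);
}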
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 784940ba4c90..b4fcfb72991c 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -698,7 +698,7 @@ enum auth_mode {
/*
* NB: these values are ordered carefully; there are lots of
- * of implications in any reordering. In particular beware
+ * implications in any reordering. In particular beware
* that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
*/
#define ATH6KL_CIPHER_WEP 0
@@ -863,7 +863,7 @@ struct wmi_begin_scan_cmd {
u8 num_ch;
/* channels in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/* wmi_start_scan_cmd is to be deprecated. Use
@@ -889,7 +889,7 @@ struct wmi_start_scan_cmd {
u8 num_ch;
/* channels in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/*
@@ -1278,7 +1278,7 @@ struct wmi_snr_threshold_params_cmd {
/* "alpha" */
u8 weight;
- /* lowest of uppper */
+ /* lowest of upper */
u8 thresh_above1_val;
u8 thresh_above2_val;
@@ -1373,7 +1373,7 @@ struct wmi_channel_list_reply {
u8 num_ch;
/* channel in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/* List of Events (target to host) */
@@ -1545,7 +1545,7 @@ struct wmi_connect_event {
u8 beacon_ie_len;
u8 assoc_req_len;
u8 assoc_resp_len;
- u8 assoc_info[1];
+ u8 assoc_info[];
} __packed;
/* Disconnect Event */
@@ -1596,7 +1596,7 @@ struct wmi_disconnect_event {
u8 disconn_reason;
u8 assoc_resp_len;
- u8 assoc_info[1];
+ u8 assoc_info[];
} __packed;
/*
@@ -1637,7 +1637,7 @@ struct bss_bias {
struct bss_bias_info {
u8 num_bss;
- struct bss_bias bss_bias[0];
+ struct bss_bias bss_bias[];
} __packed;
struct low_rssi_scan_params {
@@ -1720,7 +1720,7 @@ struct wmi_neighbor_info {
struct wmi_neighbor_report_event {
u8 num_neighbors;
- struct wmi_neighbor_info neighbor[0];
+ struct wmi_neighbor_info neighbor[];
} __packed;
/* TKIP MIC Error Event */
@@ -1957,7 +1957,7 @@ union wmi_ap_info {
struct wmi_aplist_event {
u8 ap_list_ver;
u8 num_ap;
- union wmi_ap_info ap_list[1];
+ union wmi_ap_info ap_list[];
} __packed;
/* Developer Commands */
@@ -2051,7 +2051,7 @@ struct wmi_get_keepalive_cmd {
struct wmi_set_appie_cmd {
u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
u8 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
struct wmi_set_ie_cmd {
@@ -2059,7 +2059,7 @@ struct wmi_set_ie_cmd {
u8 ie_field; /* enum wmi_ie_field_type */
u8 ie_len;
u8 reserved;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* Notify the WSC registration status to the target */
@@ -2127,7 +2127,7 @@ struct wmi_add_wow_pattern_cmd {
u8 filter_list_id;
u8 filter_size;
u8 filter_offset;
- u8 filter[0];
+ u8 filter[];
} __packed;
struct wmi_del_wow_pattern_cmd {
@@ -2360,7 +2360,7 @@ struct wmi_send_action_cmd {
__le32 freq;
__le32 wait;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_send_mgmt_cmd {
@@ -2369,7 +2369,7 @@ struct wmi_send_mgmt_cmd {
__le32 wait;
__le32 no_cck;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_tx_status_event {
@@ -2389,7 +2389,7 @@ struct wmi_set_appie_extended_cmd {
u8 role_id;
u8 mgmt_frm_type;
u8 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
struct wmi_remain_on_chnl_event {
@@ -2406,18 +2406,18 @@ struct wmi_cancel_remain_on_chnl_event {
struct wmi_rx_action_event {
__le32 freq;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_capabilities_event {
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_rx_probe_req_event {
__le32 freq;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
#define P2P_FLAG_CAPABILITIES_REQ (0x00000001)
@@ -2431,7 +2431,7 @@ struct wmi_get_p2p_info {
struct wmi_p2p_info_event {
__le32 info_req_flags;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_capabilities {
@@ -2450,7 +2450,7 @@ struct wmi_p2p_probe_response_cmd {
__le32 freq;
u8 destination_addr[ETH_ALEN];
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/* Extended WMI (WMIX)
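The [1]-to-[] conversions above are more than style: a one-element placeholder array is counted by sizeof(), a C99 flexible array member is not, which is exactly what makes the struct_size() checks in wmi.c exact instead of off-by-one. Schematically (sizes assume the __packed layout used here):

struct old_style { u8 num; __le16 ch_list[1]; } __packed;	/* sizeof == 3 */
struct new_style { u8 num; __le16 ch_list[]; } __packed;	/* sizeof == 1 */

/* Old size math had to compensate for the counted placeholder:
 *	sizeof(struct old_style) + (num - 1) * sizeof(__le16)
 * With a flexible array the compiler (and fortified memcpy /
 * -Warray-bounds) also knows the array has no fixed one-element bound.
 */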
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index eff94bcd1f0a..9bdfcee2f448 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -45,6 +45,11 @@ ath9k_hw-y:= \
ar9003_eeprom.o \
ar9003_paprd.o
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_mac.o += -Wno-array-bounds
+endif
+
ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index cdefb8e2daf1..9cd12b20b18d 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -98,13 +98,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
- }
-
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ath9k_fill_chanctx_ops();
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
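platform_get_irq() is the preferred way to fetch an interrupt: unlike a raw IORESOURCE_IRQ lookup it works when the IRQ comes from devicetree/ACPI mappings resolved lazily, it prints its own error message on failure, and it can return -EPROBE_DEFER, which a probe function must propagate rather than translate — hence the dropped dev_err() and -ENXIO above. A minimal probe sketch with hypothetical names:

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);	/* logs failures itself */
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER: pass it up as-is */

	/* ... request the IRQ and finish device setup ... */
	return 0;
}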
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index fba5a847c3bb..a8c0e8e2d78c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -301,10 +301,11 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2)
| set11nPktDurRTSCTS(i->rates, 3));
- WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0)
- | set11nRateFlags(i->rates, 1)
- | set11nRateFlags(i->rates, 2)
- | set11nRateFlags(i->rates, 3)
+ WRITE_ONCE(ads->ds_ctl7,
+ set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0)
+ | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1)
+ | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2)
+ | set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate));
WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1));
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index fcfed8e59d29..ebdb97999335 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -498,7 +498,7 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
else
REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, repeat_bit);
- /* on AR92xx, the highest bit of count will make the the chip send
+ /* on AR92xx, the highest bit of count will make the chip send
* spectral samples endlessly. Check if this really was intended,
* and fix otherwise.
*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index dc24da1ff00b..6ca089f15629 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -177,7 +177,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
int i;
/* Accumulate IQ cal measures for active chains */
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->txchainmask & BIT(i)) {
ah->totalPowerMeasI[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b0a4ca3559fd..16bfcd0a1f6e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3911,7 +3911,7 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
}
/* Test value. if 0 then attenuation is unused. Don't load anything. */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->txchainmask & BIT(i)) {
value = ar9003_hw_atten_chain_get(ah, i, chan);
REG_RMW_FIELD(ah, ext_atten_reg[i],
@@ -4747,7 +4747,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
}
static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
- int mode,
+ bool is2ghz,
int ipier,
int ichain,
int *pfrequency,
@@ -4757,7 +4757,6 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
{
u8 *pCalPier;
struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
- int is2GHz;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
struct ath_common *common = ath9k_hw_common(ah);
@@ -4768,17 +4767,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
return -1;
}
- if (mode) { /* 5GHz */
- if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
- ath_dbg(common, EEPROM,
- "Invalid 5GHz cal pier index, must be less than %d\n",
- AR9300_NUM_5G_CAL_PIERS);
- return -1;
- }
- pCalPier = &(eep->calFreqPier5G[ipier]);
- pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
- is2GHz = 0;
- } else {
+ if (is2ghz) {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
ath_dbg(common, EEPROM,
"Invalid 2GHz cal pier index, must be less than %d\n",
@@ -4788,10 +4777,18 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
pCalPier = &(eep->calFreqPier2G[ipier]);
pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
- is2GHz = 1;
+ } else {
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_dbg(common, EEPROM,
+ "Invalid 5GHz cal pier index, must be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
}
- *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz);
+ *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2ghz);
*pcorrection = pCalPierStruct->refPower;
*ptemperature = pCalPierStruct->tempMeas;
*pvoltage = pCalPierStruct->voltMeas;
@@ -4960,7 +4957,6 @@ tempslope:
static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
{
int ichain, ipier, npier;
- int mode;
int lfrequency[AR9300_MAX_CHAINS],
lcorrection[AR9300_MAX_CHAINS],
ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS],
@@ -4976,12 +4972,12 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
int pfrequency, pcorrection, ptemperature, pvoltage,
pnf_cal, pnf_pwr;
struct ath_common *common = ath9k_hw_common(ah);
+ bool is2ghz = frequency < 4000;
- mode = (frequency >= 4000);
- if (mode)
- npier = AR9300_NUM_5G_CAL_PIERS;
- else
+ if (is2ghz)
npier = AR9300_NUM_2G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_5G_CAL_PIERS;
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
lfrequency[ichain] = 0;
@@ -4990,7 +4986,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* identify best lower and higher frequency calibration measurement */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
for (ipier = 0; ipier < npier; ipier++) {
- if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ if (!ar9003_hw_cal_pier_get(ah, is2ghz, ipier, ichain,
&pfrequency, &pcorrection,
&ptemperature, &pvoltage,
&pnf_cal, &pnf_pwr)) {
@@ -5126,13 +5122,13 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
frequency, correction[0], correction[1], correction[2]);
/* Store calibrated noise floor values */
- for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++)
- if (mode) {
- ah->nf_5g.cal[ichain] = nf_cal[ichain];
- ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
- } else {
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++)
+ if (is2ghz) {
ah->nf_2g.cal[ichain] = nf_cal[ichain];
ah->nf_2g.pwr[ichain] = nf_pwr[ichain];
+ } else {
+ ah->nf_5g.cal[ichain] = nf_cal[ichain];
+ ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
}
return 0;
@@ -5449,8 +5445,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- struct ar9300_modal_eep_header *modal_hdr;
u8 targetPowerValT2[ar9300RateSize];
u8 target_power_val_t2_eep[ar9300RateSize];
u8 targetPowerValT2_tpc[ar9300RateSize];
@@ -5465,17 +5459,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
if (ar9003_is_paprd_enabled(ah)) {
- if (IS_CHAN_2GHZ(chan))
- modal_hdr = &eep->modalHeader2G;
- else
- modal_hdr = &eep->modalHeader5G;
-
ah->paprd_ratemask =
- le32_to_cpu(modal_hdr->papdRateMaskHt20) &
+ ar9003_get_paprd_rate_mask_ht20(ah, IS_CHAN_2GHZ(chan)) &
AR9300_PAPRD_RATE_MASK;
ah->paprd_ratemask_ht40 =
- le32_to_cpu(modal_hdr->papdRateMaskHt40) &
+ ar9003_get_paprd_rate_mask_ht40(ah, IS_CHAN_2GHZ(chan)) &
AR9300_PAPRD_RATE_MASK;
paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan);
@@ -5592,30 +5581,40 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz)
return ar9003_modal_header(ah, is2ghz)->spurChans;
}
+u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz)
+{
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt20);
+}
+
+u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz)
+{
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt40);
+}
+
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ bool is2ghz = IS_CHAN_2GHZ(chan);
- if (IS_CHAN_2GHZ(chan))
- return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20),
+ if (is2ghz)
+ return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz),
AR9300_PAPRD_SCALE_1);
else {
if (chan->channel >= 5700)
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
+ return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz),
AR9300_PAPRD_SCALE_1);
else if (chan->channel >= 5400)
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz),
AR9300_PAPRD_SCALE_2);
else
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz),
AR9300_PAPRD_SCALE_1);
}
}
static u8 ar9003_get_eepmisc(struct ath_hw *ah)
{
- return ah->eeprom.map4k.baseEepHeader.eepMisc;
+ return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc;
}
const struct eeprom_ops eep_ar9300_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index e8fda54acfe3..f8ae20318302 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -363,6 +363,8 @@ u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
+u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz);
+u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz);
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5184a0aacfe2..ff8ab58e67d9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -144,10 +144,11 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2)
| set11nPktDurRTSCTS(i->rates, 3));
- WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0)
- | set11nRateFlags(i->rates, 1)
- | set11nRateFlags(i->rates, 2)
- | set11nRateFlags(i->rates, 3)
+ WRITE_ONCE(ads->ctl18,
+ set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0)
+ | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1)
+ | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2)
+ | set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate));
WRITE_ONCE(ads->ctl19, AR_Not_Sounding);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 34e100940284..b2d53b6c0ffd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -21,7 +21,7 @@
void ar9003_paprd_enable(struct ath_hw *ah, bool val)
{
struct ath9k_channel *chan = ah->curchan;
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ bool is2ghz = IS_CHAN_2GHZ(chan);
/*
* 3 bits for modalHeader5G.papdRateMaskHt20
@@ -36,17 +36,17 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
* -- disable PAPRD for lower band 5GHz
*/
- if (IS_CHAN_5GHZ(chan)) {
+ if (!is2ghz) {
if (chan->channel >= UPPER_5G_SUB_BAND_START) {
- if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(30))
val = false;
} else if (chan->channel >= MID_5G_SUB_BAND_START) {
- if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(29))
val = false;
} else {
- if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(28))
val = false;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index daf30f9946b4..090ff0600c81 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -523,21 +523,10 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
int synth_freq;
int range = 10;
int freq_offset = 0;
- int mode;
- u8* spurChansPtr;
+ u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
unsigned int i;
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-
- if (IS_CHAN_5GHZ(chan)) {
- spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
- mode = 0;
- }
- else {
- spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
- mode = 1;
- }
- if (spurChansPtr[0] == 0)
+ if (spur_fbin_ptr[0] == 0)
return; /* No spur in the mode */
if (IS_CHAN_HT40(chan)) {
@@ -554,16 +543,18 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
ar9003_hw_spur_ofdm_clear(ah);
- for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
- freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS && spur_fbin_ptr[i]; i++) {
+ freq_offset = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
+ IS_CHAN_2GHZ(chan));
freq_offset -= synth_freq;
if (abs(freq_offset) < range) {
ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
range, synth_freq);
if (AR_SREV_9565(ah) && (i < 4)) {
- freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
- mode);
+ freq_offset =
+ ath9k_hw_fbin2freq(spur_fbin_ptr[i + 1],
+ IS_CHAN_2GHZ(chan));
freq_offset -= synth_freq;
if (abs(freq_offset) < range)
ar9003_hw_spur_ofdm_9565(ah, freq_offset);
@@ -1753,7 +1744,7 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
- /* on AR93xx and newer, count = 0 will make the the chip send
+ /* on AR93xx and newer, count = 0 will make the chip send
* spectral samples endlessly. Check if this really was intended,
* and fix otherwise.
*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index a171dbb29fbb..ad949eb02f3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -720,7 +720,7 @@
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
-#define AR_CH0_TOP2_XPABIASLVL_S 12
+#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef6f5ea06c1f..3ccf8cfc6b63 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -1071,8 +1071,9 @@ struct ath_softc {
#endif
#ifdef CONFIG_ATH9K_HWRNG
+ struct hwrng rng_ops;
u32 rng_last;
- struct task_struct *rng_task;
+ char rng_name[sizeof("ath9k_65535")];
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 72e2e71aac0e..ee72faac2f1d 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -135,7 +135,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
bf->bf_mpdu = NULL;
}
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, 0);
if (skb == NULL)
return NULL;
@@ -362,7 +362,7 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc,
bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- if (!vif || !vif->csa_active)
+ if (!vif || !vif->bss_conf.csa_active)
return false;
if (!ieee80211_beacon_cntdwn_is_complete(vif))
@@ -585,8 +585,9 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
static void ath9k_cache_beacon_config(struct ath_softc *sc,
struct ath_chanctx *ctx,
- struct ieee80211_bss_conf *bss_conf)
+ struct ieee80211_vif *vif)
{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &ctx->beacon;
@@ -596,7 +597,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->dtim_count = 1;
- cur_conf->ibss_creator = bss_conf->ibss_creator;
+ cur_conf->ibss_creator = vif->cfg.ibss_creator;
/*
* It looks like mac80211 may end up using beacon interval of zero in
@@ -649,7 +650,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
cur_conf->enable_beacon = beacons;
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
- ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf);
+ ath9k_cache_beacon_config(sc, ctx, main_vif);
ath9k_set_beacon(sc);
set_bit(ATH_OP_BEACONS, &common->op_flags);
@@ -657,7 +658,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
}
/* Update the beacon configuration. */
- ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf);
+ ath9k_cache_beacon_config(sc, ctx, main_vif);
/*
* Configure the HW beacon registers only when we have a valid
@@ -670,7 +671,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
* IBSS interface.
*/
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
- !enabled && beacons && !main_vif->bss_conf.ibss_creator) {
+ !enabled && beacons && !main_vif->cfg.ibss_creator) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 6cf087522157..571062f2e82a 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
if (!avp->assoc)
return false;
- skb = ieee80211_nullfunc_get(sc->hw, vif, false);
+ skb = ieee80211_nullfunc_get(sc->hw, vif, -1, false);
if (!skb)
return false;
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index d95cabddce33..1e2a30019fb6 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -36,7 +36,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- if (!an->sta->ht_cap.ht_supported) {
+ if (!an->sta->deflink.ht_cap.ht_supported) {
len = scnprintf(buf, size, "%s\n",
"HT not supported");
goto exit;
@@ -186,7 +186,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
band = ah->curchan->chan->band;
rstats = &an->rx_rate_stats;
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
goto legacy;
len += scnprintf(buf + len, size - len,
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index acb9602aa464..11349218bc21 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -246,7 +246,7 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
DFS_STAT_INC(sc, dc_phy_errors);
/* when both are present use stronger one */
- rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
+ rssi = max(ard->rssi, ard->ext_rssi);
break;
default:
/*
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e6b3cd49ea18..efb7889142d4 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -670,8 +670,6 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
- int status;
-
if (AR_SREV_9300_20_OR_LATER(ah))
ah->eep_ops = &eep_ar9300_ops;
else if (AR_SREV_9287(ah)) {
@@ -685,7 +683,5 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
if (!ah->eep_ops->fill_eeprom(ah))
return -EIO;
- status = ah->eep_ops->check_eeprom(ah);
-
- return status;
+ return ah->eep_ops->check_eeprom(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f06eec99de68..4d9002a9d082 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -244,11 +244,11 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, txok);
if (txok) {
- TX_STAT_INC(skb_success);
- TX_STAT_ADD(skb_success_bytes, ln);
+ TX_STAT_INC(hif_dev, skb_success);
+ TX_STAT_ADD(hif_dev, skb_success_bytes, ln);
}
else
- TX_STAT_INC(skb_failed);
+ TX_STAT_INC(hif_dev, skb_failed);
}
}
@@ -302,7 +302,7 @@ static void hif_usb_tx_cb(struct urb *urb)
hif_dev->tx.tx_buf_cnt++;
if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
__hif_usb_tx(hif_dev); /* Check for pending SKBs */
- TX_STAT_INC(buf_completed);
+ TX_STAT_INC(hif_dev, buf_completed);
spin_unlock(&hif_dev->tx.tx_lock);
}
@@ -353,7 +353,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
tx_buf->len += tx_buf->offset;
__skb_queue_tail(&tx_buf->skb_queue, nskb);
- TX_STAT_INC(skb_queued);
+ TX_STAT_INC(hif_dev, skb_queued);
}
usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
@@ -368,11 +368,10 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
__skb_queue_head_init(&tx_buf->skb_queue);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
+ } else {
+ TX_STAT_INC(hif_dev, buf_queued);
}
- if (!ret)
- TX_STAT_INC(buf_queued);
-
return ret;
}
@@ -515,7 +514,7 @@ static void hif_usb_sta_drain(void *hif_handle, u8 idx)
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, false);
hif_dev->tx.tx_skb_cnt--;
- TX_STAT_INC(skb_failed);
+ TX_STAT_INC(hif_dev, skb_failed);
}
}
@@ -586,14 +585,14 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
pkt_tag = get_unaligned_le16(ptr + index + 2);
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
- RX_STAT_INC(skb_dropped);
+ RX_STAT_INC(hif_dev, skb_dropped);
return;
}
if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
- RX_STAT_INC(skb_dropped);
+ RX_STAT_INC(hif_dev, skb_dropped);
return;
}
@@ -619,7 +618,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
goto err;
}
skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
+ RX_STAT_INC(hif_dev, skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]),
hif_dev->rx_transfer_len);
@@ -640,7 +639,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
goto err;
}
skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
+ RX_STAT_INC(hif_dev, skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
skb_put(nskb, pkt_len);
@@ -650,10 +649,10 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
err:
for (i = 0; i < pool_index; i++) {
- RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
+ RX_STAT_ADD(hif_dev, skb_completed_bytes, skb_pool[i]->len);
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
skb_pool[i]->len, USB_WLAN_RX_PIPE);
- RX_STAT_INC(skb_completed);
+ RX_STAT_INC(hif_dev, skb_completed);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 6b45e63fae4b..30f0765fb9fd 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -327,14 +327,18 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
}
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
-
-#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
-#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
-#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
-
-#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
+#define __STAT_SAFE(hif_dev, expr) ((hif_dev)->htc_handle->drv_priv ? (expr) : 0)
+#define CAB_STAT_INC(priv) ((priv)->debug.tx_stats.cab_queued++)
+#define TX_QSTAT_INC(priv, q) ((priv)->debug.tx_stats.queue_stats[q]++)
+
+#define TX_STAT_INC(hif_dev, c) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(hif_dev, c, a) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c += a)
+#define RX_STAT_INC(hif_dev, c) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(hif_dev, c, a) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c += a)
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
struct ath_rx_status *rs);
@@ -374,13 +378,13 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data);
#else
-#define TX_STAT_INC(c) do { } while (0)
-#define TX_STAT_ADD(c, a) do { } while (0)
-#define RX_STAT_INC(c) do { } while (0)
-#define RX_STAT_ADD(c, a) do { } while (0)
-#define CAB_STAT_INC do { } while (0)
+#define TX_STAT_INC(hif_dev, c)
+#define TX_STAT_ADD(hif_dev, c, a)
+#define RX_STAT_INC(hif_dev, c)
+#define RX_STAT_ADD(hif_dev, c, a)
-#define TX_QSTAT_INC(c) do { } while (0)
+#define CAB_STAT_INC(priv)
+#define TX_QSTAT_INC(priv, c)
static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
struct ath_rx_status *rs)
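The debugfs stats macros now take the device explicitly and go through __STAT_SAFE(), which turns every update into a no-op while htc_handle->drv_priv is still NULL; the htc_drv_init.c hunk below makes that window real by publishing drv_priv only after probing succeeds. The ?:-expression form (rather than do { } while (0)) keeps the macros usable anywhere the old ones were. For illustration:

/* With drv_priv not yet assigned, this expands to a harmless 0: */
TX_STAT_INC(hif_dev, skb_queued);
/* i.e. (hif_dev->htc_handle->drv_priv
 *	? hif_dev->htc_handle->drv_priv->debug.tx_stats.skb_queued++
 *	: 0);
 */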
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index c745897aa3d6..533471e69400 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -215,7 +215,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
}
/* Get a new beacon */
- beacon = ieee80211_beacon_get(priv->hw, vif);
+ beacon = ieee80211_beacon_get(priv->hw, vif, 0);
if (!beacon) {
spin_unlock_bh(&priv->beacon_lock);
return;
@@ -511,7 +511,7 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
struct ieee80211_vif *vif;
vif = priv->csa_vif;
- if (!vif || !vif->csa_active)
+ if (!vif || !vif->bss_conf.csa_active)
return false;
if (!ieee80211_beacon_cntdwn_is_complete(vif))
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index ff61ae34ecdf..07ac88fb1c57 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -944,7 +944,6 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
priv->hw = hw;
priv->htc = htc_handle;
priv->dev = dev;
- htc_handle->drv_priv = priv;
SET_IEEE80211_DEV(hw, priv->dev);
ret = ath9k_htc_wait_for_target(priv);
@@ -965,6 +964,8 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
if (ret)
goto err_init;
+ htc_handle->drv_priv = priv;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 72ef319feeda..61875c45366b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -100,7 +100,7 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
priv->rearm_ani = true;
}
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
priv->rearm_ani = true;
priv->reconfig_beacon = true;
}
@@ -491,7 +491,7 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
ista->index = sta_idx;
tsta.is_vif_sta = 0;
maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
+ sta->deflink.ht_cap.ampdu_factor);
tsta.maxampdu = cpu_to_be16(maxampdu);
} else {
memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
@@ -602,7 +602,7 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
for (i = 0, j = 0; i < sband->n_bitrates; i++) {
- if (sta->supp_rates[sband->band] & BIT(i)) {
+ if (sta->deflink.supp_rates[sband->band] & BIT(i)) {
trate->rates.legacy_rates.rs_rates[j]
= (sband->bitrates[i].bitrate * 2) / 10;
j++;
@@ -610,9 +610,9 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
}
trate->rates.legacy_rates.rs_nrates = j;
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
for (i = 0, j = 0; i < 77; i++) {
- if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ if (sta->deflink.ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
trate->rates.ht_rates.rs_rates[j++] = i;
if (j == ATH_HTC_RATE_MAX)
break;
@@ -620,18 +620,18 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
trate->rates.ht_rates.rs_nrates = j;
caps = WLAN_RC_HT_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
caps |= ATH_RC_TX_STBC_FLAG;
- if (sta->ht_cap.mcs.rx_mask[1])
+ if (sta->deflink.ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- (conf_is_ht40(&priv->hw->conf)))
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
+ (conf_is_ht40(&priv->hw->conf)))
caps |= WLAN_RC_40_FLAG;
if (conf_is_ht40(&priv->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
caps |= WLAN_RC_SGI_FLAG;
else if (conf_is_ht20(&priv->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
caps |= WLAN_RC_SGI_FLAG;
}
@@ -1369,7 +1369,8 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath9k_htc_priv *priv = hw->priv;
@@ -1488,8 +1489,8 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
- common->curaid = bss_conf->aid;
+ if ((vif->type == NL80211_IFTYPE_STATION) && vif->cfg.assoc) {
+ common->curaid = vif->cfg.aid;
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
@@ -1509,7 +1510,7 @@ static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
@@ -1521,17 +1522,17 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
- bss_conf->assoc);
+ vif->cfg.assoc);
- bss_conf->assoc ?
+ vif->cfg.assoc ?
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
- if (!bss_conf->assoc)
+ if (!vif->cfg.assoc)
clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_htc_choose_set_bssid(priv);
- if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
+ if (vif->cfg.assoc && (priv->num_sta_assoc_vif == 1))
ath9k_htc_start_ani(priv);
else if (priv->num_sta_assoc_vif == 0)
ath9k_htc_stop_ani(priv);
@@ -1540,7 +1541,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_IBSS) {
if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
- common->curaid = bss_conf->aid;
+ common->curaid = vif->cfg.aid;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
ath9k_htc_set_bssid(priv);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 6a850a0bfa8a..672789e3c55d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
switch (qnum) {
case 0:
- TX_QSTAT_INC(IEEE80211_AC_VO);
+ TX_QSTAT_INC(priv, IEEE80211_AC_VO);
epid = priv->data_vo_ep;
break;
case 1:
- TX_QSTAT_INC(IEEE80211_AC_VI);
+ TX_QSTAT_INC(priv, IEEE80211_AC_VI);
epid = priv->data_vi_ep;
break;
case 2:
- TX_QSTAT_INC(IEEE80211_AC_BE);
+ TX_QSTAT_INC(priv, IEEE80211_AC_BE);
epid = priv->data_be_ep;
break;
case 3:
default:
- TX_QSTAT_INC(IEEE80211_AC_BK);
+ TX_QSTAT_INC(priv, IEEE80211_AC_BK);
epid = priv->data_bk_ep;
break;
}
@@ -328,7 +328,7 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
if (is_cab) {
- CAB_STAT_INC;
+ CAB_STAT_INC(priv);
tx_ctl->epid = priv->cab_ep;
return;
}
@@ -1016,6 +1016,14 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
goto rx_next;
}
+ if (rxstatus->rs_keyix >= ATH_KEYMAX &&
+ rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) {
+ ath_dbg(common, ANY,
+ "Invalid keyix, dropping (keyix: %d)\n",
+ rxstatus->rs_keyix);
+ goto rx_next;
+ }
+
/* Get the RX status information */
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 510e61e97dbc..ca05b07a45e6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -30,6 +30,7 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
hdr->endpoint_id = epid;
hdr->flags = flags;
hdr->payload_len = cpu_to_be16(len);
+ memset(hdr->control, 0, sizeof(hdr->control));
status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
@@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target,
conn_msg->dl_pipeid = endpoint->dl_pipeid;
conn_msg->ul_pipeid = endpoint->ul_pipeid;
+ /* To prevent infoleak */
+ conn_msg->svc_meta_len = 0;
+ conn_msg->pad = 0;
+
ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
if (ret)
goto err;
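Both additions above close infoleaks: hdr->control and the conn_msg fields were previously sent over USB carrying whatever stale stack or heap bytes happened to be there. The rule these hunks apply — sketched with a hypothetical message struct — is that every byte handed to the device must be initialized, padding included:

struct dev_msg {
	__be16 id;
	u8 flags;
	u8 pad;		/* would leak stale kernel memory if untouched */
} *msg;

/* Either zero the whole allocation up front ... */
msg = kzalloc(sizeof(*msg), GFP_KERNEL);
/* ... or, when reusing a buffer, explicitly clear every unused field
 * and pad byte (as the hunks above do) before it leaves the kernel.
 */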
@@ -359,33 +364,27 @@ ret:
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 len)
{
uint32_t *pattern = (uint32_t *)skb->data;
- switch (*pattern) {
- case 0x33221199:
- {
+ if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) {
struct htc_panic_bad_vaddr *htc_panic;
htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
htc_panic->exccause, htc_panic->pc,
htc_panic->badvaddr);
- break;
- }
- case 0x33221299:
- {
+ return;
+ }
+ if (*pattern == 0x33221299) {
struct htc_panic_bad_epid *htc_panic;
htc_panic = (struct htc_panic_bad_epid *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"bad epid: 0x%08x\n", htc_panic->epid);
- break;
- }
- default:
- dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
- break;
+ return;
}
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
}
/*
@@ -406,16 +405,26 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
if (!htc_handle || !skb)
return;
+ /* A valid message requires len >= 8.
+ *
+ * sizeof(struct htc_frame_hdr) == 8
+ * sizeof(struct htc_ready_msg) == 8
+ * sizeof(struct htc_panic_bad_vaddr) == 16
+ * sizeof(struct htc_panic_bad_epid) == 8
+ */
+ if (unlikely(len < sizeof(struct htc_frame_hdr)))
+ goto invalid;
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
if (epid == 0x99) {
- ath9k_htc_fw_panic_report(htc_handle, skb);
+ ath9k_htc_fw_panic_report(htc_handle, skb, len);
kfree_skb(skb);
return;
}
if (epid < 0 || epid >= ENDPOINT_MAX) {
+invalid:
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
else
@@ -427,21 +436,30 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
/* Handle trailer */
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
- if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) {
/* Move past the Watchdog pattern */
htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ len -= 4;
+ }
}
/* Get the message ID */
+ if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16)))
+ goto invalid;
msg_id = (__be16 *) ((void *) htc_hdr +
sizeof(struct htc_frame_hdr));
/* Now process HTC messages */
switch (be16_to_cpu(*msg_id)) {
case HTC_MSG_READY_ID:
+ if (unlikely(len < sizeof(struct htc_ready_msg)))
+ goto invalid;
htc_process_target_rdy(htc_handle, htc_hdr);
break;
case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ if (unlikely(len < sizeof(struct htc_frame_hdr) +
+ sizeof(struct htc_conn_svc_rspmsg)))
+ goto invalid;
htc_process_conn_rsp(htc_handle, htc_hdr);
break;
default:
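ath9k_htc_rx_msg() now length-checks the buffer before every cast, and ath9k_htc_fw_panic_report() receives the length so it can validate the 16-byte bad-vaddr record before reading it. Note also that skipping the 4-byte watchdog pattern decrements len, keeping the later checks honest. The recurring shape of these checks:

/* Never reinterpret a device-supplied buffer before proving it is
 * large enough for the struct you are about to read:
 */
if (unlikely(len < sizeof(struct htc_frame_hdr)))
	goto invalid;			/* drop instead of parsing */
htc_hdr = (struct htc_frame_hdr *)skb->data;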
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 096a206f49ed..450ab19b1d4e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -710,7 +710,7 @@ struct ath_spec_scan {
/**
* struct ath_hw_ops - callbacks used by hardware code and driver code
*
- * This structure contains callbacks designed to to be used internally by
+ * This structure contains callbacks designed to be used internally by
* hardware code and also by the lower level driver.
*
* @config_pci_powersave:
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index fd6aa49adadf..af44b33814dd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -35,8 +35,10 @@
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
- AR_STBC##_index : 0) \
- |SM((_series)[_index].ChSel, AR_ChainSel##_index))
+ AR_STBC##_index : 0))
+
+#define set11nChainSel(_series, _index) \
+ (SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10
#define CCK_PREAMBLE_BITS 144
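Splitting set11nChainSel() out of set11nRateFlags() makes each descriptor contribution explicit; the ar9002_mac.c and ar9003_mac.c hunks earlier in this patch OR both macros back together, so the generated ds_ctl7/ctl18 words are unchanged. Both macros are shift-and-mask packers built on SM(); schematically, with illustrative (not actual) field values:

#define SM(v, field)	(((v) << field##_S) & field)	/* as in ath9k */

/* Hypothetical field definition for rate series 0: */
#define AR_ChainSel0	0x000e0000
#define AR_ChainSel0_S	17

/* set11nChainSel(series, 0) then packs series[0].ChSel into the
 * AR_ChainSel0 bits of the descriptor word, independent of the
 * half-GI/2040/STBC flags that set11nRateFlags() contributes.
 */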
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 98090e40e1cf..a4197c14f0a9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
- fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}
@@ -1712,7 +1712,8 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
}
static int ath9k_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath_softc *sc = hw->priv;
@@ -1863,7 +1864,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
#define CHECK_ANI \
(BSS_CHANGED_ASSOC | \
@@ -1881,11 +1882,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
- bss_conf->bssid, bss_conf->assoc);
+ bss_conf->bssid, vif->cfg.assoc);
memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
- avp->aid = bss_conf->aid;
- avp->assoc = bss_conf->assoc;
+ avp->aid = vif->cfg.aid;
+ avp->assoc = vif->cfg.assoc;
ath9k_calculate_summary_state(sc, avp->chanctx);
}
@@ -1893,7 +1894,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_IBSS) ||
(changed & BSS_CHANGED_OCB)) {
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- common->curaid = bss_conf->aid;
+ common->curaid = vif->cfg.aid;
ath9k_hw_write_associd(sc->sc_ah);
}
@@ -2048,7 +2049,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_OPERATIONAL:
atid = ath_node_to_tid(an, tid);
atid->baw_size = IEEE80211_MIN_AMPDU_BUF <<
- sta->ht_cap.ampdu_factor;
+ sta->deflink.ht_cap.ampdu_factor;
break;
default:
ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
@@ -2596,6 +2597,7 @@ static void ath9k_change_chanctx(struct ieee80211_hw *hw,
static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct ath_softc *sc = hw->priv;
@@ -2627,6 +2629,7 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct ath_softc *sc = hw->priv;
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 39d46c203f6b..039bf0c35fbe 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,7 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *info)
{
struct ath_mci_profile_info *entry;
- u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
+ static const u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
(info->type == MCI_GPM_COEX_PROFILE_VOICE))
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 653e79611830..8983ea6fc727 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -834,8 +834,8 @@
((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
-#define AR_SREV_9100(ah) \
- ((ah->hw_version.macVersion) == AR_SREV_VERSION_9100)
+#define AR_SREV_9100(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9100))
#define AR_SREV_9100_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
@@ -891,7 +891,7 @@
#define AR_SREV_9300_20_OR_LATER(_ah) \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
#define AR_SREV_9300_22(_ah) \
- (AR_SREV_9300(ah) && \
+ (AR_SREV_9300((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22))
#define AR_SREV_9330(_ah) \
@@ -994,8 +994,8 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
#define AR_SREV_SOC(_ah) \
- (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \
- AR_SREV_9561(ah))
+ (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \
+ AR_SREV_9561(_ah))
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index f9d3d6eedd3c..58c0ab01771b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -21,11 +21,6 @@
#include "hw.h"
#include "ar9003_phy.h"
-#define ATH9K_RNG_BUF_SIZE 320
-#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 10) >> 5) /* quality: 10/32 */
-
-static DECLARE_WAIT_QUEUE_HEAD(rng_queue);
-
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
int i, j;
@@ -71,61 +66,57 @@ static u32 ath9k_rng_delay_get(u32 fail_stats)
return delay;
}
-static int ath9k_rng_kthread(void *data)
+static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
- int bytes_read;
- struct ath_softc *sc = data;
- u32 *rng_buf;
- u32 delay, fail_stats = 0;
-
- rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
- if (!rng_buf)
- goto out;
-
- while (!kthread_should_stop()) {
- bytes_read = ath9k_rng_data_read(sc, rng_buf,
- ATH9K_RNG_BUF_SIZE);
- if (unlikely(!bytes_read)) {
- delay = ath9k_rng_delay_get(++fail_stats);
- wait_event_interruptible_timeout(rng_queue,
- kthread_should_stop(),
- msecs_to_jiffies(delay));
- continue;
+ struct ath_softc *sc = container_of(rng, struct ath_softc, rng_ops);
+ u32 fail_stats = 0, word;
+ int bytes_read = 0;
+
+ for (;;) {
+ if (max & ~3UL)
+ bytes_read = ath9k_rng_data_read(sc, buf, max >> 2);
+ if ((max & 3UL) && ath9k_rng_data_read(sc, &word, 1)) {
+ memcpy(buf + bytes_read, &word, max & 3UL);
+ bytes_read += max & 3UL;
+ memzero_explicit(&word, sizeof(word));
}
+ if (!wait || !max || likely(bytes_read) || fail_stats > 110)
+ break;
- fail_stats = 0;
-
- /* sleep until entropy bits under write_wakeup_threshold */
- add_hwgenerator_randomness((void *)rng_buf, bytes_read,
- ATH9K_RNG_ENTROPY(bytes_read));
+ if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats)))
+ break;
}
- kfree(rng_buf);
-out:
- sc->rng_task = NULL;
-
- return 0;
+ if (wait && !bytes_read && max)
+ bytes_read = -EIO;
+ return bytes_read;
}
void ath9k_rng_start(struct ath_softc *sc)
{
+ static atomic_t serial = ATOMIC_INIT(0);
struct ath_hw *ah = sc->sc_ah;
- if (sc->rng_task)
+ if (sc->rng_ops.read)
return;
if (!AR_SREV_9300_20_OR_LATER(ah))
return;
- sc->rng_task = kthread_run(ath9k_rng_kthread, sc, "ath9k-hwrng");
- if (IS_ERR(sc->rng_task))
- sc->rng_task = NULL;
+ snprintf(sc->rng_name, sizeof(sc->rng_name), "ath9k_%u",
+ (atomic_inc_return(&serial) - 1) & U16_MAX);
+ sc->rng_ops.name = sc->rng_name;
+ sc->rng_ops.read = ath9k_rng_read;
+ sc->rng_ops.quality = 320;
+
+ if (devm_hwrng_register(sc->dev, &sc->rng_ops))
+ sc->rng_ops.read = NULL;
}
void ath9k_rng_stop(struct ath_softc *sc)
{
- if (sc->rng_task) {
- kthread_stop(sc->rng_task);
- sc->rng_task = NULL;
+ if (sc->rng_ops.read) {
+ devm_hwrng_unregister(sc->dev, &sc->rng_ops);
+ sc->rng_ops.read = NULL;
}
}
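The rng rework retires the private kthread: ath9k now fills in a struct hwrng with a ->read() callback and lets the hwrng core run the feeding loop, with hwrng_msleep() making the retry backoff abort promptly on unregistration. The quality value of 320 is on the core's per-1024-bits scale, i.e. the same 10/32 bits of entropy per output bit that the removed ATH9K_RNG_ENTROPY() macro encoded. Registration shape, with hypothetical names:

#include <linux/hw_random.h>

static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* fill up to max bytes; return the count, or -EIO on failure */
	return 0;
}

static int my_rng_setup(struct device *dev, struct hwrng *rng)
{
	rng->name = "my_rng";
	rng->read = my_rng_read;
	rng->quality = 320;	/* entropy estimate, per 1024 bits */
	return devm_hwrng_register(dev, rng);	/* unregisters on detach */
}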
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d0caf1de2bde..ba271a10d4ab 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
- sizeof(tx_info->rate_driver_data));
- return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+ sizeof(tx_info->status.status_driver_data));
+ return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -1271,7 +1271,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
int phy;
if (!rates[i].count || (rates[i].idx < 0))
- continue;
+ break;
rix = rates[i].idx;
info->rates[i].Tries = rates[i].count;
@@ -1574,10 +1574,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* in HT IBSS when a beacon with HT-info is received after the station
* has already been added.
*/
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor)) - 1;
- density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ sta->deflink.ht_cap.ampdu_factor)) - 1;
+ density = ath9k_parse_mpdudensity(sta->deflink.ht_cap.ampdu_density);
an->mpdudensity = density;
}
@@ -2160,7 +2160,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = an->ps_key;
else
fi->keyix = ATH9K_TXKEYIX_INVALID;
- fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
+ fi->dyn_smps = sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC;
fi->keytype = keytype;
fi->framelen = framelen;
fi->tx_power = txpower;
@@ -2542,6 +2542,16 @@ skip_tx_complete:
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+ void *ptr = &tx_info->status;
+
+ memset(ptr + sizeof(tx_info->status.rates), 0,
+ sizeof(tx_info->status) -
+ sizeof(tx_info->status.rates) -
+ sizeof(tx_info->status.status_driver_data));
+}
+
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
+ ath_clear_tx_status(tx_info);
+
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+ tx_info->status.rates[i].count = 0;
+ tx_info->status.rates[i].idx = -1;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
-
- for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
- tx_info->status.rates[i].count = 0;
- tx_info->status.rates[i].idx = -1;
- }
-
- tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
- /* we report airtime in ath_tx_count_airtime(), don't report twice */
- tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
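ath_frame_info moves from rate_driver_data to status.status_driver_data because the control, status, and rate_driver_data views of ieee80211_tx_info share the same union space: the frame info written at transmit time is still sitting in the status area when TX completion runs. ath_tx_rc_status() therefore cannot blindly trust (or wholesale clear) the status block; ath_clear_tx_status() zeroes only the span between rates[] and status_driver_data[], relying on the field order in the status struct:

/* Schematic layout of tx_info->status:
 *
 *   [ rates[] | ack_signal, ampdu counts, tx_time, ... | status_driver_data[] ]
 *     ^ kept    ^ zeroed by ath_clear_tx_status()        ^ kept (ath_frame_info)
 */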
diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile
index 1a81868ce26d..7463baa62fa8 100644
--- a/drivers/net/wireless/ath/carl9170/Makefile
+++ b/drivers/net/wireless/ath/carl9170/Makefile
@@ -3,3 +3,8 @@ carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o
obj-$(CONFIG_CARL9170) += carl9170.o
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_cmd.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 84a8ce0784b1..ba29b4aebe9f 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -458,7 +458,6 @@ struct ar9170 {
# define CARL9170_HWRNG_CACHE_SIZE CARL9170_MAX_CMD_PAYLOAD_LEN
struct {
struct hwrng rng;
- bool initialized;
char name[30 + 1];
u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
unsigned int cache_idx;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 1ab09e1c9ec5..4c1aecd1163c 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -105,7 +105,7 @@ static void carl9170_fw_info(struct ar9170 *ar)
CARL9170FW_GET_MONTH(fw_date),
CARL9170FW_GET_DAY(fw_date));
- strlcpy(ar->hw->wiphy->fw_version, motd_desc->release,
+ strscpy(ar->hw->wiphy->fw_version, motd_desc->release,
sizeof(ar->hw->wiphy->fw_version));
}
}
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 503b21abbba5..10acb6ad30d0 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -149,7 +149,7 @@ struct carl9170fw_fix_entry {
struct carl9170fw_fix_desc {
struct carl9170fw_desc_head head;
- struct carl9170fw_fix_entry data[0];
+ struct carl9170fw_fix_entry data[];
} __packed;
#define CARL9170FW_FIX_DESC_SIZE \
(sizeof(struct carl9170fw_fix_desc))
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 49f7ee1c912b..1540e9827f48 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1032,7 +1032,7 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
@@ -1115,7 +1115,7 @@ static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- ar->common.curaid = bss_conf->aid;
+ ar->common.curaid = vif->cfg.aid;
err = carl9170_set_beacon_timers(ar);
if (err)
goto out;
@@ -1306,8 +1306,8 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
atomic_set(&sta_info->pending_frames, 0);
- if (sta->ht_cap.ht_supported) {
- if (sta->ht_cap.ampdu_density > 6) {
+ if (sta->deflink.ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ampdu_density > 6) {
/*
* HW does support 16us AMPDU density.
* No HT-Xmit for station.
@@ -1319,7 +1319,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
RCU_INIT_POINTER(sta_info->agg[i], NULL);
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
+ sta_info->ampdu_max_len = 1 << (3 + sta->deflink.ht_cap.ampdu_factor);
sta_info->ht_sta = true;
}
@@ -1335,7 +1335,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
unsigned int i;
bool cleanup = false;
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
sta_info->ht_sta = false;
@@ -1365,7 +1365,8 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
}
static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct ar9170 *ar = hw->priv;
@@ -1412,7 +1413,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!tid_info)
return -ENOMEM;
@@ -1494,7 +1495,7 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
if (!(ar->features & CARL9170_WPS_BUTTON))
return 0;
- input = input_allocate_device();
+ input = devm_input_allocate_device(&ar->udev->dev);
if (!input)
return -ENOMEM;
@@ -1512,10 +1513,8 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
err = input_register_device(input);
- if (err) {
- input_free_device(input);
+ if (err)
return err;
- }
ar->wps.pbc = input;
return 0;
@@ -1539,7 +1538,7 @@ static int carl9170_rng_get(struct ar9170 *ar)
BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
- if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
+ if (!IS_ACCEPTING_CMD(ar))
return -EAGAIN;
count = ARRAY_SIZE(ar->rng.cache);
@@ -1585,14 +1584,6 @@ static int carl9170_rng_read(struct hwrng *rng, u32 *data)
return sizeof(u16);
}
-static void carl9170_unregister_hwrng(struct ar9170 *ar)
-{
- if (ar->rng.initialized) {
- hwrng_unregister(&ar->rng.rng);
- ar->rng.initialized = false;
- }
-}
-
static int carl9170_register_hwrng(struct ar9170 *ar)
{
int err;
@@ -1603,25 +1594,14 @@ static int carl9170_register_hwrng(struct ar9170 *ar)
ar->rng.rng.data_read = carl9170_rng_read;
ar->rng.rng.priv = (unsigned long)ar;
- if (WARN_ON(ar->rng.initialized))
- return -EALREADY;
-
- err = hwrng_register(&ar->rng.rng);
+ err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
if (err) {
dev_err(&ar->udev->dev, "Failed to register the random "
"number generator (%d)\n", err);
return err;
}
- ar->rng.initialized = true;
-
- err = carl9170_rng_get(ar);
- if (err) {
- carl9170_unregister_hwrng(ar);
- return err;
- }
-
- return 0;
+ return carl9170_rng_get(ar);
}
#endif /* CONFIG_CARL9170_HWRNG */
@@ -1914,7 +1894,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
WARN_ON(!(tx_streams >= 1 && tx_streams <=
IEEE80211_HT_MCS_TX_MAX_STREAMS));
- tx_params = (tx_streams - 1) <<
+ tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
@@ -1937,7 +1917,8 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
if (!bands)
return -EINVAL;
- ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
+ ar->survey = devm_kcalloc(&ar->udev->dev, chans,
+ sizeof(struct survey_info), GFP_KERNEL);
if (!ar->survey)
return -ENOMEM;
ar->num_channels = chans;
@@ -1964,11 +1945,7 @@ int carl9170_register(struct ar9170 *ar)
struct ath_regulatory *regulatory = &ar->common.regulatory;
int err = 0, i;
- if (WARN_ON(ar->mem_bitmap))
- return -EINVAL;
-
- ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL);
-
+ ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
if (!ar->mem_bitmap)
return -ENOMEM;
@@ -2057,17 +2034,6 @@ void carl9170_unregister(struct ar9170 *ar)
carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */
-#ifdef CONFIG_CARL9170_WPC
- if (ar->wps.pbc) {
- input_unregister_device(ar->wps.pbc);
- ar->wps.pbc = NULL;
- }
-#endif /* CONFIG_CARL9170_WPC */
-
-#ifdef CONFIG_CARL9170_HWRNG
- carl9170_unregister_hwrng(ar);
-#endif /* CONFIG_CARL9170_HWRNG */
-
carl9170_cancel_worker(ar);
cancel_work_sync(&ar->restart_work);
@@ -2082,12 +2048,6 @@ void carl9170_free(struct ar9170 *ar)
kfree_skb(ar->rx_failover);
ar->rx_failover = NULL;
- bitmap_free(ar->mem_bitmap);
- ar->mem_bitmap = NULL;
-
- kfree(ar->survey);
- ar->survey = NULL;
-
mutex_destroy(&ar->mutex);
ieee80211_free_hw(ar->hw);
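The carl9170 hunks above convert the WPS input device, the hwrng, the survey array, and the memory bitmap to device-managed (devm_*) resources; that is why the manual teardown in carl9170_unregister()/carl9170_free() and the rng.initialized bookkeeping flag can be deleted, since everything is released automatically when the USB device is unbound. A sketch of the pattern:

        /* no kfree()/unregister calls needed in error paths or on remove */
        ar->survey = devm_kcalloc(&ar->udev->dev, chans,
                                  sizeof(*ar->survey), GFP_KERNEL);
        if (!ar->survey)
                return -ENOMEM;

        /* auto-unregistered on device teardown */
        return devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);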
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 1b76f4434c06..6bb9aa2bfe65 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1044,8 +1044,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
if (unlikely(!sta || !cvif))
goto err_out;
- factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
- density = sta->ht_cap.ampdu_density;
+ factor = min_t(unsigned int, 1u,
+ sta->deflink.ht_cap.ampdu_factor);
+ density = sta->deflink.ht_cap.ampdu_density;
if (density) {
/*
@@ -1558,6 +1559,9 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
goto out;
}
} while (ar->beacon_enabled && i--);
+
+ /* no entry found in list */
+ return NULL;
}
out:
@@ -1624,7 +1628,7 @@ int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
goto out_unlock;
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
- NULL, NULL);
+ NULL, NULL, 0);
if (!skb) {
err = -ENOMEM;
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index bb73553fd7c2..0a4e42e806b9 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -327,7 +327,7 @@ struct _carl9170_tx_superdesc {
struct _carl9170_tx_superframe {
struct _carl9170_tx_superdesc s;
struct _ar9170_tx_hwdesc f;
- u8 frame_data[0];
+ u8 frame_data[];
} __packed __aligned(4);
#define CARL9170_TX_SUPERDESC_LEN 24
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 75cb53a3ec15..27f4d74a41c8 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -197,7 +197,7 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd,
static struct channel_detector *
channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
- u32 sz, i;
+ u32 i;
struct channel_detector *cd;
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
@@ -206,8 +206,8 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
- sz = sizeof(cd->detectors) * dpd->num_radar_types;
- cd->detectors = kzalloc(sz, GFP_ATOMIC);
+ cd->detectors = kmalloc_array(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
if (cd->detectors == NULL)
goto fail;
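kmalloc_array() replaces the open-coded size multiplication with an overflow-checked one that returns NULL if n * size would wrap. Note the hunk also switches from kzalloc() to an uninitialized allocation, which is safe only because the loop that follows assigns every detectors[] slot before use. The general idiom:

        /* returns NULL if the multiplication overflows */
        ptrs = kmalloc_array(n, sizeof(*ptrs), GFP_KERNEL);
        if (!ptrs)
                goto fail;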
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index b53ebb3ac9a2..85955572a705 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -48,7 +48,7 @@
* the MAC address to obtain the relevant bits and compare the result with
* (frame's BSSID & mask) to see if they match.
*
- * Simple example: on your card you have have two BSSes you have created with
+ * Simple example: on your card you have two BSSes you have created with
 * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
* There is another BSSID-03 but you are not part of it. For simplicity's sake,
* assuming only 4 bits for a mac address and for BSSIDs you can then have:
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index b2400e2417a5..f15e7bd690b5 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -667,14 +667,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 or 0x0 set, this is not a supported regulatory
- * domain but since we have more than one user with it we
- * need a solution for them. We default to 0x64, which is
- * the default Atheros world regulatory domain.
+ * 0x8000 set, this is not a supported regulatory domain
+ * but since we have more than one user with it we need
+ * a solution for them. We default to 0x64, which is the
+ * default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
- if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
+ if (reg->current_rd != COUNTRY_ERD_FLAG)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
index e14f374f97d4..fe187c1fbeb0 100644
--- a/drivers/net/wireless/ath/spectral_common.h
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -108,7 +108,7 @@ struct fft_sample_ath10k {
u8 avgpwr_db;
u8 max_exp;
- u8 data[0];
+ u8 data[];
} __packed;
struct fft_sample_ath11k {
@@ -123,7 +123,7 @@ struct fft_sample_ath11k {
__be32 tsf;
__be32 noise;
- u8 data[0];
+ u8 data[];
} __packed;
#endif /* SPECTRAL_COMMON_H */
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
index ba711644d27e..9935cf475b6d 100644
--- a/drivers/net/wireless/ath/trace.h
+++ b/drivers/net/wireless/ath/trace.h
@@ -40,16 +40,13 @@ TRACE_EVENT(ath_log,
TP_STRUCT__entry(
__string(device, wiphy_name(wiphy))
__string(driver, KBUILD_MODNAME)
- __dynamic_array(char, msg, ATH_DBG_MAX_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, wiphy_name(wiphy));
__assign_str(driver, KBUILD_MODNAME);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH_DBG_MAX_LEN,
- vaf->fmt,
- *vaf->va) >= ATH_DBG_MAX_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
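The __vstring()/__assign_vstr() tracing helpers size the ring-buffer record to the formatted message's actual length, where the old code reserved a fixed ATH_DBG_MAX_LEN buffer for every event and WARNed on truncation. The general shape of the pattern (event name is illustrative; wil6210's trace header gets the same conversion further down):

TRACE_EVENT(example_log,
        TP_PROTO(struct va_format *vaf),
        TP_ARGS(vaf),
        TP_STRUCT__entry(
                __vstring(msg, vaf->fmt, vaf->va)
        ),
        TP_fast_assign(
                __assign_vstr(msg, vaf->fmt, vaf->va);
        ),
        TP_printk("%s", __get_str(msg))
);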
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 27413703ad69..26bec795b372 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -5,6 +5,7 @@ wcn36xx-y += main.o \
txrx.o \
smd.o \
pmc.o \
- debug.o
+ debug.o \
+ firmware.o
wcn36xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 6af306ae41ad..58b3c0501bfd 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -21,6 +21,7 @@
#include "wcn36xx.h"
#include "debug.h"
#include "pmc.h"
+#include "firmware.h"
#ifdef CONFIG_WCN36XX_DEBUGFS
@@ -136,6 +137,42 @@ static const struct file_operations fops_wcn36xx_dump = {
.write = write_file_dump,
};
+static ssize_t read_file_firmware_feature_caps(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ size_t len = 0, buf_len = 2048;
+ char *buf;
+ int i;
+ int ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wcn->hal_mutex);
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+ len += scnprintf(buf + len, buf_len - len, "%s\n",
+ wcn36xx_firmware_get_cap_name(i));
+ }
+ if (len >= buf_len)
+ break;
+ }
+ mutex_unlock(&wcn->hal_mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_wcn36xx_firmware_feat_caps = {
+ .open = simple_open,
+ .read = read_file_firmware_feature_caps,
+};
+
#define ADD_FILE(name, mode, fop, priv_data) \
do { \
struct dentry *d; \
@@ -163,6 +200,8 @@ void wcn36xx_debugfs_init(struct wcn36xx *wcn)
ADD_FILE(bmps_switcher, 0600, &fops_wcn36xx_bmps, wcn);
ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn);
+ ADD_FILE(firmware_feat_caps, 0200,
+ &fops_wcn36xx_firmware_feat_caps, wcn);
}
void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
index 46307aa562d3..7116d96e0543 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.h
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -31,6 +31,7 @@ struct wcn36xx_dfs_entry {
struct dentry *rootdir;
struct wcn36xx_dfs_file file_bmps_switcher;
struct wcn36xx_dfs_file file_dump;
+ struct wcn36xx_dfs_file file_firmware_feat_caps;
};
void wcn36xx_debugfs_init(struct wcn36xx *wcn);
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.c b/drivers/net/wireless/ath/wcn36xx/firmware.c
new file mode 100644
index 000000000000..4b7f439e4db5
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/firmware.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "wcn36xx.h"
+#include "firmware.h"
+
+#define DEFINE(s)[s] = #s
+
+static const char * const wcn36xx_firmware_caps_names[] = {
+ DEFINE(MCC),
+ DEFINE(P2P),
+ DEFINE(DOT11AC),
+ DEFINE(SLM_SESSIONIZATION),
+ DEFINE(DOT11AC_OPMODE),
+ DEFINE(SAP32STA),
+ DEFINE(TDLS),
+ DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
+ DEFINE(WLANACTIVE_OFFLOAD),
+ DEFINE(BEACON_OFFLOAD),
+ DEFINE(SCAN_OFFLOAD),
+ DEFINE(ROAM_OFFLOAD),
+ DEFINE(BCN_MISS_OFFLOAD),
+ DEFINE(STA_POWERSAVE),
+ DEFINE(STA_ADVANCED_PWRSAVE),
+ DEFINE(AP_UAPSD),
+ DEFINE(AP_DFS),
+ DEFINE(BLOCKACK),
+ DEFINE(PHY_ERR),
+ DEFINE(BCN_FILTER),
+ DEFINE(RTT),
+ DEFINE(RATECTRL),
+ DEFINE(WOW),
+ DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
+ DEFINE(SPECULATIVE_PS_POLL),
+ DEFINE(SCAN_SCH),
+ DEFINE(IBSS_HEARTBEAT_OFFLOAD),
+ DEFINE(WLAN_SCAN_OFFLOAD),
+ DEFINE(WLAN_PERIODIC_TX_PTRN),
+ DEFINE(ADVANCE_TDLS),
+ DEFINE(BATCH_SCAN),
+ DEFINE(FW_IN_TX_PATH),
+ DEFINE(EXTENDED_NSOFFLOAD_SLOT),
+ DEFINE(CH_SWITCH_V1),
+ DEFINE(HT40_OBSS_SCAN),
+ DEFINE(UPDATE_CHANNEL_LIST),
+ DEFINE(WLAN_MCADDR_FLT),
+ DEFINE(WLAN_CH144),
+ DEFINE(NAN),
+ DEFINE(TDLS_SCAN_COEXISTENCE),
+ DEFINE(LINK_LAYER_STATS_MEAS),
+ DEFINE(MU_MIMO),
+ DEFINE(EXTENDED_SCAN),
+ DEFINE(DYNAMIC_WMM_PS),
+ DEFINE(MAC_SPOOFED_SCAN),
+ DEFINE(BMU_ERROR_GENERIC_RECOVERY),
+ DEFINE(DISA),
+ DEFINE(FW_STATS),
+ DEFINE(WPS_PRBRSP_TMPL),
+ DEFINE(BCN_IE_FLT_DELTA),
+ DEFINE(TDLS_OFF_CHANNEL),
+ DEFINE(RTT3),
+ DEFINE(MGMT_FRAME_LOGGING),
+ DEFINE(ENHANCED_TXBD_COMPLETION),
+ DEFINE(LOGGING_ENHANCEMENT),
+ DEFINE(EXT_SCAN_ENHANCED),
+ DEFINE(MEMORY_DUMP_SUPPORTED),
+ DEFINE(PER_PKT_STATS_SUPPORTED),
+ DEFINE(EXT_LL_STAT),
+ DEFINE(WIFI_CONFIG),
+ DEFINE(ANTENNA_DIVERSITY_SELECTION),
+};
+
+#undef DEFINE
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_firmware_caps_names[x];
+}
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+
+ return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+}
+
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.h b/drivers/net/wireless/ath/wcn36xx/firmware.h
new file mode 100644
index 000000000000..f991cf959f82
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/firmware.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _FIRMWARE_H_
+#define _FIRMWARE_H_
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum wcn36xx_firmware_feat_caps {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+ WLAN_MCADDR_FLT = 36,
+ WLAN_CH144 = 37,
+ NAN = 38,
+ TDLS_SCAN_COEXISTENCE = 39,
+ LINK_LAYER_STATS_MEAS = 40,
+ MU_MIMO = 41,
+ EXTENDED_SCAN = 42,
+ DYNAMIC_WMM_PS = 43,
+ MAC_SPOOFED_SCAN = 44,
+ BMU_ERROR_GENERIC_RECOVERY = 45,
+ DISA = 46,
+ FW_STATS = 47,
+ WPS_PRBRSP_TMPL = 48,
+ BCN_IE_FLT_DELTA = 49,
+ TDLS_OFF_CHANNEL = 51,
+ RTT3 = 52,
+ MGMT_FRAME_LOGGING = 53,
+ ENHANCED_TXBD_COMPLETION = 54,
+ LOGGING_ENHANCEMENT = 55,
+ EXT_SCAN_ENHANCED = 56,
+ MEMORY_DUMP_SUPPORTED = 57,
+ PER_PKT_STATS_SUPPORTED = 58,
+ EXT_LL_STAT = 60,
+ WIFI_CONFIG = 61,
+ ANTENNA_DIVERSITY_SELECTION = 62,
+
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x);
+
+#endif /* _FIRMWARE_H_ */
+
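Relocating the capability names next to the enum and generating them with DEFINE(s), which expands to the designated initializer [s] = #s, keys the string table by enum value so it cannot drift out of step the way the hand-numbered array in main.c could (note the enum deliberately skips 50 and 59). Usage sketch of the relocated helpers:

        u32 caps[WCN36XX_HAL_CAPS_SIZE] = {};

        wcn36xx_firmware_set_feat_caps(caps, SCAN_OFFLOAD);
        if (wcn36xx_firmware_get_feat_caps(caps, SCAN_OFFLOAD))
                pr_debug("FW Cap %s\n",
                         wcn36xx_firmware_get_cap_name(SCAN_OFFLOAD));
        wcn36xx_firmware_clear_feat_caps(caps, SCAN_OFFLOAD);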
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 2a1db9756fd5..d3a9d00e65e1 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -1961,7 +1961,7 @@ struct wcn36xx_hal_config_bss_params {
/* HAL should update the existing BSS entry, if this flag is set.
* UMAC will set this flag in case of reassoc, where we want to
- * resue the the old BSSID and still return success 0 = Add, 1 =
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
* Update */
u8 action;
@@ -2098,7 +2098,7 @@ struct wcn36xx_hal_config_bss_params_v1 {
/* HAL should update the existing BSS entry, if this flag is set.
* UMAC will set this flag in case of reassoc, where we want to
- * resue the the old BSSID and still return success 0 = Add, 1 =
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
* Update */
u8 action;
@@ -2626,7 +2626,12 @@ enum tx_rate_info {
HAL_TX_RATE_SGI = 0x8,
/* Rate with Long guard interval */
- HAL_TX_RATE_LGI = 0x10
+ HAL_TX_RATE_LGI = 0x10,
+
+ /* VHT rates */
+ HAL_TX_RATE_VHT20 = 0x20,
+ HAL_TX_RATE_VHT40 = 0x40,
+ HAL_TX_RATE_VHT80 = 0x80,
};
struct ani_global_class_a_stats_info {
@@ -2672,7 +2677,7 @@ struct ani_global_security_stats {
* management information base (MIB) object is enabled */
u32 rx_wep_unencrypted_frm_cnt;
- /* The number of received MSDU packets that that the 802.11 station
+ /* The number of received MSDU packets that the 802.11 station
* discarded because of MIC failures */
u32 rx_mic_fail_cnt;
@@ -4137,7 +4142,7 @@ struct wcn36xx_hal_dump_cmd_rsp_msg {
 /* Length of the response message */
u32 rsp_length;
- /* FIXME: Currently considering the the responce will be less than
+ * FIXME: Currently considering the response will be less than
 * 100 bytes */
u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
} __packed;
@@ -4753,74 +4758,6 @@ struct wcn36xx_hal_set_power_params_resp {
u32 status;
} __packed;
-/* Capability bitmap exchange definitions and macros starts */
-
-enum place_holder_in_cap_bitmap {
- MCC = 0,
- P2P = 1,
- DOT11AC = 2,
- SLM_SESSIONIZATION = 3,
- DOT11AC_OPMODE = 4,
- SAP32STA = 5,
- TDLS = 6,
- P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
- WLANACTIVE_OFFLOAD = 8,
- BEACON_OFFLOAD = 9,
- SCAN_OFFLOAD = 10,
- ROAM_OFFLOAD = 11,
- BCN_MISS_OFFLOAD = 12,
- STA_POWERSAVE = 13,
- STA_ADVANCED_PWRSAVE = 14,
- AP_UAPSD = 15,
- AP_DFS = 16,
- BLOCKACK = 17,
- PHY_ERR = 18,
- BCN_FILTER = 19,
- RTT = 20,
- RATECTRL = 21,
- WOW = 22,
- WLAN_ROAM_SCAN_OFFLOAD = 23,
- SPECULATIVE_PS_POLL = 24,
- SCAN_SCH = 25,
- IBSS_HEARTBEAT_OFFLOAD = 26,
- WLAN_SCAN_OFFLOAD = 27,
- WLAN_PERIODIC_TX_PTRN = 28,
- ADVANCE_TDLS = 29,
- BATCH_SCAN = 30,
- FW_IN_TX_PATH = 31,
- EXTENDED_NSOFFLOAD_SLOT = 32,
- CH_SWITCH_V1 = 33,
- HT40_OBSS_SCAN = 34,
- UPDATE_CHANNEL_LIST = 35,
- WLAN_MCADDR_FLT = 36,
- WLAN_CH144 = 37,
- NAN = 38,
- TDLS_SCAN_COEXISTENCE = 39,
- LINK_LAYER_STATS_MEAS = 40,
- MU_MIMO = 41,
- EXTENDED_SCAN = 42,
- DYNAMIC_WMM_PS = 43,
- MAC_SPOOFED_SCAN = 44,
- BMU_ERROR_GENERIC_RECOVERY = 45,
- DISA = 46,
- FW_STATS = 47,
- WPS_PRBRSP_TMPL = 48,
- BCN_IE_FLT_DELTA = 49,
- TDLS_OFF_CHANNEL = 51,
- RTT3 = 52,
- MGMT_FRAME_LOGGING = 53,
- ENHANCED_TXBD_COMPLETION = 54,
- LOGGING_ENHANCEMENT = 55,
- EXT_SCAN_ENHANCED = 56,
- MEMORY_DUMP_SUPPORTED = 57,
- PER_PKT_STATS_SUPPORTED = 58,
- EXT_LL_STAT = 60,
- WIFI_CONFIG = 61,
- ANTENNA_DIVERSITY_SELECTION = 62,
-
- MAX_FEATURE_SUPPORTED = 128,
-};
-
#define WCN36XX_HAL_CAPS_SIZE 4
struct wcn36xx_hal_feat_caps_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 9575d7373bf2..6b8d2889d73f 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -28,6 +28,7 @@
#include <net/ipv6.h>
#include "wcn36xx.h"
#include "testmode.h"
+#include "firmware.h"
unsigned int wcn36xx_dbg_mask;
module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
@@ -192,84 +193,15 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
-static const char * const wcn36xx_caps_names[] = {
- "MCC", /* 0 */
- "P2P", /* 1 */
- "DOT11AC", /* 2 */
- "SLM_SESSIONIZATION", /* 3 */
- "DOT11AC_OPMODE", /* 4 */
- "SAP32STA", /* 5 */
- "TDLS", /* 6 */
- "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
- "WLANACTIVE_OFFLOAD", /* 8 */
- "BEACON_OFFLOAD", /* 9 */
- "SCAN_OFFLOAD", /* 10 */
- "ROAM_OFFLOAD", /* 11 */
- "BCN_MISS_OFFLOAD", /* 12 */
- "STA_POWERSAVE", /* 13 */
- "STA_ADVANCED_PWRSAVE", /* 14 */
- "AP_UAPSD", /* 15 */
- "AP_DFS", /* 16 */
- "BLOCKACK", /* 17 */
- "PHY_ERR", /* 18 */
- "BCN_FILTER", /* 19 */
- "RTT", /* 20 */
- "RATECTRL", /* 21 */
- "WOW", /* 22 */
- "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */
- "SPECULATIVE_PS_POLL", /* 24 */
- "SCAN_SCH", /* 25 */
- "IBSS_HEARTBEAT_OFFLOAD", /* 26 */
- "WLAN_SCAN_OFFLOAD", /* 27 */
- "WLAN_PERIODIC_TX_PTRN", /* 28 */
- "ADVANCE_TDLS", /* 29 */
- "BATCH_SCAN", /* 30 */
- "FW_IN_TX_PATH", /* 31 */
- "EXTENDED_NSOFFLOAD_SLOT", /* 32 */
- "CH_SWITCH_V1", /* 33 */
- "HT40_OBSS_SCAN", /* 34 */
- "UPDATE_CHANNEL_LIST", /* 35 */
- "WLAN_MCADDR_FLT", /* 36 */
- "WLAN_CH144", /* 37 */
- "NAN", /* 38 */
- "TDLS_SCAN_COEXISTENCE", /* 39 */
- "LINK_LAYER_STATS_MEAS", /* 40 */
- "MU_MIMO", /* 41 */
- "EXTENDED_SCAN", /* 42 */
- "DYNAMIC_WMM_PS", /* 43 */
- "MAC_SPOOFED_SCAN", /* 44 */
- "BMU_ERROR_GENERIC_RECOVERY", /* 45 */
- "DISA", /* 46 */
- "FW_STATS", /* 47 */
- "WPS_PRBRSP_TMPL", /* 48 */
- "BCN_IE_FLT_DELTA", /* 49 */
- "TDLS_OFF_CHANNEL", /* 51 */
- "RTT3", /* 52 */
- "MGMT_FRAME_LOGGING", /* 53 */
- "ENHANCED_TXBD_COMPLETION", /* 54 */
- "LOGGING_ENHANCEMENT", /* 55 */
- "EXT_SCAN_ENHANCED", /* 56 */
- "MEMORY_DUMP_SUPPORTED", /* 57 */
- "PER_PKT_STATS_SUPPORTED", /* 58 */
- "EXT_LL_STAT", /* 60 */
- "WIFI_CONFIG", /* 61 */
- "ANTENNA_DIVERSITY_SELECTION", /* 62 */
-};
-
-static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
-{
- if (x >= ARRAY_SIZE(wcn36xx_caps_names))
- return "UNKNOWN";
- return wcn36xx_caps_names[x];
-}
-
static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
{
int i;
for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
- if (get_feat_caps(wcn->fw_feat_caps, i))
- wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n", wcn36xx_get_cap_name(i));
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n",
+ wcn36xx_firmware_get_cap_name(i));
+ }
}
}
@@ -331,6 +263,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
INIT_LIST_HEAD(&wcn->vif_list);
spin_lock_init(&wcn->dxe_lock);
+ spin_lock_init(&wcn->survey_lock);
return 0;
@@ -380,7 +313,7 @@ static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
if (enable && !wcn->sw_scan) {
- if (vif->bss_conf.ps) /* ps allowed ? */
+ if (vif->cfg.ps) /* ps allowed ? */
wcn36xx_pmc_enter_bmps_state(wcn, vif);
} else {
wcn36xx_pmc_exit_bmps_state(wcn, vif);
@@ -392,11 +325,41 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
{
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel = NULL;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(wcn->hw->wiphy->bands); i++) {
+ band = wcn->hw->wiphy->bands[i];
+ if (!band)
+ break;
+ for (j = 0; j < band->n_channels; j++) {
+ if (HW_VALUE_CHANNEL(band->channels[j].hw_value) == ch) {
+ channel = &band->channels[j];
+ break;
+ }
+ }
+ if (channel)
+ break;
+ }
+
+ if (!channel) {
+ wcn36xx_err("Cannot tune to channel %d\n", ch);
+ return;
+ }
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+ wcn->band = band;
+ wcn->channel = channel;
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
}
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
@@ -670,7 +633,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
- if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* fallback to mac80211 software scan */
return 1;
}
@@ -708,7 +671,7 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
- if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* ieee80211_scan_completed will be called on FW scan
* indication */
wcn36xx_smd_stop_hw_scan(wcn);
@@ -757,7 +720,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
int i, size;
u16 *rates_table;
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
- u32 rates = sta->supp_rates[band];
+ u32 rates = sta->deflink.supp_rates[band];
memset(&sta_priv->supported_rates, 0,
sizeof(sta_priv->supported_rates));
@@ -783,20 +746,20 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
}
}
- if (sta->ht_cap.ht_supported) {
- BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
- sizeof(sta_priv->supported_rates.supported_mcs_set));
+ if (sta->deflink.ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->deflink.ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
memcpy(sta_priv->supported_rates.supported_mcs_set,
- sta->ht_cap.mcs.rx_mask,
- sizeof(sta->ht_cap.mcs.rx_mask));
+ sta->deflink.ht_cap.mcs.rx_mask,
+ sizeof(sta->deflink.ht_cap.mcs.rx_mask));
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
sta_priv->supported_rates.op_rate_mode = STA_11ac;
sta_priv->supported_rates.vht_rx_mcs_map =
- sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
sta_priv->supported_rates.vht_tx_mcs_map =
- sta->vht_cap.vht_mcs.tx_mcs_map;
+ sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
}
@@ -837,7 +800,7 @@ void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates)
static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct wcn36xx *wcn = hw->priv;
struct sk_buff *skb = NULL;
@@ -845,7 +808,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
enum wcn36xx_hal_link_state link_state;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%llx\n",
vif, changed);
mutex_lock(&wcn->conf_mutex);
@@ -884,17 +847,17 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed ssid\n");
wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
- bss_conf->ssid, bss_conf->ssid_len);
+ vif->cfg.ssid, vif->cfg.ssid_len);
- vif_priv->ssid.length = bss_conf->ssid_len;
+ vif_priv->ssid.length = vif->cfg.ssid_len;
memcpy(&vif_priv->ssid.ssid,
- bss_conf->ssid,
- bss_conf->ssid_len);
+ vif->cfg.ssid,
+ vif->cfg.ssid_len);
}
if (changed & BSS_CHANGED_ASSOC) {
vif_priv->is_joining = false;
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
struct ieee80211_sta *sta;
struct wcn36xx_sta *sta_priv;
@@ -902,7 +865,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
"mac assoc bss %pM vif %pM AID=%d\n",
bss_conf->bssid,
vif->addr,
- bss_conf->aid);
+ vif->cfg.aid);
vif_priv->sta_assoc = true;
@@ -928,7 +891,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_smd_config_bss(wcn, vif, sta,
bss_conf->bssid,
true);
- sta_priv->aid = bss_conf->aid;
+ sta_priv->aid = vif->cfg.aid;
/*
 * config_sta must be called here because this is the
* place where AID is available.
@@ -942,7 +905,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
"disassociated bss %pM vif %pM AID=%d\n",
bss_conf->bssid,
vif->addr,
- bss_conf->aid);
+ vif->cfg.aid);
vif_priv->sta_assoc = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
@@ -975,7 +938,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
- &tim_len);
+ &tim_len, 0);
if (!skb) {
wcn36xx_err("failed to alloc beacon skb\n");
goto out;
@@ -1326,6 +1289,64 @@ static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
}
+static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct wcn36xx_chan_survey *chan_survey;
+ int band_idx;
+ unsigned long flags;
+
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ band_idx = idx;
+ if (band_idx >= sband->n_channels) {
+ band_idx -= sband->n_channels;
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_5GHZ];
+ }
+
+ if (!sband || band_idx >= sband->n_channels)
+ return -ENOENT;
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+
+ chan_survey = &wcn->chan_survey[idx];
+ survey->channel = &sband->channels[band_idx];
+ survey->noise = chan_survey->rssi - chan_survey->snr;
+ survey->filled = 0;
+
+ if (chan_survey->rssi > -100 && chan_survey->rssi < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (survey->channel == wcn->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "ch %d rssi %d snr %d noise %d filled %x freq %d\n",
+ HW_VALUE_CHANNEL(survey->channel->hw_value),
+ chan_survey->rssi, chan_survey->snr, survey->noise,
+ survey->filled, survey->channel->center_freq);
+
+ return 0;
+}
+
+static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct station_info *sinfo)
+{
+ struct wcn36xx *wcn;
+ u8 sta_index;
+ int status;
+
+ wcn = hw->priv;
+ sta_index = get_sta_index(vif, wcn36xx_sta_to_priv(sta));
+ status = wcn36xx_smd_get_stats(wcn, sta_index, HAL_GLOBAL_CLASS_A_STATS_INFO, sinfo);
+
+ if (status)
+ wcn36xx_err("wcn36xx_smd_get_stats failed\n");
+}
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1349,11 +1370,13 @@ static const struct ieee80211_ops wcn36xx_ops = {
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
.sta_remove = wcn36xx_sta_remove,
+ .sta_statistics = wcn36xx_sta_statistics,
.ampdu_action = wcn36xx_ampdu_action,
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
.flush = wcn36xx_flush,
+ .get_survey = wcn36xx_get_survey,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
@@ -1446,25 +1469,20 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
{
struct device_node *mmio_node;
struct device_node *iris_node;
- struct resource *res;
int index;
int ret;
/* Set TX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
- if (!res) {
- wcn36xx_err("failed to get tx_irq\n");
- return -ENOENT;
- }
- wcn->tx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret < 0)
+ return ret;
+ wcn->tx_irq = ret;
/* Set RX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx");
- if (!res) {
- wcn36xx_err("failed to get rx_irq\n");
- return -ENOENT;
- }
- wcn->rx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0)
+ return ret;
+ wcn->rx_irq = ret;
/* Acquire SMSM tx enable handle */
wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
@@ -1513,6 +1531,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
+ if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
+ of_device_is_compatible(iris_node, "qcom,wcn3660b"))
+ wcn->rf_id = RF_IRIS_WCN3660;
if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);
@@ -1535,6 +1556,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
void *wcnss;
int ret;
const u8 *addr;
+ int n_channels;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
@@ -1562,6 +1584,13 @@ static int wcn36xx_probe(struct platform_device *pdev)
goto out_wq;
}
+ n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
+ wcn->chan_survey = devm_kcalloc(wcn->dev, n_channels,
+ sizeof(*wcn->chan_survey), GFP_KERNEL);
+ if (!wcn->chan_survey) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
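wcn36xx keeps one wcn36xx_chan_survey entry per channel across both bands in a flat array: 2 GHz channels first, 5 GHz after them. wcn36xx_get_survey() above splits a flat index back into (band, channel), and wcn36xx_update_survey() in txrx.c further down derives the same index from a received frame's band and frequency. A sketch of the mapping (helper name is illustrative):

        /* flat survey index: 2 GHz occupies [0, n_2ghz),
         * 5 GHz follows at [n_2ghz, n_2ghz + n_5ghz) */
        static int chan_survey_index(struct wiphy *wiphy,
                                     enum nl80211_band band, int chan_idx)
        {
                int base = 0;

                if (band == NL80211_BAND_5GHZ)
                        base = wiphy->bands[NL80211_BAND_2GHZ]->n_channels;

                return base + chan_idx;
        }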
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index caeb68901326..566f0b9c1584 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/rpmsg.h>
#include "smd.h"
+#include "firmware.h"
struct wcn36xx_cfg_val {
u32 cfg_id;
@@ -208,9 +209,9 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
{
if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
- else if (sta && sta->ht_cap.ht_supported)
+ else if (sta && sta->deflink.ht_cap.ht_supported)
bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
- else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
+ else if (sta && (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0x7f))
bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
else
bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
@@ -225,9 +226,10 @@ static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
{
- if (sta && sta->ht_cap.ht_supported) {
- unsigned long caps = sta->ht_cap.cap;
- bss_params->ht = sta->ht_cap.ht_supported;
+ if (sta && sta->deflink.ht_cap.ht_supported) {
+ unsigned long caps = sta->deflink.ht_cap.cap;
+
+ bss_params->ht = sta->deflink.ht_cap.ht_supported;
bss_params->tx_channel_width_set = is_cap_supported(caps,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
bss_params->lsig_tx_op_protection_full_support =
@@ -250,23 +252,24 @@ wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params_v1 *bss)
{
- if (sta && sta->vht_cap.vht_supported)
+ if (sta && sta->deflink.vht_cap.vht_supported)
bss->vht_capable = 1;
}
static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
- if (sta->ht_cap.ht_supported) {
- unsigned long caps = sta->ht_cap.cap;
- sta_params->ht_capable = sta->ht_cap.ht_supported;
+ if (sta->deflink.ht_cap.ht_supported) {
+ unsigned long caps = sta->deflink.ht_cap.cap;
+
+ sta_params->ht_capable = sta->deflink.ht_cap.ht_supported;
sta_params->tx_channel_width_set = is_cap_supported(caps,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
sta_params->lsig_txop_protection = is_cap_supported(caps,
IEEE80211_HT_CAP_LSIG_TXOP_PROT);
- sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
- sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_ampdu_size = sta->deflink.ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->deflink.ht_cap.ampdu_density;
/* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */
sta_params->max_amsdu_size = !is_cap_supported(caps,
IEEE80211_HT_CAP_MAX_AMSDU);
@@ -287,13 +290,13 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
- if (sta->vht_cap.vht_supported) {
- unsigned long caps = sta->vht_cap.cap;
+ if (sta->deflink.vht_cap.vht_supported) {
+ unsigned long caps = sta->deflink.vht_cap.cap;
- sta_params->vht_capable = sta->vht_cap.vht_supported;
+ sta_params->vht_capable = sta->deflink.vht_cap.vht_supported;
sta_params->vht_ldpc_enabled =
is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
- if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
sta_params->vht_tx_mu_beamformee_capable =
is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
if (sta_params->vht_tx_mu_beamformee_capable)
@@ -308,9 +311,10 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
sta_params->ht_ldpc_enabled =
- is_cap_supported(sta->ht_cap.cap, IEEE80211_HT_CAP_LDPC_CODING);
+ is_cap_supported(sta->deflink.ht_cap.cap,
+ IEEE80211_HT_CAP_LDPC_CODING);
}
}
@@ -2428,49 +2432,6 @@ out:
return ret;
}
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
- bitmap[arr_idx] |= (1 << bit_idx);
-}
-
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return -EINVAL;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
-
- return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
-}
-
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
- bitmap[arr_idx] &= ~(1 << bit_idx);
-}
-
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
@@ -2479,11 +2440,12 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
- set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
if (wcn->rf_id == RF_IRIS_WCN3680) {
- set_feat_caps(msg_body.feat_caps, DOT11AC);
- set_feat_caps(msg_body.feat_caps, WLAN_CH144);
- set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, DOT11AC);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, WLAN_CH144);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps,
+ ANTENNA_DIVERSITY_SELECTION);
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2627,6 +2589,62 @@ out:
return ret;
}
+int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
+ struct station_info *sinfo)
+{
+ struct wcn36xx_hal_stats_req_msg msg_body;
+ struct wcn36xx_hal_stats_rsp_msg *rsp;
+ void *rsp_body;
+ int ret;
+
+ if (stats_mask & ~HAL_GLOBAL_CLASS_A_STATS_INFO) {
+ wcn36xx_err("stats_mask 0x%x contains unimplemented types\n",
+ stats_mask);
+ return -EINVAL;
+ }
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_GET_STATS_REQ);
+
+ msg_body.sta_id = sta_index;
+ msg_body.stats_mask = stats_mask;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("sending hal_get_stats failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_get_stats response failed err=%d\n", ret);
+ goto out;
+ }
+
+ rsp = (struct wcn36xx_hal_stats_rsp_msg *)wcn->hal_buf;
+ rsp_body = (wcn->hal_buf + sizeof(struct wcn36xx_hal_stats_rsp_msg));
+
+ if (rsp->stats_mask != stats_mask) {
+ wcn36xx_err("stats_mask 0x%x differs from requested 0x%x\n",
+ rsp->stats_mask, stats_mask);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (rsp->stats_mask & HAL_GLOBAL_CLASS_A_STATS_INFO) {
+ struct ani_global_class_a_stats_info *stats_info = rsp_body;
+
+ wcn36xx_process_tx_rate(stats_info, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ rsp_body += sizeof(struct ani_global_class_a_stats_info);
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+
+ return ret;
+}
+
static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info)
{
struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate;
@@ -2946,7 +2964,7 @@ int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.host_offload_params.enable =
WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE;
memcpy(&msg_body.host_offload_params.u,
- &vif->bss_conf.arp_addr_list[0], sizeof(__be32));
+ &vif->cfg.arp_addr_list[0], sizeof(__be32));
}
msg_body.ns_offload_params.bss_index = vif_priv->bss_index;
@@ -3092,9 +3110,9 @@ static int wcn36xx_smd_gtk_offload_get_info_rsp(struct wcn36xx *wcn,
cpu_to_le64(rsp->key_replay_counter);
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "GTK replay counter increment %llu\n",
- rsp->key_replay_counter);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "GTK replay counter increment %llu\n",
+ rsp->key_replay_counter);
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
@@ -3241,7 +3259,7 @@ int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
size_t payload_size;
int ret;
- if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
+ if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
return -EOPNOTSUPP;
mutex_lock(&wcn->hal_mutex);
@@ -3316,6 +3334,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_ADD_BA_SESSION_RSP:
case WCN36XX_HAL_ADD_BA_RSP:
case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_GET_STATS_RSP:
case WCN36XX_HAL_TRIGGER_BA_RSP:
case WCN36XX_HAL_UPDATE_CFG_RSP:
case WCN36XX_HAL_JOIN_RSP:
@@ -3347,7 +3366,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
case WCN36XX_HAL_SCAN_OFFLOAD_IND:
- msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
+ msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
msg_header->msg_type);
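struct_size(p, member, n) from <linux/overflow.h> computes sizeof(*p) plus n trailing flexible-array elements, saturating at SIZE_MAX on overflow so the allocation fails cleanly instead of being undersized; the open-coded sizeof(*msg_ind) + len sum had no such check. Since msg is a u8 flexible array here, the computed size is otherwise unchanged:

        /* overflow-safe: sizeof(*msg_ind) + len * sizeof(msg_ind->msg[0]) */
        msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);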
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 957cfa87fbde..cf15cde2a364 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -125,9 +125,6 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
@@ -138,6 +135,8 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn);
+int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
+ struct station_info *sinfo);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index dd58dde8c836..0802ed728824 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/random.h>
#include "txrx.h"
static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
@@ -23,6 +24,11 @@ static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
+static inline int get_snr(struct wcn36xx_rx_bd *bd)
+{
+ return ((bd->phy_stat1 >> 24) & 0xff);
+}
+
struct wcn36xx_rate {
u16 bitrate;
u16 mcs_or_legacy_index;
@@ -266,6 +272,37 @@ static void __skb_queue_purge_irq(struct sk_buff_head *list)
dev_kfree_skb_irq(skb);
}
+static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
+ int band, int freq)
+{
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *sband;
+ int idx;
+ int i;
+ u8 snr_sample = snr & 0xff;
+
+ idx = 0;
+ if (band == NL80211_BAND_5GHZ)
+ idx = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
+
+ sband = wcn->hw->wiphy->bands[band];
+ channel = sband->channels;
+
+ for (i = 0; i < sband->n_channels; i++, channel++) {
+ if (channel->center_freq == freq) {
+ idx += i;
+ break;
+ }
+ }
+
+ spin_lock(&wcn->survey_lock);
+ wcn->chan_survey[idx].rssi = rssi;
+ wcn->chan_survey[idx].snr = snr;
+ spin_unlock(&wcn->survey_lock);
+
+ add_device_randomness(&snr_sample, sizeof(snr_sample));
+}
+
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
@@ -343,6 +380,9 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
+ wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
+ status.band, status.freq);
+
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
@@ -663,3 +703,32 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
return ret;
}
+
+void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info)
+{
+ /* tx_rate is in units of 500kbps; mac80211 wants them in 100kbps */
+ if (stats->tx_rate_flags & HAL_TX_RATE_LEGACY)
+ info->legacy = stats->tx_rate * 5;
+
+ info->flags = 0;
+ info->mcs = stats->mcs_index;
+ info->nss = 1;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_HT40))
+ info->flags |= RATE_INFO_FLAGS_MCS;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_VHT20 | HAL_TX_RATE_VHT40 | HAL_TX_RATE_VHT80))
+ info->flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (stats->tx_rate_flags & HAL_TX_RATE_SGI)
+ info->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_VHT20))
+ info->bw = RATE_INFO_BW_20;
+
+ if (stats->tx_rate_flags & (HAL_TX_RATE_HT40 | HAL_TX_RATE_VHT40))
+ info->bw = RATE_INFO_BW_40;
+
+ if (stats->tx_rate_flags & HAL_TX_RATE_VHT80)
+ info->bw = RATE_INFO_BW_80;
+}
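A usage sketch for wcn36xx_process_tx_rate() (field values are illustrative): a VHT 40 MHz, short-GI, MCS 7 report from firmware maps onto cfg80211's rate_info like this.

        struct ani_global_class_a_stats_info stats = {
                .tx_rate_flags = HAL_TX_RATE_VHT40 | HAL_TX_RATE_SGI,
                .mcs_index = 7,
        };
        struct rate_info info = {};

        wcn36xx_process_tx_rate(&stats, &info);
        /* info.flags == RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_SHORT_GI,
         * info.bw == RATE_INFO_BW_40, info.mcs == 7, info.nss == 1 */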
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
index b54311ffde9c..fb0d6cabd52b 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.h
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -164,5 +164,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
int wcn36xx_start_tx(struct wcn36xx *wcn,
struct wcn36xx_sta *sta_priv,
struct sk_buff *skb);
+void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info);
#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index fbd0558c2c19..9aa08b636d08 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -97,6 +97,7 @@ enum wcn36xx_ampdu_state {
#define RF_UNKNOWN 0x0000
#define RF_IRIS_WCN3620 0x3620
+#define RF_IRIS_WCN3660 0x3660
#define RF_IRIS_WCN3680 0x3680
static inline void buff_to_be(u32 *buf, size_t len)
@@ -194,7 +195,14 @@ struct wcn36xx_sta {
enum wcn36xx_ampdu_state ampdu_state[16];
int non_agg_frame_ct;
};
+
struct wcn36xx_dxe_ch;
+
+struct wcn36xx_chan_survey {
+ s8 rssi;
+ u8 snr;
+};
+
struct wcn36xx {
struct ieee80211_hw *hw;
struct device *dev;
@@ -281,6 +289,12 @@ struct wcn36xx {
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel;
+
+ spinlock_t survey_lock; /* protects chan_survey */
+ struct wcn36xx_chan_survey *chan_survey;
};
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 764d1d14132b..40f9a7ef8980 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1620,7 +1620,7 @@ static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
@@ -1653,10 +1653,9 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
params->seq_len, params->seq);
return -EINVAL;
}
- }
-
- if (!IS_ERR(cs))
+ } else {
wil_del_rx_key(key_index, key_usage, cs);
+ }
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
@@ -1697,7 +1696,7 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
@@ -1724,7 +1723,7 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
/* Need to be present or wiphy_new() will WARN */
static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -2073,8 +2072,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
key_params.key = vif->gtk;
key_params.key_len = vif->gtk_len;
key_params.seq_len = IEEE80211_GCMP_PN_LEN;
- rc = wil_cfg80211_add_key(wiphy, ndev, vif->gtk_index, false,
- NULL, &key_params);
+ rc = wil_cfg80211_add_key(wiphy, ndev, -1, vif->gtk_index,
+ false, NULL, &key_params);
if (rc)
wil_err(wil, "vif %d recovery add key failed (%d)\n",
i, rc);
@@ -2099,8 +2098,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
bcon->tail_len))
privacy = 1;
- memcpy(vif->ssid, wdev->ssid, wdev->ssid_len);
- vif->ssid_len = wdev->ssid_len;
+ memcpy(vif->ssid, wdev->u.ap.ssid, wdev->u.ap.ssid_len);
+ vif->ssid_len = wdev->u.ap.ssid_len;
/* in case privacy has changed, need to restart the AP */
if (vif->privacy != privacy) {
@@ -2109,7 +2108,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
vif->ssid_len, privacy,
- wdev->beacon_interval,
+ wdev->links[0].ap.beacon_interval,
vif->channel,
vif->wmi_edmg_channel, bcon,
vif->hidden_ssid,
@@ -2187,7 +2186,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
}
static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
- struct net_device *ndev)
+ struct net_device *ndev,
+ unsigned int link_id)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 4c944e595978..04d1aa0e2d35 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1010,20 +1010,14 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
void *cmd;
int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
- int rc, rc1;
+ int rc1;
- if (cmdlen < 0)
+ if (cmdlen < 0 || *ppos != 0)
return -EINVAL;
- wmi = kmalloc(len, GFP_KERNEL);
- if (!wmi)
- return -ENOMEM;
-
- rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
- if (rc < 0) {
- kfree(wmi);
- return rc;
- }
+ wmi = memdup_user(buf, len);
+ if (IS_ERR(wmi))
+ return PTR_ERR(wmi);
cmd = (cmdlen > 0) ? &wmi[1] : NULL;
cmdid = le16_to_cpu(wmi->command_id);
@@ -1033,7 +1027,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
- return rc;
+ return len;
}
static const struct file_operations fops_wmi = {
@@ -1391,19 +1385,6 @@ static int temp_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(temp);
-/*---------freq------------*/
-static int freq_show(struct seq_file *s, void *data)
-{
- struct wil6210_priv *wil = s->private;
- struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
- u32 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0;
-
- seq_printf(s, "Freq = %d\n", freq);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(freq);
-
/*---------link------------*/
static int link_show(struct seq_file *s, void *data)
{
@@ -2380,7 +2361,6 @@ static const struct {
{"pmcdata", 0444, &fops_pmcdata},
{"pmcring", 0444, &fops_pmcring},
{"temp", 0444, &temp_fops},
- {"freq", 0444, &freq_fops},
{"link", 0444, &link_fops},
{"info", 0444, &info_fops},
{"recovery", 0644, &fops_recovery},
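memdup_user() folds the kmalloc() + copy_from_user() + unwind-on-failure sequence into a single call returning an ERR_PTR, which is what lets the open-coded simple_write_to_buffer() path above go away; the handler now also rejects a non-zero *ppos up front rather than mishandling partial writes. The idiom:

        wmi = memdup_user(buf, len);
        if (IS_ERR(wmi))
                return PTR_ERR(wmi);

        /* ... consume wmi ... */
        kfree(wmi);
        return len;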
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 7da87c9f363f..94e61dbe94f8 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1305,7 +1305,7 @@ void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
board_file = WIL_BOARD_FILE_NAME;
}
- strlcpy(buf, board_file, len);
+ strscpy(buf, board_file, len);
}
static int wil_get_bl_info(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 0913f0bf60e7..ee7d7e9c2718 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -445,7 +445,7 @@ int wil_if_add(struct wil6210_priv *wil)
wil_dbg_misc(wil, "entered");
- strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
rc = wiphy_register(wiphy);
if (rc < 0) {
@@ -456,18 +456,14 @@ int wil_if_add(struct wil6210_priv *wil)
init_dummy_netdev(&wil->napi_ndev);
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
- wil6210_netdev_poll_rx_edma,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx_edma,
- WIL6210_NAPI_BUDGET);
+ wil6210_netdev_poll_rx_edma);
+ netif_napi_add_tx(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
- wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
+ wil6210_netdev_poll_rx);
+ netif_napi_add_tx(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx);
}
wil_update_net_queues_bh(wil, vif, NULL, true);
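The NAPI registration API dropped its weight argument, so drivers take the default poll budget (NAPI_POLL_WEIGHT), and netif_tx_napi_add() became netif_napi_add_tx(); that is also why the driver-private WIL6210_NAPI_BUDGET constant is deleted from wil6210.h further down. The new calls:

        netif_napi_add(ndev, &napi_rx, poll_rx);        /* default weight */
        netif_napi_add_tx(ndev, &napi_tx, poll_tx);     /* TX completions */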
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ed4df561e5c5..f521af575e9b 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -445,10 +445,9 @@ int wil_pm_runtime_get(struct wil6210_priv *wil)
int rc;
struct device *dev = wil_to_dev(wil);
- rc = pm_runtime_get_sync(dev);
+ rc = pm_runtime_resume_and_get(dev);
if (rc < 0) {
- wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
- pm_runtime_put_noidle(dev);
+ wil_err(wil, "pm_runtime_resume_and_get() failed, rc = %d\n", rc);
return rc;
}
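
The pm.c change above is the standard pm_runtime_resume_and_get() conversion: it bundles pm_runtime_get_sync() with the pm_runtime_put_noidle() that callers previously had to issue on failure. A minimal sketch, with demo_get() hypothetical:

    #include <linux/pm_runtime.h>

    static int demo_get(struct device *dev)
    {
            int rc = pm_runtime_resume_and_get(dev);

            if (rc < 0)     /* usage count already dropped for us */
                    return rc;
            /* ... device is powered; pair with pm_runtime_put() ... */
            return 0;
    }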
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 11c989e95880..201f44612c31 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -70,13 +70,10 @@ DECLARE_EVENT_CLASS(wil6210_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- WIL6210_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= WIL6210_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
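
The trace.h hunk adopts the __vstring() helpers, which size the ring-buffer reservation to the formatted string instead of a fixed WIL6210_MSG_MAX slot. A sketch mirroring the patched event — this belongs in a trace header pulled in via CREATE_TRACE_POINTS and will not compile standalone; the demo_log event name is hypothetical:

    #include <linux/tracepoint.h>

    TRACE_EVENT(demo_log,
            TP_PROTO(struct va_format *vaf),
            TP_ARGS(vaf),
            TP_STRUCT__entry(
                    __vstring(msg, vaf->fmt, vaf->va)   /* sized at reserve */
            ),
            TP_fast_assign(
                    __assign_vstr(msg, vaf->fmt, vaf->va);
            ),
            TP_printk("%s", __get_str(msg))
    );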
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index cc830c795b33..237cbd5c5060 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -958,7 +958,7 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
if (gro)
napi_gro_receive(&wil->napi_rx, skb);
else
- netif_rx_ni(skb);
+ netif_rx(skb);
}
ndev->stats.rx_packets++;
stats->rx_packets++;
@@ -1782,9 +1782,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
}
/* Header Length = MAC header len + IP header len + TCP header len*/
- hdrlen = ETH_HLEN +
- (int)skb_network_header_len(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
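
The TSO header sizing above uses skb_tcp_all_headers() from <linux/tcp.h>, which collapses the open-coded ETH_HLEN + skb_network_header_len() + tcp_hdrlen() sum. A tiny sketch, with demo_tso_hdrlen() hypothetical:

    #include <linux/tcp.h>

    static int demo_tso_hdrlen(const struct sk_buff *skb)
    {
            return skb_tcp_all_headers(skb);    /* mac + ip + tcp headers */
    }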
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 1f4c8ec75be8..1ae1bec1b97f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -356,7 +356,7 @@ struct vring_rx_mac {
* bit 10 : cmd_dma_it:1 immediate interrupt
* bit 11..15 : reserved:5
* bit 16..29 : phy_info_length:14 It is valid when the PII is set.
- * When the FFM bit is set bits 29-27 are used for for
+ * When the FFM bit is set bits 29-27 are used for
* Flex Filter Match. Matching Index to one of the L2
* EtherType Flex Filter
* bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 11946ecd0b99..22a6eb3e12b7 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -82,7 +82,6 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (20) /* max number of stations */
#define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */
-#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index dd8abbb28849..6a5976a2944c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -780,7 +780,7 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
return; /* FW load will fail after timeout */
}
/* ignore MAC address, we already have it from the boot loader */
- strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) {
wil_dbg_wmi(wil, "rfc calibration result %d\n",
@@ -1199,7 +1199,7 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
eth->h_proto = cpu_to_be16(ETH_P_PAE);
skb_put_data(skb, evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
- if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += sz;
if (stats) {
@@ -1822,8 +1822,8 @@ wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
memset(&info, 0, sizeof(info));
- info.channel = ieee80211_get_channel(wiphy, freq);
- info.bss = vif->bss;
+ info.links[0].channel = ieee80211_get_channel(wiphy, freq);
+ info.links[0].bss = vif->bss;
info.req_ie = assoc_req_ie;
info.req_ie_len = assoc_req_ie_len;
info.resp_ie = assoc_resp_ie;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 7582761c61e2..24e609c1f523 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2033,7 +2033,7 @@ static int at76_config(struct ieee80211_hw *hw, u32 changed)
static void at76_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 35c2e798d98b..45d079b93384 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1518,7 +1518,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->firmware = NULL;
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
- strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
+ strscpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
@@ -3353,7 +3353,7 @@ static void atmel_management_frame(struct atmel_private *priv,
priv->beacons_this_sec++;
atmel_smooth_qual(priv);
if (priv->last_beacon_timestamp) {
- /* Note truncate this to 32 bits - kernel can't divide a long long */
+ /* Note truncate this to 32 bits - kernel can't divide a long */
u32 beacon_delay = timestamp - priv->last_beacon_timestamp;
int beacons = beacon_delay / (beacon_interval * 1000);
if (beacons > 1)
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index 982a772a9d87..bfe1be345844 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -118,7 +118,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 17bcec5f3ff7..b2539a916fd0 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -105,7 +105,7 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
-static int b43_modparam_pio = 0;
+static int b43_modparam_pio;
module_param_named(pio, b43_modparam_pio, int, 0644);
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
@@ -366,7 +366,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev);
static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed);
+ u64 changed);
static int b43_ratelimit(struct b43_wl *wl)
{
@@ -1832,7 +1832,7 @@ static void b43_update_templates(struct b43_wl *wl)
* the TIM field, but that would probably require resizing and
* moving of data within the beacon template.
* Simply request a new beacon and let mac80211 do the hard work. */
- beacon = ieee80211_beacon_get(wl->hw, wl->vif);
+ beacon = ieee80211_beacon_get(wl->hw, wl->vif, 0);
if (unlikely(!beacon))
return;
@@ -3783,7 +3783,8 @@ static void b43_qos_init(struct b43_wldev *dev)
}
static int b43_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 _queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 _queue,
const struct ieee80211_tx_queue_params *params)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -4097,7 +4098,7 @@ static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates)
static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index 4213caca9117..5ec5233acf40 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -88,7 +88,7 @@ enum b43_txpwr_result {
* initialized here.
* Must not be NULL.
* @prepare_hardware: Prepare the PHY. This is called before b43_chip_init to
- * do some early early PHY hardware init.
+ * do some early PHY hardware init.
* Can be NULL, if not required.
* @init: Initialize the PHY.
* Must not be NULL.
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index cf3ccf4ddfe7..2c0c019a815d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -582,7 +582,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
u16 data[4];
s16 gain[2];
u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+ static const s16 lna_gain[4] = { -2, 10, 19, 25 };
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
@@ -2479,11 +2479,7 @@ static void b43_nphy_gain_ctl_workarounds_rev19(struct b43_wldev *dev)
static void b43_nphy_gain_ctl_workarounds_rev7(struct b43_wldev *dev)
{
- struct b43_phy *phy = &dev->phy;
-
- switch (phy->rev) {
- /* TODO */
- }
+ /* TODO - should depend on phy->rev */
}
static void b43_nphy_gain_ctl_workarounds_rev3(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index 38b5be3a84e2..79e6fd205bfb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -88,7 +88,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
led->dev = dev;
led->index = led_index;
led->activelow = activelow;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index eec3af9c3745..4022c544aefe 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1241,7 +1241,7 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
* field, but that would probably require resizing and moving of data
* within the beacon template. Simply request a new beacon and let
* mac80211 do the hard work. */
- beacon = ieee80211_beacon_get(wl->hw, wl->vif);
+ beacon = ieee80211_beacon_get(wl->hw, wl->vif, 0);
if (unlikely(!beacon))
return;
@@ -2505,7 +2505,8 @@ static void b43legacy_op_tx(struct ieee80211_hw *hw,
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
return 0;
@@ -2806,7 +2807,7 @@ static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates
static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
@@ -2943,7 +2944,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
}
-b43legacy_mac_suspend(dev);
+ b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index 05404fbd1e70..c1395e622759 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -1123,7 +1123,7 @@ void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev)
struct b43legacy_phy *phy = &dev->phy;
u16 regstack[12] = { 0 };
u16 mls;
- u16 fval;
+ s16 fval;
int i;
int j;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 3984fd7d918e..9ec0c60b6da1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -87,6 +87,8 @@ struct brcmf_proto_bcdc_header {
* plus any space that might be needed
* for bus alignment padding.
*/
+#define ROUND_UP_MARGIN 2048
+
struct brcmf_bcdc {
u16 reqid;
u8 bus_header[BUS_HEADER_LEN];
@@ -368,8 +370,7 @@ brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
/* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(bcdc->fws)) {
- if (!success)
- brcmf_fws_bustxfail(bcdc->fws, txp);
+ brcmf_fws_bustxcomplete(bcdc->fws, txp, success);
} else {
if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp))
brcmu_pkt_buf_free_skb(txp);
@@ -397,9 +398,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
}
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
- struct sk_buff *skb, bool inirq)
+ struct sk_buff *skb)
{
- brcmf_fws_rxreorder(ifp, skb, inirq);
+ brcmf_fws_rxreorder(ifp, skb);
}
static void
@@ -471,7 +472,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
- sizeof(struct brcmf_proto_bcdc_dcmd);
+ sizeof(struct brcmf_proto_bcdc_dcmd) + ROUND_UP_MARGIN;
return 0;
fail:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ac02244a6fdf..d0daef674e72 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -784,9 +784,11 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
-#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
+ if (!IS_ENABLED(CONFIG_PM_SLEEP))
+ return 0;
+
sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
if (!sdiodev->freezer)
return -ENOMEM;
@@ -802,6 +804,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
if (sdiodev->freezer) {
WARN_ON(atomic_read(&sdiodev->freezer->freezing));
kfree(sdiodev->freezer);
+ sdiodev->freezer = NULL;
}
}
@@ -833,7 +836,8 @@ static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
- return atomic_read(&sdiodev->freezer->freezing);
+ return IS_ENABLED(CONFIG_PM_SLEEP) &&
+ atomic_read(&sdiodev->freezer->freezing);
}
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
@@ -847,24 +851,16 @@ void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
- atomic_inc(&sdiodev->freezer->thread_count);
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ atomic_inc(&sdiodev->freezer->thread_count);
}
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
- atomic_dec(&sdiodev->freezer->thread_count);
-}
-#else
-static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
-{
- return 0;
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ atomic_dec(&sdiodev->freezer->thread_count);
}
-static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
-{
-}
-#endif /* CONFIG_PM_SLEEP */
-
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
sdiodev->state = BRCMF_SDIOD_DOWN;
@@ -875,13 +871,9 @@ int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
brcmf_sdiod_freezer_detach(sdiodev);
- /* Disable Function 2 */
- sdio_claim_host(sdiodev->func2);
- sdio_disable_func(sdiodev->func2);
- sdio_release_host(sdiodev->func2);
-
- /* Disable Function 1 */
+ /* Disable functions 2 then 1. */
sdio_claim_host(sdiodev->func1);
+ sdio_disable_func(sdiodev->func2);
sdio_disable_func(sdiodev->func1);
sdio_release_host(sdiodev->func1);
@@ -911,7 +903,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
if (ret) {
brcmf_err("Failed to set F1 blocksize\n");
sdio_release_host(sdiodev->func1);
- goto out;
+ return ret;
}
switch (sdiodev->func2->device) {
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
@@ -933,7 +925,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
- goto out;
+ return ret;
} else {
brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
}
@@ -991,6 +983,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359),
{ /* end: all zeroes */ }
@@ -1119,18 +1112,29 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(sdiodev->func1);
+
+ /* Power must be preserved to be able to support WOWL. */
+ if (!(pm_caps & MMC_PM_KEEP_POWER))
+ goto notsup;
- brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
- sdiodev->wowl_enabled = enabled;
+ if (sdiodev->settings->bus.sdio.oob_irq_supported ||
+ pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
+ sdiodev->wowl_enabled = enabled;
+ brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+ return;
+ }
+
+notsup:
+ brcmf_dbg(SDIO, "WOWL not supported\n");
}
-#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
struct sdio_func *func;
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
- mmc_pm_flag_t pm_caps, sdio_flags;
+ mmc_pm_flag_t sdio_flags;
int ret = 0;
func = container_of(dev, struct sdio_func, dev);
@@ -1142,20 +1146,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- pm_caps = sdio_get_host_pm_caps(func);
-
- if (pm_caps & MMC_PM_KEEP_POWER) {
- /* preserve card power during suspend */
+ if (sdiodev->wowl_enabled) {
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->wowl_enabled) {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
- }
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
@@ -1176,21 +1175,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
- mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
int ret = 0;
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- if (!(pm_caps & MMC_PM_KEEP_POWER)) {
+ if (!sdiodev->wowl_enabled) {
/* bus was powered off and device removed, probe again */
ret = brcmf_sdiod_probe(sdiodev);
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
- if (sdiodev->wowl_enabled &&
- sdiodev->settings->bus.sdio.oob_irq_supported)
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev);
@@ -1199,11 +1196,9 @@ static int brcmf_ops_sdio_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_ops_sdio_suspend,
- .resume = brcmf_ops_sdio_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmf_sdio_pm_ops,
+ brcmf_ops_sdio_suspend,
+ brcmf_ops_sdio_resume);
static struct sdio_driver brcmf_sdmmc_driver = {
.probe = brcmf_ops_sdio_probe,
@@ -1212,9 +1207,7 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.id_table = brcmf_sdmmc_ids,
.drv = {
.owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
- .pm = &brcmf_sdio_pm_ops,
-#endif /* CONFIG_PM_SLEEP */
+ .pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
.coredump = brcmf_dev_coredump,
},
};
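
The dev_pm_ops rework above follows the DEFINE_SIMPLE_DEV_PM_OPS() + pm_sleep_ptr() idiom: the ops table is defined unconditionally, and when CONFIG_PM_SLEEP is off the reference compiles away instead of needing #ifdef fencing. A hedged sketch with hypothetical demo_* names:

    #include <linux/pm.h>

    static int demo_suspend(struct device *dev) { return 0; }
    static int demo_resume(struct device *dev)  { return 0; }

    static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

    /* in the driver struct: .pm = pm_sleep_ptr(&demo_pm_ops), */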
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 3f5da3bb6aa5..2208ab3aa795 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -6,6 +6,8 @@
#ifndef BRCMFMAC_BUS_H
#define BRCMFMAC_BUS_H
+#include <linux/kernel.h>
+#include <linux/firmware.h>
#include "debug.h"
/* IDs of the 6 default common rings of msgbuf protocol */
@@ -34,6 +36,11 @@ enum brcmf_bus_protocol_type {
BRCMF_PROTO_MSGBUF
};
+/* Firmware blobs that may be available */
+enum brcmf_blob_type {
+ BRCMF_BLOB_CLM,
+};
+
struct brcmf_mp_device;
struct brcmf_bus_dcmd {
@@ -60,7 +67,7 @@ struct brcmf_bus_dcmd {
* @wowl_config: specify if dongle is configured for wowl when going to suspend
* @get_ramsize: obtain size of device memory.
* @get_memdump: obtain device memory dump in provided buffer.
- * @get_fwname: obtain firmware name.
+ * @get_blob: obtain a firmware blob.
*
* This structure provides an abstract interface towards the
* bus specific driver. For control messages to common driver
@@ -77,8 +84,8 @@ struct brcmf_bus_ops {
void (*wowl_config)(struct device *dev, bool enabled);
size_t (*get_ramsize)(struct device *dev);
int (*get_memdump)(struct device *dev, void *data, size_t len);
- int (*get_fwname)(struct device *dev, const char *ext,
- unsigned char *fw_name);
+ int (*get_blob)(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type);
void (*debugfs_create)(struct device *dev);
int (*reset)(struct device *dev);
};
@@ -89,7 +96,7 @@ struct brcmf_bus_ops {
*
* @commonrings: commonrings which are always there.
* @flowrings: commonrings which are dynamically created and destroyed for data.
- * @rx_dataoffset: if set then all rx data has this this offset.
+ * @rx_dataoffset: if set then all rx data has this offset.
* @max_rxbufpost: maximum number of buffers to post for rx.
* @max_flowrings: maximum number of tx flow rings supported.
* @max_submissionrings: maximum number of submission rings(h2d) supported.
@@ -220,10 +227,10 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
}
static inline
-int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext,
- unsigned char *fw_name)
+int brcmf_bus_get_blob(struct brcmf_bus *bus, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
- return bus->ops->get_fwname(bus->dev, ext, fw_name);
+ return bus->ops->get_blob(bus->dev, fw, type);
}
static inline
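
With the get_fwname-to-get_blob change above, the bus op hands back the firmware blob itself rather than a file name for the caller to request. A sketch of a caller under that assumption, using the BRCMF_BLOB_CLM type introduced in this header; demo_get_clm() is hypothetical:

    #include <linux/firmware.h>
    #include "bus.h"    /* the patched header above */

    static int demo_get_clm(struct brcmf_bus *bus, const struct firmware **fw)
    {
            return brcmf_bus_get_blob(bus, fw, BRCMF_BLOB_CLM);
    }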
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index ba52318615ae..dfcfb3333369 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -16,6 +16,7 @@
#include <brcmu_utils.h>
#include <defs.h>
#include <brcmu_wifi.h>
+#include <brcm_hw_ids.h>
#include "core.h"
#include "debug.h"
#include "tracepoint.h"
@@ -2166,7 +2167,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
offsetof(struct brcmf_assoc_params_le, chanspec_list);
if (cfg->channel)
join_params_size += sizeof(u16);
- ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+ ext_join_params = kzalloc(sizeof(*ext_join_params), GFP_KERNEL);
if (ext_join_params == NULL) {
err = -ENOMEM;
goto done;
@@ -2360,7 +2361,8 @@ done:
static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool unicast, bool multicast)
+ int link_id, u8 key_idx, bool unicast,
+ bool multicast)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
@@ -2394,7 +2396,8 @@ done:
static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key *key;
@@ -2431,8 +2434,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -2456,8 +2459,8 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
if (params->key_len == 0)
- return brcmf_cfg80211_del_key(wiphy, ndev, key_idx, pairwise,
- mac_addr);
+ return brcmf_cfg80211_del_key(wiphy, ndev, -1, key_idx,
+ pairwise, mac_addr);
if (params->key_len > sizeof(key->data)) {
bphy_err(drvr, "Too long key length (%u)\n", params->key_len);
@@ -2552,8 +2555,9 @@ done:
}
static s32
-brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
- bool pairwise, const u8 *mac_addr, void *cookie,
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie,
struct key_params *params))
{
@@ -2609,7 +2613,8 @@ done:
static s32
brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_idx)
+ struct net_device *ndev, int link_id,
+ u8 key_idx)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -3159,10 +3164,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = cfg->pub;
- struct brcmf_bss_info_le *bi;
- const struct brcmf_tlv *tim;
- size_t ie_len;
- u8 *ie;
+ struct brcmf_bss_info_le *bi = NULL;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -3176,29 +3178,8 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
bphy_err(drvr, "Could not get bss info %d\n", err);
goto update_bss_info_out;
}
-
bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
err = brcmf_inform_single_bss(cfg, bi);
- if (err)
- goto update_bss_info_out;
-
- ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
- ie_len = le32_to_cpu(bi->ie_length);
-
- tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (!tim) {
- /*
- * active scan was done so we could not get dtim
- * information out of probe response.
- * so we speficially query dtim information to dongle.
- */
- u32 var;
- err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
- if (err) {
- bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err);
- goto update_bss_info_out;
- }
- }
update_bss_info_out:
brcmf_dbg(TRACE, "Exit");
@@ -3983,7 +3964,6 @@ brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
struct brcmf_pmk_list_le *pmk_list;
int i;
u32 npmk;
- s32 err;
pmk_list = &cfg->pmk_list;
npmk = le32_to_cpu(pmk_list->npmk);
@@ -3992,10 +3972,8 @@ brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
for (i = 0; i < npmk; i++)
brcmf_dbg(CONN, "PMK[%d]: %pM\n", i, &pmk_list->pmk[i].bssid);
- err = brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
- sizeof(*pmk_list));
-
- return err;
+ return brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
+ sizeof(*pmk_list));
}
static s32
@@ -4622,7 +4600,7 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
- s32 pktflags[] = {
+ static const s32 pktflags[] = {
BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
@@ -4964,7 +4942,8 @@ exit:
return err;
}
-static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
+ unsigned int link_id)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -5040,13 +5019,10 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_beacon_data *info)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err;
brcmf_dbg(TRACE, "Enter\n");
- err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
-
- return err;
+ return brcmf_config_ap_mgmt_ie(ifp->vif, info);
}
static int
@@ -5301,6 +5277,7 @@ exit:
static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
@@ -6014,8 +5991,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
done:
kfree(buf);
- roam_info.channel = notify_channel;
- roam_info.bssid = profile->bssid;
+ roam_info.links[0].channel = notify_channel;
+ roam_info.links[0].bssid = profile->bssid;
roam_info.req_ie = conn_info->req_ie;
roam_info.req_ie_len = conn_info->req_ie_len;
roam_info.resp_ie = conn_info->resp_ie;
@@ -6058,7 +6035,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
} else {
conn_params.status = WLAN_STATUS_AUTH_TIMEOUT;
}
- conn_params.bssid = profile->bssid;
+ conn_params.links[0].bssid = profile->bssid;
conn_params.req_ie = conn_info->req_ie;
conn_params.req_ie_len = conn_info->req_ie_len;
conn_params.resp_ie = conn_info->resp_ie;
@@ -6428,6 +6405,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
cfg->dongle_up = false; /* dongle down */
brcmf_abort_scanning(cfg);
brcmf_deinit_priv_mem(cfg);
+ brcmf_clear_assoc_ies(cfg);
}
static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
@@ -7476,6 +7454,21 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
vif_event_equals(event, action), timeout);
}
+static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
+{
+ if (drvr->settings->trivial_ccode_map)
+ return true;
+
+ switch (drvr->bus_if->chip) {
+ case BRCM_CC_43430_CHIP_ID:
+ case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43602_CHIP_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
struct brcmf_fil_country_le *ccreq)
{
@@ -7484,18 +7477,28 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
s32 found_index;
int i;
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device\n");
- return -EINVAL;
- }
-
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ if (brmcf_use_iso3166_ccode_fallback(drvr)) {
+ brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->ccode[0] = alpha2[0];
+ ccreq->ccode[1] = alpha2[1];
+ return 0;
+ }
+
+ brcmf_dbg(TRACE, "No country codes configured for device\n");
+ return -EINVAL;
+ }
+
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 1ee49f9e325d..121893bbaa1d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -641,6 +641,7 @@ static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
*srsize = (32 * 1024);
break;
case BRCM_CC_43430_CHIP_ID:
+ case CY_CC_43439_CHIP_ID:
/* assume sr for now as we can not check
* firmware sr capability at this point.
*/
@@ -704,6 +705,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
switch (ci->pub.chip) {
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43454_CHIP_ID:
return 0x198000;
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
@@ -731,6 +733,10 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x160000;
case CY_CC_43752_CHIP_ID:
return 0x170000;
+ case BRCM_CC_4378_CHIP_ID:
+ return 0x352000;
+ case CY_CC_89459_CHIP_ID:
+ return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
@@ -1257,7 +1263,8 @@ brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
brcmf_chip_resetcore(core, 0, 0, 0);
/* disable bank #3 remap for this device */
- if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+ if (chip->pub.chip == BRCM_CC_43430_CHIP_ID ||
+ chip->pub.chip == CY_CC_43439_CHIP_ID) {
sr = container_of(core, struct brcmf_core_priv, pub);
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
@@ -1401,6 +1408,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43454_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
fallthrough;
@@ -1414,10 +1422,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
reg = chip->ops->read32(chip->ctx, addr);
return (reg & pmu_cc3_mask) != 0;
case BRCM_CC_43430_CHIP_ID:
+ case CY_CC_43439_CHIP_ID:
addr = CORE_CC_REG(base, sr_control1);
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
case CY_CC_4373_CHIP_ID:
+ case CY_CC_89459_CHIP_ID:
/* explicitly check SR engine enable bit */
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index e3758bd86acf..74020fa10065 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -123,7 +123,6 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
struct brcmf_bus *bus = drvr->bus_if;
struct brcmf_dload_data_le *chunk_buf;
const struct firmware *clm = NULL;
- u8 clm_name[BRCMF_FW_NAME_LEN];
u32 chunk_len;
u32 datalen;
u32 cumulative_len;
@@ -133,15 +132,8 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
brcmf_dbg(TRACE, "Enter\n");
- memset(clm_name, 0, sizeof(clm_name));
- err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name);
- if (err) {
- bphy_err(drvr, "get CLM blob file name failed (%d)\n", err);
- return err;
- }
-
- err = firmware_request_nowarn(&clm, clm_name, bus->dev);
- if (err) {
+ err = brcmf_bus_get_blob(bus, &clm, BRCMF_BLOB_CLM);
+ if (err || !clm) {
brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
err);
return 0;
@@ -190,6 +182,31 @@ done:
return err;
}
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr)
+{
+ s32 err;
+
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", addr, ETH_ALEN);
+ if (err < 0)
+ bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+
+ return err;
+}
+
+/* On some boards there is no eeprom to hold the nvram, in this case instead
+ * a board specific nvram is loaded from /lib/firmware. On most boards the
+ * macaddr setting in the /lib/firmware nvram file is ignored because the
+ * wifibt chip has a unique MAC programmed into the chip itself.
+ * But in some cases the actual MAC from the /lib/firmware nvram file gets
+ * used, leading to MAC conflicts.
+ * The MAC addresses in the troublesome nvram files seem to all come from
+ * the same nvram file template, so we only need to check for 1 known
+ * address to detect this.
+ */
+static const u8 brcmf_default_mac_address[ETH_ALEN] = {
+ 0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38
+};
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -202,13 +219,30 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
char *ptr;
s32 err;
- /* retreive mac address */
- err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
- sizeof(ifp->mac_addr));
- if (err < 0) {
- bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
- goto done;
+ if (is_valid_ether_addr(ifp->mac_addr)) {
+ /* set mac address */
+ err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+ if (err < 0)
+ goto done;
+ } else {
+ /* retrieve mac address */
+ err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+ sizeof(ifp->mac_addr));
+ if (err < 0) {
+ bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
+ goto done;
+ }
+
+ if (ether_addr_equal_unaligned(ifp->mac_addr, brcmf_default_mac_address)) {
+ bphy_err(drvr, "Default MAC is used, replacing with random MAC to avoid conflicts\n");
+ eth_random_addr(ifp->mac_addr);
+ ifp->ndev->addr_assign_type = NET_ADDR_RANDOM;
+ err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+ if (err < 0)
+ goto done;
+ }
}
+
memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
@@ -219,7 +253,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
&revinfo, sizeof(revinfo));
if (err < 0) {
bphy_err(drvr, "retrieving revision info failed, %d\n", err);
- strlcpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
+ strscpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
} else {
ri->vendorid = le32_to_cpu(revinfo.vendorid);
ri->deviceid = le32_to_cpu(revinfo.deviceid);
@@ -272,7 +306,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* locate firmware version number for ethtool */
ptr = strrchr(buf, ' ') + 1;
- strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+ strscpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
/* Query for 'clmver' to get CLM version info from firmware */
memset(buf, 0, sizeof(buf));
@@ -382,11 +416,11 @@ static void brcmf_mp_attach(void)
* if not set then if available use the platform data version. To make
* sure it gets initialized at all, always copy the module param version
*/
- strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
+ strscpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
BRCMF_FW_ALTPATH_LEN);
if ((brcmfmac_pdata) && (brcmfmac_pdata->fw_alternative_path) &&
(brcmf_mp_global.firmware_path[0] == '\0')) {
- strlcpy(brcmf_mp_global.firmware_path,
+ strscpy(brcmf_mp_global.firmware_path,
brcmfmac_pdata->fw_alternative_path,
BRCMF_FW_ALTPATH_LEN);
}
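
The default-MAC workaround added in common.c can be sketched in isolation: a well-known template address is replaced with a random, locally administered one to avoid conflicts. The demo_fix_mac() helper below is illustrative, not from the patch:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static void demo_fix_mac(struct net_device *ndev, u8 mac[ETH_ALEN],
                             const u8 bad[ETH_ALEN])
    {
            if (ether_addr_equal_unaligned(mac, bad)) {
                    eth_random_addr(mac);   /* random, locally administered */
                    ndev->addr_assign_type = NET_ADDR_RANDOM;
            }
    }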
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 8b5f49997c8b..aa25abffcc7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -38,6 +38,7 @@ extern struct brcmf_mp_global_t brcmf_mp_global;
* @fcmode: FWS flow control.
* @roamoff: Firmware roaming off?
* @ignore_probe_fail: Ignore probe failure.
+ * @trivial_ccode_map: Assume firmware uses ISO3166 country codes with rev 0
* @country_codes: If available, pointer to struct for translating country codes
* @bus: Bus specific platform data. Only SDIO at the mmoment.
*/
@@ -48,8 +49,11 @@ struct brcmf_mp_device {
bool roamoff;
bool iapp;
bool ignore_probe_fail;
+ bool trivial_ccode_map;
struct brcmfmac_pd_cc *country_codes;
const char *board_type;
+ unsigned char mac[ETH_ALEN];
+ const char *antenna_sku;
union {
struct brcmfmac_sdio_pd sdio;
} bus;
@@ -64,6 +68,7 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr);
#ifdef CONFIG_DMI
void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index fed9cd5f29a2..595ae3ae561e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -7,6 +7,7 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/inetdevice.h>
+#include <linux/property.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <net/addrconf.h>
@@ -232,16 +233,12 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
- struct brcmf_pub *drvr = ifp->drvr;
int err;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
- ETH_ALEN);
- if (err < 0) {
- bphy_err(drvr, "Setting cur_etheraddr failed, %d\n", err);
- } else {
+ err = brcmf_c_set_cur_etheraddr(ifp, sa->sa_data);
+ if (err >= 0) {
brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
eth_hw_addr_set(ifp->ndev, ifp->mac_addr);
@@ -295,6 +292,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
int head_delta;
+ unsigned int tx_bytes = skb->len;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -369,7 +367,7 @@ done:
ndev->stats.tx_dropped++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += tx_bytes;
}
/* Return ok: we always eat the packet */
@@ -400,7 +398,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
* STA connects to the AP interface. This is an obsoleted standard most
@@ -423,15 +421,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
- if (inirq) {
- netif_rx(skb);
- } else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
- */
- netif_rx_ni(skb);
- }
+ netif_rx(skb);
}
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -480,7 +470,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
- brcmf_netif_rx(ifp, skb, false);
+ brcmf_netif_rx(ifp, skb);
}
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -515,7 +505,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
return;
if (brcmf_proto_is_reorder_skb(skb)) {
- brcmf_proto_rxreorder(ifp, skb, inirq);
+ brcmf_proto_rxreorder(ifp, skb);
} else {
/* Process special event packets */
if (handle_event) {
@@ -524,7 +514,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
brcmf_fweh_process_skb(ifp->drvr, skb,
BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
}
- brcmf_netif_rx(ifp, skb, inirq);
+ brcmf_netif_rx(ifp, skb);
}
}
@@ -572,10 +562,10 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
if (drvr->revinfo.result == 0)
brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, drev, sizeof(info->version));
- strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
- strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, drev, sizeof(info->version));
+ strscpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
+ strscpy(info->bus_info, dev_name(drvr->bus_if->dev),
sizeof(info->bus_info));
}
@@ -1205,7 +1195,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
brcmf_dbg(TRACE, "\n");
/* add primary networking interface */
- ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
+ ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d",
+ is_valid_ether_addr(drvr->settings->mac) ? drvr->settings->mac : NULL);
if (IS_ERR(ifp))
return PTR_ERR(ifp);
@@ -1490,8 +1481,10 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
!brcmf_get_pend_8021x_cnt(ifp),
MAX_WAIT_FOR_8021X_TX);
- if (!err)
+ if (!err) {
bphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n");
+ atomic_set(&ifp->pend_8021x_cnt, 0);
+ }
return !err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 8212c9de14f1..340346c122d3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_detach(struct net_device *ndev, bool locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 0af452dca766..86ff174936a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -24,6 +24,13 @@ static const struct brcmf_dmi_data acepc_t8_data = {
BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
};
+/* The Chuwi Hi8 Pro uses the same Ampak AP6212 module as the Chuwi Vi8 Plus
+ * and the nvram for the Vi8 Plus is already in linux-firmware, so use that.
+ */
+static const struct brcmf_dmi_data chuwi_hi8_pro_data = {
+ BRCM_CC_43430_CHIP_ID, 0, "ilife-S806"
+};
+
static const struct brcmf_dmi_data gpd_win_pocket_data = {
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
};
@@ -76,6 +83,17 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "MRD"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/10/2016"),
+ },
+ .driver_data = (void *)&chuwi_hi8_pro_data,
+ },
+ {
/* Cyberbook T116 rugged tablet */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 7c68d9849324..2c2f3e026c13 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -248,7 +248,9 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
- drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
+ drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
+ drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID &&
+ drvr->bus_if->chip != CY_CC_43439_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 0eb13e5df517..f2207793f6e2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -21,6 +21,8 @@
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
#define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff"
+#define BRCMF_FW_MACADDR_FMT "macaddr=%pM"
+#define BRCMF_FW_MACADDR_LEN (7 + ETH_ALEN * 3)
enum nvram_parser_state {
IDLE,
@@ -44,6 +46,7 @@ enum nvram_parser_state {
* @multi_dev_v1: detect pcie multi device v1 (compressed).
* @multi_dev_v2: detect pcie multi device v2.
* @boardrev_found: nvram contains boardrev information.
+ * @strip_mac: strip the MAC address.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -57,6 +60,7 @@ struct nvram_parser {
bool multi_dev_v1;
bool multi_dev_v2;
bool boardrev_found;
+ bool strip_mac;
};
/*
@@ -121,6 +125,10 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
nvp->multi_dev_v2 = true;
if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
nvp->boardrev_found = true;
+ /* strip macaddr if platform MAC overrides */
+ if (nvp->strip_mac &&
+ strncmp(&nvp->data[nvp->entry], "macaddr", 7) == 0)
+ st = COMMENT;
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
@@ -207,6 +215,9 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
size = BRCMF_FW_MAX_NVRAM_SIZE;
else
size = data_len;
+ /* Add space for properties we may add */
+ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
+ size += BRCMF_FW_MACADDR_LEN + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
@@ -366,22 +377,37 @@ static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
nvp->nvram_len++;
}
+static void brcmf_fw_add_macaddr(struct nvram_parser *nvp, u8 *mac)
+{
+ int len;
+
+ len = scnprintf(&nvp->nvram[nvp->nvram_len], BRCMF_FW_MACADDR_LEN + 1,
+ BRCMF_FW_MACADDR_FMT, mac);
+ WARN_ON(len != BRCMF_FW_MACADDR_LEN);
+ nvp->nvram_len += len + 1;
+}
+
/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
* End of buffer is completed with token identifying length of buffer.
*/
static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
- u32 *new_length, u16 domain_nr, u16 bus_nr)
+ u32 *new_length, u16 domain_nr, u16 bus_nr,
+ struct device *dev)
{
struct nvram_parser nvp;
u32 pad;
u32 token;
__le32 token_le;
+ u8 mac[ETH_ALEN];
if (brcmf_init_nvram_parser(&nvp, data, data_len) < 0)
return NULL;
+ if (eth_platform_get_mac_address(dev, mac) == 0)
+ nvp.strip_mac = true;
+
while (nvp.pos < data_len) {
nvp.state = nv_parser_states[nvp.state](&nvp);
if (nvp.state == END)
@@ -402,6 +428,9 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
brcmf_fw_add_defaults(&nvp);
+ if (nvp.strip_mac)
+ brcmf_fw_add_macaddr(&nvp, mac);
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
@@ -428,6 +457,7 @@ struct brcmf_fw {
struct device *dev;
struct brcmf_fw_request *req;
u32 curpos;
+ unsigned int board_index;
void (*done)(struct device *dev, int err, struct brcmf_fw_request *req);
};
@@ -457,43 +487,34 @@ static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len)
static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret)
{
- const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 };
- struct efivar_entry *nvram_efivar;
+ efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f,
+ 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13);
unsigned long data_len = 0;
+ efi_status_t status;
u8 *data = NULL;
- int err;
- nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL);
- if (!nvram_efivar)
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
return NULL;
- memcpy(&nvram_efivar->var.VariableName, name, sizeof(name));
- nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61,
- 0xb5, 0x1f, 0x43, 0x26,
- 0x81, 0x23, 0xd1, 0x13);
-
- err = efivar_entry_size(nvram_efivar, &data_len);
- if (err)
+ status = efi.get_variable(L"nvram", &guid, NULL, &data_len, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL)
goto fail;
data = kmalloc(data_len, GFP_KERNEL);
if (!data)
goto fail;
- err = efivar_entry_get(nvram_efivar, NULL, &data_len, data);
- if (err)
+ status = efi.get_variable(L"nvram", &guid, NULL, &data_len, data);
+ if (status != EFI_SUCCESS)
goto fail;
brcmf_fw_fix_efi_nvram_ccode(data, data_len);
brcmf_info("Using nvram EFI variable\n");
- kfree(nvram_efivar);
*data_len_ret = data_len;
return data;
-
fail:
kfree(data);
- kfree(nvram_efivar);
return NULL;
}
#else
@@ -544,7 +565,8 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (data)
nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
fwctx->req->domain_nr,
- fwctx->req->bus_nr);
+ fwctx->req->bus_nr,
+ fwctx->dev);
if (free_bcm47xx_nvram)
bcm47xx_nvram_release_contents(data);
@@ -594,39 +616,50 @@ static int brcmf_fw_complete_request(const struct firmware *fw,
static char *brcm_alt_fw_path(const char *path, const char *board_type)
{
- char alt_path[BRCMF_FW_NAME_LEN];
- char suffix[5];
+ char base[BRCMF_FW_NAME_LEN];
+ const char *suffix;
+ char *ret;
- strscpy(alt_path, path, BRCMF_FW_NAME_LEN);
- /* At least one character + suffix */
- if (strlen(alt_path) < 5)
+ if (!board_type)
return NULL;
- /* strip .txt or .bin at the end */
- strscpy(suffix, alt_path + strlen(alt_path) - 4, 5);
- alt_path[strlen(alt_path) - 4] = 0;
- strlcat(alt_path, ".", BRCMF_FW_NAME_LEN);
- strlcat(alt_path, board_type, BRCMF_FW_NAME_LEN);
- strlcat(alt_path, suffix, BRCMF_FW_NAME_LEN);
+ suffix = strrchr(path, '.');
+ if (!suffix || suffix == path)
+ return NULL;
+
+ /* strip extension at the end */
+ strscpy(base, path, BRCMF_FW_NAME_LEN);
+ base[suffix - path] = 0;
+
+ ret = kasprintf(GFP_KERNEL, "%s.%s%s", base, board_type, suffix);
+ if (!ret)
+ brcmf_err("out of memory allocating firmware path for '%s'\n",
+ path);
- return kstrdup(alt_path, GFP_KERNEL);
+ brcmf_dbg(TRACE, "FW alt path: %s\n", ret);
+
+ return ret;
}
static int brcmf_fw_request_firmware(const struct firmware **fw,
struct brcmf_fw *fwctx)
{
struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+ unsigned int i;
int ret;
- /* Files can be board-specific, first try a board-specific path */
- if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
+ /* Files can be board-specific, first try board-specific paths */
+ for (i = 0; i < ARRAY_SIZE(fwctx->req->board_types); i++) {
char *alt_path;
- alt_path = brcm_alt_fw_path(cur->path, fwctx->req->board_type);
+ if (!fwctx->req->board_types[i])
+ goto fallback;
+ alt_path = brcm_alt_fw_path(cur->path,
+ fwctx->req->board_types[i]);
if (!alt_path)
goto fallback;
- ret = request_firmware(fw, alt_path, fwctx->dev);
+ ret = firmware_request_nowarn(fw, alt_path, fwctx->dev);
kfree(alt_path);
if (ret == 0)
return ret;
@@ -660,15 +693,40 @@ static void brcmf_fw_request_done_alt_path(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
struct brcmf_fw_item *first = &fwctx->req->items[0];
+ const char *board_type, *alt_path;
int ret = 0;
- /* Fall back to canonical path if board firmware not found */
- if (!fw)
- ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ if (fw) {
+ brcmf_fw_request_done(fw, ctx);
+ return;
+ }
+
+ /* Try next board firmware */
+ if (fwctx->board_index < ARRAY_SIZE(fwctx->req->board_types)) {
+ board_type = fwctx->req->board_types[fwctx->board_index++];
+ if (!board_type)
+ goto fallback;
+ alt_path = brcm_alt_fw_path(first->path, board_type);
+ if (!alt_path)
+ goto fallback;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
- brcmf_fw_request_done);
+ brcmf_fw_request_done_alt_path);
+ kfree(alt_path);
+
+ if (ret < 0)
+ brcmf_fw_request_done(fw, ctx);
+ return;
+ }
- if (fw || ret < 0)
+fallback:
+ /* Fall back to canonical path if board firmware not found */
+ ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_done);
+
+ if (ret < 0)
brcmf_fw_request_done(fw, ctx);
}
@@ -693,7 +751,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
{
struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
- char *alt_path;
+ char *alt_path = NULL;
int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
@@ -712,8 +770,11 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+ if (fwctx->req->board_types[0])
+ alt_path = brcm_alt_fw_path(first->path,
+ fwctx->req->board_types[0]);
if (alt_path) {
+ fwctx->board_index++;
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_done_alt_path);
@@ -774,7 +835,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
fwnames[j].path[0] = '\0';
/* check if firmware path is provided by module parameter */
if (brcmf_mp_global.firmware_path[0] != '\0') {
- strlcpy(fwnames[j].path, mp_path,
+ strscpy(fwnames[j].path, mp_path,
BRCMF_FW_NAME_LEN);
if (end != '/') {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index e290dec9c53d..1266cbaee072 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -11,6 +11,8 @@
#define BRCMF_FW_DEFAULT_PATH "brcm/"
+#define BRCMF_FW_MAX_BOARD_TYPES 8
+
/**
* struct brcmf_firmware_mapping - Used to map chipid/revmask to firmware
* filename and nvram filename. Each bus type implementation should create
@@ -66,7 +68,7 @@ struct brcmf_fw_request {
u16 domain_nr;
u16 bus_nr;
u32 n_items;
- const char *board_type;
+ const char *board_types[BRCMF_FW_MAX_BOARD_TYPES];
struct brcmf_fw_item items[];
};
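
Bus back-ends fill the new array most-specific first and leave the tail NULL, which terminates the lookup early; the SDIO hunk later in this patch, for example, keeps its single entry in slot 0:

	fwreq->board_types[0] = bus->sdiodev->settings->board_type;
	/* slots 1..7 stay NULL */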
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 096f6b969dd8..e1127d7e086d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -419,7 +419,6 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
flowid = flow->hash[i].flowid;
if (flow->rings[flowid]->status != RING_OPEN)
continue;
- flow->rings[flowid]->status = RING_CLOSING;
brcmf_msgbuf_delete_flowring(drvr, flowid);
}
}
@@ -458,10 +457,8 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
(hash[i].ifidx == ifidx)) {
flowid = flow->hash[i].flowid;
- if (flow->rings[flowid]->status == RING_OPEN) {
- flow->rings[flowid]->status = RING_CLOSING;
+ if (flow->rings[flowid]->status == RING_OPEN)
brcmf_msgbuf_delete_flowring(drvr, flowid);
- }
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index bc3f4e4edcdf..dac7eb77799b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
+ if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
+ bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ goto event_free;
+ }
/* convert event message */
emsg_be = &event->emsg;
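
The new check matters because bsscfgidx arrives straight off the wire and is later used as an array subscript when the event is mapped to an interface, roughly (a sketch; the exact lookup sits further down the event path):

	/* drvr->iflist[] has BRCMF_MAX_IFS entries, so an unchecked index
	 * from a corrupt or hostile event would read out of bounds.
	 */
	ifp = drvr->iflist[emsg->bsscfgidx];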
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index d5578ca681bb..72fe8bce6eaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -192,7 +192,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, const char *data, u32 datalen,
+brcmf_create_iovar(const char *name, const char *data, u32 datalen,
char *buf, u32 buflen)
{
u32 len;
@@ -213,7 +213,7 @@ brcmf_create_iovar(char *name, const char *data, u32 datalen,
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -241,7 +241,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
}
s32
-brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -272,7 +272,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
}
s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -280,7 +280,7 @@ brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
}
s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -292,7 +292,7 @@ brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
}
static u32
-brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
+brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
char *buf, u32 buflen)
{
const s8 *prefix = "bsscfg:";
@@ -337,7 +337,7 @@ brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
}
s32
-brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -366,7 +366,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
}
s32
-brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -396,7 +396,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
}
s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -405,7 +405,7 @@ brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
}
s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -417,7 +417,7 @@ brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
return err;
}
-static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
+static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
{
u32 iolen;
@@ -438,7 +438,7 @@ static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
return iolen;
}
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -466,7 +466,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
return err;
}
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -495,7 +495,7 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
return err;
}
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -503,7 +503,7 @@ s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
sizeof(data_le));
}
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -514,12 +514,12 @@ s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
return err;
}
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data)
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
{
return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
}
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data)
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
{
__le16 data_le = cpu_to_le16(*data);
s32 err;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index cb26f8c59c21..bc693157c4b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -84,26 +84,26 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len);
-s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data);
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index e69d1e56996f..f518e025d6e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -135,7 +135,7 @@
/* Link Down indication in WoWL mode: */
#define BRCMF_WOWL_LINKDOWN (1 << 31)
-#define BRCMF_WOWL_MAXPATTERNS 8
+#define BRCMF_WOWL_MAXPATTERNS 16
#define BRCMF_WOWL_MAXPATTERNSIZE 128
#define BRCMF_COUNTRY_BUF_SZ 4
@@ -1068,7 +1068,7 @@ struct brcmf_mkeep_alive_pkt_le {
__le32 period_msec;
__le16 len_bytes;
u8 keep_alive_id;
- u8 data[0];
+ u8 data[];
} __packed;
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 19b0f318f93e..36af81975855 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -688,7 +688,7 @@ static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
struct brcmf_fws_mac_descriptor *desc)
{
if (desc == &fws->desc.other)
- strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
+ strscpy(desc->name, "MAC-OTHER", sizeof(desc->name));
else if (desc->mac_handle)
scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
desc->mac_handle, desc->interface_id);
@@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
rfi->pend_pkts -= skb_queue_len(skb_list);
}
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
{
struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
@@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
/* validate flags and flow id */
if (flags == 0xFF) {
bphy_err(drvr, "invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
}
}
@@ -2475,7 +2475,8 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}
-void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
+void brcmf_fws_bustxcomplete(struct brcmf_fws_info *fws, struct sk_buff *skb,
+ bool success)
{
u32 hslot;
@@ -2483,11 +2484,14 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
brcmu_pkt_buf_free_skb(skb);
return;
}
- brcmf_fws_lock(fws);
- hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
- brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0,
- 1);
- brcmf_fws_unlock(fws);
+
+ if (!success) {
+ brcmf_fws_lock(fws);
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot,
+ 0, 0, 1);
+ brcmf_fws_unlock(fws);
+ }
}
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 50e424b5880d..f9c36cd8f1de 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -40,8 +40,9 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_fws_reset_interface(struct brcmf_if *ifp);
void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
-void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bustxcomplete(struct brcmf_fws_info *fws, struct sk_buff *skb,
+ bool success);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 7c8e08ee8f0f..cec53f934940 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -71,6 +71,7 @@
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS 48
+#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES 10
struct msgbuf_common_hdr {
u8 msgtype;
@@ -536,8 +537,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
- bool inirq)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}
@@ -807,8 +807,12 @@ static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
- if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ if (flowid == BRCMF_FLOWRING_INVALID_ID) {
return -ENOMEM;
+ } else {
+ brcmf_flowring_enqueue(flow, flowid, skb);
+ return 0;
+ }
}
queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
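
After the change, a freshly created flowring takes an enqueue-and-return path, so the frame that triggered the creation is queued on the new ring instead of falling through to the queue-count bookkeeping that only applies to rings already in service. In outline (sketch):

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID)
			return -ENOMEM;		/* no ring, drop */
		brcmf_flowring_enqueue(flow, flowid, skb);
		return 0;	/* tx resumes once firmware confirms the ring */
	}
	/* existing ring: enqueue and maybe kick the tx worker */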
@@ -1191,7 +1195,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_netif_rx(ifp, skb, false);
+ brcmf_netif_rx(ifp, skb);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
@@ -1396,9 +1400,27 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct msgbuf_tx_flowring_delete_req *delete;
struct brcmf_commonring *commonring;
+ struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
+ struct brcmf_flowring *flow = msgbuf->flow;
void *ret_ptr;
u8 ifidx;
int err;
+ int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;
+
+ /* make sure it is not in txflow */
+ brcmf_commonring_lock(commonring_del);
+ flow->rings[flowid]->status = RING_CLOSING;
+ brcmf_commonring_unlock(commonring_del);
+
+ /* wait for commonring txflow finished */
+ while (retry && atomic_read(&commonring_del->outstanding_tx)) {
+ usleep_range(5000, 10000);
+ retry--;
+ }
+ if (!retry) {
+ brcmf_err("timed out waiting for txstatus\n");
+ atomic_set(&commonring_del->outstanding_tx, 0);
+ }
/* no need to submit if firmware can not be reached */
if (drvr->bus_if->state != BRCMF_BUS_UP) {
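
The added wait is a bounded poll on the ring's outstanding_tx counter: mark the ring RING_CLOSING under the commonring lock so txflow stops touching it, then sleep-poll until the in-flight count drains or ten retries elapse. The same idiom in isolation (a sketch, not the driver code):

	static bool drain_outstanding(atomic_t *outstanding, int retries)
	{
		while (retries-- && atomic_read(outstanding))
			usleep_range(5000, 10000);	/* 5-10 ms per try */
		return atomic_read(outstanding) == 0;
	}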
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 2e322edbb907..6a849f4a94dd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 1024
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64
#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 1024
#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 513c7e6421b2..a83699de01ec 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <defs.h>
#include "debug.h"
@@ -23,6 +24,12 @@ static int brcmf_of_get_country_codes(struct device *dev,
count = of_property_count_strings(np, "brcm,ccode-map");
if (count < 0) {
+ /* If no explicit country code map is specified, check whether
+ * the trivial map should be used.
+ */
+ settings->trivial_ccode_map =
+ of_property_read_bool(np, "brcm,ccode-map-trivial");
+
/* The property is optional, so return success if it doesn't
* exist. Otherwise propagate the error code.
*/
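
Per the brcm,ccode-map-trivial devicetree property, the trivial map sends each country code to itself with firmware revision 0 instead of consulting a brcm,ccode-map table. Where the driver translates a regulatory request, that amounts to something like the following (hypothetical illustration; the real translation lives in the cfg80211 glue, not in of.c):

	if (ifp->drvr->settings->trivial_ccode_map) {
		memcpy(ccreq->ccode, alpha2, 2);	/* code maps to itself */
		ccreq->rev = cpu_to_le32(0);		/* firmware rev 0 */
	}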
@@ -63,28 +70,36 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
+ const char *prop;
int irq;
int err;
u32 irqf;
u32 val;
+ /* Apple ARM64 platforms have their own idea of board type, passed in
+ * via the device tree. They also have an antenna SKU parameter.
+ */
+ if (!of_property_read_string(np, "brcm,board-type", &prop))
+ settings->board_type = prop;
+
+ if (!of_property_read_string(np, "apple,antenna-sku", &prop))
+ settings->antenna_sku = prop;
+
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
- if (root) {
- int i, len;
+ if (root && !settings->board_type) {
char *board_type;
const char *tmp;
of_property_read_string_index(root, "compatible", 0, &tmp);
/* get rid of '/' in the compatible string to be able to find the FW */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
- for (i = 0; i < board_type[i]; i++) {
- if (board_type[i] == '/')
- board_type[i] = '-';
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type) {
+ of_node_put(root);
+ return;
}
+ strreplace(board_type, '/', '-');
settings->board_type = board_type;
of_node_put(root);
@@ -97,6 +112,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
if (err)
brcmf_err("failed to get OF country code map (err=%d)\n", err);
+ of_get_mac_address(np, settings->mac);
+
if (bus_type != BRCMF_BUSTYPE_SDIO)
return;
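
The devm_kstrdup()/strreplace() pair above also retires a hand-rolled copy loop whose termination condition (i < board_type[i]) compared the index against a character value. A user-space model of the fallback board-type derivation, with a hypothetical machine compatible:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char board[64];
		char *p;

		/* first "compatible" entry of the machine, e.g.: */
		strncpy(board, "solidrun,cubox-i/q", sizeof(board) - 1);
		board[sizeof(board) - 1] = '\0';

		for (p = board; *p; p++)	/* what strreplace() does */
			if (*p == '/')
				*p = '-';

		printf("%s\n", board);		/* solidrun,cubox-i-q */
		return 0;
	}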
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 4735063e4c03..10d9d9c63b28 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -90,8 +90,8 @@
#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */
#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */
#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */
-#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comback Request AF */
-#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comback Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500)
@@ -158,7 +158,7 @@ struct brcmf_p2p_pub_act_frame {
u8 oui_type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
@@ -177,7 +177,7 @@ struct brcmf_p2p_action_frame {
u8 type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
@@ -192,7 +192,7 @@ struct brcmf_p2psd_gas_pub_act_frame {
u8 category;
u8 action;
u8 dialog_token;
- u8 query_data[1];
+ u8 query_data[];
};
/**
@@ -225,7 +225,7 @@ static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
return false;
pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ if (frame_len < sizeof(*pact_frm))
return false;
if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
@@ -253,7 +253,7 @@ static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
return false;
act_frm = (struct brcmf_p2p_action_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ if (frame_len < sizeof(*act_frm))
return false;
if (act_frm->category == P2P_AF_CATEGORY &&
@@ -280,7 +280,7 @@ static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
return false;
sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ if (frame_len < sizeof(*sd_act_frm))
return false;
if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
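
The flexible-array conversion is what makes the simplified length checks correct: with u8 elts[1] the old code had to subtract the placeholder byte, whereas sizeof() of a struct ending in u8 elts[] already counts only the fixed header. For instance (full struct reproduced from the driver source):

	struct brcmf_p2p_pub_act_frame {
		u8 category;
		u8 action;
		u8 oui[3];
		u8 oui_type;
		u8 subtype;
		u8 dialog_token;
		u8 elts[];	/* flexible array: not counted by sizeof */
	};
	/* sizeof(struct brcmf_p2p_pub_act_frame) == 8, the header only */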
@@ -396,11 +396,11 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
(tx) ? "TX" : "RX");
break;
case P2PSD_ACTION_ID_GAS_CREQ:
- brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+ brcmf_dbg(TRACE, "%s P2P GAS Comeback Request\n",
(tx) ? "TX" : "RX");
break;
case P2PSD_ACTION_ID_GAS_CRESP:
- brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+ brcmf_dbg(TRACE, "%s P2P GAS Comeback Response\n",
(tx) ? "TX" : "RX");
break;
default:
@@ -1128,7 +1128,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
/* 100ms ~ 300ms */
err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
- 100 * (1 + prandom_u32() % 3));
+ 100 * (1 + prandom_u32_max(3)));
else
err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 8b149996fc00..80083f9ea311 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <soc.h>
@@ -58,6 +59,16 @@ BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
+BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
+
+/* firmware config files */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
+
+/* per-board firmware binaries */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.clm_blob");
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
@@ -79,6 +90,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
+ BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
+ BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
@@ -110,6 +123,12 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
+#define BRCMF_PCIE_64_PCIE2REG_INTMASK 0xC14
+#define BRCMF_PCIE_64_PCIE2REG_MAILBOXINT 0xC30
+#define BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK 0xC34
+#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0 0xA20
+#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1 0xA24
+
#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02
@@ -129,6 +148,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
+#define BRCMF_PCIE_MB_INT_FN0 (BRCMF_PCIE_MB_INT_FN0_0 | \
+ BRCMF_PCIE_MB_INT_FN0_1)
#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
BRCMF_PCIE_MB_INT_D2H0_DB1 | \
BRCMF_PCIE_MB_INT_D2H1_DB0 | \
@@ -138,6 +159,40 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
+#define BRCMF_PCIE_64_MB_INT_D2H0_DB0 0x1
+#define BRCMF_PCIE_64_MB_INT_D2H0_DB1 0x2
+#define BRCMF_PCIE_64_MB_INT_D2H1_DB0 0x4
+#define BRCMF_PCIE_64_MB_INT_D2H1_DB1 0x8
+#define BRCMF_PCIE_64_MB_INT_D2H2_DB0 0x10
+#define BRCMF_PCIE_64_MB_INT_D2H2_DB1 0x20
+#define BRCMF_PCIE_64_MB_INT_D2H3_DB0 0x40
+#define BRCMF_PCIE_64_MB_INT_D2H3_DB1 0x80
+#define BRCMF_PCIE_64_MB_INT_D2H4_DB0 0x100
+#define BRCMF_PCIE_64_MB_INT_D2H4_DB1 0x200
+#define BRCMF_PCIE_64_MB_INT_D2H5_DB0 0x400
+#define BRCMF_PCIE_64_MB_INT_D2H5_DB1 0x800
+#define BRCMF_PCIE_64_MB_INT_D2H6_DB0 0x1000
+#define BRCMF_PCIE_64_MB_INT_D2H6_DB1 0x2000
+#define BRCMF_PCIE_64_MB_INT_D2H7_DB0 0x4000
+#define BRCMF_PCIE_64_MB_INT_D2H7_DB1 0x8000
+
+#define BRCMF_PCIE_64_MB_INT_D2H_DB (BRCMF_PCIE_64_MB_INT_D2H0_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H0_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H1_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H1_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H2_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H2_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H3_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H3_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H4_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H4_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H5_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H5_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H6_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H6_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H7_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H7_DB1)
+
#define BRCMF_PCIE_SHARED_VERSION_7 7
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
@@ -247,12 +302,24 @@ struct brcmf_pcie_core_info {
u32 wrapbase;
};
+#define BRCMF_OTP_MAX_PARAM_LEN 16
+
+struct brcmf_otp_params {
+ char module[BRCMF_OTP_MAX_PARAM_LEN];
+ char vendor[BRCMF_OTP_MAX_PARAM_LEN];
+ char version[BRCMF_OTP_MAX_PARAM_LEN];
+ bool valid;
+};
+
struct brcmf_pciedev_info {
enum brcmf_pcie_state state;
bool in_irq;
struct pci_dev *pdev;
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
+ char clm_name[BRCMF_FW_NAME_LEN];
+ const struct firmware *clm_fw;
+ const struct brcmf_pcie_reginfo *reginfo;
void __iomem *regs;
void __iomem *tcm;
u32 ram_base;
@@ -272,6 +339,7 @@ struct brcmf_pciedev_info {
void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value);
struct brcmf_mp_device *settings;
+ struct brcmf_otp_params otp;
};
struct brcmf_pcie_ringbuf {
@@ -338,11 +406,49 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
+struct brcmf_pcie_reginfo {
+ u32 intmask;
+ u32 mailboxint;
+ u32 mailboxmask;
+ u32 h2d_mailbox_0;
+ u32 h2d_mailbox_1;
+ u32 int_d2h_db;
+ u32 int_fn0;
+};
+
+static const struct brcmf_pcie_reginfo brcmf_reginfo_default = {
+ .intmask = BRCMF_PCIE_PCIE2REG_INTMASK,
+ .mailboxint = BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ .mailboxmask = BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+ .h2d_mailbox_0 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0,
+ .h2d_mailbox_1 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1,
+ .int_d2h_db = BRCMF_PCIE_MB_INT_D2H_DB,
+ .int_fn0 = BRCMF_PCIE_MB_INT_FN0,
+};
+
+static const struct brcmf_pcie_reginfo brcmf_reginfo_64 = {
+ .intmask = BRCMF_PCIE_64_PCIE2REG_INTMASK,
+ .mailboxint = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT,
+ .mailboxmask = BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK,
+ .h2d_mailbox_0 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0,
+ .h2d_mailbox_1 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1,
+ .int_d2h_db = BRCMF_PCIE_64_MB_INT_D2H_DB,
+ .int_fn0 = 0,
+};
+
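
Which table applies is decided once from the PCIe2 core revision (the probe hunk below does exactly this); every register access then goes through devinfo->reginfo instead of hard-coded offsets:

	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
	devinfo->reginfo = core->rev >= 64 ? &brcmf_reginfo_64
					   : &brcmf_reginfo_default;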
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq);
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
+static u16
+brcmf_pcie_read_reg16(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+{
+ void __iomem *address = devinfo->regs + reg_offset;
+
+ return ioread16(address);
+}
+
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@@ -448,47 +554,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
static void
-brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
- void *srcaddr, u32 len)
-{
- void __iomem *address = devinfo->tcm + mem_offset;
- __le32 *src32;
- __le16 *src16;
- u8 *src8;
-
- if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
- if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
- src8 = (u8 *)srcaddr;
- while (len) {
- iowrite8(*src8, address);
- address++;
- src8++;
- len--;
- }
- } else {
- len = len / 2;
- src16 = (__le16 *)srcaddr;
- while (len) {
- iowrite16(le16_to_cpu(*src16), address);
- address += 2;
- src16++;
- len--;
- }
- }
- } else {
- len = len / 4;
- src32 = (__le32 *)srcaddr;
- while (len) {
- iowrite32(le32_to_cpu(*src32), address);
- address += 4;
- src32++;
- len--;
- }
- }
-}
-
-
-static void
brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
void *dstaddr, u32 len)
{
@@ -529,6 +594,8 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
+#define READCC32(devinfo, reg) brcmf_pcie_read_reg32(devinfo, \
+ CHIPCREGOFFS(reg))
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
CHIPCREGOFFS(reg), value)
@@ -777,6 +844,8 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
return;
console = &devinfo->shared.console;
+ if (!console->base_addr)
+ return;
addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
newidx = brcmf_pcie_read_tcm32(devinfo, addr);
while (newidx != console->read_idx) {
@@ -810,30 +879,29 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
{
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0);
}
static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
{
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
- BRCMF_PCIE_MB_INT_D2H_DB |
- BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask,
+ devinfo->reginfo->int_d2h_db |
+ devinfo->reginfo->int_fn0);
}
static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
{
if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
brcmf_pcie_write_reg32(devinfo,
- BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
+ devinfo->reginfo->h2d_mailbox_1, 1);
}
static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
- if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
+ if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) {
brcmf_pcie_intr_disable(devinfo);
brcmf_dbg(PCIE, "Enter\n");
return IRQ_WAKE_THREAD;
@@ -848,15 +916,14 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
u32 status;
devinfo->in_irq = true;
- status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
brcmf_dbg(PCIE, "Enter %x\n", status);
if (status) {
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint,
status);
- if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1))
+ if (status & devinfo->reginfo->int_fn0)
brcmf_pcie_handle_mb_data(devinfo);
- if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
+ if (status & devinfo->reginfo->int_d2h_db) {
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
brcmf_proto_msgbuf_rx_trigger(
&devinfo->pdev->dev);
@@ -915,8 +982,8 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
if (devinfo->in_irq)
brcmf_err(bus, "Still in IRQ (processing) !!!\n");
- status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
+ status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status);
devinfo->irq_allocated = false;
}
@@ -968,7 +1035,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
brcmf_dbg(PCIE, "RING !\n");
/* Any arbitrary value will do, lets use 1 */
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1);
return 0;
}
@@ -1348,6 +1415,18 @@ static void brcmf_pcie_down(struct device *dev)
{
}
+static int brcmf_pcie_preinit(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+ brcmf_dbg(PCIE, "Enter\n");
+
+ brcmf_pcie_intr_enable(buspub->devinfo);
+ brcmf_pcie_hostready(buspub->devinfo);
+
+ return 0;
+}
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
@@ -1401,23 +1480,25 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
return 0;
}
-static
-int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_pcie_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
- fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
- brcmf_pcie_fwnames,
- ARRAY_SIZE(brcmf_pcie_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
+ switch (type) {
+ case BRCMF_BLOB_CLM:
+ *fw = devinfo->clm_fw;
+ devinfo->clm_fw = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!*fw)
+ return -ENOENT;
- kfree(fwreq);
return 0;
}
@@ -1456,6 +1537,7 @@ static int brcmf_pcie_reset(struct device *dev)
}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+ .preinit = brcmf_pcie_preinit,
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
.txctl = brcmf_pcie_tx_ctlpkt,
@@ -1463,7 +1545,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.wowl_config = brcmf_pcie_wowl_config,
.get_ramsize = brcmf_pcie_get_ramsize,
.get_memdump = brcmf_pcie_get_memdump,
- .get_fwname = brcmf_pcie_get_fwname,
+ .get_blob = brcmf_pcie_get_blob,
.reset = brcmf_pcie_reset,
};
@@ -1540,6 +1622,7 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
shared->max_rxbufpost, shared->rx_dataoffset);
brcmf_pcie_bus_console_init(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
return 0;
}
@@ -1563,8 +1646,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
return err;
brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
- brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
- (void *)fw->data, fw->size);
+ memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
+ (void *)fw->data, fw->size);
resetintr = get_unaligned_le32(fw->data);
release_firmware(fw);
@@ -1578,7 +1661,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
address = devinfo->ci->rambase + devinfo->ci->ramsize -
nvram_len;
- brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+ memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
brcmf_fw_nvram_free(nvram);
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
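
The removed brcmf_pcie_copy_mem_todev() reimplemented width-splitting that the kernel already provides; memcpy_toio() performs an architecture-correct copy into MMIO space, so the firmware image and the NVRAM blob each download with a single call:

	memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
		    fw->data, fw->size);			/* firmware */
	memcpy_toio(devinfo->tcm + address, nvram, nvram_len);	/* NVRAM */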
@@ -1715,15 +1798,22 @@ static int brcmf_pcie_buscoreprep(void *ctx)
static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
- u32 val;
+ struct brcmf_core *core;
+ u32 val, reg;
devinfo->ci = chip;
brcmf_pcie_reset_device(devinfo);
- val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ /* reginfo is not ready yet */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_PCIE2);
+ if (core->rev >= 64)
+ reg = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT;
+ else
+ reg = BRCMF_PCIE_PCIE2REG_MAILBOXINT;
+
+ val = brcmf_pcie_read_reg32(devinfo, reg);
if (val != 0xffffffff)
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
- val);
+ brcmf_pcie_write_reg32(devinfo, reg, val);
return 0;
}
@@ -1746,8 +1836,206 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.write32 = brcmf_pcie_buscore_write32,
};
+#define BRCMF_OTP_SYS_VENDOR 0x15
+#define BRCMF_OTP_BRCM_CIS 0x80
+
+#define BRCMF_OTP_VENDOR_HDR 0x00000008
+
+static int
+brcmf_pcie_parse_otp_sys_vendor(struct brcmf_pciedev_info *devinfo,
+ u8 *data, size_t size)
+{
+ int idx = 4;
+ const char *chip_params;
+ const char *board_params;
+ const char *p;
+
+ /* 4-byte header and two empty strings */
+ if (size < 6)
+ return -EINVAL;
+
+ if (get_unaligned_le32(data) != BRCMF_OTP_VENDOR_HDR)
+ return -EINVAL;
+
+ chip_params = &data[idx];
+
+ /* Skip first string, including terminator */
+ idx += strnlen(chip_params, size - idx) + 1;
+ if (idx >= size)
+ return -EINVAL;
+
+ board_params = &data[idx];
+
+ /* Skip to terminator of second string */
+ idx += strnlen(board_params, size - idx);
+ if (idx >= size)
+ return -EINVAL;
+
+ /* At this point both strings are guaranteed NUL-terminated */
+ brcmf_dbg(PCIE, "OTP: chip_params='%s' board_params='%s'\n",
+ chip_params, board_params);
+
+ p = skip_spaces(board_params);
+ while (*p) {
+ char tag = *p++;
+ const char *end;
+ size_t len;
+
+ if (*p++ != '=') /* implicit NUL check */
+ return -EINVAL;
+
+ /* *p might be NUL here; if so, end == p and len == 0 */
+ end = strchrnul(p, ' ');
+ len = end - p;
+
+ /* leave 1 byte for NUL in destination string */
+ if (len > (BRCMF_OTP_MAX_PARAM_LEN - 1))
+ return -EINVAL;
+
+ /* Copy len characters plus a NUL terminator */
+ switch (tag) {
+ case 'M':
+ strscpy(devinfo->otp.module, p, len + 1);
+ break;
+ case 'V':
+ strscpy(devinfo->otp.vendor, p, len + 1);
+ break;
+ case 'm':
+ strscpy(devinfo->otp.version, p, len + 1);
+ break;
+ }
+
+ /* Skip to next arg, if any */
+ p = skip_spaces(end);
+ }
+
+ brcmf_dbg(PCIE, "OTP: module=%s vendor=%s version=%s\n",
+ devinfo->otp.module, devinfo->otp.vendor,
+ devinfo->otp.version);
+
+ if (!devinfo->otp.module[0] ||
+ !devinfo->otp.vendor[0] ||
+ !devinfo->otp.version[0])
+ return -EINVAL;
+
+ devinfo->otp.valid = true;
+ return 0;
+}
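
A worked example for the tag parser, using values consistent with the firmware-name comment later in this patch (the OTP string itself is hypothetical):

	/*
	 * board_params = "M=RASP V=m m=6.11"
	 *
	 *   tag 'M' -> otp.module  = "RASP"
	 *   tag 'V' -> otp.vendor  = "m"
	 *   tag 'm' -> otp.version = "6.11"
	 *
	 * Unknown tags are skipped over; any value longer than
	 * BRCMF_OTP_MAX_PARAM_LEN - 1 characters fails the parse.
	 */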
+
+static int
+brcmf_pcie_parse_otp(struct brcmf_pciedev_info *devinfo, u8 *otp, size_t size)
+{
+ int p = 0;
+ int ret = -EINVAL;
+
+ brcmf_dbg(PCIE, "parse_otp size=%zd\n", size);
+
+ while (p < (size - 1)) {
+ u8 type = otp[p];
+ u8 length = otp[p + 1];
+
+ if (type == 0)
+ break;
+
+ if ((p + 2 + length) > size)
+ break;
+
+ switch (type) {
+ case BRCMF_OTP_SYS_VENDOR:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): SYS_VENDOR\n",
+ p, length);
+ ret = brcmf_pcie_parse_otp_sys_vendor(devinfo,
+ &otp[p + 2],
+ length);
+ break;
+ case BRCMF_OTP_BRCM_CIS:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): BRCM_CIS\n",
+ p, length);
+ break;
+ default:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): Unknown type 0x%x\n",
+ p, length, type);
+ break;
+ }
+
+ p += 2 + length;
+ }
+
+ return ret;
+}
+
+static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
+{
+ const struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
+ u32 coreid, base, words, idx, sromctl;
+ u16 *otp;
+ struct brcmf_core *core;
+ int ret;
+
+ switch (devinfo->ci->chip) {
+ case BRCM_CC_4378_CHIP_ID:
+ coreid = BCMA_CORE_GCI;
+ base = 0x1120;
+ words = 0x170;
+ break;
+ default:
+ /* OTP not supported on this chip */
+ return 0;
+ }
+
+ core = brcmf_chip_get_core(devinfo->ci, coreid);
+ if (!core) {
+ brcmf_err(bus, "No OTP core\n");
+ return -ENODEV;
+ }
+
+ if (coreid == BCMA_CORE_CHIPCOMMON) {
+ /* Chips with OTP accessed via ChipCommon need additional
+ * handling to access the OTP
+ */
+ brcmf_pcie_select_core(devinfo, coreid);
+ sromctl = READCC32(devinfo, sromcontrol);
+
+ if (!(sromctl & BCMA_CC_SROM_CONTROL_OTP_PRESENT)) {
+ /* Chip lacks OTP, try without it... */
+ brcmf_err(bus,
+ "OTP unavailable, using default firmware\n");
+ return 0;
+ }
+
+ /* Map OTP to shadow area */
+ WRITECC32(devinfo, sromcontrol,
+ sromctl | BCMA_CC_SROM_CONTROL_OTPSEL);
+ }
+
+ otp = kcalloc(words, sizeof(u16), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ /* Map bus window to SROM/OTP shadow area in core */
+ base = brcmf_pcie_buscore_prep_addr(devinfo->pdev, base + core->base);
+
+ brcmf_dbg(PCIE, "OTP data:\n");
+ for (idx = 0; idx < words; idx++) {
+ otp[idx] = brcmf_pcie_read_reg16(devinfo, base + 2 * idx);
+ brcmf_dbg(PCIE, "[%8x] 0x%04x\n", base + 2 * idx, otp[idx]);
+ }
+
+ if (coreid == BCMA_CORE_CHIPCOMMON) {
+ brcmf_pcie_select_core(devinfo, coreid);
+ WRITECC32(devinfo, sromcontrol, sromctl);
+ }
+
+ ret = brcmf_pcie_parse_otp(devinfo, (u8 *)otp, 2 * words);
+ kfree(otp);
+
+ return ret;
+}
+
#define BRCMF_PCIE_FW_CODE 0
#define BRCMF_PCIE_FW_NVRAM 1
+#define BRCMF_PCIE_FW_CLM 2
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq)
@@ -1772,11 +2060,14 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
+ devinfo->clm_fw = fwreq->items[BRCMF_PCIE_FW_CLM].binary;
kfree(fwreq);
ret = brcmf_chip_get_raminfo(devinfo->ci);
if (ret) {
brcmf_err(bus, "Failed to get RAM info\n");
+ release_firmware(fw);
+ brcmf_fw_nvram_free(nvram);
goto fail;
}
@@ -1826,9 +2117,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
init_waitqueue_head(&devinfo->mbdata_resp_wait);
- brcmf_pcie_intr_enable(devinfo);
- brcmf_pcie_hostready(devinfo);
-
ret = brcmf_attach(&devinfo->pdev->dev);
if (ret)
goto fail;
@@ -1848,6 +2136,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
struct brcmf_fw_name fwnames[] = {
{ ".bin", devinfo->fw_name },
{ ".txt", devinfo->nvram_name },
+ { ".clm_blob", devinfo->clm_name },
};
fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
@@ -1860,11 +2149,51 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
- fwreq->board_type = devinfo->settings->board_type;
+ fwreq->items[BRCMF_PCIE_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_PCIE_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
+ /* Apple platforms with fancy firmware/NVRAM selection */
+ if (devinfo->settings->board_type &&
+ devinfo->settings->antenna_sku &&
+ devinfo->otp.valid) {
+ const struct brcmf_otp_params *otp = &devinfo->otp;
+ struct device *dev = &devinfo->pdev->dev;
+ const char **bt = fwreq->board_types;
+
+ brcmf_dbg(PCIE, "Apple board: %s\n",
+ devinfo->settings->board_type);
+
+ /* Example: apple,shikoku-RASP-m-6.11-X3 */
+ bt[0] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor, otp->version,
+ devinfo->settings->antenna_sku);
+ bt[1] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor, otp->version);
+ bt[2] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor);
+ bt[3] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ devinfo->settings->board_type,
+ otp->module);
+ bt[4] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ devinfo->settings->board_type,
+ devinfo->settings->antenna_sku);
+ bt[5] = devinfo->settings->board_type;
+
+ if (!bt[0] || !bt[1] || !bt[2] || !bt[3] || !bt[4]) {
+ kfree(fwreq);
+ return NULL;
+ }
+ } else {
+ brcmf_dbg(PCIE, "Board: %s\n", devinfo->settings->board_type);
+ fwreq->board_types[0] = devinfo->settings->board_type;
+ }
+
return fwreq;
}
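
With the example values above (board apple,shikoku, module RASP, vendor m, version 6.11, antenna SKU X3 — a hypothetical combination) and the 4378B1 base name, the NVRAM lookup would try, in order:

	brcm/brcmfmac4378b1-pcie.apple,shikoku-RASP-m-6.11-X3.txt
	brcm/brcmfmac4378b1-pcie.apple,shikoku-RASP-m-6.11.txt
	brcm/brcmfmac4378b1-pcie.apple,shikoku-RASP-m.txt
	brcm/brcmfmac4378b1-pcie.apple,shikoku-RASP.txt
	brcm/brcmfmac4378b1-pcie.apple,shikoku-X3.txt
	brcm/brcmfmac4378b1-pcie.apple,shikoku.txt
	brcm/brcmfmac4378b1-pcie.txt			/* canonical fallback */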
@@ -1875,6 +2204,7 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_fw_request *fwreq;
struct brcmf_pciedev_info *devinfo;
struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_core *core;
struct brcmf_bus *bus;
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
@@ -1894,6 +2224,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
+ core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+ if (core->rev >= 64)
+ devinfo->reginfo = &brcmf_reginfo_64;
+ else
+ devinfo->reginfo = &brcmf_reginfo_default;
+
pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
if (pcie_bus_dev == NULL) {
ret = -ENOMEM;
@@ -1936,6 +2272,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
+ ret = brcmf_pcie_read_otp(devinfo);
+ if (ret) {
+ brcmf_err(bus, "failed to parse OTP\n");
+ goto fail_brcmf;
+ }
+
fwreq = brcmf_pcie_prepare_fw_request(devinfo);
if (!fwreq) {
ret = -ENOMEM;
@@ -1980,6 +2322,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
return;
devinfo = bus->bus_priv.pcie->devinfo;
+ brcmf_pcie_bus_console_read(devinfo, false);
devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
if (devinfo->ci)
@@ -1998,6 +2341,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
brcmf_pcie_release_ringbuffers(devinfo);
brcmf_pcie_reset_device(devinfo);
brcmf_pcie_release_resource(devinfo);
+ release_firmware(devinfo->clm_fw);
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
@@ -2055,7 +2399,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
/* Check if device is still up and running, if so we are ready */
- if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
+ if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) {
brcmf_dbg(PCIE, "Try to wakeup device....\n");
if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
goto cleanup;
@@ -2106,6 +2450,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_RAW_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
@@ -2121,6 +2466,9 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index fabfbb0b40b0..170c61c8136c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -158,12 +158,12 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
struct brcmf_pno_macaddr_le pfn_mac;
u8 *mac_addr = NULL;
u8 *mac_mask = NULL;
- int err, i;
+ int err, i, ri;
- for (i = 0; i < pi->n_reqs; i++)
- if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- mac_addr = pi->reqs[i]->mac_addr;
- mac_mask = pi->reqs[i]->mac_addr_mask;
+ for (ri = 0; ri < pi->n_reqs; ri++)
+ if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[ri]->mac_addr;
+ mac_mask = pi->reqs[ri]->mac_addr_mask;
break;
}
@@ -177,7 +177,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++) {
pfn_mac.mac[i] &= mac_mask[i];
- pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
+ pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]);
}
/* Clear multi bit */
pfn_mac.mac[0] &= 0xFE;
@@ -185,7 +185,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
pfn_mac.mac[0] |= 0x02;
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
- pi->reqs[i]->reqid, pfn_mac.mac);
+ pi->reqs[ri]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index f4a79e217da5..bd08d3aaa8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -32,7 +32,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
- void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
void (*add_if)(struct brcmf_if *ifp);
void (*del_if)(struct brcmf_if *ifp);
void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
}
static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
- ifp->drvr->proto->rxreorder(ifp, skb, inirq);
+ ifp->drvr->proto->rxreorder(ifp, skb);
}
static inline void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8effeb7a7269..465d95d83759 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
@@ -618,6 +618,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio");
BRCMF_FW_DEF(43430B0, "brcmfmac43430b0-sdio");
+BRCMF_FW_CLM_DEF(43439, "brcmfmac43439-sdio");
BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio");
@@ -629,7 +630,6 @@ BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin");
@@ -652,11 +652,13 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
+ BRCMF_FW_ENTRY(BRCM_CC_43454_CHIP_ID, 0x00000040, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
+ BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
@@ -1617,7 +1619,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
- * packet and and copy into the chain.
+ * packet and copy into the chain.
*/
sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
@@ -4020,15 +4022,14 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_sdiod_sgtable_alloc(sdiodev);
-#ifdef CONFIG_PM_SLEEP
/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
* is true or when platform data OOB irq is true).
*/
- if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+ if (IS_ENABLED(CONFIG_PM_SLEEP) &&
+ (sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
(sdiodev->settings->bus.sdio.oob_irq_supported)))
sdiodev->bus_if->wowl_supported = true;
-#endif
if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
@@ -4130,29 +4131,29 @@ brcmf_sdio_watchdog(struct timer_list *t)
}
}
-static
-int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_sdio_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
- brcmf_sdio_fwnames,
- ARRAY_SIZE(brcmf_sdio_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
+ switch (type) {
+ case BRCMF_BLOB_CLM:
+ *fw = sdiodev->clm_fw;
+ sdiodev->clm_fw = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!*fw)
+ return -ENOENT;
- kfree(fwreq);
return 0;
}
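The new .get_blob callback hands the CLM blob cached at firmware-load time to the caller and clears sdiodev->clm_fw, so release_firmware() runs exactly once on whichever side holds the pointer last; a missing blob is reported as -ENOENT. The move-and-NULL pattern in isolation (a sketch, not the driver code):

/* The cached blob is handed out once and the cache is cleared so a
 * second release cannot happen.
 */
struct firmware;

static const struct firmware *take_blob(const struct firmware **cache)
{
	const struct firmware *fw = *cache;

	*cache = NULL;		/* caller now owns fw (which may be NULL) */
	return fw;
}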
static int brcmf_sdio_bus_reset(struct device *dev)
{
- int ret = 0;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -4165,18 +4166,11 @@ static int brcmf_sdio_bus_reset(struct device *dev)
/* reset the adapter */
sdio_claim_host(sdiodev->func1);
- mmc_hw_reset(sdiodev->func1->card->host);
+ mmc_hw_reset(sdiodev->func1->card);
sdio_release_host(sdiodev->func1);
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-
- ret = brcmf_sdiod_probe(sdiodev);
- if (ret) {
- brcmf_err("Failed to probe after sdio device reset: ret %d\n",
- ret);
- }
-
- return ret;
+ return 0;
}
static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
@@ -4189,13 +4183,14 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.wowl_config = brcmf_sdio_wowl_config,
.get_ramsize = brcmf_sdio_bus_get_ramsize,
.get_memdump = brcmf_sdio_bus_get_memdump,
- .get_fwname = brcmf_sdio_get_fwname,
+ .get_blob = brcmf_sdio_get_blob,
.debugfs_create = brcmf_sdio_debugfs_create,
.reset = brcmf_sdio_bus_reset
};
#define BRCMF_SDIO_FW_CODE 0
#define BRCMF_SDIO_FW_NVRAM 1
+#define BRCMF_SDIO_FW_CLM 2
static void brcmf_sdio_firmware_callback(struct device *dev, int err,
struct brcmf_fw_request *fwreq)
@@ -4218,6 +4213,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
code = fwreq->items[BRCMF_SDIO_FW_CODE].binary;
nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len;
+ sdiod->clm_fw = fwreq->items[BRCMF_SDIO_FW_CLM].binary;
kfree(fwreq);
/* try to download image and nvram to the dongle */
@@ -4416,6 +4412,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
struct brcmf_fw_name fwnames[] = {
{ ".bin", bus->sdiodev->fw_name },
{ ".txt", bus->sdiodev->nvram_name },
+ { ".clm_blob", bus->sdiodev->clm_name },
};
fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev,
@@ -4427,7 +4424,9 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
- fwreq->board_type = bus->sdiodev->settings->board_type;
+ fwreq->items[BRCMF_SDIO_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_SDIO_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
+ fwreq->board_types[0] = bus->sdiodev->settings->board_type;
return fwreq;
}
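The request now carries a third item for the .clm_blob, flagged BRCMF_FW_REQF_OPTIONAL so a board without a CLM file still boots; note also that board_type becomes the first entry of a board_types[] array. Conceptually the optional flag behaves like this (hypothetical loader, not the brcmfmac API):

/* Absence of an optional file is not an error; its slot simply stays
 * NULL and the consumer must tolerate that.
 */
extern const void *load_file(const char *ext);	/* hypothetical */

struct item {
	const char *ext;
	int optional;
	const void *data;
};

static int fetch_all(struct item *items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		items[i].data = load_file(items[i].ext);
		if (!items[i].data && !items[i].optional)
			return -1;	/* a mandatory item is missing */
	}
	return 0;
}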
@@ -4583,6 +4582,8 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus->sdiodev->settings)
brcmf_release_module_param(bus->sdiodev->settings);
+ release_firmware(bus->sdiodev->clm_fw);
+ bus->sdiodev->clm_fw = NULL;
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
kfree(bus);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 15d2c02fa3ec..b76d34d36bde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -186,9 +186,11 @@ struct brcmf_sdio_dev {
struct sg_table sgtable;
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
+ char clm_name[BRCMF_FW_NAME_LEN];
bool wowl_enabled;
enum brcmf_sdiod_state state;
struct brcmf_sdiod_freezer *freezer;
+ const struct firmware *clm_fw;
};
/* sdio core registers */
@@ -346,26 +348,10 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
-#ifdef CONFIG_PM_SLEEP
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
-#else
-static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
-{
- return false;
-}
-static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
-{
-}
-#endif /* CONFIG_PM_SLEEP */
int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev);
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 338c66d0c5f8..5a139d7ed47a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -33,13 +33,11 @@ TRACE_EVENT(brcmf_err,
TP_ARGS(func, vaf),
TP_STRUCT__entry(
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
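__vstring()/__assign_vstr() size the trace entry to the formatted string instead of reserving a fixed MAX_MSG_LEN buffer and truncating with a vsnprintf() WARN; the same conversion is applied to the brcmsmac tracepoints further down. A userspace analogue of the measure-then-format idea (not the tracing API itself):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *vstr_dup(const char *fmt, ...)
{
	va_list ap, ap2;
	char *buf;
	int len;

	va_start(ap, fmt);
	va_copy(ap2, ap);
	len = vsnprintf(NULL, 0, fmt, ap);	/* measure */
	buf = len < 0 ? NULL : malloc((size_t)len + 1);
	if (buf)
		vsnprintf(buf, (size_t)len + 1, fmt, ap2);	/* format */
	va_end(ap2);
	va_end(ap);
	return buf;
}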
@@ -50,14 +48,12 @@ TRACE_EVENT(brcmf_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 9fb68c2dc7e3..85e18fb9c497 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1154,24 +1154,11 @@ error:
return NULL;
}
-static
-int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_usb_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
- struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
-
- fwreq = brcmf_fw_alloc_request(bus->chip, bus->chiprev,
- brcmf_usb_fwnames,
- ARRAY_SIZE(brcmf_usb_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
-
- kfree(fwreq);
- return 0;
+ /* No blobs for USB devices... */
+ return -ENOENT;
}
static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
@@ -1180,7 +1167,7 @@ static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
- .get_fwname = brcmf_usb_get_fwname,
+ .get_blob = brcmf_usb_get_blob,
};
#define BRCMF_USB_FW_CODE 0
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
index 2f3c451148db..2f8908074303 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
@@ -4,6 +4,8 @@
*/
#include <asm/unaligned.h>
+
+#include <linux/math.h>
#include <linux/string.h>
#include <linux/bug.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
index e1930ce1b642..b2c7ae8966a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
@@ -15,7 +15,7 @@
struct brcmf_xtlv {
u16 id;
u16 len;
- u8 data[0];
+ u8 data[];
};
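u8 data[0] is the old GNU zero-length-array idiom; u8 data[] is a C99 flexible array member, which modern fortification can bounds-check. Allocating such a header-plus-payload object, as a userspace sketch (stand-in for the kernel's struct_size() helper):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct xtlv {
	uint16_t id;
	uint16_t len;
	uint8_t data[];
};

static struct xtlv *xtlv_new(uint16_t id, const void *payload, uint16_t len)
{
	struct xtlv *x = malloc(sizeof(*x) + len);

	if (!x)
		return NULL;
	x->id = id;
	x->len = len;
	if (len)
		memcpy(x->data, payload, len);
	return x;
}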
enum brcmf_xtlv_option {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
index 0e8a69ab909f..488456420353 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -28,12 +28,10 @@ DECLARE_EVENT_CLASS(brcms_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -64,14 +62,12 @@ TRACE_EVENT(brcms_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index eadac0f5590f..a4034d44609b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -507,7 +507,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
brcms_c_start_station(wl->wlc, vif->addr);
else if (vif->type == NL80211_IFTYPE_AP)
brcms_c_start_ap(wl->wlc, vif->addr, vif->bss_conf.bssid,
- vif->bss_conf.ssid, vif->bss_conf.ssid_len);
+ vif->cfg.ssid, vif->cfg.ssid_len);
else if (vif->type == NL80211_IFTYPE_ADHOC)
brcms_c_start_adhoc(wl->wlc, vif->addr);
spin_unlock_bh(&wl->lock);
@@ -582,7 +582,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
static void
brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct brcms_info *wl = hw->priv;
struct bcma_device *core = wl->wlc->hw->d11core;
@@ -592,9 +592,9 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
* also implies a change in the AID.
*/
brcms_err(core, "%s: %s: %sassociated\n", KBUILD_MODNAME,
- __func__, info->assoc ? "" : "dis");
+ __func__, vif->cfg.assoc ? "" : "dis");
spin_lock_bh(&wl->lock);
- brcms_c_associate_upd(wl->wlc, info->assoc);
+ brcms_c_associate_upd(wl->wlc, vif->cfg.assoc);
spin_unlock_bh(&wl->lock);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -669,7 +669,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_SSID) {
/* BSSID changed, for whatever reason (IBSS and managed mode) */
spin_lock_bh(&wl->lock);
- brcms_c_set_ssid(wl->wlc, info->ssid, info->ssid_len);
+ brcms_c_set_ssid(wl->wlc, vif->cfg.ssid, vif->cfg.ssid_len);
spin_unlock_bh(&wl->lock);
}
if (changed & BSS_CHANGED_BEACON) {
@@ -678,7 +678,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
u16 tim_offset = 0;
spin_lock_bh(&wl->lock);
- beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
+ beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
info->dtim_period);
spin_unlock_bh(&wl->lock);
@@ -715,13 +715,13 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_IBSS) {
/* IBSS join status changed */
brcms_err(core, "%s: IBSS joined: %s (implement)\n",
- __func__, info->ibss_joined ? "true" : "false");
+ __func__, vif->cfg.ibss_joined ? "true" : "false");
}
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
brcms_err(core, "%s: arp filtering: %d addresses"
- " (implement)\n", __func__, info->arp_addr_cnt);
+ " (implement)\n", __func__, vif->cfg.arp_addr_cnt);
}
if (changed & BSS_CHANGED_QOS) {
@@ -787,7 +787,8 @@ static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw,
}
static int
-brcms_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+brcms_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct brcms_info *wl = hw->priv;
@@ -868,7 +869,7 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
spin_lock_bh(&wl->lock);
brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size,
(1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor)) - 1);
+ sta->deflink.ht_cap.ampdu_factor)) - 1);
spin_unlock_bh(&wl->lock);
/* Power save wakeup */
break;
@@ -950,7 +951,7 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
spin_lock_bh(&wl->lock);
if (wl->wlc->vif)
beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
- &tim_offset, NULL);
+ &tim_offset, NULL, 0);
if (beacon)
brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
wl->wlc->vif->bss_conf.dtim_period);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 8ddfc3d06687..11b33e78127c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -3800,7 +3800,7 @@ static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
}
/*
- * Suspend the the MAC and update the slot timing
+ * Suspend the MAC and update the slot timing
* for standard 11b/g (20us slots) or shortslot 11g (9us slots).
*/
static void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
index ae1f3ad40d45..2b0df07ced74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
@@ -123,7 +123,7 @@
*/
/********************************************************************
- * Phy/Core Configuration. Defines macros to to check core phy/rev *
+ * Phy/Core Configuration. Defines macros to check core phy/rev *
* compile-time configuration. Defines default core support. *
* ******************************************************************
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 9d81320164ce..f4939cf62767 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -32,6 +32,7 @@
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345
+#define BRCM_CC_43454_CHIP_ID 43454
#define BRCM_CC_43465_CHIP_ID 43465
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_43525_CHIP_ID 43525
@@ -50,9 +51,12 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_4378_CHIP_ID 0x4378
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
+#define CY_CC_43439_CHIP_ID 43439
#define CY_CC_43752_CHIP_ID 43752
+#define CY_CC_89459_CHIP_ID 0x4355
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -71,6 +75,7 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_43570_RAW_DEVICE_ID 0xaa31
#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_4359_DEVICE_ID 0x43ef
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
@@ -85,7 +90,9 @@
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
#define BRCM_PCIE_4371_DEVICE_ID 0x440d
-
+#define BRCM_PCIE_4378_DEVICE_ID 0x4425
+#define CY_PCIE_89459_DEVICE_ID 0x4415
+#define CY_PCIE_89459_RAW_DEVICE_ID 0x4355
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 45594f003ef7..fb2c35bd73bb 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -545,7 +545,7 @@ struct ConfigRid {
#define MODE_CFG_MASK cpu_to_le16(0xff)
#define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
#define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
-#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extenstions */
+#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
#define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
#define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
#define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
@@ -4672,7 +4672,7 @@ static ssize_t proc_write(struct file *file,
static int proc_status_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *apriv = dev->ml_priv;
CapabilityRid cap_rid;
StatusRid status_rid;
@@ -4756,7 +4756,7 @@ static int proc_stats_rid_open(struct inode *inode,
u16 rid)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *apriv = dev->ml_priv;
StatsRid stats;
int i, j;
@@ -4819,7 +4819,7 @@ static inline int sniffing_mode(struct airo_info *ai)
static void proc_config_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *line;
@@ -5030,7 +5030,7 @@ static const char *get_rmode(__le16 mode)
static int proc_config_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
__le16 mode;
@@ -5120,7 +5120,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
static void proc_SSID_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
SsidRid SSID_rid;
int i;
@@ -5156,7 +5156,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
static void proc_APList_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
APListRid *APList_rid = &ai->APList;
int i;
@@ -5232,7 +5232,7 @@ static int get_wep_tx_idx(struct airo_info *ai)
return -1;
}
-static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
+static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key,
u16 keylen, int perm, int lock)
{
static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
@@ -5280,10 +5280,10 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
static void proc_wepkey_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i, rc;
- char key[16];
+ u8 key[16];
u16 index = 0;
int j = 0;
@@ -5311,12 +5311,22 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
}
for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) {
+ int val;
+
+ if (i % 3 == 2)
+ continue;
+
+ val = hex_to_bin(data->wbuffer[i+j]);
+ if (val < 0) {
+ airo_print_err(ai->dev->name, "WepKey passed invalid key hex");
+ return;
+ }
switch(i%3) {
case 0:
- key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
+ key[i/3] = (u8)val << 4;
break;
case 1:
- key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
+ key[i/3] |= (u8)val;
break;
}
}
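The added hex_to_bin() check rejects non-hex input up front instead of folding its -1 return into the key bytes; every third character (the separator) is still skipped. The fixed parse shape, as a standalone sketch with a stand-in for hex_to_bin():

#include <stdint.h>

static int hex_val(char c)	/* stand-in for the kernel's hex_to_bin() */
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

static int parse_key(const char *s, uint8_t *key, int maxlen)
{
	int i, hi, lo, n = 0;

	for (i = 0; s[i] && s[i + 1] && n < maxlen; i += 3) {
		hi = hex_val(s[i]);
		lo = hex_val(s[i + 1]);
		if (hi < 0 || lo < 0)
			return -1;	/* reject a bad digit, keep old key */
		key[n++] = (uint8_t)(hi << 4 | lo);
		if (s[i + 2] != ':')
			break;		/* end of key */
	}
	return n;
}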
@@ -5331,7 +5341,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
static int proc_wepkey_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
WepKeyRid wkr;
@@ -5379,7 +5389,7 @@ static int proc_wepkey_open(struct inode *inode, struct file *file)
static int proc_SSID_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5423,7 +5433,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
static int proc_APList_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5462,7 +5472,7 @@ static int proc_APList_open(struct inode *inode, struct file *file)
static int proc_BSSList_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
BSSListRid BSSList_rid;
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
index 1364b0014488..208e73a16051 100644
--- a/drivers/net/wireless/intel/Makefile
+++ b/drivers/net/wireless/intel/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_IWLMEI) += iwlwifi/
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 2ace2b27ecad..b0f23cf1a621 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3501,7 +3501,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
priv->msg_buffers = NULL;
}
-static ssize_t show_pci(struct device *d, struct device_attribute *attr,
+static ssize_t pci_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(d);
@@ -3521,34 +3521,34 @@ static ssize_t show_pci(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(pci, 0444, show_pci, NULL);
+static DEVICE_ATTR_RO(pci);
-static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->config);
}
-static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
+static DEVICE_ATTR_RO(cfg);
-static ssize_t show_status(struct device *d, struct device_attribute *attr,
+static ssize_t status_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->status);
}
-static DEVICE_ATTR(status, 0444, show_status, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t show_capability(struct device *d, struct device_attribute *attr,
+static ssize_t capability_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->capability);
}
-static DEVICE_ATTR(capability, 0444, show_capability, NULL);
+static DEVICE_ATTR_RO(capability);
#define IPW2100_REG(x) { IPW_ ##x, #x }
static const struct {
@@ -3785,7 +3785,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"),
IPW2100_ORD(UCODE_VERSION, "Ucode Version"),};
-static ssize_t show_registers(struct device *d, struct device_attribute *attr,
+static ssize_t registers_show(struct device *d, struct device_attribute *attr,
char *buf)
{
int i;
@@ -3805,9 +3805,9 @@ static ssize_t show_registers(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(registers, 0444, show_registers, NULL);
+static DEVICE_ATTR_RO(registers);
-static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
+static ssize_t hardware_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3846,9 +3846,9 @@ static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(hardware, 0444, show_hardware, NULL);
+static DEVICE_ATTR_RO(hardware);
-static ssize_t show_memory(struct device *d, struct device_attribute *attr,
+static ssize_t memory_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3905,7 +3905,7 @@ static ssize_t show_memory(struct device *d, struct device_attribute *attr,
return len;
}
-static ssize_t store_memory(struct device *d, struct device_attribute *attr,
+static ssize_t memory_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3940,9 +3940,9 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(memory, 0644, show_memory, store_memory);
+static DEVICE_ATTR_RW(memory);
-static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
+static ssize_t ordinals_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3976,9 +3976,9 @@ static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
return len;
}
-static DEVICE_ATTR(ordinals, 0444, show_ordinals, NULL);
+static DEVICE_ATTR_RO(ordinals);
-static ssize_t show_stats(struct device *d, struct device_attribute *attr,
+static ssize_t stats_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -3997,7 +3997,7 @@ static ssize_t show_stats(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(stats, 0444, show_stats, NULL);
+static DEVICE_ATTR_RO(stats);
static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
{
@@ -4043,7 +4043,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
return 0;
}
-static ssize_t show_internals(struct device *d, struct device_attribute *attr,
+static ssize_t internals_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4095,9 +4095,9 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
return len;
}
-static DEVICE_ATTR(internals, 0444, show_internals, NULL);
+static DEVICE_ATTR_RO(internals);
-static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
+static ssize_t bssinfo_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4140,7 +4140,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
return out - buf;
}
-static DEVICE_ATTR(bssinfo, 0444, show_bssinfo, NULL);
+static DEVICE_ATTR_RO(bssinfo);
#ifdef CONFIG_IPW2100_DEBUG
static ssize_t debug_level_show(struct device_driver *d, char *buf)
@@ -4165,7 +4165,7 @@ static ssize_t debug_level_store(struct device_driver *d,
static DRIVER_ATTR_RW(debug_level);
#endif /* CONFIG_IPW2100_DEBUG */
-static ssize_t show_fatal_error(struct device *d,
+static ssize_t fatal_error_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4190,7 +4190,7 @@ static ssize_t show_fatal_error(struct device *d,
return out - buf;
}
-static ssize_t store_fatal_error(struct device *d,
+static ssize_t fatal_error_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -4199,16 +4199,16 @@ static ssize_t store_fatal_error(struct device *d,
return count;
}
-static DEVICE_ATTR(fatal_error, 0644, show_fatal_error, store_fatal_error);
+static DEVICE_ATTR_RW(fatal_error);
-static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->ieee->scan_age);
}
-static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4232,9 +4232,9 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
return strnlen(buf, count);
}
-static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
+static DEVICE_ATTR_RW(scan_age);
-static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
char *buf)
{
/* 0 - RF kill not enabled
@@ -4278,7 +4278,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
return 1;
}
-static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw2100_priv *priv = dev_get_drvdata(d);
@@ -4286,7 +4286,7 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
+static DEVICE_ATTR_RW(rf_kill);
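DEVICE_ATTR_RO()/DEVICE_ATTR_RW() derive both the permissions and the handler names from the attribute name, which is why every show_foo()/store_foo() in this file is renamed to foo_show()/foo_store() first. Roughly, as a sketch of the convention rather than the macro's exact expansion:

static ssize_t foo_show(struct device *d, struct device_attribute *a,
			char *buf);
static ssize_t foo_store(struct device *d, struct device_attribute *a,
			 const char *buf, size_t count);

static DEVICE_ATTR(foo, 0644, foo_show, foo_store);	/* == DEVICE_ATTR_RW(foo) */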
static struct attribute *ipw2100_sysfs_entries[] = {
&dev_attr_hardware.attr,
@@ -5907,8 +5907,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5916,7 +5916,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
- strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
}
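strscpy() always NUL-terminates and reports truncation as -E2BIG, whereas strlcpy() returns the full source length and reads the entire source to compute it, which over-reads a non-terminated buffer; that is the whole motivation for these mechanical conversions. A kernel-style fragment (not standalone) showing the difference that matters:

char dst[8];
ssize_t n = strscpy(dst, "a-rather-long-string", sizeof(dst));

if (n == -E2BIG)
	pr_debug("truncated, but dst is NUL-terminated: %s\n", dst);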
@@ -6529,7 +6529,7 @@ static struct pci_driver ipw2100_pci_driver = {
.shutdown = ipw2100_shutdown,
};
-/**
+/*
* Initialize the ipw2100 driver/module
*
* @returns 0 if ok, < 0 errno on error.
@@ -6561,7 +6561,7 @@ out:
return ret;
}
-/**
+/*
* Cleanup ipw2100 driver registration
*/
static void __exit ipw2100_exit(void)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 5727c7c00a28..5b483de18c81 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1259,7 +1259,7 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
return error;
}
-static ssize_t show_event_log(struct device *d,
+static ssize_t event_log_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1289,9 +1289,9 @@ static ssize_t show_event_log(struct device *d,
return len;
}
-static DEVICE_ATTR(event_log, 0444, show_event_log, NULL);
+static DEVICE_ATTR_RO(event_log);
-static ssize_t show_error(struct device *d,
+static ssize_t error_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1326,7 +1326,7 @@ static ssize_t show_error(struct device *d,
return len;
}
-static ssize_t clear_error(struct device *d,
+static ssize_t error_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1337,9 +1337,9 @@ static ssize_t clear_error(struct device *d,
return count;
}
-static DEVICE_ATTR(error, 0644, show_error, clear_error);
+static DEVICE_ATTR_RW(error);
-static ssize_t show_cmd_log(struct device *d,
+static ssize_t cmd_log_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1364,12 +1364,12 @@ static ssize_t show_cmd_log(struct device *d,
return len;
}
-static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL);
+static DEVICE_ATTR_RO(cmd_log);
#ifdef CONFIG_IPW2200_PROMISCUOUS
static void ipw_prom_free(struct ipw_priv *priv);
static int ipw_prom_alloc(struct ipw_priv *priv);
-static ssize_t store_rtap_iface(struct device *d,
+static ssize_t rtap_iface_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1414,7 +1414,7 @@ static ssize_t store_rtap_iface(struct device *d,
return count;
}
-static ssize_t show_rtap_iface(struct device *d,
+static ssize_t rtap_iface_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -1429,9 +1429,9 @@ static ssize_t show_rtap_iface(struct device *d,
}
}
-static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface);
+static DEVICE_ATTR_ADMIN_RW(rtap_iface);
-static ssize_t store_rtap_filter(struct device *d,
+static ssize_t rtap_filter_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1451,7 +1451,7 @@ static ssize_t store_rtap_filter(struct device *d,
return count;
}
-static ssize_t show_rtap_filter(struct device *d,
+static ssize_t rtap_filter_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -1460,17 +1460,17 @@ static ssize_t show_rtap_filter(struct device *d,
priv->prom_priv ? priv->prom_priv->filter : 0);
}
-static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter);
+static DEVICE_ATTR_ADMIN_RW(rtap_filter);
#endif
-static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->ieee->scan_age);
}
-static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1504,16 +1504,16 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
return len;
}
-static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
+static DEVICE_ATTR_RW(scan_age);
-static ssize_t show_led(struct device *d, struct device_attribute *attr,
+static ssize_t led_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
}
-static ssize_t store_led(struct device *d, struct device_attribute *attr,
+static ssize_t led_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1537,36 +1537,36 @@ static ssize_t store_led(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(led, 0644, show_led, store_led);
+static DEVICE_ATTR_RW(led);
-static ssize_t show_status(struct device *d,
+static ssize_t status_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->status);
}
-static DEVICE_ATTR(status, 0444, show_status, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->config);
}
-static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
+static DEVICE_ATTR_RO(cfg);
-static ssize_t show_nic_type(struct device *d,
+static ssize_t nic_type_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "TYPE: %d\n", priv->nic_type);
}
-static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL);
+static DEVICE_ATTR_RO(nic_type);
-static ssize_t show_ucode_version(struct device *d,
+static ssize_t ucode_version_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 len = sizeof(u32), tmp = 0;
@@ -1578,9 +1578,9 @@ static ssize_t show_ucode_version(struct device *d,
return sprintf(buf, "0x%08x\n", tmp);
}
-static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL);
+static DEVICE_ATTR_RO(ucode_version);
-static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
+static ssize_t rtc_show(struct device *d, struct device_attribute *attr,
char *buf)
{
u32 len = sizeof(u32), tmp = 0;
@@ -1592,20 +1592,20 @@ static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
return sprintf(buf, "0x%08x\n", tmp);
}
-static DEVICE_ATTR(rtc, 0644, show_rtc, NULL);
+static DEVICE_ATTR_RO(rtc);
/*
* Add a device attribute to view/control the delay between eeprom
* operations.
*/
-static ssize_t show_eeprom_delay(struct device *d,
+static ssize_t eeprom_delay_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct ipw_priv *p = dev_get_drvdata(d);
int n = p->eeprom_delay;
return sprintf(buf, "%i\n", n);
}
-static ssize_t store_eeprom_delay(struct device *d,
+static ssize_t eeprom_delay_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1614,9 +1614,9 @@ static ssize_t store_eeprom_delay(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay);
+static DEVICE_ATTR_RW(eeprom_delay);
-static ssize_t show_command_event_reg(struct device *d,
+static ssize_t command_event_reg_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1625,7 +1625,7 @@ static ssize_t show_command_event_reg(struct device *d,
reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_command_event_reg(struct device *d,
+static ssize_t command_event_reg_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1637,10 +1637,9 @@ static ssize_t store_command_event_reg(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(command_event_reg, 0644,
- show_command_event_reg, store_command_event_reg);
+static DEVICE_ATTR_RW(command_event_reg);
-static ssize_t show_mem_gpio_reg(struct device *d,
+static ssize_t mem_gpio_reg_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1649,7 +1648,7 @@ static ssize_t show_mem_gpio_reg(struct device *d,
reg = ipw_read_reg32(p, 0x301100);
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_mem_gpio_reg(struct device *d,
+static ssize_t mem_gpio_reg_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1661,9 +1660,9 @@ static ssize_t store_mem_gpio_reg(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg);
+static DEVICE_ATTR_RW(mem_gpio_reg);
-static ssize_t show_indirect_dword(struct device *d,
+static ssize_t indirect_dword_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1676,7 +1675,7 @@ static ssize_t show_indirect_dword(struct device *d,
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_indirect_dword(struct device *d,
+static ssize_t indirect_dword_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1687,10 +1686,9 @@ static ssize_t store_indirect_dword(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(indirect_dword, 0644,
- show_indirect_dword, store_indirect_dword);
+static DEVICE_ATTR_RW(indirect_dword);
-static ssize_t show_indirect_byte(struct device *d,
+static ssize_t indirect_byte_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u8 reg = 0;
@@ -1703,7 +1701,7 @@ static ssize_t show_indirect_byte(struct device *d,
return sprintf(buf, "0x%02x\n", reg);
}
-static ssize_t store_indirect_byte(struct device *d,
+static ssize_t indirect_byte_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1714,10 +1712,9 @@ static ssize_t store_indirect_byte(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(indirect_byte, 0644,
- show_indirect_byte, store_indirect_byte);
+static DEVICE_ATTR_RW(indirect_byte);
-static ssize_t show_direct_dword(struct device *d,
+static ssize_t direct_dword_show(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
@@ -1730,7 +1727,7 @@ static ssize_t show_direct_dword(struct device *d,
return sprintf(buf, "0x%08x\n", reg);
}
-static ssize_t store_direct_dword(struct device *d,
+static ssize_t direct_dword_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1741,7 +1738,7 @@ static ssize_t store_direct_dword(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword);
+static DEVICE_ATTR_RW(direct_dword);
static int rf_kill_active(struct ipw_priv *priv)
{
@@ -1756,7 +1753,7 @@ static int rf_kill_active(struct ipw_priv *priv)
return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}
-static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
char *buf)
{
/* 0 - RF kill not enabled
@@ -1802,7 +1799,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
return 1;
}
-static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1812,9 +1809,9 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
+static DEVICE_ATTR_RW(rf_kill);
-static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
+static ssize_t speed_scan_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1829,7 +1826,7 @@ static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
return sprintf(buf, "0\n");
}
-static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
+static ssize_t speed_scan_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1865,16 +1862,16 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan);
+static DEVICE_ATTR_RW(speed_scan);
-static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
+static ssize_t net_stats_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
}
-static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
+static ssize_t net_stats_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
@@ -1886,9 +1883,9 @@ static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats);
+static DEVICE_ATTR_RW(net_stats);
-static ssize_t show_channels(struct device *d,
+static ssize_t channels_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
@@ -1932,7 +1929,7 @@ static ssize_t show_channels(struct device *d,
return len;
}
-static DEVICE_ATTR(channels, 0400, show_channels, NULL);
+static DEVICE_ATTR_ADMIN_RO(channels);
static void notify_wx_assoc_event(struct ipw_priv *priv)
{
@@ -2587,7 +2584,7 @@ static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
* through a couple of memory mapped registers.
*
* The following is a simplified implementation for pulling data out of the
- * the eeprom, along with some helper functions to find information in
+ * eeprom, along with some helper functions to find information in
* the per device private data's copy of the eeprom.
*
* NOTE: To better understand how these functions work (i.e what is a chip
@@ -10427,8 +10424,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10437,7 +10434,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
- strlcpy(info->bus_info, pci_name(p->pci_dev),
+ strscpy(info->bus_info, pci_name(p->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index 55cac934f4ee..09ddd21608d4 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -651,7 +651,7 @@ struct ipw_rx_notification {
struct notif_link_deterioration link_deterioration;
struct notif_calibration calibration;
struct notif_noise noise;
- u8 raw[0];
+ DECLARE_FLEX_ARRAY(u8, raw);
} u;
} __packed;
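A flexible array member cannot be the only member of a struct and cannot sit directly in a union in ISO C, so u8 raw[0] in unions like this one (and in the iwlegacy commands.h unions below) cannot simply become u8 raw[]; DECLARE_FLEX_ARRAY() wraps it in an anonymous struct with a zero-sized lead-in member to keep the layout legal. Approximately (GNU C, simplified from the kernel's definition):

union rx_body {
	uint32_t status;
	struct {
		struct { } __empty_raw;	/* zero-sized placeholder member */
		uint8_t raw[];
	};
};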
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 7964ef7d15f0..bec7bc273748 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -405,7 +405,7 @@ struct libipw_auth {
__le16 transaction;
__le16 status;
/* challenge */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_channel_switch {
@@ -423,7 +423,6 @@ struct libipw_action {
union {
struct libipw_action_exchange {
u8 token;
- struct libipw_info_element info_element[0];
} exchange;
struct libipw_channel_switch channel_switch;
@@ -441,7 +440,7 @@ struct libipw_disassoc {
struct libipw_probe_request {
struct libipw_hdr_3addr header;
/* SSID, supported rates */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_probe_response {
@@ -451,7 +450,7 @@ struct libipw_probe_response {
__le16 capability;
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
/* Alias beacon for probe_response */
@@ -462,7 +461,7 @@ struct libipw_assoc_request {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_reassoc_request {
@@ -470,7 +469,7 @@ struct libipw_reassoc_request {
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_assoc_response {
@@ -479,7 +478,7 @@ struct libipw_assoc_response {
__le16 status;
__le16 aid;
/* supported rates */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
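With the headers now ending in a plain u8 variable[], the management-frame tail is treated as a raw stream of (id, len, data) information elements, which libipw_parse_info_param() walks after the casts shown in libipw_rx.c below. The walk, as a standalone sketch:

#include <stddef.h>
#include <stdint.h>

static int walk_ies(const uint8_t *buf, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		uint8_t id = buf[pos];
		uint8_t elen = buf[pos + 1];

		if (pos + 2 + elen > len)
			return -1;	/* truncated element */
		/* handle (id, elen, &buf[pos + 2]) here */
		(void)id;
		pos += 2 + elen;
	}
	return 0;
}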
struct libipw_txb {
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 7a684b76f39b..48d6870bbf4e 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -1329,8 +1329,8 @@ static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_as
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
- if (libipw_parse_info_param
- (frame->info_element, stats->len - sizeof(*frame), network))
+ if (libipw_parse_info_param((void *)frame->variable,
+ stats->len - sizeof(*frame), network))
return 1;
network->mode = 0;
@@ -1389,8 +1389,8 @@ static int libipw_network_init(struct libipw_device *ieee, struct libipw_probe_r
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
- if (libipw_parse_info_param
- (beacon->info_element, stats->len - sizeof(*beacon), network))
+ if (libipw_parse_info_param((void *)beacon->variable,
+ stats->len - sizeof(*beacon), network))
return 1;
network->mode = 0;
@@ -1510,7 +1510,7 @@ static void libipw_process_probe_response(struct libipw_device
struct libipw_network *target;
struct libipw_network *oldest = NULL;
#ifdef CONFIG_LIBIPW_DEBUG
- struct libipw_info_element *info_element = beacon->info_element;
+ struct libipw_info_element *info_element = (void *)beacon->variable;
#endif
unsigned long flags;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index 36d1e6b2568d..4aec1fce1ae2 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -383,7 +383,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
/* Each fragment may need to have room for encryption
* pre/postfix */
- if (host_encrypt)
+ if (host_encrypt && crypt && crypt->ops)
bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
crypt->ops->extra_mpdu_postfix_len;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index bd4e7d752958..7352d5b2095f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2701,7 +2701,7 @@ il3945_post_associate(struct il_priv *il)
if (!il->vif || !il->is_open)
return;
- D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
+ D_ASSOC("Associated as %d to: %pM\n", il->vif->cfg.aid,
il->active.bssid_addr);
if (test_bit(S_EXIT_PENDING, &il->status))
@@ -2718,9 +2718,9 @@ il3945_post_associate(struct il_priv *il)
il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
+ il->staging.assoc_id = cpu_to_le16(il->vif->cfg.aid);
- D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
+ D_ASSOC("assoc id %d beacon interval %d\n", il->vif->cfg.aid,
il->vif->bss_conf.beacon_int);
if (il->vif->bss_conf.use_short_preamble)
@@ -3254,7 +3254,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strlcpy(buffer, buf, sizeof(buffer));
+ strscpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index b2478cbe558e..0eaad980c85c 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -354,13 +354,13 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
* after assoc.. */
for (i = sband->n_bitrates - 1; i >= 0; i--) {
- if (sta->supp_rates[sband->band] & (1 << i)) {
+ if (sta->deflink.supp_rates[sband->band] & (1 << i)) {
rs_sta->last_txrate_idx = i;
break;
}
}
- il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+ il->_3945.sta_supp_rates = sta->deflink.supp_rates[sband->band];
/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
if (sband->band == NL80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
@@ -631,7 +631,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
il_sta = NULL;
}
- rate_mask = sta->supp_rates[sband->band];
+ rate_mask = sta->deflink.supp_rates[sband->band];
/* get user max rate if set */
max_rate_idx = fls(txrc->rate_idx_mask) - 1;
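The sta->supp_rates, ht_cap and smps_mode accesses in this and the following iwlegacy hunks all move under sta->deflink: mac80211's multi-link (MLO) rework split per-link station data into struct ieee80211_link_sta, and single-link drivers read the default link. The access pattern, as a kernel-style fragment (not standalone):

u32 rates = sta->deflink.supp_rates[band];
bool ht_ok = sta->deflink.ht_cap.ht_supported;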
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index d93900e62e3d..943de47170c7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6690,7 +6690,7 @@ il4965_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
/* ieee80211_unregister_hw call wil cause il_mac_stop to
- * to be called and il4965_down since we are removing the device
+ * be called and il4965_down since we are removing the device
* we need to set S_EXIT_PENDING bit.
*/
set_bit(S_EXIT_PENDING, &il->status);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 9a491e5db75b..718efb1aa1b0 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -627,7 +627,7 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
static bool
il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
{
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ return (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!il->ht.non_gf_sta_present;
}
@@ -970,7 +970,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
lq_sta->last_rate_n_flags = tx_rate;
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta->supp_rates[sband->band])
+ if (sta->deflink.supp_rates[sband->band])
il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
}
@@ -1164,10 +1164,10 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
s32 rate;
s8 is_green = lq_sta->is_green;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1182,7 +1182,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
tbl->max_search = IL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1217,7 +1217,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
u8 is_green = lq_sta->is_green;
s32 rate;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
D_RATE("LQ: try to switch to SISO\n");
@@ -1228,7 +1228,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
tbl->max_search = IL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1384,7 +1384,7 @@ il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
struct il_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct il_rate_scale_data *win = &(tbl->win[idx]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz =
(sizeof(struct il_scale_tbl_info) -
(sizeof(struct il_rate_scale_data) * RATE_COUNT));
@@ -1507,7 +1507,7 @@ il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
struct il_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct il_rate_scale_data *win = &(tbl->win[idx]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz =
(sizeof(struct il_scale_tbl_info) -
(sizeof(struct il_rate_scale_data) * RATE_COUNT));
@@ -1760,7 +1760,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
(info->flags & IEEE80211_TX_CTL_NO_ACK))
return;
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[lq_sta->band];
tid = il4965_rs_tl_add_packet(lq_sta, hdr);
if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
@@ -2271,7 +2271,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
int i, j;
struct ieee80211_hw *hw = il->hw;
struct ieee80211_conf *conf = &il->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct il_station_priv *sta_priv;
struct il_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2288,7 +2288,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[sband->band];
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < RATE_COUNT; i++)
il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
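/*
 * Editor's sketch (hypothetical code, not the driver's): why the loop
 * bound above changed from LINK_QUAL_MAX_RETRY_NUM to
 * LINK_QUAL_MAX_RETRY_NUM - 1. When a loop post-increments its index
 * and the function later stores through the same index again, a bound
 * equal to the table size allows one store past the end; tightening
 * the bound by one keeps the final store in range.
 */
#include <linux/types.h>

#define EXAMPLE_MAX_RETRY 16

static void example_fill(u32 *rs_table, u32 rate, int repeat_rate)
{
	int idx = 0;

	/* idx can leave this loop at EXAMPLE_MAX_RETRY - 1 at most */
	while (repeat_rate > 0 && idx < (EXAMPLE_MAX_RETRY - 1)) {
		rs_table[idx] = rate;
		repeat_rate--;
		idx++;
	}

	/* ... so this trailing store is still inside the table */
	rs_table[idx] = rate;
}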
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 9fa556486511..c34729f576cd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1756,9 +1756,9 @@ il4965_post_associate(struct il_priv *il)
if (il->ops->set_rxon_chain)
il->ops->set_rxon_chain(il);
- il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ il->staging.assoc_id = cpu_to_le16(vif->cfg.aid);
- D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+ D_ASSOC("assoc id %d beacon interval %d\n", vif->cfg.aid,
vif->bss_conf.beacon_int);
if (vif->bss_conf.use_short_preamble)
@@ -1775,7 +1775,7 @@ il4965_post_associate(struct il_priv *il)
il_commit_rxon(il);
- D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+ D_ASSOC("Associated as %d to: %pM\n", vif->cfg.aid,
il->active.bssid_addr);
switch (vif->type) {
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 4a97310f8fee..28cf4e832152 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -1710,7 +1710,7 @@ struct il4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ DECLARE_FLEX_ARRAY(struct agg_tx_status, agg_status); /* for each agg frame */
} u;
} __packed;
@@ -3365,7 +3365,7 @@ struct il_rx_pkt {
struct il_compressed_ba_resp compressed_ba;
struct il_missed_beacon_notif missed_beacon;
__le32 status;
- u8 raw[0];
+ DECLARE_FLEX_ARRAY(u8, raw);
} u;
} __packed;
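/*
 * Editor's sketch (hypothetical struct): what DECLARE_FLEX_ARRAY() buys
 * over the old zero-length array used above. C does not allow a true
 * flexible array member ("u8 raw[];") directly inside a union, so the
 * helper from <linux/stddef.h> wraps it in an anonymous struct with an
 * empty leading member, roughly:
 */
#include <linux/types.h>

struct example_pkt {
	__le32 len;
	union {
		__le32 status;
		struct {
			struct { } __empty_raw;	/* zero-sized, keeps C legal */
			u8 raw[];		/* the real flexible array */
		};
	} u;
} __packed;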
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 683b632981ed..341c17fe2af4 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1863,22 +1863,22 @@ EXPORT_SYMBOL(il_send_add_sta);
static void
il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap;
__le32 sta_flags;
if (!sta || !sta_ht_inf->ht_supported)
goto done;
D_ASSOC("spatial multiplexing power save mode: %s\n",
- (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
- (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
+ (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+ (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
"disabled");
sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
@@ -1888,7 +1888,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
case IEEE80211_SMPS_OFF:
break;
default:
- IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
break;
}
@@ -1900,7 +1900,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
cpu_to_le32((u32) sta_ht_inf->
ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+ if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
sta_flags |= STA_FLG_HT40_EN_MSK;
else
sta_flags &= ~STA_FLG_HT40_EN_MSK;
@@ -4480,7 +4480,8 @@ il_clear_isr_stats(struct il_priv *il)
}
int
-il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct il_priv *il = hw->priv;
@@ -4816,7 +4817,7 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
#define IL_WD_TICK(timeout) ((timeout) / 4)
/*
- * Watchdog timer callback, we check each tx queue for stuck, if if hung
+ * Watchdog timer callback, we check each tx queue for stuck, if hung
* we reset the firmware. If everything is fine just rearm the timer.
*/
void
@@ -5222,7 +5223,7 @@ il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int maxstreams;
maxstreams =
@@ -5276,7 +5277,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct il_priv *il = hw->priv;
unsigned long flags;
__le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
if (!skb)
return;
@@ -5311,13 +5312,13 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changes)
+ struct ieee80211_bss_conf *bss_conf, u64 changes)
{
struct il_priv *il = hw->priv;
int ret;
mutex_lock(&il->mutex);
- D_MAC80211("enter: changes 0x%x\n", changes);
+ D_MAC80211("enter: changes 0x%llx\n", changes);
if (!il_is_alive(il)) {
D_MAC80211("leave - not alive\n");
@@ -5427,8 +5428,8 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_ASSOC) {
- D_MAC80211("ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
+ D_MAC80211("ASSOC %d\n", vif->cfg.assoc);
+ if (vif->cfg.assoc) {
il->timestamp = bss_conf->sync_tsf;
if (!il_is_rfkill(il))
@@ -5437,8 +5438,8 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
il_set_no_assoc(il, vif);
}
- if (changes && il_is_associated(il) && bss_conf->aid) {
- D_MAC80211("Changes (%#x) while associated\n", changes);
+ if (changes && il_is_associated(il) && vif->cfg.aid) {
+ D_MAC80211("Changes (%#llx) while associated\n", changes);
ret = il_send_rxon_assoc(il);
if (!ret) {
/* Sync active_rxon with latest change. */
@@ -5459,10 +5460,10 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_IBSS) {
ret = il->ops->manage_ibss_station(il, vif,
- bss_conf->ibss_joined);
+ vif->cfg.ibss_joined);
if (ret)
IL_ERR("failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
+ vif->cfg.ibss_joined ? "add" : "remove",
bss_conf->bssid);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 40877ef1fbf2..69687fcf963f 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1683,7 +1683,8 @@ struct il_cfg {
***************************/
int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
@@ -1947,7 +1948,7 @@ il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
int il_mac_config(struct ieee80211_hw *hw, u32 changed);
void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changes);
+ struct ieee80211_bss_conf *bss_conf, u64 changes);
void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
__le16 fc, __le32 *tx_flags);
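/*
 * Editor's sketch (hypothetical handler): the driver-visible effect of
 * widening "changes" to u64 in the prototypes above. mac80211 outgrew
 * 32 BSS_CHANGED_* bits, so format strings move to %llx and any
 * driver-side masks must be built with BIT_ULL() to avoid truncating
 * the high bits.
 */
#include <net/mac80211.h>

static void example_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u64 changes)
{
	if (changes & BSS_CHANGED_ASSOC)
		pr_debug("assoc state now %d, changes 0x%llx\n",
			 vif->cfg.assoc, changes);
}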
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c21c0c68849a..b20409f8c13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
-config IWLWIFI_BCAST_FILTERING
- bool "Enable broadcast filtering"
- depends on IWLMVM
- help
- Say Y here to enable default bcast filtering configuration.
-
- Enabling broadcast filtering will drop any incoming wireless
- broadcast frames, except some very specific predefined
- patterns (e.g. incoming arp requests).
-
- If unsure, don't enable this option, as some programs might
- expect incoming broadcasts for their normal operations.
-
menu "Debugging Options"
config IWLWIFI_DEBUG
@@ -152,6 +139,8 @@ config IWLMEI
tristate "Intel Management Engine communication over WLAN"
depends on INTEL_MEI
depends on PM
+ depends on CFG80211
+ depends on BROKEN
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 330ef04ca51a..110fda65bd21 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
+#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 69
+#define IWL_22000_UCODE_API_MAX 72
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -39,6 +40,7 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
@@ -54,13 +56,16 @@
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
+#define IWL_BZ_A_FM4_A_FW_PRE "iwlwifi-bz-a0-fm4-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
+#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0-"
#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
+#define IWL_BNJ_B_FM_B_FW_PRE "iwlwifi-BzBnj-b0-fm-b0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
@@ -117,10 +122,12 @@
IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_FM4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
- IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \
+ IWL_GL_B_FM_B_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@@ -131,6 +138,8 @@
IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_B_FM_B_MODULE_FIRMWARE(api) \
+ IWL_BNJ_B_FM_B_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -224,7 +233,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 1024, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -240,7 +249,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-#define IWL_DEVICE_BZ_COMMON \
+#define IWL_DEVICE_BZ \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.led_mode = IWL_LED_RF_STATE, \
@@ -276,16 +285,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = LDBG_M2S_BUF_WRAP_CNT, \
.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
}, \
- }
-
-#define IWL_DEVICE_BZ \
- IWL_DEVICE_BZ_COMMON, \
+ }, \
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_BZ, \
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 1024, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -299,6 +305,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
}, \
+ }, \
+ .mon_dbgi_regs = { \
+ .write_ptr = { \
+ .addr = DBGI_SRAM_FIFO_POINTERS, \
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
+ }, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@@ -385,6 +397,21 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
+const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .umac_prph_offset = 0x300000,
+ .integrated = true,
+ .low_latency_xtal = true,
+ .xtal_latency = 12000,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+ .imr_enabled = true,
+};
+
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@@ -476,6 +503,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
+const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@@ -816,6 +844,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
+ .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
+ .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -830,6 +872,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
+ .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
@@ -883,6 +932,13 @@ const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0 = {
+ .fw_name_pre = IWL_BZ_A_FM4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.fw_name_pre = IWL_GL_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -890,6 +946,13 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_gl_b0_fm_b0 = {
+ .fw_name_pre = IWL_GL_B_FM_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
.fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
.uhb_supported = true,
@@ -931,6 +994,13 @@ const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+
+const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0 = {
+ .fw_name_pre = IWL_BNJ_B_FM_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -964,3 +1034,6 @@ MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_B_FM_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
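/*
 * Editor's sketch (hypothetical macro names): how the firmware-name
 * macros above expand. __stringify() turns the API level into a string
 * literal that the preprocessor concatenates with the per-device
 * prefix and the ".ucode" suffix:
 */
#include <linux/stringify.h>

#define EXAMPLE_FW_PRE	"iwlwifi-BzBnj-b0-fm-b0-"
#define EXAMPLE_FW(api)	EXAMPLE_FW_PRE __stringify(api) ".ucode"

/* EXAMPLE_FW(72) == "iwlwifi-BzBnj-b0-fm-b0-72.ucode" */
static const char example_fw_name[] = EXAMPLE_FW(72);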
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index abb8696ba294..fefaa414272b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -92,7 +92,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes);
+ u64 changes);
void iwlagn_config_ht40(struct ieee80211_conf *conf,
struct iwl_rxon_context *ctx);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
@@ -112,7 +112,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type);
int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_priv *priv,
- const struct iwl_calib_hdr *cmd, int len);
+ const struct iwl_calib_cmd *cmd, size_t len);
void iwl_calib_free_results(struct iwl_priv *priv);
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index a11884fa254b..f488620d2844 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -19,8 +19,7 @@
struct iwl_calib_result {
struct list_head list;
size_t cmd_len;
- struct iwl_calib_hdr hdr;
- /* data follows */
+ struct iwl_calib_cmd cmd;
};
struct statistics_general_data {
@@ -43,12 +42,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
int ret;
hcmd.len[0] = res->cmd_len;
- hcmd.data[0] = &res->hdr;
+ hcmd.data[0] = &res->cmd;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
ret = iwl_dvm_send_cmd(priv, &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d on calib cmd %d\n",
- ret, res->hdr.op_code);
+ ret, res->cmd.hdr.op_code);
return ret;
}
}
@@ -57,19 +56,22 @@ int iwl_send_calib_results(struct iwl_priv *priv)
}
int iwl_calib_set(struct iwl_priv *priv,
- const struct iwl_calib_hdr *cmd, int len)
+ const struct iwl_calib_cmd *cmd, size_t len)
{
struct iwl_calib_result *res, *tmp;
- res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
- GFP_ATOMIC);
+ if (check_sub_overflow(len, sizeof(*cmd), &len))
+ return -ENOMEM;
+
+ res = kmalloc(struct_size(res, cmd.data, len), GFP_ATOMIC);
if (!res)
return -ENOMEM;
- memcpy(&res->hdr, cmd, len);
- res->cmd_len = len;
+ res->cmd = *cmd;
+ memcpy(res->cmd.data, cmd->data, len);
+ res->cmd_len = struct_size(cmd, data, len);
list_for_each_entry(tmp, &priv->calib_results, list) {
- if (tmp->hdr.op_code == res->hdr.op_code) {
+ if (tmp->cmd.hdr.op_code == res->cmd.hdr.op_code) {
list_replace(&tmp->list, &res->list);
kfree(tmp);
return 0;
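/*
 * Editor's sketch (hypothetical types): the allocation pattern the
 * calib change above adopts. "len" arrives as the total payload
 * length; subtract the fixed header with overflow checking, then let
 * struct_size() compute the exact allocation for the header plus the
 * trailing flexible array.
 */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_cmd {
	u32 op_code;
	u8 data[];
};

struct example_result {
	size_t cmd_len;
	struct example_cmd cmd;	/* must stay the last member */
};

static struct example_result *example_store(const struct example_cmd *cmd,
					    size_t len)
{
	struct example_result *res;

	if (check_sub_overflow(len, sizeof(*cmd), &len))
		return NULL;	/* shorter than the header: reject */

	res = kmalloc(struct_size(res, cmd.data, len), GFP_ATOMIC);
	if (!res)
		return NULL;

	res->cmd = *cmd;			/* fixed header only */
	memcpy(res->cmd.data, cmd->data, len);	/* trailing payload */
	res->cmd_len = struct_size(cmd, data, len);
	return res;
}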
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index bbd574091201..1a9eadace188 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -696,6 +696,7 @@ struct iwl_priv {
/* Scan related variables */
unsigned long scan_start;
unsigned long scan_start_tsf;
+ size_t scan_cmd_size;
void *scan_cmd;
enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 40d790b36d85..1dc974e2c511 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014, 2022 Intel Corporation. All rights reserved.
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
@@ -441,7 +441,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
priv->current_ht_config.smps = smps_request;
for_each_context(priv, ctx) {
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
- ieee80211_request_smps(ctx->vif, smps_request);
+ ieee80211_request_smps(ctx->vif, 0, smps_request);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 754876cd27ce..f4070fddc8c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2022 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return ret;
}
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
@@ -1153,7 +1153,8 @@ static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
}
static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 90b9becd1673..a873be109f43 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014, 2018 - 2021 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
@@ -48,6 +48,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search.
@@ -283,7 +284,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
}
/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
- beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
+ beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif, 0);
if (!beacon) {
IWL_ERR(priv, "update beacon failed -- keeping old\n");
goto out;
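/*
 * Editor's sketch (hypothetical symbol and namespace): what the
 * MODULE_IMPORT_NS(IWLWIFI) added above does. Symbols exported into a
 * namespace are only linkable by modules that explicitly import that
 * namespace; the module loader rejects the load otherwise.
 */
#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_helper, EXAMPLE_NS);	/* exporting module */

MODULE_IMPORT_NS(EXAMPLE_NS);				/* consuming module */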
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index b7c8b209bfea..687c906a9d72 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 - 2020 Intel Corporation
+ * Copyright (C) 2019 - 2020, 2022 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -1039,7 +1039,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->last_rate_n_flags = tx_rate;
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[sband->band])
+ if (sta && sta->deflink.supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
@@ -1239,10 +1239,10 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1294,10 +1294,10 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1350,7 +1350,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
@@ -1570,7 +1570,7 @@ static void rs_move_siso_to_other(struct iwl_priv *priv,
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1740,7 +1740,7 @@ static void rs_move_mimo2_to_other(struct iwl_priv *priv,
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1908,7 +1908,7 @@ static void rs_move_mimo3_to_other(struct iwl_priv *priv,
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -2212,7 +2212,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
@@ -2763,7 +2763,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
int i, j;
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct iwl_station_priv *sta_priv;
struct iwl_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2781,7 +2781,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
+ lq_sta->supp_rates = sta->deflink.supp_rates[sband->band];
IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
sta_id);
@@ -2798,7 +2798,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/*
* active legacy rates as per supported rates bitmap
*/
- supp = sta->supp_rates[sband->band];
+ supp = sta->deflink.supp_rates[sband->band];
lq_sta->active_legacy_rate = 0;
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index db0c41bbeb0e..e9d2717362cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020 Intel Corporation
+ * Copyright(c) 2018, 2020-2021 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -915,7 +915,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
len += 1 + 2;
copylen += 1 + 2;
- new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+ new_data = kmalloc(struct_size(new_data, data, len), GFP_ATOMIC);
if (new_data) {
new_data->length = len;
new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -1015,8 +1015,7 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_get_cmd_string(priv->trans,
- iwl_cmd_id(pkt->hdr.cmd,
- 0, 0)),
+ WIDE_ID(0, pkt->hdr.cmd)),
pkt->hdr.cmd);
}
}
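/*
 * Editor's sketch: iwl_cmd_id(cmd, 0, 0) above becomes WIDE_ID(0, cmd).
 * In the upstream headers, WIDE_ID() is essentially a byte-pack of
 * group and opcode, approximately:
 */
#define EXAMPLE_WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

/* A legacy command (group 0) keeps its opcode in the low byte: */
/* EXAMPLE_WIDE_ID(0, 0x88) == 0x0088 */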
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 70338bc7bb54..f80cce37e2c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -183,7 +183,7 @@ static int iwlagn_update_beacon(struct iwl_priv *priv,
lockdep_assert_held(&priv->mutex);
dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
+ priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif, 0);
if (!priv->beacon_skb)
return -ENOMEM;
return iwlagn_send_beacon_cmd(priv);
@@ -562,12 +562,12 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
slot1 = bcnint - slot0;
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
- (!ctx_bss->vif->bss_conf.idle &&
- !ctx_bss->vif->bss_conf.assoc)) {
+ (!ctx_bss->vif->cfg.idle &&
+ !ctx_bss->vif->cfg.assoc)) {
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot1 = IWL_MIN_SLOT_TIME;
- } else if (!ctx_pan->vif->bss_conf.idle &&
- !ctx_pan->vif->bss_conf.assoc) {
+ } else if (!ctx_pan->vif->cfg.idle &&
+ !ctx_pan->vif->cfg.assoc) {
slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
slot0 = IWL_MIN_SLOT_TIME;
}
@@ -1280,7 +1280,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
break;
}
- ht_cap = &sta->ht_cap;
+ ht_cap = &sta->deflink.ht_cap;
need_multiple = true;
@@ -1383,7 +1383,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
@@ -1392,7 +1392,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
+ if (changes & BSS_CHANGED_IDLE && vif->cfg.idle) {
/*
* If we go idle, then clearly no "passive-no-rx"
* workaround is needed any more, this is a reset.
@@ -1420,14 +1420,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwlagn_update_qos(priv, ctx);
}
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ ctx->staging.assoc_id = cpu_to_le16(vif->cfg.aid);
if (vif->bss_conf.use_short_preamble)
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (changes & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
@@ -1483,7 +1483,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
*/
if (vif->type == NL80211_IFTYPE_STATION) {
- if (!bss_conf->assoc)
+ if (!vif->cfg.assoc)
ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
else
ctx->staging.filter_flags &=
@@ -1493,7 +1493,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
iwlagn_commit_rxon(priv, ctx);
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
/*
* The chain noise calibration will enable PM upon
* completion. If calibration has already been run
@@ -1509,10 +1509,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_IBSS) {
ret = iwlagn_manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
+ vif->cfg.ibss_joined);
if (ret)
IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
+ vif->cfg.ibss_joined ? "add" : "remove",
bss_conf->bssid);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 2d38227dfdd2..a7e85c5c8c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -626,7 +626,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 active_chains;
u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
int ret;
- int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
+ size_t scan_cmd_size = sizeof(struct iwl_scan_cmd) +
MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
priv->fw->ucode_capa.max_probe_length;
const u8 *ssid = NULL;
@@ -649,9 +649,15 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
"fail to allocate memory for scan\n");
return -ENOMEM;
}
+ priv->scan_cmd_size = scan_cmd_size;
+ }
+ if (priv->scan_cmd_size < scan_cmd_size) {
+ IWL_DEBUG_SCAN(priv,
+ "memory needed for scan grew unexpectedly\n");
+ return -ENOMEM;
}
scan = priv->scan_cmd;
- memset(scan, 0, scan_cmd_size);
+ memset(scan, 0, priv->scan_cmd_size);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
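/*
 * Editor's sketch (hypothetical driver state): the guard added above.
 * Record the size the cached buffer was allocated with, and fail
 * closed if a later request computes a larger size, instead of writing
 * past the old allocation.
 */
#include <linux/slab.h>
#include <linux/string.h>

struct example_priv {
	void *scan_cmd;
	size_t scan_cmd_size;
};

static int example_prepare_scan(struct example_priv *priv, size_t needed)
{
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(needed, GFP_KERNEL);
		if (!priv->scan_cmd)
			return -ENOMEM;
		priv->scan_cmd_size = needed;
	}
	if (priv->scan_cmd_size < needed)
		return -ENOMEM;	/* needed size grew unexpectedly */

	memset(priv->scan_cmd, 0, priv->scan_cmd_size);
	return 0;
}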
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 8f7a0f36c276..cef43cf80620 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2022 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -139,7 +139,7 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
if (!sta)
return true;
- return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ return sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -147,7 +147,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
__le32 *flags, __le32 *mask)
{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap;
*mask = STA_FLG_RTS_MIMO_PROT_MSK |
STA_FLG_MIMO_DIS_MSK |
@@ -161,12 +161,12 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
sta->addr,
- (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
+ (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ?
"static" :
- (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
+ (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ?
"dynamic" : "disabled");
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_STATIC:
*flags |= STA_FLG_MIMO_DIS_MSK;
break;
@@ -176,7 +176,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
case IEEE80211_SMPS_OFF:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index 4b27a53d0bb4..bb13ca5d666c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -356,18 +356,18 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_priv *priv = data;
- struct iwl_calib_hdr *hdr;
+ struct iwl_calib_cmd *cmd;
if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
return true;
}
- hdr = (struct iwl_calib_hdr *)pkt->data;
+ cmd = (struct iwl_calib_cmd *)pkt->data;
- if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
+ if (iwl_calib_set(priv, cmd, iwl_rx_packet_payload_len(pkt)))
IWL_ERR(priv, "Failed to record calibration data %d\n",
- hdr->op_code);
+ cmd->hdr.op_code);
return false;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 790c96df58cb..e6d64152c81a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
+#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -19,6 +20,30 @@ const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ {}
+};
+
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
{
@@ -242,7 +267,7 @@ found:
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd)
+ union iwl_tas_config_cmd *cmd, int fw_ver)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -268,10 +293,18 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
ACPI_WTAS_ENABLE_IEC_POS;
+ u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
+
enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- cmd->override_tas_iec = cpu_to_le16(override_iec);
- cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
+ if (fw_ver <= 3) {
+ cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+ cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+ } else {
+ cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+ cmd->v4.override_tas_iec = (u8)override_iec;
+ cmd->v4.enable_tas_iec = (u8)enabled_iec;
+ }
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -297,7 +330,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->block_list_size = cpu_to_le32(block_list_size);
+ cmd->v4.block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
@@ -319,7 +352,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->block_list_array[i] = cpu_to_le32(country);
+ cmd->v4.block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -529,8 +562,8 @@ IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
- bool enabled;
int ret, tbl_rev;
+ u32 flags;
u8 num_chains, num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
@@ -596,7 +629,8 @@ read_table:
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ flags = wifi_pkg->package.elements[1].integer.value;
+ fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS;
/* position of the actual table */
table = &wifi_pkg->package.elements[2];
@@ -604,7 +638,8 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
num_chains, num_sub_bands);
out_free:
kfree(data);
@@ -888,10 +923,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
@@ -901,6 +937,9 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
{
int i, j;
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
if (!iwl_sar_geo_support(fwrt))
return -EOPNOTSUPP;
@@ -961,3 +1000,181 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
+
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data, *flags;
+ int i, j, ret, tbl_rev, num_sub_bands = 0;
+ int idx = 2;
+
+ fwrt->ppag_flags = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev == 1 || tbl_rev == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ IWL_DEBUG_RADIO(fwrt,
+ "Reading PPAG table v2 (tbl_rev=%d)\n",
+ tbl_rev);
+ goto read_table;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ /* try to read ppag table revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n");
+ goto read_table;
+ }
+
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
+read_table:
+ fwrt->ppag_ver = tbl_rev;
+ flags = &wifi_pkg->package.elements[1];
+
+ if (flags->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
+
+ if (!fwrt->ppag_flags) {
+ ret = 0;
+ goto out_free;
+ }
+
+ /*
+ * read, verify gain values and save them into the PPAG table.
+ * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
+ * following sub-bands to High-Band (5GHz).
+ */
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ union acpi_object *ent;
+
+ ent = &wifi_pkg->package.elements[idx++];
+ if (ent->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
+
+ if ((j == 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
+ (j != 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
+ fwrt->ppag_flags = 0;
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+ if (!fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d but FW supports v1, sending truncated table\n",
+ fwrt->ppag_ver);
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else if (cmd_ver == 2 || cmd_ver == 3) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+ } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
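/*
 * Editor's sketch: the allow-list pattern used by
 * iwl_acpi_is_ppag_approved() above. dmi_check_system() walks a
 * NULL-terminated dmi_system_id table against the platform's DMI
 * strings and returns the number of matches, so a feature can stay off
 * on unlisted vendors. The entry below is hypothetical.
 */
#include <linux/dmi.h>

static const struct dmi_system_id example_approved[] = {
	{
		.ident = "ExampleVendor",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
		},
	},
	{}	/* terminator */
};

static bool example_feature_allowed(void)
{
	return dmi_check_system(example_approved);
}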
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 22b3c665f91a..6f361c59106f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -77,6 +77,8 @@
#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
#define ACPI_WTAS_ENABLE_IEC_POS 0x2
+#define ACPI_WTAS_USA_UHB_MSK BIT(16)
+#define ACPI_WTAS_USA_UHB_POS 16
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
@@ -89,6 +91,11 @@
#define ACPI_PPAG_MAX_LB 24
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
+#define ACPI_PPAG_MASK 3
+#define IWL_PPAG_ETSI_MASK BIT(0)
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+#define IWL_REDUCE_POWER_FLAGS_POS 1
/*
* The profile for revision 2 is a superset of revision 1, which is in
@@ -126,7 +133,8 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_6E = 3,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
};
enum iwl_dsm_values_srd {
@@ -213,10 +221,17 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd);
+ union iwl_tas_config_cmd *cmd, int fw_ver);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -294,7 +309,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd)
+ union iwl_tas_config_cmd *cmd, int fw_ver)
{
return -ENOENT;
}
@@ -304,6 +319,22 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt
return 0;
}
+static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
#endif /* CONFIG_ACPI */
static inline union acpi_object *
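/*
 * Editor's sketch (hypothetical accessor): the stub convention the
 * header extends above. Each new ACPI accessor gets a static inline
 * fallback under the #else of CONFIG_ACPI with the same signature, so
 * callers compile unchanged and see a plain error code on non-ACPI
 * builds.
 */
#ifdef CONFIG_ACPI
int example_get_table(struct device *dev);
#else
static inline int example_get_table(struct device *dev)
{
	return -ENOENT;	/* behave as "table absent" */
}
#endif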
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0703e41403a6..0b052c2e563a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -20,6 +20,8 @@
* &enum iwl_phy_ops_subcmd_ids
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
+ * @SCAN_GROUP: scan group, uses command IDs from
+ * &enum iwl_scan_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @LOCATION_GROUP: location group, uses command IDs from
* &enum iwl_location_subcmd_ids
@@ -36,6 +38,7 @@ enum iwl_mvm_command_groups {
MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
+ SCAN_GROUP = 0x6,
NAN_GROUP = 0x7,
LOCATION_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
@@ -323,14 +326,6 @@ enum iwl_legacy_cmds {
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/**
- * @DC2DC_CONFIG_CMD:
- * Set/Get DC2DC frequency tune
- * Command is &struct iwl_dc2dc_config_cmd,
- * response is &struct iwl_dc2dc_config_resp
- */
- DC2DC_CONFIG_CMD = 0x83,
-
- /**
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
*/
NVM_ACCESS_CMD = 0x88,
@@ -502,11 +497,6 @@ enum iwl_legacy_cmds {
DEBUG_LOG_MSG = 0xf7,
/**
- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
- */
- BCAST_FILTER_CMD = 0xcf,
-
- /**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
MCAST_FILTER_CMD = 0xd0,
@@ -613,6 +603,11 @@ enum iwl_system_subcmd_ids {
* @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
*/
SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+ /**
+ * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+ */
+ RFI_DEACTIVATE_NOTIF = 0xff,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 1ab92f62c414..087354b3c308 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
-/**
- * struct iwl_dc2dc_config_cmd - configure dc2dc values
- *
- * (DC2DC_CONFIG_CMD = 0x83)
- *
- * Set/Get & configure dc2dc values.
- * The command always returns the current dc2dc values.
- *
- * @flags: set/get dc2dc
- * @enable_low_power_mode: not used.
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_cmd {
- __le32 flags;
- __le32 enable_low_power_mode; /* not used */
- __le32 dc2dc_freq_tune0;
- __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
- *
- * Current dc2dc values returned by the FW.
- *
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_resp {
- __le32 dc2dc_freq_tune0;
- __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
-
#endif /* __iwl_fw_api_config_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 4cd9ab23954e..df0833890e55 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -766,6 +766,65 @@ struct iwl_wowlan_status_v12 {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+/**
+ * struct iwl_wowlan_info_notif - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @reserved1: reserved
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @reserved2: reserved
+ */
+struct iwl_wowlan_info_notif {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 reserved2[2];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
+ * @wake_packet_length: wakeup packet length
+ * @station_id: station id
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_wake_pkt_notif {
+ __le32 wake_packet_length;
+ u8 station_id;
+ u8 reserved[3];
+ u8 wake_packet[1];
+} __packed; /* WOWLAN_WAKE_PKT_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_d3_end_notif - d3 end notification
+ * @flags: See &enum iwl_d0i3_flags
+ */
+struct iwl_mvm_d3_end_notif {
+ __le32 flags;
+} __packed;
+
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 89236f42c5a4..43619acc29fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -42,7 +42,7 @@ enum iwl_data_path_subcmd_ids {
RFH_QUEUE_CONFIG_CMD = 0xD,
/**
- * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+ * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
*/
TLC_MNG_CONFIG_CMD = 0xF,
@@ -58,6 +58,20 @@ enum iwl_data_path_subcmd_ids {
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
+ * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+ * blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+ * command, and &struct iwl_rx_baid_cfg_resp as a response.
+ */
+ RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+ /**
+ * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+ * command, uses &struct iwl_scd_queue_cfg_cmd and the response
+ * is (same as before) &struct iwl_tx_queue_cfg_rsp.
+ */
+ SCD_QUEUE_CONFIG_CMD = 0x17,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
@@ -257,4 +271,136 @@ struct iwl_rlc_config_cmd {
u8 reserved[3];
} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+ IWL_RX_BAID_ACTION_ADD,
+ IWL_RX_BAID_ACTION_MODIFY,
+ IWL_RX_BAID_ACTION_REMOVE,
+}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+ __le32 sta_id_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le16 ssn;
+ __le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+ __le32 old_sta_id_mask;
+ __le32 new_sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+ __le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+ __le32 sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+ __le32 action;
+ union {
+ struct iwl_rx_baid_cfg_cmd_alloc alloc;
+ struct iwl_rx_baid_cfg_cmd_modify modify;
+ struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+ struct iwl_rx_baid_cfg_cmd_remove remove;
+ }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
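A minimal usage sketch for the union above, with a hypothetical helper and station id; only the structs and enum come from the patch. The filled command would be sent as RX_BAID_ALLOCATION_CONFIG_CMD, and the FW returns the allocated BAID in struct iwl_rx_baid_cfg_resp:

/* Hypothetical sketch: request a BAID for station id 3, TID 5. */
static void example_build_baid_alloc(struct iwl_rx_baid_cfg_cmd *cmd,
				     u16 ssn, u16 buf_size)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD);
	cmd->alloc.sta_id_mask = cpu_to_le32(BIT(3));
	cmd->alloc.tid = 5;
	cmd->alloc.ssn = cpu_to_le16(ssn);
	cmd->alloc.win_size = cpu_to_le16(buf_size);
}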
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+ __le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
+
+/**
+ * enum iwl_scd_queue_cfg_operation - scheduler queue operation
+ * @IWL_SCD_QUEUE_ADD: allocate a new queue
+ * @IWL_SCD_QUEUE_REMOVE: remove a queue
+ * @IWL_SCD_QUEUE_MODIFY: modify a queue
+ */
+enum iwl_scd_queue_cfg_operation {
+ IWL_SCD_QUEUE_ADD = 0,
+ IWL_SCD_QUEUE_REMOVE = 1,
+ IWL_SCD_QUEUE_MODIFY = 2,
+};
+
+/**
+ * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ * %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.queue: queue ID for removal
+ * @u.modify.sta_mask: new station mask for modify
+ * @u.modify.queue: queue ID to modify
+ */
+struct iwl_scd_queue_cfg_cmd {
+ __le32 operation;
+ union {
+ struct {
+ __le32 sta_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le32 flags;
+ __le32 cb_size;
+ __le64 bc_dram_addr;
+ __le64 tfdq_dram_addr;
+ } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+ struct {
+ __le32 queue;
+ } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+ struct {
+ __le32 sta_mask;
+ __le32 queue;
+ } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
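For the remove operation, only the queue id needs to be filled; a sketch with a hypothetical helper name:

/* Hypothetical sketch: tear down a TX queue with the new command. */
static void example_build_scd_queue_remove(struct iwl_scd_queue_cfg_cmd *cmd,
					   u32 queue_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE);
	cmd->u.remove.queue = cpu_to_le32(queue_id);
}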
+
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 456b7eaac570..ba538d70985f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -11,7 +11,8 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
-#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
+#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
/**
* struct iwl_fw_ini_hcmd
@@ -25,7 +26,7 @@ struct iwl_fw_ini_hcmd {
u8 id;
u8 group;
__le16 reserved;
- u8 data[0];
+ u8 data[];
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
@@ -249,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
-* struct iwl_fw_ini_conf_tlv - preset configuration TLV
+* struct iwl_fw_ini_addr_val - Address and value to set it to
*
* @address: the base address
* @value: value to set at address
-
*/
struct iwl_fw_ini_addr_val {
__le32 address;
@@ -275,7 +275,7 @@ struct iwl_fw_ini_conf_set_tlv {
__le32 time_point;
__le32 set_type;
__le32 addr_offset;
- struct iwl_fw_ini_addr_val addr_val[0];
+ struct iwl_fw_ini_addr_val addr_val[];
} __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */
/**
@@ -475,6 +475,7 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -482,6 +483,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+ IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
};
/**
@@ -496,4 +498,31 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
};
+
+/**
+ * enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
+ *
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limit the region dumps to 600KB
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limit the region dumps to 5MB
+ */
+enum iwl_fw_ini_dump_policy {
+ IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
+ IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
+ IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
+};
+
+/**
+ * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
+ *
+ * @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_VERBOSE: dump all regions
+ */
+enum iwl_fw_ini_dump_type {
+ IWL_FW_INI_DUMP_BRIEF,
+ IWL_FW_INI_DUMP_MEDIUM,
+ IWL_FW_INI_DUMP_VERBOSE,
+};
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 029ae64bf2b2..0c555089e05f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -43,6 +43,12 @@ enum iwl_debug_cmds {
*/
BUFFER_ALLOCATION = 0x8,
/**
+ * @FW_DUMP_COMPLETE_CMD:
+ * sends command to fw once dump collection completed
+ * &struct iwl_dbg_dump_complete_cmd
+ */
+ FW_DUMP_COMPLETE_CMD = 0xB,
+ /**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
*/
@@ -234,7 +240,7 @@ struct iwl_mfu_assert_dump_notif {
__le16 index_num;
__le16 parts_num;
__le32 data_size;
- __le32 data[0];
+ __le32 data[];
} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
/**
@@ -270,7 +276,7 @@ struct iwl_mvm_marker {
u8 marker_id;
__le16 reserved;
__le64 timestamp;
- __le32 metadata[0];
+ __le32 metadata[];
} __packed; /* MARKER_API_S_VER_1 */
/**
@@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
__le32 enabled_severities;
} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_dbg_dump_complete_cmd - dump complete cmd
+ *
+ * @tp: timepoint whose dump has completed
+ * @tp_data: timepoint data
+ */
+struct iwl_dbg_dump_complete_cmd {
+ __le32 tp;
+ __le32 tp_data;
+} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_debug_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
index dd62a63956b3..88fe61d144d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -33,95 +33,7 @@ struct iwl_mcast_filter_cmd {
u8 pass_all;
u8 bssid[6];
u8 reserved[2];
- u8 addr_list[0];
+ u8 addr_list[];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- * start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
- BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset: starting offset of this pattern.
- * @reserved1: reserved
- * @val: value to match - big endian (MSB is the first
- * byte to match from offset pos).
- * @mask: mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
- u8 offset_type;
- u8 offset;
- __le16 reserved1;
- __be32 val;
- __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
- BCAST_FILTER_FRAME_TYPE_ALL = 0,
- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- * only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
- u8 discard;
- u8 frame_type;
- u8 num_attrs;
- u8 reserved1;
- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
- u8 default_discard;
- u8 reserved1;
- __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
- u8 disable;
- u8 max_bcast_filters;
- u8 max_macs;
- u8 reserved1;
- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index d088c820b1a9..712532f17630 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
+ /**
+ * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
+ */
+ CANCEL_CHANNEL_SWITCH_CMD = 0x6,
/**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
@@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
* @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
*/
CHANNEL_SWITCH_START_NOTIF = 0xFF,
+
+ /**
+	 * @CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
+ */
+ CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
};
#define IWL_P2P_NOA_DESC_COUNT (2)
@@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+#define CS_ERR_COUNT_ERROR BIT(0)
+#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
+#define CS_ERR_LONG_TX_BLOCK BIT(2)
+#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
+
+/**
+ * struct iwl_channel_switch_error_notif - Channel switch error notification
+ *
+ * @mac_id: the MAC for which the ucode sends this notification
+ * @csa_err_mask: mask of the channel switch errors that occurred
+ */
+struct iwl_channel_switch_error_notif {
+ __le32 mac_id;
+ __le32 csa_err_mask;
+} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
+ *
+ * @mac_id: the mac that should cancel the channel switch
+ */
+struct iwl_cancel_channel_switch_cmd {
+ __le32 mac_id;
+} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
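A sketch of sending this command through the usual host-command path; WIDE_ID(), struct iwl_host_cmd and iwl_trans_send_cmd() are existing iwlwifi facilities, while the helper name and the caller's choice of mac_id are assumptions:

/* Hypothetical sketch, not part of the patch. */
static int example_cancel_csa(struct iwl_trans *trans, u32 mac_id)
{
	struct iwl_cancel_channel_switch_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};

	return iwl_trans_send_cmd(trans, &hcmd);
}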
+
/**
* struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 11f0bd283e49..9b7caf968346 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_HE_CHANNEL_BW_INDX 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
/**
- * struct iwl_he_pkt_ext - QAM thresholds
+ * struct iwl_he_pkt_ext_v1 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
@@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
* (0-low_th, 1-high_th)
*/
-struct iwl_he_pkt_ext {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
-} __packed; /* PKT_EXT_DOT11AX_API_S */
+struct iwl_he_pkt_ext_v1 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
+
+/**
+ * struct iwl_he_pkt_ext_v2 - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ * i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/Bw define 2 QAM thresholds (0..5)
+ *	For rates below the low_th, no need for PPE
+ *	For rates between low_th and high_th, need 8us PPE
+ *	For rates equal to or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x
+ * BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext_v2 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
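The threshold rule documented above reduces to a two-comparison lookup per Nss/BW entry; a standalone sketch (hypothetical helper, not from the patch):

/* Pick the PPE duration in microseconds for one Nss/BW entry, given the
 * two thresholds (0..5 QAM codes) and the QAM code of the TX rate.
 */
static u32 example_ppe_us(u8 qam_th_low, u8 qam_th_high, u8 qam_tx)
{
	if (qam_tx < qam_th_low)
		return 0;	/* QAM_tx < QAM_th1 */
	if (qam_tx < qam_th_high)
		return 8;	/* QAM_th1 <= QAM_tx < QAM_th2 */
	return 16;		/* QAM_th2 <= QAM_tx */
}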
/**
* enum iwl_he_sta_ctxt_flags - HE STA context flags
@@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
+ * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
+ * of threshold
+ * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
+ * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
+ *	extended to 20us for BW > 160MHz or for MCS w/ 4096-QAM.
*/
enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
@@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
+ STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
+ STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
+ STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
};
/**
@@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
- struct iwl_he_pkt_ext pkt_ext;
+ struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
/**
- * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
@@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
-struct iwl_he_sta_context_cmd {
+struct iwl_he_sta_context_cmd_v2 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
@@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
- struct iwl_he_pkt_ext pkt_ext;
+ struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@@ -643,6 +677,81 @@ struct iwl_he_sta_context_cmd {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
+ * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @puncture_mask: puncture mask for EHT
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
+ */
+struct iwl_he_sta_context_cmd_v3 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext_v2 pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 puncture_mask;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_3 */
+
+/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
* @bssid: the BSSID to sniff for
* @reserved1: reserved for dword alignment
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 4949fcf85257..91bfde6d5367 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -420,6 +420,30 @@ struct iwl_tas_config_cmd_v3 {
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override the default value of the
+ *	IEC regulatory
+ * @enable_tas_iec: in case override_tas_iec is set -
+ *	indicates whether IEC regulatory is enabled or disabled
+ * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
+ * @reserved: reserved
+ */
+struct iwl_tas_config_cmd_v4 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+ u8 reserved;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
+
+union iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_v2 v2;
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+};
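The union lets one buffer serve every supported command version, with only the transmitted length differing. A sketch of the dispatch (in the real driver the version would come from iwl_fw_lookup_cmd_ver(); the helper below and the field values are illustrative):

/* Hypothetical sketch, not part of the patch. */
static int example_send_tas(struct iwl_trans *trans, u8 cmd_ver)
{
	union iwl_tas_config_cmd cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG),
		.data[0] = &cmd,
	};

	switch (cmd_ver) {
	case 4:
		cmd.v4.override_tas_iec = 1;	/* illustrative values */
		cmd.v4.enable_tas_iec = 1;
		hcmd.len[0] = sizeof(cmd.v4);
		break;
	case 3:
		hcmd.len[0] = sizeof(cmd.v3);
		break;
	default:
		hcmd.len[0] = sizeof(cmd.v2);
		break;
	}

	return iwl_trans_send_cmd(trans, &hcmd);
}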
+/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
@@ -515,6 +539,32 @@ struct iwl_lari_config_change_cmd_v5 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
/**
+ * struct iwl_lari_config_change_cmd_v6 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ * Each bit represents a country or region to activate, according to the BIOS
+ * definitions.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be disabled
+ */
+struct iwl_lari_config_change_cmd_v6 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 5204aa94e72a..a0123f81f5d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@@ -13,6 +13,21 @@
*/
enum iwl_prot_offload_subcmd_ids {
/**
+ * @WOWLAN_WAKE_PKT_NOTIFICATION: Notification in &struct iwl_wowlan_wake_pkt_notif
+ */
+ WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC,
+
+ /**
+ * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif
+ */
+ WOWLAN_INFO_NOTIFICATION = 0xFD,
+
+ /**
+ * @D3_END_NOTIFICATION: End D3 state notification
+ */
+ D3_END_NOTIFICATION = 0xFE,
+
+ /**
* @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
*/
STORED_BEACON_NTF = 0xFF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index c04f2521fcb3..b1b9c29859c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
/**
* struct ct_kill_notif - CT-kill entry notification
 * This structure represents both versions of this notification.
*
* @temperature: the current temperature in celsius
- * @reserved: reserved
+ * @dts: only in v2: bitmap of the DTS sensors that triggered the CT-kill:
+ * bit 0: ToP master
+ * bit 1: PA chain A master
+ * bit 2: PA chain B master
+ * bit 3: ToP slave
+ * bit 4: PA chain A slave
+ *	bit 5: PA chain B slave
+ * bits 6,7: reserved (set to 0)
+ * @scheme: only for v2: the scheme that triggered the CT-kill (0 - SW, 1 - HW)
*/
struct ct_kill_notif {
__le16 temperature;
- __le16 reserved;
-} __packed; /* GRP_PHY_CT_KILL_NTF */
+ u8 dts;
+ u8 scheme;
+} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
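A sketch of decoding the v2 fields according to the bit layout above (hypothetical helper and log wording):

/* Hypothetical decode sketch, not part of the patch. */
static void example_print_ct_kill(const struct ct_kill_notif *notif)
{
	pr_info("CT-kill: temp=%d C, dts mask=0x%x, scheme=%s\n",
		le16_to_cpu(notif->temperature), notif->dts,
		notif->scheme ? "HW" : "SW");
}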
/**
* enum ctdp_cmd_operation - CTDP command operations
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 81318208f2f6..f92cac1da764 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -340,7 +340,7 @@ struct iwl_dev_tx_power_cmd_v5 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
/**
- * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
* @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
@@ -361,6 +361,28 @@ struct iwl_dev_tx_power_cmd_v6 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
/**
+ * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ *	since the last command. Used if set_mode is
+ *	IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ *	Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW will revert to
+ *	the default BIOS values. Relevant if set_mode is
+ *	IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v7 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
* @v3: version 3 part of the command
@@ -375,6 +397,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v4 v4;
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
+ struct iwl_dev_tx_power_cmd_v7 v7;
};
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
index c678b9aa9b55..1a84a4081e7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
@@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
__le32 status;
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_rfi_deactivate_notif - notification that the FW disabled RFIm
+ *
+ * @reason: used only for a log message
+ */
+struct iwl_rfi_deactivate_notif {
+ __le32 reason;
+} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
#endif /* __iwl_fw_api_rfi_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 173a6991587b..687f804c46b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@@ -133,7 +133,7 @@ enum IWL_TLC_MCS_PER_BW {
};
/**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v3 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
@@ -168,7 +168,7 @@ struct iwl_tlc_config_cmd_v3 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v4 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
@@ -752,7 +752,6 @@ struct iwl_lq_cmd {
u8 iwl_fw_rate_idx_to_plcp(int idx);
u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 1989b270862b..74a01888715b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -668,7 +668,7 @@ struct iwl_rx_no_data {
__le32 phy_info[2];
__le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
- TX_NO_DATA_NTFY_API_S_VER_2 */
+ RX_NO_DATA_NTFY_API_S_VER_2 */
struct iwl_frame_release {
u8 baid;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5413087ae909..7ba0e3409199 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,6 +9,16 @@
/* Scan Commands, Responses, Notifications */
+/**
+ * enum iwl_scan_subcmd_ids - scan commands
+ */
+enum iwl_scan_subcmd_ids {
+ /**
+ * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
+ */
+ OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
+};
+
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
@@ -1165,7 +1175,7 @@ struct iwl_scan_offload_profiles_query_v1 {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match_v1 matches[0];
+ struct iwl_scan_offload_profile_match_v1 matches[];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
/**
@@ -1188,7 +1198,7 @@ struct iwl_scan_offload_profile_match {
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */
/**
- * struct iwl_scan_offload_profiles_query - match results query response
+ * struct iwl_scan_offload_match_info - match results information
* @matched_profiles: bitmap of matched profiles, referencing the
* matches passed in the scan offload request
* @last_scan_age: age of the last offloaded scan
@@ -1200,7 +1210,7 @@ struct iwl_scan_offload_profile_match {
* @reserved: reserved
* @matches: array of match information, one for each match
*/
-struct iwl_scan_offload_profiles_query {
+struct iwl_scan_offload_match_info {
__le32 matched_profiles;
__le32 last_scan_age;
__le32 n_scans_done;
@@ -1209,8 +1219,10 @@ struct iwl_scan_offload_profiles_query {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match matches[0];
-} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
+ struct iwl_scan_offload_profile_match matches[];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 and
+ * SCAN_OFFLOAD_MATCH_INFO_NOTIFICATION_S_VER_1
+ */
/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 5edbe27c0922..d62fed543276 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -477,7 +477,7 @@ struct iwl_mvm_wep_key_cmd {
u8 decryption_type;
u8 flags;
u8 reserved;
- struct iwl_mvm_wep_key wep_key[0];
+ struct iwl_mvm_wep_key wep_key[];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 14d35000abed..893438aadab0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -132,7 +132,7 @@ struct iwl_tdls_config_cmd {
__le32 pti_req_data_offset;
struct iwl_tx_cmd pti_req_tx_cmd;
- u8 pti_req_template[0];
+ u8 pti_req_template[];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index e73cc7380a26..ecc6706f66ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -296,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
- * @ttl: time to live - packet lifetime limit. The FW should drop if
- * passed.
+ * @reserved: reserved
* @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen3 {
@@ -306,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
__le32 offload_assist;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
- __le64 ttl;
+ u8 reserved[8];
struct ieee80211_hdr hdr[];
} __packed; /* TX_CMD_API_S_VER_8,
TX_CMD_API_S_VER_10 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 8b3a00df41da..e018946310d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
+#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
+#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32
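The new defines form a capability ladder for the TFD queue size. A sketch of how a driver might choose among them (the helper and the has_eht/has_he flags are placeholders for real capability checks):

/* Hypothetical selection sketch, not part of the patch. */
static u32 example_queue_size(bool has_eht, bool has_he)
{
	if (has_eht)
		return IWL_DEFAULT_QUEUE_SIZE_EHT;	/* 4096 entries */
	if (has_he)
		return IWL_DEFAULT_QUEUE_SIZE_HE;	/* 1024 entries */
	return IWL_DEFAULT_QUEUE_SIZE;			/* 256 entries */
}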
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 7ad9cee925da..abf49022edbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -12,7 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
-
+#include "iwl-fh.h"
/**
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
*
@@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
iwl_trans_release_nic_access(fwrt->trans);
}
-#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
-#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
-
struct iwl_prph_range {
u32 start, end;
};
@@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data {
static int
iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
@@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1175,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1195,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
@@ -1204,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
idx++;
if (!fwrt->trans->trans_cfg->gen2)
- return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1220,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1239,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1307,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1442,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1509,7 +1506,7 @@ out:
static int
iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
@@ -1528,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_special_device_memory *special_mem =
@@ -1549,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1561,8 +1558,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->range_data_size = reg->dev_addr.size;
- iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
- DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
@@ -1579,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
@@ -1598,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ /* read the IMR memory and DMA it to SRAM */
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
+ u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
+ u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+ u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
+
+ range->range_data_size = cpu_to_le32(size_to_dump);
+ if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
+ imr_curr_addr, size_to_dump)) {
+ IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
+ return -1;
+ }
+
+ fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
+ fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
+
+ iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
+ size_to_dump);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static void *
iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
@@ -1677,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@@ -1688,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@@ -1697,9 +1719,20 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
}
static void *
+iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_dbgi_regs);
+}
+
+static void *
iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_err_table_dump *dump = data;
@@ -1713,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_special_device_memory *dump = data;
@@ -1725,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->data;
}
+static void *
+iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_error_dump *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+ return dump->data;
+}
+
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1784,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
return 1;
}
+static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+	/* range is the total number of pages that need to be copied from
+	 * IMR memory to SRAM, and later from SRAM to DRAM
+	 */
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return 0;
+ }
+
+	return (imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size);
+}
+
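A note on the rounding in the return above: it is a ceiling division, so assuming the u32 addition cannot overflow for the sizes involved, the kernel's DIV_ROUND_UP() would express the same computation:

	return DIV_ROUND_UP(imr_size, sram_size);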
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1861,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
+}
+
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1948,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32
+iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+ u32 ranges = 0;
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return size;
+ }
+ size = imr_size;
+ ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
+ if (!size && !ranges) {
+ IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
+ return 0;
+ }
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ ranges * sizeof(struct iwl_fw_ini_error_dump_range);
+ return size;
+}
+
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1964,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data);
+ void *data, u32 data_len);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range, int idx);
+ void *range, u32 range_len, int idx);
};
/**
@@ -1990,24 +2096,53 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_fw_ini_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
u32 type = reg->type;
- u32 id = le32_to_cpu(reg->id);
+ u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
u32 num_of_ranges, i, size;
- void *range;
-
- /*
- * The higher part of the ID from 2 is irrelevant for
- * us, so mask it out.
- */
- if (le32_to_cpu(reg->hdr.version) >= 2)
- id &= IWL_FW_INI_REGION_V2_MASK;
+ u8 *range;
+ u32 free_size;
+ u64 header_size;
+ u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
+
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
+ dump_policy, id, type);
+
+ if (le32_to_cpu(reg->hdr.version) >= 2) {
+ u32 dp = le32_get_bits(reg->id,
+ IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
+ if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
+ !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ }
+ }
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
- !ops->fill_range)
+ !ops->fill_range) {
+ IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
return 0;
+ }
size = ops->get_size(fwrt, reg_data);
- if (!size)
+
+ if (size < sizeof(*header)) {
+ IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
return 0;
+ }
entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
@@ -2022,9 +2157,6 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
tlv->reserved = reg->reserved;
tlv->len = cpu_to_le32(size);
- IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
- type);
-
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
@@ -2033,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
- range = ops->fill_mem_hdr(fwrt, reg_data, header);
+ free_size = size;
+ range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
@@ -2041,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
goto out_err;
}
+ header_size = range - (u8 *)header;
+
+ if (WARN(header_size > free_size,
+ "header size %llu > free_size %d",
+ header_size, free_size)) {
+ IWL_ERR(fwrt,
+ "WRT: fill_mem_hdr used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= header_size;
+
for (i = 0; i < num_of_ranges; i++) {
- int range_size = ops->fill_range(fwrt, reg_data, range, i);
+ int range_size = ops->fill_range(fwrt, reg_data, range,
+ free_size, i);
if (range_size < 0) {
IWL_ERR(fwrt,
@@ -2050,6 +2196,15 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
id, type);
goto out_err;
}
+
+ if (WARN(range_size > free_size, "range_size %d > free_size %d",
+ range_size, free_size)) {
+ IWL_ERR(fwrt,
+				"WRT: fill_range used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= range_size;
range = range + range_size;
}
@@ -2240,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_csr_iter,
},
- [IWL_FW_INI_REGION_DRAM_IMR] = {},
+ [IWL_FW_INI_REGION_DRAM_IMR] = {
+ .get_num_of_ranges = iwl_dump_ini_imr_ranges,
+ .get_size = iwl_dump_ini_imr_get_size,
+ .fill_mem_hdr = iwl_dump_ini_imr_fill_header,
+ .fill_range = iwl_dump_ini_imr_iter,
+ },
[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
@@ -2255,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
[IWL_FW_INI_REGION_DBGI_SRAM] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_size = iwl_dump_ini_mon_dbgi_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
};
@@ -2270,6 +2430,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data reg_data = {
.dump_data = dump_data,
};
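+ /* saves the DRAM_IMR region TLV, if present, so it can be collected last */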
+ struct iwl_dump_ini_region_data imr_reg_data = {
+ .dump_data = dump_data,
+ };
int i;
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
@@ -2305,10 +2468,32 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
+ /*
+ * DRAM_IMR can be collected only at the FW/HW error timepoints,
+ * when the FW is not alive. In addition, it must be collected
+ * last, as it overwrites SRAM that may still contain debug data
+ * which also needs to be collected.
+ */
+ if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
+ if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+ tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+ imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ else
+ IWL_INFO(fwrt,
+ "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
+ tp_id);
+ /* continue to next region */
+ continue;
+ }
+
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
+ /* collect the DRAM_IMR region last */
+ if (imr_reg_data.reg_tlv)
+ size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
+ &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size)
size += iwl_dump_ini_info(fwrt, trigger, list);
@@ -2444,7 +2629,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
- struct list_head dump_list = LIST_HEAD_INIT(dump_list);
+ LIST_HEAD(dump_list);
struct scatterlist *sg_dump_data;
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
@@ -2589,7 +2774,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
}
- desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+ desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
@@ -2685,6 +2870,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
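+/*
+ * Notify the firmware that dump collection is complete, so it can
+ * resume its error handling flow. Sent only when the firmware
+ * advertises DUMP_COMPLETE support and has not already hit an error.
+ */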
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data)
+{
+ struct iwl_dbg_dump_complete_cmd hcmd_data;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
+ .data[0] = &hcmd_data,
+ .len[0] = sizeof(hcmd_data),
+ };
+
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ return;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
+ hcmd_data.tp = cpu_to_le32(timepoint);
+ hcmd_data.tp_data = cpu_to_le32(timepoint_data);
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ }
+}
+
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
@@ -2693,10 +2900,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
-
+ u32 policy;
+ u32 time_point;
+
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
+ if (!dump_data->trig) {
+ IWL_ERR(fwrt, "dump trigger data is not set\n");
+ goto out;
+ }
+
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
@@ -2719,6 +2932,13 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+ policy = le32_to_cpu(dump_data->trig->apply_policy);
+ time_point = le32_to_cpu(dump_data->trig->time_point);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
@@ -2777,10 +2997,10 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
"WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
tp_id, (u32)(delay / USEC_PER_MSEC));
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
-
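+ /* collect immediately in sync mode, otherwise defer to the delayed worker */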
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
+ else
+ schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
@@ -2795,9 +3015,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
*/
- if (fwrt->ops && fwrt->ops->dump_start &&
- fwrt->ops->dump_start(fwrt->ops_ctx))
- return;
+ if (fwrt->ops && fwrt->ops->dump_start)
+ fwrt->ops->dump_start(fwrt->ops_ctx);
iwl_fw_dbg_collect_sync(fwrt, wks->idx);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 8c3c890066b0..be7806407de8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index a152ce306475..43e997283db0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -150,7 +150,7 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
{
struct iwl_dbg_host_event_cfg_cmd event_cfg;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0),
+ .id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
.flags = CMD_ASYNC,
.data[0] = &event_cfg,
.len[0] = sizeof(event_cfg),
@@ -358,7 +358,7 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
ver = &fw->ucode_capa.cmd_versions[state->pos];
- cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
+ cmd_id = WIDE_ID(ver->group, ver->cmd);
seq_printf(seq, " 0x%04x:\n", cmd_id);
seq_printf(seq, " name: %s\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 079fa0023bd8..c62576e442bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -84,7 +84,7 @@ struct iwl_fw_error_dump_data {
struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
- u8 data[0];
+ u8 data[];
} __packed;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index e4ebda64cd52..a7817d952022 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -119,7 +119,7 @@ enum iwl_ucode_tlv_type {
struct iwl_ucode_tlv {
__le32 type; /* see above */
__le32 length; /* not including type/length fields */
- u8 data[0];
+ u8 data[];
};
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
@@ -145,7 +145,7 @@ struct iwl_tlv_ucode_header {
* Note that each TLV is padded to a length
* that is a multiple of 4 for alignment.
*/
- u8 data[0];
+ u8 data[];
};
/*
@@ -181,7 +181,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
@@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
@@ -312,7 +310,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@@ -370,6 +367,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* reset flow
* @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
* channels even when these are not enabled.
+ * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
+ * complete to FW.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -388,7 +387,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
- IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
@@ -421,6 +419,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -455,6 +454,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
@@ -516,34 +516,6 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_SHARED_CLK = BIT(31),
};
-#define IWL_UCODE_MAX_CS 1
-
-/**
- * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
- * @cipher: a cipher suite selector
- * @flags: cipher scheme flags (currently reserved for a future use)
- * @hdr_len: a size of MPDU security header
- * @pn_len: a size of PN
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: bit shift needed to get key_idx
- * @mic_len: mic length in bytes
- * @hw_cipher: a HW cipher index used in host commands
- */
-struct iwl_fw_cipher_scheme {
- __le32 cipher;
- u8 flags;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
- u8 hw_cipher;
-} __packed;
-
enum iwl_fw_dbg_reg_operator {
CSR_ASSIGN,
CSR_SETBIT,
@@ -631,7 +603,7 @@ struct iwl_fw_dbg_dest_tlv_v1 {
__le32 wrap_count;
u8 base_shift;
u8 end_shift;
- struct iwl_fw_dbg_reg_op reg_ops[0];
+ struct iwl_fw_dbg_reg_op reg_ops[];
} __packed;
/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
@@ -651,14 +623,14 @@ struct iwl_fw_dbg_dest_tlv {
__le32 wrap_count;
u8 base_shift;
u8 size_shift;
- struct iwl_fw_dbg_reg_op reg_ops[0];
+ struct iwl_fw_dbg_reg_op reg_ops[];
} __packed;
struct iwl_fw_dbg_conf_hcmd {
u8 id;
u8 reserved;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -733,7 +705,7 @@ struct iwl_fw_dbg_trigger_tlv {
u8 flags;
u8 reserved[5];
- u8 data[0];
+ u8 data[];
} __packed;
#define FW_DBG_START_FROM_ALIVE 0
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index 530674a35eeb..b7deca05a953 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -2,13 +2,16 @@
/*
* Copyright(c) 2019 - 2021 Intel Corporation
*/
-
+#include <fw/api/commands.h>
#include "img.h"
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
+ /* prior to LONG_GROUP, we never used this CMD version API */
+ u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
+ u8 cmd = iwl_cmd_opcode(cmd_id);
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index fa7b1780064c..f878ac508801 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -134,16 +134,6 @@ struct iwl_fw_paging {
};
/**
- * struct iwl_fw_cscheme_list - a cipher scheme list
- * @size: a number of entries
- * @cs: cipher scheme entries
- */
-struct iwl_fw_cscheme_list {
- u8 size;
- struct iwl_fw_cipher_scheme cs[];
-} __packed;
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -197,7 +187,6 @@ struct iwl_dump_exclude {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offfset for runtime ucode.
* @type: firmware type (&enum iwl_fw_type)
- * @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
* @phy_integration_ver: PHY integration version string
@@ -228,7 +217,6 @@ struct iwl_fw {
enum iwl_fw_type type;
- struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg dbg;
@@ -275,7 +263,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 139ece879fab..135bd48bfe9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
struct iwl_soc_configuration_cmd cmd = {};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
+ .id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
@@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
- if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 58ca3849d1f3..945bc4160cc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
}
memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset, len);
+ (const u8 *)image->sec[sec_idx].data + offset, len);
block->fw_offs = image->sec[sec_idx].offset + offset;
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
@@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
.len = { sizeof(paging_cmd), },
.data = { &paging_cmd, },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 7d4aa398729a..b6d3ac6ed440 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -33,7 +33,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
@@ -47,7 +47,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -70,7 +70,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break;
}
- sha1 = le32_to_cpup((__le32 *)data);
+ sha1 = le32_to_cpup((const __le32 *)data);
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
@@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
if (hw_match)
break;
- mac_type = le16_to_cpup((__le16 *)data);
- rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
+ mac_type = le16_to_cpup((const __le16 *)data);
+ rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
@@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
- struct iwl_pnvm_section *section = (void *)data;
+ const struct iwl_pnvm_section *section = (const void *)data;
u32 data_len = tlv_len - sizeof(*section);
IWL_DEBUG_FW(trans,
@@ -107,7 +107,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
tlv_len);
/* TODO: remove, this is a deprecated separator */
- if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
+ if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
IWL_DEBUG_FW(trans, "Ignoring separator.\n");
break;
}
@@ -173,7 +173,7 @@ out:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
@@ -181,7 +181,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -193,8 +193,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- struct iwl_sku_id *sku_id =
- (void *)(data + sizeof(*tlv));
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index a21c3befd93b..a835214611ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
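+/*
+ * Reverse-map a v1 legacy-rate PLCP value to its FW rate index,
+ * relative to the CCK/OFDM base. Returns IWL_RATE_INVALID when no
+ * index matches.
+ */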
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
u32 iwl_new_rate_from_v1(u32 rate_v1)
{
u32 rate_v2 = 0;
@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
} else {
u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
- WARN_ON(legacy_rate < 0);
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
rate_v2 |= legacy_rate;
if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
}
IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return -1;
-}
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 3cb0ddbe3ab2..d3cb1ae68a96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -16,7 +16,7 @@
#include "fw/acpi.h"
struct iwl_fw_runtime_ops {
- int (*dump_start)(void *ctx);
+ void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@@ -163,6 +163,7 @@ struct iwl_fw_runtime {
u32 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
+ u8 reduced_power_flags;
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index f2f1789f470d..3f1272014daf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
else
cmd.id = SHARED_MEM_CFG;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index bd82c24811c8..6d408cd0f517 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -19,20 +19,14 @@
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
- struct efivar_entry *pnvm_efivar;
void *data;
unsigned long package_size;
- int err;
+ efi_status_t status;
*len = 0;
- pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
- if (!pnvm_efivar)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
- sizeof(IWL_UEFI_OEM_PNVM_NAME));
- pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
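+ /* bail out early if the EFI runtime does not support GetVariable */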
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return ERR_PTR(-ENODEV);
/*
* TODO: we hardcode a maximum length here, because reading
@@ -42,34 +36,29 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
package_size = IWL_HARDCODED_PNVM_SIZE;
data = kmalloc(package_size, GFP_KERNEL);
- if (!data) {
- data = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!data)
+ return ERR_PTR(-ENOMEM);
- err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
- if (err) {
+ status = efi.get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
IWL_DEBUG_FW(trans,
- "PNVM UEFI variable not found %d (len %lu)\n",
- err, package_size);
+ "PNVM UEFI variable not found 0x%lx (len %lu)\n",
+ status, package_size);
kfree(data);
- data = ERR_PTR(err);
- goto out;
+ return ERR_PTR(-ENOENT);
}
IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size);
*len = package_size;
-out:
- kfree(pnvm_efivar);
-
return data;
}
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;
@@ -79,7 +68,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -154,7 +143,7 @@ out:
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
void *sec_data;
IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@@ -163,7 +152,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -175,8 +164,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- struct iwl_sku_id *sku_id =
- (void *)(data + sizeof(*tlv));
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
@@ -211,21 +200,15 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
- struct efivar_entry *reduce_power_efivar;
struct pnvm_sku_package *package;
void *data = NULL;
unsigned long package_size;
- int err;
+ efi_status_t status;
*len = 0;
- reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
- if (!reduce_power_efivar)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
- sizeof(IWL_UEFI_REDUCED_POWER_NAME));
- reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return ERR_PTR(-ENODEV);
/*
* TODO: we hardcode a maximum length here, because reading
@@ -235,19 +218,17 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
package = kmalloc(package_size, GFP_KERNEL);
- if (!package) {
- package = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!package)
+ return ERR_PTR(-ENOMEM);
- err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
- if (err) {
+ status = efi.get_variable(IWL_UEFI_REDUCED_POWER_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, package);
+ if (status != EFI_SUCCESS) {
IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found %d (len %lu)\n",
- err, package_size);
+ "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
+ status, package_size);
kfree(package);
- data = ERR_PTR(err);
- goto out;
+ return ERR_PTR(-ENOENT);
}
IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
@@ -262,9 +243,6 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
kfree(package);
-out:
- kfree(reduce_power_efivar);
-
return data;
}
@@ -304,22 +282,15 @@ static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
- struct efivar_entry *sgom_efivar;
struct uefi_cnv_wlan_sgom_data *data;
unsigned long package_size;
- int err, ret;
-
- if (!fwrt->geo_enabled)
- return;
+ efi_status_t status;
+ int ret;
- sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL);
- if (!sgom_efivar)
+ if (!fwrt->geo_enabled ||
+ !efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
return;
- memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME,
- sizeof(IWL_UEFI_SGOM_NAME));
- sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
-
/* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
@@ -327,15 +298,14 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
package_size = IWL_HARDCODED_SGOM_SIZE;
data = kmalloc(package_size, GFP_KERNEL);
- if (!data) {
- data = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!data)
+ return;
- err = efivar_entry_get(sgom_efivar, NULL, &package_size, data);
- if (err) {
+ status = efi.get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found %d\n", err);
+ "SGOM UEFI variable not found 0x%lx\n", status);
goto out_free;
}
@@ -349,8 +319,6 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
out_free:
kfree(data);
-out:
- kfree(sgom_efivar);
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
#endif /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e122b8b4e1fc..cfa5e1b3c3f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -260,6 +260,7 @@ enum iwl_cfg_trans_ltr_delay {
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
+ * @imr_enabled: use the IMR if supported.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@@ -274,7 +275,8 @@ struct iwl_cfg_trans_params {
integrated:1,
low_latency_xtal:1,
bisr_workaround:1,
- ltr_delay:2;
+ ltr_delay:2,
+ imr_enabled:1;
};
/**
@@ -343,8 +345,8 @@ struct iwl_fw_mon_regs {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
- * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
- * supports 256 BA aggregation
+ * @min_ba_txq_size: minimum number of slots required in a TX queue
+ *	that supports BA aggregation, sized based on hardware support
+ *	(HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
* @mac_addr_csr_base: CSR base register for MAC address access, if not set
@@ -405,9 +407,10 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 gp2_reg_addr;
- u32 min_256_ba_txq_size;
+ u32 min_ba_txq_size;
const struct iwl_fw_mon_regs mon_dram_regs;
const struct iwl_fw_mon_regs mon_smem_regs;
+ const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@@ -433,6 +436,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
+#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
@@ -489,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const char iwl9162_name[];
@@ -509,6 +514,7 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
+extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -631,22 +637,28 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
+extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_gl_b0_fm_b0;
extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
+extern const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 5adf485db38e..b84884034c74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2018, 2020-2022 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -34,6 +34,7 @@ enum iwl_prph_scratch_mtr_format {
/**
* enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
* @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
* @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
* in hwm config.
@@ -55,6 +56,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
*/
enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index f90d4662c164..3e1f011e93aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -329,6 +329,7 @@ enum {
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_3160 (0x0000164)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
@@ -533,6 +534,9 @@ enum {
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
+/* This register is common for Tx and Rx, Rx queues start from 512 */
+#define HBUS_TARG_WRPTR_Q_SHIFT (16)
+#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
/**********************************************************
* CSR values
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index c73672d61356..3237d4b528b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
if (!node)
return -ENOMEM;
- memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
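+ /* copy the fixed TLV header and the flexible data separately, so
+ * each memcpy() destination matches the member it fills
+ */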
+ memcpy(&node->tlv, tlv, sizeof(node->tlv));
+ memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
return 0;
@@ -181,11 +182,11 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
- * The higher part of the ID in from version 2 is irrelevant for
- * us, so mask it out.
+ * From version 2, the high bits of the ID carry the debug policy;
+ * the ID itself is only the 16 LSBs, so mask the rest out.
*/
if (le32_to_cpu(reg->hdr.version) >= 2)
- id &= IWL_FW_INI_REGION_V2_MASK;
+ id &= IWL_FW_INI_REGION_ID_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
@@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EOPNOTSUPP;
}
+ if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
+ trans->dbg.imr_data.sram_addr =
+ le32_to_cpu(reg->internal_buffer.base_addr);
+ trans->dbg.imr_data.sram_size =
+ le32_to_cpu(reg->internal_buffer.size);
+ }
+
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@@ -271,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
+ const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
u32 tp = le32_to_cpu(conf_set->time_point);
u32 type = le32_to_cpu(conf_set->set_type);
@@ -362,7 +371,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
struct iwl_dbg_tlv_timer_node *node, *tmp;
list_for_each_entry_safe(node, tmp, timer_list, list) {
- del_timer(&node->timer);
+ del_timer_sync(&node->timer);
list_del(&node->list);
kfree(node);
}
@@ -460,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
@@ -577,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
- if (!fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
@@ -762,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
- int ret, i, dram_alloc = 0;
- struct iwl_dram_info dram_info;
+ int ret, i;
+ bool dram_alloc = false;
struct iwl_dram_data *frags =
&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
+ struct iwl_dram_info *dram_info;
+
+ if (!frags || !frags->block)
+ return;
+
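+ /* the start of the DBGC1 fragment doubles as the iwl_dram_info
+ * header that the firmware reads
+ */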
+ dram_info = frags->block;
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
return;
- dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
- dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
+ dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
+ dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
- ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
+ ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
if (!ret)
- dram_alloc++;
+ dram_alloc = true;
else
IWL_WARN(fwrt,
"WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
i, ret);
}
- if (dram_alloc) {
- memcpy(frags->block, &dram_info, sizeof(dram_info));
- IWL_DEBUG_FW(fwrt, "block data after %016x\n",
- *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
- }
+
+ if (dram_alloc)
+ IWL_DEBUG_FW(fwrt, "block data after %08x\n",
+ le32_to_cpu(dram_info->first_word));
+ else
+ memset(frags->block, 0, sizeof(*dram_info));
}
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@@ -811,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}
static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
- struct list_head *config_list)
+ struct list_head *conf_list)
{
struct iwl_dbg_tlv_node *node;
- list_for_each_entry(node, config_list, list) {
+ list_for_each_entry(node, conf_list, list) {
struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
u32 count, address, value;
u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@@ -861,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
struct iwl_dbgc1_info dram_info = {};
struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
- __le64 dram_base_addr = cpu_to_le64(frags->physical);
- __le32 dram_size = cpu_to_le32(frags->size);
- u64 dram_addr = le64_to_cpu(dram_base_addr);
+ __le64 dram_base_addr;
+ __le32 dram_size;
+ u64 dram_addr;
u32 ret;
+ if (!frags)
+ break;
+
+ dram_base_addr = cpu_to_le64(frags->physical);
+ dram_size = cpu_to_le32(frags->size);
+ dram_addr = le64_to_cpu(dram_base_addr);
+
IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
dram_base_addr, dram_size);
IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index 79287708bd6e..128059ca77e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
@@ -10,6 +10,8 @@
#include <fw/file.h>
#include <fw/api/dbg-tlv.h>
+#define IWL_DBG_TLV_MAX_PRESET 15
+
/**
* struct iwl_dbg_tlv_node - debug TLV node
* @list: list of &struct iwl_dbg_tlv_node
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 7dd70011fd1e..1d6c292cf545 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -18,12 +18,10 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -55,14 +53,12 @@ TRACE_EVENT(iwlwifi_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 83e3b731ad29..a2203f661321 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,14 +243,14 @@ struct iwl_firmware_pieces {
/* FW debug data parsed for driver usage */
bool dbg_dest_tlv_init;
- u8 *dbg_dest_ver;
+ const u8 *dbg_dest_ver;
union {
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
- struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+ const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
};
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+ const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
@@ -324,29 +324,6 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
}
-static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
-{
- int i, j;
- struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
- struct iwl_fw_cipher_scheme *fwcs;
-
- if (len < sizeof(*l) ||
- len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
- return -EINVAL;
-
- for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
- fwcs = &l->cs[j];
-
- /* we skip schemes with zero cipher suite selector */
- if (!fwcs->cipher)
- continue;
-
- fw->cs[j++] = *fwcs;
- }
-
- return 0;
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -356,13 +333,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
{
struct fw_img_parsing *img;
struct fw_sec *sec;
- struct fw_sec_parsing *sec_parse;
+ const struct fw_sec_parsing *sec_parse;
size_t alloc_size;
if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
return -1;
- sec_parse = (struct fw_sec_parsing *)data;
+ sec_parse = (const struct fw_sec_parsing *)data;
img = &pieces->img[type];
@@ -385,8 +362,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
{
- struct iwl_tlv_calib_data *def_calib =
- (struct iwl_tlv_calib_data *)data;
+ const struct iwl_tlv_calib_data *def_calib =
+ (const struct iwl_tlv_calib_data *)data;
u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
if (ucode_type >= IWL_UCODE_TYPE_MAX) {
IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
@@ -404,7 +381,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
- const struct iwl_ucode_api *ucode_api = (void *)data;
+ const struct iwl_ucode_api *ucode_api = (const void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i;
@@ -425,7 +402,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
- const struct iwl_ucode_capa *ucode_capa = (void *)data;
+ const struct iwl_ucode_capa *ucode_capa = (const void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i;
@@ -457,7 +434,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
u32 api_ver, hdr_size, build;
char buildstr[25];
const u8 *src;
@@ -600,7 +577,7 @@ static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
sizeof(region->special_mem))
return;
- region = (void *)tlv->data;
+ region = (const void *)tlv->data;
addr = le32_to_cpu(region->special_mem.base_addr);
addr += le32_to_cpu(region->special_mem.offset);
addr &= ~FW_ADDR_CACHE_CONTROL;
@@ -655,7 +632,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
struct iwl_ucode_capabilities *capa,
bool *usniffer_images)
{
- struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
const struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
@@ -704,8 +681,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
tlv_data = tlv->data;
@@ -762,7 +739,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->max_probe_length =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_PAN:
if (tlv_len)
@@ -783,7 +760,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
* will not work with the new firmware, or
* it'll not take advantage of new features.
*/
- capa->flags = le32_to_cpup((__le32 *)tlv_data);
+ capa->flags = le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_API_CHANGES_SET:
if (tlv_len != sizeof(struct iwl_ucode_api))
@@ -799,37 +776,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
if (tlv_len)
@@ -858,7 +835,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->standard_phy_calibration_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -884,7 +861,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PHY_SKU:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+ drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
drv->fw.valid_tx_ant = (drv->fw.phy_config &
FW_PHY_CFG_TX_CHAIN) >>
FW_PHY_CFG_TX_CHAIN_POS;
@@ -911,7 +888,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
num_of_cpus =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
if (num_of_cpus == 2) {
drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
@@ -925,18 +902,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
break;
- case IWL_UCODE_TLV_CSCHEME:
- if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
- goto invalid_tlv_len;
- break;
case IWL_UCODE_TLV_N_SCAN_CHANNELS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->n_scan_channels =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_VERSION: {
- __le32 *ptr = (void *)tlv_data;
+ const __le32 *ptr = (const void *)tlv_data;
u32 major, minor;
u8 local_comp;
@@ -960,15 +933,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
- struct iwl_fw_dbg_dest_tlv *dest = NULL;
- struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+ const struct iwl_fw_dbg_dest_tlv *dest = NULL;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
u8 mon_mode;
- pieces->dbg_dest_ver = (u8 *)tlv_data;
+ pieces->dbg_dest_ver = (const u8 *)tlv_data;
if (*pieces->dbg_dest_ver == 1) {
- dest = (void *)tlv_data;
+ dest = (const void *)tlv_data;
} else if (*pieces->dbg_dest_ver == 0) {
- dest_v1 = (void *)tlv_data;
+ dest_v1 = (const void *)tlv_data;
} else {
IWL_ERR(drv,
"The version is %d, and it is invalid\n",
@@ -1009,7 +982,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_CONF: {
- struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+ const struct iwl_fw_dbg_conf_tlv *conf =
+ (const void *)tlv_data;
if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
@@ -1043,8 +1017,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
- struct iwl_fw_dbg_trigger_tlv *trigger =
- (void *)tlv_data;
+ const struct iwl_fw_dbg_trigger_tlv *trigger =
+ (const void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
@@ -1075,7 +1049,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
drv->fw.dbg.dump_mask =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
@@ -1087,7 +1061,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+ paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
IWL_DEBUG_FW(drv,
"Paging: paging enabled (size = %u bytes)\n",
@@ -1117,8 +1091,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
/* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
- (void *)tlv_data;
+ const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+ (const void *)tlv_data;
size_t size;
struct iwl_fw_dbg_mem_seg_tlv *n;
@@ -1146,10 +1120,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
- struct {
+ const struct {
__le32 buf_addr;
__le32 buf_size;
- } *recov_info = (void *)tlv_data;
+ } *recov_info = (const void *)tlv_data;
if (tlv_len != sizeof(*recov_info))
goto invalid_tlv_len;
@@ -1160,10 +1134,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
break;
case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
- struct {
+ const struct {
u8 version[32];
u8 sha1[20];
- } *fseq_ver = (void *)tlv_data;
+ } *fseq_ver = (const void *)tlv_data;
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
@@ -1174,19 +1148,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_NUM_STATIONS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- if (le32_to_cpup((__le32 *)tlv_data) >
+ if (le32_to_cpup((const __le32 *)tlv_data) >
IWL_MVM_STATION_COUNT_MAX) {
IWL_ERR(drv,
"%d is an invalid number of station\n",
- le32_to_cpup((__le32 *)tlv_data));
+ le32_to_cpup((const __le32 *)tlv_data));
goto tlv_error;
}
capa->num_stations =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
- struct iwl_umac_debug_addrs *dbg_ptrs =
- (void *)tlv_data;
+ const struct iwl_umac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@@ -1201,8 +1175,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
- struct iwl_lmac_debug_addrs *dbg_ptrs =
- (void *)tlv_data;
+ const struct iwl_lmac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@@ -1277,7 +1251,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
- iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+ iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
return -EINVAL;
}
@@ -1418,7 +1392,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
- struct iwl_ucode_header *ucode;
+ const struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
@@ -1456,7 +1430,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
+ ucode = (const struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
@@ -1645,6 +1619,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
+ iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
mutex_lock(&iwlwifi_opmode_table_mtx);
switch (fw->type) {
case IWL_FW_DVM:
@@ -1661,8 +1637,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
- iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@@ -1707,6 +1681,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ /* drv has just been freed by the release */
+ failure = false;
free:
if (failure)
iwl_dealloc_ucode(drv);
@@ -1794,6 +1770,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
kfree(drv);
}
+#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
@@ -1801,7 +1778,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
- .enable_ini = true,
+ .enable_ini = ENABLE_INI,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1913,10 +1890,42 @@ MODULE_PARM_DESC(nvm_file, "NVM file name");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
-module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
- bool, S_IRUGO | S_IWUSR);
+
+static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ bool res;
+ __u32 new_enable_ini;
+
+ /* in case the argument type is a number */
+ ret = kstrtou32(arg, 0, &new_enable_ini);
+ if (!ret) {
+ if (new_enable_ini > ENABLE_INI) {
+ pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+ return -EINVAL;
+ }
+ goto out;
+ }
+
+ /* in case the argument type is boolean */
+ ret = kstrtobool(arg, &res);
+ if (ret)
+ return ret;
+ new_enable_ini = (res ? ENABLE_INI : 0);
+
+out:
+ iwlwifi_mod_params.enable_ini = new_enable_ini;
+ return 0;
+}
+
+static const struct kernel_param_ops enable_ini_ops = {
+ .set = enable_ini_set
+};
+
+module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
MODULE_PARM_DESC(enable_ini,
- "Enable debug INI TLV FW debug infrastructure (default: true");
+ "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ "Debug INI TLV FW debug infrastructure (default: 16)");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 0fd009e6d685..80073f973334 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -84,7 +84,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
* everything is built-in, then we can avoid that.
*/
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
-#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
+#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
#else
#define IWL_EXPORT_SYMBOL(sym)
#endif
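(Annotation: with the switch to EXPORT_SYMBOL_NS_GPL(), any module linking against these exports must import the IWLWIFI symbol namespace explicitly, or modpost rejects the reference at load time. Presumably the op-mode consumers gain a declaration like the sketch below; the surrounding placement is an assumption, but MODULE_IMPORT_NS() itself is the standard kernel mechanism.)

#include <linux/module.h>

/*
 * Consumer side: without this, loading fails with
 * "module uses symbol ... from namespace IWLWIFI, but does not import it"
 */
MODULE_IMPORT_NS(IWLWIFI);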
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index b9e86bf972e5..5f386bb1a353 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -23,26 +23,22 @@
*/
#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
u16 count;
int ret;
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
+ IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
"Acquired semaphore after %d tries.\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e6fd4941a4cb..bedd78a47f67 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -590,11 +590,31 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
+/* IMR DMA registers */
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c
+#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528
+#define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c
+#define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530
+
+/* RFH S2D DMA registers */
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002
+
+/* TFH D2S DMA registers */
+#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000
+#define IMR_UREG_CHICK 0x00d05c00
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000
+
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
- * struct iwl_gen3_bc_tbl scheduler byte count table gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
-struct iwl_gen3_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+struct iwl_gen3_bc_tbl_entry {
+ __le16 tfd_offset;
} __packed;
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 253eac4cbf59..396f2c997da6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
- u32 value = 0x5a5a5a5a;
-
if (iwl_trans_grab_nic_access(trans)) {
- value = iwl_read32(trans, reg);
+ u32 value = iwl_read32(trans, reg);
+
iwl_trans_release_nic_access(trans);
+ return value;
}
- return value;
+ return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);
@@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
- u32 val = 0x5a5a5a5a;
-
if (iwl_trans_grab_nic_access(trans)) {
- val = iwl_read_prph_no_grab(trans, ofs);
+ u32 val = iwl_read_prph_no_grab(trans, ofs);
+
iwl_trans_release_nic_access(trans);
+
+ return val;
}
- return val;
+
+ return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
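(Annotation: both reworked readers follow the same shape — scope the value inside the NIC-access window and fall through to the 0x5a5a5a5a poison when the grab fails. A standalone sketch of the pattern, with stub functions standing in for the transport ops:)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool grab_nic_access(void) { return true; }	/* stand-in */
static void release_nic_access(void) { }
static uint32_t raw_read32(uint32_t reg) { return reg ^ 0xffffffffu; }

static uint32_t read_direct32(uint32_t reg)
{
	if (grab_nic_access()) {
		/* value only exists while access is held */
		uint32_t value = raw_read32(reg);

		release_nic_access();
		return value;
	}
	return 0x5a5a5a5a;	/* poison: device inaccessible */
}

int main(void)
{
	printf("0x%08x\n", read_direct32(0x100));
	return 0;
}

(Keeping the read inside the guarded scope makes it harder to accidentally touch the register after the access window has been released.)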
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 004ebdac4535..d0b4d02bdab9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
*/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__
@@ -83,7 +83,8 @@ struct iwl_mod_params {
*/
bool disable_11ax;
bool remove_when_gone;
- bool enable_ini;
+ u32 enable_ini;
+ bool disable_11be;
};
static inline bool iwl_enable_rx_ampdu(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index dd58c8f9aa11..9040da3dcce3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -375,10 +375,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (v4)
ch_flags =
- __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
+ __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx);
else
ch_flags =
- __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
+ __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx);
if (band == NL80211_BAND_5GHZ &&
!data->sku_cap_band_52ghz_enable)
@@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_REQ,
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
@@ -584,9 +583,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -654,9 +653,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
@@ -732,7 +731,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa);
/* we know it's writable - we set it before ourselves */
- iftype_data = (void *)sband->iftype_data;
+ iftype_data = (void *)(uintptr_t)sband->iftype_data;
for (i = 0; i < sband->n_iftype_data; i++)
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
@@ -784,6 +783,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_MR:
+ case IWL_CFG_RF_TYPE_MS:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
@@ -912,7 +912,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + SKU);
- return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
+ return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
}
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
@@ -920,8 +920,8 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
- return le32_to_cpup((__le32 *)(nvm_sw +
- NVM_VERSION_EXT_NVM));
+ return le32_to_cpup((const __le32 *)(nvm_sw +
+ NVM_VERSION_EXT_NVM));
}
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
@@ -930,7 +930,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + RADIO_CFG);
- return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
+ return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
}
@@ -941,7 +941,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
- n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+ n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
return n_hw_addr & N_HW_ADDR_MASK;
}
@@ -1080,7 +1080,9 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
return -EINVAL;
}
- IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+ if (!trans->csme_own)
+ IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n",
+ data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR));
return 0;
}
@@ -1385,8 +1387,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
nvm_chan = iwl_nvm_channels;
}
- if (WARN_ON(num_of_ch > max_num_ch))
+ if (num_of_ch > max_num_ch) {
+ IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+ "Num of channels (%d) is greater than expected. Truncating to %d\n",
+ num_of_ch, max_num_ch);
num_of_ch = max_num_ch;
+ }
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
@@ -1592,7 +1598,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
}
eof = fw_entry->data + fw_entry->size;
- dword_buff = (__le32 *)fw_entry->data;
+ dword_buff = (const __le32 *)fw_entry->data;
/* some NVM file will contain a header.
* The header is identified by 2 dwords header as follow:
@@ -1604,7 +1610,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
if (fw_entry->size > NVM_HEADER_SIZE &&
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
- file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+ file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE);
IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(trans, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
@@ -1617,7 +1623,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
goto out;
}
} else {
- file_sec = (void *)fw_entry->data;
+ file_sec = (const void *)fw_entry->data;
}
while (true) {
@@ -1685,7 +1691,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
nvm_sections[section_id].length = section_size;
/* advance to the next section */
- file_sec = (void *)(file_sec->data + section_size);
+ file_sec = (const void *)(file_sec->data + section_size);
}
out:
release_firmware(fw_entry);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 5378315d0179..0a93ac769f66 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021 Intel Corporation
* Copyright (C) 2016 Intel Deutschland GmbH
*/
#include <linux/slab.h>
@@ -13,8 +13,6 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
-#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-
struct iwl_phy_db_entry {
u16 size;
u8 *data;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 95b3dae7b504..157d1f31c487 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -354,10 +354,10 @@
#define WFPM_GP2 0xA030B4
/* DBGI SRAM Register details */
-#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C
-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000
#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154
#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158
+#define DBGI_SRAM_FIFO_POINTERS 0x00A2E148
+#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF
enum {
ENABLE_WFPM = BIT(31),
@@ -386,6 +386,13 @@ enum {
#define UREG_LMAC1_CURRENT_PC 0xa05c1c
#define UREG_LMAC2_CURRENT_PC 0xa05c20
+#define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c
+#define WFPM_ARC1_PD_NOTIFICATION 0xa03044
+#define HPM_SECONDARY_DEVICE_STATE 0xa03404
+#define WFPM_MAC_OTP_CFG7_ADDR 0xa03338
+#define WFPM_MAC_OTP_CFG7_DATA 0xa0333c
+
+
/* For UMAG_GEN_HW_STATUS reg check */
enum {
UMAG_GEN_HW_IS_FPGA = BIT(1),
@@ -491,4 +498,6 @@ enum {
#define HBUS_TIMEOUT 0xA5A5A5A1
#define WFPM_DPHY_OFF 0xDF10FF
+#define REG_OTP_MINOR 0xA0333C
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 9236f9106826..b1af9359cea5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
else
trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
/*
@@ -203,10 +207,10 @@ IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
const struct iwl_hcmd_names *name = elt;
- u8 cmd1 = *(u8 *)key;
+ const u8 *cmd1 = key;
u8 cmd2 = name->cmd_id;
- return (cmd1 - cmd2);
+ return (*cmd1 - cmd2);
}
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
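(Annotation: since the byte count table is now described per entry — see the iwl_gen3_bc_tbl_entry change in iwl-fh.h above — the allocation size becomes a simple entry-size times queue-depth product per device family. A standalone sketch of that arithmetic, reusing the constants from this patch and modelling __le16 as unsigned short:)

#include <stdio.h>

struct iwl_gen3_bc_tbl_entry { unsigned short tfd_offset; };

#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
#define TFD_QUEUE_BC_SIZE_GEN3_BZ    (1024 * 4)

int main(void)
{
	printf("AX210 bc_tbl: %zu bytes\n",
	       sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210);
	printf("BZ    bc_tbl: %zu bytes\n",
	       sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ);
	return 0;
}

(This prints 2048 bytes for AX210 and 8192 bytes for BZ, matching what iwl_trans_init() now computes.)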
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1bcaa3598785..d659ccd065f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -406,6 +406,9 @@ struct iwl_dump_sanitize_ops {
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
* @fw_reset_handshake: firmware supports reset flow handshake
+ * @queue_alloc_cmd_ver: queue allocation command version, set to 0
+ * to use the older SCD_QUEUE_CFG, otherwise set to the version of
+ * SCD_QUEUE_CONFIG_CMD.
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@@ -424,6 +427,7 @@ struct iwl_trans_config {
u8 cb_data_offs;
bool fw_reset_handshake;
+ u8 queue_alloc_cmd_ver;
};
struct iwl_trans_dump_data {
@@ -569,10 +573,9 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
/* 22000 functions */
- int (*txq_alloc)(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int queue_wdg_timeout);
+ int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
@@ -615,6 +618,10 @@ struct iwl_trans_ops {
int (*set_reduce_power)(struct iwl_trans *trans,
const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
+ int (*imr_dma_data)(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr,
+ u32 byte_cnt);
+
};
/**
@@ -722,6 +729,26 @@ struct iwl_self_init_dram {
};
/**
+ * struct iwl_imr_data - imr dram data used during debug process
+ * @imr_enable: imr enable status received from fw
+ * @imr_size: imr dram size received from fw
+ * @sram_addr: sram address from debug tlv
+ * @sram_size: sram size from debug tlv
+ * @imr2sram_remainbyte: size remaining after each DMA transfer
+ * @imr_curr_addr: current dst address used during dma transfer
+ * @imr_base_addr: imr address received from fw
+ */
+struct iwl_imr_data {
+ u32 imr_enable;
+ u32 imr_size;
+ u32 sram_addr;
+ u32 sram_size;
+ u32 imr2sram_remainbyte;
+ u64 imr_curr_addr;
+ __le64 imr_base_addr;
+};
+
+/**
* struct iwl_trans_debug - transport debug related data
*
* @n_dest_reg: num of reg_ops in %dbg_dest_tlv
@@ -785,6 +812,7 @@ struct iwl_trans_debug {
u32 ucode_preset;
bool restart_required;
u32 last_tp_resetfw;
+ struct iwl_imr_data imr_data;
};
struct iwl_dma_ptr {
@@ -904,6 +932,7 @@ struct iwl_txq {
* @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -929,6 +958,8 @@ struct iwl_trans_txqs {
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
+
+ u8 queue_alloc_cmd_ver;
};
/**
@@ -1220,9 +1251,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int wdg_timeout)
+ u32 flags, u32 sta_mask, u8 tid,
+ int size, unsigned int wdg_timeout)
{
might_sleep();
@@ -1234,8 +1264,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
- return trans->ops->txq_alloc(trans, flags, sta_id, tid,
- cmd_id, size, wdg_timeout);
+ return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
+ size, wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
@@ -1368,6 +1398,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
+static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr,
+ u32 byte_cnt)
+{
+ if (trans->ops->imr_dma_data)
+ return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
+ return 0;
+}
+
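(Annotation: iwl_trans_write_imr_mem() above uses the usual optional-ops convention — transports without an IMR DMA engine leave the hook NULL and the wrapper degrades to a successful no-op. A tiny standalone sketch of that dispatch shape, with illustrative names:)

#include <stdio.h>

struct trans_ops {
	/* optional hook; NULL when the transport has no IMR DMA engine */
	int (*imr_dma_data)(unsigned int dst, unsigned long long src,
			    unsigned int len);
};

static int write_imr_mem(const struct trans_ops *ops, unsigned int dst,
			 unsigned long long src, unsigned int len)
{
	if (ops->imr_dma_data)
		return ops->imr_dma_data(dst, src, len);
	return 0; /* silently succeed when unsupported */
}

int main(void)
{
	struct trans_ops none = { 0 };

	printf("ret=%d\n", write_imr_mem(&none, 0, 0, 16));
	return 0;
}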
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
u32 value;
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index d9733aaf6f6e..357f14626cf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -146,6 +146,7 @@ struct iwl_mei_filters {
* @csme_taking_ownership: true when CSME is taking ownership. Used to remember
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
+ * @link_prot_state: true when link protection is in PASSIVE state
* @csa_throttle_end_wk: used when &csa_throttled is true
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
@@ -165,6 +166,7 @@ struct iwl_mei {
bool amt_enabled;
bool csa_throttled;
bool csme_taking_ownership;
+ bool link_prot_state;
struct delayed_work csa_throttle_end_wk;
spinlock_t data_q_lock;
@@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
- dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
- ret);
mem->ctrl = NULL;
return ret;
@@ -312,7 +312,7 @@ static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
memcpy(q_head + wr, hdr, tx_sz);
} else {
memcpy(q_head + wr, hdr, q_sz - wr);
- memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
+ memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
}
WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
@@ -432,7 +432,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
u32 q_sz;
u32 rd;
u32 wr;
- void *q_head;
+ u8 *q_head;
if (!iwl_mei_global_cldev)
return;
@@ -493,6 +493,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
if (cb_tx) {
struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
+ memset(cb_hdr, 0, sizeof(*cb_hdr));
cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
@@ -669,6 +670,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+ mei->link_prot_state = status->link_prot_state;
+
/*
* Update the Rfkill state in case the host does not own the device:
* if we are in Link Protection, ask to not touch the device, else,
@@ -1017,6 +1020,8 @@ static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
/* We need enough room for the WiFi header + SNAP + IV */
skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
+ if (!skb)
+ continue;
skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
ethhdr = skb_push(skb, sizeof(*ethhdr));
@@ -1663,9 +1668,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* we have already a SAP connection */
- if (iwl_mei_is_connected())
+ if (iwl_mei_is_connected()) {
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state);
+ }
}
ret = 0;
@@ -1784,6 +1791,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
#endif /* CONFIG_DEBUG_FS */
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
/*
* iwl_mei_probe - the probe function called by the mei bus enumeration
*
@@ -1795,6 +1804,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
static int iwl_mei_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
+ int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
struct iwl_mei *mei;
int ret;
@@ -1812,15 +1822,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
- /*
- * The CSME firmware needs to boot the internal WLAN client. Wait here
- * so that the DMA map request will succeed.
- */
- msleep(20);
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (!ret)
+ break;
+ /*
+ * The CSME firmware needs to boot the internal WLAN client.
+ * This can take time in certain configurations (usually
+ * upon resume and when the whole CSME firmware is shut down
+ * during suspend).
+ *
+ * Wait a bit before retrying and hope we'll succeed next time.
+ */
- ret = iwl_mei_alloc_shared_mem(cldev);
- if (ret)
+ dev_dbg(&cldev->dev,
+ "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+ ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+ msleep(100);
+ alloc_retry--;
+ } while (alloc_retry);
+
+ if (ret) {
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
goto free;
+ }
iwl_mei_init_shared_mem(mei);
@@ -1980,7 +2006,11 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
}
static const struct mei_cl_device_id iwl_mei_tbl[] = {
- { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+ {
+ .name = KBUILD_MODNAME,
+ .uuid = MEI_WLAN_UUID,
+ .version = MEI_CL_VERSION_ANY,
+ },
/* required last entry */
{ }
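(Annotation: the probe-time change above replaces a fixed 20 ms sleep with a bounded retry loop, since CSME may take much longer to boot its internal WLAN client around suspend/resume. A standalone sketch of the bounded-retry shape; the allocator stub is illustrative only:)

#include <errno.h>
#include <stdio.h>

#define ALLOC_RETRY_MAX 3

static int try_alloc(int *attempts_left)
{
	/* stub: succeed only on the final attempt */
	return (--(*attempts_left) == 0) ? 0 : -ENOMEM;
}

int main(void)
{
	int budget = ALLOC_RETRY_MAX;
	int retry = ALLOC_RETRY_MAX;
	int ret;

	do {
		ret = try_alloc(&budget);
		if (!ret)
			break;
		fprintf(stderr, "alloc failed: %d, attempt %d / %d\n",
			ret, retry, ALLOC_RETRY_MAX);
		/* the real code sleeps ~100 ms here before retrying */
		retry--;
	} while (retry);

	printf("final ret=%d\n", ret);
	return 0;
}

(As in the patch, ret survives the loop, so a still-failing allocation is reported once at error level after the retries are exhausted.)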
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 5f966af69720..3472167c8370 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -102,8 +102,8 @@ static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
* src IP address - 4 bytes
* target MAC addess - 6 bytes
*/
- target_ip = (void *)((u8 *)(arp + 1) +
- ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+ target_ip = (const void *)((const u8 *)(arp + 1) +
+ ETH_ALEN + sizeof(__be32) + ETH_ALEN);
/*
* ARP request is forwarded to ME only if IP address match in the
@@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
bool match;
if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
- !pskb_may_pull(skb, skb_network_offset(skb) +
- sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+ !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
return false;
iphdrlen = ip_hdrlen(skb);
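(Annotation: the dropped check pulled sizeof(ip_hdrlen(skb) - sizeof(*iphdr)) bytes — i.e. the size of the arithmetic result's type, a constant 4, rather than the header length — so IP options were never guaranteed to be in the linear area. A short standalone demonstration of the sizeof pitfall, with illustrative values:)

#include <stdio.h>

int main(void)
{
	unsigned int ip_hdrlen = 24; /* 20-byte header + 4 bytes of options */
	unsigned int iphdr_sz = 20;

	/* the buggy expression measures the result type, not the length */
	printf("buggy pull length:   %zu\n", sizeof(ip_hdrlen - iphdr_sz));
	printf("correct pull length: %u\n", ip_hdrlen);
	return 0;
}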
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index 11e3009121cc..be1456dea484 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -298,7 +298,7 @@ struct iwl_sap_hdr {
__le16 type;
__le16 len;
__le32 seq_num;
- u8 payload[0];
+ u8 payload[];
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 9b194cb8d65e..ee3c8a786199 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -106,7 +106,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!chanctx_conf ||
chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
@@ -283,7 +283,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
@@ -311,7 +311,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = IEEE80211_SMPS_DYNAMIC;
/* relax SMPS constraints for next association */
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
if (mvmvif->phy_ctxt &&
@@ -382,7 +382,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
* we are not associated
*/
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+ mvm->cfg->bt_shared_single_ant || !vif->cfg.assoc ||
le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b400867e94f0..919b1f478b4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -31,7 +31,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
mvmvif->rekey_data.akm = data->akm & 0xFF;
mvmvif->rekey_data.replay_ctr =
- cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+ cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
mutex_unlock(&mvm->mutex);
@@ -453,8 +453,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_TSC_RSC_PARAM,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -672,8 +671,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int i, err;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_PATTERNS,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (!wowlan->n_patterns)
@@ -733,7 +731,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
rcu_read_lock();
- ctx = rcu_dereference(vif->chanctx_conf);
+ ctx = rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON(!ctx)) {
rcu_read_unlock();
return -EINVAL;
@@ -751,7 +749,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* add back the MAC */
mvmvif->uploaded = false;
- if (WARN_ON(!vif->bss_conf.assoc))
+ if (WARN_ON(!vif->cfg.assoc))
return -EINVAL;
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
@@ -917,12 +915,11 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
wowlan_config_cmd->is_11n_connection =
- ap_sta->ht_cap.ht_supported;
+ ap_sta->deflink.ht_cap.ht_supported;
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_CONFIGURATION, 0) < 6) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1017,8 +1014,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_TKIP_PARAM,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
struct wowlan_key_tkip_data tkip_data = {};
int size;
@@ -1058,7 +1054,6 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
};
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- IWL_ALWAYS_LONG_GROUP,
WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN);
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
@@ -1089,7 +1084,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
/* skip the sta_id at the beginning */
_kek_kck_cmd = (void *)
- ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+ ((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
}
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
@@ -1432,7 +1427,7 @@ struct iwl_wowlan_status_data {
u8 flags;
} igtk;
- u8 wake_packet[];
+ u8 *wake_packet;
};
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
@@ -1485,11 +1480,11 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
- if (status->wake_packet_bufsize) {
+ if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
- struct ieee80211_hdr *hdr = (void *)pktdata;
+ const struct ieee80211_hdr *hdr = (const void *)pktdata;
int truncated = pktlen - pktsize;
/* this would be a firmware bug */
@@ -1949,57 +1944,6 @@ out:
return true;
}
-/* Occasionally, templates would be nice. This is one of those times ... */
-#define iwl_mvm_parse_wowlan_status_common(_ver) \
-static struct iwl_wowlan_status_data * \
-iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
- struct iwl_wowlan_status_ ##_ver *data,\
- int len) \
-{ \
- struct iwl_wowlan_status_data *status; \
- int data_size, i; \
- \
- if (len < sizeof(*data)) { \
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return ERR_PTR(-EIO); \
- } \
- \
- data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
- if (len != sizeof(*data) + data_size) { \
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return ERR_PTR(-EIO); \
- } \
- \
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \
- if (!status) \
- return ERR_PTR(-ENOMEM); \
- \
- /* copy all the common fields */ \
- status->replay_ctr = le64_to_cpu(data->replay_ctr); \
- status->pattern_number = le16_to_cpu(data->pattern_number); \
- status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
- for (i = 0; i < 8; i++) \
- status->qos_seq_ctr[i] = \
- le16_to_cpu(data->qos_seq_ctr[i]); \
- status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
- status->num_of_gtk_rekeys = \
- le32_to_cpu(data->num_of_gtk_rekeys); \
- status->received_beacons = le32_to_cpu(data->received_beacons); \
- status->wake_packet_length = \
- le32_to_cpu(data->wake_packet_length); \
- status->wake_packet_bufsize = \
- le32_to_cpu(data->wake_packet_bufsize); \
- memcpy(status->wake_packet, data->wake_packet, \
- status->wake_packet_bufsize); \
- \
- return status; \
-}
-
-iwl_mvm_parse_wowlan_status_common(v6)
-iwl_mvm_parse_wowlan_status_common(v7)
-iwl_mvm_parse_wowlan_status_common(v9)
-iwl_mvm_parse_wowlan_status_common(v12)
-
static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_gtk_status_v2 *data)
{
@@ -2059,6 +2003,96 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
((u64)ipn[0] << 40);
}
+static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 i;
+
+ if (len < sizeof(*data)) {
+ IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ return;
+ }
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, &data->gtk[0]);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
+
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ status->qos_seq_ctr[i] =
+ le16_to_cpu(data->qos_seq_ctr[i]);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+}
+
+/* Occasionally, templates would be nice. This is one of those times ... */
+#define iwl_mvm_parse_wowlan_status_common(_ver) \
+static struct iwl_wowlan_status_data * \
+iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
+ struct iwl_wowlan_status_ ##_ver *data,\
+ int len) \
+{ \
+ struct iwl_wowlan_status_data *status; \
+ int data_size, i; \
+ \
+ if (len < sizeof(*data)) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
+ if (len != sizeof(*data) + data_size) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ status = kzalloc(sizeof(*status), GFP_KERNEL); \
+ if (!status) \
+ return NULL; \
+ \
+ /* copy all the common fields */ \
+ status->replay_ctr = le64_to_cpu(data->replay_ctr); \
+ status->pattern_number = le16_to_cpu(data->pattern_number); \
+ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
+ for (i = 0; i < 8; i++) \
+ status->qos_seq_ctr[i] = \
+ le16_to_cpu(data->qos_seq_ctr[i]); \
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
+ status->num_of_gtk_rekeys = \
+ le32_to_cpu(data->num_of_gtk_rekeys); \
+ status->received_beacons = le32_to_cpu(data->received_beacons); \
+ status->wake_packet_length = \
+ le32_to_cpu(data->wake_packet_length); \
+ status->wake_packet_bufsize = \
+ le32_to_cpu(data->wake_packet_bufsize); \
+ if (status->wake_packet_bufsize) { \
+ status->wake_packet = \
+ kmemdup(data->wake_packet, \
+ status->wake_packet_bufsize, \
+ GFP_KERNEL); \
+ if (!status->wake_packet) { \
+ kfree(status); \
+ return NULL; \
+ } \
+ } else { \
+ status->wake_packet = NULL; \
+ } \
+ \
+ return status; \
+}
+
+iwl_mvm_parse_wowlan_status_common(v6)
+iwl_mvm_parse_wowlan_status_common(v7)
+iwl_mvm_parse_wowlan_status_common(v9)
+iwl_mvm_parse_wowlan_status_common(v12)
+
static struct iwl_wowlan_status_data *
iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
{
@@ -2074,8 +2108,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
};
int ret, len;
u8 notif_ver;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
@@ -2103,7 +2136,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
@@ -2134,7 +2167,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc);
@@ -2147,7 +2180,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
* difference is only in a few not used (reserved) fields.
*/
status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc);
@@ -2159,7 +2192,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc);
@@ -2171,7 +2204,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
IWL_ERR(mvm,
"Firmware advertises unknown WoWLAN status response %d!\n",
notif_ver);
- status = ERR_PTR(-EIO);
+ status = NULL;
}
out_free_resp:
@@ -2179,38 +2212,16 @@ out_free_resp:
return status;
}
-static struct iwl_wowlan_status_data *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
-{
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- OFFLOADS_QUERY_CMD,
- IWL_FW_CMD_VER_UNKNOWN);
- __le32 station_id = cpu_to_le32(sta_id);
- u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
-
- if (!mvm->net_detect) {
- /* only for tracing for now */
- int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
- cmd_size, &station_id);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
- }
-
- return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
-}
-
/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_wowlan_status_data *status;
int i;
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
- if (IS_ERR(status))
+ if (!status)
goto out_unlock;
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
@@ -2219,7 +2230,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
/* still at hard-coded place 0 for D3 image */
mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
if (!mvm_ap_sta)
- goto out_free;
+ goto out_unlock;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status->qos_seq_ctr[i];
@@ -2242,11 +2253,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
- kfree(status);
return keep;
-out_free:
- kfree(status);
out_unlock:
mutex_unlock(&mvm->mutex);
return false;
@@ -2255,16 +2263,16 @@ out_unlock:
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
IWL_SCAN_MAX_PROFILES)
-struct iwl_mvm_nd_query_results {
+struct iwl_mvm_nd_results {
u32 matched_profiles;
u8 matches[ND_QUERY_BUF_LEN];
};
static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *results)
+ struct iwl_mvm_nd_results *results)
{
- struct iwl_scan_offload_profiles_query *query;
+ struct iwl_scan_offload_match_info *query;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
.flags = CMD_WANT_SKB,
@@ -2281,7 +2289,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
- query_len = sizeof(struct iwl_scan_offload_profiles_query);
+ query_len = sizeof(struct iwl_scan_offload_match_info);
matches_len = sizeof(struct iwl_scan_offload_profile_match) *
max_profiles;
} else {
@@ -2312,7 +2320,7 @@ out_free_resp:
}
static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *query,
+ struct iwl_mvm_nd_results *results,
int idx)
{
int n_chans = 0, i;
@@ -2320,13 +2328,13 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
- (struct iwl_scan_offload_profile_match *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
n_chans += hweight8(matches[idx].matching_channels[i]);
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
- (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
n_chans += hweight8(matches[idx].matching_channels[i]);
@@ -2336,7 +2344,7 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
}
static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *query,
+ struct iwl_mvm_nd_results *results,
struct cfg80211_wowlan_nd_match *match,
int idx)
{
@@ -2345,7 +2353,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
- (struct iwl_scan_offload_profile_match *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
@@ -2353,7 +2361,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
- (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
@@ -2362,25 +2370,50 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
}
}
+/**
+ * enum iwl_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF was received
+ */
+enum iwl_d3_notif {
+ IWL_D3_NOTIF_WOWLAN_INFO = BIT(0),
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1),
+ IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2),
+ IWL_D3_ND_MATCH_INFO = BIT(3),
+ IWL_D3_NOTIF_D3_END_NOTIF = BIT(4)
+};
+
+/* manage d3 resume data */
+struct iwl_d3_data {
+ struct iwl_wowlan_status_data *status;
+ bool test;
+ u32 d3_end_flags;
+ u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
+ u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
+ struct iwl_mvm_nd_results *nd_results;
+ bool nd_results_valid;
+};
+
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
{
struct cfg80211_wowlan_nd_info *net_detect = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- struct iwl_wowlan_status_data *status;
- struct iwl_mvm_nd_query_results query;
unsigned long matched_profiles;
u32 reasons = 0;
int i, n_matches, ret;
- status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
- if (!IS_ERR(status)) {
- reasons = status->wakeup_reasons;
- kfree(status);
- }
+ if (WARN_ON(!d3_data || !d3_data->status))
+ goto out;
+
+ reasons = d3_data->status->wakeup_reasons;
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
@@ -2388,13 +2421,22 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
goto out;
- ret = iwl_mvm_netdetect_query_results(mvm, &query);
- if (ret || !query.matched_profiles) {
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0)) {
+ IWL_INFO(mvm, "Query FW for ND results\n");
+ ret = iwl_mvm_netdetect_query_results(mvm, d3_data->nd_results);
+
+ } else {
+ IWL_INFO(mvm, "Notification based ND results\n");
+ ret = d3_data->nd_results_valid ? 0 : -1;
+ }
+
+ if (ret || !d3_data->nd_results->matched_profiles) {
wakeup_report = NULL;
goto out;
}
- matched_profiles = query.matched_profiles;
+ matched_profiles = d3_data->nd_results->matched_profiles;
if (mvm->n_nd_match_sets) {
n_matches = hweight_long(matched_profiles);
} else {
@@ -2411,7 +2453,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct cfg80211_wowlan_nd_match *match;
int idx, n_channels = 0;
- n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
+ n_channels = iwl_mvm_query_num_match_chans(mvm,
+ d3_data->nd_results,
+ i);
match = kzalloc(struct_size(match, channels, n_channels),
GFP_KERNEL);
@@ -2431,7 +2475,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (mvm->n_nd_channels < n_channels)
continue;
- iwl_mvm_query_set_freqs(mvm, &query, match, i);
+ iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
}
out_report_nd:
@@ -2511,16 +2555,317 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
return false;
}
+/*
+ * This function assumes:
+ * 1. The mutex is already held.
+ * 2. The callee functions unlock the mutex.
+ */
+static bool
+iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* if FW uses status notification, status shouldn't be NULL here */
+ if (!d3_data->status) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : mvmvif->ap_sta_id;
+
+ d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
+ }
+
+ if (mvm->net_detect) {
+ iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
+ } else {
+ bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
+
+ return keep;
+ }
+ return false;
+}
+
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+ IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
+static int iwl_mvm_wowlan_store_wake_pkt(struct iwl_mvm *mvm,
+ struct iwl_wowlan_wake_pkt_notif *notif,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 data_size, packet_len = le32_to_cpu(notif->wake_packet_length);
+
+ if (len < sizeof(*notif)) {
+ IWL_ERR(mvm, "Invalid WoWLAN wake packet notification!\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!status)) {
+ IWL_ERR(mvm, "Got wake packet notification but wowlan status data is NULL\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!(status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT))) {
+ IWL_ERR(mvm, "Got wakeup packet but wakeup reason is %x\n",
+ status->wakeup_reasons);
+ return -EIO;
+ }
+
+ data_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, wake_packet);
+
+ /* data_size got the padding from the notification, remove it. */
+ if (packet_len < data_size)
+ data_size = packet_len;
+
+ status->wake_packet = kmemdup(notif->wake_packet, data_size,
+ GFP_ATOMIC);
+
+ if (!status->wake_packet)
+ return -ENOMEM;
+
+ status->wake_packet_length = packet_len;
+ status->wake_packet_bufsize = data_size;
+
+ return 0;
+}
+
+static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data,
+ struct iwl_scan_offload_match_info *notif,
+ u32 len)
+{
+ struct iwl_wowlan_status_data *status = d3_data->status;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_nd_results *results = d3_data->nd_results;
+ size_t i, matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ iwl_umac_scan_get_max_profiles(mvm->fw);
+
+ if (IS_ERR_OR_NULL(vif))
+ return;
+
+ if (len < sizeof(struct iwl_scan_offload_match_info)) {
+ IWL_ERR(mvm, "Invalid scan match info notification\n");
+ return;
+ }
+
+ if (!mvm->net_detect) {
+ IWL_ERR(mvm, "Unexpected scan match info notification\n");
+ return;
+ }
+
+ if (!status || status->wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ IWL_ERR(mvm,
+ "Ignore scan match info notification: no reason\n");
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(notif->n_scans_done);
+#endif
+
+ results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+ IWL_INFO(mvm, "number of matched profiles=%u\n",
+ results->matched_profiles);
+
+ if (results->matched_profiles) {
+ memcpy(results->matches, notif->matches, matches_len);
+ d3_data->nd_results_valid = true;
+ }
+
+ /* no scan should be active at this point */
+ mvm->scan_status = 0;
+ for (i = 0; i < mvm->max_scans; i++)
+ mvm->scan_uid_status[i] = 0;
+}
+
+static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_d3_data *d3_data = data;
+ u32 len;
+ int ret;
+
+ switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+ struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ /* We might get two notifications due to dual bss */
+ IWL_DEBUG_WOWLAN(mvm,
+ "Got additional wowlan info notification\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
+ len);
+ if (d3_data->status &&
+ d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+ /* We are supposed to get also wake packet notif */
+ d3_data->notif_expected |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+ struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+ /* We shouldn't get two wake packet notifications */
+ IWL_ERR(mvm,
+ "Got additional wowlan wake packet notification\n");
+ } else {
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ len = iwl_rx_packet_payload_len(pkt);
+ ret = iwl_mvm_wowlan_store_wake_pkt(mvm, notif,
+ d3_data->status,
+ len);
+ if (ret)
+ IWL_ERR(mvm,
+ "Can't parse WOWLAN_WAKE_PKT_NOTIFICATION\n");
+ }
+
+ break;
+ }
+ case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+ struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_ND_MATCH_INFO) {
+ IWL_ERR(mvm,
+ "Got additional netdetect match info\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_ND_MATCH_INFO;
+
+ /* explicitly set this in the 'expected' as well */
+ d3_data->notif_expected |= IWL_D3_ND_MATCH_INFO;
+
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_nd_match_info_handler(mvm, d3_data, notif, len);
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+ struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data;
+
+ d3_data->d3_end_flags = __le32_to_cpu(notif->flags);
+ d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return d3_data->notif_received == d3_data->notif_expected;
+}
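The return value implements a simple bitmask handshake: mandatory notifications are pre-set in notif_expected, while optional ones (the wake packet and the netdetect match info) add themselves to both masks on arrival, so the equality check still terminates the wait. A reduced sketch of the scheme, with illustrative flag values standing in for the IWL_D3_NOTIF_* bits:

	enum {
		D3_NOTIF_WOWLAN_INFO	= BIT(0),
		D3_NOTIF_WAKE_PKT	= BIT(1),
		D3_NOTIF_ND_MATCH	= BIT(2),
		D3_NOTIF_D3_END		= BIT(3),
	};

	/* an optional notification arrives: mark it both received and
	 * expected so it cannot unbalance the completion test */
	received |= D3_NOTIF_ND_MATCH;
	expected |= D3_NOTIF_ND_MATCH;

	done = (received == expected);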
+
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+{
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
+ };
+ bool reset = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ if (ret)
+ return ret;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ return -ENOENT;
+ }
+
+ /*
+ * We should trigger the resume flow using the command only for the
+ * 22000 family; AX210 and above don't need it since they have the
+ * doorbell interrupt.
+ */
+ if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
+ fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) {
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
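For readability, the gating above can be summarized as a predicate; this is an illustrative reduction using the driver's existing symbols, not a proposed helper:

	/* Sketch: only pre-AX210 devices need the explicit D0I3_END_CMD;
	 * AX210 and later resume via the doorbell interrupt. */
	static bool needs_d0i3_end_cmd(const struct iwl_mvm *mvm)
	{
		return mvm->trans->trans_cfg->device_family <=
				IWL_DEVICE_FAMILY_22000 &&
		       fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
	}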
+
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
+
+static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data)
+{
+ static const u16 d3_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+ WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ struct iwl_notification_wait wait_d3_notif;
+ int ret;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+
+ ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
+ return ret;
+ }
+
+ return iwl_wait_notification(&mvm->notif_wait, &wait_d3_notif,
+ IWL_MVM_D3_NOTIF_TIMEOUT);
+}
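The ordering in this helper is load-bearing: the waiter is registered before the firmware is resumed so a notification racing with the resume command cannot be lost, and it is removed explicitly on the error path because iwl_wait_notification() never runs to clean it up. Condensed pattern, with trigger_resume() as a hypothetical stand-in for iwl_mvm_resume_firmware():

	struct iwl_notification_wait wait;

	iwl_init_notification_wait(&mvm->notif_wait, &wait,
				   notif_ids, ARRAY_SIZE(notif_ids),
				   handler_fn, handler_data);

	ret = trigger_resume(mvm);		/* hypothetical */
	if (ret)
		/* the wait never starts, so clean up here */
		iwl_remove_notification(&mvm->notif_wait, &wait);
	else
		ret = iwl_wait_notification(&mvm->notif_wait, &wait,
					    timeout);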
+
+static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_WAKE_PKT_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0);
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
int ret = 1;
- enum iwl_d3_status d3_status;
- bool keep = false;
+ struct iwl_mvm_nd_results results = {};
+ struct iwl_d3_data d3_data = {
+ .test = test,
+ .notif_expected =
+ IWL_D3_NOTIF_WOWLAN_INFO |
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ .nd_results_valid = false,
+ .nd_results = &results,
+ };
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+ bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ bool keep = false;
mutex_lock(&mvm->mutex);
@@ -2544,54 +2889,30 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
- if (ret)
- goto err;
-
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
- goto err;
- }
-
- if (d0i3_first) {
- struct iwl_host_cmd cmd = {
- .id = D0I3_END_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
- };
- int len;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret < 0) {
- IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
- ret);
+ if (resume_notif_based) {
+ d3_data.status = kzalloc(sizeof(*d3_data.status), GFP_KERNEL);
+ if (!d3_data.status) {
+ IWL_ERR(mvm, "Failed to allocate wowlan status\n");
+ ret = -ENOMEM;
goto err;
}
- switch (mvm->cmd_ver.d0i3_resp) {
- case 0:
- break;
- case 1:
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
- if (len != sizeof(u32)) {
- IWL_ERR(mvm,
- "Error with D0I3_END_CMD response size (%d)\n",
- len);
- goto err;
- }
- if (IWL_D0I3_RESET_REQUIRE &
- le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
- iwl_write32(mvm->trans, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- iwl_free_resp(&cmd);
- }
- break;
- default:
- WARN_ON(1);
- }
+
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ if (ret)
+ goto err;
+ } else {
+ ret = iwl_mvm_resume_firmware(mvm, test);
+ if (ret < 0)
+ goto err;
}
/* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ /* when a reset is required we can't send the following commands */
+ if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
+ goto query_wakeup_reasons;
+
/*
* Query the current location and source from the D3 firmware so we
* can play it back when we re-initialize the D0 firmware
@@ -2605,41 +2926,36 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* Re-configure default SAR profile */
iwl_mvm_sar_select_profile(mvm, 1, 1);
- if (mvm->net_detect) {
+ if (mvm->net_detect && unified_image) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that
* fails, continue and try to get the wake-up reasons,
* but trigger a HW restart by keeping a failure code
* in ret.
*/
- if (unified_image)
- ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
- false);
-
- iwl_mvm_query_netdetect_reasons(mvm, vif);
- /* has unlocked the mutex, so skip that */
- goto out;
- } else {
- keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
-#endif
- /* has unlocked the mutex, so skip that */
- goto out_iterate;
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+ false);
}
+query_wakeup_reasons:
+ keep = iwl_mvm_choose_query_wakeup_reasons(mvm, vif, &d3_data);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+
err:
- iwl_mvm_free_nd(mvm);
mutex_unlock(&mvm->mutex);
+out:
+ if (d3_data.status)
+ kfree(d3_data.status->wake_packet);
+ kfree(d3_data.status);
+ iwl_mvm_free_nd(mvm);
-out_iterate:
- if (!test)
+ if (!d3_data.test && !mvm->net_detect)
ieee80211_iterate_active_interfaces_mtx(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter,
+ keep ? vif : NULL);
-out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
/* no need to reset the device in unified images, if successful */
@@ -2648,9 +2964,14 @@ out:
if (d0i3_first)
return 0;
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
- if (!ret)
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0)) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (!ret)
+ return 0;
+ } else if (!(d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) {
return 0;
+ }
}
/*
@@ -2704,7 +3025,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
/* start pseudo D3 */
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (err > 0)
err = -EINVAL;
@@ -2760,7 +3083,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
__iwl_mvm_resume(mvm, true);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
iwl_mvm_resume_tcm(mvm);
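The wiphy_lock()/wiphy_unlock() pairs added to both test hooks follow the lock order mac80211 itself uses around suspend/resume: RTNL first, then the wiphy mutex, released in reverse. A minimal sketch of the ordering the pseudo-D3 hooks now assume:

	static void pseudo_d3_locked(struct iwl_mvm *mvm)
	{
		rtnl_lock();
		wiphy_lock(mvm->hw->wiphy);
		/* __iwl_mvm_suspend() / __iwl_mvm_resume() run here */
		wiphy_unlock(mvm->hw->wiphy);
		rtnl_unlock();
	}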
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 7d9faeffd154..78d8b37eb71a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -234,7 +234,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
}
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (chanctx_conf)
pos += scnprintf(buf+pos, bufsz-pos,
"idle rx chains %d, active rx chains: %d\n",
@@ -597,7 +597,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
/* make sure the channel context is assigned */
if (!chanctx_conf) {
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index fb4920b01dbb..1e8123140973 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/vmalloc.h>
+#include <linux/err.h>
#include <linux/ieee80211.h>
#include <linux/netdevice.h>
@@ -425,19 +426,20 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
return -EINVAL;
/* only change from debug set <-> debug unset */
- if ((amsdu_len && mvmsta->orig_amsdu_len) ||
- (!!amsdu_len && mvmsta->orig_amsdu_len))
+ if (amsdu_len && mvmsta->orig_amsdu_len)
return -EBUSY;
if (amsdu_len) {
- mvmsta->orig_amsdu_len = sta->max_amsdu_len;
- sta->max_amsdu_len = amsdu_len;
- for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++)
- sta->max_tid_amsdu_len[i] = amsdu_len;
+ mvmsta->orig_amsdu_len = sta->cur->max_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = amsdu_len;
+ for (i = 0; i < ARRAY_SIZE(sta->deflink.agg.max_tid_amsdu_len); i++)
+ sta->deflink.agg.max_tid_amsdu_len[i] = amsdu_len;
} else {
- sta->max_amsdu_len = mvmsta->orig_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = mvmsta->orig_amsdu_len;
mvmsta->orig_amsdu_len = 0;
}
+
return count;
}
@@ -451,7 +453,7 @@ static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file,
char buf[32];
int pos;
- pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len);
+ pos = scnprintf(buf, sizeof(buf), "current %d ", sta->cur->max_amsdu_len);
pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n",
mvmsta->orig_amsdu_len);
@@ -1233,7 +1235,7 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
mvm->hw->extra_beacon_tailroom = len;
- beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0);
if (!beacon)
goto out_err;
@@ -1369,189 +1371,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
return count;
}
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- const struct iwl_fw_bcast_filter *filter;
- char *buf;
- int bufsz = 1024;
- int i, j, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
- filter = &cmd.filters[i];
-
- ADD_TEXT("Filter [%d]:\n", i);
- ADD_TEXT("\tDiscard=%d\n", filter->discard);
- ADD_TEXT("\tFrame Type: %s\n",
- filter->frame_type ? "IPv4" : "Generic");
-
- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
- const struct iwl_fw_bcast_filter_attr *attr;
-
- attr = &filter->attrs[j];
- if (!attr->mask)
- break;
-
- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
- j, attr->offset,
- attr->offset_type ? "IP End" :
- "Payload Start",
- be32_to_cpu(attr->mask),
- be32_to_cpu(attr->val),
- le16_to_cpu(attr->reserved1));
- }
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- int pos, next_pos;
- struct iwl_fw_bcast_filter filter = {};
- struct iwl_bcast_filter_cmd cmd;
- u32 filter_id, attr_id, mask, value;
- int err = 0;
-
- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
- &filter.frame_type, &pos) != 3)
- return -EINVAL;
-
- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
- return -EINVAL;
-
- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
- attr_id++) {
- struct iwl_fw_bcast_filter_attr *attr =
- &filter.attrs[attr_id];
-
- if (pos >= count)
- break;
-
- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
- &attr->offset, &attr->offset_type,
- &mask, &value, &next_pos) != 4)
- return -EINVAL;
-
- attr->mask = cpu_to_be32(mask);
- attr->val = cpu_to_be32(value);
- if (mask)
- filter.num_attrs++;
-
- pos += next_pos;
- }
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
- &filter, sizeof(filter));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- char *buf;
- int bufsz = 1024;
- int i, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
- i, mac->default_discard, mac->attached_filters);
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_bcast_filter_cmd cmd;
- struct iwl_fw_bcast_mac mac = {};
- u32 mac_id, attached_filters;
- int err = 0;
-
- if (!mvm->bcast_filters)
- return -ENOENT;
-
- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
- &attached_filters) != 3)
- return -EINVAL;
-
- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
- mac.default_discard > 1 ||
- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
- return -EINVAL;
-
- mac.attached_filters = cpu_to_le16(attached_filters);
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
- &mac, sizeof(mac));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-#endif
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1661,7 +1480,7 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
.mvm = mvm,
};
u16 wait_cmds[] = {
- iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
};
u32 aid;
int ret;
@@ -1696,8 +1515,9 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
wait_cmds, ARRAY_SIZE(wait_cmds),
iwl_mvm_sniffer_apply, &apply);
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
- DATA_PATH_GROUP, 0), 0,
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ 0,
sizeof(he_mon_cmd), &he_mon_cmd);
/* no need to really wait, we already did anyway */
@@ -1881,11 +1701,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
#ifdef CONFIG_ACPI
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
@@ -1914,8 +1729,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
- DEBUG_GROUP, 0);
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
/* Take care of alignment of both the position and the length */
@@ -1945,7 +1759,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
goto out;
}
- ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
*ppos += ret;
out:
@@ -1969,8 +1783,7 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
- DEBUG_GROUP, 0);
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
if (*ppos & 0x3 || count < 4) {
op = DEBUG_MEM_OP_WRITE_BYTES;
@@ -2045,7 +1858,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
struct dentry *bcast_dir __maybe_unused;
- char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
@@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
- bcast_dir = debugfs_create_dir("bcast_filtering",
- mvm->debugfs_dir);
-
- debugfs_create_bool("override", 0600, bcast_dir,
- &mvm->dbgfs_bcast_filtering.override);
-
- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
- bcast_dir, 0600);
- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
- bcast_dir, 0600);
- }
-#endif
-
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
@@ -2142,6 +1939,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits, which removes the target).
*/
- snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
- debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
+ if (!IS_ERR(mvm->debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir,
+ buf);
+ }
}
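The guard added above matters because debugfs_create_dir() reports failure as an ERR_PTR rather than NULL, so d_parent may only be dereferenced behind an IS_ERR() check. A self-contained sketch of the same guard, with placeholder names:

	struct dentry *dir = debugfs_create_dir("example", NULL);

	if (!IS_ERR(dir)) {
		char buf[100];

		snprintf(buf, sizeof(buf), "../../%pd2", dir->d_parent);
		debugfs_create_symlink("alias", NULL, buf);
	}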
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 628aee634b2a..8c5b97fb1941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -67,7 +67,7 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* the TK is already configured for this station, so it
* shouldn't be set again here.
*/
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
!memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -222,7 +222,7 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
else
eth_broadcast_addr(cmd->range_req_bssid);
@@ -254,7 +254,7 @@ static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
/* AP's TSF is only relevant if associated */
@@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_160:
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver >= 13) {
@@ -503,7 +503,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_ftm_put_target_common(mvm, peer, target);
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
!memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v5,
.len[0] = sizeof(cmd_v5),
@@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v7 cmd_v7;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v7,
.len[0] = sizeof(cmd_v7),
@@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v8 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v9 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -693,7 +693,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
target->cipher = entry->cipher;
memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
!memcmp(vif->bss_conf.bssid, target->bssid,
sizeof(target->bssid)))
ieee80211_iter_keys(mvm->hw, vif, iter, target);
@@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v11 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v12 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v13 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
@@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
iwl_mvm_ftm_reset(mvm);
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
- LOCATION_GROUP, 0),
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "failed to abort FTM process\n");
}
@@ -1106,10 +1105,10 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
- IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
- IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
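The repeated iwl_cmd_id() → WIDE_ID() conversions in this file are mechanical: with the version argument fixed at 0, both forms encode the same wide command ID. Assumed encoding, shown for reference only (group in the upper byte, opcode in the lower, as laid out in the iwlwifi API headers):

	#define WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

	/* iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0)
	 *	== WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD) */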
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index bda6da7d988e..e862d1b43f21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <net/cfg80211.h>
#include <linux/etherdevice.h>
@@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
+ u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/*
* The command structure is the same for versions 6, 7 and 8 (only the
@@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD, 6);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
int err;
int cmd_size;
@@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
- LOCATION_GROUP, 0),
- 0, cmd_size, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
}
static int
@@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
};
u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
- LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
.data[1] = &data,
@@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
{
struct iwl_tof_responder_dyn_config_cmd cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
- LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
/* may not be able to DMA from stack */
@@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
struct ieee80211_ftm_responder_params *params)
{
int ret;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
switch (cmd_ver) {
case 2:
@@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
.addr = addr,
.hltk = hltk,
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
lockdep_assert_held(&mvm->mutex);
@@ -400,7 +398,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
rcu_read_lock();
- pctx = rcu_dereference(vif->chanctx_conf);
+ pctx = rcu_dereference(vif->bss_conf.chanctx_conf);
/* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care
* about changes in the ctx after releasing the lock because the driver
* is still protected by the mutex. */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6f4690e56a46..f041e77af059 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -25,11 +25,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define UCODE_VALID_OK cpu_to_le32(0x1)
-
-#define IWL_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_TAS_US_MCC 0x5553
#define IWL_TAS_CANADA_MCC 0x4341
@@ -79,7 +74,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
struct iwl_dqa_enable_cmd dqa_cmd = {
.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
};
- u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
@@ -126,13 +121,54 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 lmac_error_event_table, umac_error_table;
u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0);
+ u32 i;
- /*
- * For v5 and above, we can check the version, for older
- * versions we need to check the size.
- */
- if (version == 5 || version == 6) {
- /* v5 and v6 are compatible (only IMR addition) */
+ if (version == 6) {
+ struct iwl_alive_ntf_v6 *palive;
+
+ if (pkt_len < sizeof(*palive))
+ return false;
+
+ palive = (void *)pkt->data;
+ mvm->trans->dbg.imr_data.imr_enable =
+ le32_to_cpu(palive->imr.enabled);
+ mvm->trans->dbg.imr_data.imr_size =
+ le32_to_cpu(palive->imr.size);
+ mvm->trans->dbg.imr_data.imr2sram_remainbyte =
+ mvm->trans->dbg.imr_data.imr_size;
+ mvm->trans->dbg.imr_data.imr_base_addr =
+ palive->imr.base_addr;
+ mvm->trans->dbg.imr_data.imr_curr_addr =
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
+ IWL_DEBUG_FW(mvm, "IMR Enabled: 0x%x size 0x%x Address 0x%016llx\n",
+ mvm->trans->dbg.imr_data.imr_enable,
+ mvm->trans->dbg.imr_data.imr_size,
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
+
+ if (!mvm->trans->dbg.imr_data.imr_enable) {
+ for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fw_ini_region_tlv *reg;
+
+ reg_tlv = mvm->trans->dbg.active_regions[i];
+ if (!reg_tlv)
+ continue;
+
+ reg = (void *)reg_tlv->data;
+ /*
+ * We have only one DRAM IMR region, so we
+ * can break as soon as we find the first
+ * one.
+ */
+ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+ mvm->trans->dbg.unsupported_region_msk |= BIT(i);
+ break;
+ }
+ }
+ }
+ }
+
+ if (version >= 5) {
struct iwl_alive_ntf_v5 *palive;
if (pkt_len < sizeof(*palive))
@@ -249,6 +285,29 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
+static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
+{
+#define IWL_FW_PRINT_REG_INFO(reg_name) \
+ IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
+
+ struct iwl_trans *trans = mvm->trans;
+ enum iwl_device_family device_family = trans->trans_cfg->device_family;
+
+ if (device_family < IWL_DEVICE_FAMILY_8000)
+ return;
+
+ if (device_family <= IWL_DEVICE_FAMILY_9000)
+ IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
+ else
+ IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
+
+ IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
+
+ /* print OPT info */
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
+}
+
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@@ -314,6 +373,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_read_prph(trans, SB_CPU_2_STATUS));
}
+ iwl_mvm_print_pd_notification(mvm);
+
/* LMAC/UMAC PC info */
if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_9000) {
@@ -546,8 +607,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return 0;
}
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
- SAR_OFFSET_MAPPING_TABLE_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 2) {
@@ -572,6 +632,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
+ u32 cmd_id = PHY_CONFIGURATION_CMD;
struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
struct iwl_phy_specific_cfg phy_filters = {};
@@ -603,8 +664,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONFIGURATION_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
@@ -616,8 +676,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.phy_cfg);
cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
sizeof(struct iwl_phy_cfg_cmd_v1);
- return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
- cmd_size, &phy_cfg_cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
@@ -737,7 +796,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)mvm->nvm_data->channels + 1;
+ (void *)((u8 *)mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
@@ -760,6 +819,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
@@ -767,11 +827,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- REDUCE_TX_POWER_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
-
- if (cmd_ver == 6) {
+ if (cmd_ver == 7) {
+ len = sizeof(cmd.v7);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = cmd.v7.per_chain[0][0];
+ cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v6.per_chain[0][0];
@@ -805,7 +868,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -814,9 +877,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
- struct iwl_host_cmd cmd;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD,
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = { &geo_tx_cmd },
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
/* the ops field is at the same spot for all versions, so set in v1 */
@@ -838,12 +904,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
if (!iwl_sar_geo_support(&mvm->fwrt))
return -EOPNOTSUPP;
- cmd = (struct iwl_host_cmd){
- .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
- .len = { len, },
- .flags = CMD_WANT_SKB,
- .data = { &geo_tx_cmd },
- };
+ cmd.len[0] = len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -863,14 +924,14 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
u32 n_bands;
u32 n_profiles;
u32 sk = 0;
int ret;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
@@ -948,167 +1009,19 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SAR_TABLE_VER))
cmd.v2.table_revision = cpu_to_le32(sk);
- return iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD),
- 0, len, &cmd);
-}
-
-static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data, *flags;
- int i, j, ret, tbl_rev, num_sub_bands;
- int idx = 2;
-
- mvm->fwrt.ppag_flags = 0;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- IWL_DEBUG_RADIO(mvm,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
- tbl_rev);
- goto read_table;
- } else {
- ret = -EINVAL;
- goto out_free;
- }
- }
-
- /* try to read ppag table revision 0 */
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
- goto read_table;
- }
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
-
-read_table:
- mvm->fwrt.ppag_ver = tbl_rev;
- flags = &wifi_pkg->package.elements[1];
-
- if (flags->type != ACPI_TYPE_INTEGER) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK;
-
- if (!mvm->fwrt.ppag_flags) {
- ret = 0;
- goto out_free;
- }
-
- /*
- * read, verify gain values and save them into the PPAG table.
- * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
- * following sub-bands to High-Band (5GHz).
- */
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- union acpi_object *ent;
-
- ent = &wifi_pkg->package.elements[idx++];
- if (ent->type != ACPI_TYPE_INTEGER) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value;
-
- if ((j == 0 &&
- (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- mvm->fwrt.ppag_flags = 0;
- ret = -EINVAL;
- goto out_free;
- }
- }
- }
-
- ret = 0;
-out_free:
- kfree(data);
- return ret;
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
union iwl_ppag_table_cmd cmd;
- u8 cmd_ver;
- int i, j, ret, num_sub_bands, cmd_size;
- s8 *gain;
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG capability not supported by FW, command not sent.\n");
- return 0;
- }
- if (!mvm->fwrt.ppag_flags) {
- IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
- return 0;
- }
+ int ret, cmd_size;
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD,
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd.v1.gain[0];
- cmd_size = sizeof(cmd.v1);
- if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table rev is %d but FW supports v1, sending truncated table\n",
- mvm->fwrt.ppag_ver);
- cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- }
- } else if (cmd_ver == 2 || cmd_ver == 3) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd.v2.gain[0];
- cmd_size = sizeof(cmd.v2);
- if (mvm->fwrt.ppag_ver == 0) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table is v1 but FW supports v2, sending padded table\n");
- } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
- cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- }
- } else {
- IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
+ ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ /* Not supporting PPAG table is a valid scenario */
+ if (ret < 0)
return 0;
- }
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- mvm->fwrt.ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(mvm,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
@@ -1120,40 +1033,11 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return ret;
}
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- {}
-};
-
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(mvm,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- mvm->fwrt.ppag_flags = 0;
+ if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
return 0;
- }
return iwl_mvm_ppag_send_cmd(mvm);
}
@@ -1205,11 +1089,12 @@ static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigne
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
+ u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- struct iwl_tas_config_cmd_v3 cmd = {};
- int cmd_size;
+ union iwl_tas_config_cmd cmd = {};
+ int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
APCI_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1217,7 +1102,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1232,25 +1120,24 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
- &cmd.block_list_size, IWL_TAS_US_MCC)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
- &cmd.block_list_size, IWL_TAS_CANADA_MCC))) {
+ if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+ &cmd.v4.block_list_size,
+ IWL_TAS_US_MCC)) ||
+ (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+ &cmd.v4.block_list_size,
+ IWL_TAS_CANADA_MCC))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
}
}
- cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
- TAS_CONFIG,
- IWL_FW_CMD_VER_UNKNOWN) < 3 ?
+ /* v4 is the same size as v3, so no need to differentiate here */
+ cmd_size = fw_ver < 3 ?
sizeof(struct iwl_tas_config_cmd_v2) :
sizeof(struct iwl_tas_config_cmd_v3);
- ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
- TAS_CONFIG),
- 0, cmd_size, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
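The TAS command now uses the versioned-union pattern seen elsewhere in the driver: one union overlays every supported layout and only the size matching the looked-up firmware version is sent, so older firmware never sees the newer tail fields. Reduced sketch with stand-in layouts (the real ones live in the fw/api headers):

	struct tas_cmd_v2 { __le32 block_list_size, block_list[16]; };
	struct tas_cmd_v3 { __le32 block_list_size, block_list[16];
			    __le32 override_modes; };

	union tas_cmd {
		struct tas_cmd_v2 v2;
		struct tas_cmd_v3 v3;	/* v4 keeps the v3 size */
	};

	static int tas_cmd_size(int fw_ver)
	{
		return fw_ver < 3 ? sizeof(struct tas_cmd_v2)
				  : sizeof(struct tas_cmd_v3);
	}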
@@ -1283,7 +1170,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
int ret;
u32 value;
- struct iwl_lari_config_change_cmd_v5 cmd = {};
+ struct iwl_lari_config_change_cmd_v6 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
@@ -1310,25 +1197,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
if (cmd.config_bitmap ||
cmd.oem_uhb_allow_bitmap ||
cmd.oem_11ax_allow_bitmap ||
cmd.oem_unii4_allow_bitmap ||
- cmd.chan_state_active_bitmap) {
+ cmd.chan_state_active_bitmap ||
+ cmd.force_disable_channels_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE, 1);
- if (cmd_ver == 5)
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 1);
+ switch (cmd_ver) {
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ case 5:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
- else if (cmd_ver == 4)
+ break;
+ case 4:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
- else if (cmd_ver == 3)
+ break;
+ case 3:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
- else if (cmd_ver == 2)
+ break;
+ case 2:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
- else
+ break;
+ default:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
@@ -1340,8 +1245,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
le32_to_cpu(cmd.chan_state_active_bitmap),
cmd_ver);
IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd.oem_uhb_allow_bitmap));
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd.force_disable_channels_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
@@ -1358,7 +1264,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
int ret;
/* read PPAG table */
- ret = iwl_mvm_get_ppag_table(mvm);
+ ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1641,9 +1547,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* internal aux station for all aux activities that don't
* requires a dedicated data queue.
*/
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* In old version the aux station uses mac id like other
* station and not lmac id
@@ -1658,8 +1562,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
while (!sband && i < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[i++];
- if (WARN_ON_ONCE(!sband))
+ if (WARN_ON_ONCE(!sband)) {
+ ret = -ENODEV;
goto error;
+ }
chan = &sband->channels[0];
@@ -1741,7 +1647,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0)
ret = iwl_mvm_sar_geo_init(mvm);
- else if (ret < 0)
+ if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
@@ -1800,9 +1706,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* Add auxiliary station for scanning.
* Newer versions of this command imply that the fw uses
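The lookups converted throughout this patch reflect the same API change: iwl_fw_lookup_cmd_ver() now takes a single wide command ID instead of separate group/opcode arguments, with legacy-group commands passed bare. Usage sketch matching the call sites above; the default is returned when the firmware advertises no version TLV for the command:

	u8 grouped = iwl_fw_lookup_cmd_ver(mvm->fw,
					   WIDE_ID(LOCATION_GROUP,
						   TOF_RANGE_REQ_CMD),
					   IWL_FW_CMD_VER_UNKNOWN);
	u8 legacy = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0);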
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index fd7d4abfb454..de0c545d50fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -481,7 +481,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
eth_broadcast_addr(cmd->bssid_addr);
rcu_read_lock();
- chanctx = rcu_dereference(vif->chanctx_conf);
+ chanctx = rcu_dereference(vif->bss_conf.chanctx_conf);
iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
: NL80211_BAND_2GHZ,
&cck_ack_rates, &ofdm_ack_rates);
@@ -552,6 +552,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
+ /*
+ * We always want to hear MCAST frames; if we're not authorized yet,
+ * we'll drop them.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+
if (vif->p2p) {
struct ieee80211_p2p_noa_attr *noa =
&vif->bss_conf.p2p_noa_attr;
@@ -564,10 +570,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
}
/* We need the dtim_period to set the MAC as associated */
- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+ if (vif->cfg.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u8 ap_sta_id = mvmvif->ap_sta_id;
u32 dtim_offs;
/*
@@ -609,29 +614,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO))
ctxt_sta->data_policy |=
cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE);
-
- /*
- * allow multicast data frames only as long as the station is
- * authorized, i.e., GTK keys are already installed (if needed)
- */
- if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
- struct ieee80211_sta *sta;
-
- rcu_read_lock();
-
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
- if (!IS_ERR_OR_NULL(sta)) {
- struct iwl_mvm_sta *mvmsta =
- iwl_mvm_sta_from_mac80211(sta);
-
- if (mvmsta->sta_state ==
- IEEE80211_STA_AUTHORIZED)
- cmd.filter_flags |=
- cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
- }
-
- rcu_read_unlock();
- }
} else {
ctxt_sta->is_assoc = cpu_to_le32(0);
@@ -646,9 +628,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
vif->bss_conf.dtim_period);
ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
- ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+ ctxt_sta->assoc_id = cpu_to_le32(vif->cfg.aid);
- if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
+ if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
@@ -821,10 +803,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
- bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
- LONG_GROUP,
- BEACON_TEMPLATE_CMD,
- 0) > 10;
+ bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
if (rate_idx <= IWL_FIRST_CCK_RATE)
flags |= is_new_rate ? IWL_MAC_BEACON_CCK
@@ -955,19 +934,18 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
/* Enable FILS on PSC channels only */
rcu_read_lock();
- ctx = rcu_dereference(vif->chanctx_conf);
+ ctx = rcu_dereference(vif->bss_conf.chanctx_conf);
channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq);
WARN_ON(channel == 0);
if (cfg80211_channel_is_psc(ctx->def.chan) &&
!IWL_MVM_DISABLE_AP_FILS) {
- flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- BEACON_TEMPLATE_CMD,
+ flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
IWL_MAC_BEACON_FILS_V1;
beacon_cmd.short_ssid =
- cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid,
- vif->bss_conf.ssid_len));
+ cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
+ vif->cfg.ssid_len));
}
rcu_read_unlock();
@@ -1024,7 +1002,7 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
WARN_ON(vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC);
- beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0);
if (!beacon)
return -ENOMEM;
@@ -1053,7 +1031,7 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
{
struct iwl_mvm_mac_ap_iterator_data *data = _data;
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return;
/* Station client has higher priority over P2P client*/
@@ -1121,7 +1099,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ap_iterator, &data);
if (data.beacon_device_ts) {
- u32 rand = (prandom_u32() % (64 - 36)) + 36;
+ u32 rand = prandom_u32_max(64 - 36) + 36;
mvmvif->ap_beacon_time = data.beacon_device_ts +
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
@@ -1357,7 +1335,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
csa_vif = rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
- if (unlikely(csa_vif && csa_vif->csa_active))
+ if (unlikely(csa_vif && csa_vif->bss_conf.csa_active))
iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
(status == TX_STATUS_SUCCESS));
@@ -1458,8 +1436,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct sk_buff *skb;
u8 *data;
u32 size = le32_to_cpu(sb->byte_count);
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- STORED_BEACON_NTF, 0);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
+ 0);
if (size == 0)
return;
@@ -1579,7 +1558,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
switch (vif->type) {
case NL80211_IFTYPE_AP:
csa_vif = rcu_dereference(mvm->csa_vif);
- if (WARN_ON(!csa_vif || !csa_vif->csa_active ||
+ if (WARN_ON(!csa_vif || !csa_vif->bss_conf.csa_active ||
csa_vif != vif))
goto out_unlock;
@@ -1602,6 +1581,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_vif, NULL);
return;
case NL80211_IFTYPE_STATION:
+ /*
+ * if we don't know about an ongoing channel switch,
+ * make sure FW cancels it
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0) && !vif->bss_conf.csa_active) {
+ IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
+ iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
+ break;
+ }
+
iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
@@ -1615,6 +1606,31 @@ out_unlock:
rcu_read_unlock();
}
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(notif->mac_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif) {
+ rcu_read_unlock();
+ return;
+ }
+
+ IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
+ id, csa_err_mask);
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif, true);
+ rcu_read_unlock();
+}
+
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 65f4fe3ef504..8464c9b7baf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- * be the vif's ip address. in case there is not a single
- * ip address (0, or more than 1), this attribute will
- * be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- * the LSB bytes of the vif's mac address
- */
-enum {
- BC_FILTER_MAGIC_NONE = 0,
- BC_FILTER_MAGIC_IP,
- BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
- {
- /* arp */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
- .attrs = {
- {
- /* frame type - arp, hw type - ethernet */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header),
- .val = cpu_to_be32(0x08060001),
- .mask = cpu_to_be32(0xffffffff),
- },
- {
- /* arp dest ip */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header) + 2 +
- sizeof(struct arphdr) +
- ETH_ALEN + sizeof(__be32) +
- ETH_ALEN,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
- },
- },
- },
- {
- /* dhcp offer bcast */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
- .attrs = {
- {
- /* udp dest port - 68 (bootp client)*/
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = offsetof(struct udphdr, dest),
- .val = cpu_to_be32(0x00440000),
- .mask = cpu_to_be32(0xffff0000),
- },
- {
- /* dhcp - lsb bytes of client hw address */
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = 38,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
- },
- },
- },
- /* last filter must be empty */
- {},
-};
-#endif
-
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.max_peers = IWL_MVM_TOF_MAX_APS,
.report_ap_tsf = 1,
@@ -299,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
- [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
@@ -448,28 +374,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- /* currently FW API supports only one optional cipher scheme */
- if (mvm->fw->cs[0].cipher) {
- const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
- struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
-
- mvm->hw->n_cipher_schemes = 1;
-
- cs->cipher = le32_to_cpu(fwcs->cipher);
- cs->iftype = BIT(NL80211_IFTYPE_STATION);
- cs->hdr_len = fwcs->hdr_len;
- cs->pn_len = fwcs->pn_len;
- cs->pn_off = fwcs->pn_off;
- cs->key_idx_off = fwcs->key_idx_off;
- cs->key_idx_mask = fwcs->key_idx_mask;
- cs->key_idx_shift = fwcs->key_idx_shift;
- cs->mic_len = fwcs->mic_len;
-
- mvm->hw->cipher_schemes = mvm->cs;
- mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
- hw->wiphy->n_cipher_suites++;
- }
-
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
wiphy_ext_feature_set(hw->wiphy,
@@ -627,8 +531,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- WOWLAN_KEK_KCK_MATERIAL,
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN) == 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
@@ -641,9 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
if (iwl_mvm_is_oce_supported(mvm)) {
- u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC, 0);
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
@@ -693,11 +594,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* assign default bcast filtering configuration */
- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -1080,7 +976,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
- mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
mvm->fwrt.dump.conf = FW_DBG_INVALID;
mvm->monitor_on = false;
@@ -1233,7 +1128,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* async_handlers_wk is now blocked */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
@@ -1325,6 +1220,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
int len;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
@@ -1332,14 +1228,15 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
.common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- REDUCE_TX_POWER_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 6)
+ if (cmd_ver == 7)
+ len = sizeof(cmd.v7);
+ else if (cmd_ver == 6)
len = sizeof(cmd.v6);
else if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCE_TX_POWER))
@@ -1353,7 +1250,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
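
The iwl_mvm_set_tx_power() hunk above adds handling for version 7 of REDUCE_TX_POWER_CMD: the driver asks the FW which command version it implements and sends only the bytes that version defines, always prefixed by the common header. A hypothetical shape of such a versioned command (only `common`, `v6` and `v7` appear in the diff; the rest is assumed):

	struct iwl_dev_tx_power_cmd_sketch {
		struct iwl_dev_tx_power_common common;	/* always sent */
		union {				/* tail picked by FW version */
			struct iwl_dev_tx_power_cmd_v6 v6;
			struct iwl_dev_tx_power_cmd_v7 v7;	/* new here */
		};
	};
	/* len = sizeof(cmd.common) + sizeof(cmd.vN) for the chosen N */
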
@@ -1414,6 +1311,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
+ /*
+ * In the new flow, since the FW is in charge of the timing, if the
+ * driver has canceled the channel switch it will receive the
+ * CHANNEL_SWITCH_START_NOTIF notification from the FW and cancel the
+ * switch at that point
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
mutex_lock(&mvm->mutex);
@@ -1473,10 +1379,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
- /* Counting number of interfaces is needed for legacy PM */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count++;
-
/*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
@@ -1493,7 +1395,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
- goto out_release;
+ goto out_unlock;
}
/*
@@ -1504,7 +1406,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
0, vif->type,
IWL_STA_MULTICAST);
if (ret)
- goto out_release;
+ goto out_unlock;
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
@@ -1514,7 +1416,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
- goto out_release;
+ goto out_unlock;
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
@@ -1591,9 +1493,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_remove_mac:
mvmvif->phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);
- out_release:
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count--;
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -1675,9 +1574,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = NULL;
}
- if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count--;
-
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1718,7 +1614,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
return;
if (vif->type != NL80211_IFTYPE_STATION ||
- !vif->bss_conf.assoc)
+ !vif->cfg.assoc)
return;
cmd->port_id = data->port_id++;
@@ -1844,7 +1740,7 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
return;
/* Supported only for p2p client interfaces */
- if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc ||
!vif->p2p)
return;
@@ -1853,162 +1749,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
- struct iwl_mvm *mvm;
- struct iwl_bcast_filter_cmd *cmd;
- u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
- const struct iwl_fw_bcast_filter *in_filter,
- struct iwl_fw_bcast_filter *out_filter)
-{
- struct iwl_fw_bcast_filter_attr *attr;
- int i;
-
- memcpy(out_filter, in_filter, sizeof(*out_filter));
-
- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
- attr = &out_filter->attrs[i];
-
- if (!attr->mask)
- break;
-
- switch (attr->reserved1) {
- case cpu_to_le16(BC_FILTER_MAGIC_IP):
- if (vif->bss_conf.arp_addr_cnt != 1) {
- attr->mask = 0;
- continue;
- }
-
- attr->val = vif->bss_conf.arp_addr_list[0];
- break;
- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
- attr->val = *(__be32 *)&vif->addr[2];
- break;
- default:
- break;
- }
- attr->reserved1 = 0;
- out_filter->num_attrs++;
- }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_bcast_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct iwl_bcast_filter_cmd *cmd = data->cmd;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_fw_bcast_mac *bcast_mac;
- int i;
-
- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
- return;
-
- bcast_mac = &cmd->macs[mvmvif->id];
-
- /*
- * enable filtering only for associated stations, but not for P2P
- * Clients
- */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
- !vif->bss_conf.assoc)
- return;
-
- bcast_mac->default_discard = 1;
-
- /* copy all configured filters */
- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
- /*
- * Make sure we don't exceed our filters limit.
- * if there is still a valid filter to be configured,
- * be on the safe side and just allow bcast for this mac.
- */
- if (WARN_ON_ONCE(data->current_filter >=
- ARRAY_SIZE(cmd->filters))) {
- bcast_mac->default_discard = 0;
- bcast_mac->attached_filters = 0;
- break;
- }
-
- iwl_mvm_set_bcast_filter(vif,
- &mvm->bcast_filters[i],
- &cmd->filters[data->current_filter]);
-
- /* skip current filter if it contains no attributes */
- if (!cmd->filters[data->current_filter].num_attrs)
- continue;
-
- /* attach the filter to current mac */
- bcast_mac->attached_filters |=
- cpu_to_le16(BIT(data->current_filter));
-
- data->current_filter++;
- }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd)
-{
- struct iwl_bcast_iter_data iter_data = {
- .mvm = mvm,
- .cmd = cmd,
- };
-
- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
- return false;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
- cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* use debugfs filters/macs if override is configured */
- if (mvm->dbgfs_bcast_filtering.override) {
- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
- sizeof(cmd->filters));
- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
- sizeof(cmd->macs));
- return true;
- }
-#endif
-
- /* if no filters are configured, do nothing */
- if (!mvm->bcast_filters)
- return false;
-
- /* configure and attach these filters for each associated sta vif */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bcast_filter_iterator, &iter_data);
-
- return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- struct iwl_bcast_filter_cmd cmd;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
- return 0;
-
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
-
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -2028,7 +1768,7 @@ static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
- if (vif->mu_mimo_owner) {
+ if (vif->bss_conf.mu_mimo_owner) {
struct iwl_mu_group_mgmt_notif *notif = _data;
/*
@@ -2036,7 +1776,7 @@ static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
* the data received from firmware as if it came from the
* action frame, so no conversion is needed.
*/
- ieee80211_update_mu_groups(vif,
+ ieee80211_update_mu_groups(vif, 0,
(u8 *)&notif->membership_status,
(u8 *)&notif->user_position);
}
@@ -2080,11 +1820,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
return res;
}
+static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
+ struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
+{
+ int i;
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ ru_index_tmp >>= 1;
+
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+}
+
+static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+ u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &sta->deflink.he_cap.ppe_thres[0];
+ u8 ru_index_bitmap =
+ u8_get_bits(*ppe,
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
+}
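
The two helpers added above factor out the HE PPE Thresholds walk. The layout that drives the bit arithmetic: a 7-bit header (3-bit NSS count plus 4-bit RU index bitmap, i.e. IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE), followed by one 3-bit PPET16 and one 3-bit PPET8 value (IEEE80211_PPE_THRES_INFO_PPET_SIZE each) per stream and RU. A sketch of the resulting offset computation:

	/* sketch: bit offset of the (stream, pair) PPET16/PPET8 pair in
	 * ppe_thres[], given how many RU pairs each stream carries */
	static inline unsigned int ppe_pair_offset(unsigned int stream,
						   unsigned int pair,
						   unsigned int pairs_per_stream)
	{
		return 7 + (stream * pairs_per_stream + pair) * (3 + 3);
	}
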
+
+static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding,
+ u32 *flags)
+{
+ int low_th = -1;
+ int high_th = -1;
+ int i;
+
+ switch (nominal_padding) {
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ /* Set the PPE thresholds accordingly */
+ if (low_th >= 0 && high_th >= 0) {
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+
+ *flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+}
+
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
.bss_color = vif->bss_conf.he_bss_color.color,
@@ -2092,20 +1929,43 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
- int size = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_MBSSID_HE) ?
- sizeof(sta_ctxt_cmd) :
- sizeof(struct iwl_he_sta_context_cmd_v1);
+ struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
+ int size;
struct ieee80211_sta *sta;
u32 flags;
int i;
const struct ieee80211_sta_he_cap *own_he_cap = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
+ void *cmd;
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
+ ver = 1;
+
+ switch (ver) {
+ case 1:
+ /* same layout as v2 except some data at the end */
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v1);
+ break;
+ case 2:
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v2);
+ break;
+ case 3:
+ cmd = &sta_ctxt_cmd;
+ size = sizeof(struct iwl_he_sta_context_cmd_v3);
+ break;
+ default:
+ IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
+ return;
+ }
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return;
@@ -2122,7 +1982,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
return;
}
- if (!sta->he_cap.has_he) {
+ if (!sta->deflink.he_cap.has_he) {
rcu_read_unlock();
return;
}
@@ -2134,17 +1994,17 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
flags |= STA_CTXT_HE_RU_2MHZ_BLOCK;
/* HTC flags */
- if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_HTC_HE)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
- if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] &
IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
- (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
u8 link_adap =
- ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
- (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] &
IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
if (link_adap == 2)
@@ -2154,116 +2014,44 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.htc_flags |=
cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
}
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
- if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] &
IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
- if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
/*
* Initialize the PPE thresholds to "None" (7), as described in Table
* 9-262ac of 802.11ax/D3.0.
*/
- memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+ memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
+ sizeof(sta_ctxt_cmd.pkt_ext));
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
- if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- u8 nss = (sta->he_cap.ppe_thres[0] &
- IEEE80211_PPE_THRES_NSS_MASK) + 1;
- u8 ru_index_bitmap =
- (sta->he_cap.ppe_thres[0] &
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
- u8 *ppe = &sta->he_cap.ppe_thres[0];
- u8 ppe_pos_bit = 7; /* Starting after PPE header */
-
- /*
- * FW currently supports only nss == MAX_HE_SUPP_NSS
- *
- * If nss > MAX: we can ignore values we don't support
- * If nss < MAX: we can set zeros in other streams
- */
- if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
- nss = MAX_HE_SUPP_NSS;
- }
-
- for (i = 0; i < nss; i++) {
- u8 ru_index_tmp = ru_index_bitmap << 1;
- u8 bw;
-
- for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
- ru_index_tmp >>= 1;
- if (!(ru_index_tmp & 1))
- continue;
-
- sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
- iwl_mvm_he_get_ppe_val(ppe,
- ppe_pos_bit);
- ppe_pos_bit +=
- IEEE80211_PPE_THRES_INFO_PPET_SIZE;
- sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
- iwl_mvm_he_get_ppe_val(ppe,
- ppe_pos_bit);
- ppe_pos_bit +=
- IEEE80211_PPE_THRES_INFO_PPET_SIZE;
- }
- }
-
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
+ &sta_ctxt_cmd.pkt_ext);
flags |= STA_CTXT_HE_PACKET_EXT;
- } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
- != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
- int low_th = -1;
- int high_th = -1;
-
- /* Take the PPE thresholds from the nominal padding info */
- switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
- low_th = IWL_HE_PKT_EXT_NONE;
- high_th = IWL_HE_PKT_EXT_NONE;
- break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
- low_th = IWL_HE_PKT_EXT_BPSK;
- high_th = IWL_HE_PKT_EXT_NONE;
- break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
- low_th = IWL_HE_PKT_EXT_NONE;
- high_th = IWL_HE_PKT_EXT_BPSK;
- break;
- }
-
- /* Set the PPE thresholds accordingly */
- if (low_th >= 0 && high_th >= 0) {
- struct iwl_he_pkt_ext *pkt_ext =
- (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
-
- for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
- u8 bw;
-
- for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
- bw++) {
- pkt_ext->pkt_ext_qam_th[i][bw][0] =
- low_th;
- pkt_ext->pkt_ext_qam_th[i][bw][1] =
- high_th;
- }
- }
-
- flags |= STA_CTXT_HE_PACKET_EXT;
- }
+ /* PPE Thresholds don't exist - set the API PPE values
+ * according to the Common Nominal Packet Padding field. */
+ } else {
+ u8 nominal_padding =
+ u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
+ nominal_padding,
+ &flags);
}
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP)
flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN)
flags |= STA_CTXT_HE_ACK_ENABLED;
@@ -2319,9 +2107,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.flags = cpu_to_le32(flags);
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
- DATA_PATH_GROUP, 0),
- 0, size, &sta_ctxt_cmd))
+ if (ver < 3) {
+ /* fields before pkt_ext */
+ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
+ offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
+ offsetof(typeof(sta_ctxt_cmd), pkt_ext));
+
+ /* pkt_ext */
+ for (i = 0;
+ i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
+ i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
+ bw++) {
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
+ sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));
+
+ memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
+ &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
+ sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
+ }
+ }
+
+ /* fields after pkt_ext */
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
+ sizeof(sta_ctxt_cmd_v2) -
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy((u8 *)&sta_ctxt_cmd_v2 +
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
+ (u8 *)&sta_ctxt_cmd +
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
+ sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
+ sta_ctxt_cmd_v2.reserved3 = 0;
+ }
+
+ if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
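
For FW versions below 3 the driver builds the v3 command and down-converts it in place: copy the shared prefix up to `pkt_ext`, the per-NSS/bandwidth threshold pairs, then the shared suffix after `pkt_ext`, with BUILD_BUG_ON() proving at compile time that both layouts agree around the pivot member. The pattern in isolation, with hypothetical struct names:

	/* sketch: down-convert cmd_v3 to cmd_v2 around one diverging member */
	BUILD_BUG_ON(offsetof(struct cmd_v3, pivot) !=
		     offsetof(struct cmd_v2, pivot));
	memcpy(&v2, &v3, offsetof(struct cmd_v3, pivot));	/* prefix */
	memcpy((u8 *)&v2 + offsetofend(struct cmd_v2, pivot),	/* suffix */
	       (u8 *)&v3 + offsetofend(struct cmd_v3, pivot),
	       sizeof(v3) - offsetofend(struct cmd_v3, pivot));
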
@@ -2356,7 +2181,7 @@ static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm,
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -2366,7 +2191,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* on the beacon interval, which was not known when the station
* interface was added.
*/
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
@@ -2376,7 +2201,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
- bss_conf->assoc && vif->bss_conf.he_support &&
+ vif->cfg.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
@@ -2395,10 +2220,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* after sending it once, adopt mac80211 data */
memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
- mvmvif->associated = bss_conf->assoc;
+ mvmvif->associated = vif->cfg.assoc;
if (changes & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
memset(&mvmvif->beacon_stats, 0,
@@ -2512,7 +2337,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* However, on HW restart we should restore this data.
*/
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+ (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) {
ret = iwl_mvm_update_mu_groups(mvm, vif);
if (ret)
IWL_ERR(mvm,
@@ -2520,7 +2345,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
iwl_mvm_recalc_multicast(mvm);
- iwl_mvm_configure_bcast_filter(mvm);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -2537,11 +2361,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
- * A firmware with the new API will remove it automatically.
*/
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
- iwl_mvm_stop_session_protection(mvm, vif);
+ iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -2570,17 +2391,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
- if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
- iwl_mvm_configure_bcast_filter(mvm);
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_apply_fw_smps_request(vif);
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -2706,8 +2523,22 @@ out_unlock:
return ret;
}
+static int iwl_mvm_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return iwl_mvm_start_ap_ibss(hw, vif, link_conf);
+}
+
+static int iwl_mvm_start_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -2770,11 +2601,24 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_stop_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ iwl_mvm_stop_ap_ibss(hw, vif, link_conf);
+}
+
+static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
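
mac80211's `start_ap`/`stop_ap` ops gained a `struct ieee80211_bss_conf *link_conf` parameter in this cycle, while `join_ibss`/`leave_ibss` kept the two-argument signature, so the shared AP/IBSS implementation is now reached through these thin wrappers. They plug into the ops table as in the iwl_mvm_hw_ops hunk later in this diff:

	.start_ap   = iwl_mvm_start_ap,		/* (hw, vif, link_conf) */
	.stop_ap    = iwl_mvm_stop_ap,
	.join_ibss  = iwl_mvm_start_ibss,	/* (hw, vif) - no link_conf */
	.leave_ibss = iwl_mvm_stop_ibss,
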
+
static void
iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -2805,13 +2649,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
mutex_lock(&mvm->mutex);
- if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+ if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
switch (vif->type) {
@@ -3190,7 +3034,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
if (he_cap) {
/* we know that ours is writable */
- struct ieee80211_sta_he_cap *he = (void *)he_cap;
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
he->he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
@@ -3204,7 +3048,7 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
#if IS_ENABLED(CONFIG_IWLMEI)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mei_conn_info conn_info = {
- .ssid_len = vif->bss_conf.ssid_len,
+ .ssid_len = vif->cfg.ssid_len,
.channel = vif->bss_conf.chandef.chan->hw_value,
};
@@ -3252,7 +3096,7 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
}
- memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len);
+ memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
/* TODO: add support for collocated AP data */
@@ -3330,7 +3174,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION)
- vif->bss_conf.he_support = sta->he_cap.has_he;
+ vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
if (sta->tdls &&
(vif->p2p ||
@@ -3349,7 +3193,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
NL80211_TDLS_SETUP);
}
- sta->max_rc_amsdu_len = 1;
+ sta->deflink.agg.max_rc_amsdu_len = 1;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
/*
@@ -3362,17 +3206,17 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
if (vif->type == NL80211_IFTYPE_AP) {
- vif->bss_conf.he_support = sta->he_cap.has_he;
+ vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
- vif->bss_conf.he_support = sta->he_cap.has_he;
+ vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
mvmvif->he_ru_2mhz_block = false;
- if (sta->he_cap.has_he)
+ if (sta->deflink.he_cap.has_he)
iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
@@ -3505,7 +3349,8 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
}
static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -3565,7 +3410,7 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (!vif->bss_conf.idle) {
+ if (!vif->cfg.idle) {
ret = -EBUSY;
goto out;
}
@@ -3654,12 +3499,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
/* support HW crypto on TX */
return 0;
default:
- /* currently FW supports only one optional cipher scheme */
- if (hw->n_cipher_schemes &&
- hw->cipher_schemes->cipher == key->cipher)
- key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
- else
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
switch (cmd) {
@@ -3936,7 +3776,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
* like the delay to be for 2-3 dtim intervals, in case there are
* other time events with higher priority.
*/
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
/* We cannot remain off-channel longer than the DTIM interval */
if (dtim_interval <= req_dur) {
@@ -4042,8 +3882,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
u32 lmac_id;
lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
@@ -4194,7 +4033,7 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
{
struct iwl_mvm_ftm_responder_iter_data *data = _data;
- if (rcu_access_pointer(vif->chanctx_conf) == data->ctx &&
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx &&
vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
data->responder = true;
}
@@ -4425,6 +4264,7 @@ out:
}
static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -4498,6 +4338,7 @@ out:
static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -4692,7 +4533,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||
- !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+ !vif->cfg.assoc || !vif->bss_conf.dtim_period ||
!tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
return -EINVAL;
@@ -4821,7 +4662,7 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
csa_vif =
rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
- if (WARN_ONCE(csa_vif && csa_vif->csa_active,
+ if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active,
"Another CSA is already in progress")) {
ret = -EBUSY;
goto out_unlock;
@@ -4847,11 +4688,20 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
/*
+ * In the new flow, the FW is in charge of timing the switch, so
+ * there is no need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0))
+ break;
+
+ /*
* We haven't configured the firmware to be associated yet since
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
*/
- if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) {
+ if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) {
ret = -EBUSY;
goto out_unlock;
}
@@ -4917,6 +4767,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
.cs_mode = chsw->block_tx,
};
+ /*
+ * In the new flow, the FW is in charge of timing the switch, so
+ * there is no need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
return;
@@ -5091,6 +4949,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
{
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 gi_ltf;
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
@@ -5161,9 +5020,12 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
RATE_HT_MCS_INDEX(rate_n_flags) :
u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
- if (format == RATE_MCS_HE_MSK) {
- u32 gi_ltf = u32_get_bits(rate_n_flags,
- RATE_MCS_HE_GI_LTF_MSK);
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (format) {
+ case RATE_MCS_HE_MSK:
+ gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -5202,19 +5064,14 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
- return;
- }
-
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-
- if (format == RATE_MCS_HT_MSK) {
+ break;
+ case RATE_MCS_HT_MSK:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
-
- } else if (format == RATE_MCS_VHT_MSK) {
+ break;
+ case RATE_MCS_VHT_MSK:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ break;
}
-
}
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
@@ -5242,7 +5099,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return;
mutex_lock(&mvm->mutex);
@@ -5579,10 +5436,10 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
- .start_ap = iwl_mvm_start_ap_ibss,
- .stop_ap = iwl_mvm_stop_ap_ibss,
- .join_ibss = iwl_mvm_start_ap_ibss,
- .leave_ibss = iwl_mvm_stop_ap_ibss,
+ .start_ap = iwl_mvm_start_ap,
+ .stop_ap = iwl_mvm_stop_ap,
+ .join_ibss = iwl_mvm_start_ibss,
+ .leave_ibss = iwl_mvm_stop_ibss,
.tx_last_beacon = iwl_mvm_tx_last_beacon,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1dcbb0eb63c3..97cba526e465 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -860,6 +860,7 @@ struct iwl_mvm {
/* Scan status, cmd (pre-allocated) and auxiliary station */
unsigned int scan_status;
+ size_t scan_cmd_size;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* For CDB this is low band scan type, for non-CDB - type. */
@@ -884,17 +885,6 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* broadcast filters to configure for each associated station */
- const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct {
- bool override;
- struct iwl_bcast_filter_cmd cmd;
- } dbgfs_bcast_filtering;
-#endif
-#endif
-
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
struct iwl_mvm_int_sta snif_sta;
@@ -945,7 +935,6 @@ struct iwl_mvm {
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
u8 fw_key_deleted[STA_KEY_MAX_NUM];
- u8 vif_count;
struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
/* -1 for always, 0 for never, >0 for that many times */
@@ -1075,7 +1064,6 @@ struct iwl_mvm {
u32 ciphers[IWL_MVM_NUM_CIPHERS];
- struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
struct cfg80211_ftm_responder_stats ftm_resp_stats;
struct {
@@ -1092,12 +1080,10 @@ struct iwl_mvm {
struct list_head resp_pasn_list;
struct {
- u8 d0i3_resp;
u8 range_resp;
} cmd_ver;
struct ieee80211_vif *nan_vif;
-#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
/*
@@ -1117,6 +1103,8 @@ struct iwl_mvm {
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
+
+ bool sta_remove_requires_queue_remove;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1593,8 +1581,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -1684,6 +1670,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1717,7 +1705,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_size(struct iwl_mvm *mvm);
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
@@ -1945,10 +1933,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
void iwl_mvm_stop_device(struct iwl_mvm *mvm);
-/* Re-configure the SCD for a queue that has already been configured */
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
- int tid, int frame_limit, u16 ssn);
-
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
@@ -2098,6 +2082,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
@@ -2172,8 +2158,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
- u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
@@ -2225,7 +2210,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
- mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index 41880517e8bb..a8bd0f5f795c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2021-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- PROT_OFFLOAD_CONFIG_CMD, 0);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -193,9 +192,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
size = sizeof(cmd.v1);
}
- if (vif->bss_conf.arp_addr_cnt) {
+ if (vif->cfg.arp_addr_cnt) {
enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
- common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ common->host_ipv4_addr = vif->cfg.arp_addr_list[0];
memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 87630d38dc52..d2d42cd48af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -24,6 +24,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
+#include "fw/api/rfi.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
@@ -32,6 +33,7 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
@@ -160,7 +162,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
return;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return;
/* this shouldn't happen *again*, ignore it */
@@ -191,7 +193,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (he_cap) {
/* we know that ours is writable */
- struct ieee80211_sta_he_cap *he = (void *)he_cap;
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
WARN_ON(!he->has_he);
WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
@@ -235,7 +237,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
*/
mvm->fw_static_smps_request =
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
- ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ ieee80211_iterate_interfaces(mvm->hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
iwl_mvm_intf_dual_chain_req, NULL);
}
@@ -382,6 +385,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
iwl_mvm_channel_switch_start_notif,
RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+ iwl_mvm_channel_switch_error_notif,
+ RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_channel_switch_error_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -390,6 +397,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_thermal_dual_chain_req,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_thermal_dual_chain_request),
+
+ RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
+ iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_rfi_deactivate_notif),
};
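
The new entries route the (group, opcode) pairs to the handlers added earlier, run without taking the mvm mutex, and declare the minimum payload struct so the RX path can size-check the notification. A rough expansion of one registration (field names assumed, per the RX_HANDLER_GRP macro in this file):

	{ .cmd_id   = WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF),
	  .fn       = iwl_mvm_channel_switch_error_notif,
	  .context  = RX_HANDLER_ASYNC_UNLOCKED,
	  .min_size = sizeof(struct iwl_channel_switch_error_notif), },
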
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -443,7 +454,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
- HCMD_NAME(DC2DC_CONFIG_CMD),
HCMD_NAME(NVM_ACCESS_CMD),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
@@ -469,7 +479,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(MCC_CHUB_UPDATE_CMD),
HCMD_NAME(MARKER_CMD),
HCMD_NAME(BT_PROFILE_NOTIFICATION),
- HCMD_NAME(BCAST_FILTER_CMD),
HCMD_NAME(MCAST_FILTER_CMD),
HCMD_NAME(REPLY_SF_CFG_CMD),
HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
@@ -500,6 +509,7 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
+ HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
@@ -536,6 +546,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+ HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -546,6 +557,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_CONFIG_CMD),
@@ -563,6 +581,9 @@ static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+ HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
+ HCMD_NAME(D3_END_NOTIFICATION),
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -582,6 +603,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
@@ -634,13 +656,11 @@ unlock:
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_fwrt_dump_start(void *ctx)
+static void iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;
mutex_lock(&mvm->mutex);
-
- return 0;
}
static void iwl_mvm_fwrt_dump_end(void *ctx)
@@ -1056,7 +1076,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
- int scan_size;
+ size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
@@ -1077,12 +1097,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
op_mode = hw->priv;
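
The A-MPDU limits above move from the generic IEEE80211_MAX_AMPDU_BUF to the HE-specific constant; mac80211 split the name when EHT support landed. The values, assuming the include/linux/ieee80211.h definitions of this cycle:

	/* assumed values from include/linux/ieee80211.h */
	#define IEEE80211_MAX_AMPDU_BUF_HT	0x40	/* 64 subframes */
	#define IEEE80211_MAX_AMPDU_BUF_HE	0x100	/* 256 subframes */
	#define IEEE80211_MAX_AMPDU_BUF_EHT	0x400	/* 1024 subframes */
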
@@ -1179,13 +1199,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
- mvm->cmd_ver.d0i3_resp =
- iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
- 0);
- /* we only support version 1 */
- if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
- goto out_free;
-
mvm->cmd_ver.range_resp =
iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_RESPONSE_NOTIF, 5);
@@ -1245,6 +1258,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+ trans_cfg.queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0);
+ mvm->sta_remove_requires_queue_remove =
+ trans_cfg.queue_alloc_cmd_ver > 0;
+
/* Configure transport layer */
iwl_trans_configure(mvm->trans, &trans_cfg);
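
The driver now probes the DATA_PATH_GROUP version of SCD_QUEUE_CONFIG_CMD: a nonzero version means the FW allocates TX queues dynamically, so deleting a station must be preceded by an explicit queue removal. A sketch of the downstream check this flag enables (the helper name is hypothetical):

	if (mvm->sta_remove_requires_queue_remove)
		/* tell the FW to tear down the STA's queue first, e.g. by
		 * sending SCD_QUEUE_CONFIG_CMD with a remove operation */
		iwl_mvm_remove_sta_queues(mvm, sta); /* hypothetical helper */
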
@@ -1282,6 +1303,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd)
goto out_free;
+ mvm->scan_cmd_size = scan_size;
/* invalidate ids to prevent accidental removal of sta_id 0 */
mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 9af40b0fa37a..a3cefbc43e80 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -158,8 +158,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* we only support RLC command version 2 */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) < 2)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
@@ -172,8 +171,7 @@ static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
.phy_id = cpu_to_le32(ctxt->id),
};
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) < 2)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
return 0;
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
@@ -209,8 +207,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
u32 action)
{
int ret;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONTEXT_CMD, 1);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
if (ver == 3 || ver == 4) {
struct iwl_phy_context_cmd cmd = {};
@@ -301,8 +298,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) >= 2 &&
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
ctxt->channel == chandef->chan &&
ctxt->width == chandef->width &&
ctxt->center_freq1 == chandef->center_freq1)
@@ -349,18 +345,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* otherwise we might not be able to reuse this phy.
*/
if (ctxt->ref == 0) {
- struct ieee80211_channel *chan;
+ struct ieee80211_channel *chan = NULL;
struct cfg80211_chan_def chandef;
- struct ieee80211_supported_band *sband = NULL;
- enum nl80211_band band = NL80211_BAND_2GHZ;
+ struct ieee80211_supported_band *sband;
+ enum nl80211_band band;
+ int channel;
- while (!sband && band < NUM_NL80211_BANDS)
- sband = mvm->hw->wiphy->bands[band++];
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = mvm->hw->wiphy->bands[band];
- if (WARN_ON(!sband))
- return;
+ if (!sband)
+ continue;
+
+ for (channel = 0; channel < sband->n_channels; channel++)
+ if (!(sband->channels[channel].flags &
+ IEEE80211_CHAN_DISABLED)) {
+ chan = &sband->channels[channel];
+ break;
+ }
- chan = &sband->channels[0];
+ if (chan)
+ break;
+ }
+
+ if (WARN_ON(!chan))
+ return;
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index b2ea2fca5376..f5744162d0d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -223,7 +223,7 @@ static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
*is_p2p_standalone = false;
break;
case NL80211_IFTYPE_STATION:
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
*is_p2p_standalone = false;
break;
@@ -283,7 +283,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
bool radar_detect = false;
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
WARN_ON(!chanctx_conf);
if (chanctx_conf) {
chan = chanctx_conf->def.chan;
@@ -359,7 +359,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
+ if (!vif->cfg.ps || !mvmvif->pm_enabled)
return;
if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
@@ -563,6 +563,9 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
struct iwl_power_vifs *power_iterator = _data;
bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+ if (!mvmvif->uploaded)
+ return;
+
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
@@ -887,7 +890,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
mvm->ps_disabled ||
- !vif->bss_conf.ps ||
+ !vif->cfg.ps ||
iwl_mvm_vif_low_latency(mvmvif));
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
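
The bss_conf.assoc/ps to cfg.assoc/ps renames here track mac80211's MLO split: per-interface state moved out of struct ieee80211_bss_conf into struct ieee80211_vif_cfg, while bss_conf keeps per-link data (hence chanctx_conf also moving under bss_conf). A rough sketch of the shape this assumes; these are illustrative field subsets, not the full mac80211 definitions:

#include <stdbool.h>

struct vif_cfg {                /* per-interface (all links) */
    bool assoc;                 /* association state */
    bool ps;                    /* powersave requested */
};

struct link_conf {              /* per-link (one channel/BSS) */
    int dtim_period;
    void *chanctx_conf;         /* RCU-managed channel context, simplified */
};

struct vif {
    struct vif_cfg cfg;         /* fields that used to sit in bss_conf */
    struct link_conf bss_conf;  /* now strictly link-scoped */
};

/* Callers that used to test vif->bss_conf.assoc now test vif->cfg.assoc. */
static bool vif_is_assoc(const struct vif *vif)
{
    return vif->cfg.assoc;
}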
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index 3d0166df2002..cea1a34f9130 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -47,7 +47,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
break;
return;
case NL80211_IFTYPE_AP:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index f054ce76bed5..bb77bc9aa821 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -125,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
return ERR_PTR(-EIO);
- resp = kzalloc(resp_size, GFP_KERNEL);
+ resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
if (!resp)
return ERR_PTR(-ENOMEM);
- memcpy(resp, cmd.resp_pkt->data, resp_size);
-
iwl_free_resp(&cmd);
return resp;
}
+
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;
+
+ IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
+}
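
The kzalloc()+memcpy() pair in iwl_rfi_get_freq_table() collapses into kmemdup(), which allocates and copies in one call and avoids pointlessly zeroing memory that is immediately overwritten. A user-space sketch of the equivalence, with a hypothetical stand-in for the kernel helper:

#include <stdlib.h>
#include <string.h>

/* User-space stand-in for the kernel's kmemdup(src, len, gfp). */
static void *kmemdup_sketch(const void *src, size_t len)
{
    void *p = malloc(len);  /* was: kzalloc(len, ...) followed by memcpy */

    if (p)
        memcpy(p, src, len);
    return p;               /* caller still checks for NULL, as above */
}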
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 66808c55aa0e..2e9081cb6627 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -11,7 +11,7 @@
static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
{
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
@@ -38,9 +38,9 @@ static u8 rs_fw_set_active_chains(u8 chains)
static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
u8 supp = 0;
if (he_cap->has_he)
@@ -62,9 +62,9 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
bool vht_ena = vht_cap->vht_supported;
u16 flags = 0;
@@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
if (he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+ sband->iftype_data &&
+ sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
return flags;
@@ -133,14 +136,14 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
{
u16 supp;
int i, highest_mcs;
- u8 max_nss = sta->rx_nss;
+ u8 max_nss = sta->deflink.rx_nss;
struct ieee80211_vht_cap ieee_vht_cap = {
.vht_cap_info = cpu_to_le32(vht_cap->cap),
.supp_mcs = vht_cap->vht_mcs,
};
/* the station supports only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
max_nss = 1;
for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
@@ -151,7 +154,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
continue;
supp = BIT(highest_mcs + 1) - 1;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
@@ -160,7 +163,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
* configuration is supported - only for MCS 0 since we already
* decoded the MCS bits anyway ourselves.
*/
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160 &&
ieee80211_get_vht_max_nss(&ieee_vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true, nss) >= nss)
@@ -191,7 +194,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
{
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
u16 tx_mcs_80 =
@@ -199,10 +202,10 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
u16 tx_mcs_160 =
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
int i;
- u8 nss = sta->rx_nss;
+ u8 nss = sta->deflink.rx_nss;
/* the station supports only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
nss = 1;
for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
@@ -242,12 +245,12 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
int i;
u16 supp = 0;
unsigned long tmp; /* must be unsigned long for for_each_set_bit */
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
/* non HT rates */
- tmp = sta->supp_rates[sband->band];
+ tmp = sta->deflink.supp_rates[sband->band];
for_each_set_bit(i, &tmp, BITS_PER_LONG)
supp |= BIT(sband->bitrates[i].hw_value);
@@ -267,7 +270,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
/* the station supports only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
0;
else
@@ -337,9 +340,9 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- if (sta->max_amsdu_len < size) {
+ if (sta->deflink.agg.max_amsdu_len < size) {
/*
- * In debug sta->max_amsdu_len < size
+ * In debug sta->deflink.agg.max_amsdu_len < size
* so also check with orig_amsdu_len which holds the
* original data before debugfs changed the value
*/
@@ -349,18 +352,18 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
- sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+ sta->deflink.agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled & BIT(i))
- sta->max_tid_amsdu_len[i] =
+ sta->deflink.agg.max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
- sta->max_tid_amsdu_len[i] = 1;
+ sta->deflink.agg.max_tid_amsdu_len[i] = 1;
}
IWL_DEBUG_RATE(mvm,
@@ -375,11 +378,11 @@ out:
u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
- switch (le16_get_bits(sta->he_6ghz_capa.capa,
+ switch (le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
return IEEE80211_MAX_MPDU_LEN_VHT_11454;
@@ -420,7 +423,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hw *hw = mvm->hw;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
- u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd_v4 cfg_cmd = {
@@ -447,10 +450,24 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* since TLC offload works with one mode we can assume
* that only vht/ht is used and also set it as station max amsdu
*/
- sta->max_amsdu_len = max_amsdu_len;
-
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0);
+ sta->deflink.agg.max_amsdu_len = max_amsdu_len;
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD),
+ 0);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+ cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
+ cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
+ cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
+ cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
+ cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
+ cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
if (cmd_ver == 4) {
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
sizeof(cfg_cmd), &cfg_cmd);
@@ -474,8 +491,9 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0) < 3)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD), 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
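
The repeated (group, command) argument pairs throughout this file give way to a single combined ID: WIDE_ID() packs the group into the high byte, so iwl_fw_lookup_cmd_ver() can take one u32 where it previously took two arguments. A sketch of the packing this relies on (WIDE_ID mirrors the real iwlwifi macro; the decode helpers are hypothetical, for illustration only):

#include <stdint.h>

/* Mirrors iwlwifi's WIDE_ID(): group in bits 15:8, opcode in bits 7:0. */
#define WIDE_ID(grp, opcode) ((uint32_t)(((grp) << 8) | (opcode)))

/* Hypothetical decode helpers, to show the layout. */
static inline uint8_t cmd_opcode(uint32_t id) { return id & 0xff; }
static inline uint8_t cmd_group(uint32_t id)  { return (id >> 8) & 0xff; }

So a call like iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD), 0) carries the same information the old three-argument form did.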
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index f4d02f9fe16d..0b50b816684a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -135,10 +135,10 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
return false;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return false;
if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
@@ -157,7 +157,7 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- if (!sta->ht_cap.ht_supported)
+ if (!sta->deflink.ht_cap.ht_supported)
return false;
return true;
@@ -167,8 +167,8 @@ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
if (is_ht20(rate) && (ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
@@ -454,8 +454,6 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
};
-#define MCS_INDEX_PER_STREAM (8)
-
static const char *rs_pretty_lq_type(enum iwl_table_type type)
{
static const char * const lq_types[] = {
@@ -1371,13 +1369,13 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
- struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->deflink.vht_cap;
struct ieee80211_vht_cap vht_cap = {
.vht_cap_info = cpu_to_le32(sta_vht_cap->cap),
.supp_mcs = sta_vht_cap->vht_mcs,
};
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
/*
* Don't use 160 MHz if VHT extended NSS support
@@ -1390,7 +1388,7 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
if (ieee80211_get_vht_max_nss(&vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true,
- sta->rx_nss) < sta->rx_nss)
+ sta->deflink.rx_nss) < sta->deflink.rx_nss)
return RATE_MCS_CHAN_WIDTH_80;
return RATE_MCS_CHAN_WIDTH_160;
case IEEE80211_STA_RX_BW_80:
@@ -1493,7 +1491,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
- sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
+ sta->deflink.agg.max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
@@ -1508,22 +1506,23 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvmsta->vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ mvmsta->max_amsdu_len = sta->deflink.agg.max_amsdu_len;
else
- mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500);
+ mvmsta->max_amsdu_len =
+ min_t(int, sta->deflink.agg.max_amsdu_len, 8500);
- sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+ sta->deflink.agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled)
- sta->max_tid_amsdu_len[i] =
+ sta->deflink.agg.max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
- sta->max_tid_amsdu_len[i] = 1;
+ sta->deflink.agg.max_tid_amsdu_len[i] = 1;
}
}
@@ -1863,7 +1862,7 @@ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int index = rate->index;
bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
- !vif->bss_conf.ps);
+ !vif->cfg.ps);
IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
cam, sta_ps_disabled);
@@ -1982,7 +1981,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
#endif
rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON(!chanctx_conf))
band = NUM_NL80211_BANDS;
else
@@ -2539,7 +2538,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
* In case of VHT/HT when the rssi is low fallback to the case of
* legacy rates.
*/
- if (sta->vht_cap.vht_supported &&
+ if (sta->deflink.vht_cap.vht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
/*
* In AP mode, when a new station associates, rs is initialized
@@ -2565,14 +2564,15 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
break;
default:
- IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth);
+ IWL_ERR(mvm, "Invalid BW %d\n",
+ sta->deflink.bandwidth);
goto out;
}
active_rate = lq_sta->active_siso_rate;
rate->type = LQ_VHT_SISO;
rate->bw = bw;
- } else if (sta->ht_cap.ht_supported &&
+ } else if (sta->deflink.ht_cap.ht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
initial_rates = rs_optimal_rates_ht;
nentries = ARRAY_SIZE(rs_optimal_rates_ht);
@@ -2763,14 +2763,14 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
- sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
continue;
lq_sta->active_siso_rate |= BIT(i);
}
}
- if (sta->rx_nss < 2)
+ if (sta->deflink.rx_nss < 2)
return;
highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
@@ -2781,7 +2781,7 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
- sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
continue;
lq_sta->active_mimo2_rate |= BIT(i);
@@ -2918,8 +2918,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
struct ieee80211_supported_band *sband;
@@ -2934,7 +2934,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = mvmsta->sta_id;
mvmsta->amsdu_enabled = 0;
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ mvmsta->max_amsdu_len = sta->cur->max_amsdu_len;
for (j = 0; j < LQ_SIZE; j++)
rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
@@ -2955,7 +2955,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/*
* active legacy rates as per supported rates bitmap
*/
- supp = sta->supp_rates[sband->band];
+ supp = sta->deflink.supp_rates[sband->band];
lq_sta->active_legacy_rate = 0;
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
@@ -3248,7 +3248,7 @@ static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta->supp_rates[info->band])
+ if (sta->deflink.supp_rates[info->band])
rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
}
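
All the sta->ht_cap/vht_cap/bandwidth/rx_nss accesses in rs.c and rs-fw.c become sta->deflink.*: with MLO, per-link station data lives in struct ieee80211_link_sta, and deflink is the embedded default link used for non-MLO peers; the A-MSDU limits likewise move under deflink.agg. A sketch of the indirection, with heavily simplified field subsets (not the real mac80211 layouts):

struct sta_agg {                /* A-MSDU limits (ieee80211_sta_aggregates) */
    int max_amsdu_len;
};

struct link_sta {               /* per-link data (ieee80211_link_sta) */
    int bandwidth;
    int rx_nss;
    struct sta_agg agg;
};

struct sta {                    /* struct ieee80211_sta, simplified */
    struct link_sta deflink;    /* default link for non-MLO peers */
};

/* Old: sta->max_amsdu_len. New: go through the (default) link. */
static int sta_max_amsdu(const struct sta *sta)
{
    return sta->deflink.agg.max_amsdu_len;
}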
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 64446a11ef98..49ca1e168fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen = len - hdrlen;
if (fraglen) {
- int offset = (void *)hdr + hdrlen -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + hdrlen -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
@@ -327,17 +327,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
/*
- * drop the packet if it has failed being decrypted by HW
- */
- if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
- &crypt_len)) {
- IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
- rx_pkt_status);
- kfree_skb(skb);
- return;
- }
-
- /*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
*/
@@ -388,6 +377,37 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mvmsta->vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /*
+ * Don't even try to decrypt a MCAST frame that was received
+	 * before the managed vif is authorized; we'd fail anyway.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !mvmvif->authorized &&
+ is_multicast_ether_addr(hdr->addr1)) {
+ IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n");
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ /*
+ * drop the packet if it has failed being decrypted by HW
+ */
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+ &crypt_len)) {
+ IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+ rx_pkt_status);
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
rcu_dereference(mvm->csa_tx_blocked_vif);
struct iwl_fw_dbg_trigger_tlv *trig;
@@ -640,7 +660,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
- if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
+ if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
return;
if (vif->type != NL80211_IFTYPE_STATION)
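
The hunk above moves the decryption verdict after a new early drop: multicast frames that arrive on a station interface before it is authorized are discarded without attempting decryption, since the group keys cannot be installed yet. The resulting order of checks, sketched with simplified stand-in types:

#include <stdbool.h>

struct frame { bool mcast; };
struct vif   { int type; bool authorized; };

enum { IFTYPE_STATION = 1 };

/* True if the frame should be dropped before any decryption check. */
static bool drop_early_mcast(const struct vif *vif, const struct frame *f)
{
    return vif->type == IFTYPE_STATION && !vif->authorized && f->mcast;
}

/* Order after the patch: early MCAST drop, then the HW-decrypt verdict. */
static bool rx_accept(const struct vif *vif, const struct frame *f,
                      bool decrypt_ok)
{
    if (drop_early_mcast(vif, f))
        return false;    /* "MCAST before the vif is authorized" */
    if (!decrypt_ok)
        return false;    /* "Bad decryption results" */
    return true;
}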
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 295629c5c035..1aadccd8841f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -209,13 +209,16 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
+ else
+ /* mac80211 assumes full CSUM including SNAP header */
+ skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
fraglen = len - headlen;
if (fraglen) {
- int offset = (void *)hdr + headlen + pad_len -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + headlen + pad_len -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
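
The new else branch covers the CHECKSUM_COMPLETE case: the device checksummed the payload, but mac80211 expects the running sum to also cover the SNAP header that was just rebuilt, so the skb checksum must be extended over those pushed bytes. A sketch of what a postpush-style fixup does, using a toy 16-bit ones'-complement accumulator as a stand-in for the kernel's csum helpers:

#include <stddef.h>
#include <stdint.h>

/* Toy ones'-complement sum, standing in for csum_partial(). */
static uint32_t csum_add_bytes(uint32_t sum, const uint8_t *p, size_t len)
{
    while (len >= 2) {
        sum += (uint32_t)((p[0] << 8) | p[1]);
        p += 2;
        len -= 2;
    }
    if (len)
        sum += (uint32_t)(p[0] << 8);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

/* After pushing 'len' header bytes at 'hdr', fold them into the running
 * receive checksum, which is what skb_postpush_rcsum() does for skbs. */
static uint32_t postpush_rcsum(uint32_t skb_csum, const uint8_t *hdr,
                               size_t len)
{
    return csum_add_bytes(skb_csum, hdr, len);
}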
@@ -1188,16 +1191,22 @@ struct iwl_mvm_rx_phy_data {
enum iwl_rx_phy_info_type info_type;
__le32 d0, d1, d2, d3;
__le16 d4;
+
+ u32 rate_n_flags;
+ u32 gp2_on_air_rise;
+ u16 phy_info;
+ u8 energy_a, energy_b;
+ u8 channel;
};
static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags,
struct ieee80211_radiotap_he_mu *he_mu)
{
u32 phy_data2 = le32_to_cpu(phy_data->d2);
u32 phy_data3 = le32_to_cpu(phy_data->d3);
u16 phy_data4 = le16_to_cpu(phy_data->d4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
he_mu->flags1 |=
@@ -1243,7 +1252,6 @@ static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
static void
iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status)
@@ -1257,6 +1265,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
 * the TSF/timers are not transmitted in HE-MU.
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
u8 offs = 0;
@@ -1328,7 +1337,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status,
- u32 rate_n_flags, int queue)
+ int queue)
{
switch (phy_data->info_type) {
case IWL_RX_PHY_INFO_TYPE_NONE:
@@ -1427,7 +1436,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
le16_encode_bits(le16_get_bits(phy_data->d4,
IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
- iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu);
+ iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu);
fallthrough;
case IWL_RX_PHY_INFO_TYPE_HE_MU:
he_mu->flags2 |=
@@ -1441,8 +1450,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
fallthrough;
case IWL_RX_PHY_INFO_TYPE_HE_TB:
case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
- iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags,
- he, he_mu, rx_status);
+ iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
break;
case IWL_RX_PHY_INFO_TYPE_HE_SU:
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
@@ -1458,13 +1466,14 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags, u16 phy_info, int queue)
+ int queue)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 rate_n_flags = phy_data->rate_n_flags;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
- u8 stbc, ltf;
+ u8 ltf;
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
@@ -1481,6 +1490,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
};
+ u16 phy_info = phy_data->phy_info;
he = skb_put_data(skb, &known, sizeof(known));
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
@@ -1501,7 +1511,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
- rate_n_flags, queue);
+ queue);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
@@ -1528,19 +1538,6 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
- stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_MCS_NSS_MSK) >>
- RATE_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_HE;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
-
- rx_status->he_dcm =
- !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
-
#define CHECK_TYPE(F) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
(RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
@@ -1658,6 +1655,107 @@ static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
rx_sta_csa->all_sta_unblocked = false;
}
+/*
+ * Note: this also requires rx_status->band to be prefilled, as well
+ * as phy_data (apart from phy_data->info_type)
+ */
+static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ bool is_sgi;
+
+ phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
+ if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ phy_data->info_type =
+ le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ /* must be before L-SIG data */
+ if (format == RATE_MCS_HE_MSK)
+ iwl_mvm_rx_he(mvm, skb, phy_data, queue);
+
+ iwl_mvm_decode_lsig(skb, phy_data);
+
+ rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->band);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
+ phy_data->energy_a, phy_data->energy_b);
+
+ if (unlikely(mvm->monitor_on))
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
+
+ is_sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate_n_flags) :
+ rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
+ rx_status->encoding = RX_ENC_VHT;
+ break;
+ case RATE_MCS_HE_MSK:
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+ break;
+ }
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ case RATE_MCS_VHT_MSK:
+ case RATE_MCS_HE_MSK:
+ rx_status->nss =
+ u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ default: {
+ int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ rx_status->rate_idx = rate;
+
+ if (WARN_ONCE(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band))
+ rx_status->rate_idx = 0;
+ break;
+ }
+ }
+}
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
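
iwl_mvm_rx_fill_status() above leans on u32_get_bits() to pull fields such as STBC and NSS out of rate_n_flags: the helper masks out a field and shifts it down by the mask's trailing zeros. A tiny standalone equivalent (the mask values below are illustrative, not the real RATE_MCS_* layout; __builtin_ctz is the GCC/Clang builtin):

#include <stdint.h>

/* Stand-in for the kernel's u32_get_bits(v, mask) from linux/bitfield.h:
 * mask out the field, then shift it down to bit 0. */
static inline uint32_t get_bits32(uint32_t v, uint32_t mask)
{
    return (v & mask) >> __builtin_ctz(mask);
}

/* Example masks, values illustrative only. */
#define NSS_MASK  0x00000180u
#define STBC_MASK 0x00000200u

/* The nss field is stored as (nss - 1), matching the "+ 1" in the patch. */
static inline uint32_t nss_from_rate(uint32_t rate_n_flags)
{
    return get_bits32(rate_n_flags, NSS_MASK) + 1;
}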
@@ -1667,17 +1765,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_hdr *hdr;
u32 len;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
- u32 rate_n_flags, gp2_on_air_rise;
- u16 phy_info;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0, channel, energy_a, energy_b;
+ u8 crypt_len = 0;
size_t desc_size;
- struct iwl_mvm_rx_phy_data phy_data = {
- .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
- };
+ struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
- bool is_sgi;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1693,35 +1786,37 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
- channel = desc->v3.channel;
- gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
- energy_a = desc->v3.energy_a;
- energy_b = desc->v3.energy_b;
+ phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data.channel = desc->v3.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ phy_data.energy_a = desc->v3.energy_a;
+ phy_data.energy_b = desc->v3.energy_b;
phy_data.d0 = desc->v3.phy_data0;
phy_data.d1 = desc->v3.phy_data1;
phy_data.d2 = desc->v3.phy_data2;
phy_data.d3 = desc->v3.phy_data3;
} else {
- rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
- channel = desc->v1.channel;
- gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
- energy_a = desc->v1.energy_a;
- energy_b = desc->v1.energy_b;
+ phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ phy_data.channel = desc->v1.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ phy_data.energy_a = desc->v1.energy_a;
+ phy_data.energy_b = desc->v1.energy_b;
phy_data.d0 = desc->v1.phy_data0;
phy_data.d1 = desc->v1.phy_data1;
phy_data.d2 = desc->v1.phy_data2;
phy_data.d3 = desc->v1.phy_data3;
}
+
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
REPLY_RX_MPDU_CMD, 0) < 4) {
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
- rate_n_flags);
+ phy_data.rate_n_flags);
}
- format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
len = le16_to_cpu(desc->mpdu_len);
@@ -1730,14 +1825,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
}
- phy_info = le16_to_cpu(desc->phy_info);
+ phy_data.phy_info = le16_to_cpu(desc->phy_info);
phy_data.d4 = desc->phy_data4;
- if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
- phy_data.info_type =
- le32_get_bits(phy_data.d1,
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
-
hdr = (void *)(pkt->data + desc_size);
/* Don't use dev_alloc_skb(); we'll have enough headroom once the
* ieee80211_hdr is pulled.
@@ -1760,27 +1850,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
- /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (format == RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
- phy_info, queue);
-
- iwl_mvm_decode_lsig(skb, &phy_data);
-
/*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
@@ -1791,12 +1860,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
le32_to_cpu(desc->status));
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
+
/* set the preamble flag if appropriate */
if (format == RATE_MCS_CCK_MSK &&
- phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
u64 tsf_on_air_rise;
if (mvm->trans->trans_cfg->device_family >=
@@ -1810,24 +1880,20 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
}
- rx_status->device_timestamp = gp2_on_air_rise;
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
} else {
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
}
- rx_status->freq = ieee80211_channel_to_frequency(channel,
- rx_status->band);
- iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
- energy_b);
/* update aggregation data for monitor sake on default queue */
- if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
- bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit;
+ toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
/*
* Toggle is switched whenever new aggregation starts. Make
@@ -1843,9 +1909,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->ampdu_reference = mvm->ampdu_ref;
}
- if (unlikely(mvm->monitor_on))
- iwl_mvm_add_rtap_sniffer_config(mvm, skb);
-
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
@@ -1864,13 +1927,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
}
- if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc,
+ if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
kfree_skb(skb);
goto out;
}
+ iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
+
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
@@ -1968,43 +2033,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- is_sgi = format == RATE_MCS_HE_MSK ?
- iwl_he_is_sgi(rate_n_flags) :
- rate_n_flags & RATE_MCS_SGI_MSK;
-
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
- rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (format == RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (format == RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >>
- RATE_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_VHT;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (!(format == RATE_MCS_HE_MSK)) {
- int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
- rx_status->band);
-
- if (WARN(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band)) {
- kfree_skb(skb);
- goto out;
- }
- rx_status->rate_idx = rate;
- }
-
/* management stuff on default queue */
if (!queue) {
if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
@@ -2036,32 +2064,32 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_no_data *desc = (void *)pkt->data;
- u32 rate_n_flags = le32_to_cpu(desc->rate);
- u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
u32 rssi = le32_to_cpu(desc->rssi);
u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
- u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 channel, energy_a, energy_b;
- u32 format;
struct iwl_mvm_rx_phy_data phy_data = {
- .info_type = le32_get_bits(desc->phy_info[1],
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
.d0 = desc->phy_info[0],
.d1 = desc->phy_info[1],
+ .phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD,
+ .gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time),
+ .rate_n_flags = le32_to_cpu(desc->rate),
+ .energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK),
+ .energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK),
+ .channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK),
};
- bool is_sgi;
+ u32 format;
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
RX_NO_DATA_NOTIF, 0) < 2) {
IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
- rate_n_flags);
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ phy_data.rate_n_flags);
+ phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
- rate_n_flags);
+ phy_data.rate_n_flags);
}
- format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
return;
@@ -2069,10 +2097,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
- energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
- channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
-
/* Don't use dev_alloc_skb(); we'll have enough headroom once the
* ieee80211_hdr is pulled.
*/
@@ -2103,86 +2127,31 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
- /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (format == RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
- phy_info, queue);
-
- iwl_mvm_decode_lsig(skb, &phy_data);
-
- rx_status->device_timestamp = gp2_on_air_rise;
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(channel,
- rx_status->band);
- iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
- energy_b);
- rcu_read_lock();
+ iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
- is_sgi = format == RATE_MCS_HE_MSK ?
- iwl_he_is_sgi(rate_n_flags) :
- rate_n_flags & RATE_MCS_SGI_MSK;
-
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
- rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (format == RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (format == RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_VHT;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
- /*
- * take the nss from the rx_vec since the rate_n_flags has
- * only 2 bits for the nss which gives a max of 4 ss but
- * there may be up to 8 spatial streams
- */
+ /*
+ * Override the nss from the rx_vec since the rate_n_flags has
+ * only 2 bits for the nss which gives a max of 4 ss but there
+ * may be up to 8 spatial streams.
+ */
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
- } else if (format == RATE_MCS_HE_MSK) {
+ break;
+ case RATE_MCS_HE_MSK:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
- } else {
- int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
- rx_status->band);
-
- if (WARN(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band)) {
- kfree_skb(skb);
- goto out;
- }
- rx_status->rate_idx = rate;
+ break;
}
+ rcu_read_lock();
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
-out:
rcu_read_unlock();
}
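
With iwl_mvm_rx_fill_status() in place, both receive paths fill struct iwl_mvm_rx_phy_data from their own descriptor variant and then share the bandwidth/encoding/rate decode, instead of each open-coding it. The resulting call shape, sketched with bodies elided to control flow only (names mirror the driver, logic heavily simplified):

struct phy_data { unsigned int rate_n_flags, phy_info, channel; };

static void fill_status(struct phy_data *pd) { /* common decode */ }

static void rx_mpdu(struct phy_data pd)     /* iwl_mvm_rx_mpdu_mq */
{
    /* ...crypto and drop checks first... */
    fill_status(&pd);
    /* ...deliver to mac80211... */
}

static void rx_no_data(struct phy_data pd)  /* iwl_mvm_rx_monitor_no_data */
{
    fill_status(&pd);
    /* ...NSS override from rx_vec, then deliver... */
}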
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 5f92a09db374..acd8803dbcdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -20,7 +20,6 @@
#define IWL_SCAN_DWELL_FRAGMENTED 44
#define IWL_SCAN_DWELL_EXTENDED 90
#define IWL_SCAN_NUM_OF_FRAGS 3
-#define IWL_SCAN_LAST_2_4_CHN 14
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
@@ -98,6 +97,7 @@ struct iwl_mvm_scan_params {
u32 n_6ghz_params;
bool scan_6ghz;
bool enable_6ghz_passive;
+ bool respect_p2p_go, respect_p2p_go_hb;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}
-static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int *global_cnt = data;
-
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
- mvmvif->phy_ctxt->id < NUM_PHY_CTX)
- *global_cnt += 1;
-}
-
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
return mvm->tcm.result.global_load;
@@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
-struct iwl_is_dcm_with_go_iterator_data {
+struct iwl_mvm_scan_iter_data {
+ u32 global_cnt;
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};
-static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_is_dcm_with_go_iterator_data *data = _data;
- struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif *curr_mvmvif =
- iwl_mvm_vif_from_mac80211(data->current_vif);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_scan_iter_data *data = _data;
+ struct iwl_mvm_vif *curr_mvmvif;
- /* exclude the given vif */
- if (vif == data->current_vif)
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+ data->global_cnt += 1;
+
+ if (!data->current_vif || vif == data->current_vif)
return;
+ curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
+
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
- other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
- other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+ mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}
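
Instead of two passes over the active interfaces (one to count scan candidates, one to detect a P2P GO on another PHY), the merged iterator gathers both facts in a single walk. The pattern, reduced to its essentials with simplified types:

#include <stdbool.h>

struct vif { int phy_id; bool is_p2p_go; };  /* phy_id < 0: no PHY ctxt */

struct scan_iter_data {
    unsigned int global_cnt;        /* vifs that hold a PHY context */
    const struct vif *current_vif;  /* the vif requesting the scan */
    bool is_dcm_with_p2p_go;        /* GO active on a different PHY */
};

/* One callback accumulates everything the scan-type decision needs. */
static void scan_iter(struct scan_iter_data *d, const struct vif *vif)
{
    if (vif->phy_id >= 0)
        d->global_cnt++;

    if (!d->current_vif || vif == d->current_vif)
        return;

    if (vif->is_p2p_go && d->current_vif->phy_id >= 0 &&
        vif->phy_id != d->current_vif->phy_id)
        d->is_dcm_with_p2p_go = true;
}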
@@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
- int global_cnt = 0;
+ struct iwl_mvm_scan_iter_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ .global_cnt = 0,
+ };
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_scan_condition_iterator,
- &global_cnt);
- if (!global_cnt)
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_iterator,
+ &data);
+
+ if (!data.global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
if (fw_has_api(&mvm->fw->ucode_capa,
@@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;
- /* in case of DCM with GO where BSS DTIM interval < 220msec
+ /*
+ * in case of DCM with GO where BSS DTIM interval < 220msec
* set all scan requests as fast-balance scan
- * */
+ */
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- vif->bss_conf.dtim_period < 220) {
- struct iwl_is_dcm_with_go_iterator_data data = {
- .current_vif = vif,
- .is_dcm_with_p2p_go = false,
- };
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_is_dcm_with_go_iterator,
- &data);
- if (data.is_dcm_with_p2p_go)
- return IWL_SCAN_TYPE_FAST_BALANCE;
- }
+ vif->bss_conf.dtim_period < 220 &&
+ data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
}
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
@@ -651,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
NL80211_BAND_2GHZ,
no_cck);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
@@ -1090,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
- WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12);
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1142,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
- WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12);
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1156,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
void *cfg;
int ret, cmd_size;
struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
};
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
@@ -1247,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config cfg;
struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
.len[0] = sizeof(cfg),
.data[0] = &cfg,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@@ -1258,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
cfg.bcast_sta_id = mvm->aux_sta.sta_id;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- SCAN_CFG_CMD, 0) < 5) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
/*
* Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
* version 5.
@@ -1662,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
}
}
-static int
+static void
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct iwl_scan_probe_params_v4 *pp)
@@ -1731,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
pp->short_ssid_num = idex_s;
pp->bssid_num = idex_b;
- return 0;
}
/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
-static void
-iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
+static u32
+iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
u32 n_channels,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_channel_params_v6 *cp,
enum nl80211_iftype vif_type)
{
- struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
int i;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
+ u32 ch_cnt;
- for (i = 0; i < params->n_channels; i++) {
+ for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
struct iwl_scan_channel_cfg_umac *cfg =
- &cp->channel_config[i];
+ &cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
bool force_passive, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
+ /*
+		 * Avoid performing passive scan on non-PSC channels unless the
+ * scan is specifically a passive scan, i.e., no SSIDs
+ * configured in the scan command.
+ */
+ if (!cfg80211_channel_is_psc(params->channels[i]) &&
+ !params->n_6ghz_params && params->n_ssids)
+ continue;
+
cfg->v1.channel_num = params->channels[i]->hw_value;
cfg->v2.band = 2;
cfg->v2.iter_count = 1;
@@ -1875,8 +1868,16 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
else
flags |= bssid_bitmap | (s_ssid_bitmap << 16);
- channel_cfg[i].flags |= cpu_to_le32(flags);
+ cfg->flags |= cpu_to_le32(flags);
+ ch_cnt++;
}
+
+ if (params->n_channels > ch_cnt)
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz: reducing number channels: (%u->%u)\n",
+ params->n_channels, ch_cnt);
+
+ return ch_cnt;
}
static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
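
The 6 GHz channel loop above decouples the output index (ch_cnt) from the input index so that non-PSC channels can be skipped entirely for active scans, and it now reports how many channels were actually configured. The filtering condition, restated as a small predicate (cfg80211_channel_is_psc() is the real cfg80211 helper; the surrounding types are simplified):

#include <stdbool.h>

struct chan_params {
    bool is_psc;                 /* cfg80211_channel_is_psc(chan) */
    unsigned int n_6ghz_params;  /* 6 GHz scan hints provided */
    unsigned int n_ssids;        /* non-zero means an active scan */
};

/* Skip a non-PSC 6 GHz channel unless the request is purely passive
 * (no SSIDs configured and no 6 GHz parameters at all). */
static bool skip_6ghz_channel(const struct chan_params *p)
{
    return !p->is_psc && !p->n_6ghz_params && p->n_ssids != 0;
}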
@@ -1893,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
/* set fragmented ebs for fragmented scan on HB channels */
- if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type)) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type)))
flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+ /*
+	 * force EBS in case the scan is fragmented and there is a need to take P2P
+ * GO operation into consideration during scan operation.
+ */
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type) &&
+ params->respect_p2p_go_hb)) {
+ IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
+ flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+ }
+
return flags;
}
@@ -1931,14 +1948,14 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
* reset or resume flow, or while not associated and a large interval
* has passed since the last 6GHz passive scan.
*/
- if ((vif->bss_conf.assoc ||
+ if ((vif->cfg.assoc ||
time_after(mvm->last_6ghz_passive_scan_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
(time_before(mvm->last_reset_or_resume_time_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
jiffies))) {
IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
- vif->bss_conf.assoc ? "associated" :
+ vif->cfg.assoc ? "associated" :
"timeout did not expire");
return;
}
@@ -2046,6 +2063,26 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
return flags;
}
+static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif, int type)
+{
+ u8 flags = 0;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ if (params->respect_p2p_go)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
+ if (params->respect_p2p_go_hb)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ } else {
+ if (params->respect_p2p_go)
+ flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ }
+
+ return flags;
+}
+
static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -2164,7 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
- void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
+ void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_scan_req_umac_tail_v2 *tail_v2 =
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
@@ -2248,13 +2285,17 @@ iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_general_params_v11 *gp,
- u16 gen_flags)
+ u16 gen_flags, u8 gen_flags2)
{
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
+ IWL_DEBUG_SCAN(mvm, "Gerenal: flags=0x%x, flags2=0x%x\n",
+ gen_flags, gen_flags2);
+
gp->flags = cpu_to_le16(gen_flags);
+ gp->flags2 = gen_flags2;
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@@ -2358,7 +2399,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
- gen_flags);
+ gen_flags, 0);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@@ -2384,6 +2425,7 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
int ret;
u16 gen_flags;
+ u8 gen_flags2;
u32 bitmap_ssid = 0;
mvm->scan_uid_status[uid] = type;
@@ -2392,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+
+ if (version >= 15)
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+ else
+ gen_flags2 = 0;
+
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
- gen_flags);
+ gen_flags, gen_flags2);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@@ -2417,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
- ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
- if (ret)
- return ret;
+ iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
+
+ cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params,
+ params->n_channels,
+ pb, cp, vif->type);
+ if (!cp->count) {
+ mvm->scan_uid_status[uid] = 0;
+ return -EINVAL;
+ }
- iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
- params->n_channels,
- pb, cp, vif->type);
- cp->count = params->n_channels;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
@@ -2576,7 +2626,7 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
u8 scan_ver;
lockdep_assert_held(&mvm->mutex);
- memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
+ memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -2588,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (uid < 0)
return uid;
- hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+ hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
- scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
@@ -2611,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
return uid;
}
+struct iwl_mvm_scan_respect_p2p_go_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool p2p_go;
+ enum nl80211_band band;
+};
+
+static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX &&
+ (data->band == NUM_NL80211_BANDS ||
+ mvmvif->phy_ctxt->channel->band == data->band))
+ data->p2p_go = true;
+}
+
+static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum nl80211_band band)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
+ .current_vif = vif,
+ .p2p_go = false,
+ .band = band,
+ };
+
+ if (!low_latency)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_respect_p2p_go_iter,
+ &data);
+
+ return data.p2p_go;
+}
+
+static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ bool low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
+}
+
+static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ bool low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
+ NUM_NL80211_BANDS);
+}
+
+static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->respect_p2p_go =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_2GHZ);
+ params->respect_p2p_go_hb =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
+ }
+}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -2662,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_6ghz_params = req->scan_6ghz_params;
params.scan_6ghz = req->scan_6ghz;
iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
if (req->duration)
params.iter_notif = true;
@@ -2753,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.scan_plans = req->scan_plans;
iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@@ -2922,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
ret = iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SCAN_ABORT_UMAC,
- IWL_ALWAYS_LONG_GROUP, 0),
+ WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@@ -2962,7 +3091,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
-static int iwl_scan_req_umac_get_size(u8 scan_ver)
+static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
case 12:
@@ -2975,11 +3104,10 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
return 0;
}
-int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
- u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
base_size = iwl_scan_req_umac_get_size(scan_ver);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 655da8856c75..1f4ac1e93cee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -31,7 +31,7 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION) {
data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
data->sta_vif_state = SF_FULL_ON;
else
data->sta_vif_state = SF_INIT_OFF;
@@ -106,10 +106,10 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
* capabilities of the AP station, and choose the watermark accordingly.
*/
if (sta) {
- if (sta->ht_cap.ht_supported ||
- sta->vht_cap.vht_supported ||
- sta->he_cap.has_he) {
- switch (sta->rx_nss) {
+ if (sta->deflink.ht_cap.ht_supported ||
+ sta->deflink.vht_cap.vht_supported ||
+ sta->deflink.he_cap.has_he) {
+ switch (sta->deflink.rx_nss) {
case 1:
watermark = SF_W_MARK_SISO;
break;
@@ -261,7 +261,7 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
return -EINVAL;
if (changed_vif->type != NL80211_IFTYPE_STATION) {
new_state = SF_UNINIT;
- } else if (changed_vif->bss_conf.assoc &&
+ } else if (changed_vif->cfg.assoc &&
changed_vif->bss_conf.dtim_period) {
mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
sta_id = mvmvif->ap_sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index feab0bfcd7a2..cbd8053a9e35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -86,7 +86,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
}
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
case IEEE80211_STA_RX_BW_160:
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
fallthrough;
@@ -97,13 +98,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
fallthrough;
case IEEE80211_STA_RX_BW_20:
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
add_sta_cmd.station_flags |=
cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
break;
}
- switch (sta->rx_nss) {
+ switch (sta->deflink.rx_nss) {
case 1:
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
break;
@@ -115,7 +116,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
@@ -133,12 +134,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
- mpdu_dens = sta->ht_cap.ampdu_density;
+ mpdu_dens = sta->deflink.ht_cap.ampdu_density;
}
if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
@@ -146,18 +147,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
- mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa,
+ mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
- agg_size = le16_get_bits(sta->he_6ghz_capa.capa,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
- } else
- if (sta->vht_cap.vht_supported) {
- agg_size = sta->vht_cap.cap &
+ agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ agg_size = sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
agg_size >>=
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
- } else if (sta->ht_cap.ht_supported) {
- agg_size = sta->ht_cap.ampdu_factor;
+ } else if (sta->deflink.ht_cap.ht_supported) {
+ agg_size = sta->deflink.ht_cap.ampdu_factor;
}
/* D6.0 10.12.2 A-MPDU length limit rules
@@ -168,8 +168,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* Maximum AMPDU Length Exponent Extension field in its HE
* Capabilities element
*/
- if (sta->he_cap.has_he)
- agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
+ if (sta->deflink.he_cap.has_he)
+ agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
/* Limit to max A-MPDU supported by FW */
@@ -316,7 +316,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- u16 *queueptr, u8 tid, u8 flags)
+ u16 *queueptr, u8 tid)
{
int queue = *queueptr;
struct iwl_scd_txq_cfg_cmd cmd = {
@@ -325,11 +325,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
};
int ret;
+ lockdep_assert_held(&mvm->mutex);
+
if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (mvm->sta_remove_requires_queue_remove) {
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD);
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.queue = cpu_to_le32(queue),
+ };
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
+ sizeof(remove_cmd),
+ &remove_cmd);
+ } else {
+ ret = 0;
+ }
+
iwl_trans_txq_free(mvm->trans, queue);
*queueptr = IWL_MVM_INVALID_QUEUE;
- return 0;
+ return ret;
}
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
@@ -367,13 +384,14 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
iwl_mvm_txq_from_tid(sta, tid);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
if (ret)
@@ -461,6 +479,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -512,7 +531,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
- ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
+ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@@ -596,6 +615,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return queue;
}
+/* Re-configure the SCD for a queue that has already been configured */
+static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
+ "Trying to reconfig unallocated queue %d\n", queue))
+ return -ENXIO;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
+
/*
* If a given queue has a higher AC than the TID stream that is being compared
* to, the queue needs to be redirected to the lower AC. This function does that
@@ -716,21 +768,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
- int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- mvm->trans->cfg->min_256_ba_txq_size);
+ int queue, size;
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
mvm->trans->cfg->min_txq_size);
+ } else {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* this queue isn't used for traffic (cab_queue) */
+ if (IS_ERR_OR_NULL(sta)) {
+ size = IWL_MGMT_QUEUE_SIZE;
+ } else if (sta->deflink.he_cap.has_he) {
+ /* support for 256 ba size */
+ size = IWL_DEFAULT_QUEUE_SIZE_HE;
+ } else {
+ size = IWL_DEFAULT_QUEUE_SIZE;
+ }
+
+ rcu_read_unlock();
}
- do {
- __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
+ /* take the min with bc tbl entries allowed */
+ size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
- queue = iwl_trans_txq_alloc(mvm->trans, enable,
- sta_id, tid, SCD_QUEUE_CFG,
- size, timeout);
+	/* size needs to be a power of 2 for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
+ do {
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
+ tid, size, timeout);
if (queue < 0)
IWL_DEBUG_TX_QUEUES(mvm,
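The queue size chosen above (larger for HE stations) is clamped to the number of byte-count-table entries and then rounded down to a power of two, since the read/write pointers are derived by masking. A standalone sketch of the arithmetic under assumed sizes (the real bc_tbl_size depends on the transport configuration):

#include <stdint.h>
#include <stdio.h>

/* Assumed example values; the real ones depend on the transport config. */
#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define BC_TBL_SIZE 1424 /* bytes, hypothetical */

static uint32_t rounddown_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	uint32_t size = IWL_DEFAULT_QUEUE_SIZE_HE;
	uint32_t bc_entries = BC_TBL_SIZE / sizeof(uint16_t); /* 712 */

	if (size > bc_entries)
		size = bc_entries;	   /* min with bc tbl entries allowed */
	size = rounddown_pow_of_two(size); /* 712 -> 512 */
	printf("queue size = %u\n", (unsigned)size);
	return 0;
}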
@@ -989,7 +1060,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
unsigned long *unshare_queues,
unsigned long *changetid_queues)
{
- int tid;
+ unsigned int tid;
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->mutex);
@@ -1019,12 +1090,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- u16 tid_bitmap;
+ u16 q_tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
/*
* We need to take into account a situation in which a TXQ was
@@ -1037,7 +1108,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Mark this queue in the right bitmap, we'll send the command
* to the firmware later.
*/
- if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
set_bit(queue, changetid_queues);
IWL_DEBUG_TX_QUEUES(mvm,
@@ -1337,7 +1408,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
out_err:
queue_tmp = queue;
- iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
+ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
return ret;
}
@@ -1516,8 +1587,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
- 0) >= 12 &&
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
sta->type == IWL_STA_AUX_ACTIVITY)
cmd.mac_id_n_color = cpu_to_le32(mac_id);
else
@@ -1784,8 +1854,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
- 0);
+ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
@@ -1794,6 +1863,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
}
@@ -1881,7 +1951,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
return ret;
/* unassoc - go ahead - remove the AP STA now */
@@ -1993,7 +2063,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, queue,
- IWL_MAX_TID_COUNT, 0);
+ IWL_MAX_TID_COUNT);
return ret;
}
@@ -2065,7 +2135,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
- iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2082,7 +2152,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
- iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2199,7 +2269,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
}
queue = *queueptr;
- iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@@ -2434,7 +2504,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
- iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@@ -2443,8 +2513,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
-#define IWL_MAX_RX_BA_SESSIONS 16
-
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
struct iwl_mvm_delba_data notif = {
@@ -2526,18 +2594,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
}
+static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+ .sta_id = mvm_sta->sta_id,
+ .add_modify = STA_MODE_MODIFY,
+ };
+ u32 status;
+ int ret;
+
+ if (start) {
+ cmd.add_immediate_ba_tid = tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
+ cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
+ } else {
+ cmd.remove_immediate_ba_tid = tid;
+ cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
+ }
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
+ !(status & IWL_ADD_STA_BAID_VALID_MASK)))
+ return -EINVAL;
+ return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ return -ENOSPC;
+ default:
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ return -EIO;
+ }
+}
+
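On success the helper now returns the BAID extracted from the ADD_STA status word via u32_get_bits(). A standalone sketch of that mask-and-shift, using a hypothetical mask value rather than the firmware-defined IWL_ADD_STA_BAID_MASK:

#include <assert.h>
#include <stdint.h>

/* Hypothetical mask layout; the real IWL_ADD_STA_BAID_MASK is fw-defined. */
#define BAID_MASK 0x00001F00u

/* Same behaviour as the kernel's u32_get_bits(): shift by the mask's LSB. */
static uint32_t u32_get_bits(uint32_t val, uint32_t mask)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t status = (9u << 8) | 0x1u; /* BAID 9 plus unrelated bits */

	assert(u32_get_bits(status, BAID_MASK) == 9);
	return 0;
}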
+static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size, int baid)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+ cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ if (start) {
+ cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+ cmd.alloc.tid = tid;
+ cmd.alloc.ssn = cpu_to_le16(ssn);
+ cmd.alloc.win_size = cpu_to_le16(buf_size);
+ baid = -EIO;
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ cmd.remove_v1.baid = cpu_to_le32(baid);
+ BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+ } else {
+ cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+ cmd.remove.tid = cpu_to_le32(tid);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+ &cmd, &baid);
+ if (ret)
+ return ret;
+
+ if (!start) {
+ /* ignore firmware baid on remove */
+ baid = 0;
+ }
+
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+
+ if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
+ return -EINVAL;
+
+ return baid;
+}
+
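Removing a BAID has two wire formats: version 1 of RX_BAID_ALLOCATION_CONFIG_CMD addresses the session by BAID, while later versions use a station mask plus TID, and the layout is picked from the firmware-advertised command version. A simplified standalone sketch of that version switch (the struct fields are stand-ins, not the real fw API layouts):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the fw command layouts. */
struct remove_v1 { uint32_t baid; };
struct remove_v2 { uint32_t sta_id_mask; uint32_t tid; };

union remove_cmd {
	struct remove_v1 v1;
	struct remove_v2 v2;
};

static void fill_remove(union remove_cmd *cmd, int cmd_ver,
			int baid, int sta_id, int tid)
{
	memset(cmd, 0, sizeof(*cmd));
	if (cmd_ver == 1) {
		cmd->v1.baid = baid;		    /* old fw: address by BAID */
	} else {
		cmd->v2.sta_id_mask = 1u << sta_id; /* new fw: station + TID */
		cmd->v2.tid = tid;
	}
}

int main(void)
{
	union remove_cmd cmd;

	fill_remove(&cmd, 1, 5, 2, 3);
	printf("v1 baid=%u\n", cmd.v1.baid);
	fill_remove(&cmd, 2, 5, 2, 3);
	printf("v2 mask=0x%x tid=%u\n", cmd.v2.sta_id_mask, cmd.v2.tid);
	return 0;
}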
+static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn, u16 buf_size,
+ int baid)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
+ return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
+ tid, ssn, buf_size, baid);
+
+ return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
+ tid, ssn, buf_size);
+}
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd cmd = {};
struct iwl_mvm_baid_data *baid_data = NULL;
- int ret;
- u32 status;
+ int ret, baid;
+ u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
+ IWL_MAX_BAID_OLD;
lockdep_assert_held(&mvm->mutex);
- if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+ if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
return -ENOSPC;
}
@@ -2583,59 +2759,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
reorder_buf_size / sizeof(baid_data->entries[0]);
}
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.sta_id = mvm_sta->sta_id;
- cmd.add_modify = STA_MODE_MODIFY;
- if (start) {
- cmd.add_immediate_ba_tid = (u8) tid;
- cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16(buf_size);
+ if (iwl_mvm_has_new_rx_api(mvm) && !start) {
+ baid = mvm_sta->tid_to_baid[tid];
} else {
- cmd.remove_immediate_ba_tid = (u8) tid;
+ /* we don't really need it in this case */
+ baid = -1;
}
- cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
- STA_MODIFY_REMOVE_BA_TID;
-
- status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
- iwl_mvm_add_sta_cmd_size(mvm),
- &cmd, &status);
- if (ret)
- goto out_free;
- switch (status & IWL_ADD_STA_STATUS_MASK) {
- case ADD_STA_SUCCESS:
- IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
- start ? "start" : "stopp");
- break;
- case ADD_STA_IMMEDIATE_BA_FAILURE:
- IWL_WARN(mvm, "RX BA Session refused by fw\n");
- ret = -ENOSPC;
- break;
- default:
- ret = -EIO;
- IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
- start ? "start" : "stopp", status);
- break;
- }
+ /* Don't send command to remove (start=0) BAID during restart */
+ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
+ baid);
- if (ret)
+ if (baid < 0) {
+ ret = baid;
goto out_free;
+ }
if (start) {
- u8 baid;
-
mvm->rx_ba_sessions++;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
- if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
- ret = -EINVAL;
- goto out_free;
- }
- baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
- IWL_ADD_STA_BAID_SHIFT);
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
@@ -2663,7 +2809,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
} else {
- u8 baid = mvm_sta->tid_to_baid[tid];
+ baid = mvm_sta->tid_to_baid[tid];
if (mvm->rx_ba_sessions > 0)
/* check that restart flow didn't zero the counter */
@@ -3238,8 +3384,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
- int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA_KEY,
+ int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
new_api ? 2 : 1);
if (sta_id == IWL_MVM_INVALID_STA)
@@ -3939,7 +4084,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
- if (!WARN_ON(!mvmsta))
+ if (mvmsta)
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
rcu_read_unlock();
@@ -3998,3 +4143,21 @@ out:
iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
+
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 mac_id)
+{
+ struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
+ .mac_id = cpu_to_le32(mac_id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
+ CMD_ASYNC,
+ sizeof(cancel_channel_switch_cmd),
+ &cancel_channel_switch_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to cancel the channel switch\n");
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e34b82b2a288..f1a4fc3e4038 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -548,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
u8 *key, u32 key_len);
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 mac_id);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index bf04326e35ff..674dd137fb9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2020, 2022 Intel Corporation
*/
#include <linux/etherdevice.h>
#include "mvm.h"
@@ -380,7 +380,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
type == TDLS_MOVE_CH) {
/* we need to return to base channel */
struct ieee80211_chanctx_conf *chanctx =
- rcu_dereference(vif->chanctx_conf);
+ rcu_dereference(vif->bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx)) {
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index ab06dcda1462..ed8ba81a6043 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -97,8 +97,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 	/* In newer versions of this command an aux station is added only
 	 * in case of a dedicated tx queue and needs to be removed at the
 	 * end of use */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12)
iwl_mvm_rm_aux_sta(mvm);
}
@@ -124,7 +123,7 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
rcu_read_lock();
csa_vif = rcu_dereference(mvm->csa_vif);
- if (!csa_vif || !csa_vif->csa_active)
+ if (!csa_vif || !csa_vif->bss_conf.csa_active)
goto out_unlock;
IWL_DEBUG_TE(mvm, "CSA NOA started\n");
@@ -161,7 +160,7 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION)
return false;
- if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
+ if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
vif->bss_conf.dtim_period)
return false;
if (errmsg)
@@ -177,7 +176,7 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
rcu_read_unlock();
}
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
/*
* When not associated, this will be called from
* iwl_mvm_event_mlme_callback_ini()
@@ -347,7 +346,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
- !te_data->vif->bss_conf.assoc ?
+ !te_data->vif->cfg.assoc ?
"Not associated and the time event is over already..." :
"No beacon heard and the time event is over already...");
break;
@@ -658,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm,
@@ -860,7 +859,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, vif,
- !vif->bss_conf.assoc ?
+ !vif->cfg.assoc ?
"Not associated and the session protection is over already..." :
"No beacon heard and the session protection is over already...");
spin_lock_bh(&mvm->time_event_lock);
@@ -923,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
}
cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
}
@@ -1162,8 +1161,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
- MAC_CONF_GROUP, 0) };
+ const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
@@ -1201,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
if (!wait_for_notif) {
if (iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
@@ -1219,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
iwl_mvm_session_prot_notif, NULL);
if (iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 398390c59344..69cf3a372759 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -160,6 +160,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
notif = (struct ct_kill_notif *)pkt->data;
IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
notif->temperature);
+ if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
+ CT_KILL_NOTIFICATION, 0) > 1)
+ IWL_DEBUG_TEMP(mvm,
+ "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
+ notif->dts, notif->scheme);
iwl_mvm_enter_ctkill(mvm);
}
@@ -240,8 +245,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
* a response. For older versions we send the command and wait for a
* notification (no command TLV for previous versions).
*/
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1)
return iwl_mvm_send_temp_cmd(mvm, true, temp);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6fa2c12f7955..86d20e13bf47 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -318,15 +318,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
/* info->control is only relevant for non HW rate control */
if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_data(fc),
"Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
info->control.rates[0].flags,
info->control.rates[0].idx,
- le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
+ le16_to_cpu(fc),
+ sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
rate_idx = info->control.rates[0].idx;
}
@@ -351,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
/* Set CCK or OFDM flag */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
if (!is_cck)
rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
else
@@ -654,7 +653,8 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
struct iwl_probe_resp_data *resp_data;
- u8 *ie, *pos;
+ const u8 *ie;
+ u8 *pos;
u8 match[] = {
(WLAN_OUI_WFA >> 16) & 0xff,
(WLAN_OUI_WFA >> 8) & 0xff,
@@ -671,10 +671,10 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
if (!resp_data->notif.noa_active)
goto out;
- ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
- mgmt->u.probe_resp.variable,
- skb->len - base_len,
- match, 4, 2);
+ ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+ mgmt->u.probe_resp.variable,
+ skb->len - base_len,
+ match, 4, 2);
if (!ie) {
IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
goto out;
@@ -794,7 +794,7 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);
/* For HE redirect to trigger based fifos */
- if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
ac += 4;
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
@@ -926,7 +926,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* Take the min of ieee80211 station and mvm station
*/
max_amsdu_len =
- min_t(unsigned int, sta->max_amsdu_len,
+ min_t(unsigned int, sta->cur->max_amsdu_len,
iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
@@ -935,7 +935,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* section 8.7.3 NOTE 3).
*/
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- !sta->vht_cap.vht_supported)
+ !sta->deflink.vht_cap.vht_supported)
max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
/* Sub frame header + SNAP + IP header + TCP header + MSS */
@@ -1083,7 +1083,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
- if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
+ if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
return -1;
if (unlikely(ieee80211_is_probe_resp(fc)))
@@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
struct ieee80211_tx_rate *r = &info->status.rates[0];
if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 6)
+ TX_CMD, 0) <= 6)
rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
info->status.antenna =
@@ -1602,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
if (unlikely(!seq_ctl)) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
-
/*
* If it is an NDP, we can't update next_reclaim since
* its sequence control is 0. Note that for that same
@@ -1961,7 +1959,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (mvmsta->vif)
chanctx_conf =
- rcu_dereference(mvmsta->vif->chanctx_conf);
+ rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 1f3e90e5dbd4..14b2de65bd84 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -169,8 +169,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 8)
+ if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
/* In the new rate legacy rates are indexed:
* 0 - 3 for CCK and 0 - 7 for OFDM.
*/
@@ -241,38 +240,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
- int tid, int frame_limit, u16 ssn)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = frame_limit,
- .sta_id = sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = fifo,
- .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
- queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
- .tid = tid,
- };
- int ret;
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return -EINVAL;
-
- if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
- "Trying to reconfig unallocated queue %d\n", queue))
- return -ENXIO;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
- queue, fifo, ret);
-
- return ret;
-}
-
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @mvm: Driver data.
@@ -337,7 +304,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
smps_mode = IEEE80211_SMPS_DYNAMIC;
}
- ieee80211_request_smps(vif, smps_mode);
+ ieee80211_request_smps(vif, 0, smps_mode);
}
static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
@@ -480,8 +447,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
cmd.low_latency_tx = 1;
}
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
- MAC_CONF_GROUP, 0),
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send low latency command\n");
}
@@ -638,7 +604,7 @@ static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
if (vif->type != NL80211_IFTYPE_STATION)
return;
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
data->assoc = true;
}
@@ -850,7 +816,7 @@ static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.assoc)
+ if (!vif->cfg.assoc)
return;
if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 78450366312b..080a1587caa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
mutex_lock(&mvm->mutex);
- iwl_mvm_mei_get_ownership(mvm);
+ ret = iwl_mvm_mei_get_ownership(mvm);
mutex_unlock(&mvm->mutex);
- return 0;
+ return ret;
}
static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 85a6da70ca78..75fd386b048e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -125,6 +125,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
+ if (trans->trans_cfg->imr_enabled)
+ control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 5178e852c5d3..4f699862e7f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -491,18 +491,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* So devices */
{IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)},
- {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
- {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -668,8 +671,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
/* SO with GF2 */
IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
@@ -682,6 +685,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
/* MA with GF2 */
IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),
@@ -1150,10 +1155,20 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_NO_JACKET,
+ iwl_cfg_bz_a0_fm4_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
iwl_cfg_gl_a0_fm_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
+ iwl_cfg_gl_b0_fm_b0, iwl_bz_name),
/* BZ Z step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1164,11 +1179,16 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
/* BNJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_b0_fm_b0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
@@ -1301,7 +1321,30 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
+/* MsP */
+/* For now we use the same FW as MR, but this will change in the future. */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a43e56c7689f..f7e4f868363d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -104,6 +104,18 @@ struct iwl_rx_completion_desc {
} __packed;
/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+ __le16 rbid;
+ u8 flags;
+ u8 reserved[1];
+} __packed;
+
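The new Bz completion descriptor packs the buffer ID, the flags byte and one reserved byte into exactly four bytes; the rx path later asserts this with BUILD_BUG_ON. A standalone mirror of that layout check, with uint16_t standing in for __le16:

#include <stdint.h>

struct rx_completion_desc_bz {
	uint16_t rbid;      /* __le16 in the driver */
	uint8_t flags;      /* bit 0: fragmented */
	uint8_t reserved[1];
} __attribute__((packed));

/* mirrors the BUILD_BUG_ON(sizeof(...) != 4) added in the rx path */
_Static_assert(sizeof(struct rx_completion_desc_bz) == 4,
	       "Bz completion descriptor must be 4 bytes");

int main(void) { return 0; }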
+/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
@@ -133,11 +145,7 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- union {
- void *used_bd;
- __le32 *bd_32;
- struct iwl_rx_completion_desc *cd;
- };
+ void *used_bd;
dma_addr_t used_bd_dma;
u32 read;
u32 write;
@@ -262,6 +270,20 @@ enum iwl_pcie_fw_reset_state {
};
/**
+ * enum iwl_pcie_imr_status - IMR DMA transfer state
+ * @IMR_D2S_IDLE: default value of the DMA transfer
+ * @IMR_D2S_REQUESTED: DMA transfer requested
+ * @IMR_D2S_COMPLETED: DMA transfer completed
+ * @IMR_D2S_ERROR: DMA transfer error
+ */
+enum iwl_pcie_imr_status {
+ IMR_D2S_IDLE,
+ IMR_D2S_REQUESTED,
+ IMR_D2S_COMPLETED,
+ IMR_D2S_ERROR,
+};
+
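The IMR handshake is a small state machine: the requester moves the state to IMR_D2S_REQUESTED, and the "Tx" DMA-channel interrupt moves it to IMR_D2S_COMPLETED before waking the waiter (see the ISR hunks later in this diff). A minimal standalone sketch of the interrupt-side transition, under the assumption that a stray completion with nothing pending is ignored:

#include <assert.h>

enum imr_status { IMR_IDLE, IMR_REQUESTED, IMR_COMPLETED, IMR_ERROR };

/* Interrupt-side transition: only a pending request may complete. */
static enum imr_status imr_on_dma_done(enum imr_status s)
{
	return s == IMR_REQUESTED ? IMR_COMPLETED : s;
}

int main(void)
{
	enum imr_status s = IMR_REQUESTED; /* caller queues an IMR transfer */

	s = imr_on_dma_done(s);	     /* D2S CH0 interrupt fires */
	assert(s == IMR_COMPLETED);  /* waiter can be woken now */

	/* a stray interrupt with nothing pending changes nothing */
	assert(imr_on_dma_done(IMR_IDLE) == IMR_IDLE);
	return 0;
}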
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state {
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @imr_status: IMR DMA state machine
+ * @imr_waitq: IMR wait queue for DMA completion
* @rf_name: name/version of the CRF, if any
*/
struct iwl_trans_pcie {
@@ -363,7 +387,7 @@ struct iwl_trans_pcie {
/* PCI bus related data */
struct pci_dev *pci_dev;
- void __iomem *hw_base;
+ u8 __iomem *hw_base;
bool ucode_write_complete;
bool sx_complete;
@@ -414,7 +438,8 @@ struct iwl_trans_pcie {
bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
-
+ enum iwl_pcie_imr_status imr_status;
+ wait_queue_head_t imr_waitq;
char rf_name[32];
};
@@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8247014278f3..9c9f87fe8377 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->trans_cfg->mq_rx_supported)
+ if (!trans->trans_cfg->mq_rx_supported)
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
+ HBUS_TARG_WRPTR_RX_Q(rxq->id));
+ else
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
- else
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -652,23 +655,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
- struct iwl_rx_transfer_desc *rx_td;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_transfer_desc);
- if (use_rx_td)
- return sizeof(*rx_td);
- else
- return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ return trans->trans_cfg->mq_rx_supported ?
+ sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return sizeof(struct iwl_rx_completion_desc_bz);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_completion_desc);
+
+ return sizeof(__le32);
}
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210);
- int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ int free_size = iwl_pcie_free_bd_size(trans);
if (rxq->bd)
dma_free_coherent(trans->dev,
@@ -682,8 +692,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
if (rxq->used_bd)
dma_free_coherent(trans->dev,
- (use_rx_td ? sizeof(*rxq->cd) :
- sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
@@ -707,7 +717,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
else
rxq->queue_size = RX_QUEUE_SIZE;
- free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ free_size = iwl_pcie_free_bd_size(trans);
/*
* Allocate the circular buffer of Read Buffer Descriptors
@@ -720,14 +730,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (trans->trans_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
- (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
&rxq->used_bd_dma,
GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
- rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+ rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
@@ -1099,7 +1110,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
poll = iwl_pcie_napi_poll_msix;
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
- poll, NAPI_POLL_WEIGHT);
+ poll);
napi_enable(&rxq->napi);
}
@@ -1307,9 +1318,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
"Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxq->id, offset,
iwl_get_cmd_string(trans,
- iwl_cmd_id(pkt->hdr.cmd,
- pkt->hdr.group_id,
- 0)),
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
pkt->hdr.group_id, pkt->hdr.cmd,
le16_to_cpu(pkt->hdr.sequence));
@@ -1319,7 +1328,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
/* check that what the device tells us made sense */
- if (offset > max_len)
+ if (len < sizeof(*pkt) || offset > max_len)
break;
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
@@ -1419,6 +1428,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
u16 vid;
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
if (!trans->trans_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
@@ -1426,11 +1436,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- vid = le16_to_cpu(rxq->cd[i].rbid);
- *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else {
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ __le32 *cd = rxq->used_bd;
+
+ vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
}
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
@@ -1608,10 +1627,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
- if (WARN_ONCE(!rxq,
- "[%d] Got MSI-X interrupt before we have Rx queues",
- entry->entry))
+ if (!rxq) {
+ if (net_ratelimit())
+ IWL_ERR(trans,
+ "[%d] Got MSI-X interrupt before we have Rx queues\n",
+ entry->entry);
return IRQ_NONE;
+ }
lock_map_acquire(&trans->sync_cmd_lockdep_map);
IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
@@ -1954,7 +1976,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT, CSR_INT_BIT_RX_PERIODIC);
}
/* Sending RX interrupt require many steps to be done in the
- * the device:
+ * device:
* 1- write interrupt to current index in ICT table.
* 2- dma RX frame.
* 3- update RX shared data to indicate last write index.
@@ -1998,6 +2020,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Wake up uCode load routine, now that load is complete */
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
+ /* Wake up IMR write routine, now that write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
}
if (inta & ~handled) {
@@ -2211,7 +2238,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* This "Tx" DMA channel is used only for loading uCode */
- if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+ trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+ isr_stats->tx++;
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+ } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
isr_stats->tx++;
/*
@@ -2220,6 +2257,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
*/
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -2234,7 +2277,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh);
isr_stats->sw++;
/* during FW reset flow report errors from there */
- if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_ERROR;
+ wake_up(&trans_pcie->imr_waitq);
+ } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
trans_pcie->fw_reset_state = FW_RESET_ERROR;
wake_up(&trans_pcie->fw_reset_waitq);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0febdcacbd42..94f40c4d2421 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a63386a01232..bd50f52a1aad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -745,7 +745,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
iwl_set_bits_prph(trans, LMPM_CHICK,
LMPM_CHICK_EXTENDED_ADDR_SPACE);
- memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+ memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
copy_size);
@@ -1085,34 +1085,44 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
}
struct iwl_causes_list {
- u32 cause_num;
- u32 mask_reg;
+ u16 mask_reg;
+ u8 bit;
u8 addr;
};
+#define IWL_CAUSE(reg, mask) \
+ { \
+ .mask_reg = reg, \
+ .bit = ilog2(mask), \
+ .addr = ilog2(mask) + \
+ ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \
+ (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 : \
+ 0xffff), /* causes overflow warning */ \
+ }
+
static const struct iwl_causes_list causes_list_common[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};
static const struct iwl_causes_list causes_list_pre_bz[] = {
- {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};
static const struct iwl_causes_list causes_list_bz[] = {
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};
static void iwl_pcie_map_list(struct iwl_trans *trans,
@@ -1124,7 +1134,7 @@ static void iwl_pcie_map_list(struct iwl_trans *trans,
for (i = 0; i < arr_size; i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
- causes[i].cause_num);
+ BIT(causes[i].bit));
}
}
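
The IWL_CAUSE() rewrite packs each table entry into four bytes instead of twelve: for a one-hot cause mask, ilog2() recovers the bit index, and the MSI-X IVAR address is that index shifted by a fixed per-register offset (-16 for the FH mask register, +16 for the HW one; the 0xffff arm deliberately overflows the u8 so an unknown register breaks the build). A minimal userspace sketch of the idea, with MY_ILOG2 standing in for the kernel's ilog2() and made-up register tokens:

#include <stdio.h>
#include <stdint.h>

#define MY_ILOG2(x) (31 - __builtin_clz((uint32_t)(x)))

enum { FH_INT_MASK = 1, HW_INT_MASK = 2 }; /* stand-ins for the CSR addresses */

struct cause { uint16_t mask_reg; uint8_t bit; uint8_t addr; };

#define CAUSE(reg, mask) {                                      \
	.mask_reg = (reg),                                      \
	.bit  = MY_ILOG2(mask),                                 \
	.addr = MY_ILOG2(mask) + ((reg) == FH_INT_MASK ? -16    \
			       : (reg) == HW_INT_MASK ?  16     \
			       : 0xffff),                       \
}

int main(void)
{
	/* a HW cause at BIT(25) maps to IVAR address 0x29, matching the old
	 * hand-written SW_ERR entry in the table above */
	struct cause c = CAUSE(HW_INT_MASK, 1u << 25);

	printf("bit=%u addr=0x%x entry=%zu bytes\n", c.bit, c.addr, sizeof(c));
	return 0;
}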
@@ -1329,8 +1339,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
@@ -1949,6 +1958,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans->txqs.page_offs = trans_cfg->cb_data_offs;
trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+ trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -2864,7 +2874,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+ u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -3469,7 +3479,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
.interrupts = iwl_trans_pci_interrupts, \
- .sync_nmi = iwl_trans_pcie_sync_nmi \
+ .sync_nmi = iwl_trans_pcie_sync_nmi, \
+ .imr_dma_data = iwl_trans_pcie_copy_imr \
static const struct iwl_trans_ops trans_ops_pcie = {
IWL_TRANS_COMMON_OPS,
@@ -3554,6 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+ init_waitqueue_head(&trans_pcie->imr_waitq);
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -3682,3 +3694,41 @@ out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ iwl_write_prph(trans, IMR_UREG_CHICK,
+ iwl_read_prph(trans, IMR_UREG_CHICK) |
+ IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+ (u32)(src_addr & 0xFFFFFFFF));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+ iwl_get_dma_hi_addr(src_addr));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = -1;
+
+ trans_pcie->imr_status = IMR_D2S_REQUESTED;
+ iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+ ret = wait_event_timeout(trans_pcie->imr_waitq,
+ trans_pcie->imr_status !=
+ IMR_D2S_REQUESTED, 5 * HZ);
+ if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+ IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+ iwl_trans_pcie_dump_regs(trans);
+ return -ETIMEDOUT;
+ }
+ trans_pcie->imr_status = IMR_D2S_IDLE;
+ return 0;
+}
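
iwl_trans_pcie_copy_imr() is a classic request/complete handshake: set the status to REQUESTED, kick the DMA, then sleep on a waitqueue until the interrupt path (see the MSI-X handler changes above) flips the status, with a 5 second timeout as backstop and -ETIMEDOUT for both the timeout and the error case. The same shape in a self-contained userspace sketch, with pthreads standing in for wait_event_timeout()/wake_up():

#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include <errno.h>

enum imr_status { IMR_IDLE, IMR_REQUESTED, IMR_COMPLETED, IMR_ERROR };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static enum imr_status status = IMR_IDLE;

static void *irq_side(void *arg)        /* plays the MSI-X handler */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	status = IMR_COMPLETED;         /* DMA done */
	pthread_cond_signal(&waitq);    /* wake_up(&imr_waitq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int copy_imr(void)
{
	struct timespec deadline;
	pthread_t irq;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;           /* 5 * HZ in the driver */

	pthread_mutex_lock(&lock);
	status = IMR_REQUESTED;
	pthread_create(&irq, NULL, irq_side, NULL); /* "fire the DMA" */
	while (status == IMR_REQUESTED && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&waitq, &lock, &deadline);
	if (status != IMR_COMPLETED) {  /* timed out, or IMR_ERROR */
		pthread_mutex_unlock(&lock);
		pthread_join(irq, NULL);
		return -ETIMEDOUT;
	}
	status = IMR_IDLE;
	pthread_mutex_unlock(&lock);
	pthread_join(irq, NULL);
	return 0;
}

int main(void)
{
	printf("copy_imr() -> %d\n", copy_imr());
	return 0;
}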
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 34bde8c87324..c72a84d8bb4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -213,7 +213,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -222,7 +222,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4f6c187eed69..3546c5269c3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
void *tfd;
u32 num_tbs;
- tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+ tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
memset(tfd, 0, trans->txqs.tfd.size);
@@ -540,7 +540,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_256_ba_txq_size);
+ trans->cfg->min_ba_txq_size);
trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
@@ -594,7 +594,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_256_ba_txq_size);
+ trans->cfg->min_ba_txq_size);
ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
@@ -877,7 +877,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
ARRAY_SIZE(zero_val));
}
@@ -1114,7 +1114,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -1123,7 +1123,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
@@ -1201,7 +1201,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
group_id = cmd->hdr.group_id;
- cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+ cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 0730657d54bf..726185d6fab8 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "fw/api/commands.h"
#include "fw/api/tx.h"
+#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
@@ -41,13 +43,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+ struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans->txqs.bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
@@ -189,7 +191,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
return NULL;
/* set the chaining pointer to the previous page if there */
- *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
*page_ptr = ret;
return ret;
@@ -314,7 +316,7 @@ alloc:
return NULL;
p->pos = page_address(p->page);
/* set the chaining pointer to NULL */
- *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+ *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
*page_ptr = p->page;
get_page(p->page);
@@ -963,7 +965,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
while (next) {
struct page *tmp = next;
- next = *(void **)(page_address(next) + PAGE_SIZE -
+ next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
sizeof(void *));
__free_page(tmp);
}
@@ -1083,9 +1085,8 @@ error:
return -ENOMEM;
}
-static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout)
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
size_t bc_tbl_size, bc_tbl_entries;
struct iwl_txq *txq;
@@ -1097,18 +1098,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
bc_tbl_entries = bc_tbl_size / sizeof(u16);
if (WARN_ON(size > bc_tbl_entries))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
&txq->bc_tbl.dma);
if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
ret = iwl_txq_alloc(trans, txq, size, false);
@@ -1124,12 +1125,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
txq->wd_timeout = msecs_to_jiffies(timeout);
- *intxq = txq;
- return 0;
+ return txq;
error:
iwl_txq_gen2_free_memory(trans, txq);
- return ret;
+ return ERR_PTR(ret);
}
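
Returning the queue directly relies on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention: errno values up to 4095 are folded into the top page of the address space, which no valid allocation can occupy, so a single return value carries either a pointer or an error. A userspace re-implementation for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct txq { int size; };

static struct txq *txq_alloc(int size)
{
	struct txq *txq;

	if (size <= 0)
		return ERR_PTR(-EINVAL); /* was: int return + *intxq out-param */
	txq = malloc(sizeof(*txq));
	if (!txq)
		return ERR_PTR(-ENOMEM);
	txq->size = size;
	return txq;
}

int main(void)
{
	struct txq *q = txq_alloc(-1);

	if (IS_ERR(q))
		printf("alloc failed: %ld\n", PTR_ERR(q)); /* prints -22 */
	else
		free(q);
	return 0;
}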
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -1186,30 +1186,61 @@ error_free_resp:
return ret;
}
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size, unsigned int timeout)
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
{
- struct iwl_txq *txq = NULL;
- struct iwl_tx_queue_cfg_cmd cmd = {
- .flags = flags,
- .sta_id = sta_id,
- .tid = tid,
- };
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
struct iwl_host_cmd hcmd = {
- .id = cmd_id,
- .len = { sizeof(cmd) },
- .data = { &cmd, },
.flags = CMD_WANT_SKB,
};
int ret;
- ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
- if (ret)
- return ret;
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->hw_rev_step == SILICON_A_STEP)
+ size = 4096;
+
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
- cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ if (trans->txqs.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
@@ -1307,10 +1338,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
dma_addr_t hi_len;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
- return (dma_addr_t)(le64_to_cpu(tb->addr));
+ return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
}
tfd = _tfd;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
index 20efc62acf13..eca53bfd326d 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
if (trans->trans_cfg->use_tfh)
idx = iwl_txq_get_cmd_index(txq, idx);
- return txq->tfds + trans->txqs.tfd.size * idx;
+ return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
@@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd);
-int iwl_txq_dyn_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout);
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
@@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
struct iwl_tfd *tfd;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
}
tfd = (struct iwl_tfd *)_tfd;
@@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
struct iwl_tfd_tb *tb;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
- return le16_to_cpu(tb->tb_len);
+ return le16_to_cpu(tfh_tb->tb_len);
}
tfd = (struct iwl_tfd *)_tfd;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 8bcc1cdcb75b..462ccc7d7d1a 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -69,7 +69,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
@@ -320,7 +320,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
static int ap_control_proc_show(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
char *policy_txt;
struct mac_entry *entry;
@@ -352,20 +352,20 @@ static int ap_control_proc_show(struct seq_file *m, void *v)
static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_lock_bh(&ap->mac_restrictions.lock);
return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
}
static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
}
static void ap_control_proc_stop(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_unlock_bh(&ap->mac_restrictions.lock);
}
@@ -554,20 +554,20 @@ static int prism2_ap_proc_show(struct seq_file *m, void *v)
static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_lock_bh(&ap->sta_table_lock);
return seq_list_start_head(&ap->sta_list, *_pos);
}
static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
return seq_list_next(v, &ap->sta_list, _pos);
}
static void prism2_ap_proc_stop(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_unlock_bh(&ap->sta_table_lock);
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
index 7c6a5a6d1d45..3672291ced5c 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_download.c
@@ -227,7 +227,7 @@ static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *
sizeof(struct prism2_download_aux_dump));
if (ret == 0) {
struct seq_file *m = file->private_data;
- m->private = PDE_DATA(inode);
+ m->private = pde_data(inode);
}
return ret;
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 0a376f112db9..4e0a0c881697 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3848,7 +3848,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
- strlcpy(info->driver, "hostap", sizeof(info->driver));
+ strscpy(info->driver, "hostap", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 51c847d98755..61f68786056f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -97,20 +97,20 @@ static int prism2_wds_proc_show(struct seq_file *m, void *v)
static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
read_lock_bh(&local->iface_lock);
return seq_list_start(&local->hostap_interfaces, *_pos);
}
static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
return seq_list_next(v, &local->hostap_interfaces, _pos);
}
static void prism2_wds_proc_stop(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
read_unlock_bh(&local->iface_lock);
}
@@ -123,7 +123,7 @@ static const struct seq_operations prism2_wds_proc_seqops = {
static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
struct list_head *ptr = v;
struct hostap_bss_info *bss;
@@ -151,21 +151,21 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
__acquires(&local->lock)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_lock_bh(&local->lock);
return seq_list_start_head(&local->bss_list, *_pos);
}
static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
return seq_list_next(v, &local->bss_list, _pos);
}
static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
__releases(&local->lock)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_unlock_bh(&local->lock);
}
@@ -198,7 +198,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(file));
+ local_info_t *local = pde_data(file_inode(file));
size_t off;
if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE)
@@ -272,7 +272,7 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
#ifndef PRISM2_NO_STATION_MODES
static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
unsigned long entry;
int i, len;
struct hfa384x_hostscan_result *scanres;
@@ -322,7 +322,7 @@ static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_lock_bh(&local->lock);
/* We have a header (pos 0) + N results to show (pos 1...N) */
@@ -333,7 +333,7 @@ static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
++*_pos;
if (*_pos > local->last_scan_results_count)
@@ -343,7 +343,7 @@ static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *
static void prism2_scan_results_proc_stop(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_unlock_bh(&local->lock);
}
diff --git a/drivers/net/wireless/intersil/orinoco/airport.c b/drivers/net/wireless/intersil/orinoco/airport.c
index 77e6c53040a3..a890bfa0d5cc 100644
--- a/drivers/net/wireless/intersil/orinoco/airport.c
+++ b/drivers/net/wireless/intersil/orinoco/airport.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <asm/pmac_feature.h>
#include "orinoco.h"
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index bece14e4ff0d..b52cce38115d 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -173,10 +173,8 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
* keeping a extra list for uploaded keys.
*/
- priv->used_rxkeys = kcalloc(BITS_TO_LONGS(priv->rx_keycache_size),
- sizeof(long),
- GFP_KERNEL);
-
+ priv->used_rxkeys = bitmap_zalloc(priv->rx_keycache_size,
+ GFP_KERNEL);
if (!priv->used_rxkeys)
return -ENOMEM;
}
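
bitmap_zalloc() takes a bit count directly, so the caller no longer open-codes the longs-per-bitmap arithmetic (and the matching bitmap_free() in p54_free_common() below keeps allocation and release symmetric). A userspace sketch of the size math, with a simplified single-argument bitmap_zalloc() in place of the kernel's (nbits, gfp) version:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc(unsigned int nbits)
{
	/* what the old kcalloc(BITS_TO_LONGS(n), sizeof(long), ...) spelled out */
	return calloc(BITS_TO_LONGS(nbits), sizeof(long));
}

int main(void)
{
	unsigned int keycache = 100; /* e.g. rx_keycache_size */
	unsigned long *used = bitmap_zalloc(keycache);

	printf("%u bits -> %zu longs\n", keycache, BITS_TO_LONGS(keycache));
	free(used);
	return 0;
}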
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index a3ca6620dc0c..e127453ab51a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -139,7 +139,7 @@ static int p54_beacon_update(struct p54_common *priv,
struct sk_buff *beacon;
int ret;
- beacon = ieee80211_beacon_get(priv->hw, vif);
+ beacon = ieee80211_beacon_get(priv->hw, vif, 0);
if (!beacon)
return -ENOMEM;
ret = p54_beacon_format_ie_tim(beacon);
@@ -404,7 +404,8 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
}
static int p54_conf_tx(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct p54_common *priv = dev->priv;
@@ -449,7 +450,7 @@ static int p54_get_stats(struct ieee80211_hw *dev,
static void p54_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct p54_common *priv = dev->priv;
@@ -480,8 +481,8 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
p54_scan(priv, P54_SCAN_EXIT, 0);
}
if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc) {
- priv->aid = info->aid;
+ if (vif->cfg.assoc) {
+ priv->aid = vif->cfg.aid;
priv->wakeup_timer = info->beacon_int *
info->dtim_period * 5;
p54_setup_mac(priv);
@@ -634,7 +635,7 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
/*
* hw/fw has not accumulated enough sample sets.
* Wait for 100ms, this ought to be enough to
- * to get at least one non-null set of channel
+ * get at least one non-null set of channel
* usage statistics.
*/
msleep(100);
@@ -682,7 +683,7 @@ static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
* queues have already been stopped and no new frames can sneak
* up from behind.
*/
- while ((total = p54_flush_count(priv) && i--)) {
+ while ((total = p54_flush_count(priv)) && i--) {
/* waste time */
msleep(20);
}
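
The fix is pure operator precedence: && binds tighter than =, so the old condition assigned the truth value of (count && i--) to total. Once the retry budget ran out, total read 0 even with frames still pending, and any check of total after the loop saw nothing wrong. Isolated demonstration:

#include <stdio.h>

static int flush_count(void) { return 5; } /* frames stuck in the queue */

int main(void)
{
	int total, i;

	i = 2;
	while ((total = flush_count() && i--)) /* old: total = (count && i--) */
		;
	printf("buggy: total=%d (pending frames hidden)\n", total);

	i = 2;
	while ((total = flush_count()) && i--) /* fixed: total = count */
		;
	printf("fixed: total=%d (5 frames still pending)\n", total);
	return 0;
}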
@@ -830,7 +831,7 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->output_limit);
kfree(priv->curve_data);
kfree(priv->rssi_db);
- kfree(priv->used_rxkeys);
+ bitmap_free(priv->used_rxkeys);
kfree(priv->survey);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ab0fe8565851..19152fd449ba 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -164,7 +164,7 @@ static int p54spi_request_firmware(struct ieee80211_hw *dev)
ret = p54_parse_firmware(dev, priv->firmware);
if (ret) {
- release_firmware(priv->firmware);
+ /* the firmware is released by the caller */
return ret;
}
@@ -659,6 +659,7 @@ static int p54spi_probe(struct spi_device *spi)
return 0;
err_free_common:
+ release_firmware(priv->firmware);
free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
err_free_gpio_irq:
gpio_free(p54spi_gpio_irq);
@@ -669,7 +670,7 @@ err_free:
return ret;
}
-static int p54spi_remove(struct spi_device *spi)
+static void p54spi_remove(struct spi_device *spi)
{
struct p54s_priv *priv = spi_get_drvdata(spi);
@@ -684,8 +685,6 @@ static int p54spi_remove(struct spi_device *spi)
mutex_destroy(&priv->mutex);
p54_free_common(priv->hw);
-
- return 0;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0307a6677907..0d81098c7b45 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
/*
@@ -65,6 +65,10 @@ static bool support_p2p_device = true;
module_param(support_p2p_device, bool, 0444);
MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+static bool mlo;
+module_param(mlo, bool, 0444);
+MODULE_PARM_DESC(mlo, "Support MLO");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -173,9 +177,23 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
}
};
+static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = {
+ .n_reg_rules = 6,
+ .alpha2 = "99",
+ .reg_rules = {
+ REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+ REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 40, 0, 30, 0),
+ REG_RULE(5745 - 10, 5825 + 10, 40, 0, 30, 0),
+ REG_RULE(5855 - 10, 5925 + 10, 40, 0, 33, 0),
+ REG_RULE(5955 - 10, 7125 + 10, 320, 0, 33, 0),
+ }
+};
+
static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
&hwsim_world_regdom_custom_01,
&hwsim_world_regdom_custom_02,
+ &hwsim_world_regdom_custom_03,
};
struct hwsim_vif_priv {
@@ -210,6 +228,8 @@ static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
struct hwsim_sta_priv {
u32 magic;
+ unsigned int last_link;
+ u16 active_links_rx;
};
#define HWSIM_STA_MAGIC 0x6d537749
@@ -276,8 +296,7 @@ static inline int hwsim_net_set_netgroup(struct net *net)
{
struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
- hwsim_net->netgroup = ida_simple_get(&hwsim_netgroup_ida,
- 0, 0, GFP_KERNEL);
+ hwsim_net->netgroup = ida_alloc(&hwsim_netgroup_ida, GFP_KERNEL);
return hwsim_net->netgroup >= 0 ? 0 : -ENOMEM;
}
@@ -475,16 +494,16 @@ static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = {
0 },
};
-static void hwsim_init_s1g_channels(struct ieee80211_channel *channels)
+static void hwsim_init_s1g_channels(struct ieee80211_channel *chans)
{
int ch, freq;
for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) {
freq = 902000 + (ch + 1) * 500;
- channels[ch].band = NL80211_BAND_S1GHZ;
- channels[ch].center_freq = KHZ_TO_MHZ(freq);
- channels[ch].freq_offset = freq % 1000;
- channels[ch].hw_value = ch + 1;
+ chans[ch].band = NL80211_BAND_S1GHZ;
+ chans[ch].center_freq = KHZ_TO_MHZ(freq);
+ chans[ch].freq_offset = freq % 1000;
+ chans[ch].hw_value = ch + 1;
}
}
@@ -503,6 +522,8 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+#define DEFAULT_RX_RSSI -50
+
static const u32 hwsim_ciphers[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -608,6 +629,12 @@ static struct platform_driver mac80211_hwsim_driver = {
},
};
+struct mac80211_hwsim_link_data {
+ u32 link_id;
+ u64 beacon_int /* beacon interval in us */;
+ struct hrtimer beacon_timer;
+};
+
struct mac80211_hwsim_data {
struct list_head list;
struct rhash_head rht;
@@ -626,7 +653,6 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
- struct ieee80211_chanctx_conf *chanctx;
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -652,18 +678,17 @@ struct mac80211_hwsim_data {
ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel *channel;
- u64 beacon_int /* beacon interval in us */;
+ enum nl80211_chan_width bw;
unsigned int rx_filter;
bool started, idle, scanning;
struct mutex mutex;
- struct hrtimer beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
} ps;
bool ps_poll_pending;
struct dentry *debugfs;
- uintptr_t pending_cookie;
+ atomic_t pending_cookie;
struct sk_buff_head pending; /* packets pending */
/*
* Only radios in the same group can communicate together (the
@@ -690,6 +715,11 @@ struct mac80211_hwsim_data {
u64 rx_bytes;
u64 tx_dropped;
u64 tx_failed;
+
+ /* RSSI in rx status of the receiver */
+ int rx_rssi;
+
+ struct mac80211_hwsim_link_data link_data[IEEE80211_MLD_MAX_NUM_LINKS];
};
static const struct rhashtable_params hwsim_rht_params = {
@@ -757,6 +787,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_PERM_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT,
[HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 },
[HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY },
+ [HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
};
#if IS_REACHABLE(CONFIG_VIRTIO)
@@ -803,6 +834,40 @@ extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
#define hwsim_virtio_enabled false
#endif
+static int hwsim_get_chanwidth(enum nl80211_chan_width bw)
+{
+ switch (bw) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return 20;
+ case NL80211_CHAN_WIDTH_40:
+ return 40;
+ case NL80211_CHAN_WIDTH_80:
+ return 80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 160;
+ case NL80211_CHAN_WIDTH_320:
+ return 320;
+ case NL80211_CHAN_WIDTH_5:
+ return 5;
+ case NL80211_CHAN_WIDTH_10:
+ return 10;
+ case NL80211_CHAN_WIDTH_1:
+ return 1;
+ case NL80211_CHAN_WIDTH_2:
+ return 2;
+ case NL80211_CHAN_WIDTH_4:
+ return 4;
+ case NL80211_CHAN_WIDTH_8:
+ return 8;
+ case NL80211_CHAN_WIDTH_16:
+ return 16;
+ }
+
+ return INT_MAX;
+}
+
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan);
@@ -835,7 +900,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan);
rcu_read_unlock();
}
@@ -845,6 +910,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *cb;
if (!vp->assoc)
return;
@@ -866,9 +932,13 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ cb = IEEE80211_SKB_CB(skb);
+ cb->control.rates[0].count = 1;
+ cb->control.rates[1].idx = -1;
+
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan);
rcu_read_unlock();
}
@@ -964,6 +1034,29 @@ DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
hwsim_fops_group_read, hwsim_fops_group_write,
"%llx\n");
+static int hwsim_fops_rx_rssi_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->rx_rssi;
+ return 0;
+}
+
+static int hwsim_fops_rx_rssi_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ int rssi = (int)val;
+
+ if (rssi >= 0 || rssi < -100)
+ return -EINVAL;
+
+ data->rx_rssi = rssi;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_rx_rssi,
+ hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write,
+ "%lld\n");
+
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -995,7 +1088,8 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
{
struct mac80211_hwsim_data *data = hw->priv;
u64 now = mac80211_hwsim_get_tsf(hw, vif);
- u32 bcn_int = data->beacon_int;
+ /* MLD not supported here */
+ u32 bcn_int = data->link_data[0].beacon_int;
u64 delta = abs(tsf - now);
/* adjust after beaconing with new timestamp at old TBTT */
@@ -1110,10 +1204,27 @@ struct mac80211_hwsim_addr_match_data {
static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ int i;
struct mac80211_hwsim_addr_match_data *md = data;
- if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+ if (memcmp(mac, md->addr, ETH_ALEN) == 0) {
md->ret = true;
+ return;
+ }
+
+ /* Match the link address */
+ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+ struct ieee80211_bss_conf *conf;
+
+ conf = rcu_dereference(vif->link_conf[i]);
+ if (!conf)
+ continue;
+
+ if (memcmp(conf->addr, md->addr, ETH_ALEN) == 0) {
+ md->ret = true;
+ return;
+ }
+ }
}
static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
@@ -1193,6 +1304,8 @@ static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw,
struct sk_buff *skb;
void *msg_head;
+ WARN_ON(!is_valid_ether_addr(addr));
+
if (!_portid && !hwsim_virtio_enabled)
return;
@@ -1339,8 +1452,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
/* We create a cookie to identify this skb */
- data->pending_cookie++;
- cookie = data->pending_cookie;
+ cookie = atomic_inc_return(&data->pending_cookie);
info->rate_driver_data[0] = (void *)cookie;
if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
goto nla_put_failure;
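
Switching the cookie to atomic_inc_return() closes a lost-update window: the plain ++ on the old uintptr_t was a read-modify-write, and concurrent transmitters could hand two frames the same cookie, confusing TX-status matching. A C11 sketch of the difference, with a hypothetical tx_path() hammering the counter from two threads:

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

static atomic_uint pending_cookie;

static void *tx_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		/* fetch_add returns the old value; +1 is the new cookie,
		 * as atomic_inc_return() would hand it back */
		unsigned int cookie = atomic_fetch_add(&pending_cookie, 1) + 1;
		(void)cookie;
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, tx_path, NULL);
	pthread_create(&b, NULL, tx_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* always 200000; with a plain unsigned and ++, updates can be lost */
	printf("cookie=%u\n", atomic_load(&pending_cookie));
	return 0;
}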
@@ -1387,15 +1499,26 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
struct ieee80211_vif *vif)
{
struct tx_iter_data *data = _data;
+ int i;
- if (!vif->chanctx_conf)
- return;
+ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+ struct ieee80211_bss_conf *conf;
+ struct ieee80211_chanctx_conf *chanctx;
- if (!hwsim_chans_compat(data->channel,
- rcu_dereference(vif->chanctx_conf)->def.chan))
- return;
+ conf = rcu_dereference(vif->link_conf[i]);
+ if (!conf)
+ continue;
+
+ chanctx = rcu_dereference(conf->chanctx_conf);
+ if (!chanctx)
+ continue;
+
+ if (!hwsim_chans_compat(data->channel, chanctx->def.chan))
+ continue;
- data->receive = true;
+ data->receive = true;
+ return;
+ }
}
static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
@@ -1445,6 +1568,42 @@ static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
#endif
}
+static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ (ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_data(hdr->frame_control))) {
+ struct ieee80211_sta *sta;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta_by_link_addrs(data->hw, hdr->addr2,
+ hdr->addr1, &link_id);
+ if (sta) {
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
+ if (ieee80211_has_pm(hdr->frame_control))
+ sp->active_links_rx &= ~BIT(link_id);
+ else
+ sp->active_links_rx |= BIT(link_id);
+ }
+ rcu_read_unlock();
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
+
+ mac80211_hwsim_add_vendor_rtap(skb);
+
+ data->rx_pkts++;
+ data->rx_bytes += skb->len;
+ ieee80211_rx_irqsafe(data->hw, skb);
+}
+
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan)
@@ -1482,8 +1641,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.bw = RATE_INFO_BW_20;
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
- /* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = -50;
+ /* TODO: simulate optional packet loss */
+ rx_status.signal = data->rx_rssi;
if (info->control.vif)
rx_status.signal += info->control.vif->bss_conf.txpower;
@@ -1572,19 +1731,63 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.mactime = now + data2->tsf_offset;
- memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
-
- mac80211_hwsim_add_vendor_rtap(nskb);
-
- data2->rx_pkts++;
- data2->rx_bytes += nskb->len;
- ieee80211_rx_irqsafe(data2->hw, nskb);
+ mac80211_hwsim_rx(data2, &rx_status, nskb);
}
spin_unlock(&hwsim_radio_lock);
return ack;
}
+static struct ieee80211_bss_conf *
+mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_link_sta **link_sta)
+{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+ int i;
+
+ if (!vif->valid_links)
+ return &vif->bss_conf;
+
+ WARN_ON(is_multicast_ether_addr(hdr->addr1));
+
+ if (WARN_ON_ONCE(!sta->valid_links))
+ return &vif->bss_conf;
+
+ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+ struct ieee80211_bss_conf *bss_conf;
+ unsigned int link_id;
+
+ /* round-robin the available link IDs */
+ link_id = (sp->last_link + i + 1) % ARRAY_SIZE(vif->link_conf);
+
+ if (!(vif->active_links & BIT(link_id)))
+ continue;
+
+ if (!(sp->active_links_rx & BIT(link_id)))
+ continue;
+
+ *link_sta = rcu_dereference(sta->link[link_id]);
+ if (!*link_sta)
+ continue;
+
+ bss_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!bss_conf))
+ continue;
+
+ /* can happen while switching links */
+ if (!rcu_access_pointer(bss_conf->chanctx_conf))
+ continue;
+
+ sp->last_link = link_id;
+ return bss_conf;
+ }
+
+ return NULL;
+}
+
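The selection loop above is a plain rotating scan: resume one past the link used last time, wrap modulo the table size, and take the first link that is active on both the vif side and the peer's receive side, remembering it for next time. The iteration order in isolation:

#include <stdio.h>

#define NUM_LINKS 4

static int last_link;

static int pick_link(unsigned int vif_active, unsigned int sta_active_rx)
{
	for (int i = 0; i < NUM_LINKS; i++) {
		int link = (last_link + i + 1) % NUM_LINKS;

		if (!(vif_active & (1u << link)))
			continue;
		if (!(sta_active_rx & (1u << link)))
			continue;
		last_link = link;
		return link;
	}
	return -1; /* no usable link, as the NULL return above */
}

int main(void)
{
	/* links 1 and 3 usable on both ends: picks alternate 1, 3, 1, 3 */
	for (int i = 0; i < 4; i++)
		printf("%d ", pick_link(0x0a, 0x0a));
	printf("\n");
	return 0;
}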
static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -1595,7 +1798,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
bool ack;
- u32 _portid;
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+ u32 _portid, i;
if (WARN_ON(skb->len < 10)) {
/* Should not happen; just a sanity check for addr1 use */
@@ -1605,14 +1809,57 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (!data->use_chanctx) {
channel = data->channel;
+ confbw = data->bw;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
} else {
- chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
- if (chanctx_conf)
+ u8 link = u32_get_bits(IEEE80211_SKB_CB(skb)->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ struct ieee80211_vif *vif = txi->control.vif;
+ struct ieee80211_link_sta *link_sta = NULL;
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_bss_conf *bss_conf;
+
+ if (link != IEEE80211_LINK_UNSPECIFIED) {
+ bss_conf = rcu_dereference(txi->control.vif->link_conf[link]);
+ if (sta)
+ link_sta = rcu_dereference(sta->link[link]);
+ } else {
+ bss_conf = mac80211_hwsim_select_tx_link(data, vif, sta,
+ hdr, &link_sta);
+ }
+
+ if (WARN_ON(!bss_conf)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ if (sta && sta->mlo) {
+ if (WARN_ON(!link_sta)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ /* address translation to link addresses on TX */
+ ether_addr_copy(hdr->addr1, link_sta->addr);
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+ /* translate A3 only if it's the BSSID */
+ if (!ieee80211_has_tods(hdr->frame_control) &&
+ !ieee80211_has_fromds(hdr->frame_control)) {
+ if (ether_addr_equal(hdr->addr3, sta->addr))
+ ether_addr_copy(hdr->addr3, link_sta->addr);
+ else if (ether_addr_equal(hdr->addr3, vif->addr))
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+ /* no need to look at A4, if present it's SA */
+ }
+
+ chanctx_conf = rcu_dereference(bss_conf->chanctx_conf);
+ if (chanctx_conf) {
channel = chanctx_conf->def.chan;
- else
+ confbw = chanctx_conf->def.width;
+ } else {
channel = NULL;
+ }
}
if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
@@ -1636,6 +1883,25 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
+ for (i = 0; i < ARRAY_SIZE(txi->control.rates); i++) {
+ u16 rflags = txi->control.rates[i].flags;
+ /* initialize to data->bw for 5/10 MHz handling */
+ enum nl80211_chan_width bw = data->bw;
+
+ if (txi->control.rates[i].idx == -1)
+ break;
+
+ if (rflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_40;
+ else if (rflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_80;
+ else if (rflags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_160;
+
+ if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw)))
+ return;
+ }
+
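hwsim_get_chanwidth() exists because nl80211_chan_width enum values are not ordered by bandwidth (the 5/10 MHz and S1G widths sort after the 160 MHz ones), so the sanity check above converts both sides to MHz before comparing, and maps unknown widths to INT_MAX so they always fail. A trimmed-down sketch with a reduced enum:

#include <stdio.h>
#include <limits.h>

enum chan_width { W_20_NOHT, W_20, W_40, W_80, W_80P80, W_160, W_5, W_10 };

static int width_mhz(enum chan_width w)
{
	switch (w) {
	case W_20_NOHT:
	case W_20:     return 20;
	case W_40:     return 40;
	case W_80:     return 80;
	case W_80P80:
	case W_160:    return 160;
	case W_5:      return 5;
	case W_10:     return 10;
	}
	return INT_MAX; /* unknown width: fail the <= check loudly */
}

int main(void)
{
	/* enum order says W_5 > W_160, MHz order says the opposite */
	printf("enum: %d, mhz: %d\n",
	       W_5 > W_160, width_mhz(W_5) > width_mhz(W_160));
	return 0;
}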
if (skb->len >= 24 + 8 &&
ieee80211_is_probe_resp(hdr->frame_control)) {
/* fake header transmission time */
@@ -1695,9 +1961,12 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
+ int i;
data->started = false;
- hrtimer_cancel(&data->beacon_timer);
+
+ for (i = 0; i < ARRAY_SIZE(data->link_data); i++)
+ hrtimer_cancel(&data->link_data[i].beacon_timer);
while (!skb_queue_empty(&data->pending))
ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
@@ -1788,7 +2057,12 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_vif *vif)
{
- struct mac80211_hwsim_data *data = arg;
+ struct mac80211_hwsim_link_data *link_data = arg;
+ u32 link_id = link_data->link_id;
+ struct ieee80211_bss_conf *link_conf;
+ struct mac80211_hwsim_data *data =
+ container_of(link_data, struct mac80211_hwsim_data,
+ link_data[link_id]);
struct ieee80211_hw *hw = data->hw;
struct ieee80211_tx_info *info;
struct ieee80211_rate *txrate;
@@ -1799,13 +2073,17 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
hwsim_check_magic(vif);
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (!link_conf)
+ return;
+
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
vif->type != NL80211_IFTYPE_ADHOC &&
vif->type != NL80211_IFTYPE_OCB)
return;
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, link_data->link_id);
if (skb == NULL)
return;
info = IEEE80211_SKB_CB(skb);
@@ -1836,38 +2114,41 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
}
mac80211_hwsim_tx_frame(hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(link_conf->chanctx_conf)->def.chan);
while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) {
mac80211_hwsim_tx_frame(hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_dereference(link_conf->chanctx_conf)->def.chan);
}
- if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
+ if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
static enum hrtimer_restart
mac80211_hwsim_beacon(struct hrtimer *timer)
{
+ struct mac80211_hwsim_link_data *link_data =
+ container_of(timer, struct mac80211_hwsim_link_data, beacon_timer);
struct mac80211_hwsim_data *data =
- container_of(timer, struct mac80211_hwsim_data, beacon_timer);
+ container_of(link_data, struct mac80211_hwsim_data,
+ link_data[link_data->link_id]);
struct ieee80211_hw *hw = data->hw;
- u64 bcn_int = data->beacon_int;
+ u64 bcn_int = link_data->beacon_int;
if (!data->started)
return HRTIMER_NORESTART;
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
- mac80211_hwsim_beacon_tx, data);
+ mac80211_hwsim_beacon_tx, link_data);
/* beacon at new TBTT + beacon interval */
if (data->bcn_delta) {
bcn_int -= data->bcn_delta;
data->bcn_delta = 0;
}
- hrtimer_forward_now(&data->beacon_timer,
+ hrtimer_forward_now(&link_data->beacon_timer,
ns_to_ktime(bcn_int * NSEC_PER_USEC));
return HRTIMER_RESTART;
}
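
Recovering the radio from the timer takes two container_of() steps: the hrtimer sits inside a per-link struct that records its own link_id, and that index then locates the array slot inside the enclosing radio struct. A userspace demonstration; container_of with a runtime array index leans on the GCC/Clang __builtin_offsetof extension, the same thing the driver relies on here:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct link_data {
	unsigned int link_id;
	int beacon_timer; /* stands in for struct hrtimer */
};

struct radio {
	const char *name;
	struct link_data link_data[4];
};

static void timer_fired(int *timer)
{
	struct link_data *link =
		container_of(timer, struct link_data, beacon_timer);
	/* runtime index in the member designator: GNU offsetof extension */
	struct radio *radio =
		container_of(link, struct radio, link_data[link->link_id]);

	printf("%s: beacon on link %u\n", radio->name, link->link_id);
}

int main(void)
{
	struct radio r = { .name = "hwsim0" };

	for (unsigned int i = 0; i < 4; i++)
		r.link_data[i].link_id = i;
	timer_fired(&r.link_data[2].beacon_timer); /* "hwsim0: beacon on link 2" */
	return 0;
}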
@@ -1935,6 +2216,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
data->channel = conf->chandef.chan;
+ data->bw = conf->chandef.width;
for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
if (data->survey_data[idx].channel &&
@@ -1946,19 +2228,25 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
} else {
data->channel = conf->chandef.chan;
+ data->bw = conf->chandef.width;
}
mutex_unlock(&data->mutex);
- if (!data->started || !data->beacon_int)
- hrtimer_cancel(&data->beacon_timer);
- else if (!hrtimer_is_queued(&data->beacon_timer)) {
- u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
- u32 bcn_int = data->beacon_int;
- u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
+ for (idx = 0; idx < ARRAY_SIZE(data->link_data); idx++) {
+ struct mac80211_hwsim_link_data *link_data =
+ &data->link_data[idx];
+
+ if (!data->started || !link_data->beacon_int) {
+ hrtimer_cancel(&link_data->beacon_timer);
+ } else if (!hrtimer_is_queued(&link_data->beacon_timer)) {
+ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
+ u32 bcn_int = link_data->beacon_int;
+ u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
- hrtimer_start(&data->beacon_timer,
- ns_to_ktime(until_tbtt * NSEC_PER_USEC),
- HRTIMER_MODE_REL_SOFT);
+ hrtimer_start(&link_data->beacon_timer,
+ ns_to_ktime(until_tbtt * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
+ }
}
return 0;
@@ -1992,47 +2280,61 @@ static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
(*count)++;
}
-static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
+static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- struct mac80211_hwsim_data *data = hw->priv;
hwsim_check_magic(vif);
- wiphy_dbg(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
+ wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM)\n",
__func__, changed, vif->addr);
+ if (changed & BSS_CHANGED_ASSOC) {
+ wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
+ vif->cfg.assoc, vif->cfg.aid);
+ vp->assoc = vif->cfg.assoc;
+ vp->aid = vif->cfg.aid;
+ }
+}
+
+static void mac80211_hwsim_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct mac80211_hwsim_data *data = hw->priv;
+ unsigned int link_id = info->link_id;
+ struct mac80211_hwsim_link_data *link_data = &data->link_data[link_id];
+
+ hwsim_check_magic(vif);
+
+ wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM, link id %u)\n",
+ __func__, (unsigned long long)changed, vif->addr, link_id);
+
if (changed & BSS_CHANGED_BSSID) {
wiphy_dbg(hw->wiphy, "%s: BSSID changed: %pM\n",
__func__, info->bssid);
memcpy(vp->bssid, info->bssid, ETH_ALEN);
}
- if (changed & BSS_CHANGED_ASSOC) {
- wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
- info->assoc, info->aid);
- vp->assoc = info->assoc;
- vp->aid = info->aid;
- }
-
if (changed & BSS_CHANGED_BEACON_ENABLED) {
wiphy_dbg(hw->wiphy, " BCN EN: %d (BI=%u)\n",
info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
- !hrtimer_is_queued(&data->beacon_timer) &&
+ !hrtimer_is_queued(&link_data->beacon_timer) &&
info->enable_beacon) {
u64 tsf, until_tbtt;
u32 bcn_int;
- data->beacon_int = info->beacon_int * 1024;
+ link_data->beacon_int = info->beacon_int * 1024;
tsf = mac80211_hwsim_get_tsf(hw, vif);
- bcn_int = data->beacon_int;
+ bcn_int = link_data->beacon_int;
until_tbtt = bcn_int - do_div(tsf, bcn_int);
- hrtimer_start(&data->beacon_timer,
+ hrtimer_start(&link_data->beacon_timer,
ns_to_ktime(until_tbtt * NSEC_PER_USEC),
HRTIMER_MODE_REL_SOFT);
} else if (!info->enable_beacon) {
@@ -2043,8 +2345,8 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u",
count);
if (count == 0) {
- hrtimer_cancel(&data->beacon_timer);
- data->beacon_int = 0;
+ hrtimer_cancel(&link_data->beacon_timer);
+ link_data->beacon_int = 0;
}
}
}
@@ -2077,12 +2379,82 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
}
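The old bss_info_changed callback is split in two: vif_cfg_changed sees interface-wide state (association, AID) through vif->cfg, while link_info_changed sees per-link state (BSSID, beaconing, ERP, TX power) through the link's bss_conf, and the changed bitmap widens from u32 to u64. A toy illustration of testing a widened flag word (the flag values are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define CHANGED_ASSOC	(1ULL << 0)	/* vif-level */
#define CHANGED_BSSID	(1ULL << 1)	/* link-level */

static void handle(uint64_t changed)
{
	/* 1ULL keeps bits 32..63 usable once flags grow past u32 */
	if (changed & CHANGED_ASSOC)
		printf("vif-level: association changed\n");
	if (changed & CHANGED_BSSID)
		printf("link-level: BSSID changed\n");
}

int main(void)
{
	handle(CHANGED_ASSOC | CHANGED_BSSID);
	return 0;
}

Printing such a value needs %llx (or an explicit cast), which is exactly what the wiphy_dbg() changes in these hunks do.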
+static void
+mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ u32 bw = U32_MAX;
+ int link_id;
+
+ rcu_read_lock();
+ for (link_id = 0;
+ link_id < ARRAY_SIZE(vif->link_conf);
+ link_id++) {
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+ struct ieee80211_bss_conf *vif_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+
+ if (!link_sta)
+ continue;
+
+ switch (link_sta->bandwidth) {
+#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
+ C(20);
+ C(40);
+ C(80);
+ C(160);
+ C(320);
+#undef C
+ }
+
+ if (!data->use_chanctx) {
+ confbw = data->bw;
+ } else {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ vif_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON(!vif_conf))
+ continue;
+
+ chanctx_conf = rcu_dereference(vif_conf->chanctx_conf);
+
+ if (!WARN_ON(!chanctx_conf))
+ confbw = chanctx_conf->def.width;
+ }
+
+ WARN(bw > hwsim_get_chanwidth(confbw),
+ "intf %pM [link=%d]: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
+ vif->addr, link_id, sta->addr, bw, link_sta->bandwidth,
+ hwsim_get_chanwidth(confbw), confbw);
+ }
+ rcu_read_unlock();
+}
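The C(_bw) macro stamps out one switch case per bandwidth so the enum-to-MHz mapping cannot drift as widths are added. The same trick in a self-contained form (enum values invented for the demo):

#include <stdio.h>

enum rx_bw { RX_BW_20, RX_BW_40, RX_BW_80, RX_BW_160, RX_BW_320 };

static unsigned int bw_mhz(enum rx_bw in)
{
	unsigned int bw = 0;

	switch (in) {
#define C(_bw) case RX_BW_##_bw: bw = _bw; break
	C(20);
	C(40);
	C(80);
	C(160);
	C(320);
#undef C
	}
	return bw;
}

int main(void)
{
	printf("%u MHz\n", bw_mhz(RX_BW_160));	/* prints 160 */
	return 0;
}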
+
static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
hwsim_check_magic(vif);
hwsim_set_sta_magic(sta);
+ mac80211_hwsim_sta_rc_update(hw, vif, sta, 0);
+
+ if (sta->valid_links) {
+ WARN(hweight16(sta->valid_links) > 1,
+ "expect to add STA with single link, have 0x%x\n",
+ sta->valid_links);
+ sp->active_links_rx = sta->valid_links;
+ }
return 0;
}
@@ -2097,6 +2469,29 @@ static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ if (new_state == IEEE80211_STA_NOTEXIST)
+ return mac80211_hwsim_sta_remove(hw, vif, sta);
+
+ if (old_state == IEEE80211_STA_NOTEXIST)
+ return mac80211_hwsim_sta_add(hw, vif, sta);
+
+ /*
+ * when client is authorized (AP station marked as such),
+ * enable all links
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
+ ieee80211_set_active_links_async(vif, vif->valid_links);
+
+ return 0;
+}
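sta_state supersedes the separate add/remove callbacks by mapping the two edge transitions onto them; every other transition is accepted as-is, with one side effect: once an MLO client reaches AUTHORIZED, all negotiated links are activated. A reduced model of the dispatch (state names abbreviated, not mac80211's):

#include <stdio.h>

enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

static int on_add(void)    { puts("add");    return 0; }
static int on_remove(void) { puts("remove"); return 0; }

static int sta_state(enum sta_state old, enum sta_state new)
{
	if (new == NOTEXIST)
		return on_remove();	/* tearing the station down */
	if (old == NOTEXIST)
		return on_add();	/* first appearance */
	if (new == AUTHORIZED)
		puts("enable all links");	/* MLO: wake every link */
	return 0;
}

int main(void)
{
	sta_state(NOTEXIST, NONE);	/* add */
	sta_state(ASSOC, AUTHORIZED);	/* enable all links */
	sta_state(NONE, NOTEXIST);	/* remove */
	return 0;
}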
+
static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -2123,10 +2518,10 @@ static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
return 0;
}
-static int mac80211_hwsim_conf_tx(
- struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+static int mac80211_hwsim_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
wiphy_dbg(hw->wiphy,
"%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
@@ -2336,9 +2731,21 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ rcu_read_lock();
+ if (!ieee80211_tx_prepare_skb(hwsim->hw,
+ hwsim->hw_scan_vif,
+ probe,
+ hwsim->tmp_chan->band,
+ NULL)) {
+ rcu_read_unlock();
+ kfree_skb(probe);
+ continue;
+ }
+
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
+ rcu_read_unlock();
local_bh_enable();
}
}
@@ -2518,11 +2925,6 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = ctx;
- mutex_unlock(&hwsim->mutex);
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2534,11 +2936,6 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = NULL;
- mutex_unlock(&hwsim->mutex);
wiphy_dbg(hw->wiphy,
"remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
@@ -2551,11 +2948,6 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = ctx;
- mutex_unlock(&hwsim->mutex);
hwsim_check_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2565,20 +2957,50 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
hwsim_check_magic(vif);
hwsim_check_chanctx_magic(ctx);
+ /* if we activate a link while already associated wake it up */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true);
+ if (skb) {
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan);
+ local_bh_enable();
+ }
+ }
+
return 0;
}
static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
hwsim_check_magic(vif);
hwsim_check_chanctx_magic(ctx);
+
+ /* if we deactivate a link while associated suspend it first */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true);
+ if (skb) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan);
+ local_bh_enable();
+ }
+ }
}
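Link suspension is signalled on the air by setting the power-management bit in the null-func frame's frame_control, a little-endian field. A standalone check of the flag and byte order (frame bytes written by hand here, rather than via the kernel's cpu_to_le16):

#include <stdint.h>
#include <stdio.h>

#define FCTL_PM 0x1000	/* power-management bit in frame_control */

int main(void)
{
	uint8_t hdr[2] = { 0x48, 0x00 };	/* null-func data frame */
	uint16_t fc = (uint16_t)(hdr[0] | hdr[1] << 8);	/* LE on the air */

	fc |= FCTL_PM;	/* tell the AP this link is going to sleep */
	hdr[0] = fc & 0xff;
	hdr[1] = fc >> 8;

	printf("frame_control now %02x %02x\n", hdr[0], hdr[1]);
	return 0;
}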
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -2635,6 +3057,56 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
+static int mac80211_hwsim_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mac80211_hwsim_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ unsigned long rem = old_links & ~new_links;
+ unsigned long add = new_links & ~old_links;
+ int i;
+
+ if (!old_links)
+ rem |= BIT(0);
+ if (!new_links)
+ add |= BIT(0);
+
+ for_each_set_bit(i, &rem, IEEE80211_MLD_MAX_NUM_LINKS)
+ mac80211_hwsim_config_mac_nl(hw, old[i]->addr, false);
+
+ for_each_set_bit(i, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = link_conf_dereference_protected(vif, i);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ mac80211_hwsim_config_mac_nl(hw, link_conf->addr, true);
+ }
+
+ return 0;
+}
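Link set changes are diffed with plain bit arithmetic: links only in the old mask are torn down, links only in the new mask are brought up, and bit 0 stands in for the implicit non-MLO "link" when either mask is empty. The diffing itself, verifiable in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int old_links = 0x5;	/* links 0 and 2 */
	unsigned int new_links = 0x6;	/* links 1 and 2 */
	unsigned int rem = old_links & ~new_links;
	unsigned int add = new_links & ~old_links;

	/* bit 0 represents the deflink of a non-MLO interface */
	if (!old_links)
		rem |= 1u << 0;
	if (!new_links)
		add |= 1u << 0;

	for (int i = 0; i < 16; i++) {
		if (rem & (1u << i))
			printf("tear down link %d\n", i);	/* link 0 */
		if (add & (1u << i))
			printf("bring up link %d\n", i);	/* link 1 */
	}
	return 0;
}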
+
+static int mac80211_hwsim_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
+ hwsim_check_sta_magic(sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ sp->active_links_rx = new_links;
+
+ return 0;
+}
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.start = mac80211_hwsim_start, \
@@ -2644,42 +3116,58 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
.remove_interface = mac80211_hwsim_remove_interface, \
.config = mac80211_hwsim_config, \
.configure_filter = mac80211_hwsim_configure_filter, \
- .bss_info_changed = mac80211_hwsim_bss_info_changed, \
+ .vif_cfg_changed = mac80211_hwsim_vif_info_changed, \
+ .link_info_changed = mac80211_hwsim_link_info_changed, \
.tx_last_beacon = mac80211_hwsim_tx_last_beacon, \
- .sta_add = mac80211_hwsim_sta_add, \
- .sta_remove = mac80211_hwsim_sta_remove, \
.sta_notify = mac80211_hwsim_sta_notify, \
- .set_tim = mac80211_hwsim_set_tim, \
+ .sta_rc_update = mac80211_hwsim_sta_rc_update, \
.conf_tx = mac80211_hwsim_conf_tx, \
.get_survey = mac80211_hwsim_get_survey, \
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \
.ampdu_action = mac80211_hwsim_ampdu_action, \
.flush = mac80211_hwsim_flush, \
- .get_tsf = mac80211_hwsim_get_tsf, \
- .set_tsf = mac80211_hwsim_set_tsf, \
.get_et_sset_count = mac80211_hwsim_get_et_sset_count, \
.get_et_stats = mac80211_hwsim_get_et_stats, \
.get_et_strings = mac80211_hwsim_get_et_strings,
+#define HWSIM_NON_MLO_OPS \
+ .sta_add = mac80211_hwsim_sta_add, \
+ .sta_remove = mac80211_hwsim_sta_remove, \
+ .set_tim = mac80211_hwsim_set_tim, \
+ .get_tsf = mac80211_hwsim_get_tsf, \
+ .set_tsf = mac80211_hwsim_set_tsf,
+
static const struct ieee80211_ops mac80211_hwsim_ops = {
HWSIM_COMMON_OPS
+ HWSIM_NON_MLO_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
};
+#define HWSIM_CHANCTX_OPS \
+ .hw_scan = mac80211_hwsim_hw_scan, \
+ .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, \
+ .remain_on_channel = mac80211_hwsim_roc, \
+ .cancel_remain_on_channel = mac80211_hwsim_croc, \
+ .add_chanctx = mac80211_hwsim_add_chanctx, \
+ .remove_chanctx = mac80211_hwsim_remove_chanctx, \
+ .change_chanctx = mac80211_hwsim_change_chanctx, \
+ .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+
static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
HWSIM_COMMON_OPS
- .hw_scan = mac80211_hwsim_hw_scan,
- .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan,
- .sw_scan_start = NULL,
- .sw_scan_complete = NULL,
- .remain_on_channel = mac80211_hwsim_roc,
- .cancel_remain_on_channel = mac80211_hwsim_croc,
- .add_chanctx = mac80211_hwsim_add_chanctx,
- .remove_chanctx = mac80211_hwsim_remove_chanctx,
- .change_chanctx = mac80211_hwsim_change_chanctx,
- .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,
- .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+ HWSIM_NON_MLO_OPS
+ HWSIM_CHANCTX_OPS
+};
+
+static const struct ieee80211_ops mac80211_hwsim_mlo_ops = {
+ HWSIM_COMMON_OPS
+ HWSIM_CHANCTX_OPS
+ .set_rts_threshold = mac80211_hwsim_set_rts_threshold,
+ .change_vif_links = mac80211_hwsim_change_vif_links,
+ .change_sta_links = mac80211_hwsim_change_sta_links,
+ .sta_state = mac80211_hwsim_sta_state,
};
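Three ops tables are now assembled from shared macro fragments, so the MLO table can omit the non-MLO callbacks (sta_add/sta_remove, set_tim, TSF get/set) without duplicating everything else. The composition idiom, reduced to essentials (struct and macro names invented for the demo):

#include <stdio.h>

struct ops {
	void (*start)(void);
	void (*legacy_only)(void);
	void (*scan)(void);
};

static void start(void)  { puts("start"); }
static void legacy(void) { puts("legacy"); }
static void scan(void)   { puts("scan"); }

#define COMMON_OPS	.start = start,
#define NON_MLO_OPS	.legacy_only = legacy,
#define CHANCTX_OPS	.scan = scan,

static const struct ops mlo_ops = {
	COMMON_OPS
	CHANCTX_OPS	/* note: no NON_MLO_OPS here */
};

int main(void)
{
	/* unset designated initializers stay NULL */
	printf("mlo legacy_only set: %d\n", mlo_ops.legacy_only != NULL);
	return 0;
}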
struct hwsim_new_radio_params {
@@ -2696,6 +3184,7 @@ struct hwsim_new_radio_params {
u32 iftypes;
u32 *ciphers;
u8 n_ciphers;
+ bool mlo;
};
static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
@@ -2800,10 +3289,114 @@ out_err:
nlmsg_free(mcast_skb);
}
-static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
+ {
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xffff),
+ .tx_mcs_160 = cpu_to_le16(0xffff),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * Since B0, B1, B2 and B3 are not set in
+ * the supported channel width set field in the
+ * HE PHY capabilities information field the
+ * device is a 20MHz only device on 2.4GHz band.
+ */
+ .only_20mhz = {
+ .rx_tx_mcs7_max_nss = 0x88,
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
{
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -2846,6 +3439,66 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xffff),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * Since B0, B1, B2 and B3 are not set in
+ * the supported channel width set field in the
+ * HE PHY capabilities information field the
+ * device is a 20MHz only device on 2.4GHz band.
+ */
+ .only_20mhz = {
+ .rx_tx_mcs7_max_nss = 0x88,
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -2888,11 +3541,134 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
#endif
};
-static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
+ {
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, include all
+ * the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
{
- /* TODO: should we support other types, e.g., P2P?*/
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -2939,6 +3715,81 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, include all
+ * the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -2986,11 +3837,155 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
#endif
};
-static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
+ {
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_6ghz_capa = {
+ .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN |
+ IEEE80211_HE_6GHZ_CAP_SM_PS |
+ IEEE80211_HE_6GHZ_CAP_RD_RESPONDER |
+ IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS),
+ },
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported, include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
{
- /* TODO: should we support other types, e.g., P2P?*/
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -3046,6 +4041,93 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported, include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -3102,22 +4184,22 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
#endif
};
-static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
+static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
{
u16 n_iftype_data;
if (sband->band == NL80211_BAND_2GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_2ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_2ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_2ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_2ghz;
} else if (sband->band == NL80211_BAND_5GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_5ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_5ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_5ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_5ghz;
} else if (sband->band == NL80211_BAND_6GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_6ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_6ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_6ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_6ghz;
} else {
return;
}
@@ -3167,7 +4249,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- if (param->use_chanctx)
+ if (param->mlo)
+ ops = &mac80211_hwsim_mlo_ops;
+ else if (param->use_chanctx)
ops = &mac80211_hwsim_mchan_ops;
hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname);
if (!hw) {
@@ -3266,7 +4350,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_remain_on_channel_duration = 1000;
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
- data->chanctx = NULL;
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
@@ -3309,6 +4392,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->n_cipher_suites = param->n_ciphers;
}
+ data->rx_rssi = DEFAULT_RX_RSSI;
+
INIT_DELAYED_WORK(&data->roc_start, hw_roc_start);
INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
@@ -3326,13 +4411,22 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
- if (rctbl)
- ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ if (param->mlo) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ } else {
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ if (rctbl)
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ }
+
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -3440,7 +4534,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
- mac80211_hwsim_he_capab(sband);
+ mac80211_hwsim_sband_capab(sband);
hw->wiphy->bands[band] = sband;
}
@@ -3477,9 +4571,13 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_SOFT);
- data->beacon_timer.function = mac80211_hwsim_beacon;
+ for (i = 0; i < ARRAY_SIZE(data->link_data); i++) {
+ hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_SOFT);
+ data->link_data[i].beacon_timer.function =
+ mac80211_hwsim_beacon;
+ data->link_data[i].link_id = i;
+ }
err = ieee80211_register_hw(hw);
if (err < 0) {
@@ -3500,6 +4598,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
debugfs_create_file("group", 0666, data->debugfs, data,
&hwsim_fops_group);
+ debugfs_create_file("rx_rssi", 0666, data->debugfs, data,
+ &hwsim_fops_rx_rssi);
if (!data->use_chanctx)
debugfs_create_file("dfs_simulate_radar", 0222,
data->debugfs,
@@ -3699,6 +4799,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
const u8 *src;
unsigned int hwsim_flags;
int i;
+ unsigned long flags;
bool found = false;
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
@@ -3726,18 +4827,20 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
/* look for the skb matching the cookie passed back from user */
+ spin_lock_irqsave(&data2->pending.lock, flags);
skb_queue_walk_safe(&data2->pending, skb, tmp) {
- u64 skb_cookie;
+ uintptr_t skb_cookie;
txi = IEEE80211_SKB_CB(skb);
- skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0];
+ skb_cookie = (uintptr_t)txi->rate_driver_data[0];
if (skb_cookie == ret_skb_cookie) {
- skb_unlink(skb, &data2->pending);
+ __skb_unlink(skb, &data2->pending);
found = true;
break;
}
}
+ spin_unlock_irqrestore(&data2->pending.lock, flags);
/* not found */
if (!found)
@@ -3770,6 +4873,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
+
+ if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+ txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
out:
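The cookie stored in rate_driver_data is just the skb pointer squeezed through uintptr_t, so the comparison must use the same width on both sides; the hunk also takes the queue lock around the walk so the unlink stays atomic with the search. The pointer-as-cookie round trip, in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int payload = 42;
	void *skb = &payload;

	/* stash the pointer as an integer cookie ... */
	uintptr_t cookie = (uintptr_t)skb;

	/* ... and later match it against the stored value */
	if (cookie == (uintptr_t)skb)
		printf("matched skb carrying %d\n", *(int *)skb);
	return 0;
}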
@@ -3817,13 +4924,9 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (data2->use_chanctx) {
if (data2->tmp_chan)
channel = data2->tmp_chan;
- else if (data2->chanctx)
- channel = data2->chanctx->def.chan;
} else {
channel = data2->channel;
}
- if (!channel)
- goto out;
if (!hwsim_virtio_enabled) {
if (hwsim_net_get_netgroup(genl_info_net(info)) !=
@@ -3842,24 +4945,41 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* A frame is received from user space */
memset(&rx_status, 0, sizeof(rx_status));
if (info->attrs[HWSIM_ATTR_FREQ]) {
+ struct tx_iter_data iter_data = {};
+
/* throw away off-channel packets, but allow both the temporary
- * ("hw" scan/remain-on-channel) and regular channel, since the
- * internal datapath also allows this
+ * ("hw" scan/remain-on-channel), regular channels and links,
+ * since the internal datapath also allows this
*/
- mutex_lock(&data2->mutex);
rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
- if (rx_status.freq != channel->center_freq) {
- mutex_unlock(&data2->mutex);
+ iter_data.channel = ieee80211_get_channel(data2->hw->wiphy,
+ rx_status.freq);
+ if (!iter_data.channel)
goto out;
+ rx_status.band = iter_data.channel->band;
+
+ mutex_lock(&data2->mutex);
+ if (!hwsim_chans_compat(iter_data.channel, channel)) {
+ ieee80211_iterate_active_interfaces_atomic(
+ data2->hw, IEEE80211_IFACE_ITER_NORMAL,
+ mac80211_hwsim_tx_iter, &iter_data);
+ if (!iter_data.receive) {
+ mutex_unlock(&data2->mutex);
+ goto out;
+ }
}
mutex_unlock(&data2->mutex);
+ } else if (!channel) {
+ goto out;
} else {
rx_status.freq = channel->center_freq;
+ rx_status.band = channel->band;
}
- rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
+ goto out;
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
hdr = (void *)skb->data;
@@ -3868,10 +4988,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
ieee80211_is_probe_resp(hdr->frame_control))
rx_status.boottime_ns = ktime_get_boottime_ns();
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
- data2->rx_pkts++;
- data2->rx_bytes += skb->len;
- ieee80211_rx_irqsafe(data2->hw, skb);
+ mac80211_hwsim_rx(data2, &rx_status, skb);
return 0;
err:
@@ -4041,6 +5158,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
}
}
+ param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT];
+
+ if (param.mlo)
+ param.use_chanctx = true;
+
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
@@ -4241,6 +5363,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = hwsim_ops,
.n_small_ops = ARRAY_SIZE(hwsim_ops),
+ .resv_start_op = HWSIM_CMD_DEL_MAC_ADDR + 1,
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
@@ -4348,7 +5471,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
NULL);
}
- ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
+ ida_free(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
static struct pernet_operations hwsim_net_ops = {
@@ -4389,6 +5512,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
nlh = nlmsg_hdr(skb);
gnlh = nlmsg_data(nlh);
+
+ if (skb->len < nlh->nlmsg_len)
+ return -EINVAL;
+
err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
hwsim_genl_policy, NULL);
if (err) {
@@ -4431,7 +5558,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
skb->data = skb->head;
- skb_set_tail_pointer(skb, len);
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, len);
hwsim_virtio_handle_cmd(skb);
spin_lock_irqsave(&hwsim_virtio_lock, flags);
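The two-step replacement matters because skb_put() advances the tail pointer and the cached skb->len together, whereas skb_set_tail_pointer() moved only the tail and left the length stale. A toy analogue of the reset-then-put pattern (a hand-rolled buffer standing in for the sk_buff):

#include <stdio.h>
#include <string.h>

/* toy skb: data region plus cached length, like sk_buff's len field */
struct buf { char data[64]; size_t len; };

static void reset(struct buf *b)
{
	b->len = 0;	/* like skb_reset_tail_pointer() */
}

static char *put(struct buf *b, size_t n)
{
	char *p = b->data + b->len;

	b->len += n;	/* like skb_put(): moves tail AND len */
	return p;
}

int main(void)
{
	struct buf b;

	reset(&b);
	memcpy(put(&b, 5), "hello", 5);
	printf("len=%zu\n", b.len);	/* len matches the payload */
	return 0;
}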
@@ -4498,7 +5626,7 @@ static void remove_vqs(struct virtio_device *vdev)
{
int i;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
struct virtqueue *vq = hwsim_vqs[i];
@@ -4527,6 +5655,8 @@ static int hwsim_virtio_probe(struct virtio_device *vdev)
if (err)
return err;
+ virtio_device_ready(vdev);
+
err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);
if (err)
goto out_remove;
@@ -4697,7 +5827,8 @@ static int __init init_mac80211_hwsim(void)
}
param.p2p_device = support_p2p_device;
- param.use_chanctx = channels > 1;
+ param.mlo = mlo;
+ param.use_chanctx = channels > 1 || mlo;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 9dceed77c5d6..527799b2de0f 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020, 2022 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -140,6 +140,8 @@ enum {
* @HWSIM_ATTR_PERM_ADDR: permanent mac address of new radio
* @HWSIM_ATTR_IFTYPE_SUPPORT: u32 attribute of supported interface types bits
* @HWSIM_ATTR_CIPHER_SUPPORT: u32 array of supported cipher types
+ * @HWSIM_ATTR_MLO_SUPPORT: claim MLO support (exact parameters TBD) for
+ * the new radio
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -170,6 +172,7 @@ enum {
HWSIM_ATTR_PERM_ADDR,
HWSIM_ATTR_IFTYPE_SUPPORT,
HWSIM_ATTR_CIPHER_SUPPORT,
+ HWSIM_ATTR_MLO_SUPPORT,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
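New netlink attributes are appended just before the __..._MAX sentinel, so existing attribute numbers stay stable for old binaries and the _MAX macro tracks the highest valid attribute automatically. The pattern in miniature (names shortened from the header above):

#include <stdio.h>

enum {
	ATTR_UNSPEC,
	/* ... existing attributes keep their values ... */
	ATTR_CIPHER_SUPPORT,
	ATTR_MLO_SUPPORT,	/* new entries only ever go at the end */
	__ATTR_MAX,
};
#define ATTR_MAX (__ATTR_MAX - 1)

int main(void)
{
	printf("ATTR_MAX=%d\n", ATTR_MAX);	/* highest valid attribute */
	return 0;
}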
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 4e3de684928b..3e065cbb0af9 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -546,7 +546,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
pos = scanresp->bssdesc_and_tlvbuffer;
lbs_deb_hex(LBS_DEB_SCAN, "SCAN_RSP", scanresp->bssdesc_and_tlvbuffer,
- scanresp->bssdescriptsize);
+ bsssize);
tsfdesc = pos + bsssize;
tsfsize = 4 + 8 * scanresp->nr_sets;
@@ -1053,7 +1053,6 @@ static int lbs_set_authtype(struct lbs_private *priv,
*/
#define LBS_ASSOC_MAX_CMD_SIZE \
(sizeof(struct cmd_ds_802_11_associate) \
- - 512 /* cmd_ds_802_11_associate.iebuf */ \
+ LBS_MAX_SSID_TLV_SIZE \
+ LBS_MAX_CHANNEL_TLV_SIZE \
+ LBS_MAX_CF_PARAM_TLV_SIZE \
@@ -1130,8 +1129,7 @@ static int lbs_associate(struct lbs_private *priv,
if (sme->ie && sme->ie_len)
pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len);
- len = (sizeof(*cmd) - sizeof(cmd->iebuf)) +
- (u16)(pos - (u8 *) &cmd->iebuf);
+ len = sizeof(*cmd) + (u16)(pos - (u8 *) &cmd->iebuf);
cmd->hdr.size = cpu_to_le16(len);
lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_CMD", (u8 *) cmd,
@@ -1437,7 +1435,7 @@ static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
}
static int lbs_cfg_set_default_key(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -1457,8 +1455,8 @@ static int lbs_cfg_set_default_key(struct wiphy *wiphy,
static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 idx, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct lbs_private *priv = wiphy_priv(wiphy);
u16 key_info;
@@ -1518,7 +1516,8 @@ static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
diff --git a/drivers/net/wireless/marvell/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c
index d8e4f29b690d..9f53308a9935 100644
--- a/drivers/net/wireless/marvell/libertas/ethtool.c
+++ b/drivers/net/wireless/marvell/libertas/ethtool.c
@@ -20,8 +20,8 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
- strlcpy(info->driver, "libertas", sizeof(info->driver));
- strlcpy(info->version, lbs_driver_version, sizeof(info->version));
+ strscpy(info->driver, "libertas", sizeof(info->driver));
+ strscpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h
index ceff4b92e7a1..a202b716ad5d 100644
--- a/drivers/net/wireless/marvell/libertas/host.h
+++ b/drivers/net/wireless/marvell/libertas/host.h
@@ -528,7 +528,8 @@ struct cmd_ds_802_11_associate {
__le16 listeninterval;
__le16 bcnperiod;
u8 dtimperiod;
- u8 iebuf[512]; /* Enough for required and most optional IEs */
+ /* 512 permitted - enough for required and most optional IEs */
+ u8 iebuf[];
} __packed;
struct cmd_ds_802_11_associate_response {
@@ -537,7 +538,8 @@ struct cmd_ds_802_11_associate_response {
__le16 capability;
__le16 statuscode;
__le16 aid;
- u8 iebuf[512];
+ /* max 512 */
+ u8 iebuf[];
} __packed;
struct cmd_ds_802_11_set_wep {
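Converting iebuf[512] to a flexible array member means sizeof(struct ...) now counts only the fixed header, so the command length must add the IE bytes actually written, which is exactly the change made in lbs_associate() above. The sizeof difference, demonstrated standalone:

#include <stdint.h>
#include <stdio.h>

struct fixed { uint16_t hdr; uint8_t iebuf[512]; };
struct flex  { uint16_t hdr; uint8_t iebuf[]; };

int main(void)
{
	/* the flexible array contributes nothing to sizeof */
	printf("fixed=%zu flex=%zu\n", sizeof(struct fixed),
	       sizeof(struct flex));

	size_t ie_len = 32;	/* bytes of IEs actually written */
	size_t cmd_len = sizeof(struct flex) + ie_len;

	printf("command length on the wire: %zu\n", cmd_len);
	return 0;
}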
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index cd9f8ecf171f..ff1c7ec8c450 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1195,7 +1195,7 @@ out:
return err;
}
-static int libertas_spi_remove(struct spi_device *spi)
+static void libertas_spi_remove(struct spi_device *spi)
{
struct if_spi_card *card = spi_get_drvdata(spi);
struct lbs_private *priv = card->priv;
@@ -1212,8 +1212,6 @@ static int libertas_spi_remove(struct spi_device *spi)
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
-
- return 0;
}
static int if_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 5d6dc1dd050d..32fdc4150b60 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -287,6 +287,7 @@ static int if_usb_probe(struct usb_interface *intf,
return 0;
err_get_fw:
+ usb_put_dev(udev);
lbs_remove_card(priv);
err_add_card:
if_usb_reset_device(cardp);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 5c9f295536ea..8f5220cee112 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -39,8 +39,7 @@ unsigned int lbs_debug;
EXPORT_SYMBOL_GPL(lbs_debug);
module_param_named(libertas_debug, lbs_debug, int, 0644);
-unsigned int lbs_disablemesh;
-EXPORT_SYMBOL_GPL(lbs_disablemesh);
+static unsigned int lbs_disablemesh;
module_param_named(libertas_disablemesh, lbs_disablemesh, int, 0644);
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index a58c1e141f2c..90ffe8d1e0e8 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -109,9 +109,9 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
if (priv->mesh_dev) {
mesh_wdev = priv->mesh_dev->ieee80211_ptr;
- ie->val.mesh_id_len = mesh_wdev->mesh_id_up_len;
- memcpy(ie->val.mesh_id, mesh_wdev->ssid,
- mesh_wdev->mesh_id_up_len);
+ ie->val.mesh_id_len = mesh_wdev->u.mesh.id_up_len;
+ memcpy(ie->val.mesh_id, mesh_wdev->u.mesh.id,
+ mesh_wdev->u.mesh.id_up_len);
}
ie->len = sizeof(struct mrvl_meshie_val) -
@@ -986,8 +986,8 @@ static int lbs_add_mesh(struct lbs_private *priv)
mesh_wdev->wiphy = priv->wdev->wiphy;
if (priv->mesh_tlv) {
- sprintf(mesh_wdev->ssid, "mesh");
- mesh_wdev->mesh_id_up_len = 4;
+ sprintf(mesh_wdev->u.mesh.id, "mesh");
+ mesh_wdev->u.mesh.id_up_len = 4;
}
mesh_wdev->netdev = mesh_dev;
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index 9f24b0760e1f..c34d30f7cbe0 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -147,7 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
- netif_rx_any_context(skb);
+ netif_rx(skb);
ret = 0;
done:
@@ -262,7 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
- netif_rx_any_context(skb);
+ netif_rx(skb);
ret = 0;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 02a1e1f547d8..74c4942b9a5a 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -417,7 +417,7 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct lbtf_private *priv = hw->priv;
struct sk_buff *beacon;
@@ -427,7 +427,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
switch (priv->vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- beacon = ieee80211_beacon_get(hw, vif);
+ beacon = ieee80211_beacon_get(hw, vif, 0);
if (beacon) {
lbtf_beacon_set(priv, beacon);
kfree_skb(beacon);
@@ -691,7 +691,7 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
}
}
- skb = ieee80211_beacon_get(priv->hw, priv->vif);
+ skb = ieee80211_beacon_get(priv->hw, priv->vif, 0);
if (skb) {
lbtf_beacon_set(priv, skb);
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.c b/drivers/net/wireless/marvell/mwifiex/11ac.c
index 756f019ef28a..b9278d996c56 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.c
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11ac
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.h b/drivers/net/wireless/marvell/mwifiex/11ac.h
index 29e83468cf3f..65a88d6d8b88 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.h
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11ac
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11AC_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index d2ee6469e67b..6a9d7bc1f41e 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11h
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
@@ -303,5 +291,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
+ mutex_lock(&priv->wdev.mtx);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
+ mutex_unlock(&priv->wdev.mtx);
}
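
Beyond the SPDX change, this hunk adapts to the reworked channel-switch notifier: cfg80211_ch_switch_notify() now takes a link_id (0 for non-MLO drivers) and, as of this series, expects the caller to hold the wdev mutex, hence the explicit locking around the call. A sketch of the calling convention assumed here (later kernels changed this API again):

static void demo_notify_ch_switch(struct mwifiex_private *priv)
{
	mutex_lock(&priv->wdev.mtx);	/* required by the notifier */
	cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
	mutex_unlock(&priv->wdev.mtx);
}
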
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 9ff2058bcd7e..4af57e6d4393 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11n
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 83a88eecbda6..94b5e3e4ba08 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11n
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11N_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 46f41dbcf30d..34b4b34276d6 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11n Aggregation
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
index 382c1265c441..69b6888812f1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11n Aggregation
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11N_AGGR_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 1046b59647f5..a04b66284af4 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -347,7 +335,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
struct mwifiex_sta_node *node;
/*
- * If we get a TID, ta pair which is already present dispatch all the
+ * If we get a TID, ta pair which is already present dispatch all
* the packets and move the window size until the ssn
*/
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 465f244b3636..c205a3bbc8b3 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_11N_RXREORDER_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile
index 2bd00f40958e..12d8affced18 100644
--- a/drivers/net/wireless/marvell/mwifiex/Makefile
+++ b/drivers/net/wireless/marvell/mwifiex/Makefile
@@ -1,18 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright 2011-2020 NXP
#
-# This software file (the "File") is distributed by NXP
-# under the terms of the GNU General Public License Version 2, June 1991
-# (the "License"). You may use, redistribute and/or modify this File in
-# accordance with the terms and conditions of the License, a copy of which
-# is available by writing to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
-# worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
-#
-# THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
-# ARE EXPRESSLY DISCLAIMED. The License provides additional details about
-# this warranty disclaimer.
mwifiex-y += main.o
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 6f23ec34e2e2..bcd564dc3554 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: CFG80211
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "cfg80211.h"
@@ -154,7 +142,8 @@ static void *mwifiex_cfg80211_get_adapter(struct wiphy *wiphy)
*/
static int
mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
static const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -250,7 +239,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
- *cookie = prandom_u32() | 1;
+ *cookie = get_random_u32() | 1;
if (ieee80211_is_action(mgmt->frame_control))
skb = mwifiex_clone_skb_for_tx_status(priv,
@@ -314,7 +303,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
duration);
if (!ret) {
- *cookie = prandom_u32() | 1;
+ *cookie = get_random_u32() | 1;
priv->roc_cfg.cookie = *cookie;
priv->roc_cfg.chan = *chan;
@@ -443,7 +432,7 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast,
+ int link_id, u8 key_index, bool unicast,
bool multicast)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
@@ -468,8 +457,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
*/
static int
mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
struct mwifiex_wep_key *wep_key;
@@ -506,6 +495,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
struct net_device *netdev,
+ int link_id,
u8 key_index)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
@@ -1753,10 +1743,12 @@ mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
* Function configures data rates to firmware using bitrate mask
* provided by cfg80211.
*/
-static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
- struct net_device *dev,
- const u8 *peer,
- const struct cfg80211_bitrate_mask *mask)
+static int
+mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
+ struct net_device *dev,
+ unsigned int link_id,
+ const u8 *peer,
+ const struct cfg80211_bitrate_mask *mask)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
@@ -1998,7 +1990,8 @@ mwifiex_cfg80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
/* cfg80211 operation handler for stop ap.
* Function stops BSS running at uAP interface.
*/
-static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2421,7 +2414,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
}
- if (priv->wdev.current_bss) {
+ if (priv->wdev.connected) {
mwifiex_dbg(adapter, ERROR,
"%s: already connected\n", dev->name);
return -EALREADY;
@@ -2649,7 +2642,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
- if (!priv->wdev.current_bss && priv->scan_block)
+ if (!priv->wdev.connected && priv->scan_block)
priv->scan_block = false;
if (!mwifiex_stop_bg_scan(priv))
@@ -4025,6 +4018,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
struct cfg80211_chan_def *chandef)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
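
The cfg80211.c changes are mostly mechanical MLO plumbing: the key-management ops (add_key, del_key, set_default_key, set_default_mgmt_key), set_bitrate_mask, stop_ap and get_channel all gain a link_id parameter, which this non-MLO driver can ignore; the connection check moves from the removed wdev->current_bss pointer to the new wdev.connected flag; and cookie generation switches from prandom_u32() to get_random_u32() as part of the prandom cleanup, with "| 1" still guaranteeing a non-zero cookie. A sketch of the general shape, with hypothetical demo_* names:

/* Sketch only: a link-aware cfg80211 op and a non-zero TX cookie,
 * mirroring the changes above.
 */
static int demo_del_key(struct wiphy *wiphy, struct net_device *netdev,
			int link_id, u8 key_index, bool pairwise,
			const u8 *mac_addr)
{
	/* link_id unused: per-link keys need MLO support */
	return 0;
}

static u64 demo_tx_cookie(void)
{
	return get_random_u32() | 1;	/* never zero */
}
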
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.h b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
index 530a63f13f14..50f7001f5ef0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: CFG80211
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef __MWIFIEX_CFG80211__
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index fb91ecfc5546..d39092b99212 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: Channel, Frequency and Power
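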
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index d6a61f850c6f..d3339d67e7a0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: commands and events
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index dded92db1f37..bda53cb91f37 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: debugfs
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/debugfs.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 6bd23c9b1eed..88648c062713 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: generic data structures and APIs
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_DECL_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/ethtool.c b/drivers/net/wireless/marvell/mwifiex/ethtool.c
index 9bdad3f59039..17c6e7fedfc4 100644
--- a/drivers/net/wireless/marvell/mwifiex/ethtool.c
+++ b/drivers/net/wireless/marvell/mwifiex/ethtool.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: ethtool
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 63c25c69ed2b..b4f945a549f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: Firmware specific macros & structures
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_FW_H_
@@ -2116,7 +2104,7 @@ struct mwifiex_fw_mef_entry {
struct host_cmd_ds_mef_cfg {
__le32 criteria;
__le16 num_entries;
- struct mwifiex_fw_mef_entry mef_entry[];
+ u8 mef_entry_data[];
} __packed;
#define CONNECTION_TYPE_INFRA 0
@@ -2266,7 +2254,7 @@ struct coalesce_receive_filt_rule {
struct host_cmd_ds_coalesce_cfg {
__le16 action;
__le16 num_of_rules;
- struct coalesce_receive_filt_rule rule[];
+ u8 rule_data[];
} __packed;
struct host_cmd_ds_multi_chan_policy {
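
The two structure changes above address the same C constraint: a structure whose last member is a flexible array (as mwifiex_fw_mef_entry and coalesce_receive_filt_rule both are, since each carries a variable-length payload) cannot itself be the element type of another flexible array. The containers therefore keep a raw byte array that consumers walk with explicit casts, as the sta_cmd.c hunks further down show. A sketch of the pattern with hypothetical names:

/* Sketch: flexible-array-of-flexible-array is invalid C, so the outer
 * command keeps raw bytes and callers cast while walking the records.
 */
struct demo_entry {
	u8 mode;
	u8 action;
	u8 payload[];		/* variable-length expression */
};

struct demo_cfg {
	__le16 num_entries;
	u8 entry_data[];	/* packed demo_entry records */
} __packed;

static struct demo_entry *demo_first_entry(struct demo_cfg *cfg)
{
	return (struct demo_entry *)cfg->entry_data;
}
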
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 40e99eaf5a30..26694cee15d3 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: management IE handling - setting and
* deleting IE.
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 88c72d1827a0..7dddb4b5dea1 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: HW/FW Initialization
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -63,9 +51,10 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->if_ops.card_reset(adapter);
}
-static void fw_dump_timer_fn(struct timer_list *t)
+static void fw_dump_work(struct work_struct *work)
{
- struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+ struct mwifiex_adapter *adapter =
+ container_of(work, struct mwifiex_adapter, devdump_work.work);
mwifiex_upload_device_dump(adapter);
}
@@ -321,7 +310,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->active_scan_triggered = false;
timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
adapter->devdump_len = 0;
- timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
+ INIT_DELAYED_WORK(&adapter->devdump_work, fw_dump_work);
}
/*
@@ -400,7 +389,7 @@ static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
del_timer(&adapter->wakeup_timer);
- del_timer_sync(&adapter->devdump_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);
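
The firmware-dump trigger moves from a timer to a delayed work because uploading the device dump may sleep, which a timer callback (atomic context) must not do. Every timer call has a direct counterpart, as this sketch of the conversion pattern shows (demo_* names are hypothetical):

struct demo_adapter {
	struct delayed_work dump_work;	/* was: struct timer_list */
};

static void demo_dump_fn(struct work_struct *work)
{
	struct demo_adapter *a =
		container_of(work, struct demo_adapter, dump_work.work);

	/* runs in process context and may sleep, unlike a timer */
	(void)a;
}

static void demo_setup(struct demo_adapter *a)
{
	INIT_DELAYED_WORK(&a->dump_work, demo_dump_fn);	/* timer_setup() */
	schedule_delayed_work(&a->dump_work,
			      msecs_to_jiffies(10000));	/* mod_timer() */
	cancel_delayed_work_sync(&a->dump_work);	/* del_timer_sync() */
}
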
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 3db449efa167..091e7ca79376 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: ioctl data structures & APIs
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_IOCTL_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 173ccf79cbfc..a6e254a1185c 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: association and ad-hoc start/join
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index ace7371c4773..da2e6557e684 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: major functions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/suspend.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 332dd1c8db35..63f861e6b28a 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: major data structures and prototypes
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_MAIN_H_
@@ -49,6 +37,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
+#include <linux/workqueue.h>
#include "decl.h"
#include "ioctl.h"
@@ -1055,7 +1044,7 @@ struct mwifiex_adapter {
/* Device dump data/length */
void *devdump_data;
int devdump_len;
- struct timer_list devdump_timer;
+ struct delayed_work devdump_work;
bool ignore_btcoex_events;
};
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index d5fb29400bad..5dcf61761a16 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: PCIE specific handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/iopoll.h>
@@ -656,7 +644,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- int retval;
+ int retval __maybe_unused;
mwifiex_dbg(adapter, EVENT,
"event: Wakeup device...\n");
@@ -3373,7 +3361,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
} else {
mwifiex_dbg(adapter, INFO,
"%s(): calling free_irq()\n", __func__);
- free_irq(card->dev->irq, &card->share_irq_ctx);
+ free_irq(card->dev->irq, &card->share_irq_ctx);
if (card->msi_enable)
pci_disable_msi(pdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 981e330c77d7..de901b3b59ad 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* @file mwifiex_pcie.h
*
 * @brief This file contains definitions for the PCI-E interface
 * driver.
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_PCIE_H
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
index 0234cf3c2974..dd6d21f1dbfd 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
@@ -1,19 +1,5 @@
-/*
- * NXP Wireless LAN device driver: PCIE and platform specific quirks
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// NXP Wireless LAN device driver: PCIE and platform specific quirks
#include <linux/dmi.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
index 8ec4176d698f..d6ff964aec5b 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
@@ -1,19 +1,5 @@
-/*
- * NXP Wireless LAN device driver: PCIE and platform specific quirks
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* NXP Wireless LAN device driver: PCIE and platform specific quirks */
#include "pcie.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0b877f3f6b97..ac8001c84293 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: scan ioctl and command handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bde9e4bbfffe..b8dc3b5c9ad9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: SDIO specific handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/firmware.h>
@@ -182,6 +170,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
.host_int_rsr_reg = 0x4,
.host_int_status_reg = 0x0C,
.host_int_mask_reg = 0x08,
+ .host_strap_reg = 0xF4,
+ .host_strap_mask = 0x01,
+ .host_strap_value = 0x00,
.status_reg_0 = 0xE8,
.status_reg_1 = 0xE9,
.sdio_int_mask = 0xff,
@@ -283,6 +274,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
.host_int_rsr_reg = 0x4,
.host_int_status_reg = 0x0C,
.host_int_mask_reg = 0x08,
+ .host_strap_reg = 0xF4,
+ .host_strap_mask = 0x01,
+ .host_strap_value = 0x00,
.status_reg_0 = 0xE8,
.status_reg_1 = 0xE9,
.sdio_int_mask = 0xff,
@@ -402,6 +396,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
+ .firmware_sdiouart = SD8997_SDIOUART_FW_NAME,
.reg = &mwifiex_reg_sd8997,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
@@ -536,6 +531,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
struct mwifiex_sdio_device *data = (void *)id->driver_data;
card->firmware = data->firmware;
+ card->firmware_sdiouart = data->firmware_sdiouart;
card->reg = data->reg;
card->max_ports = data->max_ports;
card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
@@ -1541,7 +1537,7 @@ done:
/*
 * This function decodes an SDIO aggregation pkt.
*
- * Based on the the data block size and pkt_len,
+ * Based on the data block size and pkt_len,
* skb data will be decoded to few packets.
*/
static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
@@ -2439,6 +2435,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
int ret;
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
+ const char *firmware = card->firmware;
/* save adapter pointer in card */
card->adapter = adapter;
@@ -2455,7 +2452,18 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return ret;
}
- strcpy(adapter->fw_name, card->firmware);
+ /* Select the correct firmware (sdsd or sdiouart) based on the strapping
+ * option
+ */
+ if (card->firmware_sdiouart) {
+ u8 val;
+
+ mwifiex_read_reg(adapter, card->reg->host_strap_reg, &val);
+ if ((val & card->reg->host_strap_mask) == card->reg->host_strap_value)
+ firmware = card->firmware_sdiouart;
+ }
+ strcpy(adapter->fw_name, firmware);
+
if (card->fw_dump_enh) {
adapter->mem_type_mapping_tbl = generic_mem_type_map;
adapter->num_mem_types = 1;
@@ -2639,7 +2647,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
/* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- ret = mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card);
sdio_release_host(func);
switch (ret) {
@@ -3157,3 +3165,4 @@ MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME);
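
Two independent changes land in sdio.c. First, the chips touched here (the hunks update the SD8997 and SD8987 register maps) can be strapped for SDIO-SDIO or SDIO-UART operation, and the registration path now reads a strap register to pick the matching firmware image; the sdio.h hunk below adds the register fields and the second image name. Second, mmc_hw_reset() changed upstream (around v5.19) to take the struct mmc_card instead of the mmc_host. A sketch of the firmware selection, wrapped in a hypothetical helper around the fields the patch introduces:

static const char *demo_pick_fw(struct mwifiex_adapter *adapter,
				struct sdio_mmc_card *card)
{
	u8 val;

	if (!card->firmware_sdiouart)
		return card->firmware;	/* chip has a single image */

	mwifiex_read_reg(adapter, card->reg->host_strap_reg, &val);
	if ((val & card->reg->host_strap_mask) == card->reg->host_strap_value)
		return card->firmware_sdiouart;

	return card->firmware;
}
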
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 5648512c9300..3a24bb48b299 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: SDIO specific definitions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_SDIO_H
@@ -39,6 +27,7 @@
#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
+#define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
@@ -196,6 +185,9 @@ struct mwifiex_sdio_card_reg {
u8 host_int_rsr_reg;
u8 host_int_status_reg;
u8 host_int_mask_reg;
+ u8 host_strap_reg;
+ u8 host_strap_mask;
+ u8 host_strap_value;
u8 status_reg_0;
u8 status_reg_1;
u8 sdio_int_mask;
@@ -241,6 +233,7 @@ struct sdio_mmc_card {
struct completion fw_done;
const char *firmware;
+ const char *firmware_sdiouart;
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
u8 mp_agg_pkt_limit;
@@ -274,6 +267,7 @@ struct sdio_mmc_card {
struct mwifiex_sdio_device {
const char *firmware;
+ const char *firmware_sdiouart;
const struct mwifiex_sdio_card_reg *reg;
u8 max_ports;
u8 mp_agg_pkt_limit;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 1e2798dce18f..e2800a831c8e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station command handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -1447,7 +1435,7 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
mef_entry = (struct mwifiex_fw_mef_entry *)pos;
mef_entry->mode = mef->mef_entry[i].mode;
mef_entry->action = mef->mef_entry[i].action;
- pos += sizeof(*mef_cfg->mef_entry);
+ pos += sizeof(*mef_entry);
if (mwifiex_cmd_append_rpn_expression(priv,
&mef->mef_entry[i], &pos))
@@ -1643,7 +1631,7 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
coalesce_cfg->action = cpu_to_le16(cmd_action);
coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
- rule = coalesce_cfg->rule;
+ rule = (void *)coalesce_cfg->rule_data;
for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
@@ -1790,29 +1778,31 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
wmm_qos_info->qos_info = 0;
config_len += sizeof(struct mwifiex_ie_types_qos_info);
- if (params->ht_capa) {
+ if (params->link_sta_params.ht_capa) {
ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
config_len);
ht_capab->header.type =
cpu_to_le16(WLAN_EID_HT_CAPABILITY);
ht_capab->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
- memcpy(&ht_capab->ht_cap, params->ht_capa,
+ memcpy(&ht_capab->ht_cap, params->link_sta_params.ht_capa,
sizeof(struct ieee80211_ht_cap));
config_len += sizeof(struct mwifiex_ie_types_htcap);
}
- if (params->supported_rates && params->supported_rates_len) {
+ if (params->link_sta_params.supported_rates &&
+ params->link_sta_params.supported_rates_len) {
tlv_rates = (struct host_cmd_tlv_rates *)(pos +
config_len);
tlv_rates->header.type =
cpu_to_le16(WLAN_EID_SUPP_RATES);
tlv_rates->header.len =
- cpu_to_le16(params->supported_rates_len);
- memcpy(tlv_rates->rates, params->supported_rates,
- params->supported_rates_len);
+ cpu_to_le16(params->link_sta_params.supported_rates_len);
+ memcpy(tlv_rates->rates,
+ params->link_sta_params.supported_rates,
+ params->link_sta_params.supported_rates_len);
config_len += sizeof(struct host_cmd_tlv_rates) +
- params->supported_rates_len;
+ params->link_sta_params.supported_rates_len;
}
if (params->ext_capab && params->ext_capab_len) {
@@ -1826,14 +1816,14 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
config_len += sizeof(struct mwifiex_ie_types_extcap) +
params->ext_capab_len;
}
- if (params->vht_capa) {
+ if (params->link_sta_params.vht_capa) {
vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
config_len);
vht_capab->header.type =
cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
vht_capab->header.len =
cpu_to_le16(sizeof(struct ieee80211_vht_cap));
- memcpy(&vht_capab->vht_cap, params->vht_capa,
+ memcpy(&vht_capab->vht_cap, params->link_sta_params.vht_capa,
sizeof(struct ieee80211_vht_cap));
config_len += sizeof(struct mwifiex_ie_types_vhtcap);
}
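
Besides the flexible-array fallout from fw.h (mef_entry_data is now walked via a cast and sizeof(*mef_entry), and rule_data likewise), the TDLS hunks track the cfg80211 split of per-link station data: ht_capa, vht_capa and the supported-rates fields moved from struct station_parameters into its embedded link_sta_params. A sketch of the accessor change, assuming the mainline layout:

/* Sketch only: per-link station capabilities now sit one level deeper. */
static bool demo_has_ht(const struct station_parameters *params)
{
	/* was: params->ht_capa */
	return params->link_sta_params.ht_capa != NULL;
}
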
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 1a4ae8a42a31..7b69d27e0c0e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station command response handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 7d42c5d2dbf6..df9cdd10a494 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station event handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -623,8 +611,8 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
 * transmission event gets lost; in this corner case, the
 * user would still get a partial dump.
*/
- mod_timer(&adapter->devdump_timer,
- jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ schedule_delayed_work(&adapter->devdump_work,
+ msecs_to_jiffies(MWIFIEX_TIMER_10S));
}
/* Overflow check */
@@ -635,7 +623,7 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
adapter->event_skb->data, event_skb->len);
adapter->devdump_len += event_skb->len;
- if (le16_to_cpu(fw_dump_hdr->type == FW_DUMP_INFO_ENDED)) {
+ if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
mwifiex_dbg(adapter, MSG,
"receive end of transmission flag event!\n");
goto upload_dump;
@@ -643,7 +631,7 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
return;
upload_dump:
- del_timer_sync(&adapter->devdump_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_upload_device_dump(adapter);
}
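
This hunk switches the dump watchdog to the delayed work introduced in init.c, and it also fixes a genuine bug: the byte-order conversion wrapped the whole comparison, so the code converted the boolean result (0 or 1) instead of the little-endian field, and only worked by accident on little-endian hosts. A sketch of the bug class, with a hypothetical header layout:

struct demo_dump_hdr {
	__le16 type;
};

static bool demo_dump_ended(const struct demo_dump_hdr *hdr)
{
	/* buggy:  le16_to_cpu(hdr->type == FW_DUMP_INFO_ENDED) */
	/* fixed: convert the field, then compare */
	return le16_to_cpu(hdr->type) == FW_DUMP_INFO_ENDED;
}
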
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 4062e515697a..a2ad2b53f016 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: functions for station ioctl
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 0d2adf887900..13659b02ba88 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station RX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <uapi/linux/ipv6.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index a9b5eb992220..13c0e67ededf 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: station TX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index a8479b879382..54c204608dab 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: generic TX/RX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18e89777b784..e78a201cd150 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP specific command handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
@@ -389,7 +377,7 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
{
const u8 *vendor_ie;
const u8 *wmm_ie;
- u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
+ static const u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 2e25d72dcac5..58ef5020a46a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP event handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 245ff644f81e..e495f7eaea03 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: AP TX and RX data handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -350,7 +338,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* Forward multicast/broadcast packet to upper layer */
- netif_rx_any_context(skb);
+ netif_rx(skb);
return 0;
}
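
netif_rx_any_context() disappears here because, in recent kernels, netif_rx() itself detects the calling context. A minimal sketch of a generic RX delivery path under that assumption; the driver-side names are illustrative:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	static int my_deliver_rx(struct net_device *ndev, struct sk_buff *skb)
	{
		skb->protocol = eth_type_trans(skb, ndev);

		/* netif_rx() is now safe from any context, so the
		 * netif_rx_any_context() wrapper is no longer needed */
		return netif_rx(skb);
	}
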
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 8f01fcbe9396..d3ab9572e711 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: USB specific handling
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "main.h"
@@ -923,14 +911,14 @@ static int mwifiex_usb_prepare_tx_aggr_skb(struct mwifiex_adapter *adapter,
memcpy(payload, skb_tmp->data, skb_tmp->len);
if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
/* do not pad the last packet */
- *(u16 *)payload = cpu_to_le16(skb_tmp->len);
- *(u16 *)&payload[2] =
+ *(__le16 *)payload = cpu_to_le16(skb_tmp->len);
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
skb_trim(skb_aggr, skb_aggr->len - pad);
} else {
/* add aggregation interface header */
- *(u16 *)payload = cpu_to_le16(skb_tmp->len + pad);
- *(u16 *)&payload[2] =
+ *(__le16 *)payload = cpu_to_le16(skb_tmp->len + pad);
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
}
@@ -1109,9 +1097,9 @@ send_aggr_buf:
}
payload = skb->data;
- *(u16 *)&payload[2] =
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
- *(u16 *)payload = cpu_to_le16(skb->len);
+ *(__le16 *)payload = cpu_to_le16(skb->len);
skb_send = skb;
context = &port->tx_data_list[port->tx_data_ix++];
return mwifiex_usb_construct_send_urb(adapter, port, ep,
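
The usb.c hunks above change only type annotations: the stores were already little-endian, but casting through u16 * hid that from sparse. A sketch of the annotated form, with the buffer layout illustrative:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void write_aggr_hdr(u8 *payload, u16 len, u16 flags)
	{
		/* sparse tracks __le16 as a distinct type, so an unannotated
		 * u16 * cast around cpu_to_le16() raises a warning */
		*(__le16 *)payload = cpu_to_le16(len);
		*(__le16 *)&payload[2] = cpu_to_le16(flags);
	}
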
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index 61a96b7fbf21..7e920b51994c 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This file contains definitions for mwifiex USB interface driver.
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_USB_H
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index d583fa600a29..94c2d219835d 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: utility functions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
@@ -488,7 +476,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- netif_rx_any_context(skb);
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index 44aa80eb7827..4699c505c0a0 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: utility functions
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_UTIL_H_
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 0b375608df7d..00a5679b5c51 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NXP Wireless LAN device driver: WMM
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include "decl.h"
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 1cb3d1804758..4f53a271dae0 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NXP Wireless LAN device driver: WMM
*
* Copyright 2011-2020 NXP
- *
- * This software file (the "File") is distributed by NXP
- * under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available by writing to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
- * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#ifndef _MWIFIEX_WMM_H_
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 864a2ba9efee..4dc7e2e53b81 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -1880,7 +1880,7 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
* packets ever exceeds the ampdu_min_traffic threshold, we will allow
* an ampdu stream to be started.
*/
- if (jiffies - tx_stats->start_time > HZ) {
+ if (time_after(jiffies, (unsigned long)tx_stats->start_time + HZ)) {
tx_stats->pkts = 0;
tx_stats->start_time = 0;
} else
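
The hunk above replaces an open-coded jiffies comparison with time_after(), which expresses the wraparound-safe test directly. A sketch; the one-second window is illustrative:

	#include <linux/jiffies.h>

	static bool window_expired(unsigned long start_time)
	{
		/* time_after(a, b) is true when a is after b; it compares
		 * via signed subtraction, so it stays correct across a
		 * jiffies wraparound, unlike an ad-hoc "a - b > HZ" test
		 * on a possibly narrower stored timestamp */
		return time_after(jiffies, start_time + HZ);
	}
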
@@ -1985,7 +1985,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
txpriority = index;
- if (priv->ap_fw && sta && sta->ht_cap.ht_supported && !eapol_frame &&
+ if (priv->ap_fw && sta && sta->deflink.ht_cap.ht_supported && !eapol_frame &&
ieee80211_is_data_qos(wh->frame_control)) {
tid = qos & 0xf;
mwl8k_tx_count_packet(sta, tid);
@@ -3250,7 +3250,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->aid = cpu_to_le16(vif->bss_conf.aid);
+ cmd->aid = cpu_to_le16(vif->cfg.aid);
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
if (vif->bss_conf.use_cts_prot) {
@@ -4027,9 +4027,9 @@ mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
cmd->create_params.reset_seq_no_flag = 1;
cmd->create_params.param_info =
- (stream->sta->ht_cap.ampdu_factor &
+ (stream->sta->deflink.ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
- ((stream->sta->ht_cap.ampdu_density << 2) &
+ ((stream->sta->deflink.ht_cap.ampdu_density << 2) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
cmd->create_params.flags =
@@ -4113,18 +4113,18 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
cmd->stn_id = cpu_to_le16(sta->aid);
cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
- rates = sta->supp_rates[NL80211_BAND_2GHZ];
+ rates = sta->deflink.supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
+ rates = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 5;
cmd->legacy_rates = cpu_to_le32(rates);
- if (sta->ht_cap.ht_supported) {
- cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
- cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
- cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
- cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
- cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
- cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
- ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (sta->deflink.ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->deflink.ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->deflink.ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->deflink.ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->deflink.ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->deflink.ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->deflink.ht_cap.ampdu_factor & 3) |
+ ((sta->deflink.ht_cap.ampdu_density & 7) << 2);
cmd->is_qos_sta = 1;
}
@@ -4545,16 +4545,16 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
p = &cmd->peer_info;
p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
- p->ht_support = sta->ht_cap.ht_supported;
- p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
- p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
- ((sta->ht_cap.ampdu_density & 7) << 2);
+ p->ht_support = sta->deflink.ht_cap.ht_supported;
+ p->ht_caps = cpu_to_le16(sta->deflink.ht_cap.cap);
+ p->extended_ht_caps = (sta->deflink.ht_cap.ampdu_factor & 3) |
+ ((sta->deflink.ht_cap.ampdu_density & 7) << 2);
if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
- rates = sta->supp_rates[NL80211_BAND_2GHZ];
+ rates = sta->deflink.supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
+ rates = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
- memcpy(p->ht_rates, &sta->ht_cap.mcs, 16);
+ memcpy(p->ht_rates, &sta->deflink.ht_cap.mcs, 16);
p->interop = 1;
p->amsdu_enabled = 0;
@@ -5013,13 +5013,13 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/*
* No need to capture a beacon if we're no longer associated.
*/
- if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->cfg.assoc)
priv->capture_beacon = false;
/*
* Get the AP's legacy and MCS rates.
*/
- if (vif->bss_conf.assoc) {
+ if (vif->cfg.assoc) {
struct ieee80211_sta *ap;
rcu_read_lock();
@@ -5031,12 +5031,12 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
- ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
+ ap_legacy_rates = ap->deflink.supp_rates[NL80211_BAND_2GHZ];
} else {
ap_legacy_rates =
- ap->supp_rates[NL80211_BAND_5GHZ] << 5;
+ ap->deflink.supp_rates[NL80211_BAND_5GHZ] << 5;
}
- memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16);
+ memcpy(ap_mcs_rates, &ap->deflink.ht_cap.mcs, 16);
rcu_read_unlock();
@@ -5085,7 +5085,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (vif->bss_conf.assoc && !priv->ap_fw &&
+ if (vif->cfg.assoc && !priv->ap_fw &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_HT))) {
rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
@@ -5093,7 +5093,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (vif->bss_conf.assoc &&
+ if (vif->cfg.assoc &&
(changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
@@ -5147,7 +5147,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
struct sk_buff *skb;
- skb = ieee80211_beacon_get(hw, vif);
+ skb = ieee80211_beacon_get(hw, vif, 0);
if (skb != NULL) {
mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
kfree_skb(skb);
@@ -5163,7 +5163,7 @@ out:
static void
mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
if (vif->type == NL80211_IFTYPE_STATION)
mwl8k_bss_info_changed_sta(hw, vif, info, changed);
@@ -5347,7 +5347,7 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
if (ret >= 0) {
MWL8K_STA(sta)->peer_id = ret;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
MWL8K_STA(sta)->is_ampdu_allowed = true;
ret = 0;
}
@@ -5365,7 +5365,8 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mwl8k_priv *priv = hw->priv;
@@ -6050,7 +6051,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
goto fail;
for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) {
- rc = mwl8k_conf_tx(hw, NULL, i, &priv->wmm_params[i]);
+ rc = mwl8k_conf_tx(hw, NULL, 0, i, &priv->wmm_params[i]);
if (rc)
goto fail;
}
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 72622220051b..10cbd9e560e7 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -162,15 +162,15 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (!sta)
return;
- if (!status->aggr && !(status->flag & RX_FLAG_8023)) {
- mt76_rx_aggr_check_ctl(skb, frames);
+ if (!status->aggr) {
+ if (!(status->flag & RX_FLAG_8023))
+ mt76_rx_aggr_check_ctl(skb, frames);
return;
}
/* not part of a BA session */
ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
- if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
- ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
return;
tid = rcu_dereference(wcid->aggr[tidno]);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3a9af8931c35..7378c4d1e156 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -7,6 +7,37 @@
#include "mt76.h"
#include "dma.h"
+#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
+
+#define Q_READ(_dev, _q, _field) ({ \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ u32 _val; \
+ if ((_q)->flags & MT_QFLAG_WED) \
+ _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
+ ((_q)->wed_regs + \
+ _offset)); \
+ else \
+ _val = readl(&(_q)->regs->_field); \
+ _val; \
+})
+
+#define Q_WRITE(_dev, _q, _field, _val) do { \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ if ((_q)->flags & MT_QFLAG_WED) \
+ mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
+ ((_q)->wed_regs + _offset), \
+ _val); \
+ else \
+ writel(_val, &(_q)->regs->_field); \
+} while (0)
+
+#else
+
+#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
+#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
+
+#endif
+
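
The Q_READ/Q_WRITE macros added above dispatch each ring-register access either to plain MMIO or to the WED (Wireless Ethernet Dispatch) block, keyed off a queue flag and an offsetof() into the register layout. A hedged stand-alone sketch of the same dispatch idea; every name here is illustrative, and offload_reg_read() is a hypothetical accessor:

	#include <linux/io.h>
	#include <linux/stddef.h>

	struct ring_regs {
		u32 desc_base;
		u32 ring_size;
		u32 cpu_idx;
		u32 dma_idx;
	};

	struct ring {
		struct ring_regs __iomem *regs;
		bool offloaded;		/* analogous to MT_QFLAG_WED */
		u32 remote_base;	/* analogous to q->wed_regs */
	};

	/* hypothetical accessor for the offload block's register window */
	u32 offload_reg_read(u32 addr);

	#define RING_READ(_r, _field) ({					\
		u32 _off = offsetof(struct ring_regs, _field);			\
		(_r)->offloaded ? offload_reg_read((_r)->remote_base + _off) :	\
				  readl(&(_r)->regs->_field);			\
	})
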
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
@@ -16,11 +47,11 @@ mt76_alloc_txwi(struct mt76_dev *dev)
int size;
size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
- txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ txwi = kzalloc(size, GFP_ATOMIC);
if (!txwi)
return NULL;
- addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+ addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE);
t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr;
@@ -73,18 +104,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
struct mt76_txwi_cache *t;
local_bh_disable();
- while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ while ((t = __mt76_get_txwi(dev)) != NULL) {
+ dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
+ kfree(mt76_get_txwi_ptr(dev, t));
+ }
local_bh_enable();
}
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- writel(q->desc_dma, &q->regs->desc_base);
- writel(q->ndesc, &q->regs->ring_size);
- q->head = readl(&q->regs->dma_idx);
+ Q_WRITE(dev, q, desc_base, q->desc_dma);
+ Q_WRITE(dev, q, ring_size, q->ndesc);
+ q->head = Q_READ(dev, q, dma_idx);
q->tail = q->head;
}
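
The txwi hunks above move the buffer from devm allocation (freed only at device detach) to explicit kzalloc()/kfree(), paired with mapping against dev->dma_dev. A minimal sketch of that map/unmap lifetime, with a mapping-error check added for completeness; names and sizes are illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static void *alloc_dma_buf(struct device *dma_dev, size_t size,
				   dma_addr_t *addr)
	{
		void *buf = kzalloc(size, GFP_ATOMIC);

		if (!buf)
			return NULL;

		*addr = dma_map_single(dma_dev, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, *addr)) {
			kfree(buf);
			return NULL;
		}
		return buf;
	}

	static void free_dma_buf(struct device *dma_dev, void *buf,
				 dma_addr_t addr, size_t size)
	{
		dma_unmap_single(dma_dev, addr, size, DMA_TO_DEVICE);
		kfree(buf);	/* now freed here, not by devm at detach */
	}
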
@@ -93,49 +126,19 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
int i;
- if (!q)
+ if (!q || !q->ndesc)
return;
/* clear descriptors */
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- writel(0, &q->regs->cpu_idx);
- writel(0, &q->regs->dma_idx);
+ Q_WRITE(dev, q, cpu_idx, 0);
+ Q_WRITE(dev, q, dma_idx, 0);
mt76_dma_sync_idx(dev, q);
}
static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
- int idx, int n_desc, int bufsize,
- u32 ring_base)
-{
- int size;
-
- spin_lock_init(&q->lock);
- spin_lock_init(&q->cleanup_lock);
-
- q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
- q->hw_idx = idx;
-
- size = q->ndesc * sizeof(struct mt76_desc);
- q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
- if (!q->desc)
- return -ENOMEM;
-
- size = q->ndesc * sizeof(*q->entry);
- q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
- if (!q->entry)
- return -ENOMEM;
-
- mt76_dma_queue_reset(dev, q);
-
- return 0;
-}
-
-static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi)
@@ -203,11 +206,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *e = &q->entry[idx];
if (!e->skip_buf0)
- dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
+ dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
DMA_TO_DEVICE);
if (!e->skip_buf1)
- dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
+ dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
DMA_TO_DEVICE);
if (e->txwi == DMA_DUMMY_DATA)
@@ -224,7 +227,7 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
- writel(q->head, &q->regs->cpu_idx);
+ Q_WRITE(dev, q, cpu_idx, q->head);
}
static void
@@ -233,14 +236,14 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
struct mt76_queue_entry entry;
int last;
- if (!q)
+ if (!q || !q->ndesc)
return;
spin_lock_bh(&q->cleanup_lock);
if (flush)
last = -1;
else
- last = readl(&q->regs->dma_idx);
+ last = Q_READ(dev, q, dma_idx);
while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
@@ -252,8 +255,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
if (!flush && q->tail == last)
- last = readl(&q->regs->dma_idx);
-
+ last = Q_READ(dev, q, dma_idx);
}
spin_unlock_bh(&q->cleanup_lock);
@@ -288,7 +290,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (info)
*info = le32_to_cpu(desc->info);
- dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
e->buf = NULL;
return buf;
@@ -325,9 +327,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued + 1 >= q->ndesc - 1)
goto error;
- addr = dma_map_single(dev->dev, skb->data, skb->len,
+ addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr)))
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto error;
buf.addr = addr;
@@ -347,8 +349,8 @@ error:
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct ieee80211_tx_status status = {
.sta = sta,
@@ -374,8 +376,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
- addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr)))
+ addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto free;
tx_info.buf[n].addr = t->dma_addr;
@@ -387,9 +389,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
- addr = dma_map_single(dev->dev, iter->data, iter->len,
+ addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr)))
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto unmap;
tx_info.buf[n].addr = addr;
@@ -402,10 +404,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
goto unmap;
}
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+ dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
goto unmap;
@@ -415,7 +417,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
unmap:
for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
tx_info.buf[n].len, DMA_TO_DEVICE);
free:
@@ -448,6 +450,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
+ if (!q->ndesc)
+ return 0;
+
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
@@ -457,14 +462,15 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
if (!buf)
break;
- addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr))) {
+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
skb_free_frag(buf);
break;
}
qbuf.addr = addr + offset;
qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
@@ -477,6 +483,85 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
return frames;
}
+static int
+mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mmio.wed;
+ int ret, type, ring;
+ u8 flags = q->flags;
+
+ if (!mtk_wed_device_active(wed))
+ q->flags &= ~MT_QFLAG_WED;
+
+ if (!(q->flags & MT_QFLAG_WED))
+ return 0;
+
+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
+ ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
+
+ switch (type) {
+ case MT76_WED_Q_TX:
+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
+ if (!ret)
+ q->wed_regs = wed->tx_ring[ring].reg_base;
+ break;
+ case MT76_WED_Q_TXFREE:
+ /* WED txfree queue needs ring to be initialized before setup */
+ q->flags = 0;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q);
+ q->flags = flags;
+
+ ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
+ if (!ret)
+ q->wed_regs = wed->txfree_ring.reg_base;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
+{
+ int ret, size;
+
+ spin_lock_init(&q->lock);
+ spin_lock_init(&q->cleanup_lock);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
+
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ size = q->ndesc * sizeof(*q->entry);
+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ ret = mt76_dma_wed_setup(dev, q);
+ if (ret)
+ return ret;
+
+ if (q->flags != MT_WED_Q_TXFREE)
+ mt76_dma_queue_reset(dev, q);
+
+ return 0;
+}
+
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
@@ -484,6 +569,9 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
void *buf;
bool more;
+ if (!q->ndesc)
+ return;
+
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
@@ -508,6 +596,9 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
struct mt76_queue *q = &dev->q_rx[qid];
int i;
+ if (!q->ndesc)
+ return;
+
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
@@ -552,14 +643,29 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
- int len, data_len, done = 0;
+ int len, data_len, done = 0, dma_idx;
struct sk_buff *skb;
unsigned char *data;
+ bool check_ddone = false;
bool more;
+ if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+ q->flags == MT_WED_Q_TXFREE) {
+ dma_idx = Q_READ(dev, q, dma_idx);
+ check_ddone = true;
+ }
+
while (done < budget) {
u32 info;
+ if (check_ddone) {
+ if (q->tail == dma_idx)
+ dma_idx = Q_READ(dev, q, dma_idx);
+
+ if (q->tail == dma_idx)
+ break;
+ }
+
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
if (!data)
break;
@@ -590,10 +696,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
skb_reserve(skb, q->buf_offset);
- if (q == &dev->q_rx[MT_RXQ_MCU]) {
- u32 *rxfce = (u32 *)skb->cb;
- *rxfce = info;
- }
+ *(u32 *)skb->cb = info;
__skb_put(skb, len);
done++;
@@ -652,7 +755,7 @@ mt76_dma_init(struct mt76_dev *dev,
dev->napi_dev.threaded = 1;
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
napi_enable(&dev->napi[i]);
}
@@ -685,10 +788,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_worker_disable(&dev->tx_worker);
netif_napi_del(&dev->tx_napi);
- for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
- mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
- if (dev->phy2)
- mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ struct mt76_phy *phy = dev->phys[i];
+ int j;
+
+ if (!phy)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
+ mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
}
for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
@@ -700,5 +808,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
}
mt76_free_pending_txwi(dev);
+
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index a499861918fa..9bc8758573fc 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -162,10 +162,13 @@ mt76_find_power_limits_node(struct mt76_dev *dev)
}
if (mt76_string_prop_find(country, dev->alpha2) ||
- mt76_string_prop_find(regd, region_name))
+ mt76_string_prop_find(regd, region_name)) {
+ of_node_put(np);
return cur;
+ }
}
+ of_node_put(np);
return fallback;
}
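
The eeprom.c hunk above plugs a device-tree node leak: the lookup takes a reference that the early return path never dropped. A sketch of the balanced pattern, with the node path and property name illustrative:

	#include <linux/of.h>

	static bool node_has_prop(const char *path, const char *prop)
	{
		struct device_node *np = of_find_node_by_path(path);
		bool ret;

		if (!np)
			return false;

		ret = of_property_read_bool(np, prop);
		of_node_put(np);	/* drop the reference on every exit path */
		return ret;
	}
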
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8bb1c7ab5b50..6de13d641438 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -178,9 +178,15 @@ static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
{ .start_freq = 5350, .end_freq = 5470, },
{ .start_freq = 5470, .end_freq = 5725, },
{ .start_freq = 5725, .end_freq = 5950, },
+ { .start_freq = 5945, .end_freq = 6165, },
+ { .start_freq = 6165, .end_freq = 6405, },
+ { .start_freq = 6405, .end_freq = 6525, },
+ { .start_freq = 6525, .end_freq = 6705, },
+ { .start_freq = 6705, .end_freq = 6865, },
+ { .start_freq = 6865, .end_freq = 7125, },
};
-const struct cfg80211_sar_capa mt76_sar_capa = {
+static const struct cfg80211_sar_capa mt76_sar_capa = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
.freq_ranges = &mt76_sar_freq_ranges[0],
@@ -210,6 +216,7 @@ static int mt76_led_init(struct mt76_dev *dev)
if (!of_property_read_u32(np, "led-sources", &led_pin))
dev->led_pin = led_pin;
dev->led_al = of_property_read_bool(np, "led-active-low");
+ of_node_put(np);
}
return led_classdev_register(dev->dev, &dev->led_cdev);
@@ -248,6 +255,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+ vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
for (i = 0; i < 8; i++) {
if (i < nstream)
@@ -258,6 +267,9 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
@@ -323,8 +335,6 @@ mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
- IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
return 0;
@@ -444,7 +454,7 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
- const struct ieee80211_ops *ops)
+ const struct ieee80211_ops *ops, u8 band_idx)
{
struct ieee80211_hw *hw;
unsigned int phy_size;
@@ -459,6 +469,7 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
phy->dev = dev;
phy->hw = hw;
phy->priv = hw->priv + phy_size;
+ phy->band_idx = band_idx;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->interface_modes =
@@ -511,7 +522,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
if (ret)
return ret;
- phy->dev->phy2 = phy;
+ phy->dev->phys[phy->band_idx] = phy;
return 0;
}
@@ -523,7 +534,7 @@ void mt76_unregister_phy(struct mt76_phy *phy)
mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(phy->hw);
- dev->phy2 = NULL;
+ dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
@@ -545,10 +556,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
dev->hw = hw;
dev->dev = pdev;
dev->drv = drv_ops;
+ dev->dma_dev = pdev;
phy = &dev->phy;
phy->dev = dev;
phy->hw = hw;
+ phy->band_idx = MT_BAND0;
+ dev->phys[phy->band_idx] = phy;
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
@@ -579,6 +593,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
INIT_LIST_HEAD(&dev->wcid_list);
INIT_LIST_HEAD(&dev->txwi_cache);
+ dev->token_size = dev->drv->token_size;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
@@ -729,7 +744,7 @@ static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
+ struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
dev_kfree_skb(skb);
@@ -823,6 +838,10 @@ void mt76_set_channel(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
+ if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
+ phy->chandef.width != chandef->width)
+ phy->dfs_state = MT_DFS_STATE_UNKNOWN;
+
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
@@ -928,6 +947,36 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
+static int
+mt76_rx_signal(struct mt76_rx_status *status)
+{
+ s8 *chain_signal = status->chain_signal;
+ int signal = -128;
+ u8 chains;
+
+ for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
+ int cur, diff;
+
+ cur = *chain_signal;
+ if (!(chains & BIT(0)) ||
+ cur > 0)
+ continue;
+
+ if (cur > signal)
+ swap(cur, signal);
+
+ diff = signal - cur;
+ if (diff == 0)
+ signal += 3;
+ else if (diff <= 2)
+ signal += 2;
+ else if (diff <= 6)
+ signal += 1;
+ }
+
+ return signal;
+}
+
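
The new mt76_rx_signal() above combines per-chain RSSI into one value: it keeps the strongest chain and adds up to 3 dB depending on how close the next chain is. A hedged restatement of the rule for two chains; with -60 and -61 dBm it reports -58 dBm, and two equal chains gain the full 3 dB:

	/* 0 dB apart -> +3, <= 2 dB -> +2, <= 6 dB -> +1, further -> +0 */
	static int combine_two_chains(int a, int b)
	{
		int best = a > b ? a : b;
		int diff = best - (a > b ? b : a);

		if (diff == 0)
			return best + 3;
		if (diff <= 2)
			return best + 2;
		if (diff <= 6)
			return best + 1;
		return best;
	}
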
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_hw **hw,
@@ -956,6 +1005,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->ampdu_reference = mstat.ampdu_ref;
status->device_timestamp = mstat.timestamp;
status->mactime = mstat.timestamp;
+ status->signal = mt76_rx_signal(&mstat);
+ if (status->signal <= -128)
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control))
@@ -968,10 +1020,10 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(mstat.chain_signal));
*sta = wcid_to_sta(mstat.wcid);
- *hw = mt76_phy_hw(dev, mstat.ext_phy);
+ *hw = mt76_phy_hw(dev, mstat.phy_idx);
}
-static int
+static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -981,10 +1033,13 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
int ret;
if (!(status->flag & RX_FLAG_DECRYPTED))
- return 0;
+ return;
+
+ if (status->flag & RX_FLAG_ONLY_MONITOR)
+ return;
if (!wcid || !wcid->rx_check_pn)
- return 0;
+ return;
security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
if (status->flag & RX_FLAG_8023)
@@ -998,7 +1053,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
*/
if (ieee80211_is_frag(hdr) &&
!ieee80211_is_first_frag(hdr->frame_control))
- return 0;
+ return;
}
/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
@@ -1015,15 +1070,15 @@ skip_hdr_check:
BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
sizeof(status->iv));
- if (ret <= 0)
- return -EINVAL; /* replay */
+ if (ret <= 0) {
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+ return;
+ }
memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
if (status->flag & RX_FLAG_IV_STRIPPED)
status->flag |= RX_FLAG_PN_VALIDATED;
-
- return 0;
}
static void
@@ -1128,7 +1183,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
bool ps;
- hw = mt76_phy_hw(dev, status->ext_phy);
+ hw = mt76_phy_hw(dev, status->phy_idx);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
!(status->flag & RX_FLAG_8023)) {
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
@@ -1196,11 +1251,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
while ((skb = __skb_dequeue(frames)) != NULL) {
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
- if (mt76_check_ccmp_pn(skb)) {
- dev_kfree_skb(skb);
- continue;
- }
-
+ mt76_check_ccmp_pn(skb);
skb_shinfo(skb)->frag_list = NULL;
mt76_rx_convert(dev, skb, &hw, &sta);
ieee80211_rx_list(hw, sta, skb, &list);
@@ -1246,10 +1297,11 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
static int
-mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool ext_phy)
+mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct mt76_dev *dev = phy->dev;
int ret;
int i;
@@ -1266,13 +1318,13 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
continue;
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
- mtxq->wcid = wcid;
+ mtxq->wcid = wcid->idx;
}
ewma_signal_init(&wcid->rssi);
- if (ext_phy)
+ if (phy->band_idx == MT_BAND1)
mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
- wcid->ext_phy = ext_phy;
+ wcid->phy_idx = phy->band_idx;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
mt76_packet_id_init(wcid);
@@ -1317,11 +1369,10 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
- return mt76_sta_add(dev, vif, sta, ext_phy);
+ return mt76_sta_add(phy, vif, sta);
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
@@ -1344,7 +1395,9 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
mutex_lock(&dev->mutex);
+ spin_lock_bh(&dev->status_lock);
rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
+ spin_unlock_bh(&dev->status_lock);
mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
@@ -1418,7 +1471,7 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
+ if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -1440,7 +1493,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt76_dev *dev = priv;
- if (!vif->csa_active)
+ if (!vif->bss_conf.csa_active)
return;
dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
@@ -1541,7 +1594,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base)
+ int ring_base, u32 flags)
{
struct mt76_queue *hwq;
int err;
@@ -1550,6 +1603,8 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
if (!hwq)
return ERR_PTR(-ENOMEM);
+ hwq->flags = flags;
+
err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
if (err < 0)
return ERR_PTR(err);
@@ -1604,3 +1659,27 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct mt76_dev *dev = phy->dev;
+
+ if (dev->region == NL80211_DFS_UNSET ||
+ test_bit(MT76_SCANNING, &phy->state))
+ return MT_DFS_STATE_DISABLED;
+
+ if (!hw->conf.radar_enabled) {
+ if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
+ (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return MT_DFS_STATE_ACTIVE;
+
+ return MT_DFS_STATE_DISABLED;
+ }
+
+ if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
+ return MT_DFS_STATE_CAC;
+
+ return MT_DFS_STATE_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 3f94c37251df..a8cafa39a56d 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -6,18 +6,20 @@
#include "mt76.h"
struct sk_buff *
-mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
- int data_len)
+__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int len, int data_len, gfp_t gfp)
{
const struct mt76_mcu_ops *ops = dev->mcu_ops;
- int length = ops->headroom + data_len + ops->tailroom;
struct sk_buff *skb;
- skb = alloc_skb(length, GFP_KERNEL);
+ len = max_t(int, len, data_len);
+ len = ops->headroom + len + ops->tailroom;
+
+ skb = alloc_skb(len, gfp);
if (!skb)
return NULL;
- memset(skb->head, 0, length);
+ memset(skb->head, 0, len);
skb_reserve(skb, ops->headroom);
if (data && data_len)
@@ -25,7 +27,7 @@ mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
return skb;
}
-EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
+EXPORT_SYMBOL_GPL(__mt76_mcu_msg_alloc);
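
The reworked allocator above separates the buffer length from the initial payload length and lets callers choose the GFP flags. A minimal sketch of that contract, with the ops structure reduced to the two fields the function reads:

	#include <linux/kernel.h>
	#include <linux/skbuff.h>

	struct mcu_ops {
		int headroom;
		int tailroom;
	};

	static struct sk_buff *msg_alloc(const struct mcu_ops *ops,
					 const void *data, int len,
					 int data_len, gfp_t gfp)
	{
		struct sk_buff *skb;

		len = max_t(int, len, data_len);	/* allow oversized buffers */
		len = ops->headroom + len + ops->tailroom;

		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;

		memset(skb->head, 0, len);
		skb_reserve(skb, ops->headroom);
		if (data && data_len)
			skb_put_data(skb, data, data_len);
		return skb;
	}
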
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 26353b6bce97..86e3d2ac4d0d 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -73,8 +73,13 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
dev->mmio.irqmask &= ~clear;
dev->mmio.irqmask |= set;
- if (addr)
- mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ if (addr) {
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ mtk_wed_device_irq_set_mask(&dev->mmio.wed,
+ dev->mmio.irqmask);
+ else
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ }
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 404c3d1a70d6..87db9498dea4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -13,19 +13,30 @@
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
+#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
-#define MT_SKB_HEAD_LEN 128
+#define MT_SKB_HEAD_LEN 256
#define MT_MAX_NON_AQL_PKT 16
#define MT_TXQ_FREE_THR 32
#define MT76_TOKEN_FREE_THR 64
+#define MT_QFLAG_WED_RING GENMASK(1, 0)
+#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
+#define MT_QFLAG_WED BIT(4)
+
+#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
+ FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+ FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
+#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
@@ -42,6 +53,11 @@ enum mt76_bus_type {
MT76_BUS_SDIO,
};
+enum mt76_wed_type {
+ MT76_WED_Q_TX,
+ MT76_WED_Q_TXFREE,
+};
+
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -83,11 +99,21 @@ enum mt76_rxq_id {
MT_RXQ_MAIN,
MT_RXQ_MCU,
MT_RXQ_MCU_WA,
- MT_RXQ_EXT,
- MT_RXQ_EXT_WA,
+ MT_RXQ_BAND1,
+ MT_RXQ_BAND1_WA,
+ MT_RXQ_MAIN_WA,
+ MT_RXQ_BAND2,
+ MT_RXQ_BAND2_WA,
__MT_RXQ_MAX
};
+enum mt76_band_id {
+ MT_BAND0,
+ MT_BAND1,
+ MT_BAND2,
+ __MT_MAX_BAND
+};
+
enum mt76_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
@@ -104,6 +130,13 @@ enum mt76_cipher_type {
MT_CIPHER_GCMP_256,
};
+enum mt76_dfs_state {
+ MT_DFS_STATE_UNKNOWN,
+ MT_DFS_STATE_DISABLED,
+ MT_DFS_STATE_CAC,
+ MT_DFS_STATE_ACTIVE,
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
@@ -161,7 +194,9 @@ struct mt76_queue {
u8 buf_offset;
u8 hw_idx;
- u8 qid;
+ u8 flags;
+
+ u32 wed_regs;
dma_addr_t desc_dma;
struct sk_buff *rx_head;
@@ -196,8 +231,8 @@ struct mt76_queue_ops {
u32 ring_base);
int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta);
int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info);
@@ -217,6 +252,30 @@ struct mt76_queue_ops {
void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+ MT_PHY_TYPE_VHT,
+ MT_PHY_TYPE_HE_SU = 8,
+ MT_PHY_TYPE_HE_EXT_SU,
+ MT_PHY_TYPE_HE_TB,
+ MT_PHY_TYPE_HE_MU,
+ __MT_PHY_TYPE_HE_MAX,
+};
+
+struct mt76_sta_stats {
+ u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
+ u64 tx_bw[4]; /* 20, 40, 80, 160 */
+ u64 tx_nss[4]; /* 1, 2, 3, 4 */
+ u64 tx_mcs[16]; /* mcs idx */
+ u64 tx_bytes;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+};
+
enum mt76_wcid_flags {
MT_WCID_FLAG_CHECK_PS,
MT_WCID_FLAG_PS,
@@ -224,10 +283,10 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_HDR_TRANS,
};
-#define MT76_N_WCIDS 288
+#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
-#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
+#define MT_TX_HW_QUEUE_PHY GENMASK(3, 2)
DECLARE_EWMA(signal, 10, 8);
@@ -252,8 +311,8 @@ struct mt76_wcid {
u8 hw_key_idx2;
u8 sta:1;
- u8 ext_phy:1;
u8 amsdu:1;
+ u8 phy_idx:2;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
@@ -264,10 +323,12 @@ struct mt76_wcid {
struct list_head list;
struct idr pktid;
+
+ struct mt76_sta_stats stats;
};
struct mt76_txq {
- struct mt76_wcid *wcid;
+ u16 wcid;
u16 agg_ssn;
bool send_bar;
@@ -307,7 +368,8 @@ struct mt76_rx_tid {
#define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
-#define MT_PACKET_ID_FIRST 2
+#define MT_PACKET_ID_WED 2
+#define MT_PACKET_ID_FIRST 3
#define MT_PACKET_ID_HAS_RATE BIT(7)
/* This is timer for when to give up when waiting for TXS callback,
* with starting time being the time at which the DMA_DONE callback
@@ -492,11 +554,10 @@ struct mt76_usb {
struct mt76_reg_pair *rp;
int rp_len;
u32 base;
- bool burst;
} mcu;
};
-#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
+#define MT76S_XMIT_BUF_SZ 0x3fe00
#define MT76S_NUM_TX_ENTRIES 256
#define MT76S_NUM_RX_ENTRIES 512
struct mt76_sdio {
@@ -506,7 +567,8 @@ struct mt76_sdio {
struct work_struct stat_work;
- u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
+ u8 *xmit_buf;
+ u32 xmit_buf_sz;
struct sdio_func *func;
void *intr_data;
@@ -528,6 +590,8 @@ struct mt76_mmio {
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
+
+ struct mtk_wed_device wed;
};
struct mt76_rx_status {
@@ -543,7 +607,7 @@ struct mt76_rx_status {
u8 iv[6];
- u8 ext_phy:1;
+ u8 phy_idx:2;
u8 aggr:1;
u8 qos_ctl;
u16 seqno;
@@ -621,6 +685,7 @@ struct mt76_vif {
u8 band_idx;
u8 wmm_idx;
u8 scan_seq_num;
+ u8 cipher;
};
struct mt76_phy {
@@ -629,6 +694,7 @@ struct mt76_phy {
void *priv;
unsigned long state;
+ u8 band_idx;
struct mt76_queue *q_tx[__MT_TXQ_MAX];
@@ -636,6 +702,7 @@ struct mt76_phy {
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ enum mt76_dfs_state dfs_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
@@ -667,8 +734,7 @@ struct mt76_phy {
struct mt76_dev {
struct mt76_phy phy; /* must be first */
-
- struct mt76_phy *phy2;
+ struct mt76_phy *phys[__MT_MAX_BAND];
struct ieee80211_hw *hw;
@@ -687,6 +753,7 @@ struct mt76_dev {
const struct mt76_driver_ops *drv;
const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
+ struct device *dma_dev;
struct mt76_mcu mcu;
@@ -707,7 +774,9 @@ struct mt76_dev {
spinlock_t token_lock;
struct idr token;
- int token_count;
+ u16 wed_token_count;
+ u16 token_count;
+ u16 token_size;
wait_queue_head_t tx_wait;
/* spinclock used to protect wcid pktid linked list */
@@ -716,7 +785,7 @@ struct mt76_dev {
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
- u32 vif_mask;
+ u64 vif_mask;
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
@@ -772,26 +841,6 @@ struct mt76_power_limits {
s8 ru[7][12];
};
-enum mt76_phy_type {
- MT_PHY_TYPE_CCK,
- MT_PHY_TYPE_OFDM,
- MT_PHY_TYPE_HT,
- MT_PHY_TYPE_HT_GF,
- MT_PHY_TYPE_VHT,
- MT_PHY_TYPE_HE_SU = 8,
- MT_PHY_TYPE_HE_EXT_SU,
- MT_PHY_TYPE_HE_TB,
- MT_PHY_TYPE_HE_MU,
- __MT_PHY_TYPE_HE_MAX,
-};
-
-struct mt76_sta_stats {
- u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
- u64 tx_bw[4]; /* 20, 40, 80, 160 */
- u64 tx_nss[4]; /* 1, 2, 3, 4 */
- u64 tx_mcs[16]; /* mcs idx */
-};
-
struct mt76_ethtool_worker_info {
u64 *data;
int idx;
@@ -850,16 +899,6 @@ extern struct ieee80211_rate mt76_rates[12];
#define mt76_hw(dev) (dev)->mphy.hw
-static inline struct ieee80211_hw *
-mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
-{
- if (wcid <= MT76_N_WCIDS &&
- mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
- return dev->phy2->hw;
-
- return dev->phy.hw;
-}
-
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
@@ -897,8 +936,8 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
- for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
- (dev)->q_rx[i].ndesc; i++)
+ for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
+ if ((dev)->q_rx[i].ndesc)
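/*
 * Illustrative usage sketch, not part of this patch: the reworked
 * macro above is now a for-statement followed by an if-statement, so
 * the statement placed below it runs only for RX queues with a
 * non-zero descriptor count, matching the old loop condition.
 */
static void example_schedule_rx_napi(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i)
		napi_schedule(&dev->napi[i]);
}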
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
@@ -910,7 +949,8 @@ void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
- const struct ieee80211_ops *ops);
+ const struct ieee80211_ops *ops,
+ u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates);
@@ -931,18 +971,17 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base);
+ int ring_base, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
- int n_desc, int ring_base)
+ int n_desc, int ring_base, u32 flags)
{
struct mt76_queue *q;
- q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
+ q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = qid;
phy->q_tx[qid] = q;
return 0;
@@ -953,28 +992,29 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
{
struct mt76_queue *q;
- q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
+ q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = __MT_TXQ_MAX + qid;
dev->q_mcu[qid] = q;
return 0;
}
static inline struct mt76_phy *
-mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
- if (phy_ext && dev->phy2)
- return dev->phy2;
+ if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
+ (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
+ return dev->phys[phy_idx];
+
return &dev->phy;
}
static inline struct ieee80211_hw *
-mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
- return mt76_dev_phy(dev, phy_ext)->hw;
+ return mt76_dev_phy(dev, phy_idx)->hw;
}
static inline u8 *
@@ -1085,13 +1125,17 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
- if (skb == dev->phy.test.tx_skb)
- *hw = dev->phy.hw;
- else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
- *hw = dev->phy2->hw;
- else
- return false;
- return true;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ struct mt76_phy *phy = dev->phys[i];
+
+ if (phy && skb == phy->test.tx_skb) {
+ *hw = dev->phys[i]->hw;
+ return true;
+ }
+ }
+ return false;
#else
return false;
#endif
@@ -1181,6 +1225,7 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1206,12 +1251,10 @@ static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hw *hw = dev->phy.hw;
-
- if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
- hw = dev->phy2->hw;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);
- info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+ info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;
return hw;
}
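/*
 * Illustrative sketch, not part of this patch: MT_TX_HW_QUEUE_PHY is
 * assumed to be a two-bit field starting at bit 2 of info->hw_queue
 * (hence the ">> 2" above), set on transmit with FIELD_PREP() from
 * <linux/bitfield.h>, as the beacon-offload hunk later in this patch
 * does.
 */
static void example_mark_tx_phy(struct sk_buff *skb, u8 band_idx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, band_idx);
}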
@@ -1262,13 +1305,21 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
- bool ext);
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len);
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+ u32 addr, u32 val);
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ struct mt76_bus_ops *ops);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
@@ -1301,8 +1352,15 @@ int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int len);
struct sk_buff *
+__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int len, int data_len, gfp_t gfp);
+static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
- int data_len);
+ int data_len)
+{
+ return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
+}
+
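/*
 * Illustrative sketch, not part of this patch: the inline wrapper
 * above preserves the old single-length behaviour, while the new
 * __mt76_mcu_msg_alloc() lets a caller size the skb larger than the
 * copied payload. The names below are hypothetical.
 */
static struct sk_buff *
example_alloc_with_tailroom(struct mt76_dev *dev, const void *hdr,
			    int hdr_len, int tlv_len)
{
	/* skb sized for hdr + tlvs, but only hdr is copied in */
	return __mt76_mcu_msg_alloc(dev, hdr, hdr_len + tlv_len, hdr_len,
				    GFP_KERNEL);
}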
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
@@ -1360,8 +1418,7 @@ mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
int token;
spin_lock_bh(&dev->token_lock);
- token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
- GFP_ATOMIC);
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);
return token;
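/*
 * Sketch of the matching release path, assuming mt76 pairs the
 * idr_alloc() above with an idr_remove() under the same lock:
 */
static struct mt76_txwi_cache *
example_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->token_lock);
	t = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return t;
}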
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index 5d4522f440b7..b65b0a88c1de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -20,12 +20,12 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!(mdev->beacon_mask & BIT(mvif->idx)))
return;
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif, 0);
if (!skb)
return;
- mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], skb,
- &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+ MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
@@ -82,12 +82,12 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
__skb_queue_head_init(&data.q);
q = dev->mphy.q_tx[MT_TXQ_BEACON];
- spin_lock_bh(&q->lock);
+ spin_lock(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7603_update_beacon_iter, dev);
mt76_queue_kick(dev, q);
- spin_unlock_bh(&q->lock);
+ spin_unlock(&q->lock);
/* Flush all previous CAB queue packets */
mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
@@ -117,16 +117,16 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_skb_set_moredata(data.tail[i], false);
}
- spin_lock_bh(&q->lock);
+ spin_lock(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
- mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, q, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
- spin_unlock_bh(&q->lock);
+ spin_unlock(&q->lock);
for (i = 0; i < ARRAY_SIZE(data.count); i++)
mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 415ea17b9be6..f9e5857850e7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -76,7 +76,7 @@ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
if (q == MT_RXQ_MCU) {
if (type == PKT_TYPE_RX_EVENT)
@@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
- MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
+ MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
+ MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -223,8 +223,8 @@ int mt7603_dma_init(struct mt7603_dev *dev)
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7603_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7603_poll_tx);
napi_enable(&dev->mt76.tx_napi);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index a272d64808c3..49a511ae8161 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -326,19 +326,21 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
addr = mt7603_wtbl1_addr(idx);
- ampdu_density = sta->ht_cap.ampdu_density;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
val = mt76_rr(dev, addr + 2 * 4);
val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
- val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
- FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
+ val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR,
+ sta->deflink.ht_cap.ampdu_factor) |
+ FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY,
+ sta->deflink.ht_cap.ampdu_density) |
MT_WTBL1_W2_TXS_BAF_REPORT;
- if (sta->ht_cap.cap)
+ if (sta->deflink.ht_cap.cap)
val |= MT_WTBL1_W2_HT;
- if (sta->vht_cap.cap)
+ if (sta->deflink.vht_cap.cap)
val |= MT_WTBL1_W2_VHT;
mt76_wr(dev, addr + 2 * 4, val);
@@ -347,9 +349,9 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
val = mt76_rr(dev, addr + 9 * 4);
val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
MT_WTBL2_W9_SHORT_GI_80);
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
val |= MT_WTBL2_W9_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
val |= MT_WTBL2_W9_SHORT_GI_40;
mt76_wr(dev, addr + 9 * 4, val);
}
@@ -643,11 +645,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
dev->rssi_offset[1];
- status->signal = status->chain_signal[0];
- if (status->chains & BIT(1))
- status->signal = max(status->signal,
- status->chain_signal[1]);
-
if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
status->bw = RATE_INFO_BW_40;
@@ -1135,7 +1132,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[1], MT_TXS1_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1249,14 +1246,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
struct mt7603_sta *msta = NULL;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[4]);
- pid = FIELD_GET(MT_TXS4_PID, txs);
- txs = le32_to_cpu(txs_data[3]);
- wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+ pid = le32_get_bits(txs_data[4], MT_TXS4_PID);
+ wcidx = le32_get_bits(txs_data[3], MT_TXS3_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 2b546bc05d82..ca50feb0b3a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&dev->mt76.mutex);
- mvif->idx = ffs(~dev->mt76.vif_mask) - 1;
+ mvif->idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->idx >= MT7603_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
- dev->mt76.vif_mask |= BIT(mvif->idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->idx);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@@ -75,7 +75,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -106,7 +106,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
spin_unlock_bh(&dev->sta_poll_lock);
mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
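/*
 * Illustrative sketch, not part of this patch: with vif_mask widened
 * to u64, index allocation needs __ffs64()/BIT_ULL(), since ffs()
 * takes an int and BIT() may only be 32 bits wide on 32-bit builds.
 * max_ifaces stands in for the per-chip interface limit and is
 * assumed to be below 64, so the mask is never all ones when
 * __ffs64() runs.
 */
static int example_alloc_vif_idx(struct mt76_dev *dev, int max_ifaces)
{
	int idx = __ffs64(~dev->vif_mask);

	if (idx >= max_ifaces)
		return -ENOSPC;

	dev->vif_mask |= BIT_ULL(idx);
	return idx;
}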
@@ -297,7 +297,7 @@ mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
static void
mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct mt7603_dev *dev = hw->priv;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
@@ -305,7 +305,7 @@ mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
- if (info->assoc || info->ibss_joined) {
+ if (vif->cfg.assoc || vif->cfg.ibss_joined) {
mt76_wr(dev, MT_BSSID0(mvif->idx),
get_unaligned_le32(info->bssid));
mt76_wr(dev, MT_BSSID1(mvif->idx),
@@ -527,7 +527,8 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
static int
-mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7603_dev *dev = hw->priv;
@@ -641,6 +642,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
@@ -654,7 +658,7 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
msta->rate_probe = false;
mt7603_wtbl_set_smps(dev, msta,
- sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
spin_unlock_bh(&dev->mt76.lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index b53528014fbc..c26b45a09923 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -105,10 +105,10 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
- if (val == pm->enable)
- return 0;
+ mutex_lock(&dev->mt76.mutex);
- mt7615_mutex_acquire(dev);
+ if (val == pm->enable)
+ goto out;
if (dev->phy.n_beacon_vif) {
ret = -EBUSY;
@@ -119,9 +119,16 @@ mt7615_pm_set(void *data, u64 val)
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
+ /* make sure the chip is awake here and ps_work is scheduled
+ * just at the end of this routine.
+ */
+ pm->enable = false;
+ mt76_connac_pm_wake(&dev->mphy, pm);
+
pm->enable = val;
+ mt76_connac_power_save_sched(&dev->mphy, pm);
out:
- mt7615_mutex_release(dev);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
@@ -436,11 +443,16 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mt7615_dev *dev = file->private_data;
- char buf[32 * ((ETH_ALEN * 3) + 4) + 1];
+ u32 len = 32 * ((ETH_ALEN * 3) + 4) + 1;
u8 addr[ETH_ALEN];
+ char *buf;
int ofs = 0;
int i;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
for (i = 0; i < 32; i++) {
if (!(dev->muar_mask & BIT(i)))
continue;
@@ -451,10 +463,13 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr);
put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) &
MT_WF_RMAC_MAR1_ADDR), addr + 4);
- ofs += snprintf(buf + ofs, sizeof(buf) - ofs, "%d=%pM\n", i, addr);
+ ofs += snprintf(buf + ofs, len - ofs, "%d=%pM\n", i, addr);
}
- return simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+ ofs = simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+
+ kfree(buf);
+ return ofs;
}
static ssize_t
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 00aefea1bf61..f1914431ff7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
MT7615_TX_MGMT_RING_SIZE,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -44,7 +44,7 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
static int
mt7615_init_tx_queues(struct mt7615_dev *dev)
{
- int ret, i;
+ int ret;
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
@@ -54,14 +54,11 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
if (!is_mt7615(&dev->mt76))
return mt7622_init_tx_queues_multi(dev);
- ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
- MT_TX_RING_BASE);
+ ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE,
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
- for (i = 1; i <= MT_TXQ_PSD ; i++)
- dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
-
return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
}
@@ -284,8 +281,8 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7615_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7615_poll_tx);
napi_enable(&dev->mt76.tx_napi);
mt76_poll(dev, MT_WPDMA_GLO_CFG,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index a753c7476d31..07a1fea94f66 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -401,6 +401,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (is_mt7615(&phy->dev->mt76))
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
@@ -458,7 +459,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
return 0;
mt7615_cap_dbdc_enable(dev);
- mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops, MT_BAND1);
if (!mphy)
return -ENOMEM;
@@ -508,7 +509,7 @@ EXPORT_SYMBOL_GPL(mt7615_register_ext_phy);
void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
{
struct mt7615_phy *phy = mt7615_ext_phy(dev);
- struct mt76_phy *mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = dev->mt76.phys[MT_BAND1];
if (!phy)
return;
@@ -552,7 +553,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
- dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7615_testmode_ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ec25e5a95d44..2ce1705c0f43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -109,6 +109,7 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
int i;
for (i = 0; i < 4; i++) {
@@ -118,8 +119,8 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
dev->mt76.phy.survey_time = ktime_get_boottime();
- if (dev->mt76.phy2)
- dev->mt76.phy2->survey_time = ktime_get_boottime();
+ if (mphy_ext)
+ mphy_ext->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
@@ -253,15 +254,15 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
+ if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
MT_RXD1_NORMAL_U2M)
return -EINVAL;
@@ -275,47 +276,53 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
- qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
- ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
-
+ frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
+ IEEE80211_HT_CTL_LEN);
+
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -330,14 +337,15 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7615_phy *phy = &dev->phy;
- struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
+ struct mt7615_phy *phy2;
__le32 *rxd = (__le32 *)skb->data;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_status = *(u32 *)skb->cb;
bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
u16 hdr_gap;
int phy_idx;
@@ -349,6 +357,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+
+ phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (!phy2)
phy_idx = 0;
else if (phy2->chfreq == phy->chfreq)
@@ -385,7 +395,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
@@ -492,9 +503,9 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (phy_idx == 1 && phy2) {
- mphy = dev->mt76.phy2;
+ mphy = dev->mt76.phys[MT_BAND1];
phy = phy2;
- status->ext_phy = true;
+ status->phy_idx = phy_idx;
}
if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
@@ -570,15 +581,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
mt7615_mac_fill_tm_rx(mphy->priv, rxd);
@@ -610,14 +612,14 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
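/*
 * Sketch of the follow-up step, assumed from the pad_start convention
 * above: when a pad offset was found, the bytes in front of it are
 * shifted forward by two so the spurious length field is dropped.
 */
static void example_strip_hw_pad(struct sk_buff *skb, int pad_start)
{
	if (pad_start) {
		memmove(skb->data + 2, skb->data, pad_start);
		skb_pull(skb, 2);
	}
}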
@@ -720,13 +722,14 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key, bool beacon)
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, bool beacon)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
- bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
bool is_mmio = mt76_is_mmio(&dev->mt76);
@@ -749,18 +752,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
tx_count = msta->rate_count;
}
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
if (beacon) {
p_fmt = MT_TX_TYPE_FW;
- q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+ } else if (qid >= MT_TXQ_PSD) {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
+ q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
@@ -879,60 +882,6 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);
-static void
-mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
-{
- int i;
-
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
-mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
-{
- u32 last_mask;
- int i;
-
- last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;
-
- for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
- struct mt7615_txp_ptr *ptr = &txp->ptr[i];
- bool last;
- u16 len;
-
- len = le16_to_cpu(ptr->len0);
- last = len & last_mask;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
- DMA_TO_DEVICE);
- if (last)
- break;
-
- len = le16_to_cpu(ptr->len1);
- last = len & last_mask;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
- DMA_TO_DEVICE);
- if (last)
- break;
- }
-}
-
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
-{
- struct mt7615_txp_common *txp;
-
- txp = mt7615_txwi_to_txp(dev, t);
- if (is_mt7615(dev))
- mt7615_txp_skb_unmap_fw(dev, &txp->fw);
- else
- mt7615_txp_skb_unmap_hw(dev, &txp->hw);
-}
-EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);
-
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
@@ -1141,7 +1090,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
@@ -1430,7 +1379,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1442,8 +1391,8 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
if (sta->rate_probe) {
struct mt7615_phy *phy = &dev->phy;
- if (sta->wcid.ext_phy && dev->mt76.phy2)
- phy = dev->mt76.phy2->priv;
+ if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
+ phy = dev->mt76.phys[MT_BAND1]->priv;
mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
}
@@ -1485,8 +1434,8 @@ out:
fallthrough;
case MT_PHY_TYPE_OFDM:
mphy = &dev->mphy;
- if (sta->wcid.ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
@@ -1561,14 +1510,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct mt76_wcid *wcid;
struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- pid = FIELD_GET(MT_TXS0_PID, txs);
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+ pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
@@ -1596,8 +1542,8 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcidx >= MT7615_WTBL_STA || !sta)
goto out;
- if (wcid->ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
if (mt7615_fill_txs(dev, msta, &info, txs_data))
ieee80211_tx_status_noskb(mphy->hw, sta, &info);
@@ -1614,7 +1560,7 @@ mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
u32 val;
u8 wcid;
- mt7615_txp_skb_unmap(mdev, txwi);
+ mt76_connac_txp_skb_unmap(mdev, txwi);
if (!txwi->skb)
goto out;
@@ -1642,9 +1588,11 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
mt7615_txwi_free(dev, txwi);
}
-static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt76_connac_tx_free *free = data;
+ void *tx_token = data + sizeof(*free);
+ void *end = data + len;
u8 i, count;
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1655,21 +1603,25 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
}
- count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
if (is_mt7615(&dev->mt76)) {
- __le16 *token = &free->token[0];
+ __le16 *token = tx_token;
+
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
- __le32 *token = (__le32 *)&free->token[0];
+ __le32 *token = tx_token;
+
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
- dev_kfree_skb(skb);
-
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
@@ -1677,6 +1629,29 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_rx_check);
+
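/*
 * Sketch of the intended call convention, an assumption based on the
 * return values above: a false return from the driver's rx_check hook
 * means the buffer was consumed in place (TXS or TXRX_NOTIFY), so the
 * caller would skip the normal skb RX path.
 */
static bool example_rx_accept(struct mt76_dev *dev, void *data, int len)
{
	if (dev->drv->rx_check && !dev->drv->rx_check(dev, data, len))
		return false;	/* handled in place, no skb needed */

	return true;		/* hand off to the regular RX path */
}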
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -1686,8 +1661,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
@@ -1698,7 +1673,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
- mt7615_mac_tx_free(dev, skb);
+ mt7615_mac_tx_free(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
@@ -1835,7 +1811,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
struct mt7615_dev *dev = phy->dev;
int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
bool ext_phy = phy != &dev->phy;
- u16 def_th = ofdm ? -98 : -110;
+ s16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
@@ -1984,6 +1960,7 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
static void mt7615_update_survey(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
ktime_t cur_time;
/* MT7615 can only update both phys simultaneously
@@ -1991,14 +1968,14 @@ static void mt7615_update_survey(struct mt7615_dev *dev)
*/
mt7615_phy_update_channel(&mdev->phy, 0);
- if (mdev->phy2)
- mt7615_phy_update_channel(mdev->phy2, 1);
+ if (mphy_ext)
+ mt7615_phy_update_channel(mphy_ext, 1);
cur_time = ktime_get_boottime();
mt76_update_survey_active_time(&mdev->phy, cur_time);
- if (mdev->phy2)
- mt76_update_survey_active_time(mdev->phy2, cur_time);
+ if (mphy_ext)
+ mt76_update_survey_active_time(mphy_ext, cur_time);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
@@ -2068,10 +2045,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
int i;
if (mt76_is_sdio(mdev)) {
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i)
napi_schedule(&mdev->napi[i]);
+ local_bh_enable();
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
false);
@@ -2103,6 +2083,14 @@ void mt7615_pm_power_save_work(struct work_struct *work)
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if the mt76 mutex is held we should not put the device
+ * to sleep since we are currently accessing the device
+ * register map. We need to wait for the next power_save
+ * trigger.
+ */
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@@ -2160,21 +2148,24 @@ static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
int err;
- err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
@@ -2185,7 +2176,8 @@ static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
@@ -2249,47 +2241,58 @@ int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
if (is_mt7663(&dev->mt76))
return 0;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ dfs_state < MT_DFS_STATE_CAC)
+ dfs_state = MT_DFS_STATE_ACTIVE;
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
+ goto stop;
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
- err = mt7615_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
- goto stop;
- }
+ err = mt7615_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
- phy->dfs_state = chandef->chan->dfs_state;
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(phy);
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ ext_phy, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7615_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
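/*
 * Summary sketch, derived from the rework above and not part of the
 * patch: the per-phy DFS state now steps DISABLED -> CAC -> ACTIVE,
 * RDD_NORMAL_START drops it back to DISABLED, and radar reports are
 * only forwarded once CAC has begun, as in the mcu.c hunk below:
 */
static bool example_report_radar(struct mt76_phy *mphy)
{
	if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
		return false;

	ieee80211_radar_detected(mphy->hw);
	return true;
}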
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index e241c613091c..880c9f74a7f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -165,12 +165,6 @@ enum tx_phy_bandwidth {
#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
#define MT_CT_INFO_HSR2_TX BIT(4)
-#define MT_TXD_SIZE (8 * 4)
-
-#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
-#define MT_USB_HDR_SIZE 4
-#define MT_USB_TAIL_SIZE 4
-
#define MT_TXD0_P_IDX BIT(31)
#define MT_TXD0_Q_IDX GENMASK(30, 26)
#define MT_TXD0_UDP_TCP_SUM BIT(24)
@@ -250,56 +244,6 @@ enum tx_phy_bandwidth {
#define MT_TX_RATE_MODE GENMASK(8, 6)
#define MT_TX_RATE_IDX GENMASK(5, 0)
-#define MT_TXP_MAX_BUF_NUM 6
-#define MT_HW_TXP_MAX_MSDU_NUM 4
-#define MT_HW_TXP_MAX_BUF_NUM 4
-
-#define MT_MSDU_ID_VALID BIT(15)
-
-#define MT_TXD_LEN_MASK GENMASK(11, 0)
-#define MT_TXD_LEN_MSDU_LAST BIT(14)
-#define MT_TXD_LEN_AMSDU_LAST BIT(15)
-/* mt7663 */
-#define MT_TXD_LEN_LAST BIT(15)
-
-struct mt7615_txp_ptr {
- __le32 buf0;
- __le16 len0;
- __le16 len1;
- __le32 buf1;
-} __packed __aligned(4);
-
-struct mt7615_hw_txp {
- __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
- struct mt7615_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
-} __packed __aligned(4);
-
-struct mt7615_fw_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- u8 rept_wds_wcid;
- u8 rsv;
- u8 nbuf;
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
-struct mt7615_txp_common {
- union {
- struct mt7615_fw_txp fw;
- struct mt7615_hw_txp hw;
- };
-};
-
-struct mt7615_tx_free {
- __le16 rx_byte_cnt;
- __le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
- __le16 token[];
-} __packed __aligned(4);
-
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
#define MT_TXS0_PID GENMASK(31, 24)
@@ -385,19 +329,6 @@ struct mt7615_dfs_radar_spec {
struct mt7615_dfs_pattern radar_pattern[16];
};
-static inline struct mt7615_txp_common *
-mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
-}
-
static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
{
return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 82d625a16a62..8d4733f87cda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -194,7 +194,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= MT7615_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -212,7 +212,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
if (ext_phy)
mvif->mt76.wmm_idx += 2;
- dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
@@ -224,7 +224,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -234,7 +234,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
}
ret = mt7615_mcu_add_dev_info(phy, vif, true);
@@ -268,7 +268,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
dev->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
@@ -282,25 +282,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
}
-static void mt7615_init_dfs_state(struct mt7615_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
-
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- return;
-
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
- if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
- mphy->chandef.width == chandef->width)
- return;
-
- phy->dfs_state = -1;
-}
-
int mt7615_set_channel(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
@@ -313,7 +294,6 @@ int mt7615_set_channel(struct mt7615_phy *phy)
set_bit(MT76_RESET, &phy->mt76->state);
- mt7615_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
@@ -365,6 +345,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@@ -403,6 +384,11 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7615_mutex_acquire(dev);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7615_mcu_add_bss_info(phy, vif, NULL, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@@ -424,6 +410,29 @@ out:
return err;
}
+static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int err;
+
+ if (!cfg80211_chandef_valid(&phy->mt76->chandef))
+ return -EINVAL;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ return err;
+
+ if (mt7615_firmware_offload(phy->dev))
+ return mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
+ ieee80211_stop_queues(hw);
+ err = mt7615_set_channel(phy);
+ ieee80211_wake_queues(hw);
+
+ return err;
+}
+
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
@@ -464,7 +473,8 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
}
static int
-mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
@@ -546,7 +556,7 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -586,7 +596,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC)
- mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
+ mt7615_mac_set_beacon_filter(phy, vif, vif->cfg.assoc);
mt7615_mutex_release(dev);
}
@@ -620,7 +630,7 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.phy_idx = mvif->mt76.band_idx;
phy = mvif->mt76.band_idx ? mt7615_ext_phy(dev) : &dev->phy;
err = mt76_connac_pm_wake(phy->mt76, &dev->pm);
@@ -683,6 +693,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
@@ -1182,12 +1195,16 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ mt7615_mutex_acquire(dev);
+
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
+
+ mt7615_mutex_release(dev);
}
#ifdef CONFIG_PM
@@ -1323,6 +1340,7 @@ const struct ieee80211_ops mt7615_ops = {
.set_wakeup = mt7615_set_wakeup,
.set_rekey_data = mt7615_set_rekey_data,
#endif /* CONFIG_PM */
+ .set_sar_specs = mt7615_set_sar_specs,
};
EXPORT_SYMBOL_GPL(mt7615_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 759dcf0e6783..3dac76e6df4d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -40,18 +40,6 @@ struct mt7615_fw_trailer {
#define FW_START_DLYCAL BIT(1)
#define FW_START_WORKING_PDA_CR4 BIT(2)
-struct mt7663_fw_trailer {
- u8 chip_id;
- u8 eco_code;
- u8 n_region;
- u8 format_ver;
- u8 format_flag;
- u8 reserv[2];
- char fw_ver[10];
- char build_date[15];
- __le32 crc;
-} __packed;
-
struct mt7663_fw_buf {
__le32 crc;
__le32 d_img_size;
@@ -71,19 +59,6 @@ struct mt7663_fw_buf {
#define IMG_CRC_LEN 4
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_VALID_RAM_ENTRY BIT(5)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
@@ -363,10 +338,11 @@ static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
}
mt7622_trigger_hif_int(dev, false);
-
- pm->stats.last_doze_event = jiffies;
- pm->stats.awake_time += pm->stats.last_doze_event -
- pm->stats.last_wake_event;
+ if (!err) {
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
+ }
out:
mutex_unlock(&pm->mutex);
@@ -376,7 +352,7 @@ out:
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active)
+ if (vif->bss_conf.csa_active)
ieee80211_csa_finish(vif);
}
@@ -393,7 +369,7 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
return;
if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
- mphy = dev->mt76.phy2;
+ mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -412,8 +388,11 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
!r->constant_prf_detected && !r->staggered_prf_detected)
return;
- if (r->band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (r->band_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
+ if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
+ return;
ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
@@ -469,8 +448,8 @@ mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_phy *phy;
struct mt76_phy *mphy;
- if (*seq_num & BIT(7) && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (*seq_num & BIT(7) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -495,8 +474,8 @@ mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
event = (struct mt7615_roc_tlv *)skb->data;
- if (event->dbdc_band && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (event->dbdc_band && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -520,8 +499,8 @@ mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
event = (struct mt76_connac_beacon_loss_event *)skb->data;
- if (band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (band_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -541,8 +520,8 @@ mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb)
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
event = (struct mt76_connac_mcu_bss_event *)skb->data;
- if (band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if (band_idx && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
else
mphy = &dev->mt76.phy;
@@ -719,7 +698,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
if (!enable)
goto out;
- skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!skb)
return -EINVAL;
@@ -729,13 +708,11 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
return -EINVAL;
}
- if (mvif->mt76.band_idx) {
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- }
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mvif->mt76.band_idx);
mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
- 0, NULL, true);
+ 0, NULL, 0, true);
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
@@ -756,145 +733,7 @@ out:
static int
mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 rsv[3];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL),
- &req, sizeof(req), true);
-}
-
-static int
-mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct mt7615_phy *phy,
- bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
- struct bss_info_basic *bss;
- u8 wlan_idx = mvif->sta.wcid.idx;
- struct tlv *tlv;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable && sta) {
- struct mt7615_sta *msta;
-
- msta = (struct mt7615_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = wlan_idx;
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u8 omac_idx = mvif->mt76.omac_idx;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- if (vif->p2p)
- type = CONNECTION_P2P_GO;
- else
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- if (vif->p2p)
- type = CONNECTION_P2P_GC;
- else
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
-}
-
-/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
-static void
-mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
-{
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+ return mt76_connac_mcu_set_pm(&dev->mt76, band, state);
}
static int
@@ -913,13 +752,14 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return PTR_ERR(skb);
if (enable)
- mt7615_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7615_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
@@ -1005,6 +845,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct mt7615_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
+ bool new_entry = true;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
@@ -1014,7 +855,13 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (IS_ERR(sskb))
return PTR_ERR(sskb);
- mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, true);
+ if (!sta) {
+ if (mvif->sta_added)
+ new_entry = false;
+ else
+ mvif->sta_added = true;
+ }
+ mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1030,7 +877,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
NULL, wtbl_hdr);
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
- NULL, wtbl_hdr);
+ NULL, wtbl_hdr, true, true);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
NULL, wtbl_hdr);
}
@@ -1057,19 +904,7 @@ mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb = NULL;
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
- WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
- wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(WTBL_UPDATE), true);
+ return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
@@ -1238,7 +1073,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
if (!enable)
goto out;
- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
if (!skb)
return -EINVAL;
@@ -1249,7 +1084,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
}
mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
- wcid, NULL, 0, NULL, true);
+ wcid, NULL, 0, NULL, 0, true);
memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
@@ -1303,7 +1138,8 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
- enable, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), enable,
+ true);
}
static int
@@ -1451,20 +1287,6 @@ release_fw:
return ret;
}
-static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
const struct mt7615_fw_trailer *hdr,
@@ -1475,7 +1297,8 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
u32 len, addr, mode;
for (i = 0; i < n_region; i++) {
- mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ hdr[i].feature_set, is_cr4);
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
addr = le32_to_cpu(hdr[i].addr);
@@ -1692,7 +1515,7 @@ static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev)
static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
{
u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL;
- const struct mt7663_fw_trailer *hdr;
+ const struct mt76_connac2_fw_trailer *hdr;
const struct mt7663_fw_buf *buf;
const struct firmware *fw;
const u8 *base_addr;
@@ -1708,9 +1531,7 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
goto out;
}
- hdr = (const struct mt7663_fw_trailer *)(fw->data + fw->size -
- FW_V3_COMMON_TAILER_SIZE);
-
+ hdr = (const void *)(fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE);
dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
hdr->fw_ver, hdr->build_date);
dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region);
@@ -1723,7 +1544,8 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
buf = (const struct mt7663_fw_buf *)(base_addr - shift);
- mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ buf->feature_set, false);
addr = le32_to_cpu(buf->img_dest_addr);
len = le32_to_cpu(buf->img_size);
@@ -2064,27 +1886,6 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2214,7 +2015,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
struct mt76_power_limits limits;
s8 *limits_array = (s8 *)&limits;
int n_chains = hweight8(mphy->antenna_mask);
- int tx_power;
+ int tx_power = hw->conf.power_level * 2;
int i;
static const u8 sku_mapping[] = {
#define SKU_FIELD(_type, _field) \
@@ -2271,9 +2072,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
#undef SKU_FIELD
};
- tx_power = hw->conf.power_level * 2 -
- mt76_tx_power_nss_delta(n_chains);
-
+ tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
+ tx_power -= mt76_tx_power_nss_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits, tx_power);
mphy->txpower_cur = tx_power;
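The txpower hunk above reorders the calculation: tx_power starts from hw->conf.power_level (in 0.5 dB steps), is clamped by the SAR limit, loses the per-chain NSS delta, and is finally capped by the per-rate regulatory limits. A standalone sketch of that clamping order; the helpers and the delta table are simplified stand-ins for the mt76 ones:

#include <stdio.h>

/* Toy stand-ins for the mt76 helpers, for illustration only. */
static int get_sar_power(int sar_limit, int txpower)
{
	return txpower < sar_limit ? txpower : sar_limit;
}

static int tx_power_nss_delta(int n_chains)
{
	/* More chains, less power per chain; values are invented. */
	static const int delta[] = { 0, 0, 6, 9, 12 };

	return delta[n_chains];
}

int main(void)
{
	int tx_power = 20 * 2;	/* power_level in 0.5 dB units */
	int sar_limit = 36, n_chains = 2, rate_limit = 30;

	tx_power = get_sar_power(sar_limit, tx_power); /* SAR clamp first */
	tx_power -= tx_power_nss_delta(n_chains);      /* per-chain delta */
	if (tx_power > rate_limit)                     /* per-rate cap last */
		tx_power = rate_limit;

	printf("txpower_cur = %d (0.5 dB units)\n", tx_power);
	return 0;
}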
@@ -2346,10 +2146,13 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -2525,7 +2328,7 @@ int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy)
.bw = mt7615_mcu_chan_bw(chandef),
.band = chandef->center_freq1 > 4000,
- .dbdc_en = !!dev->mt76.phy2,
+ .dbdc_en = !!dev->mt76.phys[MT_BAND1],
};
u16 center_freq = chandef->center_freq1;
int freq_idx;
@@ -2646,7 +2449,7 @@ int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy)
.bw = mt7615_mcu_chan_bw(chandef),
.band = chandef->center_freq1 > 4000,
- .dbdc_en = !!dev->mt76.phy2,
+ .dbdc_en = !!dev->mt76.phys[MT_BAND1],
};
u16 center_freq = chandef->center_freq1;
int freq_idx;
@@ -2722,7 +2525,7 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
u8 pad;
} req = {
.bss_idx = mvif->mt76.idx,
- .aid = cpu_to_le16(vif->bss_conf.aid),
+ .aid = cpu_to_le16(vif->cfg.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 47863ae9f30b..615956acc6b5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -201,9 +201,6 @@ struct mt7615_mcu_rdd_report {
} hw_pulse[32];
};
-#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
-#define MCU_PKT_ID 0xa0
-
enum {
MCU_ATE_SET_FREQ_OFFSET = 0xa,
MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 33f72f3657d0..a784f9d9e935 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -145,7 +145,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
return;
dev->reset_state = mcu_int;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
@@ -186,14 +186,15 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_txp_common),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
- .tx_complete_skb = mt7615_tx_complete_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
+ .rx_check = mt7615_rx_check,
.rx_skb = mt7615_queue_rx_skb,
.rx_poll_complete = mt7615_rx_poll_complete,
.sta_ps = mt7615_sta_ps,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 6ff6d5800918..060d52c81d9e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -141,6 +141,7 @@ struct mt7615_sta {
struct mt7615_vif {
struct mt76_vif mt76; /* must be first */
struct mt7615_sta sta;
+ bool sta_added;
};
struct mib_stats {
@@ -177,7 +178,6 @@ struct mt7615_phy {
u8 chfreq;
u8 rdd_state;
- int dfs_state;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@@ -345,7 +345,7 @@ mt7615_hw_dev(struct ieee80211_hw *hw)
static inline struct mt7615_phy *
mt7615_ext_phy(struct mt7615_dev *dev)
{
- struct mt76_phy *phy = dev->mt76.phy2;
+ struct mt76_phy *phy = dev->mt76.phys[MT_BAND1];
if (!phy)
return NULL;
@@ -403,30 +403,9 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
-static inline bool is_mt7622(struct mt76_dev *dev)
-{
- if (!IS_ENABLED(CONFIG_MT7622_WMAC))
- return false;
-
- return mt76_chip(dev) == 0x7622;
-}
-
-static inline bool is_mt7615(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
-}
-
-static inline bool is_mt7611(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7611;
-}
-
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
@@ -498,7 +477,8 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key, bool beacon);
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, bool beacon);
void mt7615_mac_set_timing(struct mt7615_phy *phy);
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
@@ -528,8 +508,8 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7615_tx_worker(struct mt76_worker *w);
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -538,8 +518,6 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7615_mac_work(struct work_struct *work);
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *txwi);
int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
@@ -579,6 +557,7 @@ void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
struct mt76_queue_entry *e);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
int mt7663u_mcu_init(struct mt7615_dev *dev);
+int mt7663u_mcu_power_on(struct mt7615_dev *dev);
/* sdio */
int mt7663s_mcu_init(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index d1806f198aed..0019890fdb78 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -14,75 +14,6 @@
#include "../dma.h"
#include "mac.h"
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7615_dev *dev;
- struct mt7615_txp_common *txp;
- u16 token;
-
- dev = container_of(mdev, struct mt7615_dev, mt76);
- txp = mt7615_txwi_to_txp(mdev, e->txwi);
-
- if (is_mt7615(&dev->mt76))
- token = le16_to_cpu(txp->fw.token);
- else
- token = le16_to_cpu(txp->hw.msdu_id[0]) &
- ~MT_MSDU_ID_VALID;
-
- t = mt76_token_put(mdev, token);
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
-static void
-mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct mt7615_hw_txp *txp = txp_ptr;
- struct mt7615_txp_ptr *ptr = &txp->ptr[0];
- int i, nbuf = tx_info->nbuf - 1;
- u32 last_mask;
-
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->nbuf = 1;
-
- txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
-
- if (is_mt7663(&dev->mt76))
- last_mask = MT_TXD_LEN_LAST;
- else
- last_mask = MT_TXD_LEN_AMSDU_LAST |
- MT_TXD_LEN_MSDU_LAST;
-
- for (i = 0; i < nbuf; i++) {
- u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
- u32 addr = tx_info->buf[i + 1].addr;
-
- if (i == nbuf - 1)
- len |= last_mask;
-
- if (i & 1) {
- ptr->buf1 = cpu_to_le32(addr);
- ptr->len1 = cpu_to_le16(len);
- ptr++;
- } else {
- ptr->buf0 = cpu_to_le32(addr);
- ptr->len0 = cpu_to_le16(len);
- }
- }
-}
-
static void
mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
void *txp_ptr, u32 id)
@@ -91,7 +22,8 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt7615_fw_txp *txp = txp_ptr;
+ struct mt76_connac_fw_txp *txp = txp_ptr;
+ u8 *rept_wds_wcid = (u8 *)&txp->rept_wds_wcid;
int nbuf = tx_info->nbuf - 1;
int i;
@@ -122,7 +54,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
}
txp->token = cpu_to_le16(id);
- txp->rept_wds_wcid = 0xff;
+ *rept_wds_wcid = 0xff;
}
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
@@ -145,9 +77,10 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
struct mt7615_phy *phy = &dev->phy;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
- if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
- phy = mdev->phy2->priv;
+ if (phy_idx && mdev->phys[MT_BAND1])
+ phy = mdev->phys[MT_BAND1]->priv;
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
@@ -164,14 +97,14 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
- pid, key, false);
+ pid, key, qid, false);
txp = txwi + MT_TXD_SIZE;
- memset(txp, 0, sizeof(struct mt7615_txp_common));
+ memset(txp, 0, sizeof(struct mt76_connac_txp_common));
if (is_mt7615(&dev->mt76))
mt7615_write_fw_txp(dev, tx_info, txp, id);
else
- mt7615_write_hw_txp(dev, tx_info, txp, id);
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
tx_info->skb = DMA_DUMMY_DATA;
@@ -250,16 +183,18 @@ mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+
ieee80211_iterate_active_interfaces(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_update_vif_beacon, dev->mt76.hw);
- if (!dev->mt76.phy2)
+ if (!mphy_ext)
return;
- ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ ieee80211_iterate_active_interfaces(mphy_ext->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+ mt7615_update_vif_beacon, mphy_ext->hw);
}
void mt7615_mac_reset_work(struct work_struct *work)
@@ -268,9 +203,10 @@ void mt7615_mac_reset_work(struct work_struct *work)
struct mt76_phy *ext_phy;
struct mt7615_dev *dev;
unsigned long timeout;
+ int i;
dev = container_of(work, struct mt7615_dev, reset_work);
- ext_phy = dev->mt76.phy2;
+ ext_phy = dev->mt76.phys[MT_BAND1];
phy2 = ext_phy ? ext_phy->priv : NULL;
if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
@@ -299,8 +235,8 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt76_txq_schedule_all(ext_phy);
mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[0]);
- napi_disable(&dev->mt76.napi[1]);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
mt7615_mutex_acquire(dev);
@@ -330,11 +266,10 @@ void mt7615_mac_reset_work(struct work_struct *work)
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
- napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
- napi_enable(&dev->mt76.napi[1]);
- napi_schedule(&dev->mt76.napi[1]);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
local_bh_enable();
ieee80211_wake_queues(mt76_hw(dev));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 31c4a76b7f91..304212f5f8da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -56,7 +56,10 @@ static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7663s_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err)
return err;
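The parse_intr hunk above brackets sdio_readsb() with sdio_claim_host()/sdio_release_host(), which serializes access to the MMC host controller that this function shares with the rest of the card. A minimal kernel-context sketch of the same bracketing (read_irq_status is a hypothetical helper; MCR_WHISR is the driver's interrupt-status register define):

#include <linux/mmc/sdio_func.h>

/* Hypothetical helper: read a block of interrupt status registers.
 * Every SDIO transfer must run between claim_host/release_host so
 * that concurrent users of the host controller are serialized.
 */
static int read_irq_status(struct sdio_func *func, void *buf, int len)
{
	int err;

	sdio_claim_host(func);
	err = sdio_readsb(func, buf, MCR_WHISR, len);
	sdio_release_host(func);

	return err;
}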
@@ -80,6 +83,7 @@ static int mt7663s_probe(struct sdio_func *func,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
+ .rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
@@ -98,7 +102,7 @@ static int mt7663s_probe(struct sdio_func *func,
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
- int i, ret;
+ int ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
@@ -137,16 +141,6 @@ static int mt7663s_probe(struct sdio_func *func,
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
@@ -187,7 +181,6 @@ static void mt7663s_remove(struct sdio_func *func)
mt76_free_device(&dev->mt76);
}
-#ifdef CONFIG_PM
static int mt7663s_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -242,28 +235,20 @@ static int mt7663s_resume(struct device *dev)
return err;
}
-static const struct dev_pm_ops mt7663s_pm_ops = {
- .suspend = mt7663s_suspend,
- .resume = mt7663s_resume,
-};
-#endif
-
MODULE_DEVICE_TABLE(sdio, mt7663s_table);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7663s_pm_ops, mt7663s_suspend, mt7663s_resume);
+
static struct sdio_driver mt7663s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7663s_probe,
.remove = mt7663s_remove,
.id_table = mt7663s_table,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &mt7663s_pm_ops,
- }
-#endif
+ .drv.pm = pm_sleep_ptr(&mt7663s_pm_ops),
};
module_sdio_driver(mt7663s_driver);
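The PM hunks above trade the #ifdef CONFIG_PM blocks for DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr(), so the suspend/resume callbacks stay visible to the compiler and are discarded as dead code when CONFIG_PM_SLEEP is off. The same pattern for a hypothetical driver, as a sketch:

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical callbacks for some bus driver. */
static int mydrv_suspend(struct device *dev) { return 0; }
static int mydrv_resume(struct device *dev) { return 0; }

/* Defined unconditionally: no #ifdef CONFIG_PM fencing needed. */
static DEFINE_SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

/* In the driver definition, pm_sleep_ptr() evaluates to NULL when
 * CONFIG_PM_SLEEP=n, letting the compiler drop the callbacks as dead
 * code instead of hiding them behind preprocessor guards:
 *
 *     .drv.pm = pm_sleep_ptr(&mydrv_pm_ops),
 */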
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 0396ad532ba6..f2d651d7adff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -17,9 +17,68 @@
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x043e, 0x310c, 0xff, 0xff, 0xff) },
{ },
};
+static u32 mt7663u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7663u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7663u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt7663u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
static void mt7663u_stop(struct ieee80211_hw *hw)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
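mt7663u_copy() above streams an arbitrary-length buffer through the bounce buffer usb->data in data_len-sized batches, splitting the 32-bit register offset into the control request's 16-bit value/index pair. The batching logic alone, as a standalone sketch:

#include <stdio.h>
#include <string.h>

#define BOUNCE_LEN 8 /* stand-in for the much larger usb->data_len */

/* Toy transfer: just print what one vendor request would carry.
 * The 32-bit offset is split into the request's 16-bit value (high
 * half) and index (low half), as mt7663u_copy() does.
 */
static int send_chunk(unsigned int offset, const void *buf, int len)
{
	(void)buf;
	printf("wValue=%#06x wIndex=%#06x len=%d\n",
	       offset >> 16, offset & 0xffff, len);
	return 0;
}

int main(void)
{
	char bounce[BOUNCE_LEN];
	const char data[] = "twenty bytes of data";
	unsigned int offset = 0x12340000;
	int i = 0, len = (int)(sizeof(data) - 1);

	while (i < len) {
		int batch = len - i < BOUNCE_LEN ? len - i : BOUNCE_LEN;

		memcpy(bounce, data + i, batch); /* stage via bounce buffer */
		if (send_chunk(offset + i, bounce, batch) < 0)
			break;

		i += batch;
	}
	return 0;
}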
@@ -60,11 +119,20 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
+ .rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt7663u_rr,
+ .wr = mt7663u_wr,
+ .rmw = mt7663u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt7663u_copy,
+ .type = MT76_BUS_USB,
+ };
struct usb_device *udev = interface_to_usbdev(usb_intf);
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
@@ -91,7 +159,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
INIT_WORK(&dev->mcu_work, mt7663u_init_work);
dev->reg_map = mt7663_usb_sdio_reg_map;
dev->ops = ops;
- ret = mt76u_init(mdev, usb_intf, true);
+ ret = __mt76u_init(mdev, usb_intf, &bus_ops);
if (ret < 0)
goto error;
@@ -99,27 +167,15 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
- if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
- FW_STATE_PWR_ON << 1, 500)) {
- dev_dbg(dev->mt76.dev, "Usb device already powered on\n");
- set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
- goto alloc_queues;
- }
-
- ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x0, 0x1, NULL, 0);
- if (ret)
- goto error;
-
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
FW_STATE_PWR_ON << 1, 500)) {
- dev_err(dev->mt76.dev, "Timeout for power on\n");
- ret = -EIO;
- goto error;
+ ret = mt7663u_mcu_power_on(dev);
+ if (ret)
+ goto error;
+ } else {
+ set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
}
-alloc_queues:
ret = mt76u_alloc_mcu_queue(&dev->mt76);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 0ebb4c3c336a..98bf2f6ae936 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -42,6 +42,26 @@ out:
return ret;
}
+int mt7663u_mcu_power_on(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ }
+
+ return 0;
+}
+
int mt7663u_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7663u_mcu_ops = {
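Both power-on paths above rely on mt76_poll_msec(), which repeatedly reads a register until the masked value matches or the time budget expires. The idiom, sketched standalone with a fake register read (names and timing are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake register read: the power-on bit appears after a few polls. */
static uint32_t fake_rr(uint32_t addr)
{
	static int calls;

	(void)addr;
	return ++calls > 3 ? 0x2 : 0x0;
}

/* Poll until (val & mask) == expect or the budget runs out; mirrors
 * the mt76_poll_msec() contract (true on match, false on timeout),
 * with iteration counts standing in for real milliseconds.
 */
static bool poll_msec(uint32_t addr, uint32_t mask, uint32_t expect,
		      int timeout_ms)
{
	for (int waited = 0; waited < timeout_ms; waited += 10)
		if ((fake_rr(addr) & mask) == expect)
			return true;

	return false;
}

int main(void)
{
	if (!poll_msec(0x0, 0x2, 0x2, 500)) { /* address is arbitrary here */
		fprintf(stderr, "Timeout for power on\n");
		return 1;
	}
	puts("powered on");
	return 0;
}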
@@ -57,23 +77,17 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
- mt7615_mcu_restart(&dev->mt76);
+ ret = mt7615_mcu_restart(&dev->mt76);
+ if (ret)
+ return ret;
+
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
MT_TOP_MISC2_FW_PWR_ON, 0, 500))
return -EIO;
- ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x0, 0x1, NULL, 0);
+ ret = mt7663u_mcu_power_on(dev);
if (ret)
return ret;
-
- if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
- MT_TOP_MISC2_FW_PWR_ON,
- FW_STATE_PWR_ON << 1, 500)) {
- dev_err(dev->mt76.dev, "Timeout for power on\n");
- return -EIO;
- }
}
ret = __mt7663_load_firmware(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 5a6d7829c6e0..0052d103e276 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -49,7 +49,7 @@ mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
__le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
- mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
+ mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, qid, false);
skb_push(skb, MT_USB_TXD_SIZE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index e7f01c2978a2..635192c878cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -12,9 +12,28 @@
#define MT76_CONNAC_MAX_SCHED_SCAN_SSID 10
#define MT76_CONNAC_MAX_SCAN_MATCH 16
+#define MT76_CONNAC_MAX_WMM_SETS 4
+
#define MT76_CONNAC_COREDUMP_TIMEOUT (HZ / 20)
#define MT76_CONNAC_COREDUMP_SZ (1300 * 1024)
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_USB_HDR_SIZE 4
+#define MT_USB_TAIL_SIZE 4
+
+#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_SDIO_TAIL_SIZE 8
+#define MT_SDIO_HDR_SIZE 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_LAST BIT(15)
+#define MT_TXD_LEN_MASK GENMASK(11, 0)
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
+
enum {
CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40,
@@ -44,10 +63,18 @@ enum {
REPEATER_BSSID_MAX = 0x3f,
};
+struct mt76_connac_reg_map {
+ u32 phys;
+ u32 maps;
+ u32 size;
+};
+
struct mt76_connac_pm {
- bool enable;
- bool ds_enable;
- bool suspended;
+ bool enable:1;
+ bool enable_user:1;
+ bool ds_enable:1;
+ bool ds_enable_user:1;
+ bool suspended:1;
spinlock_t txq_lock;
struct {
@@ -83,6 +110,51 @@ struct mt76_connac_coredump {
unsigned long last_activity;
};
+struct mt76_connac_sta_key_conf {
+ s8 keyidx;
+ u8 key[16];
+};
+
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt76_connac_fw_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ __le16 rept_wds_wcid;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+struct mt76_connac_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
+
+struct mt76_connac_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt76_connac_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt76_connac_txp_common {
+ union {
+ struct mt76_connac_fw_txp fw;
+ struct mt76_connac_hw_txp hw;
+ };
+};
+
+struct mt76_connac_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ __le32 txd;
+} __packed __aligned(4);
+
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
static inline bool is_mt7922(struct mt76_dev *dev)
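In struct mt76_connac_fw_txp above, rept_wds_wcid is a 16-bit field at byte offset 5 of a __packed layout, so it is never naturally aligned; that is why the pci_mac.c hunk earlier in this patch writes it through a u8 pointer rather than a plain 16-bit store. A standalone layout check (kernel types swapped for stdint equivalents; the offsets follow from __packed):

#include <stddef.h>
#include <stdint.h>

#define MT_TXP_MAX_BUF_NUM 6

struct fw_txp {
	uint16_t flags;
	uint16_t token;
	uint8_t  bss_idx;
	uint16_t rept_wds_wcid;
	uint8_t  nbuf;
	uint32_t buf[MT_TXP_MAX_BUF_NUM];
	uint16_t len[MT_TXP_MAX_BUF_NUM];
} __attribute__((packed, aligned(4)));

/* rept_wds_wcid sits at an odd offset: byte-wise access only. */
_Static_assert(offsetof(struct fw_txp, rept_wds_wcid) == 5, "");
_Static_assert(offsetof(struct fw_txp, buf) == 8, "");
_Static_assert(sizeof(struct fw_txp) == 44, "");

int main(void) { return 0; }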
@@ -100,12 +172,107 @@ static inline bool is_mt7663(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7663;
}
+static inline bool is_mt7915(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7915;
+}
+
+static inline bool is_mt7916(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7906;
+}
+
+static inline bool is_mt7986(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7986;
+}
+
+static inline bool is_mt7622(struct mt76_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
+ return mt76_chip(dev) == 0x7622;
+}
+
+static inline bool is_mt7615(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_mt7611(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_connac_v1(struct mt76_dev *dev)
+{
+ return is_mt7615(dev) || is_mt7663(dev) || is_mt7622(dev);
+}
+
+static inline bool is_mt76_fw_txp(struct mt76_dev *dev)
+{
+ switch (mt76_chip(dev)) {
+ case 0x7961:
+ case 0x7922:
+ case 0x7663:
+ case 0x7622:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static inline u8 mt76_connac_lmac_mapping(u8 ac)
+{
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
+}
+
+static inline void *
+mt76_connac_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (void *)(txwi + MT_TXD_SIZE);
+}
+
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
struct mt76_wcid *wcid);
+static inline void mt76_connac_tx_cleanup(struct mt76_dev *dev)
+{
+ dev->queue_ops->tx_cleanup(dev, dev->q_mcu[MT_MCUQ_WM], false);
+ dev->queue_ops->tx_cleanup(dev, dev->q_mcu[MT_MCUQ_WA], false);
+}
+
static inline bool
mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
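Among the inline helpers added above, mt76_connac_lmac_mapping() captures that the LMAC numbers its access-category queues in the reverse order of mac80211, where AC_VO is 0 and AC_BK is 3. A short standalone demo of the mapping:

#include <stdio.h>

/* mac80211 AC indexes: VO=0, VI=1, BE=2, BK=3 */
static unsigned char lmac_mapping(unsigned char ac)
{
	return 3 - ac; /* LMAC order: BK=0, BE=1, VI=2, VO=3 */
}

int main(void)
{
	static const char *name[] = { "VO", "VI", "BE", "BK" };

	for (unsigned char ac = 0; ac < 4; ac++)
		printf("mac80211 %s (%u) -> LMAC queue %u\n",
		       name[ac], ac, lmac_mapping(ac));
	return 0;
}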
@@ -168,11 +335,37 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
mutex_unlock(&dev->mutex);
}
+int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
+ int ring_base, u32 flags);
+void mt76_connac_write_hw_txp(struct mt76_dev *dev,
+ struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id);
+void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e);
void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
struct mt76_connac_pm *pm,
struct mt76_wcid *wcid,
struct sk_buff *skb);
void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
+void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, int pid,
+ enum mt76_txq_id qid, u32 changed);
+bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ __le32 *txs_data);
+bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data);
+void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
+ struct sk_buff *skb,
+ __le32 *rxv, u32 mode);
+int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
+ struct sk_buff *skb, u16 hdr_offset);
+int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv, u8 *mode);
#endif /* __MT76_CONNAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
new file mode 100644
index 000000000000..f33171bcd343
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#ifndef __MT76_CONNAC2_MAC_H
+#define __MT76_CONNAC2_MAC_H
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum {
+ MT_CTX0,
+ MT_HIF0 = 0x0,
+
+ MT_LMAC_AC00 = 0x0,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+};
+
+#define MT_TXD0_Q_IDX GENMASK(31, 25)
+#define MT_TXD0_PKT_FMT GENMASK(24, 23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_LONG_FORMAT BIT(31)
+#define MT_TXD1_TGID BIT(30)
+#define MT_TXD1_OWN_MAC GENMASK(29, 24)
+#define MT_TXD1_AMSDU BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_HDR_PAD GENMASK(19, 18)
+#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
+#define MT_TXD1_HDR_INFO GENMASK(15, 11)
+#define MT_TXD1_ETH_802_3 BIT(15)
+#define MT_TXD1_VTA BIT(10)
+#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_FIXED_RATE BIT(30)
+#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SW_POWER_MGMT BIT(29)
+#define MT_TXD3_BA_DISABLE BIT(28)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_TIMING_MEASURE BIT(5)
+#define MT_TXD3_DAS BIT(4)
+#define MT_TXD3_EEOSP BIT(3)
+#define MT_TXD3_EMRD BIT(2)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_MD BIT(15)
+#define MT_TXD5_ADD_BA BIT(14)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_TX_IBF BIT(31)
+#define MT_TXD6_TX_EBF BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 16)
+#define MT_TXD6_SGI GENMASK(15, 14)
+#define MT_TXD6_HELTF GENMASK(13, 12)
+#define MT_TXD6_LDPC BIT(11)
+#define MT_TXD6_SPE_ID_IDX BIT(10)
+#define MT_TXD6_ANT_ID GENMASK(7, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(1, 0)
+
+#define MT_TXD7_TXD_LEN GENMASK(31, 30)
+#define MT_TXD7_UDP_TCP_SUM BIT(29)
+#define MT_TXD7_IP_SUM BIT(28)
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TXD7_PSE_FID GENMASK(27, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_HW_AMSDU BIT(10)
+#define MT_TXD7_TX_TIME GENMASK(9, 0)
+
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TX_RATE_STBC BIT(13)
+#define MT_TX_RATE_NSS GENMASK(12, 10)
+#define MT_TX_RATE_MODE GENMASK(9, 6)
+#define MT_TX_RATE_SU_EXT_TONE BIT(5)
+#define MT_TX_RATE_DCM BIT(4)
+/* VHT/HE only use bits 0-3 */
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXS0_FIXED_RATE BIT(31)
+#define MT_TXS0_BW GENMASK(30, 29)
+#define MT_TXS0_TID GENMASK(28, 26)
+#define MT_TXS0_AMPDU BIT(25)
+#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TX_RATE GENMASK(13, 0)
+
+#define MT_TXS1_SEQNO GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
+#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
+
+#define MT_TXS2_BF_STATUS GENMASK(31, 30)
+#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27)
+#define MT_TXS2_SHARED_ANTENNA BIT(26)
+#define MT_TXS2_WCID GENMASK(25, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_PID GENMASK(31, 24)
+#define MT_TXS3_ANT_ID GENMASK(23, 0)
+
+#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
+
+/* PPDU based TXS */
+#define MT_TXS5_MPDU_TX_BYTE GENMASK(22, 0)
+#define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23)
+
+#define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23)
+
+#define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23)
+
+/* RXD DW1 */
+#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
+#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
+#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
+#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
+#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
+#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
+#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
+#define MT_RXD1_NORMAL_CM BIT(23)
+#define MT_RXD1_NORMAL_CLM BIT(24)
+#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
+#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
+#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
+#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
+#define MT_RXD1_NORMAL_SPP_EN BIT(29)
+#define MT_RXD1_NORMAL_ADD_OM BIT(30)
+#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
+
+/* RXD DW2 */
+#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
+#define MT_RXD2_NORMAL_CO_ANT BIT(6)
+#define MT_RXD2_NORMAL_BF_CQI BIT(7)
+#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
+#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
+#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
+#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
+#define MT_RXD2_NORMAL_MU_BAR BIT(21)
+#define MT_RXD2_NORMAL_SW_BIT BIT(22)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
+#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
+
+/* RXD DW4 */
+#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
+#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD4_NORMAL_CLS BIT(10)
+#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+
+#define MT_RXV_HDR_BAND_IDX BIT(24)
+
+/* RXD DW3 */
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
+#define MT_RXD3_NORMAL_U2M BIT(0)
+#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
+#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
+#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
+#define MT_RXD3_NORMAL_AMSDU BIT(22)
+#define MT_RXD3_NORMAL_MESH BIT(23)
+#define MT_RXD3_NORMAL_MHCP BIT(24)
+#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
+#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
+#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
+#define MT_RXD3_NORMAL_MORE BIT(28)
+#define MT_RXD3_NORMAL_UNWANT BIT(29)
+#define MT_RXD3_NORMAL_RX_DROP BIT(30)
+#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
+
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
+#define MT_RXD6_TA_LO GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
+
+/* P-RXV DW0 */
+#define MT_PRXV_TX_RATE GENMASK(6, 0)
+#define MT_PRXV_TX_DCM BIT(4)
+#define MT_PRXV_TX_ER_SU_106T BIT(5)
+#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_TXBF BIT(10)
+#define MT_PRXV_HT_AD_CODE BIT(11)
+#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
+
+#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
+#define MT_PRXV_HT_SGI GENMASK(16, 15)
+#define MT_PRXV_HT_STBC GENMASK(23, 22)
+#define MT_PRXV_TX_MODE GENMASK(27, 24)
+#define MT_PRXV_DCM BIT(17)
+#define MT_PRXV_NUM_RX GENMASK(20, 18)
+
+/* P-RXV DW1 */
+#define MT_PRXV_RCPI3 GENMASK(31, 24)
+#define MT_PRXV_RCPI2 GENMASK(23, 16)
+#define MT_PRXV_RCPI1 GENMASK(15, 8)
+#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC GENMASK(1, 0)
+#define MT_CRXV_TX_MODE GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
+#define MT_CRXV_HE_UPLINK BIT(31)
+
+#define MT_CRXV_HE_RU0 GENMASK(7, 0)
+#define MT_CRXV_HE_RU1 GENMASK(15, 8)
+#define MT_CRXV_HE_RU2 GENMASK(23, 16)
+#define MT_CRXV_HE_RU3 GENMASK(31, 24)
+
+#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
+
+#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG BIT(13)
+#define MT_CRXV_HE_DOPPLER BIT(16)
+
+#define MT_CRXV_SNR GENMASK(18, 13)
+#define MT_CRXV_FOE_LO GENMASK(31, 19)
+#define MT_CRXV_FOE_HI GENMASK(6, 0)
+#define MT_CRXV_FOE_SHIFT 13
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+#define MT_CT_INFO_FROM_HOST BIT(7)
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0x20,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+#endif /* __MT76_CONNAC2_MAC_H */
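All of the descriptor words in this new header are meant to be decoded with FIELD_GET()-style helpers against GENMASK() fields. A standalone sketch decoding a few of the RXD DW1 fields defined above; the helpers are re-implemented locally and the sample word is made up:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, r) (((r) & (m)) >> __builtin_ctz(m))

/* A few of the RXD DW1 fields from mt76_connac2_mac.h */
#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
#define MT_RXD1_NORMAL_FCS_ERR  (1u << 27)

int main(void)
{
	uint32_t dw1 = 0x08020123; /* made-up sample descriptor word */

	printf("wlan_idx=%u sec_mode=%u fcs_err=%u\n",
	       FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, dw1),
	       FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, dw1),
	       !!(dw1 & MT_RXD1_NORMAL_FCS_ERR));
	return 0;
}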
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 306e9eaea917..34ac3d81a510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -2,6 +2,12 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include "mt76_connac.h"
+#include "mt76_connac2_mac.h"
+#include "dma.h"
+
+#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
+ IEEE80211_RADIOTAP_HE_##f)
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
@@ -115,3 +121,939 @@ void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_connac_pm_dequeue_skbs);
+
+void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_connac_txp_common *txp;
+ struct mt76_txwi_cache *t;
+ u16 token;
+
+ txp = mt76_connac_txwi_to_txp(mdev, e->txwi);
+ if (is_mt76_fw_txp(mdev))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
+ t = mt76_token_put(mdev, token);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_tx_complete_skb);
+
+void mt76_connac_write_hw_txp(struct mt76_dev *dev,
+ struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct mt76_connac_hw_txp *txp = txp_ptr;
+ struct mt76_connac_txp_ptr *ptr = &txp->ptr[0];
+ int i, nbuf = tx_info->nbuf - 1;
+ u32 last_mask;
+
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+ if (is_mt7663(dev) || is_mt7921(dev))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_AMSDU_LAST |
+ MT_TXD_LEN_MSDU_LAST;
+
+ for (i = 0; i < nbuf; i++) {
+ u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
+ u32 addr = tx_info->buf[i + 1].addr;
+
+ if (i == nbuf - 1)
+ len |= last_mask;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_write_hw_txp);
+
+static void
+mt76_connac_txp_skb_unmap_fw(struct mt76_dev *mdev,
+ struct mt76_connac_fw_txp *txp)
+{
+ struct device *dev = is_connac_v1(mdev) ? mdev->dev : mdev->dma_dev;
+ int i;
+
+ for (i = 0; i < txp->nbuf; i++)
+ dma_unmap_single(dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+static void
+mt76_connac_txp_skb_unmap_hw(struct mt76_dev *dev,
+ struct mt76_connac_hw_txp *txp)
+{
+ u32 last_mask;
+ int i;
+
+ if (is_mt7663(dev) || is_mt7921(dev))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_MSDU_LAST;
+
+ for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
+ struct mt76_connac_txp_ptr *ptr = &txp->ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt76_connac_txp_common *txp;
+
+ txp = mt76_connac_txwi_to_txp(dev, t);
+ if (is_mt76_fw_txp(dev))
+ mt76_connac_txp_skb_unmap_fw(dev, &txp->fw);
+ else
+ mt76_connac_txp_skb_unmap_hw(dev, &txp->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_txp_skb_unmap);
+
+int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
+ int ring_base, u32 flags)
+{
+ int i, err;
+
+ err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base, flags);
+ if (err < 0)
+ return err;
+
+ for (i = 1; i <= MT_TXQ_PSD; i++)
+ phy->q_tx[i] = phy->q_tx[0];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
+
+static u16
+mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
+{
+ u8 mode = 0, band = mphy->chandef.chan->band;
+ int rateidx = 0, mcast_rate;
+
+ if (!vif)
+ goto legacy;
+
+ if (is_mt7921(mphy->dev)) {
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ goto legacy;
+ }
+
+ if (beacon) {
+ struct cfg80211_bitrate_mask *mask;
+
+ mask = &vif->bss_conf.beacon_tx_rate;
+ if (hweight16(mask->control[band].he_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HE_SU;
+ goto out;
+ } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_VHT;
+ goto out;
+ } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HT;
+ goto out;
+ } else if (hweight32(mask->control[band].legacy) == 1) {
+ rateidx = ffs(mask->control[band].legacy) - 1;
+ goto legacy;
+ }
+ }
+
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast && mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+legacy:
+ rateidx = mt76_calculate_default_rate(mphy, rateidx);
+ mode = rateidx >> 8;
+ rateidx &= GENMASK(7, 0);
+
+out:
+ return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
+ FIELD_PREP(MT_TX_RATE_MODE, mode);
+}
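The legacy: path above assumes mt76_calculate_default_rate() returns the PHY mode in bits 15:8 and the rate index in bits 7:0, which the function splits apart before re-encoding both into the TXD6 rate field. That packing step alone, sketched standalone with invented mode codes:

#include <assert.h>
#include <stdint.h>

#define MODE_CCK  0 /* invented mode codes for the sketch */
#define MODE_OFDM 1

/* Pack like mt76_calculate_default_rate(): mode in the high byte. */
static uint16_t default_rate(uint8_t mode, uint8_t rateidx)
{
	return (uint16_t)(mode << 8) | rateidx;
}

int main(void)
{
	uint16_t packed = default_rate(MODE_OFDM, 4);

	/* Unpack exactly as the legacy: label in the function above. */
	uint8_t mode = packed >> 8;
	uint8_t rateidx = packed & 0xff;

	assert(mode == MODE_OFDM && rateidx == 4);
	return 0;
}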
+
+static void
+mt76_connac2_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
+ struct mt76_wcid *wcid)
+{
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ u8 fc_type, fc_stype;
+ u16 ethertype;
+ bool wmm = false;
+ u32 val;
+
+ if (wcid->sta) {
+ struct ieee80211_sta *sta;
+
+ sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ wmm = sta->wme;
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
+ val |= MT_TXD1_ETH_802_3;
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = IEEE80211_FTYPE_DATA >> 2;
+ fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+
+ txwi[2] |= cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+
+ txwi[7] |= cpu_to_le32(val);
+}
+
+static void
+mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ __le16 fc = hdr->frame_control;
+ u8 fc_type, fc_stype;
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+
+ txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
+ tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
+ u16 control = le16_to_cpu(bar->control);
+
+ tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
+ }
+
+ val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID, tid);
+
+ txwi[1] |= cpu_to_le32(val);
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+
+ if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD2_BIP;
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+
+ if (!ieee80211_is_data(fc) || multicast ||
+ info->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ val |= MT_TXD2_FIX_RATE;
+
+ txwi[2] |= cpu_to_le32(val);
+
+ if (ieee80211_is_beacon(fc)) {
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
+ txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+ if (!is_mt7921(dev))
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
+ 0x18));
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
+ }
+
+ if (mt76_is_mmio(dev)) {
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] |= cpu_to_le32(val);
+ } else {
+ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ txwi[8] |= cpu_to_le32(val);
+ }
+}
+
+void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, int pid,
+ enum mt76_txq_id qid, u32 changed)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->phy;
+ u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
+ u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
+ bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED));
+ bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY));
+
+ if (vif) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ wmm_idx = mvif->wmm_idx;
+ band_idx = mvif->band_idx;
+ }
+
+ if (phy_idx && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
+
+ if (inband_disc) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_ALTX0;
+ } else if (beacon) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_BCN0;
+ } else if (qid >= MT_TXQ_PSD) {
+ p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
+ q_idx = MT_LMAC_ALTX0;
+ } else {
+ p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
+ q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
+
+ /* count non-offloaded skbs */
+ wcid->stats.tx_bytes += skb->len;
+ wcid->stats.tx_packets++;
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+ if (!is_mt7921(dev))
+ val |= MT_TXD1_VTA;
+ if (phy_idx || band_idx)
+ val |= MT_TXD1_TGID;
+
+ txwi[1] = cpu_to_le32(val);
+ txwi[2] = 0;
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);
+ if (!is_mt7921(dev))
+ val |= MT_TXD3_SW_POWER_MGMT;
+ if (key)
+ val |= MT_TXD3_PROTECT_FRAME;
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ val |= MT_TXD3_NO_ACK;
+
+ txwi[3] = cpu_to_le32(val);
+ txwi[4] = 0;
+
+ val = FIELD_PREP(MT_TXD5_PID, pid);
+ if (pid >= MT_PACKET_ID_FIRST)
+ val |= MT_TXD5_TX_STATUS_HOST;
+
+ txwi[5] = cpu_to_le32(val);
+ txwi[6] = 0;
+ txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
+
+ if (is_8023)
+ mt76_connac2_mac_write_txwi_8023(txwi, skb, wcid);
+ else
+ mt76_connac2_mac_write_txwi_80211(dev, txwi, skb, key);
+
+ if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
+ /* Fixed rate is available only for 802.11 txd */
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ multicast);
+ u32 val = MT_TXD6_FIXED_BW;
+
+ /* hardware won't add HTC for mgmt/ctrl frame */
+ txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
+
+ val |= FIELD_PREP(MT_TXD6_TX_RATE, rate);
+ txwi[6] |= cpu_to_le32(val);
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
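+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a chip driver
+ * typically zeroes the txwi buffer and fills it from its tx_prepare_skb
+ * hook before queueing the frame for DMA, e.g.:
+ *
+ * memset(txwi, 0, MT_TXD_SIZE);
+ * mt76_connac2_mac_write_txwi(mdev, txwi, tx_info->skb, wcid, key,
+ * pid, qid, 0);
+ *
+ * "changed" is 0 for regular frames; beacon/offload updates pass the
+ * relevant BSS_CHANGED_* bits so the FW queues are selected above.
+ */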
+
+bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ __le32 *txs_data)
+{
+ struct mt76_sta_stats *stats = &wcid->stats;
+ struct ieee80211_supported_band *sband;
+ struct mt76_phy *mphy;
+ struct rate_info rate = {};
+ bool cck = false;
+ u32 txrate, txs, mode;
+
+ txs = le32_to_cpu(txs_data[0]);
+
+ /* PPDU based reporting */
+ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
+ stats->tx_bytes +=
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+ stats->tx_packets +=
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
+ stats->tx_failed +=
+ le32_get_bits(txs_data[6], MT_TXS6_MPDU_FAIL_CNT);
+ stats->tx_retries +=
+ le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_CNT);
+ }
+
+ txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+
+ rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
+ rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
+
+ if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
+ stats->tx_nss[rate.nss - 1]++;
+ if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
+ stats->tx_mcs[rate.mcs]++;
+
+ mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ mphy = &dev->phy;
+ if (wcid->phy_idx == MT_BAND1 && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ if (rate.mcs > 31)
+ return false;
+
+ rate.flags = RATE_INFO_FLAGS_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ if (rate.mcs > 9)
+ return false;
+
+ rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (rate.mcs > 11)
+ return false;
+
+ rate.he_gi = wcid->rate.he_gi;
+ rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
+ rate.flags = RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ return false;
+ }
+
+ stats->tx_mode[mode]++;
+
+ switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_160:
+ rate.bw = RATE_INFO_BW_160;
+ stats->tx_bw[3]++;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate.bw = RATE_INFO_BW_80;
+ stats->tx_bw[2]++;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate.bw = RATE_INFO_BW_40;
+ stats->tx_bw[1]++;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ stats->tx_bw[0]++;
+ break;
+ }
+ wcid->rate = rate;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_txs);
+
+bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data)
+{
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ mt76_tx_status_lock(dev, &list);
+ skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool noacked = !(info->flags & IEEE80211_TX_STAT_ACK);
+
+ if (!(le32_to_cpu(txs_data[0]) & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !noacked;
+ info->status.rates[0].idx = -1;
+
+ wcid->stats.tx_failed += noacked;
+
+ mt76_connac2_mac_fill_txs(dev, wcid, txs_data);
+ mt76_tx_status_skb_done(dev, skb, &list);
+ }
+ mt76_tx_status_unlock(dev, &list);
+
+ return !!skb;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_add_txs_skb);
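+
+/*
+ * Illustrative TXS flow (assumed, simplified): on a TXS event the driver
+ * recovers the wcid/pid pair from the status words and completes the
+ * matching skb, e.g.:
+ *
+ * u16 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ * u8 pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
+ *
+ * wcid = rcu_dereference(dev->wcid[wcidx]);
+ * if (wcid)
+ * mt76_connac2_mac_add_txs_skb(dev, wcid, pid, txs_data);
+ *
+ * The return value tells the caller whether a pending skb matched "pid".
+ */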
+
+static void
+mt76_connac2_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ struct ieee80211_radiotap_he *he,
+ __le32 *rxv)
+{
+ u32 ru_h, ru_l;
+ u8 ru, offs = 0;
+
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
+ ru = (u8)(ru_l | ru_h << 4);
+
+ status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+
+ he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+}
+
+static void
+mt76_connac2_mac_decode_he_mu_radiotap(struct mt76_dev *dev, struct sk_buff *skb,
+ __le32 *rxv)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ static struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
+ HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
+ .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ struct ieee80211_radiotap_he_mu *he_mu;
+
+ if (is_mt7921(dev)) {
+ mu_known.flags1 |= HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN);
+ mu_known.flags2 |= HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN);
+ }
+
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+
+ he_mu = skb_push(skb, sizeof(mu_known));
+ memcpy(he_mu, &mu_known, sizeof(mu_known));
+
+#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
+
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
+ if (status->he_dcm)
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
+
+ he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
+ MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
+ le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
+
+ he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
+
+ if (status->bw >= RATE_INFO_BW_40) {
+ he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
+ he_mu->ru_ch2[0] =
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
+ }
+
+ if (status->bw >= RATE_INFO_BW_80) {
+ he_mu->ru_ch1[1] =
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
+ he_mu->ru_ch2[1] =
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
+ }
+}
+
+void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
+ struct sk_buff *skb,
+ __le32 *rxv, u32 mode)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
+ HE_BITS(DATA1_DATA_DCM_KNOWN) |
+ HE_BITS(DATA1_STBC_KNOWN) |
+ HE_BITS(DATA1_CODING_KNOWN) |
+ HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
+ HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
+ HE_BITS(DATA1_BSS_COLOR_KNOWN),
+ .data2 = HE_BITS(DATA2_GI_KNOWN) |
+ HE_BITS(DATA2_TXBF_KNOWN) |
+ HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
+ HE_BITS(DATA2_TXOP_KNOWN),
+ };
+ u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+ struct ieee80211_radiotap_he *he;
+
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he = skb_push(skb, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+
+ he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+ HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
+ le16_encode_bits(ltf_size,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ he->data5 |= HE_BITS(DATA5_TXBF);
+ he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+ HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
+
+ switch (mode) {
+ case MT_PHY_TYPE_HE_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+ HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_EXT_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ HE_BITS(DATA1_UL_DL_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+ he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
+
+ mt76_connac2_mac_decode_he_radiotap_ru(status, he, rxv);
+ mt76_connac2_mac_decode_he_mu_radiotap(dev, skb, rxv);
+ break;
+ case MT_PHY_TYPE_HE_TB:
+ he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
+ HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
+
+ mt76_connac2_mac_decode_he_radiotap_ru(status, he, rxv);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_decode_he_radiotap);
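+
+/*
+ * Note for callers (a sketch based on the skb_push() calls above): the rx
+ * path should only invoke this for HE PPDUs and must leave enough skb
+ * headroom for the pushed radiotap structs, e.g.:
+ *
+ * if (mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
+ * mt76_connac2_mac_decode_he_radiotap(dev, skb, rxv, mode);
+ */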
+
+/* The HW does not translate the mac header to 802.3 for mesh point */
+int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
+ struct sk_buff *skb, u16 hdr_offset)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_offset);
+ __le32 *rxd = (__le32 *)skb->data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr hdr;
+ u16 frame_control;
+
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
+ MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
+ if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
+ return -EINVAL;
+
+ sta = container_of((void *)status->wcid, struct ieee80211_sta, drv_priv);
+
+ /* store the info from RXD and ethhdr to avoid being overwritten */
+ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
+ hdr.duration_id = 0;
+
+ ether_addr_copy(hdr.addr1, vif->addr);
+ ether_addr_copy(hdr.addr2, sta->addr);
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
+ case 0:
+ ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
+ break;
+ case IEEE80211_FCTL_TODS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ break;
+ case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
+ break;
+ default:
+ break;
+ }
+
+ skb_pull(skb, hdr_offset + sizeof(struct ethhdr) - 2);
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
+ ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
+ else
+ skb_pull(skb, 2);
+
+ if (ieee80211_has_order(hdr.frame_control))
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
+ if (ieee80211_has_a4(hdr.frame_control))
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ else
+ memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_reverse_frag0_hdr_trans);
+
+int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv, u8 *mode)
+{
+ u32 v0, v2;
+ u8 stbc, gi, bw, dcm, nss;
+ int i, idx;
+ bool cck = false;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+
+ if (!is_mt7915(dev)) {
+ stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
+ gi = FIELD_GET(MT_PRXV_HT_SGI, v0);
+ *mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ if (is_mt7921(dev))
+ dcm = !!(idx & MT_PRXV_TX_DCM);
+ else
+ dcm = FIELD_GET(MT_PRXV_DCM, v0);
+ bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
+ } else {
+ stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ *mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+ dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
+ bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
+ }
+
+ switch (*mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(dev, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = nss;
+ status->encoding = RX_ENC_VHT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 11)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = dcm;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (*mode == MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (*mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_rx_rate);
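+
+/*
+ * Illustrative caller (assumed): the per-chip rx path hands the PRXV words
+ * to this helper first and only then decodes the HE radiotap data, e.g.:
+ *
+ * ret = mt76_connac2_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
+ * if (ret < 0)
+ * return ret;
+ * if (mode >= MT_PHY_TYPE_HE_SU)
+ * mt76_connac2_mac_decode_he_radiotap(dev, skb, rxv, mode);
+ */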
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index f79e3d5084f3..011fc9729b38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/firmware.h>
+#include "mt76_connac2_mac.h"
#include "mt76_connac_mcu.h"
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
@@ -62,8 +64,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
};
int cmd;
- if (is_mt7921(dev) &&
- (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
+ if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
+ (is_mt7921(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -193,7 +195,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
*/
} req = {
.bss_idx = mvif->idx,
- .ps_state = vif->bss_conf.ps ? 2 : 0,
+ .ps_state = vif->cfg.ps ? 2 : 0,
};
if (vif->type != NL80211_IFTYPE_STATION)
@@ -258,16 +260,18 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, len);
+ if (sta_hdr) {
+ len += le16_to_cpu(sta_hdr->len);
+ sta_hdr->len = cpu_to_le16(len);
+ }
return ptlv;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
-mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid)
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -278,7 +282,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
- skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_mcu_msg_alloc(dev, NULL, len);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -286,7 +290,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
return skb;
}
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
+EXPORT_SYMBOL_GPL(__mt76_connac_mcu_alloc_sta_req);
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
@@ -310,12 +314,54 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
if (sta_hdr)
- sta_hdr->len = cpu_to_le16(sizeof(hdr));
+ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u8 omac_idx = mvif->omac_idx;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GO;
+ else
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GC;
+ else
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ omac = (struct bss_info_omac *)tlv;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
+
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -360,7 +406,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
else
conn_type = CONNECTION_INFRA_AP;
basic->conn_type = cpu_to_le32(conn_type);
- basic->aid = cpu_to_le16(vif->bss_conf.aid);
+ basic->aid = cpu_to_le16(vif->cfg.aid);
break;
case NL80211_IFTYPE_ADHOC:
basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
@@ -376,9 +422,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
-static void
-mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct sta_rec_uapsd *uapsd;
struct tlv *tlv;
@@ -407,6 +452,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
}
uapsd->max_sp = sta->max_sp;
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_uapsd);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -420,13 +466,17 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
sizeof(*htr),
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+ htr->no_rx_trans = true;
if (vif->type == NL80211_IFTYPE_STATION)
htr->to_ds = true;
else
htr->from_ds = true;
+ if (!wcid)
+ return;
+
+ htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
htr->to_ds = true;
htr->from_ds = true;
@@ -461,6 +511,25 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET, NULL,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, NULL, wtbl_hdr);
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_EXT_CMD(WTBL_UPDATE), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_update_hdr_trans);
+
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -481,15 +550,14 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
if (sta) {
if (vif->type == NL80211_IFTYPE_STATION)
- generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ generic->partial_aid = cpu_to_le16(vif->cfg.aid);
else
generic->partial_aid = cpu_to_le16(sta->aid);
memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
- if (is_mt7921(dev) &&
- vif->type == NL80211_IFTYPE_STATION)
+ if (!is_connac_v1(dev) && vif->type == NL80211_IFTYPE_STATION)
memcpy(generic->peer_addr, vif->bss_conf.bssid,
ETH_ALEN);
else
@@ -506,7 +574,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
rx->rca2 = 1;
rx->rv = 1;
- if (is_mt7921(dev))
+ if (!is_connac_v1(dev))
return;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
@@ -528,14 +596,14 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vif->type != NL80211_IFTYPE_STATION)
return;
- if (!sta->max_amsdu_len)
+ if (!sta->deflink.agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ amsdu->max_mpdu_size = sta->deflink.agg.max_amsdu_len >=
IEEE80211_MAX_MPDU_LEN_VHT_7991;
wcid->amsdu = true;
@@ -546,7 +614,7 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
static void
mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
struct sta_rec_he *he;
struct tlv *tlv;
@@ -634,7 +702,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->he_cap = cpu_to_le32(cap);
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
@@ -686,9 +754,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
u8 mode = 0;
if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
+ ht_cap = &sta->deflink.ht_cap;
+ vht_cap = &sta->deflink.vht_cap;
+ he_cap = &sta->deflink.he_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -737,25 +805,25 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
u16 supp_rates;
/* starec ht */
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
struct sta_rec_ht *ht;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
}
/* starec vht */
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
struct sta_rec_vht *vht;
int len;
len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
/* starec uapsd */
@@ -764,11 +832,11 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
if (!is_mt7921(dev))
return;
- if (sta->ht_cap.ht_supported || sta->he_cap.has_he)
+ if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)
mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);
/* starec he */
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
mt76_connac_mcu_sta_he_tlv(skb, sta);
if (band == NL80211_BAND_6GHZ &&
sta_state == MT76_STA_INFO_STATE_ASSOC) {
@@ -777,7 +845,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G,
sizeof(*he_6g_capa));
he_6g_capa = (struct sta_rec_he_6g_capa *)tlv;
- he_6g_capa->capa = sta->he_6ghz_capa.capa;
+ he_6g_capa->capa = sta->deflink.he_6ghz_capa.capa;
}
}
@@ -787,14 +855,14 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
- sta->ht_cap.ampdu_factor) |
+ sta->deflink.ht_cap.ampdu_factor) |
FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
- sta->ht_cap.ampdu_density);
+ sta->deflink.ht_cap.ampdu_density);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
- supp_rates = sta->supp_rates[band];
+ supp_rates = sta->deflink.supp_rates[band];
if (band == NL80211_BAND_2GHZ)
supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
@@ -803,25 +871,26 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
ra_info->legacy = cpu_to_le16(supp_rates);
- if (sta->ht_cap.ht_supported)
- memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask,
+ if (sta->deflink.ht_cap.ht_supported)
+ memcpy(ra_info->rx_mcs_bitmask,
+ sta->deflink.ht_cap.mcs.rx_mask,
HT_MCS_MASK_NUM);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
state = (struct sta_rec_state *)tlv;
state->state = sta_state;
- if (sta->vht_cap.vht_supported) {
- state->vht_opmode = sta->bandwidth;
- state->vht_opmode |= (sta->rx_nss - 1) <<
+ if (sta->deflink.vht_cap.vht_supported) {
+ state->vht_opmode = sta->deflink.bandwidth;
+ state->vht_opmode |= (sta->deflink.rx_nss - 1) <<
IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
}
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
-static void
-mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
{
struct wtbl_smps *smps;
struct tlv *tlv;
@@ -829,30 +898,39 @@ mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
-
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- smps->smps = true;
+ smps->smps = (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
+ void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc)
{
struct wtbl_ht *ht = NULL;
struct tlv *tlv;
u32 flags = 0;
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_6ghz_capa.capa) {
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
+ ht->ldpc = ht_ldpc &&
+ !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+
+ if (sta->deflink.ht_cap.ht_supported) {
+ ht->af = sta->deflink.ht_cap.ampdu_factor;
+ ht->mm = sta->deflink.ht_cap.ampdu_density;
+ } else {
+ ht->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ ht->mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ }
+
ht->ht = true;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported || sta->deflink.he_6ghz_capa.capa) {
struct wtbl_vht *vht;
u8 af;
@@ -860,18 +938,19 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(*vht), wtbl_tlv,
sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->ldpc = vht_ldpc &&
+ !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
+ sta->deflink.vht_cap.cap);
if (ht)
ht->af = max(ht->af, af);
}
mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
- if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
+ if (is_connac_v1(dev) && sta->deflink.ht_cap.ht_supported) {
/* sgi */
u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
@@ -881,15 +960,15 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(*raw), wtbl_tlv,
sta_wtbl);
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
flags |= MT_WTBL_W5_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
flags |= MT_WTBL_W5_SHORT_GI_40;
- if (sta->vht_cap.vht_supported) {
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ if (sta->deflink.vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
flags |= MT_WTBL_W5_SHORT_GI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
flags |= MT_WTBL_W5_SHORT_GI_160;
}
raw = (struct wtbl_raw *)tlv;
@@ -939,7 +1018,8 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr);
+ sta_wtbl, wtbl_hdr,
+ true, true);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -973,13 +1053,13 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
}
if (enable && tx) {
- u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ static const u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
int i;
for (i = 7; i > 0; i--) {
@@ -1106,7 +1186,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ int cmd, bool enable, bool tx)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
@@ -1129,8 +1209,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
- ret = mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
@@ -1140,15 +1219,12 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
- return mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
-static u8
-mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band,
- struct ieee80211_sta *sta)
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
@@ -1156,13 +1232,13 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta_ht_cap *ht_cap;
u8 mode = 0;
- if (!is_mt7921(dev))
+ if (is_connac_v1(dev))
return 0x38;
if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
+ ht_cap = &sta->deflink.ht_cap;
+ vht_cap = &sta->deflink.vht_cap;
+ he_cap = &sta->deflink.he_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -1180,7 +1256,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
mode |= PHY_MODE_A;
if (ht_cap->ht_supported)
@@ -1189,14 +1265,18 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap && he_cap->has_he && band == NL80211_BAND_5GHZ)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_5G;
+ } else if (band == NL80211_BAND_6GHZ) {
+ mode |= PHY_MODE_A | PHY_MODE_AN |
+ PHY_MODE_AC | PHY_MODE_AX_5G;
}
return mode;
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
-static const struct ieee80211_sta_he_cap *
+const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
enum nl80211_band band = phy->chandef.chan->band;
@@ -1206,6 +1286,7 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
return ieee80211_get_he_iftype_cap(sband, vif->type);
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
@@ -1326,6 +1407,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
else
conn_type = CONNECTION_INFRA_AP;
basic_req.basic.conn_type = cpu_to_le32(conn_type);
+ /* Fully activate/deactivate BSS network in AP mode only */
+ basic_req.basic.active = enable;
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p)
@@ -1438,7 +1521,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_dev *mdev = phy->dev;
- bool ext_phy = phy == mdev->phy2;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_hw_scan_req *req;
struct sk_buff *skb;
@@ -1452,7 +1534,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
req->bss_idx = mvif->idx;
req->scan_type = sreq->n_ssids ? 1 : 0;
req->probe_req_num = sreq->n_ssids ? 2 : 0;
@@ -1560,7 +1642,6 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_sched_scan_req *req;
struct mt76_dev *mdev = phy->dev;
- bool ext_phy = phy == mdev->phy2;
struct cfg80211_match_set *match;
struct cfg80211_ssid *ssid;
struct sk_buff *skb;
@@ -1574,7 +1655,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
req->version = 1;
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
@@ -2082,8 +2163,10 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
struct mt76_vif *vif,
struct ieee80211_bss_conf *info)
{
+ struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
+ bss_conf);
struct sk_buff *skb;
- int i, len = min_t(int, info->arp_addr_cnt,
+ int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
struct {
struct {
@@ -2110,11 +2193,8 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
- for (i = 0; i < len; i++) {
- u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
-
- memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
- }
+ for (i = 0; i < len; i++)
+ skb_put_data(skb, &mvif->cfg.arp_addr_list[i], sizeof(__be32));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true);
}
@@ -2482,5 +2562,560 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
+static int
+mt76_connac_mcu_sta_key_tlv(struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec *sec;
+ u32 len = sizeof(*sec);
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ sec_key->key_id = sta_key_conf->keyidx;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, sta_key_conf->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ /* store key_conf for BIP batch update */
+ if (cipher == MCU_CIPHER_AES_CCMP) {
+ memcpy(sta_key_conf->key, key->key, key->keylen);
+ sta_key_conf->keyidx = key->keyidx;
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd);
+ if (ret) {
+ /* free the request on tlv build failure to avoid leaking it */
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
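+
+/*
+ * Usage sketch (illustrative; "msta" is a hypothetical per-driver sta
+ * struct): a set_key handler wraps this with its chip command id, e.g.:
+ *
+ * err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->key_conf, key,
+ * MCU_UNI_CMD(STA_REC_UPDATE),
+ * &msta->wcid, cmd);
+ *
+ * where "cmd" is the SET_KEY/DISABLE_KEY request from mac80211.
+ */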
+
+/* SIFS 20us + 512-byte beacon transmitted at 1Mbps (4096us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_ext_tlv);
+
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
+ struct bss_info_basic *bss;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+ bss = (struct bss_info_basic *)tlv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ case NL80211_IFTYPE_AP:
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_MULTI_BSSID)) {
+ u8 bssid_id = vif->bss_conf.bssid_indicator;
+ struct wiphy *wiphy = phy->hw->wiphy;
+
+ if (bssid_id > ilog2(wiphy->mbssid_max_interfaces))
+ return -EINVAL;
+
+ bss->non_tx_bssid = vif->bss_conf.bssid_index;
+ bss->max_bssid = bssid_id;
+ }
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ rcu_read_lock();
+ if (!sta)
+ sta = ieee80211_find_sta(vif,
+ vif->bss_conf.bssid);
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (sta) {
+ struct mt76_wcid *wcid;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wlan_idx = wcid->idx;
+ }
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss->network_type = cpu_to_le32(type);
+ bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
+ bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+ bss->cipher = mvif->cipher;
+
+ if (vif->type != NL80211_IFTYPE_MONITOR) {
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->phy_mode = mt76_connac_get_phy_mode(phy, vif,
+ chandef->chan->band, NULL);
+ } else {
+ memcpy(bss->bssid, phy->macaddr, ETH_ALEN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_basic_tlv);
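+
+/*
+ * Illustrative sequence (assumed): a driver builds one BSS_INFO request by
+ * chaining the tlv helpers on the same skb before sending it, e.g.:
+ *
+ * mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy, wlan_idx, enable);
+ * mt76_connac_mcu_bss_omac_tlv(skb, vif);
+ * mt76_connac_mcu_bss_ext_tlv(skb, mvif);
+ */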
+
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter)
+{
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx_lo;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 wlan_idx_hi;
+ u8 rsv[2];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = enter ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PM_STATE_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_pm);
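+
+/*
+ * Example (illustrative): the last argument is treated as a boolean, so
+ *
+ * mt76_connac_mcu_set_pm(dev, 0, 1); enters doze on band 0 and
+ * mt76_connac_mcu_set_pm(dev, 0, 0); exits it again.
+ */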
+
+int mt76_connac_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(SET_RDD_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_rdd_cmd);
+
+static int
+mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
+ const struct mt76_connac2_fw_trailer *hdr,
+ const u8 *data, bool is_wa)
+{
+ int i, offset = 0, max_len = mt76_is_sdio(dev) ? 2048 : 4096;
+ u32 override = 0, option = 0;
+
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt76_connac2_fw_region *region;
+ u32 len, addr, mode;
+ int err;
+
+ region = (const void *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ mode = mt76_connac_mcu_gen_dl_mode(dev, region->feature_set,
+ is_wa);
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+ err = mt76_connac_mcu_init_download(dev, addr, len, mode);
+ if (err) {
+ dev_err(dev->dev, "Download request failed\n");
+ return err;
+ }
+
+ err = __mt76_mcu_send_firmware(dev, MCU_CMD(FW_SCATTER),
+ data + offset, len, max_len);
+ if (err) {
+ dev_err(dev->dev, "Failed to send firmware.\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ if (override)
+ option |= FW_START_OVERRIDE;
+ if (is_wa)
+ option |= FW_START_WORKING_PDA_CR4;
+
+ return mt76_connac_mcu_start_firmware(dev, override, option);
+}
+
+int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
+ const char *fw_wa)
+{
+ const struct mt76_connac2_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_wm, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+ dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret) {
+ dev_err(dev->dev, "Failed to start WM firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->hw->wiphy->fw_version,
+ sizeof(dev->hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+ release_firmware(fw);
+
+ if (!fw_wa)
+ return 0;
+
+ ret = request_firmware(&fw, fw_wa, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+ dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+ if (ret) {
+ dev_err(dev->dev, "Failed to start WA firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->hw->wiphy->fw_version,
+ sizeof(dev->hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_load_ram);
+
+static u32 mt76_connac2_get_data_mode(struct mt76_dev *dev, u32 info)
+{
+ u32 mode = DL_MODE_NEED_RSP;
+
+ if (!is_mt7921(dev) || info == PATCH_SEC_NOT_SUPPORT)
+ return mode;
+
+ switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
+ case PATCH_SEC_ENC_TYPE_PLAIN:
+ break;
+ case PATCH_SEC_ENC_TYPE_AES:
+ mode |= DL_MODE_ENCRYPT;
+ mode |= FIELD_PREP(DL_MODE_KEY_IDX,
+ (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
+ mode |= DL_MODE_RESET_SEC_IV;
+ break;
+ case PATCH_SEC_ENC_TYPE_SCRAMBLE:
+ mode |= DL_MODE_ENCRYPT;
+ mode |= DL_CONFIG_ENCRY_MODE_SEL;
+ mode |= DL_MODE_RESET_SEC_IV;
+ break;
+ default:
+ dev_err(dev->dev, "Encryption type not support!\n");
+ }
+
+ return mode;
+}
+
+int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
+{
+ int i, ret, sem, max_len = mt76_is_sdio(dev) ? 2048 : 4096;
+ const struct mt76_connac2_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+
+ sem = mt76_connac_mcu_patch_sem_ctrl(dev, true);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, fw_name, dev->dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const void *)fw->data;
+ dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt76_connac2_patch_sec *sec;
+ u32 len, addr, mode;
+ const u8 *dl;
+ u32 sec_info;
+
+ sec = (void *)(fw->data + sizeof(*hdr) + i * sizeof(*sec));
+ if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+ PATCH_SEC_TYPE_INFO) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ dl = fw->data + be32_to_cpu(sec->offs);
+ sec_info = be32_to_cpu(sec->info.sec_key_idx);
+ mode = mt76_connac2_get_data_mode(dev, sec_info);
+
+ ret = mt76_connac_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = __mt76_mcu_send_firmware(dev, MCU_CMD(FW_SCATTER),
+ dl, len, max_len);
+ if (ret) {
+ dev_err(dev->dev, "Failed to send patch\n");
+ goto out;
+ }
+ }
+
+ ret = mt76_connac_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->dev, "Failed to start patch\n");
+
+out:
+ sem = mt76_connac_mcu_patch_sem_ctrl(dev, false);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->dev, "Failed to release patch semaphore\n");
+ break;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_load_patch);
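+
+/*
+ * Illustrative boot sequence (assumed; MT7921_* names are one chip's
+ * firmware image macros): the ROM patch must be loaded before the RAM
+ * images, and the WA image may be NULL on chips without a CR4 core:
+ *
+ * err = mt76_connac2_load_patch(&dev->mt76, MT7921_ROM_PATCH);
+ * if (err)
+ * return err;
+ *
+ * return mt76_connac2_load_ram(&dev->mt76, MT7921_FIRMWARE_WM, NULL);
+ */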
+
+int mt76_connac2_mcu_fill_message(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
+ struct mt76_connac2_mcu_uni_txd *uni_txd;
+ struct mt76_connac2_mcu_txd *mcu_txd;
+ __le32 *txd;
+ u32 val;
+ u8 seq;
+
+ /* TODO: make dynamic based on msg type */
+ dev->mcu.timeout = 20 * HZ;
+
+ seq = ++dev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ if (cmd == MCU_CMD(FW_SCATTER))
+ goto exit;
+
+ txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
+ FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
+ txd[1] = cpu_to_le32(val);
+
+ if (cmd & __MCU_CMD_FIELD_UNI) {
+ uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2N;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ goto exit;
+ }
+
+ mcu_txd = (struct mt76_connac2_mcu_txd *)txd;
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
+ MT_TX_MCU_PORT_RX_Q0));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+ mcu_txd->cid = mcu_cmd;
+ mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
+
+ if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
+ } else {
+ mcu_txd->set_query = MCU_Q_NA;
+ }
+
+ if (cmd & __MCU_CMD_FIELD_WA)
+ mcu_txd->s2d_index = MCU_S2D_H2C;
+ else
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+
+exit:
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
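+
+/*
+ * Illustrative hookup (assumed, names for illustration only): a chip's
+ * mcu_skb_send_msg implementation calls this to prepend the command TXD
+ * and fetch the sequence number used to match the response, e.g.:
+ *
+ * err = mt76_connac2_mcu_fill_message(mdev, skb, cmd, &seq);
+ * if (err)
+ * return err;
+ * ... enqueue skb to the MCU queue, then wait on "seq" ...
+ */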
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 5baf8370b7bd..718f427d8f6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -6,6 +6,185 @@
#include "mt76_connac.h"
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_ENCRY_MODE BIT(4)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+#define FW_FEATURE_NON_DL BIT(6)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
+#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
+#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
+#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
+#define PATCH_SEC_ENC_TYPE_AES 0x01
+#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02
+#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
+#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
+
+enum {
+ FW_TYPE_DEFAULT = 0,
+ FW_TYPE_CLC = 2,
+ FW_TYPE_MAX_NUM = 255
+};
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+struct mt76_connac2_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+ u8 set_query; /* FW doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 rsv[5];
+} __packed __aligned(4);
+
+/**
+ * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for firmware v3
+ * @txd: hardware descriptor
+ * @len: total length not including txd
+ * @cid: command identifier
+ * @pkt_type: must be 0xa0 (cmd packet in long format)
+ * @frag_n: fragment number
+ * @seq: sequence number
+ * @checksum: 0 means there is no checksum
+ * @s2d_index: index for command source and destination
+ * Definition | value | note
+ * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
+ * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
+ * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
+ * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
+ *
+ * @option: command option
+ * BIT[0]: UNI_CMD_OPT_BIT_ACK
+ * set to 1 to request a fw reply
+ * if UNI_CMD_OPT_BIT_ACK is set and UNI_CMD_OPT_BIT_SET_QUERY
+ * is set, mcu firmware will send response event EID = 0x01
+ * (UNI_EVENT_ID_CMD_RESULT) to the host.
+ * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
+ * 0: original command
+ * 1: unified command
+ * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
+ * 0: QUERY command
+ * 1: SET command
+ */
+struct mt76_connac2_mcu_uni_txd {
+ __le32 txd[8];
+
+ /* DW1 */
+ __le16 len;
+ __le16 cid;
+
+ /* DW2 */
+ u8 rsv;
+ u8 pkt_type;
+ u8 frag_n;
+ u8 seq;
+
+ /* DW3 */
+ __le16 checksum;
+ u8 s2d_index;
+ u8 option;
+
+ /* DW4 */
+ u8 rsv1[4];
+} __packed __aligned(4);
+
+struct mt76_connac2_mcu_rxd {
+ __le32 rxd[6];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ u8 rsv[2];
+
+ u8 ext_eid;
+ u8 rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt76_connac2_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+ u16 rsv;
+ struct {
+ __be32 patch_ver;
+ __be32 subsys;
+ __be32 feature;
+ __be32 n_region;
+ __be32 crc;
+ u32 rsv[11];
+ } desc;
+} __packed;
+
+struct mt76_connac2_patch_sec {
+ __be32 type;
+ __be32 offs;
+ __be32 size;
+ union {
+ __be32 spec[13];
+ struct {
+ __be32 addr;
+ __be32 len;
+ __be32 sec_key_idx;
+ __be32 align_len;
+ u32 rsv[9];
+ } info;
+ };
+} __packed;
+
+struct mt76_connac2_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 rsv[2];
+ char fw_ver[10];
+ char build_date[15];
+ __le32 crc;
+} __packed;
+
+struct mt76_connac2_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 rsv[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 type;
+ u8 rsv1[14];
+} __packed;
+
struct tlv {
__le16 tag;
__le16 len;
@@ -570,6 +749,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_muru) + \
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
@@ -953,9 +1133,9 @@ enum {
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
- MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_MURU_CTRL = 0x9f,
MCU_EXT_CMD_SET_SPR = 0xa8,
@@ -971,6 +1151,7 @@ enum {
MCU_UNI_CMD_SUSPEND = 0x05,
MCU_UNI_CMD_OFFLOAD = 0x06,
MCU_UNI_CMD_HIF_CTRL = 0x07,
+ MCU_UNI_CMD_SNIFFER = 0x24,
};
enum {
@@ -996,8 +1177,10 @@ enum {
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
- MCU_CE_CMD_SET_ROC = 0x1d,
+ MCU_CE_CMD_SET_ROC = 0x1c,
+ MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
+ MCU_CE_CMD_SET_CLC = 0x5c,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
@@ -1427,6 +1610,51 @@ struct mt76_connac_config {
u8 data[320];
} __packed;
+static inline enum mcu_cipher_type
+mt76_connac_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MCU_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MCU_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MCU_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MCU_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MCU_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MCU_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MCU_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MCU_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MCU_CIPHER_WAPI;
+ default:
+ return MCU_CIPHER_NONE;
+ }
+}
+
+static inline u32
+mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
+ DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
+ if (is_mt7921(dev))
+ ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
+ DL_CONFIG_ENCRY_MODE_SEL : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
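A usage sketch: deriving the download-mode word for one firmware region before sending it to the MCU. The region variable is assumed to be a struct mt76_connac2_fw_region parsed from the firmware image:

	/* Sketch: the WA/CR4 decision is per-image; false targets WM. */
	u32 mode = mt76_connac_mcu_gen_dl_mode(dev, region->feature_set,
					       false /* WM, not WA */);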
+
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
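to_wcid_lo()/to_wcid_hi() split a 10-bit station index into its low byte and its two high bits. A worked example, assuming a wcid index of 0x2a5:

	/* 0x2a5 = 0b10_1010_0101: low byte 0xa5, high bits 0b10 */
	u8 lo = to_wcid_lo(0x2a5);	/* 0xa5 */
	u8 hi = to_wcid_hi(0x2a5);	/* 0x2 */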
@@ -1436,7 +1664,7 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
{
*wlan_idx_hi = 0;
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
*wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0;
*wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0;
} else {
@@ -1445,8 +1673,16 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
struct sk_buff *
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len);
+static inline struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid);
+ struct mt76_wcid *wcid)
+{
+ return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+}
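The old fixed-size allocator is now an inline wrapper over a length-taking __ variant, defaulting to MT76_CONNAC_STA_UPDATE_MAX_SIZE. A hedged call sketch (mvif/msta are driver-specific placeholder names, and the ERR_PTR return convention is assumed from the mt76 MCU helpers):

	skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
					    &msta->wcid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);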
+
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
int cmd, void *sta_wtbl, struct sk_buff **skb);
@@ -1476,13 +1712,16 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv);
+ void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx, void *sta_wtbl,
@@ -1496,7 +1735,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx);
+ int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
@@ -1546,4 +1785,37 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+
+const struct ieee80211_sta_he_cap *
+mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta);
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd);
+
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable);
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
+int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val);
+int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
+ const char *fw_wa);
+int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name);
+int mt76_connac2_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq);
#endif /* __MT76_CONNAC_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 436daf6d6d86..0422c332354a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -245,7 +245,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(mdev);
- ret = mt76u_init(mdev, usb_intf, false);
+ ret = mt76u_init(mdev, usb_intf);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 44d1a92d9a90..50eaeff11af3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -103,7 +103,8 @@ struct mt76x02_dev {
u8 tbtt_count;
u32 tx_hang_reset;
- u8 tx_hang_check;
+ u8 tx_hang_check[4];
+ u8 beacon_hang_check;
u8 mcu_timeout;
struct mt76x02_calibration cal;
@@ -155,7 +156,8 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -186,7 +188,7 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed);
+ struct ieee80211_bss_conf *info, u64 changed);
void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 5d034cec191b..ad4dc8e17b58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -57,8 +57,11 @@ void mt76x02_mac_set_beacon(struct mt76x02_dev *dev,
int bcn_len = dev->beacon_ops->slot_size;
int bcn_addr = MT_BEACON_BASE + (bcn_len * dev->beacon_data_count);
- if (!mt76x02_write_beacon(dev, bcn_addr, skb))
+ if (!mt76x02_write_beacon(dev, bcn_addr, skb)) {
+ if (!dev->beacon_data_count)
+ dev->beacon_hang_check++;
dev->beacon_data_count++;
+ }
dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
@@ -74,6 +77,7 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
if (!dev->mt76.beacon_mask)
dev->tbtt_count = 0;
+ dev->beacon_hang_check = 0;
if (enable) {
dev->mt76.beacon_mask |= BIT(mvif->idx);
} else {
@@ -139,7 +143,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
return;
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif, 0);
if (!skb)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index a601350531cd..024a5c0a5a57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -823,10 +823,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
-
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- dev->mt76.region != NL80211_DFS_UNSET) {
+ if (mt76_phy_dfs_state(&dev->mphy) > MT_DFS_STATE_DISABLED) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index a404fd7ea968..93d96739f802 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -404,7 +404,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ if (nss > 1 && sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
txwi_flags |= MT_TXWI_FLAGS_MMPS;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
@@ -412,9 +412,9 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
- ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size <<= sta->deflink.ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
@@ -860,9 +860,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
rxwi->rssi[1],
1);
- signal = max_t(s8, signal, status->chain_signal[1]);
}
- status->signal = signal;
status->freq = dev->mphy.chandef.chan->center_freq;
status->band = dev->mphy.chandef.chan->band;
@@ -1040,15 +1038,34 @@ EXPORT_SYMBOL_GPL(mt76x02_update_channel);
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
- u32 val = mt76_rr(dev, 0x10f4);
+ if (dev->mt76.beacon_mask) {
+ if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
+ dev->beacon_hang_check = 0;
+ return;
+ }
- if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
- return;
+ if (dev->beacon_hang_check < 10)
+ return;
- dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+ } else {
+ u32 val = mt76_rr(dev, 0x10f4);
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+ }
+ dev_err(dev->mt76.dev, "MAC error detected\n");
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) {
+ dev_err(dev->mt76.dev, "MAC stop failed\n");
+ goto out;
+ }
+
+ dev->beacon_hang_check = 0;
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
+
+out:
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}
@@ -1178,8 +1195,7 @@ void mt76x02_mac_work(struct work_struct *work)
dev->mt76.aggr_stats[idx++] += val >> 16;
}
- if (!dev->mt76.beacon_mask)
- mt76x02_check_mac_err(dev);
+ mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
mt76x02_edcca_check(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index ec0de691129a..e9c5e85ec07c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -59,7 +59,8 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
+ mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock(&q->lock);
}
@@ -191,13 +192,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
+ MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -230,8 +231,8 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt76x02_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt76x02_poll_tx);
napi_enable(&dev->mt76.tx_napi);
return 0;
@@ -348,18 +349,20 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
for (i = 0; i < 4; i++) {
q = dev->mphy.q_tx[i];
- if (!q->queued)
- continue;
-
prev_dma_idx = dev->mt76.tx_dma_idx[i];
dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
- if (prev_dma_idx == dma_idx)
- break;
+ if (!q->queued || prev_dma_idx != dma_idx) {
+ dev->tx_hang_check[i] = 0;
+ continue;
+ }
+
+ if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
+ return true;
}
- return i < 4;
+ return false;
}
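The hang detector now keeps one consecutive-stall counter per TX ring: a ring's counter resets whenever the ring is empty or its DMA index has advanced, and a watchdog reset fires only after MT_TX_HANG_TH consecutive stalled polls on a single ring. A minimal standalone sketch of the same pattern:

	/* Sketch of the per-ring stall counter used above. */
	static bool ring_stalled(u8 *check, int i, bool queued,
				 u16 prev_idx, u16 dma_idx, u8 threshold)
	{
		if (!queued || prev_idx != dma_idx) {
			check[i] = 0;	/* ring idle or made progress */
			return false;
		}
		return ++check[i] >= threshold;
	}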
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -530,23 +533,13 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
if (test_bit(MT76_RESTART, &dev->mphy.state))
return;
- if (mt76x02_tx_hang(dev)) {
- if (++dev->tx_hang_check >= MT_TX_HANG_TH)
- goto restart;
- } else {
- dev->tx_hang_check = 0;
- }
-
- if (dev->mcu_timeout)
- goto restart;
-
- return;
+ if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
+ return;
-restart:
mt76x02_watchdog_reset(dev);
dev->tx_hang_reset++;
- dev->tx_hang_check = 0;
+ memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index fa7872ac22bf..fe0c5e3298bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -571,6 +571,8 @@
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_0_BEACONS GENMASK(31, 16)
+
#define MT_TX_STA_1 0x1710
#define MT_TX_STA_2 0x1714
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 2953df7d8388..02da543dfc5c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -21,29 +21,16 @@ static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- u32 reg, val;
int i;
- if (usb->mcu.burst) {
- WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
-
- reg = usb->mcu.rp[0].reg - usb->mcu.base;
- for (i = 0; i < usb->mcu.rp_len; i++) {
- val = get_unaligned_le32(data + 4 * i);
- usb->mcu.rp[i].reg = reg++;
- usb->mcu.rp[i].value = val;
- }
- } else {
- WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
-
- for (i = 0; i < usb->mcu.rp_len; i++) {
- reg = get_unaligned_le32(data + 8 * i) -
- usb->mcu.base;
- val = get_unaligned_le32(data + 8 * i + 4);
-
- WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
- usb->mcu.rp[i].value = val;
- }
+ WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ u32 reg = get_unaligned_le32(data + 8 * i) - usb->mcu.base;
+ u32 val = get_unaligned_le32(data + 8 * i + 4);
+
+ WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+ usb->mcu.rp[i].value = val;
}
}
@@ -108,7 +95,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
MT_EP_OUT_INBAND_CMD);
if (ret)
- return ret;
+ goto out;
if (wait_resp)
ret = mt76x02u_mcu_wait_resp(dev, seq);
@@ -207,7 +194,6 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = data;
usb->mcu.rp_len = n;
usb->mcu.base = base;
- usb->mcu.burst = false;
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index dd30f537676d..604ddcc21123 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -292,7 +292,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mt76_packet_id_init(&mvif->group_wcid);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
+ rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
+ mtxq->wcid = MT_VIF_WCID(idx);
}
int
@@ -327,11 +328,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx += 8;
/* vif is already set or idx is 8 for AP/Mesh/... */
- if (dev->mt76.vif_mask & BIT(idx) ||
+ if (dev->mt76.vif_mask & BIT_ULL(idx) ||
(vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
- dev->mt76.vif_mask |= BIT(idx);
+ dev->mt76.vif_mask |= BIT_ULL(idx);
mt76x02_vif_init(dev, vif, idx);
return 0;
@@ -344,7 +345,8 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- dev->mt76.vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
+ rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL);
mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
@@ -485,7 +487,8 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(mt76x02_set_key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct mt76x02_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, qid;
@@ -634,7 +637,7 @@ EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 8a22ee581674..df85ebc6e1df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 2575369e44e2..55068f3252ef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -57,7 +57,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
mt76x02u_init_mcu(mdev);
- err = mt76u_init(mdev, intf, false);
+ err = mt76u_init(mdev, intf);
if (err < 0)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
index d98225da694c..f21282cea845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: ISC
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
- select MT76_CORE
+ select MT76_CONNAC_LIB
depends on MAC80211
depends on PCI
+ select RELAY
help
This adds support for MT7915-based wireless PCIe devices,
which support concurrent dual-band operation at both 5GHz
@@ -11,3 +12,13 @@ config MT7915E
OFDMA, spatial reuse and dual carrier modulation.
To compile this driver as a module, choose M here.
+
+config MT7986_WMAC
+ bool "MT7986 (SoC) WMAC support"
+ depends on MT7915E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ help
+	  This adds support for the built-in WMAC on the MT7986 SoC,
+	  which has the same feature set as the MT7915 but additionally
+	  enables Wi-Fi 6E support.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
index 80e49244348e..b794ceb79c37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -6,3 +6,4 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
debugfs.o mmio.o
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
\ No newline at end of file
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index e96d1c31dd36..6ef3431cad64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -1,9 +1,13 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/relay.h>
#include "mt7915.h"
#include "eeprom.h"
#include "mcu.h"
+#include "mac.h"
+
+#define FW_BIN_LOG_MAGIC 0x44e98caf
/** global debugfs **/
@@ -19,9 +23,9 @@ mt7915_implicit_txbf_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
- return -EBUSY;
-
+	/* Already-connected stations must reconnect to apply the
+	 * new implicit txbf configuration.
+	 */
dev->ibf = !!val;
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
@@ -40,42 +44,124 @@ mt7915_implicit_txbf_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
mt7915_implicit_txbf_set, "%lld\n");
-/* test knob of system layer 1/2 error recovery */
-static int mt7915_ser_trigger_set(void *data, u64 val)
+/* test knob of system error recovery */
+static ssize_t
+mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- enum {
- SER_SET_RECOVER_L1 = 1,
- SER_SET_RECOVER_L2,
- SER_ENABLE = 2,
- SER_RECOVER
- };
- struct mt7915_dev *dev = data;
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ char buf[16];
int ret = 0;
+ u16 val;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ if (kstrtou16(buf, 0, &val))
+ return -EINVAL;
switch (val) {
+ case SER_QUERY:
+ /* grab firmware SER stats */
+ ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy);
+ break;
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
- ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
+ case SER_SET_RECOVER_L3_RX_ABORT:
+ case SER_SET_RECOVER_L3_TX_ABORT:
+ case SER_SET_RECOVER_L3_TX_DISABLE:
+ case SER_SET_RECOVER_L3_BF:
+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy);
if (ret)
return ret;
- return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0);
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy);
+ break;
default:
break;
}
+ return ret ? ret : count;
+}
+
+static ssize_t
+mt7915_fw_ser_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+ static const size_t bufsz = 400;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_STATUS = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_SER_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR_1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR_AMSDU = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE_AMSDU_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PSE_ERR = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PSE_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PSE_ERR_1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PSE1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR6_B0 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN0_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR6_B1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR7_B0 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN0_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR7_B1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
return ret;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL,
- mt7915_ser_trigger_set, "%lld\n");
+static const struct file_operations mt7915_fw_ser_ops = {
+ .write = mt7915_fw_ser_set,
+ .read = mt7915_fw_ser_get,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
static int
mt7915_radar_trigger(void *data, u64 val)
{
struct mt7915_dev *dev = data;
- return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
+ val, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
@@ -87,7 +173,7 @@ mt7915_muru_debug_set(void *data, u64 val)
struct mt7915_dev *dev = data;
dev->muru_debug = val;
- mt7915_mcu_muru_debug_set(dev, data);
+ mt7915_mcu_muru_debug_set(dev, dev->muru_debug);
return 0;
}
@@ -301,6 +387,53 @@ exit:
DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
static int
+mt7915_rdd_monitor(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
+ const char *bw;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!cfg80211_chandef_valid(chandef)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!dev->rdd2_phy) {
+ seq_puts(s, "not running\n");
+ goto out;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = "40";
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = "80";
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = "160";
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = "80P80";
+ break;
+ default:
+ bw = "20";
+ break;
+ }
+
+ seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
+ chandef->chan->hw_value, chandef->chan->center_freq,
+ bw, chandef->center_freq1);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
mt7915_fw_debug_wm_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
@@ -311,27 +444,46 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
DEBUG_SPL,
DEBUG_RPT_RX,
} debug;
+ bool tx, rx, en;
int ret;
- dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
+ dev->fw.debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
+
+ if (dev->fw.debug_bin)
+ val = 16;
+ else
+ val = dev->fw.debug_wm;
+
+ tx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(1));
+ rx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(2));
+ en = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(0));
- ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm);
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
- return ret;
+ goto out;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
- ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm);
+ if (debug == DEBUG_RPT_RX)
+ val = en && rx;
+ else
+ val = en && tx;
+
+ ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
- return ret;
+ goto out;
}
/* WM CPU info record control */
mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
- mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw_debug_wm);
+ mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm);
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
- return 0;
+out:
+ if (ret)
+ dev->fw.debug_wm = 0;
+
+ return ret;
}
static int
@@ -339,7 +491,7 @@ mt7915_fw_debug_wm_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
- *val = dev->fw_debug_wm;
+ *val = dev->fw.debug_wm;
return 0;
}
@@ -353,14 +505,19 @@ mt7915_fw_debug_wa_set(void *data, u64 val)
struct mt7915_dev *dev = data;
int ret;
- dev->fw_debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
+ dev->fw.debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
- ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa);
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw.debug_wa);
if (ret)
- return ret;
+ goto out;
- return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX,
- !!dev->fw_debug_wa, 0);
+ ret = mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_PDMA_RX, !!dev->fw.debug_wa, 0);
+out:
+ if (ret)
+ dev->fw.debug_wa = 0;
+
+ return ret;
}
static int
@@ -368,7 +525,7 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
- *val = dev->fw_debug_wa;
+ *val = dev->fw.debug_wa;
return 0;
}
@@ -376,12 +533,77 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
mt7915_fw_debug_wa_set, "%lld\n");
+static struct dentry *
+create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
+ struct rchan_buf *buf, int *is_global)
+{
+ struct dentry *f;
+
+ f = debugfs_create_file("fwlog_data", mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(f))
+ return NULL;
+
+ *is_global = 1;
+
+ return f;
+}
+
+static int
+remove_buf_file_cb(struct dentry *f)
+{
+ debugfs_remove(f);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_bin_set(void *data, u64 val)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = create_buf_file_cb,
+ .remove_buf_file = remove_buf_file_cb,
+ };
+ struct mt7915_dev *dev = data;
+
+ if (!dev->relay_fwlog)
+ dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
+ 1500, 512, &relay_cb, NULL);
+ if (!dev->relay_fwlog)
+ return -ENOMEM;
+
+ dev->fw.debug_bin = val;
+
+ relay_reset(dev->relay_fwlog);
+
+ return mt7915_fw_debug_wm_set(dev, dev->fw.debug_wm);
+}
+
+static int
+mt7915_fw_debug_bin_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw.debug_bin;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7915_fw_debug_bin_get,
+ mt7915_fw_debug_bin_set, "%lld\n");
+
static int
mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
- if (dev->fw_debug_wm) {
+ seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC));
+ seq_printf(file, "Exception state: 0x%x\n",
+ is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(15, 8)) :
+ (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(7, 0)));
+
+ if (dev->fw.debug_wm) {
seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT),
mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT));
@@ -400,7 +622,9 @@ mt7915_fw_util_wa_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
- if (dev->fw_debug_wa)
+ seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WA_MCU_PC));
+
+ if (dev->fw.debug_wa)
return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY),
MCU_WA_PARAM_CPU_UTIL, 0, 0);
@@ -419,12 +643,12 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
- range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(phy->band_idx, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
- seq_printf(file, "\nPhy %d\n", ext_phy);
+ seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, phy->band_idx);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@@ -432,7 +656,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
bound[i] + 1, bound[i + 1]);
seq_puts(file, "\nCount: ");
- n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
seq_puts(file, "\n");
@@ -521,14 +745,14 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static void
-mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
+mt7915_hw_queue_read(struct seq_file *s, u32 size,
const struct hw_queue_map *map)
{
struct mt7915_phy *phy = s->private;
struct mt7915_dev *dev = phy->dev;
u32 i, val;
- val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
+ val = mt76_rr(dev, MT_FL_Q_EMPTY);
for (i = 0; i < size; i++) {
u32 ctrl, head, tail, queued;
@@ -536,13 +760,13 @@ mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
continue;
ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
- mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
- head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ head = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(11, 0));
- tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(27, 16));
- queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
+ queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\t%s: ", map[i].name);
@@ -570,8 +794,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
if (val & BIT(offs))
continue;
- mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
- qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
@@ -633,7 +857,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
val, head, tail);
seq_puts(file, "PLE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
&ple_queue_map[0]);
/* iterate per-sta ple queue */
@@ -641,7 +865,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
mt7915_sta_hw_queue_read, file);
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
&pse_queue_map[0]);
return 0;
@@ -738,6 +962,37 @@ mt7915_twt_stats(struct seq_file *s, void *data)
return 0;
}
+/* The RF register index reuses the generic regidx and combines two
+ * parts: WF selection [31:28] and offset [27:0].
+ */
+static int
+mt7915_rf_regval_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+ u32 regval;
+ int ret;
+
+ ret = mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &regval, false);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+static int
+mt7915_rf_regval_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ u32 val32 = val;
+
+ return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &val32, true);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7915_rf_regval_get,
+ mt7915_rf_regval_set, "0x%08llx\n");
+
int mt7915_init_debugfs(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -755,8 +1010,10 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("xmit-queues", 0400, dir, phy,
&mt7915_xmit_queues_fops);
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
+ debugfs_create_file("fw_ser", 0600, dir, phy, &mt7915_fw_ser_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
debugfs_create_file("fw_util_wm", 0400, dir, dev,
&mt7915_fw_util_wm_fops);
debugfs_create_file("fw_util_wa", 0400, dir, dev,
@@ -767,17 +1024,79 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
&mt7915_rate_txpower_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
mt7915_twt_stats);
- debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
- if (!dev->dbdc_support || ext_phy) {
+ debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
+
+ if (!dev->dbdc_support || phy->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7915_rdd_monitor);
}
+ if (!ext_phy)
+ dev->debugfs_dir = dir;
+
return 0;
}
+static void
+mt7915_debugfs_write_fwlog(struct mt7915_dev *dev, const void *hdr, int hdrlen,
+ const void *data, int len)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ void *dest;
+
+ spin_lock_irqsave(&lock, flags);
+ dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
+ if (dest) {
+ *(u32 *)dest = hdrlen + len;
+ dest += 4;
+
+ if (hdrlen) {
+ memcpy(dest, hdr, hdrlen);
+ dest += hdrlen;
+ }
+
+ memcpy(dest, data, len);
+ relay_flush(dev->relay_fwlog);
+ }
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len)
+{
+ struct {
+ __le32 magic;
+ __le32 timestamp;
+ __le16 msg_type;
+ __le16 len;
+ } hdr = {
+ .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
+ .msg_type = cpu_to_le16(PKT_TYPE_RX_FW_MONITOR),
+ };
+
+ if (!dev->relay_fwlog)
+ return;
+
+ hdr.timestamp = cpu_to_le32(mt76_rr(dev, MT_LPON_FRCR(0)));
+ hdr.len = *(__le16 *)data;
+ mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
+}
+
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len)
+{
+ if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
+ return false;
+
+ if (dev->relay_fwlog)
+ mt7915_debugfs_write_fwlog(dev, NULL, 0, data, len);
+
+ return true;
+}
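Records pushed through the relay channel are framed with a 4-byte native-endian length followed by the payload (optionally prefixed by the header built in mt7915_debugfs_rx_fw_monitor). A hedged sketch of walking such a buffer; buf/len describe a chunk read from the fwlog_data file, and process_record is a hypothetical consumer:

	/* Sketch: iterate length-prefixed records as written by
	 * mt7915_debugfs_write_fwlog().
	 */
	while (len >= 4) {
		u32 rec_len;

		memcpy(&rec_len, buf, 4);	/* hdrlen + payload length */
		buf += 4;
		len -= 4;
		if (rec_len > len)
			break;			/* truncated record */
		process_record(buf, rec_len);	/* hypothetical consumer */
		buf += rec_len;
		len -= rec_len;
	}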
+
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
@@ -826,8 +1145,8 @@ static ssize_t mt7915_sta_fixed_rate_set(struct file *file,
phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0);
for (i = 0; i <= phy.bw; i++) {
- phy.sgi |= gi << (i << sta->he_cap.has_he);
- phy.he_ltf |= he_ltf << (i << sta->he_cap.has_he);
+ phy.sgi |= gi << (i << sta->deflink.he_cap.has_he);
+ phy.he_ltf |= he_ltf << (i << sta->deflink.he_cap.has_he);
}
field = RATE_PARAM_FIXED;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 9182568f95c7..00aafc2422f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -5,25 +5,18 @@
#include "../dma.h"
#include "mac.h"
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
+static int
+mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
- int i, err;
+ struct mt7915_dev *dev = phy->dev;
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
- if (err < 0)
- return err;
-
- for (i = 0; i <= MT_TXQ_PSD; i++)
- phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ ring_base = MT_WED_TX_RING_BASE;
+ idx -= MT_TXQ_ID(0);
+ }
- return 0;
-}
-
-static void
-mt7915_tx_cleanup(struct mt7915_dev *dev)
-{
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
+ return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
+ MT_WED_Q_TX(idx));
}
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
@@ -32,148 +25,443 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
- mt7915_tx_cleanup(dev);
-
+ mt76_connac_tx_cleanup(&dev->mt76);
if (napi_complete_done(napi, 0))
mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);
return 0;
}
+static void mt7915_dma_config(struct mt7915_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+ } while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
+
+ if (is_mt7915(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
+ TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ }
+}
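The *_CONFIG macros fill three per-queue lookup tables (wfdma_mask, q_int_mask, q_id) that later code indexes instead of hard-coding per-chip values. A hedged sketch of a consumer, assuming __RXQ() maps MT_RXQ_* into the shared table index space as in the macros above:

	/* Sketch: look up the hardware ring id and interrupt bit for a
	 * logical RX queue.
	 */
	u32 hw_id   = dev->q_id[__RXQ(MT_RXQ_MCU)];
	u32 int_bit = dev->q_int_mask[__RXQ(MT_RXQ_MCU)];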
+
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
-#define PREFETCH(base, depth) ((base) << 16 | (depth))
-
- mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ u32 base = 0;
+
+ /* prefetch SRAM wrapping boundary for tx/rx ring. */
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
+
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
+ PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
+ PREFETCH(0x180, 0x4));
+ if (!is_mt7915(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
+ PREFETCH(0x1c0, 0x4));
+ base = 0x40;
+ }
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
+ PREFETCH(0x1c0 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
+ PREFETCH(0x200 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
+ PREFETCH(0x240 + base, 0x4));
+
+	/* for mt7915, the ring next to the last used ring must
+	 * also be initialized.
+	 */
+ if (is_mt7915(&dev->mt76)) {
+ ofs += 0x4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
+ PREFETCH(0x140, 0x0));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
+ PREFETCH(0x200 + base, 0x0));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
+ PREFETCH(0x280 + base, 0x0));
+ }
}
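PREFETCH() packs the SRAM base (upper 16 bits) and FIFO depth (lower 16 bits) into one EXT_CTRL word. A worked example:

	/* PREFETCH(0x40, 0x4) == 0x40 << 16 | 0x4 == 0x00400004:
	 * base 0x40, depth 4, as programmed for the WM MCU queue above.
	 */
	u32 val = PREFETCH(0x40, 0x4);	/* 0x00400004 */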
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
__mt7915_dma_prefetch(dev, 0);
if (dev->hif2)
- __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
+ __mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
-int mt7915_dma_init(struct mt7915_dev *dev)
+static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
+ struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
- int ret;
-
- mt76_dma_attach(&dev->mt76);
if (dev->hif2)
- hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset */
+ if (rst) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+ }
+ }
- /* configure global setting */
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
+
+static int mt7915_dma_enable(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ }
- /* configure delay interrupt */
+	/* disable delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+ }
if (dev->hif2) {
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
+ hif1_ofs, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
+ hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
+ hif1_ofs, 0);
+ }
+ }
+
+	/* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* hif wait WFDMA idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+ }
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
}
- /* configure perfetch settings */
- mt7915_dma_prefetch(dev);
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD;
+
+ if (!dev->phy.band_idx)
+ irq_mask |= MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support || dev->phy.band_idx)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ u32 wed_irq_mask = irq_mask;
+
+ wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+ }
+
+ mt7915_irq_enable(dev, irq_mask);
+
+ return 0;
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 wa_rx_base, wa_rx_idx;
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7915_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mt7915_dma_disable(dev, true);
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+
+ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
+ } else {
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+ }
/* init tx queue */
- ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
- MT7915_TX_RING_SIZE);
+ ret = mt7915_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(dev->phy.band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0));
if (ret)
return ret;
+ if (phy2) {
+ ret = mt7915_init_tx_queues(phy2,
+ MT_TXQ_ID(phy2->band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(1));
+ if (ret)
+ return ret;
+ }
+
/* command to WM */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
if (ret)
return ret;
/* firmware download */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
- MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7915_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
return ret;
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
- MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
/* event from WA */
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ wa_rx_base = MT_WED_RX_RING_BASE;
+ wa_rx_idx = MT7915_RXQ_MCU_WA;
+ dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+ } else {
+ wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
+ wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
+ }
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
- MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE, wa_rx_base);
if (ret)
return ret;
- /* rx data queue */
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
- MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
- if (ret)
- return ret;
+ /* rx data queue for band0 */
+ if (!dev->phy.band_idx) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+ if (ret)
+ return ret;
+ }
- if (dev->dbdc_support) {
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
- MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
+ /* tx free notify event from WA for band0 */
+ if (!is_mt7915(mdev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_DATA_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
if (ret)
return ret;
+ }
- /* event from WA */
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
- MT7915_RXQ_MCU_WA_EXT,
+ if (dev->dbdc_support || dev->phy.band_idx) {
+ /* rx data queue for band1 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
+ MT_RXQ_ID(MT_RXQ_BAND1),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band1 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_EVENT_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
if (ret)
return ret;
}
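
The queue setup above derives every hardware index and ring base from the per-chip MT_TXQ_ID()/MT_RXQ_RING_BASE() lookups, offsets band-1 rings by hif1_ofs when a second PCIe HIF is present, and points the WA event ring at the WED block when the offload engine is active. A minimal sketch of that last selection, with stand-in names (pick_wa_rx_ring() and struct wa_ring_cfg are illustrative, not driver API):

struct wa_ring_cfg {
	u32 base;		/* MMIO ring base */
	u32 idx;		/* hardware queue index */
	bool txfree_offload;	/* ring consumed by WED as tx-free queue */
};

static struct wa_ring_cfg pick_wa_rx_ring(bool wed_active)
{
	struct wa_ring_cfg cfg;

	if (wed_active) {
		/* WED consumes tx-free notifications directly */
		cfg.base = MT_WED_RX_RING_BASE;
		cfg.idx = MT7915_RXQ_MCU_WA;
		cfg.txfree_offload = true;	/* MT_WED_Q_TXFREE */
	} else {
		cfg.base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		cfg.idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
		cfg.txfree_offload = false;
	}
	return cfg;
}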
@@ -182,84 +470,18 @@ int mt7915_dma_init(struct mt7915_dev *dev)
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7915_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7915_poll_tx);
napi_enable(&dev->mt76.tx_napi);
- /* hif wait WFDMA idle */
- mt76_set(dev, MT_WFDMA0_BUSY_ENA,
- MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_BUSY_ENA,
- MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
- MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
-
- /* set WFDMA Tx/Rx */
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND);
- }
-
- /* enable interrupts for TX/RX rings */
- mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
- MT_INT_MCU_CMD);
+ mt7915_dma_enable(dev);
return 0;
}
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
- /* disable */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- /* reset */
- mt76_clear(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_clear(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
+ mt7915_dma_disable(dev, true);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index edd74d0de157..4b1a9811646f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -10,6 +10,7 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
+ u32 offs;
if (!dev->flash_mode)
return 0;
@@ -22,7 +23,9 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
if (!dev->cal)
return -ENOMEM;
- return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
+ offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
+
+ return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@@ -32,24 +35,49 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
switch (val) {
case 0x7915:
+ case 0x7916:
+ case 0x7986:
return 0;
default:
return -EINVAL;
}
}
+static char *mt7915_eeprom_name(struct mt7915_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ return dev->dbdc_support ?
+ MT7915_EEPROM_DEFAULT_DBDC : MT7915_EEPROM_DEFAULT;
+ case 0x7986:
+ switch (mt7915_check_adie(dev, true)) {
+ case MT7976_ONE_ADIE_DBDC:
+ return MT7986_EEPROM_MT7976_DEFAULT_DBDC;
+ case MT7975_ONE_ADIE:
+ return MT7986_EEPROM_MT7975_DEFAULT;
+ case MT7976_ONE_ADIE:
+ return MT7986_EEPROM_MT7976_DEFAULT;
+ case MT7975_DUAL_ADIE:
+ return MT7986_EEPROM_MT7975_DUAL_DEFAULT;
+ case MT7976_DUAL_ADIE:
+ return MT7986_EEPROM_MT7976_DUAL_DEFAULT;
+ default:
+ break;
+ }
+ return NULL;
+ default:
+ return MT7916_EEPROM_DEFAULT;
+ }
+}
+
static int
mt7915_eeprom_load_default(struct mt7915_dev *dev)
{
- char *default_bin = MT7915_EEPROM_DEFAULT;
u8 *eeprom = dev->mt76.eeprom.data;
const struct firmware *fw = NULL;
int ret;
- if (dev->dbdc_support)
- default_bin = MT7915_EEPROM_DEFAULT_DBDC;
-
- ret = request_firmware(&fw, default_bin, dev->mt76.dev);
+ ret = request_firmware(&fw, mt7915_eeprom_name(dev), dev->mt76.dev);
if (ret)
return ret;
@@ -59,7 +87,7 @@ mt7915_eeprom_load_default(struct mt7915_dev *dev)
goto out;
}
- memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE);
+ memcpy(eeprom, fw->data, mt7915_eeprom_size(dev));
dev->flash_mode = true;
out:
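
The precal and default-EEPROM changes in this file all hinge on one split: mt7915 keeps the v1 layout, while mt7916/mt7986 use the larger v2 layout. A comment-only summary, using the constants added in eeprom.h further down:

/* v1 (mt7915)           v2 (mt7916/mt7986)
 * total size   0x0e00   0x1000   (__MT_EE_MAX / __MT_EE_MAX_V2)
 * precal blob  @0x0e10  @0x1010  (MT_EE_PRECAL / MT_EE_PRECAL_V2)
 *
 * The default firmware blob is picked by mt7915_eeprom_name() and
 * sized by mt7915_eeprom_size(), replacing the fixed
 * MT7915_EEPROM_SIZE assumption.
 */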
@@ -71,8 +99,9 @@ out:
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
+ u16 eeprom_size = mt7915_eeprom_size(dev);
- ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
+ ret = mt76_eeprom_init(&dev->mt76, eeprom_size);
if (ret < 0)
return ret;
@@ -88,7 +117,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+ block_num = DIV_ROUND_UP(eeprom_size,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
mt7915_mcu_get_eeprom(dev,
@@ -98,17 +127,34 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return mt7915_check_eeprom(dev);
}
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
- val = eeprom[MT_EE_WIFI_CONF + ext_phy];
+ val = eeprom[MT_EE_WIFI_CONF + phy->band_idx];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
- if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
- val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+
+ if (!is_mt7915(&dev->mt76)) {
+ switch (val) {
+ case MT_EE_V2_BAND_SEL_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_6GHZ:
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ default:
+ phy->mt76->cap.has_2ghz = true;
+ return;
+ }
+ } else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) {
+ val = phy->band_idx ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+ }
switch (val) {
case MT_EE_BAND_SEL_5GHZ:
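
A minimal sketch of the v2 band-select decode added above, assuming mt76's struct mt76_phy_cap flag layout (the helper name decode_band_sel() is illustrative; the driver keeps this inline in mt7915_eeprom_parse_band_config()):

static void decode_band_sel(u8 conf_byte, struct mt76_phy_cap *cap)
{
	switch (FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, conf_byte)) {
	case MT_EE_V2_BAND_SEL_5GHZ:
		cap->has_5ghz = true;
		break;
	case MT_EE_V2_BAND_SEL_6GHZ:
		cap->has_6ghz = true;
		break;
	case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
		cap->has_5ghz = true;
		cap->has_6ghz = true;
		break;
	default:
		/* anything else falls back to 2 GHz */
		cap->has_2ghz = true;
		break;
	}
}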
@@ -124,32 +170,65 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
}
}
-static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy)
{
- u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
+ u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+ bool ext_phy = phy != &dev->phy;
+
+ mt7915_eeprom_parse_band_config(phy);
- mt7915_eeprom_parse_band_config(&dev->phy);
+ /* read tx/rx mask from eeprom */
+ if (is_mt7915(&dev->mt76)) {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF]);
+ } else {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
+ }
- /* read tx mask from eeprom */
- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH, eeprom[MT_EE_WIFI_CONF]);
if (!nss || nss > 4)
nss = 4;
+ /* read tx/rx stream */
nss_band = nss;
if (dev->dbdc_support) {
- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
- eeprom[MT_EE_WIFI_CONF + 3]);
- if (!nss_band || nss_band > 2)
- nss_band = 2;
+ if (is_mt7915(&dev->mt76)) {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (phy->band_idx)
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ } else {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+ eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
+ }
+
+ nss_band_max = is_mt7986(&dev->mt76) ?
+ MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
+ } else {
+ nss_band_max = is_mt7986(&dev->mt76) ?
+ MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
+ }
- if (nss_band >= nss)
- nss = 4;
+ if (!nss_band || nss_band > nss_band_max)
+ nss_band = nss_band_max;
+
+ if (nss_band > nss) {
+ dev_warn(dev->mt76.dev,
+ "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
+ nss, nss_band, phy->band_idx, ext_phy);
+ nss = nss_band;
}
- dev->chainmask = BIT(nss) - 1;
- dev->mphy.antenna_mask = BIT(nss_band) - 1;
- dev->mphy.chainmask = dev->mphy.antenna_mask;
+ mphy->chainmask = BIT(nss) - 1;
+ if (ext_phy)
+ mphy->chainmask <<= dev->chainshift;
+ mphy->antenna_mask = BIT(nss_band) - 1;
+ dev->chainmask |= mphy->chainmask;
+ dev->chainshift = hweight8(dev->mphy.chainmask);
}
int mt7915_eeprom_init(struct mt7915_dev *dev)
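
A worked example of the new chainmask bookkeeping, assuming a dbdc mt7916 whose per-band tx-path field reads 2 for both bands:

/* band0: mphy->chainmask = BIT(2) - 1          = 0b0011
 *        dev->chainmask |= 0b0011
 *        dev->chainshift = hweight8(0b0011)    = 2
 * band1: mphy->chainmask = (BIT(2) - 1) << 2   = 0b1100
 *        dev->chainmask |= 0b1100              -> 0b1111
 *
 * Each phy owns a disjoint chain window and the device mask is their
 * union, replacing the old single dev->chainmask computation.
 */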
@@ -171,7 +250,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
if (ret)
return ret;
- mt7915_eeprom_parse_hw_cap(dev);
+ mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
@@ -186,27 +265,43 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
{
u8 *eeprom = dev->mt76.eeprom.data;
int index, target_power;
- bool tssi_on;
+ bool tssi_on, is_7976;
if (chain_idx > 3)
return -EINVAL;
tssi_on = mt7915_tssi_enabled(dev, chan->band);
+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
if (chan->band == NL80211_BAND_2GHZ) {
- index = MT_EE_TX0_POWER_2G + chain_idx * 3;
- target_power = eeprom[index];
-
- if (!tssi_on)
- target_power += eeprom[index + 1];
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_2G_V2 + chain_idx;
+ target_power = eeprom[index];
+ } else {
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ target_power = eeprom[index];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 1];
+ }
+ } else if (chan->band == NL80211_BAND_5GHZ) {
+ int group = mt7915_get_channel_group_5g(chan->hw_value, is_7976);
+
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_5G_V2 + chain_idx * 5;
+ target_power = eeprom[index + group];
+ } else {
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ target_power = eeprom[index + group];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 8];
+ }
} else {
- int group = mt7915_get_channel_group(chan->hw_value);
-
- index = MT_EE_TX0_POWER_5G + chain_idx * 12;
- target_power = eeprom[index + group];
+ int group = mt7915_get_channel_group_6g(chan->hw_value);
- if (!tssi_on)
- target_power += eeprom[index + 8];
+ index = MT_EE_TX0_POWER_6G_V2 + chain_idx * 8;
+ target_power = is_7976 ? eeprom[index + group] : 0;
}
return target_power;
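
A worked example of the v2 (7976) indexing above, for 5 GHz, chain 1, channel 100:

/*   group = mt7915_get_channel_group_5g(100, true)  = 2    (100 <= 128)
 *   index = MT_EE_TX0_POWER_5G_V2 + 1 * 5           = 0x44a
 *   target_power = eeprom[index + group]            = eeprom[0x44c]
 *
 * The legacy path additionally applies the non-TSSI delta byte at
 * eeprom[index + 8]; the v2 tables carry no such byte.
 */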
@@ -215,15 +310,20 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
u8 *eeprom = dev->mt76.eeprom.data;
- u32 val;
+ u32 val, offs;
s8 delta;
+ bool is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
if (band == NL80211_BAND_2GHZ)
- val = eeprom[MT_EE_RATE_DELTA_2G];
+ offs = is_7976 ? MT_EE_RATE_DELTA_2G_V2 : MT_EE_RATE_DELTA_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ offs = is_7976 ? MT_EE_RATE_DELTA_5G_V2 : MT_EE_RATE_DELTA_5G;
else
- val = eeprom[MT_EE_RATE_DELTA_5G];
+ offs = is_7976 ? MT_EE_RATE_DELTA_6G_V2 : 0;
+
+ val = eeprom[offs];
- if (!(val & MT_EE_RATE_DELTA_EN))
+ if (!offs || !(val & MT_EE_RATE_DELTA_EN))
return 0;
delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
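
The same band dispatch as a stand-alone helper, to make the zero-offset guard explicit (rate_delta_offs() is illustrative; the driver keeps it inline and short-circuits on !offs before testing the enable bit, since 6 GHz has no v1 slot):

static u32 rate_delta_offs(int band, bool is_7976)
{
	switch (band) {
	case NL80211_BAND_2GHZ:
		return is_7976 ? MT_EE_RATE_DELTA_2G_V2 : MT_EE_RATE_DELTA_2G;
	case NL80211_BAND_5GHZ:
		return is_7976 ? MT_EE_RATE_DELTA_5G_V2 : MT_EE_RATE_DELTA_5G;
	default:
		/* 6 GHz exists only in the v2 layout */
		return is_7976 ? MT_EE_RATE_DELTA_6G_V2 : 0;
	}
}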
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index a43389a41800..7578ac6d0be6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -23,11 +23,19 @@ enum mt7915_eeprom_field {
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
+ MT_EE_RATE_DELTA_5G_V2 = 0x81e,
+ MT_EE_RATE_DELTA_6G_V2 = 0x884, /* 6g fields only appear in eeprom v2 */
+ MT_EE_TX0_POWER_2G_V2 = 0x441,
+ MT_EE_TX0_POWER_5G_V2 = 0x445,
+ MT_EE_TX0_POWER_6G_V2 = 0x465,
MT_EE_ADIE_FT_VERSION = 0x9a0,
__MT_EE_MAX = 0xe00,
+ __MT_EE_MAX_V2 = 0x1000,
/* 0xe10 ~ 0x5780 used to save group cal data */
- MT_EE_PRECAL = 0xe10
+ MT_EE_PRECAL = 0xe10,
+ MT_EE_PRECAL_V2 = 0x1010
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
@@ -39,6 +47,7 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)
@@ -49,6 +58,19 @@ enum mt7915_eeprom_field {
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
+#define MT_EE_NSS_MAX_MA7915 4
+#define MT_EE_NSS_MAX_DBDC_MA7915 2
+#define MT_EE_NSS_MAX_MA7986 4
+#define MT_EE_NSS_MAX_DBDC_MA7986 4
+
+enum mt7915_adie_sku {
+ MT7976_ONE_ADIE_DBDC = 0x7,
+ MT7975_ONE_ADIE = 0x8,
+ MT7976_ONE_ADIE = 0xa,
+ MT7975_DUAL_ADIE = 0xd,
+ MT7976_DUAL_ADIE = 0xf,
+};
+
enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DEFAULT,
MT_EE_BAND_SEL_5GHZ,
@@ -56,6 +78,13 @@ enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DUAL,
};
+enum {
+ MT_EE_V2_BAND_SEL_2GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ,
+ MT_EE_V2_BAND_SEL_6GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ_6GHZ,
+};
+
enum mt7915_sku_rate_group {
SKU_CCK,
SKU_OFDM,
@@ -76,8 +105,20 @@ enum mt7915_sku_rate_group {
};
static inline int
-mt7915_get_channel_group(int channel)
+mt7915_get_channel_group_5g(int channel, bool is_7976)
{
+ if (is_7976) {
+ if (channel <= 64)
+ return 0;
+ if (channel <= 96)
+ return 1;
+ if (channel <= 128)
+ return 2;
+ if (channel <= 144)
+ return 3;
+ return 4;
+ }
+
if (channel >= 184 && channel <= 196)
return 0;
if (channel <= 48)
@@ -95,6 +136,15 @@ mt7915_get_channel_group(int channel)
return 7;
}
+static inline int
+mt7915_get_channel_group_6g(int channel)
+{
+ if (channel <= 29)
+ return 0;
+
+ return DIV_ROUND_UP(channel - 29, 32);
+}
+
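
Worked values for the group helpers above:

/*   mt7915_get_channel_group_5g(36,  true) = 0   (36  <= 64)
 *   mt7915_get_channel_group_5g(100, true) = 2   (100 <= 128)
 *   mt7915_get_channel_group_5g(149, true) = 4   (above 144)
 *
 *   mt7915_get_channel_group_6g(5)  = 0                     (<= 29)
 *   mt7915_get_channel_group_6g(37) = DIV_ROUND_UP(8, 32)   = 1
 *   mt7915_get_channel_group_6g(93) = DIV_ROUND_UP(64, 32)  = 2
 */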
static inline bool
mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index d054cdecd5f7..cc2aac86bcfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -50,15 +50,22 @@ static ssize_t mt7915_thermal_temp_show(struct device *dev,
int i = to_sensor_dev_attr(attr)->index;
int temperature;
- if (i)
- return sprintf(buf, "%u\n", phy->throttle_temp[i - 1] * 1000);
-
- temperature = mt7915_mcu_get_temperature(phy);
- if (temperature < 0)
- return temperature;
-
- /* display in millidegree celcius */
- return sprintf(buf, "%u\n", temperature * 1000);
+ switch (i) {
+ case 0:
+ temperature = mt7915_mcu_get_temperature(phy);
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ case 1:
+ case 2:
+ return sprintf(buf, "%u\n",
+ phy->throttle_temp[i - 1] * 1000);
+ case 3:
+ return sprintf(buf, "%hhu\n", phy->throttle_state);
+ default:
+ return -EINVAL;
+ }
}
static ssize_t mt7915_thermal_temp_store(struct device *dev,
@@ -84,11 +91,13 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0);
static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1);
static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(throttle1, mt7915_thermal_temp, 3);
static struct attribute *mt7915_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_throttle1.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7915_hwmon);
@@ -97,7 +106,7 @@ static int
mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = MT7915_THERMAL_THROTTLE_MAX;
+ *state = MT7915_CDEV_THROTTLE_MAX;
return 0;
}
@@ -108,7 +117,7 @@ mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
{
struct mt7915_phy *phy = cdev->devdata;
- *state = phy->throttle_state;
+ *state = phy->cdev_state;
return 0;
}
@@ -118,22 +127,27 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct mt7915_phy *phy = cdev->devdata;
+ u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
int ret;
- if (state > MT7915_THERMAL_THROTTLE_MAX)
+ if (state > MT7915_CDEV_THROTTLE_MAX)
return -EINVAL;
if (phy->throttle_temp[0] > phy->throttle_temp[1])
return 0;
- if (state == phy->throttle_state)
+ if (state == phy->cdev_state)
return 0;
- ret = mt7915_mcu_set_thermal_throttling(phy, state);
+ /*
+ * cooling_device convention: 0 = no cooling, more = more cooling
+ * mcu convention: 1 = max cooling, more = less cooling
+ */
+ ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
if (ret)
return ret;
- phy->throttle_state = state;
+ phy->cdev_state = state;
return 0;
}
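
A minimal sketch of that scale inversion, assuming (as in mt7915.h, not shown in this hunk) MT7915_CDEV_THROTTLE_MAX == MT7915_THERMAL_THROTTLE_MAX - 1:

static u8 cdev_state_to_mcu_level(unsigned long state)
{
	/* state 0 (no cooling)           -> MT7915_THERMAL_THROTTLE_MAX
	 * state MT7915_CDEV_THROTTLE_MAX -> 1 (strongest throttling)
	 */
	return MT7915_THERMAL_THROTTLE_MAX - state;
}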
@@ -186,7 +200,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
phy->throttle_temp[0] = 110;
phy->throttle_temp[1] = 120;
- return 0;
+ return mt7915_mcu_set_thermal_throttling(phy,
+ MT7915_THERMAL_THROTTLE_MAX);
}
static void mt7915_led_set_config(struct led_classdev *led_cdev,
@@ -288,17 +303,18 @@ mt7915_regd_notifier(struct wiphy *wiphy,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
struct mt7915_phy *phy = mphy->priv;
- struct cfg80211_chan_def *chandef = &mphy->chandef;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7915_mcu_rdd_background_enable(phy, NULL);
+
mt7915_init_txpower(dev, &mphy->sband_2g.sband);
mt7915_init_txpower(dev, &mphy->sband_5g.sband);
+ mt7915_init_txpower(dev, &mphy->sband_6g.sband);
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
+ mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7915_dfs_init_radar_detector(phy);
}
@@ -306,11 +322,13 @@ static void
mt7915_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
+ struct mt7915_dev *dev = phy->dev;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -325,6 +343,7 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7915_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->mbssid_max_interfaces = 16;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -332,31 +351,66 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
+
+ if (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background"))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
hw->max_tx_fragments = 4;
- if (phy->mt76->cap.has_2ghz)
+ if (phy->mt76->cap.has_2ghz) {
phy->mt76->sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_4;
+ }
if (phy->mt76->cap.has_5ghz) {
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_4;
+
+ if (is_mt7915(&dev->mt76)) {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ } else {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ /* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ }
}
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
mt7915_set_stream_he_caps(phy);
+
+ wiphy->available_antennas_rx = phy->mt76->antenna_mask;
+ wiphy->available_antennas_tx = phy->mt76->antenna_mask;
}
static void
@@ -387,19 +441,30 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
- /* disable rx rate report by default due to hw issues */
+
+ /* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
static void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
+ u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
+
+ /* config pse qid6 wfdma port selection */
+ if (!is_mt7915(&dev->mt76) && dev->hif2)
+ mt76_rmw(dev, MT_WF_PP_TOP_RXQ_WFDMA_CF_5, 0,
+ MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK);
+
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
+
+ if (!is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
- mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
- for (i = 0; i < MT7915_WTBL_SIZE; i++)
+ for (i = 0; i < mt7915_wtbl_size(dev); i++)
mt7915_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
@@ -430,59 +495,70 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
-static int mt7915_register_ext_phy(struct mt7915_dev *dev)
+static struct mt7915_phy *
+mt7915_alloc_ext_phy(struct mt7915_dev *dev)
{
- struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt7915_phy *phy;
struct mt76_phy *mphy;
- int ret;
if (!dev->dbdc_support)
- return 0;
+ return NULL;
- if (phy)
- return 0;
-
- mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops, MT_BAND1);
if (!mphy)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
- mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
- mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
+
+ /* Bind main phy to band0 and ext_phy to band1 for dbdc case */
+ phy->band_idx = 1;
+
+ return phy;
+}
+
+static int
+mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ int ret;
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
- mt7915_eeprom_parse_band_config(phy);
- mt7915_init_wiphy(mphy->hw);
+ mt7915_eeprom_parse_hw_cap(dev, phy);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
ETH_ALEN);
+ /* Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ if (!is_valid_ether_addr(mphy->macaddr)) {
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ }
mt76_eeprom_override(mphy);
- ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
- MT7915_TX_RING_SIZE);
- if (ret)
- goto error;
+ /* init wiphy according to mphy and phy */
+ mt7915_init_wiphy(mphy->hw);
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
- goto error;
+ return ret;
ret = mt7915_thermal_init(phy);
if (ret)
- goto error;
+ goto unreg;
- ret = mt7915_init_debugfs(phy);
- if (ret)
- goto error;
+ mt7915_init_debugfs(phy);
return 0;
-error:
- ieee80211_free_hw(mphy->hw);
+unreg:
+ mt76_unregister_phy(mphy);
return ret;
}
@@ -495,89 +571,114 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_mac_init(dev);
mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_6g.sband);
mt7915_txbf_init(dev);
}
-static void mt7915_wfsys_reset(struct mt7915_dev *dev)
+void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
- u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
-
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
- mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
+ if (is_mt7915(&dev->mt76)) {
+ u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
- /* change to software control */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
- /* reset wfsys */
- val &= ~MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* change to software control */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* release wfsys then mcu re-excutes romcode */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* reset wfsys */
+ val &= ~MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* switch to hw control */
- val &= ~MT_TOP_PWR_SW_RST;
- val |= MT_TOP_PWR_HW_CTRL;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* release wfsys then mcu re-executes romcode */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* check whether mcu resets to default */
- if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
- MT_MCU_DUMMY_DEFAULT, 1000)) {
- dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
- return;
+ /* switch to hw control */
+ val &= ~MT_TOP_PWR_SW_RST;
+ val |= MT_TOP_PWR_HW_CTRL;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* check whether mcu resets to default */
+ if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR,
+ MT_MCU_DUMMY_DEFAULT, MT_MCU_DUMMY_DEFAULT,
+ 1000)) {
+ dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
+ return;
+ }
+
+ /* wfsys reset won't clear host registers */
+ mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+
+ msleep(100);
+ } else if (is_mt7986(&dev->mt76)) {
+ mt7986_wmac_disable(dev);
+ msleep(20);
+
+ mt7986_wmac_enable(dev);
+ msleep(20);
+ } else {
+ mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+
+ mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
}
+}
+
+static bool mt7915_band_config(struct mt7915_dev *dev)
+{
+ bool ret = true;
- /* wfsys reset won't clear host registers */
- mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+ dev->phy.band_idx = 0;
- msleep(100);
+ if (is_mt7986(&dev->mt76)) {
+ u32 sku = mt7915_check_adie(dev, true);
+
+ /*
+ * for mt7986, dbdc support is determined by the number
+ * of adie chips and the main phy is bound to band1 when
+ * dbdc is disabled.
+ */
+ if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
+ dev->phy.band_idx = 1;
+ ret = false;
+ }
+ } else {
+ ret = is_mt7915(&dev->mt76) ?
+ !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
+ }
+
+ return ret;
}
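
Summarising the new dbdc/band binding decision as a comment block (values per the function above):

/* mt7986, one a-die  (MT7975_ONE_ADIE / MT7976_ONE_ADIE):
 *        dbdc_support = false, main phy bound to band1
 * mt7986, dual a-die: dbdc_support = true,  main phy on band0
 * mt7915:             dbdc_support = !!(MT_HW_BOUND & BIT(5))
 * others (mt7916):    dbdc_support = true
 */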
-static int mt7915_init_hardware(struct mt7915_dev *dev)
+static int
+mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
int ret, idx;
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
-
- /* If MCU was already running, it is likely in a bad state */
- if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
- FW_STATE_FW_DOWNLOAD)
- mt7915_wfsys_reset(dev);
- ret = mt7915_dma_init(dev);
+ ret = mt7915_dma_init(dev, phy2);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
- /*
- * force firmware operation mode into normal state,
- * which should be set before firmware download stage.
- */
- mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
-
ret = mt7915_mcu_init(dev);
- if (ret) {
- /* Reset and try again */
- mt7915_wfsys_reset(dev);
-
- ret = mt7915_mcu_init(dev);
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
ret = mt7915_eeprom_init(dev);
if (ret < 0)
return ret;
-
if (dev->flash_mode) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
@@ -626,11 +727,18 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
}
static void
-mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
+ struct ieee80211_sta_he_cap *he_cap,
int vif, int nss)
{
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
- u8 c;
+ u8 c, nss_160;
+
+ /* Can do 1/2 of NSS streams in 160 MHz mode for mt7915 */
+ if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ nss_160 = nss / 2;
+ else
+ nss_160 = nss;
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -650,9 +758,10 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
- c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US;
+ if (!is_mt7915(&dev->mt76))
+ c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -684,20 +793,28 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
/* num_snd_dim
* for mt7915, max supported nss is 2 for bw > 80MHz
*/
- c = (nss - 1) |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ nss - 1) |
+ FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ nss_160 - 1);
elem->phy_cap_info[5] |= c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
+
+ if (!is_mt7915(&dev->mt76)) {
+ c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ elem->phy_cap_info[7] |= c;
+ }
}
static void
mt7915_gen_ppe_thresh(u8 *he_ppet, int nss)
{
u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
- u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+ static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@@ -718,9 +835,17 @@ static int
mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data)
{
+ struct mt7915_dev *dev = phy->dev;
int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
+ u8 nss_160;
+
+ /* Can do 1/2 of NSS streams in 160 MHz mode for mt7915 */
+ if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ nss_160 = nss / 2;
+ else
+ nss_160 = nss;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -728,8 +853,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
else
mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
- /* Can do 1/2 of NSS streams in 160Mhz mode. */
- if (i < nss / 2)
+ if (i < nss_160)
mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
else
mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
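
Worked result of the loop above for mt7915 non-dbdc with nss = 4 (so nss_160 = 2):

/* mcs_map     : streams 0..3 -> IEEE80211_HE_MCS_SUPPORT_0_11,
 *               streams 4..7 -> IEEE80211_HE_MCS_NOT_SUPPORTED
 * mcs_map_160 : streams 0..1 -> IEEE80211_HE_MCS_SUPPORT_0_11,
 *               streams 2..7 -> IEEE80211_HE_MCS_NOT_SUPPORTED
 *
 * i.e. 160 MHz advertises half the spatial streams on mt7915, while
 * mt7916/mt7986 keep nss_160 == nss.
 */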
@@ -767,7 +891,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -806,7 +930,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
@@ -845,7 +969,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
- mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
+ mt7915_set_stream_he_txbf_caps(dev, he_cap, i, nss);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@@ -856,6 +980,21 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
+
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+
+ data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+
idx++;
}
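
One note on the element assembled above:

/* The HE 6 GHz capability element has no constant namespace of its
 * own for these sub-fields; it reuses the HT (MPDU density) and VHT
 * (max A-MPDU exponent, max MPDU length) encodings, which is why
 * IEEE80211_HT_ and IEEE80211_VHT_ values feed the
 * IEEE80211_HE_6GHZ_CAP_* masks here.
 */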
@@ -885,12 +1024,21 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
band->iftype_data = data;
band->n_iftype_data = n;
}
+
+ if (phy->mt76->cap.has_6ghz) {
+ data = phy->iftype[NL80211_BAND_6GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+ band = &phy->mt76->sband_6g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
}
static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
{
struct mt7915_phy *phy = mt7915_ext_phy(dev);
- struct mt76_phy *mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = dev->mt76.phys[MT_BAND1];
if (!phy)
return;
@@ -900,9 +1048,22 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
ieee80211_free_hw(mphy->hw);
}
+static void mt7915_stop_hardware(struct mt7915_dev *dev)
+{
+ mt7915_mcu_exit(dev);
+ mt7915_tx_token_put(dev);
+ mt7915_dma_cleanup(dev);
+ tasklet_disable(&dev->irq_tasklet);
+
+ if (is_mt7986(&dev->mt76))
+ mt7986_wmac_disable(dev);
+}
+
int mt7915_register_device(struct mt7915_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7915_phy *phy2;
int ret;
dev->phy.dev = dev;
@@ -918,20 +1079,17 @@ int mt7915_register_device(struct mt7915_dev *dev)
init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
- ret = mt7915_init_hardware(dev);
- if (ret)
- return ret;
+ dev->dbdc_support = mt7915_band_config(dev);
- mt7915_init_wiphy(hw);
+ phy2 = mt7915_alloc_ext_phy(dev);
+ if (IS_ERR(phy2))
+ return PTR_ERR(phy2);
- if (!dev->dbdc_support)
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ ret = mt7915_init_hardware(dev, phy2);
+ if (ret)
+ goto free_phy2;
- dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
- dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- dev->phy.dfs_state = -1;
+ mt7915_init_wiphy(hw);
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
@@ -946,19 +1104,34 @@ int mt7915_register_device(struct mt7915_dev *dev)
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
- return ret;
+ goto stop_hw;
ret = mt7915_thermal_init(&dev->phy);
if (ret)
- return ret;
+ goto unreg_dev;
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
- ret = mt7915_register_ext_phy(dev);
- if (ret)
- return ret;
+ if (phy2) {
+ ret = mt7915_register_ext_phy(dev, phy2);
+ if (ret)
+ goto unreg_thermal;
+ }
+
+ mt7915_init_debugfs(&dev->phy);
+
+ return 0;
- return mt7915_init_debugfs(&dev->phy);
+unreg_thermal:
+ mt7915_unregister_thermal(&dev->phy);
+unreg_dev:
+ mt76_unregister_device(&dev->mt76);
+stop_hw:
+ mt7915_stop_hardware(dev);
+free_phy2:
+ if (phy2)
+ ieee80211_free_hw(phy2->mt76->hw);
+ return ret;
}
void mt7915_unregister_device(struct mt7915_dev *dev)
@@ -966,10 +1139,7 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_unregister_ext_phy(dev);
mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
- mt7915_mcu_exit(dev);
- mt7915_tx_token_put(dev);
- mt7915_dma_cleanup(dev);
- tasklet_disable(&dev->irq_tasklet);
+ mt7915_stop_hardware(dev);
mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 48f115502282..a4bcc617c1a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -10,10 +10,6 @@
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
-#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
-#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
- IEEE80211_RADIOTAP_HE_##f)
-
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
.radar_pattern = {
@@ -165,7 +161,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7915_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@@ -218,250 +214,6 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
rcu_read_unlock();
}
-static void
-mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
- struct ieee80211_radiotap_he *he,
- __le32 *rxv)
-{
- u32 ru_h, ru_l;
- u8 ru, offs = 0;
-
- ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
- ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
- ru = (u8)(ru_l | ru_h << 4);
-
- status->bw = RATE_INFO_BW_HE_RU;
-
- switch (ru) {
- case 0 ... 36:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
-
- he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
- he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-}
-
-static void
-mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he_mu mu_known = {
- .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
- HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
- .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
- };
- struct ieee80211_radiotap_he_mu *he_mu = NULL;
-
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
-
- he_mu = skb_push(skb, sizeof(mu_known));
- memcpy(he_mu, &mu_known, sizeof(mu_known));
-
-#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
-
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
- if (status->he_dcm)
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
-
- he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
- MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
- le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
-
- he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
-
- if (status->bw >= RATE_INFO_BW_40) {
- he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
- he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
- }
-
- if (status->bw >= RATE_INFO_BW_80) {
- he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
- he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
- }
-}
-
-static void
-mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he known = {
- .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
- HE_BITS(DATA1_DATA_DCM_KNOWN) |
- HE_BITS(DATA1_STBC_KNOWN) |
- HE_BITS(DATA1_CODING_KNOWN) |
- HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
- HE_BITS(DATA1_DOPPLER_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
- HE_BITS(DATA1_BSS_COLOR_KNOWN),
- .data2 = HE_BITS(DATA2_GI_KNOWN) |
- HE_BITS(DATA2_TXBF_KNOWN) |
- HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
- HE_BITS(DATA2_TXOP_KNOWN),
- };
- struct ieee80211_radiotap_he *he = NULL;
- u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
-
- status->flag |= RX_FLAG_RADIOTAP_HE;
-
- he = skb_push(skb, sizeof(known));
- memcpy(he, &known, sizeof(known));
-
- he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
- HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
- he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
- he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
- le16_encode_bits(ltf_size,
- IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
- if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
- he->data5 |= HE_BITS(DATA5_TXBF);
- he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
- HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
-
- switch (mode) {
- case MT_PHY_TYPE_HE_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
- HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_EXT_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_MU:
- he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
-
- mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
- mt7915_mac_decode_he_mu_radiotap(skb, rxv);
- break;
- case MT_PHY_TYPE_HE_TB:
- he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
-
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
-
- mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
- break;
- default:
- break;
- }
-}
-
-/* The HW does not translate the mac header to 802.3 for mesh point */
-static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
- struct ieee80211_sta *sta;
- struct ieee80211_vif *vif;
- struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
-
- if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
- MT_RXD3_NORMAL_U2M)
- return -EINVAL;
-
- if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
- return -EINVAL;
-
- if (!msta || !msta->vif)
- return -EINVAL;
-
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
-
- /* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
- hdr.duration_id = 0;
- ether_addr_copy(hdr.addr1, vif->addr);
- ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
- case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
- break;
- case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
- break;
- case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- break;
- case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
- break;
- default:
- break;
- }
-
- skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
- ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
- ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
- else
- skb_pull(skb, 2);
-
- if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
- if (ieee80211_has_a4(hdr.frame_control))
- memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
- else
- memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
-
- return 0;
-}
-
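
The HE radiotap and RU decoding deleted here is not lost; it moves into the shared mt76-connac2 helpers called further down (mt76_connac2_mac_decode_he_radiotap(), mt76_connac2_mac_fill_rx_rate(), mt76_connac2_reverse_frag0_hdr_trans()). For reference, the RU index ranges the removed decoder mapped:

/*   ru  0..36 -> RU_ALLOC_26      ru 61..64 -> RU_ALLOC_242
 *   ru 37..52 -> RU_ALLOC_52      ru 65..66 -> RU_ALLOC_484
 *   ru 53..60 -> RU_ALLOC_106     ru 67     -> RU_ALLOC_996
 *                                 ru 68     -> RU_ALLOC_2x996
 */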
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
@@ -471,7 +223,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv = NULL;
- u32 mode = 0;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
@@ -480,22 +231,24 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
+ u8 mode = 0, qos_ctl = 0;
+ struct mt7915_sta *msta = NULL;
+ u32 csum_status = *(u32 *)skb->cb;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
- u8 qos_ctl = 0;
__le16 fc = 0;
- int i, idx;
+ int idx;
memset(status, 0, sizeof(*status));
- if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
- mphy = dev->mt76.phy2;
+ if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
+ mphy = dev->mt76.phys[MT_BAND1];
if (!mphy)
return -EINVAL;
phy = mphy->priv;
- status->ext_phy = true;
+ status->phy_idx = 1;
}
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
@@ -517,8 +270,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- struct mt7915_sta *msta;
-
msta = container_of(status->wcid, struct mt7915_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
@@ -530,13 +281,16 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->band = mphy->chandef.chan->band;
if (status->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (status->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask)
+ if ((rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
@@ -626,7 +380,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u32 v0, v1, v2;
+ u32 v0, v1;
+ int ret;
rxv = rxd;
rxd += 2;
@@ -635,7 +390,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
- v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
@@ -645,94 +399,19 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
/* RXD Group 5 - C-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
- u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
- u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- bool cck = false;
-
rxd += 18;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
+ }
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 9)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
- return -EINVAL;
- }
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
+ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
+ sband, rxv, &mode);
+ if (ret < 0)
+ return ret;
}
}
@@ -745,8 +424,18 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
- if (mt7915_reverse_frag0_hdr_trans(skb, hdr_gap))
+ struct ieee80211_vif *vif;
+ int err;
+
+ if (!msta || !msta->vif)
return -EINVAL;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
+ if (err)
+ return err;
+
hdr_trans = false;
} else {
int pad_start = 0;
@@ -759,14 +448,14 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
* When header translation failure is indicated,
* the hardware will insert an extra 2-byte field
* containing the data length after the protocol
- * type field.
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
*/
pad_start = 12;
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
pad_start += 4;
-
- if (get_unaligned_be16(skb->data + pad_start) !=
- skb->len - pad_start - 2)
+ else
pad_start = 0;
}
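
A minimal stand-alone sketch of the simplified pad detection (detect_pad_start() is illustrative; the driver keeps this inline):

static int detect_pad_start(const struct sk_buff *skb)
{
	int pad_start = 12;	/* after destination + source MAC */

	if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
		pad_start += 4;	/* skip the VLAN tag */
	else
		pad_start = 0;	/* no VLAN: no pad to strip */

	return pad_start;
}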
@@ -796,7 +485,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt7915_mac_decode_he_radiotap(skb, rxv, mode);
+ mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -818,14 +507,17 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
__le32 *rxv_hdr = rxd + 2;
__le32 *rxv = rxd + 4;
u32 rcpi, ib_rssi, wb_rssi, v20, v21;
- bool ext_phy;
+ u8 band_idx;
s32 foe;
u8 snr;
int i;
- ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
- if (ext_phy)
+ band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
+ if (band_idx && !phy->band_idx) {
phy = mt7915_ext_phy(dev);
+ if (!phy)
+ goto out;
+ }
rcpi = le32_to_cpu(rxv[6]);
ib_rssi = le32_to_cpu(rxv[7]);
@@ -850,8 +542,8 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
phy->test.last_freq_offset = foe;
phy->test.last_snr = snr;
+out:
#endif
-
dev_kfree_skb(skb);
}
@@ -970,254 +662,19 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
#endif
}
-static void
-mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid)
-{
-
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- u8 fc_type, fc_stype;
- bool wmm = false;
- u32 val;
-
- if (wcid->sta) {
- struct ieee80211_sta *sta;
-
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
- wmm = sta->wme;
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
- FIELD_PREP(MT_TXD1_TID, tid);
-
- if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
- val |= MT_TXD1_ETH_802_3;
-
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = IEEE80211_FTYPE_DATA >> 2;
- fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
-
- txwi[2] |= cpu_to_le32(val);
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-static void
-mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key,
- bool *mcast)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
- u8 fc_type, fc_stype;
- u32 val;
-
- *mcast = is_multicast_ether_addr(hdr->addr1);
-
- if (ieee80211_is_action(fc) &&
- mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
- u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
-
- txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
- tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
- } else if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
- u16 control = le16_to_cpu(bar->control);
-
- tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
- FIELD_PREP(MT_TXD1_HDR_INFO,
- ieee80211_get_hdrlen_from_skb(skb) / 2) |
- FIELD_PREP(MT_TXD1_TID, tid);
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
- fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
- FIELD_PREP(MT_TXD2_MULTICAST, *mcast);
-
- if (key && *mcast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- val |= MT_TXD2_BIP;
- txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
- }
-
- if (!ieee80211_is_data(fc) || *mcast ||
- info->flags & IEEE80211_TX_CTL_USE_MINRATE)
- val |= MT_TXD2_FIX_RATE;
-
- txwi[2] |= cpu_to_le32(val);
-
- if (ieee80211_is_beacon(fc)) {
- txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
- txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
- }
-
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
-
- if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar;
-
- bar = (struct ieee80211_bar *)skb->data;
- seqno = le16_to_cpu(bar->start_seq_num);
- }
-
- val = MT_TXD3_SN_VALID |
- FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
- txwi[3] |= cpu_to_le32(val);
- }
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-static u16
-mt7915_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- bool beacon, bool mcast)
-{
- u8 mode = 0, band = mphy->chandef.chan->band;
- int rateidx = 0, mcast_rate;
-
- if (beacon) {
- struct cfg80211_bitrate_mask *mask;
-
- mask = &vif->bss_conf.beacon_tx_rate;
- if (hweight16(mask->control[band].he_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HE_SU;
- goto out;
- } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_VHT;
- goto out;
- } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HT;
- goto out;
- } else if (hweight32(mask->control[band].legacy) == 1) {
- rateidx = ffs(mask->control[band].legacy) - 1;
- goto legacy;
- }
- }
-
- mcast_rate = vif->bss_conf.mcast_rate[band];
- if (mcast && mcast_rate > 0)
- rateidx = mcast_rate - 1;
- else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
-
-legacy:
- rateidx = mt76_calculate_default_rate(mphy, rateidx);
- mode = rateidx >> 8;
- rateidx &= GENMASK(7, 0);
-
-out:
- return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
- FIELD_PREP(MT_TX_RATE_MODE, mode);
-}
-
-void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
- struct ieee80211_key_conf *key, bool beacon)
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, u32 changed)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_phy *mphy = &dev->mphy;
- bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
- u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
- bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- bool mcast = false;
- u16 tx_count = 15;
- u32 val;
-
- if (vif) {
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct mt76_phy *mphy = &dev->phy;
- omac_idx = mvif->mt76.omac_idx;
- wmm_idx = mvif->mt76.wmm_idx;
- }
+ if (phy_idx && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
-
- if (beacon) {
- p_fmt = MT_TX_TYPE_FW;
- q_idx = MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
- p_fmt = MT_TX_TYPE_CT;
- q_idx = MT_LMAC_ALTX0;
- } else {
- p_fmt = MT_TX_TYPE_CT;
- q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
- mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
- }
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
- FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
- FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
- txwi[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT | MT_TXD1_VTA |
- FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
- FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
-
- if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
- val |= MT_TXD1_TGID;
-
- txwi[1] = cpu_to_le32(val);
-
- txwi[2] = 0;
-
- val = MT_TXD3_SW_POWER_MGMT |
- FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
- if (key)
- val |= MT_TXD3_PROTECT_FRAME;
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- val |= MT_TXD3_NO_ACK;
-
- txwi[3] = cpu_to_le32(val);
- txwi[4] = 0;
-
- val = FIELD_PREP(MT_TXD5_PID, pid);
- if (pid >= MT_PACKET_ID_FIRST)
- val |= MT_TXD5_TX_STATUS_HOST;
- txwi[5] = cpu_to_le32(val);
-
- txwi[6] = 0;
- txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
-
- if (is_8023)
- mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid);
- else
- mt7915_mac_write_txwi_80211(dev, txwi, skb, key, &mcast);
-
- if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- u16 rate = mt7915_mac_tx_rate_val(mphy, vif, beacon, mcast);
-
- /* hardware won't add HTC for mgmt/ctrl frame */
- txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
-
- val = MT_TXD6_FIXED_BW |
- FIELD_PREP(MT_TXD6_TX_RATE, rate);
- txwi[6] |= cpu_to_le32(val);
- txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
- }
+ mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
if (mt76_testmode_enabled(mphy))
mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
@@ -1233,8 +690,8 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_connac_fw_txp *txp;
struct mt76_txwi_cache *t;
- struct mt7915_txp *txp;
int id, i, nbuf = tx_info->nbuf - 1;
u8 *txwi = (u8 *)txwi_ptr;
int pid;
@@ -1264,10 +721,10 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return id;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
- false);
+ mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
+ qid, 0);
- txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
+ txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
@@ -1304,6 +761,29 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
+u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
+ __le32 *txwi = ptr;
+ u32 val;
+
+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+ txwi[1] = cpu_to_le32(val);
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+ return MT_TXD_SIZE + sizeof(*txp);
+}
+
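
mt7915_wed_init_buf() lays out a TXD followed by a firmware TXP and points the single fragment just past both; the returned value doubles as the payload offset. A rough userspace sketch of that arithmetic, assuming MT_TXD_SIZE is still 8 dwords and that struct mt76_connac_fw_txp mirrors the removed struct mt7915_txp layout:

#include <stdint.h>
#include <stdio.h>

#define MT_TXD_SIZE		(8 * 4)	/* 8 TXD dwords, per the removed define */
#define MT_TXP_MAX_BUF_NUM	6

/* assumed to mirror the removed struct mt7915_txp layout */
struct fw_txp_sketch {
	uint16_t flags;
	uint16_t token;
	uint8_t bss_idx;
	uint16_t rept_wds_wcid;
	uint8_t nbuf;
	uint32_t buf[MT_TXP_MAX_BUF_NUM];
	uint16_t len[MT_TXP_MAX_BUF_NUM];
} __attribute__((packed, aligned(4)));

int main(void)
{
	/* the payload starts right past TXD + TXP; this is also the
	 * value mt7915_wed_init_buf() returns */
	size_t hdr = MT_TXD_SIZE + sizeof(struct fw_txp_sketch);

	printf("txwi header size: %zu bytes\n", hdr);	/* 32 + 44 = 76 */
	return 0;
}
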
static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
@@ -1311,10 +791,10 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
u16 fc, tid;
u32 val;
- if (!sta || !sta->ht_cap.ht_supported)
+ if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
return;
- tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
if (tid >= 6) /* skip VO queue */
return;
@@ -1330,27 +810,16 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
}
static void
-mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- struct mt7915_txp *txp;
- int i;
-
- txp = mt7915_txwi_to_txp(dev, t);
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7915_sta *msta;
struct mt76_wcid *wcid;
__le32 *txwi;
u16 wcid_idx;
- mt7915_txp_skb_unmap(mdev, t);
+ mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
@@ -1358,13 +827,24 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
if (sta) {
wcid = (struct mt76_wcid *)sta->drv_priv;
wcid_idx = wcid->idx;
-
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7915_tx_check_aggr(sta, txwi);
} else {
- wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
+
+ if (wcid && wcid->sta) {
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
}
+ if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi);
+
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
@@ -1373,18 +853,10 @@ out:
}
static void
-mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
- struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_phy *mphy_ext = mdev->phy2;
- struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
- LIST_HEAD(free_list);
- struct sk_buff *skb, *tmp;
- void *end = data + len;
- u8 i, count;
- bool wake = false;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1393,18 +865,52 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
}
+}
- /*
- * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
- if (WARN_ON_ONCE((void *)&free->info[count] > end))
+static void
+mt7915_mac_tx_free_done(struct mt7915_dev *dev,
+ struct list_head *free_list, bool wake)
+{
+ struct sk_buff *skb, *tmp;
+
+ mt7915_mac_sta_poll(dev);
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+ list_for_each_entry_safe(skb, tmp, free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+}
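
mt7915_mac_tx_free_done() drains a private list that was filled while walking the firmware event, so the skbs are released only after the token state and tx-blocked flag have been handled. A small standalone sketch of the same detach-first, free-later pattern, with plain malloc()/free() standing in for skbs and napi_consume_skb():

#include <stdlib.h>

struct skb_sketch {
	struct skb_sketch *next;
};

/* drain a private free list once all locks are dropped; free() stands
 * in for napi_consume_skb() */
static void free_list_drain(struct skb_sketch *head)
{
	while (head) {
		struct skb_sketch *next = head->next;

		free(head);
		head = next;
	}
}

int main(void)
{
	struct skb_sketch *a = malloc(sizeof(*a));
	struct skb_sketch *b = malloc(sizeof(*b));

	a->next = b;
	b->next = NULL;
	free_list_drain(a);
	return 0;
}
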
+
+static void
+mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+{
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ LIST_HEAD(free_list);
+ void *end = data + len;
+ bool v3, wake = false;
+ u16 total, count = 0;
+ u32 txd = le32_to_cpu(free->txd);
+ __le32 *cur_info;
+
+ mt7915_mac_tx_free_prepare(dev);
+
+ total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
+ if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
return;
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
+ for (cur_info = tx_info; count < total; cur_info++) {
+ u32 msdu, info = le32_to_cpu(*cur_info);
+ u8 i;
/*
* 1'b1: new wcid pair.
@@ -1415,7 +921,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
struct mt76_wcid *wcid;
u16 idx;
- count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
@@ -1430,146 +935,58 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
continue;
}
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
+ if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
continue;
- mt7915_txwi_free(dev, txwi, sta, &free_list);
- }
-
- mt7915_mac_sta_poll(dev);
-
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
+ for (i = 0; i < 1 + v3; i++) {
+ if (v3) {
+ msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
+ if (msdu == MT_TX_FREE_MSDU_ID_V3)
+ continue;
+ } else {
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ }
+ count++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
- list_for_each_entry_safe(skb, tmp, &free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
+ mt7915_txwi_free(dev, txwi, sta, &free_list);
+ }
}
+
+ mt7915_mac_tx_free_done(dev, &free_list, wake);
}
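
In the v3 event format each 32-bit info word carries two 15-bit MSDU token IDs (hence the `total >> v3` bound check: v3 needs only half as many words), and an all-ones field marks an empty slot. A minimal sketch of the unpacking loop, assuming only the GENMASK(14, 0) ID mask from mac.h:

#include <stdint.h>
#include <stdio.h>

#define MT_TX_FREE_MSDU_ID_V3 0x7fff	/* GENMASK(14, 0) */

/* unpack both 15-bit token IDs from one v3 info word; an all-ones
 * field marks an empty slot */
static void tx_free_v3_unpack(uint32_t info)
{
	int i;

	for (i = 0; i < 2; i++) {
		uint32_t msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;

		if (msdu == MT_TX_FREE_MSDU_ID_V3)
			continue;
		printf("release token %u\n", (unsigned int)msdu);
	}
}

int main(void)
{
	/* slot 1 empty (all ones), slot 0 carries token 42 */
	tx_free_v3_unpack((0x7fffu << 15) | 42);
	return 0;
}
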
-static bool
-mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
- __le32 *txs_data, struct mt76_sta_stats *stats)
+static void
+mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
- struct ieee80211_supported_band *sband;
+ struct mt76_connac_tx_free *free = data;
+ __le16 *info = (__le16 *)(data + sizeof(*free));
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_phy *mphy;
- struct ieee80211_tx_info *info;
- struct sk_buff_head list;
- struct rate_info rate = {};
- struct sk_buff *skb;
- bool cck = false;
- u32 txrate, txs, mode;
-
- mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (!skb)
- goto out_no_skb;
-
- txs = le32_to_cpu(txs_data[0]);
-
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
-
- info->status.rates[0].idx = -1;
-
- txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
-
- rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
- rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
-
- if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
- stats->tx_nss[rate.nss - 1]++;
- if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
- stats->tx_mcs[rate.mcs]++;
-
- mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- mphy = &dev->mphy;
- if (wcid->ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
-
- if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &mphy->sband_5g.sband;
- else
- sband = &mphy->sband_2g.sband;
-
- rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
- rate.legacy = sband->bitrates[rate.mcs].bitrate;
- break;
- case MT_PHY_TYPE_HT:
- case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
- if (rate.mcs > 31)
- goto out;
+ void *end = data + len;
+ LIST_HEAD(free_list);
+ bool wake = false;
+ u8 i, count;
- rate.flags = RATE_INFO_FLAGS_MCS;
- if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
- rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_VHT:
- if (rate.mcs > 9)
- goto out;
+ mt7915_mac_tx_free_prepare(dev);
- rate.flags = RATE_INFO_FLAGS_VHT_MCS;
- break;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- case MT_PHY_TYPE_HE_MU:
- if (rate.mcs > 11)
- goto out;
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
+ if (WARN_ON_ONCE((void *)&info[count] > end))
+ return;
- rate.he_gi = wcid->rate.he_gi;
- rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
- rate.flags = RATE_INFO_FLAGS_HE_MCS;
- break;
- default:
- goto out;
- }
+ for (i = 0; i < count; i++) {
+ struct mt76_txwi_cache *txwi;
+ u16 msdu = le16_to_cpu(info[i]);
- stats->tx_mode[mode]++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
- switch (FIELD_GET(MT_TXS0_BW, txs)) {
- case IEEE80211_STA_RX_BW_160:
- rate.bw = RATE_INFO_BW_160;
- stats->tx_bw[3]++;
- break;
- case IEEE80211_STA_RX_BW_80:
- rate.bw = RATE_INFO_BW_80;
- stats->tx_bw[2]++;
- break;
- case IEEE80211_STA_RX_BW_40:
- rate.bw = RATE_INFO_BW_40;
- stats->tx_bw[1]++;
- break;
- default:
- rate.bw = RATE_INFO_BW_20;
- stats->tx_bw[0]++;
- break;
+ mt7915_txwi_free(dev, txwi, NULL, &free_list);
}
- wcid->rate = rate;
-
-out:
- mt76_tx_status_skb_done(mdev, skb, &list);
-
-out_no_skb:
- mt76_tx_status_unlock(mdev, &list);
- return !!skb;
+ mt7915_mac_tx_free_done(dev, &free_list, wake);
}
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
@@ -1578,23 +995,18 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
- u32 txs;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
return;
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
- txs = le32_to_cpu(txs_data[3]);
- pid = FIELD_GET(MT_TXS3_PID, txs);
-
- if (pid < MT_PACKET_ID_FIRST)
+ if (pid < MT_PACKET_ID_WED)
return;
- if (wcidx >= MT7915_WTBL_SIZE)
+ if (wcidx >= mt7915_wtbl_size(dev))
return;
rcu_read_lock();
@@ -1605,7 +1017,10 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
msta = container_of(wcid, struct mt7915_sta, wcid);
- mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);
+ if (pid == MT_PACKET_ID_WED)
+ mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
+ else
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
@@ -1626,14 +1041,21 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, data, len);
return false;
+ case PKT_TYPE_TXRX_NOTIFY_V0:
+ mt7915_mac_tx_free_v0(dev, data, len);
+ return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
+ mt7915_mac_add_txs(dev, rxd);
+ return false;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, data, len);
return false;
default:
return true;
@@ -1648,13 +1070,17 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
+ case PKT_TYPE_TXRX_NOTIFY_V0:
+ mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
case PKT_TYPE_RX_EVENT:
mt7915_mcu_rx_event(dev, skb);
break;
@@ -1663,7 +1089,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
break;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
+ mt7915_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
dev_kfree_skb(skb);
break;
case PKT_TYPE_NORMAL:
@@ -1678,32 +1108,10 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7915_txp *txp;
-
- txp = mt7915_txwi_to_txp(mdev, e->txwi);
- t = mt76_token_put(mdev, le16_to_cpu(txp->token));
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
- u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
+ u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);
mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
mt76_set(dev, reg, BIT(11) | BIT(9));
@@ -1712,25 +1120,22 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
int i;
for (i = 0; i < 4; i++) {
- mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
}
- if (ext_phy) {
- dev->mt76.phy2->survey_time = ktime_get_boottime();
+ i = 0;
+ phy->mt76->survey_time = ktime_get_boottime();
+ if (phy->band_idx)
i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
- } else {
- dev->mt76.phy.survey_time = ktime_get_boottime();
- i = 0;
- }
+
memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
/* reset airtime counters */
- mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
MT_WF_RMAC_MIB_RXTIME_CLR);
mt7915_mcu_get_chan_mib_info(phy, true);
@@ -1740,29 +1145,23 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
s16 coverage_class = phy->coverage_class;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
u32 val, reg_offset;
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
int offset;
- bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+ bool a_band = phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
- if (ext_phy) {
+ if (ext_phy)
coverage_class = max_t(s16, dev->phy.coverage_class,
- coverage_class);
- } else {
- struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);
+ ext_phy->coverage_class);
- if (phy_ext)
- coverage_class = max_t(s16, phy_ext->coverage_class,
- coverage_class);
- }
- mt76_set(dev, MT_ARB_SCR(ext_phy),
+ mt76_set(dev, MT_ARB_SCR(phy->band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
udelay(1);
@@ -1770,35 +1169,40 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
- mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
- mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
- mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
- FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 84 : 78) |
+ mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
+ FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
FIELD_PREP(MT_IFS_RIFS, 2) |
FIELD_PREP(MT_IFS_SIFS, 10) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
- mt76_wr(dev, MT_TMAC_ICR1(ext_phy),
+ mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
FIELD_PREP(MT_IFS_EIFS_CCK, 314));
- if (phy->slottime < 20 || is_5ghz)
+ if (phy->slottime < 20 || a_band)
val = MT7915_CFEND_RATE_DEFAULT;
else
val = MT7915_CFEND_RATE_11B;
- mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
- mt76_clear(dev, MT_ARB_SCR(ext_phy),
+ mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
- mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
+ u32 reg;
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
+ MT_WF_PHY_RXTD12_MT7916(ext_phy);
+ mt76_set(dev, reg,
MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
MT_WF_PHY_RXTD12_IRPI_SW_CLR);
- mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
- FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
+ MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
+ mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
static u8
@@ -1810,7 +1214,9 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
int nss, i;
for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
- u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
+ u32 reg = is_mt7915(&dev->mt76) ?
+ MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
+ MT_WF_IRPI_NSS_MT7916(idx, nss);
for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
val = mt76_rr(dev, reg);
@@ -1829,12 +1235,11 @@ void mt7915_update_channel(struct mt76_phy *mphy)
{
struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
struct mt76_channel_state *state = mphy->chan_state;
- bool ext_phy = phy != &phy->dev->phy;
int nf;
mt7915_mcu_get_chan_mib_info(phy, false);
- nf = mt7915_phy_get_nf(phy, ext_phy);
+ nf = mt7915_phy_get_nf(phy, phy->band_idx);
if (!phy->noise)
phy->noise = nf << 4;
else if (nf)
@@ -1865,7 +1270,8 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
- mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
+ BSS_CHANGED_BEACON_ENABLED);
break;
default:
break;
@@ -1875,36 +1281,44 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+
ieee80211_iterate_active_interfaces(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7915_update_vif_beacon, dev->mt76.hw);
- if (!dev->mt76.phy2)
+ if (!mphy_ext)
return;
- ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ ieee80211_iterate_active_interfaces(mphy_ext->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7915_update_vif_beacon, dev->mt76.phy2->hw);
+ mt7915_update_vif_beacon, mphy_ext->hw);
}
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
- struct mt76_phy *mphy_ext = dev->mt76.phy2;
- u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
if (dev->hif2) {
mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
}
usleep_range(1000, 2000);
@@ -1928,19 +1342,23 @@ mt7915_dma_reset(struct mt7915_dev *dev)
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
}
}
@@ -1964,9 +1382,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
struct mt7915_phy *phy2;
struct mt76_phy *ext_phy;
struct mt7915_dev *dev;
+ int i;
dev = container_of(work, struct mt7915_dev, reset_work);
- ext_phy = dev->mt76.phy2;
+ ext_phy = dev->mt76.phys[MT_BAND1];
phy2 = ext_phy ? ext_phy->priv : NULL;
if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
@@ -1985,9 +1404,8 @@ void mt7915_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[0]);
- napi_disable(&dev->mt76.napi[1]);
- napi_disable(&dev->mt76.napi[2]);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
@@ -2010,14 +1428,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
clear_bit(MT76_RESET, &phy2->mt76->state);
local_bh_disable();
- napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
- napi_enable(&dev->mt76.napi[1]);
- napi_schedule(&dev->mt76.napi[1]);
-
- napi_enable(&dev->mt76.napi[2]);
- napi_schedule(&dev->mt76.napi[2]);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
local_bh_enable();
tasklet_schedule(&dev->irq_tasklet);
@@ -2027,8 +1441,10 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_worker_enable(&dev->mt76.tx_worker);
+ local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
ieee80211_wake_queues(mt76_hw(dev));
if (ext_phy)
@@ -2050,106 +1466,131 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
- bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1, cnt;
+ u32 val;
- mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
mib->rx_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy));
- mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
+ mib->rx_vector_mismatch_cnt +=
+ FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy));
- mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
+ mib->rx_delimiter_fail_cnt +=
+ FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy));
- mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
+ mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
+ mib->rx_len_mismatch_cnt +=
+ FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
mib->tx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy));
- mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
+ mib->tx_stop_q_empty_cnt +=
+ FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
+ mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
+ mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
- mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
+ mib->primary_cca_busy_time +=
+ FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
- mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
+ mib->secondary_cca_busy_time +=
+ FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
+ mib->primary_energy_detect_time +=
+ FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
+ mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
+ mib->ofdm_mdrdy_time +=
+ FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
+ mib->green_mdrdy_time +=
+ FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
mib->rx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
mib->rx_ampdu_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
- mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
+ mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy));
- mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
+ mib->tx_rwp_fail_cnt +=
+ FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy));
- mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
+ mib->tx_rwp_need_cnt +=
+ FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
- mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
+ mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
- mib->rx_vec_queue_overflow_drop_cnt +=
- FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
+ mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
mib->rx_ba_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
- mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
-
- cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
- mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
-
- cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
- cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
mib->tx_mu_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
mib->tx_mu_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
mib->tx_su_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
- mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
- mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
- mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
- mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
- mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
- mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
- mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
- mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
- mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
- mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
- mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+ cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
+ mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
+ mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
+ mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
@@ -2157,30 +1598,127 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_amsdu_cnt += cnt;
}
- aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- u32 val;
+ aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ if (is_mt7915(&dev->mt76)) {
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
+ mib->ba_miss_cnt +=
+ FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
- mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- mib->ack_fail_cnt +=
- FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ }
- val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt +=
- FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
- val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
- val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
- dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
+ } else {
+ for (i = 0; i < 2; i++) {
+ /* rts count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
+ mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* rts retry count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ba miss count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ack fail count */
+ val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ for (i = 0; i < 8; i++) {
+ val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
}
}
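
Several of the MT7916 MIB reads above split one 32-bit register into two accumulated 16-bit counters; FIELD_GET(GENMASK(15, 0), val) and FIELD_GET(GENMASK(31, 16), val) are just the mask and shift below. A tiny sketch:

#include <assert.h>
#include <stdint.h>

/* two accumulated 16-bit MIB counters packed into one register read */
static void split_counter(uint32_t val, uint32_t *lo, uint32_t *hi)
{
	*lo = val & 0xffff;	/* FIELD_GET(GENMASK(15, 0), val) */
	*hi = val >> 16;	/* FIELD_GET(GENMASK(31, 16), val) */
}

int main(void)
{
	uint32_t lo, hi;

	split_counter(0xdead0042, &lo, &hi);
	assert(lo == 0x42 && hi == 0xdead);
	return 0;
}
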
+static void mt7915_mac_severe_check(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 trb;
+
+ if (!phy->omac_mask)
+ return;
+
+ /* In rare cases, the TRB pointers might get out of sync and cause
+ * RMAC to stop Rx, so check the status periodically to see whether
+ * the TRB hardware requires minimal recovery.
+ */
+ trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));
+
+ if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
+ FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
+ (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
+ FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
+ trb == phy->trb_ts)
+ mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
+ ext_phy);
+
+ phy->trb_ts = trb;
+}
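
mt7915_mac_severe_check() fires an L3 Rx-abort recovery only when the RMAC and WTBL read pointers disagree in two consecutive samples and the register value has not moved in between. A compact sketch of that predicate, with hypothetical field positions standing in for the real MT_TRB_RXPSR0_* masks:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* hypothetical field positions; the real masks are MT_TRB_RXPSR0_* */
#define RX_RMAC_PTR 0x0000ffffu
#define RX_WTBL_PTR 0xffff0000u

static bool ptrs_diverge(uint32_t trb)
{
	return (trb & RX_RMAC_PTR) != ((trb & RX_WTBL_PTR) >> 16);
}

/* recovery fires only when the pointers disagreed in two consecutive
 * samples and the register did not move in between */
static bool trb_stalled(uint32_t trb, uint32_t prev_trb)
{
	return ptrs_diverge(trb) && ptrs_diverge(prev_trb) &&
	       trb == prev_trb;
}

int main(void)
{
	assert(trb_stalled(0x00020001, 0x00020001));	/* stuck */
	assert(!trb_stalled(0x00010001, 0x00010001));	/* in sync */
	return 0;
}
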
+
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
@@ -2233,6 +1771,7 @@ void mt7915_mac_work(struct work_struct *work)
mphy->mac_work_count = 0;
mt7915_mac_update_stats(phy);
+ mt7915_mac_severe_check(phy);
}
mutex_unlock(&mphy->dev->mutex);
@@ -2248,39 +1787,59 @@ static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
struct mt7915_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
- int err;
+ int err, region;
- err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, region);
if (err < 0)
return err;
- return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
int err;
/* start CAC */
- err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- err = mt7915_dfs_start_rdd(dev, ext_phy);
+ err = mt7915_dfs_start_rdd(dev, phy->band_idx);
if (err < 0)
return err;
- phy->rdd_state |= BIT(ext_phy);
+ phy->rdd_state |= BIT(phy->band_idx);
+
+ if (!is_mt7915(&dev->mt76))
+ return 0;
if (chandef->width == NL80211_CHAN_WIDTH_160 ||
chandef->width == NL80211_CHAN_WIDTH_80P80) {
@@ -2330,48 +1889,56 @@ mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
-
- return 0;
- }
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
+ if (prev_state == dfs_state)
return 0;
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7915_dfs_stop_radar_detector(phy);
- err = mt7915_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7915_dfs_start_radar_detector(phy);
+ err = mt7915_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
+
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
- return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ phy->band_idx, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
+ phy->band_idx, MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7915_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
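
The reworked detector hands state tracking to mt76_phy_dfs_state() and only reacts to transitions: entering DISABLED stops radar detection, leaving UNKNOWN/DISABLED initializes the radar specs and starts CAC, and reaching ACTIVE from CAC sends RDD_CAC_END. A sketch naming the first action per transition, assuming the mt76 enum ordering UNKNOWN < DISABLED < CAC < ACTIVE (a jump straight to ACTIVE additionally ends CAC):

#include <stdio.h>

/* assumed to mirror the mt76 enum ordering */
enum dfs_state_sketch {
	DFS_STATE_UNKNOWN,
	DFS_STATE_DISABLED,
	DFS_STATE_CAC,
	DFS_STATE_ACTIVE,
};

/* returns the first action the handler above would take */
static const char *dfs_transition(enum dfs_state_sketch prev,
				  enum dfs_state_sketch next)
{
	if (prev == next)
		return "no-op";
	if (next == DFS_STATE_DISABLED)
		return "stop detector, RDD_NORMAL_START";
	if (prev <= DFS_STATE_DISABLED)
		return "init radar specs, start CAC";
	return "RDD_CAC_END, mark active";
}

int main(void)
{
	printf("%s\n", dfs_transition(DFS_STATE_DISABLED, DFS_STATE_CAC));
	return 0;
}
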
@@ -2449,6 +2016,34 @@ static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
+ struct mt7915_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
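
mt7915_mac_twt_param_equal() compares the stored mantissa/exponent pair against the request; per 802.11ax the advertised wake interval is mantissa * 2^exponent microseconds, with the exponent taken from the WAKE_INT_EXP field of req_type. A worked example:

#include <stdint.h>
#include <stdio.h>

/* 802.11ax TWT: wake interval = mantissa * 2^exponent, in microseconds */
static uint64_t twt_wake_interval_us(uint16_t mantissa, uint8_t exp)
{
	return (uint64_t)mantissa << exp;
}

int main(void)
{
	/* e.g. mantissa 512, exponent 10 -> 524288 us, about 0.52 s */
	printf("%llu us\n",
	       (unsigned long long)twt_wake_interval_us(512, 10));
	return 0;
}
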
+
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2474,14 +2069,24 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
+ if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
+ goto unlock;
+ }
+
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
+ if (mt7915_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
flow = &msta->twt.flow[flowid];
memset(flow, 0, sizeof(*flow));
INIT_LIST_HEAD(&flow->list);
@@ -2523,8 +2128,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
(twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
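
The two le16p_replace_bits() calls are open-coded above as clear-then-encode: mask out the field, then OR in the value shifted into position. A sketch of that replace pattern on a plain u16 (endianness handling, which the kernel helpers do via __le16, is elided here; the 0x0070 mask assumes a 3-bit field at bits 6..4):

#include <assert.h>
#include <stdint.h>

#define SETUP_CMD_MASK 0x0070u	/* assumed 3-bit field at bits 6..4 */

/* clear-then-encode replace on a plain u16; (mask & -mask) is the
 * field's lowest bit, so multiplying shifts the value into place */
static uint16_t replace_bits16(uint16_t word, uint16_t mask, uint16_t val)
{
	return (word & ~mask) | ((uint16_t)(val * (mask & -mask)) & mask);
}

int main(void)
{
	uint16_t req_type = 0xffff;

	req_type = replace_bits16(req_type, SETUP_CMD_MASK, 4);
	assert((req_type & SETUP_CMD_MASK) == (4u << 4));
	assert((req_type | SETUP_CMD_MASK) == 0xffff);	/* rest untouched */
	return 0;
}
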
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index 7a2c740d1464..6fa9c79f3e5f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -4,6 +4,8 @@
#ifndef __MT7915_MAC_H
#define __MT7915_MAC_H
+#include "../mt76_connac2_mac.h"
+
#define MT_CT_PARSE_LEN 72
#define MT_CT_DMA_BUF_NUM 2
@@ -23,331 +25,24 @@ enum rx_pkt_type {
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
+ PKT_TYPE_RX_FW_MONITOR = 0x0c,
+ PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
};
-/* RXD DW1 */
-#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
-#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
-#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
-#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
-#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
-#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
-#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
-#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
-#define MT_RXD1_NORMAL_CM BIT(23)
-#define MT_RXD1_NORMAL_CLM BIT(24)
-#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
-#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
-#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
-#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
-#define MT_RXD1_NORMAL_SPP_EN BIT(29)
-#define MT_RXD1_NORMAL_ADD_OM BIT(30)
-#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
-
-/* RXD DW2 */
-#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
-#define MT_RXD2_NORMAL_CO_ANT BIT(6)
-#define MT_RXD2_NORMAL_BF_CQI BIT(7)
-#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
-#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
-#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
-#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
-#define MT_RXD2_NORMAL_MU_BAR BIT(21)
-#define MT_RXD2_NORMAL_SW_BIT BIT(22)
-#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
-#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
-#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
-#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
-#define MT_RXD2_NORMAL_FRAG BIT(27)
-#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
-#define MT_RXD2_NORMAL_NDATA BIT(29)
-#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
-#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
-
-/* RXD DW3 */
-#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
-#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
-#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
-#define MT_RXD3_NORMAL_U2M BIT(0)
-#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
-#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
-#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
-#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
-#define MT_RXD3_NORMAL_AMSDU BIT(22)
-#define MT_RXD3_NORMAL_MESH BIT(23)
-#define MT_RXD3_NORMAL_MHCP BIT(24)
-#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
-#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
-#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
-#define MT_RXD3_NORMAL_MORE BIT(28)
-#define MT_RXD3_NORMAL_UNWANT BIT(29)
-#define MT_RXD3_NORMAL_RX_DROP BIT(30)
-#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
-
-/* RXD DW4 */
-#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
-#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
-#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
-#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
-
-#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
-#define MT_RXD4_NORMAL_CLS BIT(10)
-#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
-#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
-#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
-#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
-#define MT_RXD3_NORMAL_PF_MODE BIT(29)
-#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
-
-#define MT_RXV_HDR_BAND_IDX BIT(24)
-
-/* RXD GROUP4 */
-#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
-#define MT_RXD6_TA_LO GENMASK(31, 16)
-
-#define MT_RXD7_TA_HI GENMASK(31, 0)
-
-#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
-#define MT_RXD8_QOS_CTL GENMASK(31, 16)
-
-#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
-
-/* P-RXV */
-#define MT_PRXV_TX_RATE GENMASK(6, 0)
-#define MT_PRXV_TX_DCM BIT(4)
-#define MT_PRXV_TX_ER_SU_106T BIT(5)
-#define MT_PRXV_NSTS GENMASK(9, 7)
-#define MT_PRXV_TXBF BIT(10)
-#define MT_PRXV_HT_AD_CODE BIT(11)
-#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
-#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
-#define MT_PRXV_RCPI3 GENMASK(31, 24)
-#define MT_PRXV_RCPI2 GENMASK(23, 16)
-#define MT_PRXV_RCPI1 GENMASK(15, 8)
-#define MT_PRXV_RCPI0 GENMASK(7, 0)
-
-/* C-RXV */
-#define MT_CRXV_HT_STBC GENMASK(1, 0)
-#define MT_CRXV_TX_MODE GENMASK(7, 4)
-#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
-#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
-#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
-#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
-#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
-#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
-#define MT_CRXV_HE_UPLINK BIT(31)
-#define MT_CRXV_HE_RU0 GENMASK(7, 0)
-#define MT_CRXV_HE_RU1 GENMASK(15, 8)
-#define MT_CRXV_HE_RU2 GENMASK(23, 16)
-#define MT_CRXV_HE_RU3 GENMASK(31, 24)
-
-#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
-
-#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
-#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
-#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
-#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
-
-#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
-#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
-#define MT_CRXV_HE_BEAM_CHNG BIT(13)
-#define MT_CRXV_HE_DOPPLER BIT(16)
-
-#define MT_CRXV_SNR GENMASK(18, 13)
-#define MT_CRXV_FOE_LO GENMASK(31, 19)
-#define MT_CRXV_FOE_HI GENMASK(6, 0)
-#define MT_CRXV_FOE_SHIFT 13
-
-enum tx_header_format {
- MT_HDR_FORMAT_802_3,
- MT_HDR_FORMAT_CMD,
- MT_HDR_FORMAT_802_11,
- MT_HDR_FORMAT_802_11_EXT,
-};
-
-enum tx_pkt_type {
- MT_TX_TYPE_CT,
- MT_TX_TYPE_SF,
- MT_TX_TYPE_CMD,
- MT_TX_TYPE_FW,
-};
-
-enum tx_port_idx {
- MT_TX_PORT_IDX_LMAC,
- MT_TX_PORT_IDX_MCU
-};
-
-enum tx_mcu_port_q_idx {
- MT_TX_MCU_PORT_RX_Q0 = 0x20,
- MT_TX_MCU_PORT_RX_Q1,
- MT_TX_MCU_PORT_RX_Q2,
- MT_TX_MCU_PORT_RX_Q3,
- MT_TX_MCU_PORT_RX_FWDL = 0x3e
-};
-
-#define MT_CT_INFO_APPLY_TXD BIT(0)
-#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
-#define MT_CT_INFO_MGMT_FRAME BIT(2)
-#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
-#define MT_CT_INFO_HSR2_TX BIT(4)
-#define MT_CT_INFO_FROM_HOST BIT(7)
-
-#define MT_TXD_SIZE (8 * 4)
-
-#define MT_TXD0_Q_IDX GENMASK(31, 25)
-#define MT_TXD0_PKT_FMT GENMASK(24, 23)
-#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
-#define MT_TXD0_TX_BYTES GENMASK(15, 0)
-
-#define MT_TXD1_LONG_FORMAT BIT(31)
-#define MT_TXD1_TGID BIT(30)
-#define MT_TXD1_OWN_MAC GENMASK(29, 24)
-#define MT_TXD1_AMSDU BIT(23)
-#define MT_TXD1_TID GENMASK(22, 20)
-#define MT_TXD1_HDR_PAD GENMASK(19, 18)
-#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
-#define MT_TXD1_HDR_INFO GENMASK(15, 11)
-#define MT_TXD1_ETH_802_3 BIT(15)
-#define MT_TXD1_VTA BIT(10)
-#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
-
-#define MT_TXD2_FIX_RATE BIT(31)
-#define MT_TXD2_FIXED_RATE BIT(30)
-#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
-#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
-#define MT_TXD2_FRAG GENMASK(15, 14)
-#define MT_TXD2_HTC_VLD BIT(13)
-#define MT_TXD2_DURATION BIT(12)
-#define MT_TXD2_BIP BIT(11)
-#define MT_TXD2_MULTICAST BIT(10)
-#define MT_TXD2_RTS BIT(9)
-#define MT_TXD2_SOUNDING BIT(8)
-#define MT_TXD2_NDPA BIT(7)
-#define MT_TXD2_NDP BIT(6)
-#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
-#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
-
-#define MT_TXD3_SN_VALID BIT(31)
-#define MT_TXD3_PN_VALID BIT(30)
-#define MT_TXD3_SW_POWER_MGMT BIT(29)
-#define MT_TXD3_BA_DISABLE BIT(28)
-#define MT_TXD3_SEQ GENMASK(27, 16)
-#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
-#define MT_TXD3_TX_COUNT GENMASK(10, 6)
-#define MT_TXD3_TIMING_MEASURE BIT(5)
-#define MT_TXD3_DAS BIT(4)
-#define MT_TXD3_EEOSP BIT(3)
-#define MT_TXD3_EMRD BIT(2)
-#define MT_TXD3_PROTECT_FRAME BIT(1)
-#define MT_TXD3_NO_ACK BIT(0)
-
-#define MT_TXD4_PN_LOW GENMASK(31, 0)
-
-#define MT_TXD5_PN_HIGH GENMASK(31, 16)
-#define MT_TXD5_MD BIT(15)
-#define MT_TXD5_ADD_BA BIT(14)
-#define MT_TXD5_TX_STATUS_HOST BIT(10)
-#define MT_TXD5_TX_STATUS_MCU BIT(9)
-#define MT_TXD5_TX_STATUS_FMT BIT(8)
-#define MT_TXD5_PID GENMASK(7, 0)
-
-#define MT_TXD6_TX_IBF BIT(31)
-#define MT_TXD6_TX_EBF BIT(30)
-#define MT_TXD6_TX_RATE GENMASK(29, 16)
-#define MT_TXD6_SGI GENMASK(15, 14)
-#define MT_TXD6_HELTF GENMASK(13, 12)
-#define MT_TXD6_LDPC BIT(11)
-#define MT_TXD6_SPE_ID_IDX BIT(10)
-#define MT_TXD6_ANT_ID GENMASK(7, 4)
-#define MT_TXD6_DYN_BW BIT(3)
-#define MT_TXD6_FIXED_BW BIT(2)
-#define MT_TXD6_BW GENMASK(1, 0)
-
-#define MT_TXD7_TXD_LEN GENMASK(31, 30)
-#define MT_TXD7_UDP_TCP_SUM BIT(29)
-#define MT_TXD7_IP_SUM BIT(28)
-
-#define MT_TXD7_TYPE GENMASK(21, 20)
-#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
-
-#define MT_TXD7_PSE_FID GENMASK(27, 16)
-#define MT_TXD7_SPE_IDX GENMASK(15, 11)
-#define MT_TXD7_HW_AMSDU BIT(10)
-#define MT_TXD7_TX_TIME GENMASK(9, 0)
-
-#define MT_TX_RATE_STBC BIT(13)
-#define MT_TX_RATE_NSS GENMASK(12, 10)
-#define MT_TX_RATE_MODE GENMASK(9, 6)
-#define MT_TX_RATE_SU_EXT_TONE BIT(5)
-#define MT_TX_RATE_DCM BIT(4)
-/* VHT/HE only use bits 0-3 */
-#define MT_TX_RATE_IDX GENMASK(5, 0)
-
-#define MT_TXP_MAX_BUF_NUM 6
-
-struct mt7915_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- __le16 rept_wds_wcid;
- u8 nbuf;
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
-struct mt7915_tx_free {
- __le16 rx_byte_cnt;
- __le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
- __le32 info[];
-} __packed __aligned(4);
-
+#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
+#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */
-#define MT_TX_FREE_STATUS GENMASK(14, 13)
#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
#define MT_TX_FREE_PAIR BIT(31)
+#define MT_TX_FREE_MPDU_HEADER BIT(30)
+#define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0)
+
 /* this field will be supported in a future revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
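
The _V0/_V3 suffixed masks above coexist with the unsuffixed ones because the tx-free event layout is versioned. A minimal parsing sketch (an editor's illustration, not part of the patch; the version plumbing is assumed, and FIELD_GET comes from <linux/bitfield.h>):

	static u16 tx_free_msdu_id(u32 info, u8 ver)
	{
		/* later revisions widened the MSDU id field */
		if (ver == 3)
			return FIELD_GET(MT_TX_FREE_MSDU_ID_V3, info);
		return FIELD_GET(MT_TX_FREE_MSDU_ID, info);
	}
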
-#define MT_TXS0_FIXED_RATE BIT(31)
-#define MT_TXS0_BW GENMASK(30, 29)
-#define MT_TXS0_TID GENMASK(28, 26)
-#define MT_TXS0_AMPDU BIT(25)
-#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
-#define MT_TXS0_BA_ERROR BIT(22)
-#define MT_TXS0_PS_FLAG BIT(21)
-#define MT_TXS0_TXOP_TIMEOUT BIT(20)
-#define MT_TXS0_BIP_ERROR BIT(19)
-
-#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
-#define MT_TXS0_RTS_TIMEOUT BIT(17)
-#define MT_TXS0_ACK_TIMEOUT BIT(16)
-#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
-
-#define MT_TXS0_TX_STATUS_HOST BIT(15)
-#define MT_TXS0_TX_STATUS_MCU BIT(14)
-#define MT_TXS0_TX_RATE GENMASK(13, 0)
-
-#define MT_TXS1_SEQNO GENMASK(31, 20)
-#define MT_TXS1_RESP_RATE GENMASK(19, 16)
-#define MT_TXS1_RXV_SEQNO GENMASK(15, 8)
-#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0)
-
-#define MT_TXS2_BF_STATUS GENMASK(31, 30)
-#define MT_TXS2_LAST_TX_RATE GENMASK(29, 27)
-#define MT_TXS2_SHARED_ANTENNA BIT(26)
-#define MT_TXS2_WCID GENMASK(25, 16)
-#define MT_TXS2_TX_DELAY GENMASK(15, 0)
-
-#define MT_TXS3_PID GENMASK(31, 24)
-#define MT_TXS3_ANT_ID GENMASK(23, 0)
-
-#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
-
#define MT_TXS5_F0_FINAL_MPDU BIT(31)
#define MT_TXS5_F0_QOS BIT(30)
#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25)
@@ -403,17 +98,4 @@ struct mt7915_dfs_radar_spec {
struct mt7915_dfs_pattern radar_pattern[16];
};
-static inline struct mt7915_txp *
-mt7915_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 8ac6f59af174..89b519cfd14c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -34,7 +34,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
running = mt7915_dev_running(dev);
if (!running) {
- ret = mt7915_mcu_set_pm(dev, 0, 0);
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
if (ret)
goto out;
@@ -42,15 +42,11 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (ret)
goto out;
- ret = mt7915_mcu_set_scs(dev, 0, true);
- if (ret)
- goto out;
-
mt7915_mac_enable_nf(dev, 0);
}
- if (phy != &dev->phy) {
- ret = mt7915_mcu_set_pm(dev, 1, 0);
+ if (phy != &dev->phy || phy->band_idx) {
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
if (ret)
goto out;
@@ -58,14 +54,11 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (ret)
goto out;
- ret = mt7915_mcu_set_scs(dev, 1, true);
- if (ret)
- goto out;
-
mt7915_mac_enable_nf(dev, 1);
}
- ret = mt7915_mcu_set_rts_thresh(phy, 0x92b);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ phy != &dev->phy);
if (ret)
goto out;
@@ -106,12 +99,12 @@ static void mt7915_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
- mt7915_mcu_set_pm(dev, 1, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
mt7915_mcu_set_mac(dev, 1, false, false);
}
if (!mt7915_dev_running(dev)) {
- mt7915_mcu_set_pm(dev, 0, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
mt7915_mcu_set_mac(dev, 0, false, false);
}
@@ -173,14 +166,14 @@ static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
- mvif->bitrate_mask.control[i].he_gi = GENMASK(7, 0);
- mvif->bitrate_mask.control[i].he_ltf = GENMASK(7, 0);
+ mvif->bitrate_mask.control[i].he_gi = 0xff;
+ mvif->bitrate_mask.control[i].he_ltf = 0xff;
mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
- memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
+ memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].ht_mcs));
- memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0),
+ memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].vht_mcs));
- memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0),
+ memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].he_mcs));
}
}
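
The memset() fixes above deserve a note: memset() truncates its value argument to unsigned char, so GENMASK(15, 0) and GENMASK(7, 0) both degenerate to a 0xff byte fill; writing 0xff states the intent and drops the misleading 16-bit mask. A standalone sketch:

	#include <string.h>

	int main(void)
	{
		unsigned short he_mcs[4];

		/* every byte becomes 0xff, so each u16 reads back as 0xffff */
		memset(he_mcs, 0xff, sizeof(he_mcs));
		return 0;
	}
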
@@ -203,8 +196,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
- if (mvif->mt76.idx >= MT7915_MAX_INTERFACES) {
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mvif->mt76.idx >= (MT7915_MAX_INTERFACES << dev->dbdc_support)) {
ret = -ENOSPC;
goto out;
}
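
The interface-index bookkeeping moves from a 32-bit to a 64-bit mask here, so BIT()/ffs() become BIT_ULL()/__ffs64(). A sketch of the allocate/release pattern (editor's illustration; assumes at least one bit is clear when allocating):

	static u64 vif_mask;

	static unsigned int vif_idx_alloc(void)
	{
		unsigned int idx = __ffs64(~vif_mask);	/* lowest clear bit */

		vif_mask |= BIT_ULL(idx);
		return idx;
	}

	static void vif_idx_free(unsigned int idx)
	{
		vif_mask &= ~BIT_ULL(idx);
	}
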
@@ -216,7 +209,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
}
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
- mvif->mt76.band_idx = ext_phy;
+ mvif->mt76.band_idx = phy->band_idx;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@@ -226,7 +219,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
@@ -234,7 +227,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.phy_idx = ext_phy;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -242,10 +235,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
}
if (vif->type != NL80211_IFTYPE_AP &&
@@ -256,6 +248,10 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_init_bitrate_mask(vif);
memset(&mvif->cap, -1, sizeof(mvif->cap));
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+
out:
mutex_unlock(&dev->mt76.mutex);
@@ -286,7 +282,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mutex_unlock(&dev->mt76.mutex);
@@ -298,25 +294,6 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
-static void mt7915_init_dfs_state(struct mt7915_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
-
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- return;
-
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
- if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
- mphy->chandef.width == chandef->width)
- return;
-
- phy->dfs_state = -1;
-}
-
int mt7915_set_channel(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -327,7 +304,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
- mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (dev->flash_mode) {
@@ -366,6 +342,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
&mvif->sta;
@@ -405,6 +382,11 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&dev->mt76.mutex);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@@ -415,8 +397,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
-
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_EXT_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -495,14 +478,14 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
}
static int
-mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7915_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
@@ -591,7 +574,7 @@ mt7915_update_bss_color(struct ieee80211_hw *hw,
static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = mt7915_hw_dev(hw);
@@ -611,7 +594,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- mt7915_mcu_add_bss_info(phy, vif, info->assoc);
+ mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
}
@@ -640,8 +623,10 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_update_bss_color(hw, vif, &info->he_bss_color);
if (changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED))
- mt7915_mcu_add_beacon(hw, vif, info->enable_beacon);
+ BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
mutex_unlock(&dev->mt76.mutex);
}
@@ -654,7 +639,7 @@ mt7915_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_add_beacon(hw, vif, true);
+ mt7915_mcu_add_beacon(hw, vif, true, BSS_CHANGED_BEACON);
mutex_unlock(&dev->mt76.mutex);
}
@@ -664,6 +649,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ bool ext_phy = mvif->phy != &dev->phy;
int ret, idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
@@ -675,7 +661,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.phy_idx = ext_phy;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
@@ -746,7 +732,7 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
int ret;
mutex_lock(&dev->mt76.mutex);
- ret = mt7915_mcu_set_rts_thresh(phy, val);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -861,8 +847,12 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
: mvif->mt76.omac_idx;
/* TSF software read */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_READ);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
@@ -904,8 +894,12 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_WRITE);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
mutex_unlock(&dev->mt76.mutex);
}
@@ -931,8 +925,12 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
 /* TSF software adjust */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_ADJUST);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
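
The same is_mt7915() split appears in the TSF read, overwrite and adjust paths above because the LPON TCR register moved on mt7916/mt7986. A hypothetical helper (not in the patch) could fold the three copies:

	static void mt7915_tsf_op(struct mt7915_dev *dev, u8 band, u8 n, u32 op)
	{
		u32 reg = is_mt7915(&dev->mt76) ? MT_LPON_TCR(band, n)
						: MT_LPON_TCR_MT7916(band, n);

		mt76_rmw(dev, reg, MT_LPON_TCR_SW_MODE, op);
	}
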
@@ -967,12 +965,9 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
phy->mt76->antenna_mask = tx_ant;
- if (ext_phy) {
- if (dev->chainmask == 0xf)
- tx_ant <<= 2;
- else
- tx_ant <<= 1;
- }
+ if (ext_phy)
+ tx_ant <<= dev->chainshift;
+
phy->mt76->chainmask = tx_ant;
mt76_set_stream_caps(phy->mt76, true);
@@ -994,7 +989,8 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (is_mt7915(&phy->dev->mt76) &&
+ !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1014,6 +1010,23 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+	/* offloading flows bypass the networking stack, so the driver counts
+	 * and reports sta statistics via NL80211_STA_INFO when WED is active.
+ */
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+
+ sinfo->tx_packets = msta->wcid.stats.tx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+
+ sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
+ sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
}
static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
@@ -1079,7 +1092,7 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
@@ -1095,7 +1108,7 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -1151,8 +1164,15 @@ static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_fifo_full_cnt",
"rx_mpdu_cnt",
"channel_idle_cnt",
+ "primary_cca_busy_time",
+ "secondary_cca_busy_time",
+ "primary_energy_detect_time",
+ "cck_mdrdy_time",
+ "ofdm_mdrdy_time",
+ "green_mdrdy_time",
"rx_vector_mismatch_cnt",
"rx_delimiter_fail_cnt",
+ "rx_mrdy_cnt",
"rx_len_mismatch_cnt",
"rx_ampdu_cnt",
"rx_ampdu_bytes_cnt",
@@ -1221,7 +1241,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
@@ -1238,7 +1258,6 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
- bool ext_phy = phy != &dev->phy;
int i, n, ei = 0;
mutex_lock(&dev->mt76.mutex);
@@ -1255,7 +1274,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->tx_pkt_ibf_cnt;
/* Tx ampdu stat */
- n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
data[ei++] = dev->mt76.aggr_stats[i + n];
@@ -1293,8 +1312,15 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->rx_fifo_full_cnt;
data[ei++] = mib->rx_mpdu_cnt;
data[ei++] = mib->channel_idle_cnt;
+ data[ei++] = mib->primary_cca_busy_time;
+ data[ei++] = mib->secondary_cca_busy_time;
+ data[ei++] = mib->primary_energy_detect_time;
+ data[ei++] = mib->cck_mdrdy_time;
+ data[ei++] = mib->ofdm_mdrdy_time;
+ data[ei++] = mib->green_mdrdy_time;
data[ei++] = mib->rx_vector_mismatch_cnt;
data[ei++] = mib->rx_delimiter_fail_cnt;
+ data[ei++] = mib->rx_mrdy_cnt;
data[ei++] = mib->rx_len_mismatch_cnt;
data[ei++] = mib->rx_ampdu_cnt;
data[ei++] = mib->rx_ampdu_bytes_cnt;
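
The string table and the data[ei++] sequence above are parallel arrays: every counter added to mt7915_gstrings_stats needs a fill at the same offset, or all later stats shift under the wrong names. A guard of this shape can catch a mismatch at build time (MT7915_ET_STATS_CNT is a hypothetical count macro, not from the patch):

	BUILD_BUG_ON(ARRAY_SIZE(mt7915_gstrings_stats) != MT7915_ET_STATS_CNT);
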
@@ -1332,6 +1358,88 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7915_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ int ret = -EINVAL;
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ goto out;
+
+ if (dev->rdd2_phy && dev->rdd2_phy != phy) {
+ /* rdd2 is already locked */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* rdd2 already configured on a radar channel */
+ running = dev->rdd2_phy &&
+ cfg80211_chandef_valid(&dev->rdd2_chandef) &&
+ !!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
+
+ if (!chandef || running ||
+ !(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
+ ret = mt7915_mcu_rdd_background_enable(phy, NULL);
+ if (ret)
+ goto out;
+
+ if (!running)
+ goto update_phy;
+ }
+
+ ret = mt7915_mcu_rdd_background_enable(phy, chandef);
+ if (ret)
+ goto out;
+
+update_phy:
+ dev->rdd2_phy = chandef ? phy : NULL;
+ if (chandef)
+ dev->rdd2_chandef = *chandef;
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int
+mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -ENODEV;
+
+ if (msta->wcid.idx > 0xff)
+ return -EIO;
+
+ path->type = DEV_PATH_MTK_WDMA;
+ path->dev = ctx->dev;
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.wcid = msta->wcid.idx;
+ path->mtk_wdma.queue = phy != &dev->phy;
+
+ ctx->dev = NULL;
+
+ return 0;
+}
+#endif
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@@ -1378,4 +1486,8 @@ const struct ieee80211_ops mt7915_ops = {
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
+ .set_radar_background = mt7915_set_radar_background,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ .net_fill_forward_path = mt7915_net_fill_forward_path,
+#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 0911b6f973b5..8d297e4aa7d4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -1,199 +1,37 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
-#include <linux/firmware.h>
#include <linux/fs.h>
#include "mt7915.h"
#include "mcu.h"
#include "mac.h"
#include "eeprom.h"
-struct mt7915_patch_hdr {
- char build_date[16];
- char platform[4];
- __be32 hw_sw_ver;
- __be32 patch_ver;
- __be16 checksum;
- u16 reserved;
- struct {
- __be32 patch_ver;
- __be32 subsys;
- __be32 feature;
- __be32 n_region;
- __be32 crc;
- u32 reserved[11];
- } desc;
-} __packed;
-
-struct mt7915_patch_sec {
- __be32 type;
- __be32 offs;
- __be32 size;
- union {
- __be32 spec[13];
- struct {
- __be32 addr;
- __be32 len;
- __be32 sec_key_idx;
- __be32 align_len;
- u32 reserved[9];
- } info;
- };
-} __packed;
-
-struct mt7915_fw_trailer {
- u8 chip_id;
- u8 eco_code;
- u8 n_region;
- u8 format_ver;
- u8 format_flag;
- u8 reserved[2];
- char fw_ver[10];
- char build_date[15];
- u32 crc;
-} __packed;
-
-struct mt7915_fw_region {
- __le32 decomp_crc;
- __le32 decomp_len;
- __le32 decomp_blk_sz;
- u8 reserved[4];
- __le32 addr;
- __le32 len;
- u8 feature_set;
- u8 reserved1[15];
-} __packed;
+#define fw_name(_dev, name, ...) ({ \
+ char *_fw; \
+ switch (mt76_chip(&(_dev)->mt76)) { \
+ case 0x7915: \
+ _fw = MT7915_##name; \
+ break; \
+ case 0x7986: \
+ _fw = MT7986_##name##__VA_ARGS__; \
+ break; \
+ default: \
+ _fw = MT7916_##name; \
+ break; \
+ } \
+ _fw; \
+})
+
+#define fw_name_var(_dev, name) (mt7915_check_adie(dev, false) ? \
+ fw_name(_dev, name) : \
+ fw_name(_dev, name, _MT7975))
#define MCU_PATCH_ADDRESS 0x200000
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
-
-#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
-
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
-static enum mcu_cipher_type
-mt7915_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
-static const struct ieee80211_sta_he_cap *
-mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
-{
- struct ieee80211_supported_band *sband;
- enum nl80211_band band;
-
- band = phy->mt76->chandef.chan->band;
- sband = phy->mt76->hw->wiphy->bands[band];
-
- return ieee80211_get_he_iftype_cap(sband, vif->type);
-}
-
-static u8
-mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
- struct ieee80211_sta_ht_cap *ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 mode = 0;
-
- if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
- } else {
- struct ieee80211_supported_band *sband;
-
- sband = mvif->phy->mt76->hw->wiphy->bands[band];
-
- ht_cap = &sband->ht_cap;
- vht_cap = &sband->vht_cap;
- he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
- }
-
- if (band == NL80211_BAND_2GHZ) {
- mode |= PHY_MODE_B | PHY_MODE_G;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_GN;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ) {
- mode |= PHY_MODE_A;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_AN;
-
- if (vht_cap->vht_supported)
- mode |= PHY_MODE_AC;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_5G;
- }
-
- return mode;
-}
-
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
@@ -211,24 +49,13 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
static void
mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
- const u16 *mask)
+ u16 mcs_map)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
- int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
- u16 mcs_map;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80P80:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
- break;
- case NL80211_CHAN_WIDTH_160:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
- break;
- default:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
- break;
- }
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
for (nss = 0; nss < max_nss; nss++) {
int mcs;
@@ -266,8 +93,9 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
mcs_map &= ~(0x3 << (nss * 2));
mcs_map |= mcs << (nss * 2);
- /* only support 2ss on 160MHz */
- if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
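
mt7915_mcu_set_sta_he_mcs() now receives the raw rx MCS map for the active bandwidth; the map packs one 2-bit code per spatial stream. Decoding sketch (editor's illustration of the standard HE layout):

	/* 0: MCS 0-7, 1: MCS 0-9, 2: MCS 0-11, 3: not supported */
	static u8 he_mcs_code(u16 mcs_map, int nss)
	{
		return (mcs_map >> (nss * 2)) & 0x3;
	}
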
@@ -278,8 +106,10 @@ static void
mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
const u16 *mask)
{
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
- int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
u16 mcs;
for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
@@ -299,8 +129,9 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
- /* only support 2ss on 160MHz */
- if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
}
@@ -309,17 +140,17 @@ static void
mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
const u8 *mask)
{
- int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
for (nss = 0; nss < max_nss; nss++)
- ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
+ ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
}
static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
- struct mt7915_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
@@ -328,7 +159,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return -ETIMEDOUT;
}
- rxd = (struct mt7915_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (seq != rxd->seq)
return -EAGAIN;
@@ -339,7 +170,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
skb_pull(skb, sizeof(*rxd) + 4);
ret = le32_to_cpu(*(__le32 *)skb->data);
} else {
- skb_pull(skb, sizeof(struct mt7915_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
}
return ret;
@@ -350,69 +181,20 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- struct mt7915_mcu_txd *mcu_txd;
enum mt76_mcuq_id qid;
- __le32 *txd;
- u32 val;
- u8 seq;
-
- /* TODO: make dynamic based on msg type */
- mdev->mcu.timeout = 20 * HZ;
+ int ret;
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
+ if (ret)
+ return ret;
- if (cmd == MCU_CMD(FW_SCATTER)) {
+ if (cmd == MCU_CMD(FW_SCATTER))
qid = MT_MCUQ_FWDL;
- goto exit;
- }
-
- mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ else if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
qid = MT_MCUQ_WA;
else
qid = MT_MCUQ_WM;
- txd = mcu_txd->txd;
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
- FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
- FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
- txd[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
- txd[1] = cpu_to_le32(val);
-
- mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
- mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
- MT_TX_MCU_PORT_RX_Q0));
- mcu_txd->pkt_type = MCU_PKT_ID;
- mcu_txd->seq = seq;
-
- mcu_txd->cid = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
- mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
- if (mcu_txd->ext_cid) {
- mcu_txd->ext_cid_ack = 1;
-
- /* do not use Q_SET for efuse */
- if (cmd & __MCU_CMD_FIELD_QUERY)
- mcu_txd->set_query = MCU_Q_QUERY;
- else
- mcu_txd->set_query = MCU_Q_SET;
- }
-
- if (cmd & __MCU_CMD_FIELD_WA)
- mcu_txd->s2d_index = MCU_S2D_H2C;
- else
- mcu_txd->s2d_index = MCU_S2D_H2N;
-
-exit:
- if (wait_seq)
- *wait_seq = seq;
-
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
@@ -434,7 +216,7 @@ int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
static void
mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active)
+ if (vif->bss_conf.csa_active)
ieee80211_csa_finish(vif);
}
@@ -446,8 +228,8 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
- if (c->band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -465,8 +247,8 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
- if (t->ctrl.band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
phy = (struct mt7915_phy *)mphy->priv;
phy->throttle_state = t->ctrl.duty.duty_cycle;
@@ -480,22 +262,33 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if (r->band_idx && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
- ieee80211_radar_detected(mphy->hw);
+ if (r->band_idx == MT_RX_SEL2)
+ cfg80211_background_radar_event(mphy->hw->wiphy,
+ &dev->rdd2_chandef,
+ GFP_ATOMIC);
+ else
+ ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
}
static void
mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
- const char *data = (char *)&rxd[1];
- const char *type;
+ struct mt76_connac2_mcu_rxd *rxd;
+ int len = skb->len - sizeof(*rxd);
+ const char *data, *type;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ data = (char *)&rxd[1];
switch (rxd->s2d_index) {
case 0:
+ if (mt7915_debugfs_rx_log(dev, data, len))
+ return;
+
type = "WM";
break;
case 2:
@@ -506,24 +299,40 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
break;
}
- wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
- (int)(skb->len - sizeof(*rxd)), data);
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
}
static void
mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (!vif->color_change_active)
+ if (!vif->bss_conf.color_change_active)
return;
ieee80211_color_change_finish(vif);
}
static void
+mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_bcc_notify *b;
+
+ b = (struct mt7915_mcu_bcc_notify *)skb->data;
+
+ if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_cca_finish, mphy->hw);
+}
+
+static void
mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->ext_eid) {
case MCU_EXT_EVENT_THERMAL_PROTECT:
mt7915_mcu_rx_thermal_notify(dev, skb);
@@ -538,9 +347,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_mcu_rx_log_message(dev, skb);
break;
case MCU_EXT_EVENT_BCC_NOTIFY:
- ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7915_mcu_cca_finish, dev);
+ mt7915_mcu_rx_bcc_notify(dev, skb);
break;
default:
break;
@@ -550,8 +357,9 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
static void
mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->eid) {
case MCU_EVENT_EXT:
mt7915_mcu_rx_ext_event(dev, skb);
@@ -564,8 +372,9 @@ mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
- struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
@@ -577,88 +386,6 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_mcu_rx_event(&dev->mt76, skb);
}
-static struct sk_buff *
-mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
- struct mt7915_sta *msta, int len)
-{
- struct sta_req_hdr hdr = {
- .bss_idx = mvif->mt76.idx,
- .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
- .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
- .muar_idx = msta && msta->wcid.sta ? mvif->mt76.omac_idx : 0xe,
- .is_tlv_append = 1,
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- skb_put_data(skb, &hdr, sizeof(hdr));
-
- return skb;
-}
-
-static struct wtbl_req_hdr *
-mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
- int cmd, void *sta_wtbl, struct sk_buff **skb)
-{
- struct tlv *sta_hdr = sta_wtbl;
- struct wtbl_req_hdr hdr = {
- .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
- .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
- .operation = cmd,
- };
- struct sk_buff *nskb = *skb;
-
- if (!nskb) {
- nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!nskb)
- return ERR_PTR(-ENOMEM);
-
- *skb = nskb;
- }
-
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, sizeof(hdr));
-
- return skb_put_data(nskb, &hdr, sizeof(hdr));
-}
-
-static struct tlv *
-mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
- void *sta_ntlv, void *sta_wtbl)
-{
- struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
- struct tlv *sta_hdr = sta_wtbl;
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
- u16 ntlv;
-
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
-
- ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
- ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
-
- if (sta_hdr) {
- u16 size = le16_to_cpu(sta_hdr->len);
-
- sta_hdr->len = cpu_to_le16(size + len);
- }
-
- return ptlv;
-}
-
-static struct tlv *
-mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
-{
- return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
-}
-
static struct tlv *
mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
__le16 *sub_ntlv, __le16 *len)
@@ -678,105 +405,6 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
}
/** bss info **/
-static int
-mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7915_phy *phy, bool enable)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_basic *bss;
- u16 wlan_idx = mvif->sta.wcid.idx;
- u32 type = NETWORK_INFRA;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable) {
- struct ieee80211_sta *sta;
- struct mt7915_sta *msta;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
- bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
- u8 idx;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0
- : mvif->mt76.omac_idx;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = idx;
-}
-
struct mt7915_he_obss_narrow_bw_ru_data {
bool tolerated;
};
@@ -829,12 +457,12 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct tlv *tlv;
int freq1 = chandef->center_freq1;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
ch = (struct bss_info_rf_ch *)tlv;
ch->pri_ch = chandef->chan->hw_value;
ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
- ch->bw = mt7915_mcu_chan_bw(chandef);
+ ch->bw = mt76_connac_chan_bw(chandef);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
int freq2 = chandef->center_freq2;
@@ -843,12 +471,7 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
- struct mt7915_dev *dev = phy->dev;
- struct mt76_phy *mphy = &dev->mt76.phy;
- bool ext_phy = phy != &dev->phy;
-
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = phy->mt76;
ch->he_ru26_block =
mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
@@ -866,7 +489,7 @@ mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_ra *ra;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
ra = (struct bss_info_ra *)tlv;
ra->op_mode = vif->type == NL80211_IFTYPE_AP;
@@ -894,9 +517,9 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_he *he;
struct tlv *tlv;
- cap = mt7915_get_he_phy_cap(phy, vif);
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_he *)tlv;
he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
@@ -920,7 +543,7 @@ mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
struct bss_info_hw_amsdu *amsdu;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct bss_info_hw_amsdu *)tlv;
amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
@@ -930,26 +553,6 @@ mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
}
static void
-mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
-{
-/* SIFS 20us + 512 byte beacon transmitted at 1 Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
-}
-
-static void
mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
{
struct bss_info_bmc_rate *bmc;
@@ -957,7 +560,7 @@ mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
enum nl80211_band band = chandef->chan->band;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
bmc = (struct bss_info_bmc_rate *)tlv;
if (band == NL80211_BAND_2GHZ) {
@@ -1010,6 +613,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
struct sk_buff *skb;
if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
@@ -1017,16 +621,17 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mt7915_mcu_muar_config(phy, vif, true, enable);
}
- skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
- MT7915_BSS_UPDATE_MAX_SIZE);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* bss_omac must be first */
if (enable)
- mt7915_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (vif->type == NL80211_IFTYPE_MONITOR)
goto out;
@@ -1042,317 +647,56 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7915_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
}
out:
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
/** starec & wtbl **/
-static int
-mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7915_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7915_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
- int ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7915_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-static void
-mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
-{
- struct sta_rec_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
-
- ba = (struct sta_rec_ba *)tlv;
- ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
- ba->winsize = cpu_to_le16(params->buf_size);
- ba->ssn = cpu_to_le16(params->ssn);
- ba->ba_en = enable << params->tid;
- ba->amsdu = params->amsdu;
- ba->tid = params->tid;
-}
-
-static void
-mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct wtbl_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
- wtbl_tlv, sta_wtbl);
-
- ba = (struct wtbl_ba *)tlv;
- ba->tid = params->tid;
-
- if (tx) {
- ba->ba_type = MT_BA_TYPE_ORIGINATOR;
- ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
- ba->ba_en = enable;
- } else {
- memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
- ba->ba_type = MT_BA_TYPE_RECIPIENT;
- ba->rst_ba_tid = params->tid;
- ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
- ba->rst_ba_sb = 1;
- }
-
- if (enable)
- ba->ba_winsize = cpu_to_le16(params->buf_size);
-}
-
-static int
-mt7915_mcu_sta_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
- int ret;
- if (enable && tx && !params->amsdu)
+ if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
-
- ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
- if (ret)
- return ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable)
-{
- return mt7915_mcu_sta_ba(dev, params, enable, true);
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
{
- return mt7915_mcu_sta_ba(dev, params, enable, false);
-}
-
-static void
-mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_generic *generic;
- struct wtbl_rx *rx;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
- wtbl_tlv, sta_wtbl);
-
- generic = (struct wtbl_generic *)tlv;
-
- if (sta) {
- memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->partial_aid = cpu_to_le16(sta->aid);
- generic->muar_idx = mvif->mt76.omac_idx;
- generic->qos = sta->wme;
- } else {
- /* use BSSID in station mode */
- if (vif->type == NL80211_IFTYPE_STATION)
- memcpy(generic->peer_addr, vif->bss_conf.bssid,
- ETH_ALEN);
- else
- eth_broadcast_addr(generic->peer_addr);
-
- generic->muar_idx = 0xe;
- }
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
- wtbl_tlv, sta_wtbl);
-
- rx = (struct wtbl_rx *)tlv;
- rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
- rx->rca2 = 1;
- rx->rv = 1;
-}
-
-static void
-mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
-#define EXTRA_INFO_VER BIT(0)
-#define EXTRA_INFO_NEW BIT(1)
- struct sta_rec_basic *basic;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
-
- basic = (struct sta_rec_basic *)tlv;
- basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
-
- if (enable) {
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
-
- if (!sta) {
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
- return;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_STATION:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->aid = cpu_to_le16(sta->aid);
- basic->qos = sta->wme;
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, false);
}
static void
mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
- const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
- if (!sta->he_cap.has_he)
+ if (!sta->deflink.he_cap.has_he)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
he = (struct sta_rec_he *)tlv;
@@ -1376,8 +720,9 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
- if (mvif->cap.ldpc && (elem->phy_cap_info[1] &
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ if (mvif->cap.he_ldpc &&
+ (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
cap |= STA_REC_HE_CAP_LDPC;
if (elem->phy_cap_info[1] &
@@ -1434,22 +779,23 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
he->he_cap = cpu_to_le32(cap);
- switch (sta->bandwidth) {
+ mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW8080],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80p80));
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW160],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_160));
fallthrough;
default:
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW80],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80));
break;
}
@@ -1480,43 +826,11 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static void
-mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
-{
- struct sta_rec_uapsd *uapsd;
- struct tlv *tlv;
-
- if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
- uapsd = (struct sta_rec_uapsd *)tlv;
-
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
- uapsd->dac_map |= BIT(3);
- uapsd->tac_map |= BIT(3);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
- uapsd->dac_map |= BIT(2);
- uapsd->tac_map |= BIT(2);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
- uapsd->dac_map |= BIT(1);
- uapsd->tac_map |= BIT(1);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
- uapsd->dac_map |= BIT(0);
- uapsd->tac_map |= BIT(0);
- }
- uapsd->max_sp = sta->max_sp;
-}
-
-static void
-mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
+mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct ieee80211_vif *vif)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
struct sta_rec_muru *muru;
struct tlv *tlv;
@@ -1524,33 +838,32 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->vht_cap.vht_supported)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
mvif->cap.vht_mu_ebfer ||
mvif->cap.vht_mu_ebfee;
+ if (!is_mt7915(&dev->mt76))
+ muru->cfg.mimo_ul_en = true;
+ muru->cfg.ofdma_dl_en = true;
- muru->mimo_dl.vht_mu_bfee =
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ if (sta->deflink.vht_cap.vht_supported)
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
- if (!sta->he_cap.has_he)
+ if (!sta->deflink.he_cap.has_he)
return;
muru->mimo_dl.partial_bw_dl_mimo =
HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
- muru->cfg.mimo_ul_en = true;
muru->mimo_ul.full_ul_mimo =
HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
muru->mimo_ul.partial_ul_mimo =
HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
- muru->cfg.ofdma_dl_en = true;
muru->ofdma_dl.punc_pream_rx =
HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
muru->ofdma_dl.he_20m_in_40m_2g =
@@ -1574,10 +887,13 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_ht *ht;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ if (!sta->deflink.ht_cap.ht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
}
static void
@@ -1586,20 +902,20 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_vht *vht;
struct tlv *tlv;
- if (!sta->vht_cap.vht_supported)
+ if (!sta->deflink.vht_cap.vht_supported)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
static void
-mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
@@ -1609,99 +925,30 @@ mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->max_amsdu_len)
+ if (!sta->deflink.agg.max_amsdu_len)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
- IEEE80211_MAX_MPDU_LEN_VHT_7991;
msta->wcid.amsdu = true;
-}
-
-static void
-mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct wtbl_smps *smps;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
- wtbl_tlv, sta_wtbl);
- smps = (struct wtbl_smps *)tlv;
- smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
-}
-
-static void
-mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_ht *ht = NULL;
- struct tlv *tlv;
-
- /* wtbl ht */
- if (sta->ht_cap.ht_supported) {
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
- wtbl_tlv, sta_wtbl);
- ht = (struct wtbl_ht *)tlv;
- ht->ldpc = mvif->cap.ldpc &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
- ht->ht = true;
- }
-
- /* wtbl vht */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *vht;
- u8 af;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
- wtbl_tlv, sta_wtbl);
- vht = (struct wtbl_vht *)tlv;
- vht->ldpc = mvif->cap.ldpc &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- vht->vht = true;
-
- af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
- if (ht)
- ht->af = max_t(u8, ht->af, af);
- }
-
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
-}
-
-static void
-mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct mt7915_sta *msta;
- struct wtbl_hdr_trans *htr = NULL;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, sizeof(*htr),
- wtbl_tlv, sta_wtbl);
- htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = true;
- if (vif->type == NL80211_IFTYPE_STATION)
- htr->to_ds = true;
- else
- htr->from_ds = true;
- if (!sta)
+ switch (sta->deflink.agg.max_amsdu_len) {
+ case IEEE80211_MAX_MPDU_LEN_VHT_11454:
+ if (!is_mt7915(&dev->mt76)) {
+ amsdu->max_mpdu_size =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ return;
+ }
+ fallthrough;
+ case IEEE80211_MAX_MPDU_LEN_HT_7935:
+ case IEEE80211_MAX_MPDU_LEN_VHT_7991:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ return;
+ default:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
return;
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) {
- htr->to_ds = true;
- htr->from_ds = true;
}
}
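The rewritten A-MSDU switch above translates mac80211's agg.max_amsdu_len into the firmware's three-value VHT max-MPDU encoding, allowing the 11454-byte size only on non-mt7915 chips. A compact sketch of the same mapping; the numeric constants mirror the kernel's defines but are restated here as assumptions:

#include <stdio.h>

/* Illustrative restatement of the kernel constants (assumed values) */
enum { MPDU_SIZE_3895, MPDU_SIZE_7991, MPDU_SIZE_11454 };
#define MAX_MPDU_LEN_HT_7935	7935
#define MAX_MPDU_LEN_VHT_7991	7991
#define MAX_MPDU_LEN_VHT_11454	11454

static int fw_max_mpdu_size(int max_amsdu_len, int is_mt7915)
{
	switch (max_amsdu_len) {
	case MAX_MPDU_LEN_VHT_11454:
		if (!is_mt7915)
			return MPDU_SIZE_11454;
		/* mt7915 tops out at 7991 bytes */
		/* fallthrough */
	case MAX_MPDU_LEN_HT_7935:
	case MAX_MPDU_LEN_VHT_7991:
		return MPDU_SIZE_7991;
	default:
		return MPDU_SIZE_3895;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       fw_max_mpdu_size(11454, 1),	/* 1: capped at 7991 */
	       fw_max_mpdu_size(11454, 0),	/* 2: full 11454 */
	       fw_max_mpdu_size(3839, 0));	/* 0: default 3895 */
	return 0;
}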
@@ -1712,48 +959,30 @@ mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta;
struct wtbl_req_hdr *wtbl_hdr;
+ struct mt76_wcid *wcid;
struct tlv *tlv;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ wcid = sta ? &msta->wcid : NULL;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- tlv, &skb);
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_RESET_AND_SET, tlv,
+ &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, tlv, wtbl_hdr);
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, tlv, wtbl_hdr);
-
+ mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv,
+ wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
if (sta)
- mt7915_mcu_wtbl_ht_tlv(skb, vif, sta, tlv, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
+ wtbl_hdr, mvif->cap.ht_ldpc,
+ mvif->cap.vht_ldpc);
return 0;
}
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!skb)
- return -ENOMEM;
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
- true);
-}
-
static inline bool
mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
@@ -1768,8 +997,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
if (!bfee && tx_ant < 2)
return false;
- if (sta->he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
if (bfee)
return mvif->cap.he_su_ebfee &&
@@ -1779,8 +1008,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
}
- if (sta->vht_cap.vht_supported) {
- u32 cap = sta->vht_cap.cap;
+ if (sta->deflink.vht_cap.vht_supported) {
+ u32 cap = sta->deflink.vht_cap.cap;
if (bfee)
return mvif->cap.vht_su_ebfee &&
@@ -1806,7 +1035,7 @@ static void
mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
struct sta_rec_bf *bf)
{
- struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+ struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
u8 n = 0;
bf->tx_mode = MT_PHY_TYPE_HT;
@@ -1831,7 +1060,7 @@ static void
mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
struct sta_rec_bf *bf, bool explicit)
{
- struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
@@ -1852,14 +1081,14 @@ mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = bf->ncol;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->nrow = 1;
} else {
bf->nrow = tx_ant;
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = nss_mcs;
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
bf->ibf_nrow = 1;
}
}
@@ -1868,9 +1097,10 @@ static void
mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
struct mt7915_phy *phy, struct sta_rec_bf *bf)
{
- struct ieee80211_sta_he_cap *pc = &sta->he_cap;
+ struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
- const struct ieee80211_sta_he_cap *vc = mt7915_get_he_phy_cap(phy, vif);
+ const struct ieee80211_sta_he_cap *vc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
@@ -1892,7 +1122,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = bf->ncol;
- if (sta->bandwidth != IEEE80211_STA_RX_BW_160)
+ if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
return;
/* go over for 160MHz and 80p80 */
@@ -1928,8 +1158,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_phy *phy =
- mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct mt7915_phy *phy = mvif->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -1941,32 +1170,35 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
};
bool ebf;
+ if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ return;
+
ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
if (!ebf && !dev->ibf)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
/* he: eBF only, in accordance with spec
	 * vht: supports both eBF and iBF
	 * ht: iBF only, since mac80211 lacks eBF support
*/
- if (sta->he_cap.has_he && ebf)
+ if (sta->deflink.he_cap.has_he && ebf)
mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
- else if (sta->vht_cap.vht_supported)
+ else if (sta->deflink.vht_cap.vht_supported)
mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf);
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
mt7915_mcu_sta_bfer_ht(sta, phy, bf);
else
return;
bf->bf_cap = ebf ? ebf : dev->ibf << 1;
- bf->bw = sta->bandwidth;
- bf->ibf_dbw = sta->bandwidth;
+ bf->bw = sta->deflink.bandwidth;
+ bf->ibf_dbw = sta->deflink.bandwidth;
bf->ibf_nrow = tx_ant;
- if (!ebf && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
bf->ibf_timeout = 0x48;
else
bf->ibf_timeout = 0x18;
@@ -1976,7 +1208,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
else
bf->mem_20m = matrix[bf->nrow][bf->ncol];
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
case IEEE80211_STA_RX_BW_80:
bf->mem_total = bf->mem_20m * 2;
@@ -1995,26 +1227,28 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_phy *phy =
- mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct mt7915_phy *phy = mvif->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
u8 nrow = 0;
+ if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
+ return;
+
if (!mt7915_is_ebf_supported(phy, vif, sta, true))
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
- if (sta->he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
pe->phy_cap_info[5]);
- } else if (sta->vht_cap.vht_supported) {
- struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
pc->cap);
@@ -2050,13 +1284,13 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct sta_rec_ra_fixed *ra;
struct sk_buff *skb;
struct tlv *tlv;
- int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
ra = (struct sta_rec_ra_fixed *)tlv;
switch (field) {
@@ -2070,7 +1304,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
ra->phy = *phy;
break;
case RATE_PARAM_MMPS_UPDATE:
- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
break;
default:
break;
@@ -2091,19 +1325,19 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@@ -2126,25 +1360,30 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct sta_phy phy = {};
int ret, nrates = 0;
-#define __sta_phy_bitrate_mask_check(_mcs, _gi, _he) \
+#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \
do { \
u8 i, gi = mask->control[band]._gi; \
gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
- for (i = 0; i <= sta->bandwidth; i++) { \
+ for (i = 0; i <= sta->deflink.bandwidth; i++) { \
phy.sgi |= gi << (i << (_he)); \
phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
} \
- for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \
- nrates += hweight16(mask->control[band]._mcs[i]); \
- phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \
+ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \
+ if (!mask->control[band]._mcs[i]) \
+ continue; \
+ nrates += hweight16(mask->control[band]._mcs[i]); \
+ phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ if (_ht) \
+ phy.mcs += 8 * i; \
+ } \
} while (0)
- if (sta->he_cap.has_he) {
- __sta_phy_bitrate_mask_check(he_mcs, he_gi, 1);
- } else if (sta->vht_cap.vht_supported) {
- __sta_phy_bitrate_mask_check(vht_mcs, gi, 0);
- } else if (sta->ht_cap.ht_supported) {
- __sta_phy_bitrate_mask_check(ht_mcs, gi, 0);
+ if (sta->deflink.he_cap.has_he) {
+ __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
+ } else if (sta->deflink.ht_cap.ht_supported) {
+ __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
phy.mcs = ffs(mask->control[band].legacy) - 1;
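The reworked __sta_phy_bitrate_mask_check() fixes fixed-rate selection: the old body derived phy.mcs from mask group 0 only, so any HT rate above MCS 7 was lost, while the new body scans every populated group and, when _ht is set, offsets the index by 8 per group. A standalone sketch of that selection logic, with GCC builtins standing in for the kernel's hweight16() and ffs():

#include <stdio.h>
#include <stdint.h>

/* Pick the fixed HT rate from a 4-group MCS mask (HT MCS 0-31).
 * Mirrors the fixed macro's logic; the sample mask is hypothetical.
 */
int main(void)
{
	uint16_t ht_mcs[4] = { 0x0000, 0x0080, 0x0000, 0x0000 }; /* MCS 15 */
	int i, mcs = -1, nrates = 0;

	for (i = 0; i < 4; i++) {
		if (!ht_mcs[i])
			continue;
		nrates += __builtin_popcount(ht_mcs[i]);	/* hweight16 */
		mcs = (__builtin_ffs(ht_mcs[i]) - 1) + 8 * i;	/* _ht case */
	}
	/* nrates == 1 selects fixed-rate mode; prints "nrates 1, MCS 15" */
	printf("nrates %d, MCS %d\n", nrates, mcs);
	return 0;
}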
@@ -2177,7 +1416,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
* actual txrate hardware sends out.
*/
addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
- if (sta->he_cap.has_he)
+ if (sta->deflink.he_cap.has_he)
mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
else
mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
@@ -2204,23 +1443,25 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra *ra;
struct tlv *tlv;
- u32 supp_rate = sta->supp_rates[band];
+ u32 supp_rate = sta->deflink.supp_rates[band];
u32 cap = sta->wme ? STA_CAP_WMM : 0;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra *)tlv;
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
- ra->bw = sta->bandwidth;
- ra->phy.bw = sta->bandwidth;
+ ra->bw = sta->deflink.bandwidth;
+ ra->phy.bw = sta->deflink.bandwidth;
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
@@ -2240,22 +1481,22 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
}
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
ra->supp_mode |= MODE_HT;
- ra->af = sta->ht_cap.ampdu_factor;
- ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ ra->af = sta->deflink.ht_cap.ampdu_factor;
+ ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
cap |= STA_CAP_HT;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
cap |= STA_CAP_SGI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
cap |= STA_CAP_SGI_40;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
cap |= STA_CAP_TX_STBC;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
- if (mvif->cap.ldpc &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ if (mvif->cap.ht_ldpc &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
@@ -2263,34 +1504,38 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
u8 af;
ra->supp_mode |= MODE_VHT;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
+ sta->deflink.vht_cap.cap);
ra->af = max_t(u8, ra->af, af);
cap |= STA_CAP_VHT;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
cap |= STA_CAP_VHT_SGI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
cap |= STA_CAP_VHT_SGI_160;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
cap |= STA_CAP_VHT_TX_STBC;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if (mvif->cap.ldpc &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ if (mvif->cap.vht_ldpc &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
mask->control[band].vht_mcs);
}
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
ra->supp_mode |= MODE_HE;
cap |= STA_CAP_HE;
+
+ if (sta->deflink.he_6ghz_capa.capa)
+ ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
@@ -2304,8 +1549,8 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2313,7 +1558,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
	 * once dev->rc_work changes the settings, the driver should also
* update sta_rec_he here.
*/
- if (sta->he_cap.has_he && changed)
+ if (changed)
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* sta_rec_ra accommodates BW, NSS and only MCS range format
@@ -2371,18 +1616,19 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable,
+ !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
if (!enable)
goto out;
/* tag order is in accordance with firmware dependency. */
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -2390,27 +1636,31 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
/* starec vht */
mt7915_mcu_sta_vht_tlv(skb, sta);
/* starec uapsd */
- mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta) {
/* starec amsdu */
- mt7915_mcu_sta_amsdu_tlv(skb, vif, sta);
+ mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
/* starec he */
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* starec muru */
- mt7915_mcu_sta_muru_tlv(skb, sta, vif);
+ mt7915_mcu_sta_muru_tlv(dev, skb, sta, vif);
/* starec bfee */
mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta);
}
ret = mt7915_mcu_add_group(dev, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@@ -2471,7 +1721,7 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
if (!offs->cntdwn_counter_offs[0])
return;
- sub_tag = vif->csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
+ sub_tag = vif->bss_conf.csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info),
&bcn->sub_ntlv, &bcn->len);
info = (struct bss_info_bcn_cntdwn *)tlv;
@@ -2479,6 +1729,61 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
}
static void
+mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct bss_info_bcn_mbss *mbss;
+ const struct element *elem;
+ struct tlv *tlv;
+
+ if (!vif->bss_conf.bssid_indicator)
+ return;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID,
+ sizeof(*mbss), &bcn->sub_ntlv,
+ &bcn->len);
+
+ mbss = (struct bss_info_bcn_mbss *)tlv;
+ mbss->offset[0] = cpu_to_le16(offs->tim_offset);
+ mbss->bitmap = cpu_to_le32(1);
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID,
+ &skb->data[offs->mbssid_off],
+ skb->len - offs->mbssid_off) {
+ const struct element *sub_elem;
+
+ if (elem->datalen < 2)
+ continue;
+
+ for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
+ const struct ieee80211_bssid_index *idx;
+ const u8 *idx_ie;
+
+ if (sub_elem->id || sub_elem->datalen < 4)
+ continue; /* not a valid BSS profile */
+
+ /* Find WLAN_EID_MULTI_BSSID_IDX
+ * in the merged nontransmitted profile
+ */
+ idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ sub_elem->data,
+ sub_elem->datalen);
+ if (!idx_ie || idx_ie[1] < sizeof(*idx))
+ continue;
+
+ idx = (void *)(idx_ie + 2);
+ if (!idx->bssid_index || idx->bssid_index > 31)
+ continue;
+
+ mbss->offset[idx->bssid_index] =
+ cpu_to_le16(idx_ie - skb->data);
+ mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index));
+ }
+ }
+}
+
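mt7915_mcu_beacon_mbss() above walks the beacon's element area twice: the outer loop visits WLAN_EID_MULTIPLE_BSSID containers, the inner loop visits each nontransmitted profile and records a per-BSSID-index offset plus a bitmap bit. The traversal is the standard id/len TLV walk behind for_each_element(); a self-contained sketch (struct element and the sample buffer are redefined here, not taken from the kernel):

#include <stdio.h>
#include <stdint.h>

struct element {
	uint8_t id;
	uint8_t datalen;
	uint8_t data[];
};

int main(void)
{
	/* two elements: SSID "ap" (id 0), Multiple BSSID (id 71) */
	uint8_t ies[] = { 0, 2, 'a', 'p', 71, 3, 2, 0x55, 0xaa };
	const uint8_t *pos = ies, *end = ies + sizeof(ies);

	while (end - pos >= 2) {
		const struct element *elem = (const void *)pos;

		if (elem->datalen > end - pos - 2)
			break;	/* malformed: element overruns the buffer */
		printf("id %u, %u byte(s) of data\n", elem->id, elem->datalen);
		pos += 2 + elem->datalen;
	}
	return 0;
}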
+static void
mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *rskb, struct sk_buff *skb,
struct bss_info_bcn *bcn,
@@ -2490,6 +1795,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
u8 *buf;
int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
len, &bcn->sub_ntlv, &bcn->len);
@@ -2500,15 +1806,15 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
if (offs->cntdwn_counter_offs[0]) {
u16 offset = offs->cntdwn_counter_offs[0];
- if (vif->csa_active)
+ if (vif->bss_conf.csa_active)
cont->csa_ofs = cpu_to_le16(offset - 4);
- if (vif->color_change_active)
+ if (vif->bss_conf.color_change_active)
cont->bcc_ofs = cpu_to_le16(offset - 3);
}
buf = (u8 *)tlv + sizeof(*cont);
- mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
- true);
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, BSS_CHANGED_BEACON);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
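The padding added at the top of mt7915_mcu_beacon_cont() rounds the subtlv length up before the TXD and frame body are copied behind the header. A quick standalone check that the expression is plain round-up-to-a-multiple-of-4, i.e. equivalent to the kernel's ALIGN(len, 4):

#include <assert.h>
#include <stdio.h>

static unsigned int pad4(unsigned int len)
{
	/* expression as used in the patch */
	return (len & 0x3) ? ((len | 0x3) + 1) : len;
}

int main(void)
{
	unsigned int len;

	for (len = 0; len < 4096; len++)
		assert(pad4(len) == ((len + 3) & ~3u));	/* ALIGN(len, 4) */
	printf("pad4() matches round-up-to-4 for 0..4095\n");
	return 0;
}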
@@ -2540,8 +1846,8 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
len);
if (ie && ie[1] >= sizeof(*ht)) {
ht = (void *)(ie + 2);
- vc->ldpc |= !!(le16_to_cpu(ht->cap_info) &
- IEEE80211_HT_CAP_LDPC_CODING);
+ vc->ht_ldpc = !!(le16_to_cpu(ht->cap_info) &
+ IEEE80211_HT_CAP_LDPC_CODING);
}
ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
@@ -2552,7 +1858,7 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
vht = (void *)(ie + 2);
bc = le32_to_cpu(vht->vht_cap_info);
- vc->ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+ vc->vht_ldpc = !!(bc & IEEE80211_VHT_CAP_RXLDPC);
vc->vht_su_ebfer =
(bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
(pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
@@ -2571,11 +1877,13 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
mgmt->u.beacon.variable, len);
if (ie && ie[1] >= sizeof(*he) + 1) {
const struct ieee80211_sta_he_cap *pc =
- mt7915_get_he_phy_cap(phy, vif);
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
he = (void *)(ie + 3);
+ vc->he_ldpc =
+ HE_PHY(CAP1_LDPC_CODING_IN_PAYLOAD, pe->phy_cap_info[1]);
vc->he_su_ebfer =
HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
@@ -2588,10 +1896,77 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
}
}
-int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int en)
+static void
+mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct sk_buff *rskb, struct bss_info_bcn *bcn,
+ u32 changed)
+{
+#define OFFLOAD_TX_MODE_SU BIT(0)
+#define OFFLOAD_TX_MODE_MU BIT(1)
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_info_inband_discovery *discov;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb = NULL;
+ struct tlv *tlv;
+ bool ext_phy = phy != &dev->phy;
+ u8 *buf, interval;
+ int len;
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ vif->bss_conf.fils_discovery.max_interval) {
+ interval = vif->bss_conf.fils_discovery.max_interval;
+ skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
+ } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
+ vif->bss_conf.unsol_bcast_probe_resp_interval) {
+ interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->band = band;
+
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
+
+ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+
+ if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
+ len, &bcn->sub_ntlv, &bcn->len);
+ discov = (struct bss_info_inband_discovery *)tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+ discov->tx_interval = interval;
+ discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ discov->enable = true;
+
+ buf = (u8 *)tlv + sizeof(*discov);
+
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, changed);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+}
+
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int en, u32 changed)
{
-#define MAX_BEACON_SIZE 512
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
@@ -2600,358 +1975,113 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct sk_buff *skb, *rskb;
struct tlv *tlv;
struct bss_info_bcn *bcn;
- int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+ int len = MT7915_MAX_BSS_OFFLOAD_SIZE;
+ bool ext_phy = phy != &dev->phy;
+
+ if (vif->bss_conf.nontransmitted)
+ return 0;
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
bcn = (struct bss_info_bcn *)tlv;
bcn->enable = en;
if (!en)
goto out;
- skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!skb)
return -EINVAL;
- if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
+ if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) {
dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
dev_kfree_skb(skb);
return -EINVAL;
}
- if (mvif->mt76.band_idx) {
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- }
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue = FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
mt7915_mcu_beacon_check_caps(phy, vif, skb);
- /* TODO: subtag - 11v MBSSID */
mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
+ if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+ changed & BSS_CHANGED_FILS_DISCOVERY)
+ mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
+ bcn, changed);
+
out:
return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
-static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
- u32 option)
+static int mt7915_driver_own(struct mt7915_dev *dev, u8 band)
{
- struct {
- __le32 option;
- __le32 addr;
- } req = {
- .option = cpu_to_le32(option),
- .addr = cpu_to_le32(addr),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(FW_START_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-
-static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
-{
- struct {
- __le32 op;
- } req = {
- .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_SEM_CONTROL), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
-{
- struct {
- u8 check_crc;
- u8 reserved[3];
- } req = {
- .check_crc = 0,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_FINISH_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_driver_own(struct mt7915_dev *dev)
-{
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0,
- MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) {
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band),
+ MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
- return 0;
-}
-
-static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
- u32 len, u32 mode)
-{
- struct {
- __le32 addr;
- __le32 len;
- __le32 mode;
- } req = {
- .addr = cpu_to_le32(addr),
- .len = cpu_to_le32(len),
- .mode = cpu_to_le32(mode),
- };
- int attr;
-
- if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
- attr = MCU_CMD(PATCH_START_REQ);
- else
- attr = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
-
- return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
-}
-
-static int mt7915_load_patch(struct mt7915_dev *dev)
-{
- const struct mt7915_patch_hdr *hdr;
- const struct firmware *fw = NULL;
- int i, ret, sem;
-
- sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
- switch (sem) {
- case PATCH_IS_DL:
- return 0;
- case PATCH_NOT_DL_SEM_SUCCESS:
- break;
- default:
- dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
- return -EAGAIN;
- }
-
- ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
- if (ret)
- goto out;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
+	/* clear irq once driver own succeeds */
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band),
+ MT_TOP_LPCR_HOST_BAND_STAT);
- hdr = (const struct mt7915_patch_hdr *)(fw->data);
-
- dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
- be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
-
- for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
- struct mt7915_patch_sec *sec;
- const u8 *dl;
- u32 len, addr;
-
- sec = (struct mt7915_patch_sec *)(fw->data + sizeof(*hdr) +
- i * sizeof(*sec));
- if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
- PATCH_SEC_TYPE_INFO) {
- ret = -EINVAL;
- goto out;
- }
-
- addr = be32_to_cpu(sec->info.addr);
- len = be32_to_cpu(sec->info.len);
- dl = fw->data + be32_to_cpu(sec->offs);
-
- ret = mt7915_mcu_init_download(dev, addr, len,
- DL_MODE_NEED_RSP);
- if (ret) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- goto out;
- }
-
- ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- dl, len, 4096);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to send patch\n");
- goto out;
- }
- }
-
- ret = mt7915_mcu_start_patch(dev);
- if (ret)
- dev_err(dev->mt76.dev, "Failed to start patch\n");
-
-out:
- sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
- switch (sem) {
- case PATCH_REL_SEM_SUCCESS:
- break;
- default:
- ret = -EAGAIN;
- dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
- break;
- }
- release_firmware(fw);
-
- return ret;
-}
-
-static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
+ return 0;
}
static int
-mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
- const struct mt7915_fw_trailer *hdr,
- const u8 *data, bool is_wa)
+mt7915_firmware_state(struct mt7915_dev *dev, bool wa)
{
- int i, offset = 0;
- u32 override = 0, option = 0;
-
- for (i = 0; i < hdr->n_region; i++) {
- const struct mt7915_fw_region *region;
- int err;
- u32 len, addr, mode;
-
- region = (const struct mt7915_fw_region *)((const u8 *)hdr -
- (hdr->n_region - i) * sizeof(*region));
- mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa);
- len = le32_to_cpu(region->len);
- addr = le32_to_cpu(region->addr);
-
- if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
- override = addr;
-
- err = mt7915_mcu_init_download(dev, addr, len, mode);
- if (err) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- return err;
- }
-
- err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- data + offset, len, 4096);
- if (err) {
- dev_err(dev->mt76.dev, "Failed to send firmware.\n");
- return err;
- }
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
- offset += len;
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
+ state, 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
}
-
- if (override)
- option |= FW_START_OVERRIDE;
-
- if (is_wa)
- option |= FW_START_WORKING_PDA_CR4;
-
- return mt7915_mcu_start_firmware(dev, override, option);
+ return 0;
}
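mt7915_firmware_state() composes the expected register value with FIELD_PREP() and then waits for the masked field to reach it, mt76_poll_msec() style. A sketch of the FIELD_PREP()/GENMASK() idiom with both macros reimplemented locally; the field layout and state value here are assumptions for illustration only:

#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define FW_STATE_FIELD	GENMASK(2, 0)	/* assumed register layout */

int main(void)
{
	unsigned int reg = 0x7;		/* pretend MT_TOP_MISC readback */
	unsigned int want = FIELD_PREP(FW_STATE_FIELD, 7 /* assumed RDY */);

	/* mt76_poll_msec() re-reads until (reg & mask) == want or timeout */
	printf("fw ready: %s\n",
	       (reg & FW_STATE_FIELD) == want ? "yes" : "no");
	return 0;
}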
-static int mt7915_load_ram(struct mt7915_dev *dev)
+static int mt7915_load_firmware(struct mt7915_dev *dev)
{
- const struct mt7915_fw_trailer *hdr;
- const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
- sizeof(*hdr));
-
- dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
- hdr->fw_ver, hdr->build_date);
-
- ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, false);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
- goto out;
+	/* make sure fw is in download state */
+ if (mt7915_firmware_state(dev, false)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ ret = mt7915_firmware_state(dev, false);
+ if (ret) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return ret;
+ }
}
- release_firmware(fw);
-
- ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev);
+ ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH));
if (ret)
return ret;
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
- goto out;
- }
-
- hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
- sizeof(*hdr));
-
- dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
- hdr->fw_ver, hdr->build_date);
-
- ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, true);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to start WA firmware\n");
- goto out;
- }
-
- snprintf(dev->mt76.hw->wiphy->fw_version,
- sizeof(dev->mt76.hw->wiphy->fw_version),
- "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int mt7915_load_firmware(struct mt7915_dev *dev)
-{
- int ret;
-
- ret = mt7915_load_patch(dev);
+ ret = mt76_connac2_load_ram(&dev->mt76, fw_name_var(dev, FIRMWARE_WM),
+ fw_name(dev, FIRMWARE_WA));
if (ret)
return ret;
- ret = mt7915_load_ram(dev);
+ ret = mt7915_firmware_state(dev, true);
if (ret)
return ret;
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_RDY), 1000)) {
- dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
- return -EIO;
- }
-
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
dev_dbg(dev->mt76.dev, "Firmware init done\n");
@@ -3021,7 +2151,7 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
u8 band_idx;
} req = {
.cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
- .band_idx = phy != &dev->phy,
+ .band_idx = phy->band_idx,
};
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
@@ -3107,18 +2237,29 @@ mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
- .headroom = sizeof(struct mt7915_mcu_txd),
+ .headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt7915_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int ret;
dev->mt76.mcu_ops = &mt7915_mcu_ops;
- ret = mt7915_driver_own(dev);
+ /* force firmware operation mode into normal state,
+	 * which should be set before the firmware download stage.
+ */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7915_driver_own(dev, 0);
if (ret)
return ret;
+	/* set driver own for band1 when two hifs exist */
+ if (dev->hif2) {
+ ret = mt7915_driver_own(dev, 1);
+ if (ret)
+ return ret;
+ }
ret = mt7915_load_firmware(dev);
if (ret)
@@ -3133,6 +2274,9 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
if (ret)
return ret;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
+
ret = mt7915_mcu_set_mwds(dev, 1);
if (ret)
return ret;
@@ -3153,14 +2297,15 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_FW_DOWNLOAD), 1000)) {
+ if (mt7915_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
return;
}
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN);
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ if (dev->hif2)
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ MT_TOP_LPCR_HOST_FW_OWN);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -3222,42 +2367,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
&req_mac, sizeof(req_mac), true);
}
-int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
-{
- struct {
- __le32 cmd;
- u8 band;
- u8 enable;
- } __packed req = {
- .cmd = cpu_to_le32(SCS_ENABLE),
- .band = band,
- .enable = enable + 1,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SCS_CTRL), &req,
- sizeof(req), false);
-}
-
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
-{
- struct mt7915_dev *dev = phy->dev;
- struct {
- u8 prot_idx;
- u8 band;
- u8 rsv[2];
- __le32 len_thresh;
- __le32 pkt_thresh;
- } __packed req = {
- .prot_idx = 1,
- .band = phy != &dev->phy,
- .len_thresh = cpu_to_le32(val),
- .pkt_thresh = cpu_to_le32(0x2),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
@@ -3285,7 +2394,7 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
struct edca *e = &req.edca[ac];
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT7915_MAX_WMM_SETS;
+ e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -3303,58 +2412,6 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
return mt7915_mcu_update_edca(dev, &req);
}
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
-{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx_lo;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 wlan_idx_hi;
- u8 rsv[2];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL), &req,
- sizeof(req), true);
-}
-
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
- enum mt7915_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } __packed req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
{
struct {
@@ -3453,12 +2510,109 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
sizeof(req), true);
}
+static int
+mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef,
+ int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->chandef.chan;
+ int freq = mphy->chandef.center_freq1;
+ struct mt7915_mcu_background_chain_ctrl req = {
+ .monitor_scan_type = 2, /* simple rx */
+ };
+
+ if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
+ return -EINVAL;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ switch (cmd) {
+ case CH_SWITCH_BACKGROUND_SCAN_START: {
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.monitor_bw = mt76_connac_chan_bw(chandef);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 1;
+ break;
+ }
+ case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 2;
+ break;
+ case CH_SWITCH_BACKGROUND_SCAN_STOP:
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.tx_stream = hweight8(mphy->antenna_mask);
+ req.rx_stream = mphy->antenna_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int err, region;
+
+ if (!chandef) { /* disable offchain */
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
+ 0, 0);
+ if (err)
+ return err;
+
+ return mt7915_mcu_background_chain_ctrl(phy, NULL,
+ CH_SWITCH_BACKGROUND_SCAN_STOP);
+ }
+
+ err = mt7915_mcu_background_chain_ctrl(phy, chandef,
+ CH_SWITCH_BACKGROUND_SCAN_START);
+ if (err)
+ return err;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
+ 0, region);
+}
+
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
{
+ static const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 0,
+ [NL80211_BAND_5GHZ] = 1,
+ [NL80211_BAND_6GHZ] = 2,
+ };
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1;
- bool ext_phy = phy != &dev->phy;
struct {
u8 control_ch;
u8 center_ch;
@@ -3479,11 +2633,11 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7915_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
- .band_idx = ext_phy,
- .channel_band = chandef->chan->band,
+ .band_idx = phy->band_idx,
+ .channel_band = ch_band[chandef->chan->band],
};
#ifdef CONFIG_NL80211_TESTMODE
@@ -3494,17 +2648,18 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
req.rx_streams = phy->mt76->test.tx_antenna_mask;
- if (ext_phy) {
- req.tx_streams_num = 2;
- req.rx_streams >>= 2;
- }
+ if (phy != &dev->phy)
+ req.rx_streams >>= dev->chainshift;
}
#endif
- if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -3527,7 +2682,8 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
#define PAGE_IDX_MASK GENMASK(4, 2)
#define PER_PAGE_SIZE 0x400
struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
- u8 total = DIV_ROUND_UP(MT7915_EEPROM_SIZE, PER_PAGE_SIZE);
+ u16 eeprom_size = mt7915_eeprom_size(dev);
+ u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
int eep_len;
int i;
@@ -3536,8 +2692,8 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
struct sk_buff *skb;
int ret;
- if (i == total - 1 && !!(MT7915_EEPROM_SIZE % PER_PAGE_SIZE))
- eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE;
+ if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
+ eep_len = eeprom_size % PER_PAGE_SIZE;
else
eep_len = PER_PAGE_SIZE;
@@ -3770,19 +2926,26 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
/* strict order */
- static const enum mt7915_chan_mib_offs offs[] = {
- MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+ static const u32 offs[] = {
+ MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME,
+ MIB_BUSY_TIME_V2, MIB_TX_TIME_V2, MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[4];
struct sk_buff *skb;
- int i, ret;
+ int i, ret, start = 0, ofs = 20;
+
+ if (!is_mt7915(&dev->mt76)) {
+ start = 4;
+ ofs = 0;
+ }
for (i = 0; i < 4; i++) {
req[i].band = cpu_to_le32(phy != &dev->phy);
- req[i].offs = cpu_to_le32(offs[i]);
+ req[i].offs = cpu_to_le32(offs[i + start]);
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@@ -3790,7 +2953,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
if (ret)
return ret;
- res = (struct mt7915_mcu_mib *)(skb->data + 20);
+ res = (struct mt7915_mcu_mib *)(skb->data + ofs);
if (chan_switch)
goto out;
@@ -3842,7 +3005,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
u8 rsv[2];
} __packed req = {
.ctrl = {
- .band_idx = phy != &dev->phy,
+ .band_idx = phy->band_idx,
},
};
int level;
@@ -4137,6 +3300,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
case MT_PHY_TYPE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -4210,11 +3375,13 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi
struct sk_buff *skb;
struct tlv *tlv;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, sizeof(*bss_color));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR,
+ sizeof(*bss_color));
bss_color = (struct bss_info_color *)tlv;
bss_color->disable = !he_bss_color->enabled;
bss_color->color = he_bss_color->color;
@@ -4272,3 +3439,32 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE),
&req, sizeof(req), true);
}
+
+int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
+{
+ struct {
+ __le32 idx;
+ __le32 ofs;
+ __le32 data;
+ } __packed req = {
+ .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 28))),
+ .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(27, 0))),
+ .data = set ? cpu_to_le32(*val) : 0,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ if (set)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS),
+ &req, sizeof(req), false);
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ *val = le32_to_cpu(*(__le32 *)(skb->data + 8));
+ dev_kfree_skb(skb);
+
+ return 0;
+}
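The new mt7915_mcu_rf_regval() packs the radio index and register offset into one u32 before handing it to the MCU: bits 31:28 select the RF, bits 27:0 address the register. A standalone sketch of the same split, with plain shifts standing in for u32_get_bits() and a hypothetical sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regidx = 0x30001234;	/* hypothetical: RF 3, offset 0x1234 */
	uint32_t idx = regidx >> 28;		/* GENMASK(31, 28) */
	uint32_t ofs = regidx & 0x0fffffff;	/* GENMASK(27, 0) */

	printf("rf idx %u, ofs 0x%05x\n", idx, ofs);
	return 0;
}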
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 92268e696931..cd1edf553fc1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -6,25 +6,6 @@
#include "../mt76_connac_mcu.h"
-struct mt7915_mcu_txd {
- __le32 txd[8];
-
- __le16 len;
- __le16 pq_id;
-
- u8 cid;
- u8 pkt_type;
- u8 set_query; /* FW don't care */
- u8 seq;
-
- u8 uc_d2b0_rev;
- u8 ext_cid;
- u8 s2d_index;
- u8 ext_cid_ack;
-
- u32 reserved[5];
-} __packed __aligned(4);
-
enum {
MCU_ATE_SET_TRX = 0x1,
MCU_ATE_SET_FREQ_OFFSET = 0xa,
@@ -32,21 +13,6 @@ enum {
MCU_ATE_CLEAN_TXQUEUE = 0x1c,
};
-struct mt7915_mcu_rxd {
- __le32 rxd[6];
-
- __le16 len;
- __le16 pkt_type_id;
-
- u8 eid;
- u8 seq;
- __le16 __rsv;
-
- u8 ext_eid;
- u8 __rsv1[2];
- u8 s2d_index;
-};
-
struct mt7915_mcu_thermal_ctrl {
u8 ctrl_id;
u8 band_idx;
@@ -63,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl {
} __packed;
struct mt7915_mcu_thermal_notify {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
struct mt7915_mcu_thermal_ctrl ctrl;
__le32 temperature;
@@ -71,7 +37,7 @@ struct mt7915_mcu_thermal_notify {
} __packed;
struct mt7915_mcu_csa_notify {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
u8 omac_idx;
u8 csa_count;
@@ -79,8 +45,17 @@ struct mt7915_mcu_csa_notify {
u8 rsv;
} __packed;
+struct mt7915_mcu_bcc_notify {
+ struct mt76_connac2_mcu_rxd rxd;
+
+ u8 band_idx;
+ u8 omac_idx;
+ u8 cca_count;
+ u8 rsv;
+} __packed;
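
Swapping the private mt7915_mcu_rxd for the shared mt76_connac2_mcu_rxd means every firmware event now begins with one common header, so a handler can cast skb->data once it has dispatched on the event id. An illustrative sketch of that pattern (field layout invented for the example, not the real connac header):

#include <stdint.h>
#include <stdio.h>

struct rxd { uint8_t eid; uint8_t seq; };	/* toy stand-in header */

struct csa_notify {
	struct rxd rxd;				/* common header comes first */
	uint8_t omac_idx;
	uint8_t csa_count;
};

static void handle_event(const void *data)
{
	const struct csa_notify *c = data;	/* cast after eid dispatch */

	printf("csa countdown: %u\n", c->csa_count);
}

int main(void)
{
	struct csa_notify ev = { .rxd = { .eid = 1 }, .csa_count = 3 };

	handle_event(&ev);
	return 0;
}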
+
struct mt7915_mcu_rdd_report {
- struct mt7915_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd rxd;
u8 band_idx;
u8 long_detected;
@@ -131,6 +106,29 @@ struct mt7915_mcu_rdd_report {
} hw_pulse[32];
} __packed;
+struct mt7915_mcu_background_chain_ctrl {
+ u8 chan; /* primary channel */
+ u8 central_chan; /* central channel */
+ u8 bw;
+ u8 tx_stream;
+ u8 rx_stream;
+
+ u8 monitor_chan; /* monitor channel */
+ u8 monitor_central_chan; /* monitor central channel */
+ u8 monitor_bw;
+ u8 monitor_tx_stream;
+ u8 monitor_rx_stream;
+
+ u8 scan_mode; /* 0: ScanStop
+ * 1: ScanStart
+ * 2: ScanRunning
+ */
+ u8 band_idx; /* DBDC */
+ u8 monitor_scan_type;
+ u8 band; /* 0: 2.4GHz, 1: 5GHz */
+ u8 rsv[2];
+} __packed;
+
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
@@ -161,10 +159,16 @@ struct mt7915_mcu_mib {
} __packed;
enum mt7915_chan_mib_offs {
+ /* mt7915 */
MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
- MIB_OBSS_AIRTIME = 86
+ MIB_OBSS_AIRTIME = 86,
+ /* mt7916 */
+ MIB_BUSY_TIME_V2 = 0,
+ MIB_TX_TIME_V2 = 6,
+ MIB_RX_TIME_V2 = 8,
+ MIB_OBSS_AIRTIME_V2 = 490
};
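
The *_V2 entries exist because mt7916-generation firmware lays out the channel MIB reply differently; the same generational split is why an earlier hunk in this patch turns the hard-coded skb->data + 20 into skb->data + ofs. A sketch of serving both layouts from one reader, using the enum above (the is_v2 flag stands in for the actual chip check):

/* sketch only: pick per-generation offsets into the MIB reply */
struct mib_offs { unsigned int busy, tx, rx, obss; };

static struct mib_offs chan_mib_offs(int is_v2)
{
	if (is_v2)	/* mt7916 layout */
		return (struct mib_offs){ MIB_BUSY_TIME_V2, MIB_TX_TIME_V2,
					  MIB_RX_TIME_V2, MIB_OBSS_AIRTIME_V2 };
	return (struct mib_offs){ MIB_BUSY_TIME, MIB_TX_TIME,
				  MIB_RX_TIME, MIB_OBSS_AIRTIME };
}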
struct edca {
@@ -229,9 +233,6 @@ struct mt7915_mcu_muru_stats {
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET GENMASK(3, 0)
-#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
-#define MCU_PKT_ID 0xa0
-
enum {
MCU_FW_LOG_WM,
MCU_FW_LOG_WA,
@@ -266,39 +267,6 @@ enum mcu_mmps_mode {
MCU_MMPS_DISABLE,
};
-#define STA_TYPE_STA BIT(0)
-#define STA_TYPE_AP BIT(1)
-#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_WDS BIT(4)
-#define STA_TYPE_BC BIT(5)
-
-#define NETWORK_INFRA BIT(16)
-#define NETWORK_P2P BIT(17)
-#define NETWORK_IBSS BIT(18)
-#define NETWORK_WDS BIT(21)
-
-#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
-#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
-#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
-#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
-#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
-
-#define CONN_STATE_DISCONNECT 0
-#define CONN_STATE_CONNECT 1
-#define CONN_STATE_PORT_SECURE 2
-
-enum {
- SCS_SEND_DATA,
- SCS_SET_MANUAL_PD_TH,
- SCS_CONFIG,
- SCS_ENABLE,
- SCS_SHOW_INFO,
- SCS_GET_GLO_ADDR,
- SCS_GET_GLO_ADDR_EVENT,
-};
-
struct bss_info_bmc_rate {
__le16 tag;
__le16 len;
@@ -399,11 +367,23 @@ struct bss_info_bcn_cont {
__le16 pkt_len;
} __packed __aligned(4);
+struct bss_info_inband_discovery {
+ __le16 tag;
+ __le16 len;
+ u8 tx_type;
+ u8 tx_mode;
+ u8 tx_interval;
+ u8 enable;
+ __le16 rsv;
+ __le16 prob_rsp_len;
+} __packed __aligned(4);
+
enum {
BSS_INFO_BCN_CSA,
BSS_INFO_BCN_BCC,
BSS_INFO_BCN_MBSSID,
BSS_INFO_BCN_CONTENT,
+ BSS_INFO_BCN_DISCOV,
BSS_INFO_BCN_MAX
};
@@ -458,6 +438,26 @@ enum {
MURU_GET_TXC_TX_STATS = 151,
};
+enum {
+ SER_QUERY,
+ /* recovery */
+ SER_SET_RECOVER_L1,
+ SER_SET_RECOVER_L2,
+ SER_SET_RECOVER_L3_RX_ABORT,
+ SER_SET_RECOVER_L3_TX_ABORT,
+ SER_SET_RECOVER_L3_TX_DISABLE,
+ SER_SET_RECOVER_L3_BF,
+ /* action */
+ SER_ENABLE = 2,
+ SER_RECOVER
+};
+
+#define MT7915_MAX_BEACON_SIZE 512
+#define MT7915_MAX_INBAND_FRAME_SIZE 256
+#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+ MT7915_MAX_INBAND_FRAME_SIZE + \
+ MT7915_BEACON_UPDATE_SIZE)
+
#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_omac) + \
sizeof(struct bss_info_basic) +\
@@ -471,6 +471,7 @@ enum {
#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_bcn_cntdwn) + \
sizeof(struct bss_info_bcn_mbss) + \
- sizeof(struct bss_info_bcn_cont))
+ sizeof(struct bss_info_bcn_cont) + \
+ sizeof(struct bss_info_inband_discovery))
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 1f6ba306c850..7bd5f6725d7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -1,103 +1,450 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
+static const u32 mt7915_reg[] = {
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+ [INT1_SOURCE_CSR] = 0xd7088,
+ [INT1_MASK_CSR] = 0xd708c,
+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
+ [INT_MCU_CMD_EVENT] = 0x3108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c3fffff,
+ [FW_EXCEPTION_ADDR] = 0x219848,
+ [SWDEF_BASE_ADDR] = 0x41f200,
+};
+
+static const u32 mt7916_reg[] = {
+ [INT_SOURCE_CSR] = 0xd4200,
+ [INT_MASK_CSR] = 0xd4204,
+ [INT1_SOURCE_CSR] = 0xd8200,
+ [INT1_MASK_CSR] = 0xd8204,
+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
+ [INT_MCU_CMD_EVENT] = 0x2108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_EXCEPTION_ADDR] = 0x022050bc,
+ [SWDEF_BASE_ADDR] = 0x411400,
+};
+
+static const u32 mt7986_reg[] = {
+ [INT_SOURCE_CSR] = 0x24200,
+ [INT_MASK_CSR] = 0x24204,
+ [INT1_SOURCE_CSR] = 0x28200,
+ [INT1_MASK_CSR] = 0x28204,
+ [INT_MCU_CMD_SOURCE] = 0x241f0,
+ [INT_MCU_CMD_EVENT] = 0x54000108,
+ [WFDMA0_ADDR] = 0x24000,
+ [WFDMA0_PCIE1_ADDR] = 0x28000,
+ [WFDMA_EXT_CSR_ADDR] = 0x27000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_EXCEPTION_ADDR] = 0x02204ffc,
+ [SWDEF_BASE_ADDR] = 0x411400,
+};
+
+static const u32 mt7915_offs[] = {
+ [TMAC_CDTR] = 0x090,
+ [TMAC_ODTR] = 0x094,
+ [TMAC_ATCR] = 0x098,
+ [TMAC_TRCR0] = 0x09c,
+ [TMAC_ICR0] = 0x0a4,
+ [TMAC_ICR1] = 0x0b4,
+ [TMAC_CTCR0] = 0x0f4,
+ [TMAC_TFCR0] = 0x1e0,
+ [MDP_BNRCFR0] = 0x070,
+ [MDP_BNRCFR1] = 0x074,
+ [ARB_DRNGR0] = 0x194,
+ [ARB_SCR] = 0x080,
+ [RMAC_MIB_AIRTIME14] = 0x3b8,
+ [AGG_AWSCR0] = 0x05c,
+ [AGG_PCR0] = 0x06c,
+ [AGG_ACR0] = 0x084,
+ [AGG_ACR4] = 0x08c,
+ [AGG_MRCR] = 0x098,
+ [AGG_ATCR1] = 0x0f0,
+ [AGG_ATCR3] = 0x0f4,
+ [LPON_UTTR0] = 0x080,
+ [LPON_UTTR1] = 0x084,
+ [LPON_FRCR] = 0x314,
+ [MIB_SDR3] = 0x014,
+ [MIB_SDR4] = 0x018,
+ [MIB_SDR5] = 0x01c,
+ [MIB_SDR7] = 0x024,
+ [MIB_SDR8] = 0x028,
+ [MIB_SDR9] = 0x02c,
+ [MIB_SDR10] = 0x030,
+ [MIB_SDR11] = 0x034,
+ [MIB_SDR12] = 0x038,
+ [MIB_SDR13] = 0x03c,
+ [MIB_SDR14] = 0x040,
+ [MIB_SDR15] = 0x044,
+ [MIB_SDR16] = 0x048,
+ [MIB_SDR17] = 0x04c,
+ [MIB_SDR18] = 0x050,
+ [MIB_SDR19] = 0x054,
+ [MIB_SDR20] = 0x058,
+ [MIB_SDR21] = 0x05c,
+ [MIB_SDR22] = 0x060,
+ [MIB_SDR23] = 0x064,
+ [MIB_SDR24] = 0x068,
+ [MIB_SDR25] = 0x06c,
+ [MIB_SDR27] = 0x074,
+ [MIB_SDR28] = 0x078,
+ [MIB_SDR29] = 0x07c,
+ [MIB_SDRVEC] = 0x080,
+ [MIB_SDR31] = 0x084,
+ [MIB_SDR32] = 0x088,
+ [MIB_SDRMUBF] = 0x090,
+ [MIB_DR8] = 0x0c0,
+ [MIB_DR9] = 0x0c4,
+ [MIB_DR11] = 0x0cc,
+ [MIB_MB_SDR0] = 0x100,
+ [MIB_MB_SDR1] = 0x104,
+ [TX_AGG_CNT] = 0x0a8,
+ [TX_AGG_CNT2] = 0x164,
+ [MIB_ARNG] = 0x4b8,
+ [WTBLON_TOP_WDUCR] = 0x0,
+ [WTBL_UPDATE] = 0x030,
+ [PLE_FL_Q_EMPTY] = 0x0b0,
+ [PLE_FL_Q_CTRL] = 0x1b0,
+ [PLE_AC_QEMPTY] = 0x500,
+ [PLE_FREEPG_CNT] = 0x100,
+ [PLE_FREEPG_HEAD_TAIL] = 0x104,
+ [PLE_PG_HIF_GROUP] = 0x110,
+ [PLE_HIF_PG_INFO] = 0x114,
+ [AC_OFFSET] = 0x040,
+ [ETBF_PAR_RPT0] = 0x068,
+};
+
+static const u32 mt7916_offs[] = {
+ [TMAC_CDTR] = 0x0c8,
+ [TMAC_ODTR] = 0x0cc,
+ [TMAC_ATCR] = 0x00c,
+ [TMAC_TRCR0] = 0x010,
+ [TMAC_ICR0] = 0x014,
+ [TMAC_ICR1] = 0x018,
+ [TMAC_CTCR0] = 0x114,
+ [TMAC_TFCR0] = 0x0e4,
+ [MDP_BNRCFR0] = 0x090,
+ [MDP_BNRCFR1] = 0x094,
+ [ARB_DRNGR0] = 0x1e0,
+ [ARB_SCR] = 0x000,
+ [RMAC_MIB_AIRTIME14] = 0x0398,
+ [AGG_AWSCR0] = 0x030,
+ [AGG_PCR0] = 0x040,
+ [AGG_ACR0] = 0x054,
+ [AGG_ACR4] = 0x05c,
+ [AGG_MRCR] = 0x068,
+ [AGG_ATCR1] = 0x1a8,
+ [AGG_ATCR3] = 0x080,
+ [LPON_UTTR0] = 0x360,
+ [LPON_UTTR1] = 0x364,
+ [LPON_FRCR] = 0x37c,
+ [MIB_SDR3] = 0x698,
+ [MIB_SDR4] = 0x788,
+ [MIB_SDR5] = 0x780,
+ [MIB_SDR7] = 0x5a8,
+ [MIB_SDR8] = 0x78c,
+ [MIB_SDR9] = 0x024,
+ [MIB_SDR10] = 0x76c,
+ [MIB_SDR11] = 0x790,
+ [MIB_SDR12] = 0x558,
+ [MIB_SDR13] = 0x560,
+ [MIB_SDR14] = 0x564,
+ [MIB_SDR15] = 0x568,
+ [MIB_SDR16] = 0x7fc,
+ [MIB_SDR17] = 0x800,
+ [MIB_SDR18] = 0x030,
+ [MIB_SDR19] = 0x5ac,
+ [MIB_SDR20] = 0x5b0,
+ [MIB_SDR21] = 0x5b4,
+ [MIB_SDR22] = 0x770,
+ [MIB_SDR23] = 0x774,
+ [MIB_SDR24] = 0x778,
+ [MIB_SDR25] = 0x77c,
+ [MIB_SDR27] = 0x080,
+ [MIB_SDR28] = 0x084,
+ [MIB_SDR29] = 0x650,
+ [MIB_SDRVEC] = 0x5a8,
+ [MIB_SDR31] = 0x55c,
+ [MIB_SDR32] = 0x7a8,
+ [MIB_SDRMUBF] = 0x7ac,
+ [MIB_DR8] = 0x56c,
+ [MIB_DR9] = 0x570,
+ [MIB_DR11] = 0x574,
+ [MIB_MB_SDR0] = 0x688,
+ [MIB_MB_SDR1] = 0x690,
+ [TX_AGG_CNT] = 0x7dc,
+ [TX_AGG_CNT2] = 0x7ec,
+ [MIB_ARNG] = 0x0b0,
+ [WTBLON_TOP_WDUCR] = 0x200,
+ [WTBL_UPDATE] = 0x230,
+ [PLE_FL_Q_EMPTY] = 0x360,
+ [PLE_FL_Q_CTRL] = 0x3e0,
+ [PLE_AC_QEMPTY] = 0x600,
+ [PLE_FREEPG_CNT] = 0x380,
+ [PLE_FREEPG_HEAD_TAIL] = 0x384,
+ [PLE_PG_HIF_GROUP] = 0x00c,
+ [PLE_HIF_PG_INFO] = 0x388,
+ [AC_OFFSET] = 0x080,
+ [ETBF_PAR_RPT0] = 0x100,
+};
+
+static const struct mt76_connac_reg_map mt7915_reg_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
+static const struct mt76_connac_reg_map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0xae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
+static const struct mt76_connac_reg_map mt7986_reg_map[] = {
+ { 0x54000000, 0x402000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x403000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x404000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x405000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x406000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x407000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x408000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x40c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x40e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x420000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x420400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x420800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x420c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x421000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x421400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x421c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x421e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x422000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x423400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x424000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x424200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x424600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x424800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x426000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x430000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x480000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x490000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x820f0000, 0x4a0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0x4a0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0x4a0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0x4a0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0x4a1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0x4a1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0x4a1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0x4a3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0x4a4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0x4a4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0x4a4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0x4a4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0x4a8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0x4ae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0x4b0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0x4c0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x89000000, 0x4d0000, 0x01000 }, /* WF_MCU_CFG_ON */
+ { 0x89010000, 0x4d1000, 0x01000 }, /* WF_MCU_CIRQ */
+ { 0x89020000, 0x4d2000, 0x01000 }, /* WF_MCU_GPT */
+ { 0x89030000, 0x4d3000, 0x01000 }, /* WF_MCU_WDT */
+ { 0x80010000, 0x4d4000, 0x01000 }, /* WF_AXIDMA */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_remap;
+
+ if (is_mt7986(&dev->mt76))
+ return MT_CONN_INFRA_OFFSET(addr);
- mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ l1_remap = is_mt7915(&dev->mt76) ?
+ MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
+
+ dev->bus_ops->rmw(&dev->mt76, l1_remap,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
/* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L1);
+ dev->bus_ops->rr(&dev->mt76, l1_remap);
return MT_HIF_REMAP_BASE_L1 + offset;
}
static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ u32 offset, base;
- mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
- /* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L2);
+ if (is_mt7915(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
- return MT_HIF_REMAP_BASE_L2 + offset;
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ } else {
+ u32 ofs = is_mt7986(&dev->mt76) ? 0x400000 : 0;
+
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs,
+ MT_HIF_REMAP_L2_MASK_MT7916,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs);
+
+ offset += (MT_HIF_REMAP_BASE_L2_MT7916 + ofs);
+ }
+
+ return offset;
}
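
Both remap helpers follow the same windowed-access idiom: program the high bits of the target into a remap register, read it back so the posted write is flushed, then access a fixed window plus the low-bit offset. A standalone simulation of the idiom (window geometry invented for the example):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_BASE	0xe0000u	/* hypothetical fixed window */
#define OFFS_MASK	0xffffu

static uint32_t remap_reg;		/* stands in for MT_HIF_REMAP_L1 */

static uint32_t reg_remap(uint32_t addr)
{
	remap_reg = addr >> 16;		/* program the page bits */
	(void)remap_reg;		/* "use read to push write" */
	return WINDOW_BASE + (addr & OFFS_MASK);
}

int main(void)
{
	/* 0x7c123456 -> page 0x7c12, window offset 0x3456 */
	printf("0x%x\n", (unsigned)reg_remap(0x7c123456));
	return 0;
}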
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
int i;
if (addr < 0x100000)
return addr;
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ if (!dev->reg.map) {
+ dev_err(dev->mt76.dev, "err: reg_map is null\n");
+ return addr;
+ }
+
+ for (i = 0; i < dev->reg.map_size; i++) {
u32 ofs;
- if (addr < fixed_map[i].phys)
+ if (addr < dev->reg.map[i].phys)
continue;
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
+ ofs = addr - dev->reg.map[i].phys;
+ if (ofs > dev->reg.map[i].size)
continue;
- return fixed_map[i].mapped + ofs;
+ return dev->reg.map[i].maps + ofs;
}
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000))
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
return mt7915_reg_map_l1(dev, addr);
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ return mt7915_reg_map_l1(dev, addr);
+
+ /* CONN_INFRA: convert to physical addr and use layer 1 remap */
+ if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
+ addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
+ return mt7915_reg_map_l1(dev, addr);
+ }
+
return mt7915_reg_map_l2(dev, addr);
}
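
__mt7915_reg_addr() now resolves addresses in a fixed order: pass anything below 0x100000 straight through, linear-search the per-chip fixed map, fall back to L1 remap for the known physical ranges (first converting CONN_INFRA MCU addresses to physical), and use L2 remap as the last resort. A standalone sketch of the fixed-map search with the same ofs > size bound:

#include <stdint.h>
#include <stdio.h>

struct reg_map { uint32_t phys, maps, size; };

/* two illustrative entries; the real tables are the reg_map arrays above */
static const struct reg_map map[] = {
	{ 0x54000000, 0x02000, 0x01000 },
	{ 0x820c0000, 0x08000, 0x04000 },
};

static uint32_t fixed_map_lookup(uint32_t addr)
{
	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		uint32_t ofs;

		if (addr < map[i].phys)
			continue;
		ofs = addr - map[i].phys;
		if (ofs > map[i].size)
			continue;
		return map[i].maps + ofs;
	}
	return 0;	/* caller falls back to L1/L2 remap */
}

int main(void)
{
	printf("0x%x\n", (unsigned)fixed_map_lookup(0x820c0123)); /* 0x8123 */
	return 0;
}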
@@ -125,7 +472,9 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
+static int mt7915_mmio_init(struct mt76_dev *mdev,
+ void __iomem *mem_base,
+ u32 device_id)
{
struct mt76_bus_ops *bus_ops;
struct mt7915_dev *dev;
@@ -133,6 +482,29 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ switch (device_id) {
+ case 0x7915:
+ dev->reg.reg_rev = mt7915_reg;
+ dev->reg.offs_rev = mt7915_offs;
+ dev->reg.map = mt7915_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7915_reg_map);
+ break;
+ case 0x7906:
+ dev->reg.reg_rev = mt7916_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7916_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
+ break;
+ case 0x7986:
+ dev->reg.reg_rev = mt7986_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7986_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7986_reg_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
@@ -144,11 +516,214 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
bus_ops->rmw = mt7915_rmw;
dev->mt76.bus = bus_ops;
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ mdev->rev = (device_id << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ return 0;
+}
+
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
+ bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
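
Moving this helper into mmio.c keeps the pattern visible next to the tasklet: one software irqmask is updated under mmio.irq_lock and, when requested, mirrored into both interrupt-mask registers so the two PCIe interfaces never disagree. A minimal standalone model (pthread mutex standing in for the spinlock, registers as plain variables):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t irqmask, mask_csr0, mask_csr1;

static void set_irq_mask(int write_reg, uint32_t clear, uint32_t set)
{
	pthread_mutex_lock(&lock);
	irqmask = (irqmask & ~clear) | set;
	if (write_reg) {
		mask_csr0 = irqmask;	/* MT_INT_MASK_CSR */
		mask_csr1 = irqmask;	/* MT_INT1_MASK_CSR */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_irq_mask(1, 0, 0x3);
	printf("0x%x 0x%x\n", (unsigned)mask_csr0, (unsigned)mask_csr1);
	return 0;
}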
+
+static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
+ enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 intr, intr1, mask;
+
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ }
+
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX(MT_RXQ_MAIN))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+
+ if (intr & MT_INT_RX(MT_RXQ_BAND1))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+
+ if (!is_mt7915(&dev->mt76) &&
+ (intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
+
+ if (intr & MT_INT_RX(MT_RXQ_BAND1_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1_WA]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+}
+
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ }
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
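
The interrupt path keeps the classic split: the hard handler only masks further interrupts (through WED when it is active) and schedules the tasklet, while the tasklet reads and acknowledges the sources, fans the bits out to NAPI, and lets mt7915_rx_poll_complete() re-enable per-queue interrupts. A compressed standalone model of that handshake (toy register state, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t int_mask = 0x7, int_source = 0x5;	/* toy registers */
static int tasklet_scheduled;

static int hard_irq(void)		/* ~mt7915_irq_handler */
{
	int_mask = 0;			/* mask everything up front */
	tasklet_scheduled = 1;		/* defer the real work */
	return 1;			/* IRQ_HANDLED */
}

static void irq_tasklet(void)		/* ~mt7915_irq_tasklet */
{
	uint32_t intr = int_source;

	int_source = 0;			/* ack by writing the bits back */
	printf("dispatch NAPI for 0x%x\n", (unsigned)intr);
	int_mask = 0x7 & ~intr;		/* re-enabled later per queue */
}

int main(void)
{
	if (hard_irq() && tasklet_scheduled)
		irq_tasklet();
	return 0;
}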
+
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_check = mt7915_rx_check,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_ps = mt7915_sta_ps,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7915_ops, &drv_ops);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ ret = mt7915_mmio_init(mdev, mem_base, device_id);
+ if (ret)
+ goto error;
+
+ tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+ return dev;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ERR_PTR(ret);
+}
+
+static int __init mt7915_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7915_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7915_pci_driver);
+ if (ret)
+ goto error_pci;
+
+ if (IS_ENABLED(CONFIG_MT7986_WMAC)) {
+ ret = platform_driver_register(&mt7986_wmac_driver);
+ if (ret)
+ goto error_wmac;
+ }
return 0;
+
+error_wmac:
+ pci_unregister_driver(&mt7915_pci_driver);
+error_pci:
+ pci_unregister_driver(&mt7915_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7915_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7986_WMAC))
+ platform_driver_unregister(&mt7986_wmac_driver);
+
+ pci_unregister_driver(&mt7915_pci_driver);
+ pci_unregister_driver(&mt7915_hif_driver);
}
+
+module_init(mt7915_init);
+module_exit(mt7915_exit);
+MODULE_LICENSE("Dual BSD/GPL");
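
Module init now lives here because mmio.c is shared by the PCI and SoC back ends: three drivers are registered in order and unwound in reverse on failure, and mt7915_exit() tears them down in the opposite order. A standalone sketch of the idiom (stub registrations, one forced to fail):

#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return -1; }	/* force the unwind path */
static void unregister_a(void) { puts("unregistered a"); }

static int init_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_a;

	return 0;

err_a:
	unregister_a();
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}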
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 42d887383e8d..1eb11617a625 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -6,13 +6,13 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
-#include "../mt76.h"
+#include "../mt76_connac.h"
#include "regs.h"
#define MT7915_MAX_INTERFACES 19
-#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
-#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
+#define MT7916_WTBL_SIZE 544
+#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
MT7915_MAX_INTERFACES)
@@ -30,10 +30,28 @@
#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+#define MT7916_FIRMWARE_WA "mediatek/mt7916_wa.bin"
+#define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin"
+#define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin"
+
+#define MT7986_FIRMWARE_WA "mediatek/mt7986_wa.bin"
+#define MT7986_FIRMWARE_WM "mediatek/mt7986_wm.bin"
+#define MT7986_FIRMWARE_WM_MT7975 "mediatek/mt7986_wm_mt7975.bin"
+#define MT7986_ROM_PATCH "mediatek/mt7986_rom_patch.bin"
+#define MT7986_ROM_PATCH_MT7975 "mediatek/mt7986_rom_patch_mt7975.bin"
+
#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
+#define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin"
+#define MT7986_EEPROM_MT7975_DEFAULT "mediatek/mt7986_eeprom_mt7975.bin"
+#define MT7986_EEPROM_MT7975_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7975_dual.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT "mediatek/mt7986_eeprom_mt7976.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT_DBDC "mediatek/mt7986_eeprom_mt7976_dbdc.bin"
+#define MT7986_EEPROM_MT7976_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7976_dual.bin"
#define MT7915_EEPROM_SIZE 3584
+#define MT7916_EEPROM_SIZE 4096
+
#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
@@ -41,11 +59,14 @@
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7915_THERMAL_THROTTLE_MAX 100
+#define MT7915_CDEV_THROTTLE_MAX 99
#define MT7915_SKU_RATE_NUM 161
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
+#define MT7915_MIN_TWT_DUR 64
+#define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
@@ -68,9 +89,13 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA_EXT,
};
-struct mt7915_sta_key_conf {
- s8 keyidx;
- u8 key[16];
+enum mt7916_rxq_id {
+ MT7916_RXQ_MCU_WM = 0,
+ MT7916_RXQ_MCU_WA,
+ MT7916_RXQ_MCU_WA_MAIN,
+ MT7916_RXQ_MCU_WA_EXT,
+ MT7916_RXQ_BAND0,
+ MT7916_RXQ_BAND1,
};
struct mt7915_twt_flow {
@@ -102,9 +127,7 @@ struct mt7915_sta {
unsigned long jiffies;
unsigned long ampdu_state;
- struct mt76_sta_stats stats;
-
- struct mt7915_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
struct {
u8 flowid_mask;
@@ -113,7 +136,9 @@ struct mt7915_sta {
};
struct mt7915_vif_cap {
- bool ldpc:1;
+ bool ht_ldpc:1;
+ bool vht_ldpc:1;
+ bool he_ldpc:1;
bool vht_su_ebfer:1;
bool vht_su_ebfee:1;
bool vht_mu_ebfer:1;
@@ -172,8 +197,15 @@ struct mib_stats {
/* rx stats */
u32 rx_fifo_full_cnt;
u32 channel_idle_cnt;
+ u32 primary_cca_busy_time;
+ u32 secondary_cca_busy_time;
+ u32 primary_energy_detect_time;
+ u32 cck_mdrdy_time;
+ u32 ofdm_mdrdy_time;
+ u32 green_mdrdy_time;
u32 rx_vector_mismatch_cnt;
u32 rx_delimiter_fail_cnt;
+ u32 rx_mrdy_cnt;
u32 rx_len_mismatch_cnt;
u32 rx_mpdu_cnt;
u32 rx_ampdu_cnt;
@@ -200,16 +232,18 @@ struct mt7915_phy {
struct mt76_phy *mt76;
struct mt7915_dev *dev;
- struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+ struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
struct ieee80211_vif *monitor_vif;
struct thermal_cooling_device *cdev;
+ u8 cdev_state;
u8 throttle_state;
u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
u32 rxfilter;
u64 omac_mask;
+ u8 band_idx;
u16 noise;
@@ -217,7 +251,8 @@ struct mt7915_phy {
u8 slottime;
u8 rdd_state;
- int dfs_state;
+
+ u32 trb_ts;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@@ -247,12 +282,21 @@ struct mt7915_dev {
};
struct mt7915_hif *hif2;
+ struct mt7915_reg_desc reg;
+ u8 q_id[MT7915_MAX_QUEUE];
+ u32 q_int_mask[MT7915_MAX_QUEUE];
+ u32 wfdma_mask;
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
struct mt7915_phy phy;
+ /* monitor rx chain configured channel */
+ struct cfg80211_chan_def rdd2_chandef;
+ struct mt7915_phy *rdd2_phy;
+
u16 chainmask;
+ u16 chainshift;
u32 hif_idx;
struct work_struct init_work;
@@ -272,34 +316,39 @@ struct mt7915_dev {
bool flash_mode;
bool muru_debug;
bool ibf;
- u8 fw_debug_wm;
- u8 fw_debug_wa;
+
+ struct dentry *debugfs_dir;
+ struct rchan *relay_fwlog;
void *cal;
struct {
- u8 table_mask;
+ u8 debug_wm;
+ u8 debug_wa;
+ u8 debug_bin;
+ } fw;
+
+ struct {
+ u16 table_mask;
u8 n_agrt;
} twt;
+
+ struct reset_control *rstc;
+ void __iomem *dcm;
+ void __iomem *sku;
};
enum {
- MT_CTX0,
- MT_HIF0 = 0x0,
-
- MT_LMAC_AC00 = 0x0,
- MT_LMAC_AC01,
- MT_LMAC_AC02,
- MT_LMAC_AC03,
- MT_LMAC_ALTX0 = 0x10,
- MT_LMAC_BMC0,
- MT_LMAC_BCN0,
- MT_LMAC_PSMP0,
+ WFDMA0 = 0x0,
+ WFDMA1,
+ WFDMA_EXT,
+ __MT_WFDMA_MAX,
};
enum {
MT_RX_SEL0,
MT_RX_SEL1,
+ MT_RX_SEL2, /* monitor chain */
};
enum mt7915_rdd_cmd {
@@ -337,7 +386,7 @@ mt7915_hw_dev(struct ieee80211_hw *hw)
static inline struct mt7915_phy *
mt7915_ext_phy(struct mt7915_dev *dev)
{
- struct mt76_phy *phy = dev->mt76.phy2;
+ struct mt76_phy *phy = dev->mt76.phys[MT_BAND1];
if (!phy)
return NULL;
@@ -345,26 +394,52 @@ mt7915_ext_phy(struct mt7915_dev *dev)
return phy->priv;
}
-static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
+static inline u32 mt7915_check_adie(struct mt7915_dev *dev, bool sku)
{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
+ u32 mask = sku ? MT_CONNINFRA_SKU_MASK : MT_ADIE_TYPE_MASK;
+
+ if (!is_mt7986(&dev->mt76))
+ return 0;
+
+ return mt76_rr(dev, MT_CONNINFRA_SKU_DEC_ADDR) & mask;
}
extern const struct ieee80211_ops mt7915_ops;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
+extern struct pci_driver mt7915_pci_driver;
+extern struct pci_driver mt7915_hif_driver;
+extern struct platform_driver mt7986_wmac_driver;
+
+#ifdef CONFIG_MT7986_WMAC
+int mt7986_wmac_enable(struct mt7915_dev *dev);
+void mt7986_wmac_disable(struct mt7915_dev *dev);
+#else
+static inline int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ return 0;
+}
-u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
+static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+}
+#endif
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+void mt7915_wfsys_reset(struct mt7915_dev *dev);
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
+u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
-int mt7915_dma_init(struct mt7915_dev *dev);
+int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
int mt7915_mcu_init(struct mt7915_dev *dev);
@@ -378,22 +453,16 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int enable);
+ int enable, u32 changed);
int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -415,10 +484,7 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
bool hdr_trans);
int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
-int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
@@ -436,17 +502,23 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
- u8 index, u8 rx_sel, u8 val);
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef);
+int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
-static inline bool is_mt7915(struct mt76_dev *dev)
+static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
+{
+ return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
+}
+
+static inline u16 mt7915_eeprom_size(struct mt7915_dev *dev)
{
- return mt76_chip(dev) == 0x7915;
+ return is_mt7915(&dev->mt76) ? MT7915_EEPROM_SIZE : MT7916_EEPROM_SIZE;
}
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
@@ -475,9 +547,10 @@ bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
-void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
- struct ieee80211_key_conf *key, bool beacon);
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, u32 changed);
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -487,7 +560,6 @@ void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
struct mt7915_sta *msta,
u8 flowid);
@@ -498,9 +570,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
@@ -514,6 +584,8 @@ void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_init_debugfs(struct mt7915_phy *phy);
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 8130ea43971f..728a879c3b00 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -12,41 +12,26 @@
#include "mac.h"
#include "../trace.h"
+static bool wed_enable = false;
+module_param(wed_enable, bool, 0644);
+
static LIST_HEAD(hif_list);
static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
static const struct pci_device_id mt7915_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7906) },
{ },
};
static const struct pci_device_id mt7915_hif_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x790a) },
{ },
};
-void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
- u32 clear, u32 set)
-{
- struct mt76_dev *mdev = &dev->mt76;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
-
- mdev->mmio.irqmask &= ~clear;
- mdev->mmio.irqmask |= set;
-
- if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
- }
-
- spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
-}
-
-static struct mt7915_hif *
-mt7915_pci_get_hif2(struct mt7915_dev *dev)
+static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
{
struct mt7915_hif *hif;
u32 val;
@@ -56,7 +41,7 @@ mt7915_pci_get_hif2(struct mt7915_dev *dev)
list_for_each_entry(hif, &hif_list, list) {
val = readl(hif->regs + MT_PCIE_RECOG_ID);
val &= MT_PCIE_RECOG_ID_MASK;
- if (val != dev->hif_idx)
+ if (val != idx)
continue;
get_device(hif->dev);
@@ -78,167 +63,133 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
put_device(hif->dev);
}
-static void
-mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- static const u32 rx_irq_mask[] = {
- [MT_RXQ_MAIN] = MT_INT_RX_DONE_DATA0,
- [MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
- [MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
- [MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
- [MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
- };
-
- mt7915_irq_enable(dev, rx_irq_mask[q]);
-}
+ hif_idx++;
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
+ return NULL;
-/* TODO: support 2/4/6/8 MSI-X vectors */
-static void mt7915_irq_tasklet(struct tasklet_struct *t)
-{
- struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
- u32 intr, intr1, mask;
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
- intr1 &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
-
- intr |= intr1;
- }
+ return mt7915_pci_get_hif2(hif_idx);
+}
- trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
+{
+ struct mt7915_hif *hif;
- mask = intr & MT_INT_RX_DONE_ALL;
- if (intr & MT_INT_TX_DONE_MCU)
- mask |= MT_INT_TX_DONE_MCU;
+ hif = devm_kzalloc(&pdev->dev, sizeof(*hif), GFP_KERNEL);
+ if (!hif)
+ return -ENOMEM;
- mt7915_irq_disable(dev, mask);
+ hif->dev = &pdev->dev;
+ hif->regs = pcim_iomap_table(pdev)[0];
+ hif->irq = pdev->irq;
+ spin_lock_bh(&hif_lock);
+ list_add(&hif->list, &hif_list);
+ spin_unlock_bh(&hif_lock);
+ pci_set_drvdata(pdev, hif);
- if (intr & MT_INT_TX_DONE_MCU)
- napi_schedule(&dev->mt76.tx_napi);
+ return 0;
+}
- if (intr & MT_INT_RX_DONE_DATA0)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
+ int ret;
- if (intr & MT_INT_RX_DONE_DATA1)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
- if (intr & MT_INT_RX_DONE_WM)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->mt76.token_lock);
- if (intr & MT_INT_RX_DONE_WA)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+ ret = wait_event_timeout(dev->mt76.tx_wait,
+ !dev->mt76.wed_token_count, HZ);
+ if (!ret)
+ return -EAGAIN;
- if (intr & MT_INT_RX_DONE_WA_EXT)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
+ phy = &dev->phy;
+ mt76_set(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
- if (intr & MT_INT_MCU_CMD) {
- u32 val = mt76_rr(dev, MT_MCU_CMD);
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_set(dev, MT_AGG_ACR4(phy->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
- mt76_wr(dev, MT_MCU_CMD, val);
- if (val & MT_MCU_CMD_ERROR_MASK) {
- dev->reset_state = val;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
- wake_up(&dev->reset_wait);
- }
- }
+ return 0;
}
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
{
- struct mt7915_dev *dev = dev_instance;
+ struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
- return IRQ_NONE;
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = MT7915_TOKEN_SIZE;
+ spin_unlock_bh(&dev->mt76.token_lock);
- tasklet_schedule(&dev->irq_tasklet);
+ /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
+ * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though the ACR bit is set.
+ */
+ phy = &dev->phy;
+ mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
- return IRQ_HANDLED;
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
}
+#endif
-static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
+static int
+mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
{
- struct mt7915_hif *hif;
-
- dev->hif_idx = ++hif_idx;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL))
- return;
-
- mt76_wr(dev, MT_PCIE_RECOG_ID, dev->hif_idx | MT_PCIE_RECOG_ID_SEM);
-
- hif = mt7915_pci_get_hif2(dev);
- if (!hif)
- return;
-
- dev->hif2 = hif;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ int ret;
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ if (!wed_enable)
+ return 0;
- if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME "-hif", dev)) {
- mt7915_put_hif2(hif);
- hif = NULL;
- }
+ wed->wlan.pci_dev = pdev;
+ wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
+ MT_WFDMA_EXT_CSR_BASE;
+ wed->wlan.nbuf = 4096;
+ wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+ wed->wlan.init_buf = mt7915_wed_init_buf;
+ wed->wlan.offload_enable = mt7915_wed_offload_enable;
+ wed->wlan.offload_disable = mt7915_wed_offload_disable;
- /* master switch of PCIe interrupt enable */
- mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
-}
+ if (mtk_wed_device_attach(wed) != 0)
+ return 0;
-static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
-{
- struct mt7915_hif *hif;
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
- hif = devm_kzalloc(&pdev->dev, sizeof(*hif), GFP_KERNEL);
- if (!hif)
- return -ENOMEM;
-
- hif->dev = &pdev->dev;
- hif->regs = pcim_iomap_table(pdev)[0];
- hif->irq = pdev->irq;
- spin_lock_bh(&hif_lock);
- list_add(&hif->list, &hif_list);
- spin_unlock_bh(&hif_lock);
- pci_set_drvdata(pdev, hif);
+ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ return 1;
+#else
return 0;
+#endif
}
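
mt7915_pci_wed_init() deliberately returns a tri-state: a negative errno aborts the probe, 0 means WED is absent or disabled so the caller allocates a plain PCI vector and uses pdev->irq, and 1 means the WED device attached and already supplies both the IRQ line and the DMA device. A caller-side sketch of that contract (hypothetical stubs):

#include <stdio.h>

static int wed_init(int *irq)
{
	*irq = 99;		/* pretend WED attached and owns the line */
	return 1;		/* <0 error, 0 no WED, 1 WED active */
}

static int probe(void)
{
	int pci_irq = 42;	/* stand-in for pdev->irq */
	int irq = -1, ret;

	ret = wed_init(&irq);
	if (ret < 0)
		return ret;	/* hard failure: abort probe */
	if (!ret)
		irq = pci_irq;	/* no WED: plain PCI vector */

	printf("request_irq(%d)\n", irq);
	return 0;
}

int main(void)
{
	return probe();
}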
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .token_size = MT7915_TOKEN_SIZE,
- .tx_prepare_skb = mt7915_tx_prepare_skb,
- .tx_complete_skb = mt7915_tx_complete_skb,
- .rx_skb = mt7915_queue_rx_skb,
- .rx_check = mt7915_rx_check,
- .rx_poll_complete = mt7915_rx_poll_complete,
- .sta_ps = mt7915_sta_ps,
- .sta_add = mt7915_mac_sta_add,
- .sta_remove = mt7915_mac_sta_remove,
- .update_survey = mt7915_update_channel,
- };
+ struct mt7915_hif *hif2 = NULL;
struct mt7915_dev *dev;
struct mt76_dev *mdev;
+ int irq;
int ret;
ret = pcim_enable_device(pdev);
@@ -257,48 +208,76 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
mt76_pci_disable_aspm(pdev);
- if (id->device == 0x7916)
+ if (id->device == 0x7916 || id->device == 0x790a)
return mt7915_pci_hif2_probe(pdev);
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
+ dev = mt7915_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ id->device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
- dev = container_of(mdev, struct mt7915_dev, mt76);
+ mdev = &dev->mt76;
+ mt7915_wfsys_reset(dev);
+ hif2 = mt7915_pci_init_hif2(pdev);
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ ret = mt7915_pci_wed_init(dev, pdev, &irq);
if (ret < 0)
- goto free;
+ goto free_wed_or_irq_vector;
- ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
- if (ret)
- goto error;
+ if (!ret) {
+ hif2 = mt7915_pci_init_hif2(pdev);
- tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_device;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ irq = pdev->irq;
+ }
+
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_wed_or_irq_vector;
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
+ if (hif2) {
+ dev->hif2 = hif2;
- mt7915_pci_init_hif2(dev);
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
+
+ ret = devm_request_irq(mdev->dev, dev->hif2->irq,
+ mt7915_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-hif", dev);
+ if (ret)
+ goto free_hif2;
+ }
ret = mt7915_register_device(dev);
if (ret)
- goto free_irq;
+ goto free_hif2_irq;
return 0;
-free_irq:
- devm_free_irq(mdev->dev, pdev->irq, dev);
-error:
- pci_free_irq_vectors(pdev);
-free:
+
+free_hif2_irq:
+ if (dev->hif2)
+ devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+free_hif2:
+ if (dev->hif2)
+ put_device(dev->hif2->dev);
+ devm_free_irq(mdev->dev, irq, dev);
+free_wed_or_irq_vector:
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_detach(&mdev->mmio.wed);
+ else
+ pci_free_irq_vectors(pdev);
+free_device:
mt76_free_device(&dev->mt76);
return ret;
@@ -322,47 +301,25 @@ static void mt7915_pci_remove(struct pci_dev *pdev)
mt7915_unregister_device(dev);
}
-static struct pci_driver mt7915_hif_driver = {
+struct pci_driver mt7915_hif_driver = {
.name = KBUILD_MODNAME "_hif",
.id_table = mt7915_hif_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_hif_remove,
};
-static struct pci_driver mt7915_pci_driver = {
+struct pci_driver mt7915_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7915_pci_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_pci_remove,
};
-static int __init mt7915_init(void)
-{
- int ret;
-
- ret = pci_register_driver(&mt7915_hif_driver);
- if (ret)
- return ret;
-
- ret = pci_register_driver(&mt7915_pci_driver);
- if (ret)
- pci_unregister_driver(&mt7915_hif_driver);
-
- return ret;
-}
-
-static void __exit mt7915_exit(void)
-{
- pci_unregister_driver(&mt7915_pci_driver);
- pci_unregister_driver(&mt7915_hif_driver);
-}
-
-module_init(mt7915_init);
-module_exit(mt7915_exit);
-
MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
MODULE_FIRMWARE(MT7915_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(MT7916_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7916_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7916_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 59693535b098..5920e705835a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -4,41 +4,149 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
+/* used to differentiate between generations */
+struct mt7915_reg_desc {
+ const u32 *reg_rev;
+ const u32 *offs_rev;
+ const struct mt76_connac_reg_map *map;
+ u32 map_size;
+};
+
+enum reg_rev {
+ INT_SOURCE_CSR,
+ INT_MASK_CSR,
+ INT1_SOURCE_CSR,
+ INT1_MASK_CSR,
+ INT_MCU_CMD_SOURCE,
+ INT_MCU_CMD_EVENT,
+ WFDMA0_ADDR,
+ WFDMA0_PCIE1_ADDR,
+ WFDMA_EXT_CSR_ADDR,
+ CBTOP1_PHY_END,
+ INFRA_MCU_ADDR_END,
+ FW_EXCEPTION_ADDR,
+ SWDEF_BASE_ADDR,
+ __MT_REG_MAX,
+};
+
+enum offs_rev {
+ TMAC_CDTR,
+ TMAC_ODTR,
+ TMAC_ATCR,
+ TMAC_TRCR0,
+ TMAC_ICR0,
+ TMAC_ICR1,
+ TMAC_CTCR0,
+ TMAC_TFCR0,
+ MDP_BNRCFR0,
+ MDP_BNRCFR1,
+ ARB_DRNGR0,
+ ARB_SCR,
+ RMAC_MIB_AIRTIME14,
+ AGG_AWSCR0,
+ AGG_PCR0,
+ AGG_ACR0,
+ AGG_ACR4,
+ AGG_MRCR,
+ AGG_ATCR1,
+ AGG_ATCR3,
+ LPON_UTTR0,
+ LPON_UTTR1,
+ LPON_FRCR,
+ MIB_SDR3,
+ MIB_SDR4,
+ MIB_SDR5,
+ MIB_SDR7,
+ MIB_SDR8,
+ MIB_SDR9,
+ MIB_SDR10,
+ MIB_SDR11,
+ MIB_SDR12,
+ MIB_SDR13,
+ MIB_SDR14,
+ MIB_SDR15,
+ MIB_SDR16,
+ MIB_SDR17,
+ MIB_SDR18,
+ MIB_SDR19,
+ MIB_SDR20,
+ MIB_SDR21,
+ MIB_SDR22,
+ MIB_SDR23,
+ MIB_SDR24,
+ MIB_SDR25,
+ MIB_SDR27,
+ MIB_SDR28,
+ MIB_SDR29,
+ MIB_SDRVEC,
+ MIB_SDR31,
+ MIB_SDR32,
+ MIB_SDRMUBF,
+ MIB_DR8,
+ MIB_DR9,
+ MIB_DR11,
+ MIB_MB_SDR0,
+ MIB_MB_SDR1,
+ TX_AGG_CNT,
+ TX_AGG_CNT2,
+ MIB_ARNG,
+ WTBLON_TOP_WDUCR,
+ WTBL_UPDATE,
+ PLE_FL_Q_EMPTY,
+ PLE_FL_Q_CTRL,
+ PLE_AC_QEMPTY,
+ PLE_FREEPG_CNT,
+ PLE_FREEPG_HEAD_TAIL,
+ PLE_PG_HIF_GROUP,
+ PLE_HIF_PG_INFO,
+ AC_OFFSET,
+ ETBF_PAR_RPT0,
+ __MT_OFFS_MAX,
+};
+
+#define __REG(id) (dev->reg.reg_rev[(id)])
+#define __OFFS(id) (dev->reg.offs_rev[(id)])
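
The __REG()/__OFFS() macros replace compile-time constants with a per-device lookup: dev->reg points at tables chosen once at probe time, so the same macro names serve every supported generation. A sketch of how such tables could be wired up (the real arrays live in the driver's MMIO init code; the addresses below are placeholders, not authoritative values):

    /* Hypothetical per-generation tables, indexed by enum reg_rev. */
    static const u32 mt7915_reg_sketch[__MT_REG_MAX] = {
            [INT_SOURCE_CSR]  = 0xd7010,    /* placeholder value */
            [INT_MASK_CSR]    = 0xd7014,    /* placeholder value */
            /* ... */
    };

    static const u32 mt7916_reg_sketch[__MT_REG_MAX] = {
            [INT_SOURCE_CSR]  = 0xd4200,    /* placeholder value */
            [INT_MASK_CSR]    = 0xd4204,    /* placeholder value */
            /* ... */
    };

    /* chosen once at probe; afterwards MT_INT_SOURCE_CSR expands to
     * dev->reg.reg_rev[INT_SOURCE_CSR] and resolves at run time */
    dev->reg.reg_rev = is_mt7915(&dev->mt76) ? mt7915_reg_sketch
                                             : mt7916_reg_sketch;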
+
/* MCU WFDMA0 */
#define MT_MCU_WFDMA0_BASE 0x2000
#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
+
#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
/* MCU WFDMA1 */
#define MT_MCU_WFDMA1_BASE 0x3000
#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
-#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT __REG(INT_MCU_CMD_EVENT)
#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
-#define MT_PLE_BASE 0x8000
+/* PLE */
+#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_FL_Q_EMPTY 0x0b0
-#define MT_FL_Q0_CTRL 0x1b0
-#define MT_FL_Q2_CTRL 0x1b8
-#define MT_FL_Q3_CTRL 0x1bc
-
-#define MT_PLE_FREEPG_CNT MT_PLE(0x100)
-#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104)
-#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110)
-#define MT_PLE_HIF_PG_INFO MT_PLE(0x114)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \
- ((n) << 2))
+#define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY))
+#define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL))
+#define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8)
+#define MT_FL_Q3_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0xc)
+
+#define MT_PLE_FREEPG_CNT MT_PLE(__OFFS(PLE_FREEPG_CNT))
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(__OFFS(PLE_FREEPG_HEAD_TAIL))
+#define MT_PLE_PG_HIF_GROUP MT_PLE(__OFFS(PLE_PG_HIF_GROUP))
+#define MT_PLE_HIF_PG_INFO MT_PLE(__OFFS(PLE_HIF_PG_INFO))
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(__OFFS(PLE_AC_QEMPTY) + \
+ __OFFS(AC_OFFSET) * \
+ (ac) + ((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
-#define MT_PSE_BASE 0xc000
+#define MT_PSE_BASE 0x820c8000
#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
-#define MT_MDP_BASE 0xf000
+/* WF MDP TOP */
+#define MT_MDP_BASE 0x820cd000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
#define MT_MDP_DCR0 MT_MDP(0x000)
@@ -47,73 +155,87 @@
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
-#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_DCR2 MT_MDP(0x0e8)
+#define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2)
+
+#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
+ ((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
-#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_BNRCFR1(_band) MT_MDP(__OFFS(MDP_BNRCFR1) + \
+ ((_band) << 8))
#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
-/* TMAC: band 0(0x21000), band 1(0xa1000) */
-#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+/* TRB: band 0(0x820e1000), band 1(0x820f1000) */
+#define MT_WF_TRB_BASE(_band) ((_band) ? 0x820f1000 : 0x820e1000)
+#define MT_WF_TRB(_band, ofs) (MT_WF_TRB_BASE(_band) + (ofs))
+
+#define MT_TRB_RXPSR0(_band) MT_WF_TRB(_band, 0x03c)
+#define MT_TRB_RXPSR0_RX_WTBL_PTR GENMASK(25, 16)
+#define MT_TRB_RXPSR0_RX_RMAC_PTR GENMASK(9, 0)
+
+/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
-#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
-#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CDTR))
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ODTR))
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
-#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, 0x098)
+#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ATCR))
#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
-#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TRCR0))
#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
-#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
-#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR0))
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
-#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4)
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR1))
#define MT_IFS_EIFS_CCK GENMASK(8, 0)
-#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CTCR0))
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
+#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TFCR0))
-#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+/* WF DMA TOP: band 0(0x820e7000), band 1(0x820f7000) */
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
-/* ETBF: band 0(0x24000), band 1(0xa4000) */
-#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
+/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
-#define MT_ETBF_RX_FB_CONT(_band) MT_WF_ETBF(_band, 0x068)
-#define MT_ETBF_RX_FB_BW GENMASK(7, 6)
-#define MT_ETBF_RX_FB_NC GENMASK(5, 3)
-#define MT_ETBF_RX_FB_NR GENMASK(2, 0)
+#define MT_ETBF_PAR_RPT0(_band) MT_WF_ETBF(_band, __OFFS(ETBF_PAR_RPT0))
+#define MT_ETBF_PAR_RPT0_FB_BW GENMASK(7, 6)
+#define MT_ETBF_PAR_RPT0_FB_NC GENMASK(5, 3)
+#define MT_ETBF_PAR_RPT0_FB_NR GENMASK(2, 0)
#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
@@ -125,174 +247,209 @@
#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
-/* LPON: band 0(0x24200), band 1(0xa4200) */
-#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+/* LPON: band 0(0x820eb000), band 1(0x820fb000) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
-#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
-#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR0))
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR1))
+#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, __OFFS(LPON_FRCR))
-#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 1))
+#define MT_LPON_TCR_MT7916(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 4))
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
#define MT_LPON_TCR_SW_ADJUST BIT(1)
#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
-/* MIB: band 0(0x24800), band 1(0xa4800) */
+/* MIB: band 0(0x820ed000), band 1(0x820fd000) */
/* These counters are (mostly?) clear-on-read. So, some should not
* be read at all in case firmware is already reading them. These
* are commented with 'DNR' below. The DNR stats will be read by querying
* the firmware API for the appropriate message. For counters the driver
* does read, the driver should accumulate the counters.
*/
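
In practice the accumulate-on-read rule from the comment above looks like the sketch below. It assumes the driver keeps a running software total per phy; the struct fields (phy->dev, phy->band_idx, phy->mib.fcs_err_cnt) are illustrative, not a statement of the driver's actual layout:

    /* Sketch: fold a clear-on-read MIB register into a software total. */
    static void mib_accumulate_fcs_err(struct mt7915_phy *phy)
    {
            struct mt7915_dev *dev = phy->dev;
            u32 cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));

            /* hardware zeroes SDR3 on this read, so every sample must be
             * added to the running total, never assigned (mt7915 field
             * layout shown; mt7916 uses the _MT7916 mask variant below) */
            phy->mib.fcs_err_cnt += FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt);
    }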
-#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR3))
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR3_FCS_ERR_MASK_MT7916 GENMASK(31, 16)
-#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018)
+#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR4))
#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
/* rx mpdu counter, full 32 bits */
-#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c)
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR5))
#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024)
+#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR7))
#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028)
+#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR8))
#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
/* aka CCA_NAV_TX_TIME */
-#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
+#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030)
-#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
+#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
-#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034)
+#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR11))
#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
/* tx ampdu cnt, full 32 bits */
-#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038)
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR12))
-#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c)
+#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR13))
#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
/* counts all mpdus in ampdu, regardless of success */
-#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR14))
#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916 GENMASK(31, 0)
/* counts all successfully tx'd mpdus in ampdu */
-#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR15))
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
/* in units of 'us' */
-#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c)
+#define MT_MIB_SDR17(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050)
+#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
/* units are us */
-#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054)
+#define MT_MIB_SDR19(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058)
+#define MT_MIB_SDR20(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c)
-#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR21(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
+#define MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
/* rx ampdu count, 32-bit */
-#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060)
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
/* rx ampdu bytes count, 32-bit */
-#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064)
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR23))
/* rx ampdu valid subframe count */
-#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068)
+#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR24))
#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916 GENMASK(31, 0)
/* rx ampdu valid subframe bytes count, 32bits */
-#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c)
+#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR25))
/* remaining windows protected stats */
-#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074)
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR27))
#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078)
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR28))
#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c)
-#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR29))
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916 GENMASK(15, 0)
-#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080)
+#define MT_MIB_SDRVEC(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRVEC))
#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916 GENMASK(31, 16)
/* rx blockack count, 32 bits */
-#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084)
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR31))
-#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088)
-#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR32))
+#define MT_MIB_SDR32_TX_PKT_EBF_CNT GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT GENMASK(31, 16)
-#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c)
-#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR33_TX_PKT_IBF_CNT GENMASK(15, 0)
-#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
+#define MT_MIB_SDRMUBF(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
/* 36, 37 both DNR */
-#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
-#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
-#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, __OFFS(MIB_DR8))
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, __OFFS(MIB_DR9))
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, __OFFS(MIB_DR11))
-#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR0) + (n))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR1) + (n))
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
-#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
-#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
-#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x518 + (n))
+#define MT_MIB_MB_BFTF(_band, n) MT_WF_MIB(_band, 0x510 + (n))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT) + \
+ ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT2) + \
+ ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, __OFFS(MIB_ARNG) + \
+ ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
-#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_MIB_BFCR0(_band) MT_WF_MIB(_band, 0x7b0)
+#define MT_MIB_BFCR0_RX_FB_HT GENMASK(15, 0)
+#define MT_MIB_BFCR0_RX_FB_VHT GENMASK(31, 16)
+
+#define MT_MIB_BFCR1(_band) MT_WF_MIB(_band, 0x7b4)
+#define MT_MIB_BFCR1_RX_FB_HE GENMASK(15, 0)
+
+#define MT_MIB_BFCR2(_band) MT_WF_MIB(_band, 0x7b8)
+#define MT_MIB_BFCR2_BFEE_TX_FB_TRIG GENMASK(15, 0)
+
+#define MT_MIB_BFCR7(_band) MT_WF_MIB(_band, 0x7cc)
+#define MT_MIB_BFCR7_BFEE_TX_FB_CPL GENMASK(15, 0)
+
+/* WTBLON TOP */
+#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_TOP_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_BASE 0x38000
+/* WTBL */
+#define MT_WTBL_BASE 0x820d8000
#define MT_WTBL_LMAC_ID GENMASK(14, 8)
#define MT_WTBL_LMAC_DW GENMASK(7, 2)
#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
- FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
- FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
-/* AGG: band 0(0x20800), band 1(0xa0800) */
-#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+/* AGG: band 0(0x820e2000), band 1(0x820f2000) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
-#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
-#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4)
+#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
+ (_n) * 4))
+#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
+ (_n) * 4))
#define MT_AGG_PCR0_MM_PROT BIT(0)
#define MT_AGG_PCR0_GF_PROT BIT(1)
#define MT_AGG_PCR0_BW20_PROT BIT(2)
@@ -305,31 +462,35 @@
#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
-#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR0))
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
-#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098)
-#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
-#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
-#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
+#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR4))
+#define MT_AGG_ACR_PPDU_TXS2H BIT(1)
+
+#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
+#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
+#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
+#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
-#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0)
-#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
+#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
+#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
-/* ARB: band 0(0x20c00), band 1(0xa0c00) */
-#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+/* ARB: band 0(0x820e3000), band 1(0x820f3000) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
-#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, __OFFS(ARB_SCR))
#define MT_ARB_SCR_TX_DISABLE BIT(8)
#define MT_ARB_SCR_RX_DISABLE BIT(9)
-#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
+#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, (__OFFS(ARB_DRNGR0) + \
+ (_n) * 4))
-/* RMAC: band 0(0x21400), band 1(0xa1400) */
-#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+/* RMAC: band 0(0x820e5000), band 1(0x820f5000) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
@@ -366,7 +527,7 @@
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
/* WFDMA0 */
-#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0_BASE __REG(WFDMA0_ADDR)
#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
#define MT_WFDMA0_RST MT_WFDMA0(0x100)
@@ -381,15 +542,14 @@
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
-
-#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
-
-#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
-#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
-#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
@@ -404,129 +564,400 @@
#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_MCU_CMD MT_WFDMA1(0x1f0)
-#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
-#define MT_MCU_CMD_STOP_DMA BIT(2)
-#define MT_MCU_CMD_RESET_DONE BIT(3)
-#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
-#define MT_MCU_CMD_NORMAL_STATE BIT(5)
-#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-
#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
-#define MT_TX_RING_BASE MT_WFDMA1(0x300)
-#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
-
-#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
-#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
-#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
-#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
-#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
-#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
-#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
-#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
-
-#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
-#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
-#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
-#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
-#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
-#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
-#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
-#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
-
-#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
-#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
-#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
-#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
-
/* WFDMA CSR */
-#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
+#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
-#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
-#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
-#define MT_INT_RX_DONE_DATA0 BIT(16)
-#define MT_INT_RX_DONE_DATA1 BIT(17)
-#define MT_INT_RX_DONE_WM BIT(0)
-#define MT_INT_RX_DONE_WA BIT(1)
-#define MT_INT_RX_DONE_WA_EXT BIT(2)
-#define MT_INT_RX_DONE_ALL (GENMASK(2, 0) | GENMASK(17, 16))
-#define MT_INT_TX_DONE_MCU_WA BIT(15)
-#define MT_INT_TX_DONE_FWDL BIT(26)
-#define MT_INT_TX_DONE_MCU_WM BIT(27)
-#define MT_INT_TX_DONE_BAND0 BIT(30)
-#define MT_INT_TX_DONE_BAND1 BIT(31)
-
-#define MT_INT_BAND1_MASK (MT_INT_RX_DONE_WA_EXT | \
- MT_INT_TX_DONE_BAND1)
-
-#define MT_INT_MCU_CMD BIT(29)
-
-#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
- MT_INT_TX_DONE_MCU_WM | \
- MT_INT_TX_DONE_FWDL)
-
-#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_WED BIT(1)
-#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
-#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34)
+#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0)
+#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8)
+#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16)
-#define MT_INT1_SOURCE_CSR MT_WFDMA_EXT_CSR(0x88)
-#define MT_INT1_MASK_CSR MT_WFDMA_EXT_CSR(0x8c)
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
-#define MT_PCIE_RECOG_ID MT_WFDMA_EXT_CSR(0x90)
+#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
+#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
+
+#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
+#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400)
+
/* WFDMA0 PCIE1 */
-#define MT_WFDMA0_PCIE1_BASE 0xd8000
-#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
-#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
/* WFDMA1 PCIE1 */
-#define MT_WFDMA1_PCIE1_BASE 0xd9000
-#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
-#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_TOP_RGU_BASE 0xf0000
-#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
-#define MT_TOP_PWR_KEY (0x5746 << 16)
-#define MT_TOP_PWR_SW_RST BIT(0)
-#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
-#define MT_TOP_PWR_HW_CTRL BIT(4)
-#define MT_TOP_PWR_PWR_ON BIT(7)
+/* WFDMA COMMON */
+#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
+#define __TXQ(q) (__RXQ(q) + MT_RXQ_BAND2)
+
+#define MT_Q_ID(q) (dev->q_id[(q)])
+#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
+ MT_WFDMA1_BASE : MT_WFDMA0_BASE)
+
+#define MT_MCUQ_ID(q) MT_Q_ID(q)
+#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
+#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
+
+#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
+#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
+#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+
+#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
+ MT_MCUQ_ID(q) * 0x4)
+#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+ MT_RXQ_ID(q) * 0x4)
+#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q) * 0x4)
+
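A worked expansion shows how the queue macros above compose. The queue-id tables and dev->wfdma_mask are filled in by chip-specific init, so the concrete routing here is hypothetical:

    /* For a chip whose BE data ring hangs off WFDMA0:
     *   MT_TXQ_RING_BASE(MT_TXQ_BE)
     *     = MT_Q_BASE(__TXQ(MT_TXQ_BE)) + 0x300
     *     = MT_WFDMA0_BASE + 0x300        (wfdma_mask bit clear)
     * and its per-ring EXT_CTRL word sits at
     *   MT_TXQ_EXT_CTRL(MT_TXQ_BE)
     *     = MT_WFDMA0_BASE + 0x600 + MT_TXQ_ID(MT_TXQ_BE) * 0x4
     */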
+#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+
+#define MT_INT1_SOURCE_CSR __REG(INT1_SOURCE_CSR)
+#define MT_INT1_MASK_CSR __REG(INT1_MASK_CSR)
-#define MT_INFRA_CFG_BASE 0xf1000
-#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+#define MT_INT_RX_DONE_BAND0 BIT(16)
+#define MT_INT_RX_DONE_BAND1 BIT(17)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
+#define MT_INT_RX_DONE_WA_MAIN BIT(1)
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_DONE_BAND0_MT7916 BIT(22)
+#define MT_INT_RX_DONE_BAND1_MT7916 BIT(23)
+#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
+#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+
+#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
+ MT_INT_RX(MT_RXQ_MCU_WA))
+
+#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
+ MT_INT_RX(MT_RXQ_BAND1_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
+ MT_INT_BAND0_RX_DONE | \
+ MT_INT_BAND1_RX_DONE)
+
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
-#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+ MT_INT_TX_MCU(MT_MCUQ_FWDL))
+
+#define MT_MCU_CMD __REG(INT_MCU_CMD_SOURCE)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+/* TOP RGU */
+#define MT_TOP_RGU_BASE 0x18000000
+#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
+#define MT_TOP_PWR_KEY (0x5746 << 16)
+#define MT_TOP_PWR_SW_RST BIT(0)
+#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
+#define MT_TOP_PWR_HW_CTRL BIT(4)
+#define MT_TOP_PWR_PWR_ON BIT(7)
+
+#define MT_TOP_RGU_SYSRAM_PDN (MT_TOP_RGU_BASE + 0x050)
+#define MT_TOP_RGU_SYSRAM_SLP (MT_TOP_RGU_BASE + 0x054)
+#define MT_TOP_WFSYS_PWR (MT_TOP_RGU_BASE + 0x010)
+#define MT_TOP_PWR_EN_MASK BIT(7)
+#define MT_TOP_PWR_ACK_MASK BIT(6)
+#define MT_TOP_PWR_KEY_MASK GENMASK(31, 16)
+
+#define MT7986_TOP_WM_RESET (MT_TOP_RGU_BASE + 0x120)
+#define MT7986_TOP_WM_RESET_MASK BIT(0)
+
+/* l1/l2 remap */
+#define MT_HIF_REMAP_L1 0xf11ac
+#define MT_HIF_REMAP_L1_MT7916 0xfe260
#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
#define MT_HIF_REMAP_BASE_L1 0xe0000
-#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
+#define MT_HIF_REMAP_L2 0xf11b0
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
-#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_HIF_REMAP_L2_MT7916 0x1b8
+#define MT_HIF_REMAP_L2_MASK_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_L2_OFFSET_MT7916 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2_MT7916 0x40000
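
The remap window works by programming the upper bits of a target bus address into the L1 (or L2) remap register, then accessing the fixed window at MT_HIF_REMAP_BASE_L1 plus the remaining offset. A minimal sketch of the access pattern, assuming bus ops of the usual mt76 shape (the real helper lives in the driver's mmio code and may differ):

    /* Sketch: map an arbitrary bus address through the L1 window. */
    static u32 reg_map_l1_sketch(struct mt7915_dev *dev, u32 addr)
    {
            u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
            u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);

            dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
                              MT_HIF_REMAP_L1_MASK,
                              FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
            /* read back so the window is configured before it is used */
            dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);

            return MT_HIF_REMAP_BASE_L1 + offset;
    }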
+
+#define MT_INFRA_BASE 0x18000000
+#define MT_WFSYS0_PHY_START 0x18400000
+#define MT_WFSYS1_PHY_START 0x18800000
+#define MT_WFSYS1_PHY_END 0x18bfffff
+#define MT_CBTOP1_PHY_START 0x70000000
+#define MT_CBTOP1_PHY_END __REG(CBTOP1_PHY_END)
+#define MT_CBTOP2_PHY_START 0xf0000000
+#define MT_CBTOP2_PHY_END 0xffffffff
+#define MT_INFRA_MCU_START 0x7c000000
+#define MT_INFRA_MCU_END __REG(INFRA_MCU_ADDR_END)
+#define MT_CONN_INFRA_OFFSET(p) ((p) - MT_INFRA_BASE)
+
+/* CONN INFRA CFG */
+#define MT_CONN_INFRA_BASE 0x18001000
+#define MT_CONN_INFRA(ofs) (MT_CONN_INFRA_BASE + (ofs))
+
+#define MT_CONN_INFRA_EFUSE MT_CONN_INFRA(0x020)
+
+#define MT_CONN_INFRA_ADIE_RESET MT_CONN_INFRA(0x030)
+#define MT_CONN_INFRA_ADIE1_RESET_MASK BIT(0)
+#define MT_CONN_INFRA_ADIE2_RESET_MASK BIT(2)
+
+#define MT_CONN_INFRA_OSC_RC_EN MT_CONN_INFRA(0x380)
+
+#define MT_CONN_INFRA_OSC_CTRL MT_CONN_INFRA(0x300)
+#define MT_CONN_INFRA_OSC_RC_EN_MASK BIT(7)
+#define MT_CONN_INFRA_OSC_STB_TIME_MASK GENMASK(23, 0)
+
+#define MT_CONN_INFRA_HW_CTRL MT_CONN_INFRA(0x200)
+#define MT_CONN_INFRA_HW_CTRL_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT MT_CONN_INFRA(0x540)
+#define MT_CONN_INFRA_WF_SLP_PROT_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT_RDY MT_CONN_INFRA(0x544)
+#define MT_CONN_INFRA_CONN_WF_MASK (BIT(29) | BIT(31))
+#define MT_CONN_INFRA_CONN (BIT(25) | BIT(29) | BIT(31))
+
+#define MT_CONN_INFRA_EMI_REQ MT_CONN_INFRA(0x414)
+#define MT_CONN_INFRA_EMI_REQ_MASK BIT(0)
+#define MT_CONN_INFRA_INFRA_REQ_MASK BIT(5)
+
+/* AFE */
+#define MT_AFE_CTRL_BASE(_band) (0x18003000 + ((_band) << 19))
+#define MT_AFE_CTRL(_band, ofs) (MT_AFE_CTRL_BASE(_band) + (ofs))
+
+#define MT_AFE_DIG_EN_01(_band) MT_AFE_CTRL(_band, 0x00)
+#define MT_AFE_DIG_EN_02(_band) MT_AFE_CTRL(_band, 0x04)
+#define MT_AFE_DIG_EN_03(_band) MT_AFE_CTRL(_band, 0x08)
+#define MT_AFE_DIG_TOP_01(_band) MT_AFE_CTRL(_band, 0x0c)
+
+#define MT_AFE_PLL_STB_TIME(_band) MT_AFE_CTRL(_band, 0xf4)
+#define MT_AFE_PLL_STB_TIME_MASK (GENMASK(30, 16) | GENMASK(14, 0))
+#define MT_AFE_PLL_STB_TIME_VAL (FIELD_PREP(GENMASK(30, 16), 0x4bc) | \
+ FIELD_PREP(GENMASK(14, 0), 0x7e4))
+#define MT_AFE_BPLL_CFG_MASK GENMASK(7, 6)
+#define MT_AFE_WPLL_CFG_MASK GENMASK(1, 0)
+#define MT_AFE_MCU_WPLL_CFG_MASK GENMASK(3, 2)
+#define MT_AFE_MCU_BPLL_CFG_MASK GENMASK(17, 16)
+#define MT_AFE_PLL_CFG_MASK (MT_AFE_BPLL_CFG_MASK | \
+ MT_AFE_WPLL_CFG_MASK | \
+ MT_AFE_MCU_WPLL_CFG_MASK | \
+ MT_AFE_MCU_BPLL_CFG_MASK)
+#define MT_AFE_PLL_CFG_VAL (FIELD_PREP(MT_AFE_BPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_WPLL_CFG_MASK, 0x2) | \
+ FIELD_PREP(MT_AFE_MCU_WPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_MCU_BPLL_CFG_MASK, 0x2))
+
+#define MT_AFE_DIG_TOP_01_MASK GENMASK(18, 15)
+#define MT_AFE_DIG_TOP_01_VAL FIELD_PREP(MT_AFE_DIG_TOP_01_MASK, 0x9)
+
+#define MT_AFE_RG_WBG_EN_RCK_MASK BIT(0)
+#define MT_AFE_RG_WBG_EN_BPLL_UP_MASK BIT(21)
+#define MT_AFE_RG_WBG_EN_WPLL_UP_MASK BIT(20)
+#define MT_AFE_RG_WBG_EN_PLL_UP_MASK (MT_AFE_RG_WBG_EN_BPLL_UP_MASK | \
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK)
+#define MT_AFE_RG_WBG_EN_TXCAL_MASK GENMASK(21, 17)
+
+#define MT_ADIE_SLP_CTRL_BASE(_band) (0x18005000 + ((_band) << 19))
+#define MT_ADIE_SLP_CTRL(_band, ofs) (MT_ADIE_SLP_CTRL_BASE(_band) + (ofs))
+
+#define MT_ADIE_SLP_CTRL_CK0(_band) MT_ADIE_SLP_CTRL(_band, 0x120)
+
+/* ADIE */
+#define MT_ADIE_CHIP_ID 0x02c
+#define MT_ADIE_VERSION_MASK GENMASK(15, 0)
+#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
+#define MT_ADIE_IDX0 GENMASK(15, 0)
+#define MT_ADIE_IDX1 GENMASK(31, 16)
+
+#define MT_ADIE_RG_TOP_THADC_BG 0x034
+#define MT_ADIE_VRPI_SEL_CR_MASK GENMASK(15, 12)
+#define MT_ADIE_VRPI_SEL_EFUSE_MASK GENMASK(6, 3)
+
+#define MT_ADIE_RG_TOP_THADC 0x038
+#define MT_ADIE_PGA_GAIN_MASK GENMASK(25, 23)
+#define MT_ADIE_PGA_GAIN_EFUSE_MASK GENMASK(2, 0)
+#define MT_ADIE_LDO_CTRL_MASK GENMASK(27, 26)
+#define MT_ADIE_LDO_CTRL_EFUSE_MASK GENMASK(6, 5)
+
+#define MT_AFE_RG_ENCAL_WBTAC_IF_SW 0x070
+#define MT_ADIE_EFUSE_RDATA0 0x130
+
+#define MT_ADIE_EFUSE2_CTRL 0x148
+#define MT_ADIE_EFUSE_CTRL_MASK BIT(1)
+
+#define MT_ADIE_EFUSE_CFG 0x144
+#define MT_ADIE_EFUSE_MODE_MASK GENMASK(7, 6)
+#define MT_ADIE_EFUSE_ADDR_MASK GENMASK(25, 16)
+#define MT_ADIE_EFUSE_VALID_MASK BIT(29)
+#define MT_ADIE_EFUSE_KICK_MASK BIT(30)
+
+#define MT_ADIE_THADC_ANALOG 0x3a6
+
+#define MT_ADIE_THADC_SLOP 0x3a7
+#define MT_ADIE_ANA_EN_MASK BIT(7)
+
+#define MT_ADIE_7975_XTAL_CAL 0x3a1
+#define MT_ADIE_TRIM_MASK GENMASK(6, 0)
+#define MT_ADIE_EFUSE_TRIM_MASK GENMASK(5, 0)
+#define MT_ADIE_XO_TRIM_EN_MASK BIT(7)
+#define MT_ADIE_XTAL_DECREASE_MASK BIT(6)
+
+#define MT_ADIE_7975_XO_TRIM2 0x3a2
+#define MT_ADIE_7975_XO_TRIM3 0x3a3
+#define MT_ADIE_7975_XO_TRIM4 0x3a4
+#define MT_ADIE_7975_XTAL_EN 0x3a5
+
+#define MT_ADIE_XO_TRIM_FLOW 0x3ac
+#define MT_ADIE_XTAL_AXM_80M_OSC 0x390
+#define MT_ADIE_XTAL_AXM_40M_OSC 0x391
+#define MT_ADIE_XTAL_TRIM1_80M_OSC 0x398
+#define MT_ADIE_XTAL_TRIM1_40M_OSC 0x399
+#define MT_ADIE_WRI_CK_SEL 0x4ac
+#define MT_ADIE_RG_STRAP_PIN_IN 0x4fc
+#define MT_ADIE_XTAL_C1 0x654
+#define MT_ADIE_XTAL_C2 0x658
+#define MT_ADIE_RG_XO_01 0x65c
+#define MT_ADIE_RG_XO_03 0x664
+
+#define MT_ADIE_CLK_EN 0xa00
+
+#define MT_ADIE_7975_XTAL 0xa18
+#define MT_ADIE_7975_XTAL_EN_MASK BIT(29)
+
+#define MT_ADIE_7975_COCLK 0xa1c
+#define MT_ADIE_7975_XO_2 0xa84
+#define MT_ADIE_7975_XO_2_FIX_EN BIT(31)
+
+#define MT_ADIE_7975_XO_CTRL2 0xa94
+#define MT_ADIE_7975_XO_CTRL2_C1_MASK GENMASK(26, 20)
+#define MT_ADIE_7975_XO_CTRL2_C2_MASK GENMASK(18, 12)
+#define MT_ADIE_7975_XO_CTRL2_MASK (MT_ADIE_7975_XO_CTRL2_C1_MASK | \
+ MT_ADIE_7975_XO_CTRL2_C2_MASK)
+
+#define MT_ADIE_7975_XO_CTRL6 0xaa4
+#define MT_ADIE_7975_XO_CTRL6_MASK BIT(16)
+
+/* TOP SPI */
+#define MT_TOP_SPI_ADIE_BASE(_band) (0x18004000 + ((_band) << 19))
+#define MT_TOP_SPI_ADIE(_band, ofs) (MT_TOP_SPI_ADIE_BASE(_band) + (ofs))
+
+#define MT_TOP_SPI_BUSY_CR(_band) MT_TOP_SPI_ADIE(_band, 0)
+#define MT_TOP_SPI_POLLING_BIT BIT(5)
+
+#define MT_TOP_SPI_ADDR_CR(_band) MT_TOP_SPI_ADIE(_band, 0x50)
+#define MT_TOP_SPI_READ_ADDR_FORMAT (BIT(12) | BIT(13) | BIT(15))
+#define MT_TOP_SPI_WRITE_ADDR_FORMAT (BIT(13) | BIT(15))
+
+#define MT_TOP_SPI_WRITE_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x54)
+#define MT_TOP_SPI_READ_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x58)
+
+/* CONN INFRA CKGEN */
+#define MT_INFRA_CKGEN_BASE 0x18009000
+#define MT_INFRA_CKGEN(ofs) (MT_INFRA_CKGEN_BASE + (ofs))
+
+#define MT_INFRA_CKGEN_BUS MT_INFRA_CKGEN(0xa00)
+#define MT_INFRA_CKGEN_BUS_CLK_SEL_MASK BIT(23)
+#define MT_INFRA_CKGEN_BUS_RDY_SEL_MASK BIT(29)
+
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_1 MT_INFRA_CKGEN(0x008)
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_2 MT_INFRA_CKGEN(0x00c)
+
+#define MT_INFRA_CKGEN_RFSPI_WPLL_DIV MT_INFRA_CKGEN(0x040)
+#define MT_INFRA_CKGEN_DIV_SEL_MASK GENMASK(7, 2)
+#define MT_INFRA_CKGEN_DIV_EN_MASK BIT(0)
+
+/* CONN INFRA BUS */
+#define MT_INFRA_BUS_BASE 0x1800e000
+#define MT_INFRA_BUS(ofs) (MT_INFRA_BUS_BASE + (ofs))
+
+#define MT_INFRA_BUS_OFF_TIMEOUT MT_INFRA_BUS(0x300)
+#define MT_INFRA_BUS_TIMEOUT_LIMIT_MASK GENMASK(14, 7)
+#define MT_INFRA_BUS_TIMEOUT_EN_MASK GENMASK(3, 0)
+
+#define MT_INFRA_BUS_ON_TIMEOUT MT_INFRA_BUS(0x31c)
+#define MT_INFRA_BUS_EMI_START MT_INFRA_BUS(0x360)
+#define MT_INFRA_BUS_EMI_END MT_INFRA_BUS(0x364)
+
+/* CONN_INFRA_SKU */
+#define MT_CONNINFRA_SKU_DEC_ADDR 0x18050000
+#define MT_CONNINFRA_SKU_MASK GENMASK(15, 0)
+#define MT_ADIE_TYPE_MASK BIT(1)
+
+/* FW MODE SYNC */
+#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
+
+#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
+
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
+
+#define MT_SWDEF_SER_STATS MT_SWDEF(0x040)
+#define MT_SWDEF_PLE_STATS MT_SWDEF(0x044)
+#define MT_SWDEF_PLE1_STATS MT_SWDEF(0x048)
+#define MT_SWDEF_PLE_AMSDU_STATS MT_SWDEF(0x04C)
+#define MT_SWDEF_PSE_STATS MT_SWDEF(0x050)
+#define MT_SWDEF_PSE1_STATS MT_SWDEF(0x054)
+#define MT_SWDEF_LAMC_WISR6_BN0_STATS MT_SWDEF(0x058)
+#define MT_SWDEF_LAMC_WISR6_BN1_STATS MT_SWDEF(0x05C)
+#define MT_SWDEF_LAMC_WISR7_BN0_STATS MT_SWDEF(0x060)
+#define MT_SWDEF_LAMC_WISR7_BN1_STATS MT_SWDEF(0x064)
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
@@ -540,13 +971,7 @@
#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
-#define MT_SWDEF_BASE 0x41f200
-#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
-#define MT_SWDEF_MODE MT_SWDEF(0x3c)
-#define MT_SWDEF_NORMAL_MODE 0
-#define MT_SWDEF_ICAP_MODE 1
-#define MT_SWDEF_SPECTRUM_MODE 2
-
+/* LED */
#define MT_LED_TOP_BASE 0x18013000
#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
@@ -561,46 +986,127 @@
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
+#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+
+/* MT TOP */
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
-#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
+
+#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
-#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
-#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
-#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+#define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4)
+#define MT_TOP_WFSYS_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_MCU_EMI_BASE MT_TOP(0x1c4)
+#define MT_TOP_MCU_EMI_BASE_MASK GENMASK(19, 0)
+
+#define MT_TOP_CONN_INFRA_WAKEUP MT_TOP(0x1a0)
+#define MT_TOP_CONN_INFRA_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_WFSYS_RESET_STATUS MT_TOP(0x2cc)
+#define MT_TOP_WFSYS_RESET_STATUS_MASK BIT(30)
+
+/* SEMA */
+#define MT_SEMA_BASE 0x18070000
+#define MT_SEMA(ofs) (MT_SEMA_BASE + (ofs))
+
+#define MT_SEMA_RFSPI_STATUS (MT_SEMA(0x2000) + (11 * 4))
+#define MT_SEMA_RFSPI_RELEASE (MT_SEMA(0x2200) + (11 * 4))
+#define MT_SEMA_RFSPI_STATUS_MASK BIT(1)
+
+/* MCU BUS */
+#define MT_MCU_BUS_BASE 0x18400000
+#define MT_MCU_BUS(ofs) (MT_MCU_BUS_BASE + (ofs))
+
+#define MT_MCU_BUS_TIMEOUT MT_MCU_BUS(0xf0440)
+#define MT_MCU_BUS_TIMEOUT_SET_MASK GENMASK(7, 0)
+#define MT_MCU_BUS_TIMEOUT_CG_EN_MASK BIT(28)
+#define MT_MCU_BUS_TIMEOUT_EN_MASK BIT(31)
+
+#define MT_MCU_BUS_REMAP MT_MCU_BUS(0x120)
+
+/* TOP CFG */
+#define MT_TOP_CFG_BASE 0x184b0000
+#define MT_TOP_CFG(ofs) (MT_TOP_CFG_BASE + (ofs))
+
+#define MT_TOP_CFG_IP_VERSION_ADDR MT_TOP_CFG(0x010)
+
+/* TOP CFG ON */
+#define MT_TOP_CFG_ON_BASE 0x184c1000
+#define MT_TOP_CFG_ON(ofs) (MT_TOP_CFG_ON_BASE + (ofs))
+
+#define MT_TOP_CFG_ON_ROM_IDX MT_TOP_CFG_ON(0x604)
+
+/* SLP CTRL */
+#define MT_SLP_BASE 0x184c3000
+#define MT_SLP(ofs) (MT_SLP_BASE + (ofs))
+
+#define MT_SLP_STATUS MT_SLP(0x00c)
+#define MT_SLP_WFDMA2CONN_MASK (BIT(21) | BIT(23))
+#define MT_SLP_CTRL_EN_MASK BIT(0)
+#define MT_SLP_CTRL_BSY_MASK BIT(1)
+
+/* MCU BUS DBG */
+#define MT_MCU_BUS_DBG_BASE 0x18500000
+#define MT_MCU_BUS_DBG(ofs) (MT_MCU_BUS_DBG_BASE + (ofs))
+
+#define MT_MCU_BUS_DBG_TIMEOUT MT_MCU_BUS_DBG(0x0)
+#define MT_MCU_BUS_DBG_TIMEOUT_SET_MASK GENMASK(31, 16)
+#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
+#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
#define MT_HW_BOUND 0x70010020
-#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
-#define MT_PCIE1_MAC_BASE 0x74020000
-#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
-#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
-
+/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
-#define MT_WF_IRPI_BASE 0x83006000
-#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + ((ofs) << 16))
+#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
+#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
+
+#define MT_WM_MCU_PC 0x7c060204
+#define MT_WA_MCU_PC 0x7c06020c
+
+/* PP TOP */
+#define MT_WF_PP_TOP_BASE 0x820cc000
+#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
+
+#define MT_WF_PP_TOP_RXQ_WFDMA_CF_5 MT_WF_PP_TOP(0x0e8)
+#define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK BIT(6)
+
+#define MT_WF_IRPI_BASE 0x83000000
+#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + (ofs))
+
+#define MT_WF_IRPI_NSS(phy, nss) MT_WF_IRPI(0x6000 + ((phy) << 20) + ((nss) << 16))
+#define MT_WF_IRPI_NSS_MT7916(phy, nss) MT_WF_IRPI(0x1000 + ((phy) << 20) + ((nss) << 16))
-/* PHY: band 0(0x83080000), band 1(0x83090000) */
+/* PHY */
#define MT_WF_PHY_BASE 0x83080000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_MT7916(_phy) MT_WF_PHY(0x2004 + ((_phy) << 20))
#define MT_WF_PHY_RX_CTRL1_IPI_EN GENMASK(2, 0)
#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16))
+#define MT_WF_PHY_RXTD12_MT7916(_phy) MT_WF_PHY(0x8230 + ((_phy) << 20))
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
-#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
new file mode 100644
index 000000000000..c74afa746251
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -0,0 +1,1243 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_gpio.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/clk.h>
+
+#include "mt7915.h"
+
+/* INFRACFG */
+#define MT_INFRACFG_CONN2AP_SLPPROT 0x0d0
+#define MT_INFRACFG_AP2CONN_SLPPROT 0x0d4
+
+#define MT_INFRACFG_RX_EN_MASK BIT(16)
+#define MT_INFRACFG_TX_RDY_MASK BIT(4)
+#define MT_INFRACFG_TX_EN_MASK BIT(0)
+
+/* TOP POS */
+#define MT_TOP_POS_FAST_CTRL 0x114
+#define MT_TOP_POS_FAST_EN_MASK BIT(3)
+
+#define MT_TOP_POS_SKU 0x21c
+#define MT_TOP_POS_SKU_MASK GENMASK(31, 28)
+#define MT_TOP_POS_SKU_ADIE_DBDC_MASK BIT(2)
+
+enum {
+ ADIE_SB,
+ ADIE_DBDC
+};
+
+static int
+mt76_wmac_spi_read(struct mt7915_dev *dev, u8 adie, u32 addr, u32 *val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_READ_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), 0);
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ *val = mt76_rr(dev, MT_TOP_SPI_READ_DATA_CR(adie));
+
+ return 0;
+}
+
+static int
+mt76_wmac_spi_write(struct mt7915_dev *dev, u8 adie, u32 addr, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_WRITE_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), val);
+
+ return read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+}
+
+static int
+mt76_wmac_spi_rmw(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 mask, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = mt76_wmac_spi_read(dev, adie, addr, &cur);
+ if (ret)
+ return ret;
+
+ cur &= ~mask;
+ cur |= val;
+
+ return mt76_wmac_spi_write(dev, adie, addr, cur);
+}
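+
A typical caller brackets these helpers with the RFSPI hardware semaphore defined further down in this file. A sketch, using an arbitrary a-die field purely as an example:

    /* Sketch: update one a-die field under the RFSPI semaphore. */
    static int adie_set_pga_gain_sketch(struct mt7915_dev *dev, u8 adie,
                                        u32 gain)
    {
            int ret;

            mt76_wmac_spi_lock(dev);
            ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
                                    MT_ADIE_PGA_GAIN_MASK,
                                    FIELD_PREP(MT_ADIE_PGA_GAIN_MASK, gain));
            mt76_wmac_spi_unlock(dev);

            return ret;
    }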
+
+static int
+mt7986_wmac_adie_efuse_read(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *data)
+{
+ int ret, temp;
+ u32 val, mask;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_EFUSE_CFG,
+ MT_ADIE_EFUSE_CTRL_MASK);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, BIT(30), 0x0);
+ if (ret)
+ return ret;
+
+ mask = (MT_ADIE_EFUSE_MODE_MASK | MT_ADIE_EFUSE_ADDR_MASK |
+ MT_ADIE_EFUSE_KICK_MASK);
+ val = FIELD_PREP(MT_ADIE_EFUSE_MODE_MASK, 0) |
+ FIELD_PREP(MT_ADIE_EFUSE_ADDR_MASK, addr) |
+ FIELD_PREP(MT_ADIE_EFUSE_KICK_MASK, 1);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, mask, val);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_wmac_spi_read, temp,
+ !temp && !FIELD_GET(MT_ADIE_EFUSE_KICK_MASK, val),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_EFUSE_VALID_MASK, val) == 1)
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE_RDATA0,
+ data);
+
+ return ret;
+}
+
+static inline void mt76_wmac_spi_lock(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ read_poll_timeout(mt76_rr, cur,
+ FIELD_GET(MT_SEMA_RFSPI_STATUS_MASK, cur),
+ 1000, 1000 * MSEC_PER_SEC, false, dev,
+ MT_SEMA_RFSPI_STATUS);
+}
+
+static inline void mt76_wmac_spi_unlock(struct mt7915_dev *dev)
+{
+ mt76_wr(dev, MT_SEMA_RFSPI_RELEASE, 1);
+}
+
+static u32 mt76_wmac_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= readl(base + offset) & ~mask;
+ writel(val, base + offset);
+
+ return val;
+}
+
+static u8 mt7986_wmac_check_adie_type(struct mt7915_dev *dev)
+{
+ u32 val;
+
+ val = readl(dev->sku + MT_TOP_POS_SKU);
+
+ return FIELD_GET(MT_TOP_POS_SKU_ADIE_DBDC_MASK, val);
+}
+
+static int mt7986_wmac_consys_reset(struct mt7915_dev *dev, bool enable)
+{
+ if (!enable)
+ return reset_control_assert(dev->rstc);
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_FAST_CTRL,
+ MT_TOP_POS_FAST_EN_MASK,
+ FIELD_PREP(MT_TOP_POS_FAST_EN_MASK, 0x1));
+
+ return reset_control_deassert(dev->rstc);
+}
+
+static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
+{
+ struct pinctrl_state *state;
+ struct pinctrl *pinctrl;
+ int ret;
+ u8 type;
+
+ type = mt7986_wmac_check_adie_type(dev);
+ pinctrl = devm_pinctrl_get(dev->mt76.dev);
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ switch (type) {
+ case ADIE_SB:
+ state = pinctrl_lookup_state(pinctrl, "default");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ case ADIE_DBDC:
+ state = pinctrl_lookup_state(pinctrl, "dbdc");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pinctrl_select_state(pinctrl, state);
+ if (ret)
+ return ret;
+
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int mt7986_wmac_consys_lockup(struct mt7915_dev *dev, bool enable)
+{
+ int ret;
+ u32 cur;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_RX_EN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_TX_RDY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+
+ return 0;
+}
+
+static int mt7986_wmac_coninfra_check(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02070000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC,
+ false, dev, MT_CONN_INFRA_BASE);
+}
+
+static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ u32 val;
+
+ np = of_parse_phandle(pdev->of_node, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(np);
+ if (!rmem)
+ return -EINVAL;
+
+ val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
+
+ /* Set conninfra subsys PLL check */
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_TOP_MCU_EMI_BASE,
+ MT_TOP_MCU_EMI_BASE_MASK, val);
+
+ mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base);
+ mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size);
+
+ mt76_rr(dev, MT_CONN_INFRA_EFUSE);
+
+ /* Set conninfra sysram */
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_PDN, 0);
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_SLP, 1);
+
+ return 0;
+}
+
+static int mt7986_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type)
+{
+ int ret;
+ u32 adie_main, adie_ext;
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE1_RESET_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1);
+
+ mt76_wmac_spi_lock(dev);
+
+ ret = mt76_wmac_spi_read(dev, 0, MT_ADIE_CHIP_ID, &adie_main);
+ if (ret)
+ goto out;
+
+ ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext);
+ if (ret)
+ goto out;
+
+ *adie_type = FIELD_GET(MT_ADIE_CHIP_ID_MASK, adie_main) |
+ (MT_ADIE_CHIP_ID_MASK & adie_ext);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return 0;
+}
+
+static inline u16 mt7986_adie_idx(u8 adie, u32 adie_type)
+{
+ if (adie == 0)
+ return u32_get_bits(adie_type, MT_ADIE_IDX0);
+ else
+ return u32_get_bits(adie_type, MT_ADIE_IDX1);
+}
+
+static inline bool is_7975(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7975;
+}
+
+static inline bool is_7976(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7976;
+}
+
+static int mt7986_wmac_adie_thermal_cal(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, val;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_ANALOG,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_VRPI_SEL_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC_BG,
+ MT_ADIE_VRPI_SEL_CR_MASK,
+ FIELD_PREP(MT_ADIE_VRPI_SEL_CR_MASK, val));
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(MT_ADIE_PGA_GAIN_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_PGA_GAIN_MASK,
+ FIELD_PREP(MT_ADIE_PGA_GAIN_MASK, val));
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_SLOP,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_LDO_CTRL_EFUSE_MASK, data);
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_LDO_CTRL_MASK,
+ FIELD_PREP(MT_ADIE_LDO_CTRL_MASK, val));
+ }
+
+ return 0;
+}
+
+static int
+mt7986_read_efuse_xo_trim_7976(struct mt7915_dev *dev, u8 adie,
+ bool is_40m, int *result)
+{
+ int ret;
+ u32 data, addr;
+
+ addr = is_40m ? MT_ADIE_XTAL_AXM_40M_OSC : MT_ADIE_XTAL_AXM_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data)) {
+ *result = 64;
+ } else {
+ *result = FIELD_GET(MT_ADIE_TRIM_MASK, data);
+ addr = is_40m ? MT_ADIE_XTAL_TRIM1_40M_OSC :
+ MT_ADIE_XTAL_TRIM1_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data) &&
+ FIELD_GET(MT_ADIE_XTAL_DECREASE_MASK, data))
+ *result -= FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+ else if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data))
+ *result += FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+
+ *result = max(0, min(127, *result));
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
+{
+ int ret, trim_80m, trim_40m;
+ u32 data, val, mode;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_XO_TRIM_FLOW,
+ &data);
+ if (ret || !FIELD_GET(BIT(1), data))
+ return 0;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, false, &trim_80m);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, true, &trim_40m);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_RG_STRAP_PIN_IN, &val);
+ if (ret)
+ return ret;
+
+ mode = FIELD_GET(GENMASK(6, 4), val);
+ if (!mode || mode == 0x2) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ } else if (mode == 0x3 || mode == 0x4 || mode == 0x6) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ }
+
+ return ret;
+}
+
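+/* Apply one-off register fixups to the MT7976 a-die; the XO register
+ * values depend on the die version read back from MT_ADIE_CHIP_ID.
+ */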
+static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
+{
+ u32 id, version, rg_xo_01, rg_xo_03;
+ int ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_CHIP_ID, &id);
+ if (ret)
+ return ret;
+
+ version = FIELD_GET(MT_ADIE_VERSION_MASK, id);
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
+ if (ret)
+ return ret;
+
+ if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
+ rg_xo_01 = 0x1d59080f;
+ rg_xo_03 = 0x34c00fe0;
+ } else {
+ rg_xo_01 = 0x1959f80f;
+ rg_xo_03 = 0x34d00fe0;
+ }
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, rg_xo_01);
+ if (ret)
+ return ret;
+
+ return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, rg_xo_03);
+}
+
+static int
+mt7986_read_efuse_xo_trim_7975(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *result)
+{
+ int ret;
+ u32 data;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MT_ADIE_XO_TRIM_EN_MASK)) {
+ if ((data & MT_ADIE_XTAL_DECREASE_MASK))
+ *result -= (data & MT_ADIE_EFUSE_TRIM_MASK);
+ else
+ *result += (data & MT_ADIE_EFUSE_TRIM_MASK);
+
+ *result = (*result & MT_ADIE_TRIM_MASK);
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, result = 0, value;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_EN,
+ &data);
+ if (ret || !(data & BIT(1)))
+ return 0;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_CAL,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & MT_ADIE_XO_TRIM_EN_MASK)
+ result = (data & MT_ADIE_TRIM_MASK);
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM2,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM3,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM4,
+ &result);
+ if (ret)
+ return ret;
+
+ /* Update trim value to C1 and C2 */
+ value = FIELD_GET(MT_ADIE_7975_XO_CTRL2_C1_MASK, result) |
+ FIELD_GET(MT_ADIE_7975_XO_CTRL2_C2_MASK, result);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL2,
+ MT_ADIE_7975_XO_CTRL2_MASK, value);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_7975_XTAL, &value);
+ if (ret)
+ return ret;
+
+ if (value & MT_ADIE_7975_XTAL_EN_MASK) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_2,
+ MT_ADIE_7975_XO_2_FIX_EN, 0x0);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL6,
+ MT_ADIE_7975_XO_CTRL6_MASK, 0x1);
+}
+
+static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+
+ /* disable CAL LDO and fine tune RFDIG LDO */
+ ret = mt76_wmac_spi_write(dev, adie, 0x348, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x378, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a8, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d8, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* set CKA driving and filter */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa1c, 0x30000aaa);
+ if (ret)
+ return ret;
+
+ /* set CKB LDO to 1.4V */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa84, 0x8470008a);
+ if (ret)
+ return ret;
+
+ /* turn on SX0 LTBUF */
+ ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* CK_BUF_SW_EN = 1 (all buffers in manual mode) */
+ ret = mt76_wmac_spi_write(dev, adie, 0xaa4, 0x01001fc0);
+ if (ret)
+ return ret;
+
+ /* BT mode/WF normal mode 00000005 */
+ ret = mt76_wmac_spi_write(dev, adie, 0x070, 0x00000005);
+ if (ret)
+ return ret;
+
+ /* BG thermal sensor offset update */
+ ret = mt76_wmac_spi_write(dev, adie, 0x344, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x374, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a4, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d4, 0x00000088);
+ if (ret)
+ return ret;
+
+ /* set WCON VDD IPTAT to "0000" */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa80, 0x44d07000);
+ if (ret)
+ return ret;
+
+ /* change LTBUF SX3 driving back to the default value */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa88, 0x3900aaaa);
+ if (ret)
+ return ret;
+
+ /* SM input cap off */
+ ret = mt76_wmac_spi_write(dev, adie, 0x2c4, 0x00000000);
+ if (ret)
+ return ret;
+
+ /* set CKB driving and filter */
+ return mt76_wmac_spi_write(dev, adie, 0x2c8, 0x00000072);
+}
+
+static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ mt76_wmac_spi_lock(dev);
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_CLK_EN, ~0);
+ if (ret)
+ goto out;
+
+ if (is_7975(dev, adie, adie_type)) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_COCLK,
+ BIT(1), 0x1);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7975(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7975(dev, adie);
+ } else if (is_7976(dev, adie, adie_type)) {
+ if (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC) {
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_ADIE_WRI_CK_SEL, 0x1c);
+ if (ret)
+ goto out;
+ }
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7976(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7976(dev, adie);
+ }
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
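+/* Run the analog front-end calibration for one a-die: enable RCK, bring
+ * up BPLL/WPLL, trigger TX calibration and switch everything off again.
+ * In the DBDC case the shared AFE index is used instead of the die number.
+ */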
+static int
+mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type)
+{
+ int ret;
+ u8 idx;
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, adie, adie_type))
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x80000000);
+ else
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x88888005);
+ if (ret)
+ goto out;
+
+ idx = dbdc ? ADIE_DBDC : adie;
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_BPLL_UP_MASK, 0x1);
+ usleep_range(30, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x1f);
+ usleep_range(800, 1000);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x0);
+ mt76_rmw(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_PLL_UP_MASK, 0x0);
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x5);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static void mt7986_wmac_subsys_pll_initial(struct mt7915_dev *dev, u8 band)
+{
+ mt76_rmw(dev, MT_AFE_PLL_STB_TIME(band),
+ MT_AFE_PLL_STB_TIME_MASK, MT_AFE_PLL_STB_TIME_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_02(band),
+ MT_AFE_PLL_CFG_MASK, MT_AFE_PLL_CFG_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_TOP_01(band),
+ MT_AFE_DIG_TOP_01_MASK, MT_AFE_DIG_TOP_01_VAL);
+}
+
+static void mt7986_wmac_subsys_setting(struct mt7915_dev *dev)
+{
+ /* Subsys pll init */
+ mt7986_wmac_subsys_pll_initial(dev, 0);
+ mt7986_wmac_subsys_pll_initial(dev, 1);
+
+ /* Set legacy OSC control stable time */
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_RC_EN,
+ MT_CONN_INFRA_OSC_RC_EN_MASK, 0x0);
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_CTRL,
+ MT_CONN_INFRA_OSC_STB_TIME_MASK, 0x80706);
+
+ /* prevent the subsys from being powered on/off within a short interval */
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR,
+ MT_TOP_PWR_ACK_MASK | MT_TOP_PWR_KEY_MASK,
+ MT_TOP_PWR_KEY);
+}
+
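+/* Arm the conninfra bus on/off timeout monitors, then verify that
+ * conninfra is still reachable.
+ */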
+static int mt7986_wmac_bus_timeout(struct mt7915_dev *dev)
+{
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0x2);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0xc);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ return mt7986_wmac_coninfra_check(dev);
+}
+
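+/* Switch the conninfra bus and RFSPI clocks over to the WPLL dividers and
+ * enable the a-die sleep controllers, polling until their busy flags clear.
+ */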
+static void mt7986_wmac_clock_enable(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x8);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_CLK_SEL_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_HW_CTRL,
+ MT_CONN_INFRA_HW_CTRL_MASK, 0x1);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x1);
+
+ usleep_range(900, 1000);
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, 0, adie_type) || is_7976(dev, 0, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(0),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(0));
+ }
+ if (is_7975(dev, 1, adie_type) || is_7976(dev, 1, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(1),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(1));
+ }
+ mt76_wmac_spi_unlock(dev);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x0);
+ usleep_range(900, 1000);
+}
+
+static int mt7986_wmac_top_wfsys_wakeup(struct mt7915_dev *dev, bool enable)
+{
+ mt76_rmw_field(dev, MT_TOP_WFSYS_WAKEUP,
+ MT_TOP_WFSYS_WAKEUP_MASK, enable);
+
+ usleep_range(900, 1000);
+
+ if (!enable)
+ return 0;
+
+ return mt7986_wmac_coninfra_check(dev);
+}
+
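+/* Toggle the WM (firmware MCU) reset; when enabling, poll until the ROM
+ * code reports the expected index (0x1d1e) in MT_TOP_CFG_ON_ROM_IDX.
+ */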
+static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
+ MT7986_TOP_WM_RESET_MASK, enable);
+ if (!enable)
+ return 0;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x1d1e),
+ USEC_PER_MSEC, 5000 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_ON_ROM_IDX);
+}
+
+static int mt7986_wmac_wfsys_poweron(struct mt7915_dev *dev, bool enable)
+{
+ u32 mask = MT_TOP_PWR_EN_MASK | MT_TOP_PWR_KEY_MASK;
+ u32 cur;
+
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR, mask,
+ MT_TOP_PWR_KEY | FIELD_PREP(MT_TOP_PWR_EN_MASK, enable));
+
+ return read_poll_timeout(mt76_rr, cur,
+ (FIELD_GET(MT_TOP_WFSYS_RESET_STATUS_MASK, cur) == enable),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_WFSYS_RESET_STATUS);
+}
+
+static int mt7986_wmac_wfsys_setting(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 cur;
+
+ /* Turn off wfsys2conn bus sleep protect */
+ mt76_rmw(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x0);
+
+ ret = mt7986_wmac_wfsys_poweron(dev, true);
+ if (ret)
+ return ret;
+
+ /* Check bus sleep protect */
+ ret = read_poll_timeout(mt76_rr, cur,
+ !(cur & MT_CONN_INFRA_CONN_WF_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_WFDMA2CONN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_SLP_STATUS);
+ if (ret)
+ return ret;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02060000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_IP_VERSION_ADDR);
+}
+
+static void mt7986_wmac_wfsys_set_timeout(struct mt7915_dev *dev)
+{
+ u32 mask = MT_MCU_BUS_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_TIMEOUT_CG_EN_MASK |
+ MT_MCU_BUS_TIMEOUT_EN_MASK;
+ u32 val = FIELD_PREP(MT_MCU_BUS_TIMEOUT_SET_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_CG_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_TIMEOUT, mask, val);
+
+ mt76_wr(dev, MT_MCU_BUS_REMAP, 0x810f0000);
+
+ mask = MT_MCU_BUS_DBG_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_EN_MASK;
+ val = FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_SET_MASK, 0x3aa) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_DBG_TIMEOUT, mask, val);
+}
+
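+/* Encode the populated a-die combination into a SKU value and publish it
+ * through the POS SKU register and the conninfra SKU decoder.
+ */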
+static int mt7986_wmac_sku_update(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 val;
+
+ if (is_7976(dev, 0, adie_type) && is_7976(dev, 1, adie_type))
+ val = 0xf;
+ else if (is_7975(dev, 0, adie_type) && is_7975(dev, 1, adie_type))
+ val = 0xd;
+ else if (is_7976(dev, 0, adie_type))
+ val = 0x7;
+ else if (is_7975(dev, 1, adie_type))
+ val = 0x8;
+ else if (is_7976(dev, 1, adie_type))
+ val = 0xa;
+ else
+ return -EINVAL;
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_SKU, MT_TOP_POS_SKU_MASK,
+ FIELD_PREP(MT_TOP_POS_SKU_MASK, val));
+
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, val);
+
+ return 0;
+}
+
+static int
+mt7986_wmac_adie_setup(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ if (!(is_7975(dev, adie, adie_type) || is_7976(dev, adie, adie_type)))
+ return 0;
+
+ ret = mt7986_wmac_adie_cfg(dev, adie, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_afe_cal(dev, adie, false, adie_type);
+ if (ret)
+ return ret;
+
+ if (!adie && (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC))
+ ret = mt7986_wmac_afe_cal(dev, adie, true, adie_type);
+
+ return ret;
+}
+
+static int mt7986_wmac_subsys_powerup(struct mt7915_dev *dev, u32 adie_type)
+{
+ int ret;
+
+ mt7986_wmac_subsys_setting(dev);
+
+ ret = mt7986_wmac_bus_timeout(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_clock_enable(dev, adie_type);
+
+ return 0;
+}
+
+static int mt7986_wmac_wfsys_powerup(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7986_wmac_wm_enable(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_setting(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_wfsys_set_timeout(dev);
+
+ return mt7986_wmac_wm_enable(dev, true);
+}
+
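+/* Top-level power-on sequence for the SoC WMAC: consys reset and unlock,
+ * conninfra setup, a-die probing and calibration, subsystem power-up,
+ * WFSYS wake-up and firmware boot, then the final SKU update.
+ */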
+int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 adie_type;
+
+ ret = mt7986_wmac_consys_reset(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_gpio_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_consys_lockup(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_coninfra_check(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_coninfra_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_sku_setup(dev, &adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 0, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 1, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_subsys_powerup(dev, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_top_wfsys_wakeup(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_powerup(dev);
+ if (ret)
+ return ret;
+
+ return mt7986_wmac_sku_update(dev, adie_type);
+}
+
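+/* Reverse of mt7986_wmac_enable(): wake WFSYS so it can be powered down
+ * cleanly, re-enable the bus sleep protection, pulse the EMI request bits
+ * and put consys back into reset.
+ */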
+void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ mt7986_wmac_top_wfsys_wakeup(dev, true);
+
+ /* Turn on wfsys2conn bus sleep protect */
+ mt76_rmw_field(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x1);
+
+ /* Check wfsys2conn bus sleep protect */
+ read_poll_timeout(mt76_rr, cur, !(cur ^ MT_CONN_INFRA_CONN),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+
+ mt7986_wmac_wfsys_poweron(dev, false);
+
+ /* Restore the WPLL settings */
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_MCU_BPLL_CFG_MASK, 0x2);
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_WPLL_CFG_MASK, 0x2);
+
+ /* Reset EMI */
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x0);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x0);
+
+ mt7986_wmac_top_wfsys_wakeup(dev, false);
+ mt7986_wmac_consys_lockup(dev, true);
+ mt7986_wmac_consys_reset(dev, false);
+}
+
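+/* Acquire the clocks, the extra register ranges (dcm, sku) and the consys
+ * reset line used by the SoC WMAC; missing clocks are only warned about.
+ */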
+static int mt7986_wmac_init(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct platform_device *pfdev = to_platform_device(pdev);
+ struct clk *mcu_clk, *ap_conn_clk;
+
+ mcu_clk = devm_clk_get(pdev, "mcu");
+ if (IS_ERR(mcu_clk))
+ dev_err(pdev, "mcu clock not found\n");
+ else if (clk_prepare_enable(mcu_clk))
+ dev_err(pdev, "mcu clock configuration failed\n");
+
+ ap_conn_clk = devm_clk_get(pdev, "ap2conn");
+ if (IS_ERR(ap_conn_clk))
+ dev_err(pdev, "ap2conn clock not found\n");
+ else if (clk_prepare_enable(ap_conn_clk))
+ dev_err(pdev, "ap2conn clock configuration failed\n");
+
+ dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
+ if (IS_ERR(dev->dcm))
+ return PTR_ERR(dev->dcm);
+
+ dev->sku = devm_platform_ioremap_resource(pfdev, 2);
+ if (IS_ERR(dev->sku))
+ return PTR_ERR(dev->sku);
+
+ dev->rstc = devm_reset_control_get(pdev, "consys");
+ if (IS_ERR(dev->rstc))
+ return PTR_ERR(dev->rstc);
+
+ return 0;
+}
+
+static int mt7986_wmac_probe(struct platform_device *pdev)
+{
+ void __iomem *mem_base;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int irq, ret;
+ u32 chip_id;
+
+ chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ dev = mt7915_mmio_probe(&pdev->dev, mem_base, chip_id);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_device;
+
+ ret = mt7986_wmac_init(dev);
+ if (ret)
+ goto free_irq;
+
+ mt7915_wfsys_reset(dev);
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ devm_free_irq(mdev->dev, irq, dev);
+
+free_device:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static int mt7986_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7915_dev *dev = platform_get_drvdata(pdev);
+
+ mt7915_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7986_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7986-wmac", .data = (u32 *)0x7986 },
+ {},
+};
+
+struct platform_driver mt7986_wmac_driver = {
+ .driver = {
+ .name = "mt7986-wmac",
+ .of_match_table = mt7986_wmac_of_match,
+ },
+ .probe = mt7986_wmac_probe,
+ .remove = mt7986_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM_MT7975);
+MODULE_FIRMWARE(MT7986_ROM_PATCH);
+MODULE_FIRMWARE(MT7986_ROM_PATCH_MT7975);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index af80c2cf8c83..efb9bb8231e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -23,30 +23,16 @@ struct reg_band {
u32 band[2];
};
-#define REG_BAND(_reg) \
- { .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) }
-#define REG_BAND_IDX(_reg, _idx) \
- { .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) }
-
-static const struct reg_band reg_backup_list[] = {
- REG_BAND_IDX(AGG_PCR0, 0),
- REG_BAND_IDX(AGG_PCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 0),
- REG_BAND_IDX(AGG_AWSCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 2),
- REG_BAND_IDX(AGG_AWSCR0, 3),
- REG_BAND(AGG_MRCR),
- REG_BAND(TMAC_TFCR0),
- REG_BAND(TMAC_TCR0),
- REG_BAND(AGG_ATCR1),
- REG_BAND(AGG_ATCR3),
- REG_BAND(TMAC_TRCR0),
- REG_BAND(TMAC_ICR0),
- REG_BAND_IDX(ARB_DRNGR0, 0),
- REG_BAND_IDX(ARB_DRNGR0, 1),
- REG_BAND(WF_RFCR),
- REG_BAND(WF_RFCR1),
-};
+#define REG_BAND(_list, _reg) \
+ { _list.band[0] = MT_##_reg(0); \
+ _list.band[1] = MT_##_reg(1); }
+#define REG_BAND_IDX(_list, _reg, _idx) \
+ { _list.band[0] = MT_##_reg(0, _idx); \
+ _list.band[1] = MT_##_reg(1, _idx); }
+
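+/* reg_backup_list is now filled in at runtime by
+ * mt7915_tm_reg_backup_restore() instead of through a static initializer,
+ * so the REG_BAND helpers expand to assignments rather than designated
+ * initializers.
+ */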
+#define TM_REG_MAX_ID 17
+static struct reg_band reg_backup_list[TM_REG_MAX_ID];
+
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@@ -182,13 +168,14 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
}
static int
-mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
+mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
u16 cw_max, u16 txop)
{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
struct mt7915_mcu_tx req = { .total = 1 };
struct edca *e = &req.edca[0];
- e->queue = qid;
+ e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
e->set = WMM_PARAM_SET;
e->aifs = aifs;
@@ -196,7 +183,7 @@ mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
e->cw_max = cpu_to_le16(cw_max);
e->txop = cpu_to_le16(txop);
- return mt7915_mcu_update_edca(dev, &req);
+ return mt7915_mcu_update_edca(phy->dev, &req);
}
static int
@@ -212,7 +199,6 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
u8 aifsn = TM_MIN_AIFSN;
u32 i2t_time, tr2t_time, txv_time;
- bool ext_phy = phy != &dev->phy;
u16 cw = 0;
if (ipg < sig_ext + slot_time + sifs)
@@ -242,29 +228,25 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
ipg -= aifsn * slot_time;
- if (ipg > TM_DEFAULT_SIFS) {
- if (ipg < TM_MAX_SIFS)
- sifs = ipg;
- else
- sifs = TM_MAX_SIFS;
- }
+ if (ipg > TM_DEFAULT_SIFS)
+ sifs = min_t(u32, ipg, TM_MAX_SIFS);
}
done:
- txv_time = mt76_get_field(dev, MT_TMAC_ATCR(ext_phy),
+ txv_time = mt76_get_field(dev, MT_TMAC_ATCR(phy->band_idx),
MT_TMAC_ATCR_TXV_TOUT);
txv_time *= 50; /* normal clock time */
i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
- mt76_set(dev, MT_TMAC_TRCR0(ext_phy),
+ mt76_set(dev, MT_TMAC_TRCR0(phy->band_idx),
FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
mt7915_tm_set_slot_time(phy, slot_time, sifs);
- return mt7915_tm_set_wmm_qid(dev,
- mt7915_lmac_mapping(dev, IEEE80211_AC_BE),
+ return mt7915_tm_set_wmm_qid(phy,
+ mt76_connac_lmac_mapping(IEEE80211_AC_BE),
aifsn, cw, cw, 0);
}
@@ -290,6 +272,8 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
case MT76_TM_TX_MODE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -351,13 +335,30 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
{
int n_regs = ARRAY_SIZE(reg_backup_list);
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u32 *b = phy->test.reg_backup;
int i;
+ REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
+ REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
+ REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
+ REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
+ REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
+ REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
+ REG_BAND(reg_backup_list[6], AGG_MRCR);
+ REG_BAND(reg_backup_list[7], TMAC_TFCR0);
+ REG_BAND(reg_backup_list[8], TMAC_TCR0);
+ REG_BAND(reg_backup_list[9], AGG_ATCR1);
+ REG_BAND(reg_backup_list[10], AGG_ATCR3);
+ REG_BAND(reg_backup_list[11], TMAC_TRCR0);
+ REG_BAND(reg_backup_list[12], TMAC_ICR0);
+ REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
+ REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
+ REG_BAND(reg_backup_list[15], WF_RFCR);
+ REG_BAND(reg_backup_list[16], WF_RFCR1);
+
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
- mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
+ mt76_wr(dev, reg_backup_list[i].band[phy->band_idx], b[i]);
return;
}
@@ -368,33 +369,33 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
- b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
+ b[i] = mt76_rr(dev, reg_backup_list[i].band[phy->band_idx]);
}
- mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT |
+ mt76_clear(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_MM_PROT |
MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
- mt76_set(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_PTA_WIN_DIS);
+ mt76_set(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_PTA_WIN_DIS);
- mt76_wr(dev, MT_AGG_PCR0(ext_phy, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
+ mt76_wr(dev, MT_AGG_PCR0(phy->band_idx, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
MT_AGG_PCR1_RTS0_LEN_THRES);
- mt76_clear(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_BAR_CNT_LIMIT |
+ mt76_clear(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_BAR_CNT_LIMIT |
MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
- mt76_rmw(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_RTS_FAIL_LIMIT |
+ mt76_rmw(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
- mt76_wr(dev, MT_TMAC_TFCR0(ext_phy), 0);
- mt76_clear(dev, MT_TMAC_TCR0(ext_phy), MT_TMAC_TCR0_TBTT_STOP_CTRL);
+ mt76_wr(dev, MT_TMAC_TFCR0(phy->band_idx), 0);
+ mt76_clear(dev, MT_TMAC_TCR0(phy->band_idx), MT_TMAC_TCR0_TBTT_STOP_CTRL);
/* config rx filter for testmode rx */
- mt76_wr(dev, MT_WF_RFCR(ext_phy), 0xcf70a);
- mt76_wr(dev, MT_WF_RFCR1(ext_phy), 0);
+ mt76_wr(dev, MT_WF_RFCR(phy->band_idx), 0xcf70a);
+ mt76_wr(dev, MT_WF_RFCR1(phy->band_idx), 0);
}
static void
@@ -452,7 +453,7 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
u8 tx_ant = td->tx_antenna_mask;
if (phy != &dev->phy)
- tx_ant >>= 2;
+ tx_ant >>= dev->chainshift;
phy->test.spe_idx = spe_idx_map[tx_ant];
}
}
@@ -574,6 +575,8 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
if (chandef->chan->band == NL80211_BAND_5GHZ)
sband = &phy->mt76->sband_5g.sband;
+ else if (chandef->chan->band == NL80211_BAND_6GHZ)
+ sband = &phy->mt76->sband_6g.sband;
else
sband = &phy->mt76->sband_2g.sband;
@@ -720,11 +723,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
enum mt76_rxq_id q;
void *rx, *rssi;
u16 fcs_err;
int i;
+ u32 cnt;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
@@ -768,9 +771,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
- fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
- q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+
+ q = phy->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
index 71154fc2a87c..adff2d7350b5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
@@ -24,3 +24,14 @@ config MT7921S
This adds support for MT7921S 802.11ax 2x2:2SS wireless devices.
To compile this driver as a module, choose M here.
+
+config MT7921U
+ tristate "MediaTek MT7921U (USB) support"
+ select MT76_USB
+ select MT7921_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7921U 802.11ax 2x2:2SS wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index 1187acedfeda..e5d2d2e131a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -3,10 +3,13 @@
obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o
obj-$(CONFIG_MT7921E) += mt7921e.o
obj-$(CONFIG_MT7921S) += mt7921s.o
+obj-$(CONFIG_MT7921U) += mt7921u.o
CFLAGS_trace.o := -I$(src)
mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o
mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7921-common-$(CONFIG_ACPI) += acpi_sar.o
mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o
mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o
+mt7921u-y := usb.o usb_mac.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
new file mode 100644
index 000000000000..47e034a9b003
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/acpi.h>
+#include "mt7921.h"
+
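+/* Evaluate an ACPI method below the device node and copy its package of
+ * integers into a driver-owned table; *len, when supplied, returns the
+ * package element count.
+ */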
+static int
+mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *sar_root, *sar_unit;
+ struct mt76_dev *mdev = &dev->mt76;
+ acpi_handle root, handle;
+ acpi_status status;
+ u32 i = 0;
+ int ret;
+
+ root = ACPI_HANDLE(mdev->dev);
+ if (!root)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root, method, &handle);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ sar_root = buf.pointer;
+ if (sar_root->type != ACPI_TYPE_PACKAGE ||
+ sar_root->package.count < 4 ||
+ sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
+ dev_err(mdev->dev, "sar cnt = %d\n",
+ sar_root->package.count);
+ goto free;
+ }
+
+ if (!*tbl) {
+ *tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
+ GFP_KERNEL);
+ if (!*tbl)
+ goto free;
+ }
+ if (len)
+ *len = sar_root->package.count;
+
+ for (i = 0; i < sar_root->package.count; i++) {
+ sar_unit = &sar_root->package.elements[i];
+
+ if (sar_unit->type != ACPI_TYPE_INTEGER)
+ break;
+ *(*tbl + i) = (u8)sar_unit->integer.value;
+ }
+free:
+ ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+
+ kfree(sar_root);
+
+ return ret;
+}
+
+/* MTCL : Country List Table for 6G band */
+static int
+mt7921_asar_acpi_read_mtcl(struct mt7921_dev *dev, u8 **table, u8 *version)
+{
+ *version = (mt7921_acpi_read(dev, MT7921_ACPI_MTCL, table, NULL) < 0)
+ ? 1 : 2;
+ return 0;
+}
+
+/* MTDS : Dynamic SAR Power Table */
+static int
+mt7921_asar_acpi_read_mtds(struct mt7921_dev *dev, u8 **table, u8 version)
+{
+ int len, ret, sarlen, prelen, tblcnt;
+ bool enable;
+
+ ret = mt7921_acpi_read(dev, MT7921_ACPI_MTDS, table, &len);
+ if (ret)
+ return ret;
+
+ /* Table content validation */
+ switch (version) {
+ case 1:
+ enable = ((struct mt7921_asar_dyn *)*table)->enable;
+ sarlen = sizeof(struct mt7921_asar_dyn_limit);
+ prelen = sizeof(struct mt7921_asar_dyn);
+ break;
+ case 2:
+ enable = ((struct mt7921_asar_dyn_v2 *)*table)->enable;
+ sarlen = sizeof(struct mt7921_asar_dyn_limit_v2);
+ prelen = sizeof(struct mt7921_asar_dyn_v2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tblcnt = (len - prelen) / sarlen;
+ if (!enable ||
+ tblcnt > MT7921_ASAR_MAX_DYN || tblcnt < MT7921_ASAR_MIN_DYN)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* MTGS : Geo SAR Power Table */
+static int
+mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version)
+{
+ int len, ret = 0, sarlen, prelen, tblcnt;
+
+ ret = mt7921_acpi_read(dev, MT7921_ACPI_MTGS, table, &len);
+ if (ret)
+ return ret;
+
+ /* Table content validation */
+ switch (version) {
+ case 1:
+ sarlen = sizeof(struct mt7921_asar_geo_limit);
+ prelen = sizeof(struct mt7921_asar_geo);
+ break;
+ case 2:
+ sarlen = sizeof(struct mt7921_asar_geo_limit_v2);
+ prelen = sizeof(struct mt7921_asar_geo_v2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tblcnt = (len - prelen) / sarlen;
+ if (tblcnt > MT7921_ASAR_MAX_GEO || tblcnt < MT7921_ASAR_MIN_GEO)
+ ret = -EINVAL;
+
+ return ret;
+}
+
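+/* Load the ACPI SAR tables: MTCL selects the table version, MTDS
+ * (mandatory) carries the dynamic power limits and MTGS (optional) the
+ * geo-based limits.
+ */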
+int mt7921_init_acpi_sar(struct mt7921_dev *dev)
+{
+ struct mt7921_acpi_sar *asar;
+ int ret;
+
+ asar = devm_kzalloc(dev->mt76.dev, sizeof(*asar), GFP_KERNEL);
+ if (!asar)
+ return -ENOMEM;
+
+ mt7921_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+
+ /* MTDS is mandatory. Return error if table is invalid */
+ ret = mt7921_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->dyn);
+ devm_kfree(dev->mt76.dev, asar->countrylist);
+ devm_kfree(dev->mt76.dev, asar);
+ return ret;
+ }
+
+ /* MTGS is optional */
+ ret = mt7921_asar_acpi_read_mtgs(dev, (u8 **)&asar->geo, asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->geo);
+ asar->geo = NULL;
+ }
+
+ dev->phy.acpisar = asar;
+
+ return 0;
+}
+
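+/* Combine the dynamic SAR power with the geo table: pick the row for the
+ * current regulatory region (FCC/ETSI/WW), add the per-band offset and cap
+ * the result at the per-band geo limit.
+ */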
+static s8
+mt7921_asar_get_geo_pwr(struct mt7921_phy *phy,
+ enum nl80211_band band, s8 dyn_power)
+{
+ struct mt7921_acpi_sar *asar = phy->acpisar;
+ struct mt7921_asar_geo_band *band_pwr;
+ s8 geo_power;
+ u8 idx, max;
+
+ if (!asar->geo)
+ return dyn_power;
+
+ switch (phy->mt76->dev->region) {
+ case NL80211_DFS_FCC:
+ idx = 0;
+ break;
+ case NL80211_DFS_ETSI:
+ idx = 1;
+ break;
+ default: /* WW */
+ idx = 2;
+ break;
+ }
+
+ if (asar->ver == 1) {
+ band_pwr = &asar->geo->tbl[idx].band[0];
+ max = ARRAY_SIZE(asar->geo->tbl[idx].band);
+ } else {
+ band_pwr = &asar->geo_v2->tbl[idx].band[0];
+ max = ARRAY_SIZE(asar->geo_v2->tbl[idx].band);
+ }
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ idx = 0;
+ break;
+ case NL80211_BAND_5GHZ:
+ idx = 1;
+ break;
+ case NL80211_BAND_6GHZ:
+ idx = 2;
+ break;
+ default:
+ return dyn_power;
+ }
+
+ if (idx >= max)
+ return dyn_power;
+
+ geo_power = (band_pwr + idx)->pwr;
+ dyn_power += (band_pwr + idx)->offset;
+
+ return min(geo_power, dyn_power);
+}
+
+static s8
+mt7921_asar_range_pwr(struct mt7921_phy *phy,
+ const struct cfg80211_sar_freq_ranges *range,
+ u8 idx)
+{
+ const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+ struct mt7921_acpi_sar *asar = phy->acpisar;
+ u8 *limit, band, max;
+
+ if (!capa)
+ return 127;
+
+ if (asar->ver == 1) {
+ limit = &asar->dyn->tbl[0].frp[0];
+ max = ARRAY_SIZE(asar->dyn->tbl[0].frp);
+ } else {
+ limit = &asar->dyn_v2->tbl[0].frp[0];
+ max = ARRAY_SIZE(asar->dyn_v2->tbl[0].frp);
+ }
+
+ if (idx >= max)
+ return 127;
+
+ if (range->start_freq >= 5945)
+ band = NL80211_BAND_6GHZ;
+ else if (range->start_freq >= 5150)
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_2GHZ;
+
+ return mt7921_asar_get_geo_pwr(phy, band, limit[idx]);
+}
+
+int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
+{
+ const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+ int i;
+
+ if (!phy->acpisar)
+ return 0;
+
+ /* When ACPI SAR is enabled in HW, apply the following rules for .frp:
+ * 1. w/o .sar_specs : set the ACPI SAR power as the default value
+ * 2. w/ .sar_specs : set power with min(.sar_specs, ACPI_SAR)
+ */
+ for (i = 0; i < capa->num_freq_ranges; i++) {
+ struct mt76_freq_range_power *frp = &phy->mt76->frp[i];
+
+ frp->range = set_default ? &capa->freq_ranges[i] : frp->range;
+ if (!frp->range)
+ continue;
+
+ frp->power = min_t(s8, set_default ? 127 : frp->power,
+ mt7921_asar_range_pwr(phy, frp->range, i));
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
new file mode 100644
index 000000000000..23f86bfae0c0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#ifndef __MT7921_ACPI_SAR_H
+#define __MT7921_ACPI_SAR_H
+
+#define MT7921_ASAR_MIN_DYN 1
+#define MT7921_ASAR_MAX_DYN 8
+#define MT7921_ASAR_MIN_GEO 3
+#define MT7921_ASAR_MAX_GEO 8
+
+#define MT7921_ACPI_MTCL "MTCL"
+#define MT7921_ACPI_MTDS "MTDS"
+#define MT7921_ACPI_MTGS "MTGS"
+
+struct mt7921_asar_dyn_limit {
+ u8 idx;
+ u8 frp[5];
+} __packed;
+
+struct mt7921_asar_dyn {
+ u8 names[4];
+ u8 enable;
+ u8 nr_tbl;
+ struct mt7921_asar_dyn_limit tbl[0];
+} __packed;
+
+struct mt7921_asar_dyn_limit_v2 {
+ u8 idx;
+ u8 frp[11];
+} __packed;
+
+struct mt7921_asar_dyn_v2 {
+ u8 names[4];
+ u8 enable;
+ u8 rsvd;
+ u8 nr_tbl;
+ struct mt7921_asar_dyn_limit_v2 tbl[0];
+} __packed;
+
+struct mt7921_asar_geo_band {
+ u8 pwr;
+ u8 offset;
+} __packed;
+
+struct mt7921_asar_geo_limit {
+ u8 idx;
+ /* 0:2G, 1:5G */
+ struct mt7921_asar_geo_band band[2];
+} __packed;
+
+struct mt7921_asar_geo {
+ u8 names[4];
+ u8 version;
+ u8 nr_tbl;
+ struct mt7921_asar_geo_limit tbl[0];
+} __packed;
+
+struct mt7921_asar_geo_limit_v2 {
+ u8 idx;
+ /* 0:2G, 1:5G, 2:6G */
+ struct mt7921_asar_geo_band band[3];
+} __packed;
+
+struct mt7921_asar_geo_v2 {
+ u8 names[4];
+ u8 version;
+ u8 rsvd;
+ u8 nr_tbl;
+ struct mt7921_asar_geo_limit_v2 tbl[0];
+} __packed;
+
+struct mt7921_asar_cl {
+ u8 names[4];
+ u8 version;
+ u8 mode_6g;
+ u8 cl6g[6];
+} __packed;
+
+struct mt7921_acpi_sar {
+ u8 ver;
+ union {
+ struct mt7921_asar_dyn *dyn;
+ struct mt7921_asar_dyn_v2 *dyn_v2;
+ };
+ union {
+ struct mt7921_asar_geo *geo;
+ struct mt7921_asar_geo_v2 *geo_v2;
+ };
+ struct mt7921_asar_cl *countrylist;
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 86fd7292b229..bce76417f95d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -129,23 +129,22 @@ mt7921_queues_acq(struct seq_file *s, void *data)
mt7921_mutex_acquire(dev);
- for (i = 0; i < 16; i++) {
- int j, acs = i / 4, index = i % 4;
+ for (i = 0; i < 4; i++) {
u32 ctrl, val, qlen = 0;
+ int j;
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
- ctrl = BIT(31) | BIT(15) | (acs << 8);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i));
+ ctrl = BIT(31) | BIT(11) | (i << 24);
for (j = 0; j < 32; j++) {
if (val & BIT(j))
continue;
- mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
- ctrl | (j + (index << 5)));
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j);
qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
GENMASK(11, 0));
}
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ seq_printf(s, "AC%d: queued=%d\n", i, qlen);
}
mt7921_mutex_release(dev);
@@ -262,26 +261,21 @@ mt7921_txpwr(struct seq_file *s, void *data)
return 0;
}
-static void
-mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt7921_dev *dev = priv;
-
- mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
-}
-
static int
mt7921_pm_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ if (mt76_is_usb(&dev->mt76))
+ return -EOPNOTSUPP;
+
mutex_lock(&dev->mt76.mutex);
- if (val == pm->enable)
+ if (val == pm->enable_user)
goto out;
- if (!pm->enable) {
+ if (!pm->enable_user) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
@@ -291,13 +285,8 @@ mt7921_pm_set(void *data, u64 val)
pm->enable = false;
mt76_connac_pm_wake(&dev->mphy, pm);
- ieee80211_iterate_active_interfaces(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_pm_interface_iter, dev);
-
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
-
- pm->enable = val;
+ pm->enable_user = val;
+ mt7921_set_runtime_pm(dev);
mt76_connac_power_save_sched(&dev->mphy, pm);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -310,7 +299,7 @@ mt7921_pm_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
- *val = dev->pm.enable;
+ *val = dev->pm.enable_user;
return 0;
}
@@ -322,13 +311,20 @@ mt7921_deep_sleep_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR);
bool enable = !!val;
+ if (mt76_is_usb(&dev->mt76))
+ return -EOPNOTSUPP;
+
mt7921_mutex_acquire(dev);
- if (pm->ds_enable != enable) {
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
- pm->ds_enable = enable;
- }
+ if (pm->ds_enable_user == enable)
+ goto out;
+
+ pm->ds_enable_user = enable;
+ pm->ds_enable = enable && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+out:
mt7921_mutex_release(dev);
return 0;
@@ -339,7 +335,7 @@ mt7921_deep_sleep_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
- *val = dev->pm.ds_enable;
+ *val = dev->pm.ds_enable_user;
return 0;
}
@@ -438,8 +434,13 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7921_queues_read);
+ if (mt76_is_mmio(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+ dir, mt7921_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+ dir, mt76_queues_read);
+
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7921_queues_acq);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index cdff1fd52d93..d1f10f6d9adc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -5,20 +5,6 @@
#include "../dma.h"
#include "mac.h"
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
-{
- int i, err;
-
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
- if (err < 0)
- return err;
-
- for (i = 0; i <= MT_TXQ_PSD; i++)
- phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
-
- return 0;
-}
-
static int mt7921_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7921_dev *dev;
@@ -31,7 +17,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
- mt7921_mcu_tx_cleanup(dev);
+ mt76_connac_tx_cleanup(&dev->mt76);
if (napi_complete(napi))
mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
@@ -78,110 +64,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}
-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
-{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
- int i;
-
- if (addr < 0x100000)
- return addr;
-
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
- u32 ofs;
-
- if (addr < fixed_map[i].phys)
- continue;
-
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
- continue;
-
- return fixed_map[i].mapped + ofs;
- }
-
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
- return mt7921_reg_map_l1(dev, addr);
-
- dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
- addr);
-
- return 0;
-}
-
-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
@@ -341,23 +223,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
int mt7921_dma_init(struct mt7921_dev *dev)
{
- struct mt76_bus_ops *bus_ops;
int ret;
- dev->phy.dev = dev;
- dev->phy.mt76 = &dev->mt76.phy;
- dev->mt76.phy.priv = &dev->phy;
- dev->bus_ops = dev->mt76.bus;
- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
- GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
-
- bus_ops->rr = mt7921_rr;
- bus_ops->wr = mt7921_wr;
- bus_ops->rmw = mt7921_rmw;
- dev->mt76.bus = bus_ops;
-
mt76_dma_attach(&dev->mt76);
ret = mt7921_dma_disable(dev, true);
@@ -369,8 +236,9 @@ int mt7921_dma_init(struct mt7921_dev *dev)
return ret;
/* init tx queue */
- ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
- MT7921_TX_RING_SIZE);
+ ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
+ MT7921_TX_RING_SIZE,
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -415,8 +283,8 @@ int mt7921_dma_init(struct mt7921_dev *dev)
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7921_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7921_poll_tx);
napi_enable(&dev->mt76.tx_napi);
return mt7921_dma_enable(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
index 54f30401343c..4b647278eb30 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
@@ -11,12 +11,15 @@ enum mt7921_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_WIFI_CONF = 0x07c,
- __MT_EE_MAX = 0x3bf
+ MT_EE_HW_TYPE = 0x55b,
+ __MT_EE_MAX = 0x9ff
};
#define MT_EE_WIFI_CONF_TX_MASK BIT(0)
#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(3, 2)
+#define MT_EE_HW_TYPE_ENCAP BIT(0)
+
enum mt7921_eeprom_band {
MT_EE_NA,
MT_EE_5GHZ,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ad59ef9839dc..dcdb3cf04ac1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -11,6 +11,10 @@ static const struct ieee80211_iface_limit if_limits[] = {
{
.max = MT7921_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_STATION)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP)
}
};
@@ -30,14 +34,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
+ mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
mt76_connac_mcu_set_channel_domain(hw->priv);
- mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ mt7921_set_tx_sar_pwr(hw, NULL);
mt7921_mutex_release(dev);
}
@@ -49,8 +53,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
struct wiphy *wiphy = hw->wiphy;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = 64;
- hw->max_tx_aggregation_subframes = 128;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -64,7 +68,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->iface_combinations = if_comb;
wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION);
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
@@ -80,6 +85,10 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -88,6 +97,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -165,7 +176,7 @@ out:
static int mt7921_init_hardware(struct mt7921_dev *dev)
{
- int ret, idx, i;
+ int ret, i;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
@@ -182,6 +193,13 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
return ret;
}
+ return 0;
+}
+
+static int mt7921_init_wcid(struct mt7921_dev *dev)
+{
+ int idx;
+
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
if (idx)
@@ -195,6 +213,38 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
return 0;
}
+static void mt7921_init_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ init_work);
+ int ret;
+
+ ret = mt7921_init_hardware(dev);
+ if (ret)
+ return;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7921_set_stream_he_caps(&dev->phy);
+
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret) {
+ dev_err(dev->mt76.dev, "register device failed\n");
+ return;
+ }
+
+ ret = mt7921_init_debugfs(dev);
+ if (ret) {
+ dev_err(dev->mt76.dev, "register debugfs failed\n");
+ return;
+ }
+
+ /* we support chip reset now */
+ dev->hw_init_done = true;
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+}
+
int mt7921_register_device(struct mt7921_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -216,27 +266,34 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work);
+#if IS_ENABLED(CONFIG_IPV6)
+ INIT_WORK(&dev->ipv6_ns_work, mt7921_set_ipv6_ns_work);
+ skb_queue_head_init(&dev->ipv6_ns_list);
+#endif
skb_queue_head_init(&dev->phy.scan_event_list);
skb_queue_head_init(&dev->coredump.msg_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+ INIT_WORK(&dev->init_work, mt7921_init_work);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
-
- /* TODO: mt7921s run sleep mode on default */
- if (mt76_is_mmio(&dev->mt76)) {
+ if (!mt76_is_usb(&dev->mt76)) {
+ dev->pm.enable_user = true;
dev->pm.enable = true;
+ dev->pm.ds_enable_user = true;
dev->pm.ds_enable = true;
}
- if (mt76_is_sdio(&dev->mt76))
+ if (!mt76_is_mmio(&dev->mt76))
hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
- ret = mt7921_init_hardware(dev);
+ mt7921_init_acpi_sar(dev);
+
+ ret = mt7921_init_wcid(dev);
if (ret)
return ret;
@@ -251,7 +308,7 @@ int mt7921_register_device(struct mt7921_dev *dev)
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -264,23 +321,7 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- mt76_set_stream_caps(&dev->mphy, true);
- mt7921_set_stream_he_caps(&dev->phy);
-
- ret = mt76_register_device(&dev->mt76, true, mt76_rates,
- ARRAY_SIZE(mt76_rates));
- if (ret)
- return ret;
-
- ret = mt7921_init_debugfs(dev);
- if (ret)
- return ret;
-
- ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
- if (ret)
- return ret;
-
- dev->hw_init_done = true;
+ queue_work(system_wq, &dev->init_work);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index ec10f95a4649..650ab97ae052 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -9,10 +9,6 @@
#include "mac.h"
#include "mcu.h"
-#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
-#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
- IEEE80211_RADIOTAP_HE_##f)
-
static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
u16 idx, bool unicast)
{
@@ -116,7 +112,7 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7921_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@@ -169,182 +165,6 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll);
static void
-mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
- struct ieee80211_radiotap_he *he,
- __le32 *rxv)
-{
- u32 ru_h, ru_l;
- u8 ru, offs = 0;
-
- ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
- ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
- ru = (u8)(ru_l | ru_h << 4);
-
- status->bw = RATE_INFO_BW_HE_RU;
-
- switch (ru) {
- case 0 ... 36:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
-
- he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
- he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-}
-
-static void
-mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he_mu mu_known = {
- .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
- HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN) |
- HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN),
- .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN) |
- HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
- };
- struct ieee80211_radiotap_he_mu *he_mu;
-
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
-
- he_mu = skb_push(skb, sizeof(mu_known));
- memcpy(he_mu, &mu_known, sizeof(mu_known));
-
-#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
-
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
- if (status->he_dcm)
- he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
-
- he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
- MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
- le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
-
- he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, le32_to_cpu(rxv[3]));
-
- if (status->bw >= RATE_INFO_BW_40) {
- he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
- he_mu->ru_ch2[0] =
- FIELD_GET(MT_CRXV_HE_RU1, le32_to_cpu(rxv[3]));
- }
-
- if (status->bw >= RATE_INFO_BW_80) {
- he_mu->ru_ch1[1] =
- FIELD_GET(MT_CRXV_HE_RU2, le32_to_cpu(rxv[3]));
- he_mu->ru_ch2[1] =
- FIELD_GET(MT_CRXV_HE_RU3, le32_to_cpu(rxv[3]));
- }
-}
-
-static void
-mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- static const struct ieee80211_radiotap_he known = {
- .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
- HE_BITS(DATA1_DATA_DCM_KNOWN) |
- HE_BITS(DATA1_STBC_KNOWN) |
- HE_BITS(DATA1_CODING_KNOWN) |
- HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
- HE_BITS(DATA1_DOPPLER_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
- HE_BITS(DATA1_BSS_COLOR_KNOWN),
- .data2 = HE_BITS(DATA2_GI_KNOWN) |
- HE_BITS(DATA2_TXBF_KNOWN) |
- HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
- HE_BITS(DATA2_TXOP_KNOWN),
- };
- struct ieee80211_radiotap_he *he = NULL;
- u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
-
- status->flag |= RX_FLAG_RADIOTAP_HE;
-
- he = skb_push(skb, sizeof(known));
- memcpy(he, &known, sizeof(known));
-
- he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
- HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
- he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
- he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
- le16_encode_bits(ltf_size,
- IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
- if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
- he->data5 |= HE_BITS(DATA5_TXBF);
- he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
- HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
-
- switch (mode) {
- case MT_PHY_TYPE_HE_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
- HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
- break;
- case MT_PHY_TYPE_HE_EXT_SU:
- he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- break;
- case MT_PHY_TYPE_HE_MU:
- he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
-
- he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
-
- mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
- mt7921_mac_decode_he_mu_radiotap(skb, rxv);
- break;
- case MT_PHY_TYPE_HE_TB:
- he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
-
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
-
- mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
- break;
- default:
- break;
- }
-}
-
-static void
mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
struct mt76_rx_status *status, u8 chfreq)
{
@@ -398,81 +218,6 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_rssi_iter, skb);
}
-/* The HW does not translate the mac header to 802.3 for mesh point */
-static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
- struct ieee80211_sta *sta;
- struct ieee80211_vif *vif;
- struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
-
- if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
- MT_RXD3_NORMAL_U2M)
- return -EINVAL;
-
- if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
- return -EINVAL;
-
- if (!msta || !msta->vif)
- return -EINVAL;
-
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
-
- /* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
- hdr.duration_id = 0;
- ether_addr_copy(hdr.addr1, vif->addr);
- ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
- case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
- break;
- case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
- break;
- case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- break;
- case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
- break;
- default:
- break;
- }
-
- skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
- ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
- ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
- else
- skb_pull(skb, 2);
-
- if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
- if (ieee80211_has_a4(hdr.frame_control))
- memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
- else
- memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
-
- return 0;
-}
-
static int
mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
@@ -485,14 +230,16 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7921_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
+ u32 csum_status = *(u32 *)skb->cb;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
+ struct mt7921_sta *msta = NULL;
u16 seq_ctrl = 0;
__le16 fc = 0;
- u32 mode = 0;
+ u8 mode = 0;
int i, idx;
memset(status, 0, sizeof(*status));
@@ -520,8 +267,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- struct mt7921_sta *msta;
-
msta = container_of(status->wcid, struct mt7921_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
@@ -546,7 +291,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask)
+ if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
@@ -636,9 +382,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u8 stbc, gi;
u32 v0, v1;
- bool cck;
+ int ret;
rxv = rxd;
rxd += 2;
@@ -666,82 +411,10 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
- if (status->signal == -128)
- status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
- stbc = FIELD_GET(MT_PRXV_STBC, v0);
- gi = FIELD_GET(MT_PRXV_SGI, v0);
- cck = false;
-
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 9)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
- return -EINVAL;
- }
-
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
+ rxv, &mode);
+ if (ret < 0)
+ return ret;
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
rxd += 18;
@@ -759,8 +432,18 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
- if (mt7921_reverse_frag0_hdr_trans(skb, hdr_gap))
+ struct ieee80211_vif *vif;
+ int err;
+
+ if (!msta || !msta->vif)
return -EINVAL;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
+ if (err)
+ return err;
+
hdr_trans = false;
} else {
skb_pull(skb, hdr_gap);
@@ -793,7 +476,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_assoc_rssi(dev, skb);
if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt7921_mac_decode_he_radiotap(skb, rxv, mode);
+ mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -805,218 +488,16 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
return 0;
}
-static void
-mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid)
-{
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- u8 fc_type, fc_stype;
- bool wmm = false;
- u32 val;
-
- if (wcid->sta) {
- struct ieee80211_sta *sta;
-
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
- wmm = sta->wme;
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
- FIELD_PREP(MT_TXD1_TID, tid);
-
- if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
- val |= MT_TXD1_ETH_802_3;
-
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = IEEE80211_FTYPE_DATA >> 2;
- fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
-
- txwi[2] |= cpu_to_le32(val);
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-static void
-mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- bool multicast = is_multicast_ether_addr(hdr->addr1);
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
- u8 fc_type, fc_stype;
- u32 val;
-
- if (ieee80211_is_action(fc) &&
- mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
- u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
-
- txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
- tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
- } else if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
- u16 control = le16_to_cpu(bar->control);
-
- tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
- }
-
- val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
- FIELD_PREP(MT_TXD1_HDR_INFO,
- ieee80211_get_hdrlen_from_skb(skb) / 2) |
- FIELD_PREP(MT_TXD1_TID, tid);
- txwi[1] |= cpu_to_le32(val);
-
- fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
- fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
-
- val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
- FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
- FIELD_PREP(MT_TXD2_MULTICAST, multicast);
-
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- val |= MT_TXD2_BIP;
- txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
- }
-
- if (!ieee80211_is_data(fc) || multicast ||
- info->flags & IEEE80211_TX_CTL_USE_MINRATE)
- val |= MT_TXD2_FIX_RATE;
-
- txwi[2] |= cpu_to_le32(val);
-
- if (ieee80211_is_beacon(fc)) {
- txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
- txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
- }
-
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
-
- if (ieee80211_is_back_req(hdr->frame_control)) {
- struct ieee80211_bar *bar;
-
- bar = (struct ieee80211_bar *)skb->data;
- seqno = le16_to_cpu(bar->start_seq_num);
- }
-
- val = MT_TXD3_SN_VALID |
- FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
- txwi[3] |= cpu_to_le32(val);
- }
-
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
-}
-
-void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key, int pid,
- bool beacon)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_phy *mphy = &dev->mphy;
- u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
- bool is_mmio = mt76_is_mmio(&dev->mt76);
- u32 sz_txd = is_mmio ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
- bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- u16 tx_count = 15;
- u32 val;
-
- if (vif) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-
- omac_idx = mvif->omac_idx;
- wmm_idx = mvif->wmm_idx;
- }
-
- if (beacon) {
- p_fmt = MT_TX_TYPE_FW;
- q_idx = MT_LMAC_BCN0;
- } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
- p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = MT_LMAC_ALTX0;
- } else {
- p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
- q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
- mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
- }
-
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
- FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
- FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
- txwi[0] = cpu_to_le32(val);
-
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
- FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
-
- txwi[1] = cpu_to_le32(val);
- txwi[2] = 0;
-
- val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
- if (key)
- val |= MT_TXD3_PROTECT_FRAME;
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- val |= MT_TXD3_NO_ACK;
-
- txwi[3] = cpu_to_le32(val);
- txwi[4] = 0;
-
- val = FIELD_PREP(MT_TXD5_PID, pid);
- if (pid >= MT_PACKET_ID_FIRST)
- val |= MT_TXD5_TX_STATUS_HOST;
- txwi[5] = cpu_to_le32(val);
-
- txwi[6] = 0;
- txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
-
- if (is_8023)
- mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
- else
- mt7921_mac_write_txwi_80211(dev, txwi, skb, key);
-
- if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- int rateidx = vif ? ffs(vif->bss_conf.basic_rates) - 1 : 0;
- u16 rate, mode;
-
- /* hardware won't add HTC for mgmt/ctrl frame */
- txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
-
- rate = mt76_calculate_default_rate(mphy, rateidx);
- mode = rate >> 8;
- rate &= GENMASK(7, 0);
- rate |= FIELD_PREP(MT_TX_RATE_MODE, mode);
-
- val = MT_TXD6_FIXED_BW |
- FIELD_PREP(MT_TXD6_TX_RATE, rate);
- txwi[6] |= cpu_to_le32(val);
- txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
- }
-}
-EXPORT_SYMBOL_GPL(mt7921_mac_write_txwi);
-
-void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
struct mt7921_sta *msta;
u16 fc, tid;
u32 val;
- if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+ if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
return;
- tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
if (tid >= 6) /* skip VO queue */
return;
@@ -1030,125 +511,6 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
-EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr);
-
-static bool
-mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
- __le32 *txs_data)
-{
- struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
- struct mt76_sta_stats *stats = &msta->stats;
- struct ieee80211_supported_band *sband;
- struct mt76_dev *mdev = &dev->mt76;
- struct ieee80211_tx_info *info;
- struct rate_info rate = {};
- struct sk_buff_head list;
- u32 txrate, txs, mode;
- struct sk_buff *skb;
- bool cck = false;
-
- mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (!skb)
- goto out;
-
- info = IEEE80211_SKB_CB(skb);
- txs = le32_to_cpu(txs_data[0]);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
-
- info->status.rates[0].idx = -1;
-
- if (!wcid->sta)
- goto out;
-
- txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
-
- rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
- rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
-
- if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
- stats->tx_nss[rate.nss - 1]++;
- if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
- stats->tx_mcs[rate.mcs]++;
-
- mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mphy.sband_5g.sband;
- else
- sband = &dev->mphy.sband_2g.sband;
-
- rate.mcs = mt76_get_rate(dev->mphy.dev, sband, rate.mcs, cck);
- rate.legacy = sband->bitrates[rate.mcs].bitrate;
- break;
- case MT_PHY_TYPE_HT:
- case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
- if (rate.mcs > 31)
- goto out;
-
- rate.flags = RATE_INFO_FLAGS_MCS;
- if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
- rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_VHT:
- if (rate.mcs > 9)
- goto out;
-
- rate.flags = RATE_INFO_FLAGS_VHT_MCS;
- break;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- case MT_PHY_TYPE_HE_MU:
- if (rate.mcs > 11)
- goto out;
-
- rate.he_gi = wcid->rate.he_gi;
- rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
- rate.flags = RATE_INFO_FLAGS_HE_MCS;
- break;
- default:
- goto out;
- }
- stats->tx_mode[mode]++;
-
- switch (FIELD_GET(MT_TXS0_BW, txs)) {
- case IEEE80211_STA_RX_BW_160:
- rate.bw = RATE_INFO_BW_160;
- stats->tx_bw[3]++;
- break;
- case IEEE80211_STA_RX_BW_80:
- rate.bw = RATE_INFO_BW_80;
- stats->tx_bw[2]++;
- break;
- case IEEE80211_STA_RX_BW_40:
- rate.bw = RATE_INFO_BW_40;
- stats->tx_bw[1]++;
- break;
- default:
- rate.bw = RATE_INFO_BW_20;
- stats->tx_bw[0]++;
- break;
- }
- wcid->rate = rate;
-
-out:
- if (skb)
- mt76_tx_status_skb_done(mdev, skb, &list);
- mt76_tx_status_unlock(mdev, &list);
-
- return !!skb;
-}
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
@@ -1156,18 +518,13 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
- u32 txs;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
return;
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
- txs = le32_to_cpu(txs_data[3]);
- pid = FIELD_GET(MT_TXS3_PID, txs);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
if (pid < MT_PACKET_ID_FIRST)
return;
@@ -1181,12 +538,12 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
if (!wcid)
goto out;
- mt7921_mac_add_txs_skb(dev, wcid, pid, txs_data);
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
- msta = container_of(wcid, struct mt7921_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
@@ -1196,6 +553,134 @@ out:
rcu_read_unlock();
}
+void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt76_connac_txp_skb_unmap(mdev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+ if (sta) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ wcid_idx = wcid->idx;
+ } else {
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+out:
+ t->skb = NULL;
+ mt76_put_txwi(mdev, t);
+}
+EXPORT_SYMBOL_GPL(mt7921_txwi_free);
+
+static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
+{
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
+ LIST_HEAD(free_list);
+ bool wake = false;
+ u8 i, count;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ if (WARN_ON_ONCE((void *)&tx_info[count] > end))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(tx_info[i]);
+ u8 stat;
+
+ /* 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7921_sta *msta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ continue;
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
+ }
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+
+ rcu_read_lock();
+ mt7921_mac_sta_poll(dev);
+ rcu_read_unlock();
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
+ mt7921_mac_tx_free(dev, data, len); /* mmio */
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7921_rx_check);
+
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -1205,13 +690,18 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
+ mt7921_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
case PKT_TYPE_RX_EVENT:
mt7921_mcu_rx_event(dev, skb);
break;
@@ -1356,12 +846,21 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mvif->phy->dev;
+ struct ieee80211_hw *hw = mt76_hw(dev);
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
mt7921_mcu_set_tx(dev, vif);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
+ true);
+ mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
+ }
}
/* system error recovery */
@@ -1371,9 +870,9 @@ void mt7921_mac_reset_work(struct work_struct *work)
reset_work);
struct ieee80211_hw *hw = mt76_hw(dev);
struct mt76_connac_pm *pm = &dev->pm;
- int i;
+ int i, ret;
- dev_err(dev->mt76.dev, "chip reset\n");
+ dev_dbg(dev->mt76.dev, "chip reset\n");
dev->hw_full_reset = true;
ieee80211_stop_queues(hw);
@@ -1381,11 +880,14 @@ void mt7921_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
- mutex_lock(&dev->mt76.mutex);
- for (i = 0; i < 10; i++)
- if (!mt7921_dev_reset(dev))
+ for (i = 0; i < 10; i++) {
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt7921_dev_reset(dev);
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (!ret)
break;
- mutex_unlock(&dev->mt76.mutex);
+ }
if (i == 10)
dev_err(dev->mt76.dev, "chip reset failed\n");
@@ -1410,6 +912,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
void mt7921_reset(struct mt76_dev *mdev)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt76_connac_pm *pm = &dev->pm;
if (!dev->hw_init_done)
return;
@@ -1417,8 +920,12 @@ void mt7921_reset(struct mt76_dev *mdev)
if (dev->hw_full_reset)
return;
+ if (pm->suspended)
+ return;
+
queue_work(dev->mt76.wq, &dev->reset_work);
}
+EXPORT_SYMBOL_GPL(mt7921_reset);
void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
@@ -1522,10 +1029,12 @@ void mt7921_pm_wake_work(struct work_struct *work)
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i)
napi_schedule(&mdev->napi[i]);
+ local_bh_enable();
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
- mt7921_mcu_tx_cleanup(dev);
+ mt76_connac_tx_cleanup(mdev);
}
if (test_bit(MT76_STATE_RUNNING, &mphy->state))
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
@@ -1548,7 +1057,16 @@ void mt7921_pm_power_save_work(struct work_struct *work)
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
+ dev->fw_assert)
+ goto out;
+
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if mt76 mutex is held we should not put the device
+ * to sleep since we are currently accessing device
+ * register map. We need to wait for the next power_save
+ * trigger.
+ */
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
@@ -1592,7 +1110,7 @@ void mt7921_coredump_work(struct work_struct *work)
if (!skb)
break;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
dev_kfree_skb(skb);
continue;
@@ -1610,3 +1128,120 @@ void mt7921_coredump_work(struct work_struct *work)
mt7921_reset(&dev->mt76);
}
+
+/* usb_sdio */
+static void
+mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
+ enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
+ struct sk_buff *skb)
+{
+ __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
+
+ memset(txwi, 0, MT_SDIO_TXD_SIZE);
+ mt76_connac2_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
+ skb_push(skb, MT_SDIO_TXD_SIZE);
+}
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct sk_buff *skb = tx_info->skb;
+ int err, pad, pktid, type;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ if (sta) {
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->last_txs = jiffies;
+ }
+ }
+
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
+
+ type = mt76_is_sdio(mdev) ? MT7921_SDIO_DATA : 0;
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
+ pad = round_up(skb->len, 4) - skb->len;
+ if (mt76_is_usb(mdev))
+ pad += 4;
+
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_prepare_skb);
+
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e)
+{
+ __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
+ unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid = rcu_dereference(mdev->wcid[idx]);
+ sta = wcid_to_sta(wcid);
+
+ if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ skb_pull(e->skb, headroom);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb);
+
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mac_sta_poll(dev);
+ mt7921_mutex_release(dev);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
+
+#if IS_ENABLED(CONFIG_IPV6)
+void mt7921_set_ipv6_ns_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ ipv6_ns_work);
+ struct sk_buff *skb;
+ int ret = 0;
+
+ do {
+ skb = skb_dequeue(&dev->ipv6_ns_list);
+
+ if (!skb)
+ break;
+
+ mt7921_mutex_acquire(dev);
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(OFFLOAD), true);
+ mt7921_mutex_release(dev);
+
+ } while (!ret);
+
+ if (ret)
+ skb_queue_purge(&dev->ipv6_ns_list);
+}
+#endif
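
Two mechanical conversions run through the mac.c hunks above: the per-driver HE radiotap, reverse header-translation, txwi, and TXS helpers are dropped in favour of their mt76_connac2 counterparts, and open-coded FIELD_GET(MASK, le32_to_cpu(x)) accesses become le32_get_bits(x, MASK). The two accessor forms extract the same field; the latter simply folds the endian conversion into the helper. A minimal illustration, with a hypothetical mask name (MY_PKT_TYPE):

    /* Sketch only: MY_PKT_TYPE is a made-up field, not a real register. */
    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define MY_PKT_TYPE GENMASK(31, 27)

    static u32 get_pkt_type(__le32 rxd0)
    {
            u32 a = FIELD_GET(MY_PKT_TYPE, le32_to_cpu(rxd0)); /* convert, then mask */
            u32 b = le32_get_bits(rxd0, MY_PKT_TYPE);          /* one call does both */

            return a == b ? b : 0; /* always equal by construction */
    }
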
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index 544a1c33126a..8afec600364f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -4,6 +4,8 @@
#ifndef __MT7921_MAC_H
#define __MT7921_MAC_H
+#include "../mt76_connac2_mac.h"
+
#define MT_CT_PARSE_LEN 72
#define MT_CT_DMA_BUF_NUM 2
@@ -27,290 +29,6 @@ enum rx_pkt_type {
PKT_TYPE_NORMAL_MCU,
};
-/* RXD DW1 */
-#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
-#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
-#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
-#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
-#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
-#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
-#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
-#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
-#define MT_RXD1_NORMAL_CM BIT(23)
-#define MT_RXD1_NORMAL_CLM BIT(24)
-#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
-#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
-#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
-#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
-#define MT_RXD1_NORMAL_SPP_EN BIT(29)
-#define MT_RXD1_NORMAL_ADD_OM BIT(30)
-#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
-
-/* RXD DW2 */
-#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
-#define MT_RXD2_NORMAL_CO_ANT BIT(6)
-#define MT_RXD2_NORMAL_BF_CQI BIT(7)
-#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
-#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
-#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
-#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
-#define MT_RXD2_NORMAL_MU_BAR BIT(21)
-#define MT_RXD2_NORMAL_SW_BIT BIT(22)
-#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
-#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
-#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
-#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
-#define MT_RXD2_NORMAL_FRAG BIT(27)
-#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
-#define MT_RXD2_NORMAL_NDATA BIT(29)
-#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
-#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
-
-/* RXD DW3 */
-#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
-#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
-#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
-#define MT_RXD3_NORMAL_U2M BIT(0)
-#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
-#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
-#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
-#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
-#define MT_RXD3_NORMAL_AMSDU BIT(22)
-#define MT_RXD3_NORMAL_MESH BIT(23)
-#define MT_RXD3_NORMAL_MHCP BIT(24)
-#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
-#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
-#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
-#define MT_RXD3_NORMAL_MORE BIT(28)
-#define MT_RXD3_NORMAL_UNWANT BIT(29)
-#define MT_RXD3_NORMAL_RX_DROP BIT(30)
-#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
-
-/* RXD DW4 */
-#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
-#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0)
-#define MT_RXD4_MID_AMSDU_FRAME BIT(1)
-#define MT_RXD4_LAST_AMSDU_FRAME BIT(0)
-#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
-#define MT_RXD4_NORMAL_CLS BIT(10)
-#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
-#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
-#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
-#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
-#define MT_RXD3_NORMAL_PF_MODE BIT(29)
-#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
-
-/* RXD GROUP4 */
-#define MT_RXD6_FRAME_CONTROL GENMASK(15, 0)
-#define MT_RXD6_TA_LO GENMASK(31, 16)
-
-#define MT_RXD7_TA_HI GENMASK(31, 0)
-
-#define MT_RXD8_SEQ_CTRL GENMASK(15, 0)
-#define MT_RXD8_QOS_CTL GENMASK(31, 16)
-
-#define MT_RXD9_HT_CONTROL GENMASK(31, 0)
-
-/* P-RXV DW0 */
-#define MT_PRXV_TX_RATE GENMASK(6, 0)
-#define MT_PRXV_TX_DCM BIT(4)
-#define MT_PRXV_TX_ER_SU_106T BIT(5)
-#define MT_PRXV_NSTS GENMASK(9, 7)
-#define MT_PRXV_TXBF BIT(10)
-#define MT_PRXV_HT_AD_CODE BIT(11)
-#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
-#define MT_PRXV_SGI GENMASK(16, 15)
-#define MT_PRXV_STBC GENMASK(23, 22)
-#define MT_PRXV_TX_MODE GENMASK(27, 24)
-#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
-
-/* P-RXV DW1 */
-#define MT_PRXV_RCPI3 GENMASK(31, 24)
-#define MT_PRXV_RCPI2 GENMASK(23, 16)
-#define MT_PRXV_RCPI1 GENMASK(15, 8)
-#define MT_PRXV_RCPI0 GENMASK(7, 0)
-#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
-
-/* C-RXV */
-#define MT_CRXV_HT_STBC GENMASK(1, 0)
-#define MT_CRXV_TX_MODE GENMASK(7, 4)
-#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
-#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
-#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
-#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
-#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
-#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
-#define MT_CRXV_HE_UPLINK BIT(31)
-
-#define MT_CRXV_HE_RU0 GENMASK(7, 0)
-#define MT_CRXV_HE_RU1 GENMASK(15, 8)
-#define MT_CRXV_HE_RU2 GENMASK(23, 16)
-#define MT_CRXV_HE_RU3 GENMASK(31, 24)
-#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
-
-#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
-#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
-#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
-#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
-
-#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
-#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
-#define MT_CRXV_HE_BEAM_CHNG BIT(13)
-#define MT_CRXV_HE_DOPPLER BIT(16)
-
-#define MT_CRXV_SNR GENMASK(18, 13)
-#define MT_CRXV_FOE_LO GENMASK(31, 19)
-#define MT_CRXV_FOE_HI GENMASK(6, 0)
-#define MT_CRXV_FOE_SHIFT 13
-
-enum tx_header_format {
- MT_HDR_FORMAT_802_3,
- MT_HDR_FORMAT_CMD,
- MT_HDR_FORMAT_802_11,
- MT_HDR_FORMAT_802_11_EXT,
-};
-
-enum tx_pkt_type {
- MT_TX_TYPE_CT,
- MT_TX_TYPE_SF,
- MT_TX_TYPE_CMD,
- MT_TX_TYPE_FW,
-};
-
-enum tx_port_idx {
- MT_TX_PORT_IDX_LMAC,
- MT_TX_PORT_IDX_MCU
-};
-
-enum tx_mcu_port_q_idx {
- MT_TX_MCU_PORT_RX_Q0 = 0x20,
- MT_TX_MCU_PORT_RX_Q1,
- MT_TX_MCU_PORT_RX_Q2,
- MT_TX_MCU_PORT_RX_Q3,
- MT_TX_MCU_PORT_RX_FWDL = 0x3e
-};
-
-#define MT_CT_INFO_APPLY_TXD BIT(0)
-#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
-#define MT_CT_INFO_MGMT_FRAME BIT(2)
-#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
-#define MT_CT_INFO_HSR2_TX BIT(4)
-#define MT_CT_INFO_FROM_HOST BIT(7)
-
-#define MT_TXD_SIZE (8 * 4)
-
-#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
-#define MT_SDIO_TAIL_SIZE 8
-#define MT_SDIO_HDR_SIZE 4
-
-#define MT_TXD0_Q_IDX GENMASK(31, 25)
-#define MT_TXD0_PKT_FMT GENMASK(24, 23)
-#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
-#define MT_TXD0_TX_BYTES GENMASK(15, 0)
-
-#define MT_TXD1_LONG_FORMAT BIT(31)
-#define MT_TXD1_TGID BIT(30)
-#define MT_TXD1_OWN_MAC GENMASK(29, 24)
-#define MT_TXD1_AMSDU BIT(23)
-#define MT_TXD1_TID GENMASK(22, 20)
-#define MT_TXD1_HDR_PAD GENMASK(19, 18)
-#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
-#define MT_TXD1_HDR_INFO GENMASK(15, 11)
-#define MT_TXD1_ETH_802_3 BIT(15)
-#define MT_TXD1_VTA BIT(10)
-#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
-
-#define MT_TXD2_FIX_RATE BIT(31)
-#define MT_TXD2_FIXED_RATE BIT(30)
-#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
-#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
-#define MT_TXD2_FRAG GENMASK(15, 14)
-#define MT_TXD2_HTC_VLD BIT(13)
-#define MT_TXD2_DURATION BIT(12)
-#define MT_TXD2_BIP BIT(11)
-#define MT_TXD2_MULTICAST BIT(10)
-#define MT_TXD2_RTS BIT(9)
-#define MT_TXD2_SOUNDING BIT(8)
-#define MT_TXD2_NDPA BIT(7)
-#define MT_TXD2_NDP BIT(6)
-#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
-#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
-
-#define MT_TXD3_SN_VALID BIT(31)
-#define MT_TXD3_PN_VALID BIT(30)
-#define MT_TXD3_SW_POWER_MGMT BIT(29)
-#define MT_TXD3_BA_DISABLE BIT(28)
-#define MT_TXD3_SEQ GENMASK(27, 16)
-#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
-#define MT_TXD3_TX_COUNT GENMASK(10, 6)
-#define MT_TXD3_TIMING_MEASURE BIT(5)
-#define MT_TXD3_DAS BIT(4)
-#define MT_TXD3_EEOSP BIT(3)
-#define MT_TXD3_EMRD BIT(2)
-#define MT_TXD3_PROTECT_FRAME BIT(1)
-#define MT_TXD3_NO_ACK BIT(0)
-
-#define MT_TXD4_PN_LOW GENMASK(31, 0)
-
-#define MT_TXD5_PN_HIGH GENMASK(31, 16)
-#define MT_TXD5_MD BIT(15)
-#define MT_TXD5_ADD_BA BIT(14)
-#define MT_TXD5_TX_STATUS_HOST BIT(10)
-#define MT_TXD5_TX_STATUS_MCU BIT(9)
-#define MT_TXD5_TX_STATUS_FMT BIT(8)
-#define MT_TXD5_PID GENMASK(7, 0)
-
-#define MT_TXD6_TX_IBF BIT(31)
-#define MT_TXD6_TX_EBF BIT(30)
-#define MT_TXD6_TX_RATE GENMASK(29, 16)
-#define MT_TXD6_SGI GENMASK(15, 14)
-#define MT_TXD6_HELTF GENMASK(13, 12)
-#define MT_TXD6_LDPC BIT(11)
-#define MT_TXD6_SPE_ID_IDX BIT(10)
-#define MT_TXD6_ANT_ID GENMASK(7, 4)
-#define MT_TXD6_DYN_BW BIT(3)
-#define MT_TXD6_FIXED_BW BIT(2)
-#define MT_TXD6_BW GENMASK(1, 0)
-
-#define MT_TXD7_TXD_LEN GENMASK(31, 30)
-#define MT_TXD7_UDP_TCP_SUM BIT(29)
-#define MT_TXD7_IP_SUM BIT(28)
-
-#define MT_TXD7_TYPE GENMASK(21, 20)
-#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
-
-#define MT_TXD7_PSE_FID GENMASK(27, 16)
-#define MT_TXD7_SPE_IDX GENMASK(15, 11)
-#define MT_TXD7_HW_AMSDU BIT(10)
-#define MT_TXD7_TX_TIME GENMASK(9, 0)
-
-#define MT_TX_RATE_STBC BIT(13)
-#define MT_TX_RATE_NSS GENMASK(12, 10)
-#define MT_TX_RATE_MODE GENMASK(9, 6)
-#define MT_TX_RATE_SU_EXT_TONE BIT(5)
-#define MT_TX_RATE_DCM BIT(4)
-#define MT_TX_RATE_IDX GENMASK(3, 0)
-
-#define MT_TXP_MAX_BUF_NUM 6
-
-struct mt7921_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- __le16 rept_wds_wcid;
- u8 nbuf;
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
-struct mt7921_tx_free {
- __le16 rx_byte_cnt;
- __le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
- __le32 info[];
-} __packed __aligned(4);
-
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
@@ -321,56 +39,6 @@ struct mt7921_tx_free {
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
-#define MT_TXS0_BW GENMASK(30, 29)
-#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
-#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
-#define MT_TXS0_TX_RATE GENMASK(13, 0)
-
-#define MT_TXS2_WCID GENMASK(25, 16)
-
-#define MT_TXS3_PID GENMASK(31, 24)
-
-static inline struct mt7921_txp_common *
-mt7921_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
-}
-
-#define MT_HW_TXP_MAX_MSDU_NUM 4
-#define MT_HW_TXP_MAX_BUF_NUM 4
-
-#define MT_MSDU_ID_VALID BIT(15)
-
-#define MT_TXD_LEN_MASK GENMASK(11, 0)
-#define MT_TXD_LEN_MSDU_LAST BIT(14)
-#define MT_TXD_LEN_AMSDU_LAST BIT(15)
-#define MT_TXD_LEN_LAST BIT(15)
-
-struct mt7921_txp_ptr {
- __le32 buf0;
- __le16 len0;
- __le16 len1;
- __le32 buf1;
-} __packed __aligned(4);
-
-struct mt7921_hw_txp {
- __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
- struct mt7921_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
-} __packed __aligned(4);
-
-struct mt7921_txp_common {
- union {
- struct mt7921_hw_txp hw;
- };
-};
-
#define MT_WTBL_TXRX_CAP_RATE_OFFSET 7
#define MT_WTBL_TXRX_RATE_G2_HE 24
#define MT_WTBL_TXRX_RATE_G2 12
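
In the main.c changes that follow, the per-device interface bookkeeping widens from 32 to 64 bits: mt7921_add_interface() picks a free slot with __ffs64(~dev->mt76.vif_mask) instead of ffs(~...) - 1, and marks it with BIT_ULL() instead of BIT(). A minimal sketch of that free-slot idiom, with hypothetical names; it assumes max < 64, so ~*vif_mask can never be zero (__ffs64(0) is undefined):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    static int alloc_vif_idx(u64 *vif_mask, unsigned int max)
    {
            unsigned int idx = __ffs64(~*vif_mask); /* lowest clear bit = free slot */

            if (idx >= max)
                    return -ENOSPC; /* all usable slots taken */

            *vif_mask |= BIT_ULL(idx); /* claim it */
            return idx;
    }
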
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 7a8d2596c226..7e409ac7d9a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -5,6 +5,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <net/ipv6.h>
#include "mt7921.h"
#include "mcu.h"
@@ -12,7 +13,7 @@ static void
mt7921_gen_ppe_thresh(u8 *he_ppet, int nss)
{
u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
- u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+ static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@@ -53,6 +54,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
switch (i) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
break;
default:
continue;
@@ -86,6 +88,23 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[2] |=
+ IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |=
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
@@ -238,7 +257,7 @@ int __mt7921_start(struct mt7921_phy *phy)
if (err)
return err;
- err = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ err = mt7921_set_tx_sar_pwr(mphy->hw, NULL);
if (err)
return err;
@@ -264,7 +283,7 @@ static int mt7921_start(struct ieee80211_hw *hw)
return err;
}
-static void mt7921_stop(struct ieee80211_hw *hw)
+void mt7921_stop(struct ieee80211_hw *hw)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
@@ -273,6 +292,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
+ cancel_work_sync(&dev->reset_work);
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7921_mutex_acquire(dev);
@@ -280,6 +300,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
mt7921_mutex_release(dev);
}
+EXPORT_SYMBOL_GPL(mt7921_stop);
static int mt7921_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
@@ -292,7 +313,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mt7921_mutex_acquire(dev);
- mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -301,21 +322,21 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = mvif->mt76.idx;
mvif->phy = phy;
mvif->mt76.band_idx = 0;
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
+ mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
true);
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = MT7921_WTBL_RESERVED - mvif->mt76.idx;
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -328,9 +349,10 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
}
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
out:
mt7921_mutex_release(dev);
@@ -352,7 +374,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt7921_mutex_release(dev);
@@ -452,19 +474,77 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
- err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif,
+ &mvif->wep_sta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &mvif->wep_sta->wcid, cmd);
out:
mt7921_mutex_release(dev);
return err;
}
+static void
+mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ bool pm_enable = dev->pm.enable;
+ int err;
+
+ err = mt7921_mcu_set_beacon_filter(dev, vif, pm_enable);
+ if (err < 0)
+ return;
+
+ if (pm_enable) {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ } else {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
+ }
+}
+
+static void
+mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ mt7921_mcu_set_sniffer(dev, vif, monitor);
+ pm->enable = pm->enable_user && !monitor;
+ pm->ds_enable = pm->ds_enable_user && !monitor;
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
+ if (monitor)
+ mt7921_mcu_set_beacon_filter(dev, vif, false);
+}
+
+void mt7921_set_runtime_pm(struct mt7921_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ pm->enable = pm->enable_user && !monitor;
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_pm_interface_iter, dev);
+ pm->ds_enable = pm->ds_enable_user && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+}
+
static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@@ -482,22 +562,16 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
mt7921_mutex_acquire(dev);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ret = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ ret = mt7921_set_tx_sar_pwr(hw, NULL);
if (ret)
goto out;
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
-
- if (!enabled)
- phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
- else
- phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
-
- mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
- enabled);
- mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_sniffer_interface_iter, dev);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
}
out:
@@ -507,14 +581,14 @@ out:
}
static int
-mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7921_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
@@ -526,7 +600,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -536,23 +609,23 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- phy->rxfilter &= ~(_hw); \
- phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mt7921_mutex_acquire(dev);
- phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
+ dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
@@ -566,7 +639,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
if (*total_flags & FIF_CONTROL)
mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags);
@@ -579,7 +652,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@@ -595,6 +668,11 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7921_mcu_uni_add_beacon_offload(dev, hw, vif,
+ info->enable_beacon);
+
/* ensure that enable txcmd_mode after bss_info */
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7921_mcu_set_tx(dev, vif);
@@ -605,8 +683,7 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
- if (dev->pm.enable)
- mt7921_mcu_set_beacon_filter(dev, vif, info->assoc);
+ mt7921_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
}
if (changed & BSS_CHANGED_ARP_FILTER) {
@@ -635,7 +712,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.phy_idx = mvif->mt76.band_idx;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->last_txs = jiffies;
@@ -675,6 +752,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
@@ -968,7 +1046,7 @@ mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
@@ -1105,7 +1183,7 @@ void mt7921_scan_work(struct work_struct *work)
scan_work.work);
while (true) {
- struct mt7921_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
struct sk_buff *skb;
spin_lock_bh(&phy->dev->mt76.lock);
@@ -1115,7 +1193,7 @@ void mt7921_scan_work(struct work_struct *work)
if (!skb)
break;
- rxd = (struct mt7921_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) {
ieee80211_sched_scan_results(phy->mt76->hw);
} else if (test_and_clear_bit(MT76_HW_SCANNING,
@@ -1261,7 +1339,7 @@ static int mt7921_suspend(struct ieee80211_hw *hw,
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76_connac_mcu_set_suspend_iter,
+ mt7921_mcu_set_suspend_iter,
&dev->mphy);
mt7921_mutex_release(dev);
@@ -1327,6 +1405,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ mt7921_mutex_acquire(dev);
+
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
@@ -1334,27 +1414,163 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
MCU_UNI_CMD(STA_REC_UPDATE));
+
+ mt7921_mutex_release(dev);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mvif->phy->dev;
+ struct inet6_ifaddr *ifa;
+ struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ struct sk_buff *skb;
+ u8 i, idx = 0;
+
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_arpns_tlv arpns;
+ } req_hdr = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .arpns = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
+ .mode = 2, /* update */
+ .option = 1, /* update only */
+ },
+ };
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (ifa->flags & IFA_F_TENTATIVE)
+ continue;
+ ns_addrs[idx] = ifa->addr;
+ if (++idx >= IEEE80211_BSS_ARP_ADDR_LIST_LEN)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ if (!idx)
+ return;
+
+ req_hdr.arpns.ips_num = idx;
+ req_hdr.arpns.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)
+ + idx * sizeof(struct in6_addr));
+ skb = __mt76_mcu_msg_alloc(&dev->mt76, &req_hdr,
+ sizeof(req_hdr) + idx * sizeof(struct in6_addr),
+ sizeof(req_hdr), GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ for (i = 0; i < idx; i++)
+ skb_put_data(skb, &ns_addrs[i].in6_u, sizeof(struct in6_addr));
+
+ skb_queue_tail(&dev->ipv6_ns_list, skb);
+
+ ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work);
+}
+#endif
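Because ipv6_addr_change can run in a context where the driver must not sleep, the message is allocated with GFP_ATOMIC and only queued here; the MCU transaction itself happens later from ipv6_ns_work in process context. One plausible shape for that worker, as a sketch under the naming above (not necessarily the driver's exact implementation):

	static void mt7921_set_ipv6_ns_work_sketch(struct work_struct *work)
	{
		struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
						      ipv6_ns_work);
		struct sk_buff *skb;

		/* drain what mt7921_ipv6_addr_change() queued and send it
		 * from sleepable context
		 */
		while ((skb = skb_dequeue(&dev->ipv6_ns_list)) != NULL)
			mt76_mcu_skb_send_msg(&dev->mt76, skb,
					      MCU_UNI_CMD(OFFLOAD), true);
	}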
+
+int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ if (sar) {
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ return err;
+ }
+
+ mt7921_init_acpi_sar_power(mt7921_hw_phy(hw), !sar);
+
+ err = mt76_connac_mcu_set_rate_txpower(mphy);
+
+ return err;
}
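mt7921_set_tx_sar_pwr factors the SAR programming out of the cfg80211 callback so other paths can reuse it: caller-supplied cfg80211_sar_specs limits are applied first, the ACPI SAR defaults are loaded only when no user limits were given (!sar), and the per-rate tx power is then pushed to the firmware. A hypothetical call site, assuming the same locking convention as the callback below (not taken from the driver):

	mt7921_mutex_acquire(dev);
	err = mt7921_set_tx_sar_pwr(hw, NULL);	/* NULL: fall back to ACPI table */
	mt7921_mutex_release(dev);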
static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt76_phy *mphy = hw->priv;
int err;
mt7921_mutex_acquire(dev);
- err = mt76_init_sar_power(hw, sar);
+ err = mt7921_set_tx_sar_pwr(hw, sar);
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
+static void
+mt7921_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
+ mt7921_mutex_release(dev);
+}
+
+static int
+mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ int err;
+
+ mt7921_mutex_acquire(dev);
+
+ err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
+ true);
if (err)
goto out;
- err = mt76_connac_mcu_set_rate_txpower(mphy);
+ err = mt7921_mcu_set_bss_pm(dev, vif, true);
+ if (err)
+ goto out;
+
+ err = mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
out:
mt7921_mutex_release(dev);
return err;
}
+static void
+mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ int err;
+
+ mt7921_mutex_acquire(dev);
+
+ err = mt7921_mcu_set_bss_pm(dev, vif, false);
+ if (err)
+ goto out;
+
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
+
+out:
+ mt7921_mutex_release(dev);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1365,14 +1581,20 @@ const struct ieee80211_ops mt7921_ops = {
.conf_tx = mt7921_conf_tx,
.configure_filter = mt7921_configure_filter,
.bss_info_changed = mt7921_bss_info_changed,
+ .start_ap = mt7921_start_ap,
+ .stop_ap = mt7921_stop_ap,
.sta_state = mt7921_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7921_set_key,
.sta_set_decap_offload = mt7921_sta_set_decap_offload,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = mt7921_ipv6_addr_change,
+#endif /* CONFIG_IPV6 */
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.release_buffered_frames = mt76_release_buffered_frames,
+ .channel_switch_beacon = mt7921_channel_switch_beacon,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,
.get_et_sset_count = mt7921_get_et_sset_count,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index ef1e1ef91611..67bf92969a7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1,143 +1,20 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
-#include <linux/firmware.h>
#include <linux/fs.h>
+#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
+#include "eeprom.h"
#include "mcu.h"
#include "mac.h"
-struct mt7921_patch_hdr {
- char build_date[16];
- char platform[4];
- __be32 hw_sw_ver;
- __be32 patch_ver;
- __be16 checksum;
- u16 reserved;
- struct {
- __be32 patch_ver;
- __be32 subsys;
- __be32 feature;
- __be32 n_region;
- __be32 crc;
- u32 reserved[11];
- } desc;
-} __packed;
-
-struct mt7921_patch_sec {
- __be32 type;
- __be32 offs;
- __be32 size;
- union {
- __be32 spec[13];
- struct {
- __be32 addr;
- __be32 len;
- __be32 sec_key_idx;
- __be32 align_len;
- u32 reserved[9];
- } info;
- };
-} __packed;
-
-struct mt7921_fw_trailer {
- u8 chip_id;
- u8 eco_code;
- u8 n_region;
- u8 format_ver;
- u8 format_flag;
- u8 reserved[2];
- char fw_ver[10];
- char build_date[15];
- u32 crc;
-} __packed;
-
-struct mt7921_fw_region {
- __le32 decomp_crc;
- __le32 decomp_len;
- __le32 decomp_blk_sz;
- u8 reserved[4];
- __le32 addr;
- __le32 len;
- u8 feature_set;
- u8 reserved1[15];
-} __packed;
-
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_ENCRY_MODE BIT(4)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
-
-#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
-#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
-#define PATCH_SEC_ENC_TYPE_AES 0x01
-#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02
-#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
-#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
-
-static enum mcu_cipher_type
-mt7921_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
+static bool mt7921_disable_clc;
+module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
+MODULE_PARM_DESC(disable_clc, "disable CLC support");
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
@@ -148,7 +25,7 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
if (!skb)
return -EINVAL;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
res = (struct mt7921_mcu_eeprom_info *)skb->data;
buf = dev->eeprom.data + le32_to_cpu(res->addr);
@@ -161,7 +38,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
- struct mt7921_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
@@ -172,11 +49,12 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return -ETIMEDOUT;
}
- rxd = (struct mt7921_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (seq != rxd->seq)
return -EAGAIN;
- if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
+ cmd == MCU_CMD(PATCH_FINISH_REQ)) {
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
@@ -205,89 +83,76 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
event = (struct mt7921_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
} else {
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
}
return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
-int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, int *wait_seq)
+static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
- struct mt7921_uni_txd *uni_txd;
- struct mt7921_mcu_txd *mcu_txd;
- __le32 *txd;
- u32 val;
- u8 seq;
-
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ struct mt7921_mcu_eeprom_info *res, req = {
+ .addr = cpu_to_le32(round_down(offset,
+ MT7921_EEPROM_BLOCK_SIZE)),
+ };
+ struct sk_buff *skb;
+ int ret;
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
- if (cmd == MCU_CMD(FW_SCATTER))
- goto exit;
+ res = (struct mt7921_mcu_eeprom_info *)skb->data;
+ *val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
+ dev_kfree_skb(skb);
- txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
- txd = (__le32 *)skb_push(skb, txd_len);
+ return 0;
+}
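The efuse query works in whole blocks: the requested address is rounded down to a MT7921_EEPROM_BLOCK_SIZE (16-byte) boundary, and the byte of interest is picked out of the returned block with the remainder. The arithmetic in isolation:

	#include <stdio.h>

	#define BLOCK_SIZE 16	/* MT7921_EEPROM_BLOCK_SIZE */

	int main(void)
	{
		unsigned int offset = 0x4b;	/* arbitrary byte offset */
		unsigned int base = offset / BLOCK_SIZE * BLOCK_SIZE; /* round_down() */
		unsigned int idx = offset % BLOCK_SIZE;	/* res->data[idx] */

		printf("request block %#x, use byte %u of the reply\n", base, idx);
		return 0;
	}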
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
- FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) |
- FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0);
- txd[0] = cpu_to_le32(val);
+#ifdef CONFIG_PM
- val = MT_TXD1_LONG_FORMAT |
- FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
- txd[1] = cpu_to_le32(val);
+static int
+mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
+ struct ieee80211_vif *vif, bool suspend)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_arpns_tlv arpns;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .arpns = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
+ .mode = suspend,
+ },
+ };
- if (cmd & __MCU_CMD_FIELD_UNI) {
- uni_txd = (struct mt7921_uni_txd *)txd;
- uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
- uni_txd->option = MCU_CMD_UNI_EXT_ACK;
- uni_txd->cid = cpu_to_le16(mcu_cmd);
- uni_txd->s2d_index = MCU_S2D_H2N;
- uni_txd->pkt_type = MCU_PKT_ID;
- uni_txd->seq = seq;
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
+ true);
+}
- goto exit;
- }
+void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct mt76_phy *phy = priv;
- mcu_txd = (struct mt7921_mcu_txd *)txd;
- mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
- mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
- MT_TX_MCU_PORT_RX_Q0));
- mcu_txd->pkt_type = MCU_PKT_ID;
- mcu_txd->seq = seq;
- mcu_txd->cid = mcu_cmd;
- mcu_txd->s2d_index = MCU_S2D_H2N;
- mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
-
- if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
- if (cmd & __MCU_CMD_FIELD_QUERY)
- mcu_txd->set_query = MCU_Q_QUERY;
- else
- mcu_txd->set_query = MCU_Q_SET;
- mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
- } else {
- mcu_txd->set_query = MCU_Q_NA;
+ mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
+ !test_bit(MT76_STATE_RUNNING,
+ &phy->state));
}
-exit:
- if (wait_seq)
- *wait_seq = seq;
-
- return 0;
+ mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
}
-EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message);
+
+#endif /* CONFIG_PM */
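Note the IS_ENABLED(CONFIG_IPV6) check above instead of an #ifdef: the IPv6 branch is always parsed and type-checked, and the compiler discards it as dead code when IPv6 is disabled, which keeps randconfig builds from bit-rotting. Schematically (helper name hypothetical):

	/* both branches compile; the disabled one is constant-folded away */
	if (IS_ENABLED(CONFIG_IPV6))
		setup_nd_offload(dev);	/* hypothetical helper */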
static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
@@ -313,7 +178,8 @@ mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
if (mvif->idx != event->bss_idx)
return;
- if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
+ vif->type != NL80211_IFTYPE_STATION)
return;
ieee80211_connection_loss(vif);
@@ -325,7 +191,7 @@ mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_connac_beacon_loss_event *event;
struct mt76_phy *mphy = &dev->mt76.phy;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt76_connac_beacon_loss_event *)skb->data;
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -339,7 +205,7 @@ mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_mcu_bss_event *event;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt76_connac_mcu_bss_event *)skb->data;
if (event->is_absent)
ieee80211_stop_queues(mphy->hw);
@@ -359,7 +225,7 @@ mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
u8 content[512];
} __packed * msg;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
msg = (struct mt7921_debug_msg *)skb->data;
if (msg->type == 3) { /* fw log */
@@ -382,7 +248,7 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
u8 reserved[3];
} __packed * event;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt7921_mcu_lp_event *)skb->data;
trace_lp_event(dev, event->state);
@@ -393,7 +259,7 @@ mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_mcu_tx_done_event *event;
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
event = (struct mt7921_mcu_tx_done_event *)skb->data;
mt7921_mac_add_txs(dev, event->txs);
@@ -402,8 +268,9 @@ mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
- struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
+ struct mt76_connac2_mcu_rxd *rxd;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
switch (rxd->eid) {
case MCU_EVENT_BSS_BEACON_LOSS:
mt7921_mcu_connection_loss_event(dev, skb);
@@ -437,12 +304,12 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
- struct mt7921_mcu_rxd *rxd;
+ struct mt76_connac2_mcu_rxd *rxd;
if (skb_linearize(skb))
return;
- rxd = (struct mt7921_mcu_rxd *)skb->data;
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
if (rxd->eid == 0x6) {
mt76_mcu_rx_event(&dev->mt76, skb);
@@ -465,95 +332,6 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
/** starec & wtbl **/
-static int
-mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7921_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7921_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
-}
-
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
@@ -564,6 +342,7 @@ int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
msta->wcid.amsdu = false;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
@@ -574,272 +353,112 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
-int mt7921_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
-
-static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
+static char *mt7921_patch_name(struct mt7921_dev *dev)
{
- u32 mode = DL_MODE_NEED_RSP;
-
- if (info == PATCH_SEC_NOT_SUPPORT)
- return mode;
+ char *ret;
- switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
- case PATCH_SEC_ENC_TYPE_PLAIN:
- break;
- case PATCH_SEC_ENC_TYPE_AES:
- mode |= DL_MODE_ENCRYPT;
- mode |= FIELD_PREP(DL_MODE_KEY_IDX,
- (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
- mode |= DL_MODE_RESET_SEC_IV;
- break;
- case PATCH_SEC_ENC_TYPE_SCRAMBLE:
- mode |= DL_MODE_ENCRYPT;
- mode |= DL_CONFIG_ENCRY_MODE_SEL;
- mode |= DL_MODE_RESET_SEC_IV;
- break;
- default:
- dev_err(dev->mt76.dev, "Encryption type not support!\n");
- }
+ if (is_mt7922(&dev->mt76))
+ ret = MT7922_ROM_PATCH;
+ else
+ ret = MT7921_ROM_PATCH;
- return mode;
+ return ret;
}
-static char *mt7921_patch_name(struct mt7921_dev *dev)
+static char *mt7921_ram_name(struct mt7921_dev *dev)
{
char *ret;
if (is_mt7922(&dev->mt76))
- ret = MT7922_ROM_PATCH;
+ ret = MT7922_FIRMWARE_WM;
else
- ret = MT7921_ROM_PATCH;
+ ret = MT7921_FIRMWARE_WM;
return ret;
}
-static int mt7921_load_patch(struct mt7921_dev *dev)
+static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
{
- const struct mt7921_patch_hdr *hdr;
- const struct firmware *fw = NULL;
- int i, ret, sem, max_len;
-
- max_len = mt76_is_sdio(&dev->mt76) ? 2048 : 4096;
+ const struct mt76_connac2_fw_trailer *hdr;
+ const struct mt76_connac2_fw_region *region;
+ const struct mt7921_clc *clc;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7921_phy *phy = &dev->phy;
+ const struct firmware *fw;
+ int ret, i, len, offset = 0;
+ u8 *clc_base = NULL, hw_encap = 0;
- sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
- switch (sem) {
- case PATCH_IS_DL:
+ if (mt7921_disable_clc ||
+ mt76_is_usb(&dev->mt76))
return 0;
- case PATCH_NOT_DL_SEM_SUCCESS:
- break;
- default:
- dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
- return -EAGAIN;
+
+ if (mt76_is_mmio(&dev->mt76)) {
+ ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+ if (ret)
+ return ret;
+ hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
}
- ret = request_firmware(&fw, mt7921_patch_name(dev), dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name, mdev->dev);
if (ret)
- goto out;
+ return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
+ dev_err(mdev->dev, "Invalid firmware\n");
ret = -EINVAL;
goto out;
}
- hdr = (const struct mt7921_patch_hdr *)(fw->data);
-
- dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
- be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
-
- for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
- struct mt7921_patch_sec *sec;
- const u8 *dl;
- u32 len, addr, mode;
- u32 sec_info = 0;
-
- sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) +
- i * sizeof(*sec));
- if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
- PATCH_SEC_TYPE_INFO) {
- ret = -EINVAL;
- goto out;
- }
-
- addr = be32_to_cpu(sec->info.addr);
- len = be32_to_cpu(sec->info.len);
- dl = fw->data + be32_to_cpu(sec->offs);
- sec_info = be32_to_cpu(sec->info.sec_key_idx);
- mode = mt7921_get_data_mode(dev, sec_info);
-
- ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- mode);
- if (ret) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- goto out;
- }
-
- ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- dl, len, max_len);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to send patch\n");
- goto out;
- }
- }
-
- ret = mt76_connac_mcu_start_patch(&dev->mt76);
- if (ret)
- dev_err(dev->mt76.dev, "Failed to start patch\n");
-
- if (mt76_is_sdio(&dev->mt76)) {
- /* activate again */
- ret = __mt7921_mcu_fw_pmctrl(dev);
- if (ret)
- return ret;
-
- ret = __mt7921_mcu_drv_pmctrl(dev);
- if (ret)
- return ret;
- }
-
-out:
- sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
- switch (sem) {
- case PATCH_REL_SEM_SUCCESS:
- break;
- default:
- ret = -EAGAIN;
- dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
- break;
- }
- release_firmware(fw);
-
- return ret;
-}
-
-static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= (feature_set & FW_FEATURE_ENCRY_MODE) ?
- DL_CONFIG_ENCRY_MODE_SEL : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
-static int
-mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
- const struct mt7921_fw_trailer *hdr,
- const u8 *data, bool is_wa)
-{
- int i, offset = 0, max_len;
- u32 override = 0, option = 0;
-
- max_len = mt76_is_sdio(&dev->mt76) ? 2048 : 4096;
-
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
for (i = 0; i < hdr->n_region; i++) {
- const struct mt7921_fw_region *region;
- int err;
- u32 len, addr, mode;
-
- region = (const struct mt7921_fw_region *)((const u8 *)hdr -
- (hdr->n_region - i) * sizeof(*region));
- mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa);
+ region = (const void *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
len = le32_to_cpu(region->len);
- addr = le32_to_cpu(region->addr);
- if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
- override = addr;
-
- err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- mode);
- if (err) {
- dev_err(dev->mt76.dev, "Download request failed\n");
- return err;
+		/* check that the region fits inside the firmware image */
+ if (offset + len > fw->size) {
+ dev_err(mdev->dev, "Invalid firmware region\n");
+ ret = -EINVAL;
+ goto out;
}
- err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- data + offset, len, max_len);
- if (err) {
- dev_err(dev->mt76.dev, "Failed to send firmware.\n");
- return err;
+ if ((region->feature_set & FW_FEATURE_NON_DL) &&
+ region->type == FW_TYPE_CLC) {
+ clc_base = (u8 *)(fw->data + offset);
+ break;
}
-
offset += len;
}
- if (override)
- option |= FW_START_OVERRIDE;
-
- if (is_wa)
- option |= FW_START_WORKING_PDA_CR4;
-
- return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
-}
-
-static char *mt7921_ram_name(struct mt7921_dev *dev)
-{
- char *ret;
-
- if (is_mt7922(&dev->mt76))
- ret = MT7922_FIRMWARE_WM;
- else
- ret = MT7921_FIRMWARE_WM;
-
- return ret;
-}
-
-static int mt7921_load_ram(struct mt7921_dev *dev)
-{
- const struct mt7921_fw_trailer *hdr;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, mt7921_ram_name(dev), dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- ret = -EINVAL;
+ if (!clc_base)
goto out;
- }
- hdr = (const struct mt7921_fw_trailer *)(fw->data + fw->size -
- sizeof(*hdr));
+ for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
+ clc = (const struct mt7921_clc *)(clc_base + offset);
- dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
- hdr->fw_ver, hdr->build_date);
+		/* do not init the buffer again if a chip reset was triggered */
+ if (phy->clc[clc->idx])
+ continue;
- ret = mt7921_mcu_send_ram_firmware(dev, hdr, fw->data, false);
- if (ret) {
- dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
- goto out;
- }
+ /* header content sanity */
+ if (clc->idx == MT7921_CLC_POWER &&
+ u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+ continue;
- snprintf(dev->mt76.hw->wiphy->fw_version,
- sizeof(dev->mt76.hw->wiphy->fw_version),
- "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+ phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
+ le32_to_cpu(clc->len),
+ GFP_KERNEL);
+ if (!phy->clc[clc->idx]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
release_firmware(fw);
@@ -856,11 +475,18 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
goto fw_loaded;
}
- ret = mt7921_load_patch(dev);
+ ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
if (ret)
return ret;
- ret = mt7921_load_ram(dev);
+ if (mt76_is_sdio(&dev->mt76)) {
+ /* activate again */
+ ret = __mt7921_mcu_fw_pmctrl(dev);
+ if (!ret)
+ ret = __mt7921_mcu_drv_pmctrl(dev);
+ }
+
+ ret = mt76_connac2_load_ram(&dev->mt76, mt7921_ram_name(dev), NULL);
if (ret)
return ret;
@@ -908,45 +534,36 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
return err;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ err = mt7921_load_clc(dev, mt7921_ram_name(dev));
+ if (err)
+ return err;
+
return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
-void mt7921_mcu_exit(struct mt7921_dev *dev)
-{
- skb_queue_purge(&dev->mt76.mcu.res_q);
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
-
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
-#define WMM_AIFS_SET BIT(0)
-#define WMM_CW_MIN_SET BIT(1)
-#define WMM_CW_MAX_SET BIT(2)
-#define WMM_TXOP_SET BIT(3)
-#define WMM_PARAM_SET GENMASK(3, 0)
-#define TX_CMD_MODE 1
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct edca {
- u8 queue;
- u8 set;
- u8 aifs;
- u8 cw_min;
+ __le16 cw_min;
__le16 cw_max;
__le16 txop;
- };
+ __le16 aifs;
+ u8 guardtime;
+ u8 acm;
+ } __packed;
struct mt7921_mcu_tx {
- u8 total;
- u8 action;
- u8 valid;
- u8 mode;
-
struct edca edca[IEEE80211_NUM_ACS];
+ u8 bss_idx;
+ u8 qos;
+ u8 wmm_idx;
+ u8 pad;
} __packed req = {
- .valid = true,
- .mode = TX_CMD_MODE,
- .total = IEEE80211_NUM_ACS,
+ .bss_idx = mvif->mt76.idx,
+ .qos = vif->bss_conf.qos,
+ .wmm_idx = mvif->mt76.wmm_idx,
};
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mu_edca {
u8 cw_min;
u8 cw_max;
@@ -970,30 +587,29 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.qos = vif->bss_conf.qos,
.wmm_idx = mvif->mt76.wmm_idx,
};
+ static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
- struct edca *e = &req.edca[ac];
+ struct edca *e = &req.edca[to_aci[ac]];
- e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
- e->aifs = q->aifs;
+ e->aifs = cpu_to_le16(q->aifs);
e->txop = cpu_to_le16(q->txop);
if (q->cw_min)
- e->cw_min = fls(q->cw_min);
+ e->cw_min = cpu_to_le16(q->cw_min);
else
- e->cw_min = 5;
+ e->cw_min = cpu_to_le16(5);
if (q->cw_max)
- e->cw_max = cpu_to_le16(fls(q->cw_max));
+ e->cw_max = cpu_to_le16(q->cw_max);
else
e->cw_max = cpu_to_le16(10);
}
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
- &req, sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
+ sizeof(req), false);
if (ret)
return ret;
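This hunk switches the EDCA programming from the extended EDCA_UPDATE command to the CE SET_EDCA_PARMS layout: contention windows are now sent as raw little-endian values rather than fls() exponents, and the four records are stored in the order the firmware expects via to_aci[] instead of mac80211's AC order (VO, VI, BE, BK). The scatter in isolation (array names hypothetical):

	/* mac80211 iterates ac = 0..3 (VO, VI, BE, BK); each record lands
	 * at the firmware's index for that access class, not linearly
	 */
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		fw_edca[to_aci[ac]] = host_edca[ac];	/* hypothetical arrays */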
@@ -1003,7 +619,6 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
- int to_aci[] = {1, 0, 2, 3};
if (!mvif->queue_params[ac].mu_edca)
break;
@@ -1046,7 +661,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7921_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy != &dev->phy,
@@ -1057,10 +672,13 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
else
req.channel_band = chandef->chan->band;
- if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -1093,30 +711,6 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
-{
- struct mt7921_mcu_eeprom_info req = {
- .addr = cpu_to_le32(round_down(offset, 16)),
- };
- struct mt7921_mcu_eeprom_info *res;
- struct sk_buff *skb;
- int ret;
- u8 *buf;
-
- ret = mt76_mcu_send_and_get_msg(&dev->mt76,
- MCU_EXT_QUERY(EFUSE_ACCESS),
- &req, sizeof(req), true, &skb);
- if (ret)
- return ret;
-
- res = (struct mt7921_mcu_eeprom_info *)skb->data;
- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
- memcpy(buf, res->data, 16);
- dev_kfree_skb(skb);
-
- return 0;
-}
-
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -1143,7 +737,7 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.ps = {
.tag = cpu_to_le16(UNI_BSS_INFO_PS),
.len = cpu_to_le16(sizeof(struct ps_tlv)),
- .ps_state = vif->bss_conf.ps ? 2 : 0,
+ .ps_state = vif->cfg.ps ? 2 : 0,
},
};
@@ -1190,7 +784,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
&bcnft_req, sizeof(bcnft_req), true);
}
-static int
+int
mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable)
{
@@ -1207,7 +801,7 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
u8 pad;
} req = {
.bss_idx = mvif->mt76.idx,
- .aid = cpu_to_le16(vif->bss_conf.aid),
+ .aid = cpu_to_le16(vif->cfg.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
};
@@ -1219,9 +813,6 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
};
int err;
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
-
err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
&req_hdr, sizeof(req_hdr), false);
if (err < 0 || !enable)
@@ -1303,7 +894,6 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
struct ieee80211_vif *vif,
bool enable)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
int err;
if (enable) {
@@ -1311,8 +901,6 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- ieee80211_hw_set(hw, CONNECTION_MONITOR);
mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
return 0;
@@ -1322,8 +910,6 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
- __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
return 0;
@@ -1351,3 +937,194 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
return 0;
}
+
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 band_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct sniffer_enable_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 pad[3];
+ } __packed enable;
+ } req = {
+ .hdr = {
+ .band_idx = mvif->band_idx,
+ },
+ .enable = {
+ .tag = cpu_to_le16(0),
+ .len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
+ .enable = enable,
+ },
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
+ true);
+}
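The sniffer request follows the uni-cmd framing used throughout this patch: a small fixed header selecting the band (or BSS), followed by a tagged TLV whose len field covers the TLV alone, not the header. A minimal userspace mirror of that framing (names hypothetical; the driver uses __le16 for tag and len):

	#include <stdint.h>

	struct uni_hdr {	/* selects band_idx or bss_idx */
		uint8_t idx;
		uint8_t pad[3];
	};

	struct uni_tlv {	/* one tagged attribute */
		uint16_t tag;	/* which setting this TLV carries */
		uint16_t len;	/* size of this TLV only */
		uint8_t payload[];
	};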
+
+int
+mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bcn_content_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ie_pos;
+ __le16 csa_ie_pos;
+ __le16 bcc_ie_pos;
+ /* 0: disable beacon offload
+ * 1: enable beacon offload
+ * 2: update probe respond offload
+ */
+ u8 enable;
+ /* 0: legacy format (TXD + payload)
+ * 1: only cap field IE
+ */
+ u8 type;
+ __le16 pkt_len;
+ u8 pkt[512];
+ } __packed beacon_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .beacon_tlv = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
+ .len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
+ .enable = enable,
+ },
+ };
+ struct sk_buff *skb;
+
+	/* support the enable/update flow only;
+	 * the disable flow is handled automatically in the bss stop handler
+	 */
+ if (!enable)
+ return -EOPNOTSUPP;
+
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+		dev_err(dev->mt76.dev, "beacon size limit exceeded\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
+ skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
+ memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+
+ if (offs.cntdwn_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
+ req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
+ }
+ dev_kfree_skb(skb);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &req, sizeof(req), true);
+}
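The beacon template returned by mac80211 is not sent as-is: a hardware TXD is written in front of it inside req.beacon_tlv.pkt, so every IE position reported to the firmware is rebased by MT_TXD_SIZE. The resulting buffer layout, as implied by the code above:

	/* pkt[] layout (offsets relative to req.beacon_tlv.pkt):
	 *
	 *   0 ....... MT_TXD_SIZE ........................... pkt_len
	 *   | hw TXD  | 802.11 beacon frame from the template |
	 *
	 * tim_ie_pos = MT_TXD_SIZE + offs.tim_offset
	 * csa_ie_pos = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4
	 */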
+
+static
+int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap,
+ struct mt7921_clc *clc,
+ u8 idx)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 ver;
+ u8 pad0;
+ __le16 len;
+ u8 idx;
+ u8 env;
+ u8 pad1[2];
+ u8 alpha2[2];
+ u8 type[2];
+ u8 rsvd[64];
+ } __packed req = {
+ .idx = idx,
+ .env = env_cap,
+ };
+ int ret, valid_cnt = 0;
+ u8 i, *pos;
+
+ if (!clc)
+ return 0;
+
+ pos = clc->data;
+ for (i = 0; i < clc->nr_country; i++) {
+ struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
+ u16 len = le16_to_cpu(rule->len);
+
+ pos += len + sizeof(*rule);
+ if (rule->alpha2[0] != alpha2[0] ||
+ rule->alpha2[1] != alpha2[1])
+ continue;
+
+ memcpy(req.alpha2, rule->alpha2, 2);
+ memcpy(req.type, rule->type, 2);
+
+ req.len = cpu_to_le16(sizeof(req) + len);
+ skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
+ le16_to_cpu(req.len),
+ sizeof(req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_put_data(skb, rule->data, len);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CE_CMD(SET_CLC), false);
+ if (ret < 0)
+ return ret;
+ valid_cnt++;
+ }
+
+ if (!valid_cnt)
+ return -ENOENT;
+
+ return 0;
+}
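Each CLC blob is a sequence of back-to-back variable-length records: a fixed header (alpha2, type, little-endian len) followed by len payload bytes, with the cursor advanced by sizeof(*rule) + len per record. A standalone walk of that encoding (userspace sketch; assumes a little-endian host where the driver would use le16_to_cpu()):

	#include <stdint.h>
	#include <stdio.h>

	struct clc_rule {	/* mirrors struct mt7921_clc_rule */
		uint8_t alpha2[2];
		uint8_t type[2];
		uint16_t len;	/* payload bytes after the header */
		uint8_t data[];
	};

	static void walk(const uint8_t *pos, unsigned int nr_country)
	{
		unsigned int i;

		for (i = 0; i < nr_country; i++) {
			const struct clc_rule *rule = (const void *)pos;

			printf("rule %c%c, %u payload bytes\n",
			       rule->alpha2[0], rule->alpha2[1],
			       (unsigned int)rule->len);
			pos += sizeof(*rule) + rule->len;	/* next record */
		}
	}

	int main(void)
	{
		/* one "US" rule carrying two payload bytes */
		static const uint8_t buf[] = { 'U', 'S', 0, 0, 2, 0, 0xaa, 0xbb };

		walk(buf, 1);
		return 0;
	}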
+
+int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap)
+{
+ struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
+ int i, ret;
+
+ /* submit all clc config */
+ for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
+ ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
+ phy->clc[i], i);
+
+ /* If no country found, set "00" as default */
+ if (ret == -ENOENT)
+ ret = __mt7921_mcu_set_clc(dev, "00",
+ ENVIRON_INDOOR,
+ phy->clc[i], i);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index 77cc0cc5b436..96dc870fd35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -6,76 +6,6 @@
#include "../mt76_connac_mcu.h"
-struct mt7921_mcu_txd {
- __le32 txd[8];
-
- __le16 len;
- __le16 pq_id;
-
- u8 cid;
- u8 pkt_type;
- u8 set_query; /* FW don't care */
- u8 seq;
-
- u8 uc_d2b0_rev;
- u8 ext_cid;
- u8 s2d_index;
- u8 ext_cid_ack;
-
- u32 reserved[5];
-} __packed __aligned(4);
-
-/**
- * struct mt7921_uni_txd - mcu command descriptor for firmware v3
- * @txd: hardware descriptor
- * @len: total length not including txd
- * @cid: command identifier
- * @pkt_type: must be 0xa0 (cmd packet by long format)
- * @frag_n: fragment number
- * @seq: sequence number
- * @checksum: 0 mean there is no checksum
- * @s2d_index: index for command source and destination
- * Definition | value | note
- * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
- * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
- * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
- * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
- *
- * @option: command option
- * BIT[0]: UNI_CMD_OPT_BIT_ACK
- * set to 1 to request a fw reply
- * if UNI_CMD_OPT_BIT_0_ACK is set and UNI_CMD_OPT_BIT_2_SET_QUERY
- * is set, mcu firmware will send response event EID = 0x01
- * (UNI_EVENT_ID_CMD_RESULT) to the host.
- * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
- * 0: original command
- * 1: unified command
- * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
- * 0: QUERY command
- * 1: SET command
- */
-struct mt7921_uni_txd {
- __le32 txd[8];
-
- /* DW1 */
- __le16 len;
- __le16 cid;
-
- /* DW2 */
- u8 reserved;
- u8 pkt_type;
- u8 frag_n;
- u8 seq;
-
- /* DW3 */
- __le16 checksum;
- u8 s2d_index;
- u8 option;
-
- /* DW4 */
- u8 reserved2[4];
-} __packed __aligned(4);
-
struct mt7921_mcu_tx_done_event {
u8 pid;
u8 status;
@@ -108,25 +38,10 @@ enum {
MCU_EXT_EVENT_RATE_REPORT = 0x87,
};
-struct mt7921_mcu_rxd {
- __le32 rxd[6];
-
- __le16 len;
- __le16 pkt_type_id;
-
- u8 eid;
- u8 seq;
- __le16 __rsv;
-
- u8 ext_eid;
- u8 __rsv1[2];
- u8 s2d_index;
-};
-
struct mt7921_mcu_eeprom_info {
__le32 addr;
__le32 valid;
- u8 data[16];
+ u8 data[MT7921_EEPROM_BLOCK_SIZE];
} __packed;
#define MT_RA_RATE_NSS GENMASK(8, 6)
@@ -135,9 +50,6 @@ struct mt7921_mcu_eeprom_info {
#define MT_RA_RATE_DCM_EN BIT(4)
#define MT_RA_RATE_BW GENMASK(14, 13)
-#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
-#define MCU_PKT_ID 0xa0
-
struct mt7921_mcu_uni_event {
u8 cid;
u8 pad[3];
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 96647801850a..eaba114a9c7e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -8,9 +8,9 @@
#include <linux/ktime.h>
#include "../mt76_connac_mcu.h"
#include "regs.h"
+#include "acpi_sar.h"
#define MT7921_MAX_INTERFACES 4
-#define MT7921_MAX_WMM_SETS 4
#define MT7921_WTBL_SIZE 20
#define MT7921_WTBL_RESERVED (MT7921_WTBL_SIZE - 1)
#define MT7921_WTBL_STA (MT7921_WTBL_RESERVED - \
@@ -30,6 +30,7 @@
#define MT7921_DRV_OWN_RETRY_COUNT 10
#define MT7921_MCU_INIT_RETRY_COUNT 10
+#define MT7921_WFSYS_INIT_RETRY_COUNT 2
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
@@ -40,6 +41,8 @@
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
+#define MT7921_EEPROM_BLOCK_SIZE 16
+
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
@@ -89,11 +92,6 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
-struct mt7921_sta_key_conf {
- s8 keyidx;
- u8 key[16];
-};
-
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
@@ -104,9 +102,8 @@ struct mt7921_sta {
unsigned long last_txs;
unsigned long ampdu_state;
- struct mt76_sta_stats stats;
- struct mt7921_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
};
DECLARE_EWMA(rssi, 10, 8);
@@ -153,13 +150,35 @@ struct mib_stats {
u32 tx_amsdu_cnt;
};
+enum {
+ MT7921_CLC_POWER,
+ MT7921_CLC_CHAN,
+ MT7921_CLC_MAX_NUM,
+};
+
+struct mt7921_clc_rule {
+ u8 alpha2[2];
+ u8 type[2];
+ __le16 len;
+ u8 data[];
+} __packed;
+
+struct mt7921_clc {
+ __le32 len;
+ u8 idx;
+ u8 ver;
+ u8 nr_country;
+ u8 type;
+ u8 rsv[8];
+ u8 data[];
+};
+
struct mt7921_phy {
struct mt76_phy *mt76;
struct mt7921_dev *dev;
struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
- u32 rxfilter;
u64 omac_mask;
u16 noise;
@@ -176,6 +195,11 @@ struct mt7921_phy {
struct sk_buff_head scan_event_list;
struct delayed_work scan_work;
+#ifdef CONFIG_ACPI
+ struct mt7921_acpi_sar *acpisar;
+#endif
+
+ struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
@@ -209,11 +233,17 @@ struct mt7921_dev {
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
+ struct work_struct init_work;
+
u8 fw_debug;
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
const struct mt7921_hif_ops *hif_ops;
+
+ struct work_struct ipv6_ns_work;
+ /* IPv6 addresses for WoWLAN */
+ struct sk_buff_head ipv6_ns_list;
};
enum {
@@ -246,16 +276,6 @@ struct mt7921_txpwr {
} data[TXPWR_MAX_NUM];
};
-enum {
- MT_LMAC_AC00,
- MT_LMAC_AC01,
- MT_LMAC_AC02,
- MT_LMAC_AC03,
- MT_LMAC_ALTX0 = 0x10,
- MT_LMAC_BMC0,
- MT_LMAC_BCN0,
-};
-
static inline struct mt7921_phy *
mt7921_hw_phy(struct ieee80211_hw *hw)
{
@@ -277,14 +297,7 @@ mt7921_hw_dev(struct ieee80211_hw *hw)
#define mt7921_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
-static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac)
-{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
-}
-
extern const struct ieee80211_ops mt7921_ops;
-extern struct pci_driver mt7921_pci_driver;
u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr);
@@ -296,21 +309,18 @@ int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
+int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset);
int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb);
-void mt7921_mcu_exit(struct mt7921_dev *dev);
static inline void mt7921_irq_enable(struct mt7921_dev *dev, u32 mask)
{
@@ -361,23 +371,20 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
-static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev)
-{
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
-}
-
-static inline void mt7921_skb_add_sdio_hdr(struct sk_buff *skb,
- enum mt7921_sdio_pkt_type type)
+static inline void
+mt7921_skb_add_usb_sdio_hdr(struct mt7921_dev *dev, struct sk_buff *skb,
+ int type)
{
- u32 hdr;
+ u32 hdr, len;
- hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, skb->len + sizeof(hdr)) |
+ len = mt76_is_usb(&dev->mt76) ? skb->len : skb->len + sizeof(hdr);
+ hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, len) |
FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type);
put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr)));
}
+void mt7921_stop(struct ieee80211_hw *hw);
int mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
@@ -398,9 +405,8 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7921_tx_worker(struct mt76_worker *w);
-void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
void mt7921_tx_token_put(struct mt7921_dev *dev);
+bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -424,7 +430,6 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
void mt7921_pm_power_save_work(struct work_struct *work);
-bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_wfsys_reset(struct mt7921_dev *dev);
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
@@ -432,37 +437,75 @@ int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
-void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key, int pid,
- bool beacon);
-void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
+void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list);
void mt7921_mac_sta_poll(struct mt7921_dev *dev);
-int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, int *wait_seq);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
-int mt7921_mcu_restart(struct mt76_dev *dev);
-void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
int mt7921e_mac_reset(struct mt7921_dev *dev);
int mt7921e_mcu_init(struct mt7921_dev *dev);
int mt7921s_wfsys_reset(struct mt7921_dev *dev);
int mt7921s_mac_reset(struct mt7921_dev *dev);
int mt7921s_init_reset(struct mt7921_dev *dev);
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_init(struct mt7921_dev *dev);
int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info);
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
+void mt7921_set_runtime_pm(struct mt7921_dev *dev);
+void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif);
+void mt7921_set_ipv6_ns_work(struct work_struct *work);
+
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e);
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
+
+/* usb */
+#define MT_USB_TYPE_VENDOR (USB_TYPE_VENDOR | 0x1f)
+#define MT_USB_TYPE_UHW_VENDOR (USB_TYPE_VENDOR | 0x1e)
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev);
+int mt7921u_wfsys_reset(struct mt7921_dev *dev);
+int mt7921u_dma_init(struct mt7921_dev *dev, bool resume);
+int mt7921u_init_reset(struct mt7921_dev *dev);
+int mt7921u_mac_reset(struct mt7921_dev *dev);
+int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable);
+#ifdef CONFIG_ACPI
+int mt7921_init_acpi_sar(struct mt7921_dev *dev);
+int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default);
+#else
+static inline int
+mt7921_init_acpi_sar(struct mt7921_dev *dev)
+{
+ return 0;
+}
+
+static inline int
+mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
+{
+ return 0;
+}
+#endif
+int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
+
+int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 9dae2f5972bf..8a53d8f286db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -105,6 +105,7 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
@@ -115,10 +116,110 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt7921_mcu_drv_pmctrl(dev);
mt7921_dma_cleanup(dev);
mt7921_wfsys_reset(dev);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
tasklet_disable(&dev->irq_tasklet);
- mt76_free_device(&dev->mt76);
+}
+
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+ static const struct mt76_connac_reg_map fixed_map[] = {
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x01000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x01000 }, /* WF_MDP_TOP */
+ { 0x74030000, 0x10000, 0x10000 }, /* PCIE_MAC_IREG */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].maps + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7921_reg_map_l1(dev, addr);
+
+ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+ addr);
+
+ return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
}
static int mt7921_pci_probe(struct pci_dev *pdev,
@@ -126,15 +227,16 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7921_txp_common),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
- .tx_complete_skb = mt7921e_tx_complete_skb,
- .rx_skb = mt7921e_queue_rx_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
+ .rx_check = mt7921_rx_check,
+ .rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
@@ -142,7 +244,6 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt7921_update_channel,
};
-
static const struct mt7921_hif_ops mt7921_pcie_ops = {
.init_reset = mt7921e_init_reset,
.reset = mt7921e_mac_reset,
@@ -151,6 +252,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
+ struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@@ -183,11 +285,34 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
goto err_free_pci_vec;
}
+ pci_set_drvdata(pdev, mdev);
+
dev = container_of(mdev, struct mt7921_dev, mt76);
dev->hif_ops = &mt7921_pcie_ops;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops) {
+ ret = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ bus_ops->rr = mt7921_rr;
+ bus_ops->wr = mt7921_wr;
+ bus_ops->rmw = mt7921_rmw;
+ dev->mt76.bus = bus_ops;
+
+ ret = __mt7921e_mcu_drv_pmctrl(dev);
+ if (ret)
+ goto err_free_dev;
+
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
@@ -228,18 +353,20 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
mt7921e_unregister_device(dev);
devm_free_irq(&pdev->dev, pdev->irq, dev);
+ mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
}
-#ifdef CONFIG_PM
-static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mt7921_pci_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
int i, err;
pm->suspended = true;
+ flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -263,8 +390,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
napi_disable(&mdev->napi[i]);
}
- pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
-
/* wait until dma is idle */
mt76_poll(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
@@ -284,11 +409,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (err)
goto restore_napi;
- pci_save_state(pdev);
- err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
- if (err)
- goto restore_napi;
-
return 0;
restore_napi:
@@ -305,25 +425,23 @@ restore_napi:
restore_suspend:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
-static int mt7921_pci_resume(struct pci_dev *pdev)
+static int mt7921_pci_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
int i, err;
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- pci_restore_state(pdev);
-
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto failed;
mt7921_wpdma_reinit_cond(dev);
@@ -353,24 +471,23 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
- if (err)
- return err;
-
+failed:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
-#endif /* CONFIG_PM */
-struct pci_driver mt7921_pci_driver = {
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7921_pm_ops, mt7921_pci_suspend, mt7921_pci_resume);
+
+static struct pci_driver mt7921_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7921_pci_device_table,
.probe = mt7921_pci_probe,
.remove = mt7921_pci_remove,
-#ifdef CONFIG_PM
- .suspend = mt7921_pci_suspend,
- .resume = mt7921_pci_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = pm_sleep_ptr(&mt7921_pm_ops),
};
module_pci_driver(mt7921_pci_driver);
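The pci.c changes above do three related things: register accesses are routed through a fixed translation table that maps firmware bus addresses onto the PCI BAR window, the default bus ops are interposed by mt7921_rr/wr/rmw on a devm_kmemdup()'d copy so the translation applies to every access, and suspend/resume move from the legacy pci_driver hooks to dev_pm_ops, letting the PCI core take over the pci_save_state()/pci_set_power_state() bookkeeping the old callbacks open-coded. A minimal sketch of the window lookup, assuming a caller-supplied map (struct reg_window and translate() are illustrative names, not driver symbols):

#include <linux/types.h>

struct reg_window {
	u32 phys;	/* bus address the firmware uses */
	u32 maps;	/* offset of the window inside the BAR */
	u32 size;	/* window length */
};

/* Return the BAR offset for a bus address, or 0 when no window covers
 * it -- the same shape as __mt7921_reg_addr() above.
 */
static u32 translate(const struct reg_window *map, int n, u32 addr)
{
	int i;

	for (i = 0; i < n; i++) {
		u32 ofs;

		if (addr < map[i].phys)
			continue;

		ofs = addr - map[i].phys;
		if (ofs > map[i].size)
			continue;

		return map[i].maps + ofs;
	}

	return 0;
}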
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 85286cc9add1..8dd60408b117 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -5,37 +5,6 @@
#include "../dma.h"
#include "mac.h"
-static void
-mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct mt7921_hw_txp *txp = txp_ptr;
- struct mt7921_txp_ptr *ptr = &txp->ptr[0];
- int i, nbuf = tx_info->nbuf - 1;
-
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->nbuf = 1;
-
- txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
-
- for (i = 0; i < nbuf; i++) {
- u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
- u32 addr = tx_info->buf[i + 1].addr;
-
- if (i == nbuf - 1)
- len |= MT_TXD_LEN_LAST;
-
- if (i & 1) {
- ptr->buf1 = cpu_to_le32(addr);
- ptr->len1 = cpu_to_le16(len);
- ptr++;
- } else {
- ptr->buf0 = cpu_to_le32(addr);
- ptr->len0 = cpu_to_le16(len);
- }
- }
-}
-
int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@@ -44,8 +13,8 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct mt76_connac_hw_txp *txp;
struct mt76_txwi_cache *t;
- struct mt7921_txp_common *txp;
int id, pid;
u8 *txwi = (u8 *)txwi_ptr;
@@ -72,198 +41,18 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- pid, false);
+ mt76_connac2_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, key,
+ pid, qid, 0);
- txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
- memset(txp, 0, sizeof(struct mt7921_txp_common));
- mt7921_write_hw_txp(dev, tx_info, txp, id);
+ txp = (struct mt76_connac_hw_txp *)(txwi + MT_TXD_SIZE);
+ memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
tx_info->skb = DMA_DUMMY_DATA;
return 0;
}
-static void
-mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- struct mt7921_txp_common *txp;
- int i;
-
- txp = mt7921_txwi_to_txp(dev, t);
-
- for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
- struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
- bool last;
- u16 len;
-
- len = le16_to_cpu(ptr->len0);
- last = len & MT_TXD_LEN_LAST;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
- DMA_TO_DEVICE);
- if (last)
- break;
-
- len = le16_to_cpu(ptr->len1);
- last = len & MT_TXD_LEN_LAST;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
- DMA_TO_DEVICE);
- if (last)
- break;
- }
-}
-
-static void
-mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
- struct list_head *free_list)
-{
- struct mt76_dev *mdev = &dev->mt76;
- __le32 *txwi;
- u16 wcid_idx;
-
- mt7921_txp_skb_unmap(mdev, t);
- if (!t->skb)
- goto out;
-
- txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
- if (sta) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- wcid_idx = wcid->idx;
- } else {
- wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
- }
-
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-
-out:
- t->skb = NULL;
- mt76_put_txwi(mdev, t);
-}
-
-static void
-mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
-{
- struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
- LIST_HEAD(free_list);
- struct sk_buff *tmp;
- bool wake = false;
- u8 i, count;
-
- /* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
-
- /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
- u8 stat;
-
- /* 1'b1: new wcid pair.
- * 1'b0: msdu_id with the same 'wcid pair' as above.
- */
- if (info & MT_TX_FREE_PAIR) {
- struct mt7921_sta *msta;
- struct mt76_wcid *wcid;
- u16 idx;
-
- count++;
- idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
- sta = wcid_to_sta(wcid);
- if (!sta)
- continue;
-
- msta = container_of(wcid, struct mt7921_sta, wcid);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
- continue;
- }
-
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- stat = FIELD_GET(MT_TX_FREE_STATUS, info);
-
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
- continue;
-
- mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
- }
-
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
-
- napi_consume_skb(skb, 1);
-
- list_for_each_entry_safe(skb, tmp, &free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
- }
-
- rcu_read_lock();
- mt7921_mac_sta_poll(dev);
- rcu_read_unlock();
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
-void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- __le32 *rxd = (__le32 *)skb->data;
- enum rx_pkt_type type;
-
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7921_mac_tx_free(dev, skb);
- break;
- default:
- mt7921_queue_rx_skb(mdev, q, skb);
- break;
- }
-}
-
-void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7921_txp_common *txp;
- u16 token;
-
- txp = mt7921_txwi_to_txp(mdev, e->txwi);
- token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
- t = mt76_token_put(mdev, token);
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
@@ -314,6 +103,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
}
local_bh_enable();
+ dev->fw_assert = false;
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
@@ -323,7 +113,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
err = mt7921e_driver_own(dev);
if (err)
- return err;
+ goto out;
err = mt7921_run_firmware(dev);
if (err)
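With pci_mac.c, the PCIe TX path (txp packing, skb unmap, tx-free event parsing, completion) moves into the shared mt76_connac helpers referenced in the hunks above. The core of the removed mt7921_write_hw_txp() is the alternating scatter-pointer layout, reproduced here as a sketch because the shared replacement packs descriptors the same way (write_txp_sketch() is an illustrative name; the structures follow the mt76_connac types named in the diff):

static void write_txp_sketch(struct mt76_connac_hw_txp *txp,
			     struct mt76_tx_info *tx_info, u16 id)
{
	struct mt76_connac_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;

	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		if (i == nbuf - 1)
			len |= MT_TXD_LEN_LAST;	/* mark the final segment */

		/* each descriptor holds two buffer/length pairs, so
		 * entries alternate between the buf0 and buf1 slots
		 */
		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}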
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index a020352122a1..86340d3205c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -26,10 +26,12 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
enum mt76_mcuq_id txq = MT_MCUQ_WM;
int ret;
- ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, seq);
if (ret)
return ret;
+ mdev->mcu.timeout = 3 * HZ;
+
if (cmd == MCU_CMD(FW_SCATTER))
txq = MT_MCUQ_FWDL;
@@ -39,10 +41,10 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int mt7921e_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921_mcu_ops = {
- .headroom = sizeof(struct mt7921_mcu_txd),
+ .headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt7921_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int err;
@@ -52,6 +54,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
if (err)
return err;
+ mt76_rmw_field(dev, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS, 1);
+
err = mt7921_run_firmware(dev);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
@@ -59,10 +63,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
return err;
}
-int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -75,9 +77,21 @@ int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
- goto out;
}
+ return err;
+}
+
+int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err;
+
+ err = __mt7921e_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ goto out;
+
mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);
@@ -92,7 +106,7 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int i, err = 0;
+ int i;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
@@ -104,12 +118,12 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
+ return -EIO;
}
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
- return err;
+ return 0;
}
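pci_mcu.c splits the driver-own handshake: __mt7921e_mcu_drv_pmctrl() keeps only the bounded claim loop, so probe can take ownership before the MCU machinery is up, while mt7921e_mcu_drv_pmctrl() layers the WPDMA reinit, PM state bits, and wake accounting on top. The body of the retry loop is elided in the hunk above; its rough shape is the following sketch (claim_ownership_sketch() is illustrative; the registers and bounds match their usage elsewhere in this diff):

/* Clear the firmware-own bit, then poll for the sync bit to drop;
 * retry within a fixed budget before reporting -EIO.
 */
static int claim_ownership_sketch(struct mt7921_dev *dev)
{
	int i;

	for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
		mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
		if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
				   PCIE_LPCR_HOST_OWN_SYNC, 0, 50))
			return 0;
	}

	return -EIO;
}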
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index cbd38122c510..c65582acfa55 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -17,13 +17,12 @@
#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
-#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
-#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
-#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
- ((n) << 2))
+#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
#define MT_MDP_BASE 0x820cd000
@@ -354,6 +353,7 @@
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
@@ -378,6 +378,9 @@
#define MT_WFDMA0_TX_RING16_EXT_CTRL MT_WFDMA0(0x640)
#define MT_WFDMA0_TX_RING17_EXT_CTRL MT_WFDMA0(0x644)
+#define MT_WPDMA0_MAX_CNT_MASK GENMASK(7, 0)
+#define MT_WPDMA0_BASE_PTR_MASK GENMASK(31, 16)
+
#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
@@ -426,6 +429,10 @@
#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120)
#define MT_WFDMA_NEED_REINIT BIT(1)
+#define MT_CBTOP_RGU(ofs) (0x70002000 + (ofs))
+#define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600)
+#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
@@ -433,13 +440,17 @@
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+#define MT_PCIE_MAC_PM MT_PCIE_MAC(0x194)
+#define MT_PCIE_MAC_PM_L0S_DIS BIT(8)
-#define MT_DMA_SHDL(ofs) (0xd6000 + (ofs))
+#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs))
#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
#define MT_DMASHDL_DMASHDL_BYPASS BIT(28)
#define MT_DMASHDL_OPTIONAL MT_DMA_SHDL(0x008)
#define MT_DMASHDL_PAGE MT_DMA_SHDL(0x00c)
+#define MT_DMASHDL_GROUP_SEQ_ORDER BIT(16)
#define MT_DMASHDL_REFILL MT_DMA_SHDL(0x010)
+#define MT_DMASHDL_REFILL_MASK GENMASK(31, 16)
#define MT_DMASHDL_PKT_MAX_SIZE MT_DMA_SHDL(0x01c)
#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
@@ -454,6 +465,46 @@
#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2))
+#define MT_WFDMA_HOST_CONFIG 0x7c027030
+#define MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN BIT(6)
+
+#define MT_UMAC(ofs) (0x74000000 + (ofs))
+#define MT_UDMA_TX_QSEL MT_UMAC(0x008)
+#define MT_FW_DL_EN BIT(3)
+
+#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c)
+#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0)
+#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8)
+
+#define MT_UDMA_WLCFG_0 MT_UMAC(0x18)
+#define MT_WL_RX_AGG_TO GENMASK(7, 0)
+#define MT_WL_RX_AGG_LMT GENMASK(15, 8)
+#define MT_WL_TX_TMOUT_FUNC_EN BIT(16)
+#define MT_WL_TX_DPH_CHK_EN BIT(17)
+#define MT_WL_RX_MPSZ_PAD0 BIT(18)
+#define MT_WL_RX_FLUSH BIT(19)
+#define MT_TICK_1US_EN BIT(20)
+#define MT_WL_RX_AGG_EN BIT(21)
+#define MT_WL_RX_EN BIT(22)
+#define MT_WL_TX_EN BIT(23)
+#define MT_WL_RX_BUSY BIT(30)
+#define MT_WL_TX_BUSY BIT(31)
+
+#define MT_UDMA_CONN_INFRA_STATUS MT_UMAC(0xa20)
+#define MT_UDMA_CONN_WFSYS_INIT_DONE BIT(22)
+#define MT_UDMA_CONN_INFRA_STATUS_SEL MT_UMAC(0xa24)
+
+#define MT_SSUSB_EPCTL_CSR(ofs) (0x74011800 + (ofs))
+#define MT_SSUSB_EPCTL_CSR_EP_RST_OPT MT_SSUSB_EPCTL_CSR(0x090)
+
+#define MT_UWFDMA0(ofs) (0x7c024000 + (ofs))
+#define MT_UWFDMA0_GLO_CFG MT_UWFDMA0(0x208)
+#define MT_UWFDMA0_GLO_CFG_EXT0 MT_UWFDMA0(0x2b0)
+#define MT_UWFDMA0_TX_RING_EXT_CTRL(_n) MT_UWFDMA0(0x600 + ((_n) << 2))
+
+#define MT_CONN_STATUS 0x7c053c10
+#define MT_WIFI_PATCH_DL_STATE BIT(0)
+
#define MT_CONN_ON_LPCTL 0x7c060010
#define PCIE_LPCR_HOST_OWN_SYNC BIT(2)
#define PCIE_LPCR_HOST_CLR_OWN BIT(1)
@@ -464,6 +515,12 @@
#define WFSYS_SW_INIT_DONE BIT(4)
#define MT_CONN_ON_MISC 0x7c0600f0
+#define MT_TOP_MISC2_FW_PWR_ON BIT(0)
#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
+#define MT_WF_SW_DEF_CR(ofs) (0x401a00 + (ofs))
+#define MT_WF_SW_DEF_CR_USB_MCU_EVENT MT_WF_SW_DEF_CR(0x028)
+#define MT_WF_SW_SER_TRIGGER_SUSPEND BIT(6)
+#define MT_WF_SW_SER_DONE_SUSPEND BIT(7)
+
#endif
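The regs.h additions keep the mt76 convention of pairing each register offset with BIT()/GENMASK() field definitions so call sites use FIELD_PREP()/FIELD_GET(), or the mt76_rmw_field() wrapper seen above in pci_mcu.c for MT_PCIE_MAC_PM_L0S_DIS, instead of hand-rolled shifts. For example, updating the 8-bit RX aggregation packet limit in MT_UDMA_WLCFG_1 could look like this (set_rx_agg_pkt_limit() is an illustrative helper, not driver code):

#include <linux/bitfield.h>

/* mt76_rmw_field() expands to a read-modify-write that clears
 * MT_WL_RX_AGG_PKT_LMT and overlays FIELD_PREP(mask, limit).
 */
static void set_rx_agg_pkt_limit(struct mt7921_dev *dev, u8 limit)
{
	mt76_rmw_field(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT, limit);
}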
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 65d693902c22..3b25a06fd946 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -41,13 +41,14 @@ static void mt7921s_unregister_device(struct mt7921_dev *dev)
{
struct mt76_connac_pm *pm = &dev->pm;
+ cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
mt76s_deinit(&dev->mt76);
mt7921s_wfsys_reset(dev);
- mt7921_mcu_exit(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
mt76_free_device(&dev->mt76);
}
@@ -58,7 +59,10 @@ static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7921_sdio_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err < 0)
return err;
@@ -88,10 +92,11 @@ static int mt7921s_probe(struct sdio_func *func,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
- .tx_prepare_skb = mt7921s_tx_prepare_skb,
- .tx_complete_skb = mt7921s_tx_complete_skb,
- .tx_status_data = mt7921s_tx_status_data,
+ .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
@@ -118,7 +123,7 @@ static int mt7921s_probe(struct sdio_func *func,
struct mt7921_dev *dev;
struct mt76_dev *mdev;
- int ret, i;
+ int ret;
mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
@@ -151,16 +156,6 @@ static int mt7921s_probe(struct sdio_func *func,
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
@@ -200,7 +195,6 @@ static void mt7921s_remove(struct sdio_func *func)
mt7921s_unregister_device(dev);
}
-#ifdef CONFIG_PM
static int mt7921s_suspend(struct device *__dev)
{
struct sdio_func *func = dev_to_sdio_func(__dev);
@@ -212,6 +206,7 @@ static int mt7921s_suspend(struct device *__dev)
pm->suspended = true;
set_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
+ flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -267,6 +262,9 @@ restore_suspend:
clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
@@ -282,7 +280,7 @@ static int mt7921s_resume(struct device *__dev)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto failed;
mt76_worker_enable(&mdev->tx_worker);
mt76_worker_enable(&mdev->sdio.txrx_worker);
@@ -294,34 +292,27 @@ static int mt7921s_resume(struct device *__dev)
mt76_connac_mcu_set_deep_sleep(mdev, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
- if (err)
- return err;
-
+failed:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
-static const struct dev_pm_ops mt7921s_pm_ops = {
- .suspend = mt7921s_suspend,
- .resume = mt7921s_resume,
-};
-#endif
-
MODULE_DEVICE_TABLE(sdio, mt7921s_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7921s_pm_ops, mt7921s_suspend, mt7921s_resume);
+
static struct sdio_driver mt7921s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7921s_probe,
.remove = mt7921s_remove,
.id_table = mt7921s_table,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &mt7921s_pm_ops,
- }
-#endif
+ .drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index ccaf8134cec7..1b3adb3d91e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -60,7 +60,11 @@ int mt7921s_wfsys_reset(struct mt7921_dev *dev)
sdio_release_host(sdio->func);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
/* activate mt7921s again */
+ mt7921s_mcu_drv_pmctrl(dev);
+ mt76_clear(dev, MT_CONN_STATUS, MT_WIFI_PATCH_DL_STATE);
mt7921s_mcu_fw_pmctrl(dev);
mt7921s_mcu_drv_pmctrl(dev);
@@ -81,7 +85,6 @@ int mt7921s_init_reset(struct mt7921_dev *dev)
mt7921s_wfsys_reset(dev);
mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@@ -114,7 +117,6 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
mt76_worker_enable(&dev->mt76.sdio.net_worker);
dev->fw_assert = false;
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@@ -138,86 +140,3 @@ out:
return err;
}
-
-static void
-mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
- enum mt76_txq_id qid, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key, int pid,
- struct sk_buff *skb)
-{
- __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
-
- memset(txwi, 0, MT_SDIO_TXD_SIZE);
- mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
- skb_push(skb, MT_SDIO_TXD_SIZE);
-}
-
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct sk_buff *skb = tx_info->skb;
- int err, pad, pktid;
-
- if (unlikely(tx_info->skb->len <= ETH_HLEN))
- return -EINVAL;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- if (sta) {
- struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
-
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
- }
- }
-
- pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
- mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
-
- mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
- pad = round_up(skb->len, 4) - skb->len;
-
- err = mt76_skb_adjust_pad(skb, pad);
- if (err)
- /* Release pktid in case of error. */
- idr_remove(&wcid->pktid, pktid);
-
- return err;
-}
-
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
- unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
- struct ieee80211_sta *sta;
- struct mt76_wcid *wcid;
- u16 idx;
-
- idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
- wcid = rcu_dereference(mdev->wcid[idx]);
- sta = wcid_to_sta(wcid);
-
- if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- skb_pull(e->skb, headroom);
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-
- mt7921_mutex_acquire(dev);
- mt7921_mac_sta_poll(dev);
- mt7921_mutex_release(dev);
-
- return false;
-}
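sdio_mac.c folds the MT76_STATE_MCU_RUNNING clearing into mt7921s_wfsys_reset() and bounces ownership (driver to firmware and back) around clearing MT_WIFI_PATCH_DL_STATE, so the patch-download state machine restarts cleanly after a reset; the SDIO-only TX helpers below are removed in favor of the shared mt7921_usb_sdio_* callbacks wired up in sdio.c above. One piece of the removed prepare path worth noting is its TX-status throttle, roughly (maybe_request_txs() is an illustrative name):

#include <linux/jiffies.h>

/* Ask the firmware for a TX status report at most about four times a
 * second per station, as the removed mt7921s_tx_prepare_skb() did.
 */
static void maybe_request_txs(struct mt7921_sta *msta,
			      struct ieee80211_tx_info *info)
{
	if (time_after(jiffies, msta->last_txs + HZ / 4)) {
		info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
		msta->last_txs = jiffies;
	}
}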
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index d20f2ff01be1..5c1489766d9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -29,14 +29,16 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (dev->fw_assert)
return -EBUSY;
- ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, seq);
if (ret)
return ret;
+ mdev->mcu.timeout = 3 * HZ;
+
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
- mt7921_skb_add_sdio_hdr(skb, type);
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
pad = round_up(skb->len, 4) - skb->len;
__skb_put_zero(skb, pad);
@@ -49,10 +51,31 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
+static u32 mt7921s_read_rm3r(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ return sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+}
+
+static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 val;
+
+ val = sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+ if (val)
+ sdio_writel(sdio->func, H2D_SW_INT_CLEAR_MAILBOX_ACK,
+ MCR_WSICR, NULL);
+
+ return val;
+}
+
int mt7921s_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921s_mcu_ops = {
- .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+ .headroom = MT_SDIO_HDR_SIZE +
+ sizeof(struct mt76_connac2_mcu_txd),
.tailroom = MT_SDIO_TAIL_SIZE,
.mcu_skb_send_msg = mt7921s_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
@@ -79,8 +102,8 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int err = 0;
u32 status;
+ int err;
sdio_claim_host(func);
@@ -88,12 +111,17 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+
+ if (!err && test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
+ status & D2HRM3R_IS_DRIVER_OWN,
+ 2000, 1000000);
+
sdio_release_host(func);
if (err < 0) {
dev_err(dev->mt76.dev, "driver own failed\n");
- err = -EIO;
- goto out;
+ return -EIO;
}
clear_bit(MT76_STATE_PM, &mphy->state);
@@ -101,8 +129,8 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
-out:
- return err;
+
+ return 0;
}
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
@@ -110,26 +138,38 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int err = 0;
u32 status;
+ int err;
sdio_claim_host(func);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
+ err = readx_poll_timeout(mt7921s_clear_rm3r_drv_own,
+ dev, status,
+ !(status & D2HRM3R_IS_DRIVER_OWN),
+ 2000, 1000000);
+ if (err < 0) {
+ dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
+ goto out;
+ }
+ }
+
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
+out:
sdio_release_host(func);
if (err < 0) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
+ return -EIO;
}
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
- return err;
+ return 0;
}
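Once firmware is running, the SDIO ownership handshake must also agree with the D2HRM3R mailbox: claiming additionally waits for D2HRM3R_IS_DRIVER_OWN, and releasing first acknowledges any pending mailbox bit through H2D_SW_INT_CLEAR_MAILBOX_ACK. The polling builds on readx_poll_timeout(), which repeatedly calls an arbitrary accessor; a stripped-down sketch, assuming the caller already holds the host via sdio_claim_host() as the driver does (read_handshake()/wait_driver_own() are illustrative names):

#include <linux/iopoll.h>

static u32 read_handshake(struct mt7921_dev *dev)
{
	/* caller must hold the SDIO host */
	return sdio_readl(dev->mt76.sdio.func, MCR_D2HRM3R, NULL);
}

/* Poll every 2 ms, give up after 1 s -- the same bounds the driver
 * passes to readx_poll_timeout() above.
 */
static int wait_driver_own(struct mt7921_dev *dev)
{
	u32 status;

	return readx_poll_timeout(read_handshake, dev, status,
				  status & D2HRM3R_IS_DRIVER_OWN,
				  2000, 1000000);
}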
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
new file mode 100644
index 000000000000..29c0ee330dbe
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static const struct usb_device_id mt7921u_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) },
+ { },
+};
+
+static u32 mt7921u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | MT_USB_TYPE_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7921u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7921u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | MT_USB_TYPE_VENDOR, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt7921u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev)
+{
+ int ret;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ MT_TOP_MISC2_FW_PWR_ON, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int
+mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *seq)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 pad, ep;
+ int ret;
+
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, seq);
+ if (ret)
+ return ret;
+
+ mdev->mcu.timeout = 3 * HZ;
+
+ if (cmd != MCU_CMD(FW_SCATTER))
+ ep = MT_EP_OUT_INBAND_CMD;
+ else
+ ep = MT_EP_OUT_AC_BE;
+
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, 0);
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ __skb_put_zero(skb, pad);
+
+ ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+ 1000, ep);
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int mt7921u_mcu_init(struct mt7921_dev *dev)
+{
+ static const struct mt76_mcu_ops mcu_ops = {
+ .headroom = MT_SDIO_HDR_SIZE +
+ sizeof(struct mt76_connac2_mcu_txd),
+ .tailroom = MT_USB_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7921u_mcu_send_message,
+ .mcu_parse_response = mt7921_mcu_parse_response,
+ .mcu_restart = mt76_connac_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mcu_ops;
+
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+ ret = mt7921_run_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ return 0;
+}
+
+static void mt7921u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt76u_stop_tx(&dev->mt76);
+ mt7921_stop(hw);
+}
+
+static void mt7921u_cleanup(struct mt7921_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt7921u_wfsys_reset(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+ mt76u_queues_deinit(&dev->mt76);
+}
+
+static int mt7921u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_SDIO_TXD_SIZE,
+ .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7921_usb_sdio_tx_status_data,
+ .rx_skb = mt7921_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
+ .sta_ps = mt7921_sta_ps,
+ .sta_add = mt7921_mac_sta_add,
+ .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_remove = mt7921_mac_sta_remove,
+ .update_survey = mt7921_update_channel,
+ };
+ static const struct mt7921_hif_ops hif_ops = {
+ .mcu_init = mt7921u_mcu_init,
+ .init_reset = mt7921u_init_reset,
+ .reset = mt7921u_mac_reset,
+ };
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt7921u_rr,
+ .wr = mt7921u_wr,
+ .rmw = mt7921u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt7921u_copy,
+ .type = MT76_BUS_USB,
+ };
+ struct usb_device *udev = interface_to_usbdev(usb_intf);
+ struct ieee80211_ops *ops;
+ struct ieee80211_hw *hw;
+ struct mt7921_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->stop = mt7921u_stop;
+
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->hif_ops = &hif_ops;
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ ret = __mt76u_init(mdev, usb_intf, &bus_ops);
+ if (ret < 0)
+ goto error;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) {
+ ret = mt7921u_wfsys_reset(dev);
+ if (ret)
+ goto error;
+ }
+
+ ret = mt7921u_mcu_power_on(dev);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_mcu_queue(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_queues(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt7921u_dma_init(dev, false);
+ if (ret)
+ return ret;
+
+ hw = mt76_hw(dev);
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = mdev->usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
+
+ ret = mt7921_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ mt76u_queues_deinit(&dev->mt76);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7921u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(usb_intf);
+
+ cancel_work_sync(&dev->init_work);
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return;
+
+ mt76_unregister_device(&dev->mt76);
+ mt7921u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+}
+
+#ifdef CONFIG_PM
+static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(intf);
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err;
+
+ pm->suspended = true;
+ flush_work(&dev->reset_work);
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ if (err)
+ goto failed;
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ return 0;
+
+failed:
+ pm->suspended = false;
+
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
+ return err;
+}
+
+static int mt7921u_resume(struct usb_interface *intf)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(intf);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool reinit = true;
+ int err, i;
+
+ for (i = 0; i < 10; i++) {
+ u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
+
+ if (!(val & MT_WF_SW_SER_TRIGGER_SUSPEND)) {
+ reinit = false;
+ break;
+ }
+ if (val & MT_WF_SW_SER_DONE_SUSPEND) {
+ mt76_wr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT, 0);
+ break;
+ }
+
+ msleep(20);
+ }
+
+ if (reinit || mt7921_dma_need_reinit(dev)) {
+ err = mt7921u_dma_init(dev, true);
+ if (err)
+ goto failed;
+ }
+
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err < 0)
+ goto failed;
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+failed:
+ pm->suspended = false;
+
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
+ return err;
+}
+#endif /* CONFIG_PM */
+
+MODULE_DEVICE_TABLE(usb, mt7921u_device_table);
+MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7921_ROM_PATCH);
+
+static struct usb_driver mt7921u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7921u_device_table,
+ .probe = mt7921u_probe,
+ .disconnect = mt7921u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt7921u_suspend,
+ .resume = mt7921u_resume,
+ .reset_resume = mt7921u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7921u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
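The new USB front end serializes every register access behind usb_ctrl_mtx, since a read-modify-write is two vendor control transfers that must not interleave, and it clones the shared mt7921_ops so only the USB binding overrides ->stop to flush bulk TX before the common stop runs. That clone-and-override pattern as a sketch (dup_ops_with_stop() is an illustrative helper):

/* Duplicate a const ops table with devm_kmemdup() so one callback can
 * be overridden per bus without touching the shared structure.
 */
static struct ieee80211_ops *
dup_ops_with_stop(struct device *dev, void (*stop)(struct ieee80211_hw *hw))
{
	struct ieee80211_ops *ops;

	ops = devm_kmemdup(dev, &mt7921_ops, sizeof(mt7921_ops), GFP_KERNEL);
	if (ops)
		ops->stop = stop;

	return ops;
}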
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
new file mode 100644
index 000000000000..efbd3954c883
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static u32 mt7921u_uhw_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_DEV_MODE,
+ USB_DIR_IN | MT_USB_TYPE_UHW_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7921u_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE,
+ USB_DIR_OUT | MT_USB_TYPE_UHW_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static void mt7921u_dma_prefetch(struct mt7921_dev *dev)
+{
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+ MT_WPDMA0_BASE_PTR_MASK, 0x80);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+ MT_WPDMA0_BASE_PTR_MASK, 0xc0);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+ MT_WPDMA0_BASE_PTR_MASK, 0x100);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+ MT_WPDMA0_BASE_PTR_MASK, 0x140);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+ MT_WPDMA0_BASE_PTR_MASK, 0x180);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+ MT_WPDMA0_BASE_PTR_MASK, 0x280);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+ MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
+}
+
+static void mt7921u_wfdma_init(struct mt7921_dev *dev)
+{
+ mt7921u_dma_prefetch(dev);
+
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
+ mt76_set(dev, MT_UWFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL |
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ /* disable dmashdl */
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
+ MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+ mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+
+ mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
+}
+
+static int mt7921u_dma_rx_evt_ep4(struct mt7921_dev *dev)
+{
+ if (!mt76_poll(dev, MT_UWFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+ return -ETIMEDOUT;
+
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN);
+ mt76_set(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ return 0;
+}
+
+static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset)
+{
+ u32 val;
+
+ /* usb endpoint reset opt
+ * bits[4,9]: out blk ep 4-9
+ * bits[20,21]: in blk ep 4-5
+ * bits[22]: in int ep 6
+ */
+ val = mt7921u_uhw_rr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT);
+ if (reset)
+ val |= GENMASK(9, 4) | GENMASK(22, 20);
+ else
+ val &= ~(GENMASK(9, 4) | GENMASK(22, 20));
+ mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
+}
+
+int mt7921u_dma_init(struct mt7921_dev *dev, bool resume)
+{
+ int err;
+
+ mt7921u_wfdma_init(dev);
+
+ mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+
+ mt76_set(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_EN | MT_WL_TX_EN |
+ MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN);
+ mt76_clear(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT);
+ mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT);
+
+ if (resume)
+ return 0;
+
+ err = mt7921u_dma_rx_evt_ep4(dev);
+ if (err)
+ return err;
+
+ mt7921u_epctl_rst_opt(dev, false);
+
+ return 0;
+}
+
+int mt7921u_wfsys_reset(struct mt7921_dev *dev)
+{
+ u32 val;
+ int i;
+
+ mt7921u_epctl_rst_opt(dev, false);
+
+ val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+ mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+ usleep_range(10, 20);
+
+ val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+ mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+ mt7921u_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+ for (i = 0; i < MT7921_WFSYS_INIT_RETRY_COUNT; i++) {
+ val = mt7921u_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
+ if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
+ break;
+
+ msleep(100);
+ }
+
+ if (i == MT7921_WFSYS_INIT_RETRY_COUNT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mt7921u_init_reset(struct mt7921_dev *dev)
+{
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ mt7921u_wfsys_reset(dev);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ return mt76u_resume_rx(&dev->mt76);
+}
+
+int mt7921u_mac_reset(struct mt7921_dev *dev)
+{
+ int err;
+
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&dev->mt76.tx_worker);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ mt7921u_wfsys_reset(dev);
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err)
+ goto out;
+
+ err = mt7921u_mcu_power_on(dev);
+ if (err)
+ goto out;
+
+ err = mt7921u_dma_init(dev, false);
+ if (err)
+ goto out;
+
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ err = mt7921_run_firmware(dev);
+ if (err)
+ goto out;
+
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ err = mt7921_mcu_set_eeprom(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mac_init(dev);
+ if (err)
+ goto out;
+
+ err = __mt7921_start(&dev->phy);
+out:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return err;
+}
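mt7921u_dma_prefetch() above programs the same MAX_CNT/BASE_PTR pair for seven TX rings long-hand. Written as a table walk it is easier to see the base pointer advancing in 0x40 steps for rings 0-4 and jumping for the MCU rings 16/17; the register writes below match the diff, while the helper name and table are illustrative:

static void dma_prefetch_sketch(struct mt7921_dev *dev)
{
	static const struct {
		u8 ring;
		u16 base;
	} cfg[] = {
		{ 0, 0x080 }, { 1, 0x0c0 }, { 2, 0x100 }, { 3, 0x140 },
		{ 4, 0x180 }, { 16, 0x280 }, { 17, 0x2c0 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(cfg[i].ring),
			 MT_WPDMA0_MAX_CNT_MASK, 4);
		mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(cfg[i].ring),
			 MT_WPDMA0_BASE_PTR_MASK, cfg[i].base);
	}
}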
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 54f72d215948..0ec308f99af5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -12,6 +12,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>
@@ -348,7 +350,6 @@ int mt76s_alloc_tx(struct mt76_dev *dev)
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = i;
dev->phy.q_tx[i] = q;
}
@@ -356,7 +357,6 @@ int mt76s_alloc_tx(struct mt76_dev *dev)
if (IS_ERR(q))
return PTR_ERR(q);
- q->qid = MT_MCUQ_WM;
dev->q_mcu[MT_MCUQ_WM] = q;
return 0;
@@ -478,14 +478,14 @@ static void mt76s_status_worker(struct mt76_worker *w)
if (ndata_frames > 0)
resched = true;
- if (dev->drv->tx_status_data &&
+ if (dev->drv->tx_status_data && ndata_frames > 0 &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
!test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
- queue_work(dev->wq, &dev->sdio.stat_work);
+ ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
} while (nframes > 0);
if (resched)
- mt76_worker_schedule(&dev->sdio.txrx_worker);
+ mt76_worker_schedule(&dev->tx_worker);
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -508,15 +508,15 @@ static void mt76s_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
- queue_work(dev->wq, &sdio->stat_work);
+ ieee80211_queue_work(dev->hw, &sdio->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
@@ -528,7 +528,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOSPC;
skb->prev = skb->next = NULL;
- err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
@@ -627,6 +627,7 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
+ u32 host_max_cap;
int err;
err = mt76_worker_setup(dev->hw, &sdio->status_worker,
@@ -648,7 +649,16 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
dev->bus = bus_ops;
dev->sdio.func = func;
- return 0;
+ host_max_cap = min_t(u32, func->card->host->max_req_size,
+ func->cur_blksize *
+ func->card->host->max_blk_count);
+ dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
+ dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
+ GFP_KERNEL);
+ if (!dev->sdio.xmit_buf)
+ err = -ENOMEM;
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);
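mt76s_init() now allocates one shared xmit_buf instead of a MT76S_XMIT_BUF_SZ buffer per queue (the per-queue q->qid bookkeeping goes away with it), and caps the allocation at what the host controller can move in a single request. The sizing rule as a sketch (xmit_buf_size() is an illustrative helper):

/* Never ask the host controller for more than one sdio_writesb()
 * transfer can carry: min(max request size, blocks * block size),
 * further capped by the driver's own MT76S_XMIT_BUF_SZ.
 */
static u32 xmit_buf_size(struct sdio_func *func)
{
	u32 host_max_cap = min_t(u32, func->card->host->max_req_size,
				 func->cur_blksize *
				 func->card->host->max_blk_count);

	return min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
}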
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.h b/drivers/net/wireless/mediatek/mt76/sdio.h
index 99db4ad93b7c..27d5d2077eba 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.h
+++ b/drivers/net/wireless/mediatek/mt76/sdio.h
@@ -65,6 +65,7 @@
#define MCR_H2DSM0R 0x0070
#define H2D_SW_INT_READ BIT(16)
#define H2D_SW_INT_WRITE BIT(17)
+#define H2D_SW_INT_CLEAR_MAILBOX_ACK BIT(22)
#define MCR_H2DSM1R 0x0074
#define MCR_D2HRM0R 0x0078
@@ -109,6 +110,7 @@
#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
+#define D2HRM3R_IS_DRIVER_OWN BIT(0)
#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 801590a0a334..bfc4de50a4d2 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -85,7 +85,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i;
struct page *page;
- u8 *buf;
+ u8 *buf, *end;
for (i = 0; i < intr->rx.num[qid]; i++)
len += round_up(intr->rx.len[qid][i] + 4, 4);
@@ -102,27 +102,39 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
put_page(page);
return err;
}
- for (i = 0; i < intr->rx.num[qid]; i++) {
+ end = buf + len;
+ i = 0;
+
+ while (i < intr->rx.num[qid] && buf < end) {
int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
__le32 *rxd = (__le32 *)buf;
/* parse rxd to get the actual packet length */
- len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(rxd[0]));
- e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
- if (!e->skb)
- break;
-
+ len = le32_get_bits(rxd[0], GENMASK(15, 0));
+
+ /* Optimized path for TXS */
+ if (!dev->drv->rx_check || dev->drv->rx_check(dev, buf, len)) {
+ e->skb = mt76s_build_rx_skb(buf, len,
+ round_up(len + 4, 4));
+ if (!e->skb)
+ break;
+
+ if (q->queued + i + 1 == q->ndesc)
+ break;
+ i++;
+ }
buf += round_up(len + 4, 4);
- if (q->queued + i + 1 == q->ndesc)
- break;
}
put_page(page);
@@ -214,7 +226,10 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
+ sdio_claim_host(sdio->func);
err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
if (err)
dev_err(dev->dev, "sdio write failed: %d\n", err);
@@ -223,12 +238,11 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
u8 pad;
- qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
@@ -249,27 +263,25 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
}
pad = roundup(e->skb->len, 4) - e->skb->len;
- if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
break;
if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
&ple_sz))
break;
- memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
- skb_headlen(e->skb));
+ memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
len += skb_headlen(e->skb);
nframes++;
skb_walk_frags(e->skb, iter) {
- memcpy(sdio->xmit_buf[qid] + len, iter->data,
- iter->len);
+ memcpy(sdio->xmit_buf + len, iter->data, iter->len);
len += iter->len;
nframes++;
}
if (unlikely(pad)) {
- memset(sdio->xmit_buf[qid] + len, 0, pad);
+ memset(sdio->xmit_buf + len, 0, pad);
len += pad;
}
next:
@@ -278,8 +290,8 @@ next:
}
if (nframes) {
- memset(sdio->xmit_buf[qid] + len, 0, 4);
- err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ memset(sdio->xmit_buf + len, 0, 4);
+ err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
if (err)
return err;
}
@@ -298,6 +310,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
do {
nframes = 0;
@@ -327,6 +340,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
} while (nframes > 0);
/* enable interrupt */
+ sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
}
@@ -341,6 +355,7 @@ void mt76s_sdio_irq(struct sdio_func *func)
test_bit(MT76_MCU_RESET, &dev->phy.state))
return;
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
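sdio_txrx.c tightens host ownership: each sdio_readsb()/sdio_writesb() is bracketed by sdio_claim_host()/sdio_release_host() rather than the worker holding the host across its whole loop, and the RX path gains the optional dev->drv->rx_check() hook so status-only frames (e.g. TXS) can be consumed without building an skb. The claim/transfer/release shape as a sketch (read_block() is an illustrative name):

/* SDIO block I/O must own the host; claim it only around the transfer
 * so the irq handler and other workers can interleave.
 */
static int read_block(struct mt76_sdio *sdio, u8 *buf, int len, int qid)
{
	int err;

	sdio_claim_host(sdio->func);
	err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
	sdio_release_host(sdio->func);

	return err;
}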
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 1a01ad7a4c16..0accc71a91c9 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/random.h>
#include "mt76.h"
const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
@@ -50,8 +52,8 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
q->queued < q->ndesc / 2) {
int ret;
- ret = dev->queue_ops->tx_queue_skb(dev, q, skb_get(skb), wcid,
- NULL);
+ ret = dev->queue_ops->tx_queue_skb(dev, q, qid, skb_get(skb),
+ wcid, NULL);
if (ret < 0)
break;
@@ -101,7 +103,6 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_FROMDS;
struct mt76_testmode_data *td = &phy->test;
- bool ext_phy = phy != &phy->dev->phy;
struct sk_buff **frag_tail, *head;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
@@ -124,21 +125,21 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
if (!head)
return -ENOMEM;
- hdr = __skb_put_zero(head, head_len);
+ hdr = __skb_put_zero(head, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
skb_set_queue_mapping(head, IEEE80211_AC_BE);
+ get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
+ head_len - sizeof(*hdr));
info = IEEE80211_SKB_CB(head);
info->flags = IEEE80211_TX_CTL_INJECTED |
IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_NO_PS_BUFFER;
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
-
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
frag_tail = &skb_shinfo(head)->frag_list;
for (i = 0; i < nfrags; i++) {
@@ -157,7 +158,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
return -ENOMEM;
}
- __skb_put_zero(frag, frag_len);
+ get_random_bytes(__skb_put(frag, frag_len), frag_len);
head->len += frag->len;
head->data_len += frag->len;
@@ -409,7 +410,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &phy->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS];
- bool ext_phy = phy != &dev->phy;
u32 state;
int err;
int i;
@@ -447,8 +447,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
- mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask,
- 1 << (ext_phy * 2), phy->antenna_mask << (ext_phy * 2)) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA],
+ &td->tx_antenna_mask, 0, 0xff) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
&td->tx_duty_cycle, 0, 99) ||
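
The testmode hunks above replace the all-zero payload with random bytes in both the head and every fragment, and drop the ext_phy queue flag in favour of FIELD_PREP over the band index. A hedged sketch of the random-fill step, assuming the caller already reserved the tailroom (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/random.h>
#include <linux/skbuff.h>

/* fill len bytes of skb tail with random payload, as the hunk does for
 * the head and each fragment
 */
static int example_fill_random(struct sk_buff *skb, unsigned int len)
{
	if (skb_tailroom(skb) < (int)len)
		return -ENOSPC;

	get_random_bytes(__skb_put(skb, len), len);
	return 0;
}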
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 6b8c9dc80542..6c054850363f 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -60,15 +60,20 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
.skb = skb,
.info = IEEE80211_SKB_CB(skb),
};
+ struct ieee80211_rate_status rs = {};
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
struct mt76_wcid *wcid;
wcid = rcu_dereference(dev->wcid[cb->wcid]);
if (wcid) {
status.sta = wcid_to_sta(wcid);
-
- if (status.sta)
- status.rate = &wcid->rate;
+ if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
+ rs.rate_idx = wcid->rate;
+ status.rates = &rs;
+ status.n_rates = 1;
+ } else {
+ status.n_rates = 0;
+ }
}
hw = mt76_tx_status_get_hw(dev, skb);
@@ -120,7 +125,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
memset(cb, 0, sizeof(*cb));
- if (!wcid)
+ if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
return MT_PACKET_ID_NO_ACK;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
@@ -285,7 +290,7 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
int idx;
non_aql = !info->tx_time_est;
- idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
if (idx < 0 || !sta)
return idx;
@@ -311,7 +316,6 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt76_queue *q;
int qid = skb_get_queue_mapping(skb);
- bool ext_phy = phy != &dev->phy;
if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
@@ -328,16 +332,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
qid = MT_TXQ_PSD;
- skb_set_queue_mapping(skb, qid);
}
if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
-
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
q = phy->q_tx[qid];
spin_lock_bh(&q->lock);
@@ -352,7 +353,6 @@ mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_tx_info *info;
- bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
skb = ieee80211_tx_dequeue(phy->hw, txq);
@@ -360,8 +360,7 @@ mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
return NULL;
info = IEEE80211_SKB_CB(skb);
- if (ext_phy)
- info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
return skb;
}
@@ -436,12 +435,11 @@ mt76_txq_stopped(struct mt76_queue *q)
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
- struct mt76_txq *mtxq)
+ struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
- struct mt76_wcid *wcid = mtxq->wcid;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1;
@@ -463,7 +461,9 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
+ spin_lock(&q->lock);
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
+ spin_unlock(&q->lock);
if (idx < 0)
return idx;
@@ -483,14 +483,18 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
+ spin_lock(&q->lock);
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
+ spin_unlock(&q->lock);
if (idx < 0)
break;
n_frames++;
} while (1);
+ spin_lock(&q->lock);
dev->queue_ops->kick(dev, q);
+ spin_unlock(&q->lock);
return n_frames;
}
@@ -521,12 +525,10 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
break;
mtxq = (struct mt76_txq *)txq->drv_priv;
- wcid = mtxq->wcid;
- if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
+ if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
continue;
- spin_lock_bh(&q->lock);
-
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -535,15 +537,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
u8 tid = txq->tid;
mtxq->send_bar = false;
- spin_unlock_bh(&q->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
- spin_lock_bh(&q->lock);
}
if (!mt76_txq_stopped(q))
- n_frames = mt76_txq_send_burst(phy, q, mtxq);
-
- spin_unlock_bh(&q->lock);
+ n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);
ieee80211_return_txq(phy->hw, txq, false);
@@ -563,6 +561,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
if (qid >= 4)
return;
+ local_bh_disable();
rcu_read_lock();
do {
@@ -572,6 +571,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
} while (len > 0);
rcu_read_unlock();
+ local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
@@ -586,15 +586,25 @@ EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
void mt76_tx_worker_run(struct mt76_dev *dev)
{
- mt76_txq_schedule_all(&dev->phy);
- if (dev->phy2)
- mt76_txq_schedule_all(dev->phy2);
+ struct mt76_phy *phy;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ phy = dev->phys[i];
+ if (!phy)
+ continue;
+
+ mt76_txq_schedule_all(phy);
+ }
#ifdef CONFIG_NL80211_TESTMODE
- if (dev->phy.test.tx_pending)
- mt76_testmode_tx_pending(&dev->phy);
- if (dev->phy2 && dev->phy2->test.tx_pending)
- mt76_testmode_tx_pending(dev->phy2);
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+ phy = dev->phys[i];
+ if (!phy || !phy->test.tx_pending)
+ continue;
+
+ mt76_testmode_tx_pending(phy);
+ }
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);
@@ -697,17 +707,23 @@ EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
- struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
- struct mt76_queue *q, *q2 = NULL;
+ struct mt76_phy *phy = &dev->phy;
+ struct mt76_queue *q = phy->q_tx[0];
- q = phy->q_tx[0];
if (blocked == q->blocked)
return;
q->blocked = blocked;
- if (phy2) {
- q2 = phy2->q_tx[0];
- q2->blocked = blocked;
+
+ phy = dev->phys[MT_BAND1];
+ if (phy) {
+ q = phy->q_tx[0];
+ q->blocked = blocked;
+ }
+ phy = dev->phys[MT_BAND2];
+ if (phy) {
+ q = phy->q_tx[0];
+ q->blocked = blocked;
}
if (!blocked)
@@ -721,12 +737,17 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
spin_lock_bh(&dev->token_lock);
- token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
- GFP_ATOMIC);
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
if (token >= 0)
dev->token_count++;
- if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ token >= dev->mmio.wed.wlan.token_start)
+ dev->wed_token_count++;
+#endif
+
+ if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
__mt76_set_tx_blocked(dev, true);
spin_unlock_bh(&dev->token_lock);
@@ -743,10 +764,18 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, token);
- if (txwi)
+ if (txwi) {
dev->token_count--;
- if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ token >= dev->mmio.wed.wlan.token_start &&
+ --dev->wed_token_count == 0)
+ wake_up(&dev->tx_wait);
+#endif
+ }
+
+ if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
dev->phy.q_tx[0]->blocked)
*wake = true;
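
The token hunks track in-flight TXWIs in an idr under token_lock and block the tx queues once fewer than MT76_TOKEN_FREE_THR slots remain, with an extra WED counter behind CONFIG_NET_MEDIATEK_SOC_WED. A minimal sketch of the core bookkeeping with invented example_* names; only the idr and spinlock calls are real APIs:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define EXAMPLE_TOKEN_FREE_THR	64	/* stands in for MT76_TOKEN_FREE_THR */

struct example_tokens {
	struct idr idr;
	spinlock_t lock;
	int count;
	int size;
};

/* allocate a token; *block tells the caller to stop the tx queues */
static int example_token_get(struct example_tokens *t, void *ptr, bool *block)
{
	int token;

	spin_lock_bh(&t->lock);
	token = idr_alloc(&t->idr, ptr, 0, t->size, GFP_ATOMIC);
	if (token >= 0)
		t->count++;
	*block = t->count >= t->size - EXAMPLE_TOKEN_FREE_THR;
	spin_unlock_bh(&t->lock);

	return token;
}

/* release a token; *wake tells the caller it may unblock the queues */
static void *example_token_put(struct example_tokens *t, int token, bool *wake)
{
	void *ptr;

	spin_lock_bh(&t->lock);
	ptr = idr_remove(&t->idr, token);
	if (ptr)
		t->count--;
	*wake = t->count < t->size - EXAMPLE_TOKEN_FREE_THR;
	spin_unlock_bh(&t->lock);

	return ptr;
}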
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 0a7006c8959b..4c4033bb1bb3 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -15,9 +15,8 @@ static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
-static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
- u8 req_type, u16 val, u16 offset,
- void *buf, size_t len)
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t len)
{
struct usb_interface *uintf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(uintf);
@@ -45,6 +44,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
req, offset, ret);
return ret;
}
+EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
@@ -62,22 +62,21 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
int ret;
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- addr >> 16, addr, usb->data,
- sizeof(__le32));
+ ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+ addr, usb->data, sizeof(__le32));
if (ret == sizeof(__le32))
data = get_unaligned_le32(usb->data);
trace_usb_reg_rr(dev, addr, data);
return data;
}
+EXPORT_SYMBOL_GPL(___mt76u_rr);
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
@@ -95,7 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
break;
}
- return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
+ return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
+ addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -109,29 +109,17 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
-{
- u32 ret;
-
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
- return ret;
-}
-
-static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
- u32 addr, u32 val)
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
put_unaligned_le32(val, usb->data);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- addr >> 16, addr, usb->data,
- sizeof(__le32));
+ __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+ addr, usb->data, sizeof(__le32));
trace_usb_reg_wr(dev, addr, val);
}
+EXPORT_SYMBOL_GPL(___mt76u_wr);
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
@@ -145,7 +133,8 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
req = MT_VEND_MULTI_WRITE;
break;
}
- ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
+ ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -155,13 +144,6 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
-static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
-{
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-}
-
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
{
@@ -173,17 +155,6 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
return val;
}
-static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
- u32 mask, u32 val)
-{
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
- ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
- return val;
-}
-
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
@@ -216,33 +187,8 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset,
mutex_unlock(&usb->usb_ctrl_mtx);
}
-static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
- const void *data, int len)
-{
- struct mt76_usb *usb = &dev->usb;
- int ret, i = 0, batch_len;
- const u8 *val = data;
-
- len = round_up(len, 4);
- mutex_lock(&usb->usb_ctrl_mtx);
- while (i < len) {
- batch_len = min_t(int, usb->data_len, len - i);
- memcpy(usb->data, val + i, batch_len);
- ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- (offset + i) >> 16, offset + i,
- usb->data, batch_len);
- if (ret < 0)
- break;
-
- i += batch_len;
- }
- mutex_unlock(&usb->usb_ctrl_mtx);
-}
-
-static void
-mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
- void *data, int len)
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
int i = 0, batch_len, ret;
@@ -264,6 +210,7 @@ mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
+EXPORT_SYMBOL_GPL(mt76u_read_copy);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val)
@@ -581,6 +528,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
data_len = min_t(int, len, data_len - head_room);
+
+ if (len == data_len &&
+ dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
+ return 0;
+
skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
@@ -898,8 +850,8 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+ enum mt76_txq_id qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
@@ -911,7 +863,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOSPC;
skb->prev = skb->next = NULL;
- err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
@@ -990,7 +942,6 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
spin_lock_init(&q->lock);
q->hw_idx = mt76u_ac_to_hwq(dev, i);
- q->qid = i;
dev->phy.q_tx[i] = q;
@@ -1112,27 +1063,16 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
-int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf, bool ext)
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ struct mt76_bus_ops *ops)
{
- static struct mt76_bus_ops mt76u_ops = {
- .read_copy = mt76u_read_copy_ext,
- .wr_rp = mt76u_wr_rp,
- .rd_rp = mt76u_rd_rp,
- .type = MT76_BUS_USB,
- };
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
int err;
- mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
- mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
- mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
- mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
-
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
if (usb->data_len < 32)
usb->data_len = 32;
@@ -1141,7 +1081,7 @@ int mt76u_init(struct mt76_dev *dev,
return -ENOMEM;
mutex_init(&usb->usb_ctrl_mtx);
- dev->bus = &mt76u_ops;
+ dev->bus = ops;
dev->queue_ops = &usb_queue_ops;
dev_set_drvdata(&udev->dev, dev);
@@ -1167,6 +1107,23 @@ int mt76u_init(struct mt76_dev *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(__mt76u_init);
+
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
+{
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt76u_copy,
+ .wr_rp = mt76u_wr_rp,
+ .rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
+ };
+
+ return __mt76u_init(dev, intf, &bus_ops);
+}
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
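
__mt76u_init() above turns the bus accessors into a vtable supplied by the caller, so connac-style variants can plug in their own rr/wr/copy helpers instead of the removed *_ext duplicates. A self-contained user-space sketch of that ops-table pattern; all example_* and mock_* names are invented:

#include <stdint.h>
#include <stdio.h>

struct example_bus_ops {
	uint32_t (*rr)(void *ctx, uint32_t addr);
	void (*wr)(void *ctx, uint32_t addr, uint32_t val);
};

struct example_dev {
	const struct example_bus_ops *bus;
	uint32_t regs[16];	/* fake register file for the demo */
};

static uint32_t mock_rr(void *ctx, uint32_t addr)
{
	return ((struct example_dev *)ctx)->regs[addr & 0xf];
}

static void mock_wr(void *ctx, uint32_t addr, uint32_t val)
{
	((struct example_dev *)ctx)->regs[addr & 0xf] = val;
}

static const struct example_bus_ops mock_ops = {
	.rr = mock_rr,
	.wr = mock_wr,
};

int main(void)
{
	struct example_dev dev = { .bus = &mock_ops };

	/* the core only ever goes through dev.bus, as mt76 does after the split */
	dev.bus->wr(&dev, 3, 0xdead);
	printf("0x%x\n", dev.bus->rr(&dev, 3));	/* prints 0xdead */
	return 0;
}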
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 20669eacb66e..230b0e1061a7 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -88,7 +88,7 @@ mt7601u_eeprom_param_show(struct seq_file *file, void *data)
dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
- seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ seq_printf(file, "Reg channels: %hhu-%d\n", dev->ee->reg.start,
dev->ee->reg.start + dev->ee->reg.num - 1);
seq_puts(file, "Per rate power:\n");
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index aa3b64902cf9..625bebe60538 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -188,7 +188,7 @@ mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
if (idx != -1)
dev_info(dev->dev,
- "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ "EEPROM country region %02x (channels %d-%d)\n",
val, chan_bounds[idx].start,
chan_bounds[idx].start + chan_bounds[idx].num - 1);
else
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index d2ee1aaa3c81..ca9cf628eb10 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -385,7 +385,7 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
msta = container_of(wcid, struct mt76_sta, wcid);
sta = container_of(msta, struct ieee80211_sta, drv_priv);
- min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ min_factor = min(min_factor, sta->deflink.ht_cap.ampdu_factor);
}
rcu_read_unlock();
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 671d8897ae76..6c9c7a61c5c9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -132,7 +132,7 @@ mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
static void
mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+ struct ieee80211_bss_conf *info, u64 changed)
{
struct mt7601u_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index a122f1dd38f6..118d43707853 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -368,7 +368,8 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
void mt7601u_tx_stat(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 8a00f6a75ca9..d4cd2215aba9 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -1097,7 +1097,10 @@ static void mt7601u_phy_freq_cal(struct work_struct *work)
void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
struct ieee80211_bss_conf *info)
{
- if (!info->assoc)
+ struct ieee80211_vif *vif = container_of(info, struct ieee80211_vif,
+ bss_conf);
+
+ if (!vif->cfg.assoc)
cancel_delayed_work_sync(&dev->freq_cal.work);
/* Start/stop collecting beacon data */
@@ -1108,10 +1111,10 @@ void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
spin_unlock_bh(&dev->con_mon_lock);
dev->freq_cal.freq = dev->ee->rf_freq_off;
- dev->freq_cal.enabled = info->assoc;
+ dev->freq_cal.enabled = vif->cfg.assoc;
dev->freq_cal.adjusting = false;
- if (info->assoc)
+ if (vif->cfg.assoc)
ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
MT_FREQ_CAL_INIT_DELAY);
}
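
The phy.c hunk recovers the vif from its embedded bss_conf with container_of(), because mac80211 moved the assoc flag from bss_conf into vif->cfg. A user-space illustration of that pointer arithmetic; the struct layout is invented for the demo:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vif {
	int assoc;			/* stands in for vif->cfg.assoc */
	struct { int dummy; } bss_conf;	/* embedded member, as in ieee80211_vif */
};

int main(void)
{
	struct vif v = { .assoc = 1 };
	struct vif *back = container_of(&v.bss_conf, struct vif, bss_conf);

	printf("%d\n", back->assoc);	/* prints 1 */
	return 0;
}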
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index f3dff8319a4c..51d977ffc52f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -163,7 +163,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
- ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size <<= sta->deflink.ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
@@ -172,7 +172,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
txwi->flags =
cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density));
+ sta->deflink.ht_cap.ampdu_density));
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->flags = 0;
}
@@ -258,7 +258,8 @@ void mt7601u_tx_stat(struct work_struct *work)
}
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct mt7601u_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
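
The TXWI hunks only switch the HT capability reads to sta->deflink; the block-ack sizing itself is unchanged: IEEE80211_MIN_AMPDU_BUF (8 in mac80211) shifted left by the peer's ampdu_factor, then clamped to 63. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int min_ampdu_buf = 8;	/* IEEE80211_MIN_AMPDU_BUF */
	int factor;

	for (factor = 0; factor <= 3; factor++) {	/* HT exponent range 0..3 */
		int ba = min_ampdu_buf << factor;

		if (ba > 63)
			ba = 63;
		printf("factor %d -> ba_size %d\n", factor, ba);
	}
	return 0;	/* prints 8, 16, 32, 63 */
}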
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 8d8378bafd9b..9bbfff803357 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -20,9 +20,11 @@
static const struct ieee80211_txrx_stypes
wilc_wfi_cfg80211_mgmt_types[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
- .tx = 0xffff,
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4)
},
[NL80211_IFTYPE_AP] = {
.tx = 0xffff,
@@ -305,6 +307,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
int ret;
u32 i;
u8 security = WILC_FW_SEC_NO;
+ enum mfptype mfp_type = WILC_FW_MFP_NONE;
enum authtype auth_type = WILC_FW_AUTH_ANY;
u32 cipher_group;
struct cfg80211_bss *bss;
@@ -313,32 +316,9 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
vif->connecting = true;
- memset(priv->wep_key, 0, sizeof(priv->wep_key));
- memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
-
cipher_group = sme->crypto.cipher_group;
if (cipher_group != 0) {
- if (cipher_group == WLAN_CIPHER_SUITE_WEP40) {
- security = WILC_FW_SEC_WEP;
-
- priv->wep_key_len[sme->key_idx] = sme->key_len;
- memcpy(priv->wep_key[sme->key_idx], sme->key,
- sme->key_len);
-
- wilc_set_wep_default_keyid(vif, sme->key_idx);
- wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
- sme->key_idx);
- } else if (cipher_group == WLAN_CIPHER_SUITE_WEP104) {
- security = WILC_FW_SEC_WEP_EXTENDED;
-
- priv->wep_key_len[sme->key_idx] = sme->key_len;
- memcpy(priv->wep_key[sme->key_idx], sme->key,
- sme->key_len);
-
- wilc_set_wep_default_keyid(vif, sme->key_idx);
- wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
- sme->key_idx);
- } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
if (cipher_group == WLAN_CIPHER_SUITE_TKIP)
security = WILC_FW_SEC_WPA2_TKIP;
else
@@ -373,8 +353,14 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
auth_type = WILC_FW_AUTH_OPEN_SYSTEM;
break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- auth_type = WILC_FW_AUTH_SHARED_KEY;
+ case NL80211_AUTHTYPE_SAE:
+ auth_type = WILC_FW_AUTH_SAE;
+ if (sme->ssid_len) {
+ memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len);
+ vif->auth.ssid.ssid_len = sme->ssid_len;
+ }
+ vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]);
+ ether_addr_copy(vif->auth.bssid, sme->bssid);
break;
default:
@@ -384,6 +370,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_akm_suites) {
if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X)
auth_type = WILC_FW_AUTH_IEEE8021;
+ else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_PSK_SHA256)
+ auth_type = WILC_FW_AUTH_OPEN_SYSTEM_SHA256;
+ else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X_SHA256)
+ auth_type = WILC_FW_AUTH_IEE8021X_SHA256;
}
if (wfi_drv->usr_scan_req.scan_result) {
@@ -427,6 +417,13 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
wfi_drv->conn_info.arg = priv;
wfi_drv->conn_info.param = join_params;
+ if (sme->mfp == NL80211_MFP_OPTIONAL)
+ mfp_type = WILC_FW_MFP_OPTIONAL;
+ else if (sme->mfp == NL80211_MFP_REQUIRED)
+ mfp_type = WILC_FW_MFP_REQUIRED;
+
+ wfi_drv->conn_info.mfp_type = mfp_type;
+
ret = wilc_set_join_req(vif, bss->bssid, sme->ie, sme->ie_len);
if (ret) {
netdev_err(dev, "wilc_set_join_req(): Error\n");
@@ -487,14 +484,6 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static inline void wilc_wfi_cfg_copy_wep_info(struct wilc_priv *priv,
- u8 key_index,
- struct key_params *params)
-{
- priv->wep_key_len[key_index] = params->key_len;
- memcpy(priv->wep_key[key_index], params->key, params->key_len);
-}
-
static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx)
{
if (!priv->wilc_gtk[idx]) {
@@ -514,6 +503,18 @@ static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx)
return 0;
}
+static int wilc_wfi_cfg_allocate_wpa_igtk_entry(struct wilc_priv *priv, u8 idx)
+{
+ idx -= 4;
+ if (!priv->wilc_igtk[idx]) {
+ priv->wilc_igtk[idx] = kzalloc(sizeof(*priv->wilc_igtk[idx]),
+ GFP_KERNEL);
+ if (!priv->wilc_igtk[idx])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info,
struct key_params *params)
{
@@ -539,8 +540,9 @@ static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info,
return 0;
}
-static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise, const u8 *mac_addr, struct key_params *params)
+static int add_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
{
int ret = 0, keylen = params->key_len;
@@ -550,35 +552,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
u8 op_mode;
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
+ struct wilc_wfi_key *key;
switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- if (priv->wdev.iftype == NL80211_IFTYPE_AP) {
- wilc_wfi_cfg_copy_wep_info(priv, key_index, params);
-
- if (params->cipher == WLAN_CIPHER_SUITE_WEP40)
- mode = WILC_FW_SEC_WEP;
- else
- mode = WILC_FW_SEC_WEP_EXTENDED;
-
- ret = wilc_add_wep_key_bss_ap(vif, params->key,
- params->key_len,
- key_index, mode,
- WILC_FW_AUTH_OPEN_SYSTEM);
- break;
- }
- if (memcmp(params->key, priv->wep_key[key_index],
- params->key_len)) {
- wilc_wfi_cfg_copy_wep_info(priv, key_index, params);
-
- ret = wilc_add_wep_key_bss_sta(vif, params->key,
- params->key_len,
- key_index);
- }
-
- break;
-
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
if (priv->wdev.iftype == NL80211_IFTYPE_AP ||
@@ -640,6 +616,26 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
key_index);
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ ret = wilc_wfi_cfg_allocate_wpa_igtk_entry(priv, key_index);
+ if (ret)
+ return -ENOMEM;
+
+ key = priv->wilc_igtk[key_index - 4];
+ ret = wilc_wfi_cfg_copy_wpa_info(key, params);
+ if (ret)
+ return -ENOMEM;
+
+ if (priv->wdev.iftype == NL80211_IFTYPE_AP ||
+ priv->wdev.iftype == NL80211_IFTYPE_P2P_GO)
+ op_mode = WILC_AP_MODE;
+ else
+ op_mode = WILC_STATION_MODE;
+
+ ret = wilc_add_igtk(vif, params->key, keylen, params->seq,
+ params->seq_len, mac_addr, op_mode,
+ key_index);
+ break;
default:
netdev_err(netdev, "%s: Unsupported cipher\n", __func__);
@@ -649,7 +645,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
return ret;
}
-static int del_key(struct wiphy *wiphy, struct net_device *netdev,
+static int del_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
u8 key_index,
bool pairwise,
const u8 *mac_addr)
@@ -657,37 +653,42 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
- if (priv->wilc_gtk[key_index]) {
- kfree(priv->wilc_gtk[key_index]->key);
- priv->wilc_gtk[key_index]->key = NULL;
- kfree(priv->wilc_gtk[key_index]->seq);
- priv->wilc_gtk[key_index]->seq = NULL;
-
- kfree(priv->wilc_gtk[key_index]);
- priv->wilc_gtk[key_index] = NULL;
- }
-
- if (priv->wilc_ptk[key_index]) {
- kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = NULL;
- kfree(priv->wilc_ptk[key_index]->seq);
- priv->wilc_ptk[key_index]->seq = NULL;
- kfree(priv->wilc_ptk[key_index]);
- priv->wilc_ptk[key_index] = NULL;
- }
-
- if (key_index <= 3 && priv->wep_key_len[key_index]) {
- memset(priv->wep_key[key_index], 0,
- priv->wep_key_len[key_index]);
- priv->wep_key_len[key_index] = 0;
- wilc_remove_wep_key(vif, key_index);
+ if (!pairwise && (key_index == 4 || key_index == 5)) {
+ key_index -= 4;
+ if (priv->wilc_igtk[key_index]) {
+ kfree(priv->wilc_igtk[key_index]->key);
+ priv->wilc_igtk[key_index]->key = NULL;
+ kfree(priv->wilc_igtk[key_index]->seq);
+ priv->wilc_igtk[key_index]->seq = NULL;
+ kfree(priv->wilc_igtk[key_index]);
+ priv->wilc_igtk[key_index] = NULL;
+ }
+ } else {
+ if (priv->wilc_gtk[key_index]) {
+ kfree(priv->wilc_gtk[key_index]->key);
+ priv->wilc_gtk[key_index]->key = NULL;
+ kfree(priv->wilc_gtk[key_index]->seq);
+ priv->wilc_gtk[key_index]->seq = NULL;
+
+ kfree(priv->wilc_gtk[key_index]);
+ priv->wilc_gtk[key_index] = NULL;
+ }
+ if (priv->wilc_ptk[key_index]) {
+ kfree(priv->wilc_ptk[key_index]->key);
+ priv->wilc_ptk[key_index]->key = NULL;
+ kfree(priv->wilc_ptk[key_index]->seq);
+ priv->wilc_ptk[key_index]->seq = NULL;
+ kfree(priv->wilc_ptk[key_index]);
+ priv->wilc_ptk[key_index] = NULL;
+ }
}
return 0;
}
-static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise, const u8 *mac_addr, void *cookie,
+static int get_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ void *cookie,
void (*callback)(void *cookie, struct key_params *))
{
struct wilc_vif *vif = netdev_priv(netdev);
@@ -695,11 +696,20 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
struct key_params key_params;
if (!pairwise) {
- key_params.key = priv->wilc_gtk[key_index]->key;
- key_params.cipher = priv->wilc_gtk[key_index]->cipher;
- key_params.key_len = priv->wilc_gtk[key_index]->key_len;
- key_params.seq = priv->wilc_gtk[key_index]->seq;
- key_params.seq_len = priv->wilc_gtk[key_index]->seq_len;
+ if (key_index == 4 || key_index == 5) {
+ key_index -= 4;
+ key_params.key = priv->wilc_igtk[key_index]->key;
+ key_params.cipher = priv->wilc_igtk[key_index]->cipher;
+ key_params.key_len = priv->wilc_igtk[key_index]->key_len;
+ key_params.seq = priv->wilc_igtk[key_index]->seq;
+ key_params.seq_len = priv->wilc_igtk[key_index]->seq_len;
+ } else {
+ key_params.key = priv->wilc_gtk[key_index]->key;
+ key_params.cipher = priv->wilc_gtk[key_index]->cipher;
+ key_params.key_len = priv->wilc_gtk[key_index]->key_len;
+ key_params.seq = priv->wilc_gtk[key_index]->seq;
+ key_params.seq_len = priv->wilc_gtk[key_index]->seq_len;
+ }
} else {
key_params.key = priv->wilc_ptk[key_index]->key;
key_params.cipher = priv->wilc_ptk[key_index]->cipher;
@@ -713,14 +723,20 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
return 0;
}
+/* wiphy_new_nm() will WARNON if not present */
+/* wiphy_new_nm() will WARN_ON() if not present */
static int set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
- struct wilc_vif *vif = netdev_priv(netdev);
+ return 0;
+}
- wilc_set_wep_default_keyid(vif, key_index);
+static int set_default_mgmt_key(struct wiphy *wiphy, struct net_device *netdev,
+ int link_id, u8 key_index)
+{
+ struct wilc_vif *vif = netdev_priv(netdev);
- return 0;
+ return wilc_set_default_mgmt_key_index(vif, key_index);
}
static int get_station(struct wiphy *wiphy, struct net_device *dev,
@@ -977,6 +993,17 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch)
}
}
+bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size)
+{
+ struct wilc *wl = vif->wilc;
+ struct wilc_priv *priv = &vif->priv;
+ int freq;
+
+ freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ);
+
+ return cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
+}
+
void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
{
struct wilc *wl = vif->wilc;
@@ -1134,7 +1161,7 @@ static int mgmt_tx(struct wiphy *wiphy,
const u8 *vendor_ie;
int ret = 0;
- *cookie = prandom_u32();
+ *cookie = get_random_u32();
priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
@@ -1162,8 +1189,14 @@ static int mgmt_tx(struct wiphy *wiphy,
goto out_txq_add_pkt;
}
- if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len))
+ if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len)) {
+ if (chan)
+ wilc_set_mac_chnl_num(vif, chan->hw_value);
+ else
+ wilc_set_mac_chnl_num(vif, vif->wilc->op_ch);
+
goto out_set_timeout;
+ }
d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action);
if (d->oui_type != WLAN_OUI_TYPE_WFA_P2P ||
@@ -1230,6 +1263,7 @@ void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy,
struct wilc_vif *vif = netdev_priv(wdev->netdev);
u32 presp_bit = BIT(IEEE80211_STYPE_PROBE_REQ >> 4);
u32 action_bit = BIT(IEEE80211_STYPE_ACTION >> 4);
+ u32 pauth_bit = BIT(IEEE80211_STYPE_AUTH >> 4);
if (wl->initialized) {
bool prev = vif->mgmt_reg_stypes & presp_bit;
@@ -1243,10 +1277,26 @@ void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy,
if (now != prev)
wilc_frame_register(vif, IEEE80211_STYPE_ACTION, now);
+
+ prev = vif->mgmt_reg_stypes & pauth_bit;
+ now = upd->interface_stypes & pauth_bit;
+ if (now != prev)
+ wilc_frame_register(vif, IEEE80211_STYPE_AUTH, now);
}
vif->mgmt_reg_stypes =
- upd->interface_stypes & (presp_bit | action_bit);
+ upd->interface_stypes & (presp_bit | action_bit | pauth_bit);
+}
+
+static int external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *auth)
+{
+ struct wilc_vif *vif = netdev_priv(dev);
+
+ if (auth->status == WLAN_STATUS_SUCCESS)
+ wilc_set_external_auth_param(vif, auth);
+
+ return 0;
}
static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
@@ -1264,12 +1314,11 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
if (idx != 0)
return -ENOENT;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
-
ret = wilc_get_rssi(vif, &sinfo->signal);
if (ret)
return ret;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
memcpy(mac, vif->priv.associated_bss, ETH_ALEN);
return 0;
}
@@ -1378,7 +1427,8 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
return wilc_add_beacon(vif, 0, 0, beacon);
}
-static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
int ret;
struct wilc_vif *vif = netdev_priv(dev);
@@ -1647,6 +1697,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.del_key = del_key,
.get_key = get_key,
.set_default_key = set_default_key,
+ .set_default_mgmt_key = set_default_mgmt_key,
.add_virtual_intf = add_virtual_intf,
.del_virtual_intf = del_virtual_intf,
.change_virtual_intf = change_virtual_intf,
@@ -1662,6 +1713,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.change_bss = change_bss,
.set_wiphy_params = set_wiphy_params,
+ .external_auth = external_auth,
.set_pmksa = set_pmksa,
.del_pmksa = del_pmksa,
.flush_pmksa = flush_pmksa,
@@ -1804,7 +1856,7 @@ struct wilc *wilc_create_wiphy(struct device *dev)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
-
+ wiphy->features |= NL80211_FEATURE_SAE;
set_wiphy_dev(wiphy, dev);
wl->wiphy = wiphy;
ret = wiphy_register(wiphy);
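
Across the add_key()/del_key()/get_key() hunks, cfg80211 group key indices 4 and 5 select the two IGTK slots, so the driver subtracts 4 before indexing wilc_igtk[]; indices 0..3 keep addressing the GTK/PTK entries. A user-space check of that mapping (helper name invented):

#include <stdbool.h>
#include <stdio.h>

/* map a cfg80211 key index to an IGTK slot, or -1 if it is not one */
static int igtk_slot(int key_index, bool pairwise)
{
	if (!pairwise && (key_index == 4 || key_index == 5))
		return key_index - 4;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", igtk_slot(4, false), igtk_slot(5, false),
	       igtk_slot(1, false));	/* prints 0 1 -1 */
	return 0;
}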
diff --git a/drivers/net/wireless/microchip/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h
index 1114530d03e4..5c5cac4aab02 100644
--- a/drivers/net/wireless/microchip/wilc1000/fw.h
+++ b/drivers/net/wireless/microchip/wilc1000/fw.h
@@ -41,21 +41,23 @@ struct wilc_drv_handler {
u8 mode;
} __packed;
-struct wilc_wep_key {
- u8 index;
+struct wilc_sta_wpa_ptk {
+ u8 mac_addr[ETH_ALEN];
u8 key_len;
u8 key[];
} __packed;
-struct wilc_sta_wpa_ptk {
+struct wilc_ap_wpa_ptk {
u8 mac_addr[ETH_ALEN];
+ u8 index;
u8 key_len;
u8 key[];
} __packed;
-struct wilc_ap_wpa_ptk {
- u8 mac_addr[ETH_ALEN];
+struct wilc_wpa_igtk {
u8 index;
+ u8 pn_len;
+ u8 pn[6];
u8 key_len;
u8 key[];
} __packed;
@@ -116,4 +118,13 @@ struct wilc_join_bss_param {
struct wilc_noa_opp_enable opp_en;
};
} __packed;
+
+struct wilc_external_auth_param {
+ u8 action;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ __le32 key_mgmt_suites;
+ __le16 status;
+} __packed;
#endif
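
These firmware messages end in flexible array members, so the size put on the wire is sizeof(header) plus the key length, matching the wid.size computation in hif.c. A standalone sketch with a stand-in for struct wilc_wpa_igtk, its field layout copied from the hunk above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct igtk_msg {
	unsigned char index;
	unsigned char pn_len;
	unsigned char pn[6];
	unsigned char key_len;
	unsigned char key[];	/* flexible array: key bytes follow the header */
} __attribute__((packed));

int main(void)
{
	unsigned char key[16] = { 0 };
	struct igtk_msg *m = calloc(1, sizeof(*m) + sizeof(key));

	if (!m)
		return 1;
	m->key_len = sizeof(key);
	memcpy(m->key, key, sizeof(key));
	/* 9-byte packed header + 16-byte key = 25 bytes on the wire */
	printf("wire size = %zu\n", sizeof(*m) + sizeof(key));
	free(m);
	return 0;
}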
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 71b44cfe0dfc..eb1d1ba3a443 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -271,12 +271,19 @@ error:
static int wilc_send_connect_wid(struct wilc_vif *vif)
{
int result = 0;
- struct wid wid_list[4];
+ struct wid wid_list[5];
u32 wid_cnt = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
struct wilc_conn_info *conn_attr = &hif_drv->conn_info;
struct wilc_join_bss_param *bss_param = conn_attr->param;
+
+ wid_list[wid_cnt].id = WID_SET_MFP;
+ wid_list[wid_cnt].type = WID_CHAR;
+ wid_list[wid_cnt].size = sizeof(char);
+ wid_list[wid_cnt].val = (s8 *)&conn_attr->mfp_type;
+ wid_cnt++;
+
wid_list[wid_cnt].id = WID_INFO_ELEMENT_ASSOCIATE;
wid_list[wid_cnt].type = WID_BIN_DATA;
wid_list[wid_cnt].val = conn_attr->req_ies;
@@ -306,7 +313,10 @@ static int wilc_send_connect_wid(struct wilc_vif *vif)
netdev_err(vif->ndev, "failed to send config packet\n");
goto error;
} else {
- hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
+ if (conn_attr->auth_type == WILC_FW_AUTH_SAE)
+ hif_drv->hif_state = HOST_IF_EXTERNAL_AUTH;
+ else
+ hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
}
return 0;
@@ -625,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
conn_info->req_ies_len = 0;
}
-static inline void host_int_handle_disconnect(struct wilc_vif *vif)
+void wilc_handle_disconnect(struct wilc_vif *vif)
{
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -637,8 +647,6 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
if (hif_drv->conn_info.conn_result)
hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
0, hif_drv->conn_info.arg);
- else
- netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
eth_zero_addr(hif_drv->assoc_bssid);
@@ -665,11 +673,16 @@ static void handle_rcvd_gnrl_async_info(struct work_struct *work)
goto free_msg;
}
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
+
+ if (hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH) {
+ cfg80211_external_auth_request(vif->ndev, &vif->auth,
+ GFP_KERNEL);
+ hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
+ } else if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
host_int_parse_assoc_resp_info(vif, mac_info->status);
} else if (mac_info->status == WILC_MAC_STATUS_DISCONNECTED) {
if (hif_drv->hif_state == HOST_IF_CONNECTED) {
- host_int_handle_disconnect(vif);
+ wilc_handle_disconnect(vif);
} else if (hif_drv->usr_scan_req.scan_result) {
del_timer(&hif_drv->scan_timer);
handle_scan_done(vif, SCAN_EVENT_ABORTED);
@@ -710,7 +723,8 @@ int wilc_disconnect(struct wilc_vif *vif)
}
if (conn_info->conn_result) {
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
+ if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
+ hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH)
del_timer(&hif_drv->connect_timer);
conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0,
@@ -800,15 +814,15 @@ static void wilc_hif_pack_sta_param(u8 *cur_byte, const u8 *mac,
put_unaligned_le16(params->aid, cur_byte);
cur_byte += 2;
- *cur_byte++ = params->supported_rates_len;
- if (params->supported_rates_len > 0)
- memcpy(cur_byte, params->supported_rates,
- params->supported_rates_len);
- cur_byte += params->supported_rates_len;
+ *cur_byte++ = params->link_sta_params.supported_rates_len;
+ if (params->link_sta_params.supported_rates_len > 0)
+ memcpy(cur_byte, params->link_sta_params.supported_rates,
+ params->link_sta_params.supported_rates_len);
+ cur_byte += params->link_sta_params.supported_rates_len;
- if (params->ht_capa) {
+ if (params->link_sta_params.ht_capa) {
*cur_byte++ = true;
- memcpy(cur_byte, params->ht_capa,
+ memcpy(cur_byte, params->link_sta_params.ht_capa,
sizeof(struct ieee80211_ht_cap));
} else {
*cur_byte++ = false;
@@ -986,6 +1000,31 @@ void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled)
pr_err("Failed to send wowlan trigger config packet\n");
}
+int wilc_set_external_auth_param(struct wilc_vif *vif,
+ struct cfg80211_external_auth_params *auth)
+{
+ int ret;
+ struct wid wid;
+ struct wilc_external_auth_param *param;
+
+ wid.id = WID_EXTERNAL_AUTH_PARAM;
+ wid.type = WID_BIN_DATA;
+ wid.size = sizeof(*param);
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return -ENOMEM;
+
+ wid.val = (u8 *)param;
+ param->action = auth->action;
+ ether_addr_copy(param->bssid, auth->bssid);
+ memcpy(param->ssid, auth->ssid.ssid, auth->ssid.ssid_len);
+ param->ssid_len = auth->ssid.ssid_len;
+ ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+
+ kfree(param);
+ return ret;
+}
+
static void handle_scan_timer(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
@@ -1038,108 +1077,6 @@ static void timer_connect_cb(struct timer_list *t)
kfree(msg);
}
-int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
-{
- struct wid wid;
- int result;
-
- wid.id = WID_REMOVE_WEP_KEY;
- wid.type = WID_STR;
- wid.size = sizeof(char);
- wid.val = &index;
-
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev,
- "Failed to send remove wep key config packet\n");
- return result;
-}
-
-int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
-{
- struct wid wid;
- int result;
-
- wid.id = WID_KEY_ID;
- wid.type = WID_CHAR;
- wid.size = sizeof(char);
- wid.val = &index;
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev,
- "Failed to send wep default key config packet\n");
-
- return result;
-}
-
-int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index)
-{
- struct wid wid;
- int result;
- struct wilc_wep_key *wep_key;
-
- wid.id = WID_ADD_WEP_KEY;
- wid.type = WID_STR;
- wid.size = sizeof(*wep_key) + len;
- wep_key = kzalloc(wid.size, GFP_KERNEL);
- if (!wep_key)
- return -ENOMEM;
-
- wid.val = (u8 *)wep_key;
-
- wep_key->index = index;
- wep_key->key_len = len;
- memcpy(wep_key->key, key, len);
-
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev,
- "Failed to add wep key config packet\n");
-
- kfree(wep_key);
- return result;
-}
-
-int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index, u8 mode, enum authtype auth_type)
-{
- struct wid wid_list[3];
- int result;
- struct wilc_wep_key *wep_key;
-
- wid_list[0].id = WID_11I_MODE;
- wid_list[0].type = WID_CHAR;
- wid_list[0].size = sizeof(char);
- wid_list[0].val = &mode;
-
- wid_list[1].id = WID_AUTH_TYPE;
- wid_list[1].type = WID_CHAR;
- wid_list[1].size = sizeof(char);
- wid_list[1].val = (s8 *)&auth_type;
-
- wid_list[2].id = WID_WEP_KEY_VALUE;
- wid_list[2].type = WID_STR;
- wid_list[2].size = sizeof(*wep_key) + len;
- wep_key = kzalloc(wid_list[2].size, GFP_KERNEL);
- if (!wep_key)
- return -ENOMEM;
-
- wid_list[2].val = (u8 *)wep_key;
-
- wep_key->index = index;
- wep_key->key_len = len;
- memcpy(wep_key->key, key, len);
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
- ARRAY_SIZE(wid_list));
- if (result)
- netdev_err(vif->ndev,
- "Failed to add wep ap key config packet\n");
-
- kfree(wep_key);
- return result;
-}
-
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index)
@@ -1211,6 +1148,36 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
return result;
}
+int wilc_add_igtk(struct wilc_vif *vif, const u8 *igtk, u8 igtk_key_len,
+ const u8 *pn, u8 pn_len, const u8 *mac_addr, u8 mode, u8 index)
+{
+ int result = 0;
+ u8 t_key_len = igtk_key_len;
+ struct wid wid;
+ struct wilc_wpa_igtk *key_buf;
+
+ key_buf = kzalloc(sizeof(*key_buf) + t_key_len, GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ key_buf->index = index;
+
+ memcpy(&key_buf->pn[0], pn, pn_len);
+ key_buf->pn_len = pn_len;
+
+ memcpy(&key_buf->key[0], igtk, igtk_key_len);
+ key_buf->key_len = t_key_len;
+
+ wid.id = WID_ADD_IGTK;
+ wid.type = WID_STR;
+ wid.size = sizeof(*key_buf) + t_key_len;
+ wid.val = (s8 *)key_buf;
+ result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ kfree(key_buf);
+
+ return result;
+}
+
int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 index, u32 key_rsc_len, const u8 *key_rsc,
const u8 *rx_mic, const u8 *tx_mic, u8 mode,
@@ -1749,6 +1716,10 @@ void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
reg_frame.reg_id = WILC_FW_PROBE_REQ_IDX;
break;
+ case IEEE80211_STYPE_AUTH:
+ reg_frame.reg_id = WILC_FW_AUTH_REQ_IDX;
+ break;
+
default:
break;
}
@@ -1826,7 +1797,8 @@ int wilc_add_station(struct wilc_vif *vif, const u8 *mac,
wid.id = WID_ADD_STA;
wid.type = WID_BIN;
- wid.size = WILC_ADD_STA_LENGTH + params->supported_rates_len;
+ wid.size = WILC_ADD_STA_LENGTH +
+ params->link_sta_params.supported_rates_len;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
return -ENOMEM;
@@ -1911,7 +1883,8 @@ int wilc_edit_station(struct wilc_vif *vif, const u8 *mac,
wid.id = WID_EDIT_STA;
wid.type = WID_BIN;
- wid.size = WILC_ADD_STA_LENGTH + params->supported_rates_len;
+ wid.size = WILC_ADD_STA_LENGTH +
+ params->link_sta_params.supported_rates_len;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
return -ENOMEM;
@@ -1996,3 +1969,20 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
return wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1);
}
+
+int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index)
+{
+ struct wid wid;
+ int result;
+
+ wid.id = WID_DEFAULT_MGMT_KEY_ID;
+ wid.type = WID_CHAR;
+ wid.size = sizeof(char);
+ wid.val = &index;
+ result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ if (result)
+ netdev_err(vif->ndev,
+ "Failed to send default mgmt key index\n");
+
+ return result;
+}
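
wilc_send_connect_wid() above grows wid_list[] to five entries and prepends a WID_SET_MFP descriptor, bumping wid_cnt per filled slot before a single wilc_send_config_pkt() call ships the whole array. A user-space mock of that accumulation pattern; the IDs and payloads are placeholders:

#include <stdio.h>

struct wid {
	int id;
	int size;
	void *val;
};

int main(void)
{
	char mfp = 1;
	char ssid[] = "example";
	struct wid wid_list[5];
	int wid_cnt = 0;

	wid_list[wid_cnt].id = 1;		/* stands in for WID_SET_MFP */
	wid_list[wid_cnt].size = sizeof(mfp);
	wid_list[wid_cnt].val = &mfp;
	wid_cnt++;

	wid_list[wid_cnt].id = 2;		/* stands in for an IE blob */
	wid_list[wid_cnt].size = sizeof(ssid);
	wid_list[wid_cnt].val = ssid;
	wid_cnt++;

	/* a real driver would hand wid_list/wid_cnt to wilc_send_config_pkt() */
	printf("sending %d wids, first size %d\n", wid_cnt, wid_list[0].size);
	return 0;
}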
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index cccd54ed0518..baa2881f4465 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -47,6 +47,7 @@ enum host_if_state {
HOST_IF_WAITING_CONN_RESP = 3,
HOST_IF_CONNECTED = 4,
HOST_IF_P2P_LISTEN = 5,
+ HOST_IF_EXTERNAL_AUTH = 6,
HOST_IF_FORCE_32BIT = 0xFFFFFFFF
};
@@ -107,6 +108,7 @@ struct wilc_conn_info {
u8 bssid[ETH_ALEN];
u8 security;
enum authtype auth_type;
+ enum mfptype mfp_type;
u8 ch;
u8 *req_ies;
size_t req_ies_len;
@@ -123,7 +125,7 @@ struct wilc_remain_ch {
u32 duration;
void (*expired)(void *priv, u64 cookie);
void *arg;
- u32 cookie;
+ u64 cookie;
};
struct wilc;
@@ -151,15 +153,12 @@ struct host_if_drv {
};
struct wilc_vif;
-int wilc_remove_wep_key(struct wilc_vif *vif, u8 index);
-int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index);
-int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index);
-int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index, u8 mode, enum authtype auth_type);
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index);
+int wilc_add_igtk(struct wilc_vif *vif, const u8 *igtk, u8 igtk_key_len,
+ const u8 *pn, u8 pn_len, const u8 *mac_addr, u8 mode,
+ u8 index);
s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
u32 *out_val);
int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
@@ -208,9 +207,14 @@ int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled);
+int wilc_set_external_auth_param(struct wilc_vif *vif,
+ struct cfg80211_external_auth_params *param);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto);
+int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
+void wilc_handle_disconnect(struct wilc_vif *vif);
+
#endif
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index 6bd63934c2d8..03b7229a0ff5 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -229,11 +229,11 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
return NULL;
wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP;
- strlcpy(wl->monitor_dev->name, name, IFNAMSIZ);
+ strscpy(wl->monitor_dev->name, name, IFNAMSIZ);
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
- if (cfg80211_register_netdevice(wl->monitor_dev)) {
+ if (register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
free_netdev(wl->monitor_dev);
return NULL;
@@ -251,7 +251,7 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked)
return;
if (rtnl_locked)
- cfg80211_unregister_netdevice(wl->monitor_dev);
+ unregister_netdevice(wl->monitor_dev);
else
unregister_netdev(wl->monitor_dev);
wl->monitor_dev = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 643bddaae32a..9b319a455b96 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -14,6 +14,7 @@
#include "wlan_cfg.h"
#define WILC_MULTICAST_TABLE_SIZE 8
+#define WILC_MAX_FW_VERSION_STR_SIZE 50
/* latest API version supported */
#define WILC1000_API_VER 1
@@ -96,12 +97,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
- if (vif->mode == WILC_STATION_MODE)
+ if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
- if (vif->mode == WILC_AP_MODE)
+ if (vif->iftype == WILC_AP_MODE)
if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
ndev = vif->ndev;
goto out;
@@ -121,7 +122,7 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
else
eth_zero_addr(vif->bssid);
- vif->mode = mode;
+ vif->iftype = mode;
}
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -471,7 +472,7 @@ static int wlan_initialize_threads(struct net_device *dev)
"%s-tx", dev->name);
if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
- wilc->close = 0;
+ wilc->close = 1;
return PTR_ERR(wilc->txq_thread);
}
wait_for_completion(&wilc->txq_thread_started);
@@ -522,7 +523,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
int size;
- char firmware_ver[20];
+ char firmware_ver[WILC_MAX_FW_VERSION_STR_SIZE];
size = wilc_wlan_cfg_get_val(wl, WID_FIRMWARE_VERSION,
firmware_ver,
@@ -779,6 +780,7 @@ static int wilc_mac_close(struct net_device *ndev)
if (vif->ndev) {
netif_stop_queue(vif->ndev);
+ wilc_handle_disconnect(vif);
wilc_deinit_host_int(vif->ndev);
}
@@ -834,15 +836,24 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
}
}
-void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
+void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
{
int srcu_idx;
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
u32 type_bit = BIT(type >> 4);
+ u32 auth_bit = BIT(IEEE80211_STYPE_AUTH >> 4);
+
+ if ((vif->mgmt_reg_stypes & auth_bit &&
+ ieee80211_is_auth(mgmt->frame_control)) &&
+ vif->iftype == WILC_STATION_MODE && is_auth) {
+ wilc_wfi_mgmt_frame_rx(vif, buff, size);
+ break;
+ }
if (vif->priv.p2p_listen_state &&
vif->mgmt_reg_stypes & type_bit)
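
The mgmt-rx dispatch above keys frame registrations on BIT(stype >> 4); with IEEE80211_STYPE_AUTH = 0x00b0 the new auth bit is BIT(0xb), alongside BIT(4) for probe requests. A standalone check of the bit positions:

#include <stdio.h>

int main(void)
{
	unsigned int stype_auth = 0x00b0;	/* IEEE80211_STYPE_AUTH */
	unsigned int stype_probe_req = 0x0040;	/* IEEE80211_STYPE_PROBE_REQ */

	printf("auth bit: 0x%x\n", 1u << (stype_auth >> 4));		/* 0x800 */
	printf("probe req bit: 0x%x\n", 1u << (stype_probe_req >> 4));	/* 0x10 */
	return 0;
}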
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index a067274c2014..bb1a315a7b7e 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -45,12 +45,6 @@ struct wilc_wfi_key {
u32 cipher;
};
-struct wilc_wfi_wep_key {
- u8 *key;
- u8 key_len;
- u8 key_idx;
-};
-
struct sta_info {
u8 sta_associated_bss[WILC_MAX_NUM_STA][ETH_ALEN];
};
@@ -63,8 +57,6 @@ struct wilc_wfi_p2p_listen_params {
};
static const u32 wilc_cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_AES_CMAC
@@ -132,13 +124,12 @@ struct wilc_priv {
struct net_device *dev;
struct host_if_drv *hif_drv;
struct wilc_pmkid_attr pmkid_list;
- u8 wep_key[4][WLAN_KEY_LEN_WEP104];
- u8 wep_key_len[4];
/* The real interface that the monitor is on */
struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[WILC_MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[WILC_MAX_NUM_STA];
+ struct wilc_wfi_key *wilc_igtk[2];
u8 wilc_groupkey;
/* mutexes */
@@ -186,7 +177,6 @@ struct wilc_vif {
u8 bssid[ETH_ALEN];
struct host_if_drv *hif_drv;
struct net_device *ndev;
- u8 mode;
struct timer_list during_ip_timer;
struct timer_list periodic_rssi;
struct rf_info periodic_stat;
@@ -195,6 +185,7 @@ struct wilc_vif {
struct wilc_priv priv;
struct list_head list;
struct cfg80211_bss *bss;
+ struct cfg80211_external_auth_params auth;
};
struct wilc_tx_queue_status {
@@ -254,6 +245,7 @@ struct wilc {
u8 *rx_buffer;
u32 rx_buffer_offset;
u8 *tx_buffer;
+ u32 *vmm_table;
struct txq_handle txq[NQUEUES];
int txq_entries;
@@ -288,7 +280,7 @@ struct wilc_wfi_mon_priv {
void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc);
void wilc_netdev_cleanup(struct wilc *wilc);
-void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
+void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth);
void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
u8 mode);
struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index ec595dbd8959..7390f94cd4ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -26,7 +26,9 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
struct wilc_sdio {
bool irq_gpio;
u32 block_size;
+ bool isinit;
int has_thrpt_enh3;
+ u8 *cmd53_buf;
};
struct sdio_cmd52 {
@@ -46,6 +48,7 @@ struct sdio_cmd53 {
u32 count: 9;
u8 *buffer;
u32 block_size;
+ bool use_global_buf;
};
static const struct wilc_hif_func wilc_hif_sdio;
@@ -90,6 +93,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+ u8 *buf = cmd->buffer;
sdio_claim_host(func);
@@ -100,12 +105,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
else
size = cmd->count;
+ if (cmd->use_global_buf) {
+ if (size > sizeof(u32))
+ return -EINVAL;
+
+ buf = sdio_priv->cmd53_buf;
+ }
+
if (cmd->read_write) { /* write */
- ret = sdio_memcpy_toio(func, cmd->address,
- (void *)cmd->buffer, size);
+ if (cmd->use_global_buf)
+ memcpy(buf, cmd->buffer, size);
+
+ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
} else { /* read */
- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
- cmd->address, size);
+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+ if (cmd->use_global_buf)
+ memcpy(cmd->buffer, buf, size);
}
sdio_release_host(func);
@@ -127,6 +143,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
if (!sdio_priv)
return -ENOMEM;
+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdio_priv->cmd53_buf) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret)
@@ -160,6 +182,7 @@ dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
free:
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
return ret;
}
@@ -171,6 +194,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
@@ -193,6 +217,13 @@ static int wilc_sdio_reset(struct wilc *wilc)
return 0;
}
+static bool wilc_sdio_is_init(struct wilc *wilc)
+{
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+ return sdio_priv->isinit;
+}
+
static int wilc_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -367,8 +398,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)&data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret)
@@ -406,6 +438,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -484,8 +517,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -527,6 +561,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -581,6 +616,9 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
static int wilc_sdio_deinit(struct wilc *wilc)
{
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+ sdio_priv->isinit = false;
return 0;
}
@@ -598,7 +636,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 1;
- cmd.address = SDIO_FBR_BASE(func->num);
+ cmd.address = SDIO_FBR_BASE(1);
cmd.data = SDIO_FBR_ENABLE_CSA;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
@@ -700,6 +738,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
sdio_priv->has_thrpt_enh3);
}
+ sdio_priv->isinit = true;
return 0;
}
@@ -981,6 +1020,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
.hif_reset = wilc_sdio_reset,
+ .hif_is_init = wilc_sdio_is_init,
};
static int wilc_sdio_resume(struct device *dev)
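
The new cmd53_buf is a bounce buffer for the word-sized register transfers above: sdio_memcpy_toio()/sdio_memcpy_fromio() may be backed by DMA, which is only safe from kmalloc'd memory, not from the caller's stack or struct-embedded storage. A reduced sketch of the read path (error handling trimmed; priv_buf stands in for sdio_priv->cmd53_buf):

	static int cmd53_read_word(struct sdio_func *func, u32 addr,
				   u32 *data, u8 *priv_buf)
	{
		int ret;

		/* DMA lands in the kmalloc'd bounce buffer ... */
		ret = sdio_memcpy_fromio(func, priv_buf, addr, sizeof(u32));
		if (!ret)
			/* ... and is copied to the caller afterwards */
			memcpy(data, priv_buf, sizeof(u32));
		return ret;
	}
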
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 2c2ed4b09efd..b0fc5e68feec 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -191,11 +191,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* deassert RESET: */
- gpiod_set_value(gpios->reset, 0);
- } else {
/* assert RESET: */
gpiod_set_value(gpios->reset, 1);
+ } else {
+ /* deassert RESET: */
+ gpiod_set_value(gpios->reset, 0);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
@@ -240,7 +240,7 @@ free:
return ret;
}
-static int wilc_bus_remove(struct spi_device *spi)
+static void wilc_bus_remove(struct spi_device *spi)
{
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
@@ -248,8 +248,6 @@ static int wilc_bus_remove(struct spi_device *spi)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
-
- return 0;
}
static const struct of_device_id wilc_of_match[] = {
@@ -727,10 +725,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
int nbytes;
u8 rsp;
- if (sz <= DATA_PKT_SZ)
- nbytes = sz;
- else
- nbytes = DATA_PKT_SZ;
+ nbytes = min_t(u32, sz, DATA_PKT_SZ);
/*
* Data Response header
@@ -1034,6 +1029,13 @@ static int wilc_spi_reset(struct wilc *wilc)
return result;
}
+static bool wilc_spi_is_init(struct wilc *wilc)
+{
+ struct wilc_spi *spi_priv = wilc->bus_data;
+
+ return spi_priv->isinit;
+}
+
static int wilc_spi_deinit(struct wilc *wilc)
{
struct wilc_spi *spi_priv = wilc->bus_data;
@@ -1255,4 +1257,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
.hif_block_rx_ext = wilc_spi_read,
.hif_sync_ext = wilc_spi_sync_ext,
.hif_reset = wilc_spi_reset,
+ .hif_is_init = wilc_spi_is_init,
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index fb5633a05fd5..58bbf50081e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
int ret = 0;
int counter;
int timeout;
- u32 vmm_table[WILC_VMM_TBL_SIZE];
+ u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
@@ -875,14 +875,15 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
char *bssid;
u8 mgmt_ptk = 0;
+ if (vmm_table[i] == 0 || vmm_entries_ac[i] >= NQUEUES)
+ break;
+
tqe = wilc_wlan_txq_remove_from_head(wilc, vmm_entries_ac[i]);
- ac_pkt_num_to_chip[vmm_entries_ac[i]]++;
if (!tqe)
break;
+ ac_pkt_num_to_chip[vmm_entries_ac[i]]++;
vif = tqe->vif;
- if (vmm_table[i] == 0)
- break;
le32_to_cpus(&vmm_table[i]);
vmm_sz = FIELD_GET(WILC_VMM_BUFFER_SIZE, vmm_table[i]);
@@ -967,7 +968,8 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
if (pkt_offset & IS_MANAGMEMENT) {
buff_ptr += HOST_HDR_OFFSET;
- wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len);
+ wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len,
+ pkt_offset & IS_MGMT_AUTH_PKT);
} else {
if (!is_cfg_packet) {
wilc_frmw_to_host(wilc, buff_ptr, pkt_len,
@@ -1250,6 +1252,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
@@ -1479,8 +1483,19 @@ int wilc_wlan_init(struct net_device *dev)
wilc->quit = 0;
- if (wilc->hif_func->hif_init(wilc, false)) {
- ret = -EIO;
+ if (!wilc->hif_func->hif_is_init(wilc)) {
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ ret = wilc->hif_func->hif_init(wilc, false);
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ if (ret)
+ goto fail;
+ }
+
+ if (!wilc->vmm_table)
+ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
goto fail;
}
@@ -1508,7 +1523,8 @@ int wilc_wlan_init(struct net_device *dev)
return 0;
fail:
-
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
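
Moving vmm_table off the stack matters because wilc_wlan_handle_txq() runs in a kthread with the usual small kernel stack; the table now lives on the heap for the lifetime of the device. The allocate-once shape, restated with a hypothetical helper:

	/* Hypothetical helper: allocate the table on first use; it is
	 * freed only in wilc_wlan_cleanup() or on the init error path.
	 */
	static int wilc_ensure_vmm_table(struct wilc *wilc)
	{
		if (wilc->vmm_table)
			return 0;
		wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
		return wilc->vmm_table ? 0 : -ENOBUFS;
	}
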
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index eb7978166d73..a72cd5cac81d 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -305,6 +305,7 @@
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
#define IS_MGMT_STATUS_SUCCES 0x040
+#define IS_MGMT_AUTH_PKT 0x010
#define WILC_WID_TYPE GENMASK(15, 12)
#define WILC_VMM_ENTRY_FULL_RETRY 1
@@ -372,6 +373,7 @@ struct wilc_hif_func {
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
int (*hif_reset)(struct wilc *wilc);
+ bool (*hif_is_init)(struct wilc *wilc);
};
#define WILC_MAX_CFG_FRAME_SIZE 1468
@@ -423,6 +425,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc);
netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size);
+bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size);
void host_wakeup_notify(struct wilc *wilc);
void host_sleep_notify(struct wilc *wilc);
void chip_allow_sleep(struct wilc *wilc);
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index dba301378b7f..131388886acb 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
+ {WID_TX_POWER, 0},
{WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
@@ -180,9 +181,10 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
i++;
if (cfg->s[i].id == wid)
- memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+ memcpy(cfg->s[i].str, &info[2],
+ get_unaligned_le16(&info[2]) + 2);
- len = 2 + info[2];
+ len = 2 + get_unaligned_le16(&info[2]);
break;
default:
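
The string-WID fix above matters for any response longer than 255 bytes: the length field at info[2] is a little-endian u16, and the old single-byte read silently truncated it. In isolation:

	#include <asm/unaligned.h>

	/* info[2..3] is a little-endian 16-bit payload length. */
	static u16 wid_str_len(const u8 *info)
	{
		return get_unaligned_le16(&info[2]);	/* full 16 bits */
		/* old, broken: info[2] alone caps the length at 255 */
	}
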
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_if.h b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
index 6eb7eb4ac294..df2f5a63bdf6 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_if.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
@@ -85,7 +85,16 @@ enum authtype {
WILC_FW_AUTH_OPEN_SYSTEM = 1,
WILC_FW_AUTH_SHARED_KEY = 2,
WILC_FW_AUTH_ANY = 3,
- WILC_FW_AUTH_IEEE8021 = 5
+ WILC_FW_AUTH_IEEE8021 = 5,
+ WILC_FW_AUTH_SAE = 7,
+ WILC_FW_AUTH_IEE8021X_SHA256 = 9,
+ WILC_FW_AUTH_OPEN_SYSTEM_SHA256 = 13
+};
+
+enum mfptype {
+ WILC_FW_MFP_NONE = 0x0,
+ WILC_FW_MFP_OPTIONAL = 0x1,
+ WILC_FW_MFP_REQUIRED = 0x2
};
enum site_survey {
@@ -176,7 +185,8 @@ enum {
enum {
WILC_FW_ACTION_FRM_IDX = 0,
- WILC_FW_PROBE_REQ_IDX = 1
+ WILC_FW_PROBE_REQ_IDX = 1,
+ WILC_FW_AUTH_REQ_IDX = 2
};
enum wid_type {
@@ -657,6 +667,9 @@ enum {
WID_LOG_TERMINAL_SWITCH = 0x00CD,
WID_TX_POWER = 0x00CE,
WID_WOWLAN_TRIGGER = 0X00CF,
+ WID_SET_MFP = 0x00D0,
+
+ WID_DEFAULT_MGMT_KEY_ID = 0x00D2,
/* EMAC Short WID list */
/* RTS Threshold */
/*
@@ -746,6 +759,7 @@ enum {
WID_REMOVE_KEY = 0x301E,
WID_ASSOC_REQ_INFO = 0x301F,
WID_ASSOC_RES_INFO = 0x3020,
+ WID_ADD_IGTK = 0x3022,
WID_MANUFACTURER = 0x3026, /* Added for CAPI tool */
WID_MODEL_NAME = 0x3027, /* Added for CAPI tool */
WID_MODEL_NUM = 0x3028, /* Added for CAPI tool */
@@ -789,7 +803,7 @@ enum {
WID_ADD_BEACON = 0x408a,
WID_SETUP_MULTICAST_FILTER = 0x408b,
-
+ WID_EXTERNAL_AUTH_PARAM = 0x408d,
/* Miscellaneous WIDs */
WID_ALL = 0x7FFE,
WID_MAX = 0xFFFF
diff --git a/drivers/net/wireless/purelifi/Kconfig b/drivers/net/wireless/purelifi/Kconfig
new file mode 100644
index 000000000000..e39afec3dcae
--- /dev/null
+++ b/drivers/net/wireless/purelifi/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WLAN_VENDOR_PURELIFI
+ bool "pureLiFi devices"
+ default y
+ help
+ If you have a pureLiFi device, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_PURELIFI
+
+source "drivers/net/wireless/purelifi/plfxlc/Kconfig"
+
+endif # WLAN_VENDOR_PURELIFI
diff --git a/drivers/net/wireless/purelifi/Makefile b/drivers/net/wireless/purelifi/Makefile
new file mode 100644
index 000000000000..0bd6880b022c
--- /dev/null
+++ b/drivers/net/wireless/purelifi/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLFXLC) := plfxlc/
diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig
new file mode 100644
index 000000000000..4e0be27a5e0e
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config PLFXLC
+ tristate "pureLiFi X, XL, XC device support"
+ depends on CFG80211 && MAC80211 && USB
+ help
+ This option adds support for pureLiFi LiFi wireless USB
+	  adapters. The pureLiFi X, XL and XC USB devices are based on
+	  the 802.11 OFDM PHY but use light as the transmission medium.
+ The driver supports common 802.11 encryption/authentication
+ methods including Open, WPA, WPA2-Personal and
+ WPA2-Enterprise (802.1X).
+
+	  To compile this driver as a module, choose M here. The module will
+ be called plfxlc.
diff --git a/drivers/net/wireless/purelifi/plfxlc/Makefile b/drivers/net/wireless/purelifi/plfxlc/Makefile
new file mode 100644
index 000000000000..7ed954e871d6
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLFXLC) := plfxlc.o
+plfxlc-objs += chip.o firmware.o usb.o mac.o
diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.c b/drivers/net/wireless/purelifi/plfxlc/chip.c
new file mode 100644
index 000000000000..f4ef9ff97146
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/chip.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "chip.h"
+#include "mac.h"
+#include "usb.h"
+
+void plfxlc_chip_init(struct plfxlc_chip *chip,
+ struct ieee80211_hw *hw,
+ struct usb_interface *intf)
+{
+ memset(chip, 0, sizeof(*chip));
+ mutex_init(&chip->mutex);
+ plfxlc_usb_init(&chip->usb, hw, intf);
+}
+
+void plfxlc_chip_release(struct plfxlc_chip *chip)
+{
+ plfxlc_usb_release(&chip->usb);
+ mutex_destroy(&chip->mutex);
+}
+
+int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
+ u8 dtim_period, int type)
+{
+ if (!interval ||
+ (chip->beacon_set && chip->beacon_interval == interval))
+ return 0;
+
+ chip->beacon_interval = interval;
+ chip->beacon_set = true;
+ return plfxlc_usb_wreq(chip->usb.ez_usb,
+ &chip->beacon_interval,
+ sizeof(chip->beacon_interval),
+ USB_REQ_BEACON_INTERVAL_WR);
+}
+
+int plfxlc_chip_init_hw(struct plfxlc_chip *chip)
+{
+ unsigned char *addr = plfxlc_mac_get_perm_addr(plfxlc_chip_to_mac(chip));
+ struct usb_device *udev = interface_to_usbdev(chip->usb.intf);
+
+ pr_info("plfxlc chip %04x:%04x v%02x %pM %s\n",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ le16_to_cpu(udev->descriptor.bcdDevice),
+ addr,
+ plfxlc_speed(udev->speed));
+
+ return plfxlc_set_beacon_interval(chip, 100, 0, 0);
+}
+
+int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value)
+{
+ int r;
+ __le16 radio_on = cpu_to_le16(value);
+
+ r = plfxlc_usb_wreq(chip->usb.ez_usb, &radio_on,
+ sizeof(value), USB_REQ_POWER_WR);
+ if (r)
+ dev_err(plfxlc_chip_dev(chip), "POWER_WR failed (%d)\n", r);
+ return r;
+}
+
+int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip)
+{
+ plfxlc_usb_enable_tx(&chip->usb);
+ return plfxlc_usb_enable_rx(&chip->usb);
+}
+
+void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip)
+{
+ u8 value = 0;
+
+ plfxlc_usb_wreq(chip->usb.ez_usb,
+ &value, sizeof(value), USB_REQ_RXTX_WR);
+ plfxlc_usb_disable_rx(&chip->usb);
+ plfxlc_usb_disable_tx(&chip->usb);
+}
+
+int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate)
+{
+ int r;
+
+ if (!chip)
+ return -EINVAL;
+
+ r = plfxlc_usb_wreq(chip->usb.ez_usb,
+ &rate, sizeof(rate), USB_REQ_RATE_WR);
+ if (r)
+ dev_err(plfxlc_chip_dev(chip), "RATE_WR failed (%d)\n", r);
+ return r;
+}
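
All of the chip-level setters above funnel through a single vendor write request; the radio switch, for instance, boils down to one little-endian u16 write, as plfxlc_chip_switch_radio() shows. A restatement of that shape (the wrapper name is illustrative):

	static int chip_radio(struct plfxlc_chip *chip, bool on)
	{
		__le16 v = cpu_to_le16(on ? PLFXLC_RADIO_ON : PLFXLC_RADIO_OFF);

		/* one vendor write per chip-level setting */
		return plfxlc_usb_wreq(chip->usb.ez_usb, &v, sizeof(v),
				       USB_REQ_POWER_WR);
	}
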
diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.h b/drivers/net/wireless/purelifi/plfxlc/chip.h
new file mode 100644
index 000000000000..dc04bbed07ac
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/chip.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_CHIP_H
+#define PLFXLC_CHIP_H
+
+#include <net/mac80211.h>
+
+#include "usb.h"
+
+enum unit_type {
+ STA = 0,
+ AP = 1,
+};
+
+enum {
+ PLFXLC_RADIO_OFF = 0,
+ PLFXLC_RADIO_ON = 1,
+};
+
+struct plfxlc_chip {
+ struct plfxlc_usb usb;
+ struct mutex mutex; /* lock to protect chip data */
+ enum unit_type unit_type;
+ u16 link_led;
+ u8 beacon_set;
+ u16 beacon_interval;
+};
+
+struct plfxlc_mc_hash {
+ u32 low;
+ u32 high;
+};
+
+#define plfxlc_chip_dev(chip) (&(chip)->usb.intf->dev)
+
+void plfxlc_chip_init(struct plfxlc_chip *chip,
+ struct ieee80211_hw *hw,
+ struct usb_interface *intf);
+
+void plfxlc_chip_release(struct plfxlc_chip *chip);
+
+void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip);
+
+int plfxlc_chip_init_hw(struct plfxlc_chip *chip);
+
+int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip);
+
+int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate);
+
+int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
+ u8 dtim_period, int type);
+
+int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value);
+
+static inline struct plfxlc_chip *plfxlc_usb_to_chip(struct plfxlc_usb
+ *usb)
+{
+ return container_of(usb, struct plfxlc_chip, usb);
+}
+
+static inline void plfxlc_mc_add_all(struct plfxlc_mc_hash *hash)
+{
+ hash->low = 0xffffffff;
+ hash->high = 0xffffffff;
+}
+
+#endif /* PLFXLC_CHIP_H */
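
plfxlc_mc_hash is simply mac80211's 64-bit multicast cookie split into two 32-bit words; plfxlc_op_configure_filter() in mac.c does the split inline. Stated as a helper (the name is illustrative):

	static inline void plfxlc_mc_hash_set(struct plfxlc_mc_hash *hash,
					      u64 multicast)
	{
		hash->low  = (u32)multicast;		/* lower 32 bits */
		hash->high = (u32)(multicast >> 32);	/* upper 32 bits */
	}
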
diff --git a/drivers/net/wireless/purelifi/plfxlc/firmware.c b/drivers/net/wireless/purelifi/plfxlc/firmware.c
new file mode 100644
index 000000000000..8a3529d07927
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/firmware.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/firmware.h>
+#include <linux/bitrev.h>
+
+#include "mac.h"
+#include "usb.h"
+
+static int send_vendor_request(struct usb_device *udev, int request,
+ unsigned char *buffer, int buffer_size)
+{
+ return usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ request, 0xC0, 0, 0,
+ buffer, buffer_size, PLF_USB_TIMEOUT);
+}
+
+static int send_vendor_command(struct usb_device *udev, int request,
+ unsigned char *buffer, int buffer_size)
+{
+ return usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ request, USB_TYPE_VENDOR /*0x40*/, 0, 0,
+ buffer, buffer_size, PLF_USB_TIMEOUT);
+}
+
+int plfxlc_download_fpga(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned char *fpga_dmabuff = NULL;
+ const struct firmware *fw = NULL;
+ int blk_tran_len = PLF_BULK_TLEN;
+ unsigned char *fw_data;
+ const char *fw_name;
+ int r, actual_length;
+ int fw_data_i = 0;
+
+ if ((le16_to_cpu(udev->descriptor.idVendor) ==
+ PURELIFI_X_VENDOR_ID_0) &&
+ (le16_to_cpu(udev->descriptor.idProduct) ==
+ PURELIFI_X_PRODUCT_ID_0)) {
+ fw_name = "plfxlc/lifi-x.bin";
+ dev_dbg(&intf->dev, "bin file for X selected\n");
+
+ } else if ((le16_to_cpu(udev->descriptor.idVendor)) ==
+ PURELIFI_XC_VENDOR_ID_0 &&
+ (le16_to_cpu(udev->descriptor.idProduct) ==
+ PURELIFI_XC_PRODUCT_ID_0)) {
+ fw_name = "plfxlc/lifi-xc.bin";
+ dev_dbg(&intf->dev, "bin file for XC selected\n");
+
+ } else {
+ r = -EINVAL;
+ goto error;
+ }
+
+ r = request_firmware(&fw, fw_name, &intf->dev);
+ if (r) {
+ dev_err(&intf->dev, "request_firmware failed (%d)\n", r);
+ goto error;
+ }
+ fpga_dmabuff = kmalloc(PLF_FPGA_STATUS_LEN, GFP_KERNEL);
+
+ if (!fpga_dmabuff) {
+ r = -ENOMEM;
+ goto error_free_fw;
+ }
+ send_vendor_request(udev, PLF_VNDR_FPGA_SET_REQ,
+ fpga_dmabuff, PLF_FPGA_STATUS_LEN);
+
+ send_vendor_command(udev, PLF_VNDR_FPGA_SET_CMD, NULL, 0);
+
+ if (fpga_dmabuff[0] != PLF_FPGA_MG) {
+ dev_err(&intf->dev, "fpga_dmabuff[0] is wrong\n");
+ r = -EINVAL;
+ goto error_free_fw;
+ }
+
+ for (fw_data_i = 0; fw_data_i < fw->size;) {
+ int tbuf_idx;
+
+ if ((fw->size - fw_data_i) < blk_tran_len)
+ blk_tran_len = fw->size - fw_data_i;
+
+ fw_data = kmemdup(&fw->data[fw_data_i], blk_tran_len,
+ GFP_KERNEL);
+ if (!fw_data) {
+ r = -ENOMEM;
+ goto error_free_fw;
+ }
+
+ for (tbuf_idx = 0; tbuf_idx < blk_tran_len; tbuf_idx++) {
+ /* u8 bit reverse */
+ fw_data[tbuf_idx] = bitrev8(fw_data[tbuf_idx]);
+ }
+ r = usb_bulk_msg(udev,
+ usb_sndbulkpipe(interface_to_usbdev(intf),
+ fpga_dmabuff[0] & 0xff),
+ fw_data,
+ blk_tran_len,
+ &actual_length,
+ 2 * PLF_USB_TIMEOUT);
+
+ if (r)
+ dev_err(&intf->dev, "Bulk msg failed (%d)\n", r);
+
+ kfree(fw_data);
+ fw_data_i += blk_tran_len;
+ }
+
+ kfree(fpga_dmabuff);
+ fpga_dmabuff = kmalloc(PLF_FPGA_STATE_LEN, GFP_KERNEL);
+ if (!fpga_dmabuff) {
+ r = -ENOMEM;
+ goto error_free_fw;
+ }
+ memset(fpga_dmabuff, 0xff, PLF_FPGA_STATE_LEN);
+
+ send_vendor_request(udev, PLF_VNDR_FPGA_STATE_REQ, fpga_dmabuff,
+ PLF_FPGA_STATE_LEN);
+
+ dev_dbg(&intf->dev, "%*ph\n", 8, fpga_dmabuff);
+
+ if (fpga_dmabuff[0] != 0) {
+ r = -EINVAL;
+ goto error_free_fw;
+ }
+
+ send_vendor_command(udev, PLF_VNDR_FPGA_STATE_CMD, NULL, 0);
+
+ msleep(PLF_MSLEEP_TIME);
+
+error_free_fw:
+ kfree(fpga_dmabuff);
+ release_firmware(fw);
+error:
+ return r;
+}
+
+int plfxlc_download_xl_firmware(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ const struct firmware *fwp = NULL;
+ struct plfxlc_firmware_file file = {0};
+ const char *fw_pack;
+ int s, r;
+ u8 *buf;
+ u32 i;
+
+ r = send_vendor_command(udev, PLF_VNDR_XL_FW_CMD, NULL, 0);
+ msleep(PLF_MSLEEP_TIME);
+
+ if (r) {
+ dev_err(&intf->dev, "vendor command failed (%d)\n", r);
+ return -EINVAL;
+ }
+ /* Code for single pack file download */
+
+ fw_pack = "plfxlc/lifi-xl.bin";
+
+ r = request_firmware(&fwp, fw_pack, &intf->dev);
+ if (r) {
+ dev_err(&intf->dev, "Request_firmware failed (%d)\n", r);
+ return -EINVAL;
+ }
+ file.total_files = get_unaligned_le32(&fwp->data[0]);
+ file.total_size = get_unaligned_le32(&fwp->size);
+
+ dev_dbg(&intf->dev, "XL Firmware (%d, %d)\n",
+ file.total_files, file.total_size);
+
+ buf = kzalloc(PLF_XL_BUF_LEN, GFP_KERNEL);
+ if (!buf) {
+ release_firmware(fwp);
+ return -ENOMEM;
+ }
+
+ if (file.total_files > 10) {
+ dev_err(&intf->dev, "Too many files (%d)\n", file.total_files);
+ release_firmware(fwp);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ /* Download firmware files in multiple steps */
+ for (s = 0; s < file.total_files; s++) {
+ buf[0] = s;
+ r = send_vendor_command(udev, PLF_VNDR_XL_FILE_CMD, buf,
+ PLF_XL_BUF_LEN);
+
+ if (s < file.total_files - 1)
+ file.size = get_unaligned_le32(&fwp->data[4 + ((s + 1) * 4)])
+ - get_unaligned_le32(&fwp->data[4 + (s) * 4]);
+ else
+ file.size = file.total_size -
+ get_unaligned_le32(&fwp->data[4 + (s) * 4]);
+
+ if (file.size > file.total_size || file.size > 60000) {
+ dev_err(&intf->dev, "File size is too large (%d)\n", file.size);
+ break;
+ }
+
+ file.start_addr = get_unaligned_le32(&fwp->data[4 + (s * 4)]);
+
+ if (file.size % PLF_XL_BUF_LEN && s < 2)
+ file.size += PLF_XL_BUF_LEN - file.size % PLF_XL_BUF_LEN;
+
+ file.control_packets = file.size / PLF_XL_BUF_LEN;
+
+ for (i = 0; i < file.control_packets; i++) {
+ memcpy(buf,
+ &fwp->data[file.start_addr + (i * PLF_XL_BUF_LEN)],
+ PLF_XL_BUF_LEN);
+ r = send_vendor_command(udev, PLF_VNDR_XL_DATA_CMD, buf,
+ PLF_XL_BUF_LEN);
+ }
+ dev_dbg(&intf->dev, "fw-dw step=%d,r=%d size=%d\n", s, r,
+ file.size);
+ }
+ release_firmware(fwp);
+ kfree(buf);
+
+	/* Single-pack file download done; finish the firmware download */
+
+ r = send_vendor_command(udev, PLF_VNDR_XL_EX_CMD, NULL, 0);
+ dev_dbg(&intf->dev, "Download fpga (4) (%d)\n", r);
+
+ return 0;
+}
+
+int plfxlc_upload_mac_and_serial(struct usb_interface *intf,
+ unsigned char *hw_address,
+ unsigned char *serial_number)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long long firmware_version;
+ unsigned char *dma_buffer = NULL;
+
+ dma_buffer = kmalloc(PLF_SERIAL_LEN, GFP_KERNEL);
+ if (!dma_buffer)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(ETH_ALEN > PLF_SERIAL_LEN);
+ BUILD_BUG_ON(PLF_FW_VER_LEN > PLF_SERIAL_LEN);
+
+ send_vendor_request(udev, PLF_MAC_VENDOR_REQUEST, dma_buffer,
+ ETH_ALEN);
+
+ memcpy(hw_address, dma_buffer, ETH_ALEN);
+
+ send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST,
+ dma_buffer, PLF_SERIAL_LEN);
+
+ send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST,
+ dma_buffer, PLF_SERIAL_LEN);
+
+ memcpy(serial_number, dma_buffer, PLF_SERIAL_LEN);
+
+ memset(dma_buffer, 0x00, PLF_SERIAL_LEN);
+
+ send_vendor_request(udev, PLF_FIRMWARE_VERSION_VENDOR_REQUEST,
+ (unsigned char *)dma_buffer, PLF_FW_VER_LEN);
+
+ memcpy(&firmware_version, dma_buffer, PLF_FW_VER_LEN);
+
+ dev_info(&intf->dev, "Firmware Version: %llu\n", firmware_version);
+ kfree(dma_buffer);
+
+ dev_dbg(&intf->dev, "Mac: %pM\n", hw_address);
+
+ return 0;
+}
+
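
plfxlc_download_xl_firmware() treats lifi-xl.bin as a single pack: a little-endian u32 file count at offset 0, followed by per-file u32 start offsets from byte 4, with each file's size derived from the next offset (or from the total size for the last file). A sketch of that header walk, under the same assumptions the code makes:

	#include <asm/unaligned.h>

	/* Illustrative parse of the pack header as the driver reads it. */
	static void walk_pack_header(const u8 *data, u32 total_size)
	{
		u32 nfiles = get_unaligned_le32(&data[0]);
		u32 s;

		for (s = 0; s < nfiles; s++) {
			u32 start = get_unaligned_le32(&data[4 + s * 4]);
			u32 size;

			if (s < nfiles - 1)
				size = get_unaligned_le32(&data[4 + (s + 1) * 4]) - start;
			else
				size = total_size - start;
			/* start/size now describe one downloadable blob */
		}
	}
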
diff --git a/drivers/net/wireless/purelifi/plfxlc/intf.h b/drivers/net/wireless/purelifi/plfxlc/intf.h
new file mode 100644
index 000000000000..5ae89343b579
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/intf.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#define PURELIFI_BYTE_NUM_ALIGNMENT 4
+#define ETH_ALEN 6
+#define AP_USER_LIMIT 8
+
+#define PLF_VNDR_FPGA_STATE_REQ 0x30
+#define PLF_VNDR_FPGA_SET_REQ 0x33
+#define PLF_VNDR_FPGA_SET_CMD 0x34
+#define PLF_VNDR_FPGA_STATE_CMD 0x35
+
+#define PLF_VNDR_XL_FW_CMD 0x80
+#define PLF_VNDR_XL_DATA_CMD 0x81
+#define PLF_VNDR_XL_FILE_CMD 0x82
+#define PLF_VNDR_XL_EX_CMD 0x83
+
+#define PLF_MAC_VENDOR_REQUEST 0x36
+#define PLF_SERIAL_NUMBER_VENDOR_REQUEST 0x37
+#define PLF_FIRMWARE_VERSION_VENDOR_REQUEST 0x39
+#define PLF_SERIAL_LEN 14
+#define PLF_FW_VER_LEN 8
+
+struct rx_status {
+ __be16 rssi;
+ u8 rate_idx;
+ u8 pad;
+ __be64 crc_error_count;
+} __packed;
+
+enum plf_usb_req_enum {
+ USB_REQ_TEST_WR = 0,
+ USB_REQ_MAC_WR = 1,
+ USB_REQ_POWER_WR = 2,
+ USB_REQ_RXTX_WR = 3,
+ USB_REQ_BEACON_WR = 4,
+ USB_REQ_BEACON_INTERVAL_WR = 5,
+ USB_REQ_RTS_CTS_RATE_WR = 6,
+ USB_REQ_HASH_WR = 7,
+ USB_REQ_DATA_TX = 8,
+ USB_REQ_RATE_WR = 9,
+ USB_REQ_SET_FREQ = 15
+};
+
+struct plf_usb_req {
+ __be32 id; /* should be plf_usb_req_enum */
+ __be32 len;
+ u8 buf[512];
+};
+
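
struct plf_usb_req above is the on-wire framing for every bulk command: a big-endian id from plf_usb_req_enum, a big-endian payload length, then up to 512 bytes of payload. Filling one might look like this (hypothetical helper):

	#include <linux/string.h>

	static void plf_usb_req_fill(struct plf_usb_req *req,
				     enum plf_usb_req_enum id,
				     const void *payload, u32 len)
	{
		req->id  = cpu_to_be32(id);	/* device expects big-endian */
		req->len = cpu_to_be32(len);
		memcpy(req->buf, payload, min_t(u32, len, sizeof(req->buf)));
	}
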
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
new file mode 100644
index 000000000000..d3cdffbded69
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/gpio.h>
+#include <linux/jiffies.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "chip.h"
+#include "mac.h"
+#include "usb.h"
+
+static const struct ieee80211_rate plfxlc_rates[] = {
+ { .bitrate = 10,
+ .hw_value = PURELIFI_CCK_RATE_1M,
+ .flags = 0 },
+ { .bitrate = 20,
+ .hw_value = PURELIFI_CCK_RATE_2M,
+ .hw_value_short = PURELIFI_CCK_RATE_2M
+ | PURELIFI_CCK_PREA_SHORT,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = PURELIFI_CCK_RATE_5_5M,
+ .hw_value_short = PURELIFI_CCK_RATE_5_5M
+ | PURELIFI_CCK_PREA_SHORT,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = PURELIFI_CCK_RATE_11M,
+ .hw_value_short = PURELIFI_CCK_RATE_11M
+ | PURELIFI_CCK_PREA_SHORT,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 60,
+ .hw_value = PURELIFI_OFDM_RATE_6M,
+ .flags = 0 },
+ { .bitrate = 90,
+ .hw_value = PURELIFI_OFDM_RATE_9M,
+ .flags = 0 },
+ { .bitrate = 120,
+ .hw_value = PURELIFI_OFDM_RATE_12M,
+ .flags = 0 },
+ { .bitrate = 180,
+ .hw_value = PURELIFI_OFDM_RATE_18M,
+ .flags = 0 },
+ { .bitrate = 240,
+ .hw_value = PURELIFI_OFDM_RATE_24M,
+ .flags = 0 },
+ { .bitrate = 360,
+ .hw_value = PURELIFI_OFDM_RATE_36M,
+ .flags = 0 },
+ { .bitrate = 480,
+ .hw_value = PURELIFI_OFDM_RATE_48M,
+ .flags = 0 },
+ { .bitrate = 540,
+ .hw_value = PURELIFI_OFDM_RATE_54M,
+ .flags = 0 }
+};
+
+static const struct ieee80211_channel plfxlc_channels[] = {
+ { .center_freq = 2412, .hw_value = 1 },
+ { .center_freq = 2417, .hw_value = 2 },
+ { .center_freq = 2422, .hw_value = 3 },
+ { .center_freq = 2427, .hw_value = 4 },
+ { .center_freq = 2432, .hw_value = 5 },
+ { .center_freq = 2437, .hw_value = 6 },
+ { .center_freq = 2442, .hw_value = 7 },
+ { .center_freq = 2447, .hw_value = 8 },
+ { .center_freq = 2452, .hw_value = 9 },
+ { .center_freq = 2457, .hw_value = 10 },
+ { .center_freq = 2462, .hw_value = 11 },
+ { .center_freq = 2467, .hw_value = 12 },
+ { .center_freq = 2472, .hw_value = 13 },
+ { .center_freq = 2484, .hw_value = 14 },
+};
+
+int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address)
+{
+ SET_IEEE80211_PERM_ADDR(hw, hw_address);
+ return 0;
+}
+
+int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct plfxlc_chip *chip = &mac->chip;
+ int r;
+
+ r = plfxlc_chip_init_hw(chip);
+ if (r) {
+ dev_warn(plfxlc_mac_dev(mac), "init hw failed (%d)\n", r);
+ return r;
+ }
+
+ dev_dbg(plfxlc_mac_dev(mac), "irq_disabled (%d)\n", irqs_disabled());
+ regulatory_hint(hw->wiphy, "00");
+ return r;
+}
+
+void plfxlc_mac_release(struct plfxlc_mac *mac)
+{
+ plfxlc_chip_release(&mac->chip);
+ lockdep_assert_held(&mac->lock);
+}
+
+int plfxlc_op_start(struct ieee80211_hw *hw)
+{
+ plfxlc_hw_mac(hw)->chip.usb.initialized = 1;
+ return 0;
+}
+
+void plfxlc_op_stop(struct ieee80211_hw *hw)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+
+ clear_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+}
+
+int plfxlc_restore_settings(struct plfxlc_mac *mac)
+{
+ int beacon_interval, beacon_period;
+ struct sk_buff *beacon;
+
+ spin_lock_irq(&mac->lock);
+ beacon_interval = mac->beacon.interval;
+ beacon_period = mac->beacon.period;
+ spin_unlock_irq(&mac->lock);
+
+ if (mac->type != NL80211_IFTYPE_ADHOC)
+ return 0;
+
+ if (mac->vif) {
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
+ if (beacon) {
+			/* beacon is hardcoded in firmware */
+			kfree_skb(beacon);
+			/* Returned skb is used only once and the low-level
+			 * driver is responsible for freeing it.
+			 */
+ }
+ }
+
+ plfxlc_set_beacon_interval(&mac->chip, beacon_interval,
+ beacon_period, mac->type);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ return 0;
+}
+
+static void plfxlc_mac_tx_status(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ int ackssi,
+ struct tx_status *tx_status)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int success = 1;
+
+ ieee80211_tx_info_clear_status(info);
+ if (tx_status)
+ success = !tx_status->failure;
+
+ if (success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ info->status.ack_signal = 50;
+ ieee80211_tx_status_irqsafe(hw, skb);
+}
+
+void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = info->rate_driver_data[0];
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct sk_buff_head *q = NULL;
+
+ ieee80211_tx_info_clear_status(info);
+ skb_pull(skb, sizeof(struct plfxlc_ctrlset));
+
+ if (unlikely(error ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
+ ieee80211_tx_status_irqsafe(hw, skb);
+ return;
+ }
+
+ q = &mac->ack_wait_queue;
+
+ skb_queue_tail(q, skb);
+	while (skb_queue_len(q) /* > PURELIFI_MAC_MAX_ACK_WAITERS */) {
+ plfxlc_mac_tx_status(hw, skb_dequeue(q),
+ mac->ack_pending ?
+ mac->ack_signal : 0,
+ NULL);
+ mac->ack_pending = 0;
+ }
+}
+
+static int plfxlc_fill_ctrlset(struct plfxlc_mac *mac, struct sk_buff *skb)
+{
+ unsigned int frag_len = skb->len;
+ struct plfxlc_ctrlset *cs;
+ u32 temp_payload_len = 0;
+ unsigned int tmp;
+ u32 temp_len = 0;
+
+ if (skb_headroom(skb) < sizeof(struct plfxlc_ctrlset)) {
+ dev_dbg(plfxlc_mac_dev(mac), "Not enough hroom(1)\n");
+ return 1;
+ }
+
+ cs = (void *)skb_push(skb, sizeof(struct plfxlc_ctrlset));
+ temp_payload_len = frag_len;
+ temp_len = temp_payload_len +
+ sizeof(struct plfxlc_ctrlset) -
+ sizeof(cs->id) - sizeof(cs->len);
+
+	/* Data packet lengths must be a multiple of four bytes and must
+	 * not be a multiple of 512 bytes. First, we try to append the
+	 * padding in the tailroom of the skb. On rare occasions the
+	 * tailroom is too small; in that case the packet content is
+	 * shifted into the headroom of the skb with memmove. Headroom
+	 * is reserved at startup (below in this file), so there will
+	 * always be enough of it; the skb_headroom() check is an
+	 * additional safety measure that could be dropped.
+	 */
+ /* check if 32 bit aligned and align data */
+ tmp = skb->len & 3;
+ if (tmp) {
+ if (skb_tailroom(skb) < (3 - tmp)) {
+ if (skb_headroom(skb) >= 4 - tmp) {
+ u8 len;
+ u8 *src_pt;
+ u8 *dest_pt;
+
+ len = skb->len;
+ src_pt = skb->data;
+ dest_pt = skb_push(skb, 4 - tmp);
+ memmove(dest_pt, src_pt, len);
+ } else {
+ return -ENOBUFS;
+ }
+ } else {
+ skb_put(skb, 4 - tmp);
+ }
+ temp_len += 4 - tmp;
+ }
+
+ /* check if not multiple of 512 and align data */
+ tmp = skb->len & 0x1ff;
+ if (!tmp) {
+ if (skb_tailroom(skb) < 4) {
+ if (skb_headroom(skb) >= 4) {
+ u8 len = skb->len;
+ u8 *src_pt = skb->data;
+ u8 *dest_pt = skb_push(skb, 4);
+
+ memmove(dest_pt, src_pt, len);
+ } else {
+ /* should never happen because
+ * sufficient headroom was reserved
+ */
+ return -ENOBUFS;
+ }
+ } else {
+ skb_put(skb, 4);
+ }
+ temp_len += 4;
+ }
+
+ cs->id = cpu_to_be32(USB_REQ_DATA_TX);
+ cs->len = cpu_to_be32(temp_len);
+ cs->payload_len_nw = cpu_to_be32(temp_payload_len);
+
+ return 0;
+}
+
+static void plfxlc_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct plfxlc_header *plhdr = (void *)skb->data;
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct plfxlc_usb *usb = &mac->chip.usb;
+ unsigned long flags;
+ int r;
+
+ r = plfxlc_fill_ctrlset(mac, skb);
+ if (r)
+ goto fail;
+
+ info->rate_driver_data[0] = hw;
+
+ if (plhdr->frametype == IEEE80211_FTYPE_DATA) {
+ u8 *dst_mac = plhdr->dmac;
+ u8 sidx;
+ bool found = false;
+ struct plfxlc_usb_tx *tx = &usb->tx;
+
+ for (sidx = 0; sidx < MAX_STA_NUM; sidx++) {
+ if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG))
+ continue;
+ if (memcmp(tx->station[sidx].mac, dst_mac, ETH_ALEN))
+ continue;
+ found = true;
+ break;
+ }
+
+ /* Default to broadcast address for unknown MACs */
+ if (!found)
+ sidx = STA_BROADCAST_INDEX;
+
+ /* Stop OS from sending packets, if the queue is half full */
+ if (skb_queue_len(&tx->station[sidx].data_list) > 60)
+ ieee80211_stop_queues(plfxlc_usb_to_hw(usb));
+
+ /* Schedule packet for transmission if queue is not full */
+ if (skb_queue_len(&tx->station[sidx].data_list) > 256)
+ goto fail;
+ skb_queue_tail(&tx->station[sidx].data_list, skb);
+ plfxlc_send_packet_from_data_queue(usb);
+
+ } else {
+ spin_lock_irqsave(&usb->tx.lock, flags);
+ r = plfxlc_usb_wreq_async(&mac->chip.usb, skb->data, skb->len,
+ USB_REQ_DATA_TX, plfxlc_tx_urb_complete, skb);
+ spin_unlock_irqrestore(&usb->tx.lock, flags);
+ if (r)
+ goto fail;
+ }
+ return;
+
+fail:
+ dev_kfree_skb(skb);
+}
+
+static int plfxlc_filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
+ struct ieee80211_rx_status *stats)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct sk_buff_head *q;
+ int i, position = 0;
+ unsigned long flags;
+ struct sk_buff *skb;
+ bool found = false;
+
+ if (!ieee80211_is_ack(rx_hdr->frame_control))
+ return 0;
+
+ dev_dbg(plfxlc_mac_dev(mac), "ACK Received\n");
+
+	/* code based on the zy driver; this logic may need fixing */
+ q = &mac->ack_wait_queue;
+ spin_lock_irqsave(&q->lock, flags);
+
+ skb_queue_walk(q, skb) {
+ struct ieee80211_hdr *tx_hdr;
+
+ position++;
+
+ if (mac->ack_pending && skb_queue_is_first(q, skb))
+ continue;
+ if (mac->ack_pending == 0)
+ break;
+
+ tx_hdr = (struct ieee80211_hdr *)skb->data;
+ if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1))) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ for (i = 1; i < position; i++)
+ skb = __skb_dequeue(q);
+ if (i == position) {
+ plfxlc_mac_tx_status(hw, skb,
+ mac->ack_pending ?
+ mac->ack_signal : 0,
+ NULL);
+ mac->ack_pending = 0;
+ }
+
+ mac->ack_pending = skb_queue_len(q) ? 1 : 0;
+ mac->ack_signal = stats->signal;
+ }
+
+ spin_unlock_irqrestore(&q->lock, flags);
+ return 1;
+}
+
+int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer,
+ unsigned int length)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ struct ieee80211_rx_status stats;
+ const struct rx_status *status;
+ unsigned int payload_length;
+ struct plfxlc_usb_tx *tx;
+ struct sk_buff *skb;
+ int need_padding;
+ __le16 fc;
+ int sidx;
+
+	/* Drop all packets while the interface is disabled. */
+ if (!mac->vif)
+ return 0;
+
+ status = (struct rx_status *)buffer;
+
+ memset(&stats, 0, sizeof(stats));
+
+ stats.flag = 0;
+ stats.freq = 2412;
+ stats.band = NL80211_BAND_LC;
+ mac->rssi = -15 * be16_to_cpu(status->rssi) / 10;
+
+ stats.signal = mac->rssi;
+
+ if (status->rate_idx > 7)
+ stats.rate_idx = 0;
+ else
+ stats.rate_idx = status->rate_idx;
+
+ mac->crc_errors = be64_to_cpu(status->crc_error_count);
+
+	/* TODO: bad-frame check for CRC errors */
+ if (plfxlc_filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats) &&
+ !mac->pass_ctrl)
+ return 0;
+
+ buffer += sizeof(struct rx_status);
+ payload_length = get_unaligned_be32(buffer);
+
+ if (payload_length > 1560) {
+ dev_err(plfxlc_mac_dev(mac), " > MTU %u\n", payload_length);
+ return 0;
+ }
+ buffer += sizeof(u32);
+
+ fc = get_unaligned((__le16 *)buffer);
+ need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);
+
+ tx = &mac->chip.usb.tx;
+
+ for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+ if (memcmp(&buffer[10], tx->station[sidx].mac, ETH_ALEN))
+ continue;
+ if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) {
+ tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG;
+ break;
+ }
+ }
+
+ if (sidx == MAX_STA_NUM - 1) {
+ for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+ if (tx->station[sidx].flag & STATION_CONNECTED_FLAG)
+ continue;
+ memcpy(tx->station[sidx].mac, &buffer[10], ETH_ALEN);
+ tx->station[sidx].flag |= STATION_CONNECTED_FLAG;
+ tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG;
+ break;
+ }
+ }
+
+ switch (buffer[0]) {
+ case IEEE80211_STYPE_PROBE_REQ:
+ dev_dbg(plfxlc_mac_dev(mac), "Probe request\n");
+ break;
+ case IEEE80211_STYPE_ASSOC_REQ:
+ dev_dbg(plfxlc_mac_dev(mac), "Association request\n");
+ break;
+ case IEEE80211_STYPE_AUTH:
+ dev_dbg(plfxlc_mac_dev(mac), "Authentication req\n");
+ break;
+ case IEEE80211_FTYPE_DATA:
+ dev_dbg(plfxlc_mac_dev(mac), "802.11 data frame\n");
+ break;
+ }
+
+ skb = dev_alloc_skb(payload_length + (need_padding ? 2 : 0));
+ if (!skb)
+ return -ENOMEM;
+
+ if (need_padding)
+ /* Make sure that the payload data is 4 byte aligned. */
+ skb_reserve(skb, 2);
+
+ skb_put_data(skb, buffer, payload_length);
+ memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
+ ieee80211_rx_irqsafe(hw, skb);
+ return 0;
+}
+
+static int plfxlc_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ static const char * const iftype80211[] = {
+ [NL80211_IFTYPE_STATION] = "Station",
+ [NL80211_IFTYPE_ADHOC] = "Adhoc"
+ };
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
+ return -EOPNOTSUPP;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_STATION) {
+ dev_dbg(plfxlc_mac_dev(mac), "%s %s\n", __func__,
+ iftype80211[vif->type]);
+ mac->type = vif->type;
+ mac->vif = vif;
+ return 0;
+ }
+ dev_dbg(plfxlc_mac_dev(mac), "unsupported iftype\n");
+ return -EOPNOTSUPP;
+}
+
+static void plfxlc_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+
+ mac->type = NL80211_IFTYPE_UNSPECIFIED;
+ mac->vif = NULL;
+}
+
+static int plfxlc_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+#define SUPPORTED_FIF_FLAGS \
+ (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
+ FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
+static void plfxlc_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ u64 multicast)
+{
+ struct plfxlc_mc_hash hash = {
+ .low = multicast,
+ .high = multicast >> 32,
+ };
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ unsigned long flags;
+
+ /* Only deal with supported flags */
+ *new_flags &= SUPPORTED_FIF_FLAGS;
+
+	/* If the multicast parameter
+	 * (as returned by plfxlc_op_prepare_multicast)
+	 * has changed, no bit in changed_flags is set. To handle this
+	 * situation we do not return early when changed_flags is 0;
+	 * doing so would break IPv6, which uses multicast for
+	 * link-layer address resolution.
+	 */
+ if (*new_flags & (FIF_ALLMULTI))
+ plfxlc_mc_add_all(&hash);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL);
+ mac->pass_ctrl = !!(*new_flags & FIF_CONTROL);
+ mac->multicast_hash = hash;
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ /* no handling required for FIF_OTHER_BSS as we don't currently
+ * do BSSID filtering
+ */
+	/* FIXME: in the future it would be nice to enable the probe
+	 * response filter (so that the driver doesn't see them) until
+	 * FIF_BCN_PRBRESP_PROMISC is set. However, due to atomicity
+	 * here we'd have to schedule work to enable probe-response
+	 * reception, which might happen too late. For now we just
+	 * listen and forward them all the time.
+	 */
+}
+
+static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u64 changes)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+ int associated;
+
+ dev_dbg(plfxlc_mac_dev(mac), "changes: %llx\n", changes);
+
+ if (mac->type != NL80211_IFTYPE_ADHOC) { /* for STATION */
+ associated = is_valid_ether_addr(bss_conf->bssid);
+ goto exit_all;
+ }
+ /* for ADHOC */
+ associated = true;
+ if (changes & BSS_CHANGED_BEACON) {
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif, 0);
+
+ if (beacon) {
+			/* beacon is hardcoded in firmware */
+			kfree_skb(beacon);
+			/* Returned skb is used only once and the
+			 * low-level driver is responsible for
+			 * freeing it.
+			 */
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ u16 interval = 0;
+ u8 period = 0;
+
+ if (bss_conf->enable_beacon) {
+ period = bss_conf->dtim_period;
+ interval = bss_conf->beacon_int;
+ }
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.period = period;
+ mac->beacon.interval = interval;
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ plfxlc_set_beacon_interval(&mac->chip, interval,
+ period, mac->type);
+ }
+exit_all:
+ spin_lock_irq(&mac->lock);
+ mac->associated = associated;
+ spin_unlock_irq(&mac->lock);
+}
+
+static int plfxlc_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ stats->dot11ACKFailureCount = 0;
+ stats->dot11RTSFailureCount = 0;
+ stats->dot11FCSErrorCount = 0;
+ stats->dot11RTSSuccessCount = 0;
+ return 0;
+}
+
+static const char et_strings[][ETH_GSTRING_LEN] = {
+ "phy_rssi",
+ "phy_rx_crc_err"
+};
+
+static int plfxlc_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(et_strings);
+
+ return 0;
+}
+
+static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *et_strings, sizeof(et_strings));
+}
+
+static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
+
+ data[0] = mac->rssi;
+ data[1] = mac->crc_errors;
+}
+
+static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static const struct ieee80211_ops plfxlc_ops = {
+ .tx = plfxlc_op_tx,
+ .start = plfxlc_op_start,
+ .stop = plfxlc_op_stop,
+ .add_interface = plfxlc_op_add_interface,
+ .remove_interface = plfxlc_op_remove_interface,
+ .set_rts_threshold = plfxlc_set_rts_threshold,
+ .config = plfxlc_op_config,
+ .configure_filter = plfxlc_op_configure_filter,
+ .bss_info_changed = plfxlc_op_bss_info_changed,
+ .get_stats = plfxlc_get_stats,
+ .get_et_sset_count = plfxlc_get_et_sset_count,
+ .get_et_stats = plfxlc_get_et_stats,
+ .get_et_strings = plfxlc_get_et_strings,
+};
+
+struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw;
+ struct plfxlc_mac *mac;
+
+ hw = ieee80211_alloc_hw(sizeof(struct plfxlc_mac), &plfxlc_ops);
+ if (!hw) {
+ dev_dbg(&intf->dev, "out of memory\n");
+ return NULL;
+ }
+ set_wiphy_dev(hw->wiphy, &intf->dev);
+
+ mac = plfxlc_hw_mac(hw);
+ memset(mac, 0, sizeof(*mac));
+ spin_lock_init(&mac->lock);
+ mac->hw = hw;
+
+ mac->type = NL80211_IFTYPE_UNSPECIFIED;
+
+ memcpy(mac->channels, plfxlc_channels, sizeof(plfxlc_channels));
+ memcpy(mac->rates, plfxlc_rates, sizeof(plfxlc_rates));
+ mac->band.n_bitrates = ARRAY_SIZE(plfxlc_rates);
+ mac->band.bitrates = mac->rates;
+ mac->band.n_channels = ARRAY_SIZE(plfxlc_channels);
+ mac->band.channels = mac->channels;
+ hw->wiphy->bands[NL80211_BAND_LC] = &mac->band;
+ hw->conf.chandef.width = NL80211_CHAN_WIDTH_20;
+
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ hw->max_signal = 100;
+ hw->queues = 1;
+ /* 4 for 32 bit alignment if no tailroom */
+ hw->extra_tx_headroom = sizeof(struct plfxlc_ctrlset) + 4;
+ /* Tell mac80211 that we support multi rate retries */
+ hw->max_rates = IEEE80211_TX_MAX_RATES;
+ hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */
+
+ skb_queue_head_init(&mac->ack_wait_queue);
+ mac->ack_pending = 0;
+
+ plfxlc_chip_init(&mac->chip, hw, intf);
+
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ return hw;
+}
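
The padding logic in plfxlc_fill_ctrlset() enforces two constraints on what goes out over USB: the length must be a multiple of 4 and must not be a multiple of 512 (presumably to sidestep zero-length-packet handling on the 512-byte bulk endpoint). The length rule alone, restated:

	/* Illustration of the length rule only; the real code grows the
	 * skb tailroom or shifts data into headroom instead of computing
	 * a size up front.
	 */
	static unsigned int plf_padded_len(unsigned int len)
	{
		len = ALIGN(len, 4);	/* multiple of 4 bytes */
		if (!(len & 0x1ff))	/* never a multiple of 512 */
			len += 4;
		return len;
	}
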
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
new file mode 100644
index 000000000000..49b92413729b
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_MAC_H
+#define PLFXLC_MAC_H
+
+#include <linux/kernel.h>
+#include <net/mac80211.h>
+
+#include "chip.h"
+
+#define PURELIFI_CCK 0x00
+#define PURELIFI_OFDM 0x10
+#define PURELIFI_CCK_PREA_SHORT 0x20
+
+#define PURELIFI_OFDM_PLCP_RATE_6M 0xb
+#define PURELIFI_OFDM_PLCP_RATE_9M 0xf
+#define PURELIFI_OFDM_PLCP_RATE_12M 0xa
+#define PURELIFI_OFDM_PLCP_RATE_18M 0xe
+#define PURELIFI_OFDM_PLCP_RATE_24M 0x9
+#define PURELIFI_OFDM_PLCP_RATE_36M 0xd
+#define PURELIFI_OFDM_PLCP_RATE_48M 0x8
+#define PURELIFI_OFDM_PLCP_RATE_54M 0xc
+
+#define PURELIFI_CCK_RATE_1M (PURELIFI_CCK | 0x00)
+#define PURELIFI_CCK_RATE_2M (PURELIFI_CCK | 0x01)
+#define PURELIFI_CCK_RATE_5_5M (PURELIFI_CCK | 0x02)
+#define PURELIFI_CCK_RATE_11M (PURELIFI_CCK | 0x03)
+#define PURELIFI_OFDM_RATE_6M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_6M)
+#define PURELIFI_OFDM_RATE_9M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_9M)
+#define PURELIFI_OFDM_RATE_12M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_12M)
+#define PURELIFI_OFDM_RATE_18M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_18M)
+#define PURELIFI_OFDM_RATE_24M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_24M)
+#define PURELIFI_OFDM_RATE_36M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_36M)
+#define PURELIFI_OFDM_RATE_48M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_48M)
+#define PURELIFI_OFDM_RATE_54M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_54M)
+
+#define PURELIFI_RX_ERROR 0x80
+#define PURELIFI_RX_CRC32_ERROR 0x10
+
+#define PLF_REGDOMAIN_FCC 0x10
+#define PLF_REGDOMAIN_IC 0x20
+#define PLF_REGDOMAIN_ETSI 0x30
+#define PLF_REGDOMAIN_SPAIN 0x31
+#define PLF_REGDOMAIN_FRANCE 0x32
+#define PLF_REGDOMAIN_JAPAN_2 0x40
+#define PLF_REGDOMAIN_JAPAN 0x41
+#define PLF_REGDOMAIN_JAPAN_3 0x49
+
+#define PLF_RX_ERROR 0x80
+#define PLF_RX_CRC32_ERROR 0x10
+
+enum {
+ MODULATION_RATE_BPSK_1_2 = 0,
+ MODULATION_RATE_BPSK_3_4,
+ MODULATION_RATE_QPSK_1_2,
+ MODULATION_RATE_QPSK_3_4,
+ MODULATION_RATE_QAM16_1_2,
+ MODULATION_RATE_QAM16_3_4,
+ MODULATION_RATE_QAM64_1_2,
+ MODULATION_RATE_QAM64_3_4,
+ MODULATION_RATE_AUTO,
+ MODULATION_RATE_NUM
+};
+
+#define plfxlc_mac_dev(mac) plfxlc_chip_dev(&(mac)->chip)
+
+#define PURELIFI_MAC_STATS_BUFFER_SIZE 16
+#define PURELIFI_MAC_MAX_ACK_WAITERS 50
+
+struct plfxlc_ctrlset {
+ /* id should be plf_usb_req_enum */
+ __be32 id;
+ __be32 len;
+ u8 modulation;
+ u8 control;
+ u8 service;
+ u8 pad;
+ __le16 packet_length;
+ __le16 current_length;
+ __le16 next_frame_length;
+ __le16 tx_length;
+ __be32 payload_len_nw;
+} __packed;
+
+/* overlay */
+struct plfxlc_header {
+ struct plfxlc_ctrlset plf_ctrl;
+ u32 frametype;
+ u8 *dmac;
+} __packed;
+
+struct tx_status {
+ u8 type;
+ u8 id;
+ u8 rate;
+ u8 pad;
+ u8 mac[ETH_ALEN];
+ u8 retry;
+ u8 failure;
+} __packed;
+
+struct beacon {
+ struct delayed_work watchdog_work;
+ struct sk_buff *cur_beacon;
+ unsigned long last_update;
+ u16 interval;
+ u8 period;
+};
+
+enum plfxlc_device_flags {
+ PURELIFI_DEVICE_RUNNING,
+};
+
+struct plfxlc_mac {
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ struct beacon beacon;
+ struct work_struct set_rts_cts_work;
+ struct work_struct process_intr;
+ struct plfxlc_mc_hash multicast_hash;
+ struct sk_buff_head ack_wait_queue;
+ struct ieee80211_channel channels[14];
+ struct ieee80211_rate rates[12];
+ struct ieee80211_supported_band band;
+ struct plfxlc_chip chip;
+ spinlock_t lock; /* lock for mac data */
+ u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
+ char serial_number[PURELIFI_SERIAL_LEN];
+ unsigned char hw_address[ETH_ALEN];
+ u8 default_regdomain;
+ unsigned long flags;
+ bool pass_failed_fcs;
+ bool pass_ctrl;
+ bool ack_pending;
+ int ack_signal;
+ int associated;
+ u8 regdomain;
+ u8 channel;
+ int type;
+ u64 crc_errors;
+ u64 rssi;
+};
+
+static inline struct plfxlc_mac *
+plfxlc_hw_mac(struct ieee80211_hw *hw)
+{
+ return hw->priv;
+}
+
+static inline struct plfxlc_mac *
+plfxlc_chip_to_mac(struct plfxlc_chip *chip)
+{
+ return container_of(chip, struct plfxlc_mac, chip);
+}
+
+static inline struct plfxlc_mac *
+plfxlc_usb_to_mac(struct plfxlc_usb *usb)
+{
+ return plfxlc_chip_to_mac(plfxlc_usb_to_chip(usb));
+}
+
+static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac)
+{
+ return mac->hw->wiphy->perm_addr;
+}
+
+struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf);
+void plfxlc_mac_release(struct plfxlc_mac *mac);
+
+int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address);
+int plfxlc_mac_init_hw(struct ieee80211_hw *hw);
+
+int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer,
+ unsigned int length);
+void plfxlc_mac_tx_failed(struct urb *urb);
+void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error);
+int plfxlc_op_start(struct ieee80211_hw *hw);
+void plfxlc_op_stop(struct ieee80211_hw *hw);
+int plfxlc_restore_settings(struct plfxlc_mac *mac);
+
+#endif /* PLFXLC_MAC_H */
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
new file mode 100644
index 000000000000..39e54b3787d6
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -0,0 +1,891 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include <linux/sysfs.h>
+
+#include "mac.h"
+#include "usb.h"
+#include "chip.h"
+
+static const struct usb_device_id usb_ids[] = {
+ { USB_DEVICE(PURELIFI_X_VENDOR_ID_0, PURELIFI_X_PRODUCT_ID_0),
+ .driver_info = DEVICE_LIFI_X },
+ { USB_DEVICE(PURELIFI_XC_VENDOR_ID_0, PURELIFI_XC_PRODUCT_ID_0),
+ .driver_info = DEVICE_LIFI_XC },
+ { USB_DEVICE(PURELIFI_XL_VENDOR_ID_0, PURELIFI_XL_PRODUCT_ID_0),
+ .driver_info = DEVICE_LIFI_XL },
+ {}
+};
+
+void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ u8 last_served_sidx;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ last_served_sidx = usb->sidx;
+ do {
+ usb->sidx = (usb->sidx + 1) % MAX_STA_NUM;
+ if (!(tx->station[usb->sidx].flag & STATION_CONNECTED_FLAG))
+ continue;
+ if (!(tx->station[usb->sidx].flag & STATION_FIFO_FULL_FLAG))
+ skb = skb_peek(&tx->station[usb->sidx].data_list);
+ } while ((usb->sidx != last_served_sidx) && (!skb));
+
+ if (skb) {
+ skb = skb_dequeue(&tx->station[usb->sidx].data_list);
+ plfxlc_usb_wreq_async(usb, skb->data, skb->len, USB_REQ_DATA_TX,
+ plfxlc_tx_urb_complete, skb);
+ if (skb_queue_len(&tx->station[usb->sidx].data_list) <= 60)
+ ieee80211_wake_queues(plfxlc_usb_to_hw(usb));
+ }
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+static void handle_rx_packet(struct plfxlc_usb *usb, const u8 *buffer,
+ unsigned int length)
+{
+ plfxlc_mac_rx(plfxlc_usb_to_hw(usb), buffer, length);
+}
+
+static void rx_urb_complete(struct urb *urb)
+{
+ struct plfxlc_usb_tx *tx;
+ struct plfxlc_usb *usb;
+ unsigned int length;
+ const u8 *buffer;
+ u16 status;
+ u8 sidx;
+ int r;
+
+ if (!urb) {
+ pr_err("urb is NULL\n");
+ return;
+ }
+ if (!urb->context) {
+ pr_err("urb ctx is NULL\n");
+ return;
+ }
+ usb = urb->context;
+
+ if (!usb->initialized) {
+ pr_err("usb is not initialized\n");
+ return;
+ }
+
+ tx = &usb->tx;
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
+ default:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ if (tx->submitted_urbs++ < PURELIFI_URB_RETRY_MAX) {
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit %d\n", urb,
+ tx->submitted_urbs);
+ goto resubmit;
+ } else {
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p max resubmits reached\n", urb);
+ tx->submitted_urbs = 0;
+ return;
+ }
+ }
+
+ buffer = urb->transfer_buffer;
+ length = le32_to_cpu(*(__le32 *)(buffer + sizeof(struct rx_status)))
+ + sizeof(u32);
+
+ if (urb->actual_length != (PLF_MSG_STATUS_OFFSET + 1)) {
+ if (usb->initialized && usb->link_up)
+ handle_rx_packet(usb, buffer, length);
+ goto resubmit;
+ }
+
+ status = buffer[PLF_MSG_STATUS_OFFSET];
+
+ switch (status) {
+ case STATION_FIFO_ALMOST_FULL_NOT_MESSAGE:
+ dev_dbg(&usb->intf->dev,
+ "FIFO full not packet receipt\n");
+ tx->mac_fifo_full = 1;
+ for (sidx = 0; sidx < MAX_STA_NUM; sidx++)
+ tx->station[sidx].flag |= STATION_FIFO_FULL_FLAG;
+ break;
+ case STATION_FIFO_ALMOST_FULL_MESSAGE:
+ dev_dbg(&usb->intf->dev, "FIFO full packet receipt\n");
+
+ for (sidx = 0; sidx < MAX_STA_NUM; sidx++)
+ tx->station[sidx].flag &= STATION_ACTIVE_FLAG;
+
+ plfxlc_send_packet_from_data_queue(usb);
+ break;
+ case STATION_CONNECT_MESSAGE:
+ usb->link_up = 1;
+ dev_dbg(&usb->intf->dev, "ST_CONNECT_MSG packet receipt\n");
+ break;
+ case STATION_DISCONNECT_MESSAGE:
+ usb->link_up = 0;
+ dev_dbg(&usb->intf->dev, "ST_DISCONN_MSG packet receipt\n");
+ break;
+ default:
+ dev_dbg(&usb->intf->dev, "Unknown packet receipt\n");
+ break;
+ }
+
+resubmit:
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit fail (%d)\n", urb, r);
+}
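For orientation, the bulk-in buffer layout implied by the parsing above (a reconstruction from this function, not an authoritative format description):

    /* [ struct rx_status ][ __le32 payload_len ][ payload ... ]
     *
     * The 'length' handed to plfxlc_mac_rx() is payload_len + sizeof(u32),
     * i.e. the length word itself is counted as part of the frame.
     * One-byte status messages are distinguished purely by actual_length.
     */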
+
+static struct urb *alloc_rx_urb(struct plfxlc_usb *usb)
+{
+ struct usb_device *udev = plfxlc_usb_to_usbdev(usb);
+ struct urb *urb;
+ void *buffer;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buffer) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
+ buffer, USB_MAX_RX_SIZE,
+ rx_urb_complete, usb);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return urb;
+}
+
+static void free_rx_urb(struct urb *urb)
+{
+ if (!urb)
+ return;
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
+static int __lf_x_usb_enable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+ struct urb **urbs;
+ int i, r;
+
+ r = -ENOMEM;
+ urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL);
+ if (!urbs)
+ goto error;
+
+ for (i = 0; i < RX_URBS_COUNT; i++) {
+ urbs[i] = alloc_rx_urb(usb);
+ if (!urbs[i])
+ goto error;
+ }
+
+ spin_lock_irq(&rx->lock);
+
+ dev_dbg(plfxlc_usb_dev(usb), "irq_disabled %d\n", irqs_disabled());
+
+ if (rx->urbs) {
+ spin_unlock_irq(&rx->lock);
+ r = 0;
+ goto error;
+ }
+ rx->urbs = urbs;
+ rx->urbs_count = RX_URBS_COUNT;
+ spin_unlock_irq(&rx->lock);
+
+ for (i = 0; i < RX_URBS_COUNT; i++) {
+ r = usb_submit_urb(urbs[i], GFP_KERNEL);
+ if (r)
+ goto error_submit;
+ }
+
+ return 0;
+
+error_submit:
+ for (i = 0; i < RX_URBS_COUNT; i++)
+ usb_kill_urb(urbs[i]);
+ spin_lock_irq(&rx->lock);
+ rx->urbs = NULL;
+ rx->urbs_count = 0;
+ spin_unlock_irq(&rx->lock);
+error:
+ if (urbs) {
+ for (i = 0; i < RX_URBS_COUNT; i++)
+ free_rx_urb(urbs[i]);
+ }
+ return r;
+}
+
+int plfxlc_usb_enable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+ int r;
+
+ mutex_lock(&rx->setup_mutex);
+ r = __lf_x_usb_enable_rx(usb);
+ if (!r)
+ usb->rx_usb_enabled = 1;
+
+ mutex_unlock(&rx->setup_mutex);
+
+ return r;
+}
+
+static void __lf_x_usb_disable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+ unsigned long flags;
+ unsigned int count;
+ struct urb **urbs;
+ int i;
+
+ spin_lock_irqsave(&rx->lock, flags);
+ urbs = rx->urbs;
+ count = rx->urbs_count;
+ spin_unlock_irqrestore(&rx->lock, flags);
+
+ if (!urbs)
+ return;
+
+ for (i = 0; i < count; i++) {
+ usb_kill_urb(urbs[i]);
+ free_rx_urb(urbs[i]);
+ }
+ kfree(urbs);
+ rx->urbs = NULL;
+ rx->urbs_count = 0;
+}
+
+void plfxlc_usb_disable_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ __lf_x_usb_disable_rx(usb);
+ usb->rx_usb_enabled = 0;
+ mutex_unlock(&rx->setup_mutex);
+}
+
+void plfxlc_usb_disable_tx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ unsigned long flags;
+
+ clear_bit(PLF_BIT_ENABLED, &tx->enabled);
+
+ /* kill all submitted tx-urbs */
+ usb_kill_anchored_urbs(&tx->submitted);
+
+ spin_lock_irqsave(&tx->lock, flags);
+ WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+ WARN_ON(tx->submitted_urbs != 0);
+ tx->submitted_urbs = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ /* The stopped state is ignored, relying on ieee80211_wake_queues()
+ * in a potentially following plfxlc_usb_enable_tx().
+ */
+}
+
+void plfxlc_usb_enable_tx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ set_bit(PLF_BIT_ENABLED, &tx->enabled);
+ tx->submitted_urbs = 0;
+ ieee80211_wake_queues(plfxlc_usb_to_hw(usb));
+ tx->stopped = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+void plfxlc_tx_urb_complete(struct urb *urb)
+{
+ struct ieee80211_tx_info *info;
+ struct plfxlc_usb *usb;
+ struct sk_buff *skb;
+
+ skb = urb->context;
+ info = IEEE80211_SKB_CB(skb);
+ /* grab 'usb' pointer before handing off the skb (since
+ * it might be freed by plfxlc_mac_tx_to_dev or mac80211)
+ */
+ usb = &plfxlc_hw_mac(info->rate_driver_data[0])->chip.usb;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ case -EINVAL:
+ case -ENODEV:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ break;
+ default:
+ dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
+ }
+
+ plfxlc_mac_tx_to_dev(skb, urb->status);
+ plfxlc_send_packet_from_data_queue(usb);
+ usb_free_urb(urb);
+}
+
+static inline void init_usb_rx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_rx *rx = &usb->rx;
+
+ spin_lock_init(&rx->lock);
+ mutex_init(&rx->setup_mutex);
+
+ if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH)
+ rx->usb_packet_size = 512;
+ else
+ rx->usb_packet_size = 64;
+
+ if (rx->fragment_length != 0)
+ dev_dbg(plfxlc_usb_dev(usb), "fragment_length error\n");
+}
+
+static inline void init_usb_tx(struct plfxlc_usb *usb)
+{
+ struct plfxlc_usb_tx *tx = &usb->tx;
+
+ spin_lock_init(&tx->lock);
+ clear_bit(PLF_BIT_ENABLED, &tx->enabled);
+ tx->stopped = 0;
+ skb_queue_head_init(&tx->submitted_skbs);
+ init_usb_anchor(&tx->submitted);
+}
+
+void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw,
+ struct usb_interface *intf)
+{
+ memset(usb, 0, sizeof(*usb));
+ usb->intf = usb_get_intf(intf);
+ usb_set_intfdata(usb->intf, hw);
+ init_usb_tx(usb);
+ init_usb_rx(usb);
+}
+
+void plfxlc_usb_release(struct plfxlc_usb *usb)
+{
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_usb_disable_tx(usb);
+ plfxlc_usb_disable_rx(usb);
+ usb_set_intfdata(usb->intf, NULL);
+ usb_put_intf(usb->intf);
+}
+
+const char *plfxlc_speed(enum usb_device_speed speed)
+{
+ switch (speed) {
+ case USB_SPEED_LOW:
+ return "low";
+ case USB_SPEED_FULL:
+ return "full";
+ case USB_SPEED_HIGH:
+ return "high";
+ default:
+ return "unknown";
+ }
+}
+
+int plfxlc_usb_init_hw(struct plfxlc_usb *usb)
+{
+ int r;
+
+ r = usb_reset_configuration(plfxlc_usb_to_usbdev(usb));
+ if (r) {
+ dev_err(plfxlc_usb_dev(usb), "cfg reset failed (%d)\n", r);
+ return r;
+ }
+ return 0;
+}
+
+static void get_usb_req(struct usb_device *udev, void *buffer,
+ u32 buffer_len, enum plf_usb_req_enum usb_req_id,
+ struct plf_usb_req *usb_req)
+{
+ __be32 payload_len_nw = cpu_to_be32(buffer_len + FCS_LEN);
+ const u8 *buffer_src_p = buffer;
+ u8 *buffer_dst = usb_req->buf;
+ u32 temp_usb_len = 0;
+
+ usb_req->id = cpu_to_be32(usb_req_id);
+ usb_req->len = cpu_to_be32(0);
+
+ /* Copy buffer length into the transmitted buffer, as it is important
+ * for the Rx MAC to know its exact length.
+ */
+ if (usb_req->id == cpu_to_be32(USB_REQ_BEACON_WR)) {
+ memcpy(buffer_dst, &payload_len_nw, sizeof(payload_len_nw));
+ buffer_dst += sizeof(payload_len_nw);
+ temp_usb_len += sizeof(payload_len_nw);
+ }
+
+ memcpy(buffer_dst, buffer_src_p, buffer_len);
+ buffer_dst += buffer_len;
+ buffer_src_p += buffer_len;
+ temp_usb_len += buffer_len;
+
+ /* Set the FCS_LEN (4) bytes as 0 for CRC checking. */
+ memset(buffer_dst, 0, FCS_LEN);
+ buffer_dst += FCS_LEN;
+ temp_usb_len += FCS_LEN;
+
+ /* Pad the transmitted packet to a 4-byte boundary. */
+ if (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT) {
+ memset(buffer_dst, 0, PURELIFI_BYTE_NUM_ALIGNMENT -
+ (temp_usb_len %
+ PURELIFI_BYTE_NUM_ALIGNMENT));
+ buffer_dst += PURELIFI_BYTE_NUM_ALIGNMENT -
+ (temp_usb_len %
+ PURELIFI_BYTE_NUM_ALIGNMENT);
+ temp_usb_len += PURELIFI_BYTE_NUM_ALIGNMENT -
+ (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT);
+ }
+
+ usb_req->len = cpu_to_be32(temp_usb_len);
+}
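To make the framing arithmetic concrete, a worked example assuming PURELIFI_BYTE_NUM_ALIGNMENT is 4 (as the comments suggest) and a hypothetical 10-byte beacon payload:

    /* USB_REQ_BEACON_WR, buffer_len = 10:
     *    4 bytes  length prefix, holding 10 + FCS_LEN = 14 (big-endian)
     * + 10 bytes  payload
     * +  4 bytes  zeroed FCS
     * = 18 bytes; 18 % 4 == 2, so 2 padding bytes are appended
     * => usb_req->len = cpu_to_be32(20)
     */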
+
+int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ int buffer_len, enum plf_usb_req_enum usb_req_id,
+ usb_complete_t complete_fn,
+ void *context)
+{
+ struct usb_device *udev = interface_to_usbdev(usb->ez_usb);
+ struct urb *urb;
+ int r;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
+ (void *)buffer, buffer_len, complete_fn, context);
+
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r) {
+ usb_free_urb(urb);
+ dev_err(&udev->dev, "Async write submit failed (%d)\n", r);
+ }
+
+ return r;
+}
+
+int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len,
+ enum plf_usb_req_enum usb_req_id)
+{
+ struct usb_device *udev = interface_to_usbdev(ez_usb);
+ unsigned char *dma_buffer = NULL;
+ struct plf_usb_req usb_req;
+ int usb_bulk_msg_len;
+ int actual_length;
+ int r;
+
+ get_usb_req(udev, buffer, buffer_len, usb_req_id, &usb_req);
+ usb_bulk_msg_len = sizeof(__le32) + sizeof(__le32) +
+ be32_to_cpu(usb_req.len);
+
+ dma_buffer = kmemdup(&usb_req, usb_bulk_msg_len, GFP_KERNEL);
+
+ if (!dma_buffer) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, EP_DATA_OUT),
+ dma_buffer, usb_bulk_msg_len,
+ &actual_length, USB_BULK_MSG_TIMEOUT_MS);
+ kfree(dma_buffer);
+error:
+ if (r)
+ dev_err(&udev->dev, "usb_bulk_msg failed (%d)\n", r);
+
+ return r;
+}
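For reference, a typical synchronous call as issued later in this file's probe() path:

    r = plfxlc_usb_wreq(usb->ez_usb, hw_address, ETH_ALEN, USB_REQ_MAC_WR);
    if (r < 0)
            dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);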
+
+static void slif_data_plane_sap_timer_callb(struct timer_list *t)
+{
+ struct plfxlc_usb *usb = from_timer(usb, t, tx.tx_retry_timer);
+
+ plfxlc_send_packet_from_data_queue(usb);
+ mod_timer(&usb->tx.tx_retry_timer, jiffies + TX_RETRY_BACKOFF_JIFF);
+}
+
+static void sta_queue_cleanup_timer_callb(struct timer_list *t)
+{
+ struct plfxlc_usb *usb = from_timer(usb, t, sta_queue_cleanup);
+ struct plfxlc_usb_tx *tx = &usb->tx;
+ int sidx;
+
+ for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) {
+ if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG))
+ continue;
+ if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) {
+ tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG;
+ } else {
+ eth_zero_addr(tx->station[sidx].mac);
+ tx->station[sidx].flag = 0;
+ }
+ }
+ mod_timer(&usb->sta_queue_cleanup, jiffies + STA_QUEUE_CLEANUP_JIFF);
+}
+
+static int probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ u8 serial_number[PURELIFI_SERIAL_LEN];
+ struct ieee80211_hw *hw = NULL;
+ struct plfxlc_usb_tx *tx;
+ struct plfxlc_chip *chip;
+ struct plfxlc_usb *usb;
+ u8 hw_address[ETH_ALEN];
+ unsigned int i;
+ int r = 0;
+
+ hw = plfxlc_mac_alloc_hw(intf);
+
+ if (!hw) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ chip = &plfxlc_hw_mac(hw)->chip;
+ usb = &chip->usb;
+ usb->ez_usb = intf;
+ tx = &usb->tx;
+
+ r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number);
+ if (r) {
+ dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r);
+ goto error;
+ }
+
+ chip->unit_type = STA;
+ dev_err(&intf->dev, "Unit type is station");
+
+ r = plfxlc_mac_preinit_hw(hw, hw_address);
+ if (r) {
+ dev_err(&intf->dev, "Init mac failed (%d)\n", r);
+ goto error;
+ }
+
+ r = ieee80211_register_hw(hw);
+ if (r) {
+ dev_err(&intf->dev, "Register device failed (%d)\n", r);
+ goto error;
+ }
+
+ if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) ==
+ PURELIFI_XL_VENDOR_ID_0) &&
+ (le16_to_cpu(interface_to_usbdev(intf)->descriptor.idProduct) ==
+ PURELIFI_XL_PRODUCT_ID_0)) {
+ r = plfxlc_download_xl_firmware(intf);
+ } else {
+ r = plfxlc_download_fpga(intf);
+ }
+ if (r != 0) {
+ dev_err(&intf->dev, "FPGA download failed (%d)\n", r);
+ goto error_unreg;
+ }
+
+ tx->mac_fifo_full = 0;
+ spin_lock_init(&tx->lock);
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_usb_init_hw(usb);
+ if (r < 0) {
+ dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r);
+ goto error_unreg;
+ }
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
+ if (r < 0) {
+ dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r);
+ goto error_unreg;
+ }
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_chip_set_rate(chip, 8);
+ if (r < 0) {
+ dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r);
+ goto error_unreg;
+ }
+
+ msleep(PLF_MSLEEP_TIME);
+ r = plfxlc_usb_wreq(usb->ez_usb,
+ hw_address, ETH_ALEN, USB_REQ_MAC_WR);
+ if (r < 0) {
+ dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);
+ goto error_unreg;
+ }
+
+ plfxlc_chip_enable_rxtx(chip);
+
+ /* Initialise the data plane Tx queue */
+ for (i = 0; i < MAX_STA_NUM; i++) {
+ skb_queue_head_init(&tx->station[i].data_list);
+ tx->station[i].flag = 0;
+ }
+
+ tx->station[STA_BROADCAST_INDEX].flag |= STATION_CONNECTED_FLAG;
+ for (i = 0; i < ETH_ALEN; i++)
+ tx->station[STA_BROADCAST_INDEX].mac[i] = 0xFF;
+
+ timer_setup(&tx->tx_retry_timer, slif_data_plane_sap_timer_callb, 0);
+ tx->tx_retry_timer.expires = jiffies + TX_RETRY_BACKOFF_JIFF;
+ add_timer(&tx->tx_retry_timer);
+
+ timer_setup(&usb->sta_queue_cleanup,
+ sta_queue_cleanup_timer_callb, 0);
+ usb->sta_queue_cleanup.expires = jiffies + STA_QUEUE_CLEANUP_JIFF;
+ add_timer(&usb->sta_queue_cleanup);
+
+ plfxlc_mac_init_hw(hw);
+ usb->initialized = true;
+ return 0;
+error_unreg:
+ ieee80211_unregister_hw(hw);
+error:
+ if (hw) {
+ plfxlc_mac_release(plfxlc_hw_mac(hw));
+ ieee80211_free_hw(hw);
+ }
+ dev_err(&intf->dev, "pureLifi:Device error");
+ return r;
+}
+
+static void disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf);
+ struct plfxlc_mac *mac;
+ struct plfxlc_usb *usb;
+
+ /* Either something really bad happened, or
+ * we're just dealing with a DEVICE_INSTALLER.
+ */
+ if (!hw)
+ return;
+
+ mac = plfxlc_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ del_timer_sync(&usb->tx.tx_retry_timer);
+ del_timer_sync(&usb->sta_queue_cleanup);
+
+ ieee80211_unregister_hw(hw);
+
+ plfxlc_chip_disable_rxtx(&mac->chip);
+
+ /* If the disconnect has been caused by a removal of the
+ * driver module, the reset allows reloading of the driver. If the
+ * reset will not be executed here, the upload of the firmware in the
+ * probe function caused by the reloading of the driver will fail.
+ */
+ usb_reset_device(interface_to_usbdev(intf));
+
+ plfxlc_mac_release(mac);
+ ieee80211_free_hw(hw);
+}
+
+static void plfxlc_usb_resume(struct plfxlc_usb *usb)
+{
+ struct plfxlc_mac *mac = plfxlc_usb_to_mac(usb);
+ int r;
+
+ r = plfxlc_op_start(plfxlc_usb_to_hw(usb));
+ if (r < 0) {
+ dev_warn(plfxlc_usb_dev(usb),
+ "Device resume failed (%d)\n", r);
+
+ if (usb->was_running)
+ set_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+
+ usb_queue_reset_device(usb->intf);
+ return;
+ }
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+ r = plfxlc_restore_settings(mac);
+ if (r < 0) {
+ dev_dbg(plfxlc_usb_dev(usb),
+ "Restore failed (%d)\n", r);
+ return;
+ }
+ }
+}
+
+static void plfxlc_usb_stop(struct plfxlc_usb *usb)
+{
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_usb_disable_tx(usb);
+ plfxlc_usb_disable_rx(usb);
+
+ usb->initialized = false;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct plfxlc_mac *mac;
+ struct plfxlc_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = plfxlc_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ usb->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+
+ plfxlc_usb_stop(usb);
+
+ return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct plfxlc_mac *mac;
+ struct plfxlc_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = plfxlc_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ if (usb->was_running)
+ plfxlc_usb_resume(usb);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static struct plfxlc_usb *get_plfxlc_usb(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf);
+ struct plfxlc_mac *mac;
+
+ /* Either something really bad happened, or
+ * we're just dealing with a DEVICE_INSTALLER.
+ */
+ if (!hw)
+ return NULL;
+
+ mac = plfxlc_hw_mac(hw);
+ return &mac->chip.usb;
+}
+
+static int suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct plfxlc_usb *pl = get_plfxlc_usb(interface);
+ struct plfxlc_mac *mac;
+
+ if (!pl)
+ return -ENODEV;
+ if (!pl->initialized)
+ return 0;
+ mac = plfxlc_usb_to_mac(pl);
+ pl->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags);
+ plfxlc_usb_stop(pl);
+ return 0;
+}
+
+static int resume(struct usb_interface *interface)
+{
+ struct plfxlc_usb *pl = get_plfxlc_usb(interface);
+
+ if (!pl)
+ return -ENODEV;
+ if (pl->was_running)
+ plfxlc_usb_resume(pl);
+ return 0;
+}
+
+#endif
+
+static struct usb_driver driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = usb_ids,
+ .probe = probe,
+ .disconnect = disconnect,
+ .pre_reset = pre_reset,
+ .post_reset = post_reset,
+#ifdef CONFIG_PM
+ .suspend = suspend,
+ .resume = resume,
+#endif
+ .disable_hub_initiated_lpm = 1,
+};
+
+static int __init usb_init(void)
+{
+ int r;
+
+ r = usb_register(&driver);
+ if (r) {
+ pr_err("%s usb_register() failed %d\n", driver.name, r);
+ return r;
+ }
+
+ pr_debug("Driver initialized :%s\n", driver.name);
+ return 0;
+}
+
+static void __exit usb_exit(void)
+{
+ usb_deregister(&driver);
+ pr_debug("%s %s\n", driver.name, __func__);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB driver for pureLiFi devices");
+MODULE_AUTHOR("pureLiFi");
+MODULE_VERSION("1.0");
+MODULE_FIRMWARE("plfxlc/lifi-x.bin");
+MODULE_DEVICE_TABLE(usb, usb_ids);
+
+module_init(usb_init);
+module_exit(usb_exit);
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.h b/drivers/net/wireless/purelifi/plfxlc/usb.h
new file mode 100644
index 000000000000..ba2defd593bd
--- /dev/null
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 pureLiFi
+ */
+
+#ifndef PLFXLC_USB_H
+#define PLFXLC_USB_H
+
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+
+#include "intf.h"
+
+#define USB_BULK_MSG_TIMEOUT_MS 2000
+
+#define PURELIFI_X_VENDOR_ID_0 0x16C1
+#define PURELIFI_X_PRODUCT_ID_0 0x1CDE
+#define PURELIFI_XC_VENDOR_ID_0 0x2EF5
+#define PURELIFI_XC_PRODUCT_ID_0 0x0008
+#define PURELIFI_XL_VENDOR_ID_0 0x2EF5
+#define PURELIFI_XL_PRODUCT_ID_0 0x000A /* Station */
+
+#define PLF_FPGA_STATUS_LEN 2
+#define PLF_FPGA_STATE_LEN 9
+#define PLF_BULK_TLEN 16384
+#define PLF_FPGA_MG 6 /* Magic check */
+#define PLF_XL_BUF_LEN 64
+#define PLF_MSG_STATUS_OFFSET 7
+
+#define PLF_USB_TIMEOUT 1000
+#define PLF_MSLEEP_TIME 200
+
+#define PURELIFI_URB_RETRY_MAX 5
+
+#define plfxlc_usb_dev(usb) (&(usb)->intf->dev)
+
+/* Tx retry backoff and station-queue cleanup intervals (milliseconds) */
+#define TX_RETRY_BACKOFF_MS 10
+#define STA_QUEUE_CLEANUP_MS 5000
+
+/* The same intervals, converted to jiffies */
+#define TX_RETRY_BACKOFF_JIFF ((TX_RETRY_BACKOFF_MS * HZ) / 1000)
+#define STA_QUEUE_CLEANUP_JIFF ((STA_QUEUE_CLEANUP_MS * HZ) / 1000)
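With the common HZ=100 these expand to 1 and 500 jiffies. A rounding-safe equivalent would be the kernel's msecs_to_jiffies() helper, which rounds up where the open-coded division truncates when HZ does not divide 1000 evenly (a sketch, not what the driver uses):

    #include <linux/jiffies.h>

    #define TX_RETRY_BACKOFF_JIFF   msecs_to_jiffies(TX_RETRY_BACKOFF_MS)
    #define STA_QUEUE_CLEANUP_JIFF  msecs_to_jiffies(STA_QUEUE_CLEANUP_MS)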
+
+/* Ensures that MAX_TRANSFER_SIZE is even. */
+#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1)
+#define plfxlc_urb_dev(urb) (&(urb)->dev->dev)
+
+#define STATION_FIFO_ALMOST_FULL_MESSAGE 0
+#define STATION_FIFO_ALMOST_FULL_NOT_MESSAGE 1
+#define STATION_CONNECT_MESSAGE 2
+#define STATION_DISCONNECT_MESSAGE 3
+
+int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len,
+ enum plf_usb_req_enum usb_req_id);
+void plfxlc_tx_urb_complete(struct urb *urb);
+
+enum {
+ USB_MAX_RX_SIZE = 4800,
+ USB_MAX_EP_INT_BUFFER = 64,
+};
+
+struct plfxlc_usb_interrupt {
+ spinlock_t lock; /* spin lock for usb interrupt buffer */
+ struct urb *urb;
+ void *buffer;
+ int interval;
+};
+
+#define RX_URBS_COUNT 5
+
+struct plfxlc_usb_rx {
+ spinlock_t lock; /* spin lock for rx urb */
+ struct mutex setup_mutex; /* protects rx urb setup and teardown */
+ u8 fragment[2 * USB_MAX_RX_SIZE];
+ unsigned int fragment_length;
+ unsigned int usb_packet_size;
+ struct urb **urbs;
+ int urbs_count;
+};
+
+struct plf_station {
+ /* 7...3 | 2 | 1 | 0 |
+ * Reserved | Heartbeat | FIFO full | Connected |
+ */
+ unsigned char flag;
+ unsigned char mac[ETH_ALEN];
+ struct sk_buff_head data_list;
+};
+
+struct plfxlc_firmware_file {
+ u32 total_files;
+ u32 total_size;
+ u32 size;
+ u32 start_addr;
+ u32 control_packets;
+} __packed;
+
+#define STATION_CONNECTED_FLAG 0x1
+#define STATION_FIFO_FULL_FLAG 0x2
+#define STATION_HEARTBEAT_FLAG 0x4
+#define STATION_ACTIVE_FLAG 0xFD
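Note that STATION_ACTIVE_FLAG (0xFD) is not a flag bit but the byte-wide complement of STATION_FIFO_FULL_FLAG, used as an AND mask, as in rx_urb_complete() above:

    /* 0xFD == (u8)~STATION_FIFO_FULL_FLAG: clears the FIFO-full bit
     * while preserving the Connected and Heartbeat bits.
     */
    tx->station[sidx].flag &= STATION_ACTIVE_FLAG;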
+
+#define PURELIFI_SERIAL_LEN 256
+#define STA_BROADCAST_INDEX (AP_USER_LIMIT)
+#define MAX_STA_NUM (AP_USER_LIMIT + 1)
+
+struct plfxlc_usb_tx {
+ unsigned long enabled;
+ spinlock_t lock; /* spinlock for USB tx */
+ u8 mac_fifo_full;
+ struct sk_buff_head submitted_skbs;
+ struct usb_anchor submitted;
+ int submitted_urbs;
+ bool stopped;
+ struct timer_list tx_retry_timer;
+ struct plf_station station[MAX_STA_NUM];
+};
+
+/* Contains the usb parts. The structure doesn't require a lock because intf
+ * will not be changed after initialization.
+ */
+struct plfxlc_usb {
+ struct timer_list sta_queue_cleanup;
+ struct plfxlc_usb_rx rx;
+ struct plfxlc_usb_tx tx;
+ struct usb_interface *intf;
+ struct usb_interface *ez_usb;
+ u8 req_buf[64]; /* plfxlc_usb_iowrite16v needs 62 bytes */
+ u8 sidx; /* store last served */
+ bool rx_usb_enabled;
+ bool initialized;
+ bool was_running;
+ bool link_up;
+};
+
+enum endpoints {
+ EP_DATA_IN = 2,
+ EP_DATA_OUT = 8,
+};
+
+enum devicetype {
+ DEVICE_LIFI_X = 0,
+ DEVICE_LIFI_XC = 1,
+ DEVICE_LIFI_XL = 1,
+};
+
+enum {
+ PLF_BIT_ENABLED = 1,
+ PLF_BIT_MAX = 2,
+};
+
+int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ int buffer_len, enum plf_usb_req_enum usb_req_id,
+ usb_complete_t complete_fn, void *context);
+
+static inline struct usb_device *
+plfxlc_usb_to_usbdev(struct plfxlc_usb *usb)
+{
+ return interface_to_usbdev(usb->intf);
+}
+
+static inline struct ieee80211_hw *
+plfxlc_intf_to_hw(struct usb_interface *intf)
+{
+ return usb_get_intfdata(intf);
+}
+
+static inline struct ieee80211_hw *
+plfxlc_usb_to_hw(struct plfxlc_usb *usb)
+{
+ return plfxlc_intf_to_hw(usb->intf);
+}
+
+void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw,
+ struct usb_interface *intf);
+void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb);
+void plfxlc_usb_release(struct plfxlc_usb *usb);
+void plfxlc_usb_disable_rx(struct plfxlc_usb *usb);
+void plfxlc_usb_enable_tx(struct plfxlc_usb *usb);
+void plfxlc_usb_disable_tx(struct plfxlc_usb *usb);
+int plfxlc_usb_tx(struct plfxlc_usb *usb, struct sk_buff *skb);
+int plfxlc_usb_enable_rx(struct plfxlc_usb *usb);
+int plfxlc_usb_init_hw(struct plfxlc_usb *usb);
+const char *plfxlc_speed(enum usb_device_speed speed);
+
+/* Firmware declarations */
+int plfxlc_download_xl_firmware(struct usb_interface *intf);
+int plfxlc_download_fpga(struct usb_interface *intf);
+
+int plfxlc_upload_mac_and_serial(struct usb_interface *intf,
+ unsigned char *hw_address,
+ unsigned char *serial_number);
+
+#endif /* PLFXLC_USB_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 84b15a655eab..73e6f9408b51 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -352,7 +352,8 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ unsigned int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -448,7 +449,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
- u32 short_cookie = prandom_u32();
+ u32 short_cookie = get_random_u32();
u16 flags = 0;
u16 freq;
@@ -500,7 +501,7 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
switch (vif->wdev.iftype) {
case NL80211_IFTYPE_STATION:
- if (idx != 0 || !vif->wdev.current_bss)
+ if (idx != 0 || !vif->wdev.connected)
return -ENOENT;
ether_addr_copy(mac, vif->bssid);
@@ -531,8 +532,8 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -547,7 +548,8 @@ static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -568,7 +570,8 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -584,7 +587,7 @@ static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
static int
qtnf_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index)
+ int link_id, u8 key_index)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -720,16 +723,15 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
return -EFAULT;
}
- if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- }
ret = qtnf_cmd_send_disconnect(vif, reason_code);
if (ret)
pr_err("VIF%u.%u: failed to disconnect\n",
mac->macid, vif->vifid);
- if (vif->wdev.current_bss) {
+ if (vif->wdev.connected) {
netif_carrier_off(vif->netdev);
cfg80211_disconnected(vif->netdev, reason_code,
NULL, 0, true, GFP_KERNEL);
@@ -745,7 +747,7 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_supported_band *sband;
- const struct cfg80211_chan_def *chandef = &wdev->chandef;
+ const struct cfg80211_chan_def *chandef = wdev_chandef(wdev, 0);
struct ieee80211_channel *chan;
int ret;
@@ -765,7 +767,7 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
survey->channel = chan;
survey->filled = 0x0;
- if (chan == chandef->chan)
+ if (chandef && chan == chandef->chan)
survey->filled = SURVEY_INFO_IN_USE;
ret = qtnf_cmd_get_chan_stats(mac, chan->center_freq, survey);
@@ -778,7 +780,7 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
static int
qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct cfg80211_chan_def *chandef)
+ unsigned int link_id, struct cfg80211_chan_def *chandef)
{
struct net_device *ndev = wdev->netdev;
struct qtnf_vif *vif;
@@ -1221,7 +1223,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
mac->macinfo.extended_capabilities_len;
}
- strlcpy(wiphy->fw_version, hw_info->fw_version,
+ strscpy(wiphy->fw_version, hw_info->fw_version,
sizeof(wiphy->fw_version));
wiphy->hw_version = hw_info->hw_version;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index c68563c83098..b1b73478d89b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -241,6 +241,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
struct qlink_auth_encr *aen;
int ret;
int i;
+ int n;
if (!qtnf_cmd_start_ap_can_fit(vif, s))
return -E2BIG;
@@ -280,8 +281,9 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
for (i = 0; i < QLINK_MAX_NR_CIPHER_SUITES; i++)
aen->ciphers_pairwise[i] =
cpu_to_le32(s->crypto.ciphers_pairwise[i]);
- aen->n_akm_suites = cpu_to_le32(s->crypto.n_akm_suites);
- for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
+ n = min(QLINK_MAX_NR_AKM_SUITES, s->crypto.n_akm_suites);
+ aen->n_akm_suites = cpu_to_le32(n);
+ for (i = 0; i < n; i++)
aen->akm_suites[i] = cpu_to_le32(s->crypto.akm_suites[i]);
aen->control_port = s->crypto.control_port;
aen->control_port_no_encrypt = s->crypto.control_port_no_encrypt;
@@ -965,7 +967,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_rx_chain, hwinfo->total_tx_chain,
hwinfo->fw_ver);
- strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
+ strscpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
hwinfo->hw_version = hw_ver;
return 0;
@@ -2005,7 +2007,7 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
dwell_active = scan_req->duration;
dwell_passive = scan_req->duration;
} else if (wdev->iftype == NL80211_IFTYPE_STATION &&
- wdev->current_bss) {
+ wdev->connected) {
/* let device select dwell based on traffic conditions */
dwell_active = QTNF_SCAN_TIME_AUTO;
dwell_passive = QTNF_SCAN_TIME_AUTO;
@@ -2076,6 +2078,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
struct qlink_auth_encr *aen;
int ret;
int i;
+ int n;
u32 connect_flags = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2132,9 +2135,10 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
aen->ciphers_pairwise[i] =
cpu_to_le32(sme->crypto.ciphers_pairwise[i]);
- aen->n_akm_suites = cpu_to_le32(sme->crypto.n_akm_suites);
+ n = min(QLINK_MAX_NR_AKM_SUITES, sme->crypto.n_akm_suites);
+ aen->n_akm_suites = cpu_to_le32(n);
- for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
+ for (i = 0; i < n; i++)
aen->akm_suites[i] = cpu_to_le32(sme->crypto.akm_suites[i]);
aen->control_port = sme->crypto.control_port;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 8dc80574d08d..4fafe370101a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -189,7 +189,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
vif->mac->macid, vif->vifid,
join_info->bssid, chandef.chan->hw_value);
- if (!vif->wdev.ssid_len) {
+ if (!vif->wdev.u.client.ssid_len) {
pr_warn("VIF%u.%u: SSID unknown for BSS:%pM\n",
vif->mac->macid, vif->vifid,
join_info->bssid);
@@ -197,7 +197,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
goto done;
}
- ie = kzalloc(2 + vif->wdev.ssid_len, GFP_KERNEL);
+ ie = kzalloc(2 + vif->wdev.u.client.ssid_len, GFP_KERNEL);
if (!ie) {
pr_warn("VIF%u.%u: IE alloc failed for BSS:%pM\n",
vif->mac->macid, vif->vifid,
@@ -207,14 +207,15 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
}
ie[0] = WLAN_EID_SSID;
- ie[1] = vif->wdev.ssid_len;
- memcpy(ie + 2, vif->wdev.ssid, vif->wdev.ssid_len);
+ ie[1] = vif->wdev.u.client.ssid_len;
+ memcpy(ie + 2, vif->wdev.u.client.ssid,
+ vif->wdev.u.client.ssid_len);
bss = cfg80211_inform_bss(wiphy, chandef.chan,
CFG80211_BSS_FTYPE_UNKNOWN,
join_info->bssid, 0,
WLAN_CAPABILITY_ESS, 100,
- ie, 2 + vif->wdev.ssid_len,
+ ie, 2 + vif->wdev.u.client.ssid_len,
0, GFP_KERNEL);
if (!bss) {
pr_warn("VIF%u.%u: can't connect to unknown BSS: %pM\n",
@@ -470,14 +471,14 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
- !vif->wdev.current_bss)
+ !vif->wdev.connected)
continue;
if (!vif->netdev)
continue;
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->netdev, &chandef);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
mutex_unlock(&vif->wdev.mtx);
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 840728ed57b2..8c23a77d1671 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1146,8 +1146,8 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
}
tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_pcie_pearl_rx_poll, 10);
+ netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ qtnf_pcie_pearl_rx_poll, 10);
ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
ipc_int.arg = ps;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 9534e1b33780..d83362578374 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -1159,8 +1159,8 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
}
tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_topaz_rx_poll, 10);
+ netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ qtnf_topaz_rx_poll, 10);
ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
ipc_int.arg = ts;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 2dda4c5d7427..674461fa7fb3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -1721,8 +1721,8 @@ enum qlink_chan_stat {
* @time_on: amount of time radio operated on that channel.
* @time_tx: amount of time radio spent transmitting on the channel.
* @time_rx: amount of time radio spent receiving on the channel.
- * @cca_busy: amount of time the the primary channel was busy.
- * @cca_busy_ext: amount of time the the secondary channel was busy.
+ * @cca_busy: amount of time the primary channel was busy.
+ * @cca_busy_ext: amount of time the secondary channel was busy.
* @time_scan: amount of radio spent scanning on the channel.
* @chan_noise: channel noise.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index dec6ffdf07c4..ddfc16de1b26 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1023,9 +1023,9 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
- char bbp_state;
- char rf_state;
+ bool put_to_sleep;
+ u8 bbp_state;
+ u8 rf_state;
put_to_sleep = (state != STATE_AWAKE);
@@ -1561,7 +1561,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
@@ -1654,7 +1654,8 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
* IEEE80211 stack callback functions.
*/
static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -1667,7 +1668,7 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
if (queue != 0)
return -EINVAL;
- if (rt2x00mac_conf_tx(hw, vif, queue, params))
+ if (rt2x00mac_conf_tx(hw, vif, link_id, queue, params))
return -EINVAL;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.h b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h
index b8187b6de143..979d5fd8babf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h
@@ -939,7 +939,7 @@
#define DEFAULT_TXPOWER 39
#define __CLAMP_TX(__txpower) \
- clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
#define TXPOWER_FROM_DEV(__txpower) \
((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
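The char-to-u8/s8 conversions in this series are not cosmetic: plain char has implementation-defined signedness, so the same device byte can clamp to opposite ends of the range depending on architecture. An illustrative case, assuming a non-negative tx-power range:

    /* 0x87 seen as plain char is -121 where char is signed (e.g. x86)
     * but 135 where it is unsigned (e.g. arm, s390); the former clamps
     * to MIN_TXPOWER, the latter to MAX_TXPOWER.
     */
    clamp_t(char, 0x87, MIN_TXPOWER, MAX_TXPOWER);  /* ambiguous    */
    clamp_t(u8,   0x87, MIN_TXPOWER, MAX_TXPOWER);  /* well-defined */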
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 8faa0a80e73a..cd6371e25062 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1176,9 +1176,9 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
- char bbp_state;
- char rf_state;
+ bool put_to_sleep;
+ u8 bbp_state;
+ u8 rf_state;
put_to_sleep = (state != STATE_AWAKE);
@@ -1856,7 +1856,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.h b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h
index 7e64aee2a172..ba362675c52c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h
@@ -1219,6 +1219,6 @@
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT2500PCI_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index bb5ed6630645..4f3b0e6c6256 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -984,9 +984,9 @@ static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev,
u16 reg;
u16 reg2;
unsigned int i;
- char put_to_sleep;
- char bbp_state;
- char rf_state;
+ bool put_to_sleep;
+ u8 bbp_state;
+ u8 rf_state;
put_to_sleep = (state != STATE_AWAKE);
@@ -1663,7 +1663,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.h b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h
index 0c070288a140..746f0e950b76 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h
@@ -839,6 +839,6 @@
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT2500USB_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index d758e8874457..de2ee5ffc34e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -1016,6 +1016,8 @@
*/
#define MAC_STATUS_CFG 0x1200
#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
+#define MAC_STATUS_CFG_BBP_RF_BUSY_TX FIELD32(0x00000001)
+#define MAC_STATUS_CFG_BBP_RF_BUSY_RX FIELD32(0x00000002)
/*
* PWR_PIN_CFG:
@@ -2739,6 +2741,7 @@ enum rt2800_eeprom_word {
#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_EXTERNAL_PA FIELD16(0x8000)
/*
* EEPROM LNA
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index deddb0afd312..12b700c7b9c3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -198,6 +198,26 @@ static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value);
}
+static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ rt2800_bbp_write(rt2x00dev, 159, value);
+}
+
+static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ return rt2800_bbp_read(rt2x00dev, 159);
+}
+
+static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 195, reg);
+ rt2800_bbp_write(rt2x00dev, 196, value);
+}
+
static u8 rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word)
{
@@ -1801,8 +1821,8 @@ int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* do not have a choice if some connected STA is not capable to
* receive the same amount of data like the others.
*/
- if (sta->ht_cap.ht_supported) {
- drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++;
+ if (sta->deflink.ht_cap.ht_supported) {
+ drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]++;
rt2800_set_max_psdu_len(rt2x00dev);
}
@@ -1847,8 +1867,8 @@ int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
int wcid = sta_priv->wcid;
- if (sta->ht_cap.ht_supported) {
- drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--;
+ if (sta->deflink.ht_cap.ht_supported) {
+ drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]--;
rt2800_set_max_psdu_len(rt2x00dev);
}
@@ -2143,6 +2163,48 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
+static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev,
+ const struct rt2x00_field32 mask)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
+ if (!rt2x00_get_field32(reg, mask))
+ return 0;
+
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
+ return -EACCES;
+}
+
+static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u8 value;
+
+ /*
+ * BBP was enabled after firmware was loaded,
+ * but we need to reactivate it now.
+ */
+ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ msleep(1);
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ value = rt2800_bbp_read(rt2x00dev, 0);
+ if ((value != 0xff) && (value != 0x00))
+ return 0;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
+ return -EACCES;
+}
+
static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -3310,10 +3372,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
- static const char r55_bt_rev[] = {0x83, 0x83,
+ static const u8 r55_bt_rev[] = {0x83, 0x83,
0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
- static const char r59_bt_rev[] = {0x0e, 0x0e,
+ static const u8 r59_bt_rev[] = {0x0e, 0x0e,
0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
@@ -3322,7 +3384,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 59,
r59_bt_rev[idx]);
} else {
- static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+ static const u8 r59_bt[] = {0x8b, 0x8b, 0x8b,
0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
0x88, 0x88, 0x86, 0x85, 0x84};
@@ -3330,10 +3392,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
}
} else {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
- static const char r55_nonbt_rev[] = {0x23, 0x23,
+ static const u8 r55_nonbt_rev[] = {0x23, 0x23,
0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
- static const char r59_nonbt_rev[] = {0x07, 0x07,
+ static const u8 r59_nonbt_rev[] = {0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
@@ -3344,14 +3406,14 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT6352)) {
- static const char r59_non_bt[] = {0x8f, 0x8f,
+ static const u8 r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
rt2800_rfcsr_write(rt2x00dev, 59,
r59_non_bt[idx]);
} else if (rt2x00_rt(rt2x00dev, RT5350)) {
- static const char r59_non_bt[] = {0x0b, 0x0b,
+ static const u8 r59_non_bt[] = {0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a,
0x0a, 0x09, 0x08, 0x07, 0x07, 0x06};
@@ -3793,16 +3855,23 @@ static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev,
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr);
}
+
+ if (conf_is_ht40(conf)) {
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2f);
+ } else {
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1a);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x40);
+ }
}
static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
struct ieee80211_channel *chan,
int power_level) {
u16 eeprom, target_power, max_power;
- u32 mac_sys_ctrl, mac_status;
+ u32 mac_sys_ctrl;
u32 reg;
u8 bbp;
- int i;
/* hardware unit is 0.5dBm, limited to 23.5dBm */
power_level *= 2;
@@ -3838,16 +3907,8 @@ static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
/* Disable Tx/Rx */
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
/* Check MAC Tx/Rx idle */
- for (i = 0; i < 10000; i++) {
- mac_status = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
- if (mac_status & 0x3)
- usleep_range(50, 200);
- else
- break;
- }
-
- if (i == 10000)
- rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n");
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
+ rt2x00_warn(rt2x00dev, "RF busy while configuring ALC\n");
if (chan->center_freq > 2457) {
bbp = rt2800_bbp_read(rt2x00dev, 30);
@@ -3974,23 +4035,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
}
-static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
+static s8 rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
unsigned int channel,
- char txpower)
+ s8 txpower)
{
if (rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT3883))
txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
if (channel <= 14)
- return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
+ return clamp_t(s8, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
if (rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT3883))
- return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
+ return clamp_t(s8, txpower, MIN_A_TXPOWER_3593,
MAX_A_TXPOWER_3593);
else
- return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
+ return clamp_t(s8, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
}
static void rt3883_bbp_adjust(struct rt2x00_dev *rt2x00dev,
@@ -4164,7 +4225,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 86, 0);
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
}
if (rf->channel <= 14) {
@@ -4365,7 +4429,45 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
- rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ if (rt2x00_rt(rt2x00dev, RT5592))
+ rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT6352)) {
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags)) {
+ reg = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ reg |= 0x00000101;
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, reg);
+
+ reg = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+ reg |= 0x00000101;
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, reg);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
+
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT,
+ 0x36303636);
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN,
+ 0x6C6C6B6C);
+ rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN,
+ 0x6C6C6B6C);
+ }
}
bbp = rt2800_bbp_read(rt2x00dev, 4);
@@ -5644,7 +5746,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2x00_rt(rt2x00dev, RT3883) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5867,7 +5970,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
} else if (rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
- rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
@@ -6129,6 +6232,27 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_soc(rt2x00dev)) {
+ struct clk *clk = clk_get_sys("bus", NULL);
+ int rate;
+
+ if (IS_ERR(clk)) {
+ clk = clk_get_sys("cpu", NULL);
+
+ if (IS_ERR(clk)) {
+ rate = 125;
+ } else {
+ rate = clk_get_rate(clk) / 3000000;
+ clk_put(clk);
+ }
+ } else {
+ rate = clk_get_rate(clk) / 1000000;
+ clk_put(clk);
+ }
+
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0);
@@ -6212,46 +6336,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
return 0;
}
-static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
- if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
- return 0;
-
- udelay(REGISTER_BUSY_DELAY);
- }
-
- rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
- return -EACCES;
-}
-
-static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u8 value;
-
- /*
- * BBP was enabled after firmware was loaded,
- * but we need to reactivate it now.
- */
- rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
- rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
- msleep(1);
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- value = rt2800_bbp_read(rt2x00dev, 0);
- if ((value != 0xff) && (value != 0x00))
- return 0;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
- return -EACCES;
-}
static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
{
@@ -6916,26 +7000,6 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
-static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
- const u8 reg, const u8 value)
-{
- rt2800_bbp_write(rt2x00dev, 195, reg);
- rt2800_bbp_write(rt2x00dev, 196, value);
-}
-
-static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
- const u8 reg, const u8 value)
-{
- rt2800_bbp_write(rt2x00dev, 158, reg);
- rt2800_bbp_write(rt2x00dev, 159, value);
-}
-
-static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
-{
- rt2800_bbp_write(rt2x00dev, 158, reg);
- return rt2800_bbp_read(rt2x00dev, 159);
-}
-
static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev)
{
u8 bbp;
@@ -8398,6 +8462,1519 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_led_open_drain_enable(rt2x00dev);
}
+static void rt2800_rf_self_txdc_cal(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfb5r1_org, rfb7r1_org, rfvalue;
+ u32 mac0518, mac051c, mac0528, mac052c;
+ u8 i;
+
+ mac0518 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ mac051c = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ mac0528 = rt2800_register_read(rt2x00dev, RF_CONTROL2);
+ mac052c = rt2800_register_read(rt2x00dev, RF_BYPASS2);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0xC);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x3306);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, 0x3330);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0xfffff);
+ rfb5r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ rfb7r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, 0x4);
+ for (i = 0; i < 100; ++i) {
+ usleep_range(50, 100);
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ if ((rfvalue & 0x04) != 0x4)
+ break;
+ }
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rfb5r1_org);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, 0x4);
+ for (i = 0; i < 100; ++i) {
+ usleep_range(50, 100);
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1);
+ if ((rfvalue & 0x04) != 0x4)
+ break;
+ }
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, rfb7r1_org);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, mac0518);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, mac051c);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, mac0528);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, mac052c);
+}
+
+static int rt2800_calcrcalibrationcode(struct rt2x00_dev *rt2x00dev, int d1, int d2)
+{
+ int calcode = ((d2 - d1) * 1000) / 43;
+
+ if ((calcode % 10) >= 5)
+ calcode += 10;
+ calcode = (calcode / 10);
+
+ return calcode;
+}
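A worked example of the rounding above, with d1 = 0 and d2 = 5:

    /* calcode = (5 - 0) * 1000 / 43 = 116   (integer division)
     * 116 % 10 == 6 >= 5, so calcode = 126
     * 126 / 10 = 12, i.e. 5000/430 = 11.63 rounded to the nearest integer
     */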
+
+static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u32 savemacsysctrl;
+ u8 saverfb0r1, saverfb0r34, saverfb0r35;
+ u8 saverfb5r4, saverfb5r17, saverfb5r18;
+ u8 saverfb5r19, saverfb5r20;
+ u8 savebbpr22, savebbpr47, savebbpr49;
+ u8 bytevalue = 0;
+ int rcalcode;
+ u8 r_cal_code = 0;
+ s8 d1 = 0, d2 = 0;
+ u8 rfvalue;
+ u32 MAC_RF_BYPASS0, MAC_RF_CONTROL0, MAC_PWR_PIN_CFG;
+ u32 maccfg;
+
+ saverfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ saverfb0r34 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 34);
+ saverfb0r35 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ saverfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ saverfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ saverfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+
+ savebbpr22 = rt2800_bbp_read(rt2x00dev, 22);
+ savebbpr47 = rt2800_bbp_read(rt2x00dev, 47);
+ savebbpr49 = rt2800_bbp_read(rt2x00dev, 49);
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ MAC_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ MAC_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ MAC_PWR_PIN_CFG = rt2800_register_read(rt2x00dev, PWR_PIN_CFG);
+
+ maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ maccfg &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "Wait MAC Tx Status to MAX !!!\n");
+
+ maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ maccfg &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "Wait MAC Rx Status to MAX !!!\n");
+
+ rfvalue = (MAC_RF_BYPASS0 | 0x3004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, rfvalue);
+ rfvalue = (MAC_RF_CONTROL0 | (~0x3002));
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, rfvalue);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x27);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0x83);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, 0x13);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x1);
+
+ rt2800_bbp_write(rt2x00dev, 47, 0x04);
+ rt2800_bbp_write(rt2x00dev, 22, 0x80);
+ usleep_range(100, 200);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 49);
+ if (bytevalue > 128)
+ d1 = bytevalue - 256;
+ else
+ d1 = (s8)bytevalue;
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x01);
+
+ rt2800_bbp_write(rt2x00dev, 22, 0x80);
+ usleep_range(100, 200);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 49);
+ if (bytevalue > 128)
+ d2 = bytevalue - 256;
+ else
+ d2 = (s8)bytevalue;
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+
+ rcalcode = rt2800_calcrcalibrationcode(rt2x00dev, d1, d2);
+ if (rcalcode < 0)
+ r_cal_code = 256 + rcalcode;
+ else
+ r_cal_code = (u8)rcalcode;
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 7, r_cal_code);
+
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+
+ bytevalue = rt2800_bbp_read(rt2x00dev, 21);
+ bytevalue |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bytevalue);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 21);
+ bytevalue &= (~0x1);
+ rt2800_bbp_write(rt2x00dev, 21, bytevalue);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, saverfb0r1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, saverfb0r34);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, saverfb0r35);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20);
+
+ rt2800_bbp_write(rt2x00dev, 22, savebbpr22);
+ rt2800_bbp_write(rt2x00dev, 47, savebbpr47);
+ rt2800_bbp_write(rt2x00dev, 49, savebbpr49);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, MAC_PWR_PIN_CFG);
+}
+
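+/*
+ * RX DC offset (DCOC) calibration: enable the RF calibration path, kick
+ * the BBP DC estimation engine through the BBP 158/159 indirect pair
+ * and poll BBP 159 until the busy bit (0x40) clears, then restore the
+ * saved state.
+ */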
+static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u8 bbpreg = 0;
+ u32 macvalue = 0;
+ u8 saverfb0r2, saverfb5r4, saverfb7r4, rfvalue;
+ int i;
+
+ saverfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rfvalue = saverfb0r2;
+ rfvalue |= 0x03;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfvalue);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg |= 0x10;
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x8);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in RX RXDCOC calibration\n");
+
+ saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ saverfb5r4 = saverfb5r4 & (~0x40);
+ saverfb7r4 = saverfb7r4 & (~0x40);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x64);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+ bbpreg |= 0x48;
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ for (i = 0; i < 10000; i++) {
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ if ((bbpreg & 0x40) == 0)
+ break;
+ usleep_range(50, 100);
+ }
+
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg &= (~0x10);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, saverfb0r2);
+}
+
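+/*
+ * Integer square root of si: MSB-first binary search over the 16
+ * result bits, keeping each bit while root^2 stays <= si.
+ */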
+static u32 rt2800_do_sqrt_accumulation(u32 si)
+{
+ u32 root, root_pre, bit;
+ s8 i;
+
+ bit = 1 << 15;
+ root = 0;
+ for (i = 15; i >= 0; i = i - 1) {
+ root_pre = root + bit;
+ if ((root_pre * root_pre) <= si)
+ root = root_pre;
+ bit = bit >> 1;
+ }
+
+ return root;
+}
+
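+/*
+ * RX IQ imbalance calibration: with the BBP test tone looped back, step
+ * the RX VGA gain until the measured power is usable, read the I/Q
+ * mean/power/cross-correlation accumulators and derive the gain (g_imb)
+ * and phase (ph_rx) correction codes for each receive chain.
+ */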
+static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfb0r1, rfb0r2, rfb0r42;
+ u8 rfb4r0, rfb4r19;
+ u8 rfb5r3, rfb5r4, rfb5r17, rfb5r18, rfb5r19, rfb5r20;
+ u8 rfb6r0, rfb6r19;
+ u8 rfb7r3, rfb7r4, rfb7r17, rfb7r18, rfb7r19, rfb7r20;
+
+ u8 bbp1, bbp4;
+ u8 bbpr241, bbpr242;
+ u32 i;
+ u8 ch_idx;
+ u8 bbpval;
+ u8 rfval, vga_idx = 0;
+ int mi = 0, mq = 0, si = 0, sq = 0, riq = 0;
+ int sigma_i, sigma_q, r_iq, g_rx;
+ int g_imb;
+ int ph_rx;
+ u32 savemacsysctrl = 0;
+ u32 orig_RF_CONTROL0 = 0;
+ u32 orig_RF_BYPASS0 = 0;
+ u32 orig_RF_CONTROL1 = 0;
+ u32 orig_RF_BYPASS1 = 0;
+ u32 orig_RF_CONTROL3 = 0;
+ u32 orig_RF_BYPASS3 = 0;
+ u32 bbpval1 = 0;
+ static const u8 rf_vga_table[] = {0x20, 0x21, 0x22, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f};
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ orig_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ orig_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ orig_RF_CONTROL1 = rt2800_register_read(rt2x00dev, RF_CONTROL1);
+ orig_RF_BYPASS1 = rt2800_register_read(rt2x00dev, RF_BYPASS1);
+ orig_RF_CONTROL3 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ orig_RF_BYPASS3 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+
+ bbp1 = rt2800_bbp_read(rt2x00dev, 1);
+ bbp4 = rt2800_bbp_read(rt2x00dev, 4);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x0);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
+ rt2x00_warn(rt2x00dev, "Timeout waiting for MAC status in RXIQ calibration\n");
+
+ bbpval = bbp4 & (~0x18);
+ bbpval = bbp4 | 0x00;
+ rt2800_bbp_write(rt2x00dev, 4, bbpval);
+
+ bbpval = rt2800_bbp_read(rt2x00dev, 21);
+ bbpval = bbpval | 1;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+ bbpval = bbpval & 0xfe;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL1, 0x00000202);
+ rt2800_register_write(rt2x00dev, RF_BYPASS1, 0x00000303);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0101);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0000);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0xf1f1);
+
+ rfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rfb4r0 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0);
+ rfb4r19 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 19);
+ rfb5r3 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ rfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ rfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ rfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ rfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ rfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+
+ rfb6r0 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0);
+ rfb6r19 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 19);
+ rfb7r3 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3);
+ rfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ rfb7r17 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17);
+ rfb7r18 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18);
+ rfb7r19 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19);
+ rfb7r20 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x87);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0x27);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x38);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x38);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x80);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0xC1);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x60);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x0);
+ rt2800_bbp_write(rt2x00dev, 24, 0x0);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 5, 0x0);
+
+ bbpr241 = rt2800_bbp_read(rt2x00dev, 241);
+ bbpr242 = rt2800_bbp_read(rt2x00dev, 242);
+
+ rt2800_bbp_write(rt2x00dev, 241, 0x10);
+ rt2800_bbp_write(rt2x00dev, 242, 0x84);
+ rt2800_bbp_write(rt2x00dev, 244, 0x31);
+
+ bbpval = rt2800_bbp_dcoc_read(rt2x00dev, 3);
+ bbpval = bbpval & (~0x7);
+ rt2800_bbp_dcoc_write(rt2x00dev, 3, bbpval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006);
+ usleep_range(1, 200);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003376);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006);
+ udelay(1);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x06);
+ rt2800_bbp_write(rt2x00dev, 24, 0x06);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 23, 0x02);
+ rt2800_bbp_write(rt2x00dev, 24, 0x02);
+ }
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx = ch_idx + 1) {
+ if (ch_idx == 0) {
+ rfval = rfb0r1 & (~0x3);
+ rfval = rfb0r1 | 0x1;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
+ rfval = rfb0r2 & (~0x33);
+ rfval = rfb0r2 | 0x11;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
+ rfval = rfb0r42 & (~0x50);
+ rfval = rfb0r42 | 0x10;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006);
+ udelay(1);
+
+ bbpval = bbp1 & (~0x18);
+ bbpval = bbpval | 0x00;
+ rt2800_bbp_write(rt2x00dev, 1, bbpval);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x00);
+ } else {
+ rfval = rfb0r1 & (~0x3);
+ rfval = rfb0r1 | 0x2;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
+ rfval = rfb0r2 & (~0x33);
+ rfval = rfb0r2 | 0x22;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
+ rfval = rfb0r42 & (~0x50);
+ rfval = rfb0r42 | 0x40;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002006);
+ udelay(1);
+
+ bbpval = bbp1 & (~0x18);
+ bbpval = bbpval | 0x08;
+ rt2800_bbp_write(rt2x00dev, 1, bbpval);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x01);
+ }
+ usleep_range(500, 1500);
+
+ vga_idx = 0;
+ while (vga_idx < 11) {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rf_vga_table[vga_idx]);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rf_vga_table[vga_idx]);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x93);
+
+ for (i = 0; i < 10000; i++) {
+ bbpval = rt2800_bbp_read(rt2x00dev, 159);
+ if ((bbpval & 0xff) == 0x93)
+ usleep_range(50, 100);
+ else
+ break;
+ }
+
+ if ((bbpval & 0xff) == 0x93) {
+ rt2x00_warn(rt2x00dev, "Fatal Error: Calibration doesn't finish");
+ goto restore_value;
+ }
+ for (i = 0; i < 5; i++) {
+ u32 bbptemp = 0;
+ u8 value = 0;
+ int result = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x1e);
+ rt2800_bbp_write(rt2x00dev, 159, i);
+ rt2800_bbp_write(rt2x00dev, 158, 0x22);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 24);
+ rt2800_bbp_write(rt2x00dev, 158, 0x21);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 16);
+ rt2800_bbp_write(rt2x00dev, 158, 0x20);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 8);
+ rt2800_bbp_write(rt2x00dev, 158, 0x1f);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + value;
+
+ if (i < 2 && (bbptemp & 0x800000))
+ result = (bbptemp & 0xffffff) - 0x1000000;
+ else if (i == 4)
+ result = bbptemp;
+ else
+ result = bbptemp;
+
+ if (i == 0)
+ mi = result / 4096;
+ else if (i == 1)
+ mq = result / 4096;
+ else if (i == 2)
+ si = bbptemp / 4096;
+ else if (i == 3)
+ sq = bbptemp / 4096;
+ else
+ riq = result / 4096;
+ }
+
+ bbpval1 = si - mi * mi;
+ rt2x00_dbg(rt2x00dev,
+ "RXIQ si=%d, sq=%d, riq=%d, bbpval %d, vga_idx %d",
+ si, sq, riq, bbpval1, vga_idx);
+
+ if (bbpval1 >= (100 * 100))
+ break;
+
+ if (bbpval1 <= 100)
+ vga_idx = vga_idx + 9;
+ else if (bbpval1 <= 158)
+ vga_idx = vga_idx + 8;
+ else if (bbpval1 <= 251)
+ vga_idx = vga_idx + 7;
+ else if (bbpval1 <= 398)
+ vga_idx = vga_idx + 6;
+ else if (bbpval1 <= 630)
+ vga_idx = vga_idx + 5;
+ else if (bbpval1 <= 1000)
+ vga_idx = vga_idx + 4;
+ else if (bbpval1 <= 1584)
+ vga_idx = vga_idx + 3;
+ else if (bbpval1 <= 2511)
+ vga_idx = vga_idx + 2;
+ else
+ vga_idx = vga_idx + 1;
+ }
+
+ sigma_i = rt2800_do_sqrt_accumulation(100 * (si - mi * mi));
+ sigma_q = rt2800_do_sqrt_accumulation(100 * (sq - mq * mq));
+ r_iq = 10 * (riq - (mi * mq));
+
+ rt2x00_dbg(rt2x00dev, "Sigma_i=%d, Sigma_q=%d, R_iq=%d", sigma_i, sigma_q, r_iq);
+
+ if (sigma_i <= 1400 && sigma_i >= 1000 &&
+ (sigma_i - sigma_q) <= 112 &&
+ (sigma_i - sigma_q) >= -112 &&
+ mi <= 32 && mi >= -32 &&
+ mq <= 32 && mq >= -32) {
+ r_iq = 10 * (riq - (mi * mq));
+ rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n",
+ sigma_i, sigma_q, r_iq);
+
+ g_rx = (1000 * sigma_q) / sigma_i;
+ g_imb = ((-2) * 128 * (1000 - g_rx)) / (1000 + g_rx);
+ ph_rx = (r_iq * 2292) / (sigma_i * sigma_q);
+
+ if (ph_rx > 20 || ph_rx < -20) {
+ ph_rx = 0;
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+
+ if (g_imb > 12 || g_imb < -12) {
+ g_imb = 0;
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+ } else {
+ g_imb = 0;
+ ph_rx = 0;
+ rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n",
+ sigma_i, sigma_q, r_iq);
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+
+ if (ch_idx == 0) {
+ rt2800_bbp_write(rt2x00dev, 158, 0x37);
+ rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f);
+ rt2800_bbp_write(rt2x00dev, 158, 0x35);
+ rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 158, 0x55);
+ rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f);
+ rt2800_bbp_write(rt2x00dev, 158, 0x53);
+ rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f);
+ }
+ }
+
+restore_value:
+ rt2800_bbp_write(rt2x00dev, 158, 0x3);
+ bbpval = rt2800_bbp_read(rt2x00dev, 159);
+ rt2800_bbp_write(rt2x00dev, 159, (bbpval | 0x07));
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ rt2800_bbp_write(rt2x00dev, 1, bbp1);
+ rt2800_bbp_write(rt2x00dev, 4, bbp4);
+ rt2800_bbp_write(rt2x00dev, 241, bbpr241);
+ rt2800_bbp_write(rt2x00dev, 242, bbpr242);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+ bbpval = rt2800_bbp_read(rt2x00dev, 21);
+ bbpval |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+ usleep_range(10, 200);
+ bbpval &= 0xfe;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfb0r1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfb0r2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, rfb4r0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 19, rfb4r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rfb5r3);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, rfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, rfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, rfb5r20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, rfb6r0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 19, rfb6r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, rfb7r3);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, rfb7r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, rfb7r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, rfb7r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, rfb7r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, rfb7r20);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, orig_RF_CONTROL0);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, orig_RF_BYPASS0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL1, orig_RF_CONTROL1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS1, orig_RF_BYPASS1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, orig_RF_CONTROL3);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, orig_RF_BYPASS3);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+}
+
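+/*
+ * Save the 13 RF registers that the LOFT/IQ calibration overwrites for
+ * one chain (banks 0/4/5 for chain 0, banks 0/6/7 for chain 1) so they
+ * can be restored by rt2800_rf_configrecover().
+ */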
+static void rt2800_rf_configstore(struct rt2x00_dev *rt2x00dev,
+ struct rf_reg_pair rf_reg_record[][13], u8 chain)
+{
+ u8 rfvalue = 0;
+
+ if (chain == CHAIN_0) {
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rf_reg_record[CHAIN_0][0].bank = 0;
+ rf_reg_record[CHAIN_0][0].reg = 1;
+ rf_reg_record[CHAIN_0][0].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rf_reg_record[CHAIN_0][1].bank = 0;
+ rf_reg_record[CHAIN_0][1].reg = 2;
+ rf_reg_record[CHAIN_0][1].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ rf_reg_record[CHAIN_0][2].bank = 0;
+ rf_reg_record[CHAIN_0][2].reg = 35;
+ rf_reg_record[CHAIN_0][2].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rf_reg_record[CHAIN_0][3].bank = 0;
+ rf_reg_record[CHAIN_0][3].reg = 42;
+ rf_reg_record[CHAIN_0][3].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0);
+ rf_reg_record[CHAIN_0][4].bank = 4;
+ rf_reg_record[CHAIN_0][4].reg = 0;
+ rf_reg_record[CHAIN_0][4].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 2);
+ rf_reg_record[CHAIN_0][5].bank = 4;
+ rf_reg_record[CHAIN_0][5].reg = 2;
+ rf_reg_record[CHAIN_0][5].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 34);
+ rf_reg_record[CHAIN_0][6].bank = 4;
+ rf_reg_record[CHAIN_0][6].reg = 34;
+ rf_reg_record[CHAIN_0][6].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ rf_reg_record[CHAIN_0][7].bank = 5;
+ rf_reg_record[CHAIN_0][7].reg = 3;
+ rf_reg_record[CHAIN_0][7].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ rf_reg_record[CHAIN_0][8].bank = 5;
+ rf_reg_record[CHAIN_0][8].reg = 4;
+ rf_reg_record[CHAIN_0][8].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ rf_reg_record[CHAIN_0][9].bank = 5;
+ rf_reg_record[CHAIN_0][9].reg = 17;
+ rf_reg_record[CHAIN_0][9].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ rf_reg_record[CHAIN_0][10].bank = 5;
+ rf_reg_record[CHAIN_0][10].reg = 18;
+ rf_reg_record[CHAIN_0][10].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ rf_reg_record[CHAIN_0][11].bank = 5;
+ rf_reg_record[CHAIN_0][11].reg = 19;
+ rf_reg_record[CHAIN_0][11].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+ rf_reg_record[CHAIN_0][12].bank = 5;
+ rf_reg_record[CHAIN_0][12].reg = 20;
+ rf_reg_record[CHAIN_0][12].value = rfvalue;
+ } else if (chain == CHAIN_1) {
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rf_reg_record[CHAIN_1][0].bank = 0;
+ rf_reg_record[CHAIN_1][0].reg = 1;
+ rf_reg_record[CHAIN_1][0].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rf_reg_record[CHAIN_1][1].bank = 0;
+ rf_reg_record[CHAIN_1][1].reg = 2;
+ rf_reg_record[CHAIN_1][1].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ rf_reg_record[CHAIN_1][2].bank = 0;
+ rf_reg_record[CHAIN_1][2].reg = 35;
+ rf_reg_record[CHAIN_1][2].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rf_reg_record[CHAIN_1][3].bank = 0;
+ rf_reg_record[CHAIN_1][3].reg = 42;
+ rf_reg_record[CHAIN_1][3].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0);
+ rf_reg_record[CHAIN_1][4].bank = 6;
+ rf_reg_record[CHAIN_1][4].reg = 0;
+ rf_reg_record[CHAIN_1][4].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 2);
+ rf_reg_record[CHAIN_1][5].bank = 6;
+ rf_reg_record[CHAIN_1][5].reg = 2;
+ rf_reg_record[CHAIN_1][5].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 34);
+ rf_reg_record[CHAIN_1][6].bank = 6;
+ rf_reg_record[CHAIN_1][6].reg = 34;
+ rf_reg_record[CHAIN_1][6].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3);
+ rf_reg_record[CHAIN_1][7].bank = 7;
+ rf_reg_record[CHAIN_1][7].reg = 3;
+ rf_reg_record[CHAIN_1][7].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ rf_reg_record[CHAIN_1][8].bank = 7;
+ rf_reg_record[CHAIN_1][8].reg = 4;
+ rf_reg_record[CHAIN_1][8].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17);
+ rf_reg_record[CHAIN_1][9].bank = 7;
+ rf_reg_record[CHAIN_1][9].reg = 17;
+ rf_reg_record[CHAIN_1][9].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18);
+ rf_reg_record[CHAIN_1][10].bank = 7;
+ rf_reg_record[CHAIN_1][10].reg = 18;
+ rf_reg_record[CHAIN_1][10].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19);
+ rf_reg_record[CHAIN_1][11].bank = 7;
+ rf_reg_record[CHAIN_1][11].reg = 19;
+ rf_reg_record[CHAIN_1][11].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20);
+ rf_reg_record[CHAIN_1][12].bank = 7;
+ rf_reg_record[CHAIN_1][12].reg = 20;
+ rf_reg_record[CHAIN_1][12].value = rfvalue;
+ } else {
+ rt2x00_warn(rt2x00dev, "Unknown chain = %u\n", chain);
+ }
+}
+
+static void rt2800_rf_configrecover(struct rt2x00_dev *rt2x00dev,
+ struct rf_reg_pair rf_record[][13])
+{
+ u8 chain_index = 0, record_index = 0;
+ u8 bank = 0, rf_register = 0, value = 0;
+
+ for (chain_index = 0; chain_index < 2; chain_index++) {
+ for (record_index = 0; record_index < 13; record_index++) {
+ bank = rf_record[chain_index][record_index].bank;
+ rf_register = rf_record[chain_index][record_index].reg;
+ value = rf_record[chain_index][record_index].value;
+ rt2800_rfcsr_write_bank(rt2x00dev, bank, rf_register, value);
+ rt2x00_dbg(rt2x00dev, "bank: %d, rf_register: %d, value: %x\n",
+ bank, rf_register, value);
+ }
+ }
+}
+
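+/*
+ * Program the BBP tone generator (indirect registers 0xaa-0xad) used as
+ * the TX stimulus during calibration and enable it through BBP 244.
+ */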
+static void rt2800_setbbptonegenerator(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 158, 0xAA);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAB);
+ rt2800_bbp_write(rt2x00dev, 159, 0x0A);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAC);
+ rt2800_bbp_write(rt2x00dev, 159, 0x3F);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAD);
+ rt2800_bbp_write(rt2x00dev, 159, 0x3F);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x40);
+}
+
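+/*
+ * Trigger an FFT capture, select bin tidx and read the I/Q sample from
+ * MAC register 0x057c, returning its power I^2 + Q^2. When read_neg is
+ * set, the mirrored bin (0x40 - tidx) is read as well and the two
+ * halves are averaged.
+ */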
+static u32 rt2800_do_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx, u8 read_neg)
+{
+ u32 macvalue = 0;
+ int fftout_i = 0, fftout_q = 0;
+ u32 ptmp = 0, pint = 0;
+ u8 bbp = 0;
+ u8 tidxi;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x9b);
+
+ bbp = 0x9b;
+
+ while (bbp == 0x9b) {
+ usleep_range(10, 50);
+ bbp = rt2800_bbp_read(rt2x00dev, 159);
+ bbp = bbp & 0xff;
+ }
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xba);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ pint = ptmp;
+ rt2x00_dbg(rt2x00dev, "I = %d, Q = %d, power = %x\n", fftout_i, fftout_q, pint);
+ if (read_neg) {
+ pint = pint >> 1;
+ tidxi = 0x40 - tidx;
+ tidxi = tidxi & 0x3f;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xba);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ ptmp = ptmp >> 1;
+ pint = pint + ptmp;
+ }
+
+ return pint;
+}
+
+static u32 rt2800_read_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx)
+{
+ u32 macvalue = 0;
+ int fftout_i = 0, fftout_q = 0;
+ u32 ptmp = 0, pint = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xBA);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ pint = ptmp;
+
+ return pint;
+}
+
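+/*
+ * Write one LOFT DC compensation code: select the ALC index through BBP
+ * 0xb0, then write the I or Q value through BBP 0xb1/0xb2 (chain 0) or
+ * 0xb8/0xb9 (chain 1).
+ */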
+static void rt2800_write_dc(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 alc, u8 iorq, u8 dc)
+{
+ u8 bbp = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb0);
+ bbp = alc | 0x80;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ if (ch_idx == 0)
+ bbp = (iorq == 0) ? 0xb1 : 0xb2;
+ else
+ bbp = (iorq == 0) ? 0xb8 : 0xb9;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ bbp = dc;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+}
+
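+/*
+ * MSB-first binary search over the 6-bit I and Q DC codes for the
+ * values that minimize the leakage power measured at FFT bin 0x0a,
+ * storing the winners in dc_result[][][].
+ */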
+static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
+ u8 alc_idx, u8 dc_result[][RF_ALC_NUM][2])
+{
+ u32 p0 = 0, p1 = 0, pf = 0;
+ s8 idx0 = 0, idx1 = 0;
+ u8 idxf[] = {0x00, 0x00};
+ u8 ibit = 0x20;
+ u8 iorq;
+ s8 bidx;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x80);
+
+ for (bidx = 5; bidx >= 0; bidx--) {
+ for (iorq = 0; iorq <= 1; iorq++) {
+ if (idxf[iorq] == 0x20) {
+ idx0 = 0x20;
+ p0 = pf;
+ } else {
+ idx0 = idxf[iorq] - ibit;
+ idx0 = idx0 & 0x3F;
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx0);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ }
+
+ idx1 = idxf[iorq] + (bidx == 5 ? 0 : ibit);
+ idx1 = idx1 & 0x3F;
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx1);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+
+ rt2x00_dbg(rt2x00dev, "alc=%u, IorQ=%u, idx_final=%2x\n",
+ alc_idx, iorq, idxf[iorq]);
+ rt2x00_dbg(rt2x00dev, "p0=%x, p1=%x, pf=%x, idx_0=%x, idx_1=%x, ibit=%x\n",
+ p0, p1, pf, idx0, idx1, ibit);
+
+ if (bidx != 5 && pf <= p0 && pf < p1) {
+ /* current index is already the best; keep it */
+ } else if (p0 < p1) {
+ pf = p0;
+ idxf[iorq] = idx0 & 0x3F;
+ } else {
+ pf = p1;
+ idxf[iorq] = idx1 & 0x3F;
+ }
+ rt2x00_dbg(rt2x00dev, "IorQ=%u, idx_final[%u]:%x, pf:%8x\n",
+ iorq, iorq, idxf[iorq], pf);
+
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idxf[iorq]);
+ }
+ ibit = ibit >> 1;
+ }
+ dc_result[ch_idx][alc_idx][0] = idxf[0];
+ dc_result[ch_idx][alc_idx][1] = idxf[1];
+}
+
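+/*
+ * Search the 4-bit gain and 6-bit phase imbalance codes that minimize
+ * the image power at FFT bin 0x14: an MSB-first binary search followed
+ * by a fine exhaustive scan around the best candidate.
+ */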
+static void rt2800_iq_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 *ges, u8 *pes)
+{
+ u32 p0 = 0, p1 = 0, pf = 0;
+ s8 perr = 0, gerr = 0, iq_err = 0;
+ s8 pef = 0, gef = 0;
+ s8 psta, pend;
+ s8 gsta, gend;
+
+ u8 ibit = 0x20;
+ u8 first_search = 0x00, touch_neg_max = 0x00;
+ s8 idx0 = 0, idx1 = 0;
+ u8 gop;
+ u8 bbp = 0;
+ s8 bidx;
+
+ for (bidx = 5; bidx >= 1; bidx--) {
+ for (gop = 0; gop < 2; gop++) {
+ if (gop == 1 || bidx < 4) {
+ if (gop == 0)
+ iq_err = gerr;
+ else
+ iq_err = perr;
+
+ first_search = (gop == 0) ? (bidx == 3) : (bidx == 5);
+ touch_neg_max = (gop) ? ((iq_err & 0x0F) == 0x08) :
+ ((iq_err & 0x3F) == 0x20);
+
+ if (touch_neg_max) {
+ p0 = pf;
+ idx0 = iq_err;
+ } else {
+ idx0 = iq_err - ibit;
+ bbp = (ch_idx == 0) ? ((gop == 0) ? 0x28 : 0x29) :
+ ((gop == 0) ? 0x46 : 0x47);
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, idx0);
+
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+ }
+
+ idx1 = iq_err + (first_search ? 0 : ibit);
+ idx1 = (gop == 0) ? (idx1 & 0x0F) : (idx1 & 0x3F);
+
+ bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 :
+ (gop == 0) ? 0x46 : 0x47;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, idx1);
+
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+
+ rt2x00_dbg(rt2x00dev,
+ "p0=%x, p1=%x, pwer_final=%x, idx0=%x, idx1=%x, iq_err=%x, gop=%d, ibit=%x\n",
+ p0, p1, pf, idx0, idx1, iq_err, gop, ibit);
+
+ if (!(!first_search && pf <= p0 && pf < p1)) {
+ if (p0 < p1) {
+ pf = p0;
+ iq_err = idx0;
+ } else {
+ pf = p1;
+ iq_err = idx1;
+ }
+ }
+
+ bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 :
+ (gop == 0) ? 0x46 : 0x47;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, iq_err);
+
+ if (gop == 0)
+ gerr = iq_err;
+ else
+ perr = iq_err;
+
+ rt2x00_dbg(rt2x00dev, "IQCalibration pf=%8x (%2x, %2x) !\n",
+ pf, gerr & 0x0F, perr & 0x3F);
+ }
+ }
+
+ if (bidx > 0)
+ ibit = (ibit >> 1);
+ }
+ gerr = (gerr & 0x08) ? (gerr & 0x0F) - 0x10 : (gerr & 0x0F);
+ perr = (perr & 0x20) ? (perr & 0x3F) - 0x40 : (perr & 0x3F);
+
+ gerr = (gerr < -0x07) ? -0x07 : (gerr > 0x05) ? 0x05 : gerr;
+ gsta = gerr - 1;
+ gend = gerr + 2;
+
+ perr = (perr < -0x1f) ? -0x1f : (perr > 0x1d) ? 0x1d : perr;
+ psta = perr - 1;
+ pend = perr + 2;
+
+ for (gef = gsta; gef <= gend; gef = gef + 1)
+ for (pef = psta; pef <= pend; pef = pef + 1) {
+ bbp = (ch_idx == 0) ? 0x28 : 0x46;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, gef & 0x0F);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, pef & 0x3F);
+
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+ if (gef == gsta && pef == psta) {
+ pf = p1;
+ gerr = gef;
+ perr = pef;
+ } else if (pf > p1) {
+ pf = p1;
+ gerr = gef;
+ perr = pef;
+ }
+ rt2x00_dbg(rt2x00dev, "Fine IQCalibration p1=%8x pf=%8x (%2x, %2x) !\n",
+ p1, pf, gef & 0x0F, pef & 0x3F);
+ }
+
+ ges[ch_idx] = gerr & 0x0F;
+ pes[ch_idx] = perr & 0x3F;
+}
+
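+/* Configure the chain 0 RF (banks 0/4/5) for the aux TX loopback path. */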
+static void rt2800_rf_aux_tx0_loopback(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x21);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x10);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x1b);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 2, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 34, 0xee);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xd7);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0xa2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20);
+}
+
+static void rt2800_rf_aux_tx1_loopback(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x22);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x20);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x4b);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 2, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 34, 0xee);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, 0xd7);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, 0xa2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, 0x20);
+}
+
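+/*
+ * TX LOFT (LO feed-through) and IQ imbalance calibration: per chain and
+ * per ALC gain step, search the DC codes that minimize carrier leakage
+ * and program them; then search and program the TX gain/phase imbalance
+ * codes. All MAC/BBP/RF state touched here is saved and restored.
+ */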
+static void rt2800_loft_iq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ struct rf_reg_pair rf_store[CHAIN_NUM][13];
+ u32 macorg1 = 0;
+ u32 macorg2 = 0;
+ u32 macorg3 = 0;
+ u32 macorg4 = 0;
+ u32 macorg5 = 0;
+ u32 orig528 = 0;
+ u32 orig52c = 0;
+
+ u32 savemacsysctrl = 0;
+ u32 macvalue = 0;
+ u32 mac13b8 = 0;
+ u32 p0 = 0, p1 = 0;
+ u32 p0_idx10 = 0, p1_idx10 = 0;
+
+ u8 rfvalue;
+ u8 loft_dc_search_result[CHAIN_NUM][RF_ALC_NUM][2];
+ u8 ger[CHAIN_NUM], per[CHAIN_NUM];
+
+ u8 vga_gain[] = {14, 14};
+ u8 bbp = 0, ch_idx = 0, rf_alc_idx = 0, idx = 0;
+ u8 bbpr30, rfb0r39, rfb0r42;
+ u8 bbpr1;
+ u8 bbpr4;
+ u8 bbpr241, bbpr242;
+ u8 count_step;
+
+ static const u8 rf_gain[] = {0x00, 0x01, 0x02, 0x04, 0x08, 0x0c};
+ static const u8 rfvga_gain_table[] = {0x24, 0x25, 0x26, 0x27, 0x28, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3F};
+ static const u8 bbp_2324gain[] = {0x16, 0x14, 0x12, 0x10, 0x0c, 0x08};
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+ mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8);
+ orig528 = rt2800_register_read(rt2x00dev, RF_CONTROL2);
+ orig52c = rt2800_register_read(rt2x00dev, RF_BYPASS2);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n");
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n");
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++)
+ rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx);
+
+ bbpr30 = rt2800_bbp_read(rt2x00dev, 30);
+ rfb0r39 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 39);
+ rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+
+ rt2800_bbp_write(rt2x00dev, 30, 0x1F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x5B);
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ rt2800_setbbptonegenerator(rt2x00dev);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306);
+ rt2800_register_write(rt2x00dev, 0x13b8, 0x10);
+ udelay(1);
+
+ if (ch_idx == 0)
+ rt2800_rf_aux_tx0_loopback(rt2x00dev);
+ else
+ rt2800_rf_aux_tx1_loopback(rt2x00dev);
+
+ udelay(1);
+
+ if (ch_idx == 0)
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x05);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ if (ch_idx == 0)
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ else
+ rt2800_bbp_write(rt2x00dev, 159, 0x01);
+
+ vga_gain[ch_idx] = 18;
+ for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, bbp_2324gain[rf_alc_idx]);
+ rt2800_bbp_write(rt2x00dev, 24, bbp_2324gain[rf_alc_idx]);
+
+ macvalue = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macvalue &= (~0x0000F1F1);
+ macvalue |= (rf_gain[rf_alc_idx] << 4);
+ macvalue |= (rf_gain[rf_alc_idx] << 12);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macvalue);
+ macvalue = (0x0000F1F1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macvalue);
+
+ if (rf_alc_idx == 0) {
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x21);
+ for (; vga_gain[ch_idx] > 0;
+ vga_gain[ch_idx] = vga_gain[ch_idx] - 2) {
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x21);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ rt2x00_dbg(rt2x00dev, "LOFT AGC %d %d\n", p0, p1);
+ if ((p0 < 7000 * 7000) && (p1 < (7000 * 7000)))
+ break;
+ }
+
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00);
+
+ rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx],
+ rfvga_gain_table[vga_gain[ch_idx]]);
+
+ if (vga_gain[ch_idx] < 0)
+ vga_gain[ch_idx] = 0;
+ }
+
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+
+ rt2800_loft_search(rt2x00dev, ch_idx, rf_alc_idx, loft_dc_search_result);
+ }
+ }
+
+ for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) {
+ for (idx = 0; idx < 4; idx++) {
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ bbp = (idx << 2) + rf_alc_idx;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " ALC %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb1);
+ bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x00];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " I0 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb2);
+ bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x01];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " Q0 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb8);
+ bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x00];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " I1 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb9);
+ bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x01];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " Q1 %2x\n", bbp);
+ }
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_rf_configrecover(rt2x00dev, rf_store);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, orig528);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, orig52c);
+ rt2800_register_write(rt2x00dev, 0x13b8, mac13b8);
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+
+ bbpr1 = rt2800_bbp_read(rt2x00dev, 1);
+ bbpr4 = rt2800_bbp_read(rt2x00dev, 4);
+ bbpr241 = rt2800_bbp_read(rt2x00dev, 241);
+ bbpr242 = rt2800_bbp_read(rt2x00dev, 242);
+ mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n");
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n");
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000101);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 4, bbpr4 & (~0x18));
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 241, 0x14);
+ rt2800_bbp_write(rt2x00dev, 242, 0x80);
+ rt2800_bbp_write(rt2x00dev, 244, 0x31);
+ } else {
+ rt2800_setbbptonegenerator(rt2x00dev);
+ }
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306);
+ udelay(1);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F);
+
+ if (!test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000000);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1);
+ }
+
+ rt2800_register_write(rt2x00dev, 0x13b8, 0x00000010);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++)
+ rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx);
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x3B);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x3B);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x03);
+ rt2800_bbp_write(rt2x00dev, 159, 0x60);
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x80);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ if (ch_idx == 0) {
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ bbp = bbpr1 & (~0x18);
+ bbp = bbp | 0x00;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+ }
+ rt2800_rf_aux_tx0_loopback(rt2x00dev);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ rt2800_bbp_write(rt2x00dev, 159, 0x01);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags)) {
+ bbp = bbpr1 & (~0x18);
+ bbp = bbp | 0x08;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+ }
+ rt2800_rf_aux_tx1_loopback(rt2x00dev);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x05);
+ rt2800_bbp_write(rt2x00dev, 159, 0x04);
+
+ bbp = (ch_idx == 0) ? 0x28 : 0x46;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x06);
+ rt2800_bbp_write(rt2x00dev, 24, 0x06);
+ count_step = 1;
+ } else {
+ rt2800_bbp_write(rt2x00dev, 23, 0x1F);
+ rt2800_bbp_write(rt2x00dev, 24, 0x1F);
+ count_step = 2;
+ }
+
+ for (; vga_gain[ch_idx] < 19; vga_gain[ch_idx] = (vga_gain[ch_idx] + count_step)) {
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ p0_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x21);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags))
+ p1_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A);
+
+ rt2x00_dbg(rt2x00dev, "IQ AGC %d %d\n", p0, p1);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2x00_dbg(rt2x00dev, "IQ AGC IDX 10 %d %d\n", p0_idx10, p1_idx10);
+ if ((p0_idx10 > 7000 * 7000) || (p1_idx10 > 7000 * 7000)) {
+ if (vga_gain[ch_idx] != 0)
+ vga_gain[ch_idx] = vga_gain[ch_idx] - 1;
+ break;
+ }
+ }
+
+ if ((p0 > 2500 * 2500) || (p1 > 2500 * 2500))
+ break;
+ }
+
+ if (vga_gain[ch_idx] > 18)
+ vga_gain[ch_idx] = 18;
+ rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx],
+ rfvga_gain_table[vga_gain[ch_idx]]);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_iq_search(rt2x00dev, ch_idx, ger, per);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x28);
+ bbp = ger[CHAIN_0] & 0x0F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x29);
+ bbp = per[CHAIN_0] & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x46);
+ bbp = ger[CHAIN_1] & 0x0F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x47);
+ bbp = per[CHAIN_1] & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 1, bbpr1);
+ rt2800_bbp_write(rt2x00dev, 241, bbpr241);
+ rt2800_bbp_write(rt2x00dev, 242, bbpr242);
+ }
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 30, bbpr30);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, rfb0r39);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ rt2800_bbp_write(rt2x00dev, 4, bbpr4);
+
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_rf_configrecover(rt2x00dev, rf_store);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, 0x13b8, mac13b8);
+}
+
static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev,
bool set_bw, bool is_ht40)
{
@@ -8466,11 +10043,11 @@ static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal)
return 0;
}
-static char rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev)
+static s8 rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev)
{
unsigned int cnt;
u8 bbp_val;
- char cal_val;
+ s8 cal_val;
rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82);
@@ -8502,7 +10079,7 @@ static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev,
u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31;
int loop = 0, is_ht40, cnt;
u8 bbp_val, rf_val;
- char cal_r32_init, cal_r32_val, cal_diff;
+ s8 cal_r32_init, cal_r32_val, cal_diff;
u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05;
u8 saverfb5r06, saverfb5r07;
u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20;
@@ -9005,8 +10582,13 @@ static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C);
+ rt2800_r_calibration(rt2x00dev);
+ rt2800_rf_self_txdc_cal(rt2x00dev);
+ rt2800_rxdcoc_calibration(rt2x00dev);
rt2800_bw_filter_calibration(rt2x00dev, true);
rt2800_bw_filter_calibration(rt2x00dev, false);
+ rt2800_loft_iq_calibration(rt2x00dev);
+ rt2800_rxiq_calibration(rt2x00dev);
}
static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -9073,7 +10655,7 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Wait BBP/RF to wake up.
*/
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
return -EIO;
/*
@@ -9435,6 +11017,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rf = RF3853;
else if (rt2x00_rt(rt2x00dev, RT5350))
rf = RF5350;
+ else if (rt2x00_rt(rt2x00dev, RT5592))
+ rf = RF5592;
else
rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -9564,7 +11148,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
- if (rt2x00_rt(rt2x00dev, RT3352)) {
+ if (rt2x00_rt(rt2x00dev, RT3352) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
__set_bit(CAPABILITY_EXTERNAL_PA_TX0,
@@ -9575,6 +11160,18 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
&rt2x00dev->cap_flags);
}
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF2);
+
+ if (rt2x00_rt(rt2x00dev, RT6352) && eeprom != 0 && eeprom != 0xffff) {
+ if (!rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF2_EXTERNAL_PA)) {
+ __clear_bit(CAPABILITY_EXTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags);
+ __clear_bit(CAPABILITY_EXTERNAL_PA_TX1,
+ &rt2x00dev->cap_flags);
+ }
+ }
+
return 0;
}
@@ -9953,9 +11550,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *default_power1;
- char *default_power2;
- char *default_power3;
+ s8 *default_power1;
+ s8 *default_power2;
+ s8 *default_power3;
unsigned int i, tx_chains, rx_chains;
u32 reg;
@@ -10395,7 +11992,8 @@ int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold);
int rt2800_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -10411,7 +12009,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params);
if (retval)
return retval;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 1139405c0ebb..194de676df8f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -17,15 +17,25 @@
#define WCID_START 33
#define WCID_END 222
#define STA_IDS_SIZE (WCID_END - WCID_START + 2)
+#define CHAIN_0 0x0
+#define CHAIN_1 0x1
+#define RF_ALC_NUM 6
+#define CHAIN_NUM 2
+
+struct rf_reg_pair {
+ u8 bank;
+ u8 reg;
+ u8 value;
+};
/* RT2800 driver data structure */
struct rt2800_drv_data {
u8 calibration_bw20;
u8 calibration_bw40;
- char rx_calibration_bw20;
- char rx_calibration_bw40;
- char tx_calibration_bw20;
- char tx_calibration_bw40;
+ s8 rx_calibration_bw20;
+ s8 rx_calibration_bw40;
+ s8 tx_calibration_bw20;
+ s8 tx_calibration_bw40;
u8 bbp25;
u8 bbp26;
u8 txmixer_gain_24g;
@@ -245,7 +255,8 @@ void rt2800_get_key_seq(struct ieee80211_hw *hw,
struct ieee80211_key_seq *seq);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
int rt2800_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params);
u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 9f6fc40649be..07a6a5a9ce13 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -232,7 +232,7 @@ struct link_qual {
* VGC levels
* Hardware driver will tune the VGC level during each call
* to the link_tuner() callback function. This vgc_level is
- * is determined based on the link quality statistics like
+ * determined based on the link quality statistics like
* average RSSI and the false CCA count.
*
* In some cases the drivers need to differentiate between
@@ -1309,8 +1309,11 @@ void rt2x00queue_unmap_skb(struct queue_entry *entry);
*/
static inline struct data_queue *
rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
+ enum data_queue_qid queue)
{
+ if (queue >= rt2x00dev->ops->tx_queues && queue < IEEE80211_NUM_ACS)
+ queue = rt2x00dev->ops->tx_queues - 1;
+
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
@@ -1479,9 +1482,10 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes);
+ u64 changes);
int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 6bafdd991171..f895f560a185 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -70,6 +70,8 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
+ bss_conf);
struct rt2x00lib_erp erp;
memset(&erp, 0, sizeof(erp));
@@ -87,7 +89,7 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
erp.beacon_int = bss_conf->beacon_int;
/* Update the AID, this is needed for dynamic PS support */
- rt2x00dev->aid = bss_conf->assoc ? bss_conf->aid : 0;
+ rt2x00dev->aid = vif->cfg.assoc ? vif->cfg.aid : 0;
rt2x00dev->last_beacon = bss_conf->sync_tsf;
/* Update global beacon interval time, this is needed for PS support */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index e95c101c2711..3a035afcf7f9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1093,6 +1093,19 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
kfree(rt2x00dev->spec.channels_info);
}
+static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 2 * 1024, .blink_time = 220 },
+ { .throughput = 5 * 1024, .blink_time = 190 },
+ { .throughput = 10 * 1024, .blink_time = 170 },
+ { .throughput = 25 * 1024, .blink_time = 150 },
+ { .throughput = 54 * 1024, .blink_time = 130 },
+ { .throughput = 120 * 1024, .blink_time = 110 },
+ { .throughput = 265 * 1024, .blink_time = 80 },
+ { .throughput = 586 * 1024, .blink_time = 50 },
+};
+
static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -1174,6 +1187,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
#undef RT2X00_TASKLET_INIT
+ ieee80211_create_tpt_led_trigger(rt2x00dev->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ rt2x00_tpt_blink,
+ ARRAY_SIZE(rt2x00_tpt_blink));
+
/*
* Register HW.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index dea5babd30fe..4202c6517783 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -325,7 +325,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
*/
rt2x00queue_stop_queue(rt2x00dev->rx);
- /* Do not race with with link tuner. */
+ /* Do not race with link tuner. */
mutex_lock(&rt2x00dev->conf_mutex);
/*
@@ -574,7 +574,7 @@ EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
@@ -645,7 +645,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
rt2x00dev->link.count = 0;
- if (bss_conf->assoc)
+ if (vif->cfg.assoc)
rt2x00dev->intf_associated++;
else
rt2x00dev->intf_associated--;
@@ -665,7 +665,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
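conf_tx() grew a link_id so multi-link (MLO) interfaces can program EDCA per link; single-link hardware such as rt2x00 accepts the parameter and ignores it. A sketch of the minimal conforming shape (foo_program_edca() is hypothetical):

static int foo_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       unsigned int link_id, u16 queue,
                       const struct ieee80211_tx_queue_params *params)
{
        /* non-MLO hardware: one set of EDCA registers, link_id unused */
        return foo_program_edca(hw->priv, queue, params);
}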
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index fb1d31b2d52a..98df0aef8168 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -303,7 +303,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
if (sta) {
sta_priv = sta_to_rt2x00_sta(sta);
txdesc->u.ht.wcid = sta_priv->wcid;
- density = sta->ht_cap.ampdu_density;
+ density = sta->deflink.ht_cap.ampdu_density;
}
/*
@@ -318,7 +318,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* when using more than one tx stream (>MCS7).
*/
if (sta && txdesc->u.ht.mcs > 7 &&
- sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
@@ -758,7 +758,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
rt2x00queue_free_skb(intf->beacon);
- intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
+ intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
if (!intf->beacon->skb)
return -ENOMEM;
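Per-station HT/VHT capabilities, SMPS mode and rates moved under sta->deflink in the same MLO rework, so each link of a multi-link station can carry its own set; drivers that never negotiate more than one link simply read the default link. ieee80211_beacon_get() likewise gained a link_id, with 0 selecting that default. The accessor pattern in a sketch:

static bool foo_sta_wants_sgi(struct ieee80211_sta *sta)
{
        /* same fields as before, now behind the default link */
        return sta->deflink.ht_cap.cap &
               (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40);
}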
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 74c3d8cb3100..8fd22c69855f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -117,12 +117,12 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u16 buffer_length)
{
int status = 0;
- unsigned char *tb;
+ u8 *tb;
u16 off, len, bsize;
mutex_lock(&rt2x00dev->csr_mutex);
- tb = (char *)buffer;
+ tb = (u8 *)buffer;
off = offset;
len = buffer_length;
while (len && !status) {
@@ -215,7 +215,7 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
rd->cr.wLength = cpu_to_le16(sizeof(u32));
usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
+ (u8 *)(&rd->cr), &rd->reg, sizeof(rd->reg),
rt2x00usb_register_read_async_cb, rd);
usb_anchor_urb(urb, rt2x00dev->anchor);
if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
@@ -586,10 +586,10 @@ static void rt2x00usb_assign_endpoint(struct data_queue *queue,
if (queue->qid == QID_RX) {
pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
- queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
+ queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
} else {
pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
- queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
+ queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
}
if (!queue->usb_maxpacket)
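usb_maxpacket() lost its third argument because the direction is already encoded in the pipe built by usb_rcvbulkpipe()/usb_sndbulkpipe(); the flag was redundant and an easy copy-paste mistake. Roughly:

pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);  /* direction from pipe */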
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 82cfc2aadc2b..81db7f57c7e4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -1709,7 +1709,7 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
+ bool put_to_sleep;
put_to_sleep = (state != STATE_AWAKE);
@@ -2656,7 +2656,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
@@ -2799,7 +2799,8 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
* IEEE80211 stack callback functions.
*/
static int rt61pci_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2815,7 +2816,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params);
if (retval)
return retval;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h
index 5f208ad509bd..d72d0ffd1127 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h
@@ -1484,6 +1484,6 @@ struct hw_pairwise_ta_entry {
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT61PCI_H */
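clamp_t(char, ...) was fragile: whether plain char is signed is ABI-dependent, and on signed-char targets a value with bit 7 set turns negative and clamps to the lower bound instead of the upper one. A toy illustration (the numbers are illustrative, not real TX power limits):

u8   a = clamp_t(u8,   0x90, 10, 60);         /* 144 clamps to 60 */
char b = clamp_t(char, (char)0x90, 10, 60);   /* -112 clamps to 10 on
                                               * signed-char ABIs */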
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 5ff2c740c3ea..861035444374 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -1378,7 +1378,7 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
+ bool put_to_sleep;
put_to_sleep = (state != STATE_AWAKE);
@@ -2090,7 +2090,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
@@ -2218,7 +2218,8 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
* IEEE80211 stack callback functions.
*/
static int rt73usb_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue_idx,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2234,7 +2235,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw,
* we are free to update the registers based on the value
* in the queue parameter.
*/
- retval = rt2x00mac_conf_tx(hw, vif, queue_idx, params);
+ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params);
if (retval)
return retval;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.h b/drivers/net/wireless/ralink/rt2x00/rt73usb.h
index 1b56d285c34b..bb0a68516c08 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.h
@@ -1063,6 +1063,6 @@ struct hw_pairwise_ta_entry {
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT73USB_H */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e3a3dc3e45b4..1f57a0055bbd 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->sram = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (!local->sram)
+ goto failed;
/*** Set up 16k window for shared memory (receive buffer) ***************/
link->resource[3]->flags |=
@@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->rmem = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (!local->rmem)
+ goto failed;
/*** Set up window for attribute memory ***********************************/
link->resource[4]->flags |=
@@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->amem = ioremap(link->resource[4]->start,
resource_size(link->resource[4]));
+ if (!local->amem)
+ goto failed;
dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
@@ -1637,38 +1643,34 @@ static void authenticate_timeout(struct timer_list *t)
/*===========================================================================*/
static int parse_addr(char *in_str, UCHAR *out)
{
+ int i, k;
int len;
- int i, j, k;
- int status;
if (in_str == NULL)
return 0;
- if ((len = strlen(in_str)) < 2)
+ len = strnlen(in_str, ADDRLEN * 2 + 1) - 1;
+ if (len < 1)
return 0;
memset(out, 0, ADDRLEN);
- status = 1;
- j = len - 1;
- if (j > 12)
- j = 12;
i = 5;
- while (j > 0) {
- if ((k = hex_to_bin(in_str[j--])) != -1)
+ while (len > 0) {
+ if ((k = hex_to_bin(in_str[len--])) != -1)
out[i] = k;
else
return 0;
- if (j == 0)
+ if (len == 0)
break;
- if ((k = hex_to_bin(in_str[j--])) != -1)
+ if ((k = hex_to_bin(in_str[len--])) != -1)
out[i] += k << 4;
else
return 0;
if (!i--)
break;
}
- return status;
+ return 1;
}
/*===========================================================================*/
@@ -2746,7 +2748,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
nr = nr * 10 + c;
p++;
} while (--len);
- *(int *)PDE_DATA(file_inode(file)) = nr;
+ *(int *)pde_data(file_inode(file)) = nr;
return count;
}
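The parse_addr() rewrite keeps the original right-to-left scan (digits are consumed from the last character toward the front, filling out[5] down to out[0]) but drops the dead status and j variables and bounds the scan with strnlen(). The building block is hex_to_bin(), which converts one hex digit or returns -1, so a single test both validates and converts; the nibble pairing in a sketch (s, n, out, i are placeholders):

int lo = hex_to_bin(s[n]);      /* 0..15, or -1 on a non-hex character */
int hi = hex_to_bin(s[n - 1]);
if (lo < 0 || hi < 0)
        return 0;               /* reject the whole address */
out[i] = (hi << 4) | lo;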
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 2477e18c7cae..cdfe08078c57 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
+ unsigned int prio = 0;
unsigned long flags;
- unsigned int idx, prio, hw_prio;
+ unsigned int idx, hw_prio;
+
dma_addr_t mapping;
u32 tx_flags;
u8 rc_flags;
@@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
/* do arithmetic and then convert to le16 */
u16 frame_duration = 0;
- prio = skb_get_queue_mapping(skb);
+ /* rtl8180/rtl8185 has only one usable tx queue */
+ if (dev->queues > IEEE80211_AC_BK)
+ prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
@@ -1296,7 +1300,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
goto resched;
/* grab a fresh beacon */
- skb = ieee80211_beacon_get(dev, vif);
+ skb = ieee80211_beacon_get(dev, vif, 0);
if (!skb)
goto resched;
@@ -1420,7 +1424,8 @@ static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
}
static int rtl8180_conf_tx(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rtl8180_priv *priv = dev->priv;
@@ -1496,7 +1501,7 @@ static void rtl8180_conf_erp(struct ieee80211_hw *dev,
static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_vif *vif_priv;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index eb68b2d3caa1..c0f6e9c6d03e 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1075,7 +1075,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
goto resched;
/* grab a fresh beacon */
- skb = ieee80211_beacon_get(dev, vif);
+ skb = ieee80211_beacon_get(dev, vif, 0);
if (!skb)
goto resched;
@@ -1251,7 +1251,7 @@ static void rtl8187_conf_erp(struct rtl8187_priv *priv, bool use_short_slot,
static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct rtl8187_priv *priv = dev->priv;
struct rtl8187_vif *vif_priv;
@@ -1338,7 +1338,8 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
}
static int rtl8187_conf_tx(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rtl8187_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
index 49421d10e22b..f7d95c9624a0 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
@@ -143,7 +143,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
led->dev = dev;
led->ledpin = ledpin;
led->is_radio = is_radio;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
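strlcpy() is deprecated in the kernel partly because it returns the length of the source, forcing it to read past the truncation point; strscpy() always NUL-terminates, stops at the destination size, and returns the copied length or -E2BIG on truncation, which is straightforward to check. A sketch (src is hypothetical):

char name[16];

if (strscpy(name, src, sizeof(name)) < 0)
        pr_warn("led name truncated\n");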
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7ddce3c3f0c4..782b089a2e1b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1425,7 +1425,7 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1511,9 +1511,9 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 06d59ffb7444..ac641a56efb0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1607,6 +1607,7 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
+ struct ieee80211_hw *hw = priv->hw;
u32 val32, bonding;
u16 val16;
@@ -1684,6 +1685,9 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->has_wifi = 1;
}
+ hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1;
+ hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1;
+
switch (priv->rtl_chip) {
case RTL8188E:
case RTL8192E:
@@ -1874,13 +1878,6 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
/* We have 8 bits to indicate validity */
map_addr = offset * 8;
- if (map_addr >= EFUSE_MAP_LEN) {
- dev_warn(dev, "%s: Illegal map_addr (%04x), "
- "efuse corrupt!\n",
- __func__, map_addr);
- ret = -EINVAL;
- goto exit;
- }
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (word_mask & BIT(i)) {
@@ -1891,6 +1888,13 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
if (ret)
goto exit;
+ if (map_addr >= EFUSE_MAP_LEN - 1) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
+ ret = -EINVAL;
+ goto exit;
+ }
priv->efuse_wifi.raw[map_addr++] = val8;
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
@@ -2925,12 +2929,12 @@ bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
}
if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
- /* path B RX OK */
+ /* path B TX OK */
for (i = 4; i < 6; i++)
result[3][i] = result[c1][i];
}
- if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
+ if (!(simubitmap & 0xc0) && priv->tx_paths > 1) {
/* path B RX OK */
for (i = 6; i < 8; i++)
result[3][i] = result[c1][i];
@@ -4282,6 +4286,17 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
rtl8xxxu_debug = tmp_debug;
}
+static
+int rtl8xxxu_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ *tx_ant = BIT(priv->tx_paths) - 1;
+ *rx_ant = BIT(priv->rx_paths) - 1;
+
+ return 0;
+}
+
static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, const u8 *mac)
{
@@ -4305,7 +4320,7 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
}
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
{
struct h2c_cmd h2c;
@@ -4325,10 +4340,15 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
{
struct h2c_cmd h2c;
- u8 bw = RTL8XXXU_CHANNEL_WIDTH_20;
+ u8 bw;
+
+ if (txbw_40mhz)
+ bw = RTL8XXXU_CHANNEL_WIDTH_40;
+ else
+ bw = RTL8XXXU_CHANNEL_WIDTH_20;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@@ -4338,15 +4358,14 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff;
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
- h2c.ramask.arg = 0x80;
h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
h2c.b_macid_cfg.data2 = bw;
- dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
- __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, rateid %02x, sgi %d, size %zi\n",
+ __func__, ramask, rateid, sgi, sizeof(h2c.b_macid_cfg));
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
}
@@ -4458,6 +4477,35 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
priv->rx_buf_aggregation = 1;
}
+static const struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE_54M)
+ return;
+
+ if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
+ if (rate < DESC_RATE_MCS8)
+ *nss = 1;
+ else
+ *nss = 2;
+ *mcs = rate - DESC_RATE_MCS0;
+ }
+}
+
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
struct ieee80211_hw *hw = priv->hw;
@@ -4489,21 +4537,21 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
u16 network_type = WIRELESS_MODE_UNKNOWN;
if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) {
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
network_type = WIRELESS_MODE_AC;
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
network_type = WIRELESS_MODE_N_5G;
network_type |= WIRELESS_MODE_A;
} else {
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
network_type = WIRELESS_MODE_AC;
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
network_type = WIRELESS_MODE_N_24G;
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
network_type |= WIRELESS_MODE_B;
- else if (sta->supp_rates[0] & 0xf)
+ else if (sta->deflink.supp_rates[0] & 0xf)
network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G);
else
network_type |= WIRELESS_MODE_G;
@@ -4512,24 +4560,78 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
return network_type;
}
+static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
+{
+ u32 reg_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+ };
+ u32 val32;
+ u16 wireless_mode = 0;
+ u8 aifs, aifsn, sifs;
+ int i;
+
+ if (priv->vif) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ if (sta)
+ wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
+ rcu_read_unlock();
+ }
+
+ if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
+ (wireless_mode & WIRELESS_MODE_N_24G))
+ sifs = 16;
+ else
+ sifs = 10;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ val32 = rtl8xxxu_read32(priv, reg_edca_param[i]);
+
+ /* It was set in conf_tx. */
+ aifsn = val32 & 0xff;
+
+ /* aifsn not set yet or already fixed */
+ if (aifsn < 2 || aifsn > 15)
+ continue;
+
+ aifs = aifsn * slot_time + sifs;
+
+ val32 &= ~0xff;
+ val32 |= aifs;
+ rtl8xxxu_write32(priv, reg_edca_param[i], val32);
+ }
+}
+
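/* Worked numbers for the formula above (my arithmetic, not from the
 * patch): with the short slot (slot_time = 9 us) and sifs = 16 us, an
 * AC still holding the 802.11 default aifsn = 2 is rewritten to
 * aifs = 2 * 9 + 16 = 34 us (the familiar DIFS), while background
 * traffic's typical aifsn = 7 becomes 7 * 9 + 16 = 79 us. */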
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf, u32 changed)
+ struct ieee80211_bss_conf *bss_conf, u64 changed)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
struct ieee80211_sta *sta;
+ struct rtl8xxxu_ra_report *rarpt;
u32 val32;
u8 val8;
+ rarpt = &priv->ra_report;
+
if (changed & BSS_CHANGED_ASSOC) {
- dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
+ dev_dbg(dev, "Changed ASSOC: %i!\n", vif->cfg.assoc);
rtl8xxxu_set_linktype(priv, vif->type);
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
u32 ramask;
int sgi = 0;
+ u8 highest_rate;
+ u8 mcs = 0, nss = 0;
+ u32 bit_rate;
+
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -4540,24 +4642,51 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto error;
}
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
dev_info(dev, "%s: HT supported\n", __func__);
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
dev_info(dev, "%s: VHT supported\n", __func__);
/* TODO: Set bits 28-31 for rate adaptive id */
- ramask = (sta->supp_rates[0] & 0xfff) |
- sta->ht_cap.mcs.rx_mask[0] << 12 |
- sta->ht_cap.mcs.rx_mask[1] << 20;
- if (sta->ht_cap.cap &
+ ramask = (sta->deflink.supp_rates[0] & 0xfff) |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12 |
+ sta->deflink.ht_cap.mcs.rx_mask[1] << 20;
+ if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
rcu_read_unlock();
+ highest_rate = fls(ramask) - 1;
+ if (highest_rate < DESC_RATE_MCS0) {
+ rarpt->txrate.legacy =
+ rtl8xxxu_legacy_ratetable[highest_rate].bitrate;
+ } else {
+ rtl8xxxu_desc_to_mcsrate(highest_rate,
+ &mcs, &nss);
+ rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ rarpt->txrate.mcs = mcs;
+ rarpt->txrate.nss = nss;
+
+ if (sgi) {
+ rarpt->txrate.flags |=
+ RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+ if (rtl8xxxu_ht40_2g &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ rarpt->txrate.bw = RATE_INFO_BW_40;
+ else
+ rarpt->txrate.bw = RATE_INFO_BW_20;
+ }
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ rarpt->bit_rate = bit_rate;
+ rarpt->desc_rate = highest_rate;
+
priv->vif = vif;
priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
- priv->fops->update_rate_mask(priv, ramask, 0, sgi);
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi, rarpt->txrate.bw == RATE_INFO_BW_40);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -4565,7 +4694,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
- 0xc000 | bss_conf->aid);
+ 0xc000 | vif->cfg.aid);
priv->fops->report_connect(priv, 0, true);
} else {
@@ -4597,6 +4726,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
else
val8 = 20;
rtl8xxxu_write8(priv, REG_SLOT, val8);
+
+ rtl8xxxu_set_aifs(priv, val8);
}
if (changed & BSS_CHANGED_BSSID) {
@@ -4636,9 +4767,8 @@ static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
return rtlqueue;
}
-static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+static u32 rtl8xxxu_queue_select(struct ieee80211_hdr *hdr, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u32 queue;
if (ieee80211_is_mgmt(hdr->frame_control))
@@ -4988,6 +5118,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (control && control->sta)
sta = control->sta;
+ queue = rtl8xxxu_queue_select(hdr, skb);
+
tx_desc = skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
@@ -5000,7 +5132,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
- queue = rtl8xxxu_queue_select(hw, skb);
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
if (tx_info->control.hw_key) {
@@ -5021,12 +5152,12 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
ampdu_enable = false;
if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
u32 ampdu, val32;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- ampdu = (u32)sta->ht_cap.ampdu_density;
+ ampdu = (u32)sta->deflink.ht_cap.ampdu_density;
val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
tx_desc->txdw2 |= cpu_to_le32(val32);
@@ -5041,7 +5172,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
(ieee80211_is_data_qos(hdr->frame_control) &&
- sta && sta->ht_cap.cap &
+ sta && sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)))
sgi = true;
@@ -5331,7 +5462,7 @@ void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->bss_conf.assoc);
+ wifi_connected = (vif && vif->cfg.assoc);
if (!wifi_connected) {
rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
@@ -5357,7 +5488,7 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->bss_conf.assoc);
+ wifi_connected = (vif && vif->cfg.assoc);
if (wifi_connected) {
u32 val32 = 0;
@@ -5404,35 +5535,6 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
}
}
-static struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
- {.bitrate = 10, .hw_value = 0x00,},
- {.bitrate = 20, .hw_value = 0x01,},
- {.bitrate = 55, .hw_value = 0x02,},
- {.bitrate = 110, .hw_value = 0x03,},
- {.bitrate = 60, .hw_value = 0x04,},
- {.bitrate = 90, .hw_value = 0x05,},
- {.bitrate = 120, .hw_value = 0x06,},
- {.bitrate = 180, .hw_value = 0x07,},
- {.bitrate = 240, .hw_value = 0x08,},
- {.bitrate = 360, .hw_value = 0x09,},
- {.bitrate = 480, .hw_value = 0x0a,},
- {.bitrate = 540, .hw_value = 0x0b,},
-};
-
-static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
-{
- if (rate <= DESC_RATE_54M)
- return;
-
- if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
- if (rate < DESC_RATE_MCS8)
- *nss = 1;
- else
- *nss = 2;
- *mcs = rate - DESC_RATE_MCS0;
- }
-}
-
static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv;
@@ -5912,7 +6014,8 @@ exit:
}
static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -6117,8 +6220,8 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_TX_START:
dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
- ampdu_factor = sta->ht_cap.ampdu_factor;
- ampdu_density = sta->ht_cap.ampdu_density;
+ ampdu_factor = sta->deflink.ht_cap.ampdu_factor;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
rtl8xxxu_set_ampdu_min_space(priv, ampdu_density);
dev_dbg(dev,
@@ -6210,10 +6313,10 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u32 rate_bitmap = 0;
rcu_read_lock();
- rate_bitmap = (sta->supp_rates[0] & 0xfff) |
- (sta->ht_cap.mcs.rx_mask[0] << 12) |
- (sta->ht_cap.mcs.rx_mask[1] << 20);
- if (sta->ht_cap.cap &
+ rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) |
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << 12) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 20);
+ if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
rcu_read_unlock();
@@ -6298,7 +6401,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
}
priv->rssi_level = rssi_level;
- priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi);
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz);
}
}
@@ -6472,6 +6575,7 @@ static const struct ieee80211_ops rtl8xxxu_ops = {
.set_key = rtl8xxxu_set_key,
.ampdu_action = rtl8xxxu_ampdu_action,
.sta_statistics = rtl8xxxu_sta_statistics,
+ .get_antenna = rtl8xxxu_get_antenna,
};
static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
@@ -6610,8 +6714,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
if (!hw) {
ret = -ENOMEM;
- priv = NULL;
- goto exit;
+ goto err_put_dev;
}
priv = hw->priv;
@@ -6633,24 +6736,24 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ret = rtl8xxxu_parse_usb(priv, interface);
if (ret)
- goto exit;
+ goto err_set_intfdata;
ret = rtl8xxxu_identify_chip(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to identify chip\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = rtl8xxxu_read_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = priv->fops->parse_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
- goto exit;
+ goto err_set_intfdata;
}
rtl8xxxu_print_chipinfo(priv);
@@ -6658,12 +6761,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ret = priv->fops->load_firmware(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to load firmware\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = rtl8xxxu_init_device(hw);
if (ret)
- goto exit;
+ goto err_set_intfdata;
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6713,22 +6816,21 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret) {
dev_err(&udev->dev, "%s: Failed to register: %i\n",
__func__, ret);
- goto exit;
+ goto err_set_intfdata;
}
return 0;
-exit:
+err_set_intfdata:
usb_set_intfdata(interface, NULL);
- if (priv) {
- kfree(priv->fw_data);
- mutex_destroy(&priv->usb_buf_mutex);
- mutex_destroy(&priv->h2c_mutex);
- }
- usb_put_dev(udev);
+ kfree(priv->fw_data);
+ mutex_destroy(&priv->usb_buf_mutex);
+ mutex_destroy(&priv->h2c_mutex);
ieee80211_free_hw(hw);
+err_put_dev:
+ usb_put_dev(udev);
return ret;
}
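The probe rework above replaces one catch-all exit label, which had to test whether priv existed yet, with ordered labels: each jump target releases exactly what had been acquired by that point. The idiom in miniature (all foo_* names hypothetical):

static int foo_probe(void)
{
        struct foo *a;
        int ret;

        a = foo_alloc();
        if (!a)
                return -ENOMEM;

        ret = foo_setup(a);
        if (ret)
                goto err_free_a;        /* undo only what succeeded */

        return 0;

err_free_a:
        foo_free(a);
        return ret;
}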
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ffd150ec181f..9e7e98b55eff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -629,11 +629,12 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
if (sta == NULL)
return;
- sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
- sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
- sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
+ sgi_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ sgi_20 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ sgi_80 = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
- if ((!sta->ht_cap.ht_supported) && (!sta->vht_cap.vht_supported))
+ if (!sta->deflink.ht_cap.ht_supported &&
+ !sta->deflink.vht_cap.vht_supported)
return;
if (!sgi_40 && !sgi_20)
@@ -645,8 +646,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- bw_80 = sta->vht_cap.vht_supported;
+ bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ bw_80 = sta->deflink.vht_cap.vht_supported;
}
if (bw_80) {
@@ -864,11 +865,11 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (!(sta->ht_cap.ht_supported) ||
- !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ if (!(sta->deflink.ht_cap.ht_supported) ||
+ !(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
- if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
+ if (!mac->bw_40 || !(sta->deflink.ht_cap.ht_supported))
return;
}
if (tcb_desc->multicast || tcb_desc->broadcast)
@@ -884,11 +885,11 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
- if (!(sta->vht_cap.vht_supported))
+ if (!(sta->deflink.vht_cap.vht_supported))
return;
} else if (mac->opmode == NL80211_IFTYPE_STATION) {
if (!mac->bw_80 ||
- !(sta->vht_cap.vht_supported))
+ !(sta->deflink.vht_cap.vht_supported))
return;
}
if (tcb_desc->hw_rate <=
@@ -904,7 +905,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u8 hw_rate;
- u16 tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
+ u16 tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
if ((get_rf_type(rtlphy) == RF_2T2R) &&
(tx_mcs_map & 0x000c) != 0x000c) {
@@ -944,7 +945,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
u8 hw_rate;
if (get_rf_type(rtlphy) == RF_2T2R &&
- sta->ht_cap.mcs.rx_mask[1] != 0)
+ sta->deflink.ht_cap.mcs.rx_mask[1] != 0)
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1271,11 +1272,11 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
*and N rate will all be controlled by FW
*when tcb_desc->use_driver_rate = false
*/
- if (sta && sta->vht_cap.vht_supported) {
+ if (sta && sta->deflink.vht_cap.vht_supported) {
tcb_desc->hw_rate =
_rtl_get_vht_highest_n_rate(hw, sta);
} else {
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta && sta->deflink.ht_cap.ht_supported) {
tcb_desc->hw_rate =
_rtl_get_highest_n_rate(hw, sta);
} else {
@@ -1994,8 +1995,7 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
unsigned long flags;
- struct rtl_bssid_entry *entry;
- bool entry_found = false;
+ struct rtl_bssid_entry *entry = NULL, *iter;
/* check if it is scanning */
if (!mac->act_scanning)
@@ -2008,10 +2008,10 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags);
- list_for_each_entry(entry, &rtlpriv->scan_list.list, list) {
- if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
- list_del_init(&entry->list);
- entry_found = true;
+ list_for_each_entry(iter, &rtlpriv->scan_list.list, list) {
+ if (memcmp(iter->bssid, hdr->addr3, ETH_ALEN) == 0) {
+ list_del_init(&iter->list);
+ entry = iter;
rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
"Update BSSID=%pM to scan list (total=%d)\n",
hdr->addr3, rtlpriv->scan_list.num);
@@ -2019,7 +2019,7 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
- if (!entry_found) {
+ if (!entry) {
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
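This is one instance of a tree-wide cleanup: after list_for_each_entry() the cursor never compares NULL (on a full pass it points into the list head, not a real element), so post-loop use of the cursor, or a separate found flag, is replaced by recording the match in its own pointer. The shape of the pattern (types and helpers illustrative):

struct bssid_entry *entry = NULL, *iter;

list_for_each_entry(iter, &scan_list, list) {
        if (memcmp(iter->bssid, bssid, ETH_ALEN) == 0) {
                list_del_init(&iter->list);
                entry = iter;
                break;
        }
}
if (!entry)
        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);    /* new entry */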
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index a18dffc8753a..67d0b9aee064 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -1600,18 +1600,10 @@ static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
coex_dm->auto_tdma_adjust = false;
}
} else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) {
- /* HID+A2DP */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->auto_tdma_adjust = false;
- } else {
- /*for low BT RSSI*/
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->auto_tdma_adjust = false;
- }
+ /* HID+A2DP (no need to consider BT RSSI) */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->auto_tdma_adjust = false;
btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
} else if ((bt_link_info->pan_only) ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 7a0355dc6bab..32970ea4b4e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -208,7 +208,7 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_encalgo;
u8 entry_i;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 8efe2f5e5b9f..ca01270944fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -671,7 +671,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
/*
*because we should back channel to
- *current_network.chan in in scanning,
+ *current_network.chan in scanning,
*So if set_chan == current_network.chan
*we should set it.
*because mac80211 tell us wrong bw40
@@ -903,18 +903,18 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
sta_entry->wireless_mode = WIRELESS_MODE_G;
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
sta_entry->wireless_mode = WIRELESS_MODE_B;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
if (vif->type == NL80211_IFTYPE_ADHOC)
sta_entry->wireless_mode = WIRELESS_MODE_G;
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
sta_entry->wireless_mode = WIRELESS_MODE_A;
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_AC_5G;
if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -922,7 +922,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
}
/*disable cck rate for p2p*/
if (mac->p2p)
- sta->supp_rates[0] &= 0xfffffff0;
+ sta->deflink.supp_rates[0] &= 0xfffffff0;
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -982,7 +982,8 @@ static int _rtl_get_hal_qnum(u16 queue)
*for rtl819x BE = 0, BK = 1, VI = 2, VO = 3
*/
static int rtl_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1009,7 +1010,7 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
struct rtl_tcb_desc tcb_desc;
if (skb) {
@@ -1040,7 +1041,7 @@ EXPORT_SYMBOL_GPL(rtl_update_beacon_work_callback);
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
@@ -1094,7 +1095,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
u8 mstatus;
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
struct ieee80211_sta *sta = NULL;
u8 keep_alive = 10;
@@ -1111,7 +1112,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_LINKED;
mac->cnt_after_linked = 0;
- mac->assoc_id = bss_conf->aid;
+ mac->assoc_id = vif->cfg.aid;
memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
if (rtlpriv->cfg->ops->linked_set_reg)
@@ -1126,7 +1127,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
"send PS STATIC frame\n");
if (rtlpriv->dm.supp_phymode_switch) {
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
IEEE80211_SMPS_STATIC);
}
@@ -1134,20 +1135,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_5G) {
mac->mode = WIRELESS_MODE_A;
} else {
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
mac->mode = WIRELESS_MODE_B;
else
mac->mode = WIRELESS_MODE_G;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
if (rtlhal->current_bandtype == BAND_ON_2_4G)
mac->mode = WIRELESS_MODE_N_24G;
else
mac->mode = WIRELESS_MODE_N_5G;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
if (rtlhal->current_bandtype == BAND_ON_5G)
mac->mode = WIRELESS_MODE_AC_5G;
else
@@ -1256,14 +1257,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
if (sta) {
- if (sta->ht_cap.ampdu_density >
+ if (sta->deflink.ht_cap.ampdu_density >
mac->current_ampdu_density)
mac->current_ampdu_density =
- sta->ht_cap.ampdu_density;
- if (sta->ht_cap.ampdu_factor <
+ sta->deflink.ht_cap.ampdu_density;
+ if (sta->deflink.ht_cap.ampdu_factor <
mac->current_ampdu_factor)
mac->current_ampdu_factor =
- sta->ht_cap.ampdu_factor;
+ sta->deflink.ht_cap.ampdu_factor;
}
rcu_read_unlock();
@@ -1298,20 +1299,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_5G) {
mac->mode = WIRELESS_MODE_A;
} else {
- if (sta->supp_rates[0] <= 0xf)
+ if (sta->deflink.supp_rates[0] <= 0xf)
mac->mode = WIRELESS_MODE_B;
else
mac->mode = WIRELESS_MODE_G;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
if (rtlhal->current_bandtype == BAND_ON_2_4G)
mac->mode = WIRELESS_MODE_N_24G;
else
mac->mode = WIRELESS_MODE_N_5G;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
if (rtlhal->current_bandtype == BAND_ON_5G)
mac->mode = WIRELESS_MODE_AC_5G;
else
@@ -1327,7 +1328,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
sta_entry->wireless_mode = mac->mode;
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
mac->ht_enable = true;
/*
@@ -1338,16 +1339,16 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
* */
}
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
mac->vht_enable = true;
if (changed & BSS_CHANGED_BASIC_RATES) {
/* for 5G must << RATE_6M_INDEX = 4,
* because 5G has no cck rate*/
if (rtlhal->current_bandtype == BAND_ON_5G)
- basic_rates = sta->supp_rates[1] << 4;
+ basic_rates = sta->deflink.supp_rates[1] << 4;
else
- basic_rates = sta->supp_rates[0];
+ basic_rates = sta->deflink.supp_rates[0];
mac->basic_rates = basic_rates;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
@@ -1702,7 +1703,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = 0;
eth_zero_addr(mac_addr);
/*
- *mac80211 will delete entrys one by one,
+ *mac80211 will delete entries one by one,
*so don't use rtl_cam_reset_all_entry
*or clear all entry here.
*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 901cdfe3723c..0b1bc04cb6ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -329,8 +329,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -340,8 +340,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
&h2c_data[4], &h2c_data[5],
&h2c_data[6], &h2c_data[7]);
- if (h2c_len <= 0)
- return count;
+ if (h2c_len == 0)
+ return -EINVAL;
for (i = 0; i < h2c_len; i++)
h2c_data_packed[i] = (u8)h2c_data[i];
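Returning count on a failed copy_from_user() or an unparseable buffer made every write look successful to userspace; the fix reports -EFAULT and -EINVAL respectively. A self-contained sketch of the convention (foo_* hypothetical, kernel headers assumed):

static ssize_t foo_debugfs_write(struct file *filp, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int val;
        size_t len = min(count, sizeof(buf) - 1);

        if (copy_from_user(buf, ubuf, len))
                return -EFAULT;         /* could not read userspace */
        buf[len] = '\0';

        if (kstrtouint(buf, 0, &val))
                return -EINVAL;         /* not a valid number */

        return count;                   /* consumed the whole write */
}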
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index ad327bae754b..ca79f652fef3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -323,14 +323,13 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- bool find_buddy_priv = false;
- struct rtl_priv *tpriv;
+ struct rtl_priv *tpriv = NULL, *iter;
struct rtl_pci_priv *tpcipriv = NULL;
if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
- list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
+ list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
list) {
- tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
+ tpcipriv = (struct rtl_pci_priv *)iter->priv;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
@@ -344,19 +343,19 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
tpcipriv->ndis_adapter.devnumber &&
pcipriv->ndis_adapter.funcnumber !=
tpcipriv->ndis_adapter.funcnumber) {
- find_buddy_priv = true;
+ tpriv = iter;
break;
}
}
}
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
+ "find_buddy_priv %d\n", tpriv != NULL);
- if (find_buddy_priv)
+ if (tpriv)
*buddy_priv = tpriv;
- return find_buddy_priv;
+ return tpriv != NULL;
}
static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
@@ -1101,7 +1100,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct tasklet_struct *t)
}
/*NB: the beacon data buffer must be 32-bit aligned. */
- pskb = ieee80211_beacon_get(hw, mac->vif);
+ pskb = ieee80211_beacon_get(hw, mac->vif, 0);
if (!pskb)
return;
hdr = rtl_get_hdr(pskb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 4b5ea0ec9109..a164364109ba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -66,7 +66,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
else
return N_MODE_MCS15_RIX;
} else if (wireless_mode == WIRELESS_MODE_AC_24G) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) {
ieee80211_rate_set_vht(&rate,
AC_MODE_MCS8_RIX,
nss);
@@ -88,7 +88,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
else
return N_MODE_MCS15_RIX;
} else if (wireless_mode == WIRELESS_MODE_AC_5G) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) {
ieee80211_rate_set_vht(&rate,
AC_MODE_MCS8_RIX,
nss);
@@ -121,9 +121,9 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
if (sta) {
- sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
- sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
- sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
+ sgi_20 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ sgi_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ sgi_80 = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
wireless_mode = sta_entry->wireless_mode;
}
@@ -135,10 +135,10 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
- if (sta && (sta->ht_cap.cap &
+ if (sta && (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && sta->vht_cap.vht_supported)
+ if (sta && sta->deflink.vht_cap.vht_supported)
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
} else {
if (mac->bw_80)
@@ -149,11 +149,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sgi_20 || sgi_40 || sgi_80)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (sta && sta->ht_cap.ht_supported &&
+ if (sta && sta->deflink.ht_cap.ht_supported &&
(wireless_mode == WIRELESS_MODE_N_5G ||
wireless_mode == WIRELESS_MODE_N_24G))
rate->flags |= IEEE80211_TX_RC_MCS;
- if (sta && sta->vht_cap.vht_supported &&
+ if (sta && sta->deflink.vht_cap.vht_supported &&
(wireless_mode == WIRELESS_MODE_AC_5G ||
wireless_mode == WIRELESS_MODE_AC_24G ||
wireless_mode == WIRELESS_MODE_AC_ONLY))
@@ -229,7 +229,7 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (sta->ht_cap.ht_supported &&
+ if (sta->deflink.ht_cap.ht_supported &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 4cf8face0bbd..0bc4afa4fda3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -178,7 +178,7 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
}
}
-/* Allows active scan scan on Ch 12 and 13 */
+/* Allows active scan on Ch 12 and 13 */
static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator
initiator)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index bf686a916acb..58c2ab3d44be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1975,21 +1975,21 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -2061,11 +2061,11 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool b_shortgi = false;
@@ -2083,13 +2083,13 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index c948dafa0c80..6e4741e9483f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -58,7 +58,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = cck_buf->cck_agc_rpt;
/* (1)Hardware does not provide RSSI for CCK
- * (2)PWDB, Average PWDB cacluated by
+ * (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
if (ppsc->rfpwr_state == ERFON)
@@ -187,7 +187,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -504,7 +504,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -591,7 +591,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index bb5a0c4aec93..b9c62640d2cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1765,22 +1765,22 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -1853,11 +1853,11 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap &
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -1874,13 +1874,13 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
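For orientation, the shifts in these update_hal_rate_table/_mask helpers imply a RATR bitmap layout of CCK in bits 0-3, OFDM in bits 4-11 (hence the << 4 on 5 GHz, where CCK does not exist), and HT MCS 0-15 from rx_mask[0]/rx_mask[1] in bits 12-27. A sketch of the assembly, inferred from this diff rather than from chip documentation:

#include <net/mac80211.h>

/* Build the rate-adaptive bitmap the way these helpers do (sketch). */
static u32 build_ratr_bitmap(struct ieee80211_sta *sta, bool band_5g)
{
	u32 ratr = band_5g ? sta->deflink.supp_rates[1] << 4 :
			     sta->deflink.supp_rates[0];

	ratr |= sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
		sta->deflink.ht_cap.mcs.rx_mask[0] << 12;
	return ratr;	/* bits 0-3 CCK, 4-11 OFDM, 12-27 HT MCS 0-15 */
}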
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 04735da11168..da54e51badd3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -396,36 +396,6 @@ void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
}
}
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
- u32 u4b_tmp;
- u8 delay = 5;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- while (u4b_tmp != 0 && delay > 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- delay--;
- }
- if (delay == 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!\n");
- return;
- }
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state)
{
@@ -519,7 +489,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
jiffies_to_msecs(jiffies -
ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
- _rtl92ce_phy_set_rf_sleep(hw);
+ _rtl92c_phy_set_rf_sleep(hw);
break;
}
default:
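The phy.c hunk above is deduplication rather than a behaviour change: rtl8192ce carried a private copy of the RF-sleep sequence (pause TX, power down RF path A, toggle APSD with a bounded five-try poll), and the call site now uses _rtl92c_phy_set_rf_sleep(), presumably the identical helper from the rtl8192c common code shared by the PCIe (8192CE) and USB (8192CU) drivers.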
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 4165175cf5c0..730c7e939bd2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -166,7 +166,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -379,7 +379,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
if (sta)
- bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -441,7 +441,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_pkt_size(pdesc, (u16)skb->len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
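Two different 40 MHz tests appear in this series: the HT capability bit (as in the rtl8188ee hunk earlier) and, as here, a comparison against sta->deflink.bandwidth. The latter works because enum ieee80211_sta_rx_bandwidth is declared in ascending order:

#include <net/mac80211.h>

/* IEEE80211_STA_RX_BW_20 < _40 < _80 < _160, so ">=" reads as
 * "at least 40 MHz" and also matches 80/160 MHz VHT peers. */
static bool at_least_40mhz(struct ieee80211_sta *sta)
{
	return sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}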
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index eaba66113328..a040c07791d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -520,7 +520,7 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
* 2 out-ep. Remainder pages have assigned to High queue */
if (outepnum > 1 && txqremaininpage)
numhq += txqremaininpage;
- /* NOTE: This step done before writting REG_RQPN. */
+ /* NOTE: This step done before writing REG_RQPN. */
if (ischipn) {
if (queue_sel & TX_SELE_NQ)
numnq = txqpageunit;
@@ -539,7 +539,7 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ :
WMM_CHIP_A_PAGE_NUM_LPQ;
}
- /* NOTE: This step done before writting REG_RQPN. */
+ /* NOTE: This step done before writing REG_RQPN. */
if (ischipn) {
if (queue_sel & TX_SELE_NQ)
numnq = WMM_CHIP_B_PAGE_NUM_NPQ;
@@ -1918,21 +1918,21 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -2003,11 +2003,11 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
u8 curshortgi_40mhz = curtxbw_40mhz &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -2025,13 +2025,13 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index b53daf1b29f7..876c14d46c2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -334,6 +334,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x0846, 0x9042, rtl92cu_hal_cfg)}, /*On Netwrks N150MA*/
/****** 8188CUS Slim Combo ********/
{RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
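The sw.c hunk adds a single VID:PID pair (0x0846:0x9042, an On Networks N150MA dongle) to the rtl8192cu probe table. Each RTL_USB_DEVICE() entry is a standard struct usb_device_id initializer that also records the per-chip HAL configuration; roughly what the macro expands to (a sketch; the real definition lives in the rtlwifi USB header):

/* Hypothetical expansion of RTL_USB_DEVICE(0x0846, 0x9042, rtl92cu_hal_cfg): */
{
	USB_DEVICE(0x0846, 0x9042),			 /* match on VID:PID */
	.driver_info = (kernel_ulong_t)&rtl92cu_hal_cfg, /* per-chip HAL config */
},

usbcore matches on the pair, and the probe routine retrieves the HAL config back out of driver_info.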
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 87f959d5d861..ae3c4f97637e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -540,7 +540,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
rcu_read_lock();
sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(txdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index f849291cc587..2aecb2583f75 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1802,18 +1802,18 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value = sta->deflink.supp_rates[0];
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_A:
ratr_value &= 0x00000FF0;
@@ -1880,10 +1880,10 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -1901,11 +1901,11 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap = sta->deflink.supp_rates[0];
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 51fe51bb0504..d18c092b6142 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2386,14 +2386,10 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
"Just Read IQK Matrix reg for channel:%d....\n",
channel);
- if ((rtlphy->iqk_matrix[indexforchannel].
- value[0] != NULL)
- /*&&(regea4 != 0) */)
+ if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
_rtl92d_phy_patha_fill_iqk_matrix(hw, true,
- rtlphy->iqk_matrix[
- indexforchannel].value, 0,
- (rtlphy->iqk_matrix[
- indexforchannel].value[0][2] == 0));
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
if (IS_92D_SINGLEPHY(rtlhal->version)) {
if ((rtlphy->iqk_matrix[
indexforchannel].value[0][4] != 0)
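The rtl8192de/phy.c change fixes a real bug, not just style: iqk_matrix[...].value is a two-dimensional array member, so value[0] decays to the address of its first row, which can never be NULL; the old test was therefore always true (the commented-out regea4 check hints this was known to be unfinished). The new code tests the stored calibration word itself. A reduced illustration of the bug class, with illustrative field sizes:

struct iqk_matrix_regs {
	long value[1][8];	/* array member, not a pointer */
};

static bool iqk_result_present(const struct iqk_matrix_regs *m)
{
	/* "m->value[0] != NULL" compiles but is always true;
	 * inspect the stored data instead: */
	return m->value[0][0] != 0;
}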
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index c02813fba934..807b66c16e11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -498,7 +498,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
@@ -586,7 +586,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 76189283104c..47d8999e31c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2256,11 +2256,11 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
- u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool b_shortgi = false;
@@ -2276,12 +2276,12 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC)
macid = sta->aid + 1;
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index eef7a041e80d..8043d819fb85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -55,7 +55,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a;
/* (1)Hardware does not provide RSSI for CCK
- * (2)PWDB, Average PWDB cacluated by
+ * (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
cck_highpwr = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
@@ -153,7 +153,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1)
@@ -665,7 +665,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -759,7 +759,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 91199262aaca..bd0b7e365edb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1407,7 +1407,7 @@ static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
/* If IPS we need to turn LED on. So we not
- * not disable BIT 3/7 of reg3. */
+ * disable BIT 3/7 of reg3. */
if (rtlpriv->psc.rfoff_reason & (RF_CHANGE_BY_IPS | RF_CHANGE_BY_HW))
tmpu1b &= 0xFB;
else
@@ -2017,20 +2017,20 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate = 0;
u32 tmp_ratr_value = 0;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_value &= 0x0000000D;
@@ -2115,10 +2115,10 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index = 0;
- u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -2139,13 +2139,13 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
band |= WIRELESS_11B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index e474b4ec17f3..a5853a170b58 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -342,7 +342,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index c98f2216734f..965d98b9b09f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1841,21 +1841,21 @@ static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
u32 ratr_mask;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -1928,11 +1928,11 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -1949,13 +1949,13 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
macid = sta->aid + 1;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->supp_rates[1] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
else
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index 340b3d68a54e..27fddbcade32 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -52,7 +52,7 @@ static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
if (ppsc->rfpwr_state == ERFON)
@@ -170,7 +170,7 @@ static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -376,7 +376,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
@@ -442,7 +442,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_pkt_size(pdesc, (u16)skb->len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 0748aedce2ad..189cc6437600 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2315,11 +2315,11 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry = NULL;
u32 ratr_bitmap;
u8 ratr_index;
- u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = 0;
bool shortgi = false;
@@ -2335,13 +2335,13 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC)
macid = sta->aid + 1;
- ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 5a7cd270575a..24ef7cc52e99 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -55,7 +55,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(9));
@@ -126,7 +126,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1) &
@@ -429,7 +429,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
- bw_40 = sta->ht_cap.cap &
+ bw_40 = sta->deflink.ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
@@ -516,7 +516,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index f6bff0ebd6b0..f3fe16798c59 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -872,7 +872,7 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
else
falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
- /*reset OFDM FA coutner*/
+ /*reset OFDM FA counter*/
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
/* reset CCK FA counter*/
@@ -1464,7 +1464,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
const u8 *delta_swing_table_idx_tup_b;
const u8 *delta_swing_table_idx_tdown_b;
- /*2. Initilization ( 7 steps in total )*/
+ /*2. Initialization ( 7 steps in total )*/
rtl8812ae_get_delta_swing_table(hw,
&delta_swing_table_idx_tup_a,
&delta_swing_table_idx_tdown_a,
@@ -2502,7 +2502,7 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
rtlpriv->dm.dbginfo.num_non_be_pkt = 0;
/*===============================
- * list paramter for different platform
+ * list parameter for different platform
*===============================
*/
pb_is_cur_rdl_state = &rtlpriv->dm.is_cur_rdlstate;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 33ffc24d3675..7e0f62d59fe1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3300,20 +3300,20 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
u16 shortgi_rate;
u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
enum wireless_mode wirelessmode = mac->mode;
if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->supp_rates[1] << 4;
+ ratr_value = sta->deflink.supp_rates[1] << 4;
else
- ratr_value = sta->supp_rates[0];
+ ratr_value = sta->deflink.supp_rates[0];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_value = 0xfff;
- ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -3484,12 +3484,12 @@ static bool _rtl8821ae_get_ra_shortgi(struct ieee80211_hw *hw, struct ieee80211_
u8 mac_id)
{
bool b_short_gi = false;
- u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1 : 0;
- u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1 : 0;
u8 b_curshortgi_80mhz = 0;
- b_curshortgi_80mhz = (sta->vht_cap.cap &
+ b_curshortgi_80mhz = (sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_SHORT_GI_80) ? 1 : 0;
if (mac_id == MAC_ID_STATIC_FOR_BROADCAST_MULTICAST)
@@ -3512,7 +3512,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
u32 ratr_bitmap;
u8 ratr_index;
enum wireless_mode wirelessmode = 0;
- u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
? 1 : 0;
bool b_shortgi = false;
u8 rate_mask[7];
@@ -3534,22 +3534,22 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
if (wirelessmode == WIRELESS_MODE_N_5G ||
wirelessmode == WIRELESS_MODE_AC_5G ||
wirelessmode == WIRELESS_MODE_A)
- ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ratr_bitmap = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
else
- ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
+ ratr_bitmap = sta->deflink.supp_rates[NL80211_BAND_2GHZ];
if (mac->opmode == NL80211_IFTYPE_ADHOC)
ratr_bitmap = 0xfff;
if (wirelessmode == WIRELESS_MODE_N_24G
|| wirelessmode == WIRELESS_MODE_N_5G)
- ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
- sta->ht_cap.mcs.rx_mask[0] << 12);
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
else if (wirelessmode == WIRELESS_MODE_AC_24G
|| wirelessmode == WIRELESS_MODE_AC_5G
|| wirelessmode == WIRELESS_MODE_AC_ONLY)
ratr_bitmap |= _rtl8821ae_rate_to_bitmap_2ssvht(
- sta->vht_cap.vht_mcs.rx_mcs_map) << 12;
+ sta->deflink.vht_cap.vht_mcs.rx_mcs_map) << 12;
b_shortgi = _rtl8821ae_get_ra_shortgi(hw, sta, macid);
rf_type = _rtl8821ae_get_ra_rftype(hw, wirelessmode, ratr_bitmap);
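New in the 8821ae mask path relative to the older chips is the VHT branch: rx_mcs_map packs a 2-bit code per spatial stream (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = stream not supported), which _rtl8821ae_rate_to_bitmap_2ssvht() expands into per-rate bits. A sketch of decoding one stream, assuming the standard encoding from linux/ieee80211.h and a map already converted to CPU byte order:

#include <linux/ieee80211.h>

/* Highest VHT MCS supported for a given (1-based) spatial stream. */
static u8 vht_max_mcs_for_nss(u16 mcs_map, int nss)
{
	u8 code = (mcs_map >> ((nss - 1) * 2)) & 0x3;

	switch (code) {
	case IEEE80211_VHT_MCS_SUPPORT_0_7: return 7;
	case IEEE80211_VHT_MCS_SUPPORT_0_8: return 8;
	case IEEE80211_VHT_MCS_SUPPORT_0_9: return 9;
	default: return 0;	/* IEEE80211_VHT_MCS_NOT_SUPPORTED */
	}
}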
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 9d6f8dcbf2d6..d7cb3319d885 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -86,7 +86,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
cck_agc_rpt = p_phystrpt->cfosho[0];
/* (1)Hardware does not provide RSSI for CCK
- * (2)PWDB, Average PWDB cacluated by
+ * (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
cck_highpwr = (u8)rtlphy->cck_high_power;
@@ -215,7 +215,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
pstatus->rx_mimo_signalstrength[i] = (u8)rssi;
}
- /* (2)PWDB, Average PWDB cacluated by
+ /* (2)PWDB, Average PWDB calculated by
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
@@ -761,7 +761,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_linip(pdesc, 0);
set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
- u8 ampdu_density = sta->ht_cap.ampdu_density;
+ u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 86a236873254..a8eebafb9a7e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1014,7 +1014,7 @@ int rtl_usb_probe(struct usb_interface *intf,
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
sizeof(struct rtl_usb_priv), &rtl_ops);
if (!hw) {
- WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
+ pr_warn("rtl_usb: ieee80211 alloc failed\n");
return -ENOMEM;
}
rtlpriv = hw->priv;
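The usb.c hunk downgrades an allocation-failure WARN_ONCE() to pr_warn(). Presumably the usual rationale applies: WARN prints a full backtrace and becomes fatal under panic_on_warn, which is disproportionate for an out-of-memory path that is already handled cleanly by the -ENOMEM return.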
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index df750b3a35e9..038a30b170ef 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -30,11 +30,11 @@ void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
struct rtw_bfee *bfee = &rtwvif->bfee;
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
- struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_sta *sta;
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_sta_vht_cap *ic_vht_cap;
@@ -55,7 +55,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
- vht_cap = &sta->vht_cap;
+ vht_cap = &sta->deflink.vht_cap;
if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
(vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
@@ -67,7 +67,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
ether_addr_copy(bfee->mac_addr, bssid);
bfee->role = RTW_BFEE_MU;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
- bfee->aid = bss_conf->aid;
+ bfee->aid = vif->cfg.aid;
bfinfo->bfer_mu_cnt++;
rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
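Two independent API moves meet in this bf.c hunk. First, rtw_chip_info pointers become const (the chip descriptor is read-only data), a conversion repeated throughout coex.c below. Second, the association ID moves from ieee80211_bss_conf to the per-interface ieee80211_vif_cfg: under MLO, bss_conf describes one link while the AID belongs to the interface as a whole. Sketch of the resulting field split:

/* Post-MLO-split mac80211 fields used here (sketch):
 *   vif->cfg.aid    - interface-wide association state
 *   vif->bss_conf   - per-link BSS data (the only link when not MLO)
 *   sta->deflink.*  - per-link station capabilities
 */
bfee->aid = vif->cfg.aid;	/* was: bss_conf->aid */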
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 2551e228b581..6276ad624299 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -13,7 +13,7 @@
static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
u8 rssi, u8 rssi_thresh)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tol = chip->rssi_tolerance;
u8 next_state;
@@ -36,7 +36,7 @@ static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
static void rtw_coex_limited_tx(struct rtw_dev *rtwdev,
bool tx_limit_en, bool ampdu_limit_en)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 num_of_active_port = 1;
@@ -211,6 +211,10 @@ static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev)
bool is_cck_lock_rate = false;
+ if (coex_stat->wl_coex_mode != COEX_WLINK_2G1PORT &&
+ coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)
+ return;
+
if (coex_dm->bt_status == COEX_BTSTATUS_INQ_PAGE ||
coex_stat->bt_setup_link) {
coex_stat->wl_cck_lock = false;
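The added early return confines CCK-lock detection to the two 2.4 GHz coexistence modes (single-port 2G and the new 2GFREE); in any other link mode the heuristic is not meaningful, CCK being a 2.4 GHz-only modulation.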
@@ -361,7 +365,7 @@ static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap,
void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u16 val = 0x2;
@@ -396,7 +400,7 @@ EXPORT_SYMBOL(rtw_coex_write_scbd);
static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->scbd_support)
return 0;
@@ -406,7 +410,7 @@ static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
static void rtw_coex_check_rfk(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_rfe *coex_rfe = &coex->rfe;
@@ -460,9 +464,32 @@ static void rtw_coex_gnt_workaround(struct rtw_dev *rtwdev, bool force, u8 mode)
rtw_coex_set_gnt_fix(rtwdev);
}
+static void rtw_coex_monitor_bt_ctr(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u32 tmp;
+
+ tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
+ coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, tmp);
+ coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+ tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
+ coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, tmp);
+ coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+ rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
+ BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
+ coex_stat->hi_pri_rx, coex_stat->hi_pri_tx,
+ coex_stat->lo_pri_rx, coex_stat->lo_pri_tx);
+}
+
static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
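rtw_coex_monitor_bt_ctr() above is new: it samples the BT high- and low-priority TX/RX activity counters (each 32-bit statistics register packs two 16-bit counters) and then rearms counting via REG_BT_COEX_ENH_INTR_CTRL. MASKLWORD/MASKHWORD are the driver's names for the half-word masks; assuming the conventional 0x0000ffff/0xffff0000 values, the split is plain FIELD_GET():

#include <linux/bitfield.h>

#define MASKLWORD 0x0000ffff	/* assumed values of the driver's masks */
#define MASKHWORD 0xffff0000

static void split_bt_ctr(u32 reg, u16 *tx, u16 *rx)
{
	*tx = FIELD_GET(MASKLWORD, reg);	/* low word:  TX count */
	*rx = FIELD_GET(MASKHWORD, reg);	/* high word: RX count */
}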
@@ -497,10 +524,10 @@ static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_traffic_stats *stats = &rtwdev->stats;
bool is_5G = false;
bool wl_busy = false;
@@ -679,10 +706,10 @@ static const char *rtw_coex_get_bt_status_string(u8 bt_status)
static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 i;
u8 rssi_state;
u8 rssi_step;
@@ -779,8 +806,10 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
u8 link = 0;
u8 center_chan = 0;
u8 bw;
@@ -791,7 +820,9 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
if (type != COEX_MEDIA_DISCONNECT)
center_chan = rtwdev->hal.current_channel;
- if (center_chan == 0) {
+ if (center_chan == 0 ||
+ (efuse->share_ant && center_chan <= 14 &&
+ coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)) {
link = 0;
center_chan = 0;
bw = 0;
@@ -902,7 +933,7 @@ EXPORT_SYMBOL(rtw_coex_write_indirect_reg);
static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_hw_reg *btg_reg = chip->btg_reg;
if (wifi_control) {
@@ -930,10 +961,27 @@ static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state)
rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state);
}
+static void rtw_coex_mimo_ps(struct rtw_dev *rtwdev, bool force, bool state)
+{
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ if (!force && state == coex_stat->wl_mimo_ps)
+ return;
+
+ coex_stat->wl_mimo_ps = state;
+
+ rtw_set_txrx_1ss(rtwdev, state);
+
+ rtw_coex_update_wl_ch_info(rtwdev, (u8)coex_stat->wl_connected);
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], %s(): state = %d\n", __func__, state);
+}
+
static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force,
u8 table_case)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 h2c_para[6] = {0};
u32 table_wl = 0x5a5a5a5a;
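rtw_coex_mimo_ps() above is the WL-side knob behind the new game-HID handling: it caches the requested state (the force flag bypasses the cache so a fresh association can reassert it), drops WiFi to 1SS TX/RX via rtw_set_txrx_1ss() when BT needs guaranteed airtime, and refreshes the channel info reported to the BT side.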
@@ -1017,9 +1065,9 @@ static void rtw_coex_set_table(struct rtw_dev *rtwdev, bool force, u32 table0,
static void rtw_coex_table(struct rtw_dev *rtwdev, bool force, u8 type)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_stat *coex_stat = &coex->stat;
@@ -1087,9 +1135,9 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
u8 byte3, u8 byte4, u8 byte5)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 ps_type = COEX_PS_WIFI_NATIVE;
bool ap_enable = false;
@@ -1106,7 +1154,8 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
ps_type = COEX_PS_WIFI_NATIVE;
rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
- } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
+ } else if ((byte1 & BIT(4) && !(byte1 & BIT(5))) ||
+ coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
rtw_dbg(rtwdev, RTW_DBG_COEX,
"[BTCoex], %s(): Force LPS (byte1 = 0x%x)\n", __func__,
byte1);
@@ -1144,10 +1193,10 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 n, type;
bool turn_on;
@@ -1477,8 +1526,8 @@ static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev)
static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1500,11 +1549,11 @@ static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 level = 0;
bool bt_afh_loss = true;
@@ -1545,8 +1594,8 @@ static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1570,8 +1619,8 @@ static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1595,10 +1644,10 @@ static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1635,11 +1684,11 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_rfe *coex_rfe = &coex->rfe;
u8 table_case = 0xff, tdma_case = 0xff;
@@ -1704,10 +1753,10 @@ exit:
static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
bool wl_hi_pri = false;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1802,12 +1851,60 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
+static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
+{
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ u8 table_case, tdma_case;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+
+ if (efuse->share_ant) {
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+ if (coex_stat->bt_whck_test)
+ table_case = 2;
+ else if (coex_stat->wl_linkscan_proc || coex_stat->bt_hid_exist)
+ table_case = 33;
+ else if (coex_stat->bt_setup_link || coex_stat->bt_inq_page)
+ table_case = 0;
+ else if (coex_stat->bt_a2dp_exist)
+ table_case = 34;
+ else
+ table_case = 33;
+
+ tdma_case = 0;
+ } else {
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+
+ table_case = 121;
+ }
+
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+ } else {
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ }
+
+ rtw_coex_table(rtwdev, false, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
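rtw_coex_action_bt_game_hid() above is the new strategy selected when a game controller is attached (see the dispatch added to rtw_coex_run_coex() further down): with a shared antenna it switches to COEX_WLINK_2GFREE and picks a coexistence table by BT sub-state with TDMA case 0; with separate antennas it keys the TDMA case off WL RSSI. In 2GFREE mode the RF parameters are additionally chosen by traffic direction (wl_rf_para_tx[6] when WL is TX-dominant, wl_rf_para_rx[5] otherwise).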
@@ -1816,13 +1913,8 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
- if (coex_stat->bt_multi_link) {
- table_case = 10;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 5;
- }
+ table_case = 10;
+ tdma_case = 5;
} else {
/* Non-Shared-Ant */
if (coex_stat->bt_multi_link) {
@@ -1840,10 +1932,10 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
bool bt_multi_link_remain = false, is_toggle_table = false;
@@ -1923,11 +2015,11 @@ static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1965,10 +2057,10 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
bool ap_enable = false;
@@ -2004,10 +2096,10 @@ static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2041,11 +2133,11 @@ static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case, interval = 0;
u32 slot_type = 0;
bool is_toggle_table = false;
@@ -2098,10 +2190,10 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
bool wl_cpt_test = false, bt_cpt_test = false;
@@ -2155,10 +2247,10 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2190,10 +2282,10 @@ static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2224,8 +2316,10 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2235,6 +2329,9 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+ if (coex_stat->bt_game_hid_exist && coex_stat->wl_linkscan_proc)
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+
if (efuse->share_ant) {
/* Shared-Ant */
table_case = 0;
@@ -2251,8 +2348,8 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2275,9 +2372,10 @@ static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
if (coex->under_5g)
@@ -2286,7 +2384,6 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
@@ -2298,16 +2395,26 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
tdma_case = 100;
}
+ if (coex_stat->bt_game_hid_exist) {
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+ } else {
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ }
+
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -2344,8 +2451,8 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2421,6 +2528,7 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
@@ -2494,6 +2602,11 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
goto exit;
}
+ if (coex_stat->bt_game_hid_exist && coex_stat->wl_connected) {
+ rtw_coex_action_bt_game_hid(rtwdev);
+ goto exit;
+ }
+
if (coex_stat->bt_whck_test) {
rtw_coex_action_bt_whql_test(rtwdev);
goto exit;
@@ -2530,6 +2643,18 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
}
exit:
+
+ if (chip->wl_mimo_ps_support) {
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ if (coex_dm->reason == COEX_RSN_2GMEDIA)
+ rtw_coex_mimo_ps(rtwdev, true, true);
+ else
+ rtw_coex_mimo_ps(rtwdev, false, true);
+ } else {
+ rtw_coex_mimo_ps(rtwdev, false, false);
+ }
+ }
+
rtw_coex_gnt_workaround(rtwdev, false, coex_stat->wl_coex_mode);
rtw_coex_limited_wl(rtwdev);
}
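Note the new exit block in rtw_coex_run_coex(): on chips with wl_mimo_ps_support, every coexistence pass now reconciles MIMO power-save with the current mode. Entering COEX_WLINK_2GFREE enables 1SS, forced (cache bypassed) when the trigger was a 2G media event so the setting survives re-association; leaving 2GFREE restores full spatial streams.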
@@ -2877,9 +3002,9 @@ void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type)
void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
u32 bt_relink_time;
u8 i, rsp_source = 0, type;
@@ -3139,6 +3264,135 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
}
+#define COEX_BT_HIDINFO_MTK 0x46
+static const u8 coex_bt_hidinfo_ps[] = {0x57, 0x69, 0x72};
+static const u8 coex_bt_hidinfo_xb[] = {0x58, 0x62, 0x6f};
+
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_hid *hidinfo;
+ struct rtw_coex_hid_info_a *hida;
+ struct rtw_coex_hid_handle_list *hl, *bhl;
+ u8 sub_id = buf[2], gamehid_cnt = 0, handle, i;
+ bool cur_game_hid_exist, complete;
+
+ if (!chip->wl_mimo_ps_support &&
+ (sub_id == COEX_BT_HIDINFO_LIST || sub_id == COEX_BT_HIDINFO_A))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], HID info notify, sub_id = 0x%x\n", sub_id);
+
+ switch (sub_id) {
+ case COEX_BT_HIDINFO_LIST:
+ hl = &coex_stat->hid_handle_list;
+ bhl = (struct rtw_coex_hid_handle_list *)buf;
+ if (!memcmp(hl, bhl, sizeof(*hl)))
+ return;
+ coex_stat->hid_handle_list = *bhl;
+ memset(&coex_stat->hid_info, 0, sizeof(coex_stat->hid_info));
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ if (hl->handle[i] != COEX_BT_HIDINFO_NOTCON &&
+ hl->handle[i] != 0)
+ hidinfo->hid_handle = hl->handle[i];
+ }
+ break;
+ case COEX_BT_HIDINFO_A:
+ hida = (struct rtw_coex_hid_info_a *)buf;
+ handle = hida->handle;
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ if (hidinfo->hid_handle == handle) {
+ hidinfo->hid_vendor = hida->vendor;
+ memcpy(hidinfo->hid_name, hida->name,
+ sizeof(hidinfo->hid_name));
+ hidinfo->hid_info_completed = true;
+ break;
+ }
+ }
+ break;
+ }
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ complete = hidinfo->hid_info_completed;
+ handle = hidinfo->hid_handle;
+ if (!complete || handle == COEX_BT_HIDINFO_NOTCON ||
+ handle == 0 || handle >= COEX_BT_BLE_HANDLE_THRS) {
+ hidinfo->is_game_hid = false;
+ continue;
+ }
+
+ if (hidinfo->hid_vendor == COEX_BT_HIDINFO_MTK) {
+ if ((memcmp(hidinfo->hid_name,
+ coex_bt_hidinfo_ps,
+ COEX_BT_HIDINFO_NAME)) == 0)
+ hidinfo->is_game_hid = true;
+ else if ((memcmp(hidinfo->hid_name,
+ coex_bt_hidinfo_xb,
+ COEX_BT_HIDINFO_NAME)) == 0)
+ hidinfo->is_game_hid = true;
+ else
+ hidinfo->is_game_hid = false;
+ } else {
+ hidinfo->is_game_hid = false;
+ }
+ if (hidinfo->is_game_hid)
+ gamehid_cnt++;
+ }
+
+ if (gamehid_cnt > 0)
+ cur_game_hid_exist = true;
+ else
+ cur_game_hid_exist = false;
+
+ if (cur_game_hid_exist != coex_stat->bt_game_hid_exist) {
+ coex_stat->bt_game_hid_exist = cur_game_hid_exist;
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], HID info changed!bt_game_hid_exist = %d!\n",
+ coex_stat->bt_game_hid_exist);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS);
+ }
+}
+
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev)
+{
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_hid *hidinfo;
+ u8 i, handle;
+ bool complete;
+
+ if (!chip->wl_mimo_ps_support || coex_stat->wl_under_ips ||
+ (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl))
+ return;
+
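+ /* Query only when a BT HID link already exists, or when BT reports a
+ * connection whose high-priority traffic exceeds the game-HID
+ * threshold (COEX_BT_GAMEHID_CNT).
+ */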
+ if (!coex_stat->bt_hid_exist &&
+ !((coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION) &&
+ (coex_stat->hi_pri_tx + coex_stat->hi_pri_rx >
+ COEX_BT_GAMEHID_CNT)))
+ return;
+
+ rtw_fw_coex_query_hid_info(rtwdev, COEX_BT_HIDINFO_LIST, 0);
+
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ complete = hidinfo->hid_info_completed;
+ handle = hidinfo->hid_handle;
+ if (handle == 0 || handle == COEX_BT_HIDINFO_NOTCON ||
+ handle >= COEX_BT_BLE_HANDLE_THRS || complete)
+ continue;
+
+ rtw_fw_coex_query_hid_info(rtwdev,
+ COEX_BT_HIDINFO_A,
+ handle);
+ }
+}
+
void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
struct rtw_coex *coex = &rtwdev->coex;
@@ -3175,6 +3429,17 @@ void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type)
rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
}
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ if ((coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) ||
+ coex_stat->wl_under_ips)
+ return;
+
+ rtw_coex_monitor_bt_ctr(rtwdev);
+}
+
void rtw_coex_bt_relink_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
@@ -3317,7 +3582,7 @@ static const char *rtw_coex_get_reason_string(u8 reason)
static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0,
u32 wl_reg_6c4)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 ans = 0xFF;
u8 n, i;
@@ -3353,8 +3618,8 @@ static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0,
static u8 rtw_coex_get_tdma_index(struct rtw_dev *rtwdev, u8 *tdma_para)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 ans = 0xFF;
u8 n, i, j;
u8 load_cur_tab_val;
@@ -3471,7 +3736,7 @@ static int rtw_coex_val_info(struct rtw_dev *rtwdev,
static void rtw_coex_set_coexinfo_hw(struct rtw_dev *rtwdev, struct seq_file *m)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_reg_domain *reg;
char addr_info[INFO_SIZE];
int n_addr = 0;
@@ -3637,6 +3902,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
switch (coex_wl_link_mode) {
case_WLINK(2G1PORT);
case_WLINK(5G);
+ case_WLINK(2GFREE);
default:
return "Unknown";
}
@@ -3644,7 +3910,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
@@ -3658,7 +3924,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
u16 score_board_WB, score_board_BW;
u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc;
u32 lte_coex, bt_coex;
- u32 bt_hi_pri, bt_lo_pri;
int i;
score_board_BW = rtw_coex_read_scbd(rtwdev);
@@ -3669,17 +3934,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
wl_reg_6cc = rtw_read32(rtwdev, REG_BT_COEX_TABLE_H);
wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL);
- bt_hi_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
- bt_lo_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
- rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
- BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
-
- coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, bt_hi_pri);
- coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, bt_hi_pri);
-
- coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, bt_lo_pri);
- coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, bt_lo_pri);
-
sys_lte = rtw_read8(rtwdev, 0x73);
lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index fc61a0cab3e4..57cf29da9ea4 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -11,6 +11,7 @@
#define COEX_MIN_DELAY 10 /* delay unit in ms */
#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */
+#define COEX_BT_GAMEHID_CNT 800
#define COEX_RF_OFF 0x0
#define COEX_RF_ON 0x1
@@ -172,6 +173,7 @@ enum coex_bt_profile {
enum coex_wl_link_mode {
COEX_WLINK_2G1PORT = 0x0,
COEX_WLINK_5G = 0x3,
+ COEX_WLINK_2GFREE = 0x7,
COEX_WLINK_MAX
};
@@ -325,7 +327,7 @@ struct coex_rf_para {
static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_init(rtwdev);
}
@@ -333,7 +335,7 @@ static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
static inline
void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->ops->coex_set_ant_switch)
return;
@@ -343,28 +345,28 @@ void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_gnt_fix(rtwdev);
}
static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_gnt_debug(rtwdev);
}
static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_rfe_type(rtwdev);
}
static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr);
}
@@ -372,7 +374,7 @@ static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
static inline
void rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain);
}
@@ -401,9 +403,12 @@ void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type);
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev);
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev);
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index e429428232c1..9ebe544e51d0 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -269,11 +269,7 @@ static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
for (i = 0 ; i < buf_size ; i += 8) {
if (i % page_size == 0)
seq_printf(m, "PAGE %d\n", (i + offset) / page_size);
- seq_printf(m, "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
- *(buf + i), *(buf + i + 1),
- *(buf + i + 2), *(buf + i + 3),
- *(buf + i + 4), *(buf + i + 5),
- *(buf + i + 6), *(buf + i + 7));
+ seq_printf(m, "%8ph\n", buf + i);
}
vfree(buf);
@@ -390,7 +386,7 @@ static ssize_t rtw_debugfs_set_h2c(struct file *filp,
&param[0], &param[1], &param[2], &param[3],
&param[4], &param[5], &param[6], &param[7]);
if (num != 8) {
- rtw_info(rtwdev, "invalid H2C command format for debug\n");
+ rtw_warn(rtwdev, "invalid H2C command format for debug\n");
return -EINVAL;
}
@@ -625,11 +621,13 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_hal *hal = &rtwdev->hal;
- u8 path, rate;
+ u8 path, rate, bw, ch, regd;
struct rtw_power_params pwr_param = {0};
- u8 bw = hal->current_band_width;
- u8 ch = hal->current_channel;
- u8 regd = rtw_regd_get(rtwdev);
+
+ mutex_lock(&rtwdev->mutex);
+ bw = hal->current_band_width;
+ ch = hal->current_channel;
+ regd = rtw_regd_get(rtwdev);
seq_printf(m, "channel: %u\n", ch);
seq_printf(m, "bandwidth: %u\n", bw);
@@ -671,6 +669,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
}
mutex_unlock(&hal->tx_power_mutex);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -715,8 +714,10 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
- seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+ seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n",
stats->tx_throughput, stats->rx_throughput);
+ seq_printf(m, "1SS for TX and RX = %c\n\n", rtwdev->hal.txrx_1ss ?
+ 'Y' : 'N');
seq_puts(m, "==========[Tx Phy Info]========\n");
seq_puts(m, "[Tx Rate] = ");
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 61f8369fe2d6..066792dd96af 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -23,6 +23,7 @@ enum rtw_debug_mask {
RTW_DBG_PATH_DIV = 0x00004000,
RTW_DBG_ADAPTIVITY = 0x00008000,
RTW_DBG_HW_SCAN = 0x00010000,
+ RTW_DBG_STATE = 0x00020000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
index c266c84ef233..b85075cd68d0 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.c
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -86,7 +86,7 @@ static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map,
static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 size = rtwdev->efuse.physical_size;
u32 efuse_ctl;
u32 addr;
@@ -145,7 +145,7 @@ EXPORT_SYMBOL(rtw_read8_physical_efuse);
int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u32 phy_size = efuse->physical_size;
u32 log_size = efuse->logical_size;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 2f7c036f9022..0b5f903c0f36 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -14,6 +14,8 @@
#include "util.h"
#include "wow.h"
#include "ps.h"
+#include "phy.h"
+#include "mac.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -116,7 +118,7 @@ legacy:
si->ra_report.desc_rate = rate;
si->ra_report.bit_rate = bit_rate;
- sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
+ sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}
static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
@@ -233,6 +235,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
break;
+ case C2H_BT_HID_INFO:
+ rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
+ break;
case C2H_WLAN_INFO:
rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
break;
@@ -538,6 +543,18 @@ void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
+
+ SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
+ SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
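+/* The firmware answers this query with a C2H_BT_HID_INFO event, which the C2H
+ * handler earlier in this diff dispatches to rtw_coex_bt_hid_info_notify().
+ */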
+
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -570,10 +587,10 @@ void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
- bool no_update = si->updated;
bool disable_pt = true;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
@@ -584,7 +601,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
- SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
+ SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
@@ -593,7 +610,6 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
si->init_ra_lv = 0;
- si->updated = true;
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
@@ -635,7 +651,7 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
- if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si)
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
return;
if (!connect) {
@@ -645,6 +661,10 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
return;
}
+
+ if (!si)
+ return;
+
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
@@ -886,7 +906,7 @@ void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
struct rtw_nlo_info_hdr *nlo_hdr;
struct cfg80211_ssid *ssid;
@@ -941,7 +961,7 @@ static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
struct ieee80211_channel *channels = pno_req->channels;
struct sk_buff *skb;
@@ -975,7 +995,7 @@ static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
struct rtw_lps_pg_dpk_hdr *dpk_hdr;
struct sk_buff *skb;
@@ -1000,7 +1020,7 @@ static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_lps_pg_info_hdr *pg_info_hdr;
struct rtw_wow_param *rtw_wow = &rtwdev->wow;
@@ -1033,6 +1053,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif;
struct sk_buff *skb_new;
struct cfg80211_ssid *ssid;
+ u16 tim_offset = 0;
if (rsvd_pkt->type == RSVD_DUMMY) {
skb_new = alloc_skb(1, GFP_KERNEL);
@@ -1051,7 +1072,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
switch (rsvd_pkt->type) {
case RSVD_BEACON:
- skb_new = ieee80211_beacon_get(hw, vif);
+ skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
+ rsvd_pkt->tim_offset = tim_offset;
break;
case RSVD_PS_POLL:
skb_new = ieee80211_pspoll_get(hw, vif);
@@ -1060,10 +1082,10 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
skb_new = ieee80211_proberesp_get(hw, vif);
break;
case RSVD_NULL:
- skb_new = ieee80211_nullfunc_get(hw, vif, false);
+ skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
break;
case RSVD_QOS_NULL:
- skb_new = ieee80211_nullfunc_get(hw, vif, true);
+ skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
break;
case RSVD_LPS_PG_DPK:
skb_new = rtw_lps_pg_dpk_get(hw);
@@ -1102,7 +1124,7 @@ static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
enum rtw_rsvd_packet_type type)
{
struct rtw_tx_pkt_info pkt_info = {0};
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 *pkt_desc;
rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
@@ -1413,7 +1435,7 @@ static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
struct ieee80211_hw *hw = rtwdev->hw;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *iter;
struct rtw_rsvd_page *rsvd_pkt;
u32 page = 0;
@@ -1582,6 +1604,16 @@ free:
return ret;
}
+void rtw_fw_update_beacon_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ update_beacon_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_fw_download_rsvd_page(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
u32 *buf, u32 residue, u16 start_pg)
{
@@ -1617,7 +1649,7 @@ out:
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
u32 offset, u32 size, u32 *buf)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 start_pg, residue;
if (sel >= RTW_FW_FIFO_MAX) {
@@ -1676,7 +1708,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
u8 location)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
@@ -1766,7 +1798,7 @@ void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
- SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
+ SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);
@@ -1784,12 +1816,12 @@ void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
- struct sk_buff_head *list,
- struct rtw_vif *rtwvif)
+static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct sk_buff_head *list, u8 *bands,
+ struct rtw_vif *rtwvif)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
- struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *new;
u8 idx;
@@ -1797,25 +1829,37 @@ static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
if (!(BIT(idx) & chip->band))
continue;
new = skb_copy(skb, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
skb_put_data(new, ies->ies[idx], ies->len[idx]);
skb_put_data(new, ies->common_ies, ies->common_ie_len);
skb_queue_tail(list, new);
+ (*bands)++;
}
+
+ return 0;
}
-static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
+static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
struct sk_buff_head *probe_req_list)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb, *tmp;
u8 page_offset = 1, *buf, page_size = chip->page_size;
- u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
u16 buf_offset = page_size * page_offset;
u8 tx_desc_sz = chip->tx_pkt_desc_sz;
+ u8 page_cnt, pages;
unsigned int pkt_len;
int ret;
+ if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
+ page_cnt = RTW_OLD_PROBE_PG_CNT;
+ else
+ page_cnt = RTW_PROBE_PG_CNT;
+
+ pages = page_offset + num_probes * page_cnt;
+
buf = kzalloc(page_size * pages, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1824,7 +1868,7 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
skb_queue_walk_safe(probe_req_list, skb, tmp) {
skb_unlink(skb, probe_req_list);
rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
- if (skb->len > page_size * RTW_PROBE_PG_CNT) {
+ if (skb->len > page_size * page_cnt) {
ret = -EINVAL;
goto out;
}
@@ -1834,8 +1878,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);
- buf_offset += RTW_PROBE_PG_CNT * page_size;
- page_offset += RTW_PROBE_PG_CNT;
+ buf_offset += page_cnt * page_size;
+ page_offset += page_cnt;
kfree_skb(skb);
}
@@ -1848,6 +1892,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
rtwdev->scan_info.probe_pg_size = page_offset;
out:
kfree(buf);
+ skb_queue_walk_safe(probe_req_list, skb, tmp)
+ kfree_skb(skb);
return ret;
}
@@ -1857,8 +1903,9 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct sk_buff_head list;
- struct sk_buff *skb;
- u8 num = req->n_ssids, i;
+ struct sk_buff *skb, *tmp;
+ u8 num = req->n_ssids, i, bands = 0;
+ int ret;
skb_queue_head_init(&list);
for (i = 0; i < num; i++) {
@@ -1866,11 +1913,25 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
- rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
+ rtwvif);
+ if (ret)
+ goto out;
+
kfree_skb(skb);
}
- return _rtw_hw_scan_update_probe_req(rtwdev, num, &list);
+ return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
+
+out:
+ skb_queue_walk_safe(&list, skb, tmp)
+ kfree_skb(skb);
+
+ return ret;
}
static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
@@ -1996,6 +2057,9 @@ void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw_leave_lps_deep(rtwdev);
+ rtw_hci_flush_all_queues(rtwdev, false);
+ rtw_mac_flush_all_queues(rtwdev, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
req->mac_addr_mask);
@@ -2014,7 +2078,10 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct cfg80211_scan_info info = {
.aborted = aborted,
};
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw_hal *hal = &rtwdev->hal;
struct rtw_vif *rtwvif;
+ u8 chan = scan_info->op_chan;
if (!vif)
return;
@@ -2022,12 +2089,15 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwdev->hal.rcr |= BIT_CBSSID_BCN;
rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, true);
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ if (chan)
+ rtw_store_op_chan(rtwdev, false);
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
- rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
rtwdev->scan_info.scanning_vif = NULL;
@@ -2065,6 +2135,7 @@ int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw_ch_switch_option cs_option = {0};
struct rtw_chan_list chan_list = {0};
int ret = 0;
@@ -2073,7 +2144,7 @@ int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
return -EINVAL;
cs_option.switch_en = enable;
- cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED;
+ cs_option.back_op_en = scan_info->op_chan != 0;
if (enable) {
ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
if (ret)
@@ -2109,17 +2180,36 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_hw_scan_complete(rtwdev, vif, aborted);
if (aborted)
- rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc);
+ rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}
-void rtw_store_op_chan(struct rtw_dev *rtwdev)
+void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
{
struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw_hal *hal = &rtwdev->hal;
+ u8 band;
- scan_info->op_chan = hal->current_channel;
- scan_info->op_bw = hal->current_band_width;
- scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+ if (backup) {
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+ scan_info->op_pri_ch = hal->primary_channel;
+ } else {
+ band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ rtw_update_channel(rtwdev, scan_info->op_chan,
+ scan_info->op_pri_ch,
+ band, scan_info->op_bw);
+ }
+}
+
+void rtw_clear_op_chan(struct rtw_dev *rtwdev)
+{
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ scan_info->op_chan = 0;
+ scan_info->op_bw = 0;
+ scan_info->op_pri_ch_idx = 0;
+ scan_info->op_pri_ch = 0;
}
static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
@@ -2134,7 +2224,10 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_c2h_cmd *c2h;
enum rtw_scan_notify_id id;
- u8 chan, status;
+ u8 chan, band, status;
+
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ return;
c2h = get_c2h_from_skb(skb);
chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
@@ -2142,10 +2235,13 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
status = GET_CHAN_SWITCH_STATUS(c2h->payload);
if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
- if (rtw_is_op_chan(rtwdev, chan))
+ band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ rtw_update_channel(rtwdev, chan, chan, band,
+ RTW_CHANNEL_WIDTH_20);
+ if (rtw_is_op_chan(rtwdev, chan)) {
+ rtw_store_op_chan(rtwdev, false);
ieee80211_wake_queues(rtwdev->hw);
- hal->current_channel = chan;
- hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ }
} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
if (IS_CH_5G_BAND(chan)) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
@@ -2158,7 +2254,12 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
rtw_coex_switchband_notify(rtwdev, chan_type);
}
- if (rtw_is_op_chan(rtwdev, chan))
+ /* The channel in the C2H RTW_SCAN_NOTIFY_ID_PRESWITCH notification is
+ * the next channel that the hardware will switch to. Stop the queues
+ * if that next channel is a non-op channel.
+ */
+ if (!rtw_is_op_chan(rtwdev, chan) &&
+ rtw_is_op_chan(rtwdev, hal->current_channel))
ieee80211_stop_queues(rtwdev->hw);
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 654c3c2e5721..a5a965803a3c 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -41,12 +41,14 @@
#define RTW_EX_CH_INFO_HDR_SIZE 2
#define RTW_SCAN_WIDTH 0
#define RTW_PRI_CH_IDX 1
-#define RTW_PROBE_PG_CNT 2
+#define RTW_OLD_PROBE_PG_CNT 2
+#define RTW_PROBE_PG_CNT 4
enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
+ C2H_BT_HID_INFO = 0x45,
C2H_RA_RPT = 0x0c,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
@@ -119,6 +121,10 @@ enum rtw_fw_feature {
FW_FEATURE_MAX = BIT(31),
};
+enum rtw_fw_feature_ext {
+ FW_FEATURE_EXT_OLD_PAGE_NUM = BIT(0),
+};
+
enum rtw_beacon_filter_offload_mode {
BCN_FILTER_OFFLOAD_MODE_0 = 0,
BCN_FILTER_OFFLOAD_MODE_1,
@@ -171,6 +177,7 @@ struct rtw_rsvd_page {
struct sk_buff *skb;
enum rtw_rsvd_packet_type type;
u8 page;
+ u16 tim_offset;
bool add_txdesc;
struct cfg80211_ssid *ssid;
u16 probe_req_size;
@@ -321,6 +328,11 @@ struct rtw_fw_hdr_legacy {
__le32 rsvd5;
} __packed;
+#define RTW_FW_VER_CODE(ver, sub_ver, idx) \
+ (((ver) << 16) | ((sub_ver) << 8) | (idx))
+#define RTW_FW_SUIT_VER_CODE(s) \
+ RTW_FW_VER_CODE((s).version, (s).sub_version, (s).sub_index)
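+/* Usage sketch (version numbers are made up): packing the fields makes
+ * firmware versions directly comparable, e.g.
+ * if (RTW_FW_SUIT_VER_CODE(rtwdev->fw) >= RTW_FW_VER_CODE(9, 9, 0))
+ * ...take the new-firmware path...
+ */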
+
/* C2H */
#define GET_CCX_REPORT_SEQNUM_V0(c2h_payload) (c2h_payload[6] & 0xfc)
#define GET_CCX_REPORT_STATUS_V0(c2h_payload) (c2h_payload[0] & 0xc0)
@@ -529,6 +541,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
#define H2C_CMD_WIFI_CALIBRATION 0x6d
+#define H2C_CMD_QUERY_BT_HID_INFO 0x73
#define H2C_CMD_KEEP_ALIVE 0x03
#define H2C_CMD_DISCONNECT_DECISION 0x04
@@ -681,6 +694,11 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \
@@ -762,6 +780,12 @@ static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
return !!(fw->feature & feature);
}
+static inline bool rtw_fw_feature_ext_check(struct rtw_fw_state *fw,
+ enum rtw_fw_feature_ext feature)
+{
+ return !!(fw->feature_ext & feature);
+}
+
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
struct sk_buff *skb);
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
@@ -780,9 +804,12 @@ void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl);
void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable);
void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
u8 para1, u8 para2, u8 para3, u8 para4, u8 para5);
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data);
+
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
-void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev);
void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
@@ -798,6 +825,7 @@ void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
struct rtw_vif *rtwvif);
int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev);
+void rtw_fw_update_beacon_work(struct work_struct *work);
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
@@ -819,7 +847,8 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
-void rtw_store_op_chan(struct rtw_dev *rtwdev);
+void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup);
+void rtw_clear_op_chan(struct rtw_dev *rtwdev);
void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index d1678aed9d9c..52076e89d59a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -75,7 +75,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
- rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
+ rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
break;
case RTW_HCI_TYPE_USB:
break;
@@ -243,7 +243,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
@@ -587,7 +587,7 @@ static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
u32 src, u32 dst, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 desc_size = chip->tx_pkt_desc_sz;
u8 first_part;
u32 mem_offset;
@@ -934,7 +934,7 @@ static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
u32 prio_queue, bool drop)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_prioq_addr *addr;
bool wsize;
u16 avail_page, rsvd_page;
@@ -996,7 +996,7 @@ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
@@ -1037,8 +1037,8 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
u16 cur_pg_addr;
u8 csi_buf_pg_num = chip->csi_buf_pg_num;
@@ -1092,8 +1092,8 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev,
const struct rtw_page_table *pg_tbl,
u16 pubq_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
@@ -1123,8 +1123,8 @@ static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
const struct rtw_page_table *pg_tbl,
u16 pubq_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
u32 val32;
val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
@@ -1149,8 +1149,8 @@ static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
@@ -1277,7 +1277,7 @@ static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
int rtw_mac_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int ret;
ret = rtw_init_trx_cfg(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index ae7d97de5fdf..07578ccc4bab 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -72,6 +72,9 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
struct rtw_dev *rtwdev = hw->priv;
int ret = 0;
+ /* let the previous IPS work finish so we don't leave IPS twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
@@ -98,7 +101,8 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw_set_channel(rtwdev);
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (hw->conf.flags & IEEE80211_CONF_IDLE))
+ (hw->conf.flags & IEEE80211_CONF_IDLE) &&
+ !test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_enter_ips(rtwdev);
out:
@@ -205,7 +209,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
return 0;
}
@@ -216,7 +220,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,8 +246,8 @@ static int rtw_ops_change_interface(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
- rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
- vif->addr, vif->type, type, vif->p2p, p2p);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+ vif->addr, vif->type, type, vif->p2p, p2p);
rtw_ops_remove_interface(hw, vif);
@@ -352,7 +356,7 @@ static void rtw_conf_tx(struct rtw_dev *rtwdev,
static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
@@ -366,15 +370,14 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
rtw_vif_assoc_changed(rtwvif, conf);
- if (conf->assoc) {
+ if (vif->cfg.assoc) {
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
rtw_fw_download_rsvd_page(rtwdev);
rtw_send_rsvd_page_h2c(rtwdev);
- rtw_coex_media_status_notify(rtwdev, conf->assoc);
+ rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
- rtw_store_op_chan(rtwdev);
} else {
rtw_leave_lps(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
@@ -392,6 +395,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
config |= PORT_SET_BSSID;
+ if (is_zero_ether_addr(rtwvif->bssid))
+ rtw_clear_op_chan(rtwdev);
+ else
+ rtw_store_op_chan(rtwdev, true);
}
if (changed & BSS_CHANGED_BEACON_INT) {
@@ -399,8 +406,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
coex_stat->wl_beacon_interval = conf->beacon_int;
}
- if (changed & BSS_CHANGED_BEACON)
+ if (changed & BSS_CHANGED_BEACON) {
+ rtw_set_dtim_period(rtwdev, conf->dtim_period);
rtw_fw_download_rsvd_page(rtwdev);
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (conf->enable_beacon)
@@ -424,8 +433,23 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ const struct rtw_chip_info *chip = rtwdev->chip;
+
+ mutex_lock(&rtwdev->mutex);
+ chip->ops->phy_calibration(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -471,6 +495,16 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static int rtw_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
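+ /* The beacon template lives in the reserved page, so queue
+ * rtw_fw_update_beacon_work() to rebuild and re-download it with a
+ * fresh TIM element.
+ */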
+ ieee80211_queue_work(hw, &rtwdev->update_beacon_work);
+
+ return 0;
+}
+
static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -614,7 +648,7 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -691,7 +725,7 @@ static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
}
si->use_cfg_mask = true;
- rtw_update_sta_info(br_data->rtwdev, si);
+ rtw_update_sta_info(br_data->rtwdev, si, true);
}
static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
@@ -722,7 +756,7 @@ static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
u32 rx_antenna)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int ret;
if (!chip->ops->set_antenna)
@@ -842,11 +876,24 @@ static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
+ mutex_lock(&rtwdev->mutex);
rtw_set_sar_specs(rtwdev, sar);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
+static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ if (changed & IEEE80211_RC_BW_CHANGED)
+ rtw_update_sta_info(rtwdev, si, true);
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -858,9 +905,11 @@ const struct ieee80211_ops rtw_ops = {
.change_interface = rtw_ops_change_interface,
.configure_filter = rtw_ops_configure_filter,
.bss_info_changed = rtw_ops_bss_info_changed,
+ .start_ap = rtw_ops_start_ap,
.conf_tx = rtw_ops_conf_tx,
.sta_add = rtw_ops_sta_add,
.sta_remove = rtw_ops_sta_remove,
+ .set_tim = rtw_ops_set_tim,
.set_key = rtw_ops_set_key,
.ampdu_action = rtw_ops_ampdu_action,
.can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu,
@@ -876,6 +925,7 @@ const struct ieee80211_ops rtw_ops = {
.reconfig_complete = rtw_reconfig_complete,
.hw_scan = rtw_ops_hw_scan,
.cancel_hw_scan = rtw_ops_cancel_hw_scan,
+ .sta_rc_update = rtw_ops_sta_rc_update,
.set_sar_specs = rtw_ops_set_sar_specs,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 38252113c4a8..67151dbf8384 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -171,7 +171,7 @@ static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
if (vif->type == NL80211_IFTYPE_STATION)
- if (vif->bss_conf.assoc)
+ if (vif->cfg.assoc)
iter_data->rtwvif = rtwvif;
rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif);
@@ -207,6 +207,9 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ rtw_coex_wl_status_check(rtwdev);
+ rtw_coex_query_bt_hid_list(rtwdev);
+
if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev, 0);
@@ -272,6 +275,16 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+static void rtw_ips_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
+
+ mutex_lock(&rtwdev->mutex);
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtw_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
@@ -300,13 +313,13 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw_txq_init(rtwdev, sta->txq[i]);
- rtw_update_sta_info(rtwdev, si);
+ rtw_update_sta_info(rtwdev, si, true);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
rtwdev->sta_cnt++;
rtwdev->beacon_loss = false;
- rtw_info(rtwdev, "sta %pM joined with macid %d\n",
- sta->addr, si->mac_id);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
return 0;
}
@@ -327,8 +340,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
kfree(si->mask);
rtwdev->sta_cnt--;
- rtw_info(rtwdev, "sta %pM with macid %d left\n",
- sta->addr, si->mac_id);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
}
struct rtw_fwcd_hdr {
@@ -340,7 +353,7 @@ struct rtw_fwcd_hdr {
static int rtw_fwcd_prep(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
const struct rtw_fwcd_segs *segs = chip->fwcd_segs;
u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr);
@@ -512,8 +525,13 @@ EXPORT_SYMBOL(rtw_dump_reg);
void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
struct ieee80211_bss_conf *conf)
{
- if (conf && conf->assoc) {
- rtwvif->aid = conf->aid;
+ struct ieee80211_vif *vif = NULL;
+
+ if (conf)
+ vif = container_of(conf, struct ieee80211_vif, bss_conf);
+
+ if (conf && vif->cfg.assoc) {
+ rtwvif->aid = vif->cfg.aid;
rtwvif->net_type = RTW_NET_MGD_LINKED;
} else {
rtwvif->aid = 0;
@@ -651,67 +669,132 @@ void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel)
}
EXPORT_SYMBOL(rtw_set_rx_freq_band);
+void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period)
+{
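+ /* Presumably a zero-based hardware countdown: enable TIM updating in
+ * TCR, then seed the root DTIM counter with dtim_period - 1.
+ */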
+ rtw_write32_set(rtwdev, REG_TCR, BIT_TCR_UPDATE_TIMIE);
+ rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1);
+}
+
+void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
+ u8 primary_channel, enum rtw_supported_band band,
+ enum rtw_bandwidth bandwidth)
+{
+ enum nl80211_band nl_band = rtw_hw_to_nl80211_band(band);
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 *cch_by_bw = hal->cch_by_bw;
+ u32 center_freq, primary_freq;
+ enum rtw_sar_bands sar_band;
+ u8 primary_channel_idx;
+
+ center_freq = ieee80211_channel_to_frequency(center_channel, nl_band);
+ primary_freq = ieee80211_channel_to_frequency(primary_channel, nl_band);
+
+ /* assign the center channel used while 20M bw is selected */
+ cch_by_bw[RTW_CHANNEL_WIDTH_20] = primary_channel;
+
+ /* assign the center channel used while current bw is selected */
+ cch_by_bw[bandwidth] = center_channel;
+
+ switch (bandwidth) {
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ primary_channel_idx = RTW_SC_DONT_CARE;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (primary_freq > center_freq)
+ primary_channel_idx = RTW_SC_20_UPPER;
+ else
+ primary_channel_idx = RTW_SC_20_LOWER;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (primary_freq > center_freq) {
+ if (primary_freq - center_freq == 10)
+ primary_channel_idx = RTW_SC_20_UPPER;
+ else
+ primary_channel_idx = RTW_SC_20_UPMOST;
+
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel + 4;
+ } else {
+ if (center_freq - primary_freq == 10)
+ primary_channel_idx = RTW_SC_20_LOWER;
+ else
+ primary_channel_idx = RTW_SC_20_LOWEST;
+
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel - 4;
+ }
+ break;
+ }
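+ /* Worked example (assumed values): an 80 MHz channel centered on
+ * channel 42 (5210 MHz) with primary channel 36 (5180 MHz) gives
+ * center_freq - primary_freq = 30, so the primary index becomes
+ * RTW_SC_20_LOWEST and cch_by_bw[RTW_CHANNEL_WIDTH_40] = 38.
+ */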
+
+ switch (center_channel) {
+ case 1 ... 14:
+ sar_band = RTW_SAR_BAND_0;
+ break;
+ case 36 ... 64:
+ sar_band = RTW_SAR_BAND_1;
+ break;
+ case 100 ... 144:
+ sar_band = RTW_SAR_BAND_3;
+ break;
+ case 149 ... 177:
+ sar_band = RTW_SAR_BAND_4;
+ break;
+ default:
+ WARN(1, "unknown ch(%u) to SAR band\n", center_channel);
+ sar_band = RTW_SAR_BAND_0;
+ break;
+ }
+
+ hal->current_primary_channel_index = primary_channel_idx;
+ hal->current_band_width = bandwidth;
+ hal->primary_channel = primary_channel;
+ hal->current_channel = center_channel;
+ hal->current_band_type = band;
+ hal->sar_band = sar_band;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
- u8 *cch_by_bw = chan_params->cch_by_bw;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW_CHANNEL_WIDTH_20;
- u8 primary_chan_idx = 0;
- u8 i;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
center_freq = chandef->center_freq1;
- /* assign the center channel used while 20M bw is selected */
- cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value;
-
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
- primary_chan_idx = RTW_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
- if (primary_freq > center_freq) {
- primary_chan_idx = RTW_SC_20_UPPER;
+ if (primary_freq > center_freq)
center_chan -= 2;
- } else {
- primary_chan_idx = RTW_SC_20_LOWER;
+ else
center_chan += 2;
- }
break;
case NL80211_CHAN_WIDTH_80:
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
- if (primary_freq - center_freq == 10) {
- primary_chan_idx = RTW_SC_20_UPPER;
+ if (primary_freq - center_freq == 10)
center_chan -= 2;
- } else {
- primary_chan_idx = RTW_SC_20_UPMOST;
+ else
center_chan -= 6;
- }
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
- if (center_freq - primary_freq == 10) {
- primary_chan_idx = RTW_SC_20_LOWER;
+ if (center_freq - primary_freq == 10)
center_chan += 2;
- } else {
- primary_chan_idx = RTW_SC_20_LOWEST;
+ else
center_chan += 6;
- }
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4;
}
break;
default:
@@ -721,60 +804,30 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
chan_params->center_chan = center_chan;
chan_params->bandwidth = bandwidth;
- chan_params->primary_chan_idx = primary_chan_idx;
-
- /* assign the center channel used while current bw is selected */
- cch_by_bw[bandwidth] = center_chan;
-
- for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++)
- cch_by_bw[i] = 0;
+ chan_params->primary_chan = channel->hw_value;
}
void rtw_set_channel(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_channel_params ch_param;
- u8 center_chan, bandwidth, primary_chan_idx;
- u8 i;
+ u8 center_chan, primary_chan, bandwidth, band;
rtw_get_channel_params(&hw->conf.chandef, &ch_param);
if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
return;
center_chan = ch_param.center_chan;
+ primary_chan = ch_param.primary_chan;
bandwidth = ch_param.bandwidth;
- primary_chan_idx = ch_param.primary_chan_idx;
+ band = ch_param.center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
- hal->current_band_width = bandwidth;
- hal->current_channel = center_chan;
- hal->current_primary_channel_index = primary_chan_idx;
- hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->sar_band = RTW_SAR_BAND_0;
- break;
- case 36 ... 64:
- hal->sar_band = RTW_SAR_BAND_1;
- break;
- case 100 ... 144:
- hal->sar_band = RTW_SAR_BAND_3;
- break;
- case 149 ... 177:
- hal->sar_band = RTW_SAR_BAND_4;
- break;
- default:
- WARN(1, "unknown ch(%u) to SAR band\n", center_chan);
- hal->sar_band = RTW_SAR_BAND_0;
- break;
- }
+ rtw_update_channel(rtwdev, center_chan, primary_chan, band, bandwidth);
- for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++)
- hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
-
- chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+ chip->ops->set_channel(rtwdev, center_chan, bandwidth,
+ hal->current_primary_channel_index);
if (hal->current_band_type == RTW_BAND_5G) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
@@ -797,7 +850,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
void rtw_chip_prepare_tx(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (rtwdev->need_rfk) {
rtwdev->need_rfk = false;
@@ -866,8 +919,8 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
hw_ant_num >= hal->rf_path_num)
@@ -891,7 +944,7 @@ static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
static u64 get_vht_ra_mask(struct ieee80211_sta *sta)
{
u64 ra_mask = 0;
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
u8 vht_mcs_cap;
int i, nss;
@@ -1011,37 +1064,52 @@ static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
RA_MASK_VHT_RATES_2SS | \
RA_MASK_VHT_RATES_3SS)
+#define RA_MASK_CCK_IN_BG 0x00005
#define RA_MASK_CCK_IN_HT 0x00005
#define RA_MASK_CCK_IN_VHT 0x00005
#define RA_MASK_OFDM_IN_VHT 0x00010
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
-static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
- struct rtw_sta_info *si,
- u64 ra_mask, bool is_vht_enable,
- u8 wireless_set)
+static u64 rtw_rate_mask_rssi(struct rtw_sta_info *si, u8 wireless_set)
+{
+ u8 rssi_level = si->rssi_level;
+
+ if (wireless_set == WIRELESS_CCK)
+ return 0xffffffffffffffffULL;
+
+ if (rssi_level == 0)
+ return 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ return 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ return 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ return 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ return 0xffffffffffff8f80ULL;
+ else
+ return 0xffffffffffff0f00ULL;
+}
+
+static u64 rtw_rate_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
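Why the recover step exists: the RSSI masks above progressively clear the low bit positions (assuming the usual rtw88 layout of CCK 1/2/5.5/11M in bits 0-3, OFDM 6-54M in bits 4-11, and HT/VHT MCS from bit 12 up), and for a legacy-only peer that can leave no rate at all. A contrived worked example with the level-5 mask:

	/* Peer advertising only OFDM 6/9M (bits 4 and 5). The level-5 mask
	 * 0xffffffffffff0f00 clears both, leaving ra_mask = 0, and the bak
	 * snapshot has no HT/VHT bits to restore, so the second branch of
	 * rtw_rate_mask_recover() re-adds the legacy bits. */
	u64 ra_mask_bak = 0x30ULL;				/* OFDM 6M | 9M */
	u64 ra_mask = ra_mask_bak & 0xffffffffffff0f00ULL;	/* == 0 */

	ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak);	/* == 0x30 */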
+
+static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable)
{
struct rtw_hal *hal = &rtwdev->hal;
const struct cfg80211_bitrate_mask *mask = si->mask;
u64 cfg_mask = GENMASK_ULL(63, 0);
- u8 rssi_level, band;
-
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ u8 band;
if (!si->use_cfg_mask)
return ra_mask;
@@ -1077,7 +1145,8 @@ static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
return ra_mask;
}
-void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct ieee80211_sta *sta = si->sta;
@@ -1091,34 +1160,36 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
u8 ldpc_en = 0;
u8 tx_num = 1;
u64 ra_mask = 0;
+ u64 ra_mask_bak = 0;
bool is_vht_enable = false;
bool is_support_sgi = false;
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
is_vht_enable = true;
ra_mask |= get_vht_ra_mask(sta);
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = VHT_STBC_EN;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = VHT_LDPC_EN;
- } else if (sta->ht_cap.ht_supported) {
- ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
- (sta->ht_cap.mcs.rx_mask[0] << 12);
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ } else if (sta->deflink.ht_cap.ht_supported) {
+ ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
ldpc_en = HT_LDPC_EN;
}
- if (efuse->hw_cap.nss == 1)
+ if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
- ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
- if (sta->vht_cap.vht_supported) {
+ ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
+ ra_mask_bak = ra_mask;
+ if (sta->deflink.vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G;
wireless_set = WIRELESS_OFDM | WIRELESS_HT;
} else {
@@ -1126,58 +1197,62 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
}
dm_info->rrsr_val_init = RRSR_INIT_5G;
} else if (hal->current_band_type == RTW_BAND_2G) {
- ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
- if (sta->vht_cap.vht_supported) {
+ ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
+ ra_mask_bak = ra_mask;
+ if (sta->deflink.vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
WIRELESS_HT | WIRELESS_VHT;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT |
RA_MASK_OFDM_IN_HT_2G;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
WIRELESS_HT;
- } else if (sta->supp_rates[0] <= 0xf) {
+ } else if (sta->deflink.supp_rates[0] <= 0xf) {
wireless_set = WIRELESS_CCK;
} else {
+ ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
}
dm_info->rrsr_val_init = RRSR_INIT_2G;
} else {
rtw_err(rtwdev, "Unknown band type\n");
+ ra_mask_bak = ra_mask;
wireless_set = 0;
}
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW_CHANNEL_WIDTH_80;
- is_support_sgi = sta->vht_cap.vht_supported &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ is_support_sgi = sta->deflink.vht_cap.vht_supported &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
break;
case IEEE80211_STA_RX_BW_40:
bw_mode = RTW_CHANNEL_WIDTH_40;
- is_support_sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ is_support_sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
break;
default:
bw_mode = RTW_CHANNEL_WIDTH_20;
- is_support_sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ is_support_sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
break;
}
- if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) {
+ if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) {
tx_num = 2;
rf_type = RF_2T2R;
- } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) {
+ } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) {
tx_num = 2;
rf_type = RF_2T2R;
}
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
- wireless_set);
+ ra_mask &= rtw_rate_mask_rssi(si, wireless_set);
+ ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask = rtw_rate_mask_cfg(rtwdev, si, ra_mask, is_vht_enable);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
@@ -1189,12 +1264,12 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
si->ra_mask = ra_mask;
si->rate_id = rate_id;
- rtw_fw_send_ra_info(rtwdev, si);
+ rtw_fw_send_ra_info(rtwdev, si, reset_ra_mask);
}
static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw;
fw = &rtwdev->fw;
@@ -1215,7 +1290,7 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported ||
!fw->feature)
@@ -1234,7 +1309,7 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
static int rtw_power_on(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw = &rtwdev->fw;
bool wifi_only;
int ret;
@@ -1320,7 +1395,7 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
rtw_leave_lps(rtwdev);
- if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) {
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) {
ret = rtw_leave_ips(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to leave idle state\n");
@@ -1339,11 +1414,15 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
}
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan)
{
- struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
u32 config = 0;
+ if (!rtwvif)
+ return;
+
clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
@@ -1354,6 +1433,9 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
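The ips_work queued above moves IPS re-entry off the scan-completion path. Its handler is introduced elsewhere in this series; a sketch of the assumed shape:

	void rtw_ips_work(struct work_struct *work)
	{
		struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
						      ips_work);

		mutex_lock(&rtwdev->mutex);
		/* re-check: the interface may have left idle meanwhile */
		if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)
			rtw_enter_ips(rtwdev);
		mutex_unlock(&rtwdev->mutex);
	}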
int rtw_core_start(struct rtw_dev *rtwdev)
@@ -1397,6 +1479,7 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
mutex_unlock(&rtwdev->mutex);
cancel_work_sync(&rtwdev->c2h_work);
+ cancel_work_sync(&rtwdev->update_beacon_work);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
cancel_delayed_work_sync(&coex->bt_reenable_work);
@@ -1415,6 +1498,7 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
ht_cap->ht_supported = true;
@@ -1433,7 +1517,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_SGI_40;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ ht_cap->ampdu_density = chip->ampdu_density;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
if (efuse->hw_cap.nss > 1) {
ht_cap->mcs.rx_mask[0] = 0xFF;
@@ -1497,8 +1581,23 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->vht_mcs.tx_highest = highest;
}
+static u16 rtw_get_max_scan_ie_len(struct rtw_dev *rtwdev)
+{
+ u16 len;
+
+ len = rtwdev->chip->max_scan_ie_len;
+
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) &&
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822C)
+ len = IEEE80211_MAX_DATA_LEN;
+ else if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
+ len -= RTW_OLD_PROBE_PG_CNT * TX_PAGE_SIZE;
+
+ return len;
+}
+
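The budget is counted in 128-byte TX pages (TX_PAGE_SIZE, added to main.h below): firmware that reserves extra probe-request template pages loses IE space accordingly, while an 8822C without scan offload falls back to software scan and the generic IEEE80211_MAX_DATA_LEN limit. A sketch of the arithmetic with symbolic page counts (the actual RTW_PROBE_PG_CNT / RTW_OLD_PROBE_PG_CNT constants live in fw.h and are not shown in this diff):

	static u16 scan_ie_budget(u16 probe_pg_cnt, u16 old_probe_pg_cnt,
				  bool old_page_num_fw)
	{
		/* one page is reserved by the probe request template */
		u16 len = (probe_pg_cnt - 1) * TX_PAGE_SIZE;

		if (old_page_num_fw)
			len -= old_probe_pg_cnt * TX_PAGE_SIZE;

		return len;
	}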
static void rtw_set_supported_band(struct ieee80211_hw *hw,
- struct rtw_chip_info *chip)
+ const struct rtw_chip_info *chip)
{
struct rtw_dev *rtwdev = hw->priv;
struct ieee80211_supported_band *sband;
@@ -1530,12 +1629,43 @@ err_out:
}
static void rtw_unset_supported_band(struct ieee80211_hw *hw,
- struct rtw_chip_info *chip)
+ const struct rtw_chip_info *chip)
{
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
+static void rtw_vif_smps_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return;
+
+ if (rtwdev->hal.txrx_1ss)
+ ieee80211_request_smps(vif, 0, IEEE80211_SMPS_STATIC);
+ else
+ ieee80211_request_smps(vif, 0, IEEE80211_SMPS_OFF);
+}
+
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
+{
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss)
+ return;
+
+ rtwdev->hal.txrx_1ss = txrx_1ss;
+ if (txrx_1ss)
+ chip->ops->config_txrx_mode(rtwdev, BB_PATH_A, BB_PATH_A, false);
+ else
+ chip->ops->config_txrx_mode(rtwdev, hal->antenna_tx,
+ hal->antenna_rx, false);
+ rtw_iterate_vifs_atomic(rtwdev, rtw_vif_smps_iter, rtwdev);
+}
+
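rtw_set_txrx_1ss() is the WiFi half of the new MIMO power-save coex handshake: it collapses the BB paths to path A and, through the iterator above, asks each associated STA interface to announce static SMPS so the AP stops transmitting two spatial streams. A hypothetical call site (the real one sits in the coex code of this series):

	/* Hypothetical: give BT the shared antenna while it is busy,
	 * restore 2SS operation afterwards. */
	static void example_coex_mimo_ps(struct rtw_dev *rtwdev, bool bt_busy)
	{
		if (rtwdev->chip->wl_mimo_ps_support)
			rtw_set_txrx_1ss(rtwdev, bt_busy);
	}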
static void __update_firmware_feature(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
@@ -1545,6 +1675,10 @@ static void __update_firmware_feature(struct rtw_dev *rtwdev,
feature = le32_to_cpu(fw_hdr->feature);
fw->feature = feature & FW_FEATURE_SIG ? feature : 0;
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C &&
+ RTW_FW_SUIT_VER_CODE(rtwdev->fw) < RTW_FW_VER_CODE(9, 9, 13))
+ fw->feature_ext |= FW_FEATURE_EXT_OLD_PAGE_NUM;
}
static void __update_firmware_info(struct rtw_dev *rtwdev,
@@ -1638,7 +1772,7 @@ static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type)
static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
@@ -1896,7 +2030,7 @@ static void rtw_stats_init(struct rtw_dev *rtwdev)
int rtw_core_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
int ret;
@@ -1906,6 +2040,10 @@ int rtw_core_init(struct rtw_dev *rtwdev)
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!rtwdev->tx_wq) {
+ rtw_warn(rtwdev, "alloc_workqueue rtw_tx_wq failed\n");
+ return -ENOMEM;
+ }
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
@@ -1919,7 +2057,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
+ INIT_WORK(&rtwdev->update_beacon_work, rtw_fw_update_beacon_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
@@ -1953,7 +2093,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
- return ret;
+ goto out;
}
if (chip->wow_fw_name) {
@@ -1963,11 +2103,15 @@ int rtw_core_init(struct rtw_dev *rtwdev)
wait_for_completion(&rtwdev->fw.completion);
if (rtwdev->fw.firmware)
release_firmware(rtwdev->fw.firmware);
- return ret;
+ goto out;
}
}
return 0;
+
+out:
+ destroy_workqueue(rtwdev->tx_wq);
+ return ret;
}
EXPORT_SYMBOL(rtw_core_init);
@@ -2044,7 +2188,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
- hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN;
+ hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
@@ -2088,7 +2232,7 @@ EXPORT_SYMBOL(rtw_register_hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index dc1cd9bd4b8a..bccd7b28f60c 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -17,13 +17,11 @@
#include "util.h"
-#define RTW_NAPI_WEIGHT_NUM 64
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
#define RTW_SCAN_MAX_SSIDS 4
-#define RTW_SCAN_MAX_IE_LEN 128
#define RTW_MAX_PATTERN_NUM 12
#define RTW_MAX_PATTERN_MASK_SIZE 16
@@ -34,6 +32,7 @@
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
#define TX_PAGE_SIZE_SHIFT 7
+#define TX_PAGE_SIZE (1 << TX_PAGE_SIZE_SHIFT)
#define RTW_CHANNEL_WIDTH_MAX 3
#define RTW_RF_PATH_MAX 4
@@ -511,12 +510,8 @@ struct rtw_timer_list {
struct rtw_channel_params {
u8 center_chan;
+ u8 primary_chan;
u8 bandwidth;
- u8 primary_chan_idx;
- /* center channel by different available bandwidth,
- * val of (bw > current bandwidth) is invalid
- */
- u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1];
};
struct rtw_hw_reg {
@@ -580,6 +575,7 @@ struct rtw_tx_pkt_info {
u32 tx_pkt_size;
u8 offset;
u8 pkt_offset;
+ u8 tim_offset;
u8 mac_id;
u8 rate_id;
u8 rate;
@@ -753,7 +749,6 @@ struct rtw_sta_info {
u8 ldpc_en:2;
bool sgi_enable;
bool vht_enable;
- bool updated;
u8 init_ra_lv;
u64 ra_mask;
@@ -874,6 +869,8 @@ struct rtw_chip_ops {
enum rtw_bb_path tx_path_1ss,
enum rtw_bb_path tx_path_cck,
bool is_tx2_path);
+ void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1177,6 +1174,7 @@ struct rtw_chip_info {
bool rx_ldpc;
bool tx_stbc;
u8 max_power_index;
+ u8 ampdu_density;
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
const struct rtw_fwcd_segs *fwcd_segs;
@@ -1230,9 +1228,7 @@ struct rtw_chip_info {
const char *wow_fw_name;
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
-
- /* for 8821c set channel */
- u32 ch_param[3];
+ const u16 max_scan_ie_len;
/* coex paras */
u32 coex_para_ver;
@@ -1240,6 +1236,7 @@ struct rtw_chip_info {
bool scbd_support;
bool new_scbd10_def; /* true: fix 2M(8822c) */
bool ble_hid_profile_support;
+ bool wl_mimo_ps_support;
u8 pstdma_type; /* 0: LPSoff, 1:LPSon */
u8 bt_rssi_type;
u8 ant_isolation;
@@ -1352,6 +1349,42 @@ struct rtw_coex_dm {
#define COEX_BTINFO_LENGTH_MAX 10
#define COEX_BTINFO_LENGTH 7
+#define COEX_BT_HIDINFO_LIST 0x0
+#define COEX_BT_HIDINFO_A 0x1
+#define COEX_BT_HIDINFO_NAME 3
+
+#define COEX_BT_HIDINFO_LENGTH 6
+#define COEX_BT_HIDINFO_HANDLE_NUM 4
+#define COEX_BT_HIDINFO_C2H_HANDLE 0
+#define COEX_BT_HIDINFO_C2H_VENDOR 1
+#define COEX_BT_BLE_HANDLE_THRS 0x10
+#define COEX_BT_HIDINFO_NOTCON 0xff
+
+struct rtw_coex_hid {
+ u8 hid_handle;
+ u8 hid_vendor;
+ u8 hid_name[COEX_BT_HIDINFO_NAME];
+ bool hid_info_completed;
+ bool is_game_hid;
+};
+
+struct rtw_coex_hid_handle_list {
+ u8 cmd_id;
+ u8 len;
+ u8 subid;
+ u8 handle_cnt;
+ u8 handle[COEX_BT_HIDINFO_HANDLE_NUM];
+} __packed;
+
+struct rtw_coex_hid_info_a {
+ u8 cmd_id;
+ u8 len;
+ u8 subid;
+ u8 handle;
+ u8 vendor;
+ u8 name[COEX_BT_HIDINFO_NAME];
+} __packed;
+
struct rtw_coex_stat {
bool bt_disabled;
bool bt_disabled_pre;
@@ -1382,6 +1415,8 @@ struct rtw_coex_stat {
bool bt_slave;
bool bt_418_hid_exist;
bool bt_ble_hid_exist;
+ bool bt_game_hid_exist;
+ bool bt_hid_handle_cnt;
bool bt_mailbox_reply;
bool wl_under_lps;
@@ -1402,6 +1437,7 @@ struct rtw_coex_stat {
bool wl_connecting;
bool wl_slot_toggle;
bool wl_slot_toggle_change; /* if toggle to no-toggle */
+ bool wl_mimo_ps;
u32 bt_supported_version;
u32 bt_supported_feature;
@@ -1459,6 +1495,9 @@ struct rtw_coex_stat {
u32 darfrc;
u32 darfrch;
+
+ struct rtw_coex_hid hid_info[COEX_BT_HIDINFO_HANDLE_NUM];
+ struct rtw_coex_hid_handle_list hid_handle_list;
};
struct rtw_coex {
@@ -1811,6 +1850,7 @@ struct rtw_fw_state {
u8 sub_index;
u16 h2c_version;
u32 feature;
+ u32 feature_ext;
};
enum rtw_sar_sources {
@@ -1854,6 +1894,7 @@ struct rtw_hal {
u8 current_primary_channel_index;
u8 current_band_width;
u8 current_band_type;
+ u8 primary_channel;
/* center channel for different available bandwidth,
* val of (bw > current_band_width) is invalid
@@ -1867,6 +1908,7 @@ struct rtw_hal {
u32 antenna_tx;
u32 antenna_rx;
u8 bfee_sts_cap;
+ bool txrx_1ss;
/* protect tx power section */
struct mutex tx_power_mutex;
@@ -1891,6 +1933,9 @@ struct rtw_hal {
enum rtw_sar_bands sar_band;
struct rtw_sar sar;
+
+ /* for 8821c set channel */
+ u32 ch_param[3];
};
struct rtw_path_div {
@@ -1921,6 +1966,7 @@ struct rtw_hw_scan_info {
struct ieee80211_vif *scanning_vif;
u8 probe_pg_size;
u8 op_pri_ch_idx;
+ u8 op_pri_ch;
u8 op_chan;
u8 op_bw;
};
@@ -1932,7 +1978,7 @@ struct rtw_dev {
struct rtw_hci hci;
struct rtw_hw_scan_info scan_info;
- struct rtw_chip_info *chip;
+ const struct rtw_chip_info *chip;
struct rtw_hal hal;
struct rtw_fifo_conf fifo;
struct rtw_fw_state fw;
@@ -1960,7 +2006,9 @@ struct rtw_dev {
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct work_struct fw_recovery_work;
+ struct work_struct update_beacon_work;
/* used to protect txqs list */
spinlock_t txq_lock;
@@ -2084,7 +2132,22 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
return 0;
}
+static inline
+enum nl80211_band rtw_hw_to_nl80211_band(enum rtw_supported_band hw_band)
+{
+ switch (hw_band) {
+ default:
+ case RTW_BAND_2G:
+ return NL80211_BAND_2GHZ;
+ case RTW_BAND_5G:
+ return NL80211_BAND_5GHZ;
+ case RTW_BAND_60G:
+ return NL80211_BAND_60GHZ;
+ }
+}
+
void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel);
+void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period);
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
@@ -2098,10 +2161,12 @@ void rtw_chip_prepare_tx(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
void rtw_tx_report_purge_timer(struct timer_list *t);
-void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ bool reset_ra_mask);
void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan);
int rtw_core_start(struct rtw_dev *rtwdev);
void rtw_core_stop(struct rtw_dev *rtwdev);
int rtw_chip_info_setup(struct rtw_dev *rtwdev);
@@ -2121,5 +2186,8 @@ void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
u32 fwcd_item);
int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
-
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss);
+void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
+ u8 primary_channel, enum rtw_supported_band band,
+ enum rtw_bandwidth bandwidth);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a0991d3f15c0..0975d27240e4 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -322,7 +322,7 @@ static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
struct rtw_pci_rx_ring *rx_ring;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
int tx_desc_size, rx_desc_size;
u32 len;
@@ -689,6 +689,9 @@ static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
queue = RTW_TX_QUEUE_BCN;
else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
queue = RTW_TX_QUEUE_MGMT;
+ else if (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1))
+ queue = RTW_TX_QUEUE_HI0;
else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
queue = ac_to_hwq[IEEE80211_AC_BE];
else
@@ -718,7 +721,7 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
u32 idx)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_rx_buffer_desc *buf_desc;
u32 desc_sz = chip->rx_buf_desc_sz;
u16 total_pkt_size;
@@ -831,7 +834,7 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
struct sk_buff *skb, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_tx_ring *ring;
struct rtw_pci_tx_data *tx_data;
dma_addr_t dma;
@@ -1070,7 +1073,7 @@ static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u8 hw_queue, u32 limit)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct napi_struct *napi = &rtwpci->napi;
struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
struct rtw_rx_pkt_stat pkt_stat;
@@ -1422,7 +1425,7 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u16 link_ctrl;
@@ -1464,7 +1467,7 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
switch (chip->id) {
case RTW_CHIP_TYPE_8822C:
@@ -1479,12 +1482,15 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
u16 value;
u16 offset;
int i;
+ int ret;
cut = BIT(0) << rtwdev->hal.cut_version;
@@ -1517,13 +1523,22 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
}
rtw_pci_link_cfg(rtwdev);
+
+ /* Disable 8821ce completion timeout by default */
+ if (chip->id == RTW_CHIP_TYPE_8821C) {
+ ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
+ if (ret)
+ rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
+ ret);
+ }
}
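pcie_capability_set_word() read-modify-writes the Device Control 2 register of the PCIe capability; PCI_EXP_DEVCTL2_COMP_TMOUT_DIS stops the 8821CE from flagging completion timeouts. Purely as illustration (not part of the patch), the setting could be verified like this:

	u16 devctl2;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2);
	if (!(devctl2 & PCI_EXP_DEVCTL2_COMP_TMOUT_DIS))
		rtw_warn(rtwdev, "completion timeout disable not latched\n");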
static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
@@ -1535,7 +1550,7 @@ static int __maybe_unused rtw_pci_resume(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
@@ -1702,8 +1717,7 @@ static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
init_dummy_netdev(&rtwpci->netdev);
- netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
- RTW_NAPI_WEIGHT_NUM);
+ netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
}
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
@@ -1770,7 +1784,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
}
/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
- if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL)
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
rtwpci->rx_no_aspm = true;
rtw_pci_phy_cfg(rtwdev);
@@ -1833,7 +1847,7 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw_dev *rtwdev;
- struct rtw_chip_info *chip;
+ const struct rtw_chip_info *chip;
if (!hw)
return;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index e505d17f107e..bd7d05e08084 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(rtw_phy_set_edcca_th);
void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
/* turn off in debugfs for debug usage */
@@ -165,7 +165,7 @@ void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
rtw_phy_adaptivity_set_mode(rtwdev);
if (chip->ops->adaptivity_init)
@@ -180,7 +180,7 @@ static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->cfo_init)
chip->ops->cfo_init(rtwdev);
@@ -199,7 +199,7 @@ static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
void rtw_phy_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 addr, mask;
@@ -226,7 +226,7 @@ EXPORT_SYMBOL(rtw_phy_init);
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u32 addr, mask;
u8 path;
@@ -245,7 +245,7 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->false_alarm_statistics(rtwdev);
}
@@ -536,7 +536,7 @@ static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
struct rtw_dev *rtwdev = data;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
- rtw_update_sta_info(rtwdev, si);
+ rtw_update_sta_info(rtwdev, si, false);
}
static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
@@ -603,7 +603,7 @@ static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev)
static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->dpk_track)
chip->ops->dpk_track(rtwdev);
@@ -659,7 +659,7 @@ EXPORT_SYMBOL(rtw_phy_parsing_cfo);
static void rtw_phy_cfo_track(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->cfo_track)
chip->ops->cfo_track(rtwdev);
@@ -720,8 +720,8 @@ static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- struct rtw_chip_info *chip = rtwdev->chip;
u32 cck_fa = dm_info->cck_fa_cnt;
u8 level;
@@ -816,23 +816,18 @@ static u8 rtw_phy_linear_2_db(u64 linear)
u8 j;
u32 dB;
- if (linear >= db_invert_table[11][7])
- return 96; /* maximum 96 dB */
-
for (i = 0; i < 12; i++) {
- if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
- break;
- else if (i > 2 && linear <= db_invert_table[i][7])
- break;
+ for (j = 0; j < 8; j++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
+ goto cnt;
+ else if (i > 2 && linear <= db_invert_table[i][j])
+ goto cnt;
+ }
}
- for (j = 0; j < 8; j++) {
- if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
- break;
- else if (i > 2 && linear <= db_invert_table[i][j])
- break;
- }
+ return 96; /* maximum 96 dB */
+cnt:
if (j == 0 && i == 0)
goto end;
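The rework folds the two sequential scans of db_invert_table (first locating row i, then re-scanning row i for column j) into one nested scan that jumps to cnt: at the first entry >= the input, and moves the 96 dB clamp after the loop so it also covers fall-through. A toy version of the same idea, assuming a monotonically increasing 12x8 table at 1 dB per entry and ignoring the FRAC_BITS fixed-point shift the real code applies for i <= 2:

	static u8 linear_to_db(const u64 table[12][8], u64 linear)
	{
		int i, j;

		for (i = 0; i < 12; i++)
			for (j = 0; j < 8; j++)
				if (linear <= table[i][j])
					return i * 8 + j + 1; /* 1 dB/entry */

		return 96; /* beyond the table: clamp to 96 dB */
	}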
@@ -900,7 +895,7 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 val, direct_addr;
@@ -923,7 +918,7 @@ u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_rf_sipi_addr *rf_sipi_addr;
const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
u32 val32;
@@ -972,8 +967,8 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
- u32 *sipi_addr = chip->rf_sipi_addr;
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *sipi_addr = chip->rf_sipi_addr;
u32 data_and_addr;
u32 old_data = 0;
u32 shift;
@@ -1012,7 +1007,7 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 direct_addr;
@@ -1747,7 +1742,7 @@ EXPORT_SYMBOL(rtw_phy_cfg_rf);
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
if (!chip->rfk_init_tbl)
@@ -1766,7 +1761,7 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
@@ -1875,7 +1870,7 @@ static u8 rtw_get_channel_group(u8 channel, u8 rate)
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
s8 dpd_diff = 0;
if (!chip->en_dis_dpd)
@@ -1909,7 +1904,7 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
bool mcs_rate;
bool above_2ss;
@@ -1956,7 +1951,7 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
u8 upper, lower;
bool mcs_rate;
@@ -2209,7 +2204,7 @@ static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u8 path;
@@ -2484,7 +2479,7 @@ static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
{
struct rtw_path_div *path_div = &rtwdev->dm_path_div;
enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (tx_path_sel_1ss == path_div->current_tx_path)
return;
@@ -2539,7 +2534,7 @@ static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->path_div_supported)
return;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index b6c5ae60a462..ccfcbd3ced03 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -114,7 +114,7 @@ const struct rtw_table name ## _tbl = { \
static inline const struct rtw_rfe_def *rtw_get_rfe_def(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct rtw_rfe_def *rfe_def = NULL;
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index bfa64c038f5f..c93da743681f 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -19,14 +19,14 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
- clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
+ if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ return 0;
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
@@ -50,6 +50,9 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
+ if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ return 0;
+
rtw_hci_link_ps(rtwdev, false);
ret = rtw_ips_pwr_up(rtwdev);
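With IPS now reachable from both the mac80211 callbacks and the deferred ips_work, the test_and_set_bit()/test_and_clear_bit() pair makes enter/leave idempotent: of two racing callers, only the one that actually flips RTW_FLAG_INACTIVE_PS performs the power transition; the other observes the old value and returns 0 early.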
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 84ba9ec489c3..03bd8dc53f72 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -389,12 +389,14 @@
#define BIT_EN_FREE_CNT BIT(3)
#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
#define REG_HIQ_NO_LMT_EN 0x5A7
+#define REG_DTIM_COUNTER_ROOT 0x5A8
#define BIT_HIQ_NO_LMT_EN_ROOT BIT(0)
#define REG_TIMER0_SRC_SEL 0x05B4
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
#define REG_TCR 0x0604
#define BIT_PWRMGT_HWDATA_EN BIT(7)
+#define BIT_TCR_UPDATE_TIMIE BIT(5)
#define REG_RCR 0x0608
#define BIT_APP_FCS BIT(31)
#define BIT_APP_MIC BIT(30)
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index 315c2b193e92..2f547cbcf6da 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -479,6 +479,7 @@ void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n",
rtwdev->regd.state, next_regd.state);
+ mutex_lock(&rtwdev->mutex);
rtwdev->regd = next_regd;
rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ",
request->alpha2[0],
@@ -487,6 +488,7 @@ void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
rtw_phy_adaptivity_set_mode(rtwdev);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+ mutex_unlock(&rtwdev->mutex);
}
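Taking rtwdev->mutex here serializes the regulatory notifier's writes to rtwdev->regd, and its TX-power/adaptivity re-apply, against the driver paths that read the same state under that lock. If this grows further, the shared helpers could document the invariant with a lockdep assertion (hypothetical addition, not in this patch):

	static void rtw_regd_apply_locked(struct rtw_dev *rtwdev, u8 channel)
	{
		lockdep_assert_held(&rtwdev->mutex);

		rtw_phy_adaptivity_set_mode(rtwdev);
		rtw_phy_set_tx_power_level(rtwdev, channel);
	}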
u8 rtw_regd_get(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 3fdbaf7302c5..0a4f770fcbb7 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2701,7 +2701,7 @@ static const struct rtw_reg_domain coex_info_hw_regs_8723d[] = {
{0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
};
-struct rtw_chip_info rtw8723d_hw_spec = {
+const struct rtw_chip_info rtw8723d_hw_spec = {
.ops = &rtw8723d_ops,
.id = RTW_CHIP_TYPE_8723D,
.fw_name = "rtw88/rtw8723d_fw.bin",
@@ -2720,7 +2720,7 @@ struct rtw_chip_info rtw8723d_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = false,
@@ -2747,12 +2747,15 @@ struct rtw_chip_info rtw8723d_hw_spec = {
.rx_ldpc = false,
.pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl,
.iqk_threshold = 8,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x2007022f,
.bt_desired_ver = 0x2f,
.scbd_support = true,
.new_scbd10_def = true,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 41d35174a542..4641f6e047b4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -72,6 +72,8 @@ struct rtw8723d_efuse {
struct rtw8723de_efuse e;
};
+extern const struct rtw_chip_info rtw8723d_hw_spec;
+
/* phy status page0 */
#define GET_PHY_STAT_P0_PWDB(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
index 2dd689441e8d..abbaafa32851 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8723de.h"
+#include "rtw8723d.h"
static const struct pci_device_id rtw_8723de_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.h b/drivers/net/wireless/realtek/rtw88/rtw8723de.h
deleted file mode 100644
index 2b4894846a07..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8723DE_H_
-#define __RTW_8723DE_H_
-
-extern struct rtw_chip_info rtw8723d_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index db078df63f85..9afdc5ce86b4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -125,6 +125,7 @@ static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev)
static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u8 crystal_cap, val;
/* power on BB/RF domain */
@@ -159,9 +160,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
/* post init after header files config */
rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
- rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
- rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
- rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
+ hal->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
+ hal->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
+ hal->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
rtw_phy_init(rtwdev);
rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f;
@@ -351,6 +352,7 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u32 val32;
if (channel <= 14) {
@@ -367,11 +369,11 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
} else {
rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD,
- rtwdev->chip->ch_param[0]);
+ hal->ch_param[0]);
rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD,
- rtwdev->chip->ch_param[1] & MASKLWORD);
+ hal->ch_param[1] & MASKLWORD);
rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD,
- rtwdev->chip->ch_param[2]);
+ hal->ch_param[2]);
}
} else if (channel > 35) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
@@ -499,7 +501,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
}
if (lna_idx >= lna_gain_table_size) {
- rtw_info(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
return -120;
}
@@ -512,6 +514,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s8 rx_power;
u8 lna_idx = 0;
u8 vga_idx = 0;
@@ -523,6 +526,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = rx_power;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = rx_power;
}
@@ -530,6 +534,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
@@ -549,6 +554,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
pkt_stat->bw = bw;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
@@ -1514,6 +1520,7 @@ static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [6] = RTW_DEF_RFE(8821c, 0, 0),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
@@ -1872,7 +1879,7 @@ static const struct rtw_reg_domain coex_info_hw_regs_8821c[] = {
{0x60A, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
-struct rtw_chip_info rtw8821c_hw_spec = {
+const struct rtw_chip_info rtw8821c_hw_spec = {
.ops = &rtw8821c_ops,
.id = RTW_CHIP_TYPE_8821C,
.fw_name = "rtw88/rtw8821c_fw.bin",
@@ -1891,7 +1898,7 @@ struct rtw_chip_info rtw8821c_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
@@ -1918,12 +1925,15 @@ struct rtw_chip_info rtw8821c_hw_spec = {
.iqk_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x19092746,
.bt_desired_ver = 0x46,
.scbd_support = true,
.new_scbd10_def = false,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index d9fbddd7b0f3..2698801fc35d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -84,6 +84,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
rtw_write32_mask(rtwdev, addr + 0x200, mask, data);
}
+extern const struct rtw_chip_info rtw8821c_hw_spec;
+
#define rtw_write32s_mask(rtwdev, addr, mask, data) \
do { \
BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00); \
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
index 8e8915c5c498..6c82c4383497 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
@@ -13,7 +13,7 @@ static const u32 rtw8821c_mac[] = {
0x04F, 0x00000001,
0x029, 0x000000F9,
0x420, 0x00000080,
- 0x421, 0x0000000F,
+ 0x421, 0x0000001F,
0x428, 0x0000000A,
0x429, 0x00000010,
0x430, 0x00000000,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
index f34de115e4bc..f3d971feda04 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
@@ -5,10 +5,14 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8821ce.h"
+#include "rtw8821c.h"
static const struct pci_device_id rtw_8821ce_id_table[] = {
{
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB821),
+ .driver_data = (kernel_ulong_t)&rtw8821c_hw_spec
+ },
+ {
PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC821),
.driver_data = (kernel_ulong_t)&rtw8821c_hw_spec
},
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h
deleted file mode 100644
index 54142acca534..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8821CE_H_
-#define __RTW_8821CE_H_
-
-extern struct rtw_chip_info rtw8821c_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index dd4fbb82750d..690e35c98f6e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1012,12 +1012,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2497,7 +2497,7 @@ static struct rtw_hw_reg_offset rtw8822b_edcca_th[] = {
[EDCCA_TH_H2L_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE1}, .offset = 0},
};
-struct rtw_chip_info rtw8822b_hw_spec = {
+const struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
.fw_name = "rtw88/rtw8822b_fw.bin",
@@ -2517,7 +2517,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
@@ -2548,12 +2548,15 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.edcca_th = rtw8822b_edcca_th,
.l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
.l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
.scbd_support = true,
.new_scbd10_def = false,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 3fff8b881854..01d3644e0c94 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -187,4 +187,6 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_ANTWT 0x1904
#define REG_IQKFAILMSK 0x1bf0
+extern const struct rtw_chip_info rtw8822b_hw_spec;
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
index 62ee7e62cac0..4994950776cd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8822be.h"
+#include "rtw8822b.h"
static const struct pci_device_id rtw_8822be_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.h b/drivers/net/wireless/realtek/rtw88/rtw8822be.h
deleted file mode 100644
index 6668460d664d..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8822BE_H_
-#define __RTW_8822BE_H_
-
-extern struct rtw_chip_info rtw8822b_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 35c46e5209de..fccb15dfb959 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2798,7 +2798,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
@@ -2808,7 +2808,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2996,19 +2996,34 @@ static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
* enable "DAC off if GNT_WL = 0" for non-shared-antenna
* disable 0x1c30[22] = 0,
* enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1
- *
- * disable WL-S1 BB chage RF mode if GNT_BT
+ */
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+ BIT_ANAPAR_BTPS >> 16, 0);
+ } else {
+ rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+ BIT_ANAPAR_BTPS >> 16, 1);
+ rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1,
+ BIT_DAC_OFF_ENABLE, 0);
+ rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3,
+ BIT_DAC_OFF_ENABLE, 1);
+ }
+
+ /* disable WL-S1 BB change RF mode if GNT_BT
* since RF TRx mask can do it
*/
- rtw_write8_mask(rtwdev, REG_ANAPAR + 2, BIT_ANAPAR_BTPS >> 16, 1);
- rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1, BIT_DAC_OFF_ENABLE, 0);
- rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3, BIT_DAC_OFF_ENABLE, 1);
- rtw_write8_mask(rtwdev, REG_IGN_GNTBT4, BIT_PI_IGNORE_GNT_BT, 1);
+ rtw_write8_mask(rtwdev, REG_IGN_GNTBT4,
+ BIT_PI_IGNORE_GNT_BT, 1);
/* disable WL-S0 BB change RF mode if wifi is at 5G,
* or antenna path is separated
*/
- if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
+ BIT_PI_IGNORE_GNT_BT, 1);
+ rtw_write8_mask(rtwdev, REG_NOMASK_TXBT,
+ BIT_NOMASK_TXBT_ENABLE, 1);
+ } else if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
coex->under_5g || !efuse->share_ant) {
if (coex_stat->kt_ver >= 3) {
rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
@@ -4962,6 +4977,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
+ .config_txrx_mode = rtw8822c_config_trx_mode,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -5007,6 +5023,8 @@ static const struct coex_table_para table_sant_8822c[] = {
{0x66556aaa, 0x6a5a6aaa}, /*case-30*/
{0xffffffff, 0x5aaa5aaa},
{0x56555555, 0x5a5a5aaa},
+ {0xdaffdaff, 0xdaffdaff},
+ {0xddffddff, 0xddffddff},
};
/* Non-Shared-Antenna Coex Table */
@@ -5107,7 +5125,8 @@ static const struct coex_rf_para rf_para_tx_8822c[] = {
{8, 17, true, 4},
{7, 18, true, 4},
{6, 19, true, 4},
- {5, 20, true, 4}
+ {5, 20, true, 4},
+ {0, 21, true, 4} /* for gaming hid */
};
static const struct coex_rf_para rf_para_rx_8822c[] = {
@@ -5116,7 +5135,8 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
{3, 24, true, 5},
{2, 26, true, 5},
{1, 27, true, 5},
- {0, 28, true, 5}
+ {0, 28, true, 5},
+ {0, 28, true, 5} /* for gaming hid */
};
static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
@@ -5290,7 +5310,7 @@ static const struct rtw_reg_domain coex_info_hw_regs_8822c[] = {
{0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
-struct rtw_chip_info rtw8822c_hw_spec = {
+const struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
.fw_name = "rtw88/rtw8822c_fw.bin",
@@ -5310,7 +5330,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.max_power_index = 0x7f,
.csi_buf_pg_num = 50,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
.default_1ss_tx_path = BB_PATH_A,
.path_div_supported = true,
@@ -5348,17 +5368,20 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.edcca_th = rtw8822c_edcca_th,
.l2h_th_ini_cs = 60,
.l2h_th_ini_ad = 45,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
.wowlan_stub = &rtw_wowlan_stub_8822c,
.max_sched_scan_ssids = 4,
#endif
- .coex_para_ver = 0x2103181c,
- .bt_desired_ver = 0x1c,
+ .max_scan_ie_len = (RTW_PROBE_PG_CNT - 1) * TX_PAGE_SIZE,
+ .coex_para_ver = 0x22020720,
+ .bt_desired_ver = 0x20,
.scbd_support = true,
.new_scbd10_def = true,
.ble_hid_profile_support = true,
+ .wl_mimo_ps_support = true,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_DBM,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 8201955e1f21..479d5d769c52 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -118,6 +118,8 @@ enum rtw8822c_dpk_one_shot_action {
void rtw8822c_parse_tbl_dpk(struct rtw_dev *rtwdev,
const struct rtw_table *tbl);
+extern const struct rtw_chip_info rtw8822c_hw_spec;
+
#define RTW_DECL_TABLE_DPK(name) \
const struct rtw_table name ## _tbl = { \
.data = name, \
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
index 3845b1333dc3..e26c6bc82936 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
@@ -5,7 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "pci.h"
-#include "rtw8822ce.h"
+#include "rtw8822c.h"
static const struct pci_device_id rtw_8822ce_id_table[] = {
{
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h
deleted file mode 100644
index fee32d7a4504..000000000000
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2018-2019 Realtek Corporation
- */
-
-#ifndef __RTW_8822CE_H_
-#define __RTW_8822CE_H_
-
-extern struct rtw_chip_info rtw8822c_hw_spec;
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index d2d607e22198..84aedabdf285 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -158,7 +158,8 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
memset(rx_status, 0, sizeof(*rx_status));
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) &&
+ test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status);
if (pkt_stat->crc_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
index 3383726c4d90..c472f1502b82 100644
--- a/drivers/net/wireless/realtek/rtw88/sar.c
+++ b/drivers/net/wireless/realtek/rtw88/sar.c
@@ -91,10 +91,10 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
return -EINVAL;
power = sar->sub_specs[i].power;
- rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
- rtw_common_sar_freq_ranges[idx].start_freq,
- rtw_common_sar_freq_ranges[idx].end_freq,
- power, BIT(RTW_COMMON_SAR_FCT));
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
+ rtw_common_sar_freq_ranges[idx].start_freq,
+ rtw_common_sar_freq_ranges[idx].end_freq,
+ power, BIT(RTW_COMMON_SAR_FCT));
for (j = 0; j < RTW_RF_PATH_MAX; j++) {
for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index efcc1b0371a8..ab39245e9c2f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -67,12 +67,16 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr);
SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null);
+ if (pkt_info->tim_offset) {
+ SET_TX_DESC_TIM_EN(txdesc, 1);
+ SET_TX_DESC_TIM_OFFSET(txdesc, pkt_info->tim_offset);
+ }
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
- u8 exp = sta->ht_cap.ampdu_factor;
+ u8 exp = sta->deflink.ht_cap.ampdu_factor;
/* the least ampdu factor is 8K, and the value in the tx desc is the
* max aggregation num, which represents val * 2 packets can be
@@ -83,7 +87,7 @@ static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
- return sta->ht_cap.ampdu_density;
+ return sta->deflink.ht_cap.ampdu_density;
}
static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
@@ -91,7 +95,7 @@ static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
{
u8 rate;
- if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
+ if (rtwdev->hal.rf_type == RF_2T2R && sta->deflink.ht_cap.mcs.rx_mask[1] != 0)
rate = DESC_RATEMCS15;
else
rate = DESC_RATEMCS7;
@@ -106,7 +110,7 @@ static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
u8 rate;
u16 tx_mcs_map;
- tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
+ tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
if (efuse->hw_cap.nss == 1) {
switch (tx_mcs_map & 0x3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
@@ -340,11 +344,11 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold)
pkt_info->rts = true;
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
rate = get_highest_vht_tx_rate(rtwdev, sta);
- else if (sta->ht_cap.ht_supported)
+ else if (sta->deflink.ht_cap.ht_supported)
rate = get_highest_ht_tx_rate(rtwdev, sta);
- else if (sta->supp_rates[0] <= 0xf)
+ else if (sta->deflink.supp_rates[0] <= 0xf)
rate = DESC_RATE11M;
else
rate = DESC_RATE54M;
@@ -353,7 +357,7 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
bw = si->bw_mode;
rate_id = si->rate_id;
- stbc = si->stbc_en;
+ stbc = rtwdev->hal.txrx_1ss ? false : si->stbc_en;
ldpc = si->ldpc_en;
out:
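
A side note on the rate fallback above: mac80211 orders the 2 GHz legacy rates with the four CCK rates (1/2/5.5/11M) in bits 0-3 of the supp_rates bitmap, so a value at or below 0xf means the peer advertises no OFDM rate and the driver caps the fallback at 11M. A standalone C restatement of that check (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Bits 0..3 of the 2 GHz legacy-rate bitmap cover 1/2/5.5/11 Mbps in
 * mac80211's bitrate ordering; nothing above bit 3 means CCK-only.
 */
static bool peer_is_cck_only(uint32_t supp_rates_2g)
{
	return supp_rates_2g <= 0xf;
}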
@@ -380,7 +384,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtw_sta_info *si;
@@ -420,7 +424,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct sk_buff *skb,
enum rtw_rsvd_packet_type type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool bmc;
@@ -448,6 +452,19 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
if (type == RSVD_QOS_NULL)
pkt_info->bt_null = true;
+ if (type == RSVD_BEACON) {
+ struct rtw_rsvd_page *rsvd_pkt;
+ int hdr_len;
+
+ rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
+ struct rtw_rsvd_page,
+ build_list);
+ if (rsvd_pkt && rsvd_pkt->tim_offset != 0) {
+ hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ pkt_info->tim_offset = rsvd_pkt->tim_offset - hdr_len;
+ }
+ }
+
rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
/* TODO: need to change hw port and hw ssn sel for multiple vifs */
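
A worked sketch of the TIM-offset adjustment above: the reserved page records the TIM element's offset from the start of the beacon frame, while the descriptor field is programmed relative to the end of the 3-address header, hence the subtraction. The 24 matches sizeof(struct ieee80211_hdr_3addr); the frame offset of 60 is a hypothetical value for illustration only.

#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 24;    /* sizeof(struct ieee80211_hdr_3addr) */
	unsigned int tim_offset = 60; /* hypothetical offset within the beacon */

	printf("desc tim_offset = %u\n", tim_offset - hdr_len); /* prints 36 */
	return 0;
}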
@@ -458,7 +475,7 @@ rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
u32 tx_pkt_desc_sz;
u32 length;
@@ -484,7 +501,7 @@ rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
u32 tx_pkt_desc_sz;
u32 length;
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 56371eff9f7f..8419603adce4 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -33,6 +33,10 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(6, 5))
#define SET_TX_DESC_SW_SEQ(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
+#define SET_TX_DESC_TIM_EN(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, BIT(7))
+#define SET_TX_DESC_TIM_OFFSET(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(6, 0))
#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
#define SET_TX_DESC_USE_RTS(tx_desc, value) \
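
The new SET_TX_DESC_TIM_* macros above pack their fields into dword 9 of the TX descriptor via le32p_replace_bits(). A rough userspace equivalent of that helper, assuming a little-endian host (the kernel version additionally performs the __le32 conversion):

#include <stdint.h>

/* Clear the masked field in *dw, then place val at the mask's lowest
 * set bit - e.g. mask 0x7f for TIM_OFFSET, 0x80 for TIM_EN; mask must
 * be non-zero.
 */
static void replace_bits32(uint32_t *dw, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	*dw = (*dw & ~mask) | ((val << shift) & mask);
}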
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 2c515af214e7..cdfd66a85075 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(check_hw_ready);
bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
@@ -37,7 +37,7 @@ bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 37e5def24d9f..93e09400aac4 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -16,15 +16,33 @@ config RTW89_CORE
config RTW89_PCI
tristate
+config RTW89_8852A
+ tristate
+
+config RTW89_8852C
+ tristate
+
config RTW89_8852AE
- tristate "Realtek 8852AE PCI wireless network adapter"
+ tristate "Realtek 8852AE PCI wireless network (Wi-Fi 6) adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
+ select RTW89_8852A
help
Select this option will enable support for 8852AE chipset
- 802.11ax PCIe wireless network adapter
+ 802.11ax PCIe wireless network (Wi-Fi 6) adapter
+
+config RTW89_8852CE
+ tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8852C
+ help
+	  Select this option will enable support for 8852CE chipset
+	  802.11ax PCIe wireless network (Wi-Fi 6E) adapter
config RTW89_DEBUG
bool
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 077e8fe23f60..a87f2aff4def 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -6,18 +6,33 @@ rtw89_core-y += core.o \
mac.o \
phy.o \
fw.o \
- rtw8852a.o \
- rtw8852a_table.o \
- rtw8852a_rfk.o \
- rtw8852a_rfk_table.o \
cam.o \
efuse.o \
regd.o \
sar.o \
coex.o \
ps.o \
+ chan.o \
ser.o
+obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
+rtw89_8852a-objs := rtw8852a.o \
+ rtw8852a_table.o \
+ rtw8852a_rfk.o \
+ rtw8852a_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
+rtw89_8852ae-objs := rtw8852ae.o
+
+obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o
+rtw89_8852c-objs := rtw8852c.o \
+ rtw8852c_table.o \
+ rtw8852c_rfk.o \
+ rtw8852c_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852CE) += rtw89_8852ce.o
+rtw89_8852ce-objs := rtw8852ce.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index bd34e4bbe107..f5301c2bbf13 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -18,7 +18,7 @@ rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev,
u8 *cmd;
int i, j;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(cmd_len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
if (!skb)
return NULL;
@@ -231,7 +231,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
@@ -244,6 +244,12 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
addr_cam->sec_entries[key_idx] = sec_cam;
set_bit(key_idx, addr_cam->sec_cam_map);
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to update dctl cam sec entry: %d\n",
+ ret);
+ return ret;
+ }
ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
@@ -320,6 +326,7 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 hw_key_type;
bool ext_key = false;
int ret;
@@ -353,7 +360,8 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
return -EOPNOTSUPP;
}
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (!chip->hw_sec_hdr)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
ret = rtw89_cam_sec_key_install(rtwdev, vif, sta, key, hw_key_type,
ext_key);
@@ -387,7 +395,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
sec_cam = addr_cam->sec_entries[key_idx];
if (!sec_cam)
return -EINVAL;
@@ -396,6 +404,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
clear_bit(key_idx, addr_cam->sec_cam_map);
addr_cam->sec_entries[key_idx] = NULL;
if (inform_fw) {
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ rtw89_err(rtwdev, "failed to update dctl cam del key: %d\n", ret);
ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret)
rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
@@ -421,24 +432,37 @@ static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw,
void *data)
{
struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtw89_cam_sec_key_del(rtwdev, vif, sta, key, false);
- rtw89_cam_deinit(rtwdev, rtwvif);
}
-void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
- struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
addr_cam->valid = false;
- bssid_cam->valid = false;
clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
+}
+
+void rtw89_cam_deinit_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_bssid_cam_entry *bssid_cam)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ bssid_cam->valid = false;
clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
}
+void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ rtw89_cam_deinit_addr_cam(rtwdev, addr_cam);
+ rtw89_cam_deinit_bssid_cam(rtwdev, bssid_cam);
+}
+
void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev)
{
rcu_read_lock();
@@ -464,14 +488,20 @@ static int rtw89_cam_get_avail_addr_cam(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam)
{
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
u8 addr_cam_idx;
int i;
int ret;
+ if (unlikely(addr_cam->valid)) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "addr cam is already valid; skip init\n");
+ return 0;
+ }
+
ret = rtw89_cam_get_avail_addr_cam(rtwdev, &addr_cam_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get available addr cam\n");
@@ -484,14 +514,17 @@ static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
addr_cam->valid = true;
addr_cam->addr_mask = 0;
addr_cam->mask_sel = RTW89_NO_MSK;
+ addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
bitmap_zero(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
- ether_addr_copy(addr_cam->sma, rtwvif->mac_addr);
for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
addr_cam->sec_ent_keyid[i] = 0;
addr_cam->sec_ent[i] = 0;
}
+ /* associate addr cam with bssid cam */
+ addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
+
return 0;
}
@@ -513,13 +546,20 @@ static int rtw89_cam_get_avail_bssid_cam(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_bssid_cam_entry *bssid_cam,
+ const u8 *bssid)
{
- struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
u8 bssid_cam_idx;
int ret;
+ if (unlikely(bssid_cam->valid)) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "bssid cam is already valid; skip init\n");
+ return 0;
+ }
+
ret = rtw89_cam_get_avail_bssid_cam(rtwdev, &bssid_cam_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get available bssid cam\n");
@@ -531,7 +571,7 @@ static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
bssid_cam->len = BSSID_CAM_ENT_SIZE;
bssid_cam->offset = 0;
bssid_cam->valid = true;
- ether_addr_copy(bssid_cam->bssid, rtwvif->bssid);
+ ether_addr_copy(bssid_cam->bssid, bssid);
return 0;
}
@@ -549,35 +589,40 @@ int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
int ret;
- ret = rtw89_cam_init_addr_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, rtwvif->bssid);
if (ret) {
- rtw89_err(rtwdev, "failed to init addr cam\n");
+ rtw89_err(rtwdev, "failed to init bssid cam\n");
return ret;
}
- ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_addr_cam(rtwdev, addr_cam, bssid_cam);
if (ret) {
- rtw89_err(rtwdev, "failed to init bssid cam\n");
+ rtw89_err(rtwdev, "failed to init addr cam\n");
return ret;
}
- /* associate addr cam with bssid cam */
- addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
-
return 0;
}
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, u8 *cmd)
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+ struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
u8 bss_color = vif->bss_conf.he_bss_color.color;
+ u8 bss_mask;
+
+ if (vif->bss_conf.nontransmitted)
+ bss_mask = RTW89_BSSID_MATCH_5_BYTES;
+ else
+ bss_mask = RTW89_BSSID_MATCH_ALL;
FWCMD_SET_ADDR_BSSID_IDX(cmd, bssid_cam->bssid_cam_idx);
FWCMD_SET_ADDR_BSSID_OFFSET(cmd, bssid_cam->offset);
FWCMD_SET_ADDR_BSSID_LEN(cmd, bssid_cam->len);
FWCMD_SET_ADDR_BSSID_VALID(cmd, bssid_cam->valid);
+ FWCMD_SET_ADDR_BSSID_MASK(cmd, bss_mask);
FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, bssid_cam->phy_idx);
FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, bss_color);
@@ -609,7 +654,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr;
u8 sma_hash, tma_hash, addr_msk_start;
@@ -665,7 +710,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind);
FWCMD_SET_ADDR_MACID(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
if (rtwvif->net_type == RTW89_NET_TYPE_INFRA)
- FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff);
+ FWCMD_SET_ADDR_AID12(cmd, vif->cfg.aid & 0xfff);
else if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
FWCMD_SET_ADDR_AID12(cmd, sta ? sta->aid & 0xfff : 0);
FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern);
@@ -690,3 +735,31 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
FWCMD_SET_ADDR_SEC_ENT5(cmd, addr_cam->sec_ent[5]);
FWCMD_SET_ADDR_SEC_ENT6(cmd, addr_cam->sec_ent[6]);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ u8 *cmd)
+{
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ SET_DCTL_MACID_V1(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
+ SET_DCTL_OPERATION_V1(cmd, 1);
+
+ SET_DCTL_SEC_ENT0_KEYID_V1(cmd, addr_cam->sec_ent_keyid[0]);
+ SET_DCTL_SEC_ENT1_KEYID_V1(cmd, addr_cam->sec_ent_keyid[1]);
+ SET_DCTL_SEC_ENT2_KEYID_V1(cmd, addr_cam->sec_ent_keyid[2]);
+ SET_DCTL_SEC_ENT3_KEYID_V1(cmd, addr_cam->sec_ent_keyid[3]);
+ SET_DCTL_SEC_ENT4_KEYID_V1(cmd, addr_cam->sec_ent_keyid[4]);
+ SET_DCTL_SEC_ENT5_KEYID_V1(cmd, addr_cam->sec_ent_keyid[5]);
+ SET_DCTL_SEC_ENT6_KEYID_V1(cmd, addr_cam->sec_ent_keyid[6]);
+
+ SET_DCTL_SEC_ENT_VALID_V1(cmd, addr_cam->sec_cam_map[0] & 0xff);
+ SET_DCTL_SEC_ENT0_V1(cmd, addr_cam->sec_ent[0]);
+ SET_DCTL_SEC_ENT1_V1(cmd, addr_cam->sec_ent[1]);
+ SET_DCTL_SEC_ENT2_V1(cmd, addr_cam->sec_ent[2]);
+ SET_DCTL_SEC_ENT3_V1(cmd, addr_cam->sec_ent[3]);
+ SET_DCTL_SEC_ENT4_V1(cmd, addr_cam->sec_ent[4]);
+ SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
+ SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
+}
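
The RTW89_BSSID_MATCH_* masks written above (and defined in cam.h below) read as one match-enable bit per BSSID octet: GENMASK(5, 0) compares all six bytes, GENMASK(4, 0) only the first five. In a Multiple BSSID set the nontransmitted BSSIDs differ from the transmitted one only in the low bits of the last octet, so a 5-byte match still hits the CAM entry. A sketch of that interpretation (an inference from the two definitions, not a documented hardware contract):

#include <stdbool.h>
#include <stdint.h>

static bool bssid_matches(const uint8_t *rx, const uint8_t *cam,
			  uint8_t mask)
{
	int i;

	for (i = 0; i < 6; i++)
		if ((mask & (1 << i)) && rx[i] != cam[i])
			return false;
	return true;
}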
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 33a3ad582b81..83c160a614e6 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -9,6 +9,9 @@
#define RTW89_SEC_CAM_LEN 20
+#define RTW89_BSSID_MATCH_ALL GENMASK(5, 0)
+#define RTW89_BSSID_MATCH_5_BYTES GENMASK(4, 0)
+
static inline void FWCMD_SET_ADDR_IDX(void *cmd, u32 value)
{
le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0));
@@ -309,6 +312,11 @@ static inline void FWCMD_SET_ADDR_BSSID_BB_SEL(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1));
}
+static inline void FWCMD_SET_ADDR_BSSID_MASK(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(7, 2));
+}
+
static inline void FWCMD_SET_ADDR_BSSID_BSS_COLOR(void *cmd, u32 value)
{
le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8));
@@ -346,12 +354,28 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam);
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam);
+int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_bssid_cam_entry *bssid_cam,
+ const u8 *bssid);
+void rtw89_cam_deinit_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_bssid_cam_entry *bssid_cam);
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif,
struct rtw89_sta *rtwsta,
const u8 *scan_mac_addr, u8 *cmd);
+void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ u8 *cmd);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif *vif, u8 *cmd);
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, u8 *cmd);
int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
new file mode 100644
index 000000000000..a4f61c2f6512
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+
+static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
+ u8 center_chan)
+{
+ switch (band) {
+ default:
+ case RTW89_BAND_2G:
+ switch (center_chan) {
+ default:
+ case 1 ... 14:
+ return RTW89_CH_2G;
+ }
+ case RTW89_BAND_5G:
+ switch (center_chan) {
+ default:
+ case 36 ... 64:
+ return RTW89_CH_5G_BAND_1;
+ case 100 ... 144:
+ return RTW89_CH_5G_BAND_3;
+ case 149 ... 177:
+ return RTW89_CH_5G_BAND_4;
+ }
+ case RTW89_BAND_6G:
+ switch (center_chan) {
+ default:
+ case 1 ... 29:
+ return RTW89_CH_6G_BAND_IDX0;
+ case 33 ... 61:
+ return RTW89_CH_6G_BAND_IDX1;
+ case 65 ... 93:
+ return RTW89_CH_6G_BAND_IDX2;
+ case 97 ... 125:
+ return RTW89_CH_6G_BAND_IDX3;
+ case 129 ... 157:
+ return RTW89_CH_6G_BAND_IDX4;
+ case 161 ... 189:
+ return RTW89_CH_6G_BAND_IDX5;
+ case 193 ... 221:
+ return RTW89_CH_6G_BAND_IDX6;
+ case 225 ... 253:
+ return RTW89_CH_6G_BAND_IDX7;
+ }
+ }
+}
+
+static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw,
+ u32 center_freq,
+ u32 primary_freq)
+{
+ u8 primary_chan_idx;
+ u32 offset;
+
+ switch (bw) {
+ default:
+ case RTW89_CHANNEL_WIDTH_20:
+ primary_chan_idx = RTW89_SC_DONT_CARE;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ if (primary_freq > center_freq)
+ primary_chan_idx = RTW89_SC_20_UPPER;
+ else
+ primary_chan_idx = RTW89_SC_20_LOWER;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_160:
+ if (primary_freq > center_freq) {
+ offset = (primary_freq - center_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
+ } else {
+ offset = (center_freq - primary_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
+ }
+ break;
+ }
+
+ return primary_chan_idx;
+}
+
+void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
+ enum rtw89_band band, enum rtw89_bandwidth bandwidth)
+{
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ u32 center_freq, primary_freq;
+
+ memset(chan, 0, sizeof(*chan));
+ chan->channel = center_chan;
+ chan->primary_channel = primary_chan;
+ chan->band_type = band;
+ chan->band_width = bandwidth;
+
+ center_freq = ieee80211_channel_to_frequency(center_chan, nl_band);
+ primary_freq = ieee80211_channel_to_frequency(primary_chan, nl_band);
+
+ chan->freq = center_freq;
+ chan->subband_type = rtw89_get_subband_type(band, center_chan);
+ chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq,
+ primary_freq);
+}
+
+bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct rtw89_chan *new)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chan *chan = &hal->chan[idx];
+ struct rtw89_chan_rcd *rcd = &hal->chan_rcd[idx];
+ bool band_changed;
+
+ rcd->prev_primary_channel = chan->primary_channel;
+ rcd->prev_band_type = chan->band_type;
+ band_changed = new->band_type != chan->band_type;
+
+ *chan = *new;
+ return band_changed;
+}
+
+static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef,
+ bool from_stack)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ hal->chandef[idx] = *chandef;
+
+ if (from_stack)
+ set_bit(idx, hal->entity_map);
+}
+
+void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef)
+{
+ __rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
+}
+
+static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
+{
+ struct cfg80211_chan_def chandef = {0};
+
+ rtw89_get_default_chandef(&chandef);
+ __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false);
+}
+
+void rtw89_entity_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_config_default_chandef(rtwdev);
+}
+
+enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_entity_mode mode;
+ u8 weight;
+
+ weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ switch (weight) {
+ default:
+ rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ fallthrough;
+ case 0:
+ rtw89_config_default_chandef(rtwdev);
+ fallthrough;
+ case 1:
+ mode = RTW89_ENTITY_MODE_SCC;
+ break;
+ }
+
+ rtw89_set_entity_mode(rtwdev, mode);
+ return mode;
+}
+
+int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 idx;
+
+ idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ if (idx >= chip->support_chanctx_num)
+ return -ENOENT;
+
+ rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
+ rtw89_set_channel(rtwdev);
+ cfg->idx = idx;
+ return 0;
+}
+
+void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+
+ clear_bit(cfg->idx, hal->entity_map);
+ rtw89_set_channel(rtwdev);
+}
+
+void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ u8 idx = cfg->idx;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+ rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
+ rtw89_set_channel(rtwdev);
+ }
+}
+
+int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return 0;
+}
+
+void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+}
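
A worked example of the primary-channel math in rtw89_get_primary_chan_idx() above, reduced to a standalone program. Each 20 MHz subchannel center sits 10 MHz plus a multiple of 20 MHz away from the wide-channel center, so stripping the 10 MHz half-step and dividing by 20 yields the subchannel ordinal; the example frequencies (80 MHz channel 42 at 5210 MHz, primary channel 36 at 5180 MHz) are illustrative:

#include <stdio.h>

static unsigned int pri_chan_offset(unsigned int center_freq,
				    unsigned int primary_freq)
{
	if (primary_freq > center_freq)
		return (primary_freq - center_freq - 10) / 20;
	return (center_freq - primary_freq - 10) / 20;
}

int main(void)
{
	/* (5210 - 5180 - 10) / 20 = 1: the second 20 MHz subchannel
	 * below the center, mapped to RTW89_SC_20_LOWER + 1 * 2.
	 */
	printf("offset = %u\n", pri_chan_offset(5210, 5180));
	return 0;
}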
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
new file mode 100644
index 000000000000..ecbd4503bead
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ * Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_CHAN_H__
+#define __RTW89_CHAN_H__
+
+#include "core.h"
+
+static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return READ_ONCE(hal->entity_active);
+}
+
+static inline void rtw89_set_entity_state(struct rtw89_dev *rtwdev, bool active)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ WRITE_ONCE(hal->entity_active, active);
+}
+
+static inline
+enum rtw89_entity_mode rtw89_get_entity_mode(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return READ_ONCE(hal->entity_mode);
+}
+
+static inline void rtw89_set_entity_mode(struct rtw89_dev *rtwdev,
+ enum rtw89_entity_mode mode)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ WRITE_ONCE(hal->entity_mode, mode);
+}
+
+void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
+ enum rtw89_band band, enum rtw89_bandwidth bandwidth);
+bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct rtw89_chan *new);
+void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef);
+void rtw89_entity_init(struct rtw89_dev *rtwdev);
+enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
+int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx);
+
+#endif
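
The chan.h accessors above wrap single-word state in READ_ONCE()/WRITE_ONCE() so concurrent readers always see an untorn, freshly loaded value without taking a lock. A userspace sketch of the same idea using C11 relaxed atomics (the closest portable analogue; the kernel macros also keep the compiler from caching or refetching the value):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool entity_active;

static void set_entity_state(bool active)
{
	atomic_store_explicit(&entity_active, active, memory_order_relaxed);
}

static bool get_entity_state(void)
{
	return atomic_load_explicit(&entity_active, memory_order_relaxed);
}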
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 9f7d4f8d0c56..bbdfa9ac203c 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -9,6 +9,7 @@
#include "ps.h"
#include "reg.h"
+#define RTW89_COEX_VERSION 0x06030013
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw */
enum btc_fbtc_tdma_template {
@@ -77,21 +78,21 @@ static const struct rtw89_btc_fbtc_tdma t_def[] = {
static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
- [CXST_B2W] = __DEF_FBTC_SLOT(5, 0x5a5a5a5a, SLOT_ISO),
- [CXST_W1] = __DEF_FBTC_SLOT(70, 0x5a5a5a5a, SLOT_ISO),
- [CXST_W2] = __DEF_FBTC_SLOT(70, 0x5a5a5aaa, SLOT_ISO),
- [CXST_W2B] = __DEF_FBTC_SLOT(15, 0x5a5a5a5a, SLOT_ISO),
- [CXST_B1] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
- [CXST_B2] = __DEF_FBTC_SLOT(7, 0x6a5a5a5a, SLOT_MIX),
- [CXST_B3] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
- [CXST_B4] = __DEF_FBTC_SLOT(50, 0x55555555, SLOT_MIX),
- [CXST_LK] = __DEF_FBTC_SLOT(20, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_B2W] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_ISO),
+ [CXST_W1] = __DEF_FBTC_SLOT(70, 0xea5a5a5a, SLOT_ISO),
+ [CXST_W2] = __DEF_FBTC_SLOT(70, 0xea5a5aaa, SLOT_ISO),
+ [CXST_W2B] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
+ [CXST_B1] = __DEF_FBTC_SLOT(100, 0xe5555555, SLOT_MIX),
+ [CXST_B2] = __DEF_FBTC_SLOT(7, 0xea5a5a5a, SLOT_MIX),
+ [CXST_B3] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
+ [CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
+ [CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
[CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(20, 0x6a5a5a5a, SLOT_MIX),
+ [CXST_E2G] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX),
- [CXST_EBT] = __DEF_FBTC_SLOT(20, 0x55555555, SLOT_MIX),
+ [CXST_EBT] = __DEF_FBTC_SLOT(20, 0xe5555555, SLOT_MIX),
[CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO),
- [CXST_WLK] = __DEF_FBTC_SLOT(250, 0x6a5a6a5a, SLOT_MIX),
+ [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX),
};
@@ -99,13 +100,13 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
static const u32 cxtbl[] = {
0xffffffff, /* 0 */
0xaaaaaaaa, /* 1 */
- 0x55555555, /* 2 */
- 0x66555555, /* 3 */
- 0x66556655, /* 4 */
+ 0xe5555555, /* 2 */
+ 0xee555555, /* 3 */
+ 0xd5555555, /* 4 */
0x5a5a5a5a, /* 5 */
- 0x5a5a5aaa, /* 6 */
- 0xaa5a5a5a, /* 7 */
- 0x6a5a5a5a, /* 8 */
+ 0xfa5a5a5a, /* 6 */
+ 0xda5a5a5a, /* 7 */
+ 0xea5a5a5a, /* 8 */
0x6a5a5aaa, /* 9 */
0x6a5a6a5a, /* 10 */
0x6a5a6aaa, /* 11 */
@@ -261,6 +262,12 @@ enum btc_cx_poicy_type {
/* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+ /* TDMA off + pri: WL_Hi-Tx > BT, BT_Hi > other-WL > BT_Lo */
+ BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 8,
+
+ /* TDMA off + pri: WL_Hi-Tx = BT */
+ BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 9,
+
/* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/
BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
@@ -270,6 +277,21 @@ enum btc_cx_poicy_type {
/* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
BTC_CXP_OFFE_DEF2 = (BTC_CXP_OFFE << 8) | 1,
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_2GBWISOB = (BTC_CXP_OFFE << 8) | 2,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
+ BTC_CXP_OFFE_2GISOB = (BTC_CXP_OFFE << 8) | 3,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot WL > BT */
+ BTC_CXP_OFFE_2GBWMIXB = (BTC_CXP_OFFE << 8) | 4,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G/EBT-slot WL > BT */
+ BTC_CXP_OFFE_WL = (BTC_CXP_OFFE << 8) | 5,
+
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_2GBWMIXB2 = (BTC_CXP_OFFE << 8) | 6,
+
/* TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_FIX_TD3030 = (BTC_CXP_FIX << 8) | 0,
@@ -300,6 +322,9 @@ enum btc_cx_poicy_type {
/* TDMA Fix slot-9: W1:B1 = 40:20 */
BTC_CXP_FIX_TD4020 = (BTC_CXP_FIX << 8) | 9,
+	/* TDMA Fix slot-10: W1:B1 = 40:10 */
+ BTC_CXP_FIX_TD4010ISO = (BTC_CXP_FIX << 8) | 10,
+
/* PS-TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_PFIX_TD3030 = (BTC_CXP_PFIX << 8) | 0,
@@ -322,25 +347,25 @@ enum btc_cx_poicy_type {
BTC_CXP_PFIX_TDW1B1 = (BTC_CXP_PFIX << 8) | 6,
/* TDMA Auto slot-0: W1:B1 = 50:200 */
- BTC_CXP_AUTO_TD50200 = (BTC_CXP_AUTO << 8) | 0,
+ BTC_CXP_AUTO_TD50B1 = (BTC_CXP_AUTO << 8) | 0,
/* TDMA Auto slot-1: W1:B1 = 60:200 */
- BTC_CXP_AUTO_TD60200 = (BTC_CXP_AUTO << 8) | 1,
+ BTC_CXP_AUTO_TD60B1 = (BTC_CXP_AUTO << 8) | 1,
/* TDMA Auto slot-2: W1:B1 = 20:200 */
- BTC_CXP_AUTO_TD20200 = (BTC_CXP_AUTO << 8) | 2,
+ BTC_CXP_AUTO_TD20B1 = (BTC_CXP_AUTO << 8) | 2,
/* TDMA Auto slot-3: W1:B1 = user-define */
BTC_CXP_AUTO_TDW1B1 = (BTC_CXP_AUTO << 8) | 3,
/* PS-TDMA Auto slot-0: W1:B1 = 50:200 */
- BTC_CXP_PAUTO_TD50200 = (BTC_CXP_PAUTO << 8) | 0,
+ BTC_CXP_PAUTO_TD50B1 = (BTC_CXP_PAUTO << 8) | 0,
/* PS-TDMA Auto slot-1: W1:B1 = 60:200 */
- BTC_CXP_PAUTO_TD60200 = (BTC_CXP_PAUTO << 8) | 1,
+ BTC_CXP_PAUTO_TD60B1 = (BTC_CXP_PAUTO << 8) | 1,
/* PS-TDMA Auto slot-2: W1:B1 = 20:200 */
- BTC_CXP_PAUTO_TD20200 = (BTC_CXP_PAUTO << 8) | 2,
+ BTC_CXP_PAUTO_TD20B1 = (BTC_CXP_PAUTO << 8) | 2,
/* PS-TDMA Auto slot-3: W1:B1 = user-define */
BTC_CXP_PAUTO_TDW1B1 = (BTC_CXP_PAUTO << 8) | 3,
@@ -412,7 +437,7 @@ enum btc_w2b_scoreboard {
BTC_WSCB_TDMA = BIT(9),
BTC_WSCB_FIX2M = BIT(10),
BTC_WSCB_WLRFK = BIT(11),
- BTC_WSCB_BTRFK_GNT = BIT(12), /* not used, use mailbox to inform BT */
+ BTC_WSCB_RXSCAN_PRI = BIT(12),
BTC_WSCB_BT_HILNA = BIT(13),
BTC_WSCB_BTLOG = BIT(14),
BTC_WSCB_ALL = GENMASK(23, 0),
@@ -434,6 +459,16 @@ enum btc_wl_link_mode {
BTC_WLINK_MAX
};
+enum btc_wl_mrole_type {
+ BTC_WLMROLE_NONE = 0x0,
+ BTC_WLMROLE_STA_GC,
+ BTC_WLMROLE_STA_GC_NOA,
+ BTC_WLMROLE_STA_GO,
+ BTC_WLMROLE_STA_GO_NOA,
+ BTC_WLMROLE_STA_STA,
+ BTC_WLMROLE_MAX
+};
+
enum btc_bt_hid_type {
BTC_HID_218 = BIT(0),
BTC_HID_418 = BIT(1),
@@ -460,6 +495,11 @@ enum btc_gnt_state {
BTC_GNT_MAX
};
+enum btc_ctr_path {
+ BTC_CTRL_BY_BT = 0,
+ BTC_CTRL_BY_WL
+};
+
enum btc_wl_max_tx_time {
BTC_MAX_TX_TIME_L1 = 500,
BTC_MAX_TX_TIME_L2 = 1000,
@@ -531,6 +571,7 @@ enum btc_reason_and_action {
#define BTC_FREERUN_ANTISO_MIN 30
#define BTC_TDMA_BTHID_MAX 2
#define BTC_BLINK_NOCONNECT 0
+#define BTC_B1_MAX 250 /* unit ms */
static void _run_coex(struct rtw89_dev *rtwdev,
enum btc_reason_and_action reason);
@@ -551,8 +592,10 @@ static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
"[BTC], %s(): return by btc not init!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
return;
- } else if ((wl->status.map.rf_off_pre == 1 && wl->status.map.rf_off == 1) ||
- (wl->status.map.lps_pre == 1 && wl->status.map.lps == 1)) {
+ } else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
+ wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
+ (wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
+ wl->status.map.lps == BTC_LPS_RF_OFF)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by wl off!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
@@ -594,7 +637,7 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
memset(&btc->dm, 0, sizeof(btc->dm));
memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++)
+ for (i = 0; i < RTW89_PORT_NUM; i++)
memset(wl_linfo[i].rssi_state, 0,
sizeof(wl_linfo[i].rssi_state));
@@ -616,8 +659,6 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
memset(&btc->mdinfo, 0, sizeof(btc->mdinfo));
}
-#define BTC_FWINFO_BUF 1024
-
#define BTC_RPT_HDR_SIZE 3
#define BTC_CHK_WLSLOT_DRIFT_MAX 15
#define BTC_CHK_HANG_MAX 3
@@ -869,18 +910,24 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *prptbuf, u32 index)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_fbtc_rpt_ctrl *prpt = NULL;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_fbtc_rpt_ctrl *prpt;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prpt_v1;
struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1 = NULL;
struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
struct rtw89_btc_prpt *btc_prpt = NULL;
struct rtw89_btc_fbtc_slot *rtp_slot = NULL;
- u8 rpt_type = 0, *rpt_content = NULL, *pfinfo = NULL;
- u16 wl_slot_set = 0;
+ void *rpt_content = NULL, *pfinfo = NULL;
+ u8 rpt_type = 0;
+ u16 wl_slot_set = 0, wl_slot_real = 0;
u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t;
+ u32 cnt_leak_slot = 0, bt_slot_real = 0, cnt_rx_imr = 0;
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -904,100 +951,129 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_ctrl.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
- pcinfo->req_fver = BTCRPT_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxbtcrpt_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_TDMA:
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_tdma.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
- pcinfo->req_fver = FCXTDMA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxtdma_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_slots.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
- pcinfo->req_fver = FCXSLOTS_VER;
+ pcinfo->req_fver = chip->fcxslots_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_CYSTA:
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_cysta.finfo);
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
- pcinfo->req_fver = FCXCYSTA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo;
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
+ rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxcysta_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_step.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
- trace_step + 8;
- pcinfo->req_fver = FCXSTEP_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
+ trace_step +
+ offsetof(struct rtw89_btc_fbtc_steps, step);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo_v1.step[0]) *
+ trace_step +
+ offsetof(struct rtw89_btc_fbtc_steps_v1, step);
+ }
+ pcinfo->req_fver = chip->fcxstep_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_NULLSTA:
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_nullsta.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
- pcinfo->req_fver = FCXNULLSTA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxnullsta_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_MREG:
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_mregval.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo);
- pcinfo->req_fver = FCXMREG_VER;
+ pcinfo->req_fver = chip->fcxmreg_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
- pcinfo->req_fver = FCXGPIODBG_VER;
+ pcinfo->req_fver = chip->fcxgpiodbg_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btver.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
- pcinfo->req_fver = FCX_BTVER_VER;
+ pcinfo->req_fver = chip->fcxbtver_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_SCAN:
pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btscan.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo);
- pcinfo->req_fver = FCX_BTSCAN_VER;
+ pcinfo->req_fver = chip->fcxbtscan_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_AFH:
pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btafh.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo);
- pcinfo->req_fver = FCX_BTAFH_VER;
+ pcinfo->req_fver = chip->fcxbtafh_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_DEVICE:
pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btdev.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btdev.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
- pcinfo->req_fver = FCX_BTDEVINFO_VER;
+ pcinfo->req_fver = chip->fcxbtdevinfo_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
@@ -1026,7 +1102,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcpy(pfinfo, rpt_content, pcinfo->req_len);
pcinfo->valid = 1;
- if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ if (rpt_type == BTC_RPT_TYPE_TDMA && chip->chip_id == RTL8852A) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): check %d %zu\n", __func__,
BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
@@ -1039,7 +1115,8 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
dm->tdma_now.type, dm->tdma_now.rxflctrl,
dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rsvd0, dm->tdma_now.rsvd1);
+ dm->tdma_now.rxflctrl_role,
+ dm->tdma_now.option_ctrl);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
@@ -1050,14 +1127,46 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n,
pfwinfo->rpt_fbtc_tdma.finfo.leak_n,
pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.rsvd0,
- pfwinfo->rpt_fbtc_tdma.finfo.rsvd1);
+ pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl_role,
+ pfwinfo->rpt_fbtc_tdma.finfo.option_ctrl);
}
_chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo,
sizeof(dm->tdma_now)));
+ } else if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n", __func__,
+ BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
+
+ if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
+ sizeof(dm->tdma_now)) != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ dm->tdma_now.type, dm->tdma_now.rxflctrl,
+ dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
+ dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
+ dm->tdma_now.rxflctrl_role,
+ dm->tdma_now.option_ctrl);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.type,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.txpause,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.wtgle_n,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.leak_n,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.ext_ctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl_role,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.option_ctrl);
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
+ sizeof(dm->tdma_now)));
}
if (rpt_type == BTC_RPT_TYPE_SLOT) {
@@ -1097,7 +1206,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
sizeof(dm->slot_now)));
}
- if (rpt_type == BTC_RPT_TYPE_CYSTA &&
+ if (rpt_type == BTC_RPT_TYPE_CYSTA && chip->chip_id == RTL8852A &&
pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) {
/* Check Leak-AP */
if (pcysta->slot_cnt[CXST_LK] != 0 &&
@@ -1120,16 +1229,55 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
}
_chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]);
_chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
+ } else if (rpt_type == BTC_RPT_TYPE_CYSTA && pcysta_v1 &&
+ le16_to_cpu(pcysta_v1->cycles) >= BTC_CYSTA_CHK_PERIOD) {
+ cnt_leak_slot = le32_to_cpu(pcysta_v1->slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta_v1->leak_slot.cnt_rximr);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
+ }
+
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_BT]);
+
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta_v1->slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le32_to_cpu(pcysta_v1->slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ (u32)le16_to_cpu(pcysta_v1->cycles));
}
- if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ if (rpt_type == BTC_RPT_TYPE_CTRL && chip->chip_id == RTL8852A) {
prpt = &pfwinfo->rpt_ctrl.finfo;
btc->fwinfo.rpt_en_map = prpt->rpt_enable;
wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
wl->ver_info.fw = prpt->wl_fw_ver;
- dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload);
+ dm->wl_fw_cx_offload = !!prpt->wl_fw_cx_offload;
_chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
pfwinfo->event[BTF_EVNT_RPT]);
@@ -1142,6 +1290,33 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
btc->cx.cnt_bt[BTC_BCNT_POLUT] =
rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
}
+ } else if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ prpt_v1 = &pfwinfo->rpt_ctrl.finfo_v1;
+ btc->fwinfo.rpt_en_map = le32_to_cpu(prpt_v1->rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt_v1->wl_fw_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt_v1->wl_fw_info.fw_ver);
+ dm->wl_fw_cx_offload = !!le32_to_cpu(prpt_v1->wl_fw_info.cx_offload);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt_v1->gnt_val[i],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_POLLUTED]);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+ if (le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
}
if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
@@ -1155,6 +1330,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *pbuf, u32 buf_len)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc_prpt *btc_prpt = NULL;
u32 index = 0, rpt_len = 0;
@@ -1164,7 +1340,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
while (pbuf) {
btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index];
- if (index + 2 >= BTC_FWINFO_BUF)
+ if (index + 2 >= chip->btc_fwinfo_buf)
break;
/* At least 3 bytes: type(1) & len(2) */
rpt_len = le16_to_cpu(btc_prpt->len);
@@ -1182,10 +1358,12 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
static void _append_tdma(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_btf_tlv *tlv = NULL;
- struct rtw89_btc_fbtc_tdma *v = NULL;
+ struct rtw89_btc_btf_tlv *tlv;
+ struct rtw89_btc_fbtc_tdma *v;
+ struct rtw89_btc_fbtc_tdma_v1 *v1;
u16 len = btc->policy_len;
if (!btc->update_policy_force &&
@@ -1197,12 +1375,19 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
}
tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
- v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
tlv->type = CXPOLICY_TDMA;
- tlv->len = sizeof(*v);
-
- memcpy(v, &dm->tdma, sizeof(*v));
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ if (chip->chip_id == RTL8852A) {
+ v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
+ tlv->len = sizeof(*v);
+ memcpy(v, &dm->tdma, sizeof(*v));
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ } else {
+ tlv->len = sizeof(*v1);
+ v1 = (struct rtw89_btc_fbtc_tdma_v1 *)&tlv->val[0];
+ v1->fver = chip->fcxtdma_ver;
+ v1->tdma = dm->tdma;
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v1);
+ }
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): type:%d, rxflctrl=%d, txpause=%d, wtgle_n=%d, leak_n=%d, ext_ctrl=%d\n",
@@ -1408,12 +1593,17 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
switch (type) {
case CXDRVINFO_INIT:
rtw89_fw_h2c_cxdrv_init(rtwdev);
break;
case CXDRVINFO_ROLE:
- rtw89_fw_h2c_cxdrv_role(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ rtw89_fw_h2c_cxdrv_role(rtwdev);
+ else
+ rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
break;
case CXDRVINFO_CTRL:
rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
@@ -1448,7 +1638,7 @@ void btc_fw_event(struct rtw89_dev *rtwdev, u8 evt_id, void *data, u32 len)
}
}
-static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
+static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -1462,7 +1652,7 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
if (!(phy_map & BIT(i)))
continue;
- switch (state) {
+ switch (wl_state) {
case BTC_GNT_HW:
g[i].gnt_wl_sw_en = 0;
g[i].gnt_wl = 0;
@@ -1476,9 +1666,24 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
g[i].gnt_wl = 1;
break;
}
+
+ switch (bt_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_bt_sw_en = 0;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ break;
+ }
}
- rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+ rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
#define BTC_TDMA_WLROLE_MAX 2
@@ -1534,6 +1739,7 @@ static void _set_wl_tx_power(struct rtw89_dev *rtwdev, u32 level)
static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -1546,6 +1752,8 @@ static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): level = %d\n",
__func__, level);
+
+ chip->ops->btc_set_wl_rx_gain(rtwdev, level);
}
static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
@@ -1683,39 +1891,64 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *b = &bt->link_info;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ struct rtw89_btc_wl_active_role *r;
+ struct rtw89_btc_wl_active_role_v1 *r1;
u8 en = 0, i, ch = 0, bw = 0;
+ u8 mode, connect_cnt;
if (btc->ctrl.manual || wl->status.map.scan)
return;
- /* TODO if include module->ant.type == BTC_ANT_SHARED */
+ if (chip->chip_id == RTL8852A) {
+ mode = wl_rinfo->link_mode;
+ connect_cnt = wl_rinfo->connect_cnt;
+ } else {
+ mode = wl_rinfo_v1->link_mode;
+ connect_cnt = wl_rinfo_v1->connect_cnt;
+ }
+
if (wl->status.map.rf_off || bt->whql_test ||
- wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- wl_rinfo->link_mode == BTC_WLINK_5G ||
- wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ mode == BTC_WLINK_NOLINK || mode == BTC_WLINK_5G ||
+ connect_cnt > BTC_TDMA_WLROLE_MAX) {
en = false;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
+ } else if (mode == BTC_WLINK_2G_MCC || mode == BTC_WLINK_2G_SCC) {
en = true;
/* get p2p channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
- if (wl_rinfo->active_role[i].role ==
- RTW89_WIFI_ROLE_P2P_GO ||
- wl_rinfo->active_role[i].role ==
- RTW89_WIFI_ROLE_P2P_CLIENT) {
- ch = wl_rinfo->active_role[i].ch;
- bw = wl_rinfo->active_role[i].bw;
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ r = &wl_rinfo->active_role[i];
+ r1 = &wl_rinfo_v1->active_role_v1[i];
+
+ if (chip->chip_id == RTL8852A &&
+ (r->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r->ch;
+ bw = r->bw;
+ break;
+ } else if (chip->chip_id != RTL8852A &&
+ (r1->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r1->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r1->ch;
+ bw = r1->bw;
break;
}
}
} else {
en = true;
/* get 2g channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
- if (wl_rinfo->active_role[i].connected &&
- wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
- ch = wl_rinfo->active_role[i].ch;
- bw = wl_rinfo->active_role[i].bw;
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ r = &wl_rinfo->active_role[i];
+ r1 = &wl_rinfo_v1->active_role_v1[i];
+
+ if (chip->chip_id == RTL8852A &&
+ r->connected && r->band == RTW89_BAND_2G) {
+ ch = r->ch;
+ bw = r->bw;
+ break;
+ } else if (chip->chip_id != RTL8852A &&
+ r1->connected && r1->band == RTW89_BAND_2G) {
+ ch = r1->ch;
+ bw = r1->bw;
break;
}
}
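
Both channel-scan loops above differ only in which active-role array they walk. A version-neutral accessor would collapse the duplicated v0/v1 branches; a sketch under that assumption (hypothetical helper, not part of the patch):

	static void btc_active_role_fields(struct rtw89_dev *rtwdev, u8 i,
					   u8 *role, u8 *ch, u8 *bw)
	{
		struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;

		if (rtwdev->chip->chip_id == RTL8852A) {
			struct rtw89_btc_wl_active_role *r =
				&wl->role_info.active_role[i];
			*role = r->role;
			*ch = r->ch;
			*bw = r->bw;
		} else {
			struct rtw89_btc_wl_active_role_v1 *r1 =
				&wl->role_info_v1.active_role_v1[i];
			*role = r1->role;
			*ch = r1->ch;
			*bw = r1->bw;
		}
	}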
@@ -1768,6 +2001,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -1777,7 +2011,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
/* The below is dedicated antenna case */
- if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX ||
+ wl_rinfo_v1->connect_cnt > BTC_TDMA_WLROLE_MAX) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -1826,6 +2061,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
#define _tdma_set_flctrl(btc, flc) ({(btc)->dm.tdma.rxflctrl = flc; })
+#define _tdma_set_flctrl_role(btc, role) ({(btc)->dm.tdma.rxflctrl_role = role; })
#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; })
#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; })
@@ -1904,6 +2140,15 @@ union btc_btinfo {
static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
enum btc_reason_and_action action)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->btc_set_policy(rtwdev, policy_type);
+ _fw_set_policy(rtwdev, policy_type, action);
+}
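
_set_policy() is now a thin dispatcher: the chip op builds the slot/TDMA content, and the common _fw_set_policy() ships the resulting TLVs to firmware. Presumably the per-chip ops tables wire this up roughly as below (an assumption based on the v1 function added later in this patch; the ops wiring itself is outside this hunk):

	/* assumed ops wiring, defined per-chip elsewhere (not in this diff):
	 *	.btc_set_policy = rtw89_btc_set_policy,	   on RTL8852A
	 *	.btc_set_policy = rtw89_btc_set_policy_v1, on later chips
	 */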
+
+#define BTC_B1_MAX 250 /* unit: ms */
+void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
+{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
@@ -1964,6 +2209,9 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
case BTC_CXP_OFF_BWB1:
_slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
break;
+ case BTC_CXP_OFF_BWB3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[6]);
+ break;
}
break;
case BTC_CXP_OFFB: /* TDMA off + beacon protect */
@@ -2080,17 +2328,361 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_AUTO];
switch (policy_type) {
- case BTC_CXP_AUTO_TD50200:
+ case BTC_CXP_AUTO_TD50B1:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD60B1:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD20B1:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO_TD50B1:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD60B1:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD20B1:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TDW1B1:
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO2];
+ switch (policy_type) {
+ case BTC_CXP_AUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TDW1B4: /* W1:B4 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO2];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TDW1B4: /* W1:B4 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL(rtw89_btc_set_policy);
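
As a worked reading of the auto-slot table above: BTC_CXP_AUTO_TD50B1, for instance, programs a 50 ms WL slot against a BT slot capped at BTC_B1_MAX (250 ms), and the firmware alternates the two:

	/* values lifted from the BTC_CXP_AUTO_TD50B1 case above */
	_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);		/* WL: 50 ms, isolated table */
	_slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);	/* BT: up to 250 ms, mixed table */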
+
+void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
+ struct rtw89_btc_fbtc_slot *s = dm->slot;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
+ struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
+ struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
+ u8 type, null_role;
+ u32 tbl_w1, tbl_b1, tbl_b4;
+
+ type = FIELD_GET(BTC_CXP_MASK, policy_type);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ tbl_w1 = cxtbl[1];
+ else if (hid->exist && hid->type == BTC_HID_218)
+ tbl_w1 = cxtbl[7]; /* Ack/BA no break bt Hi-Pri-rx */
+ else
+ tbl_w1 = cxtbl[8];
+
+ if (dm->leak_ap &&
+ (type == BTC_CXP_PFIX || type == BTC_CXP_PAUTO2)) {
+ tbl_b1 = cxtbl[3];
+ tbl_b4 = cxtbl[3];
+ } else if (hid->exist && hid->type == BTC_HID_218) {
+ tbl_b1 = cxtbl[4]; /* Ack/BA no break bt Hi-Pri-rx */
+ tbl_b4 = cxtbl[4];
+ } else {
+ tbl_b1 = cxtbl[2];
+ tbl_b4 = cxtbl[2];
+ }
+ } else {
+ tbl_w1 = cxtbl[16];
+ tbl_b1 = cxtbl[17];
+ tbl_b4 = cxtbl[17];
+ }
+
+ btc->bt_req_en = false;
+
+ switch (type) {
+ case BTC_CXP_USERDEF0:
+ btc->update_policy_force = true;
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF: /* TDMA off */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFF_BT:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF_WL:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[1]);
+ break;
+ case BTC_CXP_OFF_EQ0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
+ break;
+ case BTC_CXP_OFF_EQ1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[16]);
+ break;
+ case BTC_CXP_OFF_EQ2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[17]);
+ break;
+ case BTC_CXP_OFF_EQ3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[18]);
+ break;
+ case BTC_CXP_OFF_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[5]);
+ break;
+ case BTC_CXP_OFF_BWB1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ case BTC_CXP_OFF_BWB2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[7]);
+ break;
+ case BTC_CXP_OFF_BWB3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[6]);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_OFFB: /* TDMA off + beacon protect */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF_B2];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFFB_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_OFFE: /* TDMA off + beacon protect + Ext_control */
+ btc->bt_req_en = true;
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_OFF_EXT];
+
+ /* To avoid wl-s0 tx being broken by hid/hfp tx */
+ if (hid->exist || hfp->exist)
+ tbl_w1 = cxtbl[16];
+
+ switch (policy_type) {
+ case BTC_CXP_OFFE_DEF:
+ s[CXST_E2G] = s_def[CXST_E2G];
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ case BTC_CXP_OFFE_DEF2:
+ _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_FIX: /* TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_FIX];
+
+ switch (policy_type) {
+ case BTC_CXP_FIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010:
+ _slot_set(btc, CXST_W1, 40, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010ISO:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD7010:
+ _slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD3060:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_PFIX: /* PS-TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PFIX];
+
+ switch (policy_type) {
+ case BTC_CXP_PFIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO: /* TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO];
+
+ switch (policy_type) {
+ case BTC_CXP_AUTO_TD50B1:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_AUTO_TD60200:
+ case BTC_CXP_AUTO_TD60B1:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_AUTO_TD20200:
+ case BTC_CXP_AUTO_TD20B1:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
@@ -2098,23 +2690,26 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
tbl_b1, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_PAUTO];
+
switch (policy_type) {
- case BTC_CXP_PAUTO_TD50200:
+ case BTC_CXP_PAUTO_TD50B1:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_PAUTO_TD60200:
+ case BTC_CXP_PAUTO_TD60B1:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_PAUTO_TD20200:
+ case BTC_CXP_PAUTO_TD20B1:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
case BTC_CXP_PAUTO_TDW1B1:
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
@@ -2122,119 +2717,112 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
tbl_b1, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_AUTO2];
+
switch (policy_type) {
case BTC_CXP_AUTO2_TD3050:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD3070:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD5050:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD6060:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD2080:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TDW1B4: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
tbl_b4, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_PAUTO2];
+
switch (policy_type) {
case BTC_CXP_PAUTO2_TD3050:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD3070:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD5050:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD6060:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD2080:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TDW1B4: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
tbl_b4, SLOT_MIX);
break;
+ default:
+ break;
}
break;
}
- _fw_set_policy(rtwdev, policy_type, action);
-}
-
-static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_mac_ax_gnt *g = dm->gnt.band;
- u8 i;
-
- if (phy_map > BTC_PHY_ALL)
- return;
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC && dm->tdma.rxflctrl) {
+ null_role = FIELD_PREP(0x0f, dm->wl_scc.null_role1) |
+ FIELD_PREP(0xf0, dm->wl_scc.null_role2);
+ _tdma_set_flctrl_role(btc, null_role);
+ }
- for (i = 0; i < RTW89_PHY_MAX; i++) {
- if (!(phy_map & BIT(i)))
- continue;
+ /* enter leak_slot after each null-1 */
+ if (dm->leak_ap && dm->tdma.leak_n > 1)
+ _tdma_set_lek(btc, 1);
- switch (state) {
- case BTC_GNT_HW:
- g[i].gnt_bt_sw_en = 0;
- g[i].gnt_bt = 0;
- break;
- case BTC_GNT_SW_LO:
- g[i].gnt_bt_sw_en = 1;
- g[i].gnt_bt = 0;
- break;
- case BTC_GNT_SW_HI:
- g[i].gnt_bt_sw_en = 1;
- g[i].gnt_bt = 1;
- break;
- }
+ if (dm->tdma_instant_excute) {
+ btc->dm.tdma.option_ctrl |= BIT(0);
+ btc->update_policy_force = true;
}
-
- rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
}
+EXPORT_SYMBOL(rtw89_btc_set_policy_v1);
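
The 2G-SCC rxflctrl_role packing near the end of the v1 function stores two 4-bit WiFi role IDs in one byte. A worked example with illustrative role values (the real values come from the RTW89_WIFI_ROLE_* enum):

	/* low nibble = null_role1, high nibble = null_role2 */
	u8 null_role = FIELD_PREP(0x0f, 2) |	/* e.g. null_role1 = 2 */
		       FIELD_PREP(0xf0, 5);	/* e.g. null_role2 = 5 */
	/* null_role == 0x52 */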
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
u8 tx_val, u8 rx_val)
@@ -2300,86 +2888,74 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
switch (type) {
case BTC_ANT_WPOWERON:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
break;
case BTC_ANT_WINIT:
- if (bt->enable.now) {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
- } else {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- }
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ if (bt->enable.now)
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI);
+ else
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
+
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
break;
case BTC_ANT_WONLY:
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WOFF:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W2G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
if (rtwdev->dbcc_en) {
for (i = 0; i < RTW89_PHY_MAX; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
- _set_gnt_wl(rtwdev, BIT(i), gnt_wl_ctrl);
-
gnt_bt_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
/* BT should control by GNT_BT if WL_2G at S0 */
if (i == 1 &&
wl_dinfo->real_band[0] == RTW89_BAND_2G &&
wl_dinfo->real_band[1] == RTW89_BAND_5G)
gnt_bt_ctrl = BTC_GNT_HW;
- _set_gnt_bt(rtwdev, BIT(i), gnt_bt_ctrl);
-
+ _set_gnt(rtwdev, BIT(i), gnt_wl_ctrl, gnt_bt_ctrl);
plt_ctrl = b2g ? BTC_PLT_BT : BTC_PLT_NONE;
_set_bt_plut(rtwdev, BIT(i),
plt_ctrl, plt_ctrl);
}
} else {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_HW, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_BT, BTC_PLT_BT);
}
break;
case BTC_ANT_W5G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W25G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_HW, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
break;
case BTC_ANT_FREERUN:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WRFK:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_BRFK:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
default:
@@ -2491,14 +3067,19 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
static void _action_bt_hfp(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
- if (btc->cx.wl.status.map._4way)
+ if (wl->status.map._4way) {
_set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HFP);
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ btc->cx.bt.scan_rx_low_pri = true;
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB2, BTC_ACT_BT_HFP);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
+ }
} else {
_set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
}
@@ -2506,17 +3087,37 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
static void _action_bt_hid(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_hid_desc *hid = &bt->link_info.hid_desc;
+ u16 policy_type = BTC_CXP_OFF_BT;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) /* shared-antenna */
- if (btc->cx.wl.status.map._4way)
- _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HID);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HID);
- else /* dedicated-antenna */
- _set_policy(rtwdev, BTC_CXP_OFF_EQ3, BTC_ACT_BT_HID);
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (wl->status.map._4way) {
+ policy_type = BTC_CXP_OFF_WL;
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ bt->scan_rx_low_pri = true;
+ if (hid->type & BTC_HID_BLE)
+ policy_type = BTC_CXP_OFF_BWB0;
+ else
+ policy_type = BTC_CXP_OFF_BWB2;
+ } else if (hid->type == BTC_HID_218) {
+ bt->scan_rx_low_pri = true;
+ policy_type = BTC_CXP_OFF_BWB2;
+ } else if (chip->para_ver == 0x1) {
+ policy_type = BTC_CXP_OFF_BWB3;
+ } else {
+ policy_type = BTC_CXP_OFF_BWB1;
+ }
+ } else { /* dedicated-antenna */
+ policy_type = BTC_CXP_OFF_EQ3;
+ }
+
+ _set_policy(rtwdev, policy_type, BTC_ACT_BT_HID);
}
static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
@@ -2537,7 +3138,7 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
} else {
_set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP);
+ BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP);
}
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
@@ -2554,12 +3155,12 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
BTC_ACT_BT_A2DP);
} else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
BTC_ACT_BT_A2DP);
}
break;
case BTC_WIDLE: /* wl-idle + bt-A2DP */
- _set_policy(rtwdev, BTC_CXP_AUTO_TD20200, BTC_ACT_BT_A2DP);
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP);
break;
}
}
@@ -2639,7 +3240,7 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
} else {
_set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP_HID);
+ BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID);
}
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
@@ -2657,7 +3258,7 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
BTC_ACT_BT_A2DP_HID);
} else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
BTC_ACT_BT_A2DP_HID);
}
break;
@@ -2792,19 +3393,27 @@ static void _action_wl_rfk(struct rtw89_dev *rtwdev)
static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- bool is_btg = false;
+ bool is_btg;
+ u8 mode;
if (btc->ctrl.manual)
return;
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
/* notify halbb ignore GNT_BT or not for WL BB Rx-AGC control */
- if (wl_rinfo->link_mode == BTC_WLINK_5G) /* always 0 if 5G */
+ if (mode == BTC_WLINK_5G) /* always 0 if 5G */
is_btg = false;
- else if (wl_rinfo->link_mode == BTC_WLINK_25G_DBCC &&
+ else if (mode == BTC_WLINK_25G_DBCC &&
wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
is_btg = false;
else
@@ -2816,7 +3425,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
btc->dm.wl_btg_rx = is_btg;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (mode == BTC_WLINK_25G_MCC)
return;
rtw89_ctrl_btg(rtwdev, is_btg);
@@ -2889,6 +3498,7 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -2898,16 +3508,22 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc;
struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
- u8 mode = wl_rinfo->link_mode;
- u8 tx_retry = 0;
- u32 tx_time = 0;
- u16 enable = 0;
+ u8 mode;
+ u8 tx_retry;
+ u32 tx_time;
+ u16 enable;
bool reenable = false;
if (btc->ctrl.manual)
return;
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
enable = 0;
@@ -2951,13 +3567,21 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
+ u8 mode;
- if (wl_rinfo->link_mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ if (mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
bt_hi_lna_rx = true;
if (bt_hi_lna_rx == bt->hi_lna_rx)
@@ -2966,14 +3590,34 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
_write_scbd(rtwdev, BTC_WSCB_BT_HILNA, bt_hi_lna_rx);
}
+static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+
+ _write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, !!bt->scan_rx_low_pri);
+}
+
/* TODO add these functions */
static void _action_common(struct rtw89_dev *rtwdev)
{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
_set_btg_ctrl(rtwdev);
_set_wl_tx_limit(rtwdev);
_set_bt_afh_info(rtwdev);
_set_bt_rx_agc(rtwdev);
_set_rf_trx_para(rtwdev);
+ _set_bt_rx_scan_pri(rtwdev);
+
+ if (wl->scbd_change) {
+ rtw89_mac_cfg_sb(rtwdev, wl->scbd);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
+ wl->scbd);
+ wl->scbd_change = false;
+ btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+ }
}
static void _action_by_bt(struct rtw89_dev *rtwdev)
@@ -3068,7 +3712,17 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- if (rtwdev->dbcc_en) {
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
+ BTC_RSN_NTFY_SCAN_START);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0,
+ BTC_RSN_NTFY_SCAN_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
+ } else if (rtwdev->dbcc_en) {
if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G &&
wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
_action_wl_5g(rtwdev);
@@ -3135,6 +3789,68 @@ static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
}
}
+static void _action_wl_2g_scc_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &wl->role_info_v1;
+ u16 policy_type = BTC_CXP_OFF_BT;
+ u32 dur;
+
+ if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ policy_type = BTC_CXP_OFF_EQ0;
+ } else {
+ /* shared-antenna */
+ switch (wl_rinfo->mrole_type) {
+ case BTC_WLMROLE_STA_GC:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
+ dm->wl_scc.ebt_null = 0; /* no ext-slot-control */
+ _action_by_bt(rtwdev);
+ return;
+ case BTC_WLMROLE_STA_STA:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.ebt_null = 0; /* no ext-slot-control */
+ _action_by_bt(rtwdev);
+ return;
+ case BTC_WLMROLE_STA_GC_NOA:
+ case BTC_WLMROLE_STA_GO:
+ case BTC_WLMROLE_STA_GO_NOA:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
+ dur = wl_rinfo->mrole_noa_duration;
+
+ if (wl->status.map._4way) {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_WL;
+ } else if (bt->link_info.status.map.connect == 0) {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ } else if (bt->link_info.a2dp_desc.exist &&
+ dur < btc->bt_req_len) {
+ dm->wl_scc.ebt_null = 1; /* tx null at EBT */
+ policy_type = BTC_CXP_OFFE_2GBWMIXB2;
+ } else if (bt->link_info.a2dp_desc.exist ||
+ bt->link_info.pan_desc.exist) {
+ dm->wl_scc.ebt_null = 1; /* tx null at EBT */
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
+}
+
static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -3224,20 +3940,20 @@ static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 scbd_val = 0;
+ bool force_exec = false;
if (!chip->scbd)
return;
scbd_val = state ? wl->scbd | val : wl->scbd & ~val;
- if (scbd_val == wl->scbd)
- return;
- rtw89_mac_cfg_sb(rtwdev, scbd_val);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
- scbd_val);
- wl->scbd = scbd_val;
+ if (val & BTC_WSCB_ACTIVE || val & BTC_WSCB_ON)
+ force_exec = true;
- btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+ if (scbd_val != wl->scbd || force_exec) {
+ wl->scbd = scbd_val;
+ wl->scbd_change = true;
+ }
}
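
_write_scbd() no longer touches the MAC register directly: it caches the new value and marks it dirty (unconditionally for the ACTIVE/ON bits), and the single rtw89_mac_cfg_sb() write then happens once per coex run in _action_common() above. The deferred-write pairing, reduced to its two halves:

	/* producer side (as in _write_scbd above) */
	wl->scbd = scbd_val;
	wl->scbd_change = true;

	/* consumer side, one flush per run (as in _action_common above) */
	if (wl->scbd_change) {
		rtw89_mac_cfg_sb(rtwdev, wl->scbd);
		wl->scbd_change = false;
	}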
static u8
@@ -3287,7 +4003,7 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
memset(wl_rinfo, 0, sizeof(*wl_rinfo));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
/* check if role active? */
if (!wl_linfo[i].active)
continue;
@@ -3418,8 +4134,158 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], cnt_connect = %d, link_mode = %d\n",
- cnt_connect, wl_rinfo->link_mode);
+ "[BTC], cnt_connect = %d, connecting = %d, link_mode = %d\n",
+ cnt_connect, cnt_connecting, wl_rinfo->link_mode);
+
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+}
+
+static void _update_wl_info_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &wl->role_info_v1;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ u8 cnt_connect = 0, cnt_connecting = 0, cnt_active = 0;
+ u8 cnt_2g = 0, cnt_5g = 0, phy;
+ u32 wl_2g_ch[2] = {}, wl_5g_ch[2] = {};
+ bool b2g = false, b5g = false, client_joined = false;
+ u8 i;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active)
+ continue;
+
+ cnt_active++;
+ wl_rinfo->active_role_v1[cnt_active - 1].role = wl_linfo[i].role;
+ wl_rinfo->active_role_v1[cnt_active - 1].pid = wl_linfo[i].pid;
+ wl_rinfo->active_role_v1[cnt_active - 1].phy = wl_linfo[i].phy;
+ wl_rinfo->active_role_v1[cnt_active - 1].band = wl_linfo[i].band;
+ wl_rinfo->active_role_v1[cnt_active - 1].noa = (u8)wl_linfo[i].noa;
+ wl_rinfo->active_role_v1[cnt_active - 1].connected = 0;
+
+ wl->port_id[wl_linfo[i].role] = wl_linfo[i].pid;
+
+ phy = wl_linfo[i].phy;
+
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ wl_dinfo->role[phy] = wl_linfo[i].role;
+ wl_dinfo->op_band[phy] = wl_linfo[i].band;
+ _update_dbcc_band(rtwdev, phy);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ cnt_connecting++;
+ } else {
+ cnt_connect++;
+ if ((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ }
+
+ wl_rinfo->role_map.val |= BIT(wl_linfo[i].role);
+ wl_rinfo->active_role_v1[cnt_active - 1].ch = wl_linfo[i].ch;
+ wl_rinfo->active_role_v1[cnt_active - 1].bw = wl_linfo[i].bw;
+ wl_rinfo->active_role_v1[cnt_active - 1].connected = 1;
+
+ /* only the first two roles matter for BT coex */
+ if (wl_linfo[i].band != RTW89_BAND_2G) {
+ if (cnt_5g <= ARRAY_SIZE(wl_5g_ch) - 1)
+ wl_5g_ch[cnt_5g] = wl_linfo[i].ch;
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (cnt_2g <= ARRAY_SIZE(wl_2g_ch) - 1)
+ wl_2g_ch[cnt_2g] = wl_linfo[i].ch;
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+
+ wl_rinfo->connect_cnt = cnt_connect;
+
+ /* Be careful when changing the following sequence!! */
+ if (cnt_connect == 0) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map.role.none = 1;
+ } else if (!b2g && b5g) {
+ wl_rinfo->link_mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map.role.nan) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_NAN;
+ } else if (cnt_connect > BTC_TDMA_WLROLE_MAX) {
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ } else if (b2g && b5g && cnt_connect == 2) {
+ if (rtwdev->dbcc_en) {
+ switch (wl_dinfo->role[RTW89_PHY_0]) {
+ case RTW89_WIFI_ROLE_STATION:
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ break;
+ case RTW89_WIFI_ROLE_P2P_GO:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ break;
+ case RTW89_WIFI_ROLE_P2P_CLIENT:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ break;
+ case RTW89_WIFI_ROLE_AP:
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ break;
+ default:
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ break;
+ }
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_25G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 2) {
+ if (wl_rinfo->role_map.role.station &&
+ (wl_rinfo->role_map.role.p2p_go ||
+ wl_rinfo->role_map.role.p2p_gc ||
+ wl_rinfo->role_map.role.ap)) {
+ if (wl_2g_ch[0] == wl_2g_ch[1])
+ wl_rinfo->link_mode = BTC_WLINK_2G_SCC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 1) {
+ if (wl_rinfo->role_map.role.station)
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ else if (wl_rinfo->role_map.role.ap)
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ else if (wl_rinfo->role_map.role.p2p_go)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ else if (wl_rinfo->role_map.role.p2p_gc)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ }
+
+ /* if no client has joined, ignore the P2P-GO/AP role */
+ if (wl_rinfo->role_map.role.p2p_go || wl_rinfo->role_map.role.ap) {
+ if (!client_joined) {
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_MCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ wl_rinfo->connect_cnt = 1;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->connect_cnt = 0;
+ }
+ }
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], cnt_connect = %d, connecting = %d, link_mode = %d\n",
+ cnt_connect, cnt_connecting, wl_rinfo->link_mode);
_fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
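
A concrete walk of the v1 classification chain: two connected 2G roles, STA plus P2P-GO, classify as BTC_WLINK_2G_SCC when both channels match and BTC_WLINK_2G_MCC otherwise; if the GO has no joined client, the fixup at the end demotes the result again:

	/* illustrative outcomes of the decision chain above (not new logic):
	 *	STA + GO, both 2G, same ch -> BTC_WLINK_2G_SCC
	 *	STA + GO, both 2G, diff ch -> BTC_WLINK_2G_MCC
	 *	...and with no client joined: SCC/MCC -> 2G_STA, connect_cnt = 1
	 */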
@@ -3574,23 +4440,32 @@ static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
static
void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
- u8 mode = wl_rinfo->link_mode;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ u8 mode;
lockdep_assert_held(&rtwdev->mutex);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
- __func__, reason, mode);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
- __func__, dm->wl_only, dm->bt_only);
dm->run_reason = reason;
_update_dm_step(rtwdev, reason);
_update_btc_state_map(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
+ __func__, reason, mode);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
+ __func__, dm->wl_only, dm->bt_only);
+
/* Be careful to change the following function sequence!! */
if (btc->ctrl.manual) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -3647,6 +4522,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
btc->ctrl.igno_bt = false;
dm->freerun = false;
+ bt->scan_rx_low_pri = false;
if (reason == BTC_RSN_NTFY_INIT) {
_action_wl_init(rtwdev);
@@ -3689,21 +4565,30 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_sta(rtwdev);
break;
case BTC_WLINK_2G_AP:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_ap(rtwdev);
break;
case BTC_WLINK_2G_GO:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_go(rtwdev);
break;
case BTC_WLINK_2G_GC:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_gc(rtwdev);
break;
case BTC_WLINK_2G_SCC:
- _action_wl_2g_scc(rtwdev);
+ bt->scan_rx_low_pri = true;
+ if (chip->chip_id == RTL8852A)
+ _action_wl_2g_scc(rtwdev);
+ else if (chip->chip_id == RTL8852C)
+ _action_wl_2g_scc_v1(rtwdev);
break;
case BTC_WLINK_2G_MCC:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_mcc(rtwdev);
break;
case BTC_WLINK_25G_MCC:
+ bt->scan_rx_low_pri = true;
_action_wl_25g_mcc(rtwdev);
break;
case BTC_WLINK_5G:
@@ -3733,11 +4618,14 @@ void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev)
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
btc->dm.cnt_notify[BTC_NCNT_POWER_OFF]++;
btc->cx.wl.status.map.rf_off = 1;
+ btc->cx.wl.status.map.busy = 0;
+ wl->status.map.lps = BTC_LPS_OFF;
_write_scbd(rtwdev, BTC_WSCB_ALL, false);
_run_coex(rtwdev, BTC_RSN_NTFY_POWEROFF);
@@ -3797,7 +4685,7 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_write_scbd(rtwdev,
BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true);
_update_bt_scbd(rtwdev, true);
- if (rtw89_mac_get_ctrl_path(rtwdev)) {
+ if (rtw89_mac_get_ctrl_path(rtwdev) && chip->chip_id == RTL8852A) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): PTA owner warning!!\n",
__func__);
@@ -4140,7 +5028,8 @@ enum btc_wl_mode {
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4155,8 +5044,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
vif->type == NL80211_IFTYPE_STATION);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif->port);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], band=%d ch=%d bw=%d\n",
- hal->current_band_type, hal->current_channel,
- hal->current_band_width);
+ chan->band_type, chan->channel, chan->band_width);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], associated=%d\n",
state == BTC_ROLE_MSTS_STA_CONN_END);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -4169,14 +5057,14 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], STA support HE=%d VHT=%d HT=%d\n",
- sta->he_cap.has_he,
- sta->vht_cap.vht_supported,
- sta->ht_cap.ht_supported);
- if (sta->he_cap.has_he)
+ sta->deflink.he_cap.has_he,
+ sta->deflink.vht_cap.vht_supported,
+ sta->deflink.ht_cap.ht_supported);
+ if (sta->deflink.he_cap.has_he)
mode |= BIT(BTC_WL_MODE_HE);
- if (sta->vht_cap.vht_supported)
+ if (sta->deflink.vht_cap.vht_supported)
mode |= BIT(BTC_WL_MODE_VHT);
- if (sta->ht_cap.ht_supported)
+ if (sta->deflink.ht_cap.ht_supported)
mode |= BIT(BTC_WL_MODE_HT);
r.mode = mode;
@@ -4195,9 +5083,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
r.connected = MLME_LINKED;
r.bcn_period = vif->bss_conf.beacon_int;
r.dtim_period = vif->bss_conf.dtim_period;
- r.band = hal->current_band_type;
- r.ch = hal->current_channel;
- r.bw = hal->current_band_width;
+ r.band = chan->band_type;
+ r.ch = chan->channel;
+ r.bw = chan->band_width;
ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
@@ -4208,7 +5096,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
memcpy(wlinfo, &r, sizeof(*wlinfo));
- _update_wl_info(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ _update_wl_info(rtwdev);
+ else
+ _update_wl_info_v1(rtwdev);
if (wlinfo->role == RTW89_WIFI_ROLE_STATION &&
wlinfo->connected == MLME_NO_LINK)
@@ -4230,6 +5121,7 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ u32 val;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): rf_state = %d\n",
__func__, rf_state);
@@ -4239,10 +5131,12 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
case BTC_RFCTRL_WL_OFF:
wl->status.map.rf_off = 1;
wl->status.map.lps = BTC_LPS_OFF;
+ wl->status.map.busy = 0;
break;
case BTC_RFCTRL_FW_CTRL:
wl->status.map.rf_off = 0;
wl->status.map.lps = BTC_LPS_RF_OFF;
+ wl->status.map.busy = 0;
break;
case BTC_RFCTRL_WL_ON:
default:
@@ -4252,14 +5146,17 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
}
if (rf_state == BTC_RFCTRL_WL_ON) {
+ btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
rtw89_btc_fw_en_rpt(rtwdev,
RPT_EN_MREG | RPT_EN_BT_VER_INFO, true);
- _write_scbd(rtwdev, BTC_WSCB_ACTIVE, true);
+ val = BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG;
+ _write_scbd(rtwdev, val, true);
_update_bt_scbd(rtwdev, true);
chip->ops->btc_init_cfg(rtwdev);
} else {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, false);
- _write_scbd(rtwdev, BTC_WSCB_ACTIVE | BTC_WSCB_WLBUSY, false);
+ if (rf_state == BTC_RFCTRL_WL_OFF)
+ _write_scbd(rtwdev, BTC_WSCB_ALL, false);
}
_run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
@@ -4370,6 +5267,7 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
rtwdev->is_bt_iqk_timeout = true;
}
}
+EXPORT_SYMBOL(rtw89_btc_ntfy_wl_rfk);
struct rtw89_btc_wl_sta_iter_data {
struct rtw89_dev *rtwdev;
@@ -4598,10 +5496,10 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n",
chip->chip_id);
- ver_main = FIELD_GET(GENMASK(31, 24), chip->para_ver);
- ver_sub = FIELD_GET(GENMASK(23, 16), chip->para_ver);
- ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->para_ver);
- id_branch = FIELD_GET(GENMASK(7, 0), chip->para_ver);
+ ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION);
+ ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), RTW89_COEX_VERSION);
+ id_branch = FIELD_GET(GENMASK(7, 0), RTW89_COEX_VERSION);
seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ",
"[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch);
@@ -4622,12 +5520,12 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
seq_printf(m, "(%s, desired:%d.%d.%d), ",
(wl->ver_info.fw_coex >= chip->wlcx_desired ?
- "Match" : "Mis-Match"), ver_main, ver_sub, ver_hotfix);
+ "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
bt->ver_info.fw_coex,
(bt->ver_info.fw_coex >= chip->btcx_desired ?
- "Match" : "Mis-Match"), chip->btcx_desired);
+ "Match" : "Mismatch"), chip->btcx_desired);
if (bt->enable.now && bt->ver_info.fw == 0)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -4676,7 +5574,7 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
wl_dinfo->real_band[RTW89_PHY_1]);
}
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
plink = &btc->cx.wl.link_info[i];
if (!plink->active)
@@ -4715,23 +5613,29 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
return;
seq_puts(m, "========== [WL Status] ==========\n");
- seq_printf(m, " %-15s : link_mode:%d, ",
- "[status]", (u32)wl_rinfo->link_mode);
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ seq_printf(m, " %-15s : link_mode:%d, ", "[status]", mode);
seq_printf(m,
- "rf_off:%s, power_save:%s, scan:%s(band:%d/phy_map:0x%x), ",
- wl->status.map.rf_off ? "Y" : "N",
- wl->status.map.lps ? "Y" : "N",
+ "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
+ wl->status.map.rf_off, wl->status.map.lps,
wl->status.map.scan ? "Y" : "N",
wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
@@ -4897,6 +5801,7 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_ACT_STR(e) case BTC_ACT_ ## e | BTC_ACT_EXT_BIT: return #e
#define CASE_BTC_POLICY_STR(e) \
case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
+#define CASE_BTC_SLOT_STR(e) case CXST_ ## e: return #e
static const char *steps_to_str(u16 step)
{
@@ -4958,9 +5863,16 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(OFF_EQ3);
CASE_BTC_POLICY_STR(OFF_BWB0);
CASE_BTC_POLICY_STR(OFF_BWB1);
+ CASE_BTC_POLICY_STR(OFF_BWB2);
+ CASE_BTC_POLICY_STR(OFF_BWB3);
CASE_BTC_POLICY_STR(OFFB_BWB0);
CASE_BTC_POLICY_STR(OFFE_DEF);
CASE_BTC_POLICY_STR(OFFE_DEF2);
+ CASE_BTC_POLICY_STR(OFFE_2GBWISOB);
+ CASE_BTC_POLICY_STR(OFFE_2GISOB);
+ CASE_BTC_POLICY_STR(OFFE_2GBWMIXB);
+ CASE_BTC_POLICY_STR(OFFE_WL);
+ CASE_BTC_POLICY_STR(OFFE_2GBWMIXB2);
CASE_BTC_POLICY_STR(FIX_TD3030);
CASE_BTC_POLICY_STR(FIX_TD5050);
CASE_BTC_POLICY_STR(FIX_TD2030);
@@ -4971,6 +5883,7 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(FIX_TD2080);
CASE_BTC_POLICY_STR(FIX_TDW1B1);
CASE_BTC_POLICY_STR(FIX_TD4020);
+ CASE_BTC_POLICY_STR(FIX_TD4010ISO);
CASE_BTC_POLICY_STR(PFIX_TD3030);
CASE_BTC_POLICY_STR(PFIX_TD5050);
CASE_BTC_POLICY_STR(PFIX_TD2030);
@@ -4978,13 +5891,13 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(PFIX_TD3070);
CASE_BTC_POLICY_STR(PFIX_TD2080);
CASE_BTC_POLICY_STR(PFIX_TDW1B1);
- CASE_BTC_POLICY_STR(AUTO_TD50200);
- CASE_BTC_POLICY_STR(AUTO_TD60200);
- CASE_BTC_POLICY_STR(AUTO_TD20200);
+ CASE_BTC_POLICY_STR(AUTO_TD50B1);
+ CASE_BTC_POLICY_STR(AUTO_TD60B1);
+ CASE_BTC_POLICY_STR(AUTO_TD20B1);
CASE_BTC_POLICY_STR(AUTO_TDW1B1);
- CASE_BTC_POLICY_STR(PAUTO_TD50200);
- CASE_BTC_POLICY_STR(PAUTO_TD60200);
- CASE_BTC_POLICY_STR(PAUTO_TD20200);
+ CASE_BTC_POLICY_STR(PAUTO_TD50B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD60B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD20B1);
CASE_BTC_POLICY_STR(PAUTO_TDW1B1);
CASE_BTC_POLICY_STR(AUTO2_TD3050);
CASE_BTC_POLICY_STR(AUTO2_TD3070);
@@ -5003,6 +5916,32 @@ static const char *steps_to_str(u16 step)
}
}
+static const char *id_to_slot(u32 id)
+{
+ switch (id) {
+ CASE_BTC_SLOT_STR(OFF);
+ CASE_BTC_SLOT_STR(B2W);
+ CASE_BTC_SLOT_STR(W1);
+ CASE_BTC_SLOT_STR(W2);
+ CASE_BTC_SLOT_STR(W2B);
+ CASE_BTC_SLOT_STR(B1);
+ CASE_BTC_SLOT_STR(B2);
+ CASE_BTC_SLOT_STR(B3);
+ CASE_BTC_SLOT_STR(B4);
+ CASE_BTC_SLOT_STR(LK);
+ CASE_BTC_SLOT_STR(BLK);
+ CASE_BTC_SLOT_STR(E2G);
+ CASE_BTC_SLOT_STR(E5G);
+ CASE_BTC_SLOT_STR(EBT);
+ CASE_BTC_SLOT_STR(ENULL);
+ CASE_BTC_SLOT_STR(WLK);
+ CASE_BTC_SLOT_STR(W1FDD);
+ CASE_BTC_SLOT_STR(B1FDD);
+ default:
+ return "unknown";
+ }
+}
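
The CASE_BTC_SLOT_STR macro stringizes its argument, so each case above is generated mechanically; for example:

	/* CASE_BTC_SLOT_STR(W1) expands (via ## and #) to: */
	case CXST_W1: return "W1";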
+
static
void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
@@ -5074,7 +6013,7 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
(BTC_CX_FW_OFFLOAD ? "Y" : "N"),
(dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
- "" : "(Mis-Match!!)"));
+ "" : "(Mismatch!!)"));
if (dm->rf_trx_para.wl_tx_power == 0xff)
seq_printf(m,
@@ -5094,21 +6033,31 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
(bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
seq_printf(m,
- " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU\n",
+ " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
"[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time,
- dm->wl_tx_limit.tx_retry, btc->bt_req_len);
+ dm->wl_tx_limit.tx_retry, btc->bt_req_len, bt->scan_rx_low_pri);
}
static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_cysta *pcysta = NULL;
-
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ struct rtw89_btc_fbtc_cysta *pcysta;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1;
+ u32 except_cnt, exception_map;
+
+ if (chip->chip_id == RTL8852A) {
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ except_cnt = le32_to_cpu(pcysta->except_cnt);
+ exception_map = le32_to_cpu(pcysta->exception);
+ } else {
+ pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ except_cnt = le32_to_cpu(pcysta_v1->except_cnt);
+ exception_map = le32_to_cpu(pcysta_v1->except_map);
+ }
- if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 &&
- pcysta->except_cnt == 0 &&
+ if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
!pfwinfo->len_mismch && !pfwinfo->fver_mismch)
return;
@@ -5133,16 +6082,17 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
}
/* cycle statistics exceptions */
- if (pcysta->exception || pcysta->except_cnt) {
+ if (exception_map || except_cnt) {
seq_printf(m,
"exception-type: 0x%x, exception-cnt = %d",
- pcysta->exception, pcysta->except_cnt);
+ exception_map, except_cnt);
}
seq_puts(m, "\n");
}
static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
@@ -5155,7 +6105,10 @@ static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ if (chip->chip_id == RTL8852A)
+ t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ else
+ t = &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma;
seq_printf(m,
" %-15s : ", "[tdma_policy]");
@@ -5358,12 +6311,145 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
}
}
+static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_a2dp_trx_stat *a2dp_trx;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u8 i, cnt = 0, slot_pair, divide_cnt;
+ u16 cycle, c_begin, c_end, store_index;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ seq_printf(m,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le32_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+
+ seq_printf(m, ", %s:%d", id_to_slot(i),
+ le32_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d", le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (le32_to_cpu(pcysta->collision_cnt))
+ seq_printf(m, ", collision:%d", le32_to_cpu(pcysta->collision_cnt));
+
+ if (le32_to_cpu(pcysta->skip_cnt))
+ seq_printf(m, ", skip:%d", le32_to_cpu(pcysta->skip_cnt));
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ seq_printf(m,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+
+ cycle = le16_to_cpu(pcysta->cycles);
+ if (cycle == 0)
+ return;
+
+	/* one cycle records one wl-slot and one bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (cycle <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = cycle - slot_pair + 1;
+
+ c_end = cycle;
+
+ if (a2dp->exist)
+ divide_cnt = 3;
+ else
+ divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ store_index = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1) {
+ seq_printf(m, "\n\r %-15s : ", "[cycle_step]");
+ } else {
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ }
+ if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
+ seq_puts(m, "\n");
+ }
+
+ if (a2dp->exist) {
+ seq_printf(m, "%-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+
+ seq_puts(m, "\n");
+ }
+}
+
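A worked example of the ring-buffer index math in _show_fbtc_cysta_v1(), assuming BTC_CYCLE_SLOT_MAX is 48 (the constant's value is not visible in this hunk):

    /* slot_pair = 48 / 2 = 24 cycle pairs fit in the ring:
     *   cycle 1  -> store_index = ((1 - 1)  % 24) * 2 = 0   (bt slot 0,  wl slot 1)
     *   cycle 24 -> store_index = ((24 - 1) % 24) * 2 = 46  (bt slot 46, wl slot 47)
     *   cycle 25 -> store_index = ((25 - 1) % 24) * 2 = 0   (wraps over cycle 1)
     * c_begin is therefore clamped so only the most recent
     * slot_pair cycles are printed.
     */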
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_cynullsta *ns = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_fbtc_cynullsta *ns;
+ struct rtw89_btc_fbtc_cynullsta_v1 *ns_v1;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
@@ -5373,25 +6459,58 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ if (chip->chip_id == RTL8852A) {
+ ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
- seq_printf(m, " %-15s : ", "[null_sta]");
+ seq_printf(m, " %-15s : ", "[null_sta]");
- for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[ok:%d/", le32_to_cpu(ns->result[i][1]));
- seq_printf(m, "fail:%d/", le32_to_cpu(ns->result[i][0]));
- seq_printf(m, "on_time:%d/", le32_to_cpu(ns->result[i][2]));
- seq_printf(m, "retry:%d/", le32_to_cpu(ns->result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->avg_t[i]) / 1000,
- le32_to_cpu(ns->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns->max_t[i]) / 1000,
- le32_to_cpu(ns->max_t[i]) % 1000);
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns->result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns->result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns->result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->avg_t[i]) / 1000,
+ le32_to_cpu(ns->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns->max_t[i]) / 1000,
+ le32_to_cpu(ns->max_t[i]) % 1000);
+ }
+ } else {
+ ns_v1 = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
+
+ seq_printf(m, " %-15s : ", "[null_sta]");
+
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[Tx:%d/",
+ le32_to_cpu(ns_v1->result[i][4]));
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns_v1->result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns_v1->result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns_v1->result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns_v1->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns_v1->avg_t[i]) / 1000,
+ le32_to_cpu(ns_v1->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns_v1->max_t[i]) / 1000,
+ le32_to_cpu(ns_v1->max_t[i]) % 1000);
+ }
}
seq_puts(m, "\n");
}
@@ -5467,6 +6586,7 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
@@ -5475,11 +6595,57 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_error(rtwdev, m);
_show_fbtc_tdma(rtwdev, m);
_show_fbtc_slots(rtwdev, m);
- _show_fbtc_cysta(rtwdev, m);
+
+ if (chip->chip_id == RTL8852A)
+ _show_fbtc_cysta(rtwdev, m);
+ else
+ _show_fbtc_cysta_v1(rtwdev, m);
+
_show_fbtc_nullsta(rtwdev, m);
_show_fbtc_step(rtwdev, m);
}
+static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_ax_gnt *gnt;
+ u32 val, status;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) {
+ rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
+ rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
+
+ gnt = &gnt_cfg->band[0];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S0_SW_CTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S0_STA);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S0_SW_CTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S0_STA);
+
+ gnt = &gnt_cfg->band[1];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S1_SW_CTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S1_STA);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S1_SW_CTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S1_STA);
+ } else if (chip->chip_id == RTL8852C) {
+ val = rtw89_read32(rtwdev, R_AX_GNT_SW_CTRL);
+ status = rtw89_read32(rtwdev, R_AX_GNT_VAL_V1);
+
+ gnt = &gnt_cfg->band[0];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S0_SWCTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S0);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S0_SWCTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S0);
+
+ gnt = &gnt_cfg->band[1];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S1_SWCTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S1);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S1_SWCTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S1);
+ } else {
+ return;
+ }
+}
+
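The `!!` construct used throughout _get_gnt() collapses a masked register value to 0 or 1; a minimal sketch, with an illustrative helper name:

    /* !! maps "any masked bit set" to exactly 1, "none set" to 0 */
    static inline u8 reg_bit_set(u32 reg, u32 mask)
    {
    	return !!(reg & mask);
    }

For example, reg_bit_set(0x80, 0x80) yields 1, whereas (0x80 & 0x80) alone yields 0x80, which would not fit the single-bit gnt_* status fields.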
static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5491,7 +6657,8 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_mac_ax_gnt gnt[2] = {0};
+ struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
+ struct rtw89_mac_ax_gnt gnt;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
@@ -5508,45 +6675,28 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
/* Avoid I/O if WL is in LPS or powered off */
if (!wl->status.map.lps && !wl->status.map.rf_off) {
- rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
- if (val & (B_AX_GNT_BT_RFC_S0_SW_VAL |
- B_AX_GNT_BT_BB_S0_SW_VAL))
- gnt[0].gnt_bt = true;
- if (val & (B_AX_GNT_BT_RFC_S0_SW_CTRL |
- B_AX_GNT_BT_BB_S0_SW_CTRL))
- gnt[0].gnt_bt_sw_en = true;
- if (val & (B_AX_GNT_WL_RFC_S0_SW_VAL |
- B_AX_GNT_WL_BB_S0_SW_VAL))
- gnt[0].gnt_wl = true;
- if (val & (B_AX_GNT_WL_RFC_S0_SW_CTRL |
- B_AX_GNT_WL_BB_S0_SW_CTRL))
- gnt[0].gnt_wl_sw_en = true;
-
- if (val & (B_AX_GNT_BT_RFC_S1_SW_VAL |
- B_AX_GNT_BT_BB_S1_SW_VAL))
- gnt[1].gnt_bt = true;
- if (val & (B_AX_GNT_BT_RFC_S1_SW_CTRL |
- B_AX_GNT_BT_BB_S1_SW_CTRL))
- gnt[1].gnt_bt_sw_en = true;
- if (val & (B_AX_GNT_WL_RFC_S1_SW_VAL |
- B_AX_GNT_WL_BB_S1_SW_VAL))
- gnt[1].gnt_wl = true;
- if (val & (B_AX_GNT_WL_RFC_S1_SW_CTRL |
- B_AX_GNT_WL_BB_S1_SW_CTRL))
- gnt[1].gnt_wl_sw_en = true;
+ if (chip->chip_id == RTL8852A)
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ else if (chip->chip_id == RTL8852C)
+ btc->dm.pta_owner = 0;
+ _get_gnt(rtwdev, &gnt_cfg);
+ gnt = gnt_cfg.band[0];
seq_printf(m,
" %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
"[gnt_status]",
- (rtw89_mac_get_ctrl_path(rtwdev) ? "WL" : "BT"),
- (gnt[0].gnt_wl_sw_en ? "SW" : "HW"), gnt[0].gnt_wl,
- (gnt[0].gnt_bt_sw_en ? "SW" : "HW"), gnt[0].gnt_bt);
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ gnt = gnt_cfg.band[1];
seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- (gnt[1].gnt_wl_sw_en ? "SW" : "HW"), gnt[1].gnt_wl,
- (gnt[1].gnt_bt_sw_en ? "SW" : "HW"), gnt[1].gnt_bt);
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
}
-
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -5703,8 +6853,121 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_puts(m, "========== [Statistics] ==========\n");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo_v1;
+
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
+
+ seq_printf(m,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le32_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en),
+ dm->error.val);
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ seq_printf(m,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
+
+ if (le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
+ } else {
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ seq_puts(m, " (WL FW report invalid!!)\n");
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
@@ -5735,5 +6998,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_info(rtwdev, m);
_show_fw_dm_msg(rtwdev, m);
_show_mreg(rtwdev, m);
- _show_summary(rtwdev, m);
+ if (chip->chip_id == RTL8852A)
+ _show_summary(rtwdev, m);
+ else
+ _show_summary_v1(rtwdev, m);
}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index c3a722d259d7..ca16afa97ec0 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -162,17 +162,19 @@ void rtw89_coex_act1_work(struct work_struct *work);
void rtw89_coex_bt_devinfo_work(struct work_struct *work);
void rtw89_coex_rfk_chk_work(struct work_struct *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
+void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
+void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
enum rtw89_rf_path_bit paths)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 phy_map;
phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) |
FIELD_PREP(BTC_RFK_PHY_MAP, BIT(phy_idx)) |
- FIELD_PREP(BTC_RFK_BAND_MAP, hal->current_band_type);
+ FIELD_PREP(BTC_RFK_BAND_MAP, chan->band_type);
return phy_map;
}
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a0737eea9f81..bc2994865372 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -4,6 +4,8 @@
#include <linux/ip.h>
#include <linux/udp.h>
+#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "core.h"
#include "efuse.h"
@@ -21,50 +23,122 @@ static bool rtw89_disable_ps_mode;
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
+#define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \
+ { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
+#define RTW89_DEF_CHAN_2G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
+#define RTW89_DEF_CHAN_5G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_6G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ)
+
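As an illustration, the first 2 GHz entry below, RTW89_DEF_CHAN_2G(2412, 1), expands through RTW89_DEF_CHAN to:

    { .center_freq = 2412, .hw_value = 1, .flags = 0, .band = NL80211_BAND_2GHZ, }

which is exactly the open-coded initializer being replaced, plus the band field the old table left implicit.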
static struct ieee80211_channel rtw89_channels_2ghz[] = {
- { .center_freq = 2412, .hw_value = 1, },
- { .center_freq = 2417, .hw_value = 2, },
- { .center_freq = 2422, .hw_value = 3, },
- { .center_freq = 2427, .hw_value = 4, },
- { .center_freq = 2432, .hw_value = 5, },
- { .center_freq = 2437, .hw_value = 6, },
- { .center_freq = 2442, .hw_value = 7, },
- { .center_freq = 2447, .hw_value = 8, },
- { .center_freq = 2452, .hw_value = 9, },
- { .center_freq = 2457, .hw_value = 10, },
- { .center_freq = 2462, .hw_value = 11, },
- { .center_freq = 2467, .hw_value = 12, },
- { .center_freq = 2472, .hw_value = 13, },
- { .center_freq = 2484, .hw_value = 14, },
+ RTW89_DEF_CHAN_2G(2412, 1),
+ RTW89_DEF_CHAN_2G(2417, 2),
+ RTW89_DEF_CHAN_2G(2422, 3),
+ RTW89_DEF_CHAN_2G(2427, 4),
+ RTW89_DEF_CHAN_2G(2432, 5),
+ RTW89_DEF_CHAN_2G(2437, 6),
+ RTW89_DEF_CHAN_2G(2442, 7),
+ RTW89_DEF_CHAN_2G(2447, 8),
+ RTW89_DEF_CHAN_2G(2452, 9),
+ RTW89_DEF_CHAN_2G(2457, 10),
+ RTW89_DEF_CHAN_2G(2462, 11),
+ RTW89_DEF_CHAN_2G(2467, 12),
+ RTW89_DEF_CHAN_2G(2472, 13),
+ RTW89_DEF_CHAN_2G(2484, 14),
};
static struct ieee80211_channel rtw89_channels_5ghz[] = {
- {.center_freq = 5180, .hw_value = 36,},
- {.center_freq = 5200, .hw_value = 40,},
- {.center_freq = 5220, .hw_value = 44,},
- {.center_freq = 5240, .hw_value = 48,},
- {.center_freq = 5260, .hw_value = 52,},
- {.center_freq = 5280, .hw_value = 56,},
- {.center_freq = 5300, .hw_value = 60,},
- {.center_freq = 5320, .hw_value = 64,},
- {.center_freq = 5500, .hw_value = 100,},
- {.center_freq = 5520, .hw_value = 104,},
- {.center_freq = 5540, .hw_value = 108,},
- {.center_freq = 5560, .hw_value = 112,},
- {.center_freq = 5580, .hw_value = 116,},
- {.center_freq = 5600, .hw_value = 120,},
- {.center_freq = 5620, .hw_value = 124,},
- {.center_freq = 5640, .hw_value = 128,},
- {.center_freq = 5660, .hw_value = 132,},
- {.center_freq = 5680, .hw_value = 136,},
- {.center_freq = 5700, .hw_value = 140,},
- {.center_freq = 5720, .hw_value = 144,},
- {.center_freq = 5745, .hw_value = 149,},
- {.center_freq = 5765, .hw_value = 153,},
- {.center_freq = 5785, .hw_value = 157,},
- {.center_freq = 5805, .hw_value = 161,},
- {.center_freq = 5825, .hw_value = 165,
- .flags = IEEE80211_CHAN_NO_HT40MINUS},
+ RTW89_DEF_CHAN_5G(5180, 36),
+ RTW89_DEF_CHAN_5G(5200, 40),
+ RTW89_DEF_CHAN_5G(5220, 44),
+ RTW89_DEF_CHAN_5G(5240, 48),
+ RTW89_DEF_CHAN_5G(5260, 52),
+ RTW89_DEF_CHAN_5G(5280, 56),
+ RTW89_DEF_CHAN_5G(5300, 60),
+ RTW89_DEF_CHAN_5G(5320, 64),
+ RTW89_DEF_CHAN_5G(5500, 100),
+ RTW89_DEF_CHAN_5G(5520, 104),
+ RTW89_DEF_CHAN_5G(5540, 108),
+ RTW89_DEF_CHAN_5G(5560, 112),
+ RTW89_DEF_CHAN_5G(5580, 116),
+ RTW89_DEF_CHAN_5G(5600, 120),
+ RTW89_DEF_CHAN_5G(5620, 124),
+ RTW89_DEF_CHAN_5G(5640, 128),
+ RTW89_DEF_CHAN_5G(5660, 132),
+ RTW89_DEF_CHAN_5G(5680, 136),
+ RTW89_DEF_CHAN_5G(5700, 140),
+ RTW89_DEF_CHAN_5G(5720, 144),
+ RTW89_DEF_CHAN_5G(5745, 149),
+ RTW89_DEF_CHAN_5G(5765, 153),
+ RTW89_DEF_CHAN_5G(5785, 157),
+ RTW89_DEF_CHAN_5G(5805, 161),
+ RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
+};
+
+static struct ieee80211_channel rtw89_channels_6ghz[] = {
+ RTW89_DEF_CHAN_6G(5955, 1),
+ RTW89_DEF_CHAN_6G(5975, 5),
+ RTW89_DEF_CHAN_6G(5995, 9),
+ RTW89_DEF_CHAN_6G(6015, 13),
+ RTW89_DEF_CHAN_6G(6035, 17),
+ RTW89_DEF_CHAN_6G(6055, 21),
+ RTW89_DEF_CHAN_6G(6075, 25),
+ RTW89_DEF_CHAN_6G(6095, 29),
+ RTW89_DEF_CHAN_6G(6115, 33),
+ RTW89_DEF_CHAN_6G(6135, 37),
+ RTW89_DEF_CHAN_6G(6155, 41),
+ RTW89_DEF_CHAN_6G(6175, 45),
+ RTW89_DEF_CHAN_6G(6195, 49),
+ RTW89_DEF_CHAN_6G(6215, 53),
+ RTW89_DEF_CHAN_6G(6235, 57),
+ RTW89_DEF_CHAN_6G(6255, 61),
+ RTW89_DEF_CHAN_6G(6275, 65),
+ RTW89_DEF_CHAN_6G(6295, 69),
+ RTW89_DEF_CHAN_6G(6315, 73),
+ RTW89_DEF_CHAN_6G(6335, 77),
+ RTW89_DEF_CHAN_6G(6355, 81),
+ RTW89_DEF_CHAN_6G(6375, 85),
+ RTW89_DEF_CHAN_6G(6395, 89),
+ RTW89_DEF_CHAN_6G(6415, 93),
+ RTW89_DEF_CHAN_6G(6435, 97),
+ RTW89_DEF_CHAN_6G(6455, 101),
+ RTW89_DEF_CHAN_6G(6475, 105),
+ RTW89_DEF_CHAN_6G(6495, 109),
+ RTW89_DEF_CHAN_6G(6515, 113),
+ RTW89_DEF_CHAN_6G(6535, 117),
+ RTW89_DEF_CHAN_6G(6555, 121),
+ RTW89_DEF_CHAN_6G(6575, 125),
+ RTW89_DEF_CHAN_6G(6595, 129),
+ RTW89_DEF_CHAN_6G(6615, 133),
+ RTW89_DEF_CHAN_6G(6635, 137),
+ RTW89_DEF_CHAN_6G(6655, 141),
+ RTW89_DEF_CHAN_6G(6675, 145),
+ RTW89_DEF_CHAN_6G(6695, 149),
+ RTW89_DEF_CHAN_6G(6715, 153),
+ RTW89_DEF_CHAN_6G(6735, 157),
+ RTW89_DEF_CHAN_6G(6755, 161),
+ RTW89_DEF_CHAN_6G(6775, 165),
+ RTW89_DEF_CHAN_6G(6795, 169),
+ RTW89_DEF_CHAN_6G(6815, 173),
+ RTW89_DEF_CHAN_6G(6835, 177),
+ RTW89_DEF_CHAN_6G(6855, 181),
+ RTW89_DEF_CHAN_6G(6875, 185),
+ RTW89_DEF_CHAN_6G(6895, 189),
+ RTW89_DEF_CHAN_6G(6915, 193),
+ RTW89_DEF_CHAN_6G(6935, 197),
+ RTW89_DEF_CHAN_6G(6955, 201),
+ RTW89_DEF_CHAN_6G(6975, 205),
+ RTW89_DEF_CHAN_6G(6995, 209),
+ RTW89_DEF_CHAN_6G(7015, 213),
+ RTW89_DEF_CHAN_6G(7035, 217),
+ RTW89_DEF_CHAN_6G(7055, 221),
+ RTW89_DEF_CHAN_6G(7075, 225),
+ RTW89_DEF_CHAN_6G(7095, 229),
+ RTW89_DEF_CHAN_6G(7115, 233),
};
static struct ieee80211_rate rtw89_bitrates[] = {
@@ -82,18 +156,19 @@ static struct ieee80211_rate rtw89_bitrates[] = {
{ .bitrate = 540, .hw_value = 0x0b, },
};
-u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate)
+bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
- rtw89_info(rtwdev, "invalid rpt rate %d\n", rpt_rate);
- return 0;
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
+ return false;
}
rate = rtw89_bitrates[rpt_rate];
+ *bitrate = rate.bitrate;
- return rate.bitrate;
+ return true;
}
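With the signature change above, callers must check the return value before consuming the out-parameter; a hedged caller sketch (the wrapper name is illustrative, not from the patch):

    static void report_bitrate_example(struct rtw89_dev *rtwdev, u8 rpt_rate)
    {
    	u16 bitrate;

    	/* *bitrate is only written when the index is valid */
    	if (!rtw89_ra_report_to_bitrate(rtwdev, rpt_rate, &bitrate))
    		return;

    	/* bitrate is now safe to use */
    }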
static struct ieee80211_supported_band rtw89_sband_2ghz = {
@@ -118,6 +193,16 @@ static struct ieee80211_supported_band rtw89_sband_5ghz = {
.vht_cap = {0},
};
+static struct ieee80211_supported_band rtw89_sband_6ghz = {
+ .band = NL80211_BAND_6GHZ,
+ .channels = rtw89_channels_6ghz,
+ .n_channels = ARRAY_SIZE(rtw89_channels_6ghz),
+
+	/* 6G has no CCK rates (1M/2M/5.5M/11M), so skip the first four bitrates */
+ .bitrates = rtw89_bitrates + 4,
+ .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
+};
+
static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats,
struct sk_buff *skb, bool tx)
@@ -140,15 +225,22 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
- struct rtw89_channel_params *chan_param)
+void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef)
+{
+ cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0],
+ NL80211_CHAN_NO_HT);
+}
+
+static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
+ struct rtw89_chan *chan)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
- u8 primary_chan_idx = 0;
+ u32 offset;
+ u8 band;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
@@ -158,36 +250,24 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW89_CHANNEL_WIDTH_20;
- primary_chan_idx = RTW89_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW89_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
- primary_chan_idx = RTW89_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = RTW89_SC_20_LOWER;
center_chan += 2;
}
break;
case NL80211_CHAN_WIDTH_80:
- bandwidth = RTW89_CHANNEL_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ bandwidth = nl_to_rtw89_bandwidth(width);
if (primary_freq > center_freq) {
- if (primary_freq - center_freq == 10) {
- primary_chan_idx = RTW89_SC_20_UPPER;
- center_chan -= 2;
- } else {
- primary_chan_idx = RTW89_SC_20_UPMOST;
- center_chan -= 6;
- }
+ offset = (primary_freq - center_freq - 10) / 20;
+ center_chan -= 2 + offset * 4;
} else {
- if (center_freq - primary_freq == 10) {
- primary_chan_idx = RTW89_SC_20_LOWER;
- center_chan += 2;
- } else {
- primary_chan_idx = RTW89_SC_20_LOWEST;
- center_chan += 6;
- }
+ offset = (center_freq - primary_freq - 10) / 20;
+ center_chan += 2 + offset * 4;
}
break;
default:
@@ -195,66 +275,89 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
break;
}
- chan_param->center_chan = center_chan;
- chan_param->primary_chan = channel->hw_value;
- chan_param->bandwidth = bandwidth;
- chan_param->pri_ch_idx = primary_chan_idx;
+ switch (channel->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ band = RTW89_BAND_2G;
+ break;
+ case NL80211_BAND_5GHZ:
+ band = RTW89_BAND_5G;
+ break;
+ case NL80211_BAND_6GHZ:
+ band = RTW89_BAND_6G;
+ break;
+ }
+
+ rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth);
+}
+
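A worked example of the generalized offset arithmetic in rtw89_get_channel_params() for an 80 MHz chandef with primary channel 36 (5180 MHz) and center frequency 5210 MHz:

    /* primary_freq (5180) < center_freq (5210):
     *   offset      = (5210 - 5180 - 10) / 20 = 1
     *   center_chan = 36 + 2 + 1 * 4        = 42
     * i.e. channel 42, the standard 80 MHz center for channels 36-48.
     * Each extra 20 MHz of separation adds 4 channel numbers, which is
     * what the removed UPPER/UPMOST special cases hard-coded.
     */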
+void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_phy_idx phy_idx;
+ enum rtw89_entity_mode mode;
+ bool entity_active;
+
+ entity_active = rtw89_get_entity_state(rtwdev);
+ if (!entity_active)
+ return;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
+ return;
+
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ phy_idx = RTW89_PHY_0;
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ if (chip->ops->set_txpwr)
+ chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
void rtw89_set_channel(struct rtw89_dev *rtwdev)
{
- struct ieee80211_hw *hw = rtwdev->hw;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_channel_params ch_param;
+ const struct cfg80211_chan_def *chandef;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_mac_idx mac_idx;
+ enum rtw89_phy_idx phy_idx;
+ struct rtw89_chan chan;
struct rtw89_channel_help_params bak;
- u8 center_chan, bandwidth;
- u8 band_type;
+ enum rtw89_entity_mode mode;
bool band_changed;
+ bool entity_active;
+
+ entity_active = rtw89_get_entity_state(rtwdev);
- rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
- if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
+ mode = rtw89_entity_recalc(rtwdev);
+ if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
return;
- center_chan = ch_param.center_chan;
- bandwidth = ch_param.bandwidth;
- band_type = center_chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- band_changed = hal->current_band_type != band_type ||
- hal->current_channel == 0;
-
- hal->current_band_width = bandwidth;
- hal->current_channel = center_chan;
- hal->prev_primary_channel = hal->current_primary_channel;
- hal->current_primary_channel = ch_param.primary_chan;
- hal->current_band_type = band_type;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->current_subband = RTW89_CH_2G;
- break;
- case 36 ... 64:
- hal->current_subband = RTW89_CH_5G_BAND_1;
- break;
- case 100 ... 144:
- hal->current_subband = RTW89_CH_5G_BAND_3;
- break;
- case 149 ... 177:
- hal->current_subband = RTW89_CH_5G_BAND_4;
- break;
- }
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ mac_idx = RTW89_MAC_0;
+ phy_idx = RTW89_PHY_0;
+ chandef = rtw89_chandef_get(rtwdev, sub_entity_idx);
+ rtw89_get_channel_params(chandef, &chan);
+ if (WARN(chan.channel == 0, "Invalid channel\n"))
+ return;
+
+ band_changed = rtw89_assign_entity_chan(rtwdev, sub_entity_idx, &chan);
- rtw89_chip_set_channel_prepare(rtwdev, &bak);
+ rtw89_chip_set_channel_prepare(rtwdev, &bak, &chan, mac_idx, phy_idx);
- chip->ops->set_channel(rtwdev, &ch_param);
+ chip->ops->set_channel(rtwdev, &chan, mac_idx, phy_idx);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
- rtw89_chip_set_channel_done(rtwdev, &bak);
+ rtw89_chip_set_channel_done(rtwdev, &bak, &chan, mac_idx, phy_idx);
- if (band_changed) {
- rtw89_btc_ntfy_switch_band(rtwdev, RTW89_PHY_0, hal->current_band_type);
- rtw89_chip_rfk_band_changed(rtwdev);
+ if (!entity_active || band_changed) {
+ rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan.band_type);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
}
+
+ rtw89_set_entity_state(rtwdev, true);
}
static enum rtw89_core_tx_type
@@ -272,26 +375,38 @@ rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
static void
rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
- struct rtw89_core_tx_request *tx_req, u8 tid)
+ struct rtw89_core_tx_request *tx_req,
+ enum btc_pkt_type pkt_type)
{
struct ieee80211_sta *sta = tx_req->sta;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct sk_buff *skb = tx_req->skb;
struct rtw89_sta *rtwsta;
u8 ampdu_num;
+ u8 tid;
+
+ if (pkt_type == PACKET_EAPOL) {
+ desc_info->bk = true;
+ return;
+ }
+
+ if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU))
+ return;
if (!sta) {
rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
return;
}
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
rtwsta = (struct rtw89_sta *)sta->drv_priv;
ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
rtwsta->ampdu_params[tid].agg_num :
- 4 << sta->ht_cap.ampdu_factor) - 1);
+ 4 << sta->deflink.ht_cap.ampdu_factor) - 1);
desc_info->agg_en = true;
- desc_info->ampdu_density = sta->ht_cap.ampdu_density;
+ desc_info->ampdu_density = sta->deflink.ht_cap.ampdu_density;
desc_info->ampdu_num = ampdu_num;
}
@@ -299,15 +414,19 @@ static void
rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_sta *sta = tx_req->sta;
struct ieee80211_tx_info *info;
struct ieee80211_key_conf *key;
struct rtw89_vif *rtwvif;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_addr_cam_entry *addr_cam;
struct rtw89_sec_cam_entry *sec_cam;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
+ u64 pn64;
if (!vif) {
rtw89_warn(rtwdev, "cannot set sec key without vif\n");
@@ -315,7 +434,7 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
info = IEEE80211_SKB_CB(skb);
key = info->control.hw_key;
@@ -353,8 +472,21 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
}
desc_info->sec_en = true;
+ desc_info->sec_keyid = key->keyidx;
desc_info->sec_type = sec_type;
desc_info->sec_cam_idx = sec_cam->sec_cam_idx;
+
+ if (!chip->hw_sec_hdr)
+ return;
+
+ pn64 = atomic64_inc_return(&key->tx_pn);
+ desc_info->sec_seq[0] = pn64;
+ desc_info->sec_seq[1] = pn64 >> 8;
+ desc_info->sec_seq[2] = pn64 >> 16;
+ desc_info->sec_seq[3] = pn64 >> 24;
+ desc_info->sec_seq[4] = pn64 >> 32;
+ desc_info->sec_seq[5] = pn64 >> 40;
+	desc_info->wp_offset = 1; /* in units of 8 bytes for security header */
}
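The six sec_seq bytes written above carry the 48-bit packet number LSB-first; for example, with pn64 == 0x0000bbaa99887766:

    /* sec_seq[0] = 0x66  (pn64)
     * sec_seq[1] = 0x77  (pn64 >> 8)
     * sec_seq[2] = 0x88  (pn64 >> 16)
     * sec_seq[3] = 0x99  (pn64 >> 24)
     * sec_seq[4] = 0xaa  (pn64 >> 32)
     * sec_seq[5] = 0xbb  (pn64 >> 40)
     */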
static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
@@ -363,9 +495,15 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = tx_info->control.vif;
- struct rtw89_hal *hal = &rtwdev->hal;
- u16 lowest_rate = hal->current_band_type == RTW89_BAND_2G ?
- RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u16 lowest_rate;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)
return lowest_rate;
@@ -377,14 +515,20 @@ static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 qsel, ch_dma;
- qsel = RTW89_TX_QSEL_B0_MGMT;
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
- desc_info->qsel = RTW89_TX_QSEL_B0_MGMT;
+ desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
+ desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
/* fixed data rate for mgmt frames */
desc_info->en_wd_info = true;
@@ -393,9 +537,9 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req);
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
- "tx mgmt frame with rate 0x%x on channel %d (bw %d)\n",
- desc_info->data_rate, rtwdev->hal.current_channel,
- rtwdev->hal.current_band_width);
+ "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
+ desc_info->data_rate, chan->channel, chan->band_type,
+ chan->band_width);
}
static void
@@ -420,15 +564,16 @@ static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc
};
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 om_bandwidth;
if (!chip->dis_2g_40m_ul_ofdma ||
- hal->current_band_type != RTW89_BAND_2G ||
- hal->current_band_width != RTW89_CHANNEL_WIDTH_40)
+ chan->band_type != RTW89_BAND_2G ||
+ chan->band_width != RTW89_CHANNEL_WIDTH_40)
return;
- om_bandwidth = hal->current_band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
- rtw89_bandwidth_to_om[hal->current_band_width] : 0;
+ om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
+ rtw89_bandwidth_to_om[chan->band_width] : 0;
*htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) |
@@ -446,6 +591,7 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
enum btc_pkt_type pkt_type)
{
struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct sk_buff *skb = tx_req->skb;
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
@@ -454,7 +600,7 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
if (pkt_type < PACKET_MAX)
return false;
- if (!sta || !sta->he_cap.has_he)
+ if (!sta || !sta->deflink.he_cap.has_he)
return false;
if (!ieee80211_is_data_qos(fc))
@@ -463,6 +609,9 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
return false;
+ if (rtwsta && rtwsta->ra_report.might_fallback_legacy)
+ return false;
+
return true;
}
@@ -520,6 +669,21 @@ desc_bk:
desc_info->bk = true;
}
+static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta;
+
+ if (!sta)
+ return rtwvif->mac_id;
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ return rtwsta->mac_id;
+}
+
static void
rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -527,7 +691,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 tid, tid_indicate;
@@ -535,24 +699,26 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
- qsel = rtw89_core_get_qsel(rtwdev, tid);
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
desc_info->qsel = qsel;
+ desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
- if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU)
- rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, tid);
if (IEEE80211_SKB_CB(skb)->control.hw_key)
rtw89_core_tx_update_sec_key(rtwdev, tx_req);
- if (rate_pattern->enable)
+ if (vif->p2p)
+ desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (rate_pattern->enable)
desc_info->data_retry_lowest_rate = rate_pattern->rate;
- else if (hal->current_band_type == RTW89_BAND_2G)
+ else if (chan->band_type == RTW89_BAND_2G)
desc_info->data_retry_lowest_rate = RTW89_HW_RATE_CCK1;
else
desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
@@ -595,12 +761,43 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
return PACKET_MAX;
}
+static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
+	desc_info->hdr_llc_len >>= 1; /* in units of 2 bytes */
+}
+
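A quick worked example of the unit conversion in rtw89_core_tx_update_llc_hdr(): a QoS data header is 26 bytes, so ieee80211_hdrlen() returns 26 and hdr_llc_len becomes 26 >> 1 = 13, the value the hardware expects in 2-byte units.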
+static void
+rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
+ return;
+
+ if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ return;
+
+ if (chip->chip_id != RTL8852C &&
+ tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
+ return;
+
+ rtw89_mac_notify_wake(rtwdev);
+}
+
static void
rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
enum rtw89_core_tx_type tx_type;
enum btc_pkt_type pkt_type;
@@ -619,6 +816,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
desc_info->pkt_size = skb->len;
desc_info->is_bmc = is_bmc;
desc_info->wd_page = true;
+ desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM;
switch (tx_req->tx_type) {
case RTW89_CORE_TX_TYPE_MGMT:
@@ -628,6 +826,8 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
rtw89_core_tx_update_data_info(rtwdev, tx_req);
pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
+ rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type);
+ rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
break;
case RTW89_CORE_TX_TYPE_FWCMD:
rtw89_core_tx_update_h2c_info(rtwdev, tx_req);
@@ -651,6 +851,14 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
u32 cnt;
int ret;
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "ignore h2c due to power is off with firmware state=%d\n",
+ test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags));
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
tx_req.skb = skb;
tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD;
if (fwdl)
@@ -691,6 +899,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+ rtw89_core_tx_wake(rtwdev, &tx_req);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -710,16 +920,40 @@ static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
+ FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
+ FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
+ FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
+ FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
return cpu_to_le32(dword);
}
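FIELD_PREP(), used by all the txwd builders here, shifts a field value into the bit positions named by its mask; a small worked example with a hypothetical mask:

    /* FIELD_PREP(GENMASK(11, 8), 0x5) == 0x5 << 8 == 0x500 */

Each builder thus simply ORs independently positioned fields into one 32-bit word before the cpu_to_le32() conversion.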
+static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
+ FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
+ FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
- FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size);
+ FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) |
+ FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id);
return cpu_to_le32(dword);
}
@@ -733,11 +967,46 @@ static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
+ FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
+ FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
+ FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
+ FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) |
+ FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
- FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
}
@@ -762,6 +1031,15 @@ static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info)
return cpu_to_le32(dword);
}
+static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
+ FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
+ FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
+
+ return cpu_to_le32(dword);
+}
+
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
@@ -793,6 +1071,54 @@ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_core_fill_txdesc);
+void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc;
+ struct rtw89_txwd_info *txwd_info;
+
+ txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info);
+ txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info);
+ txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
+ txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
+ if (desc_info->sec_en) {
+ txwd_body->dword4 = rtw89_build_txwd_body4(desc_info);
+ txwd_body->dword5 = rtw89_build_txwd_body5(desc_info);
+ }
+ txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info);
+
+ if (!desc_info->en_wd_info)
+ return;
+
+ txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
+ txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info);
+ txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
+ txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info);
+ txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1);
+
+static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
+ FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
+ RTW89_CORE_RX_TYPE_FWDL :
+ RTW89_CORE_RX_TYPE_H2C);
+
+ return cpu_to_le32(dword);
+}
+
+void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc;
+
+ txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1);
+
static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
struct sk_buff *skb,
struct rtw89_rx_phy_ppdu *phy_ppdu)
@@ -830,9 +1156,14 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ int i;
- if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self)
+ if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self) {
ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
+ }
}
#define VAR_LEN 0xff
@@ -888,22 +1219,22 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, u8 *addr,
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- s8 *rssi = phy_ppdu->rssi;
+ u8 *rssi = phy_ppdu->rssi;
u8 *buf = phy_ppdu->buf;
phy_ppdu->ie = RTW89_GET_PHY_STS_IE_MAP(buf);
phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf);
- rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf));
- rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf));
- rssi[RF_PATH_C] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_C(buf));
- rssi[RF_PATH_D] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_D(buf));
+ rssi[RF_PATH_A] = RTW89_GET_PHY_STS_RSSI_A(buf);
+ rssi[RF_PATH_B] = RTW89_GET_PHY_STS_RSSI_B(buf);
+ rssi[RF_PATH_C] = RTW89_GET_PHY_STS_RSSI_C(buf);
+ rssi[RF_PATH_D] = RTW89_GET_PHY_STS_RSSI_D(buf);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
if (RTW89_GET_PHY_STS_LEN(phy_ppdu->buf) << 3 != phy_ppdu->len) {
- rtw89_warn(rtwdev, "phy ppdu len mismatch\n");
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n");
return -EINVAL;
}
rtw89_core_update_phy_ppdu(phy_ppdu);
@@ -996,13 +1327,7 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
}
- if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
- bw = RATE_INFO_BW_80;
- else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
- bw = RATE_INFO_BW_40;
- else
- bw = RATE_INFO_BW_20;
-
+ bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
status->rate_idx == rate_idx &&
@@ -1020,6 +1345,47 @@ struct rtw89_vif_rx_stats_iter_data {
const u8 *bssid;
};
+static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data;
+ u8 *pos, *end, type;
+ u16 aid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) ||
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION ||
+ rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ return;
+
+ type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK);
+ if (type != IEEE80211_TRIGGER_TYPE_BASIC)
+ return;
+
+ end = (u8 *)tf + skb->len;
+ pos = tf->variable;
+
+ while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) {
+ aid = RTW89_GET_TF_USER_INFO_AID12(pos);
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "[TF] aid: %d, ul_mcs: %d, rua: %d\n",
+ aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos),
+ RTW89_GET_TF_USER_INFO_RUA(pos));
+
+ if (aid == RTW89_TF_PAD)
+ break;
+
+ if (aid == vif->cfg.aid) {
+ rtwvif->stats.rx_tf_acc++;
+ rtwdev->stats.rx_tf_acc++;
+ break;
+ }
+
+ pos += RTW89_TF_BASIC_USER_INFO_SZ;
+ }
+}
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -1032,6 +1398,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const u8 *bssid = iter_data->bssid;
+ if (ieee80211_is_trigger(hdr->frame_control)) {
+ rtw89_stats_trigger_frame(rtwdev, vif, skb);
+ return;
+ }
+
if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
return;
@@ -1067,8 +1438,11 @@ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
- u16 chan = rtwdev->hal.prev_primary_channel;
- u8 band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ const struct rtw89_chan_rcd *rcd =
+ rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u16 chan = rcd->prev_primary_channel;
+ u8 band = rcd->prev_band_type == RTW89_BAND_2G ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
if (status->band != NL80211_BAND_2GHZ &&
status->encoding == RX_ENC_LEGACY &&
@@ -1083,10 +1457,40 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
if (rx_status->band == NL80211_BAND_2GHZ ||
rx_status->encoding != RX_ENC_LEGACY)
return;
+
+	/* The freq of some control frames (ACKs in this case) is reported
+	 * wrongly due to FW notify timing; set the lowest rate so the CCK
+	 * adjustment below cannot underflow.
+	 */
+ if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) {
+ rx_status->rate_idx = 0;
+ return;
+ }
+
/* The first 4 (CCK) rates don't exist on non-2G bands */
rx_status->rate_idx -= 4;
}
+static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb_ppdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct napi_struct *napi = &rtwdev->napi;
+
+	/* In low power mode, NAPI isn't scheduled, so deliver directly to the netif path. */
+ if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state)))
+ napi = NULL;
+
+ rtw89_core_hw_to_sband_rate(rx_status);
+ rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+	/* In low power mode, RX runs in thread context, so keep BHs disabled around the indication. */
+ local_bh_disable();
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
+ local_bh_enable();
+ rtwdev->napi_budget_countdown--;
+}
+
static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -1106,10 +1510,7 @@ static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
rtw89_correct_cck_chan(rtwdev, rx_status);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status);
}
}
@@ -1158,6 +1559,7 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_rxdesc_short *rxd_s;
struct rtw89_rxdesc_long *rxd_l;
u8 shift_len, drv_info_len;
@@ -1168,7 +1570,10 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
desc_info->long_rxdesc = RTW89_GET_RXWD_LONG_RXD(rxd_s);
desc_info->pkt_type = RTW89_GET_RXWD_RPKT_TYPE(rxd_s);
desc_info->mac_info_valid = RTW89_GET_RXWD_MAC_INFO_VALID(rxd_s);
- desc_info->bw = RTW89_GET_RXWD_BW(rxd_s);
+ if (chip->chip_id == RTL8852C)
+ desc_info->bw = RTW89_GET_RXWD_BW_V1(rxd_s);
+ else
+ desc_info->bw = RTW89_GET_RXWD_BW(rxd_s);
desc_info->data_rate = RTW89_GET_RXWD_DATA_RATE(rxd_s);
desc_info->gi_ltf = RTW89_GET_RXWD_GI_LTF(rxd_s);
desc_info->user_id = RTW89_GET_RXWD_USER_ID(rxd_s);
@@ -1249,13 +1654,26 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_hw *hw = rtwdev->hw;
+ const struct cfg80211_chan_def *chandef =
+ rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u16 data_rate;
u8 data_rate_mode;
/* currently using single PHY */
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = chandef->chan->center_freq;
+ rx_status->band = chandef->chan->band;
+
+ if (rtwdev->scanning &&
+ RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ u8 chan = cur->primary_channel;
+ u8 band = cur->band_type;
+ enum nl80211_band nl_band;
+
+ nl_band = rtw89_hw_to_nl80211_band(band);
+ rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band);
+ rx_status->band = nl_band;
+ }
if (desc_info->icv_err || desc_info->crc32_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -1264,12 +1682,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
!(desc_info->sw_dec || desc_info->icv_err))
rx_status->flag |= RX_FLAG_DECRYPTED;
- if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
- rx_status->bw = RATE_INFO_BW_80;
- else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
- rx_status->bw = RATE_INFO_BW_40;
- else
- rx_status->bw = RATE_INFO_BW_20;
+ rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
data_rate = desc_info->data_rate;
data_rate_mode = GET_DATA_RATE_MODE(data_rate);
@@ -1308,7 +1721,8 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (rtw89_disable_ps_mode || !chip->ps_mode_supported)
+ if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
+ RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED))
@@ -1334,10 +1748,7 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status);
}
}
@@ -1364,14 +1775,10 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
if (desc_info->long_rxdesc &&
- BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) {
+ BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
- } else {
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
- }
+ else
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status);
}
EXPORT_SYMBOL(rtw89_core_rx);
@@ -1398,7 +1805,7 @@ void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
{
init_dummy_netdev(&rtwdev->netdev);
netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
- rtwdev->hci.ops->napi_poll, NAPI_POLL_WEIGHT);
+ rtwdev->hci.ops->napi_poll);
}
EXPORT_SYMBOL(rtw89_core_napi_init);
@@ -1463,6 +1870,48 @@ static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
spin_unlock_bh(&rtwdev->ba_lock);
}
+static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+
+ if (sta == txq->sta) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ list_del_init(&rtwtxq->list);
+ }
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
+static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
+ struct rtw89_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+
+ if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc))
+ return;
+
+ if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) ||
+ test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ return;
+
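+ /* park the queue on forbid_ba_list; rtw89_forbid_ba_work() lifts the ban after RTW89_FORBID_BA_TIMER */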
+ spin_lock_bh(&rtwdev->ba_lock);
+ if (!test_and_set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
+ spin_unlock_bh(&rtwdev->ba_lock);
+
+ ieee80211_stop_tx_ba_session(sta, txq->tid);
+ cancel_delayed_work(&rtwdev->forbid_ba_work);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
+ RTW89_FORBID_BA_TIMER);
+}
+
static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
struct rtw89_txq *rtwtxq,
struct sk_buff *skb)
@@ -1472,11 +1921,13 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta = txq->sta;
struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
- if (unlikely(skb_get_queue_mapping(skb) == IEEE80211_AC_VO))
+ if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
return;
- if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
return;
+ }
if (unlikely(!sta))
return;
@@ -1509,11 +1960,12 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
unsigned long i;
int ret;
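+ /* mac80211's txq dequeue relies on RCU-protected data, so keep the RCU read lock held across the dequeue/write loop */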
+ rcu_read_lock();
for (i = 0; i < frame_cnt; i++) {
skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
if (!skb) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
- return;
+ goto out;
}
rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
@@ -1523,6 +1975,8 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
break;
}
}
+out:
+ rcu_read_unlock();
}
static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
@@ -1594,10 +2048,24 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
ieee80211_return_txq(hw, txq, sched_txq);
if (frame_cnt != 0)
rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid));
+
+ /* if the whole tx_resource budget was consumed, burst traffic may have left frames queued; reinvoke the scheduler */
+ if (frame_cnt == tx_resource)
+ *reinvoke = true;
}
ieee80211_txq_schedule_end(hw, ac);
}
+static void rtw89_ips_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ ips_work);
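+ /* re-check IDLE under the mutex; the flag may have changed since the work was queued */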
+ mutex_lock(&rtwdev->mutex);
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtw89_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static void rtw89_core_txq_work(struct work_struct *w)
{
struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
@@ -1621,6 +2089,20 @@ static void rtw89_core_txq_reinvoke_work(struct work_struct *w)
queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
}
+static void rtw89_forbid_ba_work(struct work_struct *w)
+{
+ struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
+ forbid_ba_work.work);
+ struct rtw89_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ list_del_init(&rtwtxq->list);
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
u32 throughput, u64 cnt)
{
@@ -1662,6 +2144,8 @@ static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
stats->rx_unicast = 0;
stats->tx_cnt = 0;
stats->rx_cnt = 0;
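+ /* latch the trigger-frame count for this period, then restart the accumulator */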
+ stats->rx_tf_periodic = stats->rx_tf_acc;
+ stats->rx_tf_acc = 0;
if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv)
return true;
@@ -1683,12 +2167,13 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE)
- rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ rtw89_enter_lps(rtwdev, rtwvif);
}
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
@@ -1741,6 +2226,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_chip_rfk_track(rtwdev);
rtw89_phy_ra_update(rtwdev);
rtw89_phy_cfo_track(rtwdev);
+ rtw89_phy_tx_path_div_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -1770,6 +2256,75 @@ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
bitmap_zero(addr, nbits);
}
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_ba_cam_entry *entry = NULL, *tmp;
+ u8 idx;
+ int i;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
+ if (idx == chip->bacam_num) {
+ /* TID 0/5 gets a static BA CAM entry, so if the BA CAM is full,
+ * evict an existing entry of another TID. Hardware keeps processing
+ * the evicted TID automatically.
+ */
+ if (tid != 0 && tid != 5)
+ return -ENOSPC;
+
+ for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) {
+ tmp = &cam_info->ba_cam_entry[i];
+ if (tmp->tid == 0 || tmp->tid == 5)
+ continue;
+
+ idx = i;
+ entry = tmp;
+ list_del(&entry->list);
+ break;
+ }
+
+ if (!entry)
+ return -ENOSPC;
+ } else {
+ entry = &cam_info->ba_cam_entry[idx];
+ }
+
+ entry->tid = tid;
+ list_add_tail(&entry->list, &rtwsta->ba_cam_list);
+
+ *cam_idx = idx;
+
+ return 0;
+}
+
+int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_ba_cam_entry *entry = NULL, *tmp;
+ u8 idx;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ list_for_each_entry_safe(entry, tmp, &rtwsta->ba_cam_list, list) {
+ if (entry->tid != tid)
+ continue;
+
+ idx = entry - cam_info->ba_cam_entry;
+ list_del(&entry->list);
+
+ rtw89_core_release_bit_map(cam_info->ba_cam_map, idx);
+ *cam_idx = idx;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
#define RTW89_TYPE_MAPPING(_type) \
case NL80211_IFTYPE_ ## _type: \
rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \
@@ -1779,9 +2334,19 @@ void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
+ else
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_STATION;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
+ else
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_AP;
+ break;
RTW89_TYPE_MAPPING(ADHOC);
- RTW89_TYPE_MAPPING(STATION);
- RTW89_TYPE_MAPPING(AP);
RTW89_TYPE_MAPPING(MONITOR);
RTW89_TYPE_MAPPING(MESH_POINT);
default:
@@ -1824,20 +2389,27 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int i;
+ rtwsta->rtwdev = rtwdev;
rtwsta->rtwvif = rtwvif;
rtwsta->prev_rssi = 0;
+ INIT_LIST_HEAD(&rtwsta->ba_cam_list);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw89_core_txq_init(rtwdev, sta->txq[i]);
ewma_rssi_init(&rtwsta->avg_rssi);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_rssi_init(&rtwsta->rssi[i]);
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
rtwsta->mac_id = rtwvif->mac_id;
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
+ } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
+ rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
+ RTW89_MAX_MAC_ID_NUM);
}
return 0;
@@ -1866,8 +2438,14 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
rtw89_mac_bf_disassoc(rtwdev, vif, sta);
rtw89_core_free_sta_pending_ba(rtwdev, sta);
+ rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta);
+ if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+ if (sta->tdls)
+ rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
- rtw89_vif_type_mapping(vif, false);
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ rtw89_vif_type_mapping(vif, false);
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -1875,14 +2453,22 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 1);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
}
+ if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+ }
+
/* update the CAM entry's aid, mac_id and net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
@@ -1897,9 +2483,36 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
int ret;
- rtw89_vif_type_mapping(vif, true);
+ if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+
+ if (sta->tdls) {
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n");
+ return ret;
+ }
+ }
+
+ ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, bssid_cam);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
+ return ret;
+ }
+ }
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -1907,7 +2520,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
@@ -1931,7 +2544,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_mac_bf_assoc(rtwdev, vif, sta);
rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
@@ -1947,13 +2560,69 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
+ else if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
return 0;
}
+static void _rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_cfg *tid_conf)
+{
+ struct ieee80211_txq *txq;
+ struct rtw89_txq *rtwtxq;
+ u32 mask = tid_conf->mask;
+ u8 tids = tid_conf->tids;
+ int tids_nbit = BITS_PER_BYTE;
+ int i;
+
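+ /* walk the 8-bit TID bitmap; bit i selects sta->txq[i] */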
+ for (i = 0; i < tids_nbit; i++, tids >>= 1) {
+ if (!tids)
+ break;
+
+ if (!(tids & BIT(0)))
+ continue;
+
+ txq = sta->txq[i];
+ rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+
+ if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (tid_conf->ampdu == NL80211_TID_CONFIG_ENABLE) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ } else {
+ if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags))
+ ieee80211_stop_tx_ba_session(sta, txq->tid);
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_del_init(&rtwtxq->list);
+ set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ spin_unlock_bh(&rtwdev->ba_lock);
+ }
+ }
+
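+ /* max_amsdu_subframes is a per-STA knob, so only honor A-MSDU control when all TIDs (0xff) are targeted */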
+ if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL) && tids == 0xff) {
+ if (tid_conf->amsdu == NL80211_TID_CONFIG_ENABLE)
+ sta->max_amsdu_subframes = 0;
+ else
+ sta->max_amsdu_subframes = 1;
+ }
+ }
+}
+
+void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ int i;
+
+ for (i = 0; i < tid_config->n_tid_conf; i++)
+ _rtw89_core_set_tid_config(rtwdev, sta,
+ &tid_config->tid_conf[i]);
+}
+
static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
@@ -1986,9 +2655,14 @@ static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
- static const __le16 highest[RF_PATH_MAX] = {
+ static const __le16 highest_bw80[RF_PATH_MAX] = {
cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
};
+ static const __le16 highest_bw160[RF_PATH_MAX] = {
+ cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
+ };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -2017,6 +2691,9 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (chip->support_bw160)
+ vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
@@ -2074,8 +2751,7 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
he_cap->has_he = true;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
if (i == NL80211_IFTYPE_STATION)
mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
@@ -2087,8 +2763,15 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
if (i == NL80211_IFTYPE_STATION)
mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bw160)
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
@@ -2104,6 +2787,8 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bw160)
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
phy_cap_info[5] = no_ng16 ? 0 :
IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
@@ -2117,6 +2802,9 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bw160)
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
@@ -2127,6 +2815,22 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bw160) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
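+ /* 6 GHz carries no HT/VHT IEs, so advertise MPDU/A-MPDU limits via the HE 6 GHz capability element */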
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
+
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data[idx].he_6ghz_capa.capa = capa;
+ }
idx++;
}
@@ -2139,34 +2843,52 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
+ struct ieee80211_supported_band *sband_6ghz = NULL;
u32 size = sizeof(struct ieee80211_supported_band);
+ u8 support_bands = rtwdev->chip->support_bands;
- sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
- if (!sband_2ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ if (support_bands & BIT(NL80211_BAND_2GHZ)) {
+ sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
+ if (!sband_2ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ }
- sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
- if (!sband_5ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
- rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ if (support_bands & BIT(NL80211_BAND_5GHZ)) {
+ sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
+ if (!sband_5ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ }
+
+ if (support_bands & BIT(NL80211_BAND_6GHZ)) {
+ sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
+ if (!sband_6ghz)
+ goto err;
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
+ }
return 0;
err:
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
if (sband_2ghz)
kfree(sband_2ghz->iftype_data);
if (sband_5ghz)
kfree(sband_5ghz->iftype_data);
+ if (sband_6ghz)
+ kfree(sband_6ghz->iftype_data);
kfree(sband_2ghz);
kfree(sband_5ghz);
+ kfree(sband_6ghz);
return -ENOMEM;
}
@@ -2176,10 +2898,14 @@ static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
+ if (hw->wiphy->bands[NL80211_BAND_6GHZ])
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
}
static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
@@ -2192,6 +2918,21 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}
+void rtw89_core_update_beacon_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev;
+ struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
+ update_beacon_work);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ rtwdev = rtwvif->rtwdev;
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2208,8 +2949,13 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
/* efuse process */
/* pre-config BB/RF, BB reset/RFC reset */
- rtw89_mac_disable_bb_rf(rtwdev);
- rtw89_mac_enable_bb_rf(rtwdev);
+ ret = rtw89_chip_disable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
+ ret = rtw89_chip_enable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
+
rtw89_phy_init_bb_reg(rtwdev);
rtw89_phy_init_rf_reg(rtwdev);
@@ -2233,6 +2979,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable);
+ rtw89_fw_h2c_init_ba_cam(rtwdev);
return 0;
}
@@ -2262,6 +3009,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+ cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
mutex_lock(&rtwdev->mutex);
@@ -2278,10 +3026,17 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
int ret;
+ u8 band;
INIT_LIST_HEAD(&rtwdev->ba_list);
+ INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
INIT_LIST_HEAD(&rtwdev->early_h2c_list);
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ if (!(rtwdev->chip->support_bands & BIT(band)))
+ continue;
+ INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
+ }
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -2290,13 +3045,16 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
spin_lock_init(&rtwdev->ba_lock);
+ spin_lock_init(&rtwdev->rpwm_lock);
mutex_init(&rtwdev->mutex);
mutex_init(&rtwdev->rf_mutex);
rtwdev->total_sta_assoc = 0;
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
skb_queue_head_init(&rtwdev->c2h_queue);
rtw89_core_ppdu_sts_init(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
@@ -2315,6 +3073,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
return ret;
}
rtw89_ser_init(rtwdev);
+ rtw89_entity_init(rtwdev);
return 0;
}
@@ -2332,12 +3091,51 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_deinit);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ rtwdev->scanning = true;
+ rtw89_leave_lps(rtwdev);
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
+ rtw89_leave_ips(rtwdev);
+
+ ether_addr_copy(rtwvif->mac_addr, mac_addr);
+ rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
+ rtw89_chip_rfk_scan(rtwdev, true);
+ rtw89_hci_recalc_int_mit(rtwdev);
+
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+}
+
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, bool hw_scan)
+{
+ struct rtw89_vif *rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+
+ if (!rtwvif)
+ return;
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+
+ rtw89_chip_rfk_scan(rtwdev, false);
+ rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
+
+ rtwdev->scanning = false;
+ rtwdev->dig.bypass_dig = true;
+ if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
+}
+
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 cv;
cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
- if (cv <= CHIP_CBV) {
+ if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
cv = CHIP_CAV;
else
@@ -2347,6 +3145,15 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
rtwdev->hal.cv = cv;
}
+static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
+{
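+ /* CCK power-detection is unsupported on early cuts of 8852A/8852B; early 8852A cuts use IGI */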
+ rtwdev->hal.support_cckpd =
+ !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
+ !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
+ rtwdev->hal.support_igi =
+ rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV;
+}
+
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2367,6 +3174,8 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ rtw89_core_setup_phycap(rtwdev);
+
rtw89_mac_pwr_off(rtwdev);
return 0;
@@ -2419,6 +3228,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->vif_data_size = sizeof(struct rtw89_vif);
hw->sta_data_size = sizeof(struct rtw89_sta);
hw->txq_data_size = sizeof(struct rtw89_txq);
+ hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);
SET_IEEE80211_PERM_ADDR(hw, efuse->addr);
@@ -2426,6 +3236,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->queues = IEEE80211_NUM_ACS;
hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
+ hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -2438,13 +3249,30 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
+ WIPHY_FLAG_AP_UAPSD;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
+ hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+
+ hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
+ hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
+
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
ret = rtw89_core_set_supported_band(rtwdev);
@@ -2504,6 +3332,63 @@ void rtw89_core_unregister(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_unregister);
+struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ u32 bus_data_size,
+ const struct rtw89_chip_info *chip)
+{
+ struct ieee80211_hw *hw;
+ struct rtw89_dev *rtwdev;
+ struct ieee80211_ops *ops;
+ u32 driver_data_size;
+ u32 early_feat_map = 0;
+ bool no_chanctx;
+
+ rtw89_early_fw_feature_recognize(device, chip, &early_feat_map);
+
+ ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL);
+ if (!ops)
+ goto err;
+
+ no_chanctx = chip->support_chanctx_num == 0 ||
+ !(early_feat_map & BIT(RTW89_FW_FEATURE_SCAN_OFFLOAD));
+
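+ /* clearing the chanctx ops makes mac80211 fall back to the non-chanctx path */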
+ if (no_chanctx) {
+ ops->add_chanctx = NULL;
+ ops->remove_chanctx = NULL;
+ ops->change_chanctx = NULL;
+ ops->assign_vif_chanctx = NULL;
+ ops->unassign_vif_chanctx = NULL;
+ }
+
+ driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
+ hw = ieee80211_alloc_hw(driver_data_size, ops);
+ if (!hw)
+ goto err;
+
+ rtwdev = hw->priv;
+ rtwdev->hw = hw;
+ rtwdev->dev = device;
+ rtwdev->ops = ops;
+ rtwdev->chip = chip;
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ no_chanctx ? "without" : "with");
+
+ return rtwdev;
+
+err:
+ kfree(ops);
+ return NULL;
+}
+EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);
+
+void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->ops);
+ ieee80211_free_hw(rtwdev->hw);
+}
+EXPORT_SYMBOL(rtw89_free_ieee80211_hw);
+
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 7c84556ec4ad..db041b32a8c2 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -13,9 +13,9 @@
#include <net/mac80211.h>
struct rtw89_dev;
+struct rtw89_pci_info;
extern const struct ieee80211_ops rtw89_ops;
-extern const struct rtw89_chip_info rtw8852a_chip_info;
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
@@ -29,11 +29,12 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
#define INV_RF_DATA 0xffffffff
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
+#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
#define CFO_TRACK_MAX_USER 64
#define MAX_RSSI 110
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
-#define RTW89_MAX_HW_PORT_NUM 5
+#define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR)
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
@@ -56,6 +57,16 @@ enum htc_om_channel_width {
#define RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR BIT(16)
#define RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS BIT(17)
+#define RTW89_TF_PAD GENMASK(11, 0)
+#define RTW89_TF_BASIC_USER_INFO_SZ 6
+
+#define RTW89_GET_TF_USER_INFO_AID12(data) \
+ le32_get_bits(*((const __le32 *)(data)), GENMASK(11, 0))
+#define RTW89_GET_TF_USER_INFO_RUA(data) \
+ le32_get_bits(*((const __le32 *)(data)), GENMASK(19, 12))
+#define RTW89_GET_TF_USER_INFO_UL_MCS(data) \
+ le32_get_bits(*((const __le32 *)(data)), GENMASK(24, 21))
+
enum rtw89_subband {
RTW89_CH_2G = 0,
RTW89_CH_5G_BAND_1 = 1,
@@ -63,9 +74,28 @@ enum rtw89_subband {
RTW89_CH_5G_BAND_3 = 3,
RTW89_CH_5G_BAND_4 = 4,
+ RTW89_CH_6G_BAND_IDX0, /* Low */
+ RTW89_CH_6G_BAND_IDX1, /* Low */
+ RTW89_CH_6G_BAND_IDX2, /* Mid */
+ RTW89_CH_6G_BAND_IDX3, /* Mid */
+ RTW89_CH_6G_BAND_IDX4, /* High */
+ RTW89_CH_6G_BAND_IDX5, /* High */
+ RTW89_CH_6G_BAND_IDX6, /* Ultra-high */
+ RTW89_CH_6G_BAND_IDX7, /* Ultra-high */
+
RTW89_SUBBAND_NR,
};
+enum rtw89_gain_offset {
+ RTW89_GAIN_OFFSET_2G_CCK,
+ RTW89_GAIN_OFFSET_2G_OFDM,
+ RTW89_GAIN_OFFSET_5G_LOW,
+ RTW89_GAIN_OFFSET_5G_MID,
+ RTW89_GAIN_OFFSET_5G_HIGH,
+
+ RTW89_GAIN_OFFSET_NR,
+};
+
enum rtw89_hci_type {
RTW89_HCI_TYPE_PCIE,
RTW89_HCI_TYPE_USB,
@@ -109,11 +139,14 @@ enum rtw89_core_rx_type {
RTW89_CORE_RX_TYPE_C2H = 10,
RTW89_CORE_RX_TYPE_CSI = 11,
RTW89_CORE_RX_TYPE_CQI = 12,
+ RTW89_CORE_RX_TYPE_H2C = 13,
+ RTW89_CORE_RX_TYPE_FWDL = 14,
};
enum rtw89_txq_flags {
RTW89_TXQ_F_AMPDU = 0,
RTW89_TXQ_F_BLOCK_BA = 1,
+ RTW89_TXQ_F_FORBID_BA = 2,
};
enum rtw89_net_type {
@@ -140,11 +173,11 @@ enum rtw89_wifi_role {
};
enum rtw89_upd_mode {
- RTW89_VIF_CREATE,
- RTW89_VIF_REMOVE,
- RTW89_VIF_TYPE_CHANGE,
- RTW89_VIF_INFO_CHANGE,
- RTW89_VIF_CON_DISCONN
+ RTW89_ROLE_CREATE,
+ RTW89_ROLE_REMOVE,
+ RTW89_ROLE_TYPE_CHANGE,
+ RTW89_ROLE_INFO_CHANGE,
+ RTW89_ROLE_CON_DISCONN
};
enum rtw89_self_role {
@@ -205,6 +238,7 @@ enum rtw89_port {
enum rtw89_band {
RTW89_BAND_2G = 0,
RTW89_BAND_5G = 1,
+ RTW89_BAND_6G = 2,
RTW89_BAND_MAX,
};
@@ -363,6 +397,25 @@ enum rtw89_hw_rate {
*/
#define RTW89_5G_CH_NUM 53
+/* 6G channels,
+ * 1, 3, 5, 7, 9, 11, 13, 15,
+ * 17, 19, 21, 23, 25, 27, 29, 33,
+ * 35, 37, 39, 41, 43, 45, 47, 49,
+ * 51, 53, 55, 57, 59, 61, 65, 67,
+ * 69, 71, 73, 75, 77, 79, 81, 83,
+ * 85, 87, 89, 91, 93, 97, 99, 101,
+ * 103, 105, 107, 109, 111, 113, 115, 117,
+ * 119, 121, 123, 125, 129, 131, 133, 135,
+ * 137, 139, 141, 143, 145, 147, 149, 151,
+ * 153, 155, 157, 161, 163, 165, 167, 169,
+ * 171, 173, 175, 177, 179, 181, 183, 185,
+ * 187, 189, 193, 195, 197, 199, 201, 203,
+ * 205, 207, 209, 211, 213, 215, 217, 219,
+ * 221, 225, 227, 229, 231, 233, 235, 237,
+ * 239, 241, 243, 245, 247, 249, 251, 253,
+ */
+#define RTW89_6G_CH_NUM 120
+
enum rtw89_rate_section {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -371,6 +424,7 @@ enum rtw89_rate_section {
RTW89_RS_OFFSET,
RTW89_RS_MAX,
RTW89_RS_LMT_NUM = RTW89_RS_MCS + 1,
+ RTW89_RS_TX_SHAPE_NUM = RTW89_RS_OFDM + 1,
};
enum rtw89_rate_max {
@@ -418,12 +472,10 @@ enum rtw89_regulation_type {
RTW89_UKRAINE = 11,
RTW89_CN = 12,
RTW89_QATAR = 13,
+ RTW89_UK = 14,
RTW89_REGD_NUM,
};
-extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
-extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
-
struct rtw89_txpwr_byrate {
s8 cck[RTW89_RATE_CCK_MAX];
s8 ofdm[RTW89_RATE_OFDM_MAX];
@@ -471,7 +523,7 @@ struct rtw89_rx_phy_ppdu {
u8 *buf;
u32 len;
u8 rssi_avg;
- s8 rssi[RF_PATH_MAX];
+ u8 rssi[RF_PATH_MAX];
u8 mac_id;
u8 chan_idx;
u8 ie;
@@ -491,6 +543,12 @@ enum rtw89_phy_idx {
RTW89_PHY_MAX
};
+enum rtw89_sub_entity_idx {
+ RTW89_SUB_ENTITY_0 = 0,
+
+ NUM_OF_RTW89_SUB_ENTITY,
+};
+
enum rtw89_rf_path {
RF_PATH_A = 0,
RF_PATH_B = 1,
@@ -548,8 +606,9 @@ enum rtw89_ps_mode {
};
#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
-#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
-#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
+#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
@@ -564,19 +623,35 @@ enum rtw89_sc_offset {
RTW89_SC_20_LOWER = 2,
RTW89_SC_20_UPMOST = 3,
RTW89_SC_20_LOWEST = 4,
+ RTW89_SC_20_UP2X = 5,
+ RTW89_SC_20_LOW2X = 6,
+ RTW89_SC_20_UP3X = 7,
+ RTW89_SC_20_LOW3X = 8,
RTW89_SC_40_UPPER = 9,
RTW89_SC_40_LOWER = 10,
};
-struct rtw89_channel_params {
- u8 center_chan;
- u8 primary_chan;
- u8 bandwidth;
- u8 pri_ch_idx;
+struct rtw89_chan {
+ u8 channel;
+ u8 primary_channel;
+ enum rtw89_band band_type;
+ enum rtw89_bandwidth band_width;
+
+ /* The fields below are derived from the ones above. Make sure they
+ * are assigned correctly in rtw89_chan_create() whenever a new field
+ * is added.
+ */
+ u32 freq;
+ enum rtw89_subband subband_type;
+ enum rtw89_sc_offset pri_ch_idx;
+};
+
+struct rtw89_chan_rcd {
+ u8 prev_primary_channel;
+ enum rtw89_band prev_band_type;
};
struct rtw89_channel_help_params {
- u16 tx_en;
+ u32 tx_en;
};
struct rtw89_port_reg {
@@ -606,6 +681,17 @@ struct rtw89_txwd_body {
__le32 dword5;
} __packed;
+struct rtw89_txwd_body_v1 {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+ __le32 dword6;
+ __le32 dword7;
+} __packed;
+
struct rtw89_txwd_info {
__le32 dword0;
__le32 dword1;
@@ -670,6 +756,7 @@ struct rtw89_rxdesc_long {
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
+ u8 mac_id;
u8 qsel;
u8 ch_dma;
u8 hdr_llc_len;
@@ -684,13 +771,22 @@ struct rtw89_tx_desc_info {
u8 ampdu_density;
u8 ampdu_num;
bool sec_en;
+ u8 addr_info_nr;
+ u8 sec_keyid;
u8 sec_type;
u8 sec_cam_idx;
+ u8 sec_seq[6];
u16 data_rate;
u16 data_retry_lowest_rate;
bool fw_dl;
u16 seq;
bool a_ctrl_bsr;
+ u8 hw_ssn_sel;
+#define RTW89_MGMT_HW_SSN_SEL 1
+ u8 hw_seq_mode;
+#define RTW89_MGMT_HW_SEQ_MODE 1
+ bool hiq;
+ u8 port;
};
struct rtw89_core_tx_request {
@@ -713,7 +809,7 @@ struct rtw89_mac_ax_gnt {
u8 gnt_bt;
u8 gnt_wl_sw_en;
u8 gnt_wl;
-};
+} __packed;
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
@@ -768,6 +864,7 @@ enum rtw89_btc_dcnt {
BTC_DCNT_SLOT_NONSYNC,
BTC_DCNT_BTCNT_FREEZE,
BTC_DCNT_WL_SLOT_DRIFT,
+ BTC_DCNT_BT_SLOT_DRIFT,
BTC_DCNT_WL_STA_LAST,
BTC_DCNT_NUM,
};
@@ -840,12 +937,12 @@ struct rtw89_btc_wl_smap {
u32 roaming: 1;
u32 _4way: 1;
u32 rf_off: 1;
- u32 lps: 1;
+ u32 lps: 2;
u32 ips: 1;
u32 init_ok: 1;
u32 traffic_dir : 2;
u32 rf_off_pre: 1;
- u32 lps_pre: 1;
+ u32 lps_pre: 2;
};
enum rtw89_tfc_lv {
@@ -875,6 +972,10 @@ struct rtw89_traffic_stats {
u32 rx_throughput;
u32 tx_throughput_raw;
u32 rx_throughput_raw;
+
+ u32 rx_tf_acc;
+ u32 rx_tf_periodic;
+
enum rtw89_tfc_lv tx_tfc_lv;
enum rtw89_tfc_lv rx_tfc_lv;
struct ewma_tp tx_ewma_tp;
@@ -1024,6 +1125,27 @@ struct rtw89_btc_wl_active_role {
u16 rx_rate;
};
+struct rtw89_btc_wl_active_role_v1 {
+ u8 connected: 1;
+ u8 pid: 3;
+ u8 phy: 1;
+ u8 noa: 1;
+ u8 band: 2;
+
+ u8 client_ps: 1;
+ u8 bw: 7;
+
+ u8 role;
+ u8 ch;
+
+ u16 tx_lvl;
+ u16 rx_lvl;
+ u16 tx_rate;
+ u16 rx_rate;
+
+ u32 noa_duration; /* ms */
+};
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1039,6 +1161,12 @@ struct rtw89_btc_wl_role_info_bpos {
u16 nan: 1;
};
+struct rtw89_btc_wl_scc_ctrl {
+ u8 null_role1;
+ u8 null_role2;
+ u8 ebt_null; /* whether to TX a null frame in the EBT slot */
+};
+
union rtw89_btc_wl_role_info_map {
u16 val;
struct rtw89_btc_wl_role_info_bpos role;
@@ -1048,7 +1176,22 @@ struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
union rtw89_btc_wl_role_info_map role_map;
- struct rtw89_btc_wl_active_role active_role[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_active_role active_role[RTW89_PORT_NUM];
+};
+
+struct rtw89_btc_wl_role_info_v1 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ union rtw89_btc_wl_role_info_map role_map;
+ struct rtw89_btc_wl_active_role_v1 active_role_v1[RTW89_PORT_NUM];
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+
+ u32 dbcc_en: 1;
+ u32 dbcc_chg: 1;
+ u32 dbcc_2g_phy: 2; /* which PHY operates in 2G: HW_PHY_0 or HW_PHY_1 */
+ u32 link_mode_chg: 1;
+ u32 rsvd: 27;
};
struct rtw89_btc_wl_ver_info {
@@ -1151,11 +1294,12 @@ struct rtw89_btc_rf_para {
};
struct rtw89_btc_wl_info {
- struct rtw89_btc_wl_link_info link_info[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM];
struct rtw89_btc_wl_rfk_info rfk_info;
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
struct rtw89_btc_wl_role_info role_info;
+ struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
struct rtw89_btc_rf_para rf_para;
@@ -1164,6 +1308,7 @@ struct rtw89_btc_wl_info {
u8 port_id[RTW89_WIFI_ROLE_MLME_MAX];
u8 rssi_level;
+ bool scbd_change;
u32 scbd;
};
@@ -1249,7 +1394,8 @@ struct rtw89_btc_bt_info {
u32 pag: 1;
u32 run_patch_code: 1;
u32 hi_lna_rx: 1;
- u32 rsvd: 22;
+ u32 scan_rx_low_pri: 1;
+ u32 rsvd: 21;
};
struct rtw89_btc_cx {
@@ -1262,32 +1408,43 @@ struct rtw89_btc_cx {
};
struct rtw89_btc_fbtc_tdma {
- u8 type;
+ u8 type; /* chip_info::fcxtdma_ver */
u8 rxflctrl;
u8 txpause;
u8 wtgle_n;
u8 leak_n;
u8 ext_ctrl;
- u8 rsvd0;
- u8 rsvd1;
+ u8 rxflctrl_role;
+ u8 option_ctrl;
+} __packed;
+
+struct rtw89_btc_fbtc_tdma_v1 {
+ u8 fver; /* chip_info::fcxtdma_ver */
+ u8 rsvd;
+ __le16 rsvd1;
+ struct rtw89_btc_fbtc_tdma tdma;
} __packed;
#define CXMREG_MAX 30
#define FCXMAX_STEP 255 /* STEP trace record cnt, max: 65535, default: 255 */
-#define BTCRPT_VER 1
#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */
-enum rtw89_btc_bt_rfk_counter {
+enum rtw89_btc_bt_sta_counter {
BTC_BCNT_RFK_REQ = 0,
BTC_BCNT_RFK_GO = 1,
BTC_BCNT_RFK_REJECT = 2,
BTC_BCNT_RFK_FAIL = 3,
BTC_BCNT_RFK_TIMEOUT = 4,
- BTC_BCNT_RFK_MAX
+ BTC_BCNT_HI_TX = 5,
+ BTC_BCNT_HI_RX = 6,
+ BTC_BCNT_LO_TX = 7,
+ BTC_BCNT_LO_RX = 8,
+ BTC_BCNT_POLLUTED = 9,
+ BTC_BCNT_STA_MAX
};
struct rtw89_btc_fbtc_rpt_ctrl {
- u16 fver;
+ u16 fver; /* chip_info::fcxbtcrpt_ver */
u16 rpt_cnt; /* tmr counters */
u32 wl_fw_coex_ver; /* match which driver's coex version */
u32 wl_fw_cx_offload;
@@ -1300,11 +1457,56 @@ struct rtw89_btc_fbtc_rpt_ctrl {
u32 mb_a2dp_empty_cnt; /* a2dp empty count */
u32 mb_a2dp_flct_cnt; /* a2dp empty flow control counter */
u32 mb_a2dp_full_cnt; /* a2dp empty full counter */
- u32 bt_rfk_cnt[BTC_BCNT_RFK_MAX];
+ u32 bt_rfk_cnt[BTC_BCNT_HI_TX];
u32 c2h_cnt; /* fw send c2h counter */
u32 h2c_cnt; /* fw recv h2c counter */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info {
+ __le32 cnt; /* fw report counter */
+ __le32 en; /* report map */
+ __le32 para; /* not used */
+
+ __le32 cnt_c2h; /* fw send c2h counter */
+ __le32 cnt_h2c; /* fw recv h2c counter */
+ __le32 len_c2h; /* The total length of the last C2H */
+
+ __le32 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le32 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 cx_offload;
+ __le32 fw_ver;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty {
+ __le32 cnt_empty; /* a2dp empty count */
+ __le32 cnt_flowctrl; /* a2dp empty flow control counter */
+ __le32 cnt_tx;
+ __le32 cnt_ack;
+ __le32 cnt_nack;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox {
+ __le32 cnt_send_ok; /* fw send mailbox ok counter */
+ __le32 cnt_send_fail; /* fw send mailbox fail counter */
+ __le32 cnt_recv; /* fw recv mailbox counter */
+ struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty a2dp;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd1;
+ struct rtw89_btc_fbtc_rpt_ctrl_info rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info wl_fw_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+ __le32 bt_cnt[BTC_BCNT_STA_MAX];
+ struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
+} __packed;
+
enum rtw89_fbtc_ext_ctrl_type {
CXECTL_OFF = 0x0, /* tdma off */
CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */
@@ -1373,10 +1575,9 @@ enum { /* STEP TYPE */
CXSTEP_MAX,
};
-#define FCXGPIODBG_VER 1
#define BTC_DBG_MAX1 32
struct rtw89_btc_fbtc_gpio_dbg {
- u8 fver;
+ u8 fver; /* chip_info::fcxgpiodbg_ver */
u8 rsvd;
u16 rsvd2;
u32 en_map; /* which debug signals (see btc_wl_gpio_debug) are enabled */
@@ -1384,9 +1585,8 @@ struct rtw89_btc_fbtc_gpio_dbg {
u8 gpio_map[BTC_DBG_MAX1]; /* mapping of debug signals to GPIO positions */
} __packed;
-#define FCXMREG_VER 1
struct rtw89_btc_fbtc_mreg_val {
- u8 fver;
+ u8 fver; /* chip_info::fcxmreg_ver */
u8 reg_num;
__le16 rsvd;
__le32 mreg_val[CXMREG_MAX];
@@ -1408,16 +1608,14 @@ struct rtw89_btc_fbtc_slot {
__le16 cxtype;
} __packed;
-#define FCXSLOTS_VER 1
struct rtw89_btc_fbtc_slots {
- u8 fver;
+ u8 fver; /* chip_info::fcxslots_ver */
u8 tbl_num;
__le16 rsvd;
__le32 update_map;
struct rtw89_btc_fbtc_slot slot[CXST_MAX];
} __packed;
-#define FCXSTEP_VER 2
struct rtw89_btc_fbtc_step {
u8 type;
u8 val;
@@ -1425,7 +1623,7 @@ struct rtw89_btc_fbtc_step {
} __packed;
struct rtw89_btc_fbtc_steps {
- u8 fver;
+ u8 fver; /* chip_info::fcxstep_ver */
u8 rsvd;
__le16 cnt;
__le16 pos_old;
@@ -1433,9 +1631,16 @@ struct rtw89_btc_fbtc_steps {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-#define FCXCYSTA_VER 2
-struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+struct rtw89_btc_fbtc_steps_v1 {
u8 fver;
+ u8 en;
+ __le16 rsvd;
+ __le32 cnt;
+ struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
+} __packed;
+
+struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+ u8 fver; /* chip_info::fcxcysta_ver */
u8 rsvd;
__le16 cycles; /* total cycle number */
__le16 cycles_a2dp[CXT_FLCTRL_MAX];
@@ -1460,19 +1665,80 @@ struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
__le16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
} __packed;
-#define FCXNULLSTA_VER 1
-struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+struct rtw89_btc_fbtc_fdd_try_info {
+ __le16 cycles[CXT_FLCTRL_MAX];
+ __le16 tavg[CXT_FLCTRL_MAX]; /* avg BT-slot TDD/FDD try time */
+ __le16 tmax[CXT_FLCTRL_MAX]; /* max BT-slot TDD/FDD try time */
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_time_info {
+ __le16 tavg[CXT_MAX]; /* avg wl/bt cycle time */
+ __le16 tmax[CXT_MAX]; /* max wl/bt cycle time */
+ __le16 tmaxdiff[CXT_MAX]; /* max wl-wl bt-bt cycle diff time */
+} __packed;
+
+struct rtw89_btc_fbtc_a2dp_trx_stat {
+ u8 empty_cnt;
+ u8 retry_cnt;
+ u8 tx_rate;
+ u8 tx_cnt;
+ u8 ack_cnt;
+ u8 nack_cnt;
+ u8 rsvd1;
+ u8 rsvd2;
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_a2dp_empty_info {
+ __le16 cnt; /* a2dp empty cnt */
+ __le16 cnt_timeout; /* a2dp empty timeout cnt */
+ __le16 tavg; /* avg a2dp empty time */
+ __le16 tmax; /* max a2dp empty time */
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_leak_info {
+ __le32 cnt_rximr; /* count of rximr occurring during leak slots */
+ __le16 tavg; /* avg leak-slot time */
+ __le16 tmax; /* max leak-slot time */
+} __packed;
+
+struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
u8 fver;
u8 rsvd;
+ __le16 cycles; /* total cycle number */
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_time_info cycle_time;
+ struct rtw89_btc_fbtc_fdd_try_info fdd_try;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_a2dp_trx_stat a2dp_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_leak_info leak_slot;
+ __le32 slot_cnt[CXST_MAX]; /* slot count */
+ __le32 bcn_cnt[CXBCN_MAX];
+ __le32 collision_cnt; /* count of events/timers occurring at the same time */
+ __le32 skip_cnt;
+ __le32 except_cnt;
+ __le32 except_map;
+} __packed;
+
+struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+ u8 fver; /* chip_info::fcxnullsta_ver */
+ u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
__le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
__le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */
} __packed;
-#define FCX_BTVER_VER 1
+struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
+ u8 fver; /* chip_info::fcxnullsta_ver */
+ u8 rsvd;
+ __le16 rsvd2;
+ __le32 max_t[2]; /* max_t for 0:null0/1:null1 */
+ __le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
+ __le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
+} __packed;
+
struct rtw89_btc_fbtc_btver {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtver_ver */
u8 rsvd;
__le16 rsvd2;
__le32 coex_ver; /* bit[15:8]: shared, bit[7:0]: non-shared */
@@ -1480,17 +1746,15 @@ struct rtw89_btc_fbtc_btver {
__le32 feature;
} __packed;
-#define FCX_BTSCAN_VER 1
struct rtw89_btc_fbtc_btscan {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtscan_ver */
u8 rsvd;
__le16 rsvd2;
u8 scan[6];
} __packed;
-#define FCX_BTAFH_VER 1
struct rtw89_btc_fbtc_btafh {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtafh_ver */
u8 rsvd;
__le16 rsvd2;
u8 afh_l[4]; /* bit0: 2402, bit1: 2403, ..., bit31: 2433 */
@@ -1498,9 +1762,8 @@ struct rtw89_btc_fbtc_btafh {
u8 afh_h[4]; /* bit0: 2466, bit1: 2467, ..., bit14: 2480 */
} __packed;
-#define FCX_BTDEVINFO_VER 1
struct rtw89_btc_fbtc_btdevinfo {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtdevinfo_ver */
u8 rsvd;
__le16 vendor_id;
__le32 dev_name; /* only 24 bits valid */
@@ -1525,6 +1788,7 @@ struct rtw89_btc_dm {
struct rtw89_btc_rf_trx_para rf_trx_para;
struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
struct rtw89_btc_dm_step dm_step;
+ struct rtw89_btc_wl_scc_ctrl wl_scc;
union rtw89_btc_dm_error_map error;
u32 cnt_dm[BTC_DCNT_NUM];
u32 cnt_notify[BTC_NCNT_NUM];
@@ -1544,7 +1808,9 @@ struct rtw89_btc_dm {
u32 wl_btg_rx: 1;
u32 trx_para_level: 8;
u32 wl_stb_chg: 1;
- u32 rsvd: 3;
+ u32 pta_owner: 1;
+ u32 tdma_instant_excute: 1;
+ u32 rsvd: 1;
u16 slot_dur[CXST_MAX];
@@ -1566,8 +1832,6 @@ struct rtw89_btc_dbg {
u32 rb_val;
};
-#define FCXTDMA_VER 1
-
enum rtw89_btc_btf_fw_event {
BTF_EVNT_RPT = 0,
BTF_EVNT_BT_INFO = 1,
@@ -1620,12 +1884,18 @@ struct rtw89_btc_rpt_cmn_info {
struct rtw89_btc_report_ctrl_state {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw for 52A */
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+ struct rtw89_btc_fbtc_tdma_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_slots {
@@ -1635,17 +1905,26 @@ struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_fbtc_cysta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_cysta finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_cysta finfo; /* info from fw for 52A */
+ struct rtw89_btc_fbtc_cysta_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_step {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+ struct rtw89_btc_fbtc_steps_v1 finfo_v1; /* info from fw */
+ };
};
struct rtw89_btc_rpt_fbtc_nullsta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+ struct rtw89_btc_fbtc_cynullsta_v1 finfo_v1; /* info from fw */
+ };
};
struct rtw89_btc_rpt_fbtc_mreg {
@@ -1803,7 +2082,9 @@ struct rtw89_ra_info {
u8 ra_csi_rate_en:1;
u8 fixed_csi_rate_en:1;
u8 cr_tbl_sel:1;
- u8 rsvd2:5;
+ u8 fix_giltf_en:1;
+ u8 fix_giltf:3;
+ u8 rsvd2:1;
u8 csi_mcs_ss_idx;
u8 csi_mode:2;
u8 csi_gi_ltf:3;
@@ -1827,36 +2108,20 @@ struct rtw89_ra_report {
struct rate_info txrate;
u32 bit_rate;
u16 hw_rate;
+ bool might_fallback_legacy;
};
DECLARE_EWMA(rssi, 10, 16);
-struct rtw89_sta {
- u8 mac_id;
- bool disassoc;
- struct rtw89_vif *rtwvif;
- struct rtw89_ra_info ra;
- struct rtw89_ra_report ra_report;
- int max_agg_wait;
- u8 prev_rssi;
- struct ewma_rssi avg_rssi;
- struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
- struct ieee80211_rx_status rx_status;
- u16 rx_hw_rate;
- __le32 htc_template;
-
- bool use_cfg_mask;
- struct cfg80211_bitrate_mask mask;
-
- bool cctl_tx_time;
- u32 ampdu_max_time:4;
- bool cctl_tx_retry_limit;
- u32 data_tx_cnt_lmt:6;
+struct rtw89_ba_cam_entry {
+ struct list_head list;
+ u8 tid;
};
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
+#define RTW89_MAX_BA_CAM_NUM 8
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -1868,7 +2133,6 @@ struct rtw89_addr_cam_entry {
u8 wapi : 1;
u8 mask_sel : 2;
u8 bssid_cam_idx: 6;
- u8 sma[ETH_ALEN];
u8 sec_ent_mode;
DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
@@ -1898,6 +2162,34 @@ struct rtw89_sec_cam_entry {
u8 key[32];
};
+struct rtw89_sta {
+ u8 mac_id;
+ bool disassoc;
+ struct rtw89_dev *rtwdev;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_ra_info ra;
+ struct rtw89_ra_report ra_report;
+ int max_agg_wait;
+ u8 prev_rssi;
+ struct ewma_rssi avg_rssi;
+ struct ewma_rssi rssi[RF_PATH_MAX];
+ struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ struct ieee80211_rx_status rx_status;
+ u16 rx_hw_rate;
+ __le32 htc_template;
+ struct rtw89_addr_cam_entry addr_cam; /* AP mode or TDLS peer only */
+ struct rtw89_bssid_cam_entry bssid_cam; /* TDLS peer only */
+ struct list_head ba_cam_list;
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask mask;
+
+ bool cctl_tx_time;
+ u32 ampdu_max_time:4;
+ bool cctl_tx_retry_limit;
+ u32 data_tx_cnt_lmt:6;
+};
+
struct rtw89_efuse {
bool valid;
u8 xtal_cap;
@@ -1913,8 +2205,11 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
+#define RTW89_P2P_MAX_NOA_NUM 2
+
struct rtw89_vif {
struct list_head list;
+ struct rtw89_dev *rtwdev;
u8 mac_id;
u8 port;
u8 mac_addr[ETH_ALEN];
@@ -1927,6 +2222,7 @@ struct rtw89_vif {
u8 wmm;
u8 bcn_hit_cond;
u8 hit_rule;
+ u8 last_noa_nr;
bool trigger;
bool lsig_txop;
u8 tgt_ind;
@@ -1936,11 +2232,14 @@ struct rtw89_vif {
bool wowlan_magic;
bool is_hesta;
bool last_a_ctrl;
+ struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
struct rtw89_traffic_stats stats;
struct rtw89_phy_rate_pattern rate_pattern;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_scan_ies *scan_ies;
};
enum rtw89_lv1_rcvy_step {
@@ -1955,6 +2254,8 @@ struct rtw89_hci_ops {
void (*reset)(struct rtw89_dev *rtwdev);
int (*start)(struct rtw89_dev *rtwdev);
void (*stop)(struct rtw89_dev *rtwdev);
+ void (*pause)(struct rtw89_dev *rtwdev, bool pause);
+ void (*switch_mode)(struct rtw89_dev *rtwdev, bool low_power);
void (*recalc_int_mit)(struct rtw89_dev *rtwdev);
u8 (*read8)(struct rtw89_dev *rtwdev, u32 addr);
@@ -1972,6 +2273,13 @@ struct rtw89_hci_ops {
int (*mac_lv1_rcvy)(struct rtw89_dev *rtwdev, enum rtw89_lv1_rcvy_step step);
void (*dump_err_status)(struct rtw89_dev *rtwdev);
int (*napi_poll)(struct napi_struct *napi, int budget);
+
+ /* Each HCI instance handles its own locking inside the recovery_start
+ * and recovery_complete callbacks, and takes care of anything that
+ * needs special handling under SER, e.g. turning interrupts on/off
+ * except for the one used for halt notification.
+ */
+ void (*recovery_start)(struct rtw89_dev *rtwdev);
+ void (*recovery_complete)(struct rtw89_dev *rtwdev);
};
struct rtw89_hci_info {
@@ -1979,9 +2287,12 @@ struct rtw89_hci_info {
enum rtw89_hci_type type;
u32 rpwm_addr;
u32 cpwm_addr;
+ bool paused;
};
struct rtw89_chip_ops {
+ int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
+ int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -1990,20 +2301,29 @@ struct rtw89_chip_ops {
bool (*write_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void (*set_channel)(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param);
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx);
void (*set_channel_help)(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p);
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
- void (*rfk_band_changed)(struct rtw89_dev *rtwdev);
+ void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
void (*power_trim)(struct rtw89_dev *rtwdev);
- void (*set_txpwr)(struct rtw89_dev *rtwdev);
- void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg);
@@ -2011,8 +2331,26 @@ struct rtw89_chip_ops {
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en);
+ void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
- s16 pw_ofst, enum rtw89_mac_idx mac_idx);
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ int (*pwr_on_func)(struct rtw89_dev *rtwdev);
+ int (*pwr_off_func)(struct rtw89_dev *rtwdev);
+ void (*fill_txdesc)(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+ void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+ int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
+ int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+ int (*stop_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+ int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+ int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -2022,6 +2360,8 @@ struct rtw89_chip_ops {
void (*btc_bt_aci_imp)(struct rtw89_dev *rtwdev);
void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
+ void (*btc_set_policy)(struct rtw89_dev *rtwdev, u16 policy_type);
+ void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level);
};
enum rtw89_dma_ch {
@@ -2133,6 +2473,7 @@ struct rtw89_ple_quota {
u16 bb_rpt;
u16 wd_rel;
u16 cpu_io;
+ u16 tx_rpt;
};
struct rtw89_dle_mem {
@@ -2173,6 +2514,8 @@ struct rtw89_phy_table {
const struct rtw89_reg2_def *regs;
u32 n_regs;
enum rtw89_rf_path rf_path;
+ void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path, void *data);
};
struct rtw89_txpwr_table {
@@ -2182,47 +2525,148 @@ struct rtw89_txpwr_table {
const struct rtw89_txpwr_table *tbl);
};
+struct rtw89_page_regs {
+ u32 hci_fc_ctrl;
+ u32 ch_page_ctrl;
+ u32 ach_page_ctrl;
+ u32 ach_page_info;
+ u32 pub_page_info3;
+ u32 pub_page_ctrl1;
+ u32 pub_page_ctrl2;
+ u32 pub_page_info1;
+ u32 pub_page_info2;
+ u32 wp_page_ctrl1;
+ u32 wp_page_ctrl2;
+ u32 wp_page_info1;
+};
+
+struct rtw89_imr_info {
+ u32 wdrls_imr_set;
+ u32 wsec_imr_reg;
+ u32 wsec_imr_set;
+ u32 mpdu_tx_imr_set;
+ u32 mpdu_rx_imr_set;
+ u32 sta_sch_imr_set;
+ u32 txpktctl_imr_b0_reg;
+ u32 txpktctl_imr_b0_clr;
+ u32 txpktctl_imr_b0_set;
+ u32 txpktctl_imr_b1_reg;
+ u32 txpktctl_imr_b1_clr;
+ u32 txpktctl_imr_b1_set;
+ u32 wde_imr_clr;
+ u32 wde_imr_set;
+ u32 ple_imr_clr;
+ u32 ple_imr_set;
+ u32 host_disp_imr_clr;
+ u32 host_disp_imr_set;
+ u32 cpu_disp_imr_clr;
+ u32 cpu_disp_imr_set;
+ u32 other_disp_imr_clr;
+ u32 other_disp_imr_set;
+ u32 bbrpt_com_err_imr_reg;
+ u32 bbrpt_chinfo_err_imr_reg;
+ u32 bbrpt_err_imr_set;
+ u32 bbrpt_dfs_err_imr_reg;
+ u32 ptcl_imr_clr;
+ u32 ptcl_imr_set;
+ u32 cdma_imr_0_reg;
+ u32 cdma_imr_0_clr;
+ u32 cdma_imr_0_set;
+ u32 cdma_imr_1_reg;
+ u32 cdma_imr_1_clr;
+ u32 cdma_imr_1_set;
+ u32 phy_intf_imr_reg;
+ u32 phy_intf_imr_clr;
+ u32 phy_intf_imr_set;
+ u32 rmac_imr_reg;
+ u32 rmac_imr_clr;
+ u32 rmac_imr_set;
+ u32 tmac_imr_reg;
+ u32 tmac_imr_clr;
+ u32 tmac_imr_set;
+};
+
+struct rtw89_rrsr_cfgs {
+ struct rtw89_reg3_def ref_rate;
+ struct rtw89_reg3_def rsc;
+};
+
+struct rtw89_dig_regs {
+ u32 seg0_pd_reg;
+ u32 pd_lower_bound_mask;
+ u32 pd_spatial_reuse_en;
+ struct rtw89_reg_def p0_lna_init;
+ struct rtw89_reg_def p1_lna_init;
+ struct rtw89_reg_def p0_tia_init;
+ struct rtw89_reg_def p1_tia_init;
+ struct rtw89_reg_def p0_rxb_init;
+ struct rtw89_reg_def p1_rxb_init;
+ struct rtw89_reg_def p0_p20_pagcugc_en;
+ struct rtw89_reg_def p0_s20_pagcugc_en;
+ struct rtw89_reg_def p1_p20_pagcugc_en;
+ struct rtw89_reg_def p1_s20_pagcugc_en;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
const char *fw_name;
u32 fifo_size;
+ u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
bool dis_2g_40m_ul_ofdma;
+ u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
+ u8 support_chanctx_num;
+ u8 support_bands;
+ bool support_bw160;
+ bool hw_sec_hdr;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
u8 acam_num;
u8 bcam_num;
u8 scam_num;
+ u8 bacam_num;
+ u8 bacam_dynamic_num;
+ bool bacam_v1;
u8 sec_ctrl_efuse_size;
u32 physical_efuse_size;
u32 logical_efuse_size;
u32 limit_efuse_size;
+ u32 dav_phy_efuse_size;
+ u32 dav_log_efuse_size;
u32 phycap_addr;
u32 phycap_size;
const struct rtw89_pwr_cfg * const *pwr_on_seq;
const struct rtw89_pwr_cfg * const *pwr_off_seq;
const struct rtw89_phy_table *bb_table;
+ const struct rtw89_phy_table *bb_gain_table;
const struct rtw89_phy_table *rf_table[RF_PATH_MAX];
const struct rtw89_phy_table *nctl_table;
const struct rtw89_txpwr_table *byr_table;
const struct rtw89_phy_dig_gain_table *dig_table;
+ const struct rtw89_dig_regs *dig_regs;
+ const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table;
const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM];
const s8 (*txpwr_lmt_5g)[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_6g)[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
const s8 (*txpwr_lmt_ru_2g)[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM];
const s8 (*txpwr_lmt_ru_5g)[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_ru_6g)[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@@ -2232,6 +2676,20 @@ struct rtw89_chip_info {
u8 btcx_desired;
u8 scbd;
u8 mailbox;
+ u16 btc_fwinfo_buf;
+
+ u8 fcxbtcrpt_ver;
+ u8 fcxtdma_ver;
+ u8 fcxslots_ver;
+ u8 fcxcysta_ver;
+ u8 fcxstep_ver;
+ u8 fcxnullsta_ver;
+ u8 fcxmreg_ver;
+ u8 fcxgpiodbg_ver;
+ u8 fcxbtver_ver;
+ u8 fcxbtscan_ver;
+ u8 fcxbtafh_ver;
+ u8 fcxbtdevinfo_ver;
u8 afh_guard_ch;
const u8 *wl_rssi_thres;
@@ -2245,6 +2703,31 @@ struct rtw89_chip_info {
u8 rf_para_dlink_num;
const struct rtw89_btc_rf_trx_para *rf_para_dlink;
u8 ps_mode_supported;
+ u8 low_power_hci_modes;
+
+ u32 h2c_cctl_func_id;
+ u32 hci_func_en_addr;
+ u32 h2c_desc_size;
+ u32 txwd_body_size;
+ u32 h2c_ctrl_reg;
+ const u32 *h2c_regs;
+ u32 c2h_ctrl_reg;
+ const u32 *c2h_regs;
+ const struct rtw89_page_regs *page_regs;
+ const struct rtw89_reg_def *dcfo_comp;
+ u8 dcfo_comp_sft;
+ const struct rtw89_imr_info *imr_info;
+ const struct rtw89_rrsr_cfgs *rrsr_cfgs;
+ u32 dma_ch_mask;
+};
+
+union rtw89_bus_info {
+ const struct rtw89_pci_info *pci;
+};
+
+struct rtw89_driver_info {
+ const struct rtw89_chip_info *chip;
+ union rtw89_bus_info bus;
};
enum rtw89_hcifc_mode {
@@ -2282,6 +2765,15 @@ enum rtw89_fw_type {
RTW89_FW_WOWLAN = 3,
};
+enum rtw89_fw_feature {
+ RTW89_FW_FEATURE_OLD_HT_RA_FORMAT,
+ RTW89_FW_FEATURE_SCAN_OFFLOAD,
+ RTW89_FW_FEATURE_TX_WAKE,
+ RTW89_FW_FEATURE_CRASH_TRIGGER,
+ RTW89_FW_FEATURE_PACKET_DROP,
+ RTW89_FW_FEATURE_NO_DEEP_PS,
+};
+
struct rtw89_fw_suit {
const u8 *data;
u32 size;
@@ -2302,6 +2794,18 @@ struct rtw89_fw_suit {
#define RTW89_FW_SUIT_VER_CODE(s) \
RTW89_FW_VER_CODE((s)->major_ver, (s)->minor_ver, (s)->sub_ver, (s)->sub_idex)
+#define RTW89_MFW_HDR_VER_CODE(mfw_hdr) \
+ RTW89_FW_VER_CODE((mfw_hdr)->ver.major, \
+ (mfw_hdr)->ver.minor, \
+ (mfw_hdr)->ver.sub, \
+ (mfw_hdr)->ver.idx)
+
+#define RTW89_FW_HDR_VER_CODE(fw_hdr) \
+ RTW89_FW_VER_CODE(GET_FW_HDR_MAJOR_VERSION(fw_hdr), \
+ GET_FW_HDR_MINOR_VERSION(fw_hdr), \
+ GET_FW_HDR_SUBVERSION(fw_hdr), \
+ GET_FW_HDR_SUBINDEX(fw_hdr))
+
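
These comparisons only behave like version comparisons if RTW89_FW_VER_CODE packs the four fields in descending byte order; a minimal sketch of that assumption (the packing shown here is inferred, not quoted from the header):

/* Assumed packing: major 31..24, minor 23..16, sub 15..8, idx 7..0. */
#define EXAMPLE_FW_VER_CODE(major, minor, sub, idx) \
	(((major) << 24) | ((minor) << 16) | ((sub) << 8) | (idx))

/* With that layout, 0.13.35.0 compares greater than 0.13.29.0. */
static_assert(EXAMPLE_FW_VER_CODE(0, 13, 35, 0) >
	      EXAMPLE_FW_VER_CODE(0, 13, 29, 0));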
struct rtw89_fw_info {
const struct firmware *firmware;
struct rtw89_dev *rtwdev;
@@ -2311,13 +2815,21 @@ struct rtw89_fw_info {
struct rtw89_fw_suit normal;
struct rtw89_fw_suit wowlan;
bool fw_log_enable;
- bool old_ht_ra_format;
+ u32 feature_map;
};
+#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
+ (!!((_fw)->feature_map & BIT(RTW89_FW_FEATURE_ ## _feat)))
+
+#define RTW89_SET_FW_FEATURE(_fw_feature, _fw) \
+ ((_fw)->feature_map |= BIT(_fw_feature))
+
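
A minimal usage sketch of the two feature-map macros; note that RTW89_CHK_FW_FEATURE pastes the RTW89_FW_FEATURE_ prefix onto its first argument while RTW89_SET_FW_FEATURE takes the full enumerator (the surrounding function is illustrative only):

static void example_fw_features(struct rtw89_fw_info *fw)
{
	/* Set while recognizing the firmware... */
	RTW89_SET_FW_FEATURE(RTW89_FW_FEATURE_SCAN_OFFLOAD, fw);

	/* ...and tested at the point of use. */
	if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, fw))
		; /* take the firmware scan-offload path */
}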
struct rtw89_cam_info {
DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM);
DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM);
DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+ DECLARE_BITMAP(ba_cam_map, RTW89_MAX_BA_CAM_NUM);
+ struct rtw89_ba_cam_entry ba_cam_entry[RTW89_MAX_BA_CAM_NUM];
};
enum rtw89_sar_sources {
@@ -2327,9 +2839,24 @@ enum rtw89_sar_sources {
RTW89_SAR_SOURCE_NR,
};
+enum rtw89_sar_subband {
+ RTW89_SAR_2GHZ_SUBBAND,
+ RTW89_SAR_5GHZ_SUBBAND_1_2, /* U-NII-1 and U-NII-2 */
+ RTW89_SAR_5GHZ_SUBBAND_2_E, /* U-NII-2-Extended */
+ RTW89_SAR_5GHZ_SUBBAND_3, /* U-NII-3 */
+ RTW89_SAR_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_SAR_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_SAR_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_SAR_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_SAR_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_SAR_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ RTW89_SAR_SUBBAND_NR,
+};
+
struct rtw89_sar_cfg_common {
- bool set[RTW89_SUBBAND_NR];
- s32 cfg[RTW89_SUBBAND_NR];
+ bool set[RTW89_SAR_SUBBAND_NR];
+ s32 cfg[RTW89_SAR_SUBBAND_NR];
};
struct rtw89_sar_info {
@@ -2344,23 +2871,38 @@ struct rtw89_sar_info {
};
};
+struct rtw89_chanctx_cfg {
+ enum rtw89_sub_entity_idx idx;
+};
+
+enum rtw89_entity_mode {
+ RTW89_ENTITY_MODE_SCC,
+};
+
struct rtw89_hal {
u32 rx_fltr;
u8 cv;
- u8 current_channel;
- u8 prev_primary_channel;
- u8 current_primary_channel;
- enum rtw89_subband current_subband;
- u8 current_band_width;
- u8 current_band_type;
u32 sw_amsdu_max_size;
u32 antenna_tx;
u32 antenna_rx;
u8 tx_nss;
u8 rx_nss;
+ bool tx_path_diversity;
+ bool support_cckpd;
+ bool support_igi;
+
+ DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ struct cfg80211_chan_def chandef[NUM_OF_RTW89_SUB_ENTITY];
+
+ bool entity_active;
+ enum rtw89_entity_mode entity_mode;
+
+ struct rtw89_chan chan[NUM_OF_RTW89_SUB_ENTITY];
+ struct rtw89_chan_rcd chan_rcd[NUM_OF_RTW89_SUB_ENTITY];
};
#define RTW89_MAX_MAC_ID_NUM 128
+#define RTW89_MAX_PKT_OFLD_NUM 255
enum rtw89_flags {
RTW89_FLAG_POWERON,
@@ -2372,10 +2914,37 @@ enum rtw89_flags {
RTW89_FLAG_LEISURE_PS,
RTW89_FLAG_LOW_POWER_MODE,
RTW89_FLAG_INACTIVE_PS,
+ RTW89_FLAG_CRASH_SIMULATING,
NUM_OF_RTW89_FLAGS,
};
+enum rtw89_pkt_drop_sel {
+ RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_ALL,
+ RTW89_PKT_DROP_SEL_MG0_ONCE,
+ RTW89_PKT_DROP_SEL_HIQ_ONCE,
+ RTW89_PKT_DROP_SEL_HIQ_PORT,
+ RTW89_PKT_DROP_SEL_HIQ_MBSSID,
+ RTW89_PKT_DROP_SEL_BAND,
+ RTW89_PKT_DROP_SEL_BAND_ONCE,
+ RTW89_PKT_DROP_SEL_REL_MACID,
+ RTW89_PKT_DROP_SEL_REL_HIQ_PORT,
+ RTW89_PKT_DROP_SEL_REL_HIQ_MBSSID,
+};
+
+struct rtw89_pkt_drop_params {
+ enum rtw89_pkt_drop_sel sel;
+ enum rtw89_mac_idx mac_band;
+ u8 macid;
+ u8 port;
+ u8 mbssid;
+ bool tf_trs;
+};
+
struct rtw89_pkt_stat {
u16 beacon_nr;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
@@ -2406,9 +2975,25 @@ struct rtw89_dack_info {
#define RTW89_IQK_CHS_NR 2
#define RTW89_IQK_PATH_NR 4
+
+struct rtw89_mcc_info {
+ u8 ch[RTW89_IQK_CHS_NR];
+ u8 band[RTW89_IQK_CHS_NR];
+ u8 table_idx;
+};
+
+struct rtw89_lck_info {
+ u8 thermal[RF_PATH_MAX];
+};
+
+struct rtw89_rx_dck_info {
+ u8 thermal[RF_PATH_MAX];
+};
+
struct rtw89_iqk_info {
bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ bool lok_fail[RTW89_IQK_PATH_NR];
bool iqk_tx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
bool iqk_rx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
u32 iqk_fail_cnt;
@@ -2437,6 +3022,8 @@ struct rtw89_iqk_info {
u32 syn1to2;
u8 iqk_mcc_ch[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
u8 iqk_table_idx[RTW89_IQK_PATH_NR];
+ u32 lok_idac[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ u32 lok_vbuf[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
};
#define RTW89_DPK_RF_PATH 2
@@ -2447,6 +3034,7 @@ struct rtw89_dpk_bkup_para {
enum rtw89_bandwidth bw;
u8 ch;
bool path_ok;
+ u8 mdpd_en;
u8 txagc_dpk;
u8 ther_dpk;
u8 gs;
@@ -2456,11 +3044,12 @@ struct rtw89_dpk_bkup_para {
struct rtw89_dpk_info {
bool is_dpk_enable;
bool is_dpk_reload_en;
- u16 dc_i[RTW89_DPK_RF_PATH];
- u16 dc_q[RTW89_DPK_RF_PATH];
- u8 corr_val[RTW89_DPK_RF_PATH];
- u8 corr_idx[RTW89_DPK_RF_PATH];
+ u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u8 corr_idx[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u8 cur_idx[RTW89_DPK_RF_PATH];
+ u8 cur_k_set;
struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
};
@@ -2469,6 +3058,7 @@ struct rtw89_fem_info {
bool elna_5g;
bool epa_2g;
bool epa_5g;
+ bool epa_6g;
};
struct rtw89_phy_ch_info {
@@ -2530,13 +3120,20 @@ enum rtw89_multi_cfo_mode {
enum rtw89_phy_cfo_status {
RTW89_PHY_DCFO_STATE_NORMAL = 0,
RTW89_PHY_DCFO_STATE_ENHANCE = 1,
+ RTW89_PHY_DCFO_STATE_HOLD = 2,
RTW89_PHY_DCFO_STATE_MAX
};
+enum rtw89_phy_cfo_ul_ofdma_acc_mode {
+ RTW89_CFO_UL_OFDMA_ACC_DISABLE = 0,
+ RTW89_CFO_UL_OFDMA_ACC_ENABLE = 1
+};
+
struct rtw89_cfo_tracking_info {
u16 cfo_timer_ms;
bool cfo_trig_by_timer_en;
enum rtw89_phy_cfo_status phy_cfo_status;
+ enum rtw89_phy_cfo_ul_ofdma_acc_mode cfo_ul_ofdma_acc_mode;
u8 phy_cfo_trk_cnt;
bool is_adjust;
enum rtw89_multi_cfo_mode rtw89_multi_cfo_mode;
@@ -2556,22 +3153,30 @@ struct rtw89_cfo_tracking_info {
s32 residual_cfo_acc;
u8 phy_cfotrk_state;
u8 phy_cfotrk_cnt;
+ bool divergence_lock_en;
+ u8 x_cap_lb;
+ u8 x_cap_ub;
+ u8 lock_cnt;
};
/* 2GL, 2GH, 5GL1, 5GL2, 5GM1, 5GM2, 5GH1, 5GH2 */
#define TSSI_TRIM_CH_GROUP_NUM 8
+#define TSSI_TRIM_CH_GROUP_NUM_6G 16
#define TSSI_CCK_CH_GROUP_NUM 6
#define TSSI_MCS_2G_CH_GROUP_NUM 5
#define TSSI_MCS_5G_CH_GROUP_NUM 14
+#define TSSI_MCS_6G_CH_GROUP_NUM 32
#define TSSI_MCS_CH_GROUP_NUM \
(TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM)
struct rtw89_tssi_info {
u8 thermal[RF_PATH_MAX];
s8 tssi_trim[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM];
+ s8 tssi_trim_6g[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM_6G];
s8 tssi_cck[RF_PATH_MAX][TSSI_CCK_CH_GROUP_NUM];
s8 tssi_mcs[RF_PATH_MAX][TSSI_MCS_CH_GROUP_NUM];
+ s8 tssi_6g_mcs[RF_PATH_MAX][TSSI_MCS_6G_CH_GROUP_NUM];
s8 extra_ofst[RF_PATH_MAX];
bool tssi_tracking_check[RF_PATH_MAX];
u8 default_txagc_offset[RF_PATH_MAX];
@@ -2733,8 +3338,8 @@ struct rtw89_ser {
struct work_struct ser_hdl_work;
struct delayed_work ser_alarm_work;
- struct state_ent *st_tbl;
- struct event_ent *ev_tbl;
+ const struct state_ent *st_tbl;
+ const struct event_ent *ev_tbl;
struct list_head msg_q;
spinlock_t msg_q_lock; /* lock when read/write ser msg */
DECLARE_BITMAP(flags, RTW89_NUM_OF_SER_FLAGS);
@@ -2769,12 +3374,66 @@ struct rtw89_early_h2c {
u16 h2c_len;
};
+struct rtw89_hw_scan_info {
+ struct ieee80211_vif *scanning_vif;
+ struct list_head pkt_list[NUM_NL80211_BANDS];
+ u8 op_pri_ch;
+ u8 op_chan;
+ u8 op_bw;
+ u8 op_band;
+ u32 last_chan_idx;
+};
+
+enum rtw89_phy_bb_gain_band {
+ RTW89_BB_GAIN_BAND_2G = 0,
+ RTW89_BB_GAIN_BAND_5G_L = 1,
+ RTW89_BB_GAIN_BAND_5G_M = 2,
+ RTW89_BB_GAIN_BAND_5G_H = 3,
+ RTW89_BB_GAIN_BAND_6G_L = 4,
+ RTW89_BB_GAIN_BAND_6G_M = 5,
+ RTW89_BB_GAIN_BAND_6G_H = 6,
+ RTW89_BB_GAIN_BAND_6G_UH = 7,
+
+ RTW89_BB_GAIN_BAND_NR,
+};
+
+enum rtw89_phy_bb_rxsc_num {
+ RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */
+ RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */
+ RTW89_BB_RXSC_NUM_160 = 15, /* SC: 0, 1~8, 9~12, 13~14 */
+};
+
+struct rtw89_phy_bb_gain_info {
+ s8 lna_gain[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_gain[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][TIA_GAIN_NUM];
+ s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 lna_op1db[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [LNA_GAIN_NUM + 1]; /* TIA0_LNA0~6 + TIA1_LNA6 */
+ s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX];
+ s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [RTW89_BB_RXSC_NUM_40];
+ s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [RTW89_BB_RXSC_NUM_80];
+ s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]
+ [RTW89_BB_RXSC_NUM_160];
+};
+
+struct rtw89_phy_efuse_gain {
+ bool offset_valid;
+ s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */
+ s8 offset_base[RTW89_PHY_MAX]; /* S(8, 4) */
+};
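
The S(8, 0) and S(8, 4) annotations read as signed fixed-point formats with 0 and 4 fractional bits respectively; a tiny helper under that interpretation (my reading of the notation, not code from the driver):

/* One LSB of an S(8, 4) value is 1/16, so scale by 100/16 to get
 * hundredths of the unit.
 */
static int example_s8_4_to_hundredths(s8 v)
{
	return v * 100 / 16;
}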
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
+ const struct ieee80211_ops *ops;
bool dbcc_en;
+ struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
+ const struct rtw89_pci_info *pci_info;
struct rtw89_hal hal;
struct rtw89_mac_info mac;
struct rtw89_fw_info fw;
@@ -2790,29 +3449,38 @@ struct rtw89_dev {
struct workqueue_struct *txq_wq;
struct work_struct txq_work;
struct delayed_work txq_reinvoke_work;
- /* used to protect ba_list */
+ /* used to protect ba_list and forbid_ba_list */
spinlock_t ba_lock;
/* txqs to setup ba session */
struct list_head ba_list;
+ /* txqs to forbid ba session */
+ struct list_head forbid_ba_list;
struct work_struct ba_work;
+ /* used to protect rpwm */
+ spinlock_t rpwm_lock;
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct list_head early_h2c_list;
struct rtw89_ser ser;
- DECLARE_BITMAP(hw_port, RTW89_MAX_HW_PORT_NUM);
+ DECLARE_BITMAP(hw_port, RTW89_PORT_NUM);
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
+ DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
+ struct rtw89_mcc_info mcc;
+ struct rtw89_lck_info lck;
+ struct rtw89_rx_dck_info rx_dck;
bool is_tssi_mode[RF_PATH_MAX];
bool is_bt_iqk_timeout;
@@ -2825,11 +3493,15 @@ struct rtw89_dev {
struct rtw89_env_monitor_info env_monitor;
struct rtw89_dig_info dig;
struct rtw89_phy_ch_info ch_info;
+ struct rtw89_phy_bb_gain_info bb_gain;
+ struct rtw89_phy_efuse_gain efuse_gain;
+
struct delayed_work track_work;
struct delayed_work coex_act1_work;
struct delayed_work coex_bt_devinfo_work;
struct delayed_work coex_rfk_chk_work;
struct delayed_work cfo_track_work;
+ struct delayed_work forbid_ba_work;
struct rtw89_ppdu_sts_info ppdu_sts;
u8 total_sta_assoc;
bool scanning;
@@ -2847,7 +3519,7 @@ struct rtw89_dev {
int napi_budget_countdown;
/* HCI related data, keep last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
@@ -2876,6 +3548,16 @@ static inline int rtw89_hci_deinit(struct rtw89_dev *rtwdev)
return rtwdev->hci.ops->deinit(rtwdev);
}
+static inline void rtw89_hci_pause(struct rtw89_dev *rtwdev, bool pause)
+{
+ rtwdev->hci.ops->pause(rtwdev, pause);
+}
+
+static inline void rtw89_hci_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
+{
+ rtwdev->hci.ops->switch_mode(rtwdev, low_power);
+}
+
static inline void rtw89_hci_recalc_int_mit(struct rtw89_dev *rtwdev)
{
rtwdev->hci.ops->recalc_int_mit(rtwdev);
@@ -2894,10 +3576,25 @@ static inline void rtw89_hci_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
bool drop)
{
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+ return;
+
if (rtwdev->hci.ops->flush_queues)
return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
}
+static inline void rtw89_hci_recovery_start(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->recovery_start)
+ rtwdev->hci.ops->recovery_start(rtwdev);
+}
+
+static inline void rtw89_hci_recovery_complete(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.ops->recovery_complete)
+ rtwdev->hci.ops->recovery_complete(rtwdev);
+}
+
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read8(rtwdev, addr);
@@ -3111,6 +3808,16 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw89_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
+static inline struct ieee80211_vif *rtwvif_to_vif_safe(struct rtw89_vif *rtwvif)
+{
+ return rtwvif ? rtwvif_to_vif(rtwvif) : NULL;
+}
+
+static inline struct rtw89_vif *vif_to_rtwvif_safe(struct ieee80211_vif *vif)
+{
+ return vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+}
+
static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta)
{
void *p = rtwsta;
@@ -3128,18 +3835,138 @@ static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta)
return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
}
+static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw)
+{
+ if (hw_bw == RTW89_CHANNEL_WIDTH_160)
+ return RATE_INFO_BW_160;
+ else if (hw_bw == RTW89_CHANNEL_WIDTH_80)
+ return RATE_INFO_BW_80;
+ else if (hw_bw == RTW89_CHANNEL_WIDTH_40)
+ return RATE_INFO_BW_40;
+ else
+ return RATE_INFO_BW_20;
+}
+
+static inline
+enum nl80211_band rtw89_hw_to_nl80211_band(enum rtw89_band hw_band)
+{
+ switch (hw_band) {
+ default:
+ case RTW89_BAND_2G:
+ return NL80211_BAND_2GHZ;
+ case RTW89_BAND_5G:
+ return NL80211_BAND_5GHZ;
+ case RTW89_BAND_6G:
+ return NL80211_BAND_6GHZ;
+ }
+}
+
+static inline
+enum rtw89_band rtw89_nl80211_to_hw_band(enum nl80211_band nl_band)
+{
+ switch (nl_band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ return RTW89_BAND_2G;
+ case NL80211_BAND_5GHZ:
+ return RTW89_BAND_5G;
+ case NL80211_BAND_6GHZ:
+ return RTW89_BAND_6G;
+ }
+}
+
+static inline
+enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
+{
+ switch (width) {
+ default:
+		WARN(1, "Unsupported bandwidth %d\n", width);
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return RTW89_CHANNEL_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return RTW89_CHANNEL_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return RTW89_CHANNEL_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ return RTW89_CHANNEL_WIDTH_160;
+ }
+}
+
+static inline
+struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ if (rtwsta) {
+ struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ return &rtwsta->addr_cam;
+ }
+ return &rtwvif->addr_cam;
+}
+
+static inline
+struct rtw89_bssid_cam_entry *rtw89_get_bssid_cam_of(struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ if (rtwsta) {
+ struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+
+ if (sta->tdls)
+ return &rtwsta->bssid_cam;
+ }
+ return &rtwvif->bssid_cam;
+}
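
Read together with the per-station addr_cam/bssid_cam fields added to struct rtw89_sta above, a caller resolves the effective CAM entries roughly as follows (an illustrative fragment, not taken from the driver):

static void example_resolve_cams(struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	/* AP-mode peers and TDLS peers own private address CAM entries;
	 * everything else shares the vif's entry.
	 */
	struct rtw89_addr_cam_entry *addr_cam =
		rtw89_get_addr_cam_of(rtwvif, rtwsta);
	/* Only TDLS peers own a private BSSID CAM entry. */
	struct rtw89_bssid_cam_entry *bssid_cam =
		rtw89_get_bssid_cam_of(rtwvif, rtwsta);

	(void)addr_cam;
	(void)bssid_cam;
}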
+
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtwdev->chip->ops->set_channel_help(rtwdev, true, p);
+ rtwdev->chip->ops->set_channel_help(rtwdev, true, p, chan,
+ mac_idx, phy_idx);
}
static inline
void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtwdev->chip->ops->set_channel_help(rtwdev, false, p, chan,
+ mac_idx, phy_idx);
+}
+
+static inline
+const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chandef[idx];
+}
+
+static inline
+const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chan[idx];
+}
+
+static inline
+const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
{
- rtwdev->chip->ops->set_channel_help(rtwdev, false, p);
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chan_rcd[idx];
}
static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
@@ -3174,12 +4001,13 @@ static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
chip->ops->rfk_channel(rtwdev);
}
-static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev)
+static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx);
}
static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -3203,19 +4031,7 @@ static inline void rtw89_chip_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->set_txpwr_ctrl)
- chip->ops->set_txpwr_ctrl(rtwdev);
-}
-
-static inline void rtw89_chip_set_txpwr(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch = rtwdev->hal.current_channel;
-
- if (!ch)
- return;
-
- if (chip->ops->set_txpwr)
- chip->ops->set_txpwr(rtwdev);
+ chip->ops->set_txpwr_ctrl(rtwdev, RTW89_PHY_0);
}
static inline void rtw89_chip_power_trim(struct rtw89_dev *rtwdev)
@@ -3265,6 +4081,14 @@ static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev,
chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en);
}
+static inline void rtw89_chip_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->cfg_txrx_path)
+ chip->ops->cfg_txrx_path(rtwdev);
+}
+
static inline
void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif)
@@ -3272,7 +4096,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
+ if (!vif->bss_conf.he_support || !vif->cfg.assoc)
return;
if (chip->ops->set_txpwr_ul_tb_offset)
@@ -3298,6 +4122,71 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
chip->ops->ctrl_btg(rtwdev, btg);
}
+static inline
+void rtw89_chip_fill_txdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->fill_txdesc(rtwdev, desc_info, txdesc);
+}
+
+static inline
+void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
+}
+
+static inline
+void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->mac_cfg_gnt(rtwdev, gnt_cfg);
+}
+
+static inline void rtw89_chip_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->cfg_ctrl_path(rtwdev, wl);
+}
+
+static inline
+int rtw89_chip_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->stop_sch_tx(rtwdev, mac_idx, tx_en, sel);
+}
+
+static inline
+int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->resume_sch_tx(rtwdev, mac_idx, tx_en);
+}
+
+static inline
+int rtw89_chip_h2c_dctl_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->h2c_dctl_sec_cam)
+ return 0;
+ return chip->ops->h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+}
+
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
__le16 fc = hdr->frame_control;
@@ -3312,10 +4201,12 @@ static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
static inline bool rtw89_sta_has_beamformer_cap(struct ieee80211_sta *sta)
{
- if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
- (sta->he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
- (sta->he_cap.he_cap_elem.phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER))
+ if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[4] &
+ IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER))
return true;
return false;
}
@@ -3338,6 +4229,12 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb);
@@ -3363,17 +4260,30 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_register(struct rtw89_dev *rtwdev);
void rtw89_core_unregister(struct rtw89_dev *rtwdev);
+struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ u32 bus_data_size,
+ const struct rtw89_chip_info *chip);
+void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
+void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_set_channel(struct rtw89_dev *rtwdev);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
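
The BA CAM accessors declared here pair an acquire with a release around a block-ack session; a hypothetical TX-AMPDU fragment under that assumption (error handling and the H2C programming elided):

static int example_ampdu_tx_start(struct rtw89_dev *rtwdev,
				  struct rtw89_sta *rtwsta, u8 tid)
{
	u8 cam_idx;
	int ret;

	/* Reserve one of the RTW89_MAX_BA_CAM_NUM hardware entries. */
	ret = rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, tid, &cam_idx);
	if (ret)
		return ret;

	/* ...program the entry via H2C and run the session, then: */
	return rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, tid, &cam_idx);
}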
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
-u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate);
+bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
int rtw89_regd_init(struct rtw89_dev *rtwdev,
void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request));
void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
@@ -3381,5 +4291,10 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
+void rtw89_core_update_beacon_work(struct work_struct *work);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan);
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, bool hw_scan);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 22bd1d03e722..730e83d54257 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -525,7 +525,8 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev)
{
- u8 band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
switch (regd) {
@@ -635,6 +636,11 @@ static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v)
start = 0x000;
end = 0x014;
break;
+ case RTW89_DBG_SEL_MAC_30:
+ seq_puts(m, "Debug selected MAC page 0x30\n");
+ start = 0x030;
+ end = 0x033;
+ break;
case RTW89_DBG_SEL_MAC_40:
seq_puts(m, "Debug selected MAC page 0x40\n");
start = 0x040;
@@ -724,26 +730,6 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
return count;
}
-static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
- [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
- [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
- [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
- [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
- [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR,
- [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR,
- [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
- [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
- [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
- [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
- [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
- [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
- [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
-};
-
static void rtw89_debug_dump_mac_mem(struct seq_file *m,
struct rtw89_dev *rtwdev,
u8 sel, u32 start_addr, u32 len)
@@ -757,7 +743,7 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m,
pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1;
start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
- base_addr = mac_mem_base_addr_table[sel];
+ base_addr = rtw89_mac_mem_base_addrs[sel];
base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
for (p = 0; p < pages; p++) {
@@ -2204,6 +2190,92 @@ out:
return count;
}
+static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true);
+ switch (pkt_id) {
+ case 0xffff:
+ return -ETIMEDOUT;
+ case 0xfff:
+ return -ENOMEM;
+ default:
+ break;
+ }
+
+	/* intentionally enqueue two packets while holding only one pkt id */
+ ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
+ ctrl_para.start_pktid = pkt_id;
+ ctrl_para.end_pktid = pkt_id;
+	ctrl_para.pkt_num = 1; /* counted from 0, so 1 means two packets */
+ ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
+ ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
+
+ if (rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ seq_printf(m, "%d\n",
+ test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
+ return 0;
+}
+
+enum rtw89_dbg_crash_simulation_type {
+ RTW89_DBG_SIM_CPU_EXCEPTION = 1,
+ RTW89_DBG_SIM_CTRL_ERROR = 2,
+};
+
+static ssize_t
+rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ int (*sim)(struct rtw89_dev *rtwdev);
+ u8 crash_type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &crash_type);
+ if (ret)
+ return -EINVAL;
+
+ switch (crash_type) {
+ case RTW89_DBG_SIM_CPU_EXCEPTION:
+ if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
+ return -EOPNOTSUPP;
+ sim = rtw89_fw_h2c_trigger_cpu_exception;
+ break;
+ case RTW89_DBG_SIM_CTRL_ERROR:
+ sim = rtw89_dbg_trigger_ctrl_error;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+ set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+ ret = sim(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
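
From userspace the new node is driven by writing the crash-simulation type shown in the enum above; a hypothetical helper (the debugfs path is an assumption based on where mac80211 drivers usually place their entries):

#include <fcntl.h>
#include <unistd.h>

/* Write "1" (CPU exception) or "2" (control error) to fw_crash. */
static int example_trigger_fw_crash(const char *type)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/rtw89/fw_crash";
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, type, 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}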
+
static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v)
{
struct rtw89_debugfs_priv *debugfs_priv = m->private;
@@ -2262,7 +2334,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
struct rate_info *rate = &rtwsta->ra_report.txrate;
struct ieee80211_rx_status *status = &rtwsta->rx_status;
struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_hal *hal = &rtwdev->hal;
u8 rssi;
+ int i;
seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
@@ -2278,9 +2353,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
he_gi_str[rate->he_gi] : "N/A");
else
seq_printf(m, "Legacy %d", rate->legacy);
+ seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : "");
seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
- sta->max_rc_amsdu_len);
+ sta->deflink.agg.max_rc_amsdu_len);
seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id);
@@ -2306,8 +2382,15 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d)\n",
+ seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
+ rssi = ewma_rssi_read(&rtwsta->rssi[i]);
+ seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
+ hal->tx_path_diversity && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ i + 1 == rtwdev->chip->rf_path_num ? "" : ", ");
+ }
+ seq_puts(m, "]\n");
}
static void
@@ -2324,16 +2407,17 @@ rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
static const struct rtw89_rx_rate_cnt_info {
enum rtw89_hw_rate first_rate;
int len;
+ int ext;
const char *rate_mode;
} rtw89_rx_rate_cnt_infos[] = {
- {RTW89_HW_RATE_CCK1, 4, "Legacy:"},
- {RTW89_HW_RATE_OFDM6, 8, "OFDM:"},
- {RTW89_HW_RATE_MCS0, 8, "HT 0:"},
- {RTW89_HW_RATE_MCS8, 8, "HT 1:"},
- {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, "VHT 1SS:"},
- {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, "VHT 2SS:"},
- {RTW89_HW_RATE_HE_NSS1_MCS0, 12, "HE 1SS:"},
- {RTW89_HW_RATE_HE_NSS2_MCS0, 12, "HE 2ss:"},
+ {RTW89_HW_RATE_CCK1, 4, 0, "Legacy:"},
+ {RTW89_HW_RATE_OFDM6, 8, 0, "OFDM:"},
+ {RTW89_HW_RATE_MCS0, 8, 0, "HT 0:"},
+ {RTW89_HW_RATE_MCS8, 8, 0, "HT 1:"},
+ {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, 2, "VHT 1SS:"},
+ {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, 2, "VHT 2SS:"},
+ {RTW89_HW_RATE_HE_NSS1_MCS0, 12, 0, "HE 1SS:"},
+	{RTW89_HW_RATE_HE_NSS2_MCS0, 12, 0, "HE 2SS:"},
};
static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
@@ -2348,7 +2432,8 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n",
stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv,
stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv);
- seq_printf(m, "Beacon: %u\n", pkt_stat->beacon_nr);
+ seq_printf(m, "Beacon: %u, TF: %u\n", pkt_stat->beacon_nr,
+ stats->rx_tf_periodic);
seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len,
stats->rx_avg_len);
@@ -2358,6 +2443,11 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
seq_printf(m, "%10s [", info->rate_mode);
rtw89_debug_append_rx_rate(m, pkt_stat,
info->first_rate, info->len);
+ if (info->ext) {
+ seq_puts(m, "][");
+ rtw89_debug_append_rx_rate(m, pkt_stat,
+ info->first_rate + info->len, info->ext);
+ }
seq_puts(m, "]\n");
}
@@ -2366,6 +2456,100 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
return 0;
}
+static void rtw89_dump_addr_cam(struct seq_file *m,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_sec_cam_entry *sec_entry;
+ int i;
+
+ seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
+ seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
+ seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
+ addr_cam->sec_cam_map);
+ for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
+ sec_entry = addr_cam->sec_entries[i];
+ if (!sec_entry)
+ continue;
+ seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
+ if (sec_entry->ext_key)
+ seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1);
+ seq_puts(m, "\n");
+ }
+}
+
+static
+void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
+ seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
+ rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+}
+
+static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
+{
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_ba_cam_entry *entry;
+ bool first = true;
+
+ list_for_each_entry(entry, &rtwsta->ba_cam_list, list) {
+ if (first) {
+ seq_puts(m, "\tba_cam ");
+ first = false;
+ } else {
+ seq_puts(m, ", ");
+ }
+ seq_printf(m, "tid[%u]=%d", entry->tid,
+ (int)(entry - rtwdev->cam_info.ba_cam_entry));
+ }
+ seq_puts(m, "\n");
+}
+
+static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+
+ seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
+ sta->tdls ? "(TDLS)" : "");
+ rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+ rtw89_dump_ba_cam(m, rtwsta);
+}
+
+static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ mutex_lock(&rtwdev->mutex);
+
+ seq_puts(m, "map:\n");
+ seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
+ rtwdev->mac_id_map);
+ seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map),
+ cam_info->addr_cam_map);
+ seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map),
+ cam_info->bssid_cam_map);
+ seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
+ cam_info->sec_cam_map);
+ seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
+ cam_info->ba_cam_map);
+
+ ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
+ IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
.cb_read = rtw89_debug_priv_read_reg_get,
.cb_write = rtw89_debug_priv_read_reg_select,
@@ -2416,6 +2600,11 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
.cb_write = rtw89_debug_priv_early_h2c_set,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = {
+ .cb_read = rtw89_debug_priv_fw_crash_get,
+ .cb_write = rtw89_debug_priv_fw_crash_set,
+};
+
static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
.cb_read = rtw89_debug_priv_btc_info_get,
};
@@ -2432,6 +2621,10 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
.cb_read = rtw89_debug_priv_phy_info_get,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
+ .cb_read = rtw89_debug_priv_stations_get,
+};
+
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -2466,10 +2659,12 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_rw(mac_dbg_port_dump);
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
+ rtw89_debugfs_add_rw(fw_crash);
rtw89_debugfs_add_r(btc_info);
rtw89_debugfs_add_w(btc_manual);
rtw89_debugfs_add_w(fw_log_manual);
rtw89_debugfs_add_r(phy_info);
+ rtw89_debugfs_add_r(stations);
}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index f14b726c1a9f..ee243aadde87 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -23,10 +23,16 @@ enum rtw89_debug_mask {
RTW89_DBG_FW = BIT(12),
RTW89_DBG_BTC = BIT(13),
RTW89_DBG_BF = BIT(14),
+ RTW89_DBG_HW_SCAN = BIT(15),
+ RTW89_DBG_SAR = BIT(16),
+ RTW89_DBG_STATE = BIT(17),
+
+ RTW89_DBG_UNEXP = BIT(31),
};
enum rtw89_debug_mac_reg_sel {
RTW89_DBG_SEL_MAC_00,
+ RTW89_DBG_SEL_MAC_30,
RTW89_DBG_SEL_MAC_40,
RTW89_DBG_SEL_MAC_80,
RTW89_DBG_SEL_MAC_C0,
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c
index c0b80f3da56c..7bd4f8558e03 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse.c
@@ -4,6 +4,7 @@
#include "debug.h"
#include "efuse.h"
+#include "mac.h"
#include "reg.h"
enum rtw89_efuse_bank {
@@ -16,6 +17,9 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
{
u8 val;
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return 0;
+
val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
B_AX_EF_CELL_SEL_MASK);
if (bank == val)
@@ -32,14 +36,61 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
return -EBUSY;
}
-static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
- u32 dump_addr, u32 dump_size)
+static void rtw89_enable_otp_burst_mode(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_write32_set(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+}
+
+static void rtw89_enable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (chip_id == RTL8852A)
+ return;
+
+ rtw89_write8_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+
+ fsleep(1000);
+
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+ rtw89_enable_otp_burst_mode(rtwdev, true);
+}
+
+static void rtw89_disable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (chip_id == RTL8852A)
+ return;
+
+ if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+ rtw89_enable_otp_burst_mode(rtwdev, false);
+
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write8_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+}
+
+static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
{
u32 efuse_ctl;
u32 addr;
int ret;
- rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+ rtw89_enable_efuse_pwr_cut_ddv(rtwdev);
for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK);
@@ -54,6 +105,74 @@ static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
*map++ = (u8)(efuse_ctl & 0xff);
}
+ rtw89_disable_efuse_pwr_cut_ddv(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
+{
+ u32 addr;
+ u8 val8;
+ int err;
+ int ret;
+
+ for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR,
+ addr & 0xff, XTAL_SI_LOW_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
+ XTAL_SI_HIGH_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
+ XTAL_SI_MODE_SEL_MASK);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
+ !err && (val8 & XTAL_SI_RDY),
+ 1, 10000, false,
+ rtwdev, XTAL_SI_CTRL, &val8);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read dav efuse\n");
+ return ret;
+ }
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
+ if (ret)
+ return ret;
+ *map++ = val8;
+ }
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size, bool dav)
+{
+ int ret;
+
+ if (!map || dump_size == 0)
+ return 0;
+
+ rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+
+ if (dav) {
+ ret = rtw89_dump_physical_efuse_map_dav(rtwdev, map, dump_addr, dump_size);
+ if (ret)
+ return ret;
+ } else {
+ ret = rtw89_dump_physical_efuse_map_ddv(rtwdev, map, dump_addr, dump_size);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -78,6 +197,9 @@ static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
u8 word_en;
int i;
+ if (!phy_map)
+ return 0;
+
while (phy_idx < physical_size - sec_ctrl_size) {
hdr1 = phy_map[phy_idx];
hdr2 = phy_map[phy_idx + 1];
@@ -109,8 +231,13 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
{
u32 phy_size = rtwdev->chip->physical_efuse_size;
u32 log_size = rtwdev->chip->logical_efuse_size;
+ u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
+ u32 dav_log_size = rtwdev->chip->dav_log_efuse_size;
+ u32 full_log_size = log_size + dav_log_size;
u8 *phy_map = NULL;
u8 *log_map = NULL;
+ u8 *dav_phy_map = NULL;
+ u8 *dav_log_map = NULL;
int ret;
if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS)
@@ -119,27 +246,41 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
rtw89_warn(rtwdev, "failed to check efuse autoload\n");
phy_map = kmalloc(phy_size, GFP_KERNEL);
- log_map = kmalloc(log_size, GFP_KERNEL);
+ log_map = kmalloc(full_log_size, GFP_KERNEL);
+ if (dav_phy_size && dav_log_size) {
+ dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);
+ dav_log_map = log_map + log_size;
+ }
- if (!phy_map || !log_map) {
+ if (!phy_map || !log_map || (dav_phy_size && !dav_phy_map)) {
ret = -ENOMEM;
goto out_free;
}
- ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size);
+ ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size, false);
if (ret) {
rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
goto out_free;
}
+ ret = rtw89_dump_physical_efuse_map(rtwdev, dav_phy_map, 0, dav_phy_size, true);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
+ goto out_free;
+ }
- memset(log_map, 0xff, log_size);
+ memset(log_map, 0xff, full_log_size);
ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map);
if (ret) {
rtw89_warn(rtwdev, "failed to dump efuse logical map\n");
goto out_free;
}
+ ret = rtw89_dump_logical_efuse_map(rtwdev, dav_phy_map, dav_log_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav logical map\n");
+ goto out_free;
+ }
- rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, log_size);
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, full_log_size);
ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map);
if (ret) {
@@ -148,6 +289,7 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
}
out_free:
+ kfree(dav_phy_map);
kfree(log_map);
kfree(phy_map);
@@ -169,7 +311,7 @@ int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev)
return -ENOMEM;
ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map,
- phycap_addr, phycap_size);
+ phycap_addr, phycap_size, false);
if (ret) {
rtw89_warn(rtwdev, "failed to dump phycap map\n");
goto out_free;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 8a57b75b07c0..d57e3610fb88 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -10,31 +11,33 @@
#include "phy.h"
#include "reg.h"
-static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header)
+static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
+ bool header)
{
struct sk_buff *skb;
u32 header_len = 0;
+ u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
if (header)
header_len = H2C_HEADER_LEN;
- skb = dev_alloc_skb(len + header_len + 24);
+ skb = dev_alloc_skb(len + header_len + h2c_desc_size);
if (!skb)
return NULL;
- skb_reserve(skb, header_len + 24);
+ skb_reserve(skb, header_len + h2c_desc_size);
memset(skb->data, 0, len);
return skb;
}
-struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len)
+struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
- return rtw89_fw_h2c_alloc_skb(len, true);
+ return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}
-struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len)
+struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
- return rtw89_fw_h2c_alloc_skb(len, false);
+ return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}
static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
@@ -193,14 +196,102 @@ int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
return 0;
}
+#define __DEF_FW_FEAT_COND(__cond, __op) \
+static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
+{ \
+ return suit_ver_code __op comp_ver_code; \
+}
+
+__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
+__DEF_FW_FEAT_COND(le, <=); /* less or equal */
+
+struct __fw_feat_cfg {
+ enum rtw89_core_chip_id chip_id;
+ enum rtw89_fw_feature feature;
+ u32 ver_code;
+ bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
+};
+
+#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
+ { \
+ .chip_id = _chip, \
+ .feature = RTW89_FW_FEATURE_ ## _feat, \
+ .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
+ .cond = __fw_feat_cond_ ## _cond, \
+ }
+
+static const struct __fw_feat_cfg fw_feat_tbl[] = {
+ __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
+};
+
static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ const struct __fw_feat_cfg *ent;
+ const struct rtw89_fw_suit *fw_suit;
+ u32 suit_ver_code;
+ int i;
+
+ fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
+
+ for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
+ ent = &fw_feat_tbl[i];
+ if (chip->chip_id != ent->chip_id)
+ continue;
+
+ if (ent->cond(suit_ver_code, ent->ver_code))
+ RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
+ }
+}
+
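+/* Read only the image header from the firmware file, so feature bits
+ * can be evaluated before the full firmware is requested and loaded.
+ */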
+void rtw89_early_fw_feature_recognize(struct device *device,
+ const struct rtw89_chip_info *chip,
+ u32 *early_feat_map)
+{
+ union {
+ struct rtw89_mfw_hdr mfw_hdr;
+ u8 fw_hdr[RTW89_FW_HDR_SIZE];
+ } buf = {};
+ const struct firmware *firmware;
+ u32 ver_code;
+ int ret;
+ int i;
+
+ ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
+ device, &buf, sizeof(buf), 0);
+ if (ret) {
+ dev_err(device, "early firmware request failed: %d\n", ret);
+ return;
+ }
+
+ ver_code = buf.mfw_hdr.sig != RTW89_MFW_SIG ?
+ RTW89_FW_HDR_VER_CODE(&buf.fw_hdr) :
+ RTW89_MFW_HDR_VER_CODE(&buf.mfw_hdr);
+ if (!ver_code)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
+ const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
+
+ if (chip->chip_id != ent->chip_id)
+ continue;
+
+ if (ent->cond(ver_code, ent->ver_code))
+ *early_feat_map |= BIT(ent->feature);
+ }
- if (chip->chip_id == RTL8852A &&
- RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
- rtwdev->fw.old_ht_ra_format = true;
+out:
+ release_firmware(firmware);
}
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
@@ -267,7 +358,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
struct sk_buff *skb;
u32 ret = 0;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
return -ENOMEM;
@@ -333,7 +424,7 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
else
pkt_len = residue_len;
- skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len);
+ skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -527,15 +618,16 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
}
skb_put(skb, H2C_CAM_LEN);
rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
- rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data);
+ rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
@@ -543,7 +635,44 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
H2C_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_DCTL_SEC_CAM_LEN 68
+int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
+
+ rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
+ H2C_DCTL_SEC_CAM_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -552,22 +681,45 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
-#define H2C_BA_CAM_LEN 4
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params)
+#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ u8 macid = rtwsta->mac_id;
struct sk_buff *skb;
+ u8 entry_idx;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
+ if (ret) {
+ /* this still works even without a static BA CAM entry, because the
+ * hardware can create a dynamic BA CAM entry automatically.
+ */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
return -ENOMEM;
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
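+ /* the v1 BA CAM layout keeps the entry index in a different field */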
+ if (chip->bacam_v1)
+ SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ else
+ SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
if (!valid)
goto end;
SET_BA_CAM_VALID(skb->data, valid);
@@ -577,9 +729,14 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
else
SET_BA_CAM_BMAP_SIZE(skb->data, 0);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 0);
+ SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
+ if (chip->bacam_v1) {
+ SET_BA_CAM_STD_EN(skb->data, 1);
+ SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ }
+
end:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
@@ -587,7 +744,46 @@ end:
H2C_FUNC_MAC_BA_CAM, 0, 1,
H2C_BA_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
+ u8 entry_idx, u8 uid)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BA_CAM_LEN);
+
+ SET_BA_CAM_VALID(skb->data, 1);
+ SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ SET_BA_CAM_UID(skb->data, uid);
+ SET_BA_CAM_BAND(skb->data, 0);
+ SET_BA_CAM_STD_EN(skb->data, 0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM, 0, 1,
+ H2C_BA_CAM_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -596,7 +792,21 @@ end:
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
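+/* v1 BA CAM chips reserve a range of dynamic entries after the static
+ * ones; pre-register them with sequentially assigned UIDs.
+ */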
+void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 entry_idx = chip->bacam_num;
+ u8 uid = 0;
+ int i;
+
+ for (i = 0; i < chip->bacam_dynamic_num; i++) {
+ rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
+ entry_idx++;
+ uid++;
+ }
}
#define H2C_LOG_CFG_LEN 12
@@ -605,8 +815,9 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
struct sk_buff *skb;
u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
return -ENOMEM;
@@ -624,7 +835,8 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
H2C_FUNC_LOG_CFG, 0, 0,
H2C_LOG_CFG_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -633,7 +845,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_GENERAL_PKT_LEN 6
@@ -641,8 +853,9 @@ fail:
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -661,7 +874,8 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
H2C_GENERAL_PKT_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -670,7 +884,7 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LPS_PARM_LEN 8
@@ -678,8 +892,9 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param)
{
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -703,7 +918,8 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_LPS_PARM, 0, 1,
H2C_LPS_PARM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -712,18 +928,85 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
-#define H2C_CMC_TBL_LEN 68
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
+#define H2C_P2P_ACT_LEN 20
+int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_p2p_noa_desc *desc,
+ u8 act, u8 noa_id)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
+ u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
struct sk_buff *skb;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_P2P_ACT_LEN);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
+ RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
+ RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
+ RTW89_SET_FWCMD_P2P_ACT(cmd, act);
+ RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
+ RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
+ if (desc) {
+ RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
+ RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
+ RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
+ RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
+ RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_P2P_ACT, 0, 0,
+ H2C_P2P_ACT_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
+ SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
+ SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
+}
+
+#define H2C_CMC_TBL_LEN 68
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ u8 macid = rtwvif->mac_id;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -731,25 +1014,26 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
skb_put(skb, H2C_CMC_TBL_LEN);
SET_CTRL_INFO_MACID(skb->data, macid);
SET_CTRL_INFO_OPERATION(skb->data, 1);
- SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
- SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
- SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
- SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
- SET_CMC_TBL_ANTSEL_A(skb->data, 0);
- SET_CMC_TBL_ANTSEL_B(skb->data, 0);
- SET_CMC_TBL_ANTSEL_C(skb->data, 0);
- SET_CMC_TBL_ANTSEL_D(skb->data, 0);
+ if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
+ SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
+ __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
+ SET_CMC_TBL_ANTSEL_A(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_B(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_C(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_D(skb->data, 0);
+ }
SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
- H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -758,7 +1042,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
@@ -766,26 +1050,28 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
{
bool ppe_th;
u8 ppe16, ppe8;
- u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
- u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0];
+ u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+ u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
u8 ru_bitmap;
u8 n, idx, sh;
u16 ppe;
int i;
- if (!sta->he_cap.has_he)
+ if (!sta->deflink.he_cap.has_he)
return;
ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
- sta->he_cap.he_cap_elem.phy_cap_info[6]);
+ sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
if (!ppe_th) {
u8 pad;
pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
- sta->he_cap.he_cap_elem.phy_cap_info[9]);
+ sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
for (i = 0; i < RTW89_PPE_BW_NUM; i++)
pads[i] = pad;
+
+ return;
}
ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
@@ -802,7 +1088,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
sh = n & 7;
n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
- ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx]));
+ ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
@@ -820,29 +1106,38 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u16 lowest_rate;
+ int ret;
memset(pads, 0, sizeof(pads));
- __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ if (sta)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
+
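+ /* P2P forbids CCK rates, so use OFDM 6M even on the 2 GHz band */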
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
}
skb_put(skb, H2C_CMC_TBL_LEN);
- SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_MACID(skb->data, mac_id);
SET_CTRL_INFO_OPERATION(skb->data, 1);
SET_CMC_TBL_DISRTSFB(skb->data, 1);
SET_CMC_TBL_DISDATAFB(skb->data, 1);
- if (hal->current_band_type == RTW89_BAND_2G)
- SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1);
- else
- SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6);
+ SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -850,17 +1145,30 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
else
SET_CMC_TBL_ULDL(skb->data, 0);
SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
- SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
- SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
- SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
- SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
+ SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
+ } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
+ SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
+ }
+ if (sta)
+ SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
+ sta->deflink.he_cap.has_he);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
- H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -869,15 +1177,17 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
return -ENOMEM;
@@ -896,10 +1206,50 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ chip->h2c_cctl_func_id, 0, 1,
+ H2C_CMC_TBL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CMC_TBL_LEN);
+ SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_OPERATION(skb->data, 1);
+
+ __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -908,33 +1258,110 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+#define H2C_BCN_BASE_LEN 12
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct sk_buff *skb;
+ struct sk_buff *skb_beacon;
+ u16 tim_offset;
+ int bcn_total_len;
+ u16 beacon_rate;
+ int ret;
+
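+ /* P2P forbids CCK rates, so beacon at OFDM 6M even on 2 GHz */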
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
+ NULL, 0);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
+
+ bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BCN_BASE_LEN);
+
+ SET_BCN_UPD_PORT(skb->data, rtwvif->port);
+ SET_BCN_UPD_MBSSID(skb->data, 0);
+ SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
+ SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
+ SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
+ SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
+ SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
+ SET_BCN_UPD_RATE(skb->data, beacon_rate);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD, 0, 1,
+ bcn_total_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
}
-#define H2C_VIF_MAINTAIN_LEN 4
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode)
+#define H2C_ROLE_MAINTAIN_LEN 4
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode)
{
struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role;
+ int ret;
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ if (rtwsta)
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ else
+ self_role = rtwvif->self_role;
+ } else {
+ self_role = rtwvif->self_role;
+ }
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_VIF_MAINTAIN_LEN);
- SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id);
- SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role);
+ skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
+ SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
+ SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
- H2C_VIF_MAINTAIN_LEN);
+ H2C_ROLE_MAINTAIN_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -943,22 +1370,31 @@ int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn)
+ struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role = rtwvif->self_role;
+ u8 net_type = rtwvif->net_type;
+ int ret;
+
+ if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
+ }
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, rtwvif->mac_id);
+ SET_JOININFO_MACID(skb->data, mac_id);
SET_JOININFO_OP(skb->data, dis_conn);
SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
SET_JOININFO_WMM(skb->data, rtwvif->wmm);
@@ -968,16 +1404,17 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
SET_JOININFO_TF_MAC_PAD(skb->data, 0);
SET_JOININFO_DL_T_PE(skb->data, 0);
SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type);
+ SET_JOININFO_NET_TYPE(skb->data, net_type);
SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role);
+ SET_JOININFO_SELF_ROLE(skb->data, self_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_JOININFO, 0, 1,
H2C_JOIN_INFO_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -986,7 +1423,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
@@ -995,8 +1432,9 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
struct rtw89_fw_macid_pause_grp h2c = {{0}};
u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
@@ -1011,7 +1449,8 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1020,7 +1459,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_EDCA_LEN 12
@@ -1028,8 +1467,9 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u8 ac, u32 val)
{
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
return -ENOMEM;
@@ -1046,7 +1486,48 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_USR_EDCA, 0, 1,
H2C_EDCA_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_TSF32_TOGL_LEN 4
+int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool en)
+{
+ struct sk_buff *skb;
+ u16 early_us = en ? 2000 : 0;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_TSF32_TOGL_LEN);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
+ RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
+ RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
+ RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_TSF32_TOGL, 0, 0,
+ H2C_TSF32_TOGL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1055,7 +1536,7 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_OFLD_CFG_LEN 8
@@ -1063,8 +1544,9 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
return -ENOMEM;
@@ -1076,7 +1558,8 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
H2C_FUNC_OFLD_CFG, 0, 1,
H2C_OFLD_CFG_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1085,7 +1568,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_RA_LEN 16
@@ -1093,8 +1576,9 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
{
struct sk_buff *skb;
u8 *cmd;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
@@ -1123,6 +1607,8 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
+ RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);
if (csi) {
RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
@@ -1141,7 +1627,8 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
H2C_RA_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1150,7 +1637,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_CXDRVHDR 2
@@ -1164,8 +1651,9 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
struct rtw89_btc_ant_info *ant = &module->ant;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
return -ENOMEM;
@@ -1200,7 +1688,8 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_INIT);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1209,10 +1698,15 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
-#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define PORT_DATA_OFFSET 4
+#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
+#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
+ H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
+ H2C_LEN_CXDRVHDR)
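+/* The v1 layout widens each per-port record by PORT_DATA_OFFSET bytes
+ * and appends a DBCC info block at the tail.
+ */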
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1221,10 +1715,12 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role *active = role_info->active_role;
struct sk_buff *skb;
+ u8 offset = 0;
u8 *cmd;
+ int ret;
int i;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
return -ENOMEM;
@@ -1251,20 +1747,20 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) {
- RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
- RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
- RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
- RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i);
- RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i);
- RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i);
- RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i);
- RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i);
- RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i);
- RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i);
- RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i);
- RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i);
- RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i);
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1272,7 +1768,8 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_ROLE);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1281,18 +1778,103 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
+ struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
+ struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
+ struct sk_buff *skb;
+ u8 *cmd, offset;
+ int ret;
+ int i;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
+
+ RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
+ RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
+ RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
+ RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
+
+ offset = PORT_DATA_OFFSET;
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
+ }
+
+ offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
+ RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
+ RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_ROLE_V1);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
return -ENOMEM;
@@ -1306,14 +1888,16 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
- RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
+ if (chip->chip_id == RTL8852A)
+ RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_CTRL);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1322,7 +1906,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
@@ -1333,8 +1917,9 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
return -ENOMEM;
@@ -1356,7 +1941,210 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_RFK);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_LEN_PKT_OFLD 4
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_PKT_OFLD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PACKET_OFLD, 1, 1,
+ H2C_LEN_PKT_OFLD);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+ struct sk_buff *skb_ofld)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+ u8 alloc_id;
+ int ret;
+
+ alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
+ RTW89_MAX_PKT_OFLD_NUM);
+ if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
+ return -ENOSPC;
+
+ *id = alloc_id;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_PKT_OFLD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
+ skb_put_data(skb, skb_ofld->data, skb_ofld->len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PACKET_OFLD, 1, 1,
+ H2C_LEN_PKT_OFLD + skb_ofld->len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_LEN_SCAN_LIST_OFFLOAD 4
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+ struct list_head *chan_list)
+{
+ struct rtw89_mac_chinfo *ch_info;
+ struct sk_buff *skb;
+ int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
+ /* in units of 4 bytes */
+ RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
+
+ RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
+ RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
+ RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
+ RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
+ RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
+ RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
+ RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
+ RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
+ RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
+ RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
+ RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
+ RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
+ RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
+ RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
+ RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
+ RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
+ RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
+ RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
+ RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
+ RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
+ RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
+ RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define H2C_LEN_SCAN_OFFLOAD 28
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct sk_buff *skb;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
+ RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
+ RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
+ RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
+ RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
+ RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
+ RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
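+ /* with target channel mode, the firmware can switch back to the
+ * operating channel described here once the scan ends
+ */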
+ if (option->target_ch_mode) {
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
+ scan_info->op_pri_ch);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
+ scan_info->op_chan);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
+ scan_info->op_band);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD, 1, 1,
+ H2C_LEN_SCAN_OFFLOAD);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1365,7 +2153,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
@@ -1375,8 +2163,9 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
u8 class = info->rf_path == RF_PATH_A ?
H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
return -ENOMEM;
@@ -1387,7 +2176,8 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
H2C_CAT_OUTSRC, class, page, 0, 0,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1396,16 +2186,59 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
+int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_fw_h2c_rf_get_mccch *mccch;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, sizeof(*mccch));
+ mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
+
+ mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]);
+ mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
+ mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
+ mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
+ mccch->current_channel = cpu_to_le32(chan->channel);
+ mccch->current_band_type = cpu_to_le32(chan->band_type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
+ H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
+ sizeof(*mccch));
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
{
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
return -ENOMEM;
@@ -1416,7 +2249,8 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1425,21 +2259,23 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
struct sk_buff *skb;
+ int ret;
- skb = rtw89_fw_h2c_alloc_skb_no_hdr(len);
+ skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
return -ENOMEM;
}
skb_put_data(skb, buf, len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1448,7 +2284,7 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
@@ -1533,15 +2369,13 @@ void rtw89_fw_c2h_work(struct work_struct *work)
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *info)
{
- static const u32 h2c_reg[RTW89_H2CREG_MAX] = {
- R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,
- R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3
- };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *h2c_reg = chip->h2c_regs;
u8 i, val, len;
int ret;
ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
- rtwdev, R_AX_H2CREG_CTRL);
+ rtwdev, chip->h2c_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "FW does not process h2c registers\n");
return ret;
@@ -1555,7 +2389,7 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_H2CREG_MAX; i++)
rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
- rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER);
+ rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
return 0;
}
@@ -1563,10 +2397,8 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *info)
{
- static const u32 c2h_reg[RTW89_C2HREG_MAX] = {
- R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1,
- R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3
- };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *c2h_reg = chip->c2h_regs;
u32 ret;
u8 i, val;
@@ -1574,7 +2406,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
RTW89_C2H_TIMEOUT, false, rtwdev,
- R_AX_C2HREG_CTRL);
+ chip->c2h_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "c2h reg timeout\n");
return ret;
@@ -1583,7 +2415,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_C2HREG_MAX; i++)
info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
- rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0);
+ rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
@@ -1640,3 +2472,427 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
rtw89_fw_prog_cnt_dump(rtwdev);
}
+
+static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+{
+ struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+ u8 idx;
+
+ for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
+ if (!(rtwdev->chip->support_bands & BIT(idx)))
+ continue;
+
+ list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ rtw89_core_release_bit_map(rtwdev->pkt_offload,
+ info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+ }
+}
+
+static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct sk_buff *skb)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+ struct rtw89_pktofld_info *info;
+ struct sk_buff *new;
+ int ret = 0;
+ u8 band;
+
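+ /* build one probe request template per supported band, appending the
+ * band-specific and common IEs, and register it with the firmware
+ * packet-offload engine
+ */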
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ if (!(rtwdev->chip->support_bands & BIT(band)))
+ continue;
+
+ new = skb_copy(skb, GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ skb_put_data(new, ies->ies[band], ies->len[band]);
+ skb_put_data(new, ies->common_ies, ies->common_ie_len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ kfree_skb(new);
+ goto out;
+ }
+
+ list_add_tail(&info->list, &scan_info->pkt_list[band]);
+ ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+ if (ret)
+ goto out;
+
+ kfree_skb(new);
+ }
+out:
+ return ret;
+}
+
+static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct sk_buff *skb;
+ u8 num = req->n_ssids, i;
+ int ret;
+
+ for (i = 0; i < num; i++) {
+ skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+ req->ssids[i].ssid,
+ req->ssids[i].ssid_len,
+ req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
+ kfree_skb(skb);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_pkt = true;
+ ch_info->cfg_tx_pwr = false;
+ ch_info->tx_pwr_idx = 0;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+
+ if (ssid_num) {
+ ch_info->num_pkt = ssid_num;
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ ch_info->probe_id = info->id;
+ ch_info->pkt_id[probe_count] = info->id;
+ if (++probe_count >= ssid_num)
+ break;
+ }
+ if (probe_count != ssid_num)
+ rtw89_err(rtwdev, "SSID num differs from list len\n");
+ }
+
+ switch (chan_type) {
+ case RTW89_CHAN_OPERATE:
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ ch_info->central_ch = scan_info->op_chan;
+ ch_info->pri_ch = scan_info->op_pri_ch;
+ ch_info->ch_band = scan_info->op_band;
+ ch_info->bw = scan_info->op_bw;
+ ch_info->tx_null = true;
+ ch_info->num_pkt = 0;
+ break;
+ case RTW89_CHAN_DFS:
+ ch_info->period = max_t(u8, ch_info->period,
+ RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_err(rtwdev, "Channel type out of bound\n");
+ }
+}
+
+static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
+ int list_len, off_chan_time = 0;
+ enum rtw89_chan_type type;
+ int ret = 0;
+ u32 idx;
+
+ INIT_LIST_HEAD(&chan_list);
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
+ off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ kfree(ch_info);
+ goto out;
+ }
+
+ type = RTW89_CHAN_OPERATE;
+ tmp->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
+ list_add_tail(&tmp->list, &chan_list);
+ off_chan_time = 0;
+ list_len++;
+ }
+ list_add_tail(&ch_info->list, &chan_list);
+ off_chan_time += ch_info->period;
+ }
+ rtwdev->scan_info.last_chan_idx = idx;
+ ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
+static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update probe request failed\n");
+ goto out;
+ }
+ ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
+out:
+ return ret;
+}
+
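+/* Prepare for a firmware-offloaded scan: stop the TX queues,
+ * optionally randomize the source MAC address, and relax the RX
+ * filter so beacons and broadcast frames from any BSS are accepted.
+ */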
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = &scan_req->req;
+ u32 rx_fltr = rtwdev->hal.rx_fltr;
+ u8 mac_addr[ETH_ALEN];
+
+ rtwdev->scan_info.scanning_vif = vif;
+ rtwdev->scan_info.last_chan_idx = 0;
+ rtwvif->scan_ies = &scan_req->ies;
+ rtwvif->scan_req = req;
+ ieee80211_stop_queues(rtwdev->hw);
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(mac_addr, req->mac_addr,
+ req->mac_addr_mask);
+ else
+ ether_addr_copy(mac_addr, vif->addr);
+ rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
+
+ rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rx_fltr &= ~B_AX_A_BC;
+ rx_fltr &= ~B_AX_A_A1_MATCH;
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rx_fltr);
+}
+
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+ struct rtw89_vif *rtwvif;
+
+ if (!vif)
+ return;
+
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+
+ rtw89_core_scan_complete(rtwdev, vif, true);
+ ieee80211_scan_completed(rtwdev->hw, &info);
+ ieee80211_wake_queues(rtwdev->hw);
+
+ rtw89_release_pkt_list(rtwdev);
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtwvif->scan_req = NULL;
+ rtwvif->scan_ies = NULL;
+ rtwdev->scan_info.last_chan_idx = 0;
+ rtwdev->scan_info.scanning_vif = NULL;
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
+ rtw89_store_op_chan(rtwdev, false);
+}
+
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ rtw89_hw_scan_offload(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, true);
+}
+
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct rtw89_scan_option opt = {0};
+ struct rtw89_vif *rtwvif;
+ int ret = 0;
+
+ rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+ if (!rtwvif)
+ return -EINVAL;
+
+ opt.enable = enable;
+ opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
+ if (enable) {
+ ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
+ if (ret)
+ goto out;
+ }
+ ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+out:
+ return ret;
+}
+
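+/* backup == true saves the current operating channel into scan_info;
+ * backup == false restores it as the active entity channel.
+ */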
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_chan new;
+
+ if (backup) {
+ scan_info->op_pri_ch = cur->primary_channel;
+ scan_info->op_chan = cur->channel;
+ scan_info->op_bw = cur->band_width;
+ scan_info->op_band = cur->band_type;
+ } else {
+ rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
+ scan_info->op_band, scan_info->op_bw);
+ rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
+ }
+}
+
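+/* Debug helper: ask firmware to raise a CPU exception on purpose
+ * (H2C_CAT_TEST / FW status test class) so the crash-dump and
+ * recovery paths can be exercised.
+ */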
+#define H2C_FW_CPU_EXCEPTION_LEN 4
+#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
+int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for fw cpu exception\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
+ RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
+ H2C_FW_CPU_EXCEPTION_TYPE_DEF);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_TEST,
+ H2C_CL_FW_STATUS_TEST,
+ H2C_FUNC_CPU_EXCEPTION, 0, 0,
+ H2C_FW_CPU_EXCEPTION_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
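+/* Request firmware to drop queued packets matching the selector.
+ * Only the per-MACID, per-AC "once" selectors are known to be fully
+ * supported; other selectors are sent anyway with a debug note.
+ */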
+#define H2C_PKT_DROP_LEN 24
+int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ const struct rtw89_pkt_drop_params *params)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for packet drop\n");
+ return -ENOMEM;
+ }
+
+ switch (params->sel) {
+ case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "pkt drop H2C may not fully support sel %d yet\n",
+ params->sel);
+ break;
+ }
+
+ skb_put(skb, H2C_PKT_DROP_LEN);
+ RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
+ RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
+ RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
+ RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
+ RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
+ RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PKT_DROP, 0, 0,
+ H2C_PKT_DROP_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 2d36dc27222f..0047d5d0e9b1 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -63,21 +63,32 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
};
-struct rtw89_c2h_phy_cap {
- u32 func:7;
- u32 ack:1;
- u32 len:4;
- u32 seq:4;
- u32 rx_nss:8;
- u32 bw:8;
-
- u32 tx_nss:8;
- u32 prot:8;
- u32 nic:8;
- u32 wl_func:8;
-
- u32 hw_type:8;
-} __packed;
+#define RTW89_GET_C2H_PHYCAP_FUNC(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(6, 0))
+#define RTW89_GET_C2H_PHYCAP_ACK(info) \
+ u32_get_bits(*((const u32 *)(info)), BIT(7))
+#define RTW89_GET_C2H_PHYCAP_LEN(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(11, 8))
+#define RTW89_GET_C2H_PHYCAP_SEQ(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(15, 12))
+#define RTW89_GET_C2H_PHYCAP_RX_NSS(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(23, 16))
+#define RTW89_GET_C2H_PHYCAP_BW(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(31, 24))
+#define RTW89_GET_C2H_PHYCAP_TX_NSS(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(7, 0))
+#define RTW89_GET_C2H_PHYCAP_PROT(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(15, 8))
+#define RTW89_GET_C2H_PHYCAP_NIC(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(23, 16))
+#define RTW89_GET_C2H_PHYCAP_WL_FUNC(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(31, 24))
+#define RTW89_GET_C2H_PHYCAP_HW_TYPE(info) \
+ u32_get_bits(*((const u32 *)(info) + 2), GENMASK(7, 0))
+#define RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(info) \
+ u32_get_bits(*((const u32 *)(info) + 3), GENMASK(15, 8))
+#define RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(info) \
+ u32_get_bits(*((const u32 *)(info) + 3), GENMASK(23, 16))
enum rtw89_fw_c2h_category {
RTW89_C2H_CAT_TEST,
@@ -123,6 +134,34 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_MCC = 20,
};
+enum rtw89_pkt_offload_op {
+ RTW89_PKT_OFLD_OP_ADD,
+ RTW89_PKT_OFLD_OP_DEL,
+ RTW89_PKT_OFLD_OP_READ,
+};
+
+enum rtw89_scanofld_notify_reason {
+ RTW89_SCAN_DWELL_NOTIFY,
+ RTW89_SCAN_PRE_TX_NOTIFY,
+ RTW89_SCAN_POST_TX_NOTIFY,
+ RTW89_SCAN_ENTER_CH_NOTIFY,
+ RTW89_SCAN_LEAVE_CH_NOTIFY,
+ RTW89_SCAN_END_SCAN_NOTIFY,
+};
+
+enum rtw89_chan_type {
+ RTW89_CHAN_OPERATE = 0,
+ RTW89_CHAN_ACTIVE,
+ RTW89_CHAN_DFS,
+};
+
+enum rtw89_p2pps_action {
+ RTW89_P2P_ACT_INIT = 0,
+ RTW89_P2P_ACT_UPDATE = 1,
+ RTW89_P2P_ACT_REMOVE = 2,
+ RTW89_P2P_ACT_TERMINATE = 3,
+};
+
#define FWDL_SECTION_MAX_NUM 10
#define FWDL_SECTION_CHKSUM_LEN 8
#define FWDL_SECTION_PER_PKT_LEN 2020
@@ -156,6 +195,54 @@ struct rtw89_h2creg_sch_tx_en {
u16 rsvd:15;
} __packed;
+#define RTW89_H2C_MAX_SIZE 2048
+#define RTW89_CHANNEL_TIME 45
+#define RTW89_DFS_CHAN_TIME 105
+#define RTW89_OFF_CHAN_TIME 100
+#define RTW89_DWELL_TIME 20
+#define RTW89_SCAN_WIDTH 0
+#define RTW89_SCANOFLD_MAX_SSID 8
+#define RTW89_SCANOFLD_MAX_IE_LEN 512
+#define RTW89_SCANOFLD_PKT_NONE 0xFF
+#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_MAC_CHINFO_SIZE 24
+#define RTW89_SCAN_LIST_GUARD 4
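+/* Channel entries per H2C: as many RTW89_MAC_CHINFO_SIZE records as
+ * fit into one RTW89_H2C_MAX_SIZE buffer, minus a small guard margin.
+ */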
+#define RTW89_SCAN_LIST_LIMIT \
+ ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
+
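+/* Host-side mirror of one firmware scan-channel record, serialized
+ * into RTW89_MAC_CHINFO_SIZE bytes by the RTW89_SET_FWCMD_CHINFO_*
+ * helpers. pkt_id[] holds the packet-offload IDs of the probe
+ * requests to transmit on this channel.
+ */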
+struct rtw89_mac_chinfo {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 notify_action:5;
+ u8 num_pkt:4;
+ u8 tx_pkt:1;
+ u8 pause_data:1;
+ u8 ch_band:2;
+ u8 probe_id;
+ u8 dfs_ch:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 cfg_tx_pwr:1;
+ u8 rsvd0:4;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u16 tx_pwr_idx;
+ u8 rsvd1;
+ struct list_head list;
+};
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+};
+
+struct rtw89_pktofld_info {
+ struct list_head list;
+ u8 id;
+};
+
static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0));
@@ -281,6 +368,16 @@ static inline void RTW89_SET_FWCMD_RA_CR_TBL_SEL(void *cmd, u32 val)
le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10));
}
+static inline void RTW89_SET_FWCMD_RA_FIX_GILTF_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(11));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIX_GILTF(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(14, 12));
+}
+
static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16));
@@ -908,6 +1005,36 @@ static inline void SET_CMC_TBL_ANTSEL_D(void *table, u32 val)
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D,
BIT(31));
}
+
+#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0)
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(1, 0));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(1, 0));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(3, 2));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(3, 2));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(5, 4));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(5, 4));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 6));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(7, 6));
+}
+
#define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0)
static inline void SET_CMC_TBL_ADDR_CAM_INDEX(void *table, u32 val)
{
@@ -936,7 +1063,6 @@ static inline void SET_CMC_TBL_DOPPLER_CTRL(void *table, u32 val)
le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL,
GENMASK(19, 18));
}
-#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0)
static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20));
@@ -1041,13 +1167,14 @@ static inline void SET_CMC_TBL_CSI_GI_LTF(void *table, u32 val)
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF,
GENMASK(27, 25));
}
-#define SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0)
-static inline void SET_CMC_TBL_CSI_GID_SEL(void *table, u32 val)
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160(void *table, u32 val)
{
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29));
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL,
- BIT(29));
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(29, 28));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(29, 28));
}
+
#define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0)
static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
{
@@ -1056,6 +1183,408 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
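+/* DCTL (D-MAC control) table V1 setters: MACID and OPERATION live in
+ * dword 0; the remaining setters write the value into dwords 1..7 and
+ * mirror the field mask into dwords 9..15, so firmware updates only
+ * the masked bits of each entry.
+ */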
+static inline void SET_DCTL_MACID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
+}
+
+static inline void SET_DCTL_OPERATION_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7));
+}
+
+#define SET_DCTL_MASK_QOS_FIELD_V1 GENMASK(7, 0)
+static inline void SET_DCTL_QOS_FIELD_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_FIELD_V1,
+ GENMASK(7, 0));
+}
+
+#define SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID GENMASK(6, 0)
+static inline void SET_DCTL_HW_EXSEQ_MACID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 8));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID,
+ GENMASK(14, 8));
+}
+
+#define SET_DCTL_MASK_QOS_DATA BIT(0)
+static inline void SET_DCTL_QOS_DATA_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_DATA,
+ BIT(15));
+}
+
+#define SET_DCTL_MASK_AES_IV_L GENMASK(15, 0)
+static inline void SET_DCTL_AES_IV_L_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 16));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_AES_IV_L,
+ GENMASK(31, 16));
+}
+
+#define SET_DCTL_MASK_AES_IV_H GENMASK(31, 0)
+static inline void SET_DCTL_AES_IV_H_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 0));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_DCTL_MASK_AES_IV_H,
+ GENMASK(31, 0));
+}
+
+#define SET_DCTL_MASK_SEQ0 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ0_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 0));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ0,
+ GENMASK(11, 0));
+}
+
+#define SET_DCTL_MASK_SEQ1 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ1_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(23, 12));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ1,
+ GENMASK(23, 12));
+}
+
+#define SET_DCTL_MASK_AMSDU_MAX_LEN GENMASK(2, 0)
+static inline void SET_DCTL_AMSDU_MAX_LEN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 24));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_AMSDU_MAX_LEN,
+ GENMASK(26, 24));
+}
+
+#define SET_DCTL_MASK_STA_AMSDU_EN BIT(0)
+static inline void SET_DCTL_STA_AMSDU_EN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_STA_AMSDU_EN,
+ BIT(27));
+}
+
+#define SET_DCTL_MASK_CHKSUM_OFLD_EN BIT(0)
+static inline void SET_DCTL_CHKSUM_OFLD_EN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(28));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_CHKSUM_OFLD_EN,
+ BIT(28));
+}
+
+#define SET_DCTL_MASK_WITH_LLC BIT(0)
+static inline void SET_DCTL_WITH_LLC_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(29));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_WITH_LLC,
+ BIT(29));
+}
+
+#define SET_DCTL_MASK_SEQ2 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ2_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(11, 0));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ2,
+ GENMASK(11, 0));
+}
+
+#define SET_DCTL_MASK_SEQ3 GENMASK(11, 0)
+static inline void SET_DCTL_SEQ3_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(23, 12));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ3,
+ GENMASK(23, 12));
+}
+
+#define SET_DCTL_MASK_TGT_IND GENMASK(3, 0)
+static inline void SET_DCTL_TGT_IND_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 24));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND,
+ GENMASK(27, 24));
+}
+
+#define SET_DCTL_MASK_TGT_IND_EN BIT(0)
+static inline void SET_DCTL_TGT_IND_EN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, BIT(28));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND_EN,
+ BIT(28));
+}
+
+#define SET_DCTL_MASK_HTC_LB GENMASK(2, 0)
+static inline void SET_DCTL_HTC_LB_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 29));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_HTC_LB,
+ GENMASK(31, 29));
+}
+
+#define SET_DCTL_MASK_MHDR_LEN GENMASK(4, 0)
+static inline void SET_DCTL_MHDR_LEN_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(4, 0));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_MHDR_LEN,
+ GENMASK(4, 0));
+}
+
+#define SET_DCTL_MASK_VLAN_TAG_VALID BIT(0)
+static inline void SET_DCTL_VLAN_TAG_VALID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(5));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_VALID,
+ BIT(5));
+}
+
+#define SET_DCTL_MASK_VLAN_TAG_SEL GENMASK(1, 0)
+static inline void SET_DCTL_VLAN_TAG_SEL_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 6));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_SEL,
+ GENMASK(7, 6));
+}
+
+#define SET_DCTL_MASK_HTC_ORDER BIT(0)
+static inline void SET_DCTL_HTC_ORDER_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_HTC_ORDER,
+ BIT(8));
+}
+
+#define SET_DCTL_MASK_SEC_KEY_ID GENMASK(1, 0)
+static inline void SET_DCTL_SEC_KEY_ID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(10, 9));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_KEY_ID,
+ GENMASK(10, 9));
+}
+
+#define SET_DCTL_MASK_WAPI BIT(0)
+static inline void SET_DCTL_WAPI_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_WAPI,
+ BIT(15));
+}
+
+#define SET_DCTL_MASK_SEC_ENT_MODE GENMASK(1, 0)
+static inline void SET_DCTL_SEC_ENT_MODE_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(17, 16));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENT_MODE,
+ GENMASK(17, 16));
+}
+
+#define SET_DCTL_MASK_SEC_ENTX_KEYID GENMASK(1, 0)
+static inline void SET_DCTL_SEC_ENT0_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(19, 18));
+}
+
+static inline void SET_DCTL_SEC_ENT1_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(21, 20));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(21, 20));
+}
+
+static inline void SET_DCTL_SEC_ENT2_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(23, 22));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(23, 22));
+}
+
+static inline void SET_DCTL_SEC_ENT3_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(25, 24));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(25, 24));
+}
+
+static inline void SET_DCTL_SEC_ENT4_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(27, 26));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(27, 26));
+}
+
+static inline void SET_DCTL_SEC_ENT5_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(29, 28));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(29, 28));
+}
+
+static inline void SET_DCTL_SEC_ENT6_KEYID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 30));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
+ GENMASK(31, 30));
+}
+
+#define SET_DCTL_MASK_SEC_ENT_VALID GENMASK(7, 0)
+static inline void SET_DCTL_SEC_ENT_VALID_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENT_VALID,
+ GENMASK(7, 0));
+}
+
+#define SET_DCTL_MASK_SEC_ENTX GENMASK(7, 0)
+static inline void SET_DCTL_SEC_ENT0_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(15, 8));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(15, 8));
+}
+
+static inline void SET_DCTL_SEC_ENT1_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 16));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(23, 16));
+}
+
+static inline void SET_DCTL_SEC_ENT2_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(31, 24));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(31, 24));
+}
+
+static inline void SET_DCTL_SEC_ENT3_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(7, 0));
+}
+
+static inline void SET_DCTL_SEC_ENT4_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(15, 8));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(15, 8));
+}
+
+static inline void SET_DCTL_SEC_ENT5_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 16));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(23, 16));
+}
+
+static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 24));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
+ GENMASK(31, 24));
+}
+
+static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
+}
+
+static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
+}
+
+static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
+}
+
+static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
+}
+
+static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
+}
+
+static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
+}
+
+static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
+}
+
+static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
+}
+
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1226,6 +1755,26 @@ static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
}
+static inline void SET_BA_CAM_UID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
+}
+
+static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
+}
+
+static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
+}
+
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1276,6 +1825,41 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
}
+static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_SEL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_PORT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MBSSID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(15, 8));
+}
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -1316,6 +1900,14 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_MAX,
};
+enum rtw89_scan_mode {
+ RTW89_SCAN_IMMEDIATE,
+};
+
+enum rtw89_scan_type {
+ RTW89_SCAN_ONCE,
+};
+
static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
{
u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0));
@@ -1476,69 +2068,104 @@ static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NAN(void *cmd, u16 val)
le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, GENMASK(3, 1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(4));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(5));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, GENMASK(7, 6));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (7 + (12 + offset) * n), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n, u8 offset)
+{
+ u8p_replace_bits((u8 *)cmd + (7 + (12 + offset) * n), val, GENMASK(7, 1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0));
+ u8p_replace_bits((u8 *)cmd + (8 + (12 + offset) * n), val, GENMASK(7, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1));
+ u8p_replace_bits((u8 *)cmd + (9 + (12 + offset) * n), val, GENMASK(7, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (10 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (12 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (14 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (16 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(void *cmd, u32 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + (20 + (12 + offset) * n)), val, GENMASK(31, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(void *cmd, u32 val, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset), val, GENMASK(31, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_MROLE_NOA(void *cmd, u32 val, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 4), val, GENMASK(31, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_EN(void *cmd, u32 val, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_CHG(void *cmd, u32 val, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(1));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(void *cmd, u32 val, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, GENMASK(3, 2));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(void *cmd, u32 val, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0));
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(4));
}
static inline void RTW89_SET_FWCMD_CXCTRL_MANUAL(void *cmd, u32 val)
@@ -1586,6 +2213,322 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10));
}
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(10, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
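+/* Serializers for one struct rtw89_mac_chinfo into the 24-byte H2C
+ * channel record: bytes 0..3 timing/channel, bytes 4..7 flags, bytes
+ * 8..15 the eight probe packet IDs, bytes 16..17 the TX power index.
+ */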
+static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_CY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PORT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(18, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, BIT(19));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_OPERATION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(21, 20));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 22));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_START_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(4, 3));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 5));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(void *cmd,
+ u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PROBE_REQ_PKT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_PD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SLOW_PD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_HIGH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_P2PID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_NOAID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 12));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_ACT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(19, 16));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(20));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_ALL_SLEP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(21));
+}
+
+static inline void RTW89_SET_FWCMD_NOA_START_TIME(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 1) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_INTERVAL(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 2) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_DURATION(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 3) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_COUNT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_NOA_CTWINDOW(void *cmd, u32 val)
+{
+ u8 ctwnd;
+
+ if (!(val & IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return;
+ ctwnd = FIELD_GET(IEEE80211_P2P_OPPPS_CTWINDOW_MASK, val);
+ le32p_replace_bits((__le32 *)(cmd) + 4, ctwnd, GENMASK(23, 8));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_PORT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(4, 2));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_EARLY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 16));
+}
+
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
@@ -1642,6 +2585,28 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
#define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \
FIELD_PREP(GENMASK(2, 0), mcs))
+#define RTW89_GET_MAC_C2H_PKTOFLD_ID(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_PKTOFLD_OP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(10, 8))
+#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
+
+#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
+#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
+#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
+#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
+#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+
#define RTW89_FW_HDR_SIZE 32
#define RTW89_FW_SECTION_HDR_SIZE 16
@@ -1660,7 +2625,14 @@ struct rtw89_mfw_info {
struct rtw89_mfw_hdr {
u8 sig; /* RTW89_MFW_SIG */
u8 fw_nr;
- u8 rsvd[14];
+ u8 rsvd0[2];
+ struct {
+ u8 major;
+ u8 minor;
+ u8 sub;
+ u8 idx;
+ } ver;
+ u8 rsvd1[8];
struct rtw89_mfw_info info[];
} __packed;
@@ -1691,6 +2663,12 @@ struct rtw89_fw_h2c_rf_reg_info {
#define FWCMD_TYPE_H2C 0
+#define H2C_CAT_TEST 0x0
+
+/* CLASS 5 - FW STATUS TEST */
+#define H2C_CL_FW_STATUS_TEST 0x5
+#define H2C_FUNC_CPU_EXCEPTION 0x1
+
#define H2C_CAT_MAC 0x1
/* CLASS 0 - FW INFO */
@@ -1701,6 +2679,7 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
#define H2C_FUNC_MAC_LPS_PARM 0x0
+#define H2C_FUNC_P2P_ACT 0x1
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -1709,6 +2688,9 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 5 - Frame Exchange */
#define H2C_CL_MAC_FR_EXCHG 0x5
#define H2C_FUNC_MAC_CCTLINFO_UD 0x2
+#define H2C_FUNC_MAC_BCN_UPD 0x5
+#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9
+#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -1721,9 +2703,14 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 9 - FW offload */
#define H2C_CL_MAC_FW_OFLD 0x9
+#define H2C_FUNC_PACKET_OFLD 0x1
#define H2C_FUNC_MAC_MACID_PAUSE 0x8
#define H2C_FUNC_USR_EDCA 0xF
+#define H2C_FUNC_TSF32_TOGL 0x10
#define H2C_FUNC_OFLD_CFG 0x14
+#define H2C_FUNC_ADD_SCANOFLD_CH 0x16
+#define H2C_FUNC_SCANOFLD 0x17
+#define H2C_FUNC_PKT_DROP 0x1b
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
@@ -1740,9 +2727,34 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
+#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
+#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+
+struct rtw89_fw_h2c_rf_get_mccch {
+ __le32 ch_0;
+ __le32 ch_1;
+ __le32 band_0;
+ __le32 band_1;
+ __le32 current_channel;
+ __le32 current_band_type;
+} __packed;
+
+#define RTW89_FW_RSVD_PLE_SIZE 0x800
+
+#define RTW89_WCPU_BASE_MASK GENMASK(27, 0)
+
+#define RTW89_FW_BACKTRACE_INFO_SIZE 8
+#define RTW89_VALID_FW_BACKTRACE_SIZE(_size) \
+ ((_size) % RTW89_FW_BACKTRACE_INFO_SIZE == 0)
+
+#define RTW89_FW_BACKTRACE_MAX_SIZE 512 /* 8 * 64 (entries) */
+#define RTW89_FW_BACKTRACE_KEY 0xBACEBACE
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev);
int rtw89_fw_recognize(struct rtw89_dev *rtwdev);
+void rtw89_early_fw_feature_recognize(struct device *device,
+ const struct rtw89_chip_info *chip,
+ u32 *early_feat_map);
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type);
int rtw89_load_firmware(struct rtw89_dev *rtwdev);
void rtw89_unload_firmware(struct rtw89_dev *rtwdev);
@@ -1750,21 +2762,30 @@ int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev);
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
+int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode);
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode);
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn);
+ struct rtw89_sta *rtwsta, bool dis_conn);
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause);
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1773,11 +2794,21 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+ struct sk_buff *skb_ofld);
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+ struct list_head *chan_list);
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
+int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -1785,16 +2816,42 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
+void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev);
+
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
-struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len);
-struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len);
+struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
+struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *h2c_info,
struct rtw89_mac_c2h_info *c2h_info);
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup);
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted);
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable);
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ const struct rtw89_pkt_drop_params *params);
+int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_p2p_noa_desc *desc,
+ u8 act, u8 noa_id);
+int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool en);
+
+static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->bacam_v1)
+ rtw89_fw_h2c_init_ba_cam_v1(rtwdev);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b98c47e9ecfe..0508dfca8edf 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
@@ -10,6 +11,46 @@
#include "reg.h"
#include "util.h"
+const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = {
+ [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
+ [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
+ [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
+ [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
+ [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR,
+ [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR,
+ [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
+ [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
+ [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
+ [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
+ [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR,
+ [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR,
+};
+
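+/* Indirect MAC memory access: program the target address into
+ * R_AX_FILTER_MODEL_ADDR, then read or write the data through the
+ * R_AX_INDIR_ACCESS_ENTRY window.
+ */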
+static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset,
+ u32 val, enum rtw89_mac_mem_sel sel)
+{
+ u32 addr = rtw89_mac_mem_base_addrs[sel] + offset;
+
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val);
+}
+
+static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset,
+ enum rtw89_mac_mem_sel sel)
+{
+ u32 addr = rtw89_mac_mem_base_addrs[sel] + offset;
+
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
+ return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY);
+}
+
int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_mac_hwmod_sel sel)
{
@@ -172,6 +213,7 @@ static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
qempty.dle_type = DLE_CTRL_TYPE_PLE;
qempty.grpsel = 0;
+ qempty.qempty = ~(u32)0;
ret = dle_dfi_qempty(rtwdev, &qempty);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
@@ -236,7 +278,9 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
u32 dmac_err, cmac_err;
if (err != MAC_AX_ERR_L1_ERR_DMAC &&
- err != MAC_AX_ERR_L0_PROMOTE_TO_L1)
+ err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC0 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC1)
return;
rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
@@ -437,7 +481,7 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
{
- u32 err;
+ u32 err, err_scnr;
int ret;
ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000,
@@ -450,6 +494,12 @@ u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
err = rtw89_read32(rtwdev, R_AX_HALT_C2H);
rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+ err_scnr = RTW89_ERROR_SCENARIO(err);
+ if (err_scnr == RTW89_WCPU_CPU_EXCEPTION)
+ err = MAC_AX_ERR_CPU_EXCEPTION;
+ else if (err_scnr == RTW89_WCPU_ASSERTION)
+ err = MAC_AX_ERR_ASSERTION;
+
rtw89_fw_st_dbg_dump(rtwdev);
rtw89_mac_dump_err_status(rtwdev, err);
@@ -481,10 +531,6 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);
-const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = {
- 2, 40, 0, 0, 1, 0, 0, 0
-};
-
static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
@@ -567,6 +613,8 @@ static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
int ret = 0;
@@ -586,13 +634,15 @@ static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
(cfg[ch].grp ? B_AX_GRP : 0);
- rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val);
+ rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);
return 0;
}
static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
struct rtw89_hfc_ch_info *info = param->ch_info;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
@@ -606,7 +656,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
if (ch > RTW89_DMA_H2C)
return -EINVAL;
- val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4);
+ val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
if (ch < RTW89_DMA_H2C)
info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
@@ -618,6 +668,8 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
u32 val;
int ret;
@@ -632,16 +684,18 @@ static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
- rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val);
+ rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);
val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
- rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val);
+ rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);
return 0;
}
static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -653,20 +707,20 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1);
+ val = rtw89_read32(rtwdev, regs->pub_page_info1);
info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3);
+ val = rtw89_read32(rtwdev, regs->pub_page_info3);
info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
info->pub_aval =
- u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2),
+ u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
B_AX_PUB_AVAL_PG_MASK);
info->wp_aval =
- u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1),
+ u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
B_AX_WP_AVAL_PG_MASK);
- val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
@@ -679,21 +733,21 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
prec_cfg->wp_ch811_full_cond =
u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
- val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL);
+ val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2);
+ val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);
- val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1);
+ val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);
- val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2);
+ val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1);
+ val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
@@ -706,20 +760,24 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
u32 val;
val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
- rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+ rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
- rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL,
+ rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
B_AX_HCI_FC_CH12_FULL_COND_MASK,
prec_cfg->h2c_full_cond);
}
static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -727,18 +785,18 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
- rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+ rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
- rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val);
+ rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);
val = u32_encode_bits(prec_cfg->wp_ch07_prec,
B_AX_PREC_PAGE_WP_CH07_MASK) |
u32_encode_bits(prec_cfg->wp_ch811_prec,
B_AX_PREC_PAGE_WP_CH811_MASK);
- rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val);
+ rtw89_write32(rtwdev, regs->wp_page_ctrl1, val);
- val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL),
+ val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl),
param->mode, B_AX_HCI_FC_MODE_MASK);
val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
B_AX_HCI_FC_WD_FULL_COND_MASK);
@@ -748,25 +806,29 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
- rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+ rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
u32 val;
- val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
param->en = en;
param->h2c_en = h2c_en;
val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
(val & ~B_AX_HCI_FC_CH12_EN);
- rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+ rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 dma_ch_mask = chip->dma_ch_mask;
u8 ch;
u32 ret = 0;
@@ -788,6 +850,8 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ if (dma_ch_mask & BIT(ch))
+ continue;
ret = hfc_ch_ctrl(rtwdev, ch);
if (ret)
return ret;
@@ -803,6 +867,8 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
udelay(10);
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ if (dma_ch_mask & BIT(ch))
+ continue;
ret = hfc_upd_ch_info(rtwdev, ch);
if (ret)
return ret;
@@ -915,23 +981,31 @@ rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
}
static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
- enum rtw89_rpwm_req_pwr_state req_pwr_state)
+ enum rtw89_rpwm_req_pwr_state req_pwr_state,
+ bool notify_wake)
{
u16 request;
+ spin_lock_bh(&rtwdev->rpwm_lock);
+
request = rtw89_read16(rtwdev, R_AX_RPWM);
request ^= request | PS_RPWM_TOGGLE;
-
- rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
- RPWM_SEQ_NUM_MAX;
- request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num);
-
request |= req_pwr_state;
- if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
- request |= PS_RPWM_ACK;
+ if (notify_wake) {
+ request |= PS_RPWM_NOTIFY_WAKE;
+ } else {
+ rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
+ RPWM_SEQ_NUM_MAX;
+ request |= FIELD_PREP(PS_RPWM_SEQ_NUM,
+ rtwdev->mac.rpwm_seq_num);
+ if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
+ request |= PS_RPWM_ACK;
+ }
rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+ spin_unlock_bh(&rtwdev->rpwm_lock);
}
static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
@@ -961,7 +1035,7 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
return 0;
rpwm_req_num = rtwdev->mac.rpwm_seq_num;
- cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM,
+ cpwm_rsp_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr,
PS_CPWM_RSP_SEQ_NUM);
if (rpwm_req_num != cpwm_rsp_seq)
@@ -970,11 +1044,11 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) &
CPWM_SEQ_NUM_MAX;
- cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM);
+ cpwm_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_SEQ_NUM);
if (cpwm_seq != rtwdev->mac.cpwm_seq_num)
return -EPERM;
- cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE);
+ cpwm_status = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_STATE);
if (cpwm_status != req_pwr_state)
return -EPERM;
@@ -984,19 +1058,39 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
enum rtw89_rpwm_req_pwr_state state;
+ unsigned long delay = enter ? 10 : 150;
int ret;
+ int i;
if (enter)
state = rtw89_mac_get_req_pwr_state(rtwdev);
else
state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
- rtw89_mac_send_rpwm(rtwdev, state);
- ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
- 1000, 15000, false, rtwdev, state);
- if (ret)
- rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
- enter ? "entering" : "leaving");
+ for (i = 0; i < RPWM_TRY_CNT; i++) {
+ rtw89_mac_send_rpwm(rtwdev, state, false);
+ ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret,
+ !ret, delay, 15000, false,
+ rtwdev, state);
+ if (!ret)
+ break;
+
+ if (i == RPWM_TRY_CNT - 1)
+ rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
+ enter ? "entering" : "leaving");
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "%d time firmware failed to ack for %s ps mode\n",
+ i + 1, enter ? "entering" : "leaving");
+ }
+}
+
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_rpwm_req_pwr_state state;
+
+ state = rtw89_mac_get_req_pwr_state(rtwdev);
+ rtw89_mac_send_rpwm(rtwdev, state, true);
}
static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
@@ -1004,14 +1098,17 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
#define PWR_ACT 1
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_pwr_cfg * const *cfg_seq;
- struct rtw89_hal *hal = &rtwdev->hal;
+ int (*cfg_func)(struct rtw89_dev *rtwdev);
int ret;
u8 val;
- if (on)
+ if (on) {
cfg_seq = chip->pwr_on_seq;
- else
+ cfg_func = chip->ops->pwr_on_func;
+ } else {
cfg_seq = chip->pwr_off_seq;
+ cfg_func = chip->ops->pwr_off_func;
+ }
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
__rtw89_leave_ps_mode(rtwdev);
@@ -1022,7 +1119,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
return -EBUSY;
}
- ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq);
+ ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
if (ret)
return ret;
@@ -1033,7 +1130,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
- hal->current_channel = 0;
+ rtw89_set_entity_state(rtwdev, false);
}
return 0;
@@ -1055,7 +1152,8 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN |
- B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN;
+ B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN |
+ B_AX_CMAC_CRPRT;
ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN |
B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN |
B_AX_RMAC_CKEN;
@@ -1092,18 +1190,31 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
static int dmac_func_en(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
- val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
- B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
- B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN |
- B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN);
+ if (chip_id == RTL8852C)
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+ B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+ B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+ B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+ B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN);
+ else
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+ B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+ B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+ B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+ B_AX_DMAC_CRPRT);
rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);
val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
- B_AX_WD_RLS_CLK_EN);
+ B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
return 0;
@@ -1111,7 +1222,11 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
static int chip_func_en(struct rtw89_dev *rtwdev)
{
- rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK);
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
+ B_AX_OCP_L1_MASK);
return 0;
}
@@ -1135,50 +1250,62 @@ static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
return ret;
}
-/* PCIE 64 */
-const struct rtw89_dle_size wde_size0 = {
- RTW89_WDE_PG_64, 4095, 1,
-};
-
-/* DLFW */
-const struct rtw89_dle_size wde_size4 = {
- RTW89_WDE_PG_64, 0, 4096,
-};
-
-/* PCIE */
-const struct rtw89_dle_size ple_size0 = {
- RTW89_PLE_PG_128, 1520, 16,
-};
-
-/* DLFW */
-const struct rtw89_dle_size ple_size4 = {
- RTW89_PLE_PG_128, 64, 1472,
-};
-
-/* PCIE 64 */
-const struct rtw89_wde_quota wde_qt0 = {
- 3792, 196, 0, 107,
-};
-
-/* DLFW */
-const struct rtw89_wde_quota wde_qt4 = {
- 0, 0, 0, 0,
-};
-
-/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt4 = {
- 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
-};
-
-/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt5 = {
- 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,
-};
-
-/* DLFW */
-const struct rtw89_ple_quota ple_qt13 = {
- 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
+const struct rtw89_mac_size_set rtw89_mac_size = {
+ .hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0},
+ /* PCIE 64 */
+ .wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
+ /* DLFW */
+ .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
+ /* PCIE 64 */
+ .wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+ /* DLFW */
+ .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
+ /* 8852C DLFW */
+ .wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
+ /* 8852C PCIE SCC */
+ .wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
+ /* PCIE */
+ .ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
+ /* DLFW */
+ .ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
+ /* PCIE 64 */
+ .ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
+ /* DLFW */
+ .ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
+ /* 8852C DLFW */
+ .ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
+ /* 8852C PCIE SCC */
+ .ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
+ /* PCIE 64 */
+ .wde_qt0 = {3792, 196, 0, 107,},
+ /* DLFW */
+ .wde_qt4 = {0, 0, 0, 0,},
+ /* PCIE 64 */
+ .wde_qt6 = {448, 48, 0, 16,},
+ /* 8852C DLFW */
+ .wde_qt17 = {0, 0, 0, 0,},
+ /* 8852C PCIE SCC */
+ .wde_qt18 = {3228, 60, 0, 40,},
+ /* PCIE SCC */
+ .ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
+ /* PCIE SCC */
+ .ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
+ /* DLFW */
+ .ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
+ /* PCIE 64 */
+ .ple_qt18 = {147, 0, 16, 20, 17, 13, 89, 0, 32, 14, 8, 0,},
+ /* DLFW 52C */
+ .ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
+ /* DLFW 52C */
+ .ple_qt45 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
+ /* 8852C PCIE SCC */
+ .ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
+ /* 8852C PCIE SCC */
+ .ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
+ /* PCIE 64 */
+ .ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
};
+EXPORT_SYMBOL(rtw89_mac_size);
static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
enum rtw89_qta_mode mode)
@@ -1211,6 +1338,17 @@ static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
}
+static u32 dle_expected_used_size(struct rtw89_dev *rtwdev,
+ enum rtw89_qta_mode mode)
+{
+ u32 size = rtwdev->chip->fifo_size;
+
+ if (mode == RTW89_QTA_SCC)
+ size -= rtwdev->chip->dle_scc_rsvd_size;
+
+ return size;
+}
+
static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
{
if (enable)
@@ -1334,6 +1472,8 @@ static void ple_quota_cfg(struct rtw89_dev *rtwdev,
SET_QUOTA(bb_rpt, PLE, 8);
SET_QUOTA(wd_rel, PLE, 9);
SET_QUOTA(cpu_io, PLE, 10);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ SET_QUOTA(tx_rpt, PLE, 11);
}
#undef SET_QUOTA
@@ -1376,7 +1516,8 @@ static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
+ dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
ret = -EINVAL;
goto error;
@@ -1421,6 +1562,43 @@ error:
return ret;
}
+static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ u32 reg, max_preld_size, min_rsvd_size;
+
+ max_preld_size = (mac_idx == RTW89_MAC_0 ?
+ PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ?
+ R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0;
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size);
+ rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN);
+
+ min_rsvd_size = PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ?
+ R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1;
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND);
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size);
+
+ return 0;
+}
+
+static bool is_qta_poh(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
+}
+
+static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || !is_qta_poh(rtwdev))
+ return 0;
+
+ return preload_init_set(rtwdev, mac_idx, mode);
+}
+
static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
{
u32 msk32;
@@ -1447,6 +1625,17 @@ static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
return false;
}
+static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ return;
+
+ rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK,
+ SS2F_PATH_WLCPU);
+}
+
static int sta_sch_init(struct rtw89_dev *rtwdev)
{
u32 p_val;
@@ -1469,6 +1658,9 @@ static int sta_sch_init(struct rtw89_dev *rtwdev)
}
rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG);
+ rtw89_write32_clr(rtwdev, R_AX_SS_CTRL, B_AX_SS_NONEMPTY_SS2FINFO_EN);
+
+ _patch_ss2f_path(rtwdev);
return 0;
}
@@ -1492,6 +1684,7 @@ static int mpdu_proc_init(struct rtw89_dev *rtwdev)
static int sec_eng_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val = 0;
int ret;
@@ -1505,7 +1698,8 @@ static int sec_eng_init(struct rtw89_dev *rtwdev)
/* init TX encryption */
val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC);
val |= (B_AX_MC_DEC | B_AX_BC_DEC);
- val &= ~B_AX_TX_PARTIAL_MODE;
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ val &= ~B_AX_TX_PARTIAL_MODE;
rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val);
/* init MIC ICV append */
@@ -1515,6 +1709,10 @@ static int sec_eng_init(struct rtw89_dev *rtwdev)
/* option init */
rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val);
+ if (chip->chip_id == RTL8852C)
+ rtw89_write32_mask(rtwdev, R_AX_SEC_DEBUG1,
+ B_AX_TX_TIMEOUT_SEL_MASK, AX_TX_TO_VAL);
+
return 0;
}
@@ -1528,6 +1726,12 @@ static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
+ return ret;
+ }
+
ret = hfc_init(rtwdev, true, true, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
@@ -1573,7 +1777,7 @@ static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32(rtwdev, reg, val);
ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR),
- 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR);
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, reg);
if (ret) {
rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n");
return ret;
@@ -1586,13 +1790,39 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 ret;
u32 reg;
+ u32 val;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
return ret;
+ reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
+ SIFS_MACTXEN_T1_V1);
+ else
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
+ SIFS_MACTXEN_T1);
+
+ if (rtwdev->chip->chip_id == RTL8852B) {
+ reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV);
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_CCA_CFG_0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN);
+
reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US);
+ if (rtwdev->chip->chip_id == RTL8852C) {
+ val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
+ B_AX_TX_PARTIAL_MODE);
+ if (!val)
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+ } else {
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+ }
return 0;
}
@@ -1719,11 +1949,12 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA |
B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 |
B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 |
- B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA);
+ B_AX_CTN_CHK_CCA_P20);
val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 |
B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 |
B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 |
- B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV);
+ B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV |
+ B_AX_SIFS_CHK_EDCCA);
rtw89_write32(rtwdev, reg, val);
@@ -1732,6 +1963,16 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
+static int nav_ctrl_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN |
+ B_AX_WMAC_TF_UP_NAV_EN |
+ B_AX_WMAC_NAV_UPPER_EN);
+ rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_25MS);
+
+ return 0;
+}
+
static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
@@ -1758,11 +1999,20 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx);
rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN);
+ reg = rtw89_mac_reg_by_idx(R_AX_TCR0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TXD_FIFO_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE);
+ rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE);
+
return 0;
}
static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs;
u32 reg, val, sifs;
int ret;
@@ -1793,15 +2043,36 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN);
+ reg = rtw89_mac_reg_by_idx(rrsr->ref_rate.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data);
+ reg = rtw89_mac_reg_by_idx(rrsr->rsc.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data);
+
return 0;
}
+static void rst_bacam(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK,
+ S_AX_BACAM_RST_ALL);
+
+ ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0,
+ 1, 1000, false,
+ rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to reset BA CAM\n");
+}
+
static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
#define TRXCFG_RMAC_CCA_TO 32
#define TRXCFG_RMAC_DATA_TO 15
#define RX_MAX_LEN_UNIT 512
#define PLD_RLS_MAX_PG 127
+#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
int ret;
u32 reg, rx_max_len, rx_qta;
u16 val;
@@ -1810,6 +2081,9 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
if (ret)
return ret;
+ if (mac_idx == RTW89_MAC_0)
+ rst_bacam(rtwdev);
+
reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx);
rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL);
@@ -1829,11 +2103,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rx_qta = rtwdev->mac.dle_info.c0_rx_qta;
else
rx_qta = rtwdev->mac.dle_info.c1_rx_qta;
- rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta;
- rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size /
- RX_MAX_LEN_UNIT;
- rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ?
- B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len;
+ rx_qta = min_t(u32, rx_qta, PLD_RLS_MAX_PG);
+ rx_max_len = rx_qta * rtwdev->mac.dle_info.ple_pg_size;
+ rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN);
+ rx_max_len /= RX_MAX_LEN_UNIT;
rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);
if (rtwdev->chip->chip_id == RTL8852A &&
@@ -1853,6 +2126,7 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -1867,6 +2141,11 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
rtw89_write32(rtwdev, reg, val);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_RRSR1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
+ }
+
return 0;
}
@@ -1897,6 +2176,8 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
val = rtw89_read32(rtwdev, reg);
val = u32_replace_bits(val, S_AX_CTS2S_TH_1K,
B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK);
+ val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B,
+ B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
val |= B_AX_HW_CTS2SELF_EN;
rtw89_write32(rtwdev, reg, val);
@@ -1907,11 +2188,38 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32(rtwdev, reg, val);
}
- reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx);
- val = rtw89_read32(rtwdev, reg);
- val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
- val |= B_AX_HW_CTS2SELF_EN;
- rtw89_write32(rtwdev, reg, val);
+ if (mac_idx == RTW89_MAC_0) {
+ rtw89_write8_set(rtwdev, R_AX_PTCL_COMMON_SETTING_0,
+ B_AX_CMAC_TX_MODE_0 | B_AX_CMAC_TX_MODE_1);
+ rtw89_write8_clr(rtwdev, R_AX_PTCL_COMMON_SETTING_0,
+ B_AX_PTCL_TRIGGER_SS_EN_0 |
+ B_AX_PTCL_TRIGGER_SS_EN_1 |
+ B_AX_PTCL_TRIGGER_SS_EN_UL);
+ rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL,
+ B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
+ } else if (mac_idx == RTW89_MAC_1) {
+ rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL_C1,
+ B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
+ }
+
+ return 0;
+}
+
+static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg;
+ int ret;
+
+ if (chip_id != RTL8852A && chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXDMA_CTRL_0, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE);
return 0;
}
@@ -1947,6 +2255,13 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = nav_ctrl_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
ret = spatial_reuse_init(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n",
@@ -1984,6 +2299,12 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = cmac_dma_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret);
+ return ret;
+ }
+
return ret;
}
@@ -2011,23 +2332,42 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_mac_c2h_info c2h_info = {0};
- struct rtw89_c2h_phy_cap *cap =
- (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0];
+ u8 tx_nss;
+ u8 rx_nss;
+ u8 tx_ant;
+ u8 rx_ant;
u32 ret;
ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
if (ret)
return ret;
- hal->tx_nss = cap->tx_nss ?
- min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss;
- hal->rx_nss = cap->rx_nss ?
- min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss;
+ tx_nss = RTW89_GET_C2H_PHYCAP_TX_NSS(c2h_info.c2hreg);
+ rx_nss = RTW89_GET_C2H_PHYCAP_RX_NSS(c2h_info.c2hreg);
+ tx_ant = RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(c2h_info.c2hreg);
+ rx_ant = RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(c2h_info.c2hreg);
+
+ hal->tx_nss = tx_nss ? min_t(u8, tx_nss, chip->tx_nss) : chip->tx_nss;
+ hal->rx_nss = rx_nss ? min_t(u8, rx_nss, chip->rx_nss) : chip->rx_nss;
+
+ if (tx_ant == 1)
+ hal->antenna_tx = RF_B;
+ if (rx_ant == 1)
+ hal->antenna_rx = RF_B;
+
+ if (tx_nss == 1 && tx_ant == 2 && rx_ant == 2) {
+ hal->antenna_tx = RF_B;
+ hal->tx_path_diversity = true;
+ }
rtw89_debug(rtwdev, RTW89_DBG_FW,
"phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n",
- hal->tx_nss, cap->tx_nss, chip->tx_nss,
- hal->rx_nss, cap->rx_nss, chip->rx_nss);
+ hal->tx_nss, tx_nss, chip->tx_nss,
+ hal->rx_nss, rx_nss, chip->rx_nss);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n",
+ tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx);
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity);
return 0;
}
@@ -2079,8 +2419,26 @@ static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
- u16 *tx_en, enum rtw89_sch_tx_sel sel)
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
{
int ret;
@@ -2089,7 +2447,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
switch (sel) {
case RTW89_SCH_TX_SEL_ALL:
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK);
if (ret)
return ret;
break;
@@ -2106,7 +2465,49 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
return ret;
break;
case RTW89_SCH_TX_SEL_MACID:
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx);
+
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
if (ret)
return ret;
break;
@@ -2116,20 +2517,34 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
{
int ret;
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK);
if (ret)
return ret;
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx);
-static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
- bool wd)
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
+
+u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd)
{
u32 val, reg;
int ret;
@@ -2149,9 +2564,8 @@ static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val);
}
-static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
- struct rtw89_cpuio_ctrl *ctrl_para,
- bool wd)
+int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd)
{
u32 val, cmd_type, reg;
int ret;
@@ -2216,7 +2630,8 @@ static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
return -EINVAL;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
+ dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
return -EINVAL;
}
@@ -2290,9 +2705,9 @@ static int band1_enable(struct rtw89_dev *rtwdev)
int ret, i;
u32 sleep_bak[4] = {0};
u32 pause_bak[4] = {0};
- u16 tx_en;
+ u32 tx_en;
- ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ ret = rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
if (ret) {
rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret);
return ret;
@@ -2322,7 +2737,7 @@ static int band1_enable(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]);
}
- ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en);
+ ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret);
return ret;
@@ -2346,10 +2761,206 @@ static int band1_enable(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_wdrls_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, B_AX_WDRLS_IMR_EN_CLR);
+ rtw89_write32_set(rtwdev, R_AX_WDRLS_ERR_IMR, imr->wdrls_imr_set);
+}
+
+static void rtw89_wsec_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_set(rtwdev, imr->wsec_imr_reg, imr->wsec_imr_set);
+}
+
+static void rtw89_mpdu_trx_imr_enable(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR,
+ B_AX_TX_GET_ERRPKTID_INT_EN |
+ B_AX_TX_NXT_ERRPKTID_INT_EN |
+ B_AX_TX_MPDU_SIZE_ZERO_INT_EN |
+ B_AX_TX_OFFSET_ERR_INT_EN |
+ B_AX_TX_HDR3_SIZE_ERR_INT_EN);
+ if (chip_id == RTL8852C)
+ rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR,
+ B_AX_TX_ETH_TYPE_ERR_EN |
+ B_AX_TX_LLC_PRE_ERR_EN |
+ B_AX_TX_NW_TYPE_ERR_EN |
+ B_AX_TX_KSRCH_ERR_EN);
+ rtw89_write32_set(rtwdev, R_AX_MPDU_TX_ERR_IMR,
+ imr->mpdu_tx_imr_set);
+
+ rtw89_write32_clr(rtwdev, R_AX_MPDU_RX_ERR_IMR,
+ B_AX_GETPKTID_ERR_INT_EN |
+ B_AX_MHDRLEN_ERR_INT_EN |
+ B_AX_RPT_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_MPDU_RX_ERR_IMR,
+ imr->mpdu_rx_imr_set);
+}
+
+static void rtw89_sta_sch_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR,
+ B_AX_SEARCH_HANG_TIMEOUT_INT_EN |
+ B_AX_RPT_HANG_TIMEOUT_INT_EN |
+ B_AX_PLE_B_PKTID_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR,
+ imr->sta_sch_imr_set);
+}
+
+static void rtw89_txpktctl_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b0_reg,
+ imr->txpktctl_imr_b0_clr);
+ rtw89_write32_set(rtwdev, imr->txpktctl_imr_b0_reg,
+ imr->txpktctl_imr_b0_set);
+ rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b1_reg,
+ imr->txpktctl_imr_b1_clr);
+ rtw89_write32_set(rtwdev, imr->txpktctl_imr_b1_reg,
+ imr->txpktctl_imr_b1_set);
+}
+
+static void rtw89_wde_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_set);
+}
+
+static void rtw89_ple_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set);
+}
+
+static void rtw89_pktin_imr_enable(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_PKTIN_ERR_IMR,
+ B_AX_PKTIN_GETPKTID_ERR_INT_EN);
+}
+
+static void rtw89_dispatcher_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
+ imr->host_disp_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
+ imr->host_disp_imr_set);
+ rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
+ imr->cpu_disp_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
+ imr->cpu_disp_imr_set);
+ rtw89_write32_clr(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR,
+ imr->other_disp_imr_clr);
+ rtw89_write32_set(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR,
+ imr->other_disp_imr_set);
+}
+
+static void rtw89_cpuio_imr_enable(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_clr(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_CLR);
+ rtw89_write32_set(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_SET);
+}
+
+static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+
+ rtw89_write32_set(rtwdev, imr->bbrpt_com_err_imr_reg,
+ B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN);
+ rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
+ B_AX_BBRPT_CHINFO_IMR_CLR);
+ rtw89_write32_set(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
+ imr->bbrpt_err_imr_set);
+ rtw89_write32_set(rtwdev, imr->bbrpt_dfs_err_imr_reg,
+ B_AX_BBRPT_DFS_TO_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR);
+}
+
+static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN |
+ B_AX_FSM_TIMEOUT_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN);
+}
+
+static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set);
+}
+
+static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->cdma_imr_0_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr);
+ rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set);
+
+ if (chip_id == RTL8852C) {
+ reg = rtw89_mac_reg_by_idx(imr->cdma_imr_1_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr);
+ rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set);
+ }
+}
+
+static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->phy_intf_imr_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set);
+}
+
+static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->rmac_imr_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set);
+}
+
+static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(imr->tmac_imr_reg, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr);
+ rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set);
+}
+
static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_mac_hwmod_sel sel)
{
- u32 reg, val;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel);
@@ -2360,60 +2971,24 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
}
if (sel == RTW89_DMAC_SEL) {
- rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
- B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN |
- B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN |
- B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
- B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN |
- B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
- B_AX_HDT_PKT_FAIL_DBG_INT_EN |
- B_AX_HDT_OFFSET_UNMATCH_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
- B_AX_CPU_SHIFT_EN_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR,
- B_AX_PLE_GETNPG_STRPG_ERR_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR,
- B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN);
- rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN);
- rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
- B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN);
+ rtw89_wdrls_imr_enable(rtwdev);
+ rtw89_wsec_imr_enable(rtwdev);
+ rtw89_mpdu_trx_imr_enable(rtwdev);
+ rtw89_sta_sch_imr_enable(rtwdev);
+ rtw89_txpktctl_imr_enable(rtwdev);
+ rtw89_wde_imr_enable(rtwdev);
+ rtw89_ple_imr_enable(rtwdev);
+ rtw89_pktin_imr_enable(rtwdev);
+ rtw89_dispatcher_imr_enable(rtwdev);
+ rtw89_cpuio_imr_enable(rtwdev);
+ rtw89_bbrpt_imr_enable(rtwdev);
} else if (sel == RTW89_CMAC_SEL) {
- reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
- rtw89_write32_clr(rtwdev, reg,
- B_AX_SORT_NON_IDLE_ERR_INT_EN);
-
- reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx);
- rtw89_write32_clr(rtwdev, reg,
- B_AX_NO_RESERVE_PAGE_ERR_IMR |
- B_AX_RXDATA_FSM_HANG_ERROR_IMR);
-
- reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
- val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN |
- B_AX_TX_RECORD_PKTID_ERR_INT_EN |
- B_AX_FSM_TIMEOUT_ERR_INT_EN;
- rtw89_write32(rtwdev, reg, val);
-
- reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx);
- rtw89_write32_set(rtwdev, reg,
- B_AX_PHY_TXON_TIMEOUT_INT_EN |
- B_AX_CCK_CCA_TIMEOUT_INT_EN |
- B_AX_OFDM_CCA_TIMEOUT_INT_EN |
- B_AX_DATA_ON_TIMEOUT_INT_EN |
- B_AX_STS_ON_TIMEOUT_INT_EN |
- B_AX_CSI_ON_TIMEOUT_INT_EN);
-
- reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx);
- val = rtw89_read32(rtwdev, reg);
- val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN |
- B_AX_RMAC_RX_TIMEOUT_INT_EN |
- B_AX_RMAC_CSI_TIMEOUT_INT_EN);
- val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN |
- B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN |
- B_AX_RMAC_CCA_TIMEOUT_INT_EN |
- B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN);
- rtw89_write32(rtwdev, reg, val);
+ rtw89_scheduler_imr_enable(rtwdev, mac_idx);
+ rtw89_ptcl_imr_enable(rtwdev, mac_idx);
+ rtw89_cdma_imr_enable(rtwdev, mac_idx);
+ rtw89_phy_intf_imr_enable(rtwdev, mac_idx);
+ rtw89_rmac_imr_enable(rtwdev, mac_idx);
+ rtw89_tmac_imr_enable(rtwdev, mac_idx);
} else {
return -EINVAL;
}
@@ -2421,6 +2996,19 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR,
+ en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS);
+ rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR,
+ en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS);
+ if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta)
+ rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1,
+ en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
+}
+
static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
{
int ret = 0;
@@ -2502,6 +3090,8 @@ static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
return ret;
}
+ rtw89_mac_err_imr_ctrl(rtwdev, true);
+
ret = set_host_rpr(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
@@ -2511,12 +3101,32 @@ static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+
+ rtw89_mac_mem_write(rtwdev, R_AX_WDT_CTRL,
+ WDT_CTRL_ALL_DIS, RTW89_MAC_MEM_CPU_LOCAL);
+
+ val32 = rtw89_mac_mem_read(rtwdev, R_AX_WDT_STATUS, RTW89_MAC_MEM_CPU_LOCAL);
+ val32 |= B_AX_FS_WDT_INT;
+ val32 &= ~B_AX_FS_WDT_INT_MSK;
+ rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL);
+}
+
static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
{
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
+ rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
+ B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+
+ rtw89_disable_fw_watchdog(rtwdev);
+
+ rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}
static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -2530,6 +3140,8 @@ static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_H2C, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_C2H, 0);
rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
@@ -2557,18 +3169,41 @@ static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
return 0;
}
-static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
+static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val;
int ret;
- val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
- B_AX_PKT_BUF_EN;
+ if (chip_id == RTL8852C)
+ val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
+ B_AX_PKT_BUF_EN | B_AX_H_AXIDMA_EN;
+ else
+ val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
+ B_AX_PKT_BUF_EN;
rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);
val = B_AX_DISPATCHER_CLK_EN;
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val);
+ if (chip_id != RTL8852C)
+ goto dle;
+
+ val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1);
+ val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST);
+ val |= FIELD_PREP(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B) |
+ B_AX_TXHCI_EN_V1 | B_AX_RXHCI_EN_V1;
+ rtw89_write32(rtwdev, R_AX_HAXI_INIT_CFG1, val);
+
+ rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP1,
+ B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH3 |
+ B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | B_AX_STOP_ACH6 |
+ B_AX_STOP_ACH7 | B_AX_STOP_CH8 | B_AX_STOP_CH9 |
+ B_AX_STOP_CH12 | B_AX_STOP_ACH2);
+ rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11);
+ rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN);
+
+dle:
ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret);
@@ -2584,13 +3219,7 @@ static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
return ret;
}
-static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
-{
- rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN,
- B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
-}
-
-void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2598,9 +3227,12 @@ void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+
+ return 0;
}
+EXPORT_SYMBOL(rtw89_mac_enable_bb_rf);
-void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2608,7 +3240,10 @@ void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+
+ return 0;
}
+EXPORT_SYMBOL(rtw89_mac_disable_bb_rf);
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
{
@@ -2622,7 +3257,11 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_mac_hci_func_en(rtwdev);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
+
+ ret = rtw89_mac_dmac_pre_init(rtwdev);
+ if (ret)
+ return ret;
if (rtwdev->hci.ops->mac_pre_init) {
ret = rtwdev->hci.ops->mac_pre_init(rtwdev);
@@ -2630,10 +3269,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_fw_dl_pre_init(rtwdev);
- if (ret)
- return ret;
-
rtw89_mac_disable_cpu(rtwdev);
ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
if (ret)
@@ -2654,7 +3289,9 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
- rtw89_mac_enable_bb_rf(rtwdev);
+ ret = rtw89_chip_enable_bb_rf(rtwdev);
+ if (ret)
+ goto fail;
ret = rtw89_mac_sys_init(rtwdev);
if (ret)
@@ -2705,7 +3342,7 @@ static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109);
}
-static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
{
u8 sh = FIELD_GET(GENMASK(4, 0), macid);
u8 grp = macid >> 5;
@@ -2864,6 +3501,36 @@ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
bcn_int);
}
+static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u32 hiq_win_addr[RTW89_PORT_NUM] = {
+ R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
+ R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
+ R_AX_PORT_HGQ_WINDOW_CFG + 3,
+ };
+ u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
+ u8 port = rtwvif->port;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx);
+ rtw89_write8(rtwdev, reg, win);
+}
+
+static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx);
+ rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE);
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
+ vif->bss_conf.dtim_period);
+}
+
static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
@@ -2967,6 +3634,26 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
BCN_ERLY_DEF);
}
+static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u16 val;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ return;
+
+ val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
+ B_AX_TBTT_SHIFT_OFST_SIGN;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_shift,
+ B_AX_TBTT_SHIFT_OFST_MASK, val);
+}
+
int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
@@ -2978,11 +3665,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
- ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false);
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false);
if (ret)
return ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE);
if (ret)
return ret;
@@ -2994,7 +3681,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id);
+ ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
if (ret)
return ret;
@@ -3005,7 +3692,7 @@ int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE);
if (ret)
return ret;
@@ -3034,13 +3721,16 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
- rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
@@ -3048,6 +3738,50 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
}
+static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *data)
+{
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+ bool *tolerated = data;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+ if (!elem || elem->datalen <= 10 ||
+ !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+ *tolerated = false;
+ rcu_read_unlock();
+}
+
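rtw89_mac_check_he_obss_narrow_bw_ru_iter is an all-of fold: cfg80211_bss_iter() invokes it once per known BSS on the chandef, *tolerated starts out true, and any single BSS lacking the narrow-bandwidth-RU tolerance bit in byte 10 of its extended capabilities forces it false, which in turn sets B_AX_RXTRIG_RU26_DIS below. A standalone sketch of that fold with invented inputs:

    #include <stdbool.h>
    #include <stdio.h>

    /* One callback per BSS: a single intolerant BSS flips the shared
     * verdict to false; nothing can flip it back to true.
     */
    static void check_bss(bool has_tolerance_bit, void *data)
    {
        bool *tolerated = data;

        if (!has_tolerance_bit)
            *tolerated = false;
    }

    int main(void)
    {
        bool caps[] = { true, true, false };   /* one intolerant BSS */
        bool tolerated = true;

        for (unsigned int i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
            check_bss(caps[i], &tolerated);

        printf("RU26 %s\n", tolerated ? "enabled" : "disabled");
        return 0;
    }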
+void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_hw *hw = rtwdev->hw;
+ bool tolerated = true;
+ u32 reg;
+
+ if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ rtw89_mac_check_he_obss_narrow_bw_ru_iter,
+ &tolerated);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ if (tolerated)
+ rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ else
+ rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+}
+
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
@@ -3084,6 +3818,69 @@ rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
{
}
+static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ return band == scan_info->op_band && channel == scan_info->op_pri_ch;
+}
+
+static void
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ struct rtw89_chan new;
+ u8 reason, status, tx_fail, band, actual_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx;
+ u16 chan;
+ int ret;
+
+ tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
+ status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
+ chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
+ reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
+ band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
+ actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
+
+ if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
+ band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ band, chan, reason, status, tx_fail, actual_period);
+
+ switch (reason) {
+ case RTW89_SCAN_LEAVE_CH_NOTIFY:
+ if (rtw89_is_op_chan(rtwdev, band, chan))
+ ieee80211_stop_queues(rtwdev->hw);
+ return;
+ case RTW89_SCAN_END_SCAN_NOTIFY:
+ if (rtwvif && rtwvif->scan_req &&
+ last_chan < rtwvif->scan_req->n_channels) {
+ ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
+ }
+ } else {
+ rtw89_hw_scan_complete(rtwdev, vif, false);
+ }
+ break;
+ case RTW89_SCAN_ENTER_CH_NOTIFY:
+ rtw89_chan_create(&new, chan, chan, band, RTW89_CHANNEL_WIDTH_20);
+ rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
+ if (rtw89_is_op_chan(rtwdev, band, chan)) {
+ rtw89_store_op_chan(rtwdev, false);
+ ieee80211_wake_queues(rtwdev->hw);
+ }
+ break;
+ default:
+ return;
+ }
+}
+
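The scan-offload handler plugs into sparse, function-ID-indexed dispatch tables (see the ofld/info arrays below); a NULL slot means the event is recognized but deliberately ignored. A self-contained sketch of that dispatch shape, including the table walk that rtw89_mac_c2h_handle presumably performs (it is not shown in this hunk):

    #include <stdio.h>

    typedef void (*c2h_handler)(const char *payload);

    static void handle_scanofld(const char *payload)
    {
        printf("scan offload rsp: %s\n", payload);
    }

    /* Sparse table indexed by C2H function ID; unset slots stay NULL. */
    static const c2h_handler handlers[16] = {
        [9] = handle_scanofld,  /* e.g. RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9 */
    };

    static void dispatch(unsigned int func, const char *payload)
    {
        if (func < 16 && handlers[func])
            handlers[func](payload);
        /* NULL slot or out-of-range ID: event silently dropped */
    }

    int main(void)
    {
        dispatch(9, "enter ch 36");  /* handled */
        dispatch(2, "bcn resend");   /* known ID, NULL handler: ignored */
        return 0;
    }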
static void
rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
@@ -3114,14 +3911,33 @@ rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
RTW89_GET_C2H_LOG_SRT_PRT(c2h->data));
}
+static void
+rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static void
+rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+}
+
+static void
+rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
[RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
- [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL,
+ [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
+ [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
+ [RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt,
};
static
@@ -3130,6 +3946,7 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
+ [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
};
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
@@ -3192,6 +4009,7 @@ error:
return false;
}
+EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@@ -3216,6 +4034,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return ret;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -3349,33 +4168,65 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_coex_init);
+
+int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex *coex)
+{
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG,
+ B_AX_BTC_EN | B_AX_BTG_LNA1_GAIN_SEL);
+ rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN);
+ rtw89_write16_set(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_EN);
+ rtw89_write16_clr(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_BRK_TXOP_EN);
+
+ switch (coex->pta_mode) {
+ case RTW89_MAC_AX_COEX_RTK_MODE:
+ rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK,
+ MAC_AX_RTK_MODE);
+ rtw89_write32_mask(rtwdev, R_AX_RTK_MODE_CFG_V1,
+ B_AX_SAMPLE_CLK_MASK, MAC_AX_RTK_RATE);
+ break;
+ case RTW89_MAC_AX_COEX_CSR_MODE:
+ rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK,
+ MAC_AX_CSR_MODE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_coex_init_v1);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
- u32 val, ret;
+ u32 val = 0, ret;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL;
- ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
- if (ret) {
- rtw89_err(rtwdev, "Read LTE fail!\n");
- return ret;
- }
- val = (gnt_cfg->band[0].gnt_bt ?
- B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) |
- (gnt_cfg->band[0].gnt_bt_sw_en ?
- B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) |
- (gnt_cfg->band[0].gnt_wl ?
- B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) |
- (gnt_cfg->band[0].gnt_wl_sw_en ?
- B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) |
- (gnt_cfg->band[1].gnt_bt ?
- B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) |
- (gnt_cfg->band[1].gnt_bt_sw_en ?
- B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) |
- (gnt_cfg->band[1].gnt_wl ?
- B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) |
- (gnt_cfg->band[1].gnt_wl_sw_en ?
- B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0);
ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
if (ret) {
rtw89_err(rtwdev, "Write LTE fail!\n");
@@ -3384,11 +4235,59 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt);
+
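The rtw89_mac_cfg_gnt rewrite above is behavior preserving on two counts: the deleted rtw89_mac_read_lte() result was overwritten unconditionally, so the read was dead, and the new if-chain ORs in exactly the bits the old ternary chain selected. A compile-and-run equivalence check for one band, using placeholder bit values:

    #include <assert.h>
    #include <stdint.h>

    #define GNT_BT_VAL   (1u << 0)   /* placeholder bits */
    #define GNT_BT_CTRL  (1u << 1)

    /* old style: one ternary-chained expression */
    static uint32_t build_old(int gnt_bt, int gnt_bt_sw_en)
    {
        return (gnt_bt ? GNT_BT_VAL : 0) |
               (gnt_bt_sw_en ? GNT_BT_CTRL : 0);
    }

    /* new style: accumulate with if-statements, as the patch now does */
    static uint32_t build_new(int gnt_bt, int gnt_bt_sw_en)
    {
        uint32_t val = 0;

        if (gnt_bt)
            val |= GNT_BT_VAL;
        if (gnt_bt_sw_en)
            val |= GNT_BT_CTRL;
        return val;
    }

    int main(void)
    {
        for (int a = 0; a <= 1; a++)
            for (int b = 0; b <= 1; b++)
                assert(build_old(a, b) == build_new(a, b));
        return 0;
    }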
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL |
+ B_AX_GNT_BT_TX_VAL;
+ else
+ val |= B_AX_WL_ACT_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+ B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL |
+ B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+ B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL |
+ B_AX_GNT_BT_TX_VAL;
+ else
+ val |= B_AX_WL_ACT_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+ B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL |
+ B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+ B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+ rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
- u8 val;
+ u16 val;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
@@ -3403,8 +4302,9 @@ int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
(plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) |
- (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0);
- rtw89_write8(rtwdev, reg, val);
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0) |
+ B_AX_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
return 0;
}
@@ -3442,6 +4342,28 @@ int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path);
+
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ int i;
+
+ if (wl)
+ return 0;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ }
+
+ return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
{
@@ -3514,6 +4436,10 @@ static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx)
u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) |
u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK));
+ reg = rtw89_mac_reg_by_idx(R_AX_CSIRPT_OPTION, mac_idx);
+ rtw89_write32_set(rtwdev, reg,
+ B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN);
+
return 0;
}
@@ -3526,7 +4452,7 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev,
u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
u8 port_sel = rtwvif->port;
u8 sound_dim = 3, t;
- u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info;
+ u8 *phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
u32 reg;
u16 val;
int ret;
@@ -3543,12 +4469,12 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev,
phy_cap[5]);
sound_dim = min(sound_dim, t);
}
- if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
- ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
+ if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
- sta->vht_cap.cap);
+ sta->deflink.vht_cap.cap);
sound_dim = min(sound_dim, t);
}
nc = min(nc, sound_dim);
@@ -3589,17 +4515,17 @@ static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev,
if (ret)
return ret;
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->deflink.vht_cap.vht_supported) {
rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
}
- if (sta->ht_cap.ht_supported) {
+ if (sta->deflink.ht_cap.ht_supported) {
rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
@@ -3845,3 +4771,96 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
return 0;
}
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+{
+ u32 val32;
+ int ret;
+
+ val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+ rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
+ offset, val, mask);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_write_xtal_si);
+
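Both xtal_si accessors spin on read_poll_timeout(), whose arguments are (op, val, cond, sleep_us, timeout_us, sleep_before_read, args...): it keeps storing op(args) into val until cond is true or timeout_us elapses (here 50 ms, polling every 50 us), returning 0 on success and -ETIMEDOUT otherwise. A simplified, sleep-free userspace model of that loop:

    #include <stdint.h>
    #include <stdio.h>

    #define POLL_BUSY  (1u << 31)  /* stand-in for B_AX_WL_XTAL_SI_CMD_POLL */

    static unsigned int reads;

    static uint32_t fake_read32(void)
    {
        /* pretend the hardware clears the busy bit on the third read */
        return ++reads >= 3 ? 0x5a : (0x5a | POLL_BUSY);
    }

    static int poll_until_idle(uint32_t *out, unsigned int max_tries)
    {
        for (unsigned int i = 0; i < max_tries; i++) {
            *out = fake_read32();
            if (!(*out & POLL_BUSY))
                return 0;   /* condition met, last read latched in *out */
        }
        return -110;        /* -ETIMEDOUT */
    }

    int main(void)
    {
        uint32_t val;
        int ret = poll_until_idle(&val, 10);

        printf("ret=%d val=0x%x after %u reads\n", ret, val, reads);
        return 0;
    }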
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+{
+ u32 val32;
+ int ret;
+
+ val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+ rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
+ return ret;
+ }
+
+ *val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);
+
+ return 0;
+}
+
+static
+void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
+{
+ static const enum rtw89_pkt_drop_sel sels[] = {
+ RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ };
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_pkt_drop_params params = {0};
+ int i;
+
+ params.mac_band = RTW89_MAC_0;
+ params.macid = rtwsta->mac_id;
+ params.port = rtwvif->port;
+ params.mbssid = 0;
+ params.tf_trs = rtwvif->trigger;
+
+ for (i = 0; i < ARRAY_SIZE(sels); i++) {
+ params.sel = sels[i];
+ rtw89_fw_h2c_pkt_drop(rtwdev, &params);
+ }
+}
+
+static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif *target = data;
+
+ if (rtwvif != target)
+ return;
+
+ rtw89_mac_pkt_drop_sta(rtwdev, rtwsta);
+}
+
+void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_mac_pkt_drop_vif_iter,
+ rtwvif);
+}
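rtw89_mac_pkt_drop_vif filters inside the iterator: ieee80211_iterate_stations_atomic() visits every station on the device, the callback skips stations owned by other vifs, and each match fires one drop request per access category. A minimal standalone model of that shape, with invented types and values:

    #include <stdio.h>

    struct sta { int vif_id; int mac_id; };

    static void drop_sta(const struct sta *s)
    {
        static const char * const acs[] = { "BE", "BK", "VI", "VO" };

        /* one drop request per access category, as the sels[] loop does */
        for (unsigned int i = 0; i < 4; i++)
            printf("drop macid %d ac %s\n", s->mac_id, acs[i]);
    }

    int main(void)
    {
        struct sta stas[] = { { 0, 1 }, { 1, 2 }, { 0, 3 } };
        int target_vif = 0;

        for (unsigned int i = 0; i < sizeof(stas) / sizeof(stas[0]); i++) {
            if (stas[i].vif_id != target_vif)  /* filter in the callback */
                continue;
            drop_sta(&stas[i]);
        }
        return 0;
    }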
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index b7d13edf7dd1..6f4ada1869a1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -6,11 +6,13 @@
#define __RTW89_MAC_H__
#include "core.h"
+#include "reg.h"
#define MAC_MEM_DUMP_PAGE_SIZE 0x40000
#define ADDR_CAM_ENT_SIZE 0x40
#define BSSID_CAM_ENT_SIZE 0x08
#define HFC_PAGE_UNIT 64
+#define RPWM_TRY_CNT 3
enum rtw89_mac_hwmod_sel {
RTW89_DMAC_SEL = 0,
@@ -245,6 +247,7 @@ enum rtw89_mac_dbg_port_sel {
#define TXD_FIFO_1_BASE_ADDR 0x188A1080
#define TXDATA_FIFO_0_BASE_ADDR 0x18856000
#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000
+#define CPU_LOCAL_BASE_ADDR 0x18003000
#define CCTL_INFO_SIZE 32
@@ -266,13 +269,15 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_TXD_FIFO_1,
RTW89_MAC_MEM_TXDATA_FIFO_0,
RTW89_MAC_MEM_TXDATA_FIFO_1,
+ RTW89_MAC_MEM_CPU_LOCAL,
+ RTW89_MAC_MEM_BSSID_CAM,
/* keep last */
- RTW89_MAC_MEM_LAST,
- RTW89_MAC_MEM_MAX = RTW89_MAC_MEM_LAST,
- RTW89_MAC_MEM_INVALID = RTW89_MAC_MEM_LAST,
+ RTW89_MAC_MEM_NUM,
};
+extern const u32 rtw89_mac_mem_base_addrs[];
+
enum rtw89_rpwm_req_pwr_state {
RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0,
RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1,
@@ -301,6 +306,8 @@ enum rtw89_mac_c2h_ofld_func {
RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
RTW89_MAC_C2H_FUNC_BCN_RESEND,
RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+ RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT = 0x6,
+ RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9,
RTW89_MAC_C2H_FUNC_OFLD_MAX,
};
@@ -308,6 +315,7 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_REC_ACK,
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
+ RTW89_MAC_C2H_FUNC_BCN_CNT,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
@@ -517,6 +525,13 @@ struct rtw89_mac_dle_dfi_qempty {
u32 qempty;
};
+enum rtw89_mac_error_scenario {
+ RTW89_WCPU_CPU_EXCEPTION = 2,
+ RTW89_WCPU_ASSERTION = 3,
+};
+
+#define RTW89_ERROR_SCENARIO(__err) ((__err) >> 28)
+
/* Define DBG and recovery enum */
enum mac_ax_err_info {
/* Get error info */
@@ -654,7 +669,9 @@ enum mac_ax_err_info {
MAC_AX_ERR_L2_ERR_APB_BBRF_TO_RX4281 = 0x2360,
MAC_AX_ERR_L2_ERR_APB_BBRF_TO_OTHERS = 0x2370,
MAC_AX_ERR_L2_RESET_DONE = 0x2400,
+ MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT = 0x2599,
MAC_AX_ERR_CPU_EXCEPTION = 0x3000,
+ MAC_AX_ERR_ASSERTION = 0x4000,
MAC_AX_GET_ERR_MAX,
MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000,
@@ -670,16 +687,37 @@ enum mac_ax_err_info {
MAC_AX_SET_ERR_MAX,
};
-extern const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie;
-extern const struct rtw89_dle_size wde_size0;
-extern const struct rtw89_dle_size wde_size4;
-extern const struct rtw89_dle_size ple_size0;
-extern const struct rtw89_dle_size ple_size4;
-extern const struct rtw89_wde_quota wde_qt0;
-extern const struct rtw89_wde_quota wde_qt4;
-extern const struct rtw89_ple_quota ple_qt4;
-extern const struct rtw89_ple_quota ple_qt5;
-extern const struct rtw89_ple_quota ple_qt13;
+struct rtw89_mac_size_set {
+ const struct rtw89_hfc_prec_cfg hfc_preccfg_pcie;
+ const struct rtw89_dle_size wde_size0;
+ const struct rtw89_dle_size wde_size4;
+ const struct rtw89_dle_size wde_size6;
+ const struct rtw89_dle_size wde_size9;
+ const struct rtw89_dle_size wde_size18;
+ const struct rtw89_dle_size wde_size19;
+ const struct rtw89_dle_size ple_size0;
+ const struct rtw89_dle_size ple_size4;
+ const struct rtw89_dle_size ple_size6;
+ const struct rtw89_dle_size ple_size8;
+ const struct rtw89_dle_size ple_size18;
+ const struct rtw89_dle_size ple_size19;
+ const struct rtw89_wde_quota wde_qt0;
+ const struct rtw89_wde_quota wde_qt4;
+ const struct rtw89_wde_quota wde_qt6;
+ const struct rtw89_wde_quota wde_qt17;
+ const struct rtw89_wde_quota wde_qt18;
+ const struct rtw89_ple_quota ple_qt4;
+ const struct rtw89_ple_quota ple_qt5;
+ const struct rtw89_ple_quota ple_qt13;
+ const struct rtw89_ple_quota ple_qt18;
+ const struct rtw89_ple_quota ple_qt44;
+ const struct rtw89_ple_quota ple_qt45;
+ const struct rtw89_ple_quota ple_qt46;
+ const struct rtw89_ple_quota ple_qt47;
+ const struct rtw89_ple_quota ple_qt58;
+};
+
+extern const struct rtw89_mac_size_set rtw89_mac_size;
static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
{
@@ -770,33 +808,59 @@ int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
-void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
-void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
+int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+
+static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->enable_bb_rf(rtwdev);
+}
+
+static inline int rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->disable_bb_rf(rtwdev);
+}
+
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
- u16 *tx_en, enum rtw89_sch_tx_sel sel);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en);
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
+int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex *coex);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
@@ -810,6 +874,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en);
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause);
static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
@@ -858,6 +923,45 @@ static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev,
return 0;
}
+static inline void rtw89_mac_ctrl_hci_dma_tx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN);
+}
+
+static inline void rtw89_mac_ctrl_hci_dma_rx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_RXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_RXDMA_EN);
+}
+
+static inline void rtw89_mac_ctrl_hci_dma_trx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+}
+
int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool resume, u32 tx_time);
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
@@ -868,4 +972,52 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta, u8 *tx_retry);
+enum rtw89_mac_xtal_si_offset {
+ XTAL0 = 0x0,
+ XTAL3 = 0x3,
+ XTAL_SI_XTAL_SC_XI = 0x04,
+#define XTAL_SC_XI_MASK GENMASK(7, 0)
+ XTAL_SI_XTAL_SC_XO = 0x05,
+#define XTAL_SC_XO_MASK GENMASK(7, 0)
+ XTAL_SI_PWR_CUT = 0x10,
+#define XTAL_SI_SMALL_PWR_CUT BIT(0)
+#define XTAL_SI_BIG_PWR_CUT BIT(1)
+ XTAL_SI_XTAL_XMD_2 = 0x24,
+#define XTAL_SI_LDO_LPS GENMASK(6, 4)
+ XTAL_SI_XTAL_XMD_4 = 0x26,
+#define XTAL_SI_LPS_CAP GENMASK(3, 0)
+ XTAL_SI_CV = 0x41,
+ XTAL_SI_LOW_ADDR = 0x62,
+#define XTAL_SI_LOW_ADDR_MASK GENMASK(7, 0)
+ XTAL_SI_CTRL = 0x63,
+#define XTAL_SI_MODE_SEL_MASK GENMASK(7, 6)
+#define XTAL_SI_RDY BIT(5)
+#define XTAL_SI_HIGH_ADDR_MASK GENMASK(2, 0)
+ XTAL_SI_READ_VAL = 0x7A,
+ XTAL_SI_WL_RFC_S0 = 0x80,
+#define XTAL_SI_RF00S_EN GENMASK(2, 0)
+#define XTAL_SI_RF00 BIT(0)
+ XTAL_SI_WL_RFC_S1 = 0x81,
+#define XTAL_SI_RF10S_EN GENMASK(2, 0)
+#define XTAL_SI_RF10 BIT(0)
+ XTAL_SI_ANAPAR_WL = 0x90,
+#define XTAL_SI_SRAM2RFC BIT(7)
+#define XTAL_SI_GND_SHDN_WL BIT(6)
+#define XTAL_SI_SHDN_WL BIT(5)
+#define XTAL_SI_RFC2RF BIT(4)
+#define XTAL_SI_OFF_EI BIT(3)
+#define XTAL_SI_OFF_WEI BIT(2)
+#define XTAL_SI_PON_EI BIT(1)
+#define XTAL_SI_PON_WEI BIT(0)
+ XTAL_SI_SRAM_CTRL = 0xA1,
+#define FULL_BIT_MASK GENMASK(7, 0)
+};
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd);
+int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index a322259f4cc4..a296bfa8188f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -12,6 +13,7 @@
#include "reg.h"
#include "sar.h"
#include "ser.h"
+#include "util.h"
static void rtw89_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -66,6 +68,9 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
+ /* let the previous IPS work finish to ensure we don't leave IPS twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
@@ -82,8 +87,11 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
+ &hw->conf.chandef);
rtw89_set_channel(rtwdev);
+ }
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
(hw->conf.flags & IEEE80211_CONF_IDLE))
@@ -101,15 +109,20 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
int ret = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n",
+ vif->addr, vif->type, vif->p2p);
+
mutex_lock(&rtwdev->mutex);
+ rtwvif->rtwdev = rtwdev;
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
rtw89_leave_ps_mode(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
rtw89_vif_type_mapping(vif, false);
rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port,
- RTW89_MAX_HW_PORT_NUM);
- if (rtwvif->port == RTW89_MAX_HW_PORT_NUM) {
+ RTW89_PORT_NUM);
+ if (rtwvif->port == RTW89_PORT_NUM) {
ret = -ENOSPC;
goto out;
}
@@ -141,6 +154,11 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n",
+ vif->addr, vif->type, vif->p2p);
+
+ cancel_work_sync(&rtwvif->update_beacon_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP);
@@ -150,6 +168,23 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype type, bool p2p)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+ vif->addr, vif->type, type, vif->p2p, p2p);
+
+ rtw89_ops_remove_interface(hw, vif);
+
+ vif->type = type;
+ vif->p2p = p2p;
+
+ return rtw89_ops_add_interface(hw, vif);
+}
+
static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -161,7 +196,7 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
rtw89_leave_ps_mode(rtwdev);
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
- FIF_BCN_PRBRESP_PROMISC;
+ FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ;
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
@@ -192,6 +227,15 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
}
}
+ if (changed_flags & FIF_PROBE_REQ) {
+ if (*new_flags & FIF_PROBE_REQ) {
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_UC_CAM_MATCH;
+ } else {
+ rtwdev->hal.rx_fltr |= B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr |= B_AX_A_UC_CAM_MATCH;
+ }
+ }
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
@@ -219,11 +263,12 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 aifsn)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 slot_time;
u8 sifs;
slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
- sifs = rtwdev->hal.current_band_type == RTW89_BAND_5G ? 16 : 10;
+ sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
return aifsn * slot_time + sifs;
}
@@ -311,13 +356,16 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
return;
}
+
+ rtw89_vif_type_mapping(vif, true);
+
rtw89_core_sta_assoc(rtwdev, vif, sta);
}
static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changed)
+ u64 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -326,11 +374,19 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_leave_ps_mode(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
- if (conf->assoc) {
+ if (vif->cfg.assoc) {
rtw89_station_mode_sta_assoc(rtwdev, vif, conf);
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, vif);
+ rtw89_store_op_chan(rtwdev, true);
+ } else {
+ /* Abort an ongoing scan if cancel_scan isn't issued
+ * when the peer disconnects us
+ */
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, vif);
}
}
@@ -340,6 +396,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
}
+ if (changed & BSS_CHANGED_BEACON)
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -349,11 +408,61 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MU_GROUPS)
rtw89_mac_bf_set_gid_table(rtwdev, vif, conf);
+ if (changed & BSS_CHANGED_P2P_PS)
+ rtw89_process_p2p_ps(rtwdev, vif);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
+ rtw89_cam_bssid_changed(rtwdev, rtwvif);
+ rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ rtw89_chip_rfk_channel(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static
+void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+
+ ieee80211_queue_work(rtwdev->hw, &rtwvif->update_beacon_work);
+
+ return 0;
+}
+
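set_tim above does not talk to the firmware at all; it only queues update_beacon_work. Our reading (not stated in the patch) is that set_tim may run in atomic context, so the beacon rebuild and H2C upload are deferred to the worker, and repeated TIM changes coalesce into a single queued work item. Sketch of that defer-and-coalesce pattern:

    #include <stdio.h>

    typedef void (*work_fn)(void);

    static work_fn pending;

    /* hot path: cheap and safe in atomic context; re-queueing coalesces */
    static void queue_work_once(work_fn fn)
    {
        pending = fn;
    }

    /* worker context: allowed to sleep, talk to firmware, etc. */
    static void update_beacon_work(void)
    {
        printf("rebuild beacon and push it to firmware\n");
    }

    int main(void)
    {
        queue_work_once(update_beacon_work);  /* TIM bit set */
        queue_work_once(update_beacon_work);  /* TIM changed again: coalesced */

        if (pending) {                        /* worker runs later, once */
            pending();
            pending = NULL;
        }
        return 0;
    }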
static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 ac,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -382,7 +491,7 @@ static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0; /* defer to bss_info_changed to have vif info */
return rtw89_core_sta_assoc(rtwdev, vif, sta);
}
@@ -476,7 +585,6 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
- rtw89_fw_h2c_ba_cam(rtwdev, false, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -486,11 +594,17 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
rtw89_leave_ps_mode(rtwdev);
- rtw89_fw_h2c_ba_cam(rtwdev, true, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ mutex_unlock(&rtwdev->mutex);
+ break;
case IEEE80211_AMPDU_RX_STOP:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ mutex_unlock(&rtwdev->mutex);
break;
default:
WARN_ON(1);
@@ -524,6 +638,20 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
+static
+void __rtw89_drop_packets(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif;
+
+ if (vif) {
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ } else {
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ }
+}
+
static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -532,7 +660,12 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
rtw89_hci_flush_queues(rtwdev, queues, drop);
- rtw89_mac_flush_txq(rtwdev, queues, drop);
+
+ if (drop && RTW89_CHK_FW_FEATURE(PACKET_DROP, &rtwdev->fw))
+ __rtw89_drop_packets(rtwdev, vif);
+ else
+ rtw89_mac_flush_txq(rtwdev, queues, drop);
+
mutex_unlock(&rtwdev->mutex);
}
@@ -548,12 +681,12 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
- if (vif != br_data->vif)
+ if (vif != br_data->vif || vif->p2p)
return;
rtwsta->use_cfg_mask = true;
rtwsta->mask = *br_data->mask;
- rtw89_phy_ra_updata_sta(br_data->rtwdev, sta);
+ rtw89_phy_ra_updata_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev,
@@ -588,12 +721,13 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
- if (rx_ant != hw->wiphy->available_antennas_rx)
+ if (rx_ant != hw->wiphy->available_antennas_rx && rx_ant != hal->antenna_rx)
return -EINVAL;
mutex_lock(&rtwdev->mutex);
hal->antenna_tx = tx_ant;
hal->antenna_rx = rx_ant;
+ hal->tx_path_diversity = false;
mutex_unlock(&rtwdev->mutex);
return 0;
@@ -617,15 +751,9 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
- struct rtw89_hal *hal = &rtwdev->hal;
mutex_lock(&rtwdev->mutex);
- rtwdev->scanning = true;
- rtw89_leave_lps(rtwdev);
- rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
- rtw89_chip_rfk_scan(rtwdev, true);
- rtw89_hci_recalc_int_mit(rtwdev);
- rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+ rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -633,14 +761,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw89_dev *rtwdev = hw->priv;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_scan(rtwdev, false);
- rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
- rtwdev->scanning = false;
- rtwdev->dig.bypass_dig = true;
+ rtw89_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -653,6 +776,146 @@ static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
rtw89_ser_recfg_done(rtwdev);
}
+static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret = 0;
+
+ if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
+ return 1;
+
+ if (rtwdev->scanning)
+ return -EBUSY;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_hw_scan_start(rtwdev, vif, req);
+ ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
+ }
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
+ return;
+
+ if (!rtwdev->scanning)
+ return;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_hw_scan_abort(rtwdev, vif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_phy_ra_updata_sta(rtwdev, sta, changed);
+}
+
+static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_chanctx_ops_add(rtwdev, ctx);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_remove(rtwdev, ctx);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_change(rtwdev, ctx, changed);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif, ctx);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif, ctx);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_set_tid_config_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct cfg80211_tid_config *tid_config = data;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwvif->rtwdev;
+
+ rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+}
+
+static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ if (sta)
+ rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+ else
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_set_tid_config_iter,
+ tid_config);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -660,9 +923,13 @@ const struct ieee80211_ops rtw89_ops = {
.stop = rtw89_ops_stop,
.config = rtw89_ops_config,
.add_interface = rtw89_ops_add_interface,
+ .change_interface = rtw89_ops_change_interface,
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
.bss_info_changed = rtw89_ops_bss_info_changed,
+ .start_ap = rtw89_ops_start_ap,
+ .stop_ap = rtw89_ops_stop_ap,
+ .set_tim = rtw89_ops_set_tim,
.conf_tx = rtw89_ops_conf_tx,
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
@@ -676,6 +943,15 @@ const struct ieee80211_ops rtw89_ops = {
.sw_scan_start = rtw89_ops_sw_scan_start,
.sw_scan_complete = rtw89_ops_sw_scan_complete,
.reconfig_complete = rtw89_ops_reconfig_complete,
+ .hw_scan = rtw89_ops_hw_scan,
+ .cancel_hw_scan = rtw89_ops_cancel_hw_scan,
+ .add_chanctx = rtw89_ops_add_chanctx,
+ .remove_chanctx = rtw89_ops_remove_chanctx,
+ .change_chanctx = rtw89_ops_change_chanctx,
+ .assign_vif_chanctx = rtw89_ops_assign_vif_chanctx,
+ .unassign_vif_chanctx = rtw89_ops_unassign_vif_chanctx,
.set_sar_specs = rtw89_ops_set_sar_specs,
+ .sta_rc_update = rtw89_ops_sta_rc_update,
+ .set_tid_config = rtw89_ops_set_tid_config,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 2c94762e4f93..5f8e19639362 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -62,7 +62,7 @@ static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
- u32 addr_idx = bd_ring->addr_idx;
+ u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
@@ -121,7 +121,7 @@ static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- u32 addr_idx = bd_ring->addr_idx;
+ u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
@@ -169,6 +169,23 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
+ const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
+
+ if (enable) {
+ rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
+ if (dma_stop2->addr)
+ rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
+ } else {
+ rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
+ if (dma_stop2->addr)
+ rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
+ }
+}
+
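rtw89_pci_ctrl_txdma_ch_pcie treats dma_stop2 as optional: a zero address in the reg_def marks a register this chip variant simply lacks, so writers guard on it. A tiny model of that convention (the struct mirrors rtw89_reg_def by assumption):

    #include <stdint.h>
    #include <stdio.h>

    struct reg_def { uint32_t addr; uint32_t mask; };

    static void set_bits(const struct reg_def *r)
    {
        if (!r->addr)
            return;   /* addr == 0: register absent on this chip */
        printf("set mask 0x%x at reg 0x%x\n", r->mask, r->addr);
    }

    int main(void)
    {
        struct reg_def dma_stop1 = { 0x1234, 0x00ff };
        struct reg_def dma_stop2 = { 0, 0 };   /* absent variant */

        set_bits(&dma_stop1);   /* written */
        set_bits(&dma_stop2);   /* silently skipped */
        return 0;
    }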
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
struct sk_buff *new,
@@ -228,7 +245,8 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
if (fs) {
if (new) {
- rtw89_err(rtwdev, "skb should not be ready before first segment start\n");
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "skb should not be ready before first segment start\n");
goto err_sync_device;
}
if (desc_info->ready) {
@@ -251,7 +269,7 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
} else {
offset = sizeof(struct rtw89_pci_rxbd_info);
if (!new) {
- rtw89_warn(rtwdev, "no last skb\n");
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
goto err_sync_device;
}
}
@@ -304,7 +322,7 @@ static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
cnt -= rx_cnt;
}
- rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+ rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
@@ -382,6 +400,10 @@ static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx
}
list_del_init(&txwd->list);
+
+ /* this txwd's skbs have already been freed by RPP */
+ if (skb_queue_len(&txwd->queue) == 0)
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
}
@@ -412,16 +434,13 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
u8 txch = tx_ring->txch;
if (!list_empty(&txwd->list)) {
- rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
- txch, seq);
- return;
- }
-
- /* currently, support for only one frame */
- if (skb_queue_len(&txwd->queue) != 1) {
- rtw89_warn(rtwdev, "empty pending queue %d page %d\n",
- txch, seq);
- return;
+ rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
+ /* In low power mode, an RPP can be received before the TX BD is
+ * updated. In normal mode this should not happen, so warn if it does.
+ */
+ if (!rtwpci->low_power && !list_empty(&txwd->list))
+ rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
+ txch, seq);
}
skb_queue_walk_safe(&txwd->queue, skb, tmp) {
@@ -434,7 +453,8 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
}
- rtw89_pci_enqueue_txwd(tx_ring, txwd);
+ if (list_empty(&txwd->list))
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
@@ -458,7 +478,6 @@ static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
}
tx_ring = &rtwpci->tx_rings[txch];
- rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
wd_ring = &tx_ring->wd_ring;
txwd = &wd_ring->pages[seq];
@@ -555,7 +574,7 @@ static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
cnt -= release_cnt;
}
- rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+ rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
@@ -598,13 +617,13 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
- reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
hw_idx_next = (hw_idx + 1) % bd_ring->len;
if (hw_idx_next == host_idx)
- rtw89_warn(rtwdev, "%d RXD unavailable\n", i);
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"%d RXD unavailable, idx=0x%08x, len=%d\n",
@@ -612,9 +631,9 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
- struct rtw89_pci *rtwpci,
- struct rtw89_pci_isrs *isrs)
+void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
{
isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
@@ -624,6 +643,28 @@ static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
+
+void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
+ isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
+ rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
+ isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
+ rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
+ isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
+ rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
+
+ if (isrs->halt_c2h_isrs)
+ rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
+ if (isrs->isrs[0])
+ rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
+ if (isrs->isrs[1])
+ rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
+}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
@@ -631,21 +672,72 @@ static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}
-static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev,
- struct rtw89_pci *rtwpci)
+void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
+EXPORT_SYMBOL(rtw89_pci_enable_intr);
-static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev,
- struct rtw89_pci *rtwpci)
+void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, 0);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
+EXPORT_SYMBOL(rtw89_pci_disable_intr);
+
+void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
+ rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
+ rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
+ rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
+}
+EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
+
+void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
+}
+EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
+
+static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ int budget = NAPI_POLL_WEIGHT;
+
+ /* Prevent the RXQ from getting stuck by running out of budget. */
+ rtwdev->napi_budget_countdown = budget;
+
+ rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
+ rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
+}
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
@@ -655,7 +747,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
- rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs);
+ rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
@@ -664,6 +756,17 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
+ if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
+ rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
+
+ if (unlikely(rtwpci->under_recovery))
+ goto enable_intr;
+
+ if (unlikely(rtwpci->low_power)) {
+ rtw89_pci_low_power_interrupt_handler(rtwdev);
+ goto enable_intr;
+ }
+
if (likely(rtwpci->running)) {
local_bh_disable();
napi_schedule(&rtwdev->napi);
@@ -671,6 +774,13 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
}
return IRQ_HANDLED;
+
+enable_intr:
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ if (likely(rtwpci->running))
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ return IRQ_HANDLED;
}
static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
@@ -690,78 +800,117 @@ static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
goto exit;
}
- rtw89_pci_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return irqret;
}
-#define case_TXCHADDRS(txch) \
- case RTW89_TXCH_##txch: \
- *addr_num = R_AX_##txch##_TXBD_NUM; \
- *addr_idx = R_AX_##txch##_TXBD_IDX; \
- *addr_bdram = R_AX_##txch##_BDRAM_CTRL; \
- *addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \
- *addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \
- break
-
-static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch,
- u32 *addr_num,
- u32 *addr_idx,
- u32 *addr_bdram,
- u32 *addr_desa_l,
- u32 *addr_desa_h)
-{
- switch (txch) {
- case_TXCHADDRS(ACH0);
- case_TXCHADDRS(ACH1);
- case_TXCHADDRS(ACH2);
- case_TXCHADDRS(ACH3);
- case_TXCHADDRS(ACH4);
- case_TXCHADDRS(ACH5);
- case_TXCHADDRS(ACH6);
- case_TXCHADDRS(ACH7);
- case_TXCHADDRS(CH8);
- case_TXCHADDRS(CH9);
- case_TXCHADDRS(CH10);
- case_TXCHADDRS(CH11);
- case_TXCHADDRS(CH12);
- default:
+#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
+ [RTW89_TXCH_##txch] = { \
+ .num = R_AX_##txch##_TXBD_NUM ##v, \
+ .idx = R_AX_##txch##_TXBD_IDX ##v, \
+ .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+ .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+ }
+
+#define DEF_TXCHADDRS(info, txch, v...) \
+ [RTW89_TXCH_##txch] = { \
+ .num = R_AX_##txch##_TXBD_NUM, \
+ .idx = R_AX_##txch##_TXBD_IDX, \
+ .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+ .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+ }
+
+#define DEF_RXCHADDRS(info, rxch, v...) \
+ [RTW89_RXCH_##rxch] = { \
+ .num = R_AX_##rxch##_RXBD_NUM ##v, \
+ .idx = R_AX_##rxch##_RXBD_IDX ##v, \
+ .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
+ .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
+ }
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
+ .tx = {
+ DEF_TXCHADDRS(info, ACH0),
+ DEF_TXCHADDRS(info, ACH1),
+ DEF_TXCHADDRS(info, ACH2),
+ DEF_TXCHADDRS(info, ACH3),
+ DEF_TXCHADDRS(info, ACH4),
+ DEF_TXCHADDRS(info, ACH5),
+ DEF_TXCHADDRS(info, ACH6),
+ DEF_TXCHADDRS(info, ACH7),
+ DEF_TXCHADDRS(info, CH8),
+ DEF_TXCHADDRS(info, CH9),
+ DEF_TXCHADDRS_TYPE1(info, CH10),
+ DEF_TXCHADDRS_TYPE1(info, CH11),
+ DEF_TXCHADDRS(info, CH12),
+ },
+ .rx = {
+ DEF_RXCHADDRS(info, RXQ),
+ DEF_RXCHADDRS(info, RPQ),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
+ .tx = {
+ DEF_TXCHADDRS(info, ACH0, _V1),
+ DEF_TXCHADDRS(info, ACH1, _V1),
+ DEF_TXCHADDRS(info, ACH2, _V1),
+ DEF_TXCHADDRS(info, ACH3, _V1),
+ DEF_TXCHADDRS(info, ACH4, _V1),
+ DEF_TXCHADDRS(info, ACH5, _V1),
+ DEF_TXCHADDRS(info, ACH6, _V1),
+ DEF_TXCHADDRS(info, ACH7, _V1),
+ DEF_TXCHADDRS(info, CH8, _V1),
+ DEF_TXCHADDRS(info, CH9, _V1),
+ DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
+ DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
+ DEF_TXCHADDRS(info, CH12, _V1),
+ },
+ .rx = {
+ DEF_RXCHADDRS(info, RXQ, _V1),
+ DEF_RXCHADDRS(info, RPQ, _V1),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
+
+#undef DEF_TXCHADDRS_TYPE1
+#undef DEF_TXCHADDRS
+#undef DEF_RXCHADDRS
+
+static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
+ enum rtw89_tx_channel txch,
+ const struct rtw89_pci_ch_dma_addr **addr)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (txch >= RTW89_TXCH_NUM)
return -EINVAL;
- }
+
+ *addr = &info->dma_addr_set->tx[txch];
return 0;
}
-#undef case_TXCHADDRS
-
-#define case_RXCHADDRS(rxch) \
- case RTW89_RXCH_##rxch: \
- *addr_num = R_AX_##rxch##_RXBD_NUM; \
- *addr_idx = R_AX_##rxch##_RXBD_IDX; \
- *addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \
- *addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \
- break
-
-static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch,
- u32 *addr_num,
- u32 *addr_idx,
- u32 *addr_desa_l,
- u32 *addr_desa_h)
+static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
+ enum rtw89_rx_channel rxch,
+ const struct rtw89_pci_ch_dma_addr **addr)
{
- switch (rxch) {
- case_RXCHADDRS(RXQ);
- case_RXCHADDRS(RPQ);
- default:
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (rxch >= RTW89_RXCH_NUM)
return -EINVAL;
- }
+
+ *addr = &info->dma_addr_set->rx[rxch];
return 0;
}
-#undef case_RXCHADDRS
-
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
@@ -788,6 +937,23 @@ u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
return cnt;
}
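+/* No-IO variant used while HCI is paused: report TX resources from the
+ * cached ring counters without touching device registers.
+ */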
+static
+u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
+ u8 txch)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ u32 cnt;
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ cnt = min(cnt, wd_ring->curr_num);
+ spin_unlock_bh(&rtwpci->trx_lock);
+
+ return cnt;
+}
+
static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
@@ -806,16 +972,23 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
if (wd_cnt == 0 || bd_cnt == 0) {
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
- if (!cnt)
+ if (cnt)
+ rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
+ else if (wd_cnt == 0)
goto out_unlock;
- rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
+
+ bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ if (bd_cnt == 0)
+ rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
}
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
wd_cnt = wd_ring->curr_num;
min_cnt = min(bd_cnt, wd_cnt);
if (min_cnt == 0)
- rtw89_warn(rtwdev, "still no tx resource after reclaim\n");
+ rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP,
+ "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
+ wd_cnt, bd_cnt);
out_unlock:
spin_unlock_bh(&rtwpci->trx_lock);
@@ -826,6 +999,9 @@ out_unlock:
static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
+ if (rtwdev->hci.paused)
+ return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
+
if (txch == RTW89_TXCH_CH12)
return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
@@ -834,12 +1010,17 @@ static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 host_idx, addr;
- addr = bd_ring->addr_idx;
+ spin_lock_bh(&rtwpci->trx_lock);
+
+ addr = bd_ring->addr.idx;
host_idx = bd_ring->wp;
rtw89_write16(rtwdev, addr, host_idx);
+
+ spin_unlock_bh(&rtwpci->trx_lock);
}
static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
@@ -860,9 +1041,27 @@ static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
- spin_lock_bh(&rtwpci->trx_lock);
+ if (rtwdev->hci.paused) {
+ set_bit(txch, rtwpci->kick_map);
+ return;
+ }
+
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
- spin_unlock_bh(&rtwpci->trx_lock);
+}
+
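+/* Replay kick-offs that were deferred via kick_map while HCI was paused. */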
+static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ int txch;
+
+ for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
+ if (!test_and_clear_bit(txch, rtwpci->kick_map))
+ continue;
+
+ tx_ring = &rtwpci->tx_rings[txch];
+ __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
+ }
}
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
@@ -879,7 +1078,7 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
* just use for loop with udelay here.
*/
for (i = 0; i < 60; i++) {
- cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
if (cur_rp == bd_ring->wp)
return;
@@ -894,12 +1093,15 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
bool drop)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
u8 i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
/* It may be unnecessary to flush the FWCMD queue. */
if (i == RTW89_TXCH_CH12)
continue;
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
if (txchs & BIT(i))
__pci_flush_txch(rtwdev, i, drop);
@@ -912,17 +1114,69 @@ static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}
+u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr)
+{
+ struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
+
+ txaddr_info->length = cpu_to_le16(total_len);
+ txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
+ RTW89_PCI_ADDR_NUM(1));
+ txaddr_info->dma = cpu_to_le32(dma);
+
+ *add_info_nr = 1;
+
+ return sizeof(*txaddr_info);
+}
+EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
+
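+/* V1 layout: split the mapped buffer into up to RTW89_TXADDR_INFO_NR_V1
+ * segments, marking the last segment with the LS bit.
+ */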
+u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr)
+{
+ struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
+ u32 remain = total_len;
+ u32 len;
+ u16 length_option;
+ int n;
+
+ for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
+ len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
+ TXADDR_INFO_LENTHG_V1_MAX : remain;
+ remain -= len;
+
+ length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
+ FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
+ FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
+ txaddr_info->length_opt = cpu_to_le16(length_option);
+ txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
+ txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
+
+ dma += len;
+ txaddr_info++;
+ }
+
+ WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
+ remain, total_len);
+
+ *add_info_nr = n;
+
+ return n * sizeof(*txaddr_info);
+}
+EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
+
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring,
struct rtw89_pci_tx_wd *txwd,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
- struct rtw89_txwd_body *txwd_body;
struct rtw89_txwd_info *txwd_info;
struct rtw89_pci_tx_wp_info *txwp_info;
- struct rtw89_pci_tx_addr_info_32 *txaddr_info;
+ void *txaddr_info_addr;
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
@@ -933,8 +1187,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
dma_addr_t dma;
int ret;
- rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
-
dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
rtw89_err(rtwdev, "failed to map skb dma data\n");
@@ -944,9 +1196,8 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
tx_data->dma = dma;
- txaddr_info_len = sizeof(*txaddr_info);
txwp_len = sizeof(*txwp_info);
- txwd_len = sizeof(*txwd_body);
+ txwd_len = chip->txwd_body_size;
txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
txwp_info = txwd->vaddr + txwd_len;
@@ -956,14 +1207,15 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
txwp_info->seq3 = 0;
tx_ring->tx_cnt++;
- txaddr_info = txwd->vaddr + txwd_len + txwp_len;
- txaddr_info->length = cpu_to_le16(skb->len);
- txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
- RTW89_PCI_ADDR_NUM(1));
- txaddr_info->dma = cpu_to_le32(dma);
+ txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
+ txaddr_info_len =
+ rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
+ dma, &desc_info->addr_info_nr);
txwd->len = txwd_len + txwp_len + txaddr_info_len;
+ rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
+
skb_queue_tail(&txwd->queue, skb);
return 0;
@@ -978,16 +1230,18 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
- struct rtw89_txwd_body *txwd_body;
+ void *txdesc;
+ int txdesc_size = chip->h2c_desc_size;
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_addr_t dma;
- txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body));
- memset(txwd_body, 0, sizeof(*txwd_body));
- rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body);
+ txdesc = skb_push(skb, txdesc_size);
+ memset(txdesc, 0, txdesc_size);
+ rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, dma)) {
@@ -1126,6 +1380,7 @@ static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
@@ -1137,12 +1392,15 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
+
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = &bd_ram_table[i];
- addr_num = bd_ring->addr_num;
- addr_bdram = bd_ring->addr_bdram;
- addr_desa_l = bd_ring->addr_desa_l;
+ addr_num = bd_ring->addr.num;
+ addr_bdram = bd_ring->addr.bdram;
+ addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
@@ -1158,8 +1416,8 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
- addr_num = bd_ring->addr_num;
- addr_desa_l = bd_ring->addr_desa_l;
+ addr_num = bd_ring->addr.num;
+ addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
@@ -1180,12 +1438,15 @@ static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
int txch;
rtw89_pci_reset_trx_rings(rtwdev);
spin_lock_bh(&rtwpci->trx_lock);
for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
+ if (info->tx_dma_ch_mask & BIT(txch))
+ continue;
if (txch == RTW89_TXCH_CH12) {
rtw89_pci_release_fwcmd(rtwdev, rtwpci,
skb_queue_len(&rtwpci->h2c_queue), true);
@@ -1196,36 +1457,102 @@ static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
spin_unlock_bh(&rtwpci->trx_lock);
}
-static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
+static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
unsigned long flags;
- rtw89_core_napi_start(rtwdev);
-
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtwpci->running = true;
- rtw89_pci_enable_intr(rtwdev, rtwpci);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
-
- return 0;
}
-static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
+static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct pci_dev *pdev = rtwpci->pdev;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtwpci->running = false;
- rtw89_pci_disable_intr(rtwdev, rtwpci);
+ rtw89_chip_disable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
+{
+ rtw89_core_napi_start(rtwdev);
+ rtw89_pci_enable_intr_lock(rtwdev);
+
+ return 0;
+}
+
+static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+
+ rtw89_pci_disable_intr_lock(rtwdev);
synchronize_irq(pdev->irq);
rtw89_core_napi_stop(rtwdev);
}
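+/* Pause masks interrupts and waits out in-flight IRQ/NAPI work; resume
+ * unmasks and replays any TX kick-offs deferred during the pause.
+ */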
+static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+
+ if (pause) {
+ rtw89_pci_disable_intr_lock(rtwdev);
+ synchronize_irq(pdev->irq);
+ if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
+ napi_synchronize(&rtwdev->napi);
+ } else {
+ rtw89_pci_enable_intr_lock(rtwdev);
+ rtw89_pci_tx_kick_off_pending(rtwdev);
+ }
+}
+
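+/* In low power mode the hardware exposes the TX/RX BD index registers at
+ * alternate addresses, so repoint every ring's idx register accordingly.
+ */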
+static
+void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
+ const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
+ struct rtw89_pci_tx_ring *tx_ring;
+ struct rtw89_pci_rx_ring *rx_ring;
+ int i;
+
+ if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
+ return;
+
+ for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ tx_ring->bd_ring.addr.idx = low_power ?
+ bd_idx_addr->tx_bd_addrs[i] :
+ dma_addr_set->tx[i].idx;
+ }
+
+ for (i = 0; i < RTW89_RXCH_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ rx_ring->bd_ring.addr.idx = low_power ?
+ bd_idx_addr->rx_bd_addrs[i] :
+ dma_addr_set->rx[i].idx;
+ }
+}
+
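+/* Requires HCI to be paused: swap both the interrupt mask set and the
+ * BD index register map between the normal and low power layouts.
+ */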
+static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
+{
+ enum rtw89_pci_intr_mask_cfg cfg;
+
+ WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
+
+ cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
+ rtw89_chip_config_intr_mask(rtwdev, cfg);
+ rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
+}
+
static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
@@ -1307,19 +1634,41 @@ static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
writel(data, rtwpci->mmap + addr);
}
-static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
- if (enable) {
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_TXHCI_EN | B_AX_RXHCI_EN);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
- B_AX_STOP_PCIEIO);
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, info->init_cfg_reg,
+ info->rxhci_en_bit | info->txhci_en_bit);
+ else
+ rtw89_write32_clr(rtwdev, info->init_cfg_reg,
+ info->rxhci_en_bit | info->txhci_en_bit);
+}
+
+static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg, mask;
+
+ if (chip_id == RTL8852C) {
+ reg = R_AX_HAXI_INIT_CFG1;
+ mask = B_AX_STOP_AXI_MST;
} else {
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1,
- B_AX_STOP_PCIEIO);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_TXHCI_EN | B_AX_RXHCI_EN);
+ reg = R_AX_PCIE_DMA_STOP1;
+ mask = B_AX_STOP_PCIEIO;
}
+
+ if (enable)
+ rtw89_write32_clr(rtwdev, reg, mask);
+ else
+ rtw89_write32_set(rtwdev, reg, mask);
+}
+
+static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_pci_ctrl_dma_io(rtwdev, enable);
+ rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}
static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
@@ -1383,22 +1732,29 @@ rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
return 0;
}
-static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
+static int
+rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
+ u32 shift;
int ret;
u16 val;
ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
if (ret)
return ret;
- ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
+
+ shift = __ffs(mask);
+ val &= ~mask;
+ val |= ((data << shift) & mask);
+
+ ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
if (ret)
return ret;
return 0;
}
-static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
+static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
int ret;
u16 val;
@@ -1406,86 +1762,74 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
if (ret)
return ret;
- ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
+ ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
if (ret)
return ret;
return 0;
}
-static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
- u16 write_addr;
- u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK);
- u8 flag;
int ret;
+ u16 val;
- write_addr = addr & B_AX_DBI_ADDR_MSK;
- write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK);
- rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data);
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
+ ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
if (ret)
- WARN(flag, "failed to write to DBI register, addr=0x%04x\n",
- addr);
+ return ret;
- return ret;
+ return 0;
}
-static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 data)
{
- u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
- u8 flag;
- int ret;
-
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
+ return pci_write_config_byte(pdev, addr, data);
+}
- if (!ret) {
- read_addr = R_AX_DBI_RDATA + (addr & 3);
- *value = rtw89_read8(rtwdev, read_addr);
- } else {
- WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
- ret = -EIO;
- }
+static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 *value)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_read_config_byte(pdev, addr, value);
}
-static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value |= bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
-static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value &= ~bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
@@ -1530,6 +1874,18 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate
return 0;
}
+static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
+ PCIE_AUTOK_4, PCIE_PHY_GEN1);
+ return ret;
+}
+
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
enum rtw89_pcie_phy phy_rate;
@@ -1538,13 +1894,13 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
bool l1_flag = false;
int ret = 0;
- if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) ||
- rtwdev->chip->chip_id == RTL8852C)
+ if (rtwdev->chip->chip_id != RTL8852B)
return 0;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n",
+ RTW89_PCIE_PHY_RATE);
return ret;
}
@@ -1557,17 +1913,18 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
return -EOPNOTSUPP;
}
/* Disable L1BD */
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
return ret;
}
if (bdr_ori & RTW89_PCIE_BIT_L1) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL,
- bdr_ori & ~RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori & ~RTW89_PCIE_BIT_L1);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
l1_flag = true;
@@ -1662,14 +2019,17 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
}
/* CLK delay = 0 */
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_0);
end:
/* Set L1BD to ori */
if (l1_flag) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
}
@@ -1679,31 +2039,39 @@ end:
static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (rtwdev->chip->chip_id != RTL8852A)
- return 0;
-
- ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
- PCIE_PHY_GEN1);
- if (ret)
- return ret;
- ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
- PCIE_PHY_GEN2);
- if (ret)
- return ret;
+ if (chip_id == RTL8852A) {
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
+ PCIE_PHY_GEN1);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
+ PCIE_PHY_GEN2);
+ if (ret)
+ return ret;
+ } else if (chip_id == RTL8852C) {
+ rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
+ B_AX_DEGLITCH);
+ rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
+ B_AX_DEGLITCH);
+ }
return 0;
}
static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
{
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return;
+
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
}
static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id == RTL8852C)
+ if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
return;
rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
@@ -1713,7 +2081,7 @@ static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
{
int ret;
- if (rtwdev->chip->chip_id == RTL8852C)
+ if (rtwdev->chip->chip_id != RTL8852A)
return 0;
ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
@@ -1731,7 +2099,7 @@ static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id != RTL8852A)
+ if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
return;
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
@@ -1739,13 +2107,78 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id != RTL8852A)
+ if (rtwdev->chip->chip_id == RTL8852A ||
+ rtwdev->chip->chip_id == RTL8852B) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_WLSUS_AFT_PDN);
+ } else if (rtwdev->chip->chip_id == RTL8852C) {
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ }
+}
+
+static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852B)
+ return 0;
+
+ return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
+ PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
+}
+
+static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+{
+ if (pwr_up)
+ rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
+}
+
+static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852C)
return;
- rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_WLSUS_AFT_PDN);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
+}
+
+static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
+{
+ if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
+}
+
+static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
+{
+ if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
+ B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
+ B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+}
+
+static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
+}
+
+static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
}
static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
@@ -1757,6 +2190,52 @@ static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
B_AX_SIC_EN_FORCE_CLKREQ);
}
+static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 lbc;
+
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
+ if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
+ lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
+ lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
+ } else {
+ lbc &= ~B_AX_LBC_EN;
+ }
+ /* plain write so the disable path can actually clear B_AX_LBC_EN */
+ rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
+}
+
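+/* 8852C only: program the PCIe watchdog timers and put the M1/M2/E0 IO
+ * recovery machines into WDT mode when io_rcy is enabled; S1 always stays
+ * out of WDT mode.
+ */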
+static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 val32;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
+ val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
+ info->io_rcy_tmr);
+ rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
+ rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
+ rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
+ } else {
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
+ }
+
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
+}
+
static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
{
if (rtwdev->chip->chip_id == RTL8852C)
@@ -1770,30 +2249,197 @@ static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
B_AX_EN_CHKDSC_NO_RX_STUCK);
}
+static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
+}
+
static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
B_AX_CLR_CH12_IDX;
+ u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
+ u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
- if (rtwdev->chip->chip_id == RTL8852A)
+ if (chip_id == RTL8852A || chip_id == RTL8852C)
val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
/* clear DMA indexes */
rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
- if (rtwdev->chip->chip_id == RTL8852A)
- rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2,
+ if (chip_id == RTL8852A || chip_id == RTL8852C)
+ rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
- rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR,
+ rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
}
+static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 ret, check, dma_busy;
+ u32 dma_busy1 = info->dma_busy1.addr;
+ u32 dma_busy2 = info->dma_busy2_reg;
+
+ check = info->dma_busy1.mask;
+
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 10, 100, false, rtwdev, dma_busy1);
+ if (ret)
+ return ret;
+
+ if (!dma_busy2)
+ return 0;
+
+ check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
+
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 10, 100, false, rtwdev, dma_busy2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 ret, check, dma_busy;
+ u32 dma_busy3 = info->dma_busy3_reg;
+
+ check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
+
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 10, 100, false, rtwdev, dma_busy3);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
+{
+ u32 ret;
+
+ ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "txdma ch busy\n");
+ return ret;
+ }
+
+ ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "rxdma ch busy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
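+/* Apply the per-chip DMA operating modes from rtw89_pci_info: BD
+ * truncation, RXBD mode, TX/RX burst sizes, tag mode and WD DMA intervals.
+ */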
+static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
+ enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
+ enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
+ enum mac_ax_tag_mode tag_mode = info->tag_mode;
+ enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
+ enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
+ enum mac_ax_tx_burst tx_burst = info->tx_burst;
+ enum mac_ax_rx_burst rx_burst = info->rx_burst;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 cv = rtwdev->hal.cv;
+ u32 val32;
+
+ if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
+ if (chip_id == RTL8852A && cv == CHIP_CBV)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
+ } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
+ }
+
+ if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
+ if (chip_id == RTL8852A && cv == CHIP_CBV)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
+ } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
+ }
+
+ if (rxbd_mode == MAC_AX_RXBD_PKT) {
+ rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
+ } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
+ rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
+ B_AX_PCIE_RX_APPLEN_MASK, 0);
+ }
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
+ }
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (tag_mode == MAC_AX_TAG_SGL) {
+ val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
+ ~B_AX_LATENCY_CONTROL;
+ rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+ } else if (tag_mode == MAC_AX_TAG_MULTI) {
+ val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
+ B_AX_LATENCY_CONTROL;
+ rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+ }
+ }
+
+ rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
+ info->multi_tag_num);
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
+ wd_dma_idle_intvl);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
+ wd_dma_act_intvl);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
+ wd_dma_idle_intvl);
+ rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
+ wd_dma_act_intvl);
+ }
+
+ if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
+ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
+ rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ }
+
+ return 0;
+}
+
static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
if (rtwdev->chip->chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
}
+ info->ltr_set(rtwdev, false);
rtw89_pci_ctrl_dma_all(rtwdev, false);
rtw89_pci_clr_idx_all(rtwdev);
@@ -1802,9 +2448,7 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
{
- u32 dma_busy;
- u32 check;
- u32 lbc;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
rtw89_pci_rxdma_prefth(rtwdev);
@@ -1818,6 +2462,13 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
rtw89_pci_aphy_pwrcut(rtwdev);
rtw89_pci_hci_ldo(rtwdev);
+ rtw89_pci_dphy_delay(rtwdev);
+
+ ret = rtw89_pci_autok_x(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
+ return ret;
+ }
ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
if (ret) {
@@ -1825,50 +2476,31 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return ret;
}
+ rtw89_pci_power_wake(rtwdev, true);
+ rtw89_pci_autoload_hang(rtwdev);
+ rtw89_pci_l12_vmain(rtwdev);
+ rtw89_pci_gen2_force_ib(rtwdev);
+ rtw89_pci_l1_ent_lat(rtwdev);
+ rtw89_pci_wd_exit_l1(rtwdev);
rtw89_pci_set_sic(rtwdev);
+ rtw89_pci_set_lbc(rtwdev);
+ rtw89_pci_set_io_rcy(rtwdev);
rtw89_pci_set_dbg(rtwdev);
+ rtw89_pci_set_keep_reg(rtwdev);
- if (rtwdev->chip->chip_id == RTL8852A)
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_AUXCLK_GATE);
-
- lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
- lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER);
- lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
- rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
-
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA);
+ rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
/* stop DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, false);
- /* check PCI at idle state */
- check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
- ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
- 100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1);
+ ret = rtw89_pci_poll_dma_all_idle(rtwdev);
if (ret) {
- rtw89_err(rtwdev, "failed to poll io busy\n");
+ rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
return ret;
}
rtw89_pci_clr_idx_all(rtwdev);
-
- /* configure TX/RX op modes */
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE |
- B_AX_RX_TRUNC_MODE);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, 7);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3);
- /* multi-tag mode */
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM,
- RTW89_MAC_TAG_NUM_8);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
- RTW89_MAC_WD_DMA_INTVL_256NS);
- rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
- RTW89_MAC_WD_DMA_INTVL_256NS);
+ rtw89_pci_mode_op(rtwdev);
/* fill TRX BD indexes */
rtw89_pci_ops_reset(rtwdev);
@@ -1879,10 +2511,9 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return ret;
}
- /* enable FW CMD queue to download firmware */
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12);
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
+ /* disable all channels except the FW CMD channel, to download firmware */
+ rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
+ rtw89_write32_clr(rtwdev, info->dma_stop1.addr, B_AX_STOP_CH12);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -1890,10 +2521,13 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev)
+int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
u32 val;
+ if (!en)
+ return 0;
+
val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
if (rtw89_pci_ltr_is_err_reg_val(val))
return -EINVAL;
@@ -1907,44 +2541,95 @@ static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev)
if (rtw89_pci_ltr_is_err_reg_val(val))
return -EINVAL;
- rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
- rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
+ B_AX_LTR_WD_NOEMP_CHK);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
PCI_LTR_SPC_500US);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
- PCI_LTR_IDLE_TIMER_800US);
+ PCI_LTR_IDLE_TIMER_3_2MS);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
- rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
+ rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
return 0;
}
+EXPORT_SYMBOL(rtw89_pci_ltr_set);
+
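+/* V1 LTR flow: let hardware decide the LTR index when enabled; when
+ * disabled, force the driver-owned index to idle and issue a driver LTR
+ * request instead.
+ */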
+int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
+{
+ u32 dec_ctrl;
+ u32 val32;
+
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+ dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
+ if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
+ return -EINVAL;
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+ val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
+ if (rtw89_pci_ltr_is_err_reg_val(val32))
+ return -EINVAL;
+
+ if (!en) {
+ dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
+ dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
+ B_AX_LTR_REQ_DRV;
+ } else {
+ dec_ctrl |= B_AX_LTR_HW_DEC_EN;
+ }
+
+ dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
+ dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
+
+ if (en)
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
+ B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
+ PCI_LTR_IDLE_TIMER_3_2MS);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
+ rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
+ rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
+ rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- ret = rtw89_pci_ltr_set(rtwdev);
+ ret = info->ltr_set(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "pci ltr set fail\n");
return ret;
}
- if (rtwdev->chip->chip_id == RTL8852A) {
+ if (chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
}
- /* ADDR info 8-byte mode */
- rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
- B_AX_HOST_ADDR_INFO_8B_SEL);
- rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ /* ADDR info 8-byte mode */
+ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+ }
/* enable DMA for all queues */
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
+ rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
/* Release PCI IO */
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
+ rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
return 0;
@@ -2065,10 +2750,13 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
tx_ring = &rtwpci->tx_rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
@@ -2210,14 +2898,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
u32 desc_size, u32 len,
enum rtw89_tx_channel txch)
{
+ const struct rtw89_pci_ch_dma_addr *txch_addr;
int ring_sz = desc_size * len;
u8 *head;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
- u32 addr_desa_h;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@@ -2226,8 +2910,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
goto err;
}
- ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram,
- &addr_desa_l, &addr_desa_h);
+ ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
if (ret) {
rtw89_err(rtwdev, "failed to get address of txch %d", txch);
goto err_free_wd_ring;
@@ -2244,11 +2927,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
tx_ring->bd_ring.dma = dma;
tx_ring->bd_ring.len = len;
tx_ring->bd_ring.desc_size = desc_size;
- tx_ring->bd_ring.addr_num = addr_num;
- tx_ring->bd_ring.addr_idx = addr_idx;
- tx_ring->bd_ring.addr_bdram = addr_bdram;
- tx_ring->bd_ring.addr_desa_l = addr_desa_l;
- tx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ tx_ring->bd_ring.addr = *txch_addr;
tx_ring->bd_ring.wp = 0;
tx_ring->bd_ring.rp = 0;
tx_ring->txch = txch;
@@ -2265,6 +2944,7 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
u32 desc_size;
u32 len;
@@ -2272,6 +2952,8 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
int ret;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
tx_ring = &rtwpci->tx_rings[i];
desc_size = sizeof(struct rtw89_pci_tx_bd_32);
len = RTW89_PCI_TXBD_NUM_MAX;
@@ -2300,20 +2982,16 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 desc_size, u32 len, u32 rxch)
{
+ const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
u8 *head;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_desa_l;
- u32 addr_desa_h;
int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
- ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx,
- &addr_desa_l, &addr_desa_h);
+ ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
if (ret) {
rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
return ret;
@@ -2329,10 +3007,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
rx_ring->bd_ring.desc_size = desc_size;
- rx_ring->bd_ring.addr_num = addr_num;
- rx_ring->bd_ring.addr_idx = addr_idx;
- rx_ring->bd_ring.addr_desa_l = addr_desa_l;
- rx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ rx_ring->bd_ring.addr = *rxch_addr;
rx_ring->bd_ring.wp = 0;
rx_ring->bd_ring.rp = 0;
rx_ring->buf_sz = buf_sz;
@@ -2489,23 +3164,82 @@ static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
skb_queue_len(&rtwpci->h2c_queue), true);
}
-static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev)
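+/* Original interrupt layout: under SER recovery only the HS0 indicator
+ * interrupt stays armed; otherwise arm the full default RX/TX mask.
+ */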
+void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
+
+ if (rtwpci->under_recovery) {
+ rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->intrs[1] = 0;
+ } else {
+ rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
+ B_AX_RXDMA_INT_EN |
+ B_AX_RXP1DMA_INT_EN |
+ B_AX_RPQDMA_INT_EN |
+ B_AX_RXDMA_STUCK_INT_EN |
+ B_AX_RDU_INT_EN |
+ B_AX_RPQBD_FULL_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+
+ rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
+ }
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
+
+static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = 0;
+}
+
+static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
+ B_AX_HS1ISR_IND_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
B_AX_RXDMA_INT_EN |
B_AX_RXP1DMA_INT_EN |
B_AX_RPQDMA_INT_EN |
B_AX_RXDMA_STUCK_INT_EN |
B_AX_RDU_INT_EN |
- B_AX_RPQBD_FULL_INT_EN |
- B_AX_HS0ISR_IND_INT_EN;
+ B_AX_RPQBD_FULL_INT_EN;
+ rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
+}
- rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
+static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}
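+/* V1 dispatcher: pick the mask set matching the current HCI state, in
+ * priority order recovery > low power > default.
+ */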
+void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (rtwpci->under_recovery)
+ rtw89_pci_recovery_intr_mask_v1(rtwdev);
+ else if (rtwpci->low_power)
+ rtw89_pci_low_power_intr_mask_v1(rtwdev);
+ else
+ rtw89_pci_default_intr_mask_v1(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
+
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -2528,7 +3262,7 @@ static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
goto err_free_vector;
}
- rtw89_pci_default_intr_mask(rtwdev);
+ rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
return 0;
@@ -2545,38 +3279,123 @@ static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
pci_free_irq_vectors(pdev);
}
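+/* Decode helper for the PHY EQ readout: each output bit is the input bit
+ * XORed with the next more significant bit; the MSB passes through.
+ */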
+static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
+{
+ u16 bin = 0, gray_bit;
+ u32 bit_idx;
+
+ for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
+ gray_bit = (gray_code >> bit_idx) & 0x1;
+ if (bit_num - bit_idx > 1)
+ gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
+ bin |= (gray_bit << bit_idx);
+ }
+
+ return bin;
+}
+
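+/* 8852C Gen2 link tuning: read the Gray-coded EQ result from the PHY,
+ * decode it and program it into the filter-out field of RAC_ANA24.
+ */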
+static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 val16, filter_out_val;
+ u32 val, phy_offset;
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return 0;
+
+ val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
+ if (val == B_AX_ASPM_CTRL_L1)
+ return 0;
+
+ ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
+ if (val == RTW89_PCIE_GEN1_SPEED) {
+ phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ } else if (val == RTW89_PCIE_GEN2_SPEED) {
+ phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
+ val16 | B_PCIE_BIT_PINOUT_DIS);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
+ val16 & ~B_PCIE_BIT_RD_SEL);
+
+ val16 = rtw89_read16_mask(rtwdev,
+ phy_offset + RAC_ANA1F * RAC_MULT,
+ FILTER_OUT_EQ_MASK);
+ val16 = gray_code_to_bin(val16, hweight16(val16));
+ filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
+ RAC_MULT);
+ filter_out_val &= ~REG_FILTER_OUT_MASK;
+ filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
+
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
+ filter_out_val);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
+ B_BAC_EQ_SEL);
+ rtw89_write16_set(rtwdev,
+ R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
+ B_PCIE_BIT_PSAVE);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
+ B_PCIE_BIT_PSAVE);
+
+ return 0;
+}
+
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
if (rtw89_pci_disable_clkreq)
return;
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL,
- PCIE_CLKDLY_HW_30US);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_30US);
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
- else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
- if (ret)
- rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
- enable ? "set" : "unset", ret);
+ if (chip_id == RTL8852A) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
+ enable ? "set" : "unset", ret);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
+ B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
+ B_AX_CLK_REQ_N);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
+ B_AX_CLK_REQ_N);
+ }
}
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u8 value = 0;
int ret;
if (rtw89_pci_disable_aspm_l1)
return;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
@@ -2584,16 +3403,27 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
- if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
- else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ } else if (chip_id == RTL8852C) {
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_ASPM_CTRL_L1);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_ASPM_CTRL_L1);
+ }
if (ret)
rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -2654,17 +3484,34 @@ static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
- else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
- if (ret)
- rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
- enable ? "set" : "unset", ret);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
+ enable ? "set" : "unset", ret);
+ } else if (chip_id == RTL8852C) {
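+ /* On RTL8852C, clear the ASPM/PCI-PM L1.1 enables in config space
+  * first, then gate the L1 substates through B_AX_L1SUB_DISABLE.
+  */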
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
+ RTW89_PCIE_BIT_ASPM_L11 |
+ RTW89_PCIE_BIT_PCI_L11);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
+ if (enable)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_L1SUB_DISABLE);
+ else
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_L1SUB_DISABLE);
+ }
}
static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
@@ -2686,25 +3533,6 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
rtw89_pci_l1ss_set(rtwdev, true);
}
-static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
-{
- u32 val32;
-
- if (en == MAC_AX_FUNC_EN) {
- val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
-
- val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
- } else {
- val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
-
- val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
- }
-}
-
static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
{
int ret = 0;
@@ -2724,10 +3552,13 @@ static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
{
- u32 val, dma_rst = 0;
+ u32 val;
int ret;
- rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
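+ /* Assumption: RTL8852C does not require the host to stop PCIe DMA
+  * during level-1 recovery, so skip the idle polling below.
+  */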
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ rtw89_pci_ctrl_dma_all(rtwdev, false);
ret = rtw89_pci_poll_io_idle(rtwdev);
if (ret) {
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
@@ -2735,12 +3566,10 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
"[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
R_AX_DBG_ERR_FLAG, val);
if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
- dma_rst |= B_AX_HCI_TXDMA_EN;
+ rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
if (val & B_AX_RX_STUCK)
- dma_rst |= B_AX_HCI_RXDMA_EN;
- val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
- rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
- rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
+ rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
ret = rtw89_pci_poll_io_idle(rtwdev);
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
rtw89_debug(rtwdev, RTW89_DBG_HCI,
@@ -2751,18 +3580,7 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
return ret;
}
-static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
-{
- u32 val32;
- if (en == MAC_AX_FUNC_EN) {
- val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
- rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
- } else {
- val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
- rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
- }
-}
static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
{
@@ -2782,15 +3600,18 @@ static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
{
u32 ret;
- rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
- rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
rtw89_pci_clr_idx_all(rtwdev);
ret = rtw89_pci_rst_bdram(rtwdev);
if (ret)
return ret;
- rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
+ rtw89_pci_ctrl_dma_all(rtwdev, true);
return ret;
}
@@ -2849,7 +3670,7 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&rtwpci->irq_lock, flags);
if (likely(rtwpci->running))
- rtw89_pci_enable_intr(rtwdev, rtwpci);
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
@@ -2860,14 +3681,20 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw89_dev *rtwdev = hw->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ } else {
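+ /* Chips after RTL8852A/B (e.g. RTL8852C): disable CMAC/DMAC-triggered
+  * L1 exit while suspended.
+  */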
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
+ }
return 0;
}
@@ -2878,25 +3705,34 @@ static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
return;
/* Hardware needs the register written twice to make the setting take effect */
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
}
static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw89_dev *rtwdev = hw->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ } else {
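+ /* Re-enable CMAC/DMAC-triggered L1 exit and clear the forced L1
+  * entry request on the newer chips.
+  */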
+ rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_SEL_REQ_ENTR_L1);
+ }
rtw89_pci_l2_hci_ldo(rtwdev);
+ rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -2913,6 +3749,8 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.reset = rtw89_pci_ops_reset,
.start = rtw89_pci_ops_start,
.stop = rtw89_pci_ops_stop,
+ .pause = rtw89_pci_ops_pause,
+ .switch_mode = rtw89_pci_ops_switch_mode,
.recalc_int_mit = rtw89_pci_recalc_int_mit,
.read8 = rtw89_pci_ops_read8,
@@ -2930,41 +3768,38 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
.dump_err_status = rtw89_pci_ops_dump_err_status,
.napi_poll = rtw89_pci_napi_poll,
+
+ .recovery_start = rtw89_pci_ops_recovery_start,
+ .recovery_complete = rtw89_pci_ops_recovery_complete,
};
-static int rtw89_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
- int driver_data_size;
+ const struct rtw89_driver_info *info;
+ const struct rtw89_pci_info *pci_info;
int ret;
- driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci);
- hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops);
- if (!hw) {
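+ /* driver_data now carries a per-chip rtw89_driver_info, letting this
+  * single probe routine serve multiple chip generations.
+  */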
+ info = (const struct rtw89_driver_info *)id->driver_data;
+
+ rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
+ sizeof(struct rtw89_pci),
+ info->chip);
+ if (!rtwdev) {
dev_err(&pdev->dev, "failed to allocate hw\n");
return -ENOMEM;
}
- rtwdev = hw->priv;
- rtwdev->hw = hw;
- rtwdev->dev = &pdev->dev;
+ pci_info = info->bus.pci;
+
+ rtwdev->pci_info = info->bus.pci;
rtwdev->hci.ops = &rtw89_pci_ops;
rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
- rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM;
- rtwdev->hci.cpwm_addr = R_AX_CPWM;
+ rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
+ rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
- switch (id->driver_data) {
- case RTL8852A:
- rtwdev->chip = &rtw8852a_chip_info;
- break;
- default:
- return -ENOENT;
- }
-
ret = rtw89_core_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to initialise core\n");
@@ -2989,6 +3824,7 @@ static int rtw89_pci_probe(struct pci_dev *pdev,
goto err_clear_resource;
}
+ rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -3018,12 +3854,13 @@ err_declaim_pci:
err_core_deinit:
rtw89_core_deinit(rtwdev);
err_release_hw:
- ieee80211_free_hw(hw);
+ rtw89_free_ieee80211_hw(rtwdev);
return ret;
}
+EXPORT_SYMBOL(rtw89_pci_probe);
-static void rtw89_pci_remove(struct pci_dev *pdev)
+void rtw89_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw89_dev *rtwdev;
@@ -3036,24 +3873,9 @@ static void rtw89_pci_remove(struct pci_dev *pdev)
rtw89_pci_clear_resource(rtwdev, pdev);
rtw89_pci_declaim_device(rtwdev, pdev);
rtw89_core_deinit(rtwdev);
- ieee80211_free_hw(hw);
+ rtw89_free_ieee80211_hw(rtwdev);
}
-
-static const struct pci_device_id rtw89_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A },
- {},
-};
-MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table);
-
-static struct pci_driver rtw89_pci_driver = {
- .name = "rtw89_pci",
- .id_table = rtw89_pci_id_table,
- .probe = rtw89_pci_probe,
- .remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
-};
-module_pci_driver(rtw89_pci_driver);
+EXPORT_SYMBOL(rtw89_pci_remove);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 20e6767ea5c4..179740607778 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -11,8 +11,21 @@
#define MDIO_PG1_G1 1
#define MDIO_PG0_G2 2
#define MDIO_PG1_G2 3
+#define RAC_CTRL_PPR 0x00
+#define RAC_ANA0A 0x0A
+#define B_BAC_EQ_SEL BIT(5)
+#define RAC_ANA0C 0x0C
+#define B_PCIE_BIT_PSAVE BIT(15)
#define RAC_ANA10 0x10
+#define B_PCIE_BIT_PINOUT_DIS BIT(3)
+#define RAC_REG_REV2 0x1B
+#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12)
+#define PCIE_DPHY_DLY_25US 0x1
#define RAC_ANA19 0x19
+#define B_PCIE_BIT_RD_SEL BIT(2)
+#define RAC_REG_FLD_0 0x1D
+#define BAC_AUTOK_N_MASK GENMASK(3, 2)
+#define PCIE_AUTOK_4 0x3
#define RAC_ANA1F 0x1F
#define RAC_ANA24 0x24
#define B_AX_DEGLITCH GENMASK(11, 8)
@@ -35,13 +48,96 @@
#define R_AX_MDIO_WDATA 0x10A4
#define R_AX_MDIO_RDATA 0x10A6
+#define R_AX_PCIE_PS_CTRL_V1 0x3008
+#define B_AX_CMAC_EXIT_L1_EN BIT(7)
+#define B_AX_DMAC0_EXIT_L1_EN BIT(6)
+#define B_AX_SEL_XFER_PENDING BIT(3)
+#define B_AX_SEL_REQ_ENTR_L1 BIT(2)
+#define B_AX_SEL_REQ_EXIT_L1 BIT(0)
+
+#define R_AX_PCIE_MIX_CFG_V1 0x300C
+#define B_AX_ASPM_CTRL_L1 BIT(17)
+#define B_AX_ASPM_CTRL_L0 BIT(16)
+#define B_AX_ASPM_CTRL_MASK GENMASK(17, 16)
+#define B_AX_XFER_PENDING_FW BIT(11)
+#define B_AX_XFER_PENDING BIT(10)
+#define B_AX_REQ_EXIT_L1 BIT(9)
+#define B_AX_REQ_ENTR_L1 BIT(8)
+#define B_AX_L1SUB_DISABLE BIT(0)
+
+#define R_AX_L1_CLK_CTRL 0x3010
+#define B_AX_CLK_REQ_N BIT(1)
+
+#define R_AX_PCIE_BG_CLR 0x303C
+#define B_AX_BG_CLR_ASYNC_M3 BIT(4)
+
+#define R_AX_PCIE_LAT_CTRL 0x3044
+#define B_AX_CLK_REQ_SEL_OPT BIT(1)
+#define B_AX_CLK_REQ_SEL BIT(0)
+
+#define R_AX_PCIE_IO_RCY_M1 0x3100
+#define B_AX_PCIE_IO_RCY_P_M1 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_P_M1 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_M1 BIT(3)
+#define B_AX_PCIE_IO_RCY_TRIG_M1 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_M1 0x3104
+#define B_AX_PCIE_WDT_TIMER_M1_MASK GENMASK(31, 0)
+
+#define R_AX_PCIE_IO_RCY_M2 0x310C
+#define B_AX_PCIE_IO_RCY_P_M2 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_P_M2 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_M2 BIT(3)
+#define B_AX_PCIE_IO_RCY_TRIG_M2 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_M2 0x3110
+#define B_AX_PCIE_WDT_TIMER_M2_MASK GENMASK(31, 0)
+
+#define R_AX_PCIE_IO_RCY_E0 0x3118
+#define B_AX_PCIE_IO_RCY_P_E0 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_P_E0 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_E0 BIT(3)
+#define B_AX_PCIE_IO_RCY_TRIG_E0 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_E0 0x311C
+#define B_AX_PCIE_WDT_TIMER_E0_MASK GENMASK(31, 0)
+
+#define R_AX_PCIE_IO_RCY_S1 0x3124
+#define B_AX_PCIE_IO_RCY_RP_S1 BIT(7)
+#define B_AX_PCIE_IO_RCY_WP_S1 BIT(6)
+#define B_AX_PCIE_IO_RCY_WDT_RP_S1 BIT(5)
+#define B_AX_PCIE_IO_RCY_WDT_WP_S1 BIT(4)
+#define B_AX_PCIE_IO_RCY_WDT_MODE_S1 BIT(3)
+#define B_AX_PCIE_IO_RCY_RTRIG_S1 BIT(1)
+#define B_AX_PCIE_IO_RCY_WTRIG_S1 BIT(0)
+
+#define R_AX_PCIE_WDT_TIMER_S1 0x3128
+#define B_AX_PCIE_WDT_TIMER_S1_MASK GENMASK(31, 0)
+
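+/* Direct BAR windows onto the RAC analog registers; the 16-bit register
+ * index is scaled by RAC_MULT when accessed this way.
+ */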
+#define R_RAC_DIRECT_OFFSET_G1 0x3800
+#define FILTER_OUT_EQ_MASK GENMASK(14, 10)
+#define R_RAC_DIRECT_OFFSET_G2 0x3880
+#define REG_FILTER_OUT_MASK GENMASK(6, 2)
+#define RAC_MULT 2
+
#define RTW89_PCI_WR_RETRY_CNT 20
/* Interrupts */
#define R_AX_HIMR0 0x01A0
+#define B_AX_WDT_TIMEOUT_INT_EN BIT(22)
#define B_AX_HALT_C2H_INT_EN BIT(21)
#define R_AX_HISR0 0x01A4
+#define R_AX_HIMR1 0x01A8
+#define B_AX_GPIO18_INT_EN BIT(2)
+#define B_AX_GPIO17_INT_EN BIT(1)
+#define B_AX_GPIO16_INT_EN BIT(0)
+
+#define R_AX_HISR1 0x01AC
+#define B_AX_GPIO18_INT BIT(2)
+#define B_AX_GPIO17_INT BIT(1)
+#define B_AX_GPIO16_INT BIT(0)
+
#define R_AX_MDIO_CFG 0x10A0
#define B_AX_MDIO_PHY_ADDR_MASK GENMASK(13, 12)
#define B_AX_MDIO_RFLAG BIT(9)
@@ -49,6 +145,7 @@
#define B_AX_MDIO_ADDR_MASK GENMASK(4, 0)
#define R_AX_PCIE_HIMR00 0x10B0
+#define R_AX_HAXI_HIMR00 0x10B0
#define B_AX_HC00ISR_IND_INT_EN BIT(27)
#define B_AX_HD1ISR_IND_INT_EN BIT(26)
#define B_AX_HD0ISR_IND_INT_EN BIT(25)
@@ -77,6 +174,7 @@
#define B_AX_RXDMA_INT_EN BIT(0)
#define R_AX_PCIE_HISR00 0x10B4
+#define R_AX_HAXI_HISR00 0x10B4
#define B_AX_HC00ISR_IND_INT BIT(27)
#define B_AX_HD1ISR_IND_INT BIT(26)
#define B_AX_HD0ISR_IND_INT BIT(25)
@@ -104,6 +202,10 @@
#define B_AX_RXP1DMA_INT BIT(1)
#define B_AX_RXDMA_INT BIT(0)
+#define R_AX_HAXI_HIMR10 0x11E0
+#define B_AX_TXDMA_CH11_INT_EN_V1 BIT(1)
+#define B_AX_TXDMA_CH10_INT_EN_V1 BIT(0)
+
#define R_AX_PCIE_HIMR10 0x13B0
#define B_AX_HC10ISR_IND_INT_EN BIT(28)
#define B_AX_TXDMA_CH11_INT_EN BIT(12)
@@ -114,7 +216,32 @@
#define B_AX_TXDMA_CH11_INT BIT(12)
#define B_AX_TXDMA_CH10_INT BIT(11)
+#define R_AX_PCIE_HIMR00_V1 0x30B0
+#define B_AX_HCI_AXIDMA_INT_EN BIT(29)
+#define B_AX_HC00ISR_IND_INT_EN_V1 BIT(28)
+#define B_AX_HD1ISR_IND_INT_EN_V1 BIT(27)
+#define B_AX_HD0ISR_IND_INT_EN_V1 BIT(26)
+#define B_AX_HS1ISR_IND_INT_EN BIT(25)
+#define B_AX_PCIE_DBG_STE_INT_EN BIT(13)
+
+#define R_AX_PCIE_HISR00_V1 0x30B4
+#define B_AX_HCI_AXIDMA_INT BIT(29)
+#define B_AX_HC00ISR_IND_INT_V1 BIT(28)
+#define B_AX_HD1ISR_IND_INT_V1 BIT(27)
+#define B_AX_HD0ISR_IND_INT_V1 BIT(26)
+#define B_AX_HS1ISR_IND_INT BIT(25)
+#define B_AX_PCIE_DBG_STE_INT BIT(13)
+
/* TX/RX */
+#define R_AX_DRV_FW_HSK_0 0x01B0
+#define R_AX_DRV_FW_HSK_1 0x01B4
+#define R_AX_DRV_FW_HSK_2 0x01B8
+#define R_AX_DRV_FW_HSK_3 0x01BC
+#define R_AX_DRV_FW_HSK_4 0x01C0
+#define R_AX_DRV_FW_HSK_5 0x01C4
+#define R_AX_DRV_FW_HSK_6 0x01C8
+#define R_AX_DRV_FW_HSK_7 0x01CC
+
#define R_AX_RXQ_RXBD_IDX 0x1050
#define R_AX_RPQ_RXBD_IDX 0x1054
#define R_AX_ACH0_TXBD_IDX 0x1058
@@ -130,6 +257,10 @@
#define R_AX_CH10_TXBD_IDX 0x137C /* Management Queue band 1 */
#define R_AX_CH11_TXBD_IDX 0x1380 /* HI Queue band 1 */
#define R_AX_CH12_TXBD_IDX 0x1080 /* FWCMD Queue */
+#define R_AX_CH10_TXBD_IDX_V1 0x11D0
+#define R_AX_CH11_TXBD_IDX_V1 0x11D4
+#define R_AX_RXQ_RXBD_IDX_V1 0x1218
+#define R_AX_RPQ_RXBD_IDX_V1 0x121C
#define TXBD_HW_IDX_MASK GENMASK(27, 16)
#define TXBD_HOST_IDX_MASK GENMASK(11, 0)
@@ -163,6 +294,36 @@
#define R_AX_RXQ_RXBD_DESA_H 0x1104
#define R_AX_RPQ_RXBD_DESA_L 0x1108
#define R_AX_RPQ_RXBD_DESA_H 0x110C
+#define R_AX_RXQ_RXBD_DESA_L_V1 0x1220
+#define R_AX_RXQ_RXBD_DESA_H_V1 0x1224
+#define R_AX_RPQ_RXBD_DESA_L_V1 0x1228
+#define R_AX_RPQ_RXBD_DESA_H_V1 0x122C
+#define R_AX_ACH0_TXBD_DESA_L_V1 0x1230
+#define R_AX_ACH0_TXBD_DESA_H_V1 0x1234
+#define R_AX_ACH1_TXBD_DESA_L_V1 0x1238
+#define R_AX_ACH1_TXBD_DESA_H_V1 0x123C
+#define R_AX_ACH2_TXBD_DESA_L_V1 0x1240
+#define R_AX_ACH2_TXBD_DESA_H_V1 0x1244
+#define R_AX_ACH3_TXBD_DESA_L_V1 0x1248
+#define R_AX_ACH3_TXBD_DESA_H_V1 0x124C
+#define R_AX_ACH4_TXBD_DESA_L_V1 0x1250
+#define R_AX_ACH4_TXBD_DESA_H_V1 0x1254
+#define R_AX_ACH5_TXBD_DESA_L_V1 0x1258
+#define R_AX_ACH5_TXBD_DESA_H_V1 0x125C
+#define R_AX_ACH6_TXBD_DESA_L_V1 0x1260
+#define R_AX_ACH6_TXBD_DESA_H_V1 0x1264
+#define R_AX_ACH7_TXBD_DESA_L_V1 0x1268
+#define R_AX_ACH7_TXBD_DESA_H_V1 0x126C
+#define R_AX_CH8_TXBD_DESA_L_V1 0x1270
+#define R_AX_CH8_TXBD_DESA_H_V1 0x1274
+#define R_AX_CH9_TXBD_DESA_L_V1 0x1278
+#define R_AX_CH9_TXBD_DESA_H_V1 0x127C
+#define R_AX_CH12_TXBD_DESA_L_V1 0x1280
+#define R_AX_CH12_TXBD_DESA_H_V1 0x1284
+#define R_AX_CH10_TXBD_DESA_L_V1 0x1458
+#define R_AX_CH10_TXBD_DESA_H_V1 0x145C
+#define R_AX_CH11_TXBD_DESA_L_V1 0x1460
+#define R_AX_CH11_TXBD_DESA_H_V1 0x1464
#define B_AX_DESC_NUM_MSK GENMASK(11, 0)
#define R_AX_RXQ_RXBD_NUM 0x1020
@@ -180,6 +341,10 @@
#define R_AX_CH10_TXBD_NUM 0x1338
#define R_AX_CH11_TXBD_NUM 0x133A
#define R_AX_CH12_TXBD_NUM 0x1038
+#define R_AX_RXQ_RXBD_NUM_V1 0x1210
+#define R_AX_RPQ_RXBD_NUM_V1 0x1212
+#define R_AX_CH10_TXBD_NUM_V1 0x1438
+#define R_AX_CH11_TXBD_NUM_V1 0x143A
#define R_AX_ACH0_BDRAM_CTRL 0x1200
#define R_AX_ACH1_BDRAM_CTRL 0x1204
@@ -194,6 +359,19 @@
#define R_AX_CH10_BDRAM_CTRL 0x1320
#define R_AX_CH11_BDRAM_CTRL 0x1324
#define R_AX_CH12_BDRAM_CTRL 0x1228
+#define R_AX_ACH0_BDRAM_CTRL_V1 0x1300
+#define R_AX_ACH1_BDRAM_CTRL_V1 0x1304
+#define R_AX_ACH2_BDRAM_CTRL_V1 0x1308
+#define R_AX_ACH3_BDRAM_CTRL_V1 0x130C
+#define R_AX_ACH4_BDRAM_CTRL_V1 0x1310
+#define R_AX_ACH5_BDRAM_CTRL_V1 0x1314
+#define R_AX_ACH6_BDRAM_CTRL_V1 0x1318
+#define R_AX_ACH7_BDRAM_CTRL_V1 0x131C
+#define R_AX_CH8_BDRAM_CTRL_V1 0x1320
+#define R_AX_CH9_BDRAM_CTRL_V1 0x1324
+#define R_AX_CH12_BDRAM_CTRL_V1 0x1328
+#define R_AX_CH10_BDRAM_CTRL_V1 0x1420
+#define R_AX_CH11_BDRAM_CTRL_V1 0x1424
#define BDRAM_SIDX_MASK GENMASK(7, 0)
#define BDRAM_MAX_MASK GENMASK(15, 8)
#define BDRAM_MIN_MASK GENMASK(23, 16)
@@ -235,6 +413,16 @@
#define B_AX_STOP_RPQ BIT(1)
#define B_AX_STOP_RXQ BIT(0)
#define B_AX_TX_STOP1_ALL GENMASK(18, 8)
+#define B_AX_TX_STOP1_MASK (B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | \
+ B_AX_STOP_ACH2 | B_AX_STOP_ACH3 | \
+ B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | \
+ B_AX_STOP_ACH6 | B_AX_STOP_ACH7 | \
+ B_AX_STOP_CH8 | B_AX_STOP_CH9 | \
+ B_AX_STOP_CH12)
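+/* Assumption: the _V1 mask omits ACH4-ACH7, which the newer DMA channel
+ * layout does not implement.
+ */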
+#define B_AX_TX_STOP1_MASK_V1 (B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | \
+ B_AX_STOP_ACH2 | B_AX_STOP_ACH3 | \
+ B_AX_STOP_CH8 | B_AX_STOP_CH9 | \
+ B_AX_STOP_CH12)
#define R_AX_PCIE_DMA_STOP2 0x1310
#define B_AX_STOP_CH11 BIT(1)
@@ -270,6 +458,26 @@
#define B_AX_PCIEIO_TX_BUSY BIT(21)
#define B_AX_PCIEIO_BUSY BIT(20)
#define B_AX_WPDMA_BUSY BIT(19)
+#define B_AX_CH12_BUSY BIT(18)
+#define B_AX_CH9_BUSY BIT(17)
+#define B_AX_CH8_BUSY BIT(16)
+#define B_AX_ACH7_BUSY BIT(15)
+#define B_AX_ACH6_BUSY BIT(14)
+#define B_AX_ACH5_BUSY BIT(13)
+#define B_AX_ACH4_BUSY BIT(12)
+#define B_AX_ACH3_BUSY BIT(11)
+#define B_AX_ACH2_BUSY BIT(10)
+#define B_AX_ACH1_BUSY BIT(9)
+#define B_AX_ACH0_BUSY BIT(8)
+#define B_AX_RPQ_BUSY BIT(1)
+#define B_AX_RXQ_BUSY BIT(0)
+#define DMA_BUSY1_CHECK (B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | \
+ B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | \
+ B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | \
+ B_AX_CH9_BUSY | B_AX_CH12_BUSY)
+#define DMA_BUSY1_CHECK_V1 (B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | \
+ B_AX_ACH3_BUSY | B_AX_CH8_BUSY | B_AX_CH9_BUSY | \
+ B_AX_CH12_BUSY)
#define R_AX_PCIE_DMA_BUSY2 0x131C
#define B_AX_CH11_BUSY BIT(1)
@@ -279,6 +487,7 @@
#define R_AX_PCIE_INIT_CFG2 0x1004
#define B_AX_WD_ITVL_IDLE GENMASK(27, 24)
#define B_AX_WD_ITVL_ACT GENMASK(19, 16)
+#define B_AX_PCIE_RX_APPLEN_MASK GENMASK(13, 0)
#define R_AX_PCIE_PS_CTRL 0x1008
#define B_AX_L1OFF_PWR_OFF_EN BIT(5)
@@ -305,11 +514,22 @@
#define B_AX_PCIE_TXBD_LEN0 BIT(1)
#define B_AX_PCIE_TXBD_4KBOUD_LENERR BIT(0)
+#define R_AX_TXBD_RWPTR_CLR2_V1 0x11C4
+#define B_AX_CLR_CH11_IDX BIT(1)
+#define B_AX_CLR_CH10_IDX BIT(0)
+
#define R_AX_LBC_WATCHDOG 0x11D8
#define B_AX_LBC_TIMER GENMASK(7, 4)
#define B_AX_LBC_FLAG BIT(1)
#define B_AX_LBC_EN BIT(0)
+#define R_AX_RXBD_RWPTR_CLR_V1 0x1200
+#define B_AX_CLR_RPQ_IDX BIT(1)
+#define B_AX_CLR_RXQ_IDX BIT(0)
+
+#define R_AX_HAXI_EXP_CTRL 0x1204
+#define B_AX_MAX_TAG_NUM_V1_MASK GENMASK(2, 0)
+
#define R_AX_PCIE_EXP_CTRL 0x13F0
#define B_AX_EN_CHKDSC_NO_RX_STUCK BIT(20)
#define B_AX_MAX_TAG_NUM GENMASK(18, 16)
@@ -318,6 +538,9 @@
#define R_AX_PCIE_RX_PREF_ADV 0x13F4
#define B_AX_RXDMA_PREF_ADV_EN BIT(0)
+#define R_AX_PCIE_HRPWM_V1 0x30C0
+#define R_AX_PCIE_CRPWM 0x30C4
+
#define RTW89_PCI_TXBD_NUM_MAX 256
#define RTW89_PCI_RXBD_NUM_MAX 256
#define RTW89_PCI_TXWD_NUM_MAX 512
@@ -329,6 +552,17 @@
#define RTW89_PCI_MULTITAG 8
/* PCIE CFG register */
+#define RTW89_PCIE_L1_STS_V1 0x80
+#define RTW89_BCFG_LINK_SPEED_MASK GENMASK(19, 16)
+#define RTW89_PCIE_GEN1_SPEED 0x01
+#define RTW89_PCIE_GEN2_SPEED 0x02
+#define RTW89_PCIE_PHY_RATE 0x82
+#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+#define RTW89_PCIE_L1SS_STS_V1 0x0168
+#define RTW89_PCIE_BIT_ASPM_L11 BIT(3)
+#define RTW89_PCIE_BIT_ASPM_L12 BIT(2)
+#define RTW89_PCIE_BIT_PCI_L11 BIT(1)
+#define RTW89_PCIE_BIT_PCI_L12 BIT(0)
#define RTW89_PCIE_ASPM_CTRL 0x070F
#define RTW89_L1DLY_MASK GENMASK(5, 3)
#define RTW89_L0DLY_MASK GENMASK(2, 0)
@@ -340,8 +574,7 @@
#define RTW89_PCIE_CLK_CTRL 0x0725
#define RTW89_PCIE_RST_MSTATE 0x0B48
#define RTW89_PCIE_BIT_CFG_RST_MSTATE BIT(0)
-#define RTW89_PCIE_PHY_RATE 0x82
-#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+
#define INTF_INTGRA_MINREF_V1 90
#define INTF_INTGRA_HOSTREF_V1 100
@@ -351,11 +584,6 @@ enum rtw89_pcie_phy {
PCIE_PHY_GEN1_UNDEFINE = 0x7F,
};
-enum mac_ax_func_sw {
- MAC_AX_FUNC_DIS,
- MAC_AX_FUNC_EN,
-};
-
enum rtw89_pcie_l0sdly {
PCIE_L0SDLY_1US = 0,
PCIE_L0SDLY_2US = 1,
@@ -382,6 +610,182 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+enum mac_ax_bd_trunc_mode {
+ MAC_AX_BD_NORM,
+ MAC_AX_BD_TRUNC,
+ MAC_AX_BD_DEF = 0xFE
+};
+
+enum mac_ax_rxbd_mode {
+ MAC_AX_RXBD_PKT,
+ MAC_AX_RXBD_SEP,
+ MAC_AX_RXBD_DEF = 0xFE
+};
+
+enum mac_ax_tag_mode {
+ MAC_AX_TAG_SGL,
+ MAC_AX_TAG_MULTI,
+ MAC_AX_TAG_DEF = 0xFE
+};
+
+enum mac_ax_tx_burst {
+ MAC_AX_TX_BURST_16B = 0,
+ MAC_AX_TX_BURST_32B = 1,
+ MAC_AX_TX_BURST_64B = 2,
+ MAC_AX_TX_BURST_V1_64B = 0,
+ MAC_AX_TX_BURST_128B = 3,
+ MAC_AX_TX_BURST_V1_128B = 1,
+ MAC_AX_TX_BURST_256B = 4,
+ MAC_AX_TX_BURST_V1_256B = 2,
+ MAC_AX_TX_BURST_512B = 5,
+ MAC_AX_TX_BURST_1024B = 6,
+ MAC_AX_TX_BURST_2048B = 7,
+ MAC_AX_TX_BURST_DEF = 0xFE
+};
+
+enum mac_ax_rx_burst {
+ MAC_AX_RX_BURST_16B = 0,
+ MAC_AX_RX_BURST_32B = 1,
+ MAC_AX_RX_BURST_64B = 2,
+ MAC_AX_RX_BURST_V1_64B = 0,
+ MAC_AX_RX_BURST_128B = 3,
+ MAC_AX_RX_BURST_V1_128B = 1,
+ MAC_AX_RX_BURST_V1_256B = 0,
+ MAC_AX_RX_BURST_DEF = 0xFE
+};
+
+enum mac_ax_wd_dma_intvl {
+ MAC_AX_WD_DMA_INTVL_0S,
+ MAC_AX_WD_DMA_INTVL_256NS,
+ MAC_AX_WD_DMA_INTVL_512NS,
+ MAC_AX_WD_DMA_INTVL_768NS,
+ MAC_AX_WD_DMA_INTVL_1US,
+ MAC_AX_WD_DMA_INTVL_1_5US,
+ MAC_AX_WD_DMA_INTVL_2US,
+ MAC_AX_WD_DMA_INTVL_4US,
+ MAC_AX_WD_DMA_INTVL_8US,
+ MAC_AX_WD_DMA_INTVL_16US,
+ MAC_AX_WD_DMA_INTVL_DEF = 0xFE
+};
+
+enum mac_ax_multi_tag_num {
+ MAC_AX_TAG_NUM_1,
+ MAC_AX_TAG_NUM_2,
+ MAC_AX_TAG_NUM_3,
+ MAC_AX_TAG_NUM_4,
+ MAC_AX_TAG_NUM_5,
+ MAC_AX_TAG_NUM_6,
+ MAC_AX_TAG_NUM_7,
+ MAC_AX_TAG_NUM_8,
+ MAC_AX_TAG_NUM_DEF = 0xFE
+};
+
+enum mac_ax_lbc_tmr {
+ MAC_AX_LBC_TMR_8US = 0,
+ MAC_AX_LBC_TMR_16US,
+ MAC_AX_LBC_TMR_32US,
+ MAC_AX_LBC_TMR_64US,
+ MAC_AX_LBC_TMR_128US,
+ MAC_AX_LBC_TMR_256US,
+ MAC_AX_LBC_TMR_512US,
+ MAC_AX_LBC_TMR_1MS,
+ MAC_AX_LBC_TMR_2MS,
+ MAC_AX_LBC_TMR_4MS,
+ MAC_AX_LBC_TMR_8MS,
+ MAC_AX_LBC_TMR_DEF = 0xFE
+};
+
+enum mac_ax_pcie_func_ctrl {
+ MAC_AX_PCIE_DISABLE = 0,
+ MAC_AX_PCIE_ENABLE = 1,
+ MAC_AX_PCIE_DEFAULT = 0xFE,
+ MAC_AX_PCIE_IGNORE = 0xFF
+};
+
+enum mac_ax_io_rcy_tmr {
+ MAC_AX_IO_RCY_ANA_TMR_2MS = 24000,
+ MAC_AX_IO_RCY_ANA_TMR_4MS = 48000,
+ MAC_AX_IO_RCY_ANA_TMR_6MS = 72000,
+ MAC_AX_IO_RCY_ANA_TMR_DEF = 0xFE
+};
+
+enum rtw89_pci_intr_mask_cfg {
+ RTW89_PCI_INTR_MASK_RESET,
+ RTW89_PCI_INTR_MASK_NORMAL,
+ RTW89_PCI_INTR_MASK_LOW_POWER,
+ RTW89_PCI_INTR_MASK_RECOVERY_START,
+ RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE,
+};
+
+struct rtw89_pci_isrs;
+struct rtw89_pci;
+
+struct rtw89_pci_bd_idx_addr {
+ u32 tx_bd_addrs[RTW89_TXCH_NUM];
+ u32 rx_bd_addrs[RTW89_RXCH_NUM];
+};
+
+struct rtw89_pci_ch_dma_addr {
+ u32 num;
+ u32 idx;
+ u32 bdram;
+ u32 desa_l;
+ u32 desa_h;
+};
+
+struct rtw89_pci_ch_dma_addr_set {
+ struct rtw89_pci_ch_dma_addr tx[RTW89_TXCH_NUM];
+ struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
+};
+
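+/* Per-chip PCI/HAXI description: DMA ring layout, register addresses and
+ * interrupt handling ops, selected through the probe-time driver info.
+ */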
+struct rtw89_pci_info {
+ enum mac_ax_bd_trunc_mode txbd_trunc_mode;
+ enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
+ enum mac_ax_rxbd_mode rxbd_mode;
+ enum mac_ax_tag_mode tag_mode;
+ enum mac_ax_tx_burst tx_burst;
+ enum mac_ax_rx_burst rx_burst;
+ enum mac_ax_wd_dma_intvl wd_dma_idle_intvl;
+ enum mac_ax_wd_dma_intvl wd_dma_act_intvl;
+ enum mac_ax_multi_tag_num multi_tag_num;
+ enum mac_ax_pcie_func_ctrl lbc_en;
+ enum mac_ax_lbc_tmr lbc_tmr;
+ enum mac_ax_pcie_func_ctrl autok_en;
+ enum mac_ax_pcie_func_ctrl io_rcy_en;
+ enum mac_ax_io_rcy_tmr io_rcy_tmr;
+
+ u32 init_cfg_reg;
+ u32 txhci_en_bit;
+ u32 rxhci_en_bit;
+ u32 rxbd_mode_bit;
+ u32 exp_ctrl_reg;
+ u32 max_tag_num_mask;
+ u32 rxbd_rwptr_clr_reg;
+ u32 txbd_rwptr_clr2_reg;
+ struct rtw89_reg_def dma_stop1;
+ struct rtw89_reg_def dma_stop2;
+ struct rtw89_reg_def dma_busy1;
+ u32 dma_busy2_reg;
+ u32 dma_busy3_reg;
+
+ u32 rpwm_addr;
+ u32 cpwm_addr;
+ u32 tx_dma_ch_mask;
+ const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
+ const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+
+ int (*ltr_set)(struct rtw89_dev *rtwdev, bool en);
+ u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr);
+ void (*config_intr_mask)(struct rtw89_dev *rtwdev);
+ void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+ void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+ void (*recognize_intrs)(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
+};
+
struct rtw89_pci_bd_ram {
u8 start_idx;
u8 max_num;
@@ -425,6 +829,18 @@ struct rtw89_pci_tx_addr_info_32 {
__le32 dma;
} __packed;
+#define RTW89_TXADDR_INFO_NR_V1 10
+
+struct rtw89_pci_tx_addr_info_32_v1 {
+ __le16 length_opt;
+#define B_PCIADDR_LEN_V1_MASK GENMASK(10, 0)
+#define B_PCIADDR_HIGH_SEL_V1_MASK GENMASK(14, 11)
+#define B_PCIADDR_LS_V1_MASK BIT(15)
+#define TXADDR_INFO_LENTHG_V1_MAX ALIGN_DOWN(BIT(11) - 1, 4)
+ __le16 dma_low_lsb;
+ __le16 dma_low_msb;
+} __packed;
+
#define RTW89_PCI_RPP_POLLUTED BIT(31)
#define RTW89_PCI_RPP_SEQ GENMASK(30, 16)
#define RTW89_PCI_RPP_TX_STATUS GENMASK(15, 13)
@@ -469,11 +885,7 @@ struct rtw89_pci_dma_ring {
u8 desc_size;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
- u32 addr_desa_h;
+ struct rtw89_pci_ch_dma_addr addr;
u32 len;
u32 wp; /* host idx */
@@ -518,6 +930,7 @@ struct rtw89_pci_rx_ring {
};
struct rtw89_pci_isrs {
+ u32 ind_isrs;
u32 halt_c2h_isrs;
u32 isrs[2];
};
@@ -530,11 +943,15 @@ struct rtw89_pci {
/* protect TRX resources (exclude RXQ) */
spinlock_t trx_lock;
bool running;
+ bool low_power;
+ bool under_recovery;
struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
struct sk_buff_head h2c_queue;
struct sk_buff_head h2c_release_queue;
+ DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM);
+ u32 ind_intrs;
u32 halt_c2h_intrs;
u32 intrs[2];
void __iomem *mmap;
@@ -626,5 +1043,102 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
}
extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
+
+struct pci_device_id;
+
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw89_pci_remove(struct pci_dev *pdev);
+int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en);
+int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en);
+u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr);
+u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr);
+void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev);
+void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev);
+void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
+void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
+
+static inline
+u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev,
+ void *txaddr_info_addr, u32 total_len,
+ dma_addr_t dma, u8 *add_info_nr)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ return info->fill_txaddr_info(rtwdev, txaddr_info_addr, total_len,
+ dma, add_info_nr);
+}
+
+static inline void rtw89_chip_config_intr_mask(struct rtw89_dev *rtwdev,
+ enum rtw89_pci_intr_mask_cfg cfg)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ switch (cfg) {
+ default:
+ case RTW89_PCI_INTR_MASK_RESET:
+ rtwpci->low_power = false;
+ rtwpci->under_recovery = false;
+ break;
+ case RTW89_PCI_INTR_MASK_NORMAL:
+ rtwpci->low_power = false;
+ break;
+ case RTW89_PCI_INTR_MASK_LOW_POWER:
+ rtwpci->low_power = true;
+ break;
+ case RTW89_PCI_INTR_MASK_RECOVERY_START:
+ rtwpci->under_recovery = true;
+ break;
+ case RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE:
+ rtwpci->under_recovery = false;
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "Configure PCI interrupt mask mode low_power=%d under_recovery=%d\n",
+ rtwpci->low_power, rtwpci->under_recovery);
+
+ info->config_intr_mask(rtwdev);
+}
+
+static inline
+void rtw89_chip_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ info->enable_intr(rtwdev, rtwpci);
+}
+
+static inline
+void rtw89_chip_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ info->disable_intr(rtwdev, rtwpci);
+}
+
+static inline
+void rtw89_chip_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ info->recognize_intrs(rtwdev, rtwpci, isrs);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 147009888de0..6a6bdc652e09 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -4,6 +4,7 @@
#include "debug.h"
#include "fw.h"
+#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
@@ -13,23 +14,14 @@
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
- const struct rate_info *txrate = &report->txrate;
u32 bit_rate = report->bit_rate;
- u8 mcs;
/* lower than ofdm, do not aggregate */
if (bit_rate < 550)
return 1;
- /* prevent hardware rate fallback to G mode rate */
- if (txrate->flags & RATE_INFO_FLAGS_MCS)
- mcs = txrate->mcs & 0x07;
- else if (txrate->flags & (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS))
- mcs = txrate->mcs;
- else
- mcs = 0;
-
- if (mcs <= 2)
+ /* avoid AMSDU for legacy rate */
+ if (report->might_fallback_legacy)
return 1;
/* lower than 20M vht 2ss mcs8, make it small */
@@ -75,10 +67,10 @@ static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
static u64 get_he_ra_mask(struct ieee80211_sta *sta)
{
- struct ieee80211_sta_he_cap cap = sta->he_cap;
+ struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
u16 mcs_map;
- switch (sta->bandwidth) {
+ switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
@@ -117,21 +109,32 @@ static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
else if (rssi_lv == 1)
return 0xfffffffffffffff0ULL;
else if (rssi_lv == 2)
- return 0xffffffffffffffe0ULL;
+ return 0xffffffffffffefe0ULL;
else if (rssi_lv == 3)
- return 0xffffffffffffffc0ULL;
+ return 0xffffffffffffcfc0ULL;
else if (rssi_lv == 4)
- return 0xffffffffffffff80ULL;
+ return 0xffffffffffff8f80ULL;
else if (rssi_lv >= 5)
- return 0xffffffffffffff00ULL;
+ return 0xffffffffffff0f00ULL;
return 0xffffffffffffffffULL;
}
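+/* If masking removed every usable rate, restore rates from the backup
+ * mask: the HT/VHT/HE portion first, then the CCK/OFDM portion if the
+ * mask is still empty.
+ */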
+static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
- struct rtw89_hal *hal = &rtwdev->hal;
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
enum nl80211_band band;
u64 cfg_mask;
@@ -139,7 +142,7 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
if (!rtwsta->use_cfg_mask)
return -1;
- switch (hal->current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
band = NL80211_BAND_2GHZ;
cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
@@ -150,22 +153,27 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
RA_MASK_OFDM_RATES);
break;
+ case RTW89_BAND_6G:
+ band = NL80211_BAND_6GHZ;
+ cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
+ RA_MASK_OFDM_RATES);
+ break;
default:
- rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
+ rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
return -1;
}
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
RA_MASK_HE_1SS_RATES);
cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
RA_MASK_HE_2SS_RATES);
- } else if (sta->vht_cap.vht_supported) {
+ } else if (sta->deflink.vht_cap.vht_supported) {
cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
RA_MASK_VHT_1SS_RATES);
cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
RA_MASK_VHT_2SS_RATES);
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
RA_MASK_HT_1SS_RATES);
cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
@@ -185,6 +193,40 @@ static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
+static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ bool *fix_giltf_en, u8 *fix_giltf)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
+ u8 band = chan->band_type;
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ u8 he_gi = mask->control[nl_band].he_gi;
+ u8 he_ltf = mask->control[nl_band].he_ltf;
+
+ if (!rtwsta->use_cfg_mask)
+ return;
+
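+ /* nl80211 encodes he_gi as 0/1/2 = 0.8/1.6/3.2 us and he_ltf as
+  * 0/1/2 = 1x/2x/4x; map each valid combination to a fixed GI/LTF.
+  */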
+ if (he_ltf == 2 && he_gi == 2) {
+ *fix_giltf = RTW89_GILTF_LGI_4XHE32;
+ } else if (he_ltf == 2 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_SGI_4XHE08;
+ } else if (he_ltf == 1 && he_gi == 1) {
+ *fix_giltf = RTW89_GILTF_2XHE16;
+ } else if (he_ltf == 1 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_2XHE08;
+ } else if (he_ltf == 0 && he_gi == 1) {
+ *fix_giltf = RTW89_GILTF_1XHE16;
+ } else if (he_ltf == 0 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_1XHE08;
+ } else {
+ *fix_giltf_en = false;
+ return;
+ }
+
+ *fix_giltf_en = true;
+}
+
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, bool csi)
{
@@ -192,110 +234,132 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
struct rtw89_ra_info *ra = &rtwsta->ra;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- u64 high_rate_mask = 0;
u64 ra_mask = 0;
+ u64 ra_mask_bak;
u8 mode = 0;
u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
u8 bw_mode = 0;
u8 stbc_en = 0;
u8 ldpc_en = 0;
+ u8 fix_giltf = 0;
u8 i;
bool sgi = false;
+ bool fix_giltf_en = false;
memset(ra, 0, sizeof(*ra));
/* Set the ra mask from sta's capability */
- if (sta->he_cap.has_he) {
+ if (sta->deflink.he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
ra_mask |= get_he_ra_mask(sta);
high_rate_masks = rtw89_ra_mask_he_rates;
- if (sta->he_cap.he_cap_elem.phy_cap_info[2] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
stbc_en = 1;
- if (sta->he_cap.he_cap_elem.phy_cap_info[1] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
- } else if (sta->vht_cap.vht_supported) {
- u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, &fix_giltf_en, &fix_giltf);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
mode |= RTW89_RA_MODE_VHT;
csi_mode = RTW89_RA_RPT_MODE_VHT;
/* MCS9, MCS8, MCS7 */
ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
high_rate_masks = rtw89_ra_mask_vht_rates;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = 1;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = 1;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->deflink.ht_cap.ht_supported) {
mode |= RTW89_RA_MODE_HT;
csi_mode = RTW89_RA_RPT_MODE_HT;
- ra_mask |= ((u64)sta->ht_cap.mcs.rx_mask[3] << 48) |
- ((u64)sta->ht_cap.mcs.rx_mask[2] << 36) |
- (sta->ht_cap.mcs.rx_mask[1] << 24) |
- (sta->ht_cap.mcs.rx_mask[0] << 12);
+ ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
ldpc_en = 1;
}
- if (rtwdev->hal.current_band_type == RTW89_BAND_2G) {
- if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
mode |= RTW89_RA_MODE_CCK;
- else
- mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
- } else {
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
+ mode |= RTW89_RA_MODE_OFDM;
+ break;
+ case RTW89_BAND_5G:
+ ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
+ break;
+ case RTW89_BAND_6G:
+ ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
+ mode |= RTW89_RA_MODE_OFDM;
+ break;
+ default:
+ rtw89_err(rtwdev, "Unknown band type\n");
+ break;
}
+ ra_mask_bak = ra_mask;
+
if (mode >= RTW89_RA_MODE_HT) {
+ u64 mask = 0;
for (i = 0; i < rtwdev->hal.tx_nss; i++)
- high_rate_mask |= high_rate_masks[i];
- ra_mask &= high_rate_mask;
+ mask |= high_rate_masks[i];
if (mode & RTW89_RA_MODE_OFDM)
- ra_mask |= RA_MASK_SUBOFDM_RATES;
+ mask |= RA_MASK_SUBOFDM_RATES;
if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
+ mask |= RA_MASK_SUBCCK_RATES;
+ ra_mask &= mask;
} else if (mode & RTW89_RA_MODE_OFDM) {
- if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
- ra_mask |= RA_MASK_OFDM_RATES;
- } else {
- ra_mask = RA_MASK_CCK_RATES;
+ ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
}
- if (mode != RTW89_RA_MODE_CCK) {
+ if (mode != RTW89_RA_MODE_CCK)
ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
- ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
- }
- switch (sta->bandwidth) {
+ ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ bw_mode = RTW89_CHANNEL_WIDTH_160;
+ sgi = sta->deflink.vht_cap.vht_supported &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+ break;
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW89_CHANNEL_WIDTH_80;
- sgi = sta->vht_cap.vht_supported &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ sgi = sta->deflink.vht_cap.vht_supported &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
break;
case IEEE80211_STA_RX_BW_40:
bw_mode = RTW89_CHANNEL_WIDTH_40;
- sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
break;
default:
bw_mode = RTW89_CHANNEL_WIDTH_20;
- sgi = sta->ht_cap.ht_supported &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ sgi = sta->deflink.ht_cap.ht_supported &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
break;
}
- if (sta->he_cap.he_cap_elem.phy_cap_info[3] &
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
ra->dcm_cap = 1;
- if (rate_pattern->enable) {
+ if (rate_pattern->enable && !vif->p2p) {
ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
ra_mask &= rate_pattern->ra_mask;
mode = rate_pattern->ra_mode;
@@ -306,9 +370,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->macid = rtwsta->mac_id;
ra->stbc_cap = stbc_en;
ra->ldpc_cap = ldpc_en;
- ra->ss_num = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
+ ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
ra->en_sgi = sgi;
ra->ra_mask = ra_mask;
+ ra->fix_giltf_en = fix_giltf_en;
+ ra->fix_giltf = fix_giltf;
if (!csi)
return;
@@ -323,13 +389,19 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
+void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+ u32 changed)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_ra_info *ra = &rtwsta->ra;
rtw89_phy_ra_sta_update(rtwdev, sta, false);
- ra->upd_mask = 1;
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
+ ra->upd_mask = 1;
+ if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
+ ra->upd_bw_nss_mask = 1;
+
rtw89_debug(rtwdev, RTW89_DBG_RA,
"ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
ra->macid,
@@ -376,6 +448,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_supported_band *sband;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern next_pattern = {0};
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u16 hw_rate_he[] = {RTW89_HW_RATE_HE_NSS1_MCS0,
RTW89_HW_RATE_HE_NSS2_MCS0,
RTW89_HW_RATE_HE_NSS3_MCS0,
@@ -388,28 +461,29 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
RTW89_HW_RATE_MCS8,
RTW89_HW_RATE_MCS16,
RTW89_HW_RATE_MCS24};
- u8 band = rtwdev->hal.current_band_type;
+ u8 band = chan->band_type;
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
u8 tx_nss = rtwdev->hal.tx_nss;
u8 i;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_he[i],
RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
- mask->control[band].he_mcs[i],
+ mask->control[nl_band].he_mcs[i],
0, true))
goto out;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i],
RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
- mask->control[band].vht_mcs[i],
+ mask->control[nl_band].vht_mcs[i],
0, true))
goto out;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i],
RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
- mask->control[band].ht_mcs[i],
+ mask->control[nl_band].ht_mcs[i],
0, true))
goto out;
@@ -417,18 +491,18 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
* require at least one basic rate for ieee80211_set_bitrate_mask,
* so the decision simply depends on whether all bitrates are set.
*/
- sband = rtwdev->hw->wiphy->bands[band];
+ sband = rtwdev->hw->wiphy->bands[nl_band];
if (band == RTW89_BAND_2G) {
if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
- mask->control[band].legacy,
+ mask->control[nl_band].legacy,
BIT(sband->n_bitrates) - 1, false))
goto out;
} else {
if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
- mask->control[band].legacy,
+ mask->control[nl_band].legacy,
BIT(sband->n_bitrates) - 1, false))
goto out;
}
@@ -453,7 +527,7 @@ static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
- rtw89_phy_ra_updata_sta(rtwdev, sta);
+ rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
@@ -501,12 +575,12 @@ void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
}
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw)
{
- enum rtw89_bandwidth cbw = param->bandwidth;
- u8 pri_ch = param->primary_chan;
- u8 central_ch = param->center_chan;
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
u8 txsc_idx = 0;
u8 tmp = 0;
@@ -568,6 +642,13 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
return txsc_idx;
}
+EXPORT_SYMBOL(rtw89_phy_get_txsc);
+
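+/* SWSI: serial RF register interface used by the _v1 accessors below to
+ * reach analog RF registers that are not directly addressable.
+ */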
+static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
+{
+ return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
+ !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
+}
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask)
@@ -591,6 +672,56 @@ u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf);
+static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ bool busy;
+ bool done;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+ 1, 30, false, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "read rf busy swsi\n");
+ return INV_RF_DATA;
+ }
+
+ mask &= RFREG_MASK;
+
+ val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
+ FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
+ 30, false, rtwdev, R_SWSI_V1,
+ B_SWSI_R_DATA_DONE_V1);
+ if (ret) {
+ rtw89_err(rtwdev, "read swsi busy\n");
+ return INV_RF_DATA;
+ }
+
+ return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
+}
+
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -616,6 +747,60 @@ bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf);
+static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask,
+ u32 data)
+{
+ u8 bit_shift;
+ u32 val;
+ bool busy, b_msk_en = false;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+ 1, 30, false, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "write rf busy swsi\n");
+ return false;
+ }
+
+ data &= RFREG_MASK;
+ mask &= RFREG_MASK;
+
+ if (mask != RFREG_MASK) {
+ b_msk_en = true;
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
+ mask);
+ bit_shift = __ffs(mask);
+ data = (data << bit_shift) & RFREG_MASK;
+ }
+
+ val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
+ FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
+ FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
+ FIELD_PREP(B_SWSI_DATA_VAL_V1, data);
+
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);
+
+ return true;
+}
+
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+
static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -645,6 +830,245 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
rtw89_phy_write32(rtwdev, reg->addr, reg->data);
}
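+/* BB gain table entries overload the 32-bit "address" word with parsing
+ * metadata (interpreted little-endian): bits 7:0 entry type (or the
+ * rxsc_start/bw nibbles), 15:8 RF path, 23:16 gain band, 31:24 config type.
+ */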
+union rtw89_phy_bb_gain_arg {
+ u32 addr;
+ struct {
+ union {
+ u8 type;
+ struct {
+ u8 rxsc_start:4;
+ u8 bw:4;
+ };
+ };
+ u8 path;
+ u8 gain_band;
+ u8 cfg_type;
+ };
+} __packed;
+
+static void
+rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 type = arg.type;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain[gband][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain[gband][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 2; i++, data >>= 8)
+ gain->tia_gain[gband][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain error {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+enum rtw89_phy_bb_rxsc_start_idx {
+ RTW89_BB_RXSC_START_IDX_FULL = 0,
+ RTW89_BB_RXSC_START_IDX_20 = 1,
+ RTW89_BB_RXSC_START_IDX_20_1 = 5,
+ RTW89_BB_RXSC_START_IDX_40 = 9,
+ RTW89_BB_RXSC_START_IDX_80 = 13,
+};
+
+static void
+rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 rxsc_start = arg.rxsc_start;
+ u8 bw = arg.bw;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ u8 rxsc;
+ s8 ofst;
+ int i;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ gain->rpl_ofst_20[gband][path] = (s8)data;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
+ gain->rpl_ofst_40[gband][path][0] = (s8)data;
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
+ for (i = 0; i < 2; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_40[gband][path][rxsc] = ofst;
+ }
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
+ gain->rpl_ofst_80[gband][path][0] = (s8)data;
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_80[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
+ for (i = 0; i < 2; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_80[gband][path][rxsc] = ofst;
+ }
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
+ gain->rpl_ofst_160[gband][path][0] = (s8)data;
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
+ for (i = 0; i < 4; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
+ for (i = 0; i < 2; i++, data >>= 8) {
+ rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
+ ofst = (s8)(data & 0xff);
+ gain->rpl_ofst_160[gband][path][rxsc] = ofst;
+ }
+ }
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
+ arg.addr, data, bw);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 type = arg.type;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain_bypass[gband][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain_bypass[gband][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 type = arg.type;
+ u8 path = arg.path;
+ u8 gband = arg.gain_band;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ case 3:
+ for (i = 4; i < 8; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
+
+ if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
+ return;
+
+ if (arg.path >= chip->rf_path_num)
+ return;
+
+ if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
+ rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
+ return;
+ }
+
+ switch (arg.cfg_type) {
+ case 0:
+ rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
+ break;
+ case 1:
+ rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
+ break;
+ case 2:
+ rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
+ break;
+ case 3:
+ rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
+ arg.addr, reg->data, arg.cfg_type);
+ break;
+ }
+}
+
static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
@@ -717,6 +1141,21 @@ static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
}
}
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
+
+ if (reg->addr < 0x100)
+ return;
+
+ rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
+ (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
+}
+EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
+
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
const struct rtw89_phy_table *table,
u32 *headline_size, u32 *headline_idx,
@@ -873,9 +1312,13 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_phy_table *bb_table = chip->bb_table;
+ const struct rtw89_phy_table *bb_gain_table = chip->bb_gain_table;
rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
+ if (bb_gain_table)
+ rtw89_phy_init_reg(rtwdev, bb_gain_table,
+ rtw89_phy_config_bb_gain, NULL);
rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}
@@ -888,6 +1331,8 @@ static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
{
+ void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path, void *data);
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_phy_table *rf_table;
struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
@@ -898,13 +1343,13 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
return;
for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
- rf_reg_info->rf_path = path;
rf_table = chip->rf_table[path];
- rtw89_phy_init_reg(rtwdev, rf_table, rtw89_phy_config_rf_reg,
- (void *)rf_reg_info);
+ rf_reg_info->rf_path = rf_table->rf_path;
+ config = rf_table->config ? rf_table->config : rtw89_phy_config_rf_reg;
+ rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
- path);
+ rf_reg_info->rf_path);
}
kfree(rf_reg_info);
}
@@ -972,6 +1417,7 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
addr += rtw89_phy0_phy1_offset(rtwdev, addr);
rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
+EXPORT_SYMBOL(rtw89_phy_write32_idx);
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val)
@@ -995,6 +1441,7 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
}
}
+EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_MAX,
@@ -1003,6 +1450,7 @@ const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX,
[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX,
};
+EXPORT_SYMBOL(rtw89_rs_idx_max);
const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_CCK] = 1,
@@ -1011,6 +1459,7 @@ const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX,
[RTW89_RS_OFFSET] = 1,
};
+EXPORT_SYMBOL(rtw89_rs_nss_max);
static const u8 _byr_of_rs[] = {
[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
@@ -1044,6 +1493,7 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
}
}
+EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \
({ \
@@ -1051,10 +1501,9 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
(txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \
})
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
s8 *byr;
u8 idx;
@@ -1074,9 +1523,38 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_byrate);
+
+static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
+{
+ switch (channel_6g) {
+ case 1 ... 29:
+ return (channel_6g - 1) / 2;
+ case 33 ... 61:
+ return (channel_6g - 3) / 2;
+ case 65 ... 93:
+ return (channel_6g - 5) / 2;
+ case 97 ... 125:
+ return (channel_6g - 7) / 2;
+ case 129 ... 157:
+ return (channel_6g - 9) / 2;
+ case 161 ... 189:
+ return (channel_6g - 11) / 2;
+ case 193 ... 221:
+ return (channel_6g - 13) / 2;
+ case 225 ... 253:
+ return (channel_6g - 15) / 2;
+ default:
+ rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
+ return 0;
+ }
+}
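+
+/* Worked example of the 6 GHz mapping above: valid center channels are
+ * odd-numbered, and the switch skips 31, 63, 95, ... (numbers no 6 GHz
+ * channel uses), so the cases yield a contiguous index range 0..119:
+ * channel 1 -> 0, channel 29 -> 14, channel 33 -> 15, channel 253 -> 119.
+ */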
-static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
+ if (band == RTW89_BAND_6G)
+ return rtw89_channel_6g_to_idx(rtwdev, channel);
+
switch (channel) {
case 1 ... 14:
return channel - 1;
@@ -1092,12 +1570,11 @@ static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
}
}
-s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
- u8 band = rtwdev->hal.current_band_type;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt = 0, sar;
@@ -1114,6 +1591,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf]
[RTW89_WW][ch_idx];
break;
+ case RTW89_BAND_6G:
+ lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf][regd][ch_idx];
+ if (!lmt)
+ lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf]
+ [RTW89_WW][ch_idx];
+ break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
return 0;
@@ -1124,12 +1607,14 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
return min(lmt, sar);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
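+
+/* Usage sketch; RTW89_1TX and RTW89_NONBF are the driver's existing
+ * enum values, the channel is illustrative. Reads the 20 MHz OFDM
+ * limit for one TX chain on 6 GHz channel 37, with the RTW89_WW
+ * fallback and SAR capping applied as above:
+ *
+ *	s8 lmt = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_BAND_6G,
+ *					    RTW89_CHANNEL_WIDTH_20,
+ *					    RTW89_1TX, RTW89_RS_OFDM,
+ *					    RTW89_NONBF, 37);
+ */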
-#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
+#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch) \
do { \
u8 __i; \
for (__i = 0; __i < RTW89_BF_NUM; __i++) \
ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \
+ band, \
bw, ntx, \
rs, __i, \
(ch)); \
@@ -1137,98 +1622,204 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
- __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
- ntx, RTW89_RS_OFDM, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, pri_ch);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
+{
+ s8 val_0p5_n[RTW89_BF_NUM];
+ s8 val_0p5_p[RTW89_BF_NUM];
+ u8 i;
+
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, pri_ch);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
+ RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
+ RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch);
+
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
+}
+
+static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
+ s8 val_2p5_n[RTW89_BF_NUM];
+ s8 val_2p5_p[RTW89_BF_NUM];
u8 i;
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
- ntx, RTW89_RS_OFDM, ch - 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ /* fill ofdm section */
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, pri_ch);
+
+ /* fill mcs 20m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 14);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 10);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 10);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
+ RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 14);
+
+ /* fill mcs 40m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 12);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
+ RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 12);
+
+ /* fill mcs 80m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
+ RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch - 8);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
+ RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch + 8);
+
+ /* fill mcs 160m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
+ RTW89_CHANNEL_WIDTH_160,
ntx, RTW89_RS_MCS, ch);
- __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ /* fill mcs 40m 0p5 section */
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
for (i = 0; i < RTW89_BF_NUM; i++)
lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
+
+ /* fill mcs 40m 2p5 section */
+ __fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 8);
+ __fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 8);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
u8 ntx)
{
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 band = chan->band_type;
+ u8 pri_ch = chan->primary_channel;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
memset(lmt, 0, sizeof(*lmt));
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
-static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
- u8 band = rtwdev->hal.current_band_type;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt_ru = 0, sar;
@@ -1245,6 +1836,12 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx]
[RTW89_WW][ch_idx];
break;
+ case RTW89_BAND_6G:
+ lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx][regd][ch_idx];
+ if (!lmt_ru)
+ lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx]
+ [RTW89_WW][ch_idx];
+ break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
return 0;
@@ -1259,87 +1856,143 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
static void
rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch);
}
static void
rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 2);
- lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 2);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 2);
- lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 2);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 2);
- lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 2);
}
static void
rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 6);
- lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 2);
- lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 2);
- lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 6);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 6);
- lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 2);
- lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 2);
- lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 6);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 6);
- lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 2);
- lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 2);
- lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 6);
}
+static void
+rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 band, u8 ntx, u8 ch)
+{
+ static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
+ int i;
+
+ static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
+ for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
+ lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
+ ntx,
+ ch + ofst[i]);
+ lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
+ ntx,
+ ch + ofst[i]);
+ lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
+ ntx,
+ ch + ofst[i]);
+ }
+}
+
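+/* Worked example for the offset table above: a 160 MHz block centered
+ * on 6 GHz channel 15 has its eight 20 MHz subchannels centered on
+ * ch + {-14, -10, -6, -2, 2, 6, 10, 14} = 1, 5, 9, 13, 17, 21, 25, 29,
+ * so one RU26/RU52/RU106 limit is read per subchannel section.
+ */
+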
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx)
{
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
memset(lmt_ru, 0, sizeof(*lmt_ru));
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx,
+ ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit_ru);
struct rtw89_phy_iter_ra_data {
struct rtw89_dev *rtwdev;
@@ -1354,25 +2007,34 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
struct sk_buff *c2h = ra_data->c2h;
u8 mode, rate, bw, giltf, mac_id;
+ u16 legacy_bitrate;
+ bool valid;
+ u8 mcs = 0;
mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data);
if (mac_id != rtwsta->mac_id)
return;
- memset(ra_report, 0, sizeof(*ra_report));
-
rate = RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h->data);
bw = RTW89_GET_PHY_C2H_RA_RPT_BW(c2h->data);
giltf = RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h->data);
mode = RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h->data);
+ if (mode == RTW89_RA_RPT_MODE_LEGACY) {
+ valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
+ if (!valid)
+ return;
+ }
+
+ memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));
+
switch (mode) {
case RTW89_RA_RPT_MODE_LEGACY:
- ra_report->txrate.legacy = rtw89_ra_report_to_bitrate(rtwdev, rate);
+ ra_report->txrate.legacy = legacy_bitrate;
break;
case RTW89_RA_RPT_MODE_HT:
ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
- if (rtwdev->fw.old_ht_ra_format)
+ if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
else
@@ -1380,6 +2042,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.mcs = rate;
if (giltf)
ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ mcs = ra_report->txrate.mcs & 0x07;
break;
case RTW89_RA_RPT_MODE_VHT:
ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
@@ -1387,6 +2050,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1;
if (giltf)
ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ mcs = ra_report->txrate.mcs;
break;
case RTW89_RA_RPT_MODE_HE:
ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -1398,21 +2062,17 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
else
ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ mcs = ra_report->txrate.mcs;
break;
}
- if (bw == RTW89_CHANNEL_WIDTH_80)
- ra_report->txrate.bw = RATE_INFO_BW_80;
- else if (bw == RTW89_CHANNEL_WIDTH_40)
- ra_report->txrate.bw = RATE_INFO_BW_40;
- else
- ra_report->txrate.bw = RATE_INFO_BW_20;
-
+ ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
- sta->max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
- rtwsta->max_agg_wait = sta->max_rc_amsdu_len / 1500 - 1;
+ ra_report->might_fallback_legacy = mcs <= 2;
+ sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
+ rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
}
static void
@@ -1487,15 +2147,25 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
u8 crystal_cap, bool force)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 sc_xi_val, sc_xo_val;
if (!force && cfo->crystal_cap == crystal_cap)
return;
crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
- rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
- rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
- sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
- sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ if (chip->chip_id == RTL8852A) {
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+ sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
+ sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ } else {
+ rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
+ crystal_cap, XTAL_SC_XO_MASK);
+ rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
+ crystal_cap, XTAL_SC_XI_MASK);
+ rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
+ rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
+ }
cfo->crystal_cap = sc_xi_val;
cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);
@@ -1525,9 +2195,11 @@ static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
+ const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
bool is_linked = rtwdev->total_sta_assoc > 0;
s32 cfo_avg_312;
- s32 dcfo_comp;
+ s32 dcfo_comp_val;
+ u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;
int sign;
if (!is_linked) {
@@ -1538,13 +2210,13 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
if (curr_cfo == 0)
return;
- dcfo_comp = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
+ dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
sign = curr_cfo > 0 ? 1 : -1;
- cfo_avg_312 = (curr_cfo << 3) / 5 + sign * dcfo_comp;
+ cfo_avg_312 = (curr_cfo << dcfo_comp_sft) / 5 + sign * dcfo_comp_val;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: avg_cfo=%d\n", cfo_avg_312);
if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
cfo_avg_312 = -cfo_avg_312;
- rtw89_phy_set_phy_regs(rtwdev, R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK,
+ rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
cfo_avg_312);
}
@@ -1563,8 +2235,12 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
cfo->crystal_cap = cfo->crystal_cap_default;
cfo->def_x_cap = cfo->crystal_cap;
+ cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
+ cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
cfo->is_adjust = false;
+ cfo->divergence_lock_en = false;
cfo->x_cap_ofst = 0;
+ cfo->lock_cnt = 0;
cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
cfo->apply_compensation = false;
cfo->residual_cfo_acc = 0;
@@ -1577,6 +2253,7 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
cfo->cfo_trig_by_timer_en = false;
cfo->phy_cfo_trk_cnt = 0;
cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+ cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
@@ -1782,6 +2459,23 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
return;
}
+ if (cfo->divergence_lock_en) {
+ cfo->lock_cnt++;
+ if (cfo->lock_cnt > CFO_PERIOD_CNT) {
+ cfo->divergence_lock_en = false;
+ cfo->lock_cnt = 0;
+ } else {
+ rtw89_phy_cfo_reset(rtwdev);
+ }
+ return;
+ }
+ if (cfo->crystal_cap >= cfo->x_cap_ub ||
+ cfo->crystal_cap <= cfo->x_cap_lb) {
+ cfo->divergence_lock_en = true;
+ rtw89_phy_cfo_reset(rtwdev);
+ return;
+ }
+
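+ /* Illustrative numbers for the divergence lock above, assuming a
+ * default crystal cap of 0x40: the init path gives x_cap_ub =
+ * min(0x40 + CFO_BOUND, 0x7f) = 0x7f and x_cap_lb = max(0x40 -
+ * CFO_BOUND, 1) = 0x1, so a cap pinned at either bound holds
+ * tracking in reset for CFO_PERIOD_CNT (15) rounds before retrying.
+ */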
rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
cfo->cfo_avg_pre = new_cfo;
x_cap_update = cfo->crystal_cap != pre_x_cap;
@@ -1828,6 +2522,13 @@ void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ bool is_ul_ofdma = false, ofdma_acc_en = false;
+
+ if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
+ is_ul_ofdma = true;
+ if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
+ is_ul_ofdma)
+ ofdma_acc_en = true;
switch (cfo->phy_cfo_status) {
case RTW89_PHY_DCFO_STATE_NORMAL:
@@ -1839,16 +2540,26 @@ void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
}
break;
case RTW89_PHY_DCFO_STATE_ENHANCE:
- if (cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT) {
+ if (stats->tx_throughput <= CFO_TP_LOWER)
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+ else if (ofdma_acc_en &&
+ cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
+ else
+ cfo->phy_cfo_trk_cnt++;
+
+ if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
cfo->phy_cfo_trk_cnt = 0;
cfo->cfo_trig_by_timer_en = false;
}
- if (cfo->cfo_trig_by_timer_en == 1)
- cfo->phy_cfo_trk_cnt++;
+ break;
+ case RTW89_PHY_DCFO_STATE_HOLD:
if (stats->tx_throughput <= CFO_TP_LOWER) {
cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
cfo->phy_cfo_trk_cnt = 0;
cfo->cfo_trig_by_timer_en = false;
+ } else {
+ cfo->phy_cfo_trk_cnt++;
}
break;
default:
@@ -1872,6 +2583,11 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
u8 macid = phy_ppdu->mac_id;
+ if (macid >= CFO_TRACK_MAX_USER) {
+ rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
+ return;
+ }
+
cfo->cfo_tail[macid] += cfo_val;
cfo->cfo_cnt[macid]++;
cfo->packet_count++;
@@ -2503,11 +3219,9 @@ static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
u8 i;
- if (chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
- rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);
+ rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);
for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
if (i >= RTW89_CCK_PKT)
@@ -2589,6 +3303,9 @@ static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
u32 tmp;
u8 i;
+ if (!rtwdev->hal.support_igi)
+ return;
+
tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
B_PATH0_IB_PKPW_MSK);
dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
@@ -2624,10 +3341,11 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
- switch (rtwdev->hal.current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
dig->lna_gain = dig->lna_gain_g;
dig->tia_gain = dig->tia_gain_g;
@@ -2787,26 +3505,32 @@ static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_LNA_INIT,
- B_PATH0_LNA_INIT_IDX_MSK, lna_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_LNA_INIT,
- B_PATH1_LNA_INIT_IDX_MSK, lna_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
+ dig_regs->p0_lna_init.mask, lna_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
+ dig_regs->p1_lna_init.mask, lna_idx);
}
static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_TIA_INIT,
- B_PATH0_TIA_INIT_IDX_MSK, tia_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_TIA_INIT,
- B_PATH1_TIA_INIT_IDX_MSK, tia_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
+ dig_regs->p0_tia_init.mask, tia_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
+ dig_regs->p1_tia_init.mask, tia_idx);
}
static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_RXB_INIT,
- B_PATH0_RXB_INIT_IDX_MSK, rxb_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_RXB_INIT,
- B_PATH1_RXB_INIT_IDX_MSK, rxb_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
+ dig_regs->p0_rxb_init.mask, rxb_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
+ dig_regs->p1_rxb_init.mask, rxb_idx);
}
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
@@ -2820,32 +3544,52 @@ static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
set.lna_idx, set.tia_idx, set.rxb_idx);
}
-static const struct rtw89_reg_def sdagc_config[4] = {
- {R_PATH0_P20_FOLLOW_BY_PAGCUGC, B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH0_S20_FOLLOW_BY_PAGCUGC, B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH1_P20_FOLLOW_BY_PAGCUGC, B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH1_S20_FOLLOW_BY_PAGCUGC, B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
-};
-
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
bool enable)
{
- u8 i = 0;
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- for (i = 0; i < ARRAY_SIZE(sdagc_config); i++)
- rtw89_phy_write32_mask(rtwdev, sdagc_config[i].addr,
- sdagc_config[i].mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
+ dig_regs->p0_p20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
+ dig_regs->p0_s20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
+ dig_regs->p1_p20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
+ dig_regs->p1_s20_pagcugc_en.mask, enable);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
+static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+
+ if (!rtwdev->hal.support_igi)
+ return;
+
+ if (dig->force_gaincode_idx_en) {
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "Force gaincode index enabled.\n");
+ } else {
+ rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
+ &dig->cur_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
+ }
+}
+
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
bool enable)
{
- enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ enum rtw89_bandwidth cbw = chan->band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
- u32 val = 0;
+ u8 ofdm_cca_th;
+ s8 cck_cca_th;
+ u32 pd_val = 0;
under_region += PD_TH_SB_FLTR_CMP_VAL;
@@ -2856,6 +3600,9 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
case RTW89_CHANNEL_WIDTH_80:
under_region += PD_TH_BW80_CMP_VAL;
break;
+ case RTW89_CHANNEL_WIDTH_160:
+ under_region += PD_TH_BW160_CMP_VAL;
+ break;
case RTW89_CHANNEL_WIDTH_20:
fallthrough;
default:
@@ -2866,23 +3613,38 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
dig->dyn_pd_th_max = dig->igi_rssi;
final_rssi = min_t(u8, rssi, dig->igi_rssi);
- final_rssi = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
- PD_TH_MAX_RSSI + under_region);
+ ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
+ PD_TH_MAX_RSSI + under_region);
if (enable) {
- val = (final_rssi - under_region - PD_TH_MIN_RSSI) >> 1;
+ pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "dyn_max=%d, final_rssi=%d, total=%d, PD_low=%d\n",
- dig->igi_rssi, final_rssi, under_region, val);
+ "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
+ final_rssi, ofdm_cca_th, under_region, pd_val);
} else {
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
- rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
- val);
- rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
- B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_lower_bound_mask, pd_val);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_spatial_reuse_en, enable);
+
+ if (!rtwdev->hal.support_cckpd)
+ return;
+
+ cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
+ pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
+ final_rssi, cck_cca_th, under_region, pd_val);
+
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1,
+ B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable);
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1,
+ B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val);
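+
+ /* Illustrative numbers, assuming the driver's rssi scale of
+ * dBm + 110: final_rssi = 66 (-44 dBm) with a hypothetical
+ * under_region of 21 gives cck_cca_th = max(66 - 21, -18) = 45
+ * and pd_val = 45 - 110 = -65, i.e. a CCK PD lower bound of
+ * -65 dBm (the mask write keeps only the field bits of the
+ * two's-complement value).
+ */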
}
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
@@ -2933,15 +3695,7 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
- if (dig->force_gaincode_idx_en) {
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
- rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "Force gaincode index enabled.\n");
- } else {
- rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
- &dig->cur_gaincode);
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
- }
+ rtw89_phy_dig_config_igi(rtwdev);
rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);
@@ -2951,6 +3705,62 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}
+static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool *done = data;
+ u8 rssi_a, rssi_b;
+ u32 candidate;
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
+ return;
+
+ if (*done)
+ return;
+
+ *done = true;
+
+ rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
+ rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);
+
+ if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_A;
+ else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_B;
+ else
+ return;
+
+ if (hal->antenna_tx == candidate)
+ return;
+
+ hal->antenna_tx = candidate;
+ rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);
+
+ if (hal->antenna_tx == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
+ } else if (hal->antenna_tx == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
+ }
+}
+
+void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool done = false;
+
+ if (!hal->tx_path_diversity)
+ return;
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_tx_path_div_sta_iter,
+ &done);
+}
+
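+/* Illustrative check of the hysteresis in the iterator above: the TX
+ * path only moves when one path's averaged RSSI beats the other by
+ * more than RTW89_TX_DIV_RSSI_RAW_TH. With a hypothetical threshold
+ * of 8, rssi_a = 60 vs rssi_b = 50 selects RF_A, while 60 vs 55
+ * leaves hal->antenna_tx untouched.
+ */
+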
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
rtw89_phy_ccx_top_setting_init(rtwdev);
@@ -2975,6 +3785,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_load_txpwr_table(rtwdev, chip->byr_table);
rtw89_chip_set_txpwr_ctrl(rtwdev);
rtw89_chip_power_trim(rtwdev);
+ rtw89_chip_cfg_txrx_path(rtwdev);
}
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
@@ -2982,7 +3793,7 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
u8 bss_color;
- if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
+ if (!vif->bss_conf.he_support || !vif->cfg.assoc)
return;
bss_color = vif->bss_conf.he_bss_color.color;
@@ -2992,5 +3803,163 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_TGT, bss_color,
phy_idx);
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
- vif->bss_conf.aid, phy_idx);
+ vif->cfg.aid, phy_idx);
+}
+
+static void
+_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ udelay(def->data);
+}
+
+static void
+(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
+ [RTW89_RFK_F_WRF] = _rfk_write_rf,
+ [RTW89_RFK_F_WM] = _rfk_write32_mask,
+ [RTW89_RFK_F_WS] = _rfk_write32_set,
+ [RTW89_RFK_F_WC] = _rfk_write32_clr,
+ [RTW89_RFK_F_DELAY] = _rfk_delay,
+};
+
+static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
+{
+ const struct rtw89_reg5_def *p = tbl->defs;
+ const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
+
+ for (; p < end; p++)
+ _rfk_handler[p->flag](rtwdev, p);
+}
+EXPORT_SYMBOL(rtw89_rfk_parser);
+
+#define RTW89_TSSI_FAST_MODE_NUM 4
+
+static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
+ {0xD934, 0xff0000},
+ {0xD934, 0xff000000},
+ {0xD938, 0xff},
+ {0xD934, 0xff00},
+};
+
+static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
+ {0xD930, 0xff0000},
+ {0xD930, 0xff000000},
+ {0xD934, 0xff},
+ {0xD930, 0xff00},
+};
+
+static
+void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_tssi_bandedge_cfg bandedge_cfg,
+ u32 val)
+{
+ const struct rtw89_reg_def *regs;
+ u32 reg;
+ int i;
+
+ if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
+ regs = rtw89_tssi_fastmode_regs_flat;
+ else
+ regs = rtw89_tssi_fastmode_regs_level;
+
+ for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
+ reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
+ }
+}
+
+static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
+ {0xD91C, 0xff000000},
+ {0xD920, 0xff},
+ {0xD920, 0xff00},
+ {0xD920, 0xff0000},
+ {0xD920, 0xff000000},
+ {0xD924, 0xff},
+ {0xD924, 0xff00},
+ {0xD914, 0xff000000},
+ {0xD918, 0xff},
+ {0xD918, 0xff00},
+ {0xD918, 0xff0000},
+ {0xD918, 0xff000000},
+ {0xD91C, 0xff},
+ {0xD91C, 0xff00},
+ {0xD91C, 0xff0000},
+};
+
+static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
+ {0xD910, 0xff},
+ {0xD910, 0xff00},
+ {0xD910, 0xff0000},
+ {0xD910, 0xff000000},
+ {0xD914, 0xff},
+ {0xD914, 0xff00},
+ {0xD914, 0xff0000},
+ {0xD908, 0xff},
+ {0xD908, 0xff00},
+ {0xD908, 0xff0000},
+ {0xD908, 0xff000000},
+ {0xD90C, 0xff},
+ {0xD90C, 0xff00},
+ {0xD90C, 0xff0000},
+ {0xD90C, 0xff000000},
+};
+
+void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_tssi_bandedge_cfg bandedge_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_reg_def *regs;
+ const u32 *data;
+ u32 reg;
+ int i;
+
+ if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
+ return;
+
+ if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
+ regs = rtw89_tssi_bandedge_regs_flat;
+ else
+ regs = rtw89_tssi_bandedge_regs_level;
+
+ data = chip->tssi_dbw_table->data[bandedge_cfg];
+
+ for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
+ reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_BANDEDGE_CFG, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);
+
+ rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
+ data[RTW89_TSSI_SBW20]);
}
+EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
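+
+/* Usage sketch (the call site is illustrative; RTW89_MAC_0 is the
+ * driver's existing mac index): program the flat band-edge profile on
+ * MAC 0, which also seeds the fast-mode registers from the table's
+ * 20 MHz sub-band entry:
+ *
+ *	rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev, RTW89_MAC_0,
+ *					     RTW89_TSSI_BANDEDGE_FLAT);
+ */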
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index b1f059b725a1..ee3bc5e111e1 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -8,6 +8,7 @@
#include "core.h"
#define RTW89_PHY_ADDR_OFFSET 0x10000
+#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
#define PHY_HEADLINE_VALID 0xf
@@ -55,11 +56,13 @@
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
+#define CFO_BOUND 64
#define CFO_TP_UPPER 100
#define CFO_TP_LOWER 50
#define CFO_COMP_PERIOD 250
#define CFO_COMP_WEIGHT 8
#define MAX_CFO_TOLERANCE 30
+#define CFO_TF_CNT_TH 300
#define CCX_MAX_PERIOD 2097
#define CCX_MAX_PERIOD_UNIT 32
@@ -87,8 +90,11 @@
#define RXB_IDX_MAX 31
#define RXB_IDX_MIN 0
+#define IGI_RSSI_MAX 110
#define PD_TH_MAX_RSSI 70
#define PD_TH_MIN_RSSI 8
+#define CCKPD_TH_MIN_RSSI (-18)
+#define PD_TH_BW160_CMP_VAL 9
#define PD_TH_BW80_CMP_VAL 6
#define PD_TH_BW40_CMP_VAL 3
#define PD_TH_BW20_CMP_VAL 0
@@ -216,6 +222,35 @@ enum rtw89_dig_gain_tia_idx {
RTW89_DIG_GAIN_TIA_IDX1 = 1
};
+enum rtw89_tssi_bandedge_cfg {
+ RTW89_TSSI_BANDEDGE_FLAT,
+ RTW89_TSSI_BANDEDGE_LOW,
+ RTW89_TSSI_BANDEDGE_MID,
+ RTW89_TSSI_BANDEDGE_HIGH,
+
+ RTW89_TSSI_CFG_NUM,
+};
+
+enum rtw89_tssi_sbw_idx {
+ RTW89_TSSI_SBW20,
+ RTW89_TSSI_SBW40_0,
+ RTW89_TSSI_SBW40_1,
+ RTW89_TSSI_SBW80_0,
+ RTW89_TSSI_SBW80_1,
+ RTW89_TSSI_SBW80_2,
+ RTW89_TSSI_SBW80_3,
+ RTW89_TSSI_SBW160_0,
+ RTW89_TSSI_SBW160_1,
+ RTW89_TSSI_SBW160_2,
+ RTW89_TSSI_SBW160_3,
+ RTW89_TSSI_SBW160_4,
+ RTW89_TSSI_SBW160_5,
+ RTW89_TSSI_SBW160_6,
+ RTW89_TSSI_SBW160_7,
+
+ RTW89_TSSI_SBW_NUM,
+};
+
struct rtw89_txpwr_byrate_cfg {
enum rtw89_band band;
enum rtw89_nss nss;
@@ -228,18 +263,22 @@ struct rtw89_txpwr_byrate_cfg {
#define DELTA_SWINGIDX_SIZE 30
struct rtw89_txpwr_track_cfg {
- const u8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE];
- const u8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE];
- const u8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE];
- const u8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE];
- const u8 *delta_swingidx_2gb_n;
- const u8 *delta_swingidx_2gb_p;
- const u8 *delta_swingidx_2ga_n;
- const u8 *delta_swingidx_2ga_p;
- const u8 *delta_swingidx_2g_cck_b_n;
- const u8 *delta_swingidx_2g_cck_b_p;
- const u8 *delta_swingidx_2g_cck_a_n;
- const u8 *delta_swingidx_2g_cck_a_p;
+ const s8 (*delta_swingidx_6gb_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_6gb_p)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_6ga_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_6ga_p)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE];
+ const s8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE];
+ const s8 *delta_swingidx_2gb_n;
+ const s8 *delta_swingidx_2gb_p;
+ const s8 *delta_swingidx_2ga_n;
+ const s8 *delta_swingidx_2ga_p;
+ const s8 *delta_swingidx_2g_cck_b_n;
+ const s8 *delta_swingidx_2g_cck_b_p;
+ const s8 *delta_swingidx_2g_cck_a_n;
+ const s8 *delta_swingidx_2g_cck_a_p;
};
struct rtw89_phy_dig_gain_cfg {
@@ -254,6 +293,10 @@ struct rtw89_phy_dig_gain_table {
const struct rtw89_phy_dig_gain_cfg *cfg_tia_a;
};
+struct rtw89_phy_tssi_dbw_table {
+ u32 data[RTW89_TSSI_CFG_NUM][RTW89_TSSI_SBW_NUM];
+};
+
struct rtw89_phy_reg3_tbl {
const struct rtw89_reg3_def *reg3;
int size;
@@ -265,6 +308,18 @@ const struct rtw89_phy_reg3_tbl _name ## _tbl = { \
.size = ARRAY_SIZE(_name), \
}
+struct rtw89_nbi_reg_def {
+ struct rtw89_reg_def notch1_idx;
+ struct rtw89_reg_def notch1_frac_idx;
+ struct rtw89_reg_def notch1_en;
+ struct rtw89_reg_def notch2_idx;
+ struct rtw89_reg_def notch2_frac_idx;
+ struct rtw89_reg_def notch2_en;
+};
+
+extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
+extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
+
static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
u32 addr, u8 data)
{
@@ -322,35 +377,105 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask);
}
+enum rtw89_rfk_flag {
+ RTW89_RFK_F_WRF = 0,
+ RTW89_RFK_F_WM = 1,
+ RTW89_RFK_F_WS = 2,
+ RTW89_RFK_F_WC = 3,
+ RTW89_RFK_F_DELAY = 4,
+ RTW89_RFK_F_NUM,
+};
+
+struct rtw89_rfk_tbl {
+ const struct rtw89_reg5_def *defs;
+ u32 size;
+};
+
+#define RTW89_DECLARE_RFK_TBL(_name) \
+const struct rtw89_rfk_tbl _name ## _tbl = { \
+ .defs = _name, \
+ .size = ARRAY_SIZE(_name), \
+}
+
+#define RTW89_DECL_RFK_WRF(_path, _addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WRF, \
+ .path = _path, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define RTW89_DECL_RFK_WM(_addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WM, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define RTW89_DECL_RFK_WS(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WS, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define RTW89_DECL_RFK_WC(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WC, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define RTW89_DECL_RFK_DELAY(_data) \
+ {.flag = RTW89_RFK_F_DELAY, \
+ .data = _data,}
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl);
+
+#define rtw89_rfk_parser_by_cond(dev, cond, tbl_t, tbl_f) \
+ do { \
+ typeof(dev) __dev = (dev); \
+ if (cond) \
+ rtw89_rfk_parser(__dev, (tbl_t)); \
+ else \
+ rtw89_rfk_parser(__dev, (tbl_f)); \
+ } while (0)
+
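+/* Usage sketch with made-up register addresses: build a reg5 table
+ * with the RTW89_DECL_RFK_* helpers above and run it through
+ * rtw89_rfk_parser():
+ *
+ *	static const struct rtw89_reg5_def rtw89_foo_defs[] = {
+ *		RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x0303),
+ *		RTW89_DECL_RFK_WS(0x12b8, BIT(30)),
+ *		RTW89_DECL_RFK_DELAY(10),
+ *		RTW89_DECL_RFK_WC(0x12b8, BIT(30)),
+ *	};
+ *	RTW89_DECLARE_RFK_TBL(rtw89_foo_defs);
+ *
+ *	rtw89_rfk_parser(rtwdev, &rtw89_foo_defs_tbl);
+ */
+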
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl);
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc);
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
u8 ntx);
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx);
-s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch);
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
+void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+ u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
@@ -366,6 +491,10 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val);
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
+void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_tssi_bandedge_cfg bandedge_cfg);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 7eaa01e41ef2..bf41a1141679 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -29,15 +29,48 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
return 0;
}
-static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
+static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev,
+ bool enter)
{
+ ieee80211_stop_queues(rtwdev->hw);
+ rtwdev->hci.paused = true;
+ flush_work(&rtwdev->txq_work);
+ ieee80211_wake_queues(rtwdev->hw);
+
+ rtw89_hci_pause(rtwdev, true);
+ rtw89_mac_power_mode_change(rtwdev, enter);
+ rtw89_hci_switch_mode(rtwdev, enter);
+ rtw89_hci_pause(rtwdev, false);
+
+ rtwdev->hci.paused = false;
+
+ if (!enter) {
+ local_bh_disable();
+ napi_schedule(&rtwdev->napi);
+ local_bh_enable();
+ }
+}
+
+static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
+{
+ if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode))
+ rtw89_ps_power_mode_change_with_hci(rtwdev, enter);
+ else
+ rtw89_mac_power_mode_change(rtwdev, enter);
+}
+
+static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ if (rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ return;
+
if (!rtwdev->ps_mode)
return;
if (test_and_set_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
return;
- rtw89_mac_power_mode_change(rtwdev, true);
+ rtw89_ps_power_mode_change(rtwdev, true);
}
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
@@ -46,7 +79,7 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
return;
if (test_and_clear_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
- rtw89_mac_power_mode_change(rtwdev, false);
+ rtw89_ps_power_mode_change(rtwdev, false);
}
static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
@@ -81,23 +114,23 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, mac_id);
- __rtw89_enter_ps_mode(rtwdev);
+ __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_ps_mode(rtwdev, rtwvif);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_ps_mode(rtwdev);
__rtw89_leave_lps(rtwdev, rtwvif->mac_id);
}
@@ -110,6 +143,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
+ __rtw89_leave_ps_mode(rtwdev);
+
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_leave_lps_vif(rtwdev, rtwvif);
}
@@ -148,3 +183,64 @@ void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl)
if (btc_ctrl)
rtw89_leave_lps(rtwdev);
}
+
+static void rtw89_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ enum rtw89_p2pps_action act)
+{
+ if (act == RTW89_P2P_ACT_UPDATE || act == RTW89_P2P_ACT_REMOVE)
+ return;
+
+ if (act == RTW89_P2P_ACT_INIT)
+ rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, true);
+ else if (act == RTW89_P2P_ACT_TERMINATE)
+ rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, false);
+}
+
+static void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ enum rtw89_p2pps_action act;
+ u8 noa_id;
+
+ if (rtwvif->last_noa_nr == 0)
+ return;
+
+ for (noa_id = 0; noa_id < rtwvif->last_noa_nr; noa_id++) {
+ if (noa_id == rtwvif->last_noa_nr - 1)
+ act = RTW89_P2P_ACT_TERMINATE;
+ else
+ act = RTW89_P2P_ACT_REMOVE;
+ rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+ rtw89_fw_h2c_p2p_act(rtwdev, vif, NULL, act, noa_id);
+ }
+}
+
+static void rtw89_p2p_update_noa(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_p2p_noa_desc *desc;
+ enum rtw89_p2pps_action act;
+ u8 noa_id;
+
+ for (noa_id = 0; noa_id < RTW89_P2P_MAX_NOA_NUM; noa_id++) {
+ desc = &vif->bss_conf.p2p_noa_attr.desc[noa_id];
+ if (!desc->count || !desc->duration)
+ break;
+
+ if (noa_id == 0)
+ act = RTW89_P2P_ACT_INIT;
+ else
+ act = RTW89_P2P_ACT_UPDATE;
+ rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+ rtw89_fw_h2c_p2p_act(rtwdev, vif, desc, act, noa_id);
+ }
+ rtwvif->last_noa_nr = noa_id;
+}
+
+void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ rtw89_p2p_disable_all_noa(rtwdev, vif);
+ rtw89_p2p_update_noa(rtwdev, vif);
+}
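
The rtw89_ps_power_mode_change() dispatch added here keys on a per-chip bitmap: if the bit for the current ps_mode is set in chip->low_power_hci_modes, the transition takes the quiesced-HCI path above (stop the mac80211 queues, flush txq_work, pause the HCI around the MAC power-mode and HCI mode switch, then reschedule NAPI on wake). As a hedged illustration of how a chip opts in — the initializer below is a fragment, and only the field name and the RTW89_PS_MODE_* enum values come from the driver:

/* Illustrative chip-info fragment (other fields elided): request the
 * paused-HCI sequence for the clock-gated and power-gated PS modes.
 */
static const struct rtw89_chip_info example_chip_info = {
	/* ... */
	.low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) |
			       BIT(RTW89_PS_MODE_PWR_GATED),
	/* ... */
};

Chips that leave low_power_hci_modes at zero keep the old behaviour and only toggle the MAC power mode.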
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index a184b68994aa..0feae3991623 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -5,12 +5,13 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id);
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
+void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
#endif
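
With rtw89_process_p2p_ps() now exported, a hedged sketch of a plausible call site: mac80211 flags P2P powersave updates via BSS_CHANGED_P2P_PS, and the driver re-programs NoA under its mutex. The callback below is abbreviated to that one branch:

static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *conf,
				       u32 changed)
{
	struct rtw89_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_ps_mode(rtwdev);

	/* Tear down stale NoA schedules, then push the current ones. */
	if (changed & BSS_CHANGED_P2P_PS)
		rtw89_process_p2p_ps(rtwdev, vif);

	mutex_unlock(&rtwdev->mutex);
}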
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index e0a416d37d0e..ca20bb024b40 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -8,16 +8,36 @@
#define R_AX_SYS_WL_EFUSE_CTRL 0x000A
#define B_AX_AUTOLOAD_SUS BIT(5)
+#define R_AX_SYS_ISO_CTRL 0x0000
+#define B_AX_PWC_EV2EF_MASK GENMASK(15, 14)
+#define B_AX_PWC_EV2EF_B15 BIT(15)
+#define B_AX_PWC_EV2EF_B14 BIT(14)
+#define B_AX_ISO_EB2CORE BIT(8)
+
#define R_AX_SYS_FUNC_EN 0x0002
#define B_AX_FEN_BB_GLB_RSTN BIT(1)
#define B_AX_FEN_BBRSTB BIT(0)
#define R_AX_SYS_PW_CTRL 0x0004
+#define B_AX_XTAL_OFF_A_DIE BIT(22)
+#define B_AX_DIS_WLBT_PDNSUSEN_SOPC BIT(18)
+#define B_AX_RDY_SYSPWR BIT(17)
+#define B_AX_EN_WLON BIT(16)
+#define B_AX_APDM_HPDN BIT(15)
#define B_AX_PSUS_OFF_CAPC_EN BIT(14)
+#define B_AX_AFSM_PCIE_SUS_EN BIT(12)
+#define B_AX_AFSM_WLSUS_EN BIT(11)
+#define B_AX_APFM_SWLPS BIT(10)
+#define B_AX_APFM_OFFMAC BIT(9)
+#define B_AX_APFN_ONMAC BIT(8)
#define R_AX_SYS_CLK_CTRL 0x0008
#define B_AX_CPU_CLK_EN BIT(14)
+#define R_AX_SYS_ADIE_PAD_PWR_CTRL 0x0018
+#define B_AX_SYM_PADPDN_WL_PTA_1P3 BIT(6)
+#define B_AX_SYM_PADPDN_WL_RFC_1P3 BIT(5)
+
#define R_AX_RSV_CTRL 0x001C
#define B_AX_R_DIS_PRST BIT(6)
#define B_AX_WLOCK_1C_BIT6 BIT(5)
@@ -31,9 +51,6 @@
#define B_AX_EF_POR BIT(10)
#define B_AX_EF_CELL_SEL_MASK GENMASK(9, 8)
-#define R_AX_SPSLDO_ON_CTRL0 0x0200
-#define B_AX_OCP_L1_MASK GENMASK(15, 13)
-
#define R_AX_EFUSE_CTRL 0x0030
#define B_AX_EF_MODE_SEL_MASK GENMASK(31, 30)
#define B_AX_EF_RDY BIT(29)
@@ -41,6 +58,17 @@
#define B_AX_EF_ADDR_MASK GENMASK(26, 16)
#define B_AX_EF_DATA_MASK GENMASK(15, 0)
+#define R_AX_EFUSE_CTRL_1_V1 0x0038
+#define B_AX_EF_ENT BIT(31)
+#define B_AX_EF_BURST BIT(19)
+#define B_AX_EF_TEST_SEL_MASK GENMASK(18, 16)
+#define B_AX_EF_TROW_EN BIT(15)
+#define B_AX_EF_ERR_FLAG BIT(14)
+#define B_AX_EF_DSB_EN BIT(11)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
+#define B_AX_WDT_WAKE_PCIE_EN BIT(10)
+#define B_AX_WDT_WAKE_USB_EN BIT(9)
+
#define R_AX_GPIO_MUXCFG 0x0040
#define B_AX_BOOT_MODE BIT(19)
#define B_AX_WL_EECS_EXT_32K_SEL BIT(18)
@@ -56,6 +84,8 @@
#define B_AX_BTMODE_MASK GENMASK(7, 6)
#define MAC_AX_BT_MODE_0_3 0
#define MAC_AX_BT_MODE_2 2
+#define MAC_AX_RTK_MODE 0
+#define MAC_AX_CSR_MODE 1
#define B_AX_ENBT BIT(5)
#define B_AX_EROM_EN BIT(4)
#define B_AX_ENUARTRX BIT(2)
@@ -72,11 +102,29 @@
#define R_AX_SYS_SDIO_CTRL 0x0070
#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
+#define B_AX_PCIE_FORCE_PWR_NGAT BIT(13)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
#define B_AX_PCIE_AUXCLK_GATE BIT(11)
#define B_AX_LTE_MUX_CTRL_PATH BIT(26)
+#define R_AX_HCI_OPT_CTRL 0x0074
+#define BIT_WAKE_CTRL BIT(5)
+
+#define R_AX_HCI_BG_CTRL 0x0078
+#define B_AX_IBX_EN_VALUE BIT(15)
+#define B_AX_IB_EN_VALUE BIT(14)
+#define B_AX_FORCED_IB_EN BIT(4)
+#define B_AX_EN_REGBG BIT(3)
+#define B_AX_R_AX_BG_LPF BIT(2)
+#define B_AX_R_AX_BG GENMASK(1, 0)
+
#define R_AX_PLATFORM_ENABLE 0x0088
+#define B_AX_AXIDMA_EN BIT(3)
#define B_AX_WCPU_EN BIT(1)
+#define B_AX_PLATFORM_EN BIT(0)
+
+#define R_AX_WLLPS_CTRL 0x0090
+#define B_AX_DIS_WLBT_LPSEN_LOPC BIT(1)
#define R_AX_SCOREBOARD 0x00AC
#define B_AX_TOGGLE BIT(31)
@@ -89,11 +137,32 @@
#define R_AX_DBG_PORT_SEL 0x00C0
#define B_AX_DEBUG_ST_MASK GENMASK(31, 0)
+#define R_AX_PMC_DBG_CTRL2 0x00CC
+#define B_AX_SYSON_DIS_PMCR_AX_WRMSK BIT(2)
+
+#define R_AX_PCIE_MIO_INTF 0x00E4
+#define B_AX_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
+#define B_AX_PCIE_MIO_BYIOREG BIT(13)
+#define B_AX_PCIE_MIO_RE BIT(12)
+#define B_AX_PCIE_MIO_WE_MASK GENMASK(11, 8)
+#define MIO_WRITE_BYTE_ALL 0xF
+#define B_AX_PCIE_MIO_ADDR_MASK GENMASK(7, 0)
+#define MIO_ADDR_PAGE_MASK GENMASK(12, 8)
+
+#define R_AX_PCIE_MIO_INTD 0x00E8
+#define B_AX_PCIE_MIO_DATA_MASK GENMASK(31, 0)
+
#define R_AX_SYS_CFG1 0x00F0
#define B_AX_CHIP_VER_MASK GENMASK(15, 12)
#define R_AX_SYS_STATUS1 0x00F4
#define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
+#define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3)
+#define MAC_AX_HCI_SEL_SDIO_UART 0
+#define MAC_AX_HCI_SEL_MULTI_USB 1
+#define MAC_AX_HCI_SEL_PCIE_UART 2
+#define MAC_AX_HCI_SEL_PCIE_USB 3
+#define MAC_AX_HCI_SEL_MULTI_SDIO 4
#define R_AX_HALT_H2C_CTRL 0x0160
#define R_AX_HALT_H2C 0x0168
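
The R_AX_PCIE_MIO_INTF/INTD pair added above describes an indirect register window. A hedged sketch of a read through it — the page/offset address split and the absence of a completion poll are inferred from the field names alone, so treat this as illustration rather than the driver's helper:

/* Illustrative only: issue an indirect MIO read and fetch the data word. */
static u32 pcie_mio_read_sketch(struct rtw89_dev *rtwdev, u16 addr)
{
	u32 cmd;

	cmd = FIELD_PREP(B_AX_PCIE_MIO_ADDR_MASK, addr & 0xff) |
	      FIELD_PREP(B_AX_PCIE_MIO_ADDR_PAGE_V1_MASK,
			 FIELD_GET(MIO_ADDR_PAGE_MASK, addr)) |
	      B_AX_PCIE_MIO_RE;
	rtw89_write32(rtwdev, R_AX_PCIE_MIO_INTF, cmd);

	return rtw89_read32(rtwdev, R_AX_PCIE_MIO_INTD);
}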
@@ -112,6 +181,7 @@
#define PS_RPWM_TOGGLE BIT(15)
#define PS_RPWM_ACK BIT(14)
#define PS_RPWM_SEQ_NUM GENMASK(13, 12)
+#define PS_RPWM_NOTIFY_WAKE BIT(8)
#define PS_RPWM_STATE 0x7
#define RPWM_SEQ_NUM_MAX 3
#define PS_CPWM_SEQ_NUM GENMASK(13, 12)
@@ -130,6 +200,27 @@
#define R_AX_UDM2 0x01F8
#define R_AX_UDM3 0x01FC
+#define R_AX_SPS_DIG_ON_CTRL0 0x0200
+#define B_AX_VREFPFM_L_MASK GENMASK(25, 22)
+#define B_AX_REG_ZCDC_H_MASK GENMASK(18, 17)
+#define B_AX_OCP_L1_MASK GENMASK(15, 13)
+#define B_AX_VOL_L1_MASK GENMASK(3, 0)
+
+#define R_AX_LDO_AON_CTRL0 0x0218
+#define B_AX_PD_REGU_L BIT(16)
+
+#define R_AX_WLAN_XTAL_SI_CTRL 0x0270
+#define B_AX_WL_XTAL_SI_CMD_POLL BIT(31)
+#define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30)
+#define B_AX_WL_XTAL_GNT BIT(29)
+#define B_AX_BT_XTAL_GNT BIT(28)
+#define B_AX_WL_XTAL_SI_MODE_MASK GENMASK(25, 24)
+#define XTAL_SI_NORMAL_WRITE 0x00
+#define XTAL_SI_NORMAL_READ 0x01
+#define B_AX_WL_XTAL_SI_BITMASK_MASK GENMASK(23, 16)
+#define B_AX_WL_XTAL_SI_DATA_MASK GENMASK(15, 8)
+#define B_AX_WL_XTAL_SI_ADDR_MASK GENMASK(7, 0)
+
#define R_AX_XTAL_ON_CTRL0 0x0280
#define B_AX_XTAL_SC_LPS BIT(31)
#define B_AX_XTAL_SC_XO_MASK GENMASK(23, 17)
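
The XTAL SI block added above is a small command mailbox: compose address, data, bitmask and mode, set the poll bit, and wait for hardware to clear it. A minimal sketch of one write transaction, assuming rtw89's register accessors plus linux/bitfield.h and linux/iopoll.h:

/* Sketch of a poll-bit mailbox write over the XTAL SI interface. */
static int xtal_si_write_sketch(struct rtw89_dev *rtwdev, u8 offset,
				u8 val, u8 bitmask)
{
	u32 cmd, val32;

	cmd = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
	      FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
	      FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, bitmask) |
	      FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
	      B_AX_WL_XTAL_SI_CMD_POLL;
	rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, cmd);

	/* Hardware clears the poll bit once the transaction has landed. */
	return read_poll_timeout(rtw89_read32, val32,
				 !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
				 50, 50000, false,
				 rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
}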
@@ -138,7 +229,13 @@
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+#define R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN 0x02E4
+#define B_AX_LED1_PULL_LOW_EN BIT(18)
+#define B_AX_EESK_PULL_LOW_EN BIT(17)
+#define B_AX_EECS_PULL_LOW_EN BIT(16)
+
#define R_AX_WLRF_CTRL 0x02F0
+#define B_AX_AFC_AFEDIG BIT(17)
#define B_AX_WLRF1_CTRL_7 BIT(15)
#define B_AX_WLRF1_CTRL_1 BIT(9)
#define B_AX_WLRF_CTRL_7 BIT(7)
@@ -152,8 +249,60 @@
#define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2)
#define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0)
+#define R_AX_AFE_OFF_CTRL1 0x0444
+#define B_AX_S1_LDO_VSEL_F_MASK GENMASK(25, 24)
+#define B_AX_S1_LDO2PWRCUT_F BIT(23)
+#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+
#define R_AX_FILTER_MODEL_ADDR 0x0C04
+#define R_AX_HAXI_INIT_CFG1 0x1000
+#define B_AX_WD_ITVL_IDLE_V1_MASK GENMASK(31, 28)
+#define B_AX_WD_ITVL_ACT_V1_MASK GENMASK(27, 24)
+#define B_AX_DMA_MODE_MASK GENMASK(19, 18)
+#define DMA_MOD_PCIE_1B 0x0
+#define DMA_MOD_PCIE_4B 0x1
+#define DMA_MOD_USB 0x2
+#define DMA_MOD_SDIO 0x3
+#define B_AX_STOP_AXI_MST BIT(17)
+#define B_AX_HAXI_RST_KEEP_REG BIT(16)
+#define B_AX_RXHCI_EN_V1 BIT(15)
+#define B_AX_RXBD_MODE_V1 BIT(14)
+#define B_AX_HAXI_MAX_RXDMA_MASK GENMASK(9, 8)
+#define B_AX_TXHCI_EN_V1 BIT(7)
+#define B_AX_FLUSH_AXI_MST BIT(4)
+#define B_AX_RST_BDRAM BIT(3)
+#define B_AX_HAXI_MAX_TXDMA_MASK GENMASK(1, 0)
+
+#define R_AX_HAXI_DMA_STOP1 0x1010
+#define B_AX_STOP_WPDMA BIT(19)
+#define B_AX_STOP_CH12 BIT(18)
+#define B_AX_STOP_CH9 BIT(17)
+#define B_AX_STOP_CH8 BIT(16)
+#define B_AX_STOP_ACH7 BIT(15)
+#define B_AX_STOP_ACH6 BIT(14)
+#define B_AX_STOP_ACH5 BIT(13)
+#define B_AX_STOP_ACH4 BIT(12)
+#define B_AX_STOP_ACH3 BIT(11)
+#define B_AX_STOP_ACH2 BIT(10)
+#define B_AX_STOP_ACH1 BIT(9)
+#define B_AX_STOP_ACH0 BIT(8)
+
+#define R_AX_HAXI_DMA_BUSY1 0x101C
+#define B_AX_HAXIIO_BUSY BIT(20)
+#define B_AX_WPDMA_BUSY BIT(19)
+#define B_AX_CH12_BUSY BIT(18)
+#define B_AX_CH9_BUSY BIT(17)
+#define B_AX_CH8_BUSY BIT(16)
+#define B_AX_ACH7_BUSY BIT(15)
+#define B_AX_ACH6_BUSY BIT(14)
+#define B_AX_ACH5_BUSY BIT(13)
+#define B_AX_ACH4_BUSY BIT(12)
+#define B_AX_ACH3_BUSY BIT(11)
+#define B_AX_ACH2_BUSY BIT(10)
+#define B_AX_ACH1_BUSY BIT(9)
+#define B_AX_ACH0_BUSY BIT(8)
+
#define R_AX_PCIE_DBG_CTRL 0x11C0
#define B_AX_DBG_DUMMY_MASK GENMASK(23, 16)
#define B_AX_DBG_SEL_MASK GENMASK(15, 13)
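
The HAXI STOP1/BUSY1 pair added above implies the usual DMA quiesce pattern: assert the per-channel stop bits, then poll the matching busy bits until in-flight traffic drains. A hedged sketch with an illustrative channel subset:

/* Sketch: stop a set of HAXI DMA channels and wait for them to idle. */
static int haxi_dma_quiesce_sketch(struct rtw89_dev *rtwdev)
{
	u32 busy = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY |
		   B_AX_ACH3_BUSY | B_AX_CH8_BUSY | B_AX_CH9_BUSY |
		   B_AX_CH12_BUSY | B_AX_WPDMA_BUSY;
	u32 val;

	rtw89_write32_set(rtwdev, R_AX_HAXI_DMA_STOP1,
			  B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH2 |
			  B_AX_STOP_ACH3 | B_AX_STOP_CH8 | B_AX_STOP_CH9 |
			  B_AX_STOP_CH12 | B_AX_STOP_WPDMA);

	return read_poll_timeout(rtw89_read32, val, !(val & busy),
				 10, 1000, false,
				 rtwdev, R_AX_HAXI_DMA_BUSY1);
}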
@@ -162,8 +311,94 @@
#define B_AX_ASFF_FULL_NO_STK BIT(1)
#define B_AX_EN_STUCK_DBG BIT(0)
+#define R_AX_HAXI_DMA_STOP2 0x11C0
+#define B_AX_STOP_CH11 BIT(1)
+#define B_AX_STOP_CH10 BIT(0)
+
+#define R_AX_HAXI_DMA_BUSY2 0x11C8
+#define B_AX_CH11_BUSY BIT(1)
+#define B_AX_CH10_BUSY BIT(0)
+
+#define R_AX_HAXI_DMA_BUSY3 0x1208
+#define B_AX_RPQ_BUSY BIT(1)
+#define B_AX_RXQ_BUSY BIT(0)
+
+#define R_AX_LTR_DEC_CTRL 0x1600
+#define B_AX_LTR_IDX_DRV_VLD BIT(16)
+#define B_AX_LTR_CURR_IDX_DRV_MASK GENMASK(15, 14)
+#define B_AX_LTR_IDX_FW_VLD BIT(13)
+#define B_AX_LTR_CURR_IDX_FW_MASK GENMASK(12, 11)
+#define B_AX_LTR_IDX_HW_VLD BIT(10)
+#define B_AX_LTR_CURR_IDX_HW_MASK GENMASK(9, 8)
+#define B_AX_LTR_REQ_DRV BIT(7)
+#define B_AX_LTR_IDX_DRV_MASK GENMASK(6, 5)
+#define PCIE_LTR_IDX_IDLE 3
+#define B_AX_LTR_DRV_DEC_EN BIT(4)
+#define B_AX_LTR_FW_DEC_EN BIT(3)
+#define B_AX_LTR_HW_DEC_EN BIT(2)
+#define B_AX_LTR_SPACE_IDX_V1_MASK GENMASK(1, 0)
+#define LTR_EN_BITS (B_AX_LTR_HW_DEC_EN | B_AX_LTR_FW_DEC_EN | B_AX_LTR_DRV_DEC_EN)
+
+#define R_AX_LTR_LATENCY_IDX0 0x1604
+#define R_AX_LTR_LATENCY_IDX1 0x1608
+#define R_AX_LTR_LATENCY_IDX2 0x160C
+#define R_AX_LTR_LATENCY_IDX3 0x1610
+
+#define R_AX_HCI_FC_CTRL_V1 0x1700
+#define R_AX_CH_PAGE_CTRL_V1 0x1704
+
+#define R_AX_ACH0_PAGE_CTRL_V1 0x1710
+#define R_AX_ACH1_PAGE_CTRL_V1 0x1714
+#define R_AX_ACH2_PAGE_CTRL_V1 0x1718
+#define R_AX_ACH3_PAGE_CTRL_V1 0x171C
+#define R_AX_ACH4_PAGE_CTRL_V1 0x1720
+#define R_AX_ACH5_PAGE_CTRL_V1 0x1724
+#define R_AX_ACH6_PAGE_CTRL_V1 0x1728
+#define R_AX_ACH7_PAGE_CTRL_V1 0x172C
+#define R_AX_CH8_PAGE_CTRL_V1 0x1730
+#define R_AX_CH9_PAGE_CTRL_V1 0x1734
+#define R_AX_CH10_PAGE_CTRL_V1 0x1738
+#define R_AX_CH11_PAGE_CTRL_V1 0x173C
+
+#define R_AX_ACH0_PAGE_INFO_V1 0x1750
+#define R_AX_ACH1_PAGE_INFO_V1 0x1754
+#define R_AX_ACH2_PAGE_INFO_V1 0x1758
+#define R_AX_ACH3_PAGE_INFO_V1 0x175C
+#define R_AX_ACH4_PAGE_INFO_V1 0x1760
+#define R_AX_ACH5_PAGE_INFO_V1 0x1764
+#define R_AX_ACH6_PAGE_INFO_V1 0x1768
+#define R_AX_ACH7_PAGE_INFO_V1 0x176C
+#define R_AX_CH8_PAGE_INFO_V1 0x1770
+#define R_AX_CH9_PAGE_INFO_V1 0x1774
+#define R_AX_CH10_PAGE_INFO_V1 0x1778
+#define R_AX_CH11_PAGE_INFO_V1 0x177C
+#define R_AX_CH12_PAGE_INFO_V1 0x1780
+
+#define R_AX_PUB_PAGE_INFO3_V1 0x178C
+#define R_AX_PUB_PAGE_CTRL1_V1 0x1790
+#define R_AX_PUB_PAGE_CTRL2_V1 0x1794
+#define R_AX_PUB_PAGE_INFO1_V1 0x1798
+#define R_AX_PUB_PAGE_INFO2_V1 0x179C
+#define R_AX_WP_PAGE_CTRL1_V1 0x17A0
+#define R_AX_WP_PAGE_CTRL2_V1 0x17A4
+#define R_AX_WP_PAGE_INFO1_V1 0x17A8
+
+#define R_AX_H2CREG_DATA0_V1 0x7140
+#define R_AX_H2CREG_DATA1_V1 0x7144
+#define R_AX_H2CREG_DATA2_V1 0x7148
+#define R_AX_H2CREG_DATA3_V1 0x714C
+#define R_AX_C2HREG_DATA0_V1 0x7150
+#define R_AX_C2HREG_DATA1_V1 0x7154
+#define R_AX_C2HREG_DATA2_V1 0x7158
+#define R_AX_C2HREG_DATA3_V1 0x715C
+#define R_AX_H2CREG_CTRL_V1 0x7160
+#define R_AX_C2HREG_CTRL_V1 0x7164
+
+#define R_AX_HCI_FUNC_EN_V1 0x7880
+
#define R_AX_PHYREG_SET 0x8040
#define PHYREG_SET_ALL_CYCLE 0x8
+#define PHYREG_SET_XYN_CYCLE 0xE
#define R_AX_HD0IMR 0x8110
#define B_AX_WDT_PTFM_INT_EN BIT(5)
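
For the new LTR decision block, a hedged bring-up sketch: park the driver-requested index at idle and turn on all three decision agents via LTR_EN_BITS. The latency word is a placeholder, not a vendor-tuned value:

/* Sketch: minimal LTR decision-control init (placeholder latency). */
static void ltr_init_sketch(struct rtw89_dev *rtwdev)
{
	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
	rtw89_write32_mask(rtwdev, R_AX_LTR_DEC_CTRL,
			   B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE);
	rtw89_write32_set(rtwdev, R_AX_LTR_DEC_CTRL, LTR_EN_BITS);
}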
@@ -194,6 +429,7 @@
#define R_AX_BOOT_DBG 0x83F0
#define R_AX_DMAC_FUNC_EN 0x8400
+#define B_AX_DMAC_CRPRT BIT(31)
#define B_AX_MAC_FUNC_EN BIT(30)
#define B_AX_DMAC_FUNC_EN BIT(29)
#define B_AX_MPDU_PROC_EN BIT(28)
@@ -207,7 +443,10 @@
#define B_AX_PKT_IN_EN BIT(20)
#define B_AX_DLE_CPUIO_EN BIT(19)
#define B_AX_DISPATCHER_EN BIT(18)
+#define B_AX_BBRPT_EN BIT(17)
#define B_AX_MAC_SEC_EN BIT(16)
+#define B_AX_MAC_UN_EN BIT(15)
+#define B_AX_H_AXIDMA_EN BIT(14)
#define R_AX_DMAC_CLK_EN 0x8404
#define B_AX_WD_RLS_CLK_EN BIT(27)
@@ -218,6 +457,7 @@
#define B_AX_PKT_IN_CLK_EN BIT(20)
#define B_AX_DLE_CPUIO_CLK_EN BIT(19)
#define B_AX_DISPATCHER_CLK_EN BIT(18)
+#define B_AX_BBRPT_CLK_EN BIT(17)
#define B_AX_MAC_SEC_CLK_EN BIT(16)
#define PCI_LTR_IDLE_TIMER_1US 0
@@ -243,9 +483,11 @@
#define R_AX_LTR_CTRL_0 0x8410
#define B_AX_LTR_SPACE_IDX_MASK GENMASK(13, 12)
#define B_AX_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8)
+#define B_AX_LTR_WD_NOEMP_CHK BIT(6)
#define B_AX_APP_LTR_ACT BIT(5)
#define B_AX_APP_LTR_IDLE BIT(4)
#define B_AX_LTR_EN BIT(1)
+#define B_AX_LTR_WD_NOEMP_CHK_V1 BIT(1)
#define B_AX_LTR_HW_EN BIT(0)
#define R_AX_LTR_CTRL_1 0x8414
@@ -281,6 +523,21 @@
#define B_AX_WDE_EMPTY_QUE_CMAC0_MBH BIT(1)
#define B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0)
+#define R_AX_DMAC_ERR_IMR 0x8520
+#define B_AX_DLE_CPUIO_ERR_INT_EN BIT(10)
+#define B_AX_APB_BRIDGE_ERR_INT_EN BIT(9)
+#define B_AX_DISPATCH_ERR_INT_EN BIT(8)
+#define B_AX_PKTIN_ERR_INT_EN BIT(7)
+#define B_AX_PLE_DLE_ERR_INT_EN BIT(6)
+#define B_AX_TXPKTCTRL_ERR_INT_EN BIT(5)
+#define B_AX_WDE_DLE_ERR_INT_EN BIT(4)
+#define B_AX_STA_SCHEDULER_ERR_INT_EN BIT(3)
+#define B_AX_MPDU_ERR_INT_EN BIT(2)
+#define B_AX_WSEC_ERR_INT_EN BIT(1)
+#define B_AX_WDRLS_ERR_INT_EN BIT(0)
+#define DMAC_ERR_IMR_EN GENMASK(31, 0)
+#define DMAC_ERR_IMR_DIS 0
+
#define R_AX_DMAC_ERR_ISR 0x8524
#define B_AX_DLE_CPUIO_ERR_FLAG BIT(10)
#define B_AX_APB_BRIDGE_ERR_FLAG BIT(9)
@@ -304,13 +561,361 @@
#define B_AX_HOST_ADDR_INFO_8B_SEL BIT(0)
#define R_AX_HOST_DISPATCHER_ERR_IMR 0x8850
+#define B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN BIT(31)
+#define B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN BIT(30)
+#define B_AX_HDT_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_AX_HDT_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_AX_HDT_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_AX_HDT_TOTAL_LEN_ERR_INT_EN BIT(26)
+#define B_AX_HDT_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_AX_HDT_RXAGG_CFG_ERR_INT_EN BIT(24)
+#define B_AX_HDT_OUTPUT_ERR_INT_EN BIT(21)
+#define B_AX_HDT_RES_ERR_INT_EN BIT(20)
+#define B_AX_HDT_BURST_NUM_ERR_INT_EN BIT(19)
+#define B_AX_HDT_NULLPKT_ERR_INT_EN BIT(18)
+#define B_AX_HDT_FLOW_CTRL_ERR_INT_EN BIT(17)
+#define B_AX_HDT_PLD_CMD_UNDERFLOW_INT_EN BIT(16)
+#define B_AX_HDT_PLD_CMD_OVERLOW_INT_EN BIT(15)
+#define B_AX_HDT_TX_WRITE_UNDERFLOW_INT_EN BIT(14)
+#define B_AX_HDT_TX_WRITE_OVERFLOW_INT_EN BIT(13)
+#define B_AX_HDT_TCP_CHK_ERR_INT_EN BIT(12)
+#define B_AX_HDT_TXPKTSIZE_ERR_INT_EN BIT(11)
+#define B_AX_HDT_PRE_COST_ERR_INT_EN BIT(10)
+#define B_AX_HDT_WD_CHK_ERR_INT_EN BIT(9)
+#define B_AX_HDT_CHANNEL_DMA_ERR_INT_EN BIT(8)
#define B_AX_HDT_OFFSET_UNMATCH_INT_EN BIT(7)
+#define B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN BIT(6)
+#define B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN BIT(5)
+#define B_AX_HDT_PERMU_UNDERFLOW_INT_EN BIT(4)
+#define B_AX_HDT_PERMU_OVERFLOW_INT_EN BIT(3)
#define B_AX_HDT_PKT_FAIL_DBG_INT_EN BIT(2)
+#define B_AX_HDT_CHANNEL_ID_ERR_INT_EN BIT(1)
+#define B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_HOST_DISP_IMR_CLR (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_HDT_CHANNEL_ID_ERR_INT_EN | \
+ B_AX_HDT_PKT_FAIL_DBG_INT_EN | \
+ B_AX_HDT_PERMU_OVERFLOW_INT_EN | \
+ B_AX_HDT_PERMU_UNDERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_OFFSET_UNMATCH_INT_EN | \
+ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HDT_WD_CHK_ERR_INT_EN | \
+ B_AX_HDT_PRE_COST_ERR_INT_EN | \
+ B_AX_HDT_TXPKTSIZE_ERR_INT_EN | \
+ B_AX_HDT_TCP_CHK_ERR_INT_EN | \
+ B_AX_HDT_TX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_HDT_TX_WRITE_UNDERFLOW_INT_EN | \
+ B_AX_HDT_PLD_CMD_OVERLOW_INT_EN | \
+ B_AX_HDT_PLD_CMD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_HDT_NULLPKT_ERR_INT_EN | \
+ B_AX_HDT_BURST_NUM_ERR_INT_EN | \
+ B_AX_HDT_RXAGG_CFG_ERR_INT_EN | \
+ B_AX_HDT_SHIFT_EN_ERR_INT_EN | \
+ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_HDT_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_HDT_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_HDT_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN)
+#define B_AX_HOST_DISP_IMR_SET (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_HDT_DMA_PROCESS_ERR_INT_EN)
+
+#define B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31)
+#define B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30)
+#define B_AX_HR_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_AX_HR_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_AX_HR_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(26)
+#define B_AX_HR_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_AX_HR_AGG_CFG_ERR_INT_EN BIT(24)
+#define B_AX_HR_DMA_RD_CNT_DEQ_ERR_INT_EN BIT(23)
+#define B_AX_HR_PLD_LEN_ZERO_ERR_INT_EN BIT(22)
+#define B_AX_HT_ILL_CH_ERR_INT_EN BIT(20)
+#define B_AX_HT_ADDR_INFO_LEN_ERR_INT_EN BIT(18)
+#define B_AX_HT_WD_LEN_OVER_ERR_INT_EN BIT(17)
+#define B_AX_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(16)
+#define B_AX_HT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(15)
+#define B_AX_HT_WRFF_UNDERFLOW_ERR_INT_EN BIT(14)
+#define B_AX_HT_WRFF_OVERFLOW_ERR_INT_EN BIT(13)
+#define B_AX_HT_CHKSUM_FSM_ERR_INT_EN BIT(12)
+#define B_AX_HT_TXPKTSIZE_ERR_INT_EN BIT(11)
+#define B_AX_HT_PRE_SUB_ERR_INT_EN BIT(10)
+#define B_AX_HT_WD_CHKSUM_ERR_INT_EN BIT(9)
+#define B_AX_HT_CHANNEL_DMA_ERR_INT_EN BIT(8)
+#define B_AX_HT_OFFSET_UNMATCH_ERR_INT_EN BIT(7)
+#define B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN BIT(6)
+#define B_AX_HT_PAYLOAD_OVER_ERR_INT_EN BIT(5)
+#define B_AX_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4)
+#define B_AX_HT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3)
+#define B_AX_HT_PKT_FAIL_ERR_INT_EN BIT(2)
+#define B_AX_HT_CH_ID_ERR_INT_EN BIT(1)
+#define B_AX_HT_EP_CH_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_HOST_DISP_IMR_CLR_V1 (B_AX_HT_EP_CH_DIFF_ERR_INT_EN | \
+ B_AX_HT_CH_ID_ERR_INT_EN | \
+ B_AX_HT_PKT_FAIL_ERR_INT_EN | \
+ B_AX_HT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_AX_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_HT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_HT_OFFSET_UNMATCH_ERR_INT_EN | \
+ B_AX_HT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_HT_WD_CHKSUM_ERR_INT_EN | \
+ B_AX_HT_PRE_SUB_ERR_INT_EN | \
+ B_AX_HT_TXPKTSIZE_ERR_INT_EN | \
+ B_AX_HT_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_HT_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_HT_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_HT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_AX_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_AX_HT_WD_LEN_OVER_ERR_INT_EN | \
+ B_AX_HT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_AX_HT_ILL_CH_ERR_INT_EN | \
+ B_AX_HR_PLD_LEN_ZERO_ERR_INT_EN | \
+ B_AX_HR_DMA_RD_CNT_DEQ_ERR_INT_EN | \
+ B_AX_HR_AGG_CFG_ERR_INT_EN | \
+ B_AX_HR_SHIFT_EN_ERR_INT_EN | \
+ B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_HR_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_HR_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_HR_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define B_AX_HOST_DISP_IMR_SET_V1 (B_AX_HT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_HT_ILL_CH_ERR_INT_EN | \
+ B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_HR_DMA_PROCESS_ERR_INT_EN)
#define R_AX_CPU_DISPATCHER_ERR_IMR 0x8854
+#define B_AX_CPU_RX_WRITE_UNDERFLOW_INT_EN BIT(31)
+#define B_AX_CPU_RX_WRITE_OVERFLOW_INT_EN BIT(30)
+#define B_AX_CPU_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_AX_CPU_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_AX_CPU_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_AX_CPU_TOTAL_LEN_ERR_INT_EN BIT(26)
#define B_AX_CPU_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_AX_CPU_RXAGG_CFG_ERR_INT_EN BIT(24)
+#define B_AX_CPU_OUTPUT_ERR_INT_EN BIT(20)
+#define B_AX_CPU_RESP_ERR_INT_EN BIT(19)
+#define B_AX_CPU_BURST_NUM_ERR_INT_EN BIT(18)
+#define B_AX_CPU_NULLPKT_ERR_INT_EN BIT(17)
+#define B_AX_CPU_FLOW_CTRL_ERR_INT_EN BIT(16)
+#define B_AX_CPU_F2P_SEQ_ERR_INT_EN BIT(15)
+#define B_AX_CPU_F2P_QSEL_ERR_INT_EN BIT(14)
+#define B_AX_CPU_PLD_CMD_UNDERFLOW_INT_EN BIT(13)
+#define B_AX_CPU_PLD_CMD_OVERLOW_INT_EN BIT(12)
+#define B_AX_CPU_PRE_COST_ERR_INT_EN BIT(11)
+#define B_AX_CPU_WD_CHK_ERR_INT_EN BIT(10)
+#define B_AX_CPU_CHANNEL_DMA_ERR_INT_EN BIT(9)
+#define B_AX_CPU_OFFSET_UNMATCH_INT_EN BIT(8)
+#define B_AX_CPU_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7)
+#define B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN BIT(6)
+#define B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN BIT(5)
+#define B_AX_CPU_PERMU_UNDERFLOW_INT_EN BIT(4)
+#define B_AX_CPU_PERMU_OVERFLOW_INT_EN BIT(3)
+#define B_AX_CPU_CHANNEL_ID_ERR_INT_EN BIT(2)
+#define B_AX_CPU_PKT_FAIL_DBG_INT_EN BIT(1)
+#define B_AX_CPU_CHANNEL_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_CPU_DISP_IMR_CLR (B_AX_CPU_CHANNEL_DIFF_ERR_INT_EN | \
+ B_AX_CPU_PKT_FAIL_DBG_INT_EN | \
+ B_AX_CPU_CHANNEL_ID_ERR_INT_EN | \
+ B_AX_CPU_PERMU_OVERFLOW_INT_EN | \
+ B_AX_CPU_PERMU_UNDERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_CHKSUM_ERR_INT_EN | \
+ B_AX_CPU_OFFSET_UNMATCH_INT_EN | \
+ B_AX_CPU_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_CPU_WD_CHK_ERR_INT_EN | \
+ B_AX_CPU_PRE_COST_ERR_INT_EN | \
+ B_AX_CPU_PLD_CMD_OVERLOW_INT_EN | \
+ B_AX_CPU_PLD_CMD_UNDERFLOW_INT_EN | \
+ B_AX_CPU_F2P_QSEL_ERR_INT_EN | \
+ B_AX_CPU_F2P_SEQ_ERR_INT_EN | \
+ B_AX_CPU_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_CPU_NULLPKT_ERR_INT_EN | \
+ B_AX_CPU_BURST_NUM_ERR_INT_EN | \
+ B_AX_CPU_RXAGG_CFG_ERR_INT_EN | \
+ B_AX_CPU_SHIFT_EN_ERR_INT_EN | \
+ B_AX_CPU_TOTAL_LEN_ERR_INT_EN | \
+ B_AX_CPU_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_CPU_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_CPU_CHKSUM_FSM_ERR_INT_EN | \
+ B_AX_CPU_RX_WRITE_OVERFLOW_INT_EN | \
+ B_AX_CPU_RX_WRITE_UNDERFLOW_INT_EN)
+#define B_AX_CPU_DISP_IMR_SET (B_AX_CPU_PKT_FAIL_DBG_INT_EN | \
+ B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN | \
+ B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN | \
+ B_AX_CPU_TOTAL_LEN_ERR_INT_EN)
+
+#define B_AX_CR_PLD_LEN_ERR_INT_EN BIT(30)
+#define B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN BIT(29)
+#define B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN BIT(28)
+#define B_AX_CR_SHIFT_DMA_CFG_ERR_INT_EN BIT(27)
+#define B_AX_CR_DMA_PROCESS_ERR_INT_EN BIT(26)
+#define B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(25)
+#define B_AX_CR_SHIFT_EN_ERR_INT_EN BIT(24)
+#define B_AX_REUSE_FIFO_B_UNDER_ERR_INT_EN BIT(22)
+#define B_AX_REUSE_FIFO_B_OVER_ERR_INT_EN BIT(21)
+#define B_AX_REUSE_FIFO_A_UNDER_ERR_INT_EN BIT(20)
+#define B_AX_REUSE_FIFO_A_OVER_ERR_INT_EN BIT(19)
+#define B_AX_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN BIT(17)
+#define B_AX_CT_WD_LEN_OVER_ERR_INT_EN BIT(16)
+#define B_AX_CT_F2P_SEQ_ERR_INT_EN BIT(15)
+#define B_AX_CT_F2P_QSEL_ERR_INT_EN BIT(14)
+#define B_AX_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(13)
+#define B_AX_CT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(12)
+#define B_AX_CT_PRE_SUB_ERR_INT_EN BIT(11)
+#define B_AX_CT_WD_CHKSUM_ERR_INT_EN BIT(10)
+#define B_AX_CT_CHANNEL_DMA_ERR_INT_EN BIT(9)
+#define B_AX_CT_OFFSET_UNMATCH_ERR_INT_EN BIT(8)
+#define B_AX_CT_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7)
+#define B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN BIT(6)
+#define B_AX_CT_PAYLOAD_OVER_ERR_INT_EN BIT(5)
+#define B_AX_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4)
+#define B_AX_CT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3)
+#define B_AX_CT_CH_ID_ERR_INT_EN BIT(2)
+#define B_AX_CT_EP_CH_DIFF_ERR_INT_EN BIT(0)
+#define B_AX_CPU_DISP_IMR_CLR_V1 (B_AX_CT_EP_CH_DIFF_ERR_INT_EN | \
+ B_AX_CT_CH_ID_ERR_INT_EN | \
+ B_AX_CT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_AX_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_CHKSUM_ERR_INT_EN | \
+ B_AX_CT_OFFSET_UNMATCH_ERR_INT_EN | \
+ B_AX_CT_CHANNEL_DMA_ERR_INT_EN | \
+ B_AX_CT_WD_CHKSUM_ERR_INT_EN | \
+ B_AX_CT_PRE_SUB_ERR_INT_EN | \
+ B_AX_CT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_AX_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_AX_CT_F2P_QSEL_ERR_INT_EN | \
+ B_AX_CT_F2P_SEQ_ERR_INT_EN | \
+ B_AX_CT_WD_LEN_OVER_ERR_INT_EN | \
+ B_AX_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_A_OVER_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_A_UNDER_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_B_OVER_ERR_INT_EN | \
+ B_AX_REUSE_FIFO_B_UNDER_ERR_INT_EN | \
+ B_AX_CR_SHIFT_EN_ERR_INT_EN | \
+ B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_CR_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_CR_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_CR_PLD_LEN_ERR_INT_EN)
+#define B_AX_CPU_DISP_IMR_SET_V1 (B_AX_CT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_AX_CR_DMA_PROCESS_ERR_INT_EN | \
+ B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN)
#define R_AX_OTHER_DISPATCHER_ERR_IMR 0x8858
+#define B_AX_OTHER_STF_WROQT_UNDERFLOW_INT_EN BIT(29)
+#define B_AX_OTHER_STF_WROQT_OVERFLOW_INT_EN BIT(28)
+#define B_AX_OTHER_STF_WRFF_UNDERFLOW_INT_EN BIT(27)
+#define B_AX_OTHER_STF_WRFF_OVERFLOW_INT_EN BIT(26)
+#define B_AX_OTHER_STF_CMD_UNDERFLOW_INT_EN BIT(25)
+#define B_AX_OTHER_STF_CMD_OVERFLOW_INT_EN BIT(24)
+#define B_AX_HOST_ADDR_INFO_LEN_ZERO_ERR_INT_EN BIT(17)
+#define B_AX_CPU_ADDR_INFO_LEN_ZERO_ERR_INT_EN BIT(16)
+#define B_AX_PLE_OUTPUT_ERR_INT_EN BIT(12)
+#define B_AX_PLE_RESP_ERR_INT_EN BIT(11)
+#define B_AX_PLE_BURST_NUM_ERR_INT_EN BIT(10)
+#define B_AX_PLE_NULL_PKT_ERR_INT_EN BIT(9)
+#define B_AX_PLE_FLOW_CTRL_ERR_INT_EN BIT(8)
+#define B_AX_WDE_OUTPUT_ERR_INT_EN BIT(4)
+#define B_AX_WDE_RESP_ERR_INT_EN BIT(3)
+#define B_AX_WDE_BURST_NUM_ERR_INT_EN BIT(2)
+#define B_AX_WDE_NULL_PKT_ERR_INT_EN BIT(1)
+#define B_AX_WDE_FLOW_CTRL_ERR_INT_EN BIT(0)
+#define B_AX_OTHER_DISP_IMR_CLR (B_AX_OTHER_STF_WROQT_UNDERFLOW_INT_EN | \
+ B_AX_OTHER_STF_WROQT_OVERFLOW_INT_EN | \
+ B_AX_OTHER_STF_WRFF_UNDERFLOW_INT_EN | \
+ B_AX_OTHER_STF_WRFF_OVERFLOW_INT_EN | \
+ B_AX_OTHER_STF_CMD_UNDERFLOW_INT_EN | \
+ B_AX_OTHER_STF_CMD_OVERFLOW_INT_EN | \
+ B_AX_HOST_ADDR_INFO_LEN_ZERO_ERR_INT_EN | \
+ B_AX_CPU_ADDR_INFO_LEN_ZERO_ERR_INT_EN | \
+ B_AX_PLE_OUTPUT_ERR_INT_EN | \
+ B_AX_PLE_RESP_ERR_INT_EN | \
+ B_AX_PLE_BURST_NUM_ERR_INT_EN | \
+ B_AX_PLE_NULL_PKT_ERR_INT_EN | \
+ B_AX_PLE_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_WDE_OUTPUT_ERR_INT_EN | \
+ B_AX_WDE_RESP_ERR_INT_EN | \
+ B_AX_WDE_BURST_NUM_ERR_INT_EN | \
+ B_AX_WDE_NULL_PKT_ERR_INT_EN | \
+ B_AX_WDE_FLOW_CTRL_ERR_INT_EN)
+
+#define B_AX_REUSE_SIZE_ERR_INT_EN BIT(31)
+#define B_AX_REUSE_EN_ERR_INT_EN BIT(30)
+#define B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN BIT(29)
+#define B_AX_STF_OQT_OVERFLOW_ERR_INT_EN BIT(28)
+#define B_AX_STF_WRFF_UNDERFLOW_ERR_INT_EN BIT(27)
+#define B_AX_STF_WRFF_OVERFLOW_ERR_INT_EN BIT(26)
+#define B_AX_STF_CMD_UNDERFLOW_ERR_INT_EN BIT(25)
+#define B_AX_STF_CMD_OVERFLOW_ERR_INT_EN BIT(24)
+#define B_AX_REUSE_SIZE_ZERO_ERR_INT_EN BIT(23)
+#define B_AX_REUSE_PKT_CNT_ERR_INT_EN BIT(22)
+#define B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN BIT(21)
+#define B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN BIT(20)
+#define B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN BIT(19)
+#define B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN BIT(18)
+#define B_AX_CDT_ADDR_INFO_LEN_ERR_INT_EN BIT(17)
+#define B_AX_HDT_ADDR_INFO_LEN_ERR_INT_EN BIT(16)
+#define B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN BIT(15)
+#define B_AX_CDR_RX_TIMEOUT_ERR_INT_EN BIT(14)
+#define B_AX_PLE_RESPOSE_ERR_INT_EN BIT(11)
+#define B_AX_HDR_DMA_TIMEOUT_ERR_INT_EN BIT(7)
+#define B_AX_HDR_RX_TIMEOUT_ERR_INT_EN BIT(6)
+#define B_AX_WDE_RESPONSE_ERR_INT_EN BIT(3)
+#define B_AX_OTHER_DISP_IMR_CLR_V1 (B_AX_CT_EP_CH_DIFF_ERR_INT_EN | \
+ B_AX_WDE_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_WDE_NULL_PKT_ERR_INT_EN | \
+ B_AX_WDE_BURST_NUM_ERR_INT_EN | \
+ B_AX_WDE_RESPONSE_ERR_INT_EN | \
+ B_AX_WDE_OUTPUT_ERR_INT_EN | \
+ B_AX_HDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_AX_PLE_FLOW_CTRL_ERR_INT_EN | \
+ B_AX_PLE_NULL_PKT_ERR_INT_EN | \
+ B_AX_PLE_BURST_NUM_ERR_INT_EN | \
+ B_AX_PLE_RESPOSE_ERR_INT_EN | \
+ B_AX_PLE_OUTPUT_ERR_INT_EN | \
+ B_AX_CDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_AX_CDT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_REUSE_PKT_CNT_ERR_INT_EN | \
+ B_AX_REUSE_SIZE_ZERO_ERR_INT_EN | \
+ B_AX_STF_CMD_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_AX_STF_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN | \
+ B_AX_REUSE_EN_ERR_INT_EN | \
+ B_AX_REUSE_SIZE_ERR_INT_EN)
+#define B_AX_OTHER_DISP_IMR_SET_V1 (B_AX_CDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \
+ B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN)
#define R_AX_HCI_FC_CTRL 0x8A00
#define B_AX_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10)
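
All of the *_IMR_CLR/*_IMR_SET pairs above follow one convention: CLR names every writable enable bit, SET names the subset that should actually fire. A hedged sketch of the resulting two-step programming, using the host dispatcher IMR as the example; the v1 flag stands in for whatever per-chip selection the driver uses:

/* Sketch: clear the full writable enable set, then raise the wanted bits. */
static void dispatcher_imr_cfg_sketch(struct rtw89_dev *rtwdev, bool v1)
{
	u32 clr = v1 ? B_AX_HOST_DISP_IMR_CLR_V1 : B_AX_HOST_DISP_IMR_CLR;
	u32 set = v1 ? B_AX_HOST_DISP_IMR_SET_V1 : B_AX_HOST_DISP_IMR_SET;

	rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, clr);
	rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, set);
}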
@@ -389,9 +994,163 @@
#define B_AX_WDE_START_BOUND_MASK GENMASK(13, 8)
#define B_AX_WDE_PAGE_SEL_MASK GENMASK(1, 0)
#define B_AX_WDE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
+
+#define R_AX_WDE_ERRFLAG_MSG 0x8C30
+#define B_AX_WDE_ERR_FLAG_MSG_MASK GENMASK(31, 0)
+
#define R_AX_WDE_ERR_FLAG_CFG 0x8C34
+
#define R_AX_WDE_ERR_IMR 0x8C38
+#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_AX_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(19)
+#define B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(18)
+#define B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(17)
+#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16)
+#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15)
+#define B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(14)
+#define B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN BIT(13)
+#define B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN BIT(12)
+#define B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN BIT(7)
+#define B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN BIT(6)
+#define B_AX_WDE_GETNPG_STRPG_ERR_INT_EN BIT(5)
+#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(4)
+#define B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN BIT(3)
+#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(2)
+#define B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN BIT(1)
+#define B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN BIT(0)
+#define B_AX_WDE_IMR_CLR (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
+
+#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_AX_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(19)
+#define B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(18)
+#define B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(17)
+#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16)
+#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15)
+#define B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(14)
+#define B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9)
+#define B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8)
+#define B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7)
+#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6)
+#define B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5)
+#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4)
+#define B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3)
+#define B_AX_WDE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_AX_WDE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_AX_WDE_IMR_CLR_V1 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+#define B_AX_WDE_IMR_SET_V1 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+
#define R_AX_WDE_ERR_ISR 0x8C3C
+#define B_AX_WDE_DATCHN_RRDY_ERR BIT(27)
+#define B_AX_WDE_DATCHN_FRZTO_ERR BIT(26)
+#define B_AX_WDE_DATCHN_NULLPG_ERR BIT(25)
+#define B_AX_WDE_DATCHN_ARBT_ERR BIT(24)
+#define B_AX_WDE_QUEMGN_FRZTO_ERR BIT(19)
+#define B_AX_WDE_NXTPKTLL_AD_ERR BIT(18)
+#define B_AX_WDE_PREPKTLLT_AD_ERR BIT(17)
+#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR BIT(16)
+#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR BIT(15)
+#define B_AX_WDE_QUE_SRCQUEID_ERR BIT(14)
+#define B_AX_WDE_QUE_DSTQUEID_ERR BIT(13)
+#define B_AX_WDE_QUE_CMDTYPE_ERR BIT(12)
+#define B_AX_WDE_BUFMGN_FRZTO_ERR BIT(7)
+#define B_AX_WDE_GETNPG_PGOFST_ERR BIT(6)
+#define B_AX_WDE_GETNPG_STRPG_ERR BIT(5)
+#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR BIT(4)
+#define B_AX_WDE_BUFRTN_SIZE_ERR BIT(3)
+#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR BIT(2)
+#define B_AX_WDE_BUFREQ_UNAVAL_ERR BIT(1)
+#define B_AX_WDE_BUFREQ_QTAID_ERR BIT(0)
#define B_AX_WDE_MAX_SIZE_MASK GENMASK(27, 16)
#define B_AX_WDE_MIN_SIZE_MASK GENMASK(11, 0)
@@ -426,7 +1185,123 @@
#define R_AX_PLE_ERR_FLAG_CFG 0x9034
#define R_AX_PLE_ERR_IMR 0x9038
+#define B_AX_PLE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_AX_PLE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN BIT(19)
+#define B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN BIT(18)
+#define B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN BIT(17)
+#define B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16)
+#define B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15)
+#define B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN BIT(14)
+#define B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN BIT(13)
+#define B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN BIT(12)
+#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN BIT(7)
+#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN BIT(6)
#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN BIT(5)
+#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(4)
+#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN BIT(3)
+#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(2)
+#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN BIT(1)
+#define B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN BIT(0)
+#define B_AX_PLE_IMR_CLR (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN)
+#define B_AX_PLE_IMR_SET (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN)
+
+#define B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9)
+#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8)
+#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7)
+#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6)
+#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5)
+#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4)
+#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3)
+#define B_AX_PLE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_AX_PLE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_AX_PLE_IMR_CLR_V1 (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN)
+#define B_AX_PLE_IMR_SET_V1 (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZE0_INT_EN | \
+ B_AX_PLE_BUFREQ_SIZELMT_INT_EN | \
+ B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 | \
+ B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
+ B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_RRDY_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN)
#define R_AX_PLE_ERR_FLAG_ISR 0x903C
#define B_AX_PLE_MAX_SIZE_MASK GENMASK(27, 16)
@@ -444,6 +1319,7 @@
#define R_AX_PLE_QTA8_CFG 0x9060
#define R_AX_PLE_QTA9_CFG 0x9064
#define R_AX_PLE_QTA10_CFG 0x9068
+#define R_AX_PLE_QTA11_CFG 0x906C
#define R_AX_PLE_INI_STATUS 0x9100
#define B_AX_PLE_Q_MGN_INI_RDY BIT(1)
@@ -480,12 +1356,97 @@
#define B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN BIT(2)
#define B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN BIT(1)
#define B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN BIT(0)
+#define B_AX_WDRLS_IMR_EN_CLR (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+#define B_AX_WDRLS_IMR_SET (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+#define B_AX_WDRLS_IMR_SET_V1 (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN | \
+ B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+
#define R_AX_WDRLS_ERR_ISR 0x9434
+#define R_AX_BBRPT_COM_ERR_IMR 0x9608
+#define B_AX_BBRPT_COM_HANG_EN BIT(1)
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0)
+
#define R_AX_BBRPT_COM_ERR_IMR_ISR 0x960C
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR BIT(16)
+#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0)
+
+#define R_AX_BBRPT_CHINFO_ERR_IMR 0x9628
+#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7)
+#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN BIT(5)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN BIT(4)
+#define B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN BIT(3)
+#define B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN BIT(2)
+#define B_AX_BBPRT_CHIF_OVF_ERR_INT_EN BIT(1)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN BIT(0)
+#define R_AX_BBRPT_CHINFO_IMR_SET_V1 (B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_OVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_NULL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_TO_ERR_INT_EN)
+
#define R_AX_BBRPT_CHINFO_ERR_IMR_ISR 0x962C
+#define B_AX_BBPRT_CHIF_TO_ERR BIT(23)
+#define B_AX_BBPRT_CHIF_NULL_ERR BIT(22)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR BIT(21)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR BIT(20)
+#define B_AX_BBPRT_CHIF_HDRL_ERR BIT(19)
+#define B_AX_BBPRT_CHIF_BOVF_ERR BIT(18)
+#define B_AX_BBPRT_CHIF_OVF_ERR BIT(17)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR BIT(16)
+#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7)
+#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6)
+#define B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN BIT(5)
+#define B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN BIT(4)
+#define B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN BIT(3)
+#define B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN BIT(2)
+#define B_AX_BBPRT_CHIF_OVF_ERR_INT_EN BIT(1)
+#define B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN BIT(0)
+#define B_AX_BBRPT_CHINFO_IMR_CLR (B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_OVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_NULL_ERR_INT_EN | \
+ B_AX_BBPRT_CHIF_TO_ERR_INT_EN)
+
+#define R_AX_BBRPT_DFS_ERR_IMR 0x9638
+#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0)
+
#define R_AX_BBRPT_DFS_ERR_IMR_ISR 0x963C
+#define B_AX_BBRPT_DFS_TO_ERR BIT(16)
+#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0)
+
#define R_AX_LA_ERRFLAG 0x966C
+#define B_AX_LA_ISR_DATA_LOSS_ERR BIT(16)
+#define B_AX_LA_IMR_DATA_LOSS_ERR BIT(0)
#define R_AX_WD_BUF_REQ 0x9800
#define R_AX_PL_BUF_REQ 0x9820
@@ -521,18 +1482,51 @@
#define R_AX_PL_CPUQ_OP_STATUS 0x983C
#define B_AX_WD_CPUQ_OP_STAT_DONE BIT(31)
#define B_AX_WD_CPUQ_OP_PKTID_MASK GENMASK(11, 0)
+
#define R_AX_CPUIO_ERR_IMR 0x9840
+#define B_AX_PLEQUE_OP_ERR_INT_EN BIT(12)
+#define B_AX_PLEBUF_OP_ERR_INT_EN BIT(8)
+#define B_AX_WDEQUE_OP_ERR_INT_EN BIT(4)
+#define B_AX_WDEBUF_OP_ERR_INT_EN BIT(0)
+#define B_AX_CPUIO_IMR_CLR (B_AX_WDEBUF_OP_ERR_INT_EN | \
+ B_AX_WDEQUE_OP_ERR_INT_EN | \
+ B_AX_PLEBUF_OP_ERR_INT_EN | \
+ B_AX_PLEQUE_OP_ERR_INT_EN)
+#define B_AX_CPUIO_IMR_SET (B_AX_WDEBUF_OP_ERR_INT_EN | \
+ B_AX_WDEQUE_OP_ERR_INT_EN | \
+ B_AX_PLEBUF_OP_ERR_INT_EN | \
+ B_AX_PLEQUE_OP_ERR_INT_EN)
+
#define R_AX_CPUIO_ERR_ISR 0x9844
#define R_AX_SEC_ERR_IMR_ISR 0x991C
#define R_AX_PKTIN_SETTING 0x9A00
#define B_AX_WD_ADDR_INFO_LENGTH BIT(1)
+
#define R_AX_PKTIN_ERR_IMR 0x9A20
+#define B_AX_PKTIN_GETPKTID_ERR_INT_EN BIT(0)
+
#define R_AX_PKTIN_ERR_ISR 0x9A24
#define R_AX_MPDU_TX_ERR_ISR 0x9BF0
#define R_AX_MPDU_TX_ERR_IMR 0x9BF4
+#define B_AX_TX_KSRCH_ERR_EN BIT(9)
+#define B_AX_TX_NW_TYPE_ERR_EN BIT(8)
+#define B_AX_TX_LLC_PRE_ERR_EN BIT(7)
+#define B_AX_TX_ETH_TYPE_ERR_EN BIT(6)
+#define B_AX_TX_HDR3_SIZE_ERR_INT_EN BIT(5)
+#define B_AX_TX_OFFSET_ERR_INT_EN BIT(4)
+#define B_AX_TX_MPDU_SIZE_ZERO_INT_EN BIT(3)
+#define B_AX_TX_NXT_ERRPKTID_INT_EN BIT(2)
+#define B_AX_TX_GET_ERRPKTID_INT_EN BIT(1)
+#define B_AX_MPDU_TX_IMR_SET_V1 (B_AX_TX_GET_ERRPKTID_INT_EN | \
+ B_AX_TX_NXT_ERRPKTID_INT_EN | \
+ B_AX_TX_MPDU_SIZE_ZERO_INT_EN | \
+ B_AX_TX_HDR3_SIZE_ERR_INT_EN | \
+ B_AX_TX_ETH_TYPE_ERR_EN | \
+ B_AX_TX_NW_TYPE_ERR_EN | \
+ B_AX_TX_KSRCH_ERR_EN)
#define R_AX_MPDU_PROC 0x9C00
#define B_AX_A_ICV_ERR BIT(1)
@@ -554,6 +1548,10 @@
#define R_AX_MPDU_RX_ERR_ISR 0x9CF0
#define R_AX_MPDU_RX_ERR_IMR 0x9CF4
+#define B_AX_RPT_ERR_INT_EN BIT(3)
+#define B_AX_MHDRLEN_ERR_INT_EN BIT(1)
+#define B_AX_GETPKTID_ERR_INT_EN BIT(0)
+#define B_AX_MPDU_RX_IMR_SET_V1 B_AX_RPT_ERR_INT_EN
#define R_AX_SEC_ENG_CTRL 0x9D00
#define B_AX_TX_PARTIAL_MODE BIT(11)
@@ -574,17 +1572,37 @@
#define R_AX_SEC_CAM_ACCESS 0x9D10
#define R_AX_SEC_CAM_RDATA 0x9D14
#define R_AX_SEC_CAM_WDATA 0x9D18
+
#define R_AX_SEC_DEBUG 0x9D1C
+#define B_AX_IMR_ERROR BIT(3)
+
+#define R_AX_SEC_DEBUG1 0x9D1C
+#define B_AX_TX_TIMEOUT_SEL_MASK GENMASK(31, 30)
+#define AX_TX_TO_VAL 0x2
+
#define R_AX_SEC_TX_DEBUG 0x9D20
#define R_AX_SEC_RX_DEBUG 0x9D24
#define R_AX_SEC_TRX_PKT_CNT 0x9D28
#define R_AX_SEC_TRX_BLK_CNT 0x9D2C
+#define R_AX_SEC_ERROR_FLAG_IMR 0x9D2C
+#define B_AX_RX_HANG_IMR BIT(1)
+#define B_AX_TX_HANG_IMR BIT(0)
+
#define R_AX_SS_CTRL 0x9E10
#define B_AX_SS_INIT_DONE_1 BIT(31)
#define B_AX_SS_WARM_INIT_FLG BIT(29)
+#define B_AX_SS_NONEMPTY_SS2FINFO_EN BIT(28)
#define B_AX_SS_EN BIT(0)
+#define R_AX_SS2FINFO_PATH 0x9E50
+#define B_AX_SS_UL_REL BIT(31)
+#define B_AX_SS_REL_QUEUE_MASK GENMASK(29, 24)
+#define B_AX_SS_REL_PORT_MASK GENMASK(18, 16)
+#define B_AX_SS_DEST_QUEUE_MASK GENMASK(13, 8)
+#define SS2F_PATH_WLCPU 0x0A
+#define B_AX_SS_DEST_PORT_MASK GENMASK(2, 0)
+
#define R_AX_SS_MACID_PAUSE_0 0x9EB0
#define B_AX_SS_MACID31_0_PAUSE_SH 0
#define B_AX_SS_MACID31_0_PAUSE_MASK GENMASK(31, 0)
@@ -602,9 +1620,47 @@
#define B_AX_SS_MACID127_96_PAUSE_MASK GENMASK(31, 0)
#define R_AX_STA_SCHEDULER_ERR_IMR 0x9EF0
+#define B_AX_PLE_B_PKTID_ERR_INT_EN BIT(2)
+#define B_AX_RPT_HANG_TIMEOUT_INT_EN BIT(1)
+#define B_AX_SEARCH_HANG_TIMEOUT_INT_EN BIT(0)
+#define B_AX_STA_SCHEDULER_IMR_SET (B_AX_SEARCH_HANG_TIMEOUT_INT_EN | \
+ B_AX_RPT_HANG_TIMEOUT_INT_EN | \
+ B_AX_PLE_B_PKTID_ERR_INT_EN)
+
#define R_AX_STA_SCHEDULER_ERR_ISR 0x9EF4
#define R_AX_TXPKTCTL_ERR_IMR_ISR 0x9F1C
+#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR BIT(25)
+#define B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR BIT(24)
+#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR BIT(19)
+#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR BIT(18)
+#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR BIT(17)
+#define B_AX_TXPKTCTL_USRCTL_REINIT_ERR BIT(16)
+#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9)
+#define B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN BIT(8)
+#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3)
+#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN BIT(2)
+#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN BIT(1)
+#define B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN BIT(0)
+#define B_AX_TXPKTCTL_IMR_B0_CLR (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN)
+#define B_AX_TXPKTCTL_IMR_B1_CLR (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN)
+#define B_AX_TXPKTCTL_IMR_B0_SET (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN)
+#define B_AX_TXPKTCTL_IMR_B1_SET (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN)
+
#define R_AX_TXPKTCTL_ERR_IMR_ISR_B1 0x9F2C
#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9)
#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3)
@@ -618,6 +1674,102 @@
#define R_AX_DBG_FUN_INTF_DATA 0x9F34
#define B_AX_DFI_DATA_MASK GENMASK(31, 0)
+#define R_AX_TXPKTCTL_B0_PRELD_CFG0 0x9F48
+#define B_AX_B0_PRELD_FEN BIT(31)
+#define B_AX_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B0_ENT_NUM 10
+#define PRELD_AMSDU_SIZE 52
+#define B_AX_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B0_PRELD_CFG1 0x9F4C
+#define B_AX_B0_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define PRELD_NEXT_WND 1
+#define B_AX_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_AX_TXPKTCTL_B0_ERRFLAG_IMR 0x9F78
+#define B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG BIT(21)
+#define B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR BIT(20)
+#define B_AX_B0_IMR_ERR_MPDUIF_DATAERR BIT(18)
+#define B_AX_B0_IMR_ERR_MPDUINFO_RECFG BIT(16)
+#define B_AX_B0_IMR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_AX_B0_IMR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_AX_B0_IMR_ERR_USRCTL_RLSBMPLEN BIT(3)
+#define B_AX_B0_IMR_ERR_USRCTL_RDNRLSCMD BIT(2)
+#define B_AX_B0_IMR_ERR_USRCTL_NOINIT BIT(1)
+#define B_AX_B0_IMR_ERR_USRCTL_REINIT BIT(0)
+#define B_AX_TXPKTCTL_IMR_B0_CLR_V1 (B_AX_B0_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B0_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B0_IMR_ERR_USRCTL_RDNRLSCMD | \
+ B_AX_B0_IMR_ERR_USRCTL_RLSBMPLEN | \
+ B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG)
+#define B_AX_TXPKTCTL_IMR_B0_SET_V1 (B_AX_B0_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B0_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88
+#define B_AX_B1_PRELD_FEN BIT(31)
+#define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B1_ENT_NUM 4
+#define B_AX_B1_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B1_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG1 0x9F8C
+#define B_AX_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define B_AX_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_AX_TXPKTCTL_B1_ERRFLAG_IMR 0x9FB8
+#define B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG BIT(21)
+#define B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR BIT(20)
+#define B_AX_B1_IMR_ERR_MPDUIF_DATAERR BIT(18)
+#define B_AX_B1_IMR_ERR_MPDUINFO_RECFG BIT(16)
+#define B_AX_B1_IMR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_AX_B1_IMR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_AX_B1_IMR_ERR_USRCTL_RLSBMPLEN BIT(3)
+#define B_AX_B1_IMR_ERR_USRCTL_RDNRLSCMD BIT(2)
+#define B_AX_B1_IMR_ERR_USRCTL_NOINIT BIT(1)
+#define B_AX_B1_IMR_ERR_USRCTL_REINIT BIT(0)
+#define B_AX_TXPKTCTL_IMR_B1_CLR_V1 (B_AX_B1_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B1_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B1_IMR_ERR_USRCTL_RDNRLSCMD | \
+ B_AX_B1_IMR_ERR_USRCTL_RLSBMPLEN | \
+ B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B1_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B1_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B1_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B1_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG)
+#define B_AX_TXPKTCTL_IMR_B1_SET_V1 (B_AX_B1_IMR_ERR_USRCTL_REINIT | \
+ B_AX_B1_IMR_ERR_USRCTL_NOINIT | \
+ B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_AX_B1_IMR_ERR_CMDPSR_FRZTO | \
+ B_AX_B1_IMR_ERR_CMDPSR_TBLSZ | \
+ B_AX_B1_IMR_ERR_MPDUINFO_RECFG | \
+ B_AX_B1_IMR_ERR_MPDUIF_DATAERR | \
+ B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG)
+
#define R_AX_AFE_CTRL1 0x0024
#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
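
The band-0 preload registers above suggest this enable sequence; a hedged sketch using the PRELD_* constants (putting all entries in group 0 is a guess for illustration):

/* Sketch: enable band-0 TX preload with the constants defined above. */
static void preld_b0_en_sketch(struct rtw89_dev *rtwdev, u16 max_sz)
{
	u32 val = B_AX_B0_PRELD_FEN |
		  FIELD_PREP(B_AX_B0_PRELD_USEMAXSZ_MASK, max_sz) |
		  FIELD_PREP(B_AX_B0_PRELD_CAM_G0ENTNUM_MASK, PRELD_B0_ENT_NUM);

	rtw89_write32(rtwdev, R_AX_TXPKTCTL_B0_PRELD_CFG0, val);
	rtw89_write32_mask(rtwdev, R_AX_TXPKTCTL_B0_PRELD_CFG1,
			   B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND);
}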
@@ -662,6 +1814,10 @@
#define R_AX_WMAC_RFMOD 0xC010
#define R_AX_WMAC_RFMOD_C1 0xE010
#define B_AX_WMAC_RFMOD_MASK GENMASK(1, 0)
+#define AX_WMAC_RFMOD_20M 0
+#define AX_WMAC_RFMOD_40M 1
+#define AX_WMAC_RFMOD_80M 2
+#define AX_WMAC_RFMOD_160M 3
#define R_AX_GID_POSITION0 0xC070
#define R_AX_GID_POSITION0_C1 0xE070
@@ -682,6 +1838,27 @@
#define B_AX_TXSC_40M_MASK GENMASK(7, 4)
#define B_AX_TXSC_20M_MASK GENMASK(3, 0)
+#define R_AX_PTCL_RRSR1 0xC090
+#define R_AX_PTCL_RRSR1_C1 0xE090
+#define B_AX_RRSR_RATE_EN_MASK GENMASK(11, 8)
+#define RRSR_OFDM_CCK_EN 3
+#define B_AX_RSC_MASK GENMASK(7, 6)
+#define B_AX_RRSR_CCK_MASK GENMASK(3, 0)
+
+#define R_AX_CMAC_ERR_IMR 0xC160
+#define R_AX_CMAC_ERR_IMR_C1 0xE160
+#define B_AX_WMAC_TX_ERR_IND_EN BIT(7)
+#define B_AX_WMAC_RX_ERR_IND_EN BIT(6)
+#define B_AX_TXPWR_CTRL_ERR_IND_EN BIT(5)
+#define B_AX_PHYINTF_ERR_IND_EN BIT(4)
+#define B_AX_DMA_TOP_ERR_IND_EN BIT(3)
+#define B_AX_PTCL_TOP_ERR_IND_EN BIT(1)
+#define B_AX_SCHEDULE_TOP_ERR_IND_EN BIT(0)
+#define CMAC0_ERR_IMR_EN GENMASK(31, 0)
+#define CMAC1_ERR_IMR_EN GENMASK(31, 0)
+#define CMAC0_ERR_IMR_DIS 0
+#define CMAC1_ERR_IMR_DIS 0
+
#define R_AX_CMAC_ERR_ISR 0xC164
#define R_AX_CMAC_ERR_ISR_C1 0xE164
#define B_AX_WMAC_TX_ERR_IND BIT(7)
@@ -717,6 +1894,15 @@
#define R_AX_PREBKF_CFG_0_C1 0xE338
#define B_AX_PREBKF_TIME_MASK GENMASK(4, 0)
+#define R_AX_PREBKF_CFG_1 0xC33C
+#define R_AX_PREBKF_CFG_1_C1 0xE33C
+#define B_AX_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(30, 24)
+#define B_AX_SIFS_PREBKF_MASK GENMASK(23, 16)
+#define B_AX_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
+#define B_AX_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
+#define SIFS_MACTXEN_T1 0x47
+#define SIFS_MACTXEN_T1_V1 0x41
+
#define R_AX_CCA_CFG_0 0xC340
#define R_AX_CCA_CFG_0_C1 0xE340
#define B_AX_BTCCA_BRK_TXOP_EN BIT(9)
@@ -745,6 +1931,7 @@
#define B_AX_CTN_TXEN_VI_0 BIT(2)
#define B_AX_CTN_TXEN_BK_0 BIT(1)
#define B_AX_CTN_TXEN_BE_0 BIT(0)
+#define B_AX_CTN_TXEN_ALL_MASK GENMASK(15, 0)
#define R_AX_MUEDCA_BE_PARAM_0 0xC350
#define R_AX_MUEDCA_BE_PARAM_0_C1 0xE350
@@ -791,10 +1978,15 @@
#define B_AX_CTN_CHK_CCA_S20 BIT(1)
#define B_AX_CTN_CHK_CCA_P20 BIT(0)
+#define R_AX_CTN_DRV_TXEN 0xC398
+#define R_AX_CTN_DRV_TXEN_C1 0xE398
+#define B_AX_CTN_TXEN_TWT_3 BIT(17)
+#define B_AX_CTN_TXEN_TWT_2 BIT(16)
+#define B_AX_CTN_TXEN_ALL_MASK_V1 GENMASK(17, 0)
+
#define R_AX_SCHEDULE_ERR_IMR 0xC3E8
#define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8
#define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1)
-#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
#define R_AX_SCHEDULE_ERR_ISR 0xC3EC
#define R_AX_SCHEDULE_ERR_ISR_C1 0xE3EC
@@ -809,6 +2001,10 @@
#define R_AX_SCH_DBG_C1 0xE3F8
#define B_AX_SCHEDULER_DBG_MASK GENMASK(31, 0)
+#define R_AX_SCH_EXT_CTRL 0xC3FC
+#define R_AX_SCH_EXT_CTRL_C1 0xE3FC
+#define B_AX_PORT_RST_TSF_ADV BIT(1)
+
#define R_AX_PORT_CFG_P0 0xC400
#define R_AX_PORT_CFG_P1 0xC440
#define R_AX_PORT_CFG_P2 0xC480
@@ -913,7 +2109,7 @@
#define R_AX_DTIM_CTRL_P2 0xC4A6
#define R_AX_DTIM_CTRL_P3 0xC4E6
#define R_AX_DTIM_CTRL_P4 0xC526
-#define B_AX_DTIM_NUM_MASK GENMASK(15, 0)
+#define B_AX_DTIM_NUM_MASK GENMASK(15, 8)
#define B_AX_DTIM_CURRCNT_MASK GENMASK(7, 0)
#define R_AX_TBTT_SHIFT_P0 0xC428
@@ -922,6 +2118,8 @@
#define R_AX_TBTT_SHIFT_P3 0xC4E8
#define R_AX_TBTT_SHIFT_P4 0xC528
#define B_AX_TBTT_SHIFT_OFST_MASK GENMASK(11, 0)
+#define B_AX_TBTT_SHIFT_OFST_SIGN BIT(11)
+#define B_AX_TBTT_SHIFT_OFST_MAG GENMASK(10, 0)
#define R_AX_BCN_CNT_TMR_P0 0xC434
#define R_AX_BCN_CNT_TMR_P1 0xC474
@@ -964,6 +2162,23 @@
#define B_AX_P0MB2_EN BIT(2)
#define B_AX_P0MB1_EN BIT(1)
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0 0xC590
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0_C1 0xE590
+#define R_AX_PORT_HGQ_WINDOW_CFG 0xC5A0
+#define R_AX_PORT_HGQ_WINDOW_CFG_C1 0xE5A0
+
+#define R_AX_PTCL_COMMON_SETTING_0 0xC600
+#define R_AX_PTCL_COMMON_SETTING_0_C1 0xE600
+#define B_AX_PCIE_MODE_MASK GENMASK(15, 14)
+#define B_AX_CPUMGQ_LIFETIME_EN BIT(8)
+#define B_AX_MGQ_LIFETIME_EN BIT(7)
+#define B_AX_LIFETIME_EN BIT(6)
+#define B_AX_PTCL_TRIGGER_SS_EN_UL BIT(4)
+#define B_AX_PTCL_TRIGGER_SS_EN_1 BIT(3)
+#define B_AX_PTCL_TRIGGER_SS_EN_0 BIT(2)
+#define B_AX_CMAC_TX_MODE_1 BIT(1)
+#define B_AX_CMAC_TX_MODE_0 BIT(0)
+
#define R_AX_AMPDU_AGG_LIMIT 0xC610
#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
@@ -1008,6 +2223,18 @@
#define B_AX_PORT_DROP_4_0_MASK GENMASK(20, 16)
#define B_AX_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+#define R_AX_PTCLRPT_FULL_HDL 0xC660
+#define R_AX_PTCLRPT_FULL_HDL_C1 0xE660
+#define B_AX_RPT_LATCH_PHY_TIME_MASK GENMASK(15, 12)
+#define B_AX_F2PCMD_FWWD_RLS_MODE BIT(9)
+#define B_AX_F2PCMD_RPT_EN BIT(8)
+#define B_AX_BCN_RPT_PATH_MASK GENMASK(7, 6)
+#define B_AX_SPE_RPT_PATH_MASK GENMASK(5, 4)
+#define FWD_TO_WLCPU 1
+#define B_AX_TX_RPT_PATH_MASK GENMASK(3, 2)
+#define B_AX_F2PCMDRPT_FULL_DROP BIT(1)
+#define B_AX_NON_F2PCMDRPT_FULL_DROP BIT(0)
+
#define R_AX_BT_PLT 0xC67C
#define R_AX_BT_PLT_C1 0xE67C
#define B_AX_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
@@ -1035,8 +2262,49 @@
#define R_AX_PTCL_IMR0 0xC6C0
#define R_AX_PTCL_IMR0_C1 0xE6C0
+#define B_AX_F2PCMD_PKTID_ERR_INT_EN BIT(31)
+#define B_AX_F2PCMD_RD_PKTID_ERR_INT_EN BIT(30)
+#define B_AX_F2PCMD_ASSIGN_PKTID_ERR_INT_EN BIT(29)
#define B_AX_F2PCMD_USER_ALLC_ERR_INT_EN BIT(28)
+#define B_AX_RX_SPF_U0_PKTID_ERR_INT_EN BIT(27)
+#define B_AX_TX_SPF_U1_PKTID_ERR_INT_EN BIT(26)
+#define B_AX_TX_SPF_U2_PKTID_ERR_INT_EN BIT(25)
+#define B_AX_TX_SPF_U3_PKTID_ERR_INT_EN BIT(24)
#define B_AX_TX_RECORD_PKTID_ERR_INT_EN BIT(23)
+#define B_AX_F2PCMD_EMPTY_ERR_INT_EN BIT(15)
+#define B_AX_TWTSP_QSEL_ERR_INT_EN BIT(14)
+#define B_AX_BCNQ_ORDER_ERR_INT_EN BIT(12)
+#define B_AX_Q_PKTID_ERR_INT_EN BIT(11)
+#define B_AX_D_PKTID_ERR_INT_EN BIT(10)
+#define B_AX_TXPRT_FULL_DROP_ERR_INT_EN BIT(9)
+#define B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN BIT(8)
+#define B_AX_FSM1_TIMEOUT_ERR_INT_EN BIT(1)
+#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_AX_PTCL_IMR_CLR_ALL GENMASK(31, 0)
+#define B_AX_PTCL_IMR_CLR (B_AX_FSM_TIMEOUT_ERR_INT_EN | \
+ B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN | \
+ B_AX_TXPRT_FULL_DROP_ERR_INT_EN | \
+ B_AX_D_PKTID_ERR_INT_EN | \
+ B_AX_Q_PKTID_ERR_INT_EN | \
+ B_AX_BCNQ_ORDER_ERR_INT_EN | \
+ B_AX_TWTSP_QSEL_ERR_INT_EN | \
+ B_AX_F2PCMD_EMPTY_ERR_INT_EN | \
+ B_AX_TX_RECORD_PKTID_ERR_INT_EN | \
+ B_AX_TX_SPF_U3_PKTID_ERR_INT_EN | \
+ B_AX_TX_SPF_U2_PKTID_ERR_INT_EN | \
+ B_AX_TX_SPF_U1_PKTID_ERR_INT_EN | \
+ B_AX_RX_SPF_U0_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | \
+ B_AX_F2PCMD_ASSIGN_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_RD_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_PKTID_ERR_INT_EN)
+#define B_AX_PTCL_IMR_SET (B_AX_FSM_TIMEOUT_ERR_INT_EN | \
+ B_AX_TX_RECORD_PKTID_ERR_INT_EN | \
+ B_AX_F2PCMD_USER_ALLC_ERR_INT_EN)
+#define B_AX_PTCL_IMR_CLR_V1 (B_AX_FSM1_TIMEOUT_ERR_INT_EN | \
+ B_AX_FSM_TIMEOUT_ERR_INT_EN)
+#define B_AX_PTCL_IMR_SET_V1 (B_AX_FSM1_TIMEOUT_ERR_INT_EN | \
+ B_AX_FSM_TIMEOUT_ERR_INT_EN)
#define R_AX_PTCL_ISR0 0xC6C4
#define R_AX_PTCL_ISR0_C1 0xE6C4
@@ -1063,10 +2331,182 @@
#define R_AX_DLE_CTRL_C1 0xE800
#define B_AX_NO_RESERVE_PAGE_ERR_IMR BIT(23)
#define B_AX_RXDATA_FSM_HANG_ERROR_IMR BIT(15)
+#define B_AX_RXSTS_FSM_HANG_ERROR_IMR BIT(14)
+#define B_AX_DLE_IMR_CLR (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
+ B_AX_RXDATA_FSM_HANG_ERROR_IMR | \
+ B_AX_NO_RESERVE_PAGE_ERR_IMR)
+#define B_AX_DLE_IMR_SET (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
+ B_AX_RXDATA_FSM_HANG_ERROR_IMR)
+
+#define R_AX_RXDMA_CTRL_0 0xC804
+#define R_AX_RXDMA_CTRL_0_C1 0xE804
+#define B_AX_RXDMA_DBGOUT_EN BIT(31)
+#define B_AX_RXDMA_DBG_SEL_MASK GENMASK(30, 29)
+#define B_AX_RXDMA_FIFO_DBG_SEL_MASK GENMASK(28, 25)
+#define B_AX_RXDMA_DEFAULT_PAGE_MASK GENMASK(22, 21)
+#define B_AX_RXDMA_BUFF_REQ_PRI_MASK GENMASK(20, 19)
+#define B_AX_RXDMA_TGT_QUEID_MASK GENMASK(18, 13)
+#define B_AX_RXDMA_TGT_PRID_MASK GENMASK(12, 10)
+#define B_AX_RXDMA_DIS_CSI_RELEASE BIT(9)
+#define B_AX_RXDMA_DIS_RXSTS_WAIT_PTR_CLR BIT(7)
+#define B_AX_RXDMA_DIS_CSI_WAIT_PTR_CLR BIT(6)
+#define B_AX_RXSTS_PTR_FULL_MODE BIT(5)
+#define B_AX_CSI_PTR_FULL_MODE BIT(4)
+#define B_AX_RU3_PTR_FULL_MODE BIT(3)
+#define B_AX_RU2_PTR_FULL_MODE BIT(2)
+#define B_AX_RU1_PTR_FULL_MODE BIT(1)
+#define B_AX_RU0_PTR_FULL_MODE BIT(0)
+#define RX_FULL_MODE (B_AX_RU0_PTR_FULL_MODE | B_AX_RU1_PTR_FULL_MODE | \
+ B_AX_RU2_PTR_FULL_MODE | B_AX_RU3_PTR_FULL_MODE | \
+ B_AX_CSI_PTR_FULL_MODE | B_AX_RXSTS_PTR_FULL_MODE)
+
#define R_AX_RXDMA_PKT_INFO_0 0xC814
#define R_AX_RXDMA_PKT_INFO_1 0xC818
#define R_AX_RXDMA_PKT_INFO_2 0xC81C
+#define R_AX_RX_ERR_FLAG_IMR 0xC804
+#define R_AX_RX_ERR_FLAG_IMR_C1 0xE804
+#define B_AX_RX_GET_NULL_PKT_ERR_MSK BIT(30)
+#define B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK BIT(29)
+#define B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK BIT(28)
+#define B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK BIT(27)
+#define B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK BIT(26)
+#define B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK BIT(25)
+#define B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK BIT(24)
+#define B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK BIT(23)
+#define B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK BIT(22)
+#define B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK BIT(21)
+#define B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK BIT(20)
+#define B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK BIT(19)
+#define B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK BIT(18)
+#define B_AX_RX_RU0_ZERO_LEN_ERR_MSK BIT(17)
+#define B_AX_RX_RU1_ZERO_LEN_ERR_MSK BIT(16)
+#define B_AX_RX_RU2_ZERO_LEN_ERR_MSK BIT(15)
+#define B_AX_RX_RU3_ZERO_LEN_ERR_MSK BIT(14)
+#define B_AX_RX_RU4_ZERO_LEN_ERR_MSK BIT(13)
+#define B_AX_RX_RU5_ZERO_LEN_ERR_MSK BIT(12)
+#define B_AX_RX_RU6_ZERO_LEN_ERR_MSK BIT(11)
+#define B_AX_RX_RU7_ZERO_LEN_ERR_MSK BIT(10)
+#define B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK BIT(9)
+#define B_AX_RX_CSI_ZERO_LEN_ERR_MSK BIT(8)
+#define B_AX_PLE_DATA_OPT_FSM_HANG_MSK BIT(7)
+#define B_AX_PLE_RXDATA_REQ_BUF_FSM_HANG_MSK BIT(6)
+#define B_AX_PLE_TXRPT_REQ_BUF_FSM_HANG_MSK BIT(5)
+#define B_AX_PLE_WD_OPT_FSM_HANG_MSK BIT(4)
+#define B_AX_PLE_ENQ_FSM_HANG_MSK BIT(3)
+#define B_AX_RXDATA_ENQUE_ORDER_ERR_MSK BIT(2)
+#define B_AX_RXSTS_ENQUE_ORDER_ERR_MSK BIT(1)
+#define B_AX_RX_CSI_PKT_NUM_ERR_MSK BIT(0)
+#define B_AX_RX_ERR_IMR_CLR_V1 (B_AX_RXSTS_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RXDATA_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RX_CSI_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU7_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU6_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU5_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU4_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU3_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU2_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU1_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU0_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_GET_NULL_PKT_ERR_MSK)
+#define B_AX_RX_ERR_IMR_SET_V1 (B_AX_RXSTS_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RXDATA_ENQUE_ORDER_ERR_MSK | \
+ B_AX_RX_CSI_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU7_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU6_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU5_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU4_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU3_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU2_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU1_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_RU0_ZERO_LEN_ERR_MSK | \
+ B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK | \
+ B_AX_RX_GET_NULL_PKT_ERR_MSK)
+
+#define R_AX_TX_ERR_FLAG_IMR 0xC870
+#define R_AX_TX_ERR_FLAG_IMR_C1 0xE870
+#define B_AX_TX_RU0_FSM_HANG_ERR_MSK BIT(31)
+#define B_AX_TX_RU1_FSM_HANG_ERR_MSK BIT(30)
+#define B_AX_TX_RU2_FSM_HANG_ERR_MSK BIT(29)
+#define B_AX_TX_RU3_FSM_HANG_ERR_MSK BIT(28)
+#define B_AX_TX_RU4_FSM_HANG_ERR_MSK BIT(27)
+#define B_AX_TX_RU5_FSM_HANG_ERR_MSK BIT(26)
+#define B_AX_TX_RU6_FSM_HANG_ERR_MSK BIT(25)
+#define B_AX_TX_RU7_FSM_HANG_ERR_MSK BIT(24)
+#define B_AX_TX_RU8_FSM_HANG_ERR_MSK BIT(23)
+#define B_AX_TX_RU9_FSM_HANG_ERR_MSK BIT(22)
+#define B_AX_TX_RU10_FSM_HANG_ERR_MSK BIT(21)
+#define B_AX_TX_RU11_FSM_HANG_ERR_MSK BIT(20)
+#define B_AX_TX_RU12_FSM_HANG_ERR_MSK BIT(19)
+#define B_AX_TX_RU13_FSM_HANG_ERR_MSK BIT(18)
+#define B_AX_TX_RU14_FSM_HANG_ERR_MSK BIT(17)
+#define B_AX_TX_RU15_FSM_HANG_ERR_MSK BIT(16)
+#define B_AX_TX_CSI_FSM_HANG_ERR_MSK BIT(15)
+#define B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK BIT(14)
+#define B_AX_TX_ERR_IMR_CLR_V1 (B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK | \
+ B_AX_TX_CSI_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU7_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU6_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU5_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU4_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU3_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU2_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU1_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU0_FSM_HANG_ERR_MSK)
+#define B_AX_TX_ERR_IMR_SET_V1 (B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK | \
+ B_AX_TX_CSI_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU7_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU6_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU5_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU4_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU3_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU2_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU1_FSM_HANG_ERR_MSK | \
+ B_AX_TX_RU0_FSM_HANG_ERR_MSK)
+
+#define R_AX_TCR0 0xCA00
+#define R_AX_TCR0_C1 0xEA00
+#define B_AX_TCR_ZLD_NUM_MASK GENMASK(31, 24)
+#define B_AX_TCR_UDF_EN BIT(23)
+#define B_AX_TCR_UDF_THSD_MASK GENMASK(22, 16)
+#define TCR_UDF_THSD 0x6
+#define B_AX_TCR_ERRSTEN_MASK GENMASK(15, 10)
+#define B_AX_TCR_VHTSIGA1_TXPS BIT(9)
+#define B_AX_TCR_PLCP_ERRHDL_EN BIT(8)
+#define B_AX_TCR_PADSEL BIT(7)
+#define B_AX_TCR_MASK_SIGBCRC BIT(6)
+#define B_AX_TCR_SR_VAL15_ALLOW BIT(5)
+#define B_AX_TCR_EN_EOF BIT(4)
+#define B_AX_TCR_EN_SCRAM_INC BIT(3)
+#define B_AX_TCR_EN_20MST BIT(2)
+#define B_AX_TCR_CRC BIT(1)
+#define B_AX_TCR_DISGCLK BIT(0)
+
#define R_AX_TCR1 0xCA04
#define R_AX_TCR1_C1 0xEA04
#define B_AX_TXDFIFO_THRESHOLD GENMASK(31, 28)
@@ -1080,9 +2520,27 @@
#define B_AX_TCR_ZLD_USTIME_AFTERPHYTXON GENMASK(11, 8)
#define B_AX_TCR_TXTIMEOUT GENMASK(7, 0)
+#define R_AX_MD_TSFT_STMP_CTL 0xCA08
+#define R_AX_MD_TSFT_STMP_CTL_C1 0xEA08
+#define B_AX_TSFT_OFS_MASK GENMASK(31, 16)
+#define B_AX_STMP_THSD_MASK GENMASK(15, 8)
+#define B_AX_UPD_HGQMD BIT(1)
+#define B_AX_UPD_TIMIE BIT(0)
+
#define R_AX_PPWRBIT_SETTING 0xCA0C
#define R_AX_PPWRBIT_SETTING_C1 0xEA0C
+#define R_AX_TXD_FIFO_CTRL 0xCA1C
+#define R_AX_TXD_FIFO_CTRL_C1 0xEA1C
+#define B_AX_NON_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(28, 24)
+#define B_AX_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(20, 16)
+#define B_AX_TXDFIFO_HIGH_MCS_THRE_MASK GENMASK(15, 12)
+#define TXDFIFO_HIGH_MCS_THRE 0x7
+#define B_AX_TXDFIFO_LOW_MCS_THRE_MASK GENMASK(11, 8)
+#define TXDFIFO_LOW_MCS_THRE 0x7
+#define B_AX_HIGH_MCS_PHY_RATE_MASK GENMASK(7, 4)
+#define B_AX_BW_PHY_RATE_MASK GENMASK(1, 0)
+
#define R_AX_MACTX_DBG_SEL_CNT 0xCA20
#define R_AX_MACTX_DBG_SEL_CNT_C1 0xEA20
#define B_AX_MACTX_MPDU_CNT GENMASK(31, 24)
@@ -1140,10 +2598,35 @@
#define WMAC_SPEC_SIFS_OFDM_52C 0x11
#define WMAC_SPEC_SIFS_CCK 0xA
+#define R_AX_TRXPTCL_RRSR_CTL_0 0xCC08
+#define R_AX_TRXPTCL_RRSR_CTL_0_C1 0xEC08
+#define B_AX_RESP_TX_MACID_CCA_TH_EN BIT(31)
+#define B_AX_RESP_TX_PWRMODE_MASK GENMASK(30, 28)
+#define B_AX_FTM_RRSR_RATE_EN_MASK GENMASK(27, 24)
+#define B_AX_NESS_MASK GENMASK(23, 22)
+#define B_AX_WMAC_RESP_DOPPLEB_AX_EN BIT(21)
+#define B_AX_WMAC_RESP_DCM_EN BIT(20)
+#define B_AX_WMAC_RRSB_AX_CCK_MASK GENMASK(19, 16)
+#define B_AX_WMAC_RESP_RATE_EN_MASK GENMASK(15, 12)
+#define B_AX_WMAC_RESP_RSC_MASK GENMASK(11, 10)
+#define B_AX_WMAC_RESP_REF_RATE_SEL BIT(9)
+#define B_AX_WMAC_RESP_REF_RATE_MASK GENMASK(8, 0)
+
#define R_AX_MAC_LOOPBACK 0xCC20
#define R_AX_MAC_LOOPBACK_C1 0xEC20
#define B_AX_MACLBK_EN BIT(0)
+#define R_AX_WMAC_NAV_CTL 0xCC80
+#define R_AX_WMAC_NAV_CTL_C1 0xEC80
+#define B_AX_WMAC_NAV_UPPER_EN BIT(26)
+#define B_AX_WMAC_0P125US_TIMER_MASK GENMASK(25, 18)
+#define B_AX_WMAC_PLCP_UP_NAV_EN BIT(17)
+#define B_AX_WMAC_TF_UP_NAV_EN BIT(16)
+#define B_AX_WMAC_NAV_UPPER_MASK GENMASK(15, 8)
+#define NAV_12MS 0xBC
+#define NAV_25MS 0xC4
+#define B_AX_WMAC_RTS_RST_DUR_MASK GENMASK(7, 0)
+
#define R_AX_RXTRIG_TEST_USER_2 0xCCB0
#define R_AX_RXTRIG_TEST_USER_2_C1 0xECB0
#define B_AX_RXTRIG_MACID_MASK GENMASK(31, 24)
@@ -1153,6 +2636,37 @@
#define B_AX_RXTRIG_EN BIT(16)
#define B_AX_RXTRIG_USERINFO_2_MASK GENMASK(15, 0)
+#define R_AX_TRXPTCL_ERROR_INDICA_MASK 0xCCBC
+#define R_AX_TRXPTCL_ERROR_INDICA_MASK_C1 0xECBC
+#define B_AX_WMAC_MODE BIT(22)
+#define B_AX_WMAC_TIMETOUT_THR_MASK GENMASK(21, 16)
+#define B_AX_RMAC_FTM BIT(8)
+#define B_AX_RMAC_CSI BIT(7)
+#define B_AX_TMAC_MIMO_CTRL BIT(6)
+#define B_AX_TMAC_RXTB BIT(5)
+#define B_AX_TMAC_HWSIGB_GEN BIT(4)
+#define B_AX_TMAC_TXPLCP BIT(3)
+#define B_AX_TMAC_RESP BIT(2)
+#define B_AX_TMAC_TXCTL BIT(1)
+#define B_AX_TMAC_MACTX BIT(0)
+#define B_AX_TMAC_IMR_CLR_V1 (B_AX_TMAC_MACTX | \
+ B_AX_TMAC_TXCTL | \
+ B_AX_TMAC_RESP | \
+ B_AX_TMAC_TXPLCP | \
+ B_AX_TMAC_HWSIGB_GEN | \
+ B_AX_TMAC_RXTB | \
+ B_AX_TMAC_MIMO_CTRL | \
+ B_AX_RMAC_CSI | \
+ B_AX_RMAC_FTM)
+#define B_AX_TMAC_IMR_SET_V1 (B_AX_TMAC_MACTX | \
+ B_AX_TMAC_TXCTL | \
+ B_AX_TMAC_RESP | \
+ B_AX_TMAC_TXPLCP | \
+ B_AX_TMAC_HWSIGB_GEN | \
+ B_AX_TMAC_RXTB | \
+ B_AX_TMAC_MIMO_CTRL | \
+ B_AX_RMAC_FTM)
+
#define R_AX_WMAC_TX_TF_INFO_0 0xCCD0
#define R_AX_WMAC_TX_TF_INFO_0_C1 0xECD0
#define B_AX_WMAC_TX_TF_INFO_SEL_MASK GENMASK(2, 0)
@@ -1167,11 +2681,55 @@
#define R_AX_TMAC_ERR_IMR_ISR 0xCCEC
#define R_AX_TMAC_ERR_IMR_ISR_C1 0xECEC
+#define B_AX_TMAC_TXPLCP_ERR_CLR BIT(19)
+#define B_AX_TMAC_RESP_ERR_CLR BIT(18)
+#define B_AX_TMAC_TXCTL_ERR_CLR BIT(17)
+#define B_AX_TMAC_MACTX_ERR_CLR BIT(16)
+#define B_AX_TMAC_TXPLCP_ERR BIT(14)
+#define B_AX_TMAC_RESP_ERR BIT(13)
+#define B_AX_TMAC_TXCTL_ERR BIT(12)
+#define B_AX_TMAC_MACTX_ERR BIT(11)
+#define B_AX_TMAC_TXPLCP_INT_EN BIT(10)
+#define B_AX_TMAC_RESP_INT_EN BIT(9)
+#define B_AX_TMAC_TXCTL_INT_EN BIT(8)
+#define B_AX_TMAC_MACTX_INT_EN BIT(7)
+#define B_AX_WMAC_INT_MODE BIT(6)
+#define B_AX_TMAC_TIMETOUT_THR_MASK GENMASK(5, 0)
+#define B_AX_TMAC_IMR_CLR (B_AX_TMAC_MACTX_INT_EN | \
+ B_AX_TMAC_TXCTL_INT_EN | \
+ B_AX_TMAC_RESP_INT_EN | \
+ B_AX_TMAC_TXPLCP_INT_EN)
+#define B_AX_TMAC_IMR_SET (B_AX_TMAC_MACTX_INT_EN | \
+ B_AX_TMAC_TXCTL_INT_EN | \
+ B_AX_TMAC_RESP_INT_EN | \
+ B_AX_TMAC_TXPLCP_INT_EN)
#define R_AX_DBGSEL_TRXPTCL 0xCCF4
#define R_AX_DBGSEL_TRXPTCL_C1 0xECF4
#define B_AX_DBGSEL_TRXPTCL_MASK GENMASK(7, 0)
+#define R_AX_PHYINFO_ERR_IMR_V1 0xCCF8
+#define R_AX_PHYINFO_ERR_IMR_V1_C1 0xECF8
+#define B_AX_PHYINTF_TIMEOUT_THR_MSAK_V1 GENMASK(21, 16)
+#define B_AX_CSI_ON_TIMEOUT_EN BIT(5)
+#define B_AX_STS_ON_TIMEOUT_EN BIT(4)
+#define B_AX_DATA_ON_TIMEOUT_EN BIT(3)
+#define B_AX_OFDM_CCA_TIMEOUT_EN BIT(2)
+#define B_AX_CCK_CCA_TIMEOUT_EN BIT(1)
+#define B_AX_PHY_TXON_TIMEOUT_EN BIT(0)
+#define B_AX_PHYINFO_IMR_CLR_V1 (B_AX_PHY_TXON_TIMEOUT_EN | \
+ B_AX_CCK_CCA_TIMEOUT_EN | \
+ B_AX_OFDM_CCA_TIMEOUT_EN | \
+ B_AX_DATA_ON_TIMEOUT_EN | \
+ B_AX_STS_ON_TIMEOUT_EN | \
+ B_AX_CSI_ON_TIMEOUT_EN)
+#define B_AX_PHYINFO_IMR_SET_V1 (B_AX_PHY_TXON_TIMEOUT_EN | \
+ B_AX_CCK_CCA_TIMEOUT_EN | \
+ B_AX_OFDM_CCA_TIMEOUT_EN | \
+ B_AX_DATA_ON_TIMEOUT_EN | \
+ B_AX_STS_ON_TIMEOUT_EN | \
+ B_AX_CSI_ON_TIMEOUT_EN)
+
#define R_AX_PHYINFO_ERR_IMR 0xCCFC
#define R_AX_PHYINFO_ERR_IMR_C1 0xECFC
#define B_AX_CSI_ON_TIMEOUT BIT(29)
@@ -1187,6 +2745,12 @@
#define B_AX_CCK_CCA_TIMEOUT_INT_EN BIT(17)
#define B_AX_PHY_TXON_TIMEOUT_INT_EN BIT(16)
#define B_AX_PHYINTF_TIMEOUT_THR_MSAK GENMASK(5, 0)
+#define B_AX_PHYINFO_IMR_EN_ALL (B_AX_PHY_TXON_TIMEOUT_INT_EN | \
+ B_AX_CCK_CCA_TIMEOUT_INT_EN | \
+ B_AX_OFDM_CCA_TIMEOUT_INT_EN | \
+ B_AX_DATA_ON_TIMEOUT_INT_EN | \
+ B_AX_STS_ON_TIMEOUT_INT_EN | \
+ B_AX_CSI_ON_TIMEOUT_INT_EN)
#define R_AX_PHYINFO_ERR_ISR 0xCCFC
#define R_AX_PHYINFO_ERR_ISR_C1 0xECFC
@@ -1320,6 +2884,8 @@
#define R_AX_RESPBA_CAM_CTRL 0xCE3C
#define R_AX_RESPBA_CAM_CTRL_C1 0xEE3C
#define B_AX_SSN_SEL BIT(2)
+#define B_AX_BACAM_RST_MASK GENMASK(1, 0)
+#define S_AX_BACAM_RST_ALL 2
#define R_AX_PPDU_STAT 0xCE40
#define R_AX_PPDU_STAT_C1 0xEE40
@@ -1335,6 +2901,11 @@
#define R_AX_RX_SR_CTRL_C1 0xEE4A
#define B_AX_SR_EN BIT(0)
+#define R_AX_CSIRPT_OPTION 0xCE64
+#define R_AX_CSIRPT_OPTION_C1 0xEE64
+#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
+#define B_AX_CSIPRT_VHTSU_AID_EN BIT(24)
+
#define R_AX_RX_STATE_MONITOR 0xCEF0
#define R_AX_RX_STATE_MONITOR_C1 0xEEF0
#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0)
@@ -1362,6 +2933,51 @@
#define B_AX_BMAC_DMA_TIMEOUT_FLAG BIT(2)
#define B_AX_BMAC_DATA_ON_TO_IDLE_TIMEOUT_FLAG BIT(1)
#define B_AX_BMAC_CCA_TO_IDLE_TIMEOUT_FLAG BIT(0)
+#define B_AX_RMAC_IMR_CLR (B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | \
+ B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | \
+ B_AX_RMAC_DMA_TIMEOUT_INT_EN | \
+ B_AX_RMAC_CCA_TIMEOUT_INT_EN | \
+ B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN | \
+ B_AX_RMAC_CSI_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN)
+#define B_AX_RMAC_IMR_SET (B_AX_RMAC_DMA_TIMEOUT_INT_EN | \
+ B_AX_RMAC_CSI_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_TIMEOUT_INT_EN | \
+ B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN)
+
+#define R_AX_RX_ERR_IMR 0xCEF8
+#define R_AX_RX_ERR_IMR_C1 0xEEF8
+#define B_AX_RX_ERR_TRIG_ACT_TO_MSK BIT(9)
+#define B_AX_RX_ERR_STS_ACT_TO_MSK BIT(8)
+#define B_AX_RX_ERR_CSI_ACT_TO_MSK BIT(7)
+#define B_AX_RX_ERR_ACT_TO_MSK BIT(6)
+#define B_AX_CSI_DATAON_ASSERT_TO_MSK BIT(5)
+#define B_AX_DATAON_ASSERT_TO_MSK BIT(4)
+#define B_AX_CCA_ASSERT_TO_MSK BIT(3)
+#define B_AX_RX_ERR_DMA_TO_MSK BIT(2)
+#define B_AX_RX_ERR_DATA_TO_MSK BIT(1)
+#define B_AX_RX_ERR_CCA_TO_MSK BIT(0)
+#define B_AX_RMAC_IMR_CLR_V1 (B_AX_RX_ERR_CCA_TO_MSK | \
+ B_AX_RX_ERR_DATA_TO_MSK | \
+ B_AX_RX_ERR_DMA_TO_MSK | \
+ B_AX_CCA_ASSERT_TO_MSK | \
+ B_AX_DATAON_ASSERT_TO_MSK | \
+ B_AX_CSI_DATAON_ASSERT_TO_MSK | \
+ B_AX_RX_ERR_ACT_TO_MSK | \
+ B_AX_RX_ERR_CSI_ACT_TO_MSK | \
+ B_AX_RX_ERR_STS_ACT_TO_MSK | \
+ B_AX_RX_ERR_TRIG_ACT_TO_MSK)
+#define B_AX_RMAC_IMR_SET_V1 (B_AX_RX_ERR_CCA_TO_MSK | \
+ B_AX_RX_ERR_DATA_TO_MSK | \
+ B_AX_RX_ERR_DMA_TO_MSK | \
+ B_AX_CCA_ASSERT_TO_MSK | \
+ B_AX_DATAON_ASSERT_TO_MSK | \
+ B_AX_CSI_DATAON_ASSERT_TO_MSK | \
+ B_AX_RX_ERR_ACT_TO_MSK | \
+ B_AX_RX_ERR_CSI_ACT_TO_MSK | \
+ B_AX_RX_ERR_STS_ACT_TO_MSK | \
+ B_AX_RX_ERR_TRIG_ACT_TO_MSK)
#define R_AX_RMAC_PLCP_MON 0xCEF8
#define R_AX_RMAC_PLCP_MON_C1 0xEEF8
@@ -1391,8 +3007,10 @@
#define B_AX_PWR_UL_TB_CTRL_EN BIT(31)
#define R_AX_PWR_UL_TB_1T 0xD28C
#define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_1T_V1_MASK GENMASK(7, 0)
#define R_AX_PWR_UL_TB_2T 0xD290
#define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_2T_V1_MASK GENMASK(7, 0)
#define R_AX_PWR_BY_RATE_TABLE0 0xD2C0
#define R_AX_PWR_BY_RATE_TABLE10 0xD2E8
#define R_AX_PWR_BY_RATE R_AX_PWR_BY_RATE_TABLE0
@@ -1408,22 +3026,104 @@
#define R_AX_PWR_MACID_LMT_TABLE0 0xD36C
#define R_AX_PWR_MACID_LMT_TABLE127 0xD568
+#define R_AX_PATH_COM0 0xD800
+#define AX_PATH_COM0_DFVAL 0x00000000
+#define AX_PATH_COM0_PATHA 0x08889880
+#define AX_PATH_COM0_PATHB 0x11111900
+#define AX_PATH_COM0_PATHAB 0x19999980
+#define R_AX_PATH_COM1 0xD804
+#define AX_PATH_COM1_DFVAL 0x00000000
+#define AX_PATH_COM1_PATHA 0x13111111
+#define AX_PATH_COM1_PATHB 0x23222222
+#define AX_PATH_COM1_PATHAB 0x33333333
+#define R_AX_PATH_COM2 0xD808
+#define AX_PATH_COM2_DFVAL 0x00000000
+#define AX_PATH_COM2_PATHA 0x01209313
+#define AX_PATH_COM2_PATHB 0x01209323
+#define AX_PATH_COM2_PATHAB 0x01209333
+#define R_AX_PATH_COM3 0xD80C
+#define AX_PATH_COM3_DFVAL 0x49249249
+#define R_AX_PATH_COM4 0xD810
+#define AX_PATH_COM4_DFVAL 0x1C9C9C49
+#define R_AX_PATH_COM5 0xD814
+#define AX_PATH_COM5_DFVAL 0x39393939
+#define R_AX_PATH_COM6 0xD818
+#define AX_PATH_COM6_DFVAL 0x39393939
+#define R_AX_PATH_COM7 0xD81C
+#define AX_PATH_COM7_DFVAL 0x39393939
+#define AX_PATH_COM7_PATHA 0x39393939
+#define AX_PATH_COM7_PATHB 0x39383939
+#define AX_PATH_COM7_PATHAB 0x39393939
+#define R_AX_PATH_COM8 0xD820
+#define AX_PATH_COM8_DFVAL 0x00000000
+#define AX_PATH_COM8_PATHA 0x00003939
+#define AX_PATH_COM8_PATHB 0x00003938
+#define AX_PATH_COM8_PATHAB 0x00003939
+#define R_AX_PATH_COM9 0xD824
+#define AX_PATH_COM9_DFVAL 0x000007C0
+#define R_AX_PATH_COM10 0xD828
+#define AX_PATH_COM10_DFVAL 0xE0000000
+#define R_AX_PATH_COM11 0xD82C
+#define AX_PATH_COM11_DFVAL 0x00000000
+#define R_P80_AT_HIGH_FREQ_BB_WRP 0xD848
+#define B_P80_AT_HIGH_FREQ_BB_WRP BIT(28)
+#define R_AX_TSSI_CTRL_HEAD 0xD908
+#define R_AX_BANDEDGE_CFG 0xD94C
+#define B_AX_BANDEDGE_CFG_IDX_MASK GENMASK(31, 30)
+#define R_AX_TSSI_CTRL_TAIL 0xD95C
+
#define R_AX_TXPWR_IMR 0xD9E0
#define R_AX_TXPWR_IMR_C1 0xF9E0
#define R_AX_TXPWR_ISR 0xD9E4
#define R_AX_TXPWR_ISR_C1 0xF9E4
#define R_AX_BTC_CFG 0xDA00
+#define B_AX_BTC_EN BIT(31)
+#define B_AX_EN_EXT_BT_PINMUX BIT(29)
+#define B_AX_BTC_RST BIT(28)
+#define B_AX_BTC_DBG_SRC_SEL BIT(27)
+#define B_AX_BTC_MODE_MASK GENMASK(25, 24)
+#define B_AX_INV_WL_ACT2 BIT(17)
+#define B_AX_BTG_LNA1_GAIN_SEL BIT(16)
+#define B_AX_COEX_DLY_CLK_MASK GENMASK(15, 8)
+#define B_AX_IGN_GNT_BT2_RX BIT(7)
+#define B_AX_IGN_GNT_BT2_TX BIT(6)
+#define B_AX_IGN_GNT_BT2 BIT(5)
+#define B_AX_BTC_DBG_SEL_MASK GENMASK(4, 3)
#define B_AX_DIS_BTC_CLK_G BIT(2)
+#define B_AX_GNT_WL_RX_CTRL BIT(1)
+#define B_AX_WL_SRC BIT(0)
+
+#define R_AX_RTK_MODE_CFG_V1 0xDA04
+#define R_AX_RTK_MODE_CFG_V1_C1 0xFA04
+#define B_AX_BT_BLE_EN_V1 BIT(24)
+#define B_AX_BT_ULTRA_EN BIT(16)
+#define B_AX_BT_L_RX_ULTRA_MASK GENMASK(15, 14)
+#define B_AX_BT_L_TX_ULTRA_MASK GENMASK(13, 12)
+#define B_AX_BT_H_RX_ULTRA_MASK GENMASK(11, 10)
+#define B_AX_BT_H_TX_ULTRA_MASK GENMASK(9, 8)
+#define B_AX_SAMPLE_CLK_MASK GENMASK(7, 0)
#define R_AX_WL_PRI_MSK 0xDA10
#define B_AX_PTA_WL_PRI_MASK_BCNQ BIT(8)
+#define R_AX_BT_CNT_CFG 0xDA10
+#define R_AX_BT_CNT_CFG_C1 0xFA10
+#define B_AX_BT_CNT_RST_V1 BIT(1)
+#define B_AX_BT_CNT_EN BIT(0)
+
+#define R_BTC_BT_CNT_HIGH 0xDA14
+#define R_BTC_BT_CNT_LOW 0xDA18
+
#define R_AX_BTC_FUNC_EN 0xDA20
#define R_AX_BTC_FUNC_EN_C1 0xFA20
#define B_AX_PTA_WL_TX_EN BIT(1)
#define B_AX_PTA_EDCCA_EN BIT(0)
+#define R_BTC_COEX_WL_REQ 0xDA24
+#define B_BTC_TX_BCN_HI BIT(22)
+#define B_BTC_RSP_ACK_HI BIT(10)
+
#define R_BTC_BREAK_TABLE 0xDA2C
#define BTC_BREAK_PARAM 0xf0ffffff
@@ -1451,6 +3151,8 @@
#define B_AX_WL_ACT_MASK_ENABLE BIT(1)
#define B_AX_ENHANCED_BT BIT(0)
+#define R_AX_BT_BREAK_TABLE 0xDA44
+
#define R_AX_BT_STAST_HIGH 0xDA44
#define B_AX_STATIS_BT_HI_RX_MASK GENMASK(31, 16)
#define B_AX_STATIS_BT_HI_TX_MASK GENMASK(15, 0)
@@ -1458,6 +3160,43 @@
#define B_AX_STATIS_BT_LO_TX_1_MASK GENMASK(15, 0)
#define B_AX_STATIS_BT_LO_RX_1_MASK GENMASK(31, 16)
+#define R_AX_GNT_SW_CTRL 0xDA48
+#define R_AX_GNT_SW_CTRL_C1 0xFA48
+#define B_AX_WL_ACT2_VAL BIT(21)
+#define B_AX_WL_ACT2_SWCTRL BIT(20)
+#define B_AX_WL_ACT_VAL BIT(19)
+#define B_AX_WL_ACT_SWCTRL BIT(18)
+#define B_AX_GNT_BT_RX_VAL BIT(17)
+#define B_AX_GNT_BT_RX_SWCTRL BIT(16)
+#define B_AX_GNT_BT_TX_VAL BIT(15)
+#define B_AX_GNT_BT_TX_SWCTRL BIT(14)
+#define B_AX_GNT_WL_RX_VAL BIT(13)
+#define B_AX_GNT_WL_RX_SWCTRL BIT(12)
+#define B_AX_GNT_WL_TX_VAL BIT(11)
+#define B_AX_GNT_WL_TX_SWCTRL BIT(10)
+#define B_AX_GNT_BT_RFC_S1_VAL BIT(9)
+#define B_AX_GNT_BT_RFC_S1_SWCTRL BIT(8)
+#define B_AX_GNT_WL_RFC_S1_VAL BIT(7)
+#define B_AX_GNT_WL_RFC_S1_SWCTRL BIT(6)
+#define B_AX_GNT_BT_RFC_S0_VAL BIT(5)
+#define B_AX_GNT_BT_RFC_S0_SWCTRL BIT(4)
+#define B_AX_GNT_WL_RFC_S0_VAL BIT(3)
+#define B_AX_GNT_WL_RFC_S0_SWCTRL BIT(2)
+#define B_AX_GNT_WL_BB_VAL BIT(1)
+#define B_AX_GNT_WL_BB_SWCTRL BIT(0)
+
+#define R_AX_GNT_VAL 0x0054
+#define B_AX_GNT_BT_RFC_S1_STA BIT(5)
+#define B_AX_GNT_WL_RFC_S1_STA BIT(4)
+#define B_AX_GNT_BT_RFC_S0_STA BIT(3)
+#define B_AX_GNT_WL_RFC_S0_STA BIT(2)
+
+#define R_AX_GNT_VAL_V1 0xDA4C
+#define B_AX_GNT_BT_RFC_S1 BIT(4)
+#define B_AX_GNT_BT_RFC_S0 BIT(3)
+#define B_AX_GNT_WL_RFC_S1 BIT(2)
+#define B_AX_GNT_WL_RFC_S0 BIT(1)
+
#define R_AX_TDMA_MODE 0xDA4C
#define R_AX_TDMA_MODE_C1 0xFA4C
#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
@@ -1480,6 +3219,9 @@
#define R_AX_LTE_WDATA 0xDAF4
#define R_AX_LTE_RDATA 0xDAF8
+#define R_AX_MACID_ANT_TABLE 0xDC00
+#define R_AX_MACID_ANT_TABLE_LAST 0xDDFC
+
#define CMAC1_START_ADDR 0xE000
#define CMAC1_END_ADDR 0xFFFF
#define R_AX_CMAC_REG_END 0xFFFF
@@ -1525,6 +3267,7 @@
#define B_AX_GNT_BT_TX_SW_CTRL BIT(0)
#define RR_MOD 0x00
+#define RR_MOD_V1 0x10000
#define RR_MOD_IQK GENMASK(19, 4)
#define RR_MOD_DPK GENMASK(19, 5)
#define RR_MOD_MASK GENMASK(19, 16)
@@ -1536,6 +3279,7 @@
#define RR_MOD_V_DPK 0x5
#define RR_MOD_V_RXK1 0x6
#define RR_MOD_V_RXK2 0x7
+#define RR_MOD_NBW GENMASK(15, 14)
#define RR_MOD_M_RXG GENMASK(13, 4)
#define RR_MOD_M_RXBB GENMASK(9, 5)
#define RR_MODOPT 0x01
@@ -1544,9 +3288,38 @@
#define RR_WLSEL_AG GENMASK(18, 16)
#define RR_RSV1 0x05
#define RR_RSV1_RST BIT(0)
+#define RR_BBDC 0x10005
+#define RR_BBDC_SEL BIT(0)
#define RR_DTXLOK 0x08
#define RR_RSV2 0x09
+#define RR_LOKVB 0x0a
+#define RR_LOKVB_COI GENMASK(19, 14)
+#define RR_LOKVB_COQ GENMASK(9, 4)
+#define RR_TXIG 0x11
+#define RR_TXIG_TG GENMASK(16, 12)
+#define RR_TXIG_GR1 GENMASK(6, 4)
+#define RR_TXIG_GR0 GENMASK(1, 0)
+#define RR_CHTR 0x17
+#define RR_CHTR_MOD GENMASK(11, 10)
+#define RR_CHTR_TXRX GENMASK(9, 0)
#define RR_CFGCH 0x18
+#define RR_CFGCH_V1 0x10018
+#define RR_CFGCH_BAND1 GENMASK(17, 16)
+#define CFGCH_BAND1_2G 0
+#define CFGCH_BAND1_5G 1
+#define CFGCH_BAND1_6G 3
+#define RR_CFGCH_BAND0 GENMASK(9, 8)
+#define CFGCH_BAND0_2G 0
+#define CFGCH_BAND0_5G 1
+#define CFGCH_BAND0_6G 0
+#define RR_CFGCH_BW GENMASK(11, 10)
+#define RR_CFGCH_CH GENMASK(7, 0)
+#define CFGCH_BW_20M 3
+#define CFGCH_BW_40M 2
+#define CFGCH_BW_80M 1
+#define CFGCH_BW_160M 0
+#define RR_APK 0x19
+#define RR_APK_MOD GENMASK(5, 4)
#define RR_BTC 0x1a
#define RR_BTC_TXBB GENMASK(14, 12)
#define RR_BTC_RXBB GENMASK(11, 10)
@@ -1559,14 +3332,18 @@
#define RR_RXKPLL_OFF GENMASK(5, 0)
#define RR_RXKPLL_POW BIT(19)
#define RR_RSV4 0x1f
+#define RR_RSV4_AGH GENMASK(17, 16)
+#define RR_RSV4_PLLCH GENMASK(9, 0)
#define RR_RXK 0x20
-#define RR_RXK_PLLEN BIT(5)
-#define RR_RXK_SEL5G BIT(7)
#define RR_RXK_SEL2G BIT(8)
+#define RR_RXK_SEL5G BIT(7)
+#define RR_RXK_PLLEN BIT(5)
#define RR_LUTWA 0x33
#define RR_LUTWA_MASK GENMASK(9, 0)
+#define RR_LUTWA_M2 GENMASK(4, 0)
#define RR_LUTWD1 0x3e
#define RR_LUTWD0 0x3f
+#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
#define RR_TM_VAL GENMASK(6, 1)
@@ -1579,8 +3356,9 @@
#define RR_TXG2_ATT0 BIT(11)
#define RR_BSPAD 0x54
#define RR_TXGA 0x55
-#define RR_TXGA_LOK_EN BIT(0)
#define RR_TXGA_TRK_EN BIT(7)
+#define RR_TXGA_LOK_EXT GENMASK(4, 0)
+#define RR_TXGA_LOK_EN BIT(0)
#define RR_GAINTX 0x56
#define RR_GAINTX_ALL GENMASK(15, 0)
#define RR_GAINTX_PAD GENMASK(9, 5)
@@ -1603,52 +3381,85 @@
#define RR_BIASA2 0x63
#define RR_BIASA2_LB GENMASK(4, 2)
#define RR_TXATANK 0x64
+#define RR_TXATANK_LBSW2 GENMASK(17, 15)
#define RR_TXATANK_LBSW GENMASK(16, 15)
+#define RR_TXA2 0x65
+#define RR_TXA2_LDO GENMASK(19, 16)
#define RR_TRXIQ 0x66
#define RR_RSV6 0x6d
#define RR_TXPOW 0x7f
-#define RR_TXPOW_TXG BIT(1)
#define RR_TXPOW_TXA BIT(8)
+#define RR_TXPOW_TXAS BIT(7)
+#define RR_TXPOW_TXG BIT(1)
#define RR_RXPOW 0x80
#define RR_RXPOW_IQK GENMASK(17, 16)
#define RR_RXBB 0x83
+#define RR_RXBB_VOBUF GENMASK(15, 12)
#define RR_RXBB_C2G GENMASK(16, 10)
#define RR_RXBB_C1G GENMASK(9, 8)
#define RR_RXBB_ATTR GENMASK(7, 4)
#define RR_RXBB_ATTC GENMASK(2, 0)
+#define RR_RXG 0x84
+#define RR_RXG_IQKMOD GENMASK(19, 16)
#define RR_XGLNA2 0x85
#define RR_XGLNA2_SW GENMASK(1, 0)
+#define RR_RXAE 0x89
+#define RR_RXAE_IQKMOD GENMASK(3, 0)
#define RR_RXA 0x8a
#define RR_RXA_DPK GENMASK(9, 8)
#define RR_RXA2 0x8c
-#define RR_RXA2_C2 GENMASK(9, 3)
#define RR_RXA2_C1 GENMASK(12, 10)
+#define RR_RXA2_C2 GENMASK(9, 3)
+#define RR_RXA2_IATT GENMASK(7, 4)
+#define RR_RXA2_ATT GENMASK(3, 0)
#define RR_RXIQGEN 0x8d
#define RR_RXIQGEN_ATTL GENMASK(12, 8)
#define RR_RXIQGEN_ATTH GENMASK(14, 13)
#define RR_RXBB2 0x8f
-#define RR_EN_TIA_IDA GENMASK(11, 10)
#define RR_RXBB2_DAC_EN BIT(13)
+#define RR_RXBB2_CKT BIT(12)
+#define RR_EN_TIA_IDA GENMASK(11, 10)
+#define RR_RXBB2_IDAC GENMASK(11, 9)
+#define RR_RXBB2_EBW GENMASK(6, 5)
#define RR_XALNA2 0x90
#define RR_XALNA2_SW GENMASK(1, 0)
#define RR_DCK 0x92
+#define RR_DCK_DONE GENMASK(7, 5)
#define RR_DCK_FINE BIT(1)
#define RR_DCK_LV BIT(0)
#define RR_DCK1 0x93
+#define RR_DCK1_DONE BIT(5)
+#define RR_DCK1_CLR GENMASK(3, 0)
#define RR_DCK1_SEL BIT(3)
#define RR_DCK2 0x94
#define RR_DCK2_CYCLE GENMASK(7, 2)
+#define RR_DCKC 0x95
+#define RR_DCKC_CHK BIT(3)
+#define RR_IQGEN 0x97
+#define RR_IQGEN_BIAS GENMASK(11, 8)
+#define RR_TXIQK 0x98
+#define RR_TXIQK_ATT2 GENMASK(15, 12)
+#define RR_TIA 0x9e
+#define RR_TIA_N6 BIT(8)
#define RR_MIXER 0x9f
#define RR_MIXER_GN GENMASK(4, 3)
+#define RR_LOGEN 0xa3
+#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_XTALX2 0xb8
#define RR_MALSEL 0xbe
+#define RR_LCK_TRG 0xd3
+#define RR_LCK_TRGSEL BIT(8)
+#define RR_IQKPLL 0xdc
+#define RR_IQKPLL_MOD GENMASK(9, 8)
#define RR_RCKD 0xde
#define RR_RCKD_POW GENMASK(19, 13)
#define RR_RCKD_BW BIT(2)
#define RR_TXADBG 0xde
#define RR_LUTDBG 0xdf
+#define RR_LUTDBG_TIA BIT(12)
#define RR_LUTDBG_LOK BIT(2)
#define RR_LUTWE2 0xee
+#define RR_LUTWE2_RTXBW BIT(2)
#define RR_LUTWE 0xef
#define RR_LUTWE_LOK BIT(2)
#define RR_RFC 0xf0
@@ -1669,16 +3480,33 @@
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
#define B_ANAPAR_14 GENMASK(15, 0)
+#define R_RFE_E_A2 0x0334
+#define R_RFE_O_SEL_A2 0x0338
+#define R_RFE_SEL0_A2 0x033C
+#define R_RFE_SEL32_A2 0x0340
+#define R_SWSI_DATA_V1 0x0370
+#define B_SWSI_DATA_VAL_V1 GENMASK(19, 0)
+#define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20)
+#define B_SWSI_DATA_PATH_V1 GENMASK(30, 28)
+#define B_SWSI_DATA_BIT_MASK_EN_V1 BIT(31)
+#define R_SWSI_BIT_MASK_V1 0x0374
+#define B_SWSI_BIT_MASK_V1 GENMASK(19, 0)
+#define R_SWSI_READ_ADDR_V1 0x0378
+#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
+#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
+#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
#define R_UPD_CLK_ADC 0x0700
-#define B_UPD_CLK_ADC_ON BIT(24)
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
+#define B_UPD_CLK_ADC_ON BIT(24)
+#define B_ENABLE_CCK BIT(5)
#define R_RSTB_ASYNC 0x0704
#define B_RSTB_ASYNC_ALL BIT(1)
#define R_MAC_PIN_SEL 0x0734
#define B_CH_IDX_SEG0 GENMASK(23, 16)
#define R_PLCP_HISTOGRAM 0x0738
-#define B_STS_DIS_TRIG_BY_BRK BIT(2)
+#define B_STS_PARSING_TIME GENMASK(19, 16)
#define B_STS_DIS_TRIG_BY_FAIL BIT(3)
+#define B_STS_DIS_TRIG_BY_BRK BIT(2)
#define R_PHY_STS_BITMAP_ADDR_START R_PHY_STS_BITMAP_SEARCH_FAIL
#define B_PHY_STS_BITMAP_ADDR_MASK GENMASK(6, 2)
#define R_PHY_STS_BITMAP_SEARCH_FAIL 0x073C
@@ -1707,9 +3535,10 @@
#define R_PMAC_RXMOD 0x0994
#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
#define R_MAC_SEL 0x09A4
-#define B_MAC_SEL_MOD GENMASK(4, 2)
-#define B_MAC_SEL_DPD_EN BIT(10)
+#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31)
#define B_MAC_SEL_PWR_EN BIT(16)
+#define B_MAC_SEL_DPD_EN BIT(10)
+#define B_MAC_SEL_MOD GENMASK(4, 2)
#define R_PMAC_TX_CTRL 0x09C0
#define B_PMAC_TXEN_DIS BIT(0)
#define R_PMAC_TX_PRD 0x09C4
@@ -1718,6 +3547,10 @@
#define B_PMAC_PTX_EN BIT(4)
#define R_PMAC_TX_CNT 0x09C8
#define B_PMAC_TX_CNT_MSK GENMASK(31, 0)
+#define R_P80_AT_HIGH_FREQ 0x09D8
+#define B_P80_AT_HIGH_FREQ BIT(26)
+#define R_DBCC_80P80_SEL_EVM_RPT 0x0A10
+#define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0)
#define R_CCX 0x0C00
#define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4)
#define B_MEASUREMENT_TRIG_MSK BIT(2)
@@ -1748,8 +3581,24 @@
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
#define B_IOQ_IQK_DPK_EN BIT(1)
+#define R_GNT_BT_WGT_EN 0x0C6C
+#define B_GNT_BT_WGT_EN BIT(21)
+#define R_PD_ARBITER_OFF 0x0C80
+#define B_PD_ARBITER_OFF BIT(31)
+#define R_SNDCCA_A1 0x0C9C
+#define B_SNDCCA_A1_EN GENMASK(19, 12)
+#define R_SNDCCA_A2 0x0CA0
+#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_RXHT_MCS_LIMIT 0x0D18
+#define B_RXHT_MCS_LIMIT GENMASK(9, 8)
+#define R_RXVHT_MCS_LIMIT 0x0D18
+#define B_RXVHT_MCS_LIMIT GENMASK(22, 21)
#define R_P0_EN_SOUND_WO_NDP 0x0D7C
#define B_P0_EN_SOUND_WO_NDP BIT(1)
+#define R_RXHE 0x0D80
+#define B_RXHETB_MAX_NSS GENMASK(25, 23)
+#define B_RXHE_MAX_NSS GENMASK(16, 14)
+#define B_RXHE_USER_MAX GENMASK(13, 6)
#define R_SPOOF_ASYNC_RST 0x0D84
#define B_SPOOF_ASYNC_RST BIT(15)
#define R_NDP_BRK0 0xDA0
@@ -1758,10 +3607,18 @@
#define R_BRK_ASYNC_RST_EN_1 0x0DC0
#define R_BRK_ASYNC_RST_EN_2 0x0DC4
#define R_BRK_ASYNC_RST_EN_3 0x0DC8
+#define R_S0_HW_SI_DIS 0x1200
+#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
-#define B_P0_RXCK_VAL GENMASK(18, 16)
-#define B_P0_RXCK_ON BIT(19)
#define B_P0_RXCK_BW3 BIT(30)
+#define B_P0_TXCK_ALL GENMASK(19, 12)
+#define B_P0_RXCK_ON BIT(19)
+#define B_P0_RXCK_VAL GENMASK(18, 16)
+#define B_P0_TXCK_ON BIT(15)
+#define B_P0_TXCK_VAL GENMASK(14, 12)
+#define R_P0_RFMODE 0x12AC
+#define B_P0_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
+#define B_P0_RFMODE_MUX GENMASK(11, 4)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
#define R_S0_RXDC 0x12D4
@@ -1776,6 +3633,10 @@
#define R_CFO_COMP_SEG0_H 0x1388
#define R_CFO_COMP_SEG0_CTRL 0x138C
#define R_DBG32_D 0x1730
+#define R_SWSI_V1 0x174C
+#define B_SWSI_W_BUSY_V1 BIT(24)
+#define B_SWSI_R_BUSY_V1 BIT(25)
+#define B_SWSI_R_DATA_DONE_V1 BIT(26)
#define R_TX_COUNTER 0x1A40
#define R_IFS_CLM_TX_CNT 0x1ACC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
@@ -1810,6 +3671,8 @@
#define B_TXAGC_TP GENMASK(2, 0)
#define R_TSSI_THER 0x1C10
#define B_TSSI_THER GENMASK(29, 24)
+#define R_TXAGC_BTP 0x1CA0
+#define B_TXAGC_BTP GENMASK(31, 24)
#define R_TXAGC_BB 0x1C60
#define B_TXAGC_BB_OFT GENMASK(31, 16)
#define B_TXAGC_BB GENMASK(31, 24)
@@ -1818,6 +3681,11 @@
#define B_S0_ADDCK_Q GENMASK(19, 10)
#define R_ADC_FIFO 0x20fc
#define B_ADC_FIFO_RST GENMASK(31, 24)
+#define B_ADC_FIFO_RXK GENMASK(31, 16)
+#define B_ADC_FIFO_A3 BIT(28)
+#define B_ADC_FIFO_A2 BIT(24)
+#define B_ADC_FIFO_A1 BIT(20)
+#define B_ADC_FIFO_A0 BIT(16)
#define R_TXFIR0 0x2300
#define B_TXFIR_C01 GENMASK(23, 0)
#define R_TXFIR2 0x2304
@@ -1834,16 +3702,32 @@
#define B_TXFIR_CCD GENMASK(23, 0)
#define R_TXFIRE 0x231c
#define B_TXFIR_CEF GENMASK(23, 0)
+#define R_11B_RX_V1 0x2320
+#define B_11B_RXCCA_DIS_V1 BIT(0)
+#define R_RPL_OFST 0x2340
+#define B_RPL_OFST_MASK GENMASK(14, 8)
#define R_RXCCA 0x2344
#define B_RXCCA_DIS BIT(31)
+#define R_RXCCA_V1 0x2320
+#define B_RXCCA_DIS_V1 BIT(0)
#define R_RXSC 0x237C
#define B_RXSC_EN BIT(0)
#define R_RXSCOBC 0x23B0
#define B_RXSCOBC_TH GENMASK(18, 0)
#define R_RXSCOCCK 0x23B4
#define B_RXSCOCCK_TH GENMASK(18, 0)
+#define R_P80_AT_HIGH_FREQ_RU_ALLOC 0x2410
+#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1 BIT(14)
+#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13)
+#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10
+#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
+#define R_S1_HW_SI_DIS 0x3200
+#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
+#define R_P1_RFMODE 0x32AC
+#define B_P1_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
+#define B_P1_RFMODE_MUX GENMASK(11, 4)
#define R_P1_DBGMOD 0x32B8
#define B_P1_DBGMOD_ON BIT(30)
#define R_S1_RXDC 0x32D4
@@ -1859,10 +3743,12 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_MUIC 0x40F8
+#define B_MUIC_EN BIT(0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(1, 0)
#define R_SEG0CSI 0x42AC
-#define B_SEG0CSI_IDX GENMASK(10, 0)
+#define B_SEG0CSI_IDX GENMASK(11, 0)
#define R_SEG0CSI_EN 0x42C4
#define B_SEG0CSI_EN BIT(23)
#define R_BSS_CLR_MAP 0x43ac
@@ -1872,6 +3758,12 @@
#define R_CFO_TRK0 0x4404
#define R_CFO_TRK1 0x440C
#define B_CFO_TRK_MSK GENMASK(14, 10)
+#define R_T2F_GI_COMB 0x4424
+#define B_T2F_GI_COMB_EN BIT(2)
+#define R_BT_DYN_DC_EST_EN 0x441C
+#define B_BT_DYN_DC_EST_EN_MSK BIT(31)
+#define R_ASSIGN_SBD_OPT 0x4450
+#define B_ASSIGN_SBD_OPT_EN BIT(24)
#define R_DCFO_COMP_S0 0x448C
#define B_DCFO_COMP_S0_MSK GENMASK(11, 0)
#define R_DCFO_WEIGHT 0x4490
@@ -1886,6 +3778,22 @@
#define B_TXPWR_MSK GENMASK(30, 22)
#define R_TXNSS_MAP 0x45B4
#define B_TXNSS_MAP_MSK GENMASK(20, 17)
+#define R_PCOEFF0_V1 0x45BC
+#define B_PCOEFF01_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF2_V1 0x45CC
+#define B_PCOEFF23_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF4_V1 0x45D0
+#define B_PCOEFF45_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF6_V1 0x45D4
+#define B_PCOEFF67_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFF8_V1 0x45D8
+#define B_PCOEFF89_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFFA_V1 0x45C0
+#define B_PCOEFFAB_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFFC_V1 0x45C4
+#define B_PCOEFFCD_MSK_V1 GENMASK(23, 0)
+#define R_PCOEFFE_V1 0x45C8
+#define B_PCOEFFEF_MSK_V1 GENMASK(23, 0)
#define R_PATH0_IB_PKPW 0x4628
#define B_PATH0_IB_PKPW_MSK GENMASK(11, 6)
#define R_PATH0_LNA_ERR1 0x462C
@@ -1919,36 +3827,84 @@
#define R_PATH0_RXB_INIT 0x4658
#define B_PATH0_RXB_INIT_IDX_MSK GENMASK(9, 5)
#define R_PATH0_LNA_INIT 0x4668
+#define R_PATH0_LNA_INIT_V1 0x472C
#define B_PATH0_LNA_INIT_IDX_MSK GENMASK(26, 24)
#define R_PATH0_BTG 0x466C
#define B_PATH0_BTG_SHEN GENMASK(18, 17)
#define R_PATH0_TIA_INIT 0x4674
#define B_PATH0_TIA_INIT_IDX_MSK BIT(17)
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH0_RXB_INIT_V1 0x46A8
+#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
+#define R_PATH0_G_LNA6_OP1DB_V1 0x4688
+#define B_PATH0_G_LNA6_OP1DB_V1 GENMASK(31, 24)
+#define R_PATH0_G_TIA0_LNA6_OP1DB_V1 0x4694
+#define B_PATH0_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
+#define R_PATH0_G_TIA1_LNA6_OP1DB_V1 0x4694
+#define B_PATH0_R_G_OFST_MASK GENMASK(23, 16)
+#define B_PATH0_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8)
+#define R_CDD_EVM_CHK_EN 0x46C0
+#define B_CDD_EVM_CHK_EN BIT(0)
+#define R_PATH0_BAND_SEL_V1 0x4738
+#define B_PATH0_BAND_SEL_MSK_V1 BIT(17)
+#define R_PATH0_BT_SHARE_V1 0x4738
+#define B_PATH0_BT_SHARE_V1 BIT(19)
+#define R_PATH0_BTG_PATH_V1 0x4738
+#define B_PATH0_BTG_PATH_V1 BIT(22)
#define R_P0_NBIIDX 0x469C
#define B_P0_NBIIDX_VAL GENMASK(11, 0)
#define B_P0_NBIIDX_NOTCH_EN BIT(12)
+#define R_P0_BACKOFF_IBADC_V1 0x469C
+#define B_P0_BACKOFF_IBADC_V1 GENMASK(31, 26)
+#define B_P0_NBIIDX_NOTCH_EN_V1 BIT(12)
#define R_P1_MODE 0x4718
#define B_P1_MODE_SEL GENMASK(31, 30)
+#define R_P0_AGC_CTL 0x4730
+#define B_P0_AGC_EN BIT(31)
#define R_PATH1_LNA_INIT 0x473C
+#define R_PATH1_LNA_INIT_V1 0x4A80
#define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24)
+#define R_PATH0_TIA_INIT_V1 0x473C
+#define B_PATH0_TIA_INIT_IDX_MSK_V1 BIT(9)
#define R_PATH1_TIA_INIT 0x4748
#define B_PATH1_TIA_INIT_IDX_MSK BIT(17)
#define R_PATH1_BTG 0x4740
#define B_PATH1_BTG_SHEN GENMASK(18, 17)
#define R_PATH1_RXB_INIT 0x472C
#define B_PATH1_RXB_INIT_IDX_MSK GENMASK(9, 5)
+#define R_PATH1_G_LNA6_OP1DB_V1 0x476C
+#define B_PATH1_G_LNA6_OP1DB_V1 GENMASK(31, 24)
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
+#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
+#define R_PATH1_G_TIA1_LNA6_OP1DB_V1 0x4778
+#define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8)
+#define R_PATH1_BAND_SEL_V1 0x4AA4
+#define B_PATH1_BAND_SEL_MSK_V1 BIT(17)
+#define R_PATH1_BT_SHARE_V1 0x4AA4
+#define B_PATH1_BT_SHARE_V1 BIT(19)
+#define R_PATH1_BTG_PATH_V1 0x4AA4
+#define B_PATH1_BTG_PATH_V1 BIT(22)
#define R_P1_NBIIDX 0x4770
#define B_P1_NBIIDX_VAL GENMASK(11, 0)
#define B_P1_NBIIDX_NOTCH_EN BIT(12)
#define R_SEG0R_PD 0x481C
+#define R_SEG0R_PD_V1 0x4860
+#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
#define R_2P4G_BAND 0x4970
@@ -1956,19 +3912,101 @@
#define R_FC0_BW 0x4974
#define B_FC0_BW_INV GENMASK(6, 0)
#define B_FC0_BW_SET GENMASK(31, 30)
+#define B_ANT_RX_BT_SEG0 GENMASK(25, 22)
+#define B_ANT_RX_1RCCA_SEG1 GENMASK(21, 18)
+#define B_ANT_RX_1RCCA_SEG0 GENMASK(17, 14)
#define R_CHBW_MOD 0x4978
-#define B_CHBW_MOD_PRICH GENMASK(11, 8)
+#define B_BT_SHARE BIT(14)
#define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define B_CHBW_MOD_PRICH GENMASK(11, 8)
+#define B_ANT_RX_SEG0 GENMASK(3, 0)
+#define R_PD_BOOST_EN 0x49E8
+#define B_PD_BOOST_EN BIT(7)
+#define R_P1_BACKOFF_IBADC_V1 0x49F0
+#define B_P1_BACKOFF_IBADC_V1 GENMASK(31, 26)
+#define R_BK_FC0_INV_V1 0x4A1C
+#define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0)
+#define R_CCK_FC0_INV_V1 0x4A20
+#define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0)
+#define R_PATH1_RXB_INIT_V1 0x4A5C
+#define B_PATH1_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
+#define R_P1_AGC_CTL 0x4A9C
+#define B_P1_AGC_EN BIT(31)
+#define R_PATH1_TIA_INIT_V1 0x4AA8
+#define B_PATH1_TIA_INIT_IDX_MSK_V1 BIT(9)
+#define R_PATH0_RXBB_V1 0x4AD4
+#define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0)
+#define R_PATH1_RXBB_V1 0x4AE0
+#define B_PATH1_RXBB_MSK_V1 GENMASK(31, 0)
+#define R_PATH0_BT_BACKOFF_V1 0x4AE4
+#define B_PATH0_BT_BACKOFF_V1 GENMASK(23, 0)
+#define R_PATH1_BT_BACKOFF_V1 0x4AEC
+#define B_PATH1_BT_BACKOFF_V1 GENMASK(23, 0)
+#define R_PATH0_FRC_FIR_TYPE_V1 0x4C00
+#define B_PATH0_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0)
+#define R_PATH0_NOTCH 0x4C14
+#define B_PATH0_NOTCH_EN BIT(12)
+#define B_PATH0_NOTCH_VAL GENMASK(11, 0)
+#define R_PATH0_NOTCH2 0x4C20
+#define B_PATH0_NOTCH2_EN BIT(12)
+#define B_PATH0_NOTCH2_VAL GENMASK(11, 0)
+#define R_PATH0_5MDET 0x4C4C
+#define B_PATH0_5MDET_EN BIT(12)
+#define B_PATH0_5MDET_SB2 BIT(8)
+#define B_PATH0_5MDET_SB0 BIT(6)
+#define B_PATH0_5MDET_TH GENMASK(5, 0)
+#define R_PATH1_FRC_FIR_TYPE_V1 0x4CC4
+#define B_PATH1_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0)
+#define R_PATH1_NOTCH 0x4CD8
+#define B_PATH1_NOTCH_EN BIT(12)
+#define B_PATH1_NOTCH_VAL GENMASK(11, 0)
+#define R_PATH1_NOTCH2 0x4CE4
+#define B_PATH1_NOTCH2_EN BIT(12)
+#define B_PATH1_NOTCH2_VAL GENMASK(11, 0)
+#define R_PATH1_5MDET 0x4D10
+#define B_PATH1_5MDET_EN BIT(12)
+#define B_PATH1_5MDET_SB2 BIT(8)
+#define B_PATH1_5MDET_SB0 BIT(6)
+#define B_PATH1_5MDET_TH GENMASK(5, 0)
+#define R_RPL_BIAS_COMP 0x4DF0
+#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0)
+#define R_RPL_PATHAB 0x4E0C
+#define B_RPL_PATHB_MASK GENMASK(23, 16)
+#define B_RPL_PATHA_MASK GENMASK(15, 8)
+#define R_RSSI_M_PATHAB 0x4E2C
+#define B_RSSI_M_PATHB_MASK GENMASK(15, 8)
+#define B_RSSI_M_PATHA_MASK GENMASK(7, 0)
+#define R_FC0_V1 0x4E30
+#define B_FC0_MSK_V1 GENMASK(12, 0)
+#define R_RX_BW40_2XFFT_EN_V1 0x4E30
+#define B_RX_BW40_2XFFT_EN_MSK_V1 BIT(26)
+#define R_DCFO_COMP_S0_V1 0x4A40
+#define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0)
+#define R_BMODE_PDTH_V1 0x4B64
+#define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24)
+#define R_BMODE_PDTH_EN_V1 0x4B74
+#define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30)
#define R_CFO_COMP_SEG1_L 0x5384
#define R_CFO_COMP_SEG1_H 0x5388
#define R_CFO_COMP_SEG1_CTRL 0x538C
#define B_CFO_COMP_VALID_BIT BIT(29)
#define B_CFO_COMP_WEIGHT_MSK GENMASK(27, 24)
#define B_CFO_COMP_VAL_MSK GENMASK(11, 0)
+#define R_UPD_CLK 0x5670
+#define B_DAC_VAL BIT(31)
+#define B_ACK_VAL GENMASK(30, 29)
+#define B_DPD_DIS BIT(14)
+#define B_DPD_GDIS BIT(13)
+#define B_IQK_RFC_ON BIT(1)
+#define R_TXPWRB 0x56CC
+#define B_TXPWRB_ON BIT(28)
+#define B_TXPWRB_VAL GENMASK(27, 19)
#define R_DPD_OFT_EN 0x5800
#define B_DPD_OFT_EN BIT(28)
#define R_DPD_OFT_ADDR 0x5804
#define B_DPD_OFT_ADDR GENMASK(31, 27)
+#define R_TXPWRB_H 0x580c
+#define B_TXPWRB_RDY BIT(15)
#define R_P0_TMETER 0x5810
#define B_P0_TMETER GENMASK(15, 10)
#define B_P0_TMETER_DIS BIT(16)
@@ -1982,6 +4020,16 @@
#define R_P0_RFCTM 0x5864
#define B_P0_RFCTM_VAL GENMASK(25, 20)
#define R_P0_RFCTM_RDY BIT(26)
+#define R_P0_TRSW 0x5868
+#define B_P0_TRSW_B BIT(0)
+#define B_P0_TRSW_A BIT(1)
+#define B_P0_TRSW_X BIT(2)
+#define B_P0_TRSW_SO_A2 GENMASK(7, 5)
+#define R_P0_RFM 0x5894
+#define B_P0_RFM_DIS_WL BIT(7)
+#define B_P0_RFM_TX_OPT BIT(6)
+#define B_P0_RFM_BT_EN BIT(5)
+#define B_P0_RFM_OUT GENMASK(4, 0)
#define R_P0_TXDPD 0x58D4
#define B_P0_TXDPD GENMASK(31, 28)
#define R_P0_TXPW_RSTB 0x58DC
@@ -2011,6 +4059,8 @@
#define B_S0_DACKQ7_K GENMASK(15, 8)
#define R_S0_DACKQ8 0x5E98
#define B_S0_DACKQ8_K GENMASK(15, 8)
+#define R_RPL_BIAS_COMP1 0x6DF0
+#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
#define R_P1_TMETER 0x7810
#define B_P1_TMETER GENMASK(15, 10)
#define B_P1_TMETER_DIS BIT(16)
@@ -2063,20 +4113,28 @@
#define R_IQK_DIF2 0x8024
#define B_IQK_DIF2_RXPI GENMASK(19, 0)
#define R_IQK_DIF4 0x802C
-#define B_IQK_DIF4_TXT GENMASK(11, 0)
#define B_IQK_DIF4_RXT GENMASK(27, 16)
+#define B_IQK_DIF4_TXT GENMASK(11, 0)
+#define IQK_DF4_TXT_8_25MHZ 0x021
#define R_IQK_CFG 0x8034
#define B_IQK_CFG_SET GENMASK(5, 4)
+#define R_TPG_SEL 0x8068
#define R_TPG_MOD 0x806C
#define B_TPG_MOD_F GENMASK(2, 1)
#define R_MDPK_SYNC 0x8070
#define B_MDPK_SYNC_SEL BIT(31)
#define B_MDPK_SYNC_MAN GENMASK(31, 28)
#define R_MDPK_RX_DCK 0x8074
+#define B_MDPK_RX_DCK_EN BIT(31)
+#define R_KIP_MOD 0x8078
+#define B_KIP_MOD GENMASK(19, 0)
#define R_NCTL_RW 0x8080
#define R_KIP_SYSCFG 0x8088
#define R_KIP_CLK 0x808C
+#define R_DPK_IDL 0x809C
+#define B_DPK_IDL BIT(8)
#define R_LDL_NORM 0x80A0
+#define B_LDL_NORM_MA BIT(16)
#define B_LDL_NORM_PN GENMASK(12, 8)
#define B_LDL_NORM_OP GENMASK(1, 0)
#define R_DPK_CTL 0x80B0
@@ -2087,12 +4145,19 @@
#define B_DPK_CFG2_ST BIT(14)
#define R_DPK_CFG3 0x80C0
#define R_KPATH_CFG 0x80D0
+#define B_KPATH_CFG_ED GENMASK(21, 20)
#define R_KIP_RPT1 0x80D4
#define B_KIP_RPT1_SEL GENMASK(21, 16)
#define R_SRAM_IQRX 0x80D8
#define R_GAPK 0x80E0
#define B_GAPK_ADR BIT(0)
#define R_SRAM_IQRX2 0x80E8
+#define R_DPK_MPA 0x80EC
+#define B_DPK_MPA_T0 BIT(10)
+#define B_DPK_MPA_T1 BIT(9)
+#define B_DPK_MPA_T2 BIT(8)
+#define R_DPK_WR 0x80F4
+#define B_DPK_WR_ST BIT(29)
#define R_DPK_TRK 0x80f0
#define B_DPK_TRK_DIS BIT(31)
#define R_RPT_COM 0x80FC
@@ -2100,8 +4165,11 @@
#define B_PRT_COM_DCI GENMASK(27, 16)
#define B_PRT_COM_CORV GENMASK(15, 8)
#define B_PRT_COM_DCQ GENMASK(11, 0)
+#define B_PRT_COM_RXOV BIT(8)
#define B_PRT_COM_GL GENMASK(7, 4)
#define B_PRT_COM_CORI GENMASK(7, 0)
+#define B_PRT_COM_RXBB GENMASK(5, 0)
+#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
#define B_COEF_SEL_IQC BIT(0)
#define B_COEF_SEL_MDPD BIT(8)
@@ -2126,17 +4194,27 @@
#define R_CFIR_MAP 0x8150
#define R_CFIR_LUT 0x8154
#define B_CFIR_LUT_SEL BIT(8)
+#define B_CFIR_LUT_SET BIT(4)
#define B_CFIR_LUT_G3 BIT(3)
#define B_CFIR_LUT_G2 BIT(2)
+#define B_CFIR_LUT_GP_V1 GENMASK(2, 0)
#define B_CFIR_LUT_GP GENMASK(1, 0)
+#define R_DPK_GN 0x819C
+#define B_DPK_GN_EN GENMASK(17, 16)
+#define B_DPK_GN_AG GENMASK(9, 0)
#define R_DPD_V1 0x81a0
+#define B_DPD_LBK BIT(7)
#define R_DPD_CH0 0x81AC
#define R_DPD_BND 0x81B4
#define R_DPD_CH0A 0x81BC
+#define B_DPD_MEN GENMASK(31, 28)
+#define B_DPD_ORDER GENMASK(26, 24)
+#define B_DPD_SEL GENMASK(13, 8)
#define R_TXAGC_RFK 0x81C4
#define B_TXAGC_RFK_CH0 GENMASK(5, 0)
#define R_DPD_COM 0x81C8
#define R_KIP_IQP 0x81CC
+#define B_KIP_IQP_SW GENMASK(13, 12)
#define B_KIP_IQP_IQSW GENMASK(5, 0)
#define R_KIP_RPT 0x81D4
#define B_KIP_RPT_SEL GENMASK(21, 16)
@@ -2144,8 +4222,15 @@
#define R_LOAD_COEF 0x81DC
#define B_LOAD_COEF_MDPD BIT(16)
#define B_LOAD_COEF_CFIR GENMASK(1, 0)
+#define B_LOAD_COEF_DI BIT(1)
#define B_LOAD_COEF_AUTO BIT(0)
+#define R_DPK_GL 0x81F0
+#define B_DPK_GL_A0 GENMASK(31, 28)
+#define B_DPK_GL_A1 GENMASK(17, 0)
#define R_RPT_PER 0x81FC
+#define B_RPT_PER_TSSI GENMASK(28, 16)
+#define B_RPT_PER_OF GENMASK(15, 8)
+#define B_RPT_PER_TH GENMASK(5, 0)
#define R_RXCFIR_P0C0 0x8D40
#define R_RXCFIR_P0C1 0x8D84
#define R_RXCFIR_P0C2 0x8DC8
@@ -2178,5 +4263,112 @@
#define R_IQKINF2 0x9FE8
#define B_IQKINF2_FCNT GENMASK(23, 16)
#define B_IQKINF2_KCNT GENMASK(15, 8)
-#define B_IQKINF2_NCTLV GENMAKS(7, 0)
+#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#define R_DCOF0 0xC000
+#define B_DCOF0_V GENMASK(4, 1)
+#define R_DCOF1 0xC004
+#define B_DCOF1_S BIT(0)
+#define R_DCOF8 0xC020
+#define B_DCOF8_V GENMASK(4, 1)
+#define R_DACK_S0P0 0xC040
+#define B_DACK_S0P0_OK BIT(31)
+#define R_DACK_BIAS00 0xc048
+#define B_DACK_BIAS00 GENMASK(11, 2)
+#define R_DACK_S0P2 0xC05C
+#define B_DACK_S0M0 GENMASK(31, 24)
+#define B_DACK_S0P2_OK BIT(2)
+#define R_DACK_DADCK00 0xC060
+#define B_DACK_DADCK00 GENMASK(31, 24)
+#define R_DACK_S0P1 0xC064
+#define B_DACK_S0P1_OK BIT(31)
+#define R_DACK_BIAS01 0xC06C
+#define B_DACK_BIAS01 GENMASK(11, 2)
+#define R_DACK_S0P3 0xC080
+#define B_DACK_S0M1 GENMASK(31, 24)
+#define B_DACK_S0P3_OK BIT(2)
+#define R_DACK_DADCK01 0xC084
+#define B_DACK_DADCK01 GENMASK(31, 24)
+#define R_DRCK 0xC0C4
+#define B_DRCK_IDLE BIT(9)
+#define B_DRCK_EN BIT(6)
+#define B_DRCK_VAL GENMASK(4, 0)
+#define R_DRCK_RES 0xC0C8
+#define B_DRCK_RES GENMASK(19, 15)
+#define B_DRCK_POL BIT(3)
+#define R_PATH0_SAMPL_DLY_T_V1 0xC0D4
+#define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
+#define R_P0_CFCH_BW0 0xC0D4
+#define B_P0_CFCH_BW0 GENMASK(27, 26)
+#define R_P0_CFCH_BW1 0xC0D8
+#define B_P0_CFCH_BW1 GENMASK(8, 5)
+#define R_ADDCK0 0xC0F4
+#define B_ADDCK0 GENMASK(9, 8)
+#define B_ADDCK0_EN BIT(4)
+#define B_ADDCK0_RST BIT(2)
+#define R_ADDCK0_RL 0xC0F8
+#define B_ADDCK0_RLS GENMASK(29, 28)
+#define B_ADDCK0_RL1 GENMASK(27, 18)
+#define B_ADDCK0_RL0 GENMASK(17, 8)
+#define R_ADDCKR0 0xC0FC
+#define B_ADDCKR0_A0 GENMASK(19, 10)
+#define B_ADDCKR0_A1 GENMASK(9, 0)
+#define R_DACK10 0xC100
+#define B_DACK10 GENMASK(4, 1)
+#define R_DACK1_K 0xc104
+#define B_DACK1_EN BIT(0)
+#define R_DACK11 0xC120
+#define B_DACK11 GENMASK(4, 1)
+#define R_DACK_S1P0 0xC140
+#define B_DACK_S1P0_OK BIT(31)
+#define R_DACK_BIAS10 0xC148
+#define B_DACK_BIAS10 GENMASK(11, 2)
+#define R_DACK10S 0xC15C
+#define B_DACK10S GENMASK(31, 24)
+#define R_DACK_S1P2 0xC15C
+#define B_DACK_S1P2_OK BIT(2)
+#define R_DACK_DADCK10 0xC160
+#define B_DACK_DADCK10 GENMASK(31, 24)
+#define R_DACK_S1P1 0xC164
+#define B_DACK_S1P1_OK BIT(31)
+#define R_DACK_BIAS11 0xC16C
+#define B_DACK_BIAS11 GENMASK(11, 2)
+#define R_DACK11S 0xC180
+#define B_DACK11S GENMASK(31, 24)
+#define R_DACK_S1P3 0xC180
+#define B_DACK_S1P3_OK BIT(2)
+#define R_DACK_DADCK11 0xC184
+#define B_DACK_DADCK11 GENMASK(31, 24)
+#define R_PATH1_SAMPL_DLY_T_V1 0xC1D4
+#define B_PATH1_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
+#define R_PATH0_BW_SEL_V1 0xC0D8
+#define B_PATH0_BW_SEL_MSK_V1 GENMASK(8, 5)
+#define R_PATH1_BW_SEL_V1 0xC1D8
+#define B_PATH1_BW_SEL_MSK_V1 GENMASK(8, 5)
+#define R_ADDCK1 0xC1F4
+#define B_ADDCK1 GENMASK(9, 8)
+#define B_ADDCK1_EN BIT(4)
+#define B_ADDCK1_RST BIT(2)
+#define R_ADDCK1_RL 0xC1F8
+#define B_ADDCK1_RLS GENMASK(29, 28)
+#define B_ADDCK1_RL1 GENMASK(27, 18)
+#define B_ADDCK1_RL0 GENMASK(17, 8)
+#define R_ADDCKR1 0xC1fC
+#define B_ADDCKR1_A0 GENMASK(19, 10)
+#define B_ADDCKR1_A1 GENMASK(9, 0)
+
+/* WiFi CPU local domain */
+#define R_AX_WDT_CTRL 0x0040
+#define B_AX_WDT_EN BIT(31)
+#define B_AX_WDT_OPT_RESET_PLATFORM_EN BIT(29)
+#define B_AX_IO_HANG_IMR BIT(27)
+#define B_AX_IO_HANG_CMAC_RDATA_EN BIT(26)
+#define B_AX_IO_HANG_DMAC_EN BIT(25)
+#define B_AX_WDT_CLR BIT(16)
+#define B_AX_WDT_COUNT_MASK GENMASK(15, 0)
+#define WDT_CTRL_ALL_DIS 0
+
+#define R_AX_WDT_STATUS 0x0044
+#define B_AX_FS_WDT_INT BIT(8)
+#define B_AX_FS_WDT_INT_MSK BIT(0)
+
#endif
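
The field definitions above follow the kernel's GENMASK()/BIT() convention:
each B_AX_*_MASK names a contiguous bitfield inside an R_AX_* register, and
values such as TXDFIFO_HIGH_MCS_THRE are shifted into place with
FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> (in rtw89 normally through
read/modify/write wrappers such as rtw89_write32_mask()). A minimal
standalone sketch of that pattern — the helper macros are re-derived here so
it compiles outside the kernel, and only the field names/values are copied
from the R_AX_TXD_FIFO_CTRL block above:

#include <stdint.h>
#include <stdio.h>

/* Re-derived stand-ins for <linux/bits.h> / <linux/bitfield.h>. */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define BIT(n)		(1u << (n))
/* __builtin_ctz(mask) is the field's LSB position, i.e. its shift. */
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* Fields of R_AX_TXD_FIFO_CTRL (0xCA1C), copied from the header above. */
#define B_AX_TXDFIFO_HIGH_MCS_THRE_MASK	GENMASK(15, 12)
#define B_AX_TXDFIFO_LOW_MCS_THRE_MASK	GENMASK(11, 8)
#define TXDFIFO_HIGH_MCS_THRE		0x7
#define TXDFIFO_LOW_MCS_THRE		0x7

int main(void)
{
	uint32_t reg = 0;	/* stands in for a register read */

	/* Read-modify-write: clear both fields, then insert new values. */
	reg &= ~(B_AX_TXDFIFO_HIGH_MCS_THRE_MASK | B_AX_TXDFIFO_LOW_MCS_THRE_MASK);
	reg |= FIELD_PREP(B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE);
	reg |= FIELD_PREP(B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE);

	printf("reg=0x%08x high=%u\n", reg,
	       FIELD_GET(B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, reg));
	return 0;
}
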
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 4c37e590e43c..6e5a740b128f 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -5,254 +5,253 @@
#include "debug.h"
#include "ps.h"
-#define COUNTRY_REGD(_alpha2, _txpwr_regd_2g, _txpwr_regd_5g) \
+#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \
{.alpha2 = (_alpha2), \
- .txpwr_regd[RTW89_BAND_2G] = (_txpwr_regd_2g), \
- .txpwr_regd[RTW89_BAND_5G] = (_txpwr_regd_5g) \
+ .txpwr_regd = {_txpwr_regd}, \
}
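 /* Illustration (editorial sketch, not part of this patch): the variadic
  * form takes one regulatory domain per supported band, so a later third
  * band (e.g. 6 GHz) needs no change to existing two-band entries.
  *
  *   COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC)
  * expands to
  *   { .alpha2 = "US", .txpwr_regd = { RTW89_FCC, RTW89_FCC } }
  */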
static const struct rtw89_regulatory rtw89_ww_regd =
COUNTRY_REGD("00", RTW89_WW, RTW89_WW);
static const struct rtw89_regulatory rtw89_regd_map[] = {
- COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE),
- COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO),
- COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR),
- COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE),
- COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CN", RTW89_CN, RTW89_CN),
- COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC),
- COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CA", RTW89_IC, RTW89_IC),
- COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK),
- COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC),
- COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
+ COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA),
+ COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK),
+ COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
+ COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
+ COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
+ COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
+ COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC),
+ COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_NA),
+ COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC, RTW89_NA),
+ COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
+ COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
+ COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
};
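The reworked COUNTRY_REGD above leans on two GNU C conveniences: a named variadic macro parameter and positional array initialization, so the two-argument (2G/5G) and three-argument (2G/5G/6G) call sites in this table share one definition, and any omitted trailing band is simply zero-initialized. A self-contained sketch of the same trick, with hypothetical names:

/* Standalone illustration; 'demo_regd' and its members are hypothetical. */
enum demo_band { DEMO_2G, DEMO_5G, DEMO_6G, DEMO_BAND_NUM };

struct demo_regd {
	char alpha2[2];
	int txpwr[DEMO_BAND_NUM];
};

#define DEMO_REGD(_a2, _tx...) \
	{ .alpha2 = (_a2), .txpwr = { _tx } }

static const struct demo_regd demo_map[] = {
	DEMO_REGD("US", 1, 1, 1),	/* all three bands given */
	DEMO_REGD("JP", 2, 2),		/* trailing 6 GHz slot left zero */
};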
static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2)
@@ -272,6 +271,17 @@ static bool rtw89_regd_is_ww(const struct rtw89_regulatory *regd)
return regd == &rtw89_ww_regd;
}
+#define rtw89_debug_regd(_dev, _regd, _desc, _argv...) \
+do { \
+ typeof(_regd) __r = _regd; \
+ rtw89_debug(_dev, RTW89_DBG_REGD, _desc \
+ ": %c%c: mapping txregd to {2g: %d, 5g: %d, 6g: %d}\n", \
+ ##_argv, __r->alpha2[0], __r->alpha2[1], \
+ __r->txpwr_regd[RTW89_BAND_2G], \
+ __r->txpwr_regd[RTW89_BAND_5G], \
+ __r->txpwr_regd[RTW89_BAND_6G]); \
+} while (0)
+
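The ##_argv paste in the new rtw89_debug_regd wrapper is the GNU comma-swallowing idiom: when a call site passes no extra arguments, ## deletes the comma that precedes it, so the expansion still compiles. A runnable standalone demonstration:

#include <stdio.h>

/* GNU named-variadic macro: '##args' drops the trailing comma when
 * the caller supplies no extra arguments.
 */
#define dbg(fmt, args...) printf("dbg: " fmt "\n", ##args)

int main(void)
{
	dbg("no extra arguments");
	dbg("initiator=%d", 3);
	return 0;
}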
int rtw89_regd_init(struct rtw89_dev *rtwdev,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
@@ -294,20 +304,12 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev,
if (ret)
rtw89_warn(rtwdev, "failed to hint regulatory:%d\n", ret);
- rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "efuse country code %c%c, mapping to 2g txregd %d, 5g txregd %d\n",
- rtwdev->efuse.country_code[0], rtwdev->efuse.country_code[1],
- rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
- rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
-
+ rtw89_debug_regd(rtwdev, chip_regd, "efuse country code");
return 0;
}
- rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "worldwide roaming chip, follow the setting of stack(%c%c), mapping to 2g txregd %d, 5g txregd %d\n",
- rtwdev->regd->alpha2[0], rtwdev->regd->alpha2[1],
- rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
- rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+ rtw89_debug_regd(rtwdev, rtwdev->regd,
+ "worldwide roaming chip, follow the setting of stack");
return 0;
}
@@ -341,13 +343,10 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
goto exit;
}
rtw89_regd_notifier_apply(rtwdev, wiphy, request);
- rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "get alpha2 %c%c from initiator %d, mapping to 2g txregd %d, 5g txregd %d\n",
- request->alpha2[0], request->alpha2[1], request->initiator,
- rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
- rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+ rtw89_debug_regd(rtwdev, rtwdev->regd, "get from initiator %d, alpha2",
+ request->initiator);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
exit:
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 6b75e4bc7352..784147680353 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -3,6 +3,7 @@
*/
#include "coex.h"
+#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
@@ -36,16 +37,21 @@ static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie,
- &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
- [RTW89_QTA_DLFW] = {NULL, NULL, &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
};
static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &wde_size0, &ple_size0, &wde_qt0,
- &wde_qt0, &ple_qt4, &ple_qt5},
- [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &wde_size4, &ple_size4,
- &wde_qt4, &wde_qt4, &ple_qt13, &ple_qt13},
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size0,
+ &rtw89_mac_size.ple_size0, &rtw89_mac_size.wde_qt0,
+ &rtw89_mac_size.wde_qt0, &rtw89_mac_size.ple_qt4,
+ &rtw89_mac_size.ple_qt5},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4,
+ &rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
[RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
NULL},
};
@@ -373,6 +379,106 @@ static const struct rtw89_pwr_cfg * const pwr_off_seq_8852a[] = {
rtw8852a_pwroff, NULL
};
+static const u32 rtw8852a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
+ R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+ R_AX_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8852a_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
+ R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
+static const struct rtw89_imr_info rtw8852a_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET,
+ .wsec_imr_reg = R_AX_SEC_DEBUG,
+ .wsec_imr_set = B_AX_IMR_ERROR,
+ .mpdu_tx_imr_set = 0,
+ .mpdu_rx_imr_set = 0,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR,
+ .wde_imr_set = B_AX_WDE_IMR_SET,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR,
+ .ple_imr_set = B_AX_PLE_IMR_SET,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
+ .other_disp_imr_set = 0,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
+ .bbrpt_err_imr_set = 0,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET,
+ .cdma_imr_0_reg = R_AX_DLE_CTRL,
+ .cdma_imr_0_clr = B_AX_DLE_IMR_CLR,
+ .cdma_imr_0_set = B_AX_DLE_IMR_SET,
+ .cdma_imr_1_reg = 0,
+ .cdma_imr_1_clr = 0,
+ .cdma_imr_1_set = 0,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR,
+ .phy_intf_imr_clr = 0,
+ .phy_intf_imr_set = 0,
+ .rmac_imr_reg = R_AX_RMAC_ERR_ISR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET,
+ .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET,
+};
+
+static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK,
+ .p0_lna_init = {R_PATH0_LNA_INIT, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT, B_PATH0_TIA_INIT_IDX_MSK},
+ .p1_tia_init = {R_PATH1_TIA_INIT, B_PATH1_TIA_INIT_IDX_MSK},
+ .p0_rxb_init = {R_PATH0_RXB_INIT, B_PATH0_RXB_INIT_IDX_MSK},
+ .p1_rxb_init = {R_PATH1_RXB_INIT, B_PATH1_RXB_INIT_IDX_MSK},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
@@ -580,7 +686,7 @@ static void rtw8852a_power_trim(struct rtw89_dev *rtwdev)
}
static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 mac_idx)
{
u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
@@ -589,20 +695,20 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
u8 txsc20 = 0, txsc40 = 0;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_40);
fallthrough;
case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_20);
break;
default:
break;
}
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_80:
rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
@@ -619,7 +725,7 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
break;
}
- if (param->center_chan > 14)
+ if (chan->channel > 14)
rtw89_write8_set(rtwdev, chk_rate,
B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
else
@@ -1022,11 +1128,12 @@ static void rtw8852a_bb_sethw(struct rtw89_dev *rtwdev)
if (rtwdev->hal.cv <= CHIP_CCV) {
rtw89_phy_write32_set(rtwdev, R_RSTB_WATCH_DOG, B_P0_RSTB_WATCH_DOG);
rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_1, 0x864FA000);
- rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x3F);
+ rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x43F);
rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_3, 0x7FFF);
rtw89_phy_write32_set(rtwdev, R_SPOOF_ASYNC_RST, B_SPOOF_ASYNC_RST);
rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, B_STS_PARSING_TIME);
}
rtw89_phy_write32_mask(rtwdev, R_CFO_TRK0, B_CFO_TRK_MSK, 0x1f);
rtw89_phy_write32_mask(rtwdev, R_CFO_TRK1, B_CFO_TRK_MSK, 0x0c);
@@ -1050,35 +1157,38 @@ static void rtw8852a_bbrst_for_rfk(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- bool cck_en = param->center_chan <= 14;
- u8 pri_ch_idx = param->pri_ch_idx;
+ bool cck_en = chan->channel <= 14;
+ u8 pri_ch_idx = chan->pri_ch_idx;
if (cck_en)
- rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan,
- param->primary_chan, param->bandwidth);
+ rtw8852a_ctrl_sco_cck(rtwdev, chan->channel,
+ chan->primary_channel,
+ chan->band_width);
- rtw8852a_ctrl_ch(rtwdev, param->center_chan, phy_idx);
- rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ rtw8852a_ctrl_ch(rtwdev, chan->channel, phy_idx);
+ rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
if (cck_en) {
rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
} else {
rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
rtw8852a_bbrst_for_rfk(rtwdev, phy_idx);
}
- rtw8852a_spur_elimination(rtwdev, param->center_chan);
+ rtw8852a_spur_elimination(rtwdev, chan->channel);
rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0,
- param->primary_chan);
+ chan->primary_channel);
rtw8852a_bb_reset_all(rtwdev, phy_idx);
}
static void rtw8852a_set_channel(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *params)
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_channel_mac(rtwdev, params, RTW89_MAC_0);
- rtw8852a_set_channel_bb(rtwdev, params, RTW89_PHY_0);
+ rtw8852a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852a_set_channel_bb(rtwdev, chan, phy_idx);
}
static void rtw8852a_dfs_en(struct rtw89_dev *rtwdev, bool en)
@@ -1129,25 +1239,27 @@ static void rtw8852a_adc_en(struct rtw89_dev *rtwdev, bool en)
}
static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- u8 phy_idx = RTW89_PHY_0;
-
if (enter) {
- rtw89_mac_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, &p->tx_en,
+ RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852a_dfs_en(rtwdev, false);
- rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
rtw8852a_adc_en(rtwdev, false);
fsleep(40);
rtw8852a_bb_reset_en(rtwdev, phy_idx, false);
} else {
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852a_adc_en(rtwdev, true);
rtw8852a_dfs_en(rtwdev, true);
- rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_mac_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
}
@@ -1197,9 +1309,10 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev)
rtw8852a_dpk(rtwdev, phy_idx);
}
-static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev)
+static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_tssi_scan(rtwdev, RTW89_PHY_0);
+ rtw8852a_tssi_scan(rtwdev, phy_idx);
}
static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -1242,10 +1355,10 @@ static u32 rtw8852a_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
static
void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
- s16 pw_ofst, enum rtw89_mac_idx mac_idx)
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
{
- s32 val_1t = 0;
- s32 val_2t = 0;
+ s8 val_1t = 0;
+ s8 val_2t = 0;
u32 reg;
if (pw_ofst < -16 || pw_ofst > 15) {
@@ -1255,7 +1368,7 @@ void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
- val_1t = (s32)pw_ofst;
+ val_1t = pw_ofst;
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t);
val_2t = max(val_1t - 3, -16);
@@ -1298,9 +1411,11 @@ static void rtw8852a_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 ch = rtwdev->hal.current_channel;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -1326,7 +1441,8 @@ static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
cur.idx = j;
shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
+ &cur);
val |= (tmp << shf);
if ((j + 1) % 4)
@@ -1341,8 +1457,10 @@ static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 band = chan->band_type;
struct rtw89_rate_desc desc = {
.nss = RTW89_NSS_1,
.rs = RTW89_RS_OFFSET,
@@ -1353,7 +1471,7 @@ static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
val |= ((v & 0xf) << (4 * desc.idx));
}
@@ -1362,29 +1480,31 @@ static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit lmt[NTX_NUM_8852A];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
ptr = (s8 *)&lmt[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
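The rationale for replacing the shift loop: 'ptr' walks an array of s8 power limits, and left-shifting a negative s8 (promoted to int) sign-extends through the upper bytes, clobbering fields already packed into 'val'. FIELD_PREP() confines each byte to its own lane first. A userspace equivalent of what the FIELD_PREP chain computes, for illustration only:

#include <stdint.h>

/* Pack four signed bytes into one 32-bit word, masking each byte so
 * a negative value cannot sign-extend over its neighbours (the bug
 * the FIELD_PREP() rewrite avoids).
 */
static uint32_t pack_s8x4(const int8_t *p)
{
	return (uint32_t)(uint8_t)p[0] |
	       (uint32_t)(uint8_t)p[1] << 8 |
	       (uint32_t)(uint8_t)p[2] << 16 |
	       (uint32_t)(uint8_t)p[3] << 24;
}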
@@ -1393,30 +1513,32 @@ static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852A];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_RU_LMT + j +
__MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
ptr = (s8 *)&lmt_ru[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -1425,17 +1547,20 @@ static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
}
-static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev)
+static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_limit(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
-static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_ref(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_offset(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_ref(rtwdev, phy_idx);
}
static int
@@ -1512,10 +1637,12 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
enum rtw89_phy_idx idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
- if (rtwdev->hal.current_band_type == RTW89_BAND_2G)
+ if (chan->band_type == RTW89_BAND_2G)
rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
return;
}
@@ -1717,6 +1844,9 @@ static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
} else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
@@ -1808,7 +1938,8 @@ rtw8852a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
u32 _cur, _wrt; \
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
"btc ctrl %s: 0x%x\n", #_case, _val); \
- rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur);\
+ if (rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur))\
+ break; \
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
"btc ctrl ori 0x%x: 0x%x\n", _reg, _cur); \
_wrt = __do_clr(_val) ? \
@@ -1929,6 +2060,51 @@ void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
}
+static void rtw8852a_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ /* level=0 Default: TIA 1/0= (LNA2,TIAN6) = (7,1)/(5,1) = 21dB/12dB
+ * level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
+ * To improve BT ACI in co-rx
+ */
+
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void rtw8852a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ switch (level) {
+ case 0: /* original */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852a_set_wl_lna2(rtwdev, 0);
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, true);
+ rtw8852a_set_wl_lna2(rtwdev, 0);
+ break;
+	case 2: /* for BTG Co-Rx */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852a_set_wl_lna2(rtwdev, 1);
+ break;
+ }
+}
+
static void rtw8852a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -1949,18 +2125,20 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
u8 path;
- s8 *rx_power = phy_ppdu->rssi;
+ u8 *rx_power = phy_ppdu->rssi;
- status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
- status->chain_signal[path] = rx_power[path];
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
}
if (phy_ppdu->valid)
rtw8852a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
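RTW89_RSSI_RAW_TO_DBM is defined in the driver's core header, not in this hunk; conceptually the raw hardware RSSI is an unsigned half-dB count that must be rebased to dBm before mac80211 sees it. A hedged sketch of that conversion, assuming a fixed 100 dB offset (check core.h for the authoritative macro):

/* Assumed shape of the conversion, illustration only: half-dB raw
 * units, rebased by a fixed offset to yield dBm.
 */
static inline s8 demo_rssi_raw_to_dbm(u8 raw)
{
	return (s8)(raw >> 1) - 100;
}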
static const struct rtw89_chip_ops rtw8852a_chip_ops = {
+ .enable_bb_rf = rtw89_mac_enable_bb_rf,
+ .disable_bb_rf = rtw89_mac_disable_bb_rf,
.bb_reset = rtw8852a_bb_reset,
.bb_sethw = rtw8852a_bb_sethw,
.read_rf = rtw89_phy_read_rf,
@@ -1983,7 +2161,17 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.ctrl_btg = rtw8852a_ctrl_btg,
.query_ppdu = rtw8852a_query_ppdu,
.bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc,
+ .cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .pwr_on_func = NULL,
+ .pwr_off_func = NULL,
+ .fill_txdesc = rtw89_core_fill_txdesc,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx,
+ .h2c_dctl_sec_cam = NULL,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -1993,6 +2181,8 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.btc_bt_aci_imp = rtw8852a_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852a_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852a_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852a_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy,
};
const struct rtw89_chip_info rtw8852a_chip_info = {
@@ -2000,14 +2190,17 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ops = &rtw8852a_chip_ops,
.fw_name = "rtw89/rtw8852a_fw.bin",
.fifo_size = 458752,
+ .dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
+ .rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852a_hfc_param_ini_pcie,
.dle_mem = rtw8852a_dle_mem_pcie,
.rf_base_addr = {0xc000, 0xd000},
.pwr_on_seq = pwr_on_seq_8852a,
.pwr_off_seq = pwr_off_seq_8852a,
.bb_table = &rtw89_8852a_phy_bb_table,
+ .bb_gain_table = NULL,
.rf_table = {&rtw89_8852a_phy_radioa_table,
&rtw89_8852a_phy_radiob_table,},
.nctl_table = &rtw89_8852a_phy_nctl_table,
@@ -2019,23 +2212,50 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .dig_regs = &rtw8852a_dig_regs,
+ .tssi_dbw_table = NULL,
+ .support_chanctx_num = 1,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bw160 = false,
+ .hw_sec_hdr = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
.acam_num = 128,
.bcam_num = 10,
.scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_v1 = false,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
.limit_efuse_size = 1152,
+ .dav_phy_efuse_size = 0,
+ .dav_log_efuse_size = 0,
.phycap_addr = 0x580,
.phycap_size = 128,
- .para_ver = 0x05050864,
- .wlcx_desired = 0x05050000,
- .btcx_desired = 0x5,
+ .para_ver = 0x0,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
+ .btc_fwinfo_buf = 1024,
+
+ .fcxbtcrpt_ver = 1,
+ .fcxtdma_ver = 1,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 2,
+ .fcxstep_ver = 2,
+ .fcxnullsta_ver = 1,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852a_bt_rssi_thres,
@@ -2049,7 +2269,25 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_txwd_body),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body),
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL,
+ .h2c_regs = rtw8852a_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL,
+ .c2h_regs = rtw8852a_c2h_regs,
+ .page_regs = &rtw8852a_page_regs,
+ .dcfo_comp = &rtw8852a_dcfo_comp,
+ .dcfo_comp_sft = 3,
+ .imr_info = &rtw8852a_imr_info,
+ .rrsr_cfgs = &rtw8852a_rrsr_cfgs,
+ .dma_ch_mask = 0,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
MODULE_FIRMWARE("rtw89/rtw8852a_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852A driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index 633384374de0..fcff1194c009 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -93,6 +93,8 @@ struct rtw8852a_bb_pmac_info {
u8 duty_cycle;
};
+extern const struct rtw89_chip_info rtw8852a_chip_info;
+
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index c021e93eb07b..582ff0d3a9ea 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -12,66 +12,6 @@
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"
-static void
-_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- udelay(def->data);
-}
-
-static void
-(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
- [RTW89_RFK_F_WRF] = _rfk_write_rf,
- [RTW89_RFK_F_WM] = _rfk_write32_mask,
- [RTW89_RFK_F_WS] = _rfk_write32_set,
- [RTW89_RFK_F_WC] = _rfk_write32_clr,
- [RTW89_RFK_F_DELAY] = _rfk_delay,
-};
-
-static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
-
-static void
-rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
-{
- const struct rtw89_reg5_def *p = tbl->defs;
- const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
-
- for (; p < end; p++)
- _rfk_handler[p->flag](rtwdev, p);
-}
-
-#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f) \
- do { \
- typeof(rtwdev) _dev = (rtwdev); \
- if (cond) \
- rtw89_rfk_parser(_dev, (tbl_t)); \
- else \
- rtw89_rfk_parser(_dev, (tbl_f)); \
- } while (0)
-
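The block deleted above is a table-driven register-write parser: a function-pointer array indexed by each entry's flag, with a static_assert keeping the table and the enum in sync. Its removal here suggests the parser now lives in shared driver code rather than per-chip (the rtw89_rfk_parser calls that remain below are unchanged). The shape of the pattern, standalone and with hypothetical names:

/* Hypothetical, self-contained version of the dispatch-table shape. */
typedef void (*demo_op_fn)(int arg);

static void demo_op_add(int arg) { (void)arg; /* ... */ }
static void demo_op_del(int arg) { (void)arg; /* ... */ }

enum { DEMO_OP_ADD, DEMO_OP_DEL, DEMO_OP_NUM };

static demo_op_fn demo_ops[] = {
	[DEMO_OP_ADD] = demo_op_add,
	[DEMO_OP_DEL] = demo_op_del,
};

_Static_assert(sizeof(demo_ops) / sizeof(demo_ops[0]) == DEMO_OP_NUM,
	       "dispatch table out of sync with enum");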
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
@@ -1419,7 +1359,7 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, u8 path)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1440,9 +1380,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);
- iqk_info->iqk_band[path] = hal->current_band_type;
- iqk_info->iqk_bw[path] = hal->current_band_width;
- iqk_info->iqk_ch[path] = hal->current_channel;
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
@@ -1939,13 +1879,12 @@ static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- struct rtw89_hal *hal = &rtwdev->hal;
-
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 kidx = dpk->cur_idx[path];
- dpk->bp[path][kidx].band = hal->current_band_type;
- dpk->bp[path][kidx].ch = hal->current_channel;
- dpk->bp[path][kidx].bw = hal->current_band_width;
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
@@ -2249,8 +2188,8 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
"[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
corr_val);
- dpk->corr_idx[path] = corr_idx;
- dpk->corr_val[path] = corr_val;
+ dpk->corr_idx[path][0] = corr_idx;
+ dpk->corr_val[path][0] = corr_val;
rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
@@ -2263,8 +2202,8 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
path, dc_i, dc_q);
- dpk->dc_i[path] = dc_i;
- dpk->dc_q[path] = dc_q;
+ dpk->dc_i[path][0] = dc_i;
+ dpk->dc_q[path][0] = dc_q;
if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
corr_val < DPK_SYNC_TH_CORR)
@@ -2390,8 +2329,8 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
val2_q = abs(sign_extend32(val2_q, 11));
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
- (val1_i * val1_i + val1_q * val1_q) /
- (val2_i * val2_i + val2_q * val2_q));
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
} else {
for (i = 0; i < 32; i++) {
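phy_div() here is assumed to be the driver's zero-guarded integer divide, so the PAS-delta debug print cannot divide-fault when the second I/Q readback is all zeros. Illustrative shape only:

/* Assumed shape of phy_div(): divide, but return 0 on a zero
 * denominator instead of faulting.
 */
static inline u32 demo_phy_div(u32 num, u32 den)
{
	return den ? num / den : 0;
}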
@@ -2418,6 +2357,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2464,7 +2404,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
"[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
tmp_rxbb);
if (offset != 0 || agc_cnt == 0) {
- if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80)
+ if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
_dpk_lbk_rxiqk(rtwdev, phy, path);
@@ -2608,11 +2548,12 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
- cur_band = rtwdev->hal.current_band_type;
- cur_ch = rtwdev->hal.current_channel;
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
if (cur_band != dpk->bp[path][idx].band ||
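The reload check now compares the live band/channel from the chan snapshot against each backed-up DPK entry. A standalone model of the matching loop (types and the backup count are stand-ins; only the control flow mirrors the hunk):

#include <stdbool.h>

#define BKUP_NUM_SKETCH 2               /* stands in for RTW89_DPK_BKUP_NUM */

struct bp_sketch { unsigned char band, ch; };

static bool reload_check_sketch(const struct bp_sketch bp[BKUP_NUM_SKETCH],
				unsigned char cur_band, unsigned char cur_ch)
{
	int idx;

	for (idx = 0; idx < BKUP_NUM_SKETCH; idx++)
		if (cur_band == bp[idx].band && cur_ch == bp[idx].ch)
			return true;    /* a stored calibration matches */
	return false;                   /* no match: a fresh DPK is needed */
}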
@@ -2741,12 +2682,13 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
return true;
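The bypass check gates DPK on the front-end module capabilities for the band now taken from the chan snapshot: with an external PA on the active band, the calibration is skipped. A compact model of the flow (struct and parameter are stand-ins for the rtw89_fem_info fields named in the diff):

#include <stdbool.h>

struct fem_sketch { bool epa_2g, epa_5g; };

static bool dpk_bypass_sketch(const struct fem_sketch *fem, bool band_is_2g)
{
	if (fem->epa_2g && band_is_2g)
		return true;            /* 2G external PA fitted: skip DPK */
	if (fem->epa_5g && !band_is_2g)
		return true;            /* 5G external PA fitted: skip DPK */
	return false;
}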
@@ -2902,7 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
@@ -2912,7 +2855,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
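The TSSI setup paths keep using the parser-by-cond idiom, now fed with the band from the chan snapshot: one call picks between two register tables based on a predicate. A minimal model of that control flow (rtw89_rfk_parser_by_cond itself lives in the driver's PHY code and is not shown here):

struct tbl_sketch { const char *name; };

static void parse_sketch(const struct tbl_sketch *t)
{
	(void)t;                        /* would apply the table's registers */
}

static void parser_by_cond_sketch(int cond,
				  const struct tbl_sketch *tbl_true,
				  const struct tbl_sketch *tbl_false)
{
	parse_sketch(cond ? tbl_true : tbl_false);
}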
@@ -2923,7 +2867,8 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
&rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
@@ -2965,18 +2910,20 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
- u8 subband = rtwdev->hal.current_subband;
- const u8 *thm_up_a = NULL;
- const u8 *thm_down_a = NULL;
- const u8 *thm_up_b = NULL;
- const u8 *thm_down_b = NULL;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
u8 thermal = 0xff;
s8 thm_ofst[64] = {0};
u32 tmp = 0;
u8 i, j;
switch (subband) {
+ default:
case RTW89_CH_2G:
thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
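Two fixes land in this hunk: the thermal delta-swing tables hold signed offsets, so the pointers become const s8 * instead of const u8 *, and the subband switch gains a default: label placed before the first case, so an unexpected subband falls through to the 2G tables rather than leaving the thm_up_*/thm_down_* pointers NULL. A standalone model of the default-falls-through idiom:

static const signed char tbl_2g_sketch[4] = { -1, 0, 1, 2 };
static const signed char tbl_5g_sketch[4] = {  0, 1, 2, 3 };

static const signed char *pick_tbl_sketch(int subband)
{
	switch (subband) {
	default:                        /* unexpected value: safe fallback */
	case 0:                         /* 2G */
		return tbl_2g_sketch;   /* signed deltas, hence s8 tables */
	case 1:                         /* a 5G subband */
		return tbl_5g_sketch;
	}
}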
@@ -3158,9 +3105,11 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 subband = chan->subband_type;
switch (subband) {
+ default:
case RTW89_CH_2G:
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
&rtw8852a_tssi_pak_defs_a_2g_tbl,
@@ -3333,7 +3282,8 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
s8 de_2nd = 0;
@@ -3370,7 +3320,8 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
s8 tde_2nd = 0;
@@ -3408,6 +3359,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3416,7 +3368,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
- u8 ch = rtwdev->hal.current_channel;
+ u8 ch = chan->channel;
u8 i, gidx;
s8 ofdm_de;
s8 trim_de;
@@ -3536,9 +3488,11 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel, ch_tmp;
- u8 bw = rtwdev->hal.current_band_width;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel, ch_tmp;
+ u8 bw = chan->band_width;
+ u8 band = chan->band_type;
+ u8 subband = chan->subband_type;
s8 power;
s32 xdbm;
@@ -3549,7 +3503,7 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
else
ch_tmp = ch;
- power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX,
+ power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
xdbm = power * 100 / 4;
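rtw89_phy_read_txpwr_limit() now takes the band explicitly (chan->band_type) instead of inferring it from global HAL state. The returned limit is in quarter-dB steps, and the * 100 / 4 converts it to centi-dBm, as the hunk shows. A runnable sketch of the call shape and the unit conversion — the prototype and values below are illustrative, not the real header:

#include <stdio.h>

static signed char read_txpwr_limit_sketch(unsigned char band,
					   unsigned char bw, unsigned char ch)
{
	(void)band; (void)bw; (void)ch;
	return 46;                      /* pretend limit: 46 * 0.25 = 11.5 dBm */
}

int main(void)
{
	signed char power = read_txpwr_limit_sketch(1, 2, 36);
	int xdbm = power * 100 / 4;     /* quarter-dB -> centi-dBm */

	printf("limit = %d (0.25 dB units), xdbm = %d\n", power, xdbm);
	return 0;
}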
@@ -3581,10 +3535,12 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
- u8 ch = rtwdev->hal.current_channel, ch_tmp;
- u8 bw = rtwdev->hal.current_band_width;
- u16 tx_en;
+ u8 ch = chan->channel, ch_tmp;
+ u8 bw = chan->band_width;
+ u8 band = chan->band_type;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
s8 power;
s16 xdbm;
@@ -3597,8 +3553,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
else
ch_tmp = ch;
- power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX,
- RTW89_RS_OFDM, RTW89_NONBF, ch_tmp);
+ power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
+ RTW89_1TX, RTW89_RS_OFDM,
+ RTW89_NONBF, ch_tmp);
xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;
@@ -3612,7 +3569,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
__func__, phy, power, xdbm);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
@@ -3658,7 +3615,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);
- rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
@@ -3681,11 +3638,11 @@ void rtw8852a_dack(struct rtw89_dev *rtwdev)
void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
@@ -3694,7 +3651,7 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
else
_iqk(rtwdev, phy_idx, false);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
@@ -3706,33 +3663,33 @@ void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
bool is_afe)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_rx_dck(rtwdev, phy_idx, is_afe);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
_dpk(rtwdev, phy_idx, false);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
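All three entry points above share one bracket: notify BTC, stop scheduler TX while saving the previous enable mask (widened from u16 to u32), wait for RX idle, run the calibration, then restore the mask and notify BTC again. The rtw89_mac_* calls become rtw89_chip_* indirections so chips can supply their own stop/resume implementations. A standalone model of the bracket, with the chip indirection approximated by function pointers (an assumption about the mechanism, not the driver's actual ops table):

#include <stdint.h>

struct chip_ops_sketch {
	int  (*stop_sch_tx)(void *dev, uint8_t phy, uint32_t *tx_en, int sel);
	void (*resume_sch_tx)(void *dev, uint8_t phy, uint32_t tx_en);
};

static void run_rfk_sketch(void *dev, uint8_t phy,
			   const struct chip_ops_sketch *ops,
			   void (*calibrate)(void *dev, uint8_t phy))
{
	uint32_t tx_en;                 /* was u16; widened with the new ops */

	ops->stop_sch_tx(dev, phy, &tx_en, 0 /* SEL_ALL */);
	calibrate(dev, phy);            /* IQK / RX DCK / DPK body */
	ops->resume_sch_tx(dev, phy, tx_en);
}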
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
index 510570090502..dd2a978b9bae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
@@ -5,1603 +5,1603 @@
#include "rtw8852a_rfk_table.h"
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = {
- DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
- DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
- DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
- DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
- DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
- DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
- DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
- DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
- DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
- DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
- DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
- DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
- DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
- DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
- DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
- DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
+ RTW89_DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
+ RTW89_DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
+ RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
+ RTW89_DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
+ RTW89_DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
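From here on, the rtw8852a_rfk_table.c diff is a mechanical rename of the table-declaration macros from DECL_RFK_WM / DECLARE_RFK_TBL to the namespaced RTW89_DECL_RFK_WM / RTW89_DECLARE_RFK_TBL; every register address, mask, and value is unchanged. The macro bodies are not part of this diff; a plausible shape for a write-mask entry and a table-export macro, sketched under that assumption, is:

struct reg5_def_sketch {                /* stands in for struct rtw89_reg5_def */
	unsigned int addr;
	unsigned int mask;
	unsigned int data;
};

/* plausible expansion: one (addr, mask, data) initializer per entry */
#define RTW89_DECL_RFK_WM_SKETCH(_addr, _mask, _data) \
	{ .addr = (_addr), .mask = (_mask), .data = (_data), }

static const struct reg5_def_sketch demo_defs[] = {
	RTW89_DECL_RFK_WM_SKETCH(0x12a8, 0x00000001, 0x00000001),
};

/* plausible expansion: export a symbol referring to the table */
#define RTW89_DECLARE_RFK_TBL_SKETCH(tbl) \
	const struct reg5_def_sketch *tbl##_ptr = (tbl)

RTW89_DECLARE_RFK_TBL_SKETCH(demo_defs);

The RTW89_ prefix moves these helpers into the driver-wide namespace so the same table machinery can be shared by other rtw89 chip variants without identifier clashes.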
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = {
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = {
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = {
- DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
- DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
- DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
- DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
- DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
- DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
- DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
- DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
- DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
- DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
- DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
- DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
- DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
- DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
- DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
- DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
- DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
- DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
- DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
- DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
- DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
- DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
- DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
- DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
- DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
- DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
- DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
- DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
- DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
- DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
- DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
- DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
- DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
- DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
- DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
- DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
- DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
- DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
- DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
- DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
+ RTW89_DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
+ RTW89_DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
+ RTW89_DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
+ RTW89_DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
+ RTW89_DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
+ RTW89_DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
+ RTW89_DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
+ RTW89_DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
+ RTW89_DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = {
- DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
- DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
- DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
- DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
- DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
- DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
- DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
- DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
- DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
- DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
- DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
- DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
- DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
- DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
- DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
- DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
- DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
- DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
- DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
- DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
- DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
- DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
- DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
- DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
- DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
- DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
- DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
- DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
- DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
- DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
- DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
- DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
- DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
- DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
- DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
- DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
- DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
- DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
- DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
+ RTW89_DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
+ RTW89_DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
+ RTW89_DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
+ RTW89_DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
+ RTW89_DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
+ RTW89_DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
+ RTW89_DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
+ RTW89_DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
+ RTW89_DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = {
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = {
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
- DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
- DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
- DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
- DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = {
- DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
- DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = {
- DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
- DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = {
- DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
- DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = {
- DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
- DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = {
- DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
- DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
- DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
- DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
- DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
- DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = {
- DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
- DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
- DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
- DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
- DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
- DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
- DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = {
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
- DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = {
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = {
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = {
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = {
- DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
- DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
+ RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = {
- DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
- DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
+ RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x0),
- DECL_RFK_WM(0x5820, 0x80000000, 0x1),
- DECL_RFK_WM(0x5818, 0x18000000, 0x3),
- DECL_RFK_WM(0x7820, 0x80000000, 0x0),
- DECL_RFK_WM(0x7820, 0x80000000, 0x1),
- DECL_RFK_WM(0x7818, 0x18000000, 0x3),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x3),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x3),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = {
- DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
- DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
+ RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
+ RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = {
- DECL_RFK_WC(0x12ec, 0x00008000),
- DECL_RFK_WS(0x12ec, 0x00008000),
- DECL_RFK_WC(0x5e00, 0x00000001),
- DECL_RFK_WS(0x5e00, 0x00000001),
- DECL_RFK_WC(0x32ec, 0x00008000),
- DECL_RFK_WS(0x32ec, 0x00008000),
- DECL_RFK_WC(0x7e00, 0x00000001),
- DECL_RFK_WS(0x7e00, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12ec, 0x00008000),
+ RTW89_DECL_RFK_WS(0x12ec, 0x00008000),
+ RTW89_DECL_RFK_WC(0x5e00, 0x00000001),
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32ec, 0x00008000),
+ RTW89_DECL_RFK_WS(0x32ec, 0x00008000),
+ RTW89_DECL_RFK_WC(0x7e00, 0x00000001),
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = {
- DECL_RFK_WS(0x5e00, 0x00000008),
- DECL_RFK_WS(0x5e50, 0x00000008),
- DECL_RFK_WS(0x5e10, 0x80000000),
- DECL_RFK_WS(0x5e60, 0x80000000),
- DECL_RFK_WC(0x5e00, 0x00000008),
- DECL_RFK_WC(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x5e60, 0x80000000),
+ RTW89_DECL_RFK_WC(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e50, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = {
- DECL_RFK_WS(0x7e00, 0x00000008),
- DECL_RFK_WS(0x7e50, 0x00000008),
- DECL_RFK_WS(0x7e10, 0x80000000),
- DECL_RFK_WS(0x7e60, 0x80000000),
- DECL_RFK_WC(0x7e00, 0x00000008),
- DECL_RFK_WC(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x7e60, 0x80000000),
+ RTW89_DECL_RFK_WC(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e50, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = {
- DECL_RFK_WC(0x20f4, 0x01000000),
- DECL_RFK_WS(0x20f8, 0x80000000),
- DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
- DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
- DECL_RFK_WC(0x20f0, 0x0000000f),
- DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+ RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = {
- DECL_RFK_WC(0x20f4, 0x01000000),
- DECL_RFK_WS(0x20f8, 0x80000000),
- DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
- DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
- DECL_RFK_WC(0x20f0, 0x0000000f),
- DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+ RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = {
- DECL_RFK_WC(0x12d8, 0x00000030),
- DECL_RFK_WC(0x32d8, 0x00000030),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WC(0x032c, 0x00400000),
- DECL_RFK_WS(0x032c, 0x00400000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x12dc, 0x00000002),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12d8, 0x00000030),
+ RTW89_DECL_RFK_WC(0x32d8, 0x00000030),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12dc, 0x00000002),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = {
- DECL_RFK_WS(0x12d8, 0x000000c0),
- DECL_RFK_WS(0x12d8, 0x00000800),
- DECL_RFK_WC(0x12d8, 0x00000800),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
+ RTW89_DECL_RFK_WS(0x12d8, 0x000000c0),
+ RTW89_DECL_RFK_WS(0x12d8, 0x00000800),
+ RTW89_DECL_RFK_WC(0x12d8, 0x00000800),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = {
- DECL_RFK_WC(0x12dc, 0x00000002),
- DECL_RFK_WS(0x032c, 0x00010000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
- DECL_RFK_WS(0x032c, 0x40000000),
- DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x12dc, 0x00000002),
+ RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = {
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WC(0x032c, 0x00400000),
- DECL_RFK_WS(0x032c, 0x00400000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x32dc, 0x00000002),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32dc, 0x00000002),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = {
- DECL_RFK_WS(0x32d8, 0x000000c0),
- DECL_RFK_WS(0x32d8, 0x00000800),
- DECL_RFK_WC(0x32d8, 0x00000800),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
+ RTW89_DECL_RFK_WS(0x32d8, 0x000000c0),
+ RTW89_DECL_RFK_WS(0x32d8, 0x00000800),
+ RTW89_DECL_RFK_WC(0x32d8, 0x00000800),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = {
- DECL_RFK_WC(0x32dc, 0x00000002),
- DECL_RFK_WS(0x032c, 0x00010000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
- DECL_RFK_WS(0x032c, 0x40000000),
- DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32dc, 0x00000002),
+ RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = {
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x12dc, 0x00000001),
- DECL_RFK_WS(0x12e8, 0x00000004),
- DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12dc, 0x00000001),
+ RTW89_DECL_RFK_WS(0x12e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = {
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x32dc, 0x00000001),
- DECL_RFK_WS(0x32e8, 0x00000004),
- DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32dc, 0x00000001),
+ RTW89_DECL_RFK_WS(0x32e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = {
- DECL_RFK_WC(0x12dc, 0x00000001),
- DECL_RFK_WC(0x12e8, 0x00000004),
- DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
- DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12dc, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
+ RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = {
- DECL_RFK_WC(0x32dc, 0x00000001),
- DECL_RFK_WC(0x32e8, 0x00000004),
- DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
- DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32dc, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
+ RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = {
- DECL_RFK_WS(0x5e00, 0x00000008),
- DECL_RFK_WC(0x5e10, 0x80000000),
- DECL_RFK_WS(0x5e50, 0x00000008),
- DECL_RFK_WC(0x5e60, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WS(0x030c, 0x10000000),
- DECL_RFK_WC(0x032c, 0x80000000),
- DECL_RFK_WS(0x12e0, 0x00010000),
- DECL_RFK_WS(0x12e4, 0x0c000000),
- DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
- DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
- DECL_RFK_WC(0x5e00, 0x0c000000),
- DECL_RFK_WC(0x5e50, 0x0c000000),
- DECL_RFK_WC(0x5e0c, 0x00000008),
- DECL_RFK_WC(0x5e5c, 0x00000008),
- DECL_RFK_WS(0x5e0c, 0x00000001),
- DECL_RFK_WS(0x5e5c, 0x00000001),
- DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e60, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12e0, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WC(0x5e00, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x5e50, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x5e0c, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e5c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e0c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x5e5c, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = {
- DECL_RFK_WC(0x12e4, 0x0c000000),
- DECL_RFK_WS(0x5e0c, 0x00000008),
- DECL_RFK_WS(0x5e5c, 0x00000008),
- DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WC(0x12e4, 0x0c000000),
+ RTW89_DECL_RFK_WS(0x5e0c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e5c, 0x00000008),
+ RTW89_DECL_RFK_DELAY(1),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = {
- DECL_RFK_WC(0x5e0c, 0x00000001),
- DECL_RFK_WC(0x5e5c, 0x00000001),
- DECL_RFK_WC(0x12e0, 0x00010000),
- DECL_RFK_WC(0x12a0, 0x00008000),
- DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WC(0x5e0c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x5e5c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12e0, 0x00010000),
+ RTW89_DECL_RFK_WC(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = {
- DECL_RFK_WS(0x7e00, 0x00000008),
- DECL_RFK_WC(0x7e10, 0x80000000),
- DECL_RFK_WS(0x7e50, 0x00000008),
- DECL_RFK_WC(0x7e60, 0x80000000),
- DECL_RFK_WS(0x32a0, 0x00008000),
- DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WS(0x030c, 0x10000000),
- DECL_RFK_WC(0x032c, 0x80000000),
- DECL_RFK_WS(0x32e0, 0x00010000),
- DECL_RFK_WS(0x32e4, 0x0c000000),
- DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
- DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
- DECL_RFK_WC(0x7e00, 0x0c000000),
- DECL_RFK_WC(0x7e50, 0x0c000000),
- DECL_RFK_WC(0x7e0c, 0x00000008),
- DECL_RFK_WC(0x7e5c, 0x00000008),
- DECL_RFK_WS(0x7e0c, 0x00000001),
- DECL_RFK_WS(0x7e5c, 0x00000001),
- DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e60, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32e0, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WC(0x7e00, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x7e50, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x7e0c, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e5c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e0c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x7e5c, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = {
- DECL_RFK_WC(0x32e4, 0x0c000000),
- DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
- DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
- DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WC(0x32e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = {
- DECL_RFK_WC(0x7e0c, 0x00000001),
- DECL_RFK_WC(0x7e5c, 0x00000001),
- DECL_RFK_WC(0x32e0, 0x00010000),
- DECL_RFK_WC(0x32a0, 0x00008000),
- DECL_RFK_WS(0x32a0, 0x00007000),
+ RTW89_DECL_RFK_WC(0x7e0c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x7e5c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32e0, 0x00010000),
+ RTW89_DECL_RFK_WC(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x12b8, 0x10000000),
- DECL_RFK_WS(0x58c8, 0x01000000),
- DECL_RFK_WS(0x5864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x0c1c, 0x00000004),
- DECL_RFK_WS(0x0700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x58ac, 0x08000000),
- DECL_RFK_WS(0x0c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = {
- DECL_RFK_WS(0x4490, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00007000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x12a0, 0x00080000),
- DECL_RFK_WS(0x0700, 0x01000000),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x32b8, 0x10000000),
- DECL_RFK_WS(0x78c8, 0x01000000),
- DECL_RFK_WS(0x7864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x2c1c, 0x00000004),
- DECL_RFK_WS(0x2700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x78ac, 0x08000000),
- DECL_RFK_WS(0x2c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x32b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x2c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x2700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x2c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = {
- DECL_RFK_WS(0x6490, 0x80000000),
- DECL_RFK_WS(0x32a0, 0x00007000),
- DECL_RFK_WS(0x32a0, 0x00008000),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x32a0, 0x00080000),
- DECL_RFK_WS(0x2700, 0x01000000),
- DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WS(0x6490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x2700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x12b8, 0x10000000),
- DECL_RFK_WS(0x58c8, 0x01000000),
- DECL_RFK_WS(0x78c8, 0x01000000),
- DECL_RFK_WS(0x5864, 0xc0000000),
- DECL_RFK_WS(0x7864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x0c1c, 0x00000004),
- DECL_RFK_WS(0x0700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x58ac, 0x08000000),
- DECL_RFK_WS(0x78ac, 0x08000000),
- DECL_RFK_WS(0x0c3c, 0x00000200),
- DECL_RFK_WS(0x2344, 0x80000000),
- DECL_RFK_WS(0x4490, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00007000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x12a0, 0x00080000),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x32a0, 0x00080000),
- DECL_RFK_WS(0x0700, 0x01000000),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WS(0x2344, 0x80000000),
+ RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WC(0x12b8, 0x40000000),
- DECL_RFK_WC(0x5864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x0c1c, 0x00000004),
- DECL_RFK_WC(0x0700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x12a0, 0x000ff000),
- DECL_RFK_WC(0x0700, 0x07000000),
- DECL_RFK_WC(0x5864, 0x20000000),
- DECL_RFK_WC(0x0c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x58c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WC(0x32b8, 0x40000000),
- DECL_RFK_WC(0x7864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x2c1c, 0x00000004),
- DECL_RFK_WC(0x2700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x32a0, 0x000ff000),
- DECL_RFK_WC(0x2700, 0x07000000),
- DECL_RFK_WC(0x7864, 0x20000000),
- DECL_RFK_WC(0x2c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x2c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x2700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x2700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x2c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WC(0x12b8, 0x40000000),
- DECL_RFK_WC(0x32b8, 0x40000000),
- DECL_RFK_WC(0x5864, 0xc0000000),
- DECL_RFK_WC(0x7864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x0c1c, 0x00000004),
- DECL_RFK_WC(0x0700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x12a0, 0x000ff000),
- DECL_RFK_WC(0x32a0, 0x000ff000),
- DECL_RFK_WC(0x0700, 0x07000000),
- DECL_RFK_WC(0x5864, 0x20000000),
- DECL_RFK_WC(0x7864, 0x20000000),
- DECL_RFK_WC(0x0c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x58c8, 0x01000000),
- DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = {
- DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
- DECL_RFK_WS(0x8074, 0x80000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
+ RTW89_DECL_RFK_WS(0x8074, 0x80000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = {
- DECL_RFK_WC(0x8074, 0x80000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WC(0x8074, 0x80000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = {
- DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
- DECL_RFK_WC(0x80bc, 0x00004000),
- DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
+ RTW89_DECL_RFK_WC(0x80bc, 0x00004000),
+ RTW89_DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
index 4a4a45d778ff..33e6c404ecf9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
@@ -5,54 +5,7 @@
#ifndef __RTW89_8852A_RFK_TABLE_H__
#define __RTW89_8852A_RFK_TABLE_H__
-#include "core.h"
-
-enum rtw89_rfk_flag {
- RTW89_RFK_F_WRF = 0,
- RTW89_RFK_F_WM = 1,
- RTW89_RFK_F_WS = 2,
- RTW89_RFK_F_WC = 3,
- RTW89_RFK_F_DELAY = 4,
- RTW89_RFK_F_NUM,
-};
-
-struct rtw89_rfk_tbl {
- const struct rtw89_reg5_def *defs;
- u32 size;
-};
-
-#define DECLARE_RFK_TBL(_name) \
-const struct rtw89_rfk_tbl _name ## _tbl = { \
- .defs = _name, \
- .size = ARRAY_SIZE(_name), \
-}
-
-#define DECL_RFK_WRF(_path, _addr, _mask, _data) \
- {.flag = RTW89_RFK_F_WRF, \
- .path = _path, \
- .addr = _addr, \
- .mask = _mask, \
- .data = _data,}
-
-#define DECL_RFK_WM(_addr, _mask, _data) \
- {.flag = RTW89_RFK_F_WM, \
- .addr = _addr, \
- .mask = _mask, \
- .data = _data,}
-
-#define DECL_RFK_WS(_addr, _mask) \
- {.flag = RTW89_RFK_F_WS, \
- .addr = _addr, \
- .mask = _mask,}
-
-#define DECL_RFK_WC(_addr, _mask) \
- {.flag = RTW89_RFK_F_WC, \
- .addr = _addr, \
- .mask = _mask,}
-
-#define DECL_RFK_DELAY(_data) \
- {.flag = RTW89_RFK_F_DELAY, \
- .data = _data,}
+#include "phy.h"
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
index 253b5f8fc4f9..320bcd4852c6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
@@ -1281,7 +1281,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x018, 0x00011124},
{0x000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x055, 0x00080000},
{0x056, 0x0008FFF0},
{0x057, 0x0000C485},
@@ -20496,7 +20495,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20516,7 +20515,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20542,7 +20541,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20562,7 +20561,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20588,7 +20587,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20608,7 +20607,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20622,17 +20621,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0xB0000000, 0x00000000},
{0x033, 0x0000002E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20644,15 +20643,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20664,21 +20663,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000002F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20690,15 +20689,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20710,21 +20709,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20736,15 +20735,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20756,21 +20755,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000031},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20782,15 +20781,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20802,21 +20801,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000032},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20828,15 +20827,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20848,21 +20847,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000033},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20874,15 +20873,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20894,21 +20893,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000034},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20920,15 +20919,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20940,21 +20939,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000035},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20966,15 +20965,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20986,21 +20985,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000036},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21012,15 +21011,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21032,21 +21031,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000037},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21058,15 +21057,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21078,21 +21077,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21104,15 +21103,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21124,21 +21123,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000039},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21150,15 +21149,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21170,21 +21169,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003A},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21196,15 +21195,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21216,21 +21215,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003B},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21242,15 +21241,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21262,21 +21261,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003C},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21288,15 +21287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21308,21 +21307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003D},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21334,15 +21333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21354,21 +21353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21380,15 +21379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21400,21 +21399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21426,15 +21425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21446,7 +21445,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -21596,8 +21595,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x087, 0x00000427},
{0xB0000000, 0x00000000},
{0x002, 0x00000000},
- {0x067, 0x00000052},
-
+ {0x067, 0x00000056},
};
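
[Editor's note] The radio A/B tables in this file are not flat register writes: entries whose first word has top nibble 0x8/0x9/0xA/0xB act as IF / ELSE-IF / ELSE / ENDIF markers (each IF/ELSE-IF followed by a {0x40000000, ...} condition operand), so one table can program different values per chip cut and package — which is why the hunks above flip only the branch bodies. Entries with address 0x0FE are, by long-standing Realtek table convention, delay markers rather than writes, which bears on the {0x0FE, 0x00000000} removal in the radio B hunk below. The walker sketched here is an assumption inferred from the table layout, not the driver's actual parser; cond_matches(), rf_write(), and the marker decoding are invented for illustration.

/*
 * Hypothetical walker for a condition-annotated reg2 table.
 * Marker decoding (0x8=IF, 0x9=ELIF, 0xA=ELSE, 0xB=ENDIF) is an
 * assumption inferred from the table layout, not rtw89 code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg2_def { uint32_t addr, data; };

static bool cond_matches(uint32_t cond)
{
	/* Stub: pretend only the first listed condition holds. */
	return (cond & 0x0FFFFFFF) == 0x00010000;
}

static void rf_write(uint32_t addr, uint32_t data)
{
	printf("W[0x%03X] = 0x%08X\n", (unsigned)addr, (unsigned)data);
}

static void run_table(const struct reg2_def *regs, size_t n)
{
	bool taken = true;	/* is the current branch selected? */
	bool done = false;	/* has an earlier branch already fired? */

	for (size_t i = 0; i < n; i++) {
		uint32_t op = regs[i].addr >> 28;

		if (regs[i].addr == 0x0FE) {
			/* Assumed Realtek convention: delay marker,
			 * not a register write; pause elided here. */
			continue;
		}
		switch (op) {
		case 0x8:	/* IF <cond in low bits> */
			taken = cond_matches(regs[i].addr);
			done = taken;
			break;
		case 0x9:	/* ELSE-IF <cond> */
			taken = !done && cond_matches(regs[i].addr);
			done |= taken;
			break;
		case 0xA:	/* ELSE */
			taken = !done;
			break;
		case 0xB:	/* ENDIF */
			taken = true;
			done = false;
			break;
		case 0x4:	/* condition operand, consumed above */
			break;
		default:	/* plain register write */
			if (taken)
				rf_write(regs[i].addr, regs[i].data);
			break;
		}
	}
}

int main(void)
{
	static const struct reg2_def tbl[] = {
		{ 0x80010000, 0 }, { 0x40000000, 0 },	/* IF first cut */
		{ 0x03F, 0x000000EB },
		{ 0x90010001, 0 }, { 0x40000000, 0 },	/* ELSE-IF */
		{ 0x03F, 0x0000003F },
		{ 0xA0000000, 0 },			/* ELSE */
		{ 0x03F, 0x000000EB },
		{ 0xB0000000, 0 },			/* ENDIF */
	};

	run_table(tbl, sizeof(tbl) / sizeof(tbl[0]));	/* one write fires */
	return 0;
}
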
static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
@@ -21671,7 +21669,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x018, 0x00011124},
{0x000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x055, 0x00080000},
{0x056, 0x0008FFF0},
{0x057, 0x0000C485},
@@ -41142,7 +41139,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41162,7 +41159,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41188,7 +41185,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41208,7 +41205,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41234,7 +41231,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41254,7 +41251,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41268,17 +41265,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0xB0000000, 0x00000000},
{0x033, 0x0000002E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41290,15 +41287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41310,21 +41307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000002F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41336,15 +41333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41356,21 +41353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41382,15 +41379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41402,21 +41399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000031},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41428,15 +41425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41448,21 +41445,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000032},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41474,15 +41471,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41494,21 +41491,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000033},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41520,15 +41517,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41540,21 +41537,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000034},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41566,15 +41563,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41586,21 +41583,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000035},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41612,15 +41609,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41632,21 +41629,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000036},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41658,15 +41655,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41678,21 +41675,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000037},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41704,15 +41701,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41724,21 +41721,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41750,15 +41747,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41770,21 +41767,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000039},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41796,15 +41793,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41816,21 +41813,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003A},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41842,15 +41839,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41862,21 +41859,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003B},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41888,15 +41885,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41908,21 +41905,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003C},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41934,15 +41931,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41954,21 +41951,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003D},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41980,15 +41977,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42000,21 +41997,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42026,15 +42023,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42046,21 +42043,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42072,15 +42069,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42092,7 +42089,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -42243,8 +42240,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x087, 0x00000427},
{0xB0000000, 0x00000000},
{0x002, 0x00000000},
- {0x067, 0x00000052},
-
+ {0x067, 0x00000056},
};
static const struct rtw89_reg2_def rtw89_8852a_phy_nctl_regs[] = {
@@ -43313,7 +43309,7 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8852a_txpwr_byrate[] = {
{ 1, 0, 4, 0, 4, 0x00000000, },
};
-static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43322,7 +43318,7 @@ static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43331,7 +43327,7 @@ static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43340,7 +43336,7 @@ static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
+static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
{0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11},
{0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
@@ -43349,35 +43345,35 @@ static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
};
-static const u8 _txpwr_track_delta_swingidx_2gb_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2gb_n[] = {
0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7};
-static const u8 _txpwr_track_delta_swingidx_2gb_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2gb_p[] = {
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
-static const u8 _txpwr_track_delta_swingidx_2ga_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2ga_n[] = {
0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5};
-static const u8 _txpwr_track_delta_swingidx_2ga_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2ga_p[] = {
0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5};
-static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10};
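
[Editor's note] The u8 → s8 retype above matters because the "_n"/"_p" suffixes indicate deltas applied with negative and positive sign during TX-power tracking: the compensation for a thermal reading below the reference subtracts, so the consumer wants signed element types (and signed pointers cannot bind to u8 arrays without a cast). A minimal sketch of that consumption follows; every name in it is invented, and the pointer-based consumer is an assumption about how such tables are typically wired up, not the driver's code.

/*
 * Hypothetical reduction of the u8 -> s8 change above: the
 * tracking code consumes these tables through signed pointers
 * and applies "_n" (negative-thermal-delta) entries negated.
 */
#include <stdint.h>
#include <stdio.h>

typedef int8_t s8;

struct track_tbls {
	const s8 *swing_n;	/* deltas subtracted (thermal below ref) */
	const s8 *swing_p;	/* deltas added (thermal above ref) */
};

static const s8 delta_n[] = { 0, 1, 1, 2 };	/* excerpt-sized stand-in */
static const s8 delta_p[] = { 0, 1, 2, 3 };

static s8 compensate(const struct track_tbls *t, int idx, int thermal_up)
{
	/* A u8-typed table could not bind to the s8 pointers above
	 * without a cast, and the negation relies on signed math. */
	return thermal_up ? t->swing_p[idx] : (s8)-t->swing_n[idx];
}

int main(void)
{
	struct track_tbls t = { delta_n, delta_p };

	printf("%d\n", compensate(&t, 3, 0));	/* prints -2 */
	return 0;
}
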
@@ -43563,6 +43559,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][0] = 76,
[0][0][0][0][RTW89_CN][0] = 56,
[0][0][0][0][RTW89_QATAR][0] = 56,
+ [0][0][0][0][RTW89_UK][0] = 56,
[0][0][0][0][RTW89_FCC][1] = 76,
[0][0][0][0][RTW89_ETSI][1] = 56,
[0][0][0][0][RTW89_MKK][1] = 68,
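
[Editor's note] From here each hunk inserts one RTW89_UK row per channel into rtw89_8852a_txpwr_lmt_2g. The first two indices are RTW89_2G_BW_NUM and RTW89_NTX_NUM per the declaration in the hunk headers; by the entry pattern, the remaining four read as rate section, beamforming, regulatory domain, and channel. The 127 entries (S8_MAX) appear wherever a domain never enables a channel — channel 13 everywhere but MKK, for instance — which reads naturally as a neutral filler under a min()-style clamp against the by-rate power. The sketch below works under that assumption; its dimension sizes, names, and the clamp itself are illustrative stand-ins, not the driver's constants or logic.

/*
 * Hypothetical lookup over a 6-D limit table like the one gaining
 * RTW89_UK rows above: final TX power as the by-rate value clamped
 * by the regulatory limit.
 */
#include <stdint.h>
#include <stdio.h>

typedef int8_t s8;

enum { BW = 1, NTX = 1, RS = 1, BF = 1, REGD = 2, CH = 14 };
#define LMT_FILLER 127	/* S8_MAX: neutral under the clamp below */

static s8 tx_power(const s8 lmt[BW][NTX][RS][BF][REGD][CH],
		   s8 byrate, int bw, int ntx, int rs, int bf,
		   int regd, int ch)
{
	s8 l = lmt[bw][ntx][rs][bf][regd][ch];

	/* 127 rows (e.g. channel 13 outside MKK above) drop out:
	 * min(byrate, 127) == byrate. */
	return byrate < l ? byrate : l;
}

int main(void)
{
	static const s8 lmt[BW][NTX][RS][BF][REGD][CH] = {
		[0][0][0][0][0][0] = 56,	/* e.g. an RTW89_UK row */
		[0][0][0][0][0][13] = LMT_FILLER,
	};

	printf("%d\n", tx_power(lmt, 60, 0, 0, 0, 0, 0, 0));	/* 56 */
	return 0;
}
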
@@ -43574,6 +43571,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][1] = 76,
[0][0][0][0][RTW89_CN][1] = 56,
[0][0][0][0][RTW89_QATAR][1] = 56,
+ [0][0][0][0][RTW89_UK][1] = 56,
[0][0][0][0][RTW89_FCC][2] = 76,
[0][0][0][0][RTW89_ETSI][2] = 56,
[0][0][0][0][RTW89_MKK][2] = 68,
@@ -43585,6 +43583,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][2] = 76,
[0][0][0][0][RTW89_CN][2] = 56,
[0][0][0][0][RTW89_QATAR][2] = 56,
+ [0][0][0][0][RTW89_UK][2] = 56,
[0][0][0][0][RTW89_FCC][3] = 76,
[0][0][0][0][RTW89_ETSI][3] = 56,
[0][0][0][0][RTW89_MKK][3] = 68,
@@ -43596,6 +43595,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][3] = 76,
[0][0][0][0][RTW89_CN][3] = 56,
[0][0][0][0][RTW89_QATAR][3] = 56,
+ [0][0][0][0][RTW89_UK][3] = 56,
[0][0][0][0][RTW89_FCC][4] = 76,
[0][0][0][0][RTW89_ETSI][4] = 56,
[0][0][0][0][RTW89_MKK][4] = 68,
@@ -43607,6 +43607,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][4] = 76,
[0][0][0][0][RTW89_CN][4] = 56,
[0][0][0][0][RTW89_QATAR][4] = 56,
+ [0][0][0][0][RTW89_UK][4] = 56,
[0][0][0][0][RTW89_FCC][5] = 76,
[0][0][0][0][RTW89_ETSI][5] = 56,
[0][0][0][0][RTW89_MKK][5] = 68,
@@ -43618,6 +43619,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][5] = 76,
[0][0][0][0][RTW89_CN][5] = 56,
[0][0][0][0][RTW89_QATAR][5] = 56,
+ [0][0][0][0][RTW89_UK][5] = 56,
[0][0][0][0][RTW89_FCC][6] = 76,
[0][0][0][0][RTW89_ETSI][6] = 56,
[0][0][0][0][RTW89_MKK][6] = 68,
@@ -43629,6 +43631,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][6] = 76,
[0][0][0][0][RTW89_CN][6] = 56,
[0][0][0][0][RTW89_QATAR][6] = 56,
+ [0][0][0][0][RTW89_UK][6] = 56,
[0][0][0][0][RTW89_FCC][7] = 76,
[0][0][0][0][RTW89_ETSI][7] = 56,
[0][0][0][0][RTW89_MKK][7] = 68,
@@ -43640,6 +43643,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][7] = 76,
[0][0][0][0][RTW89_CN][7] = 56,
[0][0][0][0][RTW89_QATAR][7] = 56,
+ [0][0][0][0][RTW89_UK][7] = 56,
[0][0][0][0][RTW89_FCC][8] = 76,
[0][0][0][0][RTW89_ETSI][8] = 56,
[0][0][0][0][RTW89_MKK][8] = 68,
@@ -43651,6 +43655,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][8] = 76,
[0][0][0][0][RTW89_CN][8] = 56,
[0][0][0][0][RTW89_QATAR][8] = 56,
+ [0][0][0][0][RTW89_UK][8] = 56,
[0][0][0][0][RTW89_FCC][9] = 76,
[0][0][0][0][RTW89_ETSI][9] = 56,
[0][0][0][0][RTW89_MKK][9] = 68,
@@ -43662,6 +43667,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][9] = 76,
[0][0][0][0][RTW89_CN][9] = 56,
[0][0][0][0][RTW89_QATAR][9] = 56,
+ [0][0][0][0][RTW89_UK][9] = 56,
[0][0][0][0][RTW89_FCC][10] = 76,
[0][0][0][0][RTW89_ETSI][10] = 56,
[0][0][0][0][RTW89_MKK][10] = 68,
@@ -43673,6 +43679,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][10] = 76,
[0][0][0][0][RTW89_CN][10] = 56,
[0][0][0][0][RTW89_QATAR][10] = 56,
+ [0][0][0][0][RTW89_UK][10] = 56,
[0][0][0][0][RTW89_FCC][11] = 68,
[0][0][0][0][RTW89_ETSI][11] = 56,
[0][0][0][0][RTW89_MKK][11] = 68,
@@ -43684,6 +43691,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][11] = 68,
[0][0][0][0][RTW89_CN][11] = 56,
[0][0][0][0][RTW89_QATAR][11] = 56,
+ [0][0][0][0][RTW89_UK][11] = 56,
[0][0][0][0][RTW89_FCC][12] = 48,
[0][0][0][0][RTW89_ETSI][12] = 56,
[0][0][0][0][RTW89_MKK][12] = 68,
@@ -43695,6 +43703,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][12] = 48,
[0][0][0][0][RTW89_CN][12] = 56,
[0][0][0][0][RTW89_QATAR][12] = 56,
+ [0][0][0][0][RTW89_UK][12] = 56,
[0][0][0][0][RTW89_FCC][13] = 127,
[0][0][0][0][RTW89_ETSI][13] = 127,
[0][0][0][0][RTW89_MKK][13] = 76,
@@ -43706,6 +43715,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_MEXICO][13] = 127,
[0][0][0][0][RTW89_CN][13] = 127,
[0][0][0][0][RTW89_QATAR][13] = 127,
+ [0][0][0][0][RTW89_UK][13] = 127,
[0][1][0][0][RTW89_FCC][0] = 74,
[0][1][0][0][RTW89_ETSI][0] = 44,
[0][1][0][0][RTW89_MKK][0] = 56,
@@ -43717,6 +43727,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][0] = 74,
[0][1][0][0][RTW89_CN][0] = 44,
[0][1][0][0][RTW89_QATAR][0] = 44,
+ [0][1][0][0][RTW89_UK][0] = 44,
[0][1][0][0][RTW89_FCC][1] = 76,
[0][1][0][0][RTW89_ETSI][1] = 44,
[0][1][0][0][RTW89_MKK][1] = 56,
@@ -43728,6 +43739,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][1] = 76,
[0][1][0][0][RTW89_CN][1] = 44,
[0][1][0][0][RTW89_QATAR][1] = 44,
+ [0][1][0][0][RTW89_UK][1] = 44,
[0][1][0][0][RTW89_FCC][2] = 76,
[0][1][0][0][RTW89_ETSI][2] = 44,
[0][1][0][0][RTW89_MKK][2] = 56,
@@ -43739,6 +43751,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][2] = 76,
[0][1][0][0][RTW89_CN][2] = 44,
[0][1][0][0][RTW89_QATAR][2] = 44,
+ [0][1][0][0][RTW89_UK][2] = 44,
[0][1][0][0][RTW89_FCC][3] = 76,
[0][1][0][0][RTW89_ETSI][3] = 44,
[0][1][0][0][RTW89_MKK][3] = 56,
@@ -43750,6 +43763,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][3] = 76,
[0][1][0][0][RTW89_CN][3] = 44,
[0][1][0][0][RTW89_QATAR][3] = 44,
+ [0][1][0][0][RTW89_UK][3] = 44,
[0][1][0][0][RTW89_FCC][4] = 76,
[0][1][0][0][RTW89_ETSI][4] = 44,
[0][1][0][0][RTW89_MKK][4] = 56,
@@ -43761,6 +43775,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][4] = 76,
[0][1][0][0][RTW89_CN][4] = 44,
[0][1][0][0][RTW89_QATAR][4] = 44,
+ [0][1][0][0][RTW89_UK][4] = 44,
[0][1][0][0][RTW89_FCC][5] = 76,
[0][1][0][0][RTW89_ETSI][5] = 44,
[0][1][0][0][RTW89_MKK][5] = 56,
@@ -43772,6 +43787,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][5] = 76,
[0][1][0][0][RTW89_CN][5] = 44,
[0][1][0][0][RTW89_QATAR][5] = 44,
+ [0][1][0][0][RTW89_UK][5] = 44,
[0][1][0][0][RTW89_FCC][6] = 76,
[0][1][0][0][RTW89_ETSI][6] = 44,
[0][1][0][0][RTW89_MKK][6] = 56,
@@ -43783,6 +43799,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][6] = 76,
[0][1][0][0][RTW89_CN][6] = 44,
[0][1][0][0][RTW89_QATAR][6] = 44,
+ [0][1][0][0][RTW89_UK][6] = 44,
[0][1][0][0][RTW89_FCC][7] = 76,
[0][1][0][0][RTW89_ETSI][7] = 44,
[0][1][0][0][RTW89_MKK][7] = 56,
@@ -43794,6 +43811,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][7] = 76,
[0][1][0][0][RTW89_CN][7] = 44,
[0][1][0][0][RTW89_QATAR][7] = 44,
+ [0][1][0][0][RTW89_UK][7] = 44,
[0][1][0][0][RTW89_FCC][8] = 76,
[0][1][0][0][RTW89_ETSI][8] = 44,
[0][1][0][0][RTW89_MKK][8] = 56,
@@ -43805,6 +43823,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][8] = 76,
[0][1][0][0][RTW89_CN][8] = 44,
[0][1][0][0][RTW89_QATAR][8] = 44,
+ [0][1][0][0][RTW89_UK][8] = 44,
[0][1][0][0][RTW89_FCC][9] = 76,
[0][1][0][0][RTW89_ETSI][9] = 44,
[0][1][0][0][RTW89_MKK][9] = 56,
@@ -43816,6 +43835,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][9] = 76,
[0][1][0][0][RTW89_CN][9] = 44,
[0][1][0][0][RTW89_QATAR][9] = 44,
+ [0][1][0][0][RTW89_UK][9] = 44,
[0][1][0][0][RTW89_FCC][10] = 62,
[0][1][0][0][RTW89_ETSI][10] = 44,
[0][1][0][0][RTW89_MKK][10] = 56,
@@ -43827,6 +43847,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][10] = 62,
[0][1][0][0][RTW89_CN][10] = 44,
[0][1][0][0][RTW89_QATAR][10] = 44,
+ [0][1][0][0][RTW89_UK][10] = 44,
[0][1][0][0][RTW89_FCC][11] = 52,
[0][1][0][0][RTW89_ETSI][11] = 44,
[0][1][0][0][RTW89_MKK][11] = 56,
@@ -43838,6 +43859,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][11] = 52,
[0][1][0][0][RTW89_CN][11] = 44,
[0][1][0][0][RTW89_QATAR][11] = 44,
+ [0][1][0][0][RTW89_UK][11] = 44,
[0][1][0][0][RTW89_FCC][12] = 38,
[0][1][0][0][RTW89_ETSI][12] = 44,
[0][1][0][0][RTW89_MKK][12] = 56,
@@ -43849,6 +43871,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][12] = 38,
[0][1][0][0][RTW89_CN][12] = 44,
[0][1][0][0][RTW89_QATAR][12] = 44,
+ [0][1][0][0][RTW89_UK][12] = 44,
[0][1][0][0][RTW89_FCC][13] = 127,
[0][1][0][0][RTW89_ETSI][13] = 127,
[0][1][0][0][RTW89_MKK][13] = 64,
@@ -43860,6 +43883,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_MEXICO][13] = 127,
[0][1][0][0][RTW89_CN][13] = 127,
[0][1][0][0][RTW89_QATAR][13] = 127,
+ [0][1][0][0][RTW89_UK][13] = 127,
[1][0][0][0][RTW89_FCC][0] = 127,
[1][0][0][0][RTW89_ETSI][0] = 127,
[1][0][0][0][RTW89_MKK][0] = 127,
@@ -43871,6 +43895,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][0] = 127,
[1][0][0][0][RTW89_CN][0] = 127,
[1][0][0][0][RTW89_QATAR][0] = 127,
+ [1][0][0][0][RTW89_UK][0] = 127,
[1][0][0][0][RTW89_FCC][1] = 127,
[1][0][0][0][RTW89_ETSI][1] = 127,
[1][0][0][0][RTW89_MKK][1] = 127,
@@ -43882,6 +43907,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][1] = 127,
[1][0][0][0][RTW89_CN][1] = 127,
[1][0][0][0][RTW89_QATAR][1] = 127,
+ [1][0][0][0][RTW89_UK][1] = 127,
[1][0][0][0][RTW89_FCC][2] = 60,
[1][0][0][0][RTW89_ETSI][2] = 58,
[1][0][0][0][RTW89_MKK][2] = 68,
@@ -43893,6 +43919,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][2] = 60,
[1][0][0][0][RTW89_CN][2] = 58,
[1][0][0][0][RTW89_QATAR][2] = 58,
+ [1][0][0][0][RTW89_UK][2] = 58,
[1][0][0][0][RTW89_FCC][3] = 60,
[1][0][0][0][RTW89_ETSI][3] = 58,
[1][0][0][0][RTW89_MKK][3] = 68,
@@ -43904,6 +43931,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][3] = 60,
[1][0][0][0][RTW89_CN][3] = 58,
[1][0][0][0][RTW89_QATAR][3] = 58,
+ [1][0][0][0][RTW89_UK][3] = 58,
[1][0][0][0][RTW89_FCC][4] = 60,
[1][0][0][0][RTW89_ETSI][4] = 58,
[1][0][0][0][RTW89_MKK][4] = 68,
@@ -43915,6 +43943,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][4] = 60,
[1][0][0][0][RTW89_CN][4] = 58,
[1][0][0][0][RTW89_QATAR][4] = 58,
+ [1][0][0][0][RTW89_UK][4] = 58,
[1][0][0][0][RTW89_FCC][5] = 60,
[1][0][0][0][RTW89_ETSI][5] = 58,
[1][0][0][0][RTW89_MKK][5] = 68,
@@ -43926,6 +43955,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][5] = 60,
[1][0][0][0][RTW89_CN][5] = 58,
[1][0][0][0][RTW89_QATAR][5] = 58,
+ [1][0][0][0][RTW89_UK][5] = 58,
[1][0][0][0][RTW89_FCC][6] = 46,
[1][0][0][0][RTW89_ETSI][6] = 58,
[1][0][0][0][RTW89_MKK][6] = 68,
@@ -43937,6 +43967,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][6] = 46,
[1][0][0][0][RTW89_CN][6] = 58,
[1][0][0][0][RTW89_QATAR][6] = 58,
+ [1][0][0][0][RTW89_UK][6] = 58,
[1][0][0][0][RTW89_FCC][7] = 46,
[1][0][0][0][RTW89_ETSI][7] = 58,
[1][0][0][0][RTW89_MKK][7] = 68,
@@ -43948,6 +43979,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][7] = 46,
[1][0][0][0][RTW89_CN][7] = 58,
[1][0][0][0][RTW89_QATAR][7] = 58,
+ [1][0][0][0][RTW89_UK][7] = 58,
[1][0][0][0][RTW89_FCC][8] = 46,
[1][0][0][0][RTW89_ETSI][8] = 58,
[1][0][0][0][RTW89_MKK][8] = 68,
@@ -43959,6 +43991,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][8] = 46,
[1][0][0][0][RTW89_CN][8] = 58,
[1][0][0][0][RTW89_QATAR][8] = 58,
+ [1][0][0][0][RTW89_UK][8] = 58,
[1][0][0][0][RTW89_FCC][9] = 32,
[1][0][0][0][RTW89_ETSI][9] = 58,
[1][0][0][0][RTW89_MKK][9] = 68,
@@ -43970,6 +44003,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][9] = 32,
[1][0][0][0][RTW89_CN][9] = 58,
[1][0][0][0][RTW89_QATAR][9] = 58,
+ [1][0][0][0][RTW89_UK][9] = 58,
[1][0][0][0][RTW89_FCC][10] = 32,
[1][0][0][0][RTW89_ETSI][10] = 58,
[1][0][0][0][RTW89_MKK][10] = 68,
@@ -43981,6 +44015,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][10] = 32,
[1][0][0][0][RTW89_CN][10] = 58,
[1][0][0][0][RTW89_QATAR][10] = 58,
+ [1][0][0][0][RTW89_UK][10] = 58,
[1][0][0][0][RTW89_FCC][11] = 127,
[1][0][0][0][RTW89_ETSI][11] = 127,
[1][0][0][0][RTW89_MKK][11] = 127,
@@ -43992,6 +44027,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][11] = 127,
[1][0][0][0][RTW89_CN][11] = 127,
[1][0][0][0][RTW89_QATAR][11] = 127,
+ [1][0][0][0][RTW89_UK][11] = 127,
[1][0][0][0][RTW89_FCC][12] = 127,
[1][0][0][0][RTW89_ETSI][12] = 127,
[1][0][0][0][RTW89_MKK][12] = 127,
@@ -44003,6 +44039,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][12] = 127,
[1][0][0][0][RTW89_CN][12] = 127,
[1][0][0][0][RTW89_QATAR][12] = 127,
+ [1][0][0][0][RTW89_UK][12] = 127,
[1][0][0][0][RTW89_FCC][13] = 127,
[1][0][0][0][RTW89_ETSI][13] = 127,
[1][0][0][0][RTW89_MKK][13] = 127,
@@ -44014,6 +44051,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MEXICO][13] = 127,
[1][0][0][0][RTW89_CN][13] = 127,
[1][0][0][0][RTW89_QATAR][13] = 127,
+ [1][0][0][0][RTW89_UK][13] = 127,
[1][1][0][0][RTW89_FCC][0] = 127,
[1][1][0][0][RTW89_ETSI][0] = 127,
[1][1][0][0][RTW89_MKK][0] = 127,
@@ -44025,6 +44063,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][0] = 127,
[1][1][0][0][RTW89_CN][0] = 127,
[1][1][0][0][RTW89_QATAR][0] = 127,
+ [1][1][0][0][RTW89_UK][0] = 127,
[1][1][0][0][RTW89_FCC][1] = 127,
[1][1][0][0][RTW89_ETSI][1] = 127,
[1][1][0][0][RTW89_MKK][1] = 127,
@@ -44036,6 +44075,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][1] = 127,
[1][1][0][0][RTW89_CN][1] = 127,
[1][1][0][0][RTW89_QATAR][1] = 127,
+ [1][1][0][0][RTW89_UK][1] = 127,
[1][1][0][0][RTW89_FCC][2] = 48,
[1][1][0][0][RTW89_ETSI][2] = 46,
[1][1][0][0][RTW89_MKK][2] = 56,
@@ -44047,6 +44087,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][2] = 48,
[1][1][0][0][RTW89_CN][2] = 46,
[1][1][0][0][RTW89_QATAR][2] = 46,
+ [1][1][0][0][RTW89_UK][2] = 46,
[1][1][0][0][RTW89_FCC][3] = 48,
[1][1][0][0][RTW89_ETSI][3] = 46,
[1][1][0][0][RTW89_MKK][3] = 56,
@@ -44058,6 +44099,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][3] = 48,
[1][1][0][0][RTW89_CN][3] = 46,
[1][1][0][0][RTW89_QATAR][3] = 46,
+ [1][1][0][0][RTW89_UK][3] = 46,
[1][1][0][0][RTW89_FCC][4] = 48,
[1][1][0][0][RTW89_ETSI][4] = 46,
[1][1][0][0][RTW89_MKK][4] = 56,
@@ -44069,6 +44111,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][4] = 48,
[1][1][0][0][RTW89_CN][4] = 46,
[1][1][0][0][RTW89_QATAR][4] = 46,
+ [1][1][0][0][RTW89_UK][4] = 46,
[1][1][0][0][RTW89_FCC][5] = 58,
[1][1][0][0][RTW89_ETSI][5] = 46,
[1][1][0][0][RTW89_MKK][5] = 56,
@@ -44080,6 +44123,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][5] = 58,
[1][1][0][0][RTW89_CN][5] = 46,
[1][1][0][0][RTW89_QATAR][5] = 46,
+ [1][1][0][0][RTW89_UK][5] = 46,
[1][1][0][0][RTW89_FCC][6] = 46,
[1][1][0][0][RTW89_ETSI][6] = 46,
[1][1][0][0][RTW89_MKK][6] = 56,
@@ -44091,6 +44135,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][6] = 46,
[1][1][0][0][RTW89_CN][6] = 46,
[1][1][0][0][RTW89_QATAR][6] = 46,
+ [1][1][0][0][RTW89_UK][6] = 46,
[1][1][0][0][RTW89_FCC][7] = 46,
[1][1][0][0][RTW89_ETSI][7] = 46,
[1][1][0][0][RTW89_MKK][7] = 56,
@@ -44102,6 +44147,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][7] = 46,
[1][1][0][0][RTW89_CN][7] = 46,
[1][1][0][0][RTW89_QATAR][7] = 46,
+ [1][1][0][0][RTW89_UK][7] = 46,
[1][1][0][0][RTW89_FCC][8] = 46,
[1][1][0][0][RTW89_ETSI][8] = 46,
[1][1][0][0][RTW89_MKK][8] = 56,
@@ -44113,6 +44159,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][8] = 46,
[1][1][0][0][RTW89_CN][8] = 46,
[1][1][0][0][RTW89_QATAR][8] = 46,
+ [1][1][0][0][RTW89_UK][8] = 46,
[1][1][0][0][RTW89_FCC][9] = 24,
[1][1][0][0][RTW89_ETSI][9] = 46,
[1][1][0][0][RTW89_MKK][9] = 56,
@@ -44124,6 +44171,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][9] = 24,
[1][1][0][0][RTW89_CN][9] = 46,
[1][1][0][0][RTW89_QATAR][9] = 46,
+ [1][1][0][0][RTW89_UK][9] = 46,
[1][1][0][0][RTW89_FCC][10] = 24,
[1][1][0][0][RTW89_ETSI][10] = 46,
[1][1][0][0][RTW89_MKK][10] = 56,
@@ -44135,6 +44183,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][10] = 24,
[1][1][0][0][RTW89_CN][10] = 46,
[1][1][0][0][RTW89_QATAR][10] = 46,
+ [1][1][0][0][RTW89_UK][10] = 46,
[1][1][0][0][RTW89_FCC][11] = 127,
[1][1][0][0][RTW89_ETSI][11] = 127,
[1][1][0][0][RTW89_MKK][11] = 127,
@@ -44146,6 +44195,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][11] = 127,
[1][1][0][0][RTW89_CN][11] = 127,
[1][1][0][0][RTW89_QATAR][11] = 127,
+ [1][1][0][0][RTW89_UK][11] = 127,
[1][1][0][0][RTW89_FCC][12] = 127,
[1][1][0][0][RTW89_ETSI][12] = 127,
[1][1][0][0][RTW89_MKK][12] = 127,
@@ -44157,6 +44207,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][12] = 127,
[1][1][0][0][RTW89_CN][12] = 127,
[1][1][0][0][RTW89_QATAR][12] = 127,
+ [1][1][0][0][RTW89_UK][12] = 127,
[1][1][0][0][RTW89_FCC][13] = 127,
[1][1][0][0][RTW89_ETSI][13] = 127,
[1][1][0][0][RTW89_MKK][13] = 127,
@@ -44168,6 +44219,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MEXICO][13] = 127,
[1][1][0][0][RTW89_CN][13] = 127,
[1][1][0][0][RTW89_QATAR][13] = 127,
+ [1][1][0][0][RTW89_UK][13] = 127,
[0][0][1][0][RTW89_FCC][0] = 66,
[0][0][1][0][RTW89_ETSI][0] = 58,
[0][0][1][0][RTW89_MKK][0] = 76,
@@ -44179,6 +44231,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][0] = 66,
[0][0][1][0][RTW89_CN][0] = 58,
[0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 58,
[0][0][1][0][RTW89_FCC][1] = 66,
[0][0][1][0][RTW89_ETSI][1] = 58,
[0][0][1][0][RTW89_MKK][1] = 76,
@@ -44190,6 +44243,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][1] = 66,
[0][0][1][0][RTW89_CN][1] = 58,
[0][0][1][0][RTW89_QATAR][1] = 58,
+ [0][0][1][0][RTW89_UK][1] = 58,
[0][0][1][0][RTW89_FCC][2] = 70,
[0][0][1][0][RTW89_ETSI][2] = 58,
[0][0][1][0][RTW89_MKK][2] = 76,
@@ -44201,6 +44255,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][2] = 70,
[0][0][1][0][RTW89_CN][2] = 58,
[0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 58,
[0][0][1][0][RTW89_FCC][3] = 74,
[0][0][1][0][RTW89_ETSI][3] = 58,
[0][0][1][0][RTW89_MKK][3] = 76,
@@ -44212,6 +44267,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][3] = 74,
[0][0][1][0][RTW89_CN][3] = 58,
[0][0][1][0][RTW89_QATAR][3] = 58,
+ [0][0][1][0][RTW89_UK][3] = 58,
[0][0][1][0][RTW89_FCC][4] = 78,
[0][0][1][0][RTW89_ETSI][4] = 58,
[0][0][1][0][RTW89_MKK][4] = 76,
@@ -44223,6 +44279,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][4] = 78,
[0][0][1][0][RTW89_CN][4] = 58,
[0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 58,
[0][0][1][0][RTW89_FCC][5] = 78,
[0][0][1][0][RTW89_ETSI][5] = 58,
[0][0][1][0][RTW89_MKK][5] = 76,
@@ -44234,6 +44291,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][5] = 78,
[0][0][1][0][RTW89_CN][5] = 58,
[0][0][1][0][RTW89_QATAR][5] = 58,
+ [0][0][1][0][RTW89_UK][5] = 58,
[0][0][1][0][RTW89_FCC][6] = 78,
[0][0][1][0][RTW89_ETSI][6] = 58,
[0][0][1][0][RTW89_MKK][6] = 76,
@@ -44245,6 +44303,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][6] = 78,
[0][0][1][0][RTW89_CN][6] = 58,
[0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 58,
[0][0][1][0][RTW89_FCC][7] = 74,
[0][0][1][0][RTW89_ETSI][7] = 58,
[0][0][1][0][RTW89_MKK][7] = 76,
@@ -44256,6 +44315,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][7] = 74,
[0][0][1][0][RTW89_CN][7] = 58,
[0][0][1][0][RTW89_QATAR][7] = 58,
+ [0][0][1][0][RTW89_UK][7] = 58,
[0][0][1][0][RTW89_FCC][8] = 70,
[0][0][1][0][RTW89_ETSI][8] = 58,
[0][0][1][0][RTW89_MKK][8] = 76,
@@ -44267,6 +44327,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][8] = 70,
[0][0][1][0][RTW89_CN][8] = 58,
[0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 58,
[0][0][1][0][RTW89_FCC][9] = 66,
[0][0][1][0][RTW89_ETSI][9] = 58,
[0][0][1][0][RTW89_MKK][9] = 76,
@@ -44278,6 +44339,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][9] = 66,
[0][0][1][0][RTW89_CN][9] = 58,
[0][0][1][0][RTW89_QATAR][9] = 58,
+ [0][0][1][0][RTW89_UK][9] = 58,
[0][0][1][0][RTW89_FCC][10] = 66,
[0][0][1][0][RTW89_ETSI][10] = 58,
[0][0][1][0][RTW89_MKK][10] = 76,
@@ -44289,6 +44351,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][10] = 66,
[0][0][1][0][RTW89_CN][10] = 58,
[0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 58,
[0][0][1][0][RTW89_FCC][11] = 56,
[0][0][1][0][RTW89_ETSI][11] = 58,
[0][0][1][0][RTW89_MKK][11] = 76,
@@ -44300,6 +44363,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][11] = 56,
[0][0][1][0][RTW89_CN][11] = 58,
[0][0][1][0][RTW89_QATAR][11] = 58,
+ [0][0][1][0][RTW89_UK][11] = 58,
[0][0][1][0][RTW89_FCC][12] = 52,
[0][0][1][0][RTW89_ETSI][12] = 58,
[0][0][1][0][RTW89_MKK][12] = 76,
@@ -44311,6 +44375,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][12] = 52,
[0][0][1][0][RTW89_CN][12] = 58,
[0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 58,
[0][0][1][0][RTW89_FCC][13] = 127,
[0][0][1][0][RTW89_ETSI][13] = 127,
[0][0][1][0][RTW89_MKK][13] = 127,
@@ -44322,6 +44387,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][13] = 127,
[0][0][1][0][RTW89_CN][13] = 127,
[0][0][1][0][RTW89_QATAR][13] = 127,
+ [0][0][1][0][RTW89_UK][13] = 127,
[0][1][1][0][RTW89_FCC][0] = 62,
[0][1][1][0][RTW89_ETSI][0] = 46,
[0][1][1][0][RTW89_MKK][0] = 64,
@@ -44333,6 +44399,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 62,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][1] = 62,
[0][1][1][0][RTW89_ETSI][1] = 46,
[0][1][1][0][RTW89_MKK][1] = 64,
@@ -44344,6 +44411,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][1] = 62,
[0][1][1][0][RTW89_CN][1] = 46,
[0][1][1][0][RTW89_QATAR][1] = 46,
+ [0][1][1][0][RTW89_UK][1] = 46,
[0][1][1][0][RTW89_FCC][2] = 66,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 64,
@@ -44355,6 +44423,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][2] = 66,
[0][1][1][0][RTW89_CN][2] = 46,
[0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 46,
[0][1][1][0][RTW89_FCC][3] = 70,
[0][1][1][0][RTW89_ETSI][3] = 46,
[0][1][1][0][RTW89_MKK][3] = 64,
@@ -44366,6 +44435,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][3] = 70,
[0][1][1][0][RTW89_CN][3] = 46,
[0][1][1][0][RTW89_QATAR][3] = 46,
+ [0][1][1][0][RTW89_UK][3] = 46,
[0][1][1][0][RTW89_FCC][4] = 78,
[0][1][1][0][RTW89_ETSI][4] = 46,
[0][1][1][0][RTW89_MKK][4] = 64,
@@ -44377,6 +44447,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][4] = 78,
[0][1][1][0][RTW89_CN][4] = 46,
[0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 46,
[0][1][1][0][RTW89_FCC][5] = 78,
[0][1][1][0][RTW89_ETSI][5] = 46,
[0][1][1][0][RTW89_MKK][5] = 64,
@@ -44388,6 +44459,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][5] = 78,
[0][1][1][0][RTW89_CN][5] = 46,
[0][1][1][0][RTW89_QATAR][5] = 46,
+ [0][1][1][0][RTW89_UK][5] = 46,
[0][1][1][0][RTW89_FCC][6] = 78,
[0][1][1][0][RTW89_ETSI][6] = 46,
[0][1][1][0][RTW89_MKK][6] = 64,
@@ -44399,6 +44471,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][6] = 78,
[0][1][1][0][RTW89_CN][6] = 46,
[0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 46,
[0][1][1][0][RTW89_FCC][7] = 70,
[0][1][1][0][RTW89_ETSI][7] = 46,
[0][1][1][0][RTW89_MKK][7] = 64,
@@ -44410,6 +44483,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][7] = 70,
[0][1][1][0][RTW89_CN][7] = 46,
[0][1][1][0][RTW89_QATAR][7] = 46,
+ [0][1][1][0][RTW89_UK][7] = 46,
[0][1][1][0][RTW89_FCC][8] = 66,
[0][1][1][0][RTW89_ETSI][8] = 46,
[0][1][1][0][RTW89_MKK][8] = 64,
@@ -44421,6 +44495,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][8] = 66,
[0][1][1][0][RTW89_CN][8] = 46,
[0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 46,
[0][1][1][0][RTW89_FCC][9] = 62,
[0][1][1][0][RTW89_ETSI][9] = 46,
[0][1][1][0][RTW89_MKK][9] = 64,
@@ -44432,6 +44507,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][9] = 62,
[0][1][1][0][RTW89_CN][9] = 46,
[0][1][1][0][RTW89_QATAR][9] = 46,
+ [0][1][1][0][RTW89_UK][9] = 46,
[0][1][1][0][RTW89_FCC][10] = 62,
[0][1][1][0][RTW89_ETSI][10] = 46,
[0][1][1][0][RTW89_MKK][10] = 64,
@@ -44443,6 +44519,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][10] = 62,
[0][1][1][0][RTW89_CN][10] = 46,
[0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 46,
[0][1][1][0][RTW89_FCC][11] = 42,
[0][1][1][0][RTW89_ETSI][11] = 46,
[0][1][1][0][RTW89_MKK][11] = 64,
@@ -44454,6 +44531,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][11] = 42,
[0][1][1][0][RTW89_CN][11] = 46,
[0][1][1][0][RTW89_QATAR][11] = 46,
+ [0][1][1][0][RTW89_UK][11] = 46,
[0][1][1][0][RTW89_FCC][12] = 40,
[0][1][1][0][RTW89_ETSI][12] = 46,
[0][1][1][0][RTW89_MKK][12] = 64,
@@ -44465,6 +44543,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][12] = 40,
[0][1][1][0][RTW89_CN][12] = 46,
[0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 46,
[0][1][1][0][RTW89_FCC][13] = 127,
[0][1][1][0][RTW89_ETSI][13] = 127,
[0][1][1][0][RTW89_MKK][13] = 127,
@@ -44476,6 +44555,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][13] = 127,
[0][1][1][0][RTW89_CN][13] = 127,
[0][1][1][0][RTW89_QATAR][13] = 127,
+ [0][1][1][0][RTW89_UK][13] = 127,
[0][0][2][0][RTW89_FCC][0] = 66,
[0][0][2][0][RTW89_ETSI][0] = 58,
[0][0][2][0][RTW89_MKK][0] = 76,
@@ -44487,6 +44567,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][0] = 66,
[0][0][2][0][RTW89_CN][0] = 58,
[0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 58,
[0][0][2][0][RTW89_FCC][1] = 66,
[0][0][2][0][RTW89_ETSI][1] = 58,
[0][0][2][0][RTW89_MKK][1] = 76,
@@ -44498,6 +44579,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][1] = 66,
[0][0][2][0][RTW89_CN][1] = 58,
[0][0][2][0][RTW89_QATAR][1] = 58,
+ [0][0][2][0][RTW89_UK][1] = 58,
[0][0][2][0][RTW89_FCC][2] = 70,
[0][0][2][0][RTW89_ETSI][2] = 58,
[0][0][2][0][RTW89_MKK][2] = 76,
@@ -44509,6 +44591,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][2] = 70,
[0][0][2][0][RTW89_CN][2] = 58,
[0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 58,
[0][0][2][0][RTW89_FCC][3] = 74,
[0][0][2][0][RTW89_ETSI][3] = 58,
[0][0][2][0][RTW89_MKK][3] = 76,
@@ -44520,6 +44603,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][3] = 74,
[0][0][2][0][RTW89_CN][3] = 58,
[0][0][2][0][RTW89_QATAR][3] = 58,
+ [0][0][2][0][RTW89_UK][3] = 58,
[0][0][2][0][RTW89_FCC][4] = 76,
[0][0][2][0][RTW89_ETSI][4] = 58,
[0][0][2][0][RTW89_MKK][4] = 76,
@@ -44531,6 +44615,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][4] = 76,
[0][0][2][0][RTW89_CN][4] = 58,
[0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 58,
[0][0][2][0][RTW89_FCC][5] = 76,
[0][0][2][0][RTW89_ETSI][5] = 58,
[0][0][2][0][RTW89_MKK][5] = 76,
@@ -44542,6 +44627,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][5] = 76,
[0][0][2][0][RTW89_CN][5] = 58,
[0][0][2][0][RTW89_QATAR][5] = 58,
+ [0][0][2][0][RTW89_UK][5] = 58,
[0][0][2][0][RTW89_FCC][6] = 76,
[0][0][2][0][RTW89_ETSI][6] = 58,
[0][0][2][0][RTW89_MKK][6] = 76,
@@ -44553,6 +44639,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][6] = 76,
[0][0][2][0][RTW89_CN][6] = 58,
[0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 58,
[0][0][2][0][RTW89_FCC][7] = 74,
[0][0][2][0][RTW89_ETSI][7] = 58,
[0][0][2][0][RTW89_MKK][7] = 76,
@@ -44564,6 +44651,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][7] = 74,
[0][0][2][0][RTW89_CN][7] = 58,
[0][0][2][0][RTW89_QATAR][7] = 58,
+ [0][0][2][0][RTW89_UK][7] = 58,
[0][0][2][0][RTW89_FCC][8] = 70,
[0][0][2][0][RTW89_ETSI][8] = 58,
[0][0][2][0][RTW89_MKK][8] = 76,
@@ -44575,6 +44663,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][8] = 70,
[0][0][2][0][RTW89_CN][8] = 58,
[0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 58,
[0][0][2][0][RTW89_FCC][9] = 66,
[0][0][2][0][RTW89_ETSI][9] = 58,
[0][0][2][0][RTW89_MKK][9] = 76,
@@ -44586,6 +44675,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][9] = 66,
[0][0][2][0][RTW89_CN][9] = 58,
[0][0][2][0][RTW89_QATAR][9] = 58,
+ [0][0][2][0][RTW89_UK][9] = 58,
[0][0][2][0][RTW89_FCC][10] = 66,
[0][0][2][0][RTW89_ETSI][10] = 58,
[0][0][2][0][RTW89_MKK][10] = 76,
@@ -44597,6 +44687,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][10] = 66,
[0][0][2][0][RTW89_CN][10] = 58,
[0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 58,
[0][0][2][0][RTW89_FCC][11] = 54,
[0][0][2][0][RTW89_ETSI][11] = 58,
[0][0][2][0][RTW89_MKK][11] = 76,
@@ -44608,6 +44699,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][11] = 54,
[0][0][2][0][RTW89_CN][11] = 58,
[0][0][2][0][RTW89_QATAR][11] = 58,
+ [0][0][2][0][RTW89_UK][11] = 58,
[0][0][2][0][RTW89_FCC][12] = 50,
[0][0][2][0][RTW89_ETSI][12] = 58,
[0][0][2][0][RTW89_MKK][12] = 76,
@@ -44619,6 +44711,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][12] = 50,
[0][0][2][0][RTW89_CN][12] = 58,
[0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 58,
[0][0][2][0][RTW89_FCC][13] = 127,
[0][0][2][0][RTW89_ETSI][13] = 127,
[0][0][2][0][RTW89_MKK][13] = 127,
@@ -44630,6 +44723,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][13] = 127,
[0][0][2][0][RTW89_CN][13] = 127,
[0][0][2][0][RTW89_QATAR][13] = 127,
+ [0][0][2][0][RTW89_UK][13] = 127,
[0][1][2][0][RTW89_FCC][0] = 62,
[0][1][2][0][RTW89_ETSI][0] = 46,
[0][1][2][0][RTW89_MKK][0] = 64,
@@ -44641,6 +44735,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][0] = 62,
[0][1][2][0][RTW89_CN][0] = 46,
[0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 46,
[0][1][2][0][RTW89_FCC][1] = 62,
[0][1][2][0][RTW89_ETSI][1] = 46,
[0][1][2][0][RTW89_MKK][1] = 64,
@@ -44652,6 +44747,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][1] = 62,
[0][1][2][0][RTW89_CN][1] = 46,
[0][1][2][0][RTW89_QATAR][1] = 46,
+ [0][1][2][0][RTW89_UK][1] = 46,
[0][1][2][0][RTW89_FCC][2] = 66,
[0][1][2][0][RTW89_ETSI][2] = 46,
[0][1][2][0][RTW89_MKK][2] = 64,
@@ -44663,6 +44759,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][2] = 66,
[0][1][2][0][RTW89_CN][2] = 46,
[0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 46,
[0][1][2][0][RTW89_FCC][3] = 70,
[0][1][2][0][RTW89_ETSI][3] = 46,
[0][1][2][0][RTW89_MKK][3] = 64,
@@ -44674,6 +44771,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][3] = 70,
[0][1][2][0][RTW89_CN][3] = 46,
[0][1][2][0][RTW89_QATAR][3] = 46,
+ [0][1][2][0][RTW89_UK][3] = 46,
[0][1][2][0][RTW89_FCC][4] = 76,
[0][1][2][0][RTW89_ETSI][4] = 46,
[0][1][2][0][RTW89_MKK][4] = 64,
@@ -44685,6 +44783,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][4] = 76,
[0][1][2][0][RTW89_CN][4] = 46,
[0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 46,
[0][1][2][0][RTW89_FCC][5] = 76,
[0][1][2][0][RTW89_ETSI][5] = 46,
[0][1][2][0][RTW89_MKK][5] = 64,
@@ -44696,6 +44795,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][5] = 76,
[0][1][2][0][RTW89_CN][5] = 46,
[0][1][2][0][RTW89_QATAR][5] = 46,
+ [0][1][2][0][RTW89_UK][5] = 46,
[0][1][2][0][RTW89_FCC][6] = 76,
[0][1][2][0][RTW89_ETSI][6] = 46,
[0][1][2][0][RTW89_MKK][6] = 64,
@@ -44707,6 +44807,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][6] = 76,
[0][1][2][0][RTW89_CN][6] = 46,
[0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 46,
[0][1][2][0][RTW89_FCC][7] = 68,
[0][1][2][0][RTW89_ETSI][7] = 46,
[0][1][2][0][RTW89_MKK][7] = 64,
@@ -44718,6 +44819,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][7] = 68,
[0][1][2][0][RTW89_CN][7] = 46,
[0][1][2][0][RTW89_QATAR][7] = 46,
+ [0][1][2][0][RTW89_UK][7] = 46,
[0][1][2][0][RTW89_FCC][8] = 64,
[0][1][2][0][RTW89_ETSI][8] = 46,
[0][1][2][0][RTW89_MKK][8] = 64,
@@ -44729,6 +44831,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][8] = 64,
[0][1][2][0][RTW89_CN][8] = 46,
[0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 46,
[0][1][2][0][RTW89_FCC][9] = 60,
[0][1][2][0][RTW89_ETSI][9] = 46,
[0][1][2][0][RTW89_MKK][9] = 64,
@@ -44740,6 +44843,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][9] = 60,
[0][1][2][0][RTW89_CN][9] = 46,
[0][1][2][0][RTW89_QATAR][9] = 46,
+ [0][1][2][0][RTW89_UK][9] = 46,
[0][1][2][0][RTW89_FCC][10] = 60,
[0][1][2][0][RTW89_ETSI][10] = 46,
[0][1][2][0][RTW89_MKK][10] = 64,
@@ -44751,6 +44855,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][10] = 60,
[0][1][2][0][RTW89_CN][10] = 46,
[0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 46,
[0][1][2][0][RTW89_FCC][11] = 42,
[0][1][2][0][RTW89_ETSI][11] = 46,
[0][1][2][0][RTW89_MKK][11] = 64,
@@ -44762,6 +44867,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][11] = 42,
[0][1][2][0][RTW89_CN][11] = 46,
[0][1][2][0][RTW89_QATAR][11] = 46,
+ [0][1][2][0][RTW89_UK][11] = 46,
[0][1][2][0][RTW89_FCC][12] = 40,
[0][1][2][0][RTW89_ETSI][12] = 46,
[0][1][2][0][RTW89_MKK][12] = 64,
@@ -44773,6 +44879,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][12] = 40,
[0][1][2][0][RTW89_CN][12] = 46,
[0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 46,
[0][1][2][0][RTW89_FCC][13] = 127,
[0][1][2][0][RTW89_ETSI][13] = 127,
[0][1][2][0][RTW89_MKK][13] = 127,
@@ -44784,6 +44891,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][13] = 127,
[0][1][2][0][RTW89_CN][13] = 127,
[0][1][2][0][RTW89_QATAR][13] = 127,
+ [0][1][2][0][RTW89_UK][13] = 127,
[0][1][2][1][RTW89_FCC][0] = 62,
[0][1][2][1][RTW89_ETSI][0] = 34,
[0][1][2][1][RTW89_MKK][0] = 64,
@@ -44795,6 +44903,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][0] = 62,
[0][1][2][1][RTW89_CN][0] = 34,
[0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_UK][0] = 34,
[0][1][2][1][RTW89_FCC][1] = 62,
[0][1][2][1][RTW89_ETSI][1] = 34,
[0][1][2][1][RTW89_MKK][1] = 64,
@@ -44806,6 +44915,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][1] = 62,
[0][1][2][1][RTW89_CN][1] = 34,
[0][1][2][1][RTW89_QATAR][1] = 34,
+ [0][1][2][1][RTW89_UK][1] = 34,
[0][1][2][1][RTW89_FCC][2] = 66,
[0][1][2][1][RTW89_ETSI][2] = 34,
[0][1][2][1][RTW89_MKK][2] = 64,
@@ -44817,6 +44927,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][2] = 66,
[0][1][2][1][RTW89_CN][2] = 34,
[0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 34,
[0][1][2][1][RTW89_FCC][3] = 70,
[0][1][2][1][RTW89_ETSI][3] = 34,
[0][1][2][1][RTW89_MKK][3] = 64,
@@ -44828,6 +44939,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][3] = 70,
[0][1][2][1][RTW89_CN][3] = 34,
[0][1][2][1][RTW89_QATAR][3] = 34,
+ [0][1][2][1][RTW89_UK][3] = 34,
[0][1][2][1][RTW89_FCC][4] = 76,
[0][1][2][1][RTW89_ETSI][4] = 34,
[0][1][2][1][RTW89_MKK][4] = 64,
@@ -44839,6 +44951,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][4] = 76,
[0][1][2][1][RTW89_CN][4] = 34,
[0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 34,
[0][1][2][1][RTW89_FCC][5] = 76,
[0][1][2][1][RTW89_ETSI][5] = 34,
[0][1][2][1][RTW89_MKK][5] = 64,
@@ -44850,6 +44963,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][5] = 76,
[0][1][2][1][RTW89_CN][5] = 34,
[0][1][2][1][RTW89_QATAR][5] = 34,
+ [0][1][2][1][RTW89_UK][5] = 34,
[0][1][2][1][RTW89_FCC][6] = 76,
[0][1][2][1][RTW89_ETSI][6] = 34,
[0][1][2][1][RTW89_MKK][6] = 64,
@@ -44861,6 +44975,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][6] = 76,
[0][1][2][1][RTW89_CN][6] = 34,
[0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 34,
[0][1][2][1][RTW89_FCC][7] = 68,
[0][1][2][1][RTW89_ETSI][7] = 34,
[0][1][2][1][RTW89_MKK][7] = 64,
@@ -44872,6 +44987,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][7] = 68,
[0][1][2][1][RTW89_CN][7] = 34,
[0][1][2][1][RTW89_QATAR][7] = 34,
+ [0][1][2][1][RTW89_UK][7] = 34,
[0][1][2][1][RTW89_FCC][8] = 64,
[0][1][2][1][RTW89_ETSI][8] = 34,
[0][1][2][1][RTW89_MKK][8] = 64,
@@ -44883,6 +44999,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][8] = 64,
[0][1][2][1][RTW89_CN][8] = 34,
[0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 34,
[0][1][2][1][RTW89_FCC][9] = 60,
[0][1][2][1][RTW89_ETSI][9] = 34,
[0][1][2][1][RTW89_MKK][9] = 64,
@@ -44894,6 +45011,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][9] = 60,
[0][1][2][1][RTW89_CN][9] = 34,
[0][1][2][1][RTW89_QATAR][9] = 34,
+ [0][1][2][1][RTW89_UK][9] = 34,
[0][1][2][1][RTW89_FCC][10] = 60,
[0][1][2][1][RTW89_ETSI][10] = 34,
[0][1][2][1][RTW89_MKK][10] = 64,
@@ -44905,6 +45023,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][10] = 60,
[0][1][2][1][RTW89_CN][10] = 34,
[0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 34,
[0][1][2][1][RTW89_FCC][11] = 42,
[0][1][2][1][RTW89_ETSI][11] = 34,
[0][1][2][1][RTW89_MKK][11] = 64,
@@ -44916,6 +45035,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][11] = 42,
[0][1][2][1][RTW89_CN][11] = 34,
[0][1][2][1][RTW89_QATAR][11] = 34,
+ [0][1][2][1][RTW89_UK][11] = 34,
[0][1][2][1][RTW89_FCC][12] = 40,
[0][1][2][1][RTW89_ETSI][12] = 34,
[0][1][2][1][RTW89_MKK][12] = 64,
@@ -44927,6 +45047,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][12] = 40,
[0][1][2][1][RTW89_CN][12] = 34,
[0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 34,
[0][1][2][1][RTW89_FCC][13] = 127,
[0][1][2][1][RTW89_ETSI][13] = 127,
[0][1][2][1][RTW89_MKK][13] = 127,
@@ -44938,6 +45059,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][13] = 127,
[0][1][2][1][RTW89_CN][13] = 127,
[0][1][2][1][RTW89_QATAR][13] = 127,
+ [0][1][2][1][RTW89_UK][13] = 127,
[1][0][2][0][RTW89_FCC][0] = 127,
[1][0][2][0][RTW89_ETSI][0] = 127,
[1][0][2][0][RTW89_MKK][0] = 127,
@@ -44949,6 +45071,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][0] = 127,
[1][0][2][0][RTW89_CN][0] = 127,
[1][0][2][0][RTW89_QATAR][0] = 127,
+ [1][0][2][0][RTW89_UK][0] = 127,
[1][0][2][0][RTW89_FCC][1] = 127,
[1][0][2][0][RTW89_ETSI][1] = 127,
[1][0][2][0][RTW89_MKK][1] = 127,
@@ -44960,6 +45083,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][1] = 127,
[1][0][2][0][RTW89_CN][1] = 127,
[1][0][2][0][RTW89_QATAR][1] = 127,
+ [1][0][2][0][RTW89_UK][1] = 127,
[1][0][2][0][RTW89_FCC][2] = 56,
[1][0][2][0][RTW89_ETSI][2] = 58,
[1][0][2][0][RTW89_MKK][2] = 68,
@@ -44971,6 +45095,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][2] = 56,
[1][0][2][0][RTW89_CN][2] = 58,
[1][0][2][0][RTW89_QATAR][2] = 58,
+ [1][0][2][0][RTW89_UK][2] = 58,
[1][0][2][0][RTW89_FCC][3] = 56,
[1][0][2][0][RTW89_ETSI][3] = 58,
[1][0][2][0][RTW89_MKK][3] = 68,
@@ -44982,6 +45107,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][3] = 56,
[1][0][2][0][RTW89_CN][3] = 58,
[1][0][2][0][RTW89_QATAR][3] = 58,
+ [1][0][2][0][RTW89_UK][3] = 58,
[1][0][2][0][RTW89_FCC][4] = 60,
[1][0][2][0][RTW89_ETSI][4] = 58,
[1][0][2][0][RTW89_MKK][4] = 68,
@@ -44993,6 +45119,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][4] = 60,
[1][0][2][0][RTW89_CN][4] = 58,
[1][0][2][0][RTW89_QATAR][4] = 58,
+ [1][0][2][0][RTW89_UK][4] = 58,
[1][0][2][0][RTW89_FCC][5] = 64,
[1][0][2][0][RTW89_ETSI][5] = 58,
[1][0][2][0][RTW89_MKK][5] = 68,
@@ -45004,6 +45131,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][5] = 64,
[1][0][2][0][RTW89_CN][5] = 58,
[1][0][2][0][RTW89_QATAR][5] = 58,
+ [1][0][2][0][RTW89_UK][5] = 58,
[1][0][2][0][RTW89_FCC][6] = 54,
[1][0][2][0][RTW89_ETSI][6] = 58,
[1][0][2][0][RTW89_MKK][6] = 68,
@@ -45015,6 +45143,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][6] = 54,
[1][0][2][0][RTW89_CN][6] = 58,
[1][0][2][0][RTW89_QATAR][6] = 58,
+ [1][0][2][0][RTW89_UK][6] = 58,
[1][0][2][0][RTW89_FCC][7] = 50,
[1][0][2][0][RTW89_ETSI][7] = 58,
[1][0][2][0][RTW89_MKK][7] = 68,
@@ -45026,6 +45155,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][7] = 50,
[1][0][2][0][RTW89_CN][7] = 58,
[1][0][2][0][RTW89_QATAR][7] = 58,
+ [1][0][2][0][RTW89_UK][7] = 58,
[1][0][2][0][RTW89_FCC][8] = 50,
[1][0][2][0][RTW89_ETSI][8] = 58,
[1][0][2][0][RTW89_MKK][8] = 68,
@@ -45037,6 +45167,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][8] = 50,
[1][0][2][0][RTW89_CN][8] = 58,
[1][0][2][0][RTW89_QATAR][8] = 58,
+ [1][0][2][0][RTW89_UK][8] = 58,
[1][0][2][0][RTW89_FCC][9] = 42,
[1][0][2][0][RTW89_ETSI][9] = 58,
[1][0][2][0][RTW89_MKK][9] = 68,
@@ -45048,6 +45179,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][9] = 42,
[1][0][2][0][RTW89_CN][9] = 58,
[1][0][2][0][RTW89_QATAR][9] = 58,
+ [1][0][2][0][RTW89_UK][9] = 58,
[1][0][2][0][RTW89_FCC][10] = 40,
[1][0][2][0][RTW89_ETSI][10] = 58,
[1][0][2][0][RTW89_MKK][10] = 68,
@@ -45059,6 +45191,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][10] = 40,
[1][0][2][0][RTW89_CN][10] = 58,
[1][0][2][0][RTW89_QATAR][10] = 58,
+ [1][0][2][0][RTW89_UK][10] = 58,
[1][0][2][0][RTW89_FCC][11] = 127,
[1][0][2][0][RTW89_ETSI][11] = 127,
[1][0][2][0][RTW89_MKK][11] = 127,
@@ -45070,6 +45203,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][11] = 127,
[1][0][2][0][RTW89_CN][11] = 127,
[1][0][2][0][RTW89_QATAR][11] = 127,
+ [1][0][2][0][RTW89_UK][11] = 127,
[1][0][2][0][RTW89_FCC][12] = 127,
[1][0][2][0][RTW89_ETSI][12] = 127,
[1][0][2][0][RTW89_MKK][12] = 127,
@@ -45081,6 +45215,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][12] = 127,
[1][0][2][0][RTW89_CN][12] = 127,
[1][0][2][0][RTW89_QATAR][12] = 127,
+ [1][0][2][0][RTW89_UK][12] = 127,
[1][0][2][0][RTW89_FCC][13] = 127,
[1][0][2][0][RTW89_ETSI][13] = 127,
[1][0][2][0][RTW89_MKK][13] = 127,
@@ -45092,6 +45227,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][13] = 127,
[1][0][2][0][RTW89_CN][13] = 127,
[1][0][2][0][RTW89_QATAR][13] = 127,
+ [1][0][2][0][RTW89_UK][13] = 127,
[1][1][2][0][RTW89_FCC][0] = 127,
[1][1][2][0][RTW89_ETSI][0] = 127,
[1][1][2][0][RTW89_MKK][0] = 127,
@@ -45103,6 +45239,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][0] = 127,
[1][1][2][0][RTW89_CN][0] = 127,
[1][1][2][0][RTW89_QATAR][0] = 127,
+ [1][1][2][0][RTW89_UK][0] = 127,
[1][1][2][0][RTW89_FCC][1] = 127,
[1][1][2][0][RTW89_ETSI][1] = 127,
[1][1][2][0][RTW89_MKK][1] = 127,
@@ -45114,6 +45251,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][1] = 127,
[1][1][2][0][RTW89_CN][1] = 127,
[1][1][2][0][RTW89_QATAR][1] = 127,
+ [1][1][2][0][RTW89_UK][1] = 127,
[1][1][2][0][RTW89_FCC][2] = 52,
[1][1][2][0][RTW89_ETSI][2] = 46,
[1][1][2][0][RTW89_MKK][2] = 64,
@@ -45125,6 +45263,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][2] = 52,
[1][1][2][0][RTW89_CN][2] = 46,
[1][1][2][0][RTW89_QATAR][2] = 46,
+ [1][1][2][0][RTW89_UK][2] = 46,
[1][1][2][0][RTW89_FCC][3] = 52,
[1][1][2][0][RTW89_ETSI][3] = 46,
[1][1][2][0][RTW89_MKK][3] = 64,
@@ -45136,6 +45275,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][3] = 52,
[1][1][2][0][RTW89_CN][3] = 46,
[1][1][2][0][RTW89_QATAR][3] = 46,
+ [1][1][2][0][RTW89_UK][3] = 46,
[1][1][2][0][RTW89_FCC][4] = 56,
[1][1][2][0][RTW89_ETSI][4] = 46,
[1][1][2][0][RTW89_MKK][4] = 64,
@@ -45147,6 +45287,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][4] = 56,
[1][1][2][0][RTW89_CN][4] = 46,
[1][1][2][0][RTW89_QATAR][4] = 46,
+ [1][1][2][0][RTW89_UK][4] = 46,
[1][1][2][0][RTW89_FCC][5] = 60,
[1][1][2][0][RTW89_ETSI][5] = 46,
[1][1][2][0][RTW89_MKK][5] = 64,
@@ -45158,6 +45299,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][5] = 60,
[1][1][2][0][RTW89_CN][5] = 46,
[1][1][2][0][RTW89_QATAR][5] = 46,
+ [1][1][2][0][RTW89_UK][5] = 46,
[1][1][2][0][RTW89_FCC][6] = 54,
[1][1][2][0][RTW89_ETSI][6] = 46,
[1][1][2][0][RTW89_MKK][6] = 64,
@@ -45169,6 +45311,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][6] = 54,
[1][1][2][0][RTW89_CN][6] = 46,
[1][1][2][0][RTW89_QATAR][6] = 46,
+ [1][1][2][0][RTW89_UK][6] = 46,
[1][1][2][0][RTW89_FCC][7] = 50,
[1][1][2][0][RTW89_ETSI][7] = 46,
[1][1][2][0][RTW89_MKK][7] = 64,
@@ -45180,6 +45323,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][7] = 50,
[1][1][2][0][RTW89_CN][7] = 46,
[1][1][2][0][RTW89_QATAR][7] = 46,
+ [1][1][2][0][RTW89_UK][7] = 46,
[1][1][2][0][RTW89_FCC][8] = 50,
[1][1][2][0][RTW89_ETSI][8] = 46,
[1][1][2][0][RTW89_MKK][8] = 64,
@@ -45191,6 +45335,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][8] = 50,
[1][1][2][0][RTW89_CN][8] = 46,
[1][1][2][0][RTW89_QATAR][8] = 46,
+ [1][1][2][0][RTW89_UK][8] = 46,
[1][1][2][0][RTW89_FCC][9] = 38,
[1][1][2][0][RTW89_ETSI][9] = 46,
[1][1][2][0][RTW89_MKK][9] = 64,
@@ -45202,6 +45347,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][9] = 38,
[1][1][2][0][RTW89_CN][9] = 46,
[1][1][2][0][RTW89_QATAR][9] = 46,
+ [1][1][2][0][RTW89_UK][9] = 46,
[1][1][2][0][RTW89_FCC][10] = 36,
[1][1][2][0][RTW89_ETSI][10] = 46,
[1][1][2][0][RTW89_MKK][10] = 64,
@@ -45213,6 +45359,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][10] = 36,
[1][1][2][0][RTW89_CN][10] = 46,
[1][1][2][0][RTW89_QATAR][10] = 46,
+ [1][1][2][0][RTW89_UK][10] = 46,
[1][1][2][0][RTW89_FCC][11] = 127,
[1][1][2][0][RTW89_ETSI][11] = 127,
[1][1][2][0][RTW89_MKK][11] = 127,
@@ -45224,6 +45371,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][11] = 127,
[1][1][2][0][RTW89_CN][11] = 127,
[1][1][2][0][RTW89_QATAR][11] = 127,
+ [1][1][2][0][RTW89_UK][11] = 127,
[1][1][2][0][RTW89_FCC][12] = 127,
[1][1][2][0][RTW89_ETSI][12] = 127,
[1][1][2][0][RTW89_MKK][12] = 127,
@@ -45235,6 +45383,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][12] = 127,
[1][1][2][0][RTW89_CN][12] = 127,
[1][1][2][0][RTW89_QATAR][12] = 127,
+ [1][1][2][0][RTW89_UK][12] = 127,
[1][1][2][0][RTW89_FCC][13] = 127,
[1][1][2][0][RTW89_ETSI][13] = 127,
[1][1][2][0][RTW89_MKK][13] = 127,
@@ -45246,6 +45395,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][13] = 127,
[1][1][2][0][RTW89_CN][13] = 127,
[1][1][2][0][RTW89_QATAR][13] = 127,
+ [1][1][2][0][RTW89_UK][13] = 127,
[1][1][2][1][RTW89_FCC][0] = 127,
[1][1][2][1][RTW89_ETSI][0] = 127,
[1][1][2][1][RTW89_MKK][0] = 127,
@@ -45257,6 +45407,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][0] = 127,
[1][1][2][1][RTW89_CN][0] = 127,
[1][1][2][1][RTW89_QATAR][0] = 127,
+ [1][1][2][1][RTW89_UK][0] = 127,
[1][1][2][1][RTW89_FCC][1] = 127,
[1][1][2][1][RTW89_ETSI][1] = 127,
[1][1][2][1][RTW89_MKK][1] = 127,
@@ -45268,6 +45419,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][1] = 127,
[1][1][2][1][RTW89_CN][1] = 127,
[1][1][2][1][RTW89_QATAR][1] = 127,
+ [1][1][2][1][RTW89_UK][1] = 127,
[1][1][2][1][RTW89_FCC][2] = 52,
[1][1][2][1][RTW89_ETSI][2] = 34,
[1][1][2][1][RTW89_MKK][2] = 64,
@@ -45279,6 +45431,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][2] = 52,
[1][1][2][1][RTW89_CN][2] = 34,
[1][1][2][1][RTW89_QATAR][2] = 34,
+ [1][1][2][1][RTW89_UK][2] = 34,
[1][1][2][1][RTW89_FCC][3] = 52,
[1][1][2][1][RTW89_ETSI][3] = 34,
[1][1][2][1][RTW89_MKK][3] = 64,
@@ -45290,6 +45443,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][3] = 52,
[1][1][2][1][RTW89_CN][3] = 34,
[1][1][2][1][RTW89_QATAR][3] = 34,
+ [1][1][2][1][RTW89_UK][3] = 34,
[1][1][2][1][RTW89_FCC][4] = 56,
[1][1][2][1][RTW89_ETSI][4] = 34,
[1][1][2][1][RTW89_MKK][4] = 64,
@@ -45301,6 +45455,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][4] = 56,
[1][1][2][1][RTW89_CN][4] = 34,
[1][1][2][1][RTW89_QATAR][4] = 34,
+ [1][1][2][1][RTW89_UK][4] = 34,
[1][1][2][1][RTW89_FCC][5] = 60,
[1][1][2][1][RTW89_ETSI][5] = 34,
[1][1][2][1][RTW89_MKK][5] = 64,
@@ -45312,6 +45467,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][5] = 60,
[1][1][2][1][RTW89_CN][5] = 34,
[1][1][2][1][RTW89_QATAR][5] = 34,
+ [1][1][2][1][RTW89_UK][5] = 34,
[1][1][2][1][RTW89_FCC][6] = 54,
[1][1][2][1][RTW89_ETSI][6] = 34,
[1][1][2][1][RTW89_MKK][6] = 64,
@@ -45323,6 +45479,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][6] = 54,
[1][1][2][1][RTW89_CN][6] = 34,
[1][1][2][1][RTW89_QATAR][6] = 34,
+ [1][1][2][1][RTW89_UK][6] = 34,
[1][1][2][1][RTW89_FCC][7] = 50,
[1][1][2][1][RTW89_ETSI][7] = 34,
[1][1][2][1][RTW89_MKK][7] = 64,
@@ -45334,6 +45491,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][7] = 50,
[1][1][2][1][RTW89_CN][7] = 34,
[1][1][2][1][RTW89_QATAR][7] = 34,
+ [1][1][2][1][RTW89_UK][7] = 34,
[1][1][2][1][RTW89_FCC][8] = 50,
[1][1][2][1][RTW89_ETSI][8] = 34,
[1][1][2][1][RTW89_MKK][8] = 64,
@@ -45345,6 +45503,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][8] = 50,
[1][1][2][1][RTW89_CN][8] = 34,
[1][1][2][1][RTW89_QATAR][8] = 34,
+ [1][1][2][1][RTW89_UK][8] = 34,
[1][1][2][1][RTW89_FCC][9] = 38,
[1][1][2][1][RTW89_ETSI][9] = 34,
[1][1][2][1][RTW89_MKK][9] = 64,
@@ -45356,6 +45515,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][9] = 38,
[1][1][2][1][RTW89_CN][9] = 34,
[1][1][2][1][RTW89_QATAR][9] = 34,
+ [1][1][2][1][RTW89_UK][9] = 34,
[1][1][2][1][RTW89_FCC][10] = 36,
[1][1][2][1][RTW89_ETSI][10] = 34,
[1][1][2][1][RTW89_MKK][10] = 64,
@@ -45367,6 +45527,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][10] = 36,
[1][1][2][1][RTW89_CN][10] = 34,
[1][1][2][1][RTW89_QATAR][10] = 34,
+ [1][1][2][1][RTW89_UK][10] = 34,
[1][1][2][1][RTW89_FCC][11] = 127,
[1][1][2][1][RTW89_ETSI][11] = 127,
[1][1][2][1][RTW89_MKK][11] = 127,
@@ -45378,6 +45539,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][11] = 127,
[1][1][2][1][RTW89_CN][11] = 127,
[1][1][2][1][RTW89_QATAR][11] = 127,
+ [1][1][2][1][RTW89_UK][11] = 127,
[1][1][2][1][RTW89_FCC][12] = 127,
[1][1][2][1][RTW89_ETSI][12] = 127,
[1][1][2][1][RTW89_MKK][12] = 127,
@@ -45389,6 +45551,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][12] = 127,
[1][1][2][1][RTW89_CN][12] = 127,
[1][1][2][1][RTW89_QATAR][12] = 127,
+ [1][1][2][1][RTW89_UK][12] = 127,
[1][1][2][1][RTW89_FCC][13] = 127,
[1][1][2][1][RTW89_ETSI][13] = 127,
[1][1][2][1][RTW89_MKK][13] = 127,
@@ -45400,6 +45563,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][13] = 127,
[1][1][2][1][RTW89_CN][13] = 127,
[1][1][2][1][RTW89_QATAR][13] = 127,
+ [1][1][2][1][RTW89_UK][13] = 127,
};
const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
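The hunks above add an RTW89_UK entry alongside each existing regulatory-domain entry of rtw89_8852a_txpwr_lmt_2g; the hunks below continue with rtw89_8852a_txpwr_lmt_5g. As a minimal, self-contained sketch, the snippet here shows how a six-dimensional limit table of this shape can be consulted, assuming the index order [bandwidth][ntx][rate section][beamforming][regulatory domain][channel]; the helper name, the toy dimensions, and the reading of 127 as an "undefined entry" sentinel are illustrative assumptions, not the driver's actual API.

/*
 * Illustrative sketch only -- not part of the diff. A toy table with the
 * same nesting as rtw89_8852a_txpwr_lmt_2g, plus a hypothetical lookup
 * helper. The index order and the 127 sentinel are assumptions made for
 * this example.
 */
#include <stdio.h>

typedef signed char s8;

#define BW_NUM   2	/* channel bandwidths */
#define NTX_NUM  2	/* number of TX paths */
#define RS_NUM   3	/* rate sections */
#define BF_NUM   2	/* beamforming on/off */
#define REGD_NUM 3	/* regulatory domains (toy subset) */
#define CH_NUM   14	/* 2 GHz channels */

enum regd { REGD_FCC, REGD_ETSI, REGD_UK };

/* Zero-initialized toy table; the real table is filled per domain. */
static const s8 lmt_2g[BW_NUM][NTX_NUM][RS_NUM][BF_NUM][REGD_NUM][CH_NUM];

static s8 lookup_lmt(int bw, int ntx, int rs, int bf, enum regd r, int ch)
{
	s8 v = lmt_2g[bw][ntx][rs][bf][r][ch];

	/* In this sketch, 127 (S8_MAX) marks an entry with no limit defined. */
	return (v == 127) ? 0 : v;
}

int main(void)
{
	/* e.g. 20 MHz, 1 TX path, first rate section, no BF, UK, channel 6 */
	printf("limit = %d\n", lookup_lmt(0, 0, 0, 0, REGD_UK, 5));
	return 0;
}

Consistent with such a sentinel reading, the rows above set index 13 (channel 14) to 127 for every domain except RTW89_MKK, which matches channel 14 being usable only under the Japanese regulatory domain.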
@@ -45595,6 +45759,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][0] = 62,
[0][0][1][0][RTW89_CN][0] = 58,
[0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 58,
[0][0][1][0][RTW89_FCC][2] = 76,
[0][0][1][0][RTW89_ETSI][2] = 58,
[0][0][1][0][RTW89_MKK][2] = 62,
@@ -45606,6 +45771,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][2] = 62,
[0][0][1][0][RTW89_CN][2] = 58,
[0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 58,
[0][0][1][0][RTW89_FCC][4] = 76,
[0][0][1][0][RTW89_ETSI][4] = 58,
[0][0][1][0][RTW89_MKK][4] = 62,
@@ -45617,6 +45783,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][4] = 62,
[0][0][1][0][RTW89_CN][4] = 58,
[0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 58,
[0][0][1][0][RTW89_FCC][6] = 76,
[0][0][1][0][RTW89_ETSI][6] = 58,
[0][0][1][0][RTW89_MKK][6] = 62,
@@ -45628,6 +45795,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][6] = 62,
[0][0][1][0][RTW89_CN][6] = 58,
[0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 58,
[0][0][1][0][RTW89_FCC][8] = 76,
[0][0][1][0][RTW89_ETSI][8] = 58,
[0][0][1][0][RTW89_MKK][8] = 62,
@@ -45639,6 +45807,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][8] = 76,
[0][0][1][0][RTW89_CN][8] = 58,
[0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 58,
[0][0][1][0][RTW89_FCC][10] = 76,
[0][0][1][0][RTW89_ETSI][10] = 58,
[0][0][1][0][RTW89_MKK][10] = 62,
@@ -45650,6 +45819,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][10] = 76,
[0][0][1][0][RTW89_CN][10] = 58,
[0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 58,
[0][0][1][0][RTW89_FCC][12] = 76,
[0][0][1][0][RTW89_ETSI][12] = 58,
[0][0][1][0][RTW89_MKK][12] = 62,
@@ -45661,6 +45831,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][12] = 76,
[0][0][1][0][RTW89_CN][12] = 58,
[0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 58,
[0][0][1][0][RTW89_FCC][14] = 76,
[0][0][1][0][RTW89_ETSI][14] = 58,
[0][0][1][0][RTW89_MKK][14] = 62,
@@ -45672,6 +45843,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][14] = 76,
[0][0][1][0][RTW89_CN][14] = 58,
[0][0][1][0][RTW89_QATAR][14] = 58,
+ [0][0][1][0][RTW89_UK][14] = 58,
[0][0][1][0][RTW89_FCC][15] = 76,
[0][0][1][0][RTW89_ETSI][15] = 58,
[0][0][1][0][RTW89_MKK][15] = 76,
@@ -45683,6 +45855,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][15] = 76,
[0][0][1][0][RTW89_CN][15] = 127,
[0][0][1][0][RTW89_QATAR][15] = 52,
+ [0][0][1][0][RTW89_UK][15] = 58,
[0][0][1][0][RTW89_FCC][17] = 76,
[0][0][1][0][RTW89_ETSI][17] = 58,
[0][0][1][0][RTW89_MKK][17] = 76,
@@ -45694,6 +45867,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][17] = 76,
[0][0][1][0][RTW89_CN][17] = 127,
[0][0][1][0][RTW89_QATAR][17] = 52,
+ [0][0][1][0][RTW89_UK][17] = 58,
[0][0][1][0][RTW89_FCC][19] = 76,
[0][0][1][0][RTW89_ETSI][19] = 58,
[0][0][1][0][RTW89_MKK][19] = 76,
@@ -45705,6 +45879,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][19] = 76,
[0][0][1][0][RTW89_CN][19] = 127,
[0][0][1][0][RTW89_QATAR][19] = 52,
+ [0][0][1][0][RTW89_UK][19] = 58,
[0][0][1][0][RTW89_FCC][21] = 76,
[0][0][1][0][RTW89_ETSI][21] = 58,
[0][0][1][0][RTW89_MKK][21] = 76,
@@ -45716,6 +45891,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][21] = 76,
[0][0][1][0][RTW89_CN][21] = 127,
[0][0][1][0][RTW89_QATAR][21] = 52,
+ [0][0][1][0][RTW89_UK][21] = 58,
[0][0][1][0][RTW89_FCC][23] = 76,
[0][0][1][0][RTW89_ETSI][23] = 58,
[0][0][1][0][RTW89_MKK][23] = 76,
@@ -45727,6 +45903,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][23] = 76,
[0][0][1][0][RTW89_CN][23] = 127,
[0][0][1][0][RTW89_QATAR][23] = 52,
+ [0][0][1][0][RTW89_UK][23] = 58,
[0][0][1][0][RTW89_FCC][25] = 76,
[0][0][1][0][RTW89_ETSI][25] = 58,
[0][0][1][0][RTW89_MKK][25] = 76,
@@ -45738,6 +45915,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][25] = 76,
[0][0][1][0][RTW89_CN][25] = 127,
[0][0][1][0][RTW89_QATAR][25] = 52,
+ [0][0][1][0][RTW89_UK][25] = 58,
[0][0][1][0][RTW89_FCC][27] = 76,
[0][0][1][0][RTW89_ETSI][27] = 58,
[0][0][1][0][RTW89_MKK][27] = 76,
@@ -45749,6 +45927,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][27] = 76,
[0][0][1][0][RTW89_CN][27] = 127,
[0][0][1][0][RTW89_QATAR][27] = 52,
+ [0][0][1][0][RTW89_UK][27] = 58,
[0][0][1][0][RTW89_FCC][29] = 76,
[0][0][1][0][RTW89_ETSI][29] = 58,
[0][0][1][0][RTW89_MKK][29] = 76,
@@ -45760,6 +45939,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][29] = 76,
[0][0][1][0][RTW89_CN][29] = 127,
[0][0][1][0][RTW89_QATAR][29] = 52,
+ [0][0][1][0][RTW89_UK][29] = 58,
[0][0][1][0][RTW89_FCC][31] = 76,
[0][0][1][0][RTW89_ETSI][31] = 58,
[0][0][1][0][RTW89_MKK][31] = 76,
@@ -45771,6 +45951,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][31] = 76,
[0][0][1][0][RTW89_CN][31] = 127,
[0][0][1][0][RTW89_QATAR][31] = 52,
+ [0][0][1][0][RTW89_UK][31] = 58,
[0][0][1][0][RTW89_FCC][33] = 76,
[0][0][1][0][RTW89_ETSI][33] = 58,
[0][0][1][0][RTW89_MKK][33] = 76,
@@ -45782,6 +45963,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][33] = 76,
[0][0][1][0][RTW89_CN][33] = 127,
[0][0][1][0][RTW89_QATAR][33] = 52,
+ [0][0][1][0][RTW89_UK][33] = 58,
[0][0][1][0][RTW89_FCC][35] = 74,
[0][0][1][0][RTW89_ETSI][35] = 58,
[0][0][1][0][RTW89_MKK][35] = 76,
@@ -45793,6 +45975,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][35] = 74,
[0][0][1][0][RTW89_CN][35] = 127,
[0][0][1][0][RTW89_QATAR][35] = 52,
+ [0][0][1][0][RTW89_UK][35] = 58,
[0][0][1][0][RTW89_FCC][37] = 76,
[0][0][1][0][RTW89_ETSI][37] = 127,
[0][0][1][0][RTW89_MKK][37] = 76,
@@ -45804,6 +45987,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][37] = 76,
[0][0][1][0][RTW89_CN][37] = 127,
[0][0][1][0][RTW89_QATAR][37] = 127,
+ [0][0][1][0][RTW89_UK][37] = 76,
[0][0][1][0][RTW89_FCC][38] = 76,
[0][0][1][0][RTW89_ETSI][38] = 28,
[0][0][1][0][RTW89_MKK][38] = 127,
@@ -45815,6 +45999,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][38] = 76,
[0][0][1][0][RTW89_CN][38] = 72,
[0][0][1][0][RTW89_QATAR][38] = 28,
+ [0][0][1][0][RTW89_UK][38] = 56,
[0][0][1][0][RTW89_FCC][40] = 76,
[0][0][1][0][RTW89_ETSI][40] = 28,
[0][0][1][0][RTW89_MKK][40] = 127,
@@ -45826,6 +46011,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][40] = 76,
[0][0][1][0][RTW89_CN][40] = 76,
[0][0][1][0][RTW89_QATAR][40] = 28,
+ [0][0][1][0][RTW89_UK][40] = 56,
[0][0][1][0][RTW89_FCC][42] = 76,
[0][0][1][0][RTW89_ETSI][42] = 28,
[0][0][1][0][RTW89_MKK][42] = 127,
@@ -45837,6 +46023,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][42] = 76,
[0][0][1][0][RTW89_CN][42] = 76,
[0][0][1][0][RTW89_QATAR][42] = 28,
+ [0][0][1][0][RTW89_UK][42] = 56,
[0][0][1][0][RTW89_FCC][44] = 76,
[0][0][1][0][RTW89_ETSI][44] = 28,
[0][0][1][0][RTW89_MKK][44] = 127,
@@ -45848,6 +46035,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][44] = 76,
[0][0][1][0][RTW89_CN][44] = 76,
[0][0][1][0][RTW89_QATAR][44] = 28,
+ [0][0][1][0][RTW89_UK][44] = 56,
[0][0][1][0][RTW89_FCC][46] = 76,
[0][0][1][0][RTW89_ETSI][46] = 28,
[0][0][1][0][RTW89_MKK][46] = 127,
@@ -45859,6 +46047,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][46] = 76,
[0][0][1][0][RTW89_CN][46] = 76,
[0][0][1][0][RTW89_QATAR][46] = 28,
+ [0][0][1][0][RTW89_UK][46] = 56,
[0][1][1][0][RTW89_FCC][0] = 68,
[0][1][1][0][RTW89_ETSI][0] = 46,
[0][1][1][0][RTW89_MKK][0] = 50,
@@ -45870,6 +46059,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 50,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][2] = 68,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 50,
@@ -45881,6 +46071,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][2] = 50,
[0][1][1][0][RTW89_CN][2] = 46,
[0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 46,
[0][1][1][0][RTW89_FCC][4] = 68,
[0][1][1][0][RTW89_ETSI][4] = 46,
[0][1][1][0][RTW89_MKK][4] = 50,
@@ -45892,6 +46083,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][4] = 50,
[0][1][1][0][RTW89_CN][4] = 46,
[0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 46,
[0][1][1][0][RTW89_FCC][6] = 68,
[0][1][1][0][RTW89_ETSI][6] = 46,
[0][1][1][0][RTW89_MKK][6] = 50,
@@ -45903,6 +46095,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][6] = 50,
[0][1][1][0][RTW89_CN][6] = 46,
[0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 46,
[0][1][1][0][RTW89_FCC][8] = 68,
[0][1][1][0][RTW89_ETSI][8] = 46,
[0][1][1][0][RTW89_MKK][8] = 50,
@@ -45914,6 +46107,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][8] = 68,
[0][1][1][0][RTW89_CN][8] = 46,
[0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 46,
[0][1][1][0][RTW89_FCC][10] = 68,
[0][1][1][0][RTW89_ETSI][10] = 46,
[0][1][1][0][RTW89_MKK][10] = 50,
@@ -45925,6 +46119,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][10] = 68,
[0][1][1][0][RTW89_CN][10] = 46,
[0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 46,
[0][1][1][0][RTW89_FCC][12] = 68,
[0][1][1][0][RTW89_ETSI][12] = 46,
[0][1][1][0][RTW89_MKK][12] = 50,
@@ -45936,6 +46131,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][12] = 68,
[0][1][1][0][RTW89_CN][12] = 46,
[0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 46,
[0][1][1][0][RTW89_FCC][14] = 68,
[0][1][1][0][RTW89_ETSI][14] = 46,
[0][1][1][0][RTW89_MKK][14] = 50,
@@ -45947,6 +46143,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][14] = 68,
[0][1][1][0][RTW89_CN][14] = 46,
[0][1][1][0][RTW89_QATAR][14] = 46,
+ [0][1][1][0][RTW89_UK][14] = 46,
[0][1][1][0][RTW89_FCC][15] = 68,
[0][1][1][0][RTW89_ETSI][15] = 46,
[0][1][1][0][RTW89_MKK][15] = 70,
@@ -45958,6 +46155,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][15] = 68,
[0][1][1][0][RTW89_CN][15] = 127,
[0][1][1][0][RTW89_QATAR][15] = 40,
+ [0][1][1][0][RTW89_UK][15] = 46,
[0][1][1][0][RTW89_FCC][17] = 68,
[0][1][1][0][RTW89_ETSI][17] = 46,
[0][1][1][0][RTW89_MKK][17] = 70,
@@ -45969,6 +46167,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][17] = 68,
[0][1][1][0][RTW89_CN][17] = 127,
[0][1][1][0][RTW89_QATAR][17] = 40,
+ [0][1][1][0][RTW89_UK][17] = 46,
[0][1][1][0][RTW89_FCC][19] = 68,
[0][1][1][0][RTW89_ETSI][19] = 46,
[0][1][1][0][RTW89_MKK][19] = 70,
@@ -45980,6 +46179,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][19] = 68,
[0][1][1][0][RTW89_CN][19] = 127,
[0][1][1][0][RTW89_QATAR][19] = 40,
+ [0][1][1][0][RTW89_UK][19] = 46,
[0][1][1][0][RTW89_FCC][21] = 68,
[0][1][1][0][RTW89_ETSI][21] = 46,
[0][1][1][0][RTW89_MKK][21] = 70,
@@ -45991,6 +46191,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][21] = 68,
[0][1][1][0][RTW89_CN][21] = 127,
[0][1][1][0][RTW89_QATAR][21] = 40,
+ [0][1][1][0][RTW89_UK][21] = 46,
[0][1][1][0][RTW89_FCC][23] = 68,
[0][1][1][0][RTW89_ETSI][23] = 46,
[0][1][1][0][RTW89_MKK][23] = 70,
@@ -46002,6 +46203,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][23] = 68,
[0][1][1][0][RTW89_CN][23] = 127,
[0][1][1][0][RTW89_QATAR][23] = 40,
+ [0][1][1][0][RTW89_UK][23] = 46,
[0][1][1][0][RTW89_FCC][25] = 68,
[0][1][1][0][RTW89_ETSI][25] = 46,
[0][1][1][0][RTW89_MKK][25] = 70,
@@ -46013,6 +46215,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][25] = 68,
[0][1][1][0][RTW89_CN][25] = 127,
[0][1][1][0][RTW89_QATAR][25] = 40,
+ [0][1][1][0][RTW89_UK][25] = 46,
[0][1][1][0][RTW89_FCC][27] = 68,
[0][1][1][0][RTW89_ETSI][27] = 46,
[0][1][1][0][RTW89_MKK][27] = 70,
@@ -46024,6 +46227,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][27] = 68,
[0][1][1][0][RTW89_CN][27] = 127,
[0][1][1][0][RTW89_QATAR][27] = 40,
+ [0][1][1][0][RTW89_UK][27] = 46,
[0][1][1][0][RTW89_FCC][29] = 68,
[0][1][1][0][RTW89_ETSI][29] = 46,
[0][1][1][0][RTW89_MKK][29] = 70,
@@ -46035,6 +46239,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][29] = 68,
[0][1][1][0][RTW89_CN][29] = 127,
[0][1][1][0][RTW89_QATAR][29] = 40,
+ [0][1][1][0][RTW89_UK][29] = 46,
[0][1][1][0][RTW89_FCC][31] = 68,
[0][1][1][0][RTW89_ETSI][31] = 46,
[0][1][1][0][RTW89_MKK][31] = 70,
@@ -46046,6 +46251,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][31] = 68,
[0][1][1][0][RTW89_CN][31] = 127,
[0][1][1][0][RTW89_QATAR][31] = 40,
+ [0][1][1][0][RTW89_UK][31] = 46,
[0][1][1][0][RTW89_FCC][33] = 68,
[0][1][1][0][RTW89_ETSI][33] = 46,
[0][1][1][0][RTW89_MKK][33] = 70,
@@ -46057,6 +46263,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][33] = 68,
[0][1][1][0][RTW89_CN][33] = 127,
[0][1][1][0][RTW89_QATAR][33] = 40,
+ [0][1][1][0][RTW89_UK][33] = 46,
[0][1][1][0][RTW89_FCC][35] = 66,
[0][1][1][0][RTW89_ETSI][35] = 46,
[0][1][1][0][RTW89_MKK][35] = 70,
@@ -46068,6 +46275,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][35] = 66,
[0][1][1][0][RTW89_CN][35] = 127,
[0][1][1][0][RTW89_QATAR][35] = 40,
+ [0][1][1][0][RTW89_UK][35] = 46,
[0][1][1][0][RTW89_FCC][37] = 68,
[0][1][1][0][RTW89_ETSI][37] = 127,
[0][1][1][0][RTW89_MKK][37] = 70,
@@ -46079,6 +46287,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][37] = 68,
[0][1][1][0][RTW89_CN][37] = 127,
[0][1][1][0][RTW89_QATAR][37] = 127,
+ [0][1][1][0][RTW89_UK][37] = 74,
[0][1][1][0][RTW89_FCC][38] = 76,
[0][1][1][0][RTW89_ETSI][38] = 16,
[0][1][1][0][RTW89_MKK][38] = 127,
@@ -46090,6 +46299,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][38] = 76,
[0][1][1][0][RTW89_CN][38] = 72,
[0][1][1][0][RTW89_QATAR][38] = 16,
+ [0][1][1][0][RTW89_UK][38] = 44,
[0][1][1][0][RTW89_FCC][40] = 76,
[0][1][1][0][RTW89_ETSI][40] = 16,
[0][1][1][0][RTW89_MKK][40] = 127,
@@ -46101,6 +46311,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][40] = 76,
[0][1][1][0][RTW89_CN][40] = 76,
[0][1][1][0][RTW89_QATAR][40] = 16,
+ [0][1][1][0][RTW89_UK][40] = 44,
[0][1][1][0][RTW89_FCC][42] = 76,
[0][1][1][0][RTW89_ETSI][42] = 16,
[0][1][1][0][RTW89_MKK][42] = 127,
@@ -46112,6 +46323,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][42] = 76,
[0][1][1][0][RTW89_CN][42] = 76,
[0][1][1][0][RTW89_QATAR][42] = 16,
+ [0][1][1][0][RTW89_UK][42] = 44,
[0][1][1][0][RTW89_FCC][44] = 76,
[0][1][1][0][RTW89_ETSI][44] = 16,
[0][1][1][0][RTW89_MKK][44] = 127,
@@ -46123,6 +46335,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][44] = 76,
[0][1][1][0][RTW89_CN][44] = 76,
[0][1][1][0][RTW89_QATAR][44] = 16,
+ [0][1][1][0][RTW89_UK][44] = 44,
[0][1][1][0][RTW89_FCC][46] = 76,
[0][1][1][0][RTW89_ETSI][46] = 16,
[0][1][1][0][RTW89_MKK][46] = 127,
@@ -46134,6 +46347,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][46] = 76,
[0][1][1][0][RTW89_CN][46] = 76,
[0][1][1][0][RTW89_QATAR][46] = 16,
+ [0][1][1][0][RTW89_UK][46] = 44,
[0][0][2][0][RTW89_FCC][0] = 76,
[0][0][2][0][RTW89_ETSI][0] = 58,
[0][0][2][0][RTW89_MKK][0] = 62,
@@ -46145,6 +46359,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][0] = 62,
[0][0][2][0][RTW89_CN][0] = 58,
[0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 58,
[0][0][2][0][RTW89_FCC][2] = 76,
[0][0][2][0][RTW89_ETSI][2] = 58,
[0][0][2][0][RTW89_MKK][2] = 62,
@@ -46156,6 +46371,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][2] = 62,
[0][0][2][0][RTW89_CN][2] = 58,
[0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 58,
[0][0][2][0][RTW89_FCC][4] = 76,
[0][0][2][0][RTW89_ETSI][4] = 58,
[0][0][2][0][RTW89_MKK][4] = 62,
@@ -46167,6 +46383,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][4] = 62,
[0][0][2][0][RTW89_CN][4] = 58,
[0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 58,
[0][0][2][0][RTW89_FCC][6] = 76,
[0][0][2][0][RTW89_ETSI][6] = 58,
[0][0][2][0][RTW89_MKK][6] = 62,
@@ -46178,6 +46395,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][6] = 62,
[0][0][2][0][RTW89_CN][6] = 58,
[0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 58,
[0][0][2][0][RTW89_FCC][8] = 76,
[0][0][2][0][RTW89_ETSI][8] = 58,
[0][0][2][0][RTW89_MKK][8] = 62,
@@ -46189,6 +46407,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][8] = 76,
[0][0][2][0][RTW89_CN][8] = 58,
[0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 58,
[0][0][2][0][RTW89_FCC][10] = 76,
[0][0][2][0][RTW89_ETSI][10] = 58,
[0][0][2][0][RTW89_MKK][10] = 62,
@@ -46200,6 +46419,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][10] = 76,
[0][0][2][0][RTW89_CN][10] = 58,
[0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 58,
[0][0][2][0][RTW89_FCC][12] = 76,
[0][0][2][0][RTW89_ETSI][12] = 58,
[0][0][2][0][RTW89_MKK][12] = 62,
@@ -46211,6 +46431,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][12] = 76,
[0][0][2][0][RTW89_CN][12] = 58,
[0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 58,
[0][0][2][0][RTW89_FCC][14] = 76,
[0][0][2][0][RTW89_ETSI][14] = 58,
[0][0][2][0][RTW89_MKK][14] = 62,
@@ -46222,6 +46443,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][14] = 76,
[0][0][2][0][RTW89_CN][14] = 58,
[0][0][2][0][RTW89_QATAR][14] = 58,
+ [0][0][2][0][RTW89_UK][14] = 58,
[0][0][2][0][RTW89_FCC][15] = 74,
[0][0][2][0][RTW89_ETSI][15] = 58,
[0][0][2][0][RTW89_MKK][15] = 76,
@@ -46233,6 +46455,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][15] = 74,
[0][0][2][0][RTW89_CN][15] = 127,
[0][0][2][0][RTW89_QATAR][15] = 52,
+ [0][0][2][0][RTW89_UK][15] = 58,
[0][0][2][0][RTW89_FCC][17] = 76,
[0][0][2][0][RTW89_ETSI][17] = 58,
[0][0][2][0][RTW89_MKK][17] = 76,
@@ -46244,6 +46467,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][17] = 76,
[0][0][2][0][RTW89_CN][17] = 127,
[0][0][2][0][RTW89_QATAR][17] = 52,
+ [0][0][2][0][RTW89_UK][17] = 58,
[0][0][2][0][RTW89_FCC][19] = 76,
[0][0][2][0][RTW89_ETSI][19] = 58,
[0][0][2][0][RTW89_MKK][19] = 76,
@@ -46255,6 +46479,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][19] = 76,
[0][0][2][0][RTW89_CN][19] = 127,
[0][0][2][0][RTW89_QATAR][19] = 52,
+ [0][0][2][0][RTW89_UK][19] = 58,
[0][0][2][0][RTW89_FCC][21] = 76,
[0][0][2][0][RTW89_ETSI][21] = 58,
[0][0][2][0][RTW89_MKK][21] = 76,
@@ -46266,6 +46491,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][21] = 76,
[0][0][2][0][RTW89_CN][21] = 127,
[0][0][2][0][RTW89_QATAR][21] = 52,
+ [0][0][2][0][RTW89_UK][21] = 58,
[0][0][2][0][RTW89_FCC][23] = 76,
[0][0][2][0][RTW89_ETSI][23] = 58,
[0][0][2][0][RTW89_MKK][23] = 76,
@@ -46277,6 +46503,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][23] = 76,
[0][0][2][0][RTW89_CN][23] = 127,
[0][0][2][0][RTW89_QATAR][23] = 52,
+ [0][0][2][0][RTW89_UK][23] = 58,
[0][0][2][0][RTW89_FCC][25] = 76,
[0][0][2][0][RTW89_ETSI][25] = 58,
[0][0][2][0][RTW89_MKK][25] = 76,
@@ -46288,6 +46515,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][25] = 76,
[0][0][2][0][RTW89_CN][25] = 127,
[0][0][2][0][RTW89_QATAR][25] = 52,
+ [0][0][2][0][RTW89_UK][25] = 58,
[0][0][2][0][RTW89_FCC][27] = 76,
[0][0][2][0][RTW89_ETSI][27] = 58,
[0][0][2][0][RTW89_MKK][27] = 76,
@@ -46299,6 +46527,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][27] = 76,
[0][0][2][0][RTW89_CN][27] = 127,
[0][0][2][0][RTW89_QATAR][27] = 52,
+ [0][0][2][0][RTW89_UK][27] = 58,
[0][0][2][0][RTW89_FCC][29] = 76,
[0][0][2][0][RTW89_ETSI][29] = 58,
[0][0][2][0][RTW89_MKK][29] = 76,
@@ -46310,6 +46539,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][29] = 76,
[0][0][2][0][RTW89_CN][29] = 127,
[0][0][2][0][RTW89_QATAR][29] = 52,
+ [0][0][2][0][RTW89_UK][29] = 58,
[0][0][2][0][RTW89_FCC][31] = 76,
[0][0][2][0][RTW89_ETSI][31] = 58,
[0][0][2][0][RTW89_MKK][31] = 76,
@@ -46321,6 +46551,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][31] = 76,
[0][0][2][0][RTW89_CN][31] = 127,
[0][0][2][0][RTW89_QATAR][31] = 52,
+ [0][0][2][0][RTW89_UK][31] = 58,
[0][0][2][0][RTW89_FCC][33] = 76,
[0][0][2][0][RTW89_ETSI][33] = 58,
[0][0][2][0][RTW89_MKK][33] = 76,
@@ -46332,6 +46563,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][33] = 76,
[0][0][2][0][RTW89_CN][33] = 127,
[0][0][2][0][RTW89_QATAR][33] = 52,
+ [0][0][2][0][RTW89_UK][33] = 58,
[0][0][2][0][RTW89_FCC][35] = 70,
[0][0][2][0][RTW89_ETSI][35] = 58,
[0][0][2][0][RTW89_MKK][35] = 76,
@@ -46343,6 +46575,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][35] = 70,
[0][0][2][0][RTW89_CN][35] = 127,
[0][0][2][0][RTW89_QATAR][35] = 52,
+ [0][0][2][0][RTW89_UK][35] = 58,
[0][0][2][0][RTW89_FCC][37] = 76,
[0][0][2][0][RTW89_ETSI][37] = 127,
[0][0][2][0][RTW89_MKK][37] = 76,
@@ -46354,6 +46587,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][37] = 76,
[0][0][2][0][RTW89_CN][37] = 127,
[0][0][2][0][RTW89_QATAR][37] = 127,
+ [0][0][2][0][RTW89_UK][37] = 76,
[0][0][2][0][RTW89_FCC][38] = 76,
[0][0][2][0][RTW89_ETSI][38] = 28,
[0][0][2][0][RTW89_MKK][38] = 127,
@@ -46365,6 +46599,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][38] = 76,
[0][0][2][0][RTW89_CN][38] = 68,
[0][0][2][0][RTW89_QATAR][38] = 28,
+ [0][0][2][0][RTW89_UK][38] = 58,
[0][0][2][0][RTW89_FCC][40] = 76,
[0][0][2][0][RTW89_ETSI][40] = 28,
[0][0][2][0][RTW89_MKK][40] = 127,
@@ -46376,6 +46611,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][40] = 76,
[0][0][2][0][RTW89_CN][40] = 76,
[0][0][2][0][RTW89_QATAR][40] = 28,
+ [0][0][2][0][RTW89_UK][40] = 58,
[0][0][2][0][RTW89_FCC][42] = 76,
[0][0][2][0][RTW89_ETSI][42] = 28,
[0][0][2][0][RTW89_MKK][42] = 127,
@@ -46387,6 +46623,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][42] = 76,
[0][0][2][0][RTW89_CN][42] = 76,
[0][0][2][0][RTW89_QATAR][42] = 28,
+ [0][0][2][0][RTW89_UK][42] = 58,
[0][0][2][0][RTW89_FCC][44] = 76,
[0][0][2][0][RTW89_ETSI][44] = 28,
[0][0][2][0][RTW89_MKK][44] = 127,
@@ -46398,6 +46635,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][44] = 76,
[0][0][2][0][RTW89_CN][44] = 76,
[0][0][2][0][RTW89_QATAR][44] = 28,
+ [0][0][2][0][RTW89_UK][44] = 58,
[0][0][2][0][RTW89_FCC][46] = 76,
[0][0][2][0][RTW89_ETSI][46] = 28,
[0][0][2][0][RTW89_MKK][46] = 127,
@@ -46409,6 +46647,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MEXICO][46] = 76,
[0][0][2][0][RTW89_CN][46] = 76,
[0][0][2][0][RTW89_QATAR][46] = 28,
+ [0][0][2][0][RTW89_UK][46] = 58,
[0][1][2][0][RTW89_FCC][0] = 68,
[0][1][2][0][RTW89_ETSI][0] = 46,
[0][1][2][0][RTW89_MKK][0] = 50,
@@ -46420,6 +46659,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][0] = 50,
[0][1][2][0][RTW89_CN][0] = 46,
[0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 46,
[0][1][2][0][RTW89_FCC][2] = 68,
[0][1][2][0][RTW89_ETSI][2] = 46,
[0][1][2][0][RTW89_MKK][2] = 50,
@@ -46431,6 +46671,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][2] = 50,
[0][1][2][0][RTW89_CN][2] = 46,
[0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 46,
[0][1][2][0][RTW89_FCC][4] = 68,
[0][1][2][0][RTW89_ETSI][4] = 46,
[0][1][2][0][RTW89_MKK][4] = 50,
@@ -46442,6 +46683,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][4] = 50,
[0][1][2][0][RTW89_CN][4] = 46,
[0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 46,
[0][1][2][0][RTW89_FCC][6] = 68,
[0][1][2][0][RTW89_ETSI][6] = 46,
[0][1][2][0][RTW89_MKK][6] = 50,
@@ -46453,6 +46695,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][6] = 50,
[0][1][2][0][RTW89_CN][6] = 46,
[0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 46,
[0][1][2][0][RTW89_FCC][8] = 68,
[0][1][2][0][RTW89_ETSI][8] = 46,
[0][1][2][0][RTW89_MKK][8] = 50,
@@ -46464,6 +46707,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][8] = 68,
[0][1][2][0][RTW89_CN][8] = 46,
[0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 46,
[0][1][2][0][RTW89_FCC][10] = 68,
[0][1][2][0][RTW89_ETSI][10] = 46,
[0][1][2][0][RTW89_MKK][10] = 50,
@@ -46475,6 +46719,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][10] = 68,
[0][1][2][0][RTW89_CN][10] = 46,
[0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 46,
[0][1][2][0][RTW89_FCC][12] = 68,
[0][1][2][0][RTW89_ETSI][12] = 46,
[0][1][2][0][RTW89_MKK][12] = 50,
@@ -46486,6 +46731,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][12] = 68,
[0][1][2][0][RTW89_CN][12] = 46,
[0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 46,
[0][1][2][0][RTW89_FCC][14] = 68,
[0][1][2][0][RTW89_ETSI][14] = 46,
[0][1][2][0][RTW89_MKK][14] = 50,
@@ -46497,6 +46743,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][14] = 68,
[0][1][2][0][RTW89_CN][14] = 46,
[0][1][2][0][RTW89_QATAR][14] = 46,
+ [0][1][2][0][RTW89_UK][14] = 46,
[0][1][2][0][RTW89_FCC][15] = 68,
[0][1][2][0][RTW89_ETSI][15] = 46,
[0][1][2][0][RTW89_MKK][15] = 70,
@@ -46508,6 +46755,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][15] = 68,
[0][1][2][0][RTW89_CN][15] = 127,
[0][1][2][0][RTW89_QATAR][15] = 40,
+ [0][1][2][0][RTW89_UK][15] = 46,
[0][1][2][0][RTW89_FCC][17] = 68,
[0][1][2][0][RTW89_ETSI][17] = 46,
[0][1][2][0][RTW89_MKK][17] = 70,
@@ -46519,6 +46767,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][17] = 68,
[0][1][2][0][RTW89_CN][17] = 127,
[0][1][2][0][RTW89_QATAR][17] = 40,
+ [0][1][2][0][RTW89_UK][17] = 46,
[0][1][2][0][RTW89_FCC][19] = 68,
[0][1][2][0][RTW89_ETSI][19] = 46,
[0][1][2][0][RTW89_MKK][19] = 70,
@@ -46530,6 +46779,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][19] = 68,
[0][1][2][0][RTW89_CN][19] = 127,
[0][1][2][0][RTW89_QATAR][19] = 40,
+ [0][1][2][0][RTW89_UK][19] = 46,
[0][1][2][0][RTW89_FCC][21] = 68,
[0][1][2][0][RTW89_ETSI][21] = 46,
[0][1][2][0][RTW89_MKK][21] = 70,
@@ -46541,6 +46791,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][21] = 68,
[0][1][2][0][RTW89_CN][21] = 127,
[0][1][2][0][RTW89_QATAR][21] = 40,
+ [0][1][2][0][RTW89_UK][21] = 46,
[0][1][2][0][RTW89_FCC][23] = 68,
[0][1][2][0][RTW89_ETSI][23] = 46,
[0][1][2][0][RTW89_MKK][23] = 70,
@@ -46552,6 +46803,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][23] = 68,
[0][1][2][0][RTW89_CN][23] = 127,
[0][1][2][0][RTW89_QATAR][23] = 40,
+ [0][1][2][0][RTW89_UK][23] = 46,
[0][1][2][0][RTW89_FCC][25] = 68,
[0][1][2][0][RTW89_ETSI][25] = 46,
[0][1][2][0][RTW89_MKK][25] = 70,
@@ -46563,6 +46815,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][25] = 68,
[0][1][2][0][RTW89_CN][25] = 127,
[0][1][2][0][RTW89_QATAR][25] = 40,
+ [0][1][2][0][RTW89_UK][25] = 46,
[0][1][2][0][RTW89_FCC][27] = 68,
[0][1][2][0][RTW89_ETSI][27] = 46,
[0][1][2][0][RTW89_MKK][27] = 70,
@@ -46574,6 +46827,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][27] = 68,
[0][1][2][0][RTW89_CN][27] = 127,
[0][1][2][0][RTW89_QATAR][27] = 40,
+ [0][1][2][0][RTW89_UK][27] = 46,
[0][1][2][0][RTW89_FCC][29] = 68,
[0][1][2][0][RTW89_ETSI][29] = 46,
[0][1][2][0][RTW89_MKK][29] = 70,
@@ -46585,6 +46839,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][29] = 68,
[0][1][2][0][RTW89_CN][29] = 127,
[0][1][2][0][RTW89_QATAR][29] = 40,
+ [0][1][2][0][RTW89_UK][29] = 46,
[0][1][2][0][RTW89_FCC][31] = 68,
[0][1][2][0][RTW89_ETSI][31] = 46,
[0][1][2][0][RTW89_MKK][31] = 70,
@@ -46596,6 +46851,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][31] = 68,
[0][1][2][0][RTW89_CN][31] = 127,
[0][1][2][0][RTW89_QATAR][31] = 40,
+ [0][1][2][0][RTW89_UK][31] = 46,
[0][1][2][0][RTW89_FCC][33] = 68,
[0][1][2][0][RTW89_ETSI][33] = 46,
[0][1][2][0][RTW89_MKK][33] = 70,
@@ -46607,6 +46863,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][33] = 68,
[0][1][2][0][RTW89_CN][33] = 127,
[0][1][2][0][RTW89_QATAR][33] = 40,
+ [0][1][2][0][RTW89_UK][33] = 46,
[0][1][2][0][RTW89_FCC][35] = 64,
[0][1][2][0][RTW89_ETSI][35] = 46,
[0][1][2][0][RTW89_MKK][35] = 70,
@@ -46618,6 +46875,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][35] = 64,
[0][1][2][0][RTW89_CN][35] = 127,
[0][1][2][0][RTW89_QATAR][35] = 40,
+ [0][1][2][0][RTW89_UK][35] = 46,
[0][1][2][0][RTW89_FCC][37] = 68,
[0][1][2][0][RTW89_ETSI][37] = 127,
[0][1][2][0][RTW89_MKK][37] = 70,
@@ -46629,6 +46887,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][37] = 68,
[0][1][2][0][RTW89_CN][37] = 127,
[0][1][2][0][RTW89_QATAR][37] = 127,
+ [0][1][2][0][RTW89_UK][37] = 74,
[0][1][2][0][RTW89_FCC][38] = 76,
[0][1][2][0][RTW89_ETSI][38] = 16,
[0][1][2][0][RTW89_MKK][38] = 127,
@@ -46640,6 +46899,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][38] = 76,
[0][1][2][0][RTW89_CN][38] = 68,
[0][1][2][0][RTW89_QATAR][38] = 16,
+ [0][1][2][0][RTW89_UK][38] = 46,
[0][1][2][0][RTW89_FCC][40] = 76,
[0][1][2][0][RTW89_ETSI][40] = 16,
[0][1][2][0][RTW89_MKK][40] = 127,
@@ -46651,6 +46911,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][40] = 76,
[0][1][2][0][RTW89_CN][40] = 76,
[0][1][2][0][RTW89_QATAR][40] = 16,
+ [0][1][2][0][RTW89_UK][40] = 46,
[0][1][2][0][RTW89_FCC][42] = 76,
[0][1][2][0][RTW89_ETSI][42] = 16,
[0][1][2][0][RTW89_MKK][42] = 127,
@@ -46662,6 +46923,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][42] = 76,
[0][1][2][0][RTW89_CN][42] = 76,
[0][1][2][0][RTW89_QATAR][42] = 16,
+ [0][1][2][0][RTW89_UK][42] = 46,
[0][1][2][0][RTW89_FCC][44] = 76,
[0][1][2][0][RTW89_ETSI][44] = 16,
[0][1][2][0][RTW89_MKK][44] = 127,
@@ -46673,6 +46935,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][44] = 76,
[0][1][2][0][RTW89_CN][44] = 76,
[0][1][2][0][RTW89_QATAR][44] = 16,
+ [0][1][2][0][RTW89_UK][44] = 46,
[0][1][2][0][RTW89_FCC][46] = 76,
[0][1][2][0][RTW89_ETSI][46] = 16,
[0][1][2][0][RTW89_MKK][46] = 127,
@@ -46684,6 +46947,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MEXICO][46] = 76,
[0][1][2][0][RTW89_CN][46] = 76,
[0][1][2][0][RTW89_QATAR][46] = 16,
+ [0][1][2][0][RTW89_UK][46] = 46,
[0][1][2][1][RTW89_FCC][0] = 68,
[0][1][2][1][RTW89_ETSI][0] = 34,
[0][1][2][1][RTW89_MKK][0] = 50,
@@ -46695,6 +46959,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][0] = 50,
[0][1][2][1][RTW89_CN][0] = 34,
[0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_UK][0] = 34,
[0][1][2][1][RTW89_FCC][2] = 68,
[0][1][2][1][RTW89_ETSI][2] = 34,
[0][1][2][1][RTW89_MKK][2] = 50,
@@ -46706,6 +46971,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][2] = 50,
[0][1][2][1][RTW89_CN][2] = 34,
[0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 34,
[0][1][2][1][RTW89_FCC][4] = 68,
[0][1][2][1][RTW89_ETSI][4] = 34,
[0][1][2][1][RTW89_MKK][4] = 50,
@@ -46717,6 +46983,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][4] = 50,
[0][1][2][1][RTW89_CN][4] = 34,
[0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 34,
[0][1][2][1][RTW89_FCC][6] = 68,
[0][1][2][1][RTW89_ETSI][6] = 34,
[0][1][2][1][RTW89_MKK][6] = 50,
@@ -46728,6 +46995,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][6] = 50,
[0][1][2][1][RTW89_CN][6] = 34,
[0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 34,
[0][1][2][1][RTW89_FCC][8] = 68,
[0][1][2][1][RTW89_ETSI][8] = 34,
[0][1][2][1][RTW89_MKK][8] = 50,
@@ -46739,6 +47007,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][8] = 68,
[0][1][2][1][RTW89_CN][8] = 34,
[0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 34,
[0][1][2][1][RTW89_FCC][10] = 68,
[0][1][2][1][RTW89_ETSI][10] = 34,
[0][1][2][1][RTW89_MKK][10] = 50,
@@ -46750,6 +47019,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][10] = 68,
[0][1][2][1][RTW89_CN][10] = 34,
[0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 34,
[0][1][2][1][RTW89_FCC][12] = 68,
[0][1][2][1][RTW89_ETSI][12] = 34,
[0][1][2][1][RTW89_MKK][12] = 50,
@@ -46761,6 +47031,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][12] = 68,
[0][1][2][1][RTW89_CN][12] = 34,
[0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 34,
[0][1][2][1][RTW89_FCC][14] = 68,
[0][1][2][1][RTW89_ETSI][14] = 34,
[0][1][2][1][RTW89_MKK][14] = 50,
@@ -46772,6 +47043,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][14] = 68,
[0][1][2][1][RTW89_CN][14] = 34,
[0][1][2][1][RTW89_QATAR][14] = 34,
+ [0][1][2][1][RTW89_UK][14] = 34,
[0][1][2][1][RTW89_FCC][15] = 68,
[0][1][2][1][RTW89_ETSI][15] = 34,
[0][1][2][1][RTW89_MKK][15] = 70,
@@ -46783,6 +47055,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][15] = 68,
[0][1][2][1][RTW89_CN][15] = 127,
[0][1][2][1][RTW89_QATAR][15] = 28,
+ [0][1][2][1][RTW89_UK][15] = 34,
[0][1][2][1][RTW89_FCC][17] = 68,
[0][1][2][1][RTW89_ETSI][17] = 34,
[0][1][2][1][RTW89_MKK][17] = 70,
@@ -46794,6 +47067,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][17] = 68,
[0][1][2][1][RTW89_CN][17] = 127,
[0][1][2][1][RTW89_QATAR][17] = 28,
+ [0][1][2][1][RTW89_UK][17] = 34,
[0][1][2][1][RTW89_FCC][19] = 68,
[0][1][2][1][RTW89_ETSI][19] = 34,
[0][1][2][1][RTW89_MKK][19] = 70,
@@ -46805,6 +47079,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][19] = 68,
[0][1][2][1][RTW89_CN][19] = 127,
[0][1][2][1][RTW89_QATAR][19] = 28,
+ [0][1][2][1][RTW89_UK][19] = 34,
[0][1][2][1][RTW89_FCC][21] = 68,
[0][1][2][1][RTW89_ETSI][21] = 34,
[0][1][2][1][RTW89_MKK][21] = 70,
@@ -46816,6 +47091,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][21] = 68,
[0][1][2][1][RTW89_CN][21] = 127,
[0][1][2][1][RTW89_QATAR][21] = 28,
+ [0][1][2][1][RTW89_UK][21] = 34,
[0][1][2][1][RTW89_FCC][23] = 68,
[0][1][2][1][RTW89_ETSI][23] = 34,
[0][1][2][1][RTW89_MKK][23] = 70,
@@ -46827,6 +47103,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][23] = 68,
[0][1][2][1][RTW89_CN][23] = 127,
[0][1][2][1][RTW89_QATAR][23] = 28,
+ [0][1][2][1][RTW89_UK][23] = 34,
[0][1][2][1][RTW89_FCC][25] = 68,
[0][1][2][1][RTW89_ETSI][25] = 34,
[0][1][2][1][RTW89_MKK][25] = 70,
@@ -46838,6 +47115,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][25] = 68,
[0][1][2][1][RTW89_CN][25] = 127,
[0][1][2][1][RTW89_QATAR][25] = 28,
+ [0][1][2][1][RTW89_UK][25] = 34,
[0][1][2][1][RTW89_FCC][27] = 68,
[0][1][2][1][RTW89_ETSI][27] = 34,
[0][1][2][1][RTW89_MKK][27] = 70,
@@ -46849,6 +47127,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][27] = 68,
[0][1][2][1][RTW89_CN][27] = 127,
[0][1][2][1][RTW89_QATAR][27] = 28,
+ [0][1][2][1][RTW89_UK][27] = 34,
[0][1][2][1][RTW89_FCC][29] = 68,
[0][1][2][1][RTW89_ETSI][29] = 34,
[0][1][2][1][RTW89_MKK][29] = 70,
@@ -46860,6 +47139,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][29] = 68,
[0][1][2][1][RTW89_CN][29] = 127,
[0][1][2][1][RTW89_QATAR][29] = 28,
+ [0][1][2][1][RTW89_UK][29] = 34,
[0][1][2][1][RTW89_FCC][31] = 68,
[0][1][2][1][RTW89_ETSI][31] = 34,
[0][1][2][1][RTW89_MKK][31] = 70,
@@ -46871,6 +47151,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][31] = 68,
[0][1][2][1][RTW89_CN][31] = 127,
[0][1][2][1][RTW89_QATAR][31] = 28,
+ [0][1][2][1][RTW89_UK][31] = 34,
[0][1][2][1][RTW89_FCC][33] = 68,
[0][1][2][1][RTW89_ETSI][33] = 34,
[0][1][2][1][RTW89_MKK][33] = 70,
@@ -46882,6 +47163,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][33] = 68,
[0][1][2][1][RTW89_CN][33] = 127,
[0][1][2][1][RTW89_QATAR][33] = 28,
+ [0][1][2][1][RTW89_UK][33] = 34,
[0][1][2][1][RTW89_FCC][35] = 64,
[0][1][2][1][RTW89_ETSI][35] = 34,
[0][1][2][1][RTW89_MKK][35] = 70,
@@ -46893,6 +47175,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][35] = 64,
[0][1][2][1][RTW89_CN][35] = 127,
[0][1][2][1][RTW89_QATAR][35] = 28,
+ [0][1][2][1][RTW89_UK][35] = 34,
[0][1][2][1][RTW89_FCC][37] = 68,
[0][1][2][1][RTW89_ETSI][37] = 127,
[0][1][2][1][RTW89_MKK][37] = 70,
@@ -46904,6 +47187,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][37] = 68,
[0][1][2][1][RTW89_CN][37] = 127,
[0][1][2][1][RTW89_QATAR][37] = 127,
+ [0][1][2][1][RTW89_UK][37] = 62,
[0][1][2][1][RTW89_FCC][38] = 76,
[0][1][2][1][RTW89_ETSI][38] = 4,
[0][1][2][1][RTW89_MKK][38] = 127,
@@ -46915,6 +47199,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][38] = 76,
[0][1][2][1][RTW89_CN][38] = 68,
[0][1][2][1][RTW89_QATAR][38] = 4,
+ [0][1][2][1][RTW89_UK][38] = 34,
[0][1][2][1][RTW89_FCC][40] = 76,
[0][1][2][1][RTW89_ETSI][40] = 4,
[0][1][2][1][RTW89_MKK][40] = 127,
@@ -46926,6 +47211,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][40] = 76,
[0][1][2][1][RTW89_CN][40] = 70,
[0][1][2][1][RTW89_QATAR][40] = 4,
+ [0][1][2][1][RTW89_UK][40] = 34,
[0][1][2][1][RTW89_FCC][42] = 76,
[0][1][2][1][RTW89_ETSI][42] = 4,
[0][1][2][1][RTW89_MKK][42] = 127,
@@ -46937,6 +47223,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][42] = 76,
[0][1][2][1][RTW89_CN][42] = 70,
[0][1][2][1][RTW89_QATAR][42] = 4,
+ [0][1][2][1][RTW89_UK][42] = 34,
[0][1][2][1][RTW89_FCC][44] = 76,
[0][1][2][1][RTW89_ETSI][44] = 4,
[0][1][2][1][RTW89_MKK][44] = 127,
@@ -46948,6 +47235,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][44] = 76,
[0][1][2][1][RTW89_CN][44] = 70,
[0][1][2][1][RTW89_QATAR][44] = 4,
+ [0][1][2][1][RTW89_UK][44] = 34,
[0][1][2][1][RTW89_FCC][46] = 76,
[0][1][2][1][RTW89_ETSI][46] = 4,
[0][1][2][1][RTW89_MKK][46] = 127,
@@ -46959,6 +47247,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_MEXICO][46] = 76,
[0][1][2][1][RTW89_CN][46] = 70,
[0][1][2][1][RTW89_QATAR][46] = 4,
+ [0][1][2][1][RTW89_UK][46] = 34,
[1][0][2][0][RTW89_FCC][1] = 68,
[1][0][2][0][RTW89_ETSI][1] = 64,
[1][0][2][0][RTW89_MKK][1] = 62,
@@ -46970,6 +47259,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][1] = 62,
[1][0][2][0][RTW89_CN][1] = 64,
[1][0][2][0][RTW89_QATAR][1] = 64,
+ [1][0][2][0][RTW89_UK][1] = 64,
[1][0][2][0][RTW89_FCC][5] = 72,
[1][0][2][0][RTW89_ETSI][5] = 64,
[1][0][2][0][RTW89_MKK][5] = 62,
@@ -46981,6 +47271,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][5] = 62,
[1][0][2][0][RTW89_CN][5] = 64,
[1][0][2][0][RTW89_QATAR][5] = 64,
+ [1][0][2][0][RTW89_UK][5] = 64,
[1][0][2][0][RTW89_FCC][9] = 72,
[1][0][2][0][RTW89_ETSI][9] = 64,
[1][0][2][0][RTW89_MKK][9] = 62,
@@ -46992,6 +47283,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][9] = 72,
[1][0][2][0][RTW89_CN][9] = 64,
[1][0][2][0][RTW89_QATAR][9] = 64,
+ [1][0][2][0][RTW89_UK][9] = 64,
[1][0][2][0][RTW89_FCC][13] = 66,
[1][0][2][0][RTW89_ETSI][13] = 64,
[1][0][2][0][RTW89_MKK][13] = 62,
@@ -47003,6 +47295,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][13] = 66,
[1][0][2][0][RTW89_CN][13] = 64,
[1][0][2][0][RTW89_QATAR][13] = 64,
+ [1][0][2][0][RTW89_UK][13] = 64,
[1][0][2][0][RTW89_FCC][16] = 62,
[1][0][2][0][RTW89_ETSI][16] = 64,
[1][0][2][0][RTW89_MKK][16] = 72,
@@ -47014,6 +47307,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][16] = 62,
[1][0][2][0][RTW89_CN][16] = 127,
[1][0][2][0][RTW89_QATAR][16] = 52,
+ [1][0][2][0][RTW89_UK][16] = 64,
[1][0][2][0][RTW89_FCC][20] = 72,
[1][0][2][0][RTW89_ETSI][20] = 64,
[1][0][2][0][RTW89_MKK][20] = 72,
@@ -47025,6 +47319,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][20] = 72,
[1][0][2][0][RTW89_CN][20] = 127,
[1][0][2][0][RTW89_QATAR][20] = 52,
+ [1][0][2][0][RTW89_UK][20] = 64,
[1][0][2][0][RTW89_FCC][24] = 72,
[1][0][2][0][RTW89_ETSI][24] = 64,
[1][0][2][0][RTW89_MKK][24] = 72,
@@ -47036,6 +47331,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][24] = 72,
[1][0][2][0][RTW89_CN][24] = 127,
[1][0][2][0][RTW89_QATAR][24] = 52,
+ [1][0][2][0][RTW89_UK][24] = 64,
[1][0][2][0][RTW89_FCC][28] = 72,
[1][0][2][0][RTW89_ETSI][28] = 64,
[1][0][2][0][RTW89_MKK][28] = 72,
@@ -47047,6 +47343,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][28] = 72,
[1][0][2][0][RTW89_CN][28] = 127,
[1][0][2][0][RTW89_QATAR][28] = 52,
+ [1][0][2][0][RTW89_UK][28] = 64,
[1][0][2][0][RTW89_FCC][32] = 72,
[1][0][2][0][RTW89_ETSI][32] = 64,
[1][0][2][0][RTW89_MKK][32] = 72,
@@ -47058,6 +47355,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][32] = 72,
[1][0][2][0][RTW89_CN][32] = 127,
[1][0][2][0][RTW89_QATAR][32] = 52,
+ [1][0][2][0][RTW89_UK][32] = 64,
[1][0][2][0][RTW89_FCC][36] = 72,
[1][0][2][0][RTW89_ETSI][36] = 127,
[1][0][2][0][RTW89_MKK][36] = 72,
@@ -47069,6 +47367,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][36] = 72,
[1][0][2][0][RTW89_CN][36] = 127,
[1][0][2][0][RTW89_QATAR][36] = 127,
+ [1][0][2][0][RTW89_UK][36] = 72,
[1][0][2][0][RTW89_FCC][39] = 72,
[1][0][2][0][RTW89_ETSI][39] = 28,
[1][0][2][0][RTW89_MKK][39] = 127,
@@ -47080,6 +47379,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][39] = 72,
[1][0][2][0][RTW89_CN][39] = 68,
[1][0][2][0][RTW89_QATAR][39] = 28,
+ [1][0][2][0][RTW89_UK][39] = 64,
[1][0][2][0][RTW89_FCC][43] = 72,
[1][0][2][0][RTW89_ETSI][43] = 28,
[1][0][2][0][RTW89_MKK][43] = 127,
@@ -47091,6 +47391,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MEXICO][43] = 72,
[1][0][2][0][RTW89_CN][43] = 72,
[1][0][2][0][RTW89_QATAR][43] = 28,
+ [1][0][2][0][RTW89_UK][43] = 64,
[1][1][2][0][RTW89_FCC][1] = 58,
[1][1][2][0][RTW89_ETSI][1] = 52,
[1][1][2][0][RTW89_MKK][1] = 50,
@@ -47102,6 +47403,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][1] = 50,
[1][1][2][0][RTW89_CN][1] = 52,
[1][1][2][0][RTW89_QATAR][1] = 52,
+ [1][1][2][0][RTW89_UK][1] = 52,
[1][1][2][0][RTW89_FCC][5] = 72,
[1][1][2][0][RTW89_ETSI][5] = 52,
[1][1][2][0][RTW89_MKK][5] = 50,
@@ -47113,6 +47415,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][5] = 50,
[1][1][2][0][RTW89_CN][5] = 52,
[1][1][2][0][RTW89_QATAR][5] = 52,
+ [1][1][2][0][RTW89_UK][5] = 52,
[1][1][2][0][RTW89_FCC][9] = 72,
[1][1][2][0][RTW89_ETSI][9] = 52,
[1][1][2][0][RTW89_MKK][9] = 50,
@@ -47124,6 +47427,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][9] = 72,
[1][1][2][0][RTW89_CN][9] = 52,
[1][1][2][0][RTW89_QATAR][9] = 52,
+ [1][1][2][0][RTW89_UK][9] = 52,
[1][1][2][0][RTW89_FCC][13] = 58,
[1][1][2][0][RTW89_ETSI][13] = 52,
[1][1][2][0][RTW89_MKK][13] = 50,
@@ -47135,6 +47439,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][13] = 58,
[1][1][2][0][RTW89_CN][13] = 52,
[1][1][2][0][RTW89_QATAR][13] = 52,
+ [1][1][2][0][RTW89_UK][13] = 52,
[1][1][2][0][RTW89_FCC][16] = 56,
[1][1][2][0][RTW89_ETSI][16] = 52,
[1][1][2][0][RTW89_MKK][16] = 72,
@@ -47146,6 +47451,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][16] = 56,
[1][1][2][0][RTW89_CN][16] = 127,
[1][1][2][0][RTW89_QATAR][16] = 40,
+ [1][1][2][0][RTW89_UK][16] = 52,
[1][1][2][0][RTW89_FCC][20] = 72,
[1][1][2][0][RTW89_ETSI][20] = 52,
[1][1][2][0][RTW89_MKK][20] = 72,
@@ -47157,6 +47463,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][20] = 72,
[1][1][2][0][RTW89_CN][20] = 127,
[1][1][2][0][RTW89_QATAR][20] = 40,
+ [1][1][2][0][RTW89_UK][20] = 52,
[1][1][2][0][RTW89_FCC][24] = 72,
[1][1][2][0][RTW89_ETSI][24] = 52,
[1][1][2][0][RTW89_MKK][24] = 72,
@@ -47168,6 +47475,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][24] = 72,
[1][1][2][0][RTW89_CN][24] = 127,
[1][1][2][0][RTW89_QATAR][24] = 40,
+ [1][1][2][0][RTW89_UK][24] = 52,
[1][1][2][0][RTW89_FCC][28] = 72,
[1][1][2][0][RTW89_ETSI][28] = 52,
[1][1][2][0][RTW89_MKK][28] = 72,
@@ -47179,6 +47487,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][28] = 72,
[1][1][2][0][RTW89_CN][28] = 127,
[1][1][2][0][RTW89_QATAR][28] = 40,
+ [1][1][2][0][RTW89_UK][28] = 52,
[1][1][2][0][RTW89_FCC][32] = 68,
[1][1][2][0][RTW89_ETSI][32] = 52,
[1][1][2][0][RTW89_MKK][32] = 72,
@@ -47190,6 +47499,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][32] = 68,
[1][1][2][0][RTW89_CN][32] = 127,
[1][1][2][0][RTW89_QATAR][32] = 40,
+ [1][1][2][0][RTW89_UK][32] = 52,
[1][1][2][0][RTW89_FCC][36] = 72,
[1][1][2][0][RTW89_ETSI][36] = 127,
[1][1][2][0][RTW89_MKK][36] = 72,
@@ -47201,6 +47511,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][36] = 72,
[1][1][2][0][RTW89_CN][36] = 127,
[1][1][2][0][RTW89_QATAR][36] = 127,
+ [1][1][2][0][RTW89_UK][36] = 72,
[1][1][2][0][RTW89_FCC][39] = 72,
[1][1][2][0][RTW89_ETSI][39] = 16,
[1][1][2][0][RTW89_MKK][39] = 127,
@@ -47212,6 +47523,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][39] = 72,
[1][1][2][0][RTW89_CN][39] = 68,
[1][1][2][0][RTW89_QATAR][39] = 16,
+ [1][1][2][0][RTW89_UK][39] = 52,
[1][1][2][0][RTW89_FCC][43] = 72,
[1][1][2][0][RTW89_ETSI][43] = 16,
[1][1][2][0][RTW89_MKK][43] = 127,
@@ -47223,6 +47535,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MEXICO][43] = 72,
[1][1][2][0][RTW89_CN][43] = 72,
[1][1][2][0][RTW89_QATAR][43] = 16,
+ [1][1][2][0][RTW89_UK][43] = 52,
[1][1][2][1][RTW89_FCC][1] = 58,
[1][1][2][1][RTW89_ETSI][1] = 40,
[1][1][2][1][RTW89_MKK][1] = 50,
@@ -47234,6 +47547,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][1] = 50,
[1][1][2][1][RTW89_CN][1] = 40,
[1][1][2][1][RTW89_QATAR][1] = 40,
+ [1][1][2][1][RTW89_UK][1] = 40,
[1][1][2][1][RTW89_FCC][5] = 68,
[1][1][2][1][RTW89_ETSI][5] = 40,
[1][1][2][1][RTW89_MKK][5] = 50,
@@ -47245,6 +47559,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][5] = 50,
[1][1][2][1][RTW89_CN][5] = 40,
[1][1][2][1][RTW89_QATAR][5] = 40,
+ [1][1][2][1][RTW89_UK][5] = 40,
[1][1][2][1][RTW89_FCC][9] = 68,
[1][1][2][1][RTW89_ETSI][9] = 40,
[1][1][2][1][RTW89_MKK][9] = 50,
@@ -47256,6 +47571,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][9] = 68,
[1][1][2][1][RTW89_CN][9] = 40,
[1][1][2][1][RTW89_QATAR][9] = 40,
+ [1][1][2][1][RTW89_UK][9] = 40,
[1][1][2][1][RTW89_FCC][13] = 58,
[1][1][2][1][RTW89_ETSI][13] = 40,
[1][1][2][1][RTW89_MKK][13] = 50,
@@ -47267,6 +47583,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][13] = 58,
[1][1][2][1][RTW89_CN][13] = 40,
[1][1][2][1][RTW89_QATAR][13] = 40,
+ [1][1][2][1][RTW89_UK][13] = 40,
[1][1][2][1][RTW89_FCC][16] = 56,
[1][1][2][1][RTW89_ETSI][16] = 40,
[1][1][2][1][RTW89_MKK][16] = 72,
@@ -47278,6 +47595,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][16] = 56,
[1][1][2][1][RTW89_CN][16] = 127,
[1][1][2][1][RTW89_QATAR][16] = 28,
+ [1][1][2][1][RTW89_UK][16] = 40,
[1][1][2][1][RTW89_FCC][20] = 68,
[1][1][2][1][RTW89_ETSI][20] = 40,
[1][1][2][1][RTW89_MKK][20] = 72,
@@ -47289,6 +47607,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][20] = 68,
[1][1][2][1][RTW89_CN][20] = 127,
[1][1][2][1][RTW89_QATAR][20] = 28,
+ [1][1][2][1][RTW89_UK][20] = 40,
[1][1][2][1][RTW89_FCC][24] = 68,
[1][1][2][1][RTW89_ETSI][24] = 40,
[1][1][2][1][RTW89_MKK][24] = 72,
@@ -47300,6 +47619,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][24] = 68,
[1][1][2][1][RTW89_CN][24] = 127,
[1][1][2][1][RTW89_QATAR][24] = 28,
+ [1][1][2][1][RTW89_UK][24] = 40,
[1][1][2][1][RTW89_FCC][28] = 68,
[1][1][2][1][RTW89_ETSI][28] = 40,
[1][1][2][1][RTW89_MKK][28] = 72,
@@ -47311,6 +47631,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][28] = 68,
[1][1][2][1][RTW89_CN][28] = 127,
[1][1][2][1][RTW89_QATAR][28] = 28,
+ [1][1][2][1][RTW89_UK][28] = 40,
[1][1][2][1][RTW89_FCC][32] = 68,
[1][1][2][1][RTW89_ETSI][32] = 40,
[1][1][2][1][RTW89_MKK][32] = 72,
@@ -47322,6 +47643,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][32] = 68,
[1][1][2][1][RTW89_CN][32] = 127,
[1][1][2][1][RTW89_QATAR][32] = 28,
+ [1][1][2][1][RTW89_UK][32] = 40,
[1][1][2][1][RTW89_FCC][36] = 68,
[1][1][2][1][RTW89_ETSI][36] = 127,
[1][1][2][1][RTW89_MKK][36] = 72,
@@ -47333,6 +47655,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][36] = 68,
[1][1][2][1][RTW89_CN][36] = 127,
[1][1][2][1][RTW89_QATAR][36] = 127,
+ [1][1][2][1][RTW89_UK][36] = 66,
[1][1][2][1][RTW89_FCC][39] = 72,
[1][1][2][1][RTW89_ETSI][39] = 4,
[1][1][2][1][RTW89_MKK][39] = 127,
@@ -47344,6 +47667,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][39] = 72,
[1][1][2][1][RTW89_CN][39] = 62,
[1][1][2][1][RTW89_QATAR][39] = 4,
+ [1][1][2][1][RTW89_UK][39] = 40,
[1][1][2][1][RTW89_FCC][43] = 72,
[1][1][2][1][RTW89_ETSI][43] = 4,
[1][1][2][1][RTW89_MKK][43] = 127,
@@ -47355,6 +47679,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MEXICO][43] = 72,
[1][1][2][1][RTW89_CN][43] = 72,
[1][1][2][1][RTW89_QATAR][43] = 4,
+ [1][1][2][1][RTW89_UK][43] = 40,
[2][0][2][0][RTW89_FCC][3] = 64,
[2][0][2][0][RTW89_ETSI][3] = 64,
[2][0][2][0][RTW89_MKK][3] = 64,
@@ -47366,6 +47691,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][3] = 62,
[2][0][2][0][RTW89_CN][3] = 64,
[2][0][2][0][RTW89_QATAR][3] = 64,
+ [2][0][2][0][RTW89_UK][3] = 64,
[2][0][2][0][RTW89_FCC][11] = 64,
[2][0][2][0][RTW89_ETSI][11] = 64,
[2][0][2][0][RTW89_MKK][11] = 64,
@@ -47377,6 +47703,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][11] = 64,
[2][0][2][0][RTW89_CN][11] = 64,
[2][0][2][0][RTW89_QATAR][11] = 64,
+ [2][0][2][0][RTW89_UK][11] = 64,
[2][0][2][0][RTW89_FCC][18] = 62,
[2][0][2][0][RTW89_ETSI][18] = 64,
[2][0][2][0][RTW89_MKK][18] = 72,
@@ -47388,6 +47715,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][18] = 62,
[2][0][2][0][RTW89_CN][18] = 127,
[2][0][2][0][RTW89_QATAR][18] = 52,
+ [2][0][2][0][RTW89_UK][18] = 64,
[2][0][2][0][RTW89_FCC][26] = 72,
[2][0][2][0][RTW89_ETSI][26] = 64,
[2][0][2][0][RTW89_MKK][26] = 72,
@@ -47399,6 +47727,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][26] = 72,
[2][0][2][0][RTW89_CN][26] = 127,
[2][0][2][0][RTW89_QATAR][26] = 52,
+ [2][0][2][0][RTW89_UK][26] = 64,
[2][0][2][0][RTW89_FCC][34] = 72,
[2][0][2][0][RTW89_ETSI][34] = 127,
[2][0][2][0][RTW89_MKK][34] = 72,
@@ -47410,6 +47739,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][34] = 72,
[2][0][2][0][RTW89_CN][34] = 127,
[2][0][2][0][RTW89_QATAR][34] = 127,
+ [2][0][2][0][RTW89_UK][34] = 72,
[2][0][2][0][RTW89_FCC][41] = 72,
[2][0][2][0][RTW89_ETSI][41] = 28,
[2][0][2][0][RTW89_MKK][41] = 127,
@@ -47421,6 +47751,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_MEXICO][41] = 72,
[2][0][2][0][RTW89_CN][41] = 68,
[2][0][2][0][RTW89_QATAR][41] = 28,
+ [2][0][2][0][RTW89_UK][41] = 64,
[2][1][2][0][RTW89_FCC][3] = 56,
[2][1][2][0][RTW89_ETSI][3] = 52,
[2][1][2][0][RTW89_MKK][3] = 52,
@@ -47432,6 +47763,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][3] = 50,
[2][1][2][0][RTW89_CN][3] = 52,
[2][1][2][0][RTW89_QATAR][3] = 52,
+ [2][1][2][0][RTW89_UK][3] = 52,
[2][1][2][0][RTW89_FCC][11] = 56,
[2][1][2][0][RTW89_ETSI][11] = 52,
[2][1][2][0][RTW89_MKK][11] = 52,
@@ -47443,6 +47775,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][11] = 56,
[2][1][2][0][RTW89_CN][11] = 52,
[2][1][2][0][RTW89_QATAR][11] = 52,
+ [2][1][2][0][RTW89_UK][11] = 52,
[2][1][2][0][RTW89_FCC][18] = 56,
[2][1][2][0][RTW89_ETSI][18] = 52,
[2][1][2][0][RTW89_MKK][18] = 72,
@@ -47454,6 +47787,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][18] = 56,
[2][1][2][0][RTW89_CN][18] = 127,
[2][1][2][0][RTW89_QATAR][18] = 40,
+ [2][1][2][0][RTW89_UK][18] = 52,
[2][1][2][0][RTW89_FCC][26] = 72,
[2][1][2][0][RTW89_ETSI][26] = 52,
[2][1][2][0][RTW89_MKK][26] = 72,
@@ -47465,6 +47799,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][26] = 72,
[2][1][2][0][RTW89_CN][26] = 127,
[2][1][2][0][RTW89_QATAR][26] = 40,
+ [2][1][2][0][RTW89_UK][26] = 52,
[2][1][2][0][RTW89_FCC][34] = 72,
[2][1][2][0][RTW89_ETSI][34] = 127,
[2][1][2][0][RTW89_MKK][34] = 72,
@@ -47476,6 +47811,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][34] = 72,
[2][1][2][0][RTW89_CN][34] = 127,
[2][1][2][0][RTW89_QATAR][34] = 127,
+ [2][1][2][0][RTW89_UK][34] = 72,
[2][1][2][0][RTW89_FCC][41] = 72,
[2][1][2][0][RTW89_ETSI][41] = 16,
[2][1][2][0][RTW89_MKK][41] = 127,
@@ -47487,6 +47823,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_MEXICO][41] = 72,
[2][1][2][0][RTW89_CN][41] = 68,
[2][1][2][0][RTW89_QATAR][41] = 16,
+ [2][1][2][0][RTW89_UK][41] = 52,
[2][1][2][1][RTW89_FCC][3] = 56,
[2][1][2][1][RTW89_ETSI][3] = 40,
[2][1][2][1][RTW89_MKK][3] = 52,
@@ -47498,6 +47835,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][3] = 50,
[2][1][2][1][RTW89_CN][3] = 40,
[2][1][2][1][RTW89_QATAR][3] = 40,
+ [2][1][2][1][RTW89_UK][3] = 40,
[2][1][2][1][RTW89_FCC][11] = 56,
[2][1][2][1][RTW89_ETSI][11] = 40,
[2][1][2][1][RTW89_MKK][11] = 52,
@@ -47509,6 +47847,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][11] = 56,
[2][1][2][1][RTW89_CN][11] = 40,
[2][1][2][1][RTW89_QATAR][11] = 40,
+ [2][1][2][1][RTW89_UK][11] = 40,
[2][1][2][1][RTW89_FCC][18] = 56,
[2][1][2][1][RTW89_ETSI][18] = 40,
[2][1][2][1][RTW89_MKK][18] = 72,
@@ -47520,6 +47859,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][18] = 56,
[2][1][2][1][RTW89_CN][18] = 127,
[2][1][2][1][RTW89_QATAR][18] = 28,
+ [2][1][2][1][RTW89_UK][18] = 40,
[2][1][2][1][RTW89_FCC][26] = 68,
[2][1][2][1][RTW89_ETSI][26] = 40,
[2][1][2][1][RTW89_MKK][26] = 72,
@@ -47531,6 +47871,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][26] = 68,
[2][1][2][1][RTW89_CN][26] = 127,
[2][1][2][1][RTW89_QATAR][26] = 28,
+ [2][1][2][1][RTW89_UK][26] = 40,
[2][1][2][1][RTW89_FCC][34] = 68,
[2][1][2][1][RTW89_ETSI][34] = 127,
[2][1][2][1][RTW89_MKK][34] = 72,
@@ -47542,6 +47883,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][34] = 68,
[2][1][2][1][RTW89_CN][34] = 127,
[2][1][2][1][RTW89_QATAR][34] = 127,
+ [2][1][2][1][RTW89_UK][34] = 66,
[2][1][2][1][RTW89_FCC][41] = 72,
[2][1][2][1][RTW89_ETSI][41] = 4,
[2][1][2][1][RTW89_MKK][41] = 127,
@@ -47553,6 +47895,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_MEXICO][41] = 72,
[2][1][2][1][RTW89_CN][41] = 64,
[2][1][2][1][RTW89_QATAR][41] = 4,
+ [2][1][2][1][RTW89_UK][41] = 40,
};
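
(Editor's note, not part of the diff: each entry above is indexed as [bandwidth][n_tx][rate_section][beamforming][regulatory_domain][channel], and the value 127 appears to act as a "channel not permitted in this domain" sentinel. Below is a minimal, self-contained sketch of that indexing scheme; the enum subset, table contents, and helper names are illustrative assumptions, not the driver's actual API — only the index order and the 127 sentinel are taken from the table itself.)

	/* Hedged sketch only: how a 6-D limit table shaped like
	 * rtw89_8852a_txpwr_lmt_5g is looked up. Compile with any C
	 * compiler; nothing here is real rtw89 driver code.
	 */
	#include <stdio.h>

	typedef signed char s8;

	enum { REGD_FCC, REGD_ETSI, REGD_UK, REGD_NUM }; /* illustrative subset */

	#define TXPWR_INVALID 127 /* assumed sentinel: channel disallowed */

	/* tiny stand-in table: 1 bw x 1 ntx x 1 rate section x 1 bf
	 * x REGD_NUM domains x 2 channels
	 */
	static const s8 lmt[1][1][1][1][REGD_NUM][2] = {
		[0][0][0][0][REGD_FCC]  = { 68, 68 },
		[0][0][0][0][REGD_ETSI] = { 46, 46 },
		[0][0][0][0][REGD_UK]   = { 46, TXPWR_INVALID },
	};

	/* Return the stored limit; callers must treat 127 as "no TX". */
	static s8 get_limit(int bw, int ntx, int rs, int bf, int regd, int ch)
	{
		return lmt[bw][ntx][rs][bf][regd][ch];
	}

	int main(void)
	{
		s8 v = get_limit(0, 0, 0, 0, REGD_UK, 1);

		if (v == TXPWR_INVALID)
			printf("channel disallowed in this domain\n");
		else
			printf("limit = %d\n", v);
		return 0;
	}

(End of note; the diff resumes below with the 2 GHz RU limit table.)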
const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
@@ -47652,6 +47995,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][0] = 70,
[0][0][RTW89_CN][0] = 32,
[0][0][RTW89_QATAR][0] = 32,
+ [0][0][RTW89_UK][0] = 32,
[0][0][RTW89_FCC][1] = 70,
[0][0][RTW89_ETSI][1] = 32,
[0][0][RTW89_MKK][1] = 40,
@@ -47663,6 +48007,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][1] = 70,
[0][0][RTW89_CN][1] = 32,
[0][0][RTW89_QATAR][1] = 32,
+ [0][0][RTW89_UK][1] = 32,
[0][0][RTW89_FCC][2] = 74,
[0][0][RTW89_ETSI][2] = 32,
[0][0][RTW89_MKK][2] = 40,
@@ -47674,6 +48019,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][2] = 74,
[0][0][RTW89_CN][2] = 32,
[0][0][RTW89_QATAR][2] = 32,
+ [0][0][RTW89_UK][2] = 32,
[0][0][RTW89_FCC][3] = 78,
[0][0][RTW89_ETSI][3] = 32,
[0][0][RTW89_MKK][3] = 40,
@@ -47685,6 +48031,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][3] = 78,
[0][0][RTW89_CN][3] = 32,
[0][0][RTW89_QATAR][3] = 32,
+ [0][0][RTW89_UK][3] = 32,
[0][0][RTW89_FCC][4] = 78,
[0][0][RTW89_ETSI][4] = 32,
[0][0][RTW89_MKK][4] = 40,
@@ -47696,6 +48043,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][4] = 78,
[0][0][RTW89_CN][4] = 32,
[0][0][RTW89_QATAR][4] = 32,
+ [0][0][RTW89_UK][4] = 32,
[0][0][RTW89_FCC][5] = 78,
[0][0][RTW89_ETSI][5] = 32,
[0][0][RTW89_MKK][5] = 40,
@@ -47707,6 +48055,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][5] = 78,
[0][0][RTW89_CN][5] = 32,
[0][0][RTW89_QATAR][5] = 32,
+ [0][0][RTW89_UK][5] = 32,
[0][0][RTW89_FCC][6] = 78,
[0][0][RTW89_ETSI][6] = 32,
[0][0][RTW89_MKK][6] = 40,
@@ -47718,6 +48067,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][6] = 78,
[0][0][RTW89_CN][6] = 32,
[0][0][RTW89_QATAR][6] = 32,
+ [0][0][RTW89_UK][6] = 32,
[0][0][RTW89_FCC][7] = 78,
[0][0][RTW89_ETSI][7] = 32,
[0][0][RTW89_MKK][7] = 40,
@@ -47729,6 +48079,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][7] = 78,
[0][0][RTW89_CN][7] = 32,
[0][0][RTW89_QATAR][7] = 32,
+ [0][0][RTW89_UK][7] = 32,
[0][0][RTW89_FCC][8] = 74,
[0][0][RTW89_ETSI][8] = 32,
[0][0][RTW89_MKK][8] = 40,
@@ -47740,6 +48091,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][8] = 74,
[0][0][RTW89_CN][8] = 32,
[0][0][RTW89_QATAR][8] = 32,
+ [0][0][RTW89_UK][8] = 32,
[0][0][RTW89_FCC][9] = 70,
[0][0][RTW89_ETSI][9] = 32,
[0][0][RTW89_MKK][9] = 40,
@@ -47751,6 +48103,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][9] = 70,
[0][0][RTW89_CN][9] = 32,
[0][0][RTW89_QATAR][9] = 32,
+ [0][0][RTW89_UK][9] = 32,
[0][0][RTW89_FCC][10] = 70,
[0][0][RTW89_ETSI][10] = 32,
[0][0][RTW89_MKK][10] = 40,
@@ -47762,6 +48115,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][10] = 70,
[0][0][RTW89_CN][10] = 32,
[0][0][RTW89_QATAR][10] = 32,
+ [0][0][RTW89_UK][10] = 32,
[0][0][RTW89_FCC][11] = 58,
[0][0][RTW89_ETSI][11] = 32,
[0][0][RTW89_MKK][11] = 40,
@@ -47773,6 +48127,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][11] = 58,
[0][0][RTW89_CN][11] = 32,
[0][0][RTW89_QATAR][11] = 32,
+ [0][0][RTW89_UK][11] = 32,
[0][0][RTW89_FCC][12] = 34,
[0][0][RTW89_ETSI][12] = 32,
[0][0][RTW89_MKK][12] = 40,
@@ -47784,6 +48139,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][12] = 34,
[0][0][RTW89_CN][12] = 32,
[0][0][RTW89_QATAR][12] = 32,
+ [0][0][RTW89_UK][12] = 32,
[0][0][RTW89_FCC][13] = 127,
[0][0][RTW89_ETSI][13] = 127,
[0][0][RTW89_MKK][13] = 127,
@@ -47795,6 +48151,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][13] = 127,
[0][0][RTW89_CN][13] = 127,
[0][0][RTW89_QATAR][13] = 127,
+ [0][0][RTW89_UK][13] = 127,
[0][1][RTW89_FCC][0] = 64,
[0][1][RTW89_ETSI][0] = 20,
[0][1][RTW89_MKK][0] = 28,
@@ -47806,6 +48163,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][0] = 64,
[0][1][RTW89_CN][0] = 20,
[0][1][RTW89_QATAR][0] = 20,
+ [0][1][RTW89_UK][0] = 20,
[0][1][RTW89_FCC][1] = 64,
[0][1][RTW89_ETSI][1] = 20,
[0][1][RTW89_MKK][1] = 28,
@@ -47817,6 +48175,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][1] = 64,
[0][1][RTW89_CN][1] = 20,
[0][1][RTW89_QATAR][1] = 20,
+ [0][1][RTW89_UK][1] = 20,
[0][1][RTW89_FCC][2] = 68,
[0][1][RTW89_ETSI][2] = 20,
[0][1][RTW89_MKK][2] = 28,
@@ -47828,6 +48187,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][2] = 68,
[0][1][RTW89_CN][2] = 20,
[0][1][RTW89_QATAR][2] = 20,
+ [0][1][RTW89_UK][2] = 20,
[0][1][RTW89_FCC][3] = 72,
[0][1][RTW89_ETSI][3] = 20,
[0][1][RTW89_MKK][3] = 28,
@@ -47839,6 +48199,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][3] = 72,
[0][1][RTW89_CN][3] = 20,
[0][1][RTW89_QATAR][3] = 20,
+ [0][1][RTW89_UK][3] = 20,
[0][1][RTW89_FCC][4] = 76,
[0][1][RTW89_ETSI][4] = 20,
[0][1][RTW89_MKK][4] = 28,
@@ -47850,6 +48211,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][4] = 76,
[0][1][RTW89_CN][4] = 20,
[0][1][RTW89_QATAR][4] = 20,
+ [0][1][RTW89_UK][4] = 20,
[0][1][RTW89_FCC][5] = 78,
[0][1][RTW89_ETSI][5] = 20,
[0][1][RTW89_MKK][5] = 28,
@@ -47861,6 +48223,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][5] = 78,
[0][1][RTW89_CN][5] = 20,
[0][1][RTW89_QATAR][5] = 20,
+ [0][1][RTW89_UK][5] = 20,
[0][1][RTW89_FCC][6] = 76,
[0][1][RTW89_ETSI][6] = 20,
[0][1][RTW89_MKK][6] = 28,
@@ -47872,6 +48235,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][6] = 76,
[0][1][RTW89_CN][6] = 20,
[0][1][RTW89_QATAR][6] = 20,
+ [0][1][RTW89_UK][6] = 20,
[0][1][RTW89_FCC][7] = 72,
[0][1][RTW89_ETSI][7] = 20,
[0][1][RTW89_MKK][7] = 28,
@@ -47883,6 +48247,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][7] = 72,
[0][1][RTW89_CN][7] = 20,
[0][1][RTW89_QATAR][7] = 20,
+ [0][1][RTW89_UK][7] = 20,
[0][1][RTW89_FCC][8] = 68,
[0][1][RTW89_ETSI][8] = 20,
[0][1][RTW89_MKK][8] = 28,
@@ -47894,6 +48259,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][8] = 68,
[0][1][RTW89_CN][8] = 20,
[0][1][RTW89_QATAR][8] = 20,
+ [0][1][RTW89_UK][8] = 20,
[0][1][RTW89_FCC][9] = 64,
[0][1][RTW89_ETSI][9] = 20,
[0][1][RTW89_MKK][9] = 28,
@@ -47905,6 +48271,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][9] = 64,
[0][1][RTW89_CN][9] = 20,
[0][1][RTW89_QATAR][9] = 20,
+ [0][1][RTW89_UK][9] = 20,
[0][1][RTW89_FCC][10] = 64,
[0][1][RTW89_ETSI][10] = 20,
[0][1][RTW89_MKK][10] = 28,
@@ -47916,6 +48283,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][10] = 64,
[0][1][RTW89_CN][10] = 20,
[0][1][RTW89_QATAR][10] = 20,
+ [0][1][RTW89_UK][10] = 20,
[0][1][RTW89_FCC][11] = 54,
[0][1][RTW89_ETSI][11] = 20,
[0][1][RTW89_MKK][11] = 28,
@@ -47927,6 +48295,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][11] = 54,
[0][1][RTW89_CN][11] = 20,
[0][1][RTW89_QATAR][11] = 20,
+ [0][1][RTW89_UK][11] = 20,
[0][1][RTW89_FCC][12] = 32,
[0][1][RTW89_ETSI][12] = 20,
[0][1][RTW89_MKK][12] = 28,
@@ -47938,6 +48307,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][12] = 32,
[0][1][RTW89_CN][12] = 20,
[0][1][RTW89_QATAR][12] = 20,
+ [0][1][RTW89_UK][12] = 20,
[0][1][RTW89_FCC][13] = 127,
[0][1][RTW89_ETSI][13] = 127,
[0][1][RTW89_MKK][13] = 127,
@@ -47949,6 +48319,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][13] = 127,
[0][1][RTW89_CN][13] = 127,
[0][1][RTW89_QATAR][13] = 127,
+ [0][1][RTW89_UK][13] = 127,
[1][0][RTW89_FCC][0] = 72,
[1][0][RTW89_ETSI][0] = 42,
[1][0][RTW89_MKK][0] = 50,
@@ -47960,6 +48331,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][0] = 72,
[1][0][RTW89_CN][0] = 42,
[1][0][RTW89_QATAR][0] = 42,
+ [1][0][RTW89_UK][0] = 42,
[1][0][RTW89_FCC][1] = 72,
[1][0][RTW89_ETSI][1] = 42,
[1][0][RTW89_MKK][1] = 50,
@@ -47971,6 +48343,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][1] = 72,
[1][0][RTW89_CN][1] = 42,
[1][0][RTW89_QATAR][1] = 42,
+ [1][0][RTW89_UK][1] = 42,
[1][0][RTW89_FCC][2] = 76,
[1][0][RTW89_ETSI][2] = 42,
[1][0][RTW89_MKK][2] = 50,
@@ -47982,6 +48355,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][2] = 76,
[1][0][RTW89_CN][2] = 42,
[1][0][RTW89_QATAR][2] = 42,
+ [1][0][RTW89_UK][2] = 42,
[1][0][RTW89_FCC][3] = 78,
[1][0][RTW89_ETSI][3] = 42,
[1][0][RTW89_MKK][3] = 50,
@@ -47993,6 +48367,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][3] = 78,
[1][0][RTW89_CN][3] = 42,
[1][0][RTW89_QATAR][3] = 42,
+ [1][0][RTW89_UK][3] = 42,
[1][0][RTW89_FCC][4] = 78,
[1][0][RTW89_ETSI][4] = 42,
[1][0][RTW89_MKK][4] = 50,
@@ -48004,6 +48379,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][4] = 78,
[1][0][RTW89_CN][4] = 42,
[1][0][RTW89_QATAR][4] = 42,
+ [1][0][RTW89_UK][4] = 42,
[1][0][RTW89_FCC][5] = 78,
[1][0][RTW89_ETSI][5] = 42,
[1][0][RTW89_MKK][5] = 50,
@@ -48015,6 +48391,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][5] = 78,
[1][0][RTW89_CN][5] = 42,
[1][0][RTW89_QATAR][5] = 42,
+ [1][0][RTW89_UK][5] = 42,
[1][0][RTW89_FCC][6] = 78,
[1][0][RTW89_ETSI][6] = 42,
[1][0][RTW89_MKK][6] = 50,
@@ -48026,6 +48403,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][6] = 78,
[1][0][RTW89_CN][6] = 42,
[1][0][RTW89_QATAR][6] = 42,
+ [1][0][RTW89_UK][6] = 42,
[1][0][RTW89_FCC][7] = 78,
[1][0][RTW89_ETSI][7] = 42,
[1][0][RTW89_MKK][7] = 50,
@@ -48037,6 +48415,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][7] = 78,
[1][0][RTW89_CN][7] = 42,
[1][0][RTW89_QATAR][7] = 42,
+ [1][0][RTW89_UK][7] = 42,
[1][0][RTW89_FCC][8] = 78,
[1][0][RTW89_ETSI][8] = 42,
[1][0][RTW89_MKK][8] = 50,
@@ -48048,6 +48427,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][8] = 78,
[1][0][RTW89_CN][8] = 42,
[1][0][RTW89_QATAR][8] = 42,
+ [1][0][RTW89_UK][8] = 42,
[1][0][RTW89_FCC][9] = 74,
[1][0][RTW89_ETSI][9] = 42,
[1][0][RTW89_MKK][9] = 50,
@@ -48059,6 +48439,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][9] = 74,
[1][0][RTW89_CN][9] = 42,
[1][0][RTW89_QATAR][9] = 42,
+ [1][0][RTW89_UK][9] = 42,
[1][0][RTW89_FCC][10] = 74,
[1][0][RTW89_ETSI][10] = 42,
[1][0][RTW89_MKK][10] = 50,
@@ -48070,6 +48451,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][10] = 74,
[1][0][RTW89_CN][10] = 42,
[1][0][RTW89_QATAR][10] = 42,
+ [1][0][RTW89_UK][10] = 42,
[1][0][RTW89_FCC][11] = 64,
[1][0][RTW89_ETSI][11] = 42,
[1][0][RTW89_MKK][11] = 50,
@@ -48081,6 +48463,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][11] = 64,
[1][0][RTW89_CN][11] = 42,
[1][0][RTW89_QATAR][11] = 42,
+ [1][0][RTW89_UK][11] = 42,
[1][0][RTW89_FCC][12] = 36,
[1][0][RTW89_ETSI][12] = 42,
[1][0][RTW89_MKK][12] = 50,
@@ -48092,6 +48475,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][12] = 36,
[1][0][RTW89_CN][12] = 42,
[1][0][RTW89_QATAR][12] = 42,
+ [1][0][RTW89_UK][12] = 42,
[1][0][RTW89_FCC][13] = 127,
[1][0][RTW89_ETSI][13] = 127,
[1][0][RTW89_MKK][13] = 127,
@@ -48103,6 +48487,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][13] = 127,
[1][0][RTW89_CN][13] = 127,
[1][0][RTW89_QATAR][13] = 127,
+ [1][0][RTW89_UK][13] = 127,
[1][1][RTW89_FCC][0] = 66,
[1][1][RTW89_ETSI][0] = 30,
[1][1][RTW89_MKK][0] = 38,
@@ -48114,6 +48499,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][0] = 66,
[1][1][RTW89_CN][0] = 30,
[1][1][RTW89_QATAR][0] = 30,
+ [1][1][RTW89_UK][0] = 30,
[1][1][RTW89_FCC][1] = 66,
[1][1][RTW89_ETSI][1] = 30,
[1][1][RTW89_MKK][1] = 38,
@@ -48125,6 +48511,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][1] = 66,
[1][1][RTW89_CN][1] = 30,
[1][1][RTW89_QATAR][1] = 30,
+ [1][1][RTW89_UK][1] = 30,
[1][1][RTW89_FCC][2] = 70,
[1][1][RTW89_ETSI][2] = 30,
[1][1][RTW89_MKK][2] = 38,
@@ -48136,6 +48523,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][2] = 70,
[1][1][RTW89_CN][2] = 30,
[1][1][RTW89_QATAR][2] = 30,
+ [1][1][RTW89_UK][2] = 30,
[1][1][RTW89_FCC][3] = 74,
[1][1][RTW89_ETSI][3] = 30,
[1][1][RTW89_MKK][3] = 38,
@@ -48147,6 +48535,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][3] = 74,
[1][1][RTW89_CN][3] = 30,
[1][1][RTW89_QATAR][3] = 30,
+ [1][1][RTW89_UK][3] = 30,
[1][1][RTW89_FCC][4] = 78,
[1][1][RTW89_ETSI][4] = 30,
[1][1][RTW89_MKK][4] = 38,
@@ -48158,6 +48547,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][4] = 78,
[1][1][RTW89_CN][4] = 30,
[1][1][RTW89_QATAR][4] = 30,
+ [1][1][RTW89_UK][4] = 30,
[1][1][RTW89_FCC][5] = 78,
[1][1][RTW89_ETSI][5] = 30,
[1][1][RTW89_MKK][5] = 38,
@@ -48169,6 +48559,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][5] = 78,
[1][1][RTW89_CN][5] = 30,
[1][1][RTW89_QATAR][5] = 30,
+ [1][1][RTW89_UK][5] = 30,
[1][1][RTW89_FCC][6] = 78,
[1][1][RTW89_ETSI][6] = 30,
[1][1][RTW89_MKK][6] = 38,
@@ -48180,6 +48571,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][6] = 78,
[1][1][RTW89_CN][6] = 30,
[1][1][RTW89_QATAR][6] = 30,
+ [1][1][RTW89_UK][6] = 30,
[1][1][RTW89_FCC][7] = 74,
[1][1][RTW89_ETSI][7] = 30,
[1][1][RTW89_MKK][7] = 38,
@@ -48191,6 +48583,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][7] = 74,
[1][1][RTW89_CN][7] = 30,
[1][1][RTW89_QATAR][7] = 30,
+ [1][1][RTW89_UK][7] = 30,
[1][1][RTW89_FCC][8] = 70,
[1][1][RTW89_ETSI][8] = 30,
[1][1][RTW89_MKK][8] = 38,
@@ -48202,6 +48595,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][8] = 70,
[1][1][RTW89_CN][8] = 30,
[1][1][RTW89_QATAR][8] = 30,
+ [1][1][RTW89_UK][8] = 30,
[1][1][RTW89_FCC][9] = 66,
[1][1][RTW89_ETSI][9] = 30,
[1][1][RTW89_MKK][9] = 38,
@@ -48213,6 +48607,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][9] = 66,
[1][1][RTW89_CN][9] = 30,
[1][1][RTW89_QATAR][9] = 30,
+ [1][1][RTW89_UK][9] = 30,
[1][1][RTW89_FCC][10] = 66,
[1][1][RTW89_ETSI][10] = 30,
[1][1][RTW89_MKK][10] = 38,
@@ -48224,6 +48619,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][10] = 66,
[1][1][RTW89_CN][10] = 30,
[1][1][RTW89_QATAR][10] = 30,
+ [1][1][RTW89_UK][10] = 30,
[1][1][RTW89_FCC][11] = 60,
[1][1][RTW89_ETSI][11] = 30,
[1][1][RTW89_MKK][11] = 38,
@@ -48235,6 +48631,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][11] = 60,
[1][1][RTW89_CN][11] = 30,
[1][1][RTW89_QATAR][11] = 30,
+ [1][1][RTW89_UK][11] = 30,
[1][1][RTW89_FCC][12] = 32,
[1][1][RTW89_ETSI][12] = 30,
[1][1][RTW89_MKK][12] = 38,
@@ -48246,6 +48643,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][12] = 32,
[1][1][RTW89_CN][12] = 30,
[1][1][RTW89_QATAR][12] = 30,
+ [1][1][RTW89_UK][12] = 30,
[1][1][RTW89_FCC][13] = 127,
[1][1][RTW89_ETSI][13] = 127,
[1][1][RTW89_MKK][13] = 127,
@@ -48257,6 +48655,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][13] = 127,
[1][1][RTW89_CN][13] = 127,
[1][1][RTW89_QATAR][13] = 127,
+ [1][1][RTW89_UK][13] = 127,
[2][0][RTW89_FCC][0] = 76,
[2][0][RTW89_ETSI][0] = 52,
[2][0][RTW89_MKK][0] = 64,
@@ -48268,6 +48667,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][0] = 76,
[2][0][RTW89_CN][0] = 52,
[2][0][RTW89_QATAR][0] = 52,
+ [2][0][RTW89_UK][0] = 52,
[2][0][RTW89_FCC][1] = 76,
[2][0][RTW89_ETSI][1] = 52,
[2][0][RTW89_MKK][1] = 64,
@@ -48279,6 +48679,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][1] = 76,
[2][0][RTW89_CN][1] = 52,
[2][0][RTW89_QATAR][1] = 52,
+ [2][0][RTW89_UK][1] = 52,
[2][0][RTW89_FCC][2] = 78,
[2][0][RTW89_ETSI][2] = 52,
[2][0][RTW89_MKK][2] = 64,
@@ -48290,6 +48691,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][2] = 78,
[2][0][RTW89_CN][2] = 52,
[2][0][RTW89_QATAR][2] = 52,
+ [2][0][RTW89_UK][2] = 52,
[2][0][RTW89_FCC][3] = 78,
[2][0][RTW89_ETSI][3] = 52,
[2][0][RTW89_MKK][3] = 64,
@@ -48301,6 +48703,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][3] = 78,
[2][0][RTW89_CN][3] = 52,
[2][0][RTW89_QATAR][3] = 52,
+ [2][0][RTW89_UK][3] = 52,
[2][0][RTW89_FCC][4] = 78,
[2][0][RTW89_ETSI][4] = 52,
[2][0][RTW89_MKK][4] = 64,
@@ -48312,6 +48715,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][4] = 78,
[2][0][RTW89_CN][4] = 52,
[2][0][RTW89_QATAR][4] = 52,
+ [2][0][RTW89_UK][4] = 52,
[2][0][RTW89_FCC][5] = 78,
[2][0][RTW89_ETSI][5] = 52,
[2][0][RTW89_MKK][5] = 64,
@@ -48323,6 +48727,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][5] = 78,
[2][0][RTW89_CN][5] = 52,
[2][0][RTW89_QATAR][5] = 52,
+ [2][0][RTW89_UK][5] = 52,
[2][0][RTW89_FCC][6] = 78,
[2][0][RTW89_ETSI][6] = 52,
[2][0][RTW89_MKK][6] = 64,
@@ -48334,6 +48739,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][6] = 78,
[2][0][RTW89_CN][6] = 52,
[2][0][RTW89_QATAR][6] = 52,
+ [2][0][RTW89_UK][6] = 52,
[2][0][RTW89_FCC][7] = 78,
[2][0][RTW89_ETSI][7] = 52,
[2][0][RTW89_MKK][7] = 64,
@@ -48345,6 +48751,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][7] = 78,
[2][0][RTW89_CN][7] = 52,
[2][0][RTW89_QATAR][7] = 52,
+ [2][0][RTW89_UK][7] = 52,
[2][0][RTW89_FCC][8] = 78,
[2][0][RTW89_ETSI][8] = 52,
[2][0][RTW89_MKK][8] = 64,
@@ -48356,6 +48763,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][8] = 78,
[2][0][RTW89_CN][8] = 52,
[2][0][RTW89_QATAR][8] = 52,
+ [2][0][RTW89_UK][8] = 52,
[2][0][RTW89_FCC][9] = 76,
[2][0][RTW89_ETSI][9] = 52,
[2][0][RTW89_MKK][9] = 64,
@@ -48367,6 +48775,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][9] = 76,
[2][0][RTW89_CN][9] = 52,
[2][0][RTW89_QATAR][9] = 52,
+ [2][0][RTW89_UK][9] = 52,
[2][0][RTW89_FCC][10] = 76,
[2][0][RTW89_ETSI][10] = 52,
[2][0][RTW89_MKK][10] = 64,
@@ -48378,6 +48787,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][10] = 76,
[2][0][RTW89_CN][10] = 52,
[2][0][RTW89_QATAR][10] = 52,
+ [2][0][RTW89_UK][10] = 52,
[2][0][RTW89_FCC][11] = 68,
[2][0][RTW89_ETSI][11] = 52,
[2][0][RTW89_MKK][11] = 64,
@@ -48389,6 +48799,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][11] = 68,
[2][0][RTW89_CN][11] = 52,
[2][0][RTW89_QATAR][11] = 52,
+ [2][0][RTW89_UK][11] = 52,
[2][0][RTW89_FCC][12] = 40,
[2][0][RTW89_ETSI][12] = 52,
[2][0][RTW89_MKK][12] = 64,
@@ -48400,6 +48811,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][12] = 40,
[2][0][RTW89_CN][12] = 52,
[2][0][RTW89_QATAR][12] = 52,
+ [2][0][RTW89_UK][12] = 52,
[2][0][RTW89_FCC][13] = 127,
[2][0][RTW89_ETSI][13] = 127,
[2][0][RTW89_MKK][13] = 127,
@@ -48411,6 +48823,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][13] = 127,
[2][0][RTW89_CN][13] = 127,
[2][0][RTW89_QATAR][13] = 127,
+ [2][0][RTW89_UK][13] = 127,
[2][1][RTW89_FCC][0] = 68,
[2][1][RTW89_ETSI][0] = 40,
[2][1][RTW89_MKK][0] = 52,
@@ -48422,6 +48835,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][0] = 68,
[2][1][RTW89_CN][0] = 40,
[2][1][RTW89_QATAR][0] = 40,
+ [2][1][RTW89_UK][0] = 40,
[2][1][RTW89_FCC][1] = 68,
[2][1][RTW89_ETSI][1] = 40,
[2][1][RTW89_MKK][1] = 52,
@@ -48433,6 +48847,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][1] = 68,
[2][1][RTW89_CN][1] = 40,
[2][1][RTW89_QATAR][1] = 40,
+ [2][1][RTW89_UK][1] = 40,
[2][1][RTW89_FCC][2] = 72,
[2][1][RTW89_ETSI][2] = 40,
[2][1][RTW89_MKK][2] = 52,
@@ -48444,6 +48859,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][2] = 72,
[2][1][RTW89_CN][2] = 40,
[2][1][RTW89_QATAR][2] = 40,
+ [2][1][RTW89_UK][2] = 40,
[2][1][RTW89_FCC][3] = 76,
[2][1][RTW89_ETSI][3] = 40,
[2][1][RTW89_MKK][3] = 52,
@@ -48455,6 +48871,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][3] = 76,
[2][1][RTW89_CN][3] = 40,
[2][1][RTW89_QATAR][3] = 40,
+ [2][1][RTW89_UK][3] = 40,
[2][1][RTW89_FCC][4] = 78,
[2][1][RTW89_ETSI][4] = 40,
[2][1][RTW89_MKK][4] = 52,
@@ -48466,6 +48883,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][4] = 78,
[2][1][RTW89_CN][4] = 40,
[2][1][RTW89_QATAR][4] = 40,
+ [2][1][RTW89_UK][4] = 40,
[2][1][RTW89_FCC][5] = 78,
[2][1][RTW89_ETSI][5] = 40,
[2][1][RTW89_MKK][5] = 52,
@@ -48477,6 +48895,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][5] = 78,
[2][1][RTW89_CN][5] = 40,
[2][1][RTW89_QATAR][5] = 40,
+ [2][1][RTW89_UK][5] = 40,
[2][1][RTW89_FCC][6] = 78,
[2][1][RTW89_ETSI][6] = 40,
[2][1][RTW89_MKK][6] = 52,
@@ -48488,6 +48907,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][6] = 78,
[2][1][RTW89_CN][6] = 40,
[2][1][RTW89_QATAR][6] = 40,
+ [2][1][RTW89_UK][6] = 40,
[2][1][RTW89_FCC][7] = 78,
[2][1][RTW89_ETSI][7] = 40,
[2][1][RTW89_MKK][7] = 52,
@@ -48499,6 +48919,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][7] = 78,
[2][1][RTW89_CN][7] = 40,
[2][1][RTW89_QATAR][7] = 40,
+ [2][1][RTW89_UK][7] = 40,
[2][1][RTW89_FCC][8] = 74,
[2][1][RTW89_ETSI][8] = 40,
[2][1][RTW89_MKK][8] = 52,
@@ -48510,6 +48931,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][8] = 74,
[2][1][RTW89_CN][8] = 40,
[2][1][RTW89_QATAR][8] = 40,
+ [2][1][RTW89_UK][8] = 40,
[2][1][RTW89_FCC][9] = 70,
[2][1][RTW89_ETSI][9] = 40,
[2][1][RTW89_MKK][9] = 52,
@@ -48521,6 +48943,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][9] = 70,
[2][1][RTW89_CN][9] = 40,
[2][1][RTW89_QATAR][9] = 40,
+ [2][1][RTW89_UK][9] = 40,
[2][1][RTW89_FCC][10] = 70,
[2][1][RTW89_ETSI][10] = 40,
[2][1][RTW89_MKK][10] = 52,
@@ -48532,6 +48955,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][10] = 70,
[2][1][RTW89_CN][10] = 40,
[2][1][RTW89_QATAR][10] = 40,
+ [2][1][RTW89_UK][10] = 40,
[2][1][RTW89_FCC][11] = 48,
[2][1][RTW89_ETSI][11] = 40,
[2][1][RTW89_MKK][11] = 52,
@@ -48543,6 +48967,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][11] = 48,
[2][1][RTW89_CN][11] = 40,
[2][1][RTW89_QATAR][11] = 40,
+ [2][1][RTW89_UK][11] = 40,
[2][1][RTW89_FCC][12] = 26,
[2][1][RTW89_ETSI][12] = 40,
[2][1][RTW89_MKK][12] = 52,
@@ -48554,6 +48979,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][12] = 26,
[2][1][RTW89_CN][12] = 40,
[2][1][RTW89_QATAR][12] = 40,
+ [2][1][RTW89_UK][12] = 40,
[2][1][RTW89_FCC][13] = 127,
[2][1][RTW89_ETSI][13] = 127,
[2][1][RTW89_MKK][13] = 127,
@@ -48565,6 +48991,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][13] = 127,
[2][1][RTW89_CN][13] = 127,
[2][1][RTW89_QATAR][13] = 127,
+ [2][1][RTW89_UK][13] = 127,
};
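Every entry added in the hunks above follows one pattern: the table is indexed as [ru][ntx][regd][ch], so introducing RTW89_UK means supplying one value per existing bandwidth/antenna-path/channel slot. Below is a minimal sketch of how such an entry might be consumed; the helper name, the quarter-dBm unit, and the reading of 127 as a "no usable limit" sentinel (see index 13 above, where every domain holds 127) are assumptions for illustration, not taken from this patch.

#include <linux/types.h>

/* Hypothetical lookup helper: return the RU tx-power limit for one
 * regulatory domain, or -1 when the table holds 127, which this
 * sketch assumes marks channels without a meaningful limit.
 */
static int rtw8852a_ru_2g_limit(u8 ru, u8 ntx, u8 regd, u8 ch)
{
	s8 lmt = rtw89_8852a_txpwr_lmt_ru_2g[ru][ntx][regd][ch];

	return lmt == 127 ? -1 : lmt;	/* assumed quarter-dBm steps */
}
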
const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
@@ -48730,6 +49157,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][0] = 48,
[0][0][RTW89_CN][0] = 24,
[0][0][RTW89_QATAR][0] = 24,
+ [0][0][RTW89_UK][0] = 24,
[0][0][RTW89_FCC][2] = 48,
[0][0][RTW89_ETSI][2] = 24,
[0][0][RTW89_MKK][2] = 26,
@@ -48741,6 +49169,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][2] = 48,
[0][0][RTW89_CN][2] = 24,
[0][0][RTW89_QATAR][2] = 24,
+ [0][0][RTW89_UK][2] = 24,
[0][0][RTW89_FCC][4] = 48,
[0][0][RTW89_ETSI][4] = 24,
[0][0][RTW89_MKK][4] = 26,
@@ -48752,6 +49181,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][4] = 48,
[0][0][RTW89_CN][4] = 24,
[0][0][RTW89_QATAR][4] = 24,
+ [0][0][RTW89_UK][4] = 24,
[0][0][RTW89_FCC][6] = 48,
[0][0][RTW89_ETSI][6] = 24,
[0][0][RTW89_MKK][6] = 26,
@@ -48763,6 +49193,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][6] = 48,
[0][0][RTW89_CN][6] = 24,
[0][0][RTW89_QATAR][6] = 24,
+ [0][0][RTW89_UK][6] = 24,
[0][0][RTW89_FCC][8] = 48,
[0][0][RTW89_ETSI][8] = 24,
[0][0][RTW89_MKK][8] = 26,
@@ -48774,6 +49205,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][8] = 48,
[0][0][RTW89_CN][8] = 24,
[0][0][RTW89_QATAR][8] = 24,
+ [0][0][RTW89_UK][8] = 24,
[0][0][RTW89_FCC][10] = 48,
[0][0][RTW89_ETSI][10] = 24,
[0][0][RTW89_MKK][10] = 26,
@@ -48785,6 +49217,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][10] = 48,
[0][0][RTW89_CN][10] = 24,
[0][0][RTW89_QATAR][10] = 24,
+ [0][0][RTW89_UK][10] = 24,
[0][0][RTW89_FCC][12] = 48,
[0][0][RTW89_ETSI][12] = 24,
[0][0][RTW89_MKK][12] = 26,
@@ -48796,6 +49229,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][12] = 48,
[0][0][RTW89_CN][12] = 24,
[0][0][RTW89_QATAR][12] = 24,
+ [0][0][RTW89_UK][12] = 24,
[0][0][RTW89_FCC][14] = 48,
[0][0][RTW89_ETSI][14] = 24,
[0][0][RTW89_MKK][14] = 26,
@@ -48807,6 +49241,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][14] = 48,
[0][0][RTW89_CN][14] = 24,
[0][0][RTW89_QATAR][14] = 24,
+ [0][0][RTW89_UK][14] = 24,
[0][0][RTW89_FCC][15] = 48,
[0][0][RTW89_ETSI][15] = 24,
[0][0][RTW89_MKK][15] = 44,
@@ -48818,6 +49253,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][15] = 48,
[0][0][RTW89_CN][15] = 127,
[0][0][RTW89_QATAR][15] = 24,
+ [0][0][RTW89_UK][15] = 24,
[0][0][RTW89_FCC][17] = 48,
[0][0][RTW89_ETSI][17] = 24,
[0][0][RTW89_MKK][17] = 44,
@@ -48829,6 +49265,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][17] = 48,
[0][0][RTW89_CN][17] = 127,
[0][0][RTW89_QATAR][17] = 24,
+ [0][0][RTW89_UK][17] = 24,
[0][0][RTW89_FCC][19] = 48,
[0][0][RTW89_ETSI][19] = 24,
[0][0][RTW89_MKK][19] = 44,
@@ -48840,6 +49277,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][19] = 48,
[0][0][RTW89_CN][19] = 127,
[0][0][RTW89_QATAR][19] = 24,
+ [0][0][RTW89_UK][19] = 24,
[0][0][RTW89_FCC][21] = 48,
[0][0][RTW89_ETSI][21] = 24,
[0][0][RTW89_MKK][21] = 44,
@@ -48851,6 +49289,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][21] = 48,
[0][0][RTW89_CN][21] = 127,
[0][0][RTW89_QATAR][21] = 24,
+ [0][0][RTW89_UK][21] = 24,
[0][0][RTW89_FCC][23] = 48,
[0][0][RTW89_ETSI][23] = 24,
[0][0][RTW89_MKK][23] = 44,
@@ -48862,6 +49301,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][23] = 48,
[0][0][RTW89_CN][23] = 127,
[0][0][RTW89_QATAR][23] = 24,
+ [0][0][RTW89_UK][23] = 24,
[0][0][RTW89_FCC][25] = 48,
[0][0][RTW89_ETSI][25] = 24,
[0][0][RTW89_MKK][25] = 44,
@@ -48873,6 +49313,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][25] = 48,
[0][0][RTW89_CN][25] = 127,
[0][0][RTW89_QATAR][25] = 24,
+ [0][0][RTW89_UK][25] = 24,
[0][0][RTW89_FCC][27] = 48,
[0][0][RTW89_ETSI][27] = 24,
[0][0][RTW89_MKK][27] = 44,
@@ -48884,6 +49325,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][27] = 48,
[0][0][RTW89_CN][27] = 127,
[0][0][RTW89_QATAR][27] = 24,
+ [0][0][RTW89_UK][27] = 24,
[0][0][RTW89_FCC][29] = 48,
[0][0][RTW89_ETSI][29] = 24,
[0][0][RTW89_MKK][29] = 44,
@@ -48895,6 +49337,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][29] = 48,
[0][0][RTW89_CN][29] = 127,
[0][0][RTW89_QATAR][29] = 24,
+ [0][0][RTW89_UK][29] = 24,
[0][0][RTW89_FCC][31] = 48,
[0][0][RTW89_ETSI][31] = 24,
[0][0][RTW89_MKK][31] = 44,
@@ -48906,6 +49349,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][31] = 48,
[0][0][RTW89_CN][31] = 127,
[0][0][RTW89_QATAR][31] = 24,
+ [0][0][RTW89_UK][31] = 24,
[0][0][RTW89_FCC][33] = 48,
[0][0][RTW89_ETSI][33] = 24,
[0][0][RTW89_MKK][33] = 44,
@@ -48917,6 +49361,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][33] = 48,
[0][0][RTW89_CN][33] = 127,
[0][0][RTW89_QATAR][33] = 24,
+ [0][0][RTW89_UK][33] = 24,
[0][0][RTW89_FCC][35] = 48,
[0][0][RTW89_ETSI][35] = 24,
[0][0][RTW89_MKK][35] = 44,
@@ -48928,6 +49373,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][35] = 48,
[0][0][RTW89_CN][35] = 127,
[0][0][RTW89_QATAR][35] = 24,
+ [0][0][RTW89_UK][35] = 24,
[0][0][RTW89_FCC][37] = 48,
[0][0][RTW89_ETSI][37] = 127,
[0][0][RTW89_MKK][37] = 44,
@@ -48939,6 +49385,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][37] = 48,
[0][0][RTW89_CN][37] = 127,
[0][0][RTW89_QATAR][37] = 127,
+ [0][0][RTW89_UK][37] = 58,
[0][0][RTW89_FCC][38] = 76,
[0][0][RTW89_ETSI][38] = 28,
[0][0][RTW89_MKK][38] = 127,
@@ -48950,6 +49397,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][38] = 76,
[0][0][RTW89_CN][38] = 62,
[0][0][RTW89_QATAR][38] = 28,
+ [0][0][RTW89_UK][38] = 28,
[0][0][RTW89_FCC][40] = 76,
[0][0][RTW89_ETSI][40] = 28,
[0][0][RTW89_MKK][40] = 127,
@@ -48961,6 +49409,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][40] = 76,
[0][0][RTW89_CN][40] = 62,
[0][0][RTW89_QATAR][40] = 28,
+ [0][0][RTW89_UK][40] = 28,
[0][0][RTW89_FCC][42] = 76,
[0][0][RTW89_ETSI][42] = 28,
[0][0][RTW89_MKK][42] = 127,
@@ -48972,6 +49421,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][42] = 76,
[0][0][RTW89_CN][42] = 62,
[0][0][RTW89_QATAR][42] = 28,
+ [0][0][RTW89_UK][42] = 28,
[0][0][RTW89_FCC][44] = 76,
[0][0][RTW89_ETSI][44] = 28,
[0][0][RTW89_MKK][44] = 127,
@@ -48983,6 +49433,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][44] = 76,
[0][0][RTW89_CN][44] = 62,
[0][0][RTW89_QATAR][44] = 28,
+ [0][0][RTW89_UK][44] = 28,
[0][0][RTW89_FCC][46] = 76,
[0][0][RTW89_ETSI][46] = 28,
[0][0][RTW89_MKK][46] = 127,
@@ -48994,6 +49445,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MEXICO][46] = 76,
[0][0][RTW89_CN][46] = 62,
[0][0][RTW89_QATAR][46] = 28,
+ [0][0][RTW89_UK][46] = 28,
[0][1][RTW89_FCC][0] = 36,
[0][1][RTW89_ETSI][0] = 12,
[0][1][RTW89_MKK][0] = 14,
@@ -49005,6 +49457,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][0] = 36,
[0][1][RTW89_CN][0] = 12,
[0][1][RTW89_QATAR][0] = 12,
+ [0][1][RTW89_UK][0] = 12,
[0][1][RTW89_FCC][2] = 36,
[0][1][RTW89_ETSI][2] = 12,
[0][1][RTW89_MKK][2] = 14,
@@ -49016,6 +49469,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][2] = 36,
[0][1][RTW89_CN][2] = 12,
[0][1][RTW89_QATAR][2] = 12,
+ [0][1][RTW89_UK][2] = 12,
[0][1][RTW89_FCC][4] = 36,
[0][1][RTW89_ETSI][4] = 12,
[0][1][RTW89_MKK][4] = 14,
@@ -49027,6 +49481,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][4] = 36,
[0][1][RTW89_CN][4] = 12,
[0][1][RTW89_QATAR][4] = 12,
+ [0][1][RTW89_UK][4] = 12,
[0][1][RTW89_FCC][6] = 36,
[0][1][RTW89_ETSI][6] = 12,
[0][1][RTW89_MKK][6] = 14,
@@ -49038,6 +49493,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][6] = 36,
[0][1][RTW89_CN][6] = 12,
[0][1][RTW89_QATAR][6] = 12,
+ [0][1][RTW89_UK][6] = 12,
[0][1][RTW89_FCC][8] = 36,
[0][1][RTW89_ETSI][8] = 12,
[0][1][RTW89_MKK][8] = 14,
@@ -49049,6 +49505,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][8] = 36,
[0][1][RTW89_CN][8] = 12,
[0][1][RTW89_QATAR][8] = 12,
+ [0][1][RTW89_UK][8] = 12,
[0][1][RTW89_FCC][10] = 36,
[0][1][RTW89_ETSI][10] = 12,
[0][1][RTW89_MKK][10] = 14,
@@ -49060,6 +49517,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][10] = 36,
[0][1][RTW89_CN][10] = 12,
[0][1][RTW89_QATAR][10] = 12,
+ [0][1][RTW89_UK][10] = 12,
[0][1][RTW89_FCC][12] = 36,
[0][1][RTW89_ETSI][12] = 12,
[0][1][RTW89_MKK][12] = 14,
@@ -49071,6 +49529,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][12] = 36,
[0][1][RTW89_CN][12] = 12,
[0][1][RTW89_QATAR][12] = 12,
+ [0][1][RTW89_UK][12] = 12,
[0][1][RTW89_FCC][14] = 36,
[0][1][RTW89_ETSI][14] = 12,
[0][1][RTW89_MKK][14] = 14,
@@ -49082,6 +49541,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][14] = 36,
[0][1][RTW89_CN][14] = 12,
[0][1][RTW89_QATAR][14] = 12,
+ [0][1][RTW89_UK][14] = 12,
[0][1][RTW89_FCC][15] = 36,
[0][1][RTW89_ETSI][15] = 12,
[0][1][RTW89_MKK][15] = 32,
@@ -49093,6 +49553,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][15] = 36,
[0][1][RTW89_CN][15] = 127,
[0][1][RTW89_QATAR][15] = 12,
+ [0][1][RTW89_UK][15] = 12,
[0][1][RTW89_FCC][17] = 36,
[0][1][RTW89_ETSI][17] = 12,
[0][1][RTW89_MKK][17] = 32,
@@ -49104,6 +49565,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][17] = 36,
[0][1][RTW89_CN][17] = 127,
[0][1][RTW89_QATAR][17] = 12,
+ [0][1][RTW89_UK][17] = 12,
[0][1][RTW89_FCC][19] = 36,
[0][1][RTW89_ETSI][19] = 12,
[0][1][RTW89_MKK][19] = 32,
@@ -49115,6 +49577,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][19] = 36,
[0][1][RTW89_CN][19] = 127,
[0][1][RTW89_QATAR][19] = 12,
+ [0][1][RTW89_UK][19] = 12,
[0][1][RTW89_FCC][21] = 36,
[0][1][RTW89_ETSI][21] = 12,
[0][1][RTW89_MKK][21] = 32,
@@ -49126,6 +49589,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][21] = 36,
[0][1][RTW89_CN][21] = 127,
[0][1][RTW89_QATAR][21] = 12,
+ [0][1][RTW89_UK][21] = 12,
[0][1][RTW89_FCC][23] = 36,
[0][1][RTW89_ETSI][23] = 12,
[0][1][RTW89_MKK][23] = 32,
@@ -49137,6 +49601,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][23] = 36,
[0][1][RTW89_CN][23] = 127,
[0][1][RTW89_QATAR][23] = 12,
+ [0][1][RTW89_UK][23] = 12,
[0][1][RTW89_FCC][25] = 36,
[0][1][RTW89_ETSI][25] = 12,
[0][1][RTW89_MKK][25] = 32,
@@ -49148,6 +49613,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][25] = 36,
[0][1][RTW89_CN][25] = 127,
[0][1][RTW89_QATAR][25] = 12,
+ [0][1][RTW89_UK][25] = 12,
[0][1][RTW89_FCC][27] = 36,
[0][1][RTW89_ETSI][27] = 12,
[0][1][RTW89_MKK][27] = 32,
@@ -49159,6 +49625,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][27] = 36,
[0][1][RTW89_CN][27] = 127,
[0][1][RTW89_QATAR][27] = 12,
+ [0][1][RTW89_UK][27] = 12,
[0][1][RTW89_FCC][29] = 36,
[0][1][RTW89_ETSI][29] = 12,
[0][1][RTW89_MKK][29] = 32,
@@ -49170,6 +49637,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][29] = 36,
[0][1][RTW89_CN][29] = 127,
[0][1][RTW89_QATAR][29] = 12,
+ [0][1][RTW89_UK][29] = 12,
[0][1][RTW89_FCC][31] = 36,
[0][1][RTW89_ETSI][31] = 12,
[0][1][RTW89_MKK][31] = 32,
@@ -49181,6 +49649,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][31] = 36,
[0][1][RTW89_CN][31] = 127,
[0][1][RTW89_QATAR][31] = 12,
+ [0][1][RTW89_UK][31] = 12,
[0][1][RTW89_FCC][33] = 36,
[0][1][RTW89_ETSI][33] = 12,
[0][1][RTW89_MKK][33] = 32,
@@ -49192,6 +49661,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][33] = 36,
[0][1][RTW89_CN][33] = 127,
[0][1][RTW89_QATAR][33] = 12,
+ [0][1][RTW89_UK][33] = 12,
[0][1][RTW89_FCC][35] = 36,
[0][1][RTW89_ETSI][35] = 12,
[0][1][RTW89_MKK][35] = 32,
@@ -49203,6 +49673,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][35] = 36,
[0][1][RTW89_CN][35] = 127,
[0][1][RTW89_QATAR][35] = 12,
+ [0][1][RTW89_UK][35] = 12,
[0][1][RTW89_FCC][37] = 36,
[0][1][RTW89_ETSI][37] = 127,
[0][1][RTW89_MKK][37] = 32,
@@ -49214,6 +49685,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][37] = 36,
[0][1][RTW89_CN][37] = 127,
[0][1][RTW89_QATAR][37] = 127,
+ [0][1][RTW89_UK][37] = 46,
[0][1][RTW89_FCC][38] = 72,
[0][1][RTW89_ETSI][38] = 16,
[0][1][RTW89_MKK][38] = 127,
@@ -49225,6 +49697,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][38] = 72,
[0][1][RTW89_CN][38] = 50,
[0][1][RTW89_QATAR][38] = 16,
+ [0][1][RTW89_UK][38] = 16,
[0][1][RTW89_FCC][40] = 76,
[0][1][RTW89_ETSI][40] = 16,
[0][1][RTW89_MKK][40] = 127,
@@ -49236,6 +49709,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][40] = 76,
[0][1][RTW89_CN][40] = 50,
[0][1][RTW89_QATAR][40] = 16,
+ [0][1][RTW89_UK][40] = 16,
[0][1][RTW89_FCC][42] = 76,
[0][1][RTW89_ETSI][42] = 16,
[0][1][RTW89_MKK][42] = 127,
@@ -49247,6 +49721,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][42] = 76,
[0][1][RTW89_CN][42] = 50,
[0][1][RTW89_QATAR][42] = 16,
+ [0][1][RTW89_UK][42] = 16,
[0][1][RTW89_FCC][44] = 76,
[0][1][RTW89_ETSI][44] = 16,
[0][1][RTW89_MKK][44] = 127,
@@ -49258,6 +49733,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][44] = 76,
[0][1][RTW89_CN][44] = 50,
[0][1][RTW89_QATAR][44] = 16,
+ [0][1][RTW89_UK][44] = 16,
[0][1][RTW89_FCC][46] = 76,
[0][1][RTW89_ETSI][46] = 16,
[0][1][RTW89_MKK][46] = 127,
@@ -49269,6 +49745,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MEXICO][46] = 76,
[0][1][RTW89_CN][46] = 50,
[0][1][RTW89_QATAR][46] = 16,
+ [0][1][RTW89_UK][46] = 16,
[1][0][RTW89_FCC][0] = 62,
[1][0][RTW89_ETSI][0] = 36,
[1][0][RTW89_MKK][0] = 36,
@@ -49280,6 +49757,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][0] = 62,
[1][0][RTW89_CN][0] = 36,
[1][0][RTW89_QATAR][0] = 36,
+ [1][0][RTW89_UK][0] = 36,
[1][0][RTW89_FCC][2] = 62,
[1][0][RTW89_ETSI][2] = 36,
[1][0][RTW89_MKK][2] = 36,
@@ -49291,6 +49769,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][2] = 62,
[1][0][RTW89_CN][2] = 36,
[1][0][RTW89_QATAR][2] = 36,
+ [1][0][RTW89_UK][2] = 36,
[1][0][RTW89_FCC][4] = 62,
[1][0][RTW89_ETSI][4] = 36,
[1][0][RTW89_MKK][4] = 36,
@@ -49302,6 +49781,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][4] = 62,
[1][0][RTW89_CN][4] = 36,
[1][0][RTW89_QATAR][4] = 36,
+ [1][0][RTW89_UK][4] = 36,
[1][0][RTW89_FCC][6] = 62,
[1][0][RTW89_ETSI][6] = 36,
[1][0][RTW89_MKK][6] = 36,
@@ -49313,6 +49793,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][6] = 62,
[1][0][RTW89_CN][6] = 36,
[1][0][RTW89_QATAR][6] = 36,
+ [1][0][RTW89_UK][6] = 36,
[1][0][RTW89_FCC][8] = 62,
[1][0][RTW89_ETSI][8] = 36,
[1][0][RTW89_MKK][8] = 36,
@@ -49324,6 +49805,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][8] = 62,
[1][0][RTW89_CN][8] = 36,
[1][0][RTW89_QATAR][8] = 36,
+ [1][0][RTW89_UK][8] = 36,
[1][0][RTW89_FCC][10] = 62,
[1][0][RTW89_ETSI][10] = 36,
[1][0][RTW89_MKK][10] = 36,
@@ -49335,6 +49817,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][10] = 62,
[1][0][RTW89_CN][10] = 36,
[1][0][RTW89_QATAR][10] = 36,
+ [1][0][RTW89_UK][10] = 36,
[1][0][RTW89_FCC][12] = 62,
[1][0][RTW89_ETSI][12] = 36,
[1][0][RTW89_MKK][12] = 36,
@@ -49346,6 +49829,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][12] = 62,
[1][0][RTW89_CN][12] = 36,
[1][0][RTW89_QATAR][12] = 36,
+ [1][0][RTW89_UK][12] = 36,
[1][0][RTW89_FCC][14] = 62,
[1][0][RTW89_ETSI][14] = 36,
[1][0][RTW89_MKK][14] = 36,
@@ -49357,6 +49841,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][14] = 62,
[1][0][RTW89_CN][14] = 36,
[1][0][RTW89_QATAR][14] = 36,
+ [1][0][RTW89_UK][14] = 36,
[1][0][RTW89_FCC][15] = 62,
[1][0][RTW89_ETSI][15] = 36,
[1][0][RTW89_MKK][15] = 58,
@@ -49368,6 +49853,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][15] = 62,
[1][0][RTW89_CN][15] = 127,
[1][0][RTW89_QATAR][15] = 36,
+ [1][0][RTW89_UK][15] = 36,
[1][0][RTW89_FCC][17] = 62,
[1][0][RTW89_ETSI][17] = 36,
[1][0][RTW89_MKK][17] = 58,
@@ -49379,6 +49865,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][17] = 62,
[1][0][RTW89_CN][17] = 127,
[1][0][RTW89_QATAR][17] = 36,
+ [1][0][RTW89_UK][17] = 36,
[1][0][RTW89_FCC][19] = 62,
[1][0][RTW89_ETSI][19] = 36,
[1][0][RTW89_MKK][19] = 58,
@@ -49390,6 +49877,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][19] = 62,
[1][0][RTW89_CN][19] = 127,
[1][0][RTW89_QATAR][19] = 36,
+ [1][0][RTW89_UK][19] = 36,
[1][0][RTW89_FCC][21] = 62,
[1][0][RTW89_ETSI][21] = 36,
[1][0][RTW89_MKK][21] = 58,
@@ -49401,6 +49889,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][21] = 62,
[1][0][RTW89_CN][21] = 127,
[1][0][RTW89_QATAR][21] = 36,
+ [1][0][RTW89_UK][21] = 36,
[1][0][RTW89_FCC][23] = 62,
[1][0][RTW89_ETSI][23] = 36,
[1][0][RTW89_MKK][23] = 58,
@@ -49412,6 +49901,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][23] = 62,
[1][0][RTW89_CN][23] = 127,
[1][0][RTW89_QATAR][23] = 36,
+ [1][0][RTW89_UK][23] = 36,
[1][0][RTW89_FCC][25] = 62,
[1][0][RTW89_ETSI][25] = 36,
[1][0][RTW89_MKK][25] = 58,
@@ -49423,6 +49913,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][25] = 62,
[1][0][RTW89_CN][25] = 127,
[1][0][RTW89_QATAR][25] = 36,
+ [1][0][RTW89_UK][25] = 36,
[1][0][RTW89_FCC][27] = 62,
[1][0][RTW89_ETSI][27] = 36,
[1][0][RTW89_MKK][27] = 58,
@@ -49434,6 +49925,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][27] = 62,
[1][0][RTW89_CN][27] = 127,
[1][0][RTW89_QATAR][27] = 36,
+ [1][0][RTW89_UK][27] = 36,
[1][0][RTW89_FCC][29] = 62,
[1][0][RTW89_ETSI][29] = 36,
[1][0][RTW89_MKK][29] = 58,
@@ -49445,6 +49937,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][29] = 62,
[1][0][RTW89_CN][29] = 127,
[1][0][RTW89_QATAR][29] = 36,
+ [1][0][RTW89_UK][29] = 36,
[1][0][RTW89_FCC][31] = 62,
[1][0][RTW89_ETSI][31] = 36,
[1][0][RTW89_MKK][31] = 58,
@@ -49456,6 +49949,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][31] = 62,
[1][0][RTW89_CN][31] = 127,
[1][0][RTW89_QATAR][31] = 36,
+ [1][0][RTW89_UK][31] = 36,
[1][0][RTW89_FCC][33] = 62,
[1][0][RTW89_ETSI][33] = 36,
[1][0][RTW89_MKK][33] = 58,
@@ -49467,6 +49961,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][33] = 62,
[1][0][RTW89_CN][33] = 127,
[1][0][RTW89_QATAR][33] = 36,
+ [1][0][RTW89_UK][33] = 36,
[1][0][RTW89_FCC][35] = 62,
[1][0][RTW89_ETSI][35] = 36,
[1][0][RTW89_MKK][35] = 58,
@@ -49478,6 +49973,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][35] = 62,
[1][0][RTW89_CN][35] = 127,
[1][0][RTW89_QATAR][35] = 36,
+ [1][0][RTW89_UK][35] = 36,
[1][0][RTW89_FCC][37] = 62,
[1][0][RTW89_ETSI][37] = 127,
[1][0][RTW89_MKK][37] = 58,
@@ -49489,6 +49985,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][37] = 62,
[1][0][RTW89_CN][37] = 127,
[1][0][RTW89_QATAR][37] = 127,
+ [1][0][RTW89_UK][37] = 64,
[1][0][RTW89_FCC][38] = 76,
[1][0][RTW89_ETSI][38] = 28,
[1][0][RTW89_MKK][38] = 127,
@@ -49500,6 +49997,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][38] = 76,
[1][0][RTW89_CN][38] = 74,
[1][0][RTW89_QATAR][38] = 28,
+ [1][0][RTW89_UK][38] = 34,
[1][0][RTW89_FCC][40] = 76,
[1][0][RTW89_ETSI][40] = 28,
[1][0][RTW89_MKK][40] = 127,
@@ -49511,6 +50009,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][40] = 76,
[1][0][RTW89_CN][40] = 74,
[1][0][RTW89_QATAR][40] = 28,
+ [1][0][RTW89_UK][40] = 34,
[1][0][RTW89_FCC][42] = 76,
[1][0][RTW89_ETSI][42] = 28,
[1][0][RTW89_MKK][42] = 127,
@@ -49522,6 +50021,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][42] = 76,
[1][0][RTW89_CN][42] = 74,
[1][0][RTW89_QATAR][42] = 28,
+ [1][0][RTW89_UK][42] = 34,
[1][0][RTW89_FCC][44] = 76,
[1][0][RTW89_ETSI][44] = 28,
[1][0][RTW89_MKK][44] = 127,
@@ -49533,6 +50033,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][44] = 76,
[1][0][RTW89_CN][44] = 74,
[1][0][RTW89_QATAR][44] = 28,
+ [1][0][RTW89_UK][44] = 34,
[1][0][RTW89_FCC][46] = 76,
[1][0][RTW89_ETSI][46] = 28,
[1][0][RTW89_MKK][46] = 127,
@@ -49544,6 +50045,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MEXICO][46] = 76,
[1][0][RTW89_CN][46] = 74,
[1][0][RTW89_QATAR][46] = 28,
+ [1][0][RTW89_UK][46] = 34,
[1][1][RTW89_FCC][0] = 46,
[1][1][RTW89_ETSI][0] = 22,
[1][1][RTW89_MKK][0] = 24,
@@ -49555,6 +50057,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][0] = 46,
[1][1][RTW89_CN][0] = 22,
[1][1][RTW89_QATAR][0] = 22,
+ [1][1][RTW89_UK][0] = 22,
[1][1][RTW89_FCC][2] = 46,
[1][1][RTW89_ETSI][2] = 22,
[1][1][RTW89_MKK][2] = 24,
@@ -49566,6 +50069,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][2] = 46,
[1][1][RTW89_CN][2] = 22,
[1][1][RTW89_QATAR][2] = 22,
+ [1][1][RTW89_UK][2] = 22,
[1][1][RTW89_FCC][4] = 46,
[1][1][RTW89_ETSI][4] = 22,
[1][1][RTW89_MKK][4] = 24,
@@ -49577,6 +50081,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][4] = 46,
[1][1][RTW89_CN][4] = 22,
[1][1][RTW89_QATAR][4] = 22,
+ [1][1][RTW89_UK][4] = 22,
[1][1][RTW89_FCC][6] = 46,
[1][1][RTW89_ETSI][6] = 22,
[1][1][RTW89_MKK][6] = 24,
@@ -49588,6 +50093,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][6] = 46,
[1][1][RTW89_CN][6] = 22,
[1][1][RTW89_QATAR][6] = 22,
+ [1][1][RTW89_UK][6] = 22,
[1][1][RTW89_FCC][8] = 46,
[1][1][RTW89_ETSI][8] = 22,
[1][1][RTW89_MKK][8] = 24,
@@ -49599,6 +50105,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][8] = 46,
[1][1][RTW89_CN][8] = 22,
[1][1][RTW89_QATAR][8] = 22,
+ [1][1][RTW89_UK][8] = 22,
[1][1][RTW89_FCC][10] = 46,
[1][1][RTW89_ETSI][10] = 22,
[1][1][RTW89_MKK][10] = 24,
@@ -49610,6 +50117,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][10] = 46,
[1][1][RTW89_CN][10] = 22,
[1][1][RTW89_QATAR][10] = 22,
+ [1][1][RTW89_UK][10] = 22,
[1][1][RTW89_FCC][12] = 46,
[1][1][RTW89_ETSI][12] = 22,
[1][1][RTW89_MKK][12] = 24,
@@ -49621,6 +50129,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][12] = 46,
[1][1][RTW89_CN][12] = 22,
[1][1][RTW89_QATAR][12] = 22,
+ [1][1][RTW89_UK][12] = 22,
[1][1][RTW89_FCC][14] = 46,
[1][1][RTW89_ETSI][14] = 22,
[1][1][RTW89_MKK][14] = 24,
@@ -49632,6 +50141,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][14] = 46,
[1][1][RTW89_CN][14] = 22,
[1][1][RTW89_QATAR][14] = 22,
+ [1][1][RTW89_UK][14] = 22,
[1][1][RTW89_FCC][15] = 46,
[1][1][RTW89_ETSI][15] = 22,
[1][1][RTW89_MKK][15] = 46,
@@ -49643,6 +50153,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][15] = 46,
[1][1][RTW89_CN][15] = 127,
[1][1][RTW89_QATAR][15] = 22,
+ [1][1][RTW89_UK][15] = 22,
[1][1][RTW89_FCC][17] = 46,
[1][1][RTW89_ETSI][17] = 22,
[1][1][RTW89_MKK][17] = 46,
@@ -49654,6 +50165,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][17] = 46,
[1][1][RTW89_CN][17] = 127,
[1][1][RTW89_QATAR][17] = 22,
+ [1][1][RTW89_UK][17] = 22,
[1][1][RTW89_FCC][19] = 46,
[1][1][RTW89_ETSI][19] = 22,
[1][1][RTW89_MKK][19] = 46,
@@ -49665,6 +50177,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][19] = 46,
[1][1][RTW89_CN][19] = 127,
[1][1][RTW89_QATAR][19] = 22,
+ [1][1][RTW89_UK][19] = 22,
[1][1][RTW89_FCC][21] = 46,
[1][1][RTW89_ETSI][21] = 22,
[1][1][RTW89_MKK][21] = 46,
@@ -49676,6 +50189,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][21] = 46,
[1][1][RTW89_CN][21] = 127,
[1][1][RTW89_QATAR][21] = 22,
+ [1][1][RTW89_UK][21] = 22,
[1][1][RTW89_FCC][23] = 46,
[1][1][RTW89_ETSI][23] = 22,
[1][1][RTW89_MKK][23] = 46,
@@ -49687,6 +50201,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][23] = 46,
[1][1][RTW89_CN][23] = 127,
[1][1][RTW89_QATAR][23] = 22,
+ [1][1][RTW89_UK][23] = 22,
[1][1][RTW89_FCC][25] = 46,
[1][1][RTW89_ETSI][25] = 22,
[1][1][RTW89_MKK][25] = 46,
@@ -49698,6 +50213,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][25] = 46,
[1][1][RTW89_CN][25] = 127,
[1][1][RTW89_QATAR][25] = 22,
+ [1][1][RTW89_UK][25] = 22,
[1][1][RTW89_FCC][27] = 46,
[1][1][RTW89_ETSI][27] = 22,
[1][1][RTW89_MKK][27] = 46,
@@ -49709,6 +50225,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][27] = 46,
[1][1][RTW89_CN][27] = 127,
[1][1][RTW89_QATAR][27] = 22,
+ [1][1][RTW89_UK][27] = 22,
[1][1][RTW89_FCC][29] = 46,
[1][1][RTW89_ETSI][29] = 22,
[1][1][RTW89_MKK][29] = 46,
@@ -49720,6 +50237,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][29] = 46,
[1][1][RTW89_CN][29] = 127,
[1][1][RTW89_QATAR][29] = 22,
+ [1][1][RTW89_UK][29] = 22,
[1][1][RTW89_FCC][31] = 46,
[1][1][RTW89_ETSI][31] = 22,
[1][1][RTW89_MKK][31] = 46,
@@ -49731,6 +50249,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][31] = 46,
[1][1][RTW89_CN][31] = 127,
[1][1][RTW89_QATAR][31] = 22,
+ [1][1][RTW89_UK][31] = 22,
[1][1][RTW89_FCC][33] = 46,
[1][1][RTW89_ETSI][33] = 22,
[1][1][RTW89_MKK][33] = 46,
@@ -49742,6 +50261,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][33] = 46,
[1][1][RTW89_CN][33] = 127,
[1][1][RTW89_QATAR][33] = 22,
+ [1][1][RTW89_UK][33] = 22,
[1][1][RTW89_FCC][35] = 46,
[1][1][RTW89_ETSI][35] = 22,
[1][1][RTW89_MKK][35] = 46,
@@ -49753,6 +50273,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][35] = 46,
[1][1][RTW89_CN][35] = 127,
[1][1][RTW89_QATAR][35] = 22,
+ [1][1][RTW89_UK][35] = 22,
[1][1][RTW89_FCC][37] = 46,
[1][1][RTW89_ETSI][37] = 127,
[1][1][RTW89_MKK][37] = 46,
@@ -49764,6 +50285,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][37] = 46,
[1][1][RTW89_CN][37] = 127,
[1][1][RTW89_QATAR][37] = 127,
+ [1][1][RTW89_UK][37] = 52,
[1][1][RTW89_FCC][38] = 74,
[1][1][RTW89_ETSI][38] = 16,
[1][1][RTW89_MKK][38] = 127,
@@ -49775,6 +50297,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][38] = 74,
[1][1][RTW89_CN][38] = 62,
[1][1][RTW89_QATAR][38] = 16,
+ [1][1][RTW89_UK][38] = 22,
[1][1][RTW89_FCC][40] = 76,
[1][1][RTW89_ETSI][40] = 16,
[1][1][RTW89_MKK][40] = 127,
@@ -49786,6 +50309,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][40] = 76,
[1][1][RTW89_CN][40] = 62,
[1][1][RTW89_QATAR][40] = 16,
+ [1][1][RTW89_UK][40] = 22,
[1][1][RTW89_FCC][42] = 76,
[1][1][RTW89_ETSI][42] = 16,
[1][1][RTW89_MKK][42] = 127,
@@ -49797,6 +50321,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][42] = 76,
[1][1][RTW89_CN][42] = 62,
[1][1][RTW89_QATAR][42] = 16,
+ [1][1][RTW89_UK][42] = 22,
[1][1][RTW89_FCC][44] = 76,
[1][1][RTW89_ETSI][44] = 16,
[1][1][RTW89_MKK][44] = 127,
@@ -49808,6 +50333,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][44] = 76,
[1][1][RTW89_CN][44] = 62,
[1][1][RTW89_QATAR][44] = 16,
+ [1][1][RTW89_UK][44] = 22,
[1][1][RTW89_FCC][46] = 76,
[1][1][RTW89_ETSI][46] = 16,
[1][1][RTW89_MKK][46] = 127,
@@ -49819,6 +50345,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MEXICO][46] = 76,
[1][1][RTW89_CN][46] = 62,
[1][1][RTW89_QATAR][46] = 16,
+ [1][1][RTW89_UK][46] = 22,
[2][0][RTW89_FCC][0] = 74,
[2][0][RTW89_ETSI][0] = 46,
[2][0][RTW89_MKK][0] = 50,
@@ -49830,6 +50357,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][0] = 62,
[2][0][RTW89_CN][0] = 46,
[2][0][RTW89_QATAR][0] = 46,
+ [2][0][RTW89_UK][0] = 46,
[2][0][RTW89_FCC][2] = 74,
[2][0][RTW89_ETSI][2] = 46,
[2][0][RTW89_MKK][2] = 50,
@@ -49841,6 +50369,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][2] = 62,
[2][0][RTW89_CN][2] = 46,
[2][0][RTW89_QATAR][2] = 46,
+ [2][0][RTW89_UK][2] = 46,
[2][0][RTW89_FCC][4] = 74,
[2][0][RTW89_ETSI][4] = 46,
[2][0][RTW89_MKK][4] = 50,
@@ -49852,6 +50381,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][4] = 62,
[2][0][RTW89_CN][4] = 46,
[2][0][RTW89_QATAR][4] = 46,
+ [2][0][RTW89_UK][4] = 46,
[2][0][RTW89_FCC][6] = 74,
[2][0][RTW89_ETSI][6] = 46,
[2][0][RTW89_MKK][6] = 50,
@@ -49863,6 +50393,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][6] = 62,
[2][0][RTW89_CN][6] = 46,
[2][0][RTW89_QATAR][6] = 46,
+ [2][0][RTW89_UK][6] = 46,
[2][0][RTW89_FCC][8] = 74,
[2][0][RTW89_ETSI][8] = 46,
[2][0][RTW89_MKK][8] = 50,
@@ -49874,6 +50405,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][8] = 74,
[2][0][RTW89_CN][8] = 46,
[2][0][RTW89_QATAR][8] = 46,
+ [2][0][RTW89_UK][8] = 46,
[2][0][RTW89_FCC][10] = 74,
[2][0][RTW89_ETSI][10] = 46,
[2][0][RTW89_MKK][10] = 50,
@@ -49885,6 +50417,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][10] = 74,
[2][0][RTW89_CN][10] = 46,
[2][0][RTW89_QATAR][10] = 46,
+ [2][0][RTW89_UK][10] = 46,
[2][0][RTW89_FCC][12] = 74,
[2][0][RTW89_ETSI][12] = 46,
[2][0][RTW89_MKK][12] = 50,
@@ -49896,6 +50429,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][12] = 74,
[2][0][RTW89_CN][12] = 46,
[2][0][RTW89_QATAR][12] = 46,
+ [2][0][RTW89_UK][12] = 46,
[2][0][RTW89_FCC][14] = 74,
[2][0][RTW89_ETSI][14] = 46,
[2][0][RTW89_MKK][14] = 50,
@@ -49907,6 +50441,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][14] = 74,
[2][0][RTW89_CN][14] = 46,
[2][0][RTW89_QATAR][14] = 46,
+ [2][0][RTW89_UK][14] = 46,
[2][0][RTW89_FCC][15] = 74,
[2][0][RTW89_ETSI][15] = 46,
[2][0][RTW89_MKK][15] = 70,
@@ -49918,6 +50453,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][15] = 74,
[2][0][RTW89_CN][15] = 127,
[2][0][RTW89_QATAR][15] = 46,
+ [2][0][RTW89_UK][15] = 46,
[2][0][RTW89_FCC][17] = 74,
[2][0][RTW89_ETSI][17] = 46,
[2][0][RTW89_MKK][17] = 70,
@@ -49929,6 +50465,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][17] = 74,
[2][0][RTW89_CN][17] = 127,
[2][0][RTW89_QATAR][17] = 46,
+ [2][0][RTW89_UK][17] = 46,
[2][0][RTW89_FCC][19] = 74,
[2][0][RTW89_ETSI][19] = 46,
[2][0][RTW89_MKK][19] = 70,
@@ -49940,6 +50477,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][19] = 74,
[2][0][RTW89_CN][19] = 127,
[2][0][RTW89_QATAR][19] = 46,
+ [2][0][RTW89_UK][19] = 46,
[2][0][RTW89_FCC][21] = 74,
[2][0][RTW89_ETSI][21] = 46,
[2][0][RTW89_MKK][21] = 70,
@@ -49951,6 +50489,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][21] = 74,
[2][0][RTW89_CN][21] = 127,
[2][0][RTW89_QATAR][21] = 46,
+ [2][0][RTW89_UK][21] = 46,
[2][0][RTW89_FCC][23] = 74,
[2][0][RTW89_ETSI][23] = 46,
[2][0][RTW89_MKK][23] = 70,
@@ -49962,6 +50501,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][23] = 74,
[2][0][RTW89_CN][23] = 127,
[2][0][RTW89_QATAR][23] = 46,
+ [2][0][RTW89_UK][23] = 46,
[2][0][RTW89_FCC][25] = 74,
[2][0][RTW89_ETSI][25] = 46,
[2][0][RTW89_MKK][25] = 70,
@@ -49973,6 +50513,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][25] = 74,
[2][0][RTW89_CN][25] = 127,
[2][0][RTW89_QATAR][25] = 46,
+ [2][0][RTW89_UK][25] = 46,
[2][0][RTW89_FCC][27] = 74,
[2][0][RTW89_ETSI][27] = 46,
[2][0][RTW89_MKK][27] = 70,
@@ -49984,6 +50525,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][27] = 74,
[2][0][RTW89_CN][27] = 127,
[2][0][RTW89_QATAR][27] = 46,
+ [2][0][RTW89_UK][27] = 46,
[2][0][RTW89_FCC][29] = 74,
[2][0][RTW89_ETSI][29] = 46,
[2][0][RTW89_MKK][29] = 70,
@@ -49995,6 +50537,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][29] = 74,
[2][0][RTW89_CN][29] = 127,
[2][0][RTW89_QATAR][29] = 46,
+ [2][0][RTW89_UK][29] = 46,
[2][0][RTW89_FCC][31] = 74,
[2][0][RTW89_ETSI][31] = 46,
[2][0][RTW89_MKK][31] = 70,
@@ -50006,6 +50549,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][31] = 74,
[2][0][RTW89_CN][31] = 127,
[2][0][RTW89_QATAR][31] = 46,
+ [2][0][RTW89_UK][31] = 46,
[2][0][RTW89_FCC][33] = 74,
[2][0][RTW89_ETSI][33] = 46,
[2][0][RTW89_MKK][33] = 70,
@@ -50017,6 +50561,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][33] = 74,
[2][0][RTW89_CN][33] = 127,
[2][0][RTW89_QATAR][33] = 46,
+ [2][0][RTW89_UK][33] = 46,
[2][0][RTW89_FCC][35] = 74,
[2][0][RTW89_ETSI][35] = 46,
[2][0][RTW89_MKK][35] = 70,
@@ -50028,6 +50573,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][35] = 74,
[2][0][RTW89_CN][35] = 127,
[2][0][RTW89_QATAR][35] = 46,
+ [2][0][RTW89_UK][35] = 46,
[2][0][RTW89_FCC][37] = 74,
[2][0][RTW89_ETSI][37] = 127,
[2][0][RTW89_MKK][37] = 70,
@@ -50039,6 +50585,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][37] = 74,
[2][0][RTW89_CN][37] = 127,
[2][0][RTW89_QATAR][37] = 127,
+ [2][0][RTW89_UK][37] = 74,
[2][0][RTW89_FCC][38] = 76,
[2][0][RTW89_ETSI][38] = 28,
[2][0][RTW89_MKK][38] = 127,
@@ -50050,6 +50597,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][38] = 76,
[2][0][RTW89_CN][38] = 76,
[2][0][RTW89_QATAR][38] = 28,
+ [2][0][RTW89_UK][38] = 44,
[2][0][RTW89_FCC][40] = 76,
[2][0][RTW89_ETSI][40] = 28,
[2][0][RTW89_MKK][40] = 127,
@@ -50061,6 +50609,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][40] = 76,
[2][0][RTW89_CN][40] = 76,
[2][0][RTW89_QATAR][40] = 28,
+ [2][0][RTW89_UK][40] = 44,
[2][0][RTW89_FCC][42] = 76,
[2][0][RTW89_ETSI][42] = 28,
[2][0][RTW89_MKK][42] = 127,
@@ -50072,6 +50621,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][42] = 76,
[2][0][RTW89_CN][42] = 76,
[2][0][RTW89_QATAR][42] = 28,
+ [2][0][RTW89_UK][42] = 44,
[2][0][RTW89_FCC][44] = 76,
[2][0][RTW89_ETSI][44] = 28,
[2][0][RTW89_MKK][44] = 127,
@@ -50083,6 +50633,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][44] = 76,
[2][0][RTW89_CN][44] = 76,
[2][0][RTW89_QATAR][44] = 28,
+ [2][0][RTW89_UK][44] = 44,
[2][0][RTW89_FCC][46] = 76,
[2][0][RTW89_ETSI][46] = 28,
[2][0][RTW89_MKK][46] = 127,
@@ -50094,6 +50645,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MEXICO][46] = 76,
[2][0][RTW89_CN][46] = 76,
[2][0][RTW89_QATAR][46] = 28,
+ [2][0][RTW89_UK][46] = 44,
[2][1][RTW89_FCC][0] = 58,
[2][1][RTW89_ETSI][0] = 32,
[2][1][RTW89_MKK][0] = 38,
@@ -50105,6 +50657,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][0] = 50,
[2][1][RTW89_CN][0] = 32,
[2][1][RTW89_QATAR][0] = 32,
+ [2][1][RTW89_UK][0] = 32,
[2][1][RTW89_FCC][2] = 58,
[2][1][RTW89_ETSI][2] = 32,
[2][1][RTW89_MKK][2] = 38,
@@ -50116,6 +50669,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][2] = 50,
[2][1][RTW89_CN][2] = 32,
[2][1][RTW89_QATAR][2] = 32,
+ [2][1][RTW89_UK][2] = 32,
[2][1][RTW89_FCC][4] = 58,
[2][1][RTW89_ETSI][4] = 32,
[2][1][RTW89_MKK][4] = 38,
@@ -50127,6 +50681,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][4] = 50,
[2][1][RTW89_CN][4] = 32,
[2][1][RTW89_QATAR][4] = 32,
+ [2][1][RTW89_UK][4] = 32,
[2][1][RTW89_FCC][6] = 58,
[2][1][RTW89_ETSI][6] = 32,
[2][1][RTW89_MKK][6] = 38,
@@ -50138,6 +50693,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][6] = 50,
[2][1][RTW89_CN][6] = 32,
[2][1][RTW89_QATAR][6] = 32,
+ [2][1][RTW89_UK][6] = 32,
[2][1][RTW89_FCC][8] = 58,
[2][1][RTW89_ETSI][8] = 32,
[2][1][RTW89_MKK][8] = 38,
@@ -50149,6 +50705,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][8] = 58,
[2][1][RTW89_CN][8] = 32,
[2][1][RTW89_QATAR][8] = 32,
+ [2][1][RTW89_UK][8] = 32,
[2][1][RTW89_FCC][10] = 58,
[2][1][RTW89_ETSI][10] = 32,
[2][1][RTW89_MKK][10] = 38,
@@ -50160,6 +50717,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][10] = 58,
[2][1][RTW89_CN][10] = 32,
[2][1][RTW89_QATAR][10] = 32,
+ [2][1][RTW89_UK][10] = 32,
[2][1][RTW89_FCC][12] = 58,
[2][1][RTW89_ETSI][12] = 32,
[2][1][RTW89_MKK][12] = 38,
@@ -50171,6 +50729,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][12] = 58,
[2][1][RTW89_CN][12] = 32,
[2][1][RTW89_QATAR][12] = 32,
+ [2][1][RTW89_UK][12] = 32,
[2][1][RTW89_FCC][14] = 58,
[2][1][RTW89_ETSI][14] = 32,
[2][1][RTW89_MKK][14] = 38,
@@ -50182,6 +50741,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][14] = 58,
[2][1][RTW89_CN][14] = 32,
[2][1][RTW89_QATAR][14] = 32,
+ [2][1][RTW89_UK][14] = 32,
[2][1][RTW89_FCC][15] = 58,
[2][1][RTW89_ETSI][15] = 32,
[2][1][RTW89_MKK][15] = 58,
@@ -50193,6 +50753,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][15] = 58,
[2][1][RTW89_CN][15] = 127,
[2][1][RTW89_QATAR][15] = 32,
+ [2][1][RTW89_UK][15] = 32,
[2][1][RTW89_FCC][17] = 58,
[2][1][RTW89_ETSI][17] = 32,
[2][1][RTW89_MKK][17] = 58,
@@ -50204,6 +50765,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][17] = 58,
[2][1][RTW89_CN][17] = 127,
[2][1][RTW89_QATAR][17] = 32,
+ [2][1][RTW89_UK][17] = 32,
[2][1][RTW89_FCC][19] = 58,
[2][1][RTW89_ETSI][19] = 32,
[2][1][RTW89_MKK][19] = 58,
@@ -50215,6 +50777,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][19] = 58,
[2][1][RTW89_CN][19] = 127,
[2][1][RTW89_QATAR][19] = 32,
+ [2][1][RTW89_UK][19] = 32,
[2][1][RTW89_FCC][21] = 58,
[2][1][RTW89_ETSI][21] = 32,
[2][1][RTW89_MKK][21] = 58,
@@ -50226,6 +50789,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][21] = 58,
[2][1][RTW89_CN][21] = 127,
[2][1][RTW89_QATAR][21] = 32,
+ [2][1][RTW89_UK][21] = 32,
[2][1][RTW89_FCC][23] = 58,
[2][1][RTW89_ETSI][23] = 32,
[2][1][RTW89_MKK][23] = 58,
@@ -50237,6 +50801,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][23] = 58,
[2][1][RTW89_CN][23] = 127,
[2][1][RTW89_QATAR][23] = 32,
+ [2][1][RTW89_UK][23] = 32,
[2][1][RTW89_FCC][25] = 58,
[2][1][RTW89_ETSI][25] = 32,
[2][1][RTW89_MKK][25] = 58,
@@ -50248,6 +50813,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][25] = 58,
[2][1][RTW89_CN][25] = 127,
[2][1][RTW89_QATAR][25] = 32,
+ [2][1][RTW89_UK][25] = 32,
[2][1][RTW89_FCC][27] = 58,
[2][1][RTW89_ETSI][27] = 32,
[2][1][RTW89_MKK][27] = 58,
@@ -50259,6 +50825,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][27] = 58,
[2][1][RTW89_CN][27] = 127,
[2][1][RTW89_QATAR][27] = 32,
+ [2][1][RTW89_UK][27] = 32,
[2][1][RTW89_FCC][29] = 58,
[2][1][RTW89_ETSI][29] = 32,
[2][1][RTW89_MKK][29] = 58,
@@ -50270,6 +50837,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][29] = 58,
[2][1][RTW89_CN][29] = 127,
[2][1][RTW89_QATAR][29] = 32,
+ [2][1][RTW89_UK][29] = 32,
[2][1][RTW89_FCC][31] = 58,
[2][1][RTW89_ETSI][31] = 32,
[2][1][RTW89_MKK][31] = 58,
@@ -50281,6 +50849,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][31] = 58,
[2][1][RTW89_CN][31] = 127,
[2][1][RTW89_QATAR][31] = 32,
+ [2][1][RTW89_UK][31] = 32,
[2][1][RTW89_FCC][33] = 58,
[2][1][RTW89_ETSI][33] = 32,
[2][1][RTW89_MKK][33] = 58,
@@ -50292,6 +50861,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][33] = 58,
[2][1][RTW89_CN][33] = 127,
[2][1][RTW89_QATAR][33] = 32,
+ [2][1][RTW89_UK][33] = 32,
[2][1][RTW89_FCC][35] = 58,
[2][1][RTW89_ETSI][35] = 32,
[2][1][RTW89_MKK][35] = 58,
@@ -50303,6 +50873,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][35] = 58,
[2][1][RTW89_CN][35] = 127,
[2][1][RTW89_QATAR][35] = 32,
+ [2][1][RTW89_UK][35] = 32,
[2][1][RTW89_FCC][37] = 58,
[2][1][RTW89_ETSI][37] = 127,
[2][1][RTW89_MKK][37] = 58,
@@ -50314,6 +50885,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][37] = 58,
[2][1][RTW89_CN][37] = 127,
[2][1][RTW89_QATAR][37] = 127,
+ [2][1][RTW89_UK][37] = 62,
[2][1][RTW89_FCC][38] = 76,
[2][1][RTW89_ETSI][38] = 16,
[2][1][RTW89_MKK][38] = 127,
@@ -50325,6 +50897,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][38] = 76,
[2][1][RTW89_CN][38] = 64,
[2][1][RTW89_QATAR][38] = 16,
+ [2][1][RTW89_UK][38] = 32,
[2][1][RTW89_FCC][40] = 76,
[2][1][RTW89_ETSI][40] = 16,
[2][1][RTW89_MKK][40] = 127,
@@ -50336,6 +50909,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][40] = 76,
[2][1][RTW89_CN][40] = 64,
[2][1][RTW89_QATAR][40] = 16,
+ [2][1][RTW89_UK][40] = 32,
[2][1][RTW89_FCC][42] = 76,
[2][1][RTW89_ETSI][42] = 16,
[2][1][RTW89_MKK][42] = 127,
@@ -50347,6 +50921,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][42] = 76,
[2][1][RTW89_CN][42] = 64,
[2][1][RTW89_QATAR][42] = 16,
+ [2][1][RTW89_UK][42] = 32,
[2][1][RTW89_FCC][44] = 76,
[2][1][RTW89_ETSI][44] = 16,
[2][1][RTW89_MKK][44] = 127,
@@ -50358,6 +50933,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][44] = 76,
[2][1][RTW89_CN][44] = 64,
[2][1][RTW89_QATAR][44] = 16,
+ [2][1][RTW89_UK][44] = 32,
[2][1][RTW89_FCC][46] = 76,
[2][1][RTW89_ETSI][46] = 16,
[2][1][RTW89_MKK][46] = 127,
@@ -50369,6 +50945,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_MEXICO][46] = 76,
[2][1][RTW89_CN][46] = 64,
[2][1][RTW89_QATAR][46] = 16,
+ [2][1][RTW89_UK][46] = 32,
};
#define DECLARE_DIG_TABLE(name) \
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
new file mode 100644
index 000000000000..0cd8c0c44d19
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2021 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852a.h"
+
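+/* Bus-level description handed to the common rtw89 PCI core: DMA
+ * descriptor layout, burst sizes, chip-specific register offsets and
+ * the interrupt handling callbacks.
+ */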
+static const struct rtw89_pci_info rtw8852a_pci_info = {
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_2048B,
+ .rx_burst = MAC_AX_RX_BURST_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+
+ .init_cfg_reg = R_AX_PCIE_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN,
+ .rxhci_en_bit = B_AX_RXHCI_EN,
+ .rxbd_mode_bit = B_AX_RXBD_MODE,
+ .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
+ .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2,
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK},
+ .dma_stop2 = {R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK},
+ .dma_busy2_reg = R_AX_PCIE_DMA_BUSY2,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM,
+ .cpwm_addr = R_AX_CPWM,
+ .tx_dma_ch_mask = 0,
+ .bd_idx_addr_low_power = NULL,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+
+ .ltr_set = rtw89_pci_ltr_set,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .config_intr_mask = rtw89_pci_config_intr_mask,
+ .enable_intr = rtw89_pci_enable_intr,
+ .disable_intr = rtw89_pci_disable_intr,
+ .recognize_intrs = rtw89_pci_recognize_intrs,
+};
+
+static const struct rtw89_driver_info rtw89_8852ae_info = {
+ .chip = &rtw8852a_chip_info,
+ .bus = {
+ .pci = &rtw8852a_pci_info,
+ },
+};
+
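+/* Two known device IDs, both bound to the same chip/bus description. */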
+static const struct pci_device_id rtw89_8852ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
+
+static struct pci_driver rtw89_8852ae_driver = {
+ .name = "rtw89_8852ae",
+ .id_table = rtw89_8852ae_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ae_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852AE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
new file mode 100644
index 000000000000..9f9908418ee4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "core.h"
+#include "mac.h"
+#include "reg.h"
+
+static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
+ &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ &rtw89_mac_size.ple_qt58},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
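+ /* pulse B_AX_AFC_AFEDIG: set, clear, then leave it set */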
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
+
+ return 0;
+}
+
+static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ u8 wl_rfc_s0;
+ u8 wl_rfc_s1;
+ int ret;
+
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
+ if (ret)
+ return ret;
+ wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
+ if (ret)
+ return ret;
+ wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
+ FULL_BIT_MASK);
+ return ret;
+}
+
+static const struct rtw89_chip_ops rtw8852b_chip_ops = {
+ .enable_bb_rf = rtw8852b_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852b_mac_disable_bb_rf,
+};
+
+const struct rtw89_chip_info rtw8852b_chip_info = {
+ .chip_id = RTL8852B,
+ .fifo_size = 196608,
+ .dle_scc_rsvd_size = 98304,
+ .dle_mem = rtw8852b_dle_mem_pcie,
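+ /* mask off the DMA channels 8852B does not use: ACH4-7 and the
+ * band-1 management/high queues
+ */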
+ .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
+ BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
+ BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
+};
+EXPORT_SYMBOL(rtw8852b_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852b_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
new file mode 100644
index 000000000000..7bf95c38d3eb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+
+static const struct rtw89_pci_info rtw8852b_pci_info = {
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
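+ /* likewise mask the TX DMA channels this chip does not expose */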
+ .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
+ BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
+ BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+};
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
new file mode 100644
index 000000000000..67653b3e1a35
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -0,0 +1,3146 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c.h"
+#include "rtw8852c_rfk.h"
+#include "rtw8852c_table.h"
+#include "util.h"
+
+static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = {
+ {13, 1614, grp_0}, /* ACH 0 */
+ {13, 1614, grp_0}, /* ACH 1 */
+ {13, 1614, grp_0}, /* ACH 2 */
+ {13, 1614, grp_0}, /* ACH 3 */
+ {13, 1614, grp_1}, /* ACH 4 */
+ {13, 1614, grp_1}, /* ACH 5 */
+ {13, 1614, grp_1}, /* ACH 6 */
+ {13, 1614, grp_1}, /* ACH 7 */
+ {13, 1614, grp_0}, /* B0MGQ */
+ {13, 1614, grp_0}, /* B0HIQ */
+ {13, 1614, grp_1}, /* B1MGQ */
+ {13, 1614, grp_1}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852c_hfc_pubcfg_pcie = {
+ 1614, /* Group 0 */
+ 1614, /* Group 1 */
+ 3228, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852c_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8852c_hfc_chcfg_pcie, &rtw8852c_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
+static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size19,
+ &rtw89_mac_size.ple_size19, &rtw89_mac_size.wde_qt18,
+ &rtw89_mac_size.wde_qt18, &rtw89_mac_size.ple_qt46,
+ &rtw89_mac_size.ple_qt47},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size18,
+ &rtw89_mac_size.ple_size18, &rtw89_mac_size.wde_qt17,
+ &rtw89_mac_size.wde_qt17, &rtw89_mac_size.ple_qt44,
+ &rtw89_mac_size.ple_qt45},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const u32 rtw8852c_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0_V1, R_AX_H2CREG_DATA1_V1, R_AX_H2CREG_DATA2_V1,
+ R_AX_H2CREG_DATA3_V1
+};
+
+static const u32 rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = {
+ R_AX_C2HREG_DATA0_V1, R_AX_C2HREG_DATA1_V1, R_AX_C2HREG_DATA2_V1,
+ R_AX_C2HREG_DATA3_V1
+};
+
+static const struct rtw89_page_regs rtw8852c_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL_V1,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO_V1,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3_V1,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1_V1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2_V1,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1_V1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2_V1,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1_V1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2_V1,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1_V1,
+};
+
+static const struct rtw89_reg_def rtw8852c_dcfo_comp = {
+ R_DCFO_COMP_S0_V1, B_DCFO_COMP_S0_V1_MSK
+};
+
+static const struct rtw89_imr_info rtw8852c_imr_info = {
+ .wdrls_imr_set = B_AX_WDRLS_IMR_SET_V1,
+ .wsec_imr_reg = R_AX_SEC_ERROR_FLAG_IMR,
+ .wsec_imr_set = B_AX_TX_HANG_IMR | B_AX_RX_HANG_IMR,
+ .mpdu_tx_imr_set = B_AX_MPDU_TX_IMR_SET_V1,
+ .mpdu_rx_imr_set = B_AX_MPDU_RX_IMR_SET_V1,
+ .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET,
+ .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_B0_ERRFLAG_IMR,
+ .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR_V1,
+ .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET_V1,
+ .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_B1_ERRFLAG_IMR,
+ .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR_V1,
+ .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET_V1,
+ .wde_imr_clr = B_AX_WDE_IMR_CLR_V1,
+ .wde_imr_set = B_AX_WDE_IMR_SET_V1,
+ .ple_imr_clr = B_AX_PLE_IMR_CLR_V1,
+ .ple_imr_set = B_AX_PLE_IMR_SET_V1,
+ .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR_V1,
+ .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET_V1,
+ .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR_V1,
+ .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET_V1,
+ .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR_V1,
+ .other_disp_imr_set = B_AX_OTHER_DISP_IMR_SET_V1,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR,
+ .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR,
+ .bbrpt_err_imr_set = R_AX_BBRPT_CHINFO_IMR_SET_V1,
+ .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR,
+ .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_V1,
+ .ptcl_imr_set = B_AX_PTCL_IMR_SET_V1,
+ .cdma_imr_0_reg = R_AX_RX_ERR_FLAG_IMR,
+ .cdma_imr_0_clr = B_AX_RX_ERR_IMR_CLR_V1,
+ .cdma_imr_0_set = B_AX_RX_ERR_IMR_SET_V1,
+ .cdma_imr_1_reg = R_AX_TX_ERR_FLAG_IMR,
+ .cdma_imr_1_clr = B_AX_TX_ERR_IMR_CLR_V1,
+ .cdma_imr_1_set = B_AX_TX_ERR_IMR_SET_V1,
+ .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR_V1,
+ .phy_intf_imr_clr = B_AX_PHYINFO_IMR_CLR_V1,
+ .phy_intf_imr_set = B_AX_PHYINFO_IMR_SET_V1,
+ .rmac_imr_reg = R_AX_RX_ERR_IMR,
+ .rmac_imr_clr = B_AX_RMAC_IMR_CLR_V1,
+ .rmac_imr_set = B_AX_RMAC_IMR_SET_V1,
+ .tmac_imr_reg = R_AX_TRXPTCL_ERROR_INDICA_MASK,
+ .tmac_imr_clr = B_AX_TMAC_IMR_CLR_V1,
+ .tmac_imr_set = B_AX_TMAC_IMR_SET_V1,
+};
+
+static const struct rtw89_rrsr_cfgs rtw8852c_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_PTCL_RRSR1, B_AX_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852c_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg);
+static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
+ enum rtw89_mac_idx mac_idx);
+
+static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ val32 = rtw89_read32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_PAD_HCI_SEL_V2_MASK);
+ if (val32 == MAC_AX_HCI_SEL_PCIE_USB)
+ rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+ B_AX_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
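+ /* pulse the platform enable bit twice before finally enabling it */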
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_CMAC1_FEN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, B_AX_R_SYM_WLCMAC1_P4_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P3_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P2_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P1_PC_EN |
+ B_AX_R_SYM_WLCMAC1_PC_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+ XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+ XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+ XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+ XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN,
+ B_AX_EECS_PULL_LOW_EN | B_AX_EESK_PULL_LOW_EN |
+ B_AX_LED1_PULL_LOW_EN);
+
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+ B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+ B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+ B_AX_MAC_UN_EN | B_AX_H_AXIDMA_EN);
+
+ rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+ B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN |
+ B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN |
+ B_AX_TMAC_EN | B_AX_RMAC_EN);
+
+ return 0;
+}
+
+static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+ XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+ XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ return 0;
+}
+
+static void rtw8852c_e_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852c_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852c_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852c_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852c_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+ memcpy(tssi->tssi_6g_mcs[i], bw40_1s_tssi_6g_ofst[i],
+ sizeof(tssi->tssi_6g_mcs[i]));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
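+/* One efuse byte packs two signed 4-bit gain offsets: path A in the
+ * high nibble, path B in the low nibble. An erased byte (0xff) means
+ * no offset was programmed, and the decode is reported as invalid.
+ */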
+static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
+{
+ if (high)
+ *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
+ if (low)
+ *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
+
+ return data != 0xff;
+}
+
+static void rtw8852c_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
+ struct rtw8852c_efuse *map)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ bool valid = false;
+
+ valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
+ valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_low,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_high,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
+
+ gain->offset_valid = valid;
+}
+
+static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852c_efuse *map;
+
+ map = (struct rtw8852c_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852c_efuse_parsing_tssi(rtwdev, map);
+ rtw8852c_efuse_parsing_gain_offset(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852c_e_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static void rtw8852c_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852C] = {0x5D6, 0x5AB};
+ static const u32 tssi_trim_addr_6g[RF_PATH_NUM_8852C] = {0x5CE, 0x5A3};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM_6G; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr_6g[i] - addr - j;
+ tssi->tssi_trim_6g[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ memset(tssi->tssi_trim_6g, 0, sizeof(tssi->tssi_trim_6g));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
+
+static void rtw8852c_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852C] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
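+/* The raw efuse thermal trim is repacked for the RR_TM2_OFF field:
+ * __thm_setting() moves the LSB up to bit 3 and shifts the remaining
+ * trim bits down by one.
+ */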
+static void rtw8852c_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
+
+static void rtw8852c_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852C] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
+static void rtw8852c_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
+
+static int rtw8852c_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852c_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852c_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852c_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_thermal_trim(rtwdev);
+ rtw8852c_pa_bias_trim(rtwdev);
+}
+
+static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
+ u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE,
+ mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
+ u8 txsc20 = 0, txsc40 = 0, txsc80 = 0;
+ u8 rf_mod_val = 0, chk_rate_mask = 0;
+ u32 txsc;
+
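+ /* wider bandwidths also need the sub-channel index of every
+ * narrower bandwidth, hence the fallthrough cascade below
+ */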
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ txsc80 = rtw89_phy_get_txsc(rtwdev, chan,
+ RTW89_CHANNEL_WIDTH_80);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_80:
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan,
+ RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan,
+ RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_mod_val = AX_WMAC_RFMOD_160M;
+ txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) |
+ FIELD_PREP(B_AX_TXSC_40M_MASK, txsc40) |
+ FIELD_PREP(B_AX_TXSC_80M_MASK, txsc80);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_mod_val = AX_WMAC_RFMOD_80M;
+ txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) |
+ FIELD_PREP(B_AX_TXSC_40M_MASK, txsc40);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_mod_val = AX_WMAC_RFMOD_40M;
+ txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ rf_mod_val = AX_WMAC_RFMOD_20M;
+ txsc = 0;
+ break;
+ }
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, rf_mod_val);
+ rtw89_write32(rtwdev, sub_carr, txsc);
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ chk_rate_mask = B_AX_BAND_MODE;
+ break;
+ case RTW89_BAND_5G:
+ case RTW89_BAND_6G:
+ chk_rate_mask = B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
+ return;
+ }
+ rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE | B_AX_CHECK_CCK_EN |
+ B_AX_RTS_LIMIT_IN_OFDM6);
+ rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask);
+}
+
+static const u32 rtw8852c_sco_barker_threshold[14] = {
+ 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6,
+ 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a
+};
+
+static const u32 rtw8852c_sco_cck_threshold[14] = {
+ 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db,
+ 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e
+};
+
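+/* The threshold tables above are indexed by (channel - 1); on a 40 MHz
+ * channel the lookup shifts two entries up or down depending on which
+ * side the primary 20 MHz channel occupies.
+ */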
+static int rtw8852c_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 central_ch,
+ u8 primary_ch, enum rtw89_bandwidth bw)
+{
+ u8 ch_element;
+
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ ch_element = central_ch - 1;
+ } else if (bw == RTW89_CHANNEL_WIDTH_40) {
+ if (primary_ch == 1)
+ ch_element = central_ch - 1 + 2;
+ else
+ ch_element = central_ch - 1 - 2;
+ } else {
+ rtw89_warn(rtwdev, "Invalid BW:%d for CCK\n", bw);
+ return -EINVAL;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_BK_FC0_INV_V1, B_BK_FC0_INV_MSK_V1,
+ rtw8852c_sco_barker_threshold[ch_element]);
+ rtw89_phy_write32_mask(rtwdev, R_CCK_FC0_INV_V1, B_CCK_FC0_INV_MSK_V1,
+ rtw8852c_sco_cck_threshold[ch_element]);
+
+ return 0;
+}
+
+struct rtw8852c_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8852C];
+ u32 gain_a[BB_PATH_NUM_8852C];
+ u32 gain_mask;
+};
+
+static const struct rtw8852c_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x000000ff },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x0000ff00 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x000000ff },
+};
+
+static const struct rtw8852c_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0xff000000 },
+};
+
+struct rtw8852c_bb_gain_bypass {
+ u32 gain_g[BB_PATH_NUM_8852C];
+ u32 gain_a[BB_PATH_NUM_8852C];
+ u32 gain_mask_g;
+ u32 gain_mask_a;
+};
+
+static
+const struct rtw8852c_bb_gain_bypass bb_gain_bypass_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4BB8, 0x4C7C}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff000000, .gain_mask_a = 0xff},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff, .gain_mask_a = 0xff00},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff00, .gain_mask_a = 0xff0000},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78},
+ .gain_mask_g = 0xff0000, .gain_mask_a = 0xff000000},
+ { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB8, 0x4C7C},
+ .gain_mask_g = 0xff000000, .gain_mask_a = 0xff},
+ { .gain_g = {0x4BC0, 0x4C84}, .gain_a = {0x4BB8, 0x4C7C},
+ .gain_mask_g = 0xff, .gain_mask_a = 0xff00},
+ { .gain_g = {0x4BC0, 0x4C84}, .gain_a = {0x4BB8, 0x4C7C},
+ .gain_mask_g = 0xff00, .gain_mask_a = 0xff0000},
+};
+
+struct rtw8852c_bb_gain_op1db {
+ struct {
+ u32 lna[BB_PATH_NUM_8852C];
+ u32 tia_lna[BB_PATH_NUM_8852C];
+ u32 mask;
+ } reg[LNA_GAIN_NUM];
+ u32 reg_tia0_lna6[BB_PATH_NUM_8852C];
+ u32 mask_tia0_lna6;
+};
+
+static const struct rtw8852c_bb_gain_op1db bb_gain_op1db_a = {
+ .reg = {
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff},
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff00},
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff0000},
+ { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754},
+ .mask = 0xff000000},
+ { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758},
+ .mask = 0xff},
+ { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758},
+ .mask = 0xff00},
+ { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758},
+ .mask = 0xff0000},
+ },
+ .reg_tia0_lna6 = {0x4674, 0x4758},
+ .mask_tia0_lna6 = 0xff000000,
+};
+
+static enum rtw89_phy_bb_gain_band
+rtw8852c_mapping_gain_band(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH;
+ }
+}
+
+static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ u8 gain_band = rtw8852c_mapping_gain_band(subband);
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_lna[i].gain_g[path];
+ else
+ reg = bb_gain_lna[i].gain_a[path];
+
+ mask = bb_gain_lna[i].gain_mask;
+ val = gain->lna_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+
+ if (subband == RTW89_CH_2G) {
+ reg = bb_gain_bypass_lna[i].gain_g[path];
+ mask = bb_gain_bypass_lna[i].gain_mask_g;
+ } else {
+ reg = bb_gain_bypass_lna[i].gain_a[path];
+ mask = bb_gain_bypass_lna[i].gain_mask_a;
+ }
+
+ val = gain->lna_gain_bypass[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+
+ if (subband != RTW89_CH_2G) {
+ reg = bb_gain_op1db_a.reg[i].lna[path];
+ mask = bb_gain_op1db_a.reg[i].mask;
+ val = gain->lna_op1db[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+
+ reg = bb_gain_op1db_a.reg[i].tia_lna[path];
+ mask = bb_gain_op1db_a.reg[i].mask;
+ val = gain->tia_lna_op1db[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+ }
+
+ if (subband != RTW89_CH_2G) {
+ reg = bb_gain_op1db_a.reg_tia0_lna6[path];
+ mask = bb_gain_op1db_a.mask_tia0_lna6;
+ val = gain->tia_lna_op1db[gain_band][path][7];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_tia[i].gain_g[path];
+ else
+ reg = bb_gain_tia[i].gain_a[path];
+
+ mask = bb_gain_tia[i].gain_mask;
+ val = gain->tia_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+}
+
+static
+const u8 rtw8852c_ch_base_table[16] = {1, 0xff,
+ 36, 100, 132, 149, 0xff,
+ 1, 33, 65, 97, 129, 161, 193, 225, 0xff};
+#define RTW8852C_CH_BASE_IDX_2G 0
+#define RTW8852C_CH_BASE_IDX_5G_FIRST 2
+#define RTW8852C_CH_BASE_IDX_5G_LAST 5
+#define RTW8852C_CH_BASE_IDX_6G_FIRST 7
+#define RTW8852C_CH_BASE_IDX_6G_LAST 14
+
+#define RTW8852C_CH_BASE_IDX_MASK GENMASK(7, 4)
+#define RTW8852C_CH_OFFSET_MASK GENMASK(3, 0)
+
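+/* A channel index byte packs an index into rtw8852c_ch_base_table[]
+ * in its high nibble and the distance from that base channel, in
+ * 2-channel steps, in its low nibble; 2 GHz channel numbers are small
+ * enough to be stored directly in the low nibble.
+ */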
+static u8 rtw8852c_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
+{
+ u8 chan_idx;
+ u8 last, first;
+ u8 idx;
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ chan_idx = FIELD_PREP(RTW8852C_CH_BASE_IDX_MASK, RTW8852C_CH_BASE_IDX_2G) |
+ FIELD_PREP(RTW8852C_CH_OFFSET_MASK, central_ch);
+ return chan_idx;
+ case RTW89_BAND_5G:
+ first = RTW8852C_CH_BASE_IDX_5G_FIRST;
+ last = RTW8852C_CH_BASE_IDX_5G_LAST;
+ break;
+ case RTW89_BAND_6G:
+ first = RTW8852C_CH_BASE_IDX_6G_FIRST;
+ last = RTW8852C_CH_BASE_IDX_6G_LAST;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Unsupported band %d\n", band);
+ return 0;
+ }
+
+ for (idx = last; idx >= first; idx--)
+ if (central_ch >= rtw8852c_ch_base_table[idx])
+ break;
+
+ if (idx < first) {
+ rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
+ return 0;
+ }
+
+ chan_idx = FIELD_PREP(RTW8852C_CH_BASE_IDX_MASK, idx) |
+ FIELD_PREP(RTW8852C_CH_OFFSET_MASK,
+ (central_ch - rtw8852c_ch_base_table[idx]) >> 1);
+ return chan_idx;
+}
+
+static void rtw8852c_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
+ u8 *ch, enum nl80211_band *band)
+{
+ u8 idx, offset;
+
+ idx = FIELD_GET(RTW8852C_CH_BASE_IDX_MASK, chan_idx);
+ offset = FIELD_GET(RTW8852C_CH_OFFSET_MASK, chan_idx);
+
+ if (idx == RTW8852C_CH_BASE_IDX_2G) {
+ *band = NL80211_BAND_2GHZ;
+ *ch = offset;
+ return;
+ }
+
+ *band = idx <= RTW8852C_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
+ *ch = rtw8852c_ch_base_table[idx] + (offset << 1);
+}
+
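+/* Combine the per-band efuse offset (integer steps, Q0) with the
+ * calibrated base offset (Q4): the shifts below bring both terms onto
+ * a common fixed-point grid before clamping into the s8 register
+ * fields.
+ */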
+static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_rf_path path)
+{
+ static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA0_LNA6_OP1DB_V1,
+ R_PATH1_G_TIA0_LNA6_OP1DB_V1};
+ static const u32 rpl_mask[2] = {B_RPL_PATHA_MASK, B_RPL_PATHB_MASK};
+ static const u32 rpl_tb_mask[2] = {B_RSSI_M_PATHA_MASK, B_RSSI_M_PATHB_MASK};
+ struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_band;
+ s32 offset_q0, offset_base_q4;
+ s32 tmp = 0;
+
+ if (!efuse_gain->offset_valid)
+ return;
+
+ if (rtwdev->dbcc_en && path == RF_PATH_B)
+ phy_idx = RTW89_PHY_1;
+
+ if (chan->band_type == RTW89_BAND_2G) {
+ offset_q0 = efuse_gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK];
+ offset_base_q4 = efuse_gain->offset_base[phy_idx];
+
+ tmp = clamp_t(s32, (-offset_q0 << 3) + (offset_base_q4 >> 1),
+ S8_MIN >> 1, S8_MAX >> 1);
+ rtw89_phy_write32_mask(rtwdev, R_RPL_OFST, B_RPL_OFST_MASK, tmp & 0x7f);
+ }
+
+ switch (chan->subband_type) {
+ default:
+ case RTW89_CH_2G:
+ gain_band = RTW89_GAIN_OFFSET_2G_OFDM;
+ break;
+ case RTW89_CH_5G_BAND_1:
+ gain_band = RTW89_GAIN_OFFSET_5G_LOW;
+ break;
+ case RTW89_CH_5G_BAND_3:
+ gain_band = RTW89_GAIN_OFFSET_5G_MID;
+ break;
+ case RTW89_CH_5G_BAND_4:
+ gain_band = RTW89_GAIN_OFFSET_5G_HIGH;
+ break;
+ }
+
+ offset_q0 = -efuse_gain->offset[path][gain_band];
+ offset_base_q4 = efuse_gain->offset_base[phy_idx];
+
+ tmp = (offset_q0 << 2) + (offset_base_q4 >> 2);
+ tmp = clamp_t(s32, -tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], B_PATH0_R_G_OFST_MASK, tmp & 0xff);
+
+ tmp = clamp_t(s32, offset_q0 << 4, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_RPL_PATHAB, rpl_mask[path], tmp & 0xff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSSI_M_PATHAB, rpl_tb_mask[path], tmp & 0xff, phy_idx);
+}
+
+static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 sco;
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ u8 subband = chan->subband_type;
+ bool is_2g = band == RTW89_BAND_2G;
+ u8 chan_idx;
+
+ if (!central_freq) {
+ rtw89_warn(rtwdev, "Invalid central_freq\n");
+ return;
+ }
+
+ if (phy_idx == RTW89_PHY_0) {
+ /* Path A */
+ rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_A);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_A);
+
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 0,
+ phy_idx);
+ /* Path B */
+ if (!rtwdev->dbcc_en) {
+ rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_B);
+
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev,
+ R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev,
+ R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 0, phy_idx);
+ rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL);
+ } else {
+ if (is_2g)
+ rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL);
+ else
+ rtw89_phy_write32_set(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL);
+ }
+ /* SCO compensate FC setting */
+ rtw89_phy_write32_idx(rtwdev, R_FC0_V1, B_FC0_MSK_V1,
+ central_freq, phy_idx);
+ /* sco = round(2^18 / fc0), rounded to nearest */
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, sco,
+ phy_idx);
+ } else {
+ /* Path B */
+ rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_B);
+
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1,
+ 0, phy_idx);
+ /* SCO compensate FC setting */
+ rtw89_phy_write32_idx(rtwdev, R_FC0_V1, B_FC0_MSK_V1,
+ central_freq, phy_idx);
+ /* sco = round(2^18 / fc0), rounded to nearest */
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, sco,
+ phy_idx);
+ }
+ /* CCK parameters */
+ if (band == RTW89_BAND_2G) {
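+ /* channel 14 needs its own CCK pulse-shaping filter coefficients */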
+ if (central_ch == 14) {
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF0_V1,
+ B_PCOEFF01_MSK_V1, 0x3b13ff);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF2_V1,
+ B_PCOEFF23_MSK_V1, 0x1c42de);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF4_V1,
+ B_PCOEFF45_MSK_V1, 0xfdb0ad);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF6_V1,
+ B_PCOEFF67_MSK_V1, 0xf60f6e);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF8_V1,
+ B_PCOEFF89_MSK_V1, 0xfd8f92);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFA_V1,
+ B_PCOEFFAB_MSK_V1, 0x2d011);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFC_V1,
+ B_PCOEFFCD_MSK_V1, 0x1c02c);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFE_V1,
+ B_PCOEFFEF_MSK_V1, 0xfff00a);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF0_V1,
+ B_PCOEFF01_MSK_V1, 0x3d23ff);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF2_V1,
+ B_PCOEFF23_MSK_V1, 0x29b354);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF4_V1,
+ B_PCOEFF45_MSK_V1, 0xfc1c8);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF6_V1,
+ B_PCOEFF67_MSK_V1, 0xfdb053);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFF8_V1,
+ B_PCOEFF89_MSK_V1, 0xf86f9a);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFA_V1,
+ B_PCOEFFAB_MSK_V1, 0xfaef92);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFC_V1,
+ B_PCOEFFCD_MSK_V1, 0xfe5fcc);
+ rtw89_phy_write32_mask(rtwdev, R_PCOEFFE_V1,
+ B_PCOEFFEF_MSK_V1, 0xffdff5);
+ }
+ }
+
+ chan_idx = rtw8852c_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
+}
+
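+/* ADC and wideband-ADC sampling setup: 5 MHz and 10 MHz each get a
+ * dedicated mode, while every bandwidth from 20 MHz up shares one
+ * full-rate configuration.
+ */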
+static void rtw8852c_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ }
+}
+
+static void rtw8852c_edcca_per20_bitmap_sifs(struct rtw89_dev *rtwdev, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A1, B_SNDCCA_A1_EN, 0xff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A2, B_SNDCCA_A2_VAL, 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A1, B_SNDCCA_A1_EN, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A2, B_SNDCCA_A2_VAL, 0, phy_idx);
+ }
+}
+
+static void
+rtw8852c_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 mod_sbw = 0;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ if (bw == RTW89_CHANNEL_WIDTH_5)
+ mod_sbw = 0x1;
+ else if (bw == RTW89_CHANNEL_WIDTH_10)
+ mod_sbw = 0x2;
+ else if (bw == RTW89_CHANNEL_WIDTH_20)
+ mod_sbw = 0x0;
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW,
+ mod_sbw, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, 0x0,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x2,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xd);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xd);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x3,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1,
+ B_PATH0_BW_SEL_MSK_V1, 0xb);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xb);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
+ pri_ch);
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_40) {
+ rtw89_phy_write32_idx(rtwdev, R_RX_BW40_2XFFT_EN_V1,
+ B_RX_BW40_2XFFT_EN_MSK_V1, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_T2F_GI_COMB, B_T2F_GI_COMB_EN, 1, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RX_BW40_2XFFT_EN_V1,
+ B_RX_BW40_2XFFT_EN_MSK_V1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_T2F_GI_COMB, B_T2F_GI_COMB_EN, 0, phy_idx);
+ }
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw8852c_bw_setting(rtwdev, bw, RF_PATH_A);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_bw_setting(rtwdev, bw, RF_PATH_B);
+ } else {
+ rtw8852c_bw_setting(rtwdev, bw, RF_PATH_B);
+ }
+
+ rtw8852c_edcca_per20_bitmap_sifs(rtwdev, bw, phy_idx);
+}
+
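+/* Return the center frequency (in MHz) of the known spur affecting
+ * the given channel, or 0 when no spur mitigation is needed.
+ */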
+static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ u8 center_chan = chan->channel;
+ u8 bw = chan->band_width;
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ if (center_chan >= 5 && center_chan <= 8)
+ return 2440;
+ if (center_chan == 13)
+ return 2480;
+ } else if (bw == RTW89_CHANNEL_WIDTH_40) {
+ if (center_chan >= 3 && center_chan <= 10)
+ return 2440;
+ }
+ break;
+ case RTW89_BAND_5G:
+ if (center_chan == 151 || center_chan == 153 ||
+ center_chan == 155 || center_chan == 163)
+ return 5760;
+ break;
+ case RTW89_BAND_6G:
+ if (center_chan == 195 || center_chan == 197 ||
+ center_chan == 199 || center_chan == 207)
+ return 6920;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
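+/* Point the CSI notch at the spur: the spur offset from the channel
+ * center is converted to Hz, rounded to the 78.125 kHz subcarrier
+ * grid, and wrapped into the 2048-tone FFT index space.
+ */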
+static void rtw8852c_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 spur_freq;
+ s32 freq_diff, csi_idx, csi_tone_idx;
+
+ spur_freq = rtw8852c_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 1, phy_idx);
+}
+
+static const struct rtw89_nbi_reg_def rtw8852c_nbi_reg_def[] = {
+ [RF_PATH_A] = {
+ .notch1_idx = {0x4C14, 0xFF},
+ .notch1_frac_idx = {0x4C14, 0xC00},
+ .notch1_en = {0x4C14, 0x1000},
+ .notch2_idx = {0x4C20, 0xFF},
+ .notch2_frac_idx = {0x4C20, 0xC00},
+ .notch2_en = {0x4C20, 0x1000},
+ },
+ [RF_PATH_B] = {
+ .notch1_idx = {0x4CD8, 0xFF},
+ .notch1_frac_idx = {0x4CD8, 0xC00},
+ .notch1_en = {0x4CD8, 0x1000},
+ .notch2_idx = {0x4CE4, 0xFF},
+ .notch2_frac_idx = {0x4CE4, 0xC00},
+ .notch2_en = {0x4CE4, 0x1000},
+ },
+};
+
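+/* Narrowband-interference notch: the spur offset is split into an
+ * integer tone index on the 312.5 kHz grid plus a fractional part
+ * refined on the 78.125 kHz grid. Whichever notch gets programmed,
+ * the other one is disabled.
+ */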
+static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_nbi_reg_def *nbi = &rtw8852c_nbi_reg_def[path];
+ u32 spur_freq, fc;
+ s32 freq_diff;
+ s32 nbi_idx, nbi_tone_idx;
+ s32 nbi_frac_idx, nbi_frac_tone_idx;
+ bool notch2_chk = false;
+
+ spur_freq = rtw8852c_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0);
+ return;
+ }
+
+ fc = chan->freq;
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
+ fc = (spur_freq > fc) ? fc + 40 : fc - 40;
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
+ notch2_chk = true;
+ }
+
+ freq_diff = (spur_freq - fc) * 1000000;
+ nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5, &nbi_frac_idx);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
+ s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
+ } else {
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
+
+ s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
+ }
+ nbi_frac_tone_idx = s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_idx.addr,
+ nbi->notch2_idx.mask, nbi_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_frac_idx.addr,
+ nbi->notch2_frac_idx.mask, nbi_frac_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 1);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_idx.addr,
+ nbi->notch1_idx.mask, nbi_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_frac_idx.addr,
+ nbi->notch1_frac_idx.mask, nbi_frac_tone_idx);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 1);
+ rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0);
+ }
+}
+
+static void rtw8852c_spur_notch(struct rtw89_dev *rtwdev, u32 val,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 notch;
+ u32 notch2;
+
+ if (phy_idx == RTW89_PHY_0) {
+ notch = R_PATH0_NOTCH;
+ notch2 = R_PATH0_NOTCH2;
+ } else {
+ notch = R_PATH1_NOTCH;
+ notch2 = R_PATH1_NOTCH2;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, notch,
+ B_PATH0_NOTCH_VAL | B_PATH0_NOTCH_EN, val);
+ rtw89_phy_write32_set(rtwdev, notch, B_PATH0_NOTCH_EN);
+ rtw89_phy_write32_mask(rtwdev, notch2,
+ B_PATH0_NOTCH2_VAL | B_PATH0_NOTCH2_EN, val);
+ rtw89_phy_write32_set(rtwdev, notch2, B_PATH0_NOTCH2_EN);
+}
+
+static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 pri_ch_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_set_csi_tone_idx(rtwdev, chan, phy_idx);
+
+ if (phy_idx == RTW89_PHY_0) {
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_LOWER ||
+ pri_ch_idx == RTW89_SC_20_UP3X)) {
+ rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_0);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
+ } else if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_UPPER ||
+ pri_ch_idx == RTW89_SC_20_LOW3X)) {
+ rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_0);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
+ } else {
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan,
+ RF_PATH_B);
+ }
+ } else {
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_LOWER ||
+ pri_ch_idx == RTW89_SC_20_UP3X)) {
+ rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
+ } else if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_UPPER ||
+ pri_ch_idx == RTW89_SC_20_LOW3X)) {
+ rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
+ } else {
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B);
+ }
+ }
+
+ if (pri_ch_idx == RTW89_SC_20_UP3X || pri_ch_idx == RTW89_SC_20_LOW3X)
+ rtw89_phy_write32_idx(rtwdev, R_PD_BOOST_EN, B_PD_BOOST_EN, 0, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PD_BOOST_EN, B_PD_BOOST_EN, 1, phy_idx);
+}
+
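+/* 5 MHz sideband mask: always on at 40 MHz (low side masked when the
+ * primary channel index is 2), enabled at 80 MHz only for primary channel
+ * index 3 or 4, and disabled for all other bandwidths.
+ */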
+static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 pri_ch = chan->primary_channel;
+ bool mask_5m_low;
+ bool mask_5m_en;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_40:
+ mask_5m_en = true;
+ mask_5m_low = pri_ch == 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ mask_5m_en = ((pri_ch == 3) || (pri_ch == 4));
+ mask_5m_low = pri_ch == 4;
+ break;
+ default:
+ mask_5m_en = false;
+ mask_5m_low = false;
+ break;
+ }
+
+ if (!mask_5m_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT,
+ B_ASSIGN_SBD_OPT_EN, 0x0, phy_idx);
+ } else {
+ if (mask_5m_low) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB0, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB0, 0x0);
+ }
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT, B_ASSIGN_SBD_OPT_EN, 0x1, phy_idx);
+ }
+}
+
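+/* Toggle the async BB reset around a HW SI disable window: disable HW SI,
+ * drop RSTB, re-enable HW SI, then release RSTB to bring the BB back up.
+ */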
+static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ /* HW SI reset */
+ rtw89_phy_write32_mask(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG,
+ 0x7);
+ rtw89_phy_write32_mask(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG,
+ 0x7);
+
+ udelay(1);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0,
+ phy_idx);
+ /* HW SI reset */
+ rtw89_phy_write32_mask(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG,
+ 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG,
+ 0x0);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+}
+
+static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ enum rtw89_phy_idx phy_idx, bool en)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
+ B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS,
+ B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0,
+ phy_idx);
+ }
+}
+
+static void rtw8852c_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_bb_reset_all(rtwdev, phy_idx);
+}
+
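+/* Program one TRSW GPIO mapping entry: the {tx_path_en, trsw_tx, trsw_rx}
+ * combination selects a 2-bit lane starting at bit 16 of the per-path
+ * control register, which is loaded with the TRSW_A/TRSW_B output levels.
+ */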
+static
+void rtw8852c_bb_gpio_trsw(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 tx_path_en, u8 trsw_tx,
+ u8 trsw_rx, u8 trsw, u8 trsw_b)
+{
+ static const u32 path_cr_bases[] = {0x5868, 0x7868};
+ u32 mask_ofst = 16;
+ u32 cr;
+ u32 val;
+
+ if (path >= ARRAY_SIZE(path_cr_bases))
+ return;
+
+ cr = path_cr_bases[path];
+
+ mask_ofst += (tx_path_en * 4 + trsw_tx * 2 + trsw_rx) * 2;
+ val = FIELD_PREP(B_P0_TRSW_A, trsw) | FIELD_PREP(B_P0_TRSW_B, trsw_b);
+
+ rtw89_phy_write32_mask(rtwdev, cr, (B_P0_TRSW_A | B_P0_TRSW_B) << mask_ofst, val);
+}
+
+enum rtw8852c_rfe_src {
+ PAPE_RFM,
+ TRSW_RFM,
+ LNAON_RFM,
+};
+
+static
+void rtw8852c_bb_gpio_rfm(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw8852c_rfe_src src, u8 dis_tx_gnt_wl,
+ u8 active_tx_opt, u8 act_bt_en, u8 rfm_output_val)
+{
+ static const u32 path_cr_bases[] = {0x5894, 0x7894};
+ static const u32 masks[] = {0, 8, 16};
+ u32 mask, mask_ofst;
+ u32 cr;
+ u32 val;
+
+ if (src >= ARRAY_SIZE(masks) || path >= ARRAY_SIZE(path_cr_bases))
+ return;
+
+ mask_ofst = masks[src];
+ cr = path_cr_bases[path];
+
+ val = FIELD_PREP(B_P0_RFM_DIS_WL, dis_tx_gnt_wl) |
+ FIELD_PREP(B_P0_RFM_TX_OPT, active_tx_opt) |
+ FIELD_PREP(B_P0_RFM_BT_EN, act_bt_en) |
+ FIELD_PREP(B_P0_RFM_OUT, rfm_output_val);
+ mask = 0xff << mask_ofst;
+
+ rtw89_phy_write32_mask(rtwdev, cr, mask, val);
+}
+
+static void rtw8852c_bb_gpio_init(struct rtw89_dev *rtwdev)
+{
+ static const u32 cr_bases[] = {0x5800, 0x7800};
+ u32 addr;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(cr_bases); i++) {
+ addr = cr_bases[i];
+ rtw89_phy_write32_set(rtwdev, (addr | 0x68), B_P0_TRSW_A);
+ rtw89_phy_write32_clr(rtwdev, (addr | 0x68), B_P0_TRSW_X);
+ rtw89_phy_write32_clr(rtwdev, (addr | 0x68), B_P0_TRSW_SO_A2);
+ rtw89_phy_write32(rtwdev, (addr | 0x80), 0x77777777);
+ rtw89_phy_write32(rtwdev, (addr | 0x84), 0x77777777);
+ }
+
+ rtw89_phy_write32(rtwdev, R_RFE_E_A2, 0xffffffff);
+ rtw89_phy_write32(rtwdev, R_RFE_O_SEL_A2, 0);
+ rtw89_phy_write32(rtwdev, R_RFE_SEL0_A2, 0);
+ rtw89_phy_write32(rtwdev, R_RFE_SEL32_A2, 0);
+
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 1, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 1, 1, 1, 0);
+
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 1, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 0, 0, 0, 1);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 0, 1, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 1, 0, 1, 0);
+ rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 1, 1, 1, 0);
+
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, PAPE_RFM, 0, 0, 0, 0x0);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, TRSW_RFM, 0, 0, 0, 0x4);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, LNAON_RFM, 0, 0, 0, 0x8);
+
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, PAPE_RFM, 0, 0, 0, 0x0);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, TRSW_RFM, 0, 0, 0, 0x4);
+ rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, LNAON_RFM, 0, 0, 0, 0x8);
+}
+
+static void rtw8852c_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 addr;
+
+ for (addr = R_AX_PWR_MACID_LMT_TABLE0;
+ addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+}
+
+static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT,
+ B_DBCC_80P80_SEL_EVM_RPT_EN);
+ rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT2,
+ B_DBCC_80P80_SEL_EVM_RPT2_EN);
+
+ rtw8852c_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
+ rtw8852c_bb_gpio_init(rtwdev);
+
+ /* read these registers after loading BB parameters */
+ gain->offset_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP, B_RPL_BIAS_COMP_MASK);
+ gain->offset_base[RTW89_PHY_1] =
+ rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP1, B_RPL_BIAS_COMP1_MASK);
+}
+
+static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_ch_idx = chan->pri_ch_idx;
+ u32 mask, reg;
+ u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0,
+ B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1};
+ u8 ntx_path;
+
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8852c_ctrl_sco_cck(rtwdev, chan->channel,
+ chan->primary_channel,
+ chan->band_width);
+
+ rtw8852c_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
+ if (cck_en) {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF,
+ B_PD_ARBITER_OFF, 0x0, phy_idx);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 1);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF,
+ B_PD_ARBITER_OFF, 0x1, phy_idx);
+ }
+
+ rtw8852c_spur_elimination(rtwdev, chan, pri_ch_idx, phy_idx);
+ rtw8852c_ctrl_btg(rtwdev, chan->band_type == RTW89_BAND_2G);
+ rtw8852c_5m_mask(rtwdev, chan, phy_idx);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ rtwdev->hal.cv != CHIP_CAV) {
+ rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ,
+ B_P80_AT_HIGH_FREQ, 0x0, phy_idx);
+ reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP,
+ phy_idx);
+ if (chan->primary_channel > chan->channel) {
+ rtw89_phy_write32_mask(rtwdev,
+ R_P80_AT_HIGH_FREQ_RU_ALLOC,
+ ru_alloc_msk[phy_idx], 1);
+ rtw89_write32_mask(rtwdev, reg,
+ B_P80_AT_HIGH_FREQ_BB_WRP, 1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev,
+ R_P80_AT_HIGH_FREQ_RU_ALLOC,
+ ru_alloc_msk[phy_idx], 0);
+ rtw89_write32_mask(rtwdev, reg,
+ B_P80_AT_HIGH_FREQ_BB_WRP, 0);
+ }
+ }
+
+ if (chan->band_type == RTW89_BAND_6G &&
+ chan->band_width == RTW89_CHANNEL_WIDTH_160)
+ rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN,
+ B_CDD_EVM_CHK_EN, 0, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN,
+ B_CDD_EVM_CHK_EN, 1, phy_idx);
+
+ if (!rtwdev->dbcc_en) {
+ mask = B_P0_TXPW_RSTB_TSSI | B_P0_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x3);
+ mask = B_P1_TXPW_RSTB_TSSI | B_P1_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x3);
+ } else {
+ if (phy_idx == RTW89_PHY_0) {
+ mask = B_P0_TXPW_RSTB_TSSI | B_P0_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x3);
+ } else {
+ mask = B_P1_TXPW_RSTB_TSSI | B_P1_TXPW_RSTB_MANON;
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x3);
+ }
+ }
+
+ if (chan->band_type == RTW89_BAND_6G)
+ rtw89_phy_write32_set(rtwdev, R_MUIC, B_MUIC_EN);
+ else
+ rtw89_phy_write32_clr(rtwdev, R_MUIC, B_MUIC_EN);
+
+ if (hal->antenna_tx)
+ ntx_path = hal->antenna_tx;
+ else
+ ntx_path = chan->band_type == RTW89_BAND_6G ? RF_B : RF_AB;
+
+ rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, (enum rtw89_mac_idx)phy_idx);
+
+ rtw8852c_bb_reset_all(rtwdev, phy_idx);
+}
+
+static void rtw8852c_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852c_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852c_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
+static void rtw8852c_dfs_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 0);
+}
+
+static void rtw8852c_adc_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST,
+ 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST,
+ 0xf);
+}
+
+static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, &p->tx_en,
+ RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
+ rtw8852c_dfs_en(rtwdev, false);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8852c_adc_en(rtwdev, false);
+ fsleep(40);
+ rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
+ rtw8852c_adc_en(rtwdev, true);
+ rtw8852c_dfs_en(rtwdev, true);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
+ }
+}
+
+static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(mcc_info, 0, sizeof(*mcc_info));
+ rtw8852c_lck_init(rtwdev);
+
+ rtw8852c_rck(rtwdev);
+ rtw8852c_dack(rtwdev);
+ rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
+}
+
+static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+
+ rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
+ rtw8852c_rx_dck(rtwdev, phy_idx, false);
+ rtw8852c_iqk(rtwdev, phy_idx);
+ rtw8852c_tssi(rtwdev, phy_idx);
+ rtw8852c_dpk(rtwdev, phy_idx);
+ rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
+}
+
+static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_tssi_scan(rtwdev, phy_idx);
+}
+
+static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ rtw8852c_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+}
+
+static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_dpk_track(rtwdev);
+ rtw8852c_lck_track(rtwdev);
+ rtw8852c_rx_dck_track(rtwdev);
+}
+
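+/* Compose the tx power reference word: TSSI offset codeword in bits 26:18,
+ * power codeword (6-bit RF part << 3 | 3-bit BB part) in bits 17:9, and
+ * the raw reference in bits 8:0. The reference is scaled into an s(10,3)
+ * fixed-point value before being split.
+ */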
+static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, s16 ref)
+{
+ s8 ofst_int = 0;
+ u8 base_cw_0db = 0x27;
+ u16 tssi_16dbm_cw = 0x12c;
+ s16 pwr_s10_3 = 0;
+ s16 rf_pwr_cw = 0;
+ u16 bb_pwr_cw = 0;
+ u32 pwr_cw = 0;
+ u32 tssi_ofst_cw = 0;
+
+ pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
+ rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
+ rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
+ pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
+
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
+ tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
+
+ return (tssi_ofst_cw << 18) | (pwr_cw << 9) | (ref & GENMASK(8, 0));
+}
+
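+/* UL trigger-based power offset: the 1TX value is the offset scaled by
+ * four; the 2TX value is first derated by 3 (floored at -32). Both are
+ * replicated into each of the four byte lanes of the 1TX/2TX registers.
+ */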
+static
+void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ s8 pw_ofst_2tx;
+ s8 val_1t;
+ s8 val_2t;
+ u32 reg;
+ u8 i;
+
+ if (pw_ofst < -32 || pw_ofst > 31) {
+ rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+ return;
+ }
+ val_1t = pw_ofst << 2;
+ pw_ofst_2tx = max(pw_ofst - 3, -32);
+ val_2t = pw_ofst_2tx << 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_1tx=0x%x\n", val_1t);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_2tx=0x%x\n", val_2t);
+
+ for (i = 0; i < 4; i++) {
+ /* 1TX */
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_PWR_UL_TB_1T_V1_MASK << (8 * i),
+ val_1t);
+ /* 2TX */
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_PWR_UL_TB_2T_V1_MASK << (8 * i),
+ val_2t);
+ }
+}
+
+static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800};
+ const u32 mask = 0x7FFFFFF;
+ const u8 ofst_ofdm = 0x4;
+ const u8 ofst_cck = 0x8;
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+ u32 val;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
+ GENMASK(27, 10), 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
+ phy_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
+ phy_idx);
+}
+
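+/* Tx power by rate: read one entry per {nss, rate section, rate index}
+ * and pack four s8 values into each 32-bit write, walking upward from
+ * R_AX_PWR_BY_RATE.
+ */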
+static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
+ static const u8 rs[] = {
+ RTW89_RS_CCK,
+ RTW89_RS_OFDM,
+ RTW89_RS_MCS,
+ RTW89_RS_HEDCM,
+ };
+ s8 tmp;
+ u8 i, j;
+ u32 val, shf, addr = R_AX_PWR_BY_RATE;
+ struct rtw89_rate_desc cur;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr byrate with ch=%d\n", ch);
+
+ for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
+ for (i = 0; i < ARRAY_SIZE(rs); i++) {
+ if (cur.nss >= rtw89_rs_nss_max[rs[i]])
+ continue;
+
+ val = 0;
+ cur.rs = rs[i];
+
+ for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
+ cur.idx = j;
+ shf = (j % 4) * 8;
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
+ &cur);
+ val |= (tmp << shf);
+
+ if ((j + 1) % 4)
+ continue;
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ val = 0;
+ addr += 4;
+ }
+ }
+ }
+}
+
+static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 band = chan->band_type;
+ struct rtw89_rate_desc desc = {
+ .nss = RTW89_NSS_1,
+ .rs = RTW89_RS_OFFSET,
+ };
+ u32 val = 0;
+ s8 v;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
+
+ for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
+ val |= ((v & 0xf) << (4 * desc.idx));
+ }
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
+ GENMASK(19, 0), val);
+}
+
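+/* 2 GHz CCK spectrum shaping: load one of three 8-entry DFIR coefficient
+ * sets (flat, sharp, or the dedicated channel 14 set) into the BB filter
+ * registers.
+ */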
+static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ u8 tx_shape_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __DFIR_CFG_MASK 0xffffff
+#define __DFIR_CFG_NR 8
+#define __DECL_DFIR_VAR(_prefix, _name, _val...) \
+ static const u32 _prefix ## _ ## _name[] = {_val}; \
+ static_assert(ARRAY_SIZE(_prefix ## _ ## _name) == __DFIR_CFG_NR)
+#define __DECL_DFIR_PARAM(_name, _val...) __DECL_DFIR_VAR(param, _name, _val)
+#define __DECL_DFIR_ADDR(_name, _val...) __DECL_DFIR_VAR(addr, _name, _val)
+
+ __DECL_DFIR_PARAM(flat,
+ 0x003D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
+ 0x00F86F9A, 0x00FAEF92, 0x00FE5FCC, 0x00FFDFF5);
+ __DECL_DFIR_PARAM(sharp,
+ 0x003D83FF, 0x002C636A, 0x0013F204, 0x00008090,
+ 0x00F87FB0, 0x00F99F83, 0x00FDBFBA, 0x00003FF5);
+ __DECL_DFIR_PARAM(sharp_14,
+ 0x003B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
+ 0x00FD8F92, 0x0002D011, 0x0001C02C, 0x00FFF00A);
+ __DECL_DFIR_ADDR(filter,
+ 0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0,
+ 0x45C4, 0x45C8);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ const u32 *param;
+ int i;
+
+ if (ch > 14) {
+ rtw89_warn(rtwdev,
+ "set tx shape dfir by unknown ch: %d on 2G\n", ch);
+ return;
+ }
+
+ if (ch == 14)
+ param = param_sharp_14;
+ else
+ param = tx_shape_idx == 0 ? param_flat : param_sharp;
+
+ for (i = 0; i < __DFIR_CFG_NR; i++) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "set tx shape dfir: 0x%x: 0x%x\n", addr_filter[i],
+ param[i]);
+ rtw89_phy_write32_idx(rtwdev, addr_filter[i], __DFIR_CFG_MASK,
+ param[i], phy_idx);
+ }
+
+#undef __DECL_DFIR_ADDR
+#undef __DECL_DFIR_PARAM
+#undef __DECL_DFIR_VAR
+#undef __DFIR_CFG_NR
+#undef __DFIR_CFG_MASK
+}
+
+static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 band = chan->band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd];
+ u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd];
+
+ if (band == RTW89_BAND_2G)
+ rtw8852c_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+
+ rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev,
+ (enum rtw89_mac_idx)phy_idx,
+ tx_shape_ofdm);
+}
+
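+/* Tx power limits are downloaded per tx path as 40-byte pages, with four
+ * s8 entries packed into each 32-bit write.
+ */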
+static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __MAC_TXPWR_LMT_PAGE_SIZE 40
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
+ struct rtw89_txpwr_limit lmt[NTX_NUM_8852C];
+ u32 addr, val;
+ const s8 *ptr;
+ u8 i, j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
+
+ for (i = 0; i < NTX_NUM_8852C; i++) {
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
+
+ for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
+ addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
+ ptr = (s8 *)&lmt[i] + j;
+
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+#undef __MAC_TXPWR_LMT_PAGE_SIZE
+}
+
+static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
+ struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852C];
+ u32 addr, val;
+ const s8 *ptr;
+ u8 i, j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
+
+ for (i = 0; i < NTX_NUM_8852C; i++) {
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
+
+ for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
+ addr = R_AX_PWR_RU_LMT + j +
+ __MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
+ ptr = (s8 *)&lmt_ru[i] + j;
+
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+
+#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
+}
+
+static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx);
+}
+
+static void
+rtw8852c_init_tssi_ctrl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ static const struct rtw89_reg2_def ctrl_ini[] = {
+ {0xD938, 0x00010100},
+ {0xD93C, 0x0500D500},
+ {0xD940, 0x00000500},
+ {0xD944, 0x00000005},
+ {0xD94C, 0x00220000},
+ {0xD950, 0x00030000},
+ };
+ u32 addr;
+ int i;
+
+ for (addr = R_AX_TSSI_CTRL_HEAD; addr <= R_AX_TSSI_CTRL_TAIL; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+
+ for (i = 0; i < ARRAY_SIZE(ctrl_ini); i++)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, ctrl_ini[i].addr,
+ ctrl_ini[i].data);
+
+ rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev,
+ (enum rtw89_mac_idx)phy_idx,
+ RTW89_TSSI_BANDEDGE_FLAT);
+}
+
+static int
+rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
+ if (ret)
+ return ret;
+
+ rtw8852c_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
+ RTW89_MAC_1 :
+ RTW89_MAC_0);
+ rtw8852c_init_tssi_ctrl(rtwdev, phy_idx);
+
+ return 0;
+}
+
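+/* RX path configuration: with DBCC enabled each PHY is given one antenna
+ * segment and single-NSS rate limits; otherwise the requested path (A, B,
+ * or both) selects the segment/CCA setup and the MCS/NSS limits follow
+ * the number of active paths.
+ */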
+static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
+ u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
+ u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_ANT_RX_SEG0, 2,
+ RTW89_PHY_1);
+
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG0,
+ 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG1,
+ 1);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG0, 2,
+ RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG1, 2,
+ RTW89_PHY_1);
+
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 8);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+
+ rtw89_phy_write32_idx(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0, RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0, RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHE_USER_MAX, 1,
+ RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0,
+ RTW89_PHY_1);
+ rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0,
+ RTW89_PHY_1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
+ } else {
+ if (rx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD,
+ B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 3);
+ } else if (rx_path == RF_PATH_B) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD,
+ B_ANT_RX_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG1, 2);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
+ 0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 3);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD,
+ B_ANT_RX_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW,
+ B_ANT_RX_1RCCA_SEG1, 3);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT,
+ B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT,
+ B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS,
+ 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
+ 1);
+ rtw8852c_ctrl_btg(rtwdev, band == RTW89_BAND_2G);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
+ rst_mask0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB,
+ rst_mask1, 3);
+ }
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 8);
+ }
+}
+
+static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
+ enum rtw89_mac_idx mac_idx)
+{
+ struct rtw89_reg2_def path_com[] = {
+ {R_AX_PATH_COM0, AX_PATH_COM0_DFVAL},
+ {R_AX_PATH_COM1, AX_PATH_COM1_DFVAL},
+ {R_AX_PATH_COM2, AX_PATH_COM2_DFVAL},
+ {R_AX_PATH_COM3, AX_PATH_COM3_DFVAL},
+ {R_AX_PATH_COM4, AX_PATH_COM4_DFVAL},
+ {R_AX_PATH_COM5, AX_PATH_COM5_DFVAL},
+ {R_AX_PATH_COM6, AX_PATH_COM6_DFVAL},
+ {R_AX_PATH_COM7, AX_PATH_COM7_DFVAL},
+ {R_AX_PATH_COM8, AX_PATH_COM8_DFVAL},
+ {R_AX_PATH_COM9, AX_PATH_COM9_DFVAL},
+ {R_AX_PATH_COM10, AX_PATH_COM10_DFVAL},
+ {R_AX_PATH_COM11, AX_PATH_COM11_DFVAL},
+ };
+ u32 addr;
+ u32 reg;
+ u8 cr_size = ARRAY_SIZE(path_com);
+ u8 i = 0;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, RTW89_PHY_1);
+
+ for (addr = R_AX_MACID_ANT_TABLE;
+ addr <= R_AX_MACID_ANT_TABLE_LAST; addr += 4) {
+ reg = rtw89_mac_reg_by_idx(addr, mac_idx);
+ rtw89_write32(rtwdev, reg, 0);
+ }
+
+ if (tx_path == RF_A) {
+ path_com[0].data = AX_PATH_COM0_PATHA;
+ path_com[1].data = AX_PATH_COM1_PATHA;
+ path_com[2].data = AX_PATH_COM2_PATHA;
+ path_com[7].data = AX_PATH_COM7_PATHA;
+ path_com[8].data = AX_PATH_COM8_PATHA;
+ } else if (tx_path == RF_B) {
+ path_com[0].data = AX_PATH_COM0_PATHB;
+ path_com[1].data = AX_PATH_COM1_PATHB;
+ path_com[2].data = AX_PATH_COM2_PATHB;
+ path_com[7].data = AX_PATH_COM7_PATHB;
+ path_com[8].data = AX_PATH_COM8_PATHB;
+ } else if (tx_path == RF_AB) {
+ path_com[0].data = AX_PATH_COM0_PATHAB;
+ path_com[1].data = AX_PATH_COM1_PATHAB;
+ path_com[2].data = AX_PATH_COM2_PATHAB;
+ path_com[7].data = AX_PATH_COM7_PATHAB;
+ path_com[8].data = AX_PATH_COM8_PATHAB;
+ } else {
+ rtw89_warn(rtwdev, "[Invalid Tx Path]Tx Path: %d\n", tx_path);
+ return;
+ }
+
+ for (i = 0; i < cr_size; i++) {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "0x%x = 0x%x\n",
+ path_com[i].addr, path_com[i].data);
+ reg = rtw89_mac_reg_by_idx(path_com[i].addr, mac_idx);
+ rtw89_write32(rtwdev, reg, path_com[i].data);
+ }
+}
+
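+/* Switch both paths between the BT-coex pre-AGC profile and the default
+ * one: forced FIR type, RXBB bound, LNA6 OP1dB points, BT backoff and
+ * IBADC backoff all change as a set.
+ */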
+static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
+{
+ if (bt_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1,
+ B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1,
+ B_PATH1_FRC_FIR_TYPE_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_RXBB_V1,
+ B_PATH0_RXBB_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_RXBB_V1,
+ B_PATH1_RXBB_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1,
+ B_PATH0_G_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA0_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA1_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA1_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_BACKOFF_V1,
+ B_PATH0_BT_BACKOFF_V1, 0x780D1E);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_BACKOFF_V1,
+ B_PATH1_BT_BACKOFF_V1, 0x780D1E);
+ rtw89_phy_write32_mask(rtwdev, R_P0_BACKOFF_IBADC_V1,
+ B_P0_BACKOFF_IBADC_V1, 0x34);
+ rtw89_phy_write32_mask(rtwdev, R_P1_BACKOFF_IBADC_V1,
+ B_P1_BACKOFF_IBADC_V1, 0x34);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1,
+ B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1,
+ B_PATH1_FRC_FIR_TYPE_MSK_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_RXBB_V1,
+ B_PATH0_RXBB_MSK_V1, 0x60);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_RXBB_V1,
+ B_PATH1_RXBB_MSK_V1, 0x60);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1,
+ B_PATH0_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA1_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA1_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_BACKOFF_V1,
+ B_PATH0_BT_BACKOFF_V1, 0x79E99E);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_BACKOFF_V1,
+ B_PATH1_BT_BACKOFF_V1, 0x79E99E);
+ rtw89_phy_write32_mask(rtwdev, R_P0_BACKOFF_IBADC_V1,
+ B_P0_BACKOFF_IBADC_V1, 0x26);
+ rtw89_phy_write32_mask(rtwdev, R_P1_BACKOFF_IBADC_V1,
+ B_P1_BACKOFF_IBADC_V1, 0x26);
+ }
+}
+
+static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB);
+
+ if (hal->rx_nss == 1) {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+}
+
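+/* Trigger a thermal measurement by toggling RR_TM_TRI, give the sensor
+ * time to settle, then read back the raw thermal value.
+ */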
+static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
+}
+
+static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->cv = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+
+ if (module->rfe_type > 0)
+ module->ant.num = (module->rfe_type % 2 ? 2 : 3);
+ else
+ module->ant.num = 2;
+
+ module->ant.diversity = 0;
+ module->ant.isolation = 10;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+}
+
+static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
+{
+ if (btg) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x20);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_BT_SHARE, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_BT_SEG0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P2, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN,
+ B_BT_DYN_DC_EST_EN_MSK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x0);
+ }
+}
+
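+/* Write one TRX mask LUT entry: open the RF LUT write-enable, select the
+ * BT group, write the mask value, then close the write-enable again.
+ */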
+static
+void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_mac_ax_coex coex_params = {
+ .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
+ .direction = RTW89_MAC_AX_COEX_INNER,
+ };
+
+ /* PTA init */
+ rtw89_mac_coex_init_v1(rtwdev, &coex_params);
+
+ /* set WL Tx response = Hi-Pri */
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+
+ /* set rf gnt debug off */
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
+
+ /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
+ if (module->ant.type == BTC_ANT_SHARED) {
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852c_set_trx_mask(rtwdev,
+ RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
+ }
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_AX_BT_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* enable BT counter 0xda10[1:0] = 2'b11 */
+ rtw89_write32_set(rtwdev,
+ R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN |
+ B_AX_BT_CNT_RST_V1);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void rtw8852c_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ u32 bitmap = 0;
+ u32 reg = 0;
+
+ switch (map) {
+ case BTC_PRI_MASK_TX_RESP:
+ reg = R_BTC_COEX_WL_REQ;
+ bitmap = B_BTC_RSP_ACK_HI;
+ break;
+ case BTC_PRI_MASK_BEACON:
+ reg = R_BTC_COEX_WL_REQ;
+ bitmap = B_BTC_TX_BCN_HI;
+ break;
+ default:
+ return;
+ }
+
+ if (state)
+ rtw89_write32_set(rtwdev, reg, bitmap);
+ else
+ rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
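+/* Coex WL tx power control word: the low 16 bits carry the all-time power
+ * control (9-bit signed value plus an enable flag), the high 16 bits the
+ * GNT_BT power control; 0xffff in either half means "don't force".
+ */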
+union rtw8852c_btc_wl_txpwr_ctrl {
+ u32 txpwr_val;
+ struct {
+ union {
+ u16 ctrl_all_time;
+ struct {
+ s16 data:9;
+ u16 rsvd:6;
+ u16 flag:1;
+ } all_time;
+ };
+ union {
+ u16 ctrl_gnt_bt;
+ struct {
+ s16 data:9;
+ u16 rsvd:7;
+ } gnt_bt;
+ };
+ };
+} __packed;
+
+static void
+rtw8852c_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ union rtw8852c_btc_wl_txpwr_ctrl arg = { .txpwr_val = txpwr_val };
+ s32 val;
+
+#define __write_ctrl(_reg, _msk, _val, _en, _cond) \
+do { \
+ u32 _wrt = FIELD_PREP(_msk, _val); \
+ BUILD_BUG_ON((_msk & _en) != 0); \
+ if (_cond) \
+ _wrt |= _en; \
+ else \
+ _wrt &= ~_en; \
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, _reg, \
+ _msk | _en, _wrt); \
+} while (0)
+
+ switch (arg.ctrl_all_time) {
+ case 0xffff:
+ val = 0;
+ break;
+ default:
+ val = arg.all_time.data;
+ break;
+ }
+
+ __write_ctrl(R_AX_PWR_RATE_CTRL, B_AX_FORCE_PWR_BY_RATE_VALUE_MASK,
+ val, B_AX_FORCE_PWR_BY_RATE_EN,
+ arg.ctrl_all_time != 0xffff);
+
+ switch (arg.ctrl_gnt_bt) {
+ case 0xffff:
+ val = 0;
+ break;
+ default:
+ val = arg.gnt_bt.data;
+ break;
+ }
+
+ __write_ctrl(R_AX_PWR_COEXT_CTRL, B_AX_TXAGC_BT_MASK, val,
+ B_AX_TXAGC_BT_EN, arg.ctrl_gnt_bt != 0xffff);
+
+#undef __write_ctrl
+}
+
+static
+s8 rtw8852c_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return clamp_t(s8, val, -100, 0) + 100;
+}
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852c_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852c_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const u8 rtw89_btc_8852c_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30};
+static const u8 rtw89_btc_8852c_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {40, 36, 31, 28};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852c_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda00),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda04),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda38),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda44),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda48),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+};
+
+static
+void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+
+ /* fix LNA2 = level-5 for BT ACI issue at BTG */
+ if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0)
+ dm->trx_para_level = 1;
+}
+
+static
+void rtw8852c_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ /* Feature moved to firmware */
+}
+
+static
+void rtw8852c_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x620);
+
+ /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
+ if (state)
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0,
+ RFREG_MASK, 0x179c);
+ else
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0,
+ RFREG_MASK, 0x208);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852c_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ /* level=0 default: TIA 1/0 = (LNA2, TIAN6) = (7,1)/(5,1) = 21 dB/12 dB
+ * level=1 fix LNA2=5: TIA 1/0 = (LNA2, TIAN6) = (5,0)/(5,1) = 18 dB/12 dB
+ * to improve BT ACI in co-rx
+ */
+
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void rtw8852c_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ switch (level) {
+ case 0: /* original */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852c_set_wl_lna2(rtwdev, 0);
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, true);
+ rtw8852c_set_wl_lna2(rtwdev, 0);
+ break;
+ case 2: /* for BTG co-rx */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852c_set_wl_lna2(rtwdev, 1);
+ break;
+ }
+}
+
+static void rtw8852c_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 chan_idx = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan_idx == 0)
+ return;
+
+ rtw8852c_decode_chan_idx(rtwdev, chan_idx, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8852c_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
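+/* Bring up BB/RF: enable the BB function blocks, pulse the AFE digital
+ * clock enable, set the S0/S1 LDO voltage selects, and program the RF
+ * control registers through the XTAL SI interface.
+ */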
+static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ rtw89_write32_mask(rtwdev, R_AX_AFE_OFF_CTRL1, B_AX_S0_LDO_VSEL_F_MASK, 0x1);
+ rtw89_write32_mask(rtwdev, R_AX_AFE_OFF_CTRL1, B_AX_S1_LDO_VSEL_F_MASK, 0x1);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL0, 0x7, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x6c, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xc7, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xc7, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL3, 0xd, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ return 0;
+}
+
+static const struct rtw89_chip_ops rtw8852c_chip_ops = {
+ .enable_bb_rf = rtw8852c_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852c_mac_disable_bb_rf,
+ .bb_reset = rtw8852c_bb_reset,
+ .bb_sethw = rtw8852c_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
+ .set_channel = rtw8852c_set_channel,
+ .set_channel_help = rtw8852c_set_channel_help,
+ .read_efuse = rtw8852c_read_efuse,
+ .read_phycap = rtw8852c_read_phycap,
+ .fem_setup = NULL,
+ .rfk_init = rtw8852c_rfk_init,
+ .rfk_channel = rtw8852c_rfk_channel,
+ .rfk_band_changed = rtw8852c_rfk_band_changed,
+ .rfk_scan = rtw8852c_rfk_scan,
+ .rfk_track = rtw8852c_rfk_track,
+ .power_trim = rtw8852c_power_trim,
+ .set_txpwr = rtw8852c_set_txpwr,
+ .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852c_init_txpwr_unit,
+ .get_thermal = rtw8852c_get_thermal,
+ .ctrl_btg = rtw8852c_ctrl_btg,
+ .query_ppdu = rtw8852c_query_ppdu,
+ .bb_ctrl_btc_preagc = rtw8852c_bb_ctrl_btc_preagc,
+ .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .pwr_on_func = rtw8852c_pwr_on_func,
+ .pwr_off_func = rtw8852c_pwr_off_func,
+ .fill_txdesc = rtw89_core_fill_txdesc_v1,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
+ .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1,
+
+ .btc_set_rfe = rtw8852c_btc_set_rfe,
+ .btc_init_cfg = rtw8852c_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852c_btc_set_wl_pri,
+ .btc_set_wl_txpwr_ctrl = rtw8852c_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8852c_btc_get_bt_rssi,
+ .btc_bt_aci_imp = rtw8852c_btc_bt_aci_imp,
+ .btc_update_bt_cnt = rtw8852c_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852c_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852c_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
+};
+
+const struct rtw89_chip_info rtw8852c_chip_info = {
+ .chip_id = RTL8852C,
+ .ops = &rtw8852c_chip_ops,
+ .fw_name = "rtw89/rtw8852c_fw.bin",
+ .fifo_size = 458752,
+ .dle_scc_rsvd_size = 0,
+ .max_amsdu_limit = 8000,
+ .dis_2g_40m_ul_ofdma = false,
+ .rsvd_ple_ofst = 0x6f800,
+ .hfc_param_ini = rtw8852c_hfc_param_ini_pcie,
+ .dle_mem = rtw8852c_dle_mem_pcie,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .bb_table = &rtw89_8852c_phy_bb_table,
+ .bb_gain_table = &rtw89_8852c_phy_bb_gain_table,
+ .rf_table = {&rtw89_8852c_phy_radiob_table,
+ &rtw89_8852c_phy_radioa_table,},
+ .nctl_table = &rtw89_8852c_phy_nctl_table,
+ .byr_table = &rtw89_8852c_byr_table,
+ .txpwr_lmt_2g = &rtw89_8852c_txpwr_lmt_2g,
+ .txpwr_lmt_5g = &rtw89_8852c_txpwr_lmt_5g,
+ .txpwr_lmt_6g = &rtw89_8852c_txpwr_lmt_6g,
+ .txpwr_lmt_ru_2g = &rtw89_8852c_txpwr_lmt_ru_2g,
+ .txpwr_lmt_ru_5g = &rtw89_8852c_txpwr_lmt_ru_5g,
+ .txpwr_lmt_ru_6g = &rtw89_8852c_txpwr_lmt_ru_6g,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .dig_regs = &rtw8852c_dig_regs,
+ .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .support_chanctx_num = 1,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ) |
+ BIT(NL80211_BAND_6GHZ),
+ .support_bw160 = true,
+ .hw_sec_hdr = true,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 20,
+ .scam_num = 128,
+ .bacam_num = 8,
+ .bacam_dynamic_num = 8,
+ .bacam_v1 = true,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 96,
+ .dav_log_efuse_size = 16,
+ .phycap_addr = 0x590,
+ .phycap_size = 0x60,
+ .para_ver = 0x1,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+ .btc_fwinfo_buf = 1280,
+
+ .fcxbtcrpt_ver = 4,
+ .fcxtdma_ver = 3,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 3,
+ .fcxstep_ver = 3,
+ .fcxnullsta_ver = 2,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8852c_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852c_mon_reg),
+ .mon_reg = rtw89_btc_8852c_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_ul),
+ .rf_para_ulink = rtw89_btc_8852c_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_dl),
+ .rf_para_dlink = rtw89_btc_8852c_rf_dl,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1,
+ .h2c_desc_size = sizeof(struct rtw89_rxdesc_short),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body_v1),
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1,
+ .h2c_regs = rtw8852c_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1,
+ .c2h_regs = rtw8852c_c2h_regs,
+ .page_regs = &rtw8852c_page_regs,
+ .dcfo_comp = &rtw8852c_dcfo_comp,
+ .dcfo_comp_sft = 5,
+ .imr_info = &rtw8852c_imr_info,
+ .rrsr_cfgs = &rtw8852c_rrsr_cfgs,
+ .dma_ch_mask = 0,
+};
+EXPORT_SYMBOL(rtw8852c_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852c_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852C driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
new file mode 100644
index 000000000000..558dd0f048f2
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_H__
+#define __RTW89_8852C_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852C 2
+#define BB_PATH_NUM_8852C 2
+#define NTX_NUM_8852C 2
+
+struct rtw8852c_u_efuse {
+ u8 rsvd[0x38];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_e_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852c_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852c_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852c_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[2];
+ u8 rx_gain_2g_ofdm;
+ u8 rsvd9;
+ u8 rx_gain_2g_cck;
+ u8 rsvd10;
+ u8 rx_gain_5g_low;
+ u8 rsvd11;
+ u8 rx_gain_5g_mid;
+ u8 rsvd12;
+ u8 rx_gain_5g_high;
+ u8 rsvd13[35];
+ u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd14[10];
+ u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd15[110];
+ u8 channel_plan_6g;
+ u8 rsvd16[71];
+ union {
+ struct rtw8852c_u_efuse u;
+ struct rtw8852c_e_efuse e;
+ };
+} __packed;
+
+extern const struct rtw89_chip_info rtw8852c_chip_info;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
new file mode 100644
index 000000000000..006c2cf93111
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -0,0 +1,4082 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c.h"
+#include "rtw8852c_rfk.h"
+#include "rtw8852c_rfk_table.h"
+#include "rtw8852c_table.h"
+
+#define _TSSI_DE_MASK GENMASK(21, 12)
+static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
+static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
+static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
+static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
+static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
+static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
+static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
+static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};
+
+static const u32 rtw8852c_backup_bb_regs[] = {
+ 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220,
+ 0xc1d4, 0xc1d8, 0xc1e8
+};
+
+static const u32 rtw8852c_backup_rf_regs[] = {
+ 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005
+};
+
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)
+
+#define RXK_GROUP_NR 4
+static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
+static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
+static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
+static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
+static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
+static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x03};
+
+#define TXK_GROUP_NR 3
+static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
+static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
+static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
+static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
+static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
+static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
+static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
+static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
+static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
+static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
+static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
+static const u32 _txk_g_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
+
+static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
+ {0x8190, 0x8194, 0x8198, 0x81a4},
+ {0x81a8, 0x81c4, 0x81c8, 0x81e8},
+};
+
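+/* Map a PHY index to the RF paths to calibrate: both paths when DBCC is
+ * off, otherwise PHY0 -> path A and PHY1 -> path B.
+ */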
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
+ rtwdev->dbcc_en, phy_idx);
+
+ if (!rtwdev->dbcc_en)
+ return RF_AB;
+
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+}
+
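+/* Calibration clobbers a handful of BB and RF registers; the helpers
+ * below snapshot them before an RFK and write the values back afterwards.
+ */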
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]backup bb reg : %x, value =%x\n",
+ rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
+ u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path,
+ rtw8852c_backup_rf_regs[i], RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
+ rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]restore bb reg : %x, value =%x\n",
+ rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
+ u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
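+/* Poll RF register 0x00 on every requested path until the mode field
+ * leaves TX (0x2), so a calibration never starts mid-transmission.
+ */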
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u8 path;
+ u32 rf_mode;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[1][0], dack->addck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[1][0], dack->biask_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
+static void _addck_backup(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
+ dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
+ B_ADDCKR0_A0);
+ dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
+ B_ADDCKR0_A1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
+ dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
+ B_ADDCKR1_A0);
+ dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
+ B_ADDCKR1_A1);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
+ dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
+ dack->addck_d[0][1]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
+ dack->addck_d[1][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
+ dack->addck_d[1][1]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
+ dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK_S0P2,
+ B_DACK_S0M0);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
+ dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK_S0P3,
+ B_DACK_S0M1);
+ }
+ dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
+ B_DACK_BIAS00);
+ dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
+ B_DACK_BIAS01);
+ dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
+ B_DACK_DADCK00);
+ dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
+ B_DACK_DADCK01);
+}
+
+static void _dack_backup_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
+ dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK10S,
+ B_DACK10S);
+ rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
+ dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
+ R_DACK11S,
+ B_DACK11S);
+ }
+ dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
+ B_DACK_BIAS10);
+ dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
+ B_DACK_BIAS11);
+ dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
+ B_DACK_DADCK10);
+ dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
+ B_DACK_DADCK11);
+}
+
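+/* Write one path/index set of DACK results back to hardware: the MSBK
+ * codes are packed four per 32-bit word starting at 0xc200 + offset,
+ * followed by the combined bias/DADCK word.
+ */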
+static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 index)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 idx_offset, path_offset;
+ u32 val32, offset, addr;
+ u8 i;
+
+ idx_offset = (index == 0 ? 0 : 0x14);
+ path_offset = (path == RF_PATH_A ? 0 : 0x28);
+ offset = idx_offset + path_offset;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);
+
+ /* msbk_d: 15/14/13/12 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i + 12] << (i * 8);
+ addr = 0xc200 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* msbk_d: 11/10/9/8 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i + 8] << (i * 8);
+ addr = 0xc204 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* msbk_d: 7/6/5/4 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i + 4] << (i * 8);
+ addr = 0xc208 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* msbk_d: 3/2/1/0 */
+ val32 = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ val32 |= dack->msbk_d[path][index][i] << (i * 8);
+ addr = 0xc20c + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+
+ /* dadak_d/biask_d */
+ val32 = (dack->biask_d[path][index] << 22) |
+ (dack->dadck_d[path][index] << 14);
+ addr = 0xc210 + offset;
+ rtw89_phy_write32(rtwdev, addr, val32);
+ rtw89_phy_write32_set(rtwdev, addr, BIT(1));
+}
+
+static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u8 i;
+
+ for (i = 0; i < 2; i++)
+ _dack_reload_by_path(rtwdev, path, i);
+}
+
+static void _addck(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ /* S0 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
+ fsleep(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false, rtwdev, 0xc0fc, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
+
+ /* S1 */
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
+ fsleep(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false, rtwdev, 0xc1fc, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+ dack->addck_timeout[1] = true;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
+}
+
+static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_dack_reset_defs_a_tbl,
+ &rtw8852c_dack_reset_defs_b_tbl);
+}
+
+enum adc_ck {
+ ADC_NA = 0,
+ ADC_480M = 1,
+ ADC_960M = 2,
+ ADC_1920M = 3,
+};
+
+enum dac_ck {
+ DAC_40M = 0,
+ DAC_80M = 1,
+ DAC_120M = 2,
+ DAC_160M = 3,
+ DAC_240M = 4,
+ DAC_320M = 5,
+ DAC_480M = 6,
+ DAC_960M = 7,
+};
+
+enum rf_mode {
+ RF_SHUT_DOWN = 0x0,
+ RF_STANDBY = 0x1,
+ RF_TX = 0x2,
+ RF_RX = 0x3,
+ RF_TXIQK = 0x4,
+ RF_DPK = 0x5,
+ RF_RXK1 = 0x6,
+ RF_RXK2 = 0x7,
+};
+
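+/* Force the per-path DAC/ADC clock: the clock is always gated off first,
+ * and the new rate is programmed and re-enabled only when @force is set.
+ */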
+static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
+ enum dac_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
+}
+
+static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
+ enum adc_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+}
+
+static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
+{
+ if (s0) {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+ return false;
+ } else {
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
+ rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);
+
+ _dack_reset(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
+ ret = read_poll_timeout_atomic(_check_dack_done, done, done,
+ 1, 10000, false, rtwdev, true);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
+ rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
+
+ _dack_backup_s0(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_A);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
+ rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);
+
+ _dack_reset(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
+ ret = read_poll_timeout_atomic(_check_dack_done, done, done,
+ 1, 10000, false, rtwdev, false);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
+ dack->msbk_timeout[1] = true;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
+ rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
+
+ _dack_backup_s1(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_B);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+ _dack_s1(rtwdev);
+}
+
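+/* DRCK: trigger the RC clock calibration, wait for the done bit, then
+ * copy the measured code from R_DRCK_RES back into B_DRCK_VAL.
+ */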
+static void _drck(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+ int ret;
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false, rtwdev, 0xc0c8, BIT(3));
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);
+
+ val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
+}
+
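+/* Full DAC calibration: DRCK first, then ADDCK (results backed up and
+ * reloaded), then per-path DACK, with BTC notified around each one-shot
+ * and the original RF mode registers restored at the end.
+ */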
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 rf0_0, rf1_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+
+ dack->dack_done = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+ rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
+ rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
+ _drck(rtwdev);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+ _addck(rtwdev);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+
+ _addck_backup(rtwdev);
+ _addck_reload(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+ _dack(rtwdev);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+
+ _dack_dump(rtwdev);
+ dack->dack_done = true;
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
+#define RTW8852C_NCTL_VER 0xd
+#define RTW8852C_IQK_VER 0x2a
+#define RTW8852C_IQK_SS 2
+#define RTW8852C_IQK_THR_REK 8
+#define RTW8852C_IQK_CFIR_GROUP_NR 4
+
+enum rtw8852c_iqk_type {
+ ID_TXAGC,
+ ID_G_FLOK_COARSE,
+ ID_A_FLOK_COARSE,
+ ID_G_FLOK_FINE,
+ ID_A_FLOK_FINE,
+ ID_FLOK_VBUFFER,
+ ID_TXK,
+ ID_RXAGC,
+ ID_RXK,
+ ID_NBTXK,
+ ID_NBRXK,
+};
+
+static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxagc)
+{
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxagc);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxagc);
+}
+
+static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);
+
+ switch (iqk_info->iqk_bw[path]) {
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
+}
+
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
+{
+ u32 tmp;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
+
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);
+
+ return false;
+}
+
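+/* Compose the NCTL one-shot command for the requested calibration type,
+ * kick it off via R_NCTL_CFG and wait for the 0x55 done handshake.
+ */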
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
+ u32 iqk_cmd;
+ bool fail;
+
+ switch (ktype) {
+ case ID_TXAGC:
+ iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_A_FLOK_COARSE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x008 | (1 << (4 + path));
+ break;
+ case ID_G_FLOK_COARSE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_A_FLOK_FINE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x508 | (1 << (4 + path));
+ break;
+ case ID_G_FLOK_FINE:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x208 | (1 << (4 + path));
+ break;
+ case ID_FLOK_VBUFFER:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
+ iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
+ break;
+ case ID_RXAGC:
+ iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
+ break;
+ case ID_NBTXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
+ iqk_cmd = 0x408 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ fsleep(15);
+ fail = _iqk_check_cal(rtwdev, path, ktype);
+ rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
+
+ return fail;
+}
+
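+/* Wideband RX IQK: program the band-specific RX gain/attenuation for each
+ * of the four groups and fire a one-shot RXK per group, selecting the
+ * matching CFIR LUT entry before each shot.
+ */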
+static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u32 tmp;
+ u32 bkrf0;
+ u8 gp;
+
+ bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
+ if (path == RF_PATH_B) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
+ }
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
+ break;
+ }
+
+ fsleep(10);
+
+ for (gp = 0; gp < RXK_GROUP_NR; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
+ _rxk_g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
+ _rxk_g_idxattc2[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
+ _rxk_a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
+ _rxk_a_idxattc2[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
+ _rxk_a6_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
+ _rxk_a6_idxattc2[gp]);
+ break;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SET, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP_V1, gp);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ }
+
+ if (path == RF_PATH_B)
+ rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
+
+ if (fail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->nb_rxcfir[path] = 0x40000000;
+ iqk_info->is_wb_rxiqk[path] = true;
+ }
+
+ return false;
+}
+
+static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u32 tmp;
+ u32 bkrf0;
+ u8 gp = 0x2;
+
+ bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
+ if (path == RF_PATH_B) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
+ }
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
+ break;
+ }
+
+ fsleep(10);
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+
+ if (path == RF_PATH_B)
+ rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
+
+ if (!fail)
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ else
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+
+ iqk_info->is_wb_rxiqk[path] = false;
+ return fail;
+}
+
+static bool _txk_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u8 gp;
+
+ for (gp = 0; gp < TXK_GROUP_NR; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _txk_g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _txk_g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _txk_g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _txk_a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _txk_a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _txk_a_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a_itqt[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ _txk_a6_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ _txk_a6_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ _txk_a6_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a6_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_SET, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP, gp + 1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ }
+
+ if (fail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ iqk_info->nb_txcfir[path] = 0x40000000;
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ return fail;
+}
+
+static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+ u8 gp = 0x2;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a_itqt[gp]);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, _txk_a6_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+
+ if (!fail)
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ else
+ iqk_info->nb_txcfir[path] = 0x40000002;
+
+ iqk_info->is_wb_txiqk[path] = false;
+
+ return fail;
+}
+
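+/* Read back the LOK core and vbuffer I/Q codes; a code at either extreme
+ * of its tuning range is treated as a failed LOK.
+ */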
+static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx = mcc_info->table_idx;
+ bool is_fail1, is_fail2;
+ u32 val;
+ u32 core_i;
+ u32 core_q;
+ u32 vbuff_i;
+ u32 vbuff_q;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
+ core_i = FIELD_GET(RR_TXMO_COI, val);
+ core_q = FIELD_GET(RR_TXMO_COQ, val);
+
+ if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
+ is_fail1 = true;
+ else
+ is_fail1 = false;
+
+ iqk_info->lok_idac[idx][path] = val;
+
+ val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
+ vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
+ vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);
+
+ if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
+ is_fail2 = true;
+ else
+ is_fail2 = false;
+
+ iqk_info->lok_vbuf[idx][path] = val;
+
+ return is_fail1 || is_fail2;
+}
+
+static bool _iqk_lok(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 tmp_id = 0x0;
+ bool fail = false;
+ bool tmp = false;
+
+ /* Step 0: Init RF gain & tone idx= 8.25Mhz */
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);
+
+ /* Step 1 START: _lok_coarse_fine_wi_swap */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_G_FLOK_COARSE;
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_COARSE;
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_COARSE;
+ break;
+ default:
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
+ iqk_info->lok_cor_fail[0][path] = tmp;
+
+ /* Step 2 */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ default:
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
+
+ /* Step 3 */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_G_FLOK_FINE;
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_FINE;
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x9);
+ tmp_id = ID_A_FLOK_FINE;
+ break;
+ default:
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
+ iqk_info->lok_fin_fail[0][path] = tmp;
+
+ /* Step 4 large rf gain */
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, 0x1b);
+ break;
+ }
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
+ fail = _lok_finetune_check(rtwdev, path);
+
+ return fail;
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ default:
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ fsleep(10);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ fsleep(10);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ case RTW89_BAND_6G:
+ rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ fsleep(10);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
+ break;
+ }
+}
+
+static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 tmp;
+ bool flag;
+
+ iqk_info->thermal[path] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ iqk_info->thermal_rek_en = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
+ iqk_info->thermal[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
+ iqk_info->lok_cor_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
+ iqk_info->lok_fin_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
+ iqk_info->iqk_tx_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
+ iqk_info->iqk_rx_fail[0][path]);
+
+ flag = iqk_info->lok_cor_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
+ flag = iqk_info->lok_fin_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
+ flag = iqk_info->iqk_tx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
+ flag = iqk_info->iqk_rx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
+ iqk_info->bp_iqkenable[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_txkresult[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_rxkresult[path] = tmp;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
+ iqk_info->iqk_times);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
+ if (tmp != 0x0)
+ iqk_info->iqk_fail_cnt++;
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
+ iqk_info->iqk_fail_cnt);
+}
+
+static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ _iqk_txk_setting(rtwdev, path);
+ iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);
+
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_rxk_setting(rtwdev, path);
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_info_iqk(rtwdev, phy_idx, path);
+}
+
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
+ iqk_info->iqk_band[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
+ path, iqk_info->iqk_bw[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
+ path, iqk_info->iqk_ch[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
+ rtwdev->dbcc_en ? "on" : "off",
+ iqk_info->iqk_band[path] == 0 ? "2G" :
+ iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
+ iqk_info->iqk_ch[path],
+ iqk_info->iqk_bw[path] == 0 ? "20M" :
+ iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
+ if (!rtwdev->dbcc_en)
+ iqk_info->syn1to2 = 0x1;
+ else
+ iqk_info->syn1to2 = 0x3;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
+ iqk_info->iqk_band[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
+ iqk_info->iqk_bw[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
+ iqk_info->iqk_ch[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail;
+
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_txcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_rxcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00001219 + (path << 4));
+ fsleep(200);
+ fail = _iqk_check_cal(rtwdev, path, 0x12);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_iqk_afebb_restore_defs_a_tbl,
+ &rtw8852c_iqk_afebb_restore_defs_b_tbl);
+
+ rtw8852c_disable_rxagc(rtwdev, path, 0x1);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ u8 idx = mcc_info->table_idx;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+}
+
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
+
+ /* 01_BB_AFE_for DPK_S0_20210820 */
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
+
+ /* disable rxagc */
+ rtw8852c_disable_rxagc(rtwdev, path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);
+
+ rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);
+
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
+}
+
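+/* RCK: with the path forced into RX, trigger the RC calibration, wait
+ * for the done bit, then rewrite the returned code and restore RR_RSV1.
+ */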
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5, rck_val = 0;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
+ false, rtwdev, path, 0x1c, BIT(3));
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 ch, path;
+
+ rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->thermal_rek_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
+ iqk_info->iqk_channel[ch] = 0x0;
+ for (path = 0; path < RTW8852C_IQK_SS; path++) {
+ iqk_info->lok_cor_fail[ch][path] = false;
+ iqk_info->lok_fin_fail[ch][path] = false;
+ iqk_info->iqk_tx_fail[ch][path] = false;
+ iqk_info->iqk_rx_fail[ch][path] = false;
+ iqk_info->iqk_mcc_ch[ch][path] = 0x0;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
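+/* Run a full IQK for one path: capture channel info, back up BB/RF
+ * registers, switch MAC/BB into calibration mode, calibrate, then restore
+ * everything in reverse order, with BTC notified around the whole flow.
+ */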
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK strat!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->kcount = 0;
+ iqk_info->version = RTW8852C_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _rfk_backup_bb_reg(rtwdev, backup_bb_val);
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_restore_bb_reg(rtwdev, backup_bb_val);
+ _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ switch (_kpath(rtwdev, phy_idx)) {
+ case RF_A:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ break;
+ case RF_B:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ case RF_AB:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ default:
+ break;
+ }
+}
+
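+/* Pulse the RX DCK trigger and poll RR_DCK1 for completion; a timeout
+ * here is promoted to rtw89_warn rather than a debug message.
+ */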
+static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
+{
+ int ret;
+ u32 val;
+
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
+ 2, 2000, false, rtwdev, path,
+ RR_DCK1, RR_DCK1_DONE);
+ if (ret)
+ rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);
+
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+}
+
+static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ bool is_afe)
+{
+ u8 res;
+
+ rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
+
+ _rx_dck_toggle(rtwdev, path);
+ if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
+ return;
+ res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
+ if (res > 1) {
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
+ _rx_dck_toggle(rtwdev, path);
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
+ }
+}
+
+#define RTW8852C_RF_REL_VERSION 34
+#define RTW8852C_DPK_VER 0x10
+#define RTW8852C_DPK_TH_AVG_NUM 4
+#define RTW8852C_DPK_RF_PATH 2
+#define RTW8852C_DPK_KIP_REG_NUM 5
+#define RTW8852C_DPK_RXSRAM_DBG 0
+
+enum rtw8852c_dpk_id {
+ LBK_RXIQK = 0x06,
+ SYNC = 0x10,
+ MDPK_IDL = 0x11,
+ MDPK_MPA = 0x12,
+ GAIN_LOSS = 0x13,
+ GAIN_CAL = 0x14,
+ DPK_RXAGC = 0x15,
+ KIP_PRESET = 0x16,
+ KIP_RESTORE = 0x17,
+ DPK_TXAGC = 0x19,
+ D_KIP_PRESET = 0x28,
+ D_TXAGC = 0x29,
+ D_RXAGC = 0x2a,
+ D_SYNC = 0x2b,
+ D_GAIN_LOSS = 0x2c,
+ D_MDPK_IDL = 0x2d,
+ D_GAIN_NORM = 0x2f,
+ D_KIP_THERMAL = 0x30,
+ D_KIP_RESTORE = 0x31
+};
+
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+
+enum dpk_agc_step {
+ DPK_AGC_STEP_SYNC_DGAIN,
+ DPK_AGC_STEP_GAIN_LOSS_IDX,
+ DPK_AGC_STEP_GL_GT_CRITERION,
+ DPK_AGC_STEP_GL_LT_CRITERION,
+ DPK_AGC_STEP_SET_TX_GAIN,
+};
+
+static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool off);
+
+static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+ u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
+ reg_bkup[path][i] =
+ rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
+static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+ u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
+ MASKDWORD, reg_bkup[path][i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
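+/* Issue a one-shot DPK stage: the stage id goes in the high byte of the
+ * command word and a path-dependent opcode in the low byte; completion
+ * uses the same 0x55 handshake as IQK.
+ */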
+static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
+{
+ u16 dpk_cmd;
+ u32 val;
+ int ret;
+
+ dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
+ mdelay(10);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
+ id == 0x06 ? "LBK_RXIQK" :
+ id == 0x10 ? "SYNC" :
+ id == 0x11 ? "MDPK_IDL" :
+ id == 0x12 ? "MDPK_MPA" :
+ id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
+ dpk_cmd, ret);
+
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot over 20ms!!!!\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+ path, dpk->cur_idx[path], phy,
+ rtwdev->is_tssi_mode[path] ? "on" : "off",
+ rtwdev->dbcc_en ? "on" : "off",
+ dpk->bp[path][kidx].band == 0 ? "2G" :
+ dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+ dpk->bp[path][kidx].ch,
+ dpk->bp[path][kidx].bw == 0 ? "20M" :
+ dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ /*1. Keep ADC_fifo reset*/
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
+
+ /*2. BB for IQK DBG mode*/
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);
+
+ /*3.Set DAC clk*/
+ rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
+
+ /*4. Set ADC clk*/
+ rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
+ B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
+
+ /*5. ADDA fifo rst*/
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
+}
+
+static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
+ B_P0_NRBW_DBG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_pause)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, is_pause);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+ is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
+{
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
+ ctrl_by_kip ? "KIP" : "BB");
+}
+
+static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
+{
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n",
+ path, force ? "on" : "off");
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ _dpk_txpwr_bb_force(rtwdev, path, false);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
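+/*
+ * Loopback RX IQK for DPK: save RF 0x11 and the KIP IQ-path switch, run
+ * the LBK_RXIQK one-shot with a fixed RX tone, then restore the originals.
+ */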
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
+ u8 cur_rxbb;
+ u32 rf_11, reg_81cc;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
+
+ _dpk_kip_control_rfc(rtwdev, path, false);
+
+ cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
+ rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
+ reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_SW);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);
+
+ _dpk_kip_control_rfc(rtwdev, path, true);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);
+
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));
+
+ _dpk_kip_control_rfc(rtwdev, path, false);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);
+
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
+
+ _dpk_kip_control_rfc(rtwdev, path, true);
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x50121 | BIT(rtwdev->dbcc_en));
+ rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
+ } else {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x50101 | BIT(rtwdev->dbcc_en));
+ rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
+ rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
+
+ rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
+ }
+}
+
+static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
+ } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
+ } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u16 dc_i, dc_q;
+ u8 corr_val, corr_idx, rxbb;
+ u8 rxbb_ov;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
+ corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
+
+ dpk->corr_idx[path][kidx] = corr_idx;
+ dpk->corr_val[path][kidx] = corr_val;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+ dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+ dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
+
+ dc_i = abs(sign_extend32(dc_i, 11));
+ dc_q = abs(sign_extend32(dc_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
+ path, corr_idx, corr_val, dc_i, dc_q);
+
+ dpk->dc_i[path][kidx] = dc_i;
+ dpk->dc_q[path][kidx] = dc_q;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
+ rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
+ rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
+ path, rxbb,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
+ rxbb_ov);
+
+ return dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+        corr_val < DPK_SYNC_TH_CORR;
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+ u16 dgain = 0x0;
+
+ rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
+
+ dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);
+
+ return dgain;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+ u8 result;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+
+ result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);
+
+ return result;
+}
+
+static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
+ dpk->cur_k_set =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xe0000000) - 1;
+}
+
+static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
+{
+ if (set_from_bb) {
+ dbm = clamp_t(u8, dbm, 7, 24);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
+ }
+ _dpk_one_shot(rtwdev, phy, path, D_TXAGC);
+ _dpk_kset_query(rtwdev, path);
+}
+
+static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
+ _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);
+
+ return _dpk_gainloss_read(rtwdev);
+}
+
+static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+{
+ u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
+
+ if (is_check) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+ val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val1_i = abs(sign_extend32(val1_i, 11));
+ val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val1_q = abs(sign_extend32(val1_q, 11));
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+ val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val2_i = abs(sign_extend32(val2_i, 11));
+ val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val2_q = abs(sign_extend32(val2_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
+ } else {
+ for (i = 0; i < 32; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+ }
+
+ /* The threshold is only meaningful with is_check; val1/val2 stay zero otherwise */
+ if (!is_check)
+ return false;
+
+ return val1_i * val1_i + val1_q * val1_q >=
+        (val2_i * val2_i + val2_q * val2_q) * 8 / 5;
+}
+
+static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, D_RXAGC);
+
+ return _dpk_sync_check(rtwdev, path, kidx);
+}
+
+static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
+{
+ u32 addr;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);
+
+ for (addr = 0; addr < 0x200; addr++) {
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
+}
+
+static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
+}
+
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+ u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
+ u8 tmp_rxbb;
+ u8 goout = 0, agc_cnt = 0;
+ u16 dgain = 0;
+ bool is_fail = false;
+ int limit = 200;
+
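+ /* AGC loop: check sync/digital gain, measure gain loss, then either step
+  * the TX power up/down or fold the gain-loss index into RXBB; bounded by
+  * agc_cnt and a hard iteration limit.
+  */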
+ do {
+ switch (step) {
+ case DPK_AGC_STEP_SYNC_DGAIN:
+ is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);
+
+ if (RTW8852C_DPK_RXSRAM_DBG)
+ _dpk_read_rxsram(rtwdev);
+
+ if (is_fail) {
+ goout = 1;
+ break;
+ }
+
+ dgain = _dpk_dgain_read(rtwdev);
+
+ if (dgain > 0x5fc || dgain < 0x556) {
+ _dpk_one_shot(rtwdev, phy, path, D_SYNC);
+ dgain = _dpk_dgain_read(rtwdev);
+ }
+
+ if (agc_cnt == 0) {
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+ _dpk_bypass_rxiqc(rtwdev, path);
+ else
+ _dpk_lbk_rxiqk(rtwdev, phy, path);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ break;
+
+ case DPK_AGC_STEP_GAIN_LOSS_IDX:
+ tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
+
+ if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
+ tmp_gl_idx >= 7)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else
+ step = DPK_AGC_STEP_SET_TX_GAIN;
+ break;
+
+ case DPK_AGC_STEP_GL_GT_CRITERION:
+ if (tmp_dbm <= 7) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n");
+ } else {
+ tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
+ _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
+ }
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GL_LT_CRITERION:
+ if (tmp_dbm >= 24) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n");
+ } else {
+ tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
+ _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
+ }
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_SET_TX_GAIN:
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
+ tmp_rxbb = min_t(u8, tmp_rxbb + tmp_gl_idx, 0x1f);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n",
+ tmp_gl_idx, tmp_rxbb);
+ _dpk_kip_control_rfc(rtwdev, path, true);
+ goout = 1;
+ break;
+ default:
+ goout = 1;
+ break;
+ }
+ } while (!goout && agc_cnt < 6 && --limit > 0);
+
+ if (limit <= 0)
+ rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");
+
+ return is_fail;
+}
+
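+/* Load the RFK table for the requested MDPD order (the (x,y,z) sets logged below) */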
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
+{
+ static const struct rtw89_rfk_tbl *order_tbls[] = {
+ &rtw8852c_dpk_mdpd_order0_defs_tbl,
+ &rtw8852c_dpk_mdpd_order1_defs_tbl,
+ &rtw8852c_dpk_mdpd_order2_defs_tbl,
+ &rtw8852c_dpk_mdpd_order3_defs_tbl,
+ };
+
+ if (order >= ARRAY_SIZE(order_tbls)) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+ return;
+ }
+
+ rtw89_rfk_parser(rtwdev, order_tbls[order]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
+ order == 0x0 ? "(5,3,1)" :
+ order == 0x1 ? "(5,3,0)" :
+ order == 0x2 ? "(5,0,0)" : "(7,3,1)");
+}
+
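+/*
+ * Run the MDPK IDL stage: pick the MDPD order from the MPA type flags
+ * (or from bandwidth as a fallback), fire MDPK_IDL, and retry up to five
+ * times while the sync-error flag stays set, finally forcing order 0x2.
+ */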
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 cnt;
+ u8 ov_flag;
+ u32 dpk_sync;
+
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
+ _dpk_set_mdpd_para(rtwdev, 0x1);
+ else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
+ _dpk_set_mdpd_para(rtwdev, 0x0);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
+ _dpk_set_mdpd_para(rtwdev, 0x1);
+ else
+ _dpk_set_mdpd_para(rtwdev, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
+ fsleep(1000);
+
+ _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+ dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
+ ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
+ for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
+ _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
+ ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
+ }
+
+ if (ov_flag) {
+ _dpk_set_mdpd_para(rtwdev, 0x2);
+ _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
+ }
+}
+
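+/* Reuse a stored DPK result if one matches the current band and channel */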
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ bool is_reload = false;
+ u8 idx, cur_band, cur_ch;
+
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
+
+ for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+ if (cur_band != dpk->bp[path][idx].band ||
+ cur_ch != dpk->bp[path][idx].ch)
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, idx);
+ dpk->cur_idx[path] = idx;
+ is_reload = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] reload S%d[%d] success\n", path, idx);
+ }
+
+ return is_reload;
+}
+
+static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
+{
+ rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
+ &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
+}
+
+static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ if (rtwdev->hal.cv == CHIP_CAV)
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_SEL, 0x01);
+ else
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_SEL, 0x0c);
+
+ _dpk_kip_control_rfc(rtwdev, path, true);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
+
+ _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
+}
+
+static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define _DPK_PARA_TXAGC GENMASK(15, 10)
+#define _DPK_PARA_THER GENMASK(31, 26)
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u32 para;
+
+ para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+ MASKDWORD);
+
+ dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
+ dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
+ dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
+}
+
+static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, bool is_execute)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (is_execute) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
+
+ _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+ 0x0000007F, 0x5b);
+ }
+ dpk->bp[path][kidx].gs =
+ rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
+ 0x0000007F);
+}
+
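+/* Map the B_LDL_NORM_OP readback onto the B_DPD_ORDER field encoding (0xff = invalid) */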
+static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
+{
+ u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
+ u8 val;
+
+ switch (val32) {
+ case 0:
+ val = 0x6;
+ break;
+ case 1:
+ val = 0x2;
+ break;
+ case 2:
+ val = 0x0;
+ break;
+ case 3:
+ val = 0x7;
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
+
+ return val;
+}
+
+static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_ORDER, _dpk_order_convert(rtwdev));
+
+ dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
+ dpk->bp[path][kidx].path_ok = true;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
+ path, kidx, dpk->bp[path][kidx].mdpd_en);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);
+
+ _dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
+}
+
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 kidx = dpk->cur_idx[path];
+ u8 init_xdbm = 15;
+ bool is_fail;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ _rf_direct_cntrl(rtwdev, path, false);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
+ _dpk_rf_setting(rtwdev, gain, path, kidx);
+ _set_rx_dck(rtwdev, phy, path, false);
+ _dpk_kip_pwr_clk_onoff(rtwdev, true);
+ _dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
+ _dpk_txpwr_bb_force(rtwdev, path, true);
+ _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
+ _dpk_tpg_sel(rtwdev, path, kidx);
+
+ is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
+ if (is_fail)
+ goto _error;
+
+ _dpk_idl_mpa(rtwdev, phy, path, kidx);
+ _dpk_para_query(rtwdev, path, kidx);
+ _dpk_on(rtwdev, phy, path, kidx);
+
+_error:
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
+ dpk->cur_k_set, is_fail ? "need Check" : "is Success");
+
+ return is_fail;
+}
+
+static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].path_ok = false;
+}
+
+static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
+{
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, is_bybb ? 0x1 : 0x0);
+}
+
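+/*
+ * Top-level DPK flow for every path in kpath: try reloading stored results,
+ * back up KIP and RF registers plus TSSI state, run _dpk_main() per path,
+ * then restore everything and power the KIP block back down.
+ */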
+static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy, u8 kpath)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
+ u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
+ u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
+ u8 path;
+ bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
+
+ if (dpk->is_dpk_reload_en) {
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ if (!reloaded[path] && dpk->bp[path][0].ch != 0)
+ dpk->cur_idx[path] = !dpk->cur_idx[path];
+ else
+ _dpk_onoff(rtwdev, path, false);
+ }
+ } else {
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
+ dpk->cur_idx[path] = 0;
+ }
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Init =========\n",
+ path, dpk->cur_idx[path]);
+ _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _dpk_information(rtwdev, phy, path);
+ _dpk_init(rtwdev, path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, true);
+ }
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n",
+ path, dpk->cur_idx[path]);
+ rtw8852c_disable_rxagc(rtwdev, path, 0x0);
+ _dpk_drf_direct_cntrl(rtwdev, path, false);
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+ is_fail = _dpk_main(rtwdev, phy, path, 1);
+ _dpk_onoff(rtwdev, path, is_fail);
+ }
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Restore =========\n",
+ path, dpk->cur_idx[path]);
+ _dpk_kip_restore(rtwdev, phy, path);
+ _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
+ _dpk_bb_afe_restore(rtwdev, path);
+ rtw8852c_disable_rxagc(rtwdev, path, 0x1);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, false);
+ }
+
+ _dpk_kip_pwr_clk_onoff(rtwdev, false);
+}
+
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_fem_info *fem = &rtwdev->fem;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
+
+ if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
+ return true;
+ } else if (fem->epa_2g && band == RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_5g && band == RTW89_BAND_5G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_6g && band == RTW89_BAND_6G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ if (kpath & BIT(path))
+ _dpk_onoff(rtwdev, path, true);
+ }
+}
+
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
+ RTW8852C_DPK_VER, rtwdev->hal.cv,
+ RTW8852C_RF_REL_VERSION);
+
+ if (_dpk_bypass_check(rtwdev, phy))
+ _dpk_force_bypass(rtwdev, phy);
+ else
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
+ rtw8852c_rx_dck(rtwdev, phy, false);
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool off)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 val, kidx = dpk->cur_idx[path];
+
+ val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ?
+ dpk->bp[path][kidx].mdpd_en : 0;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_MEN, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+ kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
+}
+
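+/*
+ * Thermal tracking: compare the current (EWMA) thermal against the value
+ * captured at calibration time and, on CAV silicon, rewrite the PWSF
+ * boundary with half of the delta folded in.
+ */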
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 path, kidx;
+ u8 txagc_rf = 0;
+ s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
+ u8 cur_ther;
+ s8 delta_ther = 0;
+ s16 pwsf_tssi_ofst;
+
+ for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
+ kidx = dpk->cur_idx[path];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+ path, kidx, dpk->bp[path][kidx].ch);
+
+ txagc_rf =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
+ txagc_bb =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
+ txagc_bb_tp =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);
+
+ /* report from KIP */
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
+ cur_ther =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
+ txagc_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
+ pwsf_tssi_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
+ pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);
+
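+ /* The EWMA-averaged thermal below supersedes the raw KIP report readback */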
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+ if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
+ delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+ delta_ther /= 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
+ delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
+ txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
+ dpk->bp[path][kidx].txagc_dpk);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
+ txagc_ofst, pwsf_tssi_ofst);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+ txagc_bb_tp, txagc_bb);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
+ txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x07FC0000, 0x78 - delta_ther);
+ }
+ }
+}
+
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_sys_defs_2g_a_tbl,
+ &rtw8852c_tssi_sys_defs_5g_a_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_sys_defs_2g_b_tbl,
+ &rtw8852c_tssi_sys_defs_5g_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
+ &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
+ &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
+}
+
+static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (path == RF_PATH_A) {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_dck_defs_2g_a_tbl,
+ &rtw8852c_tssi_dck_defs_5g_a_tbl);
+ } else {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_dck_defs_2g_b_tbl,
+ &rtw8852c_tssi_dck_defs_5g_b_tbl);
+ }
+}
+
+static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_set_bbgain_split_a_tbl,
+ &rtw8852c_tssi_set_bbgain_split_b_tbl);
+}
+
+static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
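+/* Pack four consecutive s8 thermal offsets into one little-endian u32 word */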
+#define RTW8852C_TSSI_GET_VAL(ptr, idx) \
+({ \
+ s8 *__ptr = (ptr); \
+ u8 __idx = (idx), __i, __v; \
+ u32 __val = 0; \
+ for (__i = 0; __i < 4; __i++) { \
+ __v = (__ptr[__idx + __i]); \
+ __val |= (__v << (8 * __i)); \
+ } \
+ __val; \
+})
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
+ u8 thermal = 0xff;
+ s8 thm_ofst[64] = {0};
+ u32 tmp = 0;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
+ break;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
+ break;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
+ break;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
+ break;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
+ thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
+ thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
+ thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
+ break;
+ }
+
+ if (path == RF_PATH_A) {
+ thermal = tssi_info->thermal[RF_PATH_A];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_a[i++] :
+ -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_a[i++] :
+ thm_up_a[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
+
+ } else {
+ thermal = tssi_info->thermal[RF_PATH_B];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_b[i++] :
+ -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_b[i++] :
+ thm_up_b[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
+ }
+#undef RTW8852C_TSSI_GET_VAL
+}
+
+static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (path == RF_PATH_A) {
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
+ &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
+ } else {
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
+ &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
+ }
+}
+
+static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ const struct rtw89_rfk_tbl *tbl;
+
+ if (path == RF_PATH_A) {
+ if (band == RTW89_BAND_2G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
+ else if (band == RTW89_BAND_6G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
+ else
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
+ } else {
+ if (band == RTW89_BAND_2G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
+ else if (band == RTW89_BAND_6G)
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
+ else
+ tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
+ }
+
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_slope_defs_a_tbl,
+ &rtw8852c_tssi_slope_defs_b_tbl);
+}
+
+static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_run_slope_defs_a_tbl,
+ &rtw8852c_tssi_run_slope_defs_b_tbl);
+}
+
+static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_track_defs_a_tbl,
+ &rtw8852c_tssi_track_defs_b_tbl);
+}
+
+static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
+ &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
+}
+
+static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ for (i = path; i < path_max; i++) {
+ _tssi_set_track(rtwdev, phy, i);
+ _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
+
+ rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
+ &rtw8852c_tssi_enable_defs_a_tbl,
+ &rtw8852c_tssi_enable_defs_b_tbl);
+
+ tssi_info->base_thermal[i] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
+ rtwdev->is_tssi_mode[i] = true;
+ }
+}
+
+static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ for (i = path; i < path_max; i++) {
+ if (i == RF_PATH_A) {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ } else if (i == RF_PATH_B) {
+ rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ }
+ }
+}
+
+static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
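+/*
+ * "Extra" groups mark channels that fall between two calibration groups;
+ * their DE value is the average of the two neighbouring groups.
+ */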
+#define TSSI_EXTRA_GROUP_BIT (BIT(31))
+#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
+#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 5:
+ return 0;
+ case 6 ... 8:
+ return TSSI_EXTRA_GROUP(0);
+ case 9 ... 13:
+ return 1;
+ case 14 ... 16:
+ return TSSI_EXTRA_GROUP(1);
+ case 17 ... 21:
+ return 2;
+ case 22 ... 24:
+ return TSSI_EXTRA_GROUP(2);
+ case 25 ... 29:
+ return 3;
+ case 33 ... 37:
+ return 4;
+ case 38 ... 40:
+ return TSSI_EXTRA_GROUP(4);
+ case 41 ... 45:
+ return 5;
+ case 46 ... 48:
+ return TSSI_EXTRA_GROUP(5);
+ case 49 ... 53:
+ return 6;
+ case 54 ... 56:
+ return TSSI_EXTRA_GROUP(6);
+ case 57 ... 61:
+ return 7;
+ case 65 ... 69:
+ return 8;
+ case 70 ... 72:
+ return TSSI_EXTRA_GROUP(8);
+ case 73 ... 77:
+ return 9;
+ case 78 ... 80:
+ return TSSI_EXTRA_GROUP(9);
+ case 81 ... 85:
+ return 10;
+ case 86 ... 88:
+ return TSSI_EXTRA_GROUP(10);
+ case 89 ... 93:
+ return 11;
+ case 97 ... 101:
+ return 12;
+ case 102 ... 104:
+ return TSSI_EXTRA_GROUP(12);
+ case 105 ... 109:
+ return 13;
+ case 110 ... 112:
+ return TSSI_EXTRA_GROUP(13);
+ case 113 ... 117:
+ return 14;
+ case 118 ... 120:
+ return TSSI_EXTRA_GROUP(14);
+ case 121 ... 125:
+ return 15;
+ case 129 ... 133:
+ return 16;
+ case 134 ... 136:
+ return TSSI_EXTRA_GROUP(16);
+ case 137 ... 141:
+ return 17;
+ case 142 ... 144:
+ return TSSI_EXTRA_GROUP(17);
+ case 145 ... 149:
+ return 18;
+ case 150 ... 152:
+ return TSSI_EXTRA_GROUP(18);
+ case 153 ... 157:
+ return 19;
+ case 161 ... 165:
+ return 20;
+ case 166 ... 168:
+ return TSSI_EXTRA_GROUP(20);
+ case 169 ... 173:
+ return 21;
+ case 174 ... 176:
+ return TSSI_EXTRA_GROUP(21);
+ case 177 ... 181:
+ return 22;
+ case 182 ... 184:
+ return TSSI_EXTRA_GROUP(22);
+ case 185 ... 189:
+ return 23;
+ case 193 ... 197:
+ return 24;
+ case 198 ... 200:
+ return TSSI_EXTRA_GROUP(24);
+ case 201 ... 205:
+ return 25;
+ case 206 ... 208:
+ return TSSI_EXTRA_GROUP(25);
+ case 209 ... 213:
+ return 26;
+ case 214 ... 216:
+ return TSSI_EXTRA_GROUP(26);
+ case 217 ... 221:
+ return 27;
+ case 225 ... 229:
+ return 28;
+ case 230 ... 232:
+ return TSSI_EXTRA_GROUP(28);
+ case 233 ... 237:
+ return 29;
+ case 238 ... 240:
+ return TSSI_EXTRA_GROUP(29);
+ case 241 ... 245:
+ return 30;
+ case 246 ... 248:
+ return TSSI_EXTRA_GROUP(30);
+ case 249 ... 253:
+ return 31;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(2);
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(4);
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 13:
+ return 0;
+ case 14 ... 16:
+ return TSSI_EXTRA_GROUP(0);
+ case 17 ... 29:
+ return 1;
+ case 33 ... 45:
+ return 2;
+ case 46 ... 48:
+ return TSSI_EXTRA_GROUP(2);
+ case 49 ... 61:
+ return 3;
+ case 65 ... 77:
+ return 4;
+ case 78 ... 80:
+ return TSSI_EXTRA_GROUP(4);
+ case 81 ... 93:
+ return 5;
+ case 97 ... 109:
+ return 6;
+ case 110 ... 112:
+ return TSSI_EXTRA_GROUP(6);
+ case 113 ... 125:
+ return 7;
+ case 129 ... 141:
+ return 8;
+ case 142 ... 144:
+ return TSSI_EXTRA_GROUP(8);
+ case 145 ... 157:
+ return 9;
+ case 161 ... 173:
+ return 10;
+ case 174 ... 176:
+ return TSSI_EXTRA_GROUP(10);
+ case 177 ... 189:
+ return 11;
+ case 193 ... 205:
+ return 12;
+ case 206 ... 208:
+ return TSSI_EXTRA_GROUP(12);
+ case 209 ... 221:
+ return 13;
+ case 225 ... 237:
+ return 14;
+ case 238 ... 240:
+ return TSSI_EXTRA_GROUP(14);
+ case 241 ... 253:
+ return 15;
+ }
+
+ return 0;
+}
+
+static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 gidx, gidx_1st, gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ s8 val;
+
+ if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
+ gidx = _tssi_get_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+ } else {
+ gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_6g_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+ }
+
+ return val;
+}
+
+static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 tgidx, tgidx_1st, tgidx_2nd;
+ s8 tde_1st = 0;
+ s8 tde_2nd = 0;
+ s8 val;
+
+ if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
+ tgidx = _tssi_get_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+ } else {
+ tgidx = _tssi_get_6g_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim_6g[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+ }
+
+ return val;
+}
+
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 gidx;
+ s8 ofdm_de;
+ s8 trim_de;
+ s32 val;
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ for (i = path; i < path_max; i++) {
+ gidx = _tssi_get_cck_group(rtwdev, ch);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = tssi_info->tssi_cck[i][gidx] + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
+ i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_cck_long[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
+ _TSSI_DE_MASK));
+
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = ofdm_de + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
+ i, ofdm_de, trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_mcs_20m[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
+ _TSSI_DE_MASK));
+ }
+}
+
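+/*
+ * 0x5818/0x7818 (BIT(30)) gate TSSI tracking and 0x5820/0x7820 (BIT(31))
+ * gate TSSI itself for paths A/B; note the bits are active-low here: the
+ * enable path writes 0x0 and the disable path writes 0x1.
+ */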
+static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk[2] = {0x5818, 0x7818};
+ static const u32 tssi_en[2] = {0x5820, 0x7820};
+
+ if (en) {
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
+ rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
+ if (rtwdev->dbcc_en && path == RF_PATH_B)
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
+ else
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
+ rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
+ }
+}
+
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (!rtwdev->dbcc_en) {
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw89_bandwidth bw, bool is_dav)
+{
+ u32 rf_reg18;
+ u32 reg_reg18_addr;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ if (is_dav)
+ reg_reg18_addr = RR_CFGCH;
+ else
+ reg_reg18_addr = RR_CFGCH_V1;
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~RR_CFGCH_BW;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
+}
+
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ bool is_dav;
+ u8 kpath, path;
+ u32 tmp = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < 2; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ is_dav = true;
+ _bw_setting(rtwdev, path, bw, is_dav);
+ is_dav = false;
+ _bw_setting(rtwdev, path, bw, is_dav);
+ if (rtwdev->dbcc_en)
+ continue;
+
+ if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
+ fsleep(100);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+ }
+ }
+}
+
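+/* Write the channel number and band-select bits into one path's CFGCH
+ * register; is_dav picks the RR_CFGCH vs RR_CFGCH_V1 image.
+ */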
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 central_ch, enum rtw89_band band, bool is_dav)
+{
+ u32 rf_reg18;
+ u32 rf_reg18_addr;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ if (is_dav)
+ rf_reg18_addr = RR_CFGCH;
+ else
+ rf_reg18_addr = RR_CFGCH_V1;
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, rf_reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
+ break;
+ case RTW89_BAND_5G:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+ break;
+ case RTW89_BAND_6G:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
+ break;
+ default:
+ break;
+ }
+ rtw89_write_rf(rtwdev, path, rf_reg18_addr, RFREG_MASK, rf_reg18);
+ fsleep(100);
+}
+
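+/* Validate the requested channel against the per-band channel plan
+ * (rejecting the gaps in the 5G plan and invalid 6G numbers), then
+ * program both CFGCH register images of every path in kpath.
+ */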
+static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band)
+{
+ u8 kpath, path;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
+ if (band != RTW89_BAND_6G) {
+ if ((central_ch > 14 && central_ch < 36) ||
+ (central_ch > 64 && central_ch < 100) ||
+ (central_ch > 144 && central_ch < 149) || central_ch > 177)
+ return;
+ } else {
+ if (central_ch > 253 || central_ch == 2)
+ return;
+ }
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < 2; path++) {
+ if (kpath & BIT(path)) {
+ _ch_setting(rtwdev, path, central_ch, band, true);
+ _ch_setting(rtwdev, path, central_ch, band, false);
+ }
+ }
+}
+
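+/* Set the RX baseband filter bandwidth of each path in kpath through
+ * the RF LUT write sequence (RR_LUTWE2/RR_LUTWA/RR_LUTWD0).
+ */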
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ u8 kpath;
+ u8 path;
+ u32 val;
+
+ kpath = _kpath(rtwdev, phy);
+ for (path = 0; path < 2; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ val = 0x1b;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ val = 0x13;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ val = 0xb;
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ default:
+ val = 0x3;
+ break;
+ }
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+ }
+}
+
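+/* Record the averaged thermal value of each path as the baseline that
+ * rtw8852c_lck_track() compares against.
+ */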
+static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_lck_info *lck = &rtwdev->lck;
+ int path;
+
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ lck->thermal[path] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
+ }
+}
+
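+/* Re-run LO calibration (LCK): rewrite each path's CFGCH value with the
+ * LCK trigger asserted, then refresh the thermal baseline.
+ */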
+static void _lck(struct rtw89_dev *rtwdev)
+{
+ u32 tmp18[2];
+ int path_num = rtwdev->dbcc_en ? 2 : 1;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
+
+ tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+
+ for (i = 0; i < path; i++) {
+ rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
+ rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ }
+
+ _lck_keep_thermal(rtwdev);
+}
+
+#define RTW8852C_LCK_TH 8
+
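+/* Re-run LCK once any path has drifted by at least RTW8852C_LCK_TH
+ * thermal codes from the baseline taken at the last calibration.
+ */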
+void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_lck_info *lck = &rtwdev->lck;
+ u8 cur_thermal;
+ int delta;
+ int path;
+
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ cur_thermal =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ delta = abs((int)cur_thermal - lck->thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
+ path, cur_thermal, delta);
+
+ if (delta >= RTW8852C_LCK_TH) {
+ _lck(rtwdev);
+ return;
+ }
+ }
+}
+
+void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
+{
+ _lck_keep_thermal(rtwdev);
+}
+
+static
+void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band,
+ enum rtw89_bandwidth bw)
+{
+ _ctrl_ch(rtwdev, phy, central_ch, band);
+ _ctrl_bw(rtwdev, phy, bw);
+ _rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
+ chan->band_type,
+ chan->band_width);
+}
+
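+/* Record the current channel and band in the next free slot of the MCC
+ * channel table, treating the table as a ring of RTW89_IQK_CHS_NR
+ * entries.
+ */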
+void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ u8 idx = mcc_info->table_idx;
+ int i;
+
+ for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
+ if (mcc_info->ch[idx] == 0)
+ break;
+ if (++idx >= RTW89_IQK_CHS_NR)
+ idx = 0;
+ }
+
+ mcc_info->table_idx = idx;
+ mcc_info->ch[idx] = chan->channel;
+ mcc_info->band[idx] = chan->band_type;
+}
+
+void rtw8852c_rck(struct rtw89_dev *rtwdev)
+{
+ u8 path;
+
+ for (path = 0; path < 2; path++)
+ _rck(rtwdev, path);
+}
+
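+/* Run DAC calibration on PHY 0, bracketed by BTC notifications so the
+ * BT coexistence mechanism knows an RF calibration is in flight.
+ */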
+void rtw8852c_dack(struct rtw89_dev *rtwdev)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+ _dac_cal(rtwdev, false);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
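+/* IQ imbalance calibration: notify BTC, stop scheduled TX and wait for
+ * the paths to return to RX mode, run the IQK sequence, then resume
+ * scheduled TX.
+ */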
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+#define RXDCK_VER_8852C 0xe
+
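+/* RX DC offset calibration: for each path in kpath, temporarily flip
+ * the TSSI tracking bit if TSSI is active, hold the RSV1 reset bit low,
+ * force the path into RX mode, run the DCK sequence, record the thermal
+ * baseline, and finally restore RR_RSV1.
+ */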
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+{
+ struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+ u8 path, kpath;
+ u32 rf_reg5;
+
+ kpath = _kpath(rtwdev, phy);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
+ RXDCK_VER_8852C, rtwdev->hal.cv);
+
+ for (path = 0; path < 2; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _set_rx_dck(rtwdev, phy, path, is_afe);
+ rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x0);
+ }
+}
+
+#define RTW8852C_RX_DCK_TH 8
+
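+/* Re-run RX DCK on PHY 0 once any path has drifted by at least
+ * RTW8852C_RX_DCK_TH thermal codes from the baseline recorded during
+ * the last calibration.
+ */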
+void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+ u8 cur_thermal;
+ int delta;
+ int path;
+
+ for (path = 0; path < RF_PATH_NUM_8852C; path++) {
+ cur_thermal =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ delta = abs((int)cur_thermal - rx_dck->thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
+ path, cur_thermal, delta);
+
+ if (delta >= RTW8852C_RX_DCK_TH) {
+ rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
+ return;
+ }
+ }
+}
+
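+/* Digital predistortion calibration, using the same TX-quiesce bracket
+ * as IQK; reload of previous DPK results is disabled so a full
+ * calibration is performed.
+ */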
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ rtwdev->dpk.is_dpk_enable = true;
+ rtwdev->dpk.is_dpk_reload_en = false;
+ _dpk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
+{
+ _dpk_track(rtwdev);
+}
+
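+/* Full TSSI bring-up for one PHY: disable TSSI, run the complete
+ * per-path init sequence (system config, BB TX power control, DCK,
+ * thermal meter table, slope calibration, alignment defaults, slope
+ * start), then re-enable TSSI and load the efuse DE values. Under DBCC
+ * each PHY initializes only its own path.
+ */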
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = path; i < path_max; i++) {
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_bbgain_split(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_slope(rtwdev, phy, i);
+ _tssi_run_slope(rtwdev, phy, i);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
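+/* Reduced TSSI re-init used while scanning: bail out unless TSSI is
+ * already running on both paths, then replay only a subset of the full
+ * init sequence before re-enabling TSSI.
+ */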
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
+ __func__, phy);
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A])
+ return;
+ if (!rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ if (rtwdev->dbcc_en) {
+ if (phy == RTW89_PHY_0) {
+ path = RF_PATH_A;
+ path_max = RF_PATH_B;
+ } else if (phy == RTW89_PHY_1) {
+ path = RF_PATH_B;
+ path_max = RF_PATH_NUM_8852C;
+ }
+ }
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = path; i < path_max; i++) {
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_set_aligk_default(rtwdev, phy, i);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
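+/* Latch a non-zero default TXAGC offset per path when a scan starts
+ * (retrying the readback up to six times), and write the latched
+ * offsets back with an offset-enable pulse when the scan ends.
+ */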
+static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 i;
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ if (enable) {
+ /* SCAN_START */
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_A] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
+ B_TXAGC_BB);
+ if (tssi_info->default_txagc_offset[RF_PATH_A])
+ break;
+ }
+ }
+
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_B] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
+ B_TXAGC_BB_S1);
+ if (tssi_info->default_txagc_offset[RF_PATH_B])
+ break;
+ }
+ }
+ } else {
+ /* SCAN_END */
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
+ tssi_info->default_txagc_offset[RF_PATH_A]);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
+ tssi_info->default_txagc_offset[RF_PATH_B]);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
+ }
+}
+
+void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
+ bool scan_start, enum rtw89_phy_idx phy_idx)
+{
+ rtw8852c_tssi_default_txagc(rtwdev, phy_idx, scan_start);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
new file mode 100644
index 000000000000..928a587cdd05
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_RFK_H__
+#define __RTW89_8852C_RFK_H__
+
+#include "core.h"
+
+void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_rck(struct rtw89_dev *rtwdev);
+void rtw8852c_dack(struct rtw89_dev *rtwdev);
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
+void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev);
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx);
+void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw8852c_lck_init(struct rtw89_dev *rtwdev);
+void rtw8852c_lck_track(struct rtw89_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c
new file mode 100644
index 000000000000..d727d528b365
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "rtw8852c_rfk_table.h"
+
+static const struct rtw89_reg5_def rtw8852c_dack_reload_defs[] = {
+ RTW89_DECL_RFK_WM(0xc004, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc024, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc104, BIT(17), 0x1),
+ RTW89_DECL_RFK_WM(0xc124, BIT(17), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reload_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_a[] = {
+ RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x0),
+ RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_b[] = {
+ RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x0),
+ RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_dack_defs_s0[] = {
+ RTW89_DECL_RFK_WM(0x12b8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc004, 0xfff00000, 0x30),
+ RTW89_DECL_RFK_WM(0xc024, 0xfff00000, 0x30),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s0);
+
+static const struct rtw89_reg5_def rtw8852c_dack_defs_s1[] = {
+ RTW89_DECL_RFK_WM(0x32b8, BIT(30), 0x1),
+ RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1),
+ RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0xc104, 0xfff00000, 0x30),
+ RTW89_DECL_RFK_WM(0xc124, 0xfff00000, 0x30),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s1);
+
+static const struct rtw89_reg5_def rtw8852c_drck_defs[] = {
+ RTW89_DECL_RFK_WM(0xc0c4, BIT(6), 0x0),
+ RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x1),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_drck_defs);
+
+static const struct rtw89_reg5_def rtw8852c_iqk_rxk_cfg_defs[] = {
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0f),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x03),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0001),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_rxk_cfg_defs);
+
+static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00100000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5670, MASKDWORD, 0x00000000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x0),
+ RTW89_DECL_RFK_WRF(RF_PATH_A, 0x10005, 0x00001, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00200000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x1),
+ RTW89_DECL_RFK_WM(0x20fc, 0x20000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7670, MASKDWORD, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00),
+ RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x0),
+ RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x0),
+ RTW89_DECL_RFK_WRF(RF_PATH_B, 0x10005, 0x00001, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_read_rxsram_pre_defs[] = {
+ RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x1),
+ RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x1),
+ RTW89_DECL_RFK_WM(0x80d4, MASKDWORD, 0x00020000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_pre_defs);
+
+static const struct rtw89_reg5_def rtw8852c_read_rxsram_post_defs[] = {
+ RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x0),
+ RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_post_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order0_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x0),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x2),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order0_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order1_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x1),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x1),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order1_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order2_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x2),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x0),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order2_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order3_defs[] = {
+ RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x3),
+ RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x3),
+ RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4),
+ RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order3_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_on_defs[] = {
+ RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000080),
+ RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x807f030a),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_on_defs);
+
+static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_off_defs[] = {
+ RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000000),
+ RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x80000000),
+ RTW89_DECL_RFK_WM(0x80f4, BIT(18), 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_off_defs);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs[] = {
+ RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0xb5b5),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1f19),
+ RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x1c),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041),
+ RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x2001),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x026d000),
+ RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x3dc80280),
+ RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x026d000),
+ RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x3dc80280),
+ RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x0),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00000fff, 0x0),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00fff000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x0),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00000fff, 0x0),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00fff000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x1af),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x1af),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x0003c000, 0xb),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x6),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x0003c000, 0xb),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x6),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_a[] = {
+ RTW89_DECL_RFK_WM(0x5818, 0x08000000, 0x1),
+ RTW89_DECL_RFK_WM(0x58d4, 0xf0000000, 0x7),
+ RTW89_DECL_RFK_WM(0x58f0, 0x000c0000, 0x1),
+ RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x400),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_b[] = {
+ RTW89_DECL_RFK_WM(0x7818, 0x08000000, 0x1),
+ RTW89_DECL_RFK_WM(0x78d4, 0xf0000000, 0x7),
+ RTW89_DECL_RFK_WM(0x78f0, 0x000c0000, 0x1),
+ RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x400),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x0808081e),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x081d),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0204020),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x020),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08081e21),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x1d23),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_a[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x2d2721),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x3b8),
+ RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3d2),
+ RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x042),
+ RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x06b),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x3bc),
+ RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3d6),
+ RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x03e),
+ RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x06b),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_b[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x2d2721),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x3c0),
+ RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3da),
+ RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x002),
+ RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x071),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x3c8),
+ RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e2),
+ RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x00c),
+ RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x071),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_a[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x07d),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_b[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x07d),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_a[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x080),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_b[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9),
+ RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039),
+ RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x080),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x0f),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x28),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x76),
+ RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x0f),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x28),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x76),
+ RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x000),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x121),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x0),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x1ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x080),
+ RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x0),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x1ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x200),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x080),
+ RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58e4, 0x00003800, 0x1),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x1),
+ RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x0),
+ RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78e4, 0x00003800, 0x1),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x1),
+ RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x0),
+ RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WRF(0x0, 0x10055, 0x00080, 0x1),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WRF(0x1, 0x10055, 0x00080, 0x1),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_b);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_b);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h
new file mode 100644
index 000000000000..953a960ef1e8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_RFK_TABLE_H__
+#define __RTW89_8852C_RFK_TABLE_H__
+
+#include "phy.h"
+
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reload_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s0_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s1_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_drck_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_rxk_cfg_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_pre_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_post_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order0_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order1_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order2_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order3_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_on_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_off_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_b_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
new file mode 100644
index 000000000000..11f35e7a7f0e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -0,0 +1,36704 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c_table.h"
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = {
+ {0xF0FF0000, 0x00000000},
+ {0xF03300FF, 0x00000001},
+ {0xF03400FF, 0x00000002},
+ {0x70C, 0x00000020},
+ {0x704, 0x601E0100},
+ {0x4000, 0x00000000},
+ {0x4004, 0xCA014000},
+ {0x4008, 0xC751D4F0},
+ {0x400C, 0x44511475},
+ {0x4010, 0x00000000},
+ {0x4014, 0x00000000},
+ {0x44AC, 0x01F60380},
+ {0x4018, 0x4F4C4B4B},
+ {0x401C, 0x494A4E52},
+ {0x4020, 0x4D504E4B},
+ {0x4024, 0x4F4C4949},
+ {0x4028, 0x49484C50},
+ {0x402C, 0x4C50504C},
+ {0x4030, 0x54544D4A},
+ {0x4034, 0x504B5654},
+ {0x4038, 0x6A6C605A},
+ {0x403C, 0x48484848},
+ {0x4040, 0x48483D47},
+ {0x4044, 0x3D474848},
+ {0x4048, 0x51484848},
+ {0x404C, 0x4A4A404F},
+ {0x4050, 0x514F4C4A},
+ {0x4054, 0x524E4A4A},
+ {0x4058, 0x4A4A5154},
+ {0x405C, 0x53555554},
+ {0x4060, 0x45454545},
+ {0x4064, 0x45454144},
+ {0x4068, 0x40434445},
+ {0x406C, 0x44454545},
+ {0x4070, 0x44444043},
+ {0x4074, 0x42434444},
+ {0x4078, 0x46454444},
+ {0x407C, 0x44444843},
+ {0x4080, 0x4B4E4A47},
+ {0x4084, 0x514D4A49},
+ {0x4088, 0x4A495454},
+ {0x408C, 0x5454514D},
+ {0x4090, 0x524E4B4A},
+ {0x4094, 0x4C4B5455},
+ {0x4098, 0x55565550},
+ {0x409C, 0x5959504D},
+ {0x40A0, 0x544E5D5A},
+ {0x40A4, 0x7975665F},
+ {0x40A8, 0x48484848},
+ {0x40AC, 0x48483D47},
+ {0x40B0, 0x3D474848},
+ {0x40B4, 0x48484848},
+ {0x40B8, 0x48483E48},
+ {0x40BC, 0x3E4A4A49},
+ {0x40C0, 0x514E4948},
+ {0x40C4, 0x4A49404F},
+ {0x40C8, 0x42525555},
+ {0x40CC, 0x47474747},
+ {0x40D0, 0x47474747},
+ {0x40D4, 0x47474747},
+ {0x40D8, 0x48484848},
+ {0x40DC, 0x48474848},
+ {0x40E0, 0x4A484848},
+ {0x40E4, 0x49484847},
+ {0x40E8, 0x4847524D},
+ {0x40EC, 0x55544F4B},
+ {0x40F0, 0x00000000},
+ {0x4604, 0x4C4C4D4E},
+ {0x4608, 0x3D3D6A56},
+ {0x460C, 0x53515140},
+ {0x4610, 0x42404041},
+ {0x4614, 0x54544B48},
+ {0x4618, 0x795D5554},
+ {0x461C, 0x3E3E3D3D},
+ {0x4620, 0x47474240},
+ {0x4624, 0x55524A48},
+ {0x4ED4, 0x00000000},
+ {0x40F4, 0x00000006},
+ {0x4628, 0x00000000},
+ {0x4E9C, 0x26663333},
+ {0x4EA0, 0x6EDA4148},
+ {0x4EA4, 0x599A0000},
+ {0x4EA8, 0x40000000},
+ {0x4ED0, 0x00000001},
+ {0x40F8, 0x00000000},
+ {0x40FC, 0x8C30C30C},
+ {0x4100, 0x4C30C30C},
+ {0x4104, 0x0C30C30C},
+ {0x4108, 0x0C30C30C},
+ {0x410C, 0x0C30C30C},
+ {0x4110, 0x0C30C30C},
+ {0x4114, 0x28A28A28},
+ {0x4118, 0x28A28A28},
+ {0x411C, 0x28A28A28},
+ {0x4120, 0x28A28A28},
+ {0x4124, 0x28A28A28},
+ {0x4128, 0x28A28A28},
+ {0x412C, 0x06666666},
+ {0x4130, 0x33333333},
+ {0x4134, 0x33333333},
+ {0x4138, 0x33333333},
+ {0x413C, 0x00000031},
+ {0x462C, 0x0C30C30C},
+ {0x4630, 0x0C30C30C},
+ {0x4634, 0x28A28A28},
+ {0x4638, 0x28A28A28},
+ {0x463C, 0x33333333},
+ {0x4640, 0x00000033},
+ {0x4140, 0x5100600A},
+ {0x4144, 0x18363113},
+ {0x4148, 0x1D976DDC},
+ {0x414C, 0x1C072DD7},
+ {0x4150, 0x1127CDF4},
+ {0x4154, 0x1E37BDF1},
+ {0x4158, 0x1FB7F1D6},
+ {0x415C, 0x1EA7DDF9},
+ {0x4160, 0x1FE445DD},
+ {0x4164, 0x1F97F1FE},
+ {0x4168, 0x1FF781ED},
+ {0x416C, 0x1FA7F5FE},
+ {0x4170, 0x1E07B913},
+ {0x4174, 0x1FD7FDFF},
+ {0x4178, 0x1E17B9FA},
+ {0x417C, 0x19A66914},
+ {0x4180, 0x10F65598},
+ {0x4184, 0x14A5A111},
+ {0x4188, 0x1D3765DB},
+ {0x418C, 0x17C685CA},
+ {0x4190, 0x1107C5F3},
+ {0x4194, 0x1B5785EB},
+ {0x4198, 0x1F97ED8F},
+ {0x419C, 0x1BC7A5F3},
+ {0x41A0, 0x1FE43595},
+ {0x41A4, 0x1EB7D9FC},
+ {0x41A8, 0x1FE65DBE},
+ {0x41AC, 0x1EC7D9FC},
+ {0x41B0, 0x1976FCFF},
+ {0x41B4, 0x1F77F5FF},
+ {0x41B8, 0x1976FDEC},
+ {0x41BC, 0x198664EF},
+ {0x41C0, 0x11062D93},
+ {0x41C4, 0x10C4E910},
+ {0x41C8, 0x1CA759DB},
+ {0x41CC, 0x1335A9B5},
+ {0x41D0, 0x1097B9F3},
+ {0x41D4, 0x17B72DE1},
+ {0x41D8, 0x1F67ED42},
+ {0x41DC, 0x18074DE9},
+ {0x41E0, 0x1FD40547},
+ {0x41E4, 0x1D57ADF9},
+ {0x41E8, 0x1FE52182},
+ {0x41EC, 0x1D67B1F9},
+ {0x41F0, 0x14860CE1},
+ {0x41F4, 0x1EC7E9FE},
+ {0x41F8, 0x14860DD6},
+ {0x41FC, 0x195664C7},
+ {0x4200, 0x0005E58A},
+ {0x4204, 0x00000000},
+ {0x4208, 0x00000000},
+ {0x420C, 0x7A000000},
+ {0x4210, 0x0F9F3D7A},
+ {0x4214, 0x0040817C},
+ {0x4218, 0x00E10204},
+ {0x421C, 0x257D94CD},
+ {0x4220, 0x0802DB6D},
+ {0x4224, 0x00000200},
+ {0x4228, 0x04688000},
+ {0x4644, 0x00000000},
+ {0x4648, 0x00000000},
+ {0x464C, 0x00000000},
+ {0x4650, 0x00000020},
+ {0x4ECC, 0x00000001},
+ {0x422C, 0x0060B002},
+ {0x4230, 0x9A8249A8},
+ {0x4234, 0x26A1469E},
+ {0x4238, 0x2099A824},
+ {0x423C, 0x2359461C},
+ {0x4240, 0x1631A675},
+ {0x4244, 0x2C6B1D63},
+ {0x4248, 0x0000000E},
+ {0x424C, 0x00000001},
+ {0x4250, 0x00000001},
+ {0x4254, 0x00000000},
+ {0x4258, 0x00000000},
+ {0x425C, 0x00000000},
+ {0x4260, 0x01E0000C},
+ {0x4654, 0x00000000},
+ {0x4658, 0x00000000},
+ {0x465C, 0x0000001E},
+ {0x4E74, 0x00000000},
+ {0x4264, 0x00000000},
+ {0x4268, 0x00000000},
+ {0x426C, 0x0418317C},
+ {0x46C0, 0x00000001},
+ {0x4270, 0x00D6135C},
+ {0x46C4, 0x00000033},
+ {0x4274, 0x00000000},
+ {0x4278, 0x00000000},
+ {0x427C, 0x00000000},
+ {0x4280, 0x00000000},
+ {0x4284, 0x00000000},
+ {0x4288, 0x00000000},
+ {0x46D8, 0x00000000},
+ {0x46DC, 0x00000000},
+ {0x46E0, 0x00000000},
+ {0x46E4, 0x00000000},
+ {0x46E8, 0x00000000},
+ {0x428C, 0x00000000},
+ {0x4290, 0x00000000},
+ {0x4294, 0x00000000},
+ {0x4298, 0x84026000},
+ {0x429C, 0x0051AC20},
+ {0x46EC, 0x1020C040},
+ {0x46F0, 0xB8BEBEB8},
+ {0x46F4, 0x021102BE},
+ {0x46F8, 0x14221142},
+ {0x46FC, 0x18C4098C},
+ {0x4700, 0x00021084},
+ {0x42A0, 0x02024008},
+ {0x42A4, 0x00000000},
+ {0x42A8, 0x00000000},
+ {0x42AC, 0x22CE803C},
+ {0x42B0, 0x32000000},
+ {0x42B4, 0x996FD67D},
+ {0x42B8, 0xBD67D67D},
+ {0x42BC, 0x7D67D65B},
+ {0x42C0, 0x28029F59},
+ {0x42C4, 0x00280280},
+ {0x4704, 0x00000000},
+ {0x42C8, 0x00000000},
+ {0x42CC, 0x00000000},
+ {0x42D0, 0x00000003},
+ {0x4708, 0x00280000},
+ {0x42D4, 0x00000001},
+ {0x42D8, 0x61861800},
+ {0x42DC, 0x830C30C3},
+ {0x42E0, 0xC30C30C3},
+ {0x42E4, 0x830C30C3},
+ {0x42E8, 0x451450C3},
+ {0x42EC, 0x05145145},
+ {0x42F0, 0x05145145},
+ {0x42F4, 0x05145145},
+ {0x42F8, 0x03207145},
+ {0x42FC, 0x041C32C6},
+ {0x4300, 0x031C5247},
+ {0x4304, 0x030C5143},
+ {0x4308, 0x030C30C3},
+ {0x430C, 0x0F3CF3C3},
+ {0x4310, 0x0F3CF3CF},
+ {0x4314, 0x0F3CF3CF},
+ {0x4318, 0x0F3CF3CF},
+ {0x431C, 0x0F3CF3CF},
+ {0x4320, 0x030C10C3},
+ {0x4324, 0x051430C3},
+ {0x4328, 0x051490CB},
+ {0x432C, 0x030C70D1},
+ {0x4330, 0x050C50C7},
+ {0x4334, 0x051492CB},
+ {0x4338, 0x05145145},
+ {0x433C, 0x05145145},
+ {0x4340, 0x05145145},
+ {0x4344, 0x05145145},
+ {0x4348, 0x090CD243},
+ {0x434C, 0x0918A1C5},
+ {0x4350, 0x071C3143},
+ {0x4354, 0x071431C3},
+ {0x4358, 0x0F3CF1C5},
+ {0x435C, 0x0F3CF3CF},
+ {0x4360, 0x0F3CF3CF},
+ {0x4364, 0x0F3CF3CF},
+ {0x4368, 0x0F3CF3CF},
+ {0x436C, 0x090C91CF},
+ {0x4370, 0x11243143},
+ {0x4374, 0x9777A777},
+ {0x4378, 0xBB7BAC95},
+ {0x437C, 0xB667B889},
+ {0x4380, 0x7B9B8899},
+ {0x4384, 0x7A5567C8},
+ {0x4388, 0x2278CCCC},
+ {0x438C, 0x7C222222},
+ {0x4390, 0x0000049B},
+ {0x470C, 0x00000888},
+ {0x4EB4, 0x00000002},
+ {0x4394, 0x001CCCCC},
+ {0x4710, 0xCCCCCAAC},
+ {0x4714, 0x0000AACC},
+ {0x4398, 0x00000000},
+ {0x439C, 0x00000008},
+ {0x49A4, 0x00000000},
+ {0x43A0, 0x00000000},
+ {0x43A4, 0x00000000},
+ {0x43A8, 0x00000000},
+ {0x43AC, 0x10000000},
+ {0x43B0, 0x00401001},
+ {0x43B4, 0x00061003},
+ {0x4718, 0x00003000},
+ {0x43B8, 0x000024D8},
+ {0x43BC, 0x00000000},
+ {0x43C0, 0x10000020},
+ {0x43C4, 0x20000200},
+ {0x43C8, 0x00000000},
+ {0x43CC, 0x04000000},
+ {0x43D0, 0x44000100},
+ {0x43D4, 0x60804060},
+ {0x43D8, 0x44204210},
+ {0x43DC, 0x82108082},
+ {0x43E0, 0x82108402},
+ {0x43E4, 0xC8082108},
+ {0x43E8, 0xC8202084},
+ {0x43EC, 0x44208208},
+ {0x43F0, 0x84108204},
+ {0x43F4, 0xD0108104},
+ {0x43F8, 0xF8210108},
+ {0x43FC, 0x6431E930},
+ {0x4400, 0x02109468},
+ {0x4404, 0x10C61C22},
+ {0x4408, 0x02109469},
+ {0x440C, 0x10C61C22},
+ {0x4410, 0x00041049},
+ {0x471C, 0x0B02C080},
+ {0x4414, 0x00000000},
+ {0x4418, 0x00000000},
+ {0x441C, 0x80000000},
+ {0x4420, 0xB0200000},
+ {0x4424, 0x00001FF0},
+ {0x4780, 0xEC000000},
+ {0x4784, 0x8C400020},
+ {0x4964, 0x51089104},
+ {0x4968, 0x88448844},
+ {0x496C, 0x07000044},
+ {0x4E4C, 0x00000000},
+ {0x4428, 0x00000000},
+ {0x442C, 0x00000000},
+ {0x4430, 0x00000000},
+ {0x4434, 0x00000000},
+ {0x4438, 0x590642D0},
+ {0x443C, 0x398668A0},
+ {0x4440, 0x6C100808},
+ {0x4444, 0x4A145344},
+ {0x4448, 0x0C5B008F},
+ {0x444C, 0x6E30498A},
+ {0x4450, 0x656E371B},
+ {0x4454, 0x00000F53},
+ {0x49A8, 0x68120000},
+ {0x49AC, 0xDA0681E0},
+ {0x49BC, 0x14060180},
+ {0x49D8, 0x600603FF},
+ {0x49DC, 0x3C502000},
+ {0x49E0, 0x2C580050},
+ {0x49E4, 0x45B055EF},
+ {0x49E8, 0x00000290},
+ {0x4A0C, 0x00000001},
+ {0x4A28, 0x0DAC1B58},
+ {0x4A2C, 0x0000001E},
+ {0x4E50, 0x16878003},
+ {0x4E54, 0x0F00F078},
+ {0x4E58, 0x03C1E0B4},
+ {0x4E5C, 0x78584830},
+ {0x4E60, 0x88C0140C},
+ {0x4E64, 0x90302C24},
+ {0x4E68, 0x0F84A00A},
+ {0x4E6C, 0x00000011},
+ {0x4E78, 0x00003039},
+ {0x4E7C, 0x0000D431},
+ {0x4E80, 0x00008235},
+ {0x4E84, 0x00000000},
+ {0x4E88, 0x000056CE},
+ {0x4E8C, 0x00002B67},
+ {0x4E90, 0x00000237},
+ {0x4EB8, 0x00004624},
+ {0x4A30, 0x00000000},
+ {0x4458, 0x00000000},
+ {0x445C, 0x4801442E},
+ {0x4460, 0x0051A0B8},
+ {0x4A34, 0x0000011F},
+ {0x4EBC, 0x00000000},
+ {0x4A38, 0x0000011F},
+ {0x4EC0, 0x00000000},
+ {0x4464, 0x00000000},
+ {0x4468, 0x00000000},
+ {0x446C, 0x00000000},
+ {0x4470, 0x00000000},
+ {0x4474, 0x00000000},
+ {0x4478, 0x00000000},
+ {0x447C, 0x00000000},
+ {0x4480, 0x2A0AA040},
+ {0x4484, 0x0A886926},
+ {0x4488, 0x00000004},
+ {0x4A3C, 0x00002B1C},
+ {0x448C, 0x00000000},
+ {0x4490, 0x88000000},
+ {0x4494, 0x10000000},
+ {0x4498, 0xE0000000},
+ {0x4A08, 0x00000FE6},
+ {0x4A40, 0x00000000},
+ {0x4A44, 0x00000000},
+ {0x4A48, 0x00000000},
+ {0x4A4C, 0x00000000},
+ {0x4A50, 0x00000000},
+ {0x4A54, 0x00000000},
+ {0x449C, 0x00000019},
+ {0x44A0, 0x02B2E394},
+ {0x44A4, 0x00000400},
+ {0x4A58, 0x14285208},
+ {0x4A84, 0x02850A14},
+ {0x4A88, 0x048D0A14},
+ {0x4A8C, 0x01123401},
+ {0x4A90, 0x34011234},
+ {0x4A94, 0x23450112},
+ {0x4A98, 0x45123451},
+ {0x4AAC, 0x12345123},
+ {0x4AB0, 0x00000000},
+ {0x44A8, 0x00000001},
+ {0x44B0, 0x00000000},
+ {0x44B4, 0x00000000},
+ {0x44B8, 0x00000000},
+ {0x44BC, 0x00000000},
+ {0x44C0, 0x00000000},
+ {0x44C4, 0x00000000},
+ {0x44C8, 0x00000000},
+ {0x44CC, 0x00000000},
+ {0x44D0, 0x00000000},
+ {0x44D4, 0x00000000},
+ {0x44D8, 0x00000000},
+ {0x44DC, 0x00000000},
+ {0x44E0, 0x00000000},
+ {0x44E4, 0x00000000},
+ {0x44E8, 0x00000000},
+ {0x44EC, 0x00000000},
+ {0x44F0, 0x00000000},
+ {0x44F4, 0x00000000},
+ {0x44F8, 0x00000000},
+ {0x44FC, 0x00000000},
+ {0x4500, 0x00000000},
+ {0x4504, 0x00000000},
+ {0x4508, 0x00000000},
+ {0x450C, 0x00000000},
+ {0x4510, 0x00000000},
+ {0x4514, 0x00000000},
+ {0x4518, 0x00000000},
+ {0x451C, 0x00000000},
+ {0x4520, 0x00000000},
+ {0x4524, 0x00000000},
+ {0x4528, 0x00000000},
+ {0x452C, 0x00000000},
+ {0x4530, 0x4ED80C81},
+ {0x4534, 0x00001808},
+ {0x4538, 0x000000FF},
+ {0x453C, 0x00000000},
+ {0x4540, 0x00000000},
+ {0x4544, 0x00000000},
+ {0x4548, 0x00000000},
+ {0x454C, 0x00000000},
+ {0x4550, 0x00000000},
+ {0x4554, 0x00000000},
+ {0x4558, 0x00000000},
+ {0x455C, 0x00000000},
+ {0x4560, 0x40600033},
+ {0x4564, 0x40000000},
+ {0x4568, 0x00000000},
+ {0x456C, 0x20000000},
+ {0x4570, 0x04AAA407},
+ {0x4574, 0x0001A2B4},
+ {0x4578, 0x0002024B},
+ {0x457C, 0x00200000},
+ {0x4580, 0x00001B40},
+ {0x4584, 0x00000000},
+ {0x4588, 0x000000C8},
+ {0x458C, 0x30000000},
+ {0x4590, 0x00000000},
+ {0x4594, 0x00000000},
+ {0x4598, 0x00000001},
+ {0x459C, 0x0003FE00},
+ {0x45A0, 0x00000000},
+ {0x45A4, 0x00000000},
+ {0x45A8, 0xC00002C0},
+ {0x45AC, 0x78028000},
+ {0x45B0, 0x80000048},
+ {0x45B4, 0x00098800},
+ {0x45B8, 0x00200002},
+ {0x4AB4, 0x00000000},
+ {0x4AB8, 0x00000000},
+ {0x4ABC, 0x00000000},
+ {0x4AC0, 0x00000000},
+ {0x4AC4, 0x00000000},
+ {0x4AC8, 0x00000000},
+ {0x4AF4, 0x00000000},
+ {0x4AF8, 0x00000000},
+ {0x4AFC, 0x00000000},
+ {0x4B00, 0x00000000},
+ {0x4B04, 0x00000000},
+ {0x4B08, 0x00000000},
+ {0x4B0C, 0x00000000},
+ {0x4B10, 0x00000000},
+ {0x4B14, 0x00000000},
+ {0x4B18, 0xB0000000},
+ {0x4B1C, 0x00000000},
+ {0x4B20, 0x00000000},
+ {0x4B24, 0x00000000},
+ {0x4B28, 0x00000000},
+ {0x4B2C, 0x00000000},
+ {0x4B30, 0x00000000},
+ {0x4B34, 0x00000000},
+ {0x4B38, 0x00000000},
+ {0x4B3C, 0x00000000},
+ {0x4B40, 0x00000000},
+ {0x45BC, 0x06748790},
+ {0x45C0, 0x80000000},
+ {0x45C4, 0x00000000},
+ {0x45C8, 0x00000000},
+ {0x45CC, 0x00558670},
+ {0x45D0, 0x002883F0},
+ {0x45D4, 0x00090120},
+ {0x45D8, 0x00000000},
+ {0x4B44, 0x00000100},
+ {0x4B48, 0xA6DBC4B1},
+ {0x4B4C, 0x64F624C3},
+ {0x4B50, 0x00D4EF15},
+ {0x49B0, 0x11110F0A},
+ {0x49B4, 0x00000003},
+ {0x49B8, 0x0000000A},
+ {0x4B54, 0xBE9007FF},
+ {0x4B58, 0x00000001},
+ {0x49C0, 0x00000007},
+ {0x49C4, 0x000003D9},
+ {0x4A10, 0x00000001},
+ {0x49C8, 0x002B1CB0},
+ {0x4A00, 0xC0000000},
+ {0x4A04, 0x00001000},
+ {0x4B5C, 0x00000005},
+ {0x4A18, 0x00000007},
+ {0x4B60, 0x00000024},
+ {0x49CC, 0x00000001},
+ {0x49D0, 0x00000010},
+ {0x49D4, 0x00000001},
+ {0x4B64, 0x927FBFBF},
+ {0x4B68, 0x1D07BDD0},
+ {0x4B6C, 0x318A4DEF},
+ {0x4B70, 0x158C5318},
+ {0x4B74, 0x18C5318C},
+ {0x4B78, 0x4E7394EC},
+ {0x4B7C, 0xD9081CE5},
+ {0x4B80, 0x00000001},
+ {0x49EC, 0x00000001},
+ {0x4B84, 0x00000000},
+ {0x4B88, 0x00000000},
+ {0x4B8C, 0x00000000},
+ {0x4B90, 0x00000000},
+ {0x4B94, 0x00000000},
+ {0x4B98, 0x00000000},
+ {0x4B9C, 0x00000000},
+ {0x4BA0, 0x00000000},
+ {0x4BA4, 0x00EA99A2},
+ {0x49F8, 0x0000C4C3},
+ {0x4A1C, 0x00020800},
+ {0x4A20, 0x0002CC00},
+ {0x4BA8, 0x002B6456},
+ {0x45E0, 0x00000000},
+ {0x45E4, 0x00000000},
+ {0x45E8, 0x00E2E1E1},
+ {0x45EC, 0xCBCBB6B6},
+ {0x45F0, 0x59100FCA},
+ {0x4BAC, 0x12CAB6DE},
+ {0x4BB0, 0x00001110},
+ {0x45F4, 0x08882550},
+ {0x45F8, 0x08CC2660},
+ {0x45FC, 0x09102660},
+ {0x4600, 0x00000154},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xE1CB38E8},
+ {0x4660, 0x4A2E1800},
+ {0x4664, 0x6750E462},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xD1B942F4},
+ {0x4660, 0x41250EF4},
+ {0x4664, 0x6750E458},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x45DC, 0xE1CB38E8},
+ {0x4660, 0x4A2E1800},
+ {0x4664, 0x6750E462},
+ {0xA0000000, 0x00000000},
+ {0x45DC, 0xE1CB38E8},
+ {0x4660, 0x4A2E1800},
+ {0x4664, 0x6750E462},
+ {0xB0000000, 0x00000000},
+ {0x4668, 0x0E0CFB0A},
+ {0x466C, 0x30100F06},
+ {0x4670, 0x34333333},
+ {0x4674, 0x34343434},
+ {0x4678, 0xC39D38E8},
+ {0x467C, 0x482800E3},
+ {0x4680, 0x5836E46A},
+ {0x4684, 0xFBEBDA00},
+ {0x4688, 0x1A10FF04},
+ {0x468C, 0x282A3000},
+ {0x4690, 0x2A29292A},
+ {0x4694, 0x04FA2A2A},
+ {0x4698, 0xEE0F04D1},
+ {0x469C, 0x89291436},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0xA0000000, 0x00000000},
+ {0x46A0, 0x0701E79E},
+ {0xB0000000, 0x00000000},
+ {0x46A4, 0x08D07CFF},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60423537},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x4D1E7F14},
+ {0x46AC, 0x60B37C4E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60423537},
+ {0xA0000000, 0x00000000},
+ {0x46A8, 0x2212FF14},
+ {0x46AC, 0x60423537},
+ {0xB0000000, 0x00000000},
+ {0x46B0, 0x63666666},
+ {0x46B4, 0x35374425},
+ {0x46B8, 0x25883043},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x3FFFFD63},
+ {0x4724, 0xB58D11FF},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x27795843},
+ {0x4724, 0xB58D11F5},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x27795303},
+ {0x4724, 0xB58D11F5},
+ {0xA0000000, 0x00000000},
+ {0x46BC, 0x5107C252},
+ {0x4720, 0x3FFFFD63},
+ {0x4724, 0xB58D11FF},
+ {0xB0000000, 0x00000000},
+ {0x4728, 0x07FFFFFF},
+ {0x472C, 0x0E7893B6},
+ {0x4730, 0xE0399201},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x00000020},
+ {0x4738, 0x8325C500},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x003D4C20},
+ {0x4738, 0x8F25C500},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4734, 0x003D5420},
+ {0x4738, 0x8725C500},
+ {0xA0000000, 0x00000000},
+ {0x4734, 0x00000020},
+ {0x4738, 0x8325C500},
+ {0xB0000000, 0x00000000},
+ {0x473C, 0x00000B7F},
+ {0x4ACC, 0x000F7D00},
+ {0x4AD0, 0x00000000},
+ {0x4AD4, 0x00000040},
+ {0x4AE4, 0x5379E99E},
+ {0x4AE8, 0x00000744},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0xFBD5B89F},
+ {0x4BB8, 0x99563918},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0x05EBC8AF},
+ {0x4BB8, 0x99543D24},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4BB4, 0xFBD5B89F},
+ {0x4BB8, 0x99563918},
+ {0xA0000000, 0x00000000},
+ {0x4BB4, 0xFBD5B89F},
+ {0x4BB8, 0x99563918},
+ {0xB0000000, 0x00000000},
+ {0x4BBC, 0x12EED5B8},
+ {0x4BC0, 0x80C4542F},
+ {0x4BC4, 0x005A007F},
+ {0x4BC8, 0x40000000},
+ {0x4BCC, 0x40000000},
+ {0x4BD0, 0x00000000},
+ {0x4BD4, 0x40000000},
+ {0x4BD8, 0xC0000000},
+ {0x4BDC, 0x40000000},
+ {0x4BE0, 0x80000000},
+ {0x4BE4, 0xBAAC8000},
+ {0x4BE8, 0x638A88C5},
+ {0x4BEC, 0x00900000},
+ {0x4EAC, 0x00000000},
+ {0x4BF0, 0x00000000},
+ {0x4BF4, 0x00000000},
+ {0x4BF8, 0x00000219},
+ {0x4EC4, 0x00000001},
+ {0x4EE8, 0x00002020},
+ {0x4BFC, 0x00000000},
+ {0x4C00, 0x00000010},
+ {0x4C04, 0x00000001},
+ {0x4C08, 0x00000001},
+ {0x4C0C, 0x00000000},
+ {0x4C10, 0x00000000},
+ {0x4C14, 0x00000151},
+ {0x4C18, 0x00000000},
+ {0x4C1C, 0x00000000},
+ {0x4C20, 0x00000151},
+ {0x4C24, 0x00000498},
+ {0x4C28, 0x00000498},
+ {0x4C2C, 0x00000498},
+ {0x4C30, 0x00000498},
+ {0x4C34, 0x00000498},
+ {0x4C38, 0x00000498},
+ {0x4C3C, 0x00000498},
+ {0x4C40, 0x00000498},
+ {0x4C44, 0x00000000},
+ {0x4C48, 0x00000000},
+ {0x4C4C, 0x00001146},
+ {0x4C50, 0x00000000},
+ {0x4C54, 0x00000000},
+ {0x4C58, 0x00001146},
+ {0x4C5C, 0x00000000},
+ {0x4C60, 0x00000000},
+ {0x4C64, 0xE2E1E1DE},
+ {0x4C68, 0xB6B600B6},
+ {0x4C6C, 0xCACBCBCA},
+ {0x4C70, 0x8091010F},
+ {0x4C74, 0x00000B11},
+ {0x46C8, 0x08882550},
+ {0x46CC, 0x08CC2660},
+ {0x46D0, 0x09102660},
+ {0x46D4, 0x00000154},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xE4CD38E8},
+ {0x4744, 0x4C321B04},
+ {0x4748, 0x6750E466},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xC5AD42F4},
+ {0x4744, 0x412504E8},
+ {0x4748, 0x6850E459},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4740, 0xE4CD38E8},
+ {0x4744, 0x4C321B04},
+ {0x4748, 0x6750E466},
+ {0xA0000000, 0x00000000},
+ {0x4740, 0xE4CD38E8},
+ {0x4744, 0x4C321B04},
+ {0x4748, 0x6750E466},
+ {0xB0000000, 0x00000000},
+ {0x474C, 0x0E0CFB0A},
+ {0x4750, 0x30100F06},
+ {0x4754, 0x34333333},
+ {0x4758, 0x34343434},
+ {0x475C, 0xC49E38E8},
+ {0x4760, 0x482800E2},
+ {0x4764, 0x5636E466},
+ {0x4768, 0xFBEBDA00},
+ {0x476C, 0x1A10FF04},
+ {0x4770, 0x282A3000},
+ {0x4774, 0x2A29292A},
+ {0x4778, 0x04FA2A2A},
+ {0x477C, 0xEE0F04D1},
+ {0x49F0, 0x89291436},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0xA0000000, 0x00000000},
+ {0x49F4, 0x0701E79E},
+ {0xB0000000, 0x00000000},
+ {0x49FC, 0x08D07CFF},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60423537},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x4D1E7F14},
+ {0x4A60, 0x60B37C4E},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60423537},
+ {0xA0000000, 0x00000000},
+ {0x4A5C, 0x2212FF14},
+ {0x4A60, 0x60423537},
+ {0xB0000000, 0x00000000},
+ {0x4A64, 0x63666666},
+ {0x4A68, 0x35374425},
+ {0x4A6C, 0x25883043},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x3FFFFD63},
+ {0x4A78, 0xB58D11FF},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x27795843},
+ {0x4A78, 0xB58D11F5},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x27795303},
+ {0x4A78, 0xB58D11F5},
+ {0xA0000000, 0x00000000},
+ {0x4A70, 0x5107C252},
+ {0x4A74, 0x3FFFFD63},
+ {0x4A78, 0xB58D11FF},
+ {0xB0000000, 0x00000000},
+ {0x4A7C, 0x07FFFFFF},
+ {0x4A80, 0x0E7893B6},
+ {0x4A9C, 0xE0399201},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x00000020},
+ {0x4AA4, 0x8325C500},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x003D4C20},
+ {0x4AA4, 0x8F25C500},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4AA0, 0x003D5420},
+ {0x4AA4, 0x8725C500},
+ {0xA0000000, 0x00000000},
+ {0x4AA0, 0x00000020},
+ {0x4AA4, 0x8325C500},
+ {0xB0000000, 0x00000000},
+ {0x4AA8, 0x00000B7F},
+ {0x4AD8, 0x000F7D00},
+ {0x4ADC, 0x00000000},
+ {0x4AE0, 0x00000040},
+ {0x4AEC, 0x5379E99E},
+ {0x4AF0, 0x00000744},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0xFBD5B89F},
+ {0x4C7C, 0x99563918},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0x07ECC9B0},
+ {0x4C7C, 0x995B4126},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4C78, 0xFBD5B89F},
+ {0x4C7C, 0x99563918},
+ {0xA0000000, 0x00000000},
+ {0x4C78, 0xFBD5B89F},
+ {0x4C7C, 0x99563918},
+ {0xB0000000, 0x00000000},
+ {0x4C80, 0x12EED5B8},
+ {0x4C84, 0x80C4542F},
+ {0x4C88, 0x005A007F},
+ {0x4C8C, 0x40000000},
+ {0x4C90, 0x40000000},
+ {0x4C94, 0x00000000},
+ {0x4C98, 0x40000000},
+ {0x4C9C, 0xC0000000},
+ {0x4CA0, 0x40000000},
+ {0x4CA4, 0x80000000},
+ {0x4CA8, 0xBAAC8000},
+ {0x4CAC, 0x638A88C5},
+ {0x4CB0, 0x00900000},
+ {0x4EB0, 0x00000000},
+ {0x4CB4, 0x00000000},
+ {0x4CB8, 0x00000000},
+ {0x4CBC, 0x00000219},
+ {0x4EC8, 0x00000001},
+ {0x4EEC, 0x00002020},
+ {0x4CC0, 0x00000000},
+ {0x4CC4, 0x00000010},
+ {0x4CC8, 0x00000001},
+ {0x4CCC, 0x00000001},
+ {0x4CD0, 0x00000000},
+ {0x4CD4, 0x00000000},
+ {0x4CD8, 0x00000151},
+ {0x4CDC, 0x00000000},
+ {0x4CE0, 0x00000000},
+ {0x4CE4, 0x00000151},
+ {0x4CE8, 0x00000498},
+ {0x4CEC, 0x00000498},
+ {0x4CF0, 0x00000498},
+ {0x4CF4, 0x00000498},
+ {0x4CF8, 0x00000498},
+ {0x4CFC, 0x00000498},
+ {0x4D00, 0x00000498},
+ {0x4D04, 0x00000498},
+ {0x4D08, 0x00000000},
+ {0x4D0C, 0x00000000},
+ {0x4D10, 0x00001146},
+ {0x4D14, 0x00000000},
+ {0x4D18, 0x00000000},
+ {0x4D1C, 0x00001146},
+ {0x4788, 0x00000000},
+ {0x478C, 0xA32103FE},
+ {0x4790, 0xB20A7B28},
+ {0x4794, 0xC6A7B14F},
+ {0x4798, 0x000000D3},
+ {0x4D20, 0x00000000},
+ {0x4D24, 0x0C442416},
+ {0x4D28, 0x00000000},
+ {0x479C, 0x009B902A},
+ {0x47A0, 0x009B902A},
+ {0x47A4, 0x98682C18},
+ {0x47A8, 0x6318C4C1},
+ {0x47AC, 0x6248C631},
+ {0x47B0, 0x922A8253},
+ {0x47B4, 0x00000005},
+ {0x4D2C, 0x0008C0C1},
+ {0x47B8, 0x00001759},
+ {0x47BC, 0x4B702400},
+ {0x47C0, 0x831508BA},
+ {0x4A14, 0x000000E9},
+ {0x4D30, 0x00000001},
+ {0x4E94, 0x000000FC},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0x47CC, 0xBBCCBBB3},
+ {0x47D0, 0x57889989},
+ {0x47D4, 0x00000F45},
+ {0x4D34, 0x7BB167AB},
+ {0x4D38, 0xBBBBBB05},
+ {0x4D3C, 0x777777BB},
+ {0x4D40, 0x00015277},
+ {0x47D8, 0x27039CE9},
+ {0x47DC, 0x41414432},
+ {0x47E0, 0x36058342},
+ {0x47E4, 0x00000006},
+ {0x4D44, 0x00000687},
+ {0x47E8, 0x00000001},
+ {0x47EC, 0x00000001},
+ {0x47F0, 0xC7013016},
+ {0x47F4, 0x84413016},
+ {0x47F8, 0x84413016},
+ {0x47FC, 0x8C413016},
+ {0x4800, 0x8C40B028},
+ {0x4804, 0x3140B028},
+ {0x4808, 0x2940B028},
+ {0x480C, 0x8440B028},
+ {0x4810, 0x6318C610},
+ {0x4814, 0x45334753},
+ {0x4818, 0x236A6A88},
+ {0x4D48, 0x8C413016},
+ {0x4D4C, 0xA140B028},
+ {0x4D50, 0x00150A31},
+ {0x481C, 0x576DF814},
+ {0x4820, 0xA08877AC},
+ {0x4824, 0x0000007A},
+ {0x4D54, 0x00001184},
+ {0x4828, 0xBCEB4A14},
+ {0x482C, 0x000A3A4A},
+ {0x4830, 0xBCEB4A14},
+ {0x4834, 0x000A3A4A},
+ {0x4D58, 0x2F63DD3A},
+ {0x4838, 0xBCBDBD85},
+ {0x483C, 0x0CABB99A},
+ {0x4D5C, 0x000000BC},
+ {0x4840, 0x38384242},
+ {0x4844, 0x0086102E},
+ {0x4848, 0xCA24C82A},
+ {0x4D60, 0x00000000},
+ {0x4D64, 0x0000F49D},
+ {0x4ED8, 0x00000001},
+ {0x4D68, 0x000001C4},
+ {0x4D6C, 0x00000000},
+ {0x4D70, 0x38384242},
+ {0x4D74, 0x030E902E},
+ {0x4D78, 0x994C1502},
+ {0x4D7C, 0x00017912},
+ {0x4EDC, 0x00000001},
+ {0x484C, 0x00008A62},
+ {0x4D80, 0x00000002},
+ {0x4850, 0x00000008},
+ {0x4854, 0x009B902A},
+ {0x4858, 0x009B902A},
+ {0x485C, 0x98682C18},
+ {0x4860, 0x6318C4C1},
+ {0x4864, 0x6248C631},
+ {0x4868, 0x922A8253},
+ {0x486C, 0x00000005},
+ {0x4D84, 0x0008C0C1},
+ {0x4870, 0x00001759},
+ {0x4874, 0x4B702400},
+ {0x4878, 0x831508BA},
+ {0x4A24, 0x000000E9},
+ {0x4D88, 0x00000001},
+ {0x4E98, 0x000000FC},
+ {0x487C, 0x9898A8BB},
+ {0x4880, 0x54535368},
+ {0x4884, 0x999999B3},
+ {0x4888, 0x35555589},
+ {0x488C, 0x00000745},
+ {0x4D8C, 0x6AB14487},
+ {0x4D90, 0xBBBBBB04},
+ {0x4D94, 0x777777BB},
+ {0x4D98, 0x00015277},
+ {0x4890, 0x27039CE9},
+ {0x4894, 0x41414432},
+ {0x4898, 0x36058342},
+ {0x489C, 0x00000006},
+ {0x4D9C, 0x00000687},
+ {0x48A0, 0x00000001},
+ {0x48A4, 0x00000001},
+ {0x48A8, 0xC7013016},
+ {0x48AC, 0x84413016},
+ {0x48B0, 0x84413016},
+ {0x48B4, 0x8C413016},
+ {0x48B8, 0x8C40B028},
+ {0x48BC, 0x3140B028},
+ {0x48C0, 0x2940B028},
+ {0x48C4, 0x8440B028},
+ {0x48C8, 0x6318C610},
+ {0x48CC, 0x45334753},
+ {0x48D0, 0x236A6A88},
+ {0x4DA0, 0x8C413016},
+ {0x4DA4, 0xA140B028},
+ {0x4DA8, 0x00150A31},
+ {0x48D4, 0x576DF814},
+ {0x48D8, 0xA08877AC},
+ {0x48DC, 0x0000007A},
+ {0x4DAC, 0x00001184},
+ {0x48E0, 0xBCEB4A14},
+ {0x48E4, 0x000A3A4A},
+ {0x48E8, 0xBCEB4A14},
+ {0x48EC, 0x000A3A4A},
+ {0x4DB0, 0x2F63DD3A},
+ {0x48F0, 0x9A8A8A85},
+ {0x48F4, 0x0C9BB99A},
+ {0x4DB4, 0x0000009A},
+ {0x48F8, 0x38384242},
+ {0x48FC, 0x0086102E},
+ {0x4900, 0xCA24C82A},
+ {0x4DB8, 0x00000000},
+ {0x4DBC, 0x0000F49D},
+ {0x4EE0, 0x00000001},
+ {0x4DC0, 0x000001C4},
+ {0x4DC4, 0x00000000},
+ {0x4DC8, 0x38384242},
+ {0x4DCC, 0x030E902E},
+ {0x4DD0, 0x994C1502},
+ {0x4DD4, 0x00017912},
+ {0x4EE4, 0x00000001},
+ {0x4904, 0x00008A62},
+ {0x4DD8, 0x00000002},
+ {0x4908, 0x00000008},
+ {0x490C, 0x80040000},
+ {0x4910, 0x80040000},
+ {0x4914, 0xFE800000},
+ {0x4918, 0x834C0000},
+ {0x491C, 0x00000000},
+ {0x4920, 0x00000000},
+ {0x4924, 0x000003FF},
+ {0x4928, 0x00000000},
+ {0x492C, 0x00000000},
+ {0x4930, 0x00000000},
+ {0x4934, 0x40000000},
+ {0x4938, 0x00000000},
+ {0x493C, 0x00000000},
+ {0x4940, 0x00000000},
+ {0x4944, 0x00000000},
+ {0x4948, 0x04065800},
+ {0x494C, 0x02010080},
+ {0x4950, 0x0E1E3E05},
+ {0x4954, 0x0A163068},
+ {0x4958, 0x00206040},
+ {0x495C, 0x02020202},
+ {0x4960, 0x00002020},
+ {0x4DDC, 0x18002000},
+ {0x4DE0, 0x00004001},
+ {0x4DE4, 0x00040004},
+ {0x4DE8, 0x00400040},
+ {0x4DEC, 0x04000400},
+ {0x4DF0, 0x08080618},
+ {0x4DF4, 0x08081616},
+ {0x4DF8, 0x08080808},
+ {0x4DFC, 0x18180808},
+ {0x4E00, 0x01020100},
+ {0x4E04, 0x05020502},
+ {0x4E08, 0x00020E0F},
+ {0x4E0C, 0x00000000},
+ {0x4E10, 0x16080806},
+ {0x4E14, 0x08080816},
+ {0x4E18, 0x08080808},
+ {0x4E1C, 0x00181808},
+ {0x4E20, 0x02010201},
+ {0x4E24, 0x0F050205},
+ {0x4E28, 0x0000020E},
+ {0x4E2C, 0x00000000},
+ {0x4E70, 0x00000001},
+ {0x4970, 0x00000000},
+ {0x4974, 0xC00CD62D},
+ {0x4978, 0x00000103},
+ {0x4E30, 0x02E416A8},
+ {0x497C, 0x00000000},
+ {0x4980, 0x00000000},
+ {0x4984, 0x00000000},
+ {0x4988, 0x00000000},
+ {0x498C, 0x00000000},
+ {0x4E34, 0x00FC0000},
+ {0x4E38, 0x0000F800},
+ {0x4E3C, 0x00000001},
+ {0x4990, 0x00000000},
+ {0x4994, 0x00000000},
+ {0x4998, 0x00000000},
+ {0x499C, 0x00000000},
+ {0x49A0, 0x00000000},
+ {0x4E40, 0x00FC0000},
+ {0x4E44, 0x0000F800},
+ {0x4E48, 0x00000001},
+ {0xC54, 0x10014368},
+ {0xC58, 0x61000000},
+ {0xC5C, 0x805580F0},
+ {0xC64, 0x0010A030},
+ {0x189C, 0x000003FF},
+ {0xC6C, 0x00060020},
+ {0xC3C, 0x2840E1BF},
+ {0xC40, 0x00000000},
+ {0xC44, 0x00000007},
+ {0xC48, 0x410E4000},
+ {0xC54, 0x1EE1436A},
+ {0xC58, 0x61000000},
+ {0x730, 0x00000002},
+ {0xC60, 0x017FFFF2},
+ {0xC64, 0x0010A170},
+ {0xC64, 0x0010A170},
+ {0xC68, 0x000000FF},
+ {0xC64, 0x0010A130},
+ {0xC54, 0x1AE1436A},
+ {0xC6C, 0x00060020},
+ {0xC58, 0x41000000},
+ {0x708, 0x00000000},
+ {0xC6C, 0x00061020},
+ {0x884, 0x0043F01D},
+ {0x704, 0x601E0100},
+ {0x710, 0xEF810000},
+ {0xC54, 0x1AE1436A},
+ {0xC58, 0x41000000},
+ {0xC68, 0x10000050},
+ {0xC6C, 0x20061020},
+ {0x704, 0x601E0100},
+ {0xC74, 0x00000000},
+ {0x90C, 0x00300000},
+ {0xC70, 0x071BFC00},
+ {0xC74, 0x3FFFFFFF},
+ {0xC78, 0x3FFFFFFF},
+ {0xC7C, 0x0000BFFF},
+ {0xD40, 0xF64FA0F7},
+ {0xD44, 0x0400463F},
+ {0xD48, 0x0003FFFF},
+ {0xD4C, 0x00000000},
+ {0xD50, 0xF64FA0F7},
+ {0xD54, 0x04100437},
+ {0xD58, 0x0000FF7F},
+ {0xD5C, 0x00000000},
+ {0xD60, 0x00000000},
+ {0xD64, 0x00000000},
+ {0xD70, 0x00000015},
+ {0xD90, 0x000003FF},
+ {0xD94, 0x00000000},
+ {0xD98, 0x0000003F},
+ {0xD9C, 0x00000000},
+ {0xDA0, 0x000003FE},
+ {0xDA4, 0x00000000},
+ {0xDA8, 0x0000003F},
+ {0xDAC, 0x00000000},
+ {0xD00, 0x77777777},
+ {0xD04, 0xBBBBBBBB},
+ {0xD08, 0xBBBBBBBB},
+ {0xD0C, 0x00000070},
+ {0xD10, 0x20110900},
+ {0xD10, 0x20110FFF},
+ {0xD78, 0x00000001},
+ {0xD7C, 0x001C040A},
+ {0xD84, 0x00006007},
+ {0xD84, 0x00006607},
+ {0xD10, 0x28110FFF},
+ {0xD18, 0x50209900},
+ {0xD80, 0x00804100},
+ {0xD80, 0x00804200},
+ {0x718, 0x1333233F},
+ {0x604, 0x041E1E1E},
+ {0x714, 0x00010000},
+ {0x586C, 0x000000F0},
+ {0x586C, 0x000000E0},
+ {0x586C, 0x000000D0},
+ {0x586C, 0x000000C0},
+ {0x586C, 0x000000B0},
+ {0x586C, 0x000000A0},
+ {0x586C, 0x00000090},
+ {0x586C, 0x00000080},
+ {0x586C, 0x00000070},
+ {0x586C, 0x00000060},
+ {0x586C, 0x00000050},
+ {0x586C, 0x00000040},
+ {0x586C, 0x00000030},
+ {0x586C, 0x00000020},
+ {0x586C, 0x00000010},
+ {0x586C, 0x00000000},
+ {0x786C, 0x000000F0},
+ {0x786C, 0x000000E0},
+ {0x786C, 0x000000D0},
+ {0x786C, 0x000000C0},
+ {0x786C, 0x000000B0},
+ {0x786C, 0x000000A0},
+ {0x786C, 0x00000090},
+ {0x786C, 0x00000080},
+ {0x786C, 0x00000070},
+ {0x786C, 0x00000060},
+ {0x786C, 0x00000050},
+ {0x786C, 0x00000040},
+ {0x786C, 0x00000030},
+ {0x786C, 0x00000020},
+ {0x786C, 0x00000010},
+ {0x786C, 0x00000000},
+ {0x304, 0x0CE31333},
+ {0x300, 0xF30CE31C},
+ {0x304, 0x13EF1F19},
+ {0x308, 0x0C13E3F3},
+ {0x30C, 0x130C0C0C},
+ {0x310, 0x80496000},
+ {0x314, 0x0041E000},
+ {0x318, 0x20022042},
+ {0x31C, 0x20448009},
+ {0x320, 0x00490040},
+ {0x324, 0xE0000070},
+ {0x328, 0xE000E000},
+ {0x32C, 0x0041E000},
+ {0x35C, 0x000004C4},
+ {0xC0D4, 0xA7C41460},
+ {0xC0D8, 0xC6BA7F67},
+ {0xC0DC, 0x30C52868},
+ {0xC0E0, 0x75008128},
+ {0xC0E4, 0x0000272B},
+ {0xC1D4, 0xA7C41460},
+ {0xC1D8, 0xC6BA7F67},
+ {0xC1DC, 0x30C52868},
+ {0xC1E0, 0x75008128},
+ {0xC1E4, 0x0000272B},
+ {0xC0EC, 0x00030003},
+ {0xC1EC, 0x00030003},
+ {0xC004, 0x03020000},
+ {0xC024, 0x03020000},
+ {0xC104, 0x03020000},
+ {0xC124, 0x03020000},
+ {0xC0E8, 0x000A0C81},
+ {0xC0F0, 0x00000024},
+ {0xC1E8, 0x000A0C81},
+ {0xC1F0, 0x00000024},
+ {0x334, 0xFFFFFFFF},
+ {0x33C, 0x55000000},
+ {0x340, 0x00005555},
+ {0x724, 0x00111201},
+ {0x5868, 0xA9550000},
+ {0x5870, 0x33221100},
+ {0x5874, 0x77665544},
+ {0x5878, 0xBBAA9988},
+ {0x587C, 0xFFEEDDCC},
+ {0x5880, 0x76543210},
+ {0x5884, 0xFEDCBA98},
+ {0x5888, 0x00000000},
+ {0x588C, 0x00000000},
+ {0x5894, 0x00000008},
+ {0x7868, 0xA9550000},
+ {0x7870, 0x33221100},
+ {0x7874, 0x77665544},
+ {0x7878, 0xBBAA9988},
+ {0x787C, 0xFFEEDDCC},
+ {0x7880, 0x76543210},
+ {0x7884, 0xFEDCBA98},
+ {0x7888, 0x00000000},
+ {0x788C, 0x00000000},
+ {0x7894, 0x00000008},
+ {0x650, 0x00200888},
+ {0x710, 0xF3810000},
+ {0x020, 0x0000F381},
+ {0x024, 0x0000F381},
+ {0xC0A8, 0x00000080},
+ {0xC0AC, 0x00000100},
+ {0xC0B8, 0x00020000},
+ {0xC1A8, 0x00000080},
+ {0xC1AC, 0x00000100},
+ {0xC1B8, 0x00020000},
+ {0x1038, 0x00003100},
+ {0x1038, 0x00003100},
+ {0x3038, 0x00003100},
+ {0x3038, 0x00003100},
+ {0xC14, 0xA5000000},
+ {0x908, 0x00000001},
+ {0xC54, 0x1EE14368},
+ {0xC88, 0xC2AC8000},
+ {0xC8C, 0x02F2FC08},
+ {0xC70, 0x071BFC00},
+ {0x980, 0x10002251},
+ {0x988, 0x3C3C4107},
+ {0x904, 0x00000005},
+ {0x994, 0x00000010},
+ {0x000, 0x0580801F},
+ {0x240C, 0x00000000},
+ {0x010, 0x000C01FF},
+ {0x010, 0x001C01FF},
+ {0x2424, 0x00000008},
+ {0x620, 0x00141A30},
+ {0x660, 0x00000004},
+ {0x2620, 0x00141A30},
+ {0x2660, 0x00000000},
+ {0x640, 0x180A141E},
+ {0x640, 0x1814141E},
+ {0x640, 0x1814141E},
+ {0x640, 0x14141414},
+ {0x644, 0x3C14283C},
+ {0x644, 0x3C29283C},
+ {0x644, 0x3C29203C},
+ {0x644, 0x3C29201A},
+ {0x2640, 0x180A141E},
+ {0x2640, 0x1814141E},
+ {0x2640, 0x1814141E},
+ {0x2640, 0x14141414},
+ {0x2644, 0x3C14283C},
+ {0x2644, 0x3C29283C},
+ {0x2644, 0x3C29203C},
+ {0x2644, 0x3C29201A},
+ {0x620, 0x00141A40},
+ {0x64C, 0x1D0A141E},
+ {0x64C, 0x1D1D141E},
+ {0x64C, 0x1D1D1D1E},
+ {0x2620, 0x00141A40},
+ {0x264C, 0x1D0A141E},
+ {0x264C, 0x1D1D141E},
+ {0x264C, 0x1D1D1D1E},
+ {0x2300, 0x03020100},
+ {0x2304, 0x07060504},
+ {0x2308, 0x0B0A0908},
+ {0x230C, 0x0F0E0D0C},
+ {0x2310, 0x13121110},
+ {0x2314, 0x17161514},
+ {0x2318, 0x00000018},
+ {0x231C, 0x00C00000},
+ {0x2320, 0x00000000},
+ {0x2324, 0x0005298F},
+ {0x2328, 0x0015296E},
+ {0x232C, 0x0D3B5200},
+ {0x2330, 0x00000000},
+ {0x2334, 0x00000000},
+ {0x2338, 0x00000000},
+ {0x233C, 0x00000402},
+ {0x2340, 0x00020080},
+ {0x2344, 0x03C00000},
+ {0x2348, 0x0001FFFF},
+ {0x234C, 0x00C80064},
+ {0x2350, 0x0190012C},
+ {0x2354, 0x000032FE},
+ {0x2358, 0xF0203C28},
+ {0x235C, 0xF027C000},
+ {0x2360, 0x01210C00},
+ {0x2320, 0x00000001},
+ {0x2300, 0x0C811B40},
+ {0x2304, 0xF3FC4ED8},
+ {0x2308, 0x08FF808F},
+ {0x230C, 0xFCBC80C8},
+ {0x2310, 0xBC80536C},
+ {0x2314, 0x0363A0F3},
+ {0x2318, 0x000000BB},
+ {0x724, 0x00111200},
+ {0x704, 0x601E0D00},
+ {0xC78, 0xBFFFFFFF},
+ {0x704, 0x601E0D02},
+ {0x704, 0x601E0D02},
+ {0x5864, 0x080801FF},
+ {0x7864, 0x080801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC6C, 0x20061021},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x8088, 0x007F0000},
+ {0x81A4, 0x003F3A00},
+ {0x81B4, 0x0100007F},
+ {0x81C0, 0x0060010B},
+ {0x81A0, 0x00000010},
+ {0x8138, 0x40000002},
+ {0x82A4, 0x003F3A00},
+ {0x82B4, 0x0100007F},
+ {0x82C0, 0x0060010B},
+ {0x82A0, 0x00000010},
+ {0x81A0, 0x00000010},
+ {0x8238, 0x40000002},
+ {0x8088, 0x00000000},
+ {0x8020, 0x00000000},
+ {0x8120, 0x00000000},
+ {0x8220, 0x00000000},
+ {0x8124, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC70, 0x071BFE00},
+ {0xC70, 0x071BFE60},
+ {0xC6C, 0x20061021},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x8120, 0x10000000},
+ {0x8120, 0x10030000},
+ {0x8124, 0x00000F0F},
+ {0x8124, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x8224, 0x00000F0F},
+ {0x8220, 0x10000000},
+ {0x8220, 0x10030000},
+ {0x704, 0x601E0D00},
+ {0x5864, 0x100801FF},
+ {0x7864, 0x100801FF},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0xC60, 0x017FFFF3},
+ {0x58D4, 0x7401FE00},
+ {0x78D4, 0x7401FE00},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x704, 0x601E0D02},
+ {0xC7C, 0x0020BFFF},
+ {0x58C0, 0x00FE0000},
+ {0x58FC, 0x00000000},
+ {0x566C, 0x00010005},
+ {0x566C, 0x00011005},
+ {0x700, 0x00000030},
+ {0x9D0, 0x00001001},
+ {0x704, 0x601E0D02},
+ {0x704, 0x601E0D00},
+ {0x704, 0x601C0502},
+ {0x000, 0x0580801F},
+ {0x980, 0x10002250},
+ {0x010, 0x001C01FF},
+ {0xC3C, 0x2840E1BF},
+ {0x12A8, 0x33337824},
+ {0x32A8, 0x33337824},
+ {0x620, 0x00141A40},
+ {0x2320, 0x00000000},
+ {0x664, 0x0000000C},
+ {0xC0F8, 0x00000001},
+ {0xC1F8, 0x00000001},
+ {0x2D7C, 0x739C040A},
+ {0x1010, 0x00000000},
+ {0x3010, 0x00000000},
+ {0x2C14, 0x80000005},
+ {0x5818, 0x082C1800},
+ {0x7818, 0x082C1800},
+ {0x624, 0x0101030A},
+ {0x028, 0x0000F381},
+ {0x02C, 0x0000F381},
+ {0x720, 0x20000000},
+ {0x1200, 0x00010142},
+ {0x12A0, 0x24903056},
+ {0x12AC, 0x12333121},
+ {0x12B8, 0x30020000},
+ {0x2000, 0x18BBBF84},
+ {0x2C14, 0x85000005},
+ {0x3200, 0x00010142},
+ {0x32A0, 0x24903056},
+ {0x32AC, 0x12333121},
+ {0x32B8, 0x30020000},
+ {0x5800, 0x03FF807F},
+ {0x5804, 0x04237040},
+ {0x5808, 0x04237040},
+ {0x7800, 0x03FF807F},
+ {0x7804, 0x04237040},
+ {0x7808, 0x04237040},
+ {0x010, 0x001C61FF},
+ {0x56C8, 0x0E800400},
+ {0x76C8, 0x0E800400},
+ {0x984, 0x000000E0},
+ {0x2008, 0x000FFFFF},
+ {0x58B0, 0x00000800},
+ {0x5A00, 0x00000000},
+ {0x5A04, 0x00000000},
+ {0x5A08, 0x00000000},
+ {0x5A0C, 0x00000000},
+ {0x5A10, 0x00000000},
+ {0x5A14, 0x00000000},
+ {0x5A18, 0x00000000},
+ {0x5A1C, 0x00000000},
+ {0x5A20, 0x00000000},
+ {0x5A24, 0x00050000},
+ {0x5A28, 0x00000000},
+ {0x5A2C, 0x00000000},
+ {0x5A30, 0x00000000},
+ {0x5A34, 0x00000000},
+ {0x5A38, 0x00000000},
+ {0x5A3C, 0x00000000},
+ {0x5A40, 0x00000000},
+ {0x5A44, 0x00000005},
+ {0x5A48, 0x00000000},
+ {0x5A4C, 0x00000000},
+ {0x5A50, 0x00000000},
+ {0x5A54, 0x00000000},
+ {0x5A58, 0x00000000},
+ {0x5A5C, 0x00000000},
+ {0x5A60, 0x00050000},
+ {0x5A64, 0x00000000},
+ {0x5A68, 0x00000000},
+ {0x5A6C, 0x00000000},
+ {0x5A70, 0x00000000},
+ {0x5A74, 0x00000000},
+ {0x5A78, 0x00000000},
+ {0x5A7C, 0x00000000},
+ {0x5A80, 0x00000000},
+ {0x5A84, 0x00000000},
+ {0x5A88, 0x00000000},
+ {0x5A8C, 0x00000000},
+ {0x5A90, 0x00000000},
+ {0x5A94, 0x00000000},
+ {0x5A98, 0x00000000},
+ {0x5A9C, 0x00000000},
+ {0x5AA0, 0x00000000},
+ {0x5AA4, 0x00000000},
+ {0x5AA8, 0x00000000},
+ {0x5AAC, 0x00000000},
+ {0x5AB0, 0x00050005},
+ {0x5AB4, 0x00050005},
+ {0x5AB8, 0x00050005},
+ {0x5ABC, 0x00050005},
+ {0x5AC0, 0x00000005},
+ {0x78B0, 0x00000800},
+ {0x7A00, 0x00000000},
+ {0x7A04, 0x00000000},
+ {0x7A08, 0x00000000},
+ {0x7A0C, 0x00000000},
+ {0x7A10, 0x00000000},
+ {0x7A14, 0x00000000},
+ {0x7A18, 0x00000000},
+ {0x7A1C, 0x00000000},
+ {0x7A20, 0x00000000},
+ {0x7A24, 0x00050000},
+ {0x7A28, 0x00000000},
+ {0x7A2C, 0x00000000},
+ {0x7A30, 0x00000000},
+ {0x7A34, 0x00000000},
+ {0x7A38, 0x00000000},
+ {0x7A3C, 0x00000000},
+ {0x7A40, 0x00000000},
+ {0x7A44, 0x00000005},
+ {0x7A48, 0x00000000},
+ {0x7A4C, 0x00000000},
+ {0x7A50, 0x00000000},
+ {0x7A54, 0x00000000},
+ {0x7A58, 0x00000000},
+ {0x7A5C, 0x00000000},
+ {0x7A60, 0x00050000},
+ {0x7A64, 0x00000000},
+ {0x7A68, 0x00000000},
+ {0x7A6C, 0x00000000},
+ {0x7A70, 0x00000000},
+ {0x7A74, 0x00000000},
+ {0x7A78, 0x00000000},
+ {0x7A7C, 0x00000000},
+ {0x7A80, 0x00000000},
+ {0x7A84, 0x00000000},
+ {0x7A88, 0x00000000},
+ {0x7A8C, 0x00000000},
+ {0x7A90, 0x00000000},
+ {0x7A94, 0x00000000},
+ {0x7A98, 0x00000000},
+ {0x7A9C, 0x00000000},
+ {0x7AA0, 0x00000000},
+ {0x7AA4, 0x00000000},
+ {0x7AA8, 0x00000000},
+ {0x7AAC, 0x00000000},
+ {0x7AB0, 0x00050005},
+ {0x7AB4, 0x00050005},
+ {0x7AB8, 0x00050005},
+ {0x7ABC, 0x00050005},
+ {0x7AC0, 0x00000005},
+ {0x0F0, 0x00010000},
+ {0x0F4, 0x00000018},
+ {0x0F8, 0x20220120},
+};
+
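+/*
+ * A note on the encoding these generated tables appear to use: entries
+ * whose first word begins with 0x8, 0x9, 0xA or 0xB are not register
+ * writes.  They seem to bracket conditional branches (if / else-if /
+ * else / endif) resolved against the probed RF HW version; each 0x8/0x9
+ * branch opener is paired with a {0x40000000, 0x00000000} marker word,
+ * and only the writes inside the matching branch should be applied.
+ * The {0xF0xxxxxx, N} records at the head of the conditioned tables
+ * below likewise look like headline records mapping a hardware
+ * condition to branch index N.
+ */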
+static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
+ {0xF0FF0000, 0x00000000},
+ {0xF03300FF, 0x00000001},
+ {0x000, 0x01E3C39F},
+ {0x001, 0x00694727},
+ {0x002, 0x00005536},
+ {0x100, 0x02E3C39F},
+ {0x101, 0x0069472A},
+ {0x102, 0x00005536},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10000, 0x1A02E1C9},
+ {0x10001, 0x00644A30},
+ {0x10002, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10000, 0x0EF4D1B9},
+ {0x10001, 0x00584125},
+ {0x10002, 0x00006750},
+ {0xA0000000, 0x00000000},
+ {0x10000, 0x1A02E1C9},
+ {0x10001, 0x00644A30},
+ {0x10002, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10100, 0x1901E1C8},
+ {0x10101, 0x0061482D},
+ {0x10102, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10100, 0x04E8C5AD},
+ {0x10101, 0x00594125},
+ {0x10102, 0x00006850},
+ {0xA0000000, 0x00000000},
+ {0x10100, 0x1901E1C8},
+ {0x10101, 0x0061482D},
+ {0x10102, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20000, 0x1601E2CA},
+ {0x20001, 0x005D452A},
+ {0x20002, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20000, 0x0EF4D3BB},
+ {0x20001, 0x00563F25},
+ {0x20002, 0x00006850},
+ {0xA0000000, 0x00000000},
+ {0x20000, 0x1601E2CA},
+ {0x20001, 0x005D452A},
+ {0x20002, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20100, 0x1901E1C8},
+ {0x20101, 0x0061482D},
+ {0x20102, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x20100, 0x0BF1CFB7},
+ {0x20101, 0x00574025},
+ {0x20102, 0x00006750},
+ {0xA0000000, 0x00000000},
+ {0x20100, 0x1901E1C8},
+ {0x20101, 0x0061482D},
+ {0x20102, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30000, 0x1700E1CA},
+ {0x30001, 0x005E472B},
+ {0x30002, 0x00006750},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30000, 0x05EFCEB7},
+ {0x30001, 0x004B351A},
+ {0x30002, 0x00006850},
+ {0xA0000000, 0x00000000},
+ {0x30000, 0x1700E1CA},
+ {0x30001, 0x005E472B},
+ {0x30002, 0x00006750},
+ {0xB0000000, 0x00000000},
+ {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30100, 0x14FEE0C9},
+ {0x30101, 0x00594428},
+ {0x30102, 0x00006650},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x30100, 0x0CF2D1B9},
+ {0x30101, 0x00563F24},
+ {0x30102, 0x00006750},
+ {0xA0000000, 0x00000000},
+ {0x30100, 0x14FEE0C9},
+ {0x30101, 0x00594428},
+ {0x30102, 0x00006650},
+ {0xB0000000, 0x00000000},
+ {0x40000, 0x13FCDDC8},
+ {0x40001, 0x005D4328},
+ {0x40002, 0x00006850},
+ {0x40100, 0x14FEE3CF},
+ {0x40101, 0x00583E24},
+ {0x40102, 0x00006850},
+ {0x50000, 0x0DF4D6C6},
+ {0x50001, 0x00604227},
+ {0x50002, 0x00006850},
+ {0x50100, 0x1903E7D5},
+ {0x50101, 0x0061462B},
+ {0x50102, 0x00006850},
+ {0x60000, 0x0FF5D7C6},
+ {0x60001, 0x005D4429},
+ {0x60002, 0x00006850},
+ {0x60100, 0x12FADECF},
+ {0x60101, 0x005B4126},
+ {0x60102, 0x00006850},
+ {0x70000, 0x09F1D2C3},
+ {0x70001, 0x00554026},
+ {0x70002, 0x00006750},
+ {0x70100, 0x0CF5DACC},
+ {0x70101, 0x00563E25},
+ {0x70102, 0x00006750},
+ {0x2000000, 0x02E4C4A0},
+ {0x2000001, 0x006A4828},
+ {0x2000100, 0x02E4C5A1},
+ {0x2000101, 0x00664629},
+ {0x2010000, 0x05EBC8AF},
+ {0x2010001, 0x00543D24},
+ {0x2010100, 0x07ECC9B0},
+ {0x2010101, 0x005B4126},
+ {0x2020000, 0x05EDCCB2},
+ {0x2020001, 0x004D361C},
+ {0x2020100, 0x06ECCBB2},
+ {0x2020101, 0x00553D22},
+ {0x2030000, 0x02ECCCB3},
+ {0x2030001, 0x00483118},
+ {0x2030100, 0x04ECCCB2},
+ {0x2030101, 0x004F381C},
+ {0x3000000, 0x00000000},
+ {0x3000001, 0x00000000},
+ {0x3000002, 0x00000000},
+ {0x3000003, 0x00000000},
+ {0x3000100, 0x00000000},
+ {0x3000101, 0x00000000},
+ {0x3000102, 0x00000000},
+ {0x3000103, 0x00000000},
+ {0x3010000, 0x0E0CFB0A},
+ {0x3010001, 0x00100F06},
+ {0x3010002, 0x34333333},
+ {0x3010003, 0x3434343C},
+ {0x3010100, 0x0E0CFB0A},
+ {0x3010101, 0x00100F06},
+ {0x3010102, 0x34333333},
+ {0x3010103, 0x3434343C},
+ {0x3020000, 0x0E0CFB0A},
+ {0x3020001, 0x00100F06},
+ {0x3020002, 0x34333333},
+ {0x3020003, 0x3434343C},
+ {0x3020100, 0x0E0CFB0A},
+ {0x3020101, 0x00100F06},
+ {0x3020102, 0x34333333},
+ {0x3020103, 0x3434343C},
+ {0x3030000, 0x0E0CFB0A},
+ {0x3030001, 0x00100F06},
+ {0x3030002, 0x34333333},
+ {0x3030003, 0x3434343C},
+ {0x3030100, 0x0E0CFB0A},
+ {0x3030101, 0x00100F06},
+ {0x3030102, 0x34333333},
+ {0x3030103, 0x3434343C},
+ {0x3040000, 0x0E0CFB0A},
+ {0x3040001, 0x00100F06},
+ {0x3040002, 0x343B3333},
+ {0x3040003, 0x34343C3C},
+ {0x3040100, 0x0E0CFB0A},
+ {0x3040101, 0x00100F06},
+ {0x3040102, 0x343B3333},
+ {0x3040103, 0x34343C3C},
+ {0x3050000, 0x0E0CFB0A},
+ {0x3050001, 0x00100F06},
+ {0x3050002, 0x343B3333},
+ {0x3050003, 0x34343C3C},
+ {0x3050100, 0x0E0CFB0A},
+ {0x3050101, 0x00100F06},
+ {0x3050102, 0x343B3333},
+ {0x3050103, 0x34343C3C},
+ {0x3060000, 0x0E0CFB0A},
+ {0x3060001, 0x00100F06},
+ {0x3060002, 0x3C3B3333},
+ {0x3060003, 0x34343C3C},
+ {0x3060100, 0x0E0CFB0A},
+ {0x3060101, 0x00100F06},
+ {0x3060102, 0x3C3B3333},
+ {0x3060103, 0x34343C3C},
+ {0x3070000, 0x0E0CFB0A},
+ {0x3070001, 0x00100F06},
+ {0x3070002, 0x3C3B3333},
+ {0x3070003, 0x34343C3C},
+ {0x3070100, 0x0E0CFB0A},
+ {0x3070101, 0x00100F06},
+ {0x3070102, 0x3C3B3333},
+ {0x3070103, 0x34343C3C},
+};
+
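+/*
+ * Radio A init table.  The 0xF0xxxxx0/0xF0xxxxx1 headlines suggest the
+ * branch selection here is keyed on two fields (the trailing digit looks
+ * like a second condition, e.g. an RF variant), though that is inferred
+ * from the data rather than stated anywhere.  The recurring
+ * 0x0EF / 0x033 / 0x03E / 0x03F sequences follow the usual Realtek RF
+ * look-up-table idiom: 0x0EF enables a LUT page, 0x033 selects the entry
+ * index, and 0x03E/0x03F carry that entry's data words.
+ */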
+static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0020000, 0x00000001},
+ {0xF0320000, 0x00000002},
+ {0xF0330000, 0x00000003},
+ {0xF0340000, 0x00000004},
+ {0xF0350000, 0x00000005},
+ {0xF0360000, 0x00000006},
+ {0xF0010001, 0x00000007},
+ {0xF0020001, 0x00000008},
+ {0xF0030001, 0x00000009},
+ {0xF0040001, 0x0000000A},
+ {0xF0050001, 0x0000000B},
+ {0xF0070001, 0x0000000C},
+ {0xF0320001, 0x0000000D},
+ {0xF0330001, 0x0000000E},
+ {0xF0340001, 0x0000000F},
+ {0xF0350001, 0x00000010},
+ {0xF0360001, 0x00000011},
+ {0xF03F0001, 0x00000012},
+ {0xF0400001, 0x00000013},
+ {0x005, 0x00000000},
+ {0x10005, 0x00000000},
+ {0x000, 0x00030001},
+ {0x10000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x10018, 0x00011124},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000620},
+ {0x03F, 0x0000020C},
+ {0x0EF, 0x00000000},
+ {0x05F, 0x00000038},
+ {0x097, 0x00043200},
+ {0x0A6, 0x00066DB7},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x00000003},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x00000002},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x0EF, 0x00000000},
+ {0x000, 0x00033C01},
+ {0x10000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x096, 0x00015200},
+ {0x10055, 0x00080080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0xA0000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0xB0000000, 0x00000000},
+ {0x057, 0x0000D589},
+ {0x05A, 0x0007FFFF},
+ {0x043, 0x00005000},
+ {0x0B5, 0x00001720},
+ {0x0ED, 0x00000080},
+ {0x033, 0x00000000},
+ {0x03E, 0x00013FAB},
+ {0x03F, 0x000FD800},
+ {0x033, 0x00000010},
+ {0x03E, 0x00013FAB},
+ {0x03F, 0x000FD800},
+ {0x033, 0x00000020},
+ {0x03E, 0x00013FAB},
+ {0x03F, 0x000FD800},
+ {0x0ED, 0x00000000},
+ {0x0ED, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x000000FA},
+ {0x033, 0x00000001},
+ {0x03F, 0x000000F2},
+ {0x033, 0x00000002},
+ {0x03F, 0x000000EA},
+ {0x033, 0x00000003},
+ {0x03F, 0x000000E2},
+ {0x033, 0x00000004},
+ {0x03F, 0x000000DA},
+ {0x033, 0x00000005},
+ {0x03F, 0x000000D2},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000CA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000C2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000B0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000A0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000098},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000090},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000088},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000080},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000038},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000030},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000028},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000020},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000018},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000010},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000008},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x0B9, 0x00020440},
+ {0x018, 0x00001001},
+ {0x10018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x10002, 0x0000000D},
+ {0x0EE, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x0EE, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0xA0000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0xB0000000, 0x00000000},
+ {0x0EB, 0x00000000},
+ {0x030, 0x000109B0},
+ {0x030, 0x000189B0},
+ {0x0EB, 0x00000000},
+ {0x0EE, 0x00000010},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000001},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000026},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000027},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000028},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000030},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000036},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000037},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000060},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000066},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000067},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000068},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000006E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000006F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000070},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000076},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000077},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000078},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000007E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000007F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000FF},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000120},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000126},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000127},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000128},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000012E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000012F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000130},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000136},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000137},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000160},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000166},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000167},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000168},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000016E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000016F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000170},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000176},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000177},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000178},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000017E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000017F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001FF},
+ {0x03F, 0x00000003},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000008},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000009},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000AFFF},
+ {0x033, 0x0000000C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000010},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000011},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000012},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000013},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000014},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000017},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000018},
+ {0x03F, 0x0000FBFF},
+ {0x033, 0x00000019},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000020},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000021},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000022},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000023},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000024},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000025},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000026},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000027},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000028},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000029},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000030},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000031},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000032},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000033},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000034},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000035},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000036},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000037},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000038},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000039},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000003A},
+ {0x03F, 0x0000EFFF},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000040},
+ {0x033, 0x00000000},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000001},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000002},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000003},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000004},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000005},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000006},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000007},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000008},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000009},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000010},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000011},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000012},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000013},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000014},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000015},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000016},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000017},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000018},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000019},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001B},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001C},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001D},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001E},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001F},
+ {0x03F, 0x00004344},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000020},
+ {0x033, 0x00000010},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000011},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000012},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000013},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000020},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000021},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000022},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000023},
+ {0x03F, 0x00000200},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0xB0000000, 0x00000000},
+ {0x030, 0x00068000},
+ {0x030, 0x00070000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000006},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x00000007},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x0000000E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x0000000F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000012},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021C58},
+ {0x033, 0x00000013},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022B58},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023C58},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00025A58},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x00021C58},
+ {0x033, 0x00000017},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000018},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00025A58},
+ {0x033, 0x00000019},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00025A58},
+ {0x033, 0x0000001A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000001B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000001C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000001D},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00025A58},
+ {0x033, 0x0000001E},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000001F},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000020},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000021},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000022},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000023},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000024},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000025},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000026},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000027},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000028},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000029},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000002A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000002B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x0000002C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000002D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000002E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000002F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000030},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000031},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000032},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000033},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000034},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000035},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000036},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x00000037},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x00000038},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x00000039},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000003A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000003B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x033, 0x0000003C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000003D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002CD58},
+ {0x033, 0x0000003E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00021E58},
+ {0x033, 0x0000003F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00022D58},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000800},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000031},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000023},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x0EC, 0x00000400},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000030},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000021},
+ {0x0EC, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x033, 0x00000000},
+ {0x008, 0x00060280},
+ {0x009, 0x00030400},
+ {0x0EF, 0x00000000},
+ {0x0A7, 0x00080308},
+ {0x066, 0x00006000},
+ {0x0EF, 0x00000400},
+ {0x030, 0x000001FF},
+ {0x030, 0x000081FF},
+ {0x030, 0x000101FF},
+ {0x030, 0x000181FF},
+ {0x030, 0x000201FF},
+ {0x030, 0x000281FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x0003017F},
+ {0xB0000000, 0x00000000},
+ {0x030, 0x000380FB},
+ {0x0EF, 0x00000000},
+ {0x06E, 0x00077A18},
+ {0x06D, 0x00000C31},
+ {0x06A, 0x000E0F8A},
+ {0x06B, 0x000018A0},
+ {0x06F, 0x000F81FC},
+ {0x05E, 0x0000001F},
+ {0x0EF, 0x00000200},
+ {0x030, 0x0003D407},
+ {0x030, 0x00035A87},
+ {0x030, 0x0002CF07},
+ {0x030, 0x00024F07},
+ {0x030, 0x0001CF07},
+ {0x030, 0x00014F07},
+ {0x030, 0x0000CF07},
+ {0x030, 0x00004F07},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00080000},
+ {0x030, 0x00008038},
+ {0x030, 0x00010038},
+ {0x030, 0x00018038},
+ {0x030, 0x00020038},
+ {0x030, 0x00028038},
+ {0x030, 0x00030038},
+ {0x030, 0x0003803C},
+ {0x030, 0x0004003C},
+ {0x030, 0x0004803C},
+ {0x030, 0x0005003C},
+ {0x030, 0x0005803C},
+ {0x030, 0x0006003C},
+ {0x030, 0x0006803C},
+ {0x030, 0x0007003C},
+ {0x0EB, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0xA0000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00001000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000003C},
+ {0x03F, 0x000003E7},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007B},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000007F},
+ {0x03F, 0x000003E7},
+ {0x0EE, 0x00000000},
+ {0x100EE, 0x00004000},
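+ /*
+  * The {0x80xxxxxx}/{0x90xxxxxx}/{0xA0000000}/{0xB0000000} entries in
+  * this table are presumably condition markers (IF/ELIF/ELSE/ENDIF
+  * branches keyed to chip cut and RFE type) rather than RF register
+  * writes; the table loader is assumed to select the first matching
+  * branch and apply only the {addr, data} pairs inside it.
+  */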
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
	+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10030, 0x000000FC},
+ {0x10030, 0x000004F9},
+ {0x10030, 0x000008F6},
+ {0x10030, 0x00000CF3},
+ {0x10030, 0x000010F0},
+ {0x10030, 0x000014ED},
+ {0x10030, 0x000018AC},
+ {0x10030, 0x00001CA9},
+ {0x10030, 0x00002069},
+ {0x10030, 0x00002466},
+ {0x10030, 0x00002829},
+ {0x10030, 0x00002C26},
+ {0x10030, 0x00003023},
+ {0x10030, 0x00003420},
+ {0x10030, 0x0000381D},
+ {0x10030, 0x00003C1A},
+ {0x10030, 0x00004017},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x000600F6},
+ {0x10030, 0x000604F3},
+ {0x10030, 0x000608F0},
+ {0x10030, 0x00060CED},
+ {0x10030, 0x000610EA},
+ {0x10030, 0x000614E7},
+ {0x10030, 0x000618E4},
+ {0x10030, 0x00061CE1},
+ {0x10030, 0x000620DE},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628D8},
+ {0x10030, 0x00062CD5},
+ {0x10030, 0x000630D2},
+ {0x10030, 0x000634CF},
+ {0x10030, 0x000638CC},
+ {0x10030, 0x00063C09},
+ {0x10030, 0x00064006},
+ {0x10030, 0x000680F5},
+ {0x10030, 0x000684F2},
+ {0x10030, 0x000688EF},
+ {0x10030, 0x00068CEC},
+ {0x10030, 0x000690E9},
+ {0x10030, 0x000694E6},
+ {0x10030, 0x000698E3},
+ {0x10030, 0x00069CE0},
+ {0x10030, 0x0006A0DD},
+ {0x10030, 0x0006A4DA},
+ {0x10030, 0x0006A8D7},
+ {0x10030, 0x0006ACD4},
+ {0x10030, 0x0006B0D1},
+ {0x10030, 0x0006B4CE},
+ {0x10030, 0x0006B8CB},
+ {0x10030, 0x0006BC08},
+ {0x10030, 0x0006C005},
+ {0x10030, 0x000700F5},
+ {0x10030, 0x000704F2},
+ {0x10030, 0x000708EF},
+ {0x10030, 0x00070CEC},
+ {0x10030, 0x000710E9},
+ {0x10030, 0x000714E6},
+ {0x10030, 0x000718E3},
+ {0x10030, 0x00071CE0},
+ {0x10030, 0x000720DD},
+ {0x10030, 0x000724DA},
+ {0x10030, 0x000728D7},
+ {0x10030, 0x00072CD4},
+ {0x10030, 0x000730D1},
+ {0x10030, 0x000734CE},
+ {0x10030, 0x000738CB},
+ {0x10030, 0x00073C08},
+ {0x10030, 0x00074005},
+ {0x10030, 0x000780F4},
+ {0x10030, 0x000784F1},
+ {0x10030, 0x000788EE},
+ {0x10030, 0x00078CEB},
+ {0x10030, 0x000790E8},
+ {0x10030, 0x000794E5},
+ {0x10030, 0x000798E2},
+ {0x10030, 0x00079CDF},
+ {0x10030, 0x0007A0DC},
+ {0x10030, 0x0007A4D9},
+ {0x10030, 0x0007A8D6},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B0D0},
+ {0x10030, 0x0007B4CD},
+ {0x10030, 0x0007B8CA},
+ {0x10030, 0x0007BC07},
+ {0x10030, 0x0007C004},
+ {0x100EE, 0x00000000},
+ {0x0EF, 0x00002000},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000004},
+ {0x033, 0x00000009},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000F},
+ {0x03F, 0x00000002},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00040000},
+ {0x030, 0x000109B7},
+ {0x0EB, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000078},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000120},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000126},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000127},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000128},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000012E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000012F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000130},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000136},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000137},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000160},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000166},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000167},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000168},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000016E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000016F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000170},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000176},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000177},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000178},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000017E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000017F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x10005, 0x00000001},
+ {0x100EE, 0x00000400},
+ {0x10030, 0x00000000},
+ {0x10030, 0x00001000},
+ {0x10030, 0x00002000},
+ {0x10030, 0x00003000},
+ {0x10030, 0x00004000},
+ {0x10030, 0x00005000},
+ {0x10030, 0x00006003},
+ {0x10030, 0x00007003},
+ {0x10030, 0x00008000},
+ {0x10030, 0x00009000},
+ {0x10030, 0x0000A000},
+ {0x10030, 0x0000B000},
+ {0x10030, 0x0000C000},
+ {0x10030, 0x0000D000},
+ {0x10030, 0x0000E003},
+ {0x10030, 0x0000F003},
+ {0x10030, 0x00010000},
+ {0x10030, 0x00011000},
+ {0x10030, 0x00012000},
+ {0x10030, 0x00013000},
+ {0x10030, 0x00014000},
+ {0x10030, 0x00015000},
+ {0x10030, 0x00016003},
+ {0x10030, 0x00017003},
+ {0x10030, 0x00018000},
+ {0x10030, 0x00019000},
+ {0x10030, 0x0001A000},
+ {0x10030, 0x0001B000},
+ {0x10030, 0x0001C000},
+ {0x10030, 0x0001D000},
+ {0x10030, 0x0001E003},
+ {0x10030, 0x0001F003},
+ {0x10030, 0x00020000},
+ {0x10030, 0x00021000},
+ {0x10030, 0x00022000},
+ {0x10030, 0x00023000},
+ {0x10030, 0x00024000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00026003},
+ {0x10030, 0x00027003},
+ {0x10030, 0x00028000},
+ {0x10030, 0x00029000},
+ {0x10030, 0x0002A000},
+ {0x10030, 0x0002B000},
+ {0x10030, 0x0002C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0002E003},
+ {0x10030, 0x0002F003},
+ {0x10030, 0x00030000},
+ {0x10030, 0x00031000},
+ {0x10030, 0x00032000},
+ {0x10030, 0x00033000},
+ {0x10030, 0x00034000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00036003},
+ {0x10030, 0x00037003},
+ {0x10030, 0x00038000},
+ {0x10030, 0x00039000},
+ {0x10030, 0x0003A000},
+ {0x10030, 0x0003B000},
+ {0x10030, 0x0003C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0003E003},
+ {0x10030, 0x0003F003},
+ {0x10030, 0x00060000},
+ {0x10030, 0x00061000},
+ {0x10030, 0x00062000},
+ {0x10030, 0x00063000},
+ {0x10030, 0x00064000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00067003},
+ {0x10030, 0x00068000},
+ {0x10030, 0x00069000},
+ {0x10030, 0x0006A000},
+ {0x10030, 0x0006B000},
+ {0x10030, 0x0006C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0006F003},
+ {0x10030, 0x00070000},
+ {0x10030, 0x00071000},
+ {0x10030, 0x00072000},
+ {0x10030, 0x00073000},
+ {0x10030, 0x00074000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00077003},
+ {0x10030, 0x00078000},
+ {0x10030, 0x00079000},
+ {0x10030, 0x0007A000},
+ {0x10030, 0x0007B000},
+ {0x10030, 0x0007C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0007F003},
+ {0x100EE, 0x00000000},
+ {0x0FE, 0x00000048},
+};
+
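+/*
+ * Reader's note (not part of the generated data): entries whose first
+ * word is 0x8xxxxxxx, 0x9xxxxxxx, 0xA0000000 or 0xB0000000, each paired
+ * with a {0x40000000, 0x00000000} mask word, do not appear to be plain
+ * register writes. They seem to act as IF / ELSE-IF / ELSE / ENDIF
+ * markers that the PHY table loader evaluates against the chip cut/RF
+ * version, applying only the {addr, data} pairs of the matching branch.
+ * This is a best-effort reading of the table layout above and below,
+ * not a statement taken from this patch.
+ */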
+static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0020000, 0x00000001},
+ {0xF0320000, 0x00000002},
+ {0xF0330000, 0x00000003},
+ {0xF0340000, 0x00000004},
+ {0xF0350000, 0x00000005},
+ {0xF0360000, 0x00000006},
+ {0xF0010001, 0x00000007},
+ {0xF0020001, 0x00000008},
+ {0xF0030001, 0x00000009},
+ {0xF0040001, 0x0000000A},
+ {0xF0050001, 0x0000000B},
+ {0xF0070001, 0x0000000C},
+ {0xF0320001, 0x0000000D},
+ {0xF0330001, 0x0000000E},
+ {0xF0340001, 0x0000000F},
+ {0xF0350001, 0x00000010},
+ {0xF0360001, 0x00000011},
+ {0xF03F0001, 0x00000012},
+ {0xF0400001, 0x00000013},
+ {0x005, 0x00000000},
+ {0x10005, 0x00000000},
+ {0x0B9, 0x00020440},
+ {0x000, 0x00030001},
+ {0x10000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x10018, 0x00011124},
+ {0x05F, 0x00000038},
+ {0x097, 0x00043200},
+ {0x0A6, 0x00066DB7},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x033, 0x00000003},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x00000002},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000000},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x033, 0x00000015},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00010500},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x033, 0x00000013},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00028B00},
+ {0x033, 0x00000012},
+ {0x03E, 0x00000000},
+ {0x03F, 0x0009AB00},
+ {0x033, 0x00000011},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x0EF, 0x00000000},
+ {0x10055, 0x00080080},
+ {0x000, 0x00033C01},
+ {0x10000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x096, 0x00015200},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0xA0000000, 0x00000000},
+ {0x067, 0x0004D000},
+ {0x0DA, 0x000D4009},
+ {0xB0000000, 0x00000000},
+ {0x057, 0x0000D589},
+ {0x05A, 0x0007F0F8},
+ {0x043, 0x00005000},
+ {0x018, 0x00001001},
+ {0x10018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x10002, 0x0000000D},
+ {0x0EE, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x0EE, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0xA0000000, 0x00000000},
+ {0x08F, 0x000D1752},
+ {0xB0000000, 0x00000000},
+ {0x0EB, 0x00000000},
+ {0x030, 0x000109B0},
+ {0x030, 0x000189B0},
+ {0x0EB, 0x00000000},
+ {0x0EE, 0x00000010},
+ {0x033, 0x00000006},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000001},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000026},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000027},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000028},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000030},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000036},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000037},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000060},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000066},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000067},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000068},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000006E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000006F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000070},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000076},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000077},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000078},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000007E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000007F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000000FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000000FF},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000120},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000126},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000127},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000128},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000012E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000012F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000130},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000136},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000137},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000160},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000166},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000167},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000168},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000016E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000016F},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000170},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000176},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000177},
+ {0x03F, 0x00000003},
+ {0x033, 0x00000178},
+ {0x03F, 0x00060001},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00000001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000017E},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000017F},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001A6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001AE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001AF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001B6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001B7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001E6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001EE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001EF},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001F6},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F7},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00060001},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00000001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00000002},
+ {0x033, 0x000001FE},
+ {0x03F, 0x00000003},
+ {0x033, 0x000001FF},
+ {0x03F, 0x00000003},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000004},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000008},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000009},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000AFFF},
+ {0x033, 0x0000000C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000000F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000010},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000011},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000012},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000013},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000014},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000E3FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000017},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000018},
+ {0x03F, 0x0000FBFF},
+ {0x033, 0x00000019},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000001F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000020},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000021},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000022},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000023},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000024},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000025},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000026},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000027},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000028},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000029},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002A},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002B},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002C},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002D},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002E},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000002F},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000030},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000031},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000032},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000033},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000034},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000035},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000036},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000037},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000038},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x00000039},
+ {0x03F, 0x0000EFFF},
+ {0x033, 0x0000003A},
+ {0x03F, 0x0000EFFF},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000040},
+ {0x033, 0x00000000},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000001},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000002},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000003},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000004},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000005},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000006},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000007},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000008},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000009},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000010},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000011},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000012},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000013},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000014},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000015},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000016},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000017},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000018},
+ {0x03F, 0x00004344},
+ {0x033, 0x00000019},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001A},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001B},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001C},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001D},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001E},
+ {0x03F, 0x00004344},
+ {0x033, 0x0000001F},
+ {0x03F, 0x00004344},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000020},
+ {0x033, 0x00000010},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000011},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000012},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000013},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000020},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000021},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000022},
+ {0x03F, 0x00000200},
+ {0x033, 0x00000023},
+ {0x03F, 0x00000200},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0xB0000000, 0x00000000},
+ {0x030, 0x00068000},
+ {0x030, 0x00070000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000005},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000006},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x00000007},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000000D},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x0000000E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x0000000F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000012},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023958},
+ {0x033, 0x00000013},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00022A58},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00029858},
+ {0x033, 0x00000016},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x00023958},
+ {0x033, 0x00000017},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000018},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00029858},
+ {0x033, 0x00000019},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00029858},
+ {0x033, 0x0000001A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000001B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000001C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000001D},
+ {0x03E, 0x0000001B},
+ {0x03F, 0x00029858},
+ {0x033, 0x0000001E},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000001F},
+ {0x03E, 0x00000013},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000020},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000021},
+ {0x03E, 0x0000001C},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000022},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000023},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000024},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000025},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000026},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000027},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000028},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000029},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000002A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000002B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000002C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000002D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000002E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000002F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000030},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000031},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000032},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000033},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000034},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000035},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000036},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000037},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x00000038},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x00000039},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000003A},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000003B},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000003C},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000003D},
+ {0x03E, 0x00000014},
+ {0x03F, 0x0002AC58},
+ {0x033, 0x0000003E},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x033, 0x0000003F},
+ {0x03E, 0x00000014},
+ {0x03F, 0x00023A58},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00000800},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000031},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000023},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000007},
+ {0x0EE, 0x00000000},
+ {0x0EC, 0x00000400},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000030},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000021},
+ {0x0EC, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x033, 0x00000000},
+ {0x008, 0x00060280},
+ {0x009, 0x00030400},
+ {0x0EF, 0x00000000},
+ {0x0A7, 0x00080308},
+ {0x066, 0x00006000},
+ {0x0EF, 0x00000400},
+ {0x030, 0x000001FF},
+ {0x030, 0x000081FF},
+ {0x030, 0x000101FF},
+ {0x030, 0x000181FF},
+ {0x030, 0x000201FF},
+ {0x030, 0x000281FF},
+ {0x030, 0x0003017F},
+ {0x030, 0x000380FB},
+ {0x0EF, 0x00000000},
+ {0x06E, 0x00077A18},
+ {0x06D, 0x00000C31},
+ {0x06A, 0x000E0F8A},
+ {0x06B, 0x000018A0},
+ {0x06F, 0x000F81FC},
+ {0x05E, 0x0000001F},
+ {0x0EF, 0x00000200},
+ {0x030, 0x0003D407},
+ {0x030, 0x00035A87},
+ {0x030, 0x0002CF07},
+ {0x030, 0x00024F07},
+ {0x030, 0x0001CF07},
+ {0x030, 0x00014F07},
+ {0x030, 0x0000CF07},
+ {0x030, 0x00004F07},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00080000},
+ {0x030, 0x00008038},
+ {0x030, 0x00010038},
+ {0x030, 0x00018038},
+ {0x030, 0x00020038},
+ {0x030, 0x00028038},
+ {0x030, 0x00030038},
+ {0x030, 0x0003803C},
+ {0x030, 0x0004003C},
+ {0x030, 0x0004803C},
+ {0x030, 0x0005003C},
+ {0x030, 0x0005803C},
+ {0x030, 0x0006003C},
+ {0x030, 0x0006803C},
+ {0x030, 0x0007003C},
+ {0x0EB, 0x00000000},
+ {0x094, 0x000000FC},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0xA0000000, 0x00000000},
+ {0x095, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00001000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007B},
+ {0x03F, 0x000002E7},
+ {0x033, 0x0000007F},
+ {0x03F, 0x000003E7},
+ {0x0EE, 0x00000000},
+ {0x100EE, 0x00004000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000201DF},
+ {0x10030, 0x000205D9},
+ {0x10030, 0x000209D3},
+ {0x10030, 0x00020D99},
+ {0x10030, 0x00021193},
+ {0x10030, 0x0002155F},
+ {0x10030, 0x00021959},
+ {0x10030, 0x00021D21},
+ {0x10030, 0x00022119},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228D9},
+ {0x10030, 0x00022C9F},
+ {0x10030, 0x00023099},
+ {0x10030, 0x0002345F},
+ {0x10030, 0x00023859},
+ {0x10030, 0x00023C1F},
+ {0x10030, 0x00024019},
+ {0x10030, 0x00024413},
+ {0x10030, 0x000281CD},
+ {0x10030, 0x000285DB},
+ {0x10030, 0x000289D5},
+ {0x10030, 0x00028D9B},
+ {0x10030, 0x0002918D},
+ {0x10030, 0x00029555},
+ {0x10030, 0x00029957},
+ {0x10030, 0x00029D1F},
+ {0x10030, 0x0002A119},
+ {0x10030, 0x0002A4DF},
+ {0x10030, 0x0002A8D9},
+ {0x10030, 0x0002AC9F},
+ {0x10030, 0x0002B099},
+ {0x10030, 0x0002B45F},
+ {0x10030, 0x0002B859},
+ {0x10030, 0x0002BC1F},
+ {0x10030, 0x0002C019},
+ {0x10030, 0x0002C413},
+ {0x10030, 0x000301D9},
+ {0x10030, 0x000305DB},
+ {0x10030, 0x000309D5},
+ {0x10030, 0x00030D9B},
+ {0x10030, 0x00031195},
+ {0x10030, 0x0003155D},
+ {0x10030, 0x00031955},
+ {0x10030, 0x00031D1D},
+ {0x10030, 0x00032119},
+ {0x10030, 0x000324DF},
+ {0x10030, 0x000328D9},
+ {0x10030, 0x00032C9F},
+ {0x10030, 0x00033099},
+ {0x10030, 0x0003345F},
+ {0x10030, 0x00033859},
+ {0x10030, 0x00033C1F},
+ {0x10030, 0x00034019},
+ {0x10030, 0x00034413},
+ {0x10030, 0x000601E1},
+ {0x10030, 0x000605DB},
+ {0x10030, 0x000609D5},
+ {0x10030, 0x00060D9B},
+ {0x10030, 0x00061195},
+ {0x10030, 0x0006155B},
+ {0x10030, 0x00061957},
+ {0x10030, 0x00061D1F},
+ {0x10030, 0x00062119},
+ {0x10030, 0x000624DF},
+ {0x10030, 0x000628D9},
+ {0x10030, 0x00062C9F},
+ {0x10030, 0x00063099},
+ {0x10030, 0x0006345F},
+ {0x10030, 0x00063859},
+ {0x10030, 0x00063C1F},
+ {0x10030, 0x00064019},
+ {0x10030, 0x00064413},
+ {0x10030, 0x000681E1},
+ {0x10030, 0x000685DB},
+ {0x10030, 0x000689D5},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955B},
+ {0x10030, 0x00069957},
+ {0x10030, 0x00069D1F},
+ {0x10030, 0x0006A119},
+ {0x10030, 0x0006A4DF},
+ {0x10030, 0x0006A8D9},
+ {0x10030, 0x0006AC9F},
+ {0x10030, 0x0006B099},
+ {0x10030, 0x0006B45F},
+ {0x10030, 0x0006B859},
+ {0x10030, 0x0006BC1F},
+ {0x10030, 0x0006C019},
+ {0x10030, 0x0006C413},
+ {0x10030, 0x000701E1},
+ {0x10030, 0x000705DB},
+ {0x10030, 0x000709D5},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071957},
+ {0x10030, 0x00071D1F},
+ {0x10030, 0x00072119},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072C9F},
+ {0x10030, 0x00073099},
+ {0x10030, 0x0007345F},
+ {0x10030, 0x00073859},
+ {0x10030, 0x00073C1F},
+ {0x10030, 0x00074019},
+ {0x10030, 0x00074413},
+ {0x10030, 0x000781DF},
+ {0x10030, 0x000785D9},
+ {0x10030, 0x000789D3},
+ {0x10030, 0x00078D99},
+ {0x10030, 0x00079193},
+ {0x10030, 0x0007955F},
+ {0x10030, 0x00079959},
+ {0x10030, 0x00079D21},
+ {0x10030, 0x0007A115},
+ {0x10030, 0x0007A4DF},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007AC9F},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B45F},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC1F},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x10030, 0x00000000},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201ED},
+ {0x10030, 0x000205AD},
+ {0x10030, 0x000209A7},
+ {0x10030, 0x00020DA1},
+ {0x10030, 0x0002119B},
+ {0x10030, 0x00021561},
+ {0x10030, 0x0002195B},
+ {0x10030, 0x00021D27},
+ {0x10030, 0x00022121},
+ {0x10030, 0x000224E9},
+ {0x10030, 0x000228E3},
+ {0x10030, 0x00022CA9},
+ {0x10030, 0x000230A3},
+ {0x10030, 0x00023469},
+ {0x10030, 0x00023863},
+ {0x10030, 0x00023C29},
+ {0x10030, 0x00024023},
+ {0x10030, 0x0002441D},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285AF},
+ {0x10030, 0x000289A9},
+ {0x10030, 0x00028DA3},
+ {0x10030, 0x0002919D},
+ {0x10030, 0x00029563},
+ {0x10030, 0x0002995D},
+ {0x10030, 0x00029D25},
+ {0x10030, 0x0002A11F},
+ {0x10030, 0x0002A4E7},
+ {0x10030, 0x0002A8E1},
+ {0x10030, 0x0002ACA7},
+ {0x10030, 0x0002B0A1},
+ {0x10030, 0x0002B467},
+ {0x10030, 0x0002B861},
+ {0x10030, 0x0002BC27},
+ {0x10030, 0x0002C021},
+ {0x10030, 0x0002C41B},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305AF},
+ {0x10030, 0x000309A9},
+ {0x10030, 0x00030DA3},
+ {0x10030, 0x0003119D},
+ {0x10030, 0x00031563},
+ {0x10030, 0x0003195D},
+ {0x10030, 0x00031D25},
+ {0x10030, 0x0003211F},
+ {0x10030, 0x000324E7},
+ {0x10030, 0x000328E1},
+ {0x10030, 0x00032CA7},
+ {0x10030, 0x000330A1},
+ {0x10030, 0x00033467},
+ {0x10030, 0x00033861},
+ {0x10030, 0x00033C27},
+ {0x10030, 0x00034021},
+ {0x10030, 0x0003441B},
+ {0x10030, 0x000601EB},
+ {0x10030, 0x000605AB},
+ {0x10030, 0x000609A5},
+ {0x10030, 0x00060D9F},
+ {0x10030, 0x00061199},
+ {0x10030, 0x00061593},
+ {0x10030, 0x00061959},
+ {0x10030, 0x00061D53},
+ {0x10030, 0x0006211B},
+ {0x10030, 0x00062515},
+ {0x10030, 0x000628DD},
+ {0x10030, 0x00062CD7},
+ {0x10030, 0x0006309D},
+ {0x10030, 0x00063497},
+ {0x10030, 0x0006385D},
+ {0x10030, 0x00063C57},
+ {0x10030, 0x0006401D},
+ {0x10030, 0x00064417},
+ {0x10030, 0x000681E7},
+ {0x10030, 0x000685A7},
+ {0x10030, 0x000689A1},
+ {0x10030, 0x00068D9B},
+ {0x10030, 0x00069195},
+ {0x10030, 0x0006955F},
+ {0x10030, 0x00069959},
+ {0x10030, 0x00069D21},
+ {0x10030, 0x0006A11B},
+ {0x10030, 0x0006A4E3},
+ {0x10030, 0x0006A8DD},
+ {0x10030, 0x0006ACA5},
+ {0x10030, 0x0006B09F},
+ {0x10030, 0x0006B465},
+ {0x10030, 0x0006B85F},
+ {0x10030, 0x0006BC25},
+ {0x10030, 0x0006C01F},
+ {0x10030, 0x0006C419},
+ {0x10030, 0x000701E7},
+ {0x10030, 0x000705A7},
+ {0x10030, 0x000709A1},
+ {0x10030, 0x00070D9B},
+ {0x10030, 0x00071195},
+ {0x10030, 0x0007155B},
+ {0x10030, 0x00071955},
+ {0x10030, 0x00071D1D},
+ {0x10030, 0x00072117},
+ {0x10030, 0x000724DF},
+ {0x10030, 0x000728D9},
+ {0x10030, 0x00072CA1},
+ {0x10030, 0x0007309B},
+ {0x10030, 0x00073461},
+ {0x10030, 0x0007385B},
+ {0x10030, 0x00073C21},
+ {0x10030, 0x0007401B},
+ {0x10030, 0x0007441B},
+ {0x10030, 0x000781E9},
+ {0x10030, 0x000785A9},
+ {0x10030, 0x000789A3},
+ {0x10030, 0x00078D9D},
+ {0x10030, 0x00079197},
+ {0x10030, 0x00079591},
+ {0x10030, 0x00079957},
+ {0x10030, 0x00079D51},
+ {0x10030, 0x0007A119},
+ {0x10030, 0x0007A513},
+ {0x10030, 0x0007A8D9},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B099},
+ {0x10030, 0x0007B493},
+ {0x10030, 0x0007B859},
+ {0x10030, 0x0007BC53},
+ {0x10030, 0x0007C019},
+ {0x10030, 0x0007C413},
+ {0xB0000000, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x10030, 0x000000FC},
+ {0x10030, 0x000004F9},
+ {0x10030, 0x000008F6},
+ {0x10030, 0x00000CF3},
+ {0x10030, 0x000010F0},
+ {0x10030, 0x000014ED},
+ {0x10030, 0x000018AC},
+ {0x10030, 0x00001CA9},
+ {0x10030, 0x00002069},
+ {0x10030, 0x00002466},
+ {0x10030, 0x00002829},
+ {0x10030, 0x00002C26},
+ {0x10030, 0x00003023},
+ {0x10030, 0x00003420},
+ {0x10030, 0x0000381D},
+ {0x10030, 0x00003C1A},
+ {0x10030, 0x00004017},
+ {0x100EE, 0x00000000},
+ {0x100EE, 0x00002000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x000600F6},
+ {0x10030, 0x000604F3},
+ {0x10030, 0x000608F0},
+ {0x10030, 0x00060CED},
+ {0x10030, 0x000610EA},
+ {0x10030, 0x000614E7},
+ {0x10030, 0x000618E4},
+ {0x10030, 0x00061CE1},
+ {0x10030, 0x000620DE},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628D8},
+ {0x10030, 0x00062CD5},
+ {0x10030, 0x000630D2},
+ {0x10030, 0x000634CF},
+ {0x10030, 0x000638CC},
+ {0x10030, 0x00063C09},
+ {0x10030, 0x00064006},
+ {0x10030, 0x000680F5},
+ {0x10030, 0x000684F2},
+ {0x10030, 0x000688EF},
+ {0x10030, 0x00068CEC},
+ {0x10030, 0x000690E9},
+ {0x10030, 0x000694E6},
+ {0x10030, 0x000698E3},
+ {0x10030, 0x00069CE0},
+ {0x10030, 0x0006A0DD},
+ {0x10030, 0x0006A4DA},
+ {0x10030, 0x0006A8D7},
+ {0x10030, 0x0006ACD4},
+ {0x10030, 0x0006B0D1},
+ {0x10030, 0x0006B4CE},
+ {0x10030, 0x0006B8CB},
+ {0x10030, 0x0006BC08},
+ {0x10030, 0x0006C005},
+ {0x10030, 0x000700F5},
+ {0x10030, 0x000704F2},
+ {0x10030, 0x000708EF},
+ {0x10030, 0x00070CEC},
+ {0x10030, 0x000710E9},
+ {0x10030, 0x000714E6},
+ {0x10030, 0x000718E3},
+ {0x10030, 0x00071CE0},
+ {0x10030, 0x000720DD},
+ {0x10030, 0x000724DA},
+ {0x10030, 0x000728D7},
+ {0x10030, 0x00072CD4},
+ {0x10030, 0x000730D1},
+ {0x10030, 0x000734CE},
+ {0x10030, 0x000738CB},
+ {0x10030, 0x00073C08},
+ {0x10030, 0x00074005},
+ {0x10030, 0x000780F4},
+ {0x10030, 0x000784F1},
+ {0x10030, 0x000788EE},
+ {0x10030, 0x00078CEB},
+ {0x10030, 0x000790E8},
+ {0x10030, 0x000794E5},
+ {0x10030, 0x000798E2},
+ {0x10030, 0x00079CDF},
+ {0x10030, 0x0007A0DC},
+ {0x10030, 0x0007A4D9},
+ {0x10030, 0x0007A8D6},
+ {0x10030, 0x0007ACD3},
+ {0x10030, 0x0007B0D0},
+ {0x10030, 0x0007B4CD},
+ {0x10030, 0x0007B8CA},
+ {0x10030, 0x0007BC07},
+ {0x10030, 0x0007C004},
+ {0x100EE, 0x00000000},
+ {0x0EF, 0x00002000},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000004},
+ {0x033, 0x00000009},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00000003},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000002},
+ {0x033, 0x0000000F},
+ {0x03F, 0x00000002},
+ {0x0EF, 0x00000000},
+ {0x0EB, 0x00040000},
+ {0x030, 0x000109B7},
+ {0x0EB, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000020},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000021},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000022},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000023},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000024},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000025},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000029},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000031},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000032},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000033},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000034},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000035},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000061},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000062},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000063},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000064},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000065},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903F0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000069},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000006A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000006C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000006D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000071},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000072},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000073},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000074},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000075},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000078},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000079},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000007A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000007B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000007C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000000F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000000FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000000FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000000FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000000FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000000FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000000FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000120},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000121},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000122},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000123},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000124},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000125},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000126},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000127},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000128},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000129},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000012A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000012B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000012C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000012D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000012E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000012F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000130},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000131},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000132},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000133},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000134},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000135},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000136},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000137},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000160},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000161},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000162},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000163},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000164},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000165},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000166},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000167},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000168},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000169},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000016A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000016B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000016C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000016D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000016E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000016F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000170},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000171},
+ {0x03F, 0x00060032},
+ {0x033, 0x00000172},
+ {0x03F, 0x00050042},
+ {0x033, 0x00000173},
+ {0x03F, 0x00040042},
+ {0x033, 0x00000174},
+ {0x03F, 0x00008001},
+ {0x033, 0x00000175},
+ {0x03F, 0x00008002},
+ {0x033, 0x00000176},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000177},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000178},
+ {0x03F, 0x00050002},
+ {0x033, 0x00000179},
+ {0x03F, 0x00060032},
+ {0x033, 0x0000017A},
+ {0x03F, 0x00050042},
+ {0x033, 0x0000017B},
+ {0x03F, 0x00040042},
+ {0x033, 0x0000017C},
+ {0x03F, 0x00008001},
+ {0x033, 0x0000017D},
+ {0x03F, 0x00008002},
+ {0x033, 0x0000017E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000017F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001A0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001A2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001A3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001A4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001A5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001A8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001A9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001AA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001AB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001AC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001AD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001B0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001B1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001B2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001B3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001B4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001B5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001E0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001E2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001E3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001E4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001E5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001E8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001E9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001EA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001EB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001EC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001ED},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001F0},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F1},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001F2},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001F3},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001F4},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001F5},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001F8},
+ {0x03F, 0x00050002},
+ {0x033, 0x000001F9},
+ {0x03F, 0x00060032},
+ {0x033, 0x000001FA},
+ {0x03F, 0x00050042},
+ {0x033, 0x000001FB},
+ {0x03F, 0x00040042},
+ {0x033, 0x000001FC},
+ {0x03F, 0x00008001},
+ {0x033, 0x000001FD},
+ {0x03F, 0x00008002},
+ {0x033, 0x000001FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x000001FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x10005, 0x00000001},
+ {0x100EE, 0x00000400},
+ {0x10030, 0x00000000},
+ {0x10030, 0x00001000},
+ {0x10030, 0x00002000},
+ {0x10030, 0x00003000},
+ {0x10030, 0x00004000},
+ {0x10030, 0x00005000},
+ {0x10030, 0x00006003},
+ {0x10030, 0x00007003},
+ {0x10030, 0x00008000},
+ {0x10030, 0x00009000},
+ {0x10030, 0x0000A000},
+ {0x10030, 0x0000B000},
+ {0x10030, 0x0000C000},
+ {0x10030, 0x0000D000},
+ {0x10030, 0x0000E003},
+ {0x10030, 0x0000F003},
+ {0x10030, 0x00010000},
+ {0x10030, 0x00011000},
+ {0x10030, 0x00012000},
+ {0x10030, 0x00013000},
+ {0x10030, 0x00014000},
+ {0x10030, 0x00015000},
+ {0x10030, 0x00016003},
+ {0x10030, 0x00017003},
+ {0x10030, 0x00018000},
+ {0x10030, 0x00019000},
+ {0x10030, 0x0001A000},
+ {0x10030, 0x0001B000},
+ {0x10030, 0x0001C000},
+ {0x10030, 0x0001D000},
+ {0x10030, 0x0001E003},
+ {0x10030, 0x0001F003},
+ {0x10030, 0x00020000},
+ {0x10030, 0x00021000},
+ {0x10030, 0x00022000},
+ {0x10030, 0x00023000},
+ {0x10030, 0x00024000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00026003},
+ {0x10030, 0x00027003},
+ {0x10030, 0x00028000},
+ {0x10030, 0x00029000},
+ {0x10030, 0x0002A000},
+ {0x10030, 0x0002B000},
+ {0x10030, 0x0002C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0002E003},
+ {0x10030, 0x0002F003},
+ {0x10030, 0x00030000},
+ {0x10030, 0x00031000},
+ {0x10030, 0x00032000},
+ {0x10030, 0x00033000},
+ {0x10030, 0x00034000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00036003},
+ {0x10030, 0x00037003},
+ {0x10030, 0x00038000},
+ {0x10030, 0x00039000},
+ {0x10030, 0x0003A000},
+ {0x10030, 0x0003B000},
+ {0x10030, 0x0003C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0003E003},
+ {0x10030, 0x0003F003},
+ {0x10030, 0x00060000},
+ {0x10030, 0x00061000},
+ {0x10030, 0x00062000},
+ {0x10030, 0x00063000},
+ {0x10030, 0x00064000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00067003},
+ {0x10030, 0x00068000},
+ {0x10030, 0x00069000},
+ {0x10030, 0x0006A000},
+ {0x10030, 0x0006B000},
+ {0x10030, 0x0006C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0006F003},
+ {0x10030, 0x00070000},
+ {0x10030, 0x00071000},
+ {0x10030, 0x00072000},
+ {0x10030, 0x00073000},
+ {0x10030, 0x00074000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x00077003},
+ {0x10030, 0x00078000},
+ {0x10030, 0x00079000},
+ {0x10030, 0x0007A000},
+ {0x10030, 0x0007B000},
+ {0x10030, 0x0007C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x0007F003},
+ {0x0ED, 0x00000010},
+ {0x033, 0x00000001},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000002},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000003},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000005},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000000A},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000000A},
+ {0x0ED, 0x00000000},
+ {0x100EE, 0x00000000},
+ {0x0FE, 0x00000048},
+};
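
The repeated {0x80010000, 0} … {0x90xxxxxx, 0} … {0xA0000000, 0} … {0xB0000000, 0} runs in the table above read as an if/else-if/else/endif selection over hardware variants: each condition word is followed by a {0x40000000, 0} branch-open marker and the branch's {register, value} writes, with the 0xA0000000 arm supplying the default. Below is a minimal, self-contained sketch of one plausible interpreter for that layout. It is an illustration inferred purely from the data shape, not the in-tree rtw89 parser; every name, macro, and the meaning assigned to the low 28 condition bits is a labeled assumption.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct reg2_def {		/* mirrors the {addr, data} pairs above */
		uint32_t addr;
		uint32_t data;
	};

	/* Directives inferred from the table layout: the top nibble of a
	 * pseudo-address selects if/else-if/else/endif, and 0x40000000 opens
	 * the branch body that follows a condition word. Hypothetical names,
	 * for illustration only. */
	#define COND_IF		0x8
	#define COND_ELIF	0x9
	#define COND_ELSE	0xa
	#define COND_END	0xb
	#define BRANCH_OPEN	0x40000000

	static void write_reg(uint32_t addr, uint32_t data)
	{
		/* stand-in for the real register write */
		printf("W 0x%05x <- 0x%08x\n", (unsigned)addr, (unsigned)data);
	}

	/* Apply @tbl, keeping only writes from the branch whose 28-bit
	 * condition key matches @key (e.g. a chip-cut/RFE identifier —
	 * an assumption about what the key encodes). */
	static void apply_table(const struct reg2_def *tbl, size_t n, uint32_t key)
	{
		int matched = 0;	/* a branch in this chain already hit */
		int take = 1;		/* writes in the current region apply */
		size_t i;

		for (i = 0; i < n; i++) {
			uint32_t a = tbl[i].addr;
			uint32_t nib = a >> 28;

			if (a == BRANCH_OPEN)
				continue;	/* body-open marker, no effect here */

			if (nib == COND_IF || nib == COND_ELIF) {
				take = !matched && (a & 0x0fffffff) == key;
				matched |= take;
			} else if (nib == COND_ELSE) {
				take = !matched;
			} else if (nib == COND_END) {
				matched = 0;
				take = 1;
			} else if (take) {
				write_reg(a, tbl[i].data);
			}
		}
	}

	int main(void)
	{
		/* Tiny excerpt in the same shape as the table above. */
		static const struct reg2_def demo[] = {
			{0x033, 0x000001FF},
			{0x80010000, 0x0}, {0x40000000, 0x0}, {0x03F, 0x00000003},
			{0x90330001, 0x0}, {0x40000000, 0x0}, {0x03F, 0x00008002},
			{0xA0000000, 0x0}, {0x03F, 0x00000003},
			{0xB0000000, 0x0},
		};

		apply_table(demo, sizeof(demo) / sizeof(demo[0]), 0x0330001);
		return 0;
	}

With key 0x0330001 the demo takes the 0x90330001 branch and writes 0x03F = 0x00008002; any other key falls through to the 0xA0000000 default. This mirrors how the per-variant duplicate runs above collapse to a single register write at load time.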
+
+static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = {
+ {0x8008, 0x00000000},
+ {0x8000, 0x00000008},
+ {0x8004, 0xf0862966},
+ {0x800c, 0x78000000},
+ {0x8010, 0x88015000},
+ {0x8014, 0x80010100},
+ {0x8018, 0x10010100},
+ {0x801c, 0xa210bc00},
+ {0x8020, 0x000403e0},
+ {0x8024, 0x00072160},
+ {0x8028, 0x00180e00},
+ {0x8030, 0x400000c0},
+ {0x8034, 0x11000830},
+ {0x8038, 0x00000009},
+ {0x803c, 0x00000008},
+ {0x8040, 0x00000046},
+ {0x8044, 0x0010001f},
+ {0x8048, 0xf0000003},
+ {0x804c, 0x62ac6162},
+ {0x8050, 0xf2acf162},
+ {0x8054, 0x62ac6162},
+ {0x8058, 0xf2acf162},
+ {0x805c, 0x150c0b02},
+ {0x8060, 0x150c0b02},
+ {0x8064, 0x2aa00047},
+ {0x8074, 0x80000000},
+ {0x807c, 0x000000ee},
+ {0x8088, 0x80000000},
+ {0x808c, 0x00000000},
+ {0x80b0, 0x00000000},
+ {0x80d0, 0x00000000},
+ {0x80ec, 0x00000002},
+ {0x8098, 0x0000ff00},
+ {0x8070, 0x00e80000},
+ {0x80b0, 0xffe00fff},
+ {0x809c, 0x0000001f},
+ {0x80b8, 0x00001000},
+ {0x80bc, 0x0005001d},
+ {0x810c, 0x33112211},
+ {0x8110, 0x33112211},
+ {0x8114, 0x00000000},
+ {0x8120, 0x10010000},
+ {0x8124, 0x00000000},
+ {0x8128, 0x00000200},
+ {0x812c, 0x0000c000},
+ {0x8138, 0x40000000},
+ {0x813c, 0x40000000},
+ {0x8140, 0x00000000},
+ {0x8144, 0x0b040b03},
+ {0x8148, 0x0a040b04},
+ {0x814c, 0x0a040b04},
+ {0x8150, 0xe4e40000},
+ {0x8158, 0xffffffff},
+ {0x815c, 0xffffffff},
+ {0x8160, 0xffffffff},
+ {0x8164, 0xffffffff},
+ {0x8168, 0xffffffff},
+ {0x816c, 0x1fffffff},
+ {0x81cc, 0x00000000},
+ {0x81dc, 0x00000002},
+ {0x81e0, 0x00000000},
+ {0x81e4, 0x00000001},
+ {0x81a0, 0x00000000},
+ {0x81ac, 0x3fc20400},
+ {0x81b0, 0x3f914100},
+ {0x81bc, 0x0000005b},
+ {0x81c0, 0x0000005b},
+ {0x81b4, 0x01e0f078},
+ {0x81b8, 0x01e0f078},
+ {0x81f0, 0x0000f078},
+ {0x820c, 0x33112211},
+ {0x8210, 0x33112211},
+ {0x8214, 0x00000000},
+ {0x8220, 0x10010000},
+ {0x8224, 0x00000000},
+ {0x8228, 0x00000200},
+ {0x822c, 0x0000d000},
+ {0x8238, 0x40000000},
+ {0x823c, 0x40000000},
+ {0x8240, 0x00000000},
+ {0x8244, 0x0b040b03},
+ {0x8248, 0x0a040b04},
+ {0x824c, 0x0a040b04},
+ {0x8250, 0xe4e40000},
+ {0x8258, 0xffffffff},
+ {0x825c, 0xffffffff},
+ {0x8260, 0xffffffff},
+ {0x8264, 0xffffffff},
+ {0x8268, 0xffffffff},
+ {0x826c, 0x1fffffff},
+ {0x82cc, 0x00000000},
+ {0x82dc, 0x00000002},
+ {0x82e0, 0x00100000},
+ {0x82e4, 0x00000001},
+ {0x82a0, 0x00000000},
+ {0x82ac, 0x3fc20400},
+ {0x82b0, 0x3f914100},
+ {0x82bc, 0x0000005b},
+ {0x82c0, 0x0000005b},
+ {0x82b4, 0x01e0f078},
+ {0x82b8, 0x01e0f078},
+ {0x82f0, 0x0000f078},
+ {0x81d8, 0x00000001},
+ {0x82d8, 0x00000001},
+ {0x9500, 0x00000000},
+ {0x9504, 0x00000000},
+ {0x9508, 0x00000000},
+ {0x950c, 0x00000000},
+ {0x9510, 0x00000000},
+ {0x9514, 0x00000000},
+ {0x9518, 0x00000000},
+ {0x951c, 0x00000000},
+ {0x9520, 0x00000000},
+ {0x9524, 0x00000000},
+ {0x9528, 0x00000000},
+ {0x952c, 0x00000000},
+ {0x9530, 0x00000000},
+ {0x9534, 0x00000000},
+ {0x9538, 0x00000000},
+ {0x953c, 0x00000000},
+ {0x9540, 0x04000000},
+ {0x9544, 0x00000000},
+ {0x9548, 0x00000000},
+ {0x954c, 0x00000000},
+ {0x9550, 0x00000000},
+ {0x9554, 0x00000000},
+ {0x9558, 0x00000000},
+ {0x955c, 0x00000000},
+ {0x9560, 0x00000000},
+ {0x9564, 0x00000000},
+ {0x9568, 0x00000000},
+ {0x956c, 0x00000000},
+ {0x9570, 0x00000000},
+ {0x9574, 0x00000000},
+ {0x9578, 0x00000000},
+ {0x957c, 0x00000000},
+ {0x9580, 0x00000000},
+ {0x9584, 0x04000000},
+ {0x9588, 0x00000000},
+ {0x958c, 0x00000000},
+ {0x9590, 0x00000000},
+ {0x9594, 0x00000000},
+ {0x9598, 0x00000000},
+ {0x959c, 0x00000000},
+ {0x95a0, 0x00000000},
+ {0x95a4, 0x00000000},
+ {0x95a8, 0x00000000},
+ {0x95ac, 0x00000000},
+ {0x95b0, 0x00000000},
+ {0x95b4, 0x00000000},
+ {0x95b8, 0x00000000},
+ {0x95bc, 0x00000000},
+ {0x95c0, 0x00000000},
+ {0x95c4, 0x00000000},
+ {0x95c8, 0x04000000},
+ {0x95cc, 0x00000000},
+ {0x95d0, 0x00000000},
+ {0x95d4, 0x00000000},
+ {0x95d8, 0x00000000},
+ {0x95dc, 0x00000000},
+ {0x95e0, 0x00000000},
+ {0x95e4, 0x00000000},
+ {0x95e8, 0x00000000},
+ {0x95ec, 0x00000000},
+ {0x95f0, 0x00000000},
+ {0x95f4, 0x00000000},
+ {0x95f8, 0x00000000},
+ {0x95fc, 0x00000000},
+ {0x9600, 0x00000000},
+ {0x9604, 0x00000000},
+ {0x9608, 0x00000000},
+ {0x960c, 0x04000000},
+ {0x9610, 0x00000000},
+ {0x9614, 0x00000000},
+ {0x9618, 0x00000000},
+ {0x961c, 0x00000000},
+ {0x9620, 0x00000000},
+ {0x9624, 0x00000000},
+ {0x9628, 0x00000000},
+ {0x962c, 0x00000000},
+ {0x9630, 0x00000000},
+ {0x9634, 0x00000000},
+ {0x9638, 0x00000000},
+ {0x963c, 0x00000000},
+ {0x9640, 0x00000000},
+ {0x9644, 0x00000000},
+ {0x9648, 0x00000000},
+ {0x964c, 0x00000000},
+ {0x9650, 0x04000000},
+ {0x9654, 0x00000000},
+ {0x9658, 0x00000000},
+ {0x965c, 0x00000000},
+ {0x9660, 0x00000000},
+ {0x9664, 0x00000000},
+ {0x9668, 0x00000000},
+ {0x966c, 0x00000000},
+ {0x9670, 0x00000000},
+ {0x9674, 0x00000000},
+ {0x9678, 0x00000000},
+ {0x967c, 0x00000000},
+ {0x9680, 0x00000000},
+ {0x9684, 0x00000000},
+ {0x9688, 0x00000000},
+ {0x968c, 0x00000000},
+ {0x9690, 0x00000000},
+ {0x9694, 0x04000000},
+ {0x9698, 0x00000000},
+ {0x969c, 0x00000000},
+ {0x96a0, 0x00000000},
+ {0x96a4, 0x00000000},
+ {0x96a8, 0x00000000},
+ {0x96ac, 0x00000000},
+ {0x96b0, 0x00000000},
+ {0x96b4, 0x00000000},
+ {0x96b8, 0x00000000},
+ {0x96bc, 0x00000000},
+ {0x96c0, 0x00000000},
+ {0x96c4, 0x00000000},
+ {0x96c8, 0x00000000},
+ {0x96cc, 0x00000000},
+ {0x96d0, 0x00000000},
+ {0x96d4, 0x00000000},
+ {0x96d8, 0x04000000},
+ {0x96dc, 0x00000000},
+ {0x96e0, 0x00000000},
+ {0x96e4, 0x00000000},
+ {0x96e8, 0x00000000},
+ {0x96ec, 0x00000000},
+ {0x96f0, 0x00000000},
+ {0x96f4, 0x00000000},
+ {0x96f8, 0x00000000},
+ {0x96fc, 0x00000000},
+ {0x9700, 0x00000000},
+ {0x9704, 0x00000000},
+ {0x9708, 0x00000000},
+ {0x970c, 0x00000000},
+ {0x9710, 0x00000000},
+ {0x9714, 0x00000000},
+ {0x9718, 0x00000000},
+ {0x971c, 0x04000000},
+ {0x9720, 0x00000000},
+ {0x9724, 0x00000000},
+ {0x9728, 0x00000000},
+ {0x972c, 0x00000000},
+ {0x9730, 0x00000000},
+ {0x9734, 0x00000000},
+ {0x9738, 0x00000000},
+ {0x973c, 0x00000000},
+ {0x9740, 0x00000000},
+ {0x9744, 0x00000000},
+ {0x9748, 0x00000000},
+ {0x974c, 0x00000000},
+ {0x9750, 0x00000000},
+ {0x9754, 0x00000000},
+ {0x9758, 0x00000000},
+ {0x975c, 0x00000000},
+ {0x9760, 0x04000000},
+ {0x9764, 0x00000000},
+ {0x9768, 0x00000000},
+ {0x976c, 0x00000000},
+ {0x9770, 0x00000000},
+ {0x9774, 0x00000000},
+ {0x9778, 0x00000000},
+ {0x977c, 0x00000000},
+ {0x9780, 0x00000000},
+ {0x9784, 0x00000000},
+ {0x9788, 0x00000000},
+ {0x978c, 0x00000000},
+ {0x9790, 0x00000000},
+ {0x9794, 0x00000000},
+ {0x9798, 0x00000000},
+ {0x979c, 0x00000000},
+ {0x97a0, 0x00000000},
+ {0x97a4, 0x04000000},
+ {0x97a8, 0x00000000},
+ {0x97ac, 0x00000000},
+ {0x97b0, 0x00000000},
+ {0x97b4, 0x00000000},
+ {0x97b8, 0x00000000},
+ {0x97bc, 0x00000000},
+ {0x97c0, 0x00000000},
+ {0x97c4, 0x00000000},
+ {0x97c8, 0x00000000},
+ {0x97cc, 0x00000000},
+ {0x97d0, 0x00000000},
+ {0x97d4, 0x00000000},
+ {0x97d8, 0x00000000},
+ {0x97dc, 0x00000000},
+ {0x97e0, 0x00000000},
+ {0x97e4, 0x00000000},
+ {0x97e8, 0x04000000},
+ {0x97ec, 0x00000000},
+ {0x97f0, 0x00000000},
+ {0x97f4, 0x00000000},
+ {0x97f8, 0x00000000},
+ {0x97fc, 0x00000000},
+ {0x9800, 0x00000000},
+ {0x9804, 0x00000000},
+ {0x9808, 0x00000000},
+ {0x980c, 0x00000000},
+ {0x9810, 0x00000000},
+ {0x9814, 0x00000000},
+ {0x9818, 0x00000000},
+ {0x981c, 0x00000000},
+ {0x9820, 0x00000000},
+ {0x9824, 0x00000000},
+ {0x9828, 0x00000000},
+ {0x982c, 0x04000000},
+ {0x9830, 0x00000000},
+ {0x9834, 0x00000000},
+ {0x9838, 0x00000000},
+ {0x983c, 0x00000000},
+ {0x9840, 0x00000000},
+ {0x9844, 0x00000000},
+ {0x9848, 0x00000000},
+ {0x984c, 0x00000000},
+ {0x9850, 0x00000000},
+ {0x9854, 0x00000000},
+ {0x9858, 0x00000000},
+ {0x985c, 0x00000000},
+ {0x9860, 0x00000000},
+ {0x9864, 0x00000000},
+ {0x9868, 0x00000000},
+ {0x986c, 0x00000000},
+ {0x9870, 0x04000000},
+ {0x9874, 0x00000000},
+ {0x9878, 0x00000000},
+ {0x987c, 0x00000000},
+ {0x9880, 0x00000000},
+ {0x9884, 0x00000000},
+ {0x9888, 0x00000000},
+ {0x988c, 0x00000000},
+ {0x9890, 0x00000000},
+ {0x9894, 0x00000000},
+ {0x9898, 0x00000000},
+ {0x989c, 0x00000000},
+ {0x98a0, 0x00000000},
+ {0x98a4, 0x00000000},
+ {0x98a8, 0x00000000},
+ {0x98ac, 0x00000000},
+ {0x98b0, 0x00000000},
+ {0x98b4, 0x04000000},
+ {0x98b8, 0x00000000},
+ {0x98bc, 0x00000000},
+ {0x98c0, 0x00000000},
+ {0x98c4, 0x00000000},
+ {0x98c8, 0x00000000},
+ {0x98cc, 0x00000000},
+ {0x98d0, 0x00000000},
+ {0x98d4, 0x00000000},
+ {0x98d8, 0x00000000},
+ {0x98dc, 0x00000000},
+ {0x98e0, 0x00000000},
+ {0x98e4, 0x00000000},
+ {0x98e8, 0x00000000},
+ {0x98ec, 0x00000000},
+ {0x98f0, 0x00000000},
+ {0x98f4, 0x00000000},
+ {0x98f8, 0x04000000},
+ {0x98fc, 0x00000000},
+ {0x9900, 0x00000000},
+ {0x9904, 0x00000000},
+ {0x9908, 0x00000000},
+ {0x990c, 0x00000000},
+ {0x9910, 0x00000000},
+ {0x9914, 0x00000000},
+ {0x9918, 0x00000000},
+ {0x991c, 0x00000000},
+ {0x9920, 0x00000000},
+ {0x9924, 0x00000000},
+ {0x9928, 0x00000000},
+ {0x992c, 0x00000000},
+ {0x9930, 0x00000000},
+ {0x9934, 0x00000000},
+ {0x9938, 0x00000000},
+ {0x993c, 0x04000000},
+ {0x9940, 0x00000000},
+ {0x9944, 0x00000000},
+ {0x9948, 0x00000000},
+ {0x994c, 0x00000000},
+ {0x9950, 0x00000000},
+ {0x9954, 0x00000000},
+ {0x9958, 0x00000000},
+ {0x995c, 0x00000000},
+ {0x9960, 0x00000000},
+ {0x9964, 0x00000000},
+ {0x9968, 0x00000000},
+ {0x996c, 0x00000000},
+ {0x9970, 0x00000000},
+ {0x9974, 0x00000000},
+ {0x9978, 0x00000000},
+ {0x997c, 0x00000000},
+ {0x9980, 0x04000000},
+ {0x9984, 0x00000000},
+ {0x9988, 0x00000000},
+ {0x998c, 0x00000000},
+ {0x9990, 0x00000000},
+ {0x9994, 0x00000000},
+ {0x9998, 0x00000000},
+ {0x999c, 0x00000000},
+ {0x99a0, 0x00000000},
+ {0x99a4, 0x00000000},
+ {0x99a8, 0x00000000},
+ {0x99ac, 0x00000000},
+ {0x99b0, 0x00000000},
+ {0x99b4, 0x00000000},
+ {0x99b8, 0x00000000},
+ {0x99bc, 0x00000000},
+ {0x99c0, 0x00000000},
+ {0x99c4, 0x04000000},
+ {0x99c8, 0x00000000},
+ {0x99cc, 0x00000000},
+ {0x99d0, 0x00000000},
+ {0x99d4, 0x00000000},
+ {0x99d8, 0x00000000},
+ {0x99dc, 0x00000000},
+ {0x99e0, 0x00000000},
+ {0x99e4, 0x00000000},
+ {0x99e8, 0x00000000},
+ {0x99ec, 0x00000000},
+ {0x99f0, 0x00000000},
+ {0x99f4, 0x00000000},
+ {0x99f8, 0x00000000},
+ {0x99fc, 0x00000000},
+ {0x9a00, 0x00000000},
+ {0x9a04, 0x00000000},
+ {0x9a08, 0x04000000},
+ {0x9a0c, 0x00000000},
+ {0x9a10, 0x00000000},
+ {0x9a14, 0x00000000},
+ {0x9a18, 0x00000000},
+ {0x9a1c, 0x00000000},
+ {0x9a20, 0x00000000},
+ {0x9a24, 0x00000000},
+ {0x9a28, 0x00000000},
+ {0x9a2c, 0x00000000},
+ {0x9a30, 0x00000000},
+ {0x9a34, 0x00000000},
+ {0x9a38, 0x00000000},
+ {0x9a3c, 0x00000000},
+ {0x9a40, 0x00000000},
+ {0x9a44, 0x00000000},
+ {0x9a48, 0x00000000},
+ {0x9a4c, 0x04000000},
+ {0x9a50, 0x00000000},
+ {0x9a54, 0x00000000},
+ {0x9a58, 0x00000000},
+ {0x9a5c, 0x00000000},
+ {0x9a60, 0x00000000},
+ {0x9a64, 0x00000000},
+ {0x9a68, 0x00000000},
+ {0x9a6c, 0x00000000},
+ {0x9a70, 0x00000000},
+ {0x9a74, 0x00000000},
+ {0x9a78, 0x00000000},
+ {0x9a7c, 0x00000000},
+ {0x9a80, 0x00000000},
+ {0x9a84, 0x00000000},
+ {0x9a88, 0x00000000},
+ {0x9a8c, 0x00000000},
+ {0x9a90, 0x04000000},
+ {0x9a94, 0x00000000},
+ {0x9a98, 0x00000000},
+ {0x9a9c, 0x00000000},
+ {0x9aa0, 0x00000000},
+ {0x9aa4, 0x00000000},
+ {0x9aa8, 0x00000000},
+ {0x9aac, 0x00000000},
+ {0x9ab0, 0x00000000},
+ {0x9ab4, 0x00000000},
+ {0x9ab8, 0x00000000},
+ {0x9abc, 0x00000000},
+ {0x9ac0, 0x00000000},
+ {0x9ac4, 0x00000000},
+ {0x9ac8, 0x00000000},
+ {0x9acc, 0x00000000},
+ {0x9ad0, 0x00000000},
+ {0x9ad4, 0x04000000},
+ {0x9ad8, 0x00000000},
+ {0x9adc, 0x00000000},
+ {0x9ae0, 0x00000000},
+ {0x9ae4, 0x00000000},
+ {0x9ae8, 0x00000000},
+ {0x9aec, 0x00000000},
+ {0x9af0, 0x00000000},
+ {0x9af4, 0x00000000},
+ {0x9af8, 0x00000000},
+ {0x9afc, 0x00000000},
+ {0x9b00, 0x00000000},
+ {0x9b04, 0x00000000},
+ {0x9b08, 0x00000000},
+ {0x9b0c, 0x00000000},
+ {0x9b10, 0x00000000},
+ {0x9b14, 0x00000000},
+ {0x9b18, 0x04000000},
+ {0x9b1c, 0x00000000},
+ {0x9b20, 0x00000000},
+ {0x9b24, 0x00000000},
+ {0x9b28, 0x00000000},
+ {0x9b2c, 0x00000000},
+ {0x9b30, 0x00000000},
+ {0x9b34, 0x00000000},
+ {0x9b38, 0x00000000},
+ {0x9b3c, 0x00000000},
+ {0x9b40, 0x00000000},
+ {0x9b44, 0x00000000},
+ {0x9b48, 0x00000000},
+ {0x9b4c, 0x00000000},
+ {0x9b50, 0x00000000},
+ {0x9b54, 0x00000000},
+ {0x9b58, 0x00000000},
+ {0x9b5c, 0x04000000},
+ {0x9d00, 0x00000000},
+ {0x9d04, 0x00000000},
+ {0x9d08, 0x00000000},
+ {0x9d0c, 0x00000000},
+ {0x9d10, 0x00000000},
+ {0x9d14, 0x00000000},
+ {0x9d18, 0x00000000},
+ {0x9d1c, 0x00000000},
+ {0x9d20, 0x00000000},
+ {0x9d24, 0x00000000},
+ {0x9d28, 0x00000000},
+ {0x9d2c, 0x00000000},
+ {0x9d30, 0x00000000},
+ {0x9d34, 0x00000000},
+ {0x9d38, 0x00000000},
+ {0x9d3c, 0x00000000},
+ {0x9d40, 0x04000000},
+ {0x9d44, 0x00000000},
+ {0x9d48, 0x00000000},
+ {0x9d4c, 0x00000000},
+ {0x9d50, 0x00000000},
+ {0x9d54, 0x00000000},
+ {0x9d58, 0x00000000},
+ {0x9d5c, 0x00000000},
+ {0x9d60, 0x00000000},
+ {0x9d64, 0x00000000},
+ {0x9d68, 0x00000000},
+ {0x9d6c, 0x00000000},
+ {0x9d70, 0x00000000},
+ {0x9d74, 0x00000000},
+ {0x9d78, 0x00000000},
+ {0x9d7c, 0x00000000},
+ {0x9d80, 0x00000000},
+ {0x9d84, 0x04000000},
+ {0x9d88, 0x00000000},
+ {0x9d8c, 0x00000000},
+ {0x9d90, 0x00000000},
+ {0x9d94, 0x00000000},
+ {0x9d98, 0x00000000},
+ {0x9d9c, 0x00000000},
+ {0x9da0, 0x00000000},
+ {0x9da4, 0x00000000},
+ {0x9da8, 0x00000000},
+ {0x9dac, 0x00000000},
+ {0x9db0, 0x00000000},
+ {0x9db4, 0x00000000},
+ {0x9db8, 0x00000000},
+ {0x9dbc, 0x00000000},
+ {0x9dc0, 0x00000000},
+ {0x9dc4, 0x00000000},
+ {0x9dc8, 0x04000000},
+ {0x9dcc, 0x00000000},
+ {0x9dd0, 0x00000000},
+ {0x9dd4, 0x00000000},
+ {0x9dd8, 0x00000000},
+ {0x9ddc, 0x00000000},
+ {0x9de0, 0x00000000},
+ {0x9de4, 0x00000000},
+ {0x9de8, 0x00000000},
+ {0x9dec, 0x00000000},
+ {0x9df0, 0x00000000},
+ {0x9df4, 0x00000000},
+ {0x9df8, 0x00000000},
+ {0x9dfc, 0x00000000},
+ {0x9e00, 0x00000000},
+ {0x9e04, 0x00000000},
+ {0x9e08, 0x00000000},
+ {0x9e0c, 0x04000000},
+ {0x9e10, 0x00000000},
+ {0x9e14, 0x00000000},
+ {0x9e18, 0x00000000},
+ {0x9e1c, 0x00000000},
+ {0x9e20, 0x00000000},
+ {0x9e24, 0x00000000},
+ {0x9e28, 0x00000000},
+ {0x9e2c, 0x00000000},
+ {0x9e30, 0x00000000},
+ {0x9e34, 0x00000000},
+ {0x9e38, 0x00000000},
+ {0x9e3c, 0x00000000},
+ {0x9e40, 0x00000000},
+ {0x9e44, 0x00000000},
+ {0x9e48, 0x00000000},
+ {0x9e4c, 0x00000000},
+ {0x9e50, 0x04000000},
+ {0x9e54, 0x00000000},
+ {0x9e58, 0x00000000},
+ {0x9e5c, 0x00000000},
+ {0x9e60, 0x00000000},
+ {0x9e64, 0x00000000},
+ {0x9e68, 0x00000000},
+ {0x9e6c, 0x00000000},
+ {0x9e70, 0x00000000},
+ {0x9e74, 0x00000000},
+ {0x9e78, 0x00000000},
+ {0x9e7c, 0x00000000},
+ {0x9e80, 0x00000000},
+ {0x9e84, 0x00000000},
+ {0x9e88, 0x00000000},
+ {0x9e8c, 0x00000000},
+ {0x9e90, 0x00000000},
+ {0x9e94, 0x04000000},
+ {0x9e98, 0x00000000},
+ {0x9e9c, 0x00000000},
+ {0x9ea0, 0x00000000},
+ {0x9ea4, 0x00000000},
+ {0x9ea8, 0x00000000},
+ {0x9eac, 0x00000000},
+ {0x9eb0, 0x00000000},
+ {0x9eb4, 0x00000000},
+ {0x9eb8, 0x00000000},
+ {0x9ebc, 0x00000000},
+ {0x9ec0, 0x00000000},
+ {0x9ec4, 0x00000000},
+ {0x9ec8, 0x00000000},
+ {0x9ecc, 0x00000000},
+ {0x9ed0, 0x00000000},
+ {0x9ed4, 0x00000000},
+ {0x9ed8, 0x04000000},
+ {0x9edc, 0x00000000},
+ {0x9ee0, 0x00000000},
+ {0x9ee4, 0x00000000},
+ {0x9ee8, 0x00000000},
+ {0x9eec, 0x00000000},
+ {0x9ef0, 0x00000000},
+ {0x9ef4, 0x00000000},
+ {0x9ef8, 0x00000000},
+ {0x9efc, 0x00000000},
+ {0x9f00, 0x00000000},
+ {0x9f04, 0x00000000},
+ {0x9f08, 0x00000000},
+ {0x9f0c, 0x00000000},
+ {0x9f10, 0x00000000},
+ {0x9f14, 0x00000000},
+ {0x9f18, 0x00000000},
+ {0x9f1c, 0x04000000},
+ {0x9f20, 0x00000000},
+ {0x9f24, 0x00000000},
+ {0x9f28, 0x00000000},
+ {0x9f2c, 0x00000000},
+ {0x9f30, 0x00000000},
+ {0x9f34, 0x00000000},
+ {0x9f38, 0x00000000},
+ {0x9f3c, 0x00000000},
+ {0x9f40, 0x00000000},
+ {0x9f44, 0x00000000},
+ {0x9f48, 0x00000000},
+ {0x9f4c, 0x00000000},
+ {0x9f50, 0x00000000},
+ {0x9f54, 0x00000000},
+ {0x9f58, 0x00000000},
+ {0x9f5c, 0x00000000},
+ {0x9f60, 0x04000000},
+ {0x9f64, 0x00000000},
+ {0x9f68, 0x00000000},
+ {0x9f6c, 0x00000000},
+ {0x9f70, 0x00000000},
+ {0x9f74, 0x00000000},
+ {0x9f78, 0x00000000},
+ {0x9f7c, 0x00000000},
+ {0x9f80, 0x00000000},
+ {0x9f84, 0x00000000},
+ {0x9f88, 0x00000000},
+ {0x9f8c, 0x00000000},
+ {0x9f90, 0x00000000},
+ {0x9f94, 0x00000000},
+ {0x9f98, 0x00000000},
+ {0x9f9c, 0x00000000},
+ {0x9fa0, 0x00000000},
+ {0x9fa4, 0x04000000},
+ {0x9fa8, 0x00000000},
+ {0x9fac, 0x00000000},
+ {0x9fb0, 0x00000000},
+ {0x9fb4, 0x00000000},
+ {0x9fb8, 0x00000000},
+ {0x9fbc, 0x00000000},
+ {0x9fc0, 0x00000000},
+ {0x9fc4, 0x00000000},
+ {0x9fc8, 0x00000000},
+ {0x9fcc, 0x00000000},
+ {0x9fd0, 0x00000000},
+ {0x9fd4, 0x00000000},
+ {0x9fd8, 0x00000000},
+ {0x9fdc, 0x00000000},
+ {0x9fe0, 0x00000000},
+ {0x9fe4, 0x00000000},
+ {0x9fe8, 0x04000000},
+ {0x9fec, 0x00000000},
+ {0x9ff0, 0x00000000},
+ {0x9ff4, 0x00000000},
+ {0x9ff8, 0x00000000},
+ {0x9ffc, 0x00000000},
+ {0xa000, 0x00000000},
+ {0xa004, 0x00000000},
+ {0xa008, 0x00000000},
+ {0xa00c, 0x00000000},
+ {0xa010, 0x00000000},
+ {0xa014, 0x00000000},
+ {0xa018, 0x00000000},
+ {0xa01c, 0x00000000},
+ {0xa020, 0x00000000},
+ {0xa024, 0x00000000},
+ {0xa028, 0x00000000},
+ {0xa02c, 0x04000000},
+ {0xa030, 0x00000000},
+ {0xa034, 0x00000000},
+ {0xa038, 0x00000000},
+ {0xa03c, 0x00000000},
+ {0xa040, 0x00000000},
+ {0xa044, 0x00000000},
+ {0xa048, 0x00000000},
+ {0xa04c, 0x00000000},
+ {0xa050, 0x00000000},
+ {0xa054, 0x00000000},
+ {0xa058, 0x00000000},
+ {0xa05c, 0x00000000},
+ {0xa060, 0x00000000},
+ {0xa064, 0x00000000},
+ {0xa068, 0x00000000},
+ {0xa06c, 0x00000000},
+ {0xa070, 0x04000000},
+ {0xa074, 0x00000000},
+ {0xa078, 0x00000000},
+ {0xa07c, 0x00000000},
+ {0xa080, 0x00000000},
+ {0xa084, 0x00000000},
+ {0xa088, 0x00000000},
+ {0xa08c, 0x00000000},
+ {0xa090, 0x00000000},
+ {0xa094, 0x00000000},
+ {0xa098, 0x00000000},
+ {0xa09c, 0x00000000},
+ {0xa0a0, 0x00000000},
+ {0xa0a4, 0x00000000},
+ {0xa0a8, 0x00000000},
+ {0xa0ac, 0x00000000},
+ {0xa0b0, 0x00000000},
+ {0xa0b4, 0x04000000},
+ {0xa0b8, 0x00000000},
+ {0xa0bc, 0x00000000},
+ {0xa0c0, 0x00000000},
+ {0xa0c4, 0x00000000},
+ {0xa0c8, 0x00000000},
+ {0xa0cc, 0x00000000},
+ {0xa0d0, 0x00000000},
+ {0xa0d4, 0x00000000},
+ {0xa0d8, 0x00000000},
+ {0xa0dc, 0x00000000},
+ {0xa0e0, 0x00000000},
+ {0xa0e4, 0x00000000},
+ {0xa0e8, 0x00000000},
+ {0xa0ec, 0x00000000},
+ {0xa0f0, 0x00000000},
+ {0xa0f4, 0x00000000},
+ {0xa0f8, 0x04000000},
+ {0xa0fc, 0x00000000},
+ {0xa100, 0x00000000},
+ {0xa104, 0x00000000},
+ {0xa108, 0x00000000},
+ {0xa10c, 0x00000000},
+ {0xa110, 0x00000000},
+ {0xa114, 0x00000000},
+ {0xa118, 0x00000000},
+ {0xa11c, 0x00000000},
+ {0xa120, 0x00000000},
+ {0xa124, 0x00000000},
+ {0xa128, 0x00000000},
+ {0xa12c, 0x00000000},
+ {0xa130, 0x00000000},
+ {0xa134, 0x00000000},
+ {0xa138, 0x00000000},
+ {0xa13c, 0x04000000},
+ {0xa140, 0x00000000},
+ {0xa144, 0x00000000},
+ {0xa148, 0x00000000},
+ {0xa14c, 0x00000000},
+ {0xa150, 0x00000000},
+ {0xa154, 0x00000000},
+ {0xa158, 0x00000000},
+ {0xa15c, 0x00000000},
+ {0xa160, 0x00000000},
+ {0xa164, 0x00000000},
+ {0xa168, 0x00000000},
+ {0xa16c, 0x00000000},
+ {0xa170, 0x00000000},
+ {0xa174, 0x00000000},
+ {0xa178, 0x00000000},
+ {0xa17c, 0x00000000},
+ {0xa180, 0x04000000},
+ {0xa184, 0x00000000},
+ {0xa188, 0x00000000},
+ {0xa18c, 0x00000000},
+ {0xa190, 0x00000000},
+ {0xa194, 0x00000000},
+ {0xa198, 0x00000000},
+ {0xa19c, 0x00000000},
+ {0xa1a0, 0x00000000},
+ {0xa1a4, 0x00000000},
+ {0xa1a8, 0x00000000},
+ {0xa1ac, 0x00000000},
+ {0xa1b0, 0x00000000},
+ {0xa1b4, 0x00000000},
+ {0xa1b8, 0x00000000},
+ {0xa1bc, 0x00000000},
+ {0xa1c0, 0x00000000},
+ {0xa1c4, 0x04000000},
+ {0xa1c8, 0x00000000},
+ {0xa1cc, 0x00000000},
+ {0xa1d0, 0x00000000},
+ {0xa1d4, 0x00000000},
+ {0xa1d8, 0x00000000},
+ {0xa1dc, 0x00000000},
+ {0xa1e0, 0x00000000},
+ {0xa1e4, 0x00000000},
+ {0xa1e8, 0x00000000},
+ {0xa1ec, 0x00000000},
+ {0xa1f0, 0x00000000},
+ {0xa1f4, 0x00000000},
+ {0xa1f8, 0x00000000},
+ {0xa1fc, 0x00000000},
+ {0xa200, 0x00000000},
+ {0xa204, 0x00000000},
+ {0xa208, 0x04000000},
+ {0xa20c, 0x00000000},
+ {0xa210, 0x00000000},
+ {0xa214, 0x00000000},
+ {0xa218, 0x00000000},
+ {0xa21c, 0x00000000},
+ {0xa220, 0x00000000},
+ {0xa224, 0x00000000},
+ {0xa228, 0x00000000},
+ {0xa22c, 0x00000000},
+ {0xa230, 0x00000000},
+ {0xa234, 0x00000000},
+ {0xa238, 0x00000000},
+ {0xa23c, 0x00000000},
+ {0xa240, 0x00000000},
+ {0xa244, 0x00000000},
+ {0xa248, 0x00000000},
+ {0xa24c, 0x04000000},
+ {0xa250, 0x00000000},
+ {0xa254, 0x00000000},
+ {0xa258, 0x00000000},
+ {0xa25c, 0x00000000},
+ {0xa260, 0x00000000},
+ {0xa264, 0x00000000},
+ {0xa268, 0x00000000},
+ {0xa26c, 0x00000000},
+ {0xa270, 0x00000000},
+ {0xa274, 0x00000000},
+ {0xa278, 0x00000000},
+ {0xa27c, 0x00000000},
+ {0xa280, 0x00000000},
+ {0xa284, 0x00000000},
+ {0xa288, 0x00000000},
+ {0xa28c, 0x00000000},
+ {0xa290, 0x04000000},
+ {0xa294, 0x00000000},
+ {0xa298, 0x00000000},
+ {0xa29c, 0x00000000},
+ {0xa2a0, 0x00000000},
+ {0xa2a4, 0x00000000},
+ {0xa2a8, 0x00000000},
+ {0xa2ac, 0x00000000},
+ {0xa2b0, 0x00000000},
+ {0xa2b4, 0x00000000},
+ {0xa2b8, 0x00000000},
+ {0xa2bc, 0x00000000},
+ {0xa2c0, 0x00000000},
+ {0xa2c4, 0x00000000},
+ {0xa2c8, 0x00000000},
+ {0xa2cc, 0x00000000},
+ {0xa2d0, 0x00000000},
+ {0xa2d4, 0x04000000},
+ {0xa2d8, 0x00000000},
+ {0xa2dc, 0x00000000},
+ {0xa2e0, 0x00000000},
+ {0xa2e4, 0x00000000},
+ {0xa2e8, 0x00000000},
+ {0xa2ec, 0x00000000},
+ {0xa2f0, 0x00000000},
+ {0xa2f4, 0x00000000},
+ {0xa2f8, 0x00000000},
+ {0xa2fc, 0x00000000},
+ {0xa300, 0x00000000},
+ {0xa304, 0x00000000},
+ {0xa308, 0x00000000},
+ {0xa30c, 0x00000000},
+ {0xa310, 0x00000000},
+ {0xa314, 0x00000000},
+ {0xa318, 0x04000000},
+ {0xa31c, 0x00000000},
+ {0xa320, 0x00000000},
+ {0xa324, 0x00000000},
+ {0xa328, 0x00000000},
+ {0xa32c, 0x00000000},
+ {0xa330, 0x00000000},
+ {0xa334, 0x00000000},
+ {0xa338, 0x00000000},
+ {0xa33c, 0x00000000},
+ {0xa340, 0x00000000},
+ {0xa344, 0x00000000},
+ {0xa348, 0x00000000},
+ {0xa34c, 0x00000000},
+ {0xa350, 0x00000000},
+ {0xa354, 0x00000000},
+ {0xa358, 0x00000000},
+ {0xa35c, 0x04000000},
+ {0x81d8, 0x00000000},
+ {0x82d8, 0x00000000},
+ {0xb104, 0x2b251f19},
+ {0xb108, 0x433d3731},
+ {0xb10c, 0x5b554f49},
+ {0xb110, 0x736d6761},
+ {0xb114, 0x7f7f7f79},
+ {0xb118, 0x120f7f7f},
+ {0xb11c, 0x1e1b1815},
+ {0xb120, 0x2a272421},
+ {0xb124, 0x3633302d},
+ {0xb128, 0x3f3f3c39},
+ {0xb12c, 0x3f3f3f3f},
+ {0x8088, 0x00000110},
+ {0x8000, 0x00000008},
+ {0x8080, 0x00000005},
+ {0x8500, 0x80000008},
+ {0x8504, 0x43000004},
+ {0x8508, 0x4b044a00},
+ {0x850c, 0x40098604},
+ {0x8510, 0x0004e024},
+ {0x8514, 0x87044b05},
+ {0x8518, 0xe024400b},
+ {0x851c, 0x4b000004},
+ {0x8520, 0x21e07410},
+ {0x8524, 0x16580000},
+ {0x8528, 0x00047430},
+ {0x852c, 0x00074380},
+ {0x8530, 0x00044c00},
+ {0x8534, 0x00074300},
+ {0x8538, 0x00045603},
+ {0x853c, 0x42fe5700},
+ {0x8540, 0x42004000},
+ {0x8544, 0x30005055},
+ {0x8548, 0xa512b41c},
+ {0x854c, 0xf02fe66f},
+ {0x8550, 0xf22ff12f},
+ {0x8554, 0xf42ff32f},
+ {0x8558, 0xf62ff52f},
+ {0x855c, 0xf82ff72f},
+ {0x8560, 0xfa2ff92f},
+ {0x8564, 0xfc2ffb2f},
+ {0x8568, 0xfe2ffd2f},
+ {0x856c, 0xe66fff2f},
+ {0x8570, 0xf12ef02e},
+ {0x8574, 0xf32ef22e},
+ {0x8578, 0xf52ef42e},
+ {0x857c, 0xff2ef62e},
+ {0x8580, 0xa511000b},
+ {0x8584, 0xf12cf02c},
+ {0x8588, 0xf32cf22c},
+ {0x858c, 0xf52cf42c},
+ {0x8590, 0xf72cf62c},
+ {0x8594, 0xf92cf82c},
+ {0x8598, 0xfb2cfa2c},
+ {0x859c, 0xfd2cfc2c},
+ {0x85a0, 0xff2cfe2c},
+ {0x85a4, 0xf12cf02c},
+ {0x85a8, 0x0001f22c},
+ {0x85ac, 0x30b330b3},
+ {0x85b0, 0x310c3125},
+ {0x85b4, 0x31253161},
+ {0x85b8, 0x3081316f},
+ {0x85bc, 0x317f3172},
+ {0x85c0, 0x3192318c},
+ {0x85c4, 0x32b832a6},
+ {0x85c8, 0x31fd32c2},
+ {0x85cc, 0x330732cc},
+ {0x85d0, 0x33193343},
+ {0x85d4, 0x331d3312},
+ {0x85d8, 0x31663316},
+ {0x85dc, 0x3365335b},
+ {0x85e0, 0x3379336f},
+ {0x85e4, 0x338d3383},
+ {0x85e8, 0x33a13397},
+ {0x85ec, 0x33b833ab},
+ {0x85f0, 0x33d733c9},
+ {0x85f4, 0x342333db},
+ {0x85f8, 0x343c343b},
+ {0x85fc, 0x3471346f},
+ {0x8600, 0xe493347c},
+ {0x8604, 0x20887410},
+ {0x8608, 0x140f0200},
+ {0x860c, 0x02002098},
+ {0x8610, 0x20a8140f},
+ {0x8614, 0x140f0200},
+ {0x8618, 0xe4df7430},
+ {0x861c, 0x74105b10},
+ {0x8620, 0x000120a0},
+ {0x8624, 0x140f140f},
+ {0x8628, 0x56e15507},
+ {0x862c, 0xe4c95c06},
+ {0x8630, 0x20a87410},
+ {0x8634, 0x140f0201},
+ {0x8638, 0xe4c95517},
+ {0x863c, 0x20a87410},
+ {0x8640, 0x140f0200},
+ {0x8644, 0x56c15517},
+ {0x8648, 0xe4c95c02},
+ {0x864c, 0x20a07410},
+ {0x8650, 0x140f0000},
+ {0x8654, 0x55071407},
+ {0x8658, 0xe47ee4c9},
+ {0x865c, 0x4686750a},
+ {0x8660, 0xe159e4d3},
+ {0x8664, 0xe4930001},
+ {0x8668, 0x20a87410},
+ {0x866c, 0x140f0200},
+ {0x8670, 0x02002098},
+ {0x8674, 0x2088140f},
+ {0x8678, 0x140f0200},
+ {0x867c, 0xe4df7430},
+ {0x8680, 0x74105b10},
+ {0x8684, 0x020120a8},
+ {0x8688, 0x2080140f},
+ {0x868c, 0x140f0000},
+ {0x8690, 0x56615507},
+ {0x8694, 0xe4c95c06},
+ {0x8698, 0x20887410},
+ {0x869c, 0x140f0200},
+ {0x86a0, 0xe4c95517},
+ {0x86a4, 0x20a87410},
+ {0x86a8, 0x140f0200},
+ {0x86ac, 0x56415517},
+ {0x86b0, 0xe4c95c02},
+ {0x86b4, 0x20807410},
+ {0x86b8, 0x140f0000},
+ {0x86bc, 0x55071407},
+ {0x86c0, 0xe47ee4c9},
+ {0x86c4, 0x468e7508},
+ {0x86c8, 0xe159e4d3},
+ {0x86cc, 0x5b10f025},
+ {0x86d0, 0x20a87410},
+ {0x86d4, 0x140f0201},
+ {0x86d8, 0x00002090},
+ {0x86dc, 0x5507140f},
+ {0x86e0, 0x5c065661},
+ {0x86e4, 0x7410e4c9},
+ {0x86e8, 0x02002098},
+ {0x86ec, 0x5517140f},
+ {0x86f0, 0x7410e4c9},
+ {0x86f4, 0x020020a8},
+ {0x86f8, 0x5517140f},
+ {0x86fc, 0x5c025641},
+ {0x8700, 0x7410e4c9},
+ {0x8704, 0x00002090},
+ {0x8708, 0x5507140f},
+ {0x870c, 0x7509e4c9},
+ {0x8710, 0xe4d34696},
+ {0x8714, 0x0001e159},
+ {0x8718, 0x74105b10},
+ {0x871c, 0x000020a0},
+ {0x8720, 0x5507140f},
+ {0x8724, 0xe4c95601},
+ {0x8728, 0x20a87410},
+ {0x872c, 0x140f0200},
+ {0x8730, 0xe4c95517},
+ {0x8734, 0x750ae47e},
+ {0x8738, 0xe4d34686},
+ {0x873c, 0x5500e159},
+ {0x8740, 0x5501e4c5},
+ {0x8744, 0xe4930001},
+ {0x8748, 0x5b10e4df},
+ {0x874c, 0x20807410},
+ {0x8750, 0x140f0000},
+ {0x8754, 0x02002098},
+ {0x8758, 0xf205140f},
+ {0x875c, 0x20a8f504},
+ {0x8760, 0x140f0200},
+ {0x8764, 0x56015507},
+ {0x8768, 0x7410e4c9},
+ {0x876c, 0x02002088},
+ {0x8770, 0x5517140f},
+ {0x8774, 0xe47ee4c9},
+ {0x8778, 0x468e7508},
+ {0x877c, 0xe159e4d3},
+ {0x8780, 0x7410f512},
+ {0x8784, 0x00002090},
+ {0x8788, 0x5507140f},
+ {0x878c, 0x7410e4c9},
+ {0x8790, 0x02002098},
+ {0x8794, 0x5517140f},
+ {0x8798, 0x7509e4c9},
+ {0x879c, 0xe4d34696},
+ {0x87a0, 0x0001e159},
+ {0x87a4, 0x46965b90},
+ {0x87a8, 0xe4c55500},
+ {0x87ac, 0x5b105501},
+ {0x87b0, 0x79000001},
+ {0x87b4, 0x57107420},
+ {0x87b8, 0x140f5700},
+ {0x87bc, 0x74309700},
+ {0x87c0, 0xe4930001},
+ {0x87c4, 0x0bbde4df},
+ {0x87c8, 0x0001e662},
+ {0x87cc, 0x5720e493},
+ {0x87d0, 0x540054fd},
+ {0x87d4, 0x70005700},
+ {0x87d8, 0x70c0e4dd},
+ {0x87dc, 0xe4a90001},
+ {0x87e0, 0x0001e512},
+ {0x87e4, 0x31abe493},
+ {0x87e8, 0xe6620023},
+ {0x87ec, 0x54ed0002},
+ {0x87f0, 0x00230baa},
+ {0x87f4, 0x0002e662},
+ {0x87f8, 0xe486e52e},
+ {0x87fc, 0xe4930001},
+ {0x8800, 0x002231a1},
+ {0x8804, 0x0002e662},
+ {0x8808, 0x0baa54ec},
+ {0x880c, 0xe6620022},
+ {0x8810, 0xe52e0002},
+ {0x8814, 0x0001e486},
+ {0x8818, 0x0baae493},
+ {0x881c, 0xe52e3194},
+ {0x8820, 0x0001e486},
+ {0x8824, 0x0babe493},
+ {0x8828, 0x6d0f6c67},
+ {0x882c, 0xe662e4df},
+ {0x8830, 0x6c8bfb04},
+ {0x8834, 0xe662e4df},
+ {0x8838, 0x6c95fa04},
+ {0x883c, 0xe662e4df},
+ {0x8840, 0x0bacfb06},
+ {0x8844, 0x6d0f6cb3},
+ {0x8848, 0xe662e4df},
+ {0x884c, 0xf904fa05},
+ {0x8850, 0xe4df6ccb},
+ {0x8854, 0xfb06e662},
+ {0x8858, 0x6cdb0bad},
+ {0x885c, 0xe4df6d0f},
+ {0x8860, 0x6cf5e662},
+ {0x8864, 0xe4df6d0f},
+ {0x8868, 0x6c0be662},
+ {0x886c, 0xe4df6d00},
+ {0x8870, 0xfb04e662},
+ {0x8874, 0xe4df6c25},
+ {0x8878, 0xf8b7e662},
+ {0x887c, 0xf904fa05},
+ {0x8880, 0xe4df6c35},
+ {0x8884, 0xfb04e662},
+ {0x8888, 0xe4df6c4d},
+ {0x888c, 0xf9bae662},
+ {0x8890, 0x6c6bfa04},
+ {0x8894, 0xe662e4df},
+ {0x8898, 0x6c75fb04},
+ {0x889c, 0xe662e4df},
+ {0x88a0, 0xe4df6c99},
+ {0x88a4, 0xfabce662},
+ {0x88a8, 0x57200ba8},
+ {0x88ac, 0x540054f0},
+ {0x88b0, 0x7c355700},
+ {0x88b4, 0x70007d00},
+ {0x88b8, 0x6d0e6cc5},
+ {0x88bc, 0xe662e4dd},
+ {0x88c0, 0xe4dd6cf5},
+ {0x88c4, 0x6c29e662},
+ {0x88c8, 0xe4dd6d0f},
+ {0x88cc, 0x0bb3e662},
+ {0x88d0, 0x54ed5720},
+ {0x88d4, 0x57005400},
+ {0x88d8, 0x7d0f7ccb},
+ {0x88dc, 0x6d006cd7},
+ {0x88e0, 0xe662e4dd},
+ {0x88e4, 0x6d016c0b},
+ {0x88e8, 0xe662e4dd},
+ {0x88ec, 0xe4dd6c3b},
+ {0x88f0, 0x70c0e662},
+ {0x88f4, 0xe486e52e},
+ {0x88f8, 0xe4a90001},
+ {0x88fc, 0x63424380},
+ {0x8900, 0x43006887},
+ {0x8904, 0x74100ba6},
+ {0x8908, 0x000121e8},
+ {0x890c, 0x6ec71658},
+ {0x8910, 0xe5126f0e},
+ {0x8914, 0x7410e667},
+ {0x8918, 0x000321e8},
+ {0x891c, 0x6eeb1658},
+ {0x8920, 0xe667e512},
+ {0x8924, 0x21e87410},
+ {0x8928, 0x16580005},
+ {0x892c, 0x6f0f6e13},
+ {0x8930, 0xe667e512},
+ {0x8934, 0x21e87410},
+ {0x8938, 0x16580007},
+ {0x893c, 0xe5126e3b},
+ {0x8940, 0x7410e667},
+ {0x8944, 0x000921e8},
+ {0x8948, 0x6e671658},
+ {0x894c, 0xe5126f0f},
+ {0x8950, 0x7410e667},
+ {0x8954, 0x000b21e8},
+ {0x8958, 0x6e8b1658},
+ {0x895c, 0xe667e512},
+ {0x8960, 0x21e87410},
+ {0x8964, 0x1658000d},
+ {0x8968, 0x6f0f6eb3},
+ {0x896c, 0xe667e512},
+ {0x8970, 0xfe08ff09},
+ {0x8974, 0x21e87410},
+ {0x8978, 0x1658000e},
+ {0x897c, 0xe5126ec7},
+ {0x8980, 0x7410e667},
+ {0x8984, 0x000f21e8},
+ {0x8988, 0x6edb1658},
+ {0x898c, 0xe5126f0f},
+ {0x8990, 0x7410e667},
+ {0x8994, 0x001021e8},
+ {0x8998, 0x6eef1658},
+ {0x899c, 0xe667e512},
+ {0x89a0, 0xfe02ff03},
+ {0x89a4, 0x7410e667},
+ {0x89a8, 0x001321e8},
+ {0x89ac, 0x6e111658},
+ {0x89b0, 0xe5126f00},
+ {0x89b4, 0xff03e667},
+ {0x89b8, 0xe667fe02},
+ {0x89bc, 0x21e87410},
+ {0x89c0, 0x16580014},
+ {0x89c4, 0xe5126e25},
+ {0x89c8, 0xfc48e667},
+ {0x89cc, 0xfe08ff09},
+ {0x89d0, 0x21e87410},
+ {0x89d4, 0x16580015},
+ {0x89d8, 0xe5126e39},
+ {0x89dc, 0x7410e667},
+ {0x89e0, 0x001621e8},
+ {0x89e4, 0x6e4d1658},
+ {0x89e8, 0xe667e512},
+ {0x89ec, 0x7410fd49},
+ {0x89f0, 0x001821e8},
+ {0x89f4, 0x6e751658},
+ {0x89f8, 0xe667e512},
+ {0x89fc, 0x21e87410},
+ {0x8a00, 0x1658001a},
+ {0x8a04, 0xe5126e99},
+ {0x8a08, 0xfe44e667},
+ {0x8a0c, 0x21e87410},
+ {0x8a10, 0x1658001c},
+ {0x8a14, 0xe5126ec5},
+ {0x8a18, 0x7410e667},
+ {0x8a1c, 0x001e21e8},
+ {0x8a20, 0x6eed1658},
+ {0x8a24, 0xe667e512},
+ {0x8a28, 0x21e87410},
+ {0x8a2c, 0x16580020},
+ {0x8a30, 0x6f016e15},
+ {0x8a34, 0xe667e512},
+ {0x8a38, 0x21e87410},
+ {0x8a3c, 0x16580022},
+ {0x8a40, 0xe5126e39},
+ {0x8a44, 0xe52ee667},
+ {0x8a48, 0x0001e49c},
+ {0x8a4c, 0x4380e4a9},
+ {0x8a50, 0x68806340},
+ {0x8a54, 0x0bac4300},
+ {0x8a58, 0x00223241},
+ {0x8a5c, 0x0002e667},
+ {0x8a60, 0x0baa54ec},
+ {0x8a64, 0xe6670022},
+ {0x8a68, 0xe52e0002},
+ {0x8a6c, 0x0001e49c},
+ {0x8a70, 0x4380e4a9},
+ {0x8a74, 0x68816340},
+ {0x8a78, 0x0baa4300},
+ {0x8a7c, 0xe52e3230},
+ {0x8a80, 0x0001e49c},
+ {0x8a84, 0x4380e4a9},
+ {0x8a88, 0x68826341},
+ {0x8a8c, 0x0baa4300},
+ {0x8a90, 0xe52e3221},
+ {0x8a94, 0x0001e49c},
+ {0x8a98, 0x42fc0004},
+ {0x8a9c, 0x60010007},
+ {0x8aa0, 0x42000004},
+ {0x8aa4, 0x62200007},
+ {0x8aa8, 0x00046200},
+ {0x8aac, 0x5b405501},
+ {0x8ab0, 0x00076605},
+ {0x8ab4, 0x63006200},
+ {0x8ab8, 0x0004e54f},
+ {0x8abc, 0x0a010900},
+ {0x8ac0, 0x0d000b40},
+ {0x8ac4, 0x00320e01},
+ {0x8ac8, 0x95090004},
+ {0x8acc, 0x790442fb},
+ {0x8ad0, 0x43804200},
+ {0x8ad4, 0x4d010007},
+ {0x8ad8, 0x43000004},
+ {0x8adc, 0x05620007},
+ {0x8ae0, 0x961d05a3},
+ {0x8ae4, 0x0004e54f},
+ {0x8ae8, 0x0007e4c5},
+ {0x8aec, 0x07a306a2},
+ {0x8af0, 0x0004e54f},
+ {0x8af4, 0xe53fe4c5},
+ {0x8af8, 0xe5470002},
+ {0x8afc, 0x00074380},
+ {0x8b00, 0x00044d00},
+ {0x8b04, 0x42fe4300},
+ {0x8b08, 0x42007900},
+ {0x8b0c, 0x00040001},
+ {0x8b10, 0x000742fc},
+ {0x8b14, 0x00046003},
+ {0x8b18, 0x32d24200},
+ {0x8b1c, 0x06a20007},
+ {0x8b20, 0x32fc07a3},
+ {0x8b24, 0xe32ee320},
+ {0x8b28, 0x0001e333},
+ {0x8b2c, 0xe333e320},
+ {0x8b30, 0xe3270001},
+ {0x8b34, 0xe333e32e},
+ {0x8b38, 0xe3270001},
+ {0x8b3c, 0x0001e333},
+ {0x8b40, 0x42fc0004},
+ {0x8b44, 0x60030007},
+ {0x8b48, 0x42000004},
+ {0x8b4c, 0x00040001},
+ {0x8b50, 0x000742fc},
+ {0x8b54, 0x00046001},
+ {0x8b58, 0x00014200},
+ {0x8b5c, 0x62200007},
+ {0x8b60, 0xe5476200},
+ {0x8b64, 0x00070001},
+ {0x8b68, 0x00046300},
+ {0x8b6c, 0x0a000900},
+ {0x8b70, 0x00320e01},
+ {0x8b74, 0x06a20007},
+ {0x8b78, 0xe559e54f},
+ {0x8b7c, 0x42fe0002},
+ {0x8b80, 0x42007900},
+ {0x8b84, 0x00050001},
+ {0x8b88, 0x00077700},
+ {0x8b8c, 0x00045200},
+ {0x8b90, 0x000742fe},
+ {0x8b94, 0x00046000},
+ {0x8b98, 0x43804200},
+ {0x8b9c, 0x61006000},
+ {0x8ba0, 0x63106201},
+ {0x8ba4, 0x00056804},
+ {0x8ba8, 0x55004100},
+ {0x8bac, 0x5c020007},
+ {0x8bb0, 0x43000004},
+ {0x8bb4, 0x00050001},
+ {0x8bb8, 0xe3c96c06},
+ {0x8bbc, 0xe3b8e3db},
+ {0x8bc0, 0xe423e567},
+ {0x8bc4, 0xe43ce56f},
+ {0x8bc8, 0xe3b80001},
+ {0x8bcc, 0x6c060005},
+ {0x8bd0, 0xe5e6e3c9},
+ {0x8bd4, 0xe423e567},
+ {0x8bd8, 0xe43ce56f},
+ {0x8bdc, 0x00050001},
+ {0x8be0, 0xe3c96c00},
+ {0x8be4, 0xe3b8e3db},
+ {0x8be8, 0xe423e582},
+ {0x8bec, 0xe43ce58a},
+ {0x8bf0, 0xe3b80001},
+ {0x8bf4, 0x6c000005},
+ {0x8bf8, 0xe5e6e3c9},
+ {0x8bfc, 0xe423e582},
+ {0x8c00, 0xe43ce58a},
+ {0x8c04, 0x00050001},
+ {0x8c08, 0xe3c96c04},
+ {0x8c0c, 0xe3b8e3db},
+ {0x8c10, 0xe423e59d},
+ {0x8c14, 0xe43ce5a5},
+ {0x8c18, 0xe3b80001},
+ {0x8c1c, 0x6c040005},
+ {0x8c20, 0xe5e6e3c9},
+ {0x8c24, 0xe423e59d},
+ {0x8c28, 0xe43ce5a5},
+ {0x8c2c, 0x00050001},
+ {0x8c30, 0xe3c96c02},
+ {0x8c34, 0xe3b8e3db},
+ {0x8c38, 0xe423e5b8},
+ {0x8c3c, 0xe43ce5c0},
+ {0x8c40, 0xe3b80001},
+ {0x8c44, 0x6c020005},
+ {0x8c48, 0xe5e6e3c9},
+ {0x8c4c, 0xe423e5b8},
+ {0x8c50, 0xe43ce5c0},
+ {0x8c54, 0x00040001},
+ {0x8c58, 0x60084380},
+ {0x8c5c, 0x6200610a},
+ {0x8c60, 0x000663ce},
+ {0x8c64, 0x7f006080},
+ {0x8c68, 0x43000004},
+ {0x8c6c, 0x0001e618},
+ {0x8c70, 0x55000007},
+ {0x8c74, 0x74200004},
+ {0x8c78, 0x77117901},
+ {0x8c7c, 0x57005710},
+ {0x8c80, 0x7430140f},
+ {0x8c84, 0x43800004},
+ {0x8c88, 0x72000007},
+ {0x8c8c, 0x43000004},
+ {0x8c90, 0x00040001},
+ {0x8c94, 0x00057420},
+ {0x8c98, 0x7e067700},
+ {0x8c9c, 0x73807388},
+ {0x8ca0, 0x140f8f00},
+ {0x8ca4, 0x74300004},
+ {0x8ca8, 0x73000005},
+ {0x8cac, 0xe5d30001},
+ {0x8cb0, 0x73000005},
+ {0x8cb4, 0x00040001},
+ {0x8cb8, 0xb1034380},
+ {0x8cbc, 0x7cdb0006},
+ {0x8cc0, 0x00079103},
+ {0x8cc4, 0x000440db},
+ {0x8cc8, 0xe5d34300},
+ {0x8ccc, 0x73800005},
+ {0x8cd0, 0x5d010006},
+ {0x8cd4, 0x62006002},
+ {0x8cd8, 0x0005e5f7},
+ {0x8cdc, 0x00077300},
+ {0x8ce0, 0x75787608},
+ {0x8ce4, 0x43800004},
+ {0x8ce8, 0x5e010007},
+ {0x8cec, 0x140a5e00},
+ {0x8cf0, 0x63800006},
+ {0x8cf4, 0x00077f00},
+ {0x8cf8, 0x4e204c3f},
+ {0x8cfc, 0x73047280},
+ {0x8d00, 0x140a7300},
+ {0x8d04, 0x00044d20},
+ {0x8d08, 0x00064300},
+ {0x8d0c, 0x00077402},
+ {0x8d10, 0x40004001},
+ {0x8d14, 0x0006ab00},
+ {0x8d18, 0x00077404},
+ {0x8d1c, 0x40004001},
+ {0x8d20, 0x140aab00},
+ {0x8d24, 0x43800004},
+ {0x8d28, 0x52800007},
+ {0x8d2c, 0x140a5200},
+ {0x8d30, 0x4d004c00},
+ {0x8d34, 0x00064e00},
+ {0x8d38, 0x63006080},
+ {0x8d3c, 0x43000004},
+ {0x8d40, 0x76000007},
+ {0x8d44, 0x00040001},
+ {0x8d48, 0xb1034380},
+ {0x8d4c, 0x7cdb0006},
+ {0x8d50, 0x00079103},
+ {0x8d54, 0x000440db},
+ {0x8d58, 0xe5d34300},
+ {0x8d5c, 0xe5f77e03},
+ {0x8d60, 0x43800004},
+ {0x8d64, 0x0006b103},
+ {0x8d68, 0x91037c5b},
+ {0x8d6c, 0x405b0007},
+ {0x8d70, 0x43000004},
+ {0x8d74, 0x00010001},
+ {0x8d78, 0x43800004},
+ {0x8d7c, 0x4e200007},
+ {0x8d80, 0x63800006},
+ {0x8d84, 0x5f807cdb},
+ {0x8d88, 0x43000004},
+ {0x8d8c, 0x76080007},
+ {0x8d90, 0x00057560},
+ {0x8d94, 0x00047380},
+ {0x8d98, 0x0005420e},
+ {0x8d9c, 0x92006c01},
+ {0x8da0, 0x6c001432},
+ {0x8da4, 0x42000004},
+ {0x8da8, 0x43800004},
+ {0x8dac, 0x5f000006},
+ {0x8db0, 0x73010007},
+ {0x8db4, 0x00047300},
+ {0x8db8, 0x0007420f},
+ {0x8dbc, 0x52005280},
+ {0x8dc0, 0x0004140a},
+ {0x8dc4, 0x00064200},
+ {0x8dc8, 0x7c5b6300},
+ {0x8dcc, 0x4e000007},
+ {0x8dd0, 0x43000004},
+ {0x8dd4, 0x73000005},
+ {0x8dd8, 0x76000007},
+ {0x8ddc, 0xe5fb0001},
+ {0x8de0, 0x00040001},
+ {0x8de4, 0x60004380},
+ {0x8de8, 0x62016100},
+ {0x8dec, 0x00066310},
+ {0x8df0, 0x00046000},
+ {0x8df4, 0x00014300},
+ {0x8df8, 0x0001e618},
+ {0x8dfc, 0x4e004f02},
+ {0x8e00, 0x52015302},
+ {0x8e04, 0x140f0001},
+ {0x8e08, 0x00019700},
+ {0x8e0c, 0x8a084380},
+ {0x8e10, 0x7800aa09},
+ {0x8e14, 0x7a007900},
+ {0x8e18, 0x43007b40},
+ {0x8e1c, 0x65010001},
+ {0x8e20, 0x67013489},
+ {0x8e24, 0x43803489},
+ {0x8e28, 0xaa058a04},
+ {0x8e2c, 0x00014300},
+ {0x8e30, 0x34966500},
+ {0x8e34, 0x34966700},
+ {0x8e38, 0x8a084380},
+ {0x8e3c, 0x7c00aa09},
+ {0x8e40, 0x7e007d00},
+ {0x8e44, 0x43007f40},
+ {0x8e48, 0x64010001},
+ {0x8e4c, 0x6601349f},
+ {0x8e50, 0x4380349f},
+ {0x8e54, 0xaa058a04},
+ {0x8e58, 0x00014300},
+ {0x8e5c, 0x34ac6400},
+ {0x8e60, 0x34ac6600},
+ {0x8e64, 0x7b484380},
+ {0x8e68, 0x79007a90},
+ {0x8e6c, 0x43007802},
+ {0x8e70, 0x34c95503},
+ {0x8e74, 0x7b384380},
+ {0x8e78, 0x43007a80},
+ {0x8e7c, 0x34c95513},
+ {0x8e80, 0x7b404380},
+ {0x8e84, 0x43007a00},
+ {0x8e88, 0x74015523},
+ {0x8e8c, 0x8e007400},
+ {0x8e90, 0x00070001},
+ {0x8e94, 0x00045230},
+ {0x8e98, 0x74307431},
+ {0x8e9c, 0x00078e00},
+ {0x8ea0, 0x00045220},
+ {0x8ea4, 0x57020001},
+ {0x8ea8, 0x8e005700},
+ {0x8eac, 0x42ef0001},
+ {0x8eb0, 0x56005610},
+ {0x8eb4, 0x8c004200},
+ {0x8eb8, 0x5b500001},
+ {0x8ebc, 0x5b2034e0},
+ {0x8ec0, 0x4e004f78},
+ {0x8ec4, 0x52015388},
+ {0x8ec8, 0x4e004f78},
+ {0x8ecc, 0x52015388},
+ {0x8ed0, 0x5480e4f2},
+ {0x8ed4, 0x54815400},
+ {0x8ed8, 0x54825400},
+ {0x8edc, 0xe4fd5400},
+ {0x8ee0, 0x3010bf1d},
+ {0x8ee4, 0xe4bae4b2},
+ {0x8ee8, 0xe4d3e4c0},
+ {0x8eec, 0x5523e65b},
+ {0x8ef0, 0x5525e4c9},
+ {0x8ef4, 0xe65be4d3},
+ {0x8ef8, 0x54bf0001},
+ {0x8efc, 0x54a354c0},
+ {0x8f00, 0x54a454c1},
+ {0x8f04, 0xbf074c18},
+ {0x8f08, 0x54a454c2},
+ {0x8f0c, 0x54c1bf04},
+ {0x8f10, 0xbf0154a3},
+ {0x8f14, 0x54dfe66c},
+ {0x8f18, 0x54bf0001},
+ {0x8f1c, 0x050a54e5},
+ {0x8f20, 0x000154df},
+ {0x8f24, 0x7b801657},
+ {0x8f28, 0x43807430},
+ {0x8f2c, 0x7e007f40},
+ {0x8f30, 0x7c027d00},
+ {0x8f34, 0x5b404300},
+ {0x8f38, 0x5c015501},
+ {0x8f3c, 0x5480e4d7},
+ {0x8f40, 0x54815400},
+ {0x8f44, 0x54825400},
+ {0x8f48, 0x7b005400},
+ {0x8f4c, 0xe4fd7410},
+ {0x8f50, 0x3010bfe5},
+ {0x8f54, 0x56005610},
+ {0x8f58, 0x00018c00},
+ {0x8f5c, 0x57005704},
+ {0x8f60, 0x57088e00},
+ {0x8f64, 0x8e005700},
+ {0x8f68, 0x57805781},
+ {0x8f6c, 0x43808e00},
+ {0x8f70, 0x5c010007},
+ {0x8f74, 0x14035c00},
+ {0x8f78, 0x43000004},
+ {0x8f7c, 0x427f0001},
+ {0x8f80, 0x62800007},
+ {0x8f84, 0x92006200},
+ {0x8f88, 0x42000004},
+ {0x8f8c, 0x427f0001},
+ {0x8f90, 0x63940007},
+ {0x8f94, 0x92006314},
+ {0x8f98, 0x42000004},
+ {0x8f9c, 0x00040001},
+ {0x8fa0, 0x790142fe},
+ {0x8fa4, 0x74204200},
+ {0x8fa8, 0x5710140f},
+ {0x8fac, 0x141f5700},
+ {0x8fb0, 0x00040001},
+ {0x8fb4, 0x790142fe},
+ {0x8fb8, 0x74204200},
+ {0x8fbc, 0x42bf140f},
+ {0x8fc0, 0x62400007},
+ {0x8fc4, 0x141f6200},
+ {0x8fc8, 0x42000004},
+ {0x8fcc, 0x00060001},
+ {0x8fd0, 0x60035d06},
+ {0x8fd4, 0x62016104},
+ {0x8fd8, 0x73100005},
+ {0x8fdc, 0x00040001},
+ {0x8fe0, 0x00074380},
+ {0x8fe4, 0x5e005e01},
+ {0x8fe8, 0xb103140a},
+ {0x8fec, 0x7f070006},
+ {0x8ff0, 0x00079103},
+ {0x8ff4, 0x00064307},
+ {0x8ff8, 0x5d025c00},
+ {0x8ffc, 0x00045e03},
+ {0x9000, 0x00014300},
+ {0x9004, 0x5d040006},
+ {0x9008, 0x61046000},
+ {0x900c, 0x00056201},
+ {0x9010, 0x00017310},
+ {0x9014, 0x43800004},
+ {0x9018, 0x5e010007},
+ {0x901c, 0x140a5e00},
+ {0x9020, 0x0006b103},
+ {0x9024, 0x91037fc6},
+ {0x9028, 0x43c60007},
+ {0x902c, 0x5c000006},
+ {0x9030, 0x5e035d02},
+ {0x9034, 0x43000004},
+ {0x9038, 0x00060001},
+ {0x903c, 0x60005d04},
+ {0x9040, 0x62016104},
+ {0x9044, 0x73100005},
+ {0x9048, 0x00040001},
+ {0x904c, 0x00074380},
+ {0x9050, 0x5e005e01},
+ {0x9054, 0xb103140a},
+ {0x9058, 0x7fc60006},
+ {0x905c, 0x00079103},
+ {0x9060, 0x000643c6},
+ {0x9064, 0x5d025c00},
+ {0x9068, 0x00045e03},
+ {0x906c, 0x00014300},
+ {0x9070, 0x5d000006},
+ {0x9074, 0x61006002},
+ {0x9078, 0x00056201},
+ {0x907c, 0x00017300},
+ {0x9080, 0x43800004},
+ {0x9084, 0x5e010007},
+ {0x9088, 0x140a5e00},
+ {0x908c, 0x0006b103},
+ {0x9090, 0x91037fc0},
+ {0x9094, 0x43c00007},
+ {0x9098, 0x5c000006},
+ {0x909c, 0x5e035d02},
+ {0x90a0, 0x43000004},
+ {0x90a4, 0x00050001},
+ {0x90a8, 0x00047e02},
+ {0x90ac, 0x000542f7},
+ {0x90b0, 0x00046c08},
+ {0x90b4, 0x00054270},
+ {0x90b8, 0x73807381},
+ {0x90bc, 0x00049300},
+ {0x90c0, 0x000542f7},
+ {0x90c4, 0x00046c00},
+ {0x90c8, 0x00014200},
+ {0x90cc, 0x43800004},
+ {0x90d0, 0x73040007},
+ {0x90d4, 0x14057300},
+ {0x90d8, 0x00047240},
+ {0x90dc, 0x00064300},
+ {0x90e0, 0x00077404},
+ {0x90e4, 0x40004001},
+ {0x90e8, 0x140fab00},
+ {0x90ec, 0xe64f0001},
+ {0x90f0, 0xe656e5fb},
+ {0x90f4, 0x00040001},
+ {0x90f8, 0x00047410},
+ {0x90fc, 0x42f04380},
+ {0x9100, 0x62080007},
+ {0x9104, 0x24206301},
+ {0x9108, 0x14c80000},
+ {0x910c, 0x00002428},
+ {0x9110, 0x1a4215f4},
+ {0x9114, 0x6300000b},
+ {0x9118, 0x42000004},
+ {0x911c, 0x74304300},
+ {0x9120, 0x4380140f},
+ {0x9124, 0x73080007},
+ {0x9128, 0x00047300},
+ {0x912c, 0x00014300},
+ {0x9130, 0x4bf00007},
+ {0x9134, 0x490b4a8f},
+ {0x9138, 0x4a8e48f1},
+ {0x913c, 0x48a5490a},
+ {0x9140, 0x49094a8d},
+ {0x9144, 0x4a8c487d},
+ {0x9148, 0x48754908},
+ {0x914c, 0x49074a8b},
+ {0x9150, 0x4a8a4889},
+ {0x9154, 0x48b74906},
+ {0x9158, 0x49054a89},
+ {0x915c, 0x4a8848fc},
+ {0x9160, 0x48564905},
+ {0x9164, 0x49044a87},
+ {0x9168, 0x4a8648c1},
+ {0x916c, 0x483d4904},
+ {0x9170, 0x49034a85},
+ {0x9174, 0x4a8448c7},
+ {0x9178, 0x485e4903},
+ {0x917c, 0x49024a83},
+ {0x9180, 0x4a8248ac},
+ {0x9184, 0x48624902},
+ {0x9188, 0x49024a81},
+ {0x918c, 0x4a804820},
+ {0x9190, 0x48004900},
+ {0x9194, 0x49014a90},
+ {0x9198, 0x4a10481f},
+ {0x919c, 0x00060001},
+ {0x91a0, 0x5f005f80},
+ {0x91a4, 0x00059900},
+ {0x91a8, 0x00017300},
+ {0x91ac, 0x63800006},
+ {0x91b0, 0x98006300},
+ {0x91b4, 0x549f0001},
+ {0x91b8, 0x5c015400},
+ {0x91bc, 0x540054df},
+ {0x91c0, 0x00015c02},
+ {0x91c4, 0x07145c01},
+ {0x91c8, 0x5c025400},
+ {0x91cc, 0x5c020001},
+ {0x91d0, 0x54000714},
+ {0x91d4, 0x00015c01},
+ {0x91d8, 0x4c184c98},
+ {0x91dc, 0x00080001},
+ {0x91e0, 0x5c020004},
+ {0x91e4, 0x09017430},
+ {0x91e8, 0x0ba60c01},
+ {0x91ec, 0x77800005},
+ {0x91f0, 0x52200007},
+ {0x91f4, 0x43800004},
+ {0x91f8, 0x610a6008},
+ {0x91fc, 0x63c26200},
+ {0x9200, 0x5c000007},
+ {0x9204, 0x43000004},
+ {0x9208, 0x00000001},
+ {0x8080, 0x00000004},
+ {0x8080, 0x00000000},
+ {0x8088, 0x00000000},
+};
+
+static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = {
+ { 0, 0, 0, 0, 4, 0x50505050, },
+ { 0, 0, 1, 0, 4, 0x50505050, },
+ { 0, 0, 1, 4, 4, 0x484c5050, },
+ { 0, 0, 2, 0, 4, 0x50505050, },
+ { 0, 0, 2, 4, 4, 0x44484c50, },
+ { 0, 0, 2, 8, 4, 0x34383c40, },
+ { 0, 0, 3, 0, 4, 0x50505050, },
+ { 0, 1, 2, 0, 4, 0x50505050, },
+ { 0, 1, 2, 4, 4, 0x44484c50, },
+ { 0, 1, 2, 8, 4, 0x34383c40, },
+ { 0, 1, 3, 0, 4, 0x50505050, },
+ { 0, 0, 4, 1, 4, 0x00000000, },
+ { 0, 0, 4, 0, 1, 0x00000000, },
+ { 1, 0, 1, 0, 4, 0x48484848, },
+ { 1, 0, 1, 4, 4, 0x40444848, },
+ { 1, 0, 2, 0, 4, 0x48484848, },
+ { 1, 0, 2, 4, 4, 0x3c404448, },
+ { 1, 0, 2, 8, 4, 0x2c303438, },
+ { 1, 0, 3, 0, 4, 0x48484848, },
+ { 1, 1, 2, 0, 4, 0x48484848, },
+ { 1, 1, 2, 4, 4, 0x3c404448, },
+ { 1, 1, 2, 8, 4, 0x2c303438, },
+ { 1, 1, 3, 0, 4, 0x48484848, },
+ { 1, 0, 4, 0, 4, 0x00000000, },
+ { 2, 0, 1, 0, 4, 0x40404040, },
+ { 2, 0, 1, 4, 4, 0x383c4040, },
+ { 2, 0, 2, 0, 4, 0x40404040, },
+ { 2, 0, 2, 4, 4, 0x34383c40, },
+ { 2, 0, 2, 8, 4, 0x24282c30, },
+ { 2, 0, 3, 0, 4, 0x40404040, },
+ { 2, 1, 2, 0, 4, 0x40404040, },
+ { 2, 1, 2, 4, 4, 0x34383c40, },
+ { 2, 1, 2, 8, 4, 0x24282c30, },
+ { 2, 1, 3, 0, 4, 0x40404040, },
+ { 2, 0, 4, 0, 4, 0x00000000, },
+};
+
+static const s8 _txpwr_track_delta_swingidx_6gb_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+};
+
+static const s8 _txpwr_track_delta_swingidx_6gb_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
+ 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 17, 18},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16},
+};
+
+static const s8 _txpwr_track_delta_swingidx_6ga_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+};
+
+static const s8 _txpwr_track_delta_swingidx_6ga_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10},
+ {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6},
+ {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3},
+ {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8},
+};
+
+static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14},
+};
+
+static const s8 _txpwr_track_delta_swingidx_2gb_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static const s8 _txpwr_track_delta_swingidx_2gb_p[] = {
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2
+};
+
+static const s8 _txpwr_track_delta_swingidx_2ga_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2,
+ -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3
+};
+
+static const s8 _txpwr_track_delta_swingidx_2ga_p[] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2,
+ -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3
+};
+
+static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
+};
+
+const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
+ [RTW89_REGD_NUM] = {
+ [0][0][RTW89_ACMA] = 0,
+ [0][0][RTW89_CN] = 0,
+ [0][0][RTW89_ETSI] = 0,
+ [0][0][RTW89_FCC] = 1,
+ [0][0][RTW89_IC] = 1,
+ [0][0][RTW89_KCC] = 0,
+ [0][0][RTW89_MKK] = 0,
+ [0][0][RTW89_UK] = 0,
+ [0][1][RTW89_ACMA] = 0,
+ [0][1][RTW89_CN] = 0,
+ [0][1][RTW89_ETSI] = 0,
+ [0][1][RTW89_FCC] = 3,
+ [0][1][RTW89_IC] = 3,
+ [0][1][RTW89_KCC] = 0,
+ [0][1][RTW89_MKK] = 0,
+ [0][1][RTW89_UK] = 0,
+ [1][1][RTW89_ACMA] = 0,
+ [1][1][RTW89_CN] = 0,
+ [1][1][RTW89_ETSI] = 0,
+ [1][1][RTW89_FCC] = 3,
+ [1][1][RTW89_IC] = 3,
+ [1][1][RTW89_KCC] = 0,
+ [1][1][RTW89_MKK] = 0,
+ [1][1][RTW89_UK] = 0,
+ [2][1][RTW89_ETSI] = 0,
+ [2][1][RTW89_FCC] = 0,
+ [2][1][RTW89_KCC] = 0,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][0][0][RTW89_WW][0] = 58,
+ [0][0][0][0][RTW89_WW][1] = 58,
+ [0][0][0][0][RTW89_WW][2] = 58,
+ [0][0][0][0][RTW89_WW][3] = 58,
+ [0][0][0][0][RTW89_WW][4] = 58,
+ [0][0][0][0][RTW89_WW][5] = 58,
+ [0][0][0][0][RTW89_WW][6] = 58,
+ [0][0][0][0][RTW89_WW][7] = 58,
+ [0][0][0][0][RTW89_WW][8] = 58,
+ [0][0][0][0][RTW89_WW][9] = 58,
+ [0][0][0][0][RTW89_WW][10] = 58,
+ [0][0][0][0][RTW89_WW][11] = 58,
+ [0][0][0][0][RTW89_WW][12] = 46,
+ [0][0][0][0][RTW89_WW][13] = 72,
+ [0][1][0][0][RTW89_WW][0] = 42,
+ [0][1][0][0][RTW89_WW][1] = 42,
+ [0][1][0][0][RTW89_WW][2] = 42,
+ [0][1][0][0][RTW89_WW][3] = 42,
+ [0][1][0][0][RTW89_WW][4] = 42,
+ [0][1][0][0][RTW89_WW][5] = 42,
+ [0][1][0][0][RTW89_WW][6] = 42,
+ [0][1][0][0][RTW89_WW][7] = 42,
+ [0][1][0][0][RTW89_WW][8] = 42,
+ [0][1][0][0][RTW89_WW][9] = 42,
+ [0][1][0][0][RTW89_WW][10] = 42,
+ [0][1][0][0][RTW89_WW][11] = 42,
+ [0][1][0][0][RTW89_WW][12] = 18,
+ [0][1][0][0][RTW89_WW][13] = 60,
+ [1][0][0][0][RTW89_WW][0] = 0,
+ [1][0][0][0][RTW89_WW][1] = 0,
+ [1][0][0][0][RTW89_WW][2] = 44,
+ [1][0][0][0][RTW89_WW][3] = 58,
+ [1][0][0][0][RTW89_WW][4] = 58,
+ [1][0][0][0][RTW89_WW][5] = 58,
+ [1][0][0][0][RTW89_WW][6] = 46,
+ [1][0][0][0][RTW89_WW][7] = 46,
+ [1][0][0][0][RTW89_WW][8] = 28,
+ [1][0][0][0][RTW89_WW][9] = 26,
+ [1][0][0][0][RTW89_WW][10] = 26,
+ [1][0][0][0][RTW89_WW][11] = 0,
+ [1][0][0][0][RTW89_WW][12] = 0,
+ [1][0][0][0][RTW89_WW][13] = 0,
+ [1][1][0][0][RTW89_WW][0] = 0,
+ [1][1][0][0][RTW89_WW][1] = 0,
+ [1][1][0][0][RTW89_WW][2] = 46,
+ [1][1][0][0][RTW89_WW][3] = 46,
+ [1][1][0][0][RTW89_WW][4] = 46,
+ [1][1][0][0][RTW89_WW][5] = 46,
+ [1][1][0][0][RTW89_WW][6] = 40,
+ [1][1][0][0][RTW89_WW][7] = 40,
+ [1][1][0][0][RTW89_WW][8] = 14,
+ [1][1][0][0][RTW89_WW][9] = 14,
+ [1][1][0][0][RTW89_WW][10] = 12,
+ [1][1][0][0][RTW89_WW][11] = 0,
+ [1][1][0][0][RTW89_WW][12] = 0,
+ [1][1][0][0][RTW89_WW][13] = 0,
+ [0][0][1][0][RTW89_WW][0] = 58,
+ [0][0][1][0][RTW89_WW][1] = 58,
+ [0][0][1][0][RTW89_WW][2] = 58,
+ [0][0][1][0][RTW89_WW][3] = 58,
+ [0][0][1][0][RTW89_WW][4] = 58,
+ [0][0][1][0][RTW89_WW][5] = 58,
+ [0][0][1][0][RTW89_WW][6] = 58,
+ [0][0][1][0][RTW89_WW][7] = 58,
+ [0][0][1][0][RTW89_WW][8] = 58,
+ [0][0][1][0][RTW89_WW][9] = 58,
+ [0][0][1][0][RTW89_WW][10] = 58,
+ [0][0][1][0][RTW89_WW][11] = 58,
+ [0][0][1][0][RTW89_WW][12] = 58,
+ [0][0][1][0][RTW89_WW][13] = 0,
+ [0][1][1][0][RTW89_WW][0] = 46,
+ [0][1][1][0][RTW89_WW][1] = 46,
+ [0][1][1][0][RTW89_WW][2] = 46,
+ [0][1][1][0][RTW89_WW][3] = 46,
+ [0][1][1][0][RTW89_WW][4] = 46,
+ [0][1][1][0][RTW89_WW][5] = 46,
+ [0][1][1][0][RTW89_WW][6] = 46,
+ [0][1][1][0][RTW89_WW][7] = 46,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][9] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][11] = 46,
+ [0][1][1][0][RTW89_WW][12] = 36,
+ [0][1][1][0][RTW89_WW][13] = 0,
+ [0][0][2][0][RTW89_WW][0] = 58,
+ [0][0][2][0][RTW89_WW][1] = 58,
+ [0][0][2][0][RTW89_WW][2] = 58,
+ [0][0][2][0][RTW89_WW][3] = 58,
+ [0][0][2][0][RTW89_WW][4] = 58,
+ [0][0][2][0][RTW89_WW][5] = 58,
+ [0][0][2][0][RTW89_WW][6] = 58,
+ [0][0][2][0][RTW89_WW][7] = 58,
+ [0][0][2][0][RTW89_WW][8] = 58,
+ [0][0][2][0][RTW89_WW][9] = 58,
+ [0][0][2][0][RTW89_WW][10] = 58,
+ [0][0][2][0][RTW89_WW][11] = 58,
+ [0][0][2][0][RTW89_WW][12] = 38,
+ [0][0][2][0][RTW89_WW][13] = 0,
+ [0][1][2][0][RTW89_WW][0] = 46,
+ [0][1][2][0][RTW89_WW][1] = 46,
+ [0][1][2][0][RTW89_WW][2] = 46,
+ [0][1][2][0][RTW89_WW][3] = 46,
+ [0][1][2][0][RTW89_WW][4] = 46,
+ [0][1][2][0][RTW89_WW][5] = 46,
+ [0][1][2][0][RTW89_WW][6] = 46,
+ [0][1][2][0][RTW89_WW][7] = 46,
+ [0][1][2][0][RTW89_WW][8] = 46,
+ [0][1][2][0][RTW89_WW][9] = 46,
+ [0][1][2][0][RTW89_WW][10] = 46,
+ [0][1][2][0][RTW89_WW][11] = 46,
+ [0][1][2][0][RTW89_WW][12] = 16,
+ [0][1][2][0][RTW89_WW][13] = 0,
+ [0][1][2][1][RTW89_WW][0] = 36,
+ [0][1][2][1][RTW89_WW][1] = 34,
+ [0][1][2][1][RTW89_WW][2] = 34,
+ [0][1][2][1][RTW89_WW][3] = 34,
+ [0][1][2][1][RTW89_WW][4] = 34,
+ [0][1][2][1][RTW89_WW][5] = 34,
+ [0][1][2][1][RTW89_WW][6] = 34,
+ [0][1][2][1][RTW89_WW][7] = 34,
+ [0][1][2][1][RTW89_WW][8] = 34,
+ [0][1][2][1][RTW89_WW][9] = 34,
+ [0][1][2][1][RTW89_WW][10] = 34,
+ [0][1][2][1][RTW89_WW][11] = 34,
+ [0][1][2][1][RTW89_WW][12] = 16,
+ [0][1][2][1][RTW89_WW][13] = 0,
+ [1][0][2][0][RTW89_WW][0] = 0,
+ [1][0][2][0][RTW89_WW][1] = 0,
+ [1][0][2][0][RTW89_WW][2] = 58,
+ [1][0][2][0][RTW89_WW][3] = 58,
+ [1][0][2][0][RTW89_WW][4] = 58,
+ [1][0][2][0][RTW89_WW][5] = 58,
+ [1][0][2][0][RTW89_WW][6] = 58,
+ [1][0][2][0][RTW89_WW][7] = 58,
+ [1][0][2][0][RTW89_WW][8] = 58,
+ [1][0][2][0][RTW89_WW][9] = 58,
+ [1][0][2][0][RTW89_WW][10] = 56,
+ [1][0][2][0][RTW89_WW][11] = 0,
+ [1][0][2][0][RTW89_WW][12] = 0,
+ [1][0][2][0][RTW89_WW][13] = 0,
+ [1][1][2][0][RTW89_WW][0] = 0,
+ [1][1][2][0][RTW89_WW][1] = 0,
+ [1][1][2][0][RTW89_WW][2] = 34,
+ [1][1][2][0][RTW89_WW][3] = 34,
+ [1][1][2][0][RTW89_WW][4] = 34,
+ [1][1][2][0][RTW89_WW][5] = 34,
+ [1][1][2][0][RTW89_WW][6] = 34,
+ [1][1][2][0][RTW89_WW][7] = 34,
+ [1][1][2][0][RTW89_WW][8] = 34,
+ [1][1][2][0][RTW89_WW][9] = 34,
+ [1][1][2][0][RTW89_WW][10] = 34,
+ [1][1][2][0][RTW89_WW][11] = 0,
+ [1][1][2][0][RTW89_WW][12] = 0,
+ [1][1][2][0][RTW89_WW][13] = 0,
+ [1][1][2][1][RTW89_WW][0] = 0,
+ [1][1][2][1][RTW89_WW][1] = 0,
+ [1][1][2][1][RTW89_WW][2] = 34,
+ [1][1][2][1][RTW89_WW][3] = 34,
+ [1][1][2][1][RTW89_WW][4] = 34,
+ [1][1][2][1][RTW89_WW][5] = 34,
+ [1][1][2][1][RTW89_WW][6] = 34,
+ [1][1][2][1][RTW89_WW][7] = 34,
+ [1][1][2][1][RTW89_WW][8] = 34,
+ [1][1][2][1][RTW89_WW][9] = 34,
+ [1][1][2][1][RTW89_WW][10] = 36,
+ [1][1][2][1][RTW89_WW][11] = 0,
+ [1][1][2][1][RTW89_WW][12] = 0,
+ [1][1][2][1][RTW89_WW][13] = 0,
+ [0][0][0][0][RTW89_FCC][0] = 76,
+ [0][0][0][0][RTW89_ETSI][0] = 60,
+ [0][0][0][0][RTW89_MKK][0] = 68,
+ [0][0][0][0][RTW89_IC][0] = 76,
+ [0][0][0][0][RTW89_KCC][0] = 68,
+ [0][0][0][0][RTW89_ACMA][0] = 60,
+ [0][0][0][0][RTW89_CN][0] = 58,
+ [0][0][0][0][RTW89_UK][0] = 60,
+ [0][0][0][0][RTW89_FCC][1] = 76,
+ [0][0][0][0][RTW89_ETSI][1] = 60,
+ [0][0][0][0][RTW89_MKK][1] = 68,
+ [0][0][0][0][RTW89_IC][1] = 76,
+ [0][0][0][0][RTW89_KCC][1] = 68,
+ [0][0][0][0][RTW89_ACMA][1] = 60,
+ [0][0][0][0][RTW89_CN][1] = 58,
+ [0][0][0][0][RTW89_UK][1] = 60,
+ [0][0][0][0][RTW89_FCC][2] = 76,
+ [0][0][0][0][RTW89_ETSI][2] = 60,
+ [0][0][0][0][RTW89_MKK][2] = 68,
+ [0][0][0][0][RTW89_IC][2] = 76,
+ [0][0][0][0][RTW89_KCC][2] = 68,
+ [0][0][0][0][RTW89_ACMA][2] = 60,
+ [0][0][0][0][RTW89_CN][2] = 58,
+ [0][0][0][0][RTW89_UK][2] = 60,
+ [0][0][0][0][RTW89_FCC][3] = 76,
+ [0][0][0][0][RTW89_ETSI][3] = 60,
+ [0][0][0][0][RTW89_MKK][3] = 68,
+ [0][0][0][0][RTW89_IC][3] = 76,
+ [0][0][0][0][RTW89_KCC][3] = 68,
+ [0][0][0][0][RTW89_ACMA][3] = 60,
+ [0][0][0][0][RTW89_CN][3] = 58,
+ [0][0][0][0][RTW89_UK][3] = 60,
+ [0][0][0][0][RTW89_FCC][4] = 76,
+ [0][0][0][0][RTW89_ETSI][4] = 60,
+ [0][0][0][0][RTW89_MKK][4] = 68,
+ [0][0][0][0][RTW89_IC][4] = 76,
+ [0][0][0][0][RTW89_KCC][4] = 68,
+ [0][0][0][0][RTW89_ACMA][4] = 60,
+ [0][0][0][0][RTW89_CN][4] = 58,
+ [0][0][0][0][RTW89_UK][4] = 60,
+ [0][0][0][0][RTW89_FCC][5] = 76,
+ [0][0][0][0][RTW89_ETSI][5] = 60,
+ [0][0][0][0][RTW89_MKK][5] = 68,
+ [0][0][0][0][RTW89_IC][5] = 76,
+ [0][0][0][0][RTW89_KCC][5] = 68,
+ [0][0][0][0][RTW89_ACMA][5] = 60,
+ [0][0][0][0][RTW89_CN][5] = 58,
+ [0][0][0][0][RTW89_UK][5] = 60,
+ [0][0][0][0][RTW89_FCC][6] = 76,
+ [0][0][0][0][RTW89_ETSI][6] = 60,
+ [0][0][0][0][RTW89_MKK][6] = 68,
+ [0][0][0][0][RTW89_IC][6] = 76,
+ [0][0][0][0][RTW89_KCC][6] = 68,
+ [0][0][0][0][RTW89_ACMA][6] = 60,
+ [0][0][0][0][RTW89_CN][6] = 58,
+ [0][0][0][0][RTW89_UK][6] = 60,
+ [0][0][0][0][RTW89_FCC][7] = 76,
+ [0][0][0][0][RTW89_ETSI][7] = 60,
+ [0][0][0][0][RTW89_MKK][7] = 68,
+ [0][0][0][0][RTW89_IC][7] = 76,
+ [0][0][0][0][RTW89_KCC][7] = 68,
+ [0][0][0][0][RTW89_ACMA][7] = 60,
+ [0][0][0][0][RTW89_CN][7] = 58,
+ [0][0][0][0][RTW89_UK][7] = 60,
+ [0][0][0][0][RTW89_FCC][8] = 76,
+ [0][0][0][0][RTW89_ETSI][8] = 60,
+ [0][0][0][0][RTW89_MKK][8] = 68,
+ [0][0][0][0][RTW89_IC][8] = 76,
+ [0][0][0][0][RTW89_KCC][8] = 68,
+ [0][0][0][0][RTW89_ACMA][8] = 60,
+ [0][0][0][0][RTW89_CN][8] = 58,
+ [0][0][0][0][RTW89_UK][8] = 60,
+ [0][0][0][0][RTW89_FCC][9] = 76,
+ [0][0][0][0][RTW89_ETSI][9] = 60,
+ [0][0][0][0][RTW89_MKK][9] = 68,
+ [0][0][0][0][RTW89_IC][9] = 76,
+ [0][0][0][0][RTW89_KCC][9] = 70,
+ [0][0][0][0][RTW89_ACMA][9] = 60,
+ [0][0][0][0][RTW89_CN][9] = 58,
+ [0][0][0][0][RTW89_UK][9] = 60,
+ [0][0][0][0][RTW89_FCC][10] = 76,
+ [0][0][0][0][RTW89_ETSI][10] = 60,
+ [0][0][0][0][RTW89_MKK][10] = 68,
+ [0][0][0][0][RTW89_IC][10] = 76,
+ [0][0][0][0][RTW89_KCC][10] = 70,
+ [0][0][0][0][RTW89_ACMA][10] = 60,
+ [0][0][0][0][RTW89_CN][10] = 58,
+ [0][0][0][0][RTW89_UK][10] = 60,
+ [0][0][0][0][RTW89_FCC][11] = 58,
+ [0][0][0][0][RTW89_ETSI][11] = 60,
+ [0][0][0][0][RTW89_MKK][11] = 68,
+ [0][0][0][0][RTW89_IC][11] = 58,
+ [0][0][0][0][RTW89_KCC][11] = 70,
+ [0][0][0][0][RTW89_ACMA][11] = 60,
+ [0][0][0][0][RTW89_CN][11] = 58,
+ [0][0][0][0][RTW89_UK][11] = 60,
+ [0][0][0][0][RTW89_FCC][12] = 46,
+ [0][0][0][0][RTW89_ETSI][12] = 60,
+ [0][0][0][0][RTW89_MKK][12] = 68,
+ [0][0][0][0][RTW89_IC][12] = 46,
+ [0][0][0][0][RTW89_KCC][12] = 70,
+ [0][0][0][0][RTW89_ACMA][12] = 60,
+ [0][0][0][0][RTW89_CN][12] = 58,
+ [0][0][0][0][RTW89_UK][12] = 60,
+ [0][0][0][0][RTW89_FCC][13] = 127,
+ [0][0][0][0][RTW89_ETSI][13] = 127,
+ [0][0][0][0][RTW89_MKK][13] = 72,
+ [0][0][0][0][RTW89_IC][13] = 127,
+ [0][0][0][0][RTW89_KCC][13] = 127,
+ [0][0][0][0][RTW89_ACMA][13] = 127,
+ [0][0][0][0][RTW89_CN][13] = 127,
+ [0][0][0][0][RTW89_UK][13] = 127,
+ [0][1][0][0][RTW89_FCC][0] = 76,
+ [0][1][0][0][RTW89_ETSI][0] = 48,
+ [0][1][0][0][RTW89_MKK][0] = 58,
+ [0][1][0][0][RTW89_IC][0] = 76,
+ [0][1][0][0][RTW89_KCC][0] = 56,
+ [0][1][0][0][RTW89_ACMA][0] = 48,
+ [0][1][0][0][RTW89_CN][0] = 42,
+ [0][1][0][0][RTW89_UK][0] = 48,
+ [0][1][0][0][RTW89_FCC][1] = 76,
+ [0][1][0][0][RTW89_ETSI][1] = 48,
+ [0][1][0][0][RTW89_MKK][1] = 58,
+ [0][1][0][0][RTW89_IC][1] = 76,
+ [0][1][0][0][RTW89_KCC][1] = 56,
+ [0][1][0][0][RTW89_ACMA][1] = 48,
+ [0][1][0][0][RTW89_CN][1] = 42,
+ [0][1][0][0][RTW89_UK][1] = 48,
+ [0][1][0][0][RTW89_FCC][2] = 76,
+ [0][1][0][0][RTW89_ETSI][2] = 48,
+ [0][1][0][0][RTW89_MKK][2] = 58,
+ [0][1][0][0][RTW89_IC][2] = 76,
+ [0][1][0][0][RTW89_KCC][2] = 56,
+ [0][1][0][0][RTW89_ACMA][2] = 48,
+ [0][1][0][0][RTW89_CN][2] = 42,
+ [0][1][0][0][RTW89_UK][2] = 48,
+ [0][1][0][0][RTW89_FCC][3] = 76,
+ [0][1][0][0][RTW89_ETSI][3] = 48,
+ [0][1][0][0][RTW89_MKK][3] = 58,
+ [0][1][0][0][RTW89_IC][3] = 76,
+ [0][1][0][0][RTW89_KCC][3] = 56,
+ [0][1][0][0][RTW89_ACMA][3] = 48,
+ [0][1][0][0][RTW89_CN][3] = 42,
+ [0][1][0][0][RTW89_UK][3] = 48,
+ [0][1][0][0][RTW89_FCC][4] = 76,
+ [0][1][0][0][RTW89_ETSI][4] = 48,
+ [0][1][0][0][RTW89_MKK][4] = 58,
+ [0][1][0][0][RTW89_IC][4] = 76,
+ [0][1][0][0][RTW89_KCC][4] = 56,
+ [0][1][0][0][RTW89_ACMA][4] = 48,
+ [0][1][0][0][RTW89_CN][4] = 42,
+ [0][1][0][0][RTW89_UK][4] = 48,
+ [0][1][0][0][RTW89_FCC][5] = 76,
+ [0][1][0][0][RTW89_ETSI][5] = 48,
+ [0][1][0][0][RTW89_MKK][5] = 58,
+ [0][1][0][0][RTW89_IC][5] = 76,
+ [0][1][0][0][RTW89_KCC][5] = 56,
+ [0][1][0][0][RTW89_ACMA][5] = 48,
+ [0][1][0][0][RTW89_CN][5] = 42,
+ [0][1][0][0][RTW89_UK][5] = 48,
+ [0][1][0][0][RTW89_FCC][6] = 76,
+ [0][1][0][0][RTW89_ETSI][6] = 48,
+ [0][1][0][0][RTW89_MKK][6] = 58,
+ [0][1][0][0][RTW89_IC][6] = 76,
+ [0][1][0][0][RTW89_KCC][6] = 56,
+ [0][1][0][0][RTW89_ACMA][6] = 48,
+ [0][1][0][0][RTW89_CN][6] = 42,
+ [0][1][0][0][RTW89_UK][6] = 48,
+ [0][1][0][0][RTW89_FCC][7] = 76,
+ [0][1][0][0][RTW89_ETSI][7] = 48,
+ [0][1][0][0][RTW89_MKK][7] = 58,
+ [0][1][0][0][RTW89_IC][7] = 76,
+ [0][1][0][0][RTW89_KCC][7] = 56,
+ [0][1][0][0][RTW89_ACMA][7] = 48,
+ [0][1][0][0][RTW89_CN][7] = 42,
+ [0][1][0][0][RTW89_UK][7] = 48,
+ [0][1][0][0][RTW89_FCC][8] = 76,
+ [0][1][0][0][RTW89_ETSI][8] = 48,
+ [0][1][0][0][RTW89_MKK][8] = 58,
+ [0][1][0][0][RTW89_IC][8] = 76,
+ [0][1][0][0][RTW89_KCC][8] = 56,
+ [0][1][0][0][RTW89_ACMA][8] = 48,
+ [0][1][0][0][RTW89_CN][8] = 42,
+ [0][1][0][0][RTW89_UK][8] = 48,
+ [0][1][0][0][RTW89_FCC][9] = 70,
+ [0][1][0][0][RTW89_ETSI][9] = 48,
+ [0][1][0][0][RTW89_MKK][9] = 58,
+ [0][1][0][0][RTW89_IC][9] = 70,
+ [0][1][0][0][RTW89_KCC][9] = 56,
+ [0][1][0][0][RTW89_ACMA][9] = 48,
+ [0][1][0][0][RTW89_CN][9] = 42,
+ [0][1][0][0][RTW89_UK][9] = 48,
+ [0][1][0][0][RTW89_FCC][10] = 72,
+ [0][1][0][0][RTW89_ETSI][10] = 48,
+ [0][1][0][0][RTW89_MKK][10] = 58,
+ [0][1][0][0][RTW89_IC][10] = 72,
+ [0][1][0][0][RTW89_KCC][10] = 56,
+ [0][1][0][0][RTW89_ACMA][10] = 48,
+ [0][1][0][0][RTW89_CN][10] = 42,
+ [0][1][0][0][RTW89_UK][10] = 48,
+ [0][1][0][0][RTW89_FCC][11] = 44,
+ [0][1][0][0][RTW89_ETSI][11] = 48,
+ [0][1][0][0][RTW89_MKK][11] = 58,
+ [0][1][0][0][RTW89_IC][11] = 44,
+ [0][1][0][0][RTW89_KCC][11] = 56,
+ [0][1][0][0][RTW89_ACMA][11] = 48,
+ [0][1][0][0][RTW89_CN][11] = 42,
+ [0][1][0][0][RTW89_UK][11] = 48,
+ [0][1][0][0][RTW89_FCC][12] = 18,
+ [0][1][0][0][RTW89_ETSI][12] = 48,
+ [0][1][0][0][RTW89_MKK][12] = 58,
+ [0][1][0][0][RTW89_IC][12] = 18,
+ [0][1][0][0][RTW89_KCC][12] = 56,
+ [0][1][0][0][RTW89_ACMA][12] = 48,
+ [0][1][0][0][RTW89_CN][12] = 42,
+ [0][1][0][0][RTW89_UK][12] = 48,
+ [0][1][0][0][RTW89_FCC][13] = 127,
+ [0][1][0][0][RTW89_ETSI][13] = 127,
+ [0][1][0][0][RTW89_MKK][13] = 60,
+ [0][1][0][0][RTW89_IC][13] = 127,
+ [0][1][0][0][RTW89_KCC][13] = 127,
+ [0][1][0][0][RTW89_ACMA][13] = 127,
+ [0][1][0][0][RTW89_CN][13] = 127,
+ [0][1][0][0][RTW89_UK][13] = 127,
+ [1][0][0][0][RTW89_FCC][0] = 127,
+ [1][0][0][0][RTW89_ETSI][0] = 127,
+ [1][0][0][0][RTW89_MKK][0] = 127,
+ [1][0][0][0][RTW89_IC][0] = 127,
+ [1][0][0][0][RTW89_KCC][0] = 127,
+ [1][0][0][0][RTW89_ACMA][0] = 127,
+ [1][0][0][0][RTW89_CN][0] = 127,
+ [1][0][0][0][RTW89_UK][0] = 127,
+ [1][0][0][0][RTW89_FCC][1] = 127,
+ [1][0][0][0][RTW89_ETSI][1] = 127,
+ [1][0][0][0][RTW89_MKK][1] = 127,
+ [1][0][0][0][RTW89_IC][1] = 127,
+ [1][0][0][0][RTW89_KCC][1] = 127,
+ [1][0][0][0][RTW89_ACMA][1] = 127,
+ [1][0][0][0][RTW89_CN][1] = 127,
+ [1][0][0][0][RTW89_UK][1] = 127,
+ [1][0][0][0][RTW89_FCC][2] = 44,
+ [1][0][0][0][RTW89_ETSI][2] = 60,
+ [1][0][0][0][RTW89_MKK][2] = 66,
+ [1][0][0][0][RTW89_IC][2] = 44,
+ [1][0][0][0][RTW89_KCC][2] = 68,
+ [1][0][0][0][RTW89_ACMA][2] = 60,
+ [1][0][0][0][RTW89_CN][2] = 58,
+ [1][0][0][0][RTW89_UK][2] = 60,
+ [1][0][0][0][RTW89_FCC][3] = 60,
+ [1][0][0][0][RTW89_ETSI][3] = 60,
+ [1][0][0][0][RTW89_MKK][3] = 66,
+ [1][0][0][0][RTW89_IC][3] = 60,
+ [1][0][0][0][RTW89_KCC][3] = 68,
+ [1][0][0][0][RTW89_ACMA][3] = 60,
+ [1][0][0][0][RTW89_CN][3] = 58,
+ [1][0][0][0][RTW89_UK][3] = 60,
+ [1][0][0][0][RTW89_FCC][4] = 60,
+ [1][0][0][0][RTW89_ETSI][4] = 60,
+ [1][0][0][0][RTW89_MKK][4] = 66,
+ [1][0][0][0][RTW89_IC][4] = 60,
+ [1][0][0][0][RTW89_KCC][4] = 68,
+ [1][0][0][0][RTW89_ACMA][4] = 60,
+ [1][0][0][0][RTW89_CN][4] = 58,
+ [1][0][0][0][RTW89_UK][4] = 60,
+ [1][0][0][0][RTW89_FCC][5] = 62,
+ [1][0][0][0][RTW89_ETSI][5] = 60,
+ [1][0][0][0][RTW89_MKK][5] = 66,
+ [1][0][0][0][RTW89_IC][5] = 62,
+ [1][0][0][0][RTW89_KCC][5] = 68,
+ [1][0][0][0][RTW89_ACMA][5] = 60,
+ [1][0][0][0][RTW89_CN][5] = 58,
+ [1][0][0][0][RTW89_UK][5] = 60,
+ [1][0][0][0][RTW89_FCC][6] = 46,
+ [1][0][0][0][RTW89_ETSI][6] = 60,
+ [1][0][0][0][RTW89_MKK][6] = 66,
+ [1][0][0][0][RTW89_IC][6] = 46,
+ [1][0][0][0][RTW89_KCC][6] = 68,
+ [1][0][0][0][RTW89_ACMA][6] = 60,
+ [1][0][0][0][RTW89_CN][6] = 58,
+ [1][0][0][0][RTW89_UK][6] = 60,
+ [1][0][0][0][RTW89_FCC][7] = 46,
+ [1][0][0][0][RTW89_ETSI][7] = 60,
+ [1][0][0][0][RTW89_MKK][7] = 66,
+ [1][0][0][0][RTW89_IC][7] = 46,
+ [1][0][0][0][RTW89_KCC][7] = 68,
+ [1][0][0][0][RTW89_ACMA][7] = 60,
+ [1][0][0][0][RTW89_CN][7] = 58,
+ [1][0][0][0][RTW89_UK][7] = 60,
+ [1][0][0][0][RTW89_FCC][8] = 28,
+ [1][0][0][0][RTW89_ETSI][8] = 60,
+ [1][0][0][0][RTW89_MKK][8] = 66,
+ [1][0][0][0][RTW89_IC][8] = 28,
+ [1][0][0][0][RTW89_KCC][8] = 70,
+ [1][0][0][0][RTW89_ACMA][8] = 60,
+ [1][0][0][0][RTW89_CN][8] = 58,
+ [1][0][0][0][RTW89_UK][8] = 60,
+ [1][0][0][0][RTW89_FCC][9] = 26,
+ [1][0][0][0][RTW89_ETSI][9] = 60,
+ [1][0][0][0][RTW89_MKK][9] = 66,
+ [1][0][0][0][RTW89_IC][9] = 26,
+ [1][0][0][0][RTW89_KCC][9] = 70,
+ [1][0][0][0][RTW89_ACMA][9] = 60,
+ [1][0][0][0][RTW89_CN][9] = 58,
+ [1][0][0][0][RTW89_UK][9] = 60,
+ [1][0][0][0][RTW89_FCC][10] = 26,
+ [1][0][0][0][RTW89_ETSI][10] = 60,
+ [1][0][0][0][RTW89_MKK][10] = 66,
+ [1][0][0][0][RTW89_IC][10] = 26,
+ [1][0][0][0][RTW89_KCC][10] = 70,
+ [1][0][0][0][RTW89_ACMA][10] = 60,
+ [1][0][0][0][RTW89_CN][10] = 58,
+ [1][0][0][0][RTW89_UK][10] = 60,
+ [1][0][0][0][RTW89_FCC][11] = 127,
+ [1][0][0][0][RTW89_ETSI][11] = 127,
+ [1][0][0][0][RTW89_MKK][11] = 127,
+ [1][0][0][0][RTW89_IC][11] = 127,
+ [1][0][0][0][RTW89_KCC][11] = 127,
+ [1][0][0][0][RTW89_ACMA][11] = 127,
+ [1][0][0][0][RTW89_CN][11] = 127,
+ [1][0][0][0][RTW89_UK][11] = 127,
+ [1][0][0][0][RTW89_FCC][12] = 127,
+ [1][0][0][0][RTW89_ETSI][12] = 127,
+ [1][0][0][0][RTW89_MKK][12] = 127,
+ [1][0][0][0][RTW89_IC][12] = 127,
+ [1][0][0][0][RTW89_KCC][12] = 127,
+ [1][0][0][0][RTW89_ACMA][12] = 127,
+ [1][0][0][0][RTW89_CN][12] = 127,
+ [1][0][0][0][RTW89_UK][12] = 127,
+ [1][0][0][0][RTW89_FCC][13] = 127,
+ [1][0][0][0][RTW89_ETSI][13] = 127,
+ [1][0][0][0][RTW89_MKK][13] = 127,
+ [1][0][0][0][RTW89_IC][13] = 127,
+ [1][0][0][0][RTW89_KCC][13] = 127,
+ [1][0][0][0][RTW89_ACMA][13] = 127,
+ [1][0][0][0][RTW89_CN][13] = 127,
+ [1][0][0][0][RTW89_UK][13] = 127,
+ [1][1][0][0][RTW89_FCC][0] = 127,
+ [1][1][0][0][RTW89_ETSI][0] = 127,
+ [1][1][0][0][RTW89_MKK][0] = 127,
+ [1][1][0][0][RTW89_IC][0] = 127,
+ [1][1][0][0][RTW89_KCC][0] = 127,
+ [1][1][0][0][RTW89_ACMA][0] = 127,
+ [1][1][0][0][RTW89_CN][0] = 127,
+ [1][1][0][0][RTW89_UK][0] = 127,
+ [1][1][0][0][RTW89_FCC][1] = 127,
+ [1][1][0][0][RTW89_ETSI][1] = 127,
+ [1][1][0][0][RTW89_MKK][1] = 127,
+ [1][1][0][0][RTW89_IC][1] = 127,
+ [1][1][0][0][RTW89_KCC][1] = 127,
+ [1][1][0][0][RTW89_ACMA][1] = 127,
+ [1][1][0][0][RTW89_CN][1] = 127,
+ [1][1][0][0][RTW89_UK][1] = 127,
+ [1][1][0][0][RTW89_FCC][2] = 46,
+ [1][1][0][0][RTW89_ETSI][2] = 48,
+ [1][1][0][0][RTW89_MKK][2] = 58,
+ [1][1][0][0][RTW89_IC][2] = 46,
+ [1][1][0][0][RTW89_KCC][2] = 56,
+ [1][1][0][0][RTW89_ACMA][2] = 48,
+ [1][1][0][0][RTW89_CN][2] = 46,
+ [1][1][0][0][RTW89_UK][2] = 48,
+ [1][1][0][0][RTW89_FCC][3] = 46,
+ [1][1][0][0][RTW89_ETSI][3] = 48,
+ [1][1][0][0][RTW89_MKK][3] = 58,
+ [1][1][0][0][RTW89_IC][3] = 46,
+ [1][1][0][0][RTW89_KCC][3] = 56,
+ [1][1][0][0][RTW89_ACMA][3] = 48,
+ [1][1][0][0][RTW89_CN][3] = 46,
+ [1][1][0][0][RTW89_UK][3] = 48,
+ [1][1][0][0][RTW89_FCC][4] = 46,
+ [1][1][0][0][RTW89_ETSI][4] = 48,
+ [1][1][0][0][RTW89_MKK][4] = 58,
+ [1][1][0][0][RTW89_IC][4] = 46,
+ [1][1][0][0][RTW89_KCC][4] = 56,
+ [1][1][0][0][RTW89_ACMA][4] = 48,
+ [1][1][0][0][RTW89_CN][4] = 46,
+ [1][1][0][0][RTW89_UK][4] = 48,
+ [1][1][0][0][RTW89_FCC][5] = 48,
+ [1][1][0][0][RTW89_ETSI][5] = 48,
+ [1][1][0][0][RTW89_MKK][5] = 58,
+ [1][1][0][0][RTW89_IC][5] = 48,
+ [1][1][0][0][RTW89_KCC][5] = 56,
+ [1][1][0][0][RTW89_ACMA][5] = 48,
+ [1][1][0][0][RTW89_CN][5] = 46,
+ [1][1][0][0][RTW89_UK][5] = 48,
+ [1][1][0][0][RTW89_FCC][6] = 40,
+ [1][1][0][0][RTW89_ETSI][6] = 48,
+ [1][1][0][0][RTW89_MKK][6] = 58,
+ [1][1][0][0][RTW89_IC][6] = 40,
+ [1][1][0][0][RTW89_KCC][6] = 56,
+ [1][1][0][0][RTW89_ACMA][6] = 48,
+ [1][1][0][0][RTW89_CN][6] = 46,
+ [1][1][0][0][RTW89_UK][6] = 48,
+ [1][1][0][0][RTW89_FCC][7] = 40,
+ [1][1][0][0][RTW89_ETSI][7] = 48,
+ [1][1][0][0][RTW89_MKK][7] = 58,
+ [1][1][0][0][RTW89_IC][7] = 40,
+ [1][1][0][0][RTW89_KCC][7] = 56,
+ [1][1][0][0][RTW89_ACMA][7] = 48,
+ [1][1][0][0][RTW89_CN][7] = 46,
+ [1][1][0][0][RTW89_UK][7] = 48,
+ [1][1][0][0][RTW89_FCC][8] = 14,
+ [1][1][0][0][RTW89_ETSI][8] = 48,
+ [1][1][0][0][RTW89_MKK][8] = 58,
+ [1][1][0][0][RTW89_IC][8] = 14,
+ [1][1][0][0][RTW89_KCC][8] = 58,
+ [1][1][0][0][RTW89_ACMA][8] = 48,
+ [1][1][0][0][RTW89_CN][8] = 46,
+ [1][1][0][0][RTW89_UK][8] = 48,
+ [1][1][0][0][RTW89_FCC][9] = 14,
+ [1][1][0][0][RTW89_ETSI][9] = 48,
+ [1][1][0][0][RTW89_MKK][9] = 58,
+ [1][1][0][0][RTW89_IC][9] = 14,
+ [1][1][0][0][RTW89_KCC][9] = 58,
+ [1][1][0][0][RTW89_ACMA][9] = 48,
+ [1][1][0][0][RTW89_CN][9] = 46,
+ [1][1][0][0][RTW89_UK][9] = 48,
+ [1][1][0][0][RTW89_FCC][10] = 12,
+ [1][1][0][0][RTW89_ETSI][10] = 48,
+ [1][1][0][0][RTW89_MKK][10] = 56,
+ [1][1][0][0][RTW89_IC][10] = 12,
+ [1][1][0][0][RTW89_KCC][10] = 58,
+ [1][1][0][0][RTW89_ACMA][10] = 48,
+ [1][1][0][0][RTW89_CN][10] = 46,
+ [1][1][0][0][RTW89_UK][10] = 48,
+ [1][1][0][0][RTW89_FCC][11] = 127,
+ [1][1][0][0][RTW89_ETSI][11] = 127,
+ [1][1][0][0][RTW89_MKK][11] = 127,
+ [1][1][0][0][RTW89_IC][11] = 127,
+ [1][1][0][0][RTW89_KCC][11] = 127,
+ [1][1][0][0][RTW89_ACMA][11] = 127,
+ [1][1][0][0][RTW89_CN][11] = 127,
+ [1][1][0][0][RTW89_UK][11] = 127,
+ [1][1][0][0][RTW89_FCC][12] = 127,
+ [1][1][0][0][RTW89_ETSI][12] = 127,
+ [1][1][0][0][RTW89_MKK][12] = 127,
+ [1][1][0][0][RTW89_IC][12] = 127,
+ [1][1][0][0][RTW89_KCC][12] = 127,
+ [1][1][0][0][RTW89_ACMA][12] = 127,
+ [1][1][0][0][RTW89_CN][12] = 127,
+ [1][1][0][0][RTW89_UK][12] = 127,
+ [1][1][0][0][RTW89_FCC][13] = 127,
+ [1][1][0][0][RTW89_ETSI][13] = 127,
+ [1][1][0][0][RTW89_MKK][13] = 127,
+ [1][1][0][0][RTW89_IC][13] = 127,
+ [1][1][0][0][RTW89_KCC][13] = 127,
+ [1][1][0][0][RTW89_ACMA][13] = 127,
+ [1][1][0][0][RTW89_CN][13] = 127,
+ [1][1][0][0][RTW89_UK][13] = 127,
+ [0][0][1][0][RTW89_FCC][0] = 66,
+ [0][0][1][0][RTW89_ETSI][0] = 60,
+ [0][0][1][0][RTW89_MKK][0] = 76,
+ [0][0][1][0][RTW89_IC][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 68,
+ [0][0][1][0][RTW89_ACMA][0] = 60,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 60,
+ [0][0][1][0][RTW89_FCC][1] = 68,
+ [0][0][1][0][RTW89_ETSI][1] = 60,
+ [0][0][1][0][RTW89_MKK][1] = 78,
+ [0][0][1][0][RTW89_IC][1] = 68,
+ [0][0][1][0][RTW89_KCC][1] = 68,
+ [0][0][1][0][RTW89_ACMA][1] = 60,
+ [0][0][1][0][RTW89_CN][1] = 58,
+ [0][0][1][0][RTW89_UK][1] = 60,
+ [0][0][1][0][RTW89_FCC][2] = 72,
+ [0][0][1][0][RTW89_ETSI][2] = 60,
+ [0][0][1][0][RTW89_MKK][2] = 78,
+ [0][0][1][0][RTW89_IC][2] = 72,
+ [0][0][1][0][RTW89_KCC][2] = 68,
+ [0][0][1][0][RTW89_ACMA][2] = 60,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 60,
+ [0][0][1][0][RTW89_FCC][3] = 76,
+ [0][0][1][0][RTW89_ETSI][3] = 60,
+ [0][0][1][0][RTW89_MKK][3] = 78,
+ [0][0][1][0][RTW89_IC][3] = 76,
+ [0][0][1][0][RTW89_KCC][3] = 68,
+ [0][0][1][0][RTW89_ACMA][3] = 60,
+ [0][0][1][0][RTW89_CN][3] = 58,
+ [0][0][1][0][RTW89_UK][3] = 60,
+ [0][0][1][0][RTW89_FCC][4] = 80,
+ [0][0][1][0][RTW89_ETSI][4] = 60,
+ [0][0][1][0][RTW89_MKK][4] = 78,
+ [0][0][1][0][RTW89_IC][4] = 80,
+ [0][0][1][0][RTW89_KCC][4] = 76,
+ [0][0][1][0][RTW89_ACMA][4] = 60,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 60,
+ [0][0][1][0][RTW89_FCC][5] = 80,
+ [0][0][1][0][RTW89_ETSI][5] = 60,
+ [0][0][1][0][RTW89_MKK][5] = 78,
+ [0][0][1][0][RTW89_IC][5] = 80,
+ [0][0][1][0][RTW89_KCC][5] = 76,
+ [0][0][1][0][RTW89_ACMA][5] = 60,
+ [0][0][1][0][RTW89_CN][5] = 58,
+ [0][0][1][0][RTW89_UK][5] = 60,
+ [0][0][1][0][RTW89_FCC][6] = 80,
+ [0][0][1][0][RTW89_ETSI][6] = 60,
+ [0][0][1][0][RTW89_MKK][6] = 76,
+ [0][0][1][0][RTW89_IC][6] = 80,
+ [0][0][1][0][RTW89_KCC][6] = 76,
+ [0][0][1][0][RTW89_ACMA][6] = 60,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 60,
+ [0][0][1][0][RTW89_FCC][7] = 80,
+ [0][0][1][0][RTW89_ETSI][7] = 60,
+ [0][0][1][0][RTW89_MKK][7] = 78,
+ [0][0][1][0][RTW89_IC][7] = 80,
+ [0][0][1][0][RTW89_KCC][7] = 76,
+ [0][0][1][0][RTW89_ACMA][7] = 60,
+ [0][0][1][0][RTW89_CN][7] = 58,
+ [0][0][1][0][RTW89_UK][7] = 60,
+ [0][0][1][0][RTW89_FCC][8] = 80,
+ [0][0][1][0][RTW89_ETSI][8] = 60,
+ [0][0][1][0][RTW89_MKK][8] = 78,
+ [0][0][1][0][RTW89_IC][8] = 80,
+ [0][0][1][0][RTW89_KCC][8] = 76,
+ [0][0][1][0][RTW89_ACMA][8] = 60,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 60,
+ [0][0][1][0][RTW89_FCC][9] = 76,
+ [0][0][1][0][RTW89_ETSI][9] = 60,
+ [0][0][1][0][RTW89_MKK][9] = 78,
+ [0][0][1][0][RTW89_IC][9] = 76,
+ [0][0][1][0][RTW89_KCC][9] = 70,
+ [0][0][1][0][RTW89_ACMA][9] = 60,
+ [0][0][1][0][RTW89_CN][9] = 58,
+ [0][0][1][0][RTW89_UK][9] = 60,
+ [0][0][1][0][RTW89_FCC][10] = 66,
+ [0][0][1][0][RTW89_ETSI][10] = 60,
+ [0][0][1][0][RTW89_MKK][10] = 78,
+ [0][0][1][0][RTW89_IC][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 70,
+ [0][0][1][0][RTW89_ACMA][10] = 60,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 60,
+ [0][0][1][0][RTW89_FCC][11] = 62,
+ [0][0][1][0][RTW89_ETSI][11] = 60,
+ [0][0][1][0][RTW89_MKK][11] = 78,
+ [0][0][1][0][RTW89_IC][11] = 62,
+ [0][0][1][0][RTW89_KCC][11] = 70,
+ [0][0][1][0][RTW89_ACMA][11] = 60,
+ [0][0][1][0][RTW89_CN][11] = 58,
+ [0][0][1][0][RTW89_UK][11] = 60,
+ [0][0][1][0][RTW89_FCC][12] = 60,
+ [0][0][1][0][RTW89_ETSI][12] = 60,
+ [0][0][1][0][RTW89_MKK][12] = 78,
+ [0][0][1][0][RTW89_IC][12] = 60,
+ [0][0][1][0][RTW89_KCC][12] = 70,
+ [0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 60,
+ [0][0][1][0][RTW89_FCC][13] = 127,
+ [0][0][1][0][RTW89_ETSI][13] = 127,
+ [0][0][1][0][RTW89_MKK][13] = 127,
+ [0][0][1][0][RTW89_IC][13] = 127,
+ [0][0][1][0][RTW89_KCC][13] = 127,
+ [0][0][1][0][RTW89_ACMA][13] = 127,
+ [0][0][1][0][RTW89_CN][13] = 127,
+ [0][0][1][0][RTW89_UK][13] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 66,
+ [0][1][1][0][RTW89_ETSI][0] = 48,
+ [0][1][1][0][RTW89_MKK][0] = 66,
+ [0][1][1][0][RTW89_IC][0] = 66,
+ [0][1][1][0][RTW89_KCC][0] = 64,
+ [0][1][1][0][RTW89_ACMA][0] = 48,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 48,
+ [0][1][1][0][RTW89_FCC][1] = 68,
+ [0][1][1][0][RTW89_ETSI][1] = 48,
+ [0][1][1][0][RTW89_MKK][1] = 66,
+ [0][1][1][0][RTW89_IC][1] = 68,
+ [0][1][1][0][RTW89_KCC][1] = 64,
+ [0][1][1][0][RTW89_ACMA][1] = 48,
+ [0][1][1][0][RTW89_CN][1] = 46,
+ [0][1][1][0][RTW89_UK][1] = 48,
+ [0][1][1][0][RTW89_FCC][2] = 72,
+ [0][1][1][0][RTW89_ETSI][2] = 48,
+ [0][1][1][0][RTW89_MKK][2] = 66,
+ [0][1][1][0][RTW89_IC][2] = 72,
+ [0][1][1][0][RTW89_KCC][2] = 64,
+ [0][1][1][0][RTW89_ACMA][2] = 48,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 48,
+ [0][1][1][0][RTW89_FCC][3] = 76,
+ [0][1][1][0][RTW89_ETSI][3] = 48,
+ [0][1][1][0][RTW89_MKK][3] = 66,
+ [0][1][1][0][RTW89_IC][3] = 76,
+ [0][1][1][0][RTW89_KCC][3] = 64,
+ [0][1][1][0][RTW89_ACMA][3] = 48,
+ [0][1][1][0][RTW89_CN][3] = 46,
+ [0][1][1][0][RTW89_UK][3] = 48,
+ [0][1][1][0][RTW89_FCC][4] = 80,
+ [0][1][1][0][RTW89_ETSI][4] = 48,
+ [0][1][1][0][RTW89_MKK][4] = 66,
+ [0][1][1][0][RTW89_IC][4] = 80,
+ [0][1][1][0][RTW89_KCC][4] = 66,
+ [0][1][1][0][RTW89_ACMA][4] = 48,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 48,
+ [0][1][1][0][RTW89_FCC][5] = 80,
+ [0][1][1][0][RTW89_ETSI][5] = 48,
+ [0][1][1][0][RTW89_MKK][5] = 66,
+ [0][1][1][0][RTW89_IC][5] = 80,
+ [0][1][1][0][RTW89_KCC][5] = 66,
+ [0][1][1][0][RTW89_ACMA][5] = 48,
+ [0][1][1][0][RTW89_CN][5] = 46,
+ [0][1][1][0][RTW89_UK][5] = 48,
+ [0][1][1][0][RTW89_FCC][6] = 80,
+ [0][1][1][0][RTW89_ETSI][6] = 48,
+ [0][1][1][0][RTW89_MKK][6] = 66,
+ [0][1][1][0][RTW89_IC][6] = 80,
+ [0][1][1][0][RTW89_KCC][6] = 66,
+ [0][1][1][0][RTW89_ACMA][6] = 48,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 48,
+ [0][1][1][0][RTW89_FCC][7] = 78,
+ [0][1][1][0][RTW89_ETSI][7] = 48,
+ [0][1][1][0][RTW89_MKK][7] = 66,
+ [0][1][1][0][RTW89_IC][7] = 78,
+ [0][1][1][0][RTW89_KCC][7] = 66,
+ [0][1][1][0][RTW89_ACMA][7] = 48,
+ [0][1][1][0][RTW89_CN][7] = 46,
+ [0][1][1][0][RTW89_UK][7] = 48,
+ [0][1][1][0][RTW89_FCC][8] = 74,
+ [0][1][1][0][RTW89_ETSI][8] = 48,
+ [0][1][1][0][RTW89_MKK][8] = 66,
+ [0][1][1][0][RTW89_IC][8] = 74,
+ [0][1][1][0][RTW89_KCC][8] = 66,
+ [0][1][1][0][RTW89_ACMA][8] = 48,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 48,
+ [0][1][1][0][RTW89_FCC][9] = 70,
+ [0][1][1][0][RTW89_ETSI][9] = 48,
+ [0][1][1][0][RTW89_MKK][9] = 66,
+ [0][1][1][0][RTW89_IC][9] = 70,
+ [0][1][1][0][RTW89_KCC][9] = 64,
+ [0][1][1][0][RTW89_ACMA][9] = 48,
+ [0][1][1][0][RTW89_CN][9] = 46,
+ [0][1][1][0][RTW89_UK][9] = 48,
+ [0][1][1][0][RTW89_FCC][10] = 62,
+ [0][1][1][0][RTW89_ETSI][10] = 48,
+ [0][1][1][0][RTW89_MKK][10] = 66,
+ [0][1][1][0][RTW89_IC][10] = 62,
+ [0][1][1][0][RTW89_KCC][10] = 64,
+ [0][1][1][0][RTW89_ACMA][10] = 48,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 48,
+ [0][1][1][0][RTW89_FCC][11] = 60,
+ [0][1][1][0][RTW89_ETSI][11] = 48,
+ [0][1][1][0][RTW89_MKK][11] = 66,
+ [0][1][1][0][RTW89_IC][11] = 60,
+ [0][1][1][0][RTW89_KCC][11] = 64,
+ [0][1][1][0][RTW89_ACMA][11] = 48,
+ [0][1][1][0][RTW89_CN][11] = 46,
+ [0][1][1][0][RTW89_UK][11] = 48,
+ [0][1][1][0][RTW89_FCC][12] = 36,
+ [0][1][1][0][RTW89_ETSI][12] = 48,
+ [0][1][1][0][RTW89_MKK][12] = 66,
+ [0][1][1][0][RTW89_IC][12] = 36,
+ [0][1][1][0][RTW89_KCC][12] = 64,
+ [0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 48,
+ [0][1][1][0][RTW89_FCC][13] = 127,
+ [0][1][1][0][RTW89_ETSI][13] = 127,
+ [0][1][1][0][RTW89_MKK][13] = 127,
+ [0][1][1][0][RTW89_IC][13] = 127,
+ [0][1][1][0][RTW89_KCC][13] = 127,
+ [0][1][1][0][RTW89_ACMA][13] = 127,
+ [0][1][1][0][RTW89_CN][13] = 127,
+ [0][1][1][0][RTW89_UK][13] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 66,
+ [0][0][2][0][RTW89_ETSI][0] = 60,
+ [0][0][2][0][RTW89_MKK][0] = 78,
+ [0][0][2][0][RTW89_IC][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 70,
+ [0][0][2][0][RTW89_ACMA][0] = 60,
+ [0][0][2][0][RTW89_CN][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 60,
+ [0][0][2][0][RTW89_FCC][1] = 70,
+ [0][0][2][0][RTW89_ETSI][1] = 60,
+ [0][0][2][0][RTW89_MKK][1] = 78,
+ [0][0][2][0][RTW89_IC][1] = 70,
+ [0][0][2][0][RTW89_KCC][1] = 70,
+ [0][0][2][0][RTW89_ACMA][1] = 60,
+ [0][0][2][0][RTW89_CN][1] = 58,
+ [0][0][2][0][RTW89_UK][1] = 60,
+ [0][0][2][0][RTW89_FCC][2] = 74,
+ [0][0][2][0][RTW89_ETSI][2] = 60,
+ [0][0][2][0][RTW89_MKK][2] = 78,
+ [0][0][2][0][RTW89_IC][2] = 74,
+ [0][0][2][0][RTW89_KCC][2] = 70,
+ [0][0][2][0][RTW89_ACMA][2] = 60,
+ [0][0][2][0][RTW89_CN][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 60,
+ [0][0][2][0][RTW89_FCC][3] = 78,
+ [0][0][2][0][RTW89_ETSI][3] = 60,
+ [0][0][2][0][RTW89_MKK][3] = 78,
+ [0][0][2][0][RTW89_IC][3] = 78,
+ [0][0][2][0][RTW89_KCC][3] = 70,
+ [0][0][2][0][RTW89_ACMA][3] = 60,
+ [0][0][2][0][RTW89_CN][3] = 58,
+ [0][0][2][0][RTW89_UK][3] = 60,
+ [0][0][2][0][RTW89_FCC][4] = 80,
+ [0][0][2][0][RTW89_ETSI][4] = 60,
+ [0][0][2][0][RTW89_MKK][4] = 78,
+ [0][0][2][0][RTW89_IC][4] = 80,
+ [0][0][2][0][RTW89_KCC][4] = 78,
+ [0][0][2][0][RTW89_ACMA][4] = 60,
+ [0][0][2][0][RTW89_CN][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 60,
+ [0][0][2][0][RTW89_FCC][5] = 80,
+ [0][0][2][0][RTW89_ETSI][5] = 60,
+ [0][0][2][0][RTW89_MKK][5] = 78,
+ [0][0][2][0][RTW89_IC][5] = 80,
+ [0][0][2][0][RTW89_KCC][5] = 78,
+ [0][0][2][0][RTW89_ACMA][5] = 60,
+ [0][0][2][0][RTW89_CN][5] = 58,
+ [0][0][2][0][RTW89_UK][5] = 60,
+ [0][0][2][0][RTW89_FCC][6] = 80,
+ [0][0][2][0][RTW89_ETSI][6] = 60,
+ [0][0][2][0][RTW89_MKK][6] = 78,
+ [0][0][2][0][RTW89_IC][6] = 80,
+ [0][0][2][0][RTW89_KCC][6] = 78,
+ [0][0][2][0][RTW89_ACMA][6] = 60,
+ [0][0][2][0][RTW89_CN][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 60,
+ [0][0][2][0][RTW89_FCC][7] = 80,
+ [0][0][2][0][RTW89_ETSI][7] = 60,
+ [0][0][2][0][RTW89_MKK][7] = 78,
+ [0][0][2][0][RTW89_IC][7] = 80,
+ [0][0][2][0][RTW89_KCC][7] = 78,
+ [0][0][2][0][RTW89_ACMA][7] = 60,
+ [0][0][2][0][RTW89_CN][7] = 58,
+ [0][0][2][0][RTW89_UK][7] = 60,
+ [0][0][2][0][RTW89_FCC][8] = 78,
+ [0][0][2][0][RTW89_ETSI][8] = 60,
+ [0][0][2][0][RTW89_MKK][8] = 78,
+ [0][0][2][0][RTW89_IC][8] = 78,
+ [0][0][2][0][RTW89_KCC][8] = 78,
+ [0][0][2][0][RTW89_ACMA][8] = 60,
+ [0][0][2][0][RTW89_CN][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 60,
+ [0][0][2][0][RTW89_FCC][9] = 74,
+ [0][0][2][0][RTW89_ETSI][9] = 60,
+ [0][0][2][0][RTW89_MKK][9] = 78,
+ [0][0][2][0][RTW89_IC][9] = 74,
+ [0][0][2][0][RTW89_KCC][9] = 66,
+ [0][0][2][0][RTW89_ACMA][9] = 60,
+ [0][0][2][0][RTW89_CN][9] = 58,
+ [0][0][2][0][RTW89_UK][9] = 60,
+ [0][0][2][0][RTW89_FCC][10] = 62,
+ [0][0][2][0][RTW89_ETSI][10] = 60,
+ [0][0][2][0][RTW89_MKK][10] = 78,
+ [0][0][2][0][RTW89_IC][10] = 62,
+ [0][0][2][0][RTW89_KCC][10] = 66,
+ [0][0][2][0][RTW89_ACMA][10] = 60,
+ [0][0][2][0][RTW89_CN][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 60,
+ [0][0][2][0][RTW89_FCC][11] = 60,
+ [0][0][2][0][RTW89_ETSI][11] = 60,
+ [0][0][2][0][RTW89_MKK][11] = 78,
+ [0][0][2][0][RTW89_IC][11] = 60,
+ [0][0][2][0][RTW89_KCC][11] = 66,
+ [0][0][2][0][RTW89_ACMA][11] = 60,
+ [0][0][2][0][RTW89_CN][11] = 58,
+ [0][0][2][0][RTW89_UK][11] = 60,
+ [0][0][2][0][RTW89_FCC][12] = 38,
+ [0][0][2][0][RTW89_ETSI][12] = 60,
+ [0][0][2][0][RTW89_MKK][12] = 78,
+ [0][0][2][0][RTW89_IC][12] = 38,
+ [0][0][2][0][RTW89_KCC][12] = 66,
+ [0][0][2][0][RTW89_ACMA][12] = 60,
+ [0][0][2][0][RTW89_CN][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 60,
+ [0][0][2][0][RTW89_FCC][13] = 127,
+ [0][0][2][0][RTW89_ETSI][13] = 127,
+ [0][0][2][0][RTW89_MKK][13] = 127,
+ [0][0][2][0][RTW89_IC][13] = 127,
+ [0][0][2][0][RTW89_KCC][13] = 127,
+ [0][0][2][0][RTW89_ACMA][13] = 127,
+ [0][0][2][0][RTW89_CN][13] = 127,
+ [0][0][2][0][RTW89_UK][13] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 64,
+ [0][1][2][0][RTW89_ETSI][0] = 48,
+ [0][1][2][0][RTW89_MKK][0] = 68,
+ [0][1][2][0][RTW89_IC][0] = 64,
+ [0][1][2][0][RTW89_KCC][0] = 66,
+ [0][1][2][0][RTW89_ACMA][0] = 48,
+ [0][1][2][0][RTW89_CN][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 48,
+ [0][1][2][0][RTW89_FCC][1] = 70,
+ [0][1][2][0][RTW89_ETSI][1] = 48,
+ [0][1][2][0][RTW89_MKK][1] = 68,
+ [0][1][2][0][RTW89_IC][1] = 70,
+ [0][1][2][0][RTW89_KCC][1] = 66,
+ [0][1][2][0][RTW89_ACMA][1] = 48,
+ [0][1][2][0][RTW89_CN][1] = 46,
+ [0][1][2][0][RTW89_UK][1] = 48,
+ [0][1][2][0][RTW89_FCC][2] = 74,
+ [0][1][2][0][RTW89_ETSI][2] = 48,
+ [0][1][2][0][RTW89_MKK][2] = 68,
+ [0][1][2][0][RTW89_IC][2] = 74,
+ [0][1][2][0][RTW89_KCC][2] = 66,
+ [0][1][2][0][RTW89_ACMA][2] = 48,
+ [0][1][2][0][RTW89_CN][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 48,
+ [0][1][2][0][RTW89_FCC][3] = 78,
+ [0][1][2][0][RTW89_ETSI][3] = 48,
+ [0][1][2][0][RTW89_MKK][3] = 68,
+ [0][1][2][0][RTW89_IC][3] = 78,
+ [0][1][2][0][RTW89_KCC][3] = 66,
+ [0][1][2][0][RTW89_ACMA][3] = 48,
+ [0][1][2][0][RTW89_CN][3] = 46,
+ [0][1][2][0][RTW89_UK][3] = 48,
+ [0][1][2][0][RTW89_FCC][4] = 80,
+ [0][1][2][0][RTW89_ETSI][4] = 48,
+ [0][1][2][0][RTW89_MKK][4] = 68,
+ [0][1][2][0][RTW89_IC][4] = 80,
+ [0][1][2][0][RTW89_KCC][4] = 66,
+ [0][1][2][0][RTW89_ACMA][4] = 48,
+ [0][1][2][0][RTW89_CN][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 48,
+ [0][1][2][0][RTW89_FCC][5] = 80,
+ [0][1][2][0][RTW89_ETSI][5] = 48,
+ [0][1][2][0][RTW89_MKK][5] = 68,
+ [0][1][2][0][RTW89_IC][5] = 80,
+ [0][1][2][0][RTW89_KCC][5] = 66,
+ [0][1][2][0][RTW89_ACMA][5] = 48,
+ [0][1][2][0][RTW89_CN][5] = 46,
+ [0][1][2][0][RTW89_UK][5] = 48,
+ [0][1][2][0][RTW89_FCC][6] = 80,
+ [0][1][2][0][RTW89_ETSI][6] = 48,
+ [0][1][2][0][RTW89_MKK][6] = 68,
+ [0][1][2][0][RTW89_IC][6] = 80,
+ [0][1][2][0][RTW89_KCC][6] = 66,
+ [0][1][2][0][RTW89_ACMA][6] = 48,
+ [0][1][2][0][RTW89_CN][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 48,
+ [0][1][2][0][RTW89_FCC][7] = 74,
+ [0][1][2][0][RTW89_ETSI][7] = 48,
+ [0][1][2][0][RTW89_MKK][7] = 68,
+ [0][1][2][0][RTW89_IC][7] = 74,
+ [0][1][2][0][RTW89_KCC][7] = 66,
+ [0][1][2][0][RTW89_ACMA][7] = 48,
+ [0][1][2][0][RTW89_CN][7] = 46,
+ [0][1][2][0][RTW89_UK][7] = 48,
+ [0][1][2][0][RTW89_FCC][8] = 70,
+ [0][1][2][0][RTW89_ETSI][8] = 48,
+ [0][1][2][0][RTW89_MKK][8] = 68,
+ [0][1][2][0][RTW89_IC][8] = 70,
+ [0][1][2][0][RTW89_KCC][8] = 66,
+ [0][1][2][0][RTW89_ACMA][8] = 48,
+ [0][1][2][0][RTW89_CN][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 48,
+ [0][1][2][0][RTW89_FCC][9] = 66,
+ [0][1][2][0][RTW89_ETSI][9] = 48,
+ [0][1][2][0][RTW89_MKK][9] = 68,
+ [0][1][2][0][RTW89_IC][9] = 66,
+ [0][1][2][0][RTW89_KCC][9] = 64,
+ [0][1][2][0][RTW89_ACMA][9] = 48,
+ [0][1][2][0][RTW89_CN][9] = 46,
+ [0][1][2][0][RTW89_UK][9] = 48,
+ [0][1][2][0][RTW89_FCC][10] = 58,
+ [0][1][2][0][RTW89_ETSI][10] = 48,
+ [0][1][2][0][RTW89_MKK][10] = 68,
+ [0][1][2][0][RTW89_IC][10] = 58,
+ [0][1][2][0][RTW89_KCC][10] = 64,
+ [0][1][2][0][RTW89_ACMA][10] = 48,
+ [0][1][2][0][RTW89_CN][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 48,
+ [0][1][2][0][RTW89_FCC][11] = 58,
+ [0][1][2][0][RTW89_ETSI][11] = 48,
+ [0][1][2][0][RTW89_MKK][11] = 68,
+ [0][1][2][0][RTW89_IC][11] = 58,
+ [0][1][2][0][RTW89_KCC][11] = 64,
+ [0][1][2][0][RTW89_ACMA][11] = 48,
+ [0][1][2][0][RTW89_CN][11] = 46,
+ [0][1][2][0][RTW89_UK][11] = 48,
+ [0][1][2][0][RTW89_FCC][12] = 16,
+ [0][1][2][0][RTW89_ETSI][12] = 48,
+ [0][1][2][0][RTW89_MKK][12] = 68,
+ [0][1][2][0][RTW89_IC][12] = 16,
+ [0][1][2][0][RTW89_KCC][12] = 64,
+ [0][1][2][0][RTW89_ACMA][12] = 48,
+ [0][1][2][0][RTW89_CN][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 48,
+ [0][1][2][0][RTW89_FCC][13] = 127,
+ [0][1][2][0][RTW89_ETSI][13] = 127,
+ [0][1][2][0][RTW89_MKK][13] = 127,
+ [0][1][2][0][RTW89_IC][13] = 127,
+ [0][1][2][0][RTW89_KCC][13] = 127,
+ [0][1][2][0][RTW89_ACMA][13] = 127,
+ [0][1][2][0][RTW89_CN][13] = 127,
+ [0][1][2][0][RTW89_UK][13] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 64,
+ [0][1][2][1][RTW89_ETSI][0] = 36,
+ [0][1][2][1][RTW89_MKK][0] = 68,
+ [0][1][2][1][RTW89_IC][0] = 64,
+ [0][1][2][1][RTW89_KCC][0] = 66,
+ [0][1][2][1][RTW89_ACMA][0] = 36,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 36,
+ [0][1][2][1][RTW89_FCC][1] = 70,
+ [0][1][2][1][RTW89_ETSI][1] = 36,
+ [0][1][2][1][RTW89_MKK][1] = 68,
+ [0][1][2][1][RTW89_IC][1] = 70,
+ [0][1][2][1][RTW89_KCC][1] = 66,
+ [0][1][2][1][RTW89_ACMA][1] = 36,
+ [0][1][2][1][RTW89_CN][1] = 34,
+ [0][1][2][1][RTW89_UK][1] = 36,
+ [0][1][2][1][RTW89_FCC][2] = 74,
+ [0][1][2][1][RTW89_ETSI][2] = 36,
+ [0][1][2][1][RTW89_MKK][2] = 68,
+ [0][1][2][1][RTW89_IC][2] = 74,
+ [0][1][2][1][RTW89_KCC][2] = 66,
+ [0][1][2][1][RTW89_ACMA][2] = 36,
+ [0][1][2][1][RTW89_CN][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 36,
+ [0][1][2][1][RTW89_FCC][3] = 78,
+ [0][1][2][1][RTW89_ETSI][3] = 36,
+ [0][1][2][1][RTW89_MKK][3] = 68,
+ [0][1][2][1][RTW89_IC][3] = 78,
+ [0][1][2][1][RTW89_KCC][3] = 66,
+ [0][1][2][1][RTW89_ACMA][3] = 36,
+ [0][1][2][1][RTW89_CN][3] = 34,
+ [0][1][2][1][RTW89_UK][3] = 36,
+ [0][1][2][1][RTW89_FCC][4] = 80,
+ [0][1][2][1][RTW89_ETSI][4] = 36,
+ [0][1][2][1][RTW89_MKK][4] = 68,
+ [0][1][2][1][RTW89_IC][4] = 80,
+ [0][1][2][1][RTW89_KCC][4] = 66,
+ [0][1][2][1][RTW89_ACMA][4] = 36,
+ [0][1][2][1][RTW89_CN][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 36,
+ [0][1][2][1][RTW89_FCC][5] = 80,
+ [0][1][2][1][RTW89_ETSI][5] = 36,
+ [0][1][2][1][RTW89_MKK][5] = 68,
+ [0][1][2][1][RTW89_IC][5] = 80,
+ [0][1][2][1][RTW89_KCC][5] = 66,
+ [0][1][2][1][RTW89_ACMA][5] = 36,
+ [0][1][2][1][RTW89_CN][5] = 34,
+ [0][1][2][1][RTW89_UK][5] = 36,
+ [0][1][2][1][RTW89_FCC][6] = 80,
+ [0][1][2][1][RTW89_ETSI][6] = 36,
+ [0][1][2][1][RTW89_MKK][6] = 68,
+ [0][1][2][1][RTW89_IC][6] = 80,
+ [0][1][2][1][RTW89_KCC][6] = 66,
+ [0][1][2][1][RTW89_ACMA][6] = 36,
+ [0][1][2][1][RTW89_CN][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 36,
+ [0][1][2][1][RTW89_FCC][7] = 74,
+ [0][1][2][1][RTW89_ETSI][7] = 36,
+ [0][1][2][1][RTW89_MKK][7] = 68,
+ [0][1][2][1][RTW89_IC][7] = 74,
+ [0][1][2][1][RTW89_KCC][7] = 66,
+ [0][1][2][1][RTW89_ACMA][7] = 36,
+ [0][1][2][1][RTW89_CN][7] = 34,
+ [0][1][2][1][RTW89_UK][7] = 36,
+ [0][1][2][1][RTW89_FCC][8] = 70,
+ [0][1][2][1][RTW89_ETSI][8] = 36,
+ [0][1][2][1][RTW89_MKK][8] = 68,
+ [0][1][2][1][RTW89_IC][8] = 70,
+ [0][1][2][1][RTW89_KCC][8] = 66,
+ [0][1][2][1][RTW89_ACMA][8] = 36,
+ [0][1][2][1][RTW89_CN][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 36,
+ [0][1][2][1][RTW89_FCC][9] = 66,
+ [0][1][2][1][RTW89_ETSI][9] = 36,
+ [0][1][2][1][RTW89_MKK][9] = 68,
+ [0][1][2][1][RTW89_IC][9] = 66,
+ [0][1][2][1][RTW89_KCC][9] = 64,
+ [0][1][2][1][RTW89_ACMA][9] = 36,
+ [0][1][2][1][RTW89_CN][9] = 34,
+ [0][1][2][1][RTW89_UK][9] = 36,
+ [0][1][2][1][RTW89_FCC][10] = 58,
+ [0][1][2][1][RTW89_ETSI][10] = 36,
+ [0][1][2][1][RTW89_MKK][10] = 68,
+ [0][1][2][1][RTW89_IC][10] = 58,
+ [0][1][2][1][RTW89_KCC][10] = 64,
+ [0][1][2][1][RTW89_ACMA][10] = 36,
+ [0][1][2][1][RTW89_CN][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 36,
+ [0][1][2][1][RTW89_FCC][11] = 58,
+ [0][1][2][1][RTW89_ETSI][11] = 36,
+ [0][1][2][1][RTW89_MKK][11] = 68,
+ [0][1][2][1][RTW89_IC][11] = 58,
+ [0][1][2][1][RTW89_KCC][11] = 64,
+ [0][1][2][1][RTW89_ACMA][11] = 36,
+ [0][1][2][1][RTW89_CN][11] = 34,
+ [0][1][2][1][RTW89_UK][11] = 36,
+ [0][1][2][1][RTW89_FCC][12] = 16,
+ [0][1][2][1][RTW89_ETSI][12] = 36,
+ [0][1][2][1][RTW89_MKK][12] = 68,
+ [0][1][2][1][RTW89_IC][12] = 16,
+ [0][1][2][1][RTW89_KCC][12] = 64,
+ [0][1][2][1][RTW89_ACMA][12] = 36,
+ [0][1][2][1][RTW89_CN][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 36,
+ [0][1][2][1][RTW89_FCC][13] = 127,
+ [0][1][2][1][RTW89_ETSI][13] = 127,
+ [0][1][2][1][RTW89_MKK][13] = 127,
+ [0][1][2][1][RTW89_IC][13] = 127,
+ [0][1][2][1][RTW89_KCC][13] = 127,
+ [0][1][2][1][RTW89_ACMA][13] = 127,
+ [0][1][2][1][RTW89_CN][13] = 127,
+ [0][1][2][1][RTW89_UK][13] = 127,
+ [1][0][2][0][RTW89_FCC][0] = 127,
+ [1][0][2][0][RTW89_ETSI][0] = 127,
+ [1][0][2][0][RTW89_MKK][0] = 127,
+ [1][0][2][0][RTW89_IC][0] = 127,
+ [1][0][2][0][RTW89_KCC][0] = 127,
+ [1][0][2][0][RTW89_ACMA][0] = 127,
+ [1][0][2][0][RTW89_CN][0] = 127,
+ [1][0][2][0][RTW89_UK][0] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 127,
+ [1][0][2][0][RTW89_ETSI][1] = 127,
+ [1][0][2][0][RTW89_MKK][1] = 127,
+ [1][0][2][0][RTW89_IC][1] = 127,
+ [1][0][2][0][RTW89_KCC][1] = 127,
+ [1][0][2][0][RTW89_ACMA][1] = 127,
+ [1][0][2][0][RTW89_CN][1] = 127,
+ [1][0][2][0][RTW89_UK][1] = 127,
+ [1][0][2][0][RTW89_FCC][2] = 64,
+ [1][0][2][0][RTW89_ETSI][2] = 60,
+ [1][0][2][0][RTW89_MKK][2] = 74,
+ [1][0][2][0][RTW89_IC][2] = 64,
+ [1][0][2][0][RTW89_KCC][2] = 68,
+ [1][0][2][0][RTW89_ACMA][2] = 60,
+ [1][0][2][0][RTW89_CN][2] = 58,
+ [1][0][2][0][RTW89_UK][2] = 60,
+ [1][0][2][0][RTW89_FCC][3] = 64,
+ [1][0][2][0][RTW89_ETSI][3] = 60,
+ [1][0][2][0][RTW89_MKK][3] = 74,
+ [1][0][2][0][RTW89_IC][3] = 64,
+ [1][0][2][0][RTW89_KCC][3] = 68,
+ [1][0][2][0][RTW89_ACMA][3] = 60,
+ [1][0][2][0][RTW89_CN][3] = 58,
+ [1][0][2][0][RTW89_UK][3] = 60,
+ [1][0][2][0][RTW89_FCC][4] = 68,
+ [1][0][2][0][RTW89_ETSI][4] = 60,
+ [1][0][2][0][RTW89_MKK][4] = 74,
+ [1][0][2][0][RTW89_IC][4] = 68,
+ [1][0][2][0][RTW89_KCC][4] = 68,
+ [1][0][2][0][RTW89_ACMA][4] = 60,
+ [1][0][2][0][RTW89_CN][4] = 58,
+ [1][0][2][0][RTW89_UK][4] = 60,
+ [1][0][2][0][RTW89_FCC][5] = 68,
+ [1][0][2][0][RTW89_ETSI][5] = 60,
+ [1][0][2][0][RTW89_MKK][5] = 74,
+ [1][0][2][0][RTW89_IC][5] = 68,
+ [1][0][2][0][RTW89_KCC][5] = 74,
+ [1][0][2][0][RTW89_ACMA][5] = 60,
+ [1][0][2][0][RTW89_CN][5] = 58,
+ [1][0][2][0][RTW89_UK][5] = 60,
+ [1][0][2][0][RTW89_FCC][6] = 66,
+ [1][0][2][0][RTW89_ETSI][6] = 60,
+ [1][0][2][0][RTW89_MKK][6] = 74,
+ [1][0][2][0][RTW89_IC][6] = 66,
+ [1][0][2][0][RTW89_KCC][6] = 74,
+ [1][0][2][0][RTW89_ACMA][6] = 60,
+ [1][0][2][0][RTW89_CN][6] = 58,
+ [1][0][2][0][RTW89_UK][6] = 60,
+ [1][0][2][0][RTW89_FCC][7] = 62,
+ [1][0][2][0][RTW89_ETSI][7] = 60,
+ [1][0][2][0][RTW89_MKK][7] = 74,
+ [1][0][2][0][RTW89_IC][7] = 62,
+ [1][0][2][0][RTW89_KCC][7] = 74,
+ [1][0][2][0][RTW89_ACMA][7] = 60,
+ [1][0][2][0][RTW89_CN][7] = 58,
+ [1][0][2][0][RTW89_UK][7] = 60,
+ [1][0][2][0][RTW89_FCC][8] = 62,
+ [1][0][2][0][RTW89_ETSI][8] = 60,
+ [1][0][2][0][RTW89_MKK][8] = 74,
+ [1][0][2][0][RTW89_IC][8] = 62,
+ [1][0][2][0][RTW89_KCC][8] = 68,
+ [1][0][2][0][RTW89_ACMA][8] = 60,
+ [1][0][2][0][RTW89_CN][8] = 58,
+ [1][0][2][0][RTW89_UK][8] = 60,
+ [1][0][2][0][RTW89_FCC][9] = 60,
+ [1][0][2][0][RTW89_ETSI][9] = 60,
+ [1][0][2][0][RTW89_MKK][9] = 74,
+ [1][0][2][0][RTW89_IC][9] = 60,
+ [1][0][2][0][RTW89_KCC][9] = 68,
+ [1][0][2][0][RTW89_ACMA][9] = 60,
+ [1][0][2][0][RTW89_CN][9] = 58,
+ [1][0][2][0][RTW89_UK][9] = 60,
+ [1][0][2][0][RTW89_FCC][10] = 56,
+ [1][0][2][0][RTW89_ETSI][10] = 60,
+ [1][0][2][0][RTW89_MKK][10] = 74,
+ [1][0][2][0][RTW89_IC][10] = 56,
+ [1][0][2][0][RTW89_KCC][10] = 68,
+ [1][0][2][0][RTW89_ACMA][10] = 60,
+ [1][0][2][0][RTW89_CN][10] = 58,
+ [1][0][2][0][RTW89_UK][10] = 60,
+ [1][0][2][0][RTW89_FCC][11] = 127,
+ [1][0][2][0][RTW89_ETSI][11] = 127,
+ [1][0][2][0][RTW89_MKK][11] = 127,
+ [1][0][2][0][RTW89_IC][11] = 127,
+ [1][0][2][0][RTW89_KCC][11] = 127,
+ [1][0][2][0][RTW89_ACMA][11] = 127,
+ [1][0][2][0][RTW89_CN][11] = 127,
+ [1][0][2][0][RTW89_UK][11] = 127,
+ [1][0][2][0][RTW89_FCC][12] = 127,
+ [1][0][2][0][RTW89_ETSI][12] = 127,
+ [1][0][2][0][RTW89_MKK][12] = 127,
+ [1][0][2][0][RTW89_IC][12] = 127,
+ [1][0][2][0][RTW89_KCC][12] = 127,
+ [1][0][2][0][RTW89_ACMA][12] = 127,
+ [1][0][2][0][RTW89_CN][12] = 127,
+ [1][0][2][0][RTW89_UK][12] = 127,
+ [1][0][2][0][RTW89_FCC][13] = 127,
+ [1][0][2][0][RTW89_ETSI][13] = 127,
+ [1][0][2][0][RTW89_MKK][13] = 127,
+ [1][0][2][0][RTW89_IC][13] = 127,
+ [1][0][2][0][RTW89_KCC][13] = 127,
+ [1][0][2][0][RTW89_ACMA][13] = 127,
+ [1][0][2][0][RTW89_CN][13] = 127,
+ [1][0][2][0][RTW89_UK][13] = 127,
+ [1][1][2][0][RTW89_FCC][0] = 127,
+ [1][1][2][0][RTW89_ETSI][0] = 127,
+ [1][1][2][0][RTW89_MKK][0] = 127,
+ [1][1][2][0][RTW89_IC][0] = 127,
+ [1][1][2][0][RTW89_KCC][0] = 127,
+ [1][1][2][0][RTW89_ACMA][0] = 127,
+ [1][1][2][0][RTW89_CN][0] = 127,
+ [1][1][2][0][RTW89_UK][0] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 127,
+ [1][1][2][0][RTW89_ETSI][1] = 127,
+ [1][1][2][0][RTW89_MKK][1] = 127,
+ [1][1][2][0][RTW89_IC][1] = 127,
+ [1][1][2][0][RTW89_KCC][1] = 127,
+ [1][1][2][0][RTW89_ACMA][1] = 127,
+ [1][1][2][0][RTW89_CN][1] = 127,
+ [1][1][2][0][RTW89_UK][1] = 127,
+ [1][1][2][0][RTW89_FCC][2] = 60,
+ [1][1][2][0][RTW89_ETSI][2] = 48,
+ [1][1][2][0][RTW89_MKK][2] = 68,
+ [1][1][2][0][RTW89_IC][2] = 60,
+ [1][1][2][0][RTW89_KCC][2] = 64,
+ [1][1][2][0][RTW89_ACMA][2] = 48,
+ [1][1][2][0][RTW89_CN][2] = 34,
+ [1][1][2][0][RTW89_UK][2] = 48,
+ [1][1][2][0][RTW89_FCC][3] = 60,
+ [1][1][2][0][RTW89_ETSI][3] = 48,
+ [1][1][2][0][RTW89_MKK][3] = 68,
+ [1][1][2][0][RTW89_IC][3] = 60,
+ [1][1][2][0][RTW89_KCC][3] = 64,
+ [1][1][2][0][RTW89_ACMA][3] = 48,
+ [1][1][2][0][RTW89_CN][3] = 34,
+ [1][1][2][0][RTW89_UK][3] = 48,
+ [1][1][2][0][RTW89_FCC][4] = 60,
+ [1][1][2][0][RTW89_ETSI][4] = 48,
+ [1][1][2][0][RTW89_MKK][4] = 68,
+ [1][1][2][0][RTW89_IC][4] = 60,
+ [1][1][2][0][RTW89_KCC][4] = 64,
+ [1][1][2][0][RTW89_ACMA][4] = 48,
+ [1][1][2][0][RTW89_CN][4] = 34,
+ [1][1][2][0][RTW89_UK][4] = 48,
+ [1][1][2][0][RTW89_FCC][5] = 60,
+ [1][1][2][0][RTW89_ETSI][5] = 48,
+ [1][1][2][0][RTW89_MKK][5] = 68,
+ [1][1][2][0][RTW89_IC][5] = 60,
+ [1][1][2][0][RTW89_KCC][5] = 66,
+ [1][1][2][0][RTW89_ACMA][5] = 48,
+ [1][1][2][0][RTW89_CN][5] = 34,
+ [1][1][2][0][RTW89_UK][5] = 48,
+ [1][1][2][0][RTW89_FCC][6] = 58,
+ [1][1][2][0][RTW89_ETSI][6] = 48,
+ [1][1][2][0][RTW89_MKK][6] = 68,
+ [1][1][2][0][RTW89_IC][6] = 58,
+ [1][1][2][0][RTW89_KCC][6] = 66,
+ [1][1][2][0][RTW89_ACMA][6] = 48,
+ [1][1][2][0][RTW89_CN][6] = 34,
+ [1][1][2][0][RTW89_UK][6] = 48,
+ [1][1][2][0][RTW89_FCC][7] = 54,
+ [1][1][2][0][RTW89_ETSI][7] = 48,
+ [1][1][2][0][RTW89_MKK][7] = 68,
+ [1][1][2][0][RTW89_IC][7] = 54,
+ [1][1][2][0][RTW89_KCC][7] = 66,
+ [1][1][2][0][RTW89_ACMA][7] = 48,
+ [1][1][2][0][RTW89_CN][7] = 34,
+ [1][1][2][0][RTW89_UK][7] = 48,
+ [1][1][2][0][RTW89_FCC][8] = 54,
+ [1][1][2][0][RTW89_ETSI][8] = 48,
+ [1][1][2][0][RTW89_MKK][8] = 68,
+ [1][1][2][0][RTW89_IC][8] = 54,
+ [1][1][2][0][RTW89_KCC][8] = 64,
+ [1][1][2][0][RTW89_ACMA][8] = 48,
+ [1][1][2][0][RTW89_CN][8] = 34,
+ [1][1][2][0][RTW89_UK][8] = 48,
+ [1][1][2][0][RTW89_FCC][9] = 54,
+ [1][1][2][0][RTW89_ETSI][9] = 48,
+ [1][1][2][0][RTW89_MKK][9] = 68,
+ [1][1][2][0][RTW89_IC][9] = 54,
+ [1][1][2][0][RTW89_KCC][9] = 64,
+ [1][1][2][0][RTW89_ACMA][9] = 48,
+ [1][1][2][0][RTW89_CN][9] = 34,
+ [1][1][2][0][RTW89_UK][9] = 48,
+ [1][1][2][0][RTW89_FCC][10] = 46,
+ [1][1][2][0][RTW89_ETSI][10] = 48,
+ [1][1][2][0][RTW89_MKK][10] = 68,
+ [1][1][2][0][RTW89_IC][10] = 46,
+ [1][1][2][0][RTW89_KCC][10] = 64,
+ [1][1][2][0][RTW89_ACMA][10] = 48,
+ [1][1][2][0][RTW89_CN][10] = 34,
+ [1][1][2][0][RTW89_UK][10] = 48,
+ [1][1][2][0][RTW89_FCC][11] = 127,
+ [1][1][2][0][RTW89_ETSI][11] = 127,
+ [1][1][2][0][RTW89_MKK][11] = 127,
+ [1][1][2][0][RTW89_IC][11] = 127,
+ [1][1][2][0][RTW89_KCC][11] = 127,
+ [1][1][2][0][RTW89_ACMA][11] = 127,
+ [1][1][2][0][RTW89_CN][11] = 127,
+ [1][1][2][0][RTW89_UK][11] = 127,
+ [1][1][2][0][RTW89_FCC][12] = 127,
+ [1][1][2][0][RTW89_ETSI][12] = 127,
+ [1][1][2][0][RTW89_MKK][12] = 127,
+ [1][1][2][0][RTW89_IC][12] = 127,
+ [1][1][2][0][RTW89_KCC][12] = 127,
+ [1][1][2][0][RTW89_ACMA][12] = 127,
+ [1][1][2][0][RTW89_CN][12] = 127,
+ [1][1][2][0][RTW89_UK][12] = 127,
+ [1][1][2][0][RTW89_FCC][13] = 127,
+ [1][1][2][0][RTW89_ETSI][13] = 127,
+ [1][1][2][0][RTW89_MKK][13] = 127,
+ [1][1][2][0][RTW89_IC][13] = 127,
+ [1][1][2][0][RTW89_KCC][13] = 127,
+ [1][1][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][0][RTW89_CN][13] = 127,
+ [1][1][2][0][RTW89_UK][13] = 127,
+ [1][1][2][1][RTW89_FCC][0] = 127,
+ [1][1][2][1][RTW89_ETSI][0] = 127,
+ [1][1][2][1][RTW89_MKK][0] = 127,
+ [1][1][2][1][RTW89_IC][0] = 127,
+ [1][1][2][1][RTW89_KCC][0] = 127,
+ [1][1][2][1][RTW89_ACMA][0] = 127,
+ [1][1][2][1][RTW89_CN][0] = 127,
+ [1][1][2][1][RTW89_UK][0] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 127,
+ [1][1][2][1][RTW89_ETSI][1] = 127,
+ [1][1][2][1][RTW89_MKK][1] = 127,
+ [1][1][2][1][RTW89_IC][1] = 127,
+ [1][1][2][1][RTW89_KCC][1] = 127,
+ [1][1][2][1][RTW89_ACMA][1] = 127,
+ [1][1][2][1][RTW89_CN][1] = 127,
+ [1][1][2][1][RTW89_UK][1] = 127,
+ [1][1][2][1][RTW89_FCC][2] = 60,
+ [1][1][2][1][RTW89_ETSI][2] = 36,
+ [1][1][2][1][RTW89_MKK][2] = 68,
+ [1][1][2][1][RTW89_IC][2] = 60,
+ [1][1][2][1][RTW89_KCC][2] = 64,
+ [1][1][2][1][RTW89_ACMA][2] = 36,
+ [1][1][2][1][RTW89_CN][2] = 34,
+ [1][1][2][1][RTW89_UK][2] = 36,
+ [1][1][2][1][RTW89_FCC][3] = 60,
+ [1][1][2][1][RTW89_ETSI][3] = 36,
+ [1][1][2][1][RTW89_MKK][3] = 68,
+ [1][1][2][1][RTW89_IC][3] = 60,
+ [1][1][2][1][RTW89_KCC][3] = 64,
+ [1][1][2][1][RTW89_ACMA][3] = 36,
+ [1][1][2][1][RTW89_CN][3] = 34,
+ [1][1][2][1][RTW89_UK][3] = 36,
+ [1][1][2][1][RTW89_FCC][4] = 60,
+ [1][1][2][1][RTW89_ETSI][4] = 36,
+ [1][1][2][1][RTW89_MKK][4] = 68,
+ [1][1][2][1][RTW89_IC][4] = 60,
+ [1][1][2][1][RTW89_KCC][4] = 64,
+ [1][1][2][1][RTW89_ACMA][4] = 36,
+ [1][1][2][1][RTW89_CN][4] = 34,
+ [1][1][2][1][RTW89_UK][4] = 36,
+ [1][1][2][1][RTW89_FCC][5] = 60,
+ [1][1][2][1][RTW89_ETSI][5] = 36,
+ [1][1][2][1][RTW89_MKK][5] = 68,
+ [1][1][2][1][RTW89_IC][5] = 60,
+ [1][1][2][1][RTW89_KCC][5] = 66,
+ [1][1][2][1][RTW89_ACMA][5] = 36,
+ [1][1][2][1][RTW89_CN][5] = 34,
+ [1][1][2][1][RTW89_UK][5] = 36,
+ [1][1][2][1][RTW89_FCC][6] = 58,
+ [1][1][2][1][RTW89_ETSI][6] = 36,
+ [1][1][2][1][RTW89_MKK][6] = 68,
+ [1][1][2][1][RTW89_IC][6] = 58,
+ [1][1][2][1][RTW89_KCC][6] = 66,
+ [1][1][2][1][RTW89_ACMA][6] = 36,
+ [1][1][2][1][RTW89_CN][6] = 34,
+ [1][1][2][1][RTW89_UK][6] = 36,
+ [1][1][2][1][RTW89_FCC][7] = 54,
+ [1][1][2][1][RTW89_ETSI][7] = 36,
+ [1][1][2][1][RTW89_MKK][7] = 68,
+ [1][1][2][1][RTW89_IC][7] = 54,
+ [1][1][2][1][RTW89_KCC][7] = 66,
+ [1][1][2][1][RTW89_ACMA][7] = 36,
+ [1][1][2][1][RTW89_CN][7] = 34,
+ [1][1][2][1][RTW89_UK][7] = 36,
+ [1][1][2][1][RTW89_FCC][8] = 54,
+ [1][1][2][1][RTW89_ETSI][8] = 36,
+ [1][1][2][1][RTW89_MKK][8] = 68,
+ [1][1][2][1][RTW89_IC][8] = 54,
+ [1][1][2][1][RTW89_KCC][8] = 64,
+ [1][1][2][1][RTW89_ACMA][8] = 36,
+ [1][1][2][1][RTW89_CN][8] = 34,
+ [1][1][2][1][RTW89_UK][8] = 36,
+ [1][1][2][1][RTW89_FCC][9] = 54,
+ [1][1][2][1][RTW89_ETSI][9] = 36,
+ [1][1][2][1][RTW89_MKK][9] = 68,
+ [1][1][2][1][RTW89_IC][9] = 54,
+ [1][1][2][1][RTW89_KCC][9] = 64,
+ [1][1][2][1][RTW89_ACMA][9] = 36,
+ [1][1][2][1][RTW89_CN][9] = 34,
+ [1][1][2][1][RTW89_UK][9] = 36,
+ [1][1][2][1][RTW89_FCC][10] = 46,
+ [1][1][2][1][RTW89_ETSI][10] = 36,
+ [1][1][2][1][RTW89_MKK][10] = 68,
+ [1][1][2][1][RTW89_IC][10] = 46,
+ [1][1][2][1][RTW89_KCC][10] = 64,
+ [1][1][2][1][RTW89_ACMA][10] = 36,
+ [1][1][2][1][RTW89_CN][10] = 36,
+ [1][1][2][1][RTW89_UK][10] = 36,
+ [1][1][2][1][RTW89_FCC][11] = 127,
+ [1][1][2][1][RTW89_ETSI][11] = 127,
+ [1][1][2][1][RTW89_MKK][11] = 127,
+ [1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_KCC][11] = 127,
+ [1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_CN][11] = 127,
+ [1][1][2][1][RTW89_UK][11] = 127,
+ [1][1][2][1][RTW89_FCC][12] = 127,
+ [1][1][2][1][RTW89_ETSI][12] = 127,
+ [1][1][2][1][RTW89_MKK][12] = 127,
+ [1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_KCC][12] = 127,
+ [1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_CN][12] = 127,
+ [1][1][2][1][RTW89_UK][12] = 127,
+ [1][1][2][1][RTW89_FCC][13] = 127,
+ [1][1][2][1][RTW89_ETSI][13] = 127,
+ [1][1][2][1][RTW89_MKK][13] = 127,
+ [1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_KCC][13] = 127,
+ [1][1][2][1][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_CN][13] = 127,
+ [1][1][2][1][RTW89_UK][13] = 127,
+};
+
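+/*
+ * Reader note (inferred from the declaration below, not part of the
+ * generated sources): entries are indexed as [bandwidth][TX path count]
+ * [rate-section limit][beamforming off/on][regulatory domain][channel
+ * index]. Throughout these tables, 127 (the s8 maximum) appears to act
+ * as a sentinel meaning "channel not allowed / no limit defined" for
+ * the given regulatory domain.
+ */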
+const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][1][0][RTW89_WW][0] = 50,
+ [0][0][1][0][RTW89_WW][2] = 50,
+ [0][0][1][0][RTW89_WW][4] = 50,
+ [0][0][1][0][RTW89_WW][6] = 50,
+ [0][0][1][0][RTW89_WW][8] = 50,
+ [0][0][1][0][RTW89_WW][10] = 50,
+ [0][0][1][0][RTW89_WW][12] = 50,
+ [0][0][1][0][RTW89_WW][14] = 50,
+ [0][0][1][0][RTW89_WW][15] = 66,
+ [0][0][1][0][RTW89_WW][17] = 66,
+ [0][0][1][0][RTW89_WW][19] = 66,
+ [0][0][1][0][RTW89_WW][21] = 66,
+ [0][0][1][0][RTW89_WW][23] = 66,
+ [0][0][1][0][RTW89_WW][25] = 66,
+ [0][0][1][0][RTW89_WW][27] = 66,
+ [0][0][1][0][RTW89_WW][29] = 66,
+ [0][0][1][0][RTW89_WW][31] = 66,
+ [0][0][1][0][RTW89_WW][33] = 66,
+ [0][0][1][0][RTW89_WW][35] = 60,
+ [0][0][1][0][RTW89_WW][37] = 64,
+ [0][0][1][0][RTW89_WW][38] = 30,
+ [0][0][1][0][RTW89_WW][40] = 30,
+ [0][0][1][0][RTW89_WW][42] = 30,
+ [0][0][1][0][RTW89_WW][44] = 30,
+ [0][0][1][0][RTW89_WW][46] = 30,
+ [0][0][1][0][RTW89_WW][48] = 72,
+ [0][0][1][0][RTW89_WW][50] = 72,
+ [0][0][1][0][RTW89_WW][52] = 72,
+ [0][1][1][0][RTW89_WW][0] = 34,
+ [0][1][1][0][RTW89_WW][2] = 34,
+ [0][1][1][0][RTW89_WW][4] = 34,
+ [0][1][1][0][RTW89_WW][6] = 36,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][12] = 46,
+ [0][1][1][0][RTW89_WW][14] = 46,
+ [0][1][1][0][RTW89_WW][15] = 54,
+ [0][1][1][0][RTW89_WW][17] = 54,
+ [0][1][1][0][RTW89_WW][19] = 54,
+ [0][1][1][0][RTW89_WW][21] = 54,
+ [0][1][1][0][RTW89_WW][23] = 54,
+ [0][1][1][0][RTW89_WW][25] = 54,
+ [0][1][1][0][RTW89_WW][27] = 54,
+ [0][1][1][0][RTW89_WW][29] = 54,
+ [0][1][1][0][RTW89_WW][31] = 54,
+ [0][1][1][0][RTW89_WW][33] = 54,
+ [0][1][1][0][RTW89_WW][35] = 52,
+ [0][1][1][0][RTW89_WW][37] = 52,
+ [0][1][1][0][RTW89_WW][38] = 18,
+ [0][1][1][0][RTW89_WW][40] = 18,
+ [0][1][1][0][RTW89_WW][42] = 18,
+ [0][1][1][0][RTW89_WW][44] = 18,
+ [0][1][1][0][RTW89_WW][46] = 18,
+ [0][1][1][0][RTW89_WW][48] = 48,
+ [0][1][1][0][RTW89_WW][50] = 48,
+ [0][1][1][0][RTW89_WW][52] = 48,
+ [0][0][2][0][RTW89_WW][0] = 52,
+ [0][0][2][0][RTW89_WW][2] = 52,
+ [0][0][2][0][RTW89_WW][4] = 52,
+ [0][0][2][0][RTW89_WW][6] = 52,
+ [0][0][2][0][RTW89_WW][8] = 52,
+ [0][0][2][0][RTW89_WW][10] = 52,
+ [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][14] = 52,
+ [0][0][2][0][RTW89_WW][15] = 66,
+ [0][0][2][0][RTW89_WW][17] = 66,
+ [0][0][2][0][RTW89_WW][19] = 66,
+ [0][0][2][0][RTW89_WW][21] = 66,
+ [0][0][2][0][RTW89_WW][23] = 66,
+ [0][0][2][0][RTW89_WW][25] = 66,
+ [0][0][2][0][RTW89_WW][27] = 66,
+ [0][0][2][0][RTW89_WW][29] = 66,
+ [0][0][2][0][RTW89_WW][31] = 66,
+ [0][0][2][0][RTW89_WW][33] = 66,
+ [0][0][2][0][RTW89_WW][35] = 56,
+ [0][0][2][0][RTW89_WW][37] = 64,
+ [0][0][2][0][RTW89_WW][38] = 30,
+ [0][0][2][0][RTW89_WW][40] = 30,
+ [0][0][2][0][RTW89_WW][42] = 30,
+ [0][0][2][0][RTW89_WW][44] = 30,
+ [0][0][2][0][RTW89_WW][46] = 30,
+ [0][0][2][0][RTW89_WW][48] = 72,
+ [0][0][2][0][RTW89_WW][50] = 72,
+ [0][0][2][0][RTW89_WW][52] = 72,
+ [0][1][2][0][RTW89_WW][0] = 36,
+ [0][1][2][0][RTW89_WW][2] = 36,
+ [0][1][2][0][RTW89_WW][4] = 36,
+ [0][1][2][0][RTW89_WW][6] = 38,
+ [0][1][2][0][RTW89_WW][8] = 40,
+ [0][1][2][0][RTW89_WW][10] = 40,
+ [0][1][2][0][RTW89_WW][12] = 40,
+ [0][1][2][0][RTW89_WW][14] = 40,
+ [0][1][2][0][RTW89_WW][15] = 54,
+ [0][1][2][0][RTW89_WW][17] = 54,
+ [0][1][2][0][RTW89_WW][19] = 54,
+ [0][1][2][0][RTW89_WW][21] = 54,
+ [0][1][2][0][RTW89_WW][23] = 54,
+ [0][1][2][0][RTW89_WW][25] = 54,
+ [0][1][2][0][RTW89_WW][27] = 54,
+ [0][1][2][0][RTW89_WW][29] = 54,
+ [0][1][2][0][RTW89_WW][31] = 54,
+ [0][1][2][0][RTW89_WW][33] = 54,
+ [0][1][2][0][RTW89_WW][35] = 46,
+ [0][1][2][0][RTW89_WW][37] = 52,
+ [0][1][2][0][RTW89_WW][38] = 18,
+ [0][1][2][0][RTW89_WW][40] = 18,
+ [0][1][2][0][RTW89_WW][42] = 18,
+ [0][1][2][0][RTW89_WW][44] = 18,
+ [0][1][2][0][RTW89_WW][46] = 18,
+ [0][1][2][0][RTW89_WW][48] = 48,
+ [0][1][2][0][RTW89_WW][50] = 50,
+ [0][1][2][0][RTW89_WW][52] = 48,
+ [0][1][2][1][RTW89_WW][0] = 36,
+ [0][1][2][1][RTW89_WW][2] = 36,
+ [0][1][2][1][RTW89_WW][4] = 36,
+ [0][1][2][1][RTW89_WW][6] = 36,
+ [0][1][2][1][RTW89_WW][8] = 36,
+ [0][1][2][1][RTW89_WW][10] = 36,
+ [0][1][2][1][RTW89_WW][12] = 36,
+ [0][1][2][1][RTW89_WW][14] = 36,
+ [0][1][2][1][RTW89_WW][15] = 40,
+ [0][1][2][1][RTW89_WW][17] = 40,
+ [0][1][2][1][RTW89_WW][19] = 40,
+ [0][1][2][1][RTW89_WW][21] = 40,
+ [0][1][2][1][RTW89_WW][23] = 40,
+ [0][1][2][1][RTW89_WW][25] = 40,
+ [0][1][2][1][RTW89_WW][27] = 40,
+ [0][1][2][1][RTW89_WW][29] = 40,
+ [0][1][2][1][RTW89_WW][31] = 40,
+ [0][1][2][1][RTW89_WW][33] = 40,
+ [0][1][2][1][RTW89_WW][35] = 40,
+ [0][1][2][1][RTW89_WW][37] = 40,
+ [0][1][2][1][RTW89_WW][38] = 6,
+ [0][1][2][1][RTW89_WW][40] = 6,
+ [0][1][2][1][RTW89_WW][42] = 6,
+ [0][1][2][1][RTW89_WW][44] = 6,
+ [0][1][2][1][RTW89_WW][46] = 6,
+ [0][1][2][1][RTW89_WW][48] = 48,
+ [0][1][2][1][RTW89_WW][50] = 50,
+ [0][1][2][1][RTW89_WW][52] = 48,
+ [1][0][2][0][RTW89_WW][1] = 54,
+ [1][0][2][0][RTW89_WW][5] = 54,
+ [1][0][2][0][RTW89_WW][9] = 54,
+ [1][0][2][0][RTW89_WW][13] = 52,
+ [1][0][2][0][RTW89_WW][16] = 56,
+ [1][0][2][0][RTW89_WW][20] = 56,
+ [1][0][2][0][RTW89_WW][24] = 56,
+ [1][0][2][0][RTW89_WW][28] = 66,
+ [1][0][2][0][RTW89_WW][32] = 62,
+ [1][0][2][0][RTW89_WW][36] = 64,
+ [1][0][2][0][RTW89_WW][39] = 30,
+ [1][0][2][0][RTW89_WW][43] = 30,
+ [1][0][2][0][RTW89_WW][47] = 68,
+ [1][0][2][0][RTW89_WW][51] = 68,
+ [1][1][2][0][RTW89_WW][1] = 42,
+ [1][1][2][0][RTW89_WW][5] = 42,
+ [1][1][2][0][RTW89_WW][9] = 42,
+ [1][1][2][0][RTW89_WW][13] = 42,
+ [1][1][2][0][RTW89_WW][16] = 54,
+ [1][1][2][0][RTW89_WW][20] = 54,
+ [1][1][2][0][RTW89_WW][24] = 54,
+ [1][1][2][0][RTW89_WW][28] = 54,
+ [1][1][2][0][RTW89_WW][32] = 54,
+ [1][1][2][0][RTW89_WW][36] = 52,
+ [1][1][2][0][RTW89_WW][39] = 18,
+ [1][1][2][0][RTW89_WW][43] = 18,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 60,
+ [1][1][2][1][RTW89_WW][1] = 40,
+ [1][1][2][1][RTW89_WW][5] = 40,
+ [1][1][2][1][RTW89_WW][9] = 40,
+ [1][1][2][1][RTW89_WW][13] = 40,
+ [1][1][2][1][RTW89_WW][16] = 40,
+ [1][1][2][1][RTW89_WW][20] = 40,
+ [1][1][2][1][RTW89_WW][24] = 40,
+ [1][1][2][1][RTW89_WW][28] = 40,
+ [1][1][2][1][RTW89_WW][32] = 40,
+ [1][1][2][1][RTW89_WW][36] = 40,
+ [1][1][2][1][RTW89_WW][39] = 6,
+ [1][1][2][1][RTW89_WW][43] = 6,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 60,
+ [2][0][2][0][RTW89_WW][3] = 54,
+ [2][0][2][0][RTW89_WW][11] = 50,
+ [2][0][2][0][RTW89_WW][18] = 56,
+ [2][0][2][0][RTW89_WW][26] = 60,
+ [2][0][2][0][RTW89_WW][34] = 60,
+ [2][0][2][0][RTW89_WW][41] = 30,
+ [2][0][2][0][RTW89_WW][49] = 62,
+ [2][1][2][0][RTW89_WW][3] = 46,
+ [2][1][2][0][RTW89_WW][11] = 38,
+ [2][1][2][0][RTW89_WW][18] = 50,
+ [2][1][2][0][RTW89_WW][26] = 52,
+ [2][1][2][0][RTW89_WW][34] = 52,
+ [2][1][2][0][RTW89_WW][41] = 18,
+ [2][1][2][0][RTW89_WW][49] = 62,
+ [2][1][2][1][RTW89_WW][3] = 40,
+ [2][1][2][1][RTW89_WW][11] = 38,
+ [2][1][2][1][RTW89_WW][18] = 40,
+ [2][1][2][1][RTW89_WW][26] = 42,
+ [2][1][2][1][RTW89_WW][34] = 40,
+ [2][1][2][1][RTW89_WW][41] = 6,
+ [2][1][2][1][RTW89_WW][49] = 62,
+ [3][0][2][0][RTW89_WW][7] = 40,
+ [3][0][2][0][RTW89_WW][22] = 42,
+ [3][0][2][0][RTW89_WW][45] = 52,
+ [3][1][2][0][RTW89_WW][7] = 32,
+ [3][1][2][0][RTW89_WW][22] = 36,
+ [3][1][2][0][RTW89_WW][45] = 46,
+ [3][1][2][1][RTW89_WW][7] = 32,
+ [3][1][2][1][RTW89_WW][22] = 36,
+ [3][1][2][1][RTW89_WW][45] = 46,
+ [0][0][1][0][RTW89_FCC][0] = 72,
+ [0][0][1][0][RTW89_ETSI][0] = 66,
+ [0][0][1][0][RTW89_MKK][0] = 66,
+ [0][0][1][0][RTW89_IC][0] = 60,
+ [0][0][1][0][RTW89_KCC][0] = 52,
+ [0][0][1][0][RTW89_ACMA][0] = 66,
+ [0][0][1][0][RTW89_CN][0] = 50,
+ [0][0][1][0][RTW89_UK][0] = 66,
+ [0][0][1][0][RTW89_FCC][2] = 72,
+ [0][0][1][0][RTW89_ETSI][2] = 66,
+ [0][0][1][0][RTW89_MKK][2] = 66,
+ [0][0][1][0][RTW89_IC][2] = 60,
+ [0][0][1][0][RTW89_KCC][2] = 52,
+ [0][0][1][0][RTW89_ACMA][2] = 66,
+ [0][0][1][0][RTW89_CN][2] = 50,
+ [0][0][1][0][RTW89_UK][2] = 66,
+ [0][0][1][0][RTW89_FCC][4] = 72,
+ [0][0][1][0][RTW89_ETSI][4] = 66,
+ [0][0][1][0][RTW89_MKK][4] = 66,
+ [0][0][1][0][RTW89_IC][4] = 60,
+ [0][0][1][0][RTW89_KCC][4] = 52,
+ [0][0][1][0][RTW89_ACMA][4] = 66,
+ [0][0][1][0][RTW89_CN][4] = 50,
+ [0][0][1][0][RTW89_UK][4] = 66,
+ [0][0][1][0][RTW89_FCC][6] = 72,
+ [0][0][1][0][RTW89_ETSI][6] = 66,
+ [0][0][1][0][RTW89_MKK][6] = 66,
+ [0][0][1][0][RTW89_IC][6] = 58,
+ [0][0][1][0][RTW89_KCC][6] = 62,
+ [0][0][1][0][RTW89_ACMA][6] = 66,
+ [0][0][1][0][RTW89_CN][6] = 50,
+ [0][0][1][0][RTW89_UK][6] = 66,
+ [0][0][1][0][RTW89_FCC][8] = 72,
+ [0][0][1][0][RTW89_ETSI][8] = 66,
+ [0][0][1][0][RTW89_MKK][8] = 66,
+ [0][0][1][0][RTW89_IC][8] = 64,
+ [0][0][1][0][RTW89_KCC][8] = 70,
+ [0][0][1][0][RTW89_ACMA][8] = 66,
+ [0][0][1][0][RTW89_CN][8] = 50,
+ [0][0][1][0][RTW89_UK][8] = 66,
+ [0][0][1][0][RTW89_FCC][10] = 72,
+ [0][0][1][0][RTW89_ETSI][10] = 66,
+ [0][0][1][0][RTW89_MKK][10] = 66,
+ [0][0][1][0][RTW89_IC][10] = 64,
+ [0][0][1][0][RTW89_KCC][10] = 70,
+ [0][0][1][0][RTW89_ACMA][10] = 66,
+ [0][0][1][0][RTW89_CN][10] = 50,
+ [0][0][1][0][RTW89_UK][10] = 66,
+ [0][0][1][0][RTW89_FCC][12] = 72,
+ [0][0][1][0][RTW89_ETSI][12] = 66,
+ [0][0][1][0][RTW89_MKK][12] = 66,
+ [0][0][1][0][RTW89_IC][12] = 64,
+ [0][0][1][0][RTW89_KCC][12] = 66,
+ [0][0][1][0][RTW89_ACMA][12] = 66,
+ [0][0][1][0][RTW89_CN][12] = 50,
+ [0][0][1][0][RTW89_UK][12] = 66,
+ [0][0][1][0][RTW89_FCC][14] = 70,
+ [0][0][1][0][RTW89_ETSI][14] = 66,
+ [0][0][1][0][RTW89_MKK][14] = 66,
+ [0][0][1][0][RTW89_IC][14] = 64,
+ [0][0][1][0][RTW89_KCC][14] = 66,
+ [0][0][1][0][RTW89_ACMA][14] = 66,
+ [0][0][1][0][RTW89_CN][14] = 50,
+ [0][0][1][0][RTW89_UK][14] = 66,
+ [0][0][1][0][RTW89_FCC][15] = 72,
+ [0][0][1][0][RTW89_ETSI][15] = 66,
+ [0][0][1][0][RTW89_MKK][15] = 70,
+ [0][0][1][0][RTW89_IC][15] = 72,
+ [0][0][1][0][RTW89_KCC][15] = 70,
+ [0][0][1][0][RTW89_ACMA][15] = 66,
+ [0][0][1][0][RTW89_CN][15] = 127,
+ [0][0][1][0][RTW89_UK][15] = 66,
+ [0][0][1][0][RTW89_FCC][17] = 72,
+ [0][0][1][0][RTW89_ETSI][17] = 66,
+ [0][0][1][0][RTW89_MKK][17] = 70,
+ [0][0][1][0][RTW89_IC][17] = 72,
+ [0][0][1][0][RTW89_KCC][17] = 70,
+ [0][0][1][0][RTW89_ACMA][17] = 66,
+ [0][0][1][0][RTW89_CN][17] = 127,
+ [0][0][1][0][RTW89_UK][17] = 66,
+ [0][0][1][0][RTW89_FCC][19] = 72,
+ [0][0][1][0][RTW89_ETSI][19] = 66,
+ [0][0][1][0][RTW89_MKK][19] = 70,
+ [0][0][1][0][RTW89_IC][19] = 72,
+ [0][0][1][0][RTW89_KCC][19] = 70,
+ [0][0][1][0][RTW89_ACMA][19] = 66,
+ [0][0][1][0][RTW89_CN][19] = 127,
+ [0][0][1][0][RTW89_UK][19] = 66,
+ [0][0][1][0][RTW89_FCC][21] = 72,
+ [0][0][1][0][RTW89_ETSI][21] = 66,
+ [0][0][1][0][RTW89_MKK][21] = 70,
+ [0][0][1][0][RTW89_IC][21] = 72,
+ [0][0][1][0][RTW89_KCC][21] = 70,
+ [0][0][1][0][RTW89_ACMA][21] = 66,
+ [0][0][1][0][RTW89_CN][21] = 127,
+ [0][0][1][0][RTW89_UK][21] = 66,
+ [0][0][1][0][RTW89_FCC][23] = 72,
+ [0][0][1][0][RTW89_ETSI][23] = 66,
+ [0][0][1][0][RTW89_MKK][23] = 70,
+ [0][0][1][0][RTW89_IC][23] = 72,
+ [0][0][1][0][RTW89_KCC][23] = 70,
+ [0][0][1][0][RTW89_ACMA][23] = 66,
+ [0][0][1][0][RTW89_CN][23] = 127,
+ [0][0][1][0][RTW89_UK][23] = 66,
+ [0][0][1][0][RTW89_FCC][25] = 72,
+ [0][0][1][0][RTW89_ETSI][25] = 66,
+ [0][0][1][0][RTW89_MKK][25] = 70,
+ [0][0][1][0][RTW89_IC][25] = 127,
+ [0][0][1][0][RTW89_KCC][25] = 70,
+ [0][0][1][0][RTW89_ACMA][25] = 127,
+ [0][0][1][0][RTW89_CN][25] = 127,
+ [0][0][1][0][RTW89_UK][25] = 66,
+ [0][0][1][0][RTW89_FCC][27] = 72,
+ [0][0][1][0][RTW89_ETSI][27] = 66,
+ [0][0][1][0][RTW89_MKK][27] = 70,
+ [0][0][1][0][RTW89_IC][27] = 127,
+ [0][0][1][0][RTW89_KCC][27] = 70,
+ [0][0][1][0][RTW89_ACMA][27] = 127,
+ [0][0][1][0][RTW89_CN][27] = 127,
+ [0][0][1][0][RTW89_UK][27] = 66,
+ [0][0][1][0][RTW89_FCC][29] = 72,
+ [0][0][1][0][RTW89_ETSI][29] = 66,
+ [0][0][1][0][RTW89_MKK][29] = 70,
+ [0][0][1][0][RTW89_IC][29] = 127,
+ [0][0][1][0][RTW89_KCC][29] = 70,
+ [0][0][1][0][RTW89_ACMA][29] = 127,
+ [0][0][1][0][RTW89_CN][29] = 127,
+ [0][0][1][0][RTW89_UK][29] = 66,
+ [0][0][1][0][RTW89_FCC][31] = 72,
+ [0][0][1][0][RTW89_ETSI][31] = 66,
+ [0][0][1][0][RTW89_MKK][31] = 70,
+ [0][0][1][0][RTW89_IC][31] = 72,
+ [0][0][1][0][RTW89_KCC][31] = 70,
+ [0][0][1][0][RTW89_ACMA][31] = 66,
+ [0][0][1][0][RTW89_CN][31] = 127,
+ [0][0][1][0][RTW89_UK][31] = 66,
+ [0][0][1][0][RTW89_FCC][33] = 72,
+ [0][0][1][0][RTW89_ETSI][33] = 66,
+ [0][0][1][0][RTW89_MKK][33] = 70,
+ [0][0][1][0][RTW89_IC][33] = 72,
+ [0][0][1][0][RTW89_KCC][33] = 70,
+ [0][0][1][0][RTW89_ACMA][33] = 66,
+ [0][0][1][0][RTW89_CN][33] = 127,
+ [0][0][1][0][RTW89_UK][33] = 66,
+ [0][0][1][0][RTW89_FCC][35] = 60,
+ [0][0][1][0][RTW89_ETSI][35] = 66,
+ [0][0][1][0][RTW89_MKK][35] = 70,
+ [0][0][1][0][RTW89_IC][35] = 60,
+ [0][0][1][0][RTW89_KCC][35] = 70,
+ [0][0][1][0][RTW89_ACMA][35] = 66,
+ [0][0][1][0][RTW89_CN][35] = 127,
+ [0][0][1][0][RTW89_UK][35] = 66,
+ [0][0][1][0][RTW89_FCC][37] = 72,
+ [0][0][1][0][RTW89_ETSI][37] = 127,
+ [0][0][1][0][RTW89_MKK][37] = 70,
+ [0][0][1][0][RTW89_IC][37] = 72,
+ [0][0][1][0][RTW89_KCC][37] = 70,
+ [0][0][1][0][RTW89_ACMA][37] = 70,
+ [0][0][1][0][RTW89_CN][37] = 127,
+ [0][0][1][0][RTW89_UK][37] = 64,
+ [0][0][1][0][RTW89_FCC][38] = 72,
+ [0][0][1][0][RTW89_ETSI][38] = 30,
+ [0][0][1][0][RTW89_MKK][38] = 127,
+ [0][0][1][0][RTW89_IC][38] = 72,
+ [0][0][1][0][RTW89_KCC][38] = 62,
+ [0][0][1][0][RTW89_ACMA][38] = 70,
+ [0][0][1][0][RTW89_CN][38] = 68,
+ [0][0][1][0][RTW89_UK][38] = 64,
+ [0][0][1][0][RTW89_FCC][40] = 72,
+ [0][0][1][0][RTW89_ETSI][40] = 30,
+ [0][0][1][0][RTW89_MKK][40] = 127,
+ [0][0][1][0][RTW89_IC][40] = 72,
+ [0][0][1][0][RTW89_KCC][40] = 62,
+ [0][0][1][0][RTW89_ACMA][40] = 70,
+ [0][0][1][0][RTW89_CN][40] = 68,
+ [0][0][1][0][RTW89_UK][40] = 64,
+ [0][0][1][0][RTW89_FCC][42] = 72,
+ [0][0][1][0][RTW89_ETSI][42] = 30,
+ [0][0][1][0][RTW89_MKK][42] = 127,
+ [0][0][1][0][RTW89_IC][42] = 72,
+ [0][0][1][0][RTW89_KCC][42] = 62,
+ [0][0][1][0][RTW89_ACMA][42] = 70,
+ [0][0][1][0][RTW89_CN][42] = 68,
+ [0][0][1][0][RTW89_UK][42] = 64,
+ [0][0][1][0][RTW89_FCC][44] = 72,
+ [0][0][1][0][RTW89_ETSI][44] = 30,
+ [0][0][1][0][RTW89_MKK][44] = 127,
+ [0][0][1][0][RTW89_IC][44] = 72,
+ [0][0][1][0][RTW89_KCC][44] = 62,
+ [0][0][1][0][RTW89_ACMA][44] = 70,
+ [0][0][1][0][RTW89_CN][44] = 68,
+ [0][0][1][0][RTW89_UK][44] = 64,
+ [0][0][1][0][RTW89_FCC][46] = 72,
+ [0][0][1][0][RTW89_ETSI][46] = 30,
+ [0][0][1][0][RTW89_MKK][46] = 127,
+ [0][0][1][0][RTW89_IC][46] = 72,
+ [0][0][1][0][RTW89_KCC][46] = 62,
+ [0][0][1][0][RTW89_ACMA][46] = 70,
+ [0][0][1][0][RTW89_CN][46] = 68,
+ [0][0][1][0][RTW89_UK][46] = 64,
+ [0][0][1][0][RTW89_FCC][48] = 72,
+ [0][0][1][0][RTW89_ETSI][48] = 127,
+ [0][0][1][0][RTW89_MKK][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_KCC][48] = 127,
+ [0][0][1][0][RTW89_ACMA][48] = 127,
+ [0][0][1][0][RTW89_CN][48] = 127,
+ [0][0][1][0][RTW89_UK][48] = 127,
+ [0][0][1][0][RTW89_FCC][50] = 72,
+ [0][0][1][0][RTW89_ETSI][50] = 127,
+ [0][0][1][0][RTW89_MKK][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_KCC][50] = 127,
+ [0][0][1][0][RTW89_ACMA][50] = 127,
+ [0][0][1][0][RTW89_CN][50] = 127,
+ [0][0][1][0][RTW89_UK][50] = 127,
+ [0][0][1][0][RTW89_FCC][52] = 72,
+ [0][0][1][0][RTW89_ETSI][52] = 127,
+ [0][0][1][0][RTW89_MKK][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_KCC][52] = 127,
+ [0][0][1][0][RTW89_ACMA][52] = 127,
+ [0][0][1][0][RTW89_CN][52] = 127,
+ [0][0][1][0][RTW89_UK][52] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 60,
+ [0][1][1][0][RTW89_ETSI][0] = 54,
+ [0][1][1][0][RTW89_MKK][0] = 54,
+ [0][1][1][0][RTW89_IC][0] = 34,
+ [0][1][1][0][RTW89_KCC][0] = 40,
+ [0][1][1][0][RTW89_ACMA][0] = 54,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 54,
+ [0][1][1][0][RTW89_FCC][2] = 60,
+ [0][1][1][0][RTW89_ETSI][2] = 54,
+ [0][1][1][0][RTW89_MKK][2] = 54,
+ [0][1][1][0][RTW89_IC][2] = 34,
+ [0][1][1][0][RTW89_KCC][2] = 40,
+ [0][1][1][0][RTW89_ACMA][2] = 54,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 54,
+ [0][1][1][0][RTW89_FCC][4] = 60,
+ [0][1][1][0][RTW89_ETSI][4] = 54,
+ [0][1][1][0][RTW89_MKK][4] = 54,
+ [0][1][1][0][RTW89_IC][4] = 34,
+ [0][1][1][0][RTW89_KCC][4] = 40,
+ [0][1][1][0][RTW89_ACMA][4] = 54,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 54,
+ [0][1][1][0][RTW89_FCC][6] = 60,
+ [0][1][1][0][RTW89_ETSI][6] = 54,
+ [0][1][1][0][RTW89_MKK][6] = 54,
+ [0][1][1][0][RTW89_IC][6] = 36,
+ [0][1][1][0][RTW89_KCC][6] = 60,
+ [0][1][1][0][RTW89_ACMA][6] = 54,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 54,
+ [0][1][1][0][RTW89_FCC][8] = 62,
+ [0][1][1][0][RTW89_ETSI][8] = 54,
+ [0][1][1][0][RTW89_MKK][8] = 52,
+ [0][1][1][0][RTW89_IC][8] = 52,
+ [0][1][1][0][RTW89_KCC][8] = 60,
+ [0][1][1][0][RTW89_ACMA][8] = 54,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 54,
+ [0][1][1][0][RTW89_FCC][10] = 62,
+ [0][1][1][0][RTW89_ETSI][10] = 54,
+ [0][1][1][0][RTW89_MKK][10] = 54,
+ [0][1][1][0][RTW89_IC][10] = 52,
+ [0][1][1][0][RTW89_KCC][10] = 60,
+ [0][1][1][0][RTW89_ACMA][10] = 54,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 54,
+ [0][1][1][0][RTW89_FCC][12] = 62,
+ [0][1][1][0][RTW89_ETSI][12] = 54,
+ [0][1][1][0][RTW89_MKK][12] = 54,
+ [0][1][1][0][RTW89_IC][12] = 52,
+ [0][1][1][0][RTW89_KCC][12] = 60,
+ [0][1][1][0][RTW89_ACMA][12] = 54,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 54,
+ [0][1][1][0][RTW89_FCC][14] = 60,
+ [0][1][1][0][RTW89_ETSI][14] = 54,
+ [0][1][1][0][RTW89_MKK][14] = 54,
+ [0][1][1][0][RTW89_IC][14] = 52,
+ [0][1][1][0][RTW89_KCC][14] = 60,
+ [0][1][1][0][RTW89_ACMA][14] = 54,
+ [0][1][1][0][RTW89_CN][14] = 46,
+ [0][1][1][0][RTW89_UK][14] = 54,
+ [0][1][1][0][RTW89_FCC][15] = 60,
+ [0][1][1][0][RTW89_ETSI][15] = 54,
+ [0][1][1][0][RTW89_MKK][15] = 70,
+ [0][1][1][0][RTW89_IC][15] = 60,
+ [0][1][1][0][RTW89_KCC][15] = 60,
+ [0][1][1][0][RTW89_ACMA][15] = 54,
+ [0][1][1][0][RTW89_CN][15] = 127,
+ [0][1][1][0][RTW89_UK][15] = 54,
+ [0][1][1][0][RTW89_FCC][17] = 60,
+ [0][1][1][0][RTW89_ETSI][17] = 54,
+ [0][1][1][0][RTW89_MKK][17] = 70,
+ [0][1][1][0][RTW89_IC][17] = 60,
+ [0][1][1][0][RTW89_KCC][17] = 60,
+ [0][1][1][0][RTW89_ACMA][17] = 54,
+ [0][1][1][0][RTW89_CN][17] = 127,
+ [0][1][1][0][RTW89_UK][17] = 54,
+ [0][1][1][0][RTW89_FCC][19] = 60,
+ [0][1][1][0][RTW89_ETSI][19] = 54,
+ [0][1][1][0][RTW89_MKK][19] = 70,
+ [0][1][1][0][RTW89_IC][19] = 60,
+ [0][1][1][0][RTW89_KCC][19] = 60,
+ [0][1][1][0][RTW89_ACMA][19] = 54,
+ [0][1][1][0][RTW89_CN][19] = 127,
+ [0][1][1][0][RTW89_UK][19] = 54,
+ [0][1][1][0][RTW89_FCC][21] = 60,
+ [0][1][1][0][RTW89_ETSI][21] = 54,
+ [0][1][1][0][RTW89_MKK][21] = 70,
+ [0][1][1][0][RTW89_IC][21] = 60,
+ [0][1][1][0][RTW89_KCC][21] = 60,
+ [0][1][1][0][RTW89_ACMA][21] = 54,
+ [0][1][1][0][RTW89_CN][21] = 127,
+ [0][1][1][0][RTW89_UK][21] = 54,
+ [0][1][1][0][RTW89_FCC][23] = 60,
+ [0][1][1][0][RTW89_ETSI][23] = 54,
+ [0][1][1][0][RTW89_MKK][23] = 70,
+ [0][1][1][0][RTW89_IC][23] = 60,
+ [0][1][1][0][RTW89_KCC][23] = 60,
+ [0][1][1][0][RTW89_ACMA][23] = 54,
+ [0][1][1][0][RTW89_CN][23] = 127,
+ [0][1][1][0][RTW89_UK][23] = 54,
+ [0][1][1][0][RTW89_FCC][25] = 60,
+ [0][1][1][0][RTW89_ETSI][25] = 54,
+ [0][1][1][0][RTW89_MKK][25] = 70,
+ [0][1][1][0][RTW89_IC][25] = 127,
+ [0][1][1][0][RTW89_KCC][25] = 60,
+ [0][1][1][0][RTW89_ACMA][25] = 127,
+ [0][1][1][0][RTW89_CN][25] = 127,
+ [0][1][1][0][RTW89_UK][25] = 54,
+ [0][1][1][0][RTW89_FCC][27] = 60,
+ [0][1][1][0][RTW89_ETSI][27] = 54,
+ [0][1][1][0][RTW89_MKK][27] = 70,
+ [0][1][1][0][RTW89_IC][27] = 127,
+ [0][1][1][0][RTW89_KCC][27] = 60,
+ [0][1][1][0][RTW89_ACMA][27] = 127,
+ [0][1][1][0][RTW89_CN][27] = 127,
+ [0][1][1][0][RTW89_UK][27] = 54,
+ [0][1][1][0][RTW89_FCC][29] = 60,
+ [0][1][1][0][RTW89_ETSI][29] = 54,
+ [0][1][1][0][RTW89_MKK][29] = 70,
+ [0][1][1][0][RTW89_IC][29] = 127,
+ [0][1][1][0][RTW89_KCC][29] = 60,
+ [0][1][1][0][RTW89_ACMA][29] = 127,
+ [0][1][1][0][RTW89_CN][29] = 127,
+ [0][1][1][0][RTW89_UK][29] = 54,
+ [0][1][1][0][RTW89_FCC][31] = 60,
+ [0][1][1][0][RTW89_ETSI][31] = 54,
+ [0][1][1][0][RTW89_MKK][31] = 70,
+ [0][1][1][0][RTW89_IC][31] = 60,
+ [0][1][1][0][RTW89_KCC][31] = 58,
+ [0][1][1][0][RTW89_ACMA][31] = 54,
+ [0][1][1][0][RTW89_CN][31] = 127,
+ [0][1][1][0][RTW89_UK][31] = 54,
+ [0][1][1][0][RTW89_FCC][33] = 60,
+ [0][1][1][0][RTW89_ETSI][33] = 54,
+ [0][1][1][0][RTW89_MKK][33] = 70,
+ [0][1][1][0][RTW89_IC][33] = 60,
+ [0][1][1][0][RTW89_KCC][33] = 58,
+ [0][1][1][0][RTW89_ACMA][33] = 54,
+ [0][1][1][0][RTW89_CN][33] = 127,
+ [0][1][1][0][RTW89_UK][33] = 54,
+ [0][1][1][0][RTW89_FCC][35] = 52,
+ [0][1][1][0][RTW89_ETSI][35] = 54,
+ [0][1][1][0][RTW89_MKK][35] = 70,
+ [0][1][1][0][RTW89_IC][35] = 52,
+ [0][1][1][0][RTW89_KCC][35] = 58,
+ [0][1][1][0][RTW89_ACMA][35] = 54,
+ [0][1][1][0][RTW89_CN][35] = 127,
+ [0][1][1][0][RTW89_UK][35] = 54,
+ [0][1][1][0][RTW89_FCC][37] = 62,
+ [0][1][1][0][RTW89_ETSI][37] = 127,
+ [0][1][1][0][RTW89_MKK][37] = 70,
+ [0][1][1][0][RTW89_IC][37] = 62,
+ [0][1][1][0][RTW89_KCC][37] = 58,
+ [0][1][1][0][RTW89_ACMA][37] = 64,
+ [0][1][1][0][RTW89_CN][37] = 127,
+ [0][1][1][0][RTW89_UK][37] = 52,
+ [0][1][1][0][RTW89_FCC][38] = 72,
+ [0][1][1][0][RTW89_ETSI][38] = 18,
+ [0][1][1][0][RTW89_MKK][38] = 127,
+ [0][1][1][0][RTW89_IC][38] = 72,
+ [0][1][1][0][RTW89_KCC][38] = 60,
+ [0][1][1][0][RTW89_ACMA][38] = 70,
+ [0][1][1][0][RTW89_CN][38] = 64,
+ [0][1][1][0][RTW89_UK][38] = 52,
+ [0][1][1][0][RTW89_FCC][40] = 72,
+ [0][1][1][0][RTW89_ETSI][40] = 18,
+ [0][1][1][0][RTW89_MKK][40] = 127,
+ [0][1][1][0][RTW89_IC][40] = 72,
+ [0][1][1][0][RTW89_KCC][40] = 60,
+ [0][1][1][0][RTW89_ACMA][40] = 70,
+ [0][1][1][0][RTW89_CN][40] = 64,
+ [0][1][1][0][RTW89_UK][40] = 52,
+ [0][1][1][0][RTW89_FCC][42] = 72,
+ [0][1][1][0][RTW89_ETSI][42] = 18,
+ [0][1][1][0][RTW89_MKK][42] = 127,
+ [0][1][1][0][RTW89_IC][42] = 72,
+ [0][1][1][0][RTW89_KCC][42] = 60,
+ [0][1][1][0][RTW89_ACMA][42] = 70,
+ [0][1][1][0][RTW89_CN][42] = 64,
+ [0][1][1][0][RTW89_UK][42] = 52,
+ [0][1][1][0][RTW89_FCC][44] = 72,
+ [0][1][1][0][RTW89_ETSI][44] = 18,
+ [0][1][1][0][RTW89_MKK][44] = 127,
+ [0][1][1][0][RTW89_IC][44] = 72,
+ [0][1][1][0][RTW89_KCC][44] = 60,
+ [0][1][1][0][RTW89_ACMA][44] = 70,
+ [0][1][1][0][RTW89_CN][44] = 60,
+ [0][1][1][0][RTW89_UK][44] = 52,
+ [0][1][1][0][RTW89_FCC][46] = 72,
+ [0][1][1][0][RTW89_ETSI][46] = 18,
+ [0][1][1][0][RTW89_MKK][46] = 127,
+ [0][1][1][0][RTW89_IC][46] = 72,
+ [0][1][1][0][RTW89_KCC][46] = 60,
+ [0][1][1][0][RTW89_ACMA][46] = 70,
+ [0][1][1][0][RTW89_CN][46] = 60,
+ [0][1][1][0][RTW89_UK][46] = 52,
+ [0][1][1][0][RTW89_FCC][48] = 48,
+ [0][1][1][0][RTW89_ETSI][48] = 127,
+ [0][1][1][0][RTW89_MKK][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_KCC][48] = 127,
+ [0][1][1][0][RTW89_ACMA][48] = 127,
+ [0][1][1][0][RTW89_CN][48] = 127,
+ [0][1][1][0][RTW89_UK][48] = 127,
+ [0][1][1][0][RTW89_FCC][50] = 48,
+ [0][1][1][0][RTW89_ETSI][50] = 127,
+ [0][1][1][0][RTW89_MKK][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_KCC][50] = 127,
+ [0][1][1][0][RTW89_ACMA][50] = 127,
+ [0][1][1][0][RTW89_CN][50] = 127,
+ [0][1][1][0][RTW89_UK][50] = 127,
+ [0][1][1][0][RTW89_FCC][52] = 48,
+ [0][1][1][0][RTW89_ETSI][52] = 127,
+ [0][1][1][0][RTW89_MKK][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_KCC][52] = 127,
+ [0][1][1][0][RTW89_ACMA][52] = 127,
+ [0][1][1][0][RTW89_CN][52] = 127,
+ [0][1][1][0][RTW89_UK][52] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 70,
+ [0][0][2][0][RTW89_ETSI][0] = 66,
+ [0][0][2][0][RTW89_MKK][0] = 68,
+ [0][0][2][0][RTW89_IC][0] = 60,
+ [0][0][2][0][RTW89_KCC][0] = 54,
+ [0][0][2][0][RTW89_ACMA][0] = 66,
+ [0][0][2][0][RTW89_CN][0] = 52,
+ [0][0][2][0][RTW89_UK][0] = 66,
+ [0][0][2][0][RTW89_FCC][2] = 72,
+ [0][0][2][0][RTW89_ETSI][2] = 66,
+ [0][0][2][0][RTW89_MKK][2] = 68,
+ [0][0][2][0][RTW89_IC][2] = 60,
+ [0][0][2][0][RTW89_KCC][2] = 54,
+ [0][0][2][0][RTW89_ACMA][2] = 66,
+ [0][0][2][0][RTW89_CN][2] = 52,
+ [0][0][2][0][RTW89_UK][2] = 66,
+ [0][0][2][0][RTW89_FCC][4] = 72,
+ [0][0][2][0][RTW89_ETSI][4] = 66,
+ [0][0][2][0][RTW89_MKK][4] = 68,
+ [0][0][2][0][RTW89_IC][4] = 60,
+ [0][0][2][0][RTW89_KCC][4] = 54,
+ [0][0][2][0][RTW89_ACMA][4] = 66,
+ [0][0][2][0][RTW89_CN][4] = 52,
+ [0][0][2][0][RTW89_UK][4] = 66,
+ [0][0][2][0][RTW89_FCC][6] = 72,
+ [0][0][2][0][RTW89_ETSI][6] = 66,
+ [0][0][2][0][RTW89_MKK][6] = 60,
+ [0][0][2][0][RTW89_IC][6] = 60,
+ [0][0][2][0][RTW89_KCC][6] = 68,
+ [0][0][2][0][RTW89_ACMA][6] = 66,
+ [0][0][2][0][RTW89_CN][6] = 52,
+ [0][0][2][0][RTW89_UK][6] = 66,
+ [0][0][2][0][RTW89_FCC][8] = 72,
+ [0][0][2][0][RTW89_ETSI][8] = 66,
+ [0][0][2][0][RTW89_MKK][8] = 58,
+ [0][0][2][0][RTW89_IC][8] = 64,
+ [0][0][2][0][RTW89_KCC][8] = 70,
+ [0][0][2][0][RTW89_ACMA][8] = 66,
+ [0][0][2][0][RTW89_CN][8] = 52,
+ [0][0][2][0][RTW89_UK][8] = 66,
+ [0][0][2][0][RTW89_FCC][10] = 72,
+ [0][0][2][0][RTW89_ETSI][10] = 66,
+ [0][0][2][0][RTW89_MKK][10] = 70,
+ [0][0][2][0][RTW89_IC][10] = 64,
+ [0][0][2][0][RTW89_KCC][10] = 70,
+ [0][0][2][0][RTW89_ACMA][10] = 66,
+ [0][0][2][0][RTW89_CN][10] = 52,
+ [0][0][2][0][RTW89_UK][10] = 66,
+ [0][0][2][0][RTW89_FCC][12] = 72,
+ [0][0][2][0][RTW89_ETSI][12] = 66,
+ [0][0][2][0][RTW89_MKK][12] = 70,
+ [0][0][2][0][RTW89_IC][12] = 64,
+ [0][0][2][0][RTW89_KCC][12] = 66,
+ [0][0][2][0][RTW89_ACMA][12] = 66,
+ [0][0][2][0][RTW89_CN][12] = 52,
+ [0][0][2][0][RTW89_UK][12] = 66,
+ [0][0][2][0][RTW89_FCC][14] = 68,
+ [0][0][2][0][RTW89_ETSI][14] = 66,
+ [0][0][2][0][RTW89_MKK][14] = 70,
+ [0][0][2][0][RTW89_IC][14] = 64,
+ [0][0][2][0][RTW89_KCC][14] = 66,
+ [0][0][2][0][RTW89_ACMA][14] = 66,
+ [0][0][2][0][RTW89_CN][14] = 52,
+ [0][0][2][0][RTW89_UK][14] = 66,
+ [0][0][2][0][RTW89_FCC][15] = 70,
+ [0][0][2][0][RTW89_ETSI][15] = 66,
+ [0][0][2][0][RTW89_MKK][15] = 70,
+ [0][0][2][0][RTW89_IC][15] = 70,
+ [0][0][2][0][RTW89_KCC][15] = 70,
+ [0][0][2][0][RTW89_ACMA][15] = 66,
+ [0][0][2][0][RTW89_CN][15] = 127,
+ [0][0][2][0][RTW89_UK][15] = 66,
+ [0][0][2][0][RTW89_FCC][17] = 72,
+ [0][0][2][0][RTW89_ETSI][17] = 66,
+ [0][0][2][0][RTW89_MKK][17] = 70,
+ [0][0][2][0][RTW89_IC][17] = 72,
+ [0][0][2][0][RTW89_KCC][17] = 70,
+ [0][0][2][0][RTW89_ACMA][17] = 66,
+ [0][0][2][0][RTW89_CN][17] = 127,
+ [0][0][2][0][RTW89_UK][17] = 66,
+ [0][0][2][0][RTW89_FCC][19] = 72,
+ [0][0][2][0][RTW89_ETSI][19] = 66,
+ [0][0][2][0][RTW89_MKK][19] = 70,
+ [0][0][2][0][RTW89_IC][19] = 72,
+ [0][0][2][0][RTW89_KCC][19] = 70,
+ [0][0][2][0][RTW89_ACMA][19] = 66,
+ [0][0][2][0][RTW89_CN][19] = 127,
+ [0][0][2][0][RTW89_UK][19] = 66,
+ [0][0][2][0][RTW89_FCC][21] = 72,
+ [0][0][2][0][RTW89_ETSI][21] = 66,
+ [0][0][2][0][RTW89_MKK][21] = 70,
+ [0][0][2][0][RTW89_IC][21] = 72,
+ [0][0][2][0][RTW89_KCC][21] = 70,
+ [0][0][2][0][RTW89_ACMA][21] = 66,
+ [0][0][2][0][RTW89_CN][21] = 127,
+ [0][0][2][0][RTW89_UK][21] = 66,
+ [0][0][2][0][RTW89_FCC][23] = 72,
+ [0][0][2][0][RTW89_ETSI][23] = 66,
+ [0][0][2][0][RTW89_MKK][23] = 70,
+ [0][0][2][0][RTW89_IC][23] = 72,
+ [0][0][2][0][RTW89_KCC][23] = 70,
+ [0][0][2][0][RTW89_ACMA][23] = 66,
+ [0][0][2][0][RTW89_CN][23] = 127,
+ [0][0][2][0][RTW89_UK][23] = 66,
+ [0][0][2][0][RTW89_FCC][25] = 72,
+ [0][0][2][0][RTW89_ETSI][25] = 66,
+ [0][0][2][0][RTW89_MKK][25] = 70,
+ [0][0][2][0][RTW89_IC][25] = 127,
+ [0][0][2][0][RTW89_KCC][25] = 70,
+ [0][0][2][0][RTW89_ACMA][25] = 127,
+ [0][0][2][0][RTW89_CN][25] = 127,
+ [0][0][2][0][RTW89_UK][25] = 66,
+ [0][0][2][0][RTW89_FCC][27] = 72,
+ [0][0][2][0][RTW89_ETSI][27] = 66,
+ [0][0][2][0][RTW89_MKK][27] = 70,
+ [0][0][2][0][RTW89_IC][27] = 127,
+ [0][0][2][0][RTW89_KCC][27] = 70,
+ [0][0][2][0][RTW89_ACMA][27] = 127,
+ [0][0][2][0][RTW89_CN][27] = 127,
+ [0][0][2][0][RTW89_UK][27] = 66,
+ [0][0][2][0][RTW89_FCC][29] = 72,
+ [0][0][2][0][RTW89_ETSI][29] = 66,
+ [0][0][2][0][RTW89_MKK][29] = 70,
+ [0][0][2][0][RTW89_IC][29] = 127,
+ [0][0][2][0][RTW89_KCC][29] = 70,
+ [0][0][2][0][RTW89_ACMA][29] = 127,
+ [0][0][2][0][RTW89_CN][29] = 127,
+ [0][0][2][0][RTW89_UK][29] = 66,
+ [0][0][2][0][RTW89_FCC][31] = 72,
+ [0][0][2][0][RTW89_ETSI][31] = 66,
+ [0][0][2][0][RTW89_MKK][31] = 70,
+ [0][0][2][0][RTW89_IC][31] = 72,
+ [0][0][2][0][RTW89_KCC][31] = 70,
+ [0][0][2][0][RTW89_ACMA][31] = 66,
+ [0][0][2][0][RTW89_CN][31] = 127,
+ [0][0][2][0][RTW89_UK][31] = 66,
+ [0][0][2][0][RTW89_FCC][33] = 72,
+ [0][0][2][0][RTW89_ETSI][33] = 66,
+ [0][0][2][0][RTW89_MKK][33] = 70,
+ [0][0][2][0][RTW89_IC][33] = 72,
+ [0][0][2][0][RTW89_KCC][33] = 70,
+ [0][0][2][0][RTW89_ACMA][33] = 66,
+ [0][0][2][0][RTW89_CN][33] = 127,
+ [0][0][2][0][RTW89_UK][33] = 66,
+ [0][0][2][0][RTW89_FCC][35] = 56,
+ [0][0][2][0][RTW89_ETSI][35] = 66,
+ [0][0][2][0][RTW89_MKK][35] = 70,
+ [0][0][2][0][RTW89_IC][35] = 56,
+ [0][0][2][0][RTW89_KCC][35] = 70,
+ [0][0][2][0][RTW89_ACMA][35] = 66,
+ [0][0][2][0][RTW89_CN][35] = 127,
+ [0][0][2][0][RTW89_UK][35] = 66,
+ [0][0][2][0][RTW89_FCC][37] = 72,
+ [0][0][2][0][RTW89_ETSI][37] = 127,
+ [0][0][2][0][RTW89_MKK][37] = 70,
+ [0][0][2][0][RTW89_IC][37] = 72,
+ [0][0][2][0][RTW89_KCC][37] = 70,
+ [0][0][2][0][RTW89_ACMA][37] = 70,
+ [0][0][2][0][RTW89_CN][37] = 127,
+ [0][0][2][0][RTW89_UK][37] = 64,
+ [0][0][2][0][RTW89_FCC][38] = 72,
+ [0][0][2][0][RTW89_ETSI][38] = 30,
+ [0][0][2][0][RTW89_MKK][38] = 127,
+ [0][0][2][0][RTW89_IC][38] = 72,
+ [0][0][2][0][RTW89_KCC][38] = 58,
+ [0][0][2][0][RTW89_ACMA][38] = 70,
+ [0][0][2][0][RTW89_CN][38] = 68,
+ [0][0][2][0][RTW89_UK][38] = 64,
+ [0][0][2][0][RTW89_FCC][40] = 72,
+ [0][0][2][0][RTW89_ETSI][40] = 30,
+ [0][0][2][0][RTW89_MKK][40] = 127,
+ [0][0][2][0][RTW89_IC][40] = 72,
+ [0][0][2][0][RTW89_KCC][40] = 58,
+ [0][0][2][0][RTW89_ACMA][40] = 70,
+ [0][0][2][0][RTW89_CN][40] = 68,
+ [0][0][2][0][RTW89_UK][40] = 64,
+ [0][0][2][0][RTW89_FCC][42] = 72,
+ [0][0][2][0][RTW89_ETSI][42] = 30,
+ [0][0][2][0][RTW89_MKK][42] = 127,
+ [0][0][2][0][RTW89_IC][42] = 72,
+ [0][0][2][0][RTW89_KCC][42] = 58,
+ [0][0][2][0][RTW89_ACMA][42] = 70,
+ [0][0][2][0][RTW89_CN][42] = 68,
+ [0][0][2][0][RTW89_UK][42] = 64,
+ [0][0][2][0][RTW89_FCC][44] = 72,
+ [0][0][2][0][RTW89_ETSI][44] = 30,
+ [0][0][2][0][RTW89_MKK][44] = 127,
+ [0][0][2][0][RTW89_IC][44] = 72,
+ [0][0][2][0][RTW89_KCC][44] = 58,
+ [0][0][2][0][RTW89_ACMA][44] = 70,
+ [0][0][2][0][RTW89_CN][44] = 68,
+ [0][0][2][0][RTW89_UK][44] = 64,
+ [0][0][2][0][RTW89_FCC][46] = 72,
+ [0][0][2][0][RTW89_ETSI][46] = 30,
+ [0][0][2][0][RTW89_MKK][46] = 127,
+ [0][0][2][0][RTW89_IC][46] = 72,
+ [0][0][2][0][RTW89_KCC][46] = 58,
+ [0][0][2][0][RTW89_ACMA][46] = 70,
+ [0][0][2][0][RTW89_CN][46] = 68,
+ [0][0][2][0][RTW89_UK][46] = 64,
+ [0][0][2][0][RTW89_FCC][48] = 72,
+ [0][0][2][0][RTW89_ETSI][48] = 127,
+ [0][0][2][0][RTW89_MKK][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_KCC][48] = 127,
+ [0][0][2][0][RTW89_ACMA][48] = 127,
+ [0][0][2][0][RTW89_CN][48] = 127,
+ [0][0][2][0][RTW89_UK][48] = 127,
+ [0][0][2][0][RTW89_FCC][50] = 72,
+ [0][0][2][0][RTW89_ETSI][50] = 127,
+ [0][0][2][0][RTW89_MKK][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_KCC][50] = 127,
+ [0][0][2][0][RTW89_ACMA][50] = 127,
+ [0][0][2][0][RTW89_CN][50] = 127,
+ [0][0][2][0][RTW89_UK][50] = 127,
+ [0][0][2][0][RTW89_FCC][52] = 72,
+ [0][0][2][0][RTW89_ETSI][52] = 127,
+ [0][0][2][0][RTW89_MKK][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_KCC][52] = 127,
+ [0][0][2][0][RTW89_ACMA][52] = 127,
+ [0][0][2][0][RTW89_CN][52] = 127,
+ [0][0][2][0][RTW89_UK][52] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 60,
+ [0][1][2][0][RTW89_ETSI][0] = 54,
+ [0][1][2][0][RTW89_MKK][0] = 54,
+ [0][1][2][0][RTW89_IC][0] = 36,
+ [0][1][2][0][RTW89_KCC][0] = 40,
+ [0][1][2][0][RTW89_ACMA][0] = 54,
+ [0][1][2][0][RTW89_CN][0] = 40,
+ [0][1][2][0][RTW89_UK][0] = 54,
+ [0][1][2][0][RTW89_FCC][2] = 62,
+ [0][1][2][0][RTW89_ETSI][2] = 54,
+ [0][1][2][0][RTW89_MKK][2] = 54,
+ [0][1][2][0][RTW89_IC][2] = 36,
+ [0][1][2][0][RTW89_KCC][2] = 40,
+ [0][1][2][0][RTW89_ACMA][2] = 54,
+ [0][1][2][0][RTW89_CN][2] = 40,
+ [0][1][2][0][RTW89_UK][2] = 54,
+ [0][1][2][0][RTW89_FCC][4] = 62,
+ [0][1][2][0][RTW89_ETSI][4] = 54,
+ [0][1][2][0][RTW89_MKK][4] = 54,
+ [0][1][2][0][RTW89_IC][4] = 36,
+ [0][1][2][0][RTW89_KCC][4] = 40,
+ [0][1][2][0][RTW89_ACMA][4] = 54,
+ [0][1][2][0][RTW89_CN][4] = 40,
+ [0][1][2][0][RTW89_UK][4] = 54,
+ [0][1][2][0][RTW89_FCC][6] = 62,
+ [0][1][2][0][RTW89_ETSI][6] = 54,
+ [0][1][2][0][RTW89_MKK][6] = 50,
+ [0][1][2][0][RTW89_IC][6] = 38,
+ [0][1][2][0][RTW89_KCC][6] = 64,
+ [0][1][2][0][RTW89_ACMA][6] = 54,
+ [0][1][2][0][RTW89_CN][6] = 40,
+ [0][1][2][0][RTW89_UK][6] = 54,
+ [0][1][2][0][RTW89_FCC][8] = 62,
+ [0][1][2][0][RTW89_ETSI][8] = 54,
+ [0][1][2][0][RTW89_MKK][8] = 42,
+ [0][1][2][0][RTW89_IC][8] = 52,
+ [0][1][2][0][RTW89_KCC][8] = 62,
+ [0][1][2][0][RTW89_ACMA][8] = 54,
+ [0][1][2][0][RTW89_CN][8] = 40,
+ [0][1][2][0][RTW89_UK][8] = 54,
+ [0][1][2][0][RTW89_FCC][10] = 62,
+ [0][1][2][0][RTW89_ETSI][10] = 54,
+ [0][1][2][0][RTW89_MKK][10] = 54,
+ [0][1][2][0][RTW89_IC][10] = 52,
+ [0][1][2][0][RTW89_KCC][10] = 62,
+ [0][1][2][0][RTW89_ACMA][10] = 54,
+ [0][1][2][0][RTW89_CN][10] = 40,
+ [0][1][2][0][RTW89_UK][10] = 54,
+ [0][1][2][0][RTW89_FCC][12] = 62,
+ [0][1][2][0][RTW89_ETSI][12] = 54,
+ [0][1][2][0][RTW89_MKK][12] = 54,
+ [0][1][2][0][RTW89_IC][12] = 52,
+ [0][1][2][0][RTW89_KCC][12] = 62,
+ [0][1][2][0][RTW89_ACMA][12] = 54,
+ [0][1][2][0][RTW89_CN][12] = 40,
+ [0][1][2][0][RTW89_UK][12] = 54,
+ [0][1][2][0][RTW89_FCC][14] = 62,
+ [0][1][2][0][RTW89_ETSI][14] = 54,
+ [0][1][2][0][RTW89_MKK][14] = 54,
+ [0][1][2][0][RTW89_IC][14] = 52,
+ [0][1][2][0][RTW89_KCC][14] = 62,
+ [0][1][2][0][RTW89_ACMA][14] = 54,
+ [0][1][2][0][RTW89_CN][14] = 40,
+ [0][1][2][0][RTW89_UK][14] = 54,
+ [0][1][2][0][RTW89_FCC][15] = 60,
+ [0][1][2][0][RTW89_ETSI][15] = 54,
+ [0][1][2][0][RTW89_MKK][15] = 68,
+ [0][1][2][0][RTW89_IC][15] = 60,
+ [0][1][2][0][RTW89_KCC][15] = 64,
+ [0][1][2][0][RTW89_ACMA][15] = 54,
+ [0][1][2][0][RTW89_CN][15] = 127,
+ [0][1][2][0][RTW89_UK][15] = 54,
+ [0][1][2][0][RTW89_FCC][17] = 62,
+ [0][1][2][0][RTW89_ETSI][17] = 54,
+ [0][1][2][0][RTW89_MKK][17] = 68,
+ [0][1][2][0][RTW89_IC][17] = 62,
+ [0][1][2][0][RTW89_KCC][17] = 64,
+ [0][1][2][0][RTW89_ACMA][17] = 54,
+ [0][1][2][0][RTW89_CN][17] = 127,
+ [0][1][2][0][RTW89_UK][17] = 54,
+ [0][1][2][0][RTW89_FCC][19] = 62,
+ [0][1][2][0][RTW89_ETSI][19] = 54,
+ [0][1][2][0][RTW89_MKK][19] = 68,
+ [0][1][2][0][RTW89_IC][19] = 62,
+ [0][1][2][0][RTW89_KCC][19] = 64,
+ [0][1][2][0][RTW89_ACMA][19] = 54,
+ [0][1][2][0][RTW89_CN][19] = 127,
+ [0][1][2][0][RTW89_UK][19] = 54,
+ [0][1][2][0][RTW89_FCC][21] = 62,
+ [0][1][2][0][RTW89_ETSI][21] = 54,
+ [0][1][2][0][RTW89_MKK][21] = 68,
+ [0][1][2][0][RTW89_IC][21] = 62,
+ [0][1][2][0][RTW89_KCC][21] = 64,
+ [0][1][2][0][RTW89_ACMA][21] = 54,
+ [0][1][2][0][RTW89_CN][21] = 127,
+ [0][1][2][0][RTW89_UK][21] = 54,
+ [0][1][2][0][RTW89_FCC][23] = 62,
+ [0][1][2][0][RTW89_ETSI][23] = 54,
+ [0][1][2][0][RTW89_MKK][23] = 68,
+ [0][1][2][0][RTW89_IC][23] = 62,
+ [0][1][2][0][RTW89_KCC][23] = 64,
+ [0][1][2][0][RTW89_ACMA][23] = 54,
+ [0][1][2][0][RTW89_CN][23] = 127,
+ [0][1][2][0][RTW89_UK][23] = 54,
+ [0][1][2][0][RTW89_FCC][25] = 62,
+ [0][1][2][0][RTW89_ETSI][25] = 54,
+ [0][1][2][0][RTW89_MKK][25] = 68,
+ [0][1][2][0][RTW89_IC][25] = 127,
+ [0][1][2][0][RTW89_KCC][25] = 64,
+ [0][1][2][0][RTW89_ACMA][25] = 127,
+ [0][1][2][0][RTW89_CN][25] = 127,
+ [0][1][2][0][RTW89_UK][25] = 54,
+ [0][1][2][0][RTW89_FCC][27] = 62,
+ [0][1][2][0][RTW89_ETSI][27] = 54,
+ [0][1][2][0][RTW89_MKK][27] = 68,
+ [0][1][2][0][RTW89_IC][27] = 127,
+ [0][1][2][0][RTW89_KCC][27] = 64,
+ [0][1][2][0][RTW89_ACMA][27] = 127,
+ [0][1][2][0][RTW89_CN][27] = 127,
+ [0][1][2][0][RTW89_UK][27] = 54,
+ [0][1][2][0][RTW89_FCC][29] = 62,
+ [0][1][2][0][RTW89_ETSI][29] = 54,
+ [0][1][2][0][RTW89_MKK][29] = 68,
+ [0][1][2][0][RTW89_IC][29] = 127,
+ [0][1][2][0][RTW89_KCC][29] = 64,
+ [0][1][2][0][RTW89_ACMA][29] = 127,
+ [0][1][2][0][RTW89_CN][29] = 127,
+ [0][1][2][0][RTW89_UK][29] = 54,
+ [0][1][2][0][RTW89_FCC][31] = 62,
+ [0][1][2][0][RTW89_ETSI][31] = 54,
+ [0][1][2][0][RTW89_MKK][31] = 68,
+ [0][1][2][0][RTW89_IC][31] = 62,
+ [0][1][2][0][RTW89_KCC][31] = 62,
+ [0][1][2][0][RTW89_ACMA][31] = 54,
+ [0][1][2][0][RTW89_CN][31] = 127,
+ [0][1][2][0][RTW89_UK][31] = 54,
+ [0][1][2][0][RTW89_FCC][33] = 62,
+ [0][1][2][0][RTW89_ETSI][33] = 54,
+ [0][1][2][0][RTW89_MKK][33] = 68,
+ [0][1][2][0][RTW89_IC][33] = 62,
+ [0][1][2][0][RTW89_KCC][33] = 62,
+ [0][1][2][0][RTW89_ACMA][33] = 54,
+ [0][1][2][0][RTW89_CN][33] = 127,
+ [0][1][2][0][RTW89_UK][33] = 54,
+ [0][1][2][0][RTW89_FCC][35] = 46,
+ [0][1][2][0][RTW89_ETSI][35] = 54,
+ [0][1][2][0][RTW89_MKK][35] = 68,
+ [0][1][2][0][RTW89_IC][35] = 46,
+ [0][1][2][0][RTW89_KCC][35] = 62,
+ [0][1][2][0][RTW89_ACMA][35] = 54,
+ [0][1][2][0][RTW89_CN][35] = 127,
+ [0][1][2][0][RTW89_UK][35] = 54,
+ [0][1][2][0][RTW89_FCC][37] = 64,
+ [0][1][2][0][RTW89_ETSI][37] = 127,
+ [0][1][2][0][RTW89_MKK][37] = 68,
+ [0][1][2][0][RTW89_IC][37] = 64,
+ [0][1][2][0][RTW89_KCC][37] = 62,
+ [0][1][2][0][RTW89_ACMA][37] = 64,
+ [0][1][2][0][RTW89_CN][37] = 127,
+ [0][1][2][0][RTW89_UK][37] = 52,
+ [0][1][2][0][RTW89_FCC][38] = 72,
+ [0][1][2][0][RTW89_ETSI][38] = 18,
+ [0][1][2][0][RTW89_MKK][38] = 127,
+ [0][1][2][0][RTW89_IC][38] = 72,
+ [0][1][2][0][RTW89_KCC][38] = 56,
+ [0][1][2][0][RTW89_ACMA][38] = 70,
+ [0][1][2][0][RTW89_CN][38] = 68,
+ [0][1][2][0][RTW89_UK][38] = 52,
+ [0][1][2][0][RTW89_FCC][40] = 72,
+ [0][1][2][0][RTW89_ETSI][40] = 18,
+ [0][1][2][0][RTW89_MKK][40] = 127,
+ [0][1][2][0][RTW89_IC][40] = 72,
+ [0][1][2][0][RTW89_KCC][40] = 56,
+ [0][1][2][0][RTW89_ACMA][40] = 70,
+ [0][1][2][0][RTW89_CN][40] = 68,
+ [0][1][2][0][RTW89_UK][40] = 52,
+ [0][1][2][0][RTW89_FCC][42] = 72,
+ [0][1][2][0][RTW89_ETSI][42] = 18,
+ [0][1][2][0][RTW89_MKK][42] = 127,
+ [0][1][2][0][RTW89_IC][42] = 72,
+ [0][1][2][0][RTW89_KCC][42] = 56,
+ [0][1][2][0][RTW89_ACMA][42] = 70,
+ [0][1][2][0][RTW89_CN][42] = 68,
+ [0][1][2][0][RTW89_UK][42] = 52,
+ [0][1][2][0][RTW89_FCC][44] = 72,
+ [0][1][2][0][RTW89_ETSI][44] = 18,
+ [0][1][2][0][RTW89_MKK][44] = 127,
+ [0][1][2][0][RTW89_IC][44] = 72,
+ [0][1][2][0][RTW89_KCC][44] = 56,
+ [0][1][2][0][RTW89_ACMA][44] = 70,
+ [0][1][2][0][RTW89_CN][44] = 68,
+ [0][1][2][0][RTW89_UK][44] = 52,
+ [0][1][2][0][RTW89_FCC][46] = 72,
+ [0][1][2][0][RTW89_ETSI][46] = 18,
+ [0][1][2][0][RTW89_MKK][46] = 127,
+ [0][1][2][0][RTW89_IC][46] = 72,
+ [0][1][2][0][RTW89_KCC][46] = 56,
+ [0][1][2][0][RTW89_ACMA][46] = 70,
+ [0][1][2][0][RTW89_CN][46] = 68,
+ [0][1][2][0][RTW89_UK][46] = 52,
+ [0][1][2][0][RTW89_FCC][48] = 48,
+ [0][1][2][0][RTW89_ETSI][48] = 127,
+ [0][1][2][0][RTW89_MKK][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_KCC][48] = 127,
+ [0][1][2][0][RTW89_ACMA][48] = 127,
+ [0][1][2][0][RTW89_CN][48] = 127,
+ [0][1][2][0][RTW89_UK][48] = 127,
+ [0][1][2][0][RTW89_FCC][50] = 50,
+ [0][1][2][0][RTW89_ETSI][50] = 127,
+ [0][1][2][0][RTW89_MKK][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_KCC][50] = 127,
+ [0][1][2][0][RTW89_ACMA][50] = 127,
+ [0][1][2][0][RTW89_CN][50] = 127,
+ [0][1][2][0][RTW89_UK][50] = 127,
+ [0][1][2][0][RTW89_FCC][52] = 48,
+ [0][1][2][0][RTW89_ETSI][52] = 127,
+ [0][1][2][0][RTW89_MKK][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_KCC][52] = 127,
+ [0][1][2][0][RTW89_ACMA][52] = 127,
+ [0][1][2][0][RTW89_CN][52] = 127,
+ [0][1][2][0][RTW89_UK][52] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 60,
+ [0][1][2][1][RTW89_ETSI][0] = 40,
+ [0][1][2][1][RTW89_MKK][0] = 54,
+ [0][1][2][1][RTW89_IC][0] = 40,
+ [0][1][2][1][RTW89_KCC][0] = 40,
+ [0][1][2][1][RTW89_ACMA][0] = 40,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 40,
+ [0][1][2][1][RTW89_FCC][2] = 62,
+ [0][1][2][1][RTW89_ETSI][2] = 40,
+ [0][1][2][1][RTW89_MKK][2] = 54,
+ [0][1][2][1][RTW89_IC][2] = 40,
+ [0][1][2][1][RTW89_KCC][2] = 40,
+ [0][1][2][1][RTW89_ACMA][2] = 40,
+ [0][1][2][1][RTW89_CN][2] = 36,
+ [0][1][2][1][RTW89_UK][2] = 40,
+ [0][1][2][1][RTW89_FCC][4] = 62,
+ [0][1][2][1][RTW89_ETSI][4] = 40,
+ [0][1][2][1][RTW89_MKK][4] = 54,
+ [0][1][2][1][RTW89_IC][4] = 40,
+ [0][1][2][1][RTW89_KCC][4] = 40,
+ [0][1][2][1][RTW89_ACMA][4] = 40,
+ [0][1][2][1][RTW89_CN][4] = 36,
+ [0][1][2][1][RTW89_UK][4] = 40,
+ [0][1][2][1][RTW89_FCC][6] = 62,
+ [0][1][2][1][RTW89_ETSI][6] = 40,
+ [0][1][2][1][RTW89_MKK][6] = 50,
+ [0][1][2][1][RTW89_IC][6] = 40,
+ [0][1][2][1][RTW89_KCC][6] = 64,
+ [0][1][2][1][RTW89_ACMA][6] = 40,
+ [0][1][2][1][RTW89_CN][6] = 36,
+ [0][1][2][1][RTW89_UK][6] = 40,
+ [0][1][2][1][RTW89_FCC][8] = 62,
+ [0][1][2][1][RTW89_ETSI][8] = 40,
+ [0][1][2][1][RTW89_MKK][8] = 42,
+ [0][1][2][1][RTW89_IC][8] = 40,
+ [0][1][2][1][RTW89_KCC][8] = 62,
+ [0][1][2][1][RTW89_ACMA][8] = 40,
+ [0][1][2][1][RTW89_CN][8] = 36,
+ [0][1][2][1][RTW89_UK][8] = 40,
+ [0][1][2][1][RTW89_FCC][10] = 62,
+ [0][1][2][1][RTW89_ETSI][10] = 40,
+ [0][1][2][1][RTW89_MKK][10] = 54,
+ [0][1][2][1][RTW89_IC][10] = 40,
+ [0][1][2][1][RTW89_KCC][10] = 62,
+ [0][1][2][1][RTW89_ACMA][10] = 40,
+ [0][1][2][1][RTW89_CN][10] = 36,
+ [0][1][2][1][RTW89_UK][10] = 40,
+ [0][1][2][1][RTW89_FCC][12] = 62,
+ [0][1][2][1][RTW89_ETSI][12] = 40,
+ [0][1][2][1][RTW89_MKK][12] = 54,
+ [0][1][2][1][RTW89_IC][12] = 40,
+ [0][1][2][1][RTW89_KCC][12] = 62,
+ [0][1][2][1][RTW89_ACMA][12] = 40,
+ [0][1][2][1][RTW89_CN][12] = 36,
+ [0][1][2][1][RTW89_UK][12] = 40,
+ [0][1][2][1][RTW89_FCC][14] = 62,
+ [0][1][2][1][RTW89_ETSI][14] = 40,
+ [0][1][2][1][RTW89_MKK][14] = 54,
+ [0][1][2][1][RTW89_IC][14] = 40,
+ [0][1][2][1][RTW89_KCC][14] = 62,
+ [0][1][2][1][RTW89_ACMA][14] = 40,
+ [0][1][2][1][RTW89_CN][14] = 36,
+ [0][1][2][1][RTW89_UK][14] = 40,
+ [0][1][2][1][RTW89_FCC][15] = 60,
+ [0][1][2][1][RTW89_ETSI][15] = 40,
+ [0][1][2][1][RTW89_MKK][15] = 68,
+ [0][1][2][1][RTW89_IC][15] = 60,
+ [0][1][2][1][RTW89_KCC][15] = 64,
+ [0][1][2][1][RTW89_ACMA][15] = 40,
+ [0][1][2][1][RTW89_CN][15] = 127,
+ [0][1][2][1][RTW89_UK][15] = 40,
+ [0][1][2][1][RTW89_FCC][17] = 62,
+ [0][1][2][1][RTW89_ETSI][17] = 40,
+ [0][1][2][1][RTW89_MKK][17] = 68,
+ [0][1][2][1][RTW89_IC][17] = 62,
+ [0][1][2][1][RTW89_KCC][17] = 64,
+ [0][1][2][1][RTW89_ACMA][17] = 40,
+ [0][1][2][1][RTW89_CN][17] = 127,
+ [0][1][2][1][RTW89_UK][17] = 40,
+ [0][1][2][1][RTW89_FCC][19] = 62,
+ [0][1][2][1][RTW89_ETSI][19] = 40,
+ [0][1][2][1][RTW89_MKK][19] = 68,
+ [0][1][2][1][RTW89_IC][19] = 62,
+ [0][1][2][1][RTW89_KCC][19] = 64,
+ [0][1][2][1][RTW89_ACMA][19] = 40,
+ [0][1][2][1][RTW89_CN][19] = 127,
+ [0][1][2][1][RTW89_UK][19] = 40,
+ [0][1][2][1][RTW89_FCC][21] = 62,
+ [0][1][2][1][RTW89_ETSI][21] = 40,
+ [0][1][2][1][RTW89_MKK][21] = 68,
+ [0][1][2][1][RTW89_IC][21] = 62,
+ [0][1][2][1][RTW89_KCC][21] = 64,
+ [0][1][2][1][RTW89_ACMA][21] = 40,
+ [0][1][2][1][RTW89_CN][21] = 127,
+ [0][1][2][1][RTW89_UK][21] = 40,
+ [0][1][2][1][RTW89_FCC][23] = 62,
+ [0][1][2][1][RTW89_ETSI][23] = 40,
+ [0][1][2][1][RTW89_MKK][23] = 68,
+ [0][1][2][1][RTW89_IC][23] = 62,
+ [0][1][2][1][RTW89_KCC][23] = 64,
+ [0][1][2][1][RTW89_ACMA][23] = 40,
+ [0][1][2][1][RTW89_CN][23] = 127,
+ [0][1][2][1][RTW89_UK][23] = 40,
+ [0][1][2][1][RTW89_FCC][25] = 46,
+ [0][1][2][1][RTW89_ETSI][25] = 40,
+ [0][1][2][1][RTW89_MKK][25] = 68,
+ [0][1][2][1][RTW89_IC][25] = 127,
+ [0][1][2][1][RTW89_KCC][25] = 64,
+ [0][1][2][1][RTW89_ACMA][25] = 127,
+ [0][1][2][1][RTW89_CN][25] = 127,
+ [0][1][2][1][RTW89_UK][25] = 40,
+ [0][1][2][1][RTW89_FCC][27] = 46,
+ [0][1][2][1][RTW89_ETSI][27] = 40,
+ [0][1][2][1][RTW89_MKK][27] = 68,
+ [0][1][2][1][RTW89_IC][27] = 127,
+ [0][1][2][1][RTW89_KCC][27] = 64,
+ [0][1][2][1][RTW89_ACMA][27] = 127,
+ [0][1][2][1][RTW89_CN][27] = 127,
+ [0][1][2][1][RTW89_UK][27] = 40,
+ [0][1][2][1][RTW89_FCC][29] = 46,
+ [0][1][2][1][RTW89_ETSI][29] = 40,
+ [0][1][2][1][RTW89_MKK][29] = 68,
+ [0][1][2][1][RTW89_IC][29] = 127,
+ [0][1][2][1][RTW89_KCC][29] = 64,
+ [0][1][2][1][RTW89_ACMA][29] = 127,
+ [0][1][2][1][RTW89_CN][29] = 127,
+ [0][1][2][1][RTW89_UK][29] = 40,
+ [0][1][2][1][RTW89_FCC][31] = 46,
+ [0][1][2][1][RTW89_ETSI][31] = 40,
+ [0][1][2][1][RTW89_MKK][31] = 68,
+ [0][1][2][1][RTW89_IC][31] = 46,
+ [0][1][2][1][RTW89_KCC][31] = 62,
+ [0][1][2][1][RTW89_ACMA][31] = 40,
+ [0][1][2][1][RTW89_CN][31] = 127,
+ [0][1][2][1][RTW89_UK][31] = 40,
+ [0][1][2][1][RTW89_FCC][33] = 46,
+ [0][1][2][1][RTW89_ETSI][33] = 40,
+ [0][1][2][1][RTW89_MKK][33] = 68,
+ [0][1][2][1][RTW89_IC][33] = 46,
+ [0][1][2][1][RTW89_KCC][33] = 62,
+ [0][1][2][1][RTW89_ACMA][33] = 40,
+ [0][1][2][1][RTW89_CN][33] = 127,
+ [0][1][2][1][RTW89_UK][33] = 40,
+ [0][1][2][1][RTW89_FCC][35] = 46,
+ [0][1][2][1][RTW89_ETSI][35] = 40,
+ [0][1][2][1][RTW89_MKK][35] = 68,
+ [0][1][2][1][RTW89_IC][35] = 46,
+ [0][1][2][1][RTW89_KCC][35] = 62,
+ [0][1][2][1][RTW89_ACMA][35] = 40,
+ [0][1][2][1][RTW89_CN][35] = 127,
+ [0][1][2][1][RTW89_UK][35] = 40,
+ [0][1][2][1][RTW89_FCC][37] = 64,
+ [0][1][2][1][RTW89_ETSI][37] = 127,
+ [0][1][2][1][RTW89_MKK][37] = 68,
+ [0][1][2][1][RTW89_IC][37] = 64,
+ [0][1][2][1][RTW89_KCC][37] = 62,
+ [0][1][2][1][RTW89_ACMA][37] = 64,
+ [0][1][2][1][RTW89_CN][37] = 127,
+ [0][1][2][1][RTW89_UK][37] = 40,
+ [0][1][2][1][RTW89_FCC][38] = 72,
+ [0][1][2][1][RTW89_ETSI][38] = 6,
+ [0][1][2][1][RTW89_MKK][38] = 127,
+ [0][1][2][1][RTW89_IC][38] = 72,
+ [0][1][2][1][RTW89_KCC][38] = 56,
+ [0][1][2][1][RTW89_ACMA][38] = 70,
+ [0][1][2][1][RTW89_CN][38] = 60,
+ [0][1][2][1][RTW89_UK][38] = 40,
+ [0][1][2][1][RTW89_FCC][40] = 72,
+ [0][1][2][1][RTW89_ETSI][40] = 6,
+ [0][1][2][1][RTW89_MKK][40] = 127,
+ [0][1][2][1][RTW89_IC][40] = 72,
+ [0][1][2][1][RTW89_KCC][40] = 56,
+ [0][1][2][1][RTW89_ACMA][40] = 70,
+ [0][1][2][1][RTW89_CN][40] = 60,
+ [0][1][2][1][RTW89_UK][40] = 40,
+ [0][1][2][1][RTW89_FCC][42] = 72,
+ [0][1][2][1][RTW89_ETSI][42] = 6,
+ [0][1][2][1][RTW89_MKK][42] = 127,
+ [0][1][2][1][RTW89_IC][42] = 72,
+ [0][1][2][1][RTW89_KCC][42] = 56,
+ [0][1][2][1][RTW89_ACMA][42] = 70,
+ [0][1][2][1][RTW89_CN][42] = 60,
+ [0][1][2][1][RTW89_UK][42] = 40,
+ [0][1][2][1][RTW89_FCC][44] = 72,
+ [0][1][2][1][RTW89_ETSI][44] = 6,
+ [0][1][2][1][RTW89_MKK][44] = 127,
+ [0][1][2][1][RTW89_IC][44] = 72,
+ [0][1][2][1][RTW89_KCC][44] = 56,
+ [0][1][2][1][RTW89_ACMA][44] = 70,
+ [0][1][2][1][RTW89_CN][44] = 54,
+ [0][1][2][1][RTW89_UK][44] = 40,
+ [0][1][2][1][RTW89_FCC][46] = 72,
+ [0][1][2][1][RTW89_ETSI][46] = 6,
+ [0][1][2][1][RTW89_MKK][46] = 127,
+ [0][1][2][1][RTW89_IC][46] = 72,
+ [0][1][2][1][RTW89_KCC][46] = 56,
+ [0][1][2][1][RTW89_ACMA][46] = 70,
+ [0][1][2][1][RTW89_CN][46] = 54,
+ [0][1][2][1][RTW89_UK][46] = 40,
+ [0][1][2][1][RTW89_FCC][48] = 48,
+ [0][1][2][1][RTW89_ETSI][48] = 127,
+ [0][1][2][1][RTW89_MKK][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_KCC][48] = 127,
+ [0][1][2][1][RTW89_ACMA][48] = 127,
+ [0][1][2][1][RTW89_CN][48] = 127,
+ [0][1][2][1][RTW89_UK][48] = 127,
+ [0][1][2][1][RTW89_FCC][50] = 50,
+ [0][1][2][1][RTW89_ETSI][50] = 127,
+ [0][1][2][1][RTW89_MKK][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_KCC][50] = 127,
+ [0][1][2][1][RTW89_ACMA][50] = 127,
+ [0][1][2][1][RTW89_CN][50] = 127,
+ [0][1][2][1][RTW89_UK][50] = 127,
+ [0][1][2][1][RTW89_FCC][52] = 48,
+ [0][1][2][1][RTW89_ETSI][52] = 127,
+ [0][1][2][1][RTW89_MKK][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_KCC][52] = 127,
+ [0][1][2][1][RTW89_ACMA][52] = 127,
+ [0][1][2][1][RTW89_CN][52] = 127,
+ [0][1][2][1][RTW89_UK][52] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 64,
+ [1][0][2][0][RTW89_ETSI][1] = 66,
+ [1][0][2][0][RTW89_MKK][1] = 66,
+ [1][0][2][0][RTW89_IC][1] = 62,
+ [1][0][2][0][RTW89_KCC][1] = 66,
+ [1][0][2][0][RTW89_ACMA][1] = 66,
+ [1][0][2][0][RTW89_CN][1] = 54,
+ [1][0][2][0][RTW89_UK][1] = 66,
+ [1][0][2][0][RTW89_FCC][5] = 68,
+ [1][0][2][0][RTW89_ETSI][5] = 66,
+ [1][0][2][0][RTW89_MKK][5] = 66,
+ [1][0][2][0][RTW89_IC][5] = 64,
+ [1][0][2][0][RTW89_KCC][5] = 54,
+ [1][0][2][0][RTW89_ACMA][5] = 66,
+ [1][0][2][0][RTW89_CN][5] = 54,
+ [1][0][2][0][RTW89_UK][5] = 66,
+ [1][0][2][0][RTW89_FCC][9] = 68,
+ [1][0][2][0][RTW89_ETSI][9] = 66,
+ [1][0][2][0][RTW89_MKK][9] = 66,
+ [1][0][2][0][RTW89_IC][9] = 64,
+ [1][0][2][0][RTW89_KCC][9] = 66,
+ [1][0][2][0][RTW89_ACMA][9] = 66,
+ [1][0][2][0][RTW89_CN][9] = 54,
+ [1][0][2][0][RTW89_UK][9] = 66,
+ [1][0][2][0][RTW89_FCC][13] = 60,
+ [1][0][2][0][RTW89_ETSI][13] = 66,
+ [1][0][2][0][RTW89_MKK][13] = 66,
+ [1][0][2][0][RTW89_IC][13] = 60,
+ [1][0][2][0][RTW89_KCC][13] = 52,
+ [1][0][2][0][RTW89_ACMA][13] = 66,
+ [1][0][2][0][RTW89_CN][13] = 54,
+ [1][0][2][0][RTW89_UK][13] = 66,
+ [1][0][2][0][RTW89_FCC][16] = 64,
+ [1][0][2][0][RTW89_ETSI][16] = 66,
+ [1][0][2][0][RTW89_MKK][16] = 66,
+ [1][0][2][0][RTW89_IC][16] = 64,
+ [1][0][2][0][RTW89_KCC][16] = 56,
+ [1][0][2][0][RTW89_ACMA][16] = 66,
+ [1][0][2][0][RTW89_CN][16] = 127,
+ [1][0][2][0][RTW89_UK][16] = 66,
+ [1][0][2][0][RTW89_FCC][20] = 68,
+ [1][0][2][0][RTW89_ETSI][20] = 66,
+ [1][0][2][0][RTW89_MKK][20] = 66,
+ [1][0][2][0][RTW89_IC][20] = 68,
+ [1][0][2][0][RTW89_KCC][20] = 56,
+ [1][0][2][0][RTW89_ACMA][20] = 66,
+ [1][0][2][0][RTW89_CN][20] = 127,
+ [1][0][2][0][RTW89_UK][20] = 66,
+ [1][0][2][0][RTW89_FCC][24] = 68,
+ [1][0][2][0][RTW89_ETSI][24] = 66,
+ [1][0][2][0][RTW89_MKK][24] = 66,
+ [1][0][2][0][RTW89_IC][24] = 127,
+ [1][0][2][0][RTW89_KCC][24] = 56,
+ [1][0][2][0][RTW89_ACMA][24] = 127,
+ [1][0][2][0][RTW89_CN][24] = 127,
+ [1][0][2][0][RTW89_UK][24] = 66,
+ [1][0][2][0][RTW89_FCC][28] = 68,
+ [1][0][2][0][RTW89_ETSI][28] = 66,
+ [1][0][2][0][RTW89_MKK][28] = 66,
+ [1][0][2][0][RTW89_IC][28] = 127,
+ [1][0][2][0][RTW89_KCC][28] = 66,
+ [1][0][2][0][RTW89_ACMA][28] = 127,
+ [1][0][2][0][RTW89_CN][28] = 127,
+ [1][0][2][0][RTW89_UK][28] = 66,
+ [1][0][2][0][RTW89_FCC][32] = 62,
+ [1][0][2][0][RTW89_ETSI][32] = 66,
+ [1][0][2][0][RTW89_MKK][32] = 66,
+ [1][0][2][0][RTW89_IC][32] = 62,
+ [1][0][2][0][RTW89_KCC][32] = 66,
+ [1][0][2][0][RTW89_ACMA][32] = 66,
+ [1][0][2][0][RTW89_CN][32] = 127,
+ [1][0][2][0][RTW89_UK][32] = 66,
+ [1][0][2][0][RTW89_FCC][36] = 68,
+ [1][0][2][0][RTW89_ETSI][36] = 127,
+ [1][0][2][0][RTW89_MKK][36] = 66,
+ [1][0][2][0][RTW89_IC][36] = 68,
+ [1][0][2][0][RTW89_KCC][36] = 66,
+ [1][0][2][0][RTW89_ACMA][36] = 66,
+ [1][0][2][0][RTW89_CN][36] = 127,
+ [1][0][2][0][RTW89_UK][36] = 64,
+ [1][0][2][0][RTW89_FCC][39] = 68,
+ [1][0][2][0][RTW89_ETSI][39] = 30,
+ [1][0][2][0][RTW89_MKK][39] = 127,
+ [1][0][2][0][RTW89_IC][39] = 68,
+ [1][0][2][0][RTW89_KCC][39] = 66,
+ [1][0][2][0][RTW89_ACMA][39] = 66,
+ [1][0][2][0][RTW89_CN][39] = 62,
+ [1][0][2][0][RTW89_UK][39] = 64,
+ [1][0][2][0][RTW89_FCC][43] = 68,
+ [1][0][2][0][RTW89_ETSI][43] = 30,
+ [1][0][2][0][RTW89_MKK][43] = 127,
+ [1][0][2][0][RTW89_IC][43] = 68,
+ [1][0][2][0][RTW89_KCC][43] = 66,
+ [1][0][2][0][RTW89_ACMA][43] = 66,
+ [1][0][2][0][RTW89_CN][43] = 66,
+ [1][0][2][0][RTW89_UK][43] = 64,
+ [1][0][2][0][RTW89_FCC][47] = 68,
+ [1][0][2][0][RTW89_ETSI][47] = 127,
+ [1][0][2][0][RTW89_MKK][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_KCC][47] = 127,
+ [1][0][2][0][RTW89_ACMA][47] = 127,
+ [1][0][2][0][RTW89_CN][47] = 127,
+ [1][0][2][0][RTW89_UK][47] = 127,
+ [1][0][2][0][RTW89_FCC][51] = 68,
+ [1][0][2][0][RTW89_ETSI][51] = 127,
+ [1][0][2][0][RTW89_MKK][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_KCC][51] = 127,
+ [1][0][2][0][RTW89_ACMA][51] = 127,
+ [1][0][2][0][RTW89_CN][51] = 127,
+ [1][0][2][0][RTW89_UK][51] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 54,
+ [1][1][2][0][RTW89_ETSI][1] = 54,
+ [1][1][2][0][RTW89_MKK][1] = 48,
+ [1][1][2][0][RTW89_IC][1] = 48,
+ [1][1][2][0][RTW89_KCC][1] = 54,
+ [1][1][2][0][RTW89_ACMA][1] = 54,
+ [1][1][2][0][RTW89_CN][1] = 42,
+ [1][1][2][0][RTW89_UK][1] = 54,
+ [1][1][2][0][RTW89_FCC][5] = 68,
+ [1][1][2][0][RTW89_ETSI][5] = 54,
+ [1][1][2][0][RTW89_MKK][5] = 52,
+ [1][1][2][0][RTW89_IC][5] = 48,
+ [1][1][2][0][RTW89_KCC][5] = 54,
+ [1][1][2][0][RTW89_ACMA][5] = 54,
+ [1][1][2][0][RTW89_CN][5] = 42,
+ [1][1][2][0][RTW89_UK][5] = 54,
+ [1][1][2][0][RTW89_FCC][9] = 68,
+ [1][1][2][0][RTW89_ETSI][9] = 54,
+ [1][1][2][0][RTW89_MKK][9] = 52,
+ [1][1][2][0][RTW89_IC][9] = 52,
+ [1][1][2][0][RTW89_KCC][9] = 64,
+ [1][1][2][0][RTW89_ACMA][9] = 54,
+ [1][1][2][0][RTW89_CN][9] = 42,
+ [1][1][2][0][RTW89_UK][9] = 54,
+ [1][1][2][0][RTW89_FCC][13] = 54,
+ [1][1][2][0][RTW89_ETSI][13] = 54,
+ [1][1][2][0][RTW89_MKK][13] = 52,
+ [1][1][2][0][RTW89_IC][13] = 52,
+ [1][1][2][0][RTW89_KCC][13] = 52,
+ [1][1][2][0][RTW89_ACMA][13] = 54,
+ [1][1][2][0][RTW89_CN][13] = 42,
+ [1][1][2][0][RTW89_UK][13] = 54,
+ [1][1][2][0][RTW89_FCC][16] = 56,
+ [1][1][2][0][RTW89_ETSI][16] = 54,
+ [1][1][2][0][RTW89_MKK][16] = 66,
+ [1][1][2][0][RTW89_IC][16] = 56,
+ [1][1][2][0][RTW89_KCC][16] = 54,
+ [1][1][2][0][RTW89_ACMA][16] = 54,
+ [1][1][2][0][RTW89_CN][16] = 127,
+ [1][1][2][0][RTW89_UK][16] = 54,
+ [1][1][2][0][RTW89_FCC][20] = 68,
+ [1][1][2][0][RTW89_ETSI][20] = 54,
+ [1][1][2][0][RTW89_MKK][20] = 66,
+ [1][1][2][0][RTW89_IC][20] = 68,
+ [1][1][2][0][RTW89_KCC][20] = 54,
+ [1][1][2][0][RTW89_ACMA][20] = 54,
+ [1][1][2][0][RTW89_CN][20] = 127,
+ [1][1][2][0][RTW89_UK][20] = 54,
+ [1][1][2][0][RTW89_FCC][24] = 68,
+ [1][1][2][0][RTW89_ETSI][24] = 54,
+ [1][1][2][0][RTW89_MKK][24] = 66,
+ [1][1][2][0][RTW89_IC][24] = 127,
+ [1][1][2][0][RTW89_KCC][24] = 54,
+ [1][1][2][0][RTW89_ACMA][24] = 127,
+ [1][1][2][0][RTW89_CN][24] = 127,
+ [1][1][2][0][RTW89_UK][24] = 54,
+ [1][1][2][0][RTW89_FCC][28] = 68,
+ [1][1][2][0][RTW89_ETSI][28] = 54,
+ [1][1][2][0][RTW89_MKK][28] = 66,
+ [1][1][2][0][RTW89_IC][28] = 127,
+ [1][1][2][0][RTW89_KCC][28] = 66,
+ [1][1][2][0][RTW89_ACMA][28] = 127,
+ [1][1][2][0][RTW89_CN][28] = 127,
+ [1][1][2][0][RTW89_UK][28] = 54,
+ [1][1][2][0][RTW89_FCC][32] = 56,
+ [1][1][2][0][RTW89_ETSI][32] = 54,
+ [1][1][2][0][RTW89_MKK][32] = 66,
+ [1][1][2][0][RTW89_IC][32] = 56,
+ [1][1][2][0][RTW89_KCC][32] = 66,
+ [1][1][2][0][RTW89_ACMA][32] = 54,
+ [1][1][2][0][RTW89_CN][32] = 127,
+ [1][1][2][0][RTW89_UK][32] = 54,
+ [1][1][2][0][RTW89_FCC][36] = 68,
+ [1][1][2][0][RTW89_ETSI][36] = 127,
+ [1][1][2][0][RTW89_MKK][36] = 66,
+ [1][1][2][0][RTW89_IC][36] = 68,
+ [1][1][2][0][RTW89_KCC][36] = 66,
+ [1][1][2][0][RTW89_ACMA][36] = 66,
+ [1][1][2][0][RTW89_CN][36] = 127,
+ [1][1][2][0][RTW89_UK][36] = 52,
+ [1][1][2][0][RTW89_FCC][39] = 68,
+ [1][1][2][0][RTW89_ETSI][39] = 18,
+ [1][1][2][0][RTW89_MKK][39] = 127,
+ [1][1][2][0][RTW89_IC][39] = 68,
+ [1][1][2][0][RTW89_KCC][39] = 56,
+ [1][1][2][0][RTW89_ACMA][39] = 66,
+ [1][1][2][0][RTW89_CN][39] = 62,
+ [1][1][2][0][RTW89_UK][39] = 52,
+ [1][1][2][0][RTW89_FCC][43] = 68,
+ [1][1][2][0][RTW89_ETSI][43] = 18,
+ [1][1][2][0][RTW89_MKK][43] = 127,
+ [1][1][2][0][RTW89_IC][43] = 68,
+ [1][1][2][0][RTW89_KCC][43] = 56,
+ [1][1][2][0][RTW89_ACMA][43] = 66,
+ [1][1][2][0][RTW89_CN][43] = 66,
+ [1][1][2][0][RTW89_UK][43] = 52,
+ [1][1][2][0][RTW89_FCC][47] = 62,
+ [1][1][2][0][RTW89_ETSI][47] = 127,
+ [1][1][2][0][RTW89_MKK][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_KCC][47] = 127,
+ [1][1][2][0][RTW89_ACMA][47] = 127,
+ [1][1][2][0][RTW89_CN][47] = 127,
+ [1][1][2][0][RTW89_UK][47] = 127,
+ [1][1][2][0][RTW89_FCC][51] = 60,
+ [1][1][2][0][RTW89_ETSI][51] = 127,
+ [1][1][2][0][RTW89_MKK][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_KCC][51] = 127,
+ [1][1][2][0][RTW89_ACMA][51] = 127,
+ [1][1][2][0][RTW89_CN][51] = 127,
+ [1][1][2][0][RTW89_UK][51] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 54,
+ [1][1][2][1][RTW89_ETSI][1] = 40,
+ [1][1][2][1][RTW89_MKK][1] = 48,
+ [1][1][2][1][RTW89_IC][1] = 40,
+ [1][1][2][1][RTW89_KCC][1] = 54,
+ [1][1][2][1][RTW89_ACMA][1] = 40,
+ [1][1][2][1][RTW89_CN][1] = 42,
+ [1][1][2][1][RTW89_UK][1] = 40,
+ [1][1][2][1][RTW89_FCC][5] = 68,
+ [1][1][2][1][RTW89_ETSI][5] = 40,
+ [1][1][2][1][RTW89_MKK][5] = 52,
+ [1][1][2][1][RTW89_IC][5] = 40,
+ [1][1][2][1][RTW89_KCC][5] = 54,
+ [1][1][2][1][RTW89_ACMA][5] = 40,
+ [1][1][2][1][RTW89_CN][5] = 42,
+ [1][1][2][1][RTW89_UK][5] = 40,
+ [1][1][2][1][RTW89_FCC][9] = 68,
+ [1][1][2][1][RTW89_ETSI][9] = 40,
+ [1][1][2][1][RTW89_MKK][9] = 52,
+ [1][1][2][1][RTW89_IC][9] = 40,
+ [1][1][2][1][RTW89_KCC][9] = 64,
+ [1][1][2][1][RTW89_ACMA][9] = 40,
+ [1][1][2][1][RTW89_CN][9] = 42,
+ [1][1][2][1][RTW89_UK][9] = 40,
+ [1][1][2][1][RTW89_FCC][13] = 54,
+ [1][1][2][1][RTW89_ETSI][13] = 40,
+ [1][1][2][1][RTW89_MKK][13] = 52,
+ [1][1][2][1][RTW89_IC][13] = 40,
+ [1][1][2][1][RTW89_KCC][13] = 52,
+ [1][1][2][1][RTW89_ACMA][13] = 40,
+ [1][1][2][1][RTW89_CN][13] = 42,
+ [1][1][2][1][RTW89_UK][13] = 40,
+ [1][1][2][1][RTW89_FCC][16] = 56,
+ [1][1][2][1][RTW89_ETSI][16] = 40,
+ [1][1][2][1][RTW89_MKK][16] = 66,
+ [1][1][2][1][RTW89_IC][16] = 56,
+ [1][1][2][1][RTW89_KCC][16] = 54,
+ [1][1][2][1][RTW89_ACMA][16] = 40,
+ [1][1][2][1][RTW89_CN][16] = 127,
+ [1][1][2][1][RTW89_UK][16] = 40,
+ [1][1][2][1][RTW89_FCC][20] = 68,
+ [1][1][2][1][RTW89_ETSI][20] = 40,
+ [1][1][2][1][RTW89_MKK][20] = 66,
+ [1][1][2][1][RTW89_IC][20] = 68,
+ [1][1][2][1][RTW89_KCC][20] = 54,
+ [1][1][2][1][RTW89_ACMA][20] = 40,
+ [1][1][2][1][RTW89_CN][20] = 127,
+ [1][1][2][1][RTW89_UK][20] = 40,
+ [1][1][2][1][RTW89_FCC][24] = 68,
+ [1][1][2][1][RTW89_ETSI][24] = 40,
+ [1][1][2][1][RTW89_MKK][24] = 66,
+ [1][1][2][1][RTW89_IC][24] = 127,
+ [1][1][2][1][RTW89_KCC][24] = 54,
+ [1][1][2][1][RTW89_ACMA][24] = 127,
+ [1][1][2][1][RTW89_CN][24] = 127,
+ [1][1][2][1][RTW89_UK][24] = 40,
+ [1][1][2][1][RTW89_FCC][28] = 68,
+ [1][1][2][1][RTW89_ETSI][28] = 40,
+ [1][1][2][1][RTW89_MKK][28] = 66,
+ [1][1][2][1][RTW89_IC][28] = 127,
+ [1][1][2][1][RTW89_KCC][28] = 66,
+ [1][1][2][1][RTW89_ACMA][28] = 127,
+ [1][1][2][1][RTW89_CN][28] = 127,
+ [1][1][2][1][RTW89_UK][28] = 40,
+ [1][1][2][1][RTW89_FCC][32] = 56,
+ [1][1][2][1][RTW89_ETSI][32] = 40,
+ [1][1][2][1][RTW89_MKK][32] = 66,
+ [1][1][2][1][RTW89_IC][32] = 56,
+ [1][1][2][1][RTW89_KCC][32] = 66,
+ [1][1][2][1][RTW89_ACMA][32] = 40,
+ [1][1][2][1][RTW89_CN][32] = 127,
+ [1][1][2][1][RTW89_UK][32] = 40,
+ [1][1][2][1][RTW89_FCC][36] = 68,
+ [1][1][2][1][RTW89_ETSI][36] = 127,
+ [1][1][2][1][RTW89_MKK][36] = 66,
+ [1][1][2][1][RTW89_IC][36] = 68,
+ [1][1][2][1][RTW89_KCC][36] = 66,
+ [1][1][2][1][RTW89_ACMA][36] = 66,
+ [1][1][2][1][RTW89_CN][36] = 127,
+ [1][1][2][1][RTW89_UK][36] = 40,
+ [1][1][2][1][RTW89_FCC][39] = 68,
+ [1][1][2][1][RTW89_ETSI][39] = 6,
+ [1][1][2][1][RTW89_MKK][39] = 127,
+ [1][1][2][1][RTW89_IC][39] = 68,
+ [1][1][2][1][RTW89_KCC][39] = 56,
+ [1][1][2][1][RTW89_ACMA][39] = 66,
+ [1][1][2][1][RTW89_CN][39] = 60,
+ [1][1][2][1][RTW89_UK][39] = 40,
+ [1][1][2][1][RTW89_FCC][43] = 68,
+ [1][1][2][1][RTW89_ETSI][43] = 6,
+ [1][1][2][1][RTW89_MKK][43] = 127,
+ [1][1][2][1][RTW89_IC][43] = 68,
+ [1][1][2][1][RTW89_KCC][43] = 56,
+ [1][1][2][1][RTW89_ACMA][43] = 66,
+ [1][1][2][1][RTW89_CN][43] = 52,
+ [1][1][2][1][RTW89_UK][43] = 40,
+ [1][1][2][1][RTW89_FCC][47] = 62,
+ [1][1][2][1][RTW89_ETSI][47] = 127,
+ [1][1][2][1][RTW89_MKK][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_KCC][47] = 127,
+ [1][1][2][1][RTW89_ACMA][47] = 127,
+ [1][1][2][1][RTW89_CN][47] = 127,
+ [1][1][2][1][RTW89_UK][47] = 127,
+ [1][1][2][1][RTW89_FCC][51] = 60,
+ [1][1][2][1][RTW89_ETSI][51] = 127,
+ [1][1][2][1][RTW89_MKK][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_KCC][51] = 127,
+ [1][1][2][1][RTW89_ACMA][51] = 127,
+ [1][1][2][1][RTW89_CN][51] = 127,
+ [1][1][2][1][RTW89_UK][51] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 58,
+ [2][0][2][0][RTW89_ETSI][3] = 60,
+ [2][0][2][0][RTW89_MKK][3] = 60,
+ [2][0][2][0][RTW89_IC][3] = 56,
+ [2][0][2][0][RTW89_KCC][3] = 60,
+ [2][0][2][0][RTW89_ACMA][3] = 60,
+ [2][0][2][0][RTW89_CN][3] = 54,
+ [2][0][2][0][RTW89_UK][3] = 60,
+ [2][0][2][0][RTW89_FCC][11] = 50,
+ [2][0][2][0][RTW89_ETSI][11] = 60,
+ [2][0][2][0][RTW89_MKK][11] = 60,
+ [2][0][2][0][RTW89_IC][11] = 50,
+ [2][0][2][0][RTW89_KCC][11] = 58,
+ [2][0][2][0][RTW89_ACMA][11] = 60,
+ [2][0][2][0][RTW89_CN][11] = 54,
+ [2][0][2][0][RTW89_UK][11] = 60,
+ [2][0][2][0][RTW89_FCC][18] = 60,
+ [2][0][2][0][RTW89_ETSI][18] = 60,
+ [2][0][2][0][RTW89_MKK][18] = 60,
+ [2][0][2][0][RTW89_IC][18] = 60,
+ [2][0][2][0][RTW89_KCC][18] = 56,
+ [2][0][2][0][RTW89_ACMA][18] = 60,
+ [2][0][2][0][RTW89_CN][18] = 127,
+ [2][0][2][0][RTW89_UK][18] = 60,
+ [2][0][2][0][RTW89_FCC][26] = 62,
+ [2][0][2][0][RTW89_ETSI][26] = 60,
+ [2][0][2][0][RTW89_MKK][26] = 60,
+ [2][0][2][0][RTW89_IC][26] = 127,
+ [2][0][2][0][RTW89_KCC][26] = 60,
+ [2][0][2][0][RTW89_ACMA][26] = 127,
+ [2][0][2][0][RTW89_CN][26] = 127,
+ [2][0][2][0][RTW89_UK][26] = 60,
+ [2][0][2][0][RTW89_FCC][34] = 62,
+ [2][0][2][0][RTW89_ETSI][34] = 127,
+ [2][0][2][0][RTW89_MKK][34] = 60,
+ [2][0][2][0][RTW89_IC][34] = 62,
+ [2][0][2][0][RTW89_KCC][34] = 60,
+ [2][0][2][0][RTW89_ACMA][34] = 60,
+ [2][0][2][0][RTW89_CN][34] = 127,
+ [2][0][2][0][RTW89_UK][34] = 60,
+ [2][0][2][0][RTW89_FCC][41] = 62,
+ [2][0][2][0][RTW89_ETSI][41] = 30,
+ [2][0][2][0][RTW89_MKK][41] = 127,
+ [2][0][2][0][RTW89_IC][41] = 62,
+ [2][0][2][0][RTW89_KCC][41] = 58,
+ [2][0][2][0][RTW89_ACMA][41] = 60,
+ [2][0][2][0][RTW89_CN][41] = 62,
+ [2][0][2][0][RTW89_UK][41] = 60,
+ [2][0][2][0][RTW89_FCC][49] = 62,
+ [2][0][2][0][RTW89_ETSI][49] = 127,
+ [2][0][2][0][RTW89_MKK][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_KCC][49] = 127,
+ [2][0][2][0][RTW89_ACMA][49] = 127,
+ [2][0][2][0][RTW89_CN][49] = 127,
+ [2][0][2][0][RTW89_UK][49] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 48,
+ [2][1][2][0][RTW89_ETSI][3] = 54,
+ [2][1][2][0][RTW89_MKK][3] = 56,
+ [2][1][2][0][RTW89_IC][3] = 46,
+ [2][1][2][0][RTW89_KCC][3] = 56,
+ [2][1][2][0][RTW89_ACMA][3] = 54,
+ [2][1][2][0][RTW89_CN][3] = 52,
+ [2][1][2][0][RTW89_UK][3] = 54,
+ [2][1][2][0][RTW89_FCC][11] = 38,
+ [2][1][2][0][RTW89_ETSI][11] = 54,
+ [2][1][2][0][RTW89_MKK][11] = 54,
+ [2][1][2][0][RTW89_IC][11] = 38,
+ [2][1][2][0][RTW89_KCC][11] = 52,
+ [2][1][2][0][RTW89_ACMA][11] = 54,
+ [2][1][2][0][RTW89_CN][11] = 52,
+ [2][1][2][0][RTW89_UK][11] = 54,
+ [2][1][2][0][RTW89_FCC][18] = 50,
+ [2][1][2][0][RTW89_ETSI][18] = 54,
+ [2][1][2][0][RTW89_MKK][18] = 60,
+ [2][1][2][0][RTW89_IC][18] = 50,
+ [2][1][2][0][RTW89_KCC][18] = 54,
+ [2][1][2][0][RTW89_ACMA][18] = 54,
+ [2][1][2][0][RTW89_CN][18] = 127,
+ [2][1][2][0][RTW89_UK][18] = 54,
+ [2][1][2][0][RTW89_FCC][26] = 52,
+ [2][1][2][0][RTW89_ETSI][26] = 54,
+ [2][1][2][0][RTW89_MKK][26] = 56,
+ [2][1][2][0][RTW89_IC][26] = 127,
+ [2][1][2][0][RTW89_KCC][26] = 60,
+ [2][1][2][0][RTW89_ACMA][26] = 127,
+ [2][1][2][0][RTW89_CN][26] = 127,
+ [2][1][2][0][RTW89_UK][26] = 54,
+ [2][1][2][0][RTW89_FCC][34] = 62,
+ [2][1][2][0][RTW89_ETSI][34] = 127,
+ [2][1][2][0][RTW89_MKK][34] = 60,
+ [2][1][2][0][RTW89_IC][34] = 62,
+ [2][1][2][0][RTW89_KCC][34] = 60,
+ [2][1][2][0][RTW89_ACMA][34] = 60,
+ [2][1][2][0][RTW89_CN][34] = 127,
+ [2][1][2][0][RTW89_UK][34] = 52,
+ [2][1][2][0][RTW89_FCC][41] = 60,
+ [2][1][2][0][RTW89_ETSI][41] = 18,
+ [2][1][2][0][RTW89_MKK][41] = 127,
+ [2][1][2][0][RTW89_IC][41] = 60,
+ [2][1][2][0][RTW89_KCC][41] = 50,
+ [2][1][2][0][RTW89_ACMA][41] = 58,
+ [2][1][2][0][RTW89_CN][41] = 62,
+ [2][1][2][0][RTW89_UK][41] = 52,
+ [2][1][2][0][RTW89_FCC][49] = 62,
+ [2][1][2][0][RTW89_ETSI][49] = 127,
+ [2][1][2][0][RTW89_MKK][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_KCC][49] = 127,
+ [2][1][2][0][RTW89_ACMA][49] = 127,
+ [2][1][2][0][RTW89_CN][49] = 127,
+ [2][1][2][0][RTW89_UK][49] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 48,
+ [2][1][2][1][RTW89_ETSI][3] = 40,
+ [2][1][2][1][RTW89_MKK][3] = 56,
+ [2][1][2][1][RTW89_IC][3] = 40,
+ [2][1][2][1][RTW89_KCC][3] = 56,
+ [2][1][2][1][RTW89_ACMA][3] = 40,
+ [2][1][2][1][RTW89_CN][3] = 42,
+ [2][1][2][1][RTW89_UK][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 38,
+ [2][1][2][1][RTW89_ETSI][11] = 40,
+ [2][1][2][1][RTW89_MKK][11] = 54,
+ [2][1][2][1][RTW89_IC][11] = 38,
+ [2][1][2][1][RTW89_KCC][11] = 52,
+ [2][1][2][1][RTW89_ACMA][11] = 40,
+ [2][1][2][1][RTW89_CN][11] = 42,
+ [2][1][2][1][RTW89_UK][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 50,
+ [2][1][2][1][RTW89_ETSI][18] = 40,
+ [2][1][2][1][RTW89_MKK][18] = 60,
+ [2][1][2][1][RTW89_IC][18] = 50,
+ [2][1][2][1][RTW89_KCC][18] = 54,
+ [2][1][2][1][RTW89_ACMA][18] = 40,
+ [2][1][2][1][RTW89_CN][18] = 127,
+ [2][1][2][1][RTW89_UK][18] = 40,
+ [2][1][2][1][RTW89_FCC][26] = 52,
+ [2][1][2][1][RTW89_ETSI][26] = 42,
+ [2][1][2][1][RTW89_MKK][26] = 56,
+ [2][1][2][1][RTW89_IC][26] = 127,
+ [2][1][2][1][RTW89_KCC][26] = 60,
+ [2][1][2][1][RTW89_ACMA][26] = 127,
+ [2][1][2][1][RTW89_CN][26] = 127,
+ [2][1][2][1][RTW89_UK][26] = 42,
+ [2][1][2][1][RTW89_FCC][34] = 62,
+ [2][1][2][1][RTW89_ETSI][34] = 127,
+ [2][1][2][1][RTW89_MKK][34] = 60,
+ [2][1][2][1][RTW89_IC][34] = 62,
+ [2][1][2][1][RTW89_KCC][34] = 60,
+ [2][1][2][1][RTW89_ACMA][34] = 60,
+ [2][1][2][1][RTW89_CN][34] = 127,
+ [2][1][2][1][RTW89_UK][34] = 40,
+ [2][1][2][1][RTW89_FCC][41] = 60,
+ [2][1][2][1][RTW89_ETSI][41] = 6,
+ [2][1][2][1][RTW89_MKK][41] = 127,
+ [2][1][2][1][RTW89_IC][41] = 60,
+ [2][1][2][1][RTW89_KCC][41] = 50,
+ [2][1][2][1][RTW89_ACMA][41] = 58,
+ [2][1][2][1][RTW89_CN][41] = 40,
+ [2][1][2][1][RTW89_UK][41] = 40,
+ [2][1][2][1][RTW89_FCC][49] = 62,
+ [2][1][2][1][RTW89_ETSI][49] = 127,
+ [2][1][2][1][RTW89_MKK][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_KCC][49] = 127,
+ [2][1][2][1][RTW89_ACMA][49] = 127,
+ [2][1][2][1][RTW89_CN][49] = 127,
+ [2][1][2][1][RTW89_UK][49] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 40,
+ [3][0][2][0][RTW89_ETSI][7] = 50,
+ [3][0][2][0][RTW89_MKK][7] = 50,
+ [3][0][2][0][RTW89_IC][7] = 40,
+ [3][0][2][0][RTW89_KCC][7] = 44,
+ [3][0][2][0][RTW89_ACMA][7] = 127,
+ [3][0][2][0][RTW89_CN][7] = 66,
+ [3][0][2][0][RTW89_UK][7] = 127,
+ [3][0][2][0][RTW89_FCC][22] = 42,
+ [3][0][2][0][RTW89_ETSI][22] = 50,
+ [3][0][2][0][RTW89_MKK][22] = 50,
+ [3][0][2][0][RTW89_IC][22] = 127,
+ [3][0][2][0][RTW89_KCC][22] = 50,
+ [3][0][2][0][RTW89_ACMA][22] = 127,
+ [3][0][2][0][RTW89_CN][22] = 66,
+ [3][0][2][0][RTW89_UK][22] = 127,
+ [3][0][2][0][RTW89_FCC][45] = 52,
+ [3][0][2][0][RTW89_ETSI][45] = 127,
+ [3][0][2][0][RTW89_MKK][45] = 127,
+ [3][0][2][0][RTW89_IC][45] = 127,
+ [3][0][2][0][RTW89_KCC][45] = 127,
+ [3][0][2][0][RTW89_ACMA][45] = 127,
+ [3][0][2][0][RTW89_CN][45] = 127,
+ [3][0][2][0][RTW89_UK][45] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 32,
+ [3][1][2][0][RTW89_ETSI][7] = 50,
+ [3][1][2][0][RTW89_MKK][7] = 36,
+ [3][1][2][0][RTW89_IC][7] = 44,
+ [3][1][2][0][RTW89_KCC][7] = 50,
+ [3][1][2][0][RTW89_ACMA][7] = 127,
+ [3][1][2][0][RTW89_CN][7] = 54,
+ [3][1][2][0][RTW89_UK][7] = 127,
+ [3][1][2][0][RTW89_FCC][22] = 36,
+ [3][1][2][0][RTW89_ETSI][22] = 50,
+ [3][1][2][0][RTW89_MKK][22] = 48,
+ [3][1][2][0][RTW89_IC][22] = 127,
+ [3][1][2][0][RTW89_KCC][22] = 50,
+ [3][1][2][0][RTW89_ACMA][22] = 127,
+ [3][1][2][0][RTW89_CN][22] = 54,
+ [3][1][2][0][RTW89_UK][22] = 127,
+ [3][1][2][0][RTW89_FCC][45] = 46,
+ [3][1][2][0][RTW89_ETSI][45] = 127,
+ [3][1][2][0][RTW89_MKK][45] = 127,
+ [3][1][2][0][RTW89_IC][45] = 127,
+ [3][1][2][0][RTW89_KCC][45] = 127,
+ [3][1][2][0][RTW89_ACMA][45] = 127,
+ [3][1][2][0][RTW89_CN][45] = 127,
+ [3][1][2][0][RTW89_UK][45] = 127,
+ [3][1][2][1][RTW89_FCC][7] = 32,
+ [3][1][2][1][RTW89_ETSI][7] = 42,
+ [3][1][2][1][RTW89_MKK][7] = 36,
+ [3][1][2][1][RTW89_IC][7] = 44,
+ [3][1][2][1][RTW89_KCC][7] = 50,
+ [3][1][2][1][RTW89_ACMA][7] = 127,
+ [3][1][2][1][RTW89_CN][7] = 42,
+ [3][1][2][1][RTW89_UK][7] = 127,
+ [3][1][2][1][RTW89_FCC][22] = 36,
+ [3][1][2][1][RTW89_ETSI][22] = 42,
+ [3][1][2][1][RTW89_MKK][22] = 48,
+ [3][1][2][1][RTW89_IC][22] = 127,
+ [3][1][2][1][RTW89_KCC][22] = 50,
+ [3][1][2][1][RTW89_ACMA][22] = 127,
+ [3][1][2][1][RTW89_CN][22] = 42,
+ [3][1][2][1][RTW89_UK][22] = 127,
+ [3][1][2][1][RTW89_FCC][45] = 46,
+ [3][1][2][1][RTW89_ETSI][45] = 127,
+ [3][1][2][1][RTW89_MKK][45] = 127,
+ [3][1][2][1][RTW89_IC][45] = 127,
+ [3][1][2][1][RTW89_KCC][45] = 127,
+ [3][1][2][1][RTW89_ACMA][45] = 127,
+ [3][1][2][1][RTW89_CN][45] = 127,
+ [3][1][2][1][RTW89_UK][45] = 127,
+};
+
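+/*
+ * A hedged reading of the table below, inferred from the dimension macros in
+ * its declaration rather than stated anywhere in this patch: entries appear
+ * to be indexed as [bandwidth][ntx][rate-section][beamforming][regulatory
+ * domain][6 GHz channel index], so a lookup would take the form
+ * rtw89_8852c_txpwr_lmt_6g[bw][ntx][rs][bf][regd][ch]. The value 127
+ * (S8_MAX) seems to act as a "not permitted" sentinel for domain/channel
+ * combinations, and the RTW89_WW ("worldwide") entries look like
+ * conservative minima taken across the per-domain limits.
+ */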
+const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
+ [0][0][1][0][RTW89_WW][0] = 24,
+ [0][0][1][0][RTW89_WW][2] = 22,
+ [0][0][1][0][RTW89_WW][4] = 22,
+ [0][0][1][0][RTW89_WW][6] = 22,
+ [0][0][1][0][RTW89_WW][8] = 22,
+ [0][0][1][0][RTW89_WW][10] = 22,
+ [0][0][1][0][RTW89_WW][12] = 22,
+ [0][0][1][0][RTW89_WW][14] = 22,
+ [0][0][1][0][RTW89_WW][15] = 22,
+ [0][0][1][0][RTW89_WW][17] = 22,
+ [0][0][1][0][RTW89_WW][19] = 22,
+ [0][0][1][0][RTW89_WW][21] = 22,
+ [0][0][1][0][RTW89_WW][23] = 22,
+ [0][0][1][0][RTW89_WW][25] = 22,
+ [0][0][1][0][RTW89_WW][27] = 22,
+ [0][0][1][0][RTW89_WW][29] = 22,
+ [0][0][1][0][RTW89_WW][30] = 22,
+ [0][0][1][0][RTW89_WW][32] = 22,
+ [0][0][1][0][RTW89_WW][34] = 22,
+ [0][0][1][0][RTW89_WW][36] = 22,
+ [0][0][1][0][RTW89_WW][38] = 22,
+ [0][0][1][0][RTW89_WW][40] = 22,
+ [0][0][1][0][RTW89_WW][42] = 22,
+ [0][0][1][0][RTW89_WW][44] = 22,
+ [0][0][1][0][RTW89_WW][45] = 22,
+ [0][0][1][0][RTW89_WW][47] = 22,
+ [0][0][1][0][RTW89_WW][49] = 24,
+ [0][0][1][0][RTW89_WW][51] = 22,
+ [0][0][1][0][RTW89_WW][53] = 22,
+ [0][0][1][0][RTW89_WW][55] = 22,
+ [0][0][1][0][RTW89_WW][57] = 22,
+ [0][0][1][0][RTW89_WW][59] = 22,
+ [0][0][1][0][RTW89_WW][60] = 22,
+ [0][0][1][0][RTW89_WW][62] = 22,
+ [0][0][1][0][RTW89_WW][64] = 22,
+ [0][0][1][0][RTW89_WW][66] = 22,
+ [0][0][1][0][RTW89_WW][68] = 22,
+ [0][0][1][0][RTW89_WW][70] = 24,
+ [0][0][1][0][RTW89_WW][72] = 22,
+ [0][0][1][0][RTW89_WW][74] = 22,
+ [0][0][1][0][RTW89_WW][75] = 22,
+ [0][0][1][0][RTW89_WW][77] = 22,
+ [0][0][1][0][RTW89_WW][79] = 22,
+ [0][0][1][0][RTW89_WW][81] = 22,
+ [0][0][1][0][RTW89_WW][83] = 22,
+ [0][0][1][0][RTW89_WW][85] = 22,
+ [0][0][1][0][RTW89_WW][87] = 22,
+ [0][0][1][0][RTW89_WW][89] = 22,
+ [0][0][1][0][RTW89_WW][90] = 22,
+ [0][0][1][0][RTW89_WW][92] = 22,
+ [0][0][1][0][RTW89_WW][94] = 22,
+ [0][0][1][0][RTW89_WW][96] = 22,
+ [0][0][1][0][RTW89_WW][98] = 22,
+ [0][0][1][0][RTW89_WW][100] = 22,
+ [0][0][1][0][RTW89_WW][102] = 22,
+ [0][0][1][0][RTW89_WW][104] = 22,
+ [0][0][1][0][RTW89_WW][105] = 22,
+ [0][0][1][0][RTW89_WW][107] = 24,
+ [0][0][1][0][RTW89_WW][109] = 24,
+ [0][0][1][0][RTW89_WW][111] = 0,
+ [0][0][1][0][RTW89_WW][113] = 0,
+ [0][0][1][0][RTW89_WW][115] = 0,
+ [0][0][1][0][RTW89_WW][117] = 0,
+ [0][0][1][0][RTW89_WW][119] = 0,
+ [0][1][1][0][RTW89_WW][0] = -2,
+ [0][1][1][0][RTW89_WW][2] = -4,
+ [0][1][1][0][RTW89_WW][4] = -4,
+ [0][1][1][0][RTW89_WW][6] = -4,
+ [0][1][1][0][RTW89_WW][8] = -4,
+ [0][1][1][0][RTW89_WW][10] = -4,
+ [0][1][1][0][RTW89_WW][12] = -4,
+ [0][1][1][0][RTW89_WW][14] = -4,
+ [0][1][1][0][RTW89_WW][15] = -4,
+ [0][1][1][0][RTW89_WW][17] = -4,
+ [0][1][1][0][RTW89_WW][19] = -4,
+ [0][1][1][0][RTW89_WW][21] = -4,
+ [0][1][1][0][RTW89_WW][23] = -4,
+ [0][1][1][0][RTW89_WW][25] = -4,
+ [0][1][1][0][RTW89_WW][27] = -4,
+ [0][1][1][0][RTW89_WW][29] = -4,
+ [0][1][1][0][RTW89_WW][30] = -4,
+ [0][1][1][0][RTW89_WW][32] = -4,
+ [0][1][1][0][RTW89_WW][34] = -4,
+ [0][1][1][0][RTW89_WW][36] = -4,
+ [0][1][1][0][RTW89_WW][38] = -4,
+ [0][1][1][0][RTW89_WW][40] = -4,
+ [0][1][1][0][RTW89_WW][42] = -4,
+ [0][1][1][0][RTW89_WW][44] = -2,
+ [0][1][1][0][RTW89_WW][45] = -2,
+ [0][1][1][0][RTW89_WW][47] = -2,
+ [0][1][1][0][RTW89_WW][49] = -2,
+ [0][1][1][0][RTW89_WW][51] = -2,
+ [0][1][1][0][RTW89_WW][53] = -2,
+ [0][1][1][0][RTW89_WW][55] = -2,
+ [0][1][1][0][RTW89_WW][57] = -2,
+ [0][1][1][0][RTW89_WW][59] = -2,
+ [0][1][1][0][RTW89_WW][60] = -2,
+ [0][1][1][0][RTW89_WW][62] = -2,
+ [0][1][1][0][RTW89_WW][64] = -2,
+ [0][1][1][0][RTW89_WW][66] = -2,
+ [0][1][1][0][RTW89_WW][68] = -2,
+ [0][1][1][0][RTW89_WW][70] = -2,
+ [0][1][1][0][RTW89_WW][72] = -2,
+ [0][1][1][0][RTW89_WW][74] = -2,
+ [0][1][1][0][RTW89_WW][75] = -2,
+ [0][1][1][0][RTW89_WW][77] = -2,
+ [0][1][1][0][RTW89_WW][79] = -2,
+ [0][1][1][0][RTW89_WW][81] = -2,
+ [0][1][1][0][RTW89_WW][83] = -2,
+ [0][1][1][0][RTW89_WW][85] = -2,
+ [0][1][1][0][RTW89_WW][87] = -2,
+ [0][1][1][0][RTW89_WW][89] = -2,
+ [0][1][1][0][RTW89_WW][90] = -2,
+ [0][1][1][0][RTW89_WW][92] = -2,
+ [0][1][1][0][RTW89_WW][94] = -2,
+ [0][1][1][0][RTW89_WW][96] = -2,
+ [0][1][1][0][RTW89_WW][98] = -2,
+ [0][1][1][0][RTW89_WW][100] = -2,
+ [0][1][1][0][RTW89_WW][102] = -2,
+ [0][1][1][0][RTW89_WW][104] = -2,
+ [0][1][1][0][RTW89_WW][105] = -2,
+ [0][1][1][0][RTW89_WW][107] = 1,
+ [0][1][1][0][RTW89_WW][109] = 1,
+ [0][1][1][0][RTW89_WW][111] = 0,
+ [0][1][1][0][RTW89_WW][113] = 0,
+ [0][1][1][0][RTW89_WW][115] = 0,
+ [0][1][1][0][RTW89_WW][117] = 0,
+ [0][1][1][0][RTW89_WW][119] = 0,
+ [0][0][2][0][RTW89_WW][0] = 24,
+ [0][0][2][0][RTW89_WW][2] = 22,
+ [0][0][2][0][RTW89_WW][4] = 22,
+ [0][0][2][0][RTW89_WW][6] = 22,
+ [0][0][2][0][RTW89_WW][8] = 22,
+ [0][0][2][0][RTW89_WW][10] = 22,
+ [0][0][2][0][RTW89_WW][12] = 22,
+ [0][0][2][0][RTW89_WW][14] = 22,
+ [0][0][2][0][RTW89_WW][15] = 22,
+ [0][0][2][0][RTW89_WW][17] = 22,
+ [0][0][2][0][RTW89_WW][19] = 22,
+ [0][0][2][0][RTW89_WW][21] = 22,
+ [0][0][2][0][RTW89_WW][23] = 22,
+ [0][0][2][0][RTW89_WW][25] = 22,
+ [0][0][2][0][RTW89_WW][27] = 22,
+ [0][0][2][0][RTW89_WW][29] = 22,
+ [0][0][2][0][RTW89_WW][30] = 22,
+ [0][0][2][0][RTW89_WW][32] = 22,
+ [0][0][2][0][RTW89_WW][34] = 22,
+ [0][0][2][0][RTW89_WW][36] = 22,
+ [0][0][2][0][RTW89_WW][38] = 22,
+ [0][0][2][0][RTW89_WW][40] = 22,
+ [0][0][2][0][RTW89_WW][42] = 22,
+ [0][0][2][0][RTW89_WW][44] = 22,
+ [0][0][2][0][RTW89_WW][45] = 22,
+ [0][0][2][0][RTW89_WW][47] = 22,
+ [0][0][2][0][RTW89_WW][49] = 24,
+ [0][0][2][0][RTW89_WW][51] = 22,
+ [0][0][2][0][RTW89_WW][53] = 22,
+ [0][0][2][0][RTW89_WW][55] = 22,
+ [0][0][2][0][RTW89_WW][57] = 22,
+ [0][0][2][0][RTW89_WW][59] = 22,
+ [0][0][2][0][RTW89_WW][60] = 22,
+ [0][0][2][0][RTW89_WW][62] = 22,
+ [0][0][2][0][RTW89_WW][64] = 22,
+ [0][0][2][0][RTW89_WW][66] = 22,
+ [0][0][2][0][RTW89_WW][68] = 22,
+ [0][0][2][0][RTW89_WW][70] = 24,
+ [0][0][2][0][RTW89_WW][72] = 22,
+ [0][0][2][0][RTW89_WW][74] = 22,
+ [0][0][2][0][RTW89_WW][75] = 22,
+ [0][0][2][0][RTW89_WW][77] = 22,
+ [0][0][2][0][RTW89_WW][79] = 22,
+ [0][0][2][0][RTW89_WW][81] = 22,
+ [0][0][2][0][RTW89_WW][83] = 22,
+ [0][0][2][0][RTW89_WW][85] = 22,
+ [0][0][2][0][RTW89_WW][87] = 22,
+ [0][0][2][0][RTW89_WW][89] = 22,
+ [0][0][2][0][RTW89_WW][90] = 22,
+ [0][0][2][0][RTW89_WW][92] = 22,
+ [0][0][2][0][RTW89_WW][94] = 22,
+ [0][0][2][0][RTW89_WW][96] = 22,
+ [0][0][2][0][RTW89_WW][98] = 22,
+ [0][0][2][0][RTW89_WW][100] = 22,
+ [0][0][2][0][RTW89_WW][102] = 22,
+ [0][0][2][0][RTW89_WW][104] = 22,
+ [0][0][2][0][RTW89_WW][105] = 22,
+ [0][0][2][0][RTW89_WW][107] = 24,
+ [0][0][2][0][RTW89_WW][109] = 24,
+ [0][0][2][0][RTW89_WW][111] = 0,
+ [0][0][2][0][RTW89_WW][113] = 0,
+ [0][0][2][0][RTW89_WW][115] = 0,
+ [0][0][2][0][RTW89_WW][117] = 0,
+ [0][0][2][0][RTW89_WW][119] = 0,
+ [0][1][2][0][RTW89_WW][0] = -2,
+ [0][1][2][0][RTW89_WW][2] = -4,
+ [0][1][2][0][RTW89_WW][4] = -4,
+ [0][1][2][0][RTW89_WW][6] = -4,
+ [0][1][2][0][RTW89_WW][8] = -4,
+ [0][1][2][0][RTW89_WW][10] = -4,
+ [0][1][2][0][RTW89_WW][12] = -4,
+ [0][1][2][0][RTW89_WW][14] = -4,
+ [0][1][2][0][RTW89_WW][15] = -4,
+ [0][1][2][0][RTW89_WW][17] = -4,
+ [0][1][2][0][RTW89_WW][19] = -4,
+ [0][1][2][0][RTW89_WW][21] = -4,
+ [0][1][2][0][RTW89_WW][23] = -4,
+ [0][1][2][0][RTW89_WW][25] = -4,
+ [0][1][2][0][RTW89_WW][27] = -4,
+ [0][1][2][0][RTW89_WW][29] = -4,
+ [0][1][2][0][RTW89_WW][30] = -4,
+ [0][1][2][0][RTW89_WW][32] = -4,
+ [0][1][2][0][RTW89_WW][34] = -4,
+ [0][1][2][0][RTW89_WW][36] = -4,
+ [0][1][2][0][RTW89_WW][38] = -4,
+ [0][1][2][0][RTW89_WW][40] = -4,
+ [0][1][2][0][RTW89_WW][42] = -4,
+ [0][1][2][0][RTW89_WW][44] = -2,
+ [0][1][2][0][RTW89_WW][45] = -2,
+ [0][1][2][0][RTW89_WW][47] = -2,
+ [0][1][2][0][RTW89_WW][49] = -2,
+ [0][1][2][0][RTW89_WW][51] = -2,
+ [0][1][2][0][RTW89_WW][53] = -2,
+ [0][1][2][0][RTW89_WW][55] = -2,
+ [0][1][2][0][RTW89_WW][57] = -2,
+ [0][1][2][0][RTW89_WW][59] = -2,
+ [0][1][2][0][RTW89_WW][60] = -2,
+ [0][1][2][0][RTW89_WW][62] = -2,
+ [0][1][2][0][RTW89_WW][64] = -2,
+ [0][1][2][0][RTW89_WW][66] = -2,
+ [0][1][2][0][RTW89_WW][68] = -2,
+ [0][1][2][0][RTW89_WW][70] = -2,
+ [0][1][2][0][RTW89_WW][72] = -2,
+ [0][1][2][0][RTW89_WW][74] = -2,
+ [0][1][2][0][RTW89_WW][75] = -2,
+ [0][1][2][0][RTW89_WW][77] = -2,
+ [0][1][2][0][RTW89_WW][79] = -2,
+ [0][1][2][0][RTW89_WW][81] = -2,
+ [0][1][2][0][RTW89_WW][83] = -2,
+ [0][1][2][0][RTW89_WW][85] = -2,
+ [0][1][2][0][RTW89_WW][87] = -2,
+ [0][1][2][0][RTW89_WW][89] = -2,
+ [0][1][2][0][RTW89_WW][90] = -2,
+ [0][1][2][0][RTW89_WW][92] = -2,
+ [0][1][2][0][RTW89_WW][94] = -2,
+ [0][1][2][0][RTW89_WW][96] = -2,
+ [0][1][2][0][RTW89_WW][98] = -2,
+ [0][1][2][0][RTW89_WW][100] = -2,
+ [0][1][2][0][RTW89_WW][102] = -2,
+ [0][1][2][0][RTW89_WW][104] = -2,
+ [0][1][2][0][RTW89_WW][105] = -2,
+ [0][1][2][0][RTW89_WW][107] = 1,
+ [0][1][2][0][RTW89_WW][109] = 1,
+ [0][1][2][0][RTW89_WW][111] = 0,
+ [0][1][2][0][RTW89_WW][113] = 0,
+ [0][1][2][0][RTW89_WW][115] = 0,
+ [0][1][2][0][RTW89_WW][117] = 0,
+ [0][1][2][0][RTW89_WW][119] = 0,
+ [0][1][2][1][RTW89_WW][0] = -2,
+ [0][1][2][1][RTW89_WW][2] = -4,
+ [0][1][2][1][RTW89_WW][4] = -4,
+ [0][1][2][1][RTW89_WW][6] = -4,
+ [0][1][2][1][RTW89_WW][8] = -4,
+ [0][1][2][1][RTW89_WW][10] = -4,
+ [0][1][2][1][RTW89_WW][12] = -4,
+ [0][1][2][1][RTW89_WW][14] = -4,
+ [0][1][2][1][RTW89_WW][15] = -4,
+ [0][1][2][1][RTW89_WW][17] = -4,
+ [0][1][2][1][RTW89_WW][19] = -4,
+ [0][1][2][1][RTW89_WW][21] = -4,
+ [0][1][2][1][RTW89_WW][23] = -4,
+ [0][1][2][1][RTW89_WW][25] = -4,
+ [0][1][2][1][RTW89_WW][27] = -4,
+ [0][1][2][1][RTW89_WW][29] = -4,
+ [0][1][2][1][RTW89_WW][30] = -4,
+ [0][1][2][1][RTW89_WW][32] = -4,
+ [0][1][2][1][RTW89_WW][34] = -4,
+ [0][1][2][1][RTW89_WW][36] = -4,
+ [0][1][2][1][RTW89_WW][38] = -4,
+ [0][1][2][1][RTW89_WW][40] = -4,
+ [0][1][2][1][RTW89_WW][42] = -4,
+ [0][1][2][1][RTW89_WW][44] = -2,
+ [0][1][2][1][RTW89_WW][45] = -2,
+ [0][1][2][1][RTW89_WW][47] = -2,
+ [0][1][2][1][RTW89_WW][49] = -2,
+ [0][1][2][1][RTW89_WW][51] = -2,
+ [0][1][2][1][RTW89_WW][53] = -2,
+ [0][1][2][1][RTW89_WW][55] = -2,
+ [0][1][2][1][RTW89_WW][57] = -2,
+ [0][1][2][1][RTW89_WW][59] = -2,
+ [0][1][2][1][RTW89_WW][60] = -2,
+ [0][1][2][1][RTW89_WW][62] = -2,
+ [0][1][2][1][RTW89_WW][64] = -2,
+ [0][1][2][1][RTW89_WW][66] = -2,
+ [0][1][2][1][RTW89_WW][68] = -2,
+ [0][1][2][1][RTW89_WW][70] = -2,
+ [0][1][2][1][RTW89_WW][72] = -2,
+ [0][1][2][1][RTW89_WW][74] = -2,
+ [0][1][2][1][RTW89_WW][75] = -2,
+ [0][1][2][1][RTW89_WW][77] = -2,
+ [0][1][2][1][RTW89_WW][79] = -2,
+ [0][1][2][1][RTW89_WW][81] = -2,
+ [0][1][2][1][RTW89_WW][83] = -2,
+ [0][1][2][1][RTW89_WW][85] = -2,
+ [0][1][2][1][RTW89_WW][87] = -2,
+ [0][1][2][1][RTW89_WW][89] = -2,
+ [0][1][2][1][RTW89_WW][90] = -2,
+ [0][1][2][1][RTW89_WW][92] = -2,
+ [0][1][2][1][RTW89_WW][94] = -2,
+ [0][1][2][1][RTW89_WW][96] = -2,
+ [0][1][2][1][RTW89_WW][98] = -2,
+ [0][1][2][1][RTW89_WW][100] = -2,
+ [0][1][2][1][RTW89_WW][102] = -2,
+ [0][1][2][1][RTW89_WW][104] = -2,
+ [0][1][2][1][RTW89_WW][105] = -2,
+ [0][1][2][1][RTW89_WW][107] = 1,
+ [0][1][2][1][RTW89_WW][109] = 1,
+ [0][1][2][1][RTW89_WW][111] = 0,
+ [0][1][2][1][RTW89_WW][113] = 0,
+ [0][1][2][1][RTW89_WW][115] = 0,
+ [0][1][2][1][RTW89_WW][117] = 0,
+ [0][1][2][1][RTW89_WW][119] = 0,
+ [1][0][2][0][RTW89_WW][1] = 34,
+ [1][0][2][0][RTW89_WW][5] = 34,
+ [1][0][2][0][RTW89_WW][9] = 34,
+ [1][0][2][0][RTW89_WW][13] = 34,
+ [1][0][2][0][RTW89_WW][16] = 34,
+ [1][0][2][0][RTW89_WW][20] = 34,
+ [1][0][2][0][RTW89_WW][24] = 36,
+ [1][0][2][0][RTW89_WW][28] = 34,
+ [1][0][2][0][RTW89_WW][31] = 34,
+ [1][0][2][0][RTW89_WW][35] = 34,
+ [1][0][2][0][RTW89_WW][39] = 34,
+ [1][0][2][0][RTW89_WW][43] = 34,
+ [1][0][2][0][RTW89_WW][46] = 34,
+ [1][0][2][0][RTW89_WW][50] = 34,
+ [1][0][2][0][RTW89_WW][54] = 36,
+ [1][0][2][0][RTW89_WW][58] = 36,
+ [1][0][2][0][RTW89_WW][61] = 34,
+ [1][0][2][0][RTW89_WW][65] = 34,
+ [1][0][2][0][RTW89_WW][69] = 34,
+ [1][0][2][0][RTW89_WW][73] = 34,
+ [1][0][2][0][RTW89_WW][76] = 34,
+ [1][0][2][0][RTW89_WW][80] = 34,
+ [1][0][2][0][RTW89_WW][84] = 34,
+ [1][0][2][0][RTW89_WW][88] = 34,
+ [1][0][2][0][RTW89_WW][91] = 36,
+ [1][0][2][0][RTW89_WW][95] = 34,
+ [1][0][2][0][RTW89_WW][99] = 34,
+ [1][0][2][0][RTW89_WW][103] = 34,
+ [1][0][2][0][RTW89_WW][106] = 36,
+ [1][0][2][0][RTW89_WW][110] = 0,
+ [1][0][2][0][RTW89_WW][114] = 0,
+ [1][0][2][0][RTW89_WW][118] = 0,
+ [1][1][2][0][RTW89_WW][1] = 10,
+ [1][1][2][0][RTW89_WW][5] = 10,
+ [1][1][2][0][RTW89_WW][9] = 10,
+ [1][1][2][0][RTW89_WW][13] = 10,
+ [1][1][2][0][RTW89_WW][16] = 10,
+ [1][1][2][0][RTW89_WW][20] = 10,
+ [1][1][2][0][RTW89_WW][24] = 10,
+ [1][1][2][0][RTW89_WW][28] = 10,
+ [1][1][2][0][RTW89_WW][31] = 10,
+ [1][1][2][0][RTW89_WW][35] = 10,
+ [1][1][2][0][RTW89_WW][39] = 10,
+ [1][1][2][0][RTW89_WW][43] = 10,
+ [1][1][2][0][RTW89_WW][46] = 12,
+ [1][1][2][0][RTW89_WW][50] = 12,
+ [1][1][2][0][RTW89_WW][54] = 10,
+ [1][1][2][0][RTW89_WW][58] = 10,
+ [1][1][2][0][RTW89_WW][61] = 10,
+ [1][1][2][0][RTW89_WW][65] = 10,
+ [1][1][2][0][RTW89_WW][69] = 10,
+ [1][1][2][0][RTW89_WW][73] = 10,
+ [1][1][2][0][RTW89_WW][76] = 10,
+ [1][1][2][0][RTW89_WW][80] = 10,
+ [1][1][2][0][RTW89_WW][84] = 10,
+ [1][1][2][0][RTW89_WW][88] = 10,
+ [1][1][2][0][RTW89_WW][91] = 12,
+ [1][1][2][0][RTW89_WW][95] = 10,
+ [1][1][2][0][RTW89_WW][99] = 10,
+ [1][1][2][0][RTW89_WW][103] = 10,
+ [1][1][2][0][RTW89_WW][106] = 12,
+ [1][1][2][0][RTW89_WW][110] = 0,
+ [1][1][2][0][RTW89_WW][114] = 0,
+ [1][1][2][0][RTW89_WW][118] = 0,
+ [1][1][2][1][RTW89_WW][1] = 10,
+ [1][1][2][1][RTW89_WW][5] = 10,
+ [1][1][2][1][RTW89_WW][9] = 10,
+ [1][1][2][1][RTW89_WW][13] = 10,
+ [1][1][2][1][RTW89_WW][16] = 10,
+ [1][1][2][1][RTW89_WW][20] = 10,
+ [1][1][2][1][RTW89_WW][24] = 10,
+ [1][1][2][1][RTW89_WW][28] = 10,
+ [1][1][2][1][RTW89_WW][31] = 10,
+ [1][1][2][1][RTW89_WW][35] = 10,
+ [1][1][2][1][RTW89_WW][39] = 10,
+ [1][1][2][1][RTW89_WW][43] = 10,
+ [1][1][2][1][RTW89_WW][46] = 12,
+ [1][1][2][1][RTW89_WW][50] = 12,
+ [1][1][2][1][RTW89_WW][54] = 10,
+ [1][1][2][1][RTW89_WW][58] = 10,
+ [1][1][2][1][RTW89_WW][61] = 10,
+ [1][1][2][1][RTW89_WW][65] = 10,
+ [1][1][2][1][RTW89_WW][69] = 10,
+ [1][1][2][1][RTW89_WW][73] = 10,
+ [1][1][2][1][RTW89_WW][76] = 10,
+ [1][1][2][1][RTW89_WW][80] = 10,
+ [1][1][2][1][RTW89_WW][84] = 10,
+ [1][1][2][1][RTW89_WW][88] = 10,
+ [1][1][2][1][RTW89_WW][91] = 12,
+ [1][1][2][1][RTW89_WW][95] = 10,
+ [1][1][2][1][RTW89_WW][99] = 10,
+ [1][1][2][1][RTW89_WW][103] = 10,
+ [1][1][2][1][RTW89_WW][106] = 12,
+ [1][1][2][1][RTW89_WW][110] = 0,
+ [1][1][2][1][RTW89_WW][114] = 0,
+ [1][1][2][1][RTW89_WW][118] = 0,
+ [2][0][2][0][RTW89_WW][3] = 46,
+ [2][0][2][0][RTW89_WW][11] = 46,
+ [2][0][2][0][RTW89_WW][18] = 46,
+ [2][0][2][0][RTW89_WW][26] = 46,
+ [2][0][2][0][RTW89_WW][33] = 46,
+ [2][0][2][0][RTW89_WW][41] = 46,
+ [2][0][2][0][RTW89_WW][48] = 46,
+ [2][0][2][0][RTW89_WW][56] = 46,
+ [2][0][2][0][RTW89_WW][63] = 46,
+ [2][0][2][0][RTW89_WW][71] = 46,
+ [2][0][2][0][RTW89_WW][78] = 46,
+ [2][0][2][0][RTW89_WW][86] = 46,
+ [2][0][2][0][RTW89_WW][93] = 46,
+ [2][0][2][0][RTW89_WW][101] = 44,
+ [2][0][2][0][RTW89_WW][108] = 0,
+ [2][0][2][0][RTW89_WW][116] = 0,
+ [2][1][2][0][RTW89_WW][3] = 22,
+ [2][1][2][0][RTW89_WW][11] = 20,
+ [2][1][2][0][RTW89_WW][18] = 20,
+ [2][1][2][0][RTW89_WW][26] = 20,
+ [2][1][2][0][RTW89_WW][33] = 20,
+ [2][1][2][0][RTW89_WW][41] = 22,
+ [2][1][2][0][RTW89_WW][48] = 22,
+ [2][1][2][0][RTW89_WW][56] = 20,
+ [2][1][2][0][RTW89_WW][63] = 22,
+ [2][1][2][0][RTW89_WW][71] = 20,
+ [2][1][2][0][RTW89_WW][78] = 20,
+ [2][1][2][0][RTW89_WW][86] = 20,
+ [2][1][2][0][RTW89_WW][93] = 22,
+ [2][1][2][0][RTW89_WW][101] = 22,
+ [2][1][2][0][RTW89_WW][108] = 0,
+ [2][1][2][0][RTW89_WW][116] = 0,
+ [2][1][2][1][RTW89_WW][3] = 22,
+ [2][1][2][1][RTW89_WW][11] = 20,
+ [2][1][2][1][RTW89_WW][18] = 20,
+ [2][1][2][1][RTW89_WW][26] = 20,
+ [2][1][2][1][RTW89_WW][33] = 20,
+ [2][1][2][1][RTW89_WW][41] = 22,
+ [2][1][2][1][RTW89_WW][48] = 22,
+ [2][1][2][1][RTW89_WW][56] = 20,
+ [2][1][2][1][RTW89_WW][63] = 22,
+ [2][1][2][1][RTW89_WW][71] = 20,
+ [2][1][2][1][RTW89_WW][78] = 20,
+ [2][1][2][1][RTW89_WW][86] = 20,
+ [2][1][2][1][RTW89_WW][93] = 22,
+ [2][1][2][1][RTW89_WW][101] = 22,
+ [2][1][2][1][RTW89_WW][108] = 0,
+ [2][1][2][1][RTW89_WW][116] = 0,
+ [3][0][2][0][RTW89_WW][7] = 38,
+ [3][0][2][0][RTW89_WW][22] = 38,
+ [3][0][2][0][RTW89_WW][37] = 38,
+ [3][0][2][0][RTW89_WW][52] = 54,
+ [3][0][2][0][RTW89_WW][67] = 54,
+ [3][0][2][0][RTW89_WW][82] = 26,
+ [3][0][2][0][RTW89_WW][97] = 26,
+ [3][0][2][0][RTW89_WW][112] = 0,
+ [3][1][2][0][RTW89_WW][7] = 32,
+ [3][1][2][0][RTW89_WW][22] = 30,
+ [3][1][2][0][RTW89_WW][37] = 30,
+ [3][1][2][0][RTW89_WW][52] = 30,
+ [3][1][2][0][RTW89_WW][67] = 32,
+ [3][1][2][0][RTW89_WW][82] = 24,
+ [3][1][2][0][RTW89_WW][97] = 14,
+ [3][1][2][0][RTW89_WW][112] = 0,
+ [3][1][2][1][RTW89_WW][7] = 32,
+ [3][1][2][1][RTW89_WW][22] = 30,
+ [3][1][2][1][RTW89_WW][37] = 30,
+ [3][1][2][1][RTW89_WW][52] = 30,
+ [3][1][2][1][RTW89_WW][67] = 32,
+ [3][1][2][1][RTW89_WW][82] = 24,
+ [3][1][2][1][RTW89_WW][97] = 14,
+ [3][1][2][1][RTW89_WW][112] = 0,
+ [0][0][1][0][RTW89_FCC][0] = 24,
+ [0][0][1][0][RTW89_ETSI][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 24,
+ [0][0][1][0][RTW89_FCC][2] = 22,
+ [0][0][1][0][RTW89_ETSI][2] = 66,
+ [0][0][1][0][RTW89_KCC][2] = 24,
+ [0][0][1][0][RTW89_FCC][4] = 22,
+ [0][0][1][0][RTW89_ETSI][4] = 66,
+ [0][0][1][0][RTW89_KCC][4] = 24,
+ [0][0][1][0][RTW89_FCC][6] = 22,
+ [0][0][1][0][RTW89_ETSI][6] = 66,
+ [0][0][1][0][RTW89_KCC][6] = 24,
+ [0][0][1][0][RTW89_FCC][8] = 22,
+ [0][0][1][0][RTW89_ETSI][8] = 66,
+ [0][0][1][0][RTW89_KCC][8] = 24,
+ [0][0][1][0][RTW89_FCC][10] = 22,
+ [0][0][1][0][RTW89_ETSI][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 24,
+ [0][0][1][0][RTW89_FCC][12] = 22,
+ [0][0][1][0][RTW89_ETSI][12] = 66,
+ [0][0][1][0][RTW89_KCC][12] = 24,
+ [0][0][1][0][RTW89_FCC][14] = 22,
+ [0][0][1][0][RTW89_ETSI][14] = 66,
+ [0][0][1][0][RTW89_KCC][14] = 24,
+ [0][0][1][0][RTW89_FCC][15] = 22,
+ [0][0][1][0][RTW89_ETSI][15] = 66,
+ [0][0][1][0][RTW89_KCC][15] = 24,
+ [0][0][1][0][RTW89_FCC][17] = 22,
+ [0][0][1][0][RTW89_ETSI][17] = 66,
+ [0][0][1][0][RTW89_KCC][17] = 24,
+ [0][0][1][0][RTW89_FCC][19] = 22,
+ [0][0][1][0][RTW89_ETSI][19] = 66,
+ [0][0][1][0][RTW89_KCC][19] = 24,
+ [0][0][1][0][RTW89_FCC][21] = 22,
+ [0][0][1][0][RTW89_ETSI][21] = 66,
+ [0][0][1][0][RTW89_KCC][21] = 24,
+ [0][0][1][0][RTW89_FCC][23] = 22,
+ [0][0][1][0][RTW89_ETSI][23] = 66,
+ [0][0][1][0][RTW89_KCC][23] = 24,
+ [0][0][1][0][RTW89_FCC][25] = 22,
+ [0][0][1][0][RTW89_ETSI][25] = 66,
+ [0][0][1][0][RTW89_KCC][25] = 24,
+ [0][0][1][0][RTW89_FCC][27] = 22,
+ [0][0][1][0][RTW89_ETSI][27] = 66,
+ [0][0][1][0][RTW89_KCC][27] = 24,
+ [0][0][1][0][RTW89_FCC][29] = 22,
+ [0][0][1][0][RTW89_ETSI][29] = 66,
+ [0][0][1][0][RTW89_KCC][29] = 24,
+ [0][0][1][0][RTW89_FCC][30] = 22,
+ [0][0][1][0][RTW89_ETSI][30] = 66,
+ [0][0][1][0][RTW89_KCC][30] = 24,
+ [0][0][1][0][RTW89_FCC][32] = 22,
+ [0][0][1][0][RTW89_ETSI][32] = 66,
+ [0][0][1][0][RTW89_KCC][32] = 24,
+ [0][0][1][0][RTW89_FCC][34] = 22,
+ [0][0][1][0][RTW89_ETSI][34] = 66,
+ [0][0][1][0][RTW89_KCC][34] = 24,
+ [0][0][1][0][RTW89_FCC][36] = 22,
+ [0][0][1][0][RTW89_ETSI][36] = 66,
+ [0][0][1][0][RTW89_KCC][36] = 24,
+ [0][0][1][0][RTW89_FCC][38] = 22,
+ [0][0][1][0][RTW89_ETSI][38] = 66,
+ [0][0][1][0][RTW89_KCC][38] = 24,
+ [0][0][1][0][RTW89_FCC][40] = 22,
+ [0][0][1][0][RTW89_ETSI][40] = 66,
+ [0][0][1][0][RTW89_KCC][40] = 24,
+ [0][0][1][0][RTW89_FCC][42] = 22,
+ [0][0][1][0][RTW89_ETSI][42] = 66,
+ [0][0][1][0][RTW89_KCC][42] = 24,
+ [0][0][1][0][RTW89_FCC][44] = 22,
+ [0][0][1][0][RTW89_ETSI][44] = 66,
+ [0][0][1][0][RTW89_KCC][44] = 24,
+ [0][0][1][0][RTW89_FCC][45] = 22,
+ [0][0][1][0][RTW89_ETSI][45] = 127,
+ [0][0][1][0][RTW89_KCC][45] = 24,
+ [0][0][1][0][RTW89_FCC][47] = 22,
+ [0][0][1][0][RTW89_ETSI][47] = 127,
+ [0][0][1][0][RTW89_KCC][47] = 24,
+ [0][0][1][0][RTW89_FCC][49] = 24,
+ [0][0][1][0][RTW89_ETSI][49] = 127,
+ [0][0][1][0][RTW89_KCC][49] = 24,
+ [0][0][1][0][RTW89_FCC][51] = 22,
+ [0][0][1][0][RTW89_ETSI][51] = 127,
+ [0][0][1][0][RTW89_KCC][51] = 24,
+ [0][0][1][0][RTW89_FCC][53] = 22,
+ [0][0][1][0][RTW89_ETSI][53] = 127,
+ [0][0][1][0][RTW89_KCC][53] = 24,
+ [0][0][1][0][RTW89_FCC][55] = 22,
+ [0][0][1][0][RTW89_ETSI][55] = 127,
+ [0][0][1][0][RTW89_KCC][55] = 26,
+ [0][0][1][0][RTW89_FCC][57] = 22,
+ [0][0][1][0][RTW89_ETSI][57] = 127,
+ [0][0][1][0][RTW89_KCC][57] = 26,
+ [0][0][1][0][RTW89_FCC][59] = 22,
+ [0][0][1][0][RTW89_ETSI][59] = 127,
+ [0][0][1][0][RTW89_KCC][59] = 26,
+ [0][0][1][0][RTW89_FCC][60] = 22,
+ [0][0][1][0][RTW89_ETSI][60] = 127,
+ [0][0][1][0][RTW89_KCC][60] = 26,
+ [0][0][1][0][RTW89_FCC][62] = 22,
+ [0][0][1][0][RTW89_ETSI][62] = 127,
+ [0][0][1][0][RTW89_KCC][62] = 26,
+ [0][0][1][0][RTW89_FCC][64] = 22,
+ [0][0][1][0][RTW89_ETSI][64] = 127,
+ [0][0][1][0][RTW89_KCC][64] = 26,
+ [0][0][1][0][RTW89_FCC][66] = 22,
+ [0][0][1][0][RTW89_ETSI][66] = 127,
+ [0][0][1][0][RTW89_KCC][66] = 26,
+ [0][0][1][0][RTW89_FCC][68] = 22,
+ [0][0][1][0][RTW89_ETSI][68] = 127,
+ [0][0][1][0][RTW89_KCC][68] = 26,
+ [0][0][1][0][RTW89_FCC][70] = 24,
+ [0][0][1][0][RTW89_ETSI][70] = 127,
+ [0][0][1][0][RTW89_KCC][70] = 26,
+ [0][0][1][0][RTW89_FCC][72] = 22,
+ [0][0][1][0][RTW89_ETSI][72] = 127,
+ [0][0][1][0][RTW89_KCC][72] = 26,
+ [0][0][1][0][RTW89_FCC][74] = 22,
+ [0][0][1][0][RTW89_ETSI][74] = 127,
+ [0][0][1][0][RTW89_KCC][74] = 26,
+ [0][0][1][0][RTW89_FCC][75] = 22,
+ [0][0][1][0][RTW89_ETSI][75] = 127,
+ [0][0][1][0][RTW89_KCC][75] = 26,
+ [0][0][1][0][RTW89_FCC][77] = 22,
+ [0][0][1][0][RTW89_ETSI][77] = 127,
+ [0][0][1][0][RTW89_KCC][77] = 26,
+ [0][0][1][0][RTW89_FCC][79] = 22,
+ [0][0][1][0][RTW89_ETSI][79] = 127,
+ [0][0][1][0][RTW89_KCC][79] = 26,
+ [0][0][1][0][RTW89_FCC][81] = 22,
+ [0][0][1][0][RTW89_ETSI][81] = 127,
+ [0][0][1][0][RTW89_KCC][81] = 26,
+ [0][0][1][0][RTW89_FCC][83] = 22,
+ [0][0][1][0][RTW89_ETSI][83] = 127,
+ [0][0][1][0][RTW89_KCC][83] = 32,
+ [0][0][1][0][RTW89_FCC][85] = 22,
+ [0][0][1][0][RTW89_ETSI][85] = 127,
+ [0][0][1][0][RTW89_KCC][85] = 32,
+ [0][0][1][0][RTW89_FCC][87] = 22,
+ [0][0][1][0][RTW89_ETSI][87] = 127,
+ [0][0][1][0][RTW89_KCC][87] = 32,
+ [0][0][1][0][RTW89_FCC][89] = 22,
+ [0][0][1][0][RTW89_ETSI][89] = 127,
+ [0][0][1][0][RTW89_KCC][89] = 32,
+ [0][0][1][0][RTW89_FCC][90] = 22,
+ [0][0][1][0][RTW89_ETSI][90] = 127,
+ [0][0][1][0][RTW89_KCC][90] = 32,
+ [0][0][1][0][RTW89_FCC][92] = 22,
+ [0][0][1][0][RTW89_ETSI][92] = 127,
+ [0][0][1][0][RTW89_KCC][92] = 32,
+ [0][0][1][0][RTW89_FCC][94] = 22,
+ [0][0][1][0][RTW89_ETSI][94] = 127,
+ [0][0][1][0][RTW89_KCC][94] = 32,
+ [0][0][1][0][RTW89_FCC][96] = 22,
+ [0][0][1][0][RTW89_ETSI][96] = 127,
+ [0][0][1][0][RTW89_KCC][96] = 32,
+ [0][0][1][0][RTW89_FCC][98] = 22,
+ [0][0][1][0][RTW89_ETSI][98] = 127,
+ [0][0][1][0][RTW89_KCC][98] = 32,
+ [0][0][1][0][RTW89_FCC][100] = 22,
+ [0][0][1][0][RTW89_ETSI][100] = 127,
+ [0][0][1][0][RTW89_KCC][100] = 32,
+ [0][0][1][0][RTW89_FCC][102] = 22,
+ [0][0][1][0][RTW89_ETSI][102] = 127,
+ [0][0][1][0][RTW89_KCC][102] = 32,
+ [0][0][1][0][RTW89_FCC][104] = 22,
+ [0][0][1][0][RTW89_ETSI][104] = 127,
+ [0][0][1][0][RTW89_KCC][104] = 32,
+ [0][0][1][0][RTW89_FCC][105] = 22,
+ [0][0][1][0][RTW89_ETSI][105] = 127,
+ [0][0][1][0][RTW89_KCC][105] = 32,
+ [0][0][1][0][RTW89_FCC][107] = 24,
+ [0][0][1][0][RTW89_ETSI][107] = 127,
+ [0][0][1][0][RTW89_KCC][107] = 32,
+ [0][0][1][0][RTW89_FCC][109] = 24,
+ [0][0][1][0][RTW89_ETSI][109] = 127,
+ [0][0][1][0][RTW89_KCC][109] = 32,
+ [0][0][1][0][RTW89_FCC][111] = 127,
+ [0][0][1][0][RTW89_ETSI][111] = 127,
+ [0][0][1][0][RTW89_KCC][111] = 127,
+ [0][0][1][0][RTW89_FCC][113] = 127,
+ [0][0][1][0][RTW89_ETSI][113] = 127,
+ [0][0][1][0][RTW89_KCC][113] = 127,
+ [0][0][1][0][RTW89_FCC][115] = 127,
+ [0][0][1][0][RTW89_ETSI][115] = 127,
+ [0][0][1][0][RTW89_KCC][115] = 127,
+ [0][0][1][0][RTW89_FCC][117] = 127,
+ [0][0][1][0][RTW89_ETSI][117] = 127,
+ [0][0][1][0][RTW89_KCC][117] = 127,
+ [0][0][1][0][RTW89_FCC][119] = 127,
+ [0][0][1][0][RTW89_ETSI][119] = 127,
+ [0][0][1][0][RTW89_KCC][119] = 127,
+ [0][1][1][0][RTW89_FCC][0] = -2,
+ [0][1][1][0][RTW89_ETSI][0] = 54,
+ [0][1][1][0][RTW89_KCC][0] = 12,
+ [0][1][1][0][RTW89_FCC][2] = -4,
+ [0][1][1][0][RTW89_ETSI][2] = 54,
+ [0][1][1][0][RTW89_KCC][2] = 12,
+ [0][1][1][0][RTW89_FCC][4] = -4,
+ [0][1][1][0][RTW89_ETSI][4] = 54,
+ [0][1][1][0][RTW89_KCC][4] = 12,
+ [0][1][1][0][RTW89_FCC][6] = -4,
+ [0][1][1][0][RTW89_ETSI][6] = 54,
+ [0][1][1][0][RTW89_KCC][6] = 12,
+ [0][1][1][0][RTW89_FCC][8] = -4,
+ [0][1][1][0][RTW89_ETSI][8] = 54,
+ [0][1][1][0][RTW89_KCC][8] = 12,
+ [0][1][1][0][RTW89_FCC][10] = -4,
+ [0][1][1][0][RTW89_ETSI][10] = 54,
+ [0][1][1][0][RTW89_KCC][10] = 12,
+ [0][1][1][0][RTW89_FCC][12] = -4,
+ [0][1][1][0][RTW89_ETSI][12] = 54,
+ [0][1][1][0][RTW89_KCC][12] = 12,
+ [0][1][1][0][RTW89_FCC][14] = -4,
+ [0][1][1][0][RTW89_ETSI][14] = 54,
+ [0][1][1][0][RTW89_KCC][14] = 12,
+ [0][1][1][0][RTW89_FCC][15] = -4,
+ [0][1][1][0][RTW89_ETSI][15] = 54,
+ [0][1][1][0][RTW89_KCC][15] = 12,
+ [0][1][1][0][RTW89_FCC][17] = -4,
+ [0][1][1][0][RTW89_ETSI][17] = 54,
+ [0][1][1][0][RTW89_KCC][17] = 12,
+ [0][1][1][0][RTW89_FCC][19] = -4,
+ [0][1][1][0][RTW89_ETSI][19] = 54,
+ [0][1][1][0][RTW89_KCC][19] = 12,
+ [0][1][1][0][RTW89_FCC][21] = -4,
+ [0][1][1][0][RTW89_ETSI][21] = 54,
+ [0][1][1][0][RTW89_KCC][21] = 12,
+ [0][1][1][0][RTW89_FCC][23] = -4,
+ [0][1][1][0][RTW89_ETSI][23] = 54,
+ [0][1][1][0][RTW89_KCC][23] = 12,
+ [0][1][1][0][RTW89_FCC][25] = -4,
+ [0][1][1][0][RTW89_ETSI][25] = 54,
+ [0][1][1][0][RTW89_KCC][25] = 12,
+ [0][1][1][0][RTW89_FCC][27] = -4,
+ [0][1][1][0][RTW89_ETSI][27] = 54,
+ [0][1][1][0][RTW89_KCC][27] = 12,
+ [0][1][1][0][RTW89_FCC][29] = -4,
+ [0][1][1][0][RTW89_ETSI][29] = 54,
+ [0][1][1][0][RTW89_KCC][29] = 12,
+ [0][1][1][0][RTW89_FCC][30] = -4,
+ [0][1][1][0][RTW89_ETSI][30] = 54,
+ [0][1][1][0][RTW89_KCC][30] = 12,
+ [0][1][1][0][RTW89_FCC][32] = -4,
+ [0][1][1][0][RTW89_ETSI][32] = 54,
+ [0][1][1][0][RTW89_KCC][32] = 12,
+ [0][1][1][0][RTW89_FCC][34] = -4,
+ [0][1][1][0][RTW89_ETSI][34] = 54,
+ [0][1][1][0][RTW89_KCC][34] = 12,
+ [0][1][1][0][RTW89_FCC][36] = -4,
+ [0][1][1][0][RTW89_ETSI][36] = 54,
+ [0][1][1][0][RTW89_KCC][36] = 12,
+ [0][1][1][0][RTW89_FCC][38] = -4,
+ [0][1][1][0][RTW89_ETSI][38] = 54,
+ [0][1][1][0][RTW89_KCC][38] = 12,
+ [0][1][1][0][RTW89_FCC][40] = -4,
+ [0][1][1][0][RTW89_ETSI][40] = 54,
+ [0][1][1][0][RTW89_KCC][40] = 12,
+ [0][1][1][0][RTW89_FCC][42] = -4,
+ [0][1][1][0][RTW89_ETSI][42] = 54,
+ [0][1][1][0][RTW89_KCC][42] = 12,
+ [0][1][1][0][RTW89_FCC][44] = -2,
+ [0][1][1][0][RTW89_ETSI][44] = 54,
+ [0][1][1][0][RTW89_KCC][44] = 12,
+ [0][1][1][0][RTW89_FCC][45] = -2,
+ [0][1][1][0][RTW89_ETSI][45] = 127,
+ [0][1][1][0][RTW89_KCC][45] = 12,
+ [0][1][1][0][RTW89_FCC][47] = -2,
+ [0][1][1][0][RTW89_ETSI][47] = 127,
+ [0][1][1][0][RTW89_KCC][47] = 12,
+ [0][1][1][0][RTW89_FCC][49] = -2,
+ [0][1][1][0][RTW89_ETSI][49] = 127,
+ [0][1][1][0][RTW89_KCC][49] = 12,
+ [0][1][1][0][RTW89_FCC][51] = -2,
+ [0][1][1][0][RTW89_ETSI][51] = 127,
+ [0][1][1][0][RTW89_KCC][51] = 12,
+ [0][1][1][0][RTW89_FCC][53] = -2,
+ [0][1][1][0][RTW89_ETSI][53] = 127,
+ [0][1][1][0][RTW89_KCC][53] = 12,
+ [0][1][1][0][RTW89_FCC][55] = -2,
+ [0][1][1][0][RTW89_ETSI][55] = 127,
+ [0][1][1][0][RTW89_KCC][55] = 12,
+ [0][1][1][0][RTW89_FCC][57] = -2,
+ [0][1][1][0][RTW89_ETSI][57] = 127,
+ [0][1][1][0][RTW89_KCC][57] = 12,
+ [0][1][1][0][RTW89_FCC][59] = -2,
+ [0][1][1][0][RTW89_ETSI][59] = 127,
+ [0][1][1][0][RTW89_KCC][59] = 12,
+ [0][1][1][0][RTW89_FCC][60] = -2,
+ [0][1][1][0][RTW89_ETSI][60] = 127,
+ [0][1][1][0][RTW89_KCC][60] = 12,
+ [0][1][1][0][RTW89_FCC][62] = -2,
+ [0][1][1][0][RTW89_ETSI][62] = 127,
+ [0][1][1][0][RTW89_KCC][62] = 12,
+ [0][1][1][0][RTW89_FCC][64] = -2,
+ [0][1][1][0][RTW89_ETSI][64] = 127,
+ [0][1][1][0][RTW89_KCC][64] = 12,
+ [0][1][1][0][RTW89_FCC][66] = -2,
+ [0][1][1][0][RTW89_ETSI][66] = 127,
+ [0][1][1][0][RTW89_KCC][66] = 12,
+ [0][1][1][0][RTW89_FCC][68] = -2,
+ [0][1][1][0][RTW89_ETSI][68] = 127,
+ [0][1][1][0][RTW89_KCC][68] = 12,
+ [0][1][1][0][RTW89_FCC][70] = -2,
+ [0][1][1][0][RTW89_ETSI][70] = 127,
+ [0][1][1][0][RTW89_KCC][70] = 12,
+ [0][1][1][0][RTW89_FCC][72] = -2,
+ [0][1][1][0][RTW89_ETSI][72] = 127,
+ [0][1][1][0][RTW89_KCC][72] = 12,
+ [0][1][1][0][RTW89_FCC][74] = -2,
+ [0][1][1][0][RTW89_ETSI][74] = 127,
+ [0][1][1][0][RTW89_KCC][74] = 12,
+ [0][1][1][0][RTW89_FCC][75] = -2,
+ [0][1][1][0][RTW89_ETSI][75] = 127,
+ [0][1][1][0][RTW89_KCC][75] = 12,
+ [0][1][1][0][RTW89_FCC][77] = -2,
+ [0][1][1][0][RTW89_ETSI][77] = 127,
+ [0][1][1][0][RTW89_KCC][77] = 12,
+ [0][1][1][0][RTW89_FCC][79] = -2,
+ [0][1][1][0][RTW89_ETSI][79] = 127,
+ [0][1][1][0][RTW89_KCC][79] = 12,
+ [0][1][1][0][RTW89_FCC][81] = -2,
+ [0][1][1][0][RTW89_ETSI][81] = 127,
+ [0][1][1][0][RTW89_KCC][81] = 12,
+ [0][1][1][0][RTW89_FCC][83] = -2,
+ [0][1][1][0][RTW89_ETSI][83] = 127,
+ [0][1][1][0][RTW89_KCC][83] = 20,
+ [0][1][1][0][RTW89_FCC][85] = -2,
+ [0][1][1][0][RTW89_ETSI][85] = 127,
+ [0][1][1][0][RTW89_KCC][85] = 20,
+ [0][1][1][0][RTW89_FCC][87] = -2,
+ [0][1][1][0][RTW89_ETSI][87] = 127,
+ [0][1][1][0][RTW89_KCC][87] = 20,
+ [0][1][1][0][RTW89_FCC][89] = -2,
+ [0][1][1][0][RTW89_ETSI][89] = 127,
+ [0][1][1][0][RTW89_KCC][89] = 20,
+ [0][1][1][0][RTW89_FCC][90] = -2,
+ [0][1][1][0][RTW89_ETSI][90] = 127,
+ [0][1][1][0][RTW89_KCC][90] = 20,
+ [0][1][1][0][RTW89_FCC][92] = -2,
+ [0][1][1][0][RTW89_ETSI][92] = 127,
+ [0][1][1][0][RTW89_KCC][92] = 20,
+ [0][1][1][0][RTW89_FCC][94] = -2,
+ [0][1][1][0][RTW89_ETSI][94] = 127,
+ [0][1][1][0][RTW89_KCC][94] = 20,
+ [0][1][1][0][RTW89_FCC][96] = -2,
+ [0][1][1][0][RTW89_ETSI][96] = 127,
+ [0][1][1][0][RTW89_KCC][96] = 20,
+ [0][1][1][0][RTW89_FCC][98] = -2,
+ [0][1][1][0][RTW89_ETSI][98] = 127,
+ [0][1][1][0][RTW89_KCC][98] = 20,
+ [0][1][1][0][RTW89_FCC][100] = -2,
+ [0][1][1][0][RTW89_ETSI][100] = 127,
+ [0][1][1][0][RTW89_KCC][100] = 20,
+ [0][1][1][0][RTW89_FCC][102] = -2,
+ [0][1][1][0][RTW89_ETSI][102] = 127,
+ [0][1][1][0][RTW89_KCC][102] = 20,
+ [0][1][1][0][RTW89_FCC][104] = -2,
+ [0][1][1][0][RTW89_ETSI][104] = 127,
+ [0][1][1][0][RTW89_KCC][104] = 20,
+ [0][1][1][0][RTW89_FCC][105] = -2,
+ [0][1][1][0][RTW89_ETSI][105] = 127,
+ [0][1][1][0][RTW89_KCC][105] = 20,
+ [0][1][1][0][RTW89_FCC][107] = 0,
+ [0][1][1][0][RTW89_ETSI][107] = 127,
+ [0][1][1][0][RTW89_KCC][107] = 20,
+ [0][1][1][0][RTW89_FCC][109] = 0,
+ [0][1][1][0][RTW89_ETSI][109] = 127,
+ [0][1][1][0][RTW89_KCC][109] = 20,
+ [0][1][1][0][RTW89_FCC][111] = 127,
+ [0][1][1][0][RTW89_ETSI][111] = 127,
+ [0][1][1][0][RTW89_KCC][111] = 127,
+ [0][1][1][0][RTW89_FCC][113] = 127,
+ [0][1][1][0][RTW89_ETSI][113] = 127,
+ [0][1][1][0][RTW89_KCC][113] = 127,
+ [0][1][1][0][RTW89_FCC][115] = 127,
+ [0][1][1][0][RTW89_ETSI][115] = 127,
+ [0][1][1][0][RTW89_KCC][115] = 127,
+ [0][1][1][0][RTW89_FCC][117] = 127,
+ [0][1][1][0][RTW89_ETSI][117] = 127,
+ [0][1][1][0][RTW89_KCC][117] = 127,
+ [0][1][1][0][RTW89_FCC][119] = 127,
+ [0][1][1][0][RTW89_ETSI][119] = 127,
+ [0][1][1][0][RTW89_KCC][119] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 24,
+ [0][0][2][0][RTW89_ETSI][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 24,
+ [0][0][2][0][RTW89_FCC][2] = 22,
+ [0][0][2][0][RTW89_ETSI][2] = 66,
+ [0][0][2][0][RTW89_KCC][2] = 24,
+ [0][0][2][0][RTW89_FCC][4] = 22,
+ [0][0][2][0][RTW89_ETSI][4] = 66,
+ [0][0][2][0][RTW89_KCC][4] = 24,
+ [0][0][2][0][RTW89_FCC][6] = 22,
+ [0][0][2][0][RTW89_ETSI][6] = 66,
+ [0][0][2][0][RTW89_KCC][6] = 24,
+ [0][0][2][0][RTW89_FCC][8] = 22,
+ [0][0][2][0][RTW89_ETSI][8] = 66,
+ [0][0][2][0][RTW89_KCC][8] = 24,
+ [0][0][2][0][RTW89_FCC][10] = 22,
+ [0][0][2][0][RTW89_ETSI][10] = 66,
+ [0][0][2][0][RTW89_KCC][10] = 24,
+ [0][0][2][0][RTW89_FCC][12] = 22,
+ [0][0][2][0][RTW89_ETSI][12] = 66,
+ [0][0][2][0][RTW89_KCC][12] = 24,
+ [0][0][2][0][RTW89_FCC][14] = 22,
+ [0][0][2][0][RTW89_ETSI][14] = 66,
+ [0][0][2][0][RTW89_KCC][14] = 24,
+ [0][0][2][0][RTW89_FCC][15] = 22,
+ [0][0][2][0][RTW89_ETSI][15] = 66,
+ [0][0][2][0][RTW89_KCC][15] = 24,
+ [0][0][2][0][RTW89_FCC][17] = 22,
+ [0][0][2][0][RTW89_ETSI][17] = 66,
+ [0][0][2][0][RTW89_KCC][17] = 24,
+ [0][0][2][0][RTW89_FCC][19] = 22,
+ [0][0][2][0][RTW89_ETSI][19] = 66,
+ [0][0][2][0][RTW89_KCC][19] = 24,
+ [0][0][2][0][RTW89_FCC][21] = 22,
+ [0][0][2][0][RTW89_ETSI][21] = 66,
+ [0][0][2][0][RTW89_KCC][21] = 24,
+ [0][0][2][0][RTW89_FCC][23] = 22,
+ [0][0][2][0][RTW89_ETSI][23] = 66,
+ [0][0][2][0][RTW89_KCC][23] = 24,
+ [0][0][2][0][RTW89_FCC][25] = 22,
+ [0][0][2][0][RTW89_ETSI][25] = 66,
+ [0][0][2][0][RTW89_KCC][25] = 24,
+ [0][0][2][0][RTW89_FCC][27] = 22,
+ [0][0][2][0][RTW89_ETSI][27] = 66,
+ [0][0][2][0][RTW89_KCC][27] = 24,
+ [0][0][2][0][RTW89_FCC][29] = 22,
+ [0][0][2][0][RTW89_ETSI][29] = 66,
+ [0][0][2][0][RTW89_KCC][29] = 24,
+ [0][0][2][0][RTW89_FCC][30] = 22,
+ [0][0][2][0][RTW89_ETSI][30] = 66,
+ [0][0][2][0][RTW89_KCC][30] = 24,
+ [0][0][2][0][RTW89_FCC][32] = 22,
+ [0][0][2][0][RTW89_ETSI][32] = 66,
+ [0][0][2][0][RTW89_KCC][32] = 24,
+ [0][0][2][0][RTW89_FCC][34] = 22,
+ [0][0][2][0][RTW89_ETSI][34] = 66,
+ [0][0][2][0][RTW89_KCC][34] = 24,
+ [0][0][2][0][RTW89_FCC][36] = 22,
+ [0][0][2][0][RTW89_ETSI][36] = 66,
+ [0][0][2][0][RTW89_KCC][36] = 24,
+ [0][0][2][0][RTW89_FCC][38] = 22,
+ [0][0][2][0][RTW89_ETSI][38] = 66,
+ [0][0][2][0][RTW89_KCC][38] = 24,
+ [0][0][2][0][RTW89_FCC][40] = 22,
+ [0][0][2][0][RTW89_ETSI][40] = 66,
+ [0][0][2][0][RTW89_KCC][40] = 24,
+ [0][0][2][0][RTW89_FCC][42] = 22,
+ [0][0][2][0][RTW89_ETSI][42] = 66,
+ [0][0][2][0][RTW89_KCC][42] = 24,
+ [0][0][2][0][RTW89_FCC][44] = 22,
+ [0][0][2][0][RTW89_ETSI][44] = 66,
+ [0][0][2][0][RTW89_KCC][44] = 24,
+ [0][0][2][0][RTW89_FCC][45] = 22,
+ [0][0][2][0][RTW89_ETSI][45] = 127,
+ [0][0][2][0][RTW89_KCC][45] = 24,
+ [0][0][2][0][RTW89_FCC][47] = 22,
+ [0][0][2][0][RTW89_ETSI][47] = 127,
+ [0][0][2][0][RTW89_KCC][47] = 24,
+ [0][0][2][0][RTW89_FCC][49] = 24,
+ [0][0][2][0][RTW89_ETSI][49] = 127,
+ [0][0][2][0][RTW89_KCC][49] = 24,
+ [0][0][2][0][RTW89_FCC][51] = 22,
+ [0][0][2][0][RTW89_ETSI][51] = 127,
+ [0][0][2][0][RTW89_KCC][51] = 24,
+ [0][0][2][0][RTW89_FCC][53] = 22,
+ [0][0][2][0][RTW89_ETSI][53] = 127,
+ [0][0][2][0][RTW89_KCC][53] = 24,
+ [0][0][2][0][RTW89_FCC][55] = 22,
+ [0][0][2][0][RTW89_ETSI][55] = 127,
+ [0][0][2][0][RTW89_KCC][55] = 26,
+ [0][0][2][0][RTW89_FCC][57] = 22,
+ [0][0][2][0][RTW89_ETSI][57] = 127,
+ [0][0][2][0][RTW89_KCC][57] = 26,
+ [0][0][2][0][RTW89_FCC][59] = 22,
+ [0][0][2][0][RTW89_ETSI][59] = 127,
+ [0][0][2][0][RTW89_KCC][59] = 26,
+ [0][0][2][0][RTW89_FCC][60] = 22,
+ [0][0][2][0][RTW89_ETSI][60] = 127,
+ [0][0][2][0][RTW89_KCC][60] = 26,
+ [0][0][2][0][RTW89_FCC][62] = 22,
+ [0][0][2][0][RTW89_ETSI][62] = 127,
+ [0][0][2][0][RTW89_KCC][62] = 26,
+ [0][0][2][0][RTW89_FCC][64] = 22,
+ [0][0][2][0][RTW89_ETSI][64] = 127,
+ [0][0][2][0][RTW89_KCC][64] = 26,
+ [0][0][2][0][RTW89_FCC][66] = 22,
+ [0][0][2][0][RTW89_ETSI][66] = 127,
+ [0][0][2][0][RTW89_KCC][66] = 26,
+ [0][0][2][0][RTW89_FCC][68] = 22,
+ [0][0][2][0][RTW89_ETSI][68] = 127,
+ [0][0][2][0][RTW89_KCC][68] = 26,
+ [0][0][2][0][RTW89_FCC][70] = 24,
+ [0][0][2][0][RTW89_ETSI][70] = 127,
+ [0][0][2][0][RTW89_KCC][70] = 26,
+ [0][0][2][0][RTW89_FCC][72] = 22,
+ [0][0][2][0][RTW89_ETSI][72] = 127,
+ [0][0][2][0][RTW89_KCC][72] = 26,
+ [0][0][2][0][RTW89_FCC][74] = 22,
+ [0][0][2][0][RTW89_ETSI][74] = 127,
+ [0][0][2][0][RTW89_KCC][74] = 26,
+ [0][0][2][0][RTW89_FCC][75] = 22,
+ [0][0][2][0][RTW89_ETSI][75] = 127,
+ [0][0][2][0][RTW89_KCC][75] = 26,
+ [0][0][2][0][RTW89_FCC][77] = 22,
+ [0][0][2][0][RTW89_ETSI][77] = 127,
+ [0][0][2][0][RTW89_KCC][77] = 26,
+ [0][0][2][0][RTW89_FCC][79] = 22,
+ [0][0][2][0][RTW89_ETSI][79] = 127,
+ [0][0][2][0][RTW89_KCC][79] = 26,
+ [0][0][2][0][RTW89_FCC][81] = 22,
+ [0][0][2][0][RTW89_ETSI][81] = 127,
+ [0][0][2][0][RTW89_KCC][81] = 26,
+ [0][0][2][0][RTW89_FCC][83] = 22,
+ [0][0][2][0][RTW89_ETSI][83] = 127,
+ [0][0][2][0][RTW89_KCC][83] = 32,
+ [0][0][2][0][RTW89_FCC][85] = 22,
+ [0][0][2][0][RTW89_ETSI][85] = 127,
+ [0][0][2][0][RTW89_KCC][85] = 32,
+ [0][0][2][0][RTW89_FCC][87] = 22,
+ [0][0][2][0][RTW89_ETSI][87] = 127,
+ [0][0][2][0][RTW89_KCC][87] = 32,
+ [0][0][2][0][RTW89_FCC][89] = 22,
+ [0][0][2][0][RTW89_ETSI][89] = 127,
+ [0][0][2][0][RTW89_KCC][89] = 32,
+ [0][0][2][0][RTW89_FCC][90] = 22,
+ [0][0][2][0][RTW89_ETSI][90] = 127,
+ [0][0][2][0][RTW89_KCC][90] = 32,
+ [0][0][2][0][RTW89_FCC][92] = 22,
+ [0][0][2][0][RTW89_ETSI][92] = 127,
+ [0][0][2][0][RTW89_KCC][92] = 32,
+ [0][0][2][0][RTW89_FCC][94] = 22,
+ [0][0][2][0][RTW89_ETSI][94] = 127,
+ [0][0][2][0][RTW89_KCC][94] = 32,
+ [0][0][2][0][RTW89_FCC][96] = 22,
+ [0][0][2][0][RTW89_ETSI][96] = 127,
+ [0][0][2][0][RTW89_KCC][96] = 32,
+ [0][0][2][0][RTW89_FCC][98] = 22,
+ [0][0][2][0][RTW89_ETSI][98] = 127,
+ [0][0][2][0][RTW89_KCC][98] = 32,
+ [0][0][2][0][RTW89_FCC][100] = 22,
+ [0][0][2][0][RTW89_ETSI][100] = 127,
+ [0][0][2][0][RTW89_KCC][100] = 32,
+ [0][0][2][0][RTW89_FCC][102] = 22,
+ [0][0][2][0][RTW89_ETSI][102] = 127,
+ [0][0][2][0][RTW89_KCC][102] = 32,
+ [0][0][2][0][RTW89_FCC][104] = 22,
+ [0][0][2][0][RTW89_ETSI][104] = 127,
+ [0][0][2][0][RTW89_KCC][104] = 32,
+ [0][0][2][0][RTW89_FCC][105] = 22,
+ [0][0][2][0][RTW89_ETSI][105] = 127,
+ [0][0][2][0][RTW89_KCC][105] = 32,
+ [0][0][2][0][RTW89_FCC][107] = 24,
+ [0][0][2][0][RTW89_ETSI][107] = 127,
+ [0][0][2][0][RTW89_KCC][107] = 32,
+ [0][0][2][0][RTW89_FCC][109] = 24,
+ [0][0][2][0][RTW89_ETSI][109] = 127,
+ [0][0][2][0][RTW89_KCC][109] = 32,
+ [0][0][2][0][RTW89_FCC][111] = 127,
+ [0][0][2][0][RTW89_ETSI][111] = 127,
+ [0][0][2][0][RTW89_KCC][111] = 127,
+ [0][0][2][0][RTW89_FCC][113] = 127,
+ [0][0][2][0][RTW89_ETSI][113] = 127,
+ [0][0][2][0][RTW89_KCC][113] = 127,
+ [0][0][2][0][RTW89_FCC][115] = 127,
+ [0][0][2][0][RTW89_ETSI][115] = 127,
+ [0][0][2][0][RTW89_KCC][115] = 127,
+ [0][0][2][0][RTW89_FCC][117] = 127,
+ [0][0][2][0][RTW89_ETSI][117] = 127,
+ [0][0][2][0][RTW89_KCC][117] = 127,
+ [0][0][2][0][RTW89_FCC][119] = 127,
+ [0][0][2][0][RTW89_ETSI][119] = 127,
+ [0][0][2][0][RTW89_KCC][119] = 127,
+ [0][1][2][0][RTW89_FCC][0] = -2,
+ [0][1][2][0][RTW89_ETSI][0] = 54,
+ [0][1][2][0][RTW89_KCC][0] = 12,
+ [0][1][2][0][RTW89_FCC][2] = -4,
+ [0][1][2][0][RTW89_ETSI][2] = 54,
+ [0][1][2][0][RTW89_KCC][2] = 12,
+ [0][1][2][0][RTW89_FCC][4] = -4,
+ [0][1][2][0][RTW89_ETSI][4] = 54,
+ [0][1][2][0][RTW89_KCC][4] = 12,
+ [0][1][2][0][RTW89_FCC][6] = -4,
+ [0][1][2][0][RTW89_ETSI][6] = 54,
+ [0][1][2][0][RTW89_KCC][6] = 12,
+ [0][1][2][0][RTW89_FCC][8] = -4,
+ [0][1][2][0][RTW89_ETSI][8] = 54,
+ [0][1][2][0][RTW89_KCC][8] = 12,
+ [0][1][2][0][RTW89_FCC][10] = -4,
+ [0][1][2][0][RTW89_ETSI][10] = 54,
+ [0][1][2][0][RTW89_KCC][10] = 12,
+ [0][1][2][0][RTW89_FCC][12] = -4,
+ [0][1][2][0][RTW89_ETSI][12] = 54,
+ [0][1][2][0][RTW89_KCC][12] = 12,
+ [0][1][2][0][RTW89_FCC][14] = -4,
+ [0][1][2][0][RTW89_ETSI][14] = 54,
+ [0][1][2][0][RTW89_KCC][14] = 12,
+ [0][1][2][0][RTW89_FCC][15] = -4,
+ [0][1][2][0][RTW89_ETSI][15] = 54,
+ [0][1][2][0][RTW89_KCC][15] = 12,
+ [0][1][2][0][RTW89_FCC][17] = -4,
+ [0][1][2][0][RTW89_ETSI][17] = 54,
+ [0][1][2][0][RTW89_KCC][17] = 12,
+ [0][1][2][0][RTW89_FCC][19] = -4,
+ [0][1][2][0][RTW89_ETSI][19] = 54,
+ [0][1][2][0][RTW89_KCC][19] = 12,
+ [0][1][2][0][RTW89_FCC][21] = -4,
+ [0][1][2][0][RTW89_ETSI][21] = 54,
+ [0][1][2][0][RTW89_KCC][21] = 12,
+ [0][1][2][0][RTW89_FCC][23] = -4,
+ [0][1][2][0][RTW89_ETSI][23] = 54,
+ [0][1][2][0][RTW89_KCC][23] = 12,
+ [0][1][2][0][RTW89_FCC][25] = -4,
+ [0][1][2][0][RTW89_ETSI][25] = 54,
+ [0][1][2][0][RTW89_KCC][25] = 12,
+ [0][1][2][0][RTW89_FCC][27] = -4,
+ [0][1][2][0][RTW89_ETSI][27] = 54,
+ [0][1][2][0][RTW89_KCC][27] = 12,
+ [0][1][2][0][RTW89_FCC][29] = -4,
+ [0][1][2][0][RTW89_ETSI][29] = 54,
+ [0][1][2][0][RTW89_KCC][29] = 12,
+ [0][1][2][0][RTW89_FCC][30] = -4,
+ [0][1][2][0][RTW89_ETSI][30] = 54,
+ [0][1][2][0][RTW89_KCC][30] = 12,
+ [0][1][2][0][RTW89_FCC][32] = -4,
+ [0][1][2][0][RTW89_ETSI][32] = 54,
+ [0][1][2][0][RTW89_KCC][32] = 12,
+ [0][1][2][0][RTW89_FCC][34] = -4,
+ [0][1][2][0][RTW89_ETSI][34] = 54,
+ [0][1][2][0][RTW89_KCC][34] = 12,
+ [0][1][2][0][RTW89_FCC][36] = -4,
+ [0][1][2][0][RTW89_ETSI][36] = 54,
+ [0][1][2][0][RTW89_KCC][36] = 12,
+ [0][1][2][0][RTW89_FCC][38] = -4,
+ [0][1][2][0][RTW89_ETSI][38] = 54,
+ [0][1][2][0][RTW89_KCC][38] = 12,
+ [0][1][2][0][RTW89_FCC][40] = -4,
+ [0][1][2][0][RTW89_ETSI][40] = 54,
+ [0][1][2][0][RTW89_KCC][40] = 12,
+ [0][1][2][0][RTW89_FCC][42] = -4,
+ [0][1][2][0][RTW89_ETSI][42] = 54,
+ [0][1][2][0][RTW89_KCC][42] = 12,
+ [0][1][2][0][RTW89_FCC][44] = -2,
+ [0][1][2][0][RTW89_ETSI][44] = 54,
+ [0][1][2][0][RTW89_KCC][44] = 12,
+ [0][1][2][0][RTW89_FCC][45] = -2,
+ [0][1][2][0][RTW89_ETSI][45] = 127,
+ [0][1][2][0][RTW89_KCC][45] = 12,
+ [0][1][2][0][RTW89_FCC][47] = -2,
+ [0][1][2][0][RTW89_ETSI][47] = 127,
+ [0][1][2][0][RTW89_KCC][47] = 12,
+ [0][1][2][0][RTW89_FCC][49] = -2,
+ [0][1][2][0][RTW89_ETSI][49] = 127,
+ [0][1][2][0][RTW89_KCC][49] = 12,
+ [0][1][2][0][RTW89_FCC][51] = -2,
+ [0][1][2][0][RTW89_ETSI][51] = 127,
+ [0][1][2][0][RTW89_KCC][51] = 12,
+ [0][1][2][0][RTW89_FCC][53] = -2,
+ [0][1][2][0][RTW89_ETSI][53] = 127,
+ [0][1][2][0][RTW89_KCC][53] = 12,
+ [0][1][2][0][RTW89_FCC][55] = -2,
+ [0][1][2][0][RTW89_ETSI][55] = 127,
+ [0][1][2][0][RTW89_KCC][55] = 12,
+ [0][1][2][0][RTW89_FCC][57] = -2,
+ [0][1][2][0][RTW89_ETSI][57] = 127,
+ [0][1][2][0][RTW89_KCC][57] = 12,
+ [0][1][2][0][RTW89_FCC][59] = -2,
+ [0][1][2][0][RTW89_ETSI][59] = 127,
+ [0][1][2][0][RTW89_KCC][59] = 12,
+ [0][1][2][0][RTW89_FCC][60] = -2,
+ [0][1][2][0][RTW89_ETSI][60] = 127,
+ [0][1][2][0][RTW89_KCC][60] = 12,
+ [0][1][2][0][RTW89_FCC][62] = -2,
+ [0][1][2][0][RTW89_ETSI][62] = 127,
+ [0][1][2][0][RTW89_KCC][62] = 12,
+ [0][1][2][0][RTW89_FCC][64] = -2,
+ [0][1][2][0][RTW89_ETSI][64] = 127,
+ [0][1][2][0][RTW89_KCC][64] = 12,
+ [0][1][2][0][RTW89_FCC][66] = -2,
+ [0][1][2][0][RTW89_ETSI][66] = 127,
+ [0][1][2][0][RTW89_KCC][66] = 12,
+ [0][1][2][0][RTW89_FCC][68] = -2,
+ [0][1][2][0][RTW89_ETSI][68] = 127,
+ [0][1][2][0][RTW89_KCC][68] = 12,
+ [0][1][2][0][RTW89_FCC][70] = -2,
+ [0][1][2][0][RTW89_ETSI][70] = 127,
+ [0][1][2][0][RTW89_KCC][70] = 12,
+ [0][1][2][0][RTW89_FCC][72] = -2,
+ [0][1][2][0][RTW89_ETSI][72] = 127,
+ [0][1][2][0][RTW89_KCC][72] = 12,
+ [0][1][2][0][RTW89_FCC][74] = -2,
+ [0][1][2][0][RTW89_ETSI][74] = 127,
+ [0][1][2][0][RTW89_KCC][74] = 12,
+ [0][1][2][0][RTW89_FCC][75] = -2,
+ [0][1][2][0][RTW89_ETSI][75] = 127,
+ [0][1][2][0][RTW89_KCC][75] = 12,
+ [0][1][2][0][RTW89_FCC][77] = -2,
+ [0][1][2][0][RTW89_ETSI][77] = 127,
+ [0][1][2][0][RTW89_KCC][77] = 12,
+ [0][1][2][0][RTW89_FCC][79] = -2,
+ [0][1][2][0][RTW89_ETSI][79] = 127,
+ [0][1][2][0][RTW89_KCC][79] = 12,
+ [0][1][2][0][RTW89_FCC][81] = -2,
+ [0][1][2][0][RTW89_ETSI][81] = 127,
+ [0][1][2][0][RTW89_KCC][81] = 12,
+ [0][1][2][0][RTW89_FCC][83] = -2,
+ [0][1][2][0][RTW89_ETSI][83] = 127,
+ [0][1][2][0][RTW89_KCC][83] = 20,
+ [0][1][2][0][RTW89_FCC][85] = -2,
+ [0][1][2][0][RTW89_ETSI][85] = 127,
+ [0][1][2][0][RTW89_KCC][85] = 20,
+ [0][1][2][0][RTW89_FCC][87] = -2,
+ [0][1][2][0][RTW89_ETSI][87] = 127,
+ [0][1][2][0][RTW89_KCC][87] = 20,
+ [0][1][2][0][RTW89_FCC][89] = -2,
+ [0][1][2][0][RTW89_ETSI][89] = 127,
+ [0][1][2][0][RTW89_KCC][89] = 20,
+ [0][1][2][0][RTW89_FCC][90] = -2,
+ [0][1][2][0][RTW89_ETSI][90] = 127,
+ [0][1][2][0][RTW89_KCC][90] = 20,
+ [0][1][2][0][RTW89_FCC][92] = -2,
+ [0][1][2][0][RTW89_ETSI][92] = 127,
+ [0][1][2][0][RTW89_KCC][92] = 20,
+ [0][1][2][0][RTW89_FCC][94] = -2,
+ [0][1][2][0][RTW89_ETSI][94] = 127,
+ [0][1][2][0][RTW89_KCC][94] = 20,
+ [0][1][2][0][RTW89_FCC][96] = -2,
+ [0][1][2][0][RTW89_ETSI][96] = 127,
+ [0][1][2][0][RTW89_KCC][96] = 20,
+ [0][1][2][0][RTW89_FCC][98] = -2,
+ [0][1][2][0][RTW89_ETSI][98] = 127,
+ [0][1][2][0][RTW89_KCC][98] = 20,
+ [0][1][2][0][RTW89_FCC][100] = -2,
+ [0][1][2][0][RTW89_ETSI][100] = 127,
+ [0][1][2][0][RTW89_KCC][100] = 20,
+ [0][1][2][0][RTW89_FCC][102] = -2,
+ [0][1][2][0][RTW89_ETSI][102] = 127,
+ [0][1][2][0][RTW89_KCC][102] = 20,
+ [0][1][2][0][RTW89_FCC][104] = -2,
+ [0][1][2][0][RTW89_ETSI][104] = 127,
+ [0][1][2][0][RTW89_KCC][104] = 20,
+ [0][1][2][0][RTW89_FCC][105] = -2,
+ [0][1][2][0][RTW89_ETSI][105] = 127,
+ [0][1][2][0][RTW89_KCC][105] = 20,
+ [0][1][2][0][RTW89_FCC][107] = 0,
+ [0][1][2][0][RTW89_ETSI][107] = 127,
+ [0][1][2][0][RTW89_KCC][107] = 20,
+ [0][1][2][0][RTW89_FCC][109] = 0,
+ [0][1][2][0][RTW89_ETSI][109] = 127,
+ [0][1][2][0][RTW89_KCC][109] = 20,
+ [0][1][2][0][RTW89_FCC][111] = 127,
+ [0][1][2][0][RTW89_ETSI][111] = 127,
+ [0][1][2][0][RTW89_KCC][111] = 127,
+ [0][1][2][0][RTW89_FCC][113] = 127,
+ [0][1][2][0][RTW89_ETSI][113] = 127,
+ [0][1][2][0][RTW89_KCC][113] = 127,
+ [0][1][2][0][RTW89_FCC][115] = 127,
+ [0][1][2][0][RTW89_ETSI][115] = 127,
+ [0][1][2][0][RTW89_KCC][115] = 127,
+ [0][1][2][0][RTW89_FCC][117] = 127,
+ [0][1][2][0][RTW89_ETSI][117] = 127,
+ [0][1][2][0][RTW89_KCC][117] = 127,
+ [0][1][2][0][RTW89_FCC][119] = 127,
+ [0][1][2][0][RTW89_ETSI][119] = 127,
+ [0][1][2][0][RTW89_KCC][119] = 127,
+ [0][1][2][1][RTW89_FCC][0] = -2,
+ [0][1][2][1][RTW89_ETSI][0] = 42,
+ [0][1][2][1][RTW89_KCC][0] = 12,
+ [0][1][2][1][RTW89_FCC][2] = -4,
+ [0][1][2][1][RTW89_ETSI][2] = 42,
+ [0][1][2][1][RTW89_KCC][2] = 12,
+ [0][1][2][1][RTW89_FCC][4] = -4,
+ [0][1][2][1][RTW89_ETSI][4] = 42,
+ [0][1][2][1][RTW89_KCC][4] = 12,
+ [0][1][2][1][RTW89_FCC][6] = -4,
+ [0][1][2][1][RTW89_ETSI][6] = 42,
+ [0][1][2][1][RTW89_KCC][6] = 12,
+ [0][1][2][1][RTW89_FCC][8] = -4,
+ [0][1][2][1][RTW89_ETSI][8] = 42,
+ [0][1][2][1][RTW89_KCC][8] = 12,
+ [0][1][2][1][RTW89_FCC][10] = -4,
+ [0][1][2][1][RTW89_ETSI][10] = 42,
+ [0][1][2][1][RTW89_KCC][10] = 12,
+ [0][1][2][1][RTW89_FCC][12] = -4,
+ [0][1][2][1][RTW89_ETSI][12] = 42,
+ [0][1][2][1][RTW89_KCC][12] = 12,
+ [0][1][2][1][RTW89_FCC][14] = -4,
+ [0][1][2][1][RTW89_ETSI][14] = 42,
+ [0][1][2][1][RTW89_KCC][14] = 12,
+ [0][1][2][1][RTW89_FCC][15] = -4,
+ [0][1][2][1][RTW89_ETSI][15] = 42,
+ [0][1][2][1][RTW89_KCC][15] = 12,
+ [0][1][2][1][RTW89_FCC][17] = -4,
+ [0][1][2][1][RTW89_ETSI][17] = 42,
+ [0][1][2][1][RTW89_KCC][17] = 12,
+ [0][1][2][1][RTW89_FCC][19] = -4,
+ [0][1][2][1][RTW89_ETSI][19] = 42,
+ [0][1][2][1][RTW89_KCC][19] = 12,
+ [0][1][2][1][RTW89_FCC][21] = -4,
+ [0][1][2][1][RTW89_ETSI][21] = 42,
+ [0][1][2][1][RTW89_KCC][21] = 12,
+ [0][1][2][1][RTW89_FCC][23] = -4,
+ [0][1][2][1][RTW89_ETSI][23] = 42,
+ [0][1][2][1][RTW89_KCC][23] = 12,
+ [0][1][2][1][RTW89_FCC][25] = -4,
+ [0][1][2][1][RTW89_ETSI][25] = 42,
+ [0][1][2][1][RTW89_KCC][25] = 12,
+ [0][1][2][1][RTW89_FCC][27] = -4,
+ [0][1][2][1][RTW89_ETSI][27] = 42,
+ [0][1][2][1][RTW89_KCC][27] = 12,
+ [0][1][2][1][RTW89_FCC][29] = -4,
+ [0][1][2][1][RTW89_ETSI][29] = 42,
+ [0][1][2][1][RTW89_KCC][29] = 12,
+ [0][1][2][1][RTW89_FCC][30] = -4,
+ [0][1][2][1][RTW89_ETSI][30] = 42,
+ [0][1][2][1][RTW89_KCC][30] = 12,
+ [0][1][2][1][RTW89_FCC][32] = -4,
+ [0][1][2][1][RTW89_ETSI][32] = 42,
+ [0][1][2][1][RTW89_KCC][32] = 12,
+ [0][1][2][1][RTW89_FCC][34] = -4,
+ [0][1][2][1][RTW89_ETSI][34] = 42,
+ [0][1][2][1][RTW89_KCC][34] = 12,
+ [0][1][2][1][RTW89_FCC][36] = -4,
+ [0][1][2][1][RTW89_ETSI][36] = 42,
+ [0][1][2][1][RTW89_KCC][36] = 12,
+ [0][1][2][1][RTW89_FCC][38] = -4,
+ [0][1][2][1][RTW89_ETSI][38] = 42,
+ [0][1][2][1][RTW89_KCC][38] = 12,
+ [0][1][2][1][RTW89_FCC][40] = -4,
+ [0][1][2][1][RTW89_ETSI][40] = 42,
+ [0][1][2][1][RTW89_KCC][40] = 12,
+ [0][1][2][1][RTW89_FCC][42] = -4,
+ [0][1][2][1][RTW89_ETSI][42] = 42,
+ [0][1][2][1][RTW89_KCC][42] = 12,
+ [0][1][2][1][RTW89_FCC][44] = -2,
+ [0][1][2][1][RTW89_ETSI][44] = 42,
+ [0][1][2][1][RTW89_KCC][44] = 12,
+ [0][1][2][1][RTW89_FCC][45] = -2,
+ [0][1][2][1][RTW89_ETSI][45] = 127,
+ [0][1][2][1][RTW89_KCC][45] = 12,
+ [0][1][2][1][RTW89_FCC][47] = -2,
+ [0][1][2][1][RTW89_ETSI][47] = 127,
+ [0][1][2][1][RTW89_KCC][47] = 12,
+ [0][1][2][1][RTW89_FCC][49] = -2,
+ [0][1][2][1][RTW89_ETSI][49] = 127,
+ [0][1][2][1][RTW89_KCC][49] = 12,
+ [0][1][2][1][RTW89_FCC][51] = -2,
+ [0][1][2][1][RTW89_ETSI][51] = 127,
+ [0][1][2][1][RTW89_KCC][51] = 12,
+ [0][1][2][1][RTW89_FCC][53] = -2,
+ [0][1][2][1][RTW89_ETSI][53] = 127,
+ [0][1][2][1][RTW89_KCC][53] = 12,
+ [0][1][2][1][RTW89_FCC][55] = -2,
+ [0][1][2][1][RTW89_ETSI][55] = 127,
+ [0][1][2][1][RTW89_KCC][55] = 12,
+ [0][1][2][1][RTW89_FCC][57] = -2,
+ [0][1][2][1][RTW89_ETSI][57] = 127,
+ [0][1][2][1][RTW89_KCC][57] = 12,
+ [0][1][2][1][RTW89_FCC][59] = -2,
+ [0][1][2][1][RTW89_ETSI][59] = 127,
+ [0][1][2][1][RTW89_KCC][59] = 12,
+ [0][1][2][1][RTW89_FCC][60] = -2,
+ [0][1][2][1][RTW89_ETSI][60] = 127,
+ [0][1][2][1][RTW89_KCC][60] = 12,
+ [0][1][2][1][RTW89_FCC][62] = -2,
+ [0][1][2][1][RTW89_ETSI][62] = 127,
+ [0][1][2][1][RTW89_KCC][62] = 12,
+ [0][1][2][1][RTW89_FCC][64] = -2,
+ [0][1][2][1][RTW89_ETSI][64] = 127,
+ [0][1][2][1][RTW89_KCC][64] = 12,
+ [0][1][2][1][RTW89_FCC][66] = -2,
+ [0][1][2][1][RTW89_ETSI][66] = 127,
+ [0][1][2][1][RTW89_KCC][66] = 12,
+ [0][1][2][1][RTW89_FCC][68] = -2,
+ [0][1][2][1][RTW89_ETSI][68] = 127,
+ [0][1][2][1][RTW89_KCC][68] = 12,
+ [0][1][2][1][RTW89_FCC][70] = -2,
+ [0][1][2][1][RTW89_ETSI][70] = 127,
+ [0][1][2][1][RTW89_KCC][70] = 12,
+ [0][1][2][1][RTW89_FCC][72] = -2,
+ [0][1][2][1][RTW89_ETSI][72] = 127,
+ [0][1][2][1][RTW89_KCC][72] = 12,
+ [0][1][2][1][RTW89_FCC][74] = -2,
+ [0][1][2][1][RTW89_ETSI][74] = 127,
+ [0][1][2][1][RTW89_KCC][74] = 12,
+ [0][1][2][1][RTW89_FCC][75] = -2,
+ [0][1][2][1][RTW89_ETSI][75] = 127,
+ [0][1][2][1][RTW89_KCC][75] = 12,
+ [0][1][2][1][RTW89_FCC][77] = -2,
+ [0][1][2][1][RTW89_ETSI][77] = 127,
+ [0][1][2][1][RTW89_KCC][77] = 12,
+ [0][1][2][1][RTW89_FCC][79] = -2,
+ [0][1][2][1][RTW89_ETSI][79] = 127,
+ [0][1][2][1][RTW89_KCC][79] = 12,
+ [0][1][2][1][RTW89_FCC][81] = -2,
+ [0][1][2][1][RTW89_ETSI][81] = 127,
+ [0][1][2][1][RTW89_KCC][81] = 12,
+ [0][1][2][1][RTW89_FCC][83] = -2,
+ [0][1][2][1][RTW89_ETSI][83] = 127,
+ [0][1][2][1][RTW89_KCC][83] = 20,
+ [0][1][2][1][RTW89_FCC][85] = -2,
+ [0][1][2][1][RTW89_ETSI][85] = 127,
+ [0][1][2][1][RTW89_KCC][85] = 20,
+ [0][1][2][1][RTW89_FCC][87] = -2,
+ [0][1][2][1][RTW89_ETSI][87] = 127,
+ [0][1][2][1][RTW89_KCC][87] = 20,
+ [0][1][2][1][RTW89_FCC][89] = -2,
+ [0][1][2][1][RTW89_ETSI][89] = 127,
+ [0][1][2][1][RTW89_KCC][89] = 20,
+ [0][1][2][1][RTW89_FCC][90] = -2,
+ [0][1][2][1][RTW89_ETSI][90] = 127,
+ [0][1][2][1][RTW89_KCC][90] = 20,
+ [0][1][2][1][RTW89_FCC][92] = -2,
+ [0][1][2][1][RTW89_ETSI][92] = 127,
+ [0][1][2][1][RTW89_KCC][92] = 20,
+ [0][1][2][1][RTW89_FCC][94] = -2,
+ [0][1][2][1][RTW89_ETSI][94] = 127,
+ [0][1][2][1][RTW89_KCC][94] = 20,
+ [0][1][2][1][RTW89_FCC][96] = -2,
+ [0][1][2][1][RTW89_ETSI][96] = 127,
+ [0][1][2][1][RTW89_KCC][96] = 20,
+ [0][1][2][1][RTW89_FCC][98] = -2,
+ [0][1][2][1][RTW89_ETSI][98] = 127,
+ [0][1][2][1][RTW89_KCC][98] = 20,
+ [0][1][2][1][RTW89_FCC][100] = -2,
+ [0][1][2][1][RTW89_ETSI][100] = 127,
+ [0][1][2][1][RTW89_KCC][100] = 20,
+ [0][1][2][1][RTW89_FCC][102] = -2,
+ [0][1][2][1][RTW89_ETSI][102] = 127,
+ [0][1][2][1][RTW89_KCC][102] = 20,
+ [0][1][2][1][RTW89_FCC][104] = -2,
+ [0][1][2][1][RTW89_ETSI][104] = 127,
+ [0][1][2][1][RTW89_KCC][104] = 20,
+ [0][1][2][1][RTW89_FCC][105] = -2,
+ [0][1][2][1][RTW89_ETSI][105] = 127,
+ [0][1][2][1][RTW89_KCC][105] = 20,
+ [0][1][2][1][RTW89_FCC][107] = 0,
+ [0][1][2][1][RTW89_ETSI][107] = 127,
+ [0][1][2][1][RTW89_KCC][107] = 20,
+ [0][1][2][1][RTW89_FCC][109] = 0,
+ [0][1][2][1][RTW89_ETSI][109] = 127,
+ [0][1][2][1][RTW89_KCC][109] = 20,
+ [0][1][2][1][RTW89_FCC][111] = 127,
+ [0][1][2][1][RTW89_ETSI][111] = 127,
+ [0][1][2][1][RTW89_KCC][111] = 127,
+ [0][1][2][1][RTW89_FCC][113] = 127,
+ [0][1][2][1][RTW89_ETSI][113] = 127,
+ [0][1][2][1][RTW89_KCC][113] = 127,
+ [0][1][2][1][RTW89_FCC][115] = 127,
+ [0][1][2][1][RTW89_ETSI][115] = 127,
+ [0][1][2][1][RTW89_KCC][115] = 127,
+ [0][1][2][1][RTW89_FCC][117] = 127,
+ [0][1][2][1][RTW89_ETSI][117] = 127,
+ [0][1][2][1][RTW89_KCC][117] = 127,
+ [0][1][2][1][RTW89_FCC][119] = 127,
+ [0][1][2][1][RTW89_ETSI][119] = 127,
+ [0][1][2][1][RTW89_KCC][119] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 34,
+ [1][0][2][0][RTW89_ETSI][1] = 66,
+ [1][0][2][0][RTW89_KCC][1] = 40,
+ [1][0][2][0][RTW89_FCC][5] = 34,
+ [1][0][2][0][RTW89_ETSI][5] = 66,
+ [1][0][2][0][RTW89_KCC][5] = 40,
+ [1][0][2][0][RTW89_FCC][9] = 34,
+ [1][0][2][0][RTW89_ETSI][9] = 66,
+ [1][0][2][0][RTW89_KCC][9] = 40,
+ [1][0][2][0][RTW89_FCC][13] = 34,
+ [1][0][2][0][RTW89_ETSI][13] = 66,
+ [1][0][2][0][RTW89_KCC][13] = 40,
+ [1][0][2][0][RTW89_FCC][16] = 34,
+ [1][0][2][0][RTW89_ETSI][16] = 66,
+ [1][0][2][0][RTW89_KCC][16] = 40,
+ [1][0][2][0][RTW89_FCC][20] = 34,
+ [1][0][2][0][RTW89_ETSI][20] = 66,
+ [1][0][2][0][RTW89_KCC][20] = 40,
+ [1][0][2][0][RTW89_FCC][24] = 36,
+ [1][0][2][0][RTW89_ETSI][24] = 66,
+ [1][0][2][0][RTW89_KCC][24] = 40,
+ [1][0][2][0][RTW89_FCC][28] = 34,
+ [1][0][2][0][RTW89_ETSI][28] = 66,
+ [1][0][2][0][RTW89_KCC][28] = 40,
+ [1][0][2][0][RTW89_FCC][31] = 34,
+ [1][0][2][0][RTW89_ETSI][31] = 66,
+ [1][0][2][0][RTW89_KCC][31] = 40,
+ [1][0][2][0][RTW89_FCC][35] = 34,
+ [1][0][2][0][RTW89_ETSI][35] = 66,
+ [1][0][2][0][RTW89_KCC][35] = 40,
+ [1][0][2][0][RTW89_FCC][39] = 34,
+ [1][0][2][0][RTW89_ETSI][39] = 66,
+ [1][0][2][0][RTW89_KCC][39] = 40,
+ [1][0][2][0][RTW89_FCC][43] = 34,
+ [1][0][2][0][RTW89_ETSI][43] = 66,
+ [1][0][2][0][RTW89_KCC][43] = 40,
+ [1][0][2][0][RTW89_FCC][46] = 34,
+ [1][0][2][0][RTW89_ETSI][46] = 127,
+ [1][0][2][0][RTW89_KCC][46] = 40,
+ [1][0][2][0][RTW89_FCC][50] = 34,
+ [1][0][2][0][RTW89_ETSI][50] = 127,
+ [1][0][2][0][RTW89_KCC][50] = 40,
+ [1][0][2][0][RTW89_FCC][54] = 36,
+ [1][0][2][0][RTW89_ETSI][54] = 127,
+ [1][0][2][0][RTW89_KCC][54] = 40,
+ [1][0][2][0][RTW89_FCC][58] = 36,
+ [1][0][2][0][RTW89_ETSI][58] = 127,
+ [1][0][2][0][RTW89_KCC][58] = 40,
+ [1][0][2][0][RTW89_FCC][61] = 34,
+ [1][0][2][0][RTW89_ETSI][61] = 127,
+ [1][0][2][0][RTW89_KCC][61] = 40,
+ [1][0][2][0][RTW89_FCC][65] = 34,
+ [1][0][2][0][RTW89_ETSI][65] = 127,
+ [1][0][2][0][RTW89_KCC][65] = 40,
+ [1][0][2][0][RTW89_FCC][69] = 34,
+ [1][0][2][0][RTW89_ETSI][69] = 127,
+ [1][0][2][0][RTW89_KCC][69] = 40,
+ [1][0][2][0][RTW89_FCC][73] = 34,
+ [1][0][2][0][RTW89_ETSI][73] = 127,
+ [1][0][2][0][RTW89_KCC][73] = 40,
+ [1][0][2][0][RTW89_FCC][76] = 34,
+ [1][0][2][0][RTW89_ETSI][76] = 127,
+ [1][0][2][0][RTW89_KCC][76] = 40,
+ [1][0][2][0][RTW89_FCC][80] = 34,
+ [1][0][2][0][RTW89_ETSI][80] = 127,
+ [1][0][2][0][RTW89_KCC][80] = 42,
+ [1][0][2][0][RTW89_FCC][84] = 34,
+ [1][0][2][0][RTW89_ETSI][84] = 127,
+ [1][0][2][0][RTW89_KCC][84] = 42,
+ [1][0][2][0][RTW89_FCC][88] = 34,
+ [1][0][2][0][RTW89_ETSI][88] = 127,
+ [1][0][2][0][RTW89_KCC][88] = 42,
+ [1][0][2][0][RTW89_FCC][91] = 36,
+ [1][0][2][0][RTW89_ETSI][91] = 127,
+ [1][0][2][0][RTW89_KCC][91] = 42,
+ [1][0][2][0][RTW89_FCC][95] = 34,
+ [1][0][2][0][RTW89_ETSI][95] = 127,
+ [1][0][2][0][RTW89_KCC][95] = 42,
+ [1][0][2][0][RTW89_FCC][99] = 34,
+ [1][0][2][0][RTW89_ETSI][99] = 127,
+ [1][0][2][0][RTW89_KCC][99] = 42,
+ [1][0][2][0][RTW89_FCC][103] = 34,
+ [1][0][2][0][RTW89_ETSI][103] = 127,
+ [1][0][2][0][RTW89_KCC][103] = 42,
+ [1][0][2][0][RTW89_FCC][106] = 36,
+ [1][0][2][0][RTW89_ETSI][106] = 127,
+ [1][0][2][0][RTW89_KCC][106] = 42,
+ [1][0][2][0][RTW89_FCC][110] = 127,
+ [1][0][2][0][RTW89_ETSI][110] = 127,
+ [1][0][2][0][RTW89_KCC][110] = 127,
+ [1][0][2][0][RTW89_FCC][114] = 127,
+ [1][0][2][0][RTW89_ETSI][114] = 127,
+ [1][0][2][0][RTW89_KCC][114] = 127,
+ [1][0][2][0][RTW89_FCC][118] = 127,
+ [1][0][2][0][RTW89_ETSI][118] = 127,
+ [1][0][2][0][RTW89_KCC][118] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 10,
+ [1][1][2][0][RTW89_ETSI][1] = 54,
+ [1][1][2][0][RTW89_KCC][1] = 28,
+ [1][1][2][0][RTW89_FCC][5] = 10,
+ [1][1][2][0][RTW89_ETSI][5] = 54,
+ [1][1][2][0][RTW89_KCC][5] = 28,
+ [1][1][2][0][RTW89_FCC][9] = 10,
+ [1][1][2][0][RTW89_ETSI][9] = 54,
+ [1][1][2][0][RTW89_KCC][9] = 28,
+ [1][1][2][0][RTW89_FCC][13] = 10,
+ [1][1][2][0][RTW89_ETSI][13] = 54,
+ [1][1][2][0][RTW89_KCC][13] = 28,
+ [1][1][2][0][RTW89_FCC][16] = 10,
+ [1][1][2][0][RTW89_ETSI][16] = 54,
+ [1][1][2][0][RTW89_KCC][16] = 28,
+ [1][1][2][0][RTW89_FCC][20] = 10,
+ [1][1][2][0][RTW89_ETSI][20] = 54,
+ [1][1][2][0][RTW89_KCC][20] = 28,
+ [1][1][2][0][RTW89_FCC][24] = 10,
+ [1][1][2][0][RTW89_ETSI][24] = 54,
+ [1][1][2][0][RTW89_KCC][24] = 28,
+ [1][1][2][0][RTW89_FCC][28] = 10,
+ [1][1][2][0][RTW89_ETSI][28] = 54,
+ [1][1][2][0][RTW89_KCC][28] = 28,
+ [1][1][2][0][RTW89_FCC][31] = 10,
+ [1][1][2][0][RTW89_ETSI][31] = 54,
+ [1][1][2][0][RTW89_KCC][31] = 28,
+ [1][1][2][0][RTW89_FCC][35] = 10,
+ [1][1][2][0][RTW89_ETSI][35] = 54,
+ [1][1][2][0][RTW89_KCC][35] = 28,
+ [1][1][2][0][RTW89_FCC][39] = 10,
+ [1][1][2][0][RTW89_ETSI][39] = 54,
+ [1][1][2][0][RTW89_KCC][39] = 28,
+ [1][1][2][0][RTW89_FCC][43] = 10,
+ [1][1][2][0][RTW89_ETSI][43] = 54,
+ [1][1][2][0][RTW89_KCC][43] = 28,
+ [1][1][2][0][RTW89_FCC][46] = 12,
+ [1][1][2][0][RTW89_ETSI][46] = 127,
+ [1][1][2][0][RTW89_KCC][46] = 28,
+ [1][1][2][0][RTW89_FCC][50] = 12,
+ [1][1][2][0][RTW89_ETSI][50] = 127,
+ [1][1][2][0][RTW89_KCC][50] = 28,
+ [1][1][2][0][RTW89_FCC][54] = 10,
+ [1][1][2][0][RTW89_ETSI][54] = 127,
+ [1][1][2][0][RTW89_KCC][54] = 28,
+ [1][1][2][0][RTW89_FCC][58] = 10,
+ [1][1][2][0][RTW89_ETSI][58] = 127,
+ [1][1][2][0][RTW89_KCC][58] = 28,
+ [1][1][2][0][RTW89_FCC][61] = 10,
+ [1][1][2][0][RTW89_ETSI][61] = 127,
+ [1][1][2][0][RTW89_KCC][61] = 28,
+ [1][1][2][0][RTW89_FCC][65] = 10,
+ [1][1][2][0][RTW89_ETSI][65] = 127,
+ [1][1][2][0][RTW89_KCC][65] = 28,
+ [1][1][2][0][RTW89_FCC][69] = 10,
+ [1][1][2][0][RTW89_ETSI][69] = 127,
+ [1][1][2][0][RTW89_KCC][69] = 28,
+ [1][1][2][0][RTW89_FCC][73] = 10,
+ [1][1][2][0][RTW89_ETSI][73] = 127,
+ [1][1][2][0][RTW89_KCC][73] = 28,
+ [1][1][2][0][RTW89_FCC][76] = 10,
+ [1][1][2][0][RTW89_ETSI][76] = 127,
+ [1][1][2][0][RTW89_KCC][76] = 28,
+ [1][1][2][0][RTW89_FCC][80] = 10,
+ [1][1][2][0][RTW89_ETSI][80] = 127,
+ [1][1][2][0][RTW89_KCC][80] = 32,
+ [1][1][2][0][RTW89_FCC][84] = 10,
+ [1][1][2][0][RTW89_ETSI][84] = 127,
+ [1][1][2][0][RTW89_KCC][84] = 32,
+ [1][1][2][0][RTW89_FCC][88] = 10,
+ [1][1][2][0][RTW89_ETSI][88] = 127,
+ [1][1][2][0][RTW89_KCC][88] = 32,
+ [1][1][2][0][RTW89_FCC][91] = 12,
+ [1][1][2][0][RTW89_ETSI][91] = 127,
+ [1][1][2][0][RTW89_KCC][91] = 32,
+ [1][1][2][0][RTW89_FCC][95] = 10,
+ [1][1][2][0][RTW89_ETSI][95] = 127,
+ [1][1][2][0][RTW89_KCC][95] = 32,
+ [1][1][2][0][RTW89_FCC][99] = 10,
+ [1][1][2][0][RTW89_ETSI][99] = 127,
+ [1][1][2][0][RTW89_KCC][99] = 32,
+ [1][1][2][0][RTW89_FCC][103] = 10,
+ [1][1][2][0][RTW89_ETSI][103] = 127,
+ [1][1][2][0][RTW89_KCC][103] = 32,
+ [1][1][2][0][RTW89_FCC][106] = 12,
+ [1][1][2][0][RTW89_ETSI][106] = 127,
+ [1][1][2][0][RTW89_KCC][106] = 32,
+ [1][1][2][0][RTW89_FCC][110] = 127,
+ [1][1][2][0][RTW89_ETSI][110] = 127,
+ [1][1][2][0][RTW89_KCC][110] = 127,
+ [1][1][2][0][RTW89_FCC][114] = 127,
+ [1][1][2][0][RTW89_ETSI][114] = 127,
+ [1][1][2][0][RTW89_KCC][114] = 127,
+ [1][1][2][0][RTW89_FCC][118] = 127,
+ [1][1][2][0][RTW89_ETSI][118] = 127,
+ [1][1][2][0][RTW89_KCC][118] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 10,
+ [1][1][2][1][RTW89_ETSI][1] = 42,
+ [1][1][2][1][RTW89_KCC][1] = 28,
+ [1][1][2][1][RTW89_FCC][5] = 10,
+ [1][1][2][1][RTW89_ETSI][5] = 42,
+ [1][1][2][1][RTW89_KCC][5] = 28,
+ [1][1][2][1][RTW89_FCC][9] = 10,
+ [1][1][2][1][RTW89_ETSI][9] = 42,
+ [1][1][2][1][RTW89_KCC][9] = 28,
+ [1][1][2][1][RTW89_FCC][13] = 10,
+ [1][1][2][1][RTW89_ETSI][13] = 42,
+ [1][1][2][1][RTW89_KCC][13] = 28,
+ [1][1][2][1][RTW89_FCC][16] = 10,
+ [1][1][2][1][RTW89_ETSI][16] = 42,
+ [1][1][2][1][RTW89_KCC][16] = 28,
+ [1][1][2][1][RTW89_FCC][20] = 10,
+ [1][1][2][1][RTW89_ETSI][20] = 42,
+ [1][1][2][1][RTW89_KCC][20] = 28,
+ [1][1][2][1][RTW89_FCC][24] = 10,
+ [1][1][2][1][RTW89_ETSI][24] = 42,
+ [1][1][2][1][RTW89_KCC][24] = 28,
+ [1][1][2][1][RTW89_FCC][28] = 10,
+ [1][1][2][1][RTW89_ETSI][28] = 42,
+ [1][1][2][1][RTW89_KCC][28] = 28,
+ [1][1][2][1][RTW89_FCC][31] = 10,
+ [1][1][2][1][RTW89_ETSI][31] = 42,
+ [1][1][2][1][RTW89_KCC][31] = 28,
+ [1][1][2][1][RTW89_FCC][35] = 10,
+ [1][1][2][1][RTW89_ETSI][35] = 42,
+ [1][1][2][1][RTW89_KCC][35] = 28,
+ [1][1][2][1][RTW89_FCC][39] = 10,
+ [1][1][2][1][RTW89_ETSI][39] = 42,
+ [1][1][2][1][RTW89_KCC][39] = 28,
+ [1][1][2][1][RTW89_FCC][43] = 10,
+ [1][1][2][1][RTW89_ETSI][43] = 42,
+ [1][1][2][1][RTW89_KCC][43] = 28,
+ [1][1][2][1][RTW89_FCC][46] = 12,
+ [1][1][2][1][RTW89_ETSI][46] = 127,
+ [1][1][2][1][RTW89_KCC][46] = 28,
+ [1][1][2][1][RTW89_FCC][50] = 12,
+ [1][1][2][1][RTW89_ETSI][50] = 127,
+ [1][1][2][1][RTW89_KCC][50] = 28,
+ [1][1][2][1][RTW89_FCC][54] = 10,
+ [1][1][2][1][RTW89_ETSI][54] = 127,
+ [1][1][2][1][RTW89_KCC][54] = 28,
+ [1][1][2][1][RTW89_FCC][58] = 10,
+ [1][1][2][1][RTW89_ETSI][58] = 127,
+ [1][1][2][1][RTW89_KCC][58] = 28,
+ [1][1][2][1][RTW89_FCC][61] = 10,
+ [1][1][2][1][RTW89_ETSI][61] = 127,
+ [1][1][2][1][RTW89_KCC][61] = 28,
+ [1][1][2][1][RTW89_FCC][65] = 10,
+ [1][1][2][1][RTW89_ETSI][65] = 127,
+ [1][1][2][1][RTW89_KCC][65] = 28,
+ [1][1][2][1][RTW89_FCC][69] = 10,
+ [1][1][2][1][RTW89_ETSI][69] = 127,
+ [1][1][2][1][RTW89_KCC][69] = 28,
+ [1][1][2][1][RTW89_FCC][73] = 10,
+ [1][1][2][1][RTW89_ETSI][73] = 127,
+ [1][1][2][1][RTW89_KCC][73] = 28,
+ [1][1][2][1][RTW89_FCC][76] = 10,
+ [1][1][2][1][RTW89_ETSI][76] = 127,
+ [1][1][2][1][RTW89_KCC][76] = 28,
+ [1][1][2][1][RTW89_FCC][80] = 10,
+ [1][1][2][1][RTW89_ETSI][80] = 127,
+ [1][1][2][1][RTW89_KCC][80] = 32,
+ [1][1][2][1][RTW89_FCC][84] = 10,
+ [1][1][2][1][RTW89_ETSI][84] = 127,
+ [1][1][2][1][RTW89_KCC][84] = 32,
+ [1][1][2][1][RTW89_FCC][88] = 10,
+ [1][1][2][1][RTW89_ETSI][88] = 127,
+ [1][1][2][1][RTW89_KCC][88] = 32,
+ [1][1][2][1][RTW89_FCC][91] = 12,
+ [1][1][2][1][RTW89_ETSI][91] = 127,
+ [1][1][2][1][RTW89_KCC][91] = 32,
+ [1][1][2][1][RTW89_FCC][95] = 10,
+ [1][1][2][1][RTW89_ETSI][95] = 127,
+ [1][1][2][1][RTW89_KCC][95] = 32,
+ [1][1][2][1][RTW89_FCC][99] = 10,
+ [1][1][2][1][RTW89_ETSI][99] = 127,
+ [1][1][2][1][RTW89_KCC][99] = 32,
+ [1][1][2][1][RTW89_FCC][103] = 10,
+ [1][1][2][1][RTW89_ETSI][103] = 127,
+ [1][1][2][1][RTW89_KCC][103] = 32,
+ [1][1][2][1][RTW89_FCC][106] = 12,
+ [1][1][2][1][RTW89_ETSI][106] = 127,
+ [1][1][2][1][RTW89_KCC][106] = 32,
+ [1][1][2][1][RTW89_FCC][110] = 127,
+ [1][1][2][1][RTW89_ETSI][110] = 127,
+ [1][1][2][1][RTW89_KCC][110] = 127,
+ [1][1][2][1][RTW89_FCC][114] = 127,
+ [1][1][2][1][RTW89_ETSI][114] = 127,
+ [1][1][2][1][RTW89_KCC][114] = 127,
+ [1][1][2][1][RTW89_FCC][118] = 127,
+ [1][1][2][1][RTW89_ETSI][118] = 127,
+ [1][1][2][1][RTW89_KCC][118] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 46,
+ [2][0][2][0][RTW89_ETSI][3] = 48,
+ [2][0][2][0][RTW89_KCC][3] = 50,
+ [2][0][2][0][RTW89_FCC][11] = 46,
+ [2][0][2][0][RTW89_ETSI][11] = 48,
+ [2][0][2][0][RTW89_KCC][11] = 50,
+ [2][0][2][0][RTW89_FCC][18] = 46,
+ [2][0][2][0][RTW89_ETSI][18] = 48,
+ [2][0][2][0][RTW89_KCC][18] = 50,
+ [2][0][2][0][RTW89_FCC][26] = 46,
+ [2][0][2][0][RTW89_ETSI][26] = 48,
+ [2][0][2][0][RTW89_KCC][26] = 50,
+ [2][0][2][0][RTW89_FCC][33] = 46,
+ [2][0][2][0][RTW89_ETSI][33] = 48,
+ [2][0][2][0][RTW89_KCC][33] = 50,
+ [2][0][2][0][RTW89_FCC][41] = 46,
+ [2][0][2][0][RTW89_ETSI][41] = 48,
+ [2][0][2][0][RTW89_KCC][41] = 50,
+ [2][0][2][0][RTW89_FCC][48] = 46,
+ [2][0][2][0][RTW89_ETSI][48] = 127,
+ [2][0][2][0][RTW89_KCC][48] = 48,
+ [2][0][2][0][RTW89_FCC][56] = 46,
+ [2][0][2][0][RTW89_ETSI][56] = 127,
+ [2][0][2][0][RTW89_KCC][56] = 48,
+ [2][0][2][0][RTW89_FCC][63] = 46,
+ [2][0][2][0][RTW89_ETSI][63] = 127,
+ [2][0][2][0][RTW89_KCC][63] = 48,
+ [2][0][2][0][RTW89_FCC][71] = 46,
+ [2][0][2][0][RTW89_ETSI][71] = 127,
+ [2][0][2][0][RTW89_KCC][71] = 48,
+ [2][0][2][0][RTW89_FCC][78] = 46,
+ [2][0][2][0][RTW89_ETSI][78] = 127,
+ [2][0][2][0][RTW89_KCC][78] = 52,
+ [2][0][2][0][RTW89_FCC][86] = 46,
+ [2][0][2][0][RTW89_ETSI][86] = 127,
+ [2][0][2][0][RTW89_KCC][86] = 52,
+ [2][0][2][0][RTW89_FCC][93] = 46,
+ [2][0][2][0][RTW89_ETSI][93] = 127,
+ [2][0][2][0][RTW89_KCC][93] = 50,
+ [2][0][2][0][RTW89_FCC][101] = 44,
+ [2][0][2][0][RTW89_ETSI][101] = 127,
+ [2][0][2][0][RTW89_KCC][101] = 50,
+ [2][0][2][0][RTW89_FCC][108] = 127,
+ [2][0][2][0][RTW89_ETSI][108] = 127,
+ [2][0][2][0][RTW89_KCC][108] = 127,
+ [2][0][2][0][RTW89_FCC][116] = 127,
+ [2][0][2][0][RTW89_ETSI][116] = 127,
+ [2][0][2][0][RTW89_KCC][116] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 22,
+ [2][1][2][0][RTW89_ETSI][3] = 48,
+ [2][1][2][0][RTW89_KCC][3] = 38,
+ [2][1][2][0][RTW89_FCC][11] = 20,
+ [2][1][2][0][RTW89_ETSI][11] = 48,
+ [2][1][2][0][RTW89_KCC][11] = 38,
+ [2][1][2][0][RTW89_FCC][18] = 20,
+ [2][1][2][0][RTW89_ETSI][18] = 48,
+ [2][1][2][0][RTW89_KCC][18] = 38,
+ [2][1][2][0][RTW89_FCC][26] = 20,
+ [2][1][2][0][RTW89_ETSI][26] = 48,
+ [2][1][2][0][RTW89_KCC][26] = 38,
+ [2][1][2][0][RTW89_FCC][33] = 20,
+ [2][1][2][0][RTW89_ETSI][33] = 48,
+ [2][1][2][0][RTW89_KCC][33] = 38,
+ [2][1][2][0][RTW89_FCC][41] = 22,
+ [2][1][2][0][RTW89_ETSI][41] = 48,
+ [2][1][2][0][RTW89_KCC][41] = 38,
+ [2][1][2][0][RTW89_FCC][48] = 22,
+ [2][1][2][0][RTW89_ETSI][48] = 127,
+ [2][1][2][0][RTW89_KCC][48] = 38,
+ [2][1][2][0][RTW89_FCC][56] = 20,
+ [2][1][2][0][RTW89_ETSI][56] = 127,
+ [2][1][2][0][RTW89_KCC][56] = 38,
+ [2][1][2][0][RTW89_FCC][63] = 22,
+ [2][1][2][0][RTW89_ETSI][63] = 127,
+ [2][1][2][0][RTW89_KCC][63] = 38,
+ [2][1][2][0][RTW89_FCC][71] = 20,
+ [2][1][2][0][RTW89_ETSI][71] = 127,
+ [2][1][2][0][RTW89_KCC][71] = 38,
+ [2][1][2][0][RTW89_FCC][78] = 20,
+ [2][1][2][0][RTW89_ETSI][78] = 127,
+ [2][1][2][0][RTW89_KCC][78] = 38,
+ [2][1][2][0][RTW89_FCC][86] = 20,
+ [2][1][2][0][RTW89_ETSI][86] = 127,
+ [2][1][2][0][RTW89_KCC][86] = 38,
+ [2][1][2][0][RTW89_FCC][93] = 22,
+ [2][1][2][0][RTW89_ETSI][93] = 127,
+ [2][1][2][0][RTW89_KCC][93] = 38,
+ [2][1][2][0][RTW89_FCC][101] = 22,
+ [2][1][2][0][RTW89_ETSI][101] = 127,
+ [2][1][2][0][RTW89_KCC][101] = 38,
+ [2][1][2][0][RTW89_FCC][108] = 127,
+ [2][1][2][0][RTW89_ETSI][108] = 127,
+ [2][1][2][0][RTW89_KCC][108] = 127,
+ [2][1][2][0][RTW89_FCC][116] = 127,
+ [2][1][2][0][RTW89_ETSI][116] = 127,
+ [2][1][2][0][RTW89_KCC][116] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 22,
+ [2][1][2][1][RTW89_ETSI][3] = 42,
+ [2][1][2][1][RTW89_KCC][3] = 38,
+ [2][1][2][1][RTW89_FCC][11] = 20,
+ [2][1][2][1][RTW89_ETSI][11] = 42,
+ [2][1][2][1][RTW89_KCC][11] = 38,
+ [2][1][2][1][RTW89_FCC][18] = 20,
+ [2][1][2][1][RTW89_ETSI][18] = 42,
+ [2][1][2][1][RTW89_KCC][18] = 38,
+ [2][1][2][1][RTW89_FCC][26] = 20,
+ [2][1][2][1][RTW89_ETSI][26] = 42,
+ [2][1][2][1][RTW89_KCC][26] = 38,
+ [2][1][2][1][RTW89_FCC][33] = 20,
+ [2][1][2][1][RTW89_ETSI][33] = 42,
+ [2][1][2][1][RTW89_KCC][33] = 38,
+ [2][1][2][1][RTW89_FCC][41] = 22,
+ [2][1][2][1][RTW89_ETSI][41] = 42,
+ [2][1][2][1][RTW89_KCC][41] = 38,
+ [2][1][2][1][RTW89_FCC][48] = 22,
+ [2][1][2][1][RTW89_ETSI][48] = 127,
+ [2][1][2][1][RTW89_KCC][48] = 38,
+ [2][1][2][1][RTW89_FCC][56] = 20,
+ [2][1][2][1][RTW89_ETSI][56] = 127,
+ [2][1][2][1][RTW89_KCC][56] = 38,
+ [2][1][2][1][RTW89_FCC][63] = 22,
+ [2][1][2][1][RTW89_ETSI][63] = 127,
+ [2][1][2][1][RTW89_KCC][63] = 38,
+ [2][1][2][1][RTW89_FCC][71] = 20,
+ [2][1][2][1][RTW89_ETSI][71] = 127,
+ [2][1][2][1][RTW89_KCC][71] = 38,
+ [2][1][2][1][RTW89_FCC][78] = 20,
+ [2][1][2][1][RTW89_ETSI][78] = 127,
+ [2][1][2][1][RTW89_KCC][78] = 38,
+ [2][1][2][1][RTW89_FCC][86] = 20,
+ [2][1][2][1][RTW89_ETSI][86] = 127,
+ [2][1][2][1][RTW89_KCC][86] = 38,
+ [2][1][2][1][RTW89_FCC][93] = 22,
+ [2][1][2][1][RTW89_ETSI][93] = 127,
+ [2][1][2][1][RTW89_KCC][93] = 38,
+ [2][1][2][1][RTW89_FCC][101] = 22,
+ [2][1][2][1][RTW89_ETSI][101] = 127,
+ [2][1][2][1][RTW89_KCC][101] = 38,
+ [2][1][2][1][RTW89_FCC][108] = 127,
+ [2][1][2][1][RTW89_ETSI][108] = 127,
+ [2][1][2][1][RTW89_KCC][108] = 127,
+ [2][1][2][1][RTW89_FCC][116] = 127,
+ [2][1][2][1][RTW89_ETSI][116] = 127,
+ [2][1][2][1][RTW89_KCC][116] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 52,
+ [3][0][2][0][RTW89_ETSI][7] = 38,
+ [3][0][2][0][RTW89_KCC][7] = 42,
+ [3][0][2][0][RTW89_FCC][22] = 52,
+ [3][0][2][0][RTW89_ETSI][22] = 38,
+ [3][0][2][0][RTW89_KCC][22] = 42,
+ [3][0][2][0][RTW89_FCC][37] = 52,
+ [3][0][2][0][RTW89_ETSI][37] = 38,
+ [3][0][2][0][RTW89_KCC][37] = 42,
+ [3][0][2][0][RTW89_FCC][52] = 54,
+ [3][0][2][0][RTW89_ETSI][52] = 127,
+ [3][0][2][0][RTW89_KCC][52] = 56,
+ [3][0][2][0][RTW89_FCC][67] = 54,
+ [3][0][2][0][RTW89_ETSI][67] = 127,
+ [3][0][2][0][RTW89_KCC][67] = 54,
+ [3][0][2][0][RTW89_FCC][82] = 54,
+ [3][0][2][0][RTW89_ETSI][82] = 127,
+ [3][0][2][0][RTW89_KCC][82] = 26,
+ [3][0][2][0][RTW89_FCC][97] = 40,
+ [3][0][2][0][RTW89_ETSI][97] = 127,
+ [3][0][2][0][RTW89_KCC][97] = 26,
+ [3][0][2][0][RTW89_FCC][112] = 127,
+ [3][0][2][0][RTW89_ETSI][112] = 127,
+ [3][0][2][0][RTW89_KCC][112] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 32,
+ [3][1][2][0][RTW89_ETSI][7] = 38,
+ [3][1][2][0][RTW89_KCC][7] = 40,
+ [3][1][2][0][RTW89_FCC][22] = 30,
+ [3][1][2][0][RTW89_ETSI][22] = 38,
+ [3][1][2][0][RTW89_KCC][22] = 40,
+ [3][1][2][0][RTW89_FCC][37] = 30,
+ [3][1][2][0][RTW89_ETSI][37] = 38,
+ [3][1][2][0][RTW89_KCC][37] = 40,
+ [3][1][2][0][RTW89_FCC][52] = 30,
+ [3][1][2][0][RTW89_ETSI][52] = 127,
+ [3][1][2][0][RTW89_KCC][52] = 48,
+ [3][1][2][0][RTW89_FCC][67] = 32,
+ [3][1][2][0][RTW89_ETSI][67] = 127,
+ [3][1][2][0][RTW89_KCC][67] = 48,
+ [3][1][2][0][RTW89_FCC][82] = 32,
+ [3][1][2][0][RTW89_ETSI][82] = 127,
+ [3][1][2][0][RTW89_KCC][82] = 24,
+ [3][1][2][0][RTW89_FCC][97] = 14,
+ [3][1][2][0][RTW89_ETSI][97] = 127,
+ [3][1][2][0][RTW89_KCC][97] = 24,
+ [3][1][2][0][RTW89_FCC][112] = 127,
+ [3][1][2][0][RTW89_ETSI][112] = 127,
+ [3][1][2][0][RTW89_KCC][112] = 127,
+ [3][1][2][1][RTW89_FCC][7] = 32,
+ [3][1][2][1][RTW89_ETSI][7] = 38,
+ [3][1][2][1][RTW89_KCC][7] = 40,
+ [3][1][2][1][RTW89_FCC][22] = 30,
+ [3][1][2][1][RTW89_ETSI][22] = 38,
+ [3][1][2][1][RTW89_KCC][22] = 40,
+ [3][1][2][1][RTW89_FCC][37] = 30,
+ [3][1][2][1][RTW89_ETSI][37] = 38,
+ [3][1][2][1][RTW89_KCC][37] = 40,
+ [3][1][2][1][RTW89_FCC][52] = 30,
+ [3][1][2][1][RTW89_ETSI][52] = 127,
+ [3][1][2][1][RTW89_KCC][52] = 48,
+ [3][1][2][1][RTW89_FCC][67] = 32,
+ [3][1][2][1][RTW89_ETSI][67] = 127,
+ [3][1][2][1][RTW89_KCC][67] = 48,
+ [3][1][2][1][RTW89_FCC][82] = 32,
+ [3][1][2][1][RTW89_ETSI][82] = 127,
+ [3][1][2][1][RTW89_KCC][82] = 24,
+ [3][1][2][1][RTW89_FCC][97] = 14,
+ [3][1][2][1][RTW89_ETSI][97] = 127,
+ [3][1][2][1][RTW89_KCC][97] = 24,
+ [3][1][2][1][RTW89_FCC][112] = 127,
+ [3][1][2][1][RTW89_ETSI][112] = 127,
+ [3][1][2][1][RTW89_KCC][112] = 127,
+};
+
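+/*
+ * Layout of the RU limit tables below (inferred from their
+ * declarations, not stated in the generated data): entries are indexed
+ * as [RU/bandwidth][TX chain][regulatory domain][channel index]. The
+ * value 127 (the s8 maximum) appears throughout as a sentinel meaning
+ * no limit is defined for that domain/channel combination.
+ */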
+const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 32,
+ [0][0][RTW89_WW][1] = 32,
+ [0][0][RTW89_WW][2] = 32,
+ [0][0][RTW89_WW][3] = 32,
+ [0][0][RTW89_WW][4] = 32,
+ [0][0][RTW89_WW][5] = 32,
+ [0][0][RTW89_WW][6] = 32,
+ [0][0][RTW89_WW][7] = 32,
+ [0][0][RTW89_WW][8] = 32,
+ [0][0][RTW89_WW][9] = 32,
+ [0][0][RTW89_WW][10] = 32,
+ [0][0][RTW89_WW][11] = 26,
+ [0][0][RTW89_WW][12] = -20,
+ [0][0][RTW89_WW][13] = 0,
+ [0][1][RTW89_WW][0] = 20,
+ [0][1][RTW89_WW][1] = 22,
+ [0][1][RTW89_WW][2] = 22,
+ [0][1][RTW89_WW][3] = 22,
+ [0][1][RTW89_WW][4] = 22,
+ [0][1][RTW89_WW][5] = 22,
+ [0][1][RTW89_WW][6] = 22,
+ [0][1][RTW89_WW][7] = 22,
+ [0][1][RTW89_WW][8] = 22,
+ [0][1][RTW89_WW][9] = 22,
+ [0][1][RTW89_WW][10] = 22,
+ [0][1][RTW89_WW][11] = 22,
+ [0][1][RTW89_WW][12] = -30,
+ [0][1][RTW89_WW][13] = 0,
+ [1][0][RTW89_WW][0] = 42,
+ [1][0][RTW89_WW][1] = 44,
+ [1][0][RTW89_WW][2] = 44,
+ [1][0][RTW89_WW][3] = 44,
+ [1][0][RTW89_WW][4] = 44,
+ [1][0][RTW89_WW][5] = 44,
+ [1][0][RTW89_WW][6] = 44,
+ [1][0][RTW89_WW][7] = 44,
+ [1][0][RTW89_WW][8] = 44,
+ [1][0][RTW89_WW][9] = 44,
+ [1][0][RTW89_WW][10] = 44,
+ [1][0][RTW89_WW][11] = 36,
+ [1][0][RTW89_WW][12] = 4,
+ [1][0][RTW89_WW][13] = 0,
+ [1][1][RTW89_WW][0] = 32,
+ [1][1][RTW89_WW][1] = 32,
+ [1][1][RTW89_WW][2] = 32,
+ [1][1][RTW89_WW][3] = 32,
+ [1][1][RTW89_WW][4] = 32,
+ [1][1][RTW89_WW][5] = 32,
+ [1][1][RTW89_WW][6] = 32,
+ [1][1][RTW89_WW][7] = 32,
+ [1][1][RTW89_WW][8] = 32,
+ [1][1][RTW89_WW][9] = 32,
+ [1][1][RTW89_WW][10] = 32,
+ [1][1][RTW89_WW][11] = 30,
+ [1][1][RTW89_WW][12] = -6,
+ [1][1][RTW89_WW][13] = 0,
+ [2][0][RTW89_WW][0] = 56,
+ [2][0][RTW89_WW][1] = 56,
+ [2][0][RTW89_WW][2] = 56,
+ [2][0][RTW89_WW][3] = 56,
+ [2][0][RTW89_WW][4] = 56,
+ [2][0][RTW89_WW][5] = 56,
+ [2][0][RTW89_WW][6] = 56,
+ [2][0][RTW89_WW][7] = 56,
+ [2][0][RTW89_WW][8] = 56,
+ [2][0][RTW89_WW][9] = 56,
+ [2][0][RTW89_WW][10] = 56,
+ [2][0][RTW89_WW][11] = 48,
+ [2][0][RTW89_WW][12] = 16,
+ [2][0][RTW89_WW][13] = 0,
+ [2][1][RTW89_WW][0] = 44,
+ [2][1][RTW89_WW][1] = 44,
+ [2][1][RTW89_WW][2] = 44,
+ [2][1][RTW89_WW][3] = 44,
+ [2][1][RTW89_WW][4] = 44,
+ [2][1][RTW89_WW][5] = 44,
+ [2][1][RTW89_WW][6] = 44,
+ [2][1][RTW89_WW][7] = 44,
+ [2][1][RTW89_WW][8] = 44,
+ [2][1][RTW89_WW][9] = 44,
+ [2][1][RTW89_WW][10] = 44,
+ [2][1][RTW89_WW][11] = 44,
+ [2][1][RTW89_WW][12] = 6,
+ [2][1][RTW89_WW][13] = 0,
+ [0][0][RTW89_FCC][0] = 60,
+ [0][0][RTW89_ETSI][0] = 34,
+ [0][0][RTW89_MKK][0] = 36,
+ [0][0][RTW89_IC][0] = 60,
+ [0][0][RTW89_KCC][0] = 42,
+ [0][0][RTW89_ACMA][0] = 34,
+ [0][0][RTW89_CN][0] = 32,
+ [0][0][RTW89_UK][0] = 34,
+ [0][0][RTW89_FCC][1] = 60,
+ [0][0][RTW89_ETSI][1] = 38,
+ [0][0][RTW89_MKK][1] = 40,
+ [0][0][RTW89_IC][1] = 60,
+ [0][0][RTW89_KCC][1] = 42,
+ [0][0][RTW89_ACMA][1] = 38,
+ [0][0][RTW89_CN][1] = 32,
+ [0][0][RTW89_UK][1] = 38,
+ [0][0][RTW89_FCC][2] = 64,
+ [0][0][RTW89_ETSI][2] = 38,
+ [0][0][RTW89_MKK][2] = 40,
+ [0][0][RTW89_IC][2] = 64,
+ [0][0][RTW89_KCC][2] = 42,
+ [0][0][RTW89_ACMA][2] = 38,
+ [0][0][RTW89_CN][2] = 32,
+ [0][0][RTW89_UK][2] = 38,
+ [0][0][RTW89_FCC][3] = 68,
+ [0][0][RTW89_ETSI][3] = 38,
+ [0][0][RTW89_MKK][3] = 40,
+ [0][0][RTW89_IC][3] = 68,
+ [0][0][RTW89_KCC][3] = 42,
+ [0][0][RTW89_ACMA][3] = 38,
+ [0][0][RTW89_CN][3] = 32,
+ [0][0][RTW89_UK][3] = 38,
+ [0][0][RTW89_FCC][4] = 68,
+ [0][0][RTW89_ETSI][4] = 38,
+ [0][0][RTW89_MKK][4] = 40,
+ [0][0][RTW89_IC][4] = 68,
+ [0][0][RTW89_KCC][4] = 42,
+ [0][0][RTW89_ACMA][4] = 38,
+ [0][0][RTW89_CN][4] = 32,
+ [0][0][RTW89_UK][4] = 38,
+ [0][0][RTW89_FCC][5] = 78,
+ [0][0][RTW89_ETSI][5] = 38,
+ [0][0][RTW89_MKK][5] = 40,
+ [0][0][RTW89_IC][5] = 78,
+ [0][0][RTW89_KCC][5] = 42,
+ [0][0][RTW89_ACMA][5] = 38,
+ [0][0][RTW89_CN][5] = 32,
+ [0][0][RTW89_UK][5] = 38,
+ [0][0][RTW89_FCC][6] = 54,
+ [0][0][RTW89_ETSI][6] = 38,
+ [0][0][RTW89_MKK][6] = 40,
+ [0][0][RTW89_IC][6] = 54,
+ [0][0][RTW89_KCC][6] = 42,
+ [0][0][RTW89_ACMA][6] = 38,
+ [0][0][RTW89_CN][6] = 32,
+ [0][0][RTW89_UK][6] = 38,
+ [0][0][RTW89_FCC][7] = 54,
+ [0][0][RTW89_ETSI][7] = 38,
+ [0][0][RTW89_MKK][7] = 40,
+ [0][0][RTW89_IC][7] = 54,
+ [0][0][RTW89_KCC][7] = 42,
+ [0][0][RTW89_ACMA][7] = 38,
+ [0][0][RTW89_CN][7] = 32,
+ [0][0][RTW89_UK][7] = 38,
+ [0][0][RTW89_FCC][8] = 50,
+ [0][0][RTW89_ETSI][8] = 38,
+ [0][0][RTW89_MKK][8] = 40,
+ [0][0][RTW89_IC][8] = 50,
+ [0][0][RTW89_KCC][8] = 42,
+ [0][0][RTW89_ACMA][8] = 38,
+ [0][0][RTW89_CN][8] = 32,
+ [0][0][RTW89_UK][8] = 38,
+ [0][0][RTW89_FCC][9] = 46,
+ [0][0][RTW89_ETSI][9] = 38,
+ [0][0][RTW89_MKK][9] = 40,
+ [0][0][RTW89_IC][9] = 46,
+ [0][0][RTW89_KCC][9] = 40,
+ [0][0][RTW89_ACMA][9] = 38,
+ [0][0][RTW89_CN][9] = 32,
+ [0][0][RTW89_UK][9] = 38,
+ [0][0][RTW89_FCC][10] = 46,
+ [0][0][RTW89_ETSI][10] = 38,
+ [0][0][RTW89_MKK][10] = 40,
+ [0][0][RTW89_IC][10] = 46,
+ [0][0][RTW89_KCC][10] = 40,
+ [0][0][RTW89_ACMA][10] = 38,
+ [0][0][RTW89_CN][10] = 32,
+ [0][0][RTW89_UK][10] = 38,
+ [0][0][RTW89_FCC][11] = 26,
+ [0][0][RTW89_ETSI][11] = 38,
+ [0][0][RTW89_MKK][11] = 40,
+ [0][0][RTW89_IC][11] = 26,
+ [0][0][RTW89_KCC][11] = 40,
+ [0][0][RTW89_ACMA][11] = 38,
+ [0][0][RTW89_CN][11] = 32,
+ [0][0][RTW89_UK][11] = 38,
+ [0][0][RTW89_FCC][12] = -20,
+ [0][0][RTW89_ETSI][12] = 34,
+ [0][0][RTW89_MKK][12] = 36,
+ [0][0][RTW89_IC][12] = -20,
+ [0][0][RTW89_KCC][12] = 40,
+ [0][0][RTW89_ACMA][12] = 34,
+ [0][0][RTW89_CN][12] = 32,
+ [0][0][RTW89_UK][12] = 34,
+ [0][0][RTW89_FCC][13] = 127,
+ [0][0][RTW89_ETSI][13] = 127,
+ [0][0][RTW89_MKK][13] = 127,
+ [0][0][RTW89_IC][13] = 127,
+ [0][0][RTW89_KCC][13] = 127,
+ [0][0][RTW89_ACMA][13] = 127,
+ [0][0][RTW89_CN][13] = 127,
+ [0][0][RTW89_UK][13] = 127,
+ [0][1][RTW89_FCC][0] = 56,
+ [0][1][RTW89_ETSI][0] = 22,
+ [0][1][RTW89_MKK][0] = 24,
+ [0][1][RTW89_IC][0] = 56,
+ [0][1][RTW89_KCC][0] = 30,
+ [0][1][RTW89_ACMA][0] = 22,
+ [0][1][RTW89_CN][0] = 20,
+ [0][1][RTW89_UK][0] = 22,
+ [0][1][RTW89_FCC][1] = 56,
+ [0][1][RTW89_ETSI][1] = 24,
+ [0][1][RTW89_MKK][1] = 30,
+ [0][1][RTW89_IC][1] = 56,
+ [0][1][RTW89_KCC][1] = 30,
+ [0][1][RTW89_ACMA][1] = 24,
+ [0][1][RTW89_CN][1] = 22,
+ [0][1][RTW89_UK][1] = 24,
+ [0][1][RTW89_FCC][2] = 60,
+ [0][1][RTW89_ETSI][2] = 24,
+ [0][1][RTW89_MKK][2] = 30,
+ [0][1][RTW89_IC][2] = 60,
+ [0][1][RTW89_KCC][2] = 30,
+ [0][1][RTW89_ACMA][2] = 24,
+ [0][1][RTW89_CN][2] = 22,
+ [0][1][RTW89_UK][2] = 24,
+ [0][1][RTW89_FCC][3] = 64,
+ [0][1][RTW89_ETSI][3] = 24,
+ [0][1][RTW89_MKK][3] = 30,
+ [0][1][RTW89_IC][3] = 64,
+ [0][1][RTW89_KCC][3] = 30,
+ [0][1][RTW89_ACMA][3] = 24,
+ [0][1][RTW89_CN][3] = 22,
+ [0][1][RTW89_UK][3] = 24,
+ [0][1][RTW89_FCC][4] = 68,
+ [0][1][RTW89_ETSI][4] = 24,
+ [0][1][RTW89_MKK][4] = 30,
+ [0][1][RTW89_IC][4] = 68,
+ [0][1][RTW89_KCC][4] = 28,
+ [0][1][RTW89_ACMA][4] = 24,
+ [0][1][RTW89_CN][4] = 22,
+ [0][1][RTW89_UK][4] = 24,
+ [0][1][RTW89_FCC][5] = 76,
+ [0][1][RTW89_ETSI][5] = 24,
+ [0][1][RTW89_MKK][5] = 30,
+ [0][1][RTW89_IC][5] = 76,
+ [0][1][RTW89_KCC][5] = 28,
+ [0][1][RTW89_ACMA][5] = 24,
+ [0][1][RTW89_CN][5] = 22,
+ [0][1][RTW89_UK][5] = 24,
+ [0][1][RTW89_FCC][6] = 54,
+ [0][1][RTW89_ETSI][6] = 24,
+ [0][1][RTW89_MKK][6] = 30,
+ [0][1][RTW89_IC][6] = 54,
+ [0][1][RTW89_KCC][6] = 28,
+ [0][1][RTW89_ACMA][6] = 24,
+ [0][1][RTW89_CN][6] = 22,
+ [0][1][RTW89_UK][6] = 24,
+ [0][1][RTW89_FCC][7] = 50,
+ [0][1][RTW89_ETSI][7] = 24,
+ [0][1][RTW89_MKK][7] = 30,
+ [0][1][RTW89_IC][7] = 50,
+ [0][1][RTW89_KCC][7] = 28,
+ [0][1][RTW89_ACMA][7] = 24,
+ [0][1][RTW89_CN][7] = 22,
+ [0][1][RTW89_UK][7] = 24,
+ [0][1][RTW89_FCC][8] = 46,
+ [0][1][RTW89_ETSI][8] = 24,
+ [0][1][RTW89_MKK][8] = 30,
+ [0][1][RTW89_IC][8] = 46,
+ [0][1][RTW89_KCC][8] = 28,
+ [0][1][RTW89_ACMA][8] = 24,
+ [0][1][RTW89_CN][8] = 22,
+ [0][1][RTW89_UK][8] = 24,
+ [0][1][RTW89_FCC][9] = 42,
+ [0][1][RTW89_ETSI][9] = 24,
+ [0][1][RTW89_MKK][9] = 30,
+ [0][1][RTW89_IC][9] = 42,
+ [0][1][RTW89_KCC][9] = 28,
+ [0][1][RTW89_ACMA][9] = 24,
+ [0][1][RTW89_CN][9] = 22,
+ [0][1][RTW89_UK][9] = 24,
+ [0][1][RTW89_FCC][10] = 42,
+ [0][1][RTW89_ETSI][10] = 24,
+ [0][1][RTW89_MKK][10] = 30,
+ [0][1][RTW89_IC][10] = 42,
+ [0][1][RTW89_KCC][10] = 28,
+ [0][1][RTW89_ACMA][10] = 24,
+ [0][1][RTW89_CN][10] = 22,
+ [0][1][RTW89_UK][10] = 24,
+ [0][1][RTW89_FCC][11] = 22,
+ [0][1][RTW89_ETSI][11] = 24,
+ [0][1][RTW89_MKK][11] = 30,
+ [0][1][RTW89_IC][11] = 22,
+ [0][1][RTW89_KCC][11] = 28,
+ [0][1][RTW89_ACMA][11] = 24,
+ [0][1][RTW89_CN][11] = 22,
+ [0][1][RTW89_UK][11] = 24,
+ [0][1][RTW89_FCC][12] = -30,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_MKK][12] = 24,
+ [0][1][RTW89_IC][12] = -30,
+ [0][1][RTW89_KCC][12] = 28,
+ [0][1][RTW89_ACMA][12] = 20,
+ [0][1][RTW89_CN][12] = 20,
+ [0][1][RTW89_UK][12] = 20,
+ [0][1][RTW89_FCC][13] = 127,
+ [0][1][RTW89_ETSI][13] = 127,
+ [0][1][RTW89_MKK][13] = 127,
+ [0][1][RTW89_IC][13] = 127,
+ [0][1][RTW89_KCC][13] = 127,
+ [0][1][RTW89_ACMA][13] = 127,
+ [0][1][RTW89_CN][13] = 127,
+ [0][1][RTW89_UK][13] = 127,
+ [1][0][RTW89_FCC][0] = 66,
+ [1][0][RTW89_ETSI][0] = 46,
+ [1][0][RTW89_MKK][0] = 48,
+ [1][0][RTW89_IC][0] = 66,
+ [1][0][RTW89_KCC][0] = 50,
+ [1][0][RTW89_ACMA][0] = 46,
+ [1][0][RTW89_CN][0] = 42,
+ [1][0][RTW89_UK][0] = 46,
+ [1][0][RTW89_FCC][1] = 66,
+ [1][0][RTW89_ETSI][1] = 46,
+ [1][0][RTW89_MKK][1] = 48,
+ [1][0][RTW89_IC][1] = 66,
+ [1][0][RTW89_KCC][1] = 50,
+ [1][0][RTW89_ACMA][1] = 46,
+ [1][0][RTW89_CN][1] = 44,
+ [1][0][RTW89_UK][1] = 46,
+ [1][0][RTW89_FCC][2] = 70,
+ [1][0][RTW89_ETSI][2] = 46,
+ [1][0][RTW89_MKK][2] = 48,
+ [1][0][RTW89_IC][2] = 70,
+ [1][0][RTW89_KCC][2] = 50,
+ [1][0][RTW89_ACMA][2] = 46,
+ [1][0][RTW89_CN][2] = 44,
+ [1][0][RTW89_UK][2] = 46,
+ [1][0][RTW89_FCC][3] = 72,
+ [1][0][RTW89_ETSI][3] = 46,
+ [1][0][RTW89_MKK][3] = 48,
+ [1][0][RTW89_IC][3] = 72,
+ [1][0][RTW89_KCC][3] = 50,
+ [1][0][RTW89_ACMA][3] = 46,
+ [1][0][RTW89_CN][3] = 44,
+ [1][0][RTW89_UK][3] = 46,
+ [1][0][RTW89_FCC][4] = 72,
+ [1][0][RTW89_ETSI][4] = 46,
+ [1][0][RTW89_MKK][4] = 48,
+ [1][0][RTW89_IC][4] = 72,
+ [1][0][RTW89_KCC][4] = 50,
+ [1][0][RTW89_ACMA][4] = 46,
+ [1][0][RTW89_CN][4] = 44,
+ [1][0][RTW89_UK][4] = 46,
+ [1][0][RTW89_FCC][5] = 82,
+ [1][0][RTW89_ETSI][5] = 46,
+ [1][0][RTW89_MKK][5] = 48,
+ [1][0][RTW89_IC][5] = 82,
+ [1][0][RTW89_KCC][5] = 50,
+ [1][0][RTW89_ACMA][5] = 46,
+ [1][0][RTW89_CN][5] = 44,
+ [1][0][RTW89_UK][5] = 46,
+ [1][0][RTW89_FCC][6] = 58,
+ [1][0][RTW89_ETSI][6] = 44,
+ [1][0][RTW89_MKK][6] = 48,
+ [1][0][RTW89_IC][6] = 58,
+ [1][0][RTW89_KCC][6] = 50,
+ [1][0][RTW89_ACMA][6] = 44,
+ [1][0][RTW89_CN][6] = 44,
+ [1][0][RTW89_UK][6] = 44,
+ [1][0][RTW89_FCC][7] = 58,
+ [1][0][RTW89_ETSI][7] = 46,
+ [1][0][RTW89_MKK][7] = 48,
+ [1][0][RTW89_IC][7] = 58,
+ [1][0][RTW89_KCC][7] = 50,
+ [1][0][RTW89_ACMA][7] = 46,
+ [1][0][RTW89_CN][7] = 44,
+ [1][0][RTW89_UK][7] = 46,
+ [1][0][RTW89_FCC][8] = 58,
+ [1][0][RTW89_ETSI][8] = 46,
+ [1][0][RTW89_MKK][8] = 48,
+ [1][0][RTW89_IC][8] = 58,
+ [1][0][RTW89_KCC][8] = 50,
+ [1][0][RTW89_ACMA][8] = 46,
+ [1][0][RTW89_CN][8] = 44,
+ [1][0][RTW89_UK][8] = 46,
+ [1][0][RTW89_FCC][9] = 54,
+ [1][0][RTW89_ETSI][9] = 46,
+ [1][0][RTW89_MKK][9] = 48,
+ [1][0][RTW89_IC][9] = 54,
+ [1][0][RTW89_KCC][9] = 50,
+ [1][0][RTW89_ACMA][9] = 46,
+ [1][0][RTW89_CN][9] = 44,
+ [1][0][RTW89_UK][9] = 46,
+ [1][0][RTW89_FCC][10] = 54,
+ [1][0][RTW89_ETSI][10] = 46,
+ [1][0][RTW89_MKK][10] = 48,
+ [1][0][RTW89_IC][10] = 54,
+ [1][0][RTW89_KCC][10] = 50,
+ [1][0][RTW89_ACMA][10] = 46,
+ [1][0][RTW89_CN][10] = 44,
+ [1][0][RTW89_UK][10] = 46,
+ [1][0][RTW89_FCC][11] = 36,
+ [1][0][RTW89_ETSI][11] = 46,
+ [1][0][RTW89_MKK][11] = 48,
+ [1][0][RTW89_IC][11] = 36,
+ [1][0][RTW89_KCC][11] = 50,
+ [1][0][RTW89_ACMA][11] = 46,
+ [1][0][RTW89_CN][11] = 44,
+ [1][0][RTW89_UK][11] = 46,
+ [1][0][RTW89_FCC][12] = 4,
+ [1][0][RTW89_ETSI][12] = 46,
+ [1][0][RTW89_MKK][12] = 46,
+ [1][0][RTW89_IC][12] = 4,
+ [1][0][RTW89_KCC][12] = 50,
+ [1][0][RTW89_ACMA][12] = 46,
+ [1][0][RTW89_CN][12] = 42,
+ [1][0][RTW89_UK][12] = 46,
+ [1][0][RTW89_FCC][13] = 127,
+ [1][0][RTW89_ETSI][13] = 127,
+ [1][0][RTW89_MKK][13] = 127,
+ [1][0][RTW89_IC][13] = 127,
+ [1][0][RTW89_KCC][13] = 127,
+ [1][0][RTW89_ACMA][13] = 127,
+ [1][0][RTW89_CN][13] = 127,
+ [1][0][RTW89_UK][13] = 127,
+ [1][1][RTW89_FCC][0] = 58,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_MKK][0] = 34,
+ [1][1][RTW89_IC][0] = 58,
+ [1][1][RTW89_KCC][0] = 38,
+ [1][1][RTW89_ACMA][0] = 32,
+ [1][1][RTW89_CN][0] = 32,
+ [1][1][RTW89_UK][0] = 32,
+ [1][1][RTW89_FCC][1] = 58,
+ [1][1][RTW89_ETSI][1] = 34,
+ [1][1][RTW89_MKK][1] = 34,
+ [1][1][RTW89_IC][1] = 58,
+ [1][1][RTW89_KCC][1] = 38,
+ [1][1][RTW89_ACMA][1] = 34,
+ [1][1][RTW89_CN][1] = 32,
+ [1][1][RTW89_UK][1] = 34,
+ [1][1][RTW89_FCC][2] = 62,
+ [1][1][RTW89_ETSI][2] = 34,
+ [1][1][RTW89_MKK][2] = 34,
+ [1][1][RTW89_IC][2] = 62,
+ [1][1][RTW89_KCC][2] = 38,
+ [1][1][RTW89_ACMA][2] = 34,
+ [1][1][RTW89_CN][2] = 32,
+ [1][1][RTW89_UK][2] = 34,
+ [1][1][RTW89_FCC][3] = 66,
+ [1][1][RTW89_ETSI][3] = 34,
+ [1][1][RTW89_MKK][3] = 34,
+ [1][1][RTW89_IC][3] = 66,
+ [1][1][RTW89_KCC][3] = 38,
+ [1][1][RTW89_ACMA][3] = 34,
+ [1][1][RTW89_CN][3] = 32,
+ [1][1][RTW89_UK][3] = 34,
+ [1][1][RTW89_FCC][4] = 70,
+ [1][1][RTW89_ETSI][4] = 34,
+ [1][1][RTW89_MKK][4] = 34,
+ [1][1][RTW89_IC][4] = 70,
+ [1][1][RTW89_KCC][4] = 38,
+ [1][1][RTW89_ACMA][4] = 34,
+ [1][1][RTW89_CN][4] = 32,
+ [1][1][RTW89_UK][4] = 34,
+ [1][1][RTW89_FCC][5] = 82,
+ [1][1][RTW89_ETSI][5] = 34,
+ [1][1][RTW89_MKK][5] = 34,
+ [1][1][RTW89_IC][5] = 82,
+ [1][1][RTW89_KCC][5] = 38,
+ [1][1][RTW89_ACMA][5] = 34,
+ [1][1][RTW89_CN][5] = 32,
+ [1][1][RTW89_UK][5] = 34,
+ [1][1][RTW89_FCC][6] = 60,
+ [1][1][RTW89_ETSI][6] = 34,
+ [1][1][RTW89_MKK][6] = 34,
+ [1][1][RTW89_IC][6] = 60,
+ [1][1][RTW89_KCC][6] = 38,
+ [1][1][RTW89_ACMA][6] = 34,
+ [1][1][RTW89_CN][6] = 32,
+ [1][1][RTW89_UK][6] = 34,
+ [1][1][RTW89_FCC][7] = 56,
+ [1][1][RTW89_ETSI][7] = 34,
+ [1][1][RTW89_MKK][7] = 34,
+ [1][1][RTW89_IC][7] = 56,
+ [1][1][RTW89_KCC][7] = 38,
+ [1][1][RTW89_ACMA][7] = 34,
+ [1][1][RTW89_CN][7] = 32,
+ [1][1][RTW89_UK][7] = 34,
+ [1][1][RTW89_FCC][8] = 52,
+ [1][1][RTW89_ETSI][8] = 34,
+ [1][1][RTW89_MKK][8] = 34,
+ [1][1][RTW89_IC][8] = 52,
+ [1][1][RTW89_KCC][8] = 38,
+ [1][1][RTW89_ACMA][8] = 34,
+ [1][1][RTW89_CN][8] = 32,
+ [1][1][RTW89_UK][8] = 34,
+ [1][1][RTW89_FCC][9] = 48,
+ [1][1][RTW89_ETSI][9] = 34,
+ [1][1][RTW89_MKK][9] = 34,
+ [1][1][RTW89_IC][9] = 48,
+ [1][1][RTW89_KCC][9] = 38,
+ [1][1][RTW89_ACMA][9] = 34,
+ [1][1][RTW89_CN][9] = 32,
+ [1][1][RTW89_UK][9] = 34,
+ [1][1][RTW89_FCC][10] = 48,
+ [1][1][RTW89_ETSI][10] = 34,
+ [1][1][RTW89_MKK][10] = 34,
+ [1][1][RTW89_IC][10] = 48,
+ [1][1][RTW89_KCC][10] = 38,
+ [1][1][RTW89_ACMA][10] = 34,
+ [1][1][RTW89_CN][10] = 32,
+ [1][1][RTW89_UK][10] = 34,
+ [1][1][RTW89_FCC][11] = 30,
+ [1][1][RTW89_ETSI][11] = 34,
+ [1][1][RTW89_MKK][11] = 34,
+ [1][1][RTW89_IC][11] = 30,
+ [1][1][RTW89_KCC][11] = 38,
+ [1][1][RTW89_ACMA][11] = 34,
+ [1][1][RTW89_CN][11] = 32,
+ [1][1][RTW89_UK][11] = 34,
+ [1][1][RTW89_FCC][12] = -6,
+ [1][1][RTW89_ETSI][12] = 34,
+ [1][1][RTW89_MKK][12] = 34,
+ [1][1][RTW89_IC][12] = -6,
+ [1][1][RTW89_KCC][12] = 38,
+ [1][1][RTW89_ACMA][12] = 34,
+ [1][1][RTW89_CN][12] = 32,
+ [1][1][RTW89_UK][12] = 34,
+ [1][1][RTW89_FCC][13] = 127,
+ [1][1][RTW89_ETSI][13] = 127,
+ [1][1][RTW89_MKK][13] = 127,
+ [1][1][RTW89_IC][13] = 127,
+ [1][1][RTW89_KCC][13] = 127,
+ [1][1][RTW89_ACMA][13] = 127,
+ [1][1][RTW89_CN][13] = 127,
+ [1][1][RTW89_UK][13] = 127,
+ [2][0][RTW89_FCC][0] = 70,
+ [2][0][RTW89_ETSI][0] = 58,
+ [2][0][RTW89_MKK][0] = 58,
+ [2][0][RTW89_IC][0] = 70,
+ [2][0][RTW89_KCC][0] = 64,
+ [2][0][RTW89_ACMA][0] = 58,
+ [2][0][RTW89_CN][0] = 56,
+ [2][0][RTW89_UK][0] = 58,
+ [2][0][RTW89_FCC][1] = 70,
+ [2][0][RTW89_ETSI][1] = 58,
+ [2][0][RTW89_MKK][1] = 58,
+ [2][0][RTW89_IC][1] = 70,
+ [2][0][RTW89_KCC][1] = 64,
+ [2][0][RTW89_ACMA][1] = 58,
+ [2][0][RTW89_CN][1] = 56,
+ [2][0][RTW89_UK][1] = 58,
+ [2][0][RTW89_FCC][2] = 72,
+ [2][0][RTW89_ETSI][2] = 58,
+ [2][0][RTW89_MKK][2] = 58,
+ [2][0][RTW89_IC][2] = 72,
+ [2][0][RTW89_KCC][2] = 64,
+ [2][0][RTW89_ACMA][2] = 58,
+ [2][0][RTW89_CN][2] = 56,
+ [2][0][RTW89_UK][2] = 58,
+ [2][0][RTW89_FCC][3] = 72,
+ [2][0][RTW89_ETSI][3] = 58,
+ [2][0][RTW89_MKK][3] = 58,
+ [2][0][RTW89_IC][3] = 72,
+ [2][0][RTW89_KCC][3] = 64,
+ [2][0][RTW89_ACMA][3] = 58,
+ [2][0][RTW89_CN][3] = 56,
+ [2][0][RTW89_UK][3] = 58,
+ [2][0][RTW89_FCC][4] = 72,
+ [2][0][RTW89_ETSI][4] = 58,
+ [2][0][RTW89_MKK][4] = 58,
+ [2][0][RTW89_IC][4] = 72,
+ [2][0][RTW89_KCC][4] = 64,
+ [2][0][RTW89_ACMA][4] = 58,
+ [2][0][RTW89_CN][4] = 56,
+ [2][0][RTW89_UK][4] = 58,
+ [2][0][RTW89_FCC][5] = 82,
+ [2][0][RTW89_ETSI][5] = 58,
+ [2][0][RTW89_MKK][5] = 58,
+ [2][0][RTW89_IC][5] = 82,
+ [2][0][RTW89_KCC][5] = 64,
+ [2][0][RTW89_ACMA][5] = 58,
+ [2][0][RTW89_CN][5] = 56,
+ [2][0][RTW89_UK][5] = 58,
+ [2][0][RTW89_FCC][6] = 66,
+ [2][0][RTW89_ETSI][6] = 56,
+ [2][0][RTW89_MKK][6] = 58,
+ [2][0][RTW89_IC][6] = 66,
+ [2][0][RTW89_KCC][6] = 64,
+ [2][0][RTW89_ACMA][6] = 56,
+ [2][0][RTW89_CN][6] = 56,
+ [2][0][RTW89_UK][6] = 56,
+ [2][0][RTW89_FCC][7] = 66,
+ [2][0][RTW89_ETSI][7] = 58,
+ [2][0][RTW89_MKK][7] = 58,
+ [2][0][RTW89_IC][7] = 66,
+ [2][0][RTW89_KCC][7] = 64,
+ [2][0][RTW89_ACMA][7] = 58,
+ [2][0][RTW89_CN][7] = 56,
+ [2][0][RTW89_UK][7] = 58,
+ [2][0][RTW89_FCC][8] = 66,
+ [2][0][RTW89_ETSI][8] = 58,
+ [2][0][RTW89_MKK][8] = 58,
+ [2][0][RTW89_IC][8] = 66,
+ [2][0][RTW89_KCC][8] = 64,
+ [2][0][RTW89_ACMA][8] = 58,
+ [2][0][RTW89_CN][8] = 56,
+ [2][0][RTW89_UK][8] = 58,
+ [2][0][RTW89_FCC][9] = 64,
+ [2][0][RTW89_ETSI][9] = 58,
+ [2][0][RTW89_MKK][9] = 58,
+ [2][0][RTW89_IC][9] = 64,
+ [2][0][RTW89_KCC][9] = 64,
+ [2][0][RTW89_ACMA][9] = 58,
+ [2][0][RTW89_CN][9] = 56,
+ [2][0][RTW89_UK][9] = 58,
+ [2][0][RTW89_FCC][10] = 64,
+ [2][0][RTW89_ETSI][10] = 58,
+ [2][0][RTW89_MKK][10] = 58,
+ [2][0][RTW89_IC][10] = 64,
+ [2][0][RTW89_KCC][10] = 64,
+ [2][0][RTW89_ACMA][10] = 58,
+ [2][0][RTW89_CN][10] = 56,
+ [2][0][RTW89_UK][10] = 58,
+ [2][0][RTW89_FCC][11] = 48,
+ [2][0][RTW89_ETSI][11] = 58,
+ [2][0][RTW89_MKK][11] = 58,
+ [2][0][RTW89_IC][11] = 48,
+ [2][0][RTW89_KCC][11] = 64,
+ [2][0][RTW89_ACMA][11] = 58,
+ [2][0][RTW89_CN][11] = 56,
+ [2][0][RTW89_UK][11] = 58,
+ [2][0][RTW89_FCC][12] = 16,
+ [2][0][RTW89_ETSI][12] = 58,
+ [2][0][RTW89_MKK][12] = 58,
+ [2][0][RTW89_IC][12] = 16,
+ [2][0][RTW89_KCC][12] = 64,
+ [2][0][RTW89_ACMA][12] = 58,
+ [2][0][RTW89_CN][12] = 56,
+ [2][0][RTW89_UK][12] = 58,
+ [2][0][RTW89_FCC][13] = 127,
+ [2][0][RTW89_ETSI][13] = 127,
+ [2][0][RTW89_MKK][13] = 127,
+ [2][0][RTW89_IC][13] = 127,
+ [2][0][RTW89_KCC][13] = 127,
+ [2][0][RTW89_ACMA][13] = 127,
+ [2][0][RTW89_CN][13] = 127,
+ [2][0][RTW89_UK][13] = 127,
+ [2][1][RTW89_FCC][0] = 64,
+ [2][1][RTW89_ETSI][0] = 46,
+ [2][1][RTW89_MKK][0] = 46,
+ [2][1][RTW89_IC][0] = 64,
+ [2][1][RTW89_KCC][0] = 52,
+ [2][1][RTW89_ACMA][0] = 46,
+ [2][1][RTW89_CN][0] = 44,
+ [2][1][RTW89_UK][0] = 46,
+ [2][1][RTW89_FCC][1] = 64,
+ [2][1][RTW89_ETSI][1] = 46,
+ [2][1][RTW89_MKK][1] = 46,
+ [2][1][RTW89_IC][1] = 64,
+ [2][1][RTW89_KCC][1] = 52,
+ [2][1][RTW89_ACMA][1] = 46,
+ [2][1][RTW89_CN][1] = 44,
+ [2][1][RTW89_UK][1] = 46,
+ [2][1][RTW89_FCC][2] = 68,
+ [2][1][RTW89_ETSI][2] = 46,
+ [2][1][RTW89_MKK][2] = 46,
+ [2][1][RTW89_IC][2] = 68,
+ [2][1][RTW89_KCC][2] = 52,
+ [2][1][RTW89_ACMA][2] = 46,
+ [2][1][RTW89_CN][2] = 44,
+ [2][1][RTW89_UK][2] = 46,
+ [2][1][RTW89_FCC][3] = 72,
+ [2][1][RTW89_ETSI][3] = 46,
+ [2][1][RTW89_MKK][3] = 46,
+ [2][1][RTW89_IC][3] = 72,
+ [2][1][RTW89_KCC][3] = 52,
+ [2][1][RTW89_ACMA][3] = 46,
+ [2][1][RTW89_CN][3] = 44,
+ [2][1][RTW89_UK][3] = 46,
+ [2][1][RTW89_FCC][4] = 74,
+ [2][1][RTW89_ETSI][4] = 46,
+ [2][1][RTW89_MKK][4] = 46,
+ [2][1][RTW89_IC][4] = 74,
+ [2][1][RTW89_KCC][4] = 50,
+ [2][1][RTW89_ACMA][4] = 46,
+ [2][1][RTW89_CN][4] = 44,
+ [2][1][RTW89_UK][4] = 46,
+ [2][1][RTW89_FCC][5] = 82,
+ [2][1][RTW89_ETSI][5] = 46,
+ [2][1][RTW89_MKK][5] = 46,
+ [2][1][RTW89_IC][5] = 82,
+ [2][1][RTW89_KCC][5] = 50,
+ [2][1][RTW89_ACMA][5] = 46,
+ [2][1][RTW89_CN][5] = 44,
+ [2][1][RTW89_UK][5] = 46,
+ [2][1][RTW89_FCC][6] = 72,
+ [2][1][RTW89_ETSI][6] = 44,
+ [2][1][RTW89_MKK][6] = 46,
+ [2][1][RTW89_IC][6] = 72,
+ [2][1][RTW89_KCC][6] = 50,
+ [2][1][RTW89_ACMA][6] = 44,
+ [2][1][RTW89_CN][6] = 44,
+ [2][1][RTW89_UK][6] = 44,
+ [2][1][RTW89_FCC][7] = 72,
+ [2][1][RTW89_ETSI][7] = 46,
+ [2][1][RTW89_MKK][7] = 46,
+ [2][1][RTW89_IC][7] = 72,
+ [2][1][RTW89_KCC][7] = 50,
+ [2][1][RTW89_ACMA][7] = 46,
+ [2][1][RTW89_CN][7] = 44,
+ [2][1][RTW89_UK][7] = 46,
+ [2][1][RTW89_FCC][8] = 68,
+ [2][1][RTW89_ETSI][8] = 46,
+ [2][1][RTW89_MKK][8] = 46,
+ [2][1][RTW89_IC][8] = 68,
+ [2][1][RTW89_KCC][8] = 50,
+ [2][1][RTW89_ACMA][8] = 46,
+ [2][1][RTW89_CN][8] = 44,
+ [2][1][RTW89_UK][8] = 46,
+ [2][1][RTW89_FCC][9] = 64,
+ [2][1][RTW89_ETSI][9] = 46,
+ [2][1][RTW89_MKK][9] = 46,
+ [2][1][RTW89_IC][9] = 64,
+ [2][1][RTW89_KCC][9] = 52,
+ [2][1][RTW89_ACMA][9] = 46,
+ [2][1][RTW89_CN][9] = 44,
+ [2][1][RTW89_UK][9] = 46,
+ [2][1][RTW89_FCC][10] = 64,
+ [2][1][RTW89_ETSI][10] = 46,
+ [2][1][RTW89_MKK][10] = 46,
+ [2][1][RTW89_IC][10] = 64,
+ [2][1][RTW89_KCC][10] = 52,
+ [2][1][RTW89_ACMA][10] = 46,
+ [2][1][RTW89_CN][10] = 44,
+ [2][1][RTW89_UK][10] = 46,
+ [2][1][RTW89_FCC][11] = 46,
+ [2][1][RTW89_ETSI][11] = 46,
+ [2][1][RTW89_MKK][11] = 46,
+ [2][1][RTW89_IC][11] = 46,
+ [2][1][RTW89_KCC][11] = 52,
+ [2][1][RTW89_ACMA][11] = 46,
+ [2][1][RTW89_CN][11] = 44,
+ [2][1][RTW89_UK][11] = 46,
+ [2][1][RTW89_FCC][12] = 6,
+ [2][1][RTW89_ETSI][12] = 44,
+ [2][1][RTW89_MKK][12] = 46,
+ [2][1][RTW89_IC][12] = 6,
+ [2][1][RTW89_KCC][12] = 52,
+ [2][1][RTW89_ACMA][12] = 44,
+ [2][1][RTW89_CN][12] = 42,
+ [2][1][RTW89_UK][12] = 44,
+ [2][1][RTW89_FCC][13] = 127,
+ [2][1][RTW89_ETSI][13] = 127,
+ [2][1][RTW89_MKK][13] = 127,
+ [2][1][RTW89_IC][13] = 127,
+ [2][1][RTW89_KCC][13] = 127,
+ [2][1][RTW89_ACMA][13] = 127,
+ [2][1][RTW89_CN][13] = 127,
+ [2][1][RTW89_UK][13] = 127,
+};
+
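+/*
+ * A minimal lookup sketch, assuming the indexing described above; the
+ * helper name is hypothetical and not part of the driver's API.
+ */
+static inline s8 example_ru_lmt_2g(u8 ru, u8 ntx, u8 regd, u8 ch)
+{
+	/* 127 is read here as the assumed "no limit defined" sentinel */
+	return rtw89_8852c_txpwr_lmt_ru_2g[ru][ntx][regd][ch];
+}
+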
+const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = 16,
+ [0][0][RTW89_WW][2] = 16,
+ [0][0][RTW89_WW][4] = 16,
+ [0][0][RTW89_WW][6] = 10,
+ [0][0][RTW89_WW][8] = 16,
+ [0][0][RTW89_WW][10] = 16,
+ [0][0][RTW89_WW][12] = 16,
+ [0][0][RTW89_WW][14] = 16,
+ [0][0][RTW89_WW][15] = 30,
+ [0][0][RTW89_WW][17] = 30,
+ [0][0][RTW89_WW][19] = 30,
+ [0][0][RTW89_WW][21] = 30,
+ [0][0][RTW89_WW][23] = 30,
+ [0][0][RTW89_WW][25] = 30,
+ [0][0][RTW89_WW][27] = 30,
+ [0][0][RTW89_WW][29] = 30,
+ [0][0][RTW89_WW][31] = 30,
+ [0][0][RTW89_WW][33] = 30,
+ [0][0][RTW89_WW][35] = 30,
+ [0][0][RTW89_WW][37] = 30,
+ [0][0][RTW89_WW][38] = 28,
+ [0][0][RTW89_WW][40] = 28,
+ [0][0][RTW89_WW][42] = 28,
+ [0][0][RTW89_WW][44] = 28,
+ [0][0][RTW89_WW][46] = 28,
+ [0][0][RTW89_WW][48] = 46,
+ [0][0][RTW89_WW][50] = 44,
+ [0][0][RTW89_WW][52] = 34,
+ [0][1][RTW89_WW][0] = 4,
+ [0][1][RTW89_WW][2] = 4,
+ [0][1][RTW89_WW][4] = 4,
+ [0][1][RTW89_WW][6] = 1,
+ [0][1][RTW89_WW][8] = 4,
+ [0][1][RTW89_WW][10] = 4,
+ [0][1][RTW89_WW][12] = 4,
+ [0][1][RTW89_WW][14] = 4,
+ [0][1][RTW89_WW][15] = 18,
+ [0][1][RTW89_WW][17] = 18,
+ [0][1][RTW89_WW][19] = 18,
+ [0][1][RTW89_WW][21] = 18,
+ [0][1][RTW89_WW][23] = 18,
+ [0][1][RTW89_WW][25] = 18,
+ [0][1][RTW89_WW][27] = 16,
+ [0][1][RTW89_WW][29] = 16,
+ [0][1][RTW89_WW][31] = 16,
+ [0][1][RTW89_WW][33] = 16,
+ [0][1][RTW89_WW][35] = 16,
+ [0][1][RTW89_WW][37] = 18,
+ [0][1][RTW89_WW][38] = 16,
+ [0][1][RTW89_WW][40] = 16,
+ [0][1][RTW89_WW][42] = 16,
+ [0][1][RTW89_WW][44] = 16,
+ [0][1][RTW89_WW][46] = 16,
+ [0][1][RTW89_WW][48] = 20,
+ [0][1][RTW89_WW][50] = 20,
+ [0][1][RTW89_WW][52] = 8,
+ [1][0][RTW89_WW][0] = 26,
+ [1][0][RTW89_WW][2] = 26,
+ [1][0][RTW89_WW][4] = 26,
+ [1][0][RTW89_WW][6] = 24,
+ [1][0][RTW89_WW][8] = 26,
+ [1][0][RTW89_WW][10] = 26,
+ [1][0][RTW89_WW][12] = 26,
+ [1][0][RTW89_WW][14] = 26,
+ [1][0][RTW89_WW][15] = 40,
+ [1][0][RTW89_WW][17] = 40,
+ [1][0][RTW89_WW][19] = 40,
+ [1][0][RTW89_WW][21] = 40,
+ [1][0][RTW89_WW][23] = 40,
+ [1][0][RTW89_WW][25] = 40,
+ [1][0][RTW89_WW][27] = 42,
+ [1][0][RTW89_WW][29] = 42,
+ [1][0][RTW89_WW][31] = 42,
+ [1][0][RTW89_WW][33] = 42,
+ [1][0][RTW89_WW][35] = 42,
+ [1][0][RTW89_WW][37] = 42,
+ [1][0][RTW89_WW][38] = 28,
+ [1][0][RTW89_WW][40] = 28,
+ [1][0][RTW89_WW][42] = 28,
+ [1][0][RTW89_WW][44] = 28,
+ [1][0][RTW89_WW][46] = 28,
+ [1][0][RTW89_WW][48] = 56,
+ [1][0][RTW89_WW][50] = 58,
+ [1][0][RTW89_WW][52] = 56,
+ [1][1][RTW89_WW][0] = 14,
+ [1][1][RTW89_WW][2] = 14,
+ [1][1][RTW89_WW][4] = 14,
+ [1][1][RTW89_WW][6] = 8,
+ [1][1][RTW89_WW][8] = 14,
+ [1][1][RTW89_WW][10] = 14,
+ [1][1][RTW89_WW][12] = 14,
+ [1][1][RTW89_WW][14] = 14,
+ [1][1][RTW89_WW][15] = 28,
+ [1][1][RTW89_WW][17] = 28,
+ [1][1][RTW89_WW][19] = 28,
+ [1][1][RTW89_WW][21] = 28,
+ [1][1][RTW89_WW][23] = 28,
+ [1][1][RTW89_WW][25] = 28,
+ [1][1][RTW89_WW][27] = 30,
+ [1][1][RTW89_WW][29] = 30,
+ [1][1][RTW89_WW][31] = 30,
+ [1][1][RTW89_WW][33] = 30,
+ [1][1][RTW89_WW][35] = 30,
+ [1][1][RTW89_WW][37] = 32,
+ [1][1][RTW89_WW][38] = 16,
+ [1][1][RTW89_WW][40] = 16,
+ [1][1][RTW89_WW][42] = 16,
+ [1][1][RTW89_WW][44] = 16,
+ [1][1][RTW89_WW][46] = 16,
+ [1][1][RTW89_WW][48] = 34,
+ [1][1][RTW89_WW][50] = 34,
+ [1][1][RTW89_WW][52] = 30,
+ [2][0][RTW89_WW][0] = 40,
+ [2][0][RTW89_WW][2] = 40,
+ [2][0][RTW89_WW][4] = 40,
+ [2][0][RTW89_WW][6] = 36,
+ [2][0][RTW89_WW][8] = 40,
+ [2][0][RTW89_WW][10] = 40,
+ [2][0][RTW89_WW][12] = 40,
+ [2][0][RTW89_WW][14] = 40,
+ [2][0][RTW89_WW][15] = 52,
+ [2][0][RTW89_WW][17] = 52,
+ [2][0][RTW89_WW][19] = 52,
+ [2][0][RTW89_WW][21] = 52,
+ [2][0][RTW89_WW][23] = 52,
+ [2][0][RTW89_WW][25] = 52,
+ [2][0][RTW89_WW][27] = 52,
+ [2][0][RTW89_WW][29] = 52,
+ [2][0][RTW89_WW][31] = 52,
+ [2][0][RTW89_WW][33] = 52,
+ [2][0][RTW89_WW][35] = 52,
+ [2][0][RTW89_WW][37] = 52,
+ [2][0][RTW89_WW][38] = 28,
+ [2][0][RTW89_WW][40] = 28,
+ [2][0][RTW89_WW][42] = 28,
+ [2][0][RTW89_WW][44] = 28,
+ [2][0][RTW89_WW][46] = 28,
+ [2][0][RTW89_WW][48] = 64,
+ [2][0][RTW89_WW][50] = 64,
+ [2][0][RTW89_WW][52] = 64,
+ [2][1][RTW89_WW][0] = 26,
+ [2][1][RTW89_WW][2] = 26,
+ [2][1][RTW89_WW][4] = 26,
+ [2][1][RTW89_WW][6] = 20,
+ [2][1][RTW89_WW][8] = 28,
+ [2][1][RTW89_WW][10] = 28,
+ [2][1][RTW89_WW][12] = 28,
+ [2][1][RTW89_WW][14] = 28,
+ [2][1][RTW89_WW][15] = 40,
+ [2][1][RTW89_WW][17] = 40,
+ [2][1][RTW89_WW][19] = 40,
+ [2][1][RTW89_WW][21] = 40,
+ [2][1][RTW89_WW][23] = 40,
+ [2][1][RTW89_WW][25] = 40,
+ [2][1][RTW89_WW][27] = 40,
+ [2][1][RTW89_WW][29] = 40,
+ [2][1][RTW89_WW][31] = 40,
+ [2][1][RTW89_WW][33] = 40,
+ [2][1][RTW89_WW][35] = 40,
+ [2][1][RTW89_WW][37] = 42,
+ [2][1][RTW89_WW][38] = 16,
+ [2][1][RTW89_WW][40] = 16,
+ [2][1][RTW89_WW][42] = 16,
+ [2][1][RTW89_WW][44] = 16,
+ [2][1][RTW89_WW][46] = 16,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
+ [0][0][RTW89_FCC][0] = 50,
+ [0][0][RTW89_ETSI][0] = 30,
+ [0][0][RTW89_MKK][0] = 36,
+ [0][0][RTW89_IC][0] = 32,
+ [0][0][RTW89_KCC][0] = 42,
+ [0][0][RTW89_ACMA][0] = 30,
+ [0][0][RTW89_CN][0] = 16,
+ [0][0][RTW89_UK][0] = 30,
+ [0][0][RTW89_FCC][2] = 50,
+ [0][0][RTW89_ETSI][2] = 30,
+ [0][0][RTW89_MKK][2] = 36,
+ [0][0][RTW89_IC][2] = 32,
+ [0][0][RTW89_KCC][2] = 42,
+ [0][0][RTW89_ACMA][2] = 30,
+ [0][0][RTW89_CN][2] = 16,
+ [0][0][RTW89_UK][2] = 30,
+ [0][0][RTW89_FCC][4] = 50,
+ [0][0][RTW89_ETSI][4] = 30,
+ [0][0][RTW89_MKK][4] = 22,
+ [0][0][RTW89_IC][4] = 32,
+ [0][0][RTW89_KCC][4] = 42,
+ [0][0][RTW89_ACMA][4] = 30,
+ [0][0][RTW89_CN][4] = 16,
+ [0][0][RTW89_UK][4] = 30,
+ [0][0][RTW89_FCC][6] = 50,
+ [0][0][RTW89_ETSI][6] = 30,
+ [0][0][RTW89_MKK][6] = 22,
+ [0][0][RTW89_IC][6] = 32,
+ [0][0][RTW89_KCC][6] = 10,
+ [0][0][RTW89_ACMA][6] = 30,
+ [0][0][RTW89_CN][6] = 16,
+ [0][0][RTW89_UK][6] = 30,
+ [0][0][RTW89_FCC][8] = 52,
+ [0][0][RTW89_ETSI][8] = 28,
+ [0][0][RTW89_MKK][8] = 18,
+ [0][0][RTW89_IC][8] = 52,
+ [0][0][RTW89_KCC][8] = 44,
+ [0][0][RTW89_ACMA][8] = 28,
+ [0][0][RTW89_CN][8] = 16,
+ [0][0][RTW89_UK][8] = 28,
+ [0][0][RTW89_FCC][10] = 52,
+ [0][0][RTW89_ETSI][10] = 28,
+ [0][0][RTW89_MKK][10] = 18,
+ [0][0][RTW89_IC][10] = 52,
+ [0][0][RTW89_KCC][10] = 44,
+ [0][0][RTW89_ACMA][10] = 28,
+ [0][0][RTW89_CN][10] = 16,
+ [0][0][RTW89_UK][10] = 28,
+ [0][0][RTW89_FCC][12] = 52,
+ [0][0][RTW89_ETSI][12] = 28,
+ [0][0][RTW89_MKK][12] = 34,
+ [0][0][RTW89_IC][12] = 52,
+ [0][0][RTW89_KCC][12] = 40,
+ [0][0][RTW89_ACMA][12] = 28,
+ [0][0][RTW89_CN][12] = 16,
+ [0][0][RTW89_UK][12] = 28,
+ [0][0][RTW89_FCC][14] = 52,
+ [0][0][RTW89_ETSI][14] = 28,
+ [0][0][RTW89_MKK][14] = 34,
+ [0][0][RTW89_IC][14] = 52,
+ [0][0][RTW89_KCC][14] = 40,
+ [0][0][RTW89_ACMA][14] = 28,
+ [0][0][RTW89_CN][14] = 16,
+ [0][0][RTW89_UK][14] = 28,
+ [0][0][RTW89_FCC][15] = 52,
+ [0][0][RTW89_ETSI][15] = 30,
+ [0][0][RTW89_MKK][15] = 56,
+ [0][0][RTW89_IC][15] = 52,
+ [0][0][RTW89_KCC][15] = 42,
+ [0][0][RTW89_ACMA][15] = 30,
+ [0][0][RTW89_CN][15] = 127,
+ [0][0][RTW89_UK][15] = 30,
+ [0][0][RTW89_FCC][17] = 52,
+ [0][0][RTW89_ETSI][17] = 30,
+ [0][0][RTW89_MKK][17] = 58,
+ [0][0][RTW89_IC][17] = 52,
+ [0][0][RTW89_KCC][17] = 42,
+ [0][0][RTW89_ACMA][17] = 30,
+ [0][0][RTW89_CN][17] = 127,
+ [0][0][RTW89_UK][17] = 30,
+ [0][0][RTW89_FCC][19] = 52,
+ [0][0][RTW89_ETSI][19] = 30,
+ [0][0][RTW89_MKK][19] = 58,
+ [0][0][RTW89_IC][19] = 52,
+ [0][0][RTW89_KCC][19] = 42,
+ [0][0][RTW89_ACMA][19] = 30,
+ [0][0][RTW89_CN][19] = 127,
+ [0][0][RTW89_UK][19] = 30,
+ [0][0][RTW89_FCC][21] = 52,
+ [0][0][RTW89_ETSI][21] = 30,
+ [0][0][RTW89_MKK][21] = 58,
+ [0][0][RTW89_IC][21] = 52,
+ [0][0][RTW89_KCC][21] = 42,
+ [0][0][RTW89_ACMA][21] = 30,
+ [0][0][RTW89_CN][21] = 127,
+ [0][0][RTW89_UK][21] = 30,
+ [0][0][RTW89_FCC][23] = 52,
+ [0][0][RTW89_ETSI][23] = 30,
+ [0][0][RTW89_MKK][23] = 58,
+ [0][0][RTW89_IC][23] = 52,
+ [0][0][RTW89_KCC][23] = 42,
+ [0][0][RTW89_ACMA][23] = 30,
+ [0][0][RTW89_CN][23] = 127,
+ [0][0][RTW89_UK][23] = 30,
+ [0][0][RTW89_FCC][25] = 52,
+ [0][0][RTW89_ETSI][25] = 30,
+ [0][0][RTW89_MKK][25] = 58,
+ [0][0][RTW89_IC][25] = 127,
+ [0][0][RTW89_KCC][25] = 42,
+ [0][0][RTW89_ACMA][25] = 127,
+ [0][0][RTW89_CN][25] = 127,
+ [0][0][RTW89_UK][25] = 30,
+ [0][0][RTW89_FCC][27] = 52,
+ [0][0][RTW89_ETSI][27] = 30,
+ [0][0][RTW89_MKK][27] = 58,
+ [0][0][RTW89_IC][27] = 127,
+ [0][0][RTW89_KCC][27] = 42,
+ [0][0][RTW89_ACMA][27] = 127,
+ [0][0][RTW89_CN][27] = 127,
+ [0][0][RTW89_UK][27] = 30,
+ [0][0][RTW89_FCC][29] = 52,
+ [0][0][RTW89_ETSI][29] = 30,
+ [0][0][RTW89_MKK][29] = 58,
+ [0][0][RTW89_IC][29] = 127,
+ [0][0][RTW89_KCC][29] = 42,
+ [0][0][RTW89_ACMA][29] = 127,
+ [0][0][RTW89_CN][29] = 127,
+ [0][0][RTW89_UK][29] = 30,
+ [0][0][RTW89_FCC][31] = 52,
+ [0][0][RTW89_ETSI][31] = 30,
+ [0][0][RTW89_MKK][31] = 58,
+ [0][0][RTW89_IC][31] = 44,
+ [0][0][RTW89_KCC][31] = 42,
+ [0][0][RTW89_ACMA][31] = 30,
+ [0][0][RTW89_CN][31] = 127,
+ [0][0][RTW89_UK][31] = 30,
+ [0][0][RTW89_FCC][33] = 44,
+ [0][0][RTW89_ETSI][33] = 30,
+ [0][0][RTW89_MKK][33] = 58,
+ [0][0][RTW89_IC][33] = 44,
+ [0][0][RTW89_KCC][33] = 42,
+ [0][0][RTW89_ACMA][33] = 30,
+ [0][0][RTW89_CN][33] = 127,
+ [0][0][RTW89_UK][33] = 30,
+ [0][0][RTW89_FCC][35] = 44,
+ [0][0][RTW89_ETSI][35] = 30,
+ [0][0][RTW89_MKK][35] = 58,
+ [0][0][RTW89_IC][35] = 44,
+ [0][0][RTW89_KCC][35] = 42,
+ [0][0][RTW89_ACMA][35] = 30,
+ [0][0][RTW89_CN][35] = 127,
+ [0][0][RTW89_UK][35] = 30,
+ [0][0][RTW89_FCC][37] = 52,
+ [0][0][RTW89_ETSI][37] = 127,
+ [0][0][RTW89_MKK][37] = 58,
+ [0][0][RTW89_IC][37] = 52,
+ [0][0][RTW89_KCC][37] = 42,
+ [0][0][RTW89_ACMA][37] = 52,
+ [0][0][RTW89_CN][37] = 127,
+ [0][0][RTW89_UK][37] = 30,
+ [0][0][RTW89_FCC][38] = 64,
+ [0][0][RTW89_ETSI][38] = 28,
+ [0][0][RTW89_MKK][38] = 127,
+ [0][0][RTW89_IC][38] = 64,
+ [0][0][RTW89_KCC][38] = 42,
+ [0][0][RTW89_ACMA][38] = 64,
+ [0][0][RTW89_CN][38] = 54,
+ [0][0][RTW89_UK][38] = 30,
+ [0][0][RTW89_FCC][40] = 64,
+ [0][0][RTW89_ETSI][40] = 28,
+ [0][0][RTW89_MKK][40] = 127,
+ [0][0][RTW89_IC][40] = 64,
+ [0][0][RTW89_KCC][40] = 42,
+ [0][0][RTW89_ACMA][40] = 64,
+ [0][0][RTW89_CN][40] = 54,
+ [0][0][RTW89_UK][40] = 30,
+ [0][0][RTW89_FCC][42] = 60,
+ [0][0][RTW89_ETSI][42] = 28,
+ [0][0][RTW89_MKK][42] = 127,
+ [0][0][RTW89_IC][42] = 60,
+ [0][0][RTW89_KCC][42] = 42,
+ [0][0][RTW89_ACMA][42] = 60,
+ [0][0][RTW89_CN][42] = 54,
+ [0][0][RTW89_UK][42] = 30,
+ [0][0][RTW89_FCC][44] = 60,
+ [0][0][RTW89_ETSI][44] = 28,
+ [0][0][RTW89_MKK][44] = 127,
+ [0][0][RTW89_IC][44] = 60,
+ [0][0][RTW89_KCC][44] = 42,
+ [0][0][RTW89_ACMA][44] = 60,
+ [0][0][RTW89_CN][44] = 54,
+ [0][0][RTW89_UK][44] = 30,
+ [0][0][RTW89_FCC][46] = 60,
+ [0][0][RTW89_ETSI][46] = 28,
+ [0][0][RTW89_MKK][46] = 127,
+ [0][0][RTW89_IC][46] = 60,
+ [0][0][RTW89_KCC][46] = 42,
+ [0][0][RTW89_ACMA][46] = 60,
+ [0][0][RTW89_CN][46] = 54,
+ [0][0][RTW89_UK][46] = 30,
+ [0][0][RTW89_FCC][48] = 46,
+ [0][0][RTW89_ETSI][48] = 127,
+ [0][0][RTW89_MKK][48] = 127,
+ [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_KCC][48] = 127,
+ [0][0][RTW89_ACMA][48] = 127,
+ [0][0][RTW89_CN][48] = 127,
+ [0][0][RTW89_UK][48] = 127,
+ [0][0][RTW89_FCC][50] = 44,
+ [0][0][RTW89_ETSI][50] = 127,
+ [0][0][RTW89_MKK][50] = 127,
+ [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_KCC][50] = 127,
+ [0][0][RTW89_ACMA][50] = 127,
+ [0][0][RTW89_CN][50] = 127,
+ [0][0][RTW89_UK][50] = 127,
+ [0][0][RTW89_FCC][52] = 34,
+ [0][0][RTW89_ETSI][52] = 127,
+ [0][0][RTW89_MKK][52] = 127,
+ [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_KCC][52] = 127,
+ [0][0][RTW89_ACMA][52] = 127,
+ [0][0][RTW89_CN][52] = 127,
+ [0][0][RTW89_UK][52] = 127,
+ [0][1][RTW89_FCC][0] = 30,
+ [0][1][RTW89_ETSI][0] = 18,
+ [0][1][RTW89_MKK][0] = 20,
+ [0][1][RTW89_IC][0] = 8,
+ [0][1][RTW89_KCC][0] = 26,
+ [0][1][RTW89_ACMA][0] = 18,
+ [0][1][RTW89_CN][0] = 4,
+ [0][1][RTW89_UK][0] = 18,
+ [0][1][RTW89_FCC][2] = 32,
+ [0][1][RTW89_ETSI][2] = 18,
+ [0][1][RTW89_MKK][2] = 20,
+ [0][1][RTW89_IC][2] = 8,
+ [0][1][RTW89_KCC][2] = 26,
+ [0][1][RTW89_ACMA][2] = 18,
+ [0][1][RTW89_CN][2] = 4,
+ [0][1][RTW89_UK][2] = 18,
+ [0][1][RTW89_FCC][4] = 30,
+ [0][1][RTW89_ETSI][4] = 18,
+ [0][1][RTW89_MKK][4] = 8,
+ [0][1][RTW89_IC][4] = 8,
+ [0][1][RTW89_KCC][4] = 26,
+ [0][1][RTW89_ACMA][4] = 18,
+ [0][1][RTW89_CN][4] = 4,
+ [0][1][RTW89_UK][4] = 18,
+ [0][1][RTW89_FCC][6] = 30,
+ [0][1][RTW89_ETSI][6] = 18,
+ [0][1][RTW89_MKK][6] = 8,
+ [0][1][RTW89_IC][6] = 8,
+ [0][1][RTW89_KCC][6] = 0,
+ [0][1][RTW89_ACMA][6] = 18,
+ [0][1][RTW89_CN][6] = 4,
+ [0][1][RTW89_UK][6] = 18,
+ [0][1][RTW89_FCC][8] = 30,
+ [0][1][RTW89_ETSI][8] = 16,
+ [0][1][RTW89_MKK][8] = 20,
+ [0][1][RTW89_IC][8] = 30,
+ [0][1][RTW89_KCC][8] = 28,
+ [0][1][RTW89_ACMA][8] = 16,
+ [0][1][RTW89_CN][8] = 4,
+ [0][1][RTW89_UK][8] = 16,
+ [0][1][RTW89_FCC][10] = 30,
+ [0][1][RTW89_ETSI][10] = 16,
+ [0][1][RTW89_MKK][10] = 20,
+ [0][1][RTW89_IC][10] = 30,
+ [0][1][RTW89_KCC][10] = 28,
+ [0][1][RTW89_ACMA][10] = 16,
+ [0][1][RTW89_CN][10] = 4,
+ [0][1][RTW89_UK][10] = 16,
+ [0][1][RTW89_FCC][12] = 30,
+ [0][1][RTW89_ETSI][12] = 16,
+ [0][1][RTW89_MKK][12] = 34,
+ [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_KCC][12] = 28,
+ [0][1][RTW89_ACMA][12] = 16,
+ [0][1][RTW89_CN][12] = 4,
+ [0][1][RTW89_UK][12] = 16,
+ [0][1][RTW89_FCC][14] = 30,
+ [0][1][RTW89_ETSI][14] = 16,
+ [0][1][RTW89_MKK][14] = 34,
+ [0][1][RTW89_IC][14] = 30,
+ [0][1][RTW89_KCC][14] = 28,
+ [0][1][RTW89_ACMA][14] = 16,
+ [0][1][RTW89_CN][14] = 4,
+ [0][1][RTW89_UK][14] = 16,
+ [0][1][RTW89_FCC][15] = 32,
+ [0][1][RTW89_ETSI][15] = 18,
+ [0][1][RTW89_MKK][15] = 44,
+ [0][1][RTW89_IC][15] = 32,
+ [0][1][RTW89_KCC][15] = 28,
+ [0][1][RTW89_ACMA][15] = 18,
+ [0][1][RTW89_CN][15] = 127,
+ [0][1][RTW89_UK][15] = 18,
+ [0][1][RTW89_FCC][17] = 32,
+ [0][1][RTW89_ETSI][17] = 18,
+ [0][1][RTW89_MKK][17] = 44,
+ [0][1][RTW89_IC][17] = 32,
+ [0][1][RTW89_KCC][17] = 28,
+ [0][1][RTW89_ACMA][17] = 18,
+ [0][1][RTW89_CN][17] = 127,
+ [0][1][RTW89_UK][17] = 18,
+ [0][1][RTW89_FCC][19] = 32,
+ [0][1][RTW89_ETSI][19] = 18,
+ [0][1][RTW89_MKK][19] = 44,
+ [0][1][RTW89_IC][19] = 32,
+ [0][1][RTW89_KCC][19] = 28,
+ [0][1][RTW89_ACMA][19] = 18,
+ [0][1][RTW89_CN][19] = 127,
+ [0][1][RTW89_UK][19] = 18,
+ [0][1][RTW89_FCC][21] = 32,
+ [0][1][RTW89_ETSI][21] = 18,
+ [0][1][RTW89_MKK][21] = 44,
+ [0][1][RTW89_IC][21] = 32,
+ [0][1][RTW89_KCC][21] = 28,
+ [0][1][RTW89_ACMA][21] = 18,
+ [0][1][RTW89_CN][21] = 127,
+ [0][1][RTW89_UK][21] = 18,
+ [0][1][RTW89_FCC][23] = 32,
+ [0][1][RTW89_ETSI][23] = 18,
+ [0][1][RTW89_MKK][23] = 44,
+ [0][1][RTW89_IC][23] = 32,
+ [0][1][RTW89_KCC][23] = 28,
+ [0][1][RTW89_ACMA][23] = 18,
+ [0][1][RTW89_CN][23] = 127,
+ [0][1][RTW89_UK][23] = 18,
+ [0][1][RTW89_FCC][25] = 32,
+ [0][1][RTW89_ETSI][25] = 18,
+ [0][1][RTW89_MKK][25] = 44,
+ [0][1][RTW89_IC][25] = 127,
+ [0][1][RTW89_KCC][25] = 28,
+ [0][1][RTW89_ACMA][25] = 127,
+ [0][1][RTW89_CN][25] = 127,
+ [0][1][RTW89_UK][25] = 18,
+ [0][1][RTW89_FCC][27] = 32,
+ [0][1][RTW89_ETSI][27] = 16,
+ [0][1][RTW89_MKK][27] = 44,
+ [0][1][RTW89_IC][27] = 127,
+ [0][1][RTW89_KCC][27] = 28,
+ [0][1][RTW89_ACMA][27] = 127,
+ [0][1][RTW89_CN][27] = 127,
+ [0][1][RTW89_UK][27] = 16,
+ [0][1][RTW89_FCC][29] = 32,
+ [0][1][RTW89_ETSI][29] = 16,
+ [0][1][RTW89_MKK][29] = 44,
+ [0][1][RTW89_IC][29] = 127,
+ [0][1][RTW89_KCC][29] = 28,
+ [0][1][RTW89_ACMA][29] = 127,
+ [0][1][RTW89_CN][29] = 127,
+ [0][1][RTW89_UK][29] = 16,
+ [0][1][RTW89_FCC][31] = 32,
+ [0][1][RTW89_ETSI][31] = 16,
+ [0][1][RTW89_MKK][31] = 44,
+ [0][1][RTW89_IC][31] = 30,
+ [0][1][RTW89_KCC][31] = 28,
+ [0][1][RTW89_ACMA][31] = 16,
+ [0][1][RTW89_CN][31] = 127,
+ [0][1][RTW89_UK][31] = 16,
+ [0][1][RTW89_FCC][33] = 30,
+ [0][1][RTW89_ETSI][33] = 16,
+ [0][1][RTW89_MKK][33] = 44,
+ [0][1][RTW89_IC][33] = 30,
+ [0][1][RTW89_KCC][33] = 28,
+ [0][1][RTW89_ACMA][33] = 16,
+ [0][1][RTW89_CN][33] = 127,
+ [0][1][RTW89_UK][33] = 16,
+ [0][1][RTW89_FCC][35] = 30,
+ [0][1][RTW89_ETSI][35] = 16,
+ [0][1][RTW89_MKK][35] = 44,
+ [0][1][RTW89_IC][35] = 30,
+ [0][1][RTW89_KCC][35] = 28,
+ [0][1][RTW89_ACMA][35] = 16,
+ [0][1][RTW89_CN][35] = 127,
+ [0][1][RTW89_UK][35] = 16,
+ [0][1][RTW89_FCC][37] = 34,
+ [0][1][RTW89_ETSI][37] = 127,
+ [0][1][RTW89_MKK][37] = 44,
+ [0][1][RTW89_IC][37] = 34,
+ [0][1][RTW89_KCC][37] = 28,
+ [0][1][RTW89_ACMA][37] = 34,
+ [0][1][RTW89_CN][37] = 127,
+ [0][1][RTW89_UK][37] = 18,
+ [0][1][RTW89_FCC][38] = 62,
+ [0][1][RTW89_ETSI][38] = 16,
+ [0][1][RTW89_MKK][38] = 127,
+ [0][1][RTW89_IC][38] = 62,
+ [0][1][RTW89_KCC][38] = 28,
+ [0][1][RTW89_ACMA][38] = 62,
+ [0][1][RTW89_CN][38] = 42,
+ [0][1][RTW89_UK][38] = 18,
+ [0][1][RTW89_FCC][40] = 62,
+ [0][1][RTW89_ETSI][40] = 16,
+ [0][1][RTW89_MKK][40] = 127,
+ [0][1][RTW89_IC][40] = 62,
+ [0][1][RTW89_KCC][40] = 28,
+ [0][1][RTW89_ACMA][40] = 62,
+ [0][1][RTW89_CN][40] = 42,
+ [0][1][RTW89_UK][40] = 18,
+ [0][1][RTW89_FCC][42] = 58,
+ [0][1][RTW89_ETSI][42] = 16,
+ [0][1][RTW89_MKK][42] = 127,
+ [0][1][RTW89_IC][42] = 58,
+ [0][1][RTW89_KCC][42] = 28,
+ [0][1][RTW89_ACMA][42] = 58,
+ [0][1][RTW89_CN][42] = 42,
+ [0][1][RTW89_UK][42] = 18,
+ [0][1][RTW89_FCC][44] = 56,
+ [0][1][RTW89_ETSI][44] = 16,
+ [0][1][RTW89_MKK][44] = 127,
+ [0][1][RTW89_IC][44] = 56,
+ [0][1][RTW89_KCC][44] = 28,
+ [0][1][RTW89_ACMA][44] = 56,
+ [0][1][RTW89_CN][44] = 42,
+ [0][1][RTW89_UK][44] = 18,
+ [0][1][RTW89_FCC][46] = 56,
+ [0][1][RTW89_ETSI][46] = 16,
+ [0][1][RTW89_MKK][46] = 127,
+ [0][1][RTW89_IC][46] = 56,
+ [0][1][RTW89_KCC][46] = 28,
+ [0][1][RTW89_ACMA][46] = 56,
+ [0][1][RTW89_CN][46] = 42,
+ [0][1][RTW89_UK][46] = 18,
+ [0][1][RTW89_FCC][48] = 20,
+ [0][1][RTW89_ETSI][48] = 127,
+ [0][1][RTW89_MKK][48] = 127,
+ [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_KCC][48] = 127,
+ [0][1][RTW89_ACMA][48] = 127,
+ [0][1][RTW89_CN][48] = 127,
+ [0][1][RTW89_UK][48] = 127,
+ [0][1][RTW89_FCC][50] = 20,
+ [0][1][RTW89_ETSI][50] = 127,
+ [0][1][RTW89_MKK][50] = 127,
+ [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_KCC][50] = 127,
+ [0][1][RTW89_ACMA][50] = 127,
+ [0][1][RTW89_CN][50] = 127,
+ [0][1][RTW89_UK][50] = 127,
+ [0][1][RTW89_FCC][52] = 8,
+ [0][1][RTW89_ETSI][52] = 127,
+ [0][1][RTW89_MKK][52] = 127,
+ [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_KCC][52] = 127,
+ [0][1][RTW89_ACMA][52] = 127,
+ [0][1][RTW89_CN][52] = 127,
+ [0][1][RTW89_UK][52] = 127,
+ [1][0][RTW89_FCC][0] = 62,
+ [1][0][RTW89_ETSI][0] = 40,
+ [1][0][RTW89_MKK][0] = 48,
+ [1][0][RTW89_IC][0] = 42,
+ [1][0][RTW89_KCC][0] = 50,
+ [1][0][RTW89_ACMA][0] = 40,
+ [1][0][RTW89_CN][0] = 26,
+ [1][0][RTW89_UK][0] = 40,
+ [1][0][RTW89_FCC][2] = 62,
+ [1][0][RTW89_ETSI][2] = 40,
+ [1][0][RTW89_MKK][2] = 48,
+ [1][0][RTW89_IC][2] = 42,
+ [1][0][RTW89_KCC][2] = 50,
+ [1][0][RTW89_ACMA][2] = 40,
+ [1][0][RTW89_CN][2] = 26,
+ [1][0][RTW89_UK][2] = 40,
+ [1][0][RTW89_FCC][4] = 64,
+ [1][0][RTW89_ETSI][4] = 40,
+ [1][0][RTW89_MKK][4] = 40,
+ [1][0][RTW89_IC][4] = 42,
+ [1][0][RTW89_KCC][4] = 50,
+ [1][0][RTW89_ACMA][4] = 40,
+ [1][0][RTW89_CN][4] = 26,
+ [1][0][RTW89_UK][4] = 40,
+ [1][0][RTW89_FCC][6] = 64,
+ [1][0][RTW89_ETSI][6] = 40,
+ [1][0][RTW89_MKK][6] = 40,
+ [1][0][RTW89_IC][6] = 42,
+ [1][0][RTW89_KCC][6] = 24,
+ [1][0][RTW89_ACMA][6] = 40,
+ [1][0][RTW89_CN][6] = 26,
+ [1][0][RTW89_UK][6] = 40,
+ [1][0][RTW89_FCC][8] = 62,
+ [1][0][RTW89_ETSI][8] = 40,
+ [1][0][RTW89_MKK][8] = 34,
+ [1][0][RTW89_IC][8] = 62,
+ [1][0][RTW89_KCC][8] = 52,
+ [1][0][RTW89_ACMA][8] = 40,
+ [1][0][RTW89_CN][8] = 26,
+ [1][0][RTW89_UK][8] = 40,
+ [1][0][RTW89_FCC][10] = 62,
+ [1][0][RTW89_ETSI][10] = 40,
+ [1][0][RTW89_MKK][10] = 34,
+ [1][0][RTW89_IC][10] = 62,
+ [1][0][RTW89_KCC][10] = 52,
+ [1][0][RTW89_ACMA][10] = 40,
+ [1][0][RTW89_CN][10] = 26,
+ [1][0][RTW89_UK][10] = 40,
+ [1][0][RTW89_FCC][12] = 62,
+ [1][0][RTW89_ETSI][12] = 40,
+ [1][0][RTW89_MKK][12] = 46,
+ [1][0][RTW89_IC][12] = 62,
+ [1][0][RTW89_KCC][12] = 52,
+ [1][0][RTW89_ACMA][12] = 40,
+ [1][0][RTW89_CN][12] = 26,
+ [1][0][RTW89_UK][12] = 40,
+ [1][0][RTW89_FCC][14] = 62,
+ [1][0][RTW89_ETSI][14] = 40,
+ [1][0][RTW89_MKK][14] = 46,
+ [1][0][RTW89_IC][14] = 62,
+ [1][0][RTW89_KCC][14] = 52,
+ [1][0][RTW89_ACMA][14] = 40,
+ [1][0][RTW89_CN][14] = 26,
+ [1][0][RTW89_UK][14] = 40,
+ [1][0][RTW89_FCC][15] = 62,
+ [1][0][RTW89_ETSI][15] = 40,
+ [1][0][RTW89_MKK][15] = 62,
+ [1][0][RTW89_IC][15] = 62,
+ [1][0][RTW89_KCC][15] = 52,
+ [1][0][RTW89_ACMA][15] = 40,
+ [1][0][RTW89_CN][15] = 127,
+ [1][0][RTW89_UK][15] = 40,
+ [1][0][RTW89_FCC][17] = 62,
+ [1][0][RTW89_ETSI][17] = 40,
+ [1][0][RTW89_MKK][17] = 68,
+ [1][0][RTW89_IC][17] = 62,
+ [1][0][RTW89_KCC][17] = 52,
+ [1][0][RTW89_ACMA][17] = 40,
+ [1][0][RTW89_CN][17] = 127,
+ [1][0][RTW89_UK][17] = 40,
+ [1][0][RTW89_FCC][19] = 64,
+ [1][0][RTW89_ETSI][19] = 40,
+ [1][0][RTW89_MKK][19] = 68,
+ [1][0][RTW89_IC][19] = 64,
+ [1][0][RTW89_KCC][19] = 52,
+ [1][0][RTW89_ACMA][19] = 40,
+ [1][0][RTW89_CN][19] = 127,
+ [1][0][RTW89_UK][19] = 40,
+ [1][0][RTW89_FCC][21] = 64,
+ [1][0][RTW89_ETSI][21] = 40,
+ [1][0][RTW89_MKK][21] = 68,
+ [1][0][RTW89_IC][21] = 64,
+ [1][0][RTW89_KCC][21] = 52,
+ [1][0][RTW89_ACMA][21] = 40,
+ [1][0][RTW89_CN][21] = 127,
+ [1][0][RTW89_UK][21] = 40,
+ [1][0][RTW89_FCC][23] = 64,
+ [1][0][RTW89_ETSI][23] = 40,
+ [1][0][RTW89_MKK][23] = 68,
+ [1][0][RTW89_IC][23] = 64,
+ [1][0][RTW89_KCC][23] = 52,
+ [1][0][RTW89_ACMA][23] = 40,
+ [1][0][RTW89_CN][23] = 127,
+ [1][0][RTW89_UK][23] = 40,
+ [1][0][RTW89_FCC][25] = 64,
+ [1][0][RTW89_ETSI][25] = 40,
+ [1][0][RTW89_MKK][25] = 68,
+ [1][0][RTW89_IC][25] = 127,
+ [1][0][RTW89_KCC][25] = 52,
+ [1][0][RTW89_ACMA][25] = 127,
+ [1][0][RTW89_CN][25] = 127,
+ [1][0][RTW89_UK][25] = 40,
+ [1][0][RTW89_FCC][27] = 64,
+ [1][0][RTW89_ETSI][27] = 42,
+ [1][0][RTW89_MKK][27] = 68,
+ [1][0][RTW89_IC][27] = 127,
+ [1][0][RTW89_KCC][27] = 52,
+ [1][0][RTW89_ACMA][27] = 127,
+ [1][0][RTW89_CN][27] = 127,
+ [1][0][RTW89_UK][27] = 42,
+ [1][0][RTW89_FCC][29] = 64,
+ [1][0][RTW89_ETSI][29] = 42,
+ [1][0][RTW89_MKK][29] = 68,
+ [1][0][RTW89_IC][29] = 127,
+ [1][0][RTW89_KCC][29] = 52,
+ [1][0][RTW89_ACMA][29] = 127,
+ [1][0][RTW89_CN][29] = 127,
+ [1][0][RTW89_UK][29] = 42,
+ [1][0][RTW89_FCC][31] = 64,
+ [1][0][RTW89_ETSI][31] = 42,
+ [1][0][RTW89_MKK][31] = 68,
+ [1][0][RTW89_IC][31] = 56,
+ [1][0][RTW89_KCC][31] = 52,
+ [1][0][RTW89_ACMA][31] = 42,
+ [1][0][RTW89_CN][31] = 127,
+ [1][0][RTW89_UK][31] = 42,
+ [1][0][RTW89_FCC][33] = 56,
+ [1][0][RTW89_ETSI][33] = 42,
+ [1][0][RTW89_MKK][33] = 68,
+ [1][0][RTW89_IC][33] = 56,
+ [1][0][RTW89_KCC][33] = 52,
+ [1][0][RTW89_ACMA][33] = 42,
+ [1][0][RTW89_CN][33] = 127,
+ [1][0][RTW89_UK][33] = 42,
+ [1][0][RTW89_FCC][35] = 56,
+ [1][0][RTW89_ETSI][35] = 42,
+ [1][0][RTW89_MKK][35] = 68,
+ [1][0][RTW89_IC][35] = 56,
+ [1][0][RTW89_KCC][35] = 52,
+ [1][0][RTW89_ACMA][35] = 42,
+ [1][0][RTW89_CN][35] = 127,
+ [1][0][RTW89_UK][35] = 42,
+ [1][0][RTW89_FCC][37] = 66,
+ [1][0][RTW89_ETSI][37] = 127,
+ [1][0][RTW89_MKK][37] = 68,
+ [1][0][RTW89_IC][37] = 66,
+ [1][0][RTW89_KCC][37] = 52,
+ [1][0][RTW89_ACMA][37] = 66,
+ [1][0][RTW89_CN][37] = 127,
+ [1][0][RTW89_UK][37] = 42,
+ [1][0][RTW89_FCC][38] = 76,
+ [1][0][RTW89_ETSI][38] = 28,
+ [1][0][RTW89_MKK][38] = 127,
+ [1][0][RTW89_IC][38] = 76,
+ [1][0][RTW89_KCC][38] = 54,
+ [1][0][RTW89_ACMA][38] = 76,
+ [1][0][RTW89_CN][38] = 66,
+ [1][0][RTW89_UK][38] = 44,
+ [1][0][RTW89_FCC][40] = 76,
+ [1][0][RTW89_ETSI][40] = 28,
+ [1][0][RTW89_MKK][40] = 127,
+ [1][0][RTW89_IC][40] = 76,
+ [1][0][RTW89_KCC][40] = 54,
+ [1][0][RTW89_ACMA][40] = 76,
+ [1][0][RTW89_CN][40] = 66,
+ [1][0][RTW89_UK][40] = 44,
+ [1][0][RTW89_FCC][42] = 68,
+ [1][0][RTW89_ETSI][42] = 28,
+ [1][0][RTW89_MKK][42] = 127,
+ [1][0][RTW89_IC][42] = 68,
+ [1][0][RTW89_KCC][42] = 54,
+ [1][0][RTW89_ACMA][42] = 68,
+ [1][0][RTW89_CN][42] = 66,
+ [1][0][RTW89_UK][42] = 44,
+ [1][0][RTW89_FCC][44] = 70,
+ [1][0][RTW89_ETSI][44] = 28,
+ [1][0][RTW89_MKK][44] = 127,
+ [1][0][RTW89_IC][44] = 70,
+ [1][0][RTW89_KCC][44] = 54,
+ [1][0][RTW89_ACMA][44] = 70,
+ [1][0][RTW89_CN][44] = 66,
+ [1][0][RTW89_UK][44] = 42,
+ [1][0][RTW89_FCC][46] = 70,
+ [1][0][RTW89_ETSI][46] = 28,
+ [1][0][RTW89_MKK][46] = 127,
+ [1][0][RTW89_IC][46] = 70,
+ [1][0][RTW89_KCC][46] = 54,
+ [1][0][RTW89_ACMA][46] = 70,
+ [1][0][RTW89_CN][46] = 66,
+ [1][0][RTW89_UK][46] = 42,
+ [1][0][RTW89_FCC][48] = 56,
+ [1][0][RTW89_ETSI][48] = 127,
+ [1][0][RTW89_MKK][48] = 127,
+ [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_KCC][48] = 127,
+ [1][0][RTW89_ACMA][48] = 127,
+ [1][0][RTW89_CN][48] = 127,
+ [1][0][RTW89_UK][48] = 127,
+ [1][0][RTW89_FCC][50] = 58,
+ [1][0][RTW89_ETSI][50] = 127,
+ [1][0][RTW89_MKK][50] = 127,
+ [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_KCC][50] = 127,
+ [1][0][RTW89_ACMA][50] = 127,
+ [1][0][RTW89_CN][50] = 127,
+ [1][0][RTW89_UK][50] = 127,
+ [1][0][RTW89_FCC][52] = 56,
+ [1][0][RTW89_ETSI][52] = 127,
+ [1][0][RTW89_MKK][52] = 127,
+ [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_KCC][52] = 127,
+ [1][0][RTW89_ACMA][52] = 127,
+ [1][0][RTW89_CN][52] = 127,
+ [1][0][RTW89_UK][52] = 127,
+ [1][1][RTW89_FCC][0] = 44,
+ [1][1][RTW89_ETSI][0] = 30,
+ [1][1][RTW89_MKK][0] = 34,
+ [1][1][RTW89_IC][0] = 20,
+ [1][1][RTW89_KCC][0] = 34,
+ [1][1][RTW89_ACMA][0] = 30,
+ [1][1][RTW89_CN][0] = 14,
+ [1][1][RTW89_UK][0] = 30,
+ [1][1][RTW89_FCC][2] = 44,
+ [1][1][RTW89_ETSI][2] = 30,
+ [1][1][RTW89_MKK][2] = 34,
+ [1][1][RTW89_IC][2] = 18,
+ [1][1][RTW89_KCC][2] = 34,
+ [1][1][RTW89_ACMA][2] = 30,
+ [1][1][RTW89_CN][2] = 14,
+ [1][1][RTW89_UK][2] = 30,
+ [1][1][RTW89_FCC][4] = 46,
+ [1][1][RTW89_ETSI][4] = 30,
+ [1][1][RTW89_MKK][4] = 26,
+ [1][1][RTW89_IC][4] = 20,
+ [1][1][RTW89_KCC][4] = 34,
+ [1][1][RTW89_ACMA][4] = 30,
+ [1][1][RTW89_CN][4] = 14,
+ [1][1][RTW89_UK][4] = 30,
+ [1][1][RTW89_FCC][6] = 46,
+ [1][1][RTW89_ETSI][6] = 30,
+ [1][1][RTW89_MKK][6] = 26,
+ [1][1][RTW89_IC][6] = 20,
+ [1][1][RTW89_KCC][6] = 8,
+ [1][1][RTW89_ACMA][6] = 30,
+ [1][1][RTW89_CN][6] = 14,
+ [1][1][RTW89_UK][6] = 30,
+ [1][1][RTW89_FCC][8] = 44,
+ [1][1][RTW89_ETSI][8] = 30,
+ [1][1][RTW89_MKK][8] = 20,
+ [1][1][RTW89_IC][8] = 44,
+ [1][1][RTW89_KCC][8] = 34,
+ [1][1][RTW89_ACMA][8] = 30,
+ [1][1][RTW89_CN][8] = 14,
+ [1][1][RTW89_UK][8] = 30,
+ [1][1][RTW89_FCC][10] = 44,
+ [1][1][RTW89_ETSI][10] = 30,
+ [1][1][RTW89_MKK][10] = 20,
+ [1][1][RTW89_IC][10] = 44,
+ [1][1][RTW89_KCC][10] = 34,
+ [1][1][RTW89_ACMA][10] = 30,
+ [1][1][RTW89_CN][10] = 14,
+ [1][1][RTW89_UK][10] = 30,
+ [1][1][RTW89_FCC][12] = 44,
+ [1][1][RTW89_ETSI][12] = 30,
+ [1][1][RTW89_MKK][12] = 34,
+ [1][1][RTW89_IC][12] = 44,
+ [1][1][RTW89_KCC][12] = 38,
+ [1][1][RTW89_ACMA][12] = 30,
+ [1][1][RTW89_CN][12] = 14,
+ [1][1][RTW89_UK][12] = 30,
+ [1][1][RTW89_FCC][14] = 44,
+ [1][1][RTW89_ETSI][14] = 30,
+ [1][1][RTW89_MKK][14] = 34,
+ [1][1][RTW89_IC][14] = 44,
+ [1][1][RTW89_KCC][14] = 38,
+ [1][1][RTW89_ACMA][14] = 30,
+ [1][1][RTW89_CN][14] = 14,
+ [1][1][RTW89_UK][14] = 30,
+ [1][1][RTW89_FCC][15] = 44,
+ [1][1][RTW89_ETSI][15] = 28,
+ [1][1][RTW89_MKK][15] = 56,
+ [1][1][RTW89_IC][15] = 44,
+ [1][1][RTW89_KCC][15] = 36,
+ [1][1][RTW89_ACMA][15] = 28,
+ [1][1][RTW89_CN][15] = 127,
+ [1][1][RTW89_UK][15] = 28,
+ [1][1][RTW89_FCC][17] = 44,
+ [1][1][RTW89_ETSI][17] = 28,
+ [1][1][RTW89_MKK][17] = 58,
+ [1][1][RTW89_IC][17] = 44,
+ [1][1][RTW89_KCC][17] = 36,
+ [1][1][RTW89_ACMA][17] = 28,
+ [1][1][RTW89_CN][17] = 127,
+ [1][1][RTW89_UK][17] = 28,
+ [1][1][RTW89_FCC][19] = 44,
+ [1][1][RTW89_ETSI][19] = 28,
+ [1][1][RTW89_MKK][19] = 58,
+ [1][1][RTW89_IC][19] = 44,
+ [1][1][RTW89_KCC][19] = 36,
+ [1][1][RTW89_ACMA][19] = 28,
+ [1][1][RTW89_CN][19] = 127,
+ [1][1][RTW89_UK][19] = 28,
+ [1][1][RTW89_FCC][21] = 44,
+ [1][1][RTW89_ETSI][21] = 28,
+ [1][1][RTW89_MKK][21] = 58,
+ [1][1][RTW89_IC][21] = 44,
+ [1][1][RTW89_KCC][21] = 36,
+ [1][1][RTW89_ACMA][21] = 28,
+ [1][1][RTW89_CN][21] = 127,
+ [1][1][RTW89_UK][21] = 28,
+ [1][1][RTW89_FCC][23] = 44,
+ [1][1][RTW89_ETSI][23] = 28,
+ [1][1][RTW89_MKK][23] = 58,
+ [1][1][RTW89_IC][23] = 44,
+ [1][1][RTW89_KCC][23] = 36,
+ [1][1][RTW89_ACMA][23] = 28,
+ [1][1][RTW89_CN][23] = 127,
+ [1][1][RTW89_UK][23] = 28,
+ [1][1][RTW89_FCC][25] = 44,
+ [1][1][RTW89_ETSI][25] = 28,
+ [1][1][RTW89_MKK][25] = 58,
+ [1][1][RTW89_IC][25] = 127,
+ [1][1][RTW89_KCC][25] = 36,
+ [1][1][RTW89_ACMA][25] = 127,
+ [1][1][RTW89_CN][25] = 127,
+ [1][1][RTW89_UK][25] = 28,
+ [1][1][RTW89_FCC][27] = 44,
+ [1][1][RTW89_ETSI][27] = 30,
+ [1][1][RTW89_MKK][27] = 58,
+ [1][1][RTW89_IC][27] = 127,
+ [1][1][RTW89_KCC][27] = 36,
+ [1][1][RTW89_ACMA][27] = 127,
+ [1][1][RTW89_CN][27] = 127,
+ [1][1][RTW89_UK][27] = 30,
+ [1][1][RTW89_FCC][29] = 44,
+ [1][1][RTW89_ETSI][29] = 30,
+ [1][1][RTW89_MKK][29] = 58,
+ [1][1][RTW89_IC][29] = 127,
+ [1][1][RTW89_KCC][29] = 36,
+ [1][1][RTW89_ACMA][29] = 127,
+ [1][1][RTW89_CN][29] = 127,
+ [1][1][RTW89_UK][29] = 30,
+ [1][1][RTW89_FCC][31] = 44,
+ [1][1][RTW89_ETSI][31] = 30,
+ [1][1][RTW89_MKK][31] = 58,
+ [1][1][RTW89_IC][31] = 38,
+ [1][1][RTW89_KCC][31] = 36,
+ [1][1][RTW89_ACMA][31] = 30,
+ [1][1][RTW89_CN][31] = 127,
+ [1][1][RTW89_UK][31] = 30,
+ [1][1][RTW89_FCC][33] = 38,
+ [1][1][RTW89_ETSI][33] = 30,
+ [1][1][RTW89_MKK][33] = 58,
+ [1][1][RTW89_IC][33] = 38,
+ [1][1][RTW89_KCC][33] = 36,
+ [1][1][RTW89_ACMA][33] = 30,
+ [1][1][RTW89_CN][33] = 127,
+ [1][1][RTW89_UK][33] = 30,
+ [1][1][RTW89_FCC][35] = 38,
+ [1][1][RTW89_ETSI][35] = 30,
+ [1][1][RTW89_MKK][35] = 58,
+ [1][1][RTW89_IC][35] = 38,
+ [1][1][RTW89_KCC][35] = 36,
+ [1][1][RTW89_ACMA][35] = 30,
+ [1][1][RTW89_CN][35] = 127,
+ [1][1][RTW89_UK][35] = 30,
+ [1][1][RTW89_FCC][37] = 46,
+ [1][1][RTW89_ETSI][37] = 127,
+ [1][1][RTW89_MKK][37] = 58,
+ [1][1][RTW89_IC][37] = 46,
+ [1][1][RTW89_KCC][37] = 36,
+ [1][1][RTW89_ACMA][37] = 46,
+ [1][1][RTW89_CN][37] = 127,
+ [1][1][RTW89_UK][37] = 32,
+ [1][1][RTW89_FCC][38] = 74,
+ [1][1][RTW89_ETSI][38] = 16,
+ [1][1][RTW89_MKK][38] = 127,
+ [1][1][RTW89_IC][38] = 74,
+ [1][1][RTW89_KCC][38] = 36,
+ [1][1][RTW89_ACMA][38] = 74,
+ [1][1][RTW89_CN][38] = 54,
+ [1][1][RTW89_UK][38] = 30,
+ [1][1][RTW89_FCC][40] = 74,
+ [1][1][RTW89_ETSI][40] = 16,
+ [1][1][RTW89_MKK][40] = 127,
+ [1][1][RTW89_IC][40] = 74,
+ [1][1][RTW89_KCC][40] = 36,
+ [1][1][RTW89_ACMA][40] = 74,
+ [1][1][RTW89_CN][40] = 54,
+ [1][1][RTW89_UK][40] = 30,
+ [1][1][RTW89_FCC][42] = 74,
+ [1][1][RTW89_ETSI][42] = 16,
+ [1][1][RTW89_MKK][42] = 127,
+ [1][1][RTW89_IC][42] = 74,
+ [1][1][RTW89_KCC][42] = 36,
+ [1][1][RTW89_ACMA][42] = 74,
+ [1][1][RTW89_CN][42] = 54,
+ [1][1][RTW89_UK][42] = 30,
+ [1][1][RTW89_FCC][44] = 74,
+ [1][1][RTW89_ETSI][44] = 16,
+ [1][1][RTW89_MKK][44] = 127,
+ [1][1][RTW89_IC][44] = 74,
+ [1][1][RTW89_KCC][44] = 36,
+ [1][1][RTW89_ACMA][44] = 74,
+ [1][1][RTW89_CN][44] = 54,
+ [1][1][RTW89_UK][44] = 30,
+ [1][1][RTW89_FCC][46] = 74,
+ [1][1][RTW89_ETSI][46] = 16,
+ [1][1][RTW89_MKK][46] = 127,
+ [1][1][RTW89_IC][46] = 74,
+ [1][1][RTW89_KCC][46] = 36,
+ [1][1][RTW89_ACMA][46] = 74,
+ [1][1][RTW89_CN][46] = 54,
+ [1][1][RTW89_UK][46] = 30,
+ [1][1][RTW89_FCC][48] = 34,
+ [1][1][RTW89_ETSI][48] = 127,
+ [1][1][RTW89_MKK][48] = 127,
+ [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_KCC][48] = 127,
+ [1][1][RTW89_ACMA][48] = 127,
+ [1][1][RTW89_CN][48] = 127,
+ [1][1][RTW89_UK][48] = 127,
+ [1][1][RTW89_FCC][50] = 34,
+ [1][1][RTW89_ETSI][50] = 127,
+ [1][1][RTW89_MKK][50] = 127,
+ [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_KCC][50] = 127,
+ [1][1][RTW89_ACMA][50] = 127,
+ [1][1][RTW89_CN][50] = 127,
+ [1][1][RTW89_UK][50] = 127,
+ [1][1][RTW89_FCC][52] = 30,
+ [1][1][RTW89_ETSI][52] = 127,
+ [1][1][RTW89_MKK][52] = 127,
+ [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_KCC][52] = 127,
+ [1][1][RTW89_ACMA][52] = 127,
+ [1][1][RTW89_CN][52] = 127,
+ [1][1][RTW89_UK][52] = 127,
+ [2][0][RTW89_FCC][0] = 68,
+ [2][0][RTW89_ETSI][0] = 52,
+ [2][0][RTW89_MKK][0] = 60,
+ [2][0][RTW89_IC][0] = 52,
+ [2][0][RTW89_KCC][0] = 64,
+ [2][0][RTW89_ACMA][0] = 52,
+ [2][0][RTW89_CN][0] = 40,
+ [2][0][RTW89_UK][0] = 52,
+ [2][0][RTW89_FCC][2] = 64,
+ [2][0][RTW89_ETSI][2] = 52,
+ [2][0][RTW89_MKK][2] = 60,
+ [2][0][RTW89_IC][2] = 50,
+ [2][0][RTW89_KCC][2] = 64,
+ [2][0][RTW89_ACMA][2] = 52,
+ [2][0][RTW89_CN][2] = 40,
+ [2][0][RTW89_UK][2] = 52,
+ [2][0][RTW89_FCC][4] = 68,
+ [2][0][RTW89_ETSI][4] = 52,
+ [2][0][RTW89_MKK][4] = 50,
+ [2][0][RTW89_IC][4] = 50,
+ [2][0][RTW89_KCC][4] = 64,
+ [2][0][RTW89_ACMA][4] = 52,
+ [2][0][RTW89_CN][4] = 40,
+ [2][0][RTW89_UK][4] = 52,
+ [2][0][RTW89_FCC][6] = 68,
+ [2][0][RTW89_ETSI][6] = 52,
+ [2][0][RTW89_MKK][6] = 50,
+ [2][0][RTW89_IC][6] = 50,
+ [2][0][RTW89_KCC][6] = 36,
+ [2][0][RTW89_ACMA][6] = 52,
+ [2][0][RTW89_CN][6] = 40,
+ [2][0][RTW89_UK][6] = 52,
+ [2][0][RTW89_FCC][8] = 68,
+ [2][0][RTW89_ETSI][8] = 52,
+ [2][0][RTW89_MKK][8] = 44,
+ [2][0][RTW89_IC][8] = 64,
+ [2][0][RTW89_KCC][8] = 62,
+ [2][0][RTW89_ACMA][8] = 52,
+ [2][0][RTW89_CN][8] = 40,
+ [2][0][RTW89_UK][8] = 52,
+ [2][0][RTW89_FCC][10] = 68,
+ [2][0][RTW89_ETSI][10] = 52,
+ [2][0][RTW89_MKK][10] = 44,
+ [2][0][RTW89_IC][10] = 64,
+ [2][0][RTW89_KCC][10] = 62,
+ [2][0][RTW89_ACMA][10] = 52,
+ [2][0][RTW89_CN][10] = 40,
+ [2][0][RTW89_UK][10] = 52,
+ [2][0][RTW89_FCC][12] = 68,
+ [2][0][RTW89_ETSI][12] = 52,
+ [2][0][RTW89_MKK][12] = 58,
+ [2][0][RTW89_IC][12] = 64,
+ [2][0][RTW89_KCC][12] = 62,
+ [2][0][RTW89_ACMA][12] = 52,
+ [2][0][RTW89_CN][12] = 40,
+ [2][0][RTW89_UK][12] = 52,
+ [2][0][RTW89_FCC][14] = 68,
+ [2][0][RTW89_ETSI][14] = 52,
+ [2][0][RTW89_MKK][14] = 58,
+ [2][0][RTW89_IC][14] = 64,
+ [2][0][RTW89_KCC][14] = 62,
+ [2][0][RTW89_ACMA][14] = 52,
+ [2][0][RTW89_CN][14] = 40,
+ [2][0][RTW89_UK][14] = 52,
+ [2][0][RTW89_FCC][15] = 68,
+ [2][0][RTW89_ETSI][15] = 52,
+ [2][0][RTW89_MKK][15] = 68,
+ [2][0][RTW89_IC][15] = 68,
+ [2][0][RTW89_KCC][15] = 62,
+ [2][0][RTW89_ACMA][15] = 52,
+ [2][0][RTW89_CN][15] = 127,
+ [2][0][RTW89_UK][15] = 52,
+ [2][0][RTW89_FCC][17] = 68,
+ [2][0][RTW89_ETSI][17] = 52,
+ [2][0][RTW89_MKK][17] = 74,
+ [2][0][RTW89_IC][17] = 68,
+ [2][0][RTW89_KCC][17] = 62,
+ [2][0][RTW89_ACMA][17] = 52,
+ [2][0][RTW89_CN][17] = 127,
+ [2][0][RTW89_UK][17] = 52,
+ [2][0][RTW89_FCC][19] = 70,
+ [2][0][RTW89_ETSI][19] = 52,
+ [2][0][RTW89_MKK][19] = 74,
+ [2][0][RTW89_IC][19] = 70,
+ [2][0][RTW89_KCC][19] = 62,
+ [2][0][RTW89_ACMA][19] = 52,
+ [2][0][RTW89_CN][19] = 127,
+ [2][0][RTW89_UK][19] = 52,
+ [2][0][RTW89_FCC][21] = 70,
+ [2][0][RTW89_ETSI][21] = 52,
+ [2][0][RTW89_MKK][21] = 74,
+ [2][0][RTW89_IC][21] = 70,
+ [2][0][RTW89_KCC][21] = 62,
+ [2][0][RTW89_ACMA][21] = 52,
+ [2][0][RTW89_CN][21] = 127,
+ [2][0][RTW89_UK][21] = 52,
+ [2][0][RTW89_FCC][23] = 70,
+ [2][0][RTW89_ETSI][23] = 52,
+ [2][0][RTW89_MKK][23] = 74,
+ [2][0][RTW89_IC][23] = 70,
+ [2][0][RTW89_KCC][23] = 62,
+ [2][0][RTW89_ACMA][23] = 52,
+ [2][0][RTW89_CN][23] = 127,
+ [2][0][RTW89_UK][23] = 52,
+ [2][0][RTW89_FCC][25] = 70,
+ [2][0][RTW89_ETSI][25] = 52,
+ [2][0][RTW89_MKK][25] = 74,
+ [2][0][RTW89_IC][25] = 127,
+ [2][0][RTW89_KCC][25] = 62,
+ [2][0][RTW89_ACMA][25] = 127,
+ [2][0][RTW89_CN][25] = 127,
+ [2][0][RTW89_UK][25] = 52,
+ [2][0][RTW89_FCC][27] = 70,
+ [2][0][RTW89_ETSI][27] = 52,
+ [2][0][RTW89_MKK][27] = 74,
+ [2][0][RTW89_IC][27] = 127,
+ [2][0][RTW89_KCC][27] = 62,
+ [2][0][RTW89_ACMA][27] = 127,
+ [2][0][RTW89_CN][27] = 127,
+ [2][0][RTW89_UK][27] = 52,
+ [2][0][RTW89_FCC][29] = 70,
+ [2][0][RTW89_ETSI][29] = 52,
+ [2][0][RTW89_MKK][29] = 74,
+ [2][0][RTW89_IC][29] = 127,
+ [2][0][RTW89_KCC][29] = 62,
+ [2][0][RTW89_ACMA][29] = 127,
+ [2][0][RTW89_CN][29] = 127,
+ [2][0][RTW89_UK][29] = 52,
+ [2][0][RTW89_FCC][31] = 70,
+ [2][0][RTW89_ETSI][31] = 52,
+ [2][0][RTW89_MKK][31] = 74,
+ [2][0][RTW89_IC][31] = 62,
+ [2][0][RTW89_KCC][31] = 62,
+ [2][0][RTW89_ACMA][31] = 52,
+ [2][0][RTW89_CN][31] = 127,
+ [2][0][RTW89_UK][31] = 52,
+ [2][0][RTW89_FCC][33] = 62,
+ [2][0][RTW89_ETSI][33] = 52,
+ [2][0][RTW89_MKK][33] = 74,
+ [2][0][RTW89_IC][33] = 62,
+ [2][0][RTW89_KCC][33] = 62,
+ [2][0][RTW89_ACMA][33] = 52,
+ [2][0][RTW89_CN][33] = 127,
+ [2][0][RTW89_UK][33] = 52,
+ [2][0][RTW89_FCC][35] = 62,
+ [2][0][RTW89_ETSI][35] = 52,
+ [2][0][RTW89_MKK][35] = 74,
+ [2][0][RTW89_IC][35] = 62,
+ [2][0][RTW89_KCC][35] = 62,
+ [2][0][RTW89_ACMA][35] = 52,
+ [2][0][RTW89_CN][35] = 127,
+ [2][0][RTW89_UK][35] = 52,
+ [2][0][RTW89_FCC][37] = 70,
+ [2][0][RTW89_ETSI][37] = 127,
+ [2][0][RTW89_MKK][37] = 74,
+ [2][0][RTW89_IC][37] = 70,
+ [2][0][RTW89_KCC][37] = 62,
+ [2][0][RTW89_ACMA][37] = 70,
+ [2][0][RTW89_CN][37] = 127,
+ [2][0][RTW89_UK][37] = 52,
+ [2][0][RTW89_FCC][38] = 82,
+ [2][0][RTW89_ETSI][38] = 28,
+ [2][0][RTW89_MKK][38] = 127,
+ [2][0][RTW89_IC][38] = 82,
+ [2][0][RTW89_KCC][38] = 64,
+ [2][0][RTW89_ACMA][38] = 82,
+ [2][0][RTW89_CN][38] = 68,
+ [2][0][RTW89_UK][38] = 54,
+ [2][0][RTW89_FCC][40] = 82,
+ [2][0][RTW89_ETSI][40] = 28,
+ [2][0][RTW89_MKK][40] = 127,
+ [2][0][RTW89_IC][40] = 82,
+ [2][0][RTW89_KCC][40] = 64,
+ [2][0][RTW89_ACMA][40] = 82,
+ [2][0][RTW89_CN][40] = 68,
+ [2][0][RTW89_UK][40] = 54,
+ [2][0][RTW89_FCC][42] = 76,
+ [2][0][RTW89_ETSI][42] = 28,
+ [2][0][RTW89_MKK][42] = 127,
+ [2][0][RTW89_IC][42] = 76,
+ [2][0][RTW89_KCC][42] = 64,
+ [2][0][RTW89_ACMA][42] = 76,
+ [2][0][RTW89_CN][42] = 68,
+ [2][0][RTW89_UK][42] = 54,
+ [2][0][RTW89_FCC][44] = 80,
+ [2][0][RTW89_ETSI][44] = 28,
+ [2][0][RTW89_MKK][44] = 127,
+ [2][0][RTW89_IC][44] = 80,
+ [2][0][RTW89_KCC][44] = 64,
+ [2][0][RTW89_ACMA][44] = 80,
+ [2][0][RTW89_CN][44] = 68,
+ [2][0][RTW89_UK][44] = 54,
+ [2][0][RTW89_FCC][46] = 80,
+ [2][0][RTW89_ETSI][46] = 28,
+ [2][0][RTW89_MKK][46] = 127,
+ [2][0][RTW89_IC][46] = 80,
+ [2][0][RTW89_KCC][46] = 64,
+ [2][0][RTW89_ACMA][46] = 80,
+ [2][0][RTW89_CN][46] = 68,
+ [2][0][RTW89_UK][46] = 54,
+ [2][0][RTW89_FCC][48] = 64,
+ [2][0][RTW89_ETSI][48] = 127,
+ [2][0][RTW89_MKK][48] = 127,
+ [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_KCC][48] = 127,
+ [2][0][RTW89_ACMA][48] = 127,
+ [2][0][RTW89_CN][48] = 127,
+ [2][0][RTW89_UK][48] = 127,
+ [2][0][RTW89_FCC][50] = 64,
+ [2][0][RTW89_ETSI][50] = 127,
+ [2][0][RTW89_MKK][50] = 127,
+ [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_KCC][50] = 127,
+ [2][0][RTW89_ACMA][50] = 127,
+ [2][0][RTW89_CN][50] = 127,
+ [2][0][RTW89_UK][50] = 127,
+ [2][0][RTW89_FCC][52] = 64,
+ [2][0][RTW89_ETSI][52] = 127,
+ [2][0][RTW89_MKK][52] = 127,
+ [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_KCC][52] = 127,
+ [2][0][RTW89_ACMA][52] = 127,
+ [2][0][RTW89_CN][52] = 127,
+ [2][0][RTW89_UK][52] = 127,
+ [2][1][RTW89_FCC][0] = 50,
+ [2][1][RTW89_ETSI][0] = 40,
+ [2][1][RTW89_MKK][0] = 44,
+ [2][1][RTW89_IC][0] = 26,
+ [2][1][RTW89_KCC][0] = 44,
+ [2][1][RTW89_ACMA][0] = 40,
+ [2][1][RTW89_CN][0] = 28,
+ [2][1][RTW89_UK][0] = 40,
+ [2][1][RTW89_FCC][2] = 50,
+ [2][1][RTW89_ETSI][2] = 40,
+ [2][1][RTW89_MKK][2] = 44,
+ [2][1][RTW89_IC][2] = 26,
+ [2][1][RTW89_KCC][2] = 44,
+ [2][1][RTW89_ACMA][2] = 40,
+ [2][1][RTW89_CN][2] = 28,
+ [2][1][RTW89_UK][2] = 40,
+ [2][1][RTW89_FCC][4] = 50,
+ [2][1][RTW89_ETSI][4] = 40,
+ [2][1][RTW89_MKK][4] = 36,
+ [2][1][RTW89_IC][4] = 26,
+ [2][1][RTW89_KCC][4] = 44,
+ [2][1][RTW89_ACMA][4] = 40,
+ [2][1][RTW89_CN][4] = 28,
+ [2][1][RTW89_UK][4] = 40,
+ [2][1][RTW89_FCC][6] = 50,
+ [2][1][RTW89_ETSI][6] = 40,
+ [2][1][RTW89_MKK][6] = 36,
+ [2][1][RTW89_IC][6] = 26,
+ [2][1][RTW89_KCC][6] = 20,
+ [2][1][RTW89_ACMA][6] = 40,
+ [2][1][RTW89_CN][6] = 28,
+ [2][1][RTW89_UK][6] = 40,
+ [2][1][RTW89_FCC][8] = 50,
+ [2][1][RTW89_ETSI][8] = 40,
+ [2][1][RTW89_MKK][8] = 32,
+ [2][1][RTW89_IC][8] = 50,
+ [2][1][RTW89_KCC][8] = 46,
+ [2][1][RTW89_ACMA][8] = 40,
+ [2][1][RTW89_CN][8] = 28,
+ [2][1][RTW89_UK][8] = 40,
+ [2][1][RTW89_FCC][10] = 50,
+ [2][1][RTW89_ETSI][10] = 40,
+ [2][1][RTW89_MKK][10] = 32,
+ [2][1][RTW89_IC][10] = 50,
+ [2][1][RTW89_KCC][10] = 46,
+ [2][1][RTW89_ACMA][10] = 40,
+ [2][1][RTW89_CN][10] = 28,
+ [2][1][RTW89_UK][10] = 40,
+ [2][1][RTW89_FCC][12] = 48,
+ [2][1][RTW89_ETSI][12] = 40,
+ [2][1][RTW89_MKK][12] = 44,
+ [2][1][RTW89_IC][12] = 48,
+ [2][1][RTW89_KCC][12] = 46,
+ [2][1][RTW89_ACMA][12] = 40,
+ [2][1][RTW89_CN][12] = 28,
+ [2][1][RTW89_UK][12] = 40,
+ [2][1][RTW89_FCC][14] = 48,
+ [2][1][RTW89_ETSI][14] = 40,
+ [2][1][RTW89_MKK][14] = 44,
+ [2][1][RTW89_IC][14] = 48,
+ [2][1][RTW89_KCC][14] = 46,
+ [2][1][RTW89_ACMA][14] = 40,
+ [2][1][RTW89_CN][14] = 28,
+ [2][1][RTW89_UK][14] = 40,
+ [2][1][RTW89_FCC][15] = 50,
+ [2][1][RTW89_ETSI][15] = 40,
+ [2][1][RTW89_MKK][15] = 66,
+ [2][1][RTW89_IC][15] = 50,
+ [2][1][RTW89_KCC][15] = 46,
+ [2][1][RTW89_ACMA][15] = 40,
+ [2][1][RTW89_CN][15] = 127,
+ [2][1][RTW89_UK][15] = 40,
+ [2][1][RTW89_FCC][17] = 50,
+ [2][1][RTW89_ETSI][17] = 40,
+ [2][1][RTW89_MKK][17] = 66,
+ [2][1][RTW89_IC][17] = 50,
+ [2][1][RTW89_KCC][17] = 46,
+ [2][1][RTW89_ACMA][17] = 40,
+ [2][1][RTW89_CN][17] = 127,
+ [2][1][RTW89_UK][17] = 40,
+ [2][1][RTW89_FCC][19] = 50,
+ [2][1][RTW89_ETSI][19] = 40,
+ [2][1][RTW89_MKK][19] = 66,
+ [2][1][RTW89_IC][19] = 50,
+ [2][1][RTW89_KCC][19] = 46,
+ [2][1][RTW89_ACMA][19] = 40,
+ [2][1][RTW89_CN][19] = 127,
+ [2][1][RTW89_UK][19] = 40,
+ [2][1][RTW89_FCC][21] = 50,
+ [2][1][RTW89_ETSI][21] = 40,
+ [2][1][RTW89_MKK][21] = 66,
+ [2][1][RTW89_IC][21] = 50,
+ [2][1][RTW89_KCC][21] = 46,
+ [2][1][RTW89_ACMA][21] = 40,
+ [2][1][RTW89_CN][21] = 127,
+ [2][1][RTW89_UK][21] = 40,
+ [2][1][RTW89_FCC][23] = 50,
+ [2][1][RTW89_ETSI][23] = 40,
+ [2][1][RTW89_MKK][23] = 66,
+ [2][1][RTW89_IC][23] = 50,
+ [2][1][RTW89_KCC][23] = 46,
+ [2][1][RTW89_ACMA][23] = 40,
+ [2][1][RTW89_CN][23] = 127,
+ [2][1][RTW89_UK][23] = 40,
+ [2][1][RTW89_FCC][25] = 50,
+ [2][1][RTW89_ETSI][25] = 40,
+ [2][1][RTW89_MKK][25] = 66,
+ [2][1][RTW89_IC][25] = 127,
+ [2][1][RTW89_KCC][25] = 46,
+ [2][1][RTW89_ACMA][25] = 127,
+ [2][1][RTW89_CN][25] = 127,
+ [2][1][RTW89_UK][25] = 40,
+ [2][1][RTW89_FCC][27] = 50,
+ [2][1][RTW89_ETSI][27] = 40,
+ [2][1][RTW89_MKK][27] = 66,
+ [2][1][RTW89_IC][27] = 127,
+ [2][1][RTW89_KCC][27] = 46,
+ [2][1][RTW89_ACMA][27] = 127,
+ [2][1][RTW89_CN][27] = 127,
+ [2][1][RTW89_UK][27] = 40,
+ [2][1][RTW89_FCC][29] = 50,
+ [2][1][RTW89_ETSI][29] = 40,
+ [2][1][RTW89_MKK][29] = 66,
+ [2][1][RTW89_IC][29] = 127,
+ [2][1][RTW89_KCC][29] = 46,
+ [2][1][RTW89_ACMA][29] = 127,
+ [2][1][RTW89_CN][29] = 127,
+ [2][1][RTW89_UK][29] = 40,
+ [2][1][RTW89_FCC][31] = 50,
+ [2][1][RTW89_ETSI][31] = 40,
+ [2][1][RTW89_MKK][31] = 66,
+ [2][1][RTW89_IC][31] = 48,
+ [2][1][RTW89_KCC][31] = 46,
+ [2][1][RTW89_ACMA][31] = 40,
+ [2][1][RTW89_CN][31] = 127,
+ [2][1][RTW89_UK][31] = 40,
+ [2][1][RTW89_FCC][33] = 48,
+ [2][1][RTW89_ETSI][33] = 40,
+ [2][1][RTW89_MKK][33] = 66,
+ [2][1][RTW89_IC][33] = 48,
+ [2][1][RTW89_KCC][33] = 46,
+ [2][1][RTW89_ACMA][33] = 40,
+ [2][1][RTW89_CN][33] = 127,
+ [2][1][RTW89_UK][33] = 40,
+ [2][1][RTW89_FCC][35] = 48,
+ [2][1][RTW89_ETSI][35] = 40,
+ [2][1][RTW89_MKK][35] = 66,
+ [2][1][RTW89_IC][35] = 48,
+ [2][1][RTW89_KCC][35] = 46,
+ [2][1][RTW89_ACMA][35] = 40,
+ [2][1][RTW89_CN][35] = 127,
+ [2][1][RTW89_UK][35] = 40,
+ [2][1][RTW89_FCC][37] = 52,
+ [2][1][RTW89_ETSI][37] = 127,
+ [2][1][RTW89_MKK][37] = 66,
+ [2][1][RTW89_IC][37] = 52,
+ [2][1][RTW89_KCC][37] = 46,
+ [2][1][RTW89_ACMA][37] = 52,
+ [2][1][RTW89_CN][37] = 127,
+ [2][1][RTW89_UK][37] = 42,
+ [2][1][RTW89_FCC][38] = 78,
+ [2][1][RTW89_ETSI][38] = 16,
+ [2][1][RTW89_MKK][38] = 127,
+ [2][1][RTW89_IC][38] = 78,
+ [2][1][RTW89_KCC][38] = 46,
+ [2][1][RTW89_ACMA][38] = 78,
+ [2][1][RTW89_CN][38] = 56,
+ [2][1][RTW89_UK][38] = 42,
+ [2][1][RTW89_FCC][40] = 78,
+ [2][1][RTW89_ETSI][40] = 16,
+ [2][1][RTW89_MKK][40] = 127,
+ [2][1][RTW89_IC][40] = 78,
+ [2][1][RTW89_KCC][40] = 46,
+ [2][1][RTW89_ACMA][40] = 78,
+ [2][1][RTW89_CN][40] = 56,
+ [2][1][RTW89_UK][40] = 42,
+ [2][1][RTW89_FCC][42] = 78,
+ [2][1][RTW89_ETSI][42] = 16,
+ [2][1][RTW89_MKK][42] = 127,
+ [2][1][RTW89_IC][42] = 78,
+ [2][1][RTW89_KCC][42] = 46,
+ [2][1][RTW89_ACMA][42] = 78,
+ [2][1][RTW89_CN][42] = 56,
+ [2][1][RTW89_UK][42] = 42,
+ [2][1][RTW89_FCC][44] = 74,
+ [2][1][RTW89_ETSI][44] = 16,
+ [2][1][RTW89_MKK][44] = 127,
+ [2][1][RTW89_IC][44] = 74,
+ [2][1][RTW89_KCC][44] = 46,
+ [2][1][RTW89_ACMA][44] = 74,
+ [2][1][RTW89_CN][44] = 56,
+ [2][1][RTW89_UK][44] = 42,
+ [2][1][RTW89_FCC][46] = 74,
+ [2][1][RTW89_ETSI][46] = 16,
+ [2][1][RTW89_MKK][46] = 127,
+ [2][1][RTW89_IC][46] = 74,
+ [2][1][RTW89_KCC][46] = 46,
+ [2][1][RTW89_ACMA][46] = 74,
+ [2][1][RTW89_CN][46] = 56,
+ [2][1][RTW89_UK][46] = 42,
+ [2][1][RTW89_FCC][48] = 40,
+ [2][1][RTW89_ETSI][48] = 127,
+ [2][1][RTW89_MKK][48] = 127,
+ [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_KCC][48] = 127,
+ [2][1][RTW89_ACMA][48] = 127,
+ [2][1][RTW89_CN][48] = 127,
+ [2][1][RTW89_UK][48] = 127,
+ [2][1][RTW89_FCC][50] = 40,
+ [2][1][RTW89_ETSI][50] = 127,
+ [2][1][RTW89_MKK][50] = 127,
+ [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_KCC][50] = 127,
+ [2][1][RTW89_ACMA][50] = 127,
+ [2][1][RTW89_CN][50] = 127,
+ [2][1][RTW89_UK][50] = 127,
+ [2][1][RTW89_FCC][52] = 40,
+ [2][1][RTW89_ETSI][52] = 127,
+ [2][1][RTW89_MKK][52] = 127,
+ [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_KCC][52] = 127,
+ [2][1][RTW89_ACMA][52] = 127,
+ [2][1][RTW89_CN][52] = 127,
+ [2][1][RTW89_UK][52] = 127,
+};
+
+const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
+ [0][0][RTW89_WW][0] = -16,
+ [0][0][RTW89_WW][2] = -18,
+ [0][0][RTW89_WW][4] = -18,
+ [0][0][RTW89_WW][6] = -18,
+ [0][0][RTW89_WW][8] = -18,
+ [0][0][RTW89_WW][10] = -18,
+ [0][0][RTW89_WW][12] = -18,
+ [0][0][RTW89_WW][14] = -18,
+ [0][0][RTW89_WW][15] = -18,
+ [0][0][RTW89_WW][17] = -18,
+ [0][0][RTW89_WW][19] = -18,
+ [0][0][RTW89_WW][21] = -18,
+ [0][0][RTW89_WW][23] = -18,
+ [0][0][RTW89_WW][25] = -18,
+ [0][0][RTW89_WW][27] = -18,
+ [0][0][RTW89_WW][29] = -18,
+ [0][0][RTW89_WW][30] = -18,
+ [0][0][RTW89_WW][32] = -18,
+ [0][0][RTW89_WW][34] = -18,
+ [0][0][RTW89_WW][36] = -18,
+ [0][0][RTW89_WW][38] = -18,
+ [0][0][RTW89_WW][40] = -18,
+ [0][0][RTW89_WW][42] = -18,
+ [0][0][RTW89_WW][44] = -16,
+ [0][0][RTW89_WW][45] = -16,
+ [0][0][RTW89_WW][47] = -18,
+ [0][0][RTW89_WW][49] = -18,
+ [0][0][RTW89_WW][51] = -18,
+ [0][0][RTW89_WW][53] = -16,
+ [0][0][RTW89_WW][55] = -18,
+ [0][0][RTW89_WW][57] = -18,
+ [0][0][RTW89_WW][59] = -18,
+ [0][0][RTW89_WW][60] = -18,
+ [0][0][RTW89_WW][62] = -18,
+ [0][0][RTW89_WW][64] = -18,
+ [0][0][RTW89_WW][66] = -18,
+ [0][0][RTW89_WW][68] = -18,
+ [0][0][RTW89_WW][70] = -16,
+ [0][0][RTW89_WW][72] = -18,
+ [0][0][RTW89_WW][74] = -18,
+ [0][0][RTW89_WW][75] = -18,
+ [0][0][RTW89_WW][77] = -18,
+ [0][0][RTW89_WW][79] = -18,
+ [0][0][RTW89_WW][81] = -18,
+ [0][0][RTW89_WW][83] = -18,
+ [0][0][RTW89_WW][85] = -18,
+ [0][0][RTW89_WW][87] = -16,
+ [0][0][RTW89_WW][89] = -16,
+ [0][0][RTW89_WW][90] = -16,
+ [0][0][RTW89_WW][92] = -16,
+ [0][0][RTW89_WW][94] = -16,
+ [0][0][RTW89_WW][96] = -16,
+ [0][0][RTW89_WW][98] = -16,
+ [0][0][RTW89_WW][100] = -16,
+ [0][0][RTW89_WW][102] = -16,
+ [0][0][RTW89_WW][104] = -16,
+ [0][0][RTW89_WW][105] = -16,
+ [0][0][RTW89_WW][107] = -12,
+ [0][0][RTW89_WW][109] = -12,
+ [0][0][RTW89_WW][111] = 0,
+ [0][0][RTW89_WW][113] = 0,
+ [0][0][RTW89_WW][115] = 0,
+ [0][0][RTW89_WW][117] = 0,
+ [0][0][RTW89_WW][119] = 0,
+ [0][1][RTW89_WW][0] = -40,
+ [0][1][RTW89_WW][2] = -40,
+ [0][1][RTW89_WW][4] = -40,
+ [0][1][RTW89_WW][6] = -40,
+ [0][1][RTW89_WW][8] = -40,
+ [0][1][RTW89_WW][10] = -40,
+ [0][1][RTW89_WW][12] = -40,
+ [0][1][RTW89_WW][14] = -40,
+ [0][1][RTW89_WW][15] = -40,
+ [0][1][RTW89_WW][17] = -40,
+ [0][1][RTW89_WW][19] = -40,
+ [0][1][RTW89_WW][21] = -40,
+ [0][1][RTW89_WW][23] = -40,
+ [0][1][RTW89_WW][25] = -40,
+ [0][1][RTW89_WW][27] = -40,
+ [0][1][RTW89_WW][29] = -40,
+ [0][1][RTW89_WW][30] = -40,
+ [0][1][RTW89_WW][32] = -40,
+ [0][1][RTW89_WW][34] = -40,
+ [0][1][RTW89_WW][36] = -40,
+ [0][1][RTW89_WW][38] = -40,
+ [0][1][RTW89_WW][40] = -40,
+ [0][1][RTW89_WW][42] = -40,
+ [0][1][RTW89_WW][44] = -40,
+ [0][1][RTW89_WW][45] = -40,
+ [0][1][RTW89_WW][47] = -40,
+ [0][1][RTW89_WW][49] = -40,
+ [0][1][RTW89_WW][51] = -40,
+ [0][1][RTW89_WW][53] = -40,
+ [0][1][RTW89_WW][55] = -40,
+ [0][1][RTW89_WW][57] = -40,
+ [0][1][RTW89_WW][59] = -40,
+ [0][1][RTW89_WW][60] = -40,
+ [0][1][RTW89_WW][62] = -40,
+ [0][1][RTW89_WW][64] = -40,
+ [0][1][RTW89_WW][66] = -40,
+ [0][1][RTW89_WW][68] = -40,
+ [0][1][RTW89_WW][70] = -38,
+ [0][1][RTW89_WW][72] = -38,
+ [0][1][RTW89_WW][74] = -38,
+ [0][1][RTW89_WW][75] = -38,
+ [0][1][RTW89_WW][77] = -38,
+ [0][1][RTW89_WW][79] = -38,
+ [0][1][RTW89_WW][81] = -38,
+ [0][1][RTW89_WW][83] = -38,
+ [0][1][RTW89_WW][85] = -38,
+ [0][1][RTW89_WW][87] = -40,
+ [0][1][RTW89_WW][89] = -38,
+ [0][1][RTW89_WW][90] = -38,
+ [0][1][RTW89_WW][92] = -38,
+ [0][1][RTW89_WW][94] = -38,
+ [0][1][RTW89_WW][96] = -38,
+ [0][1][RTW89_WW][98] = -38,
+ [0][1][RTW89_WW][100] = -38,
+ [0][1][RTW89_WW][102] = -38,
+ [0][1][RTW89_WW][104] = -38,
+ [0][1][RTW89_WW][105] = -38,
+ [0][1][RTW89_WW][107] = -34,
+ [0][1][RTW89_WW][109] = -34,
+ [0][1][RTW89_WW][111] = 0,
+ [0][1][RTW89_WW][113] = 0,
+ [0][1][RTW89_WW][115] = 0,
+ [0][1][RTW89_WW][117] = 0,
+ [0][1][RTW89_WW][119] = 0,
+ [1][0][RTW89_WW][0] = -4,
+ [1][0][RTW89_WW][2] = -4,
+ [1][0][RTW89_WW][4] = -4,
+ [1][0][RTW89_WW][6] = -4,
+ [1][0][RTW89_WW][8] = -4,
+ [1][0][RTW89_WW][10] = -4,
+ [1][0][RTW89_WW][12] = -4,
+ [1][0][RTW89_WW][14] = -4,
+ [1][0][RTW89_WW][15] = -4,
+ [1][0][RTW89_WW][17] = -4,
+ [1][0][RTW89_WW][19] = -4,
+ [1][0][RTW89_WW][21] = -4,
+ [1][0][RTW89_WW][23] = -4,
+ [1][0][RTW89_WW][25] = -4,
+ [1][0][RTW89_WW][27] = -4,
+ [1][0][RTW89_WW][29] = -4,
+ [1][0][RTW89_WW][30] = -4,
+ [1][0][RTW89_WW][32] = -4,
+ [1][0][RTW89_WW][34] = -4,
+ [1][0][RTW89_WW][36] = -4,
+ [1][0][RTW89_WW][38] = -4,
+ [1][0][RTW89_WW][40] = -4,
+ [1][0][RTW89_WW][42] = -4,
+ [1][0][RTW89_WW][44] = -4,
+ [1][0][RTW89_WW][45] = -4,
+ [1][0][RTW89_WW][47] = -4,
+ [1][0][RTW89_WW][49] = -4,
+ [1][0][RTW89_WW][51] = -4,
+ [1][0][RTW89_WW][53] = -4,
+ [1][0][RTW89_WW][55] = -4,
+ [1][0][RTW89_WW][57] = -4,
+ [1][0][RTW89_WW][59] = -4,
+ [1][0][RTW89_WW][60] = -4,
+ [1][0][RTW89_WW][62] = -4,
+ [1][0][RTW89_WW][64] = -4,
+ [1][0][RTW89_WW][66] = -4,
+ [1][0][RTW89_WW][68] = -4,
+ [1][0][RTW89_WW][70] = -4,
+ [1][0][RTW89_WW][72] = -4,
+ [1][0][RTW89_WW][74] = -4,
+ [1][0][RTW89_WW][75] = -4,
+ [1][0][RTW89_WW][77] = -4,
+ [1][0][RTW89_WW][79] = -4,
+ [1][0][RTW89_WW][81] = -4,
+ [1][0][RTW89_WW][83] = -4,
+ [1][0][RTW89_WW][85] = -4,
+ [1][0][RTW89_WW][87] = -4,
+ [1][0][RTW89_WW][89] = -4,
+ [1][0][RTW89_WW][90] = -4,
+ [1][0][RTW89_WW][92] = -4,
+ [1][0][RTW89_WW][94] = -4,
+ [1][0][RTW89_WW][96] = -4,
+ [1][0][RTW89_WW][98] = -4,
+ [1][0][RTW89_WW][100] = -4,
+ [1][0][RTW89_WW][102] = -4,
+ [1][0][RTW89_WW][104] = -4,
+ [1][0][RTW89_WW][105] = -4,
+ [1][0][RTW89_WW][107] = 1,
+ [1][0][RTW89_WW][109] = 2,
+ [1][0][RTW89_WW][111] = 0,
+ [1][0][RTW89_WW][113] = 0,
+ [1][0][RTW89_WW][115] = 0,
+ [1][0][RTW89_WW][117] = 0,
+ [1][0][RTW89_WW][119] = 0,
+ [1][1][RTW89_WW][0] = -26,
+ [1][1][RTW89_WW][2] = -28,
+ [1][1][RTW89_WW][4] = -28,
+ [1][1][RTW89_WW][6] = -28,
+ [1][1][RTW89_WW][8] = -28,
+ [1][1][RTW89_WW][10] = -28,
+ [1][1][RTW89_WW][12] = -28,
+ [1][1][RTW89_WW][14] = -28,
+ [1][1][RTW89_WW][15] = -28,
+ [1][1][RTW89_WW][17] = -28,
+ [1][1][RTW89_WW][19] = -28,
+ [1][1][RTW89_WW][21] = -28,
+ [1][1][RTW89_WW][23] = -28,
+ [1][1][RTW89_WW][25] = -28,
+ [1][1][RTW89_WW][27] = -28,
+ [1][1][RTW89_WW][29] = -28,
+ [1][1][RTW89_WW][30] = -28,
+ [1][1][RTW89_WW][32] = -28,
+ [1][1][RTW89_WW][34] = -28,
+ [1][1][RTW89_WW][36] = -28,
+ [1][1][RTW89_WW][38] = -28,
+ [1][1][RTW89_WW][40] = -28,
+ [1][1][RTW89_WW][42] = -28,
+ [1][1][RTW89_WW][44] = -28,
+ [1][1][RTW89_WW][45] = -26,
+ [1][1][RTW89_WW][47] = -28,
+ [1][1][RTW89_WW][49] = -28,
+ [1][1][RTW89_WW][51] = -28,
+ [1][1][RTW89_WW][53] = -26,
+ [1][1][RTW89_WW][55] = -28,
+ [1][1][RTW89_WW][57] = -28,
+ [1][1][RTW89_WW][59] = -28,
+ [1][1][RTW89_WW][60] = -28,
+ [1][1][RTW89_WW][62] = -28,
+ [1][1][RTW89_WW][64] = -28,
+ [1][1][RTW89_WW][66] = -28,
+ [1][1][RTW89_WW][68] = -28,
+ [1][1][RTW89_WW][70] = -26,
+ [1][1][RTW89_WW][72] = -28,
+ [1][1][RTW89_WW][74] = -28,
+ [1][1][RTW89_WW][75] = -28,
+ [1][1][RTW89_WW][77] = -28,
+ [1][1][RTW89_WW][79] = -28,
+ [1][1][RTW89_WW][81] = -28,
+ [1][1][RTW89_WW][83] = -28,
+ [1][1][RTW89_WW][85] = -28,
+ [1][1][RTW89_WW][87] = -28,
+ [1][1][RTW89_WW][89] = -26,
+ [1][1][RTW89_WW][90] = -26,
+ [1][1][RTW89_WW][92] = -26,
+ [1][1][RTW89_WW][94] = -26,
+ [1][1][RTW89_WW][96] = -26,
+ [1][1][RTW89_WW][98] = -26,
+ [1][1][RTW89_WW][100] = -26,
+ [1][1][RTW89_WW][102] = -26,
+ [1][1][RTW89_WW][104] = -26,
+ [1][1][RTW89_WW][105] = -26,
+ [1][1][RTW89_WW][107] = -22,
+ [1][1][RTW89_WW][109] = -22,
+ [1][1][RTW89_WW][111] = 0,
+ [1][1][RTW89_WW][113] = 0,
+ [1][1][RTW89_WW][115] = 0,
+ [1][1][RTW89_WW][117] = 0,
+ [1][1][RTW89_WW][119] = 0,
+ [2][0][RTW89_WW][0] = 8,
+ [2][0][RTW89_WW][2] = 8,
+ [2][0][RTW89_WW][4] = 8,
+ [2][0][RTW89_WW][6] = 8,
+ [2][0][RTW89_WW][8] = 8,
+ [2][0][RTW89_WW][10] = 8,
+ [2][0][RTW89_WW][12] = 8,
+ [2][0][RTW89_WW][14] = 8,
+ [2][0][RTW89_WW][15] = 8,
+ [2][0][RTW89_WW][17] = 8,
+ [2][0][RTW89_WW][19] = 8,
+ [2][0][RTW89_WW][21] = 8,
+ [2][0][RTW89_WW][23] = 8,
+ [2][0][RTW89_WW][25] = 8,
+ [2][0][RTW89_WW][27] = 8,
+ [2][0][RTW89_WW][29] = 8,
+ [2][0][RTW89_WW][30] = 8,
+ [2][0][RTW89_WW][32] = 8,
+ [2][0][RTW89_WW][34] = 8,
+ [2][0][RTW89_WW][36] = 8,
+ [2][0][RTW89_WW][38] = 8,
+ [2][0][RTW89_WW][40] = 8,
+ [2][0][RTW89_WW][42] = 8,
+ [2][0][RTW89_WW][44] = 8,
+ [2][0][RTW89_WW][45] = 8,
+ [2][0][RTW89_WW][47] = 8,
+ [2][0][RTW89_WW][49] = 8,
+ [2][0][RTW89_WW][51] = 8,
+ [2][0][RTW89_WW][53] = 8,
+ [2][0][RTW89_WW][55] = 8,
+ [2][0][RTW89_WW][57] = 8,
+ [2][0][RTW89_WW][59] = 8,
+ [2][0][RTW89_WW][60] = 8,
+ [2][0][RTW89_WW][62] = 8,
+ [2][0][RTW89_WW][64] = 8,
+ [2][0][RTW89_WW][66] = 8,
+ [2][0][RTW89_WW][68] = 8,
+ [2][0][RTW89_WW][70] = 8,
+ [2][0][RTW89_WW][72] = 8,
+ [2][0][RTW89_WW][74] = 8,
+ [2][0][RTW89_WW][75] = 8,
+ [2][0][RTW89_WW][77] = 8,
+ [2][0][RTW89_WW][79] = 8,
+ [2][0][RTW89_WW][81] = 8,
+ [2][0][RTW89_WW][83] = 8,
+ [2][0][RTW89_WW][85] = 8,
+ [2][0][RTW89_WW][87] = 8,
+ [2][0][RTW89_WW][89] = 8,
+ [2][0][RTW89_WW][90] = 8,
+ [2][0][RTW89_WW][92] = 8,
+ [2][0][RTW89_WW][94] = 8,
+ [2][0][RTW89_WW][96] = 8,
+ [2][0][RTW89_WW][98] = 8,
+ [2][0][RTW89_WW][100] = 8,
+ [2][0][RTW89_WW][102] = 8,
+ [2][0][RTW89_WW][104] = 8,
+ [2][0][RTW89_WW][105] = 8,
+ [2][0][RTW89_WW][107] = 10,
+ [2][0][RTW89_WW][109] = 12,
+ [2][0][RTW89_WW][111] = 0,
+ [2][0][RTW89_WW][113] = 0,
+ [2][0][RTW89_WW][115] = 0,
+ [2][0][RTW89_WW][117] = 0,
+ [2][0][RTW89_WW][119] = 0,
+ [2][1][RTW89_WW][0] = -16,
+ [2][1][RTW89_WW][2] = -16,
+ [2][1][RTW89_WW][4] = -16,
+ [2][1][RTW89_WW][6] = -16,
+ [2][1][RTW89_WW][8] = -16,
+ [2][1][RTW89_WW][10] = -16,
+ [2][1][RTW89_WW][12] = -16,
+ [2][1][RTW89_WW][14] = -16,
+ [2][1][RTW89_WW][15] = -16,
+ [2][1][RTW89_WW][17] = -16,
+ [2][1][RTW89_WW][19] = -16,
+ [2][1][RTW89_WW][21] = -16,
+ [2][1][RTW89_WW][23] = -16,
+ [2][1][RTW89_WW][25] = -16,
+ [2][1][RTW89_WW][27] = -16,
+ [2][1][RTW89_WW][29] = -16,
+ [2][1][RTW89_WW][30] = -16,
+ [2][1][RTW89_WW][32] = -16,
+ [2][1][RTW89_WW][34] = -16,
+ [2][1][RTW89_WW][36] = -16,
+ [2][1][RTW89_WW][38] = -16,
+ [2][1][RTW89_WW][40] = -16,
+ [2][1][RTW89_WW][42] = -16,
+ [2][1][RTW89_WW][44] = -16,
+ [2][1][RTW89_WW][45] = -16,
+ [2][1][RTW89_WW][47] = -16,
+ [2][1][RTW89_WW][49] = -16,
+ [2][1][RTW89_WW][51] = -16,
+ [2][1][RTW89_WW][53] = -16,
+ [2][1][RTW89_WW][55] = -16,
+ [2][1][RTW89_WW][57] = -16,
+ [2][1][RTW89_WW][59] = -16,
+ [2][1][RTW89_WW][60] = -16,
+ [2][1][RTW89_WW][62] = -16,
+ [2][1][RTW89_WW][64] = -16,
+ [2][1][RTW89_WW][66] = -16,
+ [2][1][RTW89_WW][68] = -16,
+ [2][1][RTW89_WW][70] = -16,
+ [2][1][RTW89_WW][72] = -16,
+ [2][1][RTW89_WW][74] = -16,
+ [2][1][RTW89_WW][75] = -16,
+ [2][1][RTW89_WW][77] = -16,
+ [2][1][RTW89_WW][79] = -16,
+ [2][1][RTW89_WW][81] = -16,
+ [2][1][RTW89_WW][83] = -16,
+ [2][1][RTW89_WW][85] = -18,
+ [2][1][RTW89_WW][87] = -16,
+ [2][1][RTW89_WW][89] = -16,
+ [2][1][RTW89_WW][90] = -16,
+ [2][1][RTW89_WW][92] = -16,
+ [2][1][RTW89_WW][94] = -16,
+ [2][1][RTW89_WW][96] = -16,
+ [2][1][RTW89_WW][98] = -16,
+ [2][1][RTW89_WW][100] = -16,
+ [2][1][RTW89_WW][102] = -16,
+ [2][1][RTW89_WW][104] = -16,
+ [2][1][RTW89_WW][105] = -16,
+ [2][1][RTW89_WW][107] = -12,
+ [2][1][RTW89_WW][109] = -10,
+ [2][1][RTW89_WW][111] = 0,
+ [2][1][RTW89_WW][113] = 0,
+ [2][1][RTW89_WW][115] = 0,
+ [2][1][RTW89_WW][117] = 0,
+ [2][1][RTW89_WW][119] = 0,
+ [0][0][RTW89_FCC][0] = -16,
+ [0][0][RTW89_ETSI][0] = 32,
+ [0][0][RTW89_FCC][2] = -18,
+ [0][0][RTW89_ETSI][2] = 32,
+ [0][0][RTW89_FCC][4] = -18,
+ [0][0][RTW89_ETSI][4] = 32,
+ [0][0][RTW89_FCC][6] = -18,
+ [0][0][RTW89_ETSI][6] = 32,
+ [0][0][RTW89_FCC][8] = -18,
+ [0][0][RTW89_ETSI][8] = 32,
+ [0][0][RTW89_FCC][10] = -18,
+ [0][0][RTW89_ETSI][10] = 32,
+ [0][0][RTW89_FCC][12] = -18,
+ [0][0][RTW89_ETSI][12] = 32,
+ [0][0][RTW89_FCC][14] = -18,
+ [0][0][RTW89_ETSI][14] = 32,
+ [0][0][RTW89_FCC][15] = -18,
+ [0][0][RTW89_ETSI][15] = 32,
+ [0][0][RTW89_FCC][17] = -18,
+ [0][0][RTW89_ETSI][17] = 32,
+ [0][0][RTW89_FCC][19] = -18,
+ [0][0][RTW89_ETSI][19] = 32,
+ [0][0][RTW89_FCC][21] = -18,
+ [0][0][RTW89_ETSI][21] = 32,
+ [0][0][RTW89_FCC][23] = -18,
+ [0][0][RTW89_ETSI][23] = 32,
+ [0][0][RTW89_FCC][25] = -18,
+ [0][0][RTW89_ETSI][25] = 32,
+ [0][0][RTW89_FCC][27] = -18,
+ [0][0][RTW89_ETSI][27] = 32,
+ [0][0][RTW89_FCC][29] = -18,
+ [0][0][RTW89_ETSI][29] = 32,
+ [0][0][RTW89_FCC][30] = -18,
+ [0][0][RTW89_ETSI][30] = 32,
+ [0][0][RTW89_FCC][32] = -18,
+ [0][0][RTW89_ETSI][32] = 32,
+ [0][0][RTW89_FCC][34] = -18,
+ [0][0][RTW89_ETSI][34] = 32,
+ [0][0][RTW89_FCC][36] = -18,
+ [0][0][RTW89_ETSI][36] = 32,
+ [0][0][RTW89_FCC][38] = -18,
+ [0][0][RTW89_ETSI][38] = 32,
+ [0][0][RTW89_FCC][40] = -18,
+ [0][0][RTW89_ETSI][40] = 32,
+ [0][0][RTW89_FCC][42] = -18,
+ [0][0][RTW89_ETSI][42] = 32,
+ [0][0][RTW89_FCC][44] = -16,
+ [0][0][RTW89_ETSI][44] = 32,
+ [0][0][RTW89_FCC][45] = -16,
+ [0][0][RTW89_ETSI][45] = 127,
+ [0][0][RTW89_FCC][47] = -18,
+ [0][0][RTW89_ETSI][47] = 127,
+ [0][0][RTW89_FCC][49] = -18,
+ [0][0][RTW89_ETSI][49] = 127,
+ [0][0][RTW89_FCC][51] = -18,
+ [0][0][RTW89_ETSI][51] = 127,
+ [0][0][RTW89_FCC][53] = -16,
+ [0][0][RTW89_ETSI][53] = 127,
+ [0][0][RTW89_FCC][55] = -18,
+ [0][0][RTW89_ETSI][55] = 127,
+ [0][0][RTW89_FCC][57] = -18,
+ [0][0][RTW89_ETSI][57] = 127,
+ [0][0][RTW89_FCC][59] = -18,
+ [0][0][RTW89_ETSI][59] = 127,
+ [0][0][RTW89_FCC][60] = -18,
+ [0][0][RTW89_ETSI][60] = 127,
+ [0][0][RTW89_FCC][62] = -18,
+ [0][0][RTW89_ETSI][62] = 127,
+ [0][0][RTW89_FCC][64] = -18,
+ [0][0][RTW89_ETSI][64] = 127,
+ [0][0][RTW89_FCC][66] = -18,
+ [0][0][RTW89_ETSI][66] = 127,
+ [0][0][RTW89_FCC][68] = -18,
+ [0][0][RTW89_ETSI][68] = 127,
+ [0][0][RTW89_FCC][70] = -16,
+ [0][0][RTW89_ETSI][70] = 127,
+ [0][0][RTW89_FCC][72] = -18,
+ [0][0][RTW89_ETSI][72] = 127,
+ [0][0][RTW89_FCC][74] = -18,
+ [0][0][RTW89_ETSI][74] = 127,
+ [0][0][RTW89_FCC][75] = -18,
+ [0][0][RTW89_ETSI][75] = 127,
+ [0][0][RTW89_FCC][77] = -18,
+ [0][0][RTW89_ETSI][77] = 127,
+ [0][0][RTW89_FCC][79] = -18,
+ [0][0][RTW89_ETSI][79] = 127,
+ [0][0][RTW89_FCC][81] = -18,
+ [0][0][RTW89_ETSI][81] = 127,
+ [0][0][RTW89_FCC][83] = -18,
+ [0][0][RTW89_ETSI][83] = 127,
+ [0][0][RTW89_FCC][85] = -18,
+ [0][0][RTW89_ETSI][85] = 127,
+ [0][0][RTW89_FCC][87] = -16,
+ [0][0][RTW89_ETSI][87] = 127,
+ [0][0][RTW89_FCC][89] = -16,
+ [0][0][RTW89_ETSI][89] = 127,
+ [0][0][RTW89_FCC][90] = -16,
+ [0][0][RTW89_ETSI][90] = 127,
+ [0][0][RTW89_FCC][92] = -16,
+ [0][0][RTW89_ETSI][92] = 127,
+ [0][0][RTW89_FCC][94] = -16,
+ [0][0][RTW89_ETSI][94] = 127,
+ [0][0][RTW89_FCC][96] = -16,
+ [0][0][RTW89_ETSI][96] = 127,
+ [0][0][RTW89_FCC][98] = -16,
+ [0][0][RTW89_ETSI][98] = 127,
+ [0][0][RTW89_FCC][100] = -16,
+ [0][0][RTW89_ETSI][100] = 127,
+ [0][0][RTW89_FCC][102] = -16,
+ [0][0][RTW89_ETSI][102] = 127,
+ [0][0][RTW89_FCC][104] = -16,
+ [0][0][RTW89_ETSI][104] = 127,
+ [0][0][RTW89_FCC][105] = -16,
+ [0][0][RTW89_ETSI][105] = 127,
+ [0][0][RTW89_FCC][107] = -12,
+ [0][0][RTW89_ETSI][107] = 127,
+ [0][0][RTW89_FCC][109] = -12,
+ [0][0][RTW89_ETSI][109] = 127,
+ [0][0][RTW89_FCC][111] = 127,
+ [0][0][RTW89_ETSI][111] = 127,
+ [0][0][RTW89_FCC][113] = 127,
+ [0][0][RTW89_ETSI][113] = 127,
+ [0][0][RTW89_FCC][115] = 127,
+ [0][0][RTW89_ETSI][115] = 127,
+ [0][0][RTW89_FCC][117] = 127,
+ [0][0][RTW89_ETSI][117] = 127,
+ [0][0][RTW89_FCC][119] = 127,
+ [0][0][RTW89_ETSI][119] = 127,
+ [0][1][RTW89_FCC][0] = -40,
+ [0][1][RTW89_ETSI][0] = 20,
+ [0][1][RTW89_FCC][2] = -40,
+ [0][1][RTW89_ETSI][2] = 20,
+ [0][1][RTW89_FCC][4] = -40,
+ [0][1][RTW89_ETSI][4] = 20,
+ [0][1][RTW89_FCC][6] = -40,
+ [0][1][RTW89_ETSI][6] = 20,
+ [0][1][RTW89_FCC][8] = -40,
+ [0][1][RTW89_ETSI][8] = 20,
+ [0][1][RTW89_FCC][10] = -40,
+ [0][1][RTW89_ETSI][10] = 20,
+ [0][1][RTW89_FCC][12] = -40,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_FCC][14] = -40,
+ [0][1][RTW89_ETSI][14] = 20,
+ [0][1][RTW89_FCC][15] = -40,
+ [0][1][RTW89_ETSI][15] = 20,
+ [0][1][RTW89_FCC][17] = -40,
+ [0][1][RTW89_ETSI][17] = 20,
+ [0][1][RTW89_FCC][19] = -40,
+ [0][1][RTW89_ETSI][19] = 20,
+ [0][1][RTW89_FCC][21] = -40,
+ [0][1][RTW89_ETSI][21] = 20,
+ [0][1][RTW89_FCC][23] = -40,
+ [0][1][RTW89_ETSI][23] = 20,
+ [0][1][RTW89_FCC][25] = -40,
+ [0][1][RTW89_ETSI][25] = 20,
+ [0][1][RTW89_FCC][27] = -40,
+ [0][1][RTW89_ETSI][27] = 20,
+ [0][1][RTW89_FCC][29] = -40,
+ [0][1][RTW89_ETSI][29] = 20,
+ [0][1][RTW89_FCC][30] = -40,
+ [0][1][RTW89_ETSI][30] = 20,
+ [0][1][RTW89_FCC][32] = -40,
+ [0][1][RTW89_ETSI][32] = 20,
+ [0][1][RTW89_FCC][34] = -40,
+ [0][1][RTW89_ETSI][34] = 20,
+ [0][1][RTW89_FCC][36] = -40,
+ [0][1][RTW89_ETSI][36] = 20,
+ [0][1][RTW89_FCC][38] = -40,
+ [0][1][RTW89_ETSI][38] = 20,
+ [0][1][RTW89_FCC][40] = -40,
+ [0][1][RTW89_ETSI][40] = 20,
+ [0][1][RTW89_FCC][42] = -40,
+ [0][1][RTW89_ETSI][42] = 20,
+ [0][1][RTW89_FCC][44] = -40,
+ [0][1][RTW89_ETSI][44] = 20,
+ [0][1][RTW89_FCC][45] = -40,
+ [0][1][RTW89_ETSI][45] = 127,
+ [0][1][RTW89_FCC][47] = -40,
+ [0][1][RTW89_ETSI][47] = 127,
+ [0][1][RTW89_FCC][49] = -40,
+ [0][1][RTW89_ETSI][49] = 127,
+ [0][1][RTW89_FCC][51] = -40,
+ [0][1][RTW89_ETSI][51] = 127,
+ [0][1][RTW89_FCC][53] = -40,
+ [0][1][RTW89_ETSI][53] = 127,
+ [0][1][RTW89_FCC][55] = -40,
+ [0][1][RTW89_ETSI][55] = 127,
+ [0][1][RTW89_FCC][57] = -40,
+ [0][1][RTW89_ETSI][57] = 127,
+ [0][1][RTW89_FCC][59] = -40,
+ [0][1][RTW89_ETSI][59] = 127,
+ [0][1][RTW89_FCC][60] = -40,
+ [0][1][RTW89_ETSI][60] = 127,
+ [0][1][RTW89_FCC][62] = -40,
+ [0][1][RTW89_ETSI][62] = 127,
+ [0][1][RTW89_FCC][64] = -40,
+ [0][1][RTW89_ETSI][64] = 127,
+ [0][1][RTW89_FCC][66] = -40,
+ [0][1][RTW89_ETSI][66] = 127,
+ [0][1][RTW89_FCC][68] = -40,
+ [0][1][RTW89_ETSI][68] = 127,
+ [0][1][RTW89_FCC][70] = -38,
+ [0][1][RTW89_ETSI][70] = 127,
+ [0][1][RTW89_FCC][72] = -38,
+ [0][1][RTW89_ETSI][72] = 127,
+ [0][1][RTW89_FCC][74] = -38,
+ [0][1][RTW89_ETSI][74] = 127,
+ [0][1][RTW89_FCC][75] = -38,
+ [0][1][RTW89_ETSI][75] = 127,
+ [0][1][RTW89_FCC][77] = -38,
+ [0][1][RTW89_ETSI][77] = 127,
+ [0][1][RTW89_FCC][79] = -38,
+ [0][1][RTW89_ETSI][79] = 127,
+ [0][1][RTW89_FCC][81] = -38,
+ [0][1][RTW89_ETSI][81] = 127,
+ [0][1][RTW89_FCC][83] = -38,
+ [0][1][RTW89_ETSI][83] = 127,
+ [0][1][RTW89_FCC][85] = -38,
+ [0][1][RTW89_ETSI][85] = 127,
+ [0][1][RTW89_FCC][87] = -40,
+ [0][1][RTW89_ETSI][87] = 127,
+ [0][1][RTW89_FCC][89] = -38,
+ [0][1][RTW89_ETSI][89] = 127,
+ [0][1][RTW89_FCC][90] = -38,
+ [0][1][RTW89_ETSI][90] = 127,
+ [0][1][RTW89_FCC][92] = -38,
+ [0][1][RTW89_ETSI][92] = 127,
+ [0][1][RTW89_FCC][94] = -38,
+ [0][1][RTW89_ETSI][94] = 127,
+ [0][1][RTW89_FCC][96] = -38,
+ [0][1][RTW89_ETSI][96] = 127,
+ [0][1][RTW89_FCC][98] = -38,
+ [0][1][RTW89_ETSI][98] = 127,
+ [0][1][RTW89_FCC][100] = -38,
+ [0][1][RTW89_ETSI][100] = 127,
+ [0][1][RTW89_FCC][102] = -38,
+ [0][1][RTW89_ETSI][102] = 127,
+ [0][1][RTW89_FCC][104] = -38,
+ [0][1][RTW89_ETSI][104] = 127,
+ [0][1][RTW89_FCC][105] = -38,
+ [0][1][RTW89_ETSI][105] = 127,
+ [0][1][RTW89_FCC][107] = -34,
+ [0][1][RTW89_ETSI][107] = 127,
+ [0][1][RTW89_FCC][109] = -34,
+ [0][1][RTW89_ETSI][109] = 127,
+ [0][1][RTW89_FCC][111] = 127,
+ [0][1][RTW89_ETSI][111] = 127,
+ [0][1][RTW89_FCC][113] = 127,
+ [0][1][RTW89_ETSI][113] = 127,
+ [0][1][RTW89_FCC][115] = 127,
+ [0][1][RTW89_ETSI][115] = 127,
+ [0][1][RTW89_FCC][117] = 127,
+ [0][1][RTW89_ETSI][117] = 127,
+ [0][1][RTW89_FCC][119] = 127,
+ [0][1][RTW89_ETSI][119] = 127,
+ [1][0][RTW89_FCC][0] = -4,
+ [1][0][RTW89_ETSI][0] = 46,
+ [1][0][RTW89_FCC][2] = -4,
+ [1][0][RTW89_ETSI][2] = 46,
+ [1][0][RTW89_FCC][4] = -4,
+ [1][0][RTW89_ETSI][4] = 46,
+ [1][0][RTW89_FCC][6] = -4,
+ [1][0][RTW89_ETSI][6] = 46,
+ [1][0][RTW89_FCC][8] = -4,
+ [1][0][RTW89_ETSI][8] = 46,
+ [1][0][RTW89_FCC][10] = -4,
+ [1][0][RTW89_ETSI][10] = 46,
+ [1][0][RTW89_FCC][12] = -4,
+ [1][0][RTW89_ETSI][12] = 46,
+ [1][0][RTW89_FCC][14] = -4,
+ [1][0][RTW89_ETSI][14] = 46,
+ [1][0][RTW89_FCC][15] = -4,
+ [1][0][RTW89_ETSI][15] = 46,
+ [1][0][RTW89_FCC][17] = -4,
+ [1][0][RTW89_ETSI][17] = 46,
+ [1][0][RTW89_FCC][19] = -4,
+ [1][0][RTW89_ETSI][19] = 46,
+ [1][0][RTW89_FCC][21] = -4,
+ [1][0][RTW89_ETSI][21] = 46,
+ [1][0][RTW89_FCC][23] = -4,
+ [1][0][RTW89_ETSI][23] = 46,
+ [1][0][RTW89_FCC][25] = -4,
+ [1][0][RTW89_ETSI][25] = 46,
+ [1][0][RTW89_FCC][27] = -4,
+ [1][0][RTW89_ETSI][27] = 46,
+ [1][0][RTW89_FCC][29] = -4,
+ [1][0][RTW89_ETSI][29] = 46,
+ [1][0][RTW89_FCC][30] = -4,
+ [1][0][RTW89_ETSI][30] = 46,
+ [1][0][RTW89_FCC][32] = -4,
+ [1][0][RTW89_ETSI][32] = 46,
+ [1][0][RTW89_FCC][34] = -4,
+ [1][0][RTW89_ETSI][34] = 46,
+ [1][0][RTW89_FCC][36] = -4,
+ [1][0][RTW89_ETSI][36] = 46,
+ [1][0][RTW89_FCC][38] = -4,
+ [1][0][RTW89_ETSI][38] = 46,
+ [1][0][RTW89_FCC][40] = -4,
+ [1][0][RTW89_ETSI][40] = 46,
+ [1][0][RTW89_FCC][42] = -4,
+ [1][0][RTW89_ETSI][42] = 46,
+ [1][0][RTW89_FCC][44] = -4,
+ [1][0][RTW89_ETSI][44] = 46,
+ [1][0][RTW89_FCC][45] = -4,
+ [1][0][RTW89_ETSI][45] = 127,
+ [1][0][RTW89_FCC][47] = -4,
+ [1][0][RTW89_ETSI][47] = 127,
+ [1][0][RTW89_FCC][49] = -4,
+ [1][0][RTW89_ETSI][49] = 127,
+ [1][0][RTW89_FCC][51] = -4,
+ [1][0][RTW89_ETSI][51] = 127,
+ [1][0][RTW89_FCC][53] = -4,
+ [1][0][RTW89_ETSI][53] = 127,
+ [1][0][RTW89_FCC][55] = -4,
+ [1][0][RTW89_ETSI][55] = 127,
+ [1][0][RTW89_FCC][57] = -4,
+ [1][0][RTW89_ETSI][57] = 127,
+ [1][0][RTW89_FCC][59] = -4,
+ [1][0][RTW89_ETSI][59] = 127,
+ [1][0][RTW89_FCC][60] = -4,
+ [1][0][RTW89_ETSI][60] = 127,
+ [1][0][RTW89_FCC][62] = -4,
+ [1][0][RTW89_ETSI][62] = 127,
+ [1][0][RTW89_FCC][64] = -4,
+ [1][0][RTW89_ETSI][64] = 127,
+ [1][0][RTW89_FCC][66] = -4,
+ [1][0][RTW89_ETSI][66] = 127,
+ [1][0][RTW89_FCC][68] = -4,
+ [1][0][RTW89_ETSI][68] = 127,
+ [1][0][RTW89_FCC][70] = -4,
+ [1][0][RTW89_ETSI][70] = 127,
+ [1][0][RTW89_FCC][72] = -4,
+ [1][0][RTW89_ETSI][72] = 127,
+ [1][0][RTW89_FCC][74] = -4,
+ [1][0][RTW89_ETSI][74] = 127,
+ [1][0][RTW89_FCC][75] = -4,
+ [1][0][RTW89_ETSI][75] = 127,
+ [1][0][RTW89_FCC][77] = -4,
+ [1][0][RTW89_ETSI][77] = 127,
+ [1][0][RTW89_FCC][79] = -4,
+ [1][0][RTW89_ETSI][79] = 127,
+ [1][0][RTW89_FCC][81] = -4,
+ [1][0][RTW89_ETSI][81] = 127,
+ [1][0][RTW89_FCC][83] = -4,
+ [1][0][RTW89_ETSI][83] = 127,
+ [1][0][RTW89_FCC][85] = -4,
+ [1][0][RTW89_ETSI][85] = 127,
+ [1][0][RTW89_FCC][87] = -4,
+ [1][0][RTW89_ETSI][87] = 127,
+ [1][0][RTW89_FCC][89] = -4,
+ [1][0][RTW89_ETSI][89] = 127,
+ [1][0][RTW89_FCC][90] = -4,
+ [1][0][RTW89_ETSI][90] = 127,
+ [1][0][RTW89_FCC][92] = -4,
+ [1][0][RTW89_ETSI][92] = 127,
+ [1][0][RTW89_FCC][94] = -4,
+ [1][0][RTW89_ETSI][94] = 127,
+ [1][0][RTW89_FCC][96] = -4,
+ [1][0][RTW89_ETSI][96] = 127,
+ [1][0][RTW89_FCC][98] = -4,
+ [1][0][RTW89_ETSI][98] = 127,
+ [1][0][RTW89_FCC][100] = -4,
+ [1][0][RTW89_ETSI][100] = 127,
+ [1][0][RTW89_FCC][102] = -4,
+ [1][0][RTW89_ETSI][102] = 127,
+ [1][0][RTW89_FCC][104] = -4,
+ [1][0][RTW89_ETSI][104] = 127,
+ [1][0][RTW89_FCC][105] = -4,
+ [1][0][RTW89_ETSI][105] = 127,
+ [1][0][RTW89_FCC][107] = 0,
+ [1][0][RTW89_ETSI][107] = 127,
+ [1][0][RTW89_FCC][109] = 2,
+ [1][0][RTW89_ETSI][109] = 127,
+ [1][0][RTW89_FCC][111] = 127,
+ [1][0][RTW89_ETSI][111] = 127,
+ [1][0][RTW89_FCC][113] = 127,
+ [1][0][RTW89_ETSI][113] = 127,
+ [1][0][RTW89_FCC][115] = 127,
+ [1][0][RTW89_ETSI][115] = 127,
+ [1][0][RTW89_FCC][117] = 127,
+ [1][0][RTW89_ETSI][117] = 127,
+ [1][0][RTW89_FCC][119] = 127,
+ [1][0][RTW89_ETSI][119] = 127,
+ [1][1][RTW89_FCC][0] = -26,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_FCC][2] = -28,
+ [1][1][RTW89_ETSI][2] = 32,
+ [1][1][RTW89_FCC][4] = -28,
+ [1][1][RTW89_ETSI][4] = 32,
+ [1][1][RTW89_FCC][6] = -28,
+ [1][1][RTW89_ETSI][6] = 32,
+ [1][1][RTW89_FCC][8] = -28,
+ [1][1][RTW89_ETSI][8] = 32,
+ [1][1][RTW89_FCC][10] = -28,
+ [1][1][RTW89_ETSI][10] = 32,
+ [1][1][RTW89_FCC][12] = -28,
+ [1][1][RTW89_ETSI][12] = 32,
+ [1][1][RTW89_FCC][14] = -28,
+ [1][1][RTW89_ETSI][14] = 32,
+ [1][1][RTW89_FCC][15] = -28,
+ [1][1][RTW89_ETSI][15] = 32,
+ [1][1][RTW89_FCC][17] = -28,
+ [1][1][RTW89_ETSI][17] = 32,
+ [1][1][RTW89_FCC][19] = -28,
+ [1][1][RTW89_ETSI][19] = 32,
+ [1][1][RTW89_FCC][21] = -28,
+ [1][1][RTW89_ETSI][21] = 32,
+ [1][1][RTW89_FCC][23] = -28,
+ [1][1][RTW89_ETSI][23] = 32,
+ [1][1][RTW89_FCC][25] = -28,
+ [1][1][RTW89_ETSI][25] = 32,
+ [1][1][RTW89_FCC][27] = -28,
+ [1][1][RTW89_ETSI][27] = 32,
+ [1][1][RTW89_FCC][29] = -28,
+ [1][1][RTW89_ETSI][29] = 32,
+ [1][1][RTW89_FCC][30] = -28,
+ [1][1][RTW89_ETSI][30] = 32,
+ [1][1][RTW89_FCC][32] = -28,
+ [1][1][RTW89_ETSI][32] = 32,
+ [1][1][RTW89_FCC][34] = -28,
+ [1][1][RTW89_ETSI][34] = 32,
+ [1][1][RTW89_FCC][36] = -28,
+ [1][1][RTW89_ETSI][36] = 32,
+ [1][1][RTW89_FCC][38] = -28,
+ [1][1][RTW89_ETSI][38] = 32,
+ [1][1][RTW89_FCC][40] = -28,
+ [1][1][RTW89_ETSI][40] = 32,
+ [1][1][RTW89_FCC][42] = -28,
+ [1][1][RTW89_ETSI][42] = 32,
+ [1][1][RTW89_FCC][44] = -28,
+ [1][1][RTW89_ETSI][44] = 34,
+ [1][1][RTW89_FCC][45] = -26,
+ [1][1][RTW89_ETSI][45] = 127,
+ [1][1][RTW89_FCC][47] = -28,
+ [1][1][RTW89_ETSI][47] = 127,
+ [1][1][RTW89_FCC][49] = -28,
+ [1][1][RTW89_ETSI][49] = 127,
+ [1][1][RTW89_FCC][51] = -28,
+ [1][1][RTW89_ETSI][51] = 127,
+ [1][1][RTW89_FCC][53] = -26,
+ [1][1][RTW89_ETSI][53] = 127,
+ [1][1][RTW89_FCC][55] = -28,
+ [1][1][RTW89_ETSI][55] = 127,
+ [1][1][RTW89_FCC][57] = -28,
+ [1][1][RTW89_ETSI][57] = 127,
+ [1][1][RTW89_FCC][59] = -28,
+ [1][1][RTW89_ETSI][59] = 127,
+ [1][1][RTW89_FCC][60] = -28,
+ [1][1][RTW89_ETSI][60] = 127,
+ [1][1][RTW89_FCC][62] = -28,
+ [1][1][RTW89_ETSI][62] = 127,
+ [1][1][RTW89_FCC][64] = -28,
+ [1][1][RTW89_ETSI][64] = 127,
+ [1][1][RTW89_FCC][66] = -28,
+ [1][1][RTW89_ETSI][66] = 127,
+ [1][1][RTW89_FCC][68] = -28,
+ [1][1][RTW89_ETSI][68] = 127,
+ [1][1][RTW89_FCC][70] = -26,
+ [1][1][RTW89_ETSI][70] = 127,
+ [1][1][RTW89_FCC][72] = -28,
+ [1][1][RTW89_ETSI][72] = 127,
+ [1][1][RTW89_FCC][74] = -28,
+ [1][1][RTW89_ETSI][74] = 127,
+ [1][1][RTW89_FCC][75] = -28,
+ [1][1][RTW89_ETSI][75] = 127,
+ [1][1][RTW89_FCC][77] = -28,
+ [1][1][RTW89_ETSI][77] = 127,
+ [1][1][RTW89_FCC][79] = -28,
+ [1][1][RTW89_ETSI][79] = 127,
+ [1][1][RTW89_FCC][81] = -28,
+ [1][1][RTW89_ETSI][81] = 127,
+ [1][1][RTW89_FCC][83] = -28,
+ [1][1][RTW89_ETSI][83] = 127,
+ [1][1][RTW89_FCC][85] = -28,
+ [1][1][RTW89_ETSI][85] = 127,
+ [1][1][RTW89_FCC][87] = -28,
+ [1][1][RTW89_ETSI][87] = 127,
+ [1][1][RTW89_FCC][89] = -26,
+ [1][1][RTW89_ETSI][89] = 127,
+ [1][1][RTW89_FCC][90] = -26,
+ [1][1][RTW89_ETSI][90] = 127,
+ [1][1][RTW89_FCC][92] = -26,
+ [1][1][RTW89_ETSI][92] = 127,
+ [1][1][RTW89_FCC][94] = -26,
+ [1][1][RTW89_ETSI][94] = 127,
+ [1][1][RTW89_FCC][96] = -26,
+ [1][1][RTW89_ETSI][96] = 127,
+ [1][1][RTW89_FCC][98] = -26,
+ [1][1][RTW89_ETSI][98] = 127,
+ [1][1][RTW89_FCC][100] = -26,
+ [1][1][RTW89_ETSI][100] = 127,
+ [1][1][RTW89_FCC][102] = -26,
+ [1][1][RTW89_ETSI][102] = 127,
+ [1][1][RTW89_FCC][104] = -26,
+ [1][1][RTW89_ETSI][104] = 127,
+ [1][1][RTW89_FCC][105] = -26,
+ [1][1][RTW89_ETSI][105] = 127,
+ [1][1][RTW89_FCC][107] = -22,
+ [1][1][RTW89_ETSI][107] = 127,
+ [1][1][RTW89_FCC][109] = -22,
+ [1][1][RTW89_ETSI][109] = 127,
+ [1][1][RTW89_FCC][111] = 127,
+ [1][1][RTW89_ETSI][111] = 127,
+ [1][1][RTW89_FCC][113] = 127,
+ [1][1][RTW89_ETSI][113] = 127,
+ [1][1][RTW89_FCC][115] = 127,
+ [1][1][RTW89_ETSI][115] = 127,
+ [1][1][RTW89_FCC][117] = 127,
+ [1][1][RTW89_ETSI][117] = 127,
+ [1][1][RTW89_FCC][119] = 127,
+ [1][1][RTW89_ETSI][119] = 127,
+ [2][0][RTW89_FCC][0] = 8,
+ [2][0][RTW89_ETSI][0] = 56,
+ [2][0][RTW89_FCC][2] = 8,
+ [2][0][RTW89_ETSI][2] = 56,
+ [2][0][RTW89_FCC][4] = 8,
+ [2][0][RTW89_ETSI][4] = 56,
+ [2][0][RTW89_FCC][6] = 8,
+ [2][0][RTW89_ETSI][6] = 56,
+ [2][0][RTW89_FCC][8] = 8,
+ [2][0][RTW89_ETSI][8] = 56,
+ [2][0][RTW89_FCC][10] = 8,
+ [2][0][RTW89_ETSI][10] = 56,
+ [2][0][RTW89_FCC][12] = 8,
+ [2][0][RTW89_ETSI][12] = 56,
+ [2][0][RTW89_FCC][14] = 8,
+ [2][0][RTW89_ETSI][14] = 56,
+ [2][0][RTW89_FCC][15] = 8,
+ [2][0][RTW89_ETSI][15] = 56,
+ [2][0][RTW89_FCC][17] = 8,
+ [2][0][RTW89_ETSI][17] = 56,
+ [2][0][RTW89_FCC][19] = 8,
+ [2][0][RTW89_ETSI][19] = 56,
+ [2][0][RTW89_FCC][21] = 8,
+ [2][0][RTW89_ETSI][21] = 56,
+ [2][0][RTW89_FCC][23] = 8,
+ [2][0][RTW89_ETSI][23] = 56,
+ [2][0][RTW89_FCC][25] = 8,
+ [2][0][RTW89_ETSI][25] = 56,
+ [2][0][RTW89_FCC][27] = 8,
+ [2][0][RTW89_ETSI][27] = 56,
+ [2][0][RTW89_FCC][29] = 8,
+ [2][0][RTW89_ETSI][29] = 56,
+ [2][0][RTW89_FCC][30] = 8,
+ [2][0][RTW89_ETSI][30] = 56,
+ [2][0][RTW89_FCC][32] = 8,
+ [2][0][RTW89_ETSI][32] = 56,
+ [2][0][RTW89_FCC][34] = 8,
+ [2][0][RTW89_ETSI][34] = 56,
+ [2][0][RTW89_FCC][36] = 8,
+ [2][0][RTW89_ETSI][36] = 56,
+ [2][0][RTW89_FCC][38] = 8,
+ [2][0][RTW89_ETSI][38] = 56,
+ [2][0][RTW89_FCC][40] = 8,
+ [2][0][RTW89_ETSI][40] = 56,
+ [2][0][RTW89_FCC][42] = 8,
+ [2][0][RTW89_ETSI][42] = 56,
+ [2][0][RTW89_FCC][44] = 8,
+ [2][0][RTW89_ETSI][44] = 56,
+ [2][0][RTW89_FCC][45] = 8,
+ [2][0][RTW89_ETSI][45] = 127,
+ [2][0][RTW89_FCC][47] = 8,
+ [2][0][RTW89_ETSI][47] = 127,
+ [2][0][RTW89_FCC][49] = 8,
+ [2][0][RTW89_ETSI][49] = 127,
+ [2][0][RTW89_FCC][51] = 8,
+ [2][0][RTW89_ETSI][51] = 127,
+ [2][0][RTW89_FCC][53] = 8,
+ [2][0][RTW89_ETSI][53] = 127,
+ [2][0][RTW89_FCC][55] = 8,
+ [2][0][RTW89_ETSI][55] = 127,
+ [2][0][RTW89_FCC][57] = 8,
+ [2][0][RTW89_ETSI][57] = 127,
+ [2][0][RTW89_FCC][59] = 8,
+ [2][0][RTW89_ETSI][59] = 127,
+ [2][0][RTW89_FCC][60] = 8,
+ [2][0][RTW89_ETSI][60] = 127,
+ [2][0][RTW89_FCC][62] = 8,
+ [2][0][RTW89_ETSI][62] = 127,
+ [2][0][RTW89_FCC][64] = 8,
+ [2][0][RTW89_ETSI][64] = 127,
+ [2][0][RTW89_FCC][66] = 8,
+ [2][0][RTW89_ETSI][66] = 127,
+ [2][0][RTW89_FCC][68] = 8,
+ [2][0][RTW89_ETSI][68] = 127,
+ [2][0][RTW89_FCC][70] = 8,
+ [2][0][RTW89_ETSI][70] = 127,
+ [2][0][RTW89_FCC][72] = 8,
+ [2][0][RTW89_ETSI][72] = 127,
+ [2][0][RTW89_FCC][74] = 8,
+ [2][0][RTW89_ETSI][74] = 127,
+ [2][0][RTW89_FCC][75] = 8,
+ [2][0][RTW89_ETSI][75] = 127,
+ [2][0][RTW89_FCC][77] = 8,
+ [2][0][RTW89_ETSI][77] = 127,
+ [2][0][RTW89_FCC][79] = 8,
+ [2][0][RTW89_ETSI][79] = 127,
+ [2][0][RTW89_FCC][81] = 8,
+ [2][0][RTW89_ETSI][81] = 127,
+ [2][0][RTW89_FCC][83] = 8,
+ [2][0][RTW89_ETSI][83] = 127,
+ [2][0][RTW89_FCC][85] = 8,
+ [2][0][RTW89_ETSI][85] = 127,
+ [2][0][RTW89_FCC][87] = 8,
+ [2][0][RTW89_ETSI][87] = 127,
+ [2][0][RTW89_FCC][89] = 8,
+ [2][0][RTW89_ETSI][89] = 127,
+ [2][0][RTW89_FCC][90] = 8,
+ [2][0][RTW89_ETSI][90] = 127,
+ [2][0][RTW89_FCC][92] = 8,
+ [2][0][RTW89_ETSI][92] = 127,
+ [2][0][RTW89_FCC][94] = 8,
+ [2][0][RTW89_ETSI][94] = 127,
+ [2][0][RTW89_FCC][96] = 8,
+ [2][0][RTW89_ETSI][96] = 127,
+ [2][0][RTW89_FCC][98] = 8,
+ [2][0][RTW89_ETSI][98] = 127,
+ [2][0][RTW89_FCC][100] = 8,
+ [2][0][RTW89_ETSI][100] = 127,
+ [2][0][RTW89_FCC][102] = 8,
+ [2][0][RTW89_ETSI][102] = 127,
+ [2][0][RTW89_FCC][104] = 8,
+ [2][0][RTW89_ETSI][104] = 127,
+ [2][0][RTW89_FCC][105] = 8,
+ [2][0][RTW89_ETSI][105] = 127,
+ [2][0][RTW89_FCC][107] = 10,
+ [2][0][RTW89_ETSI][107] = 127,
+ [2][0][RTW89_FCC][109] = 12,
+ [2][0][RTW89_ETSI][109] = 127,
+ [2][0][RTW89_FCC][111] = 127,
+ [2][0][RTW89_ETSI][111] = 127,
+ [2][0][RTW89_FCC][113] = 127,
+ [2][0][RTW89_ETSI][113] = 127,
+ [2][0][RTW89_FCC][115] = 127,
+ [2][0][RTW89_ETSI][115] = 127,
+ [2][0][RTW89_FCC][117] = 127,
+ [2][0][RTW89_ETSI][117] = 127,
+ [2][0][RTW89_FCC][119] = 127,
+ [2][0][RTW89_ETSI][119] = 127,
+ [2][1][RTW89_FCC][0] = -16,
+ [2][1][RTW89_ETSI][0] = 44,
+ [2][1][RTW89_FCC][2] = -16,
+ [2][1][RTW89_ETSI][2] = 44,
+ [2][1][RTW89_FCC][4] = -16,
+ [2][1][RTW89_ETSI][4] = 44,
+ [2][1][RTW89_FCC][6] = -16,
+ [2][1][RTW89_ETSI][6] = 44,
+ [2][1][RTW89_FCC][8] = -16,
+ [2][1][RTW89_ETSI][8] = 44,
+ [2][1][RTW89_FCC][10] = -16,
+ [2][1][RTW89_ETSI][10] = 44,
+ [2][1][RTW89_FCC][12] = -16,
+ [2][1][RTW89_ETSI][12] = 44,
+ [2][1][RTW89_FCC][14] = -16,
+ [2][1][RTW89_ETSI][14] = 44,
+ [2][1][RTW89_FCC][15] = -16,
+ [2][1][RTW89_ETSI][15] = 44,
+ [2][1][RTW89_FCC][17] = -16,
+ [2][1][RTW89_ETSI][17] = 44,
+ [2][1][RTW89_FCC][19] = -16,
+ [2][1][RTW89_ETSI][19] = 44,
+ [2][1][RTW89_FCC][21] = -16,
+ [2][1][RTW89_ETSI][21] = 44,
+ [2][1][RTW89_FCC][23] = -16,
+ [2][1][RTW89_ETSI][23] = 44,
+ [2][1][RTW89_FCC][25] = -16,
+ [2][1][RTW89_ETSI][25] = 44,
+ [2][1][RTW89_FCC][27] = -16,
+ [2][1][RTW89_ETSI][27] = 44,
+ [2][1][RTW89_FCC][29] = -16,
+ [2][1][RTW89_ETSI][29] = 44,
+ [2][1][RTW89_FCC][30] = -16,
+ [2][1][RTW89_ETSI][30] = 44,
+ [2][1][RTW89_FCC][32] = -16,
+ [2][1][RTW89_ETSI][32] = 44,
+ [2][1][RTW89_FCC][34] = -16,
+ [2][1][RTW89_ETSI][34] = 44,
+ [2][1][RTW89_FCC][36] = -16,
+ [2][1][RTW89_ETSI][36] = 44,
+ [2][1][RTW89_FCC][38] = -16,
+ [2][1][RTW89_ETSI][38] = 44,
+ [2][1][RTW89_FCC][40] = -16,
+ [2][1][RTW89_ETSI][40] = 44,
+ [2][1][RTW89_FCC][42] = -16,
+ [2][1][RTW89_ETSI][42] = 44,
+ [2][1][RTW89_FCC][44] = -16,
+ [2][1][RTW89_ETSI][44] = 44,
+ [2][1][RTW89_FCC][45] = -16,
+ [2][1][RTW89_ETSI][45] = 127,
+ [2][1][RTW89_FCC][47] = -16,
+ [2][1][RTW89_ETSI][47] = 127,
+ [2][1][RTW89_FCC][49] = -16,
+ [2][1][RTW89_ETSI][49] = 127,
+ [2][1][RTW89_FCC][51] = -16,
+ [2][1][RTW89_ETSI][51] = 127,
+ [2][1][RTW89_FCC][53] = -16,
+ [2][1][RTW89_ETSI][53] = 127,
+ [2][1][RTW89_FCC][55] = -16,
+ [2][1][RTW89_ETSI][55] = 127,
+ [2][1][RTW89_FCC][57] = -16,
+ [2][1][RTW89_ETSI][57] = 127,
+ [2][1][RTW89_FCC][59] = -16,
+ [2][1][RTW89_ETSI][59] = 127,
+ [2][1][RTW89_FCC][60] = -16,
+ [2][1][RTW89_ETSI][60] = 127,
+ [2][1][RTW89_FCC][62] = -16,
+ [2][1][RTW89_ETSI][62] = 127,
+ [2][1][RTW89_FCC][64] = -16,
+ [2][1][RTW89_ETSI][64] = 127,
+ [2][1][RTW89_FCC][66] = -16,
+ [2][1][RTW89_ETSI][66] = 127,
+ [2][1][RTW89_FCC][68] = -16,
+ [2][1][RTW89_ETSI][68] = 127,
+ [2][1][RTW89_FCC][70] = -16,
+ [2][1][RTW89_ETSI][70] = 127,
+ [2][1][RTW89_FCC][72] = -16,
+ [2][1][RTW89_ETSI][72] = 127,
+ [2][1][RTW89_FCC][74] = -16,
+ [2][1][RTW89_ETSI][74] = 127,
+ [2][1][RTW89_FCC][75] = -16,
+ [2][1][RTW89_ETSI][75] = 127,
+ [2][1][RTW89_FCC][77] = -16,
+ [2][1][RTW89_ETSI][77] = 127,
+ [2][1][RTW89_FCC][79] = -16,
+ [2][1][RTW89_ETSI][79] = 127,
+ [2][1][RTW89_FCC][81] = -16,
+ [2][1][RTW89_ETSI][81] = 127,
+ [2][1][RTW89_FCC][83] = -16,
+ [2][1][RTW89_ETSI][83] = 127,
+ [2][1][RTW89_FCC][85] = -18,
+ [2][1][RTW89_ETSI][85] = 127,
+ [2][1][RTW89_FCC][87] = -16,
+ [2][1][RTW89_ETSI][87] = 127,
+ [2][1][RTW89_FCC][89] = -16,
+ [2][1][RTW89_ETSI][89] = 127,
+ [2][1][RTW89_FCC][90] = -16,
+ [2][1][RTW89_ETSI][90] = 127,
+ [2][1][RTW89_FCC][92] = -16,
+ [2][1][RTW89_ETSI][92] = 127,
+ [2][1][RTW89_FCC][94] = -16,
+ [2][1][RTW89_ETSI][94] = 127,
+ [2][1][RTW89_FCC][96] = -16,
+ [2][1][RTW89_ETSI][96] = 127,
+ [2][1][RTW89_FCC][98] = -16,
+ [2][1][RTW89_ETSI][98] = 127,
+ [2][1][RTW89_FCC][100] = -16,
+ [2][1][RTW89_ETSI][100] = 127,
+ [2][1][RTW89_FCC][102] = -16,
+ [2][1][RTW89_ETSI][102] = 127,
+ [2][1][RTW89_FCC][104] = -16,
+ [2][1][RTW89_ETSI][104] = 127,
+ [2][1][RTW89_FCC][105] = -16,
+ [2][1][RTW89_ETSI][105] = 127,
+ [2][1][RTW89_FCC][107] = -12,
+ [2][1][RTW89_ETSI][107] = 127,
+ [2][1][RTW89_FCC][109] = -10,
+ [2][1][RTW89_ETSI][109] = 127,
+ [2][1][RTW89_FCC][111] = 127,
+ [2][1][RTW89_ETSI][111] = 127,
+ [2][1][RTW89_FCC][113] = 127,
+ [2][1][RTW89_ETSI][113] = 127,
+ [2][1][RTW89_FCC][115] = 127,
+ [2][1][RTW89_ETSI][115] = 127,
+ [2][1][RTW89_FCC][117] = 127,
+ [2][1][RTW89_ETSI][117] = 127,
+ [2][1][RTW89_FCC][119] = 127,
+ [2][1][RTW89_ETSI][119] = 127,
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_bb_table = {
+ .regs = rtw89_8852c_phy_bb_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table = {
+ .regs = rtw89_8852c_phy_bb_reg_gain,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_reg_gain),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_radioa_table = {
+ .regs = rtw89_8852c_phy_radioa_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radioa_regs),
+ .rf_path = RF_PATH_A,
+ .config = rtw89_phy_config_rf_reg_v1,
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_radiob_table = {
+ .regs = rtw89_8852c_phy_radiob_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radiob_regs),
+ .rf_path = RF_PATH_B,
+ .config = rtw89_phy_config_rf_reg_v1,
+};
+
+const struct rtw89_phy_table rtw89_8852c_phy_nctl_table = {
+ .regs = rtw89_8852c_phy_nctl_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_nctl_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_txpwr_table rtw89_8852c_byr_table = {
+ .data = rtw89_8852c_txpwr_byrate,
+ .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate),
+ .load = rtw89_phy_load_txpwr_byrate,
+};
+
+const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = {
+ .delta_swingidx_6gb_n = _txpwr_track_delta_swingidx_6gb_n,
+ .delta_swingidx_6gb_p = _txpwr_track_delta_swingidx_6gb_p,
+ .delta_swingidx_6ga_n = _txpwr_track_delta_swingidx_6ga_n,
+ .delta_swingidx_6ga_p = _txpwr_track_delta_swingidx_6ga_p,
+ .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n,
+ .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p,
+ .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n,
+ .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p,
+ .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n,
+ .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p,
+ .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n,
+ .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p,
+ .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n,
+ .delta_swingidx_2g_cck_b_p = _txpwr_track_delta_swingidx_2g_cck_b_p,
+ .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n,
+ .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p,
+};
+
+const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table = {
+ .data[RTW89_TSSI_BANDEDGE_FLAT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .data[RTW89_TSSI_BANDEDGE_LOW] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .data[RTW89_TSSI_BANDEDGE_MID] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .data[RTW89_TSSI_BANDEDGE_HIGH] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h
new file mode 100644
index 000000000000..7d71a92e2d27
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_TABLE_H__
+#define __RTW89_8852C_TABLE_H__
+
+#include "core.h"
+
+extern const struct rtw89_phy_table rtw89_8852c_phy_bb_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table;
+extern const struct rtw89_txpwr_table rtw89_8852c_byr_table;
+extern const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table;
+extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg;
+extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
+ [RTW89_REGD_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+extern const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
new file mode 100644
index 000000000000..35901f64d17d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = {
+ .tx_bd_addrs = {R_AX_DRV_FW_HSK_0, R_AX_DRV_FW_HSK_1, R_AX_DRV_FW_HSK_2,
+ R_AX_DRV_FW_HSK_3, 0, 0,
+ 0, 0, R_AX_DRV_FW_HSK_4,
+ 0, 0, 0,
+ R_AX_DRV_FW_HSK_5},
+ .rx_bd_addrs = {R_AX_DRV_FW_HSK_6, R_AX_DRV_FW_HSK_7},
+};
+
+static const struct rtw89_pci_info rtw8852c_pci_info = {
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_V1_256B,
+ .rx_burst = MAC_AX_RX_BURST_V1_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_ENABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+
+ .init_cfg_reg = R_AX_HAXI_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN_V1,
+ .rxhci_en_bit = B_AX_RXHCI_EN_V1,
+ .rxbd_mode_bit = B_AX_RXBD_MODE_V1,
+ .exp_ctrl_reg = R_AX_HAXI_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM_V1_MASK,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR_V1,
+ .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1,
+ .dma_stop1 = {R_AX_HAXI_DMA_STOP1, B_AX_TX_STOP1_MASK},
+ .dma_stop2 = {R_AX_HAXI_DMA_STOP2, B_AX_TX_STOP2_ALL},
+ .dma_busy1 = {R_AX_HAXI_DMA_BUSY1, DMA_BUSY1_CHECK},
+ .dma_busy2_reg = R_AX_HAXI_DMA_BUSY2,
+ .dma_busy3_reg = R_AX_HAXI_DMA_BUSY3,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM_V1,
+ .cpwm_addr = R_AX_PCIE_CRPWM,
+ .tx_dma_ch_mask = 0,
+ .bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
+
+ .ltr_set = rtw89_pci_ltr_set_v1,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .config_intr_mask = rtw89_pci_config_intr_mask_v1,
+ .enable_intr = rtw89_pci_enable_intr_v1,
+ .disable_intr = rtw89_pci_disable_intr_v1,
+ .recognize_intrs = rtw89_pci_recognize_intrs_v1,
+};
+
+static const struct rtw89_driver_info rtw89_8852ce_info = {
+ .chip = &rtw8852c_chip_info,
+ .bus = {
+ .pci = &rtw8852c_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852ce_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xc852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ce_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ce_id_table);
+
+static struct pci_driver rtw89_8852ce_driver = {
+ .name = "rtw89_8852ce",
+ .id_table = rtw89_8852ce_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ce_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852CE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 097c87899cea..dfccae81c380 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -5,15 +5,122 @@
#include "debug.h"
#include "sar.h"
+static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
+		rtw89_debug(rtwdev, RTW89_DBG_SAR,
+			    "center freq %u to SAR subband mapping is unhandled\n",
+			    center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_SAR_2GHZ_SUBBAND;
+ case 5180 ... 5320:
+ return RTW89_SAR_5GHZ_SUBBAND_1_2;
+ case 5500 ... 5720:
+ return RTW89_SAR_5GHZ_SUBBAND_2_E;
+ case 5745 ... 5825:
+ return RTW89_SAR_5GHZ_SUBBAND_3;
+ case 5955 ... 6155:
+ return RTW89_SAR_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_SAR_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_SAR_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_SAR_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_SAR_6GHZ_SUBBAND_7_H;
+
+	/* freq 6875 (ch 185, 20MHz) spans RTW89_SAR_6GHZ_SUBBAND_7_H
+	 * and RTW89_SAR_6GHZ_SUBBAND_8, so it is described directly with
+	 * struct rtw89_sar_span below.
+	 */
+
+ case 6895 ... 7115:
+ return RTW89_SAR_6GHZ_SUBBAND_8;
+ }
+}
+
+struct rtw89_sar_span {
+ enum rtw89_sar_subband subband_low;
+ enum rtw89_sar_subband subband_high;
+};
+
+#define RTW89_SAR_SPAN_VALID(span) ((span)->subband_high)
+
+#define RTW89_SAR_6GHZ_SPAN_HEAD 6145
+#define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \
+ ((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2)
+
+#define RTW89_DECL_SAR_6GHZ_SPAN(center_freq, subband_l, subband_h) \
+ [RTW89_SAR_6GHZ_SPAN_IDX(center_freq)] = { \
+ .subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
+ .subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ }
+
+/* Since 6GHz SAR subbands are not edge aligned, some cases span two SAR
+ * subbands. In the following, we describe each of them with rtw89_sar_span.
+ */
+static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
+ RTW89_DECL_SAR_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_SAR_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_SAR_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_SAR_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_SAR_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_SAR_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_SAR_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
+};
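+
+/* A worked example of the index mapping above (illustrative only): with
+ * RTW89_SAR_6GHZ_SPAN_HEAD == 6145,
+ *
+ *   RTW89_SAR_6GHZ_SPAN_IDX(6145) = ((6145 - 6145) / 5) / 2 = 0
+ *   RTW89_SAR_6GHZ_SPAN_IDX(6505) = ((6505 - 6145) / 5) / 2 = 36
+ *   RTW89_SAR_6GHZ_SPAN_IDX(6885) = ((6885 - 6145) / 5) / 2 = 74
+ *
+ * Slots between the declared entries stay zero-initialized, so their
+ * subband_high is 0 and RTW89_SAR_SPAN_VALID() rejects them.
+ */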
+
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- enum rtw89_subband subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u32 center_freq = chan->freq;
+ const struct rtw89_sar_span *span = NULL;
+ enum rtw89_sar_subband subband_l, subband_h;
+ int idx;
+
+ if (band == RTW89_BAND_6G) {
+ idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq);
+		/* To decrease the size of rtw89_sar_overlapping_6ghz[],
+		 * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs
+		 * so that the first span sits at index 0 of the table. So,
+		 * if the center frequency is below the first one, the index
+		 * is negative.
+		 */
+ if (idx >= 0 && idx < ARRAY_SIZE(rtw89_sar_overlapping_6ghz))
+ span = &rtw89_sar_overlapping_6ghz[idx];
+ }
+
+ if (span && RTW89_SAR_SPAN_VALID(span)) {
+ subband_l = span->subband_low;
+ subband_h = span->subband_high;
+ } else {
+ subband_l = rtw89_sar_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "for {band %u, center_freq %u}, SAR subband: {%u, %u}\n",
+ band, center_freq, subband_l, subband_h);
- if (!rtwsar->set[subband])
+ if (!rtwsar->set[subband_l] && !rtwsar->set[subband_h])
return -ENODATA;
- *cfg = rtwsar->cfg[subband];
+ if (!rtwsar->set[subband_l])
+ *cfg = rtwsar->cfg[subband_h];
+ else if (!rtwsar->set[subband_h])
+ *cfg = rtwsar->cfg[subband_l];
+ else
+ *cfg = min(rtwsar->cfg[subband_l], rtwsar->cfg[subband_h]);
+
return 0;
}
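+
+/* For example (hypothetical values): when the current 6 GHz channel spans
+ * SUBBAND_7_H and SUBBAND_8 and both limits are set, say cfg[7_H] == 40
+ * and cfg[8] == 48, the helper above returns min(40, 48) == 40; when only
+ * one of the two subbands has a configured limit, that limit is used
+ * alone.
+ */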
@@ -121,28 +228,27 @@ static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
}
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
exit:
mutex_unlock(&rtwdev->mutex);
return ret;
}
-static const u8 rtw89_common_sar_subband_map[] = {
- RTW89_CH_2G,
- RTW89_CH_5G_BAND_1,
- RTW89_CH_5G_BAND_3,
- RTW89_CH_5G_BAND_4,
-};
-
static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = {
{ .start_freq = 2412, .end_freq = 2484, },
{ .start_freq = 5180, .end_freq = 5320, },
{ .start_freq = 5500, .end_freq = 5720, },
{ .start_freq = 5745, .end_freq = 5825, },
+ { .start_freq = 5955, .end_freq = 6155, },
+ { .start_freq = 6175, .end_freq = 6415, },
+ { .start_freq = 6435, .end_freq = 6515, },
+ { .start_freq = 6535, .end_freq = 6695, },
+ { .start_freq = 6715, .end_freq = 6875, },
+ { .start_freq = 6875, .end_freq = 7115, },
};
-static_assert(ARRAY_SIZE(rtw89_common_sar_subband_map) ==
+static_assert(RTW89_SAR_SUBBAND_NR ==
ARRAY_SIZE(rtw89_common_sar_freq_ranges));
const struct cfg80211_sar_capa rtw89_sar_capa = {
@@ -159,7 +265,6 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
u8 fct;
u32 freq_start;
u32 freq_end;
- u32 band;
s32 power;
u32 i, idx;
@@ -175,15 +280,14 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
freq_start = rtw89_common_sar_freq_ranges[idx].start_freq;
freq_end = rtw89_common_sar_freq_ranges[idx].end_freq;
- band = rtw89_common_sar_subband_map[idx];
power = sar->sub_specs[i].power;
- rtw89_info(rtwdev, "On freq %u to %u, ", freq_start, freq_end);
- rtw89_info(rtwdev, "set SAR power limit %d (unit: 1/%lu dBm)\n",
- power, BIT(fct));
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "On freq %u to %u, set SAR limit %d (unit: 1/%lu dBm)\n",
+ freq_start, freq_end, power, BIT(fct));
- sar_common.set[band] = true;
- sar_common.cfg[band] = power;
+ sar_common.set[idx] = true;
+ sar_common.cfg[idx] = power;
}
return rtw89_apply_sar_common(rtwdev, &sar_common);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 837cdc366a61..c1a4bc1c64d1 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -2,10 +2,15 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/devcoredump.h>
+
#include "cam.h"
+#include "chan.h"
#include "debug.h"
+#include "fw.h"
#include "mac.h"
#include "ps.h"
+#include "reg.h"
#include "ser.h"
#include "util.h"
@@ -67,6 +72,80 @@ static char *ser_st_name(struct rtw89_ser *ser)
return "err_st_name";
}
+#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
+struct ser_cd_ ## _name { \
+ u32 type; \
+ u32 type_size; \
+ u64 padding; \
+ u8 data[_size]; \
+} __packed; \
+static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
+{ \
+ p->type = _type; \
+ p->type_size = sizeof(p->data); \
+ p->padding = 0x0123456789abcdef; \
+}
+
+enum rtw89_ser_cd_type {
+ RTW89_SER_CD_FW_RSVD_PLE = 0,
+ RTW89_SER_CD_FW_BACKTRACE = 1,
+};
+
+RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
+ RTW89_SER_CD_FW_RSVD_PLE,
+ RTW89_FW_RSVD_PLE_SIZE);
+
+RTW89_DEF_SER_CD_TYPE(fw_backtrace,
+ RTW89_SER_CD_FW_BACKTRACE,
+ RTW89_FW_BACKTRACE_MAX_SIZE);
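+
+/* For reference, the first invocation above expands roughly to the
+ * following (a sketch of the generated code, not an addition):
+ *
+ *   struct ser_cd_fw_rsvd_ple {
+ *           u32 type;       // RTW89_SER_CD_FW_RSVD_PLE
+ *           u32 type_size;  // sizeof(data)
+ *           u64 padding;    // constant 0x0123456789abcdef marker
+ *           u8 data[RTW89_FW_RSVD_PLE_SIZE];
+ *   } __packed;
+ *   static void ser_cd_fw_rsvd_ple_init(struct ser_cd_fw_rsvd_ple *p);
+ */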
+
+struct rtw89_ser_cd_buffer {
+ struct ser_cd_fw_rsvd_ple fwple;
+ struct ser_cd_fw_backtrace fwbt;
+} __packed;
+
+static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ser_cd_buffer *buf;
+
+ buf = vzalloc(sizeof(*buf));
+ if (!buf)
+ return NULL;
+
+ ser_cd_fw_rsvd_ple_init(&buf->fwple);
+ ser_cd_fw_backtrace_init(&buf->fwbt);
+
+ return buf;
+}
+
+static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
+ struct rtw89_ser_cd_buffer *buf)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");
+
+ /* After calling dev_coredump, buf's lifetime is supposed to be
+ * handled by the device coredump framework. Note that a new dump
+	 * will be discarded if a previous one hasn't been released by the
+	 * framework yet.
+ */
+ dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
+}
+
+static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
+ struct rtw89_ser_cd_buffer *buf, bool free_self)
+{
+ if (!free_self)
+ return;
+
+	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by itself\n");
+
+	/* When a problem happens while filling the core dump data, we
+	 * don't send it to the device coredump framework. Instead, we
+	 * free buf ourselves.
+	 */
+ vfree(buf);
+}
+
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
@@ -74,7 +153,10 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
ser_st_name(ser), ser_ev_name(ser, evt));
+ mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
ser->st_tbl[ser->state].st_func(ser, evt);
}
@@ -220,11 +302,39 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtwvif->trigger = false;
}
+static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+ if (sta->tdls)
+ rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
+
+ INIT_LIST_HEAD(&rtwsta->ba_cam_list);
+}
+
+static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ ser_sta_deinit_cam_iter,
+ rtwvif);
+
+ rtw89_cam_deinit(rtwdev, rtwvif);
+
+ bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
+}
+
static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
struct rtw89_vif *rtwvif;
rtw89_cam_reset_keys(rtwdev);
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ ser_deinit_cam(rtwdev, rtwvif);
+
rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
ser_reset_vif(rtwdev, rtwvif);
@@ -281,8 +391,12 @@ static void hal_send_m4_event(struct rtw89_ser *ser)
/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
switch (evt) {
case SER_EV_STATE_IN:
+ rtw89_hci_recovery_complete(rtwdev);
+ clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
break;
case SER_EV_L1_RESET:
ser_state_goto(ser, SER_RESET_TRX_ST);
@@ -291,6 +405,8 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
ser_state_goto(ser, SER_L2_RESET_ST);
break;
case SER_EV_STATE_OUT:
+ rtw89_hci_recovery_start(rtwdev);
+ break;
default:
break;
}
@@ -365,6 +481,139 @@ static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
}
}
+static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
+ u8 sel, u32 start_addr, u32 len)
+{
+ u32 *ptr = (u32 *)buf;
+ u32 base_addr, start_page, residue;
+ u32 cnt = 0;
+ u32 i;
+
+ start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
+ residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr = rtw89_mac_mem_base_addrs[sel];
+ base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
+
+ while (cnt < len) {
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);
+
+ for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
+ i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
+ i += 4, ptr++) {
+ *ptr = rtw89_read32(rtwdev, i);
+ cnt += 4;
+ if (cnt >= len)
+ break;
+ }
+
+ residue = 0;
+ base_addr += MAC_MEM_DUMP_PAGE_SIZE;
+ }
+}
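+
+/* Sketch of the page walk above (symbolic; no assumption is made about
+ * the actual value of MAC_MEM_DUMP_PAGE_SIZE): for start_addr == P + r,
+ * where P is a page-aligned offset and r < MAC_MEM_DUMP_PAGE_SIZE, the
+ * first iteration maps the page at base_addr + P and reads from
+ * R_AX_INDIR_ACCESS_ENTRY + r to the end of the window; each following
+ * iteration advances the mapped page by MAC_MEM_DUMP_PAGE_SIZE and reads
+ * the whole window from offset 0 (residue is cleared), until cnt >= len.
+ */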
+
+static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
+{
+ u32 start_addr = rtwdev->chip->rsvd_ple_ofst;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER,
+ "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
+ start_addr);
+ ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
+ RTW89_FW_RSVD_PLE_SIZE);
+}
+
+struct __fw_backtrace_entry {
+ u32 wcpu_addr;
+ u32 size;
+ u32 key;
+} __packed;
+
+struct __fw_backtrace_info {
+ u32 ra;
+ u32 sp;
+} __packed;
+
+static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
+ sizeof(struct __fw_backtrace_info));
+
+static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
+ const struct __fw_backtrace_entry *ent)
+{
+ struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
+ u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
+ u32 fwbt_size = ent->size;
+ u32 fwbt_key = ent->key;
+ u32 i;
+
+ if (fwbt_addr == 0) {
+ rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
+ fwbt_addr);
+ return -EINVAL;
+ }
+
+ if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
+ rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
+ fwbt_key);
+ return -EINVAL;
+ }
+
+ if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
+ fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
+ rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
+ fwbt_size);
+ return -EINVAL;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);
+
+ for (i = R_AX_INDIR_ACCESS_ENTRY;
+ i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
+ i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
+ *ptr = (struct __fw_backtrace_info){
+ .ra = rtw89_read32(rtwdev, i),
+ .sp = rtw89_read32(rtwdev, i + 4),
+ };
+ rtw89_debug(rtwdev, RTW89_DBG_SER,
+ "next sp: 0x%x, next ra: 0x%x\n",
+ ptr->sp, ptr->ra);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
+ return 0;
+}
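+
+/* Layout the loop above assumes for the dumped region (a sketch): the
+ * backtrace buffer is a packed array of {ra, sp} pairs, each pair being
+ * RTW89_FW_BACKTRACE_INFO_SIZE (8) bytes:
+ *
+ *   offset 0x00: ra0, sp0
+ *   offset 0x08: ra1, sp1
+ *   ...
+ *
+ * so fwbt_size must be a multiple of the pair size, which is what
+ * RTW89_VALID_FW_BACKTRACE_SIZE() is presumably checking.
+ */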
+
+static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ struct rtw89_ser_cd_buffer *buf;
+ struct __fw_backtrace_entry fwbt_ent;
+ int ret = 0;
+
+ buf = rtw89_ser_cd_prep(rtwdev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto bottom;
+ }
+
+ rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);
+
+ fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
+ ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
+ if (ret)
+ goto bottom;
+
+ rtw89_ser_cd_send(rtwdev, buf);
+
+bottom:
+ rtw89_ser_cd_free(rtwdev, buf, !!ret);
+
+ ser_reset_mac_binding(rtwdev);
+ rtw89_core_stop(rtwdev);
+ rtw89_entity_init(rtwdev);
+ INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
+}
+
static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
@@ -372,8 +621,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
mutex_lock(&rtwdev->mutex);
- ser_reset_mac_binding(rtwdev);
- rtw89_core_stop(rtwdev);
+ ser_l2_reset_st_pre_hdl(ser);
mutex_unlock(&rtwdev->mutex);
ieee80211_restart_hw(rtwdev->hw);
@@ -396,7 +644,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
}
}
-static struct event_ent ser_ev_tbl[] = {
+static const struct event_ent ser_ev_tbl[] = {
{SER_EV_NONE, "SER_EV_NONE"},
{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
@@ -412,7 +660,7 @@ static struct event_ent ser_ev_tbl[] = {
{SER_EV_MAXX, "SER_EV_MAX"}
};
-static struct state_ent ser_st_tbl[] = {
+static const struct state_ent ser_st_tbl[] = {
{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
@@ -456,7 +704,7 @@ int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
u8 event = SER_EV_NONE;
- rtw89_info(rtwdev, "ser event = 0x%04x\n", err);
+ rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);
switch (err) {
case MAC_AX_ERR_L1_ERR_DMAC:
@@ -482,8 +730,10 @@ int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
break;
}
- if (event == SER_EV_NONE)
+ if (event == SER_EV_NONE) {
+ rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
return -EINVAL;
+ }
ser_send_msg(&rtwdev->ser, event);
return 0;
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 75b11249f306..b889e7bf34c0 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -24,6 +24,7 @@
/* TX WD BODY DWORD 0 */
#define RTW89_TXWD_BODY0_WP_OFFSET GENMASK(31, 24)
+#define RTW89_TXWD_BODY0_WP_OFFSET_V1 GENMASK(28, 24)
#define RTW89_TXWD_BODY0_MORE_DATA BIT(23)
#define RTW89_TXWD_BODY0_WD_INFO_EN BIT(22)
#define RTW89_TXWD_BODY0_FW_DL BIT(20)
@@ -31,9 +32,14 @@
#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11)
#define RTW89_TXWD_BODY0_WD_PAGE BIT(7)
#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5)
+#define RTW89_TXWD_BODY0_HW_SSN_SEL GENMASK(3, 2)
+#define RTW89_TXWD_BODY0_HW_SSN_MODE GENMASK(1, 0)
/* TX WD BODY DWORD 1 */
+#define RTW89_TXWD_BODY1_ADDR_INFO_NUM GENMASK(31, 26)
#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16)
+#define RTW89_TXWD_BODY1_SEC_KEYID GENMASK(5, 4)
+#define RTW89_TXWD_BODY1_SEC_TYPE GENMASK(3, 0)
/* TX WD BODY DWORD 2 */
#define RTW89_TXWD_BODY2_MACID GENMASK(30, 24)
@@ -47,8 +53,22 @@
#define RTW89_TXWD_BODY3_SW_SEQ GENMASK(11, 0)
/* TX WD BODY DWORD 4 */
+#define RTW89_TXWD_BODY4_SEC_IV_L1 GENMASK(31, 24)
+#define RTW89_TXWD_BODY4_SEC_IV_L0 GENMASK(23, 16)
/* TX WD BODY DWORD 5 */
+#define RTW89_TXWD_BODY5_SEC_IV_H5 GENMASK(31, 24)
+#define RTW89_TXWD_BODY5_SEC_IV_H4 GENMASK(23, 16)
+#define RTW89_TXWD_BODY5_SEC_IV_H3 GENMASK(15, 8)
+#define RTW89_TXWD_BODY5_SEC_IV_H2 GENMASK(7, 0)
+
+/* TX WD BODY DWORD 6 (V1) */
+
+/* TX WD BODY DWORD 7 (V1) */
+#define RTW89_TXWD_BODY7_USE_RATE_V1 BIT(31)
+#define RTW89_TXWD_BODY7_DATA_BW GENMASK(29, 28)
+#define RTW89_TXWD_BODY7_GI_LTF GENMASK(27, 25)
+#define RTW89_TXWD_BODY7_DATA_RATE GENMASK(24, 16)
/* TX WD INFO DWORD 0 */
#define RTW89_TXWD_INFO0_USE_RATE BIT(30)
@@ -56,6 +76,7 @@
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
/* TX WD INFO DWORD 1 */
#define RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(24, 16)
@@ -66,6 +87,7 @@
#define RTW89_TXWD_INFO2_AMPDU_DENSITY GENMASK(20, 18)
#define RTW89_TXWD_INFO2_SEC_TYPE GENMASK(12, 9)
#define RTW89_TXWD_INFO2_SEC_HW_ENC BIT(8)
+#define RTW89_TXWD_INFO2_FORCE_KEY_EN BIT(8)
#define RTW89_TXWD_INFO2_SEC_CAM_IDX GENMASK(7, 0)
/* TX WD INFO DWORD 3 */
@@ -76,6 +98,92 @@
/* TX WD INFO DWORD 5 */
+/* RX WD dword0 */
+#define AX_RXD_RPKT_LEN_MASK GENMASK(13, 0)
+#define AX_RXD_SHIFT_MASK GENMASK(15, 14)
+#define AX_RXD_WL_HD_IV_LEN_MASK GENMASK(21, 16)
+#define AX_RXD_BB_SEL BIT(22)
+#define AX_RXD_MAC_INFO_VLD BIT(23)
+#define AX_RXD_RPKT_TYPE_MASK GENMASK(27, 24)
+#define AX_RXD_DRV_INFO_SIZE_MASK GENMASK(30, 28)
+#define AX_RXD_LONG_RXD BIT(31)
+
+/* RX WD dword1 */
+#define AX_RXD_PPDU_TYPE_MASK GENMASK(3, 0)
+#define AX_RXD_PPDU_CNT_MASK GENMASK(6, 4)
+#define AX_RXD_SR_EN BIT(7)
+#define AX_RXD_USER_ID_MASK GENMASK(15, 8)
+#define AX_RXD_USER_ID_v1_MASK GENMASK(13, 8)
+#define AX_RXD_RX_DATARATE_MASK GENMASK(24, 16)
+#define AX_RXD_RX_GI_LTF_MASK GENMASK(27, 25)
+#define AX_RXD_NON_SRG_PPDU BIT(28)
+#define AX_RXD_INTER_PPDU BIT(29)
+#define AX_RXD_NON_SRG_PPDU_v1 BIT(14)
+#define AX_RXD_INTER_PPDU_v1 BIT(15)
+#define AX_RXD_BW_MASK GENMASK(31, 30)
+#define AX_RXD_BW_v1_MASK GENMASK(31, 29)
+
+/* RX WD dword2 */
+#define AX_RXD_FREERUN_CNT_MASK GENMASK(31, 0)
+
+/* RX WD dword3 */
+#define AX_RXD_A1_MATCH BIT(0)
+#define AX_RXD_SW_DEC BIT(1)
+#define AX_RXD_HW_DEC BIT(2)
+#define AX_RXD_AMPDU BIT(3)
+#define AX_RXD_AMPDU_END_PKT BIT(4)
+#define AX_RXD_AMSDU BIT(5)
+#define AX_RXD_AMSDU_CUT BIT(6)
+#define AX_RXD_LAST_MSDU BIT(7)
+#define AX_RXD_BYPASS BIT(8)
+#define AX_RXD_CRC32_ERR BIT(9)
+#define AX_RXD_ICV_ERR BIT(10)
+#define AX_RXD_MAGIC_WAKE BIT(11)
+#define AX_RXD_UNICAST_WAKE BIT(12)
+#define AX_RXD_PATTERN_WAKE BIT(13)
+#define AX_RXD_GET_CH_INFO_MASK GENMASK(15, 14)
+#define AX_RXD_PATTERN_IDX_MASK GENMASK(20, 16)
+#define AX_RXD_TARGET_IDC_MASK GENMASK(23, 21)
+#define AX_RXD_CHKSUM_OFFLOAD_EN BIT(24)
+#define AX_RXD_WITH_LLC BIT(25)
+#define AX_RXD_RX_STATISTICS BIT(26)
+
+/* RX WD dword4 */
+#define AX_RXD_TYPE_MASK GENMASK(1, 0)
+#define AX_RXD_MC BIT(2)
+#define AX_RXD_BC BIT(3)
+#define AX_RXD_MD BIT(4)
+#define AX_RXD_MF BIT(5)
+#define AX_RXD_PWR BIT(6)
+#define AX_RXD_QOS BIT(7)
+#define AX_RXD_TID_MASK GENMASK(11, 8)
+#define AX_RXD_EOSP BIT(12)
+#define AX_RXD_HTC BIT(13)
+#define AX_RXD_QNULL BIT(14)
+#define AX_RXD_SEQ_MASK GENMASK(27, 16)
+#define AX_RXD_FRAG_MASK GENMASK(31, 28)
+
+/* RX WD dword5 */
+#define AX_RXD_SEC_CAM_IDX_MASK GENMASK(7, 0)
+#define AX_RXD_ADDR_CAM_MASK GENMASK(15, 8)
+#define AX_RXD_MAC_ID_MASK GENMASK(23, 16)
+#define AX_RXD_RX_PL_ID_MASK GENMASK(27, 24)
+#define AX_RXD_ADDR_CAM_VLD BIT(28)
+#define AX_RXD_ADDR_FWD_EN BIT(29)
+#define AX_RXD_RX_PL_MATCH BIT(30)
+
+/* RX WD dword6 */
+#define AX_RXD_MAC_ADDR_MASK GENMASK(31, 0)
+
+/* RX WD dword7 */
+#define AX_RXD_MAC_ADDR_H_MASK GENMASK(15, 0)
+#define AX_RXD_SMART_ANT BIT(16)
+#define AX_RXD_SEC_TYPE_MASK GENMASK(20, 17)
+#define AX_RXD_HDR_CNV BIT(21)
+#define AX_RXD_HDR_OFFSET_MASK GENMASK(26, 22)
+#define AX_RXD_BIP_KEYID BIT(27)
+#define AX_RXD_BIP_ENC BIT(28)
+
/* RX DESC helpers */
/* Short Descriptor */
#define RTW89_GET_RXWD_LONG_RXD(rxdesc) \
@@ -96,6 +204,8 @@
le32_get_bits((rxdesc)->dword0, GENMASK(13, 0))
#define RTW89_GET_RXWD_BW(rxdesc) \
le32_get_bits((rxdesc)->dword1, GENMASK(31, 30))
+#define RTW89_GET_RXWD_BW_V1(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(31, 29))
#define RTW89_GET_RXWD_GI_LTF(rxdesc) \
le32_get_bits((rxdesc)->dword1, GENMASK(27, 25))
#define RTW89_GET_RXWD_DATA_RATE(rxdesc) \
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index 229e81009de6..1ae80b7561da 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -14,4 +14,34 @@
#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
+/* C division truncates toward zero, so the result of a negative dividend
+ * and a positive divisor is rounded up instead of down. Make it round-down
+ * (floor) whenever the truncated result was rounded up.
+ * Note: the maximum value of divisor is 0x7FFF_FFFF, because we cast it to
+ * a signed value to make the compiler use a signed divide instruction.
+ */
+static inline s32 s32_div_u32_round_down(s32 dividend, u32 divisor, s32 *remainder)
+{
+ s32 i_divisor = (s32)divisor;
+ s32 i_remainder;
+ s32 quotient;
+
+ quotient = dividend / i_divisor;
+ i_remainder = dividend % i_divisor;
+
+ if (i_remainder < 0) {
+ quotient--;
+ i_remainder += i_divisor;
+ }
+
+ if (remainder)
+ *remainder = i_remainder;
+ return quotient;
+}
+
+static inline s32 s32_div_u32_round_closest(s32 dividend, u32 divisor)
+{
+ return s32_div_u32_round_down(dividend + divisor / 2, divisor, NULL);
+}
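+
+/* Worked example of the helpers above: C's '/' truncates toward zero, so
+ * -7 / 4 == -1 with remainder -3; s32_div_u32_round_down(-7, 4, &r)
+ * corrects this to quotient -2 with r == 1, preserving
+ * dividend == quotient * divisor + remainder with 0 <= remainder < divisor.
+ * s32_div_u32_round_closest(-7, 4) computes
+ * s32_div_u32_round_down(-7 + 2, 4, NULL) == -2, i.e. -1.75 rounded to -2.
+ */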
+
#endif
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index ff2448394a1e..82a7458e01ae 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -251,7 +251,7 @@ struct ndis_80211_bssid_ex {
struct ndis_80211_bssid_list_ex {
__le32 num_items;
- struct ndis_80211_bssid_ex bssid[];
+ u8 bssid_data[];
} __packed;
struct ndis_80211_fixed_ies {
@@ -489,14 +489,16 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast);
+ int link_id, u8 key_index, bool unicast,
+ bool multicast);
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo);
@@ -2082,7 +2084,8 @@ resize_buf:
netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len);
bssid_len = 0;
- bssid = next_bssid_list_item(bssid_list->bssid, &bssid_len, buf, len);
+ bssid = next_bssid_list_item((void *)bssid_list->bssid_data,
+ &bssid_len, buf, len);
/* Device returns incorrect 'num_items'. Workaround by ignoring the
* received 'num_items' and walking through full bssid buffer instead.
@@ -2377,8 +2380,8 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
}
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2413,7 +2416,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2424,7 +2428,8 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2813,8 +2818,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
resp_ie_len, 0, GFP_KERNEL);
} else {
struct cfg80211_roam_info roam_info = {
- .channel = get_current_channel(usbdev, NULL),
- .bssid = bssid,
+ .links[0].channel =
+ get_current_channel(usbdev, NULL),
+ .links[0].bssid = bssid,
.req_ie = req_ie,
.req_ie_len = req_ie_len,
.resp_ie = resp_ie,
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index a0c5d02ae88c..8a3d86897ea8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common)
rsi_coex_sched_tx_pkts(coex_cb);
} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);
- complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
+ kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
}
int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 6bfaab48b507..0f3a80f66b61 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -420,7 +420,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
rsi_hal_send_sta_notify_frame(common,
RSI_IFTYPE_STATION,
STA_CONNECTED, bss->bssid,
- bss->qos, bss->aid, 0,
+ bss->qos, vif->cfg.aid,
+ 0,
vif);
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index dca81a4bbdd7..c61f83a7333b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -295,7 +295,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct rsi_hw *adapter = common->priv;
struct ieee80211_vif *vif;
struct ieee80211_tx_info *info;
- struct ieee80211_bss_conf *bss;
int status = -EINVAL;
if (!skb)
@@ -307,11 +306,10 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
if (!info->control.vif)
goto err;
vif = info->control.vif;
- bss = &vif->bss_conf;
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
- (!bss->assoc))
+ (!vif->cfg.assoc))
goto err;
status = rsi_send_pkt_to_bus(common, skb);
@@ -336,7 +334,6 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_bss_conf *bss;
struct ieee80211_hdr *wh;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
@@ -361,13 +358,13 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
return status;
}
- bss = &info->control.vif->bss_conf;
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
/* Indicate to firmware to give cfm for probe */
- if (ieee80211_is_probe_req(wh->frame_control) && !bss->assoc) {
+ if (ieee80211_is_probe_req(wh->frame_control) &&
+ !info->control.vif->cfg.assoc) {
rsi_dbg(INFO_ZONE,
"%s: blocking mgmt queue\n", __func__);
mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST;
@@ -444,7 +441,7 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
return -EINVAL;
mac_bcn = ieee80211_beacon_get_tim(adapter->hw,
vif,
- &tim_offset, NULL);
+ &tim_offset, NULL, 0);
if (!mac_bcn) {
rsi_dbg(ERR_ZONE, "Failed to get beacon from mac80211\n");
return -EINVAL;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 913e11fb3807..2fbec51c8f94 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -237,7 +237,6 @@ static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
struct cfg80211_scan_request *scan_req = &hw_req->req;
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- struct ieee80211_bss_conf *bss = &vif->bss_conf;
rsi_dbg(INFO_ZONE, "***** Hardware scan start *****\n");
common->mac_ops_resumed = false;
@@ -256,7 +255,7 @@ static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
/* If STA is not connected, return with special value 1, in order
* to start sw_scan in mac80211
*/
- if (!bss->assoc)
+ if (!vif->cfg.assoc)
return 1;
mutex_lock(&common->mutex);
@@ -579,7 +578,6 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
u16 channel = curchan->hw_value;
struct ieee80211_vif *vif;
- struct ieee80211_bss_conf *bss;
bool assoc = false;
int i;
@@ -593,8 +591,7 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
if (!vif)
continue;
if (vif->type == NL80211_IFTYPE_STATION) {
- bss = &vif->bss_conf;
- if (bss->assoc) {
+ if (vif->cfg.assoc) {
assoc = true;
break;
}
@@ -700,7 +697,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
}
if ((vif->type == NL80211_IFTYPE_STATION ||
vif->type == NL80211_IFTYPE_P2P_CLIENT) &&
- (!sta_vif || vif->bss_conf.assoc))
+ (!sta_vif || vif->cfg.assoc))
sta_vif = vif;
}
if (set_ps && sta_vif) {
@@ -786,7 +783,7 @@ static void rsi_switch_channel(struct rsi_hw *adapter,
static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
@@ -797,8 +794,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
if (changed & BSS_CHANGED_ASSOC) {
rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
- __func__, bss_conf->assoc);
- if (bss_conf->assoc) {
+ __func__, vif->cfg.assoc);
+ if (vif->cfg.assoc) {
/* Send the RX filter frame */
rx_filter_word = (ALLOW_DATA_ASSOC_PEER |
ALLOW_CTRL_ASSOC_PEER |
@@ -807,17 +804,17 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
}
rsi_inform_bss_status(common,
RSI_OPMODE_STA,
- bss_conf->assoc,
+ vif->cfg.assoc,
bss_conf->bssid,
bss_conf->qos,
- bss_conf->aid,
+ vif->cfg.aid,
NULL, 0,
bss_conf->assoc_capability, vif);
adapter->ps_info.dtim_interval_duration = bss->dtim_period;
adapter->ps_info.listen_interval = conf->listen_interval;
/* If U-APSD is updated, send ps parameters to firmware */
- if (bss->assoc) {
+ if (vif->cfg.assoc) {
if (common->uapsd_bitmap) {
rsi_dbg(INFO_ZONE, "Configuring UAPSD\n");
rsi_conf_uapsd(adapter, vif);
@@ -892,13 +889,15 @@ static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
* for a hardware TX queue.
* @hw: Pointer to the ieee80211_hw structure
* @vif: Pointer to the ieee80211_vif structure.
+ * @link_id: the link ID if MLO is used, otherwise 0
* @queue: Queue number.
* @params: Pointer to ieee80211_tx_queue_params structure.
*
* Return: 0 on success, negative error code on failure.
*/
static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct rsi_hw *adapter = hw->priv;
@@ -1359,7 +1358,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
if (!bss)
return;
/* CQM only for connected AP beacons, the RSSI is a weighted avg */
- if (bss->assoc && !(memcmp(bss->bssid, hdr->addr2, ETH_ALEN))) {
+ if (vif->cfg.assoc && !(memcmp(bss->bssid, hdr->addr2, ETH_ALEN))) {
if (ieee80211_is_beacon(hdr->frame_control))
rsi_perform_cqm(common, hdr->addr2, rxs->signal, vif);
}
@@ -1490,13 +1489,13 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
- common->bitrate_mask[common->band] = sta->supp_rates[common->band];
- common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
- if (sta->ht_cap.ht_supported) {
+ common->bitrate_mask[common->band] = sta->deflink.supp_rates[common->band];
+ common->vif_info[0].is_ht = sta->deflink.ht_cap.ht_supported;
+ if (sta->deflink.ht_cap.ht_supported) {
common->bitrate_mask[NL80211_BAND_2GHZ] =
- sta->supp_rates[NL80211_BAND_2GHZ];
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ sta->deflink.supp_rates[NL80211_BAND_2GHZ];
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
common->vif_info[0].sgi = true;
ieee80211_start_tx_ba_session(sta, 0, 0);
}
@@ -1737,7 +1736,7 @@ static void rsi_resume_conn_channel(struct rsi_common *common)
}
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
- vif->bss_conf.assoc) {
+ vif->cfg.assoc) {
rsi_switch_channel(adapter, vif);
break;
}
@@ -1862,17 +1861,15 @@ static u16 rsi_wow_map_triggers(struct rsi_common *common,
int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
{
struct rsi_common *common = adapter->priv;
+ struct ieee80211_vif *vif = adapter->vifs[0];
u16 triggers = 0;
u16 rx_filter_word = 0;
- struct ieee80211_bss_conf *bss = NULL;
rsi_dbg(INFO_ZONE, "Config WoWLAN to device\n");
- if (!adapter->vifs[0])
+ if (!vif)
return -EINVAL;
- bss = &adapter->vifs[0]->bss_conf;
-
if (WARN_ON(!wowlan)) {
rsi_dbg(ERR_ZONE, "WoW triggers not enabled\n");
return -EINVAL;
@@ -1884,7 +1881,7 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
rsi_dbg(ERR_ZONE, "%s:No valid WoW triggers\n", __func__);
return -EINVAL;
}
- if (!bss->assoc) {
+ if (!vif->cfg.assoc) {
rsi_dbg(ERR_ZONE,
"Cannot configure WoWLAN (Station not connected)\n");
common->wow_flags |= RSI_WOW_NO_CONNECTION;
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 5d1490fc32db..f9f004446b07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -264,7 +264,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common)
if (common->init_done)
rsi_core_qos_processor(common);
} while (atomic_read(&common->tx_thread.thread_done) == 0);
- complete_and_exit(&common->tx_thread.completion, 0);
+ kthread_complete_and_exit(&common->tx_thread.completion, 0);
}
#ifdef CONFIG_RSI_COEX
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 0848f7a7e76c..1b309e47a1f1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1357,10 +1357,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
is_ht = common->vif_info[0].is_ht;
is_sgi = common->vif_info[0].sgi;
} else {
- rate_bitmap = sta->supp_rates[band];
- is_ht = sta->ht_cap.ht_supported;
- if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
- (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ rate_bitmap = sta->deflink.supp_rates[band];
+ is_ht = sta->deflink.ht_cap.ht_supported;
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
is_sgi = true;
}
@@ -1635,7 +1635,6 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
struct ieee80211_vif *vif)
{
struct rsi_common *common = adapter->priv;
- struct ieee80211_bss_conf *bss = &vif->bss_conf;
struct rsi_request_ps *ps;
struct rsi_ps_info *ps_info;
struct sk_buff *skb;
@@ -1669,7 +1668,7 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
ps->ps_sleep.sleep_duration =
cpu_to_le32(ps_info->deep_sleep_wakeup_period);
- if (bss->assoc)
+ if (vif->cfg.assoc)
ps->ps_sleep.connected_sleep = RSI_CONNECTED_SLEEP;
else
ps->ps_sleep.connected_sleep = RSI_DEEP_SLEEP;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 9f16128e4ffa..d09998796ac0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -796,7 +796,7 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
* rsi_sdio_host_intf_read_pkt() - This function reads the packet
* from the device.
* @adapter: Pointer to the adapter data structure.
- * @pkt: Pointer to the packet data to be read from the the device.
+ * @pkt: Pointer to the packet data to be read from the device.
* @length: Length of the data to be read from the device.
*
* Return: 0 on success, -1 on failure.
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 8ace1874e5cb..b2b47a0abcbf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common)
rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__);
atomic_inc(&sdev->rx_thread.thread_done);
- complete_and_exit(&sdev->rx_thread.completion, 0);
+ kthread_complete_and_exit(&sdev->rx_thread.completion, 0);
}
/**
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 4ffcdde1acb1..5130b0e72adc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common)
out:
rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
skb_queue_purge(&dev->rx_q);
- complete_and_exit(&dev->rx_thread.completion, 0);
+ kthread_complete_and_exit(&dev->rx_thread.completion, 0);
}
diff --git a/drivers/net/wireless/silabs/Kconfig b/drivers/net/wireless/silabs/Kconfig
new file mode 100644
index 000000000000..6262a799bf36
--- /dev/null
+++ b/drivers/net/wireless/silabs/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config WLAN_VENDOR_SILABS
+ bool "Silicon Laboratories devices"
+ default y
+ help
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_SILABS
+
+source "drivers/net/wireless/silabs/wfx/Kconfig"
+
+endif # WLAN_VENDOR_SILABS
diff --git a/drivers/net/wireless/silabs/Makefile b/drivers/net/wireless/silabs/Makefile
new file mode 100644
index 000000000000..c2263ee21006
--- /dev/null
+++ b/drivers/net/wireless/silabs/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/net/wireless/silabs/wfx/Kconfig b/drivers/net/wireless/silabs/wfx/Kconfig
new file mode 100644
index 000000000000..835a855409d8
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WFX
+ tristate "Silicon Labs wireless chips WF200 and further"
+ depends on MAC80211
+ depends on MMC || !MMC # do not allow WFX=y if MMC=m
+ depends on (SPI || MMC)
+ help
+	  This is a driver for the Silicon Labs WFxxx series (WF200 and
+	  further) chipsets. These chips can be found on SPI or SDIO buses.
+
+ Silabs does not use a reliable SDIO vendor ID. So, to avoid conflicts,
+ the driver won't probe the device if it is not also declared in the
+ Device Tree.
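+
+# A note on the "depends on MMC || !MMC" idiom above (explanatory comment,
+# not a new symbol): the expression evaluates to 'm' when MMC=m and to 'y'
+# otherwise, so it caps WFX at 'm' whenever the MMC core is modular while
+# leaving WFX unconstrained when MMC is built-in or disabled.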
diff --git a/drivers/net/wireless/silabs/wfx/Makefile b/drivers/net/wireless/silabs/wfx/Makefile
new file mode 100644
index 000000000000..c8b356f71c99
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Necessary for CREATE_TRACE_POINTS
+CFLAGS_debug.o = -I$(src)
+
+wfx-y := \
+ bh.o \
+ hwio.o \
+ fwio.o \
+ hif_tx_mib.o \
+ hif_tx.o \
+ hif_rx.o \
+ queue.o \
+ data_tx.o \
+ data_rx.o \
+ scan.o \
+ sta.o \
+ key.o \
+ main.o \
+ debug.o
+wfx-$(CONFIG_SPI) += bus_spi.o
+# When CONFIG_MMC == m, append to 'wfx-y' (and not to 'wfx-m')
+wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o
+
+obj-$(CONFIG_WFX) += wfx.o
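+
+# How the $(subst ...) line above behaves (a sketch): with CONFIG_MMC=m,
+# $(subst m,y,m) expands to 'y', so the assignment reads
+# 'wfx-y += bus_sdio.o' and bus_sdio.o is linked into wfx regardless of
+# whether wfx itself is 'm' or 'y'; with CONFIG_MMC unset it becomes the
+# never-used 'wfx-' list and the file is dropped.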
diff --git a/drivers/net/wireless/silabs/wfx/bh.c b/drivers/net/wireless/silabs/wfx/bh.c
new file mode 100644
index 000000000000..21dfdcf9cc27
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bh.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/gpio/consumer.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "traces.h"
+#include "hif_rx.h"
+#include "hif_api_cmd.h"
+
+static void device_wakeup(struct wfx_dev *wdev)
+{
+ int max_retry = 3;
+
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+ if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
+ return;
+
+ if (wfx_api_older_than(wdev, 1, 4)) {
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
+ if (!completion_done(&wdev->hif.ctrl_ready))
+ usleep_range(2000, 2500);
+ return;
+ }
+ for (;;) {
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
+		/* completion.h does not provide any function to wait for a completion without
+		 * consuming it (a kind of wait_for_completion_done_timeout()). So we have to
+		 * emulate it.
+		 */
+ if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) {
+ complete(&wdev->hif.ctrl_ready);
+ return;
+ } else if (max_retry-- > 0) {
+			/* Older firmware has a race in the sleep/wake-up process. Redoing the
+			 * process is sufficient to unfreeze the chip.
+			 */
+			dev_err(wdev->dev, "timeout while waking up chip\n");
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
+ usleep_range(2000, 2500);
+ } else {
+ dev_err(wdev->dev, "max wake-up retries reached\n");
+ return;
+ }
+ }
+}
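+
+/* The wait-without-consume emulation used above, in isolation (a sketch,
+ * not new driver code):
+ *
+ *   if (wait_for_completion_timeout(&done, timeout)) {
+ *           complete(&done);  // re-arm what the successful wait consumed
+ *           // 'done' was observed with no net change to its count
+ *   }
+ */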
+
+static void device_release(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
+}
+
+static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
+{
+ struct sk_buff *skb;
+ struct wfx_hif_msg *hif;
+ size_t alloc_len;
+ size_t computed_len;
+ int release_count;
+ int piggyback = 0;
+
+	WARN(read_len > round_down(0xFFF, 2) * sizeof(u16), "request exceeds the chip capability");
+
+ /* Add 2 to take into account piggyback size */
+ alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
+ skb = dev_alloc_skb(alloc_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (wfx_data_read(wdev, skb->data, alloc_len))
+ goto err;
+
+ piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
+ _trace_piggyback(piggyback, false);
+
+ hif = (struct wfx_hif_msg *)skb->data;
+ WARN(hif->encrypted & 0x3, "encryption is unsupported");
+ if (WARN(read_len < sizeof(struct wfx_hif_msg), "corrupted read"))
+ goto err;
+ computed_len = le16_to_cpu(hif->len);
+ computed_len = round_up(computed_len, 2);
+ if (computed_len != read_len) {
+ dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
+ computed_len, read_len);
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
+ hif, read_len, true);
+ goto err;
+ }
+
+ if (!(hif->id & HIF_ID_IS_INDICATION)) {
+ (*is_cnf)++;
+ if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
+ release_count =
+ ((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
+ else
+ release_count = 1;
+ WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
+ wdev->hif.tx_buffers_used -= release_count;
+ }
+ _trace_hif_recv(hif, wdev->hif.tx_buffers_used);
+
+ if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
+ if (hif->seqnum != wdev->hif.rx_seqnum)
+ dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
+ hif->seqnum, wdev->hif.rx_seqnum);
+ wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
+ }
+
+ skb_put(skb, le16_to_cpu(hif->len));
+	/* wfx_handle_rx takes care of the SKB lifetime */
+ wfx_handle_rx(wdev, skb);
+ if (!wdev->hif.tx_buffers_used)
+ wake_up(&wdev->hif.tx_buffers_empty);
+
+ return piggyback;
+
+err:
+ if (skb)
+ dev_kfree_skb(skb);
+ return -EIO;
+}
+
+static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
+{
+ size_t len;
+ int i;
+ int ctrl_reg, piggyback;
+
+ piggyback = 0;
+ for (i = 0; i < max_msg; i++) {
+ if (piggyback & CTRL_NEXT_LEN_MASK)
+ ctrl_reg = piggyback;
+ else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
+ else
+ ctrl_reg = 0;
+ if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
+ return i;
+		/* ctrl_reg units are 16-bit words */
+ len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
+ piggyback = rx_helper(wdev, len, num_cnf);
+ if (piggyback < 0)
+ return i;
+ if (!(piggyback & CTRL_WLAN_READY))
+ dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
+ piggyback);
+ }
+ if (piggyback & CTRL_NEXT_LEN_MASK) {
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
+ complete(&wdev->hif.ctrl_ready);
+ if (ctrl_reg)
+ dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
+ ctrl_reg, piggyback);
+ }
+ return i;
+}
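+
+/* Example of the piggyback chaining above (CTRL_NEXT_LEN_MASK is assumed
+ * to cover the low 12 bits of the control word): if rx_helper() returns a
+ * piggyback word whose length field is 0x025, the next iteration reads
+ * 0x025 * 2 == 74 bytes without touching the control register; a zero
+ * length field ends the burst, and any leftover length is pushed back
+ * through ctrl_reg and ctrl_ready for the next bh run.
+ */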
+
+static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg *hif)
+{
+ int ret;
+ void *data;
+ bool is_encrypted = false;
+ size_t len = le16_to_cpu(hif->len);
+
+ WARN(len < sizeof(*hif), "try to send corrupted data");
+
+ hif->seqnum = wdev->hif.tx_seqnum;
+ wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
+
+ data = hif;
+	WARN(len > le16_to_cpu(wdev->hw_caps.size_inp_ch_buf),
+	     "request exceeds the chip capability: %zu > %d\n",
+ len, le16_to_cpu(wdev->hw_caps.size_inp_ch_buf));
+ len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
+ ret = wfx_data_write(wdev, data, len);
+ if (ret)
+ goto end;
+
+ wdev->hif.tx_buffers_used++;
+ _trace_hif_send(hif, wdev->hif.tx_buffers_used);
+end:
+ if (is_encrypted)
+ kfree(data);
+}
+
+static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
+{
+ struct wfx_hif_msg *hif;
+ int i;
+
+ for (i = 0; i < max_msg; i++) {
+ hif = NULL;
+ if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) {
+ if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+ hif = wdev->hif_cmd.buf_send;
+ } else {
+ hif = wfx_tx_queues_get(wdev);
+ }
+ }
+ if (!hif)
+ return i;
+ tx_helper(wdev, hif);
+ }
+ return i;
+}
+
+/* In SDIO mode, it is necessary to access a register to acknowledge the last received message.
+ * It could be possible to restrict this acknowledgment to SDIO mode, and to do it only when the
+ * last operation was rx.
+ */
+static void ack_sdio_data(struct wfx_dev *wdev)
+{
+ u32 cfg_reg;
+
+ wfx_config_reg_read(wdev, &cfg_reg);
+ if (cfg_reg & 0xFF) {
+ dev_warn(wdev->dev, "chip reports errors: %02x\n", cfg_reg & 0xFF);
+ wfx_config_reg_write_bits(wdev, 0xFF, 0x00);
+ }
+}
+
+static void bh_work(struct work_struct *work)
+{
+ struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
+ int stats_req = 0, stats_cnf = 0, stats_ind = 0;
+ bool release_chip = false, last_op_is_rx = false;
+ int num_tx, num_rx;
+
+ device_wakeup(wdev);
+ do {
+ num_tx = bh_work_tx(wdev, 32);
+ stats_req += num_tx;
+ if (num_tx)
+ last_op_is_rx = false;
+ num_rx = bh_work_rx(wdev, 32, &stats_cnf);
+ stats_ind += num_rx;
+ if (num_rx)
+ last_op_is_rx = true;
+ } while (num_rx || num_tx);
+ stats_ind -= stats_cnf;
+
+ if (last_op_is_rx)
+ ack_sdio_data(wdev);
+ if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
+ device_release(wdev);
+ release_chip = true;
+ }
+ _trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip);
+}
+
+/* An IRQ from the chip occurred */
+void wfx_bh_request_rx(struct wfx_dev *wdev)
+{
+ u32 cur, prev;
+
+ wfx_control_reg_read(wdev, &cur);
+ prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
+ complete(&wdev->hif.ctrl_ready);
+ queue_work(wdev->bh_wq, &wdev->hif.bh);
+
+ if (!(cur & CTRL_NEXT_LEN_MASK))
+ dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
+ cur);
+ if (prev != 0)
+ dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
+ prev, cur);
+}
+
+/* The driver wants to send data */
+void wfx_bh_request_tx(struct wfx_dev *wdev)
+{
+ queue_work(wdev->bh_wq, &wdev->hif.bh);
+}
+
+/* If the IRQ is not available, this function allows manually polling the control register and
+ * simulating an IRQ when an event happens.
+ *
+ * Note that the device has a bug: if an IRQ is raised while the host reads the control register,
+ * the IRQ is lost. So, use this function carefully (only during device initialisation).
+ */
+void wfx_bh_poll_irq(struct wfx_dev *wdev)
+{
+ ktime_t now, start;
+ u32 reg;
+
+ WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
+ flush_workqueue(wdev->bh_wq);
+ start = ktime_get();
+ for (;;) {
+ wfx_control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & 0xFFF)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, 1000))) {
+			dev_err(wdev->dev, "timeout while polling control register\n");
+ return;
+ }
+ udelay(200);
+ }
+ wfx_bh_request_rx(wdev);
+}
+
+void wfx_bh_register(struct wfx_dev *wdev)
+{
+ INIT_WORK(&wdev->hif.bh, bh_work);
+ init_completion(&wdev->hif.ctrl_ready);
+ init_waitqueue_head(&wdev->hif.tx_buffers_empty);
+}
+
+void wfx_bh_unregister(struct wfx_dev *wdev)
+{
+ flush_work(&wdev->hif.bh);
+}
diff --git a/drivers/net/wireless/silabs/wfx/bh.h b/drivers/net/wireless/silabs/wfx/bh.h
new file mode 100644
index 000000000000..a44c8b421b7c
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bh.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BH_H
+#define WFX_BH_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+
+struct wfx_dev;
+
+struct wfx_hif {
+ struct work_struct bh;
+ struct completion ctrl_ready;
+ wait_queue_head_t tx_buffers_empty;
+ atomic_t ctrl_reg;
+ int rx_seqnum;
+ int tx_seqnum;
+ int tx_buffers_used;
+};
+
+void wfx_bh_register(struct wfx_dev *wdev);
+void wfx_bh_unregister(struct wfx_dev *wdev);
+void wfx_bh_request_rx(struct wfx_dev *wdev);
+void wfx_bh_request_tx(struct wfx_dev *wdev);
+void wfx_bh_poll_irq(struct wfx_dev *wdev);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h
new file mode 100644
index 000000000000..ccadfdd6873c
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bus.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common bus abstraction layer.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BUS_H
+#define WFX_BUS_H
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+
+#define WFX_REG_CONFIG 0x0
+#define WFX_REG_CONTROL 0x1
+#define WFX_REG_IN_OUT_QUEUE 0x2
+#define WFX_REG_AHB_DPORT 0x3
+#define WFX_REG_BASE_ADDR 0x4
+#define WFX_REG_SRAM_DPORT 0x5
+#define WFX_REG_SET_GEN_R_W 0x6
+#define WFX_REG_FRAME_OUT 0x7
+
+struct wfx_hwbus_ops {
+ int (*copy_from_io)(void *bus_priv, unsigned int addr, void *dst, size_t count);
+ int (*copy_to_io)(void *bus_priv, unsigned int addr, const void *src, size_t count);
+ int (*irq_subscribe)(void *bus_priv);
+ int (*irq_unsubscribe)(void *bus_priv);
+ void (*lock)(void *bus_priv);
+ void (*unlock)(void *bus_priv);
+ size_t (*align_size)(void *bus_priv, size_t size);
+};
+
+extern struct sdio_driver wfx_sdio_driver;
+extern struct spi_driver wfx_spi_driver;
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
new file mode 100644
index 000000000000..51a0d58a9070
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SDIO interface.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/align.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static const struct wfx_platform_data pdata_wf200 = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/wf200.pds",
+};
+
+static const struct wfx_platform_data pdata_brd4001a = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/brd4001a.pds",
+};
+
+static const struct wfx_platform_data pdata_brd8022a = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/brd8022a.pds",
+};
+
+static const struct wfx_platform_data pdata_brd8023a = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/brd8023a.pds",
+};
+
+struct wfx_sdio_priv {
+ struct sdio_func *func;
+ struct wfx_dev *core;
+ u8 buf_id_tx;
+ u8 buf_id_rx;
+ int of_irq;
+};
+
+static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id, void *dst, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+	WARN(reg_id > 7, "chip only has 8 registers");
+ WARN(!IS_ALIGNED((uintptr_t)dst, 4), "unaligned buffer address");
+ WARN(!IS_ALIGNED(count, 4), "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= (bus->buf_id_rx + 1) << 7;
+ ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_rx = (bus->buf_id_rx + 1) % 4;
+
+ return ret;
+}
+
+static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id, const void *src, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+	WARN(reg_id > 7, "chip only has 8 registers");
+ WARN(!IS_ALIGNED((uintptr_t)src, 4), "unaligned buffer address");
+ WARN(!IS_ALIGNED(count, 4), "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= bus->buf_id_tx << 7;
+ /* FIXME: discards 'const' qualifier for src */
+ ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
+
+ return ret;
+}
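+
+/* Example of the address composition above (using the register map from
+ * bus.h): for WFX_REG_IN_OUT_QUEUE (0x2), sdio_addr starts at 0x2 << 2
+ * == 0x8; with buf_id_tx == 3 the queue-mode write goes to
+ * 0x8 | (3 << 7) == 0x188, and buf_id_tx then advances to 4 (mod 32).
+ * Reads rotate buf_id_rx the same way, but modulo 4 and pre-incremented.
+ */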
+
+static void wfx_sdio_lock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_claim_host(bus->func);
+}
+
+static void wfx_sdio_unlock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_release_host(bus->func);
+}
+
+static void wfx_sdio_irq_handler(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ wfx_bh_request_rx(bus->core);
+}
+
+static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_claim_host(bus->func);
+ wfx_bh_request_rx(bus->core);
+ sdio_release_host(bus->func);
+ return IRQ_HANDLED;
+}
+
+static int wfx_sdio_irq_subscribe(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+ u32 flags;
+ int ret;
+ u8 cccr;
+
+ if (!bus->of_irq) {
+ sdio_claim_host(bus->func);
+ ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
+ sdio_release_host(bus->func);
+ return ret;
+ }
+
+ flags = irq_get_trigger_type(bus->of_irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
+ wfx_sdio_irq_handler_ext, flags, "wfx", bus);
+ if (ret)
+ return ret;
+ sdio_claim_host(bus->func);
+ cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
+ cccr |= BIT(0);
+ cccr |= BIT(bus->func->num);
+ sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
+ sdio_release_host(bus->func);
+ return 0;
+}
+
+static int wfx_sdio_irq_unsubscribe(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+ int ret;
+
+ if (bus->of_irq)
+ devm_free_irq(&bus->func->dev, bus->of_irq, bus);
+ sdio_claim_host(bus->func);
+ ret = sdio_release_irq(bus->func);
+ sdio_release_host(bus->func);
+ return ret;
+}
+
+static size_t wfx_sdio_align_size(void *priv, size_t size)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ return sdio_align_size(bus->func, size);
+}
+
+static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = {
+ .copy_from_io = wfx_sdio_copy_from_io,
+ .copy_to_io = wfx_sdio_copy_to_io,
+ .irq_subscribe = wfx_sdio_irq_subscribe,
+ .irq_unsubscribe = wfx_sdio_irq_unsubscribe,
+ .lock = wfx_sdio_lock,
+ .unlock = wfx_sdio_unlock,
+ .align_size = wfx_sdio_align_size,
+};
+
+static const struct of_device_id wfx_sdio_of_match[] = {
+ { .compatible = "silabs,wf200", .data = &pdata_wf200 },
+ { .compatible = "silabs,brd4001a", .data = &pdata_brd4001a },
+ { .compatible = "silabs,brd8022a", .data = &pdata_brd8022a },
+ { .compatible = "silabs,brd8023a", .data = &pdata_brd8023a },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+
+static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+{
+ const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev);
+ struct device_node *np = func->dev.of_node;
+ struct wfx_sdio_priv *bus;
+ int ret;
+
+ if (func->num != 1) {
+ dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n",
+ func->num);
+ return -ENODEV;
+ }
+
+ if (!pdata) {
+ dev_warn(&func->dev, "no compatible device found in DT\n");
+ return -ENODEV;
+ }
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->func = func;
+ bus->of_irq = irq_of_parse_and_map(np, 0);
+ sdio_set_drvdata(func, bus);
+
+ sdio_claim_host(func);
+ ret = sdio_enable_func(func);
+ /* A block size of 64 bytes is more efficient than 512 bytes for frame sizes < 4k */
+ sdio_set_block_size(func, 64);
+ sdio_release_host(func);
+ if (ret)
+ return ret;
+
+ bus->core = wfx_init_common(&func->dev, pdata, &wfx_sdio_hwbus_ops, bus);
+ if (!bus->core) {
+ ret = -EIO;
+ goto sdio_release;
+ }
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ goto sdio_release;
+
+ return 0;
+
+sdio_release:
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ return ret;
+}
+
+static void wfx_sdio_remove(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ wfx_release(bus->core);
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+}
+
+static const struct sdio_device_id wfx_sdio_ids[] = {
+ /* WF200 does not have official VID/PID */
+ { SDIO_DEVICE(0x0000, 0x1000) },
+ { },
+};
+MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
+
+struct sdio_driver wfx_sdio_driver = {
+ .name = "wfx-sdio",
+ .id_table = wfx_sdio_ids,
+ .probe = wfx_sdio_probe,
+ .remove = wfx_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .of_match_table = wfx_sdio_of_match,
+ }
+};
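
The queue-mode addressing used by wfx_sdio_copy_from_io()/wfx_sdio_copy_to_io() above encodes the register index in bits [4:2] of the SDIO address and a rolling buffer index from bit 7 upward, so the chip can detect lost or duplicated transfers; RX indexes wrap at 4 and TX indexes at 32. A minimal userspace sketch of the RX address computation follows. The value 2 for WFX_REG_IN_OUT_QUEUE is an assumption (in the driver it comes from hwio.h), and note that the driver only advances the index after a successful transfer:

```c
#include <stdint.h>
#include <stdio.h>

#define WFX_REG_IN_OUT_QUEUE 2 /* assumed value; defined in hwio.h in the driver */

/* Sketch: compute the SDIO address for a queue-mode read, as bus_sdio.c does.
 * Register index in bits [4:2], rolling buffer index from bit 7 upward.
 */
static unsigned int queue_rx_addr(unsigned int reg_id, uint8_t *buf_id_rx)
{
	unsigned int sdio_addr = reg_id << 2;

	if (reg_id == WFX_REG_IN_OUT_QUEUE) {
		sdio_addr |= (*buf_id_rx + 1) << 7;
		/* The driver advances the index only if the transfer succeeded */
		*buf_id_rx = (*buf_id_rx + 1) % 4; /* RX ids wrap at 4, TX ids at 32 */
	}
	return sdio_addr;
}

int main(void)
{
	uint8_t buf_id_rx = 0;
	int i;

	for (i = 0; i < 5; i++)
		printf("rx addr: %#x\n", queue_rx_addr(WFX_REG_IN_OUT_QUEUE, &buf_id_rx));
	return 0;
}
```
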
diff --git a/drivers/net/wireless/silabs/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c
new file mode 100644
index 000000000000..7fb1afb8ed31
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/bus_spi.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI interface.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, Sagrad Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+#define SET_WRITE 0x7FFF /* usage: AND operation */
+#define SET_READ 0x8000 /* usage: OR operation */
+
+static const struct wfx_platform_data pdata_wf200 = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/wf200.pds",
+ .use_rising_clk = true,
+};
+
+static const struct wfx_platform_data pdata_brd4001a = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/brd4001a.pds",
+ .use_rising_clk = true,
+};
+
+static const struct wfx_platform_data pdata_brd8022a = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/brd8022a.pds",
+ .use_rising_clk = true,
+};
+
+static const struct wfx_platform_data pdata_brd8023a = {
+ .file_fw = "wfx/wfm_wf200",
+ .file_pds = "wfx/brd8023a.pds",
+ .use_rising_clk = true,
+};
+
+struct wfx_spi_priv {
+ struct spi_device *func;
+ struct wfx_dev *core;
+ struct gpio_desc *gpio_reset;
+ bool need_swab;
+};
+
+/* The chip reads 16 bits of data at a time and places them directly into a (little endian) CPU
+ * register. So, the chip expects the byte order to be "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3"
+ * and BE is "B3 B2 B1 B0").
+ *
+ * A little endian host with bits_per_word == 16 does the right job natively. The code below adds
+ * support for big endian hosts and the commonly used 8-bit SPI mode.
+ */
+static int wfx_spi_copy_from_io(void *priv, unsigned int addr, void *dst, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2) | SET_READ;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .rx_buf = dst,
+ .len = count,
+ };
+ u16 *dst16 = dst;
+ int ret, i;
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+
+ cpu_to_le16s(&regaddr);
+ if (bus->need_swab)
+ swab16s(&regaddr);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&dst16[i]);
+ return ret;
+}
+
+static int wfx_spi_copy_to_io(void *priv, unsigned int addr, const void *src, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2);
+ /* FIXME: use a bounce buffer */
+ u16 *src16 = (void *)src;
+ int ret, i;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .tx_buf = src,
+ .len = count,
+ };
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+ WARN(regaddr & SET_READ, "bad addr or size overflow");
+
+ cpu_to_le16s(&regaddr);
+
+ /* Register address and CONFIG content always use 16bit big endian
+ * ("BADC" order)
+ */
+ if (bus->need_swab)
+ swab16s(&regaddr);
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+ return ret;
+}
+
+static void wfx_spi_lock(void *priv)
+{
+}
+
+static void wfx_spi_unlock(void *priv)
+{
+}
+
+static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ wfx_bh_request_rx(bus->core);
+ return IRQ_HANDLED;
+}
+
+static int wfx_spi_irq_subscribe(void *priv)
+{
+ struct wfx_spi_priv *bus = priv;
+ u32 flags;
+
+ flags = irq_get_trigger_type(bus->func->irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL,
+ wfx_spi_irq_handler, flags, "wfx", bus);
+}
+
+static int wfx_spi_irq_unsubscribe(void *priv)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ devm_free_irq(&bus->func->dev, bus->func->irq, bus);
+ return 0;
+}
+
+static size_t wfx_spi_align_size(void *priv, size_t size)
+{
+ /* Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned */
+ return ALIGN(size, 4);
+}
+
+static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = {
+ .copy_from_io = wfx_spi_copy_from_io,
+ .copy_to_io = wfx_spi_copy_to_io,
+ .irq_subscribe = wfx_spi_irq_subscribe,
+ .irq_unsubscribe = wfx_spi_irq_unsubscribe,
+ .lock = wfx_spi_lock,
+ .unlock = wfx_spi_unlock,
+ .align_size = wfx_spi_align_size,
+};
+
+static int wfx_spi_probe(struct spi_device *func)
+{
+ struct wfx_platform_data *pdata;
+ struct wfx_spi_priv *bus;
+ int ret;
+
+ if (!func->bits_per_word)
+ func->bits_per_word = 16;
+ ret = spi_setup(func);
+ if (ret)
+ return ret;
+ pdata = (struct wfx_platform_data *)spi_get_device_id(func)->driver_data;
+ if (!pdata) {
+ dev_err(&func->dev, "unable to retrieve driver data (please report)\n");
+ return -ENODEV;
+ }
+
+ /* The trace below is also displayed by spi_setup() if compiled with DEBUG */
+ dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n",
+ func->chip_select, func->mode, func->bits_per_word, func->max_speed_hz);
+ if (func->bits_per_word != 16 && func->bits_per_word != 8)
+ dev_warn(&func->dev, "unusual bits/word value: %d\n", func->bits_per_word);
+ if (func->max_speed_hz > 50000000)
+ dev_warn(&func->dev, "%dHz is a very high speed\n", func->max_speed_hz);
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ bus->func = func;
+ if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ bus->need_swab = true;
+ spi_set_drvdata(func, bus);
+
+ bus->gpio_reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(bus->gpio_reset))
+ return PTR_ERR(bus->gpio_reset);
+ if (!bus->gpio_reset) {
+ dev_warn(&func->dev, "gpio reset is not defined, trying to load firmware anyway\n");
+ } else {
+ gpiod_set_consumer_name(bus->gpio_reset, "wfx reset");
+ gpiod_set_value_cansleep(bus->gpio_reset, 1);
+ usleep_range(100, 150);
+ gpiod_set_value_cansleep(bus->gpio_reset, 0);
+ usleep_range(2000, 2500);
+ }
+
+ bus->core = wfx_init_common(&func->dev, pdata, &wfx_spi_hwbus_ops, bus);
+ if (!bus->core)
+ return -EIO;
+
+ return wfx_probe(bus->core);
+}
+
+static void wfx_spi_remove(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+ wfx_release(bus->core);
+}
+
+/* For dynamic driver binding, the kernel does not use OF to match the driver. It only uses the
+ * modalias, which is a copy of the 'compatible' DT node with the vendor prefix stripped.
+ */
+static const struct spi_device_id wfx_spi_id[] = {
+ { "wf200", (kernel_ulong_t)&pdata_wf200 },
+ { "brd4001a", (kernel_ulong_t)&pdata_brd4001a },
+ { "brd8022a", (kernel_ulong_t)&pdata_brd8022a },
+ { "brd8023a", (kernel_ulong_t)&pdata_brd8023a },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, wfx_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_spi_of_match[] = {
+ { .compatible = "silabs,wf200" },
+ { .compatible = "silabs,brd4001a" },
+ { .compatible = "silabs,brd8022a" },
+ { .compatible = "silabs,brd8023a" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
+#endif
+
+struct spi_driver wfx_spi_driver = {
+ .driver = {
+ .name = "wfx-spi",
+ .of_match_table = of_match_ptr(wfx_spi_of_match),
+ },
+ .id_table = wfx_spi_id,
+ .probe = wfx_spi_probe,
+ .remove = wfx_spi_remove,
+};
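
The command word built in wfx_spi_copy_from_io()/wfx_spi_copy_to_io() packs the register address into the top four bits, the 16-bit word count into the low bits, and uses bit 15 (SET_READ) to flag a read; hosts running 8-bit SPI words or big endian CPUs then swap each byte pair into the chip's "B1 B0 B3 B2" order (payload swapping applies only to WFX_REG_CONFIG content, per the comments above). A standalone sketch of that encoding, reusing the constants from the file above:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SET_READ 0x8000 /* bit 15 set: read access */

/* Sketch: build the 16-bit SPI command word as bus_spi.c does:
 * addr in bits [15:12], 16-bit word count in bits [11:0], read flag in bit 15.
 */
static uint16_t spi_cmd(unsigned int addr, size_t byte_count, int is_read)
{
	uint16_t cmd = (addr << 12) | (byte_count / 2);

	if (is_read)
		cmd |= SET_READ;
	return cmd;
}

/* Byte-pair swap applied when the host uses 8-bit SPI words or is big endian */
static uint16_t swap_bytes(uint16_t v)
{
	return (v << 8) | (v >> 8);
}

int main(void)
{
	uint16_t cmd = spi_cmd(1, 8, 1); /* read 8 bytes (4 words) from register 1 */

	printf("cmd=%#06x swapped=%#06x\n", cmd, swap_bytes(cmd));
	return 0;
}
```
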
diff --git a/drivers/net/wireless/silabs/wfx/data_rx.c b/drivers/net/wireless/silabs/wfx/data_rx.c
new file mode 100644
index 000000000000..e099a9e65bae
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_rx.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Data receiving implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "data_rx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+
+static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ int params, tid;
+
+ if (wfx_api_older_than(wvif->wdev, 3, 6))
+ return;
+
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+ tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ ieee80211_start_rx_ba_session_offl(vif, mgmt->sa, tid);
+ break;
+ case WLAN_ACTION_DELBA:
+ params = le16_to_cpu(mgmt->u.action.u.delba.params);
+ tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
+ ieee80211_stop_rx_ba_session_offl(vif, mgmt->sa, tid);
+ break;
+ }
+}
+
+void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ if (arg->status == HIF_STATUS_RX_FAIL_MIC)
+ hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
+ else if (arg->status)
+ goto drop;
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ dev_warn(wvif->wdev->dev, "malformed SDU received\n");
+ goto drop;
+ }
+
+ hdr->band = NL80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(arg->channel_number, hdr->band);
+
+ if (arg->rxed_rate >= 14) {
+ hdr->encoding = RX_ENC_HT;
+ hdr->rate_idx = arg->rxed_rate - 14;
+ } else if (arg->rxed_rate >= 4) {
+ hdr->rate_idx = arg->rxed_rate - 2;
+ } else {
+ hdr->rate_idx = arg->rxed_rate;
+ }
+
+ if (!arg->rcpi_rssi) {
+ hdr->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ dev_info(wvif->wdev->dev, "received frame without RSSI data\n");
+ }
+ hdr->signal = arg->rcpi_rssi / 2 - 110;
+ hdr->antenna = 0;
+
+ if (arg->encryp)
+ hdr->flag |= RX_FLAG_DECRYPTED;
+
+ /* Block ack negotiation is offloaded by the firmware. However, re-ordering must be done by
+ * mac80211.
+ */
+ if (ieee80211_is_action(frame->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ skb->len > IEEE80211_MIN_ACTION_SIZE) {
+ wfx_rx_handle_ba(wvif, mgmt);
+ goto drop;
+ }
+
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ return;
+
+drop:
+ dev_kfree_skb(skb);
+}
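
Two conversions in wfx_rx_cb() above deserve a closer look: the firmware rate index is remapped onto mac80211's 2GHz bitrate table (indexes 0-3 are DSSS/CCK, 6-13 are OFDM with slots 4-5 unused, and 14-21 are MCS0-7, matching the channel_names table in debug.c later in this patch), and the RCPI field is converted to dBm as rcpi / 2 - 110. A sketch of both conversions under that assumed rate map:

```c
#include <stdio.h>

/* Sketch of data_rx.c's conversions. Assumed firmware rate map:
 * 0-3 = 1/2/5.5/11M, 6-13 = 6..54M OFDM, 14-21 = MCS0-7 (slots 4-5 unused).
 */
static int hw_rate_to_rate_idx(int rxed_rate, int *is_ht)
{
	*is_ht = 0;
	if (rxed_rate >= 14) {   /* HT: rate_idx becomes the MCS index */
		*is_ht = 1;
		return rxed_rate - 14;
	}
	if (rxed_rate >= 4)      /* OFDM: skip the two unused slots */
		return rxed_rate - 2;
	return rxed_rate;        /* DSSS/CCK: identity mapping */
}

static int rcpi_to_dbm(unsigned int rcpi)
{
	return rcpi / 2 - 110;   /* RCPI counts half-dB steps offset by 110 */
}

int main(void)
{
	int ht;

	printf("fw rate 16 -> idx %d (ht=%d)\n", hw_rate_to_rate_idx(16, &ht), ht);
	printf("rcpi 120 -> %d dBm\n", rcpi_to_dbm(120));
	return 0;
}
```
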
diff --git a/drivers/net/wireless/silabs/wfx/data_rx.h b/drivers/net/wireless/silabs/wfx/data_rx.h
new file mode 100644
index 000000000000..cf708f16d602
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_rx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Data receiving implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_RX_H
+#define WFX_DATA_RX_H
+
+struct wfx_vif;
+struct sk_buff;
+struct wfx_hif_ind_rx;
+
+void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
new file mode 100644
index 000000000000..6a5e52a96d18
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_tx.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Data transmitting implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "data_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+#include "queue.h"
+#include "debug.h"
+#include "traces.h"
+#include "hif_tx_mib.h"
+
+static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate)
+{
+ struct ieee80211_supported_band *band;
+
+ if (rate->idx < 0)
+ return -1;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ if (rate->idx > 7) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
+ return rate->idx + 14;
+ }
+ /* The device only supports 2GHz; otherwise, the band information should be retrieved from
+ * ieee80211_tx_info
+ */
+ band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (rate->idx >= band->n_bitrates) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
+ return band->bitrates[rate->idx].hw_value;
+}
+
+/* TX policy cache implementation */
+
+static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy,
+ struct ieee80211_tx_rate *rates)
+{
+ struct wfx_dev *wdev = wvif->wdev;
+ int i, rateid;
+ u8 count;
+
+ WARN(rates[0].idx < 0, "invalid rate policy");
+ memset(policy, 0, sizeof(*policy));
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ if (rates[i].idx < 0)
+ break;
+ WARN_ON(rates[i].count > 15);
+ rateid = wfx_get_hw_rate(wdev, &rates[i]);
+ /* Pack two values in each byte of policy->rates */
+ count = rates[i].count;
+ if (rateid % 2)
+ count <<= 4;
+ policy->rates[rateid / 2] |= count;
+ }
+}
+
+static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b)
+{
+ return !memcmp(a->rates, b->rates, sizeof(a->rates));
+}
+
+static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted)
+{
+ struct wfx_tx_policy *it;
+
+ list_for_each_entry(it, &cache->used, link)
+ if (wfx_tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ list_for_each_entry(it, &cache->free, link)
+ if (wfx_tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ return -1;
+}
+
+static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
+{
+ ++entry->usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static int wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
+{
+ int ret = --entry->usage_count;
+
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew)
+{
+ int idx;
+ struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct wfx_tx_policy wanted;
+ struct wfx_tx_policy *entry;
+
+ wfx_tx_policy_build(wvif, &wanted, rates);
+
+ spin_lock_bh(&cache->lock);
+ if (list_empty(&cache->free)) {
+ WARN(1, "unable to get a valid Tx policy");
+ spin_unlock_bh(&cache->lock);
+ return HIF_TX_RETRY_POLICY_INVALID;
+ }
+ idx = wfx_tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ *renew = false;
+ } else {
+ /* If the policy is not found, create a new one using the oldest entry in the "free" list */
+ *renew = true;
+ entry = list_entry(cache->free.prev, struct wfx_tx_policy, link);
+ memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
+ entry->uploaded = false;
+ entry->usage_count = 0;
+ idx = entry - cache->cache;
+ }
+ wfx_tx_policy_use(cache, &cache->cache[idx]);
+ if (list_empty(&cache->free))
+ ieee80211_stop_queues(wvif->wdev->hw);
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
+{
+ int usage, locked;
+ struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
+
+ if (idx == HIF_TX_RETRY_POLICY_INVALID)
+ return;
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
+ if (locked && !usage)
+ ieee80211_wake_queues(wvif->wdev->hw);
+ spin_unlock_bh(&cache->lock);
+}
+
+static int wfx_tx_policy_upload(struct wfx_vif *wvif)
+{
+ struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache;
+ u8 tmp_rates[12];
+ int i, is_used;
+
+ do {
+ spin_lock_bh(&wvif->tx_policy_cache.lock);
+ for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
+ is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates));
+ if (!policies[i].uploaded && is_used)
+ break;
+ }
+ if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
+ policies[i].uploaded = true;
+ memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
+ spin_unlock_bh(&wvif->tx_policy_cache.lock);
+ wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
+ } else {
+ spin_unlock_bh(&wvif->tx_policy_cache.lock);
+ }
+ } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
+ return 0;
+}
+
+void wfx_tx_policy_upload_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work);
+
+ wfx_tx_policy_upload(wvif);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+void wfx_tx_policy_init(struct wfx_vif *wvif)
+{
+ struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+
+ for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+/* Tx implementation */
+
+static bool wfx_is_action_back(struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+ return true;
+}
+
+static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
+{
+ struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ const u8 *da = ieee80211_get_DA(hdr);
+
+ if (sta_priv && sta_priv->link_id)
+ return sta_priv->link_id;
+ if (vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (is_multicast_ether_addr(da))
+ return 0;
+ return HIF_LINK_ID_NOT_ASSOCIATED;
+}
+
+static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+{
+ int i;
+ bool finished;
+
+ /* Firmware is not able to mix rates with different flags */
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+ rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+ }
+
+ /* Sort rates and remove duplicates */
+ do {
+ finished = true;
+ for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+ if (rates[i + 1].idx == rates[i].idx &&
+ rates[i].idx != -1) {
+ rates[i].count += rates[i + 1].count;
+ if (rates[i].count > 15)
+ rates[i].count = 15;
+ rates[i + 1].idx = -1;
+ rates[i + 1].count = 0;
+
+ finished = false;
+ }
+ if (rates[i + 1].idx > rates[i].idx) {
+ swap(rates[i + 1], rates[i]);
+ finished = false;
+ }
+ }
+ } while (!finished);
+ /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[i].idx == 0)
+ break;
+ if (rates[i].idx == -1) {
+ rates[i].idx = 0;
+ rates[i].count = 8; /* == hw->max_rate_tries */
+ rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+ break;
+ }
+ }
+ /* All retries use long GI */
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+}
+
+static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
+{
+ bool tx_policy_renew = false;
+ u8 ret;
+
+ ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew);
+ if (ret == HIF_TX_RETRY_POLICY_INVALID)
+ dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
+
+ if (tx_policy_renew) {
+ wfx_tx_lock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work))
+ wfx_tx_unlock(wvif->wdev);
+ }
+ return ret;
+}
+
+static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info)
+{
+ if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS))
+ return HIF_FRAME_FORMAT_NON_HT;
+ else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD))
+ return HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
+ else
+ return HIF_FRAME_FORMAT_GF_HT_11N;
+}
+
+static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
+{
+ int mic_space;
+
+ if (!hw_key)
+ return 0;
+ if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return 0;
+ mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
+ return hw_key->icv_len + mic_space;
+}
+
+static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ struct wfx_hif_msg *hif_msg;
+ struct wfx_hif_req_tx *req;
+ struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int queue_id = skb_get_queue_mapping(skb);
+ size_t offset = (size_t)skb->data & 3;
+ int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset;
+
+ WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
+ wfx_tx_fixup_rates(tx_info->driver_rates);
+
+ /* From now on, tx_info->control is unusable */
+ memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+ /* Fill tx_priv */
+ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
+ tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
+
+ /* Fill hif_msg */
+ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ WARN(offset & 1, "attempt to transmit an unaligned frame");
+ skb_put(skb, tx_priv->icv_size);
+ skb_push(skb, wmsg_len);
+ memset(skb->data, 0, wmsg_len);
+ hif_msg = (struct wfx_hif_msg *)skb->data;
+ hif_msg->len = cpu_to_le16(skb->len);
+ hif_msg->id = HIF_REQ_ID_TX;
+ hif_msg->interface = wvif->id;
+ if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) {
+ dev_warn(wvif->wdev->dev,
+ "requested frame size (%d) is larger than maximum supported (%d)\n",
+ skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf));
+ skb_pull(skb, wmsg_len);
+ return -EIO;
+ }
+
+ /* Fill tx request */
+ req = (struct wfx_hif_req_tx *)hif_msg->body;
+ /* packet_id just needs to be unique on the device. 32 bits are more than necessary for that
+ * task, so we take advantage of them to add some extra data for debugging.
+ */
+ req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
+ req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
+ req->packet_id |= queue_id << 28;
+
+ req->fc_offset = offset;
+ /* Queue indexes are inverted between firmware and Linux */
+ req->queue_id = 3 - queue_id;
+ req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
+ req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
+ req->frame_format = wfx_tx_get_frame_format(tx_info);
+ if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ req->short_gi = 1;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ req->after_dtim = 1;
+
+ /* Auxiliary operations */
+ wfx_tx_queues_put(wvif, skb);
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ schedule_work(&wvif->update_tim_work);
+ wfx_bh_request_tx(wvif->wdev);
+ return 0;
+}
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+ struct ieee80211_sta *sta = control ? control->sta : NULL;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data);
+
+ BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room,
+ "struct tx_priv is too large");
+ WARN(skb->next || skb->prev, "skb is already member of a list");
+ /* control.vif can be NULL for injected frames */
+ if (tx_info->control.vif)
+ wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
+ else
+ wvif = wvif_iterate(wdev, NULL);
+ if (WARN_ON(!wvif))
+ goto drop;
+ /* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session
+ * management frame. The check below exists just in case.
+ */
+ if (wfx_is_action_back(hdr)) {
+ dev_info(wdev->dev, "drop BA action\n");
+ goto drop;
+ }
+ if (wfx_tx_inner(wvif, sta, skb))
+ goto drop;
+
+ return;
+
+drop:
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
+
+static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
+ struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
+ unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
+ req->fc_offset;
+
+ if (!wvif) {
+ pr_warn("vif associated with the skb does not exist anymore\n");
+ return;
+ }
+ wfx_tx_policy_put(wvif, req->retry_policy_index);
+ skb_pull(skb, offset);
+ ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
+}
+
+static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info,
+ const struct wfx_hif_cnf_tx *arg)
+{
+ struct ieee80211_tx_rate *rate;
+ int tx_count;
+ int i;
+
+ tx_count = arg->ack_failures;
+ if (!arg->status || arg->ack_failures)
+ tx_count += 1; /* Also report success */
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ rate = &tx_info->status.rates[i];
+ if (rate->idx < 0)
+ break;
+ if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
+ arg->ack_failures)
+ dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
+ rate->count, tx_count);
+ if (tx_count <= rate->count && tx_count &&
+ arg->txed_rate != wfx_get_hw_rate(wdev, rate))
+ dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate, wfx_get_hw_rate(wdev, rate));
+ if (tx_count > rate->count) {
+ tx_count -= rate->count;
+ } else if (!tx_count) {
+ rate->count = 0;
+ rate->idx = -1;
+ } else {
+ rate->count = tx_count;
+ tx_count = 0;
+ }
+ }
+ if (tx_count)
+ dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
+}
+
+void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg)
+{
+ const struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info;
+ struct wfx_vif *wvif;
+ struct sk_buff *skb;
+
+ skb = wfx_pending_get(wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
+ arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface);
+ WARN_ON(!wvif);
+ if (!wvif)
+ return;
+
+ /* Note that wfx_pending_get_pkt_us_delay() gets its data from tx_info */
+ _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
+ wfx_tx_fill_rates(wdev, tx_info, arg);
+ skb_trim(skb, skb->len - tx_priv->icv_size);
+
+ /* From now on, you can touch tx_info->status, but do not touch tx_priv anymore */
+ /* FIXME: use ieee80211_tx_info_clear_status() */
+ memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
+ memset(tx_info->pad, 0, sizeof(tx_info->pad));
+
+ if (!arg->status) {
+ tx_info->status.tx_time = le32_to_cpu(arg->media_delay) -
+ le32_to_cpu(arg->tx_queue_delay);
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
+ WARN(!arg->requeue, "incoherent status and result_flags");
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */
+ schedule_work(&wvif->update_tim_work);
+ }
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ }
+ wfx_skb_dtor(wvif, skb);
+}
+
+static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped)
+{
+ struct wfx_queue *queue;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (!(BIT(i) & queues))
+ continue;
+ queue = &wvif->tx_queue[i];
+ if (dropped)
+ wfx_tx_queue_drop(wvif, queue, dropped);
+ }
+ if (wvif->wdev->chip_frozen)
+ return;
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (!(BIT(i) & queues))
+ continue;
+ queue = &wvif->tx_queue[i];
+ if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue),
+ msecs_to_jiffies(1000)) <= 0)
+ dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?");
+ }
+}
+
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct sk_buff_head dropped;
+ struct wfx_vif *wvif;
+ struct wfx_hif_msg *hif;
+ struct sk_buff *skb;
+
+ skb_queue_head_init(&dropped);
+ if (vif) {
+ wvif = (struct wfx_vif *)vif->drv_priv;
+ wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
+ } else {
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
+ }
+ wfx_tx_flush(wdev);
+ if (wdev->chip_frozen)
+ wfx_pending_drop(wdev, &dropped);
+ while ((skb = skb_dequeue(&dropped)) != NULL) {
+ hif = (struct wfx_hif_msg *)skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb));
+ wfx_skb_dtor(wvif, skb);
+ }
+}
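
wfx_tx_policy_build() above packs two 4-bit retry counts into each byte of policy->rates, indexed by hardware rate (which is why counts are clamped to 15 elsewhere in the file): even rates occupy the low nibble, odd rates the high nibble. A minimal sketch of that nibble packing, assuming the count is already clamped:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the retry-policy packing used by wfx_tx_policy_build():
 * hardware rate N's retry count goes in the low nibble of rates[N / 2]
 * when N is even, and in the high nibble when N is odd.
 */
static void policy_set_count(uint8_t rates[12], int hw_rate, uint8_t count)
{
	if (hw_rate % 2)
		count <<= 4;
	rates[hw_rate / 2] |= count;
}

int main(void)
{
	uint8_t rates[12];

	memset(rates, 0, sizeof(rates));
	policy_set_count(rates, 14, 4); /* MCS0: 4 tries -> low nibble of byte 7 */
	policy_set_count(rates, 15, 2); /* MCS1: 2 tries -> high nibble of byte 7 */
	printf("rates[7] = %#04x\n", rates[7]); /* prints 0x24 */
	return 0;
}
```
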
diff --git a/drivers/net/wireless/silabs/wfx/data_tx.h b/drivers/net/wireless/silabs/wfx/data_tx.h
new file mode 100644
index 000000000000..983470705e4b
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/data_tx.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Data transmitting implementation.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_TX_H
+#define WFX_DATA_TX_H
+
+#include <linux/list.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+struct wfx_tx_priv;
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_tx_policy {
+ struct list_head link;
+ int usage_count;
+ u8 rates[12];
+ bool uploaded;
+};
+
+struct wfx_tx_policy_cache {
+ struct wfx_tx_policy cache[HIF_TX_RETRY_POLICY_MAX];
+ /* FIXME: use a tree and drop the hash from tx_policy */
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock;
+};
+
+struct wfx_tx_priv {
+ ktime_t xmit_timestamp;
+ unsigned char icv_size;
+};
+
+void wfx_tx_policy_init(struct wfx_vif *wvif);
+void wfx_tx_policy_upload_work(struct work_struct *work);
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb);
+void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop);
+
+static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return NULL;
+ tx_info = IEEE80211_SKB_CB(skb);
+ return (struct wfx_tx_priv *)tx_info->rate_driver_data;
+}
+
+static inline struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
+{
+ struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
+ struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
+
+ return req;
+}
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/debug.c b/drivers/net/wireless/silabs/wfx/debug.c
new file mode 100644
index 000000000000..e8265208f9a5
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/debug.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+
+#include "debug.h"
+#include "wfx.h"
+#include "sta.h"
+#include "main.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define CREATE_TRACE_POINTS
+#include "traces.h"
+
+static const struct trace_print_flags hif_msg_print_map[] = {
+ hif_msg_list,
+};
+
+static const struct trace_print_flags hif_mib_print_map[] = {
+ hif_mib_list,
+};
+
+static const struct trace_print_flags wfx_reg_print_map[] = {
+ wfx_reg_list,
+};
+
+static const char *get_symbol(unsigned long val, const struct trace_print_flags *symbol_array)
+{
+ int i;
+
+ for (i = 0; symbol_array[i].mask != -1; i++) {
+ if (val == symbol_array[i].mask)
+ return symbol_array[i].name;
+ }
+
+ return "unknown";
+}
+
+const char *wfx_get_hif_name(unsigned long id)
+{
+ return get_symbol(id, hif_msg_print_map);
+}
+
+const char *wfx_get_mib_name(unsigned long id)
+{
+ return get_symbol(id, hif_mib_print_map);
+}
+
+const char *wfx_get_reg_name(unsigned long id)
+{
+ return get_symbol(id, wfx_reg_print_map);
+}
+
+static int wfx_counters_show(struct seq_file *seq, void *v)
+{
+ int ret, i;
+ struct wfx_dev *wdev = seq->private;
+ struct wfx_hif_mib_extended_count_table counters[3];
+
+ for (i = 0; i < ARRAY_SIZE(counters); i++) {
+ ret = wfx_hif_get_counters_table(wdev, i, counters + i);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -EIO;
+ }
+
+ seq_printf(seq, "%-24s %12s %12s %12s\n", "", "global", "iface 0", "iface 1");
+
+#define PUT_COUNTER(name) \
+ seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \
+ le32_to_cpu(counters[2].count_##name), \
+ le32_to_cpu(counters[0].count_##name), \
+ le32_to_cpu(counters[1].count_##name))
+
+ PUT_COUNTER(tx_frames);
+ PUT_COUNTER(tx_frames_multicast);
+ PUT_COUNTER(tx_frames_success);
+ PUT_COUNTER(tx_frames_retried);
+ PUT_COUNTER(tx_frames_multi_retried);
+ PUT_COUNTER(tx_frames_failed);
+
+ PUT_COUNTER(ack_failed);
+ PUT_COUNTER(rts_success);
+ PUT_COUNTER(rts_failed);
+
+ PUT_COUNTER(rx_frames);
+ PUT_COUNTER(rx_frames_multicast);
+ PUT_COUNTER(rx_frames_success);
+ PUT_COUNTER(rx_frames_failed);
+ PUT_COUNTER(drop_plcp);
+ PUT_COUNTER(drop_fcs);
+ PUT_COUNTER(drop_no_key);
+ PUT_COUNTER(drop_decryption);
+ PUT_COUNTER(drop_tkip_mic);
+ PUT_COUNTER(drop_bip_mic);
+ PUT_COUNTER(drop_cmac_icv);
+ PUT_COUNTER(drop_cmac_replay);
+ PUT_COUNTER(drop_ccmp_replay);
+ PUT_COUNTER(drop_duplicate);
+
+ PUT_COUNTER(rx_bcn_miss);
+ PUT_COUNTER(rx_bcn_success);
+ PUT_COUNTER(rx_bcn_dtim);
+ PUT_COUNTER(rx_bcn_dtim_aid0_clr);
+ PUT_COUNTER(rx_bcn_dtim_aid0_set);
+
+#undef PUT_COUNTER
+
+ for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++)
+ seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "",
+ le32_to_cpu(counters[2].reserved[i]),
+ le32_to_cpu(counters[0].reserved[i]),
+ le32_to_cpu(counters[1].reserved[i]));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_counters);
+
+static const char * const channel_names[] = {
+ [0] = "1M",
+ [1] = "2M",
+ [2] = "5.5M",
+ [3] = "11M",
+ /* Entries 4 and 5 do not exist */
+ [6] = "6M",
+ [7] = "9M",
+ [8] = "12M",
+ [9] = "18M",
+ [10] = "24M",
+ [11] = "36M",
+ [12] = "48M",
+ [13] = "54M",
+ [14] = "MCS0",
+ [15] = "MCS1",
+ [16] = "MCS2",
+ [17] = "MCS3",
+ [18] = "MCS4",
+ [19] = "MCS5",
+ [20] = "MCS6",
+ [21] = "MCS7",
+};
+
+static int wfx_rx_stats_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct wfx_hif_rx_stats *st = &wdev->rx_stats;
+ int i;
+
+ mutex_lock(&wdev->rx_stats_lock);
+ seq_printf(seq, "Timestamp: %dus\n", st->date);
+ seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
+ le32_to_cpu(st->pwr_clk_freq), st->is_ext_pwr_clk ? "yes" : "no");
+ seq_printf(seq, "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
+ st->nb_rx_frame, st->per_total, st->throughput);
+ seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
+ seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
+ for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
+ if (channel_names[i])
+ seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
+ channel_names[i],
+ le32_to_cpu(st->nb_rx_by_rate[i]),
+ le16_to_cpu(st->per[i]),
+ (s16)le16_to_cpu(st->rssi[i]) / 100,
+ (s16)le16_to_cpu(st->snr[i]) / 100,
+ (s16)le16_to_cpu(st->cfo[i]));
+ }
+ mutex_unlock(&wdev->rx_stats_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
+
+static int wfx_tx_power_loop_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct wfx_hif_tx_power_loop_info *st = &wdev->tx_power_loop_info;
+ int tmp;
+
+ mutex_lock(&wdev->tx_power_loop_info_lock);
+ tmp = le16_to_cpu(st->tx_gain_dig);
+ seq_printf(seq, "Tx gain digital: %d\n", tmp);
+ tmp = le16_to_cpu(st->tx_gain_pa);
+ seq_printf(seq, "Tx gain PA: %d\n", tmp);
+ tmp = (s16)le16_to_cpu(st->target_pout);
+ seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
+ tmp = (s16)le16_to_cpu(st->p_estimation);
+ seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
+ tmp = le16_to_cpu(st->vpdet);
+ seq_printf(seq, "Vpdet: %d mV\n", tmp);
+ seq_printf(seq, "Measure index: %d\n", st->measurement_index);
+ mutex_unlock(&wdev->tx_power_loop_info_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop);
+
+static ssize_t wfx_send_pds_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+ char *buf;
+ int ret;
+
+ if (*ppos != 0) {
+ dev_dbg(wdev->dev, "PDS data must be written in one transaction");
+ return -EBUSY;
+ }
+ buf = memdup_user(user_buf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+ *ppos = *ppos + count;
+ ret = wfx_send_pds(wdev, buf, count);
+ kfree(buf);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static const struct file_operations wfx_send_pds_fops = {
+ .open = simple_open,
+ .write = wfx_send_pds_write,
+};
+
+struct dbgfs_hif_msg {
+ struct wfx_dev *wdev;
+ struct completion complete;
+ u8 reply[1024];
+ int ret;
+};
+
+static ssize_t wfx_send_hif_msg_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ struct wfx_dev *wdev = context->wdev;
+ struct wfx_hif_msg *request;
+
+ if (completion_done(&context->complete)) {
+ dev_dbg(wdev->dev, "read previous result before start a new one\n");
+ return -EBUSY;
+ }
+ if (count < sizeof(struct wfx_hif_msg))
+ return -EINVAL;
+
+ /* wfx_cmd_send() checks that the reply buffer is large enough, but does not return the
+ * precise length read. The user has to know how many bytes should be read. Filling the reply
+ * buffer with a memory pattern may help the user.
+ */
+ memset(context->reply, 0xFF, sizeof(context->reply));
+ request = memdup_user(user_buf, count);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
+ if (le16_to_cpu(request->len) != count) {
+ kfree(request);
+ return -EINVAL;
+ }
+ context->ret = wfx_cmd_send(wdev, request, context->reply, sizeof(context->reply), false);
+
+ kfree(request);
+ complete(&context->complete);
+ return count;
+}
+
+static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ int ret;
+
+ if (count > sizeof(context->reply))
+ return -EINVAL;
+ ret = wait_for_completion_interruptible(&context->complete);
+ if (ret)
+ return ret;
+ if (context->ret < 0)
+ return context->ret;
+ /* Be careful: write() expects a full message while read() only returns the payload */
+ if (copy_to_user(user_buf, context->reply, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int wfx_send_hif_msg_open(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+ if (!context)
+ return -ENOMEM;
+ context->wdev = inode->i_private;
+ init_completion(&context->complete);
+ file->private_data = context;
+ return 0;
+}
+
+static int wfx_send_hif_msg_release(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+
+ kfree(context);
+ return 0;
+}
+
+static const struct file_operations wfx_send_hif_msg_fops = {
+ .open = wfx_send_hif_msg_open,
+ .release = wfx_send_hif_msg_release,
+ .write = wfx_send_hif_msg_write,
+ .read = wfx_send_hif_msg_read,
+};
+
+int wfx_debug_init(struct wfx_dev *wdev)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
+ debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
+ debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
+ debugfs_create_file("tx_power_loop", 0444, d, wdev, &wfx_tx_power_loop_fops);
+ debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
+ debugfs_create_file("send_hif_msg", 0600, d, wdev, &wfx_send_hif_msg_fops);
+
+ return 0;
+}
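
The send_hif_msg node above implements a write-then-read protocol: userspace writes one complete HIF message whose little-endian length field matches the write size, then reads back as many reply bytes as it expects. A hypothetical userspace sketch of that sequence follows; the debugfs path and the 4-byte header layout (le16 len, u8 id, u8 interface) are assumptions inferred from the code, and the message id 0x0a with a 4-byte body matches HIF_REQ_ID_RESET from hif_api_cmd.h later in this patch:

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical use of the send_hif_msg debugfs node: write one complete HIF
 * message, then read the reply payload. Path and header layout are assumed.
 */
int main(void)
{
	uint8_t msg[8] = { 0 };  /* 4-byte header + 4-byte reset request body */
	uint8_t reply[64];
	int fd = open("/sys/kernel/debug/ieee80211/phy0/wfx/send_hif_msg", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	msg[0] = sizeof(msg); /* le16 len must equal the write() size */
	msg[2] = 0x0a;        /* message id (HIF_REQ_ID_RESET in hif_api_cmd.h) */
	if (write(fd, msg, sizeof(msg)) != (ssize_t)sizeof(msg))
		perror("write");
	else if (read(fd, reply, sizeof(reply)) < 0) /* blocks until the reply */
		perror("read");
	close(fd);
	return 0;
}
```
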
diff --git a/drivers/net/wireless/silabs/wfx/debug.h b/drivers/net/wireless/silabs/wfx/debug.h
new file mode 100644
index 000000000000..3840575e5e28
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, ST-Ericsson
+ */
+#ifndef WFX_DEBUG_H
+#define WFX_DEBUG_H
+
+struct wfx_dev;
+
+int wfx_debug_init(struct wfx_dev *wdev);
+
+const char *wfx_get_hif_name(unsigned long id);
+const char *wfx_get_mib_name(unsigned long id);
+const char *wfx_get_reg_name(unsigned long id);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/fwio.c b/drivers/net/wireless/silabs/wfx/fwio.c
new file mode 100644
index 000000000000..52c7f560b062
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/fwio.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitfield.h>
+
+#include "fwio.h"
+#include "wfx.h"
+#include "hwio.h"
+
+/* Addresses below are in SRAM area */
+#define WFX_DNLD_FIFO 0x09004000
+#define DNLD_BLOCK_SIZE 0x0400
+#define DNLD_FIFO_SIZE 0x8000 /* (32 * DNLD_BLOCK_SIZE) */
+/* Download Control Area (DCA) */
+#define WFX_DCA_IMAGE_SIZE 0x0900C000
+#define WFX_DCA_PUT 0x0900C004
+#define WFX_DCA_GET 0x0900C008
+#define WFX_DCA_HOST_STATUS 0x0900C00C
+#define HOST_READY 0x87654321
+#define HOST_INFO_READ 0xA753BD99
+#define HOST_UPLOAD_PENDING 0xABCDDCBA
+#define HOST_UPLOAD_COMPLETE 0xD4C64A99
+#define HOST_OK_TO_JUMP 0x174FC882
+#define WFX_DCA_NCP_STATUS 0x0900C010
+#define NCP_NOT_READY 0x12345678
+#define NCP_READY 0x87654321
+#define NCP_INFO_READY 0xBD53EF99
+#define NCP_DOWNLOAD_PENDING 0xABCDDCBA
+#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA
+#define NCP_AUTH_OK 0xD4C64A99
+#define NCP_AUTH_FAIL 0x174FC882
+#define NCP_PUB_KEY_RDY 0x7AB41D19
+#define WFX_DCA_FW_SIGNATURE 0x0900C014
+#define FW_SIGNATURE_SIZE 0x40
+#define WFX_DCA_FW_HASH 0x0900C054
+#define FW_HASH_SIZE 0x08
+#define WFX_DCA_FW_VERSION 0x0900C05C
+#define FW_VERSION_SIZE 0x04
+#define WFX_DCA_RESERVED 0x0900C060
+#define DCA_RESERVED_SIZE 0x20
+#define WFX_STATUS_INFO 0x0900C080
+#define WFX_BOOTLOADER_LABEL 0x0900C084
+#define BOOTLOADER_LABEL_SIZE 0x3C
+#define WFX_PTE_INFO 0x0900C0C0
+#define PTE_INFO_KEYSET_IDX 0x0D
+#define PTE_INFO_SIZE 0x10
+#define WFX_ERR_INFO 0x0900C0D0
+#define ERR_INVALID_SEC_TYPE 0x05
+#define ERR_SIG_VERIF_FAILED 0x0F
+#define ERR_AES_CTRL_KEY 0x10
+#define ERR_ECC_PUB_KEY 0x11
+#define ERR_MAC_KEY 0x18
+
+#define DCA_TIMEOUT 50 /* milliseconds */
+#define WAKEUP_TIMEOUT 200 /* milliseconds */
+
+static const char * const fwio_errors[] = {
+ [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
+ [ERR_SIG_VERIF_FAILED] = "Signature verification failed",
+ [ERR_AES_CTRL_KEY] = "AES control key not initialized",
+ [ERR_ECC_PUB_KEY] = "ECC public key not initialized",
+ [ERR_MAC_KEY] = "MAC key not initialized",
+};
+
+/* request_firmware() allocates data using vmalloc(), which is not compatible with underlying
+ * hardware that uses DMA. The function below detects this case and allocates a bounce buffer if
+ * necessary.
+ *
+ * Notice that, when in doubt, you can enable CONFIG_DEBUG_SG to ask the kernel to detect this
+ * problem at runtime (otherwise, the kernel fails silently).
+ *
+ * NOTE: it may also be possible to use 'pages' from struct firmware and avoid the bounce buffer
+ */
+static int wfx_sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf, size_t len)
+{
+ int ret;
+ const u8 *tmp;
+
+ if (!virt_addr_valid(buf)) {
+ tmp = kmemdup(buf, len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ } else {
+ tmp = buf;
+ }
+ ret = wfx_sram_buf_write(wdev, addr, tmp, len);
+ if (tmp != buf)
+ kfree(tmp);
+ return ret;
+}
+
+static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
+ const struct firmware **fw, int *file_offset)
+{
+ int keyset_file;
+ char filename[256];
+ const char *data;
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s_%02X.sec",
+ wdev->pdata.file_fw, keyset_chip);
+ ret = firmware_request_nowarn(fw, filename, wdev->dev);
+ if (ret) {
+ dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
+ filename, wdev->pdata.file_fw);
+ snprintf(filename, sizeof(filename), "%s.sec", wdev->pdata.file_fw);
+ ret = request_firmware(fw, filename, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load %s\n", filename);
+ *fw = NULL;
+ return ret;
+ }
+ }
+
+ data = (*fw)->data;
+ if (memcmp(data, "KEYSET", 6) != 0) {
+ /* Legacy firmware format */
+ *file_offset = 0;
+ keyset_file = 0x90;
+ } else {
+ *file_offset = 8;
+ keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]);
+ if (keyset_file < 0) {
+ dev_err(wdev->dev, "%s corrupted\n", filename);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -EINVAL;
+ }
+ }
+ if (keyset_file != keyset_chip) {
+ dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n",
+ keyset_file, keyset_chip);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENODEV;
+ }
+ wdev->keyset = keyset_file;
+ return 0;
+}
+
+static int wait_ncp_status(struct wfx_dev *wdev, u32 status)
+{
+ ktime_t now, start;
+ u32 reg;
+ int ret;
+
+ start = ktime_get();
+ for (;;) {
+ ret = wfx_sram_reg_read(wdev, WFX_DCA_NCP_STATUS, &reg);
+ if (ret < 0)
+ return -EIO;
+ now = ktime_get();
+ if (reg == status)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "chip answer after %lldus\n", ktime_us_delta(now, start));
+ else
+ dev_dbg(wdev->dev, "chip answer immediately\n");
+ return 0;
+}
+
+static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
+{
+ int ret;
+ u32 offs, bytes_done = 0;
+ ktime_t now, start;
+
+ if (len % DNLD_BLOCK_SIZE) {
+ dev_err(wdev->dev, "firmware size is not aligned. Buffer overrun will occur\n");
+ return -EIO;
+ }
+ offs = 0;
+ while (offs < len) {
+ start = ktime_get();
+ for (;;) {
+ now = ktime_get();
+ if (offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ ret = wfx_sram_reg_read(wdev, WFX_DCA_GET, &bytes_done);
+ if (ret < 0)
+ return ret;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "answer after %lldus\n", ktime_us_delta(now, start));
+
+ ret = wfx_sram_write_dma_safe(wdev, WFX_DNLD_FIFO + (offs % DNLD_FIFO_SIZE),
+ data + offs, DNLD_BLOCK_SIZE);
+ if (ret < 0)
+ return ret;
+
+ /* The device seems to not support writing 0 in this register during the first loop */
+ offs += DNLD_BLOCK_SIZE;
+ ret = wfx_sram_reg_write(wdev, WFX_DCA_PUT, offs);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void print_boot_status(struct wfx_dev *wdev)
+{
+ u32 reg;
+
+ wfx_sram_reg_read(wdev, WFX_STATUS_INFO, &reg);
+ if (reg == 0x12345678)
+ return;
+ wfx_sram_reg_read(wdev, WFX_ERR_INFO, &reg);
+ if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg])
+ dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]);
+ else
+ dev_info(wdev->dev, "secure boot: Error %#02x\n", reg);
+}
+
+static int load_firmware_secure(struct wfx_dev *wdev)
+{
+ const struct firmware *fw = NULL;
+ int header_size;
+ int fw_offset;
+ ktime_t start;
+ u8 *buf;
+ int ret;
+
+ BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE);
+ buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY);
+ ret = wait_ncp_status(wdev, NCP_INFO_READY);
+ if (ret)
+ goto error;
+
+ wfx_sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE);
+ buf[BOOTLOADER_LABEL_SIZE] = 0;
+ dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf);
+
+ wfx_sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE);
+ ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset);
+ if (ret)
+ goto error;
+ header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE;
+
+ wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ);
+ ret = wait_ncp_status(wdev, NCP_READY);
+ if (ret)
+ goto error;
+
+ wfx_sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); /* Fifo init */
+ wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00", FW_VERSION_SIZE);
+ wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset,
+ FW_SIGNATURE_SIZE);
+ wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_HASH, fw->data + fw_offset + FW_SIGNATURE_SIZE,
+ FW_HASH_SIZE);
+ wfx_sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size);
+ wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING);
+ ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING);
+ if (ret)
+ goto error;
+
+ start = ktime_get();
+ ret = upload_firmware(wdev, fw->data + header_size, fw->size - header_size);
+ if (ret)
+ goto error;
+ dev_dbg(wdev->dev, "firmware load after %lldus\n",
+ ktime_us_delta(ktime_get(), start));
+
+ wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE);
+ ret = wait_ncp_status(wdev, NCP_AUTH_OK);
+ /* Legacy ROM support */
+ if (ret < 0)
+ ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY);
+ if (ret < 0)
+ goto error;
+ wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP);
+
+error:
+ kfree(buf);
+ release_firmware(fw);
+ if (ret)
+ print_boot_status(wdev);
+ return ret;
+}
+
+static int init_gpr(struct wfx_dev *wdev)
+{
+ int ret, i;
+ static const struct {
+ int index;
+ u32 value;
+ } gpr_init[] = {
+ { 0x07, 0x208775 },
+ { 0x08, 0x2EC020 },
+ { 0x09, 0x3C3C3C },
+ { 0x0B, 0x322C44 },
+ { 0x0C, 0xA06497 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(gpr_init); i++) {
+ ret = wfx_igpr_reg_write(wdev, gpr_init[i].index, gpr_init[i].value);
+ if (ret < 0)
+ return ret;
+ dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index, gpr_init[i].value);
+ }
+ return 0;
+}
+
+int wfx_init_device(struct wfx_dev *wdev)
+{
+ int ret;
+ int hw_revision, hw_type;
+ int wakeup_timeout = 50; /* ms */
+ ktime_t now, start;
+ u32 reg;
+
+ reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD;
+ if (wdev->pdata.use_rising_clk)
+ reg |= CFG_CLK_RISE_EDGE;
+ ret = wfx_config_reg_write(wdev, reg);
+ if (ret < 0) {
+ dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n");
+ return -EIO;
+ }
+
+ ret = wfx_config_reg_read(wdev, &reg);
+ if (ret < 0) {
+ dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n");
+ return -EIO;
+ }
+ if (reg == 0 || reg == ~0) {
+ dev_err(wdev->dev, "chip mute. Bus configuration error or chip wasn't reset?\n");
+ return -EIO;
+ }
+ dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
+
+ hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
+ if (hw_revision == 0) {
+ dev_err(wdev->dev, "bad hardware revision number: %d\n", hw_revision);
+ return -ENODEV;
+ }
+ hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg);
+ if (hw_type == 1) {
+ dev_notice(wdev->dev, "development hardware detected\n");
+ wakeup_timeout = 2000;
+ }
+
+ ret = init_gpr(wdev);
+ if (ret < 0)
+ return ret;
+
+ ret = wfx_control_reg_write(wdev, CTRL_WLAN_WAKEUP);
+ if (ret < 0)
+ return -EIO;
+ start = ktime_get();
+ for (;;) {
+ ret = wfx_control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & CTRL_WLAN_READY)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) {
+ dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n");
+ return -ETIMEDOUT;
+ }
+ }
+ dev_dbg(wdev->dev, "chip wake up after %lldus\n", ktime_us_delta(now, start));
+
+ ret = wfx_config_reg_write_bits(wdev, CFG_CPU_RESET, 0);
+ if (ret < 0)
+ return ret;
+ ret = load_firmware_secure(wdev);
+ if (ret < 0)
+ return ret;
+ return wfx_config_reg_write_bits(wdev,
+ CFG_DIRECT_ACCESS_MODE |
+ CFG_IRQ_ENABLE_DATA |
+ CFG_IRQ_ENABLE_WRDY,
+ CFG_IRQ_ENABLE_DATA);
+}
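
upload_firmware() above streams the image through a 32 KiB circular FIFO in 1 KiB blocks: a block may only be written while it keeps the host strictly less than one full FIFO ahead of the chip's consumption counter (read back from WFX_DCA_GET), and write addresses wrap modulo DNLD_FIFO_SIZE. A sketch of that flow-control check with the constants from this file:

```c
#include <stdio.h>

#define DNLD_BLOCK_SIZE 0x0400
#define DNLD_FIFO_SIZE  0x8000 /* 32 * DNLD_BLOCK_SIZE */

/* Sketch of the upload_firmware() window check: the block at offset 'offs'
 * may be written only if it keeps the host strictly less than one FIFO ahead
 * of the chip's consumption counter 'bytes_done' (from WFX_DCA_GET).
 */
static int can_write_block(unsigned int offs, unsigned int bytes_done)
{
	return offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE;
}

int main(void)
{
	/* 30 blocks in flight: the 31st block may still be written -> 1 */
	printf("%d\n", can_write_block(30 * DNLD_BLOCK_SIZE, 0));
	/* 31 blocks in flight: must wait for WFX_DCA_GET to advance -> 0 */
	printf("%d\n", can_write_block(31 * DNLD_BLOCK_SIZE, 0));
	/* The write address itself wraps: WFX_DNLD_FIFO + (offs % DNLD_FIFO_SIZE) */
	return 0;
}
```
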
diff --git a/drivers/net/wireless/silabs/wfx/fwio.h b/drivers/net/wireless/silabs/wfx/fwio.h
new file mode 100644
index 000000000000..eeea61210eca
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/fwio.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_FWIO_H
+#define WFX_FWIO_H
+
+struct wfx_dev;
+
+int wfx_init_device(struct wfx_dev *wdev);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hif_api_cmd.h b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h
new file mode 100644
index 000000000000..8b91b1d4a46b
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
+/*
+ * WF200 hardware interface definitions
+ *
+ * Copyright (c) 2018-2020, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_CMD_H
+#define WFX_HIF_API_CMD_H
+
+#include "hif_api_general.h"
+
+enum wfx_hif_requests_ids {
+ HIF_REQ_ID_RESET = 0x0a,
+ HIF_REQ_ID_READ_MIB = 0x05,
+ HIF_REQ_ID_WRITE_MIB = 0x06,
+ HIF_REQ_ID_START_SCAN = 0x07,
+ HIF_REQ_ID_STOP_SCAN = 0x08,
+ HIF_REQ_ID_TX = 0x04,
+ HIF_REQ_ID_JOIN = 0x0b,
+ HIF_REQ_ID_SET_PM_MODE = 0x10,
+ HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
+ HIF_REQ_ID_ADD_KEY = 0x0c,
+ HIF_REQ_ID_REMOVE_KEY = 0x0d,
+ HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_REQ_ID_START = 0x17,
+ HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
+ HIF_REQ_ID_UPDATE_IE = 0x1b,
+ HIF_REQ_ID_MAP_LINK = 0x1c,
+};
+
+enum wfx_hif_confirmations_ids {
+ HIF_CNF_ID_RESET = 0x0a,
+ HIF_CNF_ID_READ_MIB = 0x05,
+ HIF_CNF_ID_WRITE_MIB = 0x06,
+ HIF_CNF_ID_START_SCAN = 0x07,
+ HIF_CNF_ID_STOP_SCAN = 0x08,
+ HIF_CNF_ID_TX = 0x04,
+ HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
+ HIF_CNF_ID_JOIN = 0x0b,
+ HIF_CNF_ID_SET_PM_MODE = 0x10,
+ HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
+ HIF_CNF_ID_ADD_KEY = 0x0c,
+ HIF_CNF_ID_REMOVE_KEY = 0x0d,
+ HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_CNF_ID_START = 0x17,
+ HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
+ HIF_CNF_ID_UPDATE_IE = 0x1b,
+ HIF_CNF_ID_MAP_LINK = 0x1c,
+};
+
+enum wfx_hif_indications_ids {
+ HIF_IND_ID_RX = 0x84,
+ HIF_IND_ID_SCAN_CMPL = 0x86,
+ HIF_IND_ID_JOIN_COMPLETE = 0x8f,
+ HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
+ HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
+ HIF_IND_ID_EVENT = 0x85
+};
+
+struct wfx_hif_req_reset {
+ u8 reset_stat:1;
+ u8 reset_all_int:1;
+ u8 reserved1:6;
+ u8 reserved2[3];
+} __packed;
+
+struct wfx_hif_cnf_reset {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_read_mib {
+ __le16 mib_id;
+ __le16 reserved;
+} __packed;
+
+struct wfx_hif_cnf_read_mib {
+ __le32 status;
+ __le16 mib_id;
+ __le16 length;
+ u8 mib_data[];
+} __packed;
+
+struct wfx_hif_req_write_mib {
+ __le16 mib_id;
+ __le16 length;
+ u8 mib_data[];
+} __packed;
+
+struct wfx_hif_cnf_write_mib {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_update_ie {
+ u8 beacon:1;
+ u8 probe_resp:1;
+ u8 probe_req:1;
+ u8 reserved1:5;
+ u8 reserved2;
+ __le16 num_ies;
+ u8 ie[];
+} __packed;
+
+struct wfx_hif_cnf_update_ie {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_ssid_def {
+ __le32 ssid_length;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+#define HIF_API_MAX_NB_SSIDS 2
+#define HIF_API_MAX_NB_CHANNELS 14
+
+struct wfx_hif_req_start_scan_alt {
+ u8 band;
+ u8 maintain_current_bss:1;
+ u8 periodic:1;
+ u8 reserved1:6;
+ u8 disallow_ps:1;
+ u8 reserved2:1;
+ u8 short_preamble:1;
+ u8 reserved3:5;
+ u8 max_transmit_rate;
+ __le16 periodic_interval;
+ u8 reserved4;
+ s8 periodic_rssi_thr;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssids;
+ u8 num_of_channels;
+ __le32 min_channel_time;
+ __le32 max_channel_time;
+ __le32 tx_power_level; /* signed value */
+ struct wfx_hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
+ u8 channel_list[];
+} __packed;
+
+struct wfx_hif_cnf_start_scan {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_cnf_stop_scan {
+ __le32 status;
+} __packed;
+
+enum wfx_hif_pm_mode_status {
+ HIF_PM_MODE_ACTIVE = 0x0,
+ HIF_PM_MODE_PS = 0x1,
+ HIF_PM_MODE_UNDETERMINED = 0x2
+};
+
+struct wfx_hif_ind_scan_cmpl {
+ __le32 status;
+ u8 pm_mode;
+ u8 num_channels_completed;
+ __le16 reserved;
+} __packed;
+
+enum wfx_hif_queue_id {
+ HIF_QUEUE_ID_BACKGROUND = 0x0,
+ HIF_QUEUE_ID_BESTEFFORT = 0x1,
+ HIF_QUEUE_ID_VIDEO = 0x2,
+ HIF_QUEUE_ID_VOICE = 0x3
+};
+
+enum wfx_hif_frame_format {
+ HIF_FRAME_FORMAT_NON_HT = 0x0,
+ HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 0x1,
+ HIF_FRAME_FORMAT_GF_HT_11N = 0x2
+};
+
+struct wfx_hif_req_tx {
+ /* packet_id is not interpreted by the device, so it is not necessary to declare it little
+ * endian
+ */
+ u32 packet_id;
+ u8 max_tx_rate;
+ u8 queue_id:2;
+ u8 peer_sta_id:4;
+ u8 reserved1:2;
+ u8 more:1;
+ u8 fc_offset:3;
+ u8 after_dtim:1;
+ u8 reserved2:3;
+ u8 start_exp:1;
+ u8 reserved3:3;
+ u8 retry_policy_index:4;
+ __le32 reserved4;
+ __le32 expire_time;
+ u8 frame_format:4;
+ u8 fec_coding:1;
+ u8 short_gi:1;
+ u8 reserved5:1;
+ u8 stbc:1;
+ u8 reserved6;
+ u8 aggregation:1;
+ u8 reserved7:7;
+ u8 reserved8;
+ u8 frame[];
+} __packed;
+
+enum wfx_hif_qos_ackplcy {
+ HIF_QOS_ACKPLCY_NORMAL = 0x0,
+ HIF_QOS_ACKPLCY_TXNOACK = 0x1,
+ HIF_QOS_ACKPLCY_NOEXPACK = 0x2,
+ HIF_QOS_ACKPLCY_BLCKACK = 0x3
+};
+
+struct wfx_hif_cnf_tx {
+ __le32 status;
+ /* packet_id is copied from struct wfx_hif_req_tx without being interpreted by the device, so
+ * it is not necessary to declare it little endian
+ */
+ u32 packet_id;
+ u8 txed_rate;
+ u8 ack_failures;
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
+ __le32 media_delay;
+ __le32 tx_queue_delay;
+} __packed;
+
+struct wfx_hif_cnf_multi_transmit {
+ u8 num_tx_confs;
+ u8 reserved[3];
+ struct wfx_hif_cnf_tx tx_conf_payload[];
+} __packed;
+
+enum wfx_hif_ri_flags_encrypt {
+ HIF_RI_FLAGS_UNENCRYPTED = 0x0,
+ HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1,
+ HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2,
+ HIF_RI_FLAGS_AES_ENCRYPTED = 0x3,
+ HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4
+};
+
+struct wfx_hif_ind_rx {
+ __le32 status;
+ u8 channel_number;
+ u8 reserved1;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
+ u8 encryp:3;
+ u8 in_aggr:1;
+ u8 first_aggr:1;
+ u8 last_aggr:1;
+ u8 defrag:1;
+ u8 beacon:1;
+ u8 tim:1;
+ u8 bitmap:1;
+ u8 match_ssid:1;
+ u8 match_bssid:1;
+ u8 more:1;
+ u8 reserved2:1;
+ u8 ht:1;
+ u8 stbc:1;
+ u8 match_uc_addr:1;
+ u8 match_mc_addr:1;
+ u8 match_bc_addr:1;
+ u8 key_type:1;
+ u8 key_index:4;
+ u8 reserved3:1;
+ u8 peer_sta_id:4;
+ u8 reserved4:2;
+ u8 reserved5:1;
+ u8 frame[];
+} __packed;
+
+struct wfx_hif_req_edca_queue_params {
+ u8 queue_id;
+ u8 reserved1;
+ u8 aifsn;
+ u8 reserved2;
+ __le16 cw_min;
+ __le16 cw_max;
+ __le16 tx_op_limit;
+ __le16 allowed_medium_time;
+ __le32 reserved3;
+} __packed;
+
+struct wfx_hif_cnf_edca_queue_params {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_join {
+ u8 infrastructure_bss_mode:1;
+ u8 reserved1:7;
+ u8 band;
+ u8 channel_number;
+ u8 reserved2;
+ u8 bssid[ETH_ALEN];
+ __le16 atim_window;
+ u8 short_preamble:1;
+ u8 reserved3:7;
+ u8 probe_for_join;
+ u8 reserved4;
+ u8 reserved5:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved6:4;
+ __le32 ssid_length;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ __le32 beacon_interval;
+ __le32 basic_rate_set;
+} __packed;
+
+struct wfx_hif_cnf_join {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_ind_join_complete {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_set_bss_params {
+ u8 lost_count_only:1;
+ u8 reserved:7;
+ u8 beacon_lost_count;
+ __le16 aid;
+ __le32 operational_rate_set;
+} __packed;
+
+struct wfx_hif_cnf_set_bss_params {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_set_pm_mode {
+ u8 enter_psm:1;
+ u8 reserved:6;
+ u8 fast_psm:1;
+ u8 fast_psm_idle_period;
+ u8 ap_psm_change_period;
+ u8 min_auto_ps_poll_period;
+} __packed;
+
+struct wfx_hif_cnf_set_pm_mode {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_ind_set_pm_mode_cmpl {
+ __le32 status;
+ u8 pm_mode;
+ u8 reserved[3];
+} __packed;
+
+struct wfx_hif_req_start {
+ u8 mode;
+ u8 band;
+ u8 channel_number;
+ u8 reserved1;
+ __le32 reserved2;
+ __le32 beacon_interval;
+ u8 dtim_period;
+ u8 short_preamble:1;
+ u8 reserved3:7;
+ u8 reserved4;
+ u8 ssid_length;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ __le32 basic_rate_set;
+} __packed;
+
+struct wfx_hif_cnf_start {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_beacon_transmit {
+ u8 enable_beaconing;
+ u8 reserved[3];
+} __packed;
+
+struct wfx_hif_cnf_beacon_transmit {
+ __le32 status;
+} __packed;
+
+#define HIF_LINK_ID_MAX 14
+#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1)
+
+struct wfx_hif_req_map_link {
+ u8 mac_addr[ETH_ALEN];
+ u8 unmap:1;
+ u8 mfpc:1;
+ u8 reserved:6;
+ u8 peer_sta_id;
+} __packed;
+
+struct wfx_hif_cnf_map_link {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_ind_suspend_resume_tx {
+ u8 resume:1;
+ u8 reserved1:2;
+ u8 bc_mc_only:1;
+ u8 reserved2:4;
+ u8 reserved3;
+ __le16 peer_sta_set;
+} __packed;
+
+#define MAX_KEY_ENTRIES 24
+#define HIF_API_WEP_KEY_DATA_SIZE 16
+#define HIF_API_TKIP_KEY_DATA_SIZE 16
+#define HIF_API_RX_MIC_KEY_SIZE 8
+#define HIF_API_TX_MIC_KEY_SIZE 8
+#define HIF_API_AES_KEY_DATA_SIZE 16
+#define HIF_API_WAPI_KEY_DATA_SIZE 16
+#define HIF_API_MIC_KEY_DATA_SIZE 16
+#define HIF_API_IGTK_KEY_DATA_SIZE 16
+#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8
+#define HIF_API_IPN_SIZE 8
+
+enum wfx_hif_key_type {
+ HIF_KEY_TYPE_WEP_DEFAULT = 0x0,
+ HIF_KEY_TYPE_WEP_PAIRWISE = 0x1,
+ HIF_KEY_TYPE_TKIP_GROUP = 0x2,
+ HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3,
+ HIF_KEY_TYPE_AES_GROUP = 0x4,
+ HIF_KEY_TYPE_AES_PAIRWISE = 0x5,
+ HIF_KEY_TYPE_WAPI_GROUP = 0x6,
+ HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7,
+ HIF_KEY_TYPE_IGTK_GROUP = 0x8,
+ HIF_KEY_TYPE_NONE = 0x9
+};
+
+struct wfx_hif_wep_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved;
+ u8 key_length;
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct wfx_hif_wep_group_key {
+ u8 key_id;
+ u8 key_length;
+ u8 reserved[2];
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct wfx_hif_tkip_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
+} __packed;
+
+struct wfx_hif_tkip_group_key {
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct wfx_hif_aes_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+} __packed;
+
+struct wfx_hif_aes_group_key {
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct wfx_hif_wapi_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 key_id;
+ u8 reserved;
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+} __packed;
+
+struct wfx_hif_wapi_group_key {
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+} __packed;
+
+struct wfx_hif_igtk_group_key {
+ u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 ipn[HIF_API_IPN_SIZE];
+} __packed;
+
+struct wfx_hif_req_add_key {
+ u8 type;
+ u8 entry_index;
+ u8 int_id:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ union {
+ struct wfx_hif_wep_pairwise_key wep_pairwise_key;
+ struct wfx_hif_wep_group_key wep_group_key;
+ struct wfx_hif_tkip_pairwise_key tkip_pairwise_key;
+ struct wfx_hif_tkip_group_key tkip_group_key;
+ struct wfx_hif_aes_pairwise_key aes_pairwise_key;
+ struct wfx_hif_aes_group_key aes_group_key;
+ struct wfx_hif_wapi_pairwise_key wapi_pairwise_key;
+ struct wfx_hif_wapi_group_key wapi_group_key;
+ struct wfx_hif_igtk_group_key igtk_group_key;
+ } key;
+} __packed;
+
+struct wfx_hif_cnf_add_key {
+ __le32 status;
+} __packed;
+
+struct wfx_hif_req_remove_key {
+ u8 entry_index;
+ u8 reserved[3];
+} __packed;
+
+struct wfx_hif_cnf_remove_key {
+ __le32 status;
+} __packed;
+
+enum wfx_hif_event_ind {
+ HIF_EVENT_IND_BSSLOST = 0x1,
+ HIF_EVENT_IND_BSSREGAINED = 0x2,
+ HIF_EVENT_IND_RCPI_RSSI = 0x3,
+ HIF_EVENT_IND_PS_MODE_ERROR = 0x4,
+ HIF_EVENT_IND_INACTIVITY = 0x5
+};
+
+enum wfx_hif_ps_mode_error {
+ HIF_PS_ERROR_NO_ERROR = 0,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2,
+ HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3,
+ HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4
+};
+
+struct wfx_hif_ind_event {
+ __le32 event_id;
+ union {
+ u8 rcpi_rssi;
+ __le32 ps_mode_error;
+ __le32 peer_sta_set;
+ } event_data;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hif_api_general.h b/drivers/net/wireless/silabs/wfx/hif_api_general.h
new file mode 100644
index 000000000000..4d400fdc2252
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_api_general.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */
+/*
+ * WF200 hardware interface definitions
+ *
+ * Copyright (c) 2018-2020, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_GENERAL_H
+#define WFX_HIF_API_GENERAL_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define HIF_ID_IS_INDICATION 0x80
+#define HIF_COUNTER_MAX 7
+
+struct wfx_hif_msg {
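+ /* len is the total message length in bytes, including this 4-byte header (see wfx_fill_header()) */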
+ __le16 len;
+ u8 id;
+ u8 reserved:1;
+ u8 interface:2;
+ u8 seqnum:3;
+ u8 encrypted:2;
+ u8 body[];
+} __packed;
+
+enum wfx_hif_general_requests_ids {
+ HIF_REQ_ID_CONFIGURATION = 0x09,
+ HIF_REQ_ID_CONTROL_GPIO = 0x26,
+ HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_REQ_ID_SL_CONFIGURE = 0x29,
+ HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_REQ_ID_PTA_SETTINGS = 0x2b,
+ HIF_REQ_ID_PTA_PRIORITY = 0x2c,
+ HIF_REQ_ID_PTA_STATE = 0x2d,
+ HIF_REQ_ID_SHUT_DOWN = 0x32,
+};
+
+enum wfx_hif_general_confirmations_ids {
+ HIF_CNF_ID_CONFIGURATION = 0x09,
+ HIF_CNF_ID_CONTROL_GPIO = 0x26,
+ HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_CNF_ID_SL_CONFIGURE = 0x29,
+ HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_CNF_ID_PTA_SETTINGS = 0x2b,
+ HIF_CNF_ID_PTA_PRIORITY = 0x2c,
+ HIF_CNF_ID_PTA_STATE = 0x2d,
+ HIF_CNF_ID_SHUT_DOWN = 0x32,
+};
+
+enum wfx_hif_general_indications_ids {
+ HIF_IND_ID_EXCEPTION = 0xe0,
+ HIF_IND_ID_STARTUP = 0xe1,
+ HIF_IND_ID_WAKEUP = 0xe2,
+ HIF_IND_ID_GENERIC = 0xe3,
+ HIF_IND_ID_ERROR = 0xe4,
+ HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
+};
+
+#define HIF_STATUS_SUCCESS (cpu_to_le32(0x0000))
+#define HIF_STATUS_FAIL (cpu_to_le32(0x0001))
+#define HIF_STATUS_INVALID_PARAMETER (cpu_to_le32(0x0002))
+#define HIF_STATUS_WARNING (cpu_to_le32(0x0003))
+#define HIF_STATUS_UNKNOWN_REQUEST (cpu_to_le32(0x0004))
+#define HIF_STATUS_RX_FAIL_DECRYPT (cpu_to_le32(0x0010))
+#define HIF_STATUS_RX_FAIL_MIC (cpu_to_le32(0x0011))
+#define HIF_STATUS_RX_FAIL_NO_KEY (cpu_to_le32(0x0012))
+#define HIF_STATUS_TX_FAIL_RETRIES (cpu_to_le32(0x0013))
+#define HIF_STATUS_TX_FAIL_TIMEOUT (cpu_to_le32(0x0014))
+#define HIF_STATUS_TX_FAIL_REQUEUE (cpu_to_le32(0x0015))
+#define HIF_STATUS_REFUSED (cpu_to_le32(0x0016))
+#define HIF_STATUS_BUSY (cpu_to_le32(0x0017))
+#define HIF_STATUS_SLK_SET_KEY_SUCCESS (cpu_to_le32(0x005A))
+#define HIF_STATUS_SLK_SET_KEY_ALREADY_BURNED (cpu_to_le32(0x006B))
+#define HIF_STATUS_SLK_SET_KEY_DISALLOWED_MODE (cpu_to_le32(0x007C))
+#define HIF_STATUS_SLK_SET_KEY_UNKNOWN_MODE (cpu_to_le32(0x008D))
+#define HIF_STATUS_SLK_NEGO_SUCCESS (cpu_to_le32(0x009E))
+#define HIF_STATUS_SLK_NEGO_FAILED (cpu_to_le32(0x00AF))
+#define HIF_STATUS_ROLLBACK_SUCCESS (cpu_to_le32(0x1234))
+#define HIF_STATUS_ROLLBACK_FAIL (cpu_to_le32(0x1256))
+
+enum wfx_hif_api_rate_index {
+ API_RATE_INDEX_B_1MBPS = 0,
+ API_RATE_INDEX_B_2MBPS = 1,
+ API_RATE_INDEX_B_5P5MBPS = 2,
+ API_RATE_INDEX_B_11MBPS = 3,
+ API_RATE_INDEX_PBCC_22MBPS = 4,
+ API_RATE_INDEX_PBCC_33MBPS = 5,
+ API_RATE_INDEX_G_6MBPS = 6,
+ API_RATE_INDEX_G_9MBPS = 7,
+ API_RATE_INDEX_G_12MBPS = 8,
+ API_RATE_INDEX_G_18MBPS = 9,
+ API_RATE_INDEX_G_24MBPS = 10,
+ API_RATE_INDEX_G_36MBPS = 11,
+ API_RATE_INDEX_G_48MBPS = 12,
+ API_RATE_INDEX_G_54MBPS = 13,
+ API_RATE_INDEX_N_6P5MBPS = 14,
+ API_RATE_INDEX_N_13MBPS = 15,
+ API_RATE_INDEX_N_19P5MBPS = 16,
+ API_RATE_INDEX_N_26MBPS = 17,
+ API_RATE_INDEX_N_39MBPS = 18,
+ API_RATE_INDEX_N_52MBPS = 19,
+ API_RATE_INDEX_N_58P5MBPS = 20,
+ API_RATE_INDEX_N_65MBPS = 21,
+ API_RATE_NUM_ENTRIES = 22
+};
+
+struct wfx_hif_ind_startup {
+ __le32 status;
+ __le16 hardware_id;
+ u8 opn[14];
+ u8 uid[8];
+ __le16 num_inp_ch_bufs;
+ __le16 size_inp_ch_buf;
+ u8 num_links_ap;
+ u8 num_interfaces;
+ u8 mac_addr[2][ETH_ALEN];
+ u8 api_version_minor;
+ u8 api_version_major;
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+ u8 firmware_build;
+ u8 firmware_minor;
+ u8 firmware_major;
+ u8 firmware_type;
+ u8 disabled_channel_list[2];
+ u8 region_sel_mode:4;
+ u8 reserved5:4;
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
+ __le32 supported_rate_mask;
+ u8 firmware_label[128];
+} __packed;
+
+struct wfx_hif_ind_wakeup {
+} __packed;
+
+struct wfx_hif_req_configuration {
+ __le16 length;
+ u8 pds_data[];
+} __packed;
+
+struct wfx_hif_cnf_configuration {
+ __le32 status;
+} __packed;
+
+enum wfx_hif_gpio_mode {
+ HIF_GPIO_MODE_D0 = 0x0,
+ HIF_GPIO_MODE_D1 = 0x1,
+ HIF_GPIO_MODE_OD0 = 0x2,
+ HIF_GPIO_MODE_OD1 = 0x3,
+ HIF_GPIO_MODE_TRISTATE = 0x4,
+ HIF_GPIO_MODE_TOGGLE = 0x5,
+ HIF_GPIO_MODE_READ = 0x6
+};
+
+struct wfx_hif_req_control_gpio {
+ u8 gpio_label;
+ u8 gpio_mode;
+} __packed;
+
+struct wfx_hif_cnf_control_gpio {
+ __le32 status;
+ __le32 value;
+} __packed;
+
+enum wfx_hif_generic_indication_type {
+ HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
+ HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
+ HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2,
+ HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO = 0x3,
+};
+
+struct wfx_hif_rx_stats {
+ __le32 nb_rx_frame;
+ __le32 nb_crc_frame;
+ __le32 per_total;
+ __le32 throughput;
+ __le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
+ __le16 per[API_RATE_NUM_ENTRIES];
+ __le16 snr[API_RATE_NUM_ENTRIES]; /* signed value */
+ __le16 rssi[API_RATE_NUM_ENTRIES]; /* signed value */
+ __le16 cfo[API_RATE_NUM_ENTRIES]; /* signed value */
+ __le32 date;
+ __le32 pwr_clk_freq;
+ u8 is_ext_pwr_clk;
+ s8 current_temp;
+} __packed;
+
+struct wfx_hif_tx_power_loop_info {
+ __le16 tx_gain_dig;
+ __le16 tx_gain_pa;
+ __le16 target_pout; /* signed value */
+ __le16 p_estimation; /* signed value */
+ __le16 vpdet;
+ u8 measurement_index;
+ u8 reserved;
+} __packed;
+
+struct wfx_hif_ind_generic {
+ __le32 type;
+ union {
+ struct wfx_hif_rx_stats rx_stats;
+ struct wfx_hif_tx_power_loop_info tx_power_loop_info;
+ } data;
+} __packed;
+
+enum wfx_hif_error {
+ HIF_ERROR_FIRMWARE_ROLLBACK = 0x00,
+ HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x01,
+ HIF_ERROR_SLK_OUTDATED_SESSION_KEY = 0x02,
+ HIF_ERROR_SLK_SESSION_KEY = 0x03,
+ HIF_ERROR_OOR_VOLTAGE = 0x04,
+ HIF_ERROR_PDS_PAYLOAD = 0x05,
+ HIF_ERROR_OOR_TEMPERATURE = 0x06,
+ HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE = 0x07,
+ HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED = 0x08,
+ HIF_ERROR_SLK_OVERFLOW = 0x09,
+ HIF_ERROR_SLK_DECRYPTION = 0x0a,
+ HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE = 0x0b,
+ HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW = 0x0c,
+ HIF_ERROR_HIF_RX_DATA_TOO_LARGE = 0x0e,
+ HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d,
+ HIF_ERROR_HIF_BUS = 0x0f,
+ HIF_ERROR_PDS_TESTFEATURE = 0x10,
+ HIF_ERROR_SLK_UNCONFIGURED = 0x11,
+};
+
+struct wfx_hif_ind_error {
+ __le32 type;
+ u8 data[];
+} __packed;
+
+struct wfx_hif_ind_exception {
+ __le32 type;
+ u8 data[];
+} __packed;
+
+enum wfx_hif_secure_link_state {
+ SEC_LINK_UNAVAILABLE = 0x0,
+ SEC_LINK_RESERVED = 0x1,
+ SEC_LINK_EVAL = 0x2,
+ SEC_LINK_ENFORCED = 0x3
+};
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hif_api_mib.h b/drivers/net/wireless/silabs/wfx/hif_api_mib.h
new file mode 100644
index 000000000000..7b68b83866c9
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_api_mib.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */
+/*
+ * WF200 hardware interface definitions
+ *
+ * Copyright (c) 2018-2020, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_MIB_H
+#define WFX_HIF_API_MIB_H
+
+#include "hif_api_general.h"
+
+#define HIF_API_IPV4_ADDRESS_SIZE 4
+#define HIF_API_IPV6_ADDRESS_SIZE 16
+
+enum wfx_hif_mib_ids {
+ HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
+ HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
+ HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
+ HIF_MIB_ID_CCA_CONFIG = 0x2003,
+ HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
+ HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
+ HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
+ HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
+ HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
+ HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
+ HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
+ HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
+ HIF_MIB_ID_RX_FILTER = 0x201B,
+ HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
+ HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
+ HIF_MIB_ID_TSF_COUNTER = 0x2031,
+ HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
+ HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
+ HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
+ HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
+ HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041,
+ HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
+ HIF_MIB_ID_SLOT_TIME = 0x2045,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
+ HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
+ HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
+ HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
+ HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
+ HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
+ HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
+ HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
+ HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
+ HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
+ HIF_MIB_ID_BEACON_STATS = 0x2056,
+};
+
+enum wfx_hif_op_power_mode {
+ HIF_OP_POWER_MODE_ACTIVE = 0x0,
+ HIF_OP_POWER_MODE_DOZE = 0x1,
+ HIF_OP_POWER_MODE_QUIESCENT = 0x2
+};
+
+struct wfx_hif_mib_gl_operational_power_mode {
+ u8 power_mode:4;
+ u8 reserved1:3;
+ u8 wup_ind_activation:1;
+ u8 reserved2[3];
+} __packed;
+
+struct wfx_hif_mib_gl_set_multi_msg {
+ u8 enable_multi_tx_conf:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum wfx_hif_arp_ns_frame_treatment {
+ HIF_ARP_NS_FILTERING_DISABLE = 0x0,
+ HIF_ARP_NS_FILTERING_ENABLE = 0x1,
+ HIF_ARP_NS_REPLY_ENABLE = 0x2
+};
+
+struct wfx_hif_mib_arp_ip_addr_table {
+ u8 condition_idx;
+ u8 arp_enable;
+ u8 reserved[2];
+ u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct wfx_hif_mib_rx_filter {
+ u8 reserved1:1;
+ u8 bssid_filter:1;
+ u8 reserved2:1;
+ u8 fwd_probe_req:1;
+ u8 keep_alive_filter:1;
+ u8 reserved3:3;
+ u8 reserved4[3];
+} __packed;
+
+struct wfx_hif_ie_table_entry {
+ u8 ie_id;
+ u8 has_changed:1;
+ u8 no_longer:1;
+ u8 has_appeared:1;
+ u8 reserved:1;
+ u8 num_match_data:4;
+ u8 oui[3];
+ u8 match_data[3];
+} __packed;
+
+struct wfx_hif_mib_bcn_filter_table {
+ __le32 num_of_info_elmts;
+ struct wfx_hif_ie_table_entry ie_table[];
+} __packed;
+
+enum wfx_hif_beacon_filter {
+ HIF_BEACON_FILTER_DISABLE = 0x0,
+ HIF_BEACON_FILTER_ENABLE = 0x1,
+ HIF_BEACON_FILTER_AUTO_ERP = 0x2
+};
+
+struct wfx_hif_mib_bcn_filter_enable {
+ __le32 enable;
+ __le32 bcn_count;
+} __packed;
+
+struct wfx_hif_mib_extended_count_table {
+ __le32 count_drop_plcp;
+ __le32 count_drop_fcs;
+ __le32 count_tx_frames;
+ __le32 count_rx_frames;
+ __le32 count_rx_frames_failed;
+ __le32 count_drop_decryption;
+ __le32 count_drop_tkip_mic;
+ __le32 count_drop_no_key;
+ __le32 count_tx_frames_multicast;
+ __le32 count_tx_frames_success;
+ __le32 count_tx_frames_failed;
+ __le32 count_tx_frames_retried;
+ __le32 count_tx_frames_multi_retried;
+ __le32 count_drop_duplicate;
+ __le32 count_rts_success;
+ __le32 count_rts_failed;
+ __le32 count_ack_failed;
+ __le32 count_rx_frames_multicast;
+ __le32 count_rx_frames_success;
+ __le32 count_drop_cmac_icv;
+ __le32 count_drop_cmac_replay;
+ __le32 count_drop_ccmp_replay;
+ __le32 count_drop_bip_mic;
+ __le32 count_rx_bcn_success;
+ __le32 count_rx_bcn_miss;
+ __le32 count_rx_bcn_dtim;
+ __le32 count_rx_bcn_dtim_aid0_clr;
+ __le32 count_rx_bcn_dtim_aid0_set;
+ __le32 reserved[12];
+} __packed;
+
+struct wfx_hif_mib_count_table {
+ __le32 count_drop_plcp;
+ __le32 count_drop_fcs;
+ __le32 count_tx_frames;
+ __le32 count_rx_frames;
+ __le32 count_rx_frames_failed;
+ __le32 count_drop_decryption;
+ __le32 count_drop_tkip_mic;
+ __le32 count_drop_no_key;
+ __le32 count_tx_frames_multicast;
+ __le32 count_tx_frames_success;
+ __le32 count_tx_frames_failed;
+ __le32 count_tx_frames_retried;
+ __le32 count_tx_frames_multi_retried;
+ __le32 count_drop_duplicate;
+ __le32 count_rts_success;
+ __le32 count_rts_failed;
+ __le32 count_ack_failed;
+ __le32 count_rx_frames_multicast;
+ __le32 count_rx_frames_success;
+ __le32 count_drop_cmac_icv;
+ __le32 count_drop_cmac_replay;
+ __le32 count_drop_ccmp_replay;
+ __le32 count_drop_bip_mic;
+} __packed;
+
+struct wfx_hif_mib_mac_address {
+ u8 mac_addr[ETH_ALEN];
+ __le16 reserved;
+} __packed;
+
+struct wfx_hif_mib_wep_default_key_id {
+ u8 wep_default_key_id;
+ u8 reserved[3];
+} __packed;
+
+struct wfx_hif_mib_dot11_rts_threshold {
+ __le32 threshold;
+} __packed;
+
+struct wfx_hif_mib_slot_time {
+ __le32 slot_time;
+} __packed;
+
+struct wfx_hif_mib_current_tx_power_level {
+ __le32 power_level; /* signed value */
+} __packed;
+
+struct wfx_hif_mib_non_erp_protection {
+ u8 use_cts_to_self:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum wfx_hif_tmplt {
+ HIF_TMPLT_PRBREQ = 0x0,
+ HIF_TMPLT_BCN = 0x1,
+ HIF_TMPLT_NULL = 0x2,
+ HIF_TMPLT_QOSNUL = 0x3,
+ HIF_TMPLT_PSPOLL = 0x4,
+ HIF_TMPLT_PRBRES = 0x5,
+ HIF_TMPLT_ARP = 0x6,
+ HIF_TMPLT_NA = 0x7
+};
+
+#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
+
+struct wfx_hif_mib_template_frame {
+ u8 frame_type;
+ u8 init_rate:7;
+ u8 mode:1;
+ __le16 frame_length;
+ u8 frame[];
+} __packed;
+
+struct wfx_hif_mib_beacon_wake_up_period {
+ u8 wakeup_period_min;
+ u8 receive_dtim:1;
+ u8 reserved1:7;
+ u8 wakeup_period_max;
+ u8 reserved2;
+} __packed;
+
+struct wfx_hif_mib_rcpi_rssi_threshold {
+ u8 detection:1;
+ u8 rcpi_rssi:1;
+ u8 upperthresh:1;
+ u8 lowerthresh:1;
+ u8 reserved:4;
+ u8 lower_threshold;
+ u8 upper_threshold;
+ u8 rolling_average_count;
+} __packed;
+
+#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16
+
+struct wfx_hif_mib_block_ack_policy {
+ u8 block_ack_tx_tid_policy;
+ u8 reserved1;
+ u8 block_ack_rx_tid_policy;
+ u8 block_ack_rx_max_buffer_size;
+} __packed;
+
+enum wfx_hif_mpdu_start_spacing {
+ HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
+ HIF_MPDU_START_SPACING_QUARTER = 0x1,
+ HIF_MPDU_START_SPACING_HALF = 0x2,
+ HIF_MPDU_START_SPACING_ONE = 0x3,
+ HIF_MPDU_START_SPACING_TWO = 0x4,
+ HIF_MPDU_START_SPACING_FOUR = 0x5,
+ HIF_MPDU_START_SPACING_EIGHT = 0x6,
+ HIF_MPDU_START_SPACING_SIXTEEN = 0x7
+};
+
+struct wfx_hif_mib_set_association_mode {
+ u8 preambtype_use:1;
+ u8 mode:1;
+ u8 rateset:1;
+ u8 spacing:1;
+ u8 reserved1:4;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 greenfield:1;
+ u8 reserved3:7;
+ u8 mpdu_start_spacing;
+ __le32 basic_rate_set;
+} __packed;
+
+struct wfx_hif_mib_set_uapsd_information {
+ u8 trig_bckgrnd:1;
+ u8 trig_be:1;
+ u8 trig_video:1;
+ u8 trig_voice:1;
+ u8 reserved1:4;
+ u8 deliv_bckgrnd:1;
+ u8 deliv_be:1;
+ u8 deliv_video:1;
+ u8 deliv_voice:1;
+ u8 reserved2:4;
+ __le16 min_auto_trigger_interval;
+ __le16 max_auto_trigger_interval;
+ __le16 auto_trigger_step;
+} __packed;
+
+struct wfx_hif_tx_rate_retry_policy {
+ u8 policy_index;
+ u8 short_retry_count;
+ u8 long_retry_count;
+ u8 first_rate_sel:2;
+ u8 terminate:1;
+ u8 count_init:1;
+ u8 reserved1:4;
+ u8 rate_recovery_count;
+ u8 reserved2[3];
+ u8 rates[12];
+} __packed;
+
+#define HIF_TX_RETRY_POLICY_MAX 15
+#define HIF_TX_RETRY_POLICY_INVALID HIF_TX_RETRY_POLICY_MAX
+
+struct wfx_hif_mib_set_tx_rate_retry_policy {
+ u8 num_tx_rate_policies;
+ u8 reserved[3];
+ struct wfx_hif_tx_rate_retry_policy tx_rate_retry_policy[];
+} __packed;
+
+struct wfx_hif_mib_protected_mgmt_policy {
+ u8 pmf_enable:1;
+ u8 unpmf_allowed:1;
+ u8 host_enc_auth_frames:1;
+ u8 reserved1:5;
+ u8 reserved2[3];
+} __packed;
+
+struct wfx_hif_mib_keep_alive_period {
+ __le16 keep_alive_period;
+ u8 reserved[2];
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hif_rx.c b/drivers/net/wireless/silabs/wfx/hif_rx.c
new file mode 100644
index 000000000000..64ca8acb8e4f
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_rx.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handling of the chip-to-host events (aka indications) of the hardware API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_rx.h"
+#include "wfx.h"
+#include "scan.h"
+#include "bh.h"
+#include "sta.h"
+#include "data_rx.h"
+#include "hif_api_cmd.h"
+
+static int wfx_hif_generic_confirm(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ /* All confirm messages start with status */
+ int status = le32_to_cpup((__le32 *)buf);
+ int cmd = hif->id;
+ int len = le16_to_cpu(hif->len) - 4; /* drop header */
+
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+
+ if (!wdev->hif_cmd.buf_send) {
+ dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd);
+ return -EINVAL;
+ }
+
+ if (cmd != wdev->hif_cmd.buf_send->id) {
+ dev_warn(wdev->dev, "chip response mismatch request: 0x%.2x vs 0x%.2x\n",
+ cmd, wdev->hif_cmd.buf_send->id);
+ return -EINVAL;
+ }
+
+ if (wdev->hif_cmd.buf_recv) {
+ if (wdev->hif_cmd.len_recv >= len && len > 0)
+ memcpy(wdev->hif_cmd.buf_recv, buf, len);
+ else
+ status = -ENOMEM;
+ }
+ wdev->hif_cmd.ret = status;
+
+ complete(&wdev->hif_cmd.done);
+ return status;
+}
+
+static int wfx_hif_tx_confirm(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_cnf_tx *body = buf;
+
+ wfx_tx_confirm_cb(wdev, body);
+ return 0;
+}
+
+static int wfx_hif_multi_tx_confirm(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_cnf_multi_transmit *body = buf;
+ int i;
+
+ WARN(body->num_tx_confs <= 0, "corrupted message");
+ for (i = 0; i < body->num_tx_confs; i++)
+ wfx_tx_confirm_cb(wdev, &body->tx_conf_payload[i]);
+ return 0;
+}
+
+static int wfx_hif_startup_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_ind_startup *body = buf;
+
+ if (body->status || body->firmware_type > 4) {
+ dev_err(wdev->dev, "received invalid startup indication");
+ return -EINVAL;
+ }
+ memcpy(&wdev->hw_caps, body, sizeof(struct wfx_hif_ind_startup));
+ complete(&wdev->firmware_ready);
+ return 0;
+}
+
+static int wfx_hif_wakeup_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ if (!wdev->pdata.gpio_wakeup || gpiod_get_value(wdev->pdata.gpio_wakeup) == 0) {
+ dev_warn(wdev->dev, "unexpected wake-up indication\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int wfx_hif_receive_indication(struct wfx_dev *wdev, const struct wfx_hif_msg *hif,
+ const void *buf, struct sk_buff *skb)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ const struct wfx_hif_ind_rx *body = buf;
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+ skb_pull(skb, sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_ind_rx));
+ wfx_rx_cb(wvif, body, skb);
+
+ return 0;
+}
+
+static int wfx_hif_event_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ const struct wfx_hif_ind_event *body = buf;
+ int type = le32_to_cpu(body->event_id);
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+
+ switch (type) {
+ case HIF_EVENT_IND_RCPI_RSSI:
+ wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi);
+ break;
+ case HIF_EVENT_IND_BSSLOST:
+ schedule_delayed_work(&wvif->beacon_loss_work, 0);
+ break;
+ case HIF_EVENT_IND_BSSREGAINED:
+ cancel_delayed_work(&wvif->beacon_loss_work);
+ dev_dbg(wdev->dev, "ignore BSSREGAINED indication\n");
+ break;
+ case HIF_EVENT_IND_PS_MODE_ERROR:
+ dev_warn(wdev->dev, "error while processing power save request: %d\n",
+ le32_to_cpu(body->event_data.ps_mode_error));
+ break;
+ default:
+ dev_warn(wdev->dev, "unhandled event indication: %.2x\n", type);
+ break;
+ }
+ return 0;
+}
+
+static int wfx_hif_pm_mode_complete_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+ complete(&wvif->set_pm_mode_complete);
+
+ return 0;
+}
+
+static int wfx_hif_scan_complete_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ const struct wfx_hif_ind_scan_cmpl *body = buf;
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+
+ wfx_scan_complete(wvif, body->num_channels_completed);
+
+ return 0;
+}
+
+static int wfx_hif_join_complete_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+ dev_warn(wdev->dev, "unattended JoinCompleteInd\n");
+
+ return 0;
+}
+
+static int wfx_hif_suspend_resume_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_ind_suspend_resume_tx *body = buf;
+ struct wfx_vif *wvif;
+
+ if (body->bc_mc_only) {
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+ if (body->resume)
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
+ } else {
+ WARN(body->peer_sta_set, "misunderstood indication");
+ WARN(hif->interface != 2, "misunderstood indication");
+ if (body->resume)
+ wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP);
+ }
+
+ return 0;
+}
+
+static int wfx_hif_generic_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_ind_generic *body = buf;
+ int type = le32_to_cpu(body->type);
+
+ switch (type) {
+ case HIF_GENERIC_INDICATION_TYPE_RAW:
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_STRING:
+ dev_info(wdev->dev, "firmware says: %s\n", (char *)&body->data);
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
+ mutex_lock(&wdev->rx_stats_lock);
+ /* Older firmware sends a generic indication alongside the Rx stats */
+ if (!wfx_api_older_than(wdev, 1, 4))
+ dev_info(wdev->dev, "Rx test ongoing. Temperature: %d degrees C\n",
+ body->data.rx_stats.current_temp);
+ memcpy(&wdev->rx_stats, &body->data.rx_stats, sizeof(wdev->rx_stats));
+ mutex_unlock(&wdev->rx_stats_lock);
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO:
+ mutex_lock(&wdev->tx_power_loop_info_lock);
+ memcpy(&wdev->tx_power_loop_info, &body->data.tx_power_loop_info,
+ sizeof(wdev->tx_power_loop_info));
+ mutex_unlock(&wdev->tx_power_loop_info_lock);
+ return 0;
+ default:
+ dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n", type);
+ return -EIO;
+ }
+}
+
+static const struct {
+ int val;
+ const char *str;
+ bool has_param;
+} hif_errors[] = {
+ { HIF_ERROR_FIRMWARE_ROLLBACK,
+ "rollback status" },
+ { HIF_ERROR_FIRMWARE_DEBUG_ENABLED,
+ "debug feature enabled" },
+ { HIF_ERROR_PDS_PAYLOAD,
+ "PDS version is not supported" },
+ { HIF_ERROR_PDS_TESTFEATURE,
+ "PDS ask for an unknown test mode" },
+ { HIF_ERROR_OOR_VOLTAGE,
+ "out-of-range power supply voltage", true },
+ { HIF_ERROR_OOR_TEMPERATURE,
+ "out-of-range temperature", true },
+ { HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE,
+ "secure link does not expect request during key exchange" },
+ { HIF_ERROR_SLK_SESSION_KEY,
+ "secure link session key is invalid" },
+ { HIF_ERROR_SLK_OVERFLOW,
+ "secure link overflow" },
+ { HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE,
+ "secure link messages list does not match message encryption" },
+ { HIF_ERROR_SLK_UNCONFIGURED,
+ "secure link not yet configured" },
+ { HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW,
+ "bus clock is too slow (<1kHz)" },
+ { HIF_ERROR_HIF_RX_DATA_TOO_LARGE,
+ "HIF message too large" },
+ /* The following errors only exist in old firmware versions: */
+ { HIF_ERROR_HIF_TX_QUEUE_FULL,
+ "HIF messages queue is full" },
+ { HIF_ERROR_HIF_BUS,
+ "HIF bus" },
+ { HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED,
+ "secure link does not support multi-tx confirmations" },
+ { HIF_ERROR_SLK_OUTDATED_SESSION_KEY,
+ "secure link session key is outdated" },
+ { HIF_ERROR_SLK_DECRYPTION,
+ "secure link params (nonce or tag) mismatch" },
+};
+
+static int wfx_hif_error_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_ind_error *body = buf;
+ int type = le32_to_cpu(body->type);
+ int param = (s8)body->data[0];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hif_errors); i++)
+ if (type == hif_errors[i].val)
+ break;
+ if (i < ARRAY_SIZE(hif_errors)) {
+ if (hif_errors[i].has_param)
+ dev_err(wdev->dev, "asynchronous error: %s: %d\n",
+ hif_errors[i].str, param);
+ else
+ dev_err(wdev->dev, "asynchronous error: %s\n", hif_errors[i].str);
+ } else {
+ dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type);
+ }
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
+ 16, 1, hif, le16_to_cpu(hif->len), false);
+ wdev->chip_frozen = true;
+
+ return 0;
+}
+
+static int wfx_hif_exception_indication(struct wfx_dev *wdev,
+ const struct wfx_hif_msg *hif, const void *buf)
+{
+ const struct wfx_hif_ind_exception *body = buf;
+ int type = le32_to_cpu(body->type);
+
+ if (type == 4)
+ dev_err(wdev->dev, "firmware assert %d\n", le32_to_cpup((__le32 *)body->data));
+ else
+ dev_err(wdev->dev, "firmware exception\n");
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
+ 16, 1, hif, le16_to_cpu(hif->len), false);
+ wdev->chip_frozen = true;
+
+ return -1;
+}
+
+static const struct {
+ int msg_id;
+ int (*handler)(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, const void *buf);
+} hif_handlers[] = {
+ /* Confirmations */
+ { HIF_CNF_ID_TX, wfx_hif_tx_confirm },
+ { HIF_CNF_ID_MULTI_TRANSMIT, wfx_hif_multi_tx_confirm },
+ /* Indications */
+ { HIF_IND_ID_STARTUP, wfx_hif_startup_indication },
+ { HIF_IND_ID_WAKEUP, wfx_hif_wakeup_indication },
+ { HIF_IND_ID_JOIN_COMPLETE, wfx_hif_join_complete_indication },
+ { HIF_IND_ID_SET_PM_MODE_CMPL, wfx_hif_pm_mode_complete_indication },
+ { HIF_IND_ID_SCAN_CMPL, wfx_hif_scan_complete_indication },
+ { HIF_IND_ID_SUSPEND_RESUME_TX, wfx_hif_suspend_resume_indication },
+ { HIF_IND_ID_EVENT, wfx_hif_event_indication },
+ { HIF_IND_ID_GENERIC, wfx_hif_generic_indication },
+ { HIF_IND_ID_ERROR, wfx_hif_error_indication },
+ { HIF_IND_ID_EXCEPTION, wfx_hif_exception_indication },
+ /* FIXME: allocate skb_p from wfx_hif_receive_indication and make it generic */
+ //{ HIF_IND_ID_RX, wfx_hif_receive_indication },
+};
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ int i;
+ const struct wfx_hif_msg *hif = (const struct wfx_hif_msg *)skb->data;
+ int hif_id = hif->id;
+
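+ /* Dispatch order: RX indications first, then the confirmation of the pending command, then the static handler table */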
+ if (hif_id == HIF_IND_ID_RX) {
+ /* wfx_hif_receive_indication() takes care of the skb lifetime */
+ wfx_hif_receive_indication(wdev, hif, hif->body, skb);
+ return;
+ }
+ /* Note: mutex_is_locked() implies a memory barrier that protects buf_send */
+ if (mutex_is_locked(&wdev->hif_cmd.lock) &&
+ wdev->hif_cmd.buf_send && wdev->hif_cmd.buf_send->id == hif_id) {
+ wfx_hif_generic_confirm(wdev, hif, hif->body);
+ goto free;
+ }
+ for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) {
+ if (hif_handlers[i].msg_id == hif_id) {
+ if (hif_handlers[i].handler)
+ hif_handlers[i].handler(wdev, hif, hif->body);
+ goto free;
+ }
+ }
+ if (hif_id & HIF_ID_IS_INDICATION)
+ dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n", hif_id);
+ else
+ dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n", hif_id);
+free:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/net/wireless/silabs/wfx/hif_rx.h b/drivers/net/wireless/silabs/wfx/hif_rx.h
new file mode 100644
index 000000000000..96543b81fa77
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_rx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Handling of the chip-to-host events (aka indications) of the hardware API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_RX_H
+#define WFX_HIF_RX_H
+
+struct wfx_dev;
+struct sk_buff;
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c
new file mode 100644
index 000000000000..9402503fbde3
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the host-to-chip commands (aka request/confirmation) of the
+ * hardware API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+
+#include "hif_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "hwio.h"
+#include "debug.h"
+#include "sta.h"
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
+{
+ init_completion(&hif_cmd->ready);
+ init_completion(&hif_cmd->done);
+ mutex_init(&hif_cmd->lock);
+}
+
+static void wfx_fill_header(struct wfx_hif_msg *hif, int if_id, unsigned int cmd, size_t size)
+{
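+ /* Commands that do not target a particular interface (if_id == -1) are sent on interface 2 */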
+ if (if_id == -1)
+ if_id = 2;
+
+ WARN(cmd > 0x3f, "invalid hardware command %#.2x", cmd);
+ WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size);
+ WARN(if_id > 0x3, "invalid interface ID %d", if_id);
+
+ hif->len = cpu_to_le16(size + 4);
+ hif->id = cmd;
+ hif->interface = if_id;
+}
+
+static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif)
+{
+ *hif = kzalloc(sizeof(struct wfx_hif_msg) + body_len, GFP_KERNEL);
+ if (*hif)
+ return (*hif)->body;
+ else
+ return NULL;
+}
+
+int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request,
+ void *reply, size_t reply_len, bool no_reply)
+{
+ const char *mib_name = "";
+ const char *mib_sep = "";
+ int cmd = request->id;
+ int vif = request->interface;
+ int ret;
+
+ /* Do not wait for any reply if chip is frozen */
+ if (wdev->chip_frozen)
+ return -ETIMEDOUT;
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ WARN(wdev->hif_cmd.buf_send, "data locking error");
+
+ /* Note: the call to complete() below implies a memory barrier that should protect
+ * buf_send
+ */
+ wdev->hif_cmd.buf_send = request;
+ wdev->hif_cmd.buf_recv = reply;
+ wdev->hif_cmd.len_recv = reply_len;
+ complete(&wdev->hif_cmd.ready);
+
+ wfx_bh_request_tx(wdev);
+
+ if (no_reply) {
+ /* The chip won't reply. Ensure the workqueue has sent the buffer before continuing. */
+ flush_workqueue(wdev->bh_wq);
+ ret = 0;
+ goto end;
+ }
+
+ if (wdev->poll_irq)
+ wfx_bh_poll_irq(wdev);
+
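+ /* Wait up to 1s for the answer; on timeout, give the chip a second chance (3s) before declaring it frozen */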
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
+ if (!ret) {
+ dev_err(wdev->dev, "chip is abnormally long to answer\n");
+ reinit_completion(&wdev->hif_cmd.ready);
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ);
+ }
+ if (!ret) {
+ dev_err(wdev->dev, "chip did not answer\n");
+ wfx_pending_dump_old_frames(wdev, 3000);
+ wdev->chip_frozen = true;
+ reinit_completion(&wdev->hif_cmd.done);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = wdev->hif_cmd.ret;
+ }
+
+end:
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+
+ if (ret &&
+ (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
+ mib_name = wfx_get_mib_name(((u16 *)request)[2]);
+ mib_sep = "/";
+ }
+ if (ret < 0)
+ dev_err(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned error %d\n",
+ wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+ if (ret > 0)
+ dev_warn(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned status %d\n",
+ wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+
+ return ret;
+}
+
+/* This function is special. After HIF_REQ_ID_SHUT_DOWN, the chip won't reply to any request
+ * anymore. Only call this function during device unregister.
+ */
+int wfx_hif_shutdown(struct wfx_dev *wdev)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+
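+ /* The body associated with HIF_REQ_ID_SHUT_DOWN is empty */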
+ wfx_alloc_hif(0, &hif);
+ if (!hif)
+ return -ENOMEM;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
+ if (wdev->pdata.gpio_wakeup)
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+ else
+ wfx_control_reg_write(wdev, 0);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
+{
+ int ret;
+ size_t buf_len = sizeof(struct wfx_hif_req_configuration) + len;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->length = cpu_to_le16(len);
+ memcpy(body->pds_data, conf, len);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->reset_stat = reset_stat;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ int buf_len = sizeof(struct wfx_hif_cnf_read_mib) + val_len;
+ struct wfx_hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
+ struct wfx_hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
+
+ if (!body || !reply) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ body->mib_id = cpu_to_le16(mib_id);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
+
+ if (!ret && mib_id != le16_to_cpu(reply->mib_id)) {
+ dev_warn(wdev->dev, "%s: confirmation mismatch request\n", __func__);
+ ret = -EIO;
+ }
+ if (ret == -ENOMEM)
+ dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n",
+ wfx_get_mib_name(mib_id), val_len, le16_to_cpu(reply->length));
+ if (!ret)
+ memcpy(val, &reply->mib_data, le16_to_cpu(reply->length));
+ else
+ memset(val, 0xFF, val_len);
+out:
+ kfree(hif);
+ kfree(reply);
+ return ret;
+}
+
+int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ int buf_len = sizeof(struct wfx_hif_req_write_mib) + val_len;
+ struct wfx_hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->mib_id = cpu_to_le16(mib_id);
+ body->length = cpu_to_le16(val_len);
+ memcpy(&body->mib_data, val, val_len);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
+ int chan_start_idx, int chan_num)
+{
+ int ret, i;
+ struct wfx_hif_msg *hif;
+ size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + chan_num * sizeof(u8);
+ struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif);
+
+ WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
+ WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
+
+ if (!hif)
+ return -ENOMEM;
+ for (i = 0; i < req->n_ssids; i++) {
+ memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid, IEEE80211_MAX_SSID_LEN);
+ body->ssid_def[i].ssid_length = cpu_to_le32(req->ssids[i].ssid_len);
+ }
+ body->num_of_ssids = HIF_API_MAX_NB_SSIDS;
+ body->maintain_current_bss = 1;
+ body->disallow_ps = 1;
+ body->tx_power_level = cpu_to_le32(req->channels[chan_start_idx]->max_power);
+ body->num_of_channels = chan_num;
+ for (i = 0; i < chan_num; i++)
+ body->channel_list[i] = req->channels[i + chan_start_idx]->hw_value;
+ if (req->no_cck)
+ body->max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+ else
+ body->max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+ if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) {
+ body->min_channel_time = cpu_to_le32(50);
+ body->max_channel_time = cpu_to_le32(150);
+ } else {
+ body->min_channel_time = cpu_to_le32(10);
+ body->max_channel_time = cpu_to_le32(50);
+ body->num_of_probe_requests = 2;
+ body->probe_delay = 100;
+ }
+
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_stop_scan(struct wfx_vif *wvif)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ /* The body associated with HIF_REQ_ID_STOP_SCAN is empty */
+ wfx_alloc_hif(0, &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ struct ieee80211_channel *channel, const u8 *ssid, int ssid_len)
+{
+ struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif,
+ bss_conf);
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ WARN_ON(!conf->beacon_int);
+ WARN_ON(!conf->basic_rates);
+ WARN_ON(sizeof(body->ssid) < ssid_len);
+ WARN(!vif->cfg.ibss_joined && !ssid_len, "joining an unknown BSS");
+ if (!hif)
+ return -ENOMEM;
+ body->infrastructure_bss_mode = !vif->cfg.ibss_joined;
+ body->short_preamble = conf->use_short_preamble;
+ body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR);
+ body->channel_number = channel->hw_value;
+ body->beacon_interval = cpu_to_le32(conf->beacon_int);
+ body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+ memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
+ if (ssid) {
+ body->ssid_length = cpu_to_le32(ssid_len);
+ memcpy(body->ssid, ssid, ssid_len);
+ }
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->aid = cpu_to_le16(aid);
+ body->beacon_lost_count = beacon_lost_count;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ /* FIXME: only send necessary bits */
+ struct wfx_hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ /* FIXME: swap bytes as necessary in body */
+ memcpy(body, arg, sizeof(*body));
+ if (wfx_api_older_than(wdev, 1, 5))
+ /* Legacy firmware expects add_key to be sent on the right interface. */
+ wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY, sizeof(*body));
+ else
+ wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_remove_key(struct wfx_dev *wdev, int idx)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->entry_index = idx;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+ const struct ieee80211_tx_queue_params *arg)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+
+ WARN_ON(arg->aifs > 255);
+ body->aifsn = arg->aifs;
+ body->cw_min = cpu_to_le16(arg->cw_min);
+ body->cw_max = cpu_to_le16(arg->cw_max);
+ body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP);
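+ /* mac80211 orders the ACs from VO (0) to BK (3); the device uses the reverse order, hence "3 - queue" */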
+ body->queue_id = 3 - queue;
+ /* API 2.0 changed the queue ID values: before it, the BE and BK IDs were swapped */
+ if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE)
+ body->queue_id = HIF_QUEUE_ID_BACKGROUND;
+ if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK)
+ body->queue_id = HIF_QUEUE_ID_BESTEFFORT;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ if (ps) {
+ body->enter_psm = 1;
+ /* Firmware does not support more than 128ms */
+ body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
+ if (body->fast_psm_idle_period)
+ body->fast_psm = 1;
+ }
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel)
+{
+ struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif,
+ bss_conf);
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ WARN_ON(!conf->beacon_int);
+ if (!hif)
+ return -ENOMEM;
+ body->dtim_period = conf->dtim_period;
+ body->short_preamble = conf->use_short_preamble;
+ body->channel_number = channel->hw_value;
+ body->beacon_interval = cpu_to_le32(conf->beacon_int);
+ body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+ body->ssid_length = vif->cfg.ssid_len;
+ memcpy(body->ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->enable_beaconing = enable ? 1 : 0;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ struct wfx_hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ if (mac_addr)
+ ether_addr_copy(body->mac_addr, mac_addr);
+ body->mfpc = mfp ? 1 : 0;
+ body->unmap = unmap ? 1 : 0;
+ body->peer_sta_id = sta_id;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
+{
+ int ret;
+ struct wfx_hif_msg *hif;
+ int buf_len = sizeof(struct wfx_hif_req_update_ie) + ies_len;
+ struct wfx_hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
+
+ if (!hif)
+ return -ENOMEM;
+ body->beacon = 1;
+ body->num_ies = cpu_to_le16(1);
+ memcpy(body->ie, ies, ies_len);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.h b/drivers/net/wireless/silabs/wfx/hif_tx.h
new file mode 100644
index 000000000000..71817a6571f0
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the host-to-chip commands (aka request/confirmation) of the
+ * hardware API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_H
+#define WFX_HIF_TX_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+
+struct ieee80211_channel;
+struct ieee80211_bss_conf;
+struct ieee80211_tx_queue_params;
+struct cfg80211_scan_request;
+struct wfx_hif_req_add_key;
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_hif_cmd {
+ struct mutex lock;
+ struct completion ready;
+ struct completion done;
+ struct wfx_hif_msg *buf_send;
+ void *buf_recv;
+ size_t len_recv;
+ int ret;
+};
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *wfx_hif_cmd);
+int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request,
+ void *reply, size_t reply_len, bool async);
+
+int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size);
+int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size);
+int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel);
+int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat);
+int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
+int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp);
+int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg);
+int wfx_hif_remove_key(struct wfx_dev *wdev, int idx);
+int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
+int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count);
+int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+ const struct ieee80211_tx_queue_params *arg);
+int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
+int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
+int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
+ int chan_start, int chan_num);
+int wfx_hif_stop_scan(struct wfx_vif *wvif);
+int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len);
+int wfx_hif_shutdown(struct wfx_dev *wdev);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx_mib.c b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c
new file mode 100644
index 000000000000..df1bcb1e2c02
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the host-to-chip MIBs of the hardware API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+
+#include <linux/etherdevice.h>
+
+#include "wfx.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+#include "hif_api_mib.h"
+
+int wfx_hif_set_output_power(struct wfx_vif *wvif, int val)
+{
+ struct wfx_hif_mib_current_tx_power_level arg = {
+ .power_level = cpu_to_le32(val * 10),
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval, unsigned int listen_interval)
+{
+ struct wfx_hif_mib_beacon_wake_up_period arg = {
+ .wakeup_period_min = dtim_interval,
+ .receive_dtim = 0,
+ .wakeup_period_max = listen_interval,
+ };
+
+ if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+ return -EINVAL;
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst)
+{
+ struct wfx_hif_mib_rcpi_rssi_threshold arg = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ if (!rssi_thold && !rssi_hyst) {
+ arg.upperthresh = 1;
+ arg.lowerthresh = 1;
+ } else {
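+ /* Convert dBm to the 802.11 RCPI encoding: RCPI = (dBm + 110) * 2 */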
+ arg.upper_threshold = rssi_thold + rssi_hyst;
+ arg.upper_threshold = (arg.upper_threshold + 110) * 2;
+ arg.lower_threshold = rssi_thold;
+ arg.lower_threshold = (arg.lower_threshold + 110) * 2;
+ }
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RCPI_RSSI_THRESHOLD,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
+ struct wfx_hif_mib_extended_count_table *arg)
+{
+ if (wfx_api_older_than(wdev, 1, 3)) {
+ /* extended_count_table is wider than count_table */
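+ /* Fields missing from the older table keep the 0xFF filler, marking them unsupported */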
+ memset(arg, 0xFF, sizeof(*arg));
+ return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(struct wfx_hif_mib_count_table));
+ } else {
+ return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_EXTENDED_COUNTERS_TABLE,
+ arg, sizeof(struct wfx_hif_mib_extended_count_table));
+ }
+}
+
+int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
+{
+ struct wfx_hif_mib_mac_address arg = { };
+
+ if (mac)
+ ether_addr_copy(arg.mac_addr, mac);
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool filter_prbreq)
+{
+ struct wfx_hif_mib_rx_filter arg = { };
+
+ if (filter_bssid)
+ arg.bssid_filter = 1;
+ if (!filter_prbreq)
+ arg.fwd_probe_req = 1;
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER, &arg, sizeof(arg));
+}
+
+int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
+ const struct wfx_hif_ie_table_entry *tbl)
+{
+ int ret;
+ struct wfx_hif_mib_bcn_filter_table *arg;
+ int buf_len = struct_size(arg, ie_table, tbl_len);
+
+ arg = kzalloc(buf_len, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(arg->ie_table, tbl, flex_array_size(arg, ie_table, tbl_len));
+ ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_TABLE,
+ arg, buf_len);
+ kfree(arg);
+ return ret;
+}
+
+int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count)
+{
+ struct wfx_hif_mib_bcn_filter_enable arg = {
+ .enable = cpu_to_le32(enable),
+ .bcn_count = cpu_to_le32(beacon_count),
+ };
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_ENABLE,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode)
+{
+ struct wfx_hif_mib_gl_operational_power_mode arg = {
+ .power_mode = mode,
+ .wup_ind_activation = 1,
+ };
+
+ return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
+ u8 frame_type, int init_rate)
+{
+ struct wfx_hif_mib_template_frame *arg;
+
+ WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big");
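+ /* Map the 4-byte MIB header onto the skb headroom, just in front of the frame data */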
+ skb_push(skb, 4);
+ arg = (struct wfx_hif_mib_template_frame *)skb->data;
+ skb_pull(skb, 4);
+ arg->init_rate = init_rate;
+ arg->frame_type = frame_type;
+ arg->frame_length = cpu_to_le16(skb->len);
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
+ arg, sizeof(*arg) + skb->len);
+}
+
+int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
+{
+ struct wfx_hif_mib_protected_mgmt_policy arg = { };
+
+ WARN(required && !capable, "incoherent arguments");
+ if (capable) {
+ arg.pmf_enable = 1;
+ arg.host_enc_auth_frames = 1;
+ }
+ if (!required)
+ arg.unpmf_allowed = 1;
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy)
+{
+ struct wfx_hif_mib_block_ack_policy arg = {
+ .block_ack_tx_tid_policy = tx_tid_policy,
+ .block_ack_rx_tid_policy = rx_tid_policy,
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density,
+ bool greenfield, bool short_preamble)
+{
+ struct wfx_hif_mib_set_association_mode arg = {
+ .preambtype_use = 1,
+ .mode = 1,
+ .spacing = 1,
+ .short_preamble = short_preamble,
+ .greenfield = greenfield,
+ .mpdu_start_spacing = ampdu_density,
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_ASSOCIATION_MODE,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates)
+{
+ struct wfx_hif_mib_set_tx_rate_retry_policy *arg;
+ size_t size = struct_size(arg, tx_rate_retry_policy, 1);
+ int ret;
+
+ arg = kzalloc(size, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->num_tx_rate_policies = 1;
+ arg->tx_rate_retry_policy[0].policy_index = policy_index;
+ arg->tx_rate_retry_policy[0].short_retry_count = 255;
+ arg->tx_rate_retry_policy[0].long_retry_count = 255;
+ arg->tx_rate_retry_policy[0].first_rate_sel = 1;
+ arg->tx_rate_retry_policy[0].terminate = 1;
+ arg->tx_rate_retry_policy[0].count_init = 1;
+ memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
+ sizeof(arg->tx_rate_retry_policy[0].rates));
+ ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY,
+ arg, size);
+ kfree(arg);
+ return ret;
+}
+
+int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period)
+{
+ struct wfx_hif_mib_keep_alive_period arg = {
+ .keep_alive_period = cpu_to_le16(period),
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr)
+{
+ struct wfx_hif_mib_arp_ip_addr_table arg = {
+ .condition_idx = idx,
+ .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
+ };
+
+ if (addr) {
+ /* Caution: type of addr is __be32 */
+ memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
+ arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ }
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
+{
+ struct wfx_hif_mib_gl_set_multi_msg arg = {
+ .enable_multi_tx_conf = enable,
+ };
+
+ return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG, &arg, sizeof(arg));
+}
+
+int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
+{
+ struct wfx_hif_mib_set_uapsd_information arg = { };
+
+ if (val & BIT(IEEE80211_AC_VO))
+ arg.trig_voice = 1;
+ if (val & BIT(IEEE80211_AC_VI))
+ arg.trig_video = 1;
+ if (val & BIT(IEEE80211_AC_BE))
+ arg.trig_be = 1;
+ if (val & BIT(IEEE80211_AC_BK))
+ arg.trig_bckgrnd = 1;
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_UAPSD_INFORMATION,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
+{
+ struct wfx_hif_mib_non_erp_protection arg = {
+ .use_cts_to_self = enable,
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_NON_ERP_PROTECTION,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_slot_time(struct wfx_vif *wvif, int val)
+{
+ struct wfx_hif_mib_slot_time arg = {
+ .slot_time = cpu_to_le32(val),
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME, &arg, sizeof(arg));
+}
+
+int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val)
+{
+ struct wfx_hif_mib_wep_default_key_id arg = {
+ .wep_default_key_id = val,
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &arg, sizeof(arg));
+}
+
+int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val)
+{
+ struct wfx_hif_mib_dot11_rts_threshold arg = {
+ .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
+ };
+
+ return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_RTS_THRESHOLD,
+ &arg, sizeof(arg));
+}
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx_mib.h b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h
new file mode 100644
index 000000000000..bcd4ef6a8497
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the host-to-chip MIBs of the hardware API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_MIB_H
+#define WFX_HIF_TX_MIB_H
+
+#include <linux/types.h>
+
+struct sk_buff;
+struct wfx_vif;
+struct wfx_dev;
+struct wfx_hif_ie_table_entry;
+struct wfx_hif_mib_extended_count_table;
+
+int wfx_hif_set_output_power(struct wfx_vif *wvif, int val);
+int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval, unsigned int listen_interval);
+int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst);
+int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
+ struct wfx_hif_mib_extended_count_table *arg);
+int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac);
+int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool fwd_probe_req);
+int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
+ const struct wfx_hif_ie_table_entry *tbl);
+int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count);
+int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode);
+int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
+ u8 frame_type, int init_rate);
+int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required);
+int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy);
+int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density,
+ bool greenfield, bool short_preamble);
+int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates);
+int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period);
+int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr);
+int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable);
+int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val);
+int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable);
+int wfx_hif_slot_time(struct wfx_vif *wvif, int val);
+int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val);
+int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/hwio.c b/drivers/net/wireless/silabs/wfx/hwio.c
new file mode 100644
index 000000000000..3f9750b470be
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hwio.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level I/O functions.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/align.h>
+
+#include "hwio.h"
+#include "wfx.h"
+#include "bus.h"
+#include "traces.h"
+
+#define WFX_HIF_BUFFER_SIZE 0x2000
+
+static int wfx_read32(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
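+ /* The bus may access this scratch buffer with DMA, so it must live on the heap */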
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ *val = ~0; /* Never return undefined value */
+ if (!tmp)
+ return -ENOMEM;
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp, sizeof(u32));
+ if (ret >= 0)
+ *val = le32_to_cpu(*tmp);
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
+ return ret;
+}
+
+static int wfx_write32(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp, sizeof(u32));
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
+ return ret;
+}
+
+static int wfx_read32_locked(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_read32(wdev, reg, val);
+ _trace_io_read32(reg, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int wfx_write32_locked(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_write32(wdev, reg, val);
+ _trace_io_write32(reg, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int wfx_write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val)
+{
+ int ret;
+ u32 val_r, val_w;
+
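+ /* Read-modify-write under the bus lock; skip the write when no selected bit changes */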
+ WARN_ON(~mask & val);
+ val &= mask;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_read32(wdev, reg, &val_r);
+ _trace_io_read32(reg, val_r);
+ if (ret < 0)
+ goto err;
+ val_w = (val_r & ~mask) | val;
+ if (val_w != val_r) {
+ ret = wfx_write32(wdev, reg, val_w);
+ _trace_io_write32(reg, val_w);
+ }
+err:
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int wfx_indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf, size_t len)
+{
+ int ret;
+ int i;
+ u32 cfg;
+ u32 prefetch;
+
+ WARN_ON(len >= WFX_HIF_BUFFER_SIZE);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+
+ if (reg == WFX_REG_AHB_DPORT)
+ prefetch = CFG_PREFETCH_AHB;
+ else if (reg == WFX_REG_SRAM_DPORT)
+ prefetch = CFG_PREFETCH_SRAM;
+ else
+ return -ENODEV;
+
+ ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ goto err;
+
+ ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+
+ ret = wfx_write32(wdev, WFX_REG_CONFIG, cfg | prefetch);
+ if (ret < 0)
+ goto err;
+
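+ /* Poll until the device clears the prefetch bit, i.e. until the data port is ready */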
+ for (i = 0; i < 20; i++) {
+ ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+ if (!(cfg & prefetch))
+ break;
+ usleep_range(200, 250);
+ }
+ if (i == 20) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len);
+
+err:
+ if (ret < 0)
+ memset(buf, 0xFF, len); /* Never return undefined value */
+ return ret;
+}
+
+static int wfx_indirect_write(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ WARN_ON(len >= WFX_HIF_BUFFER_SIZE);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+ ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ return ret;
+
+ return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len);
+}
+
+static int wfx_indirect_read_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_indirect_read(wdev, reg, addr, buf, len);
+ _trace_io_ind_read(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int wfx_indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_indirect_write(wdev, reg, addr, buf, len);
+ _trace_io_ind_write(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int wfx_indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_indirect_read(wdev, reg, addr, tmp, sizeof(u32));
+ *val = le32_to_cpu(*tmp);
+ _trace_io_ind_read32(reg, addr, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+static int wfx_indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wfx_indirect_write(wdev, reg, addr, tmp, sizeof(u32));
+ _trace_io_ind_write32(reg, addr, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
+{
+ int ret;
+
+ WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer");
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
+ return ret;
+}
+
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
+{
+ int ret;
+
+ WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer");
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
+ return ret;
+}
+
+int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return wfx_indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return wfx_indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return wfx_indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return wfx_indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return wfx_indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return wfx_indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return wfx_indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return wfx_indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return wfx_read32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int wfx_config_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return wfx_write32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return wfx_write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val);
+}
+
+int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return wfx_read32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int wfx_control_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return wfx_write32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return wfx_write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val);
+}
+
+int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val)
+{
+ int ret;
+
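+ /* Indirect read: post the index with the read flag set, then read the value back */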
+ *val = ~0; /* Never return undefined value */
+ ret = wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24);
+ if (ret)
+ return ret;
+ ret = wfx_read32_locked(wdev, WFX_REG_SET_GEN_R_W, val);
+ if (ret)
+ return ret;
+ *val &= IGPR_VALUE;
+ return ret;
+}
+
+int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val)
+{
+ return wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val);
+}
diff --git a/drivers/net/wireless/silabs/wfx/hwio.h b/drivers/net/wireless/silabs/wfx/hwio.h
new file mode 100644
index 000000000000..c6e7b065b7ff
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/hwio.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low-level I/O functions.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_HWIO_H
+#define WFX_HWIO_H
+
+#include <linux/types.h>
+
+struct wfx_dev;
+
+/* Caution: in the functions below, 'buf' will be used with DMA. So, it must be kmalloc'd (do not
+ * use stack allocated buffers). If in doubt, enable CONFIG_DEBUG_SG to detect badly located
+ * buffers.
+ */
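+/* A minimal sketch of a valid caller (illustrative only; 'wdev' and 'len' are assumed to come
+ * from the surrounding code). The buffer comes from kmalloc(), never from the stack:
+ *
+ *     u8 *buf = kmalloc(len, GFP_KERNEL);
+ *     if (buf) {
+ *         wfx_data_read(wdev, buf, len);
+ *         kfree(buf);
+ *     }
+ */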
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len);
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len);
+
+int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+#define CFG_ERR_SPI_FRAME 0x00000001 /* only with SPI */
+#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 /* only with SDIO */
+#define CFG_ERR_BUF_UNDERRUN 0x00000002
+#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004
+#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008
+#define CFG_ERR_BUF_OVERRUN 0x00000010
+#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020
+#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
+#define CFG_ERR_HOST_CRC_MISS 0x00000080 /* only with SDIO */
+#define CFG_SPI_IGNORE_CS 0x00000080 /* only with SPI */
+#define CFG_BYTE_ORDER_MASK 0x00000300 /* only writable with SPI */
+#define CFG_BYTE_ORDER_BADC 0x00000000
+#define CFG_BYTE_ORDER_DCBA 0x00000100
+#define CFG_BYTE_ORDER_ABCD 0x00000200 /* SDIO always use this value */
+#define CFG_DIRECT_ACCESS_MODE 0x00000400
+#define CFG_PREFETCH_AHB 0x00000800
+#define CFG_DISABLE_CPU_CLK 0x00001000
+#define CFG_PREFETCH_SRAM 0x00002000
+#define CFG_CPU_RESET 0x00004000
+#define CFG_SDIO_DISABLE_IRQ 0x00008000 /* only with SDIO */
+#define CFG_IRQ_ENABLE_DATA 0x00010000
+#define CFG_IRQ_ENABLE_WRDY 0x00020000
+#define CFG_CLK_RISE_EDGE 0x00040000
+#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 /* only with SDIO */
+#define CFG_RESERVED 0x00F00000
+#define CFG_DEVICE_ID_MAJOR 0x07000000
+#define CFG_DEVICE_ID_RESERVED 0x78000000
+#define CFG_DEVICE_ID_TYPE 0x80000000
+int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val);
+int wfx_config_reg_write(struct wfx_dev *wdev, u32 val);
+int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define CTRL_NEXT_LEN_MASK 0x00000FFF
+#define CTRL_WLAN_WAKEUP 0x00001000
+#define CTRL_WLAN_READY 0x00002000
+int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val);
+int wfx_control_reg_write(struct wfx_dev *wdev, u32 val);
+int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define IGPR_RW 0x80000000
+#define IGPR_INDEX 0x7F000000
+#define IGPR_VALUE 0x00FFFFFF
+int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val);
+int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/key.c b/drivers/net/wireless/silabs/wfx/key.c
new file mode 100644
index 000000000000..196d64ef68f3
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/key.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Key management related functions.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "key.h"
+#include "wfx.h"
+#include "hif_tx_mib.h"
+
+static int wfx_alloc_key(struct wfx_dev *wdev)
+{
+ int idx;
+
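+ /* The first clear bit of key_map is the first free slot in the device key table */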
+ idx = ffs(~wdev->key_map) - 1;
+ if (idx < 0 || idx >= MAX_KEY_ENTRIES)
+ return -1;
+
+ wdev->key_map |= BIT(idx);
+ return idx;
+}
+
+static void wfx_free_key(struct wfx_dev *wdev, int idx)
+{
+ WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
+ wdev->key_map &= ~BIT(idx);
+}
+
+static u8 fill_wep_pair(struct wfx_hif_wep_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_WEP_PAIRWISE;
+}
+
+static u8 fill_wep_group(struct wfx_hif_wep_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_id = key->keyidx;
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_WEP_DEFAULT;
+}
+
+static u8 fill_tkip_pair(struct wfx_hif_tkip_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data) + sizeof(msg->tx_mic_key) +
+ sizeof(msg->rx_mic_key), "inconsistent data");
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key));
+ keybuf += sizeof(msg->tx_mic_key);
+ memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key));
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_TKIP_PAIRWISE;
+}
+
+static u8 fill_tkip_group(struct wfx_hif_tkip_group_key *msg, struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq, enum nl80211_iftype iftype)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data) + 2 * sizeof(msg->rx_mic_key),
+ "inconsistent data");
+ msg->key_id = key->keyidx;
+ memcpy(msg->rx_sequence_counter, &seq->tkip.iv16, sizeof(seq->tkip.iv16));
+ memcpy(msg->rx_sequence_counter + sizeof(u16), &seq->tkip.iv32, sizeof(seq->tkip.iv32));
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ if (iftype == NL80211_IFTYPE_AP)
+ /* Use Tx MIC Key */
+ memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key));
+ else
+ /* Use Rx MIC Key */
+ memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key));
+ return HIF_KEY_TYPE_TKIP_GROUP;
+}
+
+static u8 fill_ccmp_pair(struct wfx_hif_aes_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_AES_PAIRWISE;
+}
+
+static u8 fill_ccmp_group(struct wfx_hif_aes_group_key *msg,
+ struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn));
+ memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_AES_GROUP;
+}
+
+static u8 fill_sms4_pair(struct wfx_hif_wapi_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data),
+ "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_PAIRWISE;
+}
+
+static u8 fill_sms4_group(struct wfx_hif_wapi_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data),
+ "inconsistent data");
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_GROUP;
+}
+
+static u8 fill_aes_cmac_group(struct wfx_hif_igtk_group_key *msg,
+ struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data");
+ memcpy(msg->igtk_key_data, key->key, key->keylen);
+ memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn));
+ memreverse(msg->ipn, sizeof(seq->aes_cmac.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_IGTK_GROUP;
+}
+
+static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret;
+ struct wfx_hif_req_add_key k = { };
+ struct ieee80211_key_seq seq;
+ struct wfx_dev *wdev = wvif->wdev;
+ int idx = wfx_alloc_key(wvif->wdev);
+ bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+
+ WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data");
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+ if (idx < 0)
+ return -EINVAL;
+ k.int_id = wvif->id;
+ k.entry_index = idx;
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ if (pairwise)
+ k.type = fill_wep_pair(&k.key.wep_pairwise_key, key, sta->addr);
+ else
+ k.type = fill_wep_group(&k.key.wep_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ if (pairwise)
+ k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr);
+ else
+ k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq,
+ vif->type);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
+ if (pairwise)
+ k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr);
+ else
+ k.type = fill_ccmp_group(&k.key.aes_group_key, key, &seq);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
+ if (pairwise)
+ k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key, sta->addr);
+ else
+ k.type = fill_sms4_group(&k.key.wapi_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key, &seq);
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ } else {
+ dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ ret = wfx_hif_add_key(wdev, &k);
+ if (ret) {
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+ key->hw_key_idx = idx;
+ return 0;
+}
+
+static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key)
+{
+ WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx");
+ wfx_free_key(wvif->wdev, key->hw_key_idx);
+ return wfx_hif_remove_key(wvif->wdev, key->hw_key_idx);
+}
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_key_conf *key)
+{
+ int ret = -EOPNOTSUPP;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ if (cmd == SET_KEY)
+ ret = wfx_add_key(wvif, sta, key);
+ if (cmd == DISABLE_KEY)
+ ret = wfx_remove_key(wvif, key);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return ret;
+}
diff --git a/drivers/net/wireless/silabs/wfx/key.h b/drivers/net/wireless/silabs/wfx/key.h
new file mode 100644
index 000000000000..2234e36dbbcd
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/key.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Key management related functions.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_KEY_H
+#define WFX_KEY_H
+
+#include <net/mac80211.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_key_conf *key);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
new file mode 100644
index 000000000000..84d82ddded56
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+
+#include "main.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bus.h"
+#include "bh.h"
+#include "sta.h"
+#include "key.h"
+#include "scan.h"
+#include "debug.h"
+#include "data_tx.h"
+#include "hif_tx_mib.h"
+#include "hif_api_cmd.h"
+
+#define WFX_PDS_TLV_TYPE 0x4450 /* "PD" (Platform Data) in ASCII little-endian */
+#define WFX_PDS_MAX_CHUNK_SIZE 1500
+
+MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WF200");
+MODULE_AUTHOR("Jérôme Pouiller <jerome.pouiller@silabs.com>");
+MODULE_LICENSE("GPL");
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+}
+
+static struct ieee80211_rate wfx_rates[] = {
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(60, 6, 0),
+ RATETAB_ENT(90, 7, 0),
+ RATETAB_ENT(120, 8, 0),
+ RATETAB_ENT(180, 9, 0),
+ RATETAB_ENT(240, 10, 0),
+ RATETAB_ENT(360, 11, 0),
+ RATETAB_ENT(480, 12, 0),
+ RATETAB_ENT(540, 13, 0),
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_channel wfx_2ghz_chantable[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_supported_band wfx_band_2ghz = {
+ .channels = wfx_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(wfx_2ghz_chantable),
+ .bitrates = wfx_rates,
+ .n_bitrates = ARRAY_SIZE(wfx_rates),
+ .ht_cap = {
+ /* Receive caps */
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask = { 0xFF }, /* MCS0 to MCS7 */
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static const struct ieee80211_iface_limit wdev_iface_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 2,
+ .limits = wdev_iface_limits,
+ .n_limits = ARRAY_SIZE(wdev_iface_limits),
+ }
+};
+
+static const struct ieee80211_ops wfx_ops = {
+ .start = wfx_start,
+ .stop = wfx_stop,
+ .add_interface = wfx_add_interface,
+ .remove_interface = wfx_remove_interface,
+ .config = wfx_config,
+ .tx = wfx_tx,
+ .join_ibss = wfx_join_ibss,
+ .leave_ibss = wfx_leave_ibss,
+ .conf_tx = wfx_conf_tx,
+ .hw_scan = wfx_hw_scan,
+ .cancel_hw_scan = wfx_cancel_hw_scan,
+ .start_ap = wfx_start_ap,
+ .stop_ap = wfx_stop_ap,
+ .sta_add = wfx_sta_add,
+ .sta_remove = wfx_sta_remove,
+ .set_tim = wfx_set_tim,
+ .set_key = wfx_set_key,
+ .set_rts_threshold = wfx_set_rts_threshold,
+ .set_default_unicast_key = wfx_set_default_unicast_key,
+ .bss_info_changed = wfx_bss_info_changed,
+ .configure_filter = wfx_configure_filter,
+ .ampdu_action = wfx_ampdu_action,
+ .flush = wfx_flush,
+ .add_chanctx = wfx_add_chanctx,
+ .remove_chanctx = wfx_remove_chanctx,
+ .change_chanctx = wfx_change_chanctx,
+ .assign_vif_chanctx = wfx_assign_vif_chanctx,
+ .unassign_vif_chanctx = wfx_unassign_vif_chanctx,
+};
+
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
+{
+ if (wdev->hw_caps.api_version_major < major)
+ return true;
+ if (wdev->hw_caps.api_version_major > major)
+ return false;
+ if (wdev->hw_caps.api_version_minor < minor)
+ return true;
+ return false;
+}
+
+/* The device needs data about the antenna configuration. This information is provided by PDS
+ * (Platform Data Set, this is the wording used in WF200 documentation) files. For hardware
+ * integrators, the full process to create PDS files is described here:
+ * https://github.com/SiliconLabs/wfx-firmware/blob/master/PDS/README.md
+ *
+ * The PDS file is an array of Type-Length-Value structs.
+ */
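+/* For illustration, a hypothetical chunk could look like this (multi-byte fields are
+ * little-endian):
+ *
+ *     offset 0: type   = 0x4450 ("PD")
+ *     offset 2: length = size of the whole chunk, header included
+ *     offset 4: value  = "{...}", the braced PDS data passed to wfx_hif_configuration()
+ */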
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
+{
+ int ret, chunk_type, chunk_len, chunk_num = 0;
+
+ if (*buf == '{') {
+ dev_err(wdev->dev, "PDS: malformed file (legacy format?)\n");
+ return -EINVAL;
+ }
+ while (len > 0) {
+ chunk_type = get_unaligned_le16(buf + 0);
+ chunk_len = get_unaligned_le16(buf + 2);
+ if (chunk_len < 4 || chunk_len > len) {
+ dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num);
+ return -EINVAL;
+ }
+ if (chunk_type != WFX_PDS_TLV_TYPE) {
+ dev_info(wdev->dev, "PDS:%d: skip unknown data\n", chunk_num);
+ goto next;
+ }
+ if (chunk_len > WFX_PDS_MAX_CHUNK_SIZE)
+ dev_warn(wdev->dev, "PDS:%d: unexpectedly large chunk\n", chunk_num);
+ if (buf[4] != '{' || buf[chunk_len - 1] != '}')
+ dev_warn(wdev->dev, "PDS:%d: unexpected content\n", chunk_num);
+
+ ret = wfx_hif_configuration(wdev, buf + 4, chunk_len - 4);
+ if (ret > 0) {
+ dev_err(wdev->dev, "PDS:%d: invalid data (unsupported options?)\n", chunk_num);
+ return -EINVAL;
+ }
+ if (ret == -ETIMEDOUT) {
+ dev_err(wdev->dev, "PDS:%d: chip didn't reply (corrupted file?)\n", chunk_num);
+ return ret;
+ }
+ if (ret) {
+ dev_err(wdev->dev, "PDS:%d: chip returned an unknown error\n", chunk_num);
+ return -EIO;
+ }
+next:
+ chunk_num++;
+ len -= chunk_len;
+ buf += chunk_len;
+ }
+ return 0;
+}
+
+static int wfx_send_pdata_pds(struct wfx_dev *wdev)
+{
+ int ret = 0;
+ const struct firmware *pds;
+ u8 *tmp_buf;
+
+ ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load antenna parameters (PDS file %s). The device may be unstable.\n",
+ wdev->pdata.file_pds);
+ return ret;
+ }
+ tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ ret = wfx_send_pds(wdev, tmp_buf, pds->size);
+ kfree(tmp_buf);
+release_fw:
+ release_firmware(pds);
+ return ret;
+}
+
+static void wfx_free_common(void *data)
+{
+ struct wfx_dev *wdev = data;
+
+ mutex_destroy(&wdev->tx_power_loop_info_lock);
+ mutex_destroy(&wdev->rx_stats_lock);
+ mutex_destroy(&wdev->conf_mutex);
+ ieee80211_free_hw(wdev->hw);
+}
+
+struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata,
+ const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv)
+{
+ struct ieee80211_hw *hw;
+ struct wfx_dev *wdev;
+
+ hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops);
+ if (!hw)
+ return NULL;
+
+ SET_IEEE80211_DEV(hw, dev);
+
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->vif_data_size = sizeof(struct wfx_vif);
+ hw->sta_data_size = sizeof(struct wfx_sta_priv);
+ hw->queues = 4;
+ hw->max_rates = 8;
+ hw->max_rate_tries = 8;
+ hw->extra_tx_headroom = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
+ 4 /* alignment */ + 8 /* TKIP IV */;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+ hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX;
+ hw->wiphy->max_scan_ssids = 2;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
+ hw->wiphy->iface_combinations = wfx_iface_combinations;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL);
+ if (!hw->wiphy->bands[NL80211_BAND_2GHZ])
+ goto err;
+
+ /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */
+ memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz, sizeof(wfx_band_2ghz));
+
+ wdev = hw->priv;
+ wdev->hw = hw;
+ wdev->dev = dev;
+ wdev->hwbus_ops = hwbus_ops;
+ wdev->hwbus_priv = hwbus_priv;
+ memcpy(&wdev->pdata, pdata, sizeof(*pdata));
+ of_property_read_string(dev->of_node, "silabs,antenna-config-file", &wdev->pdata.file_pds);
+ wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW);
+ if (IS_ERR(wdev->pdata.gpio_wakeup))
+ goto err;
+
+ if (wdev->pdata.gpio_wakeup)
+ gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup");
+
+ mutex_init(&wdev->conf_mutex);
+ mutex_init(&wdev->rx_stats_lock);
+ mutex_init(&wdev->tx_power_loop_info_lock);
+ init_completion(&wdev->firmware_ready);
+ INIT_DELAYED_WORK(&wdev->cooling_timeout_work, wfx_cooling_timeout_work);
+ skb_queue_head_init(&wdev->tx_pending);
+ init_waitqueue_head(&wdev->tx_dequeue);
+ wfx_init_hif_cmd(&wdev->hif_cmd);
+
+ if (devm_add_action_or_reset(dev, wfx_free_common, wdev))
+ return NULL;
+
+ return wdev;
+
+err:
+ ieee80211_free_hw(hw);
+ return NULL;
+}
+
+int wfx_probe(struct wfx_dev *wdev)
+{
+ int i;
+ int err;
+ struct gpio_desc *gpio_saved;
+
+ /* During the first part of boot, gpio_wakeup cannot be used yet, so prevent bh() from
+ * touching it.
+ */
+ gpio_saved = wdev->pdata.gpio_wakeup;
+ wdev->pdata.gpio_wakeup = NULL;
+ wdev->poll_irq = true;
+
+ wdev->bh_wq = alloc_workqueue("wfx_bh_wq", WQ_HIGHPRI, 0);
+ if (!wdev->bh_wq)
+ return -ENOMEM;
+
+ wfx_bh_register(wdev);
+
+ err = wfx_init_device(wdev);
+ if (err)
+ goto bh_unregister;
+
+ wfx_bh_poll_irq(wdev);
+ err = wait_for_completion_timeout(&wdev->firmware_ready, 1 * HZ);
+ if (err <= 0) {
+ if (err == 0) {
+ dev_err(wdev->dev, "timeout while waiting for startup indication\n");
+ err = -ETIMEDOUT;
+ } else if (err == -ERESTARTSYS) {
+ dev_info(wdev->dev, "probe interrupted by user\n");
+ }
+ goto bh_unregister;
+ }
+
+ /* FIXME: fill wiphy::hw_version */
+ dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
+ wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
+ wdev->hw_caps.api_version_major, wdev->hw_caps.api_version_minor,
+ wdev->keyset, wdev->hw_caps.link_mode);
+ snprintf(wdev->hw->wiphy->fw_version,
+ sizeof(wdev->hw->wiphy->fw_version),
+ "%d.%d.%d",
+ wdev->hw_caps.firmware_major,
+ wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build);
+
+ if (wfx_api_older_than(wdev, 1, 0)) {
+ dev_err(wdev->dev, "unsupported firmware API version (expected 1, firmware returned %d)\n",
+ wdev->hw_caps.api_version_major);
+ err = -EOPNOTSUPP;
+ goto bh_unregister;
+ }
+
+ if (wdev->hw_caps.link_mode == SEC_LINK_ENFORCED) {
+ dev_err(wdev->dev, "chip requires secure link, but the driver cannot negotiate it\n");
+ err = -ENODEV;
+ goto bh_unregister;
+ }
+
+ if (wdev->hw_caps.region_sel_mode) {
+ wdev->hw->wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |=
+ IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |=
+ IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |=
+ IEEE80211_CHAN_DISABLED;
+ }
+
+ dev_dbg(wdev->dev, "sending configuration file %s\n", wdev->pdata.file_pds);
+ err = wfx_send_pdata_pds(wdev);
+ if (err < 0 && err != -ENOENT)
+ goto bh_unregister;
+
+ wdev->poll_irq = false;
+ err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv);
+ if (err)
+ goto bh_unregister;
+
+ err = wfx_hif_use_multi_tx_conf(wdev, true);
+ if (err)
+ dev_err(wdev->dev, "misconfigured IRQ?\n");
+
+ wdev->pdata.gpio_wakeup = gpio_saved;
+ if (wdev->pdata.gpio_wakeup) {
+ dev_dbg(wdev->dev, "enable 'quiescent' power mode with wakeup GPIO and PDS file %s\n",
+ wdev->pdata.file_pds);
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
+ wfx_control_reg_write(wdev, 0);
+ wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT);
+ } else {
+ wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
+ }
+
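+ /* Take the MAC addresses from the DT when present (bumping the last byte for extra
+ * interfaces), else from the chip; a random address is the last resort.
+ */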
+ for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
+ eth_zero_addr(wdev->addresses[i].addr);
+ err = of_get_mac_address(wdev->dev->of_node, wdev->addresses[i].addr);
+ if (!err)
+ wdev->addresses[i].addr[ETH_ALEN - 1] += i;
+ else
+ ether_addr_copy(wdev->addresses[i].addr, wdev->hw_caps.mac_addr[i]);
+ if (!is_valid_ether_addr(wdev->addresses[i].addr)) {
+ dev_warn(wdev->dev, "using random MAC address\n");
+ eth_random_addr(wdev->addresses[i].addr);
+ }
+ dev_info(wdev->dev, "MAC address %d: %pM\n", i, wdev->addresses[i].addr);
+ }
+ wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses);
+ wdev->hw->wiphy->addresses = wdev->addresses;
+
+ if (!wfx_api_older_than(wdev, 3, 8))
+ wdev->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
+ err = ieee80211_register_hw(wdev->hw);
+ if (err)
+ goto irq_unsubscribe;
+
+ err = wfx_debug_init(wdev);
+ if (err)
+ goto ieee80211_unregister;
+
+ return 0;
+
+ieee80211_unregister:
+ ieee80211_unregister_hw(wdev->hw);
+irq_unsubscribe:
+ wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
+bh_unregister:
+ wfx_bh_unregister(wdev);
+ destroy_workqueue(wdev->bh_wq);
+ return err;
+}
+
+void wfx_release(struct wfx_dev *wdev)
+{
+ ieee80211_unregister_hw(wdev->hw);
+ wfx_hif_shutdown(wdev);
+ wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
+ wfx_bh_unregister(wdev);
+ destroy_workqueue(wdev->bh_wq);
+}
+
+static int __init wfx_core_init(void)
+{
+ int ret = 0;
+
+ if (IS_ENABLED(CONFIG_SPI))
+ ret = spi_register_driver(&wfx_spi_driver);
+ if (IS_ENABLED(CONFIG_MMC) && !ret)
+ ret = sdio_register_driver(&wfx_sdio_driver);
+ return ret;
+}
+module_init(wfx_core_init);
+
+static void __exit wfx_core_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MMC))
+ sdio_unregister_driver(&wfx_sdio_driver);
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&wfx_spi_driver);
+}
+module_exit(wfx_core_exit);
diff --git a/drivers/net/wireless/silabs/wfx/main.h b/drivers/net/wireless/silabs/wfx/main.h
new file mode 100644
index 000000000000..68c665307153
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/main.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_MAIN_H
+#define WFX_MAIN_H
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+
+#include "hif_api_general.h"
+
+struct wfx_dev;
+struct wfx_hwbus_ops;
+
+struct wfx_platform_data {
+ /* Keyset and ".sec" extension will be appended to this string */
+ const char *file_fw;
+ const char *file_pds;
+ struct gpio_desc *gpio_wakeup;
+ /* If true, HIF D_out is sampled on the rising edge of the clock (intended to be used with
+ * 50 MHz SDIO)
+ */
+ bool use_rising_clk;
+};
+
+struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata,
+ const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv);
+
+int wfx_probe(struct wfx_dev *wdev);
+void wfx_release(struct wfx_dev *wdev);
+
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c
new file mode 100644
index 000000000000..37f492e5d3be
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/queue.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Queue between the tx operation and the bh workqueue.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wfx.h"
+#include "sta.h"
+#include "data_tx.h"
+#include "traces.h"
+
+void wfx_tx_lock(struct wfx_dev *wdev)
+{
+ atomic_inc(&wdev->tx_lock);
+}
+
+void wfx_tx_unlock(struct wfx_dev *wdev)
+{
+ int tx_lock = atomic_dec_return(&wdev->tx_lock);
+
+ WARN(tx_lock < 0, "inconsistent tx_lock value");
+ if (!tx_lock)
+ wfx_bh_request_tx(wdev);
+}
+
+void wfx_tx_flush(struct wfx_dev *wdev)
+{
+ int ret;
+
+ /* Do not wait for any reply if chip is frozen */
+ if (wdev->chip_frozen)
+ return;
+
+ wfx_tx_lock(wdev);
+ mutex_lock(&wdev->hif_cmd.lock);
+ ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used,
+ msecs_to_jiffies(3000));
+ if (!ret) {
+ dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
+ wdev->hif.tx_buffers_used);
+ wfx_pending_dump_old_frames(wdev, 3000);
+ /* FIXME: drop pending frames here */
+ wdev->chip_frozen = true;
+ }
+ mutex_unlock(&wdev->hif_cmd.lock);
+ wfx_tx_unlock(wdev);
+}
+
+void wfx_tx_lock_flush(struct wfx_dev *wdev)
+{
+ wfx_tx_lock(wdev);
+ wfx_tx_flush(wdev);
+}
+
+void wfx_tx_queues_init(struct wfx_vif *wvif)
+{
+ /* The device is in charge of respecting the details of the QoS parameters. The driver just
+ * ensures that it roughly respects the priorities to avoid any shortage.
+ */
+ const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ skb_queue_head_init(&wvif->tx_queue[i].normal);
+ skb_queue_head_init(&wvif->tx_queue[i].cab);
+ wvif->tx_queue[i].priority = priorities[i];
+ }
+}
+
+bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
+{
+ return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab);
+}
+
+void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
+{
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
+ WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
+ }
+}
+
+static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
+ struct sk_buff_head *skb_queue, struct sk_buff_head *dropped)
+{
+ struct sk_buff *skb, *tmp;
+
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk_safe(skb_queue, skb, tmp) {
+ __skb_unlink(skb, skb_queue);
+ skb_queue_head(dropped, skb);
+ }
+ spin_unlock_bh(&skb_queue->lock);
+}
+
+void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
+ struct sk_buff_head *dropped)
+{
+ __wfx_tx_queue_drop(wvif, &queue->cab, dropped);
+ __wfx_tx_queue_drop(wvif, &queue->normal, dropped);
+ wake_up(&wvif->wdev->tx_dequeue);
+}
+
+void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ skb_queue_tail(&queue->cab, skb);
+ else
+ skb_queue_tail(&queue->normal, skb);
+}
+
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
+{
+ struct wfx_queue *queue;
+ struct wfx_vif *wvif;
+ struct wfx_hif_msg *hif;
+ struct sk_buff *skb;
+
+ WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__);
+ while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
+ hif = (struct wfx_hif_msg *)skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (wvif) {
+ queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ }
+ skb_queue_head(dropped, skb);
+ }
+}
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+{
+ struct wfx_queue *queue;
+ struct wfx_hif_req_tx *req;
+ struct wfx_vif *wvif;
+ struct wfx_hif_msg *hif;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&wdev->tx_pending.lock);
+ skb_queue_walk(&wdev->tx_pending, skb) {
+ hif = (struct wfx_hif_msg *)skb->data;
+ req = (struct wfx_hif_req_tx *)hif->body;
+ if (req->packet_id != packet_id)
+ continue;
+ spin_unlock_bh(&wdev->tx_pending.lock);
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (wvif) {
+ queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ }
+ skb_unlink(skb, &wdev->tx_pending);
+ return skb;
+ }
+ spin_unlock_bh(&wdev->tx_pending.lock);
+ WARN(1, "cannot find packet in pending queue");
+ return NULL;
+}
+
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
+{
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv;
+ struct wfx_hif_req_tx *req;
+ struct sk_buff *skb;
+ bool first = true;
+
+ spin_lock_bh(&wdev->tx_pending.lock);
+ skb_queue_walk(&wdev->tx_pending, skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ req = wfx_skb_txreq(skb);
+ if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) {
+ if (first) {
+ dev_info(wdev->dev, "frames stuck in firmware for %dms or more:\n",
+ limit_ms);
+ first = false;
+ }
+ dev_info(wdev->dev, " id %08x sent %lldms ago\n",
+ req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp));
+ }
+ }
+ spin_unlock_bh(&wdev->tx_pending.lock);
+}
+
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ return ktime_us_delta(now, tx_priv->xmit_timestamp);
+}
+
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ int i;
+
+ if (vif->type != NL80211_IFTYPE_AP)
+ return false;
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ /* Note: since only AP can have mcast frames in queue and only one vif can be AP,
+ * all queued frames have the same interface ID
+ */
+ if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
+ return true;
+ return false;
+}
+
+static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
+{
+ return atomic_read(&queue->pending_frames) * queue->priority;
+}
+
+static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
+{
+ struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
+ int i, j, num_queues = 0;
+ struct wfx_vif *wvif;
+ struct wfx_hif_msg *hif;
+ struct sk_buff *skb;
+
+ /* Sort the queues by ascending weight (pending frames * priority): least loaded first */
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ WARN_ON(num_queues >= ARRAY_SIZE(queues));
+ queues[num_queues] = &wvif->tx_queue[i];
+ for (j = num_queues; j > 0; j--)
+ if (wfx_tx_queue_get_weight(queues[j]) <
+ wfx_tx_queue_get_weight(queues[j - 1]))
+ swap(queues[j - 1], queues[j]);
+ num_queues++;
+ }
+ }
+
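+ /* Serve Content After (DTIM) Beacon queues first while the post-DTIM window is open,
+ * then fall back to the normal queues
+ */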
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ if (!wvif->after_dtim_tx_allowed)
+ continue;
+ for (i = 0; i < num_queues; i++) {
+ skb = skb_dequeue(&queues[i]->cab);
+ if (!skb)
+ continue;
+ /* Note: since only AP can have mcast frames in queue and only one vif can
+ * be AP, all queued frames have the same interface ID
+ */
+ hif = (struct wfx_hif_msg *)skb->data;
+ WARN_ON(hif->interface != wvif->id);
+ WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);
+ atomic_inc(&queues[i]->pending_frames);
+ trace_queues_stats(wdev, queues[i]);
+ return skb;
+ }
+ /* No more multicast frames to send */
+ wvif->after_dtim_tx_allowed = false;
+ schedule_work(&wvif->update_tim_work);
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ skb = skb_dequeue(&queues[i]->normal);
+ if (skb) {
+ atomic_inc(&queues[i]->pending_frames);
+ trace_queues_stats(wdev, queues[i]);
+ return skb;
+ }
+ }
+ return NULL;
+}
+
+struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
+{
+ struct wfx_tx_priv *tx_priv;
+ struct sk_buff *skb;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+ skb = wfx_tx_queues_get_skb(wdev);
+ if (!skb)
+ return NULL;
+ skb_queue_tail(&wdev->tx_pending, skb);
+ wake_up(&wdev->tx_dequeue);
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ return (struct wfx_hif_msg *)skb->data;
+}
diff --git a/drivers/net/wireless/silabs/wfx/queue.h b/drivers/net/wireless/silabs/wfx/queue.h
new file mode 100644
index 000000000000..4731debca93d
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/queue.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Queue between the tx operation and the bh workqueue.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_QUEUE_H
+#define WFX_QUEUE_H
+
+#include <linux/skbuff.h>
+#include <linux/atomic.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_queue {
+ struct sk_buff_head normal;
+ struct sk_buff_head cab; /* Content After (DTIM) Beacon */
+ atomic_t pending_frames;
+ int priority;
+};
+
+void wfx_tx_lock(struct wfx_dev *wdev);
+void wfx_tx_unlock(struct wfx_dev *wdev);
+void wfx_tx_flush(struct wfx_dev *wdev);
+void wfx_tx_lock_flush(struct wfx_dev *wdev);
+
+void wfx_tx_queues_init(struct wfx_vif *wvif);
+void wfx_tx_queues_check_empty(struct wfx_vif *wvif);
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif);
+void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb);
+struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+
+bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue);
+void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
+ struct sk_buff_head *dropped);
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped);
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c
new file mode 100644
index 000000000000..16f619ed22e0
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/scan.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "scan.h"
+#include "wfx.h"
+#include "sta.h"
+#include "hif_tx_mib.h"
+
+static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ ieee80211_scan_completed(hw, &info);
+}
+
+static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct sk_buff *skb;
+
+ skb = ieee80211_probereq_get(wvif->wdev->hw, vif->addr, NULL, 0,
+ req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, req->ie, req->ie_len);
+ wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct ieee80211_channel *ch_start, *ch_cur;
+ int i, ret;
+
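+ /* Group consecutive channels sharing the same max_power and NO_IR policy, so that they
+ * can be covered by a single scan request
+ */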
+ for (i = start_idx; i < req->n_channels; i++) {
+ ch_start = req->channels[start_idx];
+ ch_cur = req->channels[i];
+ WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported");
+ if (ch_cur->max_power != ch_start->max_power)
+ break;
+ if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR)
+ break;
+ }
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->scan_abort = false;
+ reinit_completion(&wvif->scan_complete);
+ ret = wfx_hif_scan(wvif, req, start_idx, i - start_idx);
+ if (ret) {
+ wfx_tx_unlock(wvif->wdev);
+ return -EIO;
+ }
+ ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ);
+ if (!ret) {
+ wfx_hif_stop_scan(wvif);
+ ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ);
+ dev_dbg(wvif->wdev->dev, "scan timeout (%d channels done)\n",
+ wvif->scan_nb_chan_done);
+ }
+ if (!ret) {
+ dev_err(wvif->wdev->dev, "scan didn't stop\n");
+ ret = -ETIMEDOUT;
+ } else if (wvif->scan_abort) {
+ dev_notice(wvif->wdev->dev, "scan abort\n");
+ ret = -ECONNABORTED;
+ } else if (wvif->scan_nb_chan_done > i - start_idx) {
+ ret = -EIO;
+ } else {
+ ret = wvif->scan_nb_chan_done;
+ }
+ if (req->channels[start_idx]->max_power != vif->bss_conf.txpower)
+ wfx_hif_set_output_power(wvif, vif->bss_conf.txpower);
+ wfx_tx_unlock(wvif->wdev);
+ return ret;
+}
+
+/* It is not really necessary to run the scan request asynchronously. However,
+ * there is a bug in "iw scan" when ieee80211_scan_completed() is called before
+ * wfx_hw_scan() returns
+ */
+void wfx_hw_scan_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
+ struct ieee80211_scan_request *hw_req = wvif->scan_req;
+ int chan_cur, ret, err;
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ mutex_lock(&wvif->scan_lock);
+ if (wvif->join_in_progress) {
+ dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN");
+ wfx_reset(wvif);
+ }
+ update_probe_tmpl(wvif, &hw_req->req);
+ chan_cur = 0;
+ err = 0;
+ do {
+ ret = send_scan_req(wvif, &hw_req->req, chan_cur);
+ if (ret > 0) {
+ chan_cur += ret;
+ err = 0;
+ }
+ if (!ret)
+ err++;
+ if (err > 2) {
+ dev_err(wvif->wdev->dev, "scan has not been able to start\n");
+ ret = -ETIMEDOUT;
+ }
+ } while (ret >= 0 && chan_cur < hw_req->req.n_channels);
+ mutex_unlock(&wvif->scan_lock);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
+}
+
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS);
+ wvif->scan_req = hw_req;
+ schedule_work(&wvif->scan_work);
+ return 0;
+}
+
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wvif->scan_abort = true;
+ wfx_hif_stop_scan(wvif);
+}
+
+void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done)
+{
+ wvif->scan_nb_chan_done = nb_chan_done;
+ complete(&wvif->scan_complete);
+}
diff --git a/drivers/net/wireless/silabs/wfx/scan.h b/drivers/net/wireless/silabs/wfx/scan.h
new file mode 100644
index 000000000000..78e3b984f375
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/scan.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_SCAN_H
+#define WFX_SCAN_H
+
+#include <net/mac80211.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+void wfx_hw_scan_work(struct work_struct *work);
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
new file mode 100644
index 000000000000..626dfb4b7a55
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "sta.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "bh.h"
+#include "key.h"
+#include "scan.h"
+#include "debug.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
+
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+{
+ int i;
+ u32 ret = 0;
+ /* The device only supports 2GHz */
+ struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (rates & BIT(i)) {
+ if (i >= sband->n_bitrates)
+ dev_warn(wdev->dev, "unsupported basic rate\n");
+ else
+ ret |= BIT(sband->bitrates[i].hw_value);
+ }
+ }
+ return ret;
+}
+
+void wfx_cooling_timeout_work(struct work_struct *work)
+{
+ struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev,
+ cooling_timeout_work);
+
+ wdev->chip_frozen = true;
+ wfx_tx_unlock(wdev);
+}
+
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd)
+{
+ if (cmd == STA_NOTIFY_AWAKE) {
+ /* Device recovered its normal temperature */
+ if (cancel_delayed_work(&wdev->cooling_timeout_work))
+ wfx_tx_unlock(wdev);
+ } else {
+ /* Device is too hot */
+ schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ);
+ wfx_tx_lock(wdev);
+ }
+}
+
+static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
+{
+ static const struct wfx_hif_ie_table_entry filter_ies[] = {
+ {
+ .ie_id = WLAN_EID_VENDOR_SPECIFIC,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ .oui = { 0x50, 0x6F, 0x9A },
+ }, {
+ .ie_id = WLAN_EID_HT_OPERATION,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }, {
+ .ie_id = WLAN_EID_ERP_INFO,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }, {
+ .ie_id = WLAN_EID_CHANNEL_SWITCH,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }
+ };
+
+ if (!filter_beacon) {
+ wfx_hif_beacon_filter_control(wvif, 0, 1);
+ } else {
+ wfx_hif_set_beacon_filter_table(wvif, ARRAY_SIZE(filter_ies), filter_ies);
+ wfx_hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0);
+ }
+}
+
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused)
+{
+ bool filter_bssid, filter_prbreq, filter_beacon;
+ struct ieee80211_vif *vif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = NULL;
+
+ /* Notes:
+ * - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
+ * - PS-Poll (FIF_PSPOLL) are never filtered
+ * - RTS, CTS and Ack (FIF_CONTROL) are always filtered
+ * - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
+ * - Firmware does not (yet) allow forwarding unicast traffic sent to other stations
+ * (aka promiscuous mode)
+ */
+ *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS |
+ FIF_PROBE_REQ | FIF_PSPOLL;
+
+ mutex_lock(&wdev->conf_mutex);
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ mutex_lock(&wvif->scan_lock);
+
+ /* Note: FIF_BCN_PRBRESP_PROMISC covers probe responses and
+ * beacons from other BSSes
+ */
+ if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+ filter_beacon = false;
+ else
+ filter_beacon = true;
+ wfx_filter_beacon(wvif, filter_beacon);
+
+ if (*total_flags & FIF_OTHER_BSS)
+ filter_bssid = false;
+ else
+ filter_bssid = true;
+
+ vif = wvif_to_vif(wvif);
+ /* In AP mode, the chip can reply to probe requests itself */
+ if (*total_flags & FIF_PROBE_REQ && vif->type == NL80211_IFTYPE_AP) {
+ dev_dbg(wdev->dev, "do not forward probe request in AP mode\n");
+ *total_flags &= ~FIF_PROBE_REQ;
+ }
+
+ if (*total_flags & FIF_PROBE_REQ)
+ filter_prbreq = false;
+ else
+ filter_prbreq = true;
+ wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq);
+
+ mutex_unlock(&wvif->scan_lock);
+ }
+ mutex_unlock(&wdev->conf_mutex);
+}
+
+static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
+{
+ struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
+ struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+
+ WARN(!vif->cfg.assoc && enable_ps,
+ "enable_ps is reliable only if associated");
+ if (wdev_to_wvif(wvif->wdev, 0)) {
+ struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
+ struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
+
+ chan0 = vif_ch0->bss_conf.chandef.chan;
+ }
+ if (wdev_to_wvif(wvif->wdev, 1)) {
+ struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
+ struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
+
+ chan1 = vif_ch1->bss_conf.chandef.chan;
+ }
+ if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
+ if (chan0->hw_value == chan1->hw_value) {
+ /* It is useless to enable PS if channels are the same. */
+ if (enable_ps)
+ *enable_ps = false;
+ if (vif->cfg.assoc && vif->cfg.ps)
+ dev_info(wvif->wdev->dev, "ignoring requested PS mode");
+ return -1;
+ }
+ /* It is necessary to enable PS if channels are different. */
+ if (enable_ps)
+ *enable_ps = true;
+ if (wfx_api_older_than(wvif->wdev, 3, 2))
+ return 0;
+ else
+ return 30;
+ }
+ if (enable_ps)
+ *enable_ps = vif->cfg.ps;
+ if (vif->cfg.assoc && vif->cfg.ps)
+ return conf->dynamic_ps_timeout;
+ else
+ return -1;
+}
+
+int wfx_update_pm(struct wfx_vif *wvif)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ int ps_timeout;
+ bool ps;
+
+ if (!vif->cfg.assoc)
+ return 0;
+ ps_timeout = wfx_get_ps_timeout(wvif, &ps);
+ if (!ps)
+ ps_timeout = 0;
+ WARN_ON(ps_timeout < 0);
+ if (wvif->uapsd_mask)
+ ps_timeout = 0;
+
+ if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete, TU_TO_JIFFIES(512)))
+ dev_warn(wvif->wdev->dev, "timeout while waiting of set_pm_mode_complete\n");
+ return wfx_hif_set_pm(wvif, ps, ps_timeout);
+}
+
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ int old_uapsd = wvif->uapsd_mask;
+
+ WARN_ON(queue >= hw->queues);
+
+ mutex_lock(&wdev->conf_mutex);
+ assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
+ wfx_hif_set_edca_queue_params(wvif, queue, params);
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ old_uapsd != wvif->uapsd_mask) {
+ wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask);
+ wfx_update_pm(wvif);
+ }
+ mutex_unlock(&wdev->conf_mutex);
+ return 0;
+}
+
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = NULL;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_hif_rts_threshold(wvif, value);
+ return 0;
+}
+
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
+{
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
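+ * e.g. a raw RCPI of 140 gives 140 / 2 - 110 = -40 dBm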
+ */
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ int rcpi_rssi;
+ int cqm_evt;
+
+ rcpi_rssi = raw_rcpi_rssi / 2 - 110;
+ if (rcpi_rssi <= vif->bss_conf.cqm_rssi_thold)
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ ieee80211_cqm_rssi_notify(vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
+}
+
+static void wfx_beacon_loss_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif,
+ beacon_loss_work);
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ ieee80211_beacon_loss(vif);
+ schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int));
+}
+
+void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wfx_hif_wep_default_key_id(wvif, idx);
+}
+
+void wfx_reset(struct wfx_vif *wvif)
+{
+ struct wfx_dev *wdev = wvif->wdev;
+
+ wfx_tx_lock_flush(wdev);
+ wfx_hif_reset(wvif, false);
+ wfx_tx_policy_init(wvif);
+ if (wvif_count(wdev) <= 1)
+ wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wfx_tx_unlock(wdev);
+ wvif->join_in_progress = false;
+ cancel_delayed_work_sync(&wvif->beacon_loss_work);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_update_pm(wvif);
+}
+
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+
+ sta_priv->vif_id = wvif->id;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wfx_hif_set_mfp(wvif, sta->mfp, sta->mfp);
+
+ /* In station mode, the firmware interprets a new link ID as a TDLS peer */
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ return 0;
+ sta_priv->link_id = ffz(wvif->link_id_map);
+ wvif->link_id_map |= BIT(sta_priv->link_id);
+ WARN_ON(!sta_priv->link_id);
+ WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX);
+ wfx_hif_map_link(wvif, false, sta->addr, sta_priv->link_id, sta->mfp);
+
+ return 0;
+}
+
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+
+ /* See note in wfx_sta_add() */
+ if (!sta_priv->link_id)
+ return 0;
+ /* FIXME add a mutex? */
+ wfx_hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false);
+ wvif->link_id_map &= ~BIT(sta_priv->link_id);
+ return 0;
+}
+
+static int wfx_upload_ap_templates(struct wfx_vif *wvif)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
+ if (!skb)
+ return -ENOMEM;
+ wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS);
+ dev_kfree_skb(skb);
+
+ skb = ieee80211_proberesp_get(wvif->wdev->hw, vif);
+ if (!skb)
+ return -ENOMEM;
+ wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void wfx_set_mfp_ap(struct wfx_vif *wvif)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
+ const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+ skb->len - ieoffset);
+ const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ const int akm_suite_size = 4 / sizeof(u16);
+
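+ /* Walk the RSN IE: the 8-byte offset skips the IE header, version and group cipher suite
+ * to reach the pairwise cipher suite count; the suite lists are then skipped to land on
+ * the RSN capabilities field, where bit 7 is MFPC and bit 6 is MFPR
+ */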
+ if (ptr) {
+ ptr += pairwise_cipher_suite_count_offset;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+ return;
+ ptr += 1 + pairwise_cipher_suite_size * *ptr;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+ return;
+ ptr += 1 + akm_suite_size * *ptr;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+ return;
+ wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
+ }
+}
+
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_dev *wdev = wvif->wdev;
+ int ret;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_update_pm(wvif);
+ wvif = (struct wfx_vif *)vif->drv_priv;
+ wfx_upload_ap_templates(wvif);
+ ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel);
+ if (ret > 0)
+ return -EIO;
+ wfx_set_mfp_ap(wvif);
+ return ret;
+}
+
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wfx_reset(wvif);
+}
+
+static void wfx_join(struct wfx_vif *wvif)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct ieee80211_bss_conf *conf = &vif->bss_conf;
+ struct cfg80211_bss *bss = NULL;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ const u8 *ssid_ie = NULL;
+ int ssid_len = 0;
+ int ret;
+
+ wfx_tx_lock_flush(wvif->wdev);
+
+ bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel, conf->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (!bss && !vif->cfg.ibss_joined) {
+ wfx_tx_unlock(wvif->wdev);
+ return;
+ }
+
+ rcu_read_lock(); /* protect ssid_ie */
+ if (bss)
+ ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssid_ie) {
+ ssid_len = ssid_ie[1];
+ if (ssid_len > IEEE80211_MAX_SSID_LEN)
+ ssid_len = IEEE80211_MAX_SSID_LEN;
+ memcpy(ssid, &ssid_ie[2], ssid_len);
+ }
+ rcu_read_unlock();
+
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
+
+ wvif->join_in_progress = true;
+ ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssid_len);
+ if (ret) {
+ ieee80211_connection_loss(vif);
+ wfx_reset(wvif);
+ } else {
+ /* Due to beacon filtering it is possible that the AP's beacon is not known to the
+ * mac80211 stack. Disable filtering temporarily to make sure the stack receives at
+ * least one
+ */
+ wfx_filter_beacon(wvif, false);
+ }
+ wfx_tx_unlock(wvif->wdev);
+}
+
+static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct ieee80211_sta *sta = NULL;
+ int ampdu_density = 0;
+ bool greenfield = false;
+
+ rcu_read_lock(); /* protect sta */
+ if (info->bssid && !vif->cfg.ibss_joined)
+ sta = ieee80211_find_sta(vif, info->bssid);
+ if (sta && sta->deflink.ht_cap.ht_supported)
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ if (sta && sta->deflink.ht_cap.ht_supported &&
+ !(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+ greenfield = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ rcu_read_unlock();
+
+ wvif->join_in_progress = false;
+ wfx_hif_set_association_mode(wvif, ampdu_density, greenfield, info->use_short_preamble);
+ wfx_hif_keep_alive_period(wvif, 0);
+ /* beacon_loss_count is defined as 7 in net/mac80211/mlme.c. Let's use the same value. */
+ wfx_hif_set_bss_params(wvif, vif->cfg.aid, 7);
+ wfx_hif_set_beacon_wakeup_period(wvif, 1, 1);
+ wfx_update_pm(wvif);
+}
+
+int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wfx_upload_ap_templates(wvif);
+ wfx_join(wvif);
+ return 0;
+}
+
+void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wfx_reset(wvif);
+}
+
+static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable)
+{
+ /* The driver has Content After DTIM Beacon frames queued and is waiting for a signal from
+ * the firmware. Since we are going to stop sending beacons, this signal will never happen.
+ * See also wfx_suspend_resume_mc()
+ */
+ if (!enable && wfx_tx_queues_has_cab(wvif)) {
+ wvif->after_dtim_tx_allowed = true;
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ wfx_hif_beacon_transmit(wvif, enable);
+}
+
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u64 changed)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ int i;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ if (changed & BSS_CHANGED_BASIC_RATES ||
+ changed & BSS_CHANGED_BEACON_INT ||
+ changed & BSS_CHANGED_BSSID) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wfx_join(wvif);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc || vif->cfg.ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else if (!vif->cfg.assoc && vif->type == NL80211_IFTYPE_STATION)
+ wfx_reset(wvif);
+ else
+ dev_warn(wdev->dev, "misunderstood change: ASSOC\n");
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ if (vif->type != NL80211_IFTYPE_STATION)
+ dev_warn(wdev->dev, "misunderstood change: BEACON_INFO\n");
+ wfx_hif_set_beacon_wakeup_period(wvif, info->dtim_period, info->dtim_period);
+ /* Beacons were temporarily forwarded for the join process. This is no longer necessary. */
+ wfx_filter_beacon(wvif, true);
+ }
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
+ __be32 *arp_addr = &vif->cfg.arp_addr_list[i];
+
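+ /* A NULL address presumably clears the entry; if more addresses are configured than
+ * the firmware table can hold, ARP filtering is disabled entirely
+ */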
+ if (vif->cfg.arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ arp_addr = NULL;
+ if (i >= vif->cfg.arp_addr_cnt)
+ arp_addr = NULL;
+ wfx_hif_set_arp_ipv4_filter(wvif, i, arp_addr);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP || changed & BSS_CHANGED_BEACON)
+ wfx_upload_ap_templates(wvif);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ wfx_enable_beacon(wvif, info->enable_beacon);
+
+ if (changed & BSS_CHANGED_KEEP_ALIVE)
+ wfx_hif_keep_alive_period(wvif,
+ info->max_idle_period * USEC_PER_TU / USEC_PER_MSEC);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
+ wfx_hif_erp_use_protection(wvif, info->use_cts_prot);
+
+ if (changed & BSS_CHANGED_ERP_SLOT)
+ wfx_hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
+
+ if (changed & BSS_CHANGED_CQM)
+ wfx_hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold, info->cqm_rssi_hyst);
+
+ if (changed & BSS_CHANGED_TXPOWER)
+ wfx_hif_set_output_power(wvif, info->txpower);
+
+ if (changed & BSS_CHANGED_PS)
+ wfx_update_pm(wvif);
+
+ mutex_unlock(&wdev->conf_mutex);
+}
+
+static int wfx_update_tim(struct wfx_vif *wvif)
+{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct sk_buff *skb;
+ u16 tim_offset, tim_length;
+ u8 *tim_ptr;
+
+ skb = ieee80211_beacon_get_tim(wvif->wdev->hw, vif, &tim_offset,
+ &tim_length, 0);
+ if (!skb)
+ return -ENOENT;
+ tim_ptr = skb->data + tim_offset;
+
+ if (tim_offset && tim_length >= 6) {
+ /* Firmware handles DTIM counter internally */
+ tim_ptr[2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (wfx_tx_queues_has_cab(wvif))
+ tim_ptr[4] |= 1;
+ else
+ tim_ptr[4] &= ~1;
+ }
+
+ wfx_hif_update_ie_beacon(wvif, tim_ptr, tim_length);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void wfx_update_tim_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work);
+
+ wfx_update_tim(wvif);
+}
+
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+ schedule_work(&wvif->update_tim_work);
+ return 0;
+}
+
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
+{
+ struct wfx_vif *wvif_it;
+
+ if (notify_cmd != STA_NOTIFY_AWAKE)
+ return;
+
+ /* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to
+ * skip this DTIM and wait for the next one.
+ */
+ wvif_it = NULL;
+ while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL)
+ if (mutex_is_locked(&wvif_it->scan_lock))
+ return;
+
+ if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed)
+ dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)",
+ wfx_tx_queues_has_cab(wvif));
+ wvif->after_dtim_tx_allowed = true;
+ wfx_bh_request_tx(wvif->wdev);
+}
+
+int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ /* Aggregation is implemented fully in firmware */
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* Just acknowledge it to enable frame re-ordering */
+ return 0;
+ default:
+ /* Let the firmware handle TX aggregation on its own */
+ return -EOPNOTSUPP;
+ }
+}
+
+int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf)
+{
+ return 0;
+}
+
+void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf)
+{
+}
+
+void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed)
+{
+}
+
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel, "channel overwrite");
+ wvif->channel = ch;
+
+ return 0;
+}
+
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel != ch, "channel mismatch");
+ wvif->channel = NULL;
+}
+
+int wfx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ int i;
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ wvif->wdev = wdev;
+
+ wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */
+ INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
+ INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work);
+
+ init_completion(&wvif->set_pm_mode_complete);
+ complete(&wvif->set_pm_mode_complete);
+ INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+ mutex_init(&wvif->scan_lock);
+ init_completion(&wvif->scan_complete);
+ INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
+
+ wfx_tx_queues_init(wvif);
+ wfx_tx_policy_init(wvif);
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ if (!wdev->vif[i]) {
+ wdev->vif[i] = vif;
+ wvif->id = i;
+ break;
+ }
+ }
+ WARN(i == ARRAY_SIZE(wdev->vif), "try to instantiate more vif than supported");
+
+ wfx_hif_set_macaddr(wvif, vif->addr);
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ /* Combo mode does not support Block Acks. Re-enable them only when a single vif is in use */
+ if (wvif_count(wdev) == 1)
+ wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00);
+ }
+ return 0;
+}
+
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
+ wfx_tx_queues_check_empty(wvif);
+
+ mutex_lock(&wdev->conf_mutex);
+ WARN(wvif->link_id_map != 1, "corrupted state");
+
+ wfx_hif_reset(wvif, false);
+ wfx_hif_set_macaddr(wvif, NULL);
+ wfx_tx_policy_init(wvif);
+
+ cancel_delayed_work_sync(&wvif->beacon_loss_work);
+ wdev->vif[wvif->id] = NULL;
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ /* Combo mode does not support Block Acks. Re-enable them only when a single vif is in use */
+ if (wvif_count(wdev) == 1)
+ wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00);
+ }
+}
+
+int wfx_start(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+
+void wfx_stop(struct ieee80211_hw *hw)
+{
+ struct wfx_dev *wdev = hw->priv;
+
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
+}
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
new file mode 100644
index 000000000000..888db5cd3206
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_STA_H
+#define WFX_STA_H
+
+#include <net/mac80211.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_sta_priv {
+ int link_id;
+ int vif_id;
+};
+
+/* mac80211 interface */
+int wfx_start(struct ieee80211_hw *hw);
+void wfx_stop(struct ieee80211_hw *hw);
+int wfx_config(struct ieee80211_hw *hw, u32 changed);
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx);
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused);
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u64 changed);
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta);
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta);
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta);
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf);
+void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf);
+void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed);
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *conf);
+
+/* Hardware API Callbacks */
+void wfx_cooling_timeout_work(struct work_struct *work);
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd);
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi);
+int wfx_update_pm(struct wfx_vif *wvif);
+
+/* Other Helpers */
+void wfx_reset(struct wfx_vif *wvif);
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
+
+#endif
diff --git a/drivers/net/wireless/silabs/wfx/traces.h b/drivers/net/wireless/silabs/wfx/traces.h
new file mode 100644
index 000000000000..e011e8a46bd5
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/traces.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints definitions.
+ *
+ * Copyright (c) 2018-2020, Silicon Laboratories, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wfx
+
+#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _WFX_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+
+#include "bus.h"
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+/* The hell below needs some explanation. For each symbolic number, we need to define it with
+ * TRACE_DEFINE_ENUM() and list it for __print_symbolic().
+ *
+ * 1. Define a new macro that calls TRACE_DEFINE_ENUM():
+ *
+ * #define xxx_name(sym) TRACE_DEFINE_ENUM(sym);
+ *
+ * 2. Define the list of all symbols:
+ *
+ * #define list_names \
+ * ... \
+ * xxx_name(XXX) \
+ * ...
+ *
+ * 3. Instantiate list_names:
+ *
+ * list_names
+ *
+ * 4. Redefine xxx_name() as an array entry for __print_symbolic():
+ *
+ * #undef xxx_name
+ * #define xxx_name(msg) { msg, #msg },
+ *
+ * 5. list_names can now almost be used with __print_symbolic(), but __print_symbolic()
+ * dislikes the trailing comma of the list. So we define a new list with a dummy element:
+ *
+ * #define list_for_print_symbolic list_names { -1, NULL }
+ */
+
+#define _hif_msg_list \
+ hif_cnf_name(ADD_KEY) \
+ hif_cnf_name(BEACON_TRANSMIT) \
+ hif_cnf_name(EDCA_QUEUE_PARAMS) \
+ hif_cnf_name(JOIN) \
+ hif_cnf_name(MAP_LINK) \
+ hif_cnf_name(READ_MIB) \
+ hif_cnf_name(REMOVE_KEY) \
+ hif_cnf_name(RESET) \
+ hif_cnf_name(SET_BSS_PARAMS) \
+ hif_cnf_name(SET_PM_MODE) \
+ hif_cnf_name(START) \
+ hif_cnf_name(START_SCAN) \
+ hif_cnf_name(STOP_SCAN) \
+ hif_cnf_name(TX) \
+ hif_cnf_name(MULTI_TRANSMIT) \
+ hif_cnf_name(UPDATE_IE) \
+ hif_cnf_name(WRITE_MIB) \
+ hif_cnf_name(CONFIGURATION) \
+ hif_cnf_name(CONTROL_GPIO) \
+ hif_cnf_name(PREVENT_ROLLBACK) \
+ hif_cnf_name(SET_SL_MAC_KEY) \
+ hif_cnf_name(SL_CONFIGURE) \
+ hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_cnf_name(SHUT_DOWN) \
+ hif_ind_name(EVENT) \
+ hif_ind_name(JOIN_COMPLETE) \
+ hif_ind_name(RX) \
+ hif_ind_name(SCAN_CMPL) \
+ hif_ind_name(SET_PM_MODE_CMPL) \
+ hif_ind_name(SUSPEND_RESUME_TX) \
+ hif_ind_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_ind_name(ERROR) \
+ hif_ind_name(EXCEPTION) \
+ hif_ind_name(GENERIC) \
+ hif_ind_name(WAKEUP) \
+ hif_ind_name(STARTUP)
+
+#define hif_msg_list_enum _hif_msg_list
+
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg);
+#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg);
+hif_msg_list_enum
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg },
+#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg },
+#define hif_msg_list hif_msg_list_enum { -1, NULL }
+
+#define _hif_mib_list \
+ hif_mib_name(ARP_IP_ADDRESSES_TABLE) \
+ hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \
+ hif_mib_name(BEACON_FILTER_ENABLE) \
+ hif_mib_name(BEACON_FILTER_TABLE) \
+ hif_mib_name(BEACON_STATS) \
+ hif_mib_name(BEACON_WAKEUP_PERIOD) \
+ hif_mib_name(BLOCK_ACK_POLICY) \
+ hif_mib_name(CCA_CONFIG) \
+ hif_mib_name(CONFIG_DATA_FILTER) \
+ hif_mib_name(COUNTERS_TABLE) \
+ hif_mib_name(CURRENT_TX_POWER_LEVEL) \
+ hif_mib_name(DOT11_MAC_ADDRESS) \
+ hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \
+ hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \
+ hif_mib_name(DOT11_RTS_THRESHOLD) \
+ hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \
+ hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
+ hif_mib_name(EXTENDED_COUNTERS_TABLE) \
+ hif_mib_name(GL_BLOCK_ACK_INFO) \
+ hif_mib_name(GL_OPERATIONAL_POWER_MODE) \
+ hif_mib_name(GL_SET_MULTI_MSG) \
+ hif_mib_name(GRP_SEQ_COUNTER) \
+ hif_mib_name(INACTIVITY_TIMER) \
+ hif_mib_name(INTERFACE_PROTECTION) \
+ hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(KEEP_ALIVE_PERIOD) \
+ hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
+ hif_mib_name(MAX_TX_POWER_LEVEL) \
+ hif_mib_name(NON_ERP_PROTECTION) \
+ hif_mib_name(NS_IP_ADDRESSES_TABLE) \
+ hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \
+ hif_mib_name(PORT_DATAFRAME_CONDITION) \
+ hif_mib_name(PROTECTED_MGMT_POLICY) \
+ hif_mib_name(RCPI_RSSI_THRESHOLD) \
+ hif_mib_name(RX_FILTER) \
+ hif_mib_name(SET_ASSOCIATION_MODE) \
+ hif_mib_name(SET_DATA_FILTERING) \
+ hif_mib_name(SET_HT_PROTECTION) \
+ hif_mib_name(SET_TX_RATE_RETRY_POLICY) \
+ hif_mib_name(SET_UAPSD_INFORMATION) \
+ hif_mib_name(SLOT_TIME) \
+ hif_mib_name(STATISTICS_TABLE) \
+ hif_mib_name(TEMPLATE_FRAME) \
+ hif_mib_name(TSF_COUNTER) \
+ hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION)
+
+#define hif_mib_list_enum _hif_mib_list
+
+#undef hif_mib_name
+#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib);
+hif_mib_list_enum
+#undef hif_mib_name
+#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib },
+#define hif_mib_list hif_mib_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(hif_data,
+ TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv),
+ TP_STRUCT__entry(
+ __field(int, tx_fill_level)
+ __field(int, msg_id)
+ __field(const char *, msg_type)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __field(int, if_id)
+ __field(int, mib)
+ __array(u8, buf, 128)
+ ),
+ TP_fast_assign(
+ int header_len;
+
+ __entry->tx_fill_level = tx_fill_level;
+ __entry->msg_len = le16_to_cpu(hif->len);
+ __entry->msg_id = hif->id;
+ __entry->if_id = hif->interface;
+ if (is_recv)
+ __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF";
+ else
+ __entry->msg_type = "REQ";
+ if (!is_recv &&
+ (__entry->msg_id == HIF_REQ_ID_READ_MIB ||
+ __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
+ __entry->mib = le16_to_cpup((__le16 *)hif->body);
+ header_len = 4;
+ } else {
+ __entry->mib = -1;
+ header_len = 0;
+ }
+ __entry->buf_len = min_t(int, __entry->msg_len, sizeof(__entry->buf))
+ - sizeof(struct wfx_hif_msg) - header_len;
+ memcpy(__entry->buf, hif->body + header_len, __entry->buf_len);
+ ),
+ TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)",
+ __entry->tx_fill_level,
+ __entry->if_id,
+ __entry->msg_type,
+ __print_symbolic(__entry->msg_id, hif_msg_list),
+ __entry->mib != -1 ? "/" : "",
+ __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(hif_data, hif_send,
+ TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_send(hif, tx_fill_level)\
+ trace_hif_send(hif, tx_fill_level, false)
+DEFINE_EVENT(hif_data, hif_recv,
+ TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_recv(hif, tx_fill_level)\
+ trace_hif_recv(hif, tx_fill_level, true)
+
+#define wfx_reg_list_enum \
+ wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \
+ wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \
+ wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \
+ wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \
+ wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \
+ wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \
+ wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \
+ wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT")
+
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym);
+wfx_reg_list_enum
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) { sym, name },
+#define wfx_reg_list wfx_reg_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(io_data,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __array(u8, buf, 32)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->msg_len = len;
+ __entry->buf_len = min_t(int, sizeof(__entry->buf), __entry->msg_len);
+ memcpy(__entry->buf, io_buf, __entry->buf_len);
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %s%s (%d bytes)",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(io_data, io_write,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_write(reg, addr, io_buf, len)\
+ trace_io_write(reg, addr, io_buf, len)
+#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len)
+DEFINE_EVENT(io_data, io_read,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_read(reg, addr, io_buf, len)\
+ trace_io_read(reg, addr, io_buf, len)
+#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len)
+
+DECLARE_EVENT_CLASS(io_data32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, val)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->val = val;
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %08x",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __entry->val
+ )
+);
+DEFINE_EVENT(io_data32, io_write32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val)
+#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val)
+DEFINE_EVENT(io_data32, io_read32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val)
+#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val)
+
+DECLARE_EVENT_CLASS(piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored),
+ TP_STRUCT__entry(
+ __field(int, val)
+ __field(bool, ignored)
+ ),
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->ignored = ignored;
+ ),
+ TP_printk("CONTROL: %08x%s",
+ __entry->val,
+ __entry->ignored ? " (ignored)" : ""
+ )
+);
+DEFINE_EVENT(piggyback, piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored));
+#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored)
+
+TRACE_EVENT(bh_stats,
+ TP_PROTO(int ind, int req, int cnf, int busy, bool release),
+ TP_ARGS(ind, req, cnf, busy, release),
+ TP_STRUCT__entry(
+ __field(int, ind)
+ __field(int, req)
+ __field(int, cnf)
+ __field(int, busy)
+ __field(bool, release)
+ ),
+ TP_fast_assign(
+ __entry->ind = ind;
+ __entry->req = req;
+ __entry->cnf = cnf;
+ __entry->busy = busy;
+ __entry->release = release;
+ ),
+ TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s",
+ __entry->ind,
+ __entry->req,
+ __entry->cnf,
+ __entry->busy,
+ __entry->release ? "release" : "keep"
+ )
+);
+#define _trace_bh_stats(ind, req, cnf, busy, release)\
+ trace_bh_stats(ind, req, cnf, busy, release)
+
+TRACE_EVENT(tx_stats,
+ TP_PROTO(const struct wfx_hif_cnf_tx *tx_cnf, const struct sk_buff *skb,
+ int delay),
+ TP_ARGS(tx_cnf, skb, delay),
+ TP_STRUCT__entry(
+ __field(int, pkt_id)
+ __field(int, delay_media)
+ __field(int, delay_queue)
+ __field(int, delay_fw)
+ __field(int, ack_failures)
+ __field(int, flags)
+ __array(int, rate, 4)
+ __array(int, tx_count, 4)
+ ),
+ TP_fast_assign(
+ /* Keep in sync with the wfx_rates definition in main.c */
+ static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13 };
+ const struct ieee80211_tx_info *tx_info =
+ (const struct ieee80211_tx_info *)skb->cb;
+ const struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ int i;
+
+ __entry->pkt_id = tx_cnf->packet_id;
+ __entry->delay_media = le32_to_cpu(tx_cnf->media_delay);
+ __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay);
+ __entry->delay_fw = delay;
+ __entry->ack_failures = tx_cnf->ack_failures;
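+ /* The firmware presumably does not count the final attempt in ack_failures; adjust so
+ * the trace reports a total attempt count
+ */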
+ if (!tx_cnf->status || __entry->ack_failures)
+ __entry->ack_failures += 1;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->rate[i] = rates[i].idx;
+ else
+ __entry->rate[i] = hw_rate[rates[i].idx];
+ __entry->tx_count[i] = rates[i].count;
+ }
+ __entry->flags = 0;
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->flags |= 0x01;
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ __entry->flags |= 0x02;
+ if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ __entry->flags |= 0x04;
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ __entry->flags |= 0x08;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ __entry->flags |= 0x10;
+ if (tx_cnf->status)
+ __entry->flags |= 0x20;
+ if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE)
+ __entry->flags |= 0x40;
+ ),
+ TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
+ __entry->pkt_id,
+ __print_flags(__entry->flags, NULL,
+ { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" }, { 0x08, "R" },
+ { 0x10, "D" }, { 0x20, "F" }, { 0x40, "Q" }),
+ __entry->rate[0],
+ __entry->tx_count[0],
+ __entry->rate[1],
+ __entry->tx_count[1],
+ __entry->rate[2],
+ __entry->tx_count[2],
+ __entry->rate[3],
+ __entry->tx_count[3],
+ __entry->ack_failures,
+ __entry->delay_media,
+ __entry->delay_queue,
+ __entry->delay_fw
+ )
+);
+#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay)
+
+TRACE_EVENT(queues_stats,
+ TP_PROTO(struct wfx_dev *wdev, const struct wfx_queue *elected_queue),
+ TP_ARGS(wdev, elected_queue),
+ TP_STRUCT__entry(
+ __field(int, vif_id)
+ __field(int, queue_id)
+ __array(int, hw, IEEE80211_NUM_ACS * 2)
+ __array(int, drv, IEEE80211_NUM_ACS * 2)
+ __array(int, cab, IEEE80211_NUM_ACS * 2)
+ ),
+ TP_fast_assign(
+ const struct wfx_queue *queue;
+ struct wfx_vif *wvif;
+ int i, j;
+
+ for (j = 0; j < IEEE80211_NUM_ACS * 2; j++) {
+ __entry->hw[j] = -1;
+ __entry->drv[j] = -1;
+ __entry->cab[j] = -1;
+ }
+ __entry->vif_id = -1;
+ __entry->queue_id = -1;
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ j = wvif->id * IEEE80211_NUM_ACS + i;
+ WARN_ON(j >= IEEE80211_NUM_ACS * 2);
+ queue = &wvif->tx_queue[i];
+ __entry->hw[j] = atomic_read(&queue->pending_frames);
+ __entry->drv[j] = skb_queue_len(&queue->normal);
+ __entry->cab[j] = skb_queue_len(&queue->cab);
+ if (queue == elected_queue) {
+ __entry->vif_id = wvif->id;
+ __entry->queue_id = i;
+ }
+ }
+ }
+ ),
+ TP_printk("got skb from %d/%d, pend. hw/norm/cab: [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ] [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ]",
+ __entry->vif_id, __entry->queue_id,
+ __entry->hw[0], __entry->drv[0], __entry->cab[0],
+ __entry->hw[1], __entry->drv[1], __entry->cab[1],
+ __entry->hw[2], __entry->drv[2], __entry->cab[2],
+ __entry->hw[3], __entry->drv[3], __entry->cab[3],
+ __entry->hw[4], __entry->drv[4], __entry->cab[4],
+ __entry->hw[5], __entry->drv[5], __entry->cab[5],
+ __entry->hw[6], __entry->drv[6], __entry->cab[6],
+ __entry->hw[7], __entry->drv[7], __entry->cab[7]
+ )
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE traces
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h
new file mode 100644
index 000000000000..13ba84b3b2c3
--- /dev/null
+++ b/drivers/net/wireless/silabs/wfx/wfx.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common private data.
+ *
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_H
+#define WFX_H
+
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "data_tx.h"
+#include "main.h"
+#include "queue.h"
+#include "hif_tx.h"
+
+#define USEC_PER_TXOP 32 /* see struct ieee80211_tx_queue_params */
+#define USEC_PER_TU 1024
+
+struct wfx_hwbus_ops;
+
+struct wfx_dev {
+ struct wfx_platform_data pdata;
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif[2];
+ struct mac_address addresses[2];
+ const struct wfx_hwbus_ops *hwbus_ops;
+ void *hwbus_priv;
+
+ u8 keyset;
+ struct completion firmware_ready;
+ struct wfx_hif_ind_startup hw_caps;
+ struct wfx_hif hif;
+ struct delayed_work cooling_timeout_work;
+ bool poll_irq;
+ bool chip_frozen;
+ struct mutex conf_mutex;
+
+ struct wfx_hif_cmd hif_cmd;
+ struct sk_buff_head tx_pending;
+ wait_queue_head_t tx_dequeue;
+ atomic_t tx_lock;
+
+ atomic_t packet_id;
+ u32 key_map;
+
+ struct wfx_hif_rx_stats rx_stats;
+ struct mutex rx_stats_lock;
+ struct wfx_hif_tx_power_loop_info tx_power_loop_info;
+ struct mutex tx_power_loop_info_lock;
+ struct workqueue_struct *bh_wq;
+};
+
+struct wfx_vif {
+ struct wfx_dev *wdev;
+ struct ieee80211_channel *channel;
+ int id;
+
+ u32 link_id_map;
+
+ bool after_dtim_tx_allowed;
+ bool join_in_progress;
+
+ struct delayed_work beacon_loss_work;
+
+ struct wfx_queue tx_queue[4];
+ struct wfx_tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+
+ struct work_struct update_tim_work;
+
+ unsigned long uapsd_mask;
+
+ /* avoid running some operations in parallel with a scan */
+ struct mutex scan_lock;
+ struct work_struct scan_work;
+ struct completion scan_complete;
+ int scan_nb_chan_done;
+ bool scan_abort;
+ struct ieee80211_scan_request *scan_req;
+
+ struct completion set_pm_mode_complete;
+};
+
+static inline struct ieee80211_vif *wvif_to_vif(struct wfx_vif *wvif)
+{
+ return container_of((void *)wvif, struct ieee80211_vif, drv_priv);
+}
+
+static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
+{
+ if (vif_id >= ARRAY_SIZE(wdev->vif)) {
+ dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id);
+ return NULL;
+ }
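+ /* array_index_nospec() hardens the index against speculative out-of-bounds access */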
+ vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
+ if (!wdev->vif[vif_id])
+ return NULL;
+ return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv;
+}
+
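+/* Iterate over instantiated vifs: pass NULL to get the first one, then pass the previous
+ * return value to get the next. Returns NULL after the last vif.
+ */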
+static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev, struct wfx_vif *cur)
+{
+ int i;
+ int mark = 0;
+ struct wfx_vif *tmp;
+
+ if (!cur)
+ mark = 1;
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ tmp = wdev_to_wvif(wdev, i);
+ if (mark && tmp)
+ return tmp;
+ if (tmp == cur)
+ mark = 1;
+ }
+ return NULL;
+}
+
+static inline int wvif_count(struct wfx_dev *wdev)
+{
+ int i;
+ int ret = 0;
+ struct wfx_vif *wvif;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ wvif = wdev_to_wvif(wdev, i);
+ if (wvif)
+ ret++;
+ }
+ return ret;
+}
+
+static inline void memreverse(u8 *src, u8 length)
+{
+ u8 *lo = src;
+ u8 *hi = src + length - 1;
+ u8 swap;
+
+ while (lo < hi) {
+ swap = *lo;
+ *lo++ = *hi;
+ *hi-- = swap;
+ }
+}
+
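+/* Return 0 if the buffer contains only zeros: the first byte is tested, then the buffer is
+ * compared with itself shifted by one byte
+ */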
+static inline int memzcmp(void *src, unsigned int size)
+{
+ u8 *buf = src;
+
+ if (!size)
+ return 0;
+ if (*buf)
+ return 1;
+ return memcmp(buf, buf + 1, size - 1);
+}
+
+#endif
diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c
index 10e019cddcc6..3b4ded2ac801 100644
--- a/drivers/net/wireless/st/cw1200/bh.c
+++ b/drivers/net/wireless/st/cw1200/bh.c
@@ -327,18 +327,12 @@ static int cw1200_bh_rx_helper(struct cw1200_common *priv,
if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
goto err;
- if (skb_rx) {
- dev_kfree_skb(skb_rx);
- skb_rx = NULL;
- }
+ dev_kfree_skb(skb_rx);
return 0;
err:
- if (skb_rx) {
- dev_kfree_skb(skb_rx);
- skb_rx = NULL;
- }
+ dev_kfree_skb(skb_rx);
return -1;
}
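
The simplification here leans on a guarantee worth spelling out: dev_kfree_skb() is a no-op when handed NULL, mirroring kfree(), and skb_rx goes out of scope immediately afterwards, so both the guard and the reassignment were dead code:

    if (skb_rx) {               /* before: redundant guard */
            dev_kfree_skb(skb_rx);
            skb_rx = NULL;
    }

    dev_kfree_skb(skb_rx);      /* after: equivalent */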
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index 271ed2ce2d7f..fe0d220da44d 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -423,7 +423,7 @@ static int cw1200_spi_probe(struct spi_device *func)
}
/* Disconnect Function to be called by SPI stack when device is disconnected */
-static int cw1200_spi_disconnect(struct spi_device *func)
+static void cw1200_spi_disconnect(struct spi_device *func)
{
struct hwbus_priv *self = spi_get_drvdata(func);
@@ -435,8 +435,6 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
}
cw1200_spi_off(dev_get_platdata(&func->dev));
-
- return 0;
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 12952b1c29df..805a3c1bf8fe 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -8,6 +8,7 @@
#include <net/mac80211.h>
#include <linux/sched.h>
+#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
@@ -90,23 +91,25 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
bool unlock)
{
struct cw1200_queue_stats *stats = queue->stats;
- struct cw1200_queue_item *item = NULL, *tmp;
+ struct cw1200_queue_item *item = NULL, *iter, *tmp;
bool wakeup_stats = false;
- list_for_each_entry_safe(item, tmp, &queue->queue, head) {
- if (jiffies - item->queue_timestamp < queue->ttl)
+ list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
+ if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
+ item = iter;
break;
+ }
--queue->num_queued;
- --queue->link_map_cache[item->txpriv.link_id];
+ --queue->link_map_cache[iter->txpriv.link_id];
spin_lock_bh(&stats->lock);
--stats->num_queued;
- if (!--stats->link_map_cache[item->txpriv.link_id])
+ if (!--stats->link_map_cache[iter->txpriv.link_id])
wakeup_stats = true;
spin_unlock_bh(&stats->lock);
cw1200_debug_tx_ttl(stats->priv);
- cw1200_queue_register_post_gc(head, item);
- item->skb = NULL;
- list_move_tail(&item->head, &queue->free_pool);
+ cw1200_queue_register_post_gc(head, iter);
+ iter->skb = NULL;
+ list_move_tail(&iter->head, &queue->free_pool);
}
if (wakeup_stats)
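
The item/iter split fixes a pattern the kernel has been weeding out tree-wide: after list_for_each_entry_safe() finishes without hitting a break, the cursor does not point at a valid element (it is container_of() applied to the list head), so any later dereference is type-confused. Keeping a separate result pointer that is only set on a hit makes the post-loop state explicit; the jiffies helper also makes the TTL comparison wrap-safe. A generic sketch with assumed names (struct entry, expire(), touch()):

    struct entry *found = NULL, *iter, *tmp;

    list_for_each_entry_safe(iter, tmp, &queue, head) {
            if (time_is_after_jiffies(iter->timestamp + ttl)) {
                    found = iter;   /* still fresh: stop expiring here */
                    break;
            }
            expire(iter);
    }
    if (found)
            touch(found);           /* never use 'iter' after the loop */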
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 236022d4ae2a..8ef1d06b9bbd 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -195,7 +195,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
priv->bss_loss_state++;
- skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false);
WARN_ON(!skb);
if (skb)
cw1200_tx(priv->hw, NULL, skb);
@@ -606,7 +606,8 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
}
int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct cw1200_common *priv = dev->priv;
int ret = 0;
@@ -1208,8 +1209,8 @@ static void cw1200_do_join(struct cw1200_common *priv)
struct cfg80211_bss *bss = NULL;
struct wsm_protected_mgmt_policy mgmt_policy;
struct wsm_join join = {
- .mode = conf->ibss_joined ?
- WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
+ .mode = priv->vif->cfg.ibss_joined ?
+ WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
.preamble_type = WSM_JOIN_PREAMBLE_LONG,
.probe_for_join = 1,
.atim_window = 0,
@@ -1230,7 +1231,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
- if (!bss && !conf->ibss_joined) {
+ if (!bss && !priv->vif->cfg.ibss_joined) {
wsm_unlock_tx(priv);
return;
}
@@ -1284,7 +1285,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
join.bssid,
join.dtim_period, priv->beacon_int);
- if (!conf->ibss_joined) {
+ if (!priv->vif->cfg.ibss_joined) {
const u8 *ssidie;
rcu_read_lock();
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
@@ -1302,7 +1303,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
}
/* Enable asynchronous join calls */
- if (!conf->ibss_joined) {
+ if (!priv->vif->cfg.ibss_joined) {
join.flags |= WSM_JOIN_FLAGS_FORCE;
join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND;
}
@@ -1671,7 +1672,7 @@ static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set)
pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis");
skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
- &tim_offset, &tim_length);
+ &tim_offset, &tim_length, 0);
if (!skb) {
if (!__cw1200_flush(priv, true))
wsm_unlock_tx(priv);
@@ -1796,14 +1797,14 @@ static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
void cw1200_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed)
+ u64 changed)
{
struct cw1200_common *priv = dev->priv;
bool do_join = false;
mutex_lock(&priv->conf_mutex);
- pr_debug("BSS CHANGED: %08x\n", changed);
+ pr_debug("BSS CHANGED: %llx\n", changed);
/* TODO: BSS_CHANGED_QOS */
/* TODO: BSS_CHANGED_TXPOWER */
@@ -1813,15 +1814,15 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
int i;
pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n",
- info->arp_addr_cnt);
+ vif->cfg.arp_addr_cnt);
/* Currently only one IP address is supported by firmware.
* In case of more IPs arp filtering will be disabled.
*/
- if (info->arp_addr_cnt > 0 &&
- info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
- for (i = 0; i < info->arp_addr_cnt; i++) {
- filter.ipv4addrs[i] = info->arp_addr_list[i];
+ if (vif->cfg.arp_addr_cnt > 0 &&
+ vif->cfg.arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
+ for (i = 0; i < vif->cfg.arp_addr_cnt; i++) {
+ filter.ipv4addrs[i] = vif->cfg.arp_addr_list[i];
pr_debug("[STA] addr[%d]: 0x%X\n",
i, filter.ipv4addrs[i]);
}
@@ -1857,7 +1858,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
if (changed & BSS_CHANGED_BEACON_INT) {
pr_debug("CHANGED_BEACON_INT\n");
- if (info->ibss_joined)
+ if (vif->cfg.ibss_joined)
do_join = true;
else if (priv->join_status == CW1200_JOIN_STATUS_AP)
cw1200_update_beaconing(priv);
@@ -1882,7 +1883,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
BSS_CHANGED_BASIC_RATES |
BSS_CHANGED_HT)) {
pr_debug("BSS_CHANGED_ASSOC\n");
- if (info->assoc) {
+ if (vif->cfg.assoc) {
if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) {
ieee80211_connection_loss(vif);
mutex_unlock(&priv->conf_mutex);
@@ -1894,7 +1895,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
do_join = true;
}
- if (info->assoc || info->ibss_joined) {
+ if (vif->cfg.assoc || vif->cfg.ibss_joined) {
struct ieee80211_sta *sta = NULL;
__le32 htprot = 0;
@@ -1904,13 +1905,13 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
rcu_read_lock();
- if (info->bssid && !info->ibss_joined)
+ if (info->bssid && !vif->cfg.ibss_joined)
sta = ieee80211_find_sta(vif, info->bssid);
if (sta) {
- priv->ht_info.ht_cap = sta->ht_cap;
+ priv->ht_info.ht_cap = sta->deflink.ht_cap;
priv->bss_params.operational_rate_set =
cw1200_rate_mask_to_wsm(priv,
- sta->supp_rates[priv->channel->band]);
+ sta->deflink.supp_rates[priv->channel->band]);
priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef);
priv->ht_info.operation_mode = info->ht_operation_mode;
} else {
@@ -1958,7 +1959,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
cancel_work_sync(&priv->unjoin_work);
priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count;
- priv->bss_params.aid = info->aid;
+ priv->bss_params.aid = vif->cfg.aid;
if (priv->join_dtim_period < 1)
priv->join_dtim_period = 1;
@@ -1973,7 +1974,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
priv->association_mode.basic_rate_set);
wsm_set_association_mode(priv, &priv->association_mode);
- if (!info->ibss_joined) {
+ if (!vif->cfg.ibss_joined) {
wsm_keep_alive_period(priv, 30 /* sec */);
wsm_set_bss_params(priv, &priv->bss_params);
priv->setbssparams_done = true;
@@ -2203,7 +2204,7 @@ static int cw1200_upload_beacon(struct cw1200_common *priv)
frame.rate = WSM_TRANSMIT_RATE_6;
frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
- &tim_offset, &tim_len);
+ &tim_offset, &tim_len, 0);
if (!frame.skb)
return -ENOMEM;
@@ -2262,7 +2263,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
.rate = 0xFF,
};
- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false);
if (!frame.skb)
return -ENOMEM;
@@ -2330,8 +2331,8 @@ static int cw1200_start_ap(struct cw1200_common *priv)
memset(start.ssid, 0, sizeof(start.ssid));
if (!conf->hidden_ssid) {
- start.ssid_len = conf->ssid_len;
- memcpy(start.ssid, conf->ssid, start.ssid_len);
+ start.ssid_len = priv->vif->cfg.ssid_len;
+ memcpy(start.ssid, priv->vif->cfg.ssid, start.ssid_len);
}
priv->beacon_int = conf->beacon_int;
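
The signature churn in this file tracks mac80211's multi-link (MLO) rework: template getters are per-link now, conf_tx() gained a link_id, the changed bitmap widened to u64, and interface-wide state moved from bss_conf into vif->cfg. A non-MLO driver such as cw1200 passes 0 (the default link) to ieee80211_beacon_get_tim() and -1 (no specific link) to ieee80211_nullfunc_get(). A condensed sketch of the new calling convention, using names from the hunks above:

    skb = ieee80211_beacon_get_tim(hw, vif, &tim_offset, &tim_length, 0);
    skb = ieee80211_nullfunc_get(hw, vif, -1, false);

    if (vif->cfg.assoc)                  /* was bss_conf->assoc */
            aid = vif->cfg.aid;          /* was bss_conf->aid   */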
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index 706dab8e73bf..a49f187c7049 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -28,7 +28,8 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
unsigned int *total_flags,
u64 multicast);
int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
int cw1200_get_stats(struct ieee80211_hw *dev,
struct ieee80211_low_level_stats *stats);
int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
@@ -103,7 +104,7 @@ void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
void cw1200_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
- u32 changed);
+ u64 changed);
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params);
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index 7de666b90ff5..6894b919ff94 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -762,8 +762,7 @@ void cw1200_tx(struct ieee80211_hw *dev,
if (ret)
goto drop;
- rcu_read_lock();
- sta = rcu_dereference(t.sta);
+ sta = t.sta;
spin_lock_bh(&priv->ps_state_lock);
{
@@ -776,8 +775,6 @@ void cw1200_tx(struct ieee80211_hw *dev,
if (tid_update && sta)
ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);
- rcu_read_unlock();
-
cw1200_bh_wakeup(priv);
return;
@@ -1145,8 +1142,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
/* Remove TSF from the end of frame */
if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
- memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
- hdr->mactime = le64_to_cpu(hdr->mactime);
+ hdr->mactime = get_unaligned_le64(skb->data + skb->len - 8);
if (skb->len >= 8)
skb_trim(skb, skb->len - 8);
} else {
@@ -1183,8 +1179,8 @@ void cw1200_rx_cb(struct cw1200_common *priv,
/* Disable beacon filter once we're associated... */
if (priv->disable_beacon_filter &&
- (priv->vif->bss_conf.assoc ||
- priv->vif->bss_conf.ibss_joined)) {
+ (priv->vif->cfg.assoc ||
+ priv->vif->cfg.ibss_joined)) {
priv->disable_beacon_filter = false;
queue_work(priv->workqueue,
&priv->update_filtering_work);
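
One hunk above deserves unpacking: the old mactime path memcpy'd the trailing eight bytes into a host-endian u64 and then converted it in place, which hides both the byte order and the fact that the source may be unaligned (and trips sparse, since le64_to_cpu() wants a __le64). get_unaligned_le64() states both properties in one accessor. A sketch, with the header path as in kernels of this era (newer trees use linux/unaligned.h):

    #include <asm/unaligned.h>

    /* Trailing 8-byte little-endian TSF; no alignment assumed. */
    u64 mactime = get_unaligned_le64(skb->data + skb->len - 8);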
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 99624dd34886..4a9e4b5d3547 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -537,7 +537,7 @@ int wsm_set_tx_queue_params(struct cw1200_common *priv,
{
int ret;
struct wsm_buf *buf = &priv->wsm_cmd_buf;
- u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+ static const u8 queue_id_to_wmm_aci[] = { 3, 2, 0, 1 };
wsm_cmd_lock(priv);
@@ -1594,7 +1594,7 @@ static int cw1200_get_prio_queue(struct cw1200_common *priv,
edca = &priv->edca.params[i];
score = ((edca->aifns + edca->cwmin) << 16) +
((edca->cwmax - edca->cwmin) *
- (get_random_int() & 0xFFFF));
+ get_random_u16());
if (score < best && (winner < 0 || i != 3)) {
best = score;
winner = i;
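
A small note on the RNG swap: the old code drew 32 bits and masked to 16, so get_random_u16() is a drop-in with the identical uniform [0, 0xFFFF] range, and the sized helpers let the RNG serve batched bytes more cheaply. The two forms side by side (get_random_int() no longer exists in current trees):

    u32 old_draw = get_random_int() & 0xFFFF;  /* removed interface */
    u16 new_draw = get_random_u16();           /* same range, cheaper */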
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index 1da6ba95d3d4..1da6ab664e41 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -1229,7 +1229,7 @@ struct wl1251_acx_arp_filter {
u8 address[16]; /* The IP address used to filter ARP packets.
ARP packets that do not match this address are
dropped. When the IP Version is 4, the last 12
- bytes of the the address are ignored. */
+ bytes of the address are ignored. */
} __attribute__((packed));
struct wl1251_acx_ac_cfg {
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index e6d426edab56..e945aafd88ee 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -169,11 +169,9 @@ int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms)
msleep(1);
/* read from both event fields */
- wl1251_mem_read(wl, wl->mbox_ptr[0], &events_vector,
- sizeof(events_vector));
+ events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[0]);
event = events_vector & mask;
- wl1251_mem_read(wl, wl->mbox_ptr[1], &events_vector,
- sizeof(events_vector));
+ events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[1]);
event |= events_vector & mask;
} while (!event);
@@ -202,7 +200,7 @@ void wl1251_event_mbox_config(struct wl1251 *wl)
int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num)
{
- struct event_mailbox mbox;
+ struct event_mailbox *mbox;
int ret;
wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
@@ -210,12 +208,20 @@ int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num)
if (mbox_num > 1)
return -EINVAL;
+ mbox = kmalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox) {
+ wl1251_error("can not allocate mbox buffer");
+ return -ENOMEM;
+ }
+
/* first we read the mbox descriptor */
- wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox,
- sizeof(struct event_mailbox));
+ wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], mbox,
+ sizeof(*mbox));
/* process the descriptor */
- ret = wl1251_event_process(wl, &mbox);
+ ret = wl1251_event_process(wl, mbox);
+ kfree(mbox);
+
if (ret < 0)
return ret;
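
Why the mailbox read moved to the heap (the rationale is inferred from matching wl1251 fixes, not stated in the hunk): wl1251_mem_read() hands the buffer to the SDIO/SPI glue, which may DMA straight into it, and DMA into stack memory is not permitted; the struct is also large for a kernel stack. The recurring shape of the fix:

    mbox = kmalloc(sizeof(*mbox), GFP_KERNEL);
    if (!mbox)
            return -ENOMEM;
    wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], mbox, sizeof(*mbox));
    ret = wl1251_event_process(wl, mbox);
    kfree(mbox);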
diff --git a/drivers/net/wireless/ti/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c
index 5ebe7958ed5c..e8d567af74b4 100644
--- a/drivers/net/wireless/ti/wl1251/io.c
+++ b/drivers/net/wireless/ti/wl1251/io.c
@@ -121,7 +121,13 @@ void wl1251_set_partition(struct wl1251 *wl,
u32 mem_start, u32 mem_size,
u32 reg_start, u32 reg_size)
{
- struct wl1251_partition partition[2];
+ struct wl1251_partition_set *partition;
+
+ partition = kmalloc(sizeof(*partition), GFP_KERNEL);
+ if (!partition) {
+ wl1251_error("can not allocate partition buffer");
+ return;
+ }
wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
mem_start, mem_size);
@@ -164,10 +170,10 @@ void wl1251_set_partition(struct wl1251 *wl,
reg_start, reg_size);
}
- partition[0].start = mem_start;
- partition[0].size = mem_size;
- partition[1].start = reg_start;
- partition[1].size = reg_size;
+ partition->mem.start = mem_start;
+ partition->mem.size = mem_size;
+ partition->reg.start = reg_start;
+ partition->reg.size = reg_size;
wl->physical_mem_addr = mem_start;
wl->physical_reg_addr = reg_start;
@@ -176,5 +182,7 @@ void wl1251_set_partition(struct wl1251 *wl,
wl->virtual_reg_addr = mem_size;
wl->if_ops->write(wl, HW_ACCESS_PART0_SIZE_ADDR, partition,
- sizeof(partition));
+ sizeof(*partition));
+
+ kfree(partition);
}
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index a25a6143e65f..289371689a8d 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -546,7 +546,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, -1, false);
if (!skb)
goto out;
size = skb->len;
@@ -1077,7 +1077,7 @@ out:
static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct wl1251 *wl = hw->priv;
struct sk_buff *beacon, *skb;
@@ -1123,7 +1123,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
wl->beacon_int = bss_conf->beacon_int;
skb = ieee80211_pspoll_get(wl->hw, wl->vif);
@@ -1137,7 +1137,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1251_acx_aid(wl, bss_conf->aid);
+ ret = wl1251_acx_aid(wl, vif->cfg.aid);
if (ret < 0)
goto out_sleep;
} else {
@@ -1176,17 +1176,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ARP_FILTER) {
- __be32 addr = bss_conf->arp_addr_list[0];
+ __be32 addr = vif->cfg.arp_addr_list[0];
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
- enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+ enable = vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc;
ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
if (changed & BSS_CHANGED_BEACON) {
- beacon = ieee80211_beacon_get(hw, vif);
+ beacon = ieee80211_beacon_get(hw, vif, 0);
if (!beacon)
goto out_sleep;
@@ -1282,7 +1282,8 @@ static struct ieee80211_channel wl1251_channels[] = {
};
static int wl1251_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
enum wl1251_acx_ps_scheme ps_scheme;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 5b894bd6237e..9df38726e8b0 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -327,14 +327,12 @@ out_free:
return ret;
}
-static int wl1251_spi_remove(struct spi_device *spi)
+static void wl1251_spi_remove(struct spi_device *spi)
{
struct wl1251 *wl = spi_get_drvdata(spi);
wl1251_free_hw(wl);
regulator_disable(wl->vio);
-
- return 0;
}
static struct spi_driver wl1251_spi_driver = {
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 98cd39619d57..e9dc3c72bb11 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -443,19 +443,25 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
void wl1251_tx_complete(struct wl1251 *wl)
{
int i, result_index, num_complete = 0, queue_len;
- struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
+ struct tx_result *result, *result_ptr;
unsigned long flags;
if (unlikely(wl->state != WL1251_STATE_ON))
return;
+ result = kmalloc_array(FW_TX_CMPLT_BLOCK_SIZE, sizeof(*result), GFP_KERNEL);
+ if (!result) {
+ wl1251_error("can not allocate result buffer");
+ return;
+ }
+
/* First we read the result */
- wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
- result, sizeof(result));
+ wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result,
+ FW_TX_CMPLT_BLOCK_SIZE * sizeof(*result));
result_index = wl->next_tx_complete;
- for (i = 0; i < ARRAY_SIZE(result); i++) {
+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) {
result_ptr = &result[result_index];
if (result_ptr->done_1 == 1 &&
@@ -538,6 +544,7 @@ void wl1251_tx_complete(struct wl1251 *wl)
}
+ kfree(result);
wl->next_tx_complete = result_index;
}
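
Two details in this conversion are easy to miss: kmalloc_array() is used instead of kmalloc(n * size) because it returns NULL if the multiplication would overflow, and ARRAY_SIZE() had to become the explicit FW_TX_CMPLT_BLOCK_SIZE because ARRAY_SIZE() no longer compiles once result decays to a pointer. The allocation idiom:

    result = kmalloc_array(FW_TX_CMPLT_BLOCK_SIZE, sizeof(*result),
                           GFP_KERNEL);
    if (!result)
            return;        /* overflow and OOM both land here as NULL */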
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index c6da0cfb4afb..d06a2c419447 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1924,13 +1924,10 @@ static int wl12xx_remove(struct platform_device *pdev)
struct wl1271 *wl = platform_get_drvdata(pdev);
struct wl12xx_priv *priv;
- if (!wl)
- goto out;
priv = wl->priv;
kfree(priv->rx_mem_addr);
-out:
return wlcore_remove(pdev);
}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 2f921a44f1e2..80fbf740fe6d 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -264,11 +264,9 @@ static ssize_t radar_detection_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl18xx_cmd_radar_detection_debug(wl, channel);
if (ret < 0)
@@ -306,11 +304,9 @@ static ssize_t dynamic_fw_traces_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
@@ -368,11 +364,9 @@ static ssize_t radar_debug_mode_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wlcore_cmd_generic_cfg(wl, wlvif,
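
The same two-hunk shape repeats across this whole series: pm_runtime_get_sync() increments the usage count even when resume fails, forcing every error path to call pm_runtime_put_noidle() by hand, while pm_runtime_resume_and_get() (available since v5.10) drops the count itself on failure. Before and after, sketched:

    /* before: count stays raised on error, caller must undo it */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);
            goto out;
    }

    /* after: on error the helper has already dropped the count */
    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            goto out;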
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 13d78ada4bb6..34d95f458e1a 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -131,10 +131,10 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
if (vector & TIME_SYNC_EVENT_ID)
wlcore_event_time_sync(wl,
- mbox->time_sync_tsf_high_msb,
- mbox->time_sync_tsf_high_lsb,
- mbox->time_sync_tsf_low_msb,
- mbox->time_sync_tsf_low_lsb);
+ le16_to_cpu(mbox->time_sync_tsf_high_msb),
+ le16_to_cpu(mbox->time_sync_tsf_high_lsb),
+ le16_to_cpu(mbox->time_sync_tsf_low_msb),
+ le16_to_cpu(mbox->time_sync_tsf_low_lsb));
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 8b798b5fcaf5..a939fd89a7f5 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -178,11 +178,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto free_vector;
- }
do {
if (time_after(jiffies, timeout_time)) {
@@ -677,8 +675,8 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
- cmd->ap.ssid_len = bss_conf->ssid_len;
- memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
+ cmd->ap.ssid_len = vif->cfg.ssid_len;
+ memcpy(cmd->ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
}
supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
@@ -1067,7 +1065,7 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
} else {
skb = ieee80211_nullfunc_get(wl->hw,
wl12xx_wlvif_to_vif(wlvif),
- false);
+ -1, false);
if (!skb)
goto out;
size = skb->len;
@@ -1094,7 +1092,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, vif, false);
+ skb = ieee80211_nullfunc_get(wl->hw, vif, -1, false);
if (!skb)
goto out;
@@ -1558,11 +1556,11 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
WL1271_PSD_LEGACY;
- sta_rates = sta->supp_rates[wlvif->band];
- if (sta->ht_cap.ht_supported)
+ sta_rates = sta->deflink.supp_rates[wlvif->band];
+ if (sta->deflink.ht_cap.ht_supported)
sta_rates |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
- (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
+ (sta->deflink.ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
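
Another piece of MLO fallout visible here: per-station link data (supp_rates, ht_cap, bandwidth) moved from ieee80211_sta into its link_sta substructures, with sta->deflink serving drivers that never negotiate more than one link. Access pattern, with the HW_*_OFFSET shift constants as used in the hunk above:

    u32 sta_rates = sta->deflink.supp_rates[band];

    if (sta->deflink.ht_cap.ht_supported)
            sta_rates |= (sta->deflink.ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
                         (sta->deflink.ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);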
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index cce8d75d8b81..eb3d3f0e0b4d 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -52,11 +52,9 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
@@ -108,12 +106,9 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
return;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
-
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
return;
- }
chip_op = arg;
chip_op(wl);
@@ -279,11 +274,9 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* In case we're already in PSM, trigger it again to set new timeout
* immediately without waiting for re-association
@@ -349,11 +342,9 @@ static ssize_t forced_ps_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* In case we're already in PSM, trigger it again to switch mode
* immediately without waiting for re-association
@@ -831,11 +822,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
wl->conf.rx_streaming.interval = value;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
@@ -889,11 +878,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
wl->conf.rx_streaming.always = value;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
@@ -939,11 +926,9 @@ static ssize_t beacon_filtering_write(struct file *file,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
@@ -1021,11 +1006,9 @@ static ssize_t sleep_auth_write(struct file *file,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_acx_sleep_auth(wl, value);
if (ret < 0)
@@ -1254,9 +1237,8 @@ static ssize_t fw_logger_write(struct file *file,
}
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
count = ret;
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 5669f17b395f..28c0f06e311f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -141,11 +141,9 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (!wl->conf.rx_streaming.interval)
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
@@ -174,11 +172,9 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work)
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
@@ -223,11 +219,9 @@ static void wlcore_rc_update_work(struct work_struct *work)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (ieee80211_vif_is_mesh(vif)) {
ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
@@ -537,11 +531,9 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
while (!done && loopcount--) {
smp_mb__after_atomic();
@@ -838,11 +830,9 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* Do not send a stop fwlog command if the fw is hanged or if
* dbgpins are used (due to some fw bug).
*/
- error = pm_runtime_get_sync(wl->dev);
- if (error < 0) {
- pm_runtime_put_noidle(wl->dev);
+ error = pm_runtime_resume_and_get(wl->dev);
+ if (error < 0)
return;
- }
if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
@@ -937,11 +927,9 @@ static void wl1271_recovery_work(struct work_struct *work)
if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
- error = pm_runtime_get_sync(wl->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(wl->dev);
+ if (error < 0)
wl1271_warning("Enable for recovery failed");
- pm_runtime_put_noidle(wl->dev);
- }
wlcore_disable_interrupts_nosync(wl);
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1741,9 +1729,8 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
return ret;
}
@@ -1855,11 +1842,9 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -2060,11 +2045,9 @@ static void wlcore_channel_switch_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_cmd_stop_channel_switch(wl, wlvif);
@@ -2131,11 +2114,9 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
if (!time_after(time_spare, wlvif->pending_auth_reply_time))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
@@ -2591,11 +2572,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
* Call runtime PM only after possible wl12xx_init_fw() above
* is done. Otherwise we do not have interrupts enabled.
*/
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_unlock;
- }
if (wl12xx_need_fw_change(wl, vif_count, true)) {
wl12xx_force_active_psm(wl);
@@ -2691,11 +2670,9 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto deinit;
- }
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
@@ -2927,10 +2904,12 @@ static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_bss_conf *bss_conf,
u32 sta_rate_set)
{
+ struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
+ bss_conf);
int ieoffset;
int ret;
- wlvif->aid = bss_conf->aid;
+ wlvif->aid = vif->cfg.aid;
wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
@@ -3129,11 +3108,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* configure each interface */
wl12xx_for_each_wlvif(wl, wlvif) {
@@ -3213,11 +3190,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -3470,11 +3445,9 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_wake_queues;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_wake_queues;
- }
ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
@@ -3622,11 +3595,9 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
goto out_unlock;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_unlock;
- }
wlvif->default_key = key_idx;
@@ -3659,11 +3630,9 @@ void wlcore_regdomain_config(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_cmd_regdomain_config_locked(wl);
if (ret < 0) {
@@ -3706,11 +3675,9 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* fail if there is any role in ROC */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
@@ -3749,11 +3716,9 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl->ops->scan_stop(wl, wlvif);
@@ -3800,11 +3765,9 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
@@ -3834,11 +3797,9 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl->ops->sched_scan_stop(wl, wlvif);
@@ -3862,11 +3823,9 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_acx_frag_threshold(wl, value);
if (ret < 0)
@@ -3894,11 +3853,9 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_rts_threshold(wl, wlvif, value);
@@ -3980,7 +3937,6 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
u32 rates)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
@@ -3993,7 +3949,7 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
probe_rsp_len, 0,
rates);
- if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
+ if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
wl1271_error("probe_rsp template too big");
return -EINVAL;
}
@@ -4015,12 +3971,12 @@ static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
/* insert SSID from bss_conf */
probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
- probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
+ probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
memcpy(probe_rsp_templ + ssid_ie_offset + 2,
- bss_conf->ssid, bss_conf->ssid_len);
- templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
+ vif->cfg.ssid, vif->cfg.ssid_len);
+ templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
- memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
+ memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
ptr, probe_rsp_len - (ptr - probe_rsp_data));
templ_len += probe_rsp_len - (ptr - probe_rsp_data);
@@ -4083,7 +4039,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
u32 min_rate;
int ret;
int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
- struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
u16 tmpl_id;
if (!beacon) {
@@ -4300,15 +4256,15 @@ out:
}
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct ieee80211_bss_conf *bss_conf,
- u32 sta_rate_set)
+ struct ieee80211_vif *vif, u32 sta_rate_set)
{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u32 rates;
int ret;
wl1271_debug(DEBUG_MAC80211,
"changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
- bss_conf->bssid, bss_conf->aid,
+ bss_conf->bssid, vif->cfg.aid,
bss_conf->beacon_int,
bss_conf->basic_rates, sta_rate_set);
@@ -4396,7 +4352,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_IBSS) {
- if (bss_conf->ibss_joined) {
+ if (vif->cfg.ibss_joined) {
set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
@@ -4420,7 +4376,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_IDLE && !is_ibss)
- wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+ wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
@@ -4439,15 +4395,15 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
- u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
+ u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
/* save the supp_rates of the ap */
- sta_rate_set = sta->supp_rates[wlvif->band];
- if (sta->ht_cap.ht_supported)
+ sta_rate_set = sta->deflink.supp_rates[wlvif->band];
+ if (sta->deflink.ht_cap.ht_supported)
sta_rate_set |=
(rx_mask[0] << HW_HT_RATES_OFFSET) |
(rx_mask[1] << HW_MIMO_RATES_OFFSET);
- sta_ht_cap = sta->ht_cap;
+ sta_ht_cap = sta->deflink.ht_cap;
sta_exists = true;
}
@@ -4456,7 +4412,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(bss_conf->bssid)) {
- ret = wlcore_set_bssid(wl, wlvif, bss_conf,
+ ret = wlcore_set_bssid(wl, wlvif, vif,
sta_rate_set);
if (ret < 0)
goto out;
@@ -4472,9 +4428,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_IBSS) {
wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
- bss_conf->ibss_joined);
+ vif->cfg.ibss_joined);
- if (bss_conf->ibss_joined) {
+ if (vif->cfg.ibss_joined) {
u32 rates = bss_conf->basic_rates;
wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
@@ -4511,7 +4467,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_ASSOC) {
- if (bss_conf->assoc) {
+ if (vif->cfg.assoc) {
ret = wlcore_set_assoc(wl, wlvif, bss_conf,
sta_rate_set);
if (ret < 0)
@@ -4525,7 +4481,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
if (changed & BSS_CHANGED_PS) {
- if ((bss_conf->ps) &&
+ if (vif->cfg.ps &&
test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
int ps_mode;
@@ -4545,7 +4501,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (ret < 0)
wl1271_warning("enter %s ps failed %d",
ps_mode_str, ret);
- } else if (!bss_conf->ps &&
+ } else if (!vif->cfg.ps &&
test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "auto ps disabled");
@@ -4586,11 +4542,11 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* Handle arp filtering. Done after join. */
if ((changed & BSS_CHANGED_ARP_FILTER) ||
(!is_ibss && (changed & BSS_CHANGED_QOS))) {
- __be32 addr = bss_conf->arp_addr_list[0];
+ __be32 addr = vif->cfg.arp_addr_list[0];
wlvif->sta.qos = bss_conf->qos;
WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
- if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
+ if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
wlvif->ip_addr = addr;
/*
* The template should have been configured only upon
@@ -4624,7 +4580,7 @@ out:
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changed)
+ u64 changed)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
@@ -4653,11 +4609,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if ((changed & BSS_CHANGED_TXPOWER) &&
bss_conf->txpower != wlvif->power_level) {
@@ -4714,17 +4668,15 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl12xx_for_each_wlvif(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
rcu_read_lock();
- if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
rcu_read_unlock();
continue;
}
@@ -4749,6 +4701,7 @@ out:
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct wl1271 *wl = hw->priv;
@@ -4771,11 +4724,9 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
@@ -4801,6 +4752,7 @@ out:
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct wl1271 *wl = hw->priv;
@@ -4823,11 +4775,9 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
@@ -4893,11 +4843,9 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
for (i = 0; i < n_vifs; i++) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
@@ -4916,7 +4864,8 @@ out:
}
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
@@ -4939,11 +4888,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/*
* the txop is confed in units of 32us by the mac80211,
@@ -4987,11 +4934,9 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
@@ -5225,7 +5170,8 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
if (ret < 0)
return ret;
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
+ true,
wl_sta->hlid);
if (ret)
return ret;
@@ -5305,11 +5251,9 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
@@ -5363,11 +5307,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ba_bitmap = &wl->links[hlid].ba_bitmap;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
tid, action);
@@ -5475,11 +5417,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
@@ -5517,11 +5457,9 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
/* TODO: change mac80211 to pass vif as param */
@@ -5556,7 +5494,7 @@ static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
{
int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
struct sk_buff *beacon =
- ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
+ ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
if (!beacon)
return NULL;
@@ -5611,11 +5549,9 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
if (ret)
@@ -5666,11 +5602,9 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
if (ret < 0)
@@ -5723,11 +5657,9 @@ static int wlcore_roc_completed(struct wl1271 *wl)
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = __wlcore_roc_completed(wl);
@@ -5786,8 +5718,9 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
return;
/* this callback is atomic, so schedule a new work */
- wlvif->rc_update_bw = sta->bandwidth;
- memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
+ wlvif->rc_update_bw = sta->deflink.bandwidth;
+ memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
+ sizeof(sta->deflink.ht_cap));
ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
@@ -5808,11 +5741,9 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out_sleep;
- }
ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
@@ -6169,7 +6100,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
- nic_addr = get_random_int();
+ nic_addr = get_random_u32();
} else {
oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
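
A thread running through the wlcore/main.c hunks: mac80211 split interface-wide state (assoc, aid, ps, idle, ssid, the ARP address list) out of ieee80211_bss_conf into vif->cfg, keeping bss_conf for per-link data. Where a helper only receives the bss_conf pointer, the patch climbs back to the vif with container_of(); that is sound here because the driver only ever passes the vif's embedded default-link bss_conf. Sketch:

    struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
                                             bss_conf);

    if (vif->cfg.assoc)           /* interface-wide, was bss_conf->assoc */
            wlvif->aid = vif->cfg.aid;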
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 29fa51c37e88..b414305acc32 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -53,11 +53,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.req = NULL;
wl->scan_wlvif = NULL;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 72fc41ac83c0..cf8d909fa826 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -132,9 +132,8 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
- ret = pm_runtime_get_sync(&card->dev);
+ ret = pm_runtime_resume_and_get(&card->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&card->dev);
dev_err(glue->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
@@ -146,7 +145,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
* To guarantee that the SDIO card is power cycled, as required to make
* the FW programming to succeed, let's do a brute force HW reset.
*/
- mmc_hw_reset(card->host);
+ mmc_hw_reset(card);
sdio_enable_func(func);
sdio_release_host(func);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 354a7e1c3315..7eae1ec2eb2b 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -546,13 +546,11 @@ out_dev_put:
return ret;
}
-static int wl1271_remove(struct spi_device *spi)
+static void wl1271_remove(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
platform_device_unregister(glue->core);
-
- return 0;
}
static struct spi_driver wl1271_spi_driver = {
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 35b535c125b6..f0c7e09b314d 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -56,11 +56,9 @@ static ssize_t bt_coex_state_store(struct device *dev,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
wl1271_acx_sg_enable(wl, wl->sg_enabled);
pm_runtime_mark_last_busy(wl->dev);
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 3a17b9a8207e..3f338b8096c7 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -83,11 +83,9 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
@@ -158,11 +156,9 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index e20e18cd04ae..7bd3ce2f0804 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -855,11 +855,9 @@ void wl1271_tx_work(struct work_struct *work)
int ret;
mutex_lock(&wl->mutex);
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index e1bd344c4ebc..e4269e2b0098 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -53,11 +53,9 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_smart_config_start(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
@@ -88,11 +86,9 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_smart_config_stop(wl);
@@ -135,11 +131,9 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
goto out;
}
- ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(wl->dev);
+ ret = pm_runtime_resume_and_get(wl->dev);
+ if (ret < 0)
goto out;
- }
ret = wlcore_smart_config_set_group_key(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 514f2c1124b6..ba14d83353a4 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -654,7 +654,7 @@ static int __init virt_wifi_init_module(void)
{
int err;
- /* Guaranteed to be locallly-administered and not multicast. */
+ /* Guaranteed to be locally-administered and not multicast. */
eth_random_addr(fake_router_bssid);
err = register_netdevice_notifier(&virt_wifi_notifier);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index dad38fc04243..1b532e00a56f 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1441,7 +1441,7 @@ static void wl3501_detach(struct pcmcia_device *link)
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- strlcpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
+ strscpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
return 0;
}
@@ -1652,7 +1652,7 @@ static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info,
if (wrqu->data.length > sizeof(this->nick))
return -E2BIG;
- strlcpy(this->nick, extra, wrqu->data.length);
+ strscpy(this->nick, extra, wrqu->data.length);
return 0;
}
@@ -1661,7 +1661,7 @@ static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info,
{
struct wl3501_card *this = netdev_priv(dev);
- strlcpy(extra, this->nick, 32);
+ strscpy(extra, this->nick, 32);
wrqu->data.length = strlen(extra);
return 0;
}
@@ -1965,7 +1965,7 @@ static int wl3501_config(struct pcmcia_device *link)
this->firmware_date[0] = '\0';
this->rssi = 255;
this->chan = iw_default_channel(this->reg_domain);
- strlcpy(this->nick, "Planet WL3501", sizeof(this->nick));
+ strscpy(this->nick, "Planet WL3501", sizeof(this->nick));
spin_lock_init(&this->lock);
init_waitqueue_head(&this->wait);
netif_start_queue(dev);
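The wl3501 hunks are part of the tree-wide strlcpy() to strscpy() conversion. Both always NUL-terminate, but strlcpy() returns strlen(src) and therefore must traverse the entire source string even when the destination is smaller, while strscpy() returns the number of bytes copied, or -E2BIG on truncation, and never reads past what it writes. A small sketch of the new idiom, using a hypothetical fixed-size name buffer:

#include <linux/printk.h>
#include <linux/string.h>

struct demo_card {
	char nick[32];
};

static void demo_set_nick(struct demo_card *card, const char *src)
{
	ssize_t n = strscpy(card->nick, src, sizeof(card->nick));

	if (n == -E2BIG)
		pr_debug("nick truncated\n");	/* still NUL-terminated */
}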
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index e64e4e579518..82bc0d44212e 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
zd->rxdatas = 0;
zd->rxlen = 0;
for (seq=0; len > 0; seq++) {
- request = kmalloc(16, gfp_mask);
+ request = kzalloc(16, gfp_mask);
if (!request)
return -ENOMEM;
urb = usb_alloc_urb(0, gfp_mask);
@@ -529,7 +529,6 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
kfree(request);
return -ENOMEM;
}
- memset(request, 0, 16);
reqlen = len>12 ? 12 : len;
request[0] = ZD1201_USB_RESREQ;
request[1] = seq;
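The zd1201 change folds the follow-up memset() into the allocation: kzalloc(n, flags) is exactly kmalloc(n, flags) plus memset(p, 0, n), with no chance of the two sizes drifting apart. Sketch, assuming an arbitrary 16-byte request buffer:

#include <linux/slab.h>

static u8 *demo_alloc_request(gfp_t gfp_mask)
{
	/* one call replaces kmalloc(16, gfp_mask) + memset(p, 0, 16) */
	return kzalloc(16, gfp_mask);
}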
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 3ef8533205f9..80b905d49954 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -398,7 +398,7 @@ int zd_restore_settings(struct zd_mac *mac)
mac->type == NL80211_IFTYPE_ADHOC ||
mac->type == NL80211_IFTYPE_AP) {
if (mac->vif != NULL) {
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon)
zd_mac_config_beacon(mac->hw, beacon, false);
}
@@ -1167,7 +1167,7 @@ static void zd_beacon_done(struct zd_mac *mac)
/*
* Fetch next beacon so that tim_count is updated.
*/
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon)
zd_mac_config_beacon(mac->hw, beacon, true);
@@ -1278,19 +1278,20 @@ static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+ u64 changes)
{
struct zd_mac *mac = zd_hw_mac(hw);
int associated;
- dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+ dev_dbg_f(zd_mac_dev(mac), "changes: %llx\n", changes);
if (mac->type == NL80211_IFTYPE_MESH_POINT ||
mac->type == NL80211_IFTYPE_ADHOC ||
mac->type == NL80211_IFTYPE_AP) {
associated = true;
if (changes & BSS_CHANGED_BEACON) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif,
+ 0);
if (beacon) {
zd_chip_disable_hwint(&mac->chip);
@@ -1447,7 +1448,7 @@ static void beacon_watchdog_handler(struct work_struct *work)
zd_chip_disable_hwint(&mac->chip);
- beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0);
if (beacon) {
zd_mac_free_cur_beacon(mac);
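Two mac80211 API changes drive these zd_mac.c hunks: ieee80211_beacon_get() gained a link_id argument for multi-link operation, where drivers without MLO support pass 0 for the default link, and the 'changes' bitmask of bss_info_changed() widened from u32 to u64, so the debug format moves from %x to %llx. A sketch of a driver callback adapted to both, with the device-specific programming elided:

#include <net/mac80211.h>

static void demo_bss_info_changed(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_bss_conf *bss_conf,
				  u64 changes)	/* was u32 */
{
	if (changes & BSS_CHANGED_BEACON) {
		/* link_id 0: the default link on non-MLO hardware */
		struct sk_buff *beacon = ieee80211_beacon_get(hw, vif, 0);

		if (beacon)
			dev_kfree_skb(beacon);	/* stand-in for programming it */
	}
}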
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 609fd4a2c865..ac4d73b5626f 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -94,7 +94,7 @@ config RPMSG_WWAN_CTRL
config IOSM
tristate "IOSM Driver for Intel M.2 WWAN Device"
- depends on INTEL_IOMMU
+ depends on PCI
select NET_DEVLINK
select RELAY if WWAN_DEBUGFS
help
@@ -105,6 +105,20 @@ config IOSM
If unsure, say N.
+config MTK_T7XX
+ tristate "MediaTek PCIe 5G WWAN modem T7xx device"
+ depends on PCI
+ help
+	  Enables the MediaTek PCIe based 5G WWAN modem (T7xx series) device.
+	  It adapts to the WWAN framework and provides network interfaces
+	  like wwan0 and tty interfaces like wwan0at0 (AT protocol) and
+	  wwan0mbim0 (MBIM protocol).
+
+ To compile this driver as a module, choose M here: the module will be
+ called mtk_t7xx.
+
+ If unsure, say N.
+
endif # WWAN
endmenu
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
index e722650bebea..3960c0ae2445 100644
--- a/drivers/net/wwan/Makefile
+++ b/drivers/net/wwan/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o
obj-$(CONFIG_QCOM_BAM_DMUX) += qcom_bam_dmux.o
obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o
obj-$(CONFIG_IOSM) += iosm/
+obj-$(CONFIG_MTK_T7XX) += t7xx/
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
index 9acd87724c9d..26ca30476f40 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_coredump.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
+#include <linux/vmalloc.h>
#include "iosm_ipc_coredump.h"
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
index 0809ba664276..3da5ec75e0f0 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_coredump.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
@@ -14,9 +14,6 @@
/* Max buffer allocated to receive coredump data */
#define MAX_DATA_SIZE 0x00010000
-/* Max number of file entries */
-#define MAX_NOF_ENTRY 256
-
/* Max length */
#define MAX_SIZE_LEN 32
@@ -38,7 +35,7 @@ struct iosm_cd_list_entry {
*/
struct iosm_cd_list {
__le32 num_entries;
- struct iosm_cd_list_entry entry[MAX_NOF_ENTRY];
+ struct iosm_cd_list_entry entry[];
} __packed;
/**
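Dropping MAX_NOF_ENTRY in favour of a C99 flexible array member means the coredump list no longer reserves 256 fixed slots; the buffer is simply interpreted against however many entries num_entries announces. When such a structure is allocated rather than overlaid on received data, struct_size() from <linux/overflow.h> computes the header-plus-entries size with overflow checking. A minimal sketch with hypothetical types:

#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {
	__le32 size;
};

struct demo_list {
	__le32 num_entries;
	struct demo_entry entry[];	/* flexible array member */
} __packed;

static struct demo_list *demo_list_alloc(u32 n)
{
	struct demo_list *list;

	/* struct_size() == sizeof(*list) + n * sizeof(list->entry[0]) */
	list = kzalloc(struct_size(list, entry, n), GFP_KERNEL);
	if (list)
		list->num_entries = cpu_to_le32(n);
	return list;
}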
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
index f2f57751a7d2..e916139b8cd4 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
@@ -12,10 +12,10 @@
void ipc_debugfs_init(struct iosm_imem *ipc_imem)
{
- struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev);
+ ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev);
ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
- debugfs_pdev);
+ ipc_imem->debugfs_wwan_dir);
ipc_imem->trace = ipc_trace_init(ipc_imem);
if (!ipc_imem->trace)
@@ -26,4 +26,5 @@ void ipc_debugfs_deinit(struct iosm_imem *ipc_imem)
{
ipc_trace_deinit(ipc_imem->trace);
debugfs_remove_recursive(ipc_imem->debugfs_dir);
+ wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir);
}
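The debugfs fix pairs the reference taken by wwan_get_debugfs_dir() with a wwan_put_debugfs_dir() at teardown; the old code fetched the dentry into a local variable and never released it. The shape is the usual get/put pairing, sketched here against a hypothetical driver-private struct:

#include <linux/debugfs.h>
#include <linux/wwan.h>

struct demo_priv {
	struct device *dev;
	struct dentry *wwan_dir;
	struct dentry *dir;
};

static void demo_debugfs_init(struct demo_priv *p)
{
	p->wwan_dir = wwan_get_debugfs_dir(p->dev);	/* takes a reference */
	p->dir = debugfs_create_dir("demo", p->wwan_dir);
}

static void demo_debugfs_deinit(struct demo_priv *p)
{
	debugfs_remove_recursive(p->dir);
	wwan_put_debugfs_dir(p->wwan_dir);		/* drops the reference */
}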
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 17da85a8f337..2fe724d623c0 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
+#include <linux/vmalloc.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_coredump.h"
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index f9e8e0ee4de3..1e6a47976642 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -114,17 +114,35 @@ ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
return HRTIMER_NORESTART;
}
+static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_mux_ul_adb_finish(ipc_imem->mux);
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, adb_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
struct ipc_mux_config *cfg)
{
ipc_mmio_update_cp_capability(ipc_imem->mmio);
- if (!ipc_imem->mmio->has_mux_lite) {
+ if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
dev_err(ipc_imem->dev, "Failed to get Mux capability.");
return -EINVAL;
}
- cfg->protocol = MUX_LITE;
+ cfg->protocol = ipc_imem->mmio->mux_protocol;
cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
MUX_UL_ON_CREDITS :
@@ -153,6 +171,10 @@ void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
IPC_MSG_PREP_FEATURE_SET, &prep_args);
}
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
/* Use the TD update timer only in the runtime phase */
@@ -179,6 +201,21 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
hrtimer_cancel(hr_timer);
}
+/**
+ * ipc_imem_adb_timer_start - Starts the ADB timer if not already started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
+{
+ if (!hrtimer_active(&ipc_imem->adb_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
+ hrtimer_start(&ipc_imem->adb_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
@@ -550,6 +587,11 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+ if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
+ chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
+ ctrl_chl_idx++;
+ continue;
+ }
if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
chnl_cfg_port,
@@ -680,8 +722,11 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
}
/* Try to generate new ADB or ADGH. */
- if (ipc_mux_ul_data_encode(ipc_imem->mux))
+ if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
ipc_imem_td_update_timer_start(ipc_imem);
+ if (ipc_imem->mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_imem);
+ }
/* Continue the send procedure with accumulated SIO or NETIF packets.
* Reset the debounce flags.
@@ -1330,6 +1375,9 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
HRTIMER_MODE_REL);
ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
goto imem_config_fail;
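The new adb_timer follows the driver's established hrtimer pattern: the callback runs in hard-irq context, so it only queues deferred work and returns HRTIMER_NORESTART, and the timer is armed with a relative ktime only when it is not already pending. A generic sketch of that pattern, with the driver's task queue replaced by a plain workqueue for illustration:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>

struct demo {
	struct hrtimer timer;
	struct work_struct work;	/* the actual heavy lifting */
};

static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
{
	struct demo *d = container_of(t, struct demo, timer);

	schedule_work(&d->work);	/* defer out of hard-irq context */
	return HRTIMER_NORESTART;	/* one-shot */
}

static void demo_timer_start(struct demo *d, u64 timeout_ns)
{
	if (!hrtimer_active(&d->timer))
		hrtimer_start(&d->timer, ktime_set(0, timeout_ns),
			      HRTIMER_MODE_REL);
}

static void demo_timer_setup(struct demo *d)
{
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = demo_timer_cb;
}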
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 98554e9beb01..e700dc8bfe0a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -317,6 +317,7 @@ enum ipc_phase {
* @tdupdate_timer: Delay the TD update doorbell.
* @fast_update_timer: forced head pointer update delay timer.
* @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @adb_timer: Timer for finishing the ADB.
* @rom_exit_code: Mapped boot rom exit code.
* @enter_runtime: 1 means the transition to runtime phase was
* executed.
@@ -340,6 +341,7 @@ enum ipc_phase {
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
+ * @debugfs_wwan_dir: WWAN Debug FS directory entry
* @debugfs_dir: Debug FS directory for driver-specific entries
*/
struct iosm_imem {
@@ -364,6 +366,7 @@ struct iosm_imem {
struct hrtimer tdupdate_timer;
struct hrtimer fast_update_timer;
struct hrtimer td_alloc_timer;
+ struct hrtimer adb_timer;
enum rom_exit_code rom_exit_code;
u32 enter_runtime;
struct completion ul_pend_sem;
@@ -382,6 +385,7 @@ struct iosm_imem {
reset_det_n:1,
pcie_wake_n:1;
#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_wwan_dir;
struct dentry *debugfs_dir;
#endif
};
@@ -593,4 +597,7 @@ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
* Returns: 0 on success, -1 on failure
*/
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
+
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem);
+
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 57304a5adf68..66b90cc4c346 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -91,6 +91,14 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
}
ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
+
+ if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
+ ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
+ chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
+ chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
+ }
+
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
IRQ_MOD_OFF);
@@ -590,7 +598,7 @@ int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
goto out;
}
- memcpy(skb_put(skb, count), buf, count);
+ skb_put_data(skb, buf, count);
IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
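The one-liner in the devlink write path is a straight equivalence: skb_put_data(skb, buf, count) expands to memcpy(skb_put(skb, count), buf, count), extending the skb tail and copying in a single call. Sketch:

#include <linux/skbuff.h>

static void demo_fill_skb(struct sk_buff *skb, const void *buf,
			  unsigned int len)
{
	/* grows skb->len by len and copies buf into the new tail room */
	skb_put_data(skb, buf, len);
}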
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index f09e5e77a2a5..63eb08c43c05 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_mux.h"
/* Definition of MMIO offsets
* note that MMIO_CI offsets are relative to end of chip info structure
@@ -71,8 +72,9 @@ void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
ver = ipc_mmio_get_cp_version(ipc_mmio);
cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
- ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
- !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
+ ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
+ (UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
+ : MUX_LITE;
ipc_mmio->has_ul_flow_credit =
(ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
index f861994a6d90..193d7ba2478a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -72,7 +72,7 @@ struct mmio_offset {
* @context_info_addr: Physical base address of context info structure
* @chip_info_version: Version of chip info structure
* @chip_info_size: Size of chip info structure
- * @has_mux_lite: It doesn't support mux aggergation
+ * @mux_protocol: Supported MUX protocol (MUX Lite or MUX Aggregation)
* @has_ul_flow_credit: Ul flow credit support
* @has_slp_no_prot: Device sleep no protocol support
* @has_mcr_support: Usage of mcr support
@@ -84,8 +84,8 @@ struct iosm_mmio {
phys_addr_t context_info_addr;
unsigned int chip_info_version;
unsigned int chip_info_size;
- u8 has_mux_lite:1,
- has_ul_flow_credit:1,
+ u32 mux_protocol;
+ u8 has_ul_flow_credit:1,
has_slp_no_prot:1,
has_mcr_support:1;
};
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index 8e66ffe92055..9c7a9a2a1f25 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -279,9 +279,10 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
struct iosm_imem *imem)
{
struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
- int i, ul_tds, ul_td_size;
+ int i, j, ul_tds, ul_td_size;
struct sk_buff_head *free_list;
struct sk_buff *skb;
+ int qlt_size;
if (!ipc_mux)
return NULL;
@@ -321,6 +322,24 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
ipc_mux->channel_id = -1;
ipc_mux->channel = NULL;
+ if (ipc_mux->protocol != MUX_LITE) {
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
+ GFP_ATOMIC);
+ if (!ipc_mux->ul_adb.pp_qlt[i]) {
+ for (j = i - 1; j >= 0; j--)
+ kfree(ipc_mux->ul_adb.pp_qlt[j]);
+ return NULL;
+ }
+ }
+
+ ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
+ ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ }
+
/* Allocate the list of UL ADB. */
for (i = 0; i < ul_tds; i++) {
dma_addr_t mapping;
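The new pp_qlt loop is the standard unwind for a partially populated pointer array: on the first kzalloc() failure, every slot filled so far is freed in reverse order before bailing out. (In the hunk above the early return does not free ipc_mux itself, which looks like a leak on that path; the sketch below frees the container as well, as the fuller form of the pattern.)

#include <linux/slab.h>

#define DEMO_SLOTS 8

struct demo_container {
	void *slot[DEMO_SLOTS];
};

static struct demo_container *demo_container_alloc(size_t slot_size)
{
	struct demo_container *c = kzalloc(sizeof(*c), GFP_KERNEL);
	int i, j;

	if (!c)
		return NULL;

	for (i = 0; i < DEMO_SLOTS; i++) {
		c->slot[i] = kzalloc(slot_size, GFP_KERNEL);
		if (!c->slot[i]) {
			/* unwind everything allocated so far */
			for (j = i - 1; j >= 0; j--)
				kfree(c->slot[j]);
			kfree(c);
			return NULL;
		}
	}
	return c;
}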
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
index 88debaa1ed31..9968bb885c1f 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -8,9 +8,13 @@
#include "iosm_ipc_protocol.h"
-/* Size of the buffer for the IP MUX data buffer. */
-#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024)
-#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE
+#define IPC_MEM_MAX_UL_DG_ENTRIES 100
+#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60
+#define IPC_MEM_MAX_TDS_MUX_AGGR_DL 60
+
+#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
+#define IPC_MEM_MAX_DL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
/* Size of the buffer for the IP MUX Lite data buffer. */
#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
@@ -167,6 +171,7 @@ enum mux_state {
enum ipc_mux_protocol {
MUX_UNKNOWN,
MUX_LITE,
+ MUX_AGGREGATION,
};
/* Supported UL data transfer methods. */
@@ -192,24 +197,111 @@ struct mux_session {
flush:1; /* flush net interface ? */
};
-/* State of a single UL data block. */
+/**
+ * struct mux_adth_dg - Structure of the datagram in the Aggregated Datagram
+ * Table Header.
+ * @datagram_index : Index (in bytes) to the k-th datagram in the table.
+ * Index shall count from the start of the block including
+ * the 16-byte header. This value shall be non-zero.
+ * @datagram_length: Length of the k-th datagram including the head padding.
+ * This value shall be non-zero.
+ * @service_class: Service class identifier for the datagram.
+ * @reserved: Reserved bytes. Set to zero
+ */
+struct mux_adth_dg {
+ __le32 datagram_index;
+ __le16 datagram_length;
+ u8 service_class;
+ u8 reserved;
+};
+
+/**
+ * struct mux_qlth_ql - Structure of the queue level in the Aggregated
+ * Datagram Queue Level Table Header.
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_qlth_ql {
+ __le32 nr_of_bytes;
+};
+
+/**
+ * struct mux_qlth - Structure of Aggregated Datagram Queue Level Table
+ * Header.
+ * @signature: Signature of the Queue Level Table Header
+ * Value: 0x48544C51 (ASCII characters: 'Q' 'L' 'T' 'H')
+ * @table_length: Length (in bytes) of the datagram table. This length
+ * shall include the queue level table header size.
+ * Minimum value: 0x10
+ * @if_id: ID of the interface the queue levels in the table
+ * belong to.
+ * @reserved: Reserved byte. Set to zero.
+ * @next_table_index: Index (in bytes) to the next table in the buffer. Index
+ * shall count from the start of the block including the
+ * 16-byte header. Value of zero indicates end of the list.
+ * @reserved2: Reserved bytes. Set to zero
+ * @ql: Queue level table with variable length
+ */
+struct mux_qlth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_qlth_ql ql;
+};
+
+/**
+ * struct mux_adb - State of a single UL data block.
+ * @dest_skb: Current UL skb for the data block.
+ * @buf: ADB memory
+ * @adgh: ADGH pointer
+ * @qlth_skb: QLTH pointer
+ * @next_table_index: Pointer to next table index.
+ * @free_list: List of allocated ADBs for the UL session.
+ * @size: Size of the ADB memory.
+ * @if_cnt: Statistic counter
+ * @dg_cnt_total: Datagram count total
+ * @payload_size: Payload Size
+ * @dg: Datagram table.
+ * @pp_qlt: Pointers to the per-session Queue Level Tables
+ * @adbh: ADBH pointer
+ * @qlt_updated: Queue level table updated
+ * @dg_count: Datagram count
+ */
struct mux_adb {
- struct sk_buff *dest_skb; /* Current UL skb for the data block. */
- u8 *buf; /* ADB memory. */
- struct mux_adgh *adgh; /* ADGH pointer */
- struct sk_buff *qlth_skb; /* QLTH pointer */
- u32 *next_table_index; /* Pointer to next table index. */
- struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/
- int size; /* Size of the ADB memory. */
- u32 if_cnt; /* Statistic counter */
+ struct sk_buff *dest_skb;
+ u8 *buf;
+ struct mux_adgh *adgh;
+ struct sk_buff *qlth_skb;
+ u32 *next_table_index;
+ struct sk_buff_head free_list;
+ int size;
+ u32 if_cnt;
u32 dg_cnt_total;
u32 payload_size;
+ struct mux_adth_dg
+ dg[IPC_MEM_MUX_IP_SESSION_ENTRIES][IPC_MEM_MAX_UL_DG_ENTRIES];
+ struct mux_qlth *pp_qlt[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct mux_adbh *adbh;
+ u32 qlt_updated[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ u32 dg_count[IPC_MEM_MUX_IP_SESSION_ENTRIES];
};
-/* Temporary ACB state. */
+/**
+ * struct mux_acb - Temporary ACB state.
+ * @skb: Used UL skb.
+ * @if_id: Session id.
+ * @buf_p: Command buffer.
+ * @wanted_response: Wanted Response
+ * @got_response: Got response
+ * @cmd: command
+ * @got_param: Received command/response parameter
+ */
struct mux_acb {
struct sk_buff *skb; /* Used UL skb. */
int if_id; /* Session id. */
+ u8 *buf_p;
u32 wanted_response;
u32 got_response;
u32 cmd;
@@ -241,6 +333,12 @@ struct mux_acb {
* @wwan_q_offset: This will hold the offset of the given instance
* Useful while passing or receiving packets from
* wwan/imem layer.
+ * @adb_finish_timer: Timer for forcefully finishing the ADB
+ * @acb_tx_sequence_nr: Sequence number for the ACB header.
+ * @params: user configurable parameters
+ * @adb_tx_sequence_nr: Sequence number for ADB header
+ * @acc_adb_size: Statistic data for logging
+ * @acc_payload_size: Statistic data for logging
* @initialized: MUX object is initialized
* @ev_mux_net_transmit_pending:
* 0 means inform the IPC tasklet to pass the
@@ -269,10 +367,16 @@ struct iosm_mux {
long long ul_data_pend_bytes;
struct mux_acb acb;
int wwan_q_offset;
+ struct hrtimer adb_finish_timer;
+ u16 acb_tx_sequence_nr;
+ struct ipc_params *params;
+ u16 adb_tx_sequence_nr;
+ unsigned long long acc_adb_size;
+ unsigned long long acc_payload_size;
u8 initialized:1,
ev_mux_net_transmit_pending:1,
- adb_prep_ongoing:1;
-};
+ adb_prep_ongoing;
+} __packed;
/* MUX configuration structure */
struct ipc_mux_config {
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index 40fb54a0513e..d41e373f9c0a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -54,6 +54,49 @@ static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
return 0;
}
+/* Initialize the command header. */
+static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_acbh *header;
+
+ header = (struct mux_acbh *)(acb->skb)->data;
+ header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
+ header->first_cmd_index = header->block_length;
+ header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
+ header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
+}
+
+/* Add a command to the ACB. */
+static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
+ void *param, u32 param_size)
+{
+ struct mux_acbh *header;
+ struct mux_cmdh *cmdh;
+ struct mux_acb *acb;
+
+ acb = &ipc_mux->acb;
+ header = (struct mux_acbh *)(acb->skb)->data;
+ cmdh = (struct mux_cmdh *)
+ ((acb->skb)->data + le32_to_cpu(header->block_length));
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le32_to_cpu(header->block_length) +
+ le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
/* Prepare mux Command */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
u32 cmd, struct mux_acb *acb,
@@ -104,7 +147,7 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
size_t res_size, bool blocking, bool respond)
{
struct mux_acb *acb = &ipc_mux->acb;
- struct mux_lite_cmdh *ack_lite;
+ union mux_type_cmdh cmdh;
int ret = 0;
acb->if_id = if_id;
@@ -112,11 +155,23 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
if (ret)
return ret;
- ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
- res_size);
- if (respond)
- ack_lite->transaction_id = cpu_to_le32(transaction_id);
+ if (ipc_mux->protocol == MUX_LITE) {
+ cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
+ param, res_size);
+ if (respond)
+ cmdh.ack_lite->transaction_id =
+ cpu_to_le32(transaction_id);
+ } else {
+ /* Initialize the ACB header. */
+ ipc_mux_acb_init(ipc_mux);
+ cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
+ res_size);
+
+ if (respond)
+ cmdh.ack_aggr->transaction_id =
+ cpu_to_le32(transaction_id);
+ }
ret = ipc_mux_acb_send(ipc_mux, blocking);
return ret;
@@ -129,15 +184,17 @@ void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
}
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+ union mux_cmd_param param,
+ __le32 command_type, u8 if_id,
+ __le32 transaction_id)
{
struct mux_acb *acb = &ipc_mux->acb;
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_CMD_OPEN_SESSION_RESP:
case MUX_CMD_CLOSE_SESSION_RESP:
/* Resume the control application. */
- acb->got_param = cmdh->param;
+ acb->got_param = param;
break;
case MUX_LITE_CMD_FLOW_CTL_ACK:
@@ -147,8 +204,16 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
if (ipc_mux->protocol != MUX_LITE)
return -EINVAL;
- dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
- cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
+ dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
+ if_id, le32_to_cpu(transaction_id));
+ break;
+
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
+ /* This command type is not expected as response for
+ * Lite version of the protocol. So return non-zero.
+ */
+ if (ipc_mux->protocol == MUX_LITE)
+ return -EINVAL;
break;
default:
@@ -156,38 +221,39 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
}
acb->wanted_response = MUX_CMD_INVALID;
- acb->got_response = le32_to_cpu(cmdh->command_type);
+ acb->got_response = le32_to_cpu(command_type);
complete(&ipc_mux->channel->ul_sem);
return 0;
}
-static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
+ union mux_cmd_param *param,
+ __le32 command_type, u8 if_id,
+ __le16 cmd_len, int size)
{
- union mux_cmd_param *param = &cmdh->param;
struct mux_session *session;
- int new_size;
+ struct hrtimer *adb_timer;
dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
- cmdh->if_id, le32_to_cpu(cmdh->command_type));
+ if_id, le32_to_cpu(command_type));
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_LITE_CMD_FLOW_CTL:
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
- if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "if_id [%d] not valid",
- cmdh->if_id);
+ if_id);
return -EINVAL; /* No session interface id. */
}
- session = &ipc_mux->session[cmdh->if_id];
+ session = &ipc_mux->session[if_id];
+ adb_timer = &ipc_mux->imem->adb_timer;
- new_size = offsetof(struct mux_lite_cmdh, param) +
- sizeof(param->flow_ctl);
if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
@@ -197,6 +263,16 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
* to limit uplink session queueing
*/
session->net_tx_stop = true;
+
+ /* We have to call Finish ADB here.
+ * Otherwise any already queued data
+ * will be sent to CP when ADB is full
+ * for some other sessions.
+ */
+ if (ipc_mux->protocol == MUX_AGGREGATION) {
+ ipc_mux_ul_adb_finish(ipc_mux);
+ ipc_imem_hrtimer_stop(adb_timer);
+ }
/* Update the stats */
session->flow_ctl_en_cnt++;
} else if (param->flow_ctl.mask == 0) {
@@ -205,8 +281,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
* our internal Tx flag and enabling kernel
* flow control
*/
+ dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
+ if_id, le32_to_cpu(param->flow_ctl.mask));
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
@@ -217,7 +295,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
break;
}
- dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
+ ipc_mux->acc_adb_size = 0;
+ ipc_mux->acc_payload_size = 0;
+
+ dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
le32_to_cpu(param->flow_ctl.mask));
break;
@@ -235,12 +316,20 @@ static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
__le32 trans_id = cmdh->transaction_id;
+ int size;
- if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->transaction_id)) {
/* Unable to decode command response indicates the cmd_type
* may be a command instead of response. So try to decoding it.
*/
- if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
+ size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->cmd_len, size)) {
/* Decoded command may need a response. Give the
* response according to the command type.
*/
@@ -349,7 +438,7 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
adgh = (struct mux_adgh *)block;
- if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
+ if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "invalid ADGH signature received");
return;
}
@@ -392,6 +481,192 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
ipc_mux->session[if_id].flush = 1;
}
+static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
+ struct mux_cmdh *cmdh, int size)
+{
+ u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
+ u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
+ u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
+ u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
+ union mux_cmd_param *cmd_p = NULL;
+ u32 cmd = link_st;
+ u32 trans_id;
+
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->cmd_len, size)) {
+ size = 0;
+ if (cmdh->command_type == cpu_to_le32(link_st)) {
+ cmd_p = &cmdh->param;
+ cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
+ } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
+ (cmdh->command_type == cpu_to_le32(fctl_dis))) {
+ cmd = fctl_ack;
+ } else {
+ return;
+ }
+ trans_id = le32_to_cpu(cmdh->transaction_id);
+ ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ trans_id, cmd_p, size, false, true);
+ }
+}
+
+/* Decode an aggregated command block. */
+static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_acbh *acbh;
+ struct mux_cmdh *cmdh;
+ u32 next_cmd_index;
+ u8 *block;
+ int size;
+
+ acbh = (struct mux_acbh *)(skb->data);
+ block = (u8 *)(skb->data);
+
+ next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
+ next_cmd_index = array_index_nospec(next_cmd_index,
+ sizeof(struct mux_cmdh));
+
+ while (next_cmd_index != 0) {
+ cmdh = (struct mux_cmdh *)&block[next_cmd_index];
+ next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->transaction_id)) {
+ size = offsetof(struct mux_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
+ }
+ }
+}
+
+/* process datagram */
+static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
+ struct mux_adth_dg *dg, struct sk_buff *skb,
+ int if_id, int nr_of_dg)
+{
+ u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
+ u32 packet_offset, i, rc;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index)
+ < sizeof(struct mux_adbh))
+ goto dg_error;
+
+ /* Is the packet inside of the ADB */
+ if (le32_to_cpu(dg->datagram_index) >=
+ le32_to_cpu(adbh->block_length)) {
+ goto dg_error;
+ } else {
+ packet_offset =
+ le32_to_cpu(dg->datagram_index) +
+ dl_head_pad_len;
+ /* Pass the packet to the netif layer. */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
+ packet_offset,
+ dg->service_class,
+ skb);
+ if (rc)
+ goto dg_error;
+ }
+ }
+ return 0;
+dg_error:
+ return -1;
+}
+
+/* Decode an aggregated data block. */
+static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ struct mux_adth_dg *dg;
+ struct iosm_wwan *wwan;
+ struct mux_adbh *adbh;
+ struct mux_adth *adth;
+ int nr_of_dg, if_id;
+ u32 adth_index;
+ u8 *block;
+
+ block = skb->data;
+ adbh = (struct mux_adbh *)block;
+
+ /* Process the aggregated datagram tables. */
+ adth_index = le32_to_cpu(adbh->first_table_index);
+
+ /* Has CP sent an empty ADB ? */
+ if (adth_index < 1) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ goto adb_decode_err;
+ }
+
+ /* Loop through mixed session tables. */
+ while (adth_index) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)(block + adth_index);
+
+ /* Get the interface id and map it to the netif id. */
+ if_id = adth->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
+ goto adb_decode_err;
+
+ if_id = array_index_nospec(if_id,
+ IPC_MEM_MUX_IP_SESSION_ENTRIES);
+
+ /* Is the session active ? */
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan)
+ goto adb_decode_err;
+
+ /* Consistency checks for aggregated datagram table. */
+ if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
+ goto adb_decode_err;
+
+ if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
+ sizeof(struct mux_adth_dg)))
+ goto adb_decode_err;
+
+ /* Calculate the number of datagrams. */
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ /* Is the datagram table empty ? */
+ if (nr_of_dg < 1) {
+ dev_err(ipc_mux->dev,
+ "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
+ adth_index, nr_of_dg,
+ le32_to_cpu(adth->next_table_index));
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ continue;
+ }
+
+ /* New aggregated datagram table. */
+ dg = &adth->dg;
+ if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
+ nr_of_dg) < 0)
+ goto adb_decode_err;
+
+ /* mark session for final flush */
+ ipc_mux->session[if_id].flush = 1;
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ }
+
+adb_decode_err:
+ return;
+}
+
+/**
+ * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
+ * depending on Header.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
u32 signature;
@@ -403,14 +678,18 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
signature = le32_to_cpup((__le32 *)skb->data);
switch (signature) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
+ mux_dl_adb_decode(ipc_mux, skb);
+ break;
+ case IOSM_AGGR_MUX_SIG_ADGH:
ipc_mux_dl_adgh_decode(ipc_mux, skb);
break;
-
case MUX_SIG_FCTH:
ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
break;
-
+ case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
+ ipc_mux_dl_acb_decode(ipc_mux, skb);
+ break;
case MUX_SIG_CMDH:
ipc_mux_dl_cmd_decode(ipc_mux, skb);
break;
@@ -427,7 +706,10 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
{
/* Take the first element of the free list. */
struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+ u32 *next_tb_id;
int qlt_size;
+ u32 if_id;
if (!skb)
return -EBUSY; /* Wait for a free ADB skb. */
@@ -436,7 +718,37 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
switch (type) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
+
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ /* Initialize the ADBH. */
+ ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
+ memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
+ ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
+ ul_adb->adbh->block_length =
+ cpu_to_le32(sizeof(struct mux_adbh));
+ next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
+ ul_adb->next_table_index = next_tb_id;
+
+ /* Clear the local copy of DGs for new ADB */
+ memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
+
+ /* Clear the DG count and QLT updated status for new ADB */
+ for (if_id = 0; if_id < no_if; if_id++) {
+ ul_adb->dg_count[if_id] = 0;
+ ul_adb->qlt_updated[if_id] = 0;
+ }
+ break;
+
+ case IOSM_AGGR_MUX_SIG_ADGH:
/* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
@@ -506,6 +818,94 @@ static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
str, bytes);
}
+static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, int *out_offset)
+{
+ int i, qlt_size, offset = *out_offset;
+ struct mux_qlth *p_adb_qlt;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u16 adth_dg_size;
+ u32 *next_tb_id;
+
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ if (ul_adb->dg_count[i] > 0) {
+ adth_dg_size = offsetof(struct mux_adth, dg) +
+ ul_adb->dg_count[i] * sizeof(*dg);
+
+ *ul_adb->next_table_index = offset;
+ adth = (struct mux_adth *)&ul_adb->buf[offset];
+ next_tb_id = (unsigned int *)&adth->next_table_index;
+ ul_adb->next_table_index = next_tb_id;
+ offset += adth_dg_size;
+ adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
+ adth->if_id = i;
+ adth->table_length = cpu_to_le16(adth_dg_size);
+ adth_dg_size -= offsetof(struct mux_adth, dg);
+ memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
+ ul_adb->if_cnt++;
+ }
+
+ if (ul_adb->qlt_updated[i]) {
+ *ul_adb->next_table_index = offset;
+ p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
+ ul_adb->next_table_index =
+ (u32 *)&p_adb_qlt->next_table_index;
+ memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
+ offset += qlt_size;
+ }
+ }
+ *out_offset = offset;
+}
+
+/**
+ * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
+ * @ipc_mux: Pointer to MUX data-struct.
+ */
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
+{
+ bool ul_data_pend = false;
+ struct mux_adb *ul_adb;
+ unsigned long flags;
+ int offset;
+
+ ul_adb = &ipc_mux->ul_adb;
+ if (!ul_adb->dest_skb)
+ return;
+
+ offset = *ul_adb->next_table_index;
+ ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
+ ul_adb->adbh->block_length = cpu_to_le32(offset);
+
+ if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
+ ul_adb->dest_skb = NULL;
+ return;
+ }
+
+ *ul_adb->next_table_index = 0;
+ ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
+ skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
+
+ spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
+ __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);
+
+ ul_adb->dest_skb = NULL;
+ /* Updates the TDs with ul_list */
+ ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
+
+ /* Delay the doorbell irq */
+ if (ul_data_pend)
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
+ ipc_mux->acc_payload_size += ul_adb->payload_size;
+ ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
+}
+
/* Allocates an ADB from the free list and initializes it with ADBH */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
struct mux_adb *adb, int *size_needed,
@@ -688,7 +1088,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
while (nr_of_pkts > 0) {
/* get destination skb allocated */
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
- MUX_SIG_ADGH)) {
+ IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "no reserved memory for ADGH");
return -ENOMEM;
}
@@ -720,7 +1120,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
memcpy(adb->buf + offset + pad_len, src_skb->data,
src_skb->len);
- adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
+ adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
adb->adgh->if_id = session_id;
adb->adgh->length =
cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
@@ -762,6 +1162,187 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
return adb_updated;
}
+/**
+ * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
+ * @ipc_mux: pointer to MUX instance data
+ * @p_adb: pointer to UL aggregated data block
+ * @session_id: session id
+ * @qlth_n_ql_size: Length (in bytes) of the queue level table (QLTH plus QLs)
+ * @ul_list: pointer to skb buffer head
+ */
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list)
+{
+ int qlevel = ul_list->qlen;
+ struct mux_qlth *p_qlt;
+
+ p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
+
+ /* Initialize QLTH if not been done */
+ if (p_adb->qlt_updated[session_id] == 0) {
+ p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ p_qlt->if_id = session_id;
+ p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
+ p_qlt->reserved = 0;
+ p_qlt->reserved2 = 0;
+ }
+
+ /* Update Queue Level information always */
+ p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
+ p_adb->qlt_updated[session_id] = 1;
+}
+
+/* Update the next table index. */
+static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
+ int session_id,
+ struct sk_buff_head *ul_list,
+ struct mux_adth_dg *dg,
+ int aligned_size,
+ u32 qlth_n_ql_size,
+ struct mux_adb *adb,
+ struct sk_buff *src_skb)
+{
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ ipc_mux_ul_adb_finish(ipc_mux);
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH)) {
+ dev_kfree_skb(src_skb);
+ return -ENOMEM;
+ }
+ ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
+
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+ return 0;
+}
+
+/* Process encode session UL data. */
+static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
+ struct mux_adth_dg *dg,
+ struct sk_buff_head *ul_list,
+ struct sk_buff *src_skb, int session_id,
+ int pkt_to_send, u32 qlth_n_ql_size,
+ int *out_offset, int head_pad_len)
+{
+ int aligned_size;
+ int offset = *out_offset;
+ unsigned long flags;
+ int nr_of_skb = 0;
+
+ while (pkt_to_send > 0) {
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+			"skb_peek returned NULL with count: %d",
+ pkt_to_send);
+ return -1;
+ }
+ aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size ||
+ ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
+ *adb->next_table_index = offset;
+ if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
+ ul_list, dg,
+ aligned_size,
+ qlth_n_ql_size, adb,
+ src_skb) < 0)
+ return -ENOMEM;
+ nr_of_skb = 0;
+ offset = le32_to_cpu(adb->adbh->block_length);
+ /* Load pointer to next available datagram entry */
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+ }
+ /* Add buffer without head padding to next pending transfer. */
+ memcpy(adb->buf + offset + head_pad_len,
+ src_skb->data, src_skb->len);
+ /* Setup datagram entry. */
+ dg->datagram_index = cpu_to_le32(offset);
+ dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
+ dg->service_class = (((struct sk_buff *)src_skb)->priority);
+ dg->reserved = 0;
+ adb->dg_cnt_total++;
+ adb->payload_size += le16_to_cpu(dg->datagram_length);
+ dg++;
+ adb->dg_count[session_id]++;
+ offset += aligned_size;
+ /* Remove the processed elements and free it. */
+ spin_lock_irqsave(&ul_list->lock, flags);
+ src_skb = __skb_dequeue(ul_list);
+ spin_unlock_irqrestore(&ul_list->lock, flags);
+
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+ pkt_to_send--;
+ }
+ *out_offset = offset;
+ return nr_of_skb;
+}
+
+/* Process encode session UL data to ADB. */
+static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list, struct mux_adb *adb,
+ int pkt_to_send)
+{
+ int adb_updated = -EINVAL;
+ int head_pad_len, offset;
+ struct sk_buff *src_skb = NULL;
+ struct mux_adth_dg *dg;
+ u32 qlth_n_ql_size;
+
+ /* If any of the opened session has set Flow Control ON then limit the
+ * UL data to mux_flow_ctrl_high_thresh_b bytes
+ */
+ if (ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+ return adb_updated;
+ }
+
+ qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+ head_pad_len = session->ul_head_pad_len;
+
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH))
+ return -ENOMEM;
+
+ offset = le32_to_cpu(adb->adbh->block_length);
+
+ if (ipc_mux->size_needed == 0)
+ ipc_mux->size_needed = offset;
+
+ /* Calculate the size needed for ADTH, QLTH and QL*/
+ if (adb->dg_count[session_id] == 0) {
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ }
+
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+
+ if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
+ session_id, pkt_to_send, qlth_n_ql_size, &offset,
+ head_pad_len) > 0) {
+ adb_updated = 1;
+ *adb->next_table_index = offset;
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ adb->adbh->block_length = cpu_to_le32(offset);
+ }
+
+ return adb_updated;
+}
+
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
struct sk_buff_head *ul_list;
@@ -802,28 +1383,88 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
* -> try next session id.
*/
continue;
-
- updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
- ul_list, &ipc_mux->ul_adb,
- dg_n);
+ if (ipc_mux->protocol == MUX_LITE)
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
+ else
+ updated = mux_ul_adb_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
}
ipc_mux->adb_prep_ongoing = false;
return updated == 1;
}
-void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+/* Calculates the Payload from any given ADB. */
+static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
+ struct mux_adbh *p_adbh)
{
- struct mux_adgh *adgh;
- u16 adgh_len;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u32 payload_size = 0;
+ u32 next_table_idx;
+ int nr_of_dg, i;
+
+ /* Process the aggregated datagram tables. */
+ next_table_idx = le32_to_cpu(p_adbh->first_table_index);
+
+ if (next_table_idx < sizeof(struct mux_adbh)) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ return payload_size;
+ }
- adgh = (struct mux_adgh *)skb->data;
- adgh_len = le16_to_cpu(adgh->length);
+ while (next_table_idx != 0) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
- if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
- ipc_mux->ul_flow == MUX_UL)
- ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes -
- adgh_len;
+ if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ if (nr_of_dg <= 0)
+ return payload_size;
+
+ dg = &adth->dg;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index) <
+ sizeof(struct mux_adbh)) {
+ return payload_size;
+ }
+ payload_size +=
+ le16_to_cpu(dg->datagram_length);
+ }
+ }
+ next_table_idx = le32_to_cpu(adth->next_table_index);
+ }
+
+ return payload_size;
+}
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ union mux_type_header hr;
+ u16 adgh_len;
+ int payload;
+
+ if (ipc_mux->protocol == MUX_LITE) {
+ hr.adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(hr.adgh->length);
+ if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+ ipc_mux->ul_data_pend_bytes =
+ ipc_mux->ul_data_pend_bytes - adgh_len;
+ } else {
+ hr.adbh = (struct mux_adbh *)(skb->data);
+ payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
+ ipc_mux->ul_data_pend_bytes -= payload;
+ }
if (ipc_mux->ul_flow == MUX_UL)
dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
@@ -846,10 +1487,13 @@ static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
/* Add session UL data to a ADB and ADGH */
ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
- if (ul_data_pend)
+ if (ul_data_pend) {
+ if (ipc_mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_mux->imem);
+
/* Delay the doorbell irq */
ipc_imem_td_update_timer_start(ipc_mux->imem);
-
+ }
/* reset the debounce flag */
ipc_mux->ev_mux_net_transmit_pending = false;
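A recurring shape in the aggregation decode paths above (ipc_mux_dl_acb_decode(), mux_dl_adb_decode(), ipc_mux_get_payload_from_adb()) is a linked list expressed as byte offsets: each table header carries a next-table index counted from the start of the block, and an index of 0 terminates the chain. A stripped-down sketch of that walk, with the bounds check a robust decoder needs before dereferencing each offset:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_tbl {
	__le32 signature;
	__le32 next_table_index;	/* byte offset from block start, 0 = end */
};

static void demo_walk_tables(u8 *block, u32 block_len, u32 first_index)
{
	u32 idx = first_index;

	while (idx && idx + sizeof(struct demo_tbl) <= block_len) {
		struct demo_tbl *t = (struct demo_tbl *)(block + idx);

		/* ... validate t->signature and process the table ... */
		idx = le32_to_cpu(t->next_table_index);
	}
}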
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
index aae83db5cbb8..5d4e3b89542c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -13,6 +13,39 @@
*/
#define MUX_QUEUE_LEVEL 1
+/* ADB finish timer value */
+#define IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC (500 * 1000)
+
+/* Enables the flow control (Flow is not allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE 5
+
+/* Disables the flow control (Flow is allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE 6
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command
+ */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK 7
+
+/* Aggregation Protocol Command for report packet indicating link quality
+ */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT 8
+
+/* Response to a report packet */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP 9
+
+/* ACBH: Signature of the Aggregated Command Block Header. */
+#define IOSM_AGGR_MUX_SIG_ACBH 0x48424341
+
+/* ADTH: Signature of the Aggregated Datagram Table Header. */
+#define IOSM_AGGR_MUX_SIG_ADTH 0x48544441
+
+/* ADBH: Signature of the Aggregated Data Block Header. */
+#define IOSM_AGGR_MUX_SIG_ADBH 0x48424441
+
+/* ADGH: Signature of the Datagram Header. */
+#define IOSM_AGGR_MUX_SIG_ADGH 0x48474441
+
/* Size of the buffer for the IP MUX commands. */
#define MUX_MAX_UL_ACB_BUF_SIZE 256
@@ -53,6 +86,85 @@
#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
/**
+ * struct mux_cmdh - Structure of Command Header.
+ * @signature: Signature of the Command Header.
+ * @cmd_len: Length (in bytes) of the Aggregated Command Block.
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @next_cmd_index: Index (in bytes) to the next command in the buffer.
+ * @command_type: Command Enum. See table Session Management chapter for
+ * details.
+ * @transaction_id: The Transaction ID shall be unique to the command
+ * @param: Optional parameters used with the command.
+ */
+struct mux_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_cmd_index;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_acbh - Structure of the Aggregated Command Block Header.
+ * @signature: Signature of the Aggregated Command Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Command Block.
+ * @first_cmd_index: Index (in bytes) to the first command in the buffer.
+ */
+struct mux_acbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_cmd_index;
+};
+
+/**
+ * struct mux_adbh - Structure of the Aggregated Data Block Header.
+ * @signature: Signature of the Aggregated Data Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Data Block.
+ * @first_table_index: Index (in bytes) to the first Datagram Table in
+ * the buffer.
+ */
+struct mux_adbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_table_index;
+};
+
+/**
+ * struct mux_adth - Structure of the Aggregated Datagram Table Header.
+ * @signature: Signature of the Aggregated Datagram Table Header.
+ * @table_length: Length (in bytes) of the datagram table.
+ * @if_id: ID of the interface the datagrams in the table
+ * belong to.
+ * @opt_ipv4v6: Indicates IPv4(=0)/IPv6(=1) hint.
+ * @reserved: Reserved bits. Set to zero.
+ * @next_table_index: Index (in bytes) to the next Datagram Table in
+ * the buffer.
+ * @reserved2: Reserved bytes. Set to zero
+ * @dg: datagram table with variable length
+ */
+struct mux_adth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_adth_dg dg;
+};
+
+/**
* struct mux_adgh - Aggregated Datagram Header.
* @signature: Signature of the Aggregated Datagram Header(0x48474441)
* @length: Length (in bytes) of the datagram header. This length
@@ -129,11 +241,25 @@ struct ipc_mem_lite_gen_tbl {
};
/**
- * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer
- * depending on Header.
- * @ipc_mux: Pointer to MUX data-struct
- * @skb: Pointer to ipc_skb.
+ * union mux_type_cmdh - Command header pointers for MUX Lite and aggregation
+ * @ack_lite: MUX Lite Command Header pointer
+ * @ack_aggr: Aggregated Command Header pointer
*/
+union mux_type_cmdh {
+ struct mux_lite_cmdh *ack_lite;
+ struct mux_cmdh *ack_aggr;
+};
+
+/**
+ * union mux_type_header - Aggregated data header pointers
+ * @adgh: Aggregated Datagram Header pointer
+ * @adbh: Aggregated Data Block Header pointer
+ */
+union mux_type_header {
+ struct mux_adgh *adgh;
+ struct mux_adbh *adbh;
+};
+
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
/**
@@ -147,7 +273,7 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
* @blocking: True for blocking send
* @respond: If true return transaction ID
*
- * Returns: 0 in success and failure value on error
+ * Returns: 0 on success and failure value on error
*/
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
u32 transaction_id, union mux_cmd_param *param,
@@ -190,4 +316,10 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux);
*/
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux);
+
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list);
+
#endif
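The IOSM_AGGR_MUX_SIG_* constants are simply the four header characters packed little-endian, so the first u32 of a received block can be compared against them directly, as ipc_mux_dl_decode() does. For example, 0x48424341 holds 'A' (0x41) in its lowest byte, then 'C', 'B', 'H' — the string "ACBH" as laid out in memory. A standalone C demonstration of the mapping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sig = 0x48424341;	/* IOSM_AGGR_MUX_SIG_ACBH */
	unsigned char b[4];
	int i;

	/* little-endian layout: least significant byte comes first */
	for (i = 0; i < 4; i++)
		b[i] = (sig >> (8 * i)) & 0xff;

	printf("%c%c%c%c\n", b[0], b[1], b[2], b[3]);	/* prints ACBH */
	return 0;
}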
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index d73894e2a84e..d3d34d1c4704 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -232,6 +232,7 @@ static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
*/
static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
{
+ enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
union acpi_object *object;
acpi_handle handle_acpi;
@@ -242,18 +243,23 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
}
object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
+ if (!object)
+ goto default_ret;
+
+ if (object->integer.value == 3)
+ sleep_state = IPC_PCIE_D3L2;
- if (object && object->integer.value == 3)
- return IPC_PCIE_D3L2;
+ kfree(object);
default_ret:
- return IPC_PCIE_D0L12;
+ return sleep_state;
}
static int ipc_pcie_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
+ int ret;
pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
pci_id->vendor);
@@ -286,6 +292,12 @@ static int ipc_pcie_probe(struct pci_dev *pci,
goto pci_enable_fail;
}
+ ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
+ return ret;
+ }
+
ipc_pcie_config_aspm(ipc_pcie);
dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
@@ -320,6 +332,7 @@ ret_fail:
static const struct pci_device_id iosm_ipc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
index 7d1f0cd7364c..844cf1fed994 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
@@ -14,6 +14,7 @@
/* Device ID */
#define INTEL_CP_DEVICE_7560_ID 0x7560
+#define INTEL_CP_DEVICE_7360_ID 0x7360
/* Define for BAR area usage */
#define IPC_DOORBELL_BAR0 0
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
index c6b032f95d2e..4627847c6daa 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
@@ -372,8 +372,6 @@ bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe)
{
- u32 tail =
- le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
struct ipc_protocol_td *p_td;
struct sk_buff *skb;
@@ -403,14 +401,6 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
goto ret;
}
- if (!IPC_CB(skb)) {
- dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL",
- pipe->pipe_nr, tail);
- ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
- skb = NULL;
- goto ret;
- }
-
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
(unsigned long long)p_td->buffer.address, skb->data);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index 27151148c782..4c9022a93e01 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -40,13 +40,11 @@ struct iosm_netdev_priv {
* @ipc_imem: Pointer to imem data-struct
* @sub_netlist: List of active netdevs
* @dev: Pointer device structure
- * @if_mutex: Mutex used for add and remove interface id
*/
struct iosm_wwan {
struct iosm_imem *ipc_imem;
struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
struct device *dev;
- struct mutex if_mutex; /* Mutex used for add and remove interface id */
};
/* Bring-up the wwan net link */
@@ -55,14 +53,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
int if_id = priv->if_id;
- int ret;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
- mutex_lock(&ipc_wwan->if_mutex);
-
/* get channel id */
priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
@@ -70,8 +65,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_err(ipc_wwan->dev,
"cannot connect wwan0 & id %d to the IPC mem layer",
if_id);
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
/* enable tx path, DL data may follow */
@@ -80,10 +74,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
priv->ch_id, priv->if_id);
- ret = 0;
-out:
- mutex_unlock(&ipc_wwan->if_mutex);
- return ret;
+ return 0;
}
/* Bring-down the wwan net link */
@@ -93,18 +84,16 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
netif_stop_queue(netdev);
- mutex_lock(&priv->ipc_wwan->if_mutex);
ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
priv->ch_id);
priv->ch_id = -1;
- mutex_unlock(&priv->ipc_wwan->if_mutex);
return 0;
}
/* Transmit a packet */
-static int ipc_wwan_link_transmit(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
@@ -168,6 +157,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
iosm_dev->max_mtu = ETH_MAX_MTU;
iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ iosm_dev->needs_free_netdev = true;
iosm_dev->netdev_ops = &ipc_inm_ops;
}
@@ -189,26 +179,17 @@ static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
priv->netdev = dev;
priv->ipc_wwan = ipc_wwan;
- mutex_lock(&ipc_wwan->if_mutex);
- if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
- err = -EBUSY;
- goto out_unlock;
- }
+ if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id]))
+ return -EBUSY;
err = register_netdevice(dev);
if (err)
- goto out_unlock;
+ return err;
rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
- mutex_unlock(&ipc_wwan->if_mutex);
-
netif_device_attach(dev);
return 0;
-
-out_unlock:
- mutex_unlock(&ipc_wwan->if_mutex);
- return err;
}
static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
@@ -222,17 +203,12 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
return;
- mutex_lock(&ipc_wwan->if_mutex);
-
if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
- goto unlock;
+ return;
RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
/* unregistering includes synchronize_net() */
unregister_netdevice_queue(dev, head);
-
-unlock:
- mutex_unlock(&ipc_wwan->if_mutex);
}
static const struct wwan_ops iosm_wwan_ops = {
@@ -330,8 +306,6 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
return NULL;
}
- mutex_init(&ipc_wwan->if_mutex);
-
return ipc_wwan;
}
@@ -340,7 +314,5 @@ void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
/* This call will remove all child netdev(s) */
wwan_unregister_ops(ipc_wwan->dev);
- mutex_destroy(&ipc_wwan->if_mutex);
-
kfree(ipc_wwan);
}
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
index e4d0f696687f..f7ca52353f40 100644
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -258,6 +258,7 @@ static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
{ .chan = "DUN", .driver_data = WWAN_PORT_AT },
+ { .chan = "DUN2", .driver_data = WWAN_PORT_AT },
{ .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
{ .chan = "QMI", .driver_data = WWAN_PORT_QMI },
{ .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 71bf9b4f769f..ef70bb7c88ad 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -385,13 +385,13 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
int err;
while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
- struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL);
+ struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);
if (unlikely(!skb))
break;
err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
- MHI_DEFAULT_MRU, MHI_EOT);
+ mbim->mru, MHI_EOT);
if (unlikely(err)) {
kfree_skb(skb);
break;
@@ -582,6 +582,7 @@ static void mhi_mbim_setup(struct net_device *ndev)
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
ndev->tx_queue_len = 1000;
+ ndev->needs_free_netdev = true;
}
static const struct wwan_ops mhi_mbim_wwan_ops = {
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
index 5dfa2eba6014..17d46f4d2913 100644
--- a/drivers/net/wwan/qcom_bam_dmux.c
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -755,7 +755,7 @@ static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
return 0;
dmux->tx = dma_request_chan(dev, "tx");
- if (IS_ERR(dmux->rx)) {
+ if (IS_ERR(dmux->tx)) {
dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
dmux->tx = NULL;
bam_dmux_runtime_suspend(dev);
diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
new file mode 100644
index 000000000000..dc6a7d682c15
--- /dev/null
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y += -Werror
+
+obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
+mtk_t7xx-y:= t7xx_pci.o \
+ t7xx_pcie_mac.o \
+ t7xx_mhccif.o \
+ t7xx_state_monitor.o \
+ t7xx_modem_ops.o \
+ t7xx_cldma.o \
+ t7xx_hif_cldma.o \
+ t7xx_port_proxy.o \
+ t7xx_port_ctrl_msg.o \
+ t7xx_port_wwan.o \
+ t7xx_hif_dpmaif.o \
+ t7xx_hif_dpmaif_tx.o \
+ t7xx_hif_dpmaif_rx.o \
+ t7xx_dpmaif.o \
+ t7xx_netdev.o
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
new file mode 100644
index 000000000000..9f43f256db1d
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/types.h>
+
+#include "t7xx_cldma.h"
+
+#define ADDR_SIZE 8
+
+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
+{
+ u32 val;
+
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
+ val |= IP_BUSY_WAKEUP;
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
+}
+
+/**
+ * t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
+ * @hw_info: Pointer to struct t7xx_cldma_hw.
+ *
+ * Restore HW after resume. Writes uplink configuration for CLDMA HW.
+ */
+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
+{
+ u32 ul_cfg;
+
+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
+
+ if (hw_info->hw_mode == MODE_BIT_64)
+ ul_cfg |= UL_CFG_BIT_MODE_64;
+ else if (hw_info->hw_mode == MODE_BIT_40)
+ ul_cfg |= UL_CFG_BIT_MODE_40;
+ else if (hw_info->hw_mode == MODE_BIT_36)
+ ul_cfg |= UL_CFG_BIT_MODE_36;
+
+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ /* Disable TX and RX invalid address check */
+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
+}
+
+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val, reg);
+}
+
+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
+{
+ /* Enable the TX & RX interrupts */
+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
+ /* Enable the empty queue interrupt */
+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
+}
+
+void t7xx_cldma_hw_reset(void __iomem *ao_base)
+{
+ u32 val;
+
+ val = ioread32(ao_base + REG_INFRA_RST2_SET);
+ val |= RST2_PMIC_SW_RST_SET;
+ iowrite32(val, ao_base + REG_INFRA_RST2_SET);
+ val = ioread32(ao_base + REG_INFRA_RST4_SET);
+ val |= RST4_CLDMA1_SW_RST_SET;
+ iowrite32(val, ao_base + REG_INFRA_RST4_SET);
+ udelay(1);
+
+ val = ioread32(ao_base + REG_INFRA_RST4_CLR);
+ val |= RST4_CLDMA1_SW_RST_CLR;
+ iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
+ val = ioread32(ao_base + REG_INFRA_RST2_CLR);
+ val |= RST2_PMIC_SW_RST_CLR;
+ iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
+}
+
+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
+{
+ u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
+
+ return ioread64(hw_info->ap_pdn_base + offset);
+}
+
+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
+ enum mtk_txrx tx_rx)
+{
+ u32 offset = qno * ADDR_SIZE;
+ void __iomem *reg;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
+ iowrite64(address, reg + offset);
+}
+
+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *base = hw_info->ap_pdn_base;
+
+ if (tx_rx == MTK_RX)
+ iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
+ else
+ iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
+}
+
+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 mask, val;
+
+ mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
+ val = ioread32(reg);
+
+ return val & mask;
+}
+
+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
+{
+ unsigned int ch_id;
+
+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+ ch_id &= bitmask;
+ /* Clear the ch IDs in the TX interrupt status register */
+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+}
+
+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
+{
+ unsigned int ch_id;
+
+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+ ch_id &= bitmask;
+ /* Clear the ch IDs in the RX interrupt status register */
+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+}
+
+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
+ val = ioread32(reg);
+ return val & bitmask;
+}
+
+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val, reg);
+}
+
+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
+}
+
+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val, reg);
+}
+
+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
+}
+
+/**
+ * t7xx_cldma_hw_init() - Initialize CLDMA HW.
+ * @hw_info: Pointer to struct t7xx_cldma_hw.
+ *
+ * Write uplink and downlink configuration to CLDMA HW.
+ */
+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
+{
+ u32 ul_cfg, dl_cfg;
+
+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
+ /* Configure the DRAM address mode */
+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
+ dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
+
+ if (hw_info->hw_mode == MODE_BIT_64) {
+ ul_cfg |= UL_CFG_BIT_MODE_64;
+ dl_cfg |= DL_CFG_BIT_MODE_64;
+ } else if (hw_info->hw_mode == MODE_BIT_40) {
+ ul_cfg |= UL_CFG_BIT_MODE_40;
+ dl_cfg |= DL_CFG_BIT_MODE_40;
+ } else if (hw_info->hw_mode == MODE_BIT_36) {
+ ul_cfg |= UL_CFG_BIT_MODE_36;
+ dl_cfg |= DL_CFG_BIT_MODE_36;
+ }
+
+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
+ dl_cfg |= DL_CFG_UP_HW_LAST;
+ iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
+ iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
+ iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
+}
+
+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
+ iowrite32(CLDMA_ALL_Q, reg);
+}
+
+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
+{
+ void __iomem *reg;
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
+ iowrite32(TXRX_STATUS_BITMASK, reg);
+ iowrite32(EMPTY_STATUS_BITMASK, reg);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h
new file mode 100644
index 000000000000..8949e8377fb0
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_CLDMA_H__
+#define __T7XX_CLDMA_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define CLDMA_TXQ_NUM 8
+#define CLDMA_RXQ_NUM 8
+#define CLDMA_ALL_Q GENMASK(7, 0)
+
+/* Interrupt status bits */
+#define EMPTY_STATUS_BITMASK GENMASK(15, 8)
+#define TXRX_STATUS_BITMASK GENMASK(7, 0)
+#define EQ_STA_BIT_OFFSET 8
+#define L2_INT_BIT_COUNT 16
+#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK)
+
+#define TQ_ERR_INT_BITMASK GENMASK(23, 16)
+#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
+
+#define RQ_ERR_INT_BITMASK GENMASK(23, 16)
+#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
+
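To make the bit layout concrete: in the L2 status words the low byte carries
the per-queue TX/RX done bits and the next byte the queue-empty bits, which
is what EQ_STA_BIT() encodes. A small illustrative decoder, not driver code
(for_each_set_bit() and pr_debug() come from the usual kernel headers, which
this sketch assumes are available):

static void decode_l2_status(u32 sta)
{
	unsigned long done = sta & TXRX_STATUS_BITMASK;
	unsigned long empty = (sta & EMPTY_STATUS_BITMASK) >> EQ_STA_BIT_OFFSET;
	unsigned int q;

	for_each_set_bit(q, &done, CLDMA_TXQ_NUM)
		pr_debug("queue %u: transfer done\n", q);
	for_each_set_bit(q, &empty, CLDMA_TXQ_NUM)
		pr_debug("queue %u: ran empty\n", q);
}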
+#define CLDMA0_AO_BASE 0x10049000
+#define CLDMA0_PD_BASE 0x1021d000
+#define CLDMA1_AO_BASE 0x1004b000
+#define CLDMA1_PD_BASE 0x1021f000
+
+#define CLDMA_R_AO_BASE 0x10023000
+#define CLDMA_R_PD_BASE 0x1023d000
+
+/* CLDMA TX */
+#define REG_CLDMA_UL_START_ADDRL_0 0x0004
+#define REG_CLDMA_UL_START_ADDRH_0 0x0008
+#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044
+#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048
+#define REG_CLDMA_UL_STATUS 0x0084
+#define REG_CLDMA_UL_START_CMD 0x0088
+#define REG_CLDMA_UL_RESUME_CMD 0x008c
+#define REG_CLDMA_UL_STOP_CMD 0x0090
+#define REG_CLDMA_UL_ERROR 0x0094
+#define REG_CLDMA_UL_CFG 0x0098
+#define UL_CFG_BIT_MODE_36 BIT(5)
+#define UL_CFG_BIT_MODE_40 BIT(6)
+#define UL_CFG_BIT_MODE_64 BIT(7)
+#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5)
+
+#define REG_CLDMA_UL_MEM 0x009c
+#define UL_MEM_CHECK_DIS BIT(0)
+
+/* CLDMA RX */
+#define REG_CLDMA_DL_START_CMD 0x05bc
+#define REG_CLDMA_DL_RESUME_CMD 0x05c0
+#define REG_CLDMA_DL_STOP_CMD 0x05c4
+#define REG_CLDMA_DL_MEM 0x0508
+#define DL_MEM_CHECK_DIS BIT(0)
+
+#define REG_CLDMA_DL_CFG 0x0404
+#define DL_CFG_UP_HW_LAST BIT(2)
+#define DL_CFG_BIT_MODE_36 BIT(10)
+#define DL_CFG_BIT_MODE_40 BIT(11)
+#define DL_CFG_BIT_MODE_64 BIT(12)
+#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10)
+
+#define REG_CLDMA_DL_START_ADDRL_0 0x0478
+#define REG_CLDMA_DL_START_ADDRH_0 0x047c
+#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8
+#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc
+#define REG_CLDMA_DL_STATUS 0x04f8
+
+/* CLDMA MISC */
+#define REG_CLDMA_L2TISAR0 0x0810
+#define REG_CLDMA_L2TISAR1 0x0814
+#define REG_CLDMA_L2TIMR0 0x0818
+#define REG_CLDMA_L2TIMR1 0x081c
+#define REG_CLDMA_L2TIMCR0 0x0820
+#define REG_CLDMA_L2TIMCR1 0x0824
+#define REG_CLDMA_L2TIMSR0 0x0828
+#define REG_CLDMA_L2TIMSR1 0x082c
+#define REG_CLDMA_L3TISAR0 0x0830
+#define REG_CLDMA_L3TISAR1 0x0834
+#define REG_CLDMA_L2RISAR0 0x0850
+#define REG_CLDMA_L2RISAR1 0x0854
+#define REG_CLDMA_L3RISAR0 0x0870
+#define REG_CLDMA_L3RISAR1 0x0874
+#define REG_CLDMA_IP_BUSY 0x08b4
+#define IP_BUSY_WAKEUP BIT(0)
+#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0)
+#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0)
+
+/* CLDMA MISC */
+#define REG_CLDMA_L2RIMR0 0x0858
+#define REG_CLDMA_L2RIMR1 0x085c
+#define REG_CLDMA_L2RIMCR0 0x0860
+#define REG_CLDMA_L2RIMCR1 0x0864
+#define REG_CLDMA_L2RIMSR0 0x0868
+#define REG_CLDMA_L2RIMSR1 0x086c
+#define REG_CLDMA_BUSY_MASK 0x0954
+#define BUSY_MASK_PCIE BIT(0)
+#define BUSY_MASK_AP BIT(1)
+#define BUSY_MASK_MD BIT(2)
+
+#define REG_CLDMA_INT_MASK 0x0960
+
+/* CLDMA RESET */
+#define REG_INFRA_RST4_SET 0x0730
+#define RST4_CLDMA1_SW_RST_SET BIT(20)
+
+#define REG_INFRA_RST4_CLR 0x0734
+#define RST4_CLDMA1_SW_RST_CLR BIT(20)
+
+#define REG_INFRA_RST2_SET 0x0140
+#define RST2_PMIC_SW_RST_SET BIT(18)
+
+#define REG_INFRA_RST2_CLR 0x0144
+#define RST2_PMIC_SW_RST_CLR BIT(18)
+
+enum mtk_txrx {
+ MTK_TX,
+ MTK_RX,
+};
+
+enum t7xx_hw_mode {
+ MODE_BIT_32,
+ MODE_BIT_36,
+ MODE_BIT_40,
+ MODE_BIT_64,
+};
+
+struct t7xx_cldma_hw {
+ enum t7xx_hw_mode hw_mode;
+ void __iomem *ap_ao_base;
+ void __iomem *ap_pdn_base;
+ u32 phy_interrupt_id;
+};
+
+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info);
+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info);
+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info,
+ unsigned int qno, u64 address, enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_reset(void __iomem *ao_base);
+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
+ enum mtk_txrx tx_rx);
+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info);
+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info);
+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
+#endif
diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
new file mode 100644
index 000000000000..6d3edadecbec
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
@@ -0,0 +1,1281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_reg.h"
+
+#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us)
+
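The macro above only adapts readx_poll_timeout_atomic() from <linux/iopoll.h>
to MMIO reads, so callers get the usual 0 / -ETIMEDOUT contract. A minimal
usage sketch, separate from the patch (the register offset and busy bit are
made-up values):

static int wait_hw_idle(void __iomem *base)
{
	u32 val;

	/* Busy-wait (0 us between reads) for at most 100 us until the
	 * assumed busy bit clears.
	 */
	return ioread32_poll_timeout_atomic(base + 0x10, val,
					    !(val & BIT(0)), 0, 100);
}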
+static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info)
+{
+ struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask;
+ u32 value, ul_intr_enable, dl_intr_enable;
+ int ret;
+
+ ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK;
+ isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
+ iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+
+ /* Set interrupt enable mask */
+ iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
+ iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
+
+ /* Check mask status */
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ value, (value & ul_intr_enable) != ul_intr_enable, 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR;
+ isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable;
+ ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN |
+ DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN;
+ isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
+ iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+
+ /* Set DL ISR PD enable mask */
+ iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0,
+ value, (value & ul_intr_enable) != ul_intr_enable, 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY;
+ iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
+ iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk,
+ hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK);
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
+ value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
+ iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK);
+
+ return 0;
+}
+
+static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ struct dpmaif_isr_en_mask *isr_en_msk;
+ u32 value, ul_int_que_done;
+ int ret;
+
+ isr_en_msk = &hw_info->isr_en_mask;
+ ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
+ isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done;
+ iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ value, (value & ul_int_que_done) == ul_int_que_done, 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret)
+ dev_err(hw_info->dev,
+ "Could not mask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
+ value);
+}
+
+void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ struct dpmaif_isr_en_mask *isr_en_msk;
+ u32 value, ul_int_que_done;
+ int ret;
+
+ isr_en_msk = &hw_info->isr_en_mask;
+ ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
+ isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done;
+ iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
+ value, (value & ul_int_que_done) != ul_int_que_done, 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret)
+ dev_err(hw_info->dev,
+ "Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
+ value);
+}
+
+void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
+{
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+}
+
+void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
+{
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+}
+
+static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
+ iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ return value;
+}
+
+static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno)
+{
+ u32 value, q_done;
+ int ret;
+
+ q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
+ iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+
+ ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done,
+ 0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done);
+ if (ret) {
+ dev_err(hw_info->dev,
+ "Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
+ value);
+ return -ETIMEDOUT;
+ }
+
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done;
+ return 0;
+}
+
+void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
+{
+ u32 mask;
+
+ mask = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
+ iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask;
+}
+
+void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info)
+{
+ u32 ip_busy_sts;
+
+ ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
+ iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
+}
+
+static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int qno)
+{
+ if (qno == DPF_RX_QNO0)
+ iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ else
+ iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+}
+
+void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int qno)
+{
+ if (qno == DPF_RX_QNO0)
+ iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+ else
+ iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
+}
+
+void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+}
+
+void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+}
+
+static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para,
+ enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue)
+{
+ para->intr_types[para->intr_cnt] = intr_type;
+ para->intr_queues[para->intr_cnt] = intr_queue;
+ para->intr_cnt++;
+}
+
+/* The para->intr_cnt counter is set to zero before this function is called.
+ * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
+ */
+static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int intr_status,
+ struct dpmaif_hw_intr_st_para *para)
+{
+ unsigned long value;
+
+ value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status);
+ if (value) {
+ unsigned int index;
+
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value);
+
+ for_each_set_bit(index, &value, DPMAIF_TXQ_NUM)
+ t7xx_dpmaif_mask_ulq_intr(hw_info, index);
+ }
+
+ value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value);
+
+ value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value);
+
+ value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value);
+
+ value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status);
+ if (value)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value);
+
+ /* Clear interrupt status */
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+}
+
+/* The para->intr_cnt counter is set to zero before this function is called.
+ * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
+ */
+static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int intr_status,
+ struct dpmaif_hw_intr_st_para *para, int qno)
+{
+ if (qno == DPF_RX_QNO_DFT) {
+ if (intr_status & DP_DL_INT_SKB_LEN_ERR)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_BATCNT_LEN_ERR,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ }
+
+ if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT);
+ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR;
+ iowrite32(DP_DL_INT_PITCNT_LEN_ERR,
+ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
+ }
+
+ if (intr_status & DP_DL_INT_PKT_EMPTY_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_FRG_EMPTY_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_MTU_ERR_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno));
+ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
+ }
+
+ if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR)
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR,
+ DPF_RX_QNO_DFT);
+
+ if (intr_status & DP_DL_INT_Q0_DONE) {
+ /* Mask RX done interrupt immediately after it occurs, do not clear
+ * the interrupt if the mask operation fails.
+ */
+ if (!t7xx_mask_dlq_intr(hw_info, qno))
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno));
+ else
+ intr_status &= ~DP_DL_INT_Q0_DONE;
+ }
+ } else {
+ if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) {
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno));
+ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
+ }
+
+ if (intr_status & DP_DL_INT_Q1_DONE) {
+ if (!t7xx_mask_dlq_intr(hw_info, qno))
+ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno));
+ else
+ intr_status &= ~DP_DL_INT_Q1_DONE;
+ }
+ }
+
+ intr_status |= DP_DL_INT_BATCNT_LEN_ERR;
+ /* Clear interrupt status */
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+}
+
+/**
+ * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ * @para: Pointer to struct dpmaif_hw_intr_st_para.
+ * @qno: Queue number.
+ *
+ * Reads RX/TX interrupt status from HW and clears UL/DL status as needed.
+ *
+ * Return: Interrupt count.
+ */
+int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_intr_st_para *para, int qno)
+{
+ u32 rx_intr_status, tx_intr_status = 0;
+ u32 rx_intr_qdone, tx_intr_qdone = 0;
+
+ rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
+ rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0);
+
+ /* TX interrupt status */
+ if (qno == DPF_RX_QNO_DFT) {
+ /* All ULQ and DLQ0 interrupts use the same source; no need to check
+ * ULQ interrupts when a DLQ1 interrupt has occurred.
+ */
+ tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+ tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
+ }
+
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+
+ if (qno == DPF_RX_QNO_DFT) {
+ /* Do not schedule bottom half again or clear UL interrupt status when we
+ * have already masked it.
+ */
+ tx_intr_status &= ~tx_intr_qdone;
+ if (tx_intr_status)
+ t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para);
+ }
+
+ if (rx_intr_status) {
+ if (qno == DPF_RX_QNO0) {
+ rx_intr_status &= DP_DL_Q0_STATUS_MASK;
+ if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE)
+ /* Do not schedule bottom half again or clear DL
+ * queue done interrupt status when we have already masked it.
+ */
+ rx_intr_status &= ~DP_DL_INT_Q0_DONE;
+ } else {
+ rx_intr_status &= DP_DL_Q1_STATUS_MASK;
+ if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE)
+ rx_intr_status &= ~DP_DL_INT_Q1_DONE;
+ }
+
+ if (rx_intr_status)
+ t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno);
+ }
+
+ return para->intr_cnt;
+}
+
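As the kernel-doc notes, the helper above both samples and acknowledges the
status registers, so the caller must drain @para in a single pass. A hedged
sketch of the consuming side, illustrative only (dispatch_one() is an assumed
callback, not the driver's actual handler):

static void handle_dpmaif_irq(struct dpmaif_hw_info *hw_info, int qno)
{
	struct dpmaif_hw_intr_st_para para = {};	/* intr_cnt starts at 0 */
	int i, cnt;

	cnt = t7xx_dpmaif_hw_get_intr_cnt(hw_info, &para, qno);
	for (i = 0; i < cnt; i++)
		dispatch_one(para.intr_types[i], para.intr_queues[i]);
}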
+static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
+ value |= DPMAIF_MEM_CLR;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
+
+ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR,
+ value, !(value & DPMAIF_MEM_CLR), 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+}
+
+static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT);
+ udelay(2);
+ iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT);
+ udelay(2);
+ iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT);
+ udelay(2);
+ iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT);
+ udelay(2);
+}
+
+static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info)
+{
+ u32 ap_port_mode;
+ int ret;
+
+ t7xx_dpmaif_hw_reset(hw_info);
+
+ ret = t7xx_dpmaif_sram_init(hw_info);
+ if (ret)
+ return ret;
+
+ ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ ap_port_mode |= DPMAIF_PORT_MODE_PCIE;
+ iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN);
+ return 0;
+}
+
+static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW);
+}
+
+static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info)
+{
+ u32 enable_bat_cache, enable_pit_burst;
+
+ enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+ enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI;
+ iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+
+ enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN;
+ iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+ /* DPMAIF DL DLQ part HW setting */
+
+static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2;
+ value |= DPMAIF_HASH_PRIME_DF << 4;
+ value |= DPMAIF_HPC_TOTAL_NUM << 8;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL);
+}
+
+static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = DPMAIF_AGG_MAX_LEN_DF | DPMAIF_AGG_TBL_ENT_NUM_DF << 16;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG);
+}
+
+static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF,
+ hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5);
+}
+
+static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0);
+}
+
+static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value, i;
+
+ /* Each register holds two DLQ threshold timeout values */
+ for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) {
+ value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF);
+ value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK,
+ DPMAIF_DLQ_TIMEOUT_THRES_DF);
+ iowrite32(value,
+ hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i);
+ }
+}
+
+static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES);
+}
+
+static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info)
+{
+ t7xx_dpmaif_hw_hpc_cntl_set(hw_info);
+ t7xx_dpmaif_hw_agg_cfg_set(hw_info);
+ t7xx_dpmaif_hw_hash_bit_choose_set(hw_info);
+ t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info);
+ t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info);
+ t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info);
+}
+
+static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en)
+{
+ u32 value, dl_bat_init = 0;
+ int ret;
+
+ if (frg_en)
+ dl_bat_init = DPMAIF_DL_BAT_FRG_INIT;
+
+ dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET;
+ dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret) {
+ dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
+ return ret;
+ }
+
+ iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (ret)
+ dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n");
+
+ return ret;
+}
+
+static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info,
+ dma_addr_t addr)
+{
+ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0);
+ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3);
+}
+
+static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+ value &= ~DPMAIF_BAT_SIZE_MSK;
+ value |= size & DPMAIF_BAT_SIZE_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+}
+
+static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+
+ if (enable)
+ value |= DPMAIF_BAT_EN_MSK;
+ else
+ value &= ~DPMAIF_BAT_EN_MSK;
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+ value &= ~DPMAIF_BAT_BID_MAXCNT_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+}
+
+static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info)
+{
+ iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1);
+}
+
+static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+ value &= ~DPMAIF_PIT_CHK_NUM_MSK;
+ value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+}
+
+static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+ value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK,
+ DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+ value &= ~DPMAIF_BAT_BUF_SZ_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK,
+ DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+ value &= ~DPMAIF_BAT_RSV_LEN_MSK;
+ value |= DPMAIF_HW_BAT_RSVLEN;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
+}
+
+static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ value &= ~DPMAIF_PKT_ALIGN_MSK;
+ value |= DPMAIF_PKT_ALIGN_EN;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ value |= DPMAIF_DL_PKT_CHECKSUM_EN;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+ value &= ~DPMAIF_FRG_CHECK_THRES_MSK;
+ value |= DPMAIF_HW_CHK_FRG_NUM;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+ value &= ~DPMAIF_FRG_BUF_SZ_MSK;
+ value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK,
+ DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+}
+
+static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+
+ if (enable)
+ value |= DPMAIF_FRG_EN_MSK;
+ else
+ value &= ~DPMAIF_FRG_EN_MSK;
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+ value &= ~DPMAIF_BAT_CHECK_THRES_MSK;
+ value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM);
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
+}
+
+static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
+ value &= ~DPMAIF_DL_PIT_SEQ_MSK;
+ value |= DPMAIF_DL_PIT_SEQ_VALUE;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
+}
+
+static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info,
+ dma_addr_t addr)
+{
+ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0);
+ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4);
+}
+
+static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
+ value &= ~DPMAIF_PIT_SIZE_MSK;
+ value |= size & DPMAIF_PIT_SIZE_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5);
+ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6);
+}
+
+static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
+ value |= DPMAIF_DLQPIT_EN_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
+}
+
+static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info,
+ unsigned int pit_idx)
+{
+ unsigned int dl_pit_init;
+ int timeout;
+ u32 value;
+
+ dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET;
+ dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS);
+ dl_pit_init |= DPMAIF_DL_PIT_INIT_EN;
+
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
+ value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
+ DPMAIF_CHECK_DELAY_US,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (timeout) {
+ dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n");
+ return;
+ }
+
+ iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT);
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
+ value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
+ DPMAIF_CHECK_DELAY_US,
+ DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (timeout)
+ dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n");
+}
+
+static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num,
+ struct dpmaif_dl *dl_que)
+{
+ t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base);
+ t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt);
+ t7xx_dpmaif_dl_dlq_pit_en(hw_info);
+ t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num);
+}
+
+static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++)
+ t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]);
+}
+
+static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ u32 dl_bat_init, value;
+ int timeout;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+
+ if (enable)
+ value |= DPMAIF_BAT_EN_MSK;
+ else
+ value &= ~DPMAIF_BAT_EN_MSK;
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
+ dl_bat_init = DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT;
+ dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;
+
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (timeout)
+ dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n");
+
+ iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
+ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
+ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (timeout)
+ dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
+}
+
+static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info)
+{
+ struct dpmaif_dl *dl_que;
+ int ret;
+
+ t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info);
+
+ dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */
+ if (!dl_que->que_started)
+ return -EBUSY;
+
+ t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info);
+ t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info);
+ t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info);
+ t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info);
+ t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info);
+ t7xx_dpmaif_dl_set_pkt_alignment(hw_info);
+ t7xx_dpmaif_dl_set_pit_seqnum(hw_info);
+ t7xx_dpmaif_dl_set_ao_mtu(hw_info);
+ t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info);
+ t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info);
+ t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info);
+ t7xx_dpmaif_dl_frg_ao_en(hw_info, true);
+
+ t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base);
+ t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt);
+ t7xx_dpmaif_dl_bat_en(hw_info, true);
+
+ ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true);
+ if (ret)
+ return ret;
+
+ t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base);
+ t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt);
+ t7xx_dpmaif_dl_bat_en(hw_info, false);
+
+ ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false);
+ if (ret)
+ return ret;
+
+ /* Init PIT (two PIT tables) */
+ t7xx_dpmaif_config_all_dlq_hw(hw_info);
+ t7xx_dpmaif_dl_all_q_en(hw_info, true);
+ t7xx_dpmaif_dl_set_pkt_checksum(hw_info);
+ return 0;
+}
+
+static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, unsigned int size)
+{
+ unsigned int value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
+ value &= ~DPMAIF_DRB_SIZE_MSK;
+ value |= size & DPMAIF_DRB_SIZE_MSK;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
+}
+
+static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, dma_addr_t addr)
+{
+ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num));
+ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num));
+}
+
+static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, bool ready)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+
+ if (ready)
+ value |= BIT(q_num);
+ else
+ value &= ~BIT(q_num);
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+}
+
+static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info,
+ unsigned int q_num, bool enable)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+
+ if (enable)
+ value |= BIT(q_num + 8);
+ else
+ value &= ~BIT(q_num + 8);
+
+ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+}
+
+static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info)
+{
+ struct dpmaif_ul *ul_que;
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ ul_que = &hw_info->ul_que[i];
+ if (ul_que->que_started) {
+ t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt *
+ DPMAIF_UL_DRB_SIZE_WORD);
+ t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base);
+ t7xx_dpmaif_ul_rdy_en(hw_info, i, true);
+ t7xx_dpmaif_ul_arb_en(hw_info, i, true);
+ } else {
+ t7xx_dpmaif_ul_arb_en(hw_info, i, false);
+ }
+ }
+}
+
+static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info)
+{
+ u32 ap_cfg;
+ int ret;
+
+ ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
+ ap_cfg |= DPMAIF_SRAM_SYNC;
+ iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG,
+ ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET);
+ iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET);
+ return 0;
+}
+
+static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info)
+{
+ u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY);
+
+ return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS);
+}
+
+static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
+{
+ u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+
+ if (enable)
+ ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN;
+ else
+ ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN;
+
+ iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
+}
+
+static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info)
+{
+ u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY);
+
+ return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS);
+}
+
+void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num,
+ unsigned int drb_entry_cnt)
+{
+ u32 ul_update, value;
+ int err;
+
+ ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK;
+ ul_update |= DPMAIF_UL_ADD_UPDATE;
+
+ err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
+ value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (err) {
+ dev_err(hw_info->dev, "UL add is not ready\n");
+ return;
+ }
+
+ iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num));
+
+ err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
+ value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (err)
+ dev_err(hw_info->dev, "Timeout updating UL add\n");
+}
+
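t7xx_dpmaif_ul_update_hw_drb_cnt() above and the BAT/PIT "add" helpers below
all share one doorbell shape: poll until the not-ready bit clears, post the
update, then poll again so the write is known to be consumed before the slot
is reused. A generic sketch of that pattern, illustrative only (the function
and parameter names are assumptions):

static int dpmaif_ring_doorbell(void __iomem *reg, u32 update, u32 busy_bit,
				u32 timeout_us)
{
	u32 val;
	int ret;

	ret = ioread32_poll_timeout_atomic(reg, val, !(val & busy_bit),
					   0, timeout_us);
	if (ret)
		return ret;	/* HW never became ready */

	iowrite32(update, reg);

	/* Confirm HW consumed the update before the caller moves on. */
	return ioread32_poll_timeout_atomic(reg, val, !(val & busy_bit),
					    0, timeout_us);
}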
+unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num));
+
+ return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD;
+}
+
+int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx,
+ unsigned int pit_remain_cnt)
+{
+ u32 dl_update, value;
+ int ret;
+
+ dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK;
+ dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
+ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret) {
+ dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n");
+ return ret;
+ }
+
+ iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD);
+
+ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
+ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+ if (ret) {
+ dev_err(hw_info->dev, "Data plane modem add dlq failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
+ unsigned int dlq_pit_idx)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX +
+ dlq_pit_idx * DLQ_PIT_IDX_SIZE);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
+{
+ u32 value;
+
+ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
+ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
+ DPMAIF_CHECK_TIMEOUT_US);
+}
+
+int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt)
+{
+ unsigned int value;
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "DL add BAT not ready\n");
+ return -EBUSY;
+ }
+
+ value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
+ value |= DPMAIF_DL_ADD_UPDATE;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "DL add BAT timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
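+
+/* The packet BAT and fragment BAT doorbells share the DPMAIF_DL_BAT_ADD
+ * register; t7xx_dpmaif_dl_snd_hw_frg_cnt() below additionally sets
+ * DPMAIF_DL_FRG_ADD_UPDATE so that the write addresses the fragment ring.
+ */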
+
+unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt)
+{
+ unsigned int value;
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n");
+ return -EBUSY;
+ }
+
+ value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
+ value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE;
+ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);
+
+ if (t7xx_dl_add_timedout(hw_info)) {
+ dev_err(hw_info->dev, "Data plane modem add frag DLQ failed\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
+{
+ u32 value;
+
+ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX);
+ return value & DPMAIF_DL_RD_WR_IDX_MSK;
+}
+
+static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_params *init_para)
+{
+ struct dpmaif_dl *dl_que;
+ struct dpmaif_ul *ul_que;
+ int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ dl_que = &hw_info->dl_que[i];
+ dl_que->bat_base = init_para->pkt_bat_base_addr[i];
+ dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i];
+ dl_que->pit_base = init_para->pit_base_addr[i];
+ dl_que->pit_size_cnt = init_para->pit_size_cnt[i];
+ dl_que->frg_base = init_para->frg_bat_base_addr[i];
+ dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i];
+ dl_que->que_started = true;
+ }
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ ul_que = &hw_info->ul_que[i];
+ ul_que->drb_base = init_para->drb_base_addr[i];
+ ul_que->drb_size_cnt = init_para->drb_size_cnt[i];
+ ul_que->que_started = true;
+ }
+}
+
+/**
+ * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ *
+ * Disable the HW UL queues, then poll the busy UL queues until they go
+ * idle, giving up after DPMAIF_MAX_CHECK_COUNT (1000000) attempts.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ETIMEDOUT - Timed out checking busy queues
+ */
+int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info)
+{
+ int count = 0;
+
+ t7xx_dpmaif_ul_all_q_en(hw_info, false);
+ while (t7xx_dpmaif_ul_idle_check(hw_info)) {
+ if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n",
+ ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ *
+ * Disable the HW DL queues, then poll the busy DL queues until they go
+ * idle, giving up after DPMAIF_MAX_CHECK_COUNT (1000000) attempts.
+ * Finally, check that the HW PIT write index equals the read index,
+ * using the same attempt limit.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ETIMEDOUT - Timed out checking busy queues.
+ */
+int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info)
+{
+ unsigned int wr_idx, rd_idx;
+ int count = 0;
+
+ t7xx_dpmaif_dl_all_q_en(hw_info, false);
+ while (t7xx_dpmaif_dl_idle_check(hw_info)) {
+ if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n",
+ ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY));
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Check middle PIT sync done */
+ count = 0;
+ do {
+ wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX);
+ wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
+ rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX);
+ rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
+
+ if (wr_idx == rd_idx)
+ return 0;
+ } while (++count < DPMAIF_MAX_CHECK_COUNT);
+
+ dev_err(hw_info->dev, "Check middle PIT sync fail\n");
+ return -ETIMEDOUT;
+}
+
+void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info)
+{
+ t7xx_dpmaif_ul_all_q_en(hw_info, true);
+ t7xx_dpmaif_dl_all_q_en(hw_info, true);
+}
+
+/**
+ * t7xx_dpmaif_hw_init() - Initialize HW data path API.
+ * @hw_info: Pointer to struct dpmaif_hw_info.
+ * @init_param: Pointer to struct dpmaif_hw_params.
+ *
+ * Configure the port mode and clocks, initialize the HW interrupts, and
+ * set up the HW queues.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param)
+{
+ int ret;
+
+ ret = t7xx_dpmaif_hw_config(hw_info);
+ if (ret) {
+ dev_err(hw_info->dev, "DPMAIF HW config failed\n");
+ return ret;
+ }
+
+ ret = t7xx_dpmaif_init_intr(hw_info);
+ if (ret) {
+ dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n");
+ return ret;
+ }
+
+ t7xx_dpmaif_set_queue_property(hw_info, init_param);
+ t7xx_dpmaif_pcie_dpmaif_sign(hw_info);
+ t7xx_dpmaif_dl_performance(hw_info);
+
+ ret = t7xx_dpmaif_config_dlq_hw(hw_info);
+ if (ret) {
+ dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n");
+ return ret;
+ }
+
+ t7xx_dpmaif_config_ulq_hw(hw_info);
+
+ ret = t7xx_dpmaif_hw_init_done(hw_info);
+ if (ret)
+ dev_err(hw_info->dev, "DPMAIF HW queue init failed\n");
+
+ return ret;
+}
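+
+/* Illustrative init sketch (an assumption, not part of this patch): the ring
+ * addresses in struct dpmaif_hw_params are DMA-coherent buffers allocated by
+ * the HIF layer beforehand; DRB_CNT is a placeholder and a DRB entry is
+ * taken to be DPMAIF_UL_DRB_SIZE_WORD 32-bit words.
+ *
+ *	struct dpmaif_hw_params params = {};
+ *	void *drb;
+ *
+ *	drb = dma_alloc_coherent(hw_info->dev,
+ *				 DRB_CNT * DPMAIF_UL_DRB_SIZE_WORD * sizeof(u32),
+ *				 &params.drb_base_addr[0], GFP_KERNEL);
+ *	if (!drb)
+ *		return -ENOMEM;
+ *	params.drb_size_cnt[0] = DRB_CNT;
+ *	(populate the DL BAT, fragment BAT and PIT rings the same way)
+ *	ret = t7xx_dpmaif_hw_init(hw_info, &params);
+ */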
+
+bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
+{
+ u32 intr_status;
+
+ intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+ intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno);
+ if (intr_status) {
+ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_dpmaif.h
new file mode 100644
index 000000000000..ae292355a33d
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_DPMAIF_H__
+#define __T7XX_DPMAIF_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define DPMAIF_DL_PIT_SEQ_VALUE 251
+#define DPMAIF_UL_DRB_SIZE_WORD 4
+
+#define DPMAIF_MAX_CHECK_COUNT 1000000
+#define DPMAIF_CHECK_TIMEOUT_US 10000
+#define DPMAIF_CHECK_INIT_TIMEOUT_US 100000
+#define DPMAIF_CHECK_DELAY_US 10
+
+#define DPMAIF_RXQ_NUM 2
+#define DPMAIF_TXQ_NUM 5
+
+struct dpmaif_isr_en_mask {
+ unsigned int ap_ul_l2intr_en_msk;
+ unsigned int ap_dl_l2intr_en_msk;
+ unsigned int ap_udl_ip_busy_en_msk;
+ unsigned int ap_dl_l2intr_err_en_msk;
+};
+
+struct dpmaif_ul {
+ bool que_started;
+ unsigned char reserved[3];
+ dma_addr_t drb_base;
+ unsigned int drb_size_cnt;
+};
+
+struct dpmaif_dl {
+ bool que_started;
+ unsigned char reserved[3];
+ dma_addr_t pit_base;
+ unsigned int pit_size_cnt;
+ dma_addr_t bat_base;
+ unsigned int bat_size_cnt;
+ dma_addr_t frg_base;
+ unsigned int frg_size_cnt;
+ unsigned int pit_seq;
+};
+
+struct dpmaif_hw_info {
+ struct device *dev;
+ void __iomem *pcie_base;
+ struct dpmaif_dl dl_que[DPMAIF_RXQ_NUM];
+ struct dpmaif_ul ul_que[DPMAIF_TXQ_NUM];
+ struct dpmaif_isr_en_mask isr_en_mask;
+};
+
+/* DPMAIF HW Initialization parameter structure */
+struct dpmaif_hw_params {
+ /* UL part */
+ dma_addr_t drb_base_addr[DPMAIF_TXQ_NUM];
+ unsigned int drb_size_cnt[DPMAIF_TXQ_NUM];
+ /* DL part */
+ dma_addr_t pkt_bat_base_addr[DPMAIF_RXQ_NUM];
+ unsigned int pkt_bat_size_cnt[DPMAIF_RXQ_NUM];
+ dma_addr_t frg_bat_base_addr[DPMAIF_RXQ_NUM];
+ unsigned int frg_bat_size_cnt[DPMAIF_RXQ_NUM];
+ dma_addr_t pit_base_addr[DPMAIF_RXQ_NUM];
+ unsigned int pit_size_cnt[DPMAIF_RXQ_NUM];
+};
+
+enum dpmaif_hw_intr_type {
+ DPF_INTR_INVALID_MIN,
+ DPF_INTR_UL_DONE,
+ DPF_INTR_UL_DRB_EMPTY,
+ DPF_INTR_UL_MD_NOTREADY,
+ DPF_INTR_UL_MD_PWR_NOTREADY,
+ DPF_INTR_UL_LEN_ERR,
+ DPF_INTR_DL_DONE,
+ DPF_INTR_DL_SKB_LEN_ERR,
+ DPF_INTR_DL_BATCNT_LEN_ERR,
+ DPF_INTR_DL_PITCNT_LEN_ERR,
+ DPF_INTR_DL_PKT_EMPTY_SET,
+ DPF_INTR_DL_FRG_EMPTY_SET,
+ DPF_INTR_DL_MTU_ERR,
+ DPF_INTR_DL_FRGCNT_LEN_ERR,
+ DPF_INTR_DL_Q0_PITCNT_LEN_ERR,
+ DPF_INTR_DL_Q1_PITCNT_LEN_ERR,
+ DPF_INTR_DL_HPC_ENT_TYPE_ERR,
+ DPF_INTR_DL_Q0_DONE,
+ DPF_INTR_DL_Q1_DONE,
+ DPF_INTR_INVALID_MAX
+};
+
+#define DPF_RX_QNO0 0
+#define DPF_RX_QNO1 1
+#define DPF_RX_QNO_DFT DPF_RX_QNO0
+
+struct dpmaif_hw_intr_st_para {
+ unsigned int intr_cnt;
+ enum dpmaif_hw_intr_type intr_types[DPF_INTR_INVALID_MAX - 1];
+ unsigned int intr_queues[DPF_INTR_INVALID_MAX - 1];
+};
+
+#define DPMAIF_HW_BAT_REMAIN 64
+#define DPMAIF_HW_BAT_PKTBUF (128 * 28)
+#define DPMAIF_HW_FRG_PKTBUF 128
+#define DPMAIF_HW_BAT_RSVLEN 64
+#define DPMAIF_HW_PKT_BIDCNT 1
+#define DPMAIF_HW_MTU_SIZE (3 * 1024 + 8)
+#define DPMAIF_HW_CHK_BAT_NUM 62
+#define DPMAIF_HW_CHK_FRG_NUM 3
+#define DPMAIF_HW_CHK_PIT_NUM (2 * DPMAIF_HW_CHK_BAT_NUM)
+
+#define DP_UL_INT_DONE_OFFSET 0
+#define DP_UL_INT_QDONE_MSK GENMASK(4, 0)
+#define DP_UL_INT_EMPTY_MSK GENMASK(9, 5)
+#define DP_UL_INT_MD_NOTREADY_MSK GENMASK(14, 10)
+#define DP_UL_INT_MD_PWR_NOTREADY_MSK GENMASK(19, 15)
+#define DP_UL_INT_ERR_MSK GENMASK(24, 20)
+
+#define DP_DL_INT_QDONE_MSK BIT(0)
+#define DP_DL_INT_SKB_LEN_ERR BIT(1)
+#define DP_DL_INT_BATCNT_LEN_ERR BIT(2)
+#define DP_DL_INT_PITCNT_LEN_ERR BIT(3)
+#define DP_DL_INT_PKT_EMPTY_MSK BIT(4)
+#define DP_DL_INT_FRG_EMPTY_MSK BIT(5)
+#define DP_DL_INT_MTU_ERR_MSK BIT(6)
+#define DP_DL_INT_FRG_LEN_ERR_MSK BIT(7)
+#define DP_DL_INT_Q0_PITCNT_LEN_ERR BIT(8)
+#define DP_DL_INT_Q1_PITCNT_LEN_ERR BIT(9)
+#define DP_DL_INT_HPC_ENT_TYPE_ERR BIT(10)
+#define DP_DL_INT_Q0_DONE BIT(13)
+#define DP_DL_INT_Q1_DONE BIT(14)
+
+#define DP_DL_Q0_STATUS_MASK (DP_DL_INT_Q0_PITCNT_LEN_ERR | DP_DL_INT_Q0_DONE)
+#define DP_DL_Q1_STATUS_MASK (DP_DL_INT_Q1_PITCNT_LEN_ERR | DP_DL_INT_Q1_DONE)
+
+int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param);
+int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info);
+int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info);
+int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
+ struct dpmaif_hw_intr_st_para *para, int qno);
+void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num,
+ unsigned int drb_entry_cnt);
+int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt);
+int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt);
+int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx,
+ unsigned int pit_remain_cnt);
+void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
+ unsigned int qno);
+void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
+bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno);
+void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info);
+void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info);
+unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num);
+unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
+ unsigned int dlq_pit_idx);
+
+#endif /* __T7XX_DPMAIF_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
new file mode 100644
index 000000000000..6ff30cb8eb16
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_cldma.h"
+#include "t7xx_hif_cldma.h"
+#include "t7xx_mhccif.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define MAX_TX_BUDGET 16
+#define MAX_RX_BUDGET 16
+
+#define CHECK_Q_STOP_TIMEOUT_US 1000000
+#define CHECK_Q_STOP_STEP_US 10000
+
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
+
+static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
+ enum mtk_txrx tx_rx, unsigned int index)
+{
+ queue->dir = tx_rx;
+ queue->index = index;
+ queue->md_ctrl = md_ctrl;
+ queue->tr_ring = NULL;
+ queue->tr_done = NULL;
+ queue->tx_next = NULL;
+}
+
+static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
+ enum mtk_txrx tx_rx, unsigned int index)
+{
+ md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
+ init_waitqueue_head(&queue->req_wq);
+ spin_lock_init(&queue->ring_lock);
+}
+
+static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
+{
+ gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
+ gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
+}
+
+static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
+{
+ gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
+ gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
+}
+
+static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
+ size_t size, gfp_t gfp_mask)
+{
+ req->skb = __dev_alloc_skb(size, gfp_mask);
+ if (!req->skb)
+ return -ENOMEM;
+
+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
+ dev_kfree_skb_any(req->skb);
+ req->skb = NULL;
+ req->mapped_buff = 0;
+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ unsigned int hwo_polling_count = 0;
+ struct t7xx_cldma_hw *hw_info;
+ bool rx_not_done = true;
+ unsigned long flags;
+ int count = 0;
+
+ hw_info = &md_ctrl->hw_info;
+
+ do {
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ struct sk_buff *skb;
+ int ret;
+
+ req = queue->tr_done;
+ if (!req)
+ return -ENODATA;
+
+ gpd = req->gpd;
+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
+ dma_addr_t gpd_addr;
+
+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
+ dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
+ return -ENODEV;
+ }
+
+ gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
+ if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
+ return 0;
+
+ udelay(1);
+ continue;
+ }
+
+ hwo_polling_count = 0;
+ skb = req->skb;
+
+ if (req->mapped_buff) {
+ dma_unmap_single(md_ctrl->dev, req->mapped_buff,
+ queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
+ req->mapped_buff = 0;
+ }
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, le16_to_cpu(gpd->data_buff_len));
+
+ ret = md_ctrl->recv_skb(queue, skb);
+ /* Break processing, will try again later */
+ if (ret < 0)
+ return ret;
+
+ req->skb = NULL;
+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
+
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+ req = queue->rx_refill;
+
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ gpd = req->gpd;
+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
+ gpd->data_buff_len = 0;
+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
+
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ rx_not_done = ++count < budget || !need_resched();
+ } while (rx_not_done);
+
+ *over_budget = true;
+ return 0;
+}
+
+static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned int pending_rx_int;
+ bool over_budget = false;
+ unsigned long flags;
+ int ret;
+
+ hw_info = &md_ctrl->hw_info;
+
+ do {
+ ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
+ if (ret == -ENODATA)
+ return 0;
+ else if (ret)
+ return ret;
+
+ pending_rx_int = 0;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->rxq_active & BIT(queue->index)) {
+ if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
+
+ pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
+ MTK_RX);
+ if (pending_rx_int) {
+ t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
+
+ if (over_budget) {
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ return -EAGAIN;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ } while (pending_rx_int);
+
+ return 0;
+}
+
+static void t7xx_cldma_rx_done(struct work_struct *work)
+{
+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ int value;
+
+ value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
+ if (value && md_ctrl->rxq_active & BIT(queue->index)) {
+ queue_work(queue->worker, &queue->cldma_work);
+ return;
+ }
+
+ t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
+}
+
+static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ unsigned int dma_len, count = 0;
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ unsigned long flags;
+ dma_addr_t dma_free;
+ struct sk_buff *skb;
+
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ req = queue->tr_done;
+ if (!req) {
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+ break;
+ }
+ gpd = req->gpd;
+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+ break;
+ }
+ queue->budget++;
+ dma_free = req->mapped_buff;
+ dma_len = le16_to_cpu(gpd->data_buff_len);
+ skb = req->skb;
+ req->skb = NULL;
+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ count++;
+ dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ if (count)
+ wake_up_nr(&queue->req_wq, count);
+
+ return count;
+}
+
+static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct cldma_request *req;
+ dma_addr_t ul_curr_addr;
+ unsigned long flags;
+ bool pending_gpd;
+
+ if (!(md_ctrl->txq_active & BIT(queue->index)))
+ return;
+
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (pending_gpd) {
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+
+ /* Check the TGPD currently being processed; its 64-bit address sits in a per-queue register table */
+ ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
+ if (req->gpd_addr != ul_curr_addr) {
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
+ md_ctrl->hif_id, queue->index);
+ return;
+ }
+
+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static void t7xx_cldma_tx_done(struct work_struct *work)
+{
+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned int l2_tx_int;
+ unsigned long flags;
+
+ hw_info = &md_ctrl->hw_info;
+ t7xx_cldma_gpd_tx_collect(queue);
+ l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
+ MTK_TX);
+ if (l2_tx_int & EQ_STA_BIT(queue->index)) {
+ t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
+ t7xx_cldma_txq_empty_hndl(queue);
+ }
+
+ if (l2_tx_int & BIT(queue->index)) {
+ t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
+ queue_work(queue->worker, &queue->cldma_work);
+ return;
+ }
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->txq_active & BIT(queue->index)) {
+ t7xx_cldma_clear_ip_busy(hw_info);
+ t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
+ t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
+}
+
+static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
+ struct cldma_ring *ring, enum dma_data_direction tx_rx)
+{
+ struct cldma_request *req_cur, *req_next;
+
+ list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
+ if (req_cur->mapped_buff && req_cur->skb) {
+ dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
+ ring->pkt_size, tx_rx);
+ req_cur->mapped_buff = 0;
+ }
+
+ dev_kfree_skb_any(req_cur->skb);
+
+ if (req_cur->gpd)
+ dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
+
+ list_del(&req_cur->entry);
+ kfree(req_cur);
+ }
+}
+
+static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
+{
+ struct cldma_request *req;
+ int val;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
+ if (!req->gpd)
+ goto err_free_req;
+
+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
+ if (val)
+ goto err_free_pool;
+
+ return req;
+
+err_free_pool:
+ dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
+
+err_free_req:
+ kfree(req);
+
+ return NULL;
+}
+
+static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
+{
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ int i;
+
+ INIT_LIST_HEAD(&ring->gpd_ring);
+ ring->length = MAX_RX_BUDGET;
+
+ for (i = 0; i < ring->length; i++) {
+ req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
+ if (!req) {
+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
+
+ gpd = req->gpd;
+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
+ gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
+ INIT_LIST_HEAD(&req->entry);
+ list_add_tail(&req->entry, &ring->gpd_ring);
+ }
+
+ /* Link each GPD to the next; gpd initially points at the last allocated GPD, so the first iteration closes the ring */
+ list_for_each_entry(req, &ring->gpd_ring, entry) {
+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
+ gpd = req->gpd;
+ }
+
+ return 0;
+}
+
+static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
+{
+ struct cldma_request *req;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
+ if (!req->gpd) {
+ kfree(req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
+{
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ int i;
+
+ INIT_LIST_HEAD(&ring->gpd_ring);
+ ring->length = MAX_TX_BUDGET;
+
+ for (i = 0; i < ring->length; i++) {
+ req = t7xx_alloc_tx_request(md_ctrl);
+ if (!req) {
+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ gpd = req->gpd;
+ gpd->flags = GPD_FLAGS_IOC;
+ INIT_LIST_HEAD(&req->entry);
+ list_add_tail(&req->entry, &ring->gpd_ring);
+ }
+
+ /* Link each GPD to the next; gpd initially points at the last allocated GPD, so the first iteration closes the ring */
+ list_for_each_entry(req, &ring->gpd_ring, entry) {
+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
+ gpd = req->gpd;
+ }
+
+ return 0;
+}
+
+/**
+ * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
+ * @queue: Pointer to the queue structure.
+ *
+ * Called with ring_lock held (unless called during the initialization phase).
+ */
+static void t7xx_cldma_q_reset(struct cldma_queue *queue)
+{
+ struct cldma_request *req;
+
+ req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
+ queue->tr_done = req;
+ queue->budget = queue->tr_ring->length;
+
+ if (queue->dir == MTK_TX)
+ queue->tx_next = req;
+ else
+ queue->rx_refill = req;
+}
+
+static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+
+ queue->dir = MTK_RX;
+ queue->tr_ring = &md_ctrl->rx_ring[queue->index];
+ t7xx_cldma_q_reset(queue);
+}
+
+static void t7xx_cldma_txq_init(struct cldma_queue *queue)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+
+ queue->dir = MTK_TX;
+ queue->tr_ring = &md_ctrl->tx_ring[queue->index];
+ t7xx_cldma_q_reset(queue);
+}
+
+static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
+}
+
+static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
+}
+
+static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
+{
+ unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ int i;
+
+ /* L2 raw interrupt status */
+ l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
+ l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
+ l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
+ l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
+ l2_tx_int &= ~l2_tx_int_msk;
+ l2_rx_int &= ~l2_rx_int_msk;
+
+ if (l2_tx_int) {
+ if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
+ /* Read and clear L3 TX interrupt status */
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
+ }
+
+ t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
+ if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
+ for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
+ if (i < CLDMA_TXQ_NUM) {
+ pm_runtime_get(md_ctrl->dev);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
+ queue_work(md_ctrl->txq[i].worker,
+ &md_ctrl->txq[i].cldma_work);
+ } else {
+ t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
+ }
+ }
+ }
+ }
+
+ if (l2_rx_int) {
+ if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
+ /* Read and clear L3 RX interrupt status */
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
+ }
+
+ t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
+ if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
+ l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
+ for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
+ pm_runtime_get(md_ctrl->dev);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
+ queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
+ }
+ }
+ }
+}
+
+static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ unsigned int tx_active;
+ unsigned int rx_active;
+
+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
+ return false;
+
+ tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
+ rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
+
+ return tx_active || rx_active;
+}
+
+/**
+ * t7xx_cldma_stop() - Stop CLDMA.
+ * @md_ctrl: CLDMA context structure.
+ *
+ * Stop TX and RX queues. Disable L1 and L2 interrupts.
+ * Clear status registers.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ETIMEDOUT - Timed out waiting for the CLDMA queues to stop.
+ */
+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ bool active;
+ int i, ret;
+
+ md_ctrl->rxq_active = 0;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
+ md_ctrl->txq_active = 0;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
+ md_ctrl->txq_started = 0;
+ t7xx_cldma_disable_irq(md_ctrl);
+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
+ t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
+ t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
+
+ if (md_ctrl->is_late_init) {
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ flush_work(&md_ctrl->txq[i].cldma_work);
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
+ flush_work(&md_ctrl->rxq[i].cldma_work);
+ }
+
+ ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
+ CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
+ if (ret)
+ dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues\n", md_ctrl->hif_id);
+
+ return ret;
+}
+
+static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
+{
+ int i;
+
+ if (!md_ctrl->is_late_init)
+ return;
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
+
+ dma_pool_destroy(md_ctrl->gpd_dmapool);
+ md_ctrl->gpd_dmapool = NULL;
+ md_ctrl->is_late_init = false;
+}
+
+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_ctrl->txq_active = 0;
+ md_ctrl->rxq_active = 0;
+ t7xx_cldma_disable_irq(md_ctrl);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ cancel_work_sync(&md_ctrl->txq[i].cldma_work);
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ }
+
+ t7xx_cldma_late_release(md_ctrl);
+}
+
+/**
+ * t7xx_cldma_start() - Start CLDMA.
+ * @md_ctrl: CLDMA context structure.
+ *
+ * Set TX/RX start address.
+ * Start all RX queues and enable L2 interrupt.
+ */
+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->is_late_init) {
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ int i;
+
+ t7xx_cldma_enable_irq(md_ctrl);
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ if (md_ctrl->txq[i].tr_done)
+ t7xx_cldma_hw_set_start_addr(hw_info, i,
+ md_ctrl->txq[i].tr_done->gpd_addr,
+ MTK_TX);
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ if (md_ctrl->rxq[i].tr_done)
+ t7xx_cldma_hw_set_start_addr(hw_info, i,
+ md_ctrl->rxq[i].tr_done->gpd_addr,
+ MTK_RX);
+ }
+
+ /* Enable L2 interrupt */
+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_start(hw_info);
+ md_ctrl->txq_started = 0;
+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
+{
+ struct cldma_queue *txq = &md_ctrl->txq[qnum];
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&txq->ring_lock, flags);
+ t7xx_cldma_q_reset(txq);
+ list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
+ gpd = req->gpd;
+ gpd->flags &= ~GPD_FLAGS_HWO;
+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
+ gpd->data_buff_len = 0;
+ dev_kfree_skb_any(req->skb);
+ req->skb = NULL;
+ }
+ spin_unlock_irqrestore(&txq->ring_lock, flags);
+}
+
+static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
+{
+ struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
+ struct cldma_request *req;
+ struct cldma_gpd *gpd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rxq->ring_lock, flags);
+ t7xx_cldma_q_reset(rxq);
+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
+ gpd = req->gpd;
+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
+ gpd->data_buff_len = 0;
+
+ if (req->skb) {
+ req->skb->len = 0;
+ skb_reset_tail_pointer(req->skb);
+ }
+ }
+
+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
+ if (req->skb)
+ continue;
+
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
+ if (ret)
+ break;
+
+ t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
+ }
+ spin_unlock_irqrestore(&rxq->ring_lock, flags);
+
+ return ret;
+}
+
+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
+{
+ int i;
+
+ if (tx_rx == MTK_TX) {
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ t7xx_cldma_clear_txq(md_ctrl, i);
+ } else {
+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
+ t7xx_cldma_clear_rxq(md_ctrl, i);
+ }
+}
+
+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
+ if (tx_rx == MTK_RX)
+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
+ else
+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
+ struct sk_buff *skb)
+{
+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
+ struct cldma_gpd *gpd = tx_req->gpd;
+ unsigned long flags;
+
+ /* Update GPD */
+ tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
+ return -ENOMEM;
+ }
+
+ t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
+ gpd->data_buff_len = cpu_to_le16(skb->len);
+
+ /* This lock must cover the TGPD setup: even without a resume operation,
+ * CLDMA can start sending the next HWO=1 GPD as soon as the last TGPD finishes.
+ */
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (md_ctrl->txq_active & BIT(queue->index))
+ gpd->flags |= GPD_FLAGS_HWO;
+
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ tx_req->skb = skb;
+ return 0;
+}
+
+/* Called with cldma_lock */
+static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
+ struct cldma_request *prev_req)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+
+ /* Check whether the device was powered off (CLDMA start address is not set) */
+ if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
+ t7xx_cldma_hw_init(hw_info);
+ t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
+ md_ctrl->txq_started &= ~BIT(qno);
+ }
+
+ if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
+ if (md_ctrl->txq_started & BIT(qno))
+ t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
+ else
+ t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
+
+ md_ctrl->txq_started |= BIT(qno);
+ }
+}
+
+/**
+ * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
+ * @md_ctrl: CLDMA context structure.
+ * @recv_skb: Receiving skb callback.
+ */
+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
+{
+ md_ctrl->recv_skb = recv_skb;
+}
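+
+/* Illustrative usage sketch (the handler name is an assumption): a consumer
+ * installs its handler before CLDMA starts; returning a negative value makes
+ * t7xx_cldma_gpd_rx_from_q() stop and retry the same GPD later.
+ *
+ *	static int example_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+ *	{
+ *		dev_kfree_skb_any(skb);
+ *		return 0;
+ *	}
+ *
+ *	t7xx_cldma_set_recv_skb(md_ctrl, example_recv_skb);
+ */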
+
+/**
+ * t7xx_cldma_send_skb() - Send control data to modem.
+ * @md_ctrl: CLDMA context structure.
+ * @qno: Queue number.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOMEM - Allocation failure.
+ * * -EINVAL - Invalid queue request.
+ * * -EIO - Queue is not active.
+ * * -ETIMEDOUT - Timeout waiting for the device to wake up.
+ */
+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
+{
+ struct cldma_request *tx_req;
+ struct cldma_queue *queue;
+ unsigned long flags;
+ int ret;
+
+ if (qno >= CLDMA_TXQ_NUM)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(md_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
+ queue = &md_ctrl->txq[qno];
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ if (!(md_ctrl->txq_active & BIT(qno))) {
+ ret = -EIO;
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ goto allow_sleep;
+ }
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ do {
+ spin_lock_irqsave(&queue->ring_lock, flags);
+ tx_req = queue->tx_next;
+ if (queue->budget > 0 && !tx_req->skb) {
+ struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
+
+ queue->budget--;
+ t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
+ queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ /* Protect modem access for queue operations (resume/start) that touch
+ * locations shared by all queues.
+ * cldma_lock is independent of ring_lock, which is per queue.
+ */
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ break;
+ }
+ spin_unlock_irqrestore(&queue->ring_lock, flags);
+
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ }
+
+ ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
+ } while (!ret);
+
+allow_sleep:
+ t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
+ return ret;
+}
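+
+/* Illustrative sender sketch (the buffer source is an assumption): on
+ * success the skb is owned and later freed by the TX completion path
+ * (t7xx_cldma_gpd_tx_collect()); on error the caller must free it.
+ *
+ *	struct sk_buff *skb = __dev_alloc_skb(len, GFP_KERNEL);
+ *
+ *	if (!skb)
+ *		return -ENOMEM;
+ *	skb_put_data(skb, data, len);
+ *	ret = t7xx_cldma_send_skb(md_ctrl, 0, skb);
+ *	if (ret)
+ *		dev_kfree_skb_any(skb);
+ */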
+
+static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
+{
+ char dma_pool_name[32];
+ int i, j, ret;
+
+ if (md_ctrl->is_late_init) {
+ dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
+ return -EALREADY;
+ }
+
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
+
+ md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
+ sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
+ if (!md_ctrl->gpd_dmapool) {
+ dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
+ if (ret) {
+ dev_err(md_ctrl->dev, "control TX ring init fail\n");
+ goto err_free_tx_ring;
+ }
+ }
+
+ for (j = 0; j < CLDMA_RXQ_NUM; j++) {
+ md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
+
+ if (j == CLDMA_RXQ_NUM - 1)
+ md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
+ if (ret) {
+ dev_err(md_ctrl->dev, "Control RX ring init fail\n");
+ goto err_free_rx_ring;
+ }
+ }
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
+ t7xx_cldma_txq_init(&md_ctrl->txq[i]);
+
+ for (j = 0; j < CLDMA_RXQ_NUM; j++)
+ t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
+
+ md_ctrl->is_late_init = true;
+ return 0;
+
+err_free_rx_ring:
+ while (j--)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
+
+err_free_tx_ring:
+ while (i--)
+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
+{
+ return addr + phy_addr - addr_trs1;
+}
+
+static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ u32 phy_ao_base, phy_pd_base;
+
+ if (md_ctrl->hif_id != CLDMA_ID_MD)
+ return;
+
+ phy_ao_base = CLDMA1_AO_BASE;
+ phy_pd_base = CLDMA1_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA1_INT;
+ hw_info->hw_mode = MODE_BIT_64;
+ hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+ pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
+ hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+ pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
+}
+
+static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct cldma_ctrl *md_ctrl;
+
+ md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
+ if (!md_ctrl)
+ return -ENOMEM;
+
+ md_ctrl->t7xx_dev = t7xx_dev;
+ md_ctrl->dev = dev;
+ md_ctrl->hif_id = hif_id;
+ md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ t7xx_hw_info_init(md_ctrl);
+ t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
+ return 0;
+}
+
+static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+ int qno_t;
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_restore(hw_info);
+ for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
+ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
+ MTK_TX);
+ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
+ MTK_RX);
+ }
+ t7xx_cldma_enable_irq(md_ctrl);
+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
+
+ return 0;
+}
+
+static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
+ t7xx_cldma_clear_ip_busy(hw_info);
+ t7xx_cldma_disable_irq(md_ctrl);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
+ md_ctrl->txq_started = 0;
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ return 0;
+}
+
+static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
+{
+ md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
+ if (!md_ctrl->pm_entity)
+ return -ENOMEM;
+
+ md_ctrl->pm_entity->entity_param = md_ctrl;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
+ else
+ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
+
+ md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
+ md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
+ md_ctrl->pm_entity->resume = t7xx_cldma_resume;
+ md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
+
+ return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
+}
+
+static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
+{
+ if (!md_ctrl->pm_entity)
+ return -EINVAL;
+
+ t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
+ kfree(md_ctrl->pm_entity);
+ md_ctrl->pm_entity = NULL;
+ return 0;
+}
+
+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
+ t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
+ t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
+ t7xx_cldma_hw_init(hw_info);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
+{
+ struct cldma_ctrl *md_ctrl = data;
+ u32 interrupt;
+
+ interrupt = md_ctrl->hw_info.phy_interrupt_id;
+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
+ t7xx_cldma_irq_work_cb(md_ctrl);
+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
+ return IRQ_HANDLED;
+}
+
+static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
+{
+ int i;
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ if (md_ctrl->txq[i].worker) {
+ destroy_workqueue(md_ctrl->txq[i].worker);
+ md_ctrl->txq[i].worker = NULL;
+ }
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ if (md_ctrl->rxq[i].worker) {
+ destroy_workqueue(md_ctrl->rxq[i].worker);
+ md_ctrl->rxq[i].worker = NULL;
+ }
+ }
+}
+
+/**
+ * t7xx_cldma_init() - Initialize CLDMA.
+ * @md_ctrl: CLDMA context structure.
+ *
+ * Allocate and initialize device power management entity.
+ * Initialize HIF TX/RX queue structure.
+ * Register CLDMA callback ISR with PCIe driver.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
+{
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ int ret, i;
+
+ md_ctrl->txq_active = 0;
+ md_ctrl->rxq_active = 0;
+ md_ctrl->is_late_init = false;
+
+ ret = t7xx_cldma_pm_init(md_ctrl);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&md_ctrl->cldma_lock);
+
+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
+ md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
+ md_ctrl->txq[i].worker =
+ alloc_workqueue("md_hif%d_tx%d_worker",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
+ 1, md_ctrl->hif_id, i);
+ if (!md_ctrl->txq[i].worker)
+ goto err_workqueue;
+
+ INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
+ }
+
+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
+ md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
+ INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
+
+ md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1, md_ctrl->hif_id, i);
+ if (!md_ctrl->rxq[i].worker)
+ goto err_workqueue;
+ }
+
+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
+ md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
+ md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
+ md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
+ return 0;
+
+err_workqueue:
+ t7xx_cldma_destroy_wqs(md_ctrl);
+ t7xx_cldma_pm_uninit(md_ctrl);
+ return -ENOMEM;
+}
+
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_late_init(md_ctrl);
+}
+
+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
+{
+ t7xx_cldma_stop(md_ctrl);
+ t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_destroy_wqs(md_ctrl);
+ t7xx_cldma_pm_uninit(md_ctrl);
+}
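+
+/* Illustrative bring-up/tear-down order (a sketch; the actual sequencing is
+ * driven by the modem state machine elsewhere in this driver):
+ *
+ *	t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
+ *	md_ctrl = t7xx_dev->md->md_ctrl[CLDMA_ID_MD];
+ *	ret = t7xx_cldma_init(md_ctrl);      (PM entity, queues, ISR)
+ *	t7xx_cldma_hif_hw_init(md_ctrl);     (quiesce and init the HW)
+ *	t7xx_cldma_switch_cfg(md_ctrl);      (allocate the rings, late init)
+ *	t7xx_cldma_start(md_ctrl);
+ *	...
+ *	t7xx_cldma_exit(md_ctrl);            (stop, free rings, unregister PM)
+ */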
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
new file mode 100644
index 000000000000..47a35e552da7
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#ifndef __T7XX_HIF_CLDMA_H__
+#define __T7XX_HIF_CLDMA_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/types.h>
+
+#include "t7xx_cldma.h"
+#include "t7xx_pci.h"
+
+/**
+ * enum cldma_id - Identifiers for CLDMA HW units.
+ * @CLDMA_ID_MD: Modem control channel.
+ * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
+ * @CLDMA_NUM: Number of CLDMA HW units available.
+ */
+enum cldma_id {
+ CLDMA_ID_MD,
+ CLDMA_ID_AP,
+ CLDMA_NUM
+};
+
+struct cldma_gpd {
+ u8 flags;
+ u8 not_used1;
+ __le16 rx_data_allow_len;
+ __le32 next_gpd_ptr_h;
+ __le32 next_gpd_ptr_l;
+ __le32 data_buff_bd_ptr_h;
+ __le32 data_buff_bd_ptr_l;
+ __le16 data_buff_len;
+ __le16 not_used2;
+};
+
+struct cldma_request {
+ struct cldma_gpd *gpd; /* Virtual address for CPU */
+ dma_addr_t gpd_addr; /* Physical address for DMA */
+ struct sk_buff *skb;
+ dma_addr_t mapped_buff;
+ struct list_head entry;
+};
+
+struct cldma_ring {
+ struct list_head gpd_ring; /* Ring of struct cldma_request */
+ unsigned int length; /* Number of struct cldma_request */
+ int pkt_size;
+};
+
+struct cldma_queue {
+ struct cldma_ctrl *md_ctrl;
+ enum mtk_txrx dir;
+ unsigned int index;
+ struct cldma_ring *tr_ring;
+ struct cldma_request *tr_done;
+ struct cldma_request *rx_refill;
+ struct cldma_request *tx_next;
+ int budget; /* Same as ring buffer size by default */
+ spinlock_t ring_lock;
+ wait_queue_head_t req_wq; /* Only for TX */
+ struct workqueue_struct *worker;
+ struct work_struct cldma_work;
+};
+
+struct cldma_ctrl {
+ enum cldma_id hif_id;
+ struct device *dev;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct cldma_queue txq[CLDMA_TXQ_NUM];
+ struct cldma_queue rxq[CLDMA_RXQ_NUM];
+ unsigned short txq_active;
+ unsigned short rxq_active;
+ unsigned short txq_started;
+ spinlock_t cldma_lock; /* Protects CLDMA structure */
+ /* Assumes T/R GPD/BD/SPD have the same size */
+ struct dma_pool *gpd_dmapool;
+ struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
+ struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
+ struct md_pm_entity *pm_entity;
+ struct t7xx_cldma_hw hw_info;
+ bool is_late_init;
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
+};
+
+#define GPD_FLAGS_HWO BIT(0)
+#define GPD_FLAGS_IOC BIT(7)
+#define GPD_DMAPOOL_ALIGN 16
+
+#define CLDMA_MTU 3584 /* 3.5 KiB */
+
+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
+
+#endif /* __T7XX_HIF_CLDMA_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
new file mode 100644
index 000000000000..7eff3531b9a5
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_state_monitor.h"
+
+unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
+{
+ buf_idx++;
+
+ return buf_idx < buf_len ? buf_idx : 0;
+}
+
+unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
+ unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
+{
+ int pkt_cnt;
+
+ if (rd_wr == DPMAIF_READ)
+ pkt_cnt = wr_idx - rd_idx;
+ else
+ pkt_cnt = rd_idx - wr_idx - 1;
+
+ if (pkt_cnt < 0)
+ pkt_cnt += total_cnt;
+
+ return (unsigned int)pkt_cnt;
+}
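+
+/*
+ * Worked example for the two ring helpers above: with total_cnt = 8,
+ * rd_idx = 6 and wr_idx = 2, DPMAIF_READ gives 2 - 6 = -4, corrected to 4
+ * readable entries (indices 6, 7, 0, 1); DPMAIF_WRITE gives 6 - 2 - 1 = 3
+ * free entries. One slot is always kept empty so that a full ring can be
+ * distinguished from an empty one.
+ */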
+
+static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_isr_para *isr_para;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ }
+}
+
+static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_isr_para *isr_para;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ }
+}
+
+static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+ struct dpmaif_hw_intr_st_para intr_status;
+ struct device *dev = dpmaif_ctrl->dev;
+ struct dpmaif_hw_info *hw_info;
+ int i;
+
+ memset(&intr_status, 0, sizeof(intr_status));
+ hw_info = &dpmaif_ctrl->hw_info;
+
+ if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
+ dev_err(dev, "Failed to get HW interrupt count\n");
+ return;
+ }
+
+ t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ for (i = 0; i < intr_status.intr_cnt; i++) {
+ switch (intr_status.intr_types[i]) {
+ case DPF_INTR_UL_DONE:
+ t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
+ break;
+
+ case DPF_INTR_UL_DRB_EMPTY:
+ case DPF_INTR_UL_MD_NOTREADY:
+ case DPF_INTR_UL_MD_PWR_NOTREADY:
+ /* No need to log an error for these */
+ break;
+
+ case DPF_INTR_DL_BATCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
+ t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
+ break;
+
+ case DPF_INTR_DL_PITCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
+ t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
+ break;
+
+ case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
+ t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
+ break;
+
+ case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
+ dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
+ t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
+ break;
+
+ case DPF_INTR_DL_DONE:
+ case DPF_INTR_DL_Q0_DONE:
+ case DPF_INTR_DL_Q1_DONE:
+ t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
+ break;
+
+ default:
+ dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n",
+ intr_status.intr_types[i]);
+ }
+ }
+}
+
+static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl;
+
+ dpmaif_ctrl = isr_para->dpmaif_ctrl;
+ if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
+ dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
+ return IRQ_HANDLED;
+ }
+
+ t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ t7xx_dpmaif_irq_cb(isr_para);
+ t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+ return IRQ_HANDLED;
+}
+
+static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_isr_para *isr_para;
+ unsigned char i;
+
+ dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
+ dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ isr_para->dpmaif_ctrl = dpmaif_ctrl;
+ isr_para->dlq_id = i;
+ isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
+ }
+}
+
+static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
+ struct dpmaif_isr_para *isr_para;
+ enum t7xx_int int_type;
+ int i;
+
+ t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ isr_para = &dpmaif_ctrl->isr_para[i];
+ int_type = isr_para->pcie_int;
+ t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
+
+ t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
+ t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->callback_param[int_type] = isr_para;
+
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
+ t7xx_pcie_mac_set_int(t7xx_dev, int_type);
+ }
+}
+
+static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rx_q;
+ struct dpmaif_tx_queue *tx_q;
+ int ret, rx_idx, tx_idx, i;
+
+ ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
+ return ret;
+ }
+
+ ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
+ goto err_free_normal_bat;
+ }
+
+ for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
+ rx_q = &dpmaif_ctrl->rxq[rx_idx];
+ rx_q->index = rx_idx;
+ rx_q->dpmaif_ctrl = dpmaif_ctrl;
+ ret = t7xx_dpmaif_rxq_init(rx_q);
+ if (ret)
+ goto err_free_rxq;
+ }
+
+ for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
+ tx_q = &dpmaif_ctrl->txq[tx_idx];
+ tx_q->index = tx_idx;
+ tx_q->dpmaif_ctrl = dpmaif_ctrl;
+ ret = t7xx_dpmaif_txq_init(tx_q);
+ if (ret)
+ goto err_free_txq;
+ }
+
+ ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
+ goto err_free_txq;
+ }
+
+ ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
+ if (ret)
+ goto err_thread_rel;
+
+ return 0;
+
+err_thread_rel:
+ t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
+
+err_free_txq:
+ for (i = 0; i < tx_idx; i++) {
+ tx_q = &dpmaif_ctrl->txq[i];
+ t7xx_dpmaif_txq_free(tx_q);
+ }
+
+err_free_rxq:
+ for (i = 0; i < rx_idx; i++) {
+ rx_q = &dpmaif_ctrl->rxq[i];
+ t7xx_dpmaif_rxq_free(rx_q);
+ }
+
+ t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);
+
+err_free_normal_bat:
+ t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);
+
+ return ret;
+}
+
+static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rx_q;
+ struct dpmaif_tx_queue *tx_q;
+ int i;
+
+ t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
+ t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ tx_q = &dpmaif_ctrl->txq[i];
+ t7xx_dpmaif_txq_free(tx_q);
+ }
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ rx_q = &dpmaif_ctrl->rxq[i];
+ t7xx_dpmaif_rxq_free(rx_q);
+ }
+}
+
+static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
+ struct dpmaif_hw_params hw_init_para;
+ struct dpmaif_rx_queue *rxq;
+ struct dpmaif_tx_queue *txq;
+ unsigned int buf_cnt;
+ int i, ret = 0;
+
+ if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
+ return -EFAULT;
+
+ memset(&hw_init_para, 0, sizeof(hw_init_para));
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ rxq = &dpmaif_ctrl->rxq[i];
+ rxq->que_started = true;
+ rxq->index = i;
+ rxq->budget = rxq->bat_req->bat_size_cnt - 1;
+
+ hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
+ hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
+ hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
+ hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
+ hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
+ hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
+ }
+
+ bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
+ buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
+ ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
+ return ret;
+ }
+
+ buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
+ ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
+ goto err_free_normal_bat;
+ }
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ txq = &dpmaif_ctrl->txq[i];
+ txq->que_started = true;
+
+ hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
+ hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
+ }
+
+ ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
+ if (ret) {
+ dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
+ goto err_free_frag_bat;
+ }
+
+ ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
+ if (ret)
+ goto err_free_frag_bat;
+
+ ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
+ if (ret)
+ goto err_free_frag_bat;
+
+ t7xx_dpmaif_ul_clr_all_intr(hw_info);
+ t7xx_dpmaif_dl_clr_all_intr(hw_info);
+ dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
+ t7xx_dpmaif_enable_irq(dpmaif_ctrl);
+ wake_up(&dpmaif_ctrl->tx_wq);
+ return 0;
+
+err_free_frag_bat:
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
+
+err_free_normal_bat:
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
+
+ return ret;
+}
+
+static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ t7xx_dpmaif_tx_stop(dpmaif_ctrl);
+ t7xx_dpmaif_rx_stop(dpmaif_ctrl);
+}
+
+static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
+}
+
+static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ if (!dpmaif_ctrl->dpmaif_sw_init_done) {
+ dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
+ return -EFAULT;
+ }
+
+ if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
+ return -EFAULT;
+
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+ dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
+ t7xx_dpmaif_stop_sw(dpmaif_ctrl);
+ t7xx_dpmaif_tx_clear(dpmaif_ctrl);
+ t7xx_dpmaif_rx_clear(dpmaif_ctrl);
+ return 0;
+}
+
+static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+ t7xx_dpmaif_tx_stop(dpmaif_ctrl);
+ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+ t7xx_dpmaif_rx_stop(dpmaif_ctrl);
+ return 0;
+}
+
+static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int qno;
+
+ for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
+ t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
+}
+
+static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rxq;
+ struct dpmaif_tx_queue *txq;
+ unsigned int que_cnt;
+
+ for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
+ txq = &dpmaif_ctrl->txq[que_cnt];
+ txq->que_started = true;
+ }
+
+ for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
+ rxq = &dpmaif_ctrl->rxq[que_cnt];
+ rxq->que_started = true;
+ }
+}
+
+static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+ if (!dpmaif_ctrl)
+ return 0;
+
+ t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
+ t7xx_dpmaif_enable_irq(dpmaif_ctrl);
+ t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
+ t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
+ wake_up(&dpmaif_ctrl->tx_wq);
+ return 0;
+}
+
+static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+ int ret;
+
+ INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
+ dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
+ dpmaif_pm_entity->suspend_late = NULL;
+ dpmaif_pm_entity->resume_early = NULL;
+ dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
+ dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
+ dpmaif_pm_entity->entity_param = dpmaif_ctrl;
+
+ ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+ if (ret)
+ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+ return ret;
+}
+
+static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+ int ret;
+
+ ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+ return ret;
+}
+
+int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
+{
+ int ret = 0;
+
+ switch (state) {
+ case MD_STATE_WAITING_FOR_HS1:
+ ret = t7xx_dpmaif_start(dpmaif_ctrl);
+ break;
+
+ case MD_STATE_EXCEPTION:
+ ret = t7xx_dpmaif_stop(dpmaif_ctrl);
+ break;
+
+ case MD_STATE_STOPPED:
+ ret = t7xx_dpmaif_stop(dpmaif_ctrl);
+ break;
+
+ case MD_STATE_WAITING_TO_STOP:
+ t7xx_dpmaif_stop_hw(dpmaif_ctrl);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * t7xx_dpmaif_hif_init() - Initialize data path.
+ * @t7xx_dev: MTK context structure.
+ * @callbacks: Callbacks implemented by the network layer to handle RX skb and
+ * event notifications.
+ *
+ * Allocate and initialize datapath control block.
+ * Register datapath ISR, TX and RX resources.
+ *
+ * Return:
+ * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
+ * * NULL - In case of error.
+ */
+struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
+ struct dpmaif_callbacks *callbacks)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ int ret;
+
+ if (!callbacks)
+ return NULL;
+
+ dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
+ if (!dpmaif_ctrl)
+ return NULL;
+
+ dpmaif_ctrl->t7xx_dev = t7xx_dev;
+ dpmaif_ctrl->callbacks = callbacks;
+ dpmaif_ctrl->dev = dev;
+ dpmaif_ctrl->dpmaif_sw_init_done = false;
+ dpmaif_ctrl->hw_info.dev = dev;
+ dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+
+ ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
+ if (ret)
+ return NULL;
+
+ t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+
+ ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
+ if (ret) {
+ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
+ dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
+ return NULL;
+ }
+
+ dpmaif_ctrl->dpmaif_sw_init_done = true;
+ return dpmaif_ctrl;
+}
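+
+/*
+ * Usage sketch for the init/exit pair; the callback names below are
+ * hypothetical placeholders for whatever the network layer provides:
+ *
+ * static struct dpmaif_callbacks cb = {
+ * 	.state_notify = my_txq_state_notify,
+ * 	.recv_skb = my_recv_skb,
+ * };
+ *
+ * dpmaif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &cb);
+ * if (!dpmaif_ctrl)
+ * 	return -ENOMEM;
+ * ...
+ * t7xx_dpmaif_hif_exit(dpmaif_ctrl);
+ */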
+
+void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ if (dpmaif_ctrl->dpmaif_sw_init_done) {
+ t7xx_dpmaif_stop(dpmaif_ctrl);
+ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
+ t7xx_dpmaif_sw_release(dpmaif_ctrl);
+ dpmaif_ctrl->dpmaif_sw_init_done = false;
+ }
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
new file mode 100644
index 000000000000..1225ca0ed51e
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_HIF_DPMAIF_H__
+#define __T7XX_HIF_DPMAIF_H__
+
+#include <linux/bitmap.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_pci.h"
+#include "t7xx_state_monitor.h"
+
+/* SKB control buffer */
+struct t7xx_skb_cb {
+ u8 netif_idx;
+ u8 txq_number;
+ u8 rx_pkt_type;
+};
+
+#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb)
+
+enum dpmaif_rdwr {
+ DPMAIF_READ,
+ DPMAIF_WRITE,
+};
+
+/* Structure of DL BAT */
+struct dpmaif_cur_rx_skb_info {
+ bool msg_pit_received;
+ struct sk_buff *cur_skb;
+ unsigned int cur_chn_idx;
+ unsigned int check_sum;
+ unsigned int pit_dp;
+ unsigned int pkt_type;
+ int err_payload;
+};
+
+struct dpmaif_bat {
+ unsigned int p_buffer_addr;
+ unsigned int buffer_addr_ext;
+};
+
+struct dpmaif_bat_skb {
+ struct sk_buff *skb;
+ dma_addr_t data_bus_addr;
+ unsigned int data_len;
+};
+
+struct dpmaif_bat_page {
+ struct page *page;
+ dma_addr_t data_bus_addr;
+ unsigned int offset;
+ unsigned int data_len;
+};
+
+enum bat_type {
+ BAT_TYPE_NORMAL,
+ BAT_TYPE_FRAG,
+};
+
+struct dpmaif_bat_request {
+ void *bat_base;
+ dma_addr_t bat_bus_addr;
+ unsigned int bat_size_cnt;
+ unsigned int bat_wr_idx;
+ unsigned int bat_release_rd_idx;
+ void *bat_skb;
+ unsigned int pkt_buf_sz;
+ unsigned long *bat_bitmap;
+ atomic_t refcnt;
+ spinlock_t mask_lock; /* Protects BAT mask */
+ enum bat_type type;
+};
+
+struct dpmaif_rx_queue {
+ unsigned int index;
+ bool que_started;
+ unsigned int budget;
+
+ void *pit_base;
+ dma_addr_t pit_bus_addr;
+ unsigned int pit_size_cnt;
+
+ unsigned int pit_rd_idx;
+ unsigned int pit_wr_idx;
+ unsigned int pit_release_rd_idx;
+
+ struct dpmaif_bat_request *bat_req;
+ struct dpmaif_bat_request *bat_frag;
+
+ wait_queue_head_t rx_wq;
+ struct task_struct *rx_thread;
+ struct sk_buff_head skb_list;
+ unsigned int skb_list_max_len;
+
+ struct workqueue_struct *worker;
+ struct work_struct dpmaif_rxq_work;
+
+ atomic_t rx_processing;
+
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ unsigned int expect_pit_seq;
+ unsigned int pit_remain_release_cnt;
+ struct dpmaif_cur_rx_skb_info rx_data_info;
+};
+
+struct dpmaif_tx_queue {
+ unsigned int index;
+ bool que_started;
+ atomic_t tx_budget;
+ void *drb_base;
+ dma_addr_t drb_bus_addr;
+ unsigned int drb_size_cnt;
+ unsigned int drb_wr_idx;
+ unsigned int drb_rd_idx;
+ unsigned int drb_release_rd_idx;
+ void *drb_skb_base;
+ wait_queue_head_t req_wq;
+ struct workqueue_struct *worker;
+ struct work_struct dpmaif_tx_work;
+ spinlock_t tx_lock; /* Protects txq DRB */
+ atomic_t tx_processing;
+
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ struct sk_buff_head tx_skb_head;
+};
+
+struct dpmaif_isr_para {
+ struct dpmaif_ctrl *dpmaif_ctrl;
+ unsigned char pcie_int;
+ unsigned char dlq_id;
+};
+
+enum dpmaif_state {
+ DPMAIF_STATE_MIN,
+ DPMAIF_STATE_PWROFF,
+ DPMAIF_STATE_PWRON,
+ DPMAIF_STATE_EXCEPTION,
+ DPMAIF_STATE_MAX
+};
+
+enum dpmaif_txq_state {
+ DMPAIF_TXQ_STATE_IRQ,
+ DMPAIF_TXQ_STATE_FULL,
+};
+
+struct dpmaif_callbacks {
+ void (*state_notify)(struct t7xx_pci_dev *t7xx_dev,
+ enum dpmaif_txq_state state, int txq_number);
+ void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb);
+};
+
+struct dpmaif_ctrl {
+ struct device *dev;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity dpmaif_pm_entity;
+ enum dpmaif_state state;
+ bool dpmaif_sw_init_done;
+ struct dpmaif_hw_info hw_info;
+ struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM];
+ struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM];
+
+ unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM];
+ struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM];
+
+ struct dpmaif_bat_request bat_req;
+ struct dpmaif_bat_request bat_frag;
+ struct workqueue_struct *bat_release_wq;
+ struct work_struct bat_release_work;
+
+ wait_queue_head_t tx_wq;
+ struct task_struct *tx_thread;
+
+ struct dpmaif_callbacks *callbacks;
+};
+
+struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
+ struct dpmaif_callbacks *callbacks);
+void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state);
+unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx);
+unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
+ unsigned int wr_idx, enum dpmaif_rdwr rd_wr);
+
+#endif /* __T7XX_HIF_DPMAIF_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
new file mode 100644
index 000000000000..91a0eb19e0d8
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -0,0 +1,1243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_pci.h"
+
+#define DPMAIF_BAT_COUNT 8192
+#define DPMAIF_FRG_COUNT 4814
+#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2)
+
+#define DPMAIF_BAT_CNT_THRESHOLD 30
+#define DPMAIF_PIT_CNT_THRESHOLD 60
+#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0)
+#define DPMAIF_NOTIFY_RELEASE_COUNT 128
+#define DPMAIF_POLL_PIT_TIME_US 20
+#define DPMAIF_POLL_PIT_MAX_TIME_US 2000
+#define DPMAIF_WQ_TIME_LIMIT_MS 2
+#define DPMAIF_CS_RESULT_PASS 0
+
+/* Packet type */
+#define DES_PT_PD 0
+#define DES_PT_MSG 1
+/* Buffer type */
+#define PKT_BUF_FRAG 1
+
+static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
+{
+ u32 value;
+
+ value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
+ value <<= 13;
+ value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
+ return value;
+}
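+
+/*
+ * Example: PD_PIT_H_BID carries the 3 high bits of the buffer ID and
+ * PD_PIT_BUFFER_ID the 13 low bits, so h_bid = 0x1 with buffer_id = 0xff
+ * yields (0x1 << 13) + 0xff = 0x20ff.
+ */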
+
+static int t7xx_dpmaif_net_rx_push_thread(void *arg)
+{
+ struct dpmaif_rx_queue *q = arg;
+ struct dpmaif_ctrl *hif_ctrl;
+ struct dpmaif_callbacks *cb;
+
+ hif_ctrl = q->dpmaif_ctrl;
+ cb = hif_ctrl->callbacks;
+
+ while (!kthread_should_stop()) {
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (skb_queue_empty(&q->skb_list)) {
+ if (wait_event_interruptible(q->rx_wq,
+ !skb_queue_empty(&q->skb_list) ||
+ kthread_should_stop()))
+ continue;
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ spin_lock_irqsave(&q->skb_list.lock, flags);
+ skb = __skb_dequeue(&q->skb_list);
+ spin_unlock_irqrestore(&q->skb_list.lock, flags);
+
+ if (!skb)
+ continue;
+
+ cb->recv_skb(hif_ctrl->t7xx_dev, skb);
+ cond_resched();
+ }
+
+ return 0;
+}
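+
+/*
+ * Note on the push thread above: wait_event_interruptible() may also
+ * return on a pending signal, so the loop re-checks the skb list instead
+ * of assuming one is queued, and re-checks kthread_should_stop() after
+ * waking so that a stop request is not missed.
+ */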
+
+static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num, const unsigned int bat_cnt)
+{
+ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
+ struct dpmaif_bat_request *bat_req = rxq->bat_req;
+ unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
+
+ if (!rxq->que_started) {
+ dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
+ return -EINVAL;
+ }
+
+ old_rl_idx = bat_req->bat_release_rd_idx;
+ old_wr_idx = bat_req->bat_wr_idx;
+ new_wr_idx = old_wr_idx + bat_cnt;
+
+ if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
+ goto err_flow;
+
+ if (new_wr_idx >= bat_req->bat_size_cnt) {
+ new_wr_idx -= bat_req->bat_size_cnt;
+ if (new_wr_idx >= old_rl_idx)
+ goto err_flow;
+ }
+
+ bat_req->bat_wr_idx = new_wr_idx;
+ return 0;
+
+err_flow:
+ dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
+ return -EINVAL;
+}
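+
+/*
+ * Flow-check example: with bat_size_cnt = 8192, bat_release_rd_idx = 100
+ * and bat_wr_idx = 8000, advancing by bat_cnt = 300 wraps the write index
+ * to 108, which would overrun the not-yet-released entry at index 100, so
+ * the update is rejected with -EINVAL.
+ */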
+
+static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int size, struct dpmaif_bat_skb *cur_skb)
+{
+ dma_addr_t data_bus_addr;
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return false;
+
+ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
+ dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ cur_skb->skb = skb;
+ cur_skb->data_bus_addr = data_bus_addr;
+ cur_skb->data_len = size;
+
+ return true;
+}
+
+static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
+ unsigned int index)
+{
+ struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
+
+ if (bat_skb->skb) {
+ dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
+ dev_kfree_skb(bat_skb->skb);
+ bat_skb->skb = NULL;
+ }
+}
+
+/**
+ * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @q_num: Queue number.
+ * @buf_cnt: Number of buffers to allocate.
+ * @initial: Indicates if the ring is being populated for the first time.
+ *
+ * Allocate skb and store the start address of the data buffer into the BAT ring.
+ * If this is not the initial call, notify the HW about the new entries.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
+ const struct dpmaif_bat_request *bat_req,
+ const unsigned int q_num, const unsigned int buf_cnt,
+ const bool initial)
+{
+ unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
+ int ret;
+
+ if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
+ return -EINVAL;
+
+ /* Check BAT buffer space */
+ bat_max_cnt = bat_req->bat_size_cnt;
+
+ bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
+ bat_req->bat_wr_idx, DPMAIF_WRITE);
+ if (buf_cnt > bat_cnt)
+ return -ENOMEM;
+
+ bat_start_idx = bat_req->bat_wr_idx;
+
+ for (i = 0; i < buf_cnt; i++) {
+ unsigned int cur_bat_idx = bat_start_idx + i;
+ struct dpmaif_bat_skb *cur_skb;
+ struct dpmaif_bat *cur_bat;
+
+ if (cur_bat_idx >= bat_max_cnt)
+ cur_bat_idx -= bat_max_cnt;
+
+ cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
+ if (!cur_skb->skb &&
+ !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
+ break;
+
+ cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
+ cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
+ cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
+ }
+
+ if (!i)
+ return -ENOMEM;
+
+ ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
+ if (ret)
+ goto err_unmap_skbs;
+
+ if (!initial) {
+ unsigned int hw_wr_idx;
+
+ ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
+ if (ret)
+ goto err_unmap_skbs;
+
+ hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
+ DPF_RX_QNO_DFT);
+ if (hw_wr_idx != bat_req->bat_wr_idx) {
+ ret = -EFAULT;
+ dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
+ goto err_unmap_skbs;
+ }
+ }
+
+ return 0;
+
+err_unmap_skbs:
+ while (i--)
+ t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+
+ return ret;
+}
+
+static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
+ const unsigned int rel_entry_num)
+{
+ struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
+ unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
+ int ret;
+
+ if (!rxq->que_started)
+ return 0;
+
+ if (rel_entry_num >= rxq->pit_size_cnt) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
+ return -EINVAL;
+ }
+
+ old_rel_idx = rxq->pit_release_rd_idx;
+ new_rel_idx = old_rel_idx + rel_entry_num;
+ hw_wr_idx = rxq->pit_wr_idx;
+ if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
+ new_rel_idx -= rxq->pit_size_cnt;
+
+ ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
+ if (ret) {
+ dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
+ return ret;
+ }
+
+ rxq->pit_release_rd_idx = new_rel_idx;
+ return 0;
+}
+
+static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bat_req->mask_lock, flags);
+ set_bit(idx, bat_req->bat_bitmap);
+ spin_unlock_irqrestore(&bat_req->mask_lock, flags);
+}
+
+static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
+ const unsigned int cur_bid)
+{
+ struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
+ struct dpmaif_bat_page *bat_page;
+
+ if (cur_bid >= DPMAIF_FRG_COUNT)
+ return -EINVAL;
+
+ bat_page = bat_frag->bat_skb + cur_bid;
+ if (!bat_page->page)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
+ unsigned int index)
+{
+ struct dpmaif_bat_page *bat_page = bat_page_base + index;
+
+ if (bat_page->page) {
+ dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
+ put_page(bat_page->page);
+ bat_page->page = NULL;
+ }
+}
+
+/**
+ * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @buf_cnt: Number of buffers to allocate.
+ * @initial: Indicates if the ring is being populated for the first time.
+ *
+ * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
+ * This function allocates a page fragment and stores the start address of the page
+ * into the Fragment BAT ring.
+ * If this is not the initial call, notify the HW about the new entries.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const unsigned int buf_cnt, const bool initial)
+{
+ unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
+ struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
+ int ret = 0, i;
+
+ if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
+ return -EINVAL;
+
+ buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
+ bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
+ DPMAIF_WRITE);
+ if (buf_cnt > buf_space) {
+ dev_err(dpmaif_ctrl->dev,
+ "Requested more buffers than the space available in RX frag ring\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < buf_cnt; i++) {
+ struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
+ struct dpmaif_bat *cur_bat;
+ dma_addr_t data_base_addr;
+
+ if (!cur_page->page) {
+ unsigned long offset;
+ struct page *page;
+ void *data;
+
+ data = netdev_alloc_frag(bat_req->pkt_buf_sz);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+
+ data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
+ bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
+ put_page(virt_to_head_page(data));
+ dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
+ break;
+ }
+
+ cur_page->page = page;
+ cur_page->data_bus_addr = data_base_addr;
+ cur_page->offset = offset;
+ cur_page->data_len = bat_req->pkt_buf_sz;
+ }
+
+ data_base_addr = cur_page->data_bus_addr;
+ cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
+ cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
+ cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
+ cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
+ }
+
+ bat_req->bat_wr_idx = cur_bat_idx;
+
+ if (!initial)
+ t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
+
+ if (i < buf_cnt) {
+ ret = -ENOMEM;
+ if (initial) {
+ while (i--)
+ t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+ }
+ }
+
+ return ret;
+}
+
+static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ struct sk_buff *skb)
+{
+ unsigned long long data_bus_addr, data_base_addr;
+ struct device *dev = rxq->dpmaif_ctrl->dev;
+ struct dpmaif_bat_page *page_info;
+ unsigned int data_len;
+ int data_offset;
+
+ page_info = rxq->bat_frag->bat_skb;
+ page_info += t7xx_normal_pit_bid(pkt_info);
+ if (!page_info->page)
+ return -EINVAL;
+
+ dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
+
+ data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
+ data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
+ data_base_addr = page_info->data_bus_addr;
+ data_offset = data_bus_addr - data_base_addr;
+ data_offset += page_info->offset;
+ data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ data_offset, data_len, page_info->data_len);
+
+ page_info->page = NULL;
+ page_info->offset = 0;
+ page_info->data_len = 0;
+ return 0;
+}
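+
+/*
+ * Offset math above: the PIT reports the absolute 64-bit DMA address of
+ * the payload, so subtracting the page's mapped base address gives the
+ * offset within the mapping, and adding page_info->offset converts it to
+ * the offset within the head page that skb_add_rx_frag() expects.
+ */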
+
+static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ const struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
+ int ret;
+
+ ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
+ if (ret < 0)
+ return ret;
+
+ ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
+ if (ret < 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
+ return ret;
+ }
+
+ t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
+ return 0;
+}
+
+static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
+{
+ struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
+
+ bat_skb += cur_bid;
+ if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
+{
+ return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
+}
+
+static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pit)
+{
+ unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
+
+ if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
+ cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
+ DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
+ return -EFAULT;
+
+ rxq->expect_pit_seq++;
+ if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
+ rxq->expect_pit_seq = 0;
+
+ return 0;
+}
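+
+/*
+ * The poll above tolerates the window in which a PIT entry is visible
+ * before the HW has updated its sequence field: the field is re-read
+ * every DPMAIF_POLL_PIT_TIME_US (20 us) for up to
+ * DPMAIF_POLL_PIT_MAX_TIME_US (2 ms) before the entry is rejected.
+ */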
+
+static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
+{
+ unsigned int zero_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bat_req->mask_lock, flags);
+
+ zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
+ bat_req->bat_release_rd_idx);
+
+ if (zero_index < bat_req->bat_size_cnt) {
+ spin_unlock_irqrestore(&bat_req->mask_lock, flags);
+ return zero_index - bat_req->bat_release_rd_idx;
+ }
+
+ /* Limit the search to the range below bat_release_rd_idx */
+ zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
+ spin_unlock_irqrestore(&bat_req->mask_lock, flags);
+ return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
+}
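+
+/*
+ * Wrap-around scan example: with bat_size_cnt = 8, bat_release_rd_idx = 5
+ * and bits 5, 6, 7, 0 and 1 set, the first search finds no zero bit at or
+ * above index 5, so the second search (limited to indices below 5) finds
+ * the zero bit at index 2 and the function returns 8 - 5 + 2 = 5
+ * releasable entries.
+ */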
+
+static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
+ const unsigned int rel_entry_num,
+ const enum bat_type buf_type)
+{
+ struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
+ unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
+ struct dpmaif_bat_request *bat;
+ unsigned long flags;
+
+ if (!rxq->que_started || !rel_entry_num)
+ return -EINVAL;
+
+ if (buf_type == BAT_TYPE_FRAG) {
+ bat = rxq->bat_frag;
+ hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
+ } else {
+ bat = rxq->bat_req;
+ hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
+ }
+
+ if (rel_entry_num >= bat->bat_size_cnt)
+ return -EINVAL;
+
+ old_rel_idx = bat->bat_release_rd_idx;
+ new_rel_idx = old_rel_idx + rel_entry_num;
+
+ /* Do not need to release if the queue is empty */
+ if (bat->bat_wr_idx == old_rel_idx)
+ return 0;
+
+ if (hw_rd_idx >= old_rel_idx) {
+ if (new_rel_idx > hw_rd_idx)
+ return -EINVAL;
+ }
+
+ if (new_rel_idx >= bat->bat_size_cnt) {
+ new_rel_idx -= bat->bat_size_cnt;
+ if (new_rel_idx > hw_rd_idx)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bat->mask_lock, flags);
+ for (i = 0; i < rel_entry_num; i++) {
+ unsigned int index = bat->bat_release_rd_idx + i;
+
+ if (index >= bat->bat_size_cnt)
+ index -= bat->bat_size_cnt;
+
+ clear_bit(index, bat->bat_bitmap);
+ }
+ spin_unlock_irqrestore(&bat->mask_lock, flags);
+
+ bat->bat_release_rd_idx = new_rel_idx;
+ return rel_entry_num;
+}
+
+static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
+{
+ int ret;
+
+ if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
+ return 0;
+
+ ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
+ if (ret)
+ return ret;
+
+ rxq->pit_remain_release_cnt = 0;
+ return 0;
+}
+
+static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
+{
+ unsigned int bid_cnt;
+ int ret;
+
+ bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
+ if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
+ return 0;
+
+ ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
+ if (ret <= 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
+ if (ret < 0)
+ dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
+
+ return ret;
+}
+
+static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
+{
+ unsigned int bid_cnt;
+ int ret;
+
+ bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
+ if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
+ return 0;
+
+ ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
+ if (ret <= 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
+ return ret;
+ }
+
+ return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
+}
+
+static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *msg_pit,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ int header = le32_to_cpu(msg_pit->header);
+
+ skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
+ skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
+ skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
+ skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
+}
+
+static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ unsigned long long data_bus_addr, data_base_addr;
+ struct device *dev = rxq->dpmaif_ctrl->dev;
+ struct dpmaif_bat_skb *bat_skb;
+ unsigned int data_len;
+ struct sk_buff *skb;
+ int data_offset;
+
+ bat_skb = rxq->bat_req->bat_skb;
+ bat_skb += t7xx_normal_pit_bid(pkt_info);
+ dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
+
+ data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
+ data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
+ data_base_addr = bat_skb->data_bus_addr;
+ data_offset = data_bus_addr - data_base_addr;
+ data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
+ skb = bat_skb->skb;
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ skb_reserve(skb, data_offset);
+
+ if (skb->tail + data_len > skb->end) {
+ dev_err(dev, "No buffer space available\n");
+ return -ENOBUFS;
+ }
+
+ skb_put(skb, data_len);
+ skb_info->cur_skb = skb;
+ bat_skb->skb = NULL;
+ return 0;
+}
+
+static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
+ const struct dpmaif_pit *pkt_info,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
+ int ret;
+
+ ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
+ if (ret < 0)
+ return ret;
+
+ ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
+ if (ret < 0) {
+ dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
+ return ret;
+ }
+
+ t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
+ return 0;
+}
+
+static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
+ int ret;
+
+ queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
+
+ ret = t7xx_dpmaif_pit_release_and_add(rxq);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
+
+ return ret;
+}
+
+static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->skb_list.lock, flags);
+ if (rxq->skb_list.qlen < rxq->skb_list_max_len)
+ __skb_queue_tail(&rxq->skb_list, skb);
+ else
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
+}
+
+static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
+ struct dpmaif_cur_rx_skb_info *skb_info)
+{
+ struct sk_buff *skb = skb_info->cur_skb;
+ struct t7xx_skb_cb *skb_cb;
+ u8 netif_id;
+
+ skb_info->cur_skb = NULL;
+
+ if (skb_info->pit_dp) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
+ CHECKSUM_NONE;
+ netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
+ skb_cb = T7XX_SKB_CB(skb);
+ skb_cb->netif_idx = netif_id;
+ skb_cb->rx_pkt_type = skb_info->pkt_type;
+ t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
+}
+
+static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
+ const unsigned long timeout)
+{
+ unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
+ struct device *dev = rxq->dpmaif_ctrl->dev;
+ struct dpmaif_cur_rx_skb_info *skb_info;
+ int ret = 0;
+
+ pit_len = rxq->pit_size_cnt;
+ skb_info = &rxq->rx_data_info;
+ cur_pit = rxq->pit_rd_idx;
+
+ for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
+ struct dpmaif_pit *pkt_info;
+ u32 val;
+
+ if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
+ break;
+
+ pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
+ if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
+ dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
+ return -EAGAIN;
+ }
+
+ val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
+ if (val == DES_PT_MSG) {
+ if (skb_info->msg_pit_received)
+ dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
+
+ skb_info->msg_pit_received = true;
+ t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
+ } else { /* DES_PT_PD */
+ val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
+ if (val != PKT_BUF_FRAG)
+ ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
+ else if (!skb_info->cur_skb)
+ ret = -EINVAL;
+ else
+ ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
+
+ if (ret < 0) {
+ skb_info->err_payload = 1;
+ dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
+ }
+
+ val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
+ if (!val) {
+ if (!skb_info->err_payload) {
+ t7xx_dpmaif_rx_skb(rxq, skb_info);
+ } else if (skb_info->cur_skb) {
+ dev_kfree_skb_any(skb_info->cur_skb);
+ skb_info->cur_skb = NULL;
+ }
+
+ memset(skb_info, 0, sizeof(*skb_info));
+
+ recv_skb_cnt++;
+ if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
+ wake_up_all(&rxq->rx_wq);
+ recv_skb_cnt = 0;
+ }
+ }
+ }
+
+ cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
+ rxq->pit_rd_idx = cur_pit;
+ rxq->pit_remain_release_cnt++;
+
+ if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
+ ret = t7xx_dpmaifq_rx_notify_hw(rxq);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ if (recv_skb_cnt)
+ wake_up_all(&rxq->rx_wq);
+
+ if (!ret)
+ ret = t7xx_dpmaifq_rx_notify_hw(rxq);
+
+ if (ret)
+ return ret;
+
+ return rx_cnt;
+}
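+
+/*
+ * Batching in the RX loop above: DPMAIF_RX_PUSH_THRESHOLD_MASK is
+ * GENMASK(2, 0), so the push thread is woken once per 8 completed skbs
+ * rather than per packet, and the PIT release bookkeeping is flushed to
+ * the HW every DPMAIF_NOTIFY_RELEASE_COUNT (128) processed entries.
+ */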
+
+static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
+{
+ unsigned int hw_wr_idx, pit_cnt;
+
+ if (!rxq->que_started)
+ return 0;
+
+ hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
+ DPMAIF_READ);
+ rxq->pit_wr_idx = hw_wr_idx;
+ return pit_cnt;
+}
+
+static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num, const unsigned int budget)
+{
+ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
+ unsigned long time_limit;
+ unsigned int cnt;
+
+ time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);
+
+ while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
+ unsigned int rd_cnt;
+ int real_cnt;
+
+ rd_cnt = min(cnt, budget);
+
+ real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
+ if (real_cnt < 0)
+ return real_cnt;
+
+ if (real_cnt < cnt)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
+{
+ struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
+ int ret;
+
+ ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
+ if (ret < 0) {
+ /* Try one more time */
+ queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
+ }
+}
+
+static void t7xx_dpmaif_rxq_work(struct work_struct *work)
+{
+ struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
+ int ret;
+
+ atomic_set(&rxq->rx_processing, 1);
+ /* Ensure rx_processing is changed to 1 before actually begin RX flow */
+ smp_mb();
+
+ if (!rxq->que_started) {
+ atomic_set(&rxq->rx_processing, 0);
+ dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
+ return;
+ }
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
+}
+
+void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
+{
+ struct dpmaif_rx_queue *rxq;
+ int qno;
+
+ qno = ffs(que_mask) - 1;
+ if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
+ dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno);
+ return;
+ }
+
+ rxq = &dpmaif_ctrl->rxq[qno];
+ queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
+}
+
+static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
+ const struct dpmaif_bat_request *bat_req)
+{
+ if (bat_req->bat_base)
+ dma_free_coherent(dpmaif_ctrl->dev,
+ bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
+ bat_req->bat_base, bat_req->bat_bus_addr);
+}
+
+/**
+ * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
+ * @dpmaif_ctrl: Pointer to DPMAIF context structure.
+ * @bat_req: Pointer to BAT request structure.
+ * @buf_type: BAT ring type.
+ *
+ * This function allocates the BAT ring buffer shared with the HW device, also allocates
+ * a buffer used to store information about the BAT skbs for further release.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code.
+ */
+int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const enum bat_type buf_type)
+{
+ int sw_buf_size;
+
+ if (buf_type == BAT_TYPE_FRAG) {
+ sw_buf_size = sizeof(struct dpmaif_bat_page);
+ bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
+ bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
+ } else {
+ sw_buf_size = sizeof(struct dpmaif_bat_skb);
+ bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
+ bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
+ }
+
+ bat_req->type = buf_type;
+ bat_req->bat_wr_idx = 0;
+ bat_req->bat_release_rd_idx = 0;
+
+ bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
+ bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
+ &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!bat_req->bat_base)
+ return -ENOMEM;
+
+ /* For AP SW to record skb information */
+ bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
+ GFP_KERNEL);
+ if (!bat_req->bat_skb)
+ goto err_free_dma_mem;
+
+ bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
+ if (!bat_req->bat_bitmap)
+ goto err_free_dma_mem;
+
+ spin_lock_init(&bat_req->mask_lock);
+ atomic_set(&bat_req->refcnt, 0);
+ return 0;
+
+err_free_dma_mem:
+ t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
+
+ return -ENOMEM;
+}
+
+void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
+{
+ if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
+ return;
+
+ bitmap_free(bat_req->bat_bitmap);
+ bat_req->bat_bitmap = NULL;
+
+ if (bat_req->bat_skb) {
+ unsigned int i;
+
+ for (i = 0; i < bat_req->bat_size_cnt; i++) {
+ if (bat_req->type == BAT_TYPE_FRAG)
+ t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+ else
+ t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
+ }
+ }
+
+ t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
+}
+
+static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
+{
+ rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
+ rxq->pit_rd_idx = 0;
+ rxq->pit_wr_idx = 0;
+ rxq->pit_release_rd_idx = 0;
+ rxq->expect_pit_seq = 0;
+ rxq->pit_remain_release_cnt = 0;
+ memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
+
+ rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
+ rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
+ &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!rxq->pit_base)
+ return -ENOMEM;
+
+ rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
+ atomic_inc(&rxq->bat_req->refcnt);
+
+ rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
+ atomic_inc(&rxq->bat_frag->refcnt);
+ return 0;
+}
+
+static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
+{
+ if (!rxq->dpmaif_ctrl)
+ return;
+
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
+ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
+
+ if (rxq->pit_base)
+ dma_free_coherent(rxq->dpmaif_ctrl->dev,
+ rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
+ rxq->pit_base, rxq->pit_bus_addr);
+}
+
+int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
+{
+ int ret;
+
+ ret = t7xx_dpmaif_rx_alloc(queue);
+ if (ret < 0) {
+ dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
+ return ret;
+ }
+
+ INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);
+
+ queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
+ if (!queue->worker) {
+ ret = -ENOMEM;
+ goto err_free_rx_buffer;
+ }
+
+ init_waitqueue_head(&queue->rx_wq);
+ skb_queue_head_init(&queue->skb_list);
+ queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
+ queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
+ queue, "dpmaif_rx%d_push", queue->index);
+
+ ret = PTR_ERR_OR_ZERO(queue->rx_thread);
+ if (ret)
+ goto err_free_workqueue;
+
+ return 0;
+
+err_free_workqueue:
+ destroy_workqueue(queue->worker);
+
+err_free_rx_buffer:
+ t7xx_dpmaif_rx_buf_free(queue);
+
+ return ret;
+}
+
+void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
+{
+ if (queue->worker)
+ destroy_workqueue(queue->worker);
+
+ if (queue->rx_thread)
+ kthread_stop(queue->rx_thread);
+
+ skb_queue_purge(&queue->skb_list);
+ t7xx_dpmaif_rx_buf_free(queue);
+}
+
+static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
+ struct dpmaif_rx_queue *rxq;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+
+ /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */
+ rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ t7xx_dpmaif_bat_release_and_add(rxq);
+ t7xx_dpmaif_frag_bat_release_and_add(rxq);
+ }
+
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+}
+
+int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
+ WQ_MEM_RECLAIM, 1);
+ if (!dpmaif_ctrl->bat_release_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
+ return 0;
+}
+
+void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ flush_work(&dpmaif_ctrl->bat_release_work);
+
+ if (dpmaif_ctrl->bat_release_wq) {
+ destroy_workqueue(dpmaif_ctrl->bat_release_wq);
+ dpmaif_ctrl->bat_release_wq = NULL;
+ }
+}
+
+/**
+ * t7xx_dpmaif_rx_stop() - Suspend RX flow.
+ * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
+ *
+ * Wait for all the RX work to finish executing and mark the RX queues as paused.
+ */
+void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ unsigned int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
+ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
+ int timeout, value;
+
+ flush_work(&rxq->dpmaif_rxq_work);
+
+ timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
+ !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
+ if (timeout)
+ dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
+
+ /* Ensure RX processing has stopped before we set rxq->que_started to false */
+ smp_mb();
+ rxq->que_started = false;
+ }
+}
+
+static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
+{
+ int cnt, j = 0;
+
+ flush_work(&rxq->dpmaif_rxq_work);
+ rxq->que_started = false;
+
+ do {
+ cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
+ rxq->pit_wr_idx, DPMAIF_READ);
+
+ if (++j >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
+ break;
+ }
+ } while (cnt);
+
+ memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
+ memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
+ bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
+ memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
+
+ rxq->pit_rd_idx = 0;
+ rxq->pit_wr_idx = 0;
+ rxq->pit_release_rd_idx = 0;
+ rxq->expect_pit_seq = 0;
+ rxq->pit_remain_release_cnt = 0;
+ rxq->bat_req->bat_release_rd_idx = 0;
+ rxq->bat_req->bat_wr_idx = 0;
+ rxq->bat_frag->bat_release_rd_idx = 0;
+ rxq->bat_frag->bat_wr_idx = 0;
+}
+
+void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_RXQ_NUM; i++)
+ t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
new file mode 100644
index 000000000000..182f62dfe398
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_HIF_DPMAIF_RX_H__
+#define __T7XX_HIF_DPMAIF_RX_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+
+#define NETIF_MASK GENMASK(4, 0)
+
+#define PKT_TYPE_IP4 0
+#define PKT_TYPE_IP6 1
+
+/* Structure of DL PIT */
+struct dpmaif_pit {
+ __le32 header;
+ union {
+ struct {
+ __le32 data_addr_l;
+ __le32 data_addr_h;
+ __le32 footer;
+ } pd;
+ struct {
+ __le32 params_1;
+ __le32 params_2;
+ __le32 params_3;
+ } msg;
+ };
+};
+
+/* PIT header fields */
+#define PD_PIT_DATA_LEN GENMASK(31, 16)
+#define PD_PIT_BUFFER_ID GENMASK(15, 3)
+#define PD_PIT_BUFFER_TYPE BIT(2)
+#define PD_PIT_CONT BIT(1)
+#define PD_PIT_PACKET_TYPE BIT(0)
+/* PIT footer fields */
+#define PD_PIT_DLQ_DONE GENMASK(31, 30)
+#define PD_PIT_ULQ_DONE GENMASK(29, 24)
+#define PD_PIT_HEADER_OFFSET GENMASK(23, 19)
+#define PD_PIT_BI_F GENMASK(18, 17)
+#define PD_PIT_IG BIT(16)
+#define PD_PIT_RES GENMASK(15, 11)
+#define PD_PIT_H_BID GENMASK(10, 8)
+#define PD_PIT_PIT_SEQ GENMASK(7, 0)
+
+#define MSG_PIT_DP BIT(31)
+#define MSG_PIT_RES GENMASK(30, 27)
+#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24)
+#define MSG_PIT_CHANNEL_ID GENMASK(23, 16)
+#define MSG_PIT_RES2 GENMASK(15, 12)
+#define MSG_PIT_HPC_IDX GENMASK(11, 8)
+#define MSG_PIT_SRC_QID GENMASK(7, 5)
+#define MSG_PIT_ERROR_BIT BIT(4)
+#define MSG_PIT_CHECKSUM GENMASK(3, 2)
+#define MSG_PIT_CONT BIT(1)
+#define MSG_PIT_PACKET_TYPE BIT(0)
+
+#define MSG_PIT_HP_IDX GENMASK(31, 27)
+#define MSG_PIT_CMD GENMASK(26, 24)
+#define MSG_PIT_RES3 GENMASK(23, 21)
+#define MSG_PIT_FLOW GENMASK(20, 16)
+#define MSG_PIT_COUNT GENMASK(15, 0)
+
+#define MSG_PIT_HASH GENMASK(31, 24)
+#define MSG_PIT_RES4 GENMASK(23, 18)
+#define MSG_PIT_PRO GENMASK(17, 16)
+#define MSG_PIT_VBID GENMASK(15, 3)
+#define MSG_PIT_RES5 GENMASK(2, 0)
+
+#define MSG_PIT_DLQ_DONE GENMASK(31, 30)
+#define MSG_PIT_ULQ_DONE GENMASK(29, 24)
+#define MSG_PIT_IP BIT(23)
+#define MSG_PIT_RES6 BIT(22)
+#define MSG_PIT_MR GENMASK(21, 20)
+#define MSG_PIT_RES7 GENMASK(19, 17)
+#define MSG_PIT_IG BIT(16)
+#define MSG_PIT_RES8 GENMASK(15, 11)
+#define MSG_PIT_H_BID GENMASK(10, 8)
+#define MSG_PIT_PIT_SEQ GENMASK(7, 0)
+
+int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue);
+void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
+ const struct dpmaif_bat_request *bat_req,
+ const unsigned int q_num, const unsigned int buf_cnt,
+ const bool initial);
+int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const unsigned int buf_cnt, const bool first_time);
+void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl);
+void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask);
+void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue);
+void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
+ const enum bat_type buf_type);
+void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl,
+ struct dpmaif_bat_request *bat_req);
+
+#endif /* __T7XX_HIF_DPMA_RX_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
new file mode 100644
index 000000000000..46514208d4f9
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_dpmaif.h"
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_pci.h"
+
+#define DPMAIF_SKB_TX_BURST_CNT 5
+#define DPMAIF_DRB_LIST_LEN 6144
+
+/* DRB dtype */
+#define DES_DTYP_PD 0
+#define DES_DTYP_MSG 1
+
+static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
+ unsigned int q_num)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
+ unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
+ unsigned long flags;
+
+ if (!txq->que_started)
+ return 0;
+
+ old_sw_rd_idx = txq->drb_rd_idx;
+ new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
+ if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
+ dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
+ return 0;
+ }
+
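+	/* Number of DRBs the HW has consumed, accounting for ring wrap-around */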
+ if (old_sw_rd_idx <= new_hw_rd_idx)
+ drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
+ else
+ drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ txq->drb_rd_idx = new_hw_rd_idx;
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ return drb_cnt;
+}
+
+static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
+ unsigned int q_num, unsigned int release_cnt)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
+ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+ struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
+ struct dpmaif_drb *cur_drb, *drb_base;
+ unsigned int drb_cnt, i, cur_idx;
+ unsigned long flags;
+
+ drb_skb_base = txq->drb_skb_base;
+ drb_base = txq->drb_base;
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ drb_cnt = txq->drb_size_cnt;
+ cur_idx = txq->drb_release_rd_idx;
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ for (i = 0; i < release_cnt; i++) {
+ cur_drb = drb_base + cur_idx;
+ if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
+ cur_drb_skb = drb_skb_base + cur_idx;
+ if (!cur_drb_skb->is_msg)
+ dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
+ cur_drb_skb->data_len, DMA_TO_DEVICE);
+
+ if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
+ if (!cur_drb_skb->skb) {
+ dev_err(dpmaif_ctrl->dev,
+ "txq%u: DRB check fail, invalid skb\n", q_num);
+ continue;
+ }
+
+ dev_kfree_skb_any(cur_drb_skb->skb);
+ }
+
+ cur_drb_skb->skb = NULL;
+ }
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
+ txq->drb_release_rd_idx = cur_idx;
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
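+		/* With over 1/8 of the ring free again, let the upper layer resume TX */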
+ if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
+ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
+ }
+
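+	/* The last DRB released should have closed its skb (no CONT flag set) */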
+ if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
+ dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);
+
+ return i;
+}
+
+static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
+ unsigned int q_num, unsigned int budget)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
+ unsigned int rel_cnt, real_rel_cnt;
+
+ /* Update read index from HW */
+ t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);
+
+ rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
+ txq->drb_rd_idx, DPMAIF_READ);
+
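+	/* Cap the release at 'budget'; a zero budget releases everything pending */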
+ real_rel_cnt = min_not_zero(budget, rel_cnt);
+ if (real_rel_cnt)
+ real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);
+
+ return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
+}
+
+static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
+{
+ return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
+}
+
+static void t7xx_dpmaif_tx_done(struct work_struct *work)
+{
+ struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
+ struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
+ struct dpmaif_hw_info *hw_info;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
+ /* The device may be in low power state. Disable sleep if needed */
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ hw_info = &dpmaif_ctrl->hw_info;
+ ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
+ if (ret == -EAGAIN ||
+ (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
+ t7xx_dpmaif_drb_ring_not_empty(txq))) {
+ queue_work(dpmaif_ctrl->txq[txq->index].worker,
+ &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
+ /* Give the device time to enter the low power state */
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
+ }
+ }
+
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+}
+
+static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
+ unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
+ unsigned int channel_id)
+{
+ struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
+ struct dpmaif_drb *drb = drb_base + cur_idx;
+
+ drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
+ FIELD_PREP(DRB_HDR_CONT, 1) |
+ FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));
+
+ drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
+ FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
+ FIELD_PREP(DRB_MSG_L4_CHK, 1));
+}
+
+static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
+ unsigned int cur_idx, dma_addr_t data_addr,
+ unsigned int pkt_size, bool last_one)
+{
+ struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
+ struct dpmaif_drb *drb = drb_base + cur_idx;
+ u32 header;
+
+ header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
+ if (!last_one)
+ header |= FIELD_PREP(DRB_HDR_CONT, 1);
+
+ drb->header = cpu_to_le32(header);
+ drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
+ drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
+}
+
+static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
+ unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
+ bool is_frag, bool is_last_one, dma_addr_t bus_addr,
+ unsigned int data_len)
+{
+ struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
+ struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;
+
+ drb_skb->skb = skb;
+ drb_skb->bus_addr = bus_addr;
+ drb_skb->data_len = data_len;
+ drb_skb->index = cur_idx;
+ drb_skb->is_msg = is_msg;
+ drb_skb->is_frag = is_frag;
+ drb_skb->is_last = is_last_one;
+}
+
+static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
+{
+ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+ unsigned int wr_cnt, send_cnt, payload_cnt;
+ unsigned int cur_idx, drb_wr_idx_backup;
+ struct skb_shared_info *shinfo;
+ struct dpmaif_tx_queue *txq;
+ struct t7xx_skb_cb *skb_cb;
+ unsigned long flags;
+
+ skb_cb = T7XX_SKB_CB(skb);
+ txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
+ if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
+ return -ENODEV;
+
+ atomic_set(&txq->tx_processing, 1);
+	/* Ensure tx_processing is set to 1 before the TX flow actually begins */
+ smp_mb();
+
+ shinfo = skb_shinfo(skb);
+ if (shinfo->frag_list)
+ dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");
+
+	/* One payload DRB per frag plus one for skb->data, plus one msg DRB */
+	payload_cnt = shinfo->nr_frags + 1;
+	send_cnt = payload_cnt + 1;
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ cur_idx = txq->drb_wr_idx;
+ drb_wr_idx_backup = cur_idx;
+ txq->drb_wr_idx += send_cnt;
+ if (txq->drb_wr_idx >= txq->drb_size_cnt)
+ txq->drb_wr_idx -= txq->drb_size_cnt;
+ t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
+ t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+
+ for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
+ bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
+ unsigned int data_len;
+ dma_addr_t bus_addr;
+ void *data_addr;
+
+ if (!wr_cnt) {
+ data_len = skb_headlen(skb);
+ data_addr = skb->data;
+ is_frag = false;
+ } else {
+ skb_frag_t *frag = shinfo->frags + wr_cnt - 1;
+
+ data_len = skb_frag_size(frag);
+ data_addr = skb_frag_address(frag);
+ is_frag = true;
+ }
+
+ bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
+ goto unmap_buffers;
+
+ cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);
+
+ spin_lock_irqsave(&txq->tx_lock, flags);
+ t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
+ is_last_one);
+ t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
+ is_last_one, bus_addr, data_len);
+ spin_unlock_irqrestore(&txq->tx_lock, flags);
+ }
+
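+	/* Report full early: the next skb may need up to MAX_SKB_FRAGS + 2 DRBs */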
+ if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
+ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);
+
+ atomic_set(&txq->tx_processing, 0);
+
+ return 0;
+
+unmap_buffers:
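+	/* Walk the ring backwards, unmapping the payload DRBs written so far */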
+ while (wr_cnt--) {
+ struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;
+
+ cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
+ drb_skb += cur_idx;
+ dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
+ drb_skb->data_len, DMA_TO_DEVICE);
+ }
+
+ txq->drb_wr_idx = drb_wr_idx_backup;
+ atomic_set(&txq->tx_processing, 0);
+
+ return -ENOMEM;
+}
+
+static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
+ return false;
+ }
+
+ return true;
+}
+
+/* Currently, only the default TX queue is used */
+static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_tx_queue *txq;
+
+ txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
+ if (!txq->que_started)
+ return NULL;
+
+ return txq;
+}
+
+static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
+{
+ return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
+ txq->drb_wr_idx, DPMAIF_WRITE);
+}
+
+static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
+{
+ /* Normal DRB (frags data + skb linear data) + msg DRB */
+ return skb_shinfo(skb)->nr_frags + 2;
+}
+
+static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
+{
+ unsigned int drb_remain_cnt, i;
+ unsigned int send_drb_cnt;
+ int drb_cnt = 0;
+ int ret = 0;
+
+ drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
+
+ for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
+ struct sk_buff *skb;
+
+ skb = skb_peek(&txq->tx_skb_head);
+ if (!skb)
+ break;
+
+ send_drb_cnt = t7xx_skb_drb_cnt(skb);
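+		/* Not enough free DRBs: refresh the count, as TX-done may have released some */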
+ if (drb_remain_cnt < send_drb_cnt) {
+ drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
+ continue;
+ }
+
+ drb_remain_cnt -= send_drb_cnt;
+
+ ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
+ if (ret < 0) {
+ dev_err(txq->dpmaif_ctrl->dev,
+ "Failed to add skb to device's ring: %d\n", ret);
+ break;
+ }
+
+ drb_cnt += send_drb_cnt;
+ skb_unlink(skb, &txq->tx_skb_head);
+ }
+
+ if (drb_cnt > 0)
+ return drb_cnt;
+
+ return ret;
+}
+
+static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ bool wait_disable_sleep = true;
+
+ do {
+ struct dpmaif_tx_queue *txq;
+ int drb_send_cnt;
+
+ txq = t7xx_select_tx_queue(dpmaif_ctrl);
+ if (!txq)
+ return;
+
+ drb_send_cnt = t7xx_txq_burst_send_skb(txq);
+ if (drb_send_cnt <= 0) {
+ usleep_range(10, 20);
+ cond_resched();
+ continue;
+ }
+
+ /* Wait for the PCIe resource to unlock */
+ if (wait_disable_sleep) {
+ if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ return;
+
+ wait_disable_sleep = false;
+ }
+
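+		/* Ring the UL doorbell; the HW expects the count in words, not in DRBs */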
+ t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
+ drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
+
+ cond_resched();
+ } while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
+ (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
+}
+
+static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = arg;
+ int ret;
+
+ while (!kthread_should_stop()) {
+ if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
+ dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
+ if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
+ (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
+ dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
+ kthread_should_stop()))
+ continue;
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ t7xx_do_tx_hw_push(dpmaif_ctrl);
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+ }
+
+ return 0;
+}
+
+int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ init_waitqueue_head(&dpmaif_ctrl->tx_wq);
+ dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
+ dpmaif_ctrl, "dpmaif_tx_hw_push");
+ return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
+}
+
+void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ if (dpmaif_ctrl->tx_thread)
+ kthread_stop(dpmaif_ctrl->tx_thread);
+}
+
+/**
+ * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
+ * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
+ * @txq_number: Queue number to xmit on.
+ * @skb: Pointer to the skb to transmit.
+ *
+ * Add the skb to the queue of skbs to be transmitted.
+ * Wake up the thread that pushes skbs from the queue to the HW.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EBUSY - Tx budget exhausted.
+ *	     In normal circumstances, t7xx_dpmaif_add_skb_to_ring() reports the txq
+ *	     full state early enough to prevent this error condition.
+ */
+int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
+ struct sk_buff *skb)
+{
+ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
+ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
+ struct t7xx_skb_cb *skb_cb;
+
+ if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
+ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
+ return -EBUSY;
+ }
+
+ skb_cb = T7XX_SKB_CB(skb);
+ skb_cb->txq_number = txq_number;
+ skb_queue_tail(&txq->tx_skb_head, skb);
+ wake_up(&dpmaif_ctrl->tx_wq);
+
+ return 0;
+}
+
+void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ if (que_mask & BIT(i))
+ queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
+ }
+}
+
+static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
+{
+	size_t drb_skb_size, drb_pd_size;
+
+	drb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
+	drb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);
+
+ txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;
+
+	/* Shared between the HW and the AP SW */
+	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, drb_pd_size,
+					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!txq->drb_base)
+ return -ENOMEM;
+
+ /* For AP SW to record the skb information */
+	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, drb_skb_size, GFP_KERNEL);
+ if (!txq->drb_skb_base) {
+		dma_free_coherent(txq->dpmaif_ctrl->dev, drb_pd_size,
+				  txq->drb_base, txq->drb_bus_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
+{
+ struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
+ unsigned int i;
+
+ if (!drb_skb_base)
+ return;
+
+ for (i = 0; i < txq->drb_size_cnt; i++) {
+ drb_skb = drb_skb_base + i;
+ if (!drb_skb->skb)
+ continue;
+
+ if (!drb_skb->is_msg)
+ dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
+ drb_skb->data_len, DMA_TO_DEVICE);
+
+ if (drb_skb->is_last) {
+ dev_kfree_skb(drb_skb->skb);
+ drb_skb->skb = NULL;
+ }
+ }
+}
+
+static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
+{
+ if (txq->drb_base)
+ dma_free_coherent(txq->dpmaif_ctrl->dev,
+ txq->drb_size_cnt * sizeof(struct dpmaif_drb),
+ txq->drb_base, txq->drb_bus_addr);
+
+ t7xx_dpmaif_tx_free_drb_skb(txq);
+}
+
+/**
+ * t7xx_dpmaif_txq_init() - Initialize TX queue.
+ * @txq: Pointer to struct dpmaif_tx_queue.
+ *
+ * Initialize the TX queue data structure and allocate memory for it to use.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
+{
+ int ret;
+
+ skb_queue_head_init(&txq->tx_skb_head);
+ init_waitqueue_head(&txq->req_wq);
+ atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);
+
+ ret = t7xx_dpmaif_tx_drb_buf_init(txq);
+ if (ret) {
+ dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
+ return ret;
+ }
+
+ txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
+ (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
+ if (!txq->worker)
+ return -ENOMEM;
+
+ INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
+ spin_lock_init(&txq->tx_lock);
+
+ return 0;
+}
+
+void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
+{
+ if (txq->worker)
+ destroy_workqueue(txq->worker);
+
+ skb_queue_purge(&txq->tx_skb_head);
+ t7xx_dpmaif_tx_drb_buf_rel(txq);
+}
+
+void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
+ struct dpmaif_tx_queue *txq;
+ int count = 0;
+
+ txq = &dpmaif_ctrl->txq[i];
+ txq->que_started = false;
+ /* Make sure TXQ is disabled */
+ smp_mb();
+
+ /* Wait for active Tx to be done */
+ while (atomic_read(&txq->tx_processing)) {
+ if (++count >= DPMAIF_MAX_CHECK_COUNT) {
+ dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
+ break;
+ }
+ }
+ }
+}
+
+static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
+{
+ txq->que_started = false;
+
+ cancel_work_sync(&txq->dpmaif_tx_work);
+ flush_work(&txq->dpmaif_tx_work);
+ t7xx_dpmaif_tx_free_drb_skb(txq);
+
+ txq->drb_rd_idx = 0;
+ txq->drb_wr_idx = 0;
+ txq->drb_release_rd_idx = 0;
+}
+
+void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int i;
+
+ for (i = 0; i < DPMAIF_TXQ_NUM; i++)
+ t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h
new file mode 100644
index 000000000000..ca9b9ea2da8b
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_HIF_DPMA_TX_H__
+#define __T7XX_HIF_DPMA_TX_H__
+
+#include <linux/bits.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+
+#define DPMAIF_TX_DEFAULT_QUEUE 0
+
+struct dpmaif_drb {
+ __le32 header;
+ union {
+ struct {
+ __le32 data_addr_l;
+ __le32 data_addr_h;
+ } pd;
+ struct {
+ __le32 msg_hdr;
+ __le32 reserved1;
+ } msg;
+ };
+ __le32 reserved2;
+};
+
+/* Header fields */
+#define DRB_HDR_DATA_LEN GENMASK(31, 16)
+#define DRB_HDR_RESERVED GENMASK(15, 3)
+#define DRB_HDR_CONT BIT(2)
+#define DRB_HDR_DTYP GENMASK(1, 0)
+
+#define DRB_MSG_DW2_RES GENMASK(31, 30)
+#define DRB_MSG_L4_CHK BIT(29)
+#define DRB_MSG_IP_CHK BIT(28)
+#define DRB_MSG_RESERVED BIT(27)
+#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24)
+#define DRB_MSG_CHANNEL_ID GENMASK(23, 16)
+#define DRB_MSG_COUNT_L GENMASK(15, 0)
+
+struct dpmaif_drb_skb {
+ struct sk_buff *skb;
+ dma_addr_t bus_addr;
+ unsigned int data_len;
+ u16 index:13;
+ u16 is_msg:1;
+ u16 is_frag:1;
+ u16 is_last:1;
+};
+
+int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
+ struct sk_buff *skb);
+void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl);
+int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl);
+void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq);
+void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask);
+int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq);
+void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl);
+void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl);
+
+#endif /* __T7XX_HIF_DPMA_TX_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c
new file mode 100644
index 000000000000..3ee18d46f8d2
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/dev_printk.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_reg.h"
+
+#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \
+ D2H_INT_RESUME_ACK | \
+ D2H_INT_SUSPEND_ACK_AP | \
+ D2H_INT_RESUME_ACK_AP)
+
+static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask)
+{
+ void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
+
+ /* Clear level 2 interrupt */
+ iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK);
+ /* Ensure write is complete */
+ t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ /* Clear level 1 interrupt */
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT);
+}
+
+static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data)
+{
+ struct t7xx_pci_dev *t7xx_dev = data;
+ u32 int_status, val;
+
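+	/* Keep the PCIe link out of L1.1/L1.2 while the interrupt is serviced */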
+ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
+ iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+
+ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ if (int_status & D2H_SW_INT_MASK) {
+ int ret = t7xx_pci_mhccif_isr(t7xx_dev);
+
+ if (ret)
+			dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d\n", ret);
+ }
+
+ t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
+
+ if (int_status & D2H_INT_DS_LOCK_ACK)
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+
+ if (int_status & D2H_INT_SR_ACK)
+ complete(&t7xx_dev->pm_sr_ack);
+
+ iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+
+ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ if (!int_status) {
+ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
+ iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ }
+
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+ return IRQ_HANDLED;
+}
+
+u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev)
+{
+ return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS);
+}
+
+void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val)
+{
+ iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET);
+}
+
+void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val)
+{
+ iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR);
+}
+
+u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev)
+{
+ return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK);
+}
+
+static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base +
+ MHCCIF_RC_DEV_BASE -
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+
+ t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler;
+ t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread;
+ t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev;
+}
+
+void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel)
+{
+ void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
+
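+	/* Flag the channel busy, then write its number to raise the SW interrupt */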
+ iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY);
+ iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h
new file mode 100644
index 000000000000..209b386bc088
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_MHCCIF_H__
+#define __T7XX_MHCCIF_H__
+
+#include <linux/types.h>
+
+#include "t7xx_pci.h"
+#include "t7xx_reg.h"
+
+#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT | \
+ D2H_INT_EXCEPTION_INIT_DONE | \
+ D2H_INT_EXCEPTION_CLEARQ_DONE | \
+ D2H_INT_EXCEPTION_ALLQ_RESET | \
+ D2H_INT_PORT_ENUM | \
+ D2H_INT_ASYNC_MD_HK)
+
+void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
+void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val);
+u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev);
+u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel);
+
+#endif /*__T7XX_MHCCIF_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
new file mode 100644
index 000000000000..3458af31e864
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/kthread.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_cldma.h"
+#include "t7xx_hif_cldma.h"
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_netdev.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define RT_ID_MD_PORT_ENUM 0
+/* Modem feature query identification code - "ICCC" */
+#define MD_FEATURE_QUERY_ID 0x49434343
+
+#define FEATURE_VER GENMASK(7, 4)
+#define FEATURE_MSK GENMASK(3, 0)
+
+#define RGU_RESET_DELAY_MS 10
+#define PORT_RESET_DELAY_MS 2000
+#define EX_HS_TIMEOUT_MS 5000
+#define EX_HS_POLL_DELAY_MS 10
+
+enum mtk_feature_support_type {
+ MTK_FEATURE_DOES_NOT_EXIST,
+ MTK_FEATURE_NOT_SUPPORTED,
+ MTK_FEATURE_MUST_BE_SUPPORTED,
+};
+
+static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
+}
+
+/**
+ * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
+ * @t7xx_dev: MTK device.
+ *
+ * Check the interrupt status and queue commands accordingly.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EINVAL - Failure to get FSM control.
+ */
+int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md = t7xx_dev->md;
+ struct t7xx_fsm_ctl *ctl;
+ unsigned int int_sta;
+ int ret = 0;
+ u32 mask;
+
+ ctl = md->fsm_ctl;
+ if (!ctl) {
+ dev_err_ratelimited(&t7xx_dev->pdev->dev,
+ "MHCCIF interrupt received before initializing MD monitor\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&md->exp_lock);
+ int_sta = t7xx_get_interrupt_status(t7xx_dev);
+ md->exp_id |= int_sta;
+ if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
+ if (ctl->md_state == MD_STATE_INVALID ||
+ ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
+ ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
+ ctl->md_state == MD_STATE_READY) {
+ md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
+ ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
+ }
+ } else if (md->exp_id & D2H_INT_PORT_ENUM) {
+ md->exp_id &= ~D2H_INT_PORT_ENUM;
+
+ if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
+ ctl->curr_state == FSM_STATE_STOPPED)
+ ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
+ } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
+ mask = t7xx_mhccif_mask_get(t7xx_dev);
+ if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ queue_work(md->handshake_wq, &md->handshake_work);
+ }
+ }
+ spin_unlock_bh(&md->exp_lock);
+
+ return ret;
+}
+
+static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
+ void __iomem *reset_pcie_reg;
+ u32 val;
+
+ reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
+ pbase_addr->pcie_dev_reg_trsl_addr;
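+	/* Read the latched status and write it back to clear it (assumed W1C) */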
+ val = ioread32(reset_pcie_reg);
+ iowrite32(val, reset_pcie_reg);
+}
+
+void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Clear L2 */
+ t7xx_clr_device_irq_via_pcie(t7xx_dev);
+ /* Clear L1 */
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+}
+
+static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct device *dev = &t7xx_dev->pdev->dev;
+ acpi_status acpi_ret;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle) {
+ dev_err(dev, "ACPI handle not found\n");
+ return -EFAULT;
+ }
+
+ if (!acpi_has_method(handle, fn_name)) {
+ dev_err(dev, "%s method not found\n", fn_name);
+ return -EFAULT;
+ }
+
+ acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
+ if (ACPI_FAILURE(acpi_ret)) {
+		dev_err(dev, "%s method failed: %s\n", fn_name, acpi_format_exception(acpi_ret));
+ return -EFAULT;
+ }
+
+#endif
+ return 0;
+}
+
+int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_acpi_reset(t7xx_dev, "_RST");
+}
+
+static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
+{
+ u32 val;
+
+ val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ if (val & MISC_RESET_TYPE_PLDR)
+ t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ else if (val & MISC_RESET_TYPE_FLDR)
+ t7xx_acpi_fldr_func(t7xx_dev);
+}
+
+static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
+{
+ struct t7xx_pci_dev *t7xx_dev = data;
+
+ msleep(RGU_RESET_DELAY_MS);
+ t7xx_reset_device_via_pmic(t7xx_dev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
+{
+ struct t7xx_pci_dev *t7xx_dev = data;
+ struct t7xx_modem *modem;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ if (!t7xx_dev->rgu_pci_irq_en)
+ return IRQ_HANDLED;
+
+ modem = t7xx_dev->md;
+ modem->rgu_irq_asserted = true;
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ return IRQ_WAKE_THREAD;
+}
+
+static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Registers RGU callback ISR with PCIe driver */
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+
+ t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
+ t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
+ t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+}
+
+/**
+ * t7xx_cldma_exception() - CLDMA exception handler.
+ * @md_ctrl: modem control struct.
+ * @stage: exception stage.
+ *
+ * Part of the modem exception recovery.
+ * Stages are one after the other, as described below:
+ * HIF_EX_INIT: Disable and clear TXQ.
+ * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
+ * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart.
+ */
+
+/* Modem Exception Handshake Flow
+ *
+ * Modem HW Exception interrupt received
+ * (MD_IRQ_CCIF_EX)
+ * |
+ * +---------v--------+
+ * | HIF_EX_INIT | : Disable and clear TXQ
+ * +------------------+
+ * |
+ * +---------v--------+
+ * | HIF_EX_INIT_DONE | : Wait for the init to be done
+ * +------------------+
+ * |
+ * +---------v--------+
+ * |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
+ * +------------------+ : Flush TX/RX workqueues
+ * |
+ * +---------v--------+
+ * |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
+ * +------------------+
+ */
+static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
+{
+ switch (stage) {
+ case HIF_EX_INIT:
+ t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
+ t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
+ break;
+
+ case HIF_EX_CLEARQ_DONE:
+ /* We do not want to get CLDMA IRQ when MD is
+ * resetting CLDMA after it got clearq_ack.
+ */
+ t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
+ t7xx_cldma_stop(md_ctrl);
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);
+
+ t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
+ break;
+
+ case HIF_EX_ALLQ_RESET:
+ t7xx_cldma_hw_init(&md_ctrl->hw_info);
+ t7xx_cldma_start(md_ctrl);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
+{
+ struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;
+
+ if (stage == HIF_EX_CLEARQ_DONE) {
+ /* Give DHL time to flush data */
+ msleep(PORT_RESET_DELAY_MS);
+ t7xx_port_proxy_reset(md->port_prox);
+ }
+
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
+
+ if (stage == HIF_EX_INIT)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
+ else if (stage == HIF_EX_CLEARQ_DONE)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
+}
+
+static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
+{
+ unsigned int waited_time_ms = 0;
+
+ do {
+ if (md->exp_id & event_id)
+ return 0;
+
+ waited_time_ms += EX_HS_POLL_DELAY_MS;
+ msleep(EX_HS_POLL_DELAY_MS);
+ } while (waited_time_ms < EX_HS_TIMEOUT_MS);
+
+ return -EFAULT;
+}
+
+static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Register the MHCCIF ISR for MD exception, port enum and
+ * async handshake notifications.
+ */
+ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
+ t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);
+
+ /* Register RGU IRQ handler for sAP exception notification */
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_register_rgu_isr(t7xx_dev);
+}
+
+struct feature_query {
+ __le32 head_pattern;
+ u8 feature_set[FEATURE_COUNT];
+ __le32 tail_pattern;
+};
+
+static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
+{
+ struct feature_query *ft_query;
+ struct sk_buff *skb;
+
+ skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
+ if (!skb)
+ return;
+
+ ft_query = skb_put(skb, sizeof(*ft_query));
+ ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
+ memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
+ ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
+
+ /* Send HS1 message to device */
+ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
+}
+
+static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
+ void *data)
+{
+ struct feature_query *md_feature = data;
+ struct mtk_runtime_feature *rt_feature;
+ unsigned int i, rt_data_len = 0;
+ struct sk_buff *skb;
+
+ /* Parse MD runtime data query */
+ if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
+ le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
+ dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
+ le32_to_cpu(md_feature->head_pattern),
+ le32_to_cpu(md_feature->tail_pattern));
+ return -EINVAL;
+ }
+
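+	/* Size the HS3 reply: one entry per feature the host does not require */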
+ for (i = 0; i < FEATURE_COUNT; i++) {
+ if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
+ MTK_FEATURE_MUST_BE_SUPPORTED)
+ rt_data_len += sizeof(*rt_feature);
+ }
+
+ skb = t7xx_ctrl_alloc_skb(rt_data_len);
+ if (!skb)
+ return -ENOMEM;
+
+ rt_feature = skb_put(skb, rt_data_len);
+ memset(rt_feature, 0, rt_data_len);
+
+ /* Fill runtime feature */
+ for (i = 0; i < FEATURE_COUNT; i++) {
+ u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);
+
+ if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
+ continue;
+
+ rt_feature->feature_id = i;
+ if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
+ rt_feature->support_info = md_feature->feature_set[i];
+
+ rt_feature++;
+ }
+
+ /* Send HS3 message to device */
+ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
+ return 0;
+}
+
+static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
+ struct device *dev, void *data, int data_length)
+{
+ enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
+ struct mtk_runtime_feature *rt_feature;
+ int i, offset;
+
+ offset = sizeof(struct feature_query);
+ for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
+ rt_feature = data + offset;
+ offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);
+
+ ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
+ if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
+ continue;
+
+ ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
+ if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
+ return -EINVAL;
+
+ if (i == RT_ID_MD_PORT_ENUM)
+ t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
+ }
+
+ return 0;
+}
+
+static int t7xx_core_reset(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ md->core_md.ready = false;
+
+ if (!ctl) {
+ dev_err(dev, "FSM is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (md->core_md.handshake_ongoing) {
+ int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ md->core_md.handshake_ongoing = false;
+ return 0;
+}
+
+static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
+ enum t7xx_fsm_event_state event_id,
+ enum t7xx_fsm_event_state err_detect)
+{
+ struct t7xx_fsm_event *event = NULL, *event_next;
+ struct t7xx_sys_info *core_info = &md->core_md;
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ unsigned long flags;
+ int ret;
+
+ t7xx_prepare_host_rt_data_query(core_info);
+
+ while (!kthread_should_stop()) {
+ bool event_received = false;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
+ if (event->event_id == err_detect) {
+ list_del(&event->entry);
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+ dev_err(dev, "Core handshake error event received\n");
+ goto err_free_event;
+ } else if (event->event_id == event_id) {
+ list_del(&event->entry);
+ event_received = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ if (event_received)
+ break;
+
+ wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ goto err_free_event;
+ }
+
+ if (!event || ctl->exp_flg)
+ goto err_free_event;
+
+ ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
+ if (ret) {
+		dev_err(dev, "Failed to parse host runtime data: %d\n", ret);
+ goto err_free_event;
+ }
+
+ if (ctl->exp_flg)
+ goto err_free_event;
+
+ ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
+ if (ret) {
+		dev_err(dev, "Failed to prepare device runtime data: %d\n", ret);
+ goto err_free_event;
+ }
+
+ core_info->ready = true;
+ core_info->handshake_ongoing = false;
+ wake_up(&ctl->async_hk_wq);
+err_free_event:
+ kfree(event);
+}
+
+static void t7xx_md_hk_wq(struct work_struct *work)
+{
+ struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+	/* Clear the HS2 EXIT event appended in t7xx_core_reset() */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
+ md->core_md.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+}
+
+void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ void __iomem *mhccif_base;
+ unsigned int int_sta;
+ unsigned long flags;
+
+ switch (evt_id) {
+ case FSM_PRE_START:
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
+ break;
+
+ case FSM_START:
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);
+
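+		/* An exception may already be pending; check before kicking off the handshake */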
+ spin_lock_irqsave(&md->exp_lock, flags);
+ int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
+ md->exp_id |= int_sta;
+ if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
+ ctl->exp_flg = true;
+ md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ } else if (ctl->exp_flg) {
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
+ queue_work(md->handshake_wq, &md->handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
+ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ } else {
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ }
+ spin_unlock_irqrestore(&md->exp_lock, flags);
+
+ t7xx_mhccif_mask_clr(md->t7xx_dev,
+ D2H_INT_EXCEPTION_INIT |
+ D2H_INT_EXCEPTION_INIT_DONE |
+ D2H_INT_EXCEPTION_CLEARQ_DONE |
+ D2H_INT_EXCEPTION_ALLQ_RESET);
+ break;
+
+ case FSM_READY:
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void t7xx_md_exception_handshake(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ int ret;
+
+ t7xx_md_exception(md, HIF_EX_INIT);
+ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
+ if (ret)
+ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);
+
+ t7xx_md_exception(md, HIF_EX_INIT_DONE);
+ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
+ if (ret)
+ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);
+
+ t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
+ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
+ if (ret)
+ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);
+
+ t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
+}
+
+static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct t7xx_modem *md;
+
+ md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return NULL;
+
+ md->t7xx_dev = t7xx_dev;
+ t7xx_dev->md = md;
+ spin_lock_init(&md->exp_lock);
+ md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ 0, "md_hk_wq");
+ if (!md->handshake_wq)
+ return NULL;
+
+ INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+ return md;
+}
+
+int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md = t7xx_dev->md;
+
+ md->md_init_finish = false;
+ md->exp_id = 0;
+ t7xx_fsm_reset(md);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_port_proxy_reset(md->port_prox);
+ md->md_init_finish = true;
+ return t7xx_core_reset(md);
+}
+
+/**
+ * t7xx_md_init() - Initialize modem.
+ * @t7xx_dev: MTK device.
+ *
+ * Allocate and initialize MD control block, and initialize data path.
+ * Register MHCCIF ISR and RGU ISR, and start the state machine.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOMEM - Allocation failure.
+ */
+int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md;
+ int ret;
+
+ md = t7xx_md_alloc(t7xx_dev);
+ if (!md)
+ return -ENOMEM;
+
+ ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
+ if (ret)
+ goto err_destroy_hswq;
+
+ ret = t7xx_fsm_init(md);
+ if (ret)
+ goto err_destroy_hswq;
+
+ ret = t7xx_ccmni_init(t7xx_dev);
+ if (ret)
+ goto err_uninit_fsm;
+
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
+ if (ret)
+ goto err_uninit_ccmni;
+
+ ret = t7xx_port_proxy_init(md);
+ if (ret)
+ goto err_uninit_md_cldma;
+
+ ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
+ if (ret) /* fsm_uninit flushes cmd queue */
+ goto err_uninit_proxy;
+
+ t7xx_md_sys_sw_init(t7xx_dev);
+ md->md_init_finish = true;
+ return 0;
+
+err_uninit_proxy:
+ t7xx_port_proxy_uninit(md->port_prox);
+
+err_uninit_md_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+
+err_uninit_ccmni:
+ t7xx_ccmni_exit(t7xx_dev);
+
+err_uninit_fsm:
+ t7xx_fsm_uninit(md);
+
+err_destroy_hswq:
+ destroy_workqueue(md->handshake_wq);
+ dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
+ return ret;
+}
+
+void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_modem *md = t7xx_dev->md;
+
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+
+ if (!md->md_init_finish)
+ return;
+
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ t7xx_port_proxy_uninit(md->port_prox);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_ccmni_exit(t7xx_dev);
+ t7xx_fsm_uninit(md);
+ destroy_workqueue(md->handshake_wq);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
new file mode 100644
index 000000000000..7469ed636ae8
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_MODEM_OPS_H__
+#define __T7XX_MODEM_OPS_H__
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_pci.h"
+
+#define FEATURE_COUNT 64
+
+/**
+ * enum hif_ex_stage - HIF exception handshake stages with the HW.
+ * @HIF_EX_INIT: Disable and clear TXQ.
+ * @HIF_EX_INIT_DONE: Polling for initialization to be done.
+ * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
+ * @HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart.
+ */
+enum hif_ex_stage {
+ HIF_EX_INIT,
+ HIF_EX_INIT_DONE,
+ HIF_EX_CLEARQ_DONE,
+ HIF_EX_ALLQ_RESET,
+};
+
+struct mtk_runtime_feature {
+ u8 feature_id;
+ u8 support_info;
+ u8 reserved[2];
+ __le32 data_len;
+ __le32 data[];
+};
+
+enum md_event_id {
+ FSM_PRE_START,
+ FSM_START,
+ FSM_READY,
+};
+
+struct t7xx_sys_info {
+ bool ready;
+ bool handshake_ongoing;
+ u8 feature_set[FEATURE_COUNT];
+ struct t7xx_port *ctl_port;
+};
+
+struct t7xx_modem {
+ struct cldma_ctrl *md_ctrl[CLDMA_NUM];
+ struct t7xx_pci_dev *t7xx_dev;
+ struct t7xx_sys_info core_md;
+ bool md_init_finish;
+ bool rgu_irq_asserted;
+ struct workqueue_struct *handshake_wq;
+ struct work_struct handshake_work;
+ struct t7xx_fsm_ctl *fsm_ctl;
+ struct port_proxy *port_prox;
+ unsigned int exp_id;
+ spinlock_t exp_lock; /* Protects exception events */
+};
+
+void t7xx_md_exception_handshake(struct t7xx_modem *md);
+void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id);
+int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
new file mode 100644
index 000000000000..f71d3bc3b237
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/wwan.h>
+#include <net/pkt_sched.h>
+
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_netdev.h"
+#include "t7xx_pci.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define IP_MUX_SESSION_DEFAULT 0
+
+static int t7xx_ccmni_open(struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+ atomic_inc(&ccmni->usage);
+ return 0;
+}
+
+static int t7xx_ccmni_close(struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+
+ atomic_dec(&ccmni->usage);
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ return 0;
+}
+
+static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
+ unsigned int txq_number)
+{
+ struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
+ struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);
+
+ skb_cb->netif_idx = ccmni->index;
+
+ if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
+ return NETDEV_TX_BUSY;
+
+ return 0;
+}
+
+static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ int skb_len = skb->len;
+
+ /* If MTU is changed or there is no headroom, drop the packet */
+ if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
+ return NETDEV_TX_BUSY;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+}
+
+static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
+{
+ struct t7xx_ccmni *ccmni = netdev_priv(dev);
+
+ dev->stats.tx_errors++;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_tx_wake_all_queues(dev);
+}
+
+static const struct net_device_ops ccmni_netdev_ops = {
+ .ndo_open = t7xx_ccmni_open,
+ .ndo_stop = t7xx_ccmni_close,
+ .ndo_start_xmit = t7xx_ccmni_start_xmit,
+ .ndo_tx_timeout = t7xx_ccmni_tx_timeout,
+};
+
+static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0) {
+ netif_tx_start_all_queues(ccmni->dev);
+ netif_carrier_on(ccmni->dev);
+ }
+ }
+}
+
+static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_tx_disable(ccmni->dev);
+ }
+}
+
+static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_carrier_off(ccmni->dev);
+ }
+}
+
+static void t7xx_ccmni_wwan_setup(struct net_device *dev)
+{
+ dev->hard_header_len += sizeof(struct ccci_header);
+
+ dev->mtu = ETH_DATA_LEN;
+ dev->max_mtu = CCMNI_MTU_MAX;
+ BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);
+
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
+
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ dev->features = NETIF_F_VLAN_CHALLENGED;
+
+ dev->features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_SG;
+
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_HW_CSUM;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM;
+
+ dev->needs_free_netdev = true;
+
+ dev->type = ARPHRD_NONE;
+
+ dev->netdev_ops = &ccmni_netdev_ops;
+}
+
+static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
+ struct netlink_ext_ack *extack)
+{
+ struct t7xx_ccmni_ctrl *ctlb = ctxt;
+ struct t7xx_ccmni *ccmni;
+ int ret;
+
+ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
+ return -EINVAL;
+
+ ccmni = wwan_netdev_drvpriv(dev);
+ ccmni->index = if_id;
+ ccmni->ctlb = ctlb;
+ ccmni->dev = dev;
+ atomic_set(&ccmni->usage, 0);
+ ctlb->ccmni_inst[if_id] = ccmni;
+
+ ret = register_netdevice(dev);
+ if (ret)
+ return ret;
+
+ netif_device_attach(dev);
+ return 0;
+}
+
+static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ctlb = ctxt;
+ u8 if_id = ccmni->index;
+
+ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
+ return;
+
+ if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
+ return;
+
+ unregister_netdevice(dev);
+}
+
+static const struct wwan_ops ccmni_wwan_ops = {
+ .priv_size = sizeof(struct t7xx_ccmni),
+ .setup = t7xx_ccmni_wwan_setup,
+ .newlink = t7xx_ccmni_wwan_newlink,
+ .dellink = t7xx_ccmni_wwan_dellink,
+};
+
+static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct device *dev = ctlb->hif_ctrl->dev;
+ int ret;
+
+ if (ctlb->wwan_is_registered)
+ return 0;
+
+ /* WWAN core will create a netdev for the default IP MUX channel */
+ ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
+ if (ret < 0) {
+ dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
+ return ret;
+ }
+
+ ctlb->wwan_is_registered = true;
+ return 0;
+}
+
+static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
+{
+ struct t7xx_ccmni_ctrl *ctlb = para;
+ struct device *dev;
+ int ret = 0;
+
+ dev = ctlb->hif_ctrl->dev;
+ ctlb->md_sta = state;
+
+ switch (state) {
+ case MD_STATE_READY:
+ ret = t7xx_ccmni_register_wwan(ctlb);
+ if (!ret)
+ t7xx_ccmni_start(ctlb);
+ break;
+
+ case MD_STATE_EXCEPTION:
+ case MD_STATE_STOPPED:
+ t7xx_ccmni_pre_stop(ctlb);
+
+ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
+ if (ret < 0)
+ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
+
+ t7xx_ccmni_post_stop(ctlb);
+ break;
+
+ case MD_STATE_WAITING_FOR_HS1:
+ case MD_STATE_WAITING_TO_STOP:
+ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
+ if (ret < 0)
+ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+ struct t7xx_fsm_notifier *md_status_notifier;
+
+ md_status_notifier = &ctlb->md_status_notify;
+ INIT_LIST_HEAD(&md_status_notifier->entry);
+ md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
+ md_status_notifier->data = ctlb;
+
+ t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
+}
+
+static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
+{
+ struct t7xx_skb_cb *skb_cb;
+ struct net_device *net_dev;
+ struct t7xx_ccmni *ccmni;
+ int pkt_type, skb_len;
+ u8 netif_id;
+
+ skb_cb = T7XX_SKB_CB(skb);
+ netif_id = skb_cb->netif_idx;
+ ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
+ if (!ccmni) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ net_dev = ccmni->dev;
+ skb->dev = net_dev;
+
+ pkt_type = skb_cb->rx_pkt_type;
+ if (pkt_type == PKT_TYPE_IP6)
+ skb->protocol = htons(ETH_P_IPV6);
+ else
+ skb->protocol = htons(ETH_P_IP);
+
+ skb_len = skb->len;
+ netif_rx(skb);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += skb_len;
+}
+
+static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+{
+ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct netdev_queue *net_queue;
+
+ if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
+ net_queue = netdev_get_tx_queue(ccmni->dev, qno);
+ if (netif_tx_queue_stopped(net_queue))
+ netif_tx_wake_queue(net_queue);
+ }
+}
+
+static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+{
+ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct netdev_queue *net_queue;
+
+ if (atomic_read(&ccmni->usage) > 0) {
+ netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
+ net_queue = netdev_get_tx_queue(ccmni->dev, qno);
+ netif_tx_stop_queue(net_queue);
+ }
+}
+
+static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
+ enum dpmaif_txq_state state, int qno)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+
+ if (ctlb->md_sta != MD_STATE_READY)
+ return;
+
+ if (!ctlb->ccmni_inst[0]) {
+ dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
+ return;
+ }
+
+ if (state == DMPAIF_TXQ_STATE_IRQ)
+ t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
+ else if (state == DMPAIF_TXQ_STATE_FULL)
+ t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
+}
+
+int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct t7xx_ccmni_ctrl *ctlb;
+
+ ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
+ if (!ctlb)
+ return -ENOMEM;
+
+ t7xx_dev->ccmni_ctlb = ctlb;
+ ctlb->t7xx_dev = t7xx_dev;
+ ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
+ ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
+ ctlb->nic_dev_num = NIC_DEV_DEFAULT;
+
+ ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
+ if (!ctlb->hif_ctrl)
+ return -ENOMEM;
+
+ init_md_status_notifier(t7xx_dev);
+ return 0;
+}
+
+void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+
+ t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);
+
+ if (ctlb->wwan_is_registered) {
+ wwan_unregister_ops(&t7xx_dev->pdev->dev);
+ ctlb->wwan_is_registered = false;
+ }
+
+ t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
+}
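
The TX path above requires every skb to carry headroom for a struct ccci_header; t7xx_ccmni_wwan_setup() advertises that requirement through hard_header_len, and t7xx_port_alloc_skb() later in this patch applies the same rule on the control path. A minimal sketch, where the invented CCCI_HDR_LEN stands in for sizeof(struct ccci_header), of how a caller could satisfy the contract by hand:

#include <linux/skbuff.h>

#define CCCI_HDR_LEN	16	/* assumed header size, for illustration only */

static struct sk_buff *ccmni_alloc_tx_skb(unsigned int payload)
{
	struct sk_buff *skb = alloc_skb(payload + CCCI_HDR_LEN, GFP_KERNEL);

	if (skb)
		skb_reserve(skb, CCCI_HDR_LEN);	/* headroom for the header pushed later */

	return skb;
}

Reserving the headroom up front means the later skb_push() of the header cannot fail, which is why t7xx_ccmni_start_xmit() can simply drop packets that arrive without it.
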
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h
new file mode 100644
index 000000000000..f5ad49ca12a7
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_NETDEV_H__
+#define __T7XX_NETDEV_H__
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_pci.h"
+#include "t7xx_state_monitor.h"
+
+#define RXQ_NUM DPMAIF_RXQ_NUM
+#define NIC_DEV_MAX 21
+#define NIC_DEV_DEFAULT 2
+
+#define CCMNI_NETDEV_WDT_TO (1 * HZ)
+#define CCMNI_MTU_MAX 3000
+
+struct t7xx_ccmni {
+ u8 index;
+ atomic_t usage;
+ struct net_device *dev;
+ struct t7xx_ccmni_ctrl *ctlb;
+};
+
+struct t7xx_ccmni_ctrl {
+ struct t7xx_pci_dev *t7xx_dev;
+ struct dpmaif_ctrl *hif_ctrl;
+ struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX];
+ struct dpmaif_callbacks callbacks;
+ unsigned int nic_dev_num;
+ unsigned int md_sta;
+ struct t7xx_fsm_notifier md_status_notify;
+ bool wwan_is_registered;
+};
+
+int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_NETDEV_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
new file mode 100644
index 000000000000..871f2a27a398
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define T7XX_PCI_IREG_BASE 0
+#define T7XX_PCI_EREG_BASE 2
+
+#define PM_SLEEP_DIS_TIMEOUT_MS 20
+#define PM_ACK_TIMEOUT_MS 1500
+#define PM_AUTOSUSPEND_MS 20000
+#define PM_RESOURCE_POLL_TIMEOUT_US 10000
+#define PM_RESOURCE_POLL_STEP_US 100
+
+enum t7xx_pm_state {
+ MTK_PM_EXCEPTION,
+ MTK_PM_INIT, /* Device initialized, but handshake not completed */
+ MTK_PM_SUSPENDED,
+ MTK_PM_RESUMED,
+};
+
+static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+ void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
+ u32 value;
+
+ value = ioread32(ctrl_reg);
+
+ if (enable)
+ value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+ else
+ value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+
+ iowrite32(value, ctrl_reg);
+}
+
+static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
+{
+ int ret, val;
+
+ ret = read_poll_timeout(ioread32, val,
+ (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
+ PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
+ IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (ret == -ETIMEDOUT)
+ dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");
+
+ return ret;
+}
+
+static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct pci_dev *pdev = t7xx_dev->pdev;
+
+ INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
+ mutex_init(&t7xx_dev->md_pm_entity_mtx);
+ spin_lock_init(&t7xx_dev->md_pm_lock);
+ init_completion(&t7xx_dev->sleep_lock_acquire);
+ init_completion(&t7xx_dev->pm_sr_ack);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+
+ device_init_wakeup(&pdev->dev, true);
+ dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
+ DPM_FLAG_NO_DIRECT_COMPLETE);
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ return t7xx_wait_pm_config(t7xx_dev);
+}
+
+void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Enable the PCIe resource lock only after MD deep sleep is done */
+ t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_DS_LOCK_ACK |
+ D2H_INT_SUSPEND_ACK |
+ D2H_INT_RESUME_ACK |
+ D2H_INT_SUSPEND_ACK_AP |
+ D2H_INT_RESUME_ACK_AP);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+
+ pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+}
+
+static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
+{
+	/* The device is kept in the FSM re-init flow,
+	 * so just roll back the PM setting to the init setting.
+	 */
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+
+ pm_runtime_get_noresume(&t7xx_dev->pdev->dev);
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ return t7xx_wait_pm_config(t7xx_dev);
+}
+
+void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
+{
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ t7xx_wait_pm_config(t7xx_dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
+}
+
+int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
+{
+ struct md_pm_entity *entity;
+
+ mutex_lock(&t7xx_dev->md_pm_entity_mtx);
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->id == pm_entity->id) {
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return -EEXIST;
+ }
+ }
+
+ list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return 0;
+}
+
+int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
+{
+ struct md_pm_entity *entity, *tmp_entity;
+
+ mutex_lock(&t7xx_dev->md_pm_entity_mtx);
+ list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->id == pm_entity->id) {
+ list_del(&pm_entity->entity);
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+
+ return -ENXIO;
+}
+
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+
+ ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
+ msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
+ if (!ret)
+ dev_err_ratelimited(dev, "Resource wait complete timed out\n");
+
+ return ret;
+}
+
+/**
+ * t7xx_pci_disable_sleep() - Disable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * Lock the deep sleep capability; note that the device can still enter the deep
+ * sleep state while it is in the D0 state from the host's point of view.
+ *
+ * If the device is in the deep sleep state, wake it up and disable the deep sleep capability.
+ */
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count++;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock_and_complete;
+
+ if (t7xx_dev->sleep_disable_count == 1) {
+ u32 status;
+
+ reinit_completion(&t7xx_dev->sleep_lock_acquire);
+ t7xx_dev_set_sleep_capability(t7xx_dev, false);
+
+ status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (status & T7XX_PCIE_RESOURCE_STS_MSK)
+ goto unlock_and_complete;
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
+ }
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ return;
+
+unlock_and_complete:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+}
+
+/**
+ * t7xx_pci_enable_sleep() - Enable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * Once deep sleep is re-enabled, the device may enter the deep sleep state.
+ */
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count--;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock;
+
+ if (t7xx_dev->sleep_disable_count == 0)
+ t7xx_dev_set_sleep_capability(t7xx_dev, true);
+
+unlock:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+}
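
The pair of functions above is meant to bracket device register accesses: lock out deep sleep, wait for the lock to take effect, and always re-enable sleep afterwards so sleep_disable_count stays balanced. A minimal usage sketch, with my_hw_access as an invented name:

static void my_hw_access(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pci_disable_sleep(t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(t7xx_dev)) {
		/* Safe to touch device registers here */
	}

	/* Re-enable unconditionally: the count was incremented either way */
	t7xx_pci_enable_sleep(t7xx_dev);
}
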
+
+static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
+{
+ unsigned long wait_ret;
+
+ reinit_completion(&t7xx_dev->pm_sr_ack);
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
+ wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
+ msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
+ if (!wait_ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
+{
+ enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity *entity;
+ int ret;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
+ return -EFAULT;
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ ret = t7xx_wait_pm_config(t7xx_dev);
+ if (ret) {
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return ret;
+ }
+
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_dev->rgu_pci_irq_en = false;
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (!entity->suspend)
+ continue;
+
+ ret = entity->suspend(t7xx_dev, entity->entity_param);
+ if (ret) {
+ entity_id = entity->id;
+ dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
+ goto abort_suspend;
+ }
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
+ if (ret) {
+ dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
+ goto abort_suspend;
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
+ if (ret) {
+ t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
+ dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
+ goto abort_suspend;
+ }
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->suspend_late)
+ entity->suspend_late(t7xx_dev, entity->entity_param);
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return 0;
+
+abort_suspend:
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity_id == entity->id)
+ break;
+
+ if (entity->resume)
+ entity->resume(t7xx_dev, entity->entity_param);
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ return ret;
+}
+
+static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
+
+ /* Disable interrupt first and let the IPs enable them */
+ iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);
+
+	/* The device disables PCIe interrupts across suspend/resume;
+	 * the following call re-enables them.
+	 */
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+}
+
+static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
+{
+ int ret;
+
+ ret = pcim_enable_device(t7xx_dev->pdev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_atr_init(t7xx_dev);
+ t7xx_pcie_interrupt_reinit(t7xx_dev);
+
+ if (is_d3) {
+ t7xx_mhccif_init(t7xx_dev);
+ return t7xx_pci_pm_reinit(t7xx_dev);
+ }
+
+ return 0;
+}
+
+static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
+{
+ struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret = -EINVAL;
+
+ switch (event) {
+ case FSM_CMD_STOP:
+ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ break;
+
+ case FSM_CMD_START:
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret)
+ dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);
+
+ return ret;
+}
+
+static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity *entity;
+ u32 prev_state;
+ int ret = 0;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return 0;
+ }
+
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+ prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);
+
+ if (state_check) {
+ /* For D3/L3 resume, the device could boot so quickly that the
+ * initial value of the dummy register might be overwritten.
+		 * Identify a fresh boot by checking whether the ATR source
+		 * address register is still uninitialized.
+ */
+ u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
+ ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
+ if (prev_state == PM_RESUME_REG_STATE_L3 ||
+ (prev_state == PM_RESUME_REG_STATE_INIT &&
+ atr_reg_val == ATR_SRC_ADDR_INVALID)) {
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ ret = t7xx_pcie_reinit(t7xx_dev, true);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+ }
+
+ if (prev_state == PM_RESUME_REG_STATE_EXP ||
+ prev_state == PM_RESUME_REG_STATE_L2_EXP) {
+ if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
+ ret = t7xx_pcie_reinit(t7xx_dev, false);
+ if (ret)
+ return ret;
+ }
+
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+
+ t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_EXCEPTION_INIT |
+ D2H_INT_EXCEPTION_INIT_DONE |
+ D2H_INT_EXCEPTION_CLEARQ_DONE |
+ D2H_INT_EXCEPTION_ALLQ_RESET |
+ D2H_INT_PORT_ENUM);
+
+ return ret;
+ }
+
+ if (prev_state == PM_RESUME_REG_STATE_L2) {
+ ret = t7xx_pcie_reinit(t7xx_dev, false);
+ if (ret)
+ return ret;
+
+ } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
+ prev_state != PM_RESUME_REG_STATE_INIT) {
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ return 0;
+ }
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ t7xx_wait_pm_config(t7xx_dev);
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->resume_early)
+ entity->resume_early(t7xx_dev, entity->entity_param);
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->resume) {
+ ret = entity->resume(t7xx_dev, entity->entity_param);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
+ entity->id, ret);
+ }
+ }
+
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+
+ return ret;
+}
+
+static int t7xx_pci_pm_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct t7xx_pci_dev *t7xx_dev;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+
+ return 0;
+}
+
+static void t7xx_pci_shutdown(struct pci_dev *pdev)
+{
+ __t7xx_pci_pm_suspend(pdev);
+}
+
+static int t7xx_pci_pm_suspend(struct device *dev)
+{
+ return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int t7xx_pci_pm_resume(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
+}
+
+static int t7xx_pci_pm_thaw(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
+}
+
+static int t7xx_pci_pm_runtime_suspend(struct device *dev)
+{
+ return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int t7xx_pci_pm_runtime_resume(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
+}
+
+static const struct dev_pm_ops t7xx_pci_pm_ops = {
+ .suspend = t7xx_pci_pm_suspend,
+ .resume = t7xx_pci_pm_resume,
+ .resume_noirq = t7xx_pci_pm_resume_noirq,
+ .freeze = t7xx_pci_pm_suspend,
+ .thaw = t7xx_pci_pm_thaw,
+ .poweroff = t7xx_pci_pm_suspend,
+ .restore = t7xx_pci_pm_resume,
+ .restore_noirq = t7xx_pci_pm_resume_noirq,
+ .runtime_suspend = t7xx_pci_pm_runtime_suspend,
+ .runtime_resume = t7xx_pci_pm_runtime_resume
+};
+
+static int t7xx_request_irq(struct pci_dev *pdev)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ int ret = 0, i;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+
+ for (i = 0; i < EXT_INT_NUM; i++) {
+ const char *irq_descr;
+ int irq_vec;
+
+ if (!t7xx_dev->intr_handler[i])
+ continue;
+
+ irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
+ dev_driver_string(&pdev->dev), i);
+ if (!irq_descr) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ irq_vec = pci_irq_vector(pdev, i);
+ ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
+ t7xx_dev->intr_thread[i], 0, irq_descr,
+ t7xx_dev->callback_param[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
+ break;
+ }
+ }
+
+ if (ret) {
+ while (i--) {
+ if (!t7xx_dev->intr_handler[i])
+ continue;
+
+ free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
+ }
+ }
+
+ return ret;
+}
+
+static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct pci_dev *pdev = t7xx_dev->pdev;
+ int ret;
+
+	/* Only 6 interrupts are used, but the HW design requires a power-of-two IRQ allocation */
+ ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
+ return ret;
+ }
+
+ ret = t7xx_request_irq(pdev);
+ if (ret) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
+ return 0;
+}
+
+static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ int ret, i;
+
+ if (!t7xx_dev->pdev->msix_cap)
+ return -EINVAL;
+
+ ret = t7xx_setup_msix(t7xx_dev);
+ if (ret)
+ return ret;
+
+ /* IPs enable interrupts when ready */
+ for (i = 0; i < EXT_INT_NUM; i++)
+ t7xx_pcie_mac_set_int(t7xx_dev, i);
+
+ return 0;
+}
+
+static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
+ INFRACFG_AO_DEV_CHIP -
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+}
+
+static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ int ret;
+
+ t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
+ if (!t7xx_dev)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, t7xx_dev);
+ t7xx_dev->pdev = pdev;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
+ pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
+ t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
+
+ ret = t7xx_pci_pm_init(t7xx_dev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_atr_init(t7xx_dev);
+ t7xx_pci_infracfg_ao_calc(t7xx_dev);
+ t7xx_mhccif_init(t7xx_dev);
+
+ ret = t7xx_md_init(t7xx_dev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+
+ ret = t7xx_interrupt_init(t7xx_dev);
+ if (ret) {
+ t7xx_md_exit(t7xx_dev);
+ return ret;
+ }
+
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+
+ return 0;
+}
+
+static void t7xx_pci_remove(struct pci_dev *pdev)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ int i;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ t7xx_md_exit(t7xx_dev);
+
+ for (i = 0; i < EXT_INT_NUM; i++) {
+ if (!t7xx_dev->intr_handler[i])
+ continue;
+
+ free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
+ }
+
+ pci_free_irq_vectors(t7xx_dev->pdev);
+}
+
+static const struct pci_device_id t7xx_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
+
+static struct pci_driver t7xx_pci_driver = {
+ .name = "mtk_t7xx",
+ .id_table = t7xx_pci_table,
+ .probe = t7xx_pci_probe,
+ .remove = t7xx_pci_remove,
+ .driver.pm = &t7xx_pci_pm_ops,
+ .shutdown = t7xx_pci_shutdown,
+};
+
+module_pci_driver(t7xx_pci_driver);
+
+MODULE_AUTHOR("MediaTek Inc");
+MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
new file mode 100644
index 000000000000..50b37056ce5a
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ */
+
+#ifndef __T7XX_PCI_H__
+#define __T7XX_PCI_H__
+
+#include <linux/completion.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "t7xx_reg.h"
+
+/* struct t7xx_addr_base - holds base addresses
+ * @pcie_mac_ireg_base: PCIe MAC register base
+ * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers
+ * @pcie_dev_reg_trsl_addr: used to calculate the register base address
+ * @infracfg_ao_base: base address used in CLDMA reset operations
+ * @mhccif_rc_base: host view of MHCCIF rc base addr
+ */
+struct t7xx_addr_base {
+ void __iomem *pcie_mac_ireg_base;
+ void __iomem *pcie_ext_reg_base;
+ u32 pcie_dev_reg_trsl_addr;
+ void __iomem *infracfg_ao_base;
+ void __iomem *mhccif_rc_base;
+};
+
+typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
+
+/* struct t7xx_pci_dev - MTK device context structure
+ * @intr_handler: array of handler function for request_threaded_irq
+ * @intr_thread: array of thread_fn for request_threaded_irq
+ * @callback_param: array of cookie passed back to interrupt functions
+ * @pdev: PCI device
+ * @base_addr: memory base addresses of HW components
+ * @md: modem interface
+ * @ccmni_ctlb: context structure used to control the network data path
+ * @rgu_pci_irq_en: RGU callback ISR registered and active
+ * @md_pm_entities: list of pm entities
+ * @md_pm_entity_mtx: protects md_pm_entities list
+ * @pm_sr_ack: ack from the device for a suspend or resume request
+ * @md_pm_state: state for resume/suspend
+ * @md_pm_lock: protects PCIe sleep lock
+ * @sleep_disable_count: PCIe L1.2 lock counter
+ * @sleep_lock_acquire: indicates that sleep has been disabled
+ */
+struct t7xx_pci_dev {
+ t7xx_intr_callback intr_handler[EXT_INT_NUM];
+ t7xx_intr_callback intr_thread[EXT_INT_NUM];
+ void *callback_param[EXT_INT_NUM];
+ struct pci_dev *pdev;
+ struct t7xx_addr_base base_addr;
+ struct t7xx_modem *md;
+ struct t7xx_ccmni_ctrl *ccmni_ctlb;
+ bool rgu_pci_irq_en;
+
+ /* Low Power Items */
+ struct list_head md_pm_entities;
+ struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */
+ struct completion pm_sr_ack;
+ atomic_t md_pm_state;
+ spinlock_t md_pm_lock; /* Protects PCI resource lock */
+ unsigned int sleep_disable_count;
+ struct completion sleep_lock_acquire;
+};
+
+enum t7xx_pm_id {
+ PM_ENTITY_ID_CTRL1,
+ PM_ENTITY_ID_CTRL2,
+ PM_ENTITY_ID_DATA,
+ PM_ENTITY_ID_INVALID
+};
+
+/* struct md_pm_entity - device power management entity
+ * @entity: list of PM Entities
+ * @suspend: callback invoked before sending D3 request to device
+ * @suspend_late: callback invoked after getting D3 ACK from device
+ * @resume_early: callback invoked before sending the resume request to device
+ * @resume: callback invoked after getting resume ACK from device
+ * @id: unique PM entity identifier
+ * @entity_param: parameter passed to the registered callbacks
+ *
+ * This structure is used to indicate PM operations required by internal
+ * HW modules such as CLDMA and DPMA.
+ */
+struct md_pm_entity {
+ struct list_head entity;
+ int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ enum t7xx_pm_id id;
+ void *entity_param;
+};
+
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
+int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
+void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_PCI_H__ */
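
struct md_pm_entity is how internal HW modules take part in the suspend/resume walk performed by __t7xx_pci_pm_suspend() and __t7xx_pci_pm_resume(). A hypothetical registration, with invented my_hw_* names standing in for a real module such as CLDMA or DPMAIF:

/* Sketch only; my_hw_suspend/my_hw_resume are not driver functions */
static int my_hw_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	/* Quiesce DMA and mask the module's interrupts before D3 entry */
	return 0;
}

static int my_hw_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	/* Restore queues once the device has acknowledged resume */
	return 0;
}

static struct md_pm_entity my_pm_entity = {
	.suspend = my_hw_suspend,
	.resume = my_hw_resume,
	.id = PM_ENTITY_ID_DATA,
};

static int my_hw_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_pci_pm_entity_register(t7xx_dev, &my_pm_entity);
}

Registration fails with -EEXIST if an entity with the same id is already on the list, which keeps the per-module hooks unique.
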
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
new file mode 100644
index 000000000000..76da4c15e3de
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_reg.h"
+
+#define T7XX_PCIE_REG_BAR 2
+#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0
+#define T7XX_PCIE_REG_TABLE_NUM 0
+#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0
+
+#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0
+#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2
+#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0
+#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0
+#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0
+#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1
+#define T7XX_PCIE_DEV_DMA_SIZE 0
+
+enum t7xx_atr_src_port {
+ ATR_SRC_PCI_WIN0,
+ ATR_SRC_PCI_WIN1,
+ ATR_SRC_AXIS_0,
+ ATR_SRC_AXIS_1,
+ ATR_SRC_AXIS_2,
+ ATR_SRC_AXIS_3,
+};
+
+enum t7xx_atr_dst_port {
+ ATR_DST_PCI_TRX,
+ ATR_DST_PCI_CONFIG,
+ ATR_DST_AXIM_0 = 4,
+ ATR_DST_AXIM_1,
+ ATR_DST_AXIM_2,
+ ATR_DST_AXIM_3,
+};
+
+struct t7xx_atr_config {
+ u64 src_addr;
+ u64 trsl_addr;
+ u64 size;
+ u32 port;
+ u32 table;
+ enum t7xx_atr_dst_port trsl_id;
+ u32 transparent;
+};
+
+static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port)
+{
+ void __iomem *reg;
+ int i, offset;
+
+ for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
+ offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
+ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+ iowrite64(0, reg);
+ }
+}
+
+static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ void __iomem *pbase = IREG_BASE(t7xx_dev);
+ int atr_size, pos, offset;
+ void __iomem *reg;
+ u64 value;
+
+ if (cfg->transparent) {
+ /* No address conversion is performed */
+ atr_size = ATR_TRANSPARENT_SIZE;
+ } else {
+ if (cfg->src_addr & (cfg->size - 1)) {
+ dev_err(dev, "Source address is not aligned to size\n");
+ return -EINVAL;
+ }
+
+ if (cfg->trsl_addr & (cfg->size - 1)) {
+			dev_err(dev, "Translation address %llx is not aligned, size mask %llx\n",
+ cfg->trsl_addr, cfg->size - 1);
+ return -EINVAL;
+ }
+
+ pos = __ffs64(cfg->size);
+
+ /* HW calculates the address translation space as 2^(atr_size + 1) */
+ atr_size = pos - 1;
+ }
+
+ offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table;
+
+ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
+ value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
+ iowrite64(value, reg);
+
+ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
+ iowrite32(cfg->trsl_id, reg);
+
+ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+ value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
+ iowrite64(value, reg);
+
+ /* Ensure ATR is set */
+ ioread64(reg);
+ return 0;
+}
+
+/**
+ * t7xx_pcie_mac_atr_init() - Initialize address translation.
+ * @t7xx_dev: MTK device.
+ *
+ * Setup ATR for ports & device.
+ */
+void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_atr_config cfg;
+ u32 i;
+
+ /* Disable for all ports */
+ for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++)
+ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i);
+
+ memset(&cfg, 0, sizeof(cfg));
+ /* Config ATR for RC to access device's register */
+ cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR);
+ cfg.size = T7XX_PCIE_REG_SIZE_CHIP;
+ cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
+ cfg.port = T7XX_PCIE_REG_PORT;
+ cfg.table = T7XX_PCIE_REG_TABLE_NUM;
+ cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT;
+ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
+ t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
+
+ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
+
+ /* Config ATR for EP to access RC's memory */
+ for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) {
+ cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR;
+ cfg.size = T7XX_PCIE_DEV_DMA_SIZE;
+ cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR;
+ cfg.port = i;
+ cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM;
+ cfg.trsl_id = ATR_DST_PCI_TRX;
+ cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT;
+ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
+ t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
+ }
+}
+
+/**
+ * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts.
+ * @t7xx_dev: MTK device.
+ * @enable: Enable/disable.
+ *
+ * Enable or disable device interrupts.
+ */
+static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+ u32 value;
+
+ value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
+
+ if (enable)
+ value &= ~ISTAT_HST_CTRL_DIS;
+ else
+ value |= ISTAT_HST_CTRL_DIS;
+
+ iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
+}
+
+void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_mac_enable_disable_int(t7xx_dev, true);
+}
+
+void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_mac_enable_disable_int(t7xx_dev, false);
+}
+
+/**
+ * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type.
+ * @t7xx_dev: MTK device.
+ * @int_type: Interrupt type.
+ * @clear: Clear/set.
+ *
+ * Clear or set device interrupt by type.
+ */
+static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev,
+ enum t7xx_int int_type, bool clear)
+{
+ void __iomem *reg;
+ u32 val;
+
+ if (clear)
+ reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0;
+ else
+ reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0;
+
+ val = BIT(EXT_INT_START + int_type);
+ iowrite32(val, reg);
+}
+
+void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+ t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true);
+}
+
+void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+ t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false);
+}
+
+/**
+ * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type.
+ * @t7xx_dev: MTK device.
+ * @int_type: Interrupt type.
+ *
+ * Clear the device interrupt status by type.
+ */
+void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
+{
+ void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0;
+ u32 val = BIT(EXT_INT_START + int_type);
+
+ iowrite32(val, reg);
+}
+
+/**
+ * t7xx_pcie_set_mac_msix_cfg() - Write MSI-X control configuration.
+ * @t7xx_dev: MTK device.
+ * @irq_count: Number of MSIX IRQ vectors.
+ *
+ * Write IRQ count to device.
+ */
+void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count)
+{
+ u32 val = ffs(irq_count) * 2 - 1;
+
+ iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX);
+}
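
The atr_size value programmed by t7xx_pcie_mac_atr_cfg() is the window-size exponent minus one, because the HW maps 2^(atr_size + 1) bytes. A worked example, assuming a 64 KiB power-of-two window:

	u64 size = 0x10000;		/* 64 KiB window */
	int pos = __ffs64(size);	/* 16 */
	int atr_size = pos - 1;		/* 15, so HW maps 2^(15 + 1) = 64 KiB */
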
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.h b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h
new file mode 100644
index 000000000000..50336fa7e8c2
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ *
+ * Contributors:
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_PCIE_MAC_H__
+#define __T7XX_PCIE_MAC_H__
+
+#include "t7xx_pci.h"
+#include "t7xx_reg.h"
+
+#define IREG_BASE(t7xx_dev) ((t7xx_dev)->base_addr.pcie_mac_ireg_base)
+
+void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
+void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
+void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
+void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count);
+
+#endif /* __T7XX_PCIE_MAC_H__ */
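
The MSI-X configuration value written by t7xx_pcie_set_mac_msix_cfg() encodes the vector count as ffs(irq_count) * 2 - 1. A worked example, assuming irq_count is 8 (the actual EXT_INT_NUM value is defined outside this patch):

	unsigned int irq_count = 8;		/* assumed, for illustration */
	u32 val = ffs(irq_count) * 2 - 1;	/* ffs(8) = 4, so val = 7 */
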
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
new file mode 100644
index 000000000000..dc4133eb433a
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#ifndef __T7XX_PORT_H__
+#define __T7XX_PORT_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/wwan.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_pci.h"
+
+#define PORT_CH_ID_MASK GENMASK(7, 0)
+
+/* Channel ID and Message ID definitions.
+ * The channel number consists of peer_id (15:12) and channel_id (11:0).
+ * peer_id:
+ * 0: reserved, 1: to sAP, 2: to MD
+ */
+enum port_ch {
+ /* to MD */
+ PORT_CH_CONTROL_RX = 0x2000,
+ PORT_CH_CONTROL_TX = 0x2001,
+ PORT_CH_UART1_RX = 0x2006, /* META */
+ PORT_CH_UART1_TX = 0x2008,
+ PORT_CH_UART2_RX = 0x200a, /* AT */
+ PORT_CH_UART2_TX = 0x200c,
+ PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */
+ PORT_CH_MD_LOG_TX = 0x202b,
+ PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */
+ PORT_CH_LB_IT_TX = 0x203f,
+ PORT_CH_STATUS_RX = 0x2043, /* Status events */
+ PORT_CH_MIPC_RX = 0x20ce, /* MIPC */
+ PORT_CH_MIPC_TX = 0x20cf,
+ PORT_CH_MBIM_RX = 0x20d0,
+ PORT_CH_MBIM_TX = 0x20d1,
+ PORT_CH_DSS0_RX = 0x20d2,
+ PORT_CH_DSS0_TX = 0x20d3,
+ PORT_CH_DSS1_RX = 0x20d4,
+ PORT_CH_DSS1_TX = 0x20d5,
+ PORT_CH_DSS2_RX = 0x20d6,
+ PORT_CH_DSS2_TX = 0x20d7,
+ PORT_CH_DSS3_RX = 0x20d8,
+ PORT_CH_DSS3_TX = 0x20d9,
+ PORT_CH_DSS4_RX = 0x20da,
+ PORT_CH_DSS4_TX = 0x20db,
+ PORT_CH_DSS5_RX = 0x20dc,
+ PORT_CH_DSS5_TX = 0x20dd,
+ PORT_CH_DSS6_RX = 0x20de,
+ PORT_CH_DSS6_TX = 0x20df,
+ PORT_CH_DSS7_RX = 0x20e0,
+ PORT_CH_DSS7_TX = 0x20e1,
+};
+
+struct t7xx_port;
+struct port_ops {
+ int (*init)(struct t7xx_port *port);
+ int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb);
+ void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state);
+ void (*uninit)(struct t7xx_port *port);
+ int (*enable_chl)(struct t7xx_port *port);
+ int (*disable_chl)(struct t7xx_port *port);
+};
+
+struct t7xx_port_conf {
+ enum port_ch tx_ch;
+ enum port_ch rx_ch;
+ unsigned char txq_index;
+ unsigned char rxq_index;
+ unsigned char txq_exp_index;
+ unsigned char rxq_exp_index;
+ enum cldma_id path_id;
+ struct port_ops *ops;
+ char *name;
+ enum wwan_port_type port_type;
+};
+
+struct t7xx_port {
+ /* Members not initialized in definition */
+ const struct t7xx_port_conf *port_conf;
+ struct wwan_port *wwan_port;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct device *dev;
+ u16 seq_nums[2]; /* TX/RX sequence numbers */
+ atomic_t usage_cnt;
+ struct list_head entry;
+ struct list_head queue_entry;
+ /* TX and RX flows are asymmetric since ports are multiplexed on
+ * queues.
+ *
+	 * TX: data blocks are sent directly to a queue. A port does not
+	 * maintain a TX list; it only provides a wait_queue_head for
+	 * blocking writes.
+	 *
+	 * RX: each port uses an RX list to hold packets,
+	 * allowing the modem to dispatch RX packets as quickly as possible.
+ */
+ struct sk_buff_head rx_skb_list;
+ spinlock_t port_update_lock; /* Protects port configuration */
+ wait_queue_head_t rx_wq;
+ int rx_length_th;
+ bool chan_enable;
+ struct task_struct *thread;
+};
+
+struct sk_buff *t7xx_port_alloc_skb(int payload);
+struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
+int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
+ unsigned int ex_msg);
+int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+ unsigned int ex_msg);
+
+#endif /* __T7XX_PORT_H__ */
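
The port_ops callbacks above define the per-port contract; ctl_port_ops in the next file is an in-tree implementation. A hypothetical minimal port, with invented dummy_* names, reduces to:

/* Sketch only; dummy_* names are invented for illustration */
static int dummy_port_init(struct t7xx_port *port)
{
	port->rx_length_th = 16;	/* bound the RX backlog */
	return 0;
}

static struct port_ops dummy_port_ops = {
	.init = dummy_port_init,
	.recv_skb = t7xx_port_enqueue_skb,	/* reuse the common enqueue */
};
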
diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
new file mode 100644
index 000000000000..68430b130a67
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define PORT_MSG_VERSION GENMASK(31, 16)
+#define PORT_MSG_PRT_CNT GENMASK(15, 0)
+
+struct port_msg {
+ __le32 head_pattern;
+ __le32 info;
+ __le32 tail_pattern;
+ __le32 data[];
+};
+
+static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = t7xx_ctrl_alloc_skb(0);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg);
+ if (ret)
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl,
+ struct sk_buff *skb)
+{
+ struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ enum md_state md_state;
+ int ret = -EINVAL;
+
+ md_state = t7xx_fsm_get_md_state(ctl);
+ if (md_state != MD_STATE_EXCEPTION) {
+		dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n",
+			le32_to_cpu(ctrl_msg_h->ex_msg), md_state);
+ return -EINVAL;
+ }
+
+ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
+ case CTL_ID_MD_EX:
+ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) {
+			dev_err(dev, "Receive invalid MD_EX %x\n", le32_to_cpu(ctrl_msg_h->ex_msg));
+ break;
+ }
+
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID);
+ if (ret) {
+ dev_err(dev, "Failed to send exception message to modem\n");
+ break;
+ }
+
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0);
+ if (ret)
+			dev_err(dev, "Failed to append Modem Exception event\n");
+
+ break;
+
+ case CTL_ID_MD_EX_ACK:
+ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) {
+			dev_err(dev, "Receive invalid MD_EX_ACK %x\n", le32_to_cpu(ctrl_msg_h->ex_msg));
+ break;
+ }
+
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0);
+ if (ret)
+			dev_err(dev, "Failed to append Modem Exception Received event\n");
+
+ break;
+
+ case CTL_ID_MD_EX_PASS:
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0);
+ if (ret)
+			dev_err(dev, "Failed to append Modem Exception Passed event\n");
+
+ break;
+
+ case CTL_ID_DRV_VER_ERROR:
+ dev_err(dev, "AP/MD driver version mismatch\n");
+ }
+
+ return ret;
+}
+
+/**
+ * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes.
+ * @md: Modem context.
+ * @msg: Message.
+ *
+ * Used to control create/remove device node.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EFAULT - Message check failure.
+ */
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ unsigned int version, port_count, i;
+ struct port_msg *port_msg = msg;
+
+ version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info));
+ if (version != PORT_ENUM_VER ||
+ le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN ||
+ le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) {
+ dev_err(dev, "Invalid port control message %x:%x:%x\n",
+ version, le32_to_cpu(port_msg->head_pattern),
+ le32_to_cpu(port_msg->tail_pattern));
+ return -EFAULT;
+ }
+
+ port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info));
+ for (i = 0; i < port_count; i++) {
+ u32 port_info = le32_to_cpu(port_msg->data[i]);
+ unsigned int ch_id;
+ bool en_flag;
+
+ ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info);
+ en_flag = port_info & PORT_INFO_ENFLG;
+ if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag))
+ dev_dbg(dev, "Port:%x not found\n", ch_id);
+ }
+
+ return 0;
+}
+
+static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+ struct ctrl_msg_header *ctrl_msg_h;
+ int ret = 0;
+
+ ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
+ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
+ case CTL_ID_HS2_MSG:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+
+ if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
+ le32_to_cpu(ctrl_msg_h->data_length));
+ if (ret)
+				dev_err(port->dev, "Failed to append Handshake 2 event\n");
+ }
+
+ dev_kfree_skb_any(skb);
+ break;
+
+ case CTL_ID_MD_EX:
+ case CTL_ID_MD_EX_ACK:
+ case CTL_ID_MD_EX_PASS:
+ case CTL_ID_DRV_VER_ERROR:
+ ret = fsm_ee_message_handler(port, ctl, skb);
+ dev_kfree_skb_any(skb);
+ break;
+
+ case CTL_ID_PORT_ENUM:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+ ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data);
+ if (!ret)
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0);
+ else
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM,
+ PORT_ENUM_VER_MISMATCH);
+
+ break;
+
+ default:
+ ret = -EINVAL;
+ dev_err(port->dev, "Unknown control message ID to FSM %x\n",
+ le32_to_cpu(ctrl_msg_h->ctrl_msg_id));
+ break;
+ }
+
+ if (ret)
+		dev_err(port->dev, "%s control message handling error: %d\n", port_conf->name, ret);
+
+ return ret;
+}
+
+static int port_ctl_rx_thread(void *arg)
+{
+ while (!kthread_should_stop()) {
+ struct t7xx_port *port = arg;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ if (skb_queue_empty(&port->rx_skb_list) &&
+ wait_event_interruptible_locked_irq(port->rx_wq,
+ !skb_queue_empty(&port->rx_skb_list) ||
+ kthread_should_stop())) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+ continue;
+ }
+ if (kthread_should_stop()) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+ break;
+ }
+ skb = __skb_dequeue(&port->rx_skb_list);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ control_msg_handler(port, skb);
+ }
+
+ return 0;
+}
+
+static int port_ctl_init(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name);
+ if (IS_ERR(port->thread)) {
+ dev_err(port->dev, "Failed to start port control thread\n");
+ return PTR_ERR(port->thread);
+ }
+
+ port->rx_length_th = CTRL_QUEUE_MAXLEN;
+ return 0;
+}
+
+static void port_ctl_uninit(struct t7xx_port *port)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ if (port->thread)
+ kthread_stop(port->thread);
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ port->rx_length_th = 0;
+ while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL)
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+}
+
+struct port_ops ctl_port_ops = {
+ .init = port_ctl_init,
+ .recv_skb = t7xx_port_enqueue_skb,
+ .uninit = port_ctl_uninit,
+};
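
The info word checked by t7xx_port_enum_msg_handler() packs a version in bits 31:16 and a port count in bits 15:0. A decode sketch using a made-up value:

	u32 info = 0x00020003;						/* invented example */
	unsigned int version = FIELD_GET(PORT_MSG_VERSION, info);	/* 0x0002 */
	unsigned int port_count = FIELD_GET(PORT_MSG_PRT_CNT, info);	/* 3 ports */
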
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
new file mode 100644
index 000000000000..d4de047ff0d4
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/wwan.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define Q_IDX_CTRL 0
+#define Q_IDX_MBIM 2
+#define Q_IDX_AT_CMD 5
+
+#define INVALID_SEQ_NUM GENMASK(15, 0)
+
+#define for_each_proxy_port(i, p, proxy) \
+ for (i = 0, (p) = &(proxy)->ports[i]; \
+ i < (proxy)->port_count; \
+ i++, (p) = &(proxy)->ports[i])
+
+static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UART2_TX,
+ .rx_ch = PORT_CH_UART2_RX,
+ .txq_index = Q_IDX_AT_CMD,
+ .rxq_index = Q_IDX_AT_CMD,
+ .txq_exp_index = 0xff,
+ .rxq_exp_index = 0xff,
+ .path_id = CLDMA_ID_MD,
+ .ops = &wwan_sub_port_ops,
+ .name = "AT",
+ .port_type = WWAN_PORT_AT,
+ }, {
+ .tx_ch = PORT_CH_MBIM_TX,
+ .rx_ch = PORT_CH_MBIM_RX,
+ .txq_index = Q_IDX_MBIM,
+ .rxq_index = Q_IDX_MBIM,
+ .path_id = CLDMA_ID_MD,
+ .ops = &wwan_sub_port_ops,
+ .name = "MBIM",
+ .port_type = WWAN_PORT_MBIM,
+ }, {
+ .tx_ch = PORT_CH_CONTROL_TX,
+ .rx_ch = PORT_CH_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_MD,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ctrl",
+ },
+};
+
+static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
+{
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ port_conf = port->port_conf;
+ if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
+ return port;
+ }
+
+ return NULL;
+}
+
+static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
+{
+ u32 status = le32_to_cpu(ccci_h->status);
+ u16 seq_num, next_seq_num;
+ bool assert_bit;
+
+ seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
+ next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
+ assert_bit = status & CCCI_H_AST_BIT;
+ if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
+ return next_seq_num;
+
+ if (seq_num != port->seq_nums[MTK_RX])
+ dev_warn_ratelimited(port->dev,
+ "seq num out-of-order %u != %u (header %X, len %X)\n",
+ seq_num, port->seq_nums[MTK_RX],
+ le32_to_cpu(ccci_h->packet_header),
+ le32_to_cpu(ccci_h->packet_len));
+
+ return next_seq_num;
+}
+
+void t7xx_port_proxy_reset(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
+ port->seq_nums[MTK_TX] = 0;
+ }
+}
+
+static int t7xx_port_get_queue_no(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+
+ return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
+ port_conf->txq_exp_index : port_conf->txq_index;
+}
+
+static void t7xx_port_struct_init(struct t7xx_port *port)
+{
+ INIT_LIST_HEAD(&port->entry);
+ INIT_LIST_HEAD(&port->queue_entry);
+ skb_queue_head_init(&port->rx_skb_list);
+ init_waitqueue_head(&port->rx_wq);
+ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
+ port->seq_nums[MTK_TX] = 0;
+ atomic_set(&port->usage_cnt, 0);
+}
+
+struct sk_buff *t7xx_port_alloc_skb(int payload)
+{
+ struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);
+
+ if (skb)
+ skb_reserve(skb, sizeof(struct ccci_header));
+
+ return skb;
+}
+
+struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
+{
+ struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));
+
+ if (skb)
+ skb_reserve(skb, sizeof(struct ctrl_msg_header));
+
+ return skb;
+}
+
+/**
+ * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
+ * @port: port context.
+ * @skb: received skb.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed.
+ */
+int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ unsigned long flags;
+
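+ /* The rx_wq waitqueue lock also serializes access to rx_skb_list */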
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ if (port->rx_skb_list.qlen >= port->rx_length_th) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ return -ENOBUFS;
+ }
+ __skb_queue_tail(&port->rx_skb_list, skb);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ wake_up_all(&port->rx_wq);
+ return 0;
+}
+
+static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ struct cldma_ctrl *md_ctrl;
+ int ret, tx_qno;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ tx_qno = t7xx_port_get_queue_no(port);
+ ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
+ if (ret)
+ dev_err(port->dev, "Failed to send skb: %d\n", ret);
+
+ return ret;
+}
+
+static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
+ unsigned int pkt_header, unsigned int ex_msg)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct ccci_header *ccci_h;
+ u32 status;
+ int ret;
+
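+ /* Build the CCCI header in the headroom reserved by t7xx_port_alloc_skb() */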
+ ccci_h = skb_push(skb, sizeof(*ccci_h));
+ status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
+ FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
+ ccci_h->status = cpu_to_le32(status);
+ ccci_h->packet_header = cpu_to_le32(pkt_header);
+ ccci_h->packet_len = cpu_to_le32(skb->len);
+ ccci_h->ex_msg = cpu_to_le32(ex_msg);
+
+ ret = t7xx_port_send_raw_skb(port, skb);
+ if (ret)
+ return ret;
+
+ port->seq_nums[MTK_TX]++;
+ return 0;
+}
+
+int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+ unsigned int ex_msg)
+{
+ struct ctrl_msg_header *ctrl_msg_h;
+ unsigned int msg_len = skb->len;
+ u32 pkt_header = 0;
+
+ ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
+ ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
+ ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
+ ctrl_msg_h->data_length = cpu_to_le32(msg_len);
+
+ if (!msg_len)
+ pkt_header = CCCI_HEADER_NO_DATA;
+
+ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
+}
+
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
+ unsigned int ex_msg)
+{
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+ unsigned int fsm_state;
+
+ fsm_state = t7xx_fsm_get_ctl_state(ctl);
+ if (fsm_state != FSM_STATE_PRE_START) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ enum md_state md_state = t7xx_fsm_get_md_state(ctl);
+
+ switch (md_state) {
+ case MD_STATE_EXCEPTION:
+ if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
+ return -EBUSY;
+ break;
+
+ case MD_STATE_WAITING_FOR_HS1:
+ case MD_STATE_WAITING_FOR_HS2:
+ case MD_STATE_STOPPED:
+ case MD_STATE_WAITING_TO_STOP:
+ case MD_STATE_INVALID:
+ return -ENODEV;
+
+ default:
+ break;
+ }
+ }
+
+ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
+}
+
+static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
+ INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);
+
+ for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
+ for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
+ INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
+ }
+
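+ /* Index every port by its RX channel ID and by its CLDMA queue */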
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ enum cldma_id path_id = port_conf->path_id;
+ u8 ch_id;
+
+ ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
+ list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
+ list_add_tail(&port->queue_entry,
+ &port_prox->queue_ports[path_id][port_conf->rxq_index]);
+ }
+}
+
+static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
+ struct cldma_queue *queue, u16 channel)
+{
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ struct list_head *port_list;
+ struct t7xx_port *port;
+ u8 ch_id;
+
+ ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
+ port_list = &port_prox->rx_ch_ports[ch_id];
+ list_for_each_entry(port, port_list, entry) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (queue->md_ctrl->hif_id == port_conf->path_id &&
+ channel == port_conf->rx_ch)
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * t7xx_port_proxy_recv_skb() - Dispatch received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0 - Packet consumed.
+ ** -ERROR - Failed to process skb.
+ */
+static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
+ struct device *dev = queue->md_ctrl->dev;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ u16 seq_num, channel;
+ int ret;
+
+ channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
+ if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
+ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
+ goto drop_skb;
+ }
+
+ port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
+ if (!port) {
+ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
+ goto drop_skb;
+ }
+
+ seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
+ port_conf = port->port_conf;
+ skb_pull(skb, sizeof(*ccci_h));
+
+ ret = port_conf->ops->recv_skb(port, skb);
+ /* Error indicates to try again later */
+ if (ret) {
+ skb_push(skb, sizeof(*ccci_h));
+ return ret;
+ }
+
+ port->seq_nums[MTK_RX] = seq_num;
+ return 0;
+
+drop_skb:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+/**
+ * t7xx_port_proxy_md_status_notify() - Notify all ports of state.
+ * @port_prox: The port_proxy pointer.
+ * @state: The new modem state.
+ *
+ * Called by t7xx_fsm. Dispatches the modem state transition to every port
+ * that needs to track it.
+ */
+void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->ops->md_state_notify)
+ port_conf->ops->md_state_notify(port, state);
+ }
+}
+
+static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ t7xx_port_struct_init(port);
+
+ if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
+ md->core_md.ctl_port = port;
+
+ port->t7xx_dev = md->t7xx_dev;
+ port->dev = &md->t7xx_dev->pdev->dev;
+ spin_lock_init(&port->port_update_lock);
+ port->chan_enable = false;
+
+ if (port_conf->ops->init)
+ port_conf->ops->init(port);
+ }
+
+ t7xx_proxy_setup_ch_mapping(port_prox);
+}
+
+static int t7xx_proxy_alloc(struct t7xx_modem *md)
+{
+ unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct port_proxy *port_prox;
+ int i;
+
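+ /* A single allocation covers the proxy and its flexible array of ports */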
+ port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ GFP_KERNEL);
+ if (!port_prox)
+ return -ENOMEM;
+
+ md->port_prox = port_prox;
+ port_prox->dev = dev;
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
+
+ port_prox->port_count = port_count;
+ t7xx_proxy_init_all_ports(md);
+ return 0;
+}
+
+/**
+ * t7xx_port_proxy_init() - Initialize ports.
+ * @md: Modem.
+ *
+ * Create all port instances.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from a failed sub-initialization.
+ */
+int t7xx_port_proxy_init(struct t7xx_modem *md)
+{
+ int ret;
+
+ ret = t7xx_proxy_alloc(md);
+ if (ret)
+ return ret;
+
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
+ return 0;
+}
+
+void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->ops->uninit)
+ port_conf->ops->uninit(port);
+ }
+}
+
+int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
+ bool en_flag)
+{
+ struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
+ const struct t7xx_port_conf *port_conf;
+
+ if (!port)
+ return -EINVAL;
+
+ port_conf = port->port_conf;
+
+ if (en_flag) {
+ if (port_conf->ops->enable_chl)
+ port_conf->ops->enable_chl(port);
+ } else {
+ if (port_conf->ops->disable_chl)
+ port_conf->ops->disable_chl(port);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
new file mode 100644
index 000000000000..bc1ff5c6c700
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_PORT_PROXY_H__
+#define __T7XX_PORT_PROXY_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_port.h"
+
+#define MTK_QUEUES 16
+#define RX_QUEUE_MAXLEN 32
+#define CTRL_QUEUE_MAXLEN 16
+
+struct port_proxy {
+ int port_count;
+ struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
+ struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
+ struct device *dev;
+ struct t7xx_port ports[];
+};
+
+struct ccci_header {
+ __le32 packet_header;
+ __le32 packet_len;
+ __le32 status;
+ __le32 ex_msg;
+};
+
+/* Coupled with HW - indicates if there is data following the CCCI header or not */
+#define CCCI_HEADER_NO_DATA 0xffffffff
+
+#define CCCI_H_AST_BIT BIT(31)
+#define CCCI_H_SEQ_FLD GENMASK(30, 16)
+#define CCCI_H_CHN_FLD GENMASK(15, 0)
+
+struct ctrl_msg_header {
+ __le32 ctrl_msg_id;
+ __le32 ex_msg;
+ __le32 data_length;
+};
+
+/* Control identification numbers for AP<->MD messages */
+#define CTL_ID_HS1_MSG 0x0
+#define CTL_ID_HS2_MSG 0x1
+#define CTL_ID_HS3_MSG 0x2
+#define CTL_ID_MD_EX 0x4
+#define CTL_ID_DRV_VER_ERROR 0x5
+#define CTL_ID_MD_EX_ACK 0x6
+#define CTL_ID_MD_EX_PASS 0x8
+#define CTL_ID_PORT_ENUM 0x9
+
+/* Modem exception check identification code - "EXCP" */
+#define MD_EX_CHK_ID 0x45584350
+/* Modem exception check acknowledge identification code - "EREC" */
+#define MD_EX_CHK_ACK_ID 0x45524543
+
+#define PORT_INFO_RSRVD GENMASK(31, 16)
+#define PORT_INFO_ENFLG BIT(15)
+#define PORT_INFO_CH_ID GENMASK(14, 0)
+
+#define PORT_ENUM_VER 0
+#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a
+#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5
+#define PORT_ENUM_VER_MISMATCH 0x00657272
+
+/* Port operations mapping */
+extern struct port_ops wwan_sub_port_ops;
+extern struct port_ops ctl_port_ops;
+
+void t7xx_port_proxy_reset(struct port_proxy *port_prox);
+void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
+int t7xx_port_proxy_init(struct t7xx_modem *md);
+void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state);
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
+int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
+ bool en_flag);
+
+#endif /* __T7XX_PORT_PROXY_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
new file mode 100644
index 000000000000..33931bfd78fd
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+static int t7xx_port_ctrl_start(struct wwan_port *port)
+{
+ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
+
+ if (atomic_read(&port_mtk->usage_cnt))
+ return -EBUSY;
+
+ atomic_inc(&port_mtk->usage_cnt);
+ return 0;
+}
+
+static void t7xx_port_ctrl_stop(struct wwan_port *port)
+{
+ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
+
+ atomic_dec(&port_mtk->usage_cnt);
+}
+
+static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_fsm_ctl *ctl;
+ enum md_state md_state;
+
+ len = skb->len;
+ if (!len || !port_private->chan_enable)
+ return -EINVAL;
+
+ port_conf = port_private->port_conf;
+ ctl = port_private->t7xx_dev->md->fsm_ctl;
+ md_state = t7xx_fsm_get_md_state(ctl);
+ if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
+ dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ port_conf->name, md_state);
+ return -ENODEV;
+ }
+
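+ /* Split the payload into chunks that, with a CCCI header, fit the CLDMA MTU */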
+ for (offset = 0; offset < len; offset += chunk_len) {
+ struct sk_buff *skb_ccci;
+ int ret;
+
+ chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
+ skb_ccci = t7xx_port_alloc_skb(chunk_len);
+ if (!skb_ccci)
+ return -ENOMEM;
+
+ skb_put_data(skb_ccci, skb->data + offset, chunk_len);
+ ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
+ if (ret) {
+ dev_kfree_skb_any(skb_ccci);
+ dev_err(port_private->dev, "Write error on %s port, %d\n",
+ port_conf->name, ret);
+ return ret;
+ }
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static const struct wwan_port_ops wwan_ops = {
+ .start = t7xx_port_ctrl_start,
+ .stop = t7xx_port_ctrl_stop,
+ .tx = t7xx_port_ctrl_tx,
+};
+
+static int t7xx_port_wwan_init(struct t7xx_port *port)
+{
+ port->rx_length_th = RX_QUEUE_MAXLEN;
+ return 0;
+}
+
+static void t7xx_port_wwan_uninit(struct t7xx_port *port)
+{
+ if (!port->wwan_port)
+ return;
+
+ port->rx_length_th = 0;
+ wwan_remove_port(port->wwan_port);
+ port->wwan_port = NULL;
+}
+
+static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ if (!atomic_read(&port->usage_cnt) || !port->chan_enable) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ dev_kfree_skb_any(skb);
+ dev_err_ratelimited(port->dev, "Port %s is not open, dropping packet\n",
+ port_conf->name);
+ /* The skb is consumed here; the caller must not access it. */
+ return 0;
+ }
+
+ wwan_port_rx(port->wwan_port, skb);
+ return 0;
+}
+
+static int t7xx_port_wwan_enable_chl(struct t7xx_port *port)
+{
+ spin_lock(&port->port_update_lock);
+ port->chan_enable = true;
+ spin_unlock(&port->port_update_lock);
+
+ return 0;
+}
+
+static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
+{
+ spin_lock(&port->port_update_lock);
+ port->chan_enable = false;
+ spin_unlock(&port->port_update_lock);
+
+ return 0;
+}
+
+static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (state != MD_STATE_READY)
+ return;
+
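+ /* Create the WWAN port on the first transition to READY; later
+ * notifications reuse it.
+ */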
+ if (!port->wwan_port) {
+ port->wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, port);
+ if (IS_ERR(port->wwan_port))
+ dev_err(port->dev, "Unable to create WWAN port %s\n", port_conf->name);
+ }
+}
+
+struct port_ops wwan_sub_port_ops = {
+ .init = t7xx_port_wwan_init,
+ .recv_skb = t7xx_port_wwan_recv_skb,
+ .uninit = t7xx_port_wwan_uninit,
+ .enable_chl = t7xx_port_wwan_enable_chl,
+ .disable_chl = t7xx_port_wwan_disable_chl,
+ .md_state_notify = t7xx_port_wwan_md_state_notify,
+};
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
new file mode 100644
index 000000000000..7c1b81091a0f
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_REG_H__
+#define __T7XX_REG_H__
+
+#include <linux/bits.h>
+
+/* Device base address offset */
+#define MHCCIF_RC_DEV_BASE 0x10024000
+
+#define REG_RC2EP_SW_BSY 0x04
+#define REG_RC2EP_SW_INT_START 0x08
+
+#define REG_RC2EP_SW_TCHNUM 0x0c
+#define H2D_CH_EXCEPTION_ACK 1
+#define H2D_CH_EXCEPTION_CLEARQ_ACK 2
+#define H2D_CH_DS_LOCK 3
+/* Channels 4-8 are reserved */
+#define H2D_CH_SUSPEND_REQ 9
+#define H2D_CH_RESUME_REQ 10
+#define H2D_CH_SUSPEND_REQ_AP 11
+#define H2D_CH_RESUME_REQ_AP 12
+#define H2D_CH_DEVICE_RESET 13
+#define H2D_CH_DRM_DISABLE_AP 14
+
+#define REG_EP2RC_SW_INT_STS 0x10
+#define REG_EP2RC_SW_INT_ACK 0x14
+#define REG_EP2RC_SW_INT_EAP_MASK 0x20
+#define REG_EP2RC_SW_INT_EAP_MASK_SET 0x30
+#define REG_EP2RC_SW_INT_EAP_MASK_CLR 0x40
+
+#define D2H_INT_DS_LOCK_ACK BIT(0)
+#define D2H_INT_EXCEPTION_INIT BIT(1)
+#define D2H_INT_EXCEPTION_INIT_DONE BIT(2)
+#define D2H_INT_EXCEPTION_CLEARQ_DONE BIT(3)
+#define D2H_INT_EXCEPTION_ALLQ_RESET BIT(4)
+#define D2H_INT_PORT_ENUM BIT(5)
+/* Bits 6-10 are reserved */
+#define D2H_INT_SUSPEND_ACK BIT(11)
+#define D2H_INT_RESUME_ACK BIT(12)
+#define D2H_INT_SUSPEND_ACK_AP BIT(13)
+#define D2H_INT_RESUME_ACK_AP BIT(14)
+#define D2H_INT_ASYNC_SAP_HK BIT(15)
+#define D2H_INT_ASYNC_MD_HK BIT(16)
+
+/* Register base */
+#define INFRACFG_AO_DEV_CHIP 0x10001000
+
+/* ATR setting */
+#define T7XX_PCIE_REG_TRSL_ADDR_CHIP 0x10000000
+#define T7XX_PCIE_REG_SIZE_CHIP 0x00400000
+
+/* Reset Generic Unit (RGU) */
+#define TOPRGU_CH_PCIE_IRQ_STA 0x1000790c
+
+#define ATR_PORT_OFFSET 0x100
+#define ATR_TABLE_OFFSET 0x20
+#define ATR_TABLE_NUM_PER_ATR 8
+#define ATR_TRANSPARENT_SIZE 0x3f
+
+/* PCIE_MAC_IREG Register Definition */
+
+#define ISTAT_HST_CTRL 0x01ac
+#define ISTAT_HST_CTRL_DIS BIT(0)
+
+#define T7XX_PCIE_MISC_CTRL 0x0348
+#define T7XX_PCIE_MISC_MAC_SLEEP_DIS BIT(7)
+
+#define T7XX_PCIE_CFG_MSIX 0x03ec
+#define ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR 0x0600
+#define ATR_PCIE_WIN0_T0_TRSL_ADDR 0x0608
+#define ATR_PCIE_WIN0_T0_TRSL_PARAM 0x0610
+#define ATR_PCIE_WIN0_ADDR_ALGMT GENMASK_ULL(63, 12)
+
+#define ATR_SRC_ADDR_INVALID 0x007f
+
+#define T7XX_PCIE_PM_RESUME_STATE 0x0d0c
+
+enum t7xx_pm_resume_state {
+ PM_RESUME_REG_STATE_L3,
+ PM_RESUME_REG_STATE_L1,
+ PM_RESUME_REG_STATE_INIT,
+ PM_RESUME_REG_STATE_EXP,
+ PM_RESUME_REG_STATE_L2,
+ PM_RESUME_REG_STATE_L2_EXP,
+};
+
+#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c
+#define MISC_STAGE_MASK GENMASK(2, 0)
+#define MISC_RESET_TYPE_PLDR BIT(26)
+#define MISC_RESET_TYPE_FLDR BIT(27)
+#define LINUX_STAGE 4
+
+#define T7XX_PCIE_RESOURCE_STATUS 0x0d28
+#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0)
+
+#define DISABLE_ASPM_LOWPWR 0x0e50
+#define ENABLE_ASPM_LOWPWR 0x0e54
+#define T7XX_L1_BIT(i) BIT((i) * 4 + 1)
+#define T7XX_L1_1_BIT(i) BIT((i) * 4 + 2)
+#define T7XX_L1_2_BIT(i) BIT((i) * 4 + 3)
+
+#define MSIX_ISTAT_HST_GRP0_0 0x0f00
+#define IMASK_HOST_MSIX_SET_GRP0_0 0x3000
+#define IMASK_HOST_MSIX_CLR_GRP0_0 0x3080
+#define EXT_INT_START 24
+#define EXT_INT_NUM 8
+#define MSIX_MSK_SET_ALL GENMASK(31, 24)
+
+enum t7xx_int {
+ DPMAIF_INT,
+ CLDMA0_INT,
+ CLDMA1_INT,
+ CLDMA2_INT,
+ MHCCIF_INT,
+ DPMAIF2_INT,
+ SAP_RGU_INT,
+ CLDMA3_INT,
+};
+
+/* DPMA definitions */
+
+#define DPMAIF_PD_BASE 0x1022d000
+#define BASE_DPMAIF_UL DPMAIF_PD_BASE
+#define BASE_DPMAIF_DL (DPMAIF_PD_BASE + 0x100)
+#define BASE_DPMAIF_AP_MISC (DPMAIF_PD_BASE + 0x400)
+#define BASE_DPMAIF_MMW_HPC (DPMAIF_PD_BASE + 0x600)
+#define BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX (DPMAIF_PD_BASE + 0x900)
+#define BASE_DPMAIF_PD_SRAM_DL (DPMAIF_PD_BASE + 0xc00)
+#define BASE_DPMAIF_PD_SRAM_UL (DPMAIF_PD_BASE + 0xd00)
+
+#define DPMAIF_AO_BASE 0x10014000
+#define BASE_DPMAIF_AO_UL DPMAIF_AO_BASE
+#define BASE_DPMAIF_AO_DL (DPMAIF_AO_BASE + 0x400)
+
+#define DPMAIF_UL_ADD_DESC (BASE_DPMAIF_UL + 0x00)
+#define DPMAIF_UL_CHK_BUSY (BASE_DPMAIF_UL + 0x88)
+#define DPMAIF_UL_RESERVE_AO_RW (BASE_DPMAIF_UL + 0xac)
+#define DPMAIF_UL_ADD_DESC_CH0 (BASE_DPMAIF_UL + 0xb0)
+
+#define DPMAIF_DL_BAT_INIT (BASE_DPMAIF_DL + 0x00)
+#define DPMAIF_DL_BAT_ADD (BASE_DPMAIF_DL + 0x04)
+#define DPMAIF_DL_BAT_INIT_CON0 (BASE_DPMAIF_DL + 0x08)
+#define DPMAIF_DL_BAT_INIT_CON1 (BASE_DPMAIF_DL + 0x0c)
+#define DPMAIF_DL_BAT_INIT_CON2 (BASE_DPMAIF_DL + 0x10)
+#define DPMAIF_DL_BAT_INIT_CON3 (BASE_DPMAIF_DL + 0x50)
+#define DPMAIF_DL_CHK_BUSY (BASE_DPMAIF_DL + 0xb4)
+
+#define DPMAIF_AP_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x00)
+#define DPMAIF_AP_APDL_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x50)
+#define DPMAIF_AP_IP_BUSY (BASE_DPMAIF_AP_MISC + 0x60)
+#define DPMAIF_AP_CG_EN (BASE_DPMAIF_AP_MISC + 0x68)
+#define DPMAIF_AP_OVERWRITE_CFG (BASE_DPMAIF_AP_MISC + 0x90)
+#define DPMAIF_AP_MEM_CLR (BASE_DPMAIF_AP_MISC + 0x94)
+#define DPMAIF_AP_ALL_L2TISAR0_MASK GENMASK(31, 0)
+#define DPMAIF_AP_APDL_ALL_L2TISAR0_MASK GENMASK(31, 0)
+#define DPMAIF_AP_IP_BUSY_MASK GENMASK(31, 0)
+
+#define DPMAIF_AO_UL_INIT_SET (BASE_DPMAIF_AO_UL + 0x0)
+#define DPMAIF_AO_UL_CHNL_ARB0 (BASE_DPMAIF_AO_UL + 0x1c)
+#define DPMAIF_AO_UL_AP_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x80)
+#define DPMAIF_AO_UL_AP_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x84)
+#define DPMAIF_AO_UL_AP_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x88)
+#define DPMAIF_AO_UL_AP_L1TIMR0 (BASE_DPMAIF_AO_UL + 0x8c)
+#define DPMAIF_AO_UL_APDL_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x90)
+#define DPMAIF_AO_UL_APDL_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x94)
+#define DPMAIF_AO_UL_APDL_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x98)
+#define DPMAIF_AO_AP_DLUL_IP_BUSY_MASK (BASE_DPMAIF_AO_UL + 0x9c)
+
+#define DPMAIF_AO_UL_CHNL0_CON0 (BASE_DPMAIF_PD_SRAM_UL + 0x10)
+#define DPMAIF_AO_UL_CHNL0_CON1 (BASE_DPMAIF_PD_SRAM_UL + 0x14)
+#define DPMAIF_AO_UL_CHNL0_CON2 (BASE_DPMAIF_PD_SRAM_UL + 0x18)
+#define DPMAIF_AO_UL_CH0_STA (BASE_DPMAIF_PD_SRAM_UL + 0x70)
+
+#define DPMAIF_AO_DL_INIT_SET (BASE_DPMAIF_AO_DL + 0x00)
+#define DPMAIF_AO_DL_IRQ_MASK (BASE_DPMAIF_AO_DL + 0x0c)
+#define DPMAIF_AO_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_AO_DL + 0x28)
+#define DPMAIF_AO_DL_DLQPIT_TRIG_THRES (BASE_DPMAIF_AO_DL + 0x34)
+
+#define DPMAIF_AO_DL_PKTINFO_CON0 (BASE_DPMAIF_PD_SRAM_DL + 0x00)
+#define DPMAIF_AO_DL_PKTINFO_CON1 (BASE_DPMAIF_PD_SRAM_DL + 0x04)
+#define DPMAIF_AO_DL_PKTINFO_CON2 (BASE_DPMAIF_PD_SRAM_DL + 0x08)
+#define DPMAIF_AO_DL_RDY_CHK_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x0c)
+#define DPMAIF_AO_DL_RDY_CHK_FRG_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x10)
+
+#define DPMAIF_AO_DL_DLQ_AGG_CFG (BASE_DPMAIF_PD_SRAM_DL + 0x20)
+#define DPMAIF_AO_DL_DLQPIT_TIMEOUT0 (BASE_DPMAIF_PD_SRAM_DL + 0x24)
+#define DPMAIF_AO_DL_DLQPIT_TIMEOUT1 (BASE_DPMAIF_PD_SRAM_DL + 0x28)
+#define DPMAIF_AO_DL_HPC_CNTL (BASE_DPMAIF_PD_SRAM_DL + 0x38)
+#define DPMAIF_AO_DL_PIT_SEQ_END (BASE_DPMAIF_PD_SRAM_DL + 0x40)
+
+#define DPMAIF_AO_DL_BAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xd8)
+#define DPMAIF_AO_DL_BAT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xdc)
+#define DPMAIF_AO_DL_PIT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xec)
+#define DPMAIF_AO_DL_PIT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x60)
+#define DPMAIF_AO_DL_FRGBAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x78)
+#define DPMAIF_AO_DL_DLQ_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xa4)
+
+#define DPMAIF_HPC_INTR_MASK (BASE_DPMAIF_MMW_HPC + 0x0f4)
+#define DPMA_HPC_ALL_INT_MASK GENMASK(15, 0)
+
+#define DPMAIF_HPC_DLQ_PATH_MODE 3
+#define DPMAIF_HPC_ADD_MODE_DF 0
+#define DPMAIF_HPC_TOTAL_NUM 8
+#define DPMAIF_HPC_MAX_TOTAL_NUM 8
+
+#define DPMAIF_DL_DLQPIT_INIT (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x00)
+#define DPMAIF_DL_DLQPIT_ADD (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x10)
+#define DPMAIF_DL_DLQPIT_INIT_CON0 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x14)
+#define DPMAIF_DL_DLQPIT_INIT_CON1 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x18)
+#define DPMAIF_DL_DLQPIT_INIT_CON2 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x1c)
+#define DPMAIF_DL_DLQPIT_INIT_CON3 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x20)
+#define DPMAIF_DL_DLQPIT_INIT_CON4 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x24)
+#define DPMAIF_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x28)
+#define DPMAIF_DL_DLQPIT_INIT_CON6 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x2c)
+
+#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q))
+#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q))
+#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q))
+#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q))
+#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q))
+
+#define DPMAIF_UL_DRB_RIDX_MSK GENMASK(31, 16)
+
+#define DPMAIF_AP_RGU_ASSERT 0x10001150
+#define DPMAIF_AP_RGU_DEASSERT 0x10001154
+#define DPMAIF_AP_RST_BIT BIT(2)
+
+#define DPMAIF_AP_AO_RGU_ASSERT 0x10001140
+#define DPMAIF_AP_AO_RGU_DEASSERT 0x10001144
+#define DPMAIF_AP_AO_RST_BIT BIT(6)
+
+/* DPMAIF init/restore */
+#define DPMAIF_UL_ADD_NOT_READY BIT(31)
+#define DPMAIF_UL_ADD_UPDATE BIT(31)
+#define DPMAIF_UL_ADD_COUNT_MASK GENMASK(15, 0)
+#define DPMAIF_UL_ALL_QUE_ARB_EN GENMASK(11, 8)
+
+#define DPMAIF_DL_ADD_UPDATE BIT(31)
+#define DPMAIF_DL_ADD_NOT_READY BIT(31)
+#define DPMAIF_DL_FRG_ADD_UPDATE BIT(16)
+#define DPMAIF_DL_ADD_COUNT_MASK GENMASK(15, 0)
+
+#define DPMAIF_DL_BAT_INIT_ALLSET BIT(0)
+#define DPMAIF_DL_BAT_FRG_INIT BIT(16)
+#define DPMAIF_DL_BAT_INIT_EN BIT(31)
+#define DPMAIF_DL_BAT_INIT_NOT_READY BIT(31)
+#define DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT 0
+
+#define DPMAIF_DL_PIT_INIT_ALLSET BIT(0)
+#define DPMAIF_DL_PIT_INIT_EN BIT(31)
+#define DPMAIF_DL_PIT_INIT_NOT_READY BIT(31)
+
+#define DPMAIF_BAT_REMAIN_SZ_BASE 16
+#define DPMAIF_BAT_BUFFER_SZ_BASE 128
+#define DPMAIF_FRG_BUFFER_SZ_BASE 128
+
+#define DLQ_PIT_IDX_SIZE 0x20
+
+#define DPMAIF_PIT_SIZE_MSK GENMASK(17, 0)
+
+#define DPMAIF_PIT_REM_CNT_MSK GENMASK(17, 0)
+
+#define DPMAIF_BAT_EN_MSK BIT(16)
+#define DPMAIF_FRG_EN_MSK BIT(28)
+#define DPMAIF_BAT_SIZE_MSK GENMASK(15, 0)
+
+#define DPMAIF_BAT_BID_MAXCNT_MSK GENMASK(31, 16)
+#define DPMAIF_BAT_REMAIN_MINSZ_MSK GENMASK(15, 8)
+#define DPMAIF_PIT_CHK_NUM_MSK GENMASK(31, 24)
+#define DPMAIF_BAT_BUF_SZ_MSK GENMASK(16, 8)
+#define DPMAIF_FRG_BUF_SZ_MSK GENMASK(16, 8)
+#define DPMAIF_BAT_RSV_LEN_MSK GENMASK(7, 0)
+#define DPMAIF_PKT_ALIGN_MSK GENMASK(23, 22)
+
+#define DPMAIF_BAT_CHECK_THRES_MSK GENMASK(21, 16)
+#define DPMAIF_FRG_CHECK_THRES_MSK GENMASK(7, 0)
+
+#define DPMAIF_PKT_ALIGN_EN BIT(23)
+
+#define DPMAIF_DRB_SIZE_MSK GENMASK(15, 0)
+
+#define DPMAIF_DL_RD_WR_IDX_MSK GENMASK(17, 0)
+
+/* DPMAIF_UL_CHK_BUSY */
+#define DPMAIF_UL_IDLE_STS BIT(11)
+/* DPMAIF_DL_CHK_BUSY */
+#define DPMAIF_DL_IDLE_STS BIT(23)
+/* DPMAIF_AO_DL_RDY_CHK_THRES */
+#define DPMAIF_DL_PKT_CHECKSUM_EN BIT(31)
+#define DPMAIF_PORT_MODE_PCIE BIT(30)
+#define DPMAIF_DL_BURST_PIT_EN BIT(13)
+/* DPMAIF_DL_BAT_INIT_CON1 */
+#define DPMAIF_DL_BAT_CACHE_PRI BIT(22)
+/* DPMAIF_AP_MEM_CLR */
+#define DPMAIF_MEM_CLR BIT(0)
+/* DPMAIF_AP_OVERWRITE_CFG */
+#define DPMAIF_SRAM_SYNC BIT(0)
+/* DPMAIF_AO_UL_INIT_SET */
+#define DPMAIF_UL_INIT_DONE BIT(0)
+/* DPMAIF_AO_DL_INIT_SET */
+#define DPMAIF_DL_INIT_DONE BIT(0)
+/* DPMAIF_AO_DL_PIT_SEQ_END */
+#define DPMAIF_DL_PIT_SEQ_MSK GENMASK(7, 0)
+/* DPMAIF_UL_RESERVE_AO_RW */
+#define DPMAIF_PCIE_MODE_SET_VALUE 0x55
+/* DPMAIF_AP_CG_EN */
+#define DPMAIF_CG_EN 0x7f
+
+#define DPMAIF_UDL_IP_BUSY BIT(0)
+#define DPMAIF_DL_INT_DLQ0_QDONE BIT(8)
+#define DPMAIF_DL_INT_DLQ1_QDONE BIT(9)
+#define DPMAIF_DL_INT_DLQ0_PITCNT_LEN BIT(10)
+#define DPMAIF_DL_INT_DLQ1_PITCNT_LEN BIT(11)
+#define DPMAIF_DL_INT_Q2TOQ1 BIT(24)
+#define DPMAIF_DL_INT_Q2APTOP BIT(25)
+
+#define DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS GENMASK(15, 0)
+#define DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK GENMASK(31, 16)
+
+/* DPMAIF DLQ HW configure */
+#define DPMAIF_AGG_MAX_LEN_DF 65535
+#define DPMAIF_AGG_TBL_ENT_NUM_DF 50
+#define DPMAIF_HASH_PRIME_DF 13
+#define DPMAIF_MID_TIMEOUT_THRES_DF 100
+#define DPMAIF_DLQ_TIMEOUT_THRES_DF 100
+#define DPMAIF_DLQ_PRS_THRES_DF 10
+#define DPMAIF_DLQ_HASH_BIT_CHOOSE_DF 0
+
+#define DPMAIF_DLQPIT_EN_MSK BIT(20)
+#define DPMAIF_DLQPIT_CHAN_OFS 16
+#define DPMAIF_ADD_DLQ_PIT_CHAN_OFS 20
+
+#endif /* __T7XX_REG_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
new file mode 100644
index 000000000000..0bcca08ff2bd
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_mhccif.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_pci.h"
+#include "t7xx_pcie_mac.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
+
+#define FSM_DRM_DISABLE_DELAY_MS 200
+#define FSM_EVENT_POLL_INTERVAL_MS 20
+#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000
+#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
+#define FSM_CMD_TIMEOUT_MS 2000
+
+void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ list_add_tail(&notifier->entry, &ctl->notifier_list);
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+}
+
+void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
+{
+ struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
+ if (notifier_cur == notifier)
+ list_del(&notifier->entry);
+ }
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+}
+
+static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ struct t7xx_fsm_notifier *notifier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ list_for_each_entry(notifier, &ctl->notifier_list, entry) {
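+ /* Drop the lock across the callback; notifier_fn may block */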
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+ if (notifier->notifier_fn)
+ notifier->notifier_fn(state, notifier->data);
+
+ spin_lock_irqsave(&ctl->notifier_lock, flags);
+ }
+ spin_unlock_irqrestore(&ctl->notifier_lock, flags);
+}
+
+void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
+{
+ ctl->md_state = state;
+
+ /* Notify the ports first, otherwise sending a message during HS2 may fail */
+ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
+ fsm_state_notify(ctl->md, state);
+}
+
+static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
+{
+ if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ *cmd->ret = result;
+ complete_all(cmd->done);
+ }
+
+ kfree(cmd);
+}
+
+static void fsm_del_kf_event(struct t7xx_fsm_event *event)
+{
+ list_del(&event->entry);
+ kfree(event);
+}
+
+static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
+{
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_event *event, *evt_next;
+ struct t7xx_fsm_command *cmd, *cmd_next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
+ dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
+ list_del(&cmd->entry);
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ }
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
+ dev_warn(dev, "Unhandled event %d\n", event->event_id);
+ fsm_del_kf_event(event);
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+}
+
+static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
+ enum t7xx_fsm_event_state event_ignore, int retries)
+{
+ struct t7xx_fsm_event *event;
+ bool event_received = false;
+ unsigned long flags;
+ int cnt = 0;
+
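+ /* Only the queue head is inspected; an unrelated head event triggers
+ * a sleep-and-retry rather than being consumed.
+ */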
+ while (cnt++ < retries && !event_received) {
+ bool sleep_required = true;
+
+ if (kthread_should_stop())
+ return;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
+ if (event) {
+ event_received = event->event_id == event_expected;
+ if (event_received || event->event_id == event_ignore) {
+ fsm_del_kf_event(event);
+ sleep_required = false;
+ }
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ if (sleep_required)
+ msleep(FSM_EVENT_POLL_INTERVAL_MS);
+ }
+}
+
+static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
+ enum t7xx_ex_reason reason)
+{
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+
+ if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
+ if (cmd)
+ fsm_finish_command(ctl, cmd, -EINVAL);
+
+ return;
+ }
+
+ ctl->curr_state = FSM_STATE_EXCEPTION;
+
+ switch (reason) {
+ case EXCEPTION_HS_TIMEOUT:
+ dev_err(dev, "Boot Handshake failure\n");
+ break;
+
+ case EXCEPTION_EVENT:
+ dev_err(dev, "Exception event\n");
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
+ t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
+ t7xx_md_exception_handshake(ctl->md);
+
+ fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
+ FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
+ fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
+ FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
+ break;
+
+ default:
+ dev_err(dev, "Exception %d\n", reason);
+ break;
+ }
+
+ if (cmd)
+ fsm_finish_command(ctl, cmd, 0);
+}
+
+static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
+{
+ ctl->curr_state = FSM_STATE_STOPPED;
+
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
+ return t7xx_md_reset(ctl->md->t7xx_dev);
+}
+
+static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
+{
+ if (ctl->curr_state == FSM_STATE_STOPPED) {
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ return;
+ }
+
+ fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
+}
+
+static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct cldma_ctrl *md_ctrl;
+ int err;
+
+ if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ return;
+ }
+
+ md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+ t7xx_dev = ctl->md->t7xx_dev;
+
+ ctl->curr_state = FSM_STATE_STOPPING;
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
+ t7xx_cldma_stop(md_ctrl);
+
+ if (!ctl->md->rgu_irq_asserted) {
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+ /* Wait for the DRM disable to take effect */
+ msleep(FSM_DRM_DISABLE_DELAY_MS);
+
+ err = t7xx_acpi_fldr_func(t7xx_dev);
+ if (err)
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ }
+
+ fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
+}
+
+static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
+{
+ if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
+ return;
+
+ ctl->md_state = MD_STATE_READY;
+
+ fsm_state_notify(ctl->md, MD_STATE_READY);
+ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
+}
+
+static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
+{
+ struct t7xx_modem *md = ctl->md;
+
+ ctl->curr_state = FSM_STATE_READY;
+ t7xx_fsm_broadcast_ready_state(ctl);
+ t7xx_md_event_notify(md, FSM_READY);
+}
+
+static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
+{
+ struct t7xx_modem *md = ctl->md;
+ struct device *dev;
+
+ ctl->curr_state = FSM_STATE_STARTING;
+
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
+ t7xx_md_event_notify(md, FSM_START);
+
+ wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
+ HZ * 60);
+ dev = &md->t7xx_dev->pdev->dev;
+
+ if (ctl->exp_flg)
+ dev_err(dev, "MD exception captured during handshake\n");
+
+ if (!md->core_md.ready) {
+ dev_err(dev, "MD handshake timeout\n");
+ if (md->core_md.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+
+ t7xx_pci_pm_init_late(md->t7xx_dev);
+ fsm_routine_ready(ctl);
+ return 0;
+}
+
+static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
+{
+ struct t7xx_modem *md = ctl->md;
+ u32 dev_status;
+ int ret;
+
+ if (!md)
+ return;
+
+ if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
+ ctl->curr_state != FSM_STATE_STOPPED) {
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ return;
+ }
+
+ ctl->curr_state = FSM_STATE_PRE_START;
+ t7xx_md_event_notify(md, FSM_PRE_START);
+
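+ /* Poll the device status every 20 ms, for up to 2 s, until the device
+ * reports the Linux stage.
+ */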
+ ret = read_poll_timeout(ioread32, dev_status,
+ (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
+ false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ if (ret) {
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+
+ fsm_finish_command(ctl, cmd, -ETIMEDOUT);
+ dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
+ return;
+ }
+
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+}
+
+static int fsm_main_thread(void *data)
+{
+ struct t7xx_fsm_ctl *ctl = data;
+ struct t7xx_fsm_command *cmd;
+ unsigned long flags;
+
+ while (!kthread_should_stop()) {
+ if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
+ kthread_should_stop()))
+ continue;
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
+ list_del(&cmd->entry);
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+
+ switch (cmd->cmd_id) {
+ case FSM_CMD_START:
+ fsm_routine_start(ctl, cmd);
+ break;
+
+ case FSM_CMD_EXCEPTION:
+ fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
+ break;
+
+ case FSM_CMD_PRE_STOP:
+ fsm_routine_stopping(ctl, cmd);
+ break;
+
+ case FSM_CMD_STOP:
+ fsm_routine_stopped(ctl, cmd);
+ break;
+
+ default:
+ fsm_finish_command(ctl, cmd, -EINVAL);
+ fsm_flush_event_cmd_qs(ctl);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct t7xx_fsm_command *cmd;
+ unsigned long flags;
+ int ret;
+
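+ /* Use an atomic allocation when queued from IRQ context */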
+ cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cmd->entry);
+ cmd->cmd_id = cmd_id;
+ cmd->flag = flag;
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ cmd->done = &done;
+ cmd->ret = &ret;
+ }
+
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ list_add_tail(&cmd->entry, &ctl->command_queue);
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+
+ wake_up(&ctl->command_wq);
+
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ unsigned long wait_ret;
+
+ wait_ret = wait_for_completion_timeout(&done,
+ msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
+ if (!wait_ret)
+ return -ETIMEDOUT;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
+ unsigned char *data, unsigned int length)
+{
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_event *event;
+ unsigned long flags;
+
+ if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
+ dev_err(dev, "Invalid event %d\n", event_id);
+ return -EINVAL;
+ }
+
+ event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&event->entry);
+ event->event_id = event_id;
+ event->length = length;
+
+ if (data && length)
+ memcpy(event->data, data, length);
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_add_tail(&event->entry, &ctl->event_queue);
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ wake_up_all(&ctl->event_wq);
+ return 0;
+}
+
+void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
+{
+ struct t7xx_fsm_event *event, *evt_next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
+ if (event->event_id == event_id)
+ fsm_del_kf_event(event);
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+}
+
+enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
+{
+ if (ctl)
+ return ctl->md_state;
+
+ return MD_STATE_INVALID;
+}
+
+unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
+{
+ if (ctl)
+ return ctl->curr_state;
+
+ return FSM_STATE_STOPPED;
+}
+
+int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
+{
+ unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;
+
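+ /* PORT_ENUM kicks off modem start; a CCIF exception aborts any pending
+ * handshake wait and queues exception handling.
+ */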
+ if (type == MD_IRQ_PORT_ENUM) {
+ return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
+ } else if (type == MD_IRQ_CCIF_EX) {
+ ctl->exp_flg = true;
+ wake_up(&ctl->async_hk_wq);
+ cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
+ return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
+ }
+
+ return -EINVAL;
+}
+
+void t7xx_fsm_reset(struct t7xx_modem *md)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ fsm_flush_event_cmd_qs(ctl);
+ ctl->curr_state = FSM_STATE_STOPPED;
+ ctl->exp_flg = false;
+}
+
+int t7xx_fsm_init(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_ctl *ctl;
+
+ ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ md->fsm_ctl = ctl;
+ ctl->md = md;
+ ctl->curr_state = FSM_STATE_INIT;
+ INIT_LIST_HEAD(&ctl->command_queue);
+ INIT_LIST_HEAD(&ctl->event_queue);
+ init_waitqueue_head(&ctl->async_hk_wq);
+ init_waitqueue_head(&ctl->event_wq);
+ INIT_LIST_HEAD(&ctl->notifier_list);
+ init_waitqueue_head(&ctl->command_wq);
+ spin_lock_init(&ctl->event_lock);
+ spin_lock_init(&ctl->command_lock);
+ ctl->exp_flg = false;
+ spin_lock_init(&ctl->notifier_lock);
+
+ ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
+ return PTR_ERR_OR_ZERO(ctl->fsm_thread);
+}
+
+void t7xx_fsm_uninit(struct t7xx_modem *md)
+{
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ if (!ctl)
+ return;
+
+ if (ctl->fsm_thread)
+ kthread_stop(ctl->fsm_thread);
+
+ fsm_flush_event_cmd_qs(ctl);
+}
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
new file mode 100644
index 000000000000..b1af0259d4c5
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Eliot Lee <eliot.lee@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_MONITOR_H__
+#define __T7XX_MONITOR_H__
+
+#include <linux/bits.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "t7xx_modem_ops.h"
+
+enum t7xx_fsm_state {
+ FSM_STATE_INIT,
+ FSM_STATE_PRE_START,
+ FSM_STATE_STARTING,
+ FSM_STATE_READY,
+ FSM_STATE_EXCEPTION,
+ FSM_STATE_STOPPING,
+ FSM_STATE_STOPPED,
+};
+
+enum t7xx_fsm_event_state {
+ FSM_EVENT_INVALID,
+ FSM_EVENT_MD_HS2,
+ FSM_EVENT_MD_EX,
+ FSM_EVENT_MD_EX_REC_OK,
+ FSM_EVENT_MD_EX_PASS,
+ FSM_EVENT_MD_HS2_EXIT,
+ FSM_EVENT_MAX
+};
+
+enum t7xx_fsm_cmd_state {
+ FSM_CMD_INVALID,
+ FSM_CMD_START,
+ FSM_CMD_EXCEPTION,
+ FSM_CMD_PRE_STOP,
+ FSM_CMD_STOP,
+};
+
+enum t7xx_ex_reason {
+ EXCEPTION_HS_TIMEOUT,
+ EXCEPTION_EVENT,
+};
+
+enum t7xx_md_irq_type {
+ MD_IRQ_WDT,
+ MD_IRQ_CCIF_EX,
+ MD_IRQ_PORT_ENUM,
+};
+
+enum md_state {
+ MD_STATE_INVALID,
+ MD_STATE_WAITING_FOR_HS1,
+ MD_STATE_WAITING_FOR_HS2,
+ MD_STATE_READY,
+ MD_STATE_EXCEPTION,
+ MD_STATE_WAITING_TO_STOP,
+ MD_STATE_STOPPED,
+};
+
+#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION BIT(0)
+#define FSM_CMD_FLAG_FLIGHT_MODE BIT(1)
+#define FSM_CMD_FLAG_IN_INTERRUPT BIT(2)
+#define FSM_CMD_EX_REASON GENMASK(23, 16)
+
+struct t7xx_fsm_ctl {
+ struct t7xx_modem *md;
+ enum md_state md_state;
+ unsigned int curr_state;
+ struct list_head command_queue;
+ struct list_head event_queue;
+ wait_queue_head_t command_wq;
+ wait_queue_head_t event_wq;
+ wait_queue_head_t async_hk_wq;
+ spinlock_t event_lock; /* Protects event queue */
+ spinlock_t command_lock; /* Protects command queue */
+ struct task_struct *fsm_thread;
+ bool exp_flg;
+ spinlock_t notifier_lock; /* Protects notifier list */
+ struct list_head notifier_list;
+};
+
+struct t7xx_fsm_event {
+ struct list_head entry;
+ enum t7xx_fsm_event_state event_id;
+ unsigned int length;
+ unsigned char data[];
+};
+
+struct t7xx_fsm_command {
+ struct list_head entry;
+ enum t7xx_fsm_cmd_state cmd_id;
+ unsigned int flag;
+ struct completion *done;
+ int *ret;
+};
+
+struct t7xx_fsm_notifier {
+ struct list_head entry;
+ int (*notifier_fn)(enum md_state state, void *data);
+ void *data;
+};
+
+int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id,
+ unsigned int flag);
+int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
+ unsigned char *data, unsigned int length);
+void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id);
+void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state);
+void t7xx_fsm_reset(struct t7xx_modem *md);
+int t7xx_fsm_init(struct t7xx_modem *md);
+void t7xx_fsm_uninit(struct t7xx_modem *md);
+int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type);
+enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl);
+unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl);
+void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier);
+void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier);
+
+#endif /* __T7XX_MONITOR_H__ */
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 1508dc2a497b..62e9f7d6c9fe 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/wwan.h>
#include <net/rtnetlink.h>
@@ -160,6 +161,42 @@ struct dentry *wwan_get_debugfs_dir(struct device *parent)
return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+
+static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
+{
+ struct wwan_device *wwandev;
+
+ if (dev->type != &wwan_dev_type)
+ return 0;
+
+ wwandev = to_wwan_dev(dev);
+
+ return wwandev->debugfs_dir == dir;
+}
+
+static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+void wwan_put_debugfs_dir(struct dentry *dir)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+
+ /* Put twice: once for the reference taken by wwan_dev_get_by_debugfs()
+ * above, once for the reference handed out with the debugfs dir.
+ */
+ put_device(&wwandev->dev);
+ put_device(&wwandev->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif
/* This function allocates and registers a new WWAN device OR if a WWAN device
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index 5b62cf3b3c42..2397a903d8f5 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -33,6 +33,7 @@ static struct dentry *wwan_hwsim_debugfs_devcreate;
static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
static LIST_HEAD(wwan_hwsim_devs);
static unsigned int wwan_hwsim_dev_idx;
+static struct workqueue_struct *wwan_wq;
struct wwan_hwsim_dev {
struct list_head list;
@@ -156,8 +157,8 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
if ((i + 1) < in->len && in->data[i + 1] == '\n')
i++;
n = i - s + 1;
- memcpy(skb_put(out, n), &in->data[s], n);/* Echo */
- memcpy(skb_put(out, 6), "\r\nOK\r\n", 6);
+ skb_put_data(out, &in->data[s], n); /* Echo */
+ skb_put_data(out, "\r\nOK\r\n", 6);
s = i + 1;
port->pstate = AT_PARSER_WAIT_A;
} else if (port->pstate == AT_PARSER_SKIP_LINE) {
@@ -170,7 +171,7 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
if (i > s) {
/* Echo the processed portion of a not yet completed command */
n = i - s;
- memcpy(skb_put(out, n), &in->data[s], n);
+ skb_put_data(out, &in->data[s], n);
}
consume_skb(in);
@@ -310,7 +311,7 @@ err_unreg_dev:
return ERR_PTR(err);
err_free_dev:
- kfree(dev);
+ put_device(&dev->dev);
return ERR_PTR(err);
}
@@ -371,7 +372,7 @@ static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
* waiting this callback to finish in the debugfs_remove() call. So,
* use workqueue.
*/
- schedule_work(&port->del_work);
+ queue_work(wwan_wq, &port->del_work);
return count;
}
@@ -416,7 +417,7 @@ static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
* waiting this callback to finish in the debugfs_remove() call. So,
* use workqueue.
*/
- schedule_work(&dev->del_work);
+ queue_work(wwan_wq, &dev->del_work);
return count;
}
@@ -506,9 +507,15 @@ static int __init wwan_hwsim_init(void)
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
+ wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ if (!wwan_wq)
+ return -ENOMEM;
+
wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class))
- return PTR_ERR(wwan_hwsim_class);
+ if (IS_ERR(wwan_hwsim_class)) {
+ err = PTR_ERR(wwan_hwsim_class);
+ goto err_wq_destroy;
+ }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -523,9 +530,13 @@ static int __init wwan_hwsim_init(void)
return 0;
err_clean_devs:
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
+ flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+err_wq_destroy:
+ destroy_workqueue(wwan_wq);
return err;
}
@@ -534,9 +545,10 @@ static void __exit wwan_hwsim_exit(void)
{
debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
- flush_scheduled_work(); /* Wait deletion works completion */
+ flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+ destroy_workqueue(wwan_wq);
}
module_init(wwan_hwsim_init);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index d9dea4829c86..1545cbee77a4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,7 +48,6 @@
#include <linux/debugfs.h>
typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
@@ -63,7 +62,7 @@ struct pending_tx_info {
* ubuf_to_vif is a helper which finds the struct xenvif from a pointer
* to this field.
*/
- struct ubuf_info callback_struct;
+ struct ubuf_info_msgzc callback_struct;
};
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
@@ -82,8 +81,6 @@ struct xenvif_rx_meta {
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
-#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
-
#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* The maximum number of frags is derived from the size of a grant (same
@@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
-int xenvif_schedulable(struct xenvif *vif);
-
-int xenvif_queue_stopped(struct xenvif_queue *queue);
-void xenvif_wake_queue(struct xenvif_queue *queue);
-
/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
@@ -394,7 +386,6 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -403,9 +394,6 @@ void xenvif_carrier_on(struct xenvif *vif);
void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
bool zerocopy_success);
-/* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
-
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fe8e21ad8ed9..650fa180220f 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -42,7 +42,6 @@
#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
-#define XENVIF_NAPI_WEIGHT 64
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
@@ -70,7 +69,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
wake_up(&queue->dealloc_wq);
}
-int xenvif_schedulable(struct xenvif *vif)
+static int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@@ -178,20 +177,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int xenvif_queue_stopped(struct xenvif_queue *queue)
-{
- struct net_device *dev = queue->vif->dev;
- unsigned int id = queue->id;
- return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
-}
-
-void xenvif_wake_queue(struct xenvif_queue *queue)
-{
- struct net_device *dev = queue->vif->dev;
- unsigned int id = queue->id;
- netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
-}
-
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -606,8 +591,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
}
for (i = 0; i < MAX_PENDING_REQS; i++) {
- queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
- { .callback = xenvif_zerocopy_callback,
+ queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
+ { { .callback = xenvif_zerocopy_callback },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
@@ -738,8 +723,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);
- netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
- XENVIF_NAPI_WEIGHT);
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);
queue->stalled = true;
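Two API shifts meet in this file: the compound literal grows an extra brace level because .callback now lives in the embedded struct ubuf_info, and netif_napi_add() has dropped its weight argument (it applies the default NAPI_POLL_WEIGHT of 64 internally, which is why XENVIF_NAPI_WEIGHT became dead). The same initialization written with designated fields, assuming the anonymous union sketched earlier:

struct ubuf_info_msgzc u = {
	.ubuf = { .callback = xenvif_zerocopy_callback },
	.desc = i,	/* anonymous-union members can be named directly */
	.ctx  = NULL,
};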
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0f7fd159f0f2..3d2081bbbc86 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -112,6 +112,8 @@ static void make_tx_response(struct xenvif_queue *queue,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+
static inline int tx_work_todo(struct xenvif_queue *queue);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
@@ -131,7 +133,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
+static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
@@ -828,7 +830,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
- work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+ work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
if (!work_to_do)
break;
@@ -1199,9 +1201,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
}
mss = skb_shinfo(skb)->gso_size;
- hdrlen = skb_transport_header(skb) -
- skb_mac_header(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
skb_shinfo(skb)->gso_segs =
DIV_ROUND_UP(skb->len - hdrlen, mss);
@@ -1228,11 +1228,12 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct xenvif_queue *queue = ubuf_to_queue(ubuf);
/* This is the only place where we grab this lock, to protect callbacks
@@ -1241,7 +1242,7 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
- ubuf = (struct ubuf_info *) ubuf->ctx;
+ ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
index = pending_index(queue->dealloc_prod);
@@ -1418,7 +1419,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
notify_remote_via_irq(queue->tx_irq);
}
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;
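In the zerocopy callback, uarg_to_msgzc() recovers the driver's wrapper from the base pointer the networking core hands back, and the loop then follows the ctx links that chain every pending slot of a multi-frag skb. A self-contained sketch of that walk, assuming the uarg_to_msgzc() helper from <linux/skbuff.h> (release_slot() is a hypothetical stand-in for the dealloc-ring bookkeeping):

#include <linux/skbuff.h>

static void release_slot(u16 pending_idx)
{
	/* hypothetical: push pending_idx onto the dealloc ring */
}

static void walk_pending_chain(struct ubuf_info *base)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(base);

	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info_msgzc *)ubuf->ctx;	/* next link */
		release_slot(pending_idx);
	} while (ubuf);
}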
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index dbac4c03d21a..932762177110 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -486,7 +486,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
#define RX_BATCH_SIZE 64
-void xenvif_rx_action(struct xenvif_queue *queue)
+static void xenvif_rx_action(struct xenvif_queue *queue)
{
struct sk_buff_head completed_skbs;
unsigned int work_done = 0;
@@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)
queue->rx_copy.completed = &completed_skbs;
while (xenvif_rx_ring_slots_available(queue) &&
+ !skb_queue_empty(&queue->rx_queue) &&
work_done < RX_BATCH_SIZE) {
xenvif_rx_skb(queue);
work_done++;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d24b7a7993aa..c1ba4294f364 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -675,7 +675,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -824,15 +823,11 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
- NULL, hotplug_status_changed,
- "%s/%s", dev->nodename,
- "hotplug-status");
- if (err)
- goto err;
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+ hotplug_status_changed,
+ "%s/%s", dev->nodename, "hotplug-status");
+ if (!err)
be->have_hotplug_status_watch = 1;
- }
netif_tx_wake_all_queues(be->vif->dev);
@@ -870,13 +865,12 @@ static int connect_data_rings(struct backend_info *be,
* queue-N.
*/
if (num_queues == 1) {
- xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
+ xspath = kstrdup(dev->otherend, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM,
"reading ring references");
return -ENOMEM;
}
- strcpy(xspath, dev->otherend);
} else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kzalloc(xspathsize, GFP_KERNEL);
@@ -988,6 +982,7 @@ static int netback_remove(struct xenbus_device *dev)
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
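Besides moving the hotplug-status node removal to netback_remove() (so a later reconnect can re-arm the watch rather than the watch handler tearing down its own node), the single-queue path now uses kstrdup(), which allocates and copies in one step. Sketch of the equivalence:

#include <linux/slab.h>
#include <linux/string.h>

/* Replaces the removed kzalloc(strlen(s) + 1) + strcpy() pair, with no
 * chance of the two length computations drifting apart.
 */
char *xspath = kstrdup(dev->otherend, GFP_KERNEL);
if (!xspath)
	return -ENOMEM;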
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b18246ad999..9af2b027c19c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops;
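Since KBUILD_MODNAME turns xen-netfront into xen_netfront, the new knob should be reachable in the usual two ways (assuming stock module-parameter handling); either one forces the bounce path added further down for devices connected afterwards:

	xen_netfront.trusted=0					(kernel command line)
	echo 0 > /sys/module/xen_netfront/parameters/trusted	(at runtime)

As the talk_to_netback() hunk below shows, a device bounces if either the module parameter or its own per-device "trusted" xenstore node (default 1) flags the backend as untrusted.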
@@ -78,8 +82,6 @@ struct netfront_cb {
#define RX_COPY_THRESHOLD 256
-#define GRANT_INVALID_REF 0
-
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
@@ -175,6 +177,9 @@ struct netfront_info {
/* Is device behaving sane? */
bool broken;
+ /* Should skbs be bounced into a zeroed buffer? */
+ bool bounce;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -224,7 +229,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
{
int i = xennet_rxidx(ri);
grant_ref_t ref = queue->grant_rx_ref[i];
- queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[i] = INVALID_GRANT_REF;
return ref;
}
@@ -273,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
if (unlikely(!skb))
return NULL;
- page = page_pool_dev_alloc_pages(queue->page_pool);
+ page = page_pool_alloc_pages(queue->page_pool,
+ GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
if (unlikely(!page)) {
kfree_skb(skb);
return NULL;
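page_pool_dev_alloc_pages() is a thin wrapper that fixes the gfp mask to GFP_ATOMIC | __GFP_NOWARN; spelling the call out lets the frontend add __GFP_ZERO, so a page that later gets granted to the backend never carries stale data from a previous user. The wrapper being replaced, per the page_pool helpers:

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	return page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
}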
@@ -424,17 +430,15 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
- if (unlikely(gnttab_query_foreign_access(
- queue->grant_tx_ref[id]) != 0)) {
+ if (unlikely(!gnttab_end_foreign_access_ref(
+ queue->grant_tx_ref[id]))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
}
- gnttab_end_foreign_access_ref(
- queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
- queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[id] = INVALID_GRANT_REF;
queue->grant_tx_page[id] = NULL;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb);
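This hunk follows a grant-table API change: gnttab_end_foreign_access_ref() now attempts the revoke itself and returns false while the backend still holds a mapping, replacing the racy gnttab_query_foreign_access()-then-end pair (the GNTMAP_readonly argument is gone as well). The resulting idiom, sketched as a fragment of the gc path above:

	if (!gnttab_end_foreign_access_ref(ref)) {
		/* Backend still maps the grant: the page must not be
		 * recycled, so take the error path instead.
		 */
		dev_alert(dev, "Grant still in use by backend domain\n");
		goto err;
	}
	gnttab_release_grant_reference(&queue->gref_tx_head, ref);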
@@ -669,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
return nxmit;
}
+static struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+ unsigned int headerlen = skb_headroom(skb);
+ /* Align size to allocate full pages and avoid contiguous data leaks */
+ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+ XEN_PAGE_SIZE);
+ struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+ if (!n)
+ return NULL;
+
+ if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+ WARN_ONCE(1, "misaligned skb allocated\n");
+ kfree_skb(n);
+ return NULL;
+ }
+
+ /* Set the data pointer */
+ skb_reserve(n, headerlen);
+ /* Set the tail pointer and length */
+ skb_put(n, skb->len);
+
+ BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+ skb_copy_header(n, skb);
+ return n;
+}
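A worked example of the sizing above, assuming XEN_PAGE_SIZE == 4096: an skb with skb_end_offset() of 1000 and 300 bytes of paged data rounds up to a single full page, so the grant covers only memory this skb owns plus zeroed padding:

	unsigned int size = ALIGN(1000 + 300, 4096);	/* == 4096, one page */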
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
@@ -722,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* The first req should be at least ETH_HLEN size or the packet will be
* dropped by netback.
+ *
+ * If the backend is not trusted bounce all data to zeroed pages to
+ * avoid exposing contiguous data on the granted page not belonging to
+ * the skb.
*/
- if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
- nskb = skb_copy(skb, GFP_ATOMIC);
+ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
@@ -842,13 +877,35 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+ netif_napi_del(&queue->napi);
+ }
+
+ kfree(info->queues);
+ info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_destroy_queues(np);
+}
+
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
unsigned long flags;
spin_lock_irqsave(&queue->rx_cons_lock, flags);
queue->rx.rsp_cons = val;
- queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}
@@ -968,7 +1025,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct device *dev = &queue->info->netdev->dev;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
- unsigned long ret;
int slots = 1;
int err = 0;
u32 verdict;
@@ -987,22 +1043,12 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
for (;;) {
- if (unlikely(rx->status < 0 ||
- rx->offset + rx->status > XEN_PAGE_SIZE)) {
- if (net_ratelimit())
- dev_warn(dev, "rx->offset: %u, size: %d\n",
- rx->offset, rx->status);
- xennet_move_rx_slot(queue, skb, ref);
- err = -EINVAL;
- goto next;
- }
-
/*
* This definitely indicates a bug, either in this driver or in
* the backend driver. In future this should flag the bad
* situation to the system controller to reboot the backend.
*/
- if (ref == GRANT_INVALID_REF) {
+ if (ref == INVALID_GRANT_REF) {
if (net_ratelimit())
dev_warn(dev, "Bad rx response id %d.\n",
rx->id);
@@ -1010,8 +1056,23 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}
- ret = gnttab_end_foreign_access_ref(ref, 0);
- BUG_ON(!ret);
+ if (unlikely(rx->status < 0 ||
+ rx->offset + rx->status > XEN_PAGE_SIZE)) {
+ if (net_ratelimit())
+ dev_warn(dev, "rx->offset: %u, size: %d\n",
+ rx->offset, rx->status);
+ xennet_move_rx_slot(queue, skb, ref);
+ err = -EINVAL;
+ goto next;
+ }
+
+ if (!gnttab_end_foreign_access_ref(ref)) {
+ dev_alert(dev,
+ "Grant still in use by backend domain\n");
+ queue->info->broken = true;
+ dev_alert(dev, "Disabled for further use\n");
+ return -EINVAL;
+ }
gnttab_release_grant_reference(&queue->gref_rx_head, ref);
@@ -1031,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
}
rcu_read_unlock();
-next:
+
__skb_queue_tail(list, skb);
+
+next:
if (!(rx->flags & XEN_NETRXF_more_data))
break;
@@ -1232,6 +1295,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
&need_xdp_flush);
if (unlikely(err)) {
+ if (queue->info->broken) {
+ spin_unlock(&queue->rx_lock);
+ return 0;
+ }
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
@@ -1360,10 +1427,9 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
- GNTMAP_readonly,
- (unsigned long)page_address(queue->grant_tx_page[i]));
+ queue->grant_tx_page[i]);
queue->grant_tx_page[i] = NULL;
- queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[i] = INVALID_GRANT_REF;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
dev_kfree_skb_irq(skb);
}
@@ -1384,7 +1450,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
continue;
ref = queue->grant_rx_ref[id];
- if (ref == GRANT_INVALID_REF)
+ if (ref == INVALID_GRANT_REF)
continue;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -1393,9 +1459,8 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
* foreign access is ended (which may be deferred).
*/
get_page(page);
- gnttab_end_foreign_access(ref, 0,
- (unsigned long)page_address(page));
- queue->grant_rx_ref[id] = GRANT_INVALID_REF;
+ gnttab_end_foreign_access(ref, page);
+ queue->grant_rx_ref[id] = INVALID_GRANT_REF;
kfree_skb(skb);
}
@@ -1473,7 +1538,7 @@ static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
return false;
spin_lock_irqsave(&queue->rx_cons_lock, flags);
- work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
if (work_queued > queue->rx_rsp_unconsumed) {
queue->rx_rsp_unconsumed = work_queued;
*eoi = 0;
@@ -1611,6 +1676,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
static const struct net_device_ops xennet_netdev_ops = {
+ .ndo_uninit = xennet_uninit,
.ndo_open = xennet_open,
.ndo_stop = xennet_close,
.ndo_start_xmit = xennet_start_xmit,
@@ -1733,8 +1799,8 @@ static int netfront_probe(struct xenbus_device *dev,
static void xennet_end_access(int ref, void *page)
{
/* This frees the page as a side-effect */
- if (ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+ if (ref != INVALID_GRANT_REF)
+ gnttab_end_foreign_access(ref, virt_to_page(page));
}
static void xennet_disconnect_backend(struct netfront_info *info)
@@ -1770,8 +1836,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
- queue->tx_ring_ref = GRANT_INVALID_REF;
- queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx_ring_ref = INVALID_GRANT_REF;
+ queue->rx_ring_ref = INVALID_GRANT_REF;
queue->tx.sring = NULL;
queue->rx.sring = NULL;
@@ -1896,41 +1962,26 @@ static int setup_netfront(struct xenbus_device *dev,
{
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
- grant_ref_t gref;
int err;
- queue->tx_ring_ref = GRANT_INVALID_REF;
- queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx_ring_ref = INVALID_GRANT_REF;
+ queue->rx_ring_ref = INVALID_GRANT_REF;
queue->rx.sring = NULL;
queue->tx.sring = NULL;
- txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!txs) {
- err = -ENOMEM;
- xenbus_dev_fatal(dev, err, "allocating tx ring page");
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
+ 1, &queue->tx_ring_ref);
+ if (err)
goto fail;
- }
- SHARED_RING_INIT(txs);
- FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
- err = xenbus_grant_ring(dev, txs, 1, &gref);
- if (err < 0)
- goto grant_tx_ring_fail;
- queue->tx_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
- rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!rxs) {
- err = -ENOMEM;
- xenbus_dev_fatal(dev, err, "allocating rx ring page");
- goto alloc_rx_ring_fail;
- }
- SHARED_RING_INIT(rxs);
- FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
+ 1, &queue->rx_ring_ref);
+ if (err)
+ goto fail;
- err = xenbus_grant_ring(dev, rxs, 1, &gref);
- if (err < 0)
- goto grant_rx_ring_fail;
- queue->rx_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
if (feature_split_evtchn)
err = setup_netfront_split(queue);
@@ -1942,22 +1993,14 @@ static int setup_netfront(struct xenbus_device *dev,
err = setup_netfront_single(queue);
if (err)
- goto alloc_evtchn_fail;
+ goto fail;
return 0;
- /* If we fail to setup netfront, it is safe to just revoke access to
- * granted pages because backend is not accessing it at this point.
- */
-alloc_evtchn_fail:
- gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
-grant_rx_ring_fail:
- free_page((unsigned long)rxs);
-alloc_rx_ring_fail:
- gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
-grant_tx_ring_fail:
- free_page((unsigned long)txs);
-fail:
+ fail:
+ xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
+ xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
+
return err;
}
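xenbus_setup_ring() bundles page allocation, granting, and error reporting into one call, XEN_FRONT_RING_INIT() replaces the separate SHARED_RING_INIT()/FRONT_RING_INIT() pair, and xenbus_teardown_ring() is the inverse, so the ladder of per-step error labels collapses into the single fail: path. A sketch of the pairing, assuming the consolidated xenbus API used above:

#include <xen/xenbus.h>

void *ring;
grant_ref_t gref;
int err;

err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, &ring, 1, &gref);
if (err)
	return err;			/* nothing to unwind */
/* ... on any later failure: */
xenbus_teardown_ring(&ring, 1, &gref);	/* revokes grant, frees page */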
@@ -1986,7 +2029,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
queue->tx_pend_queue = TX_LINK_NONE;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
queue->tx_link[i] = i + 1;
- queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[i] = INVALID_GRANT_REF;
queue->grant_tx_page[i] = NULL;
}
queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
@@ -1994,7 +2037,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
/* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
queue->rx_skbs[i] = NULL;
- queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[i] = INVALID_GRANT_REF;
}
/* A grant for every tx ring slot */
@@ -2103,22 +2146,6 @@ error:
return err;
}
-static void xennet_destroy_queues(struct netfront_info *info)
-{
- unsigned int i;
-
- for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
- struct netfront_queue *queue = &info->queues[i];
-
- if (netif_running(info->netdev))
- napi_disable(&queue->napi);
- netif_napi_del(&queue->napi);
- }
-
- kfree(info->queues);
- info->queues = NULL;
-}
-
static int xennet_create_page_pool(struct netfront_queue *queue)
@@ -2197,8 +2224,7 @@ static int xennet_create_queues(struct netfront_info *info,
return ret;
}
- netif_napi_add(queue->info->netdev, &queue->napi,
- xennet_poll, 64);
+ netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
if (netif_running(info->netdev))
napi_enable(&queue->napi);
}
@@ -2228,6 +2254,10 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
+ /* Check if backend is trusted. */
+ info->bounce = !xennet_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
/* Check if backend supports multiple queues */
max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
@@ -2395,6 +2425,9 @@ static int xennet_connect(struct net_device *dev)
return err;
if (np->netback_has_xdp_headroom)
pr_info("backend supports XDP headroom\n");
+ if (np->bounce)
+ dev_info(&np->xbdev->dev,
+ "bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
@@ -2430,10 +2463,6 @@ static int xennet_connect(struct net_device *dev)
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
- spin_lock_irq(&queue->tx_lock);
- xennet_tx_buf_gc(queue);
- spin_unlock_irq(&queue->tx_lock);
-
spin_lock_bh(&queue->rx_lock);
xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);